From 2f529f9b558ca1c1bd74be7437a84e4711743404 Mon Sep 17 00:00:00 2001
From: hc <hc@nodka.com>
Date: Fri, 01 Nov 2024 02:11:33 +0000
Subject: [PATCH] Add Xenomai v3.2.4 real-time framework

Import the Xenomai v3.2.4 tree (Cobalt core, RTDM drivers, user-space
libraries and testsuite) under kernel/xenomai-v3.2.4/, wire the RTnet,
CAN, Analogy, SPI, GPIO, serial, IPC and UDD drivers into
kernel/drivers/xenomai/, and adapt the base kernel (interrupt pipeline,
scheduler, arm/arm64/x86 arch support) for the I-pipe/Dovetail
pipelines.

---
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c                                   |   40 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h                                        |   54 
 kernel/xenomai-v3.2.4/lib/analogy/descriptor.c                                            |  503 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h                         |   93 
 kernel/arch/arm/kernel/entry-header.S                                                     |   23 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c                       |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h                                        |   31 
 kernel/arch/x86/kernel/cpu/mce/threshold.c                                                |    2 
 kernel/arch/arm/include/asm/outercache.h                                                  |    7 
 kernel/xenomai-v3.2.4/lib/analogy/calibration.h                                           |   68 
 kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h                                              |    2 
 kernel/xenomai-v3.2.4/kernel/drivers/testing/Kconfig                                      |   29 
 kernel/include/xenomai/cobalt/uapi/sched.h                                                |    1 
 kernel/kernel/xenomai/posix/monitor.h                                                     |    1 
 kernel/xenomai-v3.2.4/include/rtdm/analogy.h                                              |  264 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h                                         |  212 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c                         | 4446 
 kernel/arch/arm64/kernel/irq_pipeline.c                                                   |   23 
 kernel/xenomai-v3.2.4/testsuite/Makefile.am                                               |   21 
 kernel/drivers/dma/Kconfig                                                                |   18 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c                          |  608 
 kernel/drivers/xenomai/net/drivers/tulip/eeprom.c                                         |    1 
 kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c                                              |    5 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-8.c                                      |   66 
 kernel/kernel/trace/trace_output.c                                                        |    9 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am                                   |   33 
 kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h                                   |    1 
 kernel/drivers/pinctrl/intel/pinctrl-cherryview.c                                         |    5 
 kernel/arch/arm64/kernel/vdso.c                                                           |   28 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-2.c                                           |  123 
 kernel/lib/dump_stack.c                                                                   |   36 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_sock.c                             |  194 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile                             |    8 
 kernel/xenomai-v3.2.4/lib/copperplate/cluster.c                                           |  601 
 kernel/kernel/xenomai/posix/monitor.c                                                     |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h                                        |   94 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c                           |  174 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Makefile                             |   14 
 kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-common.c                                    |  290 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h                       |   24 
 kernel/include/xenomai/cobalt/kernel/intr.h                                               |    1 
 kernel/xenomai-v3.2.4/utils/analogy/insn_read.c                                           |  462 
 kernel/drivers/clocksource/timer-imx-gpt.c                                                |    8 
 kernel/drivers/cpuidle/cpuidle.c                                                          |   18 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc                                         |   37 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h          |   91 
 kernel/xenomai-v3.2.4/kernel/drivers/testing/Makefile                                     |   13 
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am                             |    8 
 kernel/kernel/irq_work.c                                                                  |    9 
 kernel/xenomai-v3.2.4/include/boilerplate/shavl.h                                         |   30 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Makefile                             |   15 
 kernel/include/linux/smp.h                                                                |   15 
 kernel/xenomai-v3.2.4/lib/copperplate/regd/Makefile.am                                    |   23 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net.h                            |   55 
 kernel/kernel/printk/printk.c                                                             |   73 
 kernel/security/selinux/include/classmap.h                                                |    4 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h         |   30 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h                                        |  248 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_proto.h            |   81 
 kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h                                        |    1 
 kernel/kernel/sched/core.c                                                                |  317 
 kernel/kernel/stop_machine.c                                                              |    4 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/server.c                                |  178 
 kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c                                          |  286 
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h                           |    1 
 kernel/drivers/xenomai/net/stack/iovec.c                                                  |    1 
 kernel/drivers/xenomai/net/drivers/igb/igb.h                                              |    1 
 kernel/drivers/xenomai/net/stack/ipv4/ip_output.c                                         |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h                                          |   55 
 kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig                                          |   92 
 kernel/drivers/xenomai/net/stack/rtwlan.c                                                 |    1 
 kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c                                  |    4 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h                                |  559 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_proc.c                         |  347 
 kernel/xenomai-v3.2.4/include/cobalt/trace.h                                              |   52 
 kernel/arch/x86/include/asm/thread_info.h                                                 |   18 
 kernel/drivers/misc/eeprom/at24.c                                                         |   53 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-2.c                                      |   98 
 kernel/drivers/tty/serial/8250/8250_core.c                                                |   45 
 kernel/arch/x86/kernel/smp.c                                                              |   15 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h                         |   21 
 kernel/xenomai-v3.2.4/include/rtdm/udd.h                                                  |   26 
 kernel/arch/x86/include/asm/pgtable.h                                                     |    5 
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h                              |    1 
 kernel/xenomai-v3.2.4/lib/analogy/calibration.c                                           |  473 
 kernel/mm/ioremap.c                                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/synch.c                                               | 1185 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/Makefile.am                         |    9 
 kernel/arch/x86/kernel/nmi.c                                                              |    4 
 kernel/kernel/fork.c                                                                      |    6 
 kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/Makefile.am                               |   10 
 kernel/drivers/xenomai/gpio/gpio-xilinx.c                                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/icmp.h                        |   56 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_client_event.c                 | 1175 
 kernel/include/xenomai/cobalt/uapi/syscall.h                                              |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/current.c                                                |  156 
 kernel/arch/x86/kernel/apic/x2apic_phys.c                                                 |    4 
 kernel/xenomai-v3.2.4/lib/cobalt/current.h                                                |   98 
 kernel/xenomai-v3.2.4/lib/analogy/Makefile.am                                             |   23 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h                          |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/hash.h                                          |  224 
 kernel/xenomai-v3.2.4/lib/boilerplate/ancillaries.c                                       |  542 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h                 |   28 
 kernel/xenomai-v3.2.4/include/copperplate/heapobj.h                                       |  529 
 kernel/arch/arm/kernel/signal.c                                                           |   34 
 kernel/xenomai-v3.2.4/testsuite/smokey/setsched/setsched.c                                |  149 
 kernel/kernel/xenomai/posix/timerfd.h                                                     |    1 
 kernel/xenomai-v3.2.4/lib/trank/posix.c                                                   |  175 
 kernel/kernel/xenomai/posix/timerfd.c                                                     |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h               |   25 
 kernel/drivers/irqchip/irq-ti-sci-inta.c                                                  |    2 
 kernel/xenomai-v3.2.4/kernel/cobalt/registry.c                                            |  954 
 kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c                                           |    1 
 kernel/include/linux/spinlock.h                                                           |   96 
 kernel/arch/x86/kernel/kvm.c                                                              |   17 
 kernel/drivers/xenomai/net/stack/ipv4/route.c                                             |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/Makefile.am                         |    2 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h               |   66 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README                       |   67 
 kernel/xenomai-v3.2.4/kernel/cobalt/tree.c                                                |   57 
 kernel/xenomai-v3.2.4/lib/alchemy/heap.c                                                  |  679 
 kernel/xenomai-v3.2.4/lib/alchemy/heap.h                                                  |   49 
 kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h                              |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/select.c                                                 |   48 
 kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig                                      |    9 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c                            |   53 
 kernel/include/linux/console.h                                                            |    1 
 kernel/xenomai-v3.2.4/include/trank/native/pipe.h                                         |   35 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc                                    |   72 
 kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c                                  |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.c                     |  214 
 kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h                                          |   36 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h                            |  391 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.h                     |   31 
 kernel/include/asm-generic/xenomai/wrappers.h                                             |    1 
 kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test.in                                    |  116 
 kernel/kernel/xenomai/posix/extension.h                                                   |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/cpu-affinity.c                        |  252 
 kernel/xenomai-v3.2.4/utils/net/rtroute.c                                                 |  393 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c                                       |  541 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am                              |   12 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h                                       |  674 
 kernel/drivers/xenomai/analogy/national_instruments/tio_common.c                          |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/init.c                                                |  325 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c                               | 1854 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-bcm2835.c                                    |  722 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h                        |    1 
 kernel/arch/arm64/include/asm/irq_pipeline.h                                              |  141 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan_io.h                        |  104 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-9.c                                         |   85 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-3.c                                       |   68 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c                         |  902 
 kernel/kernel/xenomai/sched-rt.c                                                          |    1 
 kernel/xenomai-v3.2.4/lib/boilerplate/time.c                                              |   85 
 kernel/drivers/xenomai/can/mscan/rtcan_mscan.h                                            |    1 
 kernel/kernel/xenomai/posix/thread.c                                                      |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h           |  142 
 kernel/arch/arm/kernel/ptrace.c                                                           |    2 
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h                                |    1 
 kernel/arch/x86/kernel/irq.c                                                              |   20 
 kernel/kernel/xenomai/posix/thread.h                                                      |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/rt_imx_uart.c                                 | 1651 
 kernel/xenomai-v3.2.4/include/boilerplate/scope.h                                         |   78 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c                                        | 1093 
 kernel/drivers/xenomai/net/stack/include/ethernet/eth.h                                   |    1 
 kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c                                        |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h   |   95 
 kernel/xenomai-v3.2.4/include/alchemy/Makefile.am                                         |   15 
 kernel/drivers/xenomai/analogy/testing/Makefile                                           |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h                    |   27 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting                            |  251 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c   | 1401 
 kernel/drivers/xenomai/serial/rt_imx_uart.c                                               |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/malloc.c                                                 |   32 
 kernel/arch/arm64/xenomai/dovetail/machine.c                                              |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/21142.c                            |   51 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Makefile                          |    5 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtwlan.c                                   |  219 
 kernel/drivers/xenomai/can/mscan/rtcan_mscan.c                                            |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h                       |    1 
 kernel/drivers/xenomai/net/stack/rtmac/Makefile                                           |    1 
 kernel/drivers/xenomai/net/addons/Kconfig                                                 |    1 
 kernel/include/dovetail/mm_info.h                                                         |   12 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c                         |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c                       |    1 
 kernel/include/asm-generic/xenomai/syscall.h                                              |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h                         |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h       |   92 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Makefile                                   |   26 
 kernel/xenomai-v3.2.4/utils/slackspot/Makefile.am                                         |    7 
 kernel/kernel/xenomai/posix/nsem.c                                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_ioctl.c                    |  663 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h          |   85 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h                                        |   92 
 kernel/arch/x86/kernel/apic/io_apic.c                                                     |   85 
 kernel/include/linux/irqflags.h                                                           |   37 
 kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h                                   |    1 
 kernel/xenomai-v3.2.4/include/vxworks/rngLib.h                                            |   61 
 kernel/arch/arm/mm/context.c                                                              |   18 
 kernel/drivers/xenomai/net/stack/stack_mgr.c                                              |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/pshared.c                           |  121 
 kernel/xenomai-v3.2.4/lib/cobalt/Makefile.am                                              |   68 
 kernel/xenomai-v3.2.4/utils/hdb/Makefile.am                                               |   15 
 kernel/arch/x86/include/asm/mmu_context.h                                                 |    7 
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h                              |    1 
 kernel/xenomai-v3.2.4/CONTRIBUTING.md                                                     |  118 
 kernel/include/linux/mm_types.h                                                           |    5 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_rtpc.h                       |   71 
 kernel/arch/arm64/kernel/vdso32/vdso.lds.S                                                |    3 
 kernel/include/xenomai/pipeline/vdso_fallback.h                                           |    1 
 kernel/include/xenomai/cobalt/kernel/thread.h                                             |    1 
 kernel/drivers/xenomai/gpio/gpio-mxc.c                                                    |    1 
 kernel/xenomai-v3.2.4/include/trank/posix/pthread.h                                       |   93 
 kernel/xenomai-v3.2.4/lib/cobalt/sigshadow.c                                              |  133 
 kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c                                    |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/task.h                                                  |   78 
 kernel/drivers/xenomai/net/drivers/e1000e/defines.h                                       |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/sched-quota.c                          |  335 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/udp.c                                      |   75 
 kernel/xenomai-v3.2.4/kernel/cobalt/lock.c                                                |   65 
 kernel/xenomai-v3.2.4/lib/alchemy/task.c                                                  | 2181 
 kernel/include/linux/vmalloc.h                                                            |    1 
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h                                 |   67 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/packet_raw.c                        |  122 
 kernel/arch/arm64/kernel/process.c                                                        |   36 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h               |   27 
 kernel/xenomai-v3.2.4/include/cobalt/syslog.h                                             |   51 
 kernel/fs/fcntl.c                                                                         |    2 
 kernel/xenomai-v3.2.4/lib/copperplate/syncobj.c                                           |  626 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig                     |   17 
 kernel/include/linux/thread_info.h                                                        |   66 
 kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig                                               |  489 
 kernel/drivers/xenomai/net/stack/rtnet_rtpc.c                                             |    1 
 kernel/drivers/tty/serial/st-asc.c                                                        |   26 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c                      |  906 
 kernel/drivers/xenomai/gpio/Makefile                                                      |    1 
 kernel/kernel/irq/settings.h                                                              |   34 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile                     |    5 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h                                          |  407 
 kernel/xenomai-v3.2.4/lib/psos/pt.c                                                       |  311 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proc.c                         |  132 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc                                    |  138 
 kernel/init/main.c                                                                        |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_socket.h                     |  108 
 kernel/drivers/xenomai/net/drivers/e1000e/netdev.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile                                        |    5 
 kernel/xenomai-v3.2.4/lib/psos/pt.h                                                       |   46 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h                   |  143 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-6.c                                           |   44 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h                               |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h                                       |  667 
 kernel/xenomai-v3.2.4/lib/psos/Makefile.am                                                |   38 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h                              |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_module.c                 |  161 
 kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c                                      |    1 
 kernel/xenomai-v3.2.4/include/trank/native/misc.h                                         |   57 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_iovec.h                      |   38 
 kernel/xenomai-v3.2.4/kernel/cobalt/timer.c                                               |  719 
 kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h                                    |    1 
 kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h                                       |   46 
 kernel/drivers/xenomai/net/drivers/rt_macb.h                                              |    1 
 kernel/arch/arm64/configs/rockchip_linux_defconfig                                        |  319 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README                                 |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig                                   |   44 
 kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h                                  |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile                                         |    1 
 kernel/include/xenomai/cobalt/kernel/vdso.h                                               |    1 
 kernel/include/xenomai/cobalt/uapi/thread.h                                               |    1 
 kernel/drivers/xenomai/net/stack/ipv4/ip_input.c                                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic2.c                            |  158 
 kernel/kernel/locking/spinlock_debug.c                                                    |    3 
 kernel/xenomai-v3.2.4/utils/net/rtnet.conf.in                                             |   79 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h                                         |   74 
 kernel/xenomai-v3.2.4/testsuite/xeno-test/Makefile.am                                     |   16 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/lst-1.c                                       |  144 
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c                                       |    1 
 kernel/drivers/xenomai/spi/spi-device.h                                                   |    1 
 kernel/drivers/xenomai/spi/spi-device.c                                                   |    1 
 kernel/arch/arm64/kernel/irq.c                                                            |   10 
 kernel/arch/x86/kernel/time.c                                                             |    2 
 kernel/xenomai-v3.2.4/lib/cobalt/mutex.c                                                  | 1006 
 kernel/drivers/base/regmap/internal.h                                                     |    5 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_client_event.h         |   45 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h                                            |  203 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h          |   35 
 kernel/arch/arm/kernel/irq_pipeline.c                                                     |   20 
 kernel/xenomai-v3.2.4/lib/cobalt/init.c                                                   |  370 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Makefile                        |   10 
 kernel/drivers/xenomai/gpio/gpio-cherryview.c                                             |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/interrupt.c                        |  393 
 kernel/include/xenomai/pipeline/sirq.h                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip_core.c                       | 1406 
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h                             |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/tunables.h                                      |  126 
 kernel/xenomai-v3.2.4/lib/cobalt/COPYING                                                  |  458 
 kernel/xenomai-v3.2.4/lib/alchemy/event.c                                                 |  622 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc                                       |   55 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h                                         |   44 
 kernel/include/linux/socket.h                                                             |    4 
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/Makefile.am                               |   10 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/packet_dgram.c                    |   81 
 kernel/xenomai-v3.2.4/lib/boilerplate/init/Makefile.am                                    |   32 
 kernel/xenomai-v3.2.4/lib/alchemy/event.h                                                 |   39 
 kernel/init/Makefile                                                                      |    2 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-5.c                                         |  103 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h                           |    1 
 kernel/drivers/xenomai/net/drivers/tulip/interrupt.c                                      |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h                           |   38 
 kernel/drivers/xenomai/can/mscan/Kconfig                                                  |    1 
 kernel/include/xenomai/rtdm/ipc.h                                                         |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h                        |   48 
 kernel/kernel/printk/printk_safe.c                                                        |    3 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c                               |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/Makefile.am                        |    9 
 kernel/include/xenomai/cobalt/kernel/bufd.h                                               |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h                      |   55 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_vnic.c                         |  333 
 kernel/xenomai-v3.2.4/lib/copperplate/timerobj.c                                          |  288 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_fragment.h                 |   37 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_file.c                         |   81 
 kernel/drivers/xenomai/analogy/driver.c                                                   |    1 
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c               | 1481 
 kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h                                  | 1186 
 kernel/xenomai-v3.2.4/scripts/prepare-kernel.sh                                           |  472 
 kernel/drivers/xenomai/analogy/instruction.c                                              |    1 
 kernel/drivers/xenomai/net/stack/ipv4/Makefile                                            |    1 
 kernel/xenomai-v3.2.4/include/smokey/smokey.h                                             |  274 
 kernel/xenomai-v3.2.4/lib/copperplate/COPYING                                             |  458 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c                                  | 1538 
 kernel/kernel/xenomai/time.c                                                              |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h                                       |  155 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c                                |  427 
 kernel/kernel/xenomai/tree.c                                                              |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h                                     |  522 
 kernel/xenomai-v3.2.4/include/rtdm/can.h                                                  |  239 
 kernel/kernel/Makefile                                                                    |    3 
 kernel/kernel/time/tick-broadcast.c                                                       |   17 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c                          |  593 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/syscall.h                   |  216 
 kernel/drivers/xenomai/net/drivers/at91_ether.c                                           |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c                                           | 1373 
 kernel/kernel/notifier.c                                                                  |    3 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h           |   56 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h                                        |  141 
 kernel/lib/vdso/gettimeofday.c                                                            |  286 
 kernel/xenomai-v3.2.4/utils/Makefile.am                                                   |    5 
 kernel/drivers/irqchip/irq-gic-v3-mbi.c                                                   |    2 
 kernel/drivers/xenomai/can/sja1000/rtcan_mem.c                                            |    1 
 kernel/xenomai-v3.2.4/include/trank/native/queue.h                                        |   26 
 kernel/arch/arm64/include/asm/thread_info.h                                               |   21 
 kernel/include/xenomai/pipeline/tick.h                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile                               |   10 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_ioctl.h          |   31 
 kernel/xenomai-v3.2.4/lib/vxworks/wdLib.h                                                 |   35 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/posix-clock.c                          |  458 
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c                                            |  464 
 kernel/xenomai-v3.2.4/include/boilerplate/namegen.h                                       |   47 
 kernel/drivers/xenomai/net/stack/Makefile                                                 |    1 
 kernel/xenomai-v3.2.4/include/copperplate/traceobj.h                                      |   97 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h                              |    1 
 kernel/xenomai-v3.2.4/utils/analogy/Makefile.am                                           |  100 
 kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c                                      |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c                                          |  224 
 kernel/xenomai-v3.2.4/lib/vxworks/wdLib.c                                                 |  169 
 kernel/xenomai-v3.2.4/include/trank/Makefile.am                                           |   10 
 kernel/drivers/xenomai/udd/Kconfig                                                        |    1 
 kernel/net/packet/af_packet.c                                                             |    1 
 kernel/xenomai-v3.2.4/lib/boilerplate/Makefile.am                                         |  121 
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.c                                         | 1011 
 kernel/drivers/xenomai/ipc/bufp.c                                                         |    1 
 kernel/include/xenomai/cobalt/uapi/sem.h                                                  |    1 
 kernel/drivers/xenomai/analogy/rtdm_interface.c                                           |    1 
 kernel/xenomai-v3.2.4/include/alchemy/buffer.h                                            |  148 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/via-rhine.c                              | 1823 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/route.c                               | 1057 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Kconfig                              |   16 
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.h                                         |   43 
 kernel/drivers/xenomai/spi/Kconfig                                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h       |   23 
 kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile                                      |    1 
 kernel/xenomai-v3.2.4/include/trank/native/buffer.h                                       |   23 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/features.c                                      |  106 
 kernel/kernel/xenomai/posix/process.c                                                     |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c                                  | 2027 
 kernel/xenomai-v3.2.4/kernel/drivers/Kconfig                                              |   35 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h         |   27 
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c                                  |    1 
 kernel/kernel/xenomai/posix/process.h                                                     |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c                                          |  424 
 kernel/arch/arm64/include/asm/efi.h                                                       |    6 
 kernel/drivers/irqchip/irq-bcm2835.c                                                      |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig                                  |  147 
 kernel/xenomai-v3.2.4/lib/smokey/helpers.c                                                |  397 
 kernel/drivers/xenomai/can/rtcan_version.h                                                |    1 
 kernel/drivers/xenomai/ipc/xddp.c                                                         |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c                                          | 1230 
 kernel/xenomai-v3.2.4/utils/net/rtnet.in                                                  |  371 
 kernel/kernel/sched/sched.h                                                               |    2 
 kernel/arch/arm/include/asm/atomic.h                                                      |   16 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-2.c                                           |   65 
 kernel/arch/x86/xen/Kconfig                                                               |    2 
 kernel/drivers/clocksource/mmio.c                                                         |  503 
 kernel/kernel/xenomai/vfile.c                                                             |    1 
 kernel/xenomai-v3.2.4/utils/analogy/cmd_write.c                                           |  551 
 kernel/drivers/xenomai/net/drivers/tulip/media.c                                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c                             |  460 
 kernel/drivers/clocksource/arm_arch_timer.c                                               |   11 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h                                           |   56 
 kernel/drivers/clocksource/clksrc_st_lpc.c                                                |    2 
 kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h                                       |   23 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_ioctl.c                  |   99 
 kernel/arch/arm64/include/asm/syscall.h                                                   |    5 
 kernel/arch/arm/mach-imx/gpc.c                                                            |   21 
 kernel/include/xenomai/rtdm/cobalt.h                                                      |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c                          |  842 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/Makefile                                         |   49 
 kernel/xenomai-v3.2.4/utils/hdb/hdb.c                                                     |  148 
 kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am                                     |   23 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h                          |   48 
 kernel/drivers/xenomai/autotune/Makefile                                                  |    1 
 kernel/drivers/gpio/gpio-davinci.c                                                        |    2 
 kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am                                            |  136 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c                              |  756 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_chrdev.h                     |  116 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/wd-1.c                                        |   92 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h                                        |   40 
 kernel/drivers/xenomai/ipc/rtipc.c                                                        |    1 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc                                     |  117 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h                                         |  109 
 kernel/include/xenomai/rtdm/driver.h                                                      |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h                        |   35 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c                                         |  853 
 kernel/arch/arm/include/asm/bitops.h                                                      |   24 
 kernel/net/sched/sch_oob.c                                                                |  294 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h                         |  852 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h                                             |   75 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c                              |  200 
 kernel/arch/x86/kernel/alternative.c                                                      |   14 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h    |   37 
 kernel/xenomai-v3.2.4/demo/Makefile.am                                                    |    2 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/Makefile                                      |   70 
 kernel/kernel/trace/trace_preemptirq.c                                                    |   52 
 kernel/xenomai-v3.2.4/utils/analogy/cmd_read.c                                            |  435 
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h                               |    1 
 kernel/drivers/xenomai/ipc/Makefile                                                       |    1 
 kernel/drivers/xenomai/spi/spi-sun6i.c                                                    |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h                             |  365 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-1.c                                         |   38 
 kernel/xenomai-v3.2.4/scripts/Kconfig.frag                                                |   49 
 kernel/drivers/xenomai/net/stack/include/rtmac.h                                          |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/syscall.h               |   82 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/Makefile.am                         |    9 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h               |   30 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h                        |   33 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c                                   |  194 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/Makefile.am                            |    9 
 kernel/kernel/xenomai/posix/corectl.c                                                     |    1 
 kernel/drivers/xenomai/net/stack/rtdev_mgr.c                                              |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libalchemy-test.c                           |   65 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c                                         | 2493 
 kernel/xenomai-v3.2.4/include/copperplate/syncobj.h                                       |  233 
 kernel/arch/arm64/kernel/vdso/vdso.lds.S                                                  |    3 
 kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h                                        |    1 
 kernel/drivers/xenomai/can/mscan/Makefile                                                 |    1 
 kernel/include/xenomai/cobalt/kernel/ancillaries.h                                        |    1 
 kernel/include/xenomai/rtdm/uapi/can.h                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c                                 |   40 
 kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c                                   |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-4.c                                      |  103 
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig                            |    1 
 kernel/xenomai-v3.2.4/testsuite/spitest/Makefile.am                                       |   19 
 kernel/include/linux/dmaengine.h                                                          |   41 
 kernel/xenomai-v3.2.4/lib/copperplate/clockobj.c                                          |  396 
 kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h                              |    1 
 kernel/include/linux/interrupt.h                                                          |   29 
 kernel/drivers/xenomai/net/stack/rtcfg/Makefile                                           |    1 
 kernel/drivers/xenomai/net/stack/include/stack_mgr.h                                      |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h                                   |   37 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h                   |   36 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_internal.h                   |   75 
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c                                          |   67 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h                                        |  152 
 kernel/xenomai-v3.2.4/kernel/cobalt/COPYING                                               |  281 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h                              |    1 
 kernel/arch/arm/include/asm/irq_pipeline.h                                                |  135 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/Makefile                                         |   14 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_io.h                                   |  210 
 kernel/drivers/xenomai/net/drivers/e1000e/hw.h                                            |    1 
 kernel/kernel/rcu/update.c                                                                |   31 
 kernel/kernel/xenomai/posix/corectl.h                                                     |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h          |   40 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c                         |  361 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy                            |   74 
 kernel/kernel/irq/Makefile                                                                |    2 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic.c                             |   53 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-4.c                                       |   74 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_module.c                       |   83 
 kernel/arch/x86/kernel/tsc.c                                                              |   19 
 kernel/xenomai-v3.2.4/lib/cobalt/clock.c                                                  |  489 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h        |   74 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_disc.h                 |   95 
 kernel/kernel/xenomai/clock.c                                                             |    1 
 kernel/xenomai-v3.2.4/include/vxworks/types.h                                             |   40 
 kernel/xenomai-v3.2.4/testsuite/latency/Makefile.am                                       |   18 
 kernel/xenomai-v3.2.4/README                                                              |   74 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/stack_mgr.c                                |  256 
 kernel/xenomai-v3.2.4/lib/cobalt/internal.h                                               |  135 
 kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h                                              |   30 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S                              |   87 
 kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c                                              |  220 
 kernel/xenomai-v3.2.4/include/copperplate/debug.h                                         |   42 
 kernel/xenomai-v3.2.4/lib/cobalt/internal.c                                               |  600 
 kernel/xenomai-v3.2.4/include/vxworks/sysLib.h                                            |   41 
 kernel/drivers/xenomai/net/addons/cap.c                                                   |    1 
 kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig                                         |    1 
 kernel/arch/x86/Kconfig                                                                   |    4 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Kconfig                           |   18 
 kernel/include/xenomai/rtdm/uapi/udd.h                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h   |   40 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/setup.c                                 |  518 
 kernel/drivers/xenomai/net/stack/include/rtnet_port.h                                     |    1 
 kernel/kernel/xenomai/pipeline/Makefile                                                   |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg_chrdev.h                     |  176 
 kernel/drivers/xenomai/udd/Makefile                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/mpc52xx_uart.c                                | 1438 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_sock.h                     |   31 
 kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am                                     |   22 
 kernel/drivers/xenomai/analogy/national_instruments/mio_common.c                          |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h                                          |   71 
 kernel/include/dovetail/spinlock.h                                                        |   21 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-2.c                                          |   68 
 kernel/include/xenomai/cobalt/kernel/list.h                                               |    1 
 kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c                                        |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h                      |   65 
 kernel/include/xenomai/rtdm/can.h                                                         |    1 
 kernel/kernel/xenomai/sched.c                                                             |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile                                 |   63 
 kernel/xenomai-v3.2.4/kernel/drivers/testing/timerbench.c                                 |  529 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_macb.h                                |  624 
 kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c                                    |    1 
 kernel/xenomai-v3.2.4/lib/psos/COPYING                                                    |  458 
 kernel/drivers/xenomai/serial/16550A_io.h                                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan.h                           |  263 
 kernel/arch/arm/vfp/entry.S                                                               |    2 
 kernel/xenomai-v3.2.4/lib/alchemy/queue.h                                                 |   59 
 kernel/xenomai-v3.2.4/lib/cobalt/thread.c                                                 |  819 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile                            |    1 
 kernel/drivers/xenomai/analogy/testing/Kconfig                                            |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h                    |   45 
 kernel/xenomai-v3.2.4/testsuite/smokey/xddp/Makefile.am                                   |   10 
 kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc                           |   25 
 kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h                                    |    1 
 kernel/xenomai-v3.2.4/utils/can/rtcansend.c                                               |  306 
 kernel/kernel/xenomai/arith.c                                                             |    1 
 kernel/net/core/dev.c                                                                     |   98 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c            | 5590 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c         | 1654 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.h                      |   62 
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.c                      |  220 
 kernel/xenomai-v3.2.4/lib/cobalt/timer.c                                                  |  255 
 kernel/xenomai-v3.2.4/lib/psos/rn.c                                                       |  343 
 kernel/xenomai-v3.2.4/lib/alchemy/queue.c                                                 | 1198 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h                         |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h                          |   73 
 kernel/xenomai-v3.2.4/testsuite/smokey/Makefile.am                                        |  119 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c                           |   70 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile                             |    5 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c                             | 2341 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/udp.c                             |  831 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c                          |  443 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h                |  270 
 kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile                                     |    1 
 kernel/arch/x86/include/asm/apic.h                                                        |    7 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/Makefile.am                         |    2 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/alarm-1.c                                     |   88 
 kernel/xenomai-v3.2.4/lib/boilerplate/setup.c                                             |  737 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c                               |   42 
 kernel/include/xenomai/cobalt/kernel/registry.h                                           |    1 
 kernel/drivers/xenomai/net/stack/ipv4/protocol.c                                          |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/setup.h                                         |  120 
 kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.h                                           |   37 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c                                       |  106 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h         |   44 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools                                 |  117 
 kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.c                                           |  107 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig                                      |   16 
 kernel/arch/arm64/kernel/fpsimd.c                                                         |  194 
 kernel/arch/x86/kernel/cpu/mtrr/generic.c                                                 |   12 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h                        |    1 
 kernel/arch/x86/lib/usercopy.c                                                            |    2 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c                                       |    1 
 kernel/arch/arm64/kernel/smp.c                                                            |  107 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h      |   39 
 kernel/kernel/xenomai/pipeline/sched.c                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h         |   97 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h                                    |   31 
 kernel/include/xenomai/cobalt/uapi/time.h                                                 |    1 
 kernel/kernel/irq/debug.h                                                                 |    2 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h                                        |  136 
 kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/time.c                                                |   38 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/time.h                    |   16 
 kernel/arch/arm64/kernel/signal.c                                                         |   38 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pnp.h                                  |  387 
 kernel/kernel/debug/debug_core.c                                                          |   19 
 kernel/include/linux/rcupdate.h                                                           |   14 
 kernel/arch/arm64/include/asm/uaccess.h                                                   |    8 
 kernel/xenomai-v3.2.4/include/boilerplate/debug.h                                         |  142 
 kernel/include/xenomai/cobalt/kernel/sched-tp.h                                           |    1 
 kernel/xenomai-v3.2.4/include/trank/trank.h                                               |   57 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h                                        |   61 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_disc.c                         |  271 
 kernel/xenomai-v3.2.4/lib/trank/internal.c                                                |   99 
 kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h                                         |    1 
 kernel/drivers/xenomai/analogy/intel/Makefile                                             |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h                                       |  141 
 kernel/xenomai-v3.2.4/utils/can/README                                                    |  150 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h                                         |  174 
 kernel/xenomai-v3.2.4/include/smokey/Makefile.am                                          |    3 
 kernel/drivers/xenomai/net/drivers/tulip/21142.c                                          |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h       |   34 
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h                          |    1 
 kernel/xenomai-v3.2.4/include/alchemy/mutex.h                                             |  102 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c                                         |  497 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_dev.c                    |   84 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-1.c                                      |  113 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c                                  |   37 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c                                        |  354 
 kernel/xenomai-v3.2.4/lib/cobalt/cobalt.wrappers                                          |  120 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/event-1.c                                     |   97 
 kernel/xenomai-v3.2.4/lib/trank/internal.h                                                |   54 
 kernel/drivers/xenomai/net/stack/packet/af_packet.c                                       |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c                                        |  544 
 kernel/drivers/xenomai/net/addons/Makefile                                                |    1 
 kernel/include/xenomai/cobalt/kernel/select.h                                             |    1 
 kernel/xenomai-v3.2.4/utils/autotune/Makefile.am                                          |   17 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h       |  101 
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c                                    |  351 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h                        |   45 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-6.c                                         |  107 
 kernel/xenomai-v3.2.4/include/vxworks/intLib.h                                            |   39 
 kernel/xenomai-v3.2.4/include/vxworks/semLib.h                                            |   63 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h                  |   32 
 kernel/drivers/gpio/gpio-zynq.c                                                           |    4 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c                 |    1 
 kernel/xenomai-v3.2.4/scripts/make-release.sh                                             |   35 
 kernel/kernel/time/clockevents.c                                                          |   77 
 kernel/drivers/xenomai/gpio/gpio-omap.c                                                   |    1 
 kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c                                           |    1 
 kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h                                        |   42 
 kernel/kernel/xenomai/posix/gen-syscall-entries.sh                                        |    1 
 kernel/xenomai-v3.2.4/lib/psos/rn.h                                                       |   49 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h                                    |   99 
 kernel/xenomai-v3.2.4/config/Makefile.am                                                  |    7 
 kernel/kernel/xenomai/posix/syscall_entries.h                                             |  232 
 kernel/drivers/xenomai/net/stack/eth.c                                                    |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h                 |    1 
 kernel/drivers/xenomai/net/drivers/pcnet32.c                                              |    1 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-3.c                                           |  115 
 kernel/drivers/xenomai/can/rtcan_list.h                                                   |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c         |  680 
 kernel/xenomai-v3.2.4/include/alchemy/alarm.h                                             |   86 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c                                      |  994 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h                                      |   59 
 kernel/arch/arm64/include/asm/irqflags.h                                                  |   46 
 kernel/include/asm-generic/cmpxchg-local.h                                                |    8 
 kernel/include/xenomai/cobalt/uapi/corectl.h                                              |    1 
 kernel/kernel/irq/irqptorture.c                                                           |  254 
 kernel/include/uapi/asm-generic/dovetail.h                                                |    7 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c                          |  344 
 kernel/drivers/xenomai/net/stack/rtskb.c                                                  |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c                               |  139 
 kernel/xenomai-v3.2.4/lib/psos/task.h                                                     |   76 
 kernel/xenomai-v3.2.4/lib/psos/task.c                                                     |  763 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h                                       |   19 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c                                       |   96 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c                                | 1657 
 kernel/drivers/xenomai/net/drivers/tulip/Makefile                                         |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c                               |  256 
 kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h                                       |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h         |   28 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing                               |  117 
 kernel/fs/udf/truncate.c                                                                  |   48 
 kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am                                             |   43 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c                          | 1607 
 kernel/include/xenomai/rtdm/gpiopwm.h                                                     |    1 
 kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h                          |   88 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac.h                |   51 
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.h                             |   18 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma.h                  |  161 
 kernel/xenomai-v3.2.4/include/boilerplate/atomic.h                                        |   89 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h                          |    1 
 kernel/xenomai-v3.2.4/include/cobalt/fcntl.h                                              |   44 
 kernel/drivers/xenomai/net/drivers/e1000e/82571.c                                         |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/buffer-1.c                                    |  108 
 kernel/drivers/xenomai/gpio/gpio-bcm2835.c                                                |    1 
 kernel/xenomai-v3.2.4/include/copperplate/reference.h                                     |  101 
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.c                             |   35 
 kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl                                          |    5 
 kernel/kernel/xenomai/select.c                                                            |    1 
 kernel/kernel/irq/msi.c                                                                   |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c                      |  648 
 kernel/fs/eventfd.c                                                                       |   10 
 kernel/include/xenomai/rtdm/rtdm.h                                                        |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/COPYING                                                 |  458 
 kernel/xenomai-v3.2.4/lib/vxworks/taskInfo.c                                              |  175 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h        |   56 
 kernel/drivers/xenomai/net/drivers/e1000e/param.c                                         |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/Makefile.am                 |    5 
 kernel/include/xenomai/rtdm/uapi/serial.h                                                 |    1 
 kernel/arch/arm/vfp/vfpmodule.c                                                           |   54 
 kernel/xenomai-v3.2.4/utils/net/nomaccfg.c                                                |  109 
 kernel/xenomai-v3.2.4/testsuite/switchtest/Makefile.am                                    |   18 
 kernel/kernel/irq/chip.c                                                                  |  269 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc                                   |   80 
 kernel/arch/arm64/kernel/debug-monitors.c                                                 |    2 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h                                 |   93 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/tsc.h                   |   38 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h                     |   99 
 kernel/xenomai-v3.2.4/lib/alchemy/init.c                                                  |  135 
 kernel/xenomai-v3.2.4/lib/alchemy/alarm.h                                                 |   43 
 kernel/xenomai-v3.2.4/include/cobalt/semaphore.h                                          |   65 
 kernel/xenomai-v3.2.4/include/rtdm/net.h                                                  |   38 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169                             |   42 
 kernel/xenomai-v3.2.4/lib/analogy/internal.h                                              |   58 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c                                         |  158 
 kernel/drivers/xenomai/testing/rtdmtest.c                                                 |    1 
 kernel/arch/x86/kernel/cpu/acrn.c                                                         |    3 
 kernel/xenomai-v3.2.4/lib/alchemy/alarm.c                                                 |  411 
 kernel/mm/vmalloc.c                                                                       |    6 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig                                      |   56 
 kernel/arch/arm64/include/asm/vdso.h                                                      |    5 
 kernel/kernel/trace/trace_functions_graph.c                                               |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c                          |  249 
 kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c                                    |    1 
 kernel/xenomai-v3.2.4/lib/analogy/async.c                                                 |  471 
 kernel/arch/x86/xen/enlighten_hvm.c                                                       |    3 
 kernel/xenomai-v3.2.4/utils/chkkconf/kconf-checklist                                      |   51 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h                                     |  293 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c                                     |  963 
 kernel/kernel/kthread.c                                                                   |    4 
 kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/Makefile.am                                   |   10 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h                                    |   30 
 kernel/drivers/xenomai/serial/16550A_pnp.h                                                |    1 
 kernel/include/xenomai/cobalt/kernel/sched-idle.h                                         |    1 
 kernel/include/xenomai/rtdm/uapi/rtdm.h                                                   |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h            |   66 
 kernel/kernel/irq/resend.c                                                                |    8 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c                                  |  168 
 kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h                                   |  908 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/features.c                                  |   24 
 kernel/xenomai-v3.2.4/testsuite/clocktest/clocktest.c                                     |  395 
 kernel/drivers/xenomai/net/stack/packet/Kconfig                                           |    1 
 kernel/xenomai-v3.2.4/lib/smokey/Makefile.am                                              |   13 
 kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h                                      |   23 
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h                         |    1 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc                                    |   70 
 kernel/arch/x86/entry/entry_64.S                                                          |    5 
 kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h                                     |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile                       |    5 
 kernel/include/linux/hardirq.h                                                            |   18 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h       |   85 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c                                          |   81 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-1.c                                        |  103 
 kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h                              |    1 
 kernel/drivers/xenomai/net/drivers/e1000e/e1000.h                                         |    1 
 kernel/arch/arm64/kernel/traps.c                                                          |   36 
 kernel/drivers/xenomai/can/rtcan_virt.c                                                   |    1 
 kernel/include/xenomai/rtdm/analogy/context.h                                             |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h                          |   14 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-2.c                                         |  104 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h                      |  148 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev_mgr.c                                |  127 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c                          |  393 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/features.h                  |   27 
 kernel/xenomai-v3.2.4/lib/copperplate/internal.c                                          |  298 
 kernel/include/asm-generic/percpu.h                                                       |   24 
 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-heapmem.c                                   |  118 
 kernel/drivers/soc/qcom/smp2p.c                                                           |    1 
 kernel/xenomai-v3.2.4/include/cobalt/wrappers.h                                           |   55 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Makefile                            |    5 
 kernel/include/linux/dw_apb_timer.h                                                       |    2 
 kernel/include/linux/poll.h                                                               |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c         | 1430 
 kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.h                                       |   56 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h         |   40 
 kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.c                                       |  177 
 kernel/xenomai-v3.2.4/utils/net/rtping.c                                                  |  183 
 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c                                   |  442 
 kernel/drivers/xenomai/analogy/national_instruments/Makefile                              |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/vdso-access.c                          |   31 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h        |   81 
 kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h                           |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h                                 |   68 
 kernel/xenomai-v3.2.4/include/cobalt/time.h                                               |   77 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c                | 1603 
 kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c                             |    1 
 kernel/include/xenomai/cobalt/uapi/kernel/synch.h                                         |    1 
 kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am                                     |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c        |  384 
 kernel/xenomai-v3.2.4/lib/analogy/sync.c                                                  |  426 
 kernel/include/xenomai/cobalt/uapi/cond.h                                                 |    1 
 kernel/kernel/xenomai/bufd.c                                                              |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_worker.h           |   34 
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_at91_ether.h                          |  109 
 kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig                                       |    1 
 kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h                             |    1 
 kernel/include/linux/tracepoint.h                                                         |    6 
 kernel/xenomai-v3.2.4/include/cobalt/unistd.h                                             |   44 
 kernel/xenomai-v3.2.4/testsuite/smokey/bufp/bufp.c                                        |  174 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h                       |    1 
 kernel/include/xenomai/rtdm/analogy/instruction.h                                         |    1 
 kernel/kernel/time/tick-proxy.c                                                           |  466 
 kernel/xenomai-v3.2.4/include/cobalt/sys/select.h                                         |   38 
 kernel/modules-only.symvers                                                               |    8 
 kernel/include/xenomai/cobalt/kernel/vfile.h                                              |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/Makefile                                              |   27 
 kernel/drivers/dma/dmaengine.c                                                            |   10 
 kernel/drivers/xenomai/udd/udd.c                                                          |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/Makefile.am                            |    9 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h                              |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/timerfd.c                                  |  389 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c                               |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h                                   |  108 
 kernel/drivers/xenomai/autotune/Kconfig                                                   |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile                            |    5 
 kernel/xenomai-v3.2.4/testsuite/spitest/spitest.c                                         |  466 
 kernel/arch/arm64/include/asm/daifflags.h                                                 |   14 
 kernel/drivers/xenomai/gpiopwm/Makefile                                                   |    1 
 kernel/drivers/xenomai/analogy/testing/fake.c                                             |    1 
 kernel/arch/arm64/include/asm/vdso/gettimeofday.h                                         |   65 
 kernel/arch/arm/include/asm/irqflags.h                                                    |   52 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_output.h                   |   42 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h                  |  435 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile                           |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/media.c                            |  567 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile                            |    6 
 kernel/xenomai-v3.2.4/scripts/Makefile.am                                                 |   30 
 kernel/drivers/base/regmap/regmap.c                                                       |   41 
 kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h                                    |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h                                      |  581 
 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile                                  |    9 
 kernel/arch/x86/kernel/asm-offsets.c                                                      |    3 
 kernel/drivers/xenomai/net/drivers/tulip/pnic.c                                           |    1 
 kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h                                        |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/features.h              |   30 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c                  |  839 
 kernel/drivers/xenomai/spi/spi-master.h                                                   |    1 
 kernel/include/xenomai/rtdm/autotune.h                                                    |    1 
 kernel/kernel/trace/trace.h                                                               |    7 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h                         |    1 
 kernel/kernel/trace/trace.c                                                               |   16 
 kernel/drivers/tty/serial/imx.c                                                           |   48 
 kernel/drivers/xenomai/spi/spi-master.c                                                   |    1 
 kernel/include/dovetail/irq.h                                                             |   11 
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h                                |   55 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-10.c                                     |   72 
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h                        |    1 
 kernel/include/xenomai/cobalt/uapi/kernel/thread.h                                        |    1 
 kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h                                          |   64 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/time.h                      |   16 
 kernel/kernel/xenomai/map.c                                                               |    1 
 kernel/include/xenomai/pipeline/inband_work.h                                             |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/COPYING                                                 |  458 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h                                           |  133 
 kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c                                     |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c                                           |  667 
 kernel/xenomai-v3.2.4/include/boilerplate/libc.h                                          |  296 
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile                              |    8 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h                              |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Kconfig                             |   14 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c                         |  389 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/Makefile.am                                 |   13 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h                       |   27 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-5.c                                           |   69 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/Makefile.am                         |   10 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h     |   27 
 kernel/include/xenomai/cobalt/uapi/signal.h                                               |    1 
 kernel/net/Kconfig                                                                        |    3 
 kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h                                          |   23 
 kernel/arch/arm64/kernel/entry.S                                                          |   62 
 kernel/include/linux/mm.h                                                                 |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/mutex.c                                                 |  521 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/Makefile                                      |   43 
 kernel/xenomai-v3.2.4/lib/vxworks/lstLib.c                                                |  121 
 kernel/include/net/netoob.h                                                               |   17 
 kernel/xenomai-v3.2.4/include/copperplate/eventobj.h                                      |  108 
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h                                |   23 
 kernel/xenomai-v3.2.4/lib/alchemy/mutex.h                                                 |   42 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/Makefile.am                             |    9 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h                                         |   55 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h          |  105 
 kernel/include/linux/stop_machine.h                                                       |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c                                      |   42 
 kernel/xenomai-v3.2.4/lib/Makefile.am                                                     |   32 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h    |   15 
 kernel/include/xenomai/cobalt/uapi/kernel/types.h                                         |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c                                      |   99 
 kernel/xenomai-v3.2.4/include/vxworks/taskLib.h                                           |  119 
 kernel/include/xenomai/rtdm/uapi/testing.h                                                |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/af_inet.c                             |  340 
 kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h                                           |  177 
 kernel/xenomai-v3.2.4/lib/vxworks/reference.h                                             |   22 
 kernel/xenomai-v3.2.4/lib/trank/Makefile.am                                               |   21 
 kernel/kernel/xenomai/init.c                                                              |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/ticks.c                                                  |  134 
 kernel/kernel/xenomai/sched-tp.c                                                          |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c                                                | 1201 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h                                      |   62 
 kernel/xenomai-v3.2.4/testsuite/gpiotest/Makefile.am                                      |   19 
 kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c                                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c                                   |  259 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_checksum.h                   |   19 
 kernel/drivers/xenomai/net/stack/packet/Makefile                                          |    1 
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/target.h                                       |   30 
 kernel/include/linux/spinlock_types.h                                                     |  154 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING                                          |  281 
 kernel/drivers/pinctrl/qcom/pinctrl-msm.c                                                 |    5 
 kernel/drivers/xenomai/spi/spi-bcm2835.c                                                  |    1 
 kernel/arch/x86/kernel/cpu/mce/amd.c                                                      |    7 
 kernel/xenomai-v3.2.4/testsuite/smokey/iddp/iddp.c                                        |  178 
 kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc                               |   78 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h                                            |   79 
 kernel/kernel/xenomai/Makefile                                                            |    1 
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c                                     |    1 
 kernel/include/dovetail/netdevice.h                                                       |   13 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h                         |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c                                            |  394 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h                |  122 
 kernel/drivers/xenomai/Kconfig                                                            |    1 
 kernel/include/vdso/datapage.h                                                            |   25 
 kernel/kernel/panic.c                                                                     |   26 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h                        |   58 
 kernel/xenomai-v3.2.4/lib/analogy/range.c                                                 |  638 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec                                   |  469 
 kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c                                           |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ethernet/eth.h                     |   32 
 kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/Makefile.am                               |    8 
 kernel/include/net/sock.h                                                                 |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c                              |  457 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c                                   |  439 
 kernel/drivers/xenomai/analogy/sensoray/Makefile                                          |    1 
 kernel/arch/arm64/include/dovetail/irq.h                                                  |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c                         |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c                               |  686 
 kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am                                      |   10 
 kernel/include/xenomai/rtdm/compat.h                                                      |    1 
 kernel/lib/Kconfig.debug                                                                  |   53 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h           |   86 
 kernel/drivers/xenomai/can/Makefile                                                       |    1 
 kernel/drivers/xenomai/net/stack/include/rtnet_socket.h                                   |    1 
 kernel/arch/arm/mm/alignment.c                                                            |   21 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c           | 2039 
 kernel/arch/arm64/Kconfig                                                                 |    4 
 kernel/drivers/xenomai/net/stack/include/ipv4/arp.h                                       |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/posix-mutex.c                          | 1130 
 kernel/init/Kconfig                                                                       |   63 
 kernel/arch/x86/kvm/x86.c                                                                 |  106 
 kernel/kernel/irq/dummychip.c                                                             |    4 
 kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h                                   |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile                                        |   38 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h                      |   28 
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c                      |  392 
 kernel/xenomai-v3.2.4/lib/alchemy/Makefile.am                                             |   53 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h      |   39 
 kernel/drivers/xenomai/net/drivers/experimental/3c59x.c                                   |    1 
 kernel/arch/arm/kernel/smp.c                                                              |  121 
 kernel/xenomai-v3.2.4/include/rtdm/autotune.h                                             |   26 
 kernel/drivers/irqchip/irq-sun4i.c                                                        |    2 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h                                  |   77 
 kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c                                               |  976 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am                                       |   76 
 kernel/drivers/xenomai/net/drivers/macb.c                                                 |    1 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c                                       |  108 
 kernel/xenomai-v3.2.4/include/trank/native/mutex.h                                        |   23 
 kernel/include/xenomai/cobalt/uapi/kernel/pipe.h                                          |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h                                        |   94 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c                                     |   43 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h                         |  427 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h      |   27 
 kernel/include/uapi/asm-generic/fcntl.h                                                   |    9 
 kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c                                    |   25 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h                                      |  205 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c                                      |  305 
 kernel/drivers/xenomai/can/rtcan_flexcan.c                                                |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h                           |    1 
 kernel/include/asm-generic/cmpxchg.h                                                      |   16 
 kernel/xenomai-v3.2.4/scripts/maint/test-xeno-test.rb                                     |  301 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h                 |   61 
 kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h                                       |    1 
 kernel/scripts/mkcompile_h                                                                |    6 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c          | 1274 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h          | 1498 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c            | 1999 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h                      |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proto.c                        |   68 
 kernel/xenomai-v3.2.4/lib/boilerplate/avl.c                                               |  778 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h       | 1397 
 kernel/xenomai-v3.2.4/utils/net/Makefile.am                                               |   16 
 kernel/include/xenomai/cobalt/kernel/schedparam.h                                         |    1 
 kernel/xenomai-v3.2.4/lib/copperplate/internal.h                                          |  179 
 kernel/include/xenomai/cobalt/kernel/synch.h                                              |    1 
 kernel/xenomai-v3.2.4/include/cobalt/tunables.h                                           |   80 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h                          |  625 
 kernel/xenomai-v3.2.4/include/boilerplate/obstack.h                                       |  515 
 kernel/xenomai-v3.2.4/utils/net/rtifconfig.c                                              |  440 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c                           |  484 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/route.h                       |   60 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-3.c                                      |   48 
 kernel/kernel/xenomai/rtdm/device.c                                                       |    1 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc                                     |  155 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-1.c                                           |   34 
 kernel/kernel/xenomai/heap.c                                                              |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-3.c                                      |  111 
 kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h                                        |    1 
 kernel/kernel/xenomai/thread.c                                                            |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/setsched/Makefile.am                               |    9 
 kernel/xenomai-v3.2.4/utils/ps/Makefile.am                                                |    7 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/time.h                  |   16 
 kernel/arch/arm64/configs/rockchip_linux_defconfig.rej                                    |   32 
 kernel/kernel/irq/generic-chip.c                                                          |    2 
 kernel/drivers/misc/Makefile                                                              |    1 
 kernel/xenomai-v3.2.4/include/trank/native/event.h                                        |   81 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig                                |   10 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c                                         |  408 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/Makefile                                      |    8 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h                             |    1 
 kernel/include/trace/events/cobalt-rtdm.h                                                 |    1 
 kernel/arch/x86/kernel/irq_work.c                                                         |    3 
 kernel/drivers/clocksource/Kconfig                                                        |    9 
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c                                       |  146 
 kernel/xenomai-v3.2.4/lib/psos/reference.h                                                |   22 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c                                             | 1192 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c                     |   87 
 kernel/include/linux/net.h                                                                |    1 
 kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts                              |   12 
 kernel/drivers/xenomai/net/stack/ipv4/icmp.c                                              |    1 
 kernel/drivers/xenomai/can/rtcan_raw_filter.c                                             |    1 
 kernel/kernel/xenomai/pipeline/init.c                                                     |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h                                  |  113 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c                                     |  199 
 kernel/xenomai-v3.2.4/testsuite/xeno-test/dohell                                          |   96 
 kernel/kernel/exit.c                                                                      |    2 
 kernel/xenomai-v3.2.4/scripts/histo.gp                                                    |   22 
 kernel/xenomai-v3.2.4/scripts/xeno.in                                                     |   17 
 kernel/lib/atomic64.c                                                                     |   24 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/rng-1.c                                       |  213 
 kernel/arch/x86/kernel/tsc_sync.c                                                         |    4 
 kernel/include/xenomai/rtdm/net.h                                                         |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c                        |    1 
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c                                |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c                            | 1073 
 kernel/xenomai-v3.2.4/configure.ac                                                        | 1062 
 kernel/xenomai-v3.2.4/lib/cobalt/rtdm.c                                                   |  578 
 kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc                                    |  879 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/corectl.c                                  |   77 
 kernel/xenomai-v3.2.4/include/copperplate/registry.h                                      |  154 
 kernel/drivers/dma/imx-sdma.c                                                             |  195 
 kernel/kernel/trace/ring_buffer.c                                                         |   18 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c                                      |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c                                         |  835 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h                                 |   61 
 kernel/xenomai-v3.2.4/demo/posix/Makefile.am                                              |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/tcp.c                             | 2453 
 kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c                                     |    1 
 kernel/include/xenomai/pipeline/trace.h                                                   |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h                           |  764 
 kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h                                        |   66 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtskb.c                                    |  535 
 kernel/drivers/xenomai/net/drivers/eepro100.c                                             |    1 
 kernel/arch/x86/kernel/cpu/mce/core.c                                                     |    2 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h           |   23 
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c                   | 1001 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h             |   73 
 kernel/xenomai-v3.2.4/lib/smokey/COPYING                                                  |  458 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h                        |  116 
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/sched-tp.c                                |  244 
 kernel/drivers/spi/spi-bcm2835.c                                                          |   99 
 kernel/include/xenomai/cobalt/kernel/timer.h                                              |    1 
 kernel/arch/arm64/configs/rockchip_defconfig                                              |    5 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c                         | 9094 +
 kernel/include/xenomai/cobalt/kernel/pipe.h                                               |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile                                         |    8 
 kernel/drivers/irqchip/irq-bcm2836.c                                                      |    3 
 kernel/include/linux/spinlock_api_up.h                                                    |   18 
 kernel/xenomai-v3.2.4/lib/alchemy/internal.c                                              |   51 
 kernel/include/linux/printk.h                                                             |   17 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h                         | 3454 
 kernel/arch/x86/kernel/apic/x2apic_cluster.c                                              |    4 
 kernel/kernel/locking/Makefile                                                            |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/internal.h                                              |   93 
 kernel/include/linux/entry-common.h                                                       |   22 
 kernel/include/trace/events/cobalt-core.h                                                 |    1 
 kernel/drivers/pinctrl/samsung/pinctrl-exynos.c                                           |   23 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h                                             |   98 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README                               |    3 
 kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile                                   |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/Makefile.am             |    5 
 kernel/drivers/xenomai/net/stack/rtcfg/Kconfig                                            |    1 
 kernel/xenomai-v3.2.4/include/cobalt/stdio.h                                              |  129 
 kernel/xenomai-v3.2.4/utils/net/tdma.conf                                                 |   39 
 kernel/xenomai-v3.2.4/include/trank/native/task.h                                         |   58 
 kernel/xenomai-v3.2.4/include/alchemy/compat.h                                            |   23 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h                                       |   64 
 kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.h                                    |  260 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h                         |    1 
 kernel/xenomai-v3.2.4/lib/boilerplate/debug.c                                             |  185 
 kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.c                                    | 1282 
 kernel/arch/Kconfig                                                                       |    3 
 kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h                                       |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h    |   24 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h                                       |   74 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h     |   24 
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h               |   51 
 kernel/xenomai-v3.2.4/lib/copperplate/Makefile.am                                         |   70 
 kernel/arch/arm/include/asm/syscall.h                                                     |    5 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc                                   |  106 
 kernel/include/xenomai/rtdm/uapi/gpio.h                                                   |    1 
 kernel/xenomai-v3.2.4/config/version-label                                                |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Kconfig                        |    9 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/syscall.h                 |  104 
 kernel/arch/x86/kernel/cpu/mce/therm_throt.c                                              |    6 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev.c                                    |  940 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h                                            |   43 
 kernel/drivers/xenomai/net/drivers/e1000/Makefile                                         |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_ioctl.c                        |  421 
 kernel/xenomai-v3.2.4/utils/chkkconf/Makefile.am                                          |   11 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h                                     |  431 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h                                        |  120 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/features.c                                    |  102 
 kernel/drivers/xenomai/net/drivers/igb/igb_main.c                                         |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c                                  |   72 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Kconfig                                    |   41 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.c                                     |  451 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c                                        |  638 
 kernel/arch/x86/include/asm/uaccess.h                                                     |    2 
 kernel/kernel/xenomai/procfs.c                                                            |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/wrappers.c                                               |  574 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h   |   12 
 kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig                                     |    3 
 kernel/kernel/sched/idle.c                                                                |   22 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_fragment.c                         |  327 
 kernel/kernel/xenomai/procfs.h                                                            |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.h                                     |   82 
 kernel/include/xenomai/cobalt/kernel/sched-weak.h                                         |    1 
 kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c                                           |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile                           |    8 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c                     |    1 
 kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h                                    |    1 
 kernel/kernel/xenomai/posix/event.h                                                       |    1 
 kernel/kernel/xenomai/posix/sem.h                                                         |    1 
 kernel/xenomai-v3.2.4/include/copperplate/semobj.h                                        |   88 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h                                        |   65 
 kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules                                   |    2 
 kernel/drivers/xenomai/net/stack/rtdev.c                                                  |    1 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h                                             |  905 
 kernel/xenomai-v3.2.4/lib/cobalt/semaphore.c                                              |  654 
 kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in                                  |  186 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-7.c                                      |  116 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_dev.h              |   37 
 kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h                                   |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h                                |   52 
 kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/README                                         |    4 
 kernel/kernel/time/tick-internal.h                                                        |   15 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc                                      |   85 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h                                         |   56 
 kernel/xenomai-v3.2.4/utils/corectl/Makefile.am                                           |   17 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig                                  |    8 
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h                               |    1 
 kernel/arch/x86/kernel/apic/msi.c                                                         |   12 
 kernel/kernel/xenomai/posix/cond.c                                                        |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h                                      |  147 
 kernel/drivers/clocksource/bcm2835_timer.c                                                |   27 
 kernel/xenomai-v3.2.4/include/rtdm/testing.h                                              |   59 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h                      | 1018 
 kernel/kernel/xenomai/posix/mutex.h                                                       |    1 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c                                         |  504 
 kernel/drivers/xenomai/net/stack/Kconfig                                                  |    1 
 kernel/kernel/xenomai/posix/event.c                                                       |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h                                 |   33 
 kernel/kernel/xenomai/posix/sem.c                                                         |    1 
 kernel/kernel/xenomai/posix/mutex.c                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile                                     |    5 
 kernel/include/xenomai/pipeline/clock.h                                                   |    1 
 kernel/kernel/xenomai/posix/cond.h                                                        |    1 
 kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf                                         |   12 
 kernel/xenomai-v3.2.4/include/trank/native/alarm.h                                        |   38 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_rtpc.c                               |  258 
 kernel/arch/x86/kvm/emulate.c                                                             |   65 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h                                 |   24 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/nomac_chrdev.h                     |   39 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_dev.c                      |  186 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c                              |   78 
 kernel/xenomai-v3.2.4/testsuite/smokey/leaks/Makefile.am                                  |   10 
 kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c                                              |    4 
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c                                            |  257 
 kernel/drivers/dma/virt-dma.c                                                             |  122 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac                                 |  341 
 kernel/xenomai-v3.2.4/testsuite/smokey/bufp/Makefile.am                                   |   10 
 kernel/drivers/dma/virt-dma.h                                                             |  127 
 kernel/xenomai-v3.2.4/kernel/cobalt/debug.h                                               |   72 
 kernel/include/xenomai/pipeline/sched.h                                                   |    1 
 kernel/kernel/xenomai/pipeline/kevents.c                                                  |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-1.c                                      |   49 
 kernel/xenomai-v3.2.4/kernel/cobalt/debug.c                                               |  657 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c                                   |  105 
 kernel/xenomai-v3.2.4/include/psos/Makefile.am                                            |    5 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h       |   27 
 kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/sigdebug.c                                |  302 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pci.h                                  |  299 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README                                 |    3 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h                                        |   55 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h           |   75 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c         | 1328 
 kernel/arch/x86/kernel/smpboot.c                                                          |    9 
 kernel/arch/arm/kernel/process.c                                                          |   26 
 kernel/drivers/xenomai/analogy/proc.h                                                     |    1 
 kernel/xenomai-v3.2.4/utils/analogy/cmd_bits.c                                            |  279 
 kernel/kernel/rcu/tree.c                                                                  |   31 
 kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c                                      |  560 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h                                   |  207 
 kernel/kernel/irq/internals.h                                                             |    2 
 kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am                                      |    8 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h        |   68 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb.h                            |  809 
 kernel/arch/x86/kernel/fpu/core.c                                                         |  111 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h                                    |   45 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h                       |   78 
 kernel/xenomai-v3.2.4/config/INSTALL                                                      |  229 
 kernel/xenomai-v3.2.4/include/cobalt/ticks.h                                              |   83 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c                                       |  215 
 kernel/include/xenomai/cobalt/kernel/map.h                                                |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h      |   40 
 kernel/arch/x86/kernel/apic/ipi.c                                                         |   32 
 kernel/include/xenomai/pipeline/thread.h                                                  |    1 
 kernel/include/xenomai/pipeline/wrappers.h                                                |    1 
 kernel/arch/arm/vdso/datapage.S                                                           |   11 
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopen.c                                    |   44 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h                                      |   72 
 kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c                                    |  298 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h                                       |   38 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h         |   84 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile                               |    5 
 kernel/xenomai-v3.2.4/include/cobalt/Makefile.am                                          |   27 
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig                                    |    8 
 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-malloc.c                                    |  192 
 kernel/xenomai-v3.2.4/include/psos/psos.h                                                 |  376 
 kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_proto.h          |   38 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c                       | 2235 
 kernel/include/xenomai/rtdm/uapi/ipc.h                                                    |    1 
 kernel/kernel/rcu/tree_plugin.h                                                           |    2 
 kernel/drivers/xenomai/gpiopwm/Kconfig                                                    |    1 
 kernel/xenomai-v3.2.4/doc/install.rules                                                   |   56 
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h                                 |   42 
 kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/Makefile.am                           |    9 
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h                               |    1 
 kernel/xenomai-v3.2.4/include/rtdm/spi.h                                                  |   24 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/Kconfig                                          |   39 
 kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h                                     |    1 
 kernel/drivers/irqchip/exynos-combiner.c                                                  |    7 
 kernel/xenomai-v3.2.4/lib/copperplate/eventobj.c                                          |  337 
 kernel/xenomai-v3.2.4/lib/vxworks/rngLib.c                                                |  198 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-1.c                                       |  161 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h                     |  206 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-7.c                                         |   64 
 kernel/xenomai-v3.2.4/lib/vxworks/rngLib.h                                                |   32 
 kernel/xenomai-v3.2.4/kernel/drivers/testing/heapcheck.c                                  |  550 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig              |    4 
 kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.h                                |   85 
 kernel/xenomai-v3.2.4/lib/copperplate/regd/regd.c                                         |  560 
 kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.c                                |  886 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h                      |   36 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h                                  |   59 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/Makefile.am                       |   10 
 kernel/arch/arm64/kernel/asm-offsets.c                                                    |    1 
 kernel/include/xenomai/cobalt/kernel/tree.h                                               |    1 
 kernel/kernel/trace/trace_irqsoff.c                                                       |   11 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h           |  119 
 kernel/arch/x86/kernel/irq_pipeline.c                                                     |  387 
 kernel/include/xenomai/cobalt/kernel/lock.h                                               |    1 
 kernel/include/xenomai/cobalt/uapi/event.h                                                |    1 
 kernel/include/xenomai/cobalt/kernel/init.h                                               |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/posix-select.c                        |  147 
 kernel/kernel/xenomai/posix/signal.c                                                      |    1 
 kernel/drivers/xenomai/serial/16550A_pci.h                                                |    1 
 kernel/kernel/xenomai/posix/signal.h                                                      |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h                                        |   49 
 kernel/drivers/xenomai/analogy/intel/8255.c                                               |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h        |  127 
 kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h                              |  128 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h     |   63 
 kernel/xenomai-v3.2.4/lib/psos/tm.c                                                       |  352 
 kernel/mm/memory.c                                                                        |   18 
 kernel/include/xenomai/cobalt/kernel/trace.h                                              |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/Makefile.am                                             |   47 
 kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.c                                               |  395 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile                                         |   10 
 kernel/xenomai-v3.2.4/include/cobalt/arith.h                                              |   45 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h                                 |   60 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/Makefile.am                 |    5 
 kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.h                                               |   45 
 kernel/drivers/xenomai/analogy/intel/8255.h                                               |    1 
 kernel/arch/x86/include/asm/fpu/api.h                                                     |   25 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c                                 |  331 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile                                        |   18 
 kernel/kernel/xenomai/pipeline/tick.c                                                     |    1 
 kernel/arch/x86/kernel/apic/apic.c                                                        |   70 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_proto.c                  |  127 
 kernel/kernel/xenomai/posix/sched.c                                                       |    1 
 kernel/kernel/xenomai/posix/sched.h                                                       |    1 
 kernel/drivers/xenomai/net/drivers/e1000/kcompat.h                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c                               |  522 
 kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h                                         |  145 
 kernel/drivers/gpio/gpio-pl061.c                                                          |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proc.h                 |   34 
 kernel/xenomai-v3.2.4/utils/analogy/wf_generate.c                                         |  251 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh                          |   32 
 kernel/drivers/xenomai/net/stack/rtmac/Kconfig                                            |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_eth1394.h                             |  240 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h                                |   45 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h                                 |   75 
 kernel/include/uapi/linux/clocksource.h                                                   |   33 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c                                     |  691 
 kernel/drivers/xenomai/can/rtcan_internal.h                                               |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c                          |  360 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Makefile                              |   19 
 kernel/drivers/xenomai/can/rtcan_socket.h                                                 |    1 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/README                                        |    5 
 kernel/drivers/xenomai/can/rtcan_socket.c                                                 |    1 
 kernel/vmlinux.symvers                                                                    |  464 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec                                    |  597 
 kernel/arch/arm/kernel/entry-armv.S                                                       |   52 
 kernel/xenomai-v3.2.4/lib/copperplate/reference.c                                         |   57 
 kernel/drivers/xenomai/analogy/testing/loop.c                                             |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h                                 |   23 
 kernel/drivers/iio/industrialio-trigger.c                                                 |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/Makefile.am                             |    2 
 kernel/include/xenomai/rtdm/analogy/buffer.h                                              |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h                        |  280 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/tsc.h                     |   54 
 kernel/include/xenomai/rtdm/gpio.h                                                        |    1 
 kernel/drivers/irqchip/irq-gic-v3.c                                                       |    6 
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proto.h                |   78 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h     |   24 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_event.h                |  121 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c                        | 2891 
 kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c                                     |    1 
 kernel/drivers/xenomai/testing/heapcheck.c                                                |    1 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c                                      |    1 
 kernel/drivers/xenomai/analogy/sensoray/s526.c                                            |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c                                          |  299 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/posix-cond.c                            |  788 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h                                         |  743 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h                                         |   42 
 kernel/arch/x86/kernel/Makefile                                                           |    1 
 kernel/drivers/xenomai/net/stack/rtnet_module.c                                           |    1 
 kernel/net/sched/Makefile                                                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev.h                            |  275 
 kernel/kernel/xenomai/posix/internal.h                                                    |    1 
 kernel/tools/perf/trace/beauty/include/linux/socket.h                                     |    3 
 kernel/xenomai-v3.2.4/lib/cobalt/modechk.wrappers                                         |    2 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h                     |   62 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_frame.h                |  139 
 kernel/xenomai-v3.2.4/include/trank/native/cond.h                                         |   23 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c                       |    1 
 kernel/drivers/xenomai/net/stack/ipv4/af_inet.c                                           |    1 
 kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig                                      |    8 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h                                       |  178 
 kernel/kernel/irq/Kconfig                                                                 |   13 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h                     |   96 
 kernel/kernel/xenomai/sched-idle.c                                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/af_inet.h                     |   35 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/pt-1.c                                           |   50 
 kernel/xenomai-v3.2.4/include/alchemy/event.h                                             |  130 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h             |  428 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README                             |    3 
 kernel/drivers/xenomai/testing/Kconfig                                                    |    1 
 kernel/include/xenomai/rtdm/uapi/autotune.h                                               |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c                                         |  588 
 kernel/xenomai-v3.2.4/lib/psos/tm.h                                                       |   39 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c                                     | 1145 
 kernel/xenomai-v3.2.4/testsuite/smokey/gdb/Makefile.am                                    |   12 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/Makefile.am                             |   11 
 kernel/kernel/irq/handle.c                                                                |    9 
 kernel/drivers/cpuidle/poll_state.c                                                       |    2 
 kernel/xenomai-v3.2.4/lib/trank/native.c                                                  |  668 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/protocol.h                    |   54 
 kernel/xenomai-v3.2.4/include/mercury/Makefile.am                                         |    4 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h                                         |   86 
 kernel/xenomai-v3.2.4/include/copperplate/Makefile.am                                     |   19 
 kernel/xenomai-v3.2.4/include/rtdm/ipc.h                                                  |   26 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig                                    |   12 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-2.c                                        |  108 
 kernel/xenomai-v3.2.4/testsuite/smokey/leaks/leaks.c                                      |  286 
 kernel/include/asm-generic/xenomai/ipipe/thread.h                                         |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/private-list.h                                  |  217 
 kernel/include/linux/dovetail.h                                                           |  325 
 kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile                                    |    4 
 kernel/drivers/xenomai/net/drivers/e1000e/Makefile                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Kconfig                           |    6 
 kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c                                           |    1 
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c                                |    1 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-3.c                                         |   42 
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libposix-test.c                             |   32 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h                                       |  551 
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopentest.c                                |   79 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h                                  |   84 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_frame.c                        |  571 
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h                                 |   23 
 kernel/kernel/xenomai/posix/clock.h                                                       |    1 
 kernel/kernel/Kconfig.dovetail                                                            |   23 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/tdma_chrdev.h                      |   81 
 kernel/drivers/Makefile                                                                   |    2 
 kernel/drivers/xenomai/can/sja1000/Makefile                                               |    1 
 kernel/include/linux/irqstage.h                                                           |  398 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile                                     |   16 
 kernel/drivers/xenomai/net/drivers/igb/Makefile                                           |    1 
 kernel/arch/x86/include/asm/special_insns.h                                               |    4 
 kernel/xenomai-v3.2.4/include/cobalt/pthread.h                                            |  180 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h                                      |  167 
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.c                               |  836 
 kernel/kernel/xenomai/posix/clock.c                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c                           | 5676 
 kernel/xenomai-v3.2.4/lib/analogy/root_leaf.h                                             |   54 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile                                 |   24 
 kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h                                           |   56 
 kernel/xenomai-v3.2.4/include/trank/native/sem.h                                          |   23 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h          |  307 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h                   |   51 
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.h                               |  358 
 kernel/arch/x86/kernel/process_64.c                                                       |   32 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c                                | 2095 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-sun6i.c                                      |  674 
 kernel/include/linux/tick.h                                                               |    8 
 kernel/include/xenomai/cobalt/uapi/kernel/vdso.h                                          |    1 
 kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c                                               |    1 
 kernel/xenomai-v3.2.4/lib/psos/queue.c                                                    |  507 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile                            |    5 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c                               |  214 
 kernel/xenomai-v3.2.4/lib/analogy/info.c                                                  |   84 
 kernel/xenomai-v3.2.4/lib/psos/queue.h                                                    |   51 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h         |   51 
 kernel/drivers/gpio/gpio-xilinx.c                                                         |   26 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c                                       | 1203 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_event.c                        |  745 
 kernel/xenomai-v3.2.4/include/boilerplate/avl.h                                           |   28 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/tsc.h                       |   48 
 kernel/xenomai-v3.2.4/lib/psos/init.c                                                     |  141 
 kernel/arch/x86/include/asm/fpu/types.h                                                   |   12 
 kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h                                    |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/posix-fork.c                            |   36 
 kernel/include/xenomai/cobalt/uapi/kernel/limits.h                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c           | 1164 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c                                 |   43 
 kernel/drivers/xenomai/net/Makefile                                                       |    1 
 kernel/xenomai-v3.2.4/testsuite/gpiotest/gpiotest.c                                       |  267 
 kernel/arch/arm64/include/asm/ptrace.h                                                    |    6 
 kernel/xenomai-v3.2.4/include/alchemy/heap.h                                              |  137 
 kernel/drivers/xenomai/analogy/subdevice.c                                                |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h         |  124 
 kernel/xenomai-v3.2.4/testsuite/smokey/gdb/gdb.c                                          |  317 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c         |  444 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-2.c                                       |   66 
 kernel/xenomai-v3.2.4/testsuite/gpiobench/Makefile.am                                     |   18 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c                                         |  198 
 kernel/kernel/locking/pipeline.c                                                          |  231 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c                                      |  238 
 kernel/xenomai-v3.2.4/doc/Makefile.am                                                     |    6 
 kernel/arch/arm64/xenomai/Kconfig                                                         |    1 
 kernel/kernel/xenomai/posix/syscall.c                                                     |    1 
 kernel/include/linux/spi/spi.h                                                            |   99 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h                                  |   71 
 kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.h                                   |  150 
 kernel/Makefile                                                                           |    2 
 kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.c                                   |  124 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/Makefile.am                         |    2 
 kernel/drivers/xenomai/net/drivers/rt_eth1394.h                                           |    1 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/rn-1.c                                           |   63 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h                | 1417 
 kernel/kernel/xenomai/posix/syscall.h                                                     |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h           |  166 
 kernel/include/xenomai/cobalt/uapi/monitor.h                                              |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/Makefile.am                            |    8 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c                                     |    1 
 kernel/drivers/xenomai/analogy/national_instruments/pcimio.c                              |    1 
 kernel/arch/x86/mm/fault.c                                                                |  158 
 kernel/xenomai-v3.2.4/lib/analogy/math.c                                                  |  457 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h                     |    1 
 kernel/xenomai-v3.2.4/testsuite/latency/latency.c                                         |  822 
 kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h                                  |    1 
 kernel/xenomai-v3.2.4/include/xenomai/init.h                                              |   48 
 kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi                                      |  108 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h        |   32 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am                      |   12 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h                              |  997 
 kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/Makefile.am                            |    9 
 kernel/xenomai-v3.2.4/include/copperplate/tunables.h                                      |  100 
 kernel/xenomai-v3.2.4/include/alchemy/cond.h                                              |   98 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-4.c                                           |   71 
 kernel/xenomai-v3.2.4/lib/cobalt/mq.c                                                     |  589 
 kernel/kernel/irq/proc.c                                                                  |    3 
 kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/Makefile.am                             |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/udd/udd.c                                            |  665 
 kernel/drivers/misc/atemsys-main/Makefile                                                 |   34 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h                                         |  138 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h                           |  570 
 kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c                                    |    1 
 kernel/xenomai-v3.2.4/include/vxworks/wdLib.h                                             |   50 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/coreheap.c                         |  108 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h                                       |  179 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/protocol.c                            |   88 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h        |   24 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_module.c                             |  411 
 kernel/include/xenomai/rtdm/analogy/driver.h                                              |    1 
 kernel/arch/arm/include/asm/efi.h                                                         |    4 
 kernel/net/core/net-sysfs.c                                                               |   52 
 kernel/xenomai-v3.2.4/lib/copperplate/init.c                                              |  385 
 kernel/kernel/xenomai/sched-quota.c                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c                                           | 1104 
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h                                 |   37 
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/Makefile.am                                  |    8 
 kernel/kernel/xenomai/posix/syscall32.h                                                   |    1 
 kernel/kernel/power/Makefile                                                              |    2 
 kernel/include/dovetail/thread_info.h                                                     |   13 
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/LICENSE                                   |   21 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_ioctl.h            |   35 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_module.c                   |  317 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h       |   30 
 kernel/kernel/xenomai/posix/syscall32.c                                                   |    1 
 kernel/xenomai-v3.2.4/include/boilerplate/lock.h                                          |  224 
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/README                                    |    1 
 kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c                                       |    1 
 kernel/xenomai-v3.2.4/config/version-code                                                 |    1 
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c                                    |    1 
 kernel/xenomai-v3.2.4/include/copperplate/threadobj.h                                     |  589 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h                                     |   68 
 kernel/drivers/xenomai/analogy/sensoray/Kconfig                                           |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c                                  |  820 
 kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/Makefile.am                                 |   58 
 kernel/arch/arm/include/asm/ptrace.h                                                      |    3 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/eeprom.c                           |  321 
 kernel/drivers/memory/omap-gpmc.c                                                         |    3 
 kernel/kernel/xenomai/posix/io.c                                                          |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c                                       |   29 
 kernel/arch/arm64/kernel/entry-common.c                                                   |  152 
 kernel/kernel/xenomai/posix/io.h                                                          |    1 
 kernel/kernel/trace/ftrace.c                                                              |   18 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h                                    |  113 
 kernel/xenomai-v3.2.4/lib/cobalt/trace.c                                                  |   97 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING                                         |  281 
 kernel/arch/arm/kernel/raw_printk.c                                                       |   30 
 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-tlsf.c                                      |  121 
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c                                  |    1 
 kernel/drivers/xenomai/net/stack/corectl.c                                                |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h                     |   74 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h             |   25 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/Makefile                           |   12 
 kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c                                       |    1 
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am                              |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c               |  443 
 kernel/xenomai-v3.2.4/lib/psos/internal.h                                                 |   36 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-2.c                                      |  102 
 kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h                                    |    1 
 kernel/drivers/xenomai/net/drivers/loopback.c                                             |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h                                       |  135 
 kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.h                                            |   32 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-2.c                                      |  110 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/Makefile.am                       |    2 
 kernel/arch/arm64/mm/fault.c                                                              |   40 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h                                   |   34 
 kernel/drivers/xenomai/autotune/autotune.c                                                |    1 
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c                                 |    1 
 kernel/arch/arm/kernel/vdso.c                                                             |   51 
 kernel/arch/arm64/mm/context.c                                                            |   11 
 kernel/xenomai-v3.2.4/lib/psos/README                                                     |   51 
 kernel/arch/arm/kernel/irq.c                                                              |    9 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c                               |  343 
 kernel/xenomai-v3.2.4/testsuite/smokey/xddp/xddp.c                                        |  264 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h                         |   67 
 kernel/xenomai-v3.2.4/include/copperplate/clockobj.h                                      |  263 
 kernel/xenomai-v3.2.4/include/xenomai/version.h                                           |   34 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c                              |   63 
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h                                  |   23 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h                    |   23 
 kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith.c                                      |  124 
 kernel/arch/arm64/include/dovetail/thread_info.h                                          |    1 
 kernel/kernel/trace/trace_stack.c                                                         |    5 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-3.c                                        |  118 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h                                    |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/Makefile.am               |    5 
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.h                              |  173 
 kernel/include/xenomai/rtdm/analogy/command.h                                             |    1 
 kernel/drivers/xenomai/net/drivers/tulip/pnic2.c                                          |    1 
 kernel/drivers/xenomai/gpio/gpio-core.c                                                   |    1 
 kernel/arch/x86/hyperv/hv_init.c                                                          |    3 
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c                        |    1 
 kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.c                              |  380 
 kernel/drivers/xenomai/analogy/command.c                                                  |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_dev.h            |   37 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig                              |   13 
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile                    |    9 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/icmp.c                                |  510 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h                           |   39 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c                           | 2112 
 kernel/arch/arm64/kernel/syscall.c                                                        |   20 
 kernel/include/linux/wakeup_reason.h                                                      |    2 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc                                      |  106 
 kernel/drivers/xenomai/net/stack/ipv4/arp.c                                               |    1 
 kernel/drivers/xenomai/analogy/driver_facilities.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile                                         |   10 
 kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.c                                            |  242 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h                          |  114 
 kernel/arch/arm/kernel/asm-offsets.c                                                      |    4 
 kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk                                 |   64 
 kernel/xenomai-v3.2.4/utils/analogy/insn_write.c                                          |  279 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h                                       |   33 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Kconfig                         |   21 
 kernel/xenomai-v3.2.4/lib/vxworks/taskLib.c                                               |  908 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig                                  |  100 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/udp.h                         |   33 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h                    |   32 
 kernel/xenomai-v3.2.4/lib/vxworks/intLib.c                                                |   25 
 kernel/arch/arm/kernel/Makefile                                                           |    5 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h                            |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/taskLib.h                                               |  106 
 kernel/xenomai-v3.2.4/lib/analogy/COPYING                                                 |  458 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c                          | 4423 
 kernel/xenomai-v3.2.4/kernel/cobalt/thread.c                                              | 2531 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h  |  106 
 kernel/xenomai-v3.2.4/lib/cobalt/sched.c                                                  |  649 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h                                    |  202 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c       | 2207 
 kernel/arch/arm64/xenomai/ipipe/syscall.c                                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile             |    6 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h                                        |  228 
 kernel/arch/x86/include/asm/irq_pipeline.h                                                |  135 
 kernel/xenomai-v3.2.4/lib/vxworks/init.c                                                  |  105 
 kernel/drivers/xenomai/analogy/buffer.c                                                   |    1 
 kernel/drivers/xenomai/analogy/intel/Kconfig                                              |    1 
 kernel/include/linux/regmap.h                                                             |    1 
 kernel/xenomai-v3.2.4/include/cobalt/sched.h                                              |   63 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h                                |    1 
 kernel/include/asm-generic/xenomai/syscall32.h                                            |    1 
 kernel/include/linux/clocksource.h                                                        |   52 
 kernel/drivers/xenomai/net/stack/socket.c                                                 |    1 
 kernel/drivers/clocksource/dw_apb_timer.c                                                 |   40 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_conn_event.c                   |  364 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile              |   19 
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile                           |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c                                        |  954 
 kernel/xenomai-v3.2.4/lib/boilerplate/version.c                                           |   55 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c                                      |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h                                         |   83 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c                                         |  651 
 kernel/arch/x86/include/asm/dovetail.h                                                    |   45 
 kernel/arch/x86/include/asm/idtentry.h                                                    |   93 
 kernel/kernel/xenomai/posix/mqueue.c                                                      |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A.c                                      | 1188 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c                                         |  415 
 kernel/kernel/irq/irqdesc.c                                                               |    9 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c                               |  285 
 kernel/xenomai-v3.2.4/testsuite/smokey/y2038/Makefile.am                                  |   10 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h         |   24 
 kernel/include/asm-generic/atomic.h                                                       |   12 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c                                | 1733 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h        |   77 
 kernel/drivers/xenomai/net/drivers/via-rhine.c                                            |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation                       |   49 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Makefile                       |    9 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h                                    |   61 
 kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c                                       |    1 
 kernel/kernel/xenomai/posix/mqueue.h                                                      |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c                                     |  458 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev_mgr.h                        |   39 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h                  |  272 
 kernel/include/linux/fcntl.h                                                              |    2 
 kernel/arch/x86/kernel/i8259.c                                                            |    3 
 kernel/xenomai-v3.2.4/include/boilerplate/compiler.h                                      |   91 
 kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h                                   |  554 
 kernel/include/xenomai/cobalt/kernel/sched-sporadic.h                                     |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h                         |  461 
 kernel/drivers/xenomai/ipc/Kconfig                                                        |    1 
 kernel/include/xenomai/pipeline/machine.h                                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy                                    | 1150 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h                               |   24 
 kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c                              |    1 
 kernel/drivers/xenomai/ipc/internal.h                                                     |    1 
 kernel/xenomai-v3.2.4/scripts/dynlist.ld                                                  |    3 
 kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c                                          |    3 
 kernel/drivers/xenomai/net/stack/include/ipv4/udp.h                                       |    1 
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c                                      |    1 
 kernel/include/xenomai/cobalt/kernel/time.h                                               |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/umm.h                                                    |   31 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_timer.h                |   34 
 kernel/xenomai-v3.2.4/lib/cobalt/umm.c                                                    |  136 
 kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h                                           |   43 
 kernel/include/xenomai/rtdm/uapi/spi.h                                                    |    1 
 kernel/arch/arm64/boot/dts/rockchip/rk3588s.dtsi                                          |    0 
 kernel/kernel/irq/pipeline.c                                                              | 1764 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_chrdev.c                             |  241 
 kernel/arch/arm64/include/asm/fpsimd.h                                                    |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/features.c                                      |   70 
 kernel/xenomai-v3.2.4/lib/psos/sem.c                                                      |  231 
 kernel/drivers/gpio/gpio-mxc.c                                                            |    3 
 kernel/xenomai-v3.2.4/kernel/cobalt/map.c                                                 |  265 
 kernel/xenomai-v3.2.4/lib/psos/sem.h                                                      |   35 
 kernel/xenomai-v3.2.4/include/alchemy/task.h                                              |  213 
 kernel/arch/x86/include/asm/i8259.h                                                       |    2 
 kernel/Documentation/dovetail.rst                                                         |   30 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h                     |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_timer.c                        |  110 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h                                         |   83 
 kernel/arch/arm/mm/fault.c                                                                |  109 
 kernel/include/linux/xenomai/wrappers.h                                                   |    1 
 kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c                                      |    1 
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c                                       |    1 
 kernel/drivers/dma/bcm2835-dma.c                                                          |  148 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c                     |    1 
 kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h                                        |    1 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c                                  |  186 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h            |  132 
 kernel/drivers/spi/spi.c                                                                  |  295 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-6.c                                      |   77 
 kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c                                         |  446 
 kernel/xenomai-v3.2.4/lib/boilerplate/hash.c                                              |  507 
 kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc                            |  573 
 kernel/xenomai-v3.2.4/utils/slackspot/slackspot.c                                         |  684 
 kernel/xenomai-v3.2.4/include/alchemy/queue.h                                             |  172 
 kernel/arch/arm/kernel/patch.c                                                            |    2 
 kernel/xenomai-v3.2.4/testsuite/clocktest/Makefile.am                                     |   18 
 kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run-wrapper                           |   22 
 kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h                                      |    1 
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h                                  |    1 
 kernel/kernel/locking/lockdep_internals.h                                                 |    4 
 kernel/drivers/pci/controller/dwc/pcie-designware-host.c                                  |    1 
 kernel/include/xenomai/rtdm/fd.h                                                          |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/printf.c                                                 |  919 
 kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c                               |    1 
 kernel/drivers/xenomai/can/sja1000/rtcan_isa.c                                            |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c                                  |  449 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h                         |   74 
 kernel/xenomai-v3.2.4/utils/can/rtcanconfig.c                                             |  258 
 kernel/drivers/xenomai/net/drivers/natsemi.c                                              |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/tsc/tsc.c                                          |  186 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h               |   28 
 kernel/xenomai-v3.2.4/scripts/wrap-link.sh                                                |  213 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h                                        |  157 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h                            |    1 
 kernel/xenomai-v3.2.4/include/vxworks/lstLib.h                                            |  148 
 kernel/arch/x86/kernel/traps.c                                                            |  159 
 kernel/kernel/trace/trace_functions.c                                                     |    4 
 kernel/drivers/mfd/tps65217.c                                                             |    1 
 kernel/drivers/xenomai/analogy/Makefile                                                   |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c                       |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/features.h                  |   63 
 kernel/include/linux/context_tracking_state.h                                             |    2 
 kernel/include/xenomai/rtdm/analogy/device.h                                              |    1 
 kernel/drivers/xenomai/can/rtcan_raw.h                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_port.h                       |  113 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h                       |    1 
 kernel/drivers/xenomai/can/rtcan_raw.c                                                    |    1 
 kernel/include/xenomai/rtdm/udd.h                                                         |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-2.c                                      |  119 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h                                       |   80 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c                                       |  466 
 kernel/arch/x86/kernel/hpet.c                                                             |   10 
 kernel/arch/arm64/xenomai/ipipe/Makefile                                                  |    1 
 kernel/fs/udf/inode.c                                                                     |   76 
 kernel/kernel/xenomai/sched-sporadic.c                                                    |    1 
 kernel/arch/x86/include/asm/tlbflush.h                                                    |    8 
 kernel/xenomai-v3.2.4/testsuite/smokey/iddp/Makefile.am                                   |   10 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h                              |   75 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h       |   62 
 kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/Makefile.am                                |   10 
 kernel/xenomai-v3.2.4/include/trank/rtdk.h                                                |   38 
 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c                                   |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/iovec.c                                    |  103 
 kernel/drivers/xenomai/net/drivers/e1000/e1000.h                                          |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h           |   30 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_conn_event.h           |   69 
 kernel/kernel/time/tick-common.c                                                          |   19 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c                     | 2752 
 kernel/drivers/xenomai/can/rtcan_raw_dev.c                                                |    1 
 kernel/drivers/xenomai/analogy/rtdm_helpers.c                                             |    1 
 kernel/kernel/xenomai/timer.c                                                             |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h                |   16 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/socket.c                                   |  395 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h                                       |   10 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c                          | 2513 
 kernel/drivers/xenomai/analogy/national_instruments/mite.h                                |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/cond.h                                                  |   40 
 kernel/kernel/xenomai/posix/memory.h                                                      |    1 
 kernel/fs/exec.c                                                                          |   14 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h                                |   24 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c                                     |  104 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/Makefile.am                                   |   13 
 kernel/kernel/xenomai/posix/memory.c                                                      |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h                          |  175 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb_fifo.h                       |  144 
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c                                       |    1 
 kernel/drivers/xenomai/analogy/national_instruments/mite.c                                |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c                        |  299 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h        |   70 
 kernel/include/xenomai/cobalt/uapi/kernel/heap.h                                          |    1 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc                                    |   86 
 kernel/kernel/xenomai/synch.c                                                             |    1 
 kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-cobalt.c                                    |  153 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h                      |   16 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h                                        |  172 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h       |  110 
 kernel/xenomai-v3.2.4/lib/cobalt/attr.c                                                   |  148 
 kernel/kernel/xenomai/rtdm/core.c                                                         |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mutex-1.c                                     |  148 
 kernel/kernel/xenomai/posix/compat.c                                                      |    1 
 kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile                                        |    1 
 kernel/kernel/xenomai/debug.h                                                             |    1 
 kernel/drivers/xenomai/net/stack/include/rtdev.h                                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c       | 2582 
 kernel/kernel/time/Makefile                                                               |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-omap2-mcspi-rt.c                             | 1025 
 kernel/kernel/xenomai/debug.c                                                             |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig                                          |   25 
 kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c                                    |  328 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c                       |    1 
 kernel/include/linux/preempt.h                                                            |   57 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h          |   43 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h                                    |  340 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h      |   83 
 kernel/arch/x86/kernel/fpu/signal.c                                                       |   29 
 kernel/include/trace/events/irq.h                                                         |   42 
 kernel/drivers/xenomai/net/stack/include/rtwlan_io.h                                      |    1 
 kernel/xenomai-v3.2.4/include/trank/native/timer.h                                        |   47 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h                       |    1 
 kernel/drivers/xenomai/testing/switchtest.c                                               |    1 
 kernel/drivers/spi/Kconfig                                                                |   11 
 kernel/include/linux/netdevice.h                                                          |   94 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/INCLUDE.policy                     |   15 
 kernel/drivers/xenomai/net/drivers/freescale/Makefile                                     |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-9.c                                      |   70 
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig                               |    6 
 kernel/kernel/ptrace.c                                                                    |    2 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg.h                      |   47 
 kernel/arch/arm/vfp/vfphw.S                                                               |    2 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h                                   |  218 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h                                             |  881 
 kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4                                      |  108 
 kernel/xenomai-v3.2.4/include/Makefile.am                                                 |   31 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h                             |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h             |  649 
 kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in                                   |  196 
 kernel/xenomai-v3.2.4/kernel/drivers/testing/rtdmtest.c                                   |  293 
 kernel/kernel/entry/common.c                                                              |  196 
 kernel/xenomai-v3.2.4/kernel/drivers/Makefile                                             |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h                         |   59 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h              |  102 
 kernel/xenomai-v3.2.4/lib/vxworks/tickLib.h                                               |   26 
 kernel/arch/x86/include/asm/irq_vectors.h                                                 |   11 
 kernel/xenomai-v3.2.4/lib/alchemy/cond.c                                                  |  497 
 kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h                              |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/udd/Kconfig                                          |   10 
 kernel/xenomai-v3.2.4/lib/cobalt/parse_vdso.c                                             |  281 
 kernel/xenomai-v3.2.4/include/copperplate/timerobj.h                                      |   71 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c                          |   67 
 kernel/xenomai-v3.2.4/lib/vxworks/tickLib.c                                               |   45 
 kernel/include/xenomai/cobalt/kernel/compat.h                                             |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/testing/switchtest.c                                 |  764 
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h                                 |    1 
 kernel/drivers/xenomai/net/stack/rtnet_chrdev.c                                           |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile                                   |    6 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README               |   58 
 kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c                              |  215 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h                           |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/tlsf.c                                 |  123 
 kernel/drivers/misc/atemsys-main/atemsys.h                                                |  428 
 kernel/xenomai-v3.2.4/include/COPYING                                                     |  305 
 kernel/drivers/irqchip/irq-imx-irqsteer.c                                                 |    3 
 kernel/drivers/misc/atemsys-main/atemsys.c                                                | 4885 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c                                      |  221 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h                                       |   46 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am                                   |   37 
 kernel/drivers/xenomai/gpio/Kconfig                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c                             | 3385 
 kernel/kernel/signal.c                                                                    |   12 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c                                       |  798 
 kernel/xenomai-v3.2.4/lib/vxworks/kernLib.c                                               |   61 
 kernel/include/xenomai/cobalt/uapi/asm-generic/features.h                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile                          |   12 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h                                       |   40 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h                                       |   35 
 kernel/arch/arm/include/asm/trace/exceptions.h                                            |   62 
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile                                     |    5 
 kernel/fs/ioctl.c                                                                         |   16 
 kernel/drivers/irqchip/irq-gic.c                                                          |    5 
 kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c                                       |    1 
 kernel/arch/x86/kernel/apic/apic_flat_64.c                                                |    4 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c                                 |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c                         |  151 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-8.c                                         |  108 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-2.c                                       |  144 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp                                   |   52 
 kernel/xenomai-v3.2.4/lib/boilerplate/heapmem.c                                           |  728 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/pipe-1.c                                      |  139 
 kernel/xenomai-v3.2.4/utils/net/rtiwconfig.c                                              |  236 
 kernel/drivers/clocksource/timer-ti-dm-systimer.c                                         |   29 
 kernel/kernel/xenomai/rtdm/Makefile                                                       |    1 
 kernel/arch/arm64/xenomai/ipipe/machine.c                                                 |    1 
 kernel/kernel/xenomai/rtdm/internal.h                                                     |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/README                                           |  143 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_input.c                            |  159 
 kernel/arch/arm/include/asm/vdso/gettimeofday.h                                           |   60 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h                      |   44 
 kernel/xenomai-v3.2.4/lib/analogy/sys.c                                                   |  213 
 kernel/include/xenomai/version.h                                                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c                     | 3708 
 kernel/xenomai-v3.2.4/lib/trank/init.c                                                    |   37 
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c                                          |  524 
 kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h                                      |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h        |   32 
 kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c                                      |    1 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-1.c                                           |   61 
 kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc                                         | 1935 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Kconfig                               |   75 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am                                       |   18 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/Makefile.am                            |   10 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg                                 |  135 
 kernel/drivers/pinctrl/samsung/pinctrl-samsung.c                                          |   22 
 kernel/include/xenomai/pipeline/lock.h                                                    |    1 
 kernel/kernel/xenomai/rtdm/wrappers.c                                                     |    1 
 kernel/drivers/xenomai/net/stack/include/rtnet_internal.h                                 |    1 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h                         |    1 
 kernel/drivers/pinctrl/samsung/pinctrl-samsung.h                                          |    2 
 kernel/xenomai-v3.2.4/include/alchemy/sem.h                                               |  108 
 kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h                           |    1 
 kernel/xenomai-v3.2.4/lib/copperplate/traceobj.c                                          |  330 
 kernel/arch/x86/include/asm/syscall.h                                                     |    5 
 kernel/kernel/trace/trace_sched_wakeup.c                                                  |    2 
 kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c                                 |    1 
 kernel/xenomai-v3.2.4/utils/can/rtcanrecv.c                                               |  324 
 kernel/xenomai-v3.2.4/include/alchemy/pipe.h                                              |   87 
 kernel/include/xenomai/rtdm/analogy/transfer.h                                            |    1 
 kernel/xenomai-v3.2.4/include/vxworks/tickLib.h                                           |   41 
 kernel/xenomai-v3.2.4/lib/boilerplate/obstack.c                                           |  356 
 kernel/arch/arm/kernel/smp_twd.c                                                          |    7 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile                               |    5 
 kernel/include/linux/lockdep.h                                                            |  111 
 kernel/xenomai-v3.2.4/lib/smokey/init.c                                                   |  577 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig                             |    5 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h                         |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c                         |    1 
 kernel/include/xenomai/rtdm/analogy/channel_range.h                                       |    1 
 kernel/drivers/xenomai/net/stack/include/rtskb.h                                          |    1 
 kernel/include/linux/sched/coredump.h                                                     |    1 
 kernel/xenomai-v3.2.4/config/acinclude.m4                                                 |  579 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h               |  425 
 kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/Makefile.am                           |   10 
 kernel/drivers/xenomai/can/peak_canfd/Makefile                                            |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/fpu-stress.c                            |   95 
 kernel/xenomai-v3.2.4/utils/ps/rtps.c                                                     |   91 
 kernel/xenomai-v3.2.4/lib/trank/COPYING                                                   |  458 
 kernel/include/xenomai/rtdm/uapi/analogy.h                                                |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/malloc-nowrap.c                                          |   30 
 kernel/arch/arm64/kernel/Makefile                                                         |    1 
 kernel/include/xenomai/rtdm/serial.h                                                      |    1 
 kernel/security/selinux/hooks.c                                                           |    4 
 kernel/drivers/xenomai/serial/mpc52xx_uart.c                                              |    1 
 kernel/drivers/xenomai/net/drivers/8139too.c                                              |    1 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h                                     |  277 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c                              |   53 
 kernel/xenomai-v3.2.4/kernel/cobalt/sched.c                                               | 1493 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c                             |  156 
 kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig                                         |   72 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c          | 5987 
 kernel/drivers/gpu/ipu-v3/ipu-common.c                                                    |    1 
 kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c                                       |   37 
 kernel/xenomai-v3.2.4/lib/copperplate/regd/sysregfs.h                                     |   60 
 kernel/include/xenomai/cobalt/kernel/assert.h                                             |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am                         |    5 
 kernel/xenomai-v3.2.4/lib/vxworks/README                                                  |   66 
 kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig                                      |    1 
 kernel/include/xenomai/cobalt/uapi/mutex.h                                                |    1 
 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h                                        |    1 
 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c                                        |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h        |   44 
 kernel/arch/x86/kernel/apic/vector.c                                                      |   72 
 kernel/xenomai-v3.2.4/kernel/cobalt/heap.c                                                |  863 
 kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules                                       |    2 
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h                               |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c         |  894 
 kernel/arch/x86/kernel/process.c                                                          |   16 
 kernel/xenomai-v3.2.4/Makefile.am                                                         |   78 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/Makefile.am                           |    2 
 kernel/drivers/xenomai/net/drivers/e1000e/phy.c                                           |    1 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h                               |    1 
 kernel/xenomai-v3.2.4/include/rtdm/rtdm.h                                                 |   59 
 kernel/xenomai-v3.2.4/kernel/cobalt/clock.c                                               |  830 
 kernel/arch/arm64/Makefile                                                                |    3 
 kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in                                 |  879 
 kernel/xenomai-v3.2.4/include/xenomai/tunables.h                                          |   24 
 kernel/kernel/sched/wait.c                                                                |    2 
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c                                           |  990 
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h                                  |   23 
 kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-mercury.c                                   |  127 
 kernel/xenomai-v3.2.4/testsuite/switchtest/switchtest.c                                   | 1572 
 kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-1.c                                      |   97 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_syms.c                         |   36 
 kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c                                  |    1 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-7.c                                           |   42 
 kernel/drivers/tty/serial/amba-pl011.c                                                    |   39 
 kernel/arch/arm64/include/asm/mmu_context.h                                               |   34 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c                                    |  319 
 kernel/include/linux/sched.h                                                              |   10 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c                             | 2693 
 kernel/xenomai-v3.2.4/lib/mercury/Makefile.am                                             |    9 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h                                    |   32 
 kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig                                         |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h      |   63 
 kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h                                       |    1 
 kernel/xenomai-v3.2.4/lib/copperplate/semobj.c                                            |  354 
 kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c                       |    1 
 kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c                                     |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c                                    |  392 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c                          |  803 
 kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c                                                |  653 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig                 |   42 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c           |  893 
 kernel/drivers/xenomai/can/sja1000/Kconfig                                                |    1 
 kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h                               |    1 
 kernel/include/linux/clockchips.h                                                         |   71 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h           |   61 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h                          |   57 
 kernel/drivers/xenomai/ipc/iddp.c                                                         |    1 
 kernel/arch/arm/include/asm/dovetail.h                                                    |   61 
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c                                           | 1132 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/eth.c                                      |  131 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c                           |   44 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap                                 |   39 
 kernel/drivers/base/regmap/regmap-irq.c                                                   |    1 
 kernel/drivers/xenomai/net/stack/include/rtwlan.h                                         |    1 
 kernel/xenomai-v3.2.4/scripts/bootstrap                                                   |    3 
 kernel/xenomai-v3.2.4/utils/chkkconf/checkconfig.c                                        |  331 
 kernel/include/linux/skbuff.h                                                             |   69 
 kernel/drivers/xenomai/gpiopwm/gpiopwm.c                                                  |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c                                  |  455 
 kernel/arch/arm64/include/asm/dovetail.h                                                  |   69 
 kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h                              |   24 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_module.c                       |   80 
 kernel/kernel/irq/manage.c                                                                |   89 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h                      |  118 
 kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h                                      |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/serial/Kconfig                                       |   79 
 kernel/include/linux/irq_pipeline.h                                                       |  145 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-1.c                                       |  151 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h                       |    1 
 kernel/xenomai-v3.2.4/include/cobalt/mqueue.h                                             |   74 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h      |   22 
 kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h                               |   23 
 kernel/include/xenomai/cobalt/kernel/heap.h                                               |    1 
 kernel/kernel/xenomai/lock.c                                                              |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h       |   72 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/task-4.c                                         |   69 
 kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c                             |    1 
 kernel/kernel/xenomai/pipeline/syscall.c                                                  |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_worker.c                   |  231 
 kernel/kernel/power/hibernate.c                                                           |    3 
 kernel/xenomai-v3.2.4/scripts/xeno-config-mercury.in                                      |  221 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile                          |    5 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h                                       |  360 
 kernel/drivers/xenomai/analogy/device.c                                                   |    1 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c                                       |  248 
 kernel/arch/arm/common/mcpm_entry.c                                                       |   12 
 kernel/drivers/xenomai/net/addons/proxy.c                                                 |    1 
 kernel/include/xenomai/pipeline/irq.h                                                     |    1 
 kernel/xenomai-v3.2.4/include/vxworks/kernLib.h                                           |   39 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/Makefile.am                             |   28 
 kernel/xenomai-v3.2.4/utils/analogy/insn_bits.c                                           |  227 
 kernel/drivers/xenomai/net/drivers/freescale/fec_main.c                                   |    1 
 kernel/include/linux/fs.h                                                                 |   10 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h                                 | 1361 
 kernel/xenomai-v3.2.4/include/trank/native/Makefile.am                                    |   16 
 kernel/xenomai-v3.2.4/include/psos/tunables.h                                             |   49 
 kernel/xenomai-v3.2.4/lib/alchemy/pipe.c                                                  |  675 
 kernel/include/linux/kernel.h                                                             |    8 
 kernel/lib/smp_processor_id.c                                                             |    4 
 kernel/arch/arm/include/asm/cmpxchg.h                                                     |    8 
 kernel/xenomai-v3.2.4/scripts/xeno-config-cobalt.in                                       |  292 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c                                       |  334 
 kernel/drivers/xenomai/serial/16550A.c                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h                                       |   42 
 kernel/include/xenomai/cobalt/kernel/ppd.h                                                |    1 
 kernel/xenomai-v3.2.4/include/trank/native/heap.h                                         |   28 
 kernel/xenomai-v3.2.4/include/rtdm/Makefile.am                                            |   20 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac.h                            |   92 
 kernel/include/xenomai/cobalt/kernel/sched-quota.h                                        |    1 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c                                      |  329 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec                                   |   44 
 kernel/drivers/xenomai/net/drivers/freescale/fec.h                                        |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/pipe.h                                                  |   40 
 kernel/xenomai-v3.2.4/kernel/cobalt/arith.c                                               |   65 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h                          |  446 
 kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c                                        |    1 
 kernel/include/xenomai/cobalt/uapi/kernel/urw.h                                           |    1 
 kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h                                   |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c                | 1985 
 kernel/drivers/xenomai/net/stack/include/ipv4/route.h                                     |    1 
 kernel/xenomai-v3.2.4/testsuite/gpiobench/gpiobench.c                                     |  680 
 kernel/drivers/xenomai/testing/Makefile                                                   |    1 
 kernel/net/core/skbuff.c                                                                  |  115 
 kernel/xenomai-v3.2.4/include/vxworks/Makefile.am                                         |   18 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/syscall.h                   |  133 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/arp.h                         |   51 
 kernel/arch/arm/mm/cache-l2x0.c                                                           |   47 
 kernel/arch/x86/mm/tlb.c                                                                  |   50 
 kernel/kernel/xenomai/sched-weak.c                                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c                     | 1515 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S                               |  219 
 kernel/xenomai-v3.2.4/lib/vxworks/sysLib.c                                                |   53 
 kernel/arch/arm/kernel/traps.c                                                            |    2 
 kernel/xenomai-v3.2.4/kernel/cobalt/select.c                                              |  461 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc                                    |   47 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/tcp.h                         |   50 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h            |  711 
 kernel/arch/arm64/xenomai/ipipe/thread.c                                                  |    1 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc                                    |   53 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c                         |   41 
 kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c                    |  628 
 kernel/xenomai-v3.2.4/lib/cobalt/cond.c                                                   |  689 
 kernel/kernel/xenomai/posix/timer.c                                                       |    1 
 kernel/lib/vdso/Kconfig                                                                   |    8 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc                                  |   63 
 kernel/kernel/xenomai/posix/timer.h                                                       |    1 
 kernel/include/asm-generic/xenomai/machine.h                                              |    1 
 kernel/include/linux/kvm_host.h                                                           |   55 
 kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c                                     |  254 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/Makefile.am                             |    2 
 kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/heapmem.c                           |   51 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h             |  243 
 kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h                                   |  346 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_output.c                           |  267 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h     |   33 
 kernel/kernel/locking/lockdep.c                                                           |  213 
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h                         |  226 
 kernel/modules.builtin.modinfo                                                            |    0 
 kernel/xenomai-v3.2.4/include/copperplate/cluster.h                                       |  308 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h         |   34 
 kernel/xenomai-v3.2.4/lib/copperplate/threadobj.c                                         | 1835 
 kernel/drivers/xenomai/net/drivers/rt_at91_ether.h                                        |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/tsc.h                       |   38 
 kernel/drivers/xenomai/net/Kconfig                                                        |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h                         |    1 
 kernel/drivers/gpio/gpio-omap.c                                                           |    6 
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c                              |  798 
 kernel/arch/x86/include/asm/irq_stack.h                                                   |    7 
 kernel/drivers/xenomai/net/stack/ipv4/Kconfig                                             |    1 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-1.c                                      |   32 
 kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h                              |   38 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/time.h                      |   16 
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h                            |    1 
 kernel/net/sched/Kconfig                                                                  |   23 
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c                                       |   52 
 kernel/kernel/dovetail.c                                                                  |  450 
 kernel/xenomai-v3.2.4/lib/boilerplate/init/bootstrap.c                                    |  172 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-3.c                                           |   75 
 kernel/kernel/xenomai/rtdm/fd.c                                                           |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h                             |    1 
 kernel/drivers/xenomai/can/Kconfig                                                        |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h           |  168 
 kernel/drivers/xenomai/Makefile                                                           |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c                               |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c                             |  308 
 kernel/arch/x86/kvm/vmx/vmx.c                                                             |   37 
 kernel/kernel/xenomai/pipeline/intr.c                                                     |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c           | 2106 
 kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h                                              |   24 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h           |  101 
 kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h                              |    1 
 kernel/include/asm-generic/irq_pipeline.h                                                 |  109 
 kernel/drivers/xenomai/net/drivers/experimental/Kconfig                                   |    1 
 kernel/arch/arm/include/asm/assembler.h                                                   |   16 
 kernel/drivers/xenomai/net/drivers/e1000e/lib.c                                           |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h                | 1192 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_input.h                    |   45 
 kernel/drivers/clocksource/arm_global_timer.c                                             |   41 
 kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi                                           |    2 
 kernel/xenomai-v3.2.4/include/cobalt/stdlib.h                                             |   40 
 kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h                                  |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/semLib.h                                                |   57 
 kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c                                     |  502 
 kernel/xenomai-v3.2.4/lib/vxworks/semLib.c                                                |  480 
 kernel/xenomai-v3.2.4/include/rtdm/serial.h                                               |   79 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile                             |   13 
 kernel/include/linux/spinlock_pipeline.h                                                  |  387 
 kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile                |   16 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c                            |   66 
 kernel/kernel/xenomai/posix/Makefile                                                      |    1 
 kernel/xenomai-v3.2.4/config/apirev                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/stack_mgr.h                        |   95 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h                                 |  115 
 kernel/xenomai-v3.2.4/utils/autotune/autotune.c                                           |  338 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c                                | 1536 
 kernel/include/xenomai/rtdm/analogy/subdevice.h                                           |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c                       | 3184 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Kconfig                              |   23 
 kernel/drivers/xenomai/analogy/transfer.c                                                 |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h            |   39 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am                              |   17 
 kernel/drivers/xenomai/can/rtcan_module.c                                                 |    1 
 kernel/arch/arm/include/asm/mmu_context.h                                                 |   40 
 kernel/xenomai-v3.2.4/.gitignore                                                          |   17 
 kernel/arch/arm/kernel/entry-common.S                                                     |   79 
 kernel/drivers/soc/ti/ti_sci_inta_msi.c                                                   |    1 
 kernel/kernel/xenomai/rtdm/drvlib.c                                                       |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip.h                            |  490 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_ioctl.h                |   33 
 kernel/drivers/irqchip/irq-gic-v2m.c                                                      |    3 
 kernel/arch/arm/Kconfig                                                                   |    9 
 kernel/include/xenomai/cobalt/kernel/sched-rt.h                                           |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/tsc/Makefile.am                                    |    8 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h             |  603 
 kernel/xenomai-v3.2.4/include/xenomai/Makefile.am                                         |    6 
 kernel/drivers/pci/controller/pcie-brcmstb.c                                              |    2 
 kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run.c                                 |  678 
 kernel/arch/x86/kernel/dumpstack.c                                                        |   12 
 kernel/drivers/xenomai/net/drivers/eth1394.c                                              |    1 
 kernel/xenomai-v3.2.4/lib/vxworks/errnoLib.c                                              |  144 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h                                   |   66 
 kernel/include/dovetail/poll.h                                                            |   12 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h                     |   25 
 kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h                          |    1 
 kernel/include/xenomai/linux/stdarg.h                                                     |    1 
 kernel/arch/x86/include/asm/irqflags.h                                                    |   71 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am                                     |   18 
 kernel/include/xenomai/cobalt/kernel/clock.h                                              |    1 
 kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc                                  |  217 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Makefile                          |    7 
 kernel/include/xenomai/cobalt/kernel/schedqueue.h                                         |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/arp.c                                 |  212 
 kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am                                              |   43 
 kernel/kernel/trace/trace_branch.c                                                        |    4 
 kernel/xenomai-v3.2.4/kernel/drivers/udd/Makefile                                         |    5 
 kernel/arch/x86/kernel/cpu/mshyperv.c                                                     |    6 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h                  |    9 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4_chrdev.h                      |   94 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h          |   81 
 kernel/drivers/clocksource/exynos_mct.c                                                   |   50 
 kernel/kernel/trace/trace_clock.c                                                         |    4 
 kernel/xenomai-v3.2.4/lib/alchemy/reference.h                                             |   22 
 kernel/xenomai-v3.2.4/lib/alchemy/timer.c                                                 |  113 
 kernel/xenomai-v3.2.4/include/alchemy/timer.h                                             |  125 
 kernel/xenomai-v3.2.4/lib/alchemy/buffer.h                                                |   49 
 kernel/xenomai-v3.2.4/lib/alchemy/timer.h                                                 |   25 
 kernel/drivers/xenomai/net/drivers/experimental/Makefile                                  |    1 
 kernel/xenomai-v3.2.4/utils/net/rtcfg.c                                                   |  562 
 kernel/xenomai-v3.2.4/.clang-format                                                       |  493 
 kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/rtdm.c                                        |  202 
 kernel/kernel/xenomai/Kconfig                                                             |    1 
 kernel/drivers/xenomai/net/drivers/Makefile                                               |    1 
 kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h                                    |    1 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c                      |    1 
 kernel/include/xenomai/cobalt/kernel/arith.h                                              |    1 
 kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am                                            |   32 
 kernel/include/asm-generic/xenomai/dovetail/thread.h                                      |    1 
 kernel/arch/x86/kernel/idt.c                                                              |    4 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h                      |    1 
 kernel/kernel/irq/cpuhotplug.c                                                            |    5 
 kernel/drivers/xenomai/net/drivers/r8169.c                                                |    1 
 kernel/drivers/xenomai/net/drivers/Kconfig                                                |    1 
 kernel/drivers/xenomai/gpio/gpio-zynq7000.c                                               |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c                                       |  190 
 kernel/drivers/xenomai/analogy/Kconfig                                                    |    1 
 kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c                                       |    1 
 kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c                                           |    1 
 kernel/xenomai-v3.2.4/config/docbook.m4                                                   |  170 
 kernel/arch/x86/include/asm/fpu/internal.h                                                |   31 
 kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h                                  |   27 
 kernel/xenomai-v3.2.4/testsuite/smokey/main.c                                             |   53 
 kernel/drivers/spmi/spmi-pmic-arb.c                                                       |    4 
 kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h                                             |   42 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_proto.c                    |  407 
 kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-1.c                                          |  149 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/features.h                |   30 
 kernel/kernel/time/vsyscall.c                                                             |   31 
 kernel/xenomai-v3.2.4/include/trank/native/types.h                                        |   25 
 kernel/xenomai-v3.2.4/lib/alchemy/buffer.c                                                |  953 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c                                   | 1828 
 kernel/drivers/misc/atemsys-main/COPYING                                                  |  339 
 kernel/include/linux/irq.h                                                                |   50 
 kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h                                     |  135 
 kernel/xenomai-v3.2.4/include/cobalt/signal.h                                             |   61 
 kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES                                      |   53 
 kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h                 |  154 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_vnic.h                 |   59 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h      |   30 
 kernel/xenomai-v3.2.4/utils/corectl/corectl.c                                             |  153 
 kernel/drivers/irqchip/irq-sunxi-nmi.c                                                    |    4 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h             |  142 
 kernel/kernel/time/clocksource.c                                                          |   30 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h      |   24 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h                         |   18 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h                                  |  106 
 kernel/drivers/xenomai/testing/timerbench.c                                               |    1 
 kernel/include/asm-generic/xenomai/pci_ids.h                                              |    1 
 kernel/xenomai-v3.2.4/lib/copperplate/registry.c                                          | 1022 
 kernel/include/xenomai/rtdm/uapi/gpiopwm.h                                                |    1 
 kernel/xenomai-v3.2.4/include/mercury/pthread.h                                           |   21 
 kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-5.c                                      |  107 
 kernel/xenomai-v3.2.4/lib/cobalt/signal.c                                                 |  128 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/Makefile.am                                |   10 
 kernel/kernel/xenomai/registry.c                                                          |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h   |   38 
 kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig                                          |   81 
 kernel/drivers/tty/serial/samsung_tty.c                                                   |    4 
 kernel/drivers/xenomai/analogy/national_instruments/Kconfig                               |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/Makefile.am                                     |   13 
 kernel/arch/arm/include/asm/thread_info.h                                                 |   20 
 kernel/xenomai-v3.2.4/lib/alchemy/sem.h                                                   |   39 
 kernel/xenomai-v3.2.4/lib/copperplate/heapobj-pshared.c                                   | 1269 
 kernel/drivers/irqchip/irq-omap-intc.c                                                    |    2 
 kernel/xenomai-v3.2.4/lib/alchemy/sem.c                                                   |  576 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_proc.h                 |   63 
 kernel/xenomai-v3.2.4/utils/net/tdmacfg.c                                                 |  332 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_file.h                 |   43 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c                       |    1 
 kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c                                        |    1 
 kernel/drivers/clocksource/timer-sun4i.c                                                  |    5 
 kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h                                   |    1 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h                                          |   39 
 kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS                                          |   37 
 kernel/drivers/xenomai/analogy/intel/parport.c                                            |    1 
 kernel/include/xenomai/cobalt/uapi/kernel/trace.h                                         |    1 
 kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c                                           |    1 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h                                    |  150 
 kernel/drivers/xenomai/can/peak_canfd/Kconfig                                             |    1 
 kernel/drivers/xenomai/spi/Makefile                                                       |    1 
 kernel/arch/arm64/boot/dts/broadcom/Makefile                                              |    1 
 kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c                                       |    1 
 kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h                                          |   23 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h                                   |   82 
 kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c                                  | 2269 
 kernel/mm/mprotect.c                                                                      |    4 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/Makefile.am                     |    2 
 kernel/xenomai-v3.2.4/include/cobalt/sys/time.h                                           |   39 
 kernel/include/xenomai/pipeline/kevents.h                                                 |    1 
 kernel/xenomai-v3.2.4/lib/boilerplate/COPYING                                             |  458 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h      |   71 
 kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c                                       |    1 
 kernel/drivers/xenomai/net/drivers/tulip/tulip.h                                          |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/af_packet.c                         |  659 
 kernel/include/xenomai/rtdm/testing.h                                                     |    1 
 kernel/include/linux/intel-iommu.h                                                        |    2 
 kernel/include/xenomai/pipeline/pipeline.h                                                |    1 
 kernel/include/xenomai/cobalt/kernel/stat.h                                               |    1 
 kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h                                         |   88 
 kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c                         |    1 
 kernel/net/socket.c                                                                       |  127 
 kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h                                          |   83 
 kernel/drivers/gpio/gpio-pca953x.c                                                        |    1 
 kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h                                |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h    |   15 
 kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h                  |  109 
 kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h                                       |    1 
 kernel/xenomai-v3.2.4/lib/cobalt/timerfd.c                                                |   63 
 kernel/kernel/time/hrtimer.c                                                              |    1 
 kernel/xenomai-v3.2.4/include/rtdm/gpio.h                                                 |   24 
 kernel/xenomai-v3.2.4/utils/analogy/analogy_config.c                                      |  305 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.h                                     |   53 
 kernel/drivers/xenomai/serial/Makefile                                                    |    1 
 kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h |   25 
 kernel/drivers/xenomai/serial/Kconfig                                                     |    1 
 kernel/xenomai-v3.2.4/testsuite/smokey/net_common/client.c                                |  298 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/Makefile.am                                         |    4 
 kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.c                                     |  198 
 kernel/drivers/xenomai/can/rtcan_dev.h                                                    |    1 
 kernel/drivers/xenomai/can/rtcan_dev.c                                                    |    1 
 kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h                                 |    1 
 kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c                                        |    1 
 kernel/include/trace/events/cobalt-posix.h                                                |    1 
 kernel/xenomai-v3.2.4/utils/can/Makefile.am                                               |   36 
 kernel/include/linux/irqdesc.h                                                            |   27 
 kernel/xenomai-v3.2.4/include/boilerplate/time.h                                          |  101 
 kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h                                  |   31 
 kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c         |  543 
 kernel/include/xenomai/cobalt/kernel/sched.h                                              |    1 
 kernel/xenomai-v3.2.4/demo/alchemy/altency.c                                              |  699 
 kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts                                  | 1676 
 kernel/arch/x86/kernel/apic/apic_numachip.c                                               |    4 
 kernel/xenomai-v3.2.4/testsuite/smokey/y2038/syscall-tests.c                              | 1203 
 kernel/kernel/xenomai/pipe.c                                                              |    1 
 kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394                               |   65 
 kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/Makefile.am                                     |   13 
 kernel/xenomai-v3.2.4/include/boilerplate/list.h                                          |   36 
 kernel/mm/kasan/report.c                                                                  |    6 
 kernel/arch/x86/entry/common.c                                                            |   32 
 kernel/include/xenomai/rtdm/uapi/net.h                                                    |    1 
 kernel/fs/file.c                                                                          |    8 
 kernel/arch/arm64/xenomai/dovetail/Makefile                                               |    1 
 2407 files changed, 417045 insertions(+), 2671 deletions(-)

diff --git a/kernel/Documentation/dovetail.rst b/kernel/Documentation/dovetail.rst
new file mode 100644
index 0000000..5d37b04
--- /dev/null
+++ b/kernel/Documentation/dovetail.rst
@@ -0,0 +1,30 @@
+========================
+Introduction to Dovetail
+========================
+
+:Author: Philippe Gerum
+:Date: 08.04.2020
+
+Using Linux as a host for lightweight software cores specialized in
+delivering very short and bounded response times has been a popular
+way of supporting real-time applications in the embedded space over
+the years.
+
+In this so-called *dual kernel* design, the time-critical work is
+immediately delegated to a small companion core running out-of-band
+with respect to the regular, in-band kernel activities. Applications
+run in user space, obtaining real-time services from the
+core. Alternatively, when there is no real-time requirement, threads
+can still use the rich GPOS feature set Linux provides, such as
+networking, data storage or GUIs.
+
+*Dovetail* introduces a high-priority execution stage into the main
+kernel logic reserved for such a companion core to run on.  At any
+time, out-of-band activities from this stage can preempt the common,
+in-band work. A companion core can be implemented as a driver,
+which connects to the main kernel via the Dovetail interface for
+delivering ultra-low latency scheduling capabilities to applications.
+
+Dovetail is fully described at https://evlproject.org/dovetail/.
+The reference implementation of a Dovetail-based companion core is
+maintained at https://evlproject.org/core/.
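
To make the two-stage picture concrete, here is a minimal sketch of
stage-aware driver code, assuming only the stage predicates this patch
relies on elsewhere (running_inband()/running_oob()); the handler
itself is hypothetical, not part of the patch:

    #include <linux/kernel.h>
    #include <linux/irq_pipeline.h>

    /* Hedged sketch: act according to the current execution stage. */
    static void sample_event_handler(void)
    {
            if (running_oob())
                    return; /* oob stage: no in-band services here */

            /* in-band stage: the regular GPOS services are available */
            WARN_ON_ONCE(!running_inband());
    }
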
diff --git a/kernel/Makefile b/kernel/Makefile
index 8bac90b..dba458a 100644
--- a/kernel/Makefile
+++ b/kernel/Makefile
@@ -1,7 +1,7 @@
 # SPDX-License-Identifier: GPL-2.0
 VERSION = 5
 PATCHLEVEL = 10
-SUBLEVEL = 160
+SUBLEVEL = 161
 EXTRAVERSION =
 NAME = Dare mighty things
 
diff --git a/kernel/arch/Kconfig b/kernel/arch/Kconfig
index c3a40c5..42eb62f 100644
--- a/kernel/arch/Kconfig
+++ b/kernel/arch/Kconfig
@@ -229,6 +229,9 @@
 config HAVE_NMI
 	bool
 
+config HAVE_PERCPU_PREEMPT_COUNT
+	bool
+
 #
 # An arch should select this if it provides all these things:
 #
diff --git a/kernel/arch/arm/Kconfig b/kernel/arch/arm/Kconfig
index f6ce22c..57afcf1 100644
--- a/kernel/arch/arm/Kconfig
+++ b/kernel/arch/arm/Kconfig
@@ -236,6 +236,11 @@
 config ARCH_MTD_XIP
 	bool
 
+# Limited I-pipe compat (syscall routing only).
+config IPIPE_COMPAT
+	bool
+	select DOVETAIL_LEGACY_SYSCALL_RANGE
+
 config ARM_PATCH_PHYS_VIRT
 	bool "Patch physical to virtual translations at runtime" if EMBEDDED
 	default y
@@ -585,6 +590,8 @@
 config ARCH_MULTI_V6_V7
 	bool
 	select MIGHT_HAVE_CACHE_L2X0
+	select HAVE_IRQ_PIPELINE
+	select HAVE_DOVETAIL if CPU_HAS_ASID
 
 config ARCH_MULTI_CPU_AUTO
 	def_bool !(ARCH_MULTI_V4 || ARCH_MULTI_V4T || ARCH_MULTI_V6_V7)
@@ -1237,6 +1244,8 @@
 	  MultiThreading at a cost of slightly increased overhead in some
 	  places. If unsure say N here.
 
+source "kernel/Kconfig.dovetail"
+
 config HAVE_ARM_SCU
 	bool
 	help
diff --git a/kernel/arch/arm/common/mcpm_entry.c b/kernel/arch/arm/common/mcpm_entry.c
index 8a9aeeb..53c3be5 100644
--- a/kernel/arch/arm/common/mcpm_entry.c
+++ b/kernel/arch/arm/common/mcpm_entry.c
@@ -206,7 +206,7 @@
 	 * Since this is called with IRQs enabled, and no arch_spin_lock_irq
 	 * variant exists, we need to disable IRQs manually here.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	arch_spin_lock(&mcpm_lock);
 
 	cpu_is_down = !mcpm_cpu_use_count[cluster][cpu];
@@ -230,7 +230,7 @@
 		ret = platform_ops->cpu_powerup(cpu, cluster);
 
 	arch_spin_unlock(&mcpm_lock);
-	local_irq_enable();
+	hard_local_irq_enable();
 	return ret;
 }
 
@@ -349,7 +349,7 @@
 	mpidr = read_cpuid_mpidr();
 	cpu = MPIDR_AFFINITY_LEVEL(mpidr, 0);
 	cluster = MPIDR_AFFINITY_LEVEL(mpidr, 1);
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	arch_spin_lock(&mcpm_lock);
 
 	cpu_was_down = !mcpm_cpu_use_count[cluster][cpu];
@@ -363,7 +363,7 @@
 		platform_ops->cpu_is_up(cpu, cluster);
 
 	arch_spin_unlock(&mcpm_lock);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return 0;
 }
@@ -402,7 +402,7 @@
 	 * infrastructure. Let's play it safe by using cpu_pm_enter()
 	 * in case the CPU init code path resets the VFP or similar.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	local_fiq_disable();
 	ret = cpu_pm_enter();
 	if (!ret) {
@@ -410,7 +410,7 @@
 		cpu_pm_exit();
 	}
 	local_fiq_enable();
-	local_irq_enable();
+	hard_local_irq_enable();
 	if (ret)
 		pr_err("%s returned %d\n", __func__, ret);
 	return ret;
diff --git a/kernel/arch/arm/include/asm/assembler.h b/kernel/arch/arm/include/asm/assembler.h
index 8ff20f9..e2df9c4 100644
--- a/kernel/arch/arm/include/asm/assembler.h
+++ b/kernel/arch/arm/include/asm/assembler.h
@@ -122,7 +122,7 @@
 	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
 	.endif
-	bl	trace_hardirqs_off
+	bl	trace_hardirqs_off_pipelined
 	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
 	.endif
@@ -138,13 +138,25 @@
 	.if \save
 	stmdb   sp!, {r0-r3, ip, lr}
 	.endif
-	bl\cond	trace_hardirqs_on
+	bl\cond	trace_hardirqs_on_pipelined
 	.if \save
 	ldmia	sp!, {r0-r3, ip, lr}
 	.endif
 #endif
 	.endm
 
+	.macro  disable_irq_if_pipelined
+#ifdef CONFIG_IRQ_PIPELINE
+	disable_irq_notrace
+#endif
+	.endm
+
+	.macro  enable_irq_if_pipelined
+#ifdef CONFIG_IRQ_PIPELINE
+	enable_irq_notrace
+#endif
+	.endm
+
 	.macro disable_irq, save=1
 	disable_irq_notrace
 	asm_trace_hardirqs_off \save
diff --git a/kernel/arch/arm/include/asm/atomic.h b/kernel/arch/arm/include/asm/atomic.h
index 455eb19..0cf92e5 100644
--- a/kernel/arch/arm/include/asm/atomic.h
+++ b/kernel/arch/arm/include/asm/atomic.h
@@ -164,9 +164,9 @@
 {									\
 	unsigned long flags;						\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	v->counter c_op i;						\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 }									\
 
 #define ATOMIC_OP_RETURN(op, c_op, asm_op)				\
@@ -175,10 +175,10 @@
 	unsigned long flags;						\
 	int val;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	v->counter c_op i;						\
 	val = v->counter;						\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return val;							\
 }
@@ -189,10 +189,10 @@
 	unsigned long flags;						\
 	int val;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	val = v->counter;						\
 	v->counter c_op i;						\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return val;							\
 }
@@ -202,11 +202,11 @@
 	int ret;
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	ret = v->counter;
 	if (likely(ret == old))
 		v->counter = new;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return ret;
 }
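
The switch above matters because raw_local_irq_save() only masks the
virtual (in-band) interrupt state once interrupts are pipelined, which
would leave these read-modify-write sequences open to out-of-band
preemption. For reference, this is roughly what ATOMIC_OP(add, +=, add)
expands to after the change (a sketch of the expansion, not new code):

    static inline void atomic_add(int i, atomic_t *v)
    {
            unsigned long flags;

            flags = hard_local_irq_save();  /* CPU-level mask: oob-safe */
            v->counter += i;
            hard_local_irq_restore(flags);
    }
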
diff --git a/kernel/arch/arm/include/asm/bitops.h b/kernel/arch/arm/include/asm/bitops.h
index c92e42a..9779f32 100644
--- a/kernel/arch/arm/include/asm/bitops.h
+++ b/kernel/arch/arm/include/asm/bitops.h
@@ -40,9 +40,9 @@
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	*p |= mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline void ____atomic_clear_bit(unsigned int bit, volatile unsigned long *p)
@@ -52,9 +52,9 @@
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	*p &= ~mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline void ____atomic_change_bit(unsigned int bit, volatile unsigned long *p)
@@ -64,9 +64,9 @@
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	*p ^= mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline int
@@ -78,10 +78,10 @@
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	res = *p;
 	*p = res | mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return (res & mask) != 0;
 }
@@ -95,10 +95,10 @@
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	res = *p;
 	*p = res & ~mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return (res & mask) != 0;
 }
@@ -112,10 +112,10 @@
 
 	p += BIT_WORD(bit);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	res = *p;
 	*p = res ^ mask;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return (res & mask) != 0;
 }
diff --git a/kernel/arch/arm/include/asm/cmpxchg.h b/kernel/arch/arm/include/asm/cmpxchg.h
index 8b701f8..60ccad3 100644
--- a/kernel/arch/arm/include/asm/cmpxchg.h
+++ b/kernel/arch/arm/include/asm/cmpxchg.h
@@ -77,17 +77,17 @@
 #error SMP is not supported on this platform
 #endif
 	case 1:
-		raw_local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile unsigned char *)ptr;
 		*(volatile unsigned char *)ptr = x;
-		raw_local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		break;
 
 	case 4:
-		raw_local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile unsigned long *)ptr;
 		*(volatile unsigned long *)ptr = x;
-		raw_local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		break;
 #else
 	case 1:
diff --git a/kernel/arch/arm/include/asm/dovetail.h b/kernel/arch/arm/include/asm/dovetail.h
new file mode 100644
index 0000000..f8fe64a
--- /dev/null
+++ b/kernel/arch/arm/include/asm/dovetail.h
@@ -0,0 +1,61 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum.
+ */
+#ifndef _ASM_ARM_DOVETAIL_H
+#define _ASM_ARM_DOVETAIL_H
+
+/* ARM traps */
+#define ARM_TRAP_ACCESS		0	/* Data or instruction access exception */
+#define ARM_TRAP_SECTION	1	/* Section fault */
+#define ARM_TRAP_DABT		2	/* Generic data abort */
+#define ARM_TRAP_PABT		3	/* Prefetch abort */
+#define ARM_TRAP_BREAK		4	/* Instruction breakpoint */
+#define ARM_TRAP_FPU		5	/* Floating point exception */
+#define ARM_TRAP_VFP		6	/* VFP floating point exception */
+#define ARM_TRAP_UNDEFINSTR	7	/* Undefined instruction */
+#define ARM_TRAP_ALIGNMENT	8	/* Unaligned access exception */
+
+#if !defined(__ASSEMBLY__)
+
+#ifdef CONFIG_DOVETAIL
+
+static inline void arch_dovetail_exec_prepare(void)
+{ }
+
+static inline void arch_dovetail_switch_prepare(bool leave_inband)
+{ }
+
+static inline void arch_dovetail_switch_finish(bool enter_inband)
+{ }
+
+#endif
+
+/*
+ * Pass the trap event to the companion core. Return true if running
+ * in-band afterwards.
+ */
+#define mark_cond_trap_entry(__trapnr, __regs)		\
+	({						\
+		oob_trap_notify(__trapnr, __regs);	\
+		running_inband();			\
+	})
+
+/*
+ * Pass the trap event to the companion core. We expect the current
+ * context to be running on the in-band stage upon return so that our
+ * caller can tread on common kernel code.
+ */
+#define mark_trap_entry(__trapnr, __regs)				\
+	do {								\
+		bool __ret = mark_cond_trap_entry(__trapnr, __regs);	\
+		BUG_ON(dovetail_debug() && !__ret);			\
+	} while (0)
+
+#define mark_trap_exit(__trapnr, __regs)				\
+	oob_trap_unwind(__trapnr, __regs)
+
+#endif	/* !__ASSEMBLY__ */
+
+#endif /* _ASM_ARM_DOVETAIL_H */
diff --git a/kernel/arch/arm/include/asm/efi.h b/kernel/arch/arm/include/asm/efi.h
index 3ee4f43..dedafc6 100644
--- a/kernel/arch/arm/include/asm/efi.h
+++ b/kernel/arch/arm/include/asm/efi.h
@@ -37,7 +37,11 @@
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
+	unsigned long flags;
+
+	protect_inband_mm(flags);
 	check_and_switch_context(mm, NULL);
+	unprotect_inband_mm(flags);
 }
 
 void efi_virtmap_load(void);
diff --git a/kernel/arch/arm/include/asm/irq_pipeline.h b/kernel/arch/arm/include/asm/irq_pipeline.h
new file mode 100644
index 0000000..5970c6d
--- /dev/null
+++ b/kernel/arch/arm/include/asm/irq_pipeline.h
@@ -0,0 +1,135 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_ARM_IRQ_PIPELINE_H
+#define _ASM_ARM_IRQ_PIPELINE_H
+
+#include <asm-generic/irq_pipeline.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * In order to cope with the limited number of SGIs available to us,
+ * In-band IPI messages are multiplexed over SGI0, whereas out-of-band
+ * IPIs are directly mapped to SGI1-2.
+ */
+#define OOB_NR_IPI		2
+#define OOB_IPI_OFFSET		1 /* SGI1 */
+#define TIMER_OOB_IPI		(ipi_irq_base + OOB_IPI_OFFSET)
+#define RESCHEDULE_OOB_IPI	(TIMER_OOB_IPI + 1)
+
+extern int ipi_irq_base;
+
+static inline notrace
+unsigned long arch_irqs_virtual_to_native_flags(int stalled)
+{
+	return (!!stalled) << IRQMASK_I_POS;
+}
+
+static inline notrace
+unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags)
+{
+	return (!!hard_irqs_disabled_flags(flags)) << IRQMASK_i_POS;
+}
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	int stalled = inband_irq_save();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	barrier();
+	inband_irq_enable();
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	inband_irq_disable();
+	barrier();
+}
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	int stalled = inband_irqs_disabled();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+	inband_irq_restore(arch_irqs_disabled_flags(flags));
+	barrier();
+}
+
+static inline
+void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+	dst->ARM_cpsr = src->ARM_cpsr;
+	dst->ARM_pc = src->ARM_pc;
+}
+
+static inline bool arch_steal_pipelined_tick(struct pt_regs *regs)
+{
+	return !!(regs->ARM_cpsr & IRQMASK_I_BIT);
+}
+
+static inline int arch_enable_oob_stage(void)
+{
+	return 0;
+}
+
+#define arch_kentry_get_irqstate(__regs)		\
+	({						\
+		to_svc_pt_regs(__regs)->irqstate;	\
+	})
+
+#define arch_kentry_set_irqstate(__regs, __irqstate)		\
+	do {							\
+		to_svc_pt_regs(__regs)->irqstate = __irqstate;	\
+	} while (0)
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	return native_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	native_irq_enable();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	native_irq_disable();
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	return native_save_flags();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	native_irq_restore(flags);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#endif /* _ASM_ARM_IRQ_PIPELINE_H */
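
Under CONFIG_IRQ_PIPELINE, the helpers above virtualize the irqflags
API: arch_local_irq_save() stalls the in-band stage instead of masking
the CPU, and the previous stall bit is encoded at IRQMASK_I_POS so that
generic callers keep working unmodified. A minimal sketch of the
round-trip under the definitions above (the function is hypothetical):

    static void critical_section_sketch(void)
    {
            unsigned long flags;

            flags = arch_local_irq_save();  /* stalls the in-band stage */
            /*
             * Hard IRQs may still be taken here and get logged for
             * deferred in-band delivery; only the virtual mask is set.
             */
            if (arch_irqs_disabled_flags(flags))
                    ;       /* the stage was already stalled on entry */
            arch_local_irq_restore(flags);  /* unstalls if it was clear */
    }
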
diff --git a/kernel/arch/arm/include/asm/irqflags.h b/kernel/arch/arm/include/asm/irqflags.h
index aeec7f2..3c67ce2 100644
--- a/kernel/arch/arm/include/asm/irqflags.h
+++ b/kernel/arch/arm/include/asm/irqflags.h
@@ -5,6 +5,7 @@
 #ifdef __KERNEL__
 
 #include <asm/ptrace.h>
+#include <asm/barrier.h>
 
 /*
  * CPU interrupt mask handling.
@@ -13,41 +14,44 @@
 #define IRQMASK_REG_NAME_R "primask"
 #define IRQMASK_REG_NAME_W "primask"
 #define IRQMASK_I_BIT	1
+#define IRQMASK_I_POS	0
 #else
 #define IRQMASK_REG_NAME_R "cpsr"
 #define IRQMASK_REG_NAME_W "cpsr_c"
 #define IRQMASK_I_BIT	PSR_I_BIT
+#define IRQMASK_I_POS	7
 #endif
+#define IRQMASK_i_POS	31
 
 #if __LINUX_ARM_ARCH__ >= 6
 
 #define arch_local_irq_save arch_local_irq_save
-static inline unsigned long arch_local_irq_save(void)
+static inline unsigned long native_irq_save(void)
 {
 	unsigned long flags;
 
 	asm volatile(
-		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ arch_local_irq_save\n"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ native_irq_save\n"
 		"	cpsid	i"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
 }
 
 #define arch_local_irq_enable arch_local_irq_enable
-static inline void arch_local_irq_enable(void)
+static inline void native_irq_enable(void)
 {
 	asm volatile(
-		"	cpsie i			@ arch_local_irq_enable"
+		"	cpsie i			@ native_irq_enable"
 		:
 		:
 		: "memory", "cc");
 }
 
 #define arch_local_irq_disable arch_local_irq_disable
-static inline void arch_local_irq_disable(void)
+static inline void native_irq_disable(void)
 {
 	asm volatile(
-		"	cpsid i			@ arch_local_irq_disable"
+		"	cpsid i			@ native_irq_disable"
 		:
 		:
 		: "memory", "cc");
@@ -69,12 +73,12 @@
  * Save the current interrupt enable state & disable IRQs
  */
 #define arch_local_irq_save arch_local_irq_save
-static inline unsigned long arch_local_irq_save(void)
+static inline unsigned long native_irq_save(void)
 {
 	unsigned long flags, temp;
 
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_save\n"
+		"	mrs	%0, cpsr	@ native_irq_save\n"
 		"	orr	%1, %0, #128\n"
 		"	msr	cpsr_c, %1"
 		: "=r" (flags), "=r" (temp)
@@ -87,11 +91,11 @@
  * Enable IRQs
  */
 #define arch_local_irq_enable arch_local_irq_enable
-static inline void arch_local_irq_enable(void)
+static inline void native_irq_enable(void)
 {
 	unsigned long temp;
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_enable\n"
+		"	mrs	%0, cpsr	@ native_irq_enable\n"
 		"	bic	%0, %0, #128\n"
 		"	msr	cpsr_c, %0"
 		: "=r" (temp)
@@ -103,11 +107,11 @@
  * Disable IRQs
  */
 #define arch_local_irq_disable arch_local_irq_disable
-static inline void arch_local_irq_disable(void)
+static inline void native_irq_disable(void)
 {
 	unsigned long temp;
 	asm volatile(
-		"	mrs	%0, cpsr	@ arch_local_irq_disable\n"
+		"	mrs	%0, cpsr	@ native_irq_disable\n"
 		"	orr	%0, %0, #128\n"
 		"	msr	cpsr_c, %0"
 		: "=r" (temp)
@@ -149,15 +153,22 @@
 #define local_abt_disable()	do { } while (0)
 #endif
 
+static inline void native_irq_sync(void)
+{
+	native_irq_enable();
+	isb();
+	native_irq_disable();
+}
+
 /*
  * Save the current interrupt enable state.
  */
 #define arch_local_save_flags arch_local_save_flags
-static inline unsigned long arch_local_save_flags(void)
+static inline unsigned long native_save_flags(void)
 {
 	unsigned long flags;
 	asm volatile(
-		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ local_save_flags"
+		"	mrs	%0, " IRQMASK_REG_NAME_R "	@ native_save_flags"
 		: "=r" (flags) : : "memory", "cc");
 	return flags;
 }
@@ -166,21 +177,28 @@
  * restore saved IRQ & FIQ state
  */
 #define arch_local_irq_restore arch_local_irq_restore
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline void native_irq_restore(unsigned long flags)
 {
 	asm volatile(
-		"	msr	" IRQMASK_REG_NAME_W ", %0	@ local_irq_restore"
+		"	msr	" IRQMASK_REG_NAME_W ", %0	@ native_irq_restore"
 		:
 		: "r" (flags)
 		: "memory", "cc");
 }
 
 #define arch_irqs_disabled_flags arch_irqs_disabled_flags
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline int native_irqs_disabled_flags(unsigned long flags)
 {
 	return flags & IRQMASK_I_BIT;
 }
 
+static inline bool native_irqs_disabled(void)
+{
+	unsigned long flags = native_save_flags();
+	return native_irqs_disabled_flags(flags);
+}
+
+#include <asm/irq_pipeline.h>
 #include <asm-generic/irqflags.h>
 
 #endif /* ifdef __KERNEL__ */
diff --git a/kernel/arch/arm/include/asm/mmu_context.h b/kernel/arch/arm/include/asm/mmu_context.h
index f99ed52..32cded3 100644
--- a/kernel/arch/arm/include/asm/mmu_context.h
+++ b/kernel/arch/arm/include/asm/mmu_context.h
@@ -14,6 +14,7 @@
 #include <linux/sched.h>
 #include <linux/mm_types.h>
 #include <linux/preempt.h>
+#include <linux/irq_pipeline.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cachetype.h>
@@ -72,6 +73,7 @@
 static inline void finish_arch_post_lock_switch(void)
 {
 	struct mm_struct *mm = current->mm;
+	unsigned long flags;
 
 	if (mm && mm->context.switch_pending) {
 		/*
@@ -83,7 +85,9 @@
 		preempt_disable();
 		if (mm->context.switch_pending) {
 			mm->context.switch_pending = 0;
+			protect_inband_mm(flags);
 			cpu_switch_mm(mm->pgd, mm);
+			unprotect_inband_mm(flags);
 		}
 		preempt_enable_no_resched();
 	}
@@ -102,7 +106,7 @@
 #endif	/* CONFIG_CPU_HAS_ASID */
 
 #define destroy_context(mm)		do { } while(0)
-#define activate_mm(prev,next)		switch_mm(prev, next, NULL)
+#define activate_mm(prev,next)		__switch_mm(prev, next, NULL)
 
 /*
  * This is called when "tsk" is about to enter lazy TLB mode.
@@ -118,15 +122,9 @@
 {
 }
 
-/*
- * This is the actual mm switch as far as the scheduler
- * is concerned.  No registers are touched.  We avoid
- * calling the CPU specific function when the mm hasn't
- * actually changed.
- */
 static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
-	  struct task_struct *tsk)
+__switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	    struct task_struct *tsk)
 {
 #ifdef CONFIG_MMU
 	unsigned int cpu = smp_processor_id();
@@ -149,6 +147,30 @@
 #endif
 }
 
+/*
+ * This is the actual mm switch as far as the scheduler
+ * is concerned.  No registers are touched.  We avoid
+ * calling the CPU specific function when the mm hasn't
+ * actually changed.
+ */
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+	__switch_mm(prev, next, tsk);
+	unprotect_inband_mm(flags);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
 
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk)
+{
+	__switch_mm(prev, next, tsk);
+}
+
 #endif
diff --git a/kernel/arch/arm/include/asm/outercache.h b/kernel/arch/arm/include/asm/outercache.h
index 3364637..811978d 100644
--- a/kernel/arch/arm/include/asm/outercache.h
+++ b/kernel/arch/arm/include/asm/outercache.h
@@ -78,8 +78,13 @@
  */
 static inline void outer_flush_all(void)
 {
-	if (outer_cache.flush_all)
+	unsigned long flags;
+
+	if (outer_cache.flush_all) {
+		flags = hard_cond_local_irq_save();
 		outer_cache.flush_all();
+		hard_cond_local_irq_restore(flags);
+	}
 }
 
 /**
diff --git a/kernel/arch/arm/include/asm/ptrace.h b/kernel/arch/arm/include/asm/ptrace.h
index 73c83f4..5b4f5c4 100644
--- a/kernel/arch/arm/include/asm/ptrace.h
+++ b/kernel/arch/arm/include/asm/ptrace.h
@@ -20,6 +20,9 @@
 	struct pt_regs regs;
 	u32 dacr;
 	u32 addr_limit;
+#ifdef CONFIG_IRQ_PIPELINE
+	long irqstate;
+#endif
 };
 
 #define to_svc_pt_regs(r) container_of(r, struct svc_pt_regs, regs)
diff --git a/kernel/arch/arm/include/asm/syscall.h b/kernel/arch/arm/include/asm/syscall.h
index fd02761..eec5bca 100644
--- a/kernel/arch/arm/include/asm/syscall.h
+++ b/kernel/arch/arm/include/asm/syscall.h
@@ -63,6 +63,11 @@
 	memcpy(args, &regs->ARM_r0 + 1, 5 * sizeof(args[0]));
 }
 
+static inline unsigned long syscall_get_arg0(struct pt_regs *regs)
+{
+	return regs->ARM_ORIG_r0;
+}
+
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 const unsigned long *args)
diff --git a/kernel/arch/arm/include/asm/thread_info.h b/kernel/arch/arm/include/asm/thread_info.h
index eb7ce27..7c84af4 100644
--- a/kernel/arch/arm/include/asm/thread_info.h
+++ b/kernel/arch/arm/include/asm/thread_info.h
@@ -21,6 +21,7 @@
 
 struct task_struct;
 
+#include <dovetail/thread_info.h>
 #include <asm/types.h>
 
 typedef unsigned long mm_segment_t;
@@ -45,6 +46,7 @@
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
+	__u32			local_flags;	/* local (synchronous) flags */
 	int			preempt_count;	/* 0 => preemptable, <0 => bug */
 	mm_segment_t		addr_limit;	/* address limit */
 	struct task_struct	*task;		/* main task structure */
@@ -65,15 +67,19 @@
 #ifdef CONFIG_ARM_THUMBEE
 	unsigned long		thumbee_state;	/* ThumbEE Handler Base register */
 #endif
+	struct oob_thread_state	oob_state; /* co-kernel thread state */
 };
 
 #define INIT_THREAD_INFO(tsk)						\
 {									\
 	.task		= &tsk,						\
 	.flags		= 0,						\
+	.local_flags	= 0,						\
 	.preempt_count	= INIT_PREEMPT_COUNT,				\
 	.addr_limit	= KERNEL_DS,					\
 }
+
+#define ti_local_flags(__ti)	((__ti)->local_flags)
 
 /*
  * how to get the thread information struct from C
@@ -142,6 +148,8 @@
 #define TIF_USING_IWMMXT	17
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_RESTORE_SIGMASK	20
+#define TIF_MAYDAY		21	/* emergency trap pending */
+#define TIF_RETUSER		22	/* INBAND_TASK_RETUSER is pending */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@@ -153,6 +161,8 @@
 #define _TIF_SECCOMP		(1 << TIF_SECCOMP)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
 #define _TIF_USING_IWMMXT	(1 << TIF_USING_IWMMXT)
+#define _TIF_MAYDAY		(1 << TIF_MAYDAY)
+#define _TIF_RETUSER		(1 << TIF_RETUSER)
 
 /* Checks for any syscall work in entry-common.S */
 #define _TIF_SYSCALL_WORK (_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
@@ -163,7 +173,15 @@
  */
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_UPROBE | \
-				 _TIF_NOTIFY_SIGNAL)
+				 _TIF_NOTIFY_SIGNAL | _TIF_RETUSER)
+
+/*
+ * Local (synchronous) thread flags.
+ */
+#define _TLF_OOB		0x0001
+#define _TLF_DOVETAIL		0x0002
+#define _TLF_OFFSTAGE		0x0004
+#define _TLF_OOBTRAP		0x0008
 
 #endif /* __KERNEL__ */
 #endif /* __ASM_ARM_THREAD_INFO_H */
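
Unlike the TIF_* bits, the local (synchronous) flags describe the
current task only and are never set remotely. A minimal sketch of the
intended usage, assuming the ti_local_flags() accessor and _TLF_OOB bit
defined above (the helper itself is hypothetical):

    static inline bool current_runs_oob(void)
    {
            return !!(ti_local_flags(current_thread_info()) & _TLF_OOB);
    }
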
diff --git a/kernel/arch/arm/include/asm/trace/exceptions.h b/kernel/arch/arm/include/asm/trace/exceptions.h
new file mode 100644
index 0000000..bdb666b
--- /dev/null
+++ b/kernel/arch/arm/include/asm/trace/exceptions.h
@@ -0,0 +1,62 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM exceptions
+
+#if !defined(_TRACE_EXCEPTIONS_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_EXCEPTIONS_H
+
+#include <linux/tracepoint.h>
+#include <asm/ptrace.h>
+#include <asm/dovetail.h>
+
+#define __trace_trap(__sym)	{ __sym, #__sym }
+
+#define trace_trap_symbolic(__trapnr)				\
+	__print_symbolic(__trapnr,				\
+			__trace_trap(ARM_TRAP_ACCESS),		\
+			__trace_trap(ARM_TRAP_SECTION),		\
+			__trace_trap(ARM_TRAP_DABT),		\
+			__trace_trap(ARM_TRAP_PABT),		\
+			__trace_trap(ARM_TRAP_BREAK),		\
+			__trace_trap(ARM_TRAP_FPU),		\
+			__trace_trap(ARM_TRAP_VFP),		\
+			__trace_trap(ARM_TRAP_UNDEFINSTR),	\
+			__trace_trap(ARM_TRAP_ALIGNMENT))
+
+DECLARE_EVENT_CLASS(ARM_trap_event,
+	TP_PROTO(int trapnr, struct pt_regs *regs),
+	TP_ARGS(trapnr, regs),
+
+	TP_STRUCT__entry(
+		__field(int, trapnr)
+		__field(struct pt_regs *, regs)
+		),
+
+	TP_fast_assign(
+		__entry->trapnr = trapnr;
+		__entry->regs = regs;
+		),
+
+	TP_printk("%s mode trap: %s",
+		user_mode(__entry->regs) ? "user" : "kernel",
+		trace_trap_symbolic(__entry->trapnr))
+);
+
+DEFINE_EVENT(ARM_trap_event, ARM_trap_entry,
+	TP_PROTO(int trapnr, struct pt_regs *regs),
+	TP_ARGS(trapnr, regs)
+);
+
+DEFINE_EVENT(ARM_trap_event, ARM_trap_exit,
+	TP_PROTO(int trapnr, struct pt_regs *regs),
+	TP_ARGS(trapnr, regs)
+);
+
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_PATH asm/trace
+#define TRACE_INCLUDE_FILE exceptions
+#endif /*  _TRACE_EXCEPTIONS_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
diff --git a/kernel/arch/arm/include/asm/vdso/gettimeofday.h b/kernel/arch/arm/include/asm/vdso/gettimeofday.h
index 2134cbd..eadbcde 100644
--- a/kernel/arch/arm/include/asm/vdso/gettimeofday.h
+++ b/kernel/arch/arm/include/asm/vdso/gettimeofday.h
@@ -142,6 +142,66 @@
 	return __get_datapage();
 }
 
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+
+extern struct vdso_priv *__get_privpage(void);
+
+static __always_inline struct vdso_priv *__arch_get_vdso_priv(void)
+{
+	return __get_privpage();
+}
+
+static __always_inline long clock_open_device(const char *path, int mode)
+{
+	register u32 r0 asm("r0") = (u32)path;
+	register u32 r1 asm("r1") = (u32)mode;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_open;
+
+	asm volatile(
+		"	swi #0\n"
+		: "=r" (ret)
+		: "r"(r0), "r"(r1), "r"(nr)
+		: "memory");
+
+	return ret;
+}
+
+static __always_inline
+long clock_ioctl_device(int fd, unsigned int cmd, long arg)
+{
+	register u32 r0 asm("r0") = (u32)fd;
+	register u32 r1 asm("r1") = (u32)cmd;
+	register u32 r2 asm("r2") = (u32)arg;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_ioctl;
+
+	asm volatile(
+		"	swi #0\n"
+		: "=r" (ret)
+		: "r"(r0), "r"(r1), "r"(r2), "r"(nr)
+		: "memory");
+
+	return ret;
+}
+
+static __always_inline long clock_close_device(int fd)
+{
+	register u32 r0 asm("r0") = (u32)fd;
+	register long ret asm ("r0");
+	register long nr asm("r7") = __NR_close;
+
+	asm volatile(
+		"	swi #0\n"
+		: "=r" (ret)
+		: "r"(r0), "r"(nr)
+		: "memory");
+
+	return ret;
+}
+
+#endif	/* CONFIG_GENERIC_CLOCKSOURCE_VDSO */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
diff --git a/kernel/arch/arm/kernel/Makefile b/kernel/arch/arm/kernel/Makefile
index 79588b5..5d9267c 100644
--- a/kernel/arch/arm/kernel/Makefile
+++ b/kernel/arch/arm/kernel/Makefile
@@ -92,6 +92,11 @@
 head-y			:= head$(MMUEXT).o
 obj-$(CONFIG_DEBUG_LL)	+= debug.o
 obj-$(CONFIG_EARLY_PRINTK)	+= early_printk.o
+ifeq ($(CONFIG_DEBUG_LL),y)
+obj-$(CONFIG_RAW_PRINTK)	+= raw_printk.o
+endif
+
+obj-$(CONFIG_IRQ_PIPELINE)	+= irq_pipeline.o
 
 # This is executed very early using a temporary stack when no memory allocator
 # nor global data is available. Everything has to be allocated on the stack.
diff --git a/kernel/arch/arm/kernel/asm-offsets.c b/kernel/arch/arm/kernel/asm-offsets.c
index 70993af..6dd7a8f 100644
--- a/kernel/arch/arm/kernel/asm-offsets.c
+++ b/kernel/arch/arm/kernel/asm-offsets.c
@@ -42,6 +42,8 @@
 #endif
   BLANK();
   DEFINE(TI_FLAGS,		offsetof(struct thread_info, flags));
+  DEFINE(TI_LOCAL_FLAGS,	offsetof(struct thread_info, local_flags));
+  DEFINE(TI_SYSCALL,		offsetof(struct thread_info, syscall));
   DEFINE(TI_PREEMPT,		offsetof(struct thread_info, preempt_count));
   DEFINE(TI_ADDR_LIMIT,		offsetof(struct thread_info, addr_limit));
   DEFINE(TI_TASK,		offsetof(struct thread_info, task));
@@ -51,6 +53,7 @@
   DEFINE(TI_USED_CP,		offsetof(struct thread_info, used_cp));
   DEFINE(TI_TP_VALUE,		offsetof(struct thread_info, tp_value));
   DEFINE(TI_FPSTATE,		offsetof(struct thread_info, fpstate));
+  DEFINE(TI_OOB_MASK,		STAGE_MASK);
 #ifdef CONFIG_VFP
   DEFINE(TI_VFPSTATE,		offsetof(struct thread_info, vfpstate));
 #ifdef CONFIG_SMP
@@ -161,6 +164,7 @@
   BLANK();
 #ifdef CONFIG_VDSO
   DEFINE(VDSO_DATA_SIZE,	sizeof(union vdso_data_store));
+  DEFINE(VDSO_PRIV_SIZE,	PAGE_SIZE);
 #endif
   BLANK();
 #ifdef CONFIG_ARM_MPU
diff --git a/kernel/arch/arm/kernel/entry-armv.S b/kernel/arch/arm/kernel/entry-armv.S
index 030351d..a3932c6 100644
--- a/kernel/arch/arm/kernel/entry-armv.S
+++ b/kernel/arch/arm/kernel/entry-armv.S
@@ -5,6 +5,7 @@
  *  Copyright (C) 1996,1997,1998 Russell King.
  *  ARM700 fix by Matthew Godbolt (linux-user@willothewisp.demon.co.uk)
  *  nommu support by Hyok S. Choi (hyok.choi@samsung.com)
+ *  Copyright (C) 2005 Stelian Pop.
  *
  *  Low-level vector interface routines
  *
@@ -32,16 +33,24 @@
 #include "entry-header.S"
 #include <asm/entry-macro-multi.S>
 #include <asm/probes.h>
+#include <asm/dovetail.h>
 
 /*
  * Interrupt handling.
  */
 	.macro	irq_handler
 #ifdef CONFIG_GENERIC_IRQ_MULTI_HANDLER
-	ldr	r1, =handle_arch_irq
 	mov	r0, sp
 	badr	lr, 9997f
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	r1, =handle_arch_irq_pipelined
+	mov	pc, r1
+#else
+	ldr	r1, =handle_arch_irq
 	ldr	pc, [r1]
+#endif
+#elif defined(CONFIG_IRQ_PIPELINE)
+#error "Legacy IRQ handling not pipelined"
 #else
 	arch_irq_handler_default
 #endif
@@ -183,7 +192,10 @@
 	uaccess_entry tsk, r0, r1, r2, \uaccess
 
 	.if \trace
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQ_PIPELINE
+	mov	r0, sp
+	bl	kentry_enter_pipelined
+#elif defined(CONFIG_TRACE_IRQFLAGS)
 	bl	trace_hardirqs_off
 #endif
 	.endif
@@ -203,6 +215,10 @@
 __irq_svc:
 	svc_entry
 	irq_handler
+#ifdef CONFIG_IRQ_PIPELINE
+	tst	r0, r0				@ skip epilogue if oob or in-band stalled
+	beq	1f
+#endif
 
 #ifdef CONFIG_PREEMPTION
 	ldr	r8, [tsk, #TI_PREEMPT]		@ get preempt count
@@ -213,6 +229,7 @@
 	blne	svc_preempt
 #endif
 
+1:
 	svc_exit r5, irq = 1			@ return from exception
  UNWIND(.fnend		)
 ENDPROC(__irq_svc)
@@ -222,7 +239,7 @@
 #ifdef CONFIG_PREEMPTION
 svc_preempt:
 	mov	r8, lr
-1:	bl	preempt_schedule_irq		@ irq en/disable is done inside
+1:	bl	arm_preempt_schedule_irq	@ irq en/disable is done inside
 	ldr	r0, [tsk, #TI_FLAGS]		@ get new tasks TI_FLAGS
 	tst	r0, #_TIF_NEED_RESCHED
 	reteq	r8				@ go again
@@ -252,6 +269,16 @@
 #else
 	svc_entry
 #endif
+#ifdef CONFIG_DOVETAIL
+	get_thread_info tsk
+	ldr	r0, [tsk, #TI_PREEMPT]		@ get preempt count
+	tst	r0, #TI_OOB_MASK		@ oob stage?
+	beq	1f
+	mov	r0, #ARM_TRAP_UNDEFINSTR
+	mov	r1, sp				@ r1 = &regs
+	bl	__oob_trap_notify
+1:
+#endif
 
 	mov	r1, #4				@ PC correction to apply
  THUMB(	tst	r5, #PSR_T_BIT		)	@ exception taken in Thumb mode?
@@ -261,6 +288,15 @@
 
 __und_svc_finish:
 	get_thread_info tsk
+#ifdef CONFIG_DOVETAIL
+	ldr	r0, [tsk, #TI_PREEMPT]		@ get preempt count
+	tst	r0, #TI_OOB_MASK		@ oob stage?
+	beq	1f
+	mov	r0, #ARM_TRAP_UNDEFINSTR
+	mov	r1, sp				@ r1 = &regs
+	bl	__oob_trap_unwind
+1:
+#endif
 	ldr	r5, [sp, #S_PSR]		@ Get SVC cpsr
 	svc_exit r5				@ return from exception
  UNWIND(.fnend		)
@@ -391,7 +427,7 @@
 
 	.if	\trace
 #ifdef CONFIG_TRACE_IRQFLAGS
-	bl	trace_hardirqs_off
+	bl	trace_hardirqs_off_pipelined
 #endif
 	ct_user_exit save = 0
 	.endif
@@ -427,8 +463,12 @@
 	usr_entry
 	kuser_cmpxchg_check
 	irq_handler
-	get_thread_info tsk
 	mov	why, #0
+#ifdef CONFIG_IRQ_PIPELINE
+	tst	r0, r0
+	beq	fast_ret_to_user	@ skip epilogue if oob (in-band cannot be stalled)
+#endif
+	get_thread_info tsk
 	b	ret_to_user_from_irq
  UNWIND(.fnend		)
 ENDPROC(__irq_usr)
@@ -721,7 +761,7 @@
  UNWIND(.cantunwind	)
 	get_thread_info tsk
 	mov	why, #0
-	b	ret_to_user
+	ret_to_user_pipelined r1
  UNWIND(.fnend		)
 ENDPROC(__pabt_usr)
 ENDPROC(ret_from_exception)
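
In the pipelined case, the return value convention of the IRQ entry
makes the assembly above easier to follow: handle_arch_irq_pipelined()
(added to arch/arm/kernel/irq.c later in this patch) returns nonzero
only when returning to an unstalled in-band context, the sole case in
which the preemption epilogue may run. A rough C rendering of the
__irq_svc path, as a sketch rather than the implementation:

    static void irq_svc_sketch(struct pt_regs *regs)
    {
            if (handle_arch_irq_pipelined(regs)) {
    #ifdef CONFIG_PREEMPTION
                    /* in-band, unstalled: preemption check is safe */
                    if (!preempt_count() && need_resched())
                            arm_preempt_schedule_irq();
    #endif
            }
            /* oob or stalled in-band: fall through to register restore */
    }
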
diff --git a/kernel/arch/arm/kernel/entry-common.S b/kernel/arch/arm/kernel/entry-common.S
index 9b3c737..7a75a49 100644
--- a/kernel/arch/arm/kernel/entry-common.S
+++ b/kernel/arch/arm/kernel/entry-common.S
@@ -3,6 +3,7 @@
  *  linux/arch/arm/kernel/entry-common.S
  *
  *  Copyright (C) 2000 Russell King
+ *  Copyright (C) 2005 Stelian Pop.
  */
 
 #include <asm/assembler.h>
@@ -12,6 +13,7 @@
 #include <asm/memory.h>
 #ifdef CONFIG_AEABI
 #include <asm/unistd-oabi.h>
+#include <uapi/asm-generic/dovetail.h>
 #endif
 
 	.equ	NR_syscalls, __NR_syscalls
@@ -54,6 +56,8 @@
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 	movs	r1, r1, lsl #16
+	ldr	r2, =#_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	ands	r2, r1, r2
 	bne	fast_work_pending
 
 
@@ -91,6 +95,8 @@
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]		@ re-check for syscall tracing
 	movs	r1, r1, lsl #16
+	ldr	r2, =#_TIF_SYSCALL_WORK | _TIF_WORK_MASK
+	ands	r2, r1, r2
 	beq	no_work_pending
  UNWIND(.fnend		)
 ENDPROC(ret_fast_syscall)
@@ -132,6 +138,8 @@
 	blne	addr_limit_check_failed
 	ldr	r1, [tsk, #TI_FLAGS]
 	movs	r1, r1, lsl #16
+	ldr	r2, =#_TIF_WORK_MASK
+	ands	r2, r1, r2
 	bne	slow_work_pending
 no_work_pending:
 	asm_trace_hardirqs_on save = 0
@@ -143,6 +151,10 @@
 	restore_user_regs fast = 0, offset = 0
 ENDPROC(ret_to_user_from_irq)
 ENDPROC(ret_to_user)
+ENTRY(fast_ret_to_user)
+	disable_irq_notrace			@ disable interrupts
+	b	no_work_pending
+ENDPROC(fast_ret_to_user)
 
 /*
  * This is how we return from a fork.
@@ -265,6 +277,9 @@
 	eor	scno, scno, #__NR_SYSCALL_BASE	@ check OS number
 #endif
 	get_thread_info tsk
+#ifdef CONFIG_DOVETAIL
+	str	scno, [tsk, #TI_SYSCALL]
+#endif
 	/*
 	 * Reload the registers that may have been corrupted on entry to
 	 * the syscall assembly (by tracing or context tracking.)
@@ -272,6 +287,70 @@
  TRACE(	ldmia	sp, {r0 - r3}		)
 
 local_restart:
+#ifdef CONFIG_DOVETAIL
+	ldr	r10, [tsk, #TI_LOCAL_FLAGS]	@ tsk(r10) is callee-saved
+#ifdef CONFIG_IPIPE_COMPAT
+	ldr	r0, =#0xf0042			@ old syscall signature
+	cmp	scno, r0
+	bne	1f
+	add	scno, scno, #__OOB_SYSCALL_BIT	@ force in oob marker
+	b	fastcall_try
+1:
+#endif
+#ifdef CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE
+	ldr	r0, =#__OOB_SYSCALL_BIT
+	ands	r0, scno, r0
+	bne	fastcall_try
+#endif
+	cmp	scno, #__NR_prctl
+	bne	slow_path
+	ldr	r0, [sp, #S_OLD_R0]
+	tst	r0, #__OOB_SYSCALL_BIT
+	beq	slow_path
+fastcall_try:
+	tst	r10, #_TLF_OOB
+	beq	slow_path
+	mov	r0, sp				@ regs
+	bl	handle_oob_syscall
+	ldr	r10, [tsk, #TI_LOCAL_FLAGS]
+	tst	r0, r0
+	beq	slow_path
+	tst	r10, #_TLF_OOB
+	bne	fastcall_exit_check		@ check for MAYDAY
+	bl	sync_inband_irqs
+	b	ret_slow_syscall
+fastcall_exit_check:
+	ldr	r10, [tsk, #TI_FLAGS]
+	tst	r10, #_TIF_MAYDAY
+	beq	fast_ret_to_user
+	mov	r0, sp
+	bl	dovetail_call_mayday
+	b	fast_ret_to_user
+slow_path:
+	tst	r10, #_TLF_DOVETAIL
+	bne	pipeline_syscall
+#ifdef CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE
+	ldr	r0, =#__OOB_SYSCALL_BIT
+	ands	r0, scno, r0
+	bne	pipeline_syscall
+#endif
+	cmp	scno, #__NR_prctl
+	bne	root_syscall
+	ldr	r0, [sp, #S_OLD_R0]
+	tst	r0, #__OOB_SYSCALL_BIT
+	beq	root_syscall
+pipeline_syscall:
+	mov	r0, sp				@ regs
+	bl	__pipeline_syscall
+	ldr	r10, [tsk, #TI_LOCAL_FLAGS]
+	tst	r10, #_TLF_OOB
+	bne	fast_ret_to_user
+	cmp	r0, #0
+	bgt	ret_slow_syscall
+root_syscall:
+	ldmia	sp, { r0 - r3 }
+#endif /* CONFIG_DOVETAIL */
+
 	ldr	r10, [tsk, #TI_FLAGS]		@ check for syscall tracing
 	stmdb	sp!, {r4, r5}			@ push fifth and sixth args
 
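The Dovetail branch added to local_restart implements a three-way
routing of syscalls. As a hedged control-flow sketch (simplified;
oob_marked() is hypothetical shorthand for the __OOB_SYSCALL_BIT and
oob-prctl tests performed by the assembly, while handle_oob_syscall()
and __pipeline_syscall() are the handlers it actually calls):

    static void route_syscall_sketch(struct pt_regs *regs, unsigned int scno)
    {
            unsigned long lflags = ti_local_flags(current_thread_info());

            /* Fast path: an oob thread issuing an oob-marked syscall. */
            if (oob_marked(regs, scno) && (lflags & _TLF_OOB) &&
                handle_oob_syscall(regs))
                    return;         /* handled on the oob stage */

            /* Dovetail-enabled thread or oob-marked call: ask the core. */
            if ((lflags & _TLF_DOVETAIL) || oob_marked(regs, scno)) {
                    __pipeline_syscall(regs);
                    return;
            }

            /* Neither: continue with the regular in-band syscall path. */
    }
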
diff --git a/kernel/arch/arm/kernel/entry-header.S b/kernel/arch/arm/kernel/entry-header.S
index 40db0f9..da1251c 100644
--- a/kernel/arch/arm/kernel/entry-header.S
+++ b/kernel/arch/arm/kernel/entry-header.S
@@ -203,15 +203,21 @@
 	.macro	svc_exit, rpsr, irq = 0
 	.if	\irq != 0
 	@ IRQs already off
-#ifdef CONFIG_TRACE_IRQFLAGS
 	@ The parent context IRQs must have been enabled to get here in
 	@ the first place, so there's no point checking the PSR I bit.
+#ifdef CONFIG_IRQ_PIPELINE
+	mov	r0, sp
+	bl	kentry_exit_pipelined
+#elif defined(CONFIG_TRACE_IRQFLAGS)
 	bl	trace_hardirqs_on
 #endif
 	.else
 	@ IRQs off again before pulling preserved data off the stack
 	disable_irq_notrace
-#ifdef CONFIG_TRACE_IRQFLAGS
+#ifdef CONFIG_IRQ_PIPELINE
+	mov	r0, sp
+	bl	kentry_exit_pipelined
+#elif defined(CONFIG_TRACE_IRQFLAGS)
 	tst	\rpsr, #PSR_I_BIT
 	bleq	trace_hardirqs_on
 	tst	\rpsr, #PSR_I_BIT
@@ -402,6 +408,19 @@
 	.endm
 
 /*
+ * Branch to the exception epilogue, skipping the in-band work
+ * if running over the out-of-band interrupt stage.
+ */
+	.macro ret_to_user_pipelined, tmp
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	\tmp, [tsk, #TI_LOCAL_FLAGS]
+	tst	\tmp, #_TLF_OOB
+	bne	fast_ret_to_user
+#endif
+	b	ret_to_user
+	.endm
+
+/*
  * These are the registers used in the syscall handler, and allow us to
  * have in theory up to 7 arguments to a function - r0 to r6.
  *
diff --git a/kernel/arch/arm/kernel/irq.c b/kernel/arch/arm/kernel/irq.c
index 698b6f6..0c3b893 100644
--- a/kernel/arch/arm/kernel/irq.c
+++ b/kernel/arch/arm/kernel/irq.c
@@ -23,6 +23,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqchip.h>
+#include <linux/irq_pipeline.h>
 #include <linux/random.h>
 #include <linux/smp.h>
 #include <linux/init.h>
@@ -97,6 +98,14 @@
 	uniphier_cache_init();
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+asmlinkage int __exception_irq_entry
+handle_arch_irq_pipelined(struct pt_regs *regs)
+{
+	return handle_irq_pipelined(regs);
+}
+#endif
+
 #ifdef CONFIG_SPARSE_IRQ
 int __init arch_probe_nr_irqs(void)
 {
diff --git a/kernel/arch/arm/kernel/irq_pipeline.c b/kernel/arch/arm/kernel/irq_pipeline.c
new file mode 100644
index 0000000..aa12dce
--- /dev/null
+++ b/kernel/arch/arm/kernel/irq_pipeline.c
@@ -0,0 +1,20 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+
+void arch_do_IRQ_pipelined(struct irq_desc *desc)
+{
+	struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs);
+	unsigned int irq = irq_desc_get_irq(desc);
+
+	__handle_domain_irq(NULL, irq, false, regs);
+}
+
+void __init arch_irq_pipeline_init(void)
+{
+	/* no per-arch init. */
+}
diff --git a/kernel/arch/arm/kernel/patch.c b/kernel/arch/arm/kernel/patch.c
index e9e828b..35c7285 100644
--- a/kernel/arch/arm/kernel/patch.c
+++ b/kernel/arch/arm/kernel/patch.c
@@ -17,7 +17,7 @@
 };
 
 #ifdef CONFIG_MMU
-static DEFINE_RAW_SPINLOCK(patch_lock);
+static DEFINE_HARD_SPINLOCK(patch_lock);
 
 static void __kprobes *patch_map(void *addr, int fixmap, unsigned long *flags)
 {
diff --git a/kernel/arch/arm/kernel/process.c b/kernel/arch/arm/kernel/process.c
index 47a30ff..d8da49a 100644
--- a/kernel/arch/arm/kernel/process.c
+++ b/kernel/arch/arm/kernel/process.c
@@ -71,6 +71,7 @@
 		arm_pm_idle();
 	else
 		cpu_do_idle();
+	hard_cond_local_irq_enable();
 	raw_local_irq_enable();
 }
 
@@ -448,3 +449,28 @@
 	return ret;
 }
 #endif
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * When pipelining interrupts, we have to reconcile the hardware and
+ * the virtual states. Hard irqs are off on entry while the current
+ * stage has to be unstalled: fix this up by stalling the in-band
+ * stage on entry, unstalling on exit.
+ */
+asmlinkage void __sched arm_preempt_schedule_irq(void)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+	stall_inband_nocheck();
+	preempt_schedule_irq();
+	unstall_inband_nocheck();
+}
+
+#else
+
+asmlinkage void __sched arm_preempt_schedule_irq(void)
+{
+	preempt_schedule_irq();
+}
+
+#endif
diff --git a/kernel/arch/arm/kernel/ptrace.c b/kernel/arch/arm/kernel/ptrace.c
index 2771e68..1074670 100644
--- a/kernel/arch/arm/kernel/ptrace.c
+++ b/kernel/arch/arm/kernel/ptrace.c
@@ -205,7 +205,9 @@
 
 static int break_trap(struct pt_regs *regs, unsigned int instr)
 {
+	mark_trap_entry(ARM_TRAP_BREAK, regs);
 	ptrace_break(regs);
+	mark_trap_exit(ARM_TRAP_BREAK, regs);
 	return 0;
 }
 
diff --git a/kernel/arch/arm/kernel/raw_printk.c b/kernel/arch/arm/kernel/raw_printk.c
new file mode 100644
index 0000000..9024b77
--- /dev/null
+++ b/kernel/arch/arm/kernel/raw_printk.c
@@ -0,0 +1,30 @@
+#include <linux/kernel.h>
+#include <linux/console.h>
+#include <linux/init.h>
+
+/*
+ * If both CONFIG_DEBUG_LL and CONFIG_RAW_PRINTK are set, create a
+ * console device sending the raw output to printascii().
+ */
+void printascii(const char *s);
+
+static void raw_console_write(struct console *co,
+			      const char *s, unsigned count)
+{
+	printascii(s);
+}
+
+static struct console raw_console = {
+	.name		= "rawcon",
+	.write_raw	= raw_console_write,
+	.flags		= CON_PRINTBUFFER | CON_ENABLED,
+	.index		= -1,
+};
+
+static int __init raw_console_init(void)
+{
+	register_console(&raw_console);
+
+	return 0;
+}
+console_initcall(raw_console_init);
diff --git a/kernel/arch/arm/kernel/signal.c b/kernel/arch/arm/kernel/signal.c
index a3a38d0..da78d2f 100644
--- a/kernel/arch/arm/kernel/signal.c
+++ b/kernel/arch/arm/kernel/signal.c
@@ -8,6 +8,7 @@
 #include <linux/random.h>
 #include <linux/signal.h>
 #include <linux/personality.h>
+#include <linux/irq_pipeline.h>
 #include <linux/uaccess.h>
 #include <linux/tracehook.h>
 #include <linux/uprobes.h>
@@ -639,16 +640,36 @@
 	return 0;
 }
 
+static inline void do_retuser(void)
+{
+	unsigned int thread_flags;
+
+	if (dovetailing()) {
+		thread_flags = current_thread_info()->flags;
+		if (thread_flags & _TIF_RETUSER)
+			inband_retuser_notify();
+	}
+}
+
 asmlinkage int
 do_work_pending(struct pt_regs *regs, unsigned int thread_flags, int syscall)
 {
+	WARN_ON_ONCE(irq_pipeline_debug() &&
+		(irqs_disabled() || running_oob()));
+
 	/*
 	 * The assembly code enters us with IRQs off, but it hasn't
 	 * informed the tracing code of that for efficiency reasons.
 	 * Update the trace code with the current status.
 	 */
-	trace_hardirqs_off();
+	if (!irqs_pipelined())
+		trace_hardirqs_off();
 	do {
+		if (irqs_pipelined()) {
+			local_irq_disable();
+			hard_cond_local_irq_enable();
+		}
+
 		if (likely(thread_flags & _TIF_NEED_RESCHED)) {
 			schedule();
 		} else {
@@ -658,6 +679,7 @@
 			if (thread_flags & (_TIF_SIGPENDING | _TIF_NOTIFY_SIGNAL)) {
 				int restart = do_signal(regs, syscall);
 				if (unlikely(restart)) {
+					do_retuser();
 					/*
 					 * Restart without handlers.
 					 * Deal with it without leaving
@@ -672,10 +694,16 @@
 				tracehook_notify_resume(regs);
 				rseq_handle_notify_resume(NULL, regs);
 			}
+			do_retuser();
 		}
-		local_irq_disable();
+		hard_local_irq_disable();
+
+		/* RETUSER might have switched oob */
+		if (!running_inband())
+			break;
+
 		thread_flags = current_thread_info()->flags;
-	} while (thread_flags & _TIF_WORK_MASK);
+	} while (inband_irq_pending() || (thread_flags & _TIF_WORK_MASK));
 	return 0;
 }
 
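The reworked loop maintains a single invariant while in-band work runs:
the virtual (in-band) state is stalled, but the CPU-level mask is kept
open so the out-of-band stage still receives interrupts. One iteration,
isolated as a sketch (the function is hypothetical):

    static void work_iteration_sketch(void)
    {
            local_irq_disable();            /* stall the in-band stage only */
            hard_cond_local_irq_enable();   /* hard IRQs stay open for oob */

            /* ... schedule(), signal delivery, tracehooks ... */

            hard_local_irq_disable();       /* quiesce before re-checking */
    }
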
diff --git a/kernel/arch/arm/kernel/smp.c b/kernel/arch/arm/kernel/smp.c
index 123432b..bdb4f7e 100644
--- a/kernel/arch/arm/kernel/smp.c
+++ b/kernel/arch/arm/kernel/smp.c
@@ -84,7 +84,7 @@
 	MAX_IPI
 };
 
-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[MAX_IPI] __read_mostly;
 
@@ -329,7 +329,7 @@
 
 	idle_task_exit();
 
-	local_irq_disable();
+	local_irq_disable_full();
 
 	/*
 	 * Flush the data out of the L1 cache for this CPU.  This must be
@@ -421,6 +421,13 @@
 	local_flush_tlb_all();
 
 	/*
+	 * irq_pipeline: debug_smp_processor_id() accesses percpu
+	 * data.
+	 */
+	if (irqs_pipelined())
+		set_my_cpu_offset(per_cpu_offset(raw_smp_processor_id()));
+
+	/*
 	 * All kernel threads share the same mm context; grab a
 	 * reference and switch to it.
 	 */
@@ -463,7 +470,7 @@
 
 	complete(&cpu_running);
 
-	local_irq_enable();
+	local_irq_enable_full();
 	local_fiq_enable();
 	local_abt_enable();
 
@@ -539,6 +546,8 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu);
+
 void show_ipi_list(struct seq_file *p, int prec)
 {
 	unsigned int cpu, i;
@@ -553,7 +562,7 @@
 		seq_printf(p, "%*s%u: ", prec - 1, "IPI", i);
 
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+			seq_printf(p, "%10u ", get_ipi_count(irq, cpu));
 
 		seq_printf(p, " %s\n", ipi_types[i]);
 	}
@@ -606,7 +615,7 @@
 	set_cpu_online(cpu, false);
 
 	local_fiq_disable();
-	local_irq_disable();
+	local_irq_disable_full();
 
 	while (1) {
 		cpu_relax();
@@ -695,12 +704,85 @@
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
+	/*
+	 * We don't support legacy IPI delivery when pipelining
+	 * interrupts.
+	 */
+	WARN_ON_ONCE(irqs_pipelined());
+
 	irq_enter();
 	do_handle_IPI(ipinr);
 	irq_exit();
 
 	set_irq_regs(old_regs);
 }
+
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [MAX_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..MAX_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;
+
+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else
 
 static irqreturn_t ipi_handler(int irq, void *data)
 {
@@ -710,9 +792,15 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise_rcuidle(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
 }
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return kstat_irqs_cpu(irq, cpu);
+}
+
+#endif /* CONFIG_IRQ_PIPELINE */
 
 static void ipi_setup(int cpu)
 {
@@ -727,18 +815,25 @@
 
 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;
 
 	WARN_ON(n < MAX_IPI);
 	nr_ipi = min(n, MAX_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are mutiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;
 
 	for (i = 0; i < nr_ipi; i++) {
-		int err;
+		if (i < inband_nr_ipi) {
+			int err;
 
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &irq_stat);
-		WARN_ON(err);
-
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						"IPI", &irq_stat);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 
diff --git a/kernel/arch/arm/kernel/smp_twd.c b/kernel/arch/arm/kernel/smp_twd.c
index 9a14f72..8377f1d 100644
--- a/kernel/arch/arm/kernel/smp_twd.c
+++ b/kernel/arch/arm/kernel/smp_twd.c
@@ -31,7 +31,7 @@
 
 static struct clock_event_device __percpu *twd_evt;
 static unsigned int twd_features =
-		CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+		CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
 static int twd_ppi;
 
 static int twd_shutdown(struct clock_event_device *clk)
@@ -182,7 +182,7 @@
 	struct clock_event_device *evt = dev_id;
 
 	if (twd_timer_ack()) {
-		evt->event_handler(evt);
+		clockevents_handle_event(evt);
 		return IRQ_HANDLED;
 	}
 
@@ -279,7 +279,8 @@
 		goto out_free;
 	}
 
-	err = request_percpu_irq(twd_ppi, twd_handler, "twd", twd_evt);
+	err = __request_percpu_irq(twd_ppi, twd_handler,
+				   IRQF_TIMER, "twd", twd_evt);
 	if (err) {
 		pr_err("twd: can't register interrupt %d (%d)\n", twd_ppi, err);
 		goto out_free;
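
The same two-step conversion applies to any per-cpu tick source: advertise
CLOCK_EVT_FEAT_PIPELINE and request the interrupt with IRQF_TIMER so the
pipeline may deliver ticks from the oob stage, then route the event through
clockevents_handle_event() instead of calling ->event_handler directly. A
sketch of the resulting handler shape (names illustrative):

	static irqreturn_t tick_handler(int irq, void *dev_id)
	{
		struct clock_event_device *evt = dev_id;

		/* routes to the in-band handler or the oob tick proxy */
		clockevents_handle_event(evt);
		return IRQ_HANDLED;
	}
	/* err = __request_percpu_irq(ppi, tick_handler, IRQF_TIMER, "tick", evt); */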
diff --git a/kernel/arch/arm/kernel/traps.c b/kernel/arch/arm/kernel/traps.c
index a531afa..e33ea9c 100644
--- a/kernel/arch/arm/kernel/traps.c
+++ b/kernel/arch/arm/kernel/traps.c
@@ -406,7 +406,7 @@
 #endif
 
 static LIST_HEAD(undef_hook);
-static DEFINE_RAW_SPINLOCK(undef_lock);
+static DEFINE_HARD_SPINLOCK(undef_lock);
 
 void register_undef_hook(struct undef_hook *hook)
 {
diff --git a/kernel/arch/arm/kernel/vdso.c b/kernel/arch/arm/kernel/vdso.c
index fddd08a..557fb35 100644
--- a/kernel/arch/arm/kernel/vdso.c
+++ b/kernel/arch/arm/kernel/vdso.c
@@ -32,7 +32,10 @@
 
 extern char vdso_start[], vdso_end[];
 
-/* Total number of pages needed for the data and text portions of the VDSO. */
+/*
+ * Total number of pages needed for the data, private and text
+ * portions of the VDSO.
+ */
 unsigned int vdso_total_pages __ro_after_init;
 
 /*
@@ -53,8 +56,8 @@
 	unsigned long new_size = new_vma->vm_end - new_vma->vm_start;
 	unsigned long vdso_size;
 
-	/* without VVAR page */
-	vdso_size = (vdso_total_pages - 1) << PAGE_SHIFT;
+	/* without VVAR and VPRIV pages */
+	vdso_size = (vdso_total_pages - 2) << PAGE_SHIFT;
 
 	if (vdso_size != new_size)
 		return -EINVAL;
@@ -180,8 +183,10 @@
 	/* If the virtual counter is absent or non-functional we don't
 	 * want programs to incur the slight additional overhead of
 	 * dispatching through the VDSO only to fall back to syscalls.
+	 * However, if clocksources supporting generic MMIO access can
+	 * be reached via the vDSO, keep this fast path enabled.
 	 */
-	if (!cntvct_ok) {
+	if (!cntvct_ok && !IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO)) {
 		vdso_nullpatch_one(&einfo, "__vdso_gettimeofday");
 		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime");
 		vdso_nullpatch_one(&einfo, "__vdso_clock_gettime64");
@@ -219,16 +224,26 @@
 
 	vdso_text_mapping.pages = vdso_text_pagelist;
 
-	vdso_total_pages = 1; /* for the data/vvar page */
+	vdso_total_pages = 2; /* for the data/vvar and vpriv pages */
 	vdso_total_pages += text_pages;
 
 	cntvct_ok = cntvct_functional();
 
 	patch_vdso(vdso_start);
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	vdso_data->cs_type_seq = CLOCKSOURCE_VDSO_NONE << 16 | 1;
+#endif
 
 	return 0;
 }
 arch_initcall(vdso_init);
+
+static int install_vpriv(struct mm_struct *mm, unsigned long addr)
+{
+	return mmap_region(NULL, addr, PAGE_SIZE,
+			  VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
+			   0, NULL) != addr ? -EINVAL : 0;
+}
 
 static int install_vvar(struct mm_struct *mm, unsigned long addr)
 {
@@ -237,8 +252,13 @@
 	vma = _install_special_mapping(mm, addr, PAGE_SIZE,
 				       VM_READ | VM_MAYREAD,
 				       &vdso_data_mapping);
+	if (IS_ERR(vma))
+		return PTR_ERR(vma);
 
-	return PTR_ERR_OR_ZERO(vma);
+	if (cache_is_vivt())
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return vma->vm_start != addr ? -EINVAL : 0;
 }
 
 /* assumes mmap_lock is write-locked */
@@ -252,18 +272,29 @@
 	if (vdso_text_pagelist == NULL)
 		return;
 
-	if (install_vvar(mm, addr))
+	if (install_vpriv(mm, addr)) {
+		pr_err("cannot map VPRIV at expected address!\n");
 		return;
+	}
 
-	/* Account for vvar page. */
+	/* Account for the private storage. */
 	addr += PAGE_SIZE;
-	len = (vdso_total_pages - 1) << PAGE_SHIFT;
+	if (install_vvar(mm, addr)) {
+		WARN(1, "cannot map VVAR at expected address!\n");
+		return;
+	}
+
+	/* Account for vvar and vpriv pages. */
+	addr += PAGE_SIZE;
+	len = (vdso_total_pages - 2) << PAGE_SHIFT;
 
 	vma = _install_special_mapping(mm, addr, len,
 		VM_READ | VM_EXEC | VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC,
 		&vdso_text_mapping);
 
-	if (!IS_ERR(vma))
+	if (IS_ERR(vma) || vma->vm_start != addr)
+		WARN(1, "cannot map VDSO at expected address!\n");
+	else
 		mm->context.vdso = addr;
 }
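
The mapping installed above yields a fixed three-part layout, with
mm->context.vdso pointing at the text. A sketch of the offsets this implies
(vdso_priv_base() is a hypothetical helper, not part of the patch):

	/*
	 *   base                vpriv page (read/write per-process scratch)
	 *   base + PAGE_SIZE    vvar page  (read-only vdso_data)
	 *   base + 2*PAGE_SIZE  text pages (mm->context.vdso points here)
	 */
	static inline void *vdso_priv_base(struct mm_struct *mm)
	{
		return (void *)(mm->context.vdso - 2 * PAGE_SIZE);
	}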
 
diff --git a/kernel/arch/arm/mach-imx/gpc.c b/kernel/arch/arm/mach-imx/gpc.c
index ebc4339..189642e 100644
--- a/kernel/arch/arm/mach-imx/gpc.c
+++ b/kernel/arch/arm/mach-imx/gpc.c
@@ -62,28 +62,38 @@
 void imx_gpc_pre_suspend(bool arm_power_off)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
 
 	/* Tell GPC to power off ARM core when suspend */
 	if (arm_power_off)
 		imx_gpc_set_arm_power_in_lpm(arm_power_off);
 
+	flags = hard_cond_local_irq_save();
+
 	for (i = 0; i < IMR_NUM; i++) {
 		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
 		writel_relaxed(~gpc_wake_irqs[i], reg_imr1 + i * 4);
 	}
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void imx_gpc_post_resume(void)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
 
 	/* Keep ARM core powered on for other low-power modes */
 	imx_gpc_set_arm_power_in_lpm(false);
 
+	flags = hard_cond_local_irq_save();
+
 	for (i = 0; i < IMR_NUM; i++)
 		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 static int imx_gpc_irq_set_wake(struct irq_data *d, unsigned int on)
@@ -105,21 +115,31 @@
 void imx_gpc_mask_all(void)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
+
+	flags = hard_cond_local_irq_save();
 
 	for (i = 0; i < IMR_NUM; i++) {
 		gpc_saved_imrs[i] = readl_relaxed(reg_imr1 + i * 4);
 		writel_relaxed(~0, reg_imr1 + i * 4);
 	}
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void imx_gpc_restore_all(void)
 {
 	void __iomem *reg_imr1 = gpc_base + GPC_IMR1;
+	unsigned long flags;
 	int i;
+
+	flags = hard_cond_local_irq_save();
 
 	for (i = 0; i < IMR_NUM; i++)
 		writel_relaxed(gpc_saved_imrs[i], reg_imr1 + i * 4);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void imx_gpc_hwirq_unmask(unsigned int hwirq)
@@ -167,6 +187,7 @@
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 #endif
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int imx_gpc_domain_translate(struct irq_domain *d,
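
The bracketing used throughout this file compiles to nothing on a
non-pipelined kernel, but masks hard interrupts while the IMR registers are
saved or restored so oob code cannot observe them half-updated. The pattern in
isolation (sketch):

	unsigned long flags;

	flags = hard_cond_local_irq_save();	/* nop if !CONFIG_IRQ_PIPELINE */
	/* ... multi-register save/restore that must stay atomic ... */
	hard_cond_local_irq_restore(flags);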
diff --git a/kernel/arch/arm/mm/alignment.c b/kernel/arch/arm/mm/alignment.c
index bcefe3f..6018226 100644
--- a/kernel/arch/arm/mm/alignment.c
+++ b/kernel/arch/arm/mm/alignment.c
@@ -19,6 +19,7 @@
 #include <linux/init.h>
 #include <linux/sched/signal.h>
 #include <linux/uaccess.h>
+#include <linux/dovetail.h>
 
 #include <asm/cp15.h>
 #include <asm/system_info.h>
@@ -807,10 +808,12 @@
 	u16 tinstr = 0;
 	int isize = 4;
 	int thumb2_32b = 0;
-	int fault;
+	int fault, ret = 0;
 
 	if (interrupts_enabled(regs))
-		local_irq_enable();
+		hard_local_irq_enable();
+
+	mark_trap_entry(ARM_TRAP_ALIGNMENT, regs);
 
 	instrptr = instruction_pointer(regs);
 
@@ -938,7 +941,7 @@
 	if (thumb_mode(regs))
 		regs->ARM_cpsr = it_advance(regs->ARM_cpsr);
 
-	return 0;
+	goto out;
 
  bad_or_fault:
 	if (type == TYPE_ERROR)
@@ -947,7 +950,7 @@
 	 * We got a fault - fix it up, or die.
 	 */
 	do_bad_area(addr, fsr, regs);
-	return 0;
+	goto out;
 
  swp:
 	pr_err("Alignment trap: not handling swp instruction\n");
@@ -961,7 +964,8 @@
 		isize << 1,
 		isize == 2 ? tinstr : instr, instrptr);
 	ai_skipped += 1;
-	return 1;
+	ret = 1;
+	goto out;
 
  user:
 	ai_user += 1;
@@ -992,12 +996,15 @@
 		 * entry-common.S) and disable the alignment trap only if
 		 * there is no work pending for this thread.
 		 */
-		raw_local_irq_disable();
+		hard_local_irq_disable();
 		if (!(current_thread_info()->flags & _TIF_WORK_MASK))
 			set_cr(cr_no_alignment);
 	}
 
-	return 0;
+out:
+	mark_trap_exit(ARM_TRAP_ALIGNMENT, regs);
+
+	return ret;
 }
 
 static int __init noalign_setup(char *__unused)
diff --git a/kernel/arch/arm/mm/cache-l2x0.c b/kernel/arch/arm/mm/cache-l2x0.c
index 43d91bf..b2af3e0 100644
--- a/kernel/arch/arm/mm/cache-l2x0.c
+++ b/kernel/arch/arm/mm/cache-l2x0.c
@@ -38,7 +38,7 @@
 
 static void __iomem *l2x0_base;
 static const struct l2c_init_data *l2x0_data;
-static DEFINE_RAW_SPINLOCK(l2x0_lock);
+static DEFINE_HARD_SPINLOCK(l2x0_lock);
 static u32 l2x0_way_mask;	/* Bitmask of active ways */
 static u32 l2x0_size;
 static unsigned long sync_reg_offset = L2X0_CACHE_SYNC;
@@ -47,6 +47,19 @@
 
 static bool l2x0_bresp_disable;
 static bool l2x0_flz_disable;
+
+#ifdef CONFIG_IRQ_PIPELINE
+#define CACHE_RANGE_ATOMIC_MAX	512UL
+static int l2x0_wa = -1;
+static int __init l2x0_setup_wa(char *str)
+{
+	l2x0_wa = !!simple_strtol(str, NULL, 0);
+	return 0;
+}
+early_param("l2x0_write_allocate", l2x0_setup_wa);
+#else
+#define CACHE_RANGE_ATOMIC_MAX	4096UL
+#endif
 
 /*
  * Common code for all cache controllers.
@@ -120,11 +133,11 @@
 
 	l2x0_data->unlock(base, num_lock);
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__l2c_op_way(base + L2X0_INV_WAY);
 	writel_relaxed(0, base + sync_reg_offset);
 	l2c_wait_mask(base + sync_reg_offset, 1);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	l2c_write_sec(L2X0_CTRL_EN, base, L2X0_CTRL);
 }
@@ -225,7 +238,7 @@
 {
 	void __iomem *base = l2x0_base;
 
-	BUG_ON(!irqs_disabled());
+	BUG_ON(!hard_irqs_disabled());
 
 	__l2c_op_way(base + L2X0_CLEAN_INV_WAY);
 	__l2c210_cache_sync(base);
@@ -284,10 +297,10 @@
 static unsigned long l2c220_op_pa_range(void __iomem *reg, unsigned long start,
 	unsigned long end, unsigned long flags)
 {
-	raw_spinlock_t *lock = &l2x0_lock;
+	typeof(l2x0_lock) *lock = &l2x0_lock;
 
 	while (start < end) {
-		unsigned long blk_end = start + min(end - start, 4096UL);
+		unsigned long blk_end = start + min(end - start, CACHE_RANGE_ATOMIC_MAX);
 
 		while (start < blk_end) {
 			l2c_wait_mask(reg, 1);
@@ -498,13 +511,13 @@
 
 static void l2c310_flush_range_erratum(unsigned long start, unsigned long end)
 {
-	raw_spinlock_t *lock = &l2x0_lock;
+	typeof(l2x0_lock) *lock = &l2x0_lock;
 	unsigned long flags;
 	void __iomem *base = l2x0_base;
 
 	raw_spin_lock_irqsave(lock, flags);
 	while (start < end) {
-		unsigned long blk_end = start + min(end - start, 4096UL);
+		unsigned long blk_end = start + min(end - start, CACHE_RANGE_ATOMIC_MAX);
 
 		l2c_set_debug(base, 0x03);
 		while (start < blk_end) {
@@ -800,6 +813,24 @@
 	if (aux_val & aux_mask)
 		pr_alert("L2C: platform provided aux values permit register corruption.\n");
 
+#ifdef CONFIG_IRQ_PIPELINE
+	if (!l2x0_wa) {
+		/*
+		 * Disable WA by setting bit 23 in the auxiliary
+		 * control register.
+		 */
+		aux_mask &= ~L220_AUX_CTRL_FWA_MASK;
+		aux_val &= ~L220_AUX_CTRL_FWA_MASK;
+		aux_val |= 1 << L220_AUX_CTRL_FWA_SHIFT;
+		pr_warn("%s: irq_pipeline: write-allocate disabled via command line\n",
+			data->type);
+	} else if ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L220 ||
+		   ((cache_id & L2X0_CACHE_ID_PART_MASK) == L2X0_CACHE_ID_PART_L310 &&
+		    (cache_id & L2X0_CACHE_ID_RTL_MASK) < L310_CACHE_ID_RTL_R3P2))
+		pr_alert("%s: irq_pipeline: write-allocate enabled, may induce high latency\n",
+			 data->type);
+#endif
+
 	old_aux = aux = readl_relaxed(l2x0_base + L2X0_AUX_CTRL);
 	aux &= aux_mask;
 	aux |= aux_val;
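
Two latency knobs appear here for pipelined kernels: range maintenance is
batched in 512-byte chunks instead of 4 KiB so the hard lock is released more
often, and write-allocate can be turned off on controller revisions where it
induces long stalls. The latter is driven from the kernel command line, e.g.:

	l2x0_write_allocate=0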
diff --git a/kernel/arch/arm/mm/context.c b/kernel/arch/arm/mm/context.c
index b7525b4..0cf14bd 100644
--- a/kernel/arch/arm/mm/context.c
+++ b/kernel/arch/arm/mm/context.c
@@ -39,7 +39,7 @@
 #define ASID_FIRST_VERSION	(1ULL << ASID_BITS)
 #define NUM_USER_ASIDS		ASID_FIRST_VERSION
 
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static DEFINE_HARD_SPINLOCK(cpu_asid_lock);
 static atomic64_t asid_generation = ATOMIC64_INIT(ASID_FIRST_VERSION);
 static DECLARE_BITMAP(asid_map, NUM_USER_ASIDS);
 
@@ -237,8 +237,11 @@
 void check_and_switch_context(struct mm_struct *mm, struct task_struct *tsk)
 {
 	unsigned long flags;
-	unsigned int cpu = smp_processor_id();
+	unsigned int cpu = raw_smp_processor_id();
+	bool need_flush;
 	u64 asid;
+
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
 
 	if (unlikely(mm->context.vmalloc_seq != init_mm.context.vmalloc_seq))
 		__check_vmalloc_seq(mm);
@@ -263,15 +266,16 @@
 		atomic64_set(&mm->context.id, asid);
 	}
 
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending)) {
-		local_flush_bp_all();
-		local_flush_tlb_all();
-	}
-
+	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
 	atomic64_set(&per_cpu(active_asids, cpu), asid);
 	cpumask_set_cpu(cpu, mm_cpumask(mm));
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
+	if (need_flush) {
+		local_flush_bp_all();
+		local_flush_tlb_all();
+	}
+
 switch_mm_fastpath:
 	cpu_switch_mm(mm->pgd, mm);
 }
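
Moving the flush out of the locked region keeps the hard cpu_asid_lock hold
time bounded: only the bookkeeping runs under the lock, and the comparatively
slow local flush happens after it is dropped. The pattern, reduced to its
essentials (sketch):

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);
	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
	/* ... publish the new ASID ... */
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	if (need_flush) {
		local_flush_bp_all();
		local_flush_tlb_all();
	}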
diff --git a/kernel/arch/arm/mm/fault.c b/kernel/arch/arm/mm/fault.c
index efa4020..e23d0ff 100644
--- a/kernel/arch/arm/mm/fault.c
+++ b/kernel/arch/arm/mm/fault.c
@@ -9,6 +9,7 @@
 #include <linux/signal.h>
 #include <linux/mm.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/init.h>
 #include <linux/kprobes.h>
 #include <linux/uaccess.h>
@@ -21,10 +22,68 @@
 #include <asm/system_misc.h>
 #include <asm/system_info.h>
 #include <asm/tlbflush.h>
+#include <asm/dovetail.h>
+#define CREATE_TRACE_POINTS
+#include <asm/trace/exceptions.h>
 
 #include "fault.h"
 
 #ifdef CONFIG_MMU
+
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * We need to synchronize the virtual interrupt state with the hard
+ * interrupt state we received on entry, then turn hardirqs back on to
+ * allow code which does not require strict serialization to be
+ * preempted by an out-of-band activity.
+ */
+static inline
+unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	trace_ARM_trap_entry(exception, regs);
+
+	flags = hard_local_save_flags();
+
+	/*
+	 * The companion core must demote the current context to
+	 * in-band stage if running oob on entry.
+	 */
+	mark_trap_entry(exception, regs);
+
+	if (raw_irqs_disabled_flags(flags)) {
+		stall_inband();
+		trace_hardirqs_off();
+	}
+
+	hard_local_irq_enable();
+
+	return flags;
+}
+
+static inline
+void fault_exit(int exception, struct pt_regs *regs,
+		unsigned long flags)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
+
+	/*
+	 * We expect kentry_exit_pipelined() to clear the stall bit if
+	 * kentry_enter_pipelined() observed it that way.
+	 */
+	mark_trap_exit(exception, regs);
+	trace_ARM_trap_exit(exception, regs);
+	hard_local_irq_restore(flags);
+}
+
+#else	/* !CONFIG_IRQ_PIPELINE */
+
+#define fault_entry(__exception, __regs)  ({ 0; })
+#define fault_exit(__exception, __regs, __flags)  \
+	do { (void)(__flags); } while (0)
+
+#endif	/* !CONFIG_IRQ_PIPELINE */
 
 /*
  * This is useful to dump out the page tables associated with
@@ -96,6 +155,15 @@
 	pr_cont("\n");
 }
 #else					/* CONFIG_MMU */
+static inline unsigned long fault_entry(int exception, struct pt_regs *regs)
+{
+	return 0;
+}
+
+static inline void fault_exit(int exception, struct pt_regs *regs,
+			unsigned long combo)
+{ }
+
 void show_pte(const char *lvl, struct mm_struct *mm, unsigned long addr)
 { }
 #endif					/* CONFIG_MMU */
@@ -116,6 +184,7 @@
 	/*
 	 * No handler, we'll have to terminate things with extreme prejudice.
 	 */
+	irq_pipeline_oops();
 	bust_spinlocks(1);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unable to handle kernel %s at virtual address %08lx\n",
@@ -168,14 +237,22 @@
 {
 	struct task_struct *tsk = current;
 	struct mm_struct *mm = tsk->active_mm;
+	unsigned long irqflags;
 
 	/*
 	 * If we are in kernel mode at this point, we
 	 * have no context to handle this fault with.
 	 */
-	if (user_mode(regs))
+	if (user_mode(regs)) {
+		irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 		__do_user_fault(addr, fsr, SIGSEGV, SEGV_MAPERR, regs);
-	else
+		fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+	} else
+		/*
+		 * irq_pipeline: kernel faults are either quickly
+		 * recoverable via fixup, or lethal. In both cases, we
+		 * can skip the interrupt state synchronization.
+		 */
 		__do_kernel_fault(mm, addr, fsr, regs);
 }
 
@@ -244,9 +321,12 @@
 	int sig, code;
 	vm_fault_t fault;
 	unsigned int flags = FAULT_FLAG_DEFAULT;
+	unsigned long irqflags;
+
+	irqflags = fault_entry(ARM_TRAP_ACCESS, regs);
 
 	if (kprobe_page_fault(regs, fsr))
-		return 0;
+		goto out;
 
 	tsk = current;
 	mm  = tsk->mm;
@@ -302,7 +382,7 @@
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
-		return 0;
+		goto out;
 	}
 
 	if (!(fault & VM_FAULT_ERROR) && flags & FAULT_FLAG_ALLOW_RETRY) {
@@ -318,7 +398,7 @@
 	 * Handle the "normal" case first - VM_FAULT_MAJOR
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP | VM_FAULT_BADACCESS))))
-		return 0;
+		goto out;
 
 	/*
 	 * If we are in kernel mode at this point, we
@@ -334,7 +414,7 @@
 		 * got oom-killed)
 		 */
 		pagefault_out_of_memory();
-		return 0;
+		goto out;
 	}
 
 	if (fault & VM_FAULT_SIGBUS) {
@@ -355,10 +435,13 @@
 	}
 
 	__do_user_fault(addr, fsr, sig, code, regs);
-	return 0;
+	goto out;
 
 no_context:
 	__do_kernel_fault(mm, addr, fsr, regs);
+out:
+	fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
+
 	return 0;
 }
 #else					/* CONFIG_MMU */
@@ -396,6 +479,8 @@
 	p4d_t *p4d, *p4d_k;
 	pud_t *pud, *pud_k;
 	pmd_t *pmd, *pmd_k;
+
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
 	if (addr < TASK_SIZE)
 		return do_page_fault(addr, fsr, regs);
@@ -470,7 +555,11 @@
 static int
 do_sect_fault(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
+	unsigned long irqflags;
+
+	irqflags = fault_entry(ARM_TRAP_SECTION, regs);
 	do_bad_area(addr, fsr, regs);
+	fault_exit(ARM_TRAP_SECTION, regs, irqflags);
 	return 0;
 }
 #endif /* CONFIG_ARM_LPAE */
@@ -518,10 +607,12 @@
 do_DataAbort(unsigned long addr, unsigned int fsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = fsr_info + fsr_fs(fsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, fsr & ~FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_DABT, regs);
 	pr_alert("8<--- cut here ---\n");
 	pr_alert("Unhandled fault: %s (0x%03x) at 0x%08lx\n",
 		inf->name, fsr, addr);
@@ -529,6 +620,7 @@
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       fsr, 0);
+	fault_exit(ARM_TRAP_DABT, regs, irqflags);
 }
 
 void __init
@@ -548,15 +640,18 @@
 do_PrefetchAbort(unsigned long addr, unsigned int ifsr, struct pt_regs *regs)
 {
 	const struct fsr_info *inf = ifsr_info + fsr_fs(ifsr);
+	unsigned long irqflags;
 
 	if (!inf->fn(addr, ifsr | FSR_LNX_PF, regs))
 		return;
 
+	irqflags = fault_entry(ARM_TRAP_PABT, regs);
 	pr_alert("Unhandled prefetch abort: %s (0x%03x) at 0x%08lx\n",
 		inf->name, ifsr, addr);
 
 	arm_notify_die("", regs, inf->sig, inf->code, (void __user *)addr,
 		       ifsr, 0);
+	fault_exit(ARM_TRAP_PABT, regs, irqflags);
 }
 
 /*
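
Every handler above that may deliver a signal or print diagnostics is now
bracketed the same way. The canonical shape for such a handler (sketch, using
the ARM_TRAP_ACCESS class as an example):

	static int do_some_fault(unsigned long addr, unsigned int fsr,
				 struct pt_regs *regs)
	{
		unsigned long irqflags = fault_entry(ARM_TRAP_ACCESS, regs);

		/* ... fault handling runs in-band, hard irqs on ... */

		fault_exit(ARM_TRAP_ACCESS, regs, irqflags);
		return 0;
	}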
diff --git a/kernel/arch/arm/vdso/datapage.S b/kernel/arch/arm/vdso/datapage.S
index 9cd73b7..9beb76d 100644
--- a/kernel/arch/arm/vdso/datapage.S
+++ b/kernel/arch/arm/vdso/datapage.S
@@ -5,6 +5,8 @@
 	.align 2
 .L_vdso_data_ptr:
 	.long	_start - . - VDSO_DATA_SIZE
+.L_vdso_priv_ptr:
+	.long	_start - . - VDSO_DATA_SIZE - VDSO_PRIV_SIZE
 
 ENTRY(__get_datapage)
 	.fnstart
@@ -14,3 +16,12 @@
 	bx	lr
 	.fnend
 ENDPROC(__get_datapage)
+
+ENTRY(__get_privpage)
+	.fnstart
+	adr	r0, .L_vdso_priv_ptr
+	ldr	r1, [r0]
+	add	r0, r0, r1
+	bx	lr
+	.fnend
+ENDPROC(__get_privpage)
diff --git a/kernel/arch/arm/vfp/entry.S b/kernel/arch/arm/vfp/entry.S
index 27b0a1f..2e6680c 100644
--- a/kernel/arch/arm/vfp/entry.S
+++ b/kernel/arch/arm/vfp/entry.S
@@ -23,6 +23,7 @@
 @
 ENTRY(do_vfp)
 	inc_preempt_count r10, r4
+	disable_irq_if_pipelined
  	ldr	r4, .LCvfp
 	ldr	r11, [r10, #TI_CPU]	@ CPU number
 	add	r10, r10, #TI_VFPSTATE	@ r10 = workspace
@@ -30,6 +31,7 @@
 ENDPROC(do_vfp)
 
 ENTRY(vfp_null_entry)
+	enable_irq_if_pipelined
 	dec_preempt_count_ti r10, r4
 	ret	lr
 ENDPROC(vfp_null_entry)
diff --git a/kernel/arch/arm/vfp/vfphw.S b/kernel/arch/arm/vfp/vfphw.S
index d5837bf..d512f9f 100644
--- a/kernel/arch/arm/vfp/vfphw.S
+++ b/kernel/arch/arm/vfp/vfphw.S
@@ -170,6 +170,7 @@
 					@ out before setting an FPEXC that
 					@ stops us reading stuff
 	VFPFMXR	FPEXC, r1		@ Restore FPEXC last
+	enable_irq_if_pipelined
 	sub	r2, r2, #4		@ Retry current instruction - if Thumb
 	str	r2, [sp, #S_PC]		@ mode it's two 16-bit instructions,
 					@ else it's one 32-bit instruction, so
@@ -199,6 +200,7 @@
 	@ Fall into hand on to next handler - appropriate coproc instr
 	@ not recognised by VFP
 
+	enable_irq_if_pipelined
 	DBGSTR	"not VFP"
 	dec_preempt_count_ti r10, r4
 	ret	lr
diff --git a/kernel/arch/arm/vfp/vfpmodule.c b/kernel/arch/arm/vfp/vfpmodule.c
index 2cb355c..de099bc 100644
--- a/kernel/arch/arm/vfp/vfpmodule.c
+++ b/kernel/arch/arm/vfp/vfpmodule.c
@@ -14,10 +14,12 @@
 #include <linux/signal.h>
 #include <linux/sched/signal.h>
 #include <linux/smp.h>
+#include <linux/dovetail.h>
 #include <linux/init.h>
 #include <linux/uaccess.h>
 #include <linux/user.h>
 #include <linux/export.h>
+#include <linux/smp.h>
 
 #include <asm/cp15.h>
 #include <asm/cputype.h>
@@ -90,6 +92,7 @@
 static void vfp_thread_flush(struct thread_info *thread)
 {
 	union vfp_state *vfp = &thread->vfpstate;
+	unsigned long flags;
 	unsigned int cpu;
 
 	/*
@@ -100,11 +103,11 @@
 	 * Do this first to ensure that preemption won't overwrite our
 	 * state saving should access to the VFP be enabled at this point.
 	 */
-	cpu = get_cpu();
+	cpu = hard_get_cpu(flags);
 	if (vfp_current_hw_state[cpu] == vfp)
 		vfp_current_hw_state[cpu] = NULL;
 	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
-	put_cpu();
+	hard_put_cpu(flags);
 
 	memset(vfp, 0, sizeof(union vfp_state));
 
@@ -119,11 +122,12 @@
 {
 	/* release case: Per-thread VFP cleanup. */
 	union vfp_state *vfp = &thread->vfpstate;
-	unsigned int cpu = get_cpu();
+	unsigned long flags;
+	unsigned int cpu = hard_get_cpu(flags);
 
 	if (vfp_current_hw_state[cpu] == vfp)
 		vfp_current_hw_state[cpu] = NULL;
-	put_cpu();
+	hard_put_cpu(flags);
 }
 
 static void vfp_thread_copy(struct thread_info *thread)
@@ -159,6 +163,7 @@
 static int vfp_notifier(struct notifier_block *self, unsigned long cmd, void *v)
 {
 	struct thread_info *thread = v;
+	unsigned long flags;
 	u32 fpexc;
 #ifdef CONFIG_SMP
 	unsigned int cpu;
@@ -166,6 +171,7 @@
 
 	switch (cmd) {
 	case THREAD_NOTIFY_SWITCH:
+		flags = hard_cond_local_irq_save();
 		fpexc = fmrx(FPEXC);
 
 #ifdef CONFIG_SMP
@@ -185,6 +191,7 @@
 		 * old state.
 		 */
 		fmxr(FPEXC, fpexc & ~FPEXC_EN);
+		hard_cond_local_irq_restore(flags);
 		break;
 
 	case THREAD_NOTIFY_FLUSH:
@@ -248,7 +255,10 @@
 
 	if (exceptions == VFP_EXCEPTION_ERROR) {
 		vfp_panic("unhandled bounce", inst);
-		vfp_raise_sigfpe(FPE_FLTINV, regs);
+		if (mark_cond_trap_entry(ARM_TRAP_VFP, regs)) {
+			vfp_raise_sigfpe(FPE_FLTINV, regs);
+			mark_trap_exit(ARM_TRAP_VFP, regs);
+		}
 		return;
 	}
 
@@ -322,7 +332,7 @@
  */
 void VFP_bounce(u32 trigger, u32 fpexc, struct pt_regs *regs)
 {
-	u32 fpscr, orig_fpscr, fpsid, exceptions;
+	u32 fpscr, orig_fpscr, fpsid, exceptions, next_trigger = 0;
 
 	pr_debug("VFP: bounce: trigger %08x fpexc %08x\n", trigger, fpexc);
 
@@ -352,6 +362,7 @@
 		/*
 		 * Synchronous exception, emulate the trigger instruction
 		 */
+		hard_cond_local_irq_enable();
 		goto emulate;
 	}
 
@@ -364,7 +375,18 @@
 		trigger = fmrx(FPINST);
 		regs->ARM_pc -= 4;
 #endif
-	} else if (!(fpexc & FPEXC_DEX)) {
+		if (fpexc & FPEXC_FP2V) {
+			/*
+			 * The barrier() here prevents fpinst2 being read
+			 * before the condition above.
+			 */
+			barrier();
+			next_trigger = fmrx(FPINST2);
+		}
+	}
+	hard_cond_local_irq_enable();
+
+	if (!(fpexc & (FPEXC_EX | FPEXC_DEX))) {
 		/*
 		 * Illegal combination of bits. It can be caused by an
 		 * unallocated VFP instruction but with FPSCR.IXE set and not
@@ -404,18 +426,14 @@
 	if ((fpexc & (FPEXC_EX | FPEXC_FP2V)) != (FPEXC_EX | FPEXC_FP2V))
 		goto exit;
 
-	/*
-	 * The barrier() here prevents fpinst2 being read
-	 * before the condition above.
-	 */
-	barrier();
-	trigger = fmrx(FPINST2);
+	trigger = next_trigger;
 
  emulate:
 	exceptions = vfp_emulate_instruction(trigger, orig_fpscr, regs);
 	if (exceptions)
 		vfp_raise_exceptions(exceptions, trigger, orig_fpscr, regs);
  exit:
+	hard_cond_local_irq_enable();
 	preempt_enable();
 }
 
@@ -515,7 +533,8 @@
  */
 void vfp_sync_hwstate(struct thread_info *thread)
 {
-	unsigned int cpu = get_cpu();
+	unsigned long flags;
+	unsigned int cpu = hard_get_cpu(flags);
 
 	if (vfp_state_in_hw(cpu, thread)) {
 		u32 fpexc = fmrx(FPEXC);
@@ -528,17 +547,18 @@
 		fmxr(FPEXC, fpexc);
 	}
 
-	put_cpu();
+	hard_put_cpu(flags);
 }
 
 /* Ensure that the thread reloads the hardware VFP state on the next use. */
 void vfp_flush_hwstate(struct thread_info *thread)
 {
-	unsigned int cpu = get_cpu();
+	unsigned long flags;
+	unsigned int cpu = hard_get_cpu(flags);
 
 	vfp_force_reload(cpu, thread);
 
-	put_cpu();
+	hard_put_cpu(flags);
 }
 
 /*
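
get_cpu()/put_cpu() only disable preemption, which no longer excludes
out-of-band preemption once a companion core may run on the same CPU; the
hard_get_cpu()/hard_put_cpu() substitution also masks hard interrupts around
the per-cpu VFP state. The pattern in isolation (sketch):

	unsigned long flags;
	unsigned int cpu = hard_get_cpu(flags);	/* pin CPU + hard irqs off */

	/* ... touch vfp_current_hw_state[cpu] ... */

	hard_put_cpu(flags);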
diff --git a/kernel/arch/arm64/Kconfig b/kernel/arch/arm64/Kconfig
index 88ac9f0..d453ece 100644
--- a/kernel/arch/arm64/Kconfig
+++ b/kernel/arch/arm64/Kconfig
@@ -176,6 +176,8 @@
 	select HAVE_FUNCTION_GRAPH_TRACER
 	select HAVE_GCC_PLUGINS
 	select HAVE_HW_BREAKPOINT if PERF_EVENTS
+	select HAVE_IRQ_PIPELINE
+	select HAVE_DOVETAIL
 	select HAVE_IRQ_TIME_ACCOUNTING
 	select HAVE_NMI
 	select HAVE_PATA_PLATFORM
@@ -1178,6 +1180,8 @@
 config CC_HAVE_SHADOW_CALL_STACK
 	def_bool $(cc-option, -fsanitize=shadow-call-stack -ffixed-x18)
 
+source "kernel/Kconfig.dovetail"
+
 config PARAVIRT
 	bool "Enable paravirtualization code"
 	help
diff --git a/kernel/arch/arm64/Makefile b/kernel/arch/arm64/Makefile
index b3a57c5..c77b376 100644
--- a/kernel/arch/arm64/Makefile
+++ b/kernel/arch/arm64/Makefile
@@ -219,3 +219,6 @@
 
 CLEAN_DIRS += out
 CLEAN_FILES += boot.img kernel.img resource.img zboot.img
+
+KBUILD_CFLAGS += -I$(srctree)/arch/$(SRCARCH)/xenomai/include -I$(srctree)/arch/$(SRCARCH)/xenomai/dovetail/include -I$(srctree)/include/xenomai
+core-$(CONFIG_XENOMAI) += arch/arm64/xenomai/dovetail/
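
With the arch hooks selected and the Xenomai tree wired into the build, the
feature set is enabled from Kconfig. A sketch of the relevant fragment,
assuming the usual Dovetail/Xenomai symbol names:

	CONFIG_IRQ_PIPELINE=y
	CONFIG_DOVETAIL=y
	CONFIG_XENOMAI=y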
diff --git a/kernel/arch/arm64/boot/dts/broadcom/Makefile b/kernel/arch/arm64/boot/dts/broadcom/Makefile
index cb7de8d..7f86784 100644
--- a/kernel/arch/arm64/boot/dts/broadcom/Makefile
+++ b/kernel/arch/arm64/boot/dts/broadcom/Makefile
@@ -3,6 +3,7 @@
 			      bcm2837-rpi-3-a-plus.dtb \
 			      bcm2837-rpi-3-b.dtb \
 			      bcm2837-rpi-3-b-plus.dtb \
+			      bcm2837-rpi-3-b-nobt.dtb \
 			      bcm2837-rpi-cm3-io3.dtb
 
 subdir-y	+= northstar2
diff --git a/kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts b/kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts
new file mode 100644
index 0000000..43f9d0f
--- /dev/null
+++ b/kernel/arch/arm64/boot/dts/broadcom/bcm2837-rpi-3-b-nobt.dts
@@ -0,0 +1,12 @@
+/dts-v1/;
+#include "bcm2837-rpi-3-b.dts"
+
+&uart0 {
+	status = "okay";
+	pinctrl-names = "default";
+	pinctrl-0 = <&uart0_gpio32>;
+};
+
+&uart1 {
+	status = "disabled";
+};
diff --git a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi
index 282038e..fc812b5 100755
--- a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi
+++ b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dtsi
@@ -27,7 +27,7 @@
 	};
 
 	es8316_sound: es8316-sound {
-		status = "okay";
+		status = "disabled";
 		compatible = "rockchip,multicodecs-card";
 		rockchip,card-name = "rockchip-es8316";
 		rockchip,format = "i2s";
@@ -235,7 +235,7 @@
 		BT,reset_gpio    = <&gpio0 RK_PB2 GPIO_ACTIVE_HIGH>; //BT_DISABLE_GPIO0_B2_u_1V8
 		//BT,wake_gpio     = <&gpio3 RK_PA1 GPIO_ACTIVE_HIGH>;//HOST_WAKE_BT_H
 		//BT,wake_host_irq = <&gpio0 RK_PA0 GPIO_ACTIVE_HIGH>;//BT_WAKE_HOST_H
-		status = "okay";
+		status = "disabled";
 	};
 
 	wireless_wlan: wireless-wlan {
@@ -245,7 +245,7 @@
 	//	pinctrl-0 = <&wifi_host_wake_irq>;
 	//	WIFI,host_wake_irq = <&gpio2 RK_PB5 GPIO_ACTIVE_HIGH>; //GPIO2_B5_u_1V8_WF-BT_WAKEUP_IN
 	//	WIFI,poweren_gpio = <&gpio0 RK_PC4 GPIO_ACTIVE_HIGH>;
-		status = "okay";
+		status = "disabled";
 	};
 	
 	ndj_io_init {
@@ -253,45 +253,13 @@
 			pinctrl-names = "default";
           	pinctrl-0 = <&ndj_io_gpio>;
 									
-			vcc_12v {
-				gpio_num = <&gpio0 RK_PD3 GPIO_ACTIVE_HIGH>;
-				gpio_function = <0>;
-			};//VCC12_IO_EN_GPIO0_D3_u_3V3
-			
-			vcc_3v {
-				gpio_num = <&gpio4 RK_PA1 GPIO_ACTIVE_HIGH>;
-				gpio_function = <0>;
-			};//VCC3_IO_EN_GPIO4_A1_d_3V3
+
 			
            	hub_5V_reset {
 				gpio_num = <&gpio4 RK_PB6 GPIO_ACTIVE_HIGH>; 
 				gpio_function = <3>;
 			};//HUB_RESET_GPIO4_B6_d_3V3
 			
-			4g_power {
-				gpio_num = <&gpio0 RK_PC6 GPIO_ACTIVE_HIGH>;
-				gpio_function = <0>;
-			};//4G_PWREN_GPIO3_C7_u_3V3
-			
-			5g_power {
-				gpio_num =<&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;
-				gpio_function = <0>;
-			};
-			
-         	wake_wifi_bt {
-				gpio_num = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>;
-				gpio_function = <0>;
-			};//GPIO2_B5_u_1V8_WF-BT_WAKEUP_IN
-			
-			air_mode_4g {
-				gpio_num = <&gpio2 RK_PB4 GPIO_ACTIVE_LOW>;
-				gpio_function = <0>;
-			};	//GPIO2_B4_u_1V8_4G_AIR_MODE_IN		
-			
-			reset_4g {
-				gpio_num = <&gpio2 RK_PC3 GPIO_ACTIVE_LOW>;
-				gpio_function = <3>;
-			};	//GPIO2_C3_d_1V8_4G_RESET_N_IN
 			
 			
 		};
@@ -439,7 +407,7 @@
 };
 
 &dsi0_in_vp3 {
-	status = "okay";
+	status = "disabled";
 };
 
 /*
@@ -472,6 +440,31 @@
 	//pinctrl-0 = <&lcd_rst_gpio>;
 };
 
+&gmac0 {
+	/* Use rgmii-rxid mode to disable rx delay inside Soc */
+	phy-mode = "rgmii-rxid";
+	clock_in_out = "output";
+
+	snps,reset-gpio = <&gpio2 RK_PB5 GPIO_ACTIVE_LOW>;
+	snps,reset-active-low;
+	/* Reset time is 20ms, 100ms for rtl8211f */
+	snps,reset-delays-us = <0 20000 100000>;
+
+	pinctrl-names = "default";
+	pinctrl-0 = <&gmac0_miim
+                     &gmac0_tx_bus2
+                     &gmac0_rx_bus2
+                     &gmac0_rgmii_clk
+                     &gmac0_rgmii_bus
+                     &eth0_pins
+		     &gmac0_clkinout>;
+	tx_delay = <0x44>;
+	/* rx_delay = <0x4f>; */
+
+	phy-handle = <&rgmii_phy0>;
+	status = "okay";
+};
+
 &gmac1 {
 	/* Use rgmii-rxid mode to disable rx delay inside Soc */
 	phy-mode = "rgmii-rxid";
@@ -492,7 +485,7 @@
 	tx_delay = <0x43>;
 	/* rx_delay = <0x3f>; */
 
-	phy-handle = <&rgmii_phy>;
+	phy-handle = <&rgmii_phy1>;
 	status = "okay";
 };
 
@@ -524,7 +517,7 @@
 
 /* Should work with at least 128MB cma reserved above. */
 &hdmirx_ctrler {
-	status = "okay";
+	status = "disabled";
 
 	#sound-dai-cells = <1>;
 	/* Effective level used to trigger HPD: 0-low, 1-high */
@@ -824,12 +817,18 @@
 };
 
 &mdio1 {
-	rgmii_phy: phy@1 {
+	rgmii_phy1: phy@1 {
 		compatible = "ethernet-phy-ieee802.3-c22";
 		reg = <0x1>;
 	};
 };
 
+&mdio0 {
+	rgmii_phy0: phy@1 {
+		compatible = "ethernet-phy-ieee802.3-c22";
+		reg = <0x1>;
+	};
+};
 
 
 &mipi_dcphy1 {
@@ -838,7 +837,7 @@
 
 &pcie2x1l2 {
 	phys = <&combphy0_ps PHY_TYPE_PCIE>;
-	reset-gpios = <&gpio3 RK_PD0 GPIO_ACTIVE_HIGH>;//PCIE20x1_2_RSTn_GPIO3_D0_3V3
+	reset-gpios = <&gpio3 RK_PD1 GPIO_ACTIVE_HIGH>;//PCIE20x1_2_RSTn_GPIO3_D1_3V3
 	vpcie3v3-supply = <&vcc3v3_pcie30>;
 	status = "okay";
 };//MINIPCIE
@@ -847,7 +846,7 @@
 	phys = <&combphy2_psu PHY_TYPE_PCIE>;
 	reset-gpios = <&gpio4 RK_PA2 GPIO_ACTIVE_HIGH>;//PCIEX1_1_PERSTn_M1_L
 	vpcie3v3-supply = <&vcc3v3_pcie30>;
-	status = "disabled";
+	status = "okay";
 };//M.2 WIFI6
 
 &pcie2x1l0 {
@@ -976,20 +975,7 @@
 	ndj_io_init{
 		ndj_io_gpio: ndj_io_gpio_col{
 				rockchip,pins =
-					<0 RK_PD3 RK_FUNC_GPIO &pcfg_pull_none>,
-					<4 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>,
-					<4 RK_PB6 RK_FUNC_GPIO &pcfg_pull_none>,
-					<0 RK_PC6 RK_FUNC_GPIO &pcfg_pull_none>,
-					<2 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>,
-					<2 RK_PC3 RK_FUNC_GPIO &pcfg_pull_none>,
-					<4 RK_PB5 RK_FUNC_GPIO &pcfg_pull_none>,  //vcc_5v				
-					<1 RK_PB1 RK_FUNC_GPIO &pcfg_pull_none>,  //SPI0_MISO_M2_1V8 41
-					<1 RK_PA0 RK_FUNC_GPIO &pcfg_pull_none>,  //SPI4_MISO_M2_1V8 32
-					<1 RK_PB2 RK_FUNC_GPIO &pcfg_pull_none>,  //SPI0_MOSI_M2_3V3 42
-					<1 RK_PA1 RK_FUNC_GPIO &pcfg_pull_none>,  //SPI4_MOSI_M2_1V8 33
-					<1 RK_PB3 RK_FUNC_GPIO &pcfg_pull_none>,  //SPI0_CLK_M2_1V8  43 
-					<1 RK_PA2 RK_FUNC_GPIO &pcfg_pull_none>,  //SPI4_CLK_M2_1V8  34
-					<1 RK_PB4 RK_FUNC_GPIO &pcfg_pull_none>,  //SPI0_CS0_M2_1V8  44
+				
 					<1 RK_PA3 RK_FUNC_GPIO &pcfg_pull_none>;  //SPI4_CS0_M2_1V8  35		 		
 		};
 	};
@@ -1049,7 +1035,7 @@
 	pinctrl-names = "default";
 	pinctrl-0 = <&sdiom0_pins>;
 	sd-uhs-sdr104;
-	status = "okay";
+	status = "disabled";
 };
 
 &sdmmc {
@@ -1109,7 +1095,7 @@
 #endif
 
 &uart1 {
-	status = "okay";
+	status = "disabled";
 //	dma-names = "tx", "rx"; // use DMA transfer mode
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart1m0_xfer>;
@@ -1123,7 +1109,7 @@
 
 
 &uart4 {
-	status = "okay";
+	status = "disabled";
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart4m0_xfer>;
 };
@@ -1141,7 +1127,7 @@
 };
 
 &uart7 {
-	status = "okay";
+	status = "disabled";
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart7m1_xfer>;
 };
@@ -1154,7 +1140,7 @@
 
 
 &uart9 {
-	status = "okay";
+	status = "disabled";
 	pinctrl-names = "default";
 	pinctrl-0 = <&uart9m0_xfer &uart9m0_ctsn>;	
 };
diff --git a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts
index e3ba742..7ab118b 100644
--- a/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts
+++ b/kernel/arch/arm64/boot/dts/rockchip/NK-6A13_V0A.dump.dts
@@ -95,7 +95,7 @@
 			#clock-cells = <0x0>;
 			clock-frequency = <0x29d7ab80>;
 			clock-output-names = "spll";
-			phandle = <0x1ec>;
+			phandle = <0x1f4>;
 		};
 
 		xin32k {
@@ -103,7 +103,7 @@
 			#clock-cells = <0x0>;
 			clock-frequency = <0x8000>;
 			clock-output-names = "xin32k";
-			phandle = <0x1ed>;
+			phandle = <0x1f5>;
 		};
 
 		xin24m {
@@ -111,7 +111,7 @@
 			#clock-cells = <0x0>;
 			clock-frequency = <0x16e3600>;
 			clock-output-names = "xin24m";
-			phandle = <0x1ee>;
+			phandle = <0x1f6>;
 		};
 
 		hclk_vo1@fd7c08ec {
@@ -131,7 +131,7 @@
 			clocks = <0x2 0x1bc>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1ef>;
+			phandle = <0x1f7>;
 		};
 
 		hclk_vo0@fd7c08dc {
@@ -151,7 +151,7 @@
 			clocks = <0x2 0x264>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f0>;
+			phandle = <0x1f8>;
 		};
 
 		hclk_nvm@fd7c087c {
@@ -181,7 +181,7 @@
 			clocks = <0x2 0x1e1>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f1>;
+			phandle = <0x1f9>;
 		};
 
 		aclk_isp1_pre@fd7c0868 {
@@ -191,7 +191,7 @@
 			clocks = <0x2 0x1e0>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f2>;
+			phandle = <0x1fa>;
 		};
 
 		aclk_rkvdec0_pre@fd7c08a0 {
@@ -201,7 +201,7 @@
 			clocks = <0x2 0x1bc>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f3>;
+			phandle = <0x1fb>;
 		};
 
 		hclk_rkvdec0_pre@fd7c08a0 {
@@ -211,7 +211,7 @@
 			clocks = <0x2 0x1be>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f4>;
+			phandle = <0x1fc>;
 		};
 
 		aclk_rkvdec1_pre@fd7c08a4 {
@@ -221,7 +221,7 @@
 			clocks = <0x2 0x1bc>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f5>;
+			phandle = <0x1fd>;
 		};
 
 		hclk_rkvdec1_pre@fd7c08a4 {
@@ -231,7 +231,7 @@
 			clocks = <0x2 0x1be>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f6>;
+			phandle = <0x1fe>;
 		};
 
 		aclk_jpeg_decoder_pre@fd7c08b0 {
@@ -241,7 +241,7 @@
 			clocks = <0x2 0x1bc>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f7>;
+			phandle = <0x1ff>;
 		};
 
 		aclk_rkvenc1_pre@fd7c08c0 {
@@ -251,7 +251,7 @@
 			clocks = <0x2 0x1c5>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f8>;
+			phandle = <0x200>;
 		};
 
 		hclk_rkvenc1_pre@fd7c08c0 {
@@ -261,7 +261,7 @@
 			clocks = <0x2 0x1c4>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1f9>;
+			phandle = <0x201>;
 		};
 
 		aclk_hdcp0_pre@fd7c08dc {
@@ -271,7 +271,7 @@
 			clocks = <0x2 0x26c>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1fa>;
+			phandle = <0x202>;
 		};
 
 		aclk_hdcp1_pre@fd7c08ec {
@@ -281,7 +281,7 @@
 			clocks = <0x2 0x263>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1fb>;
+			phandle = <0x203>;
 		};
 
 		pclk_av1_pre@fd7c0910 {
@@ -291,7 +291,7 @@
 			clocks = <0x2 0x1be>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1fc>;
+			phandle = <0x204>;
 		};
 
 		aclk_av1_pre@fd7c0910 {
@@ -301,7 +301,7 @@
 			clocks = <0x2 0x1bc>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1fd>;
+			phandle = <0x205>;
 		};
 
 		hclk_sdio_pre@fd7c092c {
@@ -311,7 +311,7 @@
 			clocks = <0x3>;
 			#power-domain-cells = <0x1>;
 			#clock-cells = <0x0>;
-			phandle = <0x1fe>;
+			phandle = <0x206>;
 		};
 
 		pclk_vo0_grf@fd7c08dc {
@@ -337,7 +337,7 @@
 			#clock-cells = <0x0>;
 			clock-frequency = <0x0>;
 			clock-output-names = "i2s0_mclkin";
-			phandle = <0x1ff>;
+			phandle = <0x207>;
 		};
 
 		mclkin-i2s1 {
@@ -345,7 +345,7 @@
 			#clock-cells = <0x0>;
 			clock-frequency = <0x0>;
 			clock-output-names = "i2s1_mclkin";
-			phandle = <0x200>;
+			phandle = <0x208>;
 		};
 
 		mclkin-i2s2 {
@@ -353,7 +353,7 @@
 			#clock-cells = <0x0>;
 			clock-frequency = <0x0>;
 			clock-output-names = "i2s2_mclkin";
-			phandle = <0x201>;
+			phandle = <0x209>;
 		};
 
 		mclkin-i2s3 {
@@ -361,7 +361,7 @@
 			#clock-cells = <0x0>;
 			clock-frequency = <0x0>;
 			clock-output-names = "i2s3_mclkin";
-			phandle = <0x202>;
+			phandle = <0x20a>;
 		};
 
 		mclkout-i2s0@fd58c318 {
@@ -383,7 +383,7 @@
 			clock-output-names = "i2s1_mclkout_to_io";
 			rockchip,bit-shift = <0x1>;
 			rockchip,bit-set-to-disable;
-			phandle = <0x203>;
+			phandle = <0x20b>;
 		};
 
 		mclkout-i2s1@fd58a000 {
@@ -393,7 +393,7 @@
 			#clock-cells = <0x0>;
 			clock-output-names = "i2s1m1_mclkout_to_io";
 			rockchip,bit-shift = <0x6>;
-			phandle = <0x204>;
+			phandle = <0x20c>;
 		};
 
 		mclkout-i2s2@fd58c318 {
@@ -404,7 +404,7 @@
 			clock-output-names = "i2s2_mclkout_to_io";
 			rockchip,bit-shift = <0x2>;
 			rockchip,bit-set-to-disable;
-			phandle = <0x205>;
+			phandle = <0x20d>;
 		};
 
 		mclkout-i2s3@fd58c318 {
@@ -415,7 +415,7 @@
 			clock-output-names = "i2s3_mclkout_to_io";
 			rockchip,bit-shift = <0x7>;
 			rockchip,bit-set-to-disable;
-			phandle = <0x206>;
+			phandle = <0x20e>;
 		};
 	};
 
@@ -1420,7 +1420,7 @@
 		compatible = "arm,armv8-pmuv3";
 		interrupts = <0x1 0x7 0x8>;
 		interrupt-affinity = <0x6 0x7 0x8 0x9 0xa 0xb 0xc 0xd>;
-		phandle = <0x207>;
+		phandle = <0x20f>;
 	};
 
 	cpuinfo {
@@ -1435,7 +1435,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x208>;
+		phandle = <0x210>;
 	};
 
 	csi2-dcphy1 {
@@ -1444,7 +1444,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x209>;
+		phandle = <0x211>;
 	};
 
 	csi2-dphy0 {
@@ -1453,7 +1453,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x20a>;
+		phandle = <0x212>;
 	};
 
 	csi2-dphy1 {
@@ -1462,7 +1462,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x20b>;
+		phandle = <0x213>;
 	};
 
 	csi2-dphy2 {
@@ -1471,7 +1471,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x20c>;
+		phandle = <0x214>;
 	};
 
 	csi2-dphy3 {
@@ -1480,7 +1480,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x20d>;
+		phandle = <0x215>;
 	};
 
 	csi2-dphy4 {
@@ -1489,7 +1489,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x20e>;
+		phandle = <0x216>;
 	};
 
 	csi2-dphy5 {
@@ -1498,7 +1498,7 @@
 		phys = <0x2f 0x30>;
 		phy-names = "dcphy0", "dcphy1";
 		status = "disabled";
-		phandle = <0x20f>;
+		phandle = <0x217>;
 	};
 
 	display-subsystem {
@@ -1506,7 +1506,7 @@
 		ports = <0x31>;
 		memory-region = <0x32>;
 		memory-region-names = "drm-logo";
-		phandle = <0x210>;
+		phandle = <0x218>;
 
 		route {
 
@@ -1517,7 +1517,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x33>;
-				phandle = <0x211>;
+				phandle = <0x219>;
 			};
 
 			route-dsi0 {
@@ -1527,7 +1527,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x34>;
-				phandle = <0x212>;
+				phandle = <0x21a>;
 			};
 
 			route-dsi1 {
@@ -1537,7 +1537,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x35>;
-				phandle = <0x213>;
+				phandle = <0x21b>;
 			};
 
 			route-edp0 {
@@ -1547,7 +1547,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x36>;
-				phandle = <0x214>;
+				phandle = <0x21c>;
 			};
 
 			route-edp1 {
@@ -1557,7 +1557,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x37>;
-				phandle = <0x215>;
+				phandle = <0x21d>;
 			};
 
 			route-hdmi0 {
@@ -1567,7 +1567,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x38>;
-				phandle = <0x216>;
+				phandle = <0x21e>;
 			};
 
 			route-rgb {
@@ -1577,7 +1577,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x39>;
-				phandle = <0x217>;
+				phandle = <0x21f>;
 			};
 
 			route-dp1 {
@@ -1587,7 +1587,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x3a>;
-				phandle = <0x218>;
+				phandle = <0x220>;
 			};
 
 			route-hdmi1 {
@@ -1597,7 +1597,7 @@
 				logo,mode = "center";
 				charge_logo,mode = "center";
 				connect = <0x3b>;
-				phandle = <0x219>;
+				phandle = <0x221>;
 			};
 		};
 	};
@@ -1617,7 +1617,7 @@
 		status = "okay";
 		center-supply = <0x3e>;
 		mem-supply = <0x3f>;
-		phandle = <0x21a>;
+		phandle = <0x222>;
 	};
 
 	dmc-opp-table {
@@ -1706,7 +1706,7 @@
 			arm,smc-id = <0x82000010>;
 			#address-cells = <0x1>;
 			#size-cells = <0x0>;
-			phandle = <0x21b>;
+			phandle = <0x223>;
 
 			protocol@14 {
 				reg = <0x14>;
@@ -1726,13 +1726,13 @@
 		sdei {
 			compatible = "arm,sdei-1.0";
 			method = "smc";
-			phandle = <0x21c>;
+			phandle = <0x224>;
 		};
 
 		optee {
 			compatible = "linaro,optee-tz";
 			method = "smc";
-			phandle = <0x21d>;
+			phandle = <0x225>;
 		};
 	};
 
@@ -1744,49 +1744,49 @@
 
 	mipi-dcphy-dummy {
 		status = "disabled";
-		phandle = <0x21e>;
+		phandle = <0x226>;
 	};
 
 	mipi0-csi2 {
 		compatible = "rockchip,rk3588-mipi-csi2";
 		rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>;
 		status = "disabled";
-		phandle = <0x21f>;
+		phandle = <0x227>;
 	};
 
 	mipi1-csi2 {
 		compatible = "rockchip,rk3588-mipi-csi2";
 		rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>;
 		status = "disabled";
-		phandle = <0x220>;
+		phandle = <0x228>;
 	};
 
 	mipi2-csi2 {
 		compatible = "rockchip,rk3588-mipi-csi2";
 		rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>;
 		status = "disabled";
-		phandle = <0x221>;
+		phandle = <0x229>;
 	};
 
 	mipi3-csi2 {
 		compatible = "rockchip,rk3588-mipi-csi2";
 		rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>;
 		status = "disabled";
-		phandle = <0x222>;
+		phandle = <0x22a>;
 	};
 
 	mipi4-csi2 {
 		compatible = "rockchip,rk3588-mipi-csi2";
 		rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>;
 		status = "disabled";
-		phandle = <0x223>;
+		phandle = <0x22b>;
 	};
 
 	mipi5-csi2 {
 		compatible = "rockchip,rk3588-mipi-csi2";
 		rockchip,hw = <0x43 0x44 0x45 0x46 0x47 0x48>;
 		status = "disabled";
-		phandle = <0x224>;
+		phandle = <0x22c>;
 	};
 
 	mpp-srv {
@@ -1814,7 +1814,7 @@
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4b>;
 		status = "disabled";
-		phandle = <0x225>;
+		phandle = <0x22d>;
 	};
 
 	rkcif-mipi-lvds {
@@ -1829,28 +1829,28 @@
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4c>;
 		status = "disabled";
-		phandle = <0x226>;
+		phandle = <0x22e>;
 	};
 
 	rkcif-mipi-lvds-sditf-vir1 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4c>;
 		status = "disabled";
-		phandle = <0x227>;
+		phandle = <0x22f>;
 	};
 
 	rkcif-mipi-lvds-sditf-vir2 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4c>;
 		status = "disabled";
-		phandle = <0x228>;
+		phandle = <0x230>;
 	};
 
 	rkcif-mipi-lvds-sditf-vir3 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4c>;
 		status = "disabled";
-		phandle = <0x229>;
+		phandle = <0x231>;
 	};
 
 	rkcif-mipi-lvds1 {
@@ -1865,28 +1865,28 @@
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4d>;
 		status = "disabled";
-		phandle = <0x22a>;
+		phandle = <0x232>;
 	};
 
 	rkcif-mipi-lvds1-sditf-vir1 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4d>;
 		status = "disabled";
-		phandle = <0x22b>;
+		phandle = <0x233>;
 	};
 
 	rkcif-mipi-lvds1-sditf-vir2 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4d>;
 		status = "disabled";
-		phandle = <0x22c>;
+		phandle = <0x234>;
 	};
 
 	rkcif-mipi-lvds1-sditf-vir3 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4d>;
 		status = "disabled";
-		phandle = <0x22d>;
+		phandle = <0x235>;
 	};
 
 	rkcif-mipi-lvds2 {
@@ -1901,28 +1901,28 @@
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4e>;
 		status = "disabled";
-		phandle = <0x22e>;
+		phandle = <0x236>;
 	};
 
 	rkcif-mipi-lvds2-sditf-vir1 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4e>;
 		status = "disabled";
-		phandle = <0x22f>;
+		phandle = <0x237>;
 	};
 
 	rkcif-mipi-lvds2-sditf-vir2 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4e>;
 		status = "disabled";
-		phandle = <0x230>;
+		phandle = <0x238>;
 	};
 
 	rkcif-mipi-lvds2-sditf-vir3 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4e>;
 		status = "disabled";
-		phandle = <0x231>;
+		phandle = <0x239>;
 	};
 
 	rkcif-mipi-lvds3 {
@@ -1937,98 +1937,98 @@
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4f>;
 		status = "disabled";
-		phandle = <0x232>;
+		phandle = <0x23a>;
 	};
 
 	rkcif-mipi-lvds3-sditf-vir1 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4f>;
 		status = "disabled";
-		phandle = <0x233>;
+		phandle = <0x23b>;
 	};
 
 	rkcif-mipi-lvds3-sditf-vir2 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4f>;
 		status = "disabled";
-		phandle = <0x234>;
+		phandle = <0x23c>;
 	};
 
 	rkcif-mipi-lvds3-sditf-vir3 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x4f>;
 		status = "disabled";
-		phandle = <0x235>;
+		phandle = <0x23d>;
 	};
 
 	rkisp0-vir0 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x50>;
 		status = "disabled";
-		phandle = <0x236>;
+		phandle = <0x23e>;
 	};
 
 	rkisp0-vir1 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x50>;
 		status = "disabled";
-		phandle = <0x237>;
+		phandle = <0x23f>;
 	};
 
 	rkisp0-vir2 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x50>;
 		status = "disabled";
-		phandle = <0x238>;
+		phandle = <0x240>;
 	};
 
 	rkisp0-vir3 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x50>;
 		status = "disabled";
-		phandle = <0x239>;
+		phandle = <0x241>;
 	};
 
 	rkisp1-vir0 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x51>;
 		status = "disabled";
-		phandle = <0x23a>;
+		phandle = <0x242>;
 	};
 
 	rkisp1-vir1 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x51>;
 		status = "disabled";
-		phandle = <0x23b>;
+		phandle = <0x243>;
 	};
 
 	rkisp1-vir2 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x51>;
 		status = "disabled";
-		phandle = <0x23c>;
+		phandle = <0x244>;
 	};
 
 	rkisp1-vir3 {
 		compatible = "rockchip,rkisp-vir";
 		rockchip,hw = <0x51>;
 		status = "disabled";
-		phandle = <0x23d>;
+		phandle = <0x245>;
 	};
 
 	rkispp0-vir0 {
 		compatible = "rockchip,rk3588-rkispp-vir";
 		rockchip,hw = <0x52>;
 		status = "disabled";
-		phandle = <0x23e>;
+		phandle = <0x246>;
 	};
 
 	rkispp1-vir0 {
 		compatible = "rockchip,rk3588-rkispp-vir";
 		rockchip,hw = <0x53>;
 		status = "disabled";
-		phandle = <0x23f>;
+		phandle = <0x247>;
 	};
 
 	rkvenc-ccu {
@@ -2043,24 +2043,24 @@
 		rockchip,sleep-debug-en = <0x1>;
 		rockchip,sleep-mode-config = <0x1000608>;
 		rockchip,wakeup-config = <0x100>;
-		phandle = <0x240>;
+		phandle = <0x248>;
 	};
 
 	rockchip-system-monitor {
 		compatible = "rockchip,system-monitor";
 		rockchip,thermal-zone = "soc-thermal";
-		phandle = <0x241>;
+		phandle = <0x249>;
 	};
 
 	thermal-zones {
-		phandle = <0x242>;
+		phandle = <0x24a>;
 
 		soc-thermal {
 			polling-delay-passive = <0x14>;
 			polling-delay = <0x3e8>;
 			sustainable-power = <0x834>;
 			thermal-sensors = <0x54 0x0>;
-			phandle = <0x243>;
+			phandle = <0x24b>;
 
 			trips {
 
@@ -2068,7 +2068,7 @@
 					temperature = <0x124f8>;
 					hysteresis = <0x7d0>;
 					type = "passive";
-					phandle = <0x244>;
+					phandle = <0x24c>;
 				};
 
 				trip-point-1 {
@@ -2082,7 +2082,7 @@
 					temperature = <0x1c138>;
 					hysteresis = <0x7d0>;
 					type = "critical";
-					phandle = <0x245>;
+					phandle = <0x24d>;
 				};
 			};
 
@@ -2118,42 +2118,42 @@
 			polling-delay-passive = <0x14>;
 			polling-delay = <0x3e8>;
 			thermal-sensors = <0x54 0x1>;
-			phandle = <0x246>;
+			phandle = <0x24e>;
 		};
 
 		bigcore1-thermal {
 			polling-delay-passive = <0x14>;
 			polling-delay = <0x3e8>;
 			thermal-sensors = <0x54 0x2>;
-			phandle = <0x247>;
+			phandle = <0x24f>;
 		};
 
 		littlecore-thermal {
 			polling-delay-passive = <0x14>;
 			polling-delay = <0x3e8>;
 			thermal-sensors = <0x54 0x3>;
-			phandle = <0x248>;
+			phandle = <0x250>;
 		};
 
 		center-thermal {
 			polling-delay-passive = <0x14>;
 			polling-delay = <0x3e8>;
 			thermal-sensors = <0x54 0x4>;
-			phandle = <0x249>;
+			phandle = <0x251>;
 		};
 
 		gpu-thermal {
 			polling-delay-passive = <0x14>;
 			polling-delay = <0x3e8>;
 			thermal-sensors = <0x54 0x5>;
-			phandle = <0x24a>;
+			phandle = <0x252>;
 		};
 
 		npu-thermal {
 			polling-delay-passive = <0x14>;
 			polling-delay = <0x3e8>;
 			thermal-sensors = <0x54 0x6>;
-			phandle = <0x24b>;
+			phandle = <0x253>;
 		};
 	};
 
@@ -2371,7 +2371,7 @@
 		#size-cells = <0x2>;
 		ranges;
 		status = "okay";
-		phandle = <0x24c>;
+		phandle = <0x254>;
 
 		usb@fc000000 {
 			compatible = "snps,dwc3";
@@ -2394,7 +2394,7 @@
 			snps,parkmode-disable-ss-quirk;
 			quirk-skip-phy-init;
 			status = "okay";
-			phandle = <0x24d>;
+			phandle = <0x255>;
 		};
 	};
 
@@ -2409,7 +2409,7 @@
 		phy-names = "usb2-phy";
 		power-domains = <0x57 0x1f>;
 		status = "okay";
-		phandle = <0x24e>;
+		phandle = <0x256>;
 	};
 
 	usb@fc840000 {
@@ -2436,7 +2436,7 @@
 		phy-names = "usb2-phy";
 		power-domains = <0x57 0x1f>;
 		status = "okay";
-		phandle = <0x24f>;
+		phandle = <0x257>;
 	};
 
 	usb@fc8c0000 {
@@ -2459,7 +2459,7 @@
 		interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
 		#iommu-cells = <0x1>;
 		status = "disabled";
-		phandle = <0x250>;
+		phandle = <0x258>;
 	};
 
 	iommu@fcb00000 {
@@ -2469,7 +2469,7 @@
 		interrupt-names = "eventq", "gerror", "priq", "cmdq-sync";
 		#iommu-cells = <0x1>;
 		status = "disabled";
-		phandle = <0x251>;
+		phandle = <0x259>;
 	};
 
 	usbhost3_0 {
@@ -2480,7 +2480,7 @@
 		#size-cells = <0x2>;
 		ranges;
 		status = "okay";
-		phandle = <0x252>;
+		phandle = <0x25a>;
 
 		usb@fcd00000 {
 			compatible = "snps,dwc3";
@@ -2500,14 +2500,14 @@
 			snps,parkmode-disable-hs-quirk;
 			snps,parkmode-disable-ss-quirk;
 			status = "okay";
-			phandle = <0x253>;
+			phandle = <0x25b>;
 		};
 	};
 
 	syscon@fd588000 {
 		compatible = "rockchip,rk3588-pmu0-grf", "syscon", "simple-mfd";
 		reg = <0x0 0xfd588000 0x0 0x2000>;
-		phandle = <0x254>;
+		phandle = <0x25c>;
 
 		reboot-mode {
 			compatible = "syscon-reboot-mode";
@@ -2522,7 +2522,7 @@
 			mode-panic = <0x5242c307>;
 			mode-watchdog = <0x5242c308>;
 			mode-quiescent = <0x5242c30e>;
-			phandle = <0x255>;
+			phandle = <0x25d>;
 		};
 	};
 
@@ -2542,7 +2542,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <0x67>;
 			status = "disabled";
-			phandle = <0x256>;
+			phandle = <0x25e>;
 
 			ports {
 				#address-cells = <0x1>;
@@ -2697,7 +2697,7 @@
 		reg = <0x0 0xfd5d8000 0x0 0x4000>;
 		#address-cells = <0x1>;
 		#size-cells = <0x1>;
-		phandle = <0x257>;
+		phandle = <0x25f>;
 
 		usb2-phy@8000 {
 			compatible = "rockchip,rk3588-usb2phy";
@@ -2726,7 +2726,7 @@
 		reg = <0x0 0xfd5dc000 0x0 0x4000>;
 		#address-cells = <0x1>;
 		#size-cells = <0x1>;
-		phandle = <0x258>;
+		phandle = <0x260>;
 
 		usb2-phy@c000 {
 			compatible = "rockchip,rk3588-usb2phy";
@@ -2796,7 +2796,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "okay";
-		phandle = <0x259>;
+		phandle = <0x261>;
 
 		rk8602@42 {
 			compatible = "rockchip,rk8602";
@@ -2849,7 +2849,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <0x70>;
 		status = "disabled";
-		phandle = <0x25a>;
+		phandle = <0x262>;
 	};
 
 	pwm@fd8b0000 {
@@ -2862,7 +2862,7 @@
 		clocks = <0x2 0x2a5 0x2 0x2a4>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x25b>;
+		phandle = <0x263>;
 	};
 
 	pwm@fd8b0010 {
@@ -2875,7 +2875,7 @@
 		clocks = <0x2 0x2a5 0x2 0x2a4>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x25c>;
+		phandle = <0x264>;
 	};
 
 	pwm@fd8b0020 {
@@ -2888,7 +2888,7 @@
 		clocks = <0x2 0x2a5 0x2 0x2a4>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x25d>;
+		phandle = <0x265>;
 	};
 
 	pwm@fd8b0030 {
@@ -2901,7 +2901,7 @@
 		clocks = <0x2 0x2a5 0x2 0x2a4>;
 		clock-names = "pwm", "pclk";
 		status = "okay";
-		phandle = <0x1c5>;
+		phandle = <0x1ce>;
 	};
 
 	power-management@fd8d8000 {
@@ -3180,7 +3180,7 @@
 		status = "okay";
 		rknpu-supply = <0xa6>;
 		mem-supply = <0xa6>;
-		phandle = <0x25e>;
+		phandle = <0x266>;
 	};
 
 	npu-opp-table {
@@ -3378,7 +3378,7 @@
 		rockchip,resetgroup-node = <0x0>;
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x25f>;
+		phandle = <0x267>;
 	};
 
 	vdpu@fdb50400 {
@@ -3401,7 +3401,7 @@
 		rockchip,resetgroup-node = <0x0>;
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x260>;
+		phandle = <0x268>;
 	};
 
 	iommu@fdb50800 {
@@ -3437,7 +3437,7 @@
 		rockchip,taskqueue-node = <0x0>;
 		rockchip,resetgroup-node = <0x0>;
 		status = "okay";
-		phandle = <0x261>;
+		phandle = <0x269>;
 	};
 
 	rga@fdb60000 {
@@ -3450,7 +3450,7 @@
 		power-domains = <0x57 0x16>;
 		iommus = <0xac>;
 		status = "okay";
-		phandle = <0x262>;
+		phandle = <0x26a>;
 	};
 
 	iommu@fdb60f00 {
@@ -3476,7 +3476,7 @@
 		power-domains = <0x57 0x1e>;
 		iommus = <0xad>;
 		status = "okay";
-		phandle = <0x263>;
+		phandle = <0x26b>;
 	};
 
 	iommu@fdb70f00 {
@@ -3501,7 +3501,7 @@
 		clock-names = "aclk_rga2", "hclk_rga2", "clk_rga2";
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x264>;
+		phandle = <0x26c>;
 	};
 
 	jpegd@fdb90000 {
@@ -3522,7 +3522,7 @@
 		rockchip,taskqueue-node = <0x1>;
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x265>;
+		phandle = <0x26d>;
 	};
 
 	iommu@fdb90480 {
@@ -3558,7 +3558,7 @@
 		rockchip,ccu = <0xb0>;
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x266>;
+		phandle = <0x26e>;
 	};
 
 	iommu@fdba0800 {
@@ -3594,7 +3594,7 @@
 		rockchip,ccu = <0xb0>;
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x267>;
+		phandle = <0x26f>;
 	};
 
 	iommu@fdba4800 {
@@ -3630,7 +3630,7 @@
 		rockchip,ccu = <0xb0>;
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x268>;
+		phandle = <0x270>;
 	};
 
 	iommu@fdba8800 {
@@ -3666,7 +3666,7 @@
 		rockchip,ccu = <0xb0>;
 		power-domains = <0x57 0x15>;
 		status = "okay";
-		phandle = <0x269>;
+		phandle = <0x271>;
 	};
 
 	iommu@fdbac800 {
@@ -3701,7 +3701,7 @@
 		rockchip,taskqueue-node = <0x6>;
 		iommus = <0xb4>;
 		status = "okay";
-		phandle = <0x26a>;
+		phandle = <0x272>;
 	};
 
 	iommu@fdbb0800 {
@@ -3740,7 +3740,7 @@
 		status = "okay";
 		venc-supply = <0xb8>;
 		mem-supply = <0xb8>;
-		phandle = <0x26b>;
+		phandle = <0x273>;
 	};
 
 	iommu@fdbdf000 {
@@ -3782,7 +3782,7 @@
 		status = "okay";
 		venc-supply = <0xb8>;
 		mem-supply = <0xb8>;
-		phandle = <0x26c>;
+		phandle = <0x274>;
 	};
 
 	iommu@fdbef000 {
@@ -3862,7 +3862,7 @@
 		rockchip,rcb-min-width = <0x200>;
 		power-domains = <0x57 0xe>;
 		status = "okay";
-		phandle = <0x26d>;
+		phandle = <0x275>;
 	};
 
 	iommu@fdc38700 {
@@ -3908,7 +3908,7 @@
 		rockchip,rcb-min-width = <0x200>;
 		power-domains = <0x57 0xf>;
 		status = "okay";
-		phandle = <0x26e>;
+		phandle = <0x276>;
 	};
 
 	iommu@fdc48700 {
@@ -3946,7 +3946,7 @@
 		rockchip,taskqueue-node = <0xb>;
 		power-domains = <0x57 0x17>;
 		status = "disabled";
-		phandle = <0x26f>;
+		phandle = <0x277>;
 	};
 
 	iommu@fdca0000 {
@@ -3972,7 +3972,7 @@
 		power-domains = <0x57 0x1c>;
 		iommus = <0xc3>;
 		status = "disabled";
-		phandle = <0x270>;
+		phandle = <0x278>;
 	};
 
 	rkisp@fdcb0000 {
@@ -4238,7 +4238,7 @@
 		rockchip,vo1-grf = <0xcc>;
 		rockchip,pmu = <0xcd>;
 		status = "okay";
-		phandle = <0x271>;
+		phandle = <0x279>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -4251,7 +4251,7 @@
 				reg = <0x0>;
 				rockchip,plane-mask = <0x5>;
 				rockchip,primary-plane = <0x2>;
-				phandle = <0x272>;
+				phandle = <0x27a>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -4296,7 +4296,7 @@
 				reg = <0x1>;
 				rockchip,plane-mask = <0xa>;
 				rockchip,primary-plane = <0x3>;
-				phandle = <0x273>;
+				phandle = <0x27b>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -4343,7 +4343,7 @@
 				assigned-clock-parents = <0x2 0x4>;
 				rockchip,plane-mask = <0x140>;
 				rockchip,primary-plane = <0x8>;
-				phandle = <0x274>;
+				phandle = <0x27c>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -4400,7 +4400,7 @@
 				reg = <0x3>;
 				rockchip,plane-mask = <0x280>;
 				rockchip,primary-plane = <0x9>;
-				phandle = <0x275>;
+				phandle = <0x27d>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -4450,7 +4450,7 @@
 		power-domains = <0x57 0x19>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x1cd>;
+		phandle = <0x1d6>;
 	};
 
 	i2s@fddc0000 {
@@ -4469,7 +4469,7 @@
 		rockchip,playback-only;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x276>;
+		phandle = <0x27e>;
 	};
 
 	spdif-tx@fdde0000 {
@@ -4485,7 +4485,7 @@
 		power-domains = <0x57 0x1a>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x277>;
+		phandle = <0x27f>;
 	};
 
 	i2s@fddf0000 {
@@ -4506,7 +4506,7 @@
 		rockchip,playback-only;
 		#sound-dai-cells = <0x0>;
 		status = "okay";
-		phandle = <0x1c9>;
+		phandle = <0x1d2>;
 	};
 
 	i2s@fddfc000 {
@@ -4525,7 +4525,7 @@
 		rockchip,capture-only;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x278>;
+		phandle = <0x280>;
 	};
 
 	spdif-rx@fde08000 {
@@ -4543,7 +4543,7 @@
 		reset-names = "spdifrx-m";
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x279>;
+		phandle = <0x281>;
 	};
 
 	dsi@fde20000 {
@@ -4561,7 +4561,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x27a>;
+		phandle = <0x282>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -4571,7 +4571,7 @@
 				reg = <0x0>;
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
-				phandle = <0x27b>;
+				phandle = <0x283>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -4583,7 +4583,7 @@
 				endpoint@1 {
 					reg = <0x1>;
 					remote-endpoint = <0x34>;
-					status = "okay";
+					status = "disabled";
 					phandle = <0xe2>;
 				};
 			};
@@ -4614,11 +4614,11 @@
 			dsi,lanes = <0x4>;
 			panel-init-sequence = <0x390004ff 0x98810315 0x20100 0x15000202 0x150002 0x3531500 0x204d315 0x20500 0x15000206 0xd150002 0x7081500 0x2080015 0x20900 0x1500020a 0x150002 0xb001500 0x20c0015 0x20d00 0x1500020e 0x150002 0xf281500 0x2102815 0x21100 0x15000212 0x150002 0x13001500 0x2140015 0x21500 0x15000216 0x150002 0x17001500 0x2180015 0x21900 0x1500021a 0x150002 0x1b001500 0x21c0015 0x21d00 0x1500021e 0x40150002 0x1f801500 0x2200615 0x22101 0x15000222 0x150002 0x23001500 0x2240015 0x22500 0x15000226 0x150002 0x27001500 0x2283315 0x22933 0x1500022a 0x150002 0x2b001500 0x22c0015 0x22d00 0x1500022e 0x150002 0x2f001500 0x2300015 0x23100 0x15000232 0x150002 0x33001500 0x2340315 0x23500 0x15000236 0x150002 0x37001500 0x2389615 0x23900 0x1500023a 0x150002 0x3b001500 0x23c0015 0x23d00 0x1500023e 0x150002 0x3f001500 0x2400015 0x24100 0x15000242 0x150002 0x43001500 0x2440015 0x25000 0x15000251 0x23150002 0x52451500 0x2536715 0x25489 0x15000255 0xab150002 0x56011500 0x2572315 0x25845 0x15000259 0x67150002 0x5a891500 0x25bab15 0x25ccd 0x1500025d 0xef150002 0x5e001500 0x25f0815 0x26008 0x15000261 0x6150002 0x62061500 0x2630115 0x26401 0x15000265 0x150002 0x66001500 0x2670215 0x26815 0x15000269 0x15150002 0x6a141500 0x26b1415 0x26c0d 0x1500026d 0xd150002 0x6e0c1500 0x26f0c15 0x2700f 0x15000271 0xf150002 0x720e1500 0x2730e15 0x27402 0x15000275 0x8150002 0x76081500 0x2770615 0x27806 0x15000279 0x1150002 0x7a011500 0x27b0015 0x27c00 0x1500027d 0x2150002 0x7e151500 0x27f1515 0x28014 0x15000281 0x14150002 0x820d1500 0x2830d15 0x2840c 0x15000285 0xc150002 0x860f1500 0x2870f15 0x2880e 0x15000289 0xe150002 0x8a023900 0x4ff9881 0x4150002 0xc53a1500 0x26e2b15 0x26f37 0x1500023a 0x24150002 0x8d1a1500 0x287ba15 0x2b2d1 0x15000288 0xb150002 0x38011500 0x2390015 0x2b502 0x15000231 0x25150002 0x3b983900 0x4ff9881 0x1150002 0x220a1500 0x2310015 0x2533d 0x15000255 0x3d150002 0x50851500 0x2518015 0x26006 0x15000262 0x20150002 0xa0001500 0x2a12115 0x2a235 0x150002a3 0x19150002 0xa41e1500 0x2a53315 0x2a627 0x150002a7 0x26150002 0xa8af1500 0x2a91b15 0x2aa27 0x150002ab 0x8d150002 0xac1a1500 0x2ad1b15 0x2ae50 0x150002af 0x26150002 0xb02b1500 0x2b15415 0x2b25e 0x150002b3 0x23150002 0xc0001500 0x2c12115 0x2c235 0x150002c3 0x19150002 0xc41e1500 0x2c53315 0x2c627 0x150002c7 0x26150002 0xc8af1500 0x2c91b15 0x2ca27 0x150002cb 0x8d150002 0xcc1a1500 0x2cd1b15 0x2ce50 0x150002cf 0x26150002 0xd02b1500 0x2d15415 0x2d25e 0x150002d3 0x23390004 0xff988100 0x15780111 0x15050129>;
 			panel-exit-sequence = <0x5000128 0x5000110>;
-			phandle = <0x27c>;
+			phandle = <0x284>;
 
 			display-timings {
 				native-mode = <0xea>;
-				phandle = <0x27d>;
+				phandle = <0x285>;
 
 				timing0 {
 					clock-frequency = <0x41cdb40>;
@@ -4669,7 +4669,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x27e>;
+		phandle = <0x286>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -4679,7 +4679,7 @@
 				reg = <0x0>;
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
-				phandle = <0x27f>;
+				phandle = <0x287>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -4722,11 +4722,11 @@
 			panel-init-sequence = [23 00 02 fe 21 23 00 02 04 00 23 00 02 00 64 23 00 02 2a 00 23 00 02 26 64 23 00 02 54 00 23 00 02 50 64 23 00 02 7b 00 23 00 02 77 64 23 00 02 a2 00 23 00 02 9d 64 23 00 02 c9 00 23 00 02 c5 64 23 00 02 01 71 23 00 02 27 71 23 00 02 51 71 23 00 02 78 71 23 00 02 9e 71 23 00 02 c6 71 23 00 02 02 89 23 00 02 28 89 23 00 02 52 89 23 00 02 79 89 23 00 02 9f 89 23 00 02 c7 89 23 00 02 03 9e 23 00 02 29 9e 23 00 02 53 9e 23 00 02 7a 9e 23 00 02 a0 9e 23 00 02 c8 9e 23 00 02 09 00 23 00 02 05 b0 23 00 02 31 00 23 00 02 2b b0 23 00 02 5a 00 23 00 02 55 b0 23 00 02 80 00 23 00 02 7c b0 23 00 02 a7 00 23 00 02 a3 b0 23 00 02 ce 00 23 00 02 ca b0 23 00 02 06 c0 23 00 02 2d c0 23 00 02 56 c0 23 00 02 7d c0 23 00 02 a4 c0 23 00 02 cb c0 23 00 02 07 cf 23 00 02 2f cf 23 00 02 58 cf 23 00 02 7e cf 23 00 02 a5 cf 23 00 02 cc cf 23 00 02 08 dd 23 00 02 30 dd 23 00 02 59 dd 23 00 02 7f dd 23 00 02 a6 dd 23 00 02 cd dd 23 00 02 0e 15 23 00 02 0a e9 23 00 02 36 15 23 00 02 32 e9 23 00 02 5f 15 23 00 02 5b e9 23 00 02 85 15 23 00 02 81 e9 23 00 02 ad 15 23 00 02 a9 e9 23 00 02 d3 15 23 00 02 cf e9 23 00 02 0b 14 23 00 02 33 14 23 00 02 5c 14 23 00 02 82 14 23 00 02 aa 14 23 00 02 d0 14 23 00 02 0c 36 23 00 02 34 36 23 00 02 5d 36 23 00 02 83 36 23 00 02 ab 36 23 00 02 d1 36 23 00 02 0d 6b 23 00 02 35 6b 23 00 02 5e 6b 23 00 02 84 6b 23 00 02 ac 6b 23 00 02 d2 6b 23 00 02 13 5a 23 00 02 0f 94 23 00 02 3b 5a 23 00 02 37 94 23 00 02 64 5a 23 00 02 60 94 23 00 02 8a 5a 23 00 02 86 94 23 00 02 b2 5a 23 00 02 ae 94 23 00 02 d8 5a 23 00 02 d4 94 23 00 02 10 d1 23 00 02 38 d1 23 00 02 61 d1 23 00 02 87 d1 23 00 02 af d1 23 00 02 d5 d1 23 00 02 11 04 23 00 02 39 04 23 00 02 62 04 23 00 02 88 04 23 00 02 b0 04 23 00 02 d6 04 23 00 02 12 05 23 00 02 3a 05 23 00 02 63 05 23 00 02 89 05 23 00 02 b1 05 23 00 02 d7 05 23 00 02 18 aa 23 00 02 14 36 23 00 02 42 aa 23 00 02 3d 36 23 00 02 69 aa 23 00 02 65 36 23 00 02 8f aa 23 00 02 8b 36 23 00 02 b7 aa 23 00 02 b3 36 23 00 02 dd aa 23 00 02 d9 36 23 00 02 15 74 23 00 02 3f 74 23 00 02 66 74 23 00 02 8c 74 23 00 02 b4 74 23 00 02 da 74 23 00 02 16 9f 23 00 02 40 9f 23 00 02 67 9f 23 00 02 8d 9f 23 00 02 b5 9f 23 00 02 db 9f 23 00 02 17 dc 23 00 02 41 dc 23 00 02 68 dc 23 00 02 8e dc 23 00 02 b6 dc 23 00 02 dc dc 23 00 02 1d ff 23 00 02 19 03 23 00 02 47 ff 23 00 02 43 03 23 00 02 6e ff 23 00 02 6a 03 23 00 02 94 ff 23 00 02 90 03 23 00 02 bc ff 23 00 02 b8 03 23 00 02 e2 ff 23 00 02 de 03 23 00 02 1a 35 23 00 02 44 35 23 00 02 6b 35 23 00 02 91 35 23 00 02 b9 35 23 00 02 df 35 23 00 02 1b 45 23 00 02 45 45 23 00 02 6c 45 23 00 02 92 45 23 00 02 ba 45 23 00 02 e0 45 23 00 02 1c 55 23 00 02 46 55 23 00 02 6d 55 23 00 02 93 55 23 00 02 bb 55 23 00 02 e1 55 23 00 02 22 ff 23 00 02 1e 68 23 00 02 4c ff 23 00 02 48 68 23 00 02 73 ff 23 00 02 6f 68 23 00 02 99 ff 23 00 02 95 68 23 00 02 c1 ff 23 00 02 bd 68 23 00 02 e7 ff 23 00 02 e3 68 23 00 02 1f 7e 23 00 02 49 7e 23 00 02 70 7e 23 00 02 96 7e 23 00 02 be 7e 23 00 02 e4 7e 23 00 02 20 97 23 00 02 4a 97 23 00 02 71 97 23 00 02 97 97 23 00 02 bf 97 23 00 02 e5 97 23 00 02 21 b5 23 00 02 4b b5 23 00 02 72 b5 23 00 02 98 b5 23 00 02 c0 b5 23 00 02 e6 b5 23 00 02 25 f0 23 00 02 23 e8 23 00 02 4f f0 23 00 02 4d e8 23 00 02 76 f0 23 00 02 74 e8 23 00 02 9c f0 23 00 02 9a e8 23 00 02 c4 f0 23 00 02 c2 e8 23 00 02 ea f0 23 00 02 e8 e8 23 00 02 24 ff 23 00 02 4e ff 23 00 02 75 ff 23 00 02 9b ff 23 00 02 c3 ff 23 00 02 e9 ff 23 00 02 fe 3d 23 00 02 00 04 23 00 02 fe 23 23 00 02 08 82 23 00 02 0a 00 23 00 02 0b 00 23 00 02 0c 01 23 00 02 16 00 23 00 02 18 02 23 00 02 1b 04 23 00 02 19 04 23 00 02 1c 81 23 00 02 1f 00 23 00 02 20 03 23 00 02 23 04 23 00 02 21 01 23 00 02 54 63 23 00 02 55 54 23 00 02 6e 45 23 00 02 6d 36 23 00 02 fe 3d 23 00 02 55 78 23 00 02 fe 20 23 00 02 26 30 23 00 02 fe 3d 23 00 02 20 71 23 00 02 50 8f 23 00 02 51 8f 23 00 02 fe 00 23 00 02 35 00 05 78 01 11 05 00 01 29];
 			panel-exit-sequence = <0x5000128 0x5000110>;
 			power-supply = <0xef>;
-			phandle = <0x280>;
+			phandle = <0x288>;
 
 			display-timings {
 				native-mode = <0xf0>;
-				phandle = <0x281>;
+				phandle = <0x289>;
 
 				timing0 {
 					clock-frequency = <0x7de2900>;
@@ -4773,7 +4773,7 @@
 		power-domains = <0x57 0x19>;
 		rockchip,vo-grf = <0xf2>;
 		status = "disabled";
-		phandle = <0x282>;
+		phandle = <0x28a>;
 	};
 
 	dp@fde50000 {
@@ -4789,7 +4789,7 @@
 		power-domains = <0x57 0x19>;
 		#sound-dai-cells = <0x1>;
 		status = "disabled";
-		phandle = <0x1ce>;
+		phandle = <0x1d7>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -4826,7 +4826,7 @@
 				reg = <0x1>;
 
 				endpoint {
-					phandle = <0x283>;
+					phandle = <0x28b>;
 				};
 			};
 		};
@@ -4843,7 +4843,7 @@
 		power-domains = <0x57 0x1a>;
 		rockchip,vo-grf = <0xcc>;
 		status = "disabled";
-		phandle = <0x284>;
+		phandle = <0x28c>;
 	};
 
 	hdmi@fde80000 {
@@ -4865,7 +4865,7 @@
 		#sound-dai-cells = <0x0>;
 		status = "okay";
 		enable-gpios = <0xfc 0x9 0x0>;
-		phandle = <0x1ca>;
+		phandle = <0x1d3>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -4875,7 +4875,7 @@
 				reg = <0x0>;
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
-				phandle = <0x285>;
+				phandle = <0x28d>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -4914,7 +4914,7 @@
 		power-domains = <0x57 0x1a>;
 		rockchip,grf = <0xcc>;
 		status = "disabled";
-		phandle = <0x286>;
+		phandle = <0x28e>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -4951,7 +4951,7 @@
 				reg = <0x1>;
 
 				endpoint {
-					phandle = <0x287>;
+					phandle = <0x28f>;
 				};
 			};
 		};
@@ -5182,7 +5182,7 @@
 	qos@fdf67200 {
 		compatible = "syscon";
 		reg = <0x0 0xfdf67200 0x0 0x20>;
-		phandle = <0x288>;
+		phandle = <0x290>;
 	};
 
 	qos@fdf70000 {
@@ -5284,7 +5284,7 @@
 		status = "okay";
 		reset-gpios = <0xfc 0x2 0x0>;
 		vpcie3v3-supply = <0x105>;
-		phandle = <0x289>;
+		phandle = <0x291>;
 
 		legacy-interrupt-controller {
 			interrupt-controller;
@@ -5324,10 +5324,10 @@
 		resets = <0x2 0x211 0x2 0x220>;
 		reset-names = "pcie", "periph";
 		rockchip,pipe-grf = <0x6c>;
-		status = "disabled";
-		reset-gpios = <0x108 0x18 0x0>;
+		status = "okay";
+		reset-gpios = <0x108 0x19 0x0>;
 		vpcie3v3-supply = <0x105>;
-		phandle = <0x28a>;
+		phandle = <0x292>;
 
 		legacy-interrupt-controller {
 			interrupt-controller;
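
The hunk above carries two of the few functional edits in this patch: this PCIe controller goes from "disabled" to "okay", and its reset (PERST#) line moves from pin 0x18 to pin 0x19 of the GPIO bank behind phandle <0x108>. Under the usual Rockchip per-bank numbering (PA0..PD7 = 0..31) those decode to RK_PD0 and RK_PD1. A sketch of the same change in source form; the controller, bank, and regulator labels are hypothetical, since this hunk shows none of them:

	&pcie2x1l0 {
		reset-gpios = <&gpio4 RK_PD1 GPIO_ACTIVE_HIGH>;	/* was RK_PD0 (0x18 -> 0x19) */
		vpcie3v3-supply = <&vcc3v3_pcie>;
		status = "okay";
	};
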
@@ -5344,7 +5344,7 @@
 		reg = <0x0 0xfe1c0000 0x0 0x10000>;
 		rockchip,ethernet = <0x109>;
 		status = "disabled";
-		phandle = <0x28b>;
+		phandle = <0x293>;
 	};
 
 	ethernet@fe1c0000 {
@@ -5380,7 +5380,7 @@
 			compatible = "snps,dwmac-mdio";
 			#address-cells = <0x1>;
 			#size-cells = <0x0>;
-			phandle = <0x28c>;
+			phandle = <0x294>;
 
 			phy@1 {
 				compatible = "ethernet-phy-ieee802.3-c22";
@@ -5423,8 +5423,8 @@
 		phys = <0x107 0x1>;
 		phy-names = "sata-phy";
 		ports-implemented = <0x1>;
-		status = "okay";
-		phandle = <0x28d>;
+		status = "disabled";
+		phandle = <0x295>;
 	};
 
 	sata@fe230000 {
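
Conversely, the hunk above turns this SATA port off while sata@fe230000 below stays enabled. On this family of SoCs a PIPE combo PHY serves either PCIe or SATA, one at a time, so the disable is consistent with the PCIe enablement a few hunks earlier claiming the shared lane; that pairing is inferred here, not stated by the diff. The usual mutually exclusive pattern, with hypothetical labels:

	&combphy0_ps {
		status = "okay";
	};

	&pcie2x1l2 {
		status = "okay";	/* lane carries PCIe ... */
	};

	&sata0 {
		status = "disabled";	/* ... so SATA on the same lane must be off */
	};
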
@@ -5438,7 +5438,7 @@
 		phy-names = "sata-phy";
 		ports-implemented = <0x1>;
 		status = "okay";
-		phandle = <0x28e>;
+		phandle = <0x296>;
 	};
 
 	spi@fe2b0000 {
@@ -5452,7 +5452,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x28f>;
+		phandle = <0x297>;
 	};
 
 	mmc@fe2c0000 {
@@ -5476,7 +5476,7 @@
 		sd-uhs-sdr104;
 		vqmmc-supply = <0x117>;
 		vmmc-supply = <0x118>;
-		phandle = <0x290>;
+		phandle = <0x298>;
 	};
 
 	mmc@fe2d0000 {
@@ -5490,7 +5490,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <0x119>;
 		power-domains = <0x57 0x25>;
-		status = "okay";
+		status = "disabled";
 		no-sd;
 		no-mmc;
 		bus-width = <0x4>;
@@ -5501,7 +5501,7 @@
 		mmc-pwrseq = <0x11a>;
 		non-removable;
 		sd-uhs-sdr104;
-		phandle = <0x291>;
+		phandle = <0x299>;
 	};
 
 	mmc@fe2e0000 {
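
The two hunks above disable mmc@fe2d0000. Its property set (no-sd, no-mmc, non-removable, sd-uhs-sdr104, plus an mmc-pwrseq) marks it as an SDIO-only host, typically the attachment point for an on-board Wi-Fi module, so this reads as the module being switched off rather than a storage slot. The node in hand-written form, labels hypothetical:

	&sdio {
		bus-width = <4>;
		no-sd;				/* SDIO function only */
		no-mmc;
		non-removable;
		sd-uhs-sdr104;
		mmc-pwrseq = <&sdio_pwrseq>;	/* sequences the module's enable/reset lines */
		status = "disabled";
	};
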
@@ -5523,7 +5523,7 @@
 		mmc-hs400-1_8v;
 		mmc-hs400-enhanced-strobe;
 		full-pwr-cycle-in-suspend;
-		phandle = <0x292>;
+		phandle = <0x29a>;
 	};
 
 	crypto@fe370000 {
@@ -5535,7 +5535,7 @@
 		resets = <0x11b 0xf>;
 		reset-names = "crypto-rst";
 		status = "disabled";
-		phandle = <0x293>;
+		phandle = <0x29b>;
 	};
 
 	rng@fe378000 {
@@ -5547,7 +5547,7 @@
 		resets = <0x11b 0x30>;
 		reset-names = "reset";
 		status = "okay";
-		phandle = <0x294>;
+		phandle = <0x29c>;
 	};
 
 	i2s@fe470000 {
@@ -5570,7 +5570,7 @@
 		pinctrl-2 = <0x11c 0x11d>;
 		#sound-dai-cells = <0x0>;
 		status = "okay";
-		phandle = <0x1d7>;
+		phandle = <0x1e0>;
 	};
 
 	i2s@fe480000 {
@@ -5588,7 +5588,7 @@
 		pinctrl-0 = <0x121 0x122 0x123 0x124 0x125 0x126 0x127 0x128 0x129 0x12a>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x295>;
+		phandle = <0x29d>;
 	};
 
 	i2s@fe490000 {
@@ -5610,7 +5610,7 @@
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
 		rockchip,bclk-fs = <0x20>;
-		phandle = <0x1c7>;
+		phandle = <0x1d0>;
 	};
 
 	i2s@fe4a0000 {
@@ -5631,7 +5631,7 @@
 		pinctrl-2 = <0x135 0x136>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x296>;
+		phandle = <0x29e>;
 	};
 
 	pdm@fe4b0000 {
@@ -5647,7 +5647,7 @@
 		pinctrl-2 = <0x13c 0x13d>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x297>;
+		phandle = <0x29f>;
 	};
 
 	pdm@fe4c0000 {
@@ -5666,7 +5666,7 @@
 		pinctrl-2 = <0x143 0x144>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x298>;
+		phandle = <0x2a0>;
 	};
 
 	vad@fe4d0000 {
@@ -5681,7 +5681,7 @@
 		rockchip,mode = <0x0>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x299>;
+		phandle = <0x2a1>;
 	};
 
 	spdif-tx@fe4e0000 {
@@ -5699,7 +5699,7 @@
 		pinctrl-0 = <0x145>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x1d1>;
+		phandle = <0x1da>;
 	};
 
 	spdif-tx@fe4f0000 {
@@ -5717,7 +5717,7 @@
 		pinctrl-0 = <0x146>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x1d3>;
+		phandle = <0x1dc>;
 	};
 
 	codec-digital@fe500000 {
@@ -5734,14 +5734,14 @@
 		pinctrl-0 = <0x147>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x29a>;
+		phandle = <0x2a2>;
 	};
 
 	hwspinlock@fe5a0000 {
 		compatible = "rockchip,hwspinlock";
 		reg = <0x0 0xfe5a0000 0x0 0x100>;
 		#hwlock-cells = <0x1>;
-		phandle = <0x29b>;
+		phandle = <0x2a3>;
 	};
 
 	interrupt-controller@fe600000 {
@@ -5807,7 +5807,7 @@
 		tx-fifo-depth = <0x1>;
 		rx-fifo-depth = <0x6>;
 		status = "okay";
-		phandle = <0x29c>;
+		phandle = <0x2a4>;
 	};
 
 	can@fea60000 {
@@ -5823,7 +5823,7 @@
 		tx-fifo-depth = <0x1>;
 		rx-fifo-depth = <0x6>;
 		status = "okay";
-		phandle = <0x29d>;
+		phandle = <0x2a5>;
 	};
 
 	can@fea70000 {
@@ -5839,7 +5839,7 @@
 		tx-fifo-depth = <0x1>;
 		rx-fifo-depth = <0x6>;
 		status = "disabled";
-		phandle = <0x29e>;
+		phandle = <0x2a6>;
 	};
 
 	decompress@fea80000 {
@@ -5851,7 +5851,7 @@
 		resets = <0x2 0x118>;
 		reset-names = "dresetn";
 		status = "disabled";
-		phandle = <0x29f>;
+		phandle = <0x2a7>;
 	};
 
 	i2c@fea90000 {
@@ -5865,7 +5865,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "okay";
-		phandle = <0x2a0>;
+		phandle = <0x2a8>;
 
 		rk8602@42 {
 			compatible = "rockchip,rk8602";
@@ -5898,7 +5898,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x2a1>;
+		phandle = <0x2a9>;
 	};
 
 	i2c@feab0000 {
@@ -5912,7 +5912,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "okay";
-		phandle = <0x2a2>;
+		phandle = <0x2aa>;
 
 		es8316@10 {
 			compatible = "everest,es8316";
@@ -5922,7 +5922,7 @@
 			pinctrl-names = "default";
 			pinctrl-0 = <0x14f>;
 			#sound-dai-cells = <0x0>;
-			phandle = <0x1d8>;
+			phandle = <0x1e1>;
 		};
 	};
 
@@ -5937,7 +5937,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x2a3>;
+		phandle = <0x2ab>;
 
 		light@47 {
 			compatible = "ls_stk3332";
@@ -5949,7 +5949,7 @@
 			als_threshold_low = <0xa>;
 			als_ctrl_gain = <0x2>;
 			poll_delay_ms = <0x64>;
-			phandle = <0x2a4>;
+			phandle = <0x2ac>;
 		};
 
 		proximity@47 {
@@ -5962,7 +5962,7 @@
 			ps_ctrl_gain = <0x3>;
 			ps_led_current = <0x4>;
 			poll_delay_ms = <0x64>;
-			phandle = <0x2a5>;
+			phandle = <0x2ad>;
 		};
 
 		icm_acc@68 {
@@ -5974,7 +5974,7 @@
 			poll_delay_ms = <0x1e>;
 			type = <0x2>;
 			layout = <0x0>;
-			phandle = <0x2a6>;
+			phandle = <0x2ae>;
 		};
 
 		icm_gyro@68 {
@@ -5984,7 +5984,7 @@
 			poll_delay_ms = <0x1e>;
 			type = <0x4>;
 			layout = <0x0>;
-			phandle = <0x2a7>;
+			phandle = <0x2af>;
 		};
 	};
 
@@ -5999,7 +5999,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x2a8>;
+		phandle = <0x2b0>;
 
 		gt1x@14 {
 			compatible = "goodix,gt1x";
@@ -6009,7 +6009,7 @@
 			goodix,rst-gpio = <0x108 0x11 0x0>;
 			goodix,irq-gpio = <0x108 0x10 0x8>;
 			power-supply = <0xef>;
-			phandle = <0x2a9>;
+			phandle = <0x2b1>;
 		};
 	};
 
@@ -6019,7 +6019,7 @@
 		interrupts = <0x0 0x121 0x4>;
 		clocks = <0x2 0x5c 0x2 0x5f>;
 		clock-names = "pclk", "timer";
-		phandle = <0x2aa>;
+		phandle = <0x2b2>;
 	};
 
 	watchdog@feaf0000 {
@@ -6029,7 +6029,7 @@
 		clock-names = "tclk", "pclk";
 		interrupts = <0x0 0x13b 0x4>;
 		status = "disabled";
-		phandle = <0x2ab>;
+		phandle = <0x2b3>;
 	};
 
 	spi@feb00000 {
@@ -6046,7 +6046,7 @@
 		pinctrl-0 = <0x153 0x154 0x155>;
 		num-cs = <0x2>;
 		status = "disabled";
-		phandle = <0x2ac>;
+		phandle = <0x2b4>;
 	};
 
 	spi@feb10000 {
@@ -6063,7 +6063,7 @@
 		pinctrl-0 = <0x156 0x157 0x158>;
 		num-cs = <0x2>;
 		status = "disabled";
-		phandle = <0x2ad>;
+		phandle = <0x2b5>;
 	};
 
 	spi@feb20000 {
@@ -6082,7 +6082,7 @@
 		status = "okay";
 		assigned-clocks = <0x2 0xa5>;
 		assigned-clock-rates = <0xbebc200>;
-		phandle = <0x2ae>;
+		phandle = <0x2b6>;
 
 		rk806single@0 {
 			compatible = "rockchip,rk806";
@@ -6113,7 +6113,7 @@
 			vcc13-supply = <0x162>;
 			vcc14-supply = <0x162>;
 			vcca-supply = <0x6e>;
-			phandle = <0x2af>;
+			phandle = <0x2b7>;
 
 			pwrkey {
 				status = "okay";
@@ -6122,7 +6122,7 @@
 			pinctrl_rk806 {
 				gpio-controller;
 				#gpio-cells = <0x2>;
-				phandle = <0x2b0>;
+				phandle = <0x2b8>;
 
 				rk806_dvs1_null {
 					pins = "gpio_pwrctrl2";
@@ -6133,7 +6133,7 @@
 				rk806_dvs1_slp {
 					pins = "gpio_pwrctrl1";
 					function = "pin_fun1";
-					phandle = <0x2b1>;
+					phandle = <0x2b9>;
 				};
 
 				rk806_dvs1_pwrdn {
@@ -6145,7 +6145,7 @@
 				rk806_dvs1_rst {
 					pins = "gpio_pwrctrl1";
 					function = "pin_fun3";
-					phandle = <0x2b2>;
+					phandle = <0x2ba>;
 				};
 
 				rk806_dvs2_null {
@@ -6157,31 +6157,31 @@
 				rk806_dvs2_slp {
 					pins = "gpio_pwrctrl2";
 					function = "pin_fun1";
-					phandle = <0x2b3>;
+					phandle = <0x2bb>;
 				};
 
 				rk806_dvs2_pwrdn {
 					pins = "gpio_pwrctrl2";
 					function = "pin_fun2";
-					phandle = <0x2b4>;
+					phandle = <0x2bc>;
 				};
 
 				rk806_dvs2_rst {
 					pins = "gpio_pwrctrl2";
 					function = "pin_fun3";
-					phandle = <0x2b5>;
+					phandle = <0x2bd>;
 				};
 
 				rk806_dvs2_dvs {
 					pins = "gpio_pwrctrl2";
 					function = "pin_fun4";
-					phandle = <0x2b6>;
+					phandle = <0x2be>;
 				};
 
 				rk806_dvs2_gpio {
 					pins = "gpio_pwrctrl2";
 					function = "pin_fun5";
-					phandle = <0x2b7>;
+					phandle = <0x2bf>;
 				};
 
 				rk806_dvs3_null {
@@ -6193,31 +6193,31 @@
 				rk806_dvs3_slp {
 					pins = "gpio_pwrctrl3";
 					function = "pin_fun1";
-					phandle = <0x2b8>;
+					phandle = <0x2c0>;
 				};
 
 				rk806_dvs3_pwrdn {
 					pins = "gpio_pwrctrl3";
 					function = "pin_fun2";
-					phandle = <0x2b9>;
+					phandle = <0x2c1>;
 				};
 
 				rk806_dvs3_rst {
 					pins = "gpio_pwrctrl3";
 					function = "pin_fun3";
-					phandle = <0x2ba>;
+					phandle = <0x2c2>;
 				};
 
 				rk806_dvs3_dvs {
 					pins = "gpio_pwrctrl3";
 					function = "pin_fun4";
-					phandle = <0x2bb>;
+					phandle = <0x2c3>;
 				};
 
 				rk806_dvs3_gpio {
 					pins = "gpio_pwrctrl3";
 					function = "pin_fun5";
-					phandle = <0x2bc>;
+					phandle = <0x2c4>;
 				};
 			};
 
@@ -6300,7 +6300,7 @@
 					regulator-always-on;
 					regulator-boot-on;
 					regulator-name = "vdd2_ddr_s3";
-					phandle = <0x2bd>;
+					phandle = <0x2c5>;
 
 					regulator-state-mem {
 						regulator-on-in-suspend;
@@ -6327,7 +6327,7 @@
 					regulator-min-microvolt = <0x325aa0>;
 					regulator-max-microvolt = <0x325aa0>;
 					regulator-name = "vcc_3v3_s3";
-					phandle = <0x2be>;
+					phandle = <0x2c6>;
 
 					regulator-state-mem {
 						regulator-on-in-suspend;
@@ -6339,7 +6339,7 @@
 					regulator-always-on;
 					regulator-boot-on;
 					regulator-name = "vddq_ddr_s0";
-					phandle = <0x2bf>;
+					phandle = <0x2c7>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6352,7 +6352,7 @@
 					regulator-min-microvolt = <0x1b7740>;
 					regulator-max-microvolt = <0x1b7740>;
 					regulator-name = "vcc_1v8_s3";
-					phandle = <0x2c0>;
+					phandle = <0x2c8>;
 
 					regulator-state-mem {
 						regulator-on-in-suspend;
@@ -6366,7 +6366,7 @@
 					regulator-min-microvolt = <0x1b7740>;
 					regulator-max-microvolt = <0x1b7740>;
 					regulator-name = "avcc_1v8_s0";
-					phandle = <0x1dd>;
+					phandle = <0x1e6>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6393,7 +6393,7 @@
 					regulator-min-microvolt = <0x124f80>;
 					regulator-max-microvolt = <0x124f80>;
 					regulator-name = "avdd_1v2_s0";
-					phandle = <0x2c1>;
+					phandle = <0x2c9>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6406,7 +6406,7 @@
 					regulator-min-microvolt = <0x325aa0>;
 					regulator-max-microvolt = <0x325aa0>;
 					regulator-name = "vcc_3v3_s0";
-					phandle = <0x2c2>;
+					phandle = <0x2ca>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6432,7 +6432,7 @@
 					regulator-min-microvolt = <0x1b7740>;
 					regulator-max-microvolt = <0x1b7740>;
 					regulator-name = "pldo6_s3";
-					phandle = <0x2c3>;
+					phandle = <0x2cb>;
 
 					regulator-state-mem {
 						regulator-on-in-suspend;
@@ -6446,7 +6446,7 @@
 					regulator-min-microvolt = <0xb71b0>;
 					regulator-max-microvolt = <0xb71b0>;
 					regulator-name = "vdd_0v75_s3";
-					phandle = <0x2c4>;
+					phandle = <0x2cc>;
 
 					regulator-state-mem {
 						regulator-on-in-suspend;
@@ -6460,7 +6460,7 @@
 					regulator-min-microvolt = <0xcf850>;
 					regulator-max-microvolt = <0xcf850>;
 					regulator-name = "vdd_ddr_pll_s0";
-					phandle = <0x2c5>;
+					phandle = <0x2cd>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6474,7 +6474,7 @@
 					regulator-min-microvolt = <0xcc77c>;
 					regulator-max-microvolt = <0xcc77c>;
 					regulator-name = "avdd_0v75_s0";
-					phandle = <0x1de>;
+					phandle = <0x1e7>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6487,7 +6487,7 @@
 					regulator-min-microvolt = <0xcf850>;
 					regulator-max-microvolt = <0xcf850>;
 					regulator-name = "vdd_0v85_s0";
-					phandle = <0x1dc>;
+					phandle = <0x1e5>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6500,7 +6500,7 @@
 					regulator-min-microvolt = <0xb71b0>;
 					regulator-max-microvolt = <0xb71b0>;
 					regulator-name = "vdd_0v75_s0";
-					phandle = <0x2c6>;
+					phandle = <0x2ce>;
 
 					regulator-state-mem {
 						regulator-off-in-suspend;
@@ -6524,7 +6524,7 @@
 		pinctrl-0 = <0x163 0x164 0x165>;
 		num-cs = <0x2>;
 		status = "disabled";
-		phandle = <0x2c7>;
+		phandle = <0x2cf>;
 	};
 
 	serial@feb40000 {
@@ -6538,8 +6538,8 @@
 		dmas = <0x6f 0x8 0x6f 0x9>;
 		pinctrl-names = "default";
 		pinctrl-0 = <0x166>;
-		status = "okay";
-		phandle = <0x2c8>;
+		status = "disabled";
+		phandle = <0x2d0>;
 	};
 
 	serial@feb50000 {
@@ -6554,7 +6554,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <0x167>;
 		status = "disabled";
-		phandle = <0x2c9>;
+		phandle = <0x2d1>;
 	};
 
 	serial@feb60000 {
@@ -6569,7 +6569,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <0x168>;
 		status = "disabled";
-		phandle = <0x2ca>;
+		phandle = <0x2d2>;
 	};
 
 	serial@feb70000 {
@@ -6583,8 +6583,8 @@
 		dmas = <0xe5 0x9 0xe5 0xa>;
 		pinctrl-names = "default";
 		pinctrl-0 = <0x169>;
-		status = "okay";
-		phandle = <0x2cb>;
+		status = "disabled";
+		phandle = <0x2d3>;
 	};
 
 	serial@feb80000 {
@@ -6599,7 +6599,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <0x16a>;
 		status = "okay";
-		phandle = <0x2cc>;
+		phandle = <0x2d4>;
 	};
 
 	serial@feb90000 {
@@ -6614,7 +6614,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <0x16b>;
 		status = "okay";
-		phandle = <0x2cd>;
+		phandle = <0x2d5>;
 	};
 
 	serial@feba0000 {
@@ -6628,8 +6628,8 @@
 		dmas = <0xe6 0x7 0xe6 0x8>;
 		pinctrl-names = "default";
 		pinctrl-0 = <0x16c>;
-		status = "okay";
-		phandle = <0x2ce>;
+		status = "disabled";
+		phandle = <0x2d6>;
 	};
 
 	serial@febb0000 {
@@ -6644,7 +6644,7 @@
 		pinctrl-names = "default";
 		pinctrl-0 = <0x16d>;
 		status = "disabled";
-		phandle = <0x2cf>;
+		phandle = <0x2d7>;
 	};
 
 	serial@febc0000 {
@@ -6658,8 +6658,8 @@
 		dmas = <0xe6 0xb 0xe6 0xc>;
 		pinctrl-names = "default";
 		pinctrl-0 = <0x16e 0x16f>;
-		status = "okay";
-		phandle = <0x2d0>;
+		status = "disabled";
+		phandle = <0x2d8>;
 	};
 
 	pwm@febd0000 {
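
Across this run of serial@feb* nodes, four UARTs (feb40000, feb70000, feba0000, febc0000) flip from "okay" to "disabled" while feb80000 and feb90000 stay up; the remaining edits are the phandle shift. If one of the disabled ports is needed again, the conventional route is a board-level override rather than editing the decompiled dump. A sketch, with hypothetical labels (the dump above defines none):

	&uart9 {				/* serial@febc0000 */
		pinctrl-names = "default";
		pinctrl-0 = <&uart9m0_xfer>;
		status = "okay";
	};
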
@@ -6672,7 +6672,7 @@
 		clocks = <0x2 0x54 0x2 0x53>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d1>;
+		phandle = <0x2d9>;
 	};
 
 	pwm@febd0010 {
@@ -6685,7 +6685,7 @@
 		clocks = <0x2 0x54 0x2 0x53>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d2>;
+		phandle = <0x2da>;
 	};
 
 	pwm@febd0020 {
@@ -6698,7 +6698,7 @@
 		clocks = <0x2 0x54 0x2 0x53>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d3>;
+		phandle = <0x2db>;
 	};
 
 	pwm@febd0030 {
@@ -6711,7 +6711,7 @@
 		clocks = <0x2 0x54 0x2 0x53>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d4>;
+		phandle = <0x2dc>;
 	};
 
 	pwm@febe0000 {
@@ -6724,7 +6724,7 @@
 		clocks = <0x2 0x57 0x2 0x56>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d5>;
+		phandle = <0x2dd>;
 	};
 
 	pwm@febe0010 {
@@ -6737,7 +6737,7 @@
 		clocks = <0x2 0x57 0x2 0x56>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d6>;
+		phandle = <0x2de>;
 	};
 
 	pwm@febe0020 {
@@ -6750,7 +6750,7 @@
 		clocks = <0x2 0x57 0x2 0x56>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d7>;
+		phandle = <0x2df>;
 	};
 
 	pwm@febe0030 {
@@ -6763,7 +6763,7 @@
 		clocks = <0x2 0x57 0x2 0x56>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x1c6>;
+		phandle = <0x1cf>;
 	};
 
 	pwm@febf0000 {
@@ -6776,7 +6776,7 @@
 		clocks = <0x2 0x5a 0x2 0x59>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d8>;
+		phandle = <0x2e0>;
 	};
 
 	pwm@febf0010 {
@@ -6789,7 +6789,7 @@
 		clocks = <0x2 0x5a 0x2 0x59>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2d9>;
+		phandle = <0x2e1>;
 	};
 
 	pwm@febf0020 {
@@ -6802,7 +6802,7 @@
 		clocks = <0x2 0x5a 0x2 0x59>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2da>;
+		phandle = <0x2e2>;
 	};
 
 	pwm@febf0030 {
@@ -6815,7 +6815,7 @@
 		clocks = <0x2 0x5a 0x2 0x59>;
 		clock-names = "pwm", "pclk";
 		status = "disabled";
-		phandle = <0x2db>;
+		phandle = <0x2e3>;
 	};
 
 	tsadc@fec00000 {
@@ -6850,7 +6850,7 @@
 		reset-names = "saradc-apb";
 		status = "okay";
 		vref-supply = <0x17e>;
-		phandle = <0x1c4>;
+		phandle = <0x1cd>;
 	};
 
 	mailbox@fec60000 {
@@ -6861,7 +6861,7 @@
 		clock-names = "pclk_mailbox";
 		#mbox-cells = <0x1>;
 		status = "disabled";
-		phandle = <0x2dc>;
+		phandle = <0x2e4>;
 	};
 
 	mailbox@fec70000 {
@@ -6872,7 +6872,7 @@
 		clock-names = "pclk_mailbox";
 		#mbox-cells = <0x1>;
 		status = "disabled";
-		phandle = <0x2dd>;
+		phandle = <0x2e5>;
 	};
 
 	i2c@fec80000 {
@@ -6886,7 +6886,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "okay";
-		phandle = <0x2de>;
+		phandle = <0x2e6>;
 
 		nkmcu@15 {
 			compatible = "nk_mcu";
@@ -6904,7 +6904,7 @@
 			interrupt-parent = <0x15b>;
 			interrupts = <0x8 0x8>;
 			wakeup-source;
-			phandle = <0x1df>;
+			phandle = <0x1e8>;
 		};
 
 		eeprom@50 {
@@ -6929,7 +6929,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x2df>;
+		phandle = <0x2e7>;
 	};
 
 	i2c@feca0000 {
@@ -6943,7 +6943,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x2e0>;
+		phandle = <0x2e8>;
 	};
 
 	spi@fecb0000 {
@@ -6960,7 +6960,7 @@
 		pinctrl-0 = <0x183 0x184 0x185>;
 		num-cs = <0x2>;
 		status = "disabled";
-		phandle = <0x2e1>;
+		phandle = <0x2e9>;
 	};
 
 	otp@fecc0000 {
@@ -6972,7 +6972,7 @@
 		clock-names = "otpc", "apb", "arb", "phy";
 		resets = <0x2 0x12a 0x2 0x129 0x2 0x12b>;
 		reset-names = "otpc", "apb", "arb";
-		phandle = <0x2e2>;
+		phandle = <0x2ea>;
 
 		cpu-code@2 {
 			reg = <0x2 0x2>;
@@ -7075,7 +7075,7 @@
 
 		vop-opp-info@61 {
 			reg = <0x61 0x6>;
-			phandle = <0x2e3>;
+			phandle = <0x2eb>;
 		};
 
 		venc-opp-info@67 {
@@ -7092,7 +7092,7 @@
 		clock-names = "pclk_mailbox";
 		#mbox-cells = <0x1>;
 		status = "disabled";
-		phandle = <0x2e4>;
+		phandle = <0x2ec>;
 	};
 
 	dma-controller@fed10000 {
@@ -7150,7 +7150,7 @@
 		resets = <0x2 0x28 0x2 0x29 0x2 0x2a 0x2 0x2b 0x2 0x482>;
 		reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
 		status = "okay";
-		phandle = <0x2e5>;
+		phandle = <0x2ed>;
 
 		dp-port {
 			#phy-cells = <0x0>;
@@ -7256,7 +7256,7 @@
 		#address-cells = <0x1>;
 		#size-cells = <0x1>;
 		ranges = <0x0 0x0 0xff001000 0xef000>;
-		phandle = <0x2e6>;
+		phandle = <0x2ee>;
 
 		rkvdec-sram@0 {
 			reg = <0x0 0x78000>;
@@ -7313,7 +7313,7 @@
 			gpio-ranges = <0x191 0x0 0x40 0x20>;
 			interrupt-controller;
 			#interrupt-cells = <0x2>;
-			phandle = <0x1e8>;
+			phandle = <0x1be>;
 		};
 
 		gpio@fec40000 {
@@ -7360,13 +7360,13 @@
 		pcfg-pull-none-drv-level-0 {
 			bias-disable;
 			drive-strength = <0x0>;
-			phandle = <0x2e7>;
+			phandle = <0x2ef>;
 		};
 
 		pcfg-pull-none-drv-level-1 {
 			bias-disable;
 			drive-strength = <0x1>;
-			phandle = <0x2e8>;
+			phandle = <0x2f0>;
 		};
 
 		pcfg-pull-none-drv-level-2 {
@@ -7378,31 +7378,31 @@
 		pcfg-pull-none-drv-level-3 {
 			bias-disable;
 			drive-strength = <0x3>;
-			phandle = <0x2e9>;
+			phandle = <0x2f1>;
 		};
 
 		pcfg-pull-none-drv-level-4 {
 			bias-disable;
 			drive-strength = <0x4>;
-			phandle = <0x2ea>;
+			phandle = <0x2f2>;
 		};
 
 		pcfg-pull-none-drv-level-5 {
 			bias-disable;
 			drive-strength = <0x5>;
-			phandle = <0x2eb>;
+			phandle = <0x2f3>;
 		};
 
 		pcfg-pull-none-drv-level-6 {
 			bias-disable;
 			drive-strength = <0x6>;
-			phandle = <0x2ec>;
+			phandle = <0x2f4>;
 		};
 
 		pcfg-pull-up-drv-level-0 {
 			bias-pull-up;
 			drive-strength = <0x0>;
-			phandle = <0x2ed>;
+			phandle = <0x2f5>;
 		};
 
 		pcfg-pull-up-drv-level-1 {
@@ -7420,19 +7420,19 @@
 		pcfg-pull-up-drv-level-3 {
 			bias-pull-up;
 			drive-strength = <0x3>;
-			phandle = <0x2ee>;
+			phandle = <0x2f6>;
 		};
 
 		pcfg-pull-up-drv-level-4 {
 			bias-pull-up;
 			drive-strength = <0x4>;
-			phandle = <0x2ef>;
+			phandle = <0x2f7>;
 		};
 
 		pcfg-pull-up-drv-level-5 {
 			bias-pull-up;
 			drive-strength = <0x5>;
-			phandle = <0x2f0>;
+			phandle = <0x2f8>;
 		};
 
 		pcfg-pull-up-drv-level-6 {
@@ -7444,55 +7444,55 @@
 		pcfg-pull-down-drv-level-0 {
 			bias-pull-down;
 			drive-strength = <0x0>;
-			phandle = <0x2f1>;
+			phandle = <0x2f9>;
 		};
 
 		pcfg-pull-down-drv-level-1 {
 			bias-pull-down;
 			drive-strength = <0x1>;
-			phandle = <0x2f2>;
+			phandle = <0x2fa>;
 		};
 
 		pcfg-pull-down-drv-level-2 {
 			bias-pull-down;
 			drive-strength = <0x2>;
-			phandle = <0x2f3>;
+			phandle = <0x2fb>;
 		};
 
 		pcfg-pull-down-drv-level-3 {
 			bias-pull-down;
 			drive-strength = <0x3>;
-			phandle = <0x2f4>;
+			phandle = <0x2fc>;
 		};
 
 		pcfg-pull-down-drv-level-4 {
 			bias-pull-down;
 			drive-strength = <0x4>;
-			phandle = <0x2f5>;
+			phandle = <0x2fd>;
 		};
 
 		pcfg-pull-down-drv-level-5 {
 			bias-pull-down;
 			drive-strength = <0x5>;
-			phandle = <0x2f6>;
+			phandle = <0x2fe>;
 		};
 
 		pcfg-pull-down-drv-level-6 {
 			bias-pull-down;
 			drive-strength = <0x6>;
-			phandle = <0x2f7>;
+			phandle = <0x2ff>;
 		};
 
 		pcfg-pull-up-smt {
 			bias-pull-up;
 			input-schmitt-enable;
-			phandle = <0x2f8>;
+			phandle = <0x300>;
 		};
 
 		pcfg-pull-down-smt {
 			bias-pull-down;
 			input-schmitt-enable;
-			phandle = <0x2f9>;
+			phandle = <0x301>;
 		};
 
 		pcfg-pull-none-smt {
@@ -7505,7 +7505,7 @@
 			bias-disable;
 			drive-strength = <0x0>;
 			input-schmitt-enable;
-			phandle = <0x2fa>;
+			phandle = <0x302>;
 		};
 
 		pcfg-pull-none-drv-level-1-smt {
@@ -7519,21 +7519,21 @@
 			bias-disable;
 			drive-strength = <0x2>;
 			input-schmitt-enable;
-			phandle = <0x2fb>;
+			phandle = <0x303>;
 		};
 
 		pcfg-pull-none-drv-level-3-smt {
 			bias-disable;
 			drive-strength = <0x3>;
 			input-schmitt-enable;
-			phandle = <0x2fc>;
+			phandle = <0x304>;
 		};
 
 		pcfg-pull-none-drv-level-4-smt {
 			bias-disable;
 			drive-strength = <0x4>;
 			input-schmitt-enable;
-			phandle = <0x2fd>;
+			phandle = <0x305>;
 		};
 
 		pcfg-pull-none-drv-level-5-smt {
@@ -7547,53 +7547,53 @@
 			bias-disable;
 			drive-strength = <0x6>;
 			input-schmitt-enable;
-			phandle = <0x2fe>;
+			phandle = <0x306>;
 		};
 
 		pcfg-output-high {
 			output-high;
-			phandle = <0x2ff>;
+			phandle = <0x307>;
 		};
 
 		pcfg-output-high-pull-up {
 			output-high;
 			bias-pull-up;
-			phandle = <0x300>;
+			phandle = <0x308>;
 		};
 
 		pcfg-output-high-pull-down {
 			output-high;
 			bias-pull-down;
-			phandle = <0x301>;
+			phandle = <0x309>;
 		};
 
 		pcfg-output-high-pull-none {
 			output-high;
 			bias-disable;
-			phandle = <0x302>;
+			phandle = <0x30a>;
 		};
 
 		pcfg-output-low {
 			output-low;
-			phandle = <0x303>;
+			phandle = <0x30b>;
 		};
 
 		pcfg-output-low-pull-up {
 			output-low;
 			bias-pull-up;
-			phandle = <0x304>;
+			phandle = <0x30c>;
 		};
 
 		pcfg-output-low-pull-down {
 			output-low;
 			bias-pull-down;
-			phandle = <0x305>;
+			phandle = <0x30d>;
 		};
 
 		pcfg-output-low-pull-none {
 			output-low;
 			bias-disable;
-			phandle = <0x306>;
+			phandle = <0x30e>;
 		};
 
 		auddsm {
@@ -7621,7 +7621,7 @@
 
 			can0m1-pins {
 				rockchip,pins = <0x4 0x1d 0x9 0x192 0x4 0x1c 0x9 0x192>;
-				phandle = <0x307>;
+				phandle = <0x30f>;
 			};
 		};
 
@@ -7629,7 +7629,7 @@
 
 			can1m0-pins {
 				rockchip,pins = <0x3 0xd 0x9 0x192 0x3 0xe 0x9 0x192>;
-				phandle = <0x308>;
+				phandle = <0x310>;
 			};
 
 			can1m1-pins {
@@ -7647,7 +7647,7 @@
 
 			can2m1-pins {
 				rockchip,pins = <0x0 0x1c 0xa 0x192 0x0 0x1d 0xa 0x192>;
-				phandle = <0x309>;
+				phandle = <0x311>;
 			};
 		};
 
@@ -7655,22 +7655,22 @@
 
 			cif-clk {
 				rockchip,pins = <0x4 0xc 0x1 0x192>;
-				phandle = <0x30a>;
+				phandle = <0x312>;
 			};
 
 			cif-dvp-clk {
 				rockchip,pins = <0x4 0x8 0x1 0x192 0x4 0xa 0x1 0x192 0x4 0xb 0x1 0x192>;
-				phandle = <0x30b>;
+				phandle = <0x313>;
 			};
 
 			cif-dvp-bus16 {
 				rockchip,pins = <0x3 0x14 0x1 0x192 0x3 0x15 0x1 0x192 0x3 0x16 0x1 0x192 0x3 0x17 0x1 0x192 0x3 0x18 0x1 0x192 0x3 0x19 0x1 0x192 0x3 0x1a 0x1 0x192 0x3 0x1b 0x1 0x192>;
-				phandle = <0x30c>;
+				phandle = <0x314>;
 			};
 
 			cif-dvp-bus8 {
 				rockchip,pins = <0x4 0x0 0x1 0x192 0x4 0x1 0x1 0x192 0x4 0x2 0x1 0x192 0x4 0x3 0x1 0x192 0x4 0x4 0x1 0x192 0x4 0x5 0x1 0x192 0x4 0x6 0x1 0x192 0x4 0x7 0x1 0x192>;
-				phandle = <0x30d>;
+				phandle = <0x315>;
 			};
 		};
 
@@ -7678,17 +7678,17 @@
 
 			clk32k-in {
 				rockchip,pins = <0x0 0xa 0x1 0x192>;
-				phandle = <0x30e>;
+				phandle = <0x316>;
 			};
 
 			clk32k-out0 {
 				rockchip,pins = <0x0 0xa 0x2 0x192>;
-				phandle = <0x30f>;
+				phandle = <0x317>;
 			};
 
 			clk32k-out1 {
 				rockchip,pins = <0x2 0x15 0x1 0x192>;
-				phandle = <0x310>;
+				phandle = <0x318>;
 			};
 		};
 
@@ -7696,7 +7696,7 @@
 
 			cpu-pins {
 				rockchip,pins = <0x0 0x19 0x2 0x192 0x0 0x1d 0x2 0x192>;
-				phandle = <0x311>;
+				phandle = <0x319>;
 			};
 		};
 
@@ -7704,7 +7704,7 @@
 
 			ddrphych0-pins {
 				rockchip,pins = <0x4 0x0 0x7 0x192 0x4 0x1 0x7 0x192 0x4 0x2 0x7 0x192 0x4 0x3 0x7 0x192>;
-				phandle = <0x312>;
+				phandle = <0x31a>;
 			};
 		};
 
@@ -7712,7 +7712,7 @@
 
 			ddrphych1-pins {
 				rockchip,pins = <0x4 0x4 0x7 0x192 0x4 0x5 0x7 0x192 0x4 0x6 0x7 0x192 0x4 0x7 0x7 0x192>;
-				phandle = <0x313>;
+				phandle = <0x31b>;
 			};
 		};
 
@@ -7720,7 +7720,7 @@
 
 			ddrphych2-pins {
 				rockchip,pins = <0x4 0x8 0x7 0x192 0x4 0x9 0x7 0x192 0x4 0xa 0x7 0x192 0x4 0xb 0x7 0x192>;
-				phandle = <0x314>;
+				phandle = <0x31c>;
 			};
 		};
 
@@ -7728,7 +7728,7 @@
 
 			ddrphych3-pins {
 				rockchip,pins = <0x4 0xc 0x7 0x192 0x4 0xd 0x7 0x192 0x4 0xe 0x7 0x192 0x4 0xf 0x7 0x192>;
-				phandle = <0x315>;
+				phandle = <0x31d>;
 			};
 		};
 
@@ -7736,17 +7736,17 @@
 
 			dp0m0-pins {
 				rockchip,pins = <0x4 0xc 0x5 0x192>;
-				phandle = <0x316>;
+				phandle = <0x31e>;
 			};
 
 			dp0m1-pins {
 				rockchip,pins = <0x0 0x14 0xa 0x192>;
-				phandle = <0x317>;
+				phandle = <0x31f>;
 			};
 
 			dp0m2-pins {
 				rockchip,pins = <0x1 0x0 0x5 0x192>;
-				phandle = <0x318>;
+				phandle = <0x320>;
 			};
 		};
 
@@ -7759,12 +7759,12 @@
 
 			dp1m1-pins {
 				rockchip,pins = <0x0 0x15 0xa 0x192>;
-				phandle = <0x319>;
+				phandle = <0x321>;
 			};
 
 			dp1m2-pins {
 				rockchip,pins = <0x1 0x1 0x5 0x192>;
-				phandle = <0x31a>;
+				phandle = <0x322>;
 			};
 		};
 
@@ -7772,27 +7772,27 @@
 
 			emmc-rstnout {
 				rockchip,pins = <0x2 0x3 0x1 0x192>;
-				phandle = <0x31b>;
+				phandle = <0x323>;
 			};
 
 			emmc-bus8 {
 				rockchip,pins = <0x2 0x18 0x1 0x193 0x2 0x19 0x1 0x193 0x2 0x1a 0x1 0x193 0x2 0x1b 0x1 0x193 0x2 0x1c 0x1 0x193 0x2 0x1d 0x1 0x193 0x2 0x1e 0x1 0x193 0x2 0x1f 0x1 0x193>;
-				phandle = <0x31c>;
+				phandle = <0x324>;
 			};
 
 			emmc-clk {
 				rockchip,pins = <0x2 0x1 0x1 0x193>;
-				phandle = <0x31d>;
+				phandle = <0x325>;
 			};
 
 			emmc-cmd {
 				rockchip,pins = <0x2 0x0 0x1 0x193>;
-				phandle = <0x31e>;
+				phandle = <0x326>;
 			};
 
 			emmc-data-strobe {
 				rockchip,pins = <0x2 0x2 0x1 0x192>;
-				phandle = <0x31f>;
+				phandle = <0x327>;
 			};
 		};
 
@@ -7800,7 +7800,7 @@
 
 			eth1-pins {
 				rockchip,pins = <0x3 0x6 0x1 0x192>;
-				phandle = <0x320>;
+				phandle = <0x328>;
 			};
 		};
 
@@ -7808,32 +7808,32 @@
 
 			fspim0-pins {
 				rockchip,pins = <0x2 0x0 0x2 0x193 0x2 0x1e 0x2 0x193 0x2 0x18 0x2 0x193 0x2 0x19 0x2 0x193 0x2 0x1a 0x2 0x193 0x2 0x1b 0x2 0x193>;
-				phandle = <0x321>;
+				phandle = <0x329>;
 			};
 
 			fspim0-cs1 {
 				rockchip,pins = <0x2 0x1f 0x2 0x193>;
-				phandle = <0x322>;
+				phandle = <0x32a>;
 			};
 
 			fspim2-pins {
 				rockchip,pins = <0x3 0x5 0x5 0x193 0x3 0x14 0x2 0x193 0x3 0x0 0x5 0x193 0x3 0x1 0x5 0x193 0x3 0x2 0x5 0x193 0x3 0x3 0x5 0x193>;
-				phandle = <0x323>;
+				phandle = <0x32b>;
 			};
 
 			fspim2-cs1 {
 				rockchip,pins = <0x3 0x15 0x2 0x193>;
-				phandle = <0x324>;
+				phandle = <0x32c>;
 			};
 
 			fspim1-pins {
 				rockchip,pins = <0x2 0xb 0x3 0x193 0x2 0xc 0x3 0x193 0x2 0x6 0x3 0x193 0x2 0x7 0x3 0x193 0x2 0x8 0x3 0x193 0x2 0x9 0x3 0x193>;
-				phandle = <0x325>;
+				phandle = <0x32d>;
 			};
 
 			fspim1-cs1 {
 				rockchip,pins = <0x2 0xd 0x3 0x193>;
-				phandle = <0x326>;
+				phandle = <0x32e>;
 			};
 		};
 
@@ -7846,7 +7846,7 @@
 
 			gmac1-clkinout {
 				rockchip,pins = <0x3 0xe 0x1 0x192>;
-				phandle = <0x327>;
+				phandle = <0x32f>;
 			};
 
 			gmac1-rx-bus2 {
@@ -7871,22 +7871,22 @@
 
 			gmac1-ppsclk {
 				rockchip,pins = <0x3 0x11 0x1 0x192>;
-				phandle = <0x328>;
+				phandle = <0x330>;
 			};
 
 			gmac1-ppstrig {
 				rockchip,pins = <0x3 0x10 0x1 0x192>;
-				phandle = <0x329>;
+				phandle = <0x331>;
 			};
 
 			gmac1-ptp-ref-clk {
 				rockchip,pins = <0x3 0xf 0x1 0x192>;
-				phandle = <0x32a>;
+				phandle = <0x332>;
 			};
 
 			gmac1-txer {
 				rockchip,pins = <0x3 0xa 0x1 0x192>;
-				phandle = <0x32b>;
+				phandle = <0x333>;
 			};
 		};
 
@@ -7894,7 +7894,7 @@
 
 			gpu-pins {
 				rockchip,pins = <0x0 0x15 0x2 0x192>;
-				phandle = <0x32c>;
+				phandle = <0x334>;
 			};
 		};
 
@@ -7902,22 +7902,22 @@
 
 			hdmim0-rx-cec {
 				rockchip,pins = <0x4 0xd 0x5 0x192>;
-				phandle = <0x32d>;
+				phandle = <0x335>;
 			};
 
 			hdmim0-rx-hpdin {
 				rockchip,pins = <0x4 0xe 0x5 0x192>;
-				phandle = <0x32e>;
+				phandle = <0x336>;
 			};
 
 			hdmim0-rx-scl {
 				rockchip,pins = <0x0 0x1a 0xb 0x192>;
-				phandle = <0x32f>;
+				phandle = <0x337>;
 			};
 
 			hdmim0-rx-sda {
 				rockchip,pins = <0x0 0x19 0xb 0x192>;
-				phandle = <0x330>;
+				phandle = <0x338>;
 			};
 
 			hdmim0-tx0-cec {
@@ -7952,52 +7952,52 @@
 
 			hdmim1-rx-cec {
 				rockchip,pins = <0x3 0x19 0x5 0x192>;
-				phandle = <0x331>;
+				phandle = <0x339>;
 			};
 
 			hdmim1-rx-hpdin {
 				rockchip,pins = <0x3 0x1c 0x5 0x192>;
-				phandle = <0x332>;
+				phandle = <0x33a>;
 			};
 
 			hdmim1-rx-scl {
 				rockchip,pins = <0x3 0x1a 0x5 0x196>;
-				phandle = <0x333>;
+				phandle = <0x33b>;
 			};
 
 			hdmim1-rx-sda {
 				rockchip,pins = <0x3 0x1b 0x5 0x196>;
-				phandle = <0x334>;
+				phandle = <0x33c>;
 			};
 
 			hdmim1-tx0-cec {
 				rockchip,pins = <0x0 0x19 0xd 0x192>;
-				phandle = <0x335>;
+				phandle = <0x33d>;
 			};
 
 			hdmim1-tx0-hpd {
 				rockchip,pins = <0x3 0x1c 0x3 0x192>;
-				phandle = <0x336>;
+				phandle = <0x33e>;
 			};
 
 			hdmim1-tx0-scl {
 				rockchip,pins = <0x0 0x1d 0xb 0x194>;
-				phandle = <0x337>;
+				phandle = <0x33f>;
 			};
 
 			hdmim1-tx0-sda {
 				rockchip,pins = <0x0 0x1c 0xb 0x195>;
-				phandle = <0x338>;
+				phandle = <0x340>;
 			};
 
 			hdmim1-tx1-cec {
 				rockchip,pins = <0x0 0x1a 0xd 0x192>;
-				phandle = <0x339>;
+				phandle = <0x341>;
 			};
 
 			hdmim1-tx1-hpd {
 				rockchip,pins = <0x3 0xf 0x5 0x192>;
-				phandle = <0x33a>;
+				phandle = <0x342>;
 			};
 
 			hdmim1-tx1-scl {
@@ -8012,32 +8012,32 @@
 
 			hdmim2-rx-cec {
 				rockchip,pins = <0x1 0xf 0x5 0x192>;
-				phandle = <0x33b>;
+				phandle = <0x343>;
 			};
 
 			hdmim2-rx-hpdin {
 				rockchip,pins = <0x1 0xe 0x5 0x192>;
-				phandle = <0x33c>;
+				phandle = <0x344>;
 			};
 
 			hdmim2-rx-scl {
 				rockchip,pins = <0x1 0x1e 0x5 0x192>;
-				phandle = <0x33d>;
+				phandle = <0x345>;
 			};
 
 			hdmim2-rx-sda {
 				rockchip,pins = <0x1 0x1f 0x5 0x192>;
-				phandle = <0x33e>;
+				phandle = <0x346>;
 			};
 
 			hdmim2-tx0-scl {
 				rockchip,pins = <0x3 0x17 0x5 0x194>;
-				phandle = <0x33f>;
+				phandle = <0x347>;
 			};
 
 			hdmim2-tx0-sda {
 				rockchip,pins = <0x3 0x18 0x5 0x195>;
-				phandle = <0x340>;
+				phandle = <0x348>;
 			};
 
 			hdmim2-tx1-cec {
@@ -8047,62 +8047,62 @@
 
 			hdmim2-tx1-scl {
 				rockchip,pins = <0x1 0x4 0x5 0x194>;
-				phandle = <0x341>;
+				phandle = <0x349>;
 			};
 
 			hdmim2-tx1-sda {
 				rockchip,pins = <0x1 0x3 0x5 0x195>;
-				phandle = <0x342>;
+				phandle = <0x34a>;
 			};
 
 			hdmi-debug0 {
 				rockchip,pins = <0x1 0x7 0x7 0x192>;
-				phandle = <0x343>;
+				phandle = <0x34b>;
 			};
 
 			hdmi-debug1 {
 				rockchip,pins = <0x1 0x8 0x7 0x192>;
-				phandle = <0x344>;
+				phandle = <0x34c>;
 			};
 
 			hdmi-debug2 {
 				rockchip,pins = <0x1 0x9 0x7 0x192>;
-				phandle = <0x345>;
+				phandle = <0x34d>;
 			};
 
 			hdmi-debug3 {
 				rockchip,pins = <0x1 0xa 0x7 0x192>;
-				phandle = <0x346>;
+				phandle = <0x34e>;
 			};
 
 			hdmi-debug4 {
 				rockchip,pins = <0x1 0xb 0x7 0x192>;
-				phandle = <0x347>;
+				phandle = <0x34f>;
 			};
 
 			hdmi-debug5 {
 				rockchip,pins = <0x1 0xc 0x7 0x192>;
-				phandle = <0x348>;
+				phandle = <0x350>;
 			};
 
 			hdmi-debug6 {
 				rockchip,pins = <0x1 0x0 0x7 0x192>;
-				phandle = <0x349>;
+				phandle = <0x351>;
 			};
 
 			hdmim0-tx1-cec {
 				rockchip,pins = <0x2 0x14 0x4 0x192>;
-				phandle = <0x34a>;
+				phandle = <0x352>;
 			};
 
 			hdmim0-tx1-scl {
 				rockchip,pins = <0x2 0xd 0x4 0x192>;
-				phandle = <0x34b>;
+				phandle = <0x353>;
 			};
 
 			hdmim0-tx1-sda {
 				rockchip,pins = <0x2 0xc 0x4 0x192>;
-				phandle = <0x34c>;
+				phandle = <0x354>;
 			};
 
 			hdmirx-det {
@@ -8115,7 +8115,7 @@
 
 			i2c0m0-xfer {
 				rockchip,pins = <0x0 0xb 0x2 0x196 0x0 0x6 0x2 0x196>;
-				phandle = <0x34d>;
+				phandle = <0x355>;
 			};
 
 			i2c0m2-xfer {
@@ -8125,7 +8125,7 @@
 
 			i2c0m1-xfer {
 				rockchip,pins = <0x4 0x15 0x9 0x196 0x4 0x16 0x9 0x196>;
-				phandle = <0x34e>;
+				phandle = <0x356>;
 			};
 		};
 
@@ -8133,12 +8133,12 @@
 
 			i2c1m0-xfer {
 				rockchip,pins = <0x0 0xd 0x9 0x196 0x0 0xe 0x9 0x196>;
-				phandle = <0x34f>;
+				phandle = <0x357>;
 			};
 
 			i2c1m1-xfer {
 				rockchip,pins = <0x0 0x8 0x2 0x196 0x0 0x9 0x2 0x196>;
-				phandle = <0x350>;
+				phandle = <0x358>;
 			};
 
 			i2c1m2-xfer {
@@ -8148,12 +8148,12 @@
 
 			i2c1m3-xfer {
 				rockchip,pins = <0x2 0x1c 0x9 0x196 0x2 0x1d 0x9 0x196>;
-				phandle = <0x351>;
+				phandle = <0x359>;
 			};
 
 			i2c1m4-xfer {
 				rockchip,pins = <0x1 0x1a 0x9 0x196 0x1 0x1b 0x9 0x196>;
-				phandle = <0x352>;
+				phandle = <0x35a>;
 			};
 		};
 
@@ -8166,22 +8166,22 @@
 
 			i2c2m2-xfer {
 				rockchip,pins = <0x2 0x3 0x9 0x196 0x2 0x2 0x9 0x196>;
-				phandle = <0x353>;
+				phandle = <0x35b>;
 			};
 
 			i2c2m3-xfer {
 				rockchip,pins = <0x1 0x15 0x9 0x196 0x1 0x14 0x9 0x196>;
-				phandle = <0x354>;
+				phandle = <0x35c>;
 			};
 
 			i2c2m4-xfer {
 				rockchip,pins = <0x1 0x1 0x9 0x196 0x1 0x0 0x9 0x196>;
-				phandle = <0x355>;
+				phandle = <0x35d>;
 			};
 
 			i2c2m1-xfer {
 				rockchip,pins = <0x2 0x11 0x9 0x196 0x2 0x10 0x9 0x196>;
-				phandle = <0x356>;
+				phandle = <0x35e>;
 			};
 		};
 
@@ -8194,22 +8194,22 @@
 
 			i2c3m1-xfer {
 				rockchip,pins = <0x3 0xf 0x9 0x196 0x3 0x10 0x9 0x196>;
-				phandle = <0x357>;
+				phandle = <0x35f>;
 			};
 
 			i2c3m2-xfer {
 				rockchip,pins = <0x4 0x4 0x9 0x196 0x4 0x5 0x9 0x196>;
-				phandle = <0x358>;
+				phandle = <0x360>;
 			};
 
 			i2c3m4-xfer {
 				rockchip,pins = <0x4 0x18 0x9 0x196 0x4 0x19 0x9 0x196>;
-				phandle = <0x359>;
+				phandle = <0x361>;
 			};
 
 			i2c3m3-xfer {
 				rockchip,pins = <0x2 0xa 0x9 0x196 0x2 0xb 0x9 0x196>;
-				phandle = <0x35a>;
+				phandle = <0x362>;
 			};
 		};
 
@@ -8217,22 +8217,22 @@
 
 			i2c4m0-xfer {
 				rockchip,pins = <0x3 0x6 0x9 0x196 0x3 0x5 0x9 0x196>;
-				phandle = <0x35b>;
+				phandle = <0x363>;
 			};
 
 			i2c4m2-xfer {
 				rockchip,pins = <0x0 0x15 0x9 0x196 0x0 0x14 0x9 0x196>;
-				phandle = <0x35c>;
+				phandle = <0x364>;
 			};
 
 			i2c4m3-xfer {
 				rockchip,pins = <0x1 0x3 0x9 0x196 0x1 0x2 0x9 0x196>;
-				phandle = <0x35d>;
+				phandle = <0x365>;
 			};
 
 			i2c4m4-xfer {
 				rockchip,pins = <0x1 0x17 0x9 0x196 0x1 0x16 0x9 0x196>;
-				phandle = <0x35e>;
+				phandle = <0x366>;
 			};
 
 			i2c4m1-xfer {
@@ -8250,22 +8250,22 @@
 
 			i2c5m1-xfer {
 				rockchip,pins = <0x4 0xe 0x9 0x196 0x4 0xf 0x9 0x196>;
-				phandle = <0x35f>;
+				phandle = <0x367>;
 			};
 
 			i2c5m2-xfer {
 				rockchip,pins = <0x4 0x6 0x9 0x196 0x4 0x7 0x9 0x196>;
-				phandle = <0x360>;
+				phandle = <0x368>;
 			};
 
 			i2c5m3-xfer {
 				rockchip,pins = <0x1 0xe 0x9 0x196 0x1 0xf 0x9 0x196>;
-				phandle = <0x361>;
+				phandle = <0x369>;
 			};
 
 			i2c5m4-xfer {
 				rockchip,pins = <0x2 0xe 0x9 0x196 0x2 0xf 0x9 0x196>;
-				phandle = <0x362>;
+				phandle = <0x36a>;
 			};
 		};
 
@@ -8278,22 +8278,22 @@
 
 			i2c6m1-xfer {
 				rockchip,pins = <0x1 0x13 0x9 0x196 0x1 0x12 0x9 0x196>;
-				phandle = <0x363>;
+				phandle = <0x36b>;
 			};
 
 			i2c6m3-xfer {
 				rockchip,pins = <0x4 0x9 0x9 0x196 0x4 0x8 0x9 0x196>;
-				phandle = <0x364>;
+				phandle = <0x36c>;
 			};
 
 			i2c6m4-xfer {
 				rockchip,pins = <0x3 0x1 0x9 0x196 0x3 0x0 0x9 0x196>;
-				phandle = <0x365>;
+				phandle = <0x36d>;
 			};
 
 			i2c6m2-xfer {
 				rockchip,pins = <0x2 0x13 0x9 0x196 0x2 0x12 0x9 0x196>;
-				phandle = <0x366>;
+				phandle = <0x36e>;
 			};
 		};
 
@@ -8306,17 +8306,17 @@
 
 			i2c7m2-xfer {
 				rockchip,pins = <0x3 0x1a 0x9 0x196 0x3 0x1b 0x9 0x196>;
-				phandle = <0x367>;
+				phandle = <0x36f>;
 			};
 
 			i2c7m3-xfer {
 				rockchip,pins = <0x4 0xa 0x9 0x196 0x4 0xb 0x9 0x196>;
-				phandle = <0x368>;
+				phandle = <0x370>;
 			};
 
 			i2c7m1-xfer {
 				rockchip,pins = <0x4 0x13 0x9 0x196 0x4 0x14 0x9 0x196>;
-				phandle = <0x369>;
+				phandle = <0x371>;
 			};
 		};
 
@@ -8329,22 +8329,22 @@
 
 			i2c8m2-xfer {
 				rockchip,pins = <0x1 0x1e 0x9 0x196 0x1 0x1f 0x9 0x196>;
-				phandle = <0x36a>;
+				phandle = <0x372>;
 			};
 
 			i2c8m3-xfer {
 				rockchip,pins = <0x4 0x10 0x9 0x196 0x4 0x11 0x9 0x196>;
-				phandle = <0x36b>;
+				phandle = <0x373>;
 			};
 
 			i2c8m4-xfer {
 				rockchip,pins = <0x3 0x12 0x9 0x196 0x3 0x13 0x9 0x196>;
-				phandle = <0x36c>;
+				phandle = <0x374>;
 			};
 
 			i2c8m1-xfer {
 				rockchip,pins = <0x2 0x8 0x9 0x196 0x2 0x9 0x9 0x196>;
-				phandle = <0x36d>;
+				phandle = <0x375>;
 			};
 		};
 
@@ -8377,17 +8377,17 @@
 
 			i2s0-sdi1 {
 				rockchip,pins = <0x1 0x1b 0x2 0x192>;
-				phandle = <0x36e>;
+				phandle = <0x376>;
 			};
 
 			i2s0-sdi2 {
 				rockchip,pins = <0x1 0x1a 0x2 0x192>;
-				phandle = <0x36f>;
+				phandle = <0x377>;
 			};
 
 			i2s0-sdi3 {
 				rockchip,pins = <0x1 0x19 0x2 0x192>;
-				phandle = <0x370>;
+				phandle = <0x378>;
 			};
 
 			i2s0-sdo0 {
@@ -8397,17 +8397,17 @@
 
 			i2s0-sdo1 {
 				rockchip,pins = <0x1 0x18 0x1 0x192>;
-				phandle = <0x371>;
+				phandle = <0x379>;
 			};
 
 			i2s0-sdo2 {
 				rockchip,pins = <0x1 0x19 0x1 0x192>;
-				phandle = <0x372>;
+				phandle = <0x37a>;
 			};
 
 			i2s0-sdo3 {
 				rockchip,pins = <0x1 0x1a 0x1 0x192>;
-				phandle = <0x373>;
+				phandle = <0x37b>;
 			};
 		};
 
@@ -8420,7 +8420,7 @@
 
 			i2s1m0-mclk {
 				rockchip,pins = <0x4 0x0 0x3 0x196>;
-				phandle = <0x374>;
+				phandle = <0x37c>;
 			};
 
 			i2s1m0-sclk {
@@ -8470,57 +8470,57 @@
 
 			i2s1m1-lrck {
 				rockchip,pins = <0x0 0xf 0x1 0x196>;
-				phandle = <0x375>;
+				phandle = <0x37d>;
 			};
 
 			i2s1m1-mclk {
 				rockchip,pins = <0x0 0xd 0x1 0x196>;
-				phandle = <0x376>;
+				phandle = <0x37e>;
 			};
 
 			i2s1m1-sclk {
 				rockchip,pins = <0x0 0xe 0x1 0x196>;
-				phandle = <0x377>;
+				phandle = <0x37f>;
 			};
 
 			i2s1m1-sdi0 {
 				rockchip,pins = <0x0 0x15 0x1 0x192>;
-				phandle = <0x378>;
+				phandle = <0x380>;
 			};
 
 			i2s1m1-sdi1 {
 				rockchip,pins = <0x0 0x16 0x1 0x192>;
-				phandle = <0x379>;
+				phandle = <0x381>;
 			};
 
 			i2s1m1-sdi2 {
 				rockchip,pins = <0x0 0x17 0x1 0x192>;
-				phandle = <0x37a>;
+				phandle = <0x382>;
 			};
 
 			i2s1m1-sdi3 {
 				rockchip,pins = <0x0 0x18 0x1 0x192>;
-				phandle = <0x37b>;
+				phandle = <0x383>;
 			};
 
 			i2s1m1-sdo0 {
 				rockchip,pins = <0x0 0x19 0x1 0x192>;
-				phandle = <0x37c>;
+				phandle = <0x384>;
 			};
 
 			i2s1m1-sdo1 {
 				rockchip,pins = <0x0 0x1a 0x1 0x192>;
-				phandle = <0x37d>;
+				phandle = <0x385>;
 			};
 
 			i2s1m1-sdo2 {
 				rockchip,pins = <0x0 0x1c 0x1 0x192>;
-				phandle = <0x37e>;
+				phandle = <0x386>;
 			};
 
 			i2s1m1-sdo3 {
 				rockchip,pins = <0x0 0x1d 0x1 0x192>;
-				phandle = <0x37f>;
+				phandle = <0x387>;
 			};
 		};
 
@@ -8538,7 +8538,7 @@
 
 			i2s2m1-mclk {
 				rockchip,pins = <0x3 0xc 0x3 0x196>;
-				phandle = <0x380>;
+				phandle = <0x388>;
 			};
 
 			i2s2m1-sclk {
@@ -8548,17 +8548,17 @@
 
 			i2s2m1-sdi {
 				rockchip,pins = <0x3 0xa 0x3 0x192>;
-				phandle = <0x381>;
+				phandle = <0x389>;
 			};
 
 			i2s2m1-sdo {
 				rockchip,pins = <0x3 0xb 0x3 0x192>;
-				phandle = <0x382>;
+				phandle = <0x38a>;
 			};
 
 			i2s2m0-idle {
 				rockchip,pins = <0x2 0x10 0x0 0x192 0x2 0xf 0x0 0x192>;
-				phandle = <0x383>;
+				phandle = <0x38b>;
 			};
 
 			i2s2m0-lrck {
@@ -8568,7 +8568,7 @@
 
 			i2s2m0-mclk {
 				rockchip,pins = <0x2 0xe 0x2 0x196>;
-				phandle = <0x384>;
+				phandle = <0x38c>;
 			};
 
 			i2s2m0-sclk {
@@ -8601,7 +8601,7 @@
 
 			i2s3-mclk {
 				rockchip,pins = <0x3 0x0 0x3 0x196>;
-				phandle = <0x385>;
+				phandle = <0x38d>;
 			};
 
 			i2s3-sclk {
@@ -8624,17 +8624,17 @@
 
 			jtagm0-pins {
 				rockchip,pins = <0x4 0x1a 0x5 0x192 0x4 0x1b 0x5 0x192>;
-				phandle = <0x386>;
+				phandle = <0x38e>;
 			};
 
 			jtagm1-pins {
 				rockchip,pins = <0x4 0x18 0x5 0x192 0x4 0x19 0x5 0x192>;
-				phandle = <0x387>;
+				phandle = <0x38f>;
 			};
 
 			jtagm2-pins {
 				rockchip,pins = <0x0 0xd 0x2 0x192 0x0 0xe 0x2 0x192>;
-				phandle = <0x388>;
+				phandle = <0x390>;
 			};
 		};
 
@@ -8642,7 +8642,7 @@
 
 			litcpu-pins {
 				rockchip,pins = <0x0 0x1b 0x1 0x192>;
-				phandle = <0x389>;
+				phandle = <0x391>;
 			};
 		};
 
@@ -8650,12 +8650,12 @@
 
 			mcum0-pins {
 				rockchip,pins = <0x4 0x1c 0x5 0x192 0x4 0x1d 0x5 0x192>;
-				phandle = <0x38a>;
+				phandle = <0x392>;
 			};
 
 			mcum1-pins {
 				rockchip,pins = <0x3 0x1c 0x6 0x192 0x3 0x1d 0x6 0x192>;
-				phandle = <0x38b>;
+				phandle = <0x393>;
 			};
 		};
 
@@ -8663,62 +8663,62 @@
 
 			mipim0-camera0-clk {
 				rockchip,pins = <0x4 0x9 0x1 0x192>;
-				phandle = <0x38c>;
+				phandle = <0x394>;
 			};
 
 			mipim0-camera1-clk {
 				rockchip,pins = <0x1 0xe 0x2 0x192>;
-				phandle = <0x38d>;
+				phandle = <0x395>;
 			};
 
 			mipim0-camera2-clk {
 				rockchip,pins = <0x1 0xf 0x2 0x192>;
-				phandle = <0x38e>;
+				phandle = <0x396>;
 			};
 
 			mipim0-camera3-clk {
 				rockchip,pins = <0x1 0x1e 0x2 0x192>;
-				phandle = <0x38f>;
+				phandle = <0x397>;
 			};
 
 			mipim0-camera4-clk {
 				rockchip,pins = <0x1 0x1f 0x2 0x192>;
-				phandle = <0x390>;
+				phandle = <0x398>;
 			};
 
 			mipim1-camera0-clk {
 				rockchip,pins = <0x3 0x5 0x4 0x192>;
-				phandle = <0x391>;
+				phandle = <0x399>;
 			};
 
 			mipim1-camera1-clk {
 				rockchip,pins = <0x3 0x6 0x4 0x192>;
-				phandle = <0x392>;
+				phandle = <0x39a>;
 			};
 
 			mipim1-camera2-clk {
 				rockchip,pins = <0x3 0x7 0x4 0x192>;
-				phandle = <0x393>;
+				phandle = <0x39b>;
 			};
 
 			mipim1-camera3-clk {
 				rockchip,pins = <0x3 0x8 0x4 0x192>;
-				phandle = <0x394>;
+				phandle = <0x39c>;
 			};
 
 			mipim1-camera4-clk {
 				rockchip,pins = <0x3 0x9 0x4 0x192>;
-				phandle = <0x395>;
+				phandle = <0x39d>;
 			};
 
 			mipi-te0 {
 				rockchip,pins = <0x3 0x12 0x2 0x192>;
-				phandle = <0x396>;
+				phandle = <0x39e>;
 			};
 
 			mipi-te1 {
 				rockchip,pins = <0x3 0x13 0x2 0x192>;
-				phandle = <0x397>;
+				phandle = <0x39f>;
 			};
 		};
 
@@ -8726,7 +8726,7 @@
 
 			npu-pins {
 				rockchip,pins = <0x0 0x16 0x2 0x192>;
-				phandle = <0x398>;
+				phandle = <0x3a0>;
 			};
 		};
 
@@ -8734,17 +8734,17 @@
 
 			pcie20x1m0-pins {
 				rockchip,pins = <0x3 0x17 0x4 0x192 0x3 0x19 0x4 0x192 0x3 0x18 0x4 0x192>;
-				phandle = <0x399>;
+				phandle = <0x3a1>;
 			};
 
 			pcie20x1m1-pins {
 				rockchip,pins = <0x4 0xf 0x4 0x192 0x4 0x11 0x4 0x192 0x4 0x10 0x4 0x192>;
-				phandle = <0x39a>;
+				phandle = <0x3a2>;
 			};
 
 			pcie20x1-2-button-rstn {
 				rockchip,pins = <0x4 0xb 0x4 0x192>;
-				phandle = <0x39b>;
+				phandle = <0x3a3>;
 			};
 		};
 
@@ -8752,7 +8752,7 @@
 
 			pcie30phy-pins {
 				rockchip,pins = <0x1 0x14 0x4 0x192 0x1 0x19 0x4 0x192>;
-				phandle = <0x39c>;
+				phandle = <0x3a4>;
 			};
 		};
 
@@ -8760,27 +8760,27 @@
 
 			pcie30x1m0-pins {
 				rockchip,pins = <0x0 0x10 0xc 0x192 0x0 0x15 0xc 0x192 0x0 0x14 0xc 0x192 0x0 0xd 0xc 0x192 0x0 0xf 0xc 0x192 0x0 0xe 0xc 0x192>;
-				phandle = <0x39d>;
+				phandle = <0x3a5>;
 			};
 
 			pcie30x1m1-pins {
 				rockchip,pins = <0x4 0x3 0x4 0x192 0x4 0x5 0x4 0x192 0x4 0x4 0x4 0x192 0x4 0x0 0x4 0x192 0x4 0x2 0x4 0x192 0x4 0x1 0x4 0x192>;
-				phandle = <0x39e>;
+				phandle = <0x3a6>;
 			};
 
 			pcie30x1m2-pins {
 				rockchip,pins = <0x1 0xd 0x4 0x192 0x1 0xc 0x4 0x192 0x1 0xb 0x4 0x192 0x1 0x0 0x4 0x192 0x1 0x7 0x4 0x192 0x1 0x1 0x4 0x192>;
-				phandle = <0x39f>;
+				phandle = <0x3a7>;
 			};
 
 			pcie30x1-0-button-rstn {
 				rockchip,pins = <0x4 0x9 0x4 0x192>;
-				phandle = <0x3a0>;
+				phandle = <0x3a8>;
 			};
 
 			pcie30x1-1-button-rstn {
 				rockchip,pins = <0x4 0xa 0x4 0x192>;
-				phandle = <0x3a1>;
+				phandle = <0x3a9>;
 			};
 		};
 
@@ -8788,27 +8788,27 @@
 
 			pcie30x2m0-pins {
 				rockchip,pins = <0x0 0x19 0xc 0x192 0x0 0x1c 0xc 0x192 0x0 0x1a 0xc 0x192>;
-				phandle = <0x3a2>;
+				phandle = <0x3aa>;
 			};
 
 			pcie30x2m1-pins {
 				rockchip,pins = <0x4 0x6 0x4 0x192 0x4 0x8 0x4 0x192 0x4 0x7 0x4 0x192>;
-				phandle = <0x3a3>;
+				phandle = <0x3ab>;
 			};
 
 			pcie30x2m2-pins {
 				rockchip,pins = <0x3 0x1a 0x4 0x192 0x3 0x1c 0x4 0x192 0x3 0x1b 0x4 0x192>;
-				phandle = <0x3a4>;
+				phandle = <0x3ac>;
 			};
 
 			pcie30x2m3-pins {
 				rockchip,pins = <0x1 0x1f 0x4 0x192 0x1 0xf 0x4 0x192 0x1 0xe 0x4 0x192>;
-				phandle = <0x3a5>;
+				phandle = <0x3ad>;
 			};
 
 			pcie30x2-button-rstn {
 				rockchip,pins = <0x3 0x11 0x4 0x192>;
-				phandle = <0x3a6>;
+				phandle = <0x3ae>;
 			};
 		};
 
@@ -8816,27 +8816,27 @@
 
 			pcie30x4m0-pins {
 				rockchip,pins = <0x0 0x16 0xc 0x192 0x0 0x18 0xc 0x192 0x0 0x17 0xc 0x192>;
-				phandle = <0x3a7>;
+				phandle = <0x3af>;
 			};
 
 			pcie30x4m1-pins {
 				rockchip,pins = <0x4 0xc 0x4 0x192 0x4 0xe 0x4 0x192 0x4 0xd 0x4 0x192>;
-				phandle = <0x3a8>;
+				phandle = <0x3b0>;
 			};
 
 			pcie30x4m2-pins {
 				rockchip,pins = <0x3 0x14 0x4 0x192 0x3 0x16 0x4 0x192 0x3 0x15 0x4 0x192>;
-				phandle = <0x3a9>;
+				phandle = <0x3b1>;
 			};
 
 			pcie30x4m3-pins {
 				rockchip,pins = <0x1 0x8 0x4 0x192 0x1 0xa 0x4 0x192 0x1 0x9 0x4 0x192>;
-				phandle = <0x3aa>;
+				phandle = <0x3b2>;
 			};
 
 			pcie30x4-button-rstn {
 				rockchip,pins = <0x3 0x1d 0x4 0x192>;
-				phandle = <0x3ab>;
+				phandle = <0x3b3>;
 			};
 
 			pcie30x4-clkreqn-m1 {
@@ -8884,37 +8884,37 @@
 
 			pdm0m1-clk {
 				rockchip,pins = <0x0 0x10 0x2 0x192>;
-				phandle = <0x3ac>;
+				phandle = <0x3b4>;
 			};
 
 			pdm0m1-clk1 {
 				rockchip,pins = <0x0 0x14 0x2 0x192>;
-				phandle = <0x3ad>;
+				phandle = <0x3b5>;
 			};
 
 			pdm0m1-idle {
 				rockchip,pins = <0x0 0x10 0x0 0x192 0x0 0x14 0x0 0x192>;
-				phandle = <0x3ae>;
+				phandle = <0x3b6>;
 			};
 
 			pdm0m1-sdi0 {
 				rockchip,pins = <0x0 0x17 0x2 0x192>;
-				phandle = <0x3af>;
+				phandle = <0x3b7>;
 			};
 
 			pdm0m1-sdi1 {
 				rockchip,pins = <0x0 0x18 0x2 0x192>;
-				phandle = <0x3b0>;
+				phandle = <0x3b8>;
 			};
 
 			pdm0m1-sdi2 {
 				rockchip,pins = <0x0 0x1c 0x2 0x192>;
-				phandle = <0x3b1>;
+				phandle = <0x3b9>;
 			};
 
 			pdm0m1-sdi3 {
 				rockchip,pins = <0x0 0x1e 0x2 0x192>;
-				phandle = <0x3b2>;
+				phandle = <0x3ba>;
 			};
 		};
 
@@ -8957,37 +8957,37 @@
 
 			pdm1m1-clk {
 				rockchip,pins = <0x1 0xc 0x2 0x192>;
-				phandle = <0x3b3>;
+				phandle = <0x3bb>;
 			};
 
 			pdm1m1-clk1 {
 				rockchip,pins = <0x1 0xb 0x2 0x192>;
-				phandle = <0x3b4>;
+				phandle = <0x3bc>;
 			};
 
 			pdm1m1-idle {
 				rockchip,pins = <0x1 0xc 0x0 0x192 0x1 0xb 0x0 0x192>;
-				phandle = <0x3b5>;
+				phandle = <0x3bd>;
 			};
 
 			pdm1m1-sdi0 {
 				rockchip,pins = <0x1 0x7 0x2 0x192>;
-				phandle = <0x3b6>;
+				phandle = <0x3be>;
 			};
 
 			pdm1m1-sdi1 {
 				rockchip,pins = <0x1 0x8 0x2 0x192>;
-				phandle = <0x3b7>;
+				phandle = <0x3bf>;
 			};
 
 			pdm1m1-sdi2 {
 				rockchip,pins = <0x1 0x9 0x2 0x192>;
-				phandle = <0x3b8>;
+				phandle = <0x3c0>;
 			};
 
 			pdm1m1-sdi3 {
 				rockchip,pins = <0x1 0xa 0x2 0x192>;
-				phandle = <0x3b9>;
+				phandle = <0x3c1>;
 			};
 		};
 
@@ -9003,7 +9003,7 @@
 
 			pmu-pins {
 				rockchip,pins = <0x0 0x5 0x3 0x192>;
-				phandle = <0x3ba>;
+				phandle = <0x3c2>;
 			};
 		};
 
@@ -9016,12 +9016,12 @@
 
 			pwm0m1-pins {
 				rockchip,pins = <0x1 0x1a 0xb 0x192>;
-				phandle = <0x3bb>;
+				phandle = <0x3c3>;
 			};
 
 			pwm0m2-pins {
 				rockchip,pins = <0x1 0x2 0xb 0x192>;
-				phandle = <0x3bc>;
+				phandle = <0x3c4>;
 			};
 		};
 
@@ -9034,12 +9034,12 @@
 
 			pwm1m1-pins {
 				rockchip,pins = <0x1 0x1b 0xb 0x192>;
-				phandle = <0x3bd>;
+				phandle = <0x3c5>;
 			};
 
 			pwm1m2-pins {
 				rockchip,pins = <0x1 0x3 0xb 0x192>;
-				phandle = <0x3be>;
+				phandle = <0x3c6>;
 			};
 		};
 
@@ -9052,12 +9052,12 @@
 
 			pwm2m1-pins {
 				rockchip,pins = <0x3 0x9 0xb 0x192>;
-				phandle = <0x3bf>;
+				phandle = <0x3c7>;
 			};
 
 			pwm2m2-pins {
 				rockchip,pins = <0x4 0x12 0xb 0x192>;
-				phandle = <0x3c0>;
+				phandle = <0x3c8>;
 			};
 		};
 
@@ -9065,7 +9065,7 @@
 
 			pwm3m0-pins {
 				rockchip,pins = <0x0 0x1c 0x3 0x192>;
-				phandle = <0x3c1>;
+				phandle = <0x3c9>;
 			};
 
 			pwm3m1-pins {
@@ -9075,12 +9075,12 @@
 
 			pwm3m2-pins {
 				rockchip,pins = <0x1 0x12 0xb 0x192>;
-				phandle = <0x3c2>;
+				phandle = <0x3ca>;
 			};
 
 			pwm3m3-pins {
 				rockchip,pins = <0x1 0x7 0xb 0x192>;
-				phandle = <0x3c3>;
+				phandle = <0x3cb>;
 			};
 		};
 
@@ -9093,7 +9093,7 @@
 
 			pwm4m1-pins {
 				rockchip,pins = <0x4 0x13 0xb 0x192>;
-				phandle = <0x3c4>;
+				phandle = <0x3cc>;
 			};
 		};
 
@@ -9106,12 +9106,12 @@
 
 			pwm5m1-pins {
 				rockchip,pins = <0x0 0x16 0xb 0x192>;
-				phandle = <0x3c5>;
+				phandle = <0x3cd>;
 			};
 
 			pwm5m2-pins {
 				rockchip,pins = <0x4 0x14 0xb 0x192>;
-				phandle = <0x3c6>;
+				phandle = <0x3ce>;
 			};
 		};
 
@@ -9124,12 +9124,12 @@
 
 			pwm6m1-pins {
 				rockchip,pins = <0x4 0x11 0xb 0x192>;
-				phandle = <0x3c7>;
+				phandle = <0x3cf>;
 			};
 
 			pwm6m2-pins {
 				rockchip,pins = <0x4 0x15 0xb 0x192>;
-				phandle = <0x3c8>;
+				phandle = <0x3d0>;
 			};
 		};
 
@@ -9142,17 +9142,17 @@
 
 			pwm7m1-pins {
 				rockchip,pins = <0x4 0x1c 0xb 0x192>;
-				phandle = <0x3c9>;
+				phandle = <0x3d1>;
 			};
 
 			pwm7m2-pins {
 				rockchip,pins = <0x1 0x13 0xb 0x192>;
-				phandle = <0x3ca>;
+				phandle = <0x3d2>;
 			};
 
 			pwm7m3-pins {
 				rockchip,pins = <0x4 0x16 0xb 0x192>;
-				phandle = <0x3cb>;
+				phandle = <0x3d3>;
 			};
 		};
 
@@ -9165,12 +9165,12 @@
 
 			pwm8m1-pins {
 				rockchip,pins = <0x4 0x18 0xb 0x192>;
-				phandle = <0x3cc>;
+				phandle = <0x3d4>;
 			};
 
 			pwm8m2-pins {
 				rockchip,pins = <0x3 0x18 0xb 0x192>;
-				phandle = <0x3cd>;
+				phandle = <0x3d5>;
 			};
 		};
 
@@ -9183,12 +9183,12 @@
 
 			pwm9m1-pins {
 				rockchip,pins = <0x4 0x19 0xb 0x192>;
-				phandle = <0x3ce>;
+				phandle = <0x3d6>;
 			};
 
 			pwm9m2-pins {
 				rockchip,pins = <0x3 0x19 0xb 0x192>;
-				phandle = <0x3cf>;
+				phandle = <0x3d7>;
 			};
 		};
 
@@ -9196,12 +9196,12 @@
 
 			pwm10m0-pins {
 				rockchip,pins = <0x3 0x0 0xb 0x192>;
-				phandle = <0x3d0>;
+				phandle = <0x3d8>;
 			};
 
 			pwm10m1-pins {
 				rockchip,pins = <0x4 0x1b 0xb 0x192>;
-				phandle = <0x3d1>;
+				phandle = <0x3d9>;
 			};
 
 			pwm10m2-pins {
@@ -9214,17 +9214,17 @@
 
 			pwm11m0-pins {
 				rockchip,pins = <0x3 0x1 0xb 0x192>;
-				phandle = <0x3d2>;
+				phandle = <0x3da>;
 			};
 
 			pwm11m1-pins {
 				rockchip,pins = <0x4 0xc 0xb 0x192>;
-				phandle = <0x3d3>;
+				phandle = <0x3db>;
 			};
 
 			pwm11m2-pins {
 				rockchip,pins = <0x1 0x14 0xb 0x192>;
-				phandle = <0x3d4>;
+				phandle = <0x3dc>;
 			};
 
 			pwm11m3-pins {
@@ -9242,7 +9242,7 @@
 
 			pwm12m1-pins {
 				rockchip,pins = <0x4 0xd 0xb 0x192>;
-				phandle = <0x3d5>;
+				phandle = <0x3dd>;
 			};
 		};
 
@@ -9255,12 +9255,12 @@
 
 			pwm13m1-pins {
 				rockchip,pins = <0x4 0xe 0xb 0x192>;
-				phandle = <0x3d6>;
+				phandle = <0x3de>;
 			};
 
 			pwm13m2-pins {
 				rockchip,pins = <0x1 0xf 0xb 0x192>;
-				phandle = <0x3d7>;
+				phandle = <0x3df>;
 			};
 		};
 
@@ -9273,12 +9273,12 @@
 
 			pwm14m1-pins {
 				rockchip,pins = <0x4 0xa 0xb 0x192>;
-				phandle = <0x3d8>;
+				phandle = <0x3e0>;
 			};
 
 			pwm14m2-pins {
 				rockchip,pins = <0x1 0x1e 0xb 0x192>;
-				phandle = <0x3d9>;
+				phandle = <0x3e1>;
 			};
 		};
 
@@ -9291,17 +9291,17 @@
 
 			pwm15m1-pins {
 				rockchip,pins = <0x4 0xb 0xb 0x192>;
-				phandle = <0x3da>;
+				phandle = <0x3e2>;
 			};
 
 			pwm15m2-pins {
 				rockchip,pins = <0x1 0x16 0xb 0x192>;
-				phandle = <0x3db>;
+				phandle = <0x3e3>;
 			};
 
 			pwm15m3-pins {
 				rockchip,pins = <0x1 0x1f 0xb 0x192>;
-				phandle = <0x3dc>;
+				phandle = <0x3e4>;
 			};
 		};
 
@@ -9309,7 +9309,7 @@
 
 			refclk-pins {
 				rockchip,pins = <0x0 0x0 0x1 0x192>;
-				phandle = <0x3dd>;
+				phandle = <0x3e5>;
 			};
 		};
 
@@ -9317,7 +9317,7 @@
 
 			sata-pins {
 				rockchip,pins = <0x0 0x16 0xd 0x192 0x0 0x1c 0xd 0x192 0x0 0x1d 0xd 0x192>;
-				phandle = <0x3de>;
+				phandle = <0x3e6>;
 			};
 		};
 
@@ -9325,12 +9325,12 @@
 
 			sata0m0-pins {
 				rockchip,pins = <0x4 0xe 0x6 0x192>;
-				phandle = <0x3df>;
+				phandle = <0x3e7>;
 			};
 
 			sata0m1-pins {
 				rockchip,pins = <0x1 0xb 0x6 0x192>;
-				phandle = <0x3e0>;
+				phandle = <0x3e8>;
 			};
 		};
 
@@ -9338,12 +9338,12 @@
 
 			sata1m0-pins {
 				rockchip,pins = <0x4 0xd 0x6 0x192>;
-				phandle = <0x3e1>;
+				phandle = <0x3e9>;
 			};
 
 			sata1m1-pins {
 				rockchip,pins = <0x1 0x1 0x6 0x192>;
-				phandle = <0x3e2>;
+				phandle = <0x3ea>;
 			};
 		};
 
@@ -9351,12 +9351,12 @@
 
 			sata2m0-pins {
 				rockchip,pins = <0x4 0x9 0x6 0x192>;
-				phandle = <0x3e3>;
+				phandle = <0x3eb>;
 			};
 
 			sata2m1-pins {
 				rockchip,pins = <0x1 0xf 0x6 0x192>;
-				phandle = <0x3e4>;
+				phandle = <0x3ec>;
 			};
 		};
 
@@ -9364,7 +9364,7 @@
 
 			sdiom1-pins {
 				rockchip,pins = <0x3 0x5 0x2 0x192 0x3 0x4 0x2 0x197 0x3 0x0 0x2 0x197 0x3 0x1 0x2 0x197 0x3 0x2 0x2 0x197 0x3 0x3 0x2 0x197>;
-				phandle = <0x3e5>;
+				phandle = <0x3ed>;
 			};
 
 			sdiom0-pins {
@@ -9397,7 +9397,7 @@
 
 			sdmmc-pwren {
 				rockchip,pins = <0x0 0x5 0x2 0x192>;
-				phandle = <0x3e6>;
+				phandle = <0x3ee>;
 			};
 		};
 
@@ -9410,7 +9410,7 @@
 
 			spdif0m1-tx {
 				rockchip,pins = <0x4 0xc 0x6 0x192>;
-				phandle = <0x3e7>;
+				phandle = <0x3ef>;
 			};
 		};
 
@@ -9423,12 +9423,12 @@
 
 			spdif1m1-tx {
 				rockchip,pins = <0x4 0x9 0x2 0x192>;
-				phandle = <0x3e8>;
+				phandle = <0x3f0>;
 			};
 
 			spdif1m2-tx {
 				rockchip,pins = <0x4 0x11 0x3 0x192>;
-				phandle = <0x3e9>;
+				phandle = <0x3f1>;
 			};
 		};
 
@@ -9451,47 +9451,47 @@
 
 			spi0m1-pins {
 				rockchip,pins = <0x4 0x2 0x8 0x199 0x4 0x0 0x8 0x199 0x4 0x1 0x8 0x199>;
-				phandle = <0x3ea>;
+				phandle = <0x3f2>;
 			};
 
 			spi0m1-cs0 {
 				rockchip,pins = <0x4 0xa 0x8 0x199>;
-				phandle = <0x3eb>;
+				phandle = <0x3f3>;
 			};
 
 			spi0m1-cs1 {
 				rockchip,pins = <0x4 0x9 0x8 0x199>;
-				phandle = <0x3ec>;
+				phandle = <0x3f4>;
 			};
 
 			spi0m2-pins {
 				rockchip,pins = <0x1 0xb 0x8 0x199 0x1 0x9 0x8 0x199 0x1 0xa 0x8 0x199>;
-				phandle = <0x3ed>;
+				phandle = <0x3f5>;
 			};
 
 			spi0m2-cs0 {
 				rockchip,pins = <0x1 0xc 0x8 0x199>;
-				phandle = <0x3ee>;
+				phandle = <0x3f6>;
 			};
 
 			spi0m2-cs1 {
 				rockchip,pins = <0x1 0xd 0x8 0x199>;
-				phandle = <0x3ef>;
+				phandle = <0x3f7>;
 			};
 
 			spi0m3-pins {
 				rockchip,pins = <0x3 0x1b 0x8 0x199 0x3 0x19 0x8 0x199 0x3 0x1a 0x8 0x199>;
-				phandle = <0x3f0>;
+				phandle = <0x3f8>;
 			};
 
 			spi0m3-cs0 {
 				rockchip,pins = <0x3 0x1c 0x8 0x199>;
-				phandle = <0x3f1>;
+				phandle = <0x3f9>;
 			};
 
 			spi0m3-cs1 {
 				rockchip,pins = <0x3 0x1d 0x8 0x199>;
-				phandle = <0x3f2>;
+				phandle = <0x3fa>;
 			};
 		};
 
@@ -9514,32 +9514,32 @@
 
 			spi1m2-pins {
 				rockchip,pins = <0x1 0x1a 0x8 0x199 0x1 0x18 0x8 0x199 0x1 0x19 0x8 0x199>;
-				phandle = <0x3f3>;
+				phandle = <0x3fb>;
 			};
 
 			spi1m2-cs0 {
 				rockchip,pins = <0x1 0x1b 0x8 0x199>;
-				phandle = <0x3f4>;
+				phandle = <0x3fc>;
 			};
 
 			spi1m2-cs1 {
 				rockchip,pins = <0x1 0x1d 0x8 0x199>;
-				phandle = <0x3f5>;
+				phandle = <0x3fd>;
 			};
 
 			spi1m0-pins {
 				rockchip,pins = <0x2 0x10 0x8 0x19a 0x2 0x11 0x8 0x19a 0x2 0x12 0x8 0x19a>;
-				phandle = <0x3f6>;
+				phandle = <0x3fe>;
 			};
 
 			spi1m0-cs0 {
 				rockchip,pins = <0x2 0x13 0x8 0x19a>;
-				phandle = <0x3f7>;
+				phandle = <0x3ff>;
 			};
 
 			spi1m0-cs1 {
 				rockchip,pins = <0x2 0x14 0x8 0x19a>;
-				phandle = <0x3f8>;
+				phandle = <0x400>;
 			};
 		};
 
@@ -9547,32 +9547,32 @@
 
 			spi2m0-pins {
 				rockchip,pins = <0x1 0x6 0x8 0x199 0x1 0x4 0x8 0x199 0x1 0x5 0x8 0x199>;
-				phandle = <0x3f9>;
+				phandle = <0x401>;
 			};
 
 			spi2m0-cs0 {
 				rockchip,pins = <0x1 0x7 0x8 0x199>;
-				phandle = <0x3fa>;
+				phandle = <0x402>;
 			};
 
 			spi2m0-cs1 {
 				rockchip,pins = <0x1 0x8 0x8 0x199>;
-				phandle = <0x3fb>;
+				phandle = <0x403>;
 			};
 
 			spi2m1-pins {
 				rockchip,pins = <0x4 0x6 0x8 0x199 0x4 0x4 0x8 0x199 0x4 0x5 0x8 0x199>;
-				phandle = <0x3fc>;
+				phandle = <0x404>;
 			};
 
 			spi2m1-cs0 {
 				rockchip,pins = <0x4 0x7 0x8 0x199>;
-				phandle = <0x3fd>;
+				phandle = <0x405>;
 			};
 
 			spi2m1-cs1 {
 				rockchip,pins = <0x4 0x8 0x8 0x199>;
-				phandle = <0x3fe>;
+				phandle = <0x406>;
 			};
 
 			spi2m2-pins {
@@ -9587,7 +9587,7 @@
 
 			spi2m2-cs1 {
 				rockchip,pins = <0x0 0x8 0x1 0x19a>;
-				phandle = <0x3ff>;
+				phandle = <0x407>;
 			};
 		};
 
@@ -9610,47 +9610,47 @@
 
 			spi3m2-pins {
 				rockchip,pins = <0x0 0x1b 0x8 0x199 0x0 0x18 0x8 0x199 0x0 0x1a 0x8 0x199>;
-				phandle = <0x400>;
+				phandle = <0x408>;
 			};
 
 			spi3m2-cs0 {
 				rockchip,pins = <0x0 0x1c 0x8 0x199>;
-				phandle = <0x401>;
+				phandle = <0x409>;
 			};
 
 			spi3m2-cs1 {
 				rockchip,pins = <0x0 0x1d 0x8 0x199>;
-				phandle = <0x402>;
+				phandle = <0x40a>;
 			};
 
 			spi3m3-pins {
 				rockchip,pins = <0x3 0x18 0x8 0x199 0x3 0x16 0x8 0x199 0x3 0x17 0x8 0x199>;
-				phandle = <0x403>;
+				phandle = <0x40b>;
 			};
 
 			spi3m3-cs0 {
 				rockchip,pins = <0x3 0x14 0x8 0x199>;
-				phandle = <0x404>;
+				phandle = <0x40c>;
 			};
 
 			spi3m3-cs1 {
 				rockchip,pins = <0x3 0x15 0x8 0x199>;
-				phandle = <0x405>;
+				phandle = <0x40d>;
 			};
 
 			spi3m0-pins {
 				rockchip,pins = <0x4 0x16 0x8 0x19a 0x4 0x14 0x8 0x19a 0x4 0x15 0x8 0x19a>;
-				phandle = <0x406>;
+				phandle = <0x40e>;
 			};
 
 			spi3m0-cs0 {
 				rockchip,pins = <0x4 0x12 0x8 0x19a>;
-				phandle = <0x407>;
+				phandle = <0x40f>;
 			};
 
 			spi3m0-cs1 {
 				rockchip,pins = <0x4 0x13 0x8 0x19a>;
-				phandle = <0x408>;
+				phandle = <0x410>;
 			};
 		};
 
@@ -9673,27 +9673,27 @@
 
 			spi4m1-pins {
 				rockchip,pins = <0x3 0x2 0x8 0x199 0x3 0x0 0x8 0x199 0x3 0x1 0x8 0x199>;
-				phandle = <0x409>;
+				phandle = <0x411>;
 			};
 
 			spi4m1-cs0 {
 				rockchip,pins = <0x3 0x3 0x8 0x199>;
-				phandle = <0x40a>;
+				phandle = <0x412>;
 			};
 
 			spi4m1-cs1 {
 				rockchip,pins = <0x3 0x4 0x8 0x199>;
-				phandle = <0x40b>;
+				phandle = <0x413>;
 			};
 
 			spi4m2-pins {
 				rockchip,pins = <0x1 0x2 0x8 0x199 0x1 0x0 0x8 0x199 0x1 0x1 0x8 0x199>;
-				phandle = <0x40c>;
+				phandle = <0x414>;
 			};
 
 			spi4m2-cs0 {
 				rockchip,pins = <0x1 0x3 0x8 0x199>;
-				phandle = <0x40d>;
+				phandle = <0x415>;
 			};
 		};
 
@@ -9701,7 +9701,7 @@
 
 			tsadcm1-shut {
 				rockchip,pins = <0x0 0x2 0x2 0x192>;
-				phandle = <0x40e>;
+				phandle = <0x416>;
 			};
 
 			tsadc-shut {
@@ -9711,7 +9711,7 @@
 
 			tsadc-shut-org {
 				rockchip,pins = <0x0 0x1 0x1 0x192>;
-				phandle = <0x40f>;
+				phandle = <0x417>;
 			};
 		};
 
@@ -9719,7 +9719,7 @@
 
 			uart0m0-xfer {
 				rockchip,pins = <0x0 0x14 0x4 0x197 0x0 0x15 0x4 0x197>;
-				phandle = <0x410>;
+				phandle = <0x418>;
 			};
 
 			uart0m1-xfer {
@@ -9729,17 +9729,17 @@
 
 			uart0m2-xfer {
 				rockchip,pins = <0x4 0x4 0xa 0x197 0x4 0x3 0xa 0x197>;
-				phandle = <0x411>;
+				phandle = <0x419>;
 			};
 
 			uart0-ctsn {
 				rockchip,pins = <0x0 0x19 0x4 0x192>;
-				phandle = <0x412>;
+				phandle = <0x41a>;
 			};
 
 			uart0-rtsn {
 				rockchip,pins = <0x0 0x16 0x4 0x192>;
-				phandle = <0x413>;
+				phandle = <0x41b>;
 			};
 		};
 
@@ -9747,32 +9747,32 @@
 
 			uart1m1-xfer {
 				rockchip,pins = <0x1 0xf 0xa 0x197 0x1 0xe 0xa 0x197>;
-				phandle = <0x414>;
+				phandle = <0x41c>;
 			};
 
 			uart1m1-ctsn {
 				rockchip,pins = <0x1 0x1f 0xa 0x192>;
-				phandle = <0x415>;
+				phandle = <0x41d>;
 			};
 
 			uart1m1-rtsn {
 				rockchip,pins = <0x1 0x1e 0xa 0x192>;
-				phandle = <0x416>;
+				phandle = <0x41e>;
 			};
 
 			uart1m2-xfer {
 				rockchip,pins = <0x0 0x1a 0xa 0x197 0x0 0x19 0xa 0x197>;
-				phandle = <0x417>;
+				phandle = <0x41f>;
 			};
 
 			uart1m2-ctsn {
 				rockchip,pins = <0x0 0x18 0xa 0x192>;
-				phandle = <0x418>;
+				phandle = <0x420>;
 			};
 
 			uart1m2-rtsn {
 				rockchip,pins = <0x0 0x17 0xa 0x192>;
-				phandle = <0x419>;
+				phandle = <0x421>;
 			};
 
 			uart1m0-xfer {
@@ -9782,12 +9782,12 @@
 
 			uart1m0-ctsn {
 				rockchip,pins = <0x2 0x11 0xa 0x192>;
-				phandle = <0x41a>;
+				phandle = <0x422>;
 			};
 
 			uart1m0-rtsn {
 				rockchip,pins = <0x2 0x10 0xa 0x192>;
-				phandle = <0x41b>;
+				phandle = <0x423>;
 			};
 		};
 
@@ -9795,7 +9795,7 @@
 
 			uart2m0-xfer {
 				rockchip,pins = <0x0 0xe 0xa 0x197 0x0 0xd 0xa 0x197>;
-				phandle = <0x1eb>;
+				phandle = <0x1f3>;
 			};
 
 			uart2m1-xfer {
@@ -9805,17 +9805,17 @@
 
 			uart2m2-xfer {
 				rockchip,pins = <0x3 0xa 0xa 0x197 0x3 0x9 0xa 0x197>;
-				phandle = <0x41c>;
+				phandle = <0x424>;
 			};
 
 			uart2-ctsn {
 				rockchip,pins = <0x3 0xc 0xa 0x192>;
-				phandle = <0x41d>;
+				phandle = <0x425>;
 			};
 
 			uart2-rtsn {
 				rockchip,pins = <0x3 0xb 0xa 0x192>;
-				phandle = <0x41e>;
+				phandle = <0x426>;
 			};
 		};
 
@@ -9823,7 +9823,7 @@
 
 			uart3m0-xfer {
 				rockchip,pins = <0x1 0x10 0xa 0x197 0x1 0x11 0xa 0x197>;
-				phandle = <0x41f>;
+				phandle = <0x427>;
 			};
 
 			uart3m1-xfer {
@@ -9833,17 +9833,17 @@
 
 			uart3m2-xfer {
 				rockchip,pins = <0x4 0x6 0xa 0x197 0x4 0x5 0xa 0x197>;
-				phandle = <0x420>;
+				phandle = <0x428>;
 			};
 
 			uart3-ctsn {
 				rockchip,pins = <0x1 0x13 0xa 0x192>;
-				phandle = <0x421>;
+				phandle = <0x429>;
 			};
 
 			uart3-rtsn {
 				rockchip,pins = <0x1 0x12 0xa 0x192>;
-				phandle = <0x422>;
+				phandle = <0x42a>;
 			};
 		};
 
@@ -9856,22 +9856,22 @@
 
 			uart4m1-xfer {
 				rockchip,pins = <0x3 0x18 0xa 0x197 0x3 0x19 0xa 0x197>;
-				phandle = <0x423>;
+				phandle = <0x42b>;
 			};
 
 			uart4m2-xfer {
 				rockchip,pins = <0x1 0xa 0xa 0x197 0x1 0xb 0xa 0x197>;
-				phandle = <0x424>;
+				phandle = <0x42c>;
 			};
 
 			uart4-ctsn {
 				rockchip,pins = <0x1 0x17 0xa 0x192>;
-				phandle = <0x425>;
+				phandle = <0x42d>;
 			};
 
 			uart4-rtsn {
 				rockchip,pins = <0x1 0x15 0xa 0x192>;
-				phandle = <0x426>;
+				phandle = <0x42e>;
 			};
 		};
 
@@ -9879,17 +9879,17 @@
 
 			uart5m0-xfer {
 				rockchip,pins = <0x4 0x1c 0xa 0x197 0x4 0x1d 0xa 0x197>;
-				phandle = <0x427>;
+				phandle = <0x42f>;
 			};
 
 			uart5m0-ctsn {
 				rockchip,pins = <0x4 0x1a 0xa 0x192>;
-				phandle = <0x428>;
+				phandle = <0x430>;
 			};
 
 			uart5m0-rtsn {
 				rockchip,pins = <0x4 0x1b 0xa 0x192>;
-				phandle = <0x429>;
+				phandle = <0x431>;
 			};
 
 			uart5m1-xfer {
@@ -9899,17 +9899,17 @@
 
 			uart5m1-ctsn {
 				rockchip,pins = <0x2 0x2 0xa 0x192>;
-				phandle = <0x42a>;
+				phandle = <0x432>;
 			};
 
 			uart5m1-rtsn {
 				rockchip,pins = <0x2 0x3 0xa 0x192>;
-				phandle = <0x42b>;
+				phandle = <0x433>;
 			};
 
 			uart5m2-xfer {
 				rockchip,pins = <0x2 0x1c 0xa 0x197 0x2 0x1d 0xa 0x197>;
-				phandle = <0x42c>;
+				phandle = <0x434>;
 			};
 		};
 
@@ -9917,17 +9917,17 @@
 
 			uart6m1-xfer {
 				rockchip,pins = <0x1 0x0 0xa 0x197 0x1 0x1 0xa 0x197>;
-				phandle = <0x42d>;
+				phandle = <0x435>;
 			};
 
 			uart6m1-ctsn {
 				rockchip,pins = <0x1 0x3 0xa 0x192>;
-				phandle = <0x42e>;
+				phandle = <0x436>;
 			};
 
 			uart6m1-rtsn {
 				rockchip,pins = <0x1 0x2 0xa 0x192>;
-				phandle = <0x42f>;
+				phandle = <0x437>;
 			};
 
 			uart6m2-xfer {
@@ -9937,17 +9937,17 @@
 
 			uart6m0-xfer {
 				rockchip,pins = <0x2 0x6 0xa 0x197 0x2 0x7 0xa 0x197>;
-				phandle = <0x430>;
+				phandle = <0x438>;
 			};
 
 			uart6m0-ctsn {
 				rockchip,pins = <0x2 0x9 0xa 0x192>;
-				phandle = <0x431>;
+				phandle = <0x439>;
 			};
 
 			uart6m0-rtsn {
 				rockchip,pins = <0x2 0x8 0xa 0x192>;
-				phandle = <0x432>;
+				phandle = <0x43a>;
 			};
 		};
 
@@ -9960,32 +9960,32 @@
 
 			uart7m1-ctsn {
 				rockchip,pins = <0x3 0x13 0xa 0x192>;
-				phandle = <0x433>;
+				phandle = <0x43b>;
 			};
 
 			uart7m1-rtsn {
 				rockchip,pins = <0x3 0x12 0xa 0x192>;
-				phandle = <0x434>;
+				phandle = <0x43c>;
 			};
 
 			uart7m2-xfer {
 				rockchip,pins = <0x1 0xc 0xa 0x197 0x1 0xd 0xa 0x197>;
-				phandle = <0x435>;
+				phandle = <0x43d>;
 			};
 
 			uart7m0-xfer {
 				rockchip,pins = <0x2 0xc 0xa 0x197 0x2 0xd 0xa 0x197>;
-				phandle = <0x436>;
+				phandle = <0x43e>;
 			};
 
 			uart7m0-ctsn {
 				rockchip,pins = <0x4 0x16 0xa 0x192>;
-				phandle = <0x437>;
+				phandle = <0x43f>;
 			};
 
 			uart7m0-rtsn {
 				rockchip,pins = <0x4 0x12 0xa 0x192>;
-				phandle = <0x438>;
+				phandle = <0x440>;
 			};
 		};
 
@@ -9993,17 +9993,17 @@
 
 			uart8m0-xfer {
 				rockchip,pins = <0x4 0x9 0xa 0x197 0x4 0x8 0xa 0x197>;
-				phandle = <0x439>;
+				phandle = <0x441>;
 			};
 
 			uart8m0-ctsn {
 				rockchip,pins = <0x4 0xb 0xa 0x192>;
-				phandle = <0x43a>;
+				phandle = <0x442>;
 			};
 
 			uart8m0-rtsn {
 				rockchip,pins = <0x4 0xa 0xa 0x192>;
-				phandle = <0x43b>;
+				phandle = <0x443>;
 			};
 
 			uart8m1-xfer {
@@ -10013,17 +10013,17 @@
 
 			uart8m1-ctsn {
 				rockchip,pins = <0x3 0x5 0xa 0x192>;
-				phandle = <0x43c>;
+				phandle = <0x444>;
 			};
 
 			uart8m1-rtsn {
 				rockchip,pins = <0x3 0x4 0xa 0x192>;
-				phandle = <0x43d>;
+				phandle = <0x445>;
 			};
 
 			uart8-xfer {
 				rockchip,pins = <0x4 0x9 0xa 0x197>;
-				phandle = <0x43e>;
+				phandle = <0x446>;
 			};
 		};
 
@@ -10031,32 +10031,32 @@
 
 			uart9m1-xfer {
 				rockchip,pins = <0x4 0xd 0xa 0x197 0x4 0xc 0xa 0x197>;
-				phandle = <0x43f>;
+				phandle = <0x447>;
 			};
 
 			uart9m1-ctsn {
 				rockchip,pins = <0x4 0x1 0xa 0x192>;
-				phandle = <0x440>;
+				phandle = <0x448>;
 			};
 
 			uart9m1-rtsn {
 				rockchip,pins = <0x4 0x0 0xa 0x192>;
-				phandle = <0x441>;
+				phandle = <0x449>;
 			};
 
 			uart9m2-xfer {
 				rockchip,pins = <0x3 0x1c 0xa 0x197 0x3 0x1d 0xa 0x197>;
-				phandle = <0x442>;
+				phandle = <0x44a>;
 			};
 
 			uart9m2-ctsn {
 				rockchip,pins = <0x3 0x1b 0xa 0x192>;
-				phandle = <0x443>;
+				phandle = <0x44b>;
 			};
 
 			uart9m2-rtsn {
 				rockchip,pins = <0x3 0x1a 0xa 0x192>;
-				phandle = <0x444>;
+				phandle = <0x44c>;
 			};
 
 			uart9m0-xfer {
@@ -10071,7 +10071,7 @@
 
 			uart9m0-rtsn {
 				rockchip,pins = <0x4 0x14 0xa 0x192>;
-				phandle = <0x1e4>;
+				phandle = <0x1ed>;
 			};
 		};
 
@@ -10079,7 +10079,7 @@
 
 			vop-pins {
 				rockchip,pins = <0x1 0x2 0x1 0x192>;
-				phandle = <0x445>;
+				phandle = <0x44d>;
 			};
 		};
 
@@ -10087,7 +10087,7 @@
 
 			bt656-pins {
 				rockchip,pins = <0x4 0x8 0x2 0x19b 0x4 0x0 0x2 0x19b 0x4 0x1 0x2 0x19b 0x4 0x2 0x2 0x19b 0x4 0x3 0x2 0x19b 0x4 0x4 0x2 0x19b 0x4 0x5 0x2 0x19b 0x4 0x6 0x2 0x19b 0x4 0x7 0x2 0x19b>;
-				phandle = <0x446>;
+				phandle = <0x44e>;
 			};
 		};
 
@@ -10102,170 +10102,170 @@
 		pcfg-pull-none-drv-level-7 {
 			bias-disable;
 			drive-strength = <0x7>;
-			phandle = <0x447>;
+			phandle = <0x44f>;
 		};
 
 		pcfg-pull-none-drv-level-8 {
 			bias-disable;
 			drive-strength = <0x8>;
-			phandle = <0x448>;
+			phandle = <0x450>;
 		};
 
 		pcfg-pull-none-drv-level-9 {
 			bias-disable;
 			drive-strength = <0x9>;
-			phandle = <0x449>;
+			phandle = <0x451>;
 		};
 
 		pcfg-pull-none-drv-level-10 {
 			bias-disable;
 			drive-strength = <0xa>;
-			phandle = <0x44a>;
+			phandle = <0x452>;
 		};
 
 		pcfg-pull-none-drv-level-11 {
 			bias-disable;
 			drive-strength = <0xb>;
-			phandle = <0x44b>;
+			phandle = <0x453>;
 		};
 
 		pcfg-pull-none-drv-level-12 {
 			bias-disable;
 			drive-strength = <0xc>;
-			phandle = <0x44c>;
+			phandle = <0x454>;
 		};
 
 		pcfg-pull-none-drv-level-13 {
 			bias-disable;
 			drive-strength = <0xd>;
-			phandle = <0x44d>;
+			phandle = <0x455>;
 		};
 
 		pcfg-pull-none-drv-level-14 {
 			bias-disable;
 			drive-strength = <0xe>;
-			phandle = <0x44e>;
+			phandle = <0x456>;
 		};
 
 		pcfg-pull-none-drv-level-15 {
 			bias-disable;
 			drive-strength = <0xf>;
-			phandle = <0x44f>;
+			phandle = <0x457>;
 		};
 
 		pcfg-pull-up-drv-level-7 {
 			bias-pull-up;
 			drive-strength = <0x7>;
-			phandle = <0x450>;
+			phandle = <0x458>;
 		};
 
 		pcfg-pull-up-drv-level-8 {
 			bias-pull-up;
 			drive-strength = <0x8>;
-			phandle = <0x451>;
+			phandle = <0x459>;
 		};
 
 		pcfg-pull-up-drv-level-9 {
 			bias-pull-up;
 			drive-strength = <0x9>;
-			phandle = <0x452>;
+			phandle = <0x45a>;
 		};
 
 		pcfg-pull-up-drv-level-10 {
 			bias-pull-up;
 			drive-strength = <0xa>;
-			phandle = <0x453>;
+			phandle = <0x45b>;
 		};
 
 		pcfg-pull-up-drv-level-11 {
 			bias-pull-up;
 			drive-strength = <0xb>;
-			phandle = <0x454>;
+			phandle = <0x45c>;
 		};
 
 		pcfg-pull-up-drv-level-12 {
 			bias-pull-up;
 			drive-strength = <0xc>;
-			phandle = <0x455>;
+			phandle = <0x45d>;
 		};
 
 		pcfg-pull-up-drv-level-13 {
 			bias-pull-up;
 			drive-strength = <0xd>;
-			phandle = <0x456>;
+			phandle = <0x45e>;
 		};
 
 		pcfg-pull-up-drv-level-14 {
 			bias-pull-up;
 			drive-strength = <0xe>;
-			phandle = <0x457>;
+			phandle = <0x45f>;
 		};
 
 		pcfg-pull-up-drv-level-15 {
 			bias-pull-up;
 			drive-strength = <0xf>;
-			phandle = <0x458>;
+			phandle = <0x460>;
 		};
 
 		pcfg-pull-down-drv-level-7 {
 			bias-pull-down;
 			drive-strength = <0x7>;
-			phandle = <0x459>;
+			phandle = <0x461>;
 		};
 
 		pcfg-pull-down-drv-level-8 {
 			bias-pull-down;
 			drive-strength = <0x8>;
-			phandle = <0x45a>;
+			phandle = <0x462>;
 		};
 
 		pcfg-pull-down-drv-level-9 {
 			bias-pull-down;
 			drive-strength = <0x9>;
-			phandle = <0x45b>;
+			phandle = <0x463>;
 		};
 
 		pcfg-pull-down-drv-level-10 {
 			bias-pull-down;
 			drive-strength = <0xa>;
-			phandle = <0x45c>;
+			phandle = <0x464>;
 		};
 
 		pcfg-pull-down-drv-level-11 {
 			bias-pull-down;
 			drive-strength = <0xb>;
-			phandle = <0x45d>;
+			phandle = <0x465>;
 		};
 
 		pcfg-pull-down-drv-level-12 {
 			bias-pull-down;
 			drive-strength = <0xc>;
-			phandle = <0x45e>;
+			phandle = <0x466>;
 		};
 
 		pcfg-pull-down-drv-level-13 {
 			bias-pull-down;
 			drive-strength = <0xd>;
-			phandle = <0x45f>;
+			phandle = <0x467>;
 		};
 
 		pcfg-pull-down-drv-level-14 {
 			bias-pull-down;
 			drive-strength = <0xe>;
-			phandle = <0x460>;
+			phandle = <0x468>;
 		};
 
 		pcfg-pull-down-drv-level-15 {
 			bias-pull-down;
 			drive-strength = <0xf>;
-			phandle = <0x461>;
+			phandle = <0x469>;
 		};
 
 		eth0 {
 
 			eth0-pins {
 				rockchip,pins = <0x2 0x13 0x1 0x192>;
-				phandle = <0x462>;
+				phandle = <0x1c4>;
 			};
 		};
 
@@ -10273,52 +10273,52 @@
 
 			gmac0-miim {
 				rockchip,pins = <0x4 0x14 0x1 0x192 0x4 0x15 0x1 0x192>;
-				phandle = <0x463>;
+				phandle = <0x1bf>;
 			};
 
 			gmac0-clkinout {
 				rockchip,pins = <0x4 0x13 0x1 0x192>;
-				phandle = <0x464>;
+				phandle = <0x1c5>;
 			};
 
 			gmac0-rx-bus2 {
 				rockchip,pins = <0x2 0x11 0x1 0x192 0x2 0x12 0x1 0x192 0x4 0x12 0x1 0x192>;
-				phandle = <0x465>;
+				phandle = <0x1c1>;
 			};
 
 			gmac0-tx-bus2 {
 				rockchip,pins = <0x2 0xe 0x1 0x192 0x2 0xf 0x1 0x192 0x2 0x10 0x1 0x192>;
-				phandle = <0x466>;
+				phandle = <0x1c0>;
 			};
 
 			gmac0-rgmii-clk {
 				rockchip,pins = <0x2 0x8 0x1 0x192 0x2 0xb 0x1 0x192>;
-				phandle = <0x467>;
+				phandle = <0x1c2>;
 			};
 
 			gmac0-rgmii-bus {
 				rockchip,pins = <0x2 0x6 0x1 0x192 0x2 0x7 0x1 0x192 0x2 0x9 0x1 0x192 0x2 0xa 0x1 0x192>;
-				phandle = <0x468>;
+				phandle = <0x1c3>;
 			};
 
 			gmac0-ppsclk {
 				rockchip,pins = <0x2 0x14 0x1 0x192>;
-				phandle = <0x469>;
+				phandle = <0x46a>;
 			};
 
 			gmac0-ppstring {
 				rockchip,pins = <0x2 0xd 0x1 0x192>;
-				phandle = <0x46a>;
+				phandle = <0x46b>;
 			};
 
 			gmac0-ptp-refclk {
 				rockchip,pins = <0x2 0xc 0x1 0x192>;
-				phandle = <0x46b>;
+				phandle = <0x46c>;
 			};
 
 			gmac0-txer {
 				rockchip,pins = <0x4 0x16 0x1 0x192>;
-				phandle = <0x46c>;
+				phandle = <0x46d>;
 			};
 		};
 
@@ -10326,17 +10326,17 @@
 
 			mipicsi0-pwr {
 				rockchip,pins = <0x1 0x1a 0x0 0x192>;
-				phandle = <0x46d>;
+				phandle = <0x46e>;
 			};
 
 			mipicsi1-pwr {
 				rockchip,pins = <0x1 0x1b 0x0 0x192>;
-				phandle = <0x46e>;
+				phandle = <0x46f>;
 			};
 
 			mipidcphy0-pwr {
 				rockchip,pins = <0x2 0x14 0x0 0x192>;
-				phandle = <0x46f>;
+				phandle = <0x470>;
 			};
 		};
 
@@ -10344,7 +10344,7 @@
 
 			vga-hpdin-l {
 				rockchip,pins = <0x3 0x6 0x0 0x192>;
-				phandle = <0x470>;
+				phandle = <0x471>;
 			};
 		};
 
@@ -10352,12 +10352,12 @@
 
 			hp-det {
 				rockchip,pins = <0x3 0x1d 0x0 0x192>;
-				phandle = <0x1e1>;
+				phandle = <0x1ea>;
 			};
 
 			spk-con {
 				rockchip,pins = <0x4 0xc 0x0 0x192>;
-				phandle = <0x1d9>;
+				phandle = <0x1e2>;
 			};
 		};
 
@@ -10373,7 +10373,7 @@
 
 			lcd-rst-gpio {
 				rockchip,pins = <0x1 0x0 0x0 0x192>;
-				phandle = <0x471>;
+				phandle = <0x472>;
 			};
 		};
 
@@ -10381,7 +10381,7 @@
 
 			wifi-enable-h {
 				rockchip,pins = <0x1 0x16 0x0 0x197>;
-				phandle = <0x1e0>;
+				phandle = <0x1e9>;
 			};
 		};
 
@@ -10397,7 +10397,7 @@
 
 			vcc5v0-host-en {
 				rockchip,pins = <0x4 0x8 0x0 0x192>;
-				phandle = <0x1e3>;
+				phandle = <0x1ec>;
 			};
 		};
 
@@ -10405,20 +10405,20 @@
 
 			uart9-gpios {
 				rockchip,pins = <0x4 0x14 0x0 0x192>;
-				phandle = <0x1e6>;
+				phandle = <0x1ef>;
 			};
 
 			bt-reset-gpio {
 				rockchip,pins = <0x0 0xa 0x0 0x192>;
-				phandle = <0x1e5>;
+				phandle = <0x1ee>;
 			};
 		};
 
 		ndj_io_init {
 
 			ndj_io_gpio_col {
-				rockchip,pins = <0x0 0x1b 0x0 0x192 0x4 0x1 0x0 0x192 0x4 0xe 0x0 0x192 0x0 0x16 0x0 0x192 0x2 0xc 0x0 0x192 0x2 0x13 0x0 0x192 0x4 0xd 0x0 0x192 0x1 0x9 0x0 0x192 0x1 0x0 0x0 0x192 0x1 0xa 0x0 0x192 0x1 0x1 0x0 0x192 0x1 0xb 0x0 0x192 0x1 0x2 0x0 0x192 0x1 0xc 0x0 0x192 0x1 0x3 0x0 0x192>;
-				phandle = <0x1e7>;
+				rockchip,pins = <0x1 0x3 0x0 0x192>;
+				phandle = <0x1f0>;
 			};
 		};
 	};
@@ -10435,28 +10435,28 @@
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19c>;
 		status = "disabled";
-		phandle = <0x472>;
+		phandle = <0x473>;
 	};
 
 	rkcif-mipi-lvds4-sditf-vir1 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19c>;
 		status = "disabled";
-		phandle = <0x473>;
+		phandle = <0x474>;
 	};
 
 	rkcif-mipi-lvds4-sditf-vir2 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19c>;
 		status = "disabled";
-		phandle = <0x474>;
+		phandle = <0x475>;
 	};
 
 	rkcif-mipi-lvds4-sditf-vir3 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19c>;
 		status = "disabled";
-		phandle = <0x475>;
+		phandle = <0x476>;
 	};
 
 	rkcif-mipi-lvds5 {
@@ -10471,28 +10471,28 @@
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19d>;
 		status = "disabled";
-		phandle = <0x476>;
+		phandle = <0x477>;
 	};
 
 	rkcif-mipi-lvds5-sditf-vir1 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19d>;
 		status = "disabled";
-		phandle = <0x477>;
+		phandle = <0x478>;
 	};
 
 	rkcif-mipi-lvds5-sditf-vir2 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19d>;
 		status = "disabled";
-		phandle = <0x478>;
+		phandle = <0x479>;
 	};
 
 	rkcif-mipi-lvds5-sditf-vir3 {
 		compatible = "rockchip,rkcif-sditf";
 		rockchip,cif = <0x19d>;
 		status = "disabled";
-		phandle = <0x479>;
+		phandle = <0x47a>;
 	};
 
 	usbdrd3_1 {
@@ -10503,7 +10503,7 @@
 		#size-cells = <0x2>;
 		ranges;
 		status = "okay";
-		phandle = <0x47a>;
+		phandle = <0x47b>;
 
 		usb@fc400000 {
 			compatible = "snps,dwc3";
@@ -10521,26 +10521,26 @@
 			snps,dis-del-phy-power-chg-quirk;
 			snps,dis-tx-ipgap-linecheck-quirk;
 			status = "okay";
-			phandle = <0x47b>;
+			phandle = <0x47c>;
 		};
 	};
 
 	syscon@fd5b8000 {
 		compatible = "rockchip,pcie30-phy-grf", "syscon";
 		reg = <0x0 0xfd5b8000 0x0 0x10000>;
-		phandle = <0x1c3>;
+		phandle = <0x1cc>;
 	};
 
 	syscon@fd5c0000 {
 		compatible = "rockchip,pipe-phy-grf", "syscon";
 		reg = <0x0 0xfd5c0000 0x0 0x100>;
-		phandle = <0x1c2>;
+		phandle = <0x1cb>;
 	};
 
 	syscon@fd5cc000 {
 		compatible = "rockchip,rk3588-usbdpphy-grf", "syscon";
 		reg = <0x0 0xfd5cc000 0x0 0x4000>;
-		phandle = <0x1c0>;
+		phandle = <0x1c9>;
 	};
 
 	syscon@fd5d4000 {
@@ -10548,7 +10548,7 @@
 		reg = <0x0 0xfd5d4000 0x0 0x4000>;
 		#address-cells = <0x1>;
 		#size-cells = <0x1>;
-		phandle = <0x1bf>;
+		phandle = <0x1c8>;
 
 		usb2-phy@4000 {
 			compatible = "rockchip,rk3588-usb2phy";
@@ -10562,7 +10562,7 @@
 			#clock-cells = <0x0>;
 			rockchip,usbctrl-grf = <0x6a>;
 			status = "okay";
-			phandle = <0x1c1>;
+			phandle = <0x1ca>;
 
 			otg-port {
 				#phy-cells = <0x0>;
@@ -10576,7 +10576,7 @@
 	syscon@fd5e4000 {
 		compatible = "rockchip,rk3588-hdptxphy-grf", "syscon";
 		reg = <0x0 0xfd5e4000 0x0 0x100>;
-		phandle = <0x1be>;
+		phandle = <0x1c7>;
 	};
 
 	spdif-tx@fddb8000 {
@@ -10592,7 +10592,7 @@
 		power-domains = <0x57 0x19>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x1cf>;
+		phandle = <0x1d8>;
 	};
 
 	i2s@fddc8000 {
@@ -10611,7 +10611,7 @@
 		rockchip,playback-only;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x47c>;
+		phandle = <0x47d>;
 	};
 
 	spdif-tx@fdde8000 {
@@ -10627,7 +10627,7 @@
 		power-domains = <0x57 0x1a>;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x47d>;
+		phandle = <0x47e>;
 	};
 
 	i2s@fddf4000 {
@@ -10648,7 +10648,7 @@
 		rockchip,playback-only;
 		#sound-dai-cells = <0x0>;
 		status = "okay";
-		phandle = <0x1cb>;
+		phandle = <0x1d4>;
 	};
 
 	i2s@fddf8000 {
@@ -10667,7 +10667,7 @@
 		rockchip,capture-only;
 		#sound-dai-cells = <0x0>;
 		status = "okay";
-		phandle = <0x1db>;
+		phandle = <0x1e4>;
 	};
 
 	i2s@fde00000 {
@@ -10686,7 +10686,7 @@
 		rockchip,capture-only;
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x47e>;
+		phandle = <0x47f>;
 	};
 
 	spdif-rx@fde10000 {
@@ -10704,7 +10704,7 @@
 		reset-names = "spdifrx-m";
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x47f>;
+		phandle = <0x480>;
 	};
 
 	spdif-rx@fde18000 {
@@ -10722,7 +10722,7 @@
 		reset-names = "spdifrx-m";
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
-		phandle = <0x480>;
+		phandle = <0x481>;
 	};
 
 	dp@fde60000 {
@@ -10740,7 +10740,7 @@
 		status = "disabled";
 		pinctrl-names = "default";
 		pinctrl-0 = <0x1a1>;
-		phandle = <0x1d0>;
+		phandle = <0x1d9>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -10777,7 +10777,7 @@
 				reg = <0x1>;
 
 				endpoint {
-					phandle = <0x481>;
+					phandle = <0x482>;
 				};
 			};
 		};
@@ -10802,7 +10802,7 @@
 		#sound-dai-cells = <0x0>;
 		status = "disabled";
 		enable-gpios = <0xfc 0xa 0x0>;
-		phandle = <0x1cc>;
+		phandle = <0x1d5>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -10812,7 +10812,7 @@
 				reg = <0x0>;
 				#address-cells = <0x1>;
 				#size-cells = <0x0>;
-				phandle = <0x482>;
+				phandle = <0x483>;
 
 				endpoint@0 {
 					reg = <0x0>;
@@ -10852,7 +10852,7 @@
 		rockchip,grf = <0xcc>;
 		status = "okay";
 		force-hpd;
-		phandle = <0x483>;
+		phandle = <0x484>;
 
 		ports {
 			#address-cells = <0x1>;
@@ -10890,7 +10890,7 @@
 
 				endpoint {
 					remote-endpoint = <0x1af>;
-					phandle = <0x1ea>;
+					phandle = <0x1f2>;
 				};
 			};
 		};
@@ -10911,11 +10911,11 @@
 		reset-names = "rst_a", "rst_p", "rst_ref", "rst_biu";
 		pinctrl-0 = <0x1b0 0x1b1>;
 		pinctrl-names = "default";
-		status = "okay";
+		status = "disabled";
 		#sound-dai-cells = <0x1>;
 		hpd-trigger-level = <0x1>;
 		hdmirx-det-gpios = <0x1b2 0xe 0x1>;
-		phandle = <0x1da>;
+		phandle = <0x1e3>;
 	};
 
 	pcie@fe150000 {
@@ -10952,7 +10952,7 @@
 		vpcie3v3-supply = <0x105>;
 		pinctrl-names = "default";
 		pinctrl-0 = <0x1b6>;
-		phandle = <0x484>;
+		phandle = <0x485>;
 
 		legacy-interrupt-controller {
 			interrupt-controller;
@@ -10994,7 +10994,7 @@
 		reset-names = "pcie", "periph";
 		rockchip,pipe-grf = <0x6c>;
 		status = "disabled";
-		phandle = <0x485>;
+		phandle = <0x486>;
 
 		legacy-interrupt-controller {
 			interrupt-controller;
@@ -11037,7 +11037,7 @@
 		status = "okay";
 		reset-gpios = <0xfc 0x5 0x0>;
 		vpcie3v3-supply = <0x105>;
-		phandle = <0x486>;
+		phandle = <0x487>;
 
 		legacy-interrupt-controller {
 			interrupt-controller;
@@ -11054,11 +11054,11 @@
 		reg = <0x0 0xfe1b0000 0x0 0x10000>;
 		rockchip,ethernet = <0x1ba>;
 		status = "disabled";
-		phandle = <0x487>;
+		phandle = <0x488>;
 	};
 
 	ethernet@fe1b0000 {
-		compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
+		compatible = "atemsys", "!!rockchip,rk3588-gmac", "!!snps,dwmac-4.20a";
 		reg = <0x0 0xfe1b0000 0x0 0x10000>;
 		interrupts = <0x0 0xe3 0x4 0x0 0xe2 0x4>;
 		interrupt-names = "macirq", "eth_wake_irq";
@@ -11074,14 +11074,29 @@
 		snps,axi-config = <0x1bb>;
 		snps,mtl-rx-config = <0x1bc>;
 		snps,mtl-tx-config = <0x1bd>;
-		status = "disabled";
+		status = "okay";
+		phy-mode = "rgmii-rxid";
+		clock_in_out = "output";
+		snps,reset-gpio = <0x1be 0xd 0x1>;
+		snps,reset-active-low;
+		snps,reset-delays-us = <0x0 0x4e20 0x186a0>;
+		pinctrl-names = "default";
+		pinctrl-0 = <0x1bf 0x1c0 0x1c1 0x1c2 0x1c3 0x1c4 0x1c5>;
+		tx_delay = <0x44>;
+		phy-handle = <0x1c6>;
 		phandle = <0x1ba>;
 
 		mdio {
 			compatible = "snps,dwmac-mdio";
 			#address-cells = <0x1>;
 			#size-cells = <0x0>;
-			phandle = <0x488>;
+			phandle = <0x489>;
+
+			phy@1 {
+				compatible = "ethernet-phy-ieee802.3-c22";
+				reg = <0x1>;
+				phandle = <0x1c6>;
+			};
 		};
 
 		stmmac-axi-config {
@@ -11119,7 +11134,7 @@
 		phy-names = "sata-phy";
 		ports-implemented = <0x1>;
 		status = "disabled";
-		phandle = <0x489>;
+		phandle = <0x48a>;
 	};
 
 	phy@fed70000 {
@@ -11129,7 +11144,7 @@
 		clock-names = "ref", "apb";
 		resets = <0x2 0x486 0x2 0xc003f 0x2 0xc0040 0x2 0xc0041>;
 		reset-names = "apb", "init", "cmn", "lane";
-		rockchip,grf = <0x1be>;
+		rockchip,grf = <0x1c7>;
 		#phy-cells = <0x0>;
 		status = "okay";
 		phandle = <0x1ac>;
@@ -11142,7 +11157,7 @@
 		clock-names = "ref", "apb";
 		resets = <0x2 0x491 0x2 0x486 0x2 0xc003f 0x2 0xc0040 0x2 0xc0041 0x2 0x48f 0x2 0x490>;
 		reset-names = "phy", "apb", "init", "cmn", "lane", "ropll", "lcpll";
-		rockchip,grf = <0x1be>;
+		rockchip,grf = <0x1c7>;
 		#phy-cells = <0x0>;
 		status = "disabled";
 		phandle = <0x1a9>;
@@ -11157,16 +11172,16 @@
 	phy@fed90000 {
 		compatible = "rockchip,rk3588-usbdp-phy";
 		reg = <0x0 0xfed90000 0x0 0x10000>;
-		rockchip,u2phy-grf = <0x1bf>;
+		rockchip,u2phy-grf = <0x1c8>;
 		rockchip,usb-grf = <0x6a>;
-		rockchip,usbdpphy-grf = <0x1c0>;
+		rockchip,usbdpphy-grf = <0x1c9>;
 		rockchip,vo-grf = <0xf2>;
-		clocks = <0x2 0x2b6 0x2 0x280 0x2 0x26a 0x1c1>;
+		clocks = <0x2 0x2b6 0x2 0x280 0x2 0x26a 0x1ca>;
 		clock-names = "refclk", "immortal", "pclk", "utmi";
 		resets = <0x2 0x2f 0x2 0x30 0x2 0x31 0x2 0x32 0x2 0x484>;
 		reset-names = "init", "cmn", "lane", "pcs_apb", "pma_apb";
 		status = "okay";
-		phandle = <0x48a>;
+		phandle = <0x48b>;
 
 		dp-port {
 			#phy-cells = <0x0>;
@@ -11192,7 +11207,7 @@
 		resets = <0x2 0x20006 0x2 0x4d7>;
 		reset-names = "combphy-apb", "combphy";
 		rockchip,pipe-grf = <0x6c>;
-		rockchip,pipe-phy-grf = <0x1c2>;
+		rockchip,pipe-phy-grf = <0x1cb>;
 		rockchip,pcie1ln-sel-bits = <0x100 0x0 0x0 0x0>;
 		status = "okay";
 		phandle = <0x1b9>;
@@ -11207,7 +11222,7 @@
 		resets = <0x2 0x2000a>;
 		reset-names = "phy";
 		rockchip,pipe-grf = <0x6c>;
-		rockchip,phy-grf = <0x1c3>;
+		rockchip,phy-grf = <0x1cc>;
 		status = "disabled";
 		rockchip,pcie30-phymode = <0x4>;
 		phandle = <0x1b5>;
@@ -11215,11 +11230,11 @@
 
 	adc-keys {
 		compatible = "adc-keys";
-		io-channels = <0x1c4 0x1>;
+		io-channels = <0x1cd 0x1>;
 		io-channel-names = "buttons";
 		keyup-threshold-microvolt = <0x1b7740>;
 		poll-interval = <0x64>;
-		phandle = <0x48b>;
+		phandle = <0x48c>;
 
 		vol-up-key {
 			label = "volume up";
@@ -11250,7 +11265,7 @@
 		compatible = "pwm-backlight";
 		brightness-levels = <0x0 0x14 0x14 0x15 0x15 0x16 0x16 0x17 0x17 0x18 0x18 0x19 0x19 0x1a 0x1a 0x1b 0x1b 0x1c 0x1c 0x1d 0x1d 0x1e 0x1e 0x1f 0x1f 0x20 0x20 0x21 0x21 0x22 0x22 0x23 0x23 0x24 0x24 0x25 0x25 0x26 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff>;
 		default-brightness-level = <0xc8>;
-		pwms = <0x1c5 0x0 0x61a8 0x0>;
+		pwms = <0x1ce 0x0 0x61a8 0x0>;
 		status = "okay";
 		phandle = <0xee>;
 	};
@@ -11259,7 +11274,7 @@
 		compatible = "pwm-backlight";
 		brightness-levels = <0x0 0x14 0x14 0x15 0x15 0x16 0x16 0x17 0x17 0x18 0x18 0x19 0x19 0x1a 0x1a 0x1b 0x1b 0x1c 0x1c 0x1d 0x1d 0x1e 0x1e 0x1f 0x1f 0x20 0x20 0x21 0x21 0x22 0x22 0x23 0x23 0x24 0x24 0x25 0x25 0x26 0x26 0x27 0x28 0x29 0x2a 0x2b 0x2c 0x2d 0x2e 0x2f 0x30 0x31 0x32 0x33 0x34 0x35 0x36 0x37 0x38 0x39 0x3a 0x3b 0x3c 0x3d 0x3e 0x3f 0x40 0x41 0x42 0x43 0x44 0x45 0x46 0x47 0x48 0x49 0x4a 0x4b 0x4c 0x4d 0x4e 0x4f 0x50 0x51 0x52 0x53 0x54 0x55 0x56 0x57 0x58 0x59 0x5a 0x5b 0x5c 0x5d 0x5e 0x5f 0x60 0x61 0x62 0x63 0x64 0x65 0x66 0x67 0x68 0x69 0x6a 0x6b 0x6c 0x6d 0x6e 0x6f 0x70 0x71 0x72 0x73 0x74 0x75 0x76 0x77 0x78 0x79 0x7a 0x7b 0x7c 0x7d 0x7e 0x7f 0x80 0x81 0x82 0x83 0x84 0x85 0x86 0x87 0x88 0x89 0x8a 0x8b 0x8c 0x8d 0x8e 0x8f 0x90 0x91 0x92 0x93 0x94 0x95 0x96 0x97 0x98 0x99 0x9a 0x9b 0x9c 0x9d 0x9e 0x9f 0xa0 0xa1 0xa2 0xa3 0xa4 0xa5 0xa6 0xa7 0xa8 0xa9 0xaa 0xab 0xac 0xad 0xae 0xaf 0xb0 0xb1 0xb2 0xb3 0xb4 0xb5 0xb6 0xb7 0xb8 0xb9 0xba 0xbb 0xbc 0xbd 0xbe 0xbf 0xc0 0xc1 0xc2 0xc3 0xc4 0xc5 0xc6 0xc7 0xc8 0xc9 0xca 0xcb 0xcc 0xcd 0xce 0xcf 0xd0 0xd1 0xd2 0xd3 0xd4 0xd5 0xd6 0xd7 0xd8 0xd9 0xda 0xdb 0xdc 0xdd 0xde 0xdf 0xe0 0xe1 0xe2 0xe3 0xe4 0xe5 0xe6 0xe7 0xe8 0xe9 0xea 0xeb 0xec 0xed 0xee 0xef 0xf0 0xf1 0xf2 0xf3 0xf4 0xf5 0xf6 0xf7 0xf8 0xf9 0xfa 0xfb 0xfc 0xfd 0xfe 0xff>;
 		default-brightness-level = <0xc8>;
-		pwms = <0x1c6 0x0 0x61a8 0x0>;
+		pwms = <0x1cf 0x0 0x61a8 0x0>;
 		status = "disabled";
 		phandle = <0xe9>;
 	};
@@ -11268,7 +11283,7 @@
 		status = "disabled";
 		compatible = "delta,dfbmcs320";
 		#sound-dai-cells = <0x1>;
-		phandle = <0x1c8>;
+		phandle = <0x1d1>;
 	};
 
 	bt-sound {
@@ -11278,14 +11293,14 @@
 		simple-audio-card,bitclock-inversion = <0x0>;
 		simple-audio-card,mclk-fs = <0x100>;
 		simple-audio-card,name = "rockchip,bt";
-		phandle = <0x48c>;
+		phandle = <0x48d>;
 
 		simple-audio-card,cpu {
-			sound-dai = <0x1c7>;
+			sound-dai = <0x1d0>;
 		};
 
 		simple-audio-card,codec {
-			sound-dai = <0x1c8 0x1>;
+			sound-dai = <0x1d1 0x1>;
 		};
 	};
 
@@ -11294,10 +11309,10 @@
 		compatible = "rockchip,hdmi";
 		rockchip,mclk-fs = <0x80>;
 		rockchip,card-name = "rockchip-hdmi0";
-		rockchip,cpu = <0x1c9>;
-		rockchip,codec = <0x1ca>;
+		rockchip,cpu = <0x1d2>;
+		rockchip,codec = <0x1d3>;
 		rockchip,jack-det;
-		phandle = <0x48d>;
+		phandle = <0x48e>;
 	};
 
 	hdmi1-sound {
@@ -11305,10 +11320,10 @@
 		compatible = "rockchip,hdmi";
 		rockchip,mclk-fs = <0x80>;
 		rockchip,card-name = "rockchip-hdmi1";
-		rockchip,cpu = <0x1cb>;
-		rockchip,codec = <0x1cc>;
+		rockchip,cpu = <0x1d4>;
+		rockchip,codec = <0x1d5>;
 		rockchip,jack-det;
-		phandle = <0x48e>;
+		phandle = <0x48f>;
 	};
 
 	dp0-sound {
@@ -11316,10 +11331,10 @@
 		compatible = "rockchip,hdmi";
 		rockchip,card-name = "rockchip-dp0";
 		rockchip,mclk-fs = <0x200>;
-		rockchip,cpu = <0x1cd>;
-		rockchip,codec = <0x1ce 0x1>;
+		rockchip,cpu = <0x1d6>;
+		rockchip,codec = <0x1d7 0x1>;
 		rockchip,jack-det;
-		phandle = <0x48f>;
+		phandle = <0x490>;
 	};
 
 	dp1-sound {
@@ -11327,20 +11342,20 @@
 		compatible = "rockchip,hdmi";
 		rockchip,card-name = "rockchip-dp1";
 		rockchip,mclk-fs = <0x200>;
-		rockchip,cpu = <0x1cf>;
-		rockchip,codec = <0x1d0 0x1>;
+		rockchip,cpu = <0x1d8>;
+		rockchip,codec = <0x1d9 0x1>;
 		rockchip,jack-det;
-		phandle = <0x490>;
+		phandle = <0x491>;
 	};
 
 	leds {
 		compatible = "gpio-leds";
-		phandle = <0x491>;
+		phandle = <0x492>;
 
 		sys_led {
 			gpios = <0x1b2 0x1e 0x0>;
 			linux,default-trigger = "heartbeat";
-			phandle = <0x492>;
+			phandle = <0x493>;
 		};
 	};
 
@@ -11348,7 +11363,7 @@
 		status = "disabled";
 		compatible = "linux,spdif-dit";
 		#sound-dai-cells = <0x0>;
-		phandle = <0x1d2>;
+		phandle = <0x1db>;
 	};
 
 	spdif-tx0-sound {
@@ -11356,14 +11371,14 @@
 		compatible = "simple-audio-card";
 		simple-audio-card,mclk-fs = <0x80>;
 		simple-audio-card,name = "rockchip,spdif-tx0";
-		phandle = <0x493>;
+		phandle = <0x494>;
 
 		simple-audio-card,cpu {
-			sound-dai = <0x1d1>;
+			sound-dai = <0x1da>;
 		};
 
 		simple-audio-card,codec {
-			sound-dai = <0x1d2>;
+			sound-dai = <0x1db>;
 		};
 	};
 
@@ -11371,7 +11386,7 @@
 		status = "disabled";
 		compatible = "linux,spdif-dit";
 		#sound-dai-cells = <0x0>;
-		phandle = <0x1d4>;
+		phandle = <0x1dd>;
 	};
 
 	spdif-tx1-sound {
@@ -11379,14 +11394,14 @@
 		compatible = "simple-audio-card";
 		simple-audio-card,mclk-fs = <0x80>;
 		simple-audio-card,name = "rockchip,spdif-tx1";
-		phandle = <0x494>;
+		phandle = <0x495>;
 
 		simple-audio-card,cpu {
-			sound-dai = <0x1d3>;
+			sound-dai = <0x1dc>;
 		};
 
 		simple-audio-card,codec {
-			sound-dai = <0x1d4>;
+			sound-dai = <0x1dd>;
 		};
 	};
 
@@ -11401,7 +11416,7 @@
 		regulator-boot-on;
 		regulator-min-microvolt = <0xb71b00>;
 		regulator-max-microvolt = <0xb71b00>;
-		phandle = <0x1d5>;
+		phandle = <0x1de>;
 	};
 
 	vcc5v0-sys {
@@ -11411,7 +11426,7 @@
 		regulator-boot-on;
 		regulator-min-microvolt = <0x4c4b40>;
 		regulator-max-microvolt = <0x4c4b40>;
-		vin-supply = <0x1d5>;
+		vin-supply = <0x1de>;
 		phandle = <0x6e>;
 	};
 
@@ -11422,8 +11437,8 @@
 		regulator-boot-on;
 		regulator-min-microvolt = <0x4c4b40>;
 		regulator-max-microvolt = <0x4c4b40>;
-		vin-supply = <0x1d5>;
-		phandle = <0x1d6>;
+		vin-supply = <0x1de>;
+		phandle = <0x1df>;
 	};
 
 	vcc5v0-usb {
@@ -11433,8 +11448,8 @@
 		regulator-boot-on;
 		regulator-min-microvolt = <0x4c4b40>;
 		regulator-max-microvolt = <0x4c4b40>;
-		vin-supply = <0x1d6>;
-		phandle = <0x1e2>;
+		vin-supply = <0x1df>;
+		phandle = <0x1eb>;
 	};
 
 	reserved-memory {
@@ -11459,7 +11474,7 @@
 		drm-cubic-lut@00000000 {
 			compatible = "rockchip,drm-cubic-lut";
 			reg = <0x0 0x0 0x0 0x0>;
-			phandle = <0x495>;
+			phandle = <0x496>;
 		};
 
 		ramoops@110000 {
@@ -11469,25 +11484,25 @@
 			console-size = <0x80000>;
 			ftrace-size = <0x0>;
 			pmsg-size = <0x50000>;
-			phandle = <0x496>;
+			phandle = <0x497>;
 		};
 	};
 
 	es8316-sound {
-		status = "okay";
+		status = "disabled";
 		compatible = "rockchip,multicodecs-card";
 		rockchip,card-name = "rockchip-es8316";
 		rockchip,format = "i2s";
 		rockchip,mclk-fs = <0x100>;
-		rockchip,cpu = <0x1d7>;
-		rockchip,codec = <0x1d8>;
+		rockchip,cpu = <0x1e0>;
+		rockchip,codec = <0x1e1>;
 		poll-interval = <0x64>;
-		io-channels = <0x1c4 0x3>;
+		io-channels = <0x1cd 0x3>;
 		io-channel-names = "adc-detect";
 		keyup-threshold-microvolt = <0x1b7740>;
-		pinctrl-0 = <0x1d9>;
+		pinctrl-0 = <0x1e2>;
 		pinctrl-names = "default";
-		phandle = <0x497>;
+		phandle = <0x498>;
 
 		play-pause-key {
 			label = "playpause";
@@ -11499,21 +11514,21 @@
 	pwm-fan {
 		compatible = "pwm-fan";
 		#cooling-cells = <0x2>;
-		pwms = <0x1c5 0x0 0xc350 0x0>;
+		pwms = <0x1ce 0x0 0xc350 0x0>;
 		cooling-levels = <0x0 0x32 0x64 0x96 0xc8 0xff>;
 		rockchip,temp-trips = <0xc350 0x1 0xd6d8 0x2 0xea60 0x3 0xfde8 0x4 0x11170 0x5>;
-		phandle = <0x498>;
+		phandle = <0x499>;
 	};
 
 	hdmiin-sound {
 		compatible = "rockchip,hdmi";
 		rockchip,mclk-fs = <0x80>;
 		rockchip,format = "i2s";
-		rockchip,bitclock-master = <0x1da>;
-		rockchip,frame-master = <0x1da>;
+		rockchip,bitclock-master = <0x1e3>;
+		rockchip,frame-master = <0x1e3>;
 		rockchip,card-name = "rockchip,hdmiin";
-		rockchip,cpu = <0x1db>;
-		rockchip,codec = <0x1da 0x0>;
+		rockchip,cpu = <0x1e4>;
+		rockchip,codec = <0x1e3 0x0>;
 		rockchip,jack-det;
 	};
 
@@ -11524,8 +11539,8 @@
 		regulator-always-on;
 		regulator-min-microvolt = <0xcf850>;
 		regulator-max-microvolt = <0xcf850>;
-		vin-supply = <0x1dc>;
-		phandle = <0x499>;
+		vin-supply = <0x1e5>;
+		phandle = <0x49a>;
 	};
 
 	pcie20-avdd1v8 {
@@ -11535,8 +11550,8 @@
 		regulator-always-on;
 		regulator-min-microvolt = <0x1b7740>;
 		regulator-max-microvolt = <0x1b7740>;
-		vin-supply = <0x1dd>;
-		phandle = <0x49a>;
+		vin-supply = <0x1e6>;
+		phandle = <0x49b>;
 	};
 
 	pcie30-avdd0v75 {
@@ -11546,8 +11561,8 @@
 		regulator-always-on;
 		regulator-min-microvolt = <0xb71b0>;
 		regulator-max-microvolt = <0xb71b0>;
-		vin-supply = <0x1de>;
-		phandle = <0x49b>;
+		vin-supply = <0x1e7>;
+		phandle = <0x49c>;
 	};
 
 	pcie30-avdd1v8 {
@@ -11557,16 +11572,16 @@
 		regulator-always-on;
 		regulator-min-microvolt = <0x1b7740>;
 		regulator-max-microvolt = <0x1b7740>;
-		vin-supply = <0x1dd>;
-		phandle = <0x49c>;
+		vin-supply = <0x1e6>;
+		phandle = <0x49d>;
 	};
 
 	sdio-pwrseq {
 		compatible = "mmc-pwrseq-simple";
-		clocks = <0x1df>;
+		clocks = <0x1e8>;
 		clock-names = "ext_clock";
 		pinctrl-names = "default";
-		pinctrl-0 = <0x1e0>;
+		pinctrl-0 = <0x1e9>;
 		post-power-on-delay-ms = <0xc8>;
 		reset-gpios = <0x1b2 0x16 0x1>;
 		phandle = <0x11a>;
@@ -11578,9 +11593,9 @@
 		headset_gpio = <0x108 0x1d 0x0>;
 		spk_ctl_gpio = <0xfc 0xc 0x1>;
 		pinctrl-names = "default";
-		pinctrl-0 = <0x1e1>;
-		io-channels = <0x1c4 0x3>;
-		phandle = <0x49d>;
+		pinctrl-0 = <0x1ea>;
+		io-channels = <0x1cd 0x3>;
+		phandle = <0x49e>;
 	};
 
 	vcc-1v1-nldo-s3 {
@@ -11612,7 +11627,7 @@
 		regulator-max-microvolt = <0x325aa0>;
 		enable-active-high;
 		startup-delay-us = <0x1388>;
-		vin-supply = <0x1d5>;
+		vin-supply = <0x1de>;
 		phandle = <0x105>;
 	};
 
@@ -11625,9 +11640,9 @@
 		regulator-max-microvolt = <0x4c4b40>;
 		enable-active-high;
 		gpio = <0xfc 0x8 0x0>;
-		vin-supply = <0x1e2>;
+		vin-supply = <0x1eb>;
 		pinctrl-names = "default";
-		pinctrl-0 = <0x1e3>;
+		pinctrl-0 = <0x1ec>;
 		phandle = <0x6b>;
 	};
 
@@ -11640,61 +11655,31 @@
 
 	wireless-bluetooth {
 		compatible = "bluetooth-platdata";
-		clocks = <0x1df>;
+		clocks = <0x1e8>;
 		clock-names = "ext_clock";
 		uart_rts_gpios = <0xfc 0x14 0x1>;
 		pinctrl-names = "default", "rts_gpio";
-		pinctrl-0 = <0x1e4 0x1e5>;
-		pinctrl-1 = <0x1e6>;
+		pinctrl-0 = <0x1ed 0x1ee>;
+		pinctrl-1 = <0x1ef>;
 		BT,reset_gpio = <0x15b 0xa 0x0>;
-		status = "okay";
-		phandle = <0x49e>;
+		status = "disabled";
+		phandle = <0x49f>;
 	};
 
 	wireless-wlan {
 		compatible = "wlan-platdata";
 		wifi_chip_type = "ap6398s";
-		status = "okay";
-		phandle = <0x49f>;
+		status = "disabled";
+		phandle = <0x4a0>;
 	};
 
 	ndj_io_init {
 		compatible = "nk_io_control";
 		pinctrl-names = "default";
-		pinctrl-0 = <0x1e7>;
-
-		vcc_12v {
-			gpio_num = <0x15b 0x1b 0x0>;
-			gpio_function = <0x0>;
-		};
-
-		vcc_3v {
-			gpio_num = <0xfc 0x1 0x0>;
-			gpio_function = <0x0>;
-		};
+		pinctrl-0 = <0x1f0>;
 
 		hub_5V_reset {
 			gpio_num = <0xfc 0xe 0x0>;
-			gpio_function = <0x3>;
-		};
-
-		4g_power {
-			gpio_num = <0x15b 0x16 0x0>;
-			gpio_function = <0x0>;
-		};
-
-		wake_wifi_bt {
-			gpio_num = <0x1e8 0xd 0x1>;
-			gpio_function = <0x0>;
-		};
-
-		air_mode_4g {
-			gpio_num = <0x1e8 0xc 0x1>;
-			gpio_function = <0x0>;
-		};
-
-		reset_4g {
-			gpio_num = <0x1e8 0x13 0x1>;
 			gpio_function = <0x3>;
 		};
 	};
@@ -11712,15 +11697,15 @@
 		bpc = <0x8>;
 		prepare-delay-ms = <0xc8>;
 		enable-delay-ms = <0x14>;
-		lvds-gpio0 = <0x1e8 0x15 0x0>;
+		lvds-gpio0 = <0x1be 0x15 0x0>;
 		lvds-gpio1 = <0xfc 0x12 0x0>;
 		lvds-gpio2 = <0xfc 0x13 0x0>;
 		lvds-gpio3 = <0xfc 0x16 0x0>;
 		nodka-lvds = <0xf>;
-		phandle = <0x4a0>;
+		phandle = <0x4a1>;
 
 		display-timings {
-			native-mode = <0x1e9>;
+			native-mode = <0x1f1>;
 
 			timing0 {
 				clock-frequency = <0x459e440>;
@@ -11736,14 +11721,14 @@
 				vsync-active = <0x0>;
 				de-active = <0x0>;
 				pixelclk-active = <0x0>;
-				phandle = <0x1e9>;
+				phandle = <0x1f1>;
 			};
 		};
 
 		port {
 
 			endpoint {
-				remote-endpoint = <0x1ea>;
+				remote-endpoint = <0x1f2>;
 				phandle = <0x1af>;
 			};
 		};
@@ -11751,19 +11736,19 @@
 
 	chosen {
 		bootargs = "earlycon=uart8250,mmio32,0xfeb50000 console=ttyFIQ0 irqchip.gicv3_pseudo_nmi=0 root=PARTUUID=614e0000-0000 rw rootwait  net.ifnames=0";
-		phandle = <0x4a1>;
+		phandle = <0x4a2>;
 	};
 
 	cspmu@fd10c000 {
 		compatible = "rockchip,cspmu";
 		reg = <0x0 0xfd10c000 0x0 0x1000 0x0 0xfd10d000 0x0 0x1000 0x0 0xfd10e000 0x0 0x1000 0x0 0xfd10f000 0x0 0x1000 0x0 0xfd12c000 0x0 0x1000 0x0 0xfd12d000 0x0 0x1000 0x0 0xfd12e000 0x0 0x1000 0x0 0xfd12f000 0x0 0x1000>;
-		phandle = <0x4a2>;
+		phandle = <0x4a3>;
 	};
 
 	debug@fd104000 {
 		compatible = "rockchip,debug";
 		reg = <0x0 0xfd104000 0x0 0x1000 0x0 0xfd105000 0x0 0x1000 0x0 0xfd106000 0x0 0x1000 0x0 0xfd107000 0x0 0x1000 0x0 0xfd124000 0x0 0x1000 0x0 0xfd125000 0x0 0x1000 0x0 0xfd126000 0x0 0x1000 0x0 0xfd127000 0x0 0x1000>;
-		phandle = <0x4a3>;
+		phandle = <0x4a4>;
 	};
 
 	fiq-debugger {
@@ -11774,9 +11759,9 @@
 		rockchip,baudrate = <0x16e360>;
 		interrupts = <0x0 0x1a7 0x8>;
 		pinctrl-names = "default";
-		pinctrl-0 = <0x1eb>;
+		pinctrl-0 = <0x1f3>;
 		status = "okay";
-		phandle = <0x4a4>;
+		phandle = <0x4a5>;
 	};
 
 	__symbols__ {
@@ -12159,7 +12144,7 @@
 		gmac_uio1 = "/uio@fe1c0000";
 		gmac1 = "/ethernet@fe1c0000";
 		mdio1 = "/ethernet@fe1c0000/mdio";
-		rgmii_phy = "/ethernet@fe1c0000/mdio/phy@1";
+		rgmii_phy1 = "/ethernet@fe1c0000/mdio/phy@1";
 		gmac1_stmmac_axi_setup = "/ethernet@fe1c0000/stmmac-axi-config";
 		gmac1_mtl_rx_setup = "/ethernet@fe1c0000/rx-queues-config";
 		gmac1_mtl_tx_setup = "/ethernet@fe1c0000/tx-queues-config";
@@ -12920,6 +12905,7 @@
 		gmac_uio0 = "/uio@fe1b0000";
 		gmac0 = "/ethernet@fe1b0000";
 		mdio0 = "/ethernet@fe1b0000/mdio";
+		rgmii_phy0 = "/ethernet@fe1b0000/mdio/phy@1";
 		gmac0_stmmac_axi_setup = "/ethernet@fe1b0000/stmmac-axi-config";
 		gmac0_mtl_rx_setup = "/ethernet@fe1b0000/rx-queues-config";
 		gmac0_mtl_tx_setup = "/ethernet@fe1b0000/tx-queues-config";
diff --git a/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi b/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi
index f344d09..e3c25d4 100755
--- a/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi
+++ b/kernel/arch/arm64/boot/dts/rockchip/rk3588.dtsi
@@ -684,7 +684,7 @@
 	};
 
 	gmac0: ethernet@fe1b0000 {
-		compatible = "rockchip,rk3588-gmac", "snps,dwmac-4.20a";
+		compatible = "atemsys","!!rockchip,rk3588-gmac", "!!snps,dwmac-4.20a";
 		reg = <0x0 0xfe1b0000 0x0 0x10000>;
 		interrupts = <GIC_SPI 227 IRQ_TYPE_LEVEL_HIGH>,
 			     <GIC_SPI 226 IRQ_TYPE_LEVEL_HIGH>;
diff --git a/kernel/arch/arm64/boot/dts/rockchip/rk3588s.dtsi b/kernel/arch/arm64/boot/dts/rockchip/rk3588s.dtsi
old mode 100644
new mode 100755
diff --git a/kernel/arch/arm64/configs/rockchip_defconfig b/kernel/arch/arm64/configs/rockchip_defconfig
index 2e6450c..ab294dc 100644
--- a/kernel/arch/arm64/configs/rockchip_defconfig
+++ b/kernel/arch/arm64/configs/rockchip_defconfig
@@ -98,7 +98,6 @@
 CONFIG_PM_ADVANCED_DEBUG=y
 CONFIG_ENERGY_MODEL=y
 CONFIG_CPU_IDLE=y
-CONFIG_ARM_CPUIDLE=y
 CONFIG_ARM_PSCI_CPUIDLE=y
 CONFIG_CPU_FREQ=y
 CONFIG_CPU_FREQ_STAT=y
@@ -108,6 +107,7 @@
 CONFIG_CPU_FREQ_GOV_ONDEMAND=y
 CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
 CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
 CONFIG_CPUFREQ_DT=y
 CONFIG_ARM_ROCKCHIP_CPUFREQ=y
 CONFIG_ARM_SCMI_PROTOCOL=y
@@ -920,8 +920,6 @@
 CONFIG_DEVFREQ_GOV_POWERSAVE=y
 CONFIG_DEVFREQ_GOV_USERSPACE=y
 CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=y
-CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=y
-CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=y
 CONFIG_IIO=y
 CONFIG_IIO_BUFFER_CB=y
 CONFIG_ROCKCHIP_SARADC=y
@@ -1028,3 +1026,4 @@
 CONFIG_BUG_ON_DATA_CORRUPTION=y
 CONFIG_ENABLE_DEFAULT_TRACERS=y
 # CONFIG_RUNTIME_TESTING_MENU is not set
+CONFIG_DOVETAIL=y
diff --git a/kernel/arch/arm64/configs/rockchip_linux_defconfig b/kernel/arch/arm64/configs/rockchip_linux_defconfig
index 52fd3e9..da35330 100644
--- a/kernel/arch/arm64/configs/rockchip_linux_defconfig
+++ b/kernel/arch/arm64/configs/rockchip_linux_defconfig
@@ -1,6 +1,6 @@
 #
 # Automatically generated file; DO NOT EDIT.
-# Linux/arm64 5.10.160 Kernel Configuration
+# Linux/arm64 5.10.161 Kernel Configuration
 #
 CONFIG_CC_VERSION_TEXT="gcc (Ubuntu 7.5.0-3ubuntu1~18.04) 7.5.0"
 CONFIG_CC_IS_GCC=y
@@ -59,6 +59,8 @@
 CONFIG_IRQ_FORCED_THREADING=y
 CONFIG_SPARSE_IRQ=y
 # CONFIG_GENERIC_IRQ_DEBUGFS is not set
+CONFIG_HAVE_IRQ_PIPELINE=y
+CONFIG_IRQ_PIPELINE=y
 # end of IRQ subsystem
 
 CONFIG_GENERIC_IRQ_MULTI_HANDLER=y
@@ -122,8 +124,6 @@
 #
 # Scheduler features
 #
-CONFIG_UCLAMP_TASK=y
-CONFIG_UCLAMP_BUCKETS_COUNT=20
 # end of Scheduler features
 
 CONFIG_ARCH_SUPPORTS_NUMA_BALANCING=y
@@ -140,7 +140,6 @@
 CONFIG_FAIR_GROUP_SCHED=y
 CONFIG_CFS_BANDWIDTH=y
 CONFIG_RT_GROUP_SCHED=y
-CONFIG_UCLAMP_TASK_GROUP=y
 CONFIG_CGROUP_PIDS=y
 # CONFIG_CGROUP_RDMA is not set
 CONFIG_CGROUP_FREEZER=y
@@ -191,6 +190,7 @@
 CONFIG_POSIX_TIMERS=y
 CONFIG_PRINTK=y
 CONFIG_PRINTK_NMI=y
+# CONFIG_RAW_PRINTK is not set
 CONFIG_BUG=y
 CONFIG_ELF_CORE=y
 CONFIG_BASE_FULL=y
@@ -400,6 +400,9 @@
 CONFIG_ARCH_WANT_HUGE_PMD_SHARE=y
 CONFIG_ARCH_HAS_CACHE_LINE_SIZE=y
 CONFIG_ARCH_ENABLE_SPLIT_PMD_PTLOCK=y
+CONFIG_HAVE_DOVETAIL=y
+CONFIG_DOVETAIL=y
+CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE=y
 # CONFIG_PARAVIRT is not set
 # CONFIG_PARAVIRT_TIME_ACCOUNTING is not set
 # CONFIG_KEXEC is not set
@@ -509,43 +512,28 @@
 #
 # CPU Idle
 #
-CONFIG_CPU_IDLE=y
-CONFIG_CPU_IDLE_MULTIPLE_DRIVERS=y
-# CONFIG_CPU_IDLE_GOV_LADDER is not set
-CONFIG_CPU_IDLE_GOV_MENU=y
-# CONFIG_CPU_IDLE_GOV_TEO is not set
-CONFIG_DT_IDLE_STATES=y
-
-#
-# ARM CPU Idle Drivers
-#
-CONFIG_ARM_CPUIDLE=y
-CONFIG_ARM_PSCI_CPUIDLE=y
-CONFIG_ARM_PSCI_CPUIDLE_DOMAIN=y
-# end of ARM CPU Idle Drivers
+# CONFIG_CPU_IDLE is not set
 # end of CPU Idle
 
 #
 # CPU Frequency scaling
 #
 CONFIG_CPU_FREQ=y
-CONFIG_CPU_FREQ_GOV_ATTR_SET=y
-CONFIG_CPU_FREQ_GOV_COMMON=y
 CONFIG_CPU_FREQ_STAT=y
 CONFIG_CPU_FREQ_TIMES=y
-# CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE is not set
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
 # CONFIG_CPU_FREQ_DEFAULT_GOV_POWERSAVE is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_USERSPACE is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_ONDEMAND is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_CONSERVATIVE is not set
-CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL=y
+# CONFIG_CPU_FREQ_DEFAULT_GOV_SCHEDUTIL is not set
 # CONFIG_CPU_FREQ_DEFAULT_GOV_INTERACTIVE is not set
 CONFIG_CPU_FREQ_GOV_PERFORMANCE=y
-CONFIG_CPU_FREQ_GOV_POWERSAVE=y
-CONFIG_CPU_FREQ_GOV_USERSPACE=y
-CONFIG_CPU_FREQ_GOV_ONDEMAND=y
-CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
-CONFIG_CPU_FREQ_GOV_SCHEDUTIL=y
+# CONFIG_CPU_FREQ_GOV_POWERSAVE is not set
+# CONFIG_CPU_FREQ_GOV_USERSPACE is not set
+# CONFIG_CPU_FREQ_GOV_ONDEMAND is not set
+# CONFIG_CPU_FREQ_GOV_CONSERVATIVE is not set
+# CONFIG_CPU_FREQ_GOV_SCHEDUTIL is not set
 # CONFIG_CPU_FREQ_GOV_INTERACTIVE is not set
 
 #
@@ -593,7 +581,6 @@
 
 CONFIG_EFI_EARLYCON=y
 CONFIG_ARM_PSCI_FW=y
-# CONFIG_ARM_PSCI_CHECKER is not set
 CONFIG_HAVE_ARM_SMCCC=y
 CONFIG_HAVE_ARM_SMCCC_DISCOVERY=y
 CONFIG_ARM_SMCCC_SOC_ID=y
@@ -660,6 +647,7 @@
 CONFIG_HAVE_ARCH_JUMP_LABEL_RELATIVE=y
 CONFIG_MMU_GATHER_TABLE_FREE=y
 CONFIG_MMU_GATHER_RCU_TABLE_FREE=y
+CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM=y
 CONFIG_ARCH_HAVE_NMI_SAFE_CMPXCHG=y
 CONFIG_HAVE_ALIGNED_STRUCT_PAGE=y
 CONFIG_HAVE_CMPXCHG_LOCAL=y
@@ -850,6 +838,137 @@
 # CONFIG_GKI_HIDDEN_DMA_CONFIGS is not set
 # CONFIG_GKI_HIDDEN_ETHERNET_CONFIGS is not set
 # CONFIG_GKI_HACKS_TO_FIX is not set
+CONFIG_XENOMAI=y
+
+#
+# Core features
+#
+# CONFIG_XENO_OPT_SCHED_CLASSES is not set
+CONFIG_XENO_OPT_STATS=y
+# CONFIG_XENO_OPT_SHIRQ is not set
+CONFIG_XENO_OPT_RR_QUANTUM=1000
+CONFIG_XENO_OPT_AUTOTUNE=y
+# CONFIG_XENO_OPT_SCALABLE_SCHED is not set
+CONFIG_XENO_OPT_TIMER_LIST=y
+# CONFIG_XENO_OPT_TIMER_RBTREE is not set
+CONFIG_XENO_OPT_VFILE=y
+# end of Core features
+
+#
+# Sizes and static limits
+#
+CONFIG_XENO_OPT_REGISTRY_NRSLOTS=8192
+CONFIG_XENO_OPT_SYS_HEAPSZ=8192
+CONFIG_XENO_OPT_PRIVATE_HEAPSZ=512
+CONFIG_XENO_OPT_SHARED_HEAPSZ=512
+CONFIG_XENO_OPT_NRTIMERS=512
+# end of Sizes and static limits
+
+#
+# Latency settings
+#
+CONFIG_XENO_OPT_TIMING_SCHEDLAT=0
+CONFIG_XENO_OPT_TIMING_KSCHEDLAT=0
+CONFIG_XENO_OPT_TIMING_IRQLAT=0
+# end of Latency settings
+
+# CONFIG_XENO_OPT_DEBUG is not set
+
+#
+# Drivers
+#
+CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE=y
+CONFIG_XENO_DRIVERS_AUTOTUNE=y
+
+#
+# Serial drivers
+#
+# CONFIG_XENO_DRIVERS_16550A is not set
+# end of Serial drivers
+
+#
+# Testing drivers
+#
+CONFIG_XENO_DRIVERS_TIMERBENCH=y
+CONFIG_XENO_DRIVERS_SWITCHTEST=y
+CONFIG_XENO_DRIVERS_HEAPCHECK=y
+# CONFIG_XENO_DRIVERS_RTDMTEST is not set
+# end of Testing drivers
+
+#
+# CAN drivers
+#
+# CONFIG_XENO_DRIVERS_CAN is not set
+# end of CAN drivers
+
+#
+# RTnet
+#
+# CONFIG_XENO_DRIVERS_NET is not set
+# end of RTnet
+
+#
+# ANALOGY drivers
+#
+# CONFIG_XENO_DRIVERS_ANALOGY is not set
+# end of ANALOGY drivers
+
+#
+# Real-time IPC drivers
+#
+# CONFIG_XENO_DRIVERS_RTIPC is not set
+# end of Real-time IPC drivers
+
+#
+# UDD support
+#
+# CONFIG_XENO_DRIVERS_UDD is not set
+# end of UDD support
+
+#
+# Real-time GPIO drivers
+#
+# CONFIG_XENO_DRIVERS_GPIO is not set
+# end of Real-time GPIO drivers
+
+#
+# GPIOPWM support
+#
+# CONFIG_XENO_DRIVERS_GPIOPWM is not set
+# end of GPIOPWM support
+
+#
+# Real-time SPI master drivers
+#
+# end of Real-time SPI master drivers
+# end of Drivers
+
+CONFIG_XENO_ARCH_FPU=y
+CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK=y
+
+#
+# WARNING! Page migration (CONFIG_MIGRATION) may increase
+#
+
+#
+# latency.
+#
+
+#
+# WARNING! At least one of APM, CPU frequency scaling, ACPI 'processor'
+#
+
+#
+# or CPU idle features is enabled. Any of these options may
+#
+
+#
+# cause troubles with Xenomai. You should disable them.
+#
+CONFIG_XENO_VERSION_MAJOR=3
+CONFIG_XENO_VERSION_MINOR=2
+CONFIG_XENO_REVISION_LEVEL=4
+CONFIG_XENO_VERSION_STRING="3.2.4"
 CONFIG_FREEZER=y
 
 #
@@ -924,7 +1043,6 @@
 # end of Memory Management options
 
 CONFIG_NET=y
-CONFIG_COMPAT_NETLINK_MESSAGES=y
 CONFIG_NET_INGRESS=y
 CONFIG_SKB_EXTENSIONS=y
 
@@ -1361,10 +1479,6 @@
 # CONFIG_AF_KCM is not set
 CONFIG_FIB_RULES=y
 CONFIG_WIRELESS=y
-CONFIG_WIRELESS_EXT=y
-CONFIG_WEXT_CORE=y
-CONFIG_WEXT_PROC=y
-CONFIG_WEXT_PRIV=y
 CONFIG_CFG80211=y
 # CONFIG_NL80211_TESTMODE is not set
 # CONFIG_CFG80211_DEVELOPER_WARNINGS is not set
@@ -1952,55 +2066,43 @@
 # CONFIG_NET_VENDOR_3COM is not set
 # CONFIG_NET_VENDOR_ADAPTEC is not set
 # CONFIG_NET_VENDOR_AGERE is not set
-CONFIG_NET_VENDOR_ALACRITECH=y
-# CONFIG_SLICOSS is not set
+# CONFIG_NET_VENDOR_ALACRITECH is not set
 # CONFIG_NET_VENDOR_ALTEON is not set
 # CONFIG_ALTERA_TSE is not set
-CONFIG_NET_VENDOR_AMAZON=y
-# CONFIG_ENA_ETHERNET is not set
+# CONFIG_NET_VENDOR_AMAZON is not set
 # CONFIG_NET_VENDOR_AMD is not set
-CONFIG_NET_VENDOR_AQUANTIA=y
-# CONFIG_AQTION is not set
+# CONFIG_NET_VENDOR_AQUANTIA is not set
 # CONFIG_NET_VENDOR_ARC is not set
 # CONFIG_NET_VENDOR_ATHEROS is not set
-CONFIG_NET_VENDOR_AURORA=y
-# CONFIG_AURORA_NB8800 is not set
+# CONFIG_NET_VENDOR_AURORA is not set
 # CONFIG_NET_VENDOR_BROADCOM is not set
 # CONFIG_NET_VENDOR_BROCADE is not set
-CONFIG_NET_VENDOR_CADENCE=y
-# CONFIG_MACB is not set
+# CONFIG_NET_VENDOR_CADENCE is not set
 # CONFIG_NET_VENDOR_CAVIUM is not set
 # CONFIG_NET_VENDOR_CHELSIO is not set
 # CONFIG_NET_VENDOR_CISCO is not set
-CONFIG_NET_VENDOR_CORTINA=y
-# CONFIG_GEMINI_ETHERNET is not set
+# CONFIG_NET_VENDOR_CORTINA is not set
 # CONFIG_DNET is not set
 # CONFIG_NET_VENDOR_DEC is not set
 # CONFIG_NET_VENDOR_DLINK is not set
 # CONFIG_NET_VENDOR_EMULEX is not set
 # CONFIG_NET_VENDOR_EZCHIP is not set
-CONFIG_NET_VENDOR_GOOGLE=y
-# CONFIG_GVE is not set
+# CONFIG_NET_VENDOR_GOOGLE is not set
 # CONFIG_NET_VENDOR_HISILICON is not set
-CONFIG_NET_VENDOR_HUAWEI=y
-# CONFIG_HINIC is not set
+# CONFIG_NET_VENDOR_HUAWEI is not set
 # CONFIG_NET_VENDOR_INTEL is not set
 # CONFIG_JME is not set
 # CONFIG_NET_VENDOR_MARVELL is not set
 # CONFIG_NET_VENDOR_MELLANOX is not set
 # CONFIG_NET_VENDOR_MICREL is not set
 # CONFIG_NET_VENDOR_MICROCHIP is not set
-CONFIG_NET_VENDOR_MICROSEMI=y
+# CONFIG_NET_VENDOR_MICROSEMI is not set
 # CONFIG_NET_VENDOR_MYRI is not set
 # CONFIG_FEALNX is not set
 # CONFIG_NET_VENDOR_NATSEMI is not set
-CONFIG_NET_VENDOR_NETERION=y
-# CONFIG_S2IO is not set
-# CONFIG_VXGE is not set
-CONFIG_NET_VENDOR_NETRONOME=y
-# CONFIG_NFP is not set
-CONFIG_NET_VENDOR_NI=y
-# CONFIG_NI_XGE_MANAGEMENT_ENET is not set
+# CONFIG_NET_VENDOR_NETERION is not set
+# CONFIG_NET_VENDOR_NETRONOME is not set
+# CONFIG_NET_VENDOR_NI is not set
 # CONFIG_NET_VENDOR_NVIDIA is not set
 # CONFIG_NET_VENDOR_OKI is not set
 # CONFIG_ETHOC is not set
@@ -2015,8 +2117,8 @@
 CONFIG_NET_VENDOR_REALTEK=y
 # CONFIG_8139CP is not set
 # CONFIG_8139TOO is not set
-# CONFIG_R8168 is not set
-CONFIG_R8169=y
+CONFIG_R8168=y
+# CONFIG_R8169 is not set
 # CONFIG_R8125 is not set
 # CONFIG_REALTEK_PGTOOL is not set
 # CONFIG_NET_VENDOR_RENESAS is not set
@@ -2188,82 +2290,7 @@
 # CONFIG_USB_VL600 is not set
 # CONFIG_USB_NET_CH9200 is not set
 # CONFIG_USB_NET_AQC111 is not set
-CONFIG_WLAN=y
-# CONFIG_WIRELESS_WDS is not set
-# CONFIG_WLAN_VENDOR_ADMTEK is not set
-CONFIG_ATH_COMMON=y
-CONFIG_WLAN_VENDOR_ATH=y
-# CONFIG_ATH_DEBUG is not set
-# CONFIG_ATH5K is not set
-# CONFIG_ATH5K_PCI is not set
-CONFIG_ATH9K_HW=y
-CONFIG_ATH9K_COMMON=y
-CONFIG_ATH9K_BTCOEX_SUPPORT=y
-CONFIG_ATH9K=y
-CONFIG_ATH9K_PCI=y
-# CONFIG_ATH9K_AHB is not set
-# CONFIG_ATH9K_DEBUGFS is not set
-# CONFIG_ATH9K_DYNACK is not set
-# CONFIG_ATH9K_WOW is not set
-CONFIG_ATH9K_RFKILL=y
-# CONFIG_ATH9K_CHANNEL_CONTEXT is not set
-CONFIG_ATH9K_PCOEM=y
-# CONFIG_ATH9K_PCI_NO_EEPROM is not set
-# CONFIG_ATH9K_HTC is not set
-# CONFIG_ATH9K_HWRNG is not set
-# CONFIG_CARL9170 is not set
-# CONFIG_ATH6KL is not set
-# CONFIG_AR5523 is not set
-# CONFIG_WIL6210 is not set
-# CONFIG_ATH10K is not set
-# CONFIG_WCN36XX is not set
-CONFIG_WLAN_VENDOR_ATMEL=y
-# CONFIG_ATMEL is not set
-# CONFIG_AT76C50X_USB is not set
-CONFIG_WLAN_VENDOR_BROADCOM=y
-# CONFIG_B43 is not set
-# CONFIG_B43LEGACY is not set
-# CONFIG_BRCMSMAC is not set
-# CONFIG_BRCMFMAC is not set
-# CONFIG_WLAN_VENDOR_CISCO is not set
-# CONFIG_WLAN_VENDOR_INTEL is not set
-# CONFIG_WLAN_VENDOR_INTERSIL is not set
-# CONFIG_WLAN_VENDOR_MARVELL is not set
-# CONFIG_WLAN_VENDOR_MEDIATEK is not set
-CONFIG_WLAN_VENDOR_MICROCHIP=y
-# CONFIG_WILC1000_SDIO is not set
-# CONFIG_WILC1000_SPI is not set
-# CONFIG_WLAN_VENDOR_RALINK is not set
-# CONFIG_WLAN_VENDOR_REALTEK is not set
-# CONFIG_WLAN_VENDOR_RSI is not set
-# CONFIG_WLAN_VENDOR_ST is not set
-# CONFIG_WLAN_VENDOR_TI is not set
-# CONFIG_RTL8188EU is not set
-# CONFIG_RTL8822BU is not set
-# CONFIG_RTL8821CU is not set
-# CONFIG_WLAN_VENDOR_ZYDAS is not set
-# CONFIG_WLAN_VENDOR_QUANTENNA is not set
-CONFIG_WL_ROCKCHIP=y
-CONFIG_WIFI_BUILD_MODULE=y
-# CONFIG_WIFI_LOAD_DRIVER_WHEN_KERNEL_BOOTUP is not set
-# CONFIG_WIFI_GENERATE_RANDOM_MAC_ADDR is not set
-CONFIG_BCMDHD=y
-CONFIG_AP6XXX=m
-CONFIG_BCMDHD_SDIO=y
-# CONFIG_BCMDHD_PCIE is not set
-CONFIG_BCMDHD_FW_PATH="/vendor/etc/firmware/fw_bcmdhd.bin"
-CONFIG_BCMDHD_NVRAM_PATH="/vendor/etc/firmware/nvram.txt"
-# CONFIG_BCMDHD_STATIC_IF is not set
-# CONFIG_CYW_BCMDHD is not set
-# CONFIG_INFINEON_DHD is not set
-CONFIG_RTL8852BE=m
-# CONFIG_RTL8852BU is not set
-# CONFIG_RTL8821CS is not set
-# CONFIG_SPARD_WLAN_SUPPORT is not set
-# CONFIG_AIC_WLAN_SUPPORT is not set
-# CONFIG_MAC80211_HWSIM is not set
-# CONFIG_USB_NET_RNDIS_WLAN is not set
-# CONFIG_VIRT_WIFI is not set
+# CONFIG_WLAN is not set
 
 #
 # Enable WiMAX (Networking options) to see the WiMAX drivers
@@ -4394,7 +4421,11 @@
 CONFIG_DUMMY_CONSOLE=y
 CONFIG_DUMMY_CONSOLE_COLUMNS=80
 CONFIG_DUMMY_CONSOLE_ROWS=25
-# CONFIG_FRAMEBUFFER_CONSOLE is not set
+CONFIG_FRAMEBUFFER_CONSOLE=y
+# CONFIG_FRAMEBUFFER_CONSOLE_LEGACY_ACCELERATION is not set
+CONFIG_FRAMEBUFFER_CONSOLE_DETECT_PRIMARY=y
+# CONFIG_FRAMEBUFFER_CONSOLE_ROTATION is not set
+# CONFIG_FRAMEBUFFER_CONSOLE_DEFERRED_TAKEOVER is not set
 # end of Console display driver support
 
 # CONFIG_LOGO is not set
@@ -5450,16 +5481,9 @@
 
 # CONFIG_GREYBUS is not set
 CONFIG_STAGING=y
-# CONFIG_PRISM2_USB is not set
 # CONFIG_COMEDI is not set
-# CONFIG_RTL8192U is not set
-# CONFIG_RTLLIB is not set
-# CONFIG_RTL8723BS is not set
-# CONFIG_R8712U is not set
-# CONFIG_R8188EU is not set
 # CONFIG_RTS5208 is not set
 # CONFIG_VT6655 is not set
-# CONFIG_VT6656 is not set
 
 #
 # IIO staging drivers
@@ -5798,10 +5822,10 @@
 # DEVFREQ Drivers
 #
 CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=y
-CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=y
+# CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ is not set
 CONFIG_PM_DEVFREQ_EVENT=y
 CONFIG_DEVFREQ_EVENT_ROCKCHIP_DFI=y
-CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=y
+# CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP is not set
 CONFIG_EXTCON=y
 
 #
@@ -7041,8 +7065,9 @@
 CONFIG_GENERIC_GETTIMEOFDAY=y
 CONFIG_GENERIC_VDSO_TIME_NS=y
 CONFIG_FONT_SUPPORT=y
+# CONFIG_FONTS is not set
+CONFIG_FONT_8x8=y
 CONFIG_FONT_8x16=y
-CONFIG_FONT_AUTOSELECT=y
 CONFIG_SG_POOL=y
 CONFIG_ARCH_STACKWALK=y
 CONFIG_SBITMAP=y
@@ -7149,6 +7174,8 @@
 # end of Memory Debugging
 
 # CONFIG_DEBUG_SHIRQ is not set
+# CONFIG_DEBUG_IRQ_PIPELINE is not set
+# CONFIG_DEBUG_DOVETAIL is not set
 
 #
 # Debug Oops, Lockups and Hangs
diff --git a/kernel/arch/arm64/configs/rockchip_linux_defconfig.rej b/kernel/arch/arm64/configs/rockchip_linux_defconfig.rej
new file mode 100644
index 0000000..26ecf6b
--- /dev/null
+++ b/kernel/arch/arm64/configs/rockchip_linux_defconfig.rej
@@ -0,0 +1,32 @@
+--- arch/arm64/configs/rockchip_linux_defconfig
++++ arch/arm64/configs/rockchip_linux_defconfig
+@@ -71,7 +71,6 @@ CONFIG_PM_ADVANCED_DEBUG=y
+ CONFIG_WQ_POWER_EFFICIENT_DEFAULT=y
+ CONFIG_ENERGY_MODEL=y
+ CONFIG_CPU_IDLE=y
+-CONFIG_ARM_CPUIDLE=y
+ CONFIG_ARM_PSCI_CPUIDLE=y
+ CONFIG_CPU_FREQ=y
+ CONFIG_CPU_FREQ_STAT=y
+@@ -81,6 +80,7 @@ CONFIG_CPU_FREQ_GOV_USERSPACE=y
+ CONFIG_CPU_FREQ_GOV_ONDEMAND=y
+ CONFIG_CPU_FREQ_GOV_CONSERVATIVE=y
+ CONFIG_CPU_FREQ_GOV_INTERACTIVE=y
++CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y
+ CONFIG_CPUFREQ_DT=y
+ CONFIG_ARM_ROCKCHIP_CPUFREQ=y
+ CONFIG_ARM_SCMI_PROTOCOL=y
+@@ -537,8 +537,6 @@ CONFIG_DEVFREQ_GOV_PERFORMANCE=y
+ CONFIG_DEVFREQ_GOV_POWERSAVE=y
+ CONFIG_DEVFREQ_GOV_USERSPACE=y
+ CONFIG_ARM_ROCKCHIP_BUS_DEVFREQ=y
+-CONFIG_ARM_ROCKCHIP_DMC_DEVFREQ=y
+-CONFIG_DEVFREQ_EVENT_ROCKCHIP_NOCP=y
+ CONFIG_IIO=y
+ CONFIG_ROCKCHIP_SARADC=y
+ CONFIG_SENSORS_ISL29018=y
+@@ -634,3 +632,4 @@ CONFIG_RCU_CPU_STALL_TIMEOUT=60
+ CONFIG_FUNCTION_TRACER=y
+ CONFIG_BLK_DEV_IO_TRACE=y
+ CONFIG_LKDTM=y
++CONFIG_DOVETAIL=y
diff --git a/kernel/arch/arm64/include/asm/daifflags.h b/kernel/arch/arm64/include/asm/daifflags.h
index cfdde3a..982c807 100644
--- a/kernel/arch/arm64/include/asm/daifflags.h
+++ b/kernel/arch/arm64/include/asm/daifflags.h
@@ -12,6 +12,12 @@
 #include <asm/cpufeature.h>
 #include <asm/ptrace.h>
 
+/*
+ * irq_pipeline: DAIF masking is only used in contexts where hard
+ * interrupt masking applies, so no need to virtualize for the inband
+ * stage here (the pipeline core does assume this).
+ */
+
 #define DAIF_PROCCTX		0
 #define DAIF_PROCCTX_NOIRQ	PSR_I_BIT
 #define DAIF_ERRCTX		(PSR_I_BIT | PSR_A_BIT)
@@ -35,7 +41,7 @@
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(GIC_PRIO_IRQON | GIC_PRIO_PSR_I_SET);
 
-	trace_hardirqs_off();
+	trace_hardirqs_off_pipelined();
 }
 
 static inline unsigned long local_daif_save_flags(void)
@@ -72,7 +78,7 @@
 		!(read_sysreg(daif) & PSR_I_BIT));
 
 	if (!irq_disabled) {
-		trace_hardirqs_on();
+		trace_hardirqs_on_pipelined();
 
 		if (system_uses_irq_prio_masking()) {
 			gic_write_pmr(GIC_PRIO_IRQON);
@@ -117,7 +123,7 @@
 	write_sysreg(flags, daif);
 
 	if (irq_disabled)
-		trace_hardirqs_off();
+		trace_hardirqs_off_pipelined();
 }
 
 /*
@@ -129,7 +135,7 @@
 	unsigned long flags = regs->pstate & DAIF_MASK;
 
 	if (interrupts_enabled(regs))
-		trace_hardirqs_on();
+		trace_hardirqs_on_pipelined();
 
 	if (system_uses_irq_prio_masking())
 		gic_write_pmr(regs->pmr_save);
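
The trace hook substitutions above only make sense once the two interrupt states are kept apart: under CONFIG_IRQ_PIPELINE the in-band stall bit is virtual, while DAIF masking remains a hardware matter. A minimal sketch of the distinction, reusing only helpers introduced elsewhere in this patch (the calling function itself is hypothetical):

/* Sketch: virtual vs. hard interrupt masking under pipelining. */
#include <linux/irqflags.h>

static void example_two_level_masking(void)
{
	unsigned long vflags, hflags;

	vflags = arch_local_irq_save();	/* stalls the in-band stage only */
	/* out-of-band IRQs may still preempt this section */
	arch_local_irq_restore(vflags);

	hflags = hard_local_irq_save();	/* masks PSTATE.I for real */
	/* nothing preempts here, not even the oob stage */
	hard_local_irq_restore(hflags);
}
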
diff --git a/kernel/arch/arm64/include/asm/dovetail.h b/kernel/arch/arm64/include/asm/dovetail.h
new file mode 100644
index 0000000..6fca696
--- /dev/null
+++ b/kernel/arch/arm64/include/asm/dovetail.h
@@ -0,0 +1,69 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_ARM64_DOVETAIL_H
+#define _ASM_ARM64_DOVETAIL_H
+
+#include <asm/fpsimd.h>
+
+/* ARM64 traps */
+#define ARM64_TRAP_ACCESS	0	/* Data or instruction access exception */
+#define ARM64_TRAP_ALIGN	1	/* SP/PC alignment abort */
+#define ARM64_TRAP_SEA		2	/* Synchronous external abort */
+#define ARM64_TRAP_DEBUG	3	/* Debug trap */
+#define ARM64_TRAP_UNDI		4	/* Undefined instruction */
+#define ARM64_TRAP_UNDSE	5	/* Undefined synchronous exception */
+#define ARM64_TRAP_FPE		6	/* FPSIMD exception */
+#define ARM64_TRAP_SVE		7	/* SVE access trap */
+#define ARM64_TRAP_BTI		8	/* Branch target identification */
+
+#ifdef CONFIG_DOVETAIL
+
+static inline void arch_dovetail_exec_prepare(void)
+{ }
+
+static inline void arch_dovetail_switch_prepare(bool leave_inband)
+{ }
+
+static inline void arch_dovetail_switch_finish(bool enter_inband)
+{
+	fpsimd_restore_current_oob();
+}
+
+/*
+ * 172 is __NR_prctl from unistd32 in ARM32 mode, without #inclusion
+ * hell. At the end of the day, this number is written in stone to
+ * honor the ABI stability promise anyway.
+ */
+#define arch_dovetail_is_syscall(__nr)	\
+	(is_compat_task() ? (__nr) == 172 : (__nr) == __NR_prctl)
+
+#endif
+
+/*
+ * Pass the trap event to the companion core. Return true if running
+ * in-band afterwards.
+ */
+#define mark_cond_trap_entry(__trapnr, __regs)		\
+	({						\
+		oob_trap_notify(__trapnr, __regs);	\
+		running_inband();			\
+	})
+
+/*
+ * Pass the trap event to the companion core. We expect the current
+ * context to be running on the in-band stage upon return so that our
+ * caller can tread on common kernel code.
+ */
+#define mark_trap_entry(__trapnr, __regs)				\
+	do {								\
+		bool __ret = mark_cond_trap_entry(__trapnr, __regs);	\
+		BUG_ON(dovetail_debug() && !__ret);			\
+	} while (0)
+
+#define mark_trap_exit(__trapnr, __regs)				\
+	oob_trap_unwind(__trapnr, __regs)
+
+#endif /* _ASM_ARM64_DOVETAIL_H */
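
The mark_trap_entry()/mark_trap_exit() pair above brackets fault handlers so the companion core gets to see traps taken by its threads before common kernel code runs. A hedged sketch of the expected handler shape, modeled on the do_sve_acc() conversion later in this patch (the handler name and body here are illustrative):

/* Illustrative handler shape; only the mark_* macros come from this patch. */
static void example_fault_handler(unsigned int esr, struct pt_regs *regs)
{
	mark_trap_entry(ARM64_TRAP_ACCESS, regs);	/* may switch us in-band */

	/* ... regular in-band fault handling ... */

	mark_trap_exit(ARM64_TRAP_ACCESS, regs);	/* unwind the notification */
}
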
diff --git a/kernel/arch/arm64/include/asm/efi.h b/kernel/arch/arm64/include/asm/efi.h
index 16892f0..6f52727 100644
--- a/kernel/arch/arm64/include/asm/efi.h
+++ b/kernel/arch/arm64/include/asm/efi.h
@@ -115,6 +115,10 @@
 
 static inline void efi_set_pgd(struct mm_struct *mm)
 {
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+
 	__switch_mm(mm);
 
 	if (system_uses_ttbr0_pan()) {
@@ -139,6 +143,8 @@
 			update_saved_ttbr0(current, current->active_mm);
 		}
 	}
+
+	unprotect_inband_mm(flags);
 }
 
 void efi_virtmap_load(void);
diff --git a/kernel/arch/arm64/include/asm/fpsimd.h b/kernel/arch/arm64/include/asm/fpsimd.h
index 05c9c55..47417a0 100644
--- a/kernel/arch/arm64/include/asm/fpsimd.h
+++ b/kernel/arch/arm64/include/asm/fpsimd.h
@@ -43,6 +43,7 @@
 extern void fpsimd_signal_preserve_current_state(void);
 extern void fpsimd_preserve_current_state(void);
 extern void fpsimd_restore_current_state(void);
+extern void fpsimd_restore_current_oob(void);
 extern void fpsimd_update_current_state(struct user_fpsimd_state const *state);
 
 extern void fpsimd_bind_task_to_cpu(void);
diff --git a/kernel/arch/arm64/include/asm/irq_pipeline.h b/kernel/arch/arm64/include/asm/irq_pipeline.h
new file mode 100644
index 0000000..5861ab3
--- /dev/null
+++ b/kernel/arch/arm64/include/asm/irq_pipeline.h
@@ -0,0 +1,141 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_ARM64_IRQ_PIPELINE_H
+#define _ASM_ARM64_IRQ_PIPELINE_H
+
+#include <asm-generic/irq_pipeline.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * In order to cope with the limited number of SGIs available to us,
+ * in-band IPI messages are multiplexed over SGI0, whereas out-of-band
+ * IPIs are directly mapped to SGI1-2.
+ */
+#define OOB_NR_IPI		2
+#define OOB_IPI_OFFSET		1 /* SGI1 */
+#define TIMER_OOB_IPI		(ipi_irq_base + OOB_IPI_OFFSET)
+#define RESCHEDULE_OOB_IPI	(TIMER_OOB_IPI + 1)
+
+extern int ipi_irq_base;
+
+static inline notrace
+unsigned long arch_irqs_virtual_to_native_flags(int stalled)
+{
+	return (!!stalled) << IRQMASK_I_POS;
+}
+
+static inline notrace
+unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags)
+{
+	return (!!hard_irqs_disabled_flags(flags)) << IRQMASK_i_POS;
+}
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	int stalled = inband_irq_save();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	barrier();
+	inband_irq_enable();
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	inband_irq_disable();
+	barrier();
+}
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	int stalled = inband_irqs_disabled();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+	inband_irq_restore(arch_irqs_disabled_flags(flags));
+	barrier();
+}
+
+static inline
+void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+	dst->pstate = src->pstate;
+	dst->pc = src->pc;
+}
+
+static inline bool arch_steal_pipelined_tick(struct pt_regs *regs)
+{
+	return !!(regs->pstate & IRQMASK_I_BIT);
+}
+
+static inline int arch_enable_oob_stage(void)
+{
+	return 0;
+}
+
+/*
+ * We use neither the generic entry code nor
+ * kentry_enter/exit_pipelined yet. We still build a no-op version of
+ * the latter for now, until we eventually switch to using whichever
+ * of them is available first.
+ */
+#define arch_kentry_get_irqstate(__regs)	0
+
+#define arch_kentry_set_irqstate(__regs, __irqstate)	\
+	do { (void)__irqstate; } while (0)
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+static inline unsigned long arch_local_irq_save(void)
+{
+	return native_irq_save();
+}
+
+static inline void arch_local_irq_enable(void)
+{
+	native_irq_enable();
+}
+
+static inline void arch_local_irq_disable(void)
+{
+	native_irq_disable();
+}
+
+static inline unsigned long arch_local_save_flags(void)
+{
+	return native_save_flags();
+}
+
+static inline void arch_local_irq_restore(unsigned long flags)
+{
+	native_irq_restore(flags);
+}
+
+static inline int arch_irqs_disabled_flags(unsigned long flags)
+{
+	return native_irqs_disabled_flags(flags);
+}
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+static inline int arch_irqs_disabled(void)
+{
+	return arch_irqs_disabled_flags(arch_local_save_flags());
+}
+
+#endif /* _ASM_ARM64_IRQ_PIPELINE_H */
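
The two translation helpers above let the virtualized flags travel in an otherwise unused bit: the stall state is presented at the PSTATE.I position (bit 7) so existing flag tests keep working, while a native flags word is folded into bit 31 on the way back. A worked example, assuming the IRQMASK_* constants this patch adds to irqflags.h:

static void example_flag_translation(void)
{
	unsigned long flags;

	flags = arch_irqs_virtual_to_native_flags(1);
	/* flags == (1UL << 7) == PSR_I_BIT: reads as "interrupts masked" */

	flags = arch_irqs_native_to_virtual_flags(PSR_I_BIT);
	/* flags == (1UL << 31): hard state parked at IRQMASK_i_POS */
}
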
diff --git a/kernel/arch/arm64/include/asm/irqflags.h b/kernel/arch/arm64/include/asm/irqflags.h
index ff328e5..177c7e7 100644
--- a/kernel/arch/arm64/include/asm/irqflags.h
+++ b/kernel/arch/arm64/include/asm/irqflags.h
@@ -10,6 +10,10 @@
 #include <asm/ptrace.h>
 #include <asm/sysreg.h>
 
+#define IRQMASK_I_BIT	PSR_I_BIT
+#define IRQMASK_I_POS	7
+#define IRQMASK_i_POS	31
+
 /*
  * Aarch64 has flags for masking: Debug, Asynchronous (serror), Interrupts and
  * FIQ exceptions, in the 'daif' register. We mask and unmask them in 'dai'
@@ -26,7 +30,7 @@
 /*
  * CPU interrupt mask handling.
  */
-static inline void arch_local_irq_enable(void)
+static inline void native_irq_enable(void)
 {
 	if (system_has_prio_mask_debugging()) {
 		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
@@ -35,7 +39,7 @@
 	}
 
 	asm volatile(ALTERNATIVE(
-		"msr	daifclr, #2		// arch_local_irq_enable",
+		"msr	daifclr, #2		// native_irq_enable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
@@ -45,7 +49,7 @@
 	pmr_sync();
 }
 
-static inline void arch_local_irq_disable(void)
+static inline void native_irq_disable(void)
 {
 	if (system_has_prio_mask_debugging()) {
 		u32 pmr = read_sysreg_s(SYS_ICC_PMR_EL1);
@@ -54,7 +58,7 @@
 	}
 
 	asm volatile(ALTERNATIVE(
-		"msr	daifset, #2		// arch_local_irq_disable",
+		"msr	daifset, #2		// native_irq_disable",
 		__msr_s(SYS_ICC_PMR_EL1, "%0"),
 		ARM64_HAS_IRQ_PRIO_MASKING)
 		:
@@ -62,10 +66,17 @@
 		: "memory");
 }
 
+static inline void native_irq_sync(void)
+{
+	native_irq_enable();
+	isb();
+	native_irq_disable();
+}
+
 /*
  * Save the current interrupt enable state.
  */
-static inline unsigned long arch_local_save_flags(void)
+static inline unsigned long native_save_flags(void)
 {
 	unsigned long flags;
 
@@ -80,7 +91,7 @@
 	return flags;
 }
 
-static inline int arch_irqs_disabled_flags(unsigned long flags)
+static inline int native_irqs_disabled_flags(unsigned long flags)
 {
 	int res;
 
@@ -95,23 +106,18 @@
 	return res;
 }
 
-static inline int arch_irqs_disabled(void)
-{
-	return arch_irqs_disabled_flags(arch_local_save_flags());
-}
-
-static inline unsigned long arch_local_irq_save(void)
+static inline unsigned long native_irq_save(void)
 {
 	unsigned long flags;
 
-	flags = arch_local_save_flags();
+	flags = native_save_flags();
 
 	/*
 	 * There are too many states with IRQs disabled, just keep the current
 	 * state if interrupts are already disabled/masked.
 	 */
-	if (!arch_irqs_disabled_flags(flags))
-		arch_local_irq_disable();
+	if (!native_irqs_disabled_flags(flags))
+		native_irq_disable();
 
 	return flags;
 }
@@ -119,7 +125,7 @@
 /*
  * restore saved IRQ state
  */
-static inline void arch_local_irq_restore(unsigned long flags)
+static inline void native_irq_restore(unsigned long flags)
 {
 	asm volatile(ALTERNATIVE(
 		"msr	daif, %0",
@@ -132,4 +138,12 @@
 	pmr_sync();
 }
 
+static inline bool native_irqs_disabled(void)
+{
+	unsigned long flags = native_save_flags();
+	return native_irqs_disabled_flags(flags);
+}
+
+#include <asm/irq_pipeline.h>
+
 #endif /* __ASM_IRQFLAGS_H */
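
With the renames above, arch_local_irq_*() becomes the pipeline-aware layer while native_irq_*() pokes DAIF/PMR directly. Note that native_irq_save() deliberately keeps whatever masked state is already in effect, per the comment in the code; a short usage sketch (the nesting pattern is hypothetical):

static void example_nested_native_masking(void)
{
	unsigned long outer, inner;

	outer = native_irq_save();	/* masks, records previous state */
	inner = native_irq_save();	/* already masked: state kept as-is */
	native_irq_restore(inner);	/* still masked */
	native_irq_restore(outer);	/* outermost restore may re-enable */
}
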
diff --git a/kernel/arch/arm64/include/asm/mmu_context.h b/kernel/arch/arm64/include/asm/mmu_context.h
index cc58614..1b0551e 100644
--- a/kernel/arch/arm64/include/asm/mmu_context.h
+++ b/kernel/arch/arm64/include/asm/mmu_context.h
@@ -15,6 +15,7 @@
 #include <linux/sched/hotplug.h>
 #include <linux/mm_types.h>
 #include <linux/pgtable.h>
+#include <linux/irq_pipeline.h>
 
 #include <asm/cacheflush.h>
 #include <asm/cpufeature.h>
@@ -112,6 +113,9 @@
 static inline void cpu_uninstall_idmap(void)
 {
 	struct mm_struct *mm = current->active_mm;
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
 
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
@@ -119,15 +123,23 @@
 
 	if (mm != &init_mm && !system_uses_ttbr0_pan())
 		cpu_switch_mm(mm->pgd, mm);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 static inline void cpu_install_idmap(void)
 {
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
+
 	cpu_set_reserved_ttbr0();
 	local_flush_tlb_all();
 	cpu_set_idmap_tcr_t0sz();
 
 	cpu_switch_mm(lm_alias(idmap_pg_dir), &init_mm);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -230,7 +242,7 @@
 }
 
 static inline void
-switch_mm(struct mm_struct *prev, struct mm_struct *next,
+do_switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	  struct task_struct *tsk)
 {
 	if (prev != next)
@@ -245,8 +257,26 @@
 	update_saved_ttbr0(tsk, next);
 }
 
+static inline void
+switch_mm(struct mm_struct *prev, struct mm_struct *next,
+	  struct task_struct *tsk)
+{
+	unsigned long flags;
+
+	protect_inband_mm(flags);
+	do_switch_mm(prev, next, tsk);
+	unprotect_inband_mm(flags);
+}
+
 #define deactivate_mm(tsk,mm)	do { } while (0)
-#define activate_mm(prev,next)	switch_mm(prev, next, current)
+#define activate_mm(prev,next)	do_switch_mm(prev, next, current)
+
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk)
+{
+	do_switch_mm(prev, next, tsk);
+}
 
 static inline const struct cpumask *
 task_cpu_possible_mask(struct task_struct *p)
diff --git a/kernel/arch/arm64/include/asm/ptrace.h b/kernel/arch/arm64/include/asm/ptrace.h
index f834744..ac73af4 100644
--- a/kernel/arch/arm64/include/asm/ptrace.h
+++ b/kernel/arch/arm64/include/asm/ptrace.h
@@ -201,7 +201,13 @@
 
 	/* Only valid for some EL1 exceptions. */
 	u64 lockdep_hardirqs;
+#ifdef CONFIG_IRQ_PIPELINE
+	u64 exit_rcu : 1,
+		oob_on_entry : 1,
+		stalled_on_entry : 1;
+#else
 	u64 exit_rcu;
+#endif
 };
 
 static inline bool in_syscall(struct pt_regs const *regs)
diff --git a/kernel/arch/arm64/include/asm/syscall.h b/kernel/arch/arm64/include/asm/syscall.h
index 03e2089..8827217 100644
--- a/kernel/arch/arm64/include/asm/syscall.h
+++ b/kernel/arch/arm64/include/asm/syscall.h
@@ -73,6 +73,11 @@
 	memcpy(args, &regs->regs[1], 5 * sizeof(args[0]));
 }
 
+static inline unsigned long syscall_get_arg0(struct pt_regs *regs)
+{
+	return regs->orig_x0;
+}
+
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 const unsigned long *args)
diff --git a/kernel/arch/arm64/include/asm/thread_info.h b/kernel/arch/arm64/include/asm/thread_info.h
index cdcf307..1499c2c 100644
--- a/kernel/arch/arm64/include/asm/thread_info.h
+++ b/kernel/arch/arm64/include/asm/thread_info.h
@@ -14,6 +14,7 @@
 
 struct task_struct;
 
+#include <dovetail/thread_info.h>
 #include <asm/memory.h>
 #include <asm/stack_pointer.h>
 #include <asm/types.h>
@@ -25,6 +26,7 @@
  */
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
+	unsigned long		local_flags;	/* local (synchronous) flags */
 	mm_segment_t		addr_limit;	/* address limit */
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
 	u64			ttbr0;		/* saved TTBR0_EL1 */
@@ -45,6 +47,7 @@
 	void			*scs_base;
 	void			*scs_sp;
 #endif
+	struct oob_thread_state	oob_state;
 };
 
 #define thread_saved_pc(tsk)	\
@@ -59,6 +62,8 @@
 
 void arch_release_task_struct(struct task_struct *tsk);
 
+#define ti_local_flags(__ti)	((__ti)->local_flags)
+
 #endif
 
 #define TIF_SIGPENDING		0	/* signal pending */
@@ -69,11 +74,12 @@
 #define TIF_FSCHECK		5	/* Check FS is USER_DS on return */
 #define TIF_MTE_ASYNC_FAULT	6	/* MTE Asynchronous Tag Check Fault */
 #define TIF_NOTIFY_SIGNAL	7	/* signal notifications exist */
-#define TIF_SYSCALL_TRACE	8	/* syscall trace active */
+#define TIF_SYSCALL_TRACE	13	/* syscall trace active */
 #define TIF_SYSCALL_AUDIT	9	/* syscall auditing */
 #define TIF_SYSCALL_TRACEPOINT	10	/* syscall tracepoint for ftrace */
 #define TIF_SECCOMP		11	/* syscall secure computing */
 #define TIF_SYSCALL_EMU		12	/* syscall emulation active */
+#define TIF_RETUSER		8	/* INBAND_TASK_RETUSER is pending */
 #define TIF_MEMDIE		18	/* is terminating due to OOM killer */
 #define TIF_FREEZE		19
 #define TIF_RESTORE_SIGMASK	20
@@ -83,6 +89,7 @@
 #define TIF_SVE_VL_INHERIT	24	/* Inherit sve_vl_onexec across exec */
 #define TIF_SSBD		25	/* Wants SSB mitigation */
 #define TIF_TAGGED_ADDR		26	/* Allow tagged user addresses */
+#define TIF_MAYDAY		27	/* Emergency trap pending */
 
 #define _TIF_SIGPENDING		(1 << TIF_SIGPENDING)
 #define _TIF_NEED_RESCHED	(1 << TIF_NEED_RESCHED)
@@ -100,11 +107,13 @@
 #define _TIF_SVE		(1 << TIF_SVE)
 #define _TIF_MTE_ASYNC_FAULT	(1 << TIF_MTE_ASYNC_FAULT)
 #define _TIF_NOTIFY_SIGNAL	(1 << TIF_NOTIFY_SIGNAL)
+#define _TIF_MAYDAY		(1 << TIF_MAYDAY)
+#define _TIF_RETUSER		(1 << TIF_RETUSER)
 
 #define _TIF_WORK_MASK		(_TIF_NEED_RESCHED | _TIF_SIGPENDING | \
 				 _TIF_NOTIFY_RESUME | _TIF_FOREIGN_FPSTATE | \
 				 _TIF_UPROBE | _TIF_FSCHECK | _TIF_MTE_ASYNC_FAULT | \
-				 _TIF_NOTIFY_SIGNAL)
+				 _TIF_NOTIFY_SIGNAL | _TIF_RETUSER)
 
 #define _TIF_SYSCALL_WORK	(_TIF_SYSCALL_TRACE | _TIF_SYSCALL_AUDIT | \
 				 _TIF_SYSCALL_TRACEPOINT | _TIF_SECCOMP | \
@@ -126,4 +135,12 @@
 	INIT_SCS							\
 }
 
+/*
+ * Local (synchronous) thread flags.
+ */
+#define _TLF_OOB		0x0001
+#define _TLF_DOVETAIL		0x0002
+#define _TLF_OFFSTAGE		0x0004
+#define _TLF_OOBTRAP		0x0008
+
 #endif /* __ASM_THREAD_INFO_H */
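
The new local_flags word carries state that is only ever read or written by the owning CPU, so it needs no atomics; _TLF_OOB marks a task currently running out-of-band, and ti_local_flags() is the accessor the entry code uses. A small sketch of a test against it (the helper itself is hypothetical, only ti_local_flags() and _TLF_OOB come from this patch):

#include <linux/sched.h>

static inline bool example_task_is_oob(struct task_struct *p)
{
	return ti_local_flags(task_thread_info(p)) & _TLF_OOB;
}
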
diff --git a/kernel/arch/arm64/include/asm/uaccess.h b/kernel/arch/arm64/include/asm/uaccess.h
index 5d0111a..ceb30f4 100644
--- a/kernel/arch/arm64/include/asm/uaccess.h
+++ b/kernel/arch/arm64/include/asm/uaccess.h
@@ -113,7 +113,7 @@
 {
 	unsigned long flags, ttbr;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	ttbr = read_sysreg(ttbr1_el1);
 	ttbr &= ~TTBR_ASID_MASK;
 	/* reserved_pg_dir placed before swapper_pg_dir */
@@ -122,7 +122,7 @@
 	/* Set reserved ASID */
 	write_sysreg(ttbr, ttbr1_el1);
 	isb();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline void __uaccess_ttbr0_enable(void)
@@ -134,7 +134,7 @@
 	 * variable and the MSR. A context switch could trigger an ASID
 	 * roll-over and an update of 'ttbr0'.
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	ttbr0 = READ_ONCE(current_thread_info()->ttbr0);
 
 	/* Restore active ASID */
@@ -147,7 +147,7 @@
 	/* Restore user page table */
 	write_sysreg(ttbr0, ttbr0_el1);
 	isb();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline bool uaccess_ttbr0_disable(void)
diff --git a/kernel/arch/arm64/include/asm/vdso.h b/kernel/arch/arm64/include/asm/vdso.h
index f99dcb9..c63c5ac 100644
--- a/kernel/arch/arm64/include/asm/vdso.h
+++ b/kernel/arch/arm64/include/asm/vdso.h
@@ -13,6 +13,11 @@
 #define VDSO_LBASE	0x0
 
 #define __VVAR_PAGES    2
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+#define __VPRIV_PAGES   1
+#else
+#define __VPRIV_PAGES   0
+#endif
 
 #ifndef __ASSEMBLY__
 
diff --git a/kernel/arch/arm64/include/asm/vdso/gettimeofday.h b/kernel/arch/arm64/include/asm/vdso/gettimeofday.h
index 4b4c0da..030fe8d 100644
--- a/kernel/arch/arm64/include/asm/vdso/gettimeofday.h
+++ b/kernel/arch/arm64/include/asm/vdso/gettimeofday.h
@@ -102,6 +102,71 @@
 }
 #endif
 
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+
+#include <uapi/linux/fcntl.h>
+
+extern struct vdso_priv _vdso_priv; /* vdso.lds.S */
+
+static __always_inline struct vdso_priv *__arch_get_vdso_priv(void)
+{
+	return &_vdso_priv;
+}
+
+static __always_inline int clock_open_device(const char *path, int mode)
+{
+	register int  sc  asm("w8") = __NR_openat;
+	register long ret asm("x0");
+	register long x0  asm("x0") = AT_FDCWD;
+	register long x1  asm("x1") = (long)path;
+	register long x2  asm("x2") = mode;
+
+	asm volatile(
+		"svc #0\n"
+		: "=r" (ret)
+		: "r" (sc),
+		  "r" (x0), "r" (x1), "r" (x2)
+		: "cc", "memory");
+
+	return ret;
+}
+
+static __always_inline int clock_ioctl_device(int fd, unsigned int cmd, long arg)
+{
+	register int  sc  asm("w8") = __NR_ioctl;
+	register long ret asm("x0");
+	register long x0  asm("x0") = fd;
+	register long x1  asm("x1") = cmd;
+	register long x2  asm("x2") = arg;
+
+	asm volatile(
+		"svc #0\n"
+		: "=r" (ret)
+		: "r" (sc),
+		  "r" (x0), "r" (x1), "r" (x2)
+		: "cc", "memory");
+
+	return ret;
+}
+
+static __always_inline int clock_close_device(int fd)
+{
+	register int  sc  asm("w8") = __NR_close;
+	register long ret asm("x0");
+	register long x0  asm("x0") = fd;
+
+	asm volatile(
+		"svc #0\n"
+		: "=r" (ret)
+		: "r" (sc),
+		  "r" (x0)
+		: "cc", "memory");
+
+	return ret;
+}
+
+#endif	/* CONFIG_GENERIC_CLOCKSOURCE_VDSO */
+
 #endif /* !__ASSEMBLY__ */
 
 #endif /* __ASM_VDSO_GETTIMEOFDAY_H */
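
The three raw-syscall wrappers above exist because vDSO code cannot call into libc; they issue openat(2), ioctl(2) and close(2) straight through svc #0. A hedged sketch of how a generic clocksource fallback might chain them; the device path and request code below are made-up placeholders:

#define EXAMPLE_IOC_GET_INFO	0	/* placeholder request code */

static int example_probe_clock_device(void)
{
	int fd, ret;

	/* O_RDONLY is available via the fcntl UAPI header included above. */
	fd = clock_open_device("/dev/example-clk", O_RDONLY);
	if (fd < 0)
		return fd;

	ret = clock_ioctl_device(fd, EXAMPLE_IOC_GET_INFO, 0);
	clock_close_device(fd);

	return ret;
}
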
diff --git a/kernel/arch/arm64/include/dovetail/irq.h b/kernel/arch/arm64/include/dovetail/irq.h
new file mode 120000
index 0000000..86483e7
--- /dev/null
+++ b/kernel/arch/arm64/include/dovetail/irq.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/include/dovetail/thread_info.h b/kernel/arch/arm64/include/dovetail/thread_info.h
new file mode 120000
index 0000000..e932ae3
--- /dev/null
+++ b/kernel/arch/arm64/include/dovetail/thread_info.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/kernel/Makefile b/kernel/arch/arm64/kernel/Makefile
index 64e84b7..6c4fd5b 100644
--- a/kernel/arch/arm64/kernel/Makefile
+++ b/kernel/arch/arm64/kernel/Makefile
@@ -48,6 +48,7 @@
 obj-$(CONFIG_ACPI_NUMA)			+= acpi_numa.o
 obj-$(CONFIG_ARM64_ACPI_PARKING_PROTOCOL)	+= acpi_parking_protocol.o
 obj-$(CONFIG_PARAVIRT)			+= paravirt.o
+obj-$(CONFIG_IRQ_PIPELINE)		+= irq_pipeline.o
 obj-$(CONFIG_RANDOMIZE_BASE)		+= kaslr.o
 obj-$(CONFIG_HIBERNATION)		+= hibernate.o hibernate-asm.o
 obj-$(CONFIG_KEXEC_CORE)		+= machine_kexec.o relocate_kernel.o	\
diff --git a/kernel/arch/arm64/kernel/asm-offsets.c b/kernel/arch/arm64/kernel/asm-offsets.c
index 93da876..26b54f4 100644
--- a/kernel/arch/arm64/kernel/asm-offsets.c
+++ b/kernel/arch/arm64/kernel/asm-offsets.c
@@ -29,6 +29,7 @@
   DEFINE(TSK_ACTIVE_MM,		offsetof(struct task_struct, active_mm));
   BLANK();
   DEFINE(TSK_TI_FLAGS,		offsetof(struct task_struct, thread_info.flags));
+  DEFINE(TSK_TI_LOCAL_FLAGS,	offsetof(struct task_struct, thread_info.local_flags));
   DEFINE(TSK_TI_PREEMPT,	offsetof(struct task_struct, thread_info.preempt_count));
   DEFINE(TSK_TI_ADDR_LIMIT,	offsetof(struct task_struct, thread_info.addr_limit));
 #ifdef CONFIG_ARM64_SW_TTBR0_PAN
diff --git a/kernel/arch/arm64/kernel/debug-monitors.c b/kernel/arch/arm64/kernel/debug-monitors.c
index d7f904c..73382fc 100644
--- a/kernel/arch/arm64/kernel/debug-monitors.c
+++ b/kernel/arch/arm64/kernel/debug-monitors.c
@@ -232,7 +232,7 @@
 		return;
 
 	if (interrupts_enabled(regs))
-		local_irq_enable();
+		local_irq_enable_full();
 
 	arm64_force_sig_fault(SIGTRAP, si_code, instruction_pointer(regs),
 			      "User debug trap");
diff --git a/kernel/arch/arm64/kernel/entry-common.c b/kernel/arch/arm64/kernel/entry-common.c
index 64cfe4a..120cff8 100644
--- a/kernel/arch/arm64/kernel/entry-common.c
+++ b/kernel/arch/arm64/kernel/entry-common.c
@@ -8,6 +8,7 @@
 #include <linux/context_tracking.h>
 #include <linux/ptrace.h>
 #include <linux/thread_info.h>
+#include <linux/irqstage.h>
 
 #include <asm/cpufeature.h>
 #include <asm/daifflags.h>
@@ -21,7 +22,7 @@
  * This is intended to match the logic in irqentry_enter(), handling the kernel
  * mode transitions only.
  */
-static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+static void noinstr __enter_from_kernel_mode(struct pt_regs *regs)
 {
 	regs->exit_rcu = false;
 
@@ -41,11 +42,50 @@
 	mte_check_tfsr_entry();
 }
 
+static void noinstr enter_from_kernel_mode(struct pt_regs *regs)
+{
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * CAUTION: we may switch in-band as a result of handling a
+	 * trap, so if we are running out-of-band, we must make sure
+	 * not to perform the RCU exit since we did not enter it in
+	 * the first place.
+	 */
+	regs->oob_on_entry = running_oob();
+	if (regs->oob_on_entry) {
+		regs->exit_rcu = false;
+		return;
+	}
+
+	/*
+	 * We trapped from kernel space running in-band, we need to
+	 * record the virtual interrupt state into the current
+	 * register frame (regs->stalled_on_entry) in order to
+	 * reinstate it from exit_to_kernel_mode(). Next we stall the
+	 * in-band stage in order to mirror the current hardware state
+	 * (i.e. hardirqs are off).
+	 */
+	regs->stalled_on_entry = test_and_stall_inband_nocheck();
+#endif
+
+	__enter_from_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * Our caller is going to inherit the hardware interrupt state
+	 * from the trapped context once we have returned: if running
+	 * in-band, align the stall bit on the upcoming state.
+	 */
+	if (running_inband() && interrupts_enabled(regs))
+		unstall_inband_nocheck();
+#endif
+}
+
 /*
  * This is intended to match the logic in irqentry_exit(), handling the kernel
  * mode transitions only, and with preemption handled elsewhere.
  */
-static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+static void noinstr __exit_to_kernel_mode(struct pt_regs *regs)
 {
 	lockdep_assert_irqs_disabled();
 
@@ -67,8 +107,35 @@
 	}
 }
 
+/*
+ * This is intended to match the logic in irqentry_exit(), handling the kernel
+ * mode transitions only, and with preemption handled elsewhere.
+ */
+static void noinstr exit_to_kernel_mode(struct pt_regs *regs)
+{
+	if (running_oob())
+		return;
+
+	__exit_to_kernel_mode(regs);
+
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * Reinstate the virtual interrupt state which was in effect
+	 * on entry to the trap.
+	 */
+	if (!regs->oob_on_entry) {
+		if (regs->stalled_on_entry)
+			stall_inband_nocheck();
+		else
+			unstall_inband_nocheck();
+	}
+#endif
+	return;
+}
+
 void noinstr arm64_enter_nmi(struct pt_regs *regs)
 {
+	/* irq_pipeline: running this code oob is ok. */
 	regs->lockdep_hardirqs = lockdep_hardirqs_enabled();
 
 	__nmi_enter();
@@ -99,18 +166,57 @@
 
 asmlinkage void noinstr enter_el1_irq_or_nmi(struct pt_regs *regs)
 {
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+	/*
+	 * IRQ pipeline: the interrupt entry is special in that we may
+	 * run the lockdep and RCU prologue/epilogue only if the IRQ
+	 * is going to be dispatched to its handler on behalf of the
+	 * current context, i.e. only if running in-band and
+	 * unstalled. If so, we also have to reconcile the hardware
+	 * and virtual interrupt states temporarily in order to run
+	 * such prologue.
+	 */
+	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
 		arm64_enter_nmi(regs);
-	else
-		enter_from_kernel_mode(regs);
+	} else {
+#ifdef CONFIG_IRQ_PIPELINE
+		if (running_inband()) {
+			regs->stalled_on_entry = test_inband_stall();
+			if (!regs->stalled_on_entry) {
+				stall_inband_nocheck();
+				__enter_from_kernel_mode(regs);
+				unstall_inband_nocheck();
+			}
+		}
+#else
+		__enter_from_kernel_mode(regs);
+#endif
+	}
 }
 
 asmlinkage void noinstr exit_el1_irq_or_nmi(struct pt_regs *regs)
 {
-	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs))
+	if (IS_ENABLED(CONFIG_ARM64_PSEUDO_NMI) && !interrupts_enabled(regs)) {
 		arm64_exit_nmi(regs);
-	else
-		exit_to_kernel_mode(regs);
+	} else {
+#ifdef CONFIG_IRQ_PIPELINE
+		/*
+		 * See enter_el1_irq_or_nmi() for details. UGLY: we
+		 * also have to tell the tracer that irqs are off,
+		 * since sync_current_irq_stage() did the opposite on
+		 * exit. Hopefully, at some point arm64 will convert
+		 * to the generic entry code which exhibits less
+		 * convoluted logic.
+		 */
+		if (running_inband() && !regs->stalled_on_entry) {
+			stall_inband_nocheck();
+			trace_hardirqs_off();
+			__exit_to_kernel_mode(regs);
+			unstall_inband_nocheck();
+		}
+#else
+		__exit_to_kernel_mode(regs);
+#endif
+	}
 }
 
 static void noinstr el1_abort(struct pt_regs *regs, unsigned long esr)
@@ -231,20 +337,32 @@
 
 asmlinkage void noinstr enter_from_user_mode(void)
 {
-	lockdep_hardirqs_off(CALLER_ADDR0);
-	CT_WARN_ON(ct_state() != CONTEXT_USER);
-	user_exit_irqoff();
-	trace_hardirqs_off_finish();
+	if (running_inband()) {
+		lockdep_hardirqs_off(CALLER_ADDR0);
+		WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+		CT_WARN_ON(ct_state() != CONTEXT_USER);
+		stall_inband_nocheck();
+		user_exit_irqoff();
+		unstall_inband_nocheck();
+		trace_hardirqs_off_finish();
+	}
 }
 
 asmlinkage void noinstr exit_to_user_mode(void)
 {
-	mte_check_tfsr_exit();
+	if (running_inband()) {
+		trace_hardirqs_on_prepare();
+		lockdep_hardirqs_on_prepare(CALLER_ADDR0);
+		user_enter_irqoff();
+		lockdep_hardirqs_on(CALLER_ADDR0);
+		unstall_inband_nocheck();
+	}
+}
 
-	trace_hardirqs_on_prepare();
-	lockdep_hardirqs_on_prepare(CALLER_ADDR0);
-	user_enter_irqoff();
-	lockdep_hardirqs_on(CALLER_ADDR0);
+asmlinkage void noinstr enter_el0_irq(void)
+{
+	if (running_inband() && !test_inband_stall())
+		enter_from_user_mode();
 }
 
 static void noinstr el0_da(struct pt_regs *regs, unsigned long esr)
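
The net effect of the entry-common.c changes is a single invariant: while the in-band prologue (lockdep, RCU, context tracking) runs, the virtual stall bit mirrors the fact that hard interrupts are off, and the state recorded on entry is reinstated on exit. Condensed as pseudo-C, using only helpers visible in this patch (a summary sketch, not an actual entry point):

static void example_el1_trap_entry_exit(struct pt_regs *regs)
{
	bool was_stalled;

	if (running_oob())
		return;		/* no RCU/lockdep bookkeeping to undo */

	was_stalled = test_and_stall_inband_nocheck(); /* record + mirror */

	/* ... prologue, trap handling, epilogue ... */

	if (was_stalled)	/* reinstate the state seen on entry */
		stall_inband_nocheck();
	else
		unstall_inband_nocheck();
}
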
diff --git a/kernel/arch/arm64/kernel/entry.S b/kernel/arch/arm64/kernel/entry.S
index 9f19e6b..49a7349 100644
--- a/kernel/arch/arm64/kernel/entry.S
+++ b/kernel/arch/arm64/kernel/entry.S
@@ -39,6 +39,12 @@
 #endif
 	.endm
 
+	.macro user_exit_el0_irq
+#if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
+	bl	enter_el0_irq
+#endif
+	.endm
+
 	.macro user_enter_irqoff
 #if defined(CONFIG_CONTEXT_TRACKING) || defined(CONFIG_TRACE_IRQFLAGS)
 	bl	exit_to_user_mode
@@ -534,6 +540,21 @@
 	mov	x24, scs_sp		// preserve the original shadow stack
 #endif
 
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * When the pipeline is enabled, context switches over the irq
+	 * stack are allowed (for the co-kernel), and more interrupts
+	 * can be taken over sibling stack contexts. So we need a
+	 * not-so-subtle way of figuring out whether the irq stack was actually
+	 * exited, which cannot depend on the current task pointer.
+	 */
+	adr_this_cpu x25, irq_nesting, x26
+	ldr	w26, [x25]
+	cmp	w26, #0
+	add	w26, w26, #1
+	str	w26, [x25]
+	b.ne	9998f
+#else
 	/*
 	 * Compare sp with the base of the task stack.
 	 * If the top ~(THREAD_SIZE - 1) bits match, we are on a task stack,
@@ -543,6 +564,7 @@
 	eor	x25, x25, x19
 	and	x25, x25, #~(THREAD_SIZE - 1)
 	cbnz	x25, 9998f
+#endif
 
 	ldr_this_cpu x25, irq_stack_ptr, x26
 	mov	x26, #IRQ_STACK_SIZE
@@ -563,11 +585,18 @@
 	 * The callee-saved regs (x19-x29) should be preserved between
 	 * irq_stack_entry and irq_stack_exit, but note that kernel_entry
 	 * uses x20-x23 to store data for later use.
+	 * IRQ_PIPELINE: caution, we have to preserve w0.
 	 */
 	.macro	irq_stack_exit
 	mov	sp, x19
 #ifdef CONFIG_SHADOW_CALL_STACK
 	mov	scs_sp, x24
+#endif
+#ifdef CONFIG_DOVETAIL
+	adr_this_cpu x1, irq_nesting, x2
+	ldr	w2, [x1]
+	add	w2, w2, #-1
+	str	w2, [x1]
 #endif
 	.endm
 
@@ -578,7 +607,15 @@
  * Interrupt handling.
  */
 	.macro	irq_handler, handler:req
+#ifdef CONFIG_IRQ_PIPELINE
+#	.if	\handler == handle_arch_irq
+	ldr	x1, =handle_arch_irq_pipelined
+#	.else
+#	.error	"irq_pipeline: cannot handle interrupt"
+#	.endif
+#else
 	ldr_l	x1, \handler
+#endif
 	mov	x0, sp
 	irq_stack_entry
 	blr	x1
@@ -616,6 +653,9 @@
 
 	irq_handler	\handler
 
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz     w0, 66f				// skip epilogue if oob or in-band stalled
+#endif
 #ifdef CONFIG_PREEMPTION
 	ldr	x24, [tsk, #TSK_TI_PREEMPT]	// get preempt count
 alternative_if ARM64_HAS_IRQ_PRIO_MASKING
@@ -630,13 +670,13 @@
 	bl	arm64_preempt_schedule_irq	// irq en/disable is done inside
 1:
 #endif
-
+66:
 	mov	x0, sp
 	bl	exit_el1_irq_or_nmi
 	.endm
 
 	.macro el0_interrupt_handler, handler:req
-	user_exit_irqoff
+	user_exit_el0_irq
 	enable_da_f
 
 	tbz	x22, #55, 1f
@@ -815,6 +855,9 @@
 	kernel_entry 0
 el0_irq_naked:
 	el0_interrupt_handler handle_arch_irq
+#ifdef CONFIG_IRQ_PIPELINE
+	cbz	w0, fast_ret_from_el0_irq	// skip epilogue if oob
+#endif
 	b	ret_to_user
 SYM_CODE_END(el0_irq)
 
@@ -846,6 +889,11 @@
 SYM_CODE_START_LOCAL(ret_to_user)
 	disable_daif
 	gic_prio_kentry_setup tmp=x3
+#ifdef CONFIG_IRQ_PIPELINE
+	ldr	x0, [tsk, #TSK_TI_LOCAL_FLAGS]
+	tst	x0, #_TLF_OOB
+	b.ne	fast_ret_to_user
+#endif
 #ifdef CONFIG_TRACE_IRQFLAGS
 	bl	trace_hardirqs_off
 #endif
@@ -854,12 +902,22 @@
 	cbnz	x2, work_pending
 finish_ret_to_user:
 	user_enter_irqoff
+ret_to_user_naked:
 	enable_step_tsk x19, x2
 #ifdef CONFIG_GCC_PLUGIN_STACKLEAK
 	bl	stackleak_erase
 #endif
 	kernel_exit 0
 
+#ifdef CONFIG_IRQ_PIPELINE
+fast_ret_from_el0_irq:
+	disable_daif
+	gic_prio_kentry_setup tmp=x3
+fast_ret_to_user:
+	ldr	x19, [tsk, #TSK_TI_FLAGS]
+	b	ret_to_user_naked
+#endif
+
 /*
  * Ok, we need to do extra processing, enter the slow path.
  */
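
The irq_nesting accounting in irq_stack_entry/irq_stack_exit replaces the old stack-base comparison because, with Dovetail, the co-kernel may switch contexts while on the IRQ stack, so the current task pointer says nothing about which stack is live. In C, the assembly above amounts to roughly this (the per-CPU variable name mirrors the one the asm references):

#include <linux/percpu.h>

static DEFINE_PER_CPU(unsigned int, example_irq_nesting);

/* Returns true when this is the outermost IRQ, i.e. switch stacks now. */
static bool example_irq_stack_entry(void)
{
	return this_cpu_inc_return(example_irq_nesting) == 1;
}

static void example_irq_stack_exit(void)
{
	this_cpu_dec(example_irq_nesting);
}
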
diff --git a/kernel/arch/arm64/kernel/fpsimd.c b/kernel/arch/arm64/kernel/fpsimd.c
index 5335a6b..175353e 100644
--- a/kernel/arch/arm64/kernel/fpsimd.c
+++ b/kernel/arch/arm64/kernel/fpsimd.c
@@ -169,6 +169,42 @@
 	WARN_ON(busy);
 }
 
+static void __put_cpu_fpsimd_context(void)
+{
+	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
+
+	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
+}
+
+#ifdef CONFIG_DOVETAIL
+
+#define get_cpu_fpsimd_context(__flags)			\
+	do {						\
+		(__flags) = hard_preempt_disable();	\
+		__get_cpu_fpsimd_context();		\
+	} while (0)
+
+#define put_cpu_fpsimd_context(__flags)		\
+	do {					\
+		__put_cpu_fpsimd_context();	\
+		hard_preempt_enable(__flags);	\
+	} while (0)
+
+void fpsimd_restore_current_oob(void)
+{
+	/*
+	 * Restore the fpsimd context for the current task as it
+	 * resumes from dovetail_context_switch(), which always happens
+	 * on the out-of-band stage. Skip this for kernel threads
+	 * which have no such context but always bear
+	 * TIF_FOREIGN_FPSTATE.
+	 */
+	if (current->mm)
+		fpsimd_restore_current_state();
+}
+
+#else
+
 /*
  * Claim ownership of the CPU FPSIMD context for use by the calling context.
  *
@@ -178,19 +214,12 @@
  * The double-underscore version must only be called if you know the task
  * can't be preempted.
  */
-static void get_cpu_fpsimd_context(void)
-{
-	local_bh_disable();
-	__get_cpu_fpsimd_context();
-}
-
-static void __put_cpu_fpsimd_context(void)
-{
-	bool busy = __this_cpu_xchg(fpsimd_context_busy, false);
-
-	WARN_ON(!busy); /* No matching get_cpu_fpsimd_context()? */
-}
-
+#define get_cpu_fpsimd_context(__flags)			\
+	do {						\
+		preempt_disable();			\
+		__get_cpu_fpsimd_context();		\
+		(void)(__flags);			\
+	} while (0)
 /*
  * Release the CPU FPSIMD context.
  *
@@ -198,12 +227,14 @@
  * previously called, with no call to put_cpu_fpsimd_context() in the
  * meantime.
  */
-static void put_cpu_fpsimd_context(void)
-{
-	__put_cpu_fpsimd_context();
-	local_bh_enable();
-}
+#define put_cpu_fpsimd_context(__flags)			\
+	do {						\
+		__put_cpu_fpsimd_context();		\
+		preempt_enable();			\
+		(void)(__flags);			\
+	} while (0)
 
+#endif	/* !CONFIG_DOVETAIL */
 static bool have_cpu_fpsimd_context(void)
 {
 	return !preemptible() && __this_cpu_read(fpsimd_context_busy);
@@ -283,7 +314,7 @@
 static void task_fpsimd_load(void)
 {
 	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!have_cpu_fpsimd_context());
+	WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context());
 
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
 		sve_load_state(sve_pffr(&current->thread),
@@ -297,14 +328,14 @@
  * Ensure FPSIMD/SVE storage in memory for the loaded context is up to
  * date with respect to the CPU registers.
  */
-static void fpsimd_save(void)
+static void __fpsimd_save(void)
 {
 	struct fpsimd_last_state_struct const *last =
 		this_cpu_ptr(&fpsimd_last_state);
 	/* set by fpsimd_bind_task_to_cpu() or fpsimd_bind_state_to_cpu() */
 
 	WARN_ON(!system_supports_fpsimd());
-	WARN_ON(!have_cpu_fpsimd_context());
+	WARN_ON(!hard_irqs_disabled() && !have_cpu_fpsimd_context());
 
 	if (!test_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		if (system_supports_sve() && test_thread_flag(TIF_SVE)) {
@@ -324,6 +355,15 @@
 		} else
 			fpsimd_save_state(last->st);
 	}
+}
+
+void fpsimd_save(void)
+{
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
+	__fpsimd_save();
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -444,7 +484,7 @@
  * task->thread.uw.fpsimd_state must be up to date before calling this
  * function.
  */
-static void fpsimd_to_sve(struct task_struct *task)
+static void _fpsimd_to_sve(struct task_struct *task)
 {
 	unsigned int vq;
 	void *sst = task->thread.sve_state;
@@ -455,6 +495,15 @@
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
 	__fpsimd_to_sve(sst, fst, vq);
+}
+
+static void fpsimd_to_sve(struct task_struct *task)
+{
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
+	_fpsimd_to_sve(task);
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -475,15 +524,20 @@
 	struct user_fpsimd_state *fst = &task->thread.uw.fpsimd_state;
 	unsigned int i;
 	__uint128_t const *p;
+	unsigned long flags;
 
 	if (!system_supports_sve())
 		return;
+
+	flags = hard_cond_local_irq_save();
 
 	vq = sve_vq_from_vl(task->thread.sve_vl);
 	for (i = 0; i < SVE_NUM_ZREGS; ++i) {
 		p = (__uint128_t const *)ZREG(sst, vq, i);
 		fst->vregs[i] = arm64_le128_to_cpu(*p);
 	}
+
+	hard_cond_local_irq_restore(flags);
 }
 
 #ifdef CONFIG_ARM64_SVE
@@ -584,6 +638,8 @@
 int sve_set_vector_length(struct task_struct *task,
 			  unsigned long vl, unsigned long flags)
 {
+	unsigned long irqflags = 0;
+
 	if (flags & ~(unsigned long)(PR_SVE_VL_INHERIT |
 				     PR_SVE_SET_VL_ONEXEC))
 		return -EINVAL;
@@ -621,9 +677,9 @@
 	 * non-SVE thread.
 	 */
 	if (task == current) {
-		get_cpu_fpsimd_context();
+		get_cpu_fpsimd_context(irqflags);
 
-		fpsimd_save();
+		__fpsimd_save();
 	}
 
 	fpsimd_flush_task_state(task);
@@ -631,7 +687,7 @@
 		sve_to_fpsimd(task);
 
 	if (task == current)
-		put_cpu_fpsimd_context();
+		put_cpu_fpsimd_context(irqflags);
 
 	/*
 	 * Force reallocation of task SVE state to the correct size
@@ -936,17 +992,21 @@
  */
 void do_sve_acc(unsigned int esr, struct pt_regs *regs)
 {
+	unsigned long flags;
+
+	mark_trap_entry(ARM64_TRAP_SVE, regs);
+
 	/* Even if we chose not to use SVE, the hardware could still trap: */
 	if (unlikely(!system_supports_sve()) || WARN_ON(is_compat_task())) {
 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
-		return;
+		goto out;
 	}
 
 	sve_alloc(current);
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
-	fpsimd_save();
+	__fpsimd_save();
 
 	/* Force ret_to_user to reload the registers: */
 	fpsimd_flush_task_state(current);
@@ -955,7 +1015,9 @@
 	if (test_and_set_thread_flag(TIF_SVE))
 		WARN_ON(1); /* SVE access shouldn't have trapped */
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
+out:
+	mark_trap_exit(ARM64_TRAP_SVE, regs);
 }
 
 /*
@@ -974,6 +1036,9 @@
 {
 	unsigned int si_code = FPE_FLTUNK;
 
+	if (!mark_cond_trap_entry(ARM64_TRAP_FPE, regs))
+		return;
+
 	if (esr & ESR_ELx_FP_EXC_TFV) {
 		if (esr & FPEXC_IOF)
 			si_code = FPE_FLTINV;
@@ -990,19 +1055,24 @@
 	send_sig_fault(SIGFPE, si_code,
 		       (void __user *)instruction_pointer(regs),
 		       current);
+
+	mark_trap_exit(ARM64_TRAP_FPE, regs);
 }
 
 void fpsimd_thread_switch(struct task_struct *next)
 {
 	bool wrong_task, wrong_cpu;
+	unsigned long flags;
 
 	if (!system_supports_fpsimd())
 		return;
 
+	flags = hard_cond_local_irq_save();
+
 	__get_cpu_fpsimd_context();
 
 	/* Save unsaved fpsimd state, if any: */
-	fpsimd_save();
+	__fpsimd_save();
 
 	/*
 	 * Fix up TIF_FOREIGN_FPSTATE to correctly describe next's
@@ -1017,16 +1087,19 @@
 			       wrong_task || wrong_cpu);
 
 	__put_cpu_fpsimd_context();
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void fpsimd_flush_thread(void)
 {
 	int vl, supported_vl;
+	unsigned long flags;
 
 	if (!system_supports_fpsimd())
 		return;
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	fpsimd_flush_task_state(current);
 	memset(&current->thread.uw.fpsimd_state, 0,
@@ -1067,7 +1140,7 @@
 			current->thread.sve_vl_onexec = 0;
 	}
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@@ -1076,12 +1149,14 @@
  */
 void fpsimd_preserve_current_state(void)
 {
+	unsigned long flags;
+
 	if (!system_supports_fpsimd())
 		return;
 
-	get_cpu_fpsimd_context();
-	fpsimd_save();
-	put_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
+	__fpsimd_save();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@@ -1123,7 +1198,7 @@
 	}
 }
 
-void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+static void __fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
 			      unsigned int sve_vl)
 {
 	struct fpsimd_last_state_struct *last =
@@ -1137,6 +1212,18 @@
 	last->sve_vl = sve_vl;
 }
 
+void fpsimd_bind_state_to_cpu(struct user_fpsimd_state *st, void *sve_state,
+			      unsigned int sve_vl)
+{
+	unsigned long flags;
+
+	WARN_ON(!in_softirq() && !irqs_disabled());
+
+	flags = hard_cond_local_irq_save();
+	__fpsimd_bind_state_to_cpu(st, sve_state, sve_vl);
+	hard_cond_local_irq_restore(flags);
+}
+
 /*
  * Load the userland FPSIMD state of 'current' from memory, but only if the
  * FPSIMD state already held in the registers is /not/ the most recent FPSIMD
@@ -1144,6 +1231,8 @@
  */
 void fpsimd_restore_current_state(void)
 {
+	unsigned long flags;
+
 	/*
 	 * For the tasks that were created before we detected the absence of
 	 * FP/SIMD, the TIF_FOREIGN_FPSTATE could be set via fpsimd_thread_switch(),
@@ -1158,14 +1247,14 @@
 		return;
 	}
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	if (test_and_clear_thread_flag(TIF_FOREIGN_FPSTATE)) {
 		task_fpsimd_load();
 		fpsimd_bind_task_to_cpu();
 	}
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@@ -1175,21 +1264,23 @@
  */
 void fpsimd_update_current_state(struct user_fpsimd_state const *state)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	current->thread.uw.fpsimd_state = *state;
 	if (system_supports_sve() && test_thread_flag(TIF_SVE))
-		fpsimd_to_sve(current);
+		_fpsimd_to_sve(current);
 
 	task_fpsimd_load();
 	fpsimd_bind_task_to_cpu();
 
 	clear_thread_flag(TIF_FOREIGN_FPSTATE);
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 
 /*
@@ -1239,9 +1330,9 @@
 {
 	if (!system_supports_fpsimd())
 		return;
-	WARN_ON(preemptible());
+	WARN_ON(!hard_irqs_disabled() && preemptible());
 	__get_cpu_fpsimd_context();
-	fpsimd_save();
+	__fpsimd_save();
 	fpsimd_flush_cpu_state();
 	__put_cpu_fpsimd_context();
 }
@@ -1267,18 +1358,23 @@
  */
 void kernel_neon_begin(void)
 {
+	unsigned long flags;
+
 	if (WARN_ON(!system_supports_fpsimd()))
 		return;
 
 	BUG_ON(!may_use_simd());
 
-	get_cpu_fpsimd_context();
+	get_cpu_fpsimd_context(flags);
 
 	/* Save unsaved fpsimd state, if any: */
-	fpsimd_save();
+	__fpsimd_save();
 
 	/* Invalidate any task state remaining in the fpsimd regs: */
 	fpsimd_flush_cpu_state();
+
+	if (dovetailing())
+		hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL(kernel_neon_begin);
 
@@ -1293,10 +1389,12 @@
  */
 void kernel_neon_end(void)
 {
+	unsigned long flags = hard_local_save_flags();
+
 	if (!system_supports_fpsimd())
 		return;
 
-	put_cpu_fpsimd_context();
+	put_cpu_fpsimd_context(flags);
 }
 EXPORT_SYMBOL(kernel_neon_end);
 
@@ -1386,9 +1484,13 @@
 static int fpsimd_cpu_pm_notifier(struct notifier_block *self,
 				  unsigned long cmd, void *v)
 {
+	unsigned long flags;
+
 	switch (cmd) {
 	case CPU_PM_ENTER:
+		flags = hard_cond_local_irq_save();
 		fpsimd_save_and_flush_cpu_state();
+		hard_cond_local_irq_restore(flags);
 		break;
 	case CPU_PM_EXIT:
 		break;
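
Note on the conversion above: get_cpu_fpsimd_context() and put_cpu_fpsimd_context()
become flag-carrying macros so that the same call sites build both ways, taking
hard interrupts and preemption off under CONFIG_DOVETAIL, plain preemption off
otherwise. A minimal usage sketch, mirroring fpsimd_preserve_current_state() as
patched:

	unsigned long flags;

	get_cpu_fpsimd_context(flags);	/* hard irqs off under Dovetail */
	__fpsimd_save();		/* regs -> memory, owner cannot change */
	put_cpu_fpsimd_context(flags);	/* restore the saved irq state */
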
diff --git a/kernel/arch/arm64/kernel/irq.c b/kernel/arch/arm64/kernel/irq.c
index dfb1fea..e625e14 100644
--- a/kernel/arch/arm64/kernel/irq.c
+++ b/kernel/arch/arm64/kernel/irq.c
@@ -14,6 +14,7 @@
 #include <linux/memory.h>
 #include <linux/smp.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/init.h>
 #include <linux/irqchip.h>
 #include <linux/kprobes.h>
@@ -28,6 +29,15 @@
 
 DEFINE_PER_CPU(unsigned long *, irq_stack_ptr);
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+asmlinkage int notrace
+handle_arch_irq_pipelined(struct pt_regs *regs)
+{
+	return handle_irq_pipelined(regs);
+}
+
+#endif
 
 DECLARE_PER_CPU(unsigned long *, irq_shadow_call_stack_ptr);
 
diff --git a/kernel/arch/arm64/kernel/irq_pipeline.c b/kernel/arch/arm64/kernel/irq_pipeline.c
new file mode 100644
index 0000000..cc1b354
--- /dev/null
+++ b/kernel/arch/arm64/kernel/irq_pipeline.c
@@ -0,0 +1,23 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2018 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+
+/* irq_nesting tracks the interrupt nesting level for a CPU. */
+DEFINE_PER_CPU(int, irq_nesting);
+
+void arch_do_IRQ_pipelined(struct irq_desc *desc)
+{
+	struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs);
+	unsigned int irq = irq_desc_get_irq(desc);
+
+	__handle_domain_irq(NULL, irq, false, regs);
+}
+
+void __init arch_irq_pipeline_init(void)
+{
+	/* no per-arch init. */
+}
diff --git a/kernel/arch/arm64/kernel/process.c b/kernel/arch/arm64/kernel/process.c
index c38a5ab..c2b328f 100644
--- a/kernel/arch/arm64/kernel/process.c
+++ b/kernel/arch/arm64/kernel/process.c
@@ -125,6 +125,7 @@
 	 * tricks
 	 */
 	cpu_do_idle();
+	hard_cond_local_irq_enable();
 	raw_local_irq_enable();
 }
 
@@ -824,8 +825,41 @@
 core_initcall(tagged_addr_init);
 #endif	/* CONFIG_ARM64_TAGGED_ADDR_ABI */
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * When pipelining interrupts, we have to reconcile the hardware and
+ * the virtual states. Hard irqs are off on entry while the current
+ * stage has to be unstalled: fix this up by stalling the in-band
+ * stage on entry, unstalling on exit.
+ */
+static inline void arm64_preempt_irq_enter(void)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && test_inband_stall());
+	stall_inband();
+	trace_hardirqs_off();
+}
+
+static inline void arm64_preempt_irq_exit(void)
+{
+	trace_hardirqs_on();
+	unstall_inband();
+}
+
+#else
+
+static inline void arm64_preempt_irq_enter(void)
+{ }
+
+static inline void arm64_preempt_irq_exit(void)
+{ }
+
+#endif
+
 asmlinkage void __sched arm64_preempt_schedule_irq(void)
 {
+	arm64_preempt_irq_enter();
+
 	lockdep_assert_irqs_disabled();
 
 	/*
@@ -838,6 +872,8 @@
 	 */
 	if (system_capabilities_finalized())
 		preempt_schedule_irq();
+
+	arm64_preempt_irq_exit();
 }
 
 #ifdef CONFIG_BINFMT_ELF
diff --git a/kernel/arch/arm64/kernel/signal.c b/kernel/arch/arm64/kernel/signal.c
index b6fbbd5..3bf8eae 100644
--- a/kernel/arch/arm64/kernel/signal.c
+++ b/kernel/arch/arm64/kernel/signal.c
@@ -11,6 +11,7 @@
 #include <linux/errno.h>
 #include <linux/kernel.h>
 #include <linux/signal.h>
+#include <linux/irq_pipeline.h>
 #include <linux/personality.h>
 #include <linux/freezer.h>
 #include <linux/stddef.h>
@@ -914,19 +915,36 @@
 	restore_saved_sigmask();
 }
 
+static inline void do_retuser(void)
+{
+	unsigned long thread_flags;
+
+	if (dovetailing()) {
+		thread_flags = current_thread_info()->flags;
+		if (thread_flags & _TIF_RETUSER)
+			inband_retuser_notify();
+	}
+}
+
 asmlinkage void do_notify_resume(struct pt_regs *regs,
 				 unsigned long thread_flags)
 {
+	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
+
+	stall_inband();
+
 	do {
 		/* Check valid user FS if needed */
 		addr_limit_user_check();
 
 		if (thread_flags & _TIF_NEED_RESCHED) {
 			/* Unmask Debug and SError for the next task */
-			local_daif_restore(DAIF_PROCCTX_NOIRQ);
+			local_daif_restore(irqs_pipelined() ? DAIF_PROCCTX :
+					DAIF_PROCCTX_NOIRQ);
 
 			schedule();
 		} else {
+			unstall_inband();
 			local_daif_restore(DAIF_PROCCTX);
 
 			if (thread_flags & _TIF_UPROBE)
@@ -948,11 +966,29 @@
 
 			if (thread_flags & _TIF_FOREIGN_FPSTATE)
 				fpsimd_restore_current_state();
+
+			do_retuser();
+			/* RETUSER might have switched oob */
+			if (running_oob()) {
+				local_daif_mask();
+				return;
+			}
 		}
 
+		/*
+		 * Dovetail: we may have restored the fpsimd state for
+		 * current with no other opportunity to check for
+		 * _TIF_FOREIGN_FPSTATE until we are back running on
+		 * el0, so we must not take any interrupt until then,
+		 * otherwise we may end up resuming with some OOB
+		 * thread's fpsimd state.
+		 */
 		local_daif_mask();
+		stall_inband();
 		thread_flags = READ_ONCE(current_thread_info()->flags);
 	} while (thread_flags & _TIF_WORK_MASK);
+
+	unstall_inband();
 }
 
 unsigned long __ro_after_init signal_minsigstksz;
diff --git a/kernel/arch/arm64/kernel/smp.c b/kernel/arch/arm64/kernel/smp.c
index 581defe..ba10c60 100644
--- a/kernel/arch/arm64/kernel/smp.c
+++ b/kernel/arch/arm64/kernel/smp.c
@@ -86,7 +86,7 @@
 	NR_IPI
 };
 
-static int ipi_irq_base __read_mostly;
+int ipi_irq_base __read_mostly;
 static int nr_ipi __read_mostly = NR_IPI;
 static struct irq_desc *ipi_desc[NR_IPI] __read_mostly;
 
@@ -273,6 +273,7 @@
 	complete(&cpu_running);
 
 	local_daif_restore(DAIF_PROCCTX);
+	local_irq_enable_full();
 
 	/*
 	 * OK, it's off to the idle thread for us
@@ -811,6 +812,8 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr);
 
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu);
+
 unsigned long irq_err_count;
 
 int arch_show_interrupts(struct seq_file *p, int prec)
@@ -822,7 +825,7 @@
 		seq_printf(p, "%*s%u:%s", prec - 1, "IPI", i,
 			   prec >= 4 ? " " : "");
 		for_each_online_cpu(cpu)
-			seq_printf(p, "%10u ", kstat_irqs_cpu(irq, cpu));
+			seq_printf(p, "%10u ", get_ipi_count(irq, cpu));
 		seq_printf(p, "      %s\n", ipi_types[i]);
 	}
 
@@ -888,7 +891,7 @@
 
 	atomic_dec(&waiting_for_crash_ipi);
 
-	local_irq_disable();
+	local_irq_disable_full();
 	sdei_mask_local_cpu();
 
 	if (IS_ENABLED(CONFIG_HOTPLUG_CPU))
@@ -900,7 +903,7 @@
 }
 
 /*
- * Main handler for inter-processor interrupts
+ * Main handler for inter-processor interrupts on the in-band stage.
  */
 static void do_handle_IPI(int ipinr)
 {
@@ -963,6 +966,73 @@
 		trace_ipi_exit_rcuidle(ipi_types[ipinr]);
 }
 
+static void __smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	trace_ipi_raise(target, ipi_types[ipinr]);
+	__ipi_send_mask(ipi_desc[ipinr], target);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static DEFINE_PER_CPU(unsigned long, ipi_messages);
+
+static DEFINE_PER_CPU(unsigned int [NR_IPI], ipi_counts);
+
+static irqreturn_t ipi_handler(int irq, void *data)
+{
+	unsigned long *pmsg;
+	unsigned int ipinr;
+
+	/*
+	 * Decode in-band IPIs (0..NR_IPI - 1) multiplexed over
+	 * SGI0. Out-of-band IPIs (SGI1, SGI2) have their own
+	 * individual handler.
+	 */
+	pmsg = raw_cpu_ptr(&ipi_messages);
+	while (*pmsg) {
+		ipinr = ffs(*pmsg) - 1;
+		clear_bit(ipinr, pmsg);
+		__this_cpu_inc(ipi_counts[ipinr]);
+		do_handle_IPI(ipinr);
+	}
+
+	return IRQ_HANDLED;
+}
+
+static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
+{
+	unsigned int cpu;
+
+	/* regular in-band IPI (multiplexed over SGI0). */
+	for_each_cpu(cpu, target)
+		set_bit(ipinr, &per_cpu(ipi_messages, cpu));
+
+	wmb();
+	__smp_cross_call(target, 0);
+}
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return per_cpu(ipi_counts[irq - ipi_irq_base], cpu);
+}
+
+void irq_send_oob_ipi(unsigned int irq,
+		const struct cpumask *cpumask)
+{
+	unsigned int sgi = irq - ipi_irq_base;
+
+	if (WARN_ON(irq_pipeline_debug() &&
+		    (sgi < OOB_IPI_OFFSET ||
+		     sgi >= OOB_IPI_OFFSET + OOB_NR_IPI)))
+		return;
+
+	/* Out-of-band IPI (SGI1-2). */
+	__smp_cross_call(cpumask, sgi);
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+#else
+
 static irqreturn_t ipi_handler(int irq, void *data)
 {
 	do_handle_IPI(irq - ipi_irq_base);
@@ -971,9 +1041,15 @@
 
 static void smp_cross_call(const struct cpumask *target, unsigned int ipinr)
 {
-	trace_ipi_raise(target, ipi_types[ipinr]);
-	__ipi_send_mask(ipi_desc[ipinr], target);
+	__smp_cross_call(target, ipinr);
 }
+
+static unsigned int get_ipi_count(unsigned int irq, unsigned int cpu)
+{
+	return kstat_irqs_cpu(irq, cpu);
+}
+
+#endif /* CONFIG_IRQ_PIPELINE */
 
 static void ipi_setup(int cpu)
 {
@@ -1001,18 +1077,25 @@
 
 void __init set_smp_ipi_range(int ipi_base, int n)
 {
-	int i;
+	int i, inband_nr_ipi;
 
 	WARN_ON(n < NR_IPI);
 	nr_ipi = min(n, NR_IPI);
+	/*
+	 * irq_pipeline: the in-band stage traps SGI0 only,
+	 * over which IPI messages are multiplexed. Other SGIs
+	 * are available for exchanging out-of-band IPIs.
+	 */
+	inband_nr_ipi = irqs_pipelined() ? 1 : nr_ipi;
 
 	for (i = 0; i < nr_ipi; i++) {
-		int err;
+		if (i < inband_nr_ipi) {
+			int err;
 
-		err = request_percpu_irq(ipi_base + i, ipi_handler,
-					 "IPI", &cpu_number);
-		WARN_ON(err);
-
+			err = request_percpu_irq(ipi_base + i, ipi_handler,
+						"IPI", &cpu_number);
+			WARN_ON(err);
+		}
 		ipi_desc[i] = irq_to_desc(ipi_base + i);
 		irq_set_status_flags(ipi_base + i, IRQ_HIDDEN);
 
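The in-band multiplexing added above is just a per-CPU message bitmask drained
lowest-bit-first with ffs(). A self-contained user-space model of the
encode/decode logic (hypothetical, for illustration only, not kernel code):

	#include <stdio.h>
	#include <strings.h>	/* ffs() */

	int main(void)
	{
		unsigned int msgs = 0;	/* stands in for per-CPU ipi_messages */

		msgs |= 1U << 2;	/* sender: set message 2, then kick SGI0 */
		msgs |= 1U << 0;	/* sender: set message 0 */

		while (msgs) {		/* receiver: drain all pending messages */
			int ipinr = ffs(msgs) - 1;
			msgs &= ~(1U << ipinr);
			printf("handle in-band IPI %d\n", ipinr); /* do_handle_IPI() */
		}
		return 0;
	}
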
diff --git a/kernel/arch/arm64/kernel/syscall.c b/kernel/arch/arm64/kernel/syscall.c
index 6f9839d..24ab737 100644
--- a/kernel/arch/arm64/kernel/syscall.c
+++ b/kernel/arch/arm64/kernel/syscall.c
@@ -2,6 +2,7 @@
 
 #include <linux/compiler.h>
 #include <linux/context_tracking.h>
+#include <linux/irqstage.h>
 #include <linux/errno.h>
 #include <linux/nospec.h>
 #include <linux/ptrace.h>
@@ -94,6 +95,7 @@
 			   const syscall_fn_t syscall_table[])
 {
 	unsigned long flags = current_thread_info()->flags;
+	int ret;
 
 	regs->orig_x0 = regs->regs[0];
 	regs->syscallno = scno;
@@ -117,9 +119,18 @@
 	 */
 
 	cortex_a76_erratum_1463225_svc_handler();
+	WARN_ON_ONCE(dovetail_debug() &&
+		     running_inband() && test_inband_stall());
 	local_daif_restore(DAIF_PROCCTX);
 
-	if (flags & _TIF_MTE_ASYNC_FAULT) {
+	ret = pipeline_syscall(scno, regs);
+	if (ret > 0)
+		return;
+
+	if (ret < 0)
+		goto tail_work;
+
+	if (system_supports_mte() && (flags & _TIF_MTE_ASYNC_FAULT)) {
 		/*
 		 * Process the asynchronous tag check fault before the actual
 		 * syscall. do_notify_resume() will send a signal to userspace
@@ -159,11 +170,16 @@
 	 * check again. However, if we were tracing entry, then we always trace
 	 * exit regardless, as the old entry assembly did.
 	 */
+tail_work:
 	if (!has_syscall_work(flags) && !IS_ENABLED(CONFIG_DEBUG_RSEQ)) {
 		local_daif_mask();
+		stall_inband();
 		flags = current_thread_info()->flags;
-		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP))
+		if (!has_syscall_work(flags) && !(flags & _TIF_SINGLESTEP)) {
+			unstall_inband();
 			return;
+		}
+		unstall_inband();
 		local_daif_restore(DAIF_PROCCTX);
 	}
 
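For reference, the return convention of pipeline_syscall() as relied upon above,
shown as a sketch of the call site with each branch spelled out (the exact
contract lives in the Dovetail core):

	ret = pipeline_syscall(scno, regs);
	if (ret > 0)		/* handled entirely on the oob stage */
		return;		/* fast exit, no in-band tail work */
	if (ret < 0)		/* handled, but tail work is pending */
		goto tail_work;	/* run the common exit checks only */
	/* ret == 0: not a companion-core syscall, regular path follows */
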
diff --git a/kernel/arch/arm64/kernel/traps.c b/kernel/arch/arm64/kernel/traps.c
index 49b4b7b..a670df1 100644
--- a/kernel/arch/arm64/kernel/traps.c
+++ b/kernel/arch/arm64/kernel/traps.c
@@ -15,6 +15,7 @@
 #include <linux/spinlock.h>
 #include <linux/uaccess.h>
 #include <linux/hardirq.h>
+#include <linux/irqstage.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/kexec.h>
@@ -117,7 +118,7 @@
 	return ret;
 }
 
-static DEFINE_RAW_SPINLOCK(die_lock);
+static DEFINE_HARD_SPINLOCK(die_lock);
 
 /*
  * This function is protected against re-entrancy.
@@ -292,7 +293,7 @@
 }
 
 static LIST_HEAD(undef_hook);
-static DEFINE_RAW_SPINLOCK(undef_lock);
+static DEFINE_HARD_SPINLOCK(undef_lock);
 
 void register_undef_hook(struct undef_hook *hook)
 {
@@ -406,6 +407,13 @@
 
 void do_undefinstr(struct pt_regs *regs)
 {
+	/*
+	 * If the companion core did not switch us back to the in-band
+	 * context, we may assume that it has handled the trap.
+	 */
+	if (running_oob())
+		return;
+
 	/* check for AArch32 breakpoint instructions */
 	if (!aarch32_break_handler(regs))
 		return;
@@ -415,14 +423,18 @@
 
 	trace_android_rvh_do_undefinstr(regs, user_mode(regs));
 	BUG_ON(!user_mode(regs));
+	mark_trap_entry(ARM64_TRAP_UNDI, regs);
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+	mark_trap_exit(ARM64_TRAP_UNDI, regs);
 }
 NOKPROBE_SYMBOL(do_undefinstr);
 
 void do_bti(struct pt_regs *regs)
 {
 	BUG_ON(!user_mode(regs));
+	mark_trap_entry(ARM64_TRAP_BTI, regs);
 	force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+	mark_trap_exit(ARM64_TRAP_BTI, regs);
 }
 NOKPROBE_SYMBOL(do_bti);
 
@@ -492,9 +504,11 @@
 		return;
 	}
 
-	if (ret)
-		arm64_notify_segfault(tagged_address);
-	else
+	if (ret) {
+		mark_trap_entry(ARM64_TRAP_ACCESS, regs);
+		arm64_notify_segfault(tagged_address);
+		mark_trap_exit(ARM64_TRAP_ACCESS, regs);
+	} else
 		arm64_skip_faulting_instruction(regs, AARCH64_INSN_SIZE);
 }
 
@@ -540,8 +554,11 @@
 	rt = ESR_ELx_SYS64_ISS_RT(esr);
 	sysreg = esr_sys64_to_sysreg(esr);
 
-	if (do_emulate_mrs(regs, sysreg, rt) != 0)
+	if (do_emulate_mrs(regs, sysreg, rt) != 0) {
+		mark_trap_entry(ARM64_TRAP_ACCESS, regs);
 		force_signal_inject(SIGILL, ILL_ILLOPC, regs->pc, 0);
+		mark_trap_exit(ARM64_TRAP_ACCESS, regs);
+	}
 }
 
 static void wfi_handler(unsigned int esr, struct pt_regs *regs)
@@ -768,6 +785,11 @@
  */
 asmlinkage void notrace bad_mode(struct pt_regs *regs, int reason, unsigned int esr)
 {
+	/*
+	 * Dovetail: Same as __do_kernel_fault(), don't bother
+	 * restoring the in-band stage: this trap is fatal and we are
+	 * already walking on thin ice.
+	 */
 	arm64_enter_nmi(regs);
 
 	console_verbose();
@@ -790,11 +812,13 @@
 {
 	unsigned long pc = instruction_pointer(regs);
 
+	mark_trap_entry(ARM64_TRAP_ACCESS, regs);
 	current->thread.fault_address = 0;
 	current->thread.fault_code = esr;
 
 	arm64_force_sig_fault(SIGILL, ILL_ILLOPC, pc,
 			      "Bad EL0 synchronous exception");
+	mark_trap_exit(ARM64_TRAP_ACCESS, regs);
 }
 
 #ifdef CONFIG_VMAP_STACK
diff --git a/kernel/arch/arm64/kernel/vdso.c b/kernel/arch/arm64/kernel/vdso.c
index debb899..9ddd257 100644
--- a/kernel/arch/arm64/kernel/vdso.c
+++ b/kernel/arch/arm64/kernel/vdso.c
@@ -43,6 +43,8 @@
 	VVAR_NR_PAGES,
 };
 
+#define VPRIV_NR_PAGES __VPRIV_PAGES
+
 struct vdso_abi_info {
 	const char *name;
 	const char *vdso_code_start;
@@ -123,6 +125,9 @@
 		vdso_pagelist[i] = pfn_to_page(pfn + i);
 
 	vdso_info[abi].cm->pages = vdso_pagelist;
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	vdso_data->cs_type_seq = CLOCKSOURCE_VDSO_NONE << 16 | 1;
+#endif
 
 	return 0;
 }
@@ -243,7 +248,8 @@
 
 	vdso_text_len = vdso_info[abi].vdso_pages << PAGE_SHIFT;
 	/* Be sure to map the data page */
-	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE;
+	vdso_mapping_len = vdso_text_len + VVAR_NR_PAGES * PAGE_SIZE
+		+ VPRIV_NR_PAGES * PAGE_SIZE;
 
 	vdso_base = get_unmapped_area(NULL, 0, vdso_mapping_len, 0, 0);
 	if (IS_ERR_VALUE(vdso_base)) {
@@ -251,6 +257,26 @@
 		goto up_fail;
 	}
 
+	/*
+	 * Install the vDSO mappings we need:
+	 *
+	 * +----------------+
+	 * |     vpriv      |  PAGE_SIZE (private anon page if GENERIC_CLOCKSOURCE_VDSO)
+	 * |----------------|
+	 * |     vvar       |  PAGE_SIZE (shared)
+	 * |----------------|
+	 * |     text       |  text_pages * PAGE_SIZE (shared)
+	 * |        ...     |
+	 * +----------------+
+	 */
+	if (VPRIV_NR_PAGES > 0 && mmap_region(NULL, vdso_base, PAGE_SIZE,
+			VM_READ | VM_WRITE | VM_MAYREAD | VM_MAYWRITE,
+			0, NULL) != vdso_base) {
+		ret = ERR_PTR(-EINVAL);
+		goto up_fail;
+	}
+
+	vdso_base += VPRIV_NR_PAGES * PAGE_SIZE; /* Skip private area. */
 	ret = _install_special_mapping(mm, vdso_base, VVAR_NR_PAGES * PAGE_SIZE,
 				       VM_READ|VM_MAYREAD|VM_PFNMAP,
 				       vdso_info[abi].dm);
diff --git a/kernel/arch/arm64/kernel/vdso/vdso.lds.S b/kernel/arch/arm64/kernel/vdso/vdso.lds.S
index b840ab1..93ff9fa 100644
--- a/kernel/arch/arm64/kernel/vdso/vdso.lds.S
+++ b/kernel/arch/arm64/kernel/vdso/vdso.lds.S
@@ -21,6 +21,9 @@
 #ifdef CONFIG_TIME_NS
 	PROVIDE(_timens_data = _vdso_data + PAGE_SIZE);
 #endif
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	PROVIDE(_vdso_priv = _vdso_data - __VPRIV_PAGES * PAGE_SIZE);
+#endif
 	. = VDSO_LBASE + SIZEOF_HEADERS;
 
 	.hash		: { *(.hash) }			:text
diff --git a/kernel/arch/arm64/kernel/vdso32/vdso.lds.S b/kernel/arch/arm64/kernel/vdso32/vdso.lds.S
index 3348ce5..63354d1 100644
--- a/kernel/arch/arm64/kernel/vdso32/vdso.lds.S
+++ b/kernel/arch/arm64/kernel/vdso32/vdso.lds.S
@@ -21,6 +21,9 @@
 #ifdef CONFIG_TIME_NS
 	PROVIDE_HIDDEN(_timens_data = _vdso_data + PAGE_SIZE);
 #endif
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	PROVIDE_HIDDEN(_vdso_priv = _vdso_data - __VPRIV_PAGES * PAGE_SIZE);
+#endif
 	. = VDSO_LBASE + SIZEOF_HEADERS;
 
 	.hash		: { *(.hash) }			:text
diff --git a/kernel/arch/arm64/mm/context.c b/kernel/arch/arm64/mm/context.c
index 001737a..336aca9 100644
--- a/kernel/arch/arm64/mm/context.c
+++ b/kernel/arch/arm64/mm/context.c
@@ -18,7 +18,7 @@
 #include <asm/tlbflush.h>
 
 static u32 asid_bits;
-static DEFINE_RAW_SPINLOCK(cpu_asid_lock);
+static DEFINE_HARD_SPINLOCK(cpu_asid_lock);
 
 static atomic64_t asid_generation;
 static unsigned long *asid_map;
@@ -217,6 +217,9 @@
 	unsigned long flags;
 	unsigned int cpu;
 	u64 asid, old_active_asid;
+	bool need_flush;
+
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
 
 	if (system_supports_cnp())
 		cpu_set_reserved_ttbr0();
@@ -252,12 +255,14 @@
 	}
 
 	cpu = smp_processor_id();
-	if (cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending))
-		local_flush_tlb_all();
+	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
 
 	atomic64_set(this_cpu_ptr(&active_asids), asid);
 	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);
 
+	if (need_flush)
+		local_flush_tlb_all();
+
 switch_mm_fastpath:
 
 	arm64_apply_bp_hardening();
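
The reordering above matters once cpu_asid_lock becomes a hard spinlock: the
TLB flush is deferred until the lock is dropped so the hard-irqs-off section
stays short. The transformed pattern, condensed:

	raw_spin_lock_irqsave(&cpu_asid_lock, flags);	/* hard irqs off */
	need_flush = cpumask_test_and_clear_cpu(cpu, &tlb_flush_pending);
	atomic64_set(this_cpu_ptr(&active_asids), asid);
	raw_spin_unlock_irqrestore(&cpu_asid_lock, flags);

	if (need_flush)		/* expensive part, now outside the lock */
		local_flush_tlb_all();
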
diff --git a/kernel/arch/arm64/mm/fault.c b/kernel/arch/arm64/mm/fault.c
index 45e652d..d755cbb 100644
--- a/kernel/arch/arm64/mm/fault.c
+++ b/kernel/arch/arm64/mm/fault.c
@@ -264,11 +264,11 @@
 	    (esr & ESR_ELx_FSC_TYPE) != ESR_ELx_FSC_FAULT)
 		return false;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	asm volatile("at s1e1r, %0" :: "r" (addr));
 	isb();
 	par = read_sysreg_par();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	/*
 	 * If we now have a valid translation, treat the translation fault as
@@ -399,6 +399,12 @@
 		msg = "paging request";
 	}
 
+	/*
+	 * Dovetail: Don't bother restoring the in-band stage in the
+	 * non-recoverable fault case: we got busted and a full stage
+	 * switch is likely to make things even worse. Try at least to
+	 * get some debug output before panicking.
+	 */
 	die_kernel_fault(msg, addr, esr, regs);
 }
 
@@ -471,8 +477,10 @@
 	if (user_mode(regs)) {
 		const struct fault_info *inf = esr_to_fault_info(esr);
 
+		mark_trap_entry(ARM64_TRAP_ACCESS, regs);
 		set_thread_esr(addr, esr);
 		arm64_force_sig_fault(inf->sig, inf->code, far, inf->name);
+		mark_trap_exit(ARM64_TRAP_ACCESS, regs);
 	} else {
 		__do_kernel_fault(addr, esr, regs);
 	}
@@ -536,6 +544,8 @@
 
 	if (kprobe_page_fault(regs, esr))
 		return 0;
+
+	mark_trap_entry(ARM64_TRAP_ACCESS, regs);
 
 	/*
 	 * If we're in an interrupt or have no user context, we must not take
@@ -612,7 +622,7 @@
 	if (fault_signal_pending(fault, regs)) {
 		if (!user_mode(regs))
 			goto no_context;
-		return 0;
+		goto out;
 	}
 
 	if (fault & VM_FAULT_RETRY) {
@@ -637,7 +647,7 @@
 	 */
 	if (likely(!(fault & (VM_FAULT_ERROR | VM_FAULT_BADMAP |
 			      VM_FAULT_BADACCESS))))
-		return 0;
+		goto out;
 
 	/*
 	 * If we are in kernel mode at this point, we have no context to
@@ -653,7 +663,7 @@
 		 * oom-killed).
 		 */
 		pagefault_out_of_memory();
-		return 0;
+		goto out;
 	}
 
 	inf = esr_to_fault_info(esr);
@@ -682,10 +692,12 @@
 				      far, inf->name);
 	}
 
-	return 0;
+	goto out;
 
 no_context:
 	__do_kernel_fault(addr, esr, regs);
+out:
+	mark_trap_exit(ARM64_TRAP_ACCESS, regs);
 	return 0;
 }
 
@@ -731,6 +743,8 @@
 	const struct fault_info *inf;
 	unsigned long siaddr;
 
+	mark_trap_entry(ARM64_TRAP_SEA, regs);
+
 	inf = esr_to_fault_info(esr);
 
 	if (user_mode(regs) && apei_claim_sea(regs) == 0) {
@@ -738,7 +752,7 @@
 		 * APEI claimed this as a firmware-first notification.
 		 * Some processing deferred to task_work before ret_to_user().
 		 */
-		return 0;
+		goto out;
 	}
 
 	if (esr & ESR_ELx_FnV) {
@@ -753,6 +767,8 @@
 	}
 	trace_android_rvh_do_sea(regs, esr, siaddr, inf->name);
 	arm64_notify_die(inf->name, regs, inf->sig, inf->code, siaddr, esr);
+out:
+	mark_trap_exit(ARM64_TRAP_SEA, regs);
 
 	return 0;
 }
@@ -845,6 +861,8 @@
 	if (!inf->fn(far, esr, regs))
 		return;
 
+	mark_trap_entry(ARM64_TRAP_ACCESS, regs);
+
 	if (!user_mode(regs)) {
 		pr_alert("Unhandled fault at 0x%016lx\n", addr);
 		trace_android_rvh_do_mem_abort(regs, esr, addr, inf->name);
@@ -858,6 +876,7 @@
 	 * address to the signal handler.
 	 */
 	arm64_notify_die(inf->name, regs, inf->sig, inf->code, addr, esr);
+	mark_trap_exit(ARM64_TRAP_ACCESS, regs);
 }
 NOKPROBE_SYMBOL(do_mem_abort);
 
@@ -871,9 +890,12 @@
 void do_sp_pc_abort(unsigned long addr, unsigned int esr, struct pt_regs *regs)
 {
 	trace_android_rvh_do_sp_pc_abort(regs, esr, addr, user_mode(regs));
+	mark_trap_entry(ARM64_TRAP_ALIGN, regs);
 
 	arm64_notify_die("SP/PC alignment exception", regs, SIGBUS, BUS_ADRALN,
 			 addr, esr);
+
+	mark_trap_exit(ARM64_TRAP_ALIGN, regs);
 }
 NOKPROBE_SYMBOL(do_sp_pc_abort);
 
@@ -968,6 +990,8 @@
 	if (cortex_a76_erratum_1463225_debug_handler(regs))
 		return;
 
+	mark_trap_entry(ARM64_TRAP_DEBUG, regs);
+
 	debug_exception_enter(regs);
 
 	if (user_mode(regs) && !is_ttbr0_addr(pc))
@@ -978,6 +1002,8 @@
 	}
 
 	debug_exception_exit(regs);
+
+	mark_trap_exit(ARM64_TRAP_DEBUG, regs);
 }
 NOKPROBE_SYMBOL(do_debug_exception);
 
diff --git a/kernel/arch/arm64/xenomai/Kconfig b/kernel/arch/arm64/xenomai/Kconfig
new file mode 120000
index 0000000..883810c
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/Makefile b/kernel/arch/arm64/xenomai/dovetail/Makefile
new file mode 120000
index 0000000..65b9c47
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h
new file mode 120000
index 0000000..a78ce4e
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/calibration.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h
new file mode 120000
index 0000000..4aa781f
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/features.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h
new file mode 120000
index 0000000..5f8f2ba
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/fptest.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h
new file mode 120000
index 0000000..5357527
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/machine.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h
new file mode 120000
index 0000000..74725b2
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h
new file mode 120000
index 0000000..f5d0dcb
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/syscall32.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h
new file mode 120000
index 0000000..c75bd9b
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h
new file mode 120000
index 0000000..1cc6d3f
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/include/asm/xenomai/wrappers.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/dovetail/machine.c b/kernel/arch/arm64/xenomai/dovetail/machine.c
new file mode 120000
index 0000000..9f2c965
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/dovetail/machine.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h
new file mode 120000
index 0000000..2f40f5f
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/arith.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h
new file mode 120000
index 0000000..aa1899e
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/features.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h
new file mode 120000
index 0000000..e11fa39
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/fptest.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h
new file mode 120000
index 0000000..55c4265
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/syscall.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h
new file mode 120000
index 0000000..39f44c7
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/include/asm/xenomai/uapi/tsc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/Makefile b/kernel/arch/arm64/xenomai/ipipe/Makefile
new file mode 120000
index 0000000..2591050
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h
new file mode 120000
index 0000000..d7db202
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/calibration.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h
new file mode 120000
index 0000000..f465a22
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/features.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h
new file mode 120000
index 0000000..26e47be
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/fptest.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h
new file mode 120000
index 0000000..e7ecef4
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/machine.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h
new file mode 120000
index 0000000..ac8ecae
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h
new file mode 120000
index 0000000..b78d657
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/syscall32.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h
new file mode 120000
index 0000000..352ebb9
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h
new file mode 120000
index 0000000..51a1bb5
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/include/asm/xenomai/wrappers.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/machine.c b/kernel/arch/arm64/xenomai/ipipe/machine.c
new file mode 120000
index 0000000..64a9700
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/machine.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/syscall.c b/kernel/arch/arm64/xenomai/ipipe/syscall.c
new file mode 120000
index 0000000..6ce0ebb
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/syscall.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c
\ No newline at end of file
diff --git a/kernel/arch/arm64/xenomai/ipipe/thread.c b/kernel/arch/arm64/xenomai/ipipe/thread.c
new file mode 120000
index 0000000..5dc5094
--- /dev/null
+++ b/kernel/arch/arm64/xenomai/ipipe/thread.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c
\ No newline at end of file
diff --git a/kernel/arch/x86/Kconfig b/kernel/arch/x86/Kconfig
index 32536ff..3f5a5ad 100644
--- a/kernel/arch/x86/Kconfig
+++ b/kernel/arch/x86/Kconfig
@@ -29,6 +29,8 @@
 	select ARCH_SUPPORTS_SPECULATIVE_PAGE_FAULT
 	select ARCH_USE_CMPXCHG_LOCKREF
 	select HAVE_ARCH_SOFT_DIRTY
+	select HAVE_IRQ_PIPELINE
+	select HAVE_DOVETAIL
 	select MODULES_USE_ELF_RELA
 	select NEED_DMA_MAP_STATE
 	select SWIOTLB
@@ -208,6 +210,7 @@
 	select HAVE_MOVE_PMD
 	select HAVE_MOVE_PUD
 	select HAVE_NMI
+	select HAVE_PERCPU_PREEMPT_COUNT
 	select HAVE_OPROFILE
 	select HAVE_OPTPROBES
 	select HAVE_PCSPKR_PLATFORM
@@ -864,6 +867,7 @@
 
 endif #HYPERVISOR_GUEST
 
+source "kernel/Kconfig.dovetail"
 source "arch/x86/Kconfig.cpu"
 
 config HPET_TIMER
diff --git a/kernel/arch/x86/entry/common.c b/kernel/arch/x86/entry/common.c
index 93a3122..9fdc77a 100644
--- a/kernel/arch/x86/entry/common.c
+++ b/kernel/arch/x86/entry/common.c
@@ -40,6 +40,15 @@
 {
 	nr = syscall_enter_from_user_mode(regs, nr);
 
+	if (dovetailing()) {
+		if (nr == EXIT_SYSCALL_OOB) {
+			hard_local_irq_disable();
+			return;
+		}
+		if (nr == EXIT_SYSCALL_TAIL)
+			goto done;
+	}
+
 	instrumentation_begin();
 	if (likely(nr < NR_syscalls)) {
 		nr = array_index_nospec(nr, NR_syscalls);
@@ -53,6 +62,7 @@
 #endif
 	}
 	instrumentation_end();
+done:
 	syscall_exit_to_user_mode(regs);
 }
 #endif
@@ -89,11 +99,22 @@
 	 * or may not be necessary, but it matches the old asm behavior.
 	 */
 	nr = (unsigned int)syscall_enter_from_user_mode(regs, nr);
+
+	if (dovetailing()) {
+		if (nr == EXIT_SYSCALL_OOB) {
+			hard_local_irq_disable();
+			return;
+		}
+		if (nr == EXIT_SYSCALL_TAIL)
+			goto done;
+	}
+
 	instrumentation_begin();
 
 	do_syscall_32_irqs_on(regs, nr);
 
 	instrumentation_end();
+done:
 	syscall_exit_to_user_mode(regs);
 }
 
@@ -136,9 +157,20 @@
 	/* The case truncates any ptrace induced syscall nr > 2^32 -1 */
 	nr = (unsigned int)syscall_enter_from_user_mode_work(regs, nr);
 
+	if (dovetailing()) {
+		if (nr == EXIT_SYSCALL_OOB) {
+			instrumentation_end();
+			hard_local_irq_disable();
+			return true;
+		}
+		if (nr == EXIT_SYSCALL_TAIL)
+			goto done;
+	}
+
 	/* Now this is just like a normal syscall. */
 	do_syscall_32_irqs_on(regs, nr);
 
+done:
 	instrumentation_end();
 	syscall_exit_to_user_mode(regs);
 	return true;
diff --git a/kernel/arch/x86/entry/entry_64.S b/kernel/arch/x86/entry/entry_64.S
index 559c82b..1d07a15 100644
--- a/kernel/arch/x86/entry/entry_64.S
+++ b/kernel/arch/x86/entry/entry_64.S
@@ -417,6 +417,11 @@
  * If hits in kernel mode then it needs to go through the paranoid
  * entry as the exception can hit any random state. No preemption
  * check on exit to keep the paranoid path simple.
+ *
+ * irq_pipeline: since those events are non-maskable in essence,
+ * we may assume NMI-type restrictions for their handlers, which
+ * means the latter may, and actually must, run immediately
+ * regardless of the current stage.
  */
 .macro idtentry_mce_db vector asmsym cfunc
 SYM_CODE_START(\asmsym)
diff --git a/kernel/arch/x86/hyperv/hv_init.c b/kernel/arch/x86/hyperv/hv_init.c
index 01860c0..fd30d6c 100644
--- a/kernel/arch/x86/hyperv/hv_init.c
+++ b/kernel/arch/x86/hyperv/hv_init.c
@@ -156,7 +156,8 @@
 		ms_hyperv.features & HV_ACCESS_REENLIGHTENMENT;
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_reenlightenment)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_REENLIGHTENMENT_VECTOR,
+				 sysvec_hyperv_reenlightenment)
 {
 	ack_APIC_irq();
 	inc_irq_stat(irq_hv_reenlightenment_count);
diff --git a/kernel/arch/x86/include/asm/apic.h b/kernel/arch/x86/include/asm/apic.h
index 3b4412c..e104d0d 100644
--- a/kernel/arch/x86/include/asm/apic.h
+++ b/kernel/arch/x86/include/asm/apic.h
@@ -437,7 +437,7 @@
 
 extern void apic_ack_irq(struct irq_data *data);
 
-static inline void ack_APIC_irq(void)
+static inline void __ack_APIC_irq(void)
 {
 	/*
 	 * ack_APIC_irq() actually gets compiled as a single instruction
@@ -446,6 +446,11 @@
 	apic_eoi();
 }
 
+static inline void ack_APIC_irq(void)
+{
+	if (!irqs_pipelined())
+		__ack_APIC_irq();
+}
 
 static inline bool lapic_vector_set_in_irr(unsigned int vector)
 {
diff --git a/kernel/arch/x86/include/asm/dovetail.h b/kernel/arch/x86/include/asm/dovetail.h
new file mode 100644
index 0000000..940726f
--- /dev/null
+++ b/kernel/arch/x86/include/asm/dovetail.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum.
+ */
+#ifndef _ASM_X86_DOVETAIL_H
+#define _ASM_X86_DOVETAIL_H
+
+#if !defined(__ASSEMBLY__) && defined(CONFIG_DOVETAIL)
+
+#include <asm/fpu/api.h>
+#include <asm/io_bitmap.h>
+
+static inline void arch_dovetail_exec_prepare(void)
+{
+	clear_thread_flag(TIF_NEED_FPU_LOAD);
+}
+
+static inline
+void arch_dovetail_switch_prepare(bool leave_inband)
+{
+	if (leave_inband)
+		fpu__suspend_inband();
+}
+
+static inline
+void arch_dovetail_switch_finish(bool enter_inband)
+{
+	unsigned int ti_work = READ_ONCE(current_thread_info()->flags);
+
+	if (unlikely(ti_work & _TIF_IO_BITMAP))
+		tss_update_io_bitmap();
+
+	if (enter_inband) {
+		fpu__resume_inband();
+	} else {
+		if (unlikely(ti_work & _TIF_NEED_FPU_LOAD &&
+			     !(current->flags & PF_KTHREAD)))
+			switch_fpu_return();
+	}
+}
+
+#endif
+
+#endif /* _ASM_X86_DOVETAIL_H */
diff --git a/kernel/arch/x86/include/asm/fpu/api.h b/kernel/arch/x86/include/asm/fpu/api.h
index 8b9bfaa..5ef1216 100644
--- a/kernel/arch/x86/include/asm/fpu/api.h
+++ b/kernel/arch/x86/include/asm/fpu/api.h
@@ -41,16 +41,25 @@
  * fpu->state and set TIF_NEED_FPU_LOAD leaving CPU's FPU registers in
  * a random state.
  */
-static inline void fpregs_lock(void)
+static inline unsigned long fpregs_lock(void)
 {
-	preempt_disable();
-	local_bh_disable();
+	if (IS_ENABLED(CONFIG_IRQ_PIPELINE)) {
+		return hard_preempt_disable();
+	} else {
+		preempt_disable();
+		local_bh_disable();
+		return 0;
+	}
 }
 
-static inline void fpregs_unlock(void)
+static inline void fpregs_unlock(unsigned long flags)
 {
-	local_bh_enable();
-	preempt_enable();
+	if (IS_ENABLED(CONFIG_IRQ_PIPELINE)) {
+		hard_preempt_enable(flags);
+	} else {
+		local_bh_enable();
+		preempt_enable();
+	}
 }
 
 #ifdef CONFIG_X86_DEBUG_FPU
@@ -64,6 +73,10 @@
  */
 extern void switch_fpu_return(void);
 
+/* For Dovetail context switching. */
+void fpu__suspend_inband(void);
+void fpu__resume_inband(void);
+
 /*
  * Query the presence of one or more xfeatures. Works on any legacy CPU as well.
  *
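Callers of fpregs_lock()/fpregs_unlock() change accordingly; every site now
threads the returned flags through, whether or not the pipeline is compiled
in (usage sketch):

	unsigned long flags;

	flags = fpregs_lock();	/* hard irqs off if IRQ_PIPELINE, else bh off */
	/* ... operate on the in-band FPU register state ... */
	fpregs_unlock(flags);	/* restore exactly what fpregs_lock() saved */
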
diff --git a/kernel/arch/x86/include/asm/fpu/internal.h b/kernel/arch/x86/include/asm/fpu/internal.h
index 70b9bc5..11d31cf 100644
--- a/kernel/arch/x86/include/asm/fpu/internal.h
+++ b/kernel/arch/x86/include/asm/fpu/internal.h
@@ -15,6 +15,7 @@
 #include <linux/sched.h>
 #include <linux/slab.h>
 #include <linux/mm.h>
+#include <linux/dovetail.h>
 
 #include <asm/user.h>
 #include <asm/fpu/api.h>
@@ -509,6 +510,32 @@
 	clear_thread_flag(TIF_NEED_FPU_LOAD);
 }
 
+#ifdef CONFIG_DOVETAIL
+
+static inline void oob_fpu_set_preempt(struct fpu *fpu)
+{
+	fpu->preempted = 1;
+}
+
+static inline void oob_fpu_clear_preempt(struct fpu *fpu)
+{
+	fpu->preempted = 0;
+}
+
+static inline bool oob_fpu_preempted(struct fpu *old_fpu)
+{
+	return old_fpu->preempted;
+}
+
+#else
+
+static inline bool oob_fpu_preempted(struct fpu *old_fpu)
+{
+	return false;
+}
+
+#endif	/* !CONFIG_DOVETAIL */
+
 /*
  * FPU state switching for scheduling.
  *
@@ -535,7 +562,9 @@
 {
 	struct fpu *old_fpu = &prev->thread.fpu;
 
-	if (static_cpu_has(X86_FEATURE_FPU) && !(prev->flags & PF_KTHREAD)) {
+	if (static_cpu_has(X86_FEATURE_FPU) &&
+		!(prev->flags & PF_KTHREAD) &&
+		!oob_fpu_preempted(old_fpu)) {
 		if (!copy_fpregs_to_fpstate(old_fpu))
 			old_fpu->last_cpu = -1;
 		else
diff --git a/kernel/arch/x86/include/asm/fpu/types.h b/kernel/arch/x86/include/asm/fpu/types.h
index f5a38a5..ce2bdeb 100644
--- a/kernel/arch/x86/include/asm/fpu/types.h
+++ b/kernel/arch/x86/include/asm/fpu/types.h
@@ -329,6 +329,18 @@
 	 */
 	unsigned int			last_cpu;
 
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * @preempted:
+	 *
+	 * When Dovetail is enabled, this flag is set for the inband
+	 * task context saved when entering a kernel_fpu_begin/end()
+	 * section before the latter got preempted by an out-of-band
+	 * task.
+	 */
+	unsigned char			preempted : 1;
+#endif
+
 	/*
 	 * @avx512_timestamp:
 	 *
diff --git a/kernel/arch/x86/include/asm/i8259.h b/kernel/arch/x86/include/asm/i8259.h
index 89789e8..facf1bc 100644
--- a/kernel/arch/x86/include/asm/i8259.h
+++ b/kernel/arch/x86/include/asm/i8259.h
@@ -26,7 +26,7 @@
 #define SLAVE_ICW4_DEFAULT	0x01
 #define PIC_ICW4_AEOI		2
 
-extern raw_spinlock_t i8259A_lock;
+extern hard_spinlock_t i8259A_lock;
 
 /* the PIC may need a careful delay on some platforms, hence specific calls */
 static inline unsigned char inb_pic(unsigned int port)
diff --git a/kernel/arch/x86/include/asm/idtentry.h b/kernel/arch/x86/include/asm/idtentry.h
index dc2a8b1..c0d1f94 100644
--- a/kernel/arch/x86/include/asm/idtentry.h
+++ b/kernel/arch/x86/include/asm/idtentry.h
@@ -174,6 +174,56 @@
 #define DECLARE_IDTENTRY_IRQ(vector, func)				\
 	DECLARE_IDTENTRY_ERRORCODE(vector, func)
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+struct irq_stage_data;
+
+struct irq_stage_data *
+handle_irq_pipelined_prepare(struct pt_regs *regs);
+
+int handle_irq_pipelined_finish(struct irq_stage_data *prevd,
+				struct pt_regs *regs);
+
+void arch_pipeline_entry(struct pt_regs *regs, u8 vector);
+
+#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func)			\
+	DECLARE_IDTENTRY_SYSVEC(vector, func);				\
+	__visible void __##func(struct pt_regs *regs)
+
+#define DEFINE_IDTENTRY_IRQ_PIPELINED(func)				\
+__visible noinstr void func(struct pt_regs *regs,			\
+			    unsigned long error_code)			\
+{									\
+	arch_pipeline_entry(regs, (u8)error_code);			\
+}									\
+static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+
+/*
+ * In a pipelined model, the actual sysvec __handler() is directly
+ * instrumentable, just like it is in fact in the non-pipelined
+ * model. The indirect call via run_on_irqstack_cond() in
+ * DEFINE_IDTENTRY_SYSVEC() happens to hide the noinstr dependency
+ * from objtool in the latter case.
+ */
+#define DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func)			\
+__visible noinstr void func(struct pt_regs *regs)			\
+{									\
+	arch_pipeline_entry(regs, vector);				\
+}									\
+									\
+__visible void __##func(struct pt_regs *regs)
+
+#define DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(vector, func)		\
+	DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func)
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func)		DECLARE_IDTENTRY_SYSVEC(vector, func)
+
+#define DEFINE_IDTENTRY_IRQ_PIPELINED(func)			DEFINE_IDTENTRY_IRQ(func)
+#define DEFINE_IDTENTRY_SYSVEC_PIPELINED(vector, func)		DEFINE_IDTENTRY_SYSVEC(func)
+#define DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(vector, func)	DEFINE_IDTENTRY_SYSVEC_SIMPLE(func)
+
 /**
  * DEFINE_IDTENTRY_IRQ - Emit code for device interrupt IDT entry points
  * @func:	Function name of the entry point
@@ -204,6 +254,8 @@
 }									\
 									\
 static __always_inline void __##func(struct pt_regs *regs, u8 vector)
+
+#endif	/* !CONFIG_IRQ_PIPELINE */
 
 /**
  * DECLARE_IDTENTRY_SYSVEC - Declare functions for system vector entry points
@@ -450,6 +502,9 @@
 #define DECLARE_IDTENTRY_SYSVEC(vector, func)				\
 	idtentry_sysvec vector func
 
+#define DECLARE_IDTENTRY_SYSVEC_PIPELINED(vector, func)			\
+	DECLARE_IDTENTRY_SYSVEC(vector, func)
+
 #ifdef CONFIG_X86_64
 # define DECLARE_IDTENTRY_MCE(vector, func)				\
 	idtentry_mce_db vector asm_##func func
@@ -632,21 +687,25 @@
 #ifdef CONFIG_X86_LOCAL_APIC
 DECLARE_IDTENTRY_SYSVEC(ERROR_APIC_VECTOR,		sysvec_error_interrupt);
 DECLARE_IDTENTRY_SYSVEC(SPURIOUS_APIC_VECTOR,		sysvec_spurious_apic_interrupt);
-DECLARE_IDTENTRY_SYSVEC(LOCAL_TIMER_VECTOR,		sysvec_apic_timer_interrupt);
-DECLARE_IDTENTRY_SYSVEC(X86_PLATFORM_IPI_VECTOR,	sysvec_x86_platform_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR,		sysvec_apic_timer_interrupt);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(X86_PLATFORM_IPI_VECTOR,	sysvec_x86_platform_ipi);
 #endif
 
 #ifdef CONFIG_SMP
-DECLARE_IDTENTRY(RESCHEDULE_VECTOR,			sysvec_reschedule_ipi);
-DECLARE_IDTENTRY_SYSVEC(IRQ_MOVE_CLEANUP_VECTOR,	sysvec_irq_move_cleanup);
-DECLARE_IDTENTRY_SYSVEC(REBOOT_VECTOR,			sysvec_reboot);
-DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_SINGLE_VECTOR,	sysvec_call_function_single);
-DECLARE_IDTENTRY_SYSVEC(CALL_FUNCTION_VECTOR,		sysvec_call_function);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(RESCHEDULE_VECTOR,		sysvec_reschedule_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR,	sysvec_irq_move_cleanup);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(REBOOT_VECTOR,		sysvec_reboot);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_SINGLE_VECTOR,	sysvec_call_function_single);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_VECTOR,		sysvec_call_function);
+#ifdef CONFIG_IRQ_PIPELINE
+DECLARE_IDTENTRY_SYSVEC(RESCHEDULE_OOB_VECTOR,		sysvec_reschedule_oob_ipi);
+DECLARE_IDTENTRY_SYSVEC(TIMER_OOB_VECTOR,		sysvec_timer_oob_ipi);
+#endif
 #endif
 
 #ifdef CONFIG_X86_LOCAL_APIC
 # ifdef CONFIG_X86_MCE_THRESHOLD
-DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR,		sysvec_threshold);
+DECLARE_IDTENTRY_SYSVEC(THRESHOLD_APIC_VECTOR,	sysvec_threshold);
 # endif
 
 # ifdef CONFIG_X86_MCE_AMD
@@ -658,28 +717,28 @@
 # endif
 
 # ifdef CONFIG_IRQ_WORK
-DECLARE_IDTENTRY_SYSVEC(IRQ_WORK_VECTOR,		sysvec_irq_work);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(IRQ_WORK_VECTOR,	sysvec_irq_work);
 # endif
 #endif
 
 #ifdef CONFIG_HAVE_KVM
-DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_VECTOR,		sysvec_kvm_posted_intr_ipi);
-DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_WAKEUP_VECTOR,	sysvec_kvm_posted_intr_wakeup_ipi);
-DECLARE_IDTENTRY_SYSVEC(POSTED_INTR_NESTED_VECTOR,	sysvec_kvm_posted_intr_nested_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_VECTOR,		sysvec_kvm_posted_intr_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_WAKEUP_VECTOR,	sysvec_kvm_posted_intr_wakeup_ipi);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_NESTED_VECTOR,	sysvec_kvm_posted_intr_nested_ipi);
 #endif
 
 #if IS_ENABLED(CONFIG_HYPERV)
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_hyperv_callback);
-DECLARE_IDTENTRY_SYSVEC(HYPERV_REENLIGHTENMENT_VECTOR,	sysvec_hyperv_reenlightenment);
-DECLARE_IDTENTRY_SYSVEC(HYPERV_STIMER0_VECTOR,	sysvec_hyperv_stimer0);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR, sysvec_hyperv_callback);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_REENLIGHTENMENT_VECTOR, sysvec_hyperv_reenlightenment);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_STIMER0_VECTOR, sysvec_hyperv_stimer0);
 #endif
 
 #if IS_ENABLED(CONFIG_ACRN_GUEST)
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_acrn_hv_callback);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,	sysvec_acrn_hv_callback);
 #endif
 
 #ifdef CONFIG_XEN_PVHVM
-DECLARE_IDTENTRY_SYSVEC(HYPERVISOR_CALLBACK_VECTOR,	sysvec_xen_hvm_callback);
+DECLARE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,	sysvec_xen_hvm_callback);
 #endif
 
 #ifdef CONFIG_KVM_GUEST
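
To make the pipelined entry flow concrete, hand-expanding
DEFINE_IDTENTRY_SYSVEC_PIPELINED() for the local APIC timer gives
approximately this (a sketch derived from the macro above, not generated
output):

	__visible noinstr void sysvec_apic_timer_interrupt(struct pt_regs *regs)
	{
		/* Entry stub: hand the event to the pipeline core, which
		 * either runs the handler immediately (oob) or logs it for
		 * the in-band stage to replay once unstalled. */
		arch_pipeline_entry(regs, LOCAL_TIMER_VECTOR);
	}

	/* The handler body itself is emitted as
	 * __sysvec_apic_timer_interrupt(), which the pipeline core invokes
	 * when the interrupt is actually delivered to its target stage. */
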
diff --git a/kernel/arch/x86/include/asm/irq_pipeline.h b/kernel/arch/x86/include/asm/irq_pipeline.h
new file mode 100644
index 0000000..5fa0cce
--- /dev/null
+++ b/kernel/arch/x86/include/asm/irq_pipeline.h
@@ -0,0 +1,135 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _ASM_X86_IRQ_PIPELINE_H
+#define _ASM_X86_IRQ_PIPELINE_H
+
+#include <asm-generic/irq_pipeline.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#include <asm/ptrace.h>
+
+#define FIRST_SYSTEM_IRQ	NR_IRQS
+#define TIMER_OOB_IPI		apicm_vector_irq(TIMER_OOB_VECTOR)
+#define RESCHEDULE_OOB_IPI	apicm_vector_irq(RESCHEDULE_OOB_VECTOR)
+#define apicm_irq_vector(__irq) ((__irq) - FIRST_SYSTEM_IRQ + FIRST_SYSTEM_VECTOR)
+#define apicm_vector_irq(__vec) ((__vec) - FIRST_SYSTEM_VECTOR + FIRST_SYSTEM_IRQ)
+
+#define X86_EFLAGS_SS_BIT	31
+
+static inline notrace
+unsigned long arch_irqs_virtual_to_native_flags(int stalled)
+{
+	return (!stalled) << X86_EFLAGS_IF_BIT;
+}
+
+static inline notrace
+unsigned long arch_irqs_native_to_virtual_flags(unsigned long flags)
+{
+	return hard_irqs_disabled_flags(flags) << X86_EFLAGS_SS_BIT;
+}
+
+#ifndef CONFIG_PARAVIRT_XXL
+
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	int stalled = inband_irq_save();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	barrier();
+	inband_irq_enable();
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	inband_irq_disable();
+	barrier();
+}
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	int stalled = inband_irqs_disabled();
+	barrier();
+	return arch_irqs_virtual_to_native_flags(stalled);
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+	inband_irq_restore(native_irqs_disabled_flags(flags));
+	barrier();
+}
+
+#endif /* !CONFIG_PARAVIRT_XXL */
+
+static inline
+void arch_save_timer_regs(struct pt_regs *dst, struct pt_regs *src)
+{
+	dst->flags = src->flags;
+	dst->cs = src->cs;
+	dst->ip = src->ip;
+	dst->bp = src->bp;
+	dst->ss = src->ss;
+	dst->sp = src->sp;
+}
+
+static inline bool arch_steal_pipelined_tick(struct pt_regs *regs)
+{
+	return !(regs->flags & X86_EFLAGS_IF);
+}
+
+static inline int arch_enable_oob_stage(void)
+{
+	return 0;
+}
+
+static inline void handle_arch_irq(struct pt_regs *regs)
+{ }
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+struct pt_regs;
+
+#ifndef CONFIG_PARAVIRT_XXL
+
+static inline notrace unsigned long arch_local_save_flags(void)
+{
+	return native_save_fl();
+}
+
+static inline notrace void arch_local_irq_restore(unsigned long flags)
+{
+	native_restore_fl(flags);
+}
+
+static inline notrace void arch_local_irq_disable(void)
+{
+	native_irq_disable();
+}
+
+static inline notrace void arch_local_irq_enable(void)
+{
+	native_irq_enable();
+}
+
+/*
+ * For spinlocks, etc:
+ */
+static inline notrace unsigned long arch_local_irq_save(void)
+{
+	unsigned long flags = arch_local_save_flags();
+	arch_local_irq_disable();
+	return flags;
+}
+
+#endif /* !CONFIG_PARAVIRT_XXL */
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#endif /* _ASM_X86_IRQ_PIPELINE_H */
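
A minimal sketch (illustration only) of what the stall-bit translation
above evaluates to, given that X86_EFLAGS_IF_BIT is bit 9 on x86:

	/*
	 * Round-trip the in-band stall state through a native-looking
	 * flags word, mirroring arch_irqs_virtual_to_native_flags().
	 */
	static inline void stall_flags_example(void)
	{
		/* stalled == 1 (virtually masked) -> IF clear, i.e. 0 */
		WARN_ON_ONCE(arch_irqs_virtual_to_native_flags(1) != 0);
		/* stalled == 0 (unstalled) -> IF set, i.e. 0x200 */
		WARN_ON_ONCE(arch_irqs_virtual_to_native_flags(0) != X86_EFLAGS_IF);
	}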
diff --git a/kernel/arch/x86/include/asm/irq_stack.h b/kernel/arch/x86/include/asm/irq_stack.h
index 7758169..58ad3c4 100644
--- a/kernel/arch/x86/include/asm/irq_stack.h
+++ b/kernel/arch/x86/include/asm/irq_stack.h
@@ -18,6 +18,13 @@
 void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc),
 			   struct irq_desc *desc);
 
+/*
+ * IRQ pipeline: only in-band (soft-)irq handlers have to run on the
+ * irqstack; oob irq handlers must be lean by design, and can therefore
+ * run directly over the preempted context. Hence the guarantee that
+ * the in-band stage is currently stalled on the current CPU is enough
+ * to update irq_count atomically.
+ */
 static __always_inline void __run_on_irqstack(void (*func)(void))
 {
 	void *tos = __this_cpu_read(hardirq_stack_ptr);
diff --git a/kernel/arch/x86/include/asm/irq_vectors.h b/kernel/arch/x86/include/asm/irq_vectors.h
index 889f8b1..1e51dc4 100644
--- a/kernel/arch/x86/include/asm/irq_vectors.h
+++ b/kernel/arch/x86/include/asm/irq_vectors.h
@@ -106,10 +106,19 @@
 
 #define LOCAL_TIMER_VECTOR		0xec
 
+#ifdef CONFIG_IRQ_PIPELINE
+#define TIMER_OOB_VECTOR		0xeb
+#define RESCHEDULE_OOB_VECTOR		0xea
+#define FIRST_SYSTEM_APIC_VECTOR	RESCHEDULE_OOB_VECTOR
+#define NR_APIC_VECTORS			(NR_VECTORS - FIRST_SYSTEM_VECTOR)
+#else
+#define FIRST_SYSTEM_APIC_VECTOR	LOCAL_TIMER_VECTOR
+#endif
+
 #define NR_VECTORS			 256
 
 #ifdef CONFIG_X86_LOCAL_APIC
-#define FIRST_SYSTEM_VECTOR		LOCAL_TIMER_VECTOR
+#define FIRST_SYSTEM_VECTOR		FIRST_SYSTEM_APIC_VECTOR
 #else
 #define FIRST_SYSTEM_VECTOR		NR_VECTORS
 #endif
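
For reference, a worked example of the mapping these definitions yield
when pipelining is enabled, using the apicm_* macros from
asm/irq_pipeline.h above:

	/*
	 * FIRST_SYSTEM_VECTOR == RESCHEDULE_OOB_VECTOR (0xea), so the
	 * synthetic IRQ numbers of the system vectors become:
	 *
	 *   apicm_vector_irq(RESCHEDULE_OOB_VECTOR) == NR_IRQS + 0
	 *   apicm_vector_irq(TIMER_OOB_VECTOR)      == NR_IRQS + 1
	 *   apicm_vector_irq(LOCAL_TIMER_VECTOR)    == NR_IRQS + 2
	 */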
diff --git a/kernel/arch/x86/include/asm/irqflags.h b/kernel/arch/x86/include/asm/irqflags.h
index 8c86ede..ca2a870 100644
--- a/kernel/arch/x86/include/asm/irqflags.h
+++ b/kernel/arch/x86/include/asm/irqflags.h
@@ -35,8 +35,13 @@
 	return flags;
 }
 
+static inline unsigned long native_save_flags(void)
+{
+	return native_save_fl();
+}
+
 extern inline void native_restore_fl(unsigned long flags);
-extern inline void native_restore_fl(unsigned long flags)
+extern __always_inline void native_restore_fl(unsigned long flags)
 {
 	asm volatile("push %0 ; popf"
 		     : /* no output */
@@ -52,6 +57,38 @@
 static __always_inline void native_irq_enable(void)
 {
 	asm volatile("sti": : :"memory");
+}
+
+static __always_inline void native_irq_sync(void)
+{
+	asm volatile("sti ; nop ; cli": : :"memory");
+}
+
+static __always_inline unsigned long native_irq_save(void)
+{
+	unsigned long flags;
+
+	flags = native_save_flags();
+
+	native_irq_disable();
+
+	return flags;
+}
+
+static __always_inline void native_irq_restore(unsigned long flags)
+{
+	native_restore_fl(flags);
+}
+
+static __always_inline int native_irqs_disabled_flags(unsigned long flags)
+{
+	return !(flags & X86_EFLAGS_IF);
+}
+
+static __always_inline bool native_irqs_disabled(void)
+{
+	unsigned long flags = native_save_flags();
+	return native_irqs_disabled_flags(flags);
 }
 
 static inline __cpuidle void native_safe_halt(void)
@@ -73,26 +110,7 @@
 #else
 #ifndef __ASSEMBLY__
 #include <linux/types.h>
-
-static __always_inline unsigned long arch_local_save_flags(void)
-{
-	return native_save_fl();
-}
-
-static __always_inline void arch_local_irq_restore(unsigned long flags)
-{
-	native_restore_fl(flags);
-}
-
-static __always_inline void arch_local_irq_disable(void)
-{
-	native_irq_disable();
-}
-
-static __always_inline void arch_local_irq_enable(void)
-{
-	native_irq_enable();
-}
+#include <asm/irq_pipeline.h>
 
 /*
  * Used in the idle loop; sti takes one instruction cycle
@@ -112,15 +130,6 @@
 	native_halt();
 }
 
-/*
- * For spinlocks, etc:
- */
-static __always_inline unsigned long arch_local_irq_save(void)
-{
-	unsigned long flags = arch_local_save_flags();
-	arch_local_irq_disable();
-	return flags;
-}
 #else
 
 #define ENABLE_INTERRUPTS(x)	sti
@@ -149,7 +158,7 @@
 #ifndef __ASSEMBLY__
 static __always_inline int arch_irqs_disabled_flags(unsigned long flags)
 {
-	return !(flags & X86_EFLAGS_IF);
+	return native_irqs_disabled_flags(flags);
 }
 
 static __always_inline int arch_irqs_disabled(void)
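
A short usage sketch (illustrative, under the definitions added above)
of the new native_* helpers, which always act on the hardware IRQ state:

	static inline void native_critical_section_example(void)
	{
		/* Physically mask IRQs, whatever the virtual state is. */
		unsigned long flags = native_irq_save();

		WARN_ON_ONCE(!native_irqs_disabled());

		/* Restore the previous hardware state. */
		native_irq_restore(flags);
	}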
diff --git a/kernel/arch/x86/include/asm/mmu_context.h b/kernel/arch/x86/include/asm/mmu_context.h
index d98016b..b20f7de 100644
--- a/kernel/arch/x86/include/asm/mmu_context.h
+++ b/kernel/arch/x86/include/asm/mmu_context.h
@@ -128,6 +128,13 @@
 			       struct task_struct *tsk);
 #define switch_mm_irqs_off switch_mm_irqs_off
 
+static inline void
+switch_oob_mm(struct mm_struct *prev, struct mm_struct *next,
+	      struct task_struct *tsk)
+{
+	switch_mm_irqs_off(prev, next, tsk);
+}
+
 #define activate_mm(prev, next)			\
 do {						\
 	paravirt_activate_mm((prev), (next));	\
diff --git a/kernel/arch/x86/include/asm/pgtable.h b/kernel/arch/x86/include/asm/pgtable.h
index 87de9f2..d4b8b84 100644
--- a/kernel/arch/x86/include/asm/pgtable.h
+++ b/kernel/arch/x86/include/asm/pgtable.h
@@ -137,6 +137,7 @@
 static inline void write_pkru(u32 pkru)
 {
 	struct pkru_state *pk;
+	unsigned long flags;
 
 	if (!boot_cpu_has(X86_FEATURE_OSPKE))
 		return;
@@ -148,11 +149,11 @@
 	 * written to the CPU. The FPU restore on return to userland would
 	 * otherwise load the previous value again.
 	 */
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (pk)
 		pk->pkru = pkru;
 	__write_pkru(pkru);
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 static inline int pte_young(pte_t pte)
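
The conversion above assumes fpregs_lock() now returns the hard IRQ
state to be passed back to fpregs_unlock(); a minimal caller sketch
under that assumption:

	static inline void fpregs_update_example(void)
	{
		unsigned long flags;

		flags = fpregs_lock();	/* hard IRQ state handed back */
		/* ... update the FPU register or memory state ... */
		fpregs_unlock(flags);
	}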
diff --git a/kernel/arch/x86/include/asm/special_insns.h b/kernel/arch/x86/include/asm/special_insns.h
index 415693f..2035bbe 100644
--- a/kernel/arch/x86/include/asm/special_insns.h
+++ b/kernel/arch/x86/include/asm/special_insns.h
@@ -138,9 +138,9 @@
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	asm_load_gs_index(selector);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline unsigned long __read_cr4(void)
diff --git a/kernel/arch/x86/include/asm/syscall.h b/kernel/arch/x86/include/asm/syscall.h
index 7cbf733..7b2a464 100644
--- a/kernel/arch/x86/include/asm/syscall.h
+++ b/kernel/arch/x86/include/asm/syscall.h
@@ -128,6 +128,11 @@
 	}
 }
 
+static inline unsigned long syscall_get_arg0(struct pt_regs *regs)
+{
+	return regs->di;
+}
+
 static inline void syscall_set_arguments(struct task_struct *task,
 					 struct pt_regs *regs,
 					 const unsigned long *args)
diff --git a/kernel/arch/x86/include/asm/thread_info.h b/kernel/arch/x86/include/asm/thread_info.h
index 012c8ee..ecd2701 100644
--- a/kernel/arch/x86/include/asm/thread_info.h
+++ b/kernel/arch/x86/include/asm/thread_info.h
@@ -52,16 +52,20 @@
 struct task_struct;
 #include <asm/cpufeature.h>
 #include <linux/atomic.h>
+#include <dovetail/thread_info.h>
 
 struct thread_info {
 	unsigned long		flags;		/* low level flags */
 	u32			status;		/* thread synchronous flags */
+	struct oob_thread_state	oob_state;	/* co-kernel thread state */
 };
 
 #define INIT_THREAD_INFO(tsk)			\
 {						\
 	.flags		= 0,			\
 }
+
+#define ti_local_flags(__ti)	((__ti)->status)
 
 #else /* !__ASSEMBLY__ */
 
@@ -97,8 +101,10 @@
 #define TIF_MEMDIE		20	/* is terminating due to OOM killer */
 #define TIF_POLLING_NRFLAG	21	/* idle is polling for TIF_NEED_RESCHED */
 #define TIF_IO_BITMAP		22	/* uses I/O bitmap */
+#define TIF_RETUSER		23	/* INBAND_TASK_RETUSER is pending */
 #define TIF_FORCED_TF		24	/* true if TF in eflags artificially */
 #define TIF_BLOCKSTEP		25	/* set when we want DEBUGCTLMSR_BTF */
+#define TIF_MAYDAY		26	/* emergency trap pending */
 #define TIF_LAZY_MMU_UPDATES	27	/* task is updating the mmu lazily */
 #define TIF_SYSCALL_TRACEPOINT	28	/* syscall tracepoint instrumentation */
 #define TIF_ADDR32		29	/* 32-bit address space on 64 bits */
@@ -126,7 +132,9 @@
 #define _TIF_SLD		(1 << TIF_SLD)
 #define _TIF_POLLING_NRFLAG	(1 << TIF_POLLING_NRFLAG)
 #define _TIF_IO_BITMAP		(1 << TIF_IO_BITMAP)
+#define _TIF_RETUSER		(1 << TIF_RETUSER)
 #define _TIF_FORCED_TF		(1 << TIF_FORCED_TF)
+#define _TIF_MAYDAY		(1 << TIF_MAYDAY)
 #define _TIF_BLOCKSTEP		(1 << TIF_BLOCKSTEP)
 #define _TIF_LAZY_MMU_UPDATES	(1 << TIF_LAZY_MMU_UPDATES)
 #define _TIF_SYSCALL_TRACEPOINT	(1 << TIF_SYSCALL_TRACEPOINT)
@@ -226,6 +234,16 @@
  * have to worry about atomic accesses.
  */
 #define TS_COMPAT		0x0002	/* 32bit syscall active (64BIT)*/
+/* bits 2 and 3 reserved for compat */
+#define TS_OOB			0x0010	/* Thread is running out-of-band */
+#define TS_DOVETAIL		0x0020  /* Dovetail notifier enabled */
+#define TS_OFFSTAGE		0x0040	/* Thread is in-flight to OOB context */
+#define TS_OOBTRAP		0x0080	/* Handling a trap from OOB context */
+
+#define _TLF_OOB		TS_OOB
+#define _TLF_DOVETAIL		TS_DOVETAIL
+#define _TLF_OFFSTAGE		TS_OFFSTAGE
+#define _TLF_OOBTRAP		TS_OOBTRAP
 
 #ifndef __ASSEMBLY__
 #ifdef CONFIG_COMPAT
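
A small sketch of how a co-kernel could test the new local flags
(hypothetical helper, shown for illustration only):

	/* Does @p currently run on the out-of-band stage? */
	static inline bool task_is_oob_example(struct task_struct *p)
	{
		return !!(ti_local_flags(task_thread_info(p)) & _TLF_OOB);
	}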
diff --git a/kernel/arch/x86/include/asm/tlbflush.h b/kernel/arch/x86/include/asm/tlbflush.h
index 8c87a2e..1dfab59 100644
--- a/kernel/arch/x86/include/asm/tlbflush.h
+++ b/kernel/arch/x86/include/asm/tlbflush.h
@@ -37,9 +37,9 @@
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cr4_set_bits_irqsoff(mask);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* Clear in this cpu's CR4. */
@@ -47,9 +47,9 @@
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cr4_clear_bits_irqsoff(mask);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 #ifndef MODULE
diff --git a/kernel/arch/x86/include/asm/uaccess.h b/kernel/arch/x86/include/asm/uaccess.h
index bb14302..25b7697 100644
--- a/kernel/arch/x86/include/asm/uaccess.h
+++ b/kernel/arch/x86/include/asm/uaccess.h
@@ -44,7 +44,7 @@
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 static inline bool pagefault_disabled(void);
 # define WARN_ON_IN_IRQ()	\
-	WARN_ON_ONCE(!in_task() && !pagefault_disabled())
+	WARN_ON_ONCE(running_inband() && !in_task() && !pagefault_disabled())
 #else
 # define WARN_ON_IN_IRQ()
 #endif
diff --git a/kernel/arch/x86/kernel/Makefile b/kernel/arch/x86/kernel/Makefile
index c06f3a9..74449e6 100644
--- a/kernel/arch/x86/kernel/Makefile
+++ b/kernel/arch/x86/kernel/Makefile
@@ -131,6 +131,7 @@
 obj-$(CONFIG_X86_PMEM_LEGACY_DEVICE) += pmem.o
 
 obj-$(CONFIG_JAILHOUSE_GUEST)	+= jailhouse.o
+obj-$(CONFIG_IRQ_PIPELINE)	+= irq_pipeline.o
 
 obj-$(CONFIG_EISA)		+= eisa.o
 obj-$(CONFIG_PCSPKR_PLATFORM)	+= pcspeaker.o
diff --git a/kernel/arch/x86/kernel/alternative.c b/kernel/arch/x86/kernel/alternative.c
index 92f0a97..e0be5d4 100644
--- a/kernel/arch/x86/kernel/alternative.c
+++ b/kernel/arch/x86/kernel/alternative.c
@@ -8,6 +8,7 @@
 #include <linux/list.h>
 #include <linux/stringify.h>
 #include <linux/highmem.h>
+#include <linux/irq_pipeline.h>
 #include <linux/mm.h>
 #include <linux/vmalloc.h>
 #include <linux/memory.h>
@@ -366,9 +367,9 @@
 	if (nnops <= 1)
 		return nnops;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	add_nops(instr + off, nnops);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	DUMP_BYTES(instr, instrlen, "%px: [%d:%d) optimized NOPs: ", instr, off, i);
 
@@ -1065,9 +1066,9 @@
 		 */
 		memcpy(addr, opcode, len);
 	} else {
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		memcpy(addr, opcode, len);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		sync_core();
 
 		/*
@@ -1099,6 +1100,7 @@
 	temp_mm_state_t temp_state;
 
 	lockdep_assert_irqs_disabled();
+	WARN_ON_ONCE(irq_pipeline_debug() && !hard_irqs_disabled());
 
 	/*
 	 * Make sure not to be in TLB lazy mode, as otherwise we'll end up
@@ -1192,7 +1194,7 @@
 	 */
 	VM_BUG_ON(!ptep);
 
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 
 	pte = mk_pte(pages[0], pgprot);
 	set_pte_at(poking_mm, poking_addr, ptep, pte);
@@ -1243,7 +1245,7 @@
 	 */
 	BUG_ON(memcmp(addr, opcode, len));
 
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 	pte_unmap_unlock(ptep, ptl);
 	return addr;
 }
diff --git a/kernel/arch/x86/kernel/apic/apic.c b/kernel/arch/x86/kernel/apic/apic.c
index 1c96f24..8984c79 100644
--- a/kernel/arch/x86/kernel/apic/apic.c
+++ b/kernel/arch/x86/kernel/apic/apic.c
@@ -31,6 +31,7 @@
 #include <linux/i8253.h>
 #include <linux/dmar.h>
 #include <linux/init.h>
+#include <linux/irq.h>
 #include <linux/cpu.h>
 #include <linux/dmi.h>
 #include <linux/smp.h>
@@ -272,10 +273,10 @@
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	apic_write(APIC_ICR2, SET_APIC_DEST_FIELD(id));
 	apic_write(APIC_ICR, low);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 u64 native_apic_icr_read(void)
@@ -331,6 +332,9 @@
 static void __setup_APIC_LVTT(unsigned int clocks, int oneshot, int irqen)
 {
 	unsigned int lvtt_value, tmp_value;
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
 
 	lvtt_value = LOCAL_TIMER_VECTOR;
 	if (!oneshot)
@@ -353,6 +357,8 @@
 		 * According to Intel, MFENCE can do the serialization here.
 		 */
 		asm volatile("mfence" : : : "memory");
+		hard_cond_local_irq_restore(flags);
+		printk_once(KERN_DEBUG "TSC deadline timer enabled\n");
 		return;
 	}
 
@@ -366,6 +372,8 @@
 
 	if (!oneshot)
 		apic_write(APIC_TMICT, clocks / APIC_DIVISOR);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -471,28 +479,34 @@
 static int lapic_next_deadline(unsigned long delta,
 			       struct clock_event_device *evt)
 {
+	unsigned long flags;
 	u64 tsc;
 
 	/* This MSR is special and need a special fence: */
 	weak_wrmsr_fence();
 
+	flags = hard_local_irq_save();
 	tsc = rdtsc();
 	wrmsrl(MSR_IA32_TSC_DEADLINE, tsc + (((u64) delta) * TSC_DIVISOR));
+	hard_local_irq_restore(flags);
 	return 0;
 }
 
 static int lapic_timer_shutdown(struct clock_event_device *evt)
 {
+	unsigned long flags;
 	unsigned int v;
 
 	/* Lapic used as dummy for broadcast ? */
 	if (evt->features & CLOCK_EVT_FEAT_DUMMY)
 		return 0;
 
+	flags = hard_local_irq_save();
 	v = apic_read(APIC_LVTT);
 	v |= (APIC_LVT_MASKED | LOCAL_TIMER_VECTOR);
 	apic_write(APIC_LVTT, v);
 	apic_write(APIC_TMICT, 0);
+	hard_local_irq_restore(flags);
 	return 0;
 }
 
@@ -527,6 +541,32 @@
 #endif
 }
 
+static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#define LAPIC_TIMER_IRQ  apicm_vector_irq(LOCAL_TIMER_VECTOR)
+
+static irqreturn_t lapic_oob_handler(int irq, void *dev_id)
+{
+	struct clock_event_device *evt = this_cpu_ptr(&lapic_events);
+
+	trace_local_timer_entry(LOCAL_TIMER_VECTOR);
+	clockevents_handle_event(evt);
+	trace_local_timer_exit(LOCAL_TIMER_VECTOR);
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction lapic_oob_action = {
+	.handler = lapic_oob_handler,
+	.name = "Out-of-band LAPIC timer interrupt",
+	.flags = IRQF_TIMER | IRQF_PERCPU,
+};
+
+#else
+#define LAPIC_TIMER_IRQ  -1
+#endif
 
 /*
  * The local apic timer can be used for any function which is CPU local.
@@ -534,8 +574,8 @@
 static struct clock_event_device lapic_clockevent = {
 	.name				= "lapic",
 	.features			= CLOCK_EVT_FEAT_PERIODIC |
-					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP
-					  | CLOCK_EVT_FEAT_DUMMY,
+					  CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_C3STOP |
+					  CLOCK_EVT_FEAT_PIPELINE | CLOCK_EVT_FEAT_DUMMY,
 	.shift				= 32,
 	.set_state_shutdown		= lapic_timer_shutdown,
 	.set_state_periodic		= lapic_timer_set_periodic,
@@ -544,9 +584,8 @@
 	.set_next_event			= lapic_next_event,
 	.broadcast			= lapic_timer_broadcast,
 	.rating				= 100,
-	.irq				= -1,
+	.irq				= LAPIC_TIMER_IRQ,
 };
-static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
 static const struct x86_cpu_id deadline_match[] __initconst = {
 	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
@@ -1042,6 +1081,9 @@
 	/* Setup the lapic or request the broadcast */
 	setup_APIC_timer();
 	amd_e400_c1e_apic_setup();
+#ifdef CONFIG_IRQ_PIPELINE
+	setup_percpu_irq(LAPIC_TIMER_IRQ, &lapic_oob_action);
+#endif
 }
 
 void setup_secondary_APIC_clock(void)
@@ -1092,7 +1134,8 @@
  * [ if a single-CPU system runs an SMP kernel then we call the local
  *   interrupt as well. Thus we cannot inline the local irq ... ]
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_apic_timer_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(LOCAL_TIMER_VECTOR,
+				 sysvec_apic_timer_interrupt)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@@ -1513,7 +1556,7 @@
 		 * per set bit.
 		 */
 		for_each_set_bit(bit, isr->map, APIC_IR_BITS)
-			ack_APIC_irq();
+			__ack_APIC_irq();
 		return true;
 	}
 
@@ -2131,7 +2174,7 @@
  *
  * Also called from sysvec_spurious_apic_interrupt().
  */
-DEFINE_IDTENTRY_IRQ(spurious_interrupt)
+DEFINE_IDTENTRY_IRQ_PIPELINED(spurious_interrupt)
 {
 	u32 v;
 
@@ -2157,7 +2200,7 @@
 	if (v & (1 << (vector & 0x1f))) {
 		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Acked\n",
 			vector, smp_processor_id());
-		ack_APIC_irq();
+		__ack_APIC_irq();
 	} else {
 		pr_info("Spurious interrupt (vector 0x%02x) on CPU#%d. Not pending!\n",
 			vector, smp_processor_id());
@@ -2166,13 +2209,18 @@
 	trace_spurious_apic_exit(vector);
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_spurious_apic_interrupt)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(SPURIOUS_APIC_VECTOR,
+				 sysvec_spurious_apic_interrupt)
 {
 	__spurious_interrupt(regs, SPURIOUS_APIC_VECTOR);
 }
 
 /*
  * This interrupt should never happen with our APIC/SMP architecture
+ *
+ * irq_pipeline: like spurious_interrupt, this handler runs directly
+ * out of the IDT with no deferral via the interrupt log, which means
+ * that only the hardware IRQ state is considered for masking.
  */
 DEFINE_IDTENTRY_SYSVEC(sysvec_error_interrupt)
 {
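
The recurring local_irq_save() -> hard_local_irq_save() conversions in
this file all follow the same pattern; a distilled sketch:

	static void apic_reg_update_example(u32 low)
	{
		unsigned long flags;

		/* Mask IRQs in hardware: the oob stage must not preempt
		 * a multi-register APIC access sequence midway. */
		flags = hard_local_irq_save();
		apic_write(APIC_ICR, low);
		hard_local_irq_restore(flags);
	}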
diff --git a/kernel/arch/x86/kernel/apic/apic_flat_64.c b/kernel/arch/x86/kernel/apic/apic_flat_64.c
index 7862b15..d376218 100644
--- a/kernel/arch/x86/kernel/apic/apic_flat_64.c
+++ b/kernel/arch/x86/kernel/apic/apic_flat_64.c
@@ -52,9 +52,9 @@
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static void flat_send_IPI_mask(const struct cpumask *cpumask, int vector)
diff --git a/kernel/arch/x86/kernel/apic/apic_numachip.c b/kernel/arch/x86/kernel/apic/apic_numachip.c
index 35edd57..3522a47 100644
--- a/kernel/arch/x86/kernel/apic/apic_numachip.c
+++ b/kernel/arch/x86/kernel/apic/apic_numachip.c
@@ -103,10 +103,10 @@
 	if (!((apicid ^ local_apicid) >> NUMACHIP_LAPIC_BITS)) {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		__default_send_IPI_dest_field(apicid, vector,
 			APIC_DEST_PHYSICAL);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		preempt_enable();
 		return;
 	}
diff --git a/kernel/arch/x86/kernel/apic/io_apic.c b/kernel/arch/x86/kernel/apic/io_apic.c
index 25b1d5c..47f9d4a 100644
--- a/kernel/arch/x86/kernel/apic/io_apic.c
+++ b/kernel/arch/x86/kernel/apic/io_apic.c
@@ -78,7 +78,7 @@
 #define for_each_irq_pin(entry, head) \
 	list_for_each_entry(entry, &head, list)
 
-static DEFINE_RAW_SPINLOCK(ioapic_lock);
+static DEFINE_HARD_SPINLOCK(ioapic_lock);
 static DEFINE_MUTEX(ioapic_mutex);
 static unsigned int ioapic_dynirq_base;
 static int ioapic_initialized;
@@ -1634,7 +1634,7 @@
 		return 1;
 
 	local_save_flags(flags);
-	local_irq_enable();
+	local_irq_enable_full();
 
 	if (boot_cpu_has(X86_FEATURE_TSC))
 		delay_with_tsc();
@@ -1642,6 +1642,8 @@
 		delay_without_tsc();
 
 	local_irq_restore(flags);
+	if (raw_irqs_disabled_flags(flags))
+		hard_local_irq_disable();
 
 	/*
 	 * Expect a few ticks at least, to be sure some possible
@@ -1722,14 +1724,56 @@
 	return false;
 }
 
+static inline void do_prepare_move(struct irq_data *data)
+{
+	if (!irqd_irq_masked(data))
+		mask_ioapic_irq(data);
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static inline void ioapic_finish_move(struct irq_data *data, bool moveit);
+
+static void ioapic_deferred_irq_move(struct irq_work *work)
+{
+	struct irq_data *data;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	data = container_of(work, struct irq_data, move_work);
+	desc = irq_data_to_desc(data);
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	do_prepare_move(data);
+	ioapic_finish_move(data, true);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline bool __ioapic_prepare_move(struct irq_data *data)
+{
+	init_irq_work(&data->move_work, ioapic_deferred_irq_move);
+	irq_work_queue(&data->move_work);
+
+	return false;	/* Postpone ioapic_finish_move(). */
+}
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+static inline bool __ioapic_prepare_move(struct irq_data *data)
+{
+	do_prepare_move(data);
+
+	return true;
+}
+
+#endif
+
 static inline bool ioapic_prepare_move(struct irq_data *data)
 {
 	/* If we are moving the IRQ we need to mask it */
-	if (unlikely(irqd_is_setaffinity_pending(data))) {
-		if (!irqd_irq_masked(data))
-			mask_ioapic_irq(data);
-		return true;
-	}
+	if (irqd_is_setaffinity_pending(data) &&
+		!irqd_is_setaffinity_blocked(data))
+		return __ioapic_prepare_move(data);
+
 	return false;
 }
 
@@ -1828,7 +1872,7 @@
 	 * We must acknowledge the irq before we move it or the acknowledge will
 	 * not propagate properly.
 	 */
-	ack_APIC_irq();
+	__ack_APIC_irq();
 
 	/*
 	 * Tail end of clearing remote IRR bit (either by delivering the EOI
@@ -1949,7 +1993,8 @@
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip ioapic_ir_chip __read_mostly = {
@@ -1963,7 +2008,8 @@
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_get_irqchip_state	= ioapic_irq_get_chip_state,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static inline void init_IO_APIC_traps(void)
@@ -2010,7 +2056,7 @@
 
 static void ack_lapic_irq(struct irq_data *data)
 {
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 static struct irq_chip lapic_chip __read_mostly = {
@@ -2018,6 +2064,7 @@
 	.irq_mask	= mask_lapic_irq,
 	.irq_unmask	= unmask_lapic_irq,
 	.irq_ack	= ack_lapic_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void lapic_register_intr(int irq)
@@ -2135,7 +2182,7 @@
 	if (!global_clock_event)
 		return;
 
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 
 	/*
 	 * get/set the timer IRQ vector:
@@ -2203,7 +2250,7 @@
 			goto out;
 		}
 		panic_if_irq_remap("timer doesn't work through Interrupt-remapped IO-APIC");
-		local_irq_disable();
+		local_irq_disable_full();
 		clear_IO_APIC_pin(apic1, pin1);
 		if (!no_pin1)
 			apic_printk(APIC_QUIET, KERN_ERR "..MP-BIOS bug: "
@@ -2227,7 +2274,7 @@
 		/*
 		 * Cleanup, just in case ...
 		 */
-		local_irq_disable();
+		local_irq_disable_full();
 		legacy_pic->mask(0);
 		clear_IO_APIC_pin(apic2, pin2);
 		apic_printk(APIC_QUIET, KERN_INFO "....... failed.\n");
@@ -2244,7 +2291,7 @@
 		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
 		goto out;
 	}
-	local_irq_disable();
+	local_irq_disable_full();
 	legacy_pic->mask(0);
 	apic_write(APIC_LVT0, APIC_LVT_MASKED | APIC_DM_FIXED | cfg->vector);
 	apic_printk(APIC_QUIET, KERN_INFO "..... failed.\n");
@@ -2263,7 +2310,7 @@
 		apic_printk(APIC_QUIET, KERN_INFO "..... works.\n");
 		goto out;
 	}
-	local_irq_disable();
+	local_irq_disable_full();
 	apic_printk(APIC_QUIET, KERN_INFO "..... failed :(.\n");
 	if (apic_is_x2apic_enabled())
 		apic_printk(APIC_QUIET, KERN_INFO
@@ -2272,7 +2319,7 @@
 	panic("IO-APIC + timer doesn't work!  Boot with apic=debug and send a "
 		"report.  Then try booting with the 'noapic' option.\n");
 out:
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 }
 
 /*
@@ -3018,13 +3065,13 @@
 	cfg = irqd_cfg(irq_data);
 	add_pin_to_irq_node(data, ioapic_alloc_attr_node(info), ioapic, pin);
 
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 	if (info->ioapic.entry)
 		mp_setup_entry(cfg, data, info->ioapic.entry);
 	mp_register_handler(virq, data->trigger);
 	if (virq < nr_legacy_irqs())
 		legacy_pic->mask(virq);
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 
 	apic_printk(APIC_VERBOSE, KERN_DEBUG
 		    "IOAPIC[%d]: Set routing entry (%d-%d -> 0x%x -> IRQ %d Mode:%i Active:%i Dest:%d)\n",
diff --git a/kernel/arch/x86/kernel/apic/ipi.c b/kernel/arch/x86/kernel/apic/ipi.c
index 387154e..bd2ffae 100644
--- a/kernel/arch/x86/kernel/apic/ipi.c
+++ b/kernel/arch/x86/kernel/apic/ipi.c
@@ -117,8 +117,10 @@
 	 * cli/sti.  Otherwise we use an even cheaper single atomic write
 	 * to the APIC.
 	 */
+	unsigned long flags;
 	unsigned int cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@@ -137,6 +139,8 @@
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -145,8 +149,10 @@
  */
 void __default_send_IPI_dest_field(unsigned int mask, int vector, unsigned int dest)
 {
+	unsigned long flags;
 	unsigned long cfg;
 
+	flags = hard_cond_local_irq_save();
 	/*
 	 * Wait for idle.
 	 */
@@ -170,16 +176,18 @@
 	 * Send the IPI. The write to APIC_ICR fires this off.
 	 */
 	native_apic_mem_write(APIC_ICR, cfg);
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void default_send_IPI_single_phys(int cpu, int vector)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid, cpu),
 				      vector, APIC_DEST_PHYSICAL);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_sequence_phys(const struct cpumask *mask, int vector)
@@ -192,12 +200,12 @@
 	 * to an arbitrary mask, so I do a unicast to each CPU instead.
 	 * - mbligh
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_phys(const struct cpumask *mask,
@@ -209,14 +217,14 @@
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
 		__default_send_IPI_dest_field(per_cpu(x86_cpu_to_apicid,
 				 query_cpu), vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@@ -256,12 +264,12 @@
 	 * should be modified to do 1 message per cluster ID - mbligh
 	 */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask)
 		__default_send_IPI_dest_field(
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, apic->dest_logical);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void default_send_IPI_mask_allbutself_logical(const struct cpumask *mask,
@@ -273,7 +281,7 @@
 
 	/* See Hack comment above */
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	for_each_cpu(query_cpu, mask) {
 		if (query_cpu == this_cpu)
 			continue;
@@ -281,7 +289,7 @@
 			early_per_cpu(x86_cpu_to_logical_apicid, query_cpu),
 			vector, apic->dest_logical);
 		}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@@ -295,10 +303,10 @@
 	if (!mask)
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	WARN_ON(mask & ~cpumask_bits(cpu_online_mask)[0]);
 	__default_send_IPI_dest_field(mask, vector, apic->dest_logical);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* must come after the send_IPI functions above for inlining */
diff --git a/kernel/arch/x86/kernel/apic/msi.c b/kernel/arch/x86/kernel/apic/msi.c
index 6bd98a2..69dc36e 100644
--- a/kernel/arch/x86/kernel/apic/msi.c
+++ b/kernel/arch/x86/kernel/apic/msi.c
@@ -181,7 +181,8 @@
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_set_affinity	= msi_set_affinity,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 int pci_msi_prepare(struct irq_domain *domain, struct device *dev, int nvec,
@@ -251,7 +252,8 @@
 	.irq_ack		= irq_chip_ack_parent,
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info pci_msi_ir_domain_info = {
@@ -294,7 +296,8 @@
 	.irq_retrigger		= irq_chip_retrigger_hierarchy,
 	.irq_write_msi_msg	= dmar_msi_write_msg,
 	.flags			= IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_AFFINITY_PRE_STARTUP,
+				  IRQCHIP_AFFINITY_PRE_STARTUP |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static int dmar_msi_init(struct irq_domain *domain,
@@ -386,7 +389,8 @@
 	.irq_set_affinity = msi_domain_set_affinity,
 	.irq_retrigger = irq_chip_retrigger_hierarchy,
 	.irq_write_msi_msg = hpet_msi_write_msg,
-	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP,
+	.flags = IRQCHIP_SKIP_SET_WAKE | IRQCHIP_AFFINITY_PRE_STARTUP |
+		 IRQCHIP_PIPELINE_SAFE,
 };
 
 static int hpet_msi_init(struct irq_domain *domain,
diff --git a/kernel/arch/x86/kernel/apic/vector.c b/kernel/arch/x86/kernel/apic/vector.c
index bd557e9..1cb7ab4 100644
--- a/kernel/arch/x86/kernel/apic/vector.c
+++ b/kernel/arch/x86/kernel/apic/vector.c
@@ -39,7 +39,7 @@
 
 struct irq_domain *x86_vector_domain;
 EXPORT_SYMBOL_GPL(x86_vector_domain);
-static DEFINE_RAW_SPINLOCK(vector_lock);
+static DEFINE_HARD_SPINLOCK(vector_lock);
 static cpumask_var_t vector_searchmask;
 static struct irq_chip lapic_controller;
 static struct irq_matrix *vector_matrix;
@@ -757,6 +757,10 @@
 {
 	int isairq = vector - ISA_IRQ_VECTOR(0);
 
+	/* Map the cleanup vector 1:1 if irqs are pipelined. */
+	if (IS_ENABLED(CONFIG_IRQ_PIPELINE) &&
+		vector == IRQ_MOVE_CLEANUP_VECTOR)
+		return irq_to_desc(IRQ_MOVE_CLEANUP_VECTOR); /* 1:1 mapping */
 	/* Check whether the irq is in the legacy space */
 	if (isairq < 0 || isairq >= nr_legacy_irqs())
 		return VECTOR_UNUSED;
@@ -791,15 +795,19 @@
 
 void lapic_offline(void)
 {
-	lock_vector_lock();
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	irq_matrix_offline(vector_matrix);
-	unlock_vector_lock();
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static int apic_set_affinity(struct irq_data *irqd,
 			     const struct cpumask *dest, bool force)
 {
 	int err;
+
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
 	if (WARN_ON_ONCE(!irqd_is_activated(irqd)))
 		return -EIO;
@@ -830,10 +838,44 @@
 	return 1;
 }
 
-void apic_ack_irq(struct irq_data *irqd)
+#if defined(CONFIG_IRQ_PIPELINE) &&	\
+	defined(CONFIG_GENERIC_PENDING_IRQ)
+
+static void apic_deferred_irq_move(struct irq_work *work)
+{
+	struct irq_data *irqd;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	irqd = container_of(work, struct irq_data, move_work);
+	desc = irq_data_to_desc(irqd);
+	raw_spin_lock_irqsave(&desc->lock, flags);
+	__irq_move_irq(irqd);
+	raw_spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+static inline void apic_move_irq(struct irq_data *irqd)
+{
+	if (irqd_is_setaffinity_pending(irqd) &&
+		!irqd_is_setaffinity_blocked(irqd)) {
+		init_irq_work(&irqd->move_work, apic_deferred_irq_move);
+		irq_work_queue(&irqd->move_work);
+	}
+}
+
+#else
+
+static inline void apic_move_irq(struct irq_data *irqd)
 {
 	irq_move_irq(irqd);
-	ack_APIC_irq();
+}
+
+#endif
+
+void apic_ack_irq(struct irq_data *irqd)
+{
+	apic_move_irq(irqd);
+	__ack_APIC_irq();
 }
 
 void apic_ack_edge(struct irq_data *irqd)
@@ -876,15 +918,17 @@
 	apicd->move_in_progress = 0;
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_irq_move_cleanup)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_MOVE_CLEANUP_VECTOR,
+				 sysvec_irq_move_cleanup)
 {
 	struct hlist_head *clhead = this_cpu_ptr(&cleanup_list);
 	struct apic_chip_data *apicd;
 	struct hlist_node *tmp;
+	unsigned long flags;
 
 	ack_APIC_irq();
 	/* Prevent vectors vanishing under us */
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 
 	hlist_for_each_entry_safe(apicd, tmp, clhead, clist) {
 		unsigned int irr, vector = apicd->prev_vector;
@@ -906,14 +950,15 @@
 		free_moved_vector(apicd);
 	}
 
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 static void __send_cleanup_vector(struct apic_chip_data *apicd)
 {
+	unsigned long flags;
 	unsigned int cpu;
 
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	apicd->move_in_progress = 0;
 	cpu = apicd->prev_cpu;
 	if (cpu_online(cpu)) {
@@ -922,7 +967,7 @@
 	} else {
 		apicd->prev_vector = 0;
 	}
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 }
 
 void send_cleanup_vector(struct irq_cfg *cfg)
@@ -960,6 +1005,8 @@
 	struct apic_chip_data *apicd;
 	struct irq_data *irqd;
 	unsigned int vector;
+
+	WARN_ON_ONCE(irqs_pipelined() && !hard_irqs_disabled());
 
 	/*
 	 * The function is called for all descriptors regardless of which
@@ -1051,9 +1098,10 @@
 int lapic_can_unplug_cpu(void)
 {
 	unsigned int rsvd, avl, tomove, cpu = smp_processor_id();
+	unsigned long flags;
 	int ret = 0;
 
-	raw_spin_lock(&vector_lock);
+	raw_spin_lock_irqsave(&vector_lock, flags);
 	tomove = irq_matrix_allocated(vector_matrix);
 	avl = irq_matrix_available(vector_matrix, true);
 	if (avl < tomove) {
@@ -1068,7 +1116,7 @@
 			rsvd, avl);
 	}
 out:
-	raw_spin_unlock(&vector_lock);
+	raw_spin_unlock_irqrestore(&vector_lock, flags);
 	return ret;
 }
 #endif /* HOTPLUG_CPU */
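
Condensing the deferred-move pattern from the hunks above (vector.c and
io_apic.c use the same scheme), as a sketch:

	static inline void pipelined_move_example(struct irq_data *irqd)
	{
		/*
		 * With pipelining, the vector move cannot run from the
		 * (possibly out-of-band) ack path; queue it as in-band
		 * irq_work to be replayed with the descriptor lock held.
		 */
		if (irqd_is_setaffinity_pending(irqd) &&
		    !irqd_is_setaffinity_blocked(irqd)) {
			init_irq_work(&irqd->move_work, apic_deferred_irq_move);
			irq_work_queue(&irqd->move_work);
		}
	}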
diff --git a/kernel/arch/x86/kernel/apic/x2apic_cluster.c b/kernel/arch/x86/kernel/apic/x2apic_cluster.c
index 7eec3c1..52fdf80 100644
--- a/kernel/arch/x86/kernel/apic/x2apic_cluster.c
+++ b/kernel/arch/x86/kernel/apic/x2apic_cluster.c
@@ -44,7 +44,7 @@
 
 	/* x2apic MSRs are special and need a special fence: */
 	weak_wrmsr_fence();
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	tmpmsk = this_cpu_cpumask_var_ptr(ipi_mask);
 	cpumask_copy(tmpmsk, mask);
@@ -68,7 +68,7 @@
 		cpumask_andnot(tmpmsk, tmpmsk, &cmsk->mask);
 	}
 
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
diff --git a/kernel/arch/x86/kernel/apic/x2apic_phys.c b/kernel/arch/x86/kernel/apic/x2apic_phys.c
index 032a00e..72ebc33 100644
--- a/kernel/arch/x86/kernel/apic/x2apic_phys.c
+++ b/kernel/arch/x86/kernel/apic/x2apic_phys.c
@@ -58,7 +58,7 @@
 	/* x2apic MSRs are special and need a special fence: */
 	weak_wrmsr_fence();
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	this_cpu = smp_processor_id();
 	for_each_cpu(query_cpu, mask) {
@@ -67,7 +67,7 @@
 		__x2apic_send_IPI_dest(per_cpu(x86_cpu_to_apicid, query_cpu),
 				       vector, APIC_DEST_PHYSICAL);
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static void x2apic_send_IPI_mask(const struct cpumask *mask, int vector)
diff --git a/kernel/arch/x86/kernel/asm-offsets.c b/kernel/arch/x86/kernel/asm-offsets.c
index 70b7154..09c539f 100644
--- a/kernel/arch/x86/kernel/asm-offsets.c
+++ b/kernel/arch/x86/kernel/asm-offsets.c
@@ -38,6 +38,9 @@
 #endif
 
 	BLANK();
+#ifdef CONFIG_IRQ_PIPELINE
+	DEFINE(OOB_stage_mask, STAGE_MASK);
+#endif
 	OFFSET(crypto_tfm_ctx_offset, crypto_tfm, __crt_ctx);
 
 	BLANK();
diff --git a/kernel/arch/x86/kernel/cpu/acrn.c b/kernel/arch/x86/kernel/cpu/acrn.c
index 0b2c039..7f0694b 100644
--- a/kernel/arch/x86/kernel/cpu/acrn.c
+++ b/kernel/arch/x86/kernel/cpu/acrn.c
@@ -35,7 +35,8 @@
 
 static void (*acrn_intr_handler)(void);
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_acrn_hv_callback)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,
+				 sysvec_acrn_hv_callback)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
diff --git a/kernel/arch/x86/kernel/cpu/mce/amd.c b/kernel/arch/x86/kernel/cpu/mce/amd.c
index 09f7c65..d158d83 100644
--- a/kernel/arch/x86/kernel/cpu/mce/amd.c
+++ b/kernel/arch/x86/kernel/cpu/mce/amd.c
@@ -921,13 +921,18 @@
 	mce_log(&m);
 }
 
+/*
+ * irq_pipeline: Deferred error events have NMI semantics with respect
+ * to pipelining; they can and should be handled immediately out of
+ * the IDT.
+ */
 DEFINE_IDTENTRY_SYSVEC(sysvec_deferred_error)
 {
 	trace_deferred_error_apic_entry(DEFERRED_ERROR_VECTOR);
 	inc_irq_stat(irq_deferred_error_count);
 	deferred_error_int_vector();
 	trace_deferred_error_apic_exit(DEFERRED_ERROR_VECTOR);
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 /*
diff --git a/kernel/arch/x86/kernel/cpu/mce/core.c b/kernel/arch/x86/kernel/cpu/mce/core.c
index 5cf1a02..e18dae3 100644
--- a/kernel/arch/x86/kernel/cpu/mce/core.c
+++ b/kernel/arch/x86/kernel/cpu/mce/core.c
@@ -1473,7 +1473,9 @@
 		/* If this triggers there is no way to recover. Die hard. */
 		BUG_ON(!on_thread_stack() || !user_mode(regs));
 
+		hard_local_irq_enable();
 		queue_task_work(&m, msg, kill_it);
+		hard_local_irq_disable();
 
 	} else {
 		/*
diff --git a/kernel/arch/x86/kernel/cpu/mce/therm_throt.c b/kernel/arch/x86/kernel/cpu/mce/therm_throt.c
index a7cd2d2..115dd0b 100644
--- a/kernel/arch/x86/kernel/cpu/mce/therm_throt.c
+++ b/kernel/arch/x86/kernel/cpu/mce/therm_throt.c
@@ -614,13 +614,17 @@
 
 static void (*smp_thermal_vector)(void) = unexpected_thermal_interrupt;
 
+/*
+ * irq_pipeline: MCE events have NMI semantics with respect to
+ * pipelining; they can and should be handled immediately out of the IDT.
+ */
 DEFINE_IDTENTRY_SYSVEC(sysvec_thermal)
 {
 	trace_thermal_apic_entry(THERMAL_APIC_VECTOR);
 	inc_irq_stat(irq_thermal_count);
 	smp_thermal_vector();
 	trace_thermal_apic_exit(THERMAL_APIC_VECTOR);
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 /* Thermal monitoring depends on APIC, ACPI and clock modulation */
diff --git a/kernel/arch/x86/kernel/cpu/mce/threshold.c b/kernel/arch/x86/kernel/cpu/mce/threshold.c
index 6a059a0..a2515dc 100644
--- a/kernel/arch/x86/kernel/cpu/mce/threshold.c
+++ b/kernel/arch/x86/kernel/cpu/mce/threshold.c
@@ -27,5 +27,5 @@
 	inc_irq_stat(irq_threshold_count);
 	mce_threshold_vector();
 	trace_threshold_apic_exit(THRESHOLD_APIC_VECTOR);
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
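
The ack_APIC_irq() -> __ack_APIC_irq() conversions in these NMI-class
handlers assume the double-underscored variant issues the EOI write
unconditionally instead of deferring through the pipeline; as a sketch:

	static void nmi_class_eoi_example(void)
	{
		/* EOI the local APIC directly; handlers with NMI
		 * semantics may fire while the in-band stage is
		 * stalled, so deferral via the IRQ log is not an
		 * option. */
		__ack_APIC_irq();
	}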
diff --git a/kernel/arch/x86/kernel/cpu/mshyperv.c b/kernel/arch/x86/kernel/cpu/mshyperv.c
index 021cd06..ffa7ff0 100644
--- a/kernel/arch/x86/kernel/cpu/mshyperv.c
+++ b/kernel/arch/x86/kernel/cpu/mshyperv.c
@@ -41,7 +41,8 @@
 static void (*hv_kexec_handler)(void);
 static void (*hv_crash_handler)(struct pt_regs *regs);
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_callback)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,
+				 sysvec_hyperv_callback)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@@ -77,7 +78,8 @@
  * Routines to do per-architecture handling of stimer0
  * interrupts when in Direct Mode
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_hyperv_stimer0)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERV_STIMER0_VECTOR,
+				 sysvec_hyperv_stimer0)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
diff --git a/kernel/arch/x86/kernel/cpu/mtrr/generic.c b/kernel/arch/x86/kernel/cpu/mtrr/generic.c
index a29997e..fc15869 100644
--- a/kernel/arch/x86/kernel/cpu/mtrr/generic.c
+++ b/kernel/arch/x86/kernel/cpu/mtrr/generic.c
@@ -450,13 +450,13 @@
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prepare_set();
 
 	pat_init();
 
 	post_set();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* Grab all of the MTRR state for this CPU into *state */
@@ -797,7 +797,7 @@
 	unsigned long mask, count;
 	unsigned long flags;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prepare_set();
 
 	/* Actually set the state */
@@ -807,7 +807,7 @@
 	pat_init();
 
 	post_set();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	/* Use the atomic bitops to update the global mask */
 	for (count = 0; count < sizeof(mask) * 8; ++count) {
@@ -836,7 +836,7 @@
 
 	vr = &mtrr_state.var_ranges[reg];
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prepare_set();
 
 	if (size == 0) {
@@ -857,7 +857,7 @@
 	}
 
 	post_set();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 int generic_validate_add_page(unsigned long base, unsigned long size,
diff --git a/kernel/arch/x86/kernel/dumpstack.c b/kernel/arch/x86/kernel/dumpstack.c
index 97aa900..0fe58ed 100644
--- a/kernel/arch/x86/kernel/dumpstack.c
+++ b/kernel/arch/x86/kernel/dumpstack.c
@@ -7,6 +7,7 @@
 #include <linux/uaccess.h>
 #include <linux/utsname.h>
 #include <linux/hardirq.h>
+#include <linux/irq_pipeline.h>
 #include <linux/kdebug.h>
 #include <linux/module.h>
 #include <linux/ptrace.h>
@@ -335,7 +336,7 @@
 	oops_enter();
 
 	/* racy, but better than risking deadlock. */
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cpu = smp_processor_id();
 	if (!arch_spin_trylock(&die_lock)) {
 		if (cpu == die_owner)
@@ -365,7 +366,7 @@
 	if (!die_nest_count)
 		/* Nest count reaches zero, release the lock. */
 		arch_spin_unlock(&die_lock);
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	oops_exit();
 
 	/* Executive summary in case the oops scrolled away */
@@ -394,6 +395,8 @@
 {
 	const char *pr = "";
 
+	irq_pipeline_oops();
+
 	/* Save the regs of the first oops for the executive summary later. */
 	if (!die_counter)
 		exec_summary_regs = *regs;
@@ -402,13 +405,14 @@
 		pr = IS_ENABLED(CONFIG_PREEMPT_RT) ? " PREEMPT_RT" : " PREEMPT";
 
 	printk(KERN_DEFAULT
-	       "%s: %04lx [#%d]%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
+	       "%s: %04lx [#%d]%s%s%s%s%s%s\n", str, err & 0xffff, ++die_counter,
 	       pr,
 	       IS_ENABLED(CONFIG_SMP)     ? " SMP"             : "",
 	       debug_pagealloc_enabled()  ? " DEBUG_PAGEALLOC" : "",
 	       IS_ENABLED(CONFIG_KASAN)   ? " KASAN"           : "",
 	       IS_ENABLED(CONFIG_PAGE_TABLE_ISOLATION) ?
-	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "");
+	       (boot_cpu_has(X86_FEATURE_PTI) ? " PTI" : " NOPTI") : "",
+	       irqs_pipelined()           ? " IRQ_PIPELINE"    : "");
 }
 NOKPROBE_SYMBOL(__die_header);
 
diff --git a/kernel/arch/x86/kernel/fpu/core.c b/kernel/arch/x86/kernel/fpu/core.c
index 571220a..49e2853 100644
--- a/kernel/arch/x86/kernel/fpu/core.c
+++ b/kernel/arch/x86/kernel/fpu/core.c
@@ -15,6 +15,7 @@
 
 #include <linux/hardirq.h>
 #include <linux/pkeys.h>
+#include <linux/cpuhotplug.h>
 
 #define CREATE_TRACE_POINTS
 #include <asm/trace/fpu.h>
@@ -76,9 +77,10 @@
  */
 bool irq_fpu_usable(void)
 {
-	return !in_interrupt() ||
-		interrupted_user_mode() ||
-		interrupted_kernel_fpu_idle();
+	return running_inband() &&
+		(!in_interrupt() ||
+			interrupted_user_mode() ||
+			interrupted_kernel_fpu_idle());
 }
 EXPORT_SYMBOL(irq_fpu_usable);
 
@@ -123,10 +125,14 @@
 
 void kernel_fpu_begin_mask(unsigned int kfpu_mask)
 {
+	unsigned long flags;
+
 	preempt_disable();
 
 	WARN_ON_FPU(!irq_fpu_usable());
 	WARN_ON_FPU(this_cpu_read(in_kernel_fpu));
+
+	flags = hard_cond_local_irq_save();
 
 	this_cpu_write(in_kernel_fpu, true);
 
@@ -139,6 +145,7 @@
 		 */
 		copy_fpregs_to_fpstate(&current->thread.fpu);
 	}
+
 	__cpu_invalidate_fpregs_state();
 
 	/* Put sane initial values into the control registers. */
@@ -147,6 +154,8 @@
 
 	if (unlikely(kfpu_mask & KFPU_387) && boot_cpu_has(X86_FEATURE_FPU))
 		asm volatile ("fninit");
+
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(kernel_fpu_begin_mask);
 
@@ -166,9 +175,11 @@
  */
 void fpu__save(struct fpu *fpu)
 {
+	unsigned long flags;
+
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
-	fpregs_lock();
+	flags = fpregs_lock();
 	trace_x86_fpu_before_save(fpu);
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
@@ -178,7 +189,7 @@
 	}
 
 	trace_x86_fpu_after_save(fpu);
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 /*
@@ -214,6 +225,7 @@
 {
 	struct fpu *dst_fpu = &dst->thread.fpu;
 	struct fpu *src_fpu = &src->thread.fpu;
+	unsigned long flags;
 
 	dst_fpu->last_cpu = -1;
 
@@ -236,14 +248,14 @@
 	 * ( The function 'fails' in the FNSAVE case, which destroys
 	 *   register contents so we have to load them back. )
 	 */
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		memcpy(&dst_fpu->state, &src_fpu->state, fpu_kernel_xstate_size);
 
 	else if (!copy_fpregs_to_fpstate(dst_fpu))
 		copy_kernel_to_fpregs(&dst_fpu->state);
 
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	set_tsk_thread_flag(dst, TIF_NEED_FPU_LOAD);
 
@@ -321,7 +333,9 @@
  */
 void fpu__drop(struct fpu *fpu)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	flags = hard_preempt_disable();
 
 	if (fpu == &current->thread.fpu) {
 		/* Ignore delayed exceptions from user space */
@@ -333,7 +347,7 @@
 
 	trace_x86_fpu_dropped(fpu);
 
-	preempt_enable();
+	hard_preempt_enable(flags);
 }
 
 /*
@@ -361,15 +375,19 @@
  */
 static void fpu__clear(struct fpu *fpu, bool user_only)
 {
+	unsigned long flags;
+
 	WARN_ON_FPU(fpu != &current->thread.fpu);
 
 	if (!static_cpu_has(X86_FEATURE_FPU)) {
+		flags = hard_cond_local_irq_save();
 		fpu__drop(fpu);
 		fpu__initialize(fpu);
+		hard_cond_local_irq_restore(flags);
 		return;
 	}
 
-	fpregs_lock();
+	flags = fpregs_lock();
 
 	if (user_only) {
 		if (!fpregs_state_valid(fpu, smp_processor_id()) &&
@@ -382,7 +400,7 @@
 	}
 
 	fpregs_mark_activate();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 void fpu__clear_user_states(struct fpu *fpu)
@@ -400,10 +418,14 @@
  */
 void switch_fpu_return(void)
 {
+	unsigned long flags;
+
 	if (!static_cpu_has(X86_FEATURE_FPU))
 		return;
 
+	flags = hard_cond_local_irq_save();
 	__fpregs_load_activate();
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(switch_fpu_return);
 
@@ -503,3 +525,70 @@
 	 */
 	return 0;
 }
+
+#ifdef CONFIG_DOVETAIL
+
+/*
+ * Holds the in-kernel fpu state when preempted by a task running on
+ * the out-of-band stage.
+ */
+static DEFINE_PER_CPU(struct fpu *, in_kernel_fpstate);
+
+static int fpu__init_kernel_fpstate(unsigned int cpu)
+{
+	struct fpu *fpu;
+
+	fpu = kzalloc(sizeof(*fpu) + fpu_kernel_xstate_size, GFP_KERNEL);
+	if (fpu == NULL)
+		return -ENOMEM;
+
+	this_cpu_write(in_kernel_fpstate, fpu);
+	fpstate_init(&fpu->state);
+
+	return 0;
+}
+
+static int fpu__drop_kernel_fpstate(unsigned int cpu)
+{
+	struct fpu *fpu = this_cpu_read(in_kernel_fpstate);
+
+	kfree(fpu);
+
+	return 0;
+}
+
+void fpu__suspend_inband(void)
+{
+	struct fpu *kfpu = this_cpu_read(in_kernel_fpstate);
+	struct task_struct *tsk = current;
+
+	if (kernel_fpu_disabled()) {
+		copy_fpregs_to_fpstate(kfpu);
+		__cpu_invalidate_fpregs_state();
+		oob_fpu_set_preempt(&tsk->thread.fpu);
+	}
+}
+
+void fpu__resume_inband(void)
+{
+	struct fpu *kfpu = this_cpu_read(in_kernel_fpstate);
+	struct task_struct *tsk = current;
+
+	if (oob_fpu_preempted(&tsk->thread.fpu)) {
+		copy_kernel_to_fpregs(&kfpu->state);
+		__cpu_invalidate_fpregs_state();
+		oob_fpu_clear_preempt(&tsk->thread.fpu);
+	} else if (!(tsk->flags & PF_KTHREAD) &&
+		test_thread_flag(TIF_NEED_FPU_LOAD))
+		switch_fpu_return();
+}
+
+static void __init fpu__init_dovetail(void)
+{
+	cpuhp_setup_state(CPUHP_AP_ONLINE_DYN,
+			"platform/x86/dovetail:online",
+			fpu__init_kernel_fpstate, fpu__drop_kernel_fpstate);
+}
+core_initcall(fpu__init_dovetail);
+
+#endif
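
A hedged sketch of how a co-kernel would pair the new hooks around an
out-of-band preemption (hypothetical caller; only the two hooks above
come from this patch):

	static void oob_preemption_example(void)
	{
		fpu__suspend_inband();	/* park in-kernel FPU state if live */
		/* ... run out-of-band work, possibly using the FPU ... */
		fpu__resume_inband();	/* reinstate the preempted state */
	}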
diff --git a/kernel/arch/x86/kernel/fpu/signal.c b/kernel/arch/x86/kernel/fpu/signal.c
index b7b92cd..20d04a3 100644
--- a/kernel/arch/x86/kernel/fpu/signal.c
+++ b/kernel/arch/x86/kernel/fpu/signal.c
@@ -61,11 +61,12 @@
 		struct xregs_state *xsave = &tsk->thread.fpu.state.xsave;
 		struct user_i387_ia32_struct env;
 		struct _fpstate_32 __user *fp = buf;
+		unsigned long flags;
 
-		fpregs_lock();
+		flags = fpregs_lock();
 		if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 			copy_fxregs_to_kernel(&tsk->thread.fpu);
-		fpregs_unlock();
+		fpregs_unlock(flags);
 
 		convert_from_fxsr(&env, tsk);
 
@@ -165,6 +166,7 @@
 {
 	struct task_struct *tsk = current;
 	int ia32_fxstate = (buf != buf_fx);
+	unsigned long flags;
 	int ret;
 
 	ia32_fxstate &= (IS_ENABLED(CONFIG_X86_32) ||
@@ -186,14 +188,14 @@
 	 * userland's stack frame which will likely succeed. If it does not,
 	 * resolve the fault in the user memory and try again.
 	 */
-	fpregs_lock();
+	flags = fpregs_lock();
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		__fpregs_load_activate();
 
 	pagefault_disable();
 	ret = copy_fpregs_to_sigframe(buf_fx);
 	pagefault_enable();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	if (ret) {
 		if (!fault_in_pages_writeable(buf_fx, fpu_user_xstate_size))
@@ -286,6 +288,7 @@
 	struct fpu *fpu = &tsk->thread.fpu;
 	struct user_i387_ia32_struct env;
 	u64 user_xfeatures = 0;
+	unsigned long flags;
 	int fx_only = 0;
 	int ret = 0;
 
@@ -337,7 +340,7 @@
 		 * going through the kernel buffer with the enabled pagefault
 		 * handler.
 		 */
-		fpregs_lock();
+		flags = fpregs_lock();
 		pagefault_disable();
 		ret = copy_user_to_fpregs_zeroing(buf_fx, user_xfeatures, fx_only);
 		pagefault_enable();
@@ -360,7 +363,7 @@
 				copy_kernel_to_xregs(&fpu->state.xsave,
 						     xfeatures_mask_supervisor());
 			fpregs_mark_activate();
-			fpregs_unlock();
+			fpregs_unlock(flags);
 			return 0;
 		}
 
@@ -382,7 +385,7 @@
 		if (test_thread_flag(TIF_NEED_FPU_LOAD))
 			__cpu_invalidate_fpregs_state();
 
-		fpregs_unlock();
+		fpregs_unlock(flags);
 	} else {
 		/*
 		 * For 32-bit frames with fxstate, copy the fxstate so it can
@@ -400,7 +403,7 @@
 	 * to be loaded again on return to userland (overriding last_cpu avoids
 	 * the optimisation).
 	 */
-	fpregs_lock();
+	flags = fpregs_lock();
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD)) {
 
@@ -413,7 +416,7 @@
 		set_thread_flag(TIF_NEED_FPU_LOAD);
 	}
 	__fpu_invalidate_fpregs_state(fpu);
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	if (use_xsave() && !fx_only) {
 		u64 init_bv = xfeatures_mask_user() & ~user_xfeatures;
@@ -425,7 +428,7 @@
 		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
 					      fx_only);
 
-		fpregs_lock();
+		flags = fpregs_lock();
 		if (unlikely(init_bv))
 			copy_kernel_to_xregs(&init_fpstate.xsave, init_bv);
 
@@ -446,7 +449,7 @@
 		sanitize_restored_user_xstate(&fpu->state, envp, user_xfeatures,
 					      fx_only);
 
-		fpregs_lock();
+		flags = fpregs_lock();
 		if (use_xsave()) {
 			u64 init_bv;
 
@@ -460,14 +463,14 @@
 		if (ret)
 			goto out;
 
-		fpregs_lock();
+		flags = fpregs_lock();
 		ret = copy_kernel_to_fregs_err(&fpu->state.fsave);
 	}
 	if (!ret)
 		fpregs_mark_activate();
 	else
 		fpregs_deactivate(fpu);
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 out:
 	if (ret)
diff --git a/kernel/arch/x86/kernel/hpet.c b/kernel/arch/x86/kernel/hpet.c
index 574df24..f44da6c 100644
--- a/kernel/arch/x86/kernel/hpet.c
+++ b/kernel/arch/x86/kernel/hpet.c
@@ -406,7 +406,7 @@
 	evt->set_next_event	= hpet_clkevt_set_next_event;
 	evt->set_state_shutdown	= hpet_clkevt_set_state_shutdown;
 
-	evt->features = CLOCK_EVT_FEAT_ONESHOT;
+	evt->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
 	if (hc->boot_cfg & HPET_TN_PERIODIC) {
 		evt->features		|= CLOCK_EVT_FEAT_PERIODIC;
 		evt->set_state_periodic	= hpet_clkevt_set_state_periodic;
@@ -519,7 +519,7 @@
 		return IRQ_HANDLED;
 	}
 
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 	return IRQ_HANDLED;
 }
 
@@ -702,7 +702,7 @@
 	if (arch_spin_is_locked(&old.lock))
 		goto contended;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	if (arch_spin_trylock(&hpet.lock)) {
 		new.value = hpet_readl(HPET_COUNTER);
 		/*
@@ -710,10 +710,10 @@
 		 */
 		WRITE_ONCE(hpet.value, new.value);
 		arch_spin_unlock(&hpet.lock);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return (u64)new.value;
 	}
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 contended:
 	/*
diff --git a/kernel/arch/x86/kernel/i8259.c b/kernel/arch/x86/kernel/i8259.c
index 282b4ee..6abdbd0 100644
--- a/kernel/arch/x86/kernel/i8259.c
+++ b/kernel/arch/x86/kernel/i8259.c
@@ -33,7 +33,7 @@
 static void init_8259A(int auto_eoi);
 
 static int i8259A_auto_eoi;
-DEFINE_RAW_SPINLOCK(i8259A_lock);
+DEFINE_HARD_SPINLOCK(i8259A_lock);
 
 /*
  * 8259A PIC functions to handle ISA devices:
@@ -227,6 +227,7 @@
 	.irq_disable	= disable_8259A_irq,
 	.irq_unmask	= enable_8259A_irq,
 	.irq_mask_ack	= mask_and_ack_8259A,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static char irq_trigger[2];
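
Several irqchips in this series gain IRQCHIP_PIPELINE_SAFE; a minimal
sketch of that declaration for a driver (callbacks are hypothetical):

	static struct irq_chip example_chip = {
		.name		= "EXAMPLE",
		.irq_mask	= example_mask_irq,	/* hypothetical */
		.irq_unmask	= example_unmask_irq,	/* hypothetical */
		/* Tell the pipeline core this chip may be driven from
		 * out-of-band context. */
		.flags		= IRQCHIP_PIPELINE_SAFE,
	};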
diff --git a/kernel/arch/x86/kernel/idt.c b/kernel/arch/x86/kernel/idt.c
index ee1a283..fb4333d 100644
--- a/kernel/arch/x86/kernel/idt.c
+++ b/kernel/arch/x86/kernel/idt.c
@@ -117,6 +117,10 @@
 	INTG(CALL_FUNCTION_SINGLE_VECTOR,	asm_sysvec_call_function_single),
 	INTG(IRQ_MOVE_CLEANUP_VECTOR,		asm_sysvec_irq_move_cleanup),
 	INTG(REBOOT_VECTOR,			asm_sysvec_reboot),
+#ifdef CONFIG_IRQ_PIPELINE
+	INTG(RESCHEDULE_OOB_VECTOR,		asm_sysvec_reschedule_oob_ipi),
+	INTG(TIMER_OOB_VECTOR,			asm_sysvec_timer_oob_ipi),
+#endif
 #endif
 
 #ifdef CONFIG_X86_THERMAL_VECTOR
diff --git a/kernel/arch/x86/kernel/irq.c b/kernel/arch/x86/kernel/irq.c
index ce904c8..753e4d6 100644
--- a/kernel/arch/x86/kernel/irq.c
+++ b/kernel/arch/x86/kernel/irq.c
@@ -4,6 +4,7 @@
  */
 #include <linux/cpu.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/kernel_stat.h>
 #include <linux/of.h>
 #include <linux/seq_file.h>
@@ -48,7 +49,7 @@
 	 * completely.
 	 * But only ack when the APIC is enabled -AK
 	 */
-	ack_APIC_irq();
+	__ack_APIC_irq();
 }
 
 #define irq_stats(x)		(&per_cpu(irq_stat, x))
@@ -235,8 +236,11 @@
 /*
  * common_interrupt() handles all normal device IRQ's (the special SMP
  * cross-CPU interrupts have their own entry points).
+ *
+ * Compiled out if CONFIG_IRQ_PIPELINE is enabled, replaced by
+ * arch_handle_irq().
  */
-DEFINE_IDTENTRY_IRQ(common_interrupt)
+DEFINE_IDTENTRY_IRQ_PIPELINED(common_interrupt)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 	struct irq_desc *desc;
@@ -268,7 +272,8 @@
 /*
  * Handler for X86_PLATFORM_IPI_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_x86_platform_ipi)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(X86_PLATFORM_IPI_VECTOR,
+				 sysvec_x86_platform_ipi)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
@@ -300,7 +305,8 @@
 /*
  * Handler for POSTED_INTERRUPT_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_ipi)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(POSTED_INTR_VECTOR,
+					sysvec_kvm_posted_intr_ipi)
 {
 	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_ipis);
@@ -309,7 +315,8 @@
 /*
  * Handler for POSTED_INTERRUPT_WAKEUP_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_kvm_posted_intr_wakeup_ipi)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(POSTED_INTR_WAKEUP_VECTOR,
+				 sysvec_kvm_posted_intr_wakeup_ipi)
 {
 	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_wakeup_ipis);
@@ -319,7 +326,8 @@
 /*
  * Handler for POSTED_INTERRUPT_NESTED_VECTOR.
  */
-DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_kvm_posted_intr_nested_ipi)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(POSTED_INTR_NESTED_VECTOR,
+					sysvec_kvm_posted_intr_nested_ipi)
 {
 	ack_APIC_irq();
 	inc_irq_stat(kvm_posted_intr_nested_ipis);
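The *_PIPELINED variants take the vector explicitly because delivery is now split in two. A simplified sketch of the flow, reconstructed from the pieces added below in irq_pipeline.c (not the literal macro expansion):

	/*
	 * asm_sysvec_foo (IDT stub)
	 *   arch_pipeline_entry(regs, vector)
	 *     arch_handle_irq(regs, vector, ...)
	 *       generic_pipeline_irq(irq)
	 *         handle_apic_irq(desc)      ack + out-of-band delivery
	 *     ...once the in-band stage is unstalled...
	 *     arch_do_IRQ_pipelined(desc)
	 *       do_sysvec_inband(desc)       invokes __sysvec_foo(regs)
	 */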
diff --git a/kernel/arch/x86/kernel/irq_pipeline.c b/kernel/arch/x86/kernel/irq_pipeline.c
new file mode 100644
index 0000000..725a326
--- /dev/null
+++ b/kernel/arch/x86/kernel/irq_pipeline.c
@@ -0,0 +1,387 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/kernel.h>
+#include <linux/smp.h>
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+#include <asm/irqdomain.h>
+#include <asm/apic.h>
+#include <asm/traps.h>
+#include <asm/irq_work.h>
+#include <asm/mshyperv.h>
+#include <asm/idtentry.h>
+
+static struct irq_domain *sipic_domain;
+
+static void sipic_irq_noop(struct irq_data *data) { }
+
+static unsigned int sipic_irq_noop_ret(struct irq_data *data)
+{
+	return 0;
+}
+
+static struct irq_chip sipic_chip = {
+	.name		= "SIPIC",
+	.irq_startup	= sipic_irq_noop_ret,
+	.irq_shutdown	= sipic_irq_noop,
+	.irq_enable	= sipic_irq_noop,
+	.irq_disable	= sipic_irq_noop,
+	.flags		= IRQCHIP_PIPELINE_SAFE | IRQCHIP_SKIP_SET_WAKE,
+};
+
+void handle_apic_irq(struct irq_desc *desc)
+{
+	if (WARN_ON_ONCE(irq_pipeline_debug() && !on_pipeline_entry()))
+		return;
+
+	/*
+	 * MCE events are non-maskable, therefore their in-band
+	 * handlers have to be oob-compatible by construction. Those
+	 * handlers run immediately out of the IDT for this reason as
+	 * well. We won't see them here since they are not routed via
+	 * arch_handle_irq() -> generic_pipeline_irq().
+	 *
+	 * All we need to do at this stage is to acknowledge other
+	 * APIC events, then pipeline the corresponding interrupt from
+	 * our synthetic controller chip (SIPIC).
+	 */
+	__ack_APIC_irq();
+
+	handle_oob_irq(desc);
+}
+
+void irq_send_oob_ipi(unsigned int ipi,
+		      const struct cpumask *cpumask)
+{
+	apic->send_IPI_mask_allbutself(cpumask, apicm_irq_vector(ipi));
+}
+EXPORT_SYMBOL_GPL(irq_send_oob_ipi);
+
+static void do_sysvec_inband(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct pt_regs *regs = get_irq_regs();
+	int vector = apicm_irq_vector(irq);
+
+	/*
+	 * This code only sees pipelined sysvec events tagged with
+	 * DEFINE_IDTENTRY_SYSVEC_PIPELINED:
+	 *
+	 * 	arch_handle_irq(irq)
+	 *		generic_pipeline_irq(irq)
+	 *			handle_apic_irq(irq)
+	 *				handle_oob_irq(irq)
+	 *				[...irq_post_inband...]
+	 *
+	 *      arch_do_IRQ_pipelined(desc)
+	 *      <switch_to_irqstack>
+	 *                |
+	 *                v
+	 *	do_sysvec_inband(desc)
+	 *
+	 * System vectors which are still tagged as
+	 * DEFINE_IDTENTRY_SYSVEC/DEFINE_IDTENTRY_SYSVEC_SIMPLE are
+	 * directly dispatched out of the IDT, assuming their handler
+	 * is oob-safe (like NMI handlers) therefore never reach this
+	 * in-band stage handler.
+	 */
+
+	switch (vector) {
+#ifdef CONFIG_SMP
+	case RESCHEDULE_VECTOR:
+		__sysvec_reschedule_ipi(regs);
+		break;
+	case CALL_FUNCTION_VECTOR:
+		__sysvec_call_function(regs);
+		break;
+	case CALL_FUNCTION_SINGLE_VECTOR:
+		__sysvec_call_function_single(regs);
+		break;
+	case REBOOT_VECTOR:
+		__sysvec_reboot(regs);
+		break;
+#endif
+	case X86_PLATFORM_IPI_VECTOR:
+		__sysvec_x86_platform_ipi(regs);
+		break;
+	case IRQ_WORK_VECTOR:
+		__sysvec_irq_work(regs);
+		break;
+#ifdef CONFIG_HAVE_KVM
+	case POSTED_INTR_VECTOR:
+		__sysvec_kvm_posted_intr_ipi(regs);
+		break;
+	case POSTED_INTR_WAKEUP_VECTOR:
+		__sysvec_kvm_posted_intr_wakeup_ipi(regs);
+		break;
+	case POSTED_INTR_NESTED_VECTOR:
+		__sysvec_kvm_posted_intr_nested_ipi(regs);
+		break;
+#endif
+#ifdef CONFIG_HYPERV
+	case HYPERVISOR_CALLBACK_VECTOR:
+		__sysvec_hyperv_callback(regs);
+		break;
+	case HYPERV_REENLIGHTENMENT_VECTOR:
+		__sysvec_hyperv_reenlightenment(regs);
+		break;
+	case HYPERV_STIMER0_VECTOR:
+		__sysvec_hyperv_stimer0(regs);
+		break;
+#endif
+#ifdef CONFIG_ACRN_GUEST
+	case HYPERVISOR_CALLBACK_VECTOR:
+		__sysvec_acrn_hv_callback(regs);
+		break;
+#endif
+#ifdef CONFIG_XEN_PVHVM
+	case HYPERVISOR_CALLBACK_VECTOR:
+		__sysvec_xen_hvm_callback(regs);
+		break;
+#endif
+	case LOCAL_TIMER_VECTOR:
+		__sysvec_apic_timer_interrupt(regs);
+		break;
+	default:
+		printk_once(KERN_ERR "irq_pipeline: unexpected event"
+			" on vector #%.2x (irq=%u)\n", vector, irq);
+	}
+}
+
+static irqentry_state_t pipeline_enter_rcu(void)
+{
+	irqentry_state_t state = {
+		.exit_rcu = false,
+		.stage_info = IRQENTRY_INBAND_UNSTALLED,
+	};
+
+	if (!IS_ENABLED(CONFIG_TINY_RCU) && is_idle_task(current)) {
+		rcu_irq_enter();
+		state.exit_rcu = true;
+	} else {
+		rcu_irq_enter_check_tick();
+	}
+
+	return state;
+}
+
+static void pipeline_exit_rcu(irqentry_state_t state)
+{
+	if (state.exit_rcu)
+		rcu_irq_exit();
+}
+
+void arch_do_IRQ_pipelined(struct irq_desc *desc)
+{
+	struct pt_regs *regs = raw_cpu_ptr(&irq_pipeline.tick_regs);
+	struct pt_regs *old_regs = set_irq_regs(regs);
+	irqentry_state_t state;
+
+	/* Emulate a kernel entry. */
+	state = pipeline_enter_rcu();
+	irq_enter_rcu();
+
+	if (desc->irq_data.domain == sipic_domain)
+		run_irq_on_irqstack_cond(do_sysvec_inband, desc, regs);
+	else
+		run_irq_on_irqstack_cond(desc->handle_irq, desc, regs);
+
+	irq_exit_rcu();
+	pipeline_exit_rcu(state);
+
+	set_irq_regs(old_regs);
+}
+
+void arch_handle_irq(struct pt_regs *regs, u8 vector, bool irq_movable)
+{
+	struct irq_desc *desc;
+	unsigned int irq;
+
+	if (vector >= FIRST_SYSTEM_VECTOR) {
+		irq = apicm_vector_irq(vector);
+	} else {
+		desc = __this_cpu_read(vector_irq[vector]);
+		if (unlikely(IS_ERR_OR_NULL(desc))) {
+			__ack_APIC_irq();
+
+			if (desc == VECTOR_UNUSED) {
+				pr_emerg_ratelimited("%s: %d.%u No irq handler for vector\n",
+						__func__, smp_processor_id(),
+						vector);
+			} else {
+				__this_cpu_write(vector_irq[vector], VECTOR_UNUSED);
+			}
+			return;
+		}
+		if (irqd_is_setaffinity_pending(&desc->irq_data)) {
+			raw_spin_lock(&desc->lock);
+			if (irq_movable)
+				irqd_clr_move_blocked(&desc->irq_data);
+			else
+				irqd_set_move_blocked(&desc->irq_data);
+			raw_spin_unlock(&desc->lock);
+		}
+		irq = irq_desc_get_irq(desc);
+	}
+
+	generic_pipeline_irq(irq, regs);
+}
+
+noinstr void arch_pipeline_entry(struct pt_regs *regs, u8 vector)
+{
+	struct irq_stage_data *prevd;
+	irqentry_state_t state;
+
+	/*
+	 * The tricky one: we distinguish the following cases:
+	 *
+	 * [1] entry from oob context, either kernel or user code was
+	 * preempted by the IRQ, the in-band (virtual) interrupt state
+	 * is 'undefined' (could be either stalled/unstalled, it is
+	 * not relevant).
+	 *
+	 * [2] entry from in-band context while the stage is stalled,
+	 * which means that some kernel code was preempted by the IRQ
+	 * since in-band user code cannot run with interrupts
+	 * (virtually) disabled.
+	 *
+	 * [3] entry from in-band context while the stage is
+	 * unstalled: the common case for IRQ entry. Kernel or user
+	 * code may have been preempted, we handle the event
+	 * identically.
+	 *
+	 * [1] and [2] are processed almost the same way, except for
+	 * one key aspect: the potential stage demotion of the
+	 * preempted task which originally entered [1] on the oob
+	 * stage, then left it for the in-band stage as a result of
+	 * handling the IRQ (such demotion normally happens during
+	 * handle_irq_pipelined_finish() if required). In this
+	 * particular case, we want to run the common IRQ epilogue
+	 * code before returning to user mode, so that all pending
+	 * in-band work (_TIF_WORK_*) is carried out for the task
+	 * which is about to exit kernel mode.
+	 *
+	 * If the task runs in-band at the exit point and a user mode
+	 * context was preempted, then case [2] is excluded by
+	 * definition, so we know for sure that we just observed a
+	 * stage demotion; therefore we have to run the work loop by
+	 * calling irqentry_exit_to_user_mode().
+	 */
+	if (unlikely(running_oob() || irqs_disabled())) {
+		instrumentation_begin();
+		prevd = handle_irq_pipelined_prepare(regs);
+		arch_handle_irq(regs, vector, false);
+		kvm_set_cpu_l1tf_flush_l1d();
+		handle_irq_pipelined_finish(prevd, regs);
+		if (running_inband() && user_mode(regs)) {
+			stall_inband_nocheck();
+			irqentry_exit_to_user_mode(regs);
+		}
+		instrumentation_end();
+		return;
+	}
+
+	/* In-band on entry, accepting interrupts. */
+	state = irqentry_enter(regs);
+	instrumentation_begin();
+	/* Prep for handling, switching oob. */
+	prevd = handle_irq_pipelined_prepare(regs);
+	arch_handle_irq(regs, vector, true);
+	kvm_set_cpu_l1tf_flush_l1d();
+	/* irqentry_enter() stalled the in-band stage. */
+	trace_hardirqs_on();
+	unstall_inband_nocheck();
+	handle_irq_pipelined_finish(prevd, regs);
+	stall_inband_nocheck();
+	trace_hardirqs_off();
+	instrumentation_end();
+	irqentry_exit(regs, state);
+}
+
+static int sipic_irq_map(struct irq_domain *d, unsigned int irq,
+			irq_hw_number_t hwirq)
+{
+	irq_set_percpu_devid(irq);
+	irq_set_chip_and_handler(irq, &sipic_chip, handle_apic_irq);
+
+	return 0;
+}
+
+static struct irq_domain_ops sipic_domain_ops = {
+	.map	= sipic_irq_map,
+};
+
+static void create_x86_apic_domain(void)
+{
+	sipic_domain = irq_domain_add_simple(NULL, NR_APIC_VECTORS,
+					     FIRST_SYSTEM_IRQ,
+					     &sipic_domain_ops, NULL);
+}
+
+#ifdef CONFIG_SMP
+
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(RESCHEDULE_OOB_VECTOR,
+				 sysvec_reschedule_oob_ipi)
+{ /* In-band handler is unused. */ }
+
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(TIMER_OOB_VECTOR,
+				 sysvec_timer_oob_ipi)
+{ /* In-band handler is unused. */ }
+
+void handle_irq_move_cleanup(struct irq_desc *desc)
+{
+	if (on_pipeline_entry()) {
+		/* 1. on receipt from hardware. */
+		__ack_APIC_irq();
+		handle_oob_irq(desc);
+	} else {
+		/* 2. in-band delivery. */
+		__sysvec_irq_move_cleanup(NULL);
+	}
+}
+
+static void smp_setup(void)
+{
+	int irq;
+
+	/*
+	 * The IRQ cleanup event must be pipelined to the inband
+	 * stage, so we need a valid IRQ descriptor for it. Since we
+	 * are still in the early boot stage on CPU0, we ask for a 1:1
+	 * mapping between the vector number and IRQ number, to make
+	 * things easier for us later on.
+	 */
+	irq = irq_alloc_desc_at(IRQ_MOVE_CLEANUP_VECTOR, 0);
+	WARN_ON(IRQ_MOVE_CLEANUP_VECTOR != irq);
+	/*
+	 * Set up the vector_irq[] mapping array for the boot CPU,
+	 * other CPUs will copy this entry when their APIC is going
+	 * online (see lapic_online()).
+	 */
+	per_cpu(vector_irq, 0)[irq] = irq_to_desc(irq);
+
+	irq_set_chip_and_handler(irq, &dummy_irq_chip,
+				handle_irq_move_cleanup);
+}
+
+#else
+
+static void smp_setup(void) { }
+
+#endif
+
+void __init arch_irq_pipeline_init(void)
+{
+	/*
+	 * Create an IRQ domain for mapping APIC system interrupts
+	 * (in-band and out-of-band), with fixed sirq numbers starting
+	 * from FIRST_SYSTEM_IRQ. Upon receipt of a system interrupt,
+	 * the corresponding sirq is injected into the pipeline.
+	 */
+	create_x86_apic_domain();
+
+	smp_setup();
+}
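As a usage note for the new file: out-of-band IPIs are raised through irq_send_oob_ipi() with a sirq obtained from the APIC mapping helpers. A hedged sketch, using only names introduced by this patch:

	/* Kick the oob reschedule event on all other online CPUs; the
	 * helper already excludes the sender via
	 * send_IPI_mask_allbutself(). */
	irq_send_oob_ipi(apicm_vector_irq(RESCHEDULE_OOB_VECTOR),
			 cpu_online_mask);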
diff --git a/kernel/arch/x86/kernel/irq_work.c b/kernel/arch/x86/kernel/irq_work.c
index 890d477..f2c8d14 100644
--- a/kernel/arch/x86/kernel/irq_work.c
+++ b/kernel/arch/x86/kernel/irq_work.c
@@ -14,7 +14,8 @@
 #include <linux/interrupt.h>
 
 #ifdef CONFIG_X86_LOCAL_APIC
-DEFINE_IDTENTRY_SYSVEC(sysvec_irq_work)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(IRQ_WORK_VECTOR,
+				 sysvec_irq_work)
 {
 	ack_APIC_irq();
 	trace_irq_work_entry(IRQ_WORK_VECTOR);
diff --git a/kernel/arch/x86/kernel/kvm.c b/kernel/arch/x86/kernel/kvm.c
index fe9babe..6988375 100644
--- a/kernel/arch/x86/kernel/kvm.c
+++ b/kernel/arch/x86/kernel/kvm.c
@@ -255,12 +255,15 @@
 {
 	u32 flags = kvm_read_and_reset_apf_flags();
 	irqentry_state_t state;
+	unsigned long irqflags;
 
 	if (!flags)
 		return false;
 
 	state = irqentry_enter(regs);
+	oob_trap_notify(X86_TRAP_PF, regs);
 	instrumentation_begin();
+	irqflags = hard_cond_local_irq_save();
 
 	/*
 	 * If the host managed to inject an async #PF into an interrupt
@@ -279,7 +282,9 @@
 		WARN_ONCE(1, "Unexpected async PF flags: %x\n", flags);
 	}
 
+	hard_cond_local_irq_restore(irqflags);
 	instrumentation_end();
+	oob_trap_unwind(X86_TRAP_PF, regs);
 	irqentry_exit(regs, state);
 	return true;
 }
@@ -478,6 +483,9 @@
 
 static void kvm_guest_cpu_offline(bool shutdown)
 {
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
 	kvm_disable_steal_time();
 	if (kvm_para_has_feature(KVM_FEATURE_PV_EOI))
 		wrmsrl(MSR_KVM_PV_EOI_EN, 0);
@@ -485,15 +493,16 @@
 	if (!shutdown)
 		apf_task_wake_all();
 	kvmclock_disable();
+	hard_local_irq_restore(flags);
 }
 
 static int kvm_cpu_online(unsigned int cpu)
 {
 	unsigned long flags;
 
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 	kvm_guest_cpu_init();
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 	return 0;
 }
 
@@ -907,7 +916,7 @@
 	if (in_nmi())
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	if (READ_ONCE(*ptr) != val)
 		goto out;
@@ -923,7 +932,7 @@
 		safe_halt();
 
 out:
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 #ifdef CONFIG_X86_32
diff --git a/kernel/arch/x86/kernel/nmi.c b/kernel/arch/x86/kernel/nmi.c
index 2ef961c..bf0766f 100644
--- a/kernel/arch/x86/kernel/nmi.c
+++ b/kernel/arch/x86/kernel/nmi.c
@@ -473,6 +473,10 @@
 static DEFINE_PER_CPU(unsigned long, nmi_cr2);
 static DEFINE_PER_CPU(unsigned long, nmi_dr7);
 
+/*
+ * IRQ pipeline: fixing up the virtual IRQ state makes no sense on
+ * NMI.
+ */
 DEFINE_IDTENTRY_RAW(exc_nmi)
 {
 	irqentry_state_t irq_state;
diff --git a/kernel/arch/x86/kernel/process.c b/kernel/arch/x86/kernel/process.c
index 5e17c39..a78266b 100644
--- a/kernel/arch/x86/kernel/process.c
+++ b/kernel/arch/x86/kernel/process.c
@@ -598,9 +598,9 @@
 	unsigned long flags;
 
 	/* Forced update. Make sure all relevant TIF flags are different */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	__speculation_ctrl_update(~tif, tif);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /* Called from seccomp/prctl update */
@@ -700,6 +700,9 @@
 
 /*
  * We use this if we don't have any better idle routine..
+ *
+ * IRQ pipeline: safe_halt() returns with hard irqs on, caller does
+ * not need to force enable.
  */
 void __cpuidle default_idle(void)
 {
@@ -722,7 +725,7 @@
 
 void stop_this_cpu(void *dummy)
 {
-	local_irq_disable();
+	hard_local_irq_disable();
 	/*
 	 * Remove this CPU:
 	 */
@@ -822,11 +825,14 @@
 		}
 
 		__monitor((void *)&current_thread_info()->flags, 0, 0);
-		if (!need_resched())
+		if (!need_resched()) {
 			__sti_mwait(0, 0);
-		else
+		} else {
+			hard_cond_local_irq_enable();
 			raw_local_irq_enable();
+		}
 	} else {
+		hard_cond_local_irq_enable();
 		raw_local_irq_enable();
 	}
 	__current_clr_polling();
diff --git a/kernel/arch/x86/kernel/process_64.c b/kernel/arch/x86/kernel/process_64.c
index ad3f82a..65d6171 100644
--- a/kernel/arch/x86/kernel/process_64.c
+++ b/kernel/arch/x86/kernel/process_64.c
@@ -272,9 +272,9 @@
 	unsigned long flags;
 
 	/* Interrupts need to be off for FSGSBASE */
-	local_irq_save(flags);
+	local_irq_save_full(flags);
 	save_fsgs(current);
-	local_irq_restore(flags);
+	local_irq_restore_full(flags);
 }
 #if IS_ENABLED(CONFIG_KVM)
 EXPORT_SYMBOL_GPL(current_save_fsgs);
@@ -410,9 +410,9 @@
 	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		local_irq_save_full(flags);
 		gsbase = __rdgsbase_inactive();
-		local_irq_restore(flags);
+		local_irq_restore_full(flags);
 	} else {
 		rdmsrl(MSR_KERNEL_GS_BASE, gsbase);
 	}
@@ -425,9 +425,9 @@
 	if (boot_cpu_has(X86_FEATURE_FSGSBASE)) {
 		unsigned long flags;
 
-		local_irq_save(flags);
+		local_irq_save_full(flags);
 		__wrgsbase_inactive(gsbase);
-		local_irq_restore(flags);
+		local_irq_restore_full(flags);
 	} else {
 		wrmsrl(MSR_KERNEL_GS_BASE, gsbase);
 	}
@@ -537,8 +537,17 @@
 	struct thread_struct *next = &next_p->thread;
 	int cpu = smp_processor_id();
 
+	/*
+	 * Dovetail: Switching context on the out-of-band stage is
+	 * legit, and we may have preempted an in-band (soft)irq
+	 * handler earlier. Since oob handlers never switch stack,
+	 * make sure to restrict the following test to in-band
+	 * callers.
+	 */
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_DEBUG_ENTRY) &&
-		     this_cpu_read(irq_count) != -1);
+		     running_inband() && this_cpu_read(irq_count) != -1);
+
+	WARN_ON_ONCE(dovetail_debug() && !hard_irqs_disabled());
 
 	if (!test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_prepare(prev_p, cpu);
@@ -719,6 +728,7 @@
 
 long do_arch_prctl_64(struct task_struct *task, int option, unsigned long arg2)
 {
+	unsigned long flags;
 	int ret = 0;
 
 	switch (option) {
@@ -726,7 +736,7 @@
 		if (unlikely(arg2 >= TASK_SIZE_MAX))
 			return -EPERM;
 
-		preempt_disable();
+		flags = hard_preempt_disable();
 		/*
 		 * ARCH_SET_GS has always overwritten the index
 		 * and the base. Zero is the most sensible value
@@ -747,7 +757,7 @@
 			task->thread.gsindex = 0;
 			x86_gsbase_write_task(task, arg2);
 		}
-		preempt_enable();
+		hard_preempt_enable(flags);
 		break;
 	}
 	case ARCH_SET_FS: {
@@ -758,7 +768,7 @@
 		if (unlikely(arg2 >= TASK_SIZE_MAX))
 			return -EPERM;
 
-		preempt_disable();
+		flags = hard_preempt_disable();
 		/*
 		 * Set the selector to 0 for the same reason
 		 * as %gs above.
@@ -776,7 +786,7 @@
 			task->thread.fsindex = 0;
 			x86_fsbase_write_task(task, arg2);
 		}
-		preempt_enable();
+		hard_preempt_enable(flags);
 		break;
 	}
 	case ARCH_GET_FS: {
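do_arch_prctl_64() now brackets the FS/GS base updates with hard_preempt_disable(). A minimal sketch of the pairing, with the semantics assumed (preemption off plus hard irqs off, flags handed back for the matching enable):

	unsigned long flags;

	flags = hard_preempt_disable();
	/* ... update per-CPU segment base state atomically vs. oob ... */
	hard_preempt_enable(flags);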
diff --git a/kernel/arch/x86/kernel/smp.c b/kernel/arch/x86/kernel/smp.c
index eff4ce3..c4684db 100644
--- a/kernel/arch/x86/kernel/smp.c
+++ b/kernel/arch/x86/kernel/smp.c
@@ -131,7 +131,7 @@
 /*
  * this function calls the 'stop' function on all other CPUs in the system.
  */
-DEFINE_IDTENTRY_SYSVEC(sysvec_reboot)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(REBOOT_VECTOR, sysvec_reboot)
 {
 	ack_APIC_irq();
 	cpu_emergency_vmxoff();
@@ -212,17 +212,18 @@
 			udelay(1);
 	}
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	disable_local_APIC();
 	mcheck_cpu_clear(this_cpu_ptr(&cpu_info));
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
  * Reschedule call back. KVM uses this interrupt to force a cpu out of
  * guest mode.
  */
-DEFINE_IDTENTRY_SYSVEC_SIMPLE(sysvec_reschedule_ipi)
+DEFINE_IDTENTRY_SYSVEC_SIMPLE_PIPELINED(RESCHEDULE_VECTOR,
+					sysvec_reschedule_ipi)
 {
 	ack_APIC_irq();
 	trace_reschedule_entry(RESCHEDULE_VECTOR);
@@ -231,7 +232,8 @@
 	trace_reschedule_exit(RESCHEDULE_VECTOR);
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_call_function)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_VECTOR,
+				 sysvec_call_function)
 {
 	ack_APIC_irq();
 	trace_call_function_entry(CALL_FUNCTION_VECTOR);
@@ -240,7 +242,8 @@
 	trace_call_function_exit(CALL_FUNCTION_VECTOR);
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_call_function_single)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(CALL_FUNCTION_SINGLE_VECTOR,
+				 sysvec_call_function_single)
 {
 	ack_APIC_irq();
 	trace_call_function_single_entry(CALL_FUNCTION_SINGLE_VECTOR);
diff --git a/kernel/arch/x86/kernel/smpboot.c b/kernel/arch/x86/kernel/smpboot.c
index e8e5515..e9a7b3c 100644
--- a/kernel/arch/x86/kernel/smpboot.c
+++ b/kernel/arch/x86/kernel/smpboot.c
@@ -258,7 +258,7 @@
 	x86_platform.nmi_init();
 
 	/* enable local interrupts */
-	local_irq_enable();
+	local_irq_enable_full();
 
 	x86_cpuinit.setup_percpu_clockev();
 
@@ -1133,7 +1133,6 @@
 {
 	int apicid = apic->cpu_present_to_apicid(cpu);
 	int cpu0_nmi_registered = 0;
-	unsigned long flags;
 	int err, ret = 0;
 
 	lockdep_assert_irqs_enabled();
@@ -1184,9 +1183,9 @@
 	 * Check TSC synchronization with the AP (keep irqs disabled
 	 * while doing so):
 	 */
-	local_irq_save(flags);
+	local_irq_disable_full();
 	check_tsc_sync_source(cpu);
-	local_irq_restore(flags);
+	local_irq_enable_full();
 
 	while (!cpu_online(cpu)) {
 		cpu_relax();
@@ -1654,7 +1653,7 @@
 	/*
 	 * With physical CPU hotplug, we should halt the cpu
 	 */
-	local_irq_disable();
+	local_irq_disable_full();
 }
 
 /**
diff --git a/kernel/arch/x86/kernel/time.c b/kernel/arch/x86/kernel/time.c
index e42faa7..874fd2e 100644
--- a/kernel/arch/x86/kernel/time.c
+++ b/kernel/arch/x86/kernel/time.c
@@ -54,7 +54,7 @@
  */
 static irqreturn_t timer_interrupt(int irq, void *dev_id)
 {
-	global_clock_event->event_handler(global_clock_event);
+	clockevents_handle_event(global_clock_event);
 	return IRQ_HANDLED;
 }
 
diff --git a/kernel/arch/x86/kernel/traps.c b/kernel/arch/x86/kernel/traps.c
index 2a39a2d..8ac9772 100644
--- a/kernel/arch/x86/kernel/traps.c
+++ b/kernel/arch/x86/kernel/traps.c
@@ -74,14 +74,22 @@
 
 static inline void cond_local_irq_enable(struct pt_regs *regs)
 {
-	if (regs->flags & X86_EFLAGS_IF)
-		local_irq_enable();
+	if (regs->flags & X86_EFLAGS_IF) {
+		if (running_inband())
+			local_irq_enable_full();
+		else
+			hard_local_irq_enable();
+	}
 }
 
 static inline void cond_local_irq_disable(struct pt_regs *regs)
 {
-	if (regs->flags & X86_EFLAGS_IF)
-		local_irq_disable();
+	if (regs->flags & X86_EFLAGS_IF) {
+		if (running_inband())
+			local_irq_disable_full();
+		else
+			hard_local_irq_disable();
+	}
 }
 
 __always_inline int is_valid_bugaddr(unsigned long addr)
@@ -148,6 +156,39 @@
 	}
 }
 
+static __always_inline
+bool mark_trap_entry(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_notify(trapnr, regs);
+
+	if (likely(running_inband())) {
+		hard_cond_local_irq_enable();
+		return true;
+	}
+
+	return false;
+}
+
+static __always_inline
+void mark_trap_exit(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_unwind(trapnr, regs);
+	hard_cond_local_irq_disable();
+}
+
+static __always_inline
+bool mark_trap_entry_raw(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_notify(trapnr, regs);
+	return running_inband();
+}
+
+static __always_inline
+void mark_trap_exit_raw(int trapnr, struct pt_regs *regs)
+{
+	oob_trap_unwind(trapnr, regs);
+}
+
 static void
 do_trap(int trapnr, int signr, char *str, struct pt_regs *regs,
 	long error_code, int sicode, void __user *addr)
@@ -171,12 +212,17 @@
 {
 	RCU_LOCKDEP_WARN(!rcu_is_watching(), "entry code didn't wake RCU");
 
+	if (!mark_trap_entry(trapnr, regs))
+		return;
+
 	if (notify_die(DIE_TRAP, str, regs, error_code, trapnr, signr) !=
 			NOTIFY_STOP) {
 		cond_local_irq_enable(regs);
 		do_trap(trapnr, signr, str, regs, error_code, sicode, addr);
 		cond_local_irq_disable(regs);
 	}
+
+	mark_trap_exit(trapnr, regs);
 }
 
 /*
@@ -230,14 +276,22 @@
 	 * Since we're emulating a CALL with exceptions, restore the interrupt
 	 * state to what it was at the exception site.
 	 */
-	if (regs->flags & X86_EFLAGS_IF)
-		raw_local_irq_enable();
+	if (regs->flags & X86_EFLAGS_IF) {
+		if (running_oob())
+			hard_local_irq_enable();
+		else
+			local_irq_enable_full();
+	}
 	if (report_bug(regs->ip, regs) == BUG_TRAP_TYPE_WARN) {
 		regs->ip += LEN_UD2;
 		handled = true;
 	}
-	if (regs->flags & X86_EFLAGS_IF)
-		raw_local_irq_disable();
+	if (regs->flags & X86_EFLAGS_IF) {
+		if (running_oob())
+			hard_local_irq_disable();
+		else
+			local_irq_disable_full();
+	}
 	instrumentation_end();
 
 	return handled;
@@ -251,15 +305,26 @@
 	 * We use UD2 as a short encoding for 'CALL __WARN', as such
 	 * handle it before exception entry to avoid recursive WARN
 	 * in case exception entry is the one triggering WARNs.
+	 *
+	 * Dovetail: handle_bug() may run oob, so we do not downgrade
+	 * in-band upon a failed __WARN assertion since it might have
+	 * tripped in a section of code which would not be happy to
+	 * switch stage. However, anything else should be notified to
+	 * the core, because the kernel execution might be about to
+	 * stop, so we'd need to switch in-band to get any output
+	 * before this happens.
 	 */
 	if (!user_mode(regs) && handle_bug(regs))
 		return;
 
-	state = irqentry_enter(regs);
-	instrumentation_begin();
-	handle_invalid_op(regs);
-	instrumentation_end();
-	irqentry_exit(regs, state);
+	if (mark_trap_entry_raw(X86_TRAP_UD, regs)) {
+		state = irqentry_enter(regs);
+		instrumentation_begin();
+		handle_invalid_op(regs);
+		instrumentation_end();
+		irqentry_exit(regs, state);
+		mark_trap_exit_raw(X86_TRAP_UD, regs);
+	}
 }
 
 DEFINE_IDTENTRY(exc_coproc_segment_overrun)
@@ -290,8 +355,11 @@
 {
 	char *str = "alignment check";
 
-	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
+	if (!mark_trap_entry(X86_TRAP_AC, regs))
 		return;
+
+	if (notify_die(DIE_TRAP, str, regs, error_code, X86_TRAP_AC, SIGBUS) == NOTIFY_STOP)
+		goto mark_exit;
 
 	if (!user_mode(regs))
 		die("Split lock detected\n", regs, error_code);
@@ -306,6 +374,9 @@
 
 out:
 	local_irq_disable();
+
+mark_exit:
+	mark_trap_exit(X86_TRAP_AC, regs);
 }
 
 #ifdef CONFIG_VMAP_STACK
@@ -341,6 +412,9 @@
  *
  * The 32bit #DF shim provides CR2 already as an argument. On 64bit it needs
  * to be read before doing anything else.
+ *
+ * Dovetail: do not even ask the companion core to try restoring the
+ * in-band stage on double-fault: this would be a lost cause.
  */
 DEFINE_IDTENTRY_DF(exc_double_fault)
 {
@@ -465,9 +539,12 @@
 
 DEFINE_IDTENTRY(exc_bounds)
 {
+	if (!mark_trap_entry(X86_TRAP_BR, regs))
+		return;
+
 	if (notify_die(DIE_TRAP, "bounds", regs, 0,
 			X86_TRAP_BR, SIGSEGV) == NOTIFY_STOP)
-		return;
+		goto out;
 	cond_local_irq_enable(regs);
 
 	if (!user_mode(regs))
@@ -476,6 +553,8 @@
 	do_trap(X86_TRAP_BR, SIGSEGV, "bounds", regs, 0, 0, NULL);
 
 	cond_local_irq_disable(regs);
+out:
+	mark_trap_exit(X86_TRAP_BR, regs);
 }
 
 enum kernel_gp_hint {
@@ -570,9 +649,9 @@
 	}
 
 	if (v8086_mode(regs)) {
-		local_irq_enable();
+		local_irq_enable_full();
 		handle_vm86_fault((struct kernel_vm86_regs *) regs, error_code);
-		local_irq_disable();
+		local_irq_disable_full();
 		return;
 	}
 
@@ -585,9 +664,12 @@
 		tsk->thread.error_code = error_code;
 		tsk->thread.trap_nr = X86_TRAP_GP;
 
+		if (!mark_trap_entry(X86_TRAP_GP, regs))
+			goto exit;
+
 		show_signal(tsk, SIGSEGV, "", desc, regs, error_code);
 		force_sig(SIGSEGV);
-		goto exit;
+		goto mark_exit;
 	}
 
 	if (fixup_exception(regs, X86_TRAP_GP, error_code, 0))
@@ -605,9 +687,12 @@
 	    kprobe_fault_handler(regs, X86_TRAP_GP))
 		goto exit;
 
+	if (!mark_trap_entry(X86_TRAP_GP, regs))
+		goto exit;
+
 	ret = notify_die(DIE_GPF, desc, regs, error_code, X86_TRAP_GP, SIGSEGV);
 	if (ret == NOTIFY_STOP)
-		goto exit;
+		goto mark_exit;
 
 	if (error_code)
 		snprintf(desc, sizeof(desc), "segment-related " GPFSTR);
@@ -629,6 +714,8 @@
 
 	die_addr(desc, regs, error_code, gp_addr);
 
+mark_exit:
+	mark_trap_exit(X86_TRAP_GP, regs);
 exit:
 	cond_local_irq_disable(regs);
 }
@@ -673,6 +760,9 @@
 	if (poke_int3_handler(regs))
 		return;
 
+	if (!mark_trap_entry_raw(X86_TRAP_BP, regs))
+		return;
+
 	/*
 	 * irqentry_enter_from_user_mode() uses static_branch_{,un}likely()
 	 * and therefore can trigger INT3, hence poke_int3_handler() must
@@ -695,6 +785,8 @@
 		instrumentation_end();
 		irqentry_nmi_exit(regs, irq_state);
 	}
+
+	mark_trap_exit_raw(X86_TRAP_BP, regs);
 }
 
 #ifdef CONFIG_X86_64
@@ -999,7 +1091,7 @@
 		goto out;
 
 	/* It's safe to allow irq's after DR6 has been saved */
-	local_irq_enable();
+	local_irq_enable_full();
 
 	if (v8086_mode(regs)) {
 		handle_vm86_trap((struct kernel_vm86_regs *)regs, 0, X86_TRAP_DB);
@@ -1012,7 +1104,7 @@
 		send_sigtrap(regs, 0, get_si_code(dr6));
 
 out_irq:
-	local_irq_disable();
+	local_irq_disable_full();
 out:
 	instrumentation_end();
 	irqentry_exit_to_user_mode(regs);
@@ -1022,13 +1114,19 @@
 /* IST stack entry */
 DEFINE_IDTENTRY_DEBUG(exc_debug)
 {
-	exc_debug_kernel(regs, debug_read_clear_dr6());
+	if (mark_trap_entry_raw(X86_TRAP_DB, regs)) {
+		exc_debug_kernel(regs, debug_read_clear_dr6());
+		mark_trap_exit_raw(X86_TRAP_DB, regs);
+	}
 }
 
 /* User entry, runs on regular task stack */
 DEFINE_IDTENTRY_DEBUG_USER(exc_debug)
 {
-	exc_debug_user(regs, debug_read_clear_dr6());
+	if (mark_trap_entry_raw(X86_TRAP_DB, regs)) {
+		exc_debug_user(regs, debug_read_clear_dr6());
+		mark_trap_exit_raw(X86_TRAP_DB, regs);
+	}
 }
 #else
 /* 32 bit does not have separate entry points. */
@@ -1062,13 +1160,16 @@
 		if (fixup_exception(regs, trapnr, 0, 0))
 			goto exit;
 
+		if (!mark_trap_entry(trapnr, regs))
+			goto exit;
+
 		task->thread.error_code = 0;
 		task->thread.trap_nr = trapnr;
 
 		if (notify_die(DIE_TRAP, str, regs, 0, trapnr,
 			       SIGFPE) != NOTIFY_STOP)
 			die(str, regs, 0);
-		goto exit;
+		goto mark_exit;
 	}
 
 	/*
@@ -1084,8 +1185,13 @@
 	if (!si_code)
 		goto exit;
 
+	if (!mark_trap_entry(trapnr, regs))
+		goto exit;
+
 	force_sig_fault(SIGFPE, si_code,
 			(void __user *)uprobe_get_trap_addr(regs));
+mark_exit:
+	mark_trap_exit(trapnr, regs);
 exit:
 	cond_local_irq_disable(regs);
 }
@@ -1158,7 +1264,10 @@
 		 * to kill the task than getting stuck in a never-ending
 		 * loop of #NM faults.
 		 */
-		die("unexpected #NM exception", regs, 0);
+		if (mark_trap_entry(X86_TRAP_NM, regs)) {
+			die("unexpected #NM exception", regs, 0);
+			mark_trap_exit(X86_TRAP_NM, regs);
+		}
 	}
 }
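The recurring pattern in traps.c is the mark_trap_entry()/mark_trap_exit() bracket. A condensed sketch of the protocol, lifted from the changes above with a hypothetical trap number:

	DEFINE_IDTENTRY(exc_sample)	/* hypothetical handler */
	{
		if (!mark_trap_entry(X86_TRAP_XX, regs))
			return;	/* running oob: companion core owns the fault */

		/* ... regular in-band handling, hard irqs on ... */

		mark_trap_exit(X86_TRAP_XX, regs);
	}

The _raw variants skip toggling hard irqs for handlers such as #DB, #BP and #UD, which manage the interrupt state themselves.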
 
diff --git a/kernel/arch/x86/kernel/tsc.c b/kernel/arch/x86/kernel/tsc.c
index f9f1b45..2b59b30 100644
--- a/kernel/arch/x86/kernel/tsc.c
+++ b/kernel/arch/x86/kernel/tsc.c
@@ -131,7 +131,10 @@
 {
 	unsigned long long ns_now;
 	struct cyc2ns_data data;
+	unsigned long flags;
 	struct cyc2ns *c2n;
+
+	flags = hard_cond_local_irq_save();
 
 	ns_now = cycles_2_ns(tsc_now);
 
@@ -163,6 +166,8 @@
 	c2n->data[0] = data;
 	raw_write_seqcount_latch(&c2n->seq);
 	c2n->data[1] = data;
+
+	hard_cond_local_irq_restore(flags);
 }
 
 static void set_cyc2ns_scale(unsigned long khz, int cpu, unsigned long long tsc_now)
@@ -759,11 +764,11 @@
 		 * calibration, which will take at least 50ms, and
 		 * read the end value.
 		 */
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		tsc1 = tsc_read_refs(&ref1, hpet);
 		tsc_pit_khz = pit_calibrate_tsc(latch, ms, loopmin);
 		tsc2 = tsc_read_refs(&ref2, hpet);
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 
 		/* Pick the lowest PIT TSC calibration so far */
 		tsc_pit_min = min(tsc_pit_min, tsc_pit_khz);
@@ -872,9 +877,9 @@
 	if (!fast_calibrate)
 		fast_calibrate = cpu_khz_from_msr();
 	if (!fast_calibrate) {
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		fast_calibrate = quick_pit_calibrate();
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 	}
 	return fast_calibrate;
 }
@@ -942,7 +947,7 @@
 	if (!sched_clock_stable())
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	/*
 	 * We're coming out of suspend, there's no concurrency yet; don't
@@ -960,7 +965,7 @@
 		per_cpu(cyc2ns.data[1].cyc2ns_offset, cpu) = offset;
 	}
 
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 #ifdef CONFIG_CPU_FREQ
@@ -1411,6 +1416,8 @@
 	if (boot_cpu_has(X86_FEATURE_NONSTOP_TSC_S3))
 		clocksource_tsc.flags |= CLOCK_SOURCE_SUSPEND_NONSTOP;
 
+	clocksource_tsc.vdso_type = CLOCKSOURCE_VDSO_ARCHITECTED;
+
 	/*
 	 * When TSC frequency is known (retrieved via MSR or CPUID), we skip
 	 * the refined calibration and directly register it as a clocksource.
diff --git a/kernel/arch/x86/kernel/tsc_sync.c b/kernel/arch/x86/kernel/tsc_sync.c
index 9236600..883c0df 100644
--- a/kernel/arch/x86/kernel/tsc_sync.c
+++ b/kernel/arch/x86/kernel/tsc_sync.c
@@ -367,6 +367,8 @@
 		atomic_set(&test_runs, 1);
 	else
 		atomic_set(&test_runs, 3);
+
+	hard_cond_local_irq_disable();
 retry:
 	/*
 	 * Wait for the target to start or to skip the test:
@@ -448,6 +450,8 @@
 	if (unsynchronized_tsc())
 		return;
 
+	hard_cond_local_irq_disable();
+
 	/*
 	 * Store, verify and sanitize the TSC adjust register. If
 	 * successful skip the test.
diff --git a/kernel/arch/x86/kvm/emulate.c b/kernel/arch/x86/kvm/emulate.c
index 63efccc..4301a49 100644
--- a/kernel/arch/x86/kvm/emulate.c
+++ b/kernel/arch/x86/kvm/emulate.c
@@ -1125,23 +1125,27 @@
 	}
 }
 
-static void emulator_get_fpu(void)
+static unsigned long emulator_get_fpu(void)
 {
-	fpregs_lock();
+	unsigned long flags = fpregs_lock();
 
 	fpregs_assert_state_consistent();
 	if (test_thread_flag(TIF_NEED_FPU_LOAD))
 		switch_fpu_return();
+
+	return flags;
 }
 
-static void emulator_put_fpu(void)
+static void emulator_put_fpu(unsigned long flags)
 {
-	fpregs_unlock();
+	fpregs_unlock(flags);
 }
 
 static void read_sse_reg(sse128_t *data, int reg)
 {
-	emulator_get_fpu();
+	unsigned long flags;
+
+	flags = emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movdqa %%xmm0, %0" : "=m"(*data)); break;
 	case 1: asm("movdqa %%xmm1, %0" : "=m"(*data)); break;
@@ -1163,12 +1167,14 @@
 #endif
 	default: BUG();
 	}
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 }
 
 static void write_sse_reg(sse128_t *data, int reg)
 {
-	emulator_get_fpu();
+	unsigned long flags;
+
+	flags = emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movdqa %0, %%xmm0" : : "m"(*data)); break;
 	case 1: asm("movdqa %0, %%xmm1" : : "m"(*data)); break;
@@ -1190,12 +1196,14 @@
 #endif
 	default: BUG();
 	}
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 }
 
 static void read_mmx_reg(u64 *data, int reg)
 {
-	emulator_get_fpu();
+	unsigned long flags;
+
+	flags = emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movq %%mm0, %0" : "=m"(*data)); break;
 	case 1: asm("movq %%mm1, %0" : "=m"(*data)); break;
@@ -1207,12 +1215,14 @@
 	case 7: asm("movq %%mm7, %0" : "=m"(*data)); break;
 	default: BUG();
 	}
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 }
 
 static void write_mmx_reg(u64 *data, int reg)
 {
-	emulator_get_fpu();
+	unsigned long flags;
+
+	flags = emulator_get_fpu();
 	switch (reg) {
 	case 0: asm("movq %0, %%mm0" : : "m"(*data)); break;
 	case 1: asm("movq %0, %%mm1" : : "m"(*data)); break;
@@ -1224,30 +1234,33 @@
 	case 7: asm("movq %0, %%mm7" : : "m"(*data)); break;
 	default: BUG();
 	}
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 }
 
 static int em_fninit(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
+
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	emulator_get_fpu();
+	flags = emulator_get_fpu();
 	asm volatile("fninit");
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 	return X86EMUL_CONTINUE;
 }
 
 static int em_fnstcw(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
 	u16 fcw;
 
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	emulator_get_fpu();
+	flags = emulator_get_fpu();
 	asm volatile("fnstcw %0": "+m"(fcw));
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 
 	ctxt->dst.val = fcw;
 
@@ -1256,14 +1269,15 @@
 
 static int em_fnstsw(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
 	u16 fsw;
 
 	if (ctxt->ops->get_cr(ctxt, 0) & (X86_CR0_TS | X86_CR0_EM))
 		return emulate_nm(ctxt);
 
-	emulator_get_fpu();
+	flags = emulator_get_fpu();
 	asm volatile("fnstsw %0": "+m"(fsw));
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 
 	ctxt->dst.val = fsw;
 
@@ -4182,17 +4196,18 @@
 static int em_fxsave(struct x86_emulate_ctxt *ctxt)
 {
 	struct fxregs_state fx_state;
+	unsigned long flags;
 	int rc;
 
 	rc = check_fxsr(ctxt);
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	emulator_get_fpu();
+	flags = emulator_get_fpu();
 
 	rc = asm_safe("fxsave %[fx]", , [fx] "+m"(fx_state));
 
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
@@ -4224,6 +4239,7 @@
 static int em_fxrstor(struct x86_emulate_ctxt *ctxt)
 {
 	struct fxregs_state fx_state;
+	unsigned long flags;
 	int rc;
 	size_t size;
 
@@ -4236,7 +4252,7 @@
 	if (rc != X86EMUL_CONTINUE)
 		return rc;
 
-	emulator_get_fpu();
+	flags = emulator_get_fpu();
 
 	if (size < __fxstate_size(16)) {
 		rc = fxregs_fixup(&fx_state, size);
@@ -4253,7 +4269,7 @@
 		rc = asm_safe("fxrstor %[fx]", : [fx] "m"(fx_state));
 
 out:
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 
 	return rc;
 }
@@ -5498,11 +5514,12 @@
 
 static int flush_pending_x87_faults(struct x86_emulate_ctxt *ctxt)
 {
+	unsigned long flags;
 	int rc;
 
-	emulator_get_fpu();
+	flags = emulator_get_fpu();
 	rc = asm_safe("fwait");
-	emulator_put_fpu();
+	emulator_put_fpu(flags);
 
 	if (unlikely(rc != X86EMUL_CONTINUE))
 		return emulate_exception(ctxt, MF_VECTOR, 0, false);
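All emulator FPU sections now carry the flags returned by fpregs_lock(). A sketch of the assumed contract under Dovetail (fpregs_lock() additionally hard-disables irqs and hands back the state to restore):

	unsigned long flags;

	flags = fpregs_lock();
	/* ... touch FPU registers; safe against oob preemption ... */
	fpregs_unlock(flags);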
diff --git a/kernel/arch/x86/kvm/vmx/vmx.c b/kernel/arch/x86/kvm/vmx/vmx.c
index af6742d..4554a63 100644
--- a/kernel/arch/x86/kvm/vmx/vmx.c
+++ b/kernel/arch/x86/kvm/vmx/vmx.c
@@ -752,14 +752,15 @@
 static int vmx_set_guest_uret_msr(struct vcpu_vmx *vmx,
 				  struct vmx_uret_msr *msr, u64 data)
 {
+	unsigned long flags;
 	int ret = 0;
 
 	u64 old_msr_data = msr->data;
 	msr->data = data;
 	if (msr - vmx->guest_uret_msrs < vmx->nr_active_uret_msrs) {
-		preempt_disable();
+		flags = hard_preempt_disable();
 		ret = kvm_set_user_return_msr(msr->slot, msr->data, msr->mask);
-		preempt_enable();
+		hard_preempt_enable(flags);
 		if (ret)
 			msr->data = old_msr_data;
 	}
@@ -1383,19 +1384,23 @@
 #ifdef CONFIG_X86_64
 static u64 vmx_read_guest_kernel_gs_base(struct vcpu_vmx *vmx)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	flags = hard_preempt_disable();
 	if (vmx->guest_state_loaded)
 		rdmsrl(MSR_KERNEL_GS_BASE, vmx->msr_guest_kernel_gs_base);
-	preempt_enable();
+	hard_preempt_enable(flags);
 	return vmx->msr_guest_kernel_gs_base;
 }
 
 static void vmx_write_guest_kernel_gs_base(struct vcpu_vmx *vmx, u64 data)
 {
-	preempt_disable();
+	unsigned long flags;
+
+	flags = hard_preempt_disable();
 	if (vmx->guest_state_loaded)
 		wrmsrl(MSR_KERNEL_GS_BASE, data);
-	preempt_enable();
+	hard_preempt_enable(flags);
 	vmx->msr_guest_kernel_gs_base = data;
 }
 #endif
@@ -1795,6 +1800,7 @@
  */
 static void setup_msrs(struct vcpu_vmx *vmx)
 {
+	hard_cond_local_irq_disable();
 	vmx->guest_uret_msrs_loaded = false;
 	vmx->nr_active_uret_msrs = 0;
 #ifdef CONFIG_X86_64
@@ -1815,6 +1821,7 @@
 		vmx_setup_uret_msr(vmx, MSR_TSC_AUX);
 
 	vmx_setup_uret_msr(vmx, MSR_IA32_TSX_CTRL);
+	hard_cond_local_irq_enable();
 
 	if (cpu_has_vmx_msr_bitmap())
 		vmx_update_msr_bitmap(&vmx->vcpu);
@@ -2050,6 +2057,7 @@
 	u32 msr_index = msr_info->index;
 	u64 data = msr_info->data;
 	u32 index;
+	unsigned long flags;
 
 	switch (msr_index) {
 	case MSR_EFER:
@@ -2289,11 +2297,22 @@
 
 	default:
 	find_uret_msr:
+		/*
+		 * Guest MSRs may be activated independently from
+		 * vcpu_run(): rely on the notifier for restoring them
+		 * upon preemption by the companion core, right before
+		 * the current CPU switches to out-of-band scheduling
+		 * (see dovetail_context_switch()).
+		 */
 		msr = vmx_find_uret_msr(vmx, msr_index);
-		if (msr)
+		if (msr) {
+			flags = hard_cond_local_irq_save();
+			inband_enter_guest(vcpu);
 			ret = vmx_set_guest_uret_msr(vmx, msr, data);
-		else
+			hard_cond_local_irq_restore(flags);
+		} else {
 			ret = kvm_set_msr_common(vcpu, msr_info);
+		}
 	}
 
 	/* FB_CLEAR may have changed, also update the FB_CLEAR_DIS behavior */
@@ -7056,7 +7075,9 @@
 	vmx_vcpu_load(vcpu, cpu);
 	vcpu->cpu = cpu;
 	init_vmcs(vmx);
+	hard_cond_local_irq_disable();
 	vmx_vcpu_put(vcpu);
+	hard_cond_local_irq_enable();
 	put_cpu();
 	if (cpu_need_virtualize_apic_accesses(vcpu)) {
 		err = alloc_apic_access_page(vcpu->kvm);
diff --git a/kernel/arch/x86/kvm/x86.c b/kernel/arch/x86/kvm/x86.c
index 23d7c56..7928751 100644
--- a/kernel/arch/x86/kvm/x86.c
+++ b/kernel/arch/x86/kvm/x86.c
@@ -178,6 +178,7 @@
 struct kvm_user_return_msrs {
 	struct user_return_notifier urn;
 	bool registered;
+	bool dirty;
 	struct kvm_user_return_msr_values {
 		u64 host;
 		u64 curr;
@@ -295,12 +296,29 @@
 		vcpu->arch.apf.gfns[i] = ~0;
 }
 
+static void __kvm_on_user_return(struct kvm_user_return_msrs *msrs)
+{
+	struct kvm_user_return_msr_values *values;
+	unsigned slot;
+
+	if (!msrs->dirty)
+		return;
+
+	for (slot = 0; slot < user_return_msrs_global.nr; ++slot) {
+		values = &msrs->values[slot];
+		if (values->host != values->curr) {
+			wrmsrl(user_return_msrs_global.msrs[slot], values->host);
+			values->curr = values->host;
+		}
+	}
+
+	msrs->dirty = false;
+}
+
 static void kvm_on_user_return(struct user_return_notifier *urn)
 {
-	unsigned slot;
 	struct kvm_user_return_msrs *msrs
 		= container_of(urn, struct kvm_user_return_msrs, urn);
-	struct kvm_user_return_msr_values *values;
 	unsigned long flags;
 
 	/*
@@ -313,13 +331,10 @@
 		user_return_notifier_unregister(urn);
 	}
 	local_irq_restore(flags);
-	for (slot = 0; slot < user_return_msrs_global.nr; ++slot) {
-		values = &msrs->values[slot];
-		if (values->host != values->curr) {
-			wrmsrl(user_return_msrs_global.msrs[slot], values->host);
-			values->curr = values->host;
-		}
-	}
+	flags = hard_cond_local_irq_save();
+	__kvm_on_user_return(msrs);
+	hard_cond_local_irq_restore(flags);
+	inband_exit_guest();
 }
 
 int kvm_probe_user_return_msr(u32 msr)
@@ -374,6 +389,7 @@
 	if (err)
 		return 1;
 
+	msrs->dirty = true;
 	msrs->values[slot].curr = value;
 	if (!msrs->registered) {
 		msrs->urn.on_user_return = kvm_on_user_return;
@@ -4072,10 +4088,22 @@
 
 void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
 {
+	struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
+	unsigned long flags;
 	int idx;
 
 	if (vcpu->preempted)
 		vcpu->arch.preempted_in_kernel = !kvm_x86_ops.get_cpl(vcpu);
+
+	flags = hard_cond_local_irq_save();
+	/*
+	 * Skip steal time accounting from the out-of-band stage since
+	 * this is oob-unsafe. We leave it to the next call from the
+	 * inband stage.
+	 */
+	if (running_oob())
+		goto skip_steal_time_update;
 
 	/*
 	 * Disable page faults because we're in atomic context here.
@@ -4094,6 +4122,7 @@
 	kvm_steal_time_set_preempted(vcpu);
 	srcu_read_unlock(&vcpu->kvm->srcu, idx);
 	pagefault_enable();
+skip_steal_time_update:
 	kvm_x86_ops.vcpu_put(vcpu);
 	vcpu->arch.last_host_tsc = rdtsc();
 	/*
@@ -4102,7 +4131,40 @@
 	 * guest. do_debug expects dr6 to be cleared after it runs, do the same.
 	 */
 	set_debugreg(0, 6);
+
+	inband_set_vcpu_release_state(vcpu, false);
+	if (!msrs->dirty)
+		inband_exit_guest();
+
+	hard_cond_local_irq_restore(flags);
 }
+
+#ifdef CONFIG_DOVETAIL
+/* hard irqs off. */
+void kvm_handle_oob_switch(struct kvm_oob_notifier *nfy)
+{
+	struct kvm_user_return_msrs *msrs = this_cpu_ptr(user_return_msrs);
+	struct kvm_vcpu *vcpu;
+
+	vcpu = container_of(nfy, struct kvm_vcpu, oob_notifier);
+	/*
+	 * If user_return MSRs were still active when leaving
+	 * kvm_arch_vcpu_put(), inband_exit_guest() was not invoked,
+	 * so we might get called later on, before kvm_on_user_return()
+	 * has had a chance to run, if a switch to out-of-band
+	 * scheduling sneaks in meanwhile.  Prevent kvm_arch_vcpu_put()
+	 * from running twice in such a case by checking ->put_vcpu
+	 * from the notifier block.
+	 */
+	if (nfy->put_vcpu)
+		kvm_arch_vcpu_put(vcpu);
+
+	__kvm_on_user_return(msrs);
+	inband_exit_guest();
+}
+#else
+#define kvm_handle_oob_switch  NULL
+#endif
 
 static int kvm_vcpu_ioctl_get_lapic(struct kvm_vcpu *vcpu,
 				    struct kvm_lapic_state *s)
@@ -9142,6 +9204,10 @@
 	}
 
 	preempt_disable();
+	local_irq_disable_full();
+
+	inband_enter_guest(vcpu);
+	inband_set_vcpu_release_state(vcpu, true);
 
 	kvm_x86_ops.prepare_guest_switch(vcpu);
 
@@ -9150,7 +9216,6 @@
 	 * IPI are then delayed after guest entry, which ensures that they
 	 * result in virtual interrupt delivery.
 	 */
-	local_irq_disable();
 	vcpu->mode = IN_GUEST_MODE;
 
 	srcu_read_unlock(&vcpu->kvm->srcu, vcpu->srcu_idx);
@@ -9179,7 +9244,7 @@
 	if (kvm_vcpu_exit_request(vcpu)) {
 		vcpu->mode = OUTSIDE_GUEST_MODE;
 		smp_wmb();
-		local_irq_enable();
+		local_irq_enable_full();
 		preempt_enable();
 		vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
 		r = 1;
@@ -9251,9 +9316,9 @@
 	 * stat.exits increment will do nicely.
 	 */
 	kvm_before_interrupt(vcpu);
-	local_irq_enable();
+	local_irq_enable_full();
 	++vcpu->stat.exits;
-	local_irq_disable();
+	local_irq_disable_full();
 	kvm_after_interrupt(vcpu);
 
 	/*
@@ -9273,7 +9338,7 @@
 		}
 	}
 
-	local_irq_enable();
+	local_irq_enable_full();
 	preempt_enable();
 
 	vcpu->srcu_idx = srcu_read_lock(&vcpu->kvm->srcu);
@@ -9487,7 +9552,9 @@
 /* Swap (qemu) user FPU context for the guest FPU context. */
 static void kvm_load_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	fpregs_lock();
+	unsigned long flags;
+
+	flags = fpregs_lock();
 
 	kvm_save_current_fpu(vcpu->arch.user_fpu);
 
@@ -9496,7 +9563,7 @@
 				~XFEATURE_MASK_PKRU);
 
 	fpregs_mark_activate();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	trace_kvm_fpu(1);
 }
@@ -9504,14 +9571,16 @@
 /* When vcpu_run ends, restore user space FPU context. */
 static void kvm_put_guest_fpu(struct kvm_vcpu *vcpu)
 {
-	fpregs_lock();
+	unsigned long flags;
+
+	flags = fpregs_lock();
 
 	kvm_save_current_fpu(vcpu->arch.guest_fpu);
 
 	copy_kernel_to_fpregs(&vcpu->arch.user_fpu->state);
 
 	fpregs_mark_activate();
-	fpregs_unlock();
+	fpregs_unlock(flags);
 
 	++vcpu->stat.fpu_reload;
 	trace_kvm_fpu(0);
@@ -10189,6 +10258,7 @@
 	if (r)
 		goto free_guest_fpu;
 
+	inband_init_vcpu(vcpu, kvm_handle_oob_switch);
 	vcpu->arch.arch_capabilities = kvm_get_arch_capabilities();
 	vcpu->arch.msr_platform_info = MSR_PLATFORM_INFO_CPUID_FAULT;
 	kvm_vcpu_mtrr_init(vcpu);
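Putting the x86.c pieces together, the ordering on a switch to out-of-band scheduling looks as follows (reconstructed from kvm_handle_oob_switch() and the dirty-MSR bookkeeping above):

	/*
	 * dovetail_context_switch()
	 *   kvm_handle_oob_switch(nfy)
	 *     kvm_arch_vcpu_put(vcpu)      if nfy->put_vcpu is still set
	 *     __kvm_on_user_return(msrs)   restore host MSRs when dirty
	 *     inband_exit_guest()
	 */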
diff --git a/kernel/arch/x86/lib/usercopy.c b/kernel/arch/x86/lib/usercopy.c
index 3f435d7..1cdb806 100644
--- a/kernel/arch/x86/lib/usercopy.c
+++ b/kernel/arch/x86/lib/usercopy.c
@@ -18,7 +18,7 @@
 {
 	unsigned long ret;
 
-	if (__range_not_ok(from, n, TASK_SIZE))
+	if (running_oob() || __range_not_ok(from, n, TASK_SIZE))
 		return n;
 
 	if (!nmi_uaccess_okay())
diff --git a/kernel/arch/x86/mm/fault.c b/kernel/arch/x86/mm/fault.c
index e9afbf8..a4d3b18 100644
--- a/kernel/arch/x86/mm/fault.c
+++ b/kernel/arch/x86/mm/fault.c
@@ -19,6 +19,7 @@
 #include <linux/uaccess.h>		/* faulthandler_disabled()	*/
 #include <linux/efi.h>			/* efi_recover_from_page_fault()*/
 #include <linux/mm_types.h>
+#include <linux/irqstage.h>
 
 #include <asm/cpufeature.h>		/* boot_cpu_has, ...		*/
 #include <asm/traps.h>			/* dotraplinkage, ...		*/
@@ -656,7 +657,7 @@
 		 * the below recursive fault logic only apply to a faults from
 		 * task context.
 		 */
-		if (in_interrupt())
+		if (running_oob() || in_interrupt())
 			return;
 
 		/*
@@ -666,10 +667,12 @@
 		 * faulting through the emulate_vsyscall() logic.
 		 */
 		if (current->thread.sig_on_uaccess_err && signal) {
+			oob_trap_notify(X86_TRAP_PF, regs);
 			set_signal_archinfo(address, error_code);
 
 			/* XXX: hwpoison faults will set the wrong code. */
 			force_sig_fault(signal, si_code, (void __user *)address);
+			oob_trap_unwind(X86_TRAP_PF, regs);
 		}
 
 		/*
@@ -677,6 +680,12 @@
 		 */
 		return;
 	}
+
+	/*
+	 * Do not bother unwinding the notification context on
+	 * CPU/firmware/kernel bug.
+	 */
+	oob_trap_notify(X86_TRAP_PF, regs);
 
 #ifdef CONFIG_VMAP_STACK
 	/*
@@ -796,6 +805,55 @@
 	return unlikely((vaddr & PAGE_MASK) == VSYSCALL_ADDR);
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+static inline void cond_reenable_irqs_user(void)
+{
+	hard_local_irq_enable();
+
+	if (running_inband())
+		local_irq_enable();
+}
+
+static inline void cond_reenable_irqs_kernel(irqentry_state_t state,
+					struct pt_regs *regs)
+{
+	if (regs->flags & X86_EFLAGS_IF) {
+		hard_local_irq_enable();
+		if (state.stage_info == IRQENTRY_INBAND_UNSTALLED)
+			local_irq_enable();
+	}
+}
+
+static inline void cond_disable_irqs(void)
+{
+	hard_local_irq_disable();
+
+	if (running_inband())
+		local_irq_disable();
+}
+
+#else  /* !CONFIG_IRQ_PIPELINE */
+
+static inline void cond_reenable_irqs_user(void)
+{
+	local_irq_enable();
+}
+
+static inline void cond_reenable_irqs_kernel(irqentry_state_t state,
+					struct pt_regs *regs)
+{
+	if (regs->flags & X86_EFLAGS_IF)
+		local_irq_enable();
+}
+
+static inline void cond_disable_irqs(void)
+{
+	local_irq_disable();
+}
+
+#endif  /* !CONFIG_IRQ_PIPELINE */
+
 static void
 __bad_area_nosemaphore(struct pt_regs *regs, unsigned long error_code,
 		       unsigned long address, u32 pkey, int si_code)
@@ -807,7 +865,7 @@
 		/*
 		 * It's possible to have interrupts off here:
 		 */
-		local_irq_enable();
+		cond_reenable_irqs_user();
 
 		/*
 		 * Valid to do another page fault here because this one came
@@ -818,6 +876,12 @@
 
 		if (is_errata100(regs, address))
 			return;
+
+		oob_trap_notify(X86_TRAP_PF, regs);
+		if (!running_inband()) {
+			local_irq_disable_full();
+			return;
+		}
 
 		/*
 		 * To avoid leaking information about the kernel page table
@@ -837,7 +901,9 @@
 
 		force_sig_fault(SIGSEGV, si_code, (void __user *)address);
 
-		local_irq_disable();
+		local_irq_disable_full();
+
+		oob_trap_unwind(X86_TRAP_PF, regs);
 
 		return;
 	}
@@ -1225,7 +1291,8 @@
 static inline
 void do_user_addr_fault(struct pt_regs *regs,
 			unsigned long hw_error_code,
-			unsigned long address)
+			unsigned long address,
+			irqentry_state_t state)
 {
 	struct vm_area_struct *vma = NULL;
 	struct task_struct *tsk;
@@ -1266,7 +1333,7 @@
 	 * If we're in an interrupt, have no user context or are running
 	 * in a region with pagefaults disabled then we must not take the fault
 	 */
-	if (unlikely(faulthandler_disabled() || !mm)) {
+	if (unlikely(running_inband() && (faulthandler_disabled() || !mm))) {
 		bad_area_nosemaphore(regs, hw_error_code, address);
 		return;
 	}
@@ -1279,12 +1346,22 @@
 	 * potential system fault or CPU buglet:
 	 */
 	if (user_mode(regs)) {
-		local_irq_enable();
+		cond_reenable_irqs_user();
 		flags |= FAULT_FLAG_USER;
 	} else {
-		if (regs->flags & X86_EFLAGS_IF)
-			local_irq_enable();
+		cond_reenable_irqs_kernel(state, regs);
 	}
+
+	/*
+	 * At this point, we would have to stop running
+	 * out-of-band. Tell the companion core about the page fault
+	 * event, so that it might switch current to in-band mode if
+	 * need be. If it does not, then we may assume that it would
+	 * also handle the fixups.
+	 */
+	oob_trap_notify(X86_TRAP_PF, regs);
+	if (!running_inband())
+		return;
 
 	perf_sw_event(PERF_COUNT_SW_PAGE_FAULTS, 1, regs, address);
 
@@ -1307,7 +1384,7 @@
 	 */
 	if (is_vsyscall_vaddr(address)) {
 		if (emulate_vsyscall(hw_error_code, regs, address))
-			return;
+			goto out;
 	}
 #endif
 
@@ -1340,7 +1417,7 @@
 			 * which we do not expect faults.
 			 */
 			bad_area_nosemaphore(regs, hw_error_code, address);
-			return;
+			goto out;
 		}
 retry:
 		mmap_read_lock(mm);
@@ -1357,17 +1434,17 @@
 		vma = find_vma(mm, address);
 	if (unlikely(!vma)) {
 		bad_area(regs, hw_error_code, address);
-		return;
+		goto out;
 	}
 	if (likely(vma->vm_start <= address))
 		goto good_area;
 	if (unlikely(!(vma->vm_flags & VM_GROWSDOWN))) {
 		bad_area(regs, hw_error_code, address);
-		return;
+		goto out;
 	}
 	if (unlikely(expand_stack(vma, address))) {
 		bad_area(regs, hw_error_code, address);
-		return;
+		goto out;
 	}
 
 	/*
@@ -1377,7 +1454,7 @@
 good_area:
 	if (unlikely(access_error(hw_error_code, vma))) {
 		bad_area_access_error(regs, hw_error_code, address, vma);
-		return;
+		goto out;
 	}
 
 	/*
@@ -1400,7 +1477,7 @@
 		if (!user_mode(regs))
 			no_context(regs, hw_error_code, address, SIGBUS,
 				   BUS_ADRERR);
-		return;
+		goto out;
 	}
 
 	/*
@@ -1426,10 +1503,12 @@
 done:
 	if (unlikely(fault & VM_FAULT_ERROR)) {
 		mm_fault_error(regs, hw_error_code, address, fault);
-		return;
+		goto out;
 	}
 
 	check_v8086_mode(regs, address, tsk);
+out:
+	oob_trap_unwind(X86_TRAP_PF, regs);
 }
 NOKPROBE_SYMBOL(do_user_addr_fault);
 
@@ -1448,7 +1527,8 @@
 
 static __always_inline void
 handle_page_fault(struct pt_regs *regs, unsigned long error_code,
-			      unsigned long address)
+		unsigned long address,
+		irqentry_state_t state)
 {
 	trace_page_fault_entries(regs, error_code, address);
 
@@ -1459,7 +1539,7 @@
 	if (unlikely(fault_in_kernel_space(address))) {
 		do_kern_addr_fault(regs, error_code, address);
 	} else {
-		do_user_addr_fault(regs, error_code, address);
+		do_user_addr_fault(regs, error_code, address, state);
 		/*
 		 * User address page fault handling might have reenabled
 		 * interrupts. Fixing up all potential exit points of
@@ -1467,7 +1547,7 @@
 		 * doable w/o creating an unholy mess or turning the code
 		 * upside down.
 		 */
-		local_irq_disable();
+		cond_disable_irqs();
 	}
 }
 
@@ -1515,8 +1595,46 @@
 	state = irqentry_enter(regs);
 
 	instrumentation_begin();
-	handle_page_fault(regs, error_code, address);
+	handle_page_fault(regs, error_code, address, state);
 	instrumentation_end();
 
 	irqentry_exit(regs, state);
 }
+
+#ifdef CONFIG_DOVETAIL
+
+void arch_advertise_page_mapping(unsigned long start, unsigned long end)
+{
+	unsigned long next, addr = start;
+	pgd_t *pgd, *pgd_ref;
+	struct page *page;
+
+	/*
+	 * APEI may create temporary mappings in interrupt context -
+	 * nothing we can or need to propagate globally.
+	 */
+	if (in_interrupt())
+		return;
+
+	if (!(start >= VMALLOC_START && start < VMALLOC_END))
+		return;
+
+	do {
+		next = pgd_addr_end(addr, end);
+		pgd_ref = pgd_offset_k(addr);
+		if (pgd_none(*pgd_ref))
+			continue;
+		spin_lock(&pgd_lock);
+		list_for_each_entry(page, &pgd_list, lru) {
+			pgd = page_address(page) + pgd_index(addr);
+			if (pgd_none(*pgd))
+				set_pgd(pgd, *pgd_ref);
+		}
+		spin_unlock(&pgd_lock);
+		addr = next;
+	} while (addr != end);
+
+	arch_flush_lazy_mmu_mode();
+}
+
+#endif
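A hedged usage sketch for arch_advertise_page_mapping(): the assumed caller is the companion core, propagating a fresh vmalloc range to every pgd so out-of-band threads cannot fault on it later.

	void *p = vmalloc(PAGE_SIZE);	/* hypothetical allocation */

	if (p)
		arch_advertise_page_mapping((unsigned long)p,
					    (unsigned long)p + PAGE_SIZE);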
diff --git a/kernel/arch/x86/mm/tlb.c b/kernel/arch/x86/mm/tlb.c
index 569ac1d..b720da2 100644
--- a/kernel/arch/x86/mm/tlb.c
+++ b/kernel/arch/x86/mm/tlb.c
@@ -5,6 +5,7 @@
 #include <linux/spinlock.h>
 #include <linux/smp.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/export.h>
 #include <linux/cpu.h>
 #include <linux/debugfs.h>
@@ -309,10 +310,12 @@
 void switch_mm(struct mm_struct *prev, struct mm_struct *next,
 	       struct task_struct *tsk)
 {
-	unsigned long flags;
+	unsigned long flags, _flags;
 
 	local_irq_save(flags);
+	protect_inband_mm(_flags);
 	switch_mm_irqs_off(prev, next, tsk);
+	unprotect_inband_mm(_flags);
 	local_irq_restore(flags);
 }
 
@@ -440,7 +443,9 @@
 	 */
 
 	/* We don't want flush_tlb_func_* to run concurrently with us. */
-	if (IS_ENABLED(CONFIG_PROVE_LOCKING))
+	if (IS_ENABLED(CONFIG_DOVETAIL))
+		WARN_ON_ONCE(!hard_irqs_disabled());
+	else if (IS_ENABLED(CONFIG_PROVE_LOCKING))
 		WARN_ON_ONCE(!irqs_disabled());
 
 	/*
@@ -666,15 +671,24 @@
 	 *                   wants us to catch up to.
 	 */
 	struct mm_struct *loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
-	u32 loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
-	u64 mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
-	u64 local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+	u32 loaded_mm_asid;
+	u64 mm_tlb_gen;
+	u64 local_tlb_gen;
+	unsigned long flags;
 
 	/* This code cannot presently handle being reentered. */
 	VM_WARN_ON(!irqs_disabled());
 
-	if (unlikely(loaded_mm == &init_mm))
+	protect_inband_mm(flags);
+
+	loaded_mm_asid = this_cpu_read(cpu_tlbstate.loaded_mm_asid);
+	mm_tlb_gen = atomic64_read(&loaded_mm->context.tlb_gen);
+	local_tlb_gen = this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].tlb_gen);
+
+	if (unlikely(loaded_mm == &init_mm)) {
+		unprotect_inband_mm(flags);
 		return;
+	}
 
 	VM_WARN_ON(this_cpu_read(cpu_tlbstate.ctxs[loaded_mm_asid].ctx_id) !=
 		   loaded_mm->context.ctx_id);
@@ -690,6 +704,7 @@
 		 * IPIs to lazy TLB mode CPUs.
 		 */
 		switch_mm_irqs_off(NULL, &init_mm, NULL);
+		unprotect_inband_mm(flags);
 		return;
 	}
 
@@ -700,12 +715,15 @@
 		 * be handled can catch us all the way up, leaving no work for
 		 * the second flush.
 		 */
+		unprotect_inband_mm(flags);
 		trace_tlb_flush(reason, 0);
 		return;
 	}
 
 	WARN_ON_ONCE(local_tlb_gen > mm_tlb_gen);
 	WARN_ON_ONCE(f->new_tlb_gen > mm_tlb_gen);
+
+	unprotect_inband_mm(flags);
 
 	/*
 	 * If we get to this point, we know that our TLB is out of date.
@@ -1063,7 +1081,7 @@
 	 * from interrupts. (Use the raw variant because this code can
 	 * be called from deep inside debugging code.)
 	 */
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	cr4 = this_cpu_read(cpu_tlbstate.cr4);
 	/* toggle PGE */
@@ -1071,7 +1089,7 @@
 	/* write old PGE again and flush TLBs */
 	native_write_cr4(cr4);
 
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 /*
@@ -1079,6 +1097,8 @@
  */
 STATIC_NOPV void native_flush_tlb_local(void)
 {
+	unsigned long flags;
+
 	/*
 	 * Preemption or interrupts must be disabled to protect the access
 	 * to the per CPU variable and to prevent being preempted between
@@ -1086,10 +1106,14 @@
 	 */
 	WARN_ON_ONCE(preemptible());
 
+	flags = hard_cond_local_irq_save();
+
 	invalidate_user_asid(this_cpu_read(cpu_tlbstate.loaded_mm_asid));
 
 	/* If current->mm == NULL then the read_cr3() "borrows" an mm */
 	native_write_cr3(__native_read_cr3());
+
+	hard_cond_local_irq_restore(flags);
 }
 
 void flush_tlb_local(void)
@@ -1165,6 +1189,16 @@
 	VM_WARN_ON_ONCE(!loaded_mm);
 
 	/*
+	 * There would be no way for the companion core to switch an
+	 * out-of-band task back in-band in order to handle an access
+	 * fault over NMI safely. Tell the caller that uaccess from
+	 * NMI is NOT ok if the preempted task was running
+	 * out-of-band.
+	 */
+	if (running_oob())
+		return false;
+
+	/*
 	 * The condition we want to check is
 	 * current_mm->pgd == __va(read_cr3_pa()).  This may be slow, though,
 	 * if we're running in a VM with shadow paging, and nmi_uaccess_okay()
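
Worth noting about the tlb.c hunks above: every access to cpu_tlbstate is
now bracketed by protect_inband_mm()/unprotect_inband_mm(), and the guard
is released on each early-return path before leaving the function, so the
out-of-band stage can never preempt a CPU halfway through an mm-state
update. A minimal sketch of that discipline, assuming only the Dovetail
helpers visible in the hunks (the function name is made up for
illustration):

	#include <linux/dovetail.h>	/* assumed home of the inband-mm guards */

	/*
	 * Hedged sketch: snapshot per-CPU TLB state under the Dovetail
	 * guard, balancing unprotect_inband_mm() on every exit path,
	 * mirroring the flush_tlb_func hunk above.
	 */
	static void example_peek_tlbstate(void)
	{
		struct mm_struct *loaded_mm;
		unsigned long flags;

		protect_inband_mm(flags);	/* hard irqs off: no oob preemption */

		loaded_mm = this_cpu_read(cpu_tlbstate.loaded_mm);
		if (unlikely(loaded_mm == &init_mm)) {
			unprotect_inband_mm(flags);	/* balance before bailing out */
			return;
		}

		/* ... consume the snapshot ... */

		unprotect_inband_mm(flags);
	}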
diff --git a/kernel/arch/x86/xen/Kconfig b/kernel/arch/x86/xen/Kconfig
index 218acbd..a02524c 100644
--- a/kernel/arch/x86/xen/Kconfig
+++ b/kernel/arch/x86/xen/Kconfig
@@ -5,7 +5,7 @@
 
 config XEN
 	bool "Xen guest support"
-	depends on PARAVIRT
+	depends on PARAVIRT && !IRQ_PIPELINE
 	select PARAVIRT_CLOCK
 	select X86_HV_CALLBACK_VECTOR
 	depends on X86_64 || (X86_32 && X86_PAE)
diff --git a/kernel/arch/x86/xen/enlighten_hvm.c b/kernel/arch/x86/xen/enlighten_hvm.c
index ec50b74..8617927 100644
--- a/kernel/arch/x86/xen/enlighten_hvm.c
+++ b/kernel/arch/x86/xen/enlighten_hvm.c
@@ -120,7 +120,8 @@
 		this_cpu_write(xen_vcpu_id, smp_processor_id());
 }
 
-DEFINE_IDTENTRY_SYSVEC(sysvec_xen_hvm_callback)
+DEFINE_IDTENTRY_SYSVEC_PIPELINED(HYPERVISOR_CALLBACK_VECTOR,
+				 sysvec_xen_hvm_callback)
 {
 	struct pt_regs *old_regs = set_irq_regs(regs);
 
diff --git a/kernel/drivers/Makefile b/kernel/drivers/Makefile
index 21cb556..bb031d3 100644
--- a/kernel/drivers/Makefile
+++ b/kernel/drivers/Makefile
@@ -193,3 +193,5 @@
 obj-$(CONFIG_RK_HEADSET)	+= headset_observe/
 obj-$(CONFIG_RK_NAND)		+= rk_nand/
 obj-$(CONFIG_ROCKCHIP_RKNPU)	+= rknpu/
+
+obj-$(CONFIG_XENOMAI) += xenomai/
diff --git a/kernel/drivers/base/regmap/internal.h b/kernel/drivers/base/regmap/internal.h
index 0097696..ec9bea3 100644
--- a/kernel/drivers/base/regmap/internal.h
+++ b/kernel/drivers/base/regmap/internal.h
@@ -50,7 +50,10 @@
 	union {
 		struct mutex mutex;
 		struct {
-			spinlock_t spinlock;
+			union {
+				spinlock_t spinlock;
+				hard_spinlock_t oob_lock;
+			};
 			unsigned long spinlock_flags;
 		};
 	};
diff --git a/kernel/drivers/base/regmap/regmap-irq.c b/kernel/drivers/base/regmap/regmap-irq.c
index 4466f8b..ea1fc2f 100644
--- a/kernel/drivers/base/regmap/regmap-irq.c
+++ b/kernel/drivers/base/regmap/regmap-irq.c
@@ -331,6 +331,7 @@
 	.irq_enable		= regmap_irq_enable,
 	.irq_set_type		= regmap_irq_set_type,
 	.irq_set_wake		= regmap_irq_set_wake,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static inline int read_sub_irq_data(struct regmap_irq_chip_data *data,
diff --git a/kernel/drivers/base/regmap/regmap.c b/kernel/drivers/base/regmap/regmap.c
index 55a30af..f586293 100644
--- a/kernel/drivers/base/regmap/regmap.c
+++ b/kernel/drivers/base/regmap/regmap.c
@@ -14,6 +14,7 @@
 #include <linux/property.h>
 #include <linux/rbtree.h>
 #include <linux/sched.h>
+#include <linux/dovetail.h>
 #include <linux/delay.h>
 #include <linux/log2.h>
 #include <linux/hwspinlock.h>
@@ -523,6 +524,23 @@
 	spin_unlock_irqrestore(&map->spinlock, map->spinlock_flags);
 }
 
+static void regmap_lock_oob(void *__map)
+__acquires(&map->oob_lock)
+{
+	struct regmap *map = __map;
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&map->oob_lock, flags);
+	map->spinlock_flags = flags;
+}
+
+static void regmap_unlock_oob(void *__map)
+__releases(&map->oob_lock)
+{
+	struct regmap *map = __map;
+	raw_spin_unlock_irqrestore(&map->oob_lock, map->spinlock_flags);
+}
+
 static void dev_get_regmap_release(struct device *dev, void *res)
 {
 	/*
@@ -761,18 +779,29 @@
 	} else {
 		if ((bus && bus->fast_io) ||
 		    config->fast_io) {
-			spin_lock_init(&map->spinlock);
-			map->lock = regmap_lock_spinlock;
-			map->unlock = regmap_unlock_spinlock;
-			lockdep_set_class_and_name(&map->spinlock,
-						   lock_key, lock_name);
-		} else {
+			if (dovetailing() && config->oob_io) {
+				raw_spin_lock_init(&map->oob_lock);
+				map->lock = regmap_lock_oob;
+				map->unlock = regmap_unlock_oob;
+				lockdep_set_class_and_name(&map->oob_lock,
+							lock_key, lock_name);
+			} else {
+				spin_lock_init(&map->spinlock);
+				map->lock = regmap_lock_spinlock;
+				map->unlock = regmap_unlock_spinlock;
+				lockdep_set_class_and_name(&map->spinlock,
+							lock_key, lock_name);
+			}
+		} else if (!config->oob_io) {
 			mutex_init(&map->mutex);
 			map->lock = regmap_lock_mutex;
 			map->unlock = regmap_unlock_mutex;
 			map->can_sleep = true;
 			lockdep_set_class_and_name(&map->mutex,
 						   lock_key, lock_name);
+		} else {
+			ret = -ENXIO;
+			goto err_name;
 		}
 		map->lock_arg = map;
 	}
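
With the hunk above, a regmap becomes usable from the out-of-band stage
when its creator sets both fast_io and the new oob_io field: the lock
callbacks then take a hard spinlock instead of a regular one, and a
sleeping (mutex-backed) map combined with oob_io is rejected with -ENXIO.
A hedged sketch of a client opting in (device setup elided; oob_io is the
config field this patch introduces, everything else is the stock regmap
API):

	#include <linux/regmap.h>

	/*
	 * Hedged sketch: an oob-capable MMIO regmap. fast_io + oob_io
	 * selects the hard-spinlock path added above on a dovetailed
	 * kernel; otherwise the regular spinlock path is kept.
	 */
	static const struct regmap_config example_oob_regmap_config = {
		.reg_bits	= 32,
		.val_bits	= 32,
		.reg_stride	= 4,
		.fast_io	= true,
		.oob_io		= true,	/* field added by this patch */
	};

A map created with this config (e.g. via devm_regmap_init_mmio()) may then
be accessed from an out-of-band handler, which the mutex- and regular
spinlock-backed paths could not tolerate.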
diff --git a/kernel/drivers/clocksource/Kconfig b/kernel/drivers/clocksource/Kconfig
index 99c6b44..ae971d3 100644
--- a/kernel/drivers/clocksource/Kconfig
+++ b/kernel/drivers/clocksource/Kconfig
@@ -25,6 +25,7 @@
 config OMAP_DM_TIMER
 	bool
 	select TIMER_OF
+	imply GENERIC_CLOCKSOURCE_VDSO
 
 config CLKBLD_I8253
 	def_bool y if CLKSRC_I8253 || CLKEVT_I8253 || I8253_LOCK
@@ -58,6 +59,8 @@
 
 config DW_APB_TIMER
 	bool "DW APB timer driver" if COMPILE_TEST
+	select CLKSRC_MMIO
+	imply GENERIC_CLOCKSOURCE_VDSO
 	help
 	  Enables the support for the dw_apb timer.
 
@@ -395,6 +398,7 @@
 config ARM_GLOBAL_TIMER
 	bool "Support for the ARM global timer" if COMPILE_TEST
 	select TIMER_OF if OF
+	imply GENERIC_CLOCKSOURCE_VDSO
 	depends on ARM
 	help
 	  This option enables support for the ARM global timer unit.
@@ -444,6 +448,7 @@
 config CLKSRC_EXYNOS_MCT
 	bool "Exynos multi core timer driver" if COMPILE_TEST
 	depends on ARM || ARM64
+	imply GENERIC_CLOCKSOURCE_VDSO
 	help
 	  Support for Multi Core Timer controller on Exynos SoCs.
 
@@ -620,7 +625,7 @@
 config CLKSRC_IMX_GPT
 	bool "Clocksource using i.MX GPT" if COMPILE_TEST
 	depends on (ARM || ARM64) && CLKDEV_LOOKUP
-	select CLKSRC_MMIO
+	imply GENERIC_CLOCKSOURCE_VDSO
 
 config CLKSRC_IMX_TPM
 	bool "Clocksource using i.MX TPM" if COMPILE_TEST
@@ -642,7 +647,7 @@
 	bool "Low power clocksource found in the LPC" if COMPILE_TEST
 	select TIMER_OF if OF
 	depends on HAS_IOMEM
-	select CLKSRC_MMIO
+	imply GENERIC_CLOCKSOURCE_VDSO
 	help
 	  Enable this option to use the Low Power controller timer
 	  as clocksource.
diff --git a/kernel/drivers/clocksource/arm_arch_timer.c b/kernel/drivers/clocksource/arm_arch_timer.c
index f488176..fb4fb16 100644
--- a/kernel/drivers/clocksource/arm_arch_timer.c
+++ b/kernel/drivers/clocksource/arm_arch_timer.c
@@ -21,6 +21,7 @@
 #include <linux/of_address.h>
 #include <linux/io.h>
 #include <linux/slab.h>
+#include <linux/dovetail.h>
 #include <linux/sched/clock.h>
 #include <linux/sched_clock.h>
 #include <linux/acpi.h>
@@ -644,7 +645,7 @@
 	if (ctrl & ARCH_TIMER_CTRL_IT_STAT) {
 		ctrl |= ARCH_TIMER_CTRL_IT_MASK;
 		arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, evt);
-		evt->event_handler(evt);
+		clockevents_handle_event(evt);
 		return IRQ_HANDLED;
 	}
 
@@ -753,7 +754,7 @@
 static void __arch_timer_setup(unsigned type,
 			       struct clock_event_device *clk)
 {
-	clk->features = CLOCK_EVT_FEAT_ONESHOT;
+	clk->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
 
 	if (type == ARCH_TIMER_TYPE_CP15) {
 		typeof(clk->set_next_event) sne;
@@ -864,6 +865,9 @@
 	else
 		cntkctl |= ARCH_TIMER_USR_VCT_ACCESS_EN;
 
+	if (IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO))
+		cntkctl |= ARCH_TIMER_USR_PT_ACCESS_EN;
+
 	arch_timer_set_cntkctl(cntkctl);
 }
 
@@ -897,6 +901,7 @@
 	enable_percpu_irq(arch_timer_ppi[arch_timer_uses_ppi], flags);
 
 	if (arch_timer_has_nonsecure_ppi()) {
+		clk->irq = arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI];
 		flags = check_ppi_trigger(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI]);
 		enable_percpu_irq(arch_timer_ppi[ARCH_TIMER_PHYS_NONSECURE_PPI],
 				  flags);
@@ -1015,6 +1020,8 @@
 
 		arch_timer_read_counter = rd;
 		clocksource_counter.vdso_clock_mode = vdso_default;
+		if (vdso_default != VDSO_CLOCKMODE_NONE)
+			clocksource_counter.vdso_type = CLOCKSOURCE_VDSO_ARCHITECTED;
 	} else {
 		arch_timer_read_counter = arch_counter_get_cntvct_mem;
 	}
diff --git a/kernel/drivers/clocksource/arm_global_timer.c b/kernel/drivers/clocksource/arm_global_timer.c
index 88b2d38..3273fc6 100644
--- a/kernel/drivers/clocksource/arm_global_timer.c
+++ b/kernel/drivers/clocksource/arm_global_timer.c
@@ -153,11 +153,11 @@
 	 *	the Global Timer flag _after_ having incremented
 	 *	the Comparator register	value to a higher value.
 	 */
-	if (clockevent_state_oneshot(evt))
+	if (clockevent_is_oob(evt) || clockevent_state_oneshot(evt))
 		gt_compare_set(ULONG_MAX, 0);
 
 	writel_relaxed(GT_INT_STATUS_EVENT_FLAG, gt_base + GT_INT_STATUS);
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@@ -168,7 +168,7 @@
 
 	clk->name = "arm_global_timer";
 	clk->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
-		CLOCK_EVT_FEAT_PERCPU;
+		CLOCK_EVT_FEAT_PERCPU | CLOCK_EVT_FEAT_PIPELINE;
 	clk->set_state_shutdown = gt_clockevent_shutdown;
 	clk->set_state_periodic = gt_clockevent_set_periodic;
 	clk->set_state_oneshot = gt_clockevent_shutdown;
@@ -192,11 +192,6 @@
 	return 0;
 }
 
-static u64 gt_clocksource_read(struct clocksource *cs)
-{
-	return gt_counter_read();
-}
-
 static void gt_resume(struct clocksource *cs)
 {
 	unsigned long ctrl;
@@ -207,13 +202,15 @@
 		writel(GT_CONTROL_TIMER_ENABLE, gt_base + GT_CONTROL);
 }
 
-static struct clocksource gt_clocksource = {
-	.name	= "arm_global_timer",
-	.rating	= 300,
-	.read	= gt_clocksource_read,
-	.mask	= CLOCKSOURCE_MASK(64),
-	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
-	.resume = gt_resume,
+static struct clocksource_user_mmio gt_clocksource = {
+	.mmio.clksrc = {
+		.name	= "arm_global_timer",
+		.rating	= 300,
+		.read	= clocksource_dual_mmio_readl_up,
+		.mask	= CLOCKSOURCE_MASK(64),
+		.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
+		.resume = gt_resume,
+	},
 };
 
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
@@ -240,6 +237,8 @@
 
 static int __init gt_clocksource_init(void)
 {
+	struct clocksource_mmio_regs mmr;
+
 	writel(0, gt_base + GT_CONTROL);
 	writel(0, gt_base + GT_COUNTER0);
 	writel(0, gt_base + GT_COUNTER1);
@@ -249,7 +248,13 @@
 #ifdef CONFIG_CLKSRC_ARM_GLOBAL_TIMER_SCHED_CLOCK
 	sched_clock_register(gt_sched_clock_read, 64, gt_clk_rate);
 #endif
-	return clocksource_register_hz(&gt_clocksource, gt_clk_rate);
+	mmr.reg_upper = gt_base + GT_COUNTER1;
+	mmr.reg_lower = gt_base + GT_COUNTER0;
+	mmr.bits_upper = 32;
+	mmr.bits_lower = 32;
+	mmr.revmap = NULL;
+
+	return clocksource_user_mmio_init(&gt_clocksource, &mmr, gt_clk_rate);
 }
 
 static int __init global_timer_of_register(struct device_node *np)
@@ -299,8 +304,8 @@
 		goto out_clk;
 	}
 
-	err = request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
-				 "gt", gt_evt);
+	err = __request_percpu_irq(gt_ppi, gt_clockevent_interrupt,
+				   IRQF_TIMER, "gt", gt_evt);
 	if (err) {
 		pr_warn("global-timer: can't register interrupt %d (%d)\n",
 			gt_ppi, err);
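
The timer conversions in this series follow one recipe, which the two
drivers above illustrate: advertise CLOCK_EVT_FEAT_PIPELINE on the clock
event device, deliver ticks through clockevents_handle_event() instead of
calling evt->event_handler directly, and request the timer interrupt with
IRQF_TIMER (through __request_percpu_irq() for per-CPU PPIs) so the
pipeline may run it from the oob stage. A condensed sketch with
placeholder names (hardware access and error handling elided):

	#include <linux/clockchips.h>
	#include <linux/interrupt.h>

	/* Hedged sketch of the per-driver conversion recipe. */
	static struct clock_event_device example_clkevt = {
		.name		= "example-timer",
		.features	= CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE,
		.rating		= 300,
		/* .set_next_event and friends elided */
	};

	static irqreturn_t example_timer_interrupt(int irq, void *dev_id)
	{
		struct clock_event_device *evt = dev_id;

		/* ... acknowledge the hardware interrupt here ... */
		clockevents_handle_event(evt);	/* pipeline-aware delivery */
		return IRQ_HANDLED;
	}

	static int __init example_timer_init(int irq)
	{
		return request_irq(irq, example_timer_interrupt, IRQF_TIMER,
				   example_clkevt.name, &example_clkevt);
	}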
diff --git a/kernel/drivers/clocksource/bcm2835_timer.c b/kernel/drivers/clocksource/bcm2835_timer.c
index 1592650..687e9f2 100644
--- a/kernel/drivers/clocksource/bcm2835_timer.c
+++ b/kernel/drivers/clocksource/bcm2835_timer.c
@@ -53,18 +53,25 @@
 static irqreturn_t bcm2835_time_interrupt(int irq, void *dev_id)
 {
 	struct bcm2835_timer *timer = dev_id;
-	void (*event_handler)(struct clock_event_device *);
+
 	if (readl_relaxed(timer->control) & timer->match_mask) {
 		writel_relaxed(timer->match_mask, timer->control);
 
-		event_handler = READ_ONCE(timer->evt.event_handler);
-		if (event_handler)
-			event_handler(&timer->evt);
+		clockevents_handle_event(&timer->evt);
 		return IRQ_HANDLED;
 	} else {
 		return IRQ_NONE;
 	}
 }
+
+static struct clocksource_user_mmio clocksource_bcm2835 = {
+	.mmio.clksrc = {
+		.rating		= 300,
+		.read		= clocksource_mmio_readl_up,
+		.mask		= CLOCKSOURCE_MASK(32),
+		.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+	},
+};
 
 static int __init bcm2835_timer_init(struct device_node *node)
 {
@@ -72,6 +79,7 @@
 	u32 freq;
 	int irq, ret;
 	struct bcm2835_timer *timer;
+	struct clocksource_mmio_regs mmr;
 
 	base = of_iomap(node, 0);
 	if (!base) {
@@ -88,8 +96,13 @@
 	system_clock = base + REG_COUNTER_LO;
 	sched_clock_register(bcm2835_sched_read, 32, freq);
 
-	clocksource_mmio_init(base + REG_COUNTER_LO, node->name,
-		freq, 300, 32, clocksource_mmio_readl_up);
+	mmr.reg_lower = base + REG_COUNTER_LO;
+	mmr.bits_lower = 32;
+	mmr.reg_upper = NULL;
+	mmr.bits_upper = 0;
+	mmr.revmap = NULL;
+	clocksource_bcm2835.mmio.clksrc.name = node->name;
+	clocksource_user_mmio_init(&clocksource_bcm2835, &mmr, freq);
 
 	irq = irq_of_parse_and_map(node, DEFAULT_TIMER);
 	if (irq <= 0) {
@@ -109,7 +122,7 @@
 	timer->match_mask = BIT(DEFAULT_TIMER);
 	timer->evt.name = node->name;
 	timer->evt.rating = 300;
-	timer->evt.features = CLOCK_EVT_FEAT_ONESHOT;
+	timer->evt.features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE;
 	timer->evt.set_next_event = bcm2835_time_set_next_event;
 	timer->evt.cpumask = cpumask_of(0);
 
diff --git a/kernel/drivers/clocksource/clksrc_st_lpc.c b/kernel/drivers/clocksource/clksrc_st_lpc.c
index 419a886..b30b814 100644
--- a/kernel/drivers/clocksource/clksrc_st_lpc.c
+++ b/kernel/drivers/clocksource/clksrc_st_lpc.c
@@ -51,7 +51,7 @@
 
 	sched_clock_register(st_clksrc_sched_clock_read, 32, rate);
 
-	ret = clocksource_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
+	ret = clocksource_user_single_mmio_init(ddata.base + LPC_LPT_LSB_OFF,
 				    "clksrc-st-lpc", rate, 300, 32,
 				    clocksource_mmio_readl_up);
 	if (ret) {
diff --git a/kernel/drivers/clocksource/dw_apb_timer.c b/kernel/drivers/clocksource/dw_apb_timer.c
index f5f24a9..a974b9d 100644
--- a/kernel/drivers/clocksource/dw_apb_timer.c
+++ b/kernel/drivers/clocksource/dw_apb_timer.c
@@ -43,7 +43,7 @@
 static inline struct dw_apb_clocksource *
 clocksource_to_dw_apb_clocksource(struct clocksource *cs)
 {
-	return container_of(cs, struct dw_apb_clocksource, cs);
+	return container_of(cs, struct dw_apb_clocksource, ummio.mmio.clksrc);
 }
 
 static inline u32 apbt_readl(struct dw_apb_timer *timer, unsigned long offs)
@@ -343,18 +343,6 @@
 	dw_apb_clocksource_read(dw_cs);
 }
 
-static u64 __apbt_read_clocksource(struct clocksource *cs)
-{
-	u32 current_count;
-	struct dw_apb_clocksource *dw_cs =
-		clocksource_to_dw_apb_clocksource(cs);
-
-	current_count = apbt_readl_relaxed(&dw_cs->timer,
-					APBTMR_N_CURRENT_VALUE);
-
-	return (u64)~current_count;
-}
-
 static void apbt_restart_clocksource(struct clocksource *cs)
 {
 	struct dw_apb_clocksource *dw_cs =
@@ -376,7 +364,7 @@
  * dw_apb_clocksource_register() as the next step.
  */
 struct dw_apb_clocksource *
-dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
+__init dw_apb_clocksource_init(unsigned rating, const char *name, void __iomem *base,
 			unsigned long freq)
 {
 	struct dw_apb_clocksource *dw_cs = kzalloc(sizeof(*dw_cs), GFP_KERNEL);
@@ -386,12 +374,12 @@
 
 	dw_cs->timer.base = base;
 	dw_cs->timer.freq = freq;
-	dw_cs->cs.name = name;
-	dw_cs->cs.rating = rating;
-	dw_cs->cs.read = __apbt_read_clocksource;
-	dw_cs->cs.mask = CLOCKSOURCE_MASK(32);
-	dw_cs->cs.flags = CLOCK_SOURCE_IS_CONTINUOUS;
-	dw_cs->cs.resume = apbt_restart_clocksource;
+	dw_cs->ummio.mmio.clksrc.name = name;
+	dw_cs->ummio.mmio.clksrc.rating = rating;
+	dw_cs->ummio.mmio.clksrc.read = clocksource_mmio_readl_down;
+	dw_cs->ummio.mmio.clksrc.mask = CLOCKSOURCE_MASK(32);
+	dw_cs->ummio.mmio.clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	dw_cs->ummio.mmio.clksrc.resume = apbt_restart_clocksource;
 
 	return dw_cs;
 }
@@ -401,9 +389,17 @@
  *
  * @dw_cs:	The clocksource to register.
  */
-void dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
+void __init dw_apb_clocksource_register(struct dw_apb_clocksource *dw_cs)
 {
-	clocksource_register_hz(&dw_cs->cs, dw_cs->timer.freq);
+	struct clocksource_mmio_regs mmr;
+
+	mmr.reg_lower = dw_cs->timer.base + APBTMR_N_CURRENT_VALUE;
+	mmr.bits_lower = 32;
+	mmr.reg_upper = NULL;
+	mmr.bits_upper = 0;
+	mmr.revmap = NULL;
+
+	clocksource_user_mmio_init(&dw_cs->ummio, &mmr, dw_cs->timer.freq);
 }
 
 /**
diff --git a/kernel/drivers/clocksource/exynos_mct.c b/kernel/drivers/clocksource/exynos_mct.c
index df194b0..243adda 100644
--- a/kernel/drivers/clocksource/exynos_mct.c
+++ b/kernel/drivers/clocksource/exynos_mct.c
@@ -194,23 +194,20 @@
 	return readl_relaxed(reg_base + EXYNOS4_MCT_G_CNT_L);
 }
 
-static u64 exynos4_frc_read(struct clocksource *cs)
-{
-	return exynos4_read_count_32();
-}
-
 static void exynos4_frc_resume(struct clocksource *cs)
 {
 	exynos4_mct_frc_start();
 }
 
-static struct clocksource mct_frc = {
-	.name		= "mct-frc",
-	.rating		= 450,	/* use value higher than ARM arch timer */
-	.read		= exynos4_frc_read,
-	.mask		= CLOCKSOURCE_MASK(32),
-	.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
-	.resume		= exynos4_frc_resume,
+static struct clocksource_user_mmio mct_frc = {
+	.mmio.clksrc = {
+		.name		= "mct-frc",
+		.rating		= 450,	/* use value higher than ARM arch timer */
+		.read		= clocksource_mmio_readl_up,
+		.mask		= CLOCKSOURCE_MASK(32),
+		.flags		= CLOCK_SOURCE_IS_CONTINUOUS,
+		.resume		= exynos4_frc_resume,
+	},
 };
 
 static u64 notrace exynos4_read_sched_clock(void)
@@ -231,6 +228,8 @@
 
 static int __init exynos4_clocksource_init(void)
 {
+	struct clocksource_mmio_regs mmr;
+
 	exynos4_mct_frc_start();
 
 #if defined(CONFIG_ARM)
@@ -239,8 +238,13 @@
 	register_current_timer_delay(&exynos4_delay_timer);
 #endif
 
-	if (clocksource_register_hz(&mct_frc, clk_rate))
-		panic("%s: can't register clocksource\n", mct_frc.name);
+	mmr.reg_upper = NULL;
+	mmr.reg_lower = reg_base + EXYNOS4_MCT_G_CNT_L;
+	mmr.bits_upper = 0;
+	mmr.bits_lower = 32;
+	mmr.revmap = NULL;
+	if (clocksource_user_mmio_init(&mct_frc, &mmr, clk_rate))
+		panic("%s: can't register clocksource\n", mct_frc.mmio.clksrc.name);
 
 	sched_clock_register(exynos4_read_sched_clock, 32, clk_rate);
 
@@ -308,7 +312,8 @@
 static struct clock_event_device mct_comp_device = {
 	.name			= "mct-comp",
 	.features		= CLOCK_EVT_FEAT_PERIODIC |
-				  CLOCK_EVT_FEAT_ONESHOT,
+				  CLOCK_EVT_FEAT_ONESHOT |
+				  CLOCK_EVT_FEAT_PIPELINE,
 	.rating			= 250,
 	.set_next_event		= exynos4_comp_set_next_event,
 	.set_state_periodic	= mct_set_state_periodic,
@@ -324,7 +329,7 @@
 
 	exynos4_mct_write(0x1, EXYNOS4_MCT_G_INT_CSTAT);
 
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@@ -335,7 +340,7 @@
 	clockevents_config_and_register(&mct_comp_device, clk_rate,
 					0xf, 0xffffffff);
 	if (request_irq(mct_irqs[MCT_G0_IRQ], exynos4_mct_comp_isr,
-			IRQF_TIMER | IRQF_IRQPOLL, "mct_comp_irq",
+			IRQF_TIMER | IRQF_IRQPOLL | IRQF_OOB, "mct_comp_irq",
 			&mct_comp_device))
 		pr_err("%s: request_irq() failed\n", "mct_comp_irq");
 
@@ -434,7 +439,7 @@
 
 	exynos4_mct_tick_clear(mevt);
 
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@@ -456,7 +461,8 @@
 	evt->set_state_oneshot = set_state_shutdown;
 	evt->set_state_oneshot_stopped = set_state_shutdown;
 	evt->tick_resume = set_state_shutdown;
-	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
+	evt->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT |
+		CLOCK_EVT_FEAT_PIPELINE;
 	evt->rating = 500;	/* use value higher than ARM arch timer */
 
 	exynos4_mct_write(TICK_BASE_CNT, mevt->base + MCT_L_TCNTB_OFFSET);
@@ -541,9 +547,9 @@
 
 	if (mct_int_type == MCT_INT_PPI) {
 
-		err = request_percpu_irq(mct_irqs[MCT_L0_IRQ],
-					 exynos4_mct_tick_isr, "MCT",
-					 &percpu_mct_tick);
+		err = __request_percpu_irq(mct_irqs[MCT_L0_IRQ],
+					exynos4_mct_tick_isr, IRQF_TIMER,
+					"MCT", &percpu_mct_tick);
 		WARN(err, "MCT: can't request IRQ %d (%d)\n",
 		     mct_irqs[MCT_L0_IRQ], err);
 	} else {
diff --git a/kernel/drivers/clocksource/mmio.c b/kernel/drivers/clocksource/mmio.c
index 826dcc4..163a50a 100644
--- a/kernel/drivers/clocksource/mmio.c
+++ b/kernel/drivers/clocksource/mmio.c
@@ -6,11 +6,30 @@
 #include <linux/errno.h>
 #include <linux/init.h>
 #include <linux/slab.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+#include <linux/miscdevice.h>
+#include <linux/list.h>
+#include <linux/fs.h>
+#include <linux/mm.h>
+#include <linux/mman.h>
+#include <linux/device.h>
 
-struct clocksource_mmio {
-	void __iomem *reg;
-	struct clocksource clksrc;
+struct clocksource_user_mapping {
+	struct mm_struct *mm;
+	struct clocksource_user_mmio *ucs;
+	void *regs;
+	struct hlist_node link;
+	atomic_t refs;
 };
+
+static struct class *user_mmio_class;
+static dev_t user_mmio_devt;
+
+static DEFINE_SPINLOCK(user_clksrcs_lock);
+static unsigned int user_clksrcs_count;
+static LIST_HEAD(user_clksrcs);
 
 static inline struct clocksource_mmio *to_mmio_clksrc(struct clocksource *c)
 {
@@ -38,6 +57,53 @@
 	return ~(u64)readw_relaxed(to_mmio_clksrc(c)->reg) & c->mask;
 }
 
+static inline struct clocksource_user_mmio *
+to_mmio_ucs(struct clocksource *c)
+{
+	return container_of(c, struct clocksource_user_mmio, mmio.clksrc);
+}
+
+u64 clocksource_dual_mmio_readl_up(struct clocksource *c)
+{
+	struct clocksource_user_mmio *ucs = to_mmio_ucs(c);
+	u32 upper, old_upper, lower;
+
+	upper = readl_relaxed(ucs->reg_upper);
+	do {
+		old_upper = upper;
+		lower = readl_relaxed(ucs->mmio.reg);
+		upper = readl_relaxed(ucs->reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << ucs->bits_lower) | lower;
+}
+
+u64 clocksource_dual_mmio_readw_up(struct clocksource *c)
+{
+	struct clocksource_user_mmio *ucs = to_mmio_ucs(c);
+	u16 upper, old_upper, lower;
+
+	upper = readw_relaxed(ucs->reg_upper);
+	do {
+		old_upper = upper;
+		lower = readw_relaxed(ucs->mmio.reg);
+		upper = readw_relaxed(ucs->reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << ucs->bits_lower) | lower;
+}
+
+static void mmio_base_init(const char *name, int rating, unsigned int bits,
+			   u64 (*read)(struct clocksource *),
+			   struct clocksource *cs)
+{
+	cs->name = name;
+	cs->rating = rating;
+	cs->read = read;
+	cs->mask = CLOCKSOURCE_MASK(bits);
+	cs->flags = CLOCK_SOURCE_IS_CONTINUOUS;
+}
+
 /**
  * clocksource_mmio_init - Initialize a simple mmio based clocksource
  * @base:	Virtual address of the clock readout register
@@ -52,6 +118,7 @@
 	u64 (*read)(struct clocksource *))
 {
 	struct clocksource_mmio *cs;
+	int err;
 
 	if (bits > 64 || bits < 16)
 		return -EINVAL;
@@ -61,12 +128,428 @@
 		return -ENOMEM;
 
 	cs->reg = base;
-	cs->clksrc.name = name;
-	cs->clksrc.rating = rating;
-	cs->clksrc.read = read;
-	cs->clksrc.mask = CLOCKSOURCE_MASK(bits);
-	cs->clksrc.flags = CLOCK_SOURCE_IS_CONTINUOUS;
+	mmio_base_init(name, rating, bits, read, &cs->clksrc);
 
-	return clocksource_register_hz(&cs->clksrc, hz);
+	err = clocksource_register_hz(&cs->clksrc, hz);
+	if (err < 0)
+		kfree(cs);
+
+	return err;
 }
-EXPORT_SYMBOL_GPL(clocksource_mmio_init);
+
+static void mmio_ucs_vmopen(struct vm_area_struct *vma)
+{
+	struct clocksource_user_mapping *mapping, *clone;
+	struct clocksource_user_mmio *ucs;
+	unsigned long h_key;
+
+	mapping = vma->vm_private_data;
+
+	if (mapping->mm == vma->vm_mm) {
+		atomic_inc(&mapping->refs);
+	} else if (mapping->mm) {
+		/*
+		 * We must be duplicating the original mm upon fork(),
+		 * clone the parent ucs mapping struct then rehash it
+		 * on the child mm key. If we cannot get memory for
+		 * this, mitigate the issue for users by preventing a
+		 * stale parent mm from being matched later on by a
+		 * process which reused its mm_struct (h_key is based
+		 * on this struct address).
+		 */
+		clone = kmalloc(sizeof(*mapping), GFP_KERNEL);
+		if (clone == NULL) {
+			pr_alert("out-of-memory for UCS mapping!\n");
+			atomic_inc(&mapping->refs);
+			mapping->mm = NULL;
+			return;
+		}
+		ucs = mapping->ucs;
+		clone->mm = vma->vm_mm;
+		clone->ucs = ucs;
+		clone->regs = mapping->regs;
+		atomic_set(&clone->refs, 1);
+		vma->vm_private_data = clone;
+		h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm);
+		spin_lock(&ucs->lock);
+		hash_add(ucs->mappings, &clone->link, h_key);
+		spin_unlock(&ucs->lock);
+	}
+}
+
+static void mmio_ucs_vmclose(struct vm_area_struct *vma)
+{
+	struct clocksource_user_mapping *mapping;
+
+	mapping = vma->vm_private_data;
+
+	if (atomic_dec_and_test(&mapping->refs)) {
+		spin_lock(&mapping->ucs->lock);
+		hash_del(&mapping->link);
+		spin_unlock(&mapping->ucs->lock);
+		kfree(mapping);
+	}
+}
+
+static const struct vm_operations_struct mmio_ucs_vmops = {
+	.open = mmio_ucs_vmopen,
+	.close = mmio_ucs_vmclose,
+};
+
+static int mmio_ucs_mmap(struct file *file, struct vm_area_struct *vma)
+{
+	unsigned long addr, upper_pfn, lower_pfn;
+	struct clocksource_user_mapping *mapping, *tmp;
+	struct clocksource_user_mmio *ucs;
+	unsigned int bits_upper;
+	unsigned long h_key;
+	pgprot_t prot;
+	size_t pages;
+	int err;
+
+	pages = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
+	if (pages > 2)
+		return -EINVAL;
+
+	vma->vm_private_data = NULL;
+
+	ucs = file->private_data;
+	upper_pfn = ucs->phys_upper >> PAGE_SHIFT;
+	lower_pfn = ucs->phys_lower >> PAGE_SHIFT;
+	bits_upper = fls64(ucs->mmio.clksrc.mask) - ucs->bits_lower;
+	if (pages == 2 && (!bits_upper || upper_pfn == lower_pfn))
+		return -EINVAL;
+
+	mapping = kmalloc(sizeof(*mapping), GFP_KERNEL);
+	if (!mapping)
+		return -ENOMEM;
+
+	mapping->mm = vma->vm_mm;
+	mapping->ucs = ucs;
+	mapping->regs = (void *)vma->vm_start;
+	atomic_set(&mapping->refs, 1);
+
+	vma->vm_private_data = mapping;
+	vma->vm_ops = &mmio_ucs_vmops;
+	prot = pgprot_noncached(vma->vm_page_prot);
+	addr = vma->vm_start;
+
+	err = remap_pfn_range(vma, addr, lower_pfn, PAGE_SIZE, prot);
+	if (err < 0)
+		goto fail;
+
+	if (pages > 1) {
+		addr += PAGE_SIZE;
+		err = remap_pfn_range(vma, addr, upper_pfn, PAGE_SIZE, prot);
+		if (err < 0)
+			goto fail;
+	}
+
+	h_key = (unsigned long)vma->vm_mm / sizeof(*vma->vm_mm);
+
+	spin_lock(&ucs->lock);
+	hash_for_each_possible(ucs->mappings, tmp, link, h_key) {
+		if (tmp->mm == vma->vm_mm) {
+			spin_unlock(&ucs->lock);
+			err = -EBUSY;
+			goto fail;
+		}
+	}
+	hash_add(ucs->mappings, &mapping->link, h_key);
+	spin_unlock(&ucs->lock);
+
+	return 0;
+fail:
+	kfree(mapping);
+
+	return err;
+}
+
+static long
+mmio_ucs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct clocksource_user_mapping *mapping;
+	struct clksrc_user_mmio_info __user *u;
+	unsigned long upper_pfn, lower_pfn;
+	struct clksrc_user_mmio_info info;
+	struct clocksource_user_mmio *ucs;
+	unsigned int bits_upper;
+	void __user *map_base;
+	unsigned long h_key;
+	size_t size;
+
+	u = (struct clksrc_user_mmio_info __user *)arg;
+
+	switch (cmd) {
+	case CLKSRC_USER_MMIO_MAP:
+		break;
+	default:
+		return -ENOTTY;
+	}
+
+	h_key = (unsigned long)current->mm / sizeof(*current->mm);
+
+	ucs = file->private_data;
+	upper_pfn = ucs->phys_upper >> PAGE_SHIFT;
+	lower_pfn = ucs->phys_lower >> PAGE_SHIFT;
+	bits_upper = fls64(ucs->mmio.clksrc.mask) - ucs->bits_lower;
+	size = PAGE_SIZE;
+	if (bits_upper && upper_pfn != lower_pfn)
+		size += PAGE_SIZE;
+
+	do {
+		spin_lock(&ucs->lock);
+		hash_for_each_possible(ucs->mappings, mapping, link, h_key) {
+			if (mapping->mm == current->mm) {
+				spin_unlock(&ucs->lock);
+				map_base = mapping->regs;
+				goto found;
+			}
+		}
+		spin_unlock(&ucs->lock);
+
+		map_base = (void *)
+			vm_mmap(file, 0, size, PROT_READ, MAP_SHARED, 0);
+	} while (IS_ERR(map_base) && PTR_ERR(map_base) == -EBUSY);
+
+	if (IS_ERR(map_base))
+		return PTR_ERR(map_base);
+
+found:
+	info.type = ucs->type;
+	info.reg_lower = map_base + offset_in_page(ucs->phys_lower);
+	info.mask_lower = ucs->mmio.clksrc.mask;
+	info.bits_lower = ucs->bits_lower;
+	info.reg_upper = NULL;
+	if (ucs->phys_upper)
+		info.reg_upper = map_base + (size - PAGE_SIZE)
+			+ offset_in_page(ucs->phys_upper);
+	info.mask_upper = ucs->mask_upper;
+
+	return copy_to_user(u, &info, sizeof(*u)) ? -EFAULT : 0;
+}
+
+static int mmio_ucs_open(struct inode *inode, struct file *file)
+{
+	struct clocksource_user_mmio *ucs;
+
+	if (file->f_mode & FMODE_WRITE)
+		return -EINVAL;
+
+	ucs = container_of(inode->i_cdev, typeof(*ucs), cdev);
+	file->private_data = ucs;
+
+	return 0;
+}
+
+static const struct file_operations mmio_ucs_fops = {
+	.owner		= THIS_MODULE,
+	.unlocked_ioctl = mmio_ucs_ioctl,
+	.open		= mmio_ucs_open,
+	.mmap		= mmio_ucs_mmap,
+};
+
+static int __init
+ucs_create_cdev(struct class *class, struct clocksource_user_mmio *ucs)
+{
+	int err;
+
+	ucs->dev = device_create(class, NULL,
+				MKDEV(MAJOR(user_mmio_devt), ucs->id),
+				ucs, "ucs/%d", ucs->id);
+	if (IS_ERR(ucs->dev))
+		return PTR_ERR(ucs->dev);
+
+	spin_lock_init(&ucs->lock);
+	hash_init(ucs->mappings);
+
+	cdev_init(&ucs->cdev, &mmio_ucs_fops);
+	ucs->cdev.kobj.parent = &ucs->dev->kobj;
+
+	err = cdev_add(&ucs->cdev, ucs->dev->devt, 1);
+	if (err < 0)
+		goto err_device_destroy;
+
+	return 0;
+
+err_device_destroy:
+	device_destroy(class, MKDEV(MAJOR(user_mmio_devt), ucs->id));
+	return err;
+}
+
+static unsigned long default_revmap(void *virt)
+{
+	struct vm_struct *vm;
+
+	vm = find_vm_area(virt);
+	if (!vm)
+		return 0;
+
+	return vm->phys_addr + (virt - vm->addr);
+}
+
+int __init clocksource_user_mmio_init(struct clocksource_user_mmio *ucs,
+				      const struct clocksource_mmio_regs *regs,
+				      unsigned long hz)
+{
+	static u64 (*user_types[CLKSRC_MMIO_TYPE_NR])(struct clocksource *) = {
+		[CLKSRC_MMIO_L_UP] = clocksource_mmio_readl_up,
+		[CLKSRC_MMIO_L_DOWN] = clocksource_mmio_readl_down,
+		[CLKSRC_DMMIO_L_UP] = clocksource_dual_mmio_readl_up,
+		[CLKSRC_MMIO_W_UP] = clocksource_mmio_readw_up,
+		[CLKSRC_MMIO_W_DOWN] = clocksource_mmio_readw_down,
+		[CLKSRC_DMMIO_W_UP] = clocksource_dual_mmio_readw_up,
+	};
+	const char *name = ucs->mmio.clksrc.name;
+	unsigned long phys_upper = 0, phys_lower;
+	enum clksrc_user_mmio_type type;
+	unsigned long (*revmap)(void *);
+	int err;
+
+	if (regs->bits_lower > 32 || regs->bits_lower < 16 ||
+	    regs->bits_upper > 32)
+		return -EINVAL;
+
+	for (type = 0; type < ARRAY_SIZE(user_types); type++)
+		if (ucs->mmio.clksrc.read == user_types[type])
+			break;
+
+	if (type == ARRAY_SIZE(user_types))
+		return -EINVAL;
+
+	if (!(ucs->mmio.clksrc.flags & CLOCK_SOURCE_IS_CONTINUOUS))
+		return -EINVAL;
+
+	revmap = regs->revmap;
+	if (!revmap)
+		revmap = default_revmap;
+
+	phys_lower = revmap(regs->reg_lower);
+	if (!phys_lower)
+		return -EINVAL;
+
+	if (regs->bits_upper) {
+		phys_upper = revmap(regs->reg_upper);
+		if (!phys_upper)
+			return -EINVAL;
+	}
+
+	ucs->mmio.reg = regs->reg_lower;
+	ucs->type = type;
+	ucs->bits_lower = regs->bits_lower;
+	ucs->reg_upper = regs->reg_upper;
+	ucs->mask_lower = CLOCKSOURCE_MASK(regs->bits_lower);
+	ucs->mask_upper = CLOCKSOURCE_MASK(regs->bits_upper);
+	ucs->phys_lower = phys_lower;
+	ucs->phys_upper = phys_upper;
+	spin_lock_init(&ucs->lock);
+
+	err = clocksource_register_hz(&ucs->mmio.clksrc, hz);
+	if (err < 0)
+		return err;
+
+	spin_lock(&user_clksrcs_lock);
+
+	ucs->id = user_clksrcs_count++;
+	if (ucs->id < CLKSRC_USER_MMIO_MAX)
+		list_add_tail(&ucs->link, &user_clksrcs);
+
+	spin_unlock(&user_clksrcs_lock);
+
+	if (ucs->id >= CLKSRC_USER_MMIO_MAX) {
+		pr_warn("%s: Too many clocksources\n", name);
+		err = -EAGAIN;
+		goto fail;
+	}
+
+	ucs->mmio.clksrc.vdso_type = CLOCKSOURCE_VDSO_MMIO + ucs->id;
+
+	if (user_mmio_class) {
+		err = ucs_create_cdev(user_mmio_class, ucs);
+		if (err < 0) {
+			pr_warn("%s: Failed to add character device\n", name);
+			goto fail;
+		}
+	}
+
+	return 0;
+
+fail:
+	clocksource_unregister(&ucs->mmio.clksrc);
+
+	return err;
+}
+
+int __init clocksource_user_single_mmio_init(
+	void __iomem *base, const char *name,
+	unsigned long hz, int rating, unsigned int bits,
+	u64 (*read)(struct clocksource *))
+{
+	struct clocksource_user_mmio *ucs;
+	struct clocksource_mmio_regs regs;
+	int ret;
+
+	ucs = kzalloc(sizeof(*ucs), GFP_KERNEL);
+	if (!ucs)
+		return -ENOMEM;
+
+	mmio_base_init(name, rating, bits, read, &ucs->mmio.clksrc);
+	regs.reg_lower = base;
+	regs.reg_upper = NULL;
+	regs.bits_lower = bits;
+	regs.bits_upper = 0;
+	regs.revmap = NULL;
+
+	ret = clocksource_user_mmio_init(ucs, &regs, hz);
+	if (ret)
+		kfree(ucs);
+
+	return ret;
+}
+
+static int __init mmio_clksrc_chr_dev_init(void)
+{
+	struct clocksource_user_mmio *ucs;
+	struct class *class;
+	int err;
+
+	class = class_create(THIS_MODULE, "mmio_ucs");
+	if (IS_ERR(class)) {
+		pr_err("couldn't create user mmio clocksources class\n");
+		return PTR_ERR(class);
+	}
+
+	err = alloc_chrdev_region(&user_mmio_devt, 0, CLKSRC_USER_MMIO_MAX,
+				  "mmio_ucs");
+	if (err < 0) {
+		pr_err("failed to allocate user mmio clocksources character devivces region\n");
+		goto err_class_destroy;
+	}
+
+	/*
+	 * Calling list_for_each_entry is safe here: clocksources are always
+	 * added to the list tail, never removed.
+	 */
+	spin_lock(&user_clksrcs_lock);
+	list_for_each_entry(ucs, &user_clksrcs, link) {
+		spin_unlock(&user_clksrcs_lock);
+
+		err = ucs_create_cdev(class, ucs);
+		if (err < 0)
+			pr_err("%s: Failed to add character device\n",
+			       ucs->mmio.clksrc.name);
+
+		spin_lock(&user_clksrcs_lock);
+	}
+	user_mmio_class = class;
+	spin_unlock(&user_clksrcs_lock);
+
+	return 0;
+
+err_class_destroy:
+	class_destroy(class);
+	return err;
+}
+device_initcall(mmio_clksrc_chr_dev_init);
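
The character device registered above shows up as /dev/ucs/<id>, one node
per user-mappable clocksource. A consumer asks the CLKSRC_USER_MMIO_MAP
ioctl to map the counter page(s) into its address space (the ioctl calls
vm_mmap() on the caller's behalf), after which it can sample the hardware
counter without entering the kernel. A hedged userspace sketch (the uapi
header name below is an assumption; struct clksrc_user_mmio_info and the
ioctl are the ABI this file exports):

	/* Hedged userspace sketch: sample a user-mmio clocksource. */
	#include <stdio.h>
	#include <stdint.h>
	#include <fcntl.h>
	#include <unistd.h>
	#include <sys/ioctl.h>
	#include <linux/clksrc_user_mmio.h>	/* assumed header location */

	int main(void)
	{
		struct clksrc_user_mmio_info info;
		volatile uint32_t *reg;
		int fd;

		fd = open("/dev/ucs/0", O_RDONLY);
		if (fd < 0)
			return 1;

		/* Maps the register page(s) and reports their layout. */
		if (ioctl(fd, CLKSRC_USER_MMIO_MAP, &info)) {
			close(fd);
			return 1;
		}

		reg = (volatile uint32_t *)info.reg_lower;
		printf("raw counter: %u\n",
		       (unsigned int)(*reg & info.mask_lower));

		close(fd);
		return 0;
	}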
diff --git a/kernel/drivers/clocksource/timer-imx-gpt.c b/kernel/drivers/clocksource/timer-imx-gpt.c
index 7b2c70f..5c46458 100644
--- a/kernel/drivers/clocksource/timer-imx-gpt.c
+++ b/kernel/drivers/clocksource/timer-imx-gpt.c
@@ -163,8 +163,8 @@
 	sched_clock_reg = reg;
 
 	sched_clock_register(mxc_read_sched_clock, 32, c);
-	return clocksource_mmio_init(reg, "mxc_timer1", c, 200, 32,
-			clocksource_mmio_readl_up);
+	return clocksource_user_single_mmio_init(reg, "mxc_timer1", c, 200, 32,
+					 clocksource_mmio_readl_up);
 }
 
 /* clock event */
@@ -264,7 +264,7 @@
 
 	imxtm->gpt->gpt_irq_acknowledge(imxtm);
 
-	ced->event_handler(ced);
+	clockevents_handle_event(ced);
 
 	return IRQ_HANDLED;
 }
@@ -274,7 +274,7 @@
 	struct clock_event_device *ced = &imxtm->ced;
 
 	ced->name = "mxc_timer1";
-	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ;
+	ced->features = CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_DYNIRQ | CLOCK_EVT_FEAT_PIPELINE;
 	ced->set_state_shutdown = mxc_shutdown;
 	ced->set_state_oneshot = mxc_set_oneshot;
 	ced->tick_resume = mxc_shutdown;
diff --git a/kernel/drivers/clocksource/timer-sun4i.c b/kernel/drivers/clocksource/timer-sun4i.c
index 0ba8155..43886c3 100644
--- a/kernel/drivers/clocksource/timer-sun4i.c
+++ b/kernel/drivers/clocksource/timer-sun4i.c
@@ -19,6 +19,7 @@
 #include <linux/interrupt.h>
 #include <linux/irq.h>
 #include <linux/irqreturn.h>
+#include <linux/dovetail.h>
 #include <linux/sched_clock.h>
 #include <linux/of.h>
 #include <linux/of_address.h>
@@ -135,7 +136,7 @@
 	struct timer_of *to = to_timer_of(evt);
 
 	sun4i_timer_clear_interrupt(timer_of_base(to));
-	evt->event_handler(evt);
+	clockevents_handle_event(evt);
 
 	return IRQ_HANDLED;
 }
@@ -146,7 +147,7 @@
 	.clkevt = {
 		.name = "sun4i_tick",
 		.rating = 350,
-		.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT,
+		.features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT | CLOCK_EVT_FEAT_PIPELINE,
 		.set_state_shutdown = sun4i_clkevt_shutdown,
 		.set_state_periodic = sun4i_clkevt_set_periodic,
 		.set_state_oneshot = sun4i_clkevt_set_oneshot,
diff --git a/kernel/drivers/clocksource/timer-ti-dm-systimer.c b/kernel/drivers/clocksource/timer-ti-dm-systimer.c
index 2737407..345569d 100644
--- a/kernel/drivers/clocksource/timer-ti-dm-systimer.c
+++ b/kernel/drivers/clocksource/timer-ti-dm-systimer.c
@@ -57,7 +57,7 @@
 };
 
 struct dmtimer_clocksource {
-	struct clocksource dev;
+	struct clocksource_user_mmio mmio;
 	struct dmtimer_systimer t;
 	unsigned int loadval;
 };
@@ -437,7 +437,7 @@
 	struct dmtimer_systimer *t = &clkevt->t;
 
 	writel_relaxed(OMAP_TIMER_INT_OVERFLOW, t->base + t->irq_stat);
-	clkevt->dev.event_handler(&clkevt->dev);
+	clockevents_handle_event(&clkevt->dev);
 
 	return IRQ_HANDLED;
 }
@@ -548,7 +548,7 @@
 	 * We mostly use cpuidle_coupled with ARM local timers for runtime,
 	 * so there's probably no use for CLOCK_EVT_FEAT_DYNIRQ here.
 	 */
-	dev->features = features;
+	dev->features = features | CLOCK_EVT_FEAT_PIPELINE;
 	dev->rating = rating;
 	dev->set_next_event = dmtimer_set_next_event;
 	dev->set_state_shutdown = dmtimer_clockevent_shutdown;
@@ -706,15 +706,7 @@
 static struct dmtimer_clocksource *
 to_dmtimer_clocksource(struct clocksource *cs)
 {
-	return container_of(cs, struct dmtimer_clocksource, dev);
-}
-
-static u64 dmtimer_clocksource_read_cycles(struct clocksource *cs)
-{
-	struct dmtimer_clocksource *clksrc = to_dmtimer_clocksource(cs);
-	struct dmtimer_systimer *t = &clksrc->t;
-
-	return (u64)readl_relaxed(t->base + t->counter);
+	return container_of(cs, struct dmtimer_clocksource, mmio.mmio.clksrc);
 }
 
 static void __iomem *dmtimer_sched_clock_counter;
@@ -753,6 +745,7 @@
 static int __init dmtimer_clocksource_init(struct device_node *np)
 {
 	struct dmtimer_clocksource *clksrc;
+	struct clocksource_mmio_regs mmr;
 	struct dmtimer_systimer *t;
 	struct clocksource *dev;
 	int error;
@@ -761,7 +754,7 @@
 	if (!clksrc)
 		return -ENOMEM;
 
-	dev = &clksrc->dev;
+	dev = &clksrc->mmio.mmio.clksrc;
 	t = &clksrc->t;
 
 	error = dmtimer_systimer_setup(np, t);
@@ -770,7 +763,7 @@
 
 	dev->name = "dmtimer";
 	dev->rating = 300;
-	dev->read = dmtimer_clocksource_read_cycles;
+	dev->read = clocksource_mmio_readl_up;
 	dev->mask = CLOCKSOURCE_MASK(32);
 	dev->flags = CLOCK_SOURCE_IS_CONTINUOUS;
 
@@ -793,7 +786,13 @@
 		sched_clock_register(dmtimer_read_sched_clock, 32, t->rate);
 	}
 
-	if (clocksource_register_hz(dev, t->rate))
+	mmr.reg_lower = t->base + t->counter;
+	mmr.bits_lower = 32;
+	mmr.reg_upper = NULL;
+	mmr.bits_upper = 0;
+	mmr.revmap = NULL;
+
+	if (clocksource_user_mmio_init(&clksrc->mmio, &mmr, t->rate))
 		pr_err("Could not register clocksource %pOF\n", np);
 
 	return 0;
diff --git a/kernel/drivers/cpuidle/cpuidle.c b/kernel/drivers/cpuidle/cpuidle.c
index ab77a36..9c6004d 100644
--- a/kernel/drivers/cpuidle/cpuidle.c
+++ b/kernel/drivers/cpuidle/cpuidle.c
@@ -17,6 +17,7 @@
 #include <linux/pm_qos.h>
 #include <linux/cpu.h>
 #include <linux/cpuidle.h>
+#include <linux/irq_pipeline.h>
 #include <linux/ktime.h>
 #include <linux/hrtimer.h>
 #include <linux/module.h>
@@ -219,6 +220,22 @@
 	broadcast = !!(target_state->flags & CPUIDLE_FLAG_TIMER_STOP);
 
 	/*
+	 * A companion core running on the oob stage of the IRQ
+	 * pipeline may deny switching to a deeper C-state. If so,
+	 * call the default idle routine instead. If the core cannot
+	 * tolerate the latency induced by the default idling
+	 * operation, then CPUIDLE is not usable and should be
+	 * disabled at build time. The in-band stage is currently
+	 * stalled, hard irqs are on. irq_cpuidle_enter() leaves us
+	 * stalled but returns with hard irqs off so that no event may
+	 * sneak in until we actually go idle.
+	 */
+	if (!irq_cpuidle_enter(dev, target_state)) {
+		default_idle_call();
+		return -EBUSY;
+	}
+
+	/*
 	 * Tell the time framework to switch to a broadcast timer because our
 	 * local timer will be shut down.  If a local timer is used from another
 	 * CPU as a broadcast timer, this call may fail if it is not available.
@@ -247,6 +264,7 @@
 	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
 		rcu_idle_enter();
 	entered_state = target_state->enter(dev, drv, index);
+	hard_cond_local_irq_enable();
 	if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE))
 		rcu_idle_exit();
 	start_critical_timings();
diff --git a/kernel/drivers/cpuidle/poll_state.c b/kernel/drivers/cpuidle/poll_state.c
index f7e8361..1245138 100644
--- a/kernel/drivers/cpuidle/poll_state.c
+++ b/kernel/drivers/cpuidle/poll_state.c
@@ -17,7 +17,7 @@
 
 	dev->poll_time_limit = false;
 
-	local_irq_enable();
+	local_irq_enable_full();
 	if (!current_set_polling_and_test()) {
 		unsigned int loop_count = 0;
 		u64 limit;
diff --git a/kernel/drivers/dma/Kconfig b/kernel/drivers/dma/Kconfig
index 0801334..821fe3d 100644
--- a/kernel/drivers/dma/Kconfig
+++ b/kernel/drivers/dma/Kconfig
@@ -47,6 +47,10 @@
 config DMA_VIRTUAL_CHANNELS
 	tristate
 
+config DMA_VIRTUAL_CHANNELS_OOB
+	def_bool n
+	depends on DMA_VIRTUAL_CHANNELS && DOVETAIL
+
 config DMA_ACPI
 	def_bool y
 	depends on ACPI
@@ -137,6 +141,13 @@
 	depends on ARCH_BCM2835
 	select DMA_ENGINE
 	select DMA_VIRTUAL_CHANNELS
+
+config DMA_BCM2835_OOB
+	bool "Out-of-band support for BCM2835 DMA"
+	depends on DMA_BCM2835 && DOVETAIL
+	select DMA_VIRTUAL_CHANNELS_OOB
+	help
+	  Enable out-of-band requests to BCM2835 DMA.
 
 config DMA_JZ4780
 	tristate "JZ4780 DMA support"
@@ -275,6 +286,13 @@
 	  Support the i.MX SDMA engine. This engine is integrated into
 	  Freescale i.MX25/31/35/51/53/6 chips.
 
+config IMX_SDMA_OOB
+	bool "Out-of-band support for i.MX SDMA"
+	depends on IMX_SDMA && DOVETAIL
+	select DMA_VIRTUAL_CHANNELS_OOB
+	help
+	  Enable out-of-band requests to i.MX SDMA.
+
 config INTEL_IDMA64
 	tristate "Intel integrated DMA 64-bit support"
 	select DMA_ENGINE
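
Both out-of-band DMA options depend on Dovetail and pull in
DMA_VIRTUAL_CHANNELS_OOB so the virtual-channel locks switch to their hard
variants. A minimal configuration fragment enabling the BCM2835 side, as a
sketch assuming a Dovetail-enabled kernel:

	CONFIG_DOVETAIL=y
	CONFIG_DMA_ENGINE=y
	CONFIG_DMA_BCM2835=y
	CONFIG_DMA_BCM2835_OOB=y
	# selected by the above:
	CONFIG_DMA_VIRTUAL_CHANNELS_OOB=y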
diff --git a/kernel/drivers/dma/bcm2835-dma.c b/kernel/drivers/dma/bcm2835-dma.c
index 630dfbb..6161f76 100644
--- a/kernel/drivers/dma/bcm2835-dma.c
+++ b/kernel/drivers/dma/bcm2835-dma.c
@@ -29,6 +29,7 @@
 #include <linux/slab.h>
 #include <linux/io.h>
 #include <linux/spinlock.h>
+#include <linux/irqstage.h>
 #include <linux/of.h>
 #include <linux/of_dma.h>
 
@@ -435,10 +436,20 @@
 	writel(BCM2835_DMA_RESET, chan_base + BCM2835_DMA_CS);
 }
 
+static inline void bcm2835_dma_enable_channel(struct bcm2835_chan *c)
+{
+	writel(c->desc->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
+	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+}
+
+static inline bool bcm2835_dma_oob_capable(void)
+{
+	return IS_ENABLED(CONFIG_DMA_BCM2835_OOB);
+}
+
 static void bcm2835_dma_start_desc(struct bcm2835_chan *c)
 {
 	struct virt_dma_desc *vd = vchan_next_desc(&c->vc);
-	struct bcm2835_desc *d;
 
 	if (!vd) {
 		c->desc = NULL;
@@ -447,10 +458,41 @@
 
 	list_del(&vd->node);
 
-	c->desc = d = to_bcm2835_dma_desc(&vd->tx);
+	c->desc = to_bcm2835_dma_desc(&vd->tx);
+	if (!bcm2835_dma_oob_capable() || !vchan_oob_pulsed(vd))
+		bcm2835_dma_enable_channel(c);
+}
 
-	writel(d->cb_list[0].paddr, c->chan_base + BCM2835_DMA_ADDR);
-	writel(BCM2835_DMA_ACTIVE, c->chan_base + BCM2835_DMA_CS);
+static bool do_channel(struct bcm2835_chan *c, struct bcm2835_desc *d)
+{
+	struct dmaengine_desc_callback cb;
+
+	if (running_oob()) {
+		if (!vchan_oob_handled(&d->vd))
+			return false;
+		dmaengine_desc_get_callback(&d->vd.tx, &cb);
+		if (dmaengine_desc_callback_valid(&cb)) {
+			vchan_unlock(&c->vc);
+			dmaengine_desc_callback_invoke(&cb, NULL);
+			vchan_lock(&c->vc);
+		}
+		return true;
+	}
+
+	if (d->cyclic) {
+		/* call the cyclic callback */
+		vchan_cyclic_callback(&d->vd);
+	} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
+		vchan_cookie_complete(&c->desc->vd);
+		bcm2835_dma_start_desc(c);
+	}
+
+	return true;
+}
+
+static inline bool is_base_irq_handler(void)
+{
+	return !bcm2835_dma_oob_capable() || running_oob();
 }
 
 static irqreturn_t bcm2835_dma_callback(int irq, void *data)
@@ -460,7 +502,7 @@
 	unsigned long flags;
 
 	/* check the shared interrupt */
-	if (c->irq_flags & IRQF_SHARED) {
+	if (is_base_irq_handler() && (c->irq_flags & IRQF_SHARED)) {
 		/* check if the interrupt is enabled */
 		flags = readl(c->chan_base + BCM2835_DMA_CS);
 		/* if not set then we are not the reason for the irq */
@@ -468,7 +510,8 @@
 			return IRQ_NONE;
 	}
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	/* CAUTION: If running in-band, hard irqs are on. */
+	vchan_lock_irqsave(&c->vc, flags);
 
 	/*
 	 * Clear the INT flag to receive further interrupts. Keep the channel
@@ -477,22 +520,27 @@
 	 * if this IRQ handler is threaded.) If the channel is finished, it
 	 * will remain idle despite the ACTIVE flag being set.
 	 */
-	writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
-	       c->chan_base + BCM2835_DMA_CS);
+	if (is_base_irq_handler())
+		writel(BCM2835_DMA_INT | BCM2835_DMA_ACTIVE,
+			c->chan_base + BCM2835_DMA_CS);
 
 	d = c->desc;
+	if (!d)
+		goto out;
 
-	if (d) {
-		if (d->cyclic) {
-			/* call the cyclic callback */
-			vchan_cyclic_callback(&d->vd);
-		} else if (!readl(c->chan_base + BCM2835_DMA_ADDR)) {
-			vchan_cookie_complete(&c->desc->vd);
-			bcm2835_dma_start_desc(c);
-		}
+	if (bcm2835_dma_oob_capable() && running_oob()) {
+		/*
+		 * If we cannot process this from the out-of-band
+		 * stage, schedule a callback from in-band context.
+		 */
+		if (!do_channel(c, d))
+			irq_post_inband(irq);
+	} else {
+		do_channel(c, d);
 	}
 
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+out:
+	vchan_unlock_irqrestore(&c->vc, flags);
 
 	return IRQ_HANDLED;
 }
@@ -571,7 +619,7 @@
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_lock_irqsave(&c->vc, flags);
 	vd = vchan_find_desc(&c->vc, cookie);
 	if (vd) {
 		txstate->residue =
@@ -592,7 +640,7 @@
 		txstate->residue = 0;
 	}
 
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_unlock_irqrestore(&c->vc, flags);
 
 	return ret;
 }
@@ -602,12 +650,35 @@
 	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_lock_irqsave(&c->vc, flags);
 	if (vchan_issue_pending(&c->vc) && !c->desc)
 		bcm2835_dma_start_desc(c);
 
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_unlock_irqrestore(&c->vc, flags);
 }
+
+#ifdef CONFIG_DMA_BCM2835_OOB
+static int bcm2835_dma_pulse_oob(struct dma_chan *chan)
+{
+	struct bcm2835_chan *c = to_bcm2835_dma_chan(chan);
+	unsigned long flags;
+	int ret = -EIO;
+
+	vchan_lock_irqsave(&c->vc, flags);
+	if (c->desc && vchan_oob_pulsed(&c->desc->vd)) {
+		bcm2835_dma_enable_channel(c);
+		ret = 0;
+	}
+	vchan_unlock_irqrestore(&c->vc, flags);
+
+	return ret;
+}
+#else
+static int bcm2835_dma_pulse_oob(struct dma_chan *chan)
+{
+	return -ENOTSUPP;
+}
+#endif
 
 static struct dma_async_tx_descriptor *bcm2835_dma_prep_dma_memcpy(
 	struct dma_chan *chan, dma_addr_t dst, dma_addr_t src,
@@ -649,6 +720,15 @@
 	u32 info = BCM2835_DMA_WAIT_RESP;
 	u32 extra = BCM2835_DMA_INT_EN;
 	size_t frames;
+
+	if (!bcm2835_dma_oob_capable()) {
+		if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+			dev_err(chan->device->dev,
+				"%s: out-of-band slave transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	}
 
 	if (!is_slave_direction(direction)) {
 		dev_err(chan->device->dev,
@@ -715,7 +795,21 @@
 		return NULL;
 	}
 
-	if (flags & DMA_PREP_INTERRUPT)
+	if (!bcm2835_dma_oob_capable()) {
+		if (flags & DMA_OOB_INTERRUPT) {
+			dev_err(chan->device->dev,
+				"%s: out-of-band cyclic transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	} else if (flags & DMA_OOB_PULSE) {
+		dev_err(chan->device->dev,
+			"%s: no pulse mode with out-of-band cyclic transfers\n",
+			__func__);
+		return NULL;
+	}
+
+	if (flags & (DMA_PREP_INTERRUPT|DMA_OOB_INTERRUPT))
 		extra |= BCM2835_DMA_INT_EN;
 	else
 		period_len = buf_len;
@@ -791,7 +885,7 @@
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	spin_lock_irqsave(&c->vc.lock, flags);
+	vchan_lock_irqsave(&c->vc, flags);
 
 	/* stop DMA activity */
 	if (c->desc) {
@@ -801,7 +895,7 @@
 	}
 
 	vchan_get_all_descriptors(&c->vc, &head);
-	spin_unlock_irqrestore(&c->vc.lock, flags);
+	vchan_unlock_irqrestore(&c->vc, flags);
 	vchan_dma_desc_free_list(&c->vc, &head);
 
 	return 0;
@@ -912,11 +1006,13 @@
 	dma_cap_set(DMA_SLAVE, od->ddev.cap_mask);
 	dma_cap_set(DMA_PRIVATE, od->ddev.cap_mask);
 	dma_cap_set(DMA_CYCLIC, od->ddev.cap_mask);
+	dma_cap_set(DMA_OOB, od->ddev.cap_mask);
 	dma_cap_set(DMA_MEMCPY, od->ddev.cap_mask);
 	od->ddev.device_alloc_chan_resources = bcm2835_dma_alloc_chan_resources;
 	od->ddev.device_free_chan_resources = bcm2835_dma_free_chan_resources;
 	od->ddev.device_tx_status = bcm2835_dma_tx_status;
 	od->ddev.device_issue_pending = bcm2835_dma_issue_pending;
+	od->ddev.device_pulse_oob = bcm2835_dma_pulse_oob;
 	od->ddev.device_prep_dma_cyclic = bcm2835_dma_prep_dma_cyclic;
 	od->ddev.device_prep_slave_sg = bcm2835_dma_prep_slave_sg;
 	od->ddev.device_prep_dma_memcpy = bcm2835_dma_prep_dma_memcpy;
@@ -982,10 +1078,10 @@
 			continue;
 
 		/* check if there are other channels that also use this irq */
-		irq_flags = 0;
+		irq_flags = IS_ENABLED(CONFIG_DMA_BCM2835_OOB) ? IRQF_OOB : 0;
 		for (j = 0; j <= BCM2835_DMA_MAX_DMA_CHAN_SUPPORTED; j++)
 			if ((i != j) && (irq[j] == irq[i])) {
-				irq_flags = IRQF_SHARED;
+				irq_flags |= IRQF_SHARED;
 				break;
 			}
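
With DMA_OOB wired up, a real-time client prepares a slave transfer
carrying the new DMA_OOB_PULSE flag, submits and issues it as usual (the
channel stays idle), then fires each shot from the oob stage through the
new device_pulse_oob operation. A hedged client-side sketch: buffer and
channel setup are elided, dmaengine_prep_slave_single() is the stock
helper, the OOB flags and the pulse op come from this patch, and a
convenience wrapper for the raw op may exist in the full tree:

	#include <linux/dmaengine.h>

	/* Hedged sketch: arm a descriptor, then pulse it out-of-band. */
	static int example_oob_shot(struct dma_chan *chan, dma_addr_t buf,
				    size_t len)
	{
		struct dma_async_tx_descriptor *desc;

		desc = dmaengine_prep_slave_single(chan, buf, len,
						   DMA_MEM_TO_DEV,
						   DMA_OOB_INTERRUPT | DMA_OOB_PULSE);
		if (!desc)
			return -EIO;

		dmaengine_submit(desc);
		dma_async_issue_pending(chan);	/* descriptor armed, channel idle */

		/* Later, from the oob stage: trigger the shot. */
		return chan->device->device_pulse_oob(chan);
	}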
 
diff --git a/kernel/drivers/dma/dmaengine.c b/kernel/drivers/dma/dmaengine.c
index af3ee28..e79a94d 100644
--- a/kernel/drivers/dma/dmaengine.c
+++ b/kernel/drivers/dma/dmaengine.c
@@ -578,7 +578,8 @@
 
 	/* check if the channel supports slave transactions */
 	if (!(test_bit(DMA_SLAVE, device->cap_mask.bits) ||
-	      test_bit(DMA_CYCLIC, device->cap_mask.bits)))
+	      test_bit(DMA_CYCLIC, device->cap_mask.bits) ||
+	      test_bit(DMA_OOB, device->cap_mask.bits)))
 		return -ENXIO;
 
 	/*
@@ -1209,6 +1210,13 @@
 		return -EIO;
 	}
 
+	if (dma_has_cap(DMA_OOB, device->cap_mask) && !device->device_pulse_oob) {
+		dev_err(device->dev,
+			"Device claims capability %s, but pulse handler is not defined\n",
+			"DMA_OOB");
+		return -EIO;
+	}
+
 	if (dma_has_cap(DMA_INTERLEAVE, device->cap_mask) && !device->device_prep_interleaved_dma) {
 		dev_err(device->dev,
 			"Device claims capability %s, but op is not defined\n",
diff --git a/kernel/drivers/dma/imx-sdma.c b/kernel/drivers/dma/imx-sdma.c
index 2283dcd..3648d3c 100644
--- a/kernel/drivers/dma/imx-sdma.c
+++ b/kernel/drivers/dma/imx-sdma.c
@@ -444,6 +444,10 @@
 	struct sdma_buffer_descriptor	*bd0;
 	/* clock ratio for AHB:SDMA core. 1:1 is 1, 2:1 is 0*/
 	bool				clk_ratio;
+#ifdef CONFIG_IMX_SDMA_OOB
+	hard_spinlock_t			oob_lock;
+	u32				pending_stat;
+#endif
 };
 
 static int sdma_config_write(struct dma_chan *chan,
@@ -748,6 +752,11 @@
 	return container_of(t, struct sdma_desc, vd.tx);
 }
 
+static inline bool sdma_oob_capable(void)
+{
+	return IS_ENABLED(CONFIG_IMX_SDMA_OOB);
+}
+
 static void sdma_start_desc(struct sdma_channel *sdmac)
 {
 	struct virt_dma_desc *vd = vchan_next_desc(&sdmac->vc);
@@ -765,7 +774,8 @@
 
 	sdma->channel_control[channel].base_bd_ptr = desc->bd_phys;
 	sdma->channel_control[channel].current_bd_ptr = desc->bd_phys;
-	sdma_enable_channel(sdma, sdmac->channel);
+	if (!sdma_oob_capable() || !vchan_oob_pulsed(vd))
+		sdma_enable_channel(sdma, sdmac->channel);
 }
 
 static void sdma_update_channel_loop(struct sdma_channel *sdmac)
@@ -809,9 +819,9 @@
 		 * SDMA transaction status by the time the client tasklet is
 		 * executed.
 		 */
-		spin_unlock(&sdmac->vc.lock);
+		vchan_unlock(&sdmac->vc);
 		dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
-		spin_lock(&sdmac->vc.lock);
+		vchan_lock(&sdmac->vc);
 
 		if (error)
 			sdmac->status = old_status;
@@ -821,20 +831,21 @@
 static void mxc_sdma_handle_channel_normal(struct sdma_channel *data)
 {
 	struct sdma_channel *sdmac = (struct sdma_channel *) data;
+	struct sdma_desc *desc = sdmac->desc;
 	struct sdma_buffer_descriptor *bd;
 	int i, error = 0;
 
-	sdmac->desc->chn_real_count = 0;
+	desc->chn_real_count = 0;
 	/*
 	 * non loop mode. Iterate over all descriptors, collect
 	 * errors and call callback function
 	 */
-	for (i = 0; i < sdmac->desc->num_bd; i++) {
-		bd = &sdmac->desc->bd[i];
+	for (i = 0; i < desc->num_bd; i++) {
+		bd = &desc->bd[i];
 
 		 if (bd->mode.status & (BD_DONE | BD_RROR))
 			error = -EIO;
-		 sdmac->desc->chn_real_count += bd->mode.count;
+		 desc->chn_real_count += bd->mode.count;
 	}
 
 	if (error)
@@ -843,36 +854,83 @@
 		sdmac->status = DMA_COMPLETE;
 }
 
-static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+static unsigned long sdma_do_channels(struct sdma_engine *sdma,
+				unsigned long stat)
 {
-	struct sdma_engine *sdma = dev_id;
-	unsigned long stat;
+	unsigned long mask = stat;
 
-	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
-	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
-	/* channel 0 is special and not handled here, see run_channel0() */
-	stat &= ~1;
-
-	while (stat) {
-		int channel = fls(stat) - 1;
+	while (mask) {
+		int channel = fls(mask) - 1;
 		struct sdma_channel *sdmac = &sdma->channel[channel];
 		struct sdma_desc *desc;
 
-		spin_lock(&sdmac->vc.lock);
+		vchan_lock(&sdmac->vc);
 		desc = sdmac->desc;
 		if (desc) {
+			if (running_oob() && !vchan_oob_handled(&desc->vd))
+				goto next;
 			if (sdmac->flags & IMX_DMA_SG_LOOP) {
 				sdma_update_channel_loop(sdmac);
 			} else {
 				mxc_sdma_handle_channel_normal(sdmac);
+				if (running_oob()) {
+					vchan_unlock(&sdmac->vc);
+					dmaengine_desc_get_callback_invoke(&desc->vd.tx, NULL);
+					__clear_bit(channel, &stat);
+					goto next_unlocked;
+				}
 				vchan_cookie_complete(&desc->vd);
 				sdma_start_desc(sdmac);
 			}
 		}
-
-		spin_unlock(&sdmac->vc.lock);
 		__clear_bit(channel, &stat);
+	next:
+		vchan_unlock(&sdmac->vc);
+	next_unlocked:
+		__clear_bit(channel, &mask);
 	}
+
+	return stat;
+}
+
+static irqreturn_t sdma_int_handler(int irq, void *dev_id)
+{
+	struct sdma_engine *sdma = dev_id;
+	unsigned long stat, flags __maybe_unused;
+
+#ifdef CONFIG_IMX_SDMA_OOB
+	if (running_oob()) {
+		stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+		writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+		/*
+		 * Locking is only to guard against IRQ migration with
+		 * a delayed in-band event running from a remote CPU
+		 * after some IRQ routing changed the affinity of the
+		 * out-of-band handler in the meantime.
+		 */
+		stat = sdma_do_channels(sdma, stat & ~1);
+		if (stat) {
+			raw_spin_lock(&sdma->oob_lock);
+			sdma->pending_stat |= stat;
+			raw_spin_unlock(&sdma->oob_lock);
+			/* Call us back from in-band context. */
+			irq_post_inband(irq);
+		}
+		return IRQ_HANDLED;
+	}
+
+	/* In-band IRQ context: stalled, but hard irqs are on. */
+	raw_spin_lock_irqsave(&sdma->oob_lock, flags);
+	stat = sdma->pending_stat;
+	sdma->pending_stat = 0;
+	raw_spin_unlock_irqrestore(&sdma->oob_lock, flags);
+	sdma_do_channels(sdma, stat);
+#else
+	stat = readl_relaxed(sdma->regs + SDMA_H_INTR);
+	writel_relaxed(stat, sdma->regs + SDMA_H_INTR);
+	/* channel 0 is special and not handled here, see run_channel0() */
+	sdma_do_channels(sdma, stat & ~1);
+#endif
 
 	return IRQ_HANDLED;
 }
@@ -1060,9 +1118,9 @@
 	 */
 	usleep_range(1000, 2000);
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_lock_irqsave(&sdmac->vc, flags);
 	vchan_get_all_descriptors(&sdmac->vc, &head);
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_unlock_irqrestore(&sdmac->vc, flags);
 	vchan_dma_desc_free_list(&sdmac->vc, &head);
 }
 
@@ -1071,17 +1129,18 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_lock_irqsave(&sdmac->vc, flags);
 
 	sdma_disable_channel(chan);
 
 	if (sdmac->desc) {
 		vchan_terminate_vdesc(&sdmac->desc->vd);
 		sdmac->desc = NULL;
+		vchan_unlock_irqrestore(&sdmac->vc, flags);
 		schedule_work(&sdmac->terminate_worker);
+	} else {
+		vchan_unlock_irqrestore(&sdmac->vc, flags);
 	}
-
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
 
 	return 0;
 }
@@ -1441,6 +1500,15 @@
 	struct scatterlist *sg;
 	struct sdma_desc *desc;
 
+	if (!sdma_oob_capable()) {
+		if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+			dev_err(sdma->dev,
+				"%s: out-of-band slave transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	}
+
 	sdma_config_write(chan, &sdmac->slave_config, direction);
 
 	desc = sdma_transfer_init(sdmac, direction, sg_len);
@@ -1492,7 +1560,8 @@
 
 		if (i + 1 == sg_len) {
 			param |= BD_INTR;
-			param |= BD_LAST;
+			if (!sdma_oob_capable() || !(flags & DMA_OOB_PULSE))
+				param |= BD_LAST;
 			param &= ~BD_CONT;
 		}
 
@@ -1526,6 +1595,20 @@
 	struct sdma_desc *desc;
 
 	dev_dbg(sdma->dev, "%s channel: %d\n", __func__, channel);
+
+	if (!sdma_oob_capable()) {
+		if (flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE)) {
+			dev_err(sdma->dev,
+				"%s: out-of-band cyclic transfers disabled\n",
+				__func__);
+			return NULL;
+		}
+	} else if (flags & DMA_OOB_PULSE) {
+		dev_err(chan->device->dev,
+			"%s: no pulse mode with out-of-band cyclic transfers\n",
+			__func__);
+		return NULL;
+	}
 
 	sdma_config_write(chan, &sdmac->slave_config, direction);
 
@@ -1649,7 +1732,7 @@
 	if (ret == DMA_COMPLETE || !txstate)
 		return ret;
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_lock_irqsave(&sdmac->vc, flags);
 
 	vd = vchan_find_desc(&sdmac->vc, cookie);
 	if (vd)
@@ -1667,7 +1750,7 @@
 		residue = 0;
 	}
 
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_unlock_irqrestore(&sdmac->vc, flags);
 
 	dma_set_tx_state(txstate, chan->completed_cookie, chan->cookie,
 			 residue);
@@ -1680,11 +1763,38 @@
 	struct sdma_channel *sdmac = to_sdma_chan(chan);
 	unsigned long flags;
 
-	spin_lock_irqsave(&sdmac->vc.lock, flags);
+	vchan_lock_irqsave(&sdmac->vc, flags);
 	if (vchan_issue_pending(&sdmac->vc) && !sdmac->desc)
 		sdma_start_desc(sdmac);
-	spin_unlock_irqrestore(&sdmac->vc.lock, flags);
+	vchan_unlock_irqrestore(&sdmac->vc, flags);
 }
+
+#ifdef CONFIG_IMX_SDMA_OOB
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+	struct sdma_channel *sdmac = to_sdma_chan(chan);
+	struct sdma_desc *desc = sdmac->desc;
+	unsigned long flags;
+	int n, ret = -EIO;
+
+	vchan_lock_irqsave(&sdmac->vc, flags);
+	if (desc && vchan_oob_pulsed(&desc->vd)) {
+		for (n = 0; n < desc->num_bd - 1; n++)
+			desc->bd[n].mode.status |= BD_DONE;
+		desc->bd[n].mode.status |= BD_DONE|BD_WRAP;
+		sdma_enable_channel(sdmac->sdma, sdmac->channel);
+		ret = 0;
+	}
+	vchan_unlock_irqrestore(&sdmac->vc, flags);
+
+	return ret;
+}
+#else
+static int sdma_pulse_oob(struct dma_chan *chan)
+{
+	return -ENOTSUPP;
+}
+#endif
 
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V1	34
 #define SDMA_SCRIPT_ADDRS_ARRAY_SIZE_V2	38
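
On the client side, a hedged sketch of how a pulsed out-of-band transfer would be armed and then retriggered, assuming the dma_pulse_oob() channel helper this series adds to the dmaengine core (the my_* names are illustrative): the descriptor is prepared once with DMA_OOB_PULSE, then re-kicked from the oob stage without another in-band submission.

static int my_arm_pulsed_tx(struct dma_chan *chan,
			    struct scatterlist *sgl, unsigned int sg_len)
{
	struct dma_async_tx_descriptor *d;

	/* Arm a slave transfer which oob code may retrigger later. */
	d = dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
				    DMA_PREP_INTERRUPT | DMA_OOB_PULSE);
	if (!d)
		return -EIO;

	dmaengine_submit(d);
	dma_async_issue_pending(chan);

	return 0;
}

/* Callable from the out-of-band stage once the descriptor is armed. */
static int my_kick(struct dma_chan *chan)
{
	return dma_pulse_oob(chan);
}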
@@ -1920,6 +2030,9 @@
 	clk_disable(sdma->clk_ipg);
 	clk_disable(sdma->clk_ahb);
 
+#ifdef CONFIG_IMX_SDMA_OOB
+	raw_spin_lock_init(&sdma->oob_lock);
+#endif
 	return 0;
 
 err_dma_alloc:
@@ -2035,8 +2148,9 @@
 	if (ret)
 		goto err_clk;
 
-	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler, 0, "sdma",
-			       sdma);
+	ret = devm_request_irq(&pdev->dev, irq, sdma_int_handler,
+			IS_ENABLED(CONFIG_IMX_SDMA_OOB) ? IRQF_OOB : 0,
+			"sdma", sdma);
 	if (ret)
 		goto err_irq;
 
@@ -2055,6 +2169,7 @@
 
 	dma_cap_set(DMA_SLAVE, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_CYCLIC, sdma->dma_device.cap_mask);
+	dma_cap_set(DMA_OOB, sdma->dma_device.cap_mask);
 	dma_cap_set(DMA_MEMCPY, sdma->dma_device.cap_mask);
 
 	INIT_LIST_HEAD(&sdma->dma_device.channels);
@@ -2106,6 +2221,7 @@
 	sdma->dma_device.residue_granularity = DMA_RESIDUE_GRANULARITY_SEGMENT;
 	sdma->dma_device.device_prep_dma_memcpy = sdma_prep_memcpy;
 	sdma->dma_device.device_issue_pending = sdma_issue_pending;
+	sdma->dma_device.device_pulse_oob = sdma_pulse_oob;
 	sdma->dma_device.copy_align = 2;
 	dma_set_max_seg_size(sdma->dma_device.dev, SDMA_BD_MAX_CNT);
 
@@ -2160,6 +2276,16 @@
 		}
 	}
 
+	/*
+	 * Keep the clocks enabled at all times if we plan to use the
+	 * DMA from out-of-band context, bumping their refcount to
+	 * keep them on until sdma_remove() is eventually called.
+	 */
+	if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+		clk_enable(sdma->clk_ipg);
+		clk_enable(sdma->clk_ahb);
+	}
+
 	return 0;
 
 err_register:
@@ -2178,6 +2304,11 @@
 	struct sdma_engine *sdma = platform_get_drvdata(pdev);
 	int i;
 
+	if (IS_ENABLED(CONFIG_IMX_SDMA_OOB)) {
+		clk_disable(sdma->clk_ahb);
+		clk_disable(sdma->clk_ipg);
+	}
+
 	devm_free_irq(&pdev->dev, sdma->irq, sdma);
 	dma_async_device_unregister(&sdma->dma_device);
 	kfree(sdma->script_addrs);
diff --git a/kernel/drivers/dma/virt-dma.c b/kernel/drivers/dma/virt-dma.c
index a6f4265..89e0116 100644
--- a/kernel/drivers/dma/virt-dma.c
+++ b/kernel/drivers/dma/virt-dma.c
@@ -23,11 +23,11 @@
 	unsigned long flags;
 	dma_cookie_t cookie;
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	cookie = dma_cookie_assign(tx);
 
 	list_move_tail(&vd->node, &vc->desc_submitted);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: submitted\n",
 		vc, vd, cookie);
@@ -52,9 +52,9 @@
 	struct virt_dma_desc *vd = to_virt_desc(tx);
 	unsigned long flags;
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	list_del(&vd->node);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	dev_dbg(vc->chan.device->dev, "vchan %p: txd %p[%x]: freeing\n",
 		vc, vd, vd->tx.cookie);
@@ -87,7 +87,7 @@
 	struct dmaengine_desc_callback cb;
 	LIST_HEAD(head);
 
-	spin_lock_irq(&vc->lock);
+	vchan_lock_irq(vc);
 	list_splice_tail_init(&vc->desc_completed, &head);
 	vd = vc->cyclic;
 	if (vd) {
@@ -96,7 +96,7 @@
 	} else {
 		memset(&cb, 0, sizeof(cb));
 	}
-	spin_unlock_irq(&vc->lock);
+	vchan_unlock_irq(vc);
 
 	dmaengine_desc_callback_invoke(&cb, &vd->tx_result);
 
@@ -120,11 +120,119 @@
 }
 EXPORT_SYMBOL_GPL(vchan_dma_desc_free_list);
 
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+
+static void inband_init_chan_lock(struct virt_dma_chan *vc)
+{
+	spin_lock_init(&vc->lock);
+}
+
+static void inband_lock_chan(struct virt_dma_chan *vc)
+{
+	spin_lock(&vc->lock);
+}
+
+static void inband_unlock_chan(struct virt_dma_chan *vc)
+{
+	spin_unlock(&vc->lock);
+}
+
+static void inband_lock_irq_chan(struct virt_dma_chan *vc)
+{
+	spin_lock_irq(&vc->lock);
+}
+
+static void inband_unlock_irq_chan(struct virt_dma_chan *vc)
+{
+	spin_unlock_irq(&vc->lock);
+}
+
+static unsigned long inband_lock_irqsave_chan(struct virt_dma_chan *vc)
+{
+	unsigned long flags;
+
+	spin_lock_irqsave(&vc->lock, flags);
+
+	return flags;
+}
+
+static void inband_unlock_irqrestore_chan(struct virt_dma_chan *vc,
+			unsigned long flags)
+{
+	spin_unlock_irqrestore(&vc->lock, flags);
+}
+
+static struct virt_dma_lockops inband_lock_ops = {
+	.init			= inband_init_chan_lock,
+	.lock			= inband_lock_chan,
+	.unlock			= inband_unlock_chan,
+	.lock_irq		= inband_lock_irq_chan,
+	.unlock_irq		= inband_unlock_irq_chan,
+	.lock_irqsave		= inband_lock_irqsave_chan,
+	.unlock_irqrestore	= inband_unlock_irqrestore_chan,
+};
+
+static void oob_init_chan_lock(struct virt_dma_chan *vc)
+{
+	raw_spin_lock_init(&vc->oob_lock);
+}
+
+static void oob_lock_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_lock(&vc->oob_lock);
+}
+
+static void oob_unlock_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_unlock(&vc->oob_lock);
+}
+
+static void oob_lock_irq_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_lock_irq(&vc->oob_lock);
+}
+
+static void oob_unlock_irq_chan(struct virt_dma_chan *vc)
+{
+	raw_spin_unlock_irq(&vc->oob_lock);
+}
+
+static unsigned long oob_lock_irqsave_chan(struct virt_dma_chan *vc)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&vc->oob_lock, flags);
+
+	return flags;
+}
+
+static void oob_unlock_irqrestore_chan(struct virt_dma_chan *vc,
+				unsigned long flags)
+{
+	raw_spin_unlock_irqrestore(&vc->oob_lock, flags);
+}
+
+static struct virt_dma_lockops oob_lock_ops = {
+	.init			= oob_init_chan_lock,
+	.lock			= oob_lock_chan,
+	.unlock			= oob_unlock_chan,
+	.lock_irq		= oob_lock_irq_chan,
+	.unlock_irq		= oob_unlock_irq_chan,
+	.lock_irqsave		= oob_lock_irqsave_chan,
+	.unlock_irqrestore	= oob_unlock_irqrestore_chan,
+};
+
+#endif
+
 void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev)
 {
 	dma_cookie_init(&vc->chan);
 
-	spin_lock_init(&vc->lock);
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+	vc->lock_ops = test_bit(DMA_OOB, dmadev->cap_mask.bits) ?
+		&oob_lock_ops : &inband_lock_ops;
+#endif
+	vchan_lock_init(vc);
 	INIT_LIST_HEAD(&vc->desc_allocated);
 	INIT_LIST_HEAD(&vc->desc_submitted);
 	INIT_LIST_HEAD(&vc->desc_issued);
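
So a virt-dma based driver opts into the raw out-of-band lock simply by advertising DMA_OOB before calling vchan_init(), exactly as the SDMA probe does above. A minimal sketch with illustrative names:

static void my_register_vchan(struct dma_device *dd,
			      struct virt_dma_chan *vc)
{
	dma_cap_set(DMA_SLAVE, dd->cap_mask);
	dma_cap_set(DMA_OOB, dd->cap_mask);	/* selects oob_lock_ops */
	vchan_init(vc, dd);			/* runs lock_ops->init() */
}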
diff --git a/kernel/drivers/dma/virt-dma.h b/kernel/drivers/dma/virt-dma.h
index e9f5250..5e01bc8 100644
--- a/kernel/drivers/dma/virt-dma.h
+++ b/kernel/drivers/dma/virt-dma.h
@@ -19,12 +19,22 @@
 	struct list_head node;
 };
 
+struct virt_dma_lockops;
+
 struct virt_dma_chan {
 	struct dma_chan	chan;
 	struct tasklet_struct task;
 	void (*desc_free)(struct virt_dma_desc *);
 
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+	struct virt_dma_lockops *lock_ops;
+	union {
+		spinlock_t lock;
+		hard_spinlock_t oob_lock;
+	};
+#else
 	spinlock_t lock;
+#endif
 
 	/* protected by vc.lock */
 	struct list_head desc_allocated;
@@ -40,6 +50,107 @@
 {
 	return container_of(chan, struct virt_dma_chan, chan);
 }
+
+#ifdef CONFIG_DMA_VIRTUAL_CHANNELS_OOB
+
+struct virt_dma_lockops {
+	void (*init)(struct virt_dma_chan *vc);
+	void (*lock)(struct virt_dma_chan *vc);
+	void (*unlock)(struct virt_dma_chan *vc);
+	void (*lock_irq)(struct virt_dma_chan *vc);
+	void (*unlock_irq)(struct virt_dma_chan *vc);
+	unsigned long (*lock_irqsave)(struct virt_dma_chan *vc);
+	void (*unlock_irqrestore)(struct virt_dma_chan *vc,
+				unsigned long flags);
+};
+
+static inline void vchan_lock_init(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->init(vc);
+}
+
+static inline void vchan_lock(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->lock(vc);
+}
+
+static inline void vchan_unlock(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->unlock(vc);
+}
+
+static inline void vchan_lock_irq(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->lock_irq(vc);
+}
+
+static inline void vchan_unlock_irq(struct virt_dma_chan *vc)
+{
+	vc->lock_ops->unlock_irq(vc);
+}
+
+static inline
+unsigned long __vchan_lock_irqsave(struct virt_dma_chan *vc)
+{
+	return vc->lock_ops->lock_irqsave(vc);
+}
+
+#define vchan_lock_irqsave(__vc, __flags)		\
+	do {						\
+		(__flags) = __vchan_lock_irqsave(__vc);	\
+	} while (0)
+
+static inline
+void vchan_unlock_irqrestore(struct virt_dma_chan *vc,
+			unsigned long flags)
+{
+	vc->lock_ops->unlock_irqrestore(vc, flags);
+}
+
+static inline bool vchan_oob_handled(struct virt_dma_desc *vd)
+{
+	return !!(vd->tx.flags & DMA_OOB_INTERRUPT);
+}
+
+static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd)
+{
+	return !!(vd->tx.flags & DMA_OOB_PULSE);
+}
+
+#else
+
+#define vchan_lock_init(__vc)				\
+	spin_lock_init(&(__vc)->lock)
+
+#define vchan_lock(__vc)				\
+	spin_lock(&(__vc)->lock)
+
+#define vchan_unlock(__vc)				\
+	spin_unlock(&(__vc)->lock)
+
+#define vchan_lock_irq(__vc)				\
+	spin_lock_irq(&(__vc)->lock)
+
+#define vchan_unlock_irq(__vc)				\
+	spin_unlock_irq(&(__vc)->lock)
+
+#define vchan_lock_irqsave(__vc, __flags)		\
+	spin_lock_irqsave(&(__vc)->lock, __flags)
+
+#define vchan_unlock_irqrestore(__vc, __flags)		\
+	spin_unlock_irqrestore(&(__vc)->lock, __flags)
+
+static inline bool vchan_oob_handled(struct virt_dma_desc *vd)
+{
+	return false;
+}
+
+static inline bool vchan_oob_pulsed(struct virt_dma_desc *vd)
+{
+	return false;
+}
+
+#endif	/* !CONFIG_DMA_VIRTUAL_CHANNELS_OOB */
 
 void vchan_dma_desc_free_list(struct virt_dma_chan *vc, struct list_head *head);
 void vchan_init(struct virt_dma_chan *vc, struct dma_device *dmadev);
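
Note why the irqsave variant is a macro on this path: the lock op only takes the channel, so it cannot write through a flags pointer; __vchan_lock_irqsave() therefore returns the flags by value and the macro stores them. Call sites keep the familiar spinlock shape either way, as in this hypothetical drain helper:

static void my_drain(struct virt_dma_chan *vc, struct list_head *head)
{
	unsigned long flags;

	vchan_lock_irqsave(vc, flags);	/* ops path: flags = __vchan_lock_irqsave(vc) */
	vchan_get_all_descriptors(vc, head);
	vchan_unlock_irqrestore(vc, flags);
}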
@@ -66,9 +177,9 @@
 	vd->tx_result.result = DMA_TRANS_NOERROR;
 	vd->tx_result.residue = 0;
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	list_add_tail(&vd->node, &vc->desc_allocated);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	return &vd->tx;
 }
@@ -116,9 +227,9 @@
 	if (dmaengine_desc_test_reuse(&vd->tx)) {
 		unsigned long flags;
 
-		spin_lock_irqsave(&vc->lock, flags);
+		vchan_lock_irqsave(vc, flags);
 		list_add(&vd->node, &vc->desc_allocated);
-		spin_unlock_irqrestore(&vc->lock, flags);
+		vchan_unlock_irqrestore(vc, flags);
 	} else {
 		vc->desc_free(vd);
 	}
@@ -190,11 +301,11 @@
 	unsigned long flags;
 	LIST_HEAD(head);
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 	vchan_get_all_descriptors(vc, &head);
 	list_for_each_entry(vd, &head, node)
 		dmaengine_desc_clear_reuse(&vd->tx);
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
 }
@@ -215,11 +326,11 @@
 
 	tasklet_kill(&vc->task);
 
-	spin_lock_irqsave(&vc->lock, flags);
+	vchan_lock_irqsave(vc, flags);
 
 	list_splice_tail_init(&vc->desc_terminated, &head);
 
-	spin_unlock_irqrestore(&vc->lock, flags);
+	vchan_unlock_irqrestore(vc, flags);
 
 	vchan_dma_desc_free_list(vc, &head);
 }
diff --git a/kernel/drivers/gpio/gpio-davinci.c b/kernel/drivers/gpio/gpio-davinci.c
index 6f21385..b7ae10d 100644
--- a/kernel/drivers/gpio/gpio-davinci.c
+++ b/kernel/drivers/gpio/gpio-davinci.c
@@ -326,7 +326,7 @@
 	.irq_enable	= gpio_irq_enable,
 	.irq_disable	= gpio_irq_disable,
 	.irq_set_type	= gpio_irq_type,
-	.flags		= IRQCHIP_SET_TYPE_MASKED,
+	.flags		= IRQCHIP_SET_TYPE_MASKED | IRQCHIP_PIPELINE_SAFE,
 };
 
 static void gpio_irq_handler(struct irq_desc *desc)
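
The gpio-davinci hunk above is the first of many below that follow a single pattern: an irqchip whose mask/unmask/ack callbacks may safely run from the out-of-band stage advertises that fact with IRQCHIP_PIPELINE_SAFE. A generic sketch (names illustrative):

static void my_mask_irq(struct irq_data *d);
static void my_unmask_irq(struct irq_data *d);

static struct irq_chip my_pipeline_safe_chip = {
	.name		= "my-chip",
	.irq_mask	= my_mask_irq,
	.irq_unmask	= my_unmask_irq,
	.flags		= IRQCHIP_PIPELINE_SAFE,
};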
diff --git a/kernel/drivers/gpio/gpio-mxc.c b/kernel/drivers/gpio/gpio-mxc.c
index ba6ed2a..96d523f 100644
--- a/kernel/drivers/gpio/gpio-mxc.c
+++ b/kernel/drivers/gpio/gpio-mxc.c
@@ -361,7 +361,8 @@
 	ct->chip.irq_unmask = irq_gc_mask_set_bit;
 	ct->chip.irq_set_type = gpio_set_irq_type;
 	ct->chip.irq_set_wake = gpio_set_wake_irq;
-	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
+	ct->chip.flags = IRQCHIP_MASK_ON_SUSPEND |
+		IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE;
 	ct->regs.ack = GPIO_ISR;
 	ct->regs.mask = GPIO_IMR;
 
diff --git a/kernel/drivers/gpio/gpio-omap.c b/kernel/drivers/gpio/gpio-omap.c
index a7e8ed5..3316893 100644
--- a/kernel/drivers/gpio/gpio-omap.c
+++ b/kernel/drivers/gpio/gpio-omap.c
@@ -55,7 +55,7 @@
 	u32 saved_datain;
 	u32 level_mask;
 	u32 toggle_mask;
-	raw_spinlock_t lock;
+	hard_spinlock_t lock;
 	raw_spinlock_t wa_lock;
 	struct gpio_chip chip;
 	struct clk *dbck;
@@ -1058,7 +1058,7 @@
 
 	ret = devm_request_irq(bank->chip.parent, bank->irq,
 			       omap_gpio_irq_handler,
-			       0, dev_name(bank->chip.parent), bank);
+			       IRQF_OOB, dev_name(bank->chip.parent), bank);
 	if (ret)
 		gpiochip_remove(&bank->chip);
 
@@ -1406,7 +1406,7 @@
 	irqc->irq_bus_lock = omap_gpio_irq_bus_lock,
 	irqc->irq_bus_sync_unlock = gpio_irq_bus_sync_unlock,
 	irqc->name = dev_name(&pdev->dev);
-	irqc->flags = IRQCHIP_MASK_ON_SUSPEND;
+	irqc->flags = IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE;
 	irqc->parent_device = dev;
 
 	bank->irq = platform_get_irq(pdev, 0);
diff --git a/kernel/drivers/gpio/gpio-pca953x.c b/kernel/drivers/gpio/gpio-pca953x.c
index 3ad1a9e..cad58d0 100644
--- a/kernel/drivers/gpio/gpio-pca953x.c
+++ b/kernel/drivers/gpio/gpio-pca953x.c
@@ -855,6 +855,7 @@
 	irq_chip->irq_bus_sync_unlock = pca953x_irq_bus_sync_unlock;
 	irq_chip->irq_set_type = pca953x_irq_set_type;
 	irq_chip->irq_shutdown = pca953x_irq_shutdown;
+	irq_chip->flags |= IRQCHIP_PIPELINE_SAFE;
 
 	girq = &chip->gpio_chip.irq;
 	girq->chip = irq_chip;
diff --git a/kernel/drivers/gpio/gpio-pl061.c b/kernel/drivers/gpio/gpio-pl061.c
index f1b53dd..c890b9c 100644
--- a/kernel/drivers/gpio/gpio-pl061.c
+++ b/kernel/drivers/gpio/gpio-pl061.c
@@ -48,7 +48,7 @@
 #endif
 
 struct pl061 {
-	raw_spinlock_t		lock;
+	hard_spinlock_t		lock;
 
 	void __iomem		*base;
 	struct gpio_chip	gc;
@@ -321,6 +321,7 @@
 	pl061->irq_chip.irq_unmask = pl061_irq_unmask;
 	pl061->irq_chip.irq_set_type = pl061_irq_type;
 	pl061->irq_chip.irq_set_wake = pl061_irq_set_wake;
+	pl061->irq_chip.flags = IRQCHIP_PIPELINE_SAFE;
 
 	writeb(0, pl061->base + GPIOIE); /* disable irqs */
 	irq = adev->irq[0];
diff --git a/kernel/drivers/gpio/gpio-xilinx.c b/kernel/drivers/gpio/gpio-xilinx.c
index 67f9f82..33f03ac 100644
--- a/kernel/drivers/gpio/gpio-xilinx.c
+++ b/kernel/drivers/gpio/gpio-xilinx.c
@@ -45,7 +45,7 @@
 	unsigned int gpio_width[2];
 	u32 gpio_state[2];
 	u32 gpio_dir[2];
-	spinlock_t gpio_lock[2];
+	hard_spinlock_t gpio_lock[2];
 };
 
 static inline int xgpio_index(struct xgpio_instance *chip, int gpio)
@@ -110,7 +110,7 @@
 	int index =  xgpio_index(chip, gpio);
 	int offset =  xgpio_offset(chip, gpio);
 
-	spin_lock_irqsave(&chip->gpio_lock[index], flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock[index], flags);
 
 	/* Write to GPIO signal and set its direction to output */
 	if (val)
@@ -121,7 +121,7 @@
 	xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
 		       xgpio_regoffset(chip, gpio), chip->gpio_state[index]);
 
-	spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
 }
 
 /**
@@ -141,7 +141,7 @@
 	int index = xgpio_index(chip, 0);
 	int offset, i;
 
-	spin_lock_irqsave(&chip->gpio_lock[index], flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock[index], flags);
 
 	/* Write to GPIO signals */
 	for (i = 0; i < gc->ngpio; i++) {
@@ -152,9 +152,9 @@
 			xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
 				       index * XGPIO_CHANNEL_OFFSET,
 				       chip->gpio_state[index]);
-			spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+			raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
 			index =  xgpio_index(chip, i);
-			spin_lock_irqsave(&chip->gpio_lock[index], flags);
+			raw_spin_lock_irqsave(&chip->gpio_lock[index], flags);
 		}
 		if (__test_and_clear_bit(i, mask)) {
 			offset =  xgpio_offset(chip, i);
@@ -168,7 +168,7 @@
 	xgpio_writereg(chip->regs + XGPIO_DATA_OFFSET +
 		       index * XGPIO_CHANNEL_OFFSET, chip->gpio_state[index]);
 
-	spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
 }
 
 /**
@@ -187,14 +187,14 @@
 	int index =  xgpio_index(chip, gpio);
 	int offset =  xgpio_offset(chip, gpio);
 
-	spin_lock_irqsave(&chip->gpio_lock[index], flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock[index], flags);
 
 	/* Set the GPIO bit in shadow register and set direction as input */
 	chip->gpio_dir[index] |= BIT(offset);
 	xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
 		       xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
 
-	spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
 
 	return 0;
 }
@@ -218,7 +218,7 @@
 	int index =  xgpio_index(chip, gpio);
 	int offset =  xgpio_offset(chip, gpio);
 
-	spin_lock_irqsave(&chip->gpio_lock[index], flags);
+	raw_spin_lock_irqsave(&chip->gpio_lock[index], flags);
 
 	/* Write state of GPIO signal */
 	if (val)
@@ -233,7 +233,7 @@
 	xgpio_writereg(chip->regs + XGPIO_TRI_OFFSET +
 			xgpio_regoffset(chip, gpio), chip->gpio_dir[index]);
 
-	spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
+	raw_spin_unlock_irqrestore(&chip->gpio_lock[index], flags);
 
 	return 0;
 }
@@ -291,7 +291,7 @@
 	if (of_property_read_u32(np, "xlnx,gpio-width", &chip->gpio_width[0]))
 		chip->gpio_width[0] = 32;
 
-	spin_lock_init(&chip->gpio_lock[0]);
+	raw_spin_lock_init(&chip->gpio_lock[0]);
 
 	if (of_property_read_u32(np, "xlnx,is-dual", &is_dual))
 		is_dual = 0;
@@ -314,7 +314,7 @@
 					 &chip->gpio_width[1]))
 			chip->gpio_width[1] = 32;
 
-		spin_lock_init(&chip->gpio_lock[1]);
+		raw_spin_lock_init(&chip->gpio_lock[1]);
 	}
 
 	chip->gc.base = -1;
diff --git a/kernel/drivers/gpio/gpio-zynq.c b/kernel/drivers/gpio/gpio-zynq.c
index c288a75..28c0280 100644
--- a/kernel/drivers/gpio/gpio-zynq.c
+++ b/kernel/drivers/gpio/gpio-zynq.c
@@ -601,7 +601,7 @@
 	.irq_request_resources = zynq_gpio_irq_reqres,
 	.irq_release_resources = zynq_gpio_irq_relres,
 	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED |
-			  IRQCHIP_MASK_ON_SUSPEND,
+			  IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip zynq_gpio_edge_irqchip = {
@@ -614,7 +614,7 @@
 	.irq_set_wake	= zynq_gpio_set_wake,
 	.irq_request_resources = zynq_gpio_irq_reqres,
 	.irq_release_resources = zynq_gpio_irq_relres,
-	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+	.flags		= IRQCHIP_MASK_ON_SUSPEND | IRQCHIP_PIPELINE_SAFE,
 };
 
 static void zynq_gpio_handle_bank_irq(struct zynq_gpio *gpio,
diff --git a/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
index 96f3908..e82dda7 100644
--- a/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
+++ b/kernel/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -94,6 +94,7 @@
 	.name = "dpu_mdss",
 	.irq_mask = dpu_mdss_irq_mask,
 	.irq_unmask = dpu_mdss_irq_unmask,
+	.flags = IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct lock_class_key dpu_mdss_lock_key, dpu_mdss_request_key;
diff --git a/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index 09bd46a..781d701 100644
--- a/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/kernel/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -91,6 +91,7 @@
 	.name		= "mdss",
 	.irq_mask	= mdss_hw_mask_irq,
 	.irq_unmask	= mdss_hw_unmask_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
@@ -254,7 +255,7 @@
 	}
 
 	ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
-			       mdss_irq, 0, "mdss_isr", mdp5_mdss);
+			       mdss_irq, IRQF_OOB, "mdss_isr", mdp5_mdss);
 	if (ret) {
 		DRM_DEV_ERROR(dev->dev, "failed to init irq: %d\n", ret);
 		goto fail_irq;
diff --git a/kernel/drivers/gpu/ipu-v3/ipu-common.c b/kernel/drivers/gpu/ipu-v3/ipu-common.c
index d166ee2..6f4db39 100644
--- a/kernel/drivers/gpu/ipu-v3/ipu-common.c
+++ b/kernel/drivers/gpu/ipu-v3/ipu-common.c
@@ -1238,6 +1238,7 @@
 		ct->chip.irq_ack = irq_gc_ack_set_bit;
 		ct->chip.irq_mask = irq_gc_mask_clr_bit;
 		ct->chip.irq_unmask = irq_gc_mask_set_bit;
+		ct->chip.flags = IRQCHIP_PIPELINE_SAFE;
 		ct->regs.ack = IPU_INT_STAT(i / 32);
 		ct->regs.mask = IPU_INT_CTRL(i / 32);
 	}
diff --git a/kernel/drivers/iio/industrialio-trigger.c b/kernel/drivers/iio/industrialio-trigger.c
index 6bcc562..e9172bb 100644
--- a/kernel/drivers/iio/industrialio-trigger.c
+++ b/kernel/drivers/iio/industrialio-trigger.c
@@ -544,6 +544,7 @@
 	trig->subirq_chip.name = trig->name;
 	trig->subirq_chip.irq_mask = &iio_trig_subirqmask;
 	trig->subirq_chip.irq_unmask = &iio_trig_subirqunmask;
+	trig->subirq_chip.flags = IRQCHIP_PIPELINE_SAFE;
 	for (i = 0; i < CONFIG_IIO_CONSUMERS_PER_TRIGGER; i++) {
 		irq_set_chip(trig->subirq_base + i, &trig->subirq_chip);
 		irq_set_handler(trig->subirq_base + i, &handle_simple_irq);
diff --git a/kernel/drivers/irqchip/exynos-combiner.c b/kernel/drivers/irqchip/exynos-combiner.c
index 0b85d9a..504bdd0 100644
--- a/kernel/drivers/irqchip/exynos-combiner.c
+++ b/kernel/drivers/irqchip/exynos-combiner.c
@@ -24,7 +24,7 @@
 
 #define IRQ_IN_COMBINER		8
 
-static DEFINE_SPINLOCK(irq_controller_lock);
+static DEFINE_HARD_SPINLOCK(irq_controller_lock);
 
 struct combiner_chip_data {
 	unsigned int hwirq_offset;
@@ -71,9 +71,9 @@
 
 	chained_irq_enter(chip, desc);
 
-	spin_lock(&irq_controller_lock);
+	raw_spin_lock(&irq_controller_lock);
 	status = readl_relaxed(chip_data->base + COMBINER_INT_STATUS);
-	spin_unlock(&irq_controller_lock);
+	raw_spin_unlock(&irq_controller_lock);
 	status &= chip_data->irq_mask;
 
 	if (status == 0)
@@ -113,6 +113,7 @@
 #ifdef CONFIG_SMP
 	.irq_set_affinity	= combiner_set_affinity,
 #endif
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void __init combiner_cascade_irq(struct combiner_chip_data *combiner_data,
diff --git a/kernel/drivers/irqchip/irq-bcm2835.c b/kernel/drivers/irqchip/irq-bcm2835.c
index a1e004a..bbf1cdb 100644
--- a/kernel/drivers/irqchip/irq-bcm2835.c
+++ b/kernel/drivers/irqchip/irq-bcm2835.c
@@ -102,7 +102,8 @@
 static struct irq_chip armctrl_chip = {
 	.name = "ARMCTRL-level",
 	.irq_mask = armctrl_mask_irq,
-	.irq_unmask = armctrl_unmask_irq
+	.irq_unmask = armctrl_unmask_irq,
+	.flags = IRQCHIP_PIPELINE_SAFE,
 };
 
 static int armctrl_xlate(struct irq_domain *d, struct device_node *ctrlr,
diff --git a/kernel/drivers/irqchip/irq-bcm2836.c b/kernel/drivers/irqchip/irq-bcm2836.c
index cbc7c74..89b47fc 100644
--- a/kernel/drivers/irqchip/irq-bcm2836.c
+++ b/kernel/drivers/irqchip/irq-bcm2836.c
@@ -58,6 +58,7 @@
 	.name		= "bcm2836-timer",
 	.irq_mask	= bcm2836_arm_irqchip_mask_timer_irq,
 	.irq_unmask	= bcm2836_arm_irqchip_unmask_timer_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void bcm2836_arm_irqchip_mask_pmu_irq(struct irq_data *d)
@@ -74,6 +75,7 @@
 	.name		= "bcm2836-pmu",
 	.irq_mask	= bcm2836_arm_irqchip_mask_pmu_irq,
 	.irq_unmask	= bcm2836_arm_irqchip_unmask_pmu_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void bcm2836_arm_irqchip_mask_gpu_irq(struct irq_data *d)
@@ -88,6 +90,7 @@
 	.name		= "bcm2836-gpu",
 	.irq_mask	= bcm2836_arm_irqchip_mask_gpu_irq,
 	.irq_unmask	= bcm2836_arm_irqchip_unmask_gpu_irq,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static void bcm2836_arm_irqchip_dummy_op(struct irq_data *d)
diff --git a/kernel/drivers/irqchip/irq-gic-v2m.c b/kernel/drivers/irqchip/irq-gic-v2m.c
index fbec07d..c3f19ef 100644
--- a/kernel/drivers/irqchip/irq-gic-v2m.c
+++ b/kernel/drivers/irqchip/irq-gic-v2m.c
@@ -89,6 +89,7 @@
 	.irq_unmask		= gicv2m_unmask_msi_irq,
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_write_msi_msg	= pci_msi_domain_write_msg,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info gicv2m_msi_domain_info = {
@@ -130,6 +131,7 @@
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_set_affinity	= irq_chip_set_affinity_parent,
 	.irq_compose_msi_msg	= gicv2m_compose_msi_msg,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain,
@@ -252,6 +254,7 @@
 
 static struct irq_chip gicv2m_pmsi_irq_chip = {
 	.name			= "pMSI",
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_ops gicv2m_pmsi_ops = {
diff --git a/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c b/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
index 634263d..0b4b81a 100644
--- a/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
+++ b/kernel/drivers/irqchip/irq-gic-v3-its-fsl-mc-msi.c
@@ -22,7 +22,8 @@
 	.irq_mask = irq_chip_mask_parent,
 	.irq_unmask = irq_chip_unmask_parent,
 	.irq_eoi = irq_chip_eoi_parent,
-	.irq_set_affinity = msi_domain_set_affinity
+	.irq_set_affinity = msi_domain_set_affinity,
+	.flags = IRQCHIP_PIPELINE_SAFE,
 };
 
 static u32 fsl_mc_msi_domain_get_msi_id(struct irq_domain *domain,
diff --git a/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c b/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c
index 87711e0..a148d0d 100644
--- a/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c
+++ b/kernel/drivers/irqchip/irq-gic-v3-its-pci-msi.c
@@ -29,6 +29,7 @@
 	.irq_mask		= its_mask_msi_irq,
 	.irq_eoi		= irq_chip_eoi_parent,
 	.irq_write_msi_msg	= pci_msi_domain_write_msg,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int its_pci_msi_vec_count(struct pci_dev *pdev, void *data)
diff --git a/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c b/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c
index daa6d50..ae29443 100644
--- a/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c
+++ b/kernel/drivers/irqchip/irq-gic-v3-its-platform-msi.c
@@ -12,6 +12,7 @@
 
 static struct irq_chip its_pmsi_irq_chip = {
 	.name			= "ITS-pMSI",
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int of_pmsi_get_dev_id(struct irq_domain *domain, struct device *dev,
diff --git a/kernel/drivers/irqchip/irq-gic-v3-mbi.c b/kernel/drivers/irqchip/irq-gic-v3-mbi.c
index e81e89a..b213cd7 100644
--- a/kernel/drivers/irqchip/irq-gic-v3-mbi.c
+++ b/kernel/drivers/irqchip/irq-gic-v3-mbi.c
@@ -216,7 +216,7 @@
 	.name			= "pMSI",
 	.irq_set_type		= irq_chip_set_type_parent,
 	.irq_compose_msi_msg	= mbi_compose_mbi_msg,
-	.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI,
+	.flags			= IRQCHIP_SUPPORTS_LEVEL_MSI | IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_ops mbi_pmsi_ops = {
diff --git a/kernel/drivers/irqchip/irq-gic-v3.c b/kernel/drivers/irqchip/irq-gic-v3.c
index 6db1dbc..128a69f 100644
--- a/kernel/drivers/irqchip/irq-gic-v3.c
+++ b/kernel/drivers/irqchip/irq-gic-v3.c
@@ -1356,7 +1356,8 @@
 	.ipi_send_mask		= gic_ipi_send_mask,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_MASK_ON_SUSPEND,
+				  IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip gic_eoimode1_chip = {
@@ -1375,7 +1376,8 @@
 	.ipi_send_mask		= gic_ipi_send_mask,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_MASK_ON_SUSPEND,
+				  IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
diff --git a/kernel/drivers/irqchip/irq-gic.c b/kernel/drivers/irqchip/irq-gic.c
index cd3f72b..4768411 100644
--- a/kernel/drivers/irqchip/irq-gic.c
+++ b/kernel/drivers/irqchip/irq-gic.c
@@ -91,7 +91,7 @@
 
 #ifdef CONFIG_BL_SWITCHER
 
-static DEFINE_RAW_SPINLOCK(cpu_map_lock);
+static DEFINE_HARD_SPINLOCK(cpu_map_lock);
 
 #define gic_lock_irqsave(f)		\
 	raw_spin_lock_irqsave(&cpu_map_lock, (f))
@@ -449,7 +449,8 @@
 	.irq_set_irqchip_state	= gic_irq_set_irqchip_state,
 	.flags			= IRQCHIP_SET_TYPE_MASKED |
 				  IRQCHIP_SKIP_SET_WAKE |
-				  IRQCHIP_MASK_ON_SUSPEND,
+				  IRQCHIP_MASK_ON_SUSPEND |
+				  IRQCHIP_PIPELINE_SAFE,
 };
 
 void __init gic_cascade_irq(unsigned int gic_nr, unsigned int irq)
diff --git a/kernel/drivers/irqchip/irq-imx-irqsteer.c b/kernel/drivers/irqchip/irq-imx-irqsteer.c
index 1edf769..54a576a 100644
--- a/kernel/drivers/irqchip/irq-imx-irqsteer.c
+++ b/kernel/drivers/irqchip/irq-imx-irqsteer.c
@@ -29,7 +29,7 @@
 	struct clk		*ipg_clk;
 	int			irq[CHAN_MAX_OUTPUT_INT];
 	int			irq_count;
-	raw_spinlock_t		lock;
+	hard_spinlock_t		lock;
 	int			reg_num;
 	int			channel;
 	struct irq_domain	*domain;
@@ -74,6 +74,7 @@
 	.name		= "irqsteer",
 	.irq_mask	= imx_irqsteer_irq_mask,
 	.irq_unmask	= imx_irqsteer_irq_unmask,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int imx_irqsteer_irq_map(struct irq_domain *h, unsigned int irq,
diff --git a/kernel/drivers/irqchip/irq-omap-intc.c b/kernel/drivers/irqchip/irq-omap-intc.c
index d360a6e..a864744 100644
--- a/kernel/drivers/irqchip/irq-omap-intc.c
+++ b/kernel/drivers/irqchip/irq-omap-intc.c
@@ -211,7 +211,7 @@
 		ct->chip.irq_mask = irq_gc_mask_disable_reg;
 		ct->chip.irq_unmask = irq_gc_unmask_enable_reg;
 
-		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE;
+		ct->chip.flags |= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE;
 
 		ct->regs.enable = INTC_MIR_CLEAR0 + 32 * i;
 		ct->regs.disable = INTC_MIR_SET0 + 32 * i;
diff --git a/kernel/drivers/irqchip/irq-sun4i.c b/kernel/drivers/irqchip/irq-sun4i.c
index fb78d66..4f5c42b 100644
--- a/kernel/drivers/irqchip/irq-sun4i.c
+++ b/kernel/drivers/irqchip/irq-sun4i.c
@@ -87,7 +87,7 @@
 	.irq_eoi	= sun4i_irq_ack,
 	.irq_mask	= sun4i_irq_mask,
 	.irq_unmask	= sun4i_irq_unmask,
-	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED,
+	.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED | IRQCHIP_PIPELINE_SAFE,
 };
 
 static int sun4i_irq_map(struct irq_domain *d, unsigned int virq,
diff --git a/kernel/drivers/irqchip/irq-sunxi-nmi.c b/kernel/drivers/irqchip/irq-sunxi-nmi.c
index a412b5d..ada16f4 100644
--- a/kernel/drivers/irqchip/irq-sunxi-nmi.c
+++ b/kernel/drivers/irqchip/irq-sunxi-nmi.c
@@ -200,7 +200,9 @@
 	gc->chip_types[0].chip.irq_unmask	= irq_gc_mask_set_bit;
 	gc->chip_types[0].chip.irq_eoi		= irq_gc_ack_set_bit;
 	gc->chip_types[0].chip.irq_set_type	= sunxi_sc_nmi_set_type;
-	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED | IRQCHIP_EOI_IF_HANDLED;
+	gc->chip_types[0].chip.flags		= IRQCHIP_EOI_THREADED |
+						  IRQCHIP_EOI_IF_HANDLED |
+						  IRQCHIP_PIPELINE_SAFE;
 	gc->chip_types[0].regs.ack		= reg_offs->pend;
 	gc->chip_types[0].regs.mask		= reg_offs->enable;
 	gc->chip_types[0].regs.type		= reg_offs->ctrl;
diff --git a/kernel/drivers/irqchip/irq-ti-sci-inta.c b/kernel/drivers/irqchip/irq-ti-sci-inta.c
index 532d0ae..eec751b 100644
--- a/kernel/drivers/irqchip/irq-ti-sci-inta.c
+++ b/kernel/drivers/irqchip/irq-ti-sci-inta.c
@@ -262,6 +262,7 @@
 	list_add_tail(&vint_desc->list, &inta->vint_list);
 	irq_set_chained_handler_and_data(vint_desc->parent_virq,
 					 ti_sci_inta_irq_handler, vint_desc);
+	irq_switch_oob(vint_desc->parent_virq, true);
 
 	return vint_desc;
 free_vint_desc:
@@ -543,6 +544,7 @@
 	.irq_set_affinity	= ti_sci_inta_set_affinity,
 	.irq_request_resources	= ti_sci_inta_request_resources,
 	.irq_release_resources	= ti_sci_inta_release_resources,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 /**
diff --git a/kernel/drivers/memory/omap-gpmc.c b/kernel/drivers/memory/omap-gpmc.c
index f80c2ea..ebcda04 100644
--- a/kernel/drivers/memory/omap-gpmc.c
+++ b/kernel/drivers/memory/omap-gpmc.c
@@ -1405,6 +1405,7 @@
 	gpmc->irq_chip.irq_mask = gpmc_irq_mask;
 	gpmc->irq_chip.irq_unmask = gpmc_irq_unmask;
 	gpmc->irq_chip.irq_set_type = gpmc_irq_set_type;
+	gpmc->irq_chip.flags = IRQCHIP_PIPELINE_SAFE;
 
 	gpmc_irq_domain = irq_domain_add_linear(gpmc->dev->of_node,
 						gpmc->nirqs,
@@ -1415,7 +1416,7 @@
 		return -ENODEV;
 	}
 
-	rc = request_irq(gpmc->irq, gpmc_handle_irq, 0, "gpmc", gpmc);
+	rc = request_irq(gpmc->irq, gpmc_handle_irq, IRQF_OOB, "gpmc", gpmc);
 	if (rc) {
 		dev_err(gpmc->dev, "failed to request irq %d: %d\n",
 			gpmc->irq, rc);
diff --git a/kernel/drivers/mfd/tps65217.c b/kernel/drivers/mfd/tps65217.c
index 2d9c282..7987fea 100644
--- a/kernel/drivers/mfd/tps65217.c
+++ b/kernel/drivers/mfd/tps65217.c
@@ -84,6 +84,7 @@
 	.irq_bus_sync_unlock	= tps65217_irq_sync_unlock,
 	.irq_enable		= tps65217_irq_enable,
 	.irq_disable		= tps65217_irq_disable,
+	.flags			= IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct mfd_cell tps65217s[] = {
diff --git a/kernel/drivers/misc/Makefile b/kernel/drivers/misc/Makefile
index eb485c3..d2adfbd 100644
--- a/kernel/drivers/misc/Makefile
+++ b/kernel/drivers/misc/Makefile
@@ -67,3 +67,4 @@
 obj-$(CONFIG_KHADAS_MCU) += khadas-mcu.o
 obj-y				+= nkio/
 obj-y				+= nkmcu/
+obj-y				+= atemsys-main/
diff --git a/kernel/drivers/misc/atemsys-main/COPYING b/kernel/drivers/misc/atemsys-main/COPYING
new file mode 100644
index 0000000..d159169
--- /dev/null
+++ b/kernel/drivers/misc/atemsys-main/COPYING
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.
diff --git a/kernel/drivers/misc/atemsys-main/Makefile b/kernel/drivers/misc/atemsys-main/Makefile
new file mode 100644
index 0000000..28e2cc9
--- /dev/null
+++ b/kernel/drivers/misc/atemsys-main/Makefile
@@ -0,0 +1,34 @@
+# atemsys.ko: Provides usermode access to:
+# 
+#   - PCI configuration space
+#   - Device IO memory
+#   - Contiguous DMA memory
+#   - Single device interrupt
+# 
+# Copyright (c) 2009 - 2018 acontis technologies GmbH, Ravensburg, Germany <info@acontis.com>
+# All rights reserved.
+#
+# Author: K. Olbrich <k.olbrich@acontis.com>
+#
+# To compile and load the atemsys driver
+#
+# make modules 
+# [ -c /dev/atemsys ] || sudo mknod /dev/atemsys c 101 0 
+# sudo insmod atemsys.ko
+
+CONFIG_MODULE_SIG=n
+
+KERNELDIR ?= /lib/modules/$(shell uname -r)/build
+
+obj-m += atemsys.o
+
+all: modules
+
+modules:
+	$(MAKE) -C $(KERNELDIR) M=$(shell pwd) modules
+
+modules_install:
+	$(MAKE) -C $(KERNELDIR) M=$(shell pwd) modules_install
+
+clean:
+	$(MAKE) -C $(KERNELDIR) M=$(shell pwd) clean
diff --git a/kernel/drivers/misc/atemsys-main/atemsys.c b/kernel/drivers/misc/atemsys-main/atemsys.c
new file mode 100644
index 0000000..31dc919
--- /dev/null
+++ b/kernel/drivers/misc/atemsys-main/atemsys.c
@@ -0,0 +1,4885 @@
+/*-----------------------------------------------------------------------------
+ * atemsys.c
+ * Copyright (c) 2009 - 2020 acontis technologies GmbH, Ravensburg, Germany
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Responsible               Paul Bussmann
+ * Description               Provides usermode access to:
+ *   - PCI configuration space
+ *   - Device IO memory
+ *   - Contiguous DMA memory
+ *   - Single device interrupt
+ *
+ *
+ * The driver should be used in the following way:
+ *
+ * - Make sure this driver's device node is present, e.g. by calling "mknod /dev/atemsys c 101 0"
+ *
+ * - open()
+ *   Open the driver (there can be more than one file descriptor active in parallel).
+ *
+ * - close()
+ *   Close driver. Free resources, if any were allocated.
+ *
+ * - ioctl(ATEMSYS_IOCTL_PCI_FIND_DEVICE)
+ *   Scan for PCI Devices.
+ *   Input:  VendorID, DeviceID, InstanceNo
+ *   Output: BusNo, DevNo, FuncNo
+ *
+ * - ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE)
+ *   Configures PCI device. This ioctl pins the given PCI device to the current filedescriptor.
+ *   Input:  BusNo, DevNo, FuncNo
+ *   Output: Physical IO base address, IO area length, IRQ number
+ *   The device must be released explicitly in order to configure the next device. The ioctl fails
+ *   with errno EBUSY if the device is in use by another device driver.
+ *
+ * - ioctl(ATEMSYS_IOCTL_PCI_RELEASE_DEVICE)
+ *   Release PCI device and free resources assigned to PCI device (interrupt, DMA memory, ...).
+ *
+ * - mmap(0, dwSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, 0);
+ *   Allocates and maps DMA memory of size dwSize. Note that the last parameter (offset) must be 0.
+ *   Input:  Length in byte
+ *   Output: Pointer to the allocated memory and DMA physical address. On success this address is
+ *           written into the first 4 bytes of the allocated memory.
+ *
+ * - mmap(0, IOphysSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, IOphysAddr);
+ *   Maps IO memory of size IOphysSize.
+ *   PCI device:
+ *     First call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE). The IOphysAddr and IOphysSize
+ *     parameters must correspond to the base IO address and size returned by
+ *     ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE), or the ioctl will fail.
+ *   Non-PCI device:
+ *     Don't call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) beforehand; just pass
+ *     IOphysAddr and IOphysSize. No checks are done.
+ *   Input:  Phys.IO base address, IO area length in byte
+ *   Output: Pointer to the mapped IO memory.
+ *   The user should call munmap() once the requested DMA memory is no longer needed. In any case,
+ *   the allocated / mapped memory is released / unmapped when the module is unloaded.
+ *
+ * - ioctl(ATEMSYS_IOCTL_INT_CONNECT)
+ *   Connect an ISR to the device's interrupt.
+ *   If the parameter is USE_PCI_INT, then the IRQ is taken from the selected PCI device.
+ *   So in this case you have to call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) first, or it will fail.
+ *   Input:  IRQ-Number or USE_PCI_INT
+ *   Output: none
+ *   The device interrupt is active if this ioctl succeeds. The caller should do a read() on the file
+ *   descriptor. The read call unblocks if an interrupt is received. If the read is unblocked, the
+ *   interrupt is disabled on the (A)PIC and the caller must acknowledge the interrupt on the device
+ *   (write to the mmapped IO register). When the next read() is executed, the interrupt is enabled again
+ *   on the (A)PIC. So a missing interrupt acknowledge will hold the INT line active and interrupt
+ *   thrashing will happen (the ISR is called again, read() unblocks, ...).
+ *   Note that this ioctl will fail with errno EPERM if the interrupt line is shared.
+ *   PCI device:
+ *     The ioctl will try to use Message Signaled Interrupts (MSI) if supported
+ *     by the PCI device. By definition, MSI interrupts are never shared, and MSI support is
+ *     mandatory for PCI Express :).
+ *
+ * - ioctl(ATEMSYS_IOCTL_INT_DISCONNECT)
+ *   Disconnect from device's interrupt.
+ *
+ * - ioctl(ATEMSYS_IOCTL_INT_INFO)
+ *   Query used interrupt number.
+ *
+ * - read()
+ *   see ioctl(ATEMSYS_IOCTL_INT_CONNECT)
+ *
+ *
+ *  Changes see atemsys.h
+ *
+ *----------------------------------------------------------------------------*/
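+
+/*
+ * Illustrative usermode sequence following the description above (a minimal
+ * sketch only, error handling omitted; the ioctl codes and descriptor types
+ * are defined in atemsys.h, field values here are placeholders):
+ *
+ *   int fd = open("/dev/atemsys", O_RDWR);
+ *
+ *   ATEMSYS_T_PCI_SELECT_DESC oSel = {0};             // fill nVendID / nDevID / nInstance
+ *   ioctl(fd, ATEMSYS_IOCTL_PCI_FIND_DEVICE, &oSel);  // returns Bus/Dev/Fun
+ *   ioctl(fd, ATEMSYS_IOCTL_PCI_CONF_DEVICE, &oSel);  // pins device, returns BARs + IRQ
+ *
+ *   // offset 0 selects DMA allocation; the physical address is written into
+ *   // the start of the mapping (see the mmap() description above)
+ *   void* pvDma = mmap(0, dwSize, PROT_READ | PROT_WRITE, MAP_SHARED | MAP_LOCKED, fd, 0);
+ *
+ *   ioctl(fd, ATEMSYS_IOCTL_INT_CONNECT, ATEMSYS_USE_PCI_INT);
+ *   for (;;) {
+ *       __u32 dwCount;
+ *       read(fd, &dwCount, sizeof(dwCount));  // blocks until an interrupt arrives
+ *       // acknowledge the interrupt on the device via the mapped IO registers
+ *   }
+ */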
+
+#define ATEMSYS_C
+
+#include <linux/module.h>
+#include "atemsys.h"
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+
+#if !(defined NO_IRQ) && (defined __aarch64__)
+#define NO_IRQ   ((unsigned int)(-1))
+#endif
+
+#if (defined CONFIG_XENO_COBALT)
+#include <rtdm/driver.h>
+#else
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/fs.h>
+#include <linux/interrupt.h>
+#include <linux/semaphore.h>
+#include <linux/mutex.h>
+#include <linux/sched.h>
+#include <linux/smp.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,11,00))
+#include <linux/sched/signal.h>
+#endif
+#include <linux/irq.h>
+#include <linux/list.h>
+#if (defined CONFIG_OF)
+#include <linux/of_device.h>
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(4,13,0))
+#include <linux/uaccess.h>
+#else
+#include <asm/uaccess.h>
+#endif
+
+#include <asm/current.h>
+#include <linux/compat.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,10,0))
+#include <linux/dma-direct.h>
+#endif
+
+#if (defined CONFIG_DTC)
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#endif /* CONFIG_DTC */
+#endif /* CONFIG_XENO_COBALT */
+
+#if ((defined CONFIG_OF) \
+       && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0) /* not tested */))
+#define INCLUDE_ATEMSYS_DT_DRIVER    1
+#include <linux/etherdevice.h>
+#include <linux/clk.h>
+#include <linux/phy.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/regulator/consumer.h>
+#include <linux/pm_runtime.h>
+#include <linux/of_mdio.h>
+#include <linux/of_device.h>
+#include <linux/of_irq.h>
+#include <linux/of_net.h>
+#include <linux/delay.h>
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <asm/param.h>
+#include <linux/of_gpio.h>
+#include <linux/reset.h>
+#endif
+#if ((defined CONFIG_PCI) \
+       && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,4,0) /* not tested */))
+#define INCLUDE_ATEMSYS_PCI_DRIVER    1
+#include <linux/aer.h>
+#endif
+
+#if !(defined HAVE_IRQ_TO_DESC) && !(defined CONFIG_HAVE_DOVETAIL) && !(defined CONFIG_IRQ_PIPELINE)
+ #if (LINUX_VERSION_CODE >= KERNEL_VERSION(3,4,1))
+  #define INCLUDE_IRQ_TO_DESC
+ #endif
+#else
+ #if HAVE_IRQ_TO_DESC
+  #define INCLUDE_IRQ_TO_DESC
+ #endif
+#endif
+
+/* legacy support */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(4,13,0))
+#define wait_queue_entry_t wait_queue_t
+#endif
+
+#ifndef VM_RESERVED
+#define VM_RESERVED (VM_DONTEXPAND | VM_DONTDUMP)
+#endif
+
+
+/* define this if IO memory should also be mapped into the kernel (for debugging only) */
+#undef DEBUG_IOREMAP
+
+MODULE_AUTHOR("acontis technologies GmbH <info@acontis.com>");
+MODULE_DESCRIPTION("Generic usermode PCI driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(ATEMSYS_VERSION_STR);
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,18))
+#error "At least kernel version 2.6.18 is needed to compile!"
+#endif
+
+static char* AllowedPciDevices = "PCI_ANY_ID";
+module_param(AllowedPciDevices, charp, 0000);
+MODULE_PARM_DESC(AllowedPciDevices, "Bind only PCI devices in the semicolon-separated list, e.g. AllowedPciDevices=\"0000:01:00.0\"; an empty string disables the atemsys_pci driver.");
+
+/* Workaround for older kernels */
+/* from 'linux/kern_levels.h' */
+/* integer equivalents of KERN_<LEVEL> */
+#ifndef LOGLEVEL_ERR
+#define LOGLEVEL_ERR        3   /* error conditions */
+#endif
+#ifndef LOGLEVEL_WARNING
+#define LOGLEVEL_WARNING    4   /* warning conditions */
+#endif
+#ifndef LOGLEVEL_INFO
+#define LOGLEVEL_INFO       6   /* informational */
+#endif
+#ifndef LOGLEVEL_DEBUG
+#define LOGLEVEL_DEBUG      7   /* debug-level messages */
+#endif
+
+static int loglevel = LOGLEVEL_INFO;
+module_param(loglevel, int, 0);
+MODULE_PARM_DESC(loglevel, "Set log level (default LOGLEVEL_INFO), see include/linux/kern_levels.h");
+
+#if (defined CONFIG_XENO_COBALT)
+#define PRINTK(prio, str, ...) rtdm_printk(prio ATEMSYS_DEVICE_NAME ": " str,  ##__VA_ARGS__)
+#else
+#define PRINTK(prio, str, ...) printk(prio ATEMSYS_DEVICE_NAME ": " str,  ##__VA_ARGS__)
+#endif /* CONFIG_XENO_COBALT */
+
+#define ERR(str, ...) (LOGLEVEL_ERR <= loglevel)?     PRINTK(KERN_ERR, str, ##__VA_ARGS__)     :0
+#define WRN(str, ...) (LOGLEVEL_WARNING <= loglevel)? PRINTK(KERN_WARNING, str, ##__VA_ARGS__) :0
+#define INF(str, ...) (LOGLEVEL_INFO <= loglevel)?    PRINTK(KERN_INFO, str, ##__VA_ARGS__)    :0
+#define DBG(str, ...) (LOGLEVEL_DEBUG <= loglevel)?   PRINTK(KERN_INFO, str, ##__VA_ARGS__)    :0
+
+
+#ifndef PAGE_UP
+#define PAGE_UP(addr)   (((addr)+((PAGE_SIZE)-1))&(~((PAGE_SIZE)-1)))
+#endif
+#ifndef PAGE_DOWN
+#define PAGE_DOWN(addr) ((addr)&(~((PAGE_SIZE)-1)))
+#endif
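+/* e.g. with 4 KiB pages: PAGE_UP(0x1001) == 0x2000, PAGE_DOWN(0x1001) == 0x1000 */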
+
+/* Compatibility: kernels as old as 2.6.18 do not define DMA_BIT_MASK */
+#ifndef DMA_BIT_MASK
+#define DMA_BIT_MASK(n) (((n) == 64) ? ~0ULL : ((1ULL<<(n))-1))
+#endif
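+/* e.g. DMA_BIT_MASK(32) == 0xFFFFFFFF, DMA_BIT_MASK(64) == ~0ULL */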
+
+#ifndef HAVE_ACCESS_OK_TYPE
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0))
+#define HAVE_ACCESS_OK_TYPE 0
+#else
+#define HAVE_ACCESS_OK_TYPE 1
+#endif
+#endif
+
+#if HAVE_ACCESS_OK_TYPE
+#define ACCESS_OK(type, addr, size)     access_ok(type, addr, size)
+#else
+#define ACCESS_OK(type, addr, size)     access_ok(addr, size)
+#endif
+
+#if ((defined CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,18,0)) && !(defined CONFIG_XENO_COBALT))
+  #define OF_DMA_CONFIGURE(dev, of_node) of_dma_configure(dev, of_node, true)
+#elif ((defined CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,1,0)) && !(defined CONFIG_XENO_COBALT))
+  #define OF_DMA_CONFIGURE(dev, of_node) of_dma_configure(dev, of_node)
+#elif ((defined CONFIG_OF) && (LINUX_VERSION_CODE >= KERNEL_VERSION(3,16,0)) && !(defined CONFIG_XENO_COBALT))
+ #define OF_DMA_CONFIGURE(dev, of_node) of_dma_configure(dev)
+#else
+ #define OF_DMA_CONFIGURE(dev, of_node)
+#endif
+
+typedef struct _ATEMSYS_T_IRQ_DESC
+{
+    u32               irq;
+    atomic_t          count;
+    atomic_t          totalCount;
+#if (defined CONFIG_XENO_COBALT)
+    rtdm_irq_t        irq_handle;
+    rtdm_event_t      irq_event;
+#else
+    atomic_t          irqStatus;
+    wait_queue_head_t q;
+#endif /* CONFIG_XENO_COBALT */
+#if (defined INCLUDE_IRQ_TO_DESC)
+    bool              irq_is_level;
+#endif
+} ATEMSYS_T_IRQ_DESC;
+
+struct _ATEMSYS_T_PCI_DRV_DESC_PRIVATE;
+struct _ATEMSYS_T_DRV_DESC_PRIVATE;
+typedef struct _ATEMSYS_T_DEVICE_DESC
+{
+    struct list_head list;
+#if (defined CONFIG_PCI)
+    struct pci_dev* pPcidev;
+  #if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+    struct _ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pPciDrvDesc;
+  #endif
+#endif
+    struct platform_device* pPlatformDev;
+  #if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+    struct _ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDesc;
+  #endif
+
+    ATEMSYS_T_IRQ_DESC  irqDesc;
+
+    /* supported features */
+    bool bSupport64BitDma;
+} ATEMSYS_T_DEVICE_DESC;
+
+typedef struct _ATEMSYS_T_MMAP_DESC
+{
+   struct list_head  list;
+   ATEMSYS_T_DEVICE_DESC* pDevDesc;
+   dma_addr_t        dmaAddr;
+   void*             pVirtAddr;
+   size_t            len;
+} ATEMSYS_T_MMAP_DESC;
+
+#if (defined CONFIG_OF)
+#define ATEMSYS_DT_DRIVER_NAME "atemsys"
+/* udev auto-loading support via DTB */
+static const struct of_device_id atemsys_ids[] = {
+    { .compatible = ATEMSYS_DT_DRIVER_NAME },
+    { /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, atemsys_ids);
+#endif /* CONFIG_OF */
+
+
+#define ATEMSYS_MAX_NUMBER_DRV_INSTANCES 10
+
+#if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+typedef struct _ATEMSYS_T_PCI_DRV_DESC_PRIVATE
+{
+    struct pci_dev*             pPciDev;
+
+    int                         nPciDomain;
+    int                         nPciBus;
+    int                         nPciDev;
+    int                         nPciFun;
+
+    unsigned short              wVendorId;
+    unsigned short              wDevice;
+    unsigned short              wRevision;
+    unsigned short              wSubsystem_vendor;
+    unsigned short              wSubsystem_device;
+
+    ATEMSYS_T_PCI_MEMBAR        aBars[ATEMSYS_PCI_MAXBAR];
+    int                         nBarCnt;
+
+    ATEMSYS_T_DEVICE_DESC*      pDevDesc;
+    unsigned int                dwIndex;
+} ATEMSYS_T_PCI_DRV_DESC_PRIVATE;
+
+static ATEMSYS_T_PCI_DRV_DESC_PRIVATE*  S_apPciDrvDescPrivate[ATEMSYS_MAX_NUMBER_DRV_INSTANCES];
+#endif
+
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+#define ATEMSYS_MAX_NUMBER_OF_CLOCKS 10
+
+typedef struct
+{
+    void __iomem*   pbyBase;
+    __u64           qwPhys;
+    __u32           dwSize;
+} ATEMSYS_T_IOMEM;
+
+typedef struct _ATEMSYS_T_DRV_DESC_PRIVATE
+{
+    int                         nDev_id;
+    struct net_device*          netdev;
+    struct platform_device*     pPDev;
+    struct device_node*         pDevNode;
+
+    /* storage and identification */
+    ATEMSYS_T_MAC_INFO          MacInfo;
+
+    /* powermanagement */
+    struct reset_control*       pResetCtl;
+
+    /* clocks */
+    const char*                 clk_ids[ATEMSYS_MAX_NUMBER_OF_CLOCKS];
+    struct clk*                 clks[ATEMSYS_MAX_NUMBER_OF_CLOCKS];
+    int                         nCountClk;
+
+    /* PHY */
+    ATEMSYS_T_PHY_INFO          PhyInfo;
+    phy_interface_t             PhyInterface;
+    struct device_node*         pPhyNode;
+    struct device_node*         pMdioNode;
+    struct device_node*         pMdioDevNode; /* node for own mdio bus */
+    struct phy_device*          pPhyDev;
+    struct regulator*           pPhyRegulator;
+    struct task_struct*         etx_thread_StartPhy;
+    struct task_struct*         etx_thread_StopPhy;
+
+    /* PHY reset*/
+    int                         nPhyResetGpioPin;
+    bool                        bPhyResetGpioActiveHigh;
+    int                         nPhyResetDuration;
+    int                         nPhyResetPostDelay;
+
+    /* mdio */
+    ATEMSYS_T_MDIO_ORDER        MdioOrder;
+    struct mii_bus*             pMdioBus;
+    struct mutex                mdio_order_mutex;
+    struct mutex                mdio_mutex;
+    wait_queue_head_t           mdio_wait_queue;
+    int                         mdio_wait_queue_cnt;
+
+#ifdef CONFIG_TI_K3_UDMA
+    /* Ti CPSWG Channel, Flow & Ring */
+#define ATEMSYS_UDMA_CHANNELS 10
+    void*                       apvTxChan[ATEMSYS_UDMA_CHANNELS];
+    int                         anTxIrq[ATEMSYS_UDMA_CHANNELS];
+    void*                       apvRxChan[ATEMSYS_UDMA_CHANNELS];
+    int                         anRxIrq[ATEMSYS_UDMA_CHANNELS];
+#endif /*#ifdef CONFIG_TI_K3_UDMA*/
+
+#define IOMEMLIST_LENGTH 20
+    ATEMSYS_T_IOMEM             oIoMemList[IOMEMLIST_LENGTH];
+
+    /* frame descriptor of the EcMaster connection */
+    ATEMSYS_T_DEVICE_DESC*      pDevDesc;
+
+} ATEMSYS_T_DRV_DESC_PRIVATE;
+
+static ATEMSYS_T_DRV_DESC_PRIVATE*  S_apDrvDescPrivate[ATEMSYS_MAX_NUMBER_DRV_INSTANCES];
+
+static int StartPhyThread(void* pvData);
+static int StopPhyThread(void* pvData);
+static int CleanUpEthernetDriverOnRelease(ATEMSYS_T_DEVICE_DESC* pDevDesc);
+static int GetMacInfoIoctl(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam);
+static int PhyStartStopIoctl( unsigned long ioctlParam);
+static int GetMdioOrderIoctl(unsigned long ioctlParam);
+static int ReturnMdioOrderIoctl(unsigned long ioctlParam);
+static int GetPhyInfoIoctl(unsigned long ioctlParam);
+static int PhyResetIoctl(unsigned long ioctlParam);
+static int ResetPhyViaGpio(ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate);
+static int EthernetDriverRemove(struct platform_device* pPDev);
+static int EthernetDriverProbe(struct platform_device* pPDev);
+
+#if (defined CONFIG_XENO_COBALT)
+static int StartPhy(struct platform_device* pPDev);
+static int StopPhy(struct platform_device* pPDev);
+typedef struct _ATEMSYS_T_WORKER_THREAD_DESC
+{
+    struct task_struct*     etx_thread;
+    int (* pfNextTask)(void*);
+    void*                   pNextTaskData;
+    struct mutex            WorkerTask_mutex;
+    bool                    bWorkerTaskShutdown;
+    bool                    bWorkerTaskRunning;
+} ATEMSYS_T_WORKER_THREAD_DESC;
+static ATEMSYS_T_WORKER_THREAD_DESC S_oAtemsysWorkerThreadDesc;
+
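+/* Cobalt build only: deferred tasks (e.g. PHY start/stop) are queued into
+ * S_oAtemsysWorkerThreadDesc and executed by this thread in Linux context. */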
+static int AtemsysWorkerThread(void* data)
+{
+    void* pWorkerTaskData = NULL;
+    int (* pfWorkerTask)(void*);
+    pfWorkerTask = NULL;
+
+    S_oAtemsysWorkerThreadDesc.bWorkerTaskRunning = true;
+
+    for (;;)
+    {
+        mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+        if (S_oAtemsysWorkerThreadDesc.bWorkerTaskShutdown)
+        {
+            mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+            break;
+        }
+        pfWorkerTask = S_oAtemsysWorkerThreadDesc.pfNextTask;
+        pWorkerTaskData = S_oAtemsysWorkerThreadDesc.pNextTaskData;
+        S_oAtemsysWorkerThreadDesc.pfNextTask = NULL;
+        S_oAtemsysWorkerThreadDesc.pNextTaskData = NULL;
+        mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+
+        if ((NULL != pfWorkerTask) && (NULL != pWorkerTaskData))
+        {
+            pfWorkerTask(pWorkerTaskData);
+        }
+        msleep(100);
+    }
+
+    S_oAtemsysWorkerThreadDesc.bWorkerTaskRunning = false;
+
+    return 0;
+}
+#endif /* #if (defined CONFIG_XENO_COBALT) */
+
+#endif /* INCLUDE_ATEMSYS_DT_DRIVER */
+
+
+static void dev_munmap(struct vm_area_struct* vma);
+
+#if (defined CONFIG_XENO_COBALT)
+   static int dev_interrupt_handler(rtdm_irq_t* irq_handle);
+#else
+   static irqreturn_t dev_interrupt_handler(int nIrq, void* pParam);
+#endif /* CONFIG_XENO_COBALT */
+
+static struct vm_operations_struct mmap_vmop =
+{
+   .close = dev_munmap,
+};
+
+static DEFINE_MUTEX(S_mtx);
+static ATEMSYS_T_DEVICE_DESC S_DevNode;
+static struct class* S_pDevClass;
+static struct device* S_pDev;
+static struct platform_device* S_pPlatformDev = NULL;
+
+#if !(defined CONFIG_XENO_COBALT)
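+/* Level-triggered interrupts stay asserted until acknowledged on the device,
+ * so they are masked after delivery and unmasked again on the next read()
+ * (see the usage description at the top of this file); edge-triggered
+ * interrupts are left enabled. */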
+static void dev_enable_irq(ATEMSYS_T_IRQ_DESC* pIrqDesc)
+{
+    /* enable/disable level type interrupts, not edge type interrupts */
+#if (defined INCLUDE_IRQ_TO_DESC)
+    if (pIrqDesc->irq_is_level)
+#endif
+    {
+        atomic_inc(&pIrqDesc->irqStatus);
+        enable_irq(pIrqDesc->irq);
+    }
+}
+
+static void dev_disable_irq(ATEMSYS_T_IRQ_DESC* pIrqDesc)
+{
+    /* enable/disable level type interrupts, not edge type interrupts */
+#if (defined INCLUDE_IRQ_TO_DESC)
+    if (!pIrqDesc->irq_is_level) return;
+#endif
+
+    if (atomic_read(&pIrqDesc->irqStatus) > 0)
+    {
+        disable_irq_nosync(pIrqDesc->irq);
+        atomic_dec(&pIrqDesc->irqStatus);
+    }
+}
+
+static int dev_irq_disabled(ATEMSYS_T_IRQ_DESC* pIrqDesc)
+{
+    /* only level type interrupts get disabled */
+#if (defined INCLUDE_IRQ_TO_DESC)
+    if (!pIrqDesc->irq_is_level) return 0;
+#endif
+
+    if (atomic_read(&pIrqDesc->irqStatus) == 0)
+    {
+        return 1;
+    }
+    return 0;
+}
+#endif /* !CONFIG_XENO_COBALT */
+
+#if (!defined __arm__) && (!defined __aarch64__)
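+/* Non-ARM fallback allocator: grab physically contiguous pages and mark them
+ * reserved so the memory may be mapped into user space. */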
+static void* dev_dma_alloc(u32 dwLen, dma_addr_t* pDmaAddr)
+{
+   unsigned long virtAddr;
+   unsigned long tmpAddr;
+   u32 tmpSize;
+
+   virtAddr =  __get_free_pages(GFP_KERNEL | GFP_DMA, get_order(dwLen));
+   if (! virtAddr)
+   {
+      ERR("mmap: __get_free_pages failed\n");
+      return NULL;
+   }
+
+   tmpAddr = virtAddr;
+   tmpSize = dwLen;
+
+   while (tmpSize > 0)
+   {
+     SetPageReserved( virt_to_page(tmpAddr) );
+     tmpAddr += PAGE_SIZE;
+     tmpSize -= PAGE_SIZE;
+   }
+
+   *pDmaAddr = virt_to_phys((void*) virtAddr);
+
+   return (void*) virtAddr;
+}
+
+static void dev_dma_free(u32 dwLen, void* virtAddr)
+{
+   unsigned long tmpAddr = (unsigned long) virtAddr;
+   u32 tmpSize = dwLen;
+
+   while (tmpSize > 0)
+   {
+     ClearPageReserved( virt_to_page(tmpAddr) );
+     tmpAddr += PAGE_SIZE;
+     tmpSize -= PAGE_SIZE;
+   }
+
+   free_pages((unsigned long) virtAddr, get_order(dwLen));
+}
+#endif /* !__arm__ && !__aarch64__ */
+
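+/* VMA close handler: frees the DMA buffer backing a user mapping, using the
+ * allocator that matches how it was allocated (coherent DMA vs. free pages) */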
+static void dev_munmap(struct vm_area_struct* vma)
+{
+   ATEMSYS_T_MMAP_DESC* pMmapDesc = (ATEMSYS_T_MMAP_DESC*) vma->vm_private_data;
+
+   INF("dev_munmap: 0x%px -> 0x%px (%d)\n",
+         (void*) pMmapDesc->pVirtAddr, (void*)(unsigned long)pMmapDesc->dmaAddr, (int) pMmapDesc->len);
+    if (0 == pMmapDesc->dmaAddr) { INF("dev_munmap: 0 == pMmapDesc->dmaAddr!\n"); return; }
+    if (NULL == pMmapDesc->pVirtAddr) { INF("dev_munmap: NULL == pMmapDesc->pVirtAddr!\n"); return; }
+
+   /* free DMA memory */
+#if (defined CONFIG_PCI)
+   if (pMmapDesc->pDevDesc->pPcidev == NULL)
+#endif
+   {
+#if (defined __arm__) || (defined __aarch64__)
+      dmam_free_coherent(&pMmapDesc->pDevDesc->pPlatformDev->dev, pMmapDesc->len, pMmapDesc->pVirtAddr, pMmapDesc->dmaAddr);
+#else
+      dev_dma_free(pMmapDesc->len, pMmapDesc->pVirtAddr);
+#endif
+   }
+#if (defined CONFIG_PCI)
+   else
+   {
+#if ((defined __aarch64__) \
+    || (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) \
+    || ((defined __arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) \
+    || ((defined __amd64__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) )
+      dma_free_coherent(&pMmapDesc->pDevDesc->pPcidev->dev, pMmapDesc->len, pMmapDesc->pVirtAddr, pMmapDesc->dmaAddr);
+#else
+      pci_free_consistent(pMmapDesc->pDevDesc->pPcidev, pMmapDesc->len, pMmapDesc->pVirtAddr, pMmapDesc->dmaAddr);
+#endif /* __aarch64__ */
+   }
+#endif /* CONFIG_PCI */
+   kfree(pMmapDesc);
+}
+
+#if (defined CONFIG_PCI)
+/*
+ * Lookup PCI device
+ */
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+struct pci_dev *pci_get_bus_and_slot(unsigned int bus, unsigned int devfn)
+{
+    struct pci_dev* dev = NULL;
+
+    for_each_pci_dev(dev) {
+        if (pci_domain_nr(dev->bus) == 0 &&
+            (dev->bus->number == bus && dev->devfn == devfn))
+            return dev;
+    }
+    return dev;
+}
+#endif
+
+static int dev_pci_select_device(ATEMSYS_T_DEVICE_DESC* pDevDesc, ATEMSYS_T_PCI_SELECT_DESC* pPciDesc, size_t size)
+{
+    int nRetVal = -EFAULT;
+    s32 nPciBus, nPciDev, nPciFun, nPciDomain;
+
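+    /* the byte size of the user-supplied descriptor selects the ABI version */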
+    switch (size)
+    {
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00):
+    {
+        ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 oPciDesc_v1_0_00;
+        nRetVal = copy_from_user(&oPciDesc_v1_0_00, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)pPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00));
+        if (0 != nRetVal)
+        {
+            ERR("dev_pci_select_device failed: %d\n", nRetVal);
+            goto Exit;
+        }
+        nPciBus    = oPciDesc_v1_0_00.nPciBus;
+        nPciDev    = oPciDesc_v1_0_00.nPciDev;
+        nPciFun    = oPciDesc_v1_0_00.nPciFun;
+        nPciDomain = 0;
+    } break;
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05):
+    {
+        ATEMSYS_T_PCI_SELECT_DESC_v1_3_05 oPciDesc_v1_3_05;
+        nRetVal = copy_from_user(&oPciDesc_v1_3_05, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)pPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05));
+        if (0 != nRetVal)
+        {
+            ERR("dev_pci_select_device failed: %d\n", nRetVal);
+            goto Exit;
+        }
+        nPciBus    = oPciDesc_v1_3_05.nPciBus;
+        nPciDev    = oPciDesc_v1_3_05.nPciDev;
+        nPciFun    = oPciDesc_v1_3_05.nPciFun;
+        nPciDomain = oPciDesc_v1_3_05.nPciDomain;
+    } break;
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12):
+    {
+        ATEMSYS_T_PCI_SELECT_DESC_v1_4_12 oPciDesc_v1_4_12;
+        nRetVal = copy_from_user(&oPciDesc_v1_4_12, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)pPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12));
+        if (0 != nRetVal)
+        {
+            ERR("dev_pci_select_device failed: %d\n", nRetVal);
+            goto Exit;
+        }
+        nPciBus    = oPciDesc_v1_4_12.nPciBus;
+        nPciDev    = oPciDesc_v1_4_12.nPciDev;
+        nPciFun    = oPciDesc_v1_4_12.nPciFun;
+        nPciDomain = oPciDesc_v1_4_12.nPciDomain;
+    } break;
+    default:
+    {
+        nRetVal = -EFAULT;
+        ERR("pci_conf: EFAULT\n");
+        goto Exit;
+    }
+    }
+
+    /* Lookup for pci_dev object */
+    pDevDesc->pPcidev       = NULL;
+#if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+    pDevDesc->pPciDrvDesc   = NULL;
+    {
+        unsigned int i = 0;
+
+        for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++)
+        {
+            ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pDrvInstance = S_apPciDrvDescPrivate[i];
+            if (   (pDrvInstance                != NULL)
+                && (pDrvInstance->nPciDomain    == nPciDomain)
+                && (pDrvInstance->nPciBus       == nPciBus)
+                && (pDrvInstance->nPciDev       == nPciDev)
+                && (pDrvInstance->nPciFun       == nPciFun))
+            {
+                if (pDrvInstance->pDevDesc != NULL)
+                {
+                    ERR("dev_pci_select_device: device \"%s\" in use by another instance?\n", pci_name(pDrvInstance->pPciDev));
+                    nRetVal = -EBUSY;
+                    goto Exit;
+                }
+                pDevDesc->pPcidev        = pDrvInstance->pPciDev;
+                pDevDesc->pPciDrvDesc    = pDrvInstance;
+                pDrvInstance->pDevDesc   = pDevDesc;
+                INF("pci_select: from pci driver %04x:%02x:%02x.%x\n", (u32)nPciDomain, (u32)nPciBus, (u32)nPciDev, (u32)nPciFun);
+                break;
+            }
+        }
+    }
+    if (pDevDesc->pPcidev == NULL)
+#endif
+    {
+        pDevDesc->pPcidev = pci_get_domain_bus_and_slot(nPciDomain, nPciBus, PCI_DEVFN(nPciDev, nPciFun));
+        INF("pci_select: %04x:%02x:%02x.%x\n", (u32)nPciDomain, (u32)nPciBus, (u32)nPciDev, (u32)nPciFun);
+    }
+    if (pDevDesc->pPcidev == NULL)
+    {
+        WRN("pci_select: PCI-Device  %04x:%02x:%02x.%x not found\n",
+            (unsigned) nPciDomain, (unsigned) nPciBus, (unsigned) nPciDev, (unsigned) nPciFun);
+        goto Exit;
+    }
+
+    nRetVal = DRIVER_SUCCESS;
+
+Exit:
+    return nRetVal;
+}
+
+static int DefaultPciSettings(struct pci_dev* pPciDev)
+{
+    int nRetVal = -EIO;
+    int nRes = -EIO;
+
+    /* Turn on Memory-Write-Invalidate if it is supported by the device*/
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+    pci_set_mwi(pPciDev);
+#else
+    pci_try_set_mwi(pPciDev);
+#endif
+
+    /* remove wrong dma_coherent bit on ARM systems */
+#if ((defined __aarch64__) || (defined __arm__))
+ #if (LINUX_VERSION_CODE < KERNEL_VERSION(4,15,0))
+  #if (defined CONFIG_PHYS_ADDR_T_64BIT)
+    if (is_device_dma_coherent(&pPciDev->dev))
+    {
+        pPciDev->dev.archdata.dma_coherent = false;
+        INF("%s: DefaultPciSettings: Clear device.archdata dma_coherent bit!\n", pci_name(pPciDev));
+    }
+  #endif
+ #else
+  #if ((defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)))
+    if (0 != pPciDev->dev.dma_coherent)
+    {
+        pPciDev->dev.dma_coherent = 0;
+        INF("%s: DefaultPciSettings: Clear device dma_coherent bit!\n", pci_name(pPciDev));
+    }
+  #endif
+ #endif
+#endif
+
+#if ((LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0)) || !(defined __aarch64__))
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,55))
+    nRes = dma_set_coherent_mask(&pPciDev->dev, DMA_BIT_MASK(32));
+#else
+    nRes = dma_set_mask_and_coherent(&pPciDev->dev, DMA_BIT_MASK(32));
+#endif
+    if (nRes)
+#endif
+    {
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(3,12,55))
+        nRes = dma_set_coherent_mask(&pPciDev->dev, DMA_BIT_MASK(64));
+#else
+        nRes = dma_set_mask_and_coherent(&pPciDev->dev, DMA_BIT_MASK(64));
+#endif
+        if (nRes)
+        {
+            ERR("%s: DefaultPciSettings: dma_set_mask_and_coherent failed\n", pci_name(pPciDev));
+            nRetVal = nRes;
+            goto Exit;
+        }
+    }
+    pci_set_master(pPciDev);
+
+    /* Try to enable MSI (Message Signaled Interrupts). MSIs are never shared, so
+     * interrupt mode can be used even if the legacy interrupt line would have to be
+     * shared with other devices.
+     */
+    if (pci_enable_msi(pPciDev))
+    {
+        INF("%s: DefaultPciSettings: legacy INT configured\n", pci_name(pPciDev));
+    }
+    else
+    {
+        INF("%s: DefaultPciSettings: MSI configured\n", pci_name(pPciDev));
+    }
+
+    nRetVal = 0;
+
+Exit:
+   return nRetVal;
+}
+
+/*
+ * See also kernel/Documentation/PCI/pci.txt for the recommended PCI initialization sequence
+ */
+static int ioctl_pci_configure_device(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam, size_t size)
+{
+    int nRetVal = -EIO;
+    int nRc;
+    int i;
+    unsigned long ioBase;
+    s32 nBar = 0;
+    u32 dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0);
+    ATEMSYS_T_PCI_SELECT_DESC_v1_4_12 oPciDesc;
+    memset(&oPciDesc, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12));
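+    /* as in dev_pci_select_device(), the descriptor size selects the ABI version */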
+    switch (size)
+    {
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00):
+    {
+        dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0);
+    } break;
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05):
+    {
+        dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,3,5);
+    } break;
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12):
+    {
+        dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,4,12);
+    } break;
+    default:
+    {
+        nRetVal = -EIO;
+        ERR("pci_conf: Invalid parameter\n");
+        goto Exit;
+    }
+    }
+
+    if (pDevDesc->pPcidev != NULL)
+    {
+        WRN("pci_conf: error call ioctl(ATEMSYS_IOCTL_PCI_RELEASE_DEVICE) first\n");
+        goto Exit;
+    }
+    if (dev_pci_select_device(pDevDesc, (ATEMSYS_T_PCI_SELECT_DESC*)ioctlParam, size) != DRIVER_SUCCESS)
+    {
+        goto Exit;
+    }
+
+#if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+    if (NULL != pDevDesc->pPciDrvDesc)
+    {
+        for (i = 0; i < pDevDesc->pPciDrvDesc->nBarCnt ; i++)
+        {
+            if ((EC_ATEMSYSVERSION(1,4,12) != dwAtemsysApiVersion) && (pDevDesc->pPciDrvDesc->aBars[i].qwIOMem > 0xFFFFFFFF))
+            {
+                ERR("pci_conf: 64-Bit IO address not supported\n");
+                INF("pci_conf: Update LinkLayer for 64-Bit IO address support!\n");
+                nRetVal = -ENODEV;
+                goto Exit;
+            }
+
+            oPciDesc.aBar[i].qwIOMem = pDevDesc->pPciDrvDesc->aBars[i].qwIOMem;
+            oPciDesc.aBar[i].dwIOLen = pDevDesc->pPciDrvDesc->aBars[i].dwIOLen;
+
+        }
+
+        oPciDesc.nBarCnt = pDevDesc->pPciDrvDesc->nBarCnt;
+        oPciDesc.dwIrq   = (u32)pDevDesc->pPcidev->irq;
+    }
+    else
+#endif
+    {
+        /* enable device */
+        nRc = pci_enable_device(pDevDesc->pPcidev);
+        if (nRc < 0)
+        {
+            ERR("pci_conf: pci_enable_device failed\n");
+            pDevDesc->pPcidev = NULL;
+            goto Exit;
+        }
+
+        /* Check if IO-memory is in use by another driver */
+        nRc = pci_request_regions(pDevDesc->pPcidev, ATEMSYS_DEVICE_NAME);
+        if (nRc < 0)
+        {
+            ERR("pci_conf: device \"%s\" in use by another driver?\n", pci_name(pDevDesc->pPcidev));
+            pDevDesc->pPcidev = NULL;
+            nRetVal = -EBUSY;
+            goto Exit;
+        }
+
+        /* find the memory BAR */
+        for (i = 0; i < ATEMSYS_PCI_MAXBAR ; i++)
+        {
+            if (pci_resource_flags(pDevDesc->pPcidev, i) & IORESOURCE_MEM)
+            {
+                /* IO area address */
+                ioBase = pci_resource_start(pDevDesc->pPcidev, i);
+
+                if ((EC_ATEMSYSVERSION(1,4,12) != dwAtemsysApiVersion) && (ioBase > 0xFFFFFFFF))
+                {
+                    ERR("pci_conf: 64-Bit IO address not supported\n");
+                    pci_release_regions(pDevDesc->pPcidev);
+                    pDevDesc->pPcidev = NULL;
+                    nRetVal = -ENODEV;
+                    goto Exit;
+                }
+
+                /* IO area length */
+                oPciDesc.aBar[nBar].dwIOLen = pci_resource_len(pDevDesc->pPcidev, i);
+                oPciDesc.aBar[nBar].qwIOMem = ioBase;
+
+                nBar++;
+            }
+        }
+
+        nRc = DefaultPciSettings(pDevDesc->pPcidev);
+        if (nRc)
+        {
+            pci_release_regions(pDevDesc->pPcidev);
+            pDevDesc->pPcidev = NULL;
+            goto Exit;
+        }
+
+        /* number of memory BARs */
+        /* assigned IRQ */
+        oPciDesc.nBarCnt = nBar;
+        oPciDesc.dwIrq   = pDevDesc->pPcidev->irq;
+    }
+
+#if defined(__arm__) && 0
+   /*
+    * This is required for TI's TMDXEVM8168 (Cortex A8) eval board
+    * \sa TI "DM81xx AM38xx PCI Express Root Complex Driver User Guide"
+    * "DM81xx RC supports maximum remote read request size (MRRQS) as 256 bytes"
+    */
+   pcie_set_readrq(pDevDesc->pPcidev, 256);
+#endif
+
+    switch (dwAtemsysApiVersion)
+    {
+    case EC_ATEMSYSVERSION(1,0,0):
+    {
+        ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 oPciDesc_v1_0_00;
+        memset(&oPciDesc_v1_0_00, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00));
+        if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)))
+        {
+            nRetVal = -EFAULT;
+            ERR("pci_conf: EFAULT\n");
+            goto Exit;
+        }
+        oPciDesc_v1_0_00.nBarCnt = oPciDesc.nBarCnt;
+        oPciDesc_v1_0_00.dwIrq   = oPciDesc.dwIrq;
+        for (i = 0; i < oPciDesc_v1_0_00.nBarCnt ; i++)
+        {
+            oPciDesc_v1_0_00.aBar[i].dwIOLen = oPciDesc.aBar[i].dwIOLen;
+            oPciDesc_v1_0_00.aBar[i].dwIOMem = (u32)oPciDesc.aBar[i].qwIOMem;
+        }
+        nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, &oPciDesc_v1_0_00, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_configure_device failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case EC_ATEMSYSVERSION(1,3,5):
+    {
+        ATEMSYS_T_PCI_SELECT_DESC_v1_3_05 oPciDesc_v1_3_05;
+        memset(&oPciDesc_v1_3_05, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05));
+        if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)))
+        {
+            nRetVal = -EFAULT;
+            ERR("pci_conf: EFAULT\n");
+            goto Exit;
+        }
+        oPciDesc_v1_3_05.nBarCnt = oPciDesc.nBarCnt;
+        oPciDesc_v1_3_05.dwIrq   = oPciDesc.dwIrq;
+        for (i = 0; i < oPciDesc_v1_3_05.nBarCnt ; i++)
+        {
+            oPciDesc_v1_3_05.aBar[i].dwIOLen = oPciDesc.aBar[i].dwIOLen;
+            oPciDesc_v1_3_05.aBar[i].dwIOMem = (u32)oPciDesc.aBar[i].qwIOMem;
+        }
+        nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, &oPciDesc_v1_3_05, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_configure_device failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case EC_ATEMSYSVERSION(1,4,12):
+    {
+        if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)))
+        {
+            nRetVal = -EFAULT;
+            ERR("pci_conf: EFAULT\n");
+            goto Exit;
+        }
+        nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, &oPciDesc, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_configure_device failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    default:
+    {
+        nRetVal = -EFAULT;
+        goto Exit;
+    }
+    }
+
+   nRetVal = 0;
+
+Exit:
+   return nRetVal;
+}
+
+static int ioctl_pci_finddevice(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam, size_t size)
+{
+    int nRetVal = -EIO;
+    struct pci_dev* pPciDev = NULL;
+    s32 nVendor, nDevice, nInstance, nInstanceId;
+    u32 dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0);
+    ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 oPciDesc_v1_0_00;
+    ATEMSYS_T_PCI_SELECT_DESC_v1_3_05 oPciDesc_v1_3_05;
+    ATEMSYS_T_PCI_SELECT_DESC_v1_4_12 oPciDesc_v1_4_12;
+
+    switch (size)
+    {
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00):
+    {
+        dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,0,0);
+        memset(&oPciDesc_v1_0_00, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00));
+        if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)))
+        {
+            nRetVal = -EFAULT;
+        }
+        nRetVal = copy_from_user(&oPciDesc_v1_0_00, (ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_finddevice failed: %d\n", nRetVal);
+            goto Exit;
+        }
+        nVendor   = oPciDesc_v1_0_00.nVendID;
+        nDevice   = oPciDesc_v1_0_00.nDevID;
+        nInstance = oPciDesc_v1_0_00.nInstance;
+    } break;
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05):
+    {
+        dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,3,5);
+        memset(&oPciDesc_v1_3_05, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05));
+        if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)))
+        {
+            nRetVal = -EFAULT;
+        }
+        nRetVal = copy_from_user(&oPciDesc_v1_3_05, (ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_finddevice failed: %d\n", nRetVal);
+            goto Exit;
+        }
+        nVendor   = oPciDesc_v1_3_05.nVendID;
+        nDevice   = oPciDesc_v1_3_05.nDevID;
+        nInstance = oPciDesc_v1_3_05.nInstance;
+    } break;
+    case sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12):
+    {
+        dwAtemsysApiVersion = EC_ATEMSYSVERSION(1,4,12);
+        memset(&oPciDesc_v1_4_12, 0, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12));
+        if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)))
+        {
+            nRetVal = -EFAULT;
+        }
+        nRetVal = copy_from_user(&oPciDesc_v1_4_12, (ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_finddevice failed: %d\n", nRetVal);
+            goto Exit;
+        }
+        nVendor   = oPciDesc_v1_4_12.nVendID;
+        nDevice   = oPciDesc_v1_4_12.nDevID;
+        nInstance = oPciDesc_v1_4_12.nInstance;
+    } break;
+    default:
+    {
+        nRetVal = -EIO;
+        ERR("pci_conf: Invalid parameter\n");
+        goto Exit;
+    }
+    }
+
+    if (-EFAULT == nRetVal)
+    {
+        ERR("pci_find: EFAULT\n");
+        nRetVal = -EFAULT;
+        goto Exit;
+    }
+
+    INF("pci_find: ven 0x%x dev 0x%x nInstance %d\n", nVendor, nDevice, nInstance);
+
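+    /* walk the list of matching devices until the requested instance is
+     * reached; pci_get_device() releases the reference on the previous hit */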
+    for (nInstanceId = 0; nInstanceId <= nInstance; nInstanceId++ )
+    {
+        pPciDev = pci_get_device (nVendor, nDevice, pPciDev);
+    }
+
+    if (pPciDev == NULL)
+    {
+        WRN("pci_find: device 0x%x:0x%x:%d not found\n", nVendor, nDevice, nInstance);
+        nRetVal = -ENODEV;
+        goto Exit;
+    }
+
+    INF("pci_find: found 0x%x:0x%x:%d -> %s\n",
+       nVendor, nDevice, nInstance, pci_name(pPciDev));
+
+    switch (dwAtemsysApiVersion)
+    {
+    case EC_ATEMSYSVERSION(1,0,0):
+    {
+        oPciDesc_v1_0_00.nPciBus = (s32)pPciDev->bus->number;
+        oPciDesc_v1_0_00.nPciDev = (s32)PCI_SLOT(pPciDev->devfn);
+        oPciDesc_v1_0_00.nPciFun = (s32)PCI_FUNC(pPciDev->devfn);
+
+        nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_0_00*)ioctlParam, &oPciDesc_v1_0_00, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_0_00));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_finddevice failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case EC_ATEMSYSVERSION(1,3,5):
+    {
+        oPciDesc_v1_3_05.nPciDomain = (s32)pci_domain_nr(pPciDev->bus);
+        oPciDesc_v1_3_05.nPciBus    = (s32)pPciDev->bus->number;
+        oPciDesc_v1_3_05.nPciDev    = (s32)PCI_SLOT(pPciDev->devfn);
+        oPciDesc_v1_3_05.nPciFun    = (s32)PCI_FUNC(pPciDev->devfn);
+
+        nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_3_05*)ioctlParam, &oPciDesc_v1_3_05, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_3_05));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_finddevice failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case EC_ATEMSYSVERSION(1,4,12):
+    {
+        oPciDesc_v1_4_12.nPciDomain = (s32)pci_domain_nr(pPciDev->bus);
+        oPciDesc_v1_4_12.nPciBus    = (s32)pPciDev->bus->number;
+        oPciDesc_v1_4_12.nPciDev    = (s32)PCI_SLOT(pPciDev->devfn);
+        oPciDesc_v1_4_12.nPciFun    = (s32)PCI_FUNC(pPciDev->devfn);
+
+        nRetVal = copy_to_user((ATEMSYS_T_PCI_SELECT_DESC_v1_4_12*)ioctlParam, &oPciDesc_v1_4_12, sizeof(ATEMSYS_T_PCI_SELECT_DESC_v1_4_12));
+        if (0 != nRetVal)
+        {
+            ERR("ioctl_pci_finddevice failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    }
+
+    nRetVal = 0;
+
+Exit:
+    return nRetVal;
+}
+#endif /* CONFIG_PCI */
+
+#if (defined CONFIG_DTC)
+/*
+ * Lookup Nth (0: first) compatible device tree node with "interrupts" property present.
+ */
+static struct device_node * atemsys_of_lookup_intnode(const char* compatible, int deviceIdx)
+{
+   struct device_node* device = NULL;
+   struct device_node* child = NULL;
+   struct device_node* tmp = NULL;
+   int devCnt;
+
+   /* Lookup Nth device tree node */
+   devCnt = 0;
+   for_each_compatible_node(tmp, NULL, compatible)
+   {
+      if (devCnt == deviceIdx)
+      {
+         device = tmp;
+         break;
+      }
+      ++devCnt;
+   }
+
+   if (device == NULL) return NULL;
+
+   if (of_get_property(device, "interrupts", NULL)) return device;
+
+   /* e.g. the vETSEC has 2 interrupt groups; search the child nodes as well */
+   for_each_child_of_node(device, child)
+   {
+      if (of_get_property(child, "interrupts", NULL)) return child;
+   }
+
+   return NULL;
+}
+
+/*
+ * Map interrupt number taken from the OF Device Tree (\sa .dts file) into
+ * virtual interrupt number which can be passed to request_irq().
+ * The usual (device driver) way is to use the irq_of_parse_and_map() function.
+ *
+ * We search all device tree nodes which have the "compatible" property
+ * equal to compatible. Search until the Nth device is found. Then
+ * map the Nth interrupt (given by intIdx) with irq_of_parse_and_map().
+ */
+static unsigned atemsys_of_map_irq_to_virq(const char* compatible, int deviceIdx, int intIdx)
+{
+   unsigned virq;
+   struct device_node* device = NULL;
+
+   /* Lookup Nth device */
+   device = atemsys_of_lookup_intnode(compatible, deviceIdx);
+   if (! device)
+   {
+      INF("atemsys_of_map_irq_to_virq: device tree node '%s':%d not found.\n",
+         compatible, deviceIdx);
+      return NO_IRQ;
+   }
+
+   virq = irq_of_parse_and_map(device, intIdx);
+   if (virq == NO_IRQ)
+   {
+      ERR("atemsys_of_map_irq_to_virq: irq_of_parse_and_map failed for"
+          " device tree node '%s':%d, IntIdx %d.\n",
+         compatible, deviceIdx, intIdx);
+   }
+
+   return virq;
+}
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+static unsigned int atemsysDtDriver_of_map_irq_to_virq(ATEMSYS_T_DEVICE_DESC* pDevDesc, int nIdx)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    struct device_node*         device          = NULL;
+    unsigned int                irq;
+    unsigned int                i               = 0;
+
+    /* get node from atemsys platform driver list */
+    for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++)
+    {
+
+        pDrvDescPrivate = S_apDrvDescPrivate[i];
+        if (NULL == pDrvDescPrivate)
+        {
+            continue;
+        }
+
+        if (pDrvDescPrivate->pDevDesc == pDevDesc)
+        {
+            device = pDrvDescPrivate->pDevNode;
+            break;
+        }
+    }
+    if ((NULL == device) || (NULL == pDrvDescPrivate))
+    {
+        INF("atemsysDtDriver_of_map_irq_to_virq: Cannot find connected device tree node\n");
+        return NO_IRQ;
+    }
+
+    /* get interrupt from node */
+    irq = irq_of_parse_and_map(device, nIdx);
+    if (NO_IRQ == irq)
+    {
+        ERR("atemsysDtDriver_of_map_irq_to_virq: irq_of_parse_and_map failed for"
+            " device tree node Interrupt index %d\n",
+            nIdx);
+    }
+
+    return irq;
+}
+#endif /* INCLUDE_ATEMSYS_DT_DRIVER) */
+#endif /* CONFIG_DTC */
+
+#if (defined INCLUDE_IRQ_TO_DESC)
+static bool atemsys_irq_is_level(unsigned int irq_id)
+{
+     bool irq_is_level = true;
+     struct irq_data* irq_data = NULL;
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,1))
+    {
+        irq_data = irq_get_irq_data(irq_id);
+    }
+#else
+    {
+        struct irq_desc* desc;
+        desc = irq_to_desc(irq_id);
+        if (desc)
+        {
+            irq_data = &desc->irq_data;
+        }
+    }
+#endif
+    if (irq_data)
+    {
+        irq_is_level = irqd_is_level_type(irq_data);
+    }
+
+    return irq_is_level;
+}
+#endif /* INCLUDE_IRQ_TO_DESC */
+
+static int ioctl_int_connect(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam)
+{
+    int nRetVal = -EIO;
+    int nRc;
+    ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL;
+    unsigned int irq = 0;
+
+#if (defined CONFIG_PCI)
+    if (ioctlParam == ATEMSYS_USE_PCI_INT)
+    {
+        /* Use IRQ number from selected PCI device */
+
+        if (pDevDesc->pPcidev == NULL)
+        {
+            WRN("intcon: error call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) first\n");
+            goto Exit;
+        }
+
+        irq = pDevDesc->pPcidev->irq;
+        INF("intcon: Use IRQ (%d) from PCI config\n", irq);
+    }
+    else
+#endif /* CONFIG_PCI */
+    {
+#if (defined CONFIG_DTC)
+        /* The ioctlParam is the Nth compatible device in the OF device tree (0: first, 1: second, ...)
+         * TODO "compatible string" and "interrupt index" should be provided by usermode as IOCTL param
+         */
+        if ( /* Use interrupt number at idx 1 (Rx-Interrupt) for TSEC / eTSEC */
+             ((irq = atemsys_of_map_irq_to_virq("fsl,etsec2", ioctlParam, 1)) == NO_IRQ) /* PPC, eTSEC */
+          && ((irq = atemsys_of_map_irq_to_virq("gianfar", ioctlParam, 1)) == NO_IRQ) /* PPC, eTSEC */
+          /* PRU-ICSS for am572x, am335x */
+          && ((irq = atemsys_of_map_irq_to_virq("acontis,device", 0, ioctlParam)) == NO_IRQ)
+          /* Use interrupt number at idx 0 (Catch-All-Interrupt) for GEM */
+          && ((irq = atemsys_of_map_irq_to_virq("xlnx,ps7-ethernet-1.00.a", ioctlParam, 0)) == NO_IRQ) /* ARM, Xilinx Zynq */
+           )
+        {
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+            /* Get interrupt from the bound device tree node */
+            if ((irq = atemsysDtDriver_of_map_irq_to_virq(pDevDesc, ioctlParam)) == NO_IRQ)
+#endif
+            {
+                nRetVal = -EPERM;
+                goto Exit;
+            }
+        }
+
+#else
+        /* Use IRQ number passed as ioctl argument */
+        irq = ioctlParam;
+        INF("intcon: Use IRQ (%d) passed by user\n", irq);
+#endif
+    }
+
+    pIrqDesc = &pDevDesc->irqDesc;
+    if (pIrqDesc->irq)
+    {
+        WRN("intcon: error IRQ %u already connected. Call ioctl(ATEMSYS_IOCTL_INT_DISCONNECT) first\n",
+            (unsigned) pIrqDesc->irq);
+        goto Exit;
+    }
+
+    /* Setup some data which is needed during Interrupt handling */
+    memset(pIrqDesc, 0, sizeof(ATEMSYS_T_IRQ_DESC));
+    atomic_set(&pIrqDesc->count, 0);
+    atomic_set(&pIrqDesc->totalCount, 0);
+
+#if (defined CONFIG_XENO_COBALT)
+    rtdm_event_init(&pIrqDesc->irq_event, 0);
+    nRc = rtdm_irq_request(&pIrqDesc->irq_handle, irq, dev_interrupt_handler, 0, ATEMSYS_DEVICE_NAME, pDevDesc);
+    if (nRc)
+    {
+        ERR("ioctl_int_connect: rtdm_irq_request() for IRQ %d returned error: %d\n", irq, nRc);
+        nRetVal = nRc;
+        goto Exit;
+    }
+    nRc = rtdm_irq_enable(&pIrqDesc->irq_handle);
+    if (nRc)
+    {
+        ERR("ioctl_int_connect: rtdm_irq_enable() for IRQ %d returned error: %d\n", irq, nRc);
+        nRetVal = nRc;
+        goto Exit;
+    }
+#else
+    init_waitqueue_head(&pIrqDesc->q);
+    atomic_set(&pIrqDesc->irqStatus, 1); /* IRQ enabled */
+
+    /* Setup non shared IRQ */
+    nRc = request_irq(irq, dev_interrupt_handler, 0, ATEMSYS_DEVICE_NAME, pDevDesc);
+    if (nRc)
+    {
+        ERR("ioctl_int_connect: request_irq (IRQ %d) failed. Err %d\n", irq, nRc);
+        nRetVal = -EPERM;
+        goto Exit;
+    }
+#endif /* CONFIG_XENO_COBALT */
+
+    pIrqDesc->irq = irq;
+#if (defined INCLUDE_IRQ_TO_DESC)
+    pIrqDesc->irq_is_level = atemsys_irq_is_level(irq);
+#endif
+
+#if (defined INCLUDE_IRQ_TO_DESC)
+    INF("intcon: IRQ %d connected, irq_is_level = %d\n", irq, pIrqDesc->irq_is_level);
+#else
+    INF("intcon: IRQ %d connected\n", irq);
+#endif
+
+    nRetVal = 0;
+Exit:
+    return nRetVal;
+}
+
+static int ioctl_intinfo(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam)
+{
+   int nRetVal = -EIO;
+#if (defined CONFIG_XENO_COBALT)
+   ATEMSYS_T_INT_INFO* pIntInfo = (ATEMSYS_T_INT_INFO*) ioctlParam;
+   struct rtdm_fd* fd = rtdm_private_to_fd(pDevDesc);
+   if (rtdm_fd_is_user(fd))
+   {
+      nRetVal = rtdm_safe_copy_to_user(fd, &pIntInfo->dwInterrupt, &pDevDesc->irqDesc.irq, sizeof(__u32));
+      if (nRetVal)
+      {
+         ERR("ioctl_intinfo failed: %d\n", nRetVal);
+         goto Exit;
+      }
+   }
+#else
+   ATEMSYS_T_INT_INFO oIntInfo;
+   memset(&oIntInfo, 0, sizeof(ATEMSYS_T_INT_INFO));
+   if (!ACCESS_OK(VERIFY_WRITE, (ATEMSYS_T_INT_INFO*)ioctlParam, sizeof(ATEMSYS_T_INT_INFO)))
+   {
+      ERR("ioctl_intinfo: EFAULT\n");
+      nRetVal = -EFAULT;
+      goto Exit;
+   }
+   oIntInfo.dwInterrupt = pDevDesc->irqDesc.irq;
+   nRetVal = copy_to_user((ATEMSYS_T_INT_INFO*)ioctlParam, &oIntInfo, sizeof(ATEMSYS_T_INT_INFO));
+   if (0 != nRetVal)
+   {
+      ERR("ioctl_intinfo failed: %d\n", nRetVal);
+      goto Exit;
+   }
+#endif /* CONFIG_XENO_COBALT */
+
+Exit:
+    return nRetVal;
+}
+
+
+static int dev_int_disconnect(ATEMSYS_T_DEVICE_DESC* pDevDesc)
+{
+   int nRetVal = -EIO;
+   int nCnt;
+   ATEMSYS_T_IRQ_DESC* pIrqDesc = &(pDevDesc->irqDesc);
+
+#if (defined CONFIG_XENO_COBALT)
+      int nRc;
+      if (pIrqDesc->irq)
+      {
+         nRc = rtdm_irq_disable(&pIrqDesc->irq_handle);
+         if (nRc)
+         {
+            ERR("dev_int_disconnect: rtdm_irq_disable() for IRQ %d returned error: %d\n", (u32) pIrqDesc->irq, nRc);
+            nRetVal = nRc;
+            goto Exit;
+         }
+
+         nRc = rtdm_irq_free(&pIrqDesc->irq_handle);
+         if (nRc)
+         {
+            ERR("dev_int_disconnect: rtdm_irq_free() for IRQ %d returned error: %d\n", (u32) pIrqDesc->irq, nRc);
+            nRetVal = nRc;
+            goto Exit;
+         }
+
+         nCnt = atomic_read(&pIrqDesc->totalCount);
+         INF("pci_intdcon: IRQ %u disconnected. %d interrupts rcvd\n", (u32) pIrqDesc->irq, nCnt);
+
+         pIrqDesc->irq = 0;
+         rtdm_event_signal(&pIrqDesc->irq_event);
+      }
+#else
+      if (pIrqDesc->irq)
+      {
+         /* Disable INT line. We can call this, because we only allow exclusive interrupts */
+         disable_irq_nosync(pIrqDesc->irq);
+
+         /* Unregister INT routine.This will block until all pending interrupts are handled */
+         free_irq(pIrqDesc->irq, pDevDesc);
+
+         nCnt = atomic_read(&pIrqDesc->totalCount);
+         INF("pci_intdcon: IRQ %u disconnected. %d interrupts rcvd\n", (u32) pIrqDesc->irq, nCnt);
+
+         pIrqDesc->irq = 0;
+
+         /* Wakeup sleeping threads -> read() */
+         wake_up(&pIrqDesc->q);
+      }
+#endif /* CONFIG_XENO_COBALT */
+   nRetVal = 0;
+
+#if (defined CONFIG_XENO_COBALT)
+Exit:
+#endif
+   return nRetVal;
+}
+
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+#ifdef CONFIG_TI_K3_UDMA
+
+#if (LINUX_VERSION_CODE > KERNEL_VERSION(5,10,0))
+ #define CPSWG_STRUCT_VERSION_2 1
+#endif
+
+#include <linux/soc/ti/k3-ringacc.h>
+#include <linux/soc/ti/ti_sci_protocol.h>
+
+/* local copies of private structures from the TI K3 ringacc / UDMA glue drivers,
+ * needed below to read ring ids and ring memory DMA addresses */
+struct k3_ring_state {
+    u32 free;
+    u32 occ;
+    u32 windex;
+    u32 rindex;
+#ifdef CPSWG_STRUCT_VERSION_2
+    u32 tdown_complete:1;
+#endif
+};
+
+struct k3_ring {
+    struct k3_ring_rt_regs __iomem *rt;
+    struct k3_ring_fifo_regs __iomem *fifos;
+    struct k3_ringacc_proxy_target_regs  __iomem *proxy;
+    dma_addr_t  ring_mem_dma;
+    void        *ring_mem_virt;
+    struct k3_ring_ops *ops;
+    u32     size;
+    enum k3_ring_size elm_size;
+    enum k3_ring_mode mode;
+    u32     flags;
+#define K3_RING_FLAG_BUSY   BIT(1)
+#define K3_RING_FLAG_SHARED BIT(2)
+#ifdef CPSWG_STRUCT_VERSION_2
+ #define K3_RING_FLAG_REVERSE BIT(3)
+#endif
+    struct k3_ring_state state;
+    u32     ring_id;
+    struct k3_ringacc   *parent;
+    u32     use_count;
+    int     proxy_id;
+#ifdef CPSWG_STRUCT_VERSION_2
+    struct device   *dma_dev;
+    u32     asel;
+#define K3_ADDRESS_ASEL_SHIFT   48
+#endif
+};
+
+struct k3_udma_glue_common {
+    struct device *dev;
+#ifdef CPSWG_STRUCT_VERSION_2
+    struct device chan_dev;
+#endif
+    struct udma_dev *udmax;
+    const struct udma_tisci_rm *tisci_rm;
+    struct k3_ringacc *ringacc;
+    u32 src_thread;
+    u32 dst_thread;
+
+    u32  hdesc_size;
+    bool epib;
+    u32  psdata_size;
+    u32  swdata_size;
+    u32  atype;
+#ifdef CPSWG_STRUCT_VERSION_2
+    struct psil_endpoint_config *ep_config;
+#endif
+};
+
+struct k3_udma_glue_tx_channel {
+    struct k3_udma_glue_common common;
+
+    struct udma_tchan *udma_tchanx;
+    int udma_tchan_id;
+
+    struct k3_ring *ringtx;
+    struct k3_ring *ringtxcq;
+
+    bool psil_paired;
+
+    int virq;
+
+    atomic_t free_pkts;
+    bool tx_pause_on_err;
+    bool tx_filt_einfo;
+    bool tx_filt_pswords;
+    bool tx_supr_tdpkt;
+#ifdef CPSWG_STRUCT_VERSION_2
+    int udma_tflow_id;
+#endif
+};
+
+struct k3_udma_glue_rx_flow {
+    struct udma_rflow *udma_rflow;
+    int udma_rflow_id;
+    struct k3_ring *ringrx;
+    struct k3_ring *ringrxfdq;
+
+    int virq;
+};
+
+struct k3_udma_glue_rx_channel {
+    struct k3_udma_glue_common common;
+
+    struct udma_rchan *udma_rchanx;
+    int udma_rchan_id;
+    bool remote;
+
+    bool psil_paired;
+
+    u32  swdata_size;
+    int  flow_id_base;
+
+    struct k3_udma_glue_rx_flow *flows;
+    u32 flow_num;
+    u32 flows_ready;
+};
+
+
+#define AM65_CPSW_NAV_SW_DATA_SIZE 16
+#define AM65_CPSW_MAX_RX_FLOWS  1
+
+#include "../drivers/dma/ti/k3-udma.h"
+
+#include <linux/dma/k3-udma-glue.h>
+/* no-op descriptor cleanup callback for the UDMA glue layer */
+void cleanup(void *data, dma_addr_t desc_dma)
+{
+    return;
+}
+
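+/* Handle TI CPSW-G UDMA commands from user space: request and enable/disable
+ * TX/RX DMA channels and return ring ids, DMA addresses and IRQ numbers. */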
+static int CpswgCmd(void* arg,  ATEMSYS_T_CPSWG_CMD* pConfig)
+{
+    struct k3_udma_glue_tx_channel** ppTxChn = NULL;
+    struct k3_udma_glue_rx_channel** ppRxChn = NULL;
+    __u32* pnTxIrq;
+    __u32* pnRxIrq;
+    ATEMSYS_T_CPSWG_CMD oConfig;
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */
+    int nRetVal = -1;
+    memset(&oConfig, 0, sizeof(ATEMSYS_T_CPSWG_CMD));
+
+    if (NULL == pConfig)
+    {
+        nRetVal = copy_from_user(&oConfig, (ATEMSYS_T_CPSWG_CMD *)arg, sizeof(ATEMSYS_T_CPSWG_CMD));
+    }
+    else
+    {
+        memcpy(&oConfig, pConfig, sizeof(ATEMSYS_T_CPSWG_CMD));
+        nRetVal = 0;
+    }
+    if (0 != nRetVal)
+    {
+        ERR("CpswgCmd(): failed: %d\n", nRetVal);
+        goto Exit;
+    }
+    if (oConfig.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */
+        nRetVal = 0;
+        goto Exit;
+    }
+    pDrvDescPrivate = S_apDrvDescPrivate[oConfig.dwIndex];
+    if (NULL == pDrvDescPrivate)
+    {
+        ERR("CpswgCmd(): cant find instance\n");
+        nRetVal = -EBUSY;
+        goto Exit;
+    }
+
+    DBG("CpswgCmd(): dwCmd: %d\n", oConfig.dwCmd);
+    ppTxChn = (struct k3_udma_glue_tx_channel**)&pDrvDescPrivate->apvTxChan[oConfig.dwChannelIdx];
+    ppRxChn = (struct k3_udma_glue_rx_channel**)&pDrvDescPrivate->apvRxChan[oConfig.dwChannelIdx];
+    pnTxIrq = &pDrvDescPrivate->anTxIrq[oConfig.dwChannelIdx];
+    pnRxIrq = &pDrvDescPrivate->anRxIrq[oConfig.dwChannelIdx];
+
+
+    switch (oConfig.dwCmd)
+    {
+    case ATEMSYS_CPSWG_CMD_CONFIG_TX:
+    {
+        char tx_chn_name[128];
+        struct k3_ring_cfg ring_cfg =
+        {
+            .elm_size = K3_RINGACC_RING_ELSIZE_8,
+            .mode = K3_RINGACC_RING_MODE_RING,
+            .flags = 0
+        };
+        struct k3_udma_glue_tx_channel_cfg tx_cfg = { 0 };
+
+        tx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+        tx_cfg.tx_cfg = ring_cfg;
+        tx_cfg.txcq_cfg = ring_cfg;
+        tx_cfg.tx_cfg.size = oConfig.dwRingSize;
+        tx_cfg.txcq_cfg.size = oConfig.dwRingSize;
+        snprintf(tx_chn_name, sizeof(tx_chn_name), "tx%d", 0);
+
+        *ppTxChn = k3_udma_glue_request_tx_chn(&pDrvDescPrivate->pPDev->dev,
+                            tx_chn_name,
+                            &tx_cfg);
+        if (IS_ERR(*ppTxChn))
+        {
+            ERR("CpswgCmd(): Failed to request tx dma channel %ld\n", PTR_ERR(*ppTxChn));
+            *ppTxChn = NULL;
+            goto Exit;
+        }
+
+        *pnTxIrq = k3_udma_glue_tx_get_irq(*ppTxChn);
+        if (*pnTxIrq <= 0)
+        {
+            ERR("CpswgCmd(): Failed to get tx dma irq %d\n", *pnTxIrq);
+            goto Exit;
+        }
+
+        {
+            struct k3_udma_glue_tx_channel* pData = (struct k3_udma_glue_tx_channel*)*ppTxChn;
+            DBG("CpswgCmd(): k3_udma_glue_request_tx_chn(): udma_tchan_id:0x%x, ringtx:0x%x::0x%px, ringtxcq:0x%x::0x%px\n",
+            pData->udma_tchan_id,
+            pData->ringtx->ring_id, (unsigned char*)NULL + pData->ringtx->ring_mem_dma,
+            pData->ringtxcq->ring_id, (unsigned char*)NULL + pData->ringtxcq->ring_mem_dma);
+
+            oConfig.dwChanId = pData->udma_tchan_id;
+            oConfig.dwRingId = pData->ringtx->ring_id;
+            oConfig.qwRingDma = pData->ringtx->ring_mem_dma;
+            oConfig.dwRingSize = pData->ringtx->size;
+            oConfig.dwRingFdqId = pData->ringtxcq->ring_id;
+            oConfig.qwRingFdqDma = pData->ringtxcq->ring_mem_dma;
+            oConfig.dwRingFdqSize = pData->ringtxcq->size;
+
+            nRetVal = copy_to_user((ATEMSYS_T_CPSWG_CMD *)arg, &oConfig, sizeof(ATEMSYS_T_CPSWG_CMD));
+            if (0 != nRetVal)
+            {
+                ERR("CpswgCmd(): copy_to_user() failed: %d\n", nRetVal);
+            }
+        }
+    } break;
+    case ATEMSYS_CPSWG_CMD_CONFIG_RX:
+    {
+        u32  rx_flow_id_base = -1;
+        u32 fdqring_id;
+
+        struct k3_udma_glue_rx_channel_cfg rx_cfg = { 0 };
+
+        rx_cfg.swdata_size = AM65_CPSW_NAV_SW_DATA_SIZE;
+        rx_cfg.flow_id_num = AM65_CPSW_MAX_RX_FLOWS;
+        rx_cfg.flow_id_base = rx_flow_id_base;
+
+        *ppRxChn = k3_udma_glue_request_rx_chn(&pDrvDescPrivate->pPDev->dev, "rx", &rx_cfg);
+        if (IS_ERR(*ppRxChn)) {
+            ERR("CpswgCmd(): Failed to request rx dma channel %ld\n", PTR_ERR(*ppRxChn));
+           *ppRxChn = NULL;
+            goto Exit;
+        }
+
+        rx_flow_id_base = k3_udma_glue_rx_get_flow_id_base(*ppRxChn);
+        fdqring_id = K3_RINGACC_RING_ID_ANY;
+        /* only one rx flow is configured here (AM65_CPSW_MAX_RX_FLOWS == 1) */
+        {
+            u32 i = 0;
+            struct k3_ring_cfg rxring_cfg = {
+                .elm_size = K3_RINGACC_RING_ELSIZE_8,
+                .mode = K3_RINGACC_RING_MODE_RING,
+                .flags = 0,
+            };
+            struct k3_ring_cfg fdqring_cfg = {
+                .elm_size = K3_RINGACC_RING_ELSIZE_8,
+                .mode = K3_RINGACC_RING_MODE_MESSAGE,
+                .flags = K3_RINGACC_RING_SHARED,
+            };
+            struct k3_udma_glue_rx_flow_cfg rx_flow_cfg = {
+                .rx_cfg = rxring_cfg,
+                .rxfdq_cfg = fdqring_cfg,
+                .ring_rxq_id = K3_RINGACC_RING_ID_ANY,
+                .src_tag_lo_sel = K3_UDMA_GLUE_SRC_TAG_LO_USE_REMOTE_SRC_TAG,
+            };
+
+            rx_flow_cfg.ring_rxfdq0_id = fdqring_id;
+            rx_flow_cfg.rx_cfg.size = oConfig.dwRingSize;
+            rx_flow_cfg.rxfdq_cfg.size = oConfig.dwRingSize;
+
+            nRetVal = k3_udma_glue_rx_flow_init(*ppRxChn, i, &rx_flow_cfg);
+            if (nRetVal) {
+                ERR("CpswgCmd(): Failed to init rx flow%d %d\n", i, nRetVal);
+                goto Exit;
+            }
+            if (!i)
+                fdqring_id = k3_udma_glue_rx_flow_get_fdq_id(*ppRxChn, i);
+
+            *pnRxIrq = k3_udma_glue_rx_get_irq(*ppRxChn, i);
+
+            if (*pnRxIrq <= 0) {
+                ERR("CpswgCmd(): Failed to get rx dma irq %d\n", *pnRxIrq);
+                goto Exit;
+            }
+        }
+        {
+            struct k3_udma_glue_rx_flow* pData = (struct k3_udma_glue_rx_flow*)(*ppRxChn)->flows;
+
+            DBG("CpswgCmd(): k3_udma_glue_request_tx_chn(): udma_rflow_id:0x%x, rx_flow_id_base:0x%x, ringrx:0x%x::0x%px, ringrxfdq:0x%x::0x%px\n",
+            pData->udma_rflow_id, rx_flow_id_base,
+            pData->ringrx->ring_id, (unsigned char*)NULL + pData->ringrx->ring_mem_dma,
+            pData->ringrxfdq->ring_id, (unsigned char*)NULL + pData->ringrxfdq->ring_mem_dma);
+
+            oConfig.dwChanId = pData->udma_rflow_id;
+            oConfig.dwRingId = pData->ringrx->ring_id;
+            oConfig.qwRingDma = pData->ringrx->ring_mem_dma;
+            oConfig.dwRingSize = pData->ringrx->size;
+            oConfig.dwRingFdqId = pData->ringrxfdq->ring_id;
+            oConfig.qwRingFdqDma = pData->ringrxfdq->ring_mem_dma;
+            oConfig.dwRingFdqSize = pData->ringrxfdq->size;
+            oConfig.dwFlowIdBase = rx_flow_id_base;
+
+            nRetVal = copy_to_user((ATEMSYS_T_CPSWG_CMD *)arg, &oConfig, sizeof(ATEMSYS_T_CPSWG_CMD));
+            if (0 != nRetVal)
+            {
+                ERR("CpswgCmd(): copy_to_user() failed: %d\n", nRetVal);
+            }
+        }
+    } break;
+    case ATEMSYS_CPSWG_CMD_ENABLE_TX:
+    {
+        if (NULL == *ppTxChn)
+        {
+            nRetVal = -1;
+            ERR("CpswgCmd(): tx channel not ready %d\n", nRetVal);
+            goto Exit;
+        }
+        nRetVal = k3_udma_glue_enable_tx_chn(*ppTxChn);
+        if (nRetVal)
+        {
+            ERR("CpswgCmd(): k3_udma_glue_enable_tx_chn() failed %d\n", nRetVal);
+            goto Exit;
+        }
+
+    } break;
+    case ATEMSYS_CPSWG_CMD_ENABLE_RX:
+    {
+        if (NULL == *ppRxChn)
+        {
+            nRetVal = -1;
+            ERR("CpswgCmd(): rx channel not ready %d\n", nRetVal);
+            goto Exit;
+        }
+        nRetVal = k3_udma_glue_enable_rx_chn(*ppRxChn);
+        if (nRetVal) {
+            ERR("CpswgCmd(): k3_udma_glue_enable_rx_chn() failed %d\n", nRetVal);
+            goto Exit;
+        }
+
+    } break;
+    case ATEMSYS_CPSWG_CMD_DISABLE_TX:
+    {
+        if (NULL == *ppTxChn)
+        {
+            nRetVal = -1;
+            ERR("CpswgCmd(): tx channel not ready %d\n", nRetVal);
+            goto Exit;
+        }
+        /* single TX channel only; a multi-channel driver would loop over tx_ch_num here */
+        k3_udma_glue_tdown_tx_chn(*ppTxChn, false);
+
+        {
+            k3_udma_glue_reset_tx_chn(*ppTxChn, NULL, cleanup);
+            k3_udma_glue_disable_tx_chn(*ppTxChn);
+        }
+    } break;
+    case ATEMSYS_CPSWG_CMD_DISABLE_RX:
+    {
+        int i = 0;
+        if (NULL == *ppRxChn)
+        {
+            nRetVal = -1;
+            ERR("CpswgCmd(): rx channel not ready %d\n", nRetVal);
+            goto Exit;
+        }
+        k3_udma_glue_tdown_rx_chn(*ppRxChn, true);
+        for (i = 0; i < AM65_CPSW_MAX_RX_FLOWS; i++)
+            k3_udma_glue_reset_rx_chn(*ppRxChn, i, NULL, cleanup, !!i);
+
+        k3_udma_glue_disable_rx_chn(*ppRxChn);
+    } break;
+    case ATEMSYS_CPSWG_CMD_RELEASE_TX:
+    {
+        if (NULL == *ppTxChn)
+        {
+            nRetVal = -1;
+            ERR("CpswgCmd(): tx channel not ready %d\n", nRetVal);
+            goto Exit;
+        }
+        k3_udma_glue_release_tx_chn(*ppTxChn);
+        *ppTxChn = NULL;
+    } break;
+    case ATEMSYS_CPSWG_CMD_RELEASE_RX:
+    {
+        if (NULL == *ppRxChn)
+        {
+            nRetVal = -1;
+            ERR("CpswgCmd(): rx channel not ready %d\n", nRetVal);
+            goto Exit;
+        }
+        k3_udma_glue_release_rx_chn(*ppRxChn);
+        *ppRxChn = NULL;
+    } break;
+    }
+
+Exit:
+    return nRetVal;
+}
+
+
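+/* CleanCpswgCmd(): called on device_release() to disable and release any UDMA
+ * TX/RX channels still held by the closing instance, by reissuing the
+ * DISABLE/RELEASE commands through CpswgCmd(). */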
+static void CleanCpswgCmd(ATEMSYS_T_DEVICE_DESC* pDevDesc)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    ATEMSYS_T_CPSWG_CMD oConfig;
+    unsigned int dwChannelIdx = 0;
+    unsigned int dwIndex = 0;
+    if (pDevDesc == NULL)
+    {
+       return;
+    }
+    for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++)
+    {
+        if ((NULL != S_apDrvDescPrivate[dwIndex]) && pDevDesc == S_apDrvDescPrivate[dwIndex]->pDevDesc)
+        {
+            pDrvDescPrivate = S_apDrvDescPrivate[dwIndex];
+            break;
+        }
+    }
+    if (pDrvDescPrivate == NULL)
+    {
+        return;
+    }
+    for (dwChannelIdx = 0; ATEMSYS_UDMA_CHANNELS > dwChannelIdx; dwChannelIdx++)
+    {
+        void** ppvTxChn = &pDrvDescPrivate->apvTxChan[dwChannelIdx];
+        void** ppvRxChn = &pDrvDescPrivate->apvRxChan[dwChannelIdx];
+
+        if ((NULL != ppvTxChn) && (NULL != *ppvTxChn))
+        {
+            memset(&oConfig, 0, sizeof(ATEMSYS_T_CPSWG_CMD));
+            oConfig.dwIndex = dwIndex;
+            oConfig.dwChannelIdx = dwChannelIdx;
+            oConfig.dwCmd = ATEMSYS_CPSWG_CMD_DISABLE_TX;
+            CpswgCmd(NULL,  &oConfig);
+            oConfig.dwCmd = ATEMSYS_CPSWG_CMD_RELEASE_TX;
+            CpswgCmd(NULL,  &oConfig);
+        }
+        if ((NULL != ppvRxChn) && (NULL != *ppvRxChn))
+        {
+            memset(&oConfig, 0, sizeof(ATEMSYS_T_CPSWG_CMD));
+            oConfig.dwIndex = dwIndex;
+            oConfig.dwChannelIdx = dwChannelIdx;
+            oConfig.dwCmd = ATEMSYS_CPSWG_CMD_DISABLE_RX;
+            CpswgCmd(NULL,  &oConfig);
+            oConfig.dwCmd = ATEMSYS_CPSWG_CMD_RELEASE_RX;
+            CpswgCmd(NULL,  &oConfig);
+        }
+    }
+}
+#endif /*#ifdef CONFIG_TI_K3_UDMA*/
+
+
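+/* IoMemCmd() implements ATEMSYS_IOCTL_IOMEM_CMD: user space can map a physical
+ * IO region into the kernel (MAP_PERMANENT), drop the mapping again
+ * (UNMAP_PERMANENT), and perform single register accesses through it
+ * (READ/WRITE). The mappings are tracked per driver instance in oIoMemList. */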
+static int IoMemCmd(void* arg)
+{
+    ATEMSYS_T_IOMEM_CMD oIoMem;
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    unsigned int dwRetVal = 0;
+    int nRetVal = -1;
+    unsigned int dwIndex = 0;
+    nRetVal = copy_from_user(&oIoMem, (unsigned long long *)arg, sizeof(ATEMSYS_T_IOMEM_CMD));
+    if (0 != nRetVal)
+    {
+        goto Exit;
+    }
+    if (oIoMem.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */
+        nRetVal = 0;
+        goto Exit;
+    }
+    pDrvDescPrivate = S_apDrvDescPrivate[oIoMem.dwIndex];
+    if (NULL == pDrvDescPrivate)
+    {
+        ERR("IoMemCmd(): cant find instance\n");
+        nRetVal = -EBUSY;
+        goto Exit;
+    }
+
+
+    if (ATEMSYS_IOMEM_CMD_MAP_PERMANENT == oIoMem.dwCmd)
+    {
+        for (dwIndex = 0; IOMEMLIST_LENGTH>dwIndex; dwIndex++)
+        {
+            if (NULL == pDrvDescPrivate->oIoMemList[dwIndex].pbyBase)
+            {
+                break;
+            }
+        }
+        if (IOMEMLIST_LENGTH < dwIndex)
+        {
+            nRetVal = -EFAULT;
+            goto Exit;
+        }
+        pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = devm_ioremap(&pDrvDescPrivate->pPDev->dev, oIoMem.qwPhys, oIoMem.dwSize);
+        if (NULL == pDrvDescPrivate->oIoMemList[dwIndex].pbyBase )
+        {
+            pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = NULL;
+            nRetVal = -ENOMEM;;
+            goto Exit;
+        }
+        pDrvDescPrivate->oIoMemList[dwIndex].qwPhys = oIoMem.qwPhys;
+        pDrvDescPrivate->oIoMemList[dwIndex].dwSize = oIoMem.dwSize;
+        DBG("IoMemCmd(): ATEMSYS_IOMEM_CMD_MAP_PERMANENT Virt:0x%px, Phys:0x%px, Size:0x%08x\n", pDrvDescPrivate->oIoMemList[dwIndex].pbyBase, (unsigned char*)NULL + oIoMem.qwPhys, oIoMem.dwSize);
+    }
+    else
+    {
+        for (dwIndex = 0; IOMEMLIST_LENGTH>dwIndex; dwIndex++)
+        {
+            if (pDrvDescPrivate->oIoMemList[dwIndex].qwPhys == oIoMem.qwPhys)
+            {
+                break;
+            }
+        }
+        if (IOMEMLIST_LENGTH == dwIndex)
+        {
+            nRetVal = -EFAULT;
+            goto Exit;
+        }
+
+        if (ATEMSYS_IOMEM_CMD_UNMAP_PERMANENT == oIoMem.dwCmd)
+        {
+            devm_iounmap(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->oIoMemList[dwIndex].pbyBase);
+            pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = NULL;
+            pDrvDescPrivate->oIoMemList[dwIndex].qwPhys = 0;
+            pDrvDescPrivate->oIoMemList[dwIndex].dwSize = 0;
+        }
+        else
+        {
+            if (ATEMSYS_IOMEM_CMD_WRITE == oIoMem.dwCmd)
+            {
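+                /* 4- and 8-byte writes are issued as single aligned stores so an MMIO
+                 * register sees one access; any other size falls back to a byte-wise copy. */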
+                if (sizeof(unsigned int)/* 4 */  == oIoMem.dwDataSize)
+                    *(unsigned int*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset) = oIoMem.dwData[0];
+                else if (sizeof(unsigned long long)/* 8 */ == oIoMem.dwDataSize)
+                {
+                    *(unsigned long long*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset) = *(unsigned long long*)&oIoMem.dwData[0];
+                }
+                else
+                {
+                    int i = 0;
+                    for (i = 0; i < oIoMem.dwDataSize; i++)
+                    {
+                        ((unsigned char*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset))[i] = ((unsigned char*)oIoMem.dwData)[i];
+                    }
+                }
+            }
+            else if (ATEMSYS_IOMEM_CMD_READ == oIoMem.dwCmd)
+            {
+                if (sizeof(unsigned int)/* 4 */ == oIoMem.dwDataSize)
+                    oIoMem.dwData[0] = *(unsigned int*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset);
+                else
+                {
+                    int i = 0;
+                    for (i = 0; i < oIoMem.dwDataSize; i++)
+                    {
+                        ((unsigned char*)oIoMem.dwData)[i] = ((unsigned char*)(pDrvDescPrivate->oIoMemList[dwIndex].pbyBase + oIoMem.dwOffset))[i];
+                    }
+                }
+                nRetVal = copy_to_user((ATEMSYS_T_IOMEM_CMD *)arg, &oIoMem, sizeof(ATEMSYS_T_IOMEM_CMD));
+                if (0 != nRetVal)
+                {
+                    goto Exit;
+                }
+            }
+        }
+    }
+    nRetVal = 0;
+Exit:
+    return nRetVal;
+}
+
+static void CleanIoMemCmd(ATEMSYS_T_DEVICE_DESC* pDevDesc)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    unsigned int dwIndex = 0;
+    if (pDevDesc == NULL)
+    {
+        return;
+    }
+    for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++)
+    {
+        pDrvDescPrivate = S_apDrvDescPrivate[dwIndex];
+        if (NULL == pDrvDescPrivate)
+            continue;
+        if (pDrvDescPrivate->pDevDesc == pDevDesc)
+            break;
+        pDrvDescPrivate = NULL;
+    }
+    if (NULL == pDrvDescPrivate)
+    {
+        return;
+    }
+    for (dwIndex = 0; IOMEMLIST_LENGTH>dwIndex; dwIndex++)
+    {
+        if (NULL != pDrvDescPrivate->oIoMemList[dwIndex].pbyBase )
+        {
+            devm_iounmap(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->oIoMemList[dwIndex].pbyBase);
+            pDrvDescPrivate->oIoMemList[dwIndex].pbyBase = NULL;
+            pDrvDescPrivate->oIoMemList[dwIndex].qwPhys = 0;
+            pDrvDescPrivate->oIoMemList[dwIndex].dwSize = 0;
+        }
+    }
+}
+#endif /*#ifdef INCLUDE_ATEMSYS_DT_DRIVER*/
+
+
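+/* Gate on kernel > 5.14: irq_set_affinity() is only exported to modules on
+ * newer kernels (assumption based on its mainline export history). */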
+#if ((defined CONFIG_SMP) && (LINUX_VERSION_CODE > KERNEL_VERSION(5,14,0)))
+static int SetIntCpuAffinityIoctl(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam, size_t size)
+{
+    int nRetVal = -EIO;
+    ATEMSYS_T_IRQ_DESC* pIrqDesc = &(pDevDesc->irqDesc);
+    struct cpumask* pCpuMask = NULL;
+
+    if (size > sizeof(struct cpumask))
+    {
+        ERR("SetIntCpuAffinityIoctl: cpu mask length mismatch\n");
+        nRetVal = -EINVAL;
+        goto Exit;
+    }
+
+    /* prepare cpu affinity mask*/
+    pCpuMask = (struct cpumask*)kzalloc(sizeof(struct cpumask), GFP_KERNEL);
+    if (NULL == pCpuMask)
+    {
+        ERR("SetIntCpuAffinityIoctl: no memory\n");
+        nRetVal = -ENOMEM;
+        goto Exit;
+    }
+    /* no memset needed: kzalloc() already zeroed the mask, and size <= sizeof(struct cpumask) */
+
+    nRetVal = copy_from_user(pCpuMask, (struct cpumask *)ioctlParam, size);
+    if (0 != nRetVal)
+    {
+        ERR("SetIntCpuAffinityIoctl failed: %d\n", nRetVal);
+        goto Exit;
+    }
+
+    /* set cpu affinity mask*/
+    if (pIrqDesc->irq)
+    {
+        nRetVal = irq_set_affinity(pIrqDesc->irq, pCpuMask);
+        if (0 != nRetVal)
+        {
+            ERR("SetIntCpuAffinityIoctl: irq_set_affinity failed: %d\n", nRetVal);
+            nRetVal = -EIO;
+            goto Exit;
+        }
+    }
+
+    nRetVal = 0;
+Exit:
+    if (NULL != pCpuMask)
+        kfree(pCpuMask);
+
+    return nRetVal;
+}
+#endif /* #if ((defined CONFIG_SMP) && (LINUX_VERSION_CODE > KERNEL_VERSION(5,14,0))) */
+
+#if (defined CONFIG_PCI)
+static void dev_pci_release(ATEMSYS_T_DEVICE_DESC* pDevDesc)
+{
+#if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+    if (NULL != pDevDesc->pPciDrvDesc)
+    {
+        INF("pci_release: Disconnect from PCI device driver %s \n", pci_name(pDevDesc->pPcidev));
+        pDevDesc->pPciDrvDesc->pDevDesc = NULL;
+#if !(defined CONFIG_XENO_COBALT)
+        pDevDesc->pPcidev               = NULL;
+#endif
+        pDevDesc->pPciDrvDesc           = NULL;
+    }
+    else
+#endif
+
+   if (pDevDesc->pPcidev)
+   {
+      pci_disable_device(pDevDesc->pPcidev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,29))
+      /* Make sure bus-master DMA is disabled when the DMA buffers are finally released */
+      pci_clear_master(pDevDesc->pPcidev);
+#endif
+      pci_release_regions(pDevDesc->pPcidev);
+
+      pci_disable_msi(pDevDesc->pPcidev);
+
+      INF("pci_release: PCI device %s released\n", pci_name(pDevDesc->pPcidev));
+
+#if !(defined CONFIG_XENO_COBALT)
+      pDevDesc->pPcidev = NULL;
+#endif
+   }
+}
+#endif /* CONFIG_PCI */
+
+#if (defined CONFIG_XENO_COBALT)
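+/* Cobalt variant: the RTDM IRQ handler only counts the interrupt and signals an
+ * RTDM event; the real-time reader blocked in device_read() consumes it and
+ * acknowledges the IRQ on the device from user space. */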
+static int dev_interrupt_handler(rtdm_irq_t* irq_handle)
+{
+    ATEMSYS_T_DEVICE_DESC* pDevDesc = rtdm_irq_get_arg(irq_handle, ATEMSYS_T_DEVICE_DESC);
+    ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL;
+
+    if (pDevDesc != NULL)
+    {
+        pIrqDesc = &(pDevDesc->irqDesc);
+        if (pIrqDesc != NULL)
+        {
+            atomic_inc(&pIrqDesc->count);
+            atomic_inc(&pIrqDesc->totalCount);
+            rtdm_event_signal(&pIrqDesc->irq_event);
+        }
+    }
+    return RTDM_IRQ_HANDLED;
+}
+#else
+static irqreturn_t dev_interrupt_handler(int nIrq, void* pParam)
+{
+   ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) pParam;
+   ATEMSYS_T_IRQ_DESC* pIrqDesc = &(pDevDesc->irqDesc);
+
+   /* Disable the IRQ at the (A)PIC to prevent an interrupt storm after the ISR returns.
+    * In usermode the IRQ must be acknowledged on the device (IO register).
+    * The IRQ is enabled again in the read() handler!
+    * Just disabling the IRQ here doesn't work with shared IRQs!
+    */
+   dev_disable_irq(pIrqDesc);
+
+   atomic_inc(&pIrqDesc->count);
+   atomic_inc(&pIrqDesc->totalCount);
+
+   /* Wakeup sleeping threads -> read() */
+   wake_up(&pIrqDesc->q);
+
+   return IRQ_HANDLED;
+}
+#endif /* CONFIG_XENO_COBALT */
+
+/*
+ * This is called whenever a process attempts to open the device file
+ */
+#if (defined CONFIG_XENO_COBALT)
+static int device_open(struct rtdm_fd* fd, int oflags)
+{
+   ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd);
+   memset(pDevDesc, 0, sizeof(ATEMSYS_T_DEVICE_DESC));
+   rtdm_event_init(&pDevDesc->irqDesc.irq_event, 0);
+   INF("device_open %s\n", rtdm_fd_device(fd)->label);
+#else
+static int device_open(struct inode* inode, struct file* file)
+{
+   ATEMSYS_T_DEVICE_DESC* pDevDesc;
+
+   INF("device_open(0x%px)\n", file);
+
+   /* create device descriptor */
+   pDevDesc = (ATEMSYS_T_DEVICE_DESC*) kzalloc(sizeof(ATEMSYS_T_DEVICE_DESC), GFP_KERNEL);
+   if (pDevDesc == NULL)
+   {
+      return -ENOMEM;
+   }
+
+   file->private_data = (void*) pDevDesc;
+
+   /* Add descriptor to descriptor list */
+   mutex_lock(&S_mtx);
+   list_add(&pDevDesc->list, &S_DevNode.list);
+   mutex_unlock(&S_mtx);
+   try_module_get(THIS_MODULE);
+#endif /* CONFIG_XENO_COBALT */
+
+   /* use the module's platform device for memory mapping and allocation */
+   pDevDesc->pPlatformDev = S_pPlatformDev;
+
+   return DRIVER_SUCCESS;
+}
+
+#if (defined CONFIG_XENO_COBALT)
+static void device_release(struct rtdm_fd* fd)
+{
+    ATEMSYS_T_DEVICE_DESC* pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd);
+    ATEMSYS_T_IRQ_DESC* pIrqDesc = NULL;
+#else
+static int device_release(struct inode* inode, struct file* file)
+{
+   ATEMSYS_T_DEVICE_DESC* pDevDesc = file->private_data;
+#endif /* CONFIG_XENO_COBALT */
+
+   /* release device descriptor */
+   if (pDevDesc != NULL )
+   {
+       INF("device_release, pDevDesc = 0x%px\n", pDevDesc);
+
+       /* Try to tear down interrupts if they are on */
+       dev_int_disconnect(pDevDesc);
+
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+       CleanIoMemCmd(pDevDesc);
+
+ #ifdef CONFIG_TI_K3_UDMA
+       CleanCpswgCmd(pDevDesc);
+ #endif
+
+       CleanUpEthernetDriverOnRelease(pDevDesc);
+#endif
+
+#if (defined CONFIG_PCI)
+       /* Try to release PCI resources */
+       dev_pci_release(pDevDesc);
+#endif
+
+#if (defined CONFIG_XENO_COBALT)
+       pIrqDesc = &(pDevDesc->irqDesc);
+
+       if (pIrqDesc != NULL )
+       {
+          rtdm_event_clear(&pIrqDesc->irq_event);
+          rtdm_event_destroy(&pIrqDesc->irq_event);
+       }
+    }
+    return;
+#else
+       /* Remove descriptor from descriptor list */
+       mutex_lock(&S_mtx);
+
+       list_del(&pDevDesc->list);
+
+       mutex_unlock(&S_mtx);
+
+       kfree(pDevDesc);
+   }
+
+   module_put(THIS_MODULE);
+
+   return DRIVER_SUCCESS;
+#endif /* CONFIG_XENO_COBALT */
+}
+
+/*
+ * This function is called whenever a process which has already opened the
+ * device file attempts to read from it.
+ */
+ #if (defined CONFIG_XENO_COBALT)
+static ssize_t device_read(struct rtdm_fd* fd, void* bufp, size_t len)
+{
+   ATEMSYS_T_DEVICE_DESC*   pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd);
+   ATEMSYS_T_IRQ_DESC*      pIrqDesc = NULL;
+   s32 nPending;
+   int ret=0;
+
+   if (! pDevDesc)
+   {
+      return -EINVAL;
+   }
+
+   pIrqDesc = &(pDevDesc->irqDesc);
+   if (! pIrqDesc)
+   {
+      return -EINVAL;
+   }
+
+   if (len < sizeof(u32))
+   {
+      return -EINVAL;
+   }
+
+   if (rtdm_in_rt_context() == false)
+   {
+       return -EINVAL;
+   }
+
+   if (rtdm_fd_is_user(fd) == false)
+   {
+       return -EINVAL;
+   }
+
+   ret = rtdm_event_wait(&pIrqDesc->irq_event);
+   if (ret)
+   {
+       return ret;
+   }
+
+   nPending = atomic_read(&pIrqDesc->count);
+
+   ret = rtdm_safe_copy_to_user(fd, bufp, &nPending, sizeof(nPending));
+
+   if (ret)
+   {
+       ERR("device_read: rtdm_safe_copy_to_user() returned error: %d\n", ret);
+       return ret;
+   }
+
+   atomic_sub(nPending, &pIrqDesc->count);
+
+   return sizeof(nPending);
+}
+#else
+static ssize_t device_read(
+      struct file* filp,   /* see include/linux/fs.h   */
+      char __user* bufp,   /* buffer to be filled with data */
+      size_t       len,    /* length of the buffer     */
+      loff_t*      ppos)
+{
+
+   ATEMSYS_T_DEVICE_DESC*   pDevDesc = (ATEMSYS_T_DEVICE_DESC*) filp->private_data;
+   ATEMSYS_T_IRQ_DESC*      pIrqDesc = NULL;
+   s32 nPending;
+   wait_queue_entry_t wait;
+
+   if (! pDevDesc)
+   {
+      return -EINVAL;
+   }
+
+   pIrqDesc = &(pDevDesc->irqDesc);
+
+   /* DBG("device_read...(0x%px,0x%px,%d)\n", filp, bufp, len); */
+
+   init_wait(&wait);
+
+   if (len < sizeof(u32))
+   {
+      return -EINVAL;
+   }
+
+   if (pIrqDesc->irq == 0) /* IRQ already disabled */
+   {
+      return -EINVAL;
+   }
+
+   nPending = atomic_read(&pIrqDesc->count);
+   if (nPending == 0)
+   {
+      if (dev_irq_disabled(pIrqDesc))
+      {
+         dev_enable_irq(pIrqDesc);
+      }
+      if (filp->f_flags & O_NONBLOCK)
+      {
+         return -EWOULDBLOCK;
+      }
+   }
+
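+   /* classic wait-queue sleep: re-check the pending count after prepare_to_wait()
+    * so an interrupt firing between the check and the sleep is not lost */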
+   while (nPending == 0)
+   {
+      prepare_to_wait(&pIrqDesc->q, &wait, TASK_INTERRUPTIBLE);
+      nPending = atomic_read(&pIrqDesc->count);
+      if (nPending == 0)
+      {
+         schedule();
+      }
+      finish_wait(&pIrqDesc->q, &wait);
+      if (pIrqDesc->irq == 0) /* IRQ disabled while waiting for IRQ */
+      {
+         return -EINVAL;
+      }
+      if (signal_pending(current))
+      {
+         return -ERESTARTSYS;
+      }
+   }
+
+   if (copy_to_user(bufp, &nPending, sizeof(nPending)))
+   {
+      return -EFAULT;
+   }
+
+   *ppos += sizeof(nPending);
+   atomic_sub(nPending, &pIrqDesc->count);
+
+   return sizeof(nPending);
+}
+#endif /* CONFIG_XENO_COBALT */
+
+/*
+ * character device mmap method
+ */
+#if (defined CONFIG_XENO_COBALT)
+static int device_mmap(struct rtdm_fd* fd, struct vm_area_struct* vma)
+{
+   ATEMSYS_T_DEVICE_DESC*   pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd);
+#else
+static int device_mmap(struct file* filp, struct vm_area_struct* vma)
+{
+   ATEMSYS_T_DEVICE_DESC*   pDevDesc = filp->private_data;
+#endif /* CONFIG_XENO_COBALT */
+
+   int         nRet = -EIO;
+   u32         dwLen;
+   void*       pVa = NULL;
+   dma_addr_t  dmaAddr;
+   ATEMSYS_T_MMAP_DESC* pMmapNode;
+#if (defined CONFIG_PCI)
+   int         i;
+   unsigned long ioBase;
+   u32 dwIOLen, dwPageOffset;
+#endif
+
+   DBG("mmap: vm_pgoff 0x%px vm_start = 0x%px vm_end = 0x%px\n",
+         (void*) vma->vm_pgoff, (void*) vma->vm_start, (void*) vma->vm_end);
+
+   if (pDevDesc == NULL)
+   {
+      ERR("mmap: Invalid device dtor\n");
+      goto Exit;
+   }
+
+   dwLen = PAGE_UP(vma->vm_end - vma->vm_start);
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0))
+   vm_flags_set(vma, VM_RESERVED | VM_LOCKED | VM_DONTCOPY);
+#else
+   vma->vm_flags |= VM_RESERVED | VM_LOCKED | VM_DONTCOPY;
+#endif 
+
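+   /* mmap convention of this driver: a non-zero offset selects a physical IO
+    * region to map, offset 0 requests allocation of a fresh DMA buffer. */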
+   if (vma->vm_pgoff != 0)
+   {
+      /* map device IO memory */
+#if (defined CONFIG_PCI)
+      if (pDevDesc->pPcidev != NULL)
+      {
+         INF("mmap: Doing PCI device sanity check\n");
+
+         /* sanity check. Make sure that the offset parameter of the mmap() call in userspace
+          * corresponds with the PCI base IO address.
+          * Make sure the user doesn't map more IO memory than the device provides.
+          */
+         for (i = 0; i < ATEMSYS_PCI_MAXBAR; i++)
+         {
+            if (pci_resource_flags(pDevDesc->pPcidev, i) & IORESOURCE_MEM)
+            {
+               /* IO area address */
+               ioBase = PAGE_DOWN( pci_resource_start(pDevDesc->pPcidev, i) );
+
+               dwPageOffset = pci_resource_start(pDevDesc->pPcidev, i) - ioBase;
+
+               /* IO area length */
+               dwIOLen = PAGE_UP( pci_resource_len(pDevDesc->pPcidev, i) + dwPageOffset );
+
+               if (    ((vma->vm_pgoff << PAGE_SHIFT) >= ioBase)
+                    && (((vma->vm_pgoff << PAGE_SHIFT) + dwLen) <= (ioBase + dwIOLen))
+                  )
+               {
+                  /* On systems where the physical address exceeds 32 bits, the high dword is
+                   * not passed in from user space; use the correct address from pci_resource_start() */
+                  resource_size_t res_start = pci_resource_start(pDevDesc->pPcidev, i);
+                  unsigned long pgoff_new = (res_start>>PAGE_SHIFT);
+                  if (pgoff_new != vma->vm_pgoff)
+                  {
+                      INF("mmap: Correcting page offset from 0x%lx to 0x%lx, for Phys address 0x%llx",
+                              vma->vm_pgoff, pgoff_new, (u64)res_start);
+                      vma->vm_pgoff =  pgoff_new;
+                  }
+
+                  break;
+               }
+            }
+         }
+
+         /* IO bar not found? */
+         if (i == ATEMSYS_PCI_MAXBAR)
+         {
+            ERR("mmap: Invalid arguments\n");
+            nRet = -EINVAL;
+            goto Exit;
+         }
+      }
+#endif /* CONFIG_PCI */
+
+      /* avoid swapping, request IO memory */
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,3,0))
+      vm_flags_set(vma, VM_IO);
+#else
+      vma->vm_flags |= VM_IO;
+#endif 
+
+      /*
+       * avoid caching (this is at least needed for POWERPC,
+       * or machine will lock on first IO access)
+       */
+      vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+      if ((nRet = remap_pfn_range(vma,
+                                 vma->vm_start,
+                                 vma->vm_pgoff,
+                                 dwLen,
+                                 vma->vm_page_prot)) < 0)
+      {
+         ERR("mmap: remap_pfn_range failed\n");
+         goto Exit;
+      }
+
+      INF("mmap: mapped IO memory, Phys:0x%llx UVirt:0x%px Size:%u\n",
+           (u64) (((u64)vma->vm_pgoff) << PAGE_SHIFT), (void*) vma->vm_start, dwLen);
+
+#if (defined DEBUG_IOREMAP)
+      {
+        volatile unsigned char* ioaddr;
+        unsigned long ioBase = vma->vm_pgoff << PAGE_SHIFT;
+        INF("try to remap %p\n", (void*)ioBase);
+        /* DEBUG Map device's IO memory into kernel space pagetables */
+        ioaddr = (volatile unsigned char*) ioremap_nocache(ioBase, dwLen);
+        if (ioaddr == NULL)
+        {
+          ERR("ioremap_nocache failed\n");
+          goto Exit;
+        }
+        INF("io_base %p, *io_base[0]: %08x\n", ioaddr, readl(ioaddr));
+      }
+#endif /* DEBUG_IOREMAP */
+   }
+   else
+   {
+      /* allocated and map DMA memory */
+#if (defined CONFIG_PCI)
+      if (pDevDesc->pPcidev != NULL)
+      {
+#if ( (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) \
+    || (defined __aarch64__) \
+    || ((defined __arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) \
+    || ((defined __i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) \
+    || ((defined __amd64__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) )
+         pVa = dma_alloc_coherent(&pDevDesc->pPcidev->dev, dwLen, &dmaAddr, GFP_KERNEL);
+         if (NULL == pVa)
+         {
+            ERR("mmap: dma_alloc_coherent failed\n");
+            nRet = -ENOMEM;
+            goto Exit;
+         }
+#else
+         pVa = pci_alloc_consistent(pDevDesc->pPcidev, dwLen, &dmaAddr);
+         if (NULL == pVa)
+         {
+            ERR("mmap: pci_alloc_consistent failed\n");
+            nRet = -ENOMEM;
+            goto Exit;
+         }
+#endif
+      }
+      else
+#endif /* CONFIG_PCI */
+      {
+#if (defined __arm__) || (defined __aarch64__)
+ #if (defined CONFIG_OF)
+         OF_DMA_CONFIGURE(&pDevDesc->pPlatformDev->dev,pDevDesc->pPlatformDev->dev.of_node);
+ #endif
+         /* dma_alloc_coherent() is currently not tested on PPC.
+          * TODO test this and remove legacy dev_dma_alloc()
+          */
+         pVa = dmam_alloc_coherent(&pDevDesc->pPlatformDev->dev, dwLen, &dmaAddr, GFP_KERNEL);
+         if (NULL == pVa)
+         {
+            ERR("mmap: dmam_alloc_coherent failed\n");
+            nRet = -ENOMEM;
+            goto Exit;
+         }
+#else
+         pVa = dev_dma_alloc(dwLen, &dmaAddr);
+         if (NULL == pVa)
+         {
+            ERR("mmap: dev_dma_alloc failed\n");
+            nRet = -ENOMEM;
+            goto Exit;
+         }
+#endif
+      }
+
+      if ((dmaAddr > 0xFFFFFFFF) && !pDevDesc->bSupport64BitDma)
+      {
+         ERR("mmap: Can't handle 64-Bit DMA address\n");
+         INF("mmap: Update LinkLayer for 64-Bit DMA support!\n");
+         nRet = -ENOMEM;
+         goto ExitAndFree;
+      }
+
+      /* zero memory for security reasons */
+      memset(pVa, 0, dwLen);
+
+      /* Always use noncached DMA memory on ARM. Otherwise cache invalidation/sync
+       * would be necessary from usermode.
+       * That can't be done without a kernel call because these ops are privileged.
+       */
+
+      /* map the whole physically contiguous area in one piece */
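+      /* Two strategies: the legacy path below remaps the page frames directly via
+       * remap_pfn_range() (kernels before 5.4, or when ATEMSYS_LEGACY_DMA forces it),
+       * the modern path delegates the mapping to dma_mmap_coherent(). */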
+#if (!(defined ATEMSYS_LEGACY_DMA) && (LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0))) || ((defined ATEMSYS_LEGACY_DMA) && (0 != ATEMSYS_LEGACY_DMA))
+      {
+         unsigned int dwDmaPfn = 0;
+
+#if (defined __arm__) || (defined __aarch64__)
+         dwDmaPfn = (dmaAddr >> PAGE_SHIFT);
+ #if (defined CONFIG_PCI)
+  #if (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0))
+         if ((NULL != pDevDesc->pPcidev) && (0 != pDevDesc->pPcidev->dev.dma_pfn_offset))
+         {
+            dwDmaPfn = dwDmaPfn + pDevDesc->pPcidev->dev.dma_pfn_offset;
+            INF("mmap: remap_pfn_range dma pfn 0x%x, offset pfn 0x%x\n",
+                        dwDmaPfn, (u32)pDevDesc->pPcidev->dev.dma_pfn_offset);
+         }
+  #else
+         if ((NULL != pDevDesc->pPcidev) && (NULL != pDevDesc->pPcidev->dev.dma_range_map))
+         {
+            const struct bus_dma_region* map = pDevDesc->pPcidev->dev.dma_range_map;
+            unsigned long dma_pfn_offset = ((map->offset) >> PAGE_SHIFT);
+            dwDmaPfn = dwDmaPfn + dma_pfn_offset;
+            INF("mmap: remap_pfn_range dma pfn 0x%x, offset pfn 0x%x\n",
+                        dwDmaPfn, (u32)dma_pfn_offset);
+         }
+  #endif /* (LINUX_VERSION_CODE < KERNEL_VERSION(5,10,0))*/
+ #endif /* (defined CONFIG_PCI) */
+#if (!defined ATEMSYS_DONT_SET_NONCACHED_DMA_PAGEPROTECTIONLFAG)
+         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+#elif (defined __PPC__)
+         dwDmaPfn = (dmaAddr >> PAGE_SHIFT);
+#else /* x86 / x86_64 */
+         dwDmaPfn = virt_to_phys((void*)pVa) >> PAGE_SHIFT;
+#endif
+         nRet = remap_pfn_range(vma,               /* user space mapping */
+                                vma->vm_start,     /* User space virtual addr */
+                                dwDmaPfn,          /* physical page frame number */
+                                dwLen,             /* size in bytes */
+                                vma->vm_page_prot);
+         if (nRet < 0)
+         {
+            ERR("remap_pfn_range failed\n");
+            goto ExitAndFree;
+         }
+      }
+#else /* #if (defined ATEMSYS_LEGACY_DMA) */
+      {
+         struct device* pDmaDev = NULL;
+
+ #if (defined CONFIG_PCI)
+         if (NULL != pDevDesc->pPcidev)
+         {
+            pDmaDev = &pDevDesc->pPcidev->dev;
+         }
+         else
+ #endif /* (defined CONFIG_PCI) */
+         if (NULL != pDevDesc->pPlatformDev)
+         {
+            pDmaDev = &pDevDesc->pPlatformDev->dev;
+         }
+
+#if ((defined __arm__) || (defined __aarch64__)) && (!defined ATEMSYS_DONT_SET_NONCACHED_DMA_PAGEPROTECTIONLFAG)
+         vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+#endif
+            /* for Platform Device */
+         nRet = dma_mmap_coherent(pDmaDev,
+                                     vma,       /* user space mapping                   */
+                                     pVa,       /* kernel virtual address               */
+                                     dmaAddr,   /* Phys address                         */
+                                     dwLen);    /* size         */
+         if (nRet < 0)
+         {
+            ERR("dma_mmap_coherent failed\n");
+            goto ExitAndFree;
+         }
+      }
+#endif /* #if (defined ATEMSYS_LEGACY_DMA) */
+
+      /* Write the physical DMA address into the first 4 bytes of the allocated memory */
+      /* With 64-bit DMA support the upper part is written into the next 4 bytes       */
+      if (pDevDesc->bSupport64BitDma)
+      {
+         ((u32*) pVa)[0] = (u32)((u64)dmaAddr & 0xFFFFFFFF);
+         ((u32*) pVa)[1] = (u32)(((u64)dmaAddr >> 32) & 0xFFFFFFFF);
+      }
+      else
+      {
+         *((u32*) pVa) = (u32) dmaAddr;
+      }
+
+      /* Some housekeeping to be able to cleanup the allocated memory later */
+      pMmapNode = kzalloc(sizeof(ATEMSYS_T_MMAP_DESC), GFP_KERNEL);
+      if (! pMmapNode)
+      {
+         ERR("mmap: kmalloc() failed\n");
+         nRet = -ENOMEM;
+         goto ExitAndFree;
+      }
+
+      pMmapNode->pDevDesc = pDevDesc;
+      pMmapNode->dmaAddr = dmaAddr;
+      pMmapNode->pVirtAddr = pVa;
+      pMmapNode->len = dwLen;
+
+      /* Setup close callback -> deallocates DMA memory if region is unmapped by the system */
+      vma->vm_ops = &mmap_vmop;
+      vma->vm_private_data = pMmapNode;
+
+      INF("mmap: mapped DMA memory, Phys:0x%px KVirt:0x%px UVirt:0x%px Size:%u\n",
+             (void*)(unsigned long)dmaAddr, (void*)pVa, (void*)vma->vm_start, dwLen);
+   }
+
+   nRet = 0;
+
+   goto Exit;
+
+ExitAndFree:
+
+   if (pVa == NULL) goto Exit;
+
+#if (defined CONFIG_PCI)
+   if (pDevDesc->pPcidev != NULL)
+   {
+#if ( (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0)) \
+    || (defined __aarch64__) \
+    || ((defined __arm__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0))) \
+    || ((defined __i386__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) \
+    || ((defined __amd64__) && (LINUX_VERSION_CODE >= KERNEL_VERSION(4,9,0))) )
+      dma_free_coherent(&pDevDesc->pPcidev->dev, dwLen, pVa, dmaAddr);
+#else
+      pci_free_consistent(pDevDesc->pPcidev, dwLen, pVa, dmaAddr);
+#endif
+   }
+   else
+#endif
+   {
+#if (defined __arm__) || (defined __aarch64__)
+      dmam_free_coherent(&pDevDesc->pPlatformDev->dev, dwLen, pVa, dmaAddr);
+#else
+      dev_dma_free(dwLen, pVa);
+#endif
+   }
+
+Exit:
+   return nRet;
+}
+
+
+/*
+ * This function is called whenever a process tries to do an ioctl on our
+ * device file.
+ *
+ * If the ioctl is write or read/write (meaning output is returned to the
+ * calling process), the ioctl call returns the output of this function.
+ *
+ */
+#if (defined CONFIG_XENO_COBALT)
+static int atemsys_ioctl(struct rtdm_fd* fd, unsigned int cmd, void __user* user_arg)
+{
+   ATEMSYS_T_DEVICE_DESC*   pDevDesc = (ATEMSYS_T_DEVICE_DESC*) rtdm_fd_to_private(fd);
+   unsigned long   arg = (unsigned long) user_arg;
+#else
+static long atemsys_ioctl(
+      struct file* file,
+      unsigned int cmd,
+      unsigned long arg)
+{
+   ATEMSYS_T_DEVICE_DESC*   pDevDesc = file->private_data;
+#endif /* CONFIG_XENO_COBALT */
+
+   int nRetVal = -EFAULT;
+
+   if (pDevDesc == NULL)
+   {
+      ERR("ioctl: Invalid device dtor\n");
+      goto Exit;
+   }
+
+   /*
+    * Switch according to the ioctl called
+    */
+   switch (cmd)
+   {
+#if (defined CONFIG_PCI)
+      case ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00:
+      case ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_05:
+      case ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_4_12:
+      {
+         nRetVal = ioctl_pci_finddevice(pDevDesc, arg, _IOC_SIZE(cmd)); /* size determines version */
+         if (0 != nRetVal)
+         {
+           /* be quiet. ioctl may fail */
+           goto Exit;
+         }
+      } break;
+      case ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00:
+      case ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_05:
+      case ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_4_12:
+      {
+         nRetVal = ioctl_pci_configure_device(pDevDesc, arg, _IOC_SIZE(cmd)); /* size determines version */
+         if (0 != nRetVal)
+         {
+            ERR("ioctl ATEMSYS_IOCTL_PCI_CONF_DEVICE failed: %d\n", nRetVal);
+            goto Exit;
+         }
+      } break;
+
+      case ATEMSYS_IOCTL_PCI_RELEASE_DEVICE:
+      {
+         if (pDevDesc->pPcidev == NULL)
+         {
+            DBG("pci_release: No PCI device selected. Call ioctl(ATEMSYS_IOCTL_PCI_CONF_DEVICE) first\n");
+            goto Exit;
+         }
+         /* do nothing */
+         /* see device_release() -> dev_pci_release(pDevDesc)*/
+      } break;
+#endif
+      case ATEMSYS_IOCTL_INT_CONNECT:
+      {
+         nRetVal = ioctl_int_connect(pDevDesc, arg);
+         if (0 != nRetVal)
+         {
+            ERR("ioctl ATEMSYS_IOCTL_INT_CONNECT failed: %d\n", nRetVal);
+            goto Exit;
+         }
+      } break;
+
+      case ATEMSYS_IOCTL_INT_DISCONNECT:
+      {
+         nRetVal = dev_int_disconnect(pDevDesc);
+         if (0 != nRetVal)
+         {
+            /* be quiet. ioctl may fail */
+            goto Exit;
+         }
+      } break;
+
+      case ATEMSYS_IOCTL_INT_INFO:
+      {
+         nRetVal = ioctl_intinfo(pDevDesc, arg);
+         if (0 != nRetVal)
+         {
+            ERR("ioctl ATEMSYS_IOCTL_INT_INFO failed: %d\n", nRetVal);
+            goto Exit;
+         }
+      } break;
+
+      case ATEMSYS_IOCTL_MOD_GETVERSION:
+      {
+         __u32 dwVersion = USE_ATEMSYS_API_VERSION;
+
+#if (defined CONFIG_XENO_COBALT)
+         nRetVal = rtdm_safe_copy_to_user(fd, user_arg, &dwVersion, sizeof(__u32));
+#else
+         nRetVal = put_user(dwVersion, (__u32*)arg);
+#endif /* CONFIG_XENO_COBALT */
+
+         if (0 != nRetVal)
+         {
+            ERR("ioctl ATEMSYS_IOCTL_MOD_GETVERSION failed: %d\n", nRetVal);
+            goto Exit;
+         }
+      } break;
+
+      case ATEMSYS_IOCTL_MOD_SET_API_VERSION:
+      {
+         __u32 dwApiVersion = 0;
+
+#if (defined CONFIG_XENO_COBALT)
+         nRetVal = rtdm_safe_copy_from_user(fd, &dwApiVersion, user_arg, sizeof(__u32));
+#else
+         nRetVal = get_user(dwApiVersion, (__u32*)arg);
+#endif
+
+         if (0 != nRetVal)
+         {
+            ERR("ioctl ATEMSYS_IOCTL_MOD_SET_API_VERSION failed: %d\n", nRetVal);
+            goto Exit;
+         }
+
+         /* activate supported features */
+         if (EC_ATEMSYSVERSION(1,4,15) <= dwApiVersion)
+         {
+            pDevDesc->bSupport64BitDma = true;
+         }
+      } break;
+#if ((defined CONFIG_SMP) && (LINUX_VERSION_CODE > KERNEL_VERSION(5,14,0)))
+      case ATEMSYS_IOCTL_INT_SET_CPU_AFFINITY:
+      {
+          nRetVal = SetIntCpuAffinityIoctl(pDevDesc, arg, _IOC_SIZE(cmd));
+          if (0 != nRetVal)
+          {
+              ERR("ioctl ATEMSYS_IOCTL_INT_SET_CPU_AFFINITY failed: %d\n", nRetVal);
+              goto Exit;
+          }
+      } break;
+#endif
+
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+    case ATEMSYS_IOCTL_IOMEM_CMD:
+    {
+        nRetVal = IoMemCmd((void*)arg);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_IOMEM_CMD failed: 0x%x\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+
+
+#ifdef CONFIG_TI_K3_UDMA
+    case ATEMSYS_IOCTL_CPSWG_CMD:
+    {
+        nRetVal = CpswgCmd((__u32*)arg, NULL);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_CPSWG_CMD failed: 0x%x\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+#endif /*#ifdef CONFIG_TI_K3_UDMA*/
+
+    case ATEMSYS_IOCTL_GET_MAC_INFO:
+    {
+        nRetVal = GetMacInfoIoctl(pDevDesc, arg);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_GET_MAC_INFO failed: 0x%x\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case ATEMSYS_IOCTL_PHY_START_STOP:
+    {
+        nRetVal = PhyStartStopIoctl(arg);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_PHY_START_STOP failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case ATEMSYS_IOCTL_GET_MDIO_ORDER:
+    {
+        nRetVal = GetMdioOrderIoctl(arg);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_GET_MDIO_ORDER failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case ATEMSYS_IOCTL_RETURN_MDIO_ORDER:
+    {
+        nRetVal = ReturnMdioOrderIoctl(arg);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_RETURN_MDIO_ORDER failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case ATEMSYS_IOCTL_GET_PHY_INFO:
+    {
+        nRetVal = GetPhyInfoIoctl(arg);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_GET_PHY_INFO failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+    case ATEMSYS_IOCTL_PHY_RESET:
+    {
+        nRetVal = PhyResetIoctl(arg);
+        if (0 != nRetVal)
+        {
+            ERR("ioctl ATEMSYS_IOCTL_PHY_RESET failed: %d\n", nRetVal);
+            goto Exit;
+        }
+    } break;
+#endif /* INCLUDE_ATEMSYS_DT_DRIVER */
+
+      default:
+      {
+         nRetVal = -EOPNOTSUPP;
+         goto Exit;
+      } /* no break */
+   }
+
+   nRetVal = DRIVER_SUCCESS;
+
+Exit:
+   return nRetVal;
+}
+
+#if (defined CONFIG_COMPAT) && !(defined CONFIG_XENO_COBALT)
+/*
+ * ioctl processing for 32 bit process on 64 bit system
+ */
+static long atemsys_compat_ioctl(
+      struct file*  file,
+      unsigned int  cmd,
+      unsigned long arg)
+{
+   return atemsys_ioctl(file, cmd, (unsigned long) compat_ptr(arg));
+}
+#endif /* CONFIG_COMPAT && !CONFIG_XENO_COBALT */
+
+/* Module Declarations */
+
+/*
+ * This structure will hold the functions to be called
+ * when a process does something to the device we
+ * created. Since a pointer to this structure is kept in
+ * the devices table, it can't be local to
+ * module_init. NULL is for unimplemented functions.
+ */
+
+#if (defined CONFIG_XENO_COBALT)
+static struct rtdm_driver driver = {
+        .profile_info = RTDM_PROFILE_INFO(atemsys, RTDM_CLASS_EXPERIMENTAL, MAJOR_NUM, 1),
+        .device_flags = RTDM_NAMED_DEVICE,
+        .device_count = 1,
+        .context_size = sizeof(ATEMSYS_T_DEVICE_DESC),
+
+        .ops = {
+            .open      = device_open,
+            .close     = device_release,
+            .read_rt   = device_read,
+            .ioctl_rt  = atemsys_ioctl,
+            .ioctl_nrt = atemsys_ioctl,
+            .mmap      = device_mmap,
+        },
+};
+
+static struct rtdm_device device = {
+        .driver = &driver,
+        .label = ATEMSYS_DEVICE_NAME,
+};
+#else /* !CONFIG_XENO_COBALT */
+struct file_operations Fops = {
+   .read = device_read,
+   .unlocked_ioctl = atemsys_ioctl,
+#if (defined CONFIG_COMPAT)
+   .compat_ioctl = atemsys_compat_ioctl, /* ioctl processing for 32 bit process on 64 bit system */
+#endif
+   .open = device_open,
+   .mmap = device_mmap,
+   .release = device_release,   /* a.k.a. close */
+};
+#endif /* !CONFIG_XENO_COBALT */
+
+
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
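+/* GetMacInfoIoctl() looks up a device-tree driver instance by ident string and
+ * instance number, marks it as used by the calling file descriptor and returns
+ * its MAC parameters; dwErrorCode carries the EC-Master style result code. */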
+static int GetMacInfoIoctl(ATEMSYS_T_DEVICE_DESC* pDevDesc, unsigned long ioctlParam)
+{
+    ATEMSYS_T_MAC_INFO oInfo;
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */
+    int nRetVal = -1;
+    unsigned int i = 0;
+
+    memset(&oInfo, 0, sizeof(ATEMSYS_T_MAC_INFO));
+    nRetVal = copy_from_user(&oInfo, (ATEMSYS_T_MAC_INFO *)ioctlParam, sizeof(ATEMSYS_T_MAC_INFO));
+    if (0 != nRetVal)
+    {
+        ERR("GetMacInfoIoctl failed: %d\n", nRetVal);
+        goto Exit;
+    }
+
+    for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++)
+    {
+        if (NULL == S_apDrvDescPrivate[i])
+        {
+            continue;
+        }
+        if ((0 == strcmp(S_apDrvDescPrivate[i]->MacInfo.szIdent, oInfo.szIdent)) &&
+            (S_apDrvDescPrivate[i]->MacInfo.dwInstance == oInfo.dwInstance))
+        {
+            pDrvDescPrivate = S_apDrvDescPrivate[i];
+            break;
+        }
+    }
+
+    if (NULL != pDrvDescPrivate)
+    {
+        if (pDrvDescPrivate->pDevDesc != NULL)
+        {
+            ERR("GetMacInfoIoctl: device \"%s\" in use by another instance?\n", pDrvDescPrivate->pPDev->name);
+            nRetVal = -EBUSY;
+            goto Exit;
+        }
+
+        oInfo.qwRegAddr            = pDrvDescPrivate->MacInfo.qwRegAddr;
+        oInfo.dwRegSize            = pDrvDescPrivate->MacInfo.dwRegSize;
+        oInfo.dwStatus             = pDrvDescPrivate->MacInfo.dwStatus;
+        oInfo.ePhyMode             = pDrvDescPrivate->MacInfo.ePhyMode;
+        oInfo.dwIndex              = pDrvDescPrivate->MacInfo.dwIndex;
+        oInfo.bNoMdioBus           = pDrvDescPrivate->MacInfo.bNoMdioBus;
+        oInfo.dwPhyAddr            = pDrvDescPrivate->MacInfo.dwPhyAddr;
+        oInfo.bPhyResetSupported   = pDrvDescPrivate->MacInfo.bPhyResetSupported;
+
+        /* save the caller's device descriptor for cleanup on device_release */
+        pDrvDescPrivate->pDevDesc = pDevDesc;
+
+        /* attach the driver's platform device to the caller's device descriptor for memory mapping and allocation */
+        pDevDesc->pPlatformDev    = pDrvDescPrivate->pPDev;
+        pDevDesc->pDrvDesc        = pDrvDescPrivate;
+        dwRetVal = 0; /* EC_E_NOERROR */
+    }
+    else
+    {
+        dwRetVal = 0x9811000C; /* EC_E_NOTFOUND */
+    }
+
+    nRetVal = 0;
+Exit:
+    oInfo.dwErrorCode = dwRetVal;
+    nRetVal = copy_to_user((ATEMSYS_T_MAC_INFO *)ioctlParam, &oInfo, sizeof(ATEMSYS_T_MAC_INFO));
+    if (0 != nRetVal)
+    {
+        ERR("GetMacInfoIoctl failed: %d\n", nRetVal);
+    }
+    return nRetVal;
+}
+
+static int PhyStartStopIoctl( unsigned long ioctlParam)
+{
+    ATEMSYS_T_PHY_START_STOP_INFO oPhyStartStopInfo;
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */
+    int nRetVal = -1;
+    memset(&oPhyStartStopInfo, 0, sizeof(ATEMSYS_T_PHY_START_STOP_INFO));
+    nRetVal = copy_from_user(&oPhyStartStopInfo, (ATEMSYS_T_PHY_START_STOP_INFO *)ioctlParam, sizeof(ATEMSYS_T_PHY_START_STOP_INFO));
+    if (0 != nRetVal)
+    {
+        ERR("PhyStartStopIoctl failed: %d\n", nRetVal);
+        goto Exit;
+    }
+    if (oPhyStartStopInfo.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */
+        nRetVal = 0;
+        goto Exit;
+    }
+    pDrvDescPrivate = S_apDrvDescPrivate[oPhyStartStopInfo.dwIndex];
+    if (NULL == pDrvDescPrivate)
+    {
+        dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/
+        nRetVal = 0;
+        goto Exit;
+    }
+    if (oPhyStartStopInfo.bStart)
+    {
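+        /* Under Cobalt the PHY start is deferred to the worker thread (assumption:
+         * a Linux kthread can't safely be created from a real-time RTDM context);
+         * otherwise a kthread is spawned directly. */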
+#if (defined CONFIG_XENO_COBALT)
+        mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+        if (NULL == S_oAtemsysWorkerThreadDesc.pfNextTask)
+        {
+            S_oAtemsysWorkerThreadDesc.pfNextTask = StartPhyThread;
+            S_oAtemsysWorkerThreadDesc.pNextTaskData = (void*)pDrvDescPrivate->pPDev;
+        }
+        else
+        {
+            mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+            ERR("PhyStartStopIoctl: StartPhy failed! WorkerThread is busy!\n");
+            nRetVal = -EAGAIN;
+            goto Exit;
+        }
+        mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+#else
+        pDrvDescPrivate->etx_thread_StartPhy = kthread_create(StartPhyThread,(void*)pDrvDescPrivate->pPDev,"StartPhyThread");
+        if(NULL == pDrvDescPrivate->etx_thread_StartPhy)
+        {
+            ERR("PhyStartStopIoctl: Cannot create kthread for StartPhyThread\n");
+            nRetVal = -EAGAIN;
+            goto Exit;
+        }
+        wake_up_process(pDrvDescPrivate->etx_thread_StartPhy);
+#endif /*#if (defined CONFIG_XENO_COBALT)*/
+    }
+    else
+    {
+#if (defined CONFIG_XENO_COBALT)
+        mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+        if (NULL == S_oAtemsysWorkerThreadDesc.pfNextTask)
+        {
+            S_oAtemsysWorkerThreadDesc.pfNextTask = StopPhyThread;
+            S_oAtemsysWorkerThreadDesc.pNextTaskData = (void*)pDrvDescPrivate->pPDev;
+        }
+        else
+        {
+            mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+            ERR("PhyStartStopIoctl: StopPhy failed! WorkerThread is busy!\n");
+            nRetVal = -EAGAIN;
+            goto Exit;
+        }
+        mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+#else
+        pDrvDescPrivate->etx_thread_StopPhy = kthread_create(StopPhyThread,(void*)pDrvDescPrivate->pPDev,"StopPhyThread");
+        if(NULL == pDrvDescPrivate->etx_thread_StopPhy)
+        {
+            ERR("PhyStartStopIoctl: Cannot create kthread for StopPhyThread\n");
+            nRetVal = -EAGAIN;
+            goto Exit;
+        }
+        wake_up_process(pDrvDescPrivate->etx_thread_StopPhy);
+#endif /* #if (defined CONFIG_XENO_COBALT) */
+    }
+    nRetVal = 0;
+    dwRetVal = 0; /* EC_E_NOERROR */
+Exit:
+    oPhyStartStopInfo.dwErrorCode = dwRetVal;
+
+    nRetVal = copy_to_user((ATEMSYS_T_PHY_START_STOP_INFO *)ioctlParam, &oPhyStartStopInfo, sizeof(ATEMSYS_T_PHY_START_STOP_INFO));
+    if (0 != nRetVal)
+    {
+        ERR("PhyStartStopIoctl failed: %d\n", nRetVal);
+    }
+    return nRetVal;
+}
+
+
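+/* GetMdioOrderIoctl() is polled by user space: mutex_trylock() keeps the caller
+ * from blocking on a busy kernel-side MDIO transaction; if the lock is
+ * unavailable an empty order is returned instead. */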
+static int GetMdioOrderIoctl( unsigned long ioctlParam)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    ATEMSYS_T_MDIO_ORDER oOrder;
+    bool bLocked = false;
+    unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */
+    int nRetVal = -1;
+    memset(&oOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER));
+    nRetVal = copy_from_user(&oOrder, (ATEMSYS_T_MDIO_ORDER *)ioctlParam, sizeof(ATEMSYS_T_MDIO_ORDER));
+    if (0 != nRetVal)
+    {
+        ERR("GetMdioOrderIoctl failed: %d\n", nRetVal);
+        goto Exit;
+    }
+    if (oOrder.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */
+        nRetVal = 0;
+        goto Exit;
+    }
+    pDrvDescPrivate = S_apDrvDescPrivate[oOrder.dwIndex];
+    if (NULL == pDrvDescPrivate)
+    {
+        dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/
+        nRetVal = 0;
+        goto Exit;
+    }
+
+    if (mutex_trylock(&pDrvDescPrivate->mdio_order_mutex))
+    {
+        bLocked = true;
+        if ((pDrvDescPrivate->MdioOrder.bInUse) && (pDrvDescPrivate->MdioOrder.bInUseByIoctl))
+        {
+            oOrder.bInUse        = pDrvDescPrivate->MdioOrder.bInUse;
+            oOrder.bInUseByIoctl = pDrvDescPrivate->MdioOrder.bInUseByIoctl;
+            oOrder.bWriteOrder   = pDrvDescPrivate->MdioOrder.bWriteOrder;
+            oOrder.wMdioAddr     = pDrvDescPrivate->MdioOrder.wMdioAddr;
+            oOrder.wReg          = pDrvDescPrivate->MdioOrder.wReg;
+            oOrder.wValue        = pDrvDescPrivate->MdioOrder.wValue;
+        }
+    }
+
+    dwRetVal = 0; /* EC_E_NOERROR*/
+    nRetVal = 0;
+Exit:
+    if (bLocked)
+    {
+        mutex_unlock(&pDrvDescPrivate->mdio_order_mutex);
+    }
+    oOrder.dwErrorCode = dwRetVal;
+    nRetVal = copy_to_user((ATEMSYS_T_MDIO_ORDER *)ioctlParam, &oOrder, sizeof(ATEMSYS_T_MDIO_ORDER));
+    if (0 != nRetVal)
+    {
+        ERR("GetMdioOrderIoctl failed: %d\n", nRetVal);
+    }
+    return nRetVal;
+}
+
+static int ReturnMdioOrderIoctl( unsigned long ioctlParam)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    ATEMSYS_T_MDIO_ORDER oOrder;
+    unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */
+    int nRetVal = -1;
+    memset(&oOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER));
+    nRetVal = copy_from_user(&oOrder, (ATEMSYS_T_MDIO_ORDER *)ioctlParam, sizeof(ATEMSYS_T_MDIO_ORDER));
+    if (0 != nRetVal)
+    {
+        ERR("ReturnMdioOrderIoctl failed: %d\n", nRetVal);
+        goto Exit;
+    }
+
+    if (oOrder.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */
+        nRetVal = 0;
+        goto Exit;
+    }
+    pDrvDescPrivate = S_apDrvDescPrivate[oOrder.dwIndex];
+    if (NULL == pDrvDescPrivate)
+    {
+        dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/
+        nRetVal = 0;
+        goto Exit;
+    }
+
+    mutex_lock(&pDrvDescPrivate->mdio_order_mutex);
+    pDrvDescPrivate->MdioOrder.wValue = oOrder.wValue;
+    pDrvDescPrivate->MdioOrder.bInUseByIoctl = false;
+    mutex_unlock(&pDrvDescPrivate->mdio_order_mutex);
+
+    /* wake MdioRead or MdioWrite */
+    pDrvDescPrivate->mdio_wait_queue_cnt = 1;
+    wake_up_interruptible(&pDrvDescPrivate->mdio_wait_queue);
+
+    dwRetVal = 0 /* EC_E_NOERROR*/;
+    nRetVal = 0;
+
+Exit:
+    oOrder.dwErrorCode = dwRetVal;
+    nRetVal = copy_to_user((ATEMSYS_T_MDIO_ORDER *)ioctlParam, &oOrder, sizeof(ATEMSYS_T_MDIO_ORDER));
+    if (0 != nRetVal)
+    {
+        ERR("ReturnMdioOrderIoctl failed: %d\n", nRetVal);
+    }
+    return nRetVal;
+}
+
+static int GetPhyInfoIoctl(unsigned long ioctlParam)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate  = NULL;
+    ATEMSYS_T_PHY_INFO oStatus;
+    unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */
+    int nRetVal = -1;
+    memset(&oStatus, 0, sizeof(ATEMSYS_T_PHY_INFO));
+    nRetVal = copy_from_user(&oStatus, (ATEMSYS_T_PHY_INFO *)ioctlParam, sizeof(ATEMSYS_T_PHY_INFO));
+    if (0 != nRetVal)
+    {
+        ERR("GetPhyInfoIoctl failed: %d\n", nRetVal);
+        goto Exit;
+    }
+
+    if (oStatus.dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */
+        nRetVal = 0;
+        goto Exit;
+    }
+    pDrvDescPrivate = S_apDrvDescPrivate[oStatus.dwIndex];
+    if (NULL == pDrvDescPrivate)
+    {
+        dwRetVal = 0x9811000C; /* EC_E_NOTFOUND*/
+        nRetVal = 0;
+        goto Exit;
+    }
+
+    oStatus.dwLink    = pDrvDescPrivate->PhyInfo.dwLink;
+    oStatus.dwDuplex  = pDrvDescPrivate->PhyInfo.dwDuplex;
+    oStatus.dwSpeed   = pDrvDescPrivate->PhyInfo.dwSpeed;
+    oStatus.bPhyReady = pDrvDescPrivate->PhyInfo.bPhyReady;
+
+    dwRetVal = 0; /* EC_E_NOERROR */
+    nRetVal = 0;
+Exit:
+    oStatus.dwErrorCode = dwRetVal;
+    nRetVal = copy_to_user((ATEMSYS_T_PHY_INFO *)ioctlParam, &oStatus, sizeof(ATEMSYS_T_PHY_INFO));
+    if (0 != nRetVal)
+    {
+        ERR("GetPhyInfoIoctl failed: %d\n", nRetVal);
+    }
+    return nRetVal;
+}
+
+static int PhyResetIoctl(unsigned long ioctlParam)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate  = NULL;
+    unsigned int* pdwIoctlData = (__u32*)ioctlParam;
+    unsigned int dwIndex = 0;
+    unsigned int dwRetVal = 0x98110000; /* EC_E_ERROR */
+    int nRetVal = -1;
+    int nRes = -1;
+
+    nRes = get_user(dwIndex, pdwIoctlData);
+    if (0 != nRes) { nRetVal = nRes; goto Exit; }
+
+    if (dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        dwRetVal = 0x98110002; /* EC_E_INVALIDINDEX */
+        nRetVal = 0;
+        goto Exit;
+    }
+    pDrvDescPrivate = S_apDrvDescPrivate[dwIndex];
+    if (NULL == pDrvDescPrivate)
+    {
+        dwRetVal = 0x9811000C; /* EC_E_NOTFOUND */
+        nRetVal = 0;
+        goto Exit;
+    }
+
+    if (!pDrvDescPrivate->MacInfo.bPhyResetSupported)
+    {
+        DBG("PhyResetIoctl: PhyReset not supported\n");
+        dwRetVal = 0x98110001; /* EC_E_NOTSUPPORTED */
+        nRetVal = 0;
+        goto Exit;
+    }
+
+    nRes = ResetPhyViaGpio(pDrvDescPrivate);
+    if (0 != nRes)
+    {
+        dwRetVal = 0x98110000; /* EC_E_ERROR */
+        nRetVal = 0;
+        goto Exit;
+    }
+
+    dwRetVal = 0; /* EC_E_NOERROR */
+    nRetVal = 0;
+Exit:
+    put_user(dwRetVal, pdwIoctlData);
+
+    return nRetVal;
+}
+
+static void UpdatePhyInfoByLinuxPhyDriver(struct net_device* ndev)
+{
+    struct phy_device* phy_dev = ndev->phydev;
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(ndev);
+
+    if (LOGLEVEL_DEBUG <= loglevel)
+    {
+        phy_print_status(phy_dev);
+    }
+
+    pDrvDescPrivate->PhyInfo.dwLink = phy_dev->link;
+    pDrvDescPrivate->PhyInfo.dwDuplex = phy_dev->duplex;
+    pDrvDescPrivate->PhyInfo.dwSpeed = phy_dev->speed;
+    pDrvDescPrivate->PhyInfo.bPhyReady = true;
+}
+
+static int MdioProbe(struct net_device* ndev)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(ndev);
+    struct phy_device* pPhyDev = NULL;
+    char mdio_bus_id[MII_BUS_ID_SIZE];
+    char phy_name[MII_BUS_ID_SIZE + 3];
+    int nPhy_id = 0;
+
+    if (NULL != pDrvDescPrivate->pPhyNode)
+    {
+        pPhyDev = of_phy_connect(ndev, pDrvDescPrivate->pPhyNode,
+                     &UpdatePhyInfoByLinuxPhyDriver, 0,
+                     pDrvDescPrivate->PhyInterface);
+    }
+    else if (NULL != pDrvDescPrivate->pMdioNode)
+    {
+        struct platform_device* mdio;
+        mdio = of_find_device_by_node(pDrvDescPrivate->pMdioNode);
+        snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio->name, pDrvDescPrivate->MacInfo.dwPhyAddr);
+        pPhyDev = phy_connect(ndev, phy_name, &UpdatePhyInfoByLinuxPhyDriver, pDrvDescPrivate->PhyInterface);
+    }
+    else if (NULL != pDrvDescPrivate->pMdioBus)
+    {
+        int nDev_id = pDrvDescPrivate->nDev_id;
+        /* check for attached phy */
+        for (nPhy_id = 0; (nPhy_id < PHY_MAX_ADDR); nPhy_id++)
+        {
+            if (!mdiobus_is_registered_device(pDrvDescPrivate->pMdioBus, nPhy_id))
+            {
+                continue;
+            }
+            if (0 != nDev_id--)
+            {
+                continue;
+            }
+            strlcpy(mdio_bus_id, pDrvDescPrivate->pMdioBus->id, MII_BUS_ID_SIZE);
+            break;
+        }
+
+        if (nPhy_id >= PHY_MAX_ADDR)
+        {
+            INF("%s: no PHY, assuming direct connection to switch\n", pDrvDescPrivate->pPDev->name);
+            strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+            nPhy_id = 0;
+        }
+
+        snprintf(phy_name, sizeof(phy_name), PHY_ID_FMT, mdio_bus_id, nPhy_id);
+        pPhyDev = phy_connect(ndev, phy_name, &UpdatePhyInfoByLinuxPhyDriver, pDrvDescPrivate->PhyInterface);
+    }
+
+    if ((NULL == pPhyDev) || IS_ERR(pPhyDev))
+    {
+        ERR("%s: Could not attach to PHY (pPhyDev %p)\n", pDrvDescPrivate->pPDev->name, pPhyDev);
+        return -ENODEV;
+    }
+
+    /* limit the maximum link speed to 100 Mbit/s */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,0,0))
+    phy_set_max_speed(pPhyDev, 100);
+#else
+    pPhyDev->supported &= PHY_BASIC_FEATURES;
+    pPhyDev->advertising = pPhyDev->supported;
+#endif
+    if (LOGLEVEL_INFO <= loglevel)
+    {
+        phy_attached_info(pPhyDev);
+    }
+
+    pDrvDescPrivate->pPhyDev = pPhyDev;
+    pDrvDescPrivate->PhyInfo.dwLink = 0;
+    pDrvDescPrivate->PhyInfo.dwDuplex = 0;
+    pDrvDescPrivate->PhyInfo.dwSpeed = 0;
+
+    return 0;
+}
+
+static int MdioRead(struct mii_bus* pBus, int mii_id, int regnum)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = pBus->priv;
+    int nRetVal = -1;
+    int nRes = -1;
+
+    nRes = pm_runtime_get_sync(&pDrvDescPrivate->pPDev->dev);
+    if (0 > nRes)
+    {
+        return nRes;
+    }
+
+    /* take the MDIO bus lock: only one MdioRead or MdioWrite may run at a time */
+    mutex_lock(&pDrvDescPrivate->mdio_mutex);
+
+    mutex_lock(&pDrvDescPrivate->mdio_order_mutex);
+    memset(&pDrvDescPrivate->MdioOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER));
+    pDrvDescPrivate->MdioOrder.bInUse = true;
+    pDrvDescPrivate->MdioOrder.bInUseByIoctl = true;
+    pDrvDescPrivate->MdioOrder.bWriteOrder = false;
+    pDrvDescPrivate->MdioOrder.wMdioAddr = (__u16)mii_id;
+    pDrvDescPrivate->MdioOrder.wReg = (__u16)regnum;
+    mutex_unlock(&pDrvDescPrivate->mdio_order_mutex);
+
+    /* wait for result */
+    wait_event_interruptible(pDrvDescPrivate->mdio_wait_queue, pDrvDescPrivate->mdio_wait_queue_cnt != 0);
+    pDrvDescPrivate->mdio_wait_queue_cnt = pDrvDescPrivate->mdio_wait_queue_cnt - 1;
+
+    nRetVal = pDrvDescPrivate->MdioOrder.wValue;
+
+    mutex_lock(&pDrvDescPrivate->mdio_order_mutex);
+    pDrvDescPrivate->MdioOrder.bInUse = false;
+    pDrvDescPrivate->MdioOrder.bInUseByIoctl = false;
+    mutex_unlock(&pDrvDescPrivate->mdio_order_mutex);
+
+    pm_runtime_mark_last_busy(&pDrvDescPrivate->pPDev->dev);
+    pm_runtime_put_autosuspend(&pDrvDescPrivate->pPDev->dev);
+
+    mutex_unlock(&pDrvDescPrivate->mdio_mutex);
+
+    return nRetVal;
+}
+
+static int MdioWrite(struct mii_bus* pBus, int mii_id, int regnum, u16 value)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = pBus->priv;
+    int nRetVal;
+
+    nRetVal = pm_runtime_get_sync(&pDrvDescPrivate->pPDev->dev);
+    if (0 > nRetVal)
+    {
+        return nRetVal;
+    }
+
+    /* take the MDIO bus lock: only one MdioRead or MdioWrite may run at a time */
+    mutex_lock(&pDrvDescPrivate->mdio_mutex);
+
+    mutex_lock(&pDrvDescPrivate->mdio_order_mutex);
+    memset(&pDrvDescPrivate->MdioOrder, 0, sizeof(ATEMSYS_T_MDIO_ORDER));
+    pDrvDescPrivate->MdioOrder.bInUse = true;
+    pDrvDescPrivate->MdioOrder.bInUseByIoctl = true;
+    pDrvDescPrivate->MdioOrder.bWriteOrder = true;
+    pDrvDescPrivate->MdioOrder.wMdioAddr = (__u16)mii_id;
+    pDrvDescPrivate->MdioOrder.wReg = (__u16)regnum;
+    pDrvDescPrivate->MdioOrder.wValue = (__u16)value;
+    mutex_unlock(&pDrvDescPrivate->mdio_order_mutex);
+
+    /* wait for result */
+    wait_event_interruptible(pDrvDescPrivate->mdio_wait_queue, pDrvDescPrivate->mdio_wait_queue_cnt != 0);
+    pDrvDescPrivate->mdio_wait_queue_cnt = pDrvDescPrivate->mdio_wait_queue_cnt - 1;
+
+    nRetVal = 0;
+
+    mutex_lock(&pDrvDescPrivate->mdio_order_mutex);
+    pDrvDescPrivate->MdioOrder.bInUse = false;
+    pDrvDescPrivate->MdioOrder.bInUseByIoctl = false;
+    mutex_unlock(&pDrvDescPrivate->mdio_order_mutex);
+
+    pm_runtime_mark_last_busy(&pDrvDescPrivate->pPDev->dev);
+    pm_runtime_put_autosuspend(&pDrvDescPrivate->pPDev->dev);
+
+    mutex_unlock(&pDrvDescPrivate->mdio_mutex);
+
+    return nRetVal;
+}
+
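+/*
+ * How the MdioRead()/MdioWrite() handshake above works: the kernel side only
+ * posts an "order" and sleeps on mdio_wait_queue; the actual bus access is
+ * expected to be performed by the userspace link layer, which fetches the
+ * order via ATEMSYS_IOCTL_GET_MDIO_ORDER, executes it on the hardware, and
+ * completes it via ATEMSYS_IOCTL_RETURN_MDIO_ORDER (incrementing
+ * mdio_wait_queue_cnt and waking this thread). A minimal sketch of such a
+ * service loop follows; the open() of ATEMSYS_FILE_NAME, all error handling,
+ * and the hardware helpers DoHwMdioRead()/DoHwMdioWrite() are hypothetical
+ * and omitted here:
+ */
+#if 0
+    ATEMSYS_T_MDIO_ORDER oOrder;
+    for (;;)
+    {
+        memset(&oOrder, 0, sizeof(oOrder));
+        ioctl(fd, ATEMSYS_IOCTL_GET_MDIO_ORDER, &oOrder);
+        if (!oOrder.bInUse)
+            continue; /* no order pending */
+        if (oOrder.bWriteOrder)
+            DoHwMdioWrite(oOrder.wMdioAddr, oOrder.wReg, oOrder.wValue);
+        else
+            oOrder.wValue = DoHwMdioRead(oOrder.wMdioAddr, oOrder.wReg);
+        ioctl(fd, ATEMSYS_IOCTL_RETURN_MDIO_ORDER, &oOrder);
+    }
+#endif
+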
+static int MdioInit(struct platform_device* pPDev)
+{
+    struct net_device* pNDev = platform_get_drvdata(pPDev);
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev);
+    int nRes = -ENXIO;
+
+    if (pDrvDescPrivate->MacInfo.bNoMdioBus)
+    {
+        pDrvDescPrivate->pMdioBus = NULL;
+        nRes = 0;
+        goto Exit;
+    }
+
+    pDrvDescPrivate->pMdioBus = mdiobus_alloc();
+    if (NULL == pDrvDescPrivate->pMdioBus)
+    {
+        nRes = -ENOMEM;
+        goto Exit;
+    }
+
+    pDrvDescPrivate->pMdioBus->name = "atemsys_mdio_bus";
+    pDrvDescPrivate->pMdioBus->read = &MdioRead;
+    pDrvDescPrivate->pMdioBus->write = &MdioWrite;
+    snprintf(pDrvDescPrivate->pMdioBus->id, MII_BUS_ID_SIZE, "%s-%x", pPDev->name, pDrvDescPrivate->nDev_id + 1);
+    pDrvDescPrivate->pMdioBus->priv = pDrvDescPrivate;
+    pDrvDescPrivate->pMdioBus->parent = &pPDev->dev;
+
+    if (NULL != pDrvDescPrivate->pMdioDevNode)
+    {
+        nRes = of_mdiobus_register(pDrvDescPrivate->pMdioBus, pDrvDescPrivate->pMdioDevNode);
+        of_node_put(pDrvDescPrivate->pMdioDevNode);
+    }
+    else
+    {
+        if (NULL == pDrvDescPrivate->pPhyNode)
+        {
+            nRes = mdiobus_register(pDrvDescPrivate->pMdioBus);
+        }
+        else
+        {
+            /* no MDIO sub-node; use the main node */
+            nRes = of_mdiobus_register(pDrvDescPrivate->pMdioBus, pDrvDescPrivate->pDevNode);
+        }
+    }
+    if (0 != nRes)
+    {
+        mdiobus_free(pDrvDescPrivate->pMdioBus);
+    }
+
+Exit:
+    return nRes;
+}
+
+
+static int StopPhy(struct platform_device* pPDev)
+{
+    struct net_device* pNDev = platform_get_drvdata(pPDev);
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev);
+
+    /* phy */
+    if (NULL != pDrvDescPrivate->pPhyDev)
+    {
+        phy_stop(pDrvDescPrivate->pPhyDev);
+        phy_disconnect(pDrvDescPrivate->pPhyDev);
+        pDrvDescPrivate->pPhyDev = NULL;
+    }
+
+    /* mdio bus */
+    if (NULL != pDrvDescPrivate->pMdioBus)
+    {
+        mdiobus_unregister(pDrvDescPrivate->pMdioBus);
+        mdiobus_free(pDrvDescPrivate->pMdioBus);
+        pDrvDescPrivate->pMdioBus = NULL;
+    }
+
+    pDrvDescPrivate->PhyInfo.bPhyReady = false;
+    pDrvDescPrivate->mdio_wait_queue_cnt = 0;
+
+    return 0;
+}
+
+static int StartPhy(struct platform_device* pPDev)
+{
+    struct net_device* pNDev = platform_get_drvdata(pPDev);
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev);
+    int nRes = -1;
+
+    if ((NULL != pDrvDescPrivate->pPhyDev) || (NULL != pDrvDescPrivate->pMdioBus))
+    {
+        StopPhy(pPDev);
+    }
+
+    /* mdio bus */
+    nRes = MdioInit(pPDev);
+    if (0 != nRes)
+    {
+        pDrvDescPrivate->pMdioBus = NULL;
+    }
+    nRes = MdioProbe(pNDev);
+    if (0 != nRes)
+    {
+        return nRes;
+    }
+    /* phy */
+    phy_start(pDrvDescPrivate->pPhyDev);
+    phy_start_aneg(pDrvDescPrivate->pPhyDev);
+
+    return 0;
+}
+
+static int StartPhyThread(void* data)
+{
+    struct platform_device* pPDev = (struct platform_device*)data;
+
+    StartPhy(pPDev);
+
+    return 0;
+}
+
+static int StopPhyThread(void* data)
+{
+    struct platform_device* pPDev = (struct platform_device*)data;
+
+    StopPhy(pPDev);
+
+    return 0;
+}
+
+static int StopPhyWithoutIoctlMdioHandling(struct platform_device* pPDev)
+{
+    struct net_device* pNDev = platform_get_drvdata(pPDev);
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev);
+
+    /* start StopPhy as thread */
+#if (defined CONFIG_XENO_COBALT)
+    mutex_lock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+    if (NULL == S_oAtemsysWorkerThreadDesc.pfNextTask)
+    {
+        S_oAtemsysWorkerThreadDesc.pfNextTask = StopPhyThread;
+        S_oAtemsysWorkerThreadDesc.pNextTaskData = (void*)pDrvDescPrivate->pPDev;
+    }
+    else
+    {
+        ERR("StopPhyWithoutIoctlMdioHandling failed! WorkerThread is busy!\n");
+        mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex); /* don't return with the mutex held */
+        return -EAGAIN;
+    }
+    mutex_unlock(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+#else
+    pDrvDescPrivate->etx_thread_StopPhy = kthread_create(StopPhyThread, (void*)pDrvDescPrivate->pPDev, "StopPhyThread");
+    if (IS_ERR(pDrvDescPrivate->etx_thread_StopPhy)) /* kthread_create() returns ERR_PTR() on failure, never NULL */
+    {
+        ERR("Cannot create kthread for StopPhyThread\n");
+        return -1;
+    }
+    wake_up_process(pDrvDescPrivate->etx_thread_StopPhy);
+#endif /* #if (defined CONFIG_XENO_COBALT) */
+
+    /* trigger the event so pending MdioRead and MdioWrite calls continue */
+    /* (MdioRead will then always return 0) */
+    pDrvDescPrivate->mdio_wait_queue_cnt = 1000; /* the wait will be skipped 1000 times */
+    wake_up_interruptible(&pDrvDescPrivate->mdio_wait_queue);
+
+    return 0;
+}
+
+static struct device_node* findDeviceTreeNode(struct platform_device* pPDev)
+{
+    int                    nTimeout;
+    unsigned int           dwRegAddr32  = 0; /* initialized in case the "reg" property is absent */
+    long long unsigned int qwRegAddr64  = 0;
+    char                   aBuff[32]    = {0};
+    struct device_node*    pDevNode;
+
+    pDevNode = NULL;
+    nTimeout = 100;
+    while(0 < nTimeout)
+    {
+        pDevNode = of_find_node_by_name(pDevNode, "ethernet");
+        if (NULL == pDevNode)
+            break;
+
+        of_property_read_u32(pDevNode, "reg", &dwRegAddr32);
+        of_property_read_u64(pDevNode, "reg", &qwRegAddr64);
+
+        sprintf(aBuff, "%x.ethernet", dwRegAddr32);
+        if (strcmp(pPDev->name, aBuff) == 0) break;
+
+        sprintf(aBuff, "%x.ethernet", (unsigned int)qwRegAddr64);
+        if (strcmp(pPDev->name, aBuff) == 0) break;
+
+        nTimeout--;
+    }
+    if (0 == nTimeout)
+        pDevNode = NULL;
+
+    return pDevNode;
+}
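+
+/*
+ * Example: a device tree node declared as
+ *
+ *     ethernet@30be0000 {
+ *         reg = <0x30be0000 0x10000>;
+ *     };
+ *
+ * is registered by the kernel as platform device "30be0000.ethernet"; this is
+ * the name findDeviceTreeNode() above matches against pPDev->name. (The
+ * address 0x30be0000 is only an illustration.)
+ */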
+
+static int ResetPhyViaGpio(ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate)
+{
+    int nRes = 0;
+
+    nRes = devm_gpio_request_one(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->nPhyResetGpioPin,
+            pDrvDescPrivate->bPhyResetGpioActiveHigh ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+            "phy-reset");
+    if (nRes)
+    {
+        ERR("%s: failed to get atemsys-phy-reset-gpios: %d \n", pDrvDescPrivate->pPDev->name, nRes);
+        return nRes;
+    }
+
+    if (pDrvDescPrivate->nPhyResetDuration > 20)
+        msleep(pDrvDescPrivate->nPhyResetDuration);
+    else
+        usleep_range(pDrvDescPrivate->nPhyResetDuration * 1000, pDrvDescPrivate->nPhyResetDuration * 1000 + 1000);
+
+    gpio_set_value_cansleep(pDrvDescPrivate->nPhyResetGpioPin, !pDrvDescPrivate->bPhyResetGpioActiveHigh);
+
+#if (LINUX_VERSION_CODE <= KERNEL_VERSION(6,0,0))
+    devm_gpio_free(&pDrvDescPrivate->pPDev->dev, pDrvDescPrivate->nPhyResetGpioPin);
+#endif
+
+    if (!pDrvDescPrivate->nPhyResetPostDelay)
+        return 0;
+
+    if (pDrvDescPrivate->nPhyResetPostDelay > 20)
+        msleep(pDrvDescPrivate->nPhyResetPostDelay);
+    else
+        usleep_range(pDrvDescPrivate->nPhyResetPostDelay * 1000, pDrvDescPrivate->nPhyResetPostDelay * 1000 + 1000);
+
+    return 0;
+}
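+
+/*
+ * The reset pulse above is controlled by optional device tree properties that
+ * EthernetDriverProbe() parses below. A hypothetical node might contain:
+ *
+ *     atemsys-phy-reset-gpios = <&gpio1 7 0>;
+ *     atemsys-phy-reset-duration = <10>;    (pulse length in ms)
+ *     atemsys-phy-reset-post-delay = <50>;  (settle time in ms)
+ *     atemsys-phy-reset-active-high;        (optional polarity flag)
+ */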
+
+static int EthernetDriverProbe(struct platform_device* pPDev)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    struct net_device* pNDev = NULL;
+    const struct of_device_id* pOf_id = NULL;
+    static int nDev_id = 0;
+    unsigned int dwIndex = 0;
+    int nRes = 0;
+    struct device_node* pDevNode = NULL;
+
+    INF("Atemsys: Probe device: %s\n", pPDev->name);
+
+    pDevNode = pPDev->dev.of_node;
+    if (NULL == pDevNode)
+    {
+        struct device_node* pDevNodeNew = NULL;
+        WRN("%s: Device node empty\n", pPDev->name);
+
+        pDevNodeNew = findDeviceTreeNode(pPDev);
+        if (NULL == pDevNodeNew)
+        {
+            ERR("%s: Device node not found\n", pPDev->name);
+            return -ENODATA;
+        }
+        else
+        {
+            pDevNode = pDevNodeNew;
+        }
+    }
+
+    /* Init network device */
+    pNDev = alloc_etherdev_mqs(sizeof(ATEMSYS_T_DRV_DESC_PRIVATE), 1, 1); /* no extra TX and RX queues required */
+    if (NULL == pNDev)
+    {
+        return -ENOMEM;
+    }
+    SET_NETDEV_DEV(pNDev, &pPDev->dev);
+
+    /* setup board info structure */
+    pOf_id = of_match_device(atemsys_ids, &pPDev->dev);
+    if (NULL != pOf_id)
+    {
+        pPDev->id_entry = pOf_id->data;
+    }
+
+    pDrvDescPrivate = netdev_priv(pNDev);
+    memset(pDrvDescPrivate, 0, sizeof(ATEMSYS_T_DRV_DESC_PRIVATE));
+    pDrvDescPrivate->pPDev = pPDev;
+    pDrvDescPrivate->nDev_id  = nDev_id++;
+    platform_set_drvdata(pPDev, pNDev);
+    pDrvDescPrivate->netdev = pNDev;
+    pDrvDescPrivate->pDevNode = pDevNode;
+
+    /* Select default pin state */
+    pinctrl_pm_select_default_state(&pPDev->dev);
+
+    /* enable clock */
+    pDrvDescPrivate->nCountClk = of_property_count_strings(pDevNode,"clock-names");
+    if (0 > pDrvDescPrivate->nCountClk)
+    {
+        pDrvDescPrivate->nCountClk = 0;
+    }
+    DBG("%s: found %d Clocks\n", pPDev->name , pDrvDescPrivate->nCountClk);
+
+    for (dwIndex = 0; dwIndex < pDrvDescPrivate->nCountClk; dwIndex++)
+    {
+        if(!of_property_read_string_index(pDevNode, "clock-names", dwIndex, &pDrvDescPrivate->clk_ids[dwIndex]))
+        {
+            pDrvDescPrivate->clks[dwIndex] = devm_clk_get(&pPDev->dev, pDrvDescPrivate->clk_ids[dwIndex]);
+            if (!IS_ERR(pDrvDescPrivate->clks[dwIndex]))
+            {
+                clk_prepare_enable(pDrvDescPrivate->clks[dwIndex]);
+                DBG("%s: Clock %s enabled\n", pPDev->name, pDrvDescPrivate->clk_ids[dwIndex]);
+            }
+            else
+            {
+                pDrvDescPrivate->clks[dwIndex] = NULL;
+            }
+        }
+    }
+
+    /* enable PHY regulator*/
+    pDrvDescPrivate->pPhyRegulator = devm_regulator_get(&pPDev->dev, "phy");
+    if (!IS_ERR(pDrvDescPrivate->pPhyRegulator))
+    {
+        if (regulator_enable(pDrvDescPrivate->pPhyRegulator))
+        {
+            WRN("%s: can't enable PHY regulator!\n", pPDev->name);
+        }
+    }
+    else
+    {
+        pDrvDescPrivate->pPhyRegulator = NULL;
+    }
+
+    /* Device run-time power management */
+    pm_runtime_dont_use_autosuspend(&pPDev->dev);
+    pm_runtime_get_noresume(&pPDev->dev);
+    pm_runtime_set_active(&pPDev->dev);
+    pm_runtime_enable(&pPDev->dev);
+
+    /* resets */
+    {
+        struct reset_control*   pResetCtl;
+        const char*             szTempString = NULL;
+
+        nRes = of_property_read_string(pDevNode, "reset-names", &szTempString);
+        pResetCtl = devm_reset_control_get_optional(&pPDev->dev, szTempString);
+        if (!IS_ERR_OR_NULL(pResetCtl)) /* devm_reset_control_get_optional() may return NULL or ERR_PTR() */
+        {
+            nRes = reset_control_assert(pResetCtl);
+            reset_control_deassert(pResetCtl);
+
+            /* Some reset controllers have only reset callback instead of
+             * assert + deassert callbacks pair.
+             */
+            if (-ENOTSUPP == nRes)
+            {
+                reset_control_reset(pResetCtl);
+                pDrvDescPrivate->pResetCtl = pResetCtl;
+            }
+        }
+    }
+
+    /* gather device tree data for atemsys and print some of it to the kernel log */
+    {
+        unsigned int    dwTemp          = 0;
+        const char*     szTempString    = NULL;
+        unsigned int    adwTempValues[6];
+
+        /* get identification */
+        nRes = of_property_read_string(pDevNode, "atemsys-Ident", &szTempString);
+        if ((0 == nRes) && (NULL != szTempString))
+        {
+            INF("%s: atemsys-Ident: %s\n", pPDev->name, szTempString);
+            memcpy(pDrvDescPrivate->MacInfo.szIdent,szTempString, EC_LINKOS_IDENT_MAX_LEN);
+        }
+        else
+        {
+            INF("%s: Missing atemsys-Ident in the Device Tree\n", pPDev->name);
+        }
+
+        /* get instance number */
+        nRes = of_property_read_u32(pDevNode, "atemsys-Instance", &dwTemp);
+        if (0 == nRes)
+        {
+            INF("%s: atemsys-Instance: %d\n", pPDev->name , dwTemp);
+            pDrvDescPrivate->MacInfo.dwInstance = dwTemp;
+        }
+        else
+        {
+            pDrvDescPrivate->MacInfo.dwInstance = 0;
+            INF("%s: Missing atemsys-Instance in the Device Tree\n", pPDev->name);
+        }
+
+        /* status */
+        szTempString = NULL;
+        nRes = of_property_read_string(pDevNode, "status", &szTempString);
+        if ((0 == nRes) && (NULL != szTempString))
+        {
+            DBG("%s: status: %s\n", pPDev->name , szTempString);
+            pDrvDescPrivate->MacInfo.dwStatus = (strcmp(szTempString, "okay")==0)? 1:0;
+        }
+
+        /* interrupt-parent */
+        nRes = of_property_read_u32(pDevNode, "interrupt-parent", &dwTemp);
+        if (0 == nRes)
+        {
+            DBG("%s: interrupt-parent: %d\n", pPDev->name , dwTemp);
+        }
+
+        /* interrupts */
+        nRes = of_property_read_u32_array(pDevNode, "interrupts", adwTempValues, 6);
+        if (0 == nRes)
+        {
+            DBG("%s: interrupts: 0x%x 0x%x 0x%x 0x%x 0x%x 0x%x\n", pPDev->name ,
+                adwTempValues[0], adwTempValues[1], adwTempValues[2], adwTempValues[3], adwTempValues[4], adwTempValues[5]);
+        }
+
+        /* reg */
+#if (defined __arm__)
+        nRes = of_property_read_u32_array(pDevNode, "reg", adwTempValues, 2);
+        if (0 == nRes)
+        {
+            DBG("%s: reg: 0x%x 0x%x\n", pPDev->name , adwTempValues[0], adwTempValues[1]);
+            pDrvDescPrivate->MacInfo.qwRegAddr = adwTempValues[0];
+            pDrvDescPrivate->MacInfo.dwRegSize = adwTempValues[1];
+        }
+#endif
+
+        /* get phy-mode */
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0))
+        nRes = of_get_phy_mode(pPDev->dev.of_node, &pDrvDescPrivate->PhyInterface);
+        if ((strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSWG") == 0) && (0==pDrvDescPrivate->PhyInterface))
+        {
+            struct device_node* pDevNodeNew = pDevNode;
+            pDevNodeNew = of_get_child_by_name(pDevNodeNew, "ethernet-ports");
+            pDevNodeNew = of_get_child_by_name(pDevNodeNew, "port");
+            nRes = of_get_phy_mode(pDevNodeNew, &pDrvDescPrivate->PhyInterface);
+        }
+#else
+        pDrvDescPrivate->PhyInterface = of_get_phy_mode(pPDev->dev.of_node);
+#endif
+        switch (pDrvDescPrivate->PhyInterface)
+        {
+            case PHY_INTERFACE_MODE_MII:
+            {
+                INF("%s: phy-mode: MII\n", pPDev->name);
+                pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_MII;
+            } break;
+            case PHY_INTERFACE_MODE_RMII:
+            {
+                INF("%s: phy-mode: RMII\n", pPDev->name);
+                pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_RMII;
+            } break;
+            case PHY_INTERFACE_MODE_GMII:
+            {
+                INF("%s: phy-mode: GMII\n", pPDev->name);
+                pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_GMII;
+            } break;
+            case PHY_INTERFACE_MODE_SGMII:
+            {
+                INF("%s: phy-mode: SGMII\n", pPDev->name);
+                pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_SGMII;
+            } break;
+            case PHY_INTERFACE_MODE_RGMII_ID:
+            case PHY_INTERFACE_MODE_RGMII_RXID:
+            case PHY_INTERFACE_MODE_RGMII_TXID:
+            case PHY_INTERFACE_MODE_RGMII:
+            {
+                INF("%s: phy-mode: RGMII\n", pPDev->name);
+                pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_RGMII;
+            } break;
+            default:
+            {
+                pDrvDescPrivate->MacInfo.ePhyMode = eATEMSYS_PHY_RGMII;
+                pDrvDescPrivate->PhyInterface = PHY_INTERFACE_MODE_RGMII;
+                WRN("%s: Missing phy-mode in the Device Tree, using RGMII\n", pPDev->name);
+            }
+        }
+
+        /* pinctrl-names */
+        szTempString = NULL;
+        nRes = of_property_read_string(pDevNode, "pinctrl-names", &szTempString);
+        if ((0 == nRes) && (NULL != szTempString))
+        {
+            DBG("%s: pinctrl-names: %s\n", pPDev->name , szTempString);
+        }
+
+        /* PHY address*/
+        pDrvDescPrivate->MacInfo.dwPhyAddr = PHY_AUTO_ADDR;
+        pDrvDescPrivate->pPhyNode = of_parse_phandle(pDevNode, "phy-handle", 0);
+        if ((strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSWG") == 0) && (NULL == pDrvDescPrivate->pPhyNode))
+        {
+            struct device_node* pDevNodeNew = pDevNode;
+            pDevNodeNew = of_get_child_by_name(pDevNodeNew, "ethernet-ports");
+            pDevNodeNew = of_get_child_by_name(pDevNodeNew, "port");
+            pDrvDescPrivate->pPhyNode = of_parse_phandle(pDevNodeNew, "phy-handle", 0);
+        }
+        if (NULL != pDrvDescPrivate->pPhyNode)
+        {
+            nRes = of_property_read_u32(pDrvDescPrivate->pPhyNode, "reg", &dwTemp);
+            if (0 == nRes)
+            {
+                INF("%s: PHY mdio addr: %d\n", pPDev->name , dwTemp);
+                pDrvDescPrivate->MacInfo.dwPhyAddr = dwTemp;
+            }
+        }
+        else
+        {
+            int nLen;
+            const __be32* pPhyId;
+            pPhyId = of_get_property(pDevNode, "phy_id", &nLen);
+
+            if (nLen == (sizeof(__be32) * 2))
+            {
+                pDrvDescPrivate->pMdioNode = of_find_node_by_phandle(be32_to_cpup(pPhyId));
+                pDrvDescPrivate->MacInfo.dwPhyAddr = be32_to_cpup(pPhyId+1);
+            }
+            else
+            {
+                INF("%s: Missing phy-handle in the Device Tree\n", pPDev->name);
+            }
+        }
+
+        /* check whether the mdio node is a sub-node and the MAC has its own MDIO bus */
+        {
+            pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "mdio");
+            if (NULL == pDrvDescPrivate->pMdioDevNode)
+                pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "mdio0");
+            if (NULL == pDrvDescPrivate->pMdioDevNode)
+                pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "mdio1");
+            if (NULL == pDrvDescPrivate->pMdioDevNode)
+                pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "phy");
+            if (NULL == pDrvDescPrivate->pMdioDevNode)
+                pDrvDescPrivate->pMdioDevNode = of_get_child_by_name(pDevNode, "ethernet-phy");
+
+            if ((NULL == pDrvDescPrivate->pMdioDevNode) && (NULL != pDrvDescPrivate->pPhyNode))
+            {
+                /* check whether the PHY node is a sub-node and use the enclosing sub-node as the MDIO bus node */
+                struct device_node *pTempNode = of_get_parent(pDrvDescPrivate->pPhyNode);
+                if ((NULL != pTempNode) && (pTempNode == pDevNode))
+                {
+                    pDrvDescPrivate->pMdioDevNode = pDrvDescPrivate->pPhyNode;
+                }
+                else if ((NULL != pTempNode) && (of_get_parent(pTempNode) == pDevNode))
+                {
+                    pDrvDescPrivate->pMdioDevNode = pTempNode;
+                }
+            }
+
+            if (NULL != pDrvDescPrivate->pMdioDevNode)
+            {
+                /* mdio bus is owned by current mac instance */
+                pDrvDescPrivate->MacInfo.bNoMdioBus = false;
+                INF("%s: mac has mdio bus.\n", pPDev->name );
+            }
+            else if ((NULL != pDrvDescPrivate->pPhyNode) || (NULL != pDrvDescPrivate->pMdioNode))
+            {
+                /* mdio bus owned by another mac instance */
+                pDrvDescPrivate->MacInfo.bNoMdioBus = true;
+                INF("%s: mac has no mdio bus, uses mdio bus of other instance.\n", pPDev->name);
+            }
+            else
+            {
+                /* legacy mode: no node for mdio bus in device tree defined */
+                pDrvDescPrivate->MacInfo.bNoMdioBus = false;
+                INF("%s: handle mdio bus without device tree node.\n", pPDev->name );
+            }
+        }
+
+        /* PHY reset data */
+        nRes = of_property_read_u32(pDevNode, "atemsys-phy-reset-duration", &pDrvDescPrivate->nPhyResetDuration);
+        if (nRes) pDrvDescPrivate->nPhyResetDuration = 0;
+        pDrvDescPrivate->nPhyResetGpioPin = of_get_named_gpio(pDevNode, "atemsys-phy-reset-gpios", 0);
+        nRes = of_property_read_u32(pDevNode, "atemsys-phy-reset-post-delay", &pDrvDescPrivate->nPhyResetPostDelay);
+        if (nRes) pDrvDescPrivate->nPhyResetPostDelay = 0;
+        pDrvDescPrivate->bPhyResetGpioActiveHigh = of_property_read_bool(pDevNode, "atemsys-phy-reset-active-high");
+
+        if ((0 != pDrvDescPrivate->nPhyResetDuration) && (pDrvDescPrivate->nPhyResetGpioPin != -EPROBE_DEFER)
+                && gpio_is_valid(pDrvDescPrivate->nPhyResetGpioPin))
+        {
+            pDrvDescPrivate->MacInfo.bPhyResetSupported = true;
+            DBG("%s: PhyReset ready: GpioPin: %d; Duration %d, bActiveHigh %d, post delay %d\n", pPDev->name,
+                pDrvDescPrivate->nPhyResetGpioPin, pDrvDescPrivate->nPhyResetDuration,
+                pDrvDescPrivate->bPhyResetGpioActiveHigh, pDrvDescPrivate->nPhyResetPostDelay);
+        }
+    }
+
+    /* insert device to array */
+    for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++)
+    {
+        if (NULL == S_apDrvDescPrivate[dwIndex])
+        {
+            S_apDrvDescPrivate[dwIndex] = pDrvDescPrivate;
+            pDrvDescPrivate->MacInfo.dwIndex =  dwIndex;
+            break;
+        }
+    }
+    if (dwIndex >= ATEMSYS_MAX_NUMBER_DRV_INSTANCES)
+    {
+        ERR("%s: Maximum number of instances exceeded!\n", pPDev->name);
+        return EthernetDriverRemove(pPDev);
+    }
+
+    /* start drivers of sub-nodes */
+    if (strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSW") == 0
+       || strcmp(pDrvDescPrivate->MacInfo.szIdent, "ICSS") == 0)
+    {
+        of_platform_populate(pDevNode, NULL, NULL, &pPDev->dev);
+        DBG("%s: start drivers of sub-nodes.\n", pPDev->name );
+    }
+    if (strcmp(pDrvDescPrivate->MacInfo.szIdent, "CPSWG") == 0)
+    {
+        /* in subnode "ethernet-ports" start driver for "port@2" */
+        struct device_node* pDevNodeNew = pDevNode;
+        pDevNodeNew = of_get_child_by_name(pDevNodeNew, "ethernet-ports");
+        of_platform_populate(pDevNodeNew, NULL, NULL, &pPDev->dev);
+        DBG("%s: start drivers of sub-nodes.\n", pPDev->name );
+    }
+
+    /* prepare mutex for mdio */
+    mutex_init(&pDrvDescPrivate->mdio_mutex);
+    mutex_init(&pDrvDescPrivate->mdio_order_mutex);
+    init_waitqueue_head(&pDrvDescPrivate->mdio_wait_queue);
+    pDrvDescPrivate->mdio_wait_queue_cnt = 0;
+
+    return 0;
+}
+
+
+static int EthernetDriverRemove(struct platform_device* pPDev)
+{
+    struct net_device* pNDev = platform_get_drvdata(pPDev);
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = netdev_priv(pNDev);
+    unsigned int i = 0;
+
+    if ((NULL != pDrvDescPrivate->pPhyDev) || (NULL != pDrvDescPrivate->pMdioBus))
+    {
+        ERR("%s: EthernetDriverRemove: PHY driver is still active!\n", pPDev->name);
+    }
+
+    if (NULL != pDrvDescPrivate->pPhyRegulator)
+    {
+        regulator_disable(pDrvDescPrivate->pPhyRegulator);
+    }
+
+    /* Decrement refcount */
+    of_node_put(pDrvDescPrivate->pPhyNode);
+
+    pm_runtime_put(&pPDev->dev);
+    pm_runtime_disable(&pPDev->dev);
+
+    /* resets */
+    if (NULL != pDrvDescPrivate->pResetCtl)
+    {
+        reset_control_assert(pDrvDescPrivate->pResetCtl);
+    }
+    for (i = 0; i < ATEMSYS_MAX_NUMBER_OF_CLOCKS; i++)
+    {
+        if (NULL != pDrvDescPrivate->clk_ids[i])
+        {
+            clk_disable_unprepare(pDrvDescPrivate->clks[i]);
+            DBG("%s: Clock %s unprepared\n", pPDev->name, pDrvDescPrivate->clk_ids[i]);
+        }
+    }
+    mutex_destroy(&pDrvDescPrivate->mdio_mutex);
+    mutex_destroy(&pDrvDescPrivate->mdio_order_mutex);
+
+    pinctrl_pm_select_sleep_state(&pPDev->dev);
+
+    free_netdev(pNDev);
+
+    INF("%s: atemsys driver removed: %s Instance %d\n", pPDev->name, pDrvDescPrivate->MacInfo.szIdent, pDrvDescPrivate->MacInfo.dwInstance);
+
+    S_apDrvDescPrivate[pDrvDescPrivate->MacInfo.dwIndex] = NULL;
+
+    if (NULL != pDrvDescPrivate->pDevDesc)
+    {
+        pDrvDescPrivate->pDevDesc->pPlatformDev = NULL;
+        pDrvDescPrivate->pDevDesc->pDrvDesc     = NULL;
+        pDrvDescPrivate->pDevDesc               = NULL;
+    }
+
+    return 0;
+}
+
+static int CleanUpEthernetDriverOnRelease(ATEMSYS_T_DEVICE_DESC* pDevDesc)
+{
+    ATEMSYS_T_DRV_DESC_PRIVATE* pDrvDescPrivate = NULL;
+    int nRes = -1;
+    unsigned int i = 0;
+
+    if (pDevDesc == NULL)
+    {
+        return 0;
+    }
+
+    for (i = 0; i < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; i++)
+    {
+        pDrvDescPrivate = S_apDrvDescPrivate[i];
+        if (NULL == pDrvDescPrivate)
+        {
+            continue;
+        }
+
+        if (pDrvDescPrivate->pDevDesc == pDevDesc)
+        {
+            INF("%s: Cleanup: pDevDesc = 0x%px\n", pDrvDescPrivate->pPDev->name, pDevDesc);
+
+            /* ensure mdio bus and PHY are down */
+            if ((NULL != pDrvDescPrivate->pPhyDev) || (NULL != pDrvDescPrivate->pMdioBus))
+            {
+                int timeout = 0;
+                for (timeout = 50; timeout-- > 0; msleep(100)) /* retry up to 50 times, 100 ms apart */
+                {
+                    nRes = StopPhyWithoutIoctlMdioHandling(pDrvDescPrivate->pPDev);
+                    if (-EAGAIN != nRes)
+                        break;
+                }
+            }
+            /* clean descriptor */
+            pDrvDescPrivate->pDevDesc = NULL;
+            pDevDesc->pPlatformDev    = NULL;
+            pDevDesc->pDrvDesc        = NULL;
+        }
+    }
+
+    return 0;
+}
+
+static struct platform_device_id mac_devtype[] = {
+    {
+        .name = ATEMSYS_DT_DRIVER_NAME,
+        .driver_data = 0,
+    }, {
+        /* sentinel */
+    }
+};
+
+
+MODULE_DEVICE_TABLE(platform, mac_devtype);
+
+static struct platform_driver mac_driver = {
+    .driver    = {
+        .name           = ATEMSYS_DT_DRIVER_NAME,
+        .of_match_table = atemsys_ids,
+    },
+    .id_table  = mac_devtype,
+    .probe     = EthernetDriverProbe,
+    .remove    = EthernetDriverRemove,
+};
+#endif /* INCLUDE_ATEMSYS_DT_DRIVER */
+
+
+#if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+#define ATEMSYS_PCI_DRIVER_NAME "atemsys_pci"
+#define PCI_VENDOR_ID_BECKHOFF  0x15EC
+
+static void PciDriverRemove(struct pci_dev* pPciDev)
+{
+    ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pPciDrvDescPrivate = (ATEMSYS_T_PCI_DRV_DESC_PRIVATE*)pci_get_drvdata(pPciDev);
+
+    if (NULL != pPciDrvDescPrivate)
+    {
+        /* remove references to the device */
+        if (NULL != pPciDrvDescPrivate->pDevDesc)
+        {
+            pPciDrvDescPrivate->pDevDesc->pPcidev = NULL;
+            pPciDrvDescPrivate->pDevDesc->pPciDrvDesc = NULL;
+            pPciDrvDescPrivate->pDevDesc = NULL;
+        }
+        S_apPciDrvDescPrivate[pPciDrvDescPrivate->dwIndex] = NULL;
+
+        kfree(pPciDrvDescPrivate);
+    }
+
+    /* disable device */
+    pci_disable_msi(pPciDev);
+    pci_release_regions(pPciDev);
+    pci_disable_pcie_error_reporting(pPciDev);
+    pci_disable_device(pPciDev);
+
+    INF("%s: %s: disconnected\n", pci_name(pPciDev), ATEMSYS_PCI_DRIVER_NAME);
+}
+
+static int PciDriverProbe(struct pci_dev* pPciDev, const struct pci_device_id* id)
+{
+    ATEMSYS_T_PCI_DRV_DESC_PRIVATE* pPciDrvDescPrivate = NULL;
+    int nRes = -ENODEV;
+    int dwIndex = 0;
+
+    /* check whether this PCI device is one we want */
+    if ((strcmp(AllowedPciDevices, "PCI_ANY_ID") != 0) && (strstr(AllowedPciDevices, pci_name(pPciDev)) == NULL))
+    {
+        /* don't attach driver */
+        DBG("%s: PciDriverProbe: restricted by user parameters!\n", pci_name(pPciDev));
+
+        return -ENODEV; /* error code doesn't create error message */
+    }
+
+    /* setup pci device */
+    nRes = pci_enable_device_mem(pPciDev);
+    if (nRes)
+    {
+        ERR("%s: PciDriverProbe: pci_enable_device_mem failed!\n", pci_name(pPciDev));
+        goto Exit;
+    }
+
+    nRes = DefaultPciSettings(pPciDev);
+    if (nRes)
+    {
+        ERR("%s: PciDriverProbe: DefaultPciSettings failed\n", pci_name(pPciDev));
+        goto Exit;
+    }
+    pci_save_state(pPciDev);
+    pci_enable_pcie_error_reporting(pPciDev);
+    nRes = pci_request_regions(pPciDev, ATEMSYS_DEVICE_NAME);
+    if (nRes < 0)
+    {
+        ERR("%s: PciDriverProbe: device in use by another driver?\n", pci_name(pPciDev));
+        nRes = -EBUSY;
+        goto Exit;
+    }
+
+    /* create private desc */
+    pPciDrvDescPrivate = (ATEMSYS_T_PCI_DRV_DESC_PRIVATE*)kzalloc(sizeof(ATEMSYS_T_PCI_DRV_DESC_PRIVATE), GFP_KERNEL);
+    if (NULL == pPciDrvDescPrivate)
+    {
+        nRes = -ENOMEM;
+        goto Exit;
+    }
+    pPciDrvDescPrivate->pPciDev = pPciDev;
+
+    /* get Pci Info */
+    pPciDrvDescPrivate->wVendorId         = pPciDev->vendor;
+    pPciDrvDescPrivate->wDevice           = pPciDev->device;
+    pPciDrvDescPrivate->wRevision         = pPciDev->revision;
+    pPciDrvDescPrivate->wSubsystem_vendor = pPciDev->subsystem_vendor;
+    pPciDrvDescPrivate->wSubsystem_device = pPciDev->subsystem_device;
+    pPciDrvDescPrivate->nPciBus           = pPciDev->bus->number;
+    pPciDrvDescPrivate->nPciDomain        = pci_domain_nr(pPciDev->bus);
+    pPciDrvDescPrivate->nPciDev           = PCI_SLOT(pPciDev->devfn);
+    pPciDrvDescPrivate->nPciFun           = PCI_FUNC(pPciDev->devfn);
+
+    INF("%s: %s: connected vendor:0x%04x device:0x%04x rev:0x%02x - sub_vendor:0x%04x sub_device:0x%04x\n", pci_name(pPciDev), ATEMSYS_PCI_DRIVER_NAME,
+            pPciDev->vendor, pPciDev->device, pPciDev->revision,
+            pPciDev->subsystem_vendor, pPciDev->subsystem_device);
+
+    /* find the memory BAR */
+    {
+       unsigned long ioBase  = 0;
+       unsigned int  dwIOLen = 0;
+       int i    = 0;
+       int nBar = 0;
+
+       for (i = 0; i < ATEMSYS_PCI_MAXBAR ; i++)
+       {
+          if (pci_resource_flags(pPciDev, i) & IORESOURCE_MEM)
+          {
+             /* IO area address */
+             ioBase = pci_resource_start(pPciDev, i);
+             pPciDrvDescPrivate->aBars[nBar].qwIOMem = ioBase;
+
+             /* IO area length */
+             dwIOLen = pci_resource_len(pPciDev, i);
+             pPciDrvDescPrivate->aBars[nBar].dwIOLen = dwIOLen;
+
+             nBar++;
+          }
+       }
+
+       if (nBar == 0)
+       {
+          WRN("%s: PciDriverProbe: No memory BAR found\n", pci_name(pPciDev));
+       }
+
+       pPciDrvDescPrivate->nBarCnt = nBar;
+    }
+
+    /* insert device to array */
+    for (dwIndex = 0; dwIndex < ATEMSYS_MAX_NUMBER_DRV_INSTANCES; dwIndex++)
+    {
+        if (NULL == S_apPciDrvDescPrivate[dwIndex])
+        {
+            S_apPciDrvDescPrivate[dwIndex] = pPciDrvDescPrivate;
+            pPciDrvDescPrivate->dwIndex =  dwIndex;
+            break;
+        }
+    }
+    if (ATEMSYS_MAX_NUMBER_DRV_INSTANCES <= dwIndex)
+    {
+        ERR("%s: PciDriverProbe: insert device to array failed\n", pci_name(pPciDev));
+        nRes = -EBUSY;
+        goto Exit;
+    }
+
+    pci_set_drvdata(pPciDev, pPciDrvDescPrivate);
+
+    nRes = 0; /* OK */
+Exit:
+    if (nRes != 0 /* OK */)
+    {
+        if (NULL != pPciDrvDescPrivate)
+        {
+            kfree(pPciDrvDescPrivate);
+        }
+    }
+    return nRes;
+}
+
+typedef struct _ATEMSYS_PCI_INFO {
+} ATEMSYS_PCI_INFO;
+
+static const struct _ATEMSYS_PCI_INFO oAtemsysPciInfo = {
+};
+
+
+static const struct pci_device_id pci_devtype[] = {
+    {
+    /* all devices of class PCI_CLASS_NETWORK_ETHERNET */
+    .vendor      = PCI_ANY_ID,
+    .device      = PCI_ANY_ID,
+    .subvendor   = PCI_ANY_ID,
+    .subdevice   = PCI_ANY_ID,
+    .class       = (PCI_CLASS_NETWORK_ETHERNET << 8),
+    .class_mask  = (0xFFFF00),
+    .driver_data = (kernel_ulong_t)&oAtemsysPciInfo
+    },
+    {
+     /* all devices with BECKHOFF vendor id */
+    .vendor      = PCI_VENDOR_ID_BECKHOFF,
+    .device      = PCI_ANY_ID,
+    .subvendor   = PCI_ANY_ID,
+    .subdevice   = PCI_ANY_ID,
+    .driver_data = (kernel_ulong_t)&oAtemsysPciInfo
+    },
+    {}
+};
+
+MODULE_DEVICE_TABLE(pci, pci_devtype);
+static struct pci_driver oPciDriver = {
+    .name     = ATEMSYS_PCI_DRIVER_NAME,
+    .id_table = pci_devtype,
+    .probe    = PciDriverProbe,
+    .remove   = PciDriverRemove,
+};
+
+#endif /* (defined INCLUDE_ATEMSYS_PCI_DRIVER) */
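+
+/*
+ * Example module load (illustrative; matches the changelog in atemsys.h):
+ * restrict the PCI driver to specific devices, or pass an empty list to skip
+ * registering the PCI driver entirely:
+ *
+ *     insmod atemsys.ko AllowedPciDevices="0000:01:00.0;0000:02:00.0"
+ *     insmod atemsys.ko AllowedPciDevices=""
+ */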
+
+
+/*
+ * Initialize the module - Register the character device
+ */
+int init_module(void)
+{
+#if (defined CONFIG_XENO_COBALT)
+
+    int major = rtdm_dev_register(&device);
+    if (major < 0)
+    {
+        INF("Failed to register %s (err: %d)\n", device.label, major);
+        return major;
+    }
+#else
+
+    /* Register the character device */
+    int major = register_chrdev(MAJOR_NUM, ATEMSYS_DEVICE_NAME, &Fops);
+    if (major < 0)
+    {
+        INF("Failed to register %s (err: %d)\n",
+               ATEMSYS_DEVICE_NAME, major);
+        return major;
+    }
+#endif /* CONFIG_XENO_COBALT */
+
+    /* Register Pci and Platform Driver */
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+    memset(S_apDrvDescPrivate, 0, ATEMSYS_MAX_NUMBER_DRV_INSTANCES * sizeof(ATEMSYS_T_DRV_DESC_PRIVATE*));
+    platform_driver_register(&mac_driver);
+#if (defined CONFIG_XENO_COBALT)
+    memset(&S_oAtemsysWorkerThreadDesc, 0, sizeof(ATEMSYS_T_WORKER_THREAD_DESC));
+    mutex_init(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+    S_oAtemsysWorkerThreadDesc.etx_thread = kthread_create(AtemsysWorkerThread, (void*)&S_oAtemsysWorkerThreadDesc, "Atemsys_WorkerThread");
+    if (IS_ERR(S_oAtemsysWorkerThreadDesc.etx_thread)) /* kthread_create() returns ERR_PTR() on failure, never NULL */
+    {
+        ERR("Cannot create kthread for AtemsysWorkerThread\n");
+        S_oAtemsysWorkerThreadDesc.etx_thread = NULL;
+    }
+    else
+    {
+        wake_up_process(S_oAtemsysWorkerThreadDesc.etx_thread);
+    }
+#endif /*#if (defined CONFIG_XENO_COBALT)*/
+#endif
+
+#if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+    memset(S_apPciDrvDescPrivate, 0, ATEMSYS_MAX_NUMBER_DRV_INSTANCES * sizeof(ATEMSYS_T_PCI_DRV_DESC_PRIVATE*));
+
+    if (0 == strcmp(AllowedPciDevices, ""))
+    {
+        DBG("Atemsys PCI driver not registered\n");
+    }
+    else
+    {
+        if (0 != pci_register_driver(&oPciDriver))
+        {
+            INF("Register Atemsys PCI driver failed!\n");
+        }
+    }
+#endif
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(6,4,0))
+    S_pDevClass = class_create(ATEMSYS_DEVICE_NAME);
+#else
+    S_pDevClass = class_create(THIS_MODULE, ATEMSYS_DEVICE_NAME);
+#endif
+    if (IS_ERR(S_pDevClass))
+    {
+        INF("class_create failed\n");
+        return -1;
+    }
+
+#if (LINUX_VERSION_CODE < KERNEL_VERSION(2,6,24))
+    S_pDev = class_device_create(S_pDevClass, NULL, MKDEV(MAJOR_NUM, 0), NULL, ATEMSYS_DEVICE_NAME);
+#else
+    S_pDev = device_create(S_pDevClass, NULL, MKDEV(MAJOR_NUM, 0), NULL, ATEMSYS_DEVICE_NAME);
+#endif
+
+#if (defined __arm__) || (defined __aarch64__)
+    {
+        int nRetVal = 0;
+        S_pPlatformDev = platform_device_alloc("atemsys_PDev", MKDEV(MAJOR_NUM, 0));
+        S_pPlatformDev->dev.parent = S_pDev;
+
+        nRetVal = platform_device_add(S_pPlatformDev);
+        if (nRetVal != 0) {
+            ERR("platform_device_add failed. return=%d\n", nRetVal);
+        }
+
+ #if (defined __arm__) || (defined CONFIG_ZONE_DMA32)
+        S_pPlatformDev->dev.coherent_dma_mask = DMA_BIT_MASK(32);
+        if (!S_pPlatformDev->dev.dma_mask)
+        {
+            S_pPlatformDev->dev.dma_mask = &S_pPlatformDev->dev.coherent_dma_mask;
+        }
+ #endif
+    }
+#else
+    S_pPlatformDev = NULL;
+#endif
+
+    if (IS_ERR(S_pDev))
+    {
+        INF("device_create failed\n");
+        return -1;
+    }
+
+    S_pDev->coherent_dma_mask = DMA_BIT_MASK(32);
+    if (!S_pDev->dma_mask)
+    {
+        S_pDev->dma_mask = &S_pDev->coherent_dma_mask;
+    }
+
+#if (defined CONFIG_OF)
+    OF_DMA_CONFIGURE(S_pDev,S_pDev->of_node);
+#endif
+
+    INIT_LIST_HEAD(&S_DevNode.list);
+
+    INF("%s v%s loaded\n", ATEMSYS_DEVICE_NAME, ATEMSYS_VERSION_STR);
+    return 0;
+}
+
+/*
+ * Cleanup - unregister the appropriate file from /proc
+ */
+void cleanup_module(void)
+{
+   INF("%s v%s unloaded\n", ATEMSYS_DEVICE_NAME, ATEMSYS_VERSION_STR);
+
+    /* Unregister Pci and Platform Driver */
+#if (defined INCLUDE_ATEMSYS_DT_DRIVER)
+    platform_driver_unregister(&mac_driver);
+#if (defined CONFIG_XENO_COBALT)
+    S_oAtemsysWorkerThreadDesc.bWorkerTaskShutdown = true;
+    for (;;)
+    {
+        if (!S_oAtemsysWorkerThreadDesc.bWorkerTaskRunning)
+        {
+            break;
+        }
+
+        msleep(100);
+    }
+    mutex_destroy(&S_oAtemsysWorkerThreadDesc.WorkerTask_mutex);
+#endif /*#if (defined CONFIG_XENO_COBALT)*/
+#endif
+
+#if (defined INCLUDE_ATEMSYS_PCI_DRIVER)
+    if (0 != strcmp(AllowedPciDevices, ""))
+    {
+        pci_unregister_driver(&oPciDriver);
+    }
+#endif
+
+#if (defined __arm__) || (defined __aarch64__)
+    if (NULL != S_pPlatformDev)
+    {
+        platform_device_del(S_pPlatformDev);
+        platform_device_put(S_pPlatformDev);
+        S_pPlatformDev = NULL;
+    }
+#endif
+
+#if (defined CONFIG_OF)
+   device_release_driver(S_pDev); //see device_del() -> bus_remove_device()
+#endif
+
+   device_destroy(S_pDevClass, MKDEV(MAJOR_NUM, 0));
+   class_destroy(S_pDevClass);
+
+#if (defined CONFIG_XENO_COBALT)
+   rtdm_dev_unregister(&device);
+#else
+   unregister_chrdev(MAJOR_NUM, ATEMSYS_DEVICE_NAME);
+#endif /* CONFIG_XENO_COBALT */
+}
+
diff --git a/kernel/drivers/misc/atemsys-main/atemsys.h b/kernel/drivers/misc/atemsys-main/atemsys.h
new file mode 100644
index 0000000..d17c370
--- /dev/null
+++ b/kernel/drivers/misc/atemsys-main/atemsys.h
@@ -0,0 +1,428 @@
+/*-----------------------------------------------------------------------------
+ * atemsys.h
+ * Copyright (c) 2009 - 2020 acontis technologies GmbH, Ravensburg, Germany
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute  it and/or modify it
+ * under  the terms of  the GNU General  Public License as published by the
+ * Free Software Foundation;  either version 2 of the  License, or (at your
+ * option) any later version.
+ *
+ * Responsible               Paul Bussmann
+ * Description               atemsys.ko header file
+ * Note: This header is also included by userspace!
+
+ *  Changes:
+ *
+ *  V1.0.00 - Initial version, PCI/PCIe only.
+ *  V1.1.00 - PowerPC tweaks.
+ *            Support for SoC devices (no PCI, i.e. Freescale eTSEC).
+ *            Support for current Linux kernels (3.0). Removed deprecated code.
+ *  V1.2.00 - 64 bit support. Compat IOCTL's for 32-Bit usermode apps.
+ *  V1.2.01 - request_irq() sometimes failed -> Map irq to virq under powerpc.
+ *  V1.2.02 - Support for current Linux kernel (3.8.0)
+ *  V1.2.03 - Support for current Linux kernel (3.8.13) on armv7l (beaglebone)
+ *  V1.2.04 - Use dma_alloc_coherent for arm, because of DMA memory corruption on
+ *            Xilinx Zynq.
+ *  V1.2.05 - OF Device Tree support for Xilinx Zynq (VIRQ mapping)
+ *  V1.2.06 - Wrong major version.
+ *  V1.2.07 - Tolerate closing, e.g. due to system()-calls.
+ *  V1.2.08 - Add VM_DONTCOPY to prevent crash on system()-calls
+ *  V1.2.09 - Apply second controller name change in dts (standard GEM driver for Xilinx Zynq) to avoid default driver loading.
+ *  V1.2.10 - Removed IO address alignment to support R6040
+ *  V1.2.11 - Fix lockup in device_read (tLinkOsIst if NIC in interrupt mode) on dev_int_disconnect
+ *  V1.2.12 - Fix underflow in dev_disable_irq() when more than one interrupts pending because of disable_irq_nosync usage
+ *  V1.2.13 - Fix usage of x64 PCI physical addresses
+ *  V1.2.14 - Changes for use with kernels beginning from 2.6.18
+ *  V1.2.15 - Add udev auto-loading support via DTB
+ *  V1.2.16 - Add interrupt mode support for Xenomai 3 (Cobalt)
+ *  V1.3.01 - Add IOCTL_MOD_GETVERSION
+ *  V1.3.02 - Add support for kernel >= 4.11.00
+ *  V1.3.03 - Fix IOCTL_MOD_GETVERSION
+ *  V1.3.04 - Fix interrupt deadlock in Xenomai 2
+ *  V1.3.05 - Use correct PCI domain
+ *  V1.3.06 - Use rtdm_printk for Cobalt, add check if dev_int_disconnect was successful
+ *  V1.3.07 - Remove IOCTL_PCI_RELEASE_DEVICE warnings due to untracked IOCTL_PCI_CONF_DEVICE
+ *  V1.3.08 - Add support for kernel >= 4.13.00
+ *  V1.3.09 - Add support for PRU ICSS in Device Tree
+ *  V1.3.10 - Fix compilation on Ubuntu 18.04, Kernel 4.9.90, Xenomai 3.0.6 x64 Cobalt
+ *  V1.3.11 - Add enable access to ARM cycle count register(CCNT)
+ *  V1.3.12 - Add atemsys API version selection
+ *  V1.3.13 - Add ARM64 support
+ *  V1.3.14 - Fix edge type interrupt (enabled if Kernel >= 3.4.1, because exported irq_to_desc needed)
+ *            Fix Xenomai Cobalt interrupt mode
+ *  V1.3.15 - Fix crash while loading kernel module on ARM64
+ *            Add support for kernel >= 5.0.00
+ *  V1.3.16 - Handle API changes at kernel >= 4.18.00
+ *            Fix ARM DMA allocation for PCIe
+ *  V1.4.01 - Register atemsys as Device Tree Ethernet driver "atemsys"
+ *            and use Linux PHY and Mdio-Bus Handling
+ *  V1.4.02 - Device Tree Ethernet driver improved robustness for unbind linux driver
+ *            Fix for kernel >= 5.0.00  with device tree,
+ *            Fix ARM/AARCH64 DMA configuration for PCIe and
+ *            Fix occasional insmod Kernel Oops
+ *  V1.4.03 - Add log level (insmod atemsys loglevel=6) analog to kernel log level
+ *  V1.4.04 - Fix Device Tree Ethernet driver robustness
+ *            Add Device Tree Ethernet driver support for ICSS
+ *  V1.4.05 - Add IOMMU/Vt-D support
+ *  V1.4.06 - Fix IOMMU/Vt-D support for ARM
+ *            Fix Mdio-Bus timeout for kernel >= 5.0.00
+ *  V1.4.07 - Add support for imx8 / FslFec 64bit
+ *  V1.4.08 - Fix Xilinx Ultrascale
+ *            Fix cleanup of Device Tree Ethernet driver
+ *  V1.4.09 - Add atemsys as PCI driver for Intel, Realtek and Beckhoff
+ *            Add memory allocation and mapping on platform / PCI driver device
+ *            Fix PHY driver for FslFec 64Bit
+ *  V1.4.10 - Fix Device Tree Ethernet driver: Mdio/Phy sub-node, test 4.6.x kernel
+ *            Add Device Tree Ethernet driver support for GEM
+ *            Fix PCI driver: force DMA to 32 bit
+ *  V1.4.11 - Fix for kernel >= 5.5.00  with device tree,
+ *            Fix Device Tree Ethernet driver support for DW3504
+ *            Fix PCI driver: only for kernel >= 4.4.00
+ *  V1.4.12 - Fix for kernel >= 5.11.00,
+ *            Add support for 64Bit IO Memory of PCI card
+ *  V1.4.13 - Fix for kernel <= 3.16.00,
+ *            Add HAVE_ACCESS_OK_TYPE define to handle non-mainstream API variance
+ *            Connect to interrupt via binded device tree - platform device
+ *  V1.4.14 - Fix for arm/aarch64 kernel >= 5.10.00,
+ *            Add support for 64Bit DMA Memory
+ *            Add support for PCI DMA address translation
+ *  V1.4.15 - Fix API version IO Controls
+ *  V1.4.16 - Fix Xenomai3 on arm,
+ *            Add support for Device Tree Ethernet driver and PCI driver with Xenomai3
+ *            Fix PCI DMA address translation on arm
+ *  V1.4.17 - Fix dma_set_mask_and_coherent() missing in kernels under 3.12.55
+ *  V1.4.18 - Remove obsolete ARM cycle count register(CCNT)
+ *            Fix PCI driver do registration for all Ethernet network adapters
+ *            Add modul parameter AllowedPciDevices to adjust PCI driver, AllowedPciDevices="" will turn off PCI driver,
+ *            (insmod atemsys AllowedPciDevices="0000:01:00.0;0000:02:00.0")
+ *  V1.4.19 - Fix Xenomai2 ARMv8 32Bit
+ *  V1.4.20 - Fix support for CMA for kernel > 4.9.00
+ *  V1.4.21 - Add Device Tree Ethernet driver support for CPSW
+ *            Add Device Tree Ethernet driver phy reset
+ *            Fix Device Tree Ethernet on Xenomai3
+ *            Add HAVE_IRQ_TO_DESC define to handle non-mainstream API variance
+ *  V1.4.22 - Fix Build Warnings
+ *            Fix kernel config depending irq structures
+ *            Fix kernel version 4.12 to 4.15 for handle of dma_coherent bit
+ *            Add IOMMU support, new mapping to userspace active and tested for kernel > 5.4,
+ *             use old mapping with ATEMSYS_LEGACY_DMA=1 define or
+ *             activate new mapping with ATEMSYS_LEGACY_DMA=0 define for older kernel
+ *  V1.4.23 - Fix PCI bars
+ *  V1.4.24 - Add Device Tree Ethernet driver support for STM32mp135
+ *  V1.4.25 - Add IOCTL_INT_CPU_AFFINITY
+ *            Add Device Tree Ethernet driver support for RockChip
+ *  V1.4.26 - Fix for arm/aarch64 kernel >= 6.00.00,
+ *            Fix version of_dma_configure
+ *            Add ATEMSYS_IOCTL_IOMEM_CMD for Kernel mode access to protected registers
+ *            Add ATEMSYS_IOCTL_CPSWG_CMD to configure K3_UDMA_CPSWG Channels, Flows and Rings
+ *  V1.4.27 - Fix ATEMSYS_IOCTL_CPSWG_CMD kernel version,
+ *            Add Device Tree Ethernet driver support for CPSWG
+ *  V1.4.28 - Fix for PCIe compatibility with Atemsys before V1.3.5,
+ *          - Fix for Kernel > 6.05.00
+ *  atemsys is shared across EC-Master V2.7+
+
+ *----------------------------------------------------------------------------*/
+
+#ifndef ATEMSYS_H
+#define ATEMSYS_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+#ifndef EC_ATEMSYSVERSION
+#define EC_ATEMSYSVERSION(a,b,c) (((a)<<2*8)+((b)<<1*8)+((c)<<0*8))
+#endif
+
+#define ATEMSYS_VERSION_STR "1.4.28"
+#define ATEMSYS_VERSION_NUM  1,4,28
+#if (defined ATEMSYS_C)
+#define USE_ATEMSYS_API_VERSION EC_ATEMSYSVERSION(1,4,28)
+#endif
+
+/* support selection */
+
+#if   (USE_ATEMSYS_API_VERSION < EC_ATEMSYSVERSION(1,3,5)) || (!defined USE_ATEMSYS_API_VERSION)
+/* till v1.3.04 */
+#define ATEMSYS_T_PCI_SELECT_DESC               ATEMSYS_T_PCI_SELECT_DESC_v1_0_00
+#define ATEMSYS_T_PCI_MEMBAR                    ATEMSYS_T_PCI_MEMBAR_v1_0_00
+#define ATEMSYS_IOCTL_PCI_FIND_DEVICE           ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00
+#define ATEMSYS_IOCTL_PCI_CONF_DEVICE           ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00
+
+#elif (USE_ATEMSYS_API_VERSION < EC_ATEMSYSVERSION(1,4,12))
+/* v1.3.05 till v1.4.11 */
+#define ATEMSYS_T_PCI_SELECT_DESC               ATEMSYS_T_PCI_SELECT_DESC_v1_3_05
+#define ATEMSYS_T_PCI_MEMBAR                    ATEMSYS_T_PCI_MEMBAR_v1_3_05
+#define ATEMSYS_IOCTL_PCI_FIND_DEVICE           ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_05
+#define ATEMSYS_IOCTL_PCI_CONF_DEVICE           ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_05
+
+#else /* v1.4.12 and later */
+#define ATEMSYS_T_PCI_SELECT_DESC               ATEMSYS_T_PCI_SELECT_DESC_v1_4_12
+#define ATEMSYS_T_PCI_MEMBAR                    ATEMSYS_T_PCI_MEMBAR_v1_4_12
+#define ATEMSYS_IOCTL_PCI_FIND_DEVICE           ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_4_12
+#define ATEMSYS_IOCTL_PCI_CONF_DEVICE           ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_4_12
+#endif
+
+#define DRIVER_SUCCESS  0
+
+/*
+ * The major device number. We can't rely on dynamic
+ * registration any more, because ioctls need to know
+ * it.
+ */
+#define MAJOR_NUM 101
+
+#define ATEMSYS_IOCTL_PCI_RELEASE_DEVICE        _IO(MAJOR_NUM,    2)
+#define ATEMSYS_IOCTL_INT_CONNECT               _IOW(MAJOR_NUM,   3, __u32)
+#define ATEMSYS_IOCTL_INT_DISCONNECT            _IOW(MAJOR_NUM,   4, __u32)
+#define ATEMSYS_IOCTL_INT_INFO                  _IOR(MAJOR_NUM,   5, ATEMSYS_T_INT_INFO)
+#define ATEMSYS_IOCTL_MOD_GETVERSION            _IOR(MAJOR_NUM,   6, __u32)
+#define ATEMSYS_IOCTL_CPU_ENABLE_CYCLE_COUNT    _IOW(MAJOR_NUM,   7, __u32)
+#define ATEMSYS_IOCTL_GET_MAC_INFO              _IOWR(MAJOR_NUM,  8, ATEMSYS_T_MAC_INFO)
+#define ATEMSYS_IOCTL_PHY_START_STOP            _IOWR(MAJOR_NUM,  9, ATEMSYS_T_PHY_START_STOP_INFO)
+#define ATEMSYS_IOCTL_GET_MDIO_ORDER            _IOWR(MAJOR_NUM, 10, ATEMSYS_T_MDIO_ORDER)
+#define ATEMSYS_IOCTL_RETURN_MDIO_ORDER         _IOWR(MAJOR_NUM, 11, ATEMSYS_T_MDIO_ORDER)
+#define ATEMSYS_IOCTL_GET_PHY_INFO              _IOWR(MAJOR_NUM, 12, ATEMSYS_T_PHY_INFO)
+#define ATEMSYS_IOCTL_MOD_SET_API_VERSION       _IOR(MAJOR_NUM,  13, __u32)
+#define ATEMSYS_IOCTL_PHY_RESET                 _IOWR(MAJOR_NUM, 14, __u32)
+#define ATEMSYS_IOCTL_INT_SET_CPU_AFFINITY      _IOWR(MAJOR_NUM, 15, __u32)
+#define ATEMSYS_IOCTL_IOMEM_CMD                 _IOWR(MAJOR_NUM, 16, ATEMSYS_T_IOMEM_CMD)
+#define ATEMSYS_IOCTL_CPSWG_CMD                 _IOWR(MAJOR_NUM, 17, ATEMSYS_T_CPSWG_CMD)
+
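+/*
+ * Minimal userspace sketch (illustrative only; error handling is omitted and
+ * MAC instance index 0 is an assumption):
+ */
+#if 0
+    int fd = open(ATEMSYS_FILE_NAME, O_RDWR);
+
+    __u32 dwVersion = 0;
+    ioctl(fd, ATEMSYS_IOCTL_MOD_GETVERSION, &dwVersion);
+
+    ATEMSYS_T_PHY_INFO oPhyInfo;
+    memset(&oPhyInfo, 0, sizeof(oPhyInfo));
+    oPhyInfo.dwIndex = 0; /* first MAC instance */
+    ioctl(fd, ATEMSYS_IOCTL_GET_PHY_INFO, &oPhyInfo);
+    /* oPhyInfo.dwLink / dwSpeed / dwDuplex now mirror the Linux PHY state */
+#endif
+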
+/* support legacy source code */
+#define IOCTL_PCI_FIND_DEVICE           ATEMSYS_IOCTL_PCI_FIND_DEVICE
+#define IOCTL_PCI_CONF_DEVICE           ATEMSYS_IOCTL_PCI_CONF_DEVICE
+#define IOCTL_PCI_RELEASE_DEVICE        ATEMSYS_IOCTL_PCI_RELEASE_DEVICE
+#define IOCTL_INT_CONNECT               ATEMSYS_IOCTL_INT_CONNECT
+#define IOCTL_INT_DISCONNECT            ATEMSYS_IOCTL_INT_DISCONNECT
+#define IOCTL_INT_INFO                  ATEMSYS_IOCTL_INT_INFO
+#define IOCTL_MOD_GETVERSION            ATEMSYS_IOCTL_MOD_GETVERSION
+#define IOCTL_CPU_ENABLE_CYCLE_COUNT    ATEMSYS_IOCTL_CPU_ENABLE_CYCLE_COUNT
+#define IOCTL_PCI_FIND_DEVICE_v1_3_04   ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_04
+#define IOCTL_PCI_CONF_DEVICE_v1_3_04   ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_04
+#define USE_PCI_INT                     ATEMSYS_USE_PCI_INT
+#define INT_INFO                        ATEMSYS_T_INT_INFO
+#define PCI_SELECT_DESC                 ATEMSYS_T_PCI_SELECT_DESC
+
+
+/*
+ * The name of the device driver
+ */
+#define ATEMSYS_DEVICE_NAME "atemsys"
+
+/* CONFIG_XENO_COBALT/CONFIG_XENO_MERCURY defined in xeno_config.h (may not be available when building atemsys.ko) */
+#if (!defined CONFIG_XENO_COBALT) && (!defined CONFIG_XENO_MERCURY) && (defined CONFIG_XENO_VERSION_MAJOR) && (CONFIG_XENO_VERSION_MAJOR >= 3)
+#define CONFIG_XENO_COBALT
+#endif
+
+/*
+ * The name of the device file
+ */
+#ifdef CONFIG_XENO_COBALT
+#define ATEMSYS_FILE_NAME "/dev/rtdm/" ATEMSYS_DEVICE_NAME
+#else
+#define ATEMSYS_FILE_NAME "/dev/" ATEMSYS_DEVICE_NAME
+#endif /* CONFIG_XENO_COBALT */
+
+#define ATEMSYS_PCI_MAXBAR (6)
+#define ATEMSYS_USE_PCI_INT (0xFFFFFFFF) /* Query the selected PCI device for the assigned IRQ number */
+
+typedef struct
+{
+    __u32       dwInterrupt;
+} __attribute__((packed)) ATEMSYS_T_INT_INFO;
+
+
+/* v1_4_12 */
+
+#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_4_12   _IOWR(MAJOR_NUM,  0, ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)
+#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_4_12   _IOWR(MAJOR_NUM,  1, ATEMSYS_T_PCI_SELECT_DESC_v1_4_12)
+
+typedef struct
+{
+    __u64       qwIOMem;          /* [out] IO Memory of PCI card (physical address) */
+    __u32       dwIOLen;          /* [out] Length of the IO Memory area*/
+} __attribute__((packed)) ATEMSYS_T_PCI_MEMBAR_v1_4_12;
+
+typedef struct
+{
+    __s32       nVendID;          /* [in] vendor ID */
+    __s32       nDevID;           /* [in] device ID */
+    __s32       nInstance;        /* [in] instance to look for (0 is the first instance) */
+    __s32       nPciBus;          /* [in/out] bus */
+    __s32       nPciDev;          /* [in/out] device */
+    __s32       nPciFun;          /* [in/out] function */
+    __s32       nBarCnt;          /* [out] Number of entries in aBar */
+    __u32       dwIrq;            /* [out] IRQ or USE_PCI_INT */
+    ATEMSYS_T_PCI_MEMBAR_v1_4_12  aBar[ATEMSYS_PCI_MAXBAR]; /* [out] IO memory */
+    __s32       nPciDomain;       /* [in/out] domain */
+} __attribute__((packed)) ATEMSYS_T_PCI_SELECT_DESC_v1_4_12;
+
+
+/* v1_3_05 */
+
+#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_05   _IOWR(MAJOR_NUM,  0, ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)
+#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_05   _IOWR(MAJOR_NUM,  1, ATEMSYS_T_PCI_SELECT_DESC_v1_3_05)
+
+typedef struct
+{
+    __u32       dwIOMem;          /* [out] IO Memory of PCI card (physical address) */
+    __u32       dwIOLen;          /* [out] Length of the IO Memory area*/
+} __attribute__((packed)) ATEMSYS_T_PCI_MEMBAR_v1_3_05;
+
+typedef struct
+{
+    __s32       nVendID;          /* [in] vendor ID */
+    __s32       nDevID;           /* [in] device ID */
+    __s32       nInstance;        /* [in] instance to look for (0 is the first instance) */
+    __s32       nPciBus;          /* [in/out] bus */
+    __s32       nPciDev;          /* [in/out] device */
+    __s32       nPciFun;          /* [in/out] function */
+    __s32       nBarCnt;          /* [out] Number of entries in aBar */
+    __u32       dwIrq;            /* [out] IRQ or USE_PCI_INT */
+    ATEMSYS_T_PCI_MEMBAR_v1_3_05  aBar[ATEMSYS_PCI_MAXBAR]; /* [out] IO memory */
+    __s32       nPciDomain;       /* [in/out] domain */
+} __attribute__((packed)) ATEMSYS_T_PCI_SELECT_DESC_v1_3_05;
+
+
+/* v1_0_00 */
+
+#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_3_04   ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00
+#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_3_04   ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00
+
+#define ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00   _IOWR(MAJOR_NUM, 0, ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)
+#define ATEMSYS_IOCTL_PCI_CONF_DEVICE_v1_0_00   _IOWR(MAJOR_NUM, 1, ATEMSYS_T_PCI_SELECT_DESC_v1_0_00)
+
+typedef struct
+{
+    __u32       dwIOMem;          /* [out] IO Memory of PCI card (physical address) */
+    __u32       dwIOLen;          /* [out] Length of the IO Memory area */
+} __attribute__((packed)) ATEMSYS_T_PCI_MEMBAR_v1_0_00;
+
+typedef struct
+{
+    __s32       nVendID;          /* [in] vendor ID */
+    __s32       nDevID;           /* [in] device ID */
+    __s32       nInstance;        /* [in] instance to look for (0 is the first instance) */
+    __s32       nPciBus;          /* [in/out] bus */
+    __s32       nPciDev;          /* [in/out] device */
+    __s32       nPciFun;          /* [in/out] function */
+    __s32       nBarCnt;          /* [out] Number of entries in aBar */
+    __u32       dwIrq;            /* [out] IRQ or USE_PCI_INT */
+    ATEMSYS_T_PCI_MEMBAR_v1_0_00   aBar[ATEMSYS_PCI_MAXBAR]; /* [out] IO memory */
+} __attribute__((packed)) ATEMSYS_T_PCI_SELECT_DESC_v1_0_00;
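+
+/*
+ * A minimal usage sketch under stated assumptions (the vendor/device IDs
+ * are placeholders): fill the descriptor, let the find ioctl locate the
+ * device, then read back the BARs and IRQ the kernel filled in.
+ *
+ *   ATEMSYS_T_PCI_SELECT_DESC_v1_0_00 desc = { 0 };
+ *   desc.nVendID   = 0x8086;   // placeholder vendor ID
+ *   desc.nDevID    = 0x10d3;   // placeholder device ID
+ *   desc.nInstance = 0;        // first matching instance
+ *   if (ioctl(fd, ATEMSYS_IOCTL_PCI_FIND_DEVICE_v1_0_00, &desc) == 0)
+ *       printf("BAR0 0x%x len %u, IRQ %u\n",
+ *              desc.aBar[0].dwIOMem, desc.aBar[0].dwIOLen, desc.dwIrq);
+ */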
+
+/* must match EC_T_PHYINTERFACE in EcLink.h */
+typedef enum _EC_T_PHYINTERFACE_ATEMSYS
+{
+    eATEMSYS_PHY_FIXED_LINK = 1 << 0,
+    eATEMSYS_PHY_MII        = 1 << 1,
+    eATEMSYS_PHY_RMII       = 1 << 2,
+    eATEMSYS_PHY_GMII       = 1 << 3,
+    eATEMSYS_PHY_SGMII      = 1 << 4,
+    eATEMSYS_PHY_RGMII      = 1 << 5,
+    eATEMSYS_PHY_OSDRIVER   = 1 << 6,
+
+    /* Borland C++ datatype alignment correction */
+    eATEMSYS_PHY_BCppDummy  = 0xFFFFFFFF
+} ATEMSYS_T_PHYINTERFACE;
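+
+/*
+ * The enumerators are distinct bits, so a capability mask can be built by
+ * OR-ing them, e.g. (eATEMSYS_PHY_RGMII | eATEMSYS_PHY_OSDRIVER); whether
+ * the SDK accepts combined values is an assumption not verified here.
+ */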
+
+
+#define EC_LINKOS_IDENT_MAX_LEN            0x20  /* must match EcLink.h */
+#define PHY_AUTO_ADDR              ((__u32)-1)  /* must match EcPhy.h */
+typedef struct
+{
+    char                        szIdent[EC_LINKOS_IDENT_MAX_LEN];   /* [out]    Name of Mac e.g. "FslFec" */
+    __u32                       dwInstance;                         /* [out]    Number of used Mac (in official order!) */
+    __u32                       dwIndex;                            /* [in]     Index of Mac in atemsys handling */
+    __u64                       qwRegAddr;                          /* [in]     Hardware register address of mac */
+    __u32                       dwRegSize;                          /* [in]     Hardware register size of mac */
+    __u32                       dwStatus;                           /* [in]     Status of mac according to device tree */
+    ATEMSYS_T_PHYINTERFACE      ePhyMode;                           /* [in]     Phy mac connection mode mii, rmii, rgmii, gmii, sgmii defined in SDK/INC/EcLink.h */
+    __u32                       bNoMdioBus;                         /* [in]     MAC does not need to run its own MDIO bus */
+    __u32                       dwPhyAddr;                          /* [in]     Address of PHY on mdio bus */
+    __u32                       dwErrorCode;                        /* [in]     Error code defined in SDK/INC/EcError.h */
+    __u32                       bPhyResetSupported;                 /* [in]     Device tree has data for PHY reset */
+    __u32                       dwReserved[15];
+} __attribute__((packed)) ATEMSYS_T_MAC_INFO;
+
+typedef struct
+{
+    __u32                       dwIndex;                            /* [out]    Index of Mac in atemsys handling */
+    __u32                       bInUse;                             /* [in]     Descriptor is in use */
+    __u32                       bInUseByIoctl;                      /* [in]     Descriptor is in use by ATEMSYS_IOCTRLs */
+    __u32                       bWriteOrder;                        /* [in/out] Mdio operation - write = 1, read = 0 */
+    __u16                       wMdioAddr;                          /* [in/out] Current address */
+    __u16                       wReg;                               /* [in/out] Current register */
+    __u16                       wValue;                             /* [in/out] Current value read or write */
+    __u32                       dwTimeoutMsec;                      /* [in]     Timeout in milliseconds */
+    __u32                       dwErrorCode;                        /* [in]     Error code defined in SDK/INC/EcError.h */
+    __u32                       dwReserved[4];
+} __attribute__((packed)) ATEMSYS_T_MDIO_ORDER;
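+
+/*
+ * A minimal sketch of an MDIO write order, inferred from the field
+ * comments above (register and value are placeholders):
+ *
+ *   ATEMSYS_T_MDIO_ORDER order = { 0 };
+ *   order.dwIndex       = 0;        // first MAC handled by atemsys
+ *   order.bInUse        = 1;
+ *   order.bWriteOrder   = 1;        // 1 = write, 0 = read
+ *   order.wMdioAddr     = 1;        // PHY address on the bus
+ *   order.wReg          = 0x00;     // placeholder register (BMCR)
+ *   order.wValue        = 0x1200;   // placeholder value
+ *   order.dwTimeoutMsec = 100;
+ */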
+
+typedef struct
+{
+    __u32                       dwIndex;                            /* [out]    Index of Mac in atemsys handling */
+    __u32                       dwLink;                             /* [in]     Link defined in /linux/phy.h */
+    __u32                       dwDuplex;                           /* [in]     Duplex defined in /linux/phy.h (0x00: half, 0x01: full, 0xFF: unknown) */
+    __u32                       dwSpeed;                            /* [in]     Speed defined in /linux/phy.h */
+    __u32                       bPhyReady;                          /* [in]     Mdio Bus is currently not active */
+    __u32                       dwErrorCode;                        /* [in]     Error code defined in SDK/INC/EcError.h */
+    __u32                       dwReserved[4];
+} __attribute__((packed)) ATEMSYS_T_PHY_INFO;
+
+typedef struct
+{
+    __u32                       dwIndex;                            /* [out]    Index of Mac in atemsys handling */
+    __u32                       bStart;                             /* [out]    Start = 1, stop = 0 */
+    __u32                       dwErrorCode;                        /* [in]     Error code defined in SDK/INC/EcError.h */
+    __u32                       dwReserved[4];
+} __attribute__((packed)) ATEMSYS_T_PHY_START_STOP_INFO;
+
+
+typedef struct
+{
+    __u32                       dwIndex;                            /* [out]    Index of Mac in atemsys handling */
+    __u32                       dwCmd;                              /* [out]    Id of the command */
+#define ATEMSYS_IOMEM_CMD_MAP_PERMANENT   1
+#define ATEMSYS_IOMEM_CMD_UNMAP_PERMANENT 2
+#define ATEMSYS_IOMEM_CMD_READ            3
+#define ATEMSYS_IOMEM_CMD_WRITE           4
+
+    __u64                       qwPhys;                             /* [out]    physical memory address */
+    __u32                       dwSize;                             /* [out]    size of the memory area */
+    __u32                       dwOffset;                           /* [out]    memory offset for read and write command */
+    __u32                       dwDataSize;                         /* [out]    data size for read and write command */
+    __u32                       dwData[4];                          /* [in/out] data buffer for read and write command */
+} __attribute__((packed)) ATEMSYS_T_IOMEM_CMD;
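+
+/*
+ * A minimal sketch of a 32-bit register read through the iomem command,
+ * inferred from the field comments above (the offset is a placeholder):
+ *
+ *   ATEMSYS_T_IOMEM_CMD cmd = { 0 };
+ *   cmd.dwIndex    = 0;
+ *   cmd.dwCmd      = ATEMSYS_IOMEM_CMD_READ;
+ *   cmd.dwOffset   = 0x10;           // placeholder register offset
+ *   cmd.dwDataSize = sizeof(__u32);  // result lands in cmd.dwData[0]
+ */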
+
+
+typedef struct
+{
+    __u32                       dwIndex;                            /* [out]    Index of Mac in atemsys handling */
+    __u32                       dwChannelIdx;                       /* [out]    Index of the internal channel handling */
+    __u32                       dwCmd;                              /* [out]    Id of the command */
+#define ATEMSYS_CPSWG_CMD_CONFIG_TX  1
+#define ATEMSYS_CPSWG_CMD_CONFIG_RX  2
+#define ATEMSYS_CPSWG_CMD_ENABLE_TX  3
+#define ATEMSYS_CPSWG_CMD_ENABLE_RX  4
+#define ATEMSYS_CPSWG_CMD_DISABLE_TX 5
+#define ATEMSYS_CPSWG_CMD_DISABLE_RX 6
+#define ATEMSYS_CPSWG_CMD_RELEASE_TX 7
+#define ATEMSYS_CPSWG_CMD_RELEASE_RX 8
+
+    __u64                       qwRingDma;                          /* [in]     first ring: physical memory address */
+    __u32                       dwRingSize;                         /* [in/out] first ring: size / number of elements */
+    __u32                       dwRingId;                           /* [in]     first ring: index */
+    __u64                       qwRingFdqDma;                       /* [in]     second (FDQ) ring: physical memory address */
+    __u32                       dwRingFdqSize;                      /* [in/out] second (FDQ) ring: size / number of elements */
+    __u32                       dwRingFdqId;                        /* [in]     second (FDQ) ring: index */
+    __u32                       dwChanId;                           /* [in]     channel ID */
+    __u32                       dwFlowIdBase;                       /* [in]     flow ID base */
+    __u32                       dwReserved[32];
+} __attribute__((packed)) ATEMSYS_T_CPSWG_CMD;
+
+#endif  /* ATEMSYS_H */
+
diff --git a/kernel/drivers/misc/eeprom/at24.c b/kernel/drivers/misc/eeprom/at24.c
index d02bf9c..09d976a 100644
--- a/kernel/drivers/misc/eeprom/at24.c
+++ b/kernel/drivers/misc/eeprom/at24.c
@@ -607,6 +607,59 @@
 }
 EXPORT_SYMBOL(at24_mac1_read);
 
+/*
+ * The EEPROM layout is assumed fixed: the second station address lives at
+ * offset 0x20 and the third at 0x30, six bytes each.
+ */
+ssize_t at24_mac2_read(unsigned char *mac)
+{
+	char buf[6];
+	ssize_t ret;
+
+	if (at24_private == NULL) {
+		printk(KERN_ERR "at24_mac2_read: no at24 device registered\n");
+		return 0;
+	}
+
+	memset(buf, 0x00, sizeof(buf));
+	ret = at24_read_private(at24_private, buf, 0x20, 6);
+	if (ret > 0)
+		memcpy(mac, buf, 6);
+
+	return ret;
+}
+EXPORT_SYMBOL(at24_mac2_read);
+
+ssize_t at24_mac3_read(unsigned char *mac)
+{
+	char buf[6];
+	ssize_t ret;
+
+	if (at24_private == NULL) {
+		printk(KERN_ERR "at24_mac3_read: no at24 device registered\n");
+		return 0;
+	}
+
+	memset(buf, 0x00, sizeof(buf));
+	ret = at24_read_private(at24_private, buf, 0x30, 6);
+	if (ret > 0)
+		memcpy(mac, buf, 6);
+
+	return ret;
+}
+EXPORT_SYMBOL(at24_mac3_read);
+
 static int at24_write(void *priv, unsigned int off, void *val, size_t count)
 {
 	struct at24_data *at24;
diff --git a/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c b/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c
index 84c4d09..d07e7b0 100644
--- a/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c
+++ b/kernel/drivers/net/ethernet/realtek/r8168/r8168_n.c
@@ -115,6 +115,8 @@
 #define FIRMWARE_8168FP_3   "rtl_nic/rtl8168fp-3.fw"
 #define FIRMWARE_8168FP_4   "rtl_nic/rtl8168fp-4.fw"
 
+static int my_id = 1;
+
 /* Maximum number of multicast addresses to filter (vs. Rx-all-multicast).
    The RTL chips use a 64 element hash table based on the Ethernet CRC. */
 static const int multicast_filter_limit = 32;
@@ -24055,12 +24057,18 @@
         free_netdev(dev);
 }
 
+extern ssize_t at24_mac1_read(unsigned char* mac);
+extern ssize_t at24_mac2_read(unsigned char* mac);
+extern ssize_t at24_mac3_read(unsigned char* mac);
+
 static int
 rtl8168_get_mac_address(struct net_device *dev)
 {
         struct rtl8168_private *tp = netdev_priv(dev);
         int i;
         u8 mac_addr[MAC_ADDR_LEN];
+	unsigned char mac[6] = {0};	/* zeroed in case the EEPROM read fails */
 
         for (i = 0; i < MAC_ADDR_LEN; i++)
                 mac_addr[i] = RTL_R8(tp, MAC0 + i);
@@ -24115,6 +24123,32 @@
                         }
                 }
         }
+
+	/* Each probed port consumes the next EEPROM MAC slot, in probe order. */
+	if (my_id == 1)
+		at24_mac1_read(mac);
+	else if (my_id == 2)
+		at24_mac2_read(mac);
+	else if (my_id == 3)
+		at24_mac3_read(mac);
+
+	if ((mac[0] == 0x68) && (mac[1] == 0xed)) {
+		for (i = 0; i < ETH_ALEN; i++)
+			mac_addr[i] = mac[i];
+		my_id += 1;
+		netif_err(tp, probe, dev, "Get ether addr from at24 %pM\n",
+			  mac_addr);
+	} else {
+		printk(KERN_ERR "rtl8168: mac read from eeprom failed\n");
+		mac_addr[0] = 0x66;
+		mac_addr[1] = 0xED;
+		mac_addr[2] = 0xB5;
+		mac_addr[3] = 0x64;
+		mac_addr[4] = 0x72;
+		mac_addr[5] = my_id;
+		my_id += 1;
+	}
 
         if (!is_valid_ether_addr(mac_addr)) {
                 netif_err(tp, probe, dev, "Invalid ether addr %pM\n",
@@ -28739,5 +28773,6 @@
 #endif
 }
 
-module_init(rtl8168_init_module);
+//module_init(rtl8168_init_module);
+late_initcall(rtl8168_init_module);
 module_exit(rtl8168_cleanup_module);
diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c b/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
index 27310a8..53bba2a 100644
--- a/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
+++ b/kernel/drivers/net/ethernet/stmicro/stmmac/dwmac-rk.c
@@ -2717,9 +2717,11 @@
 {
 	struct rk_priv_data *bsp_priv = priv;
 	struct device *dev = &bsp_priv->pdev->dev;
+	unsigned char ethaddr[ETH_ALEN * MAX_ETH] = {0};
+	int ret, id = bsp_priv->bus_id;
 	int i;
 
-#if 0
+#if 1
 	if (is_valid_ether_addr(addr))
 		goto out;
 
@@ -2751,7 +2753,7 @@
 	}
 #endif
 		
-	#if 1
+	#if 0
         if (at24_mac_read(macaddr) > 0) {
                 printk("ben %s: at24_mac_read Success!! \n", __func__);
                 memcpy(addr, macaddr, 6);
@@ -2938,7 +2940,7 @@
 		.of_match_table = rk_gmac_dwmac_match,
 	},
 };
-module_platform_driver1(rk_gmac_dwmac_driver);
+module_platform_driver(rk_gmac_dwmac_driver);
 
 MODULE_AUTHOR("Chen-Zhi (Roger Chen) <roger.chen@rock-chips.com>");
 MODULE_DESCRIPTION("Rockchip RK3288 DWMAC specific glue layer");
diff --git a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
index 6f1074a..a9dc4fc 100644
--- a/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
+++ b/kernel/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c
@@ -2353,8 +2353,8 @@
  */
 static void stmmac_check_ether_addr(struct stmmac_priv *priv)
 {
-//	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
-	if(1) {
+	if (!is_valid_ether_addr(priv->dev->dev_addr)) {
+//	if(1) {
 		stmmac_get_umac_addr(priv, priv->hw, priv->dev->dev_addr, 0);
 		if (likely(priv->plat->get_eth_addr))
 			priv->plat->get_eth_addr(priv->plat->bsp_priv,
diff --git a/kernel/drivers/pci/controller/dwc/pcie-designware-host.c b/kernel/drivers/pci/controller/dwc/pcie-designware-host.c
index 1cb04f4..0b4c19f 100644
--- a/kernel/drivers/pci/controller/dwc/pcie-designware-host.c
+++ b/kernel/drivers/pci/controller/dwc/pcie-designware-host.c
@@ -44,6 +44,7 @@
 	.irq_ack = dw_msi_ack_irq,
 	.irq_mask = dw_msi_mask_irq,
 	.irq_unmask = dw_msi_unmask_irq,
+	.flags = IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info dw_pcie_msi_domain_info = {
diff --git a/kernel/drivers/pci/controller/pcie-brcmstb.c b/kernel/drivers/pci/controller/pcie-brcmstb.c
index 9c3d298..452e2bb 100644
--- a/kernel/drivers/pci/controller/pcie-brcmstb.c
+++ b/kernel/drivers/pci/controller/pcie-brcmstb.c
@@ -457,6 +457,7 @@
 	.irq_ack         = irq_chip_ack_parent,
 	.irq_mask        = pci_msi_mask_irq,
 	.irq_unmask      = pci_msi_unmask_irq,
+	.flags           = IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct msi_domain_info brcm_msi_domain_info = {
@@ -520,6 +521,7 @@
 	.irq_compose_msi_msg	= brcm_msi_compose_msi_msg,
 	.irq_set_affinity	= brcm_msi_set_affinity,
 	.irq_ack                = brcm_msi_ack_irq,
+	.flags                  = IRQCHIP_PIPELINE_SAFE,
 };
 
 static int brcm_msi_alloc(struct brcm_msi *msi)
diff --git a/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c b/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c
index 6768b2f..e37dd66 100644
--- a/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c
+++ b/kernel/drivers/pinctrl/bcm/pinctrl-bcm2835.c
@@ -88,7 +88,7 @@
 	struct pinctrl_desc pctl_desc;
 	struct pinctrl_gpio_range gpio_range;
 
-	raw_spinlock_t irq_lock[BCM2835_NUM_BANKS];
+	hard_spinlock_t irq_lock[BCM2835_NUM_BANKS];
 };
 
 /* pins are just named GPIO0..GPIO53 */
@@ -678,7 +678,7 @@
 	.irq_mask = bcm2835_gpio_irq_disable,
 	.irq_unmask = bcm2835_gpio_irq_enable,
 	.irq_set_wake = bcm2835_gpio_irq_set_wake,
-	.flags = IRQCHIP_MASK_ON_SUSPEND,
+	.flags = IRQCHIP_MASK_ON_SUSPEND|IRQCHIP_PIPELINE_SAFE,
 };
 
 static int bcm2835_pctl_get_groups_count(struct pinctrl_dev *pctldev)
diff --git a/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c b/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c
index 2ed17cd..80ee69a 100644
--- a/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c
+++ b/kernel/drivers/pinctrl/intel/pinctrl-cherryview.c
@@ -562,7 +562,7 @@
  * See Intel Atom Z8000 Processor Series Specification Update (Rev. 005),
  * errata #CHT34, for further information.
  */
-static DEFINE_RAW_SPINLOCK(chv_lock);
+static DEFINE_HARD_SPINLOCK(chv_lock);
 
 static u32 chv_pctrl_readl(struct intel_pinctrl *pctrl, unsigned int offset)
 {
@@ -1554,7 +1554,8 @@
 	pctrl->irqchip.irq_mask = chv_gpio_irq_mask;
 	pctrl->irqchip.irq_unmask = chv_gpio_irq_unmask;
 	pctrl->irqchip.irq_set_type = chv_gpio_irq_type;
-	pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE;
+	pctrl->irqchip.flags = IRQCHIP_SKIP_SET_WAKE |
+				IRQCHIP_PIPELINE_SAFE;
 
 	chip->irq.chip = &pctrl->irqchip;
 	chip->irq.init_hw = chv_gpio_irq_init_hw;
diff --git a/kernel/drivers/pinctrl/qcom/pinctrl-msm.c b/kernel/drivers/pinctrl/qcom/pinctrl-msm.c
index a3cef80..c9e4452 100644
--- a/kernel/drivers/pinctrl/qcom/pinctrl-msm.c
+++ b/kernel/drivers/pinctrl/qcom/pinctrl-msm.c
@@ -68,7 +68,7 @@
 
 	bool intr_target_use_scm;
 
-	raw_spinlock_t lock;
+	hard_spinlock_t lock;
 
 	DECLARE_BITMAP(dual_edge_irqs, MAX_NR_GPIO);
 	DECLARE_BITMAP(enabled_irqs, MAX_NR_GPIO);
@@ -1273,7 +1273,8 @@
 	pctrl->irq_chip.irq_set_vcpu_affinity = msm_gpio_irq_set_vcpu_affinity;
 	pctrl->irq_chip.flags = IRQCHIP_MASK_ON_SUSPEND |
 				IRQCHIP_SET_TYPE_MASKED |
-				IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND;
+				IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND |
+				IRQCHIP_PIPELINE_SAFE;
 
 	np = of_parse_phandle(pctrl->dev->of_node, "wakeup-parent", 0);
 	if (np) {
diff --git a/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c b/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c
index 493079a..7da7f80 100644
--- a/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c
+++ b/kernel/drivers/pinctrl/samsung/pinctrl-exynos.c
@@ -58,13 +58,13 @@
 	unsigned int mask;
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	mask = readl(bank->eint_base + reg_mask);
 	mask |= 1 << irqd->hwirq;
 	writel(mask, bank->eint_base + reg_mask);
 
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 }
 
 static void exynos_irq_ack(struct irq_data *irqd)
@@ -97,13 +97,13 @@
 	if (irqd_get_trigger_type(irqd) & IRQ_TYPE_LEVEL_MASK)
 		exynos_irq_ack(irqd);
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	mask = readl(bank->eint_base + reg_mask);
 	mask &= ~(1 << irqd->hwirq);
 	writel(mask, bank->eint_base + reg_mask);
 
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 }
 
 static int exynos_irq_set_type(struct irq_data *irqd, unsigned int type)
@@ -169,14 +169,14 @@
 	shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
 	mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	con = readl(bank->pctl_base + reg_con);
 	con &= ~(mask << shift);
 	con |= EXYNOS_PIN_FUNC_EINT << shift;
 	writel(con, bank->pctl_base + reg_con);
 
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 
 	return 0;
 }
@@ -192,14 +192,14 @@
 	shift = irqd->hwirq * bank_type->fld_width[PINCFG_TYPE_FUNC];
 	mask = (1 << bank_type->fld_width[PINCFG_TYPE_FUNC]) - 1;
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	con = readl(bank->pctl_base + reg_con);
 	con &= ~(mask << shift);
 	con |= EXYNOS_PIN_FUNC_INPUT << shift;
 	writel(con, bank->pctl_base + reg_con);
 
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 
 	gpiochip_unlock_as_irq(&bank->gpio_chip, irqd->hwirq);
 }
@@ -216,6 +216,7 @@
 		.irq_set_type = exynos_irq_set_type,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS_GPIO_ECON_OFFSET,
 	.eint_mask = EXYNOS_GPIO_EMASK_OFFSET,
@@ -287,7 +288,7 @@
 	}
 
 	ret = devm_request_irq(dev, d->irq, exynos_eint_gpio_irq,
-					0, dev_name(dev), d);
+					IRQF_OOB, dev_name(dev), d);
 	if (ret) {
 		dev_err(dev, "irq request failed\n");
 		return -ENXIO;
@@ -305,6 +306,7 @@
 			goto err_domains;
 		}
 		bank->irq_chip->chip.name = bank->name;
+		bank->irq_chip->chip.flags |= IRQCHIP_PIPELINE_SAFE;
 
 		bank->irq_domain = irq_domain_add_linear(bank->of_node,
 				bank->nr_pins, &exynos_eint_irqd_ops, bank);
@@ -408,6 +410,7 @@
 		.irq_set_wake = exynos_wkup_irq_set_wake,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS_WKUP_ECON_OFFSET,
 	.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
@@ -428,6 +431,7 @@
 		.irq_set_wake = exynos_wkup_irq_set_wake,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS_WKUP_ECON_OFFSET,
 	.eint_mask = EXYNOS_WKUP_EMASK_OFFSET,
@@ -447,6 +451,7 @@
 		.irq_set_wake = exynos_wkup_irq_set_wake,
 		.irq_request_resources = exynos_irq_request_resources,
 		.irq_release_resources = exynos_irq_release_resources,
+		.flags = IRQCHIP_PIPELINE_SAFE,
 	},
 	.eint_con = EXYNOS7_WKUP_ECON_OFFSET,
 	.eint_mask = EXYNOS7_WKUP_EMASK_OFFSET,
diff --git a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c
index 56fff83..f88f9f9 100644
--- a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c
+++ b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.c
@@ -400,14 +400,14 @@
 		reg += 4;
 	}
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	data = readl(reg + type->reg_offset[PINCFG_TYPE_FUNC]);
 	data &= ~(mask << shift);
 	data |= func->val << shift;
 	writel(data, reg + type->reg_offset[PINCFG_TYPE_FUNC]);
 
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 }
 
 /* enable a specified pinmux by writing to registers */
@@ -451,7 +451,7 @@
 	width = type->fld_width[cfg_type];
 	cfg_reg = type->reg_offset[cfg_type];
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 
 	mask = (1 << width) - 1;
 	shift = pin_offset * width;
@@ -468,7 +468,7 @@
 		*config = PINCFG_PACK(cfg_type, data);
 	}
 
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 
 	return 0;
 }
@@ -561,9 +561,9 @@
 	struct samsung_pin_bank *bank = gpiochip_get_data(gc);
 	unsigned long flags;
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 	samsung_gpio_set_value(gc, offset, value);
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 }
 
 /* gpiolib gpio_get callback function */
@@ -626,9 +626,9 @@
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 	ret = samsung_gpio_set_direction(gc, offset, true);
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 	return ret;
 }
 
@@ -640,10 +640,10 @@
 	unsigned long flags;
 	int ret;
 
-	spin_lock_irqsave(&bank->slock, flags);
+	raw_spin_lock_irqsave(&bank->slock, flags);
 	samsung_gpio_set_value(gc, offset, value);
 	ret = samsung_gpio_set_direction(gc, offset, false);
-	spin_unlock_irqrestore(&bank->slock, flags);
+	raw_spin_unlock_irqrestore(&bank->slock, flags);
 
 	return ret;
 }
@@ -1067,7 +1067,7 @@
 		bank->eint_offset = bdata->eint_offset;
 		bank->name = bdata->name;
 
-		spin_lock_init(&bank->slock);
+		raw_spin_lock_init(&bank->slock);
 		bank->drvdata = d;
 		bank->pin_base = d->nr_pins;
 		d->nr_pins += bank->nr_pins;
diff --git a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h
index 379f34a..59ce47a 100644
--- a/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h
+++ b/kernel/drivers/pinctrl/samsung/pinctrl-samsung.h
@@ -171,7 +171,7 @@
 	struct gpio_chip gpio_chip;
 	struct pinctrl_gpio_range grange;
 	struct exynos_irq_chip *irq_chip;
-	spinlock_t slock;
+	hard_spinlock_t slock;
 
 	u32 pm_save[PINCFG_TYPE_NUM + 1]; /* +1 to handle double CON registers*/
 };
diff --git a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c
index e4b41cc..a6170c0 100644
--- a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c
+++ b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.c
@@ -1086,7 +1086,7 @@
 	.irq_release_resources = sunxi_pinctrl_irq_release_resources,
 	.irq_set_type	= sunxi_pinctrl_irq_set_type,
 	.irq_set_wake	= sunxi_pinctrl_irq_set_wake,
-	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE,
 };
 
 static struct irq_chip sunxi_pinctrl_level_irq_chip = {
@@ -1104,7 +1104,8 @@
 	.irq_set_wake	= sunxi_pinctrl_irq_set_wake,
 	.flags		= IRQCHIP_EOI_THREADED |
 			  IRQCHIP_MASK_ON_SUSPEND |
-			  IRQCHIP_EOI_IF_HANDLED,
+			  IRQCHIP_EOI_IF_HANDLED |
+			  IRQCHIP_PIPELINE_SAFE,
 };
 
 static int sunxi_pinctrl_irq_of_xlate(struct irq_domain *d,
diff --git a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h
index a32bb5b..a1849aa 100644
--- a/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h
+++ b/kernel/drivers/pinctrl/sunxi/pinctrl-sunxi.h
@@ -167,7 +167,7 @@
 	unsigned			ngroups;
 	int				*irq;
 	unsigned			*irq_array;
-	raw_spinlock_t			lock;
+	hard_spinlock_t			lock;
 	struct pinctrl_dev		*pctl_dev;
 	unsigned long			variant;
 };
diff --git a/kernel/drivers/soc/qcom/smp2p.c b/kernel/drivers/soc/qcom/smp2p.c
index fb76c8b..9eecb34 100644
--- a/kernel/drivers/soc/qcom/smp2p.c
+++ b/kernel/drivers/soc/qcom/smp2p.c
@@ -281,6 +281,7 @@
 	.irq_mask       = smp2p_mask_irq,
 	.irq_unmask     = smp2p_unmask_irq,
 	.irq_set_type	= smp2p_set_irq_type,
+	.flags		= IRQCHIP_PIPELINE_SAFE,
 };
 
 static int smp2p_irq_map(struct irq_domain *d,
diff --git a/kernel/drivers/soc/ti/ti_sci_inta_msi.c b/kernel/drivers/soc/ti/ti_sci_inta_msi.c
index 0eb9462..21d222b 100644
--- a/kernel/drivers/soc/ti/ti_sci_inta_msi.c
+++ b/kernel/drivers/soc/ti/ti_sci_inta_msi.c
@@ -42,6 +42,7 @@
 	chip->irq_unmask = irq_chip_unmask_parent;
 	chip->irq_mask = irq_chip_mask_parent;
 	chip->irq_ack = irq_chip_ack_parent;
+	chip->flags |= IRQCHIP_PIPELINE_SAFE;
 }
 
 struct irq_domain *ti_sci_inta_msi_create_irq_domain(struct fwnode_handle *fwnode,
diff --git a/kernel/drivers/spi/Kconfig b/kernel/drivers/spi/Kconfig
index 7a2cb27..a853a43 100644
--- a/kernel/drivers/spi/Kconfig
+++ b/kernel/drivers/spi/Kconfig
@@ -32,6 +32,10 @@
 	  Say "yes" to enable debug messaging (like dev_dbg and pr_debug),
 	  sysfs, and debugfs support in SPI controller and protocol drivers.
 
+config SPI_OOB
+	def_bool n
+	depends on HAS_DMA && DOVETAIL
+
 #
 # MASTER side ... talking to discrete SPI slave chips including microcontrollers
 #
@@ -138,6 +142,13 @@
 	  is for the regular SPI controller. Slave mode operation is also
 	  not supported.
 
+config SPI_BCM2835_OOB
+	bool "Out-of-band support for BCM2835 SPI controller"
+	depends on SPI_BCM2835 && DOVETAIL
+	select SPI_OOB
+	help
+	  Enable out-of-band cyclic transfers.
+
 config SPI_BCM2835AUX
 	tristate "BCM2835 SPI auxiliary controller"
 	depends on ((ARCH_BCM2835 || ARCH_BRCMSTB) && GPIOLIB) || COMPILE_TEST
diff --git a/kernel/drivers/spi/spi-bcm2835.c b/kernel/drivers/spi/spi-bcm2835.c
index bb9d838..4a3dbc0 100644
--- a/kernel/drivers/spi/spi-bcm2835.c
+++ b/kernel/drivers/spi/spi-bcm2835.c
@@ -1079,17 +1079,10 @@
 	return 0;
 }
 
-static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
-				    struct spi_device *spi,
-				    struct spi_transfer *tfr)
+static unsigned long bcm2835_get_clkdiv(struct bcm2835_spi *bs, u32 spi_hz,
+					u32 *effective_speed_hz)
 {
-	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
-	unsigned long spi_hz, cdiv;
-	unsigned long hz_per_byte, byte_limit;
-	u32 cs = bs->prepare_cs[spi->chip_select];
-
-	/* set clock */
-	spi_hz = tfr->speed_hz;
+	unsigned long cdiv;
 
 	if (spi_hz >= bs->clk_hz / 2) {
 		cdiv = 2; /* clk_hz/2 is the fastest we can go */
@@ -1103,7 +1096,25 @@
 	} else {
 		cdiv = 0; /* 0 is the slowest we can go */
 	}
-	tfr->effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
+
+	*effective_speed_hz = cdiv ? (bs->clk_hz / cdiv) : (bs->clk_hz / 65536);
+
+	return cdiv;
+}
+
+static int bcm2835_spi_transfer_one(struct spi_controller *ctlr,
+				    struct spi_device *spi,
+				    struct spi_transfer *tfr)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+	unsigned long spi_hz, cdiv;
+	unsigned long hz_per_byte, byte_limit;
+	u32 cs = bs->prepare_cs[spi->chip_select];
+
+	/* set clock */
+	spi_hz = tfr->speed_hz;
+
+	cdiv = bcm2835_get_clkdiv(bs, spi_hz, &tfr->effective_speed_hz);
 	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
 
 	/* handle all the 3-wire mode */
@@ -1283,6 +1294,68 @@
 	return 0;
 }
 
+#ifdef CONFIG_SPI_BCM2835_OOB
+
+static int bcm2835_spi_prepare_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	/*
+	 * The size of a transfer is limited by DLEN, which is 16 bits
+	 * wide, and we don't want to scatter transfers in out-of-band
+	 * mode, so cap the frame size accordingly.
+	 */
+	if (xfer->setup.frame_len > 65532)
+		return -EINVAL;
+
+	return 0;
+}
+
+static void bcm2835_spi_start_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+	struct spi_device *spi = xfer->spi;
+	u32 cs = bs->prepare_cs[spi->chip_select], effective_speed_hz;
+	unsigned long cdiv;
+
+	/* See bcm2835_spi_prepare_message(). */
+	bcm2835_wr(bs, BCM2835_SPI_CS, cs);
+
+	cdiv = bcm2835_get_clkdiv(bs, xfer->setup.speed_hz, &effective_speed_hz);
+	xfer->effective_speed_hz = effective_speed_hz;
+	bcm2835_wr(bs, BCM2835_SPI_CLK, cdiv);
+	bcm2835_wr(bs, BCM2835_SPI_DLEN, xfer->setup.frame_len);
+
+	if (spi->mode & SPI_3WIRE)
+		cs |= BCM2835_SPI_CS_REN;
+	bcm2835_wr(bs, BCM2835_SPI_CS,
+		   cs | BCM2835_SPI_CS_TA | BCM2835_SPI_CS_DMAEN);
+}
+
+static void bcm2835_spi_pulse_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+	/* Reload DLEN for the next pulse. */
+	bcm2835_wr(bs, BCM2835_SPI_DLEN, xfer->setup.frame_len);
+}
+
+static void bcm2835_spi_terminate_oob_transfer(struct spi_controller *ctlr,
+					struct spi_oob_transfer *xfer)
+{
+	struct bcm2835_spi *bs = spi_controller_get_devdata(ctlr);
+
+	bcm2835_spi_reset_hw(bs);
+}
+
+#else
+#define bcm2835_spi_prepare_oob_transfer	NULL
+#define bcm2835_spi_start_oob_transfer		NULL
+#define bcm2835_spi_pulse_oob_transfer		NULL
+#define bcm2835_spi_terminate_oob_transfer	NULL
+#endif
+
 static int bcm2835_spi_probe(struct platform_device *pdev)
 {
 	struct spi_controller *ctlr;
@@ -1304,6 +1377,10 @@
 	ctlr->transfer_one = bcm2835_spi_transfer_one;
 	ctlr->handle_err = bcm2835_spi_handle_err;
 	ctlr->prepare_message = bcm2835_spi_prepare_message;
+	ctlr->prepare_oob_transfer = bcm2835_spi_prepare_oob_transfer;
+	ctlr->start_oob_transfer = bcm2835_spi_start_oob_transfer;
+	ctlr->pulse_oob_transfer = bcm2835_spi_pulse_oob_transfer;
+	ctlr->terminate_oob_transfer = bcm2835_spi_terminate_oob_transfer;
 	ctlr->dev.of_node = pdev->dev.of_node;
 
 	bs = spi_controller_get_devdata(ctlr);
diff --git a/kernel/drivers/spi/spi.c b/kernel/drivers/spi/spi.c
index b1a638d..206e245 100644
--- a/kernel/drivers/spi/spi.c
+++ b/kernel/drivers/spi/spi.c
@@ -2729,6 +2729,9 @@
 	spin_lock_init(&ctlr->bus_lock_spinlock);
 	mutex_init(&ctlr->bus_lock_mutex);
 	mutex_init(&ctlr->io_mutex);
+#ifdef CONFIG_SPI_OOB
+	sema_init(&ctlr->bus_oob_lock_sem, 1);
+#endif
 	ctlr->bus_lock_flag = 0;
 	init_completion(&ctlr->xfer_completion);
 	if (!ctlr->max_dma_len)
@@ -3804,6 +3807,22 @@
  * inline functions.
  */
 
+static void get_spi_bus(struct spi_controller *ctlr)
+{
+	mutex_lock(&ctlr->bus_lock_mutex);
+#ifdef CONFIG_SPI_OOB
+	down(&ctlr->bus_oob_lock_sem);
+#endif
+}
+
+static void put_spi_bus(struct spi_controller *ctlr)
+{
+#ifdef CONFIG_SPI_OOB
+	up(&ctlr->bus_oob_lock_sem);
+#endif
+	mutex_unlock(&ctlr->bus_lock_mutex);
+}
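+
+/*
+ * Lock ordering note: both paths acquire bus_lock_mutex first; the
+ * out-of-band path additionally try-locks bus_oob_lock_sem and keeps it
+ * for the whole oob session, so regular transfers stay off the bus until
+ * spi_terminate_oob_transfer() releases it.
+ */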
+
 static void spi_complete(void *arg)
 {
 	complete(arg);
@@ -3888,9 +3907,9 @@
 {
 	int ret;
 
-	mutex_lock(&spi->controller->bus_lock_mutex);
+	get_spi_bus(spi->controller);
 	ret = __spi_sync(spi, message);
-	mutex_unlock(&spi->controller->bus_lock_mutex);
+	put_spi_bus(spi->controller);
 
 	return ret;
 }
@@ -3937,7 +3956,7 @@
 {
 	unsigned long flags;
 
-	mutex_lock(&ctlr->bus_lock_mutex);
+	get_spi_bus(ctlr);
 
 	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
 	ctlr->bus_lock_flag = 1;
@@ -3966,7 +3985,7 @@
 {
 	ctlr->bus_lock_flag = 0;
 
-	mutex_unlock(&ctlr->bus_lock_mutex);
+	put_spi_bus(ctlr);
 
 	return 0;
 }
@@ -4051,6 +4070,274 @@
 }
 EXPORT_SYMBOL_GPL(spi_write_then_read);
 
+#ifdef CONFIG_SPI_OOB
+
+static int bus_lock_oob(struct spi_controller *ctlr)
+{
+	unsigned long flags;
+	int ret = -EBUSY;
+
+	mutex_lock(&ctlr->bus_lock_mutex);
+
+	spin_lock_irqsave(&ctlr->bus_lock_spinlock, flags);
+
+	if (!ctlr->bus_lock_flag && !down_trylock(&ctlr->bus_oob_lock_sem)) {
+		ctlr->bus_lock_flag = 1;
+		ret = 0;
+	}
+
+	spin_unlock_irqrestore(&ctlr->bus_lock_spinlock, flags);
+
+	mutex_unlock(&ctlr->bus_lock_mutex);
+
+	return ret;
+}
+
+static int bus_unlock_oob(struct spi_controller *ctlr)
+{
+	ctlr->bus_lock_flag = 0;
+	up(&ctlr->bus_oob_lock_sem);
+
+	return 0;
+}
+
+static int prepare_oob_dma(struct spi_controller *ctlr,
+			struct spi_oob_transfer *xfer)
+{
+	struct dma_async_tx_descriptor *desc;
+	size_t len = xfer->setup.frame_len;
+	dma_cookie_t cookie;
+	dma_addr_t addr;
+	int ret;
+
+	/* TX to second half of I/O buffer. */
+	addr = xfer->dma_addr + xfer->aligned_frame_len;
+	desc = dmaengine_prep_slave_single(ctlr->dma_tx, addr, len,
+					DMA_MEM_TO_DEV,
+					DMA_OOB_INTERRUPT|DMA_OOB_PULSE);
+	if (!desc)
+		return -EIO;
+
+	xfer->txd = desc;
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		return ret;
+
+	dma_async_issue_pending(ctlr->dma_tx);
+
+	/* RX to first half of I/O buffer. */
+	addr = xfer->dma_addr;
+	desc = dmaengine_prep_slave_single(ctlr->dma_rx, addr, len,
+					DMA_DEV_TO_MEM,
+					DMA_OOB_INTERRUPT|DMA_OOB_PULSE);
+	if (!desc) {
+		ret = -EIO;
+		goto fail_rx;
+	}
+
+	desc->callback = xfer->setup.xfer_done;
+	desc->callback_param = xfer;
+
+	xfer->rxd = desc;
+	cookie = dmaengine_submit(desc);
+	ret = dma_submit_error(cookie);
+	if (ret)
+		goto fail_rx;
+
+	dma_async_issue_pending(ctlr->dma_rx);
+
+	return 0;
+
+fail_rx:
+	dmaengine_terminate_sync(ctlr->dma_tx);
+
+	return ret;
+}
+
+static void unprepare_oob_dma(struct spi_controller *ctlr)
+{
+	dmaengine_terminate_sync(ctlr->dma_rx);
+	dmaengine_terminate_sync(ctlr->dma_tx);
+}
+
+/*
+ * A simpler version of __spi_validate() for oob transfers.
+ */
+static int validate_oob_xfer(struct spi_device *spi,
+			struct spi_oob_transfer *xfer)
+{
+	struct spi_controller *ctlr = spi->controller;
+	struct spi_oob_setup *p = &xfer->setup;
+	int w_size;
+
+	if (p->frame_len == 0)
+		return -EINVAL;
+
+	if (!p->bits_per_word)
+		p->bits_per_word = spi->bits_per_word;
+
+	if (!p->speed_hz)
+		p->speed_hz = spi->max_speed_hz;
+
+	if (ctlr->max_speed_hz && p->speed_hz > ctlr->max_speed_hz)
+		p->speed_hz = ctlr->max_speed_hz;
+
+	if (__spi_validate_bits_per_word(ctlr, p->bits_per_word))
+		return -EINVAL;
+
+	if (p->bits_per_word <= 8)
+		w_size = 1;
+	else if (p->bits_per_word <= 16)
+		w_size = 2;
+	else
+		w_size = 4;
+
+	if (p->frame_len % w_size)
+		return -EINVAL;
+
+	if (p->speed_hz && ctlr->min_speed_hz &&
+		p->speed_hz < ctlr->min_speed_hz)
+		return -EINVAL;
+
+	return 0;
+}
+
+int spi_prepare_oob_transfer(struct spi_device *spi,
+			struct spi_oob_transfer *xfer)
+{
+	struct spi_controller *ctlr;
+	dma_addr_t dma_addr;
+	size_t alen, iolen;
+	void *iobuf;
+	int ret;
+
+	/* Controller must support oob transactions. */
+	ctlr = spi->controller;
+	if (!ctlr->prepare_oob_transfer)
+		return -ENOTSUPP;
+
+	/* Out-of-band transfers require DMA support. */
+	if (!ctlr->can_dma)
+		return -ENODEV;
+
+	ret = validate_oob_xfer(spi, xfer);
+	if (ret)
+		return ret;
+
+	alen = L1_CACHE_ALIGN(xfer->setup.frame_len);
+	/*
+	 * Allocate a single coherent I/O buffer which is twice as
+	 * large as the user specified transfer length, TX data goes
+	 * to the upper half, RX data to the lower half.
+	 */
+	iolen = alen * 2;
+	iobuf = dma_alloc_coherent(ctlr->dev.parent, iolen,
+				&dma_addr, GFP_KERNEL);
+	if (iobuf == NULL)
+		return -ENOMEM;
+
+	xfer->spi = spi;
+	xfer->dma_addr = dma_addr;
+	xfer->io_buffer = iobuf;
+	xfer->aligned_frame_len = alen;
+	xfer->effective_speed_hz = 0;
+
+	ret = prepare_oob_dma(ctlr, xfer);
+	if (ret)
+		goto fail_prep_dma;
+
+	ret = bus_lock_oob(ctlr);
+	if (ret)
+		goto fail_bus_lock;
+
+	ret = ctlr->prepare_oob_transfer(ctlr, xfer);
+	if (ret)
+		goto fail_prep_xfer;
+
+	return 0;
+
+fail_prep_xfer:
+	bus_unlock_oob(ctlr);
+fail_bus_lock:
+	unprepare_oob_dma(ctlr);
+fail_prep_dma:
+	dma_free_coherent(ctlr->dev.parent, iolen, iobuf, dma_addr);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_prepare_oob_transfer);
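+
+/*
+ * A minimal sketch of the intended calling sequence; the completion
+ * handler name and the numeric setup values are assumptions for
+ * illustration only.
+ *
+ *   struct spi_oob_transfer xfer = {
+ *           .setup = {
+ *                   .frame_len = 64,               // bytes per pulse
+ *                   .speed_hz  = 10000000,
+ *                   .xfer_done = my_oob_done,      // runs per completion
+ *           },
+ *   };
+ *   spi_prepare_oob_transfer(spi, &xfer);
+ *   spi_start_oob_transfer(&xfer);
+ *   spi_pulse_oob_transfer(&xfer);   // from the oob stage, once per cycle
+ *   ...
+ *   spi_terminate_oob_transfer(&xfer);
+ */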
+
+void spi_start_oob_transfer(struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+
+	ctlr->start_oob_transfer(ctlr, xfer);
+}
+EXPORT_SYMBOL_GPL(spi_start_oob_transfer);
+
+int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer) /* oob stage */
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+	int ret;
+
+	if (ctlr->pulse_oob_transfer)
+		ctlr->pulse_oob_transfer(ctlr, xfer);
+
+	ret = dma_pulse_oob(ctlr->dma_rx);
+	if (likely(!ret))
+		ret = dma_pulse_oob(ctlr->dma_tx);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_pulse_oob_transfer);
+
+void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+
+	if (ctlr->terminate_oob_transfer)
+		ctlr->terminate_oob_transfer(ctlr, xfer);
+
+	unprepare_oob_dma(ctlr);
+	bus_unlock_oob(ctlr);
+	dma_free_coherent(ctlr->dev.parent, xfer->aligned_frame_len * 2,
+			xfer->io_buffer, xfer->dma_addr);
+}
+EXPORT_SYMBOL_GPL(spi_terminate_oob_transfer);
+
+int spi_mmap_oob_transfer(struct vm_area_struct *vma,
+			struct spi_oob_transfer *xfer)
+{
+	struct spi_device *spi = xfer->spi;
+	struct spi_controller *ctlr = spi->controller;
+	size_t len;
+	int ret;
+
+	/*
+	 * We may have an IOMMU; rely on dma_mmap_coherent() to deal
+	 * with the nitty-gritty details of mapping a coherent buffer.
+	 */
+	len = vma->vm_end - vma->vm_start;
+	if (spi_get_oob_iolen(xfer) <= len)
+		ret = dma_mmap_coherent(ctlr->dev.parent,
+					vma,
+					xfer->io_buffer,
+					xfer->dma_addr,
+					len);
+	else
+		ret = -EINVAL;
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(spi_mmap_oob_transfer);
+
+#endif	/* CONFIG_SPI_OOB */
+
 /*-------------------------------------------------------------------------*/
 
 #if IS_ENABLED(CONFIG_OF)
diff --git a/kernel/drivers/spmi/spmi-pmic-arb.c b/kernel/drivers/spmi/spmi-pmic-arb.c
index e6de2ae..1066934 100644
--- a/kernel/drivers/spmi/spmi-pmic-arb.c
+++ b/kernel/drivers/spmi/spmi-pmic-arb.c
@@ -145,7 +145,7 @@
 	void __iomem		*cnfg;
 	void __iomem		*core;
 	resource_size_t		core_size;
-	raw_spinlock_t		lock;
+	hard_spinlock_t		lock;
 	u8			channel;
 	int			irq;
 	u8			ee;
@@ -684,7 +684,7 @@
 	.irq_set_type	= qpnpint_irq_set_type,
 	.irq_set_wake	= qpnpint_irq_set_wake,
 	.irq_get_irqchip_state	= qpnpint_get_irqchip_state,
-	.flags		= IRQCHIP_MASK_ON_SUSPEND,
+	.flags		= IRQCHIP_MASK_ON_SUSPEND|IRQCHIP_PIPELINE_SAFE,
 };
 
 static int qpnpint_irq_domain_translate(struct irq_domain *d,
diff --git a/kernel/drivers/tty/serial/8250/8250_core.c b/kernel/drivers/tty/serial/8250/8250_core.c
index 00f6dc7..da7ba88 100644
--- a/kernel/drivers/tty/serial/8250/8250_core.c
+++ b/kernel/drivers/tty/serial/8250/8250_core.c
@@ -675,6 +675,48 @@
 	return -ENODEV;
 }
 
+#ifdef CONFIG_RAW_PRINTK
+
+static void raw_write_char(struct uart_8250_port *up, int c)
+{
+	unsigned int status, tmout = 10000;
+
+	for (;;) {
+		status = serial_in(up, UART_LSR);
+		up->lsr_saved_flags |= status & LSR_SAVE_FLAGS;
+		if ((status & UART_LSR_THRE) == UART_LSR_THRE)
+			break;
+		if (--tmout == 0)
+			break;
+		cpu_relax();
+	}
+	serial_port_out(&up->port, UART_TX, c);
+}
+
+static void univ8250_console_write_raw(struct console *co, const char *s,
+				       unsigned int count)
+{
+	struct uart_8250_port *up = &serial8250_ports[co->index];
+	unsigned int ier;
+
+	ier = serial_in(up, UART_IER);
+
+	if (up->capabilities & UART_CAP_UUE)
+		serial_out(up, UART_IER, UART_IER_UUE);
+	else
+		serial_out(up, UART_IER, 0);
+
+	while (count-- > 0) {
+		if (*s == '\n')
+			raw_write_char(up, '\r');
+		raw_write_char(up, *s++);
+	}
+
+	serial_out(up, UART_IER, ier);
+}
+
+#endif
+
 static struct console univ8250_console = {
 	.name		= "ttyS",
 	.write		= univ8250_console_write,
@@ -682,6 +724,9 @@
 	.setup		= univ8250_console_setup,
 	.exit		= univ8250_console_exit,
 	.match		= univ8250_console_match,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= univ8250_console_write_raw,
+#endif
 	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
 	.index		= -1,
 	.data		= &serial8250_reg,
diff --git a/kernel/drivers/tty/serial/amba-pl011.c b/kernel/drivers/tty/serial/amba-pl011.c
index 9900ee3..6ea53e1 100644
--- a/kernel/drivers/tty/serial/amba-pl011.c
+++ b/kernel/drivers/tty/serial/amba-pl011.c
@@ -1887,6 +1887,8 @@
 
 	pl011_disable_uart(uap);
 
+	if (IS_ENABLED(CONFIG_RAW_PRINTK))
+		clk_disable(uap->clk);
 	/*
 	 * Shut down the clock producer
 	 */
@@ -2194,6 +2196,37 @@
 	pl011_write(ch, uap, REG_DR);
 }
 
+#ifdef CONFIG_RAW_PRINTK
+
+static void
+pl011_console_write_raw(struct console *co, const char *s, unsigned int count)
+{
+	struct uart_amba_port *uap = amba_ports[co->index];
+	unsigned int old_cr = 0, new_cr;
+
+	if (!uap->vendor->always_enabled) {
+		old_cr = pl011_read(uap, REG_CR);
+		new_cr = old_cr & ~UART011_CR_CTSEN;
+		new_cr |= UART01x_CR_UARTEN | UART011_CR_TXE;
+		pl011_write(new_cr, uap, REG_CR);
+	}
+
+	while (count-- > 0) {
+		if (*s == '\n')
+			pl011_console_putchar(&uap->port, '\r');
+		pl011_console_putchar(&uap->port, *s++);
+	}
+
+	while ((pl011_read(uap, REG_FR) ^ uap->vendor->inv_fr)
+		& uap->vendor->fr_busy)
+		cpu_relax();
+
+	if (!uap->vendor->always_enabled)
+		pl011_write(old_cr, uap, REG_CR);
+}
+
+#endif  /* CONFIG_RAW_PRINTK */
+
 static void
 pl011_console_write(struct console *co, const char *s, unsigned int count)
 {
@@ -2323,6 +2356,9 @@
 			pl011_console_get_options(uap, &baud, &parity, &bits);
 	}
 
+	if (IS_ENABLED(CONFIG_RAW_PRINTK))
+		clk_enable(uap->clk);
+
 	return uart_set_options(&uap->port, co, baud, parity, bits, flow);
 }
 
@@ -2393,6 +2429,9 @@
 	.device		= uart_console_device,
 	.setup		= pl011_console_setup,
 	.match		= pl011_console_match,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= pl011_console_write_raw,
+#endif
 	.flags		= CON_PRINTBUFFER | CON_ANYTIME,
 	.index		= -1,
 	.data		= &amba_reg,
diff --git a/kernel/drivers/tty/serial/imx.c b/kernel/drivers/tty/serial/imx.c
index 164597e..fa7d220 100644
--- a/kernel/drivers/tty/serial/imx.c
+++ b/kernel/drivers/tty/serial/imx.c
@@ -1998,24 +1998,11 @@
 	imx_uart_writel(sport, ch, URTX0);
 }
 
-/*
- * Interrupts are disabled on entering
- */
 static void
-imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+__imx_uart_console_write(struct imx_port *sport, const char *s, unsigned int count)
 {
-	struct imx_port *sport = imx_uart_ports[co->index];
 	struct imx_port_ucrs old_ucr;
 	unsigned int ucr1;
-	unsigned long flags = 0;
-	int locked = 1;
-
-	if (sport->port.sysrq)
-		locked = 0;
-	else if (oops_in_progress)
-		locked = spin_trylock_irqsave(&sport->port.lock, flags);
-	else
-		spin_lock_irqsave(&sport->port.lock, flags);
 
 	/*
 	 *	First, save UCR1/2/3 and then disable interrupts
@@ -2041,10 +2028,40 @@
 	while (!(imx_uart_readl(sport, USR2) & USR2_TXDC));
 
 	imx_uart_ucrs_restore(sport, &old_ucr);
+}
+
+/*
+ * Interrupts are disabled on entering
+ */
+static void
+imx_uart_console_write(struct console *co, const char *s, unsigned int count)
+{
+	struct imx_port *sport = imx_uart_ports[co->index];
+	unsigned long flags;
+	int locked = 1;
+
+	if (sport->port.sysrq)
+		locked = 0;
+	else if (oops_in_progress)
+		locked = spin_trylock_irqsave(&sport->port.lock, flags);
+	else
+		spin_lock_irqsave(&sport->port.lock, flags);
+
+	__imx_uart_console_write(sport, s, count);
 
 	if (locked)
 		spin_unlock_irqrestore(&sport->port.lock, flags);
 }
+
+#ifdef CONFIG_RAW_PRINTK
+static void
+imx_uart_console_write_raw(struct console *co, const char *s, unsigned int count)
+{
+	struct imx_port *sport = imx_uart_ports[co->index];
+
+	__imx_uart_console_write(sport, s, count);
+}
+#endif
 
 /*
  * If the port was already initialised (eg, by a boot loader),
@@ -2161,6 +2178,9 @@
 static struct console imx_uart_console = {
 	.name		= DEV_NAME,
 	.write		= imx_uart_console_write,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= imx_uart_console_write_raw,
+#endif
 	.device		= uart_console_device,
 	.setup		= imx_uart_console_setup,
 	.flags		= CON_PRINTBUFFER,
diff --git a/kernel/drivers/tty/serial/samsung_tty.c b/kernel/drivers/tty/serial/samsung_tty.c
index 263c332..a36d1f4 100644
--- a/kernel/drivers/tty/serial/samsung_tty.c
+++ b/kernel/drivers/tty/serial/samsung_tty.c
@@ -2367,6 +2367,10 @@
 	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
 	.write		= s3c24xx_serial_console_write,
+#ifdef CONFIG_RAW_PRINTK
+	/* The common write handler can run from atomic context. */
+	.write_raw	= s3c24xx_serial_console_write,
+#endif
 	.setup		= s3c24xx_serial_console_setup,
 	.data		= &s3c24xx_uart_drv,
 };
diff --git a/kernel/drivers/tty/serial/st-asc.c b/kernel/drivers/tty/serial/st-asc.c
index 97d36f8..1239fd5 100644
--- a/kernel/drivers/tty/serial/st-asc.c
+++ b/kernel/drivers/tty/serial/st-asc.c
@@ -908,6 +908,29 @@
 		spin_unlock_irqrestore(&port->lock, flags);
 }
 
+#ifdef CONFIG_RAW_PRINTK
+
+static void asc_console_write_raw(struct console *co,
+				  const char *s, unsigned int count)
+{
+	struct uart_port *port = &asc_ports[co->index].port;
+	unsigned long timeout = 1000000;
+	u32 intenable;
+
+	intenable = asc_in(port, ASC_INTEN);
+	asc_out(port, ASC_INTEN, 0);
+	(void)asc_in(port, ASC_INTEN);	/* Defeat bus write posting */
+
+	uart_console_write(port, s, count, asc_console_putchar);
+
+	while (timeout-- && !asc_txfifo_is_empty(port))
+		cpu_relax();	/* busy-wait briefly for the FIFO to drain */
+
+	asc_out(port, ASC_INTEN, intenable);
+}
+
+#endif
+
 static int asc_console_setup(struct console *co, char *options)
 {
 	struct asc_port *ascport;
@@ -940,6 +963,9 @@
 	.name		= ASC_SERIAL_NAME,
 	.device		= uart_console_device,
 	.write		= asc_console_write,
+#ifdef CONFIG_RAW_PRINTK
+	.write_raw	= asc_console_write_raw,
+#endif
 	.setup		= asc_console_setup,
 	.flags		= CON_PRINTBUFFER,
 	.index		= -1,
diff --git a/kernel/drivers/xenomai/Kconfig b/kernel/drivers/xenomai/Kconfig
new file mode 120000
index 0000000..1481352
--- /dev/null
+++ b/kernel/drivers/xenomai/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/Makefile b/kernel/drivers/xenomai/Makefile
new file mode 120000
index 0000000..2865738
--- /dev/null
+++ b/kernel/drivers/xenomai/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/Kconfig b/kernel/drivers/xenomai/analogy/Kconfig
new file mode 120000
index 0000000..178cf30
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/Makefile b/kernel/drivers/xenomai/analogy/Makefile
new file mode 120000
index 0000000..8eb912c
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/buffer.c b/kernel/drivers/xenomai/analogy/buffer.c
new file mode 120000
index 0000000..d9a8285
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/buffer.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/command.c b/kernel/drivers/xenomai/analogy/command.c
new file mode 120000
index 0000000..173bc3e
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/command.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/device.c b/kernel/drivers/xenomai/analogy/device.c
new file mode 120000
index 0000000..f98ec1d
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/device.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/driver.c b/kernel/drivers/xenomai/analogy/driver.c
new file mode 120000
index 0000000..176187a
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/driver.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/driver_facilities.c b/kernel/drivers/xenomai/analogy/driver_facilities.c
new file mode 120000
index 0000000..f10c5e4
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/driver_facilities.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/instruction.c b/kernel/drivers/xenomai/analogy/instruction.c
new file mode 120000
index 0000000..d560444
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/instruction.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/intel/8255.c b/kernel/drivers/xenomai/analogy/intel/8255.c
new file mode 120000
index 0000000..f05950a
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/intel/8255.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/intel/8255.h b/kernel/drivers/xenomai/analogy/intel/8255.h
new file mode 120000
index 0000000..c03d400
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/intel/8255.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/intel/Kconfig b/kernel/drivers/xenomai/analogy/intel/Kconfig
new file mode 120000
index 0000000..c475508
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/intel/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/intel/Makefile b/kernel/drivers/xenomai/analogy/intel/Makefile
new file mode 120000
index 0000000..3cd46a8
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/intel/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/intel/parport.c b/kernel/drivers/xenomai/analogy/intel/parport.c
new file mode 120000
index 0000000..31342d5
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/intel/parport.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/Kconfig b/kernel/drivers/xenomai/analogy/national_instruments/Kconfig
new file mode 120000
index 0000000..dd8b00d
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/Makefile b/kernel/drivers/xenomai/analogy/national_instruments/Makefile
new file mode 120000
index 0000000..f474ec0
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/mio_common.c b/kernel/drivers/xenomai/analogy/national_instruments/mio_common.c
new file mode 120000
index 0000000..95dd21e
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/mio_common.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/mite.c b/kernel/drivers/xenomai/analogy/national_instruments/mite.c
new file mode 120000
index 0000000..f4629c7
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/mite.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/mite.h b/kernel/drivers/xenomai/analogy/national_instruments/mite.h
new file mode 120000
index 0000000..23e1b95
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/mite.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c b/kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c
new file mode 120000
index 0000000..70875ba
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_660x.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c b/kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c
new file mode 120000
index 0000000..40c9329
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_670x.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h b/kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h
new file mode 120000
index 0000000..f4d092b
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_mio.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h b/kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h
new file mode 120000
index 0000000..45ba968
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_stc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h b/kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h
new file mode 120000
index 0000000..68ddf7a
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/ni_tio.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/pcimio.c b/kernel/drivers/xenomai/analogy/national_instruments/pcimio.c
new file mode 120000
index 0000000..627dbf7
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/pcimio.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/national_instruments/tio_common.c b/kernel/drivers/xenomai/analogy/national_instruments/tio_common.c
new file mode 120000
index 0000000..0510312
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/national_instruments/tio_common.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/proc.h b/kernel/drivers/xenomai/analogy/proc.h
new file mode 120000
index 0000000..88c837d
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/proc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/rtdm_helpers.c b/kernel/drivers/xenomai/analogy/rtdm_helpers.c
new file mode 120000
index 0000000..f78bc88
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/rtdm_helpers.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/rtdm_interface.c b/kernel/drivers/xenomai/analogy/rtdm_interface.c
new file mode 120000
index 0000000..7bcf22b
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/rtdm_interface.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/sensoray/Kconfig b/kernel/drivers/xenomai/analogy/sensoray/Kconfig
new file mode 120000
index 0000000..2131de1
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/sensoray/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/sensoray/Makefile b/kernel/drivers/xenomai/analogy/sensoray/Makefile
new file mode 120000
index 0000000..a222ac1
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/sensoray/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/sensoray/s526.c b/kernel/drivers/xenomai/analogy/sensoray/s526.c
new file mode 120000
index 0000000..71deae2
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/sensoray/s526.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/subdevice.c b/kernel/drivers/xenomai/analogy/subdevice.c
new file mode 120000
index 0000000..8b7fb41
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/subdevice.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/testing/Kconfig b/kernel/drivers/xenomai/analogy/testing/Kconfig
new file mode 120000
index 0000000..0e8da90
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/testing/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/testing/Makefile b/kernel/drivers/xenomai/analogy/testing/Makefile
new file mode 120000
index 0000000..b6acebc
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/testing/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/testing/fake.c b/kernel/drivers/xenomai/analogy/testing/fake.c
new file mode 120000
index 0000000..0527926
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/testing/fake.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/testing/loop.c b/kernel/drivers/xenomai/analogy/testing/loop.c
new file mode 120000
index 0000000..c25e863
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/testing/loop.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/analogy/transfer.c b/kernel/drivers/xenomai/analogy/transfer.c
new file mode 120000
index 0000000..55096e0
--- /dev/null
+++ b/kernel/drivers/xenomai/analogy/transfer.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/autotune/Kconfig b/kernel/drivers/xenomai/autotune/Kconfig
new file mode 120000
index 0000000..f45b3de
--- /dev/null
+++ b/kernel/drivers/xenomai/autotune/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/autotune/Makefile b/kernel/drivers/xenomai/autotune/Makefile
new file mode 120000
index 0000000..c543ca7
--- /dev/null
+++ b/kernel/drivers/xenomai/autotune/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/autotune/autotune.c b/kernel/drivers/xenomai/autotune/autotune.c
new file mode 120000
index 0000000..49f359e
--- /dev/null
+++ b/kernel/drivers/xenomai/autotune/autotune.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/Kconfig b/kernel/drivers/xenomai/can/Kconfig
new file mode 120000
index 0000000..40c089f
--- /dev/null
+++ b/kernel/drivers/xenomai/can/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/Makefile b/kernel/drivers/xenomai/can/Makefile
new file mode 120000
index 0000000..7159870
--- /dev/null
+++ b/kernel/drivers/xenomai/can/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/mscan/Kconfig b/kernel/drivers/xenomai/can/mscan/Kconfig
new file mode 120000
index 0000000..d11e2d5
--- /dev/null
+++ b/kernel/drivers/xenomai/can/mscan/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/mscan/Makefile b/kernel/drivers/xenomai/can/mscan/Makefile
new file mode 120000
index 0000000..9ecd45e
--- /dev/null
+++ b/kernel/drivers/xenomai/can/mscan/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan.c b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.c
new file mode 120000
index 0000000..faa04b2
--- /dev/null
+++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan.h b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.h
new file mode 120000
index 0000000..9dbbc0f
--- /dev/null
+++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c
new file mode 120000
index 0000000..25b90a6
--- /dev/null
+++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_mpc5xxx.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c
new file mode 120000
index 0000000..3f0e38a
--- /dev/null
+++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_proc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h
new file mode 120000
index 0000000..84efea4
--- /dev/null
+++ b/kernel/drivers/xenomai/can/mscan/rtcan_mscan_regs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/peak_canfd/Kconfig b/kernel/drivers/xenomai/can/peak_canfd/Kconfig
new file mode 120000
index 0000000..15631af
--- /dev/null
+++ b/kernel/drivers/xenomai/can/peak_canfd/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/peak_canfd/Makefile b/kernel/drivers/xenomai/can/peak_canfd/Makefile
new file mode 120000
index 0000000..9f298b5
--- /dev/null
+++ b/kernel/drivers/xenomai/can/peak_canfd/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c
new file mode 120000
index 0000000..c19dcda
--- /dev/null
+++ b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h
new file mode 120000
index 0000000..ad581ca
--- /dev/null
+++ b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_canfd_user.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c
new file mode 120000
index 0000000..e390e68
--- /dev/null
+++ b/kernel/drivers/xenomai/can/peak_canfd/rtcan_peak_pciefd.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_dev.c b/kernel/drivers/xenomai/can/rtcan_dev.c
new file mode 120000
index 0000000..e89026d
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_dev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_dev.h b/kernel/drivers/xenomai/can/rtcan_dev.h
new file mode 120000
index 0000000..de5bf5a
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_dev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_flexcan.c b/kernel/drivers/xenomai/can/rtcan_flexcan.c
new file mode 120000
index 0000000..3135675
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_flexcan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_internal.h b/kernel/drivers/xenomai/can/rtcan_internal.h
new file mode 120000
index 0000000..a059d53
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_internal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_list.h b/kernel/drivers/xenomai/can/rtcan_list.h
new file mode 120000
index 0000000..4390be4
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_list.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_module.c b/kernel/drivers/xenomai/can/rtcan_module.c
new file mode 120000
index 0000000..dc519dd
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_module.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_raw.c b/kernel/drivers/xenomai/can/rtcan_raw.c
new file mode 120000
index 0000000..83098b5
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_raw.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_raw.h b/kernel/drivers/xenomai/can/rtcan_raw.h
new file mode 120000
index 0000000..8afed34
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_raw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_raw_dev.c b/kernel/drivers/xenomai/can/rtcan_raw_dev.c
new file mode 120000
index 0000000..dcbcc40
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_raw_dev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_raw_filter.c b/kernel/drivers/xenomai/can/rtcan_raw_filter.c
new file mode 120000
index 0000000..1903db0
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_raw_filter.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_socket.c b/kernel/drivers/xenomai/can/rtcan_socket.c
new file mode 120000
index 0000000..066d572
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_socket.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_socket.h b/kernel/drivers/xenomai/can/rtcan_socket.h
new file mode 120000
index 0000000..a0bfef8
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_socket.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_version.h b/kernel/drivers/xenomai/can/rtcan_version.h
new file mode 120000
index 0000000..fb151ac
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_version.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/rtcan_virt.c b/kernel/drivers/xenomai/can/rtcan_virt.c
new file mode 120000
index 0000000..b8622eb
--- /dev/null
+++ b/kernel/drivers/xenomai/can/rtcan_virt.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/Kconfig b/kernel/drivers/xenomai/can/sja1000/Kconfig
new file mode 120000
index 0000000..9a12ec1
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/Makefile b/kernel/drivers/xenomai/can/sja1000/Makefile
new file mode 120000
index 0000000..92a39c3
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c
new file mode 120000
index 0000000..deea61b
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_adv_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c
new file mode 120000
index 0000000..cb5f886
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_ems_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c
new file mode 120000
index 0000000..aa62ac0
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_esd_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_isa.c b/kernel/drivers/xenomai/can/sja1000/rtcan_isa.c
new file mode 120000
index 0000000..0f086f3
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_isa.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c
new file mode 120000
index 0000000..a6ad991
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_ixxat_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_mem.c b/kernel/drivers/xenomai/can/sja1000/rtcan_mem.c
new file mode 120000
index 0000000..a717566
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_mem.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c
new file mode 120000
index 0000000..8c0cac7
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_dng.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c
new file mode 120000
index 0000000..f39a12f
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_peak_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c b/kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c
new file mode 120000
index 0000000..acdd065
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_plx_pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c
new file mode 120000
index 0000000..955104a
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h
new file mode 120000
index 0000000..23315ba
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c
new file mode 120000
index 0000000..974d2d0
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_proc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h
new file mode 120000
index 0000000..8bd6676
--- /dev/null
+++ b/kernel/drivers/xenomai/can/sja1000/rtcan_sja1000_regs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/Kconfig b/kernel/drivers/xenomai/gpio/Kconfig
new file mode 120000
index 0000000..6555e52
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/Makefile b/kernel/drivers/xenomai/gpio/Makefile
new file mode 120000
index 0000000..80ac680
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-bcm2835.c b/kernel/drivers/xenomai/gpio/gpio-bcm2835.c
new file mode 120000
index 0000000..d0f7323
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-bcm2835.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-cherryview.c b/kernel/drivers/xenomai/gpio/gpio-cherryview.c
new file mode 120000
index 0000000..014eadf
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-cherryview.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-core.c b/kernel/drivers/xenomai/gpio/gpio-core.c
new file mode 120000
index 0000000..2990f4e
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-core.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-mxc.c b/kernel/drivers/xenomai/gpio/gpio-mxc.c
new file mode 120000
index 0000000..14b366f
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-mxc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-omap.c b/kernel/drivers/xenomai/gpio/gpio-omap.c
new file mode 120000
index 0000000..aed52b8
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-omap.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c b/kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c
new file mode 120000
index 0000000..a8558d1
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-sun8i-h3.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-xilinx.c b/kernel/drivers/xenomai/gpio/gpio-xilinx.c
new file mode 120000
index 0000000..8cb0b56
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-xilinx.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpio/gpio-zynq7000.c b/kernel/drivers/xenomai/gpio/gpio-zynq7000.c
new file mode 120000
index 0000000..268930c
--- /dev/null
+++ b/kernel/drivers/xenomai/gpio/gpio-zynq7000.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpiopwm/Kconfig b/kernel/drivers/xenomai/gpiopwm/Kconfig
new file mode 120000
index 0000000..cb8e52a
--- /dev/null
+++ b/kernel/drivers/xenomai/gpiopwm/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpiopwm/Makefile b/kernel/drivers/xenomai/gpiopwm/Makefile
new file mode 120000
index 0000000..298cfaa
--- /dev/null
+++ b/kernel/drivers/xenomai/gpiopwm/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/gpiopwm/gpiopwm.c b/kernel/drivers/xenomai/gpiopwm/gpiopwm.c
new file mode 120000
index 0000000..b2ccb0a
--- /dev/null
+++ b/kernel/drivers/xenomai/gpiopwm/gpiopwm.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/Kconfig b/kernel/drivers/xenomai/ipc/Kconfig
new file mode 120000
index 0000000..d014c12
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/Makefile b/kernel/drivers/xenomai/ipc/Makefile
new file mode 120000
index 0000000..a2a14a0
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/bufp.c b/kernel/drivers/xenomai/ipc/bufp.c
new file mode 120000
index 0000000..45476ee
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/bufp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/iddp.c b/kernel/drivers/xenomai/ipc/iddp.c
new file mode 120000
index 0000000..0d81c84
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/iddp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/internal.h b/kernel/drivers/xenomai/ipc/internal.h
new file mode 120000
index 0000000..386d5da
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/internal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/rtipc.c b/kernel/drivers/xenomai/ipc/rtipc.c
new file mode 120000
index 0000000..3806453
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/rtipc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/ipc/xddp.c b/kernel/drivers/xenomai/ipc/xddp.c
new file mode 120000
index 0000000..c727995
--- /dev/null
+++ b/kernel/drivers/xenomai/ipc/xddp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/Kconfig b/kernel/drivers/xenomai/net/Kconfig
new file mode 120000
index 0000000..c97ece1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/Makefile b/kernel/drivers/xenomai/net/Makefile
new file mode 120000
index 0000000..23918b1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/Kconfig b/kernel/drivers/xenomai/net/addons/Kconfig
new file mode 120000
index 0000000..6a3ebd6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/Makefile b/kernel/drivers/xenomai/net/addons/Makefile
new file mode 120000
index 0000000..a094573
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/cap.c b/kernel/drivers/xenomai/net/addons/cap.c
new file mode 120000
index 0000000..21a6de5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/cap.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/addons/proxy.c b/kernel/drivers/xenomai/net/addons/proxy.c
new file mode 120000
index 0000000..6c4011d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/addons/proxy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/8139too.c b/kernel/drivers/xenomai/net/drivers/8139too.c
new file mode 120000
index 0000000..711169f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/8139too.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/Kconfig b/kernel/drivers/xenomai/net/drivers/Kconfig
new file mode 120000
index 0000000..7ba614e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/Makefile b/kernel/drivers/xenomai/net/drivers/Makefile
new file mode 120000
index 0000000..b73d187
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/at91_ether.c b/kernel/drivers/xenomai/net/drivers/at91_ether.c
new file mode 120000
index 0000000..fe06f6f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/at91_ether.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/Makefile b/kernel/drivers/xenomai/net/drivers/e1000/Makefile
new file mode 120000
index 0000000..31ae439
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000.h b/kernel/drivers/xenomai/net/drivers/e1000/e1000.h
new file mode 120000
index 0000000..5ce29b3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c
new file mode 120000
index 0000000..db7bc01
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h
new file mode 120000
index 0000000..f0bd1f2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c b/kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c
new file mode 120000
index 0000000..e94b664
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h b/kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h
new file mode 120000
index 0000000..f0ae997
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_osdep.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c b/kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c
new file mode 120000
index 0000000..afa14a4
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/e1000_param.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000/kcompat.h b/kernel/drivers/xenomai/net/drivers/e1000/kcompat.h
new file mode 120000
index 0000000..aaea99e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000/kcompat.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c b/kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c
new file mode 120000
index 0000000..decbd6d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/80003es2lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/82571.c b/kernel/drivers/xenomai/net/drivers/e1000e/82571.c
new file mode 120000
index 0000000..551403a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/82571.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/Makefile b/kernel/drivers/xenomai/net/drivers/e1000e/Makefile
new file mode 120000
index 0000000..490bf68
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/defines.h b/kernel/drivers/xenomai/net/drivers/e1000e/defines.h
new file mode 120000
index 0000000..2655b5f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/defines.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/e1000.h b/kernel/drivers/xenomai/net/drivers/e1000e/e1000.h
new file mode 120000
index 0000000..58ddbc8
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/e1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/hw.h b/kernel/drivers/xenomai/net/drivers/e1000e/hw.h
new file mode 120000
index 0000000..3495bb7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c b/kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c
new file mode 120000
index 0000000..620a5a7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/ich8lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/lib.c b/kernel/drivers/xenomai/net/drivers/e1000e/lib.c
new file mode 120000
index 0000000..f7dccb0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/lib.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/netdev.c b/kernel/drivers/xenomai/net/drivers/e1000e/netdev.c
new file mode 120000
index 0000000..de5cec7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/netdev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/param.c b/kernel/drivers/xenomai/net/drivers/e1000e/param.c
new file mode 120000
index 0000000..d4e2427
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/param.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/e1000e/phy.c b/kernel/drivers/xenomai/net/drivers/e1000e/phy.c
new file mode 120000
index 0000000..7f57b2e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/e1000e/phy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/eepro100.c b/kernel/drivers/xenomai/net/drivers/eepro100.c
new file mode 120000
index 0000000..68f0bd9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/eepro100.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/eth1394.c b/kernel/drivers/xenomai/net/drivers/eth1394.c
new file mode 120000
index 0000000..0d2385c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/eth1394.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/3c59x.c b/kernel/drivers/xenomai/net/drivers/experimental/3c59x.c
new file mode 120000
index 0000000..f392120
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/3c59x.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/Kconfig b/kernel/drivers/xenomai/net/drivers/experimental/Kconfig
new file mode 120000
index 0000000..a8b7aa6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/Makefile b/kernel/drivers/xenomai/net/drivers/experimental/Makefile
new file mode 120000
index 0000000..b2cd54b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile b/kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile
new file mode 120000
index 0000000..ef5e157
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h
new file mode 120000
index 0000000..5517ffe
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c
new file mode 120000
index 0000000..b90773e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h
new file mode 120000
index 0000000..3c91d39
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_80003es2lan.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c
new file mode 120000
index 0000000..107b3fc
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82540.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c
new file mode 120000
index 0000000..af49424
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h
new file mode 120000
index 0000000..4a6b12d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82541.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c
new file mode 120000
index 0000000..b5fece5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82542.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c
new file mode 120000
index 0000000..123b677
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h
new file mode 120000
index 0000000..f6c618b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82543.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c
new file mode 120000
index 0000000..8be311a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h
new file mode 120000
index 0000000..4e913d2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_82571.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c
new file mode 120000
index 0000000..be7440a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h
new file mode 120000
index 0000000..d8f9c11
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_api.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h
new file mode 120000
index 0000000..c7b46f5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_defines.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c
new file mode 120000
index 0000000..56ff658
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ethtool.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h
new file mode 120000
index 0000000..a98c043
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c
new file mode 120000
index 0000000..24e724c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h
new file mode 120000
index 0000000..af038e9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_ich8lan.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c
new file mode 120000
index 0000000..b9c80ae
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h
new file mode 120000
index 0000000..7b49cc1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_mac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c
new file mode 120000
index 0000000..0b4dca5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c
new file mode 120000
index 0000000..592efe5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h
new file mode 120000
index 0000000..195e5a6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_manage.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c
new file mode 120000
index 0000000..6b1f68b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h
new file mode 120000
index 0000000..7e9c493
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_nvm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h
new file mode 120000
index 0000000..940b211
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_osdep.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c
new file mode 120000
index 0000000..d579c65
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_param.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c
new file mode 120000
index 0000000..1cd2488
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h
new file mode 120000
index 0000000..01bc39c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_phy.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h
new file mode 120000
index 0000000..6e60c43
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/e1000_regs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h b/kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h
new file mode 120000
index 0000000..042a181
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/e1000/kcompat.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig
new file mode 120000
index 0000000..81d9074
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile
new file mode 120000
index 0000000..38e0133
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c
new file mode 120000
index 0000000..17f9400
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h
new file mode 120000
index 0000000..d2ac490
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2500pci.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h
new file mode 120000
index 0000000..110ca4d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c
new file mode 120000
index 0000000..0814623
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/experimental/rt2500/rt2x00core.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/Makefile b/kernel/drivers/xenomai/net/drivers/freescale/Makefile
new file mode 120000
index 0000000..22c40e9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/fec.h b/kernel/drivers/xenomai/net/drivers/freescale/fec.h
new file mode 120000
index 0000000..3bba88c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/fec.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/fec_main.c b/kernel/drivers/xenomai/net/drivers/freescale/fec_main.c
new file mode 120000
index 0000000..acf4436
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/fec_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c b/kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c
new file mode 120000
index 0000000..95e170b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/freescale/fec_ptp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/Makefile b/kernel/drivers/xenomai/net/drivers/igb/Makefile
new file mode 120000
index 0000000..40d6051
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c
new file mode 120000
index 0000000..5289bc3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h
new file mode 120000
index 0000000..09ad7d0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_82575.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h
new file mode 120000
index 0000000..d771f53
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_defines.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h
new file mode 120000
index 0000000..ded3741
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_hw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c
new file mode 120000
index 0000000..d9f065f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h
new file mode 120000
index 0000000..2169a38
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_i210.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c
new file mode 120000
index 0000000..8009a7b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h
new file mode 120000
index 0000000..992439d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c
new file mode 120000
index 0000000..5890d94
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h
new file mode 120000
index 0000000..1c63b6e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_mbx.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c
new file mode 120000
index 0000000..7c68ffe
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h
new file mode 120000
index 0000000..ffcb83d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_nvm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c
new file mode 120000
index 0000000..7e0bd20
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h
new file mode 120000
index 0000000..ef0bb5d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_phy.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h b/kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h
new file mode 120000
index 0000000..391d5df
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/e1000_regs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/igb.h b/kernel/drivers/xenomai/net/drivers/igb/igb.h
new file mode 120000
index 0000000..18c03a5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/igb.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c b/kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c
new file mode 120000
index 0000000..72486f0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/igb_hwmon.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/igb/igb_main.c b/kernel/drivers/xenomai/net/drivers/igb/igb_main.c
new file mode 120000
index 0000000..d26d2cd
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/igb/igb_main.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/loopback.c b/kernel/drivers/xenomai/net/drivers/loopback.c
new file mode 120000
index 0000000..f1ab73a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/loopback.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/macb.c b/kernel/drivers/xenomai/net/drivers/macb.c
new file mode 120000
index 0000000..a227f81
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/macb.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile
new file mode 120000
index 0000000..8ded6f9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c
new file mode 120000
index 0000000..3d95c32
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/mpc52xx_fec.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
new file mode 120000
index 0000000..25f9694
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c b/kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c
new file mode 120000
index 0000000..3bb85ab
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc8260_fcc_enet.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c b/kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c
new file mode 120000
index 0000000..299866a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc8xx_enet.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c b/kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c
new file mode 120000
index 0000000..fae6619
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/mpc8xx_fec.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/natsemi.c b/kernel/drivers/xenomai/net/drivers/natsemi.c
new file mode 120000
index 0000000..0f25145
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/natsemi.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/pcnet32.c b/kernel/drivers/xenomai/net/drivers/pcnet32.c
new file mode 120000
index 0000000..1cede2f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/pcnet32.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/r8169.c b/kernel/drivers/xenomai/net/drivers/r8169.c
new file mode 120000
index 0000000..56b7ae9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/r8169.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/rt_at91_ether.h b/kernel/drivers/xenomai/net/drivers/rt_at91_ether.h
new file mode 120000
index 0000000..69fb95d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/rt_at91_ether.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_at91_ether.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/rt_eth1394.h b/kernel/drivers/xenomai/net/drivers/rt_eth1394.h
new file mode 120000
index 0000000..5cf6d26
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/rt_eth1394.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_eth1394.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/rt_macb.h b/kernel/drivers/xenomai/net/drivers/rt_macb.h
new file mode 120000
index 0000000..431147b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/rt_macb.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_macb.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/21142.c b/kernel/drivers/xenomai/net/drivers/tulip/21142.c
new file mode 120000
index 0000000..266476c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/21142.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/21142.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/Makefile b/kernel/drivers/xenomai/net/drivers/tulip/Makefile
new file mode 120000
index 0000000..186e9f5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/eeprom.c b/kernel/drivers/xenomai/net/drivers/tulip/eeprom.c
new file mode 120000
index 0000000..c6fa166
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/eeprom.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/eeprom.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/interrupt.c b/kernel/drivers/xenomai/net/drivers/tulip/interrupt.c
new file mode 120000
index 0000000..89e9c98
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/interrupt.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/interrupt.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/media.c b/kernel/drivers/xenomai/net/drivers/tulip/media.c
new file mode 120000
index 0000000..041379d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/media.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/media.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/pnic.c b/kernel/drivers/xenomai/net/drivers/tulip/pnic.c
new file mode 120000
index 0000000..637f52d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/pnic.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/pnic2.c b/kernel/drivers/xenomai/net/drivers/tulip/pnic2.c
new file mode 120000
index 0000000..3689226
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/pnic2.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic2.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/tulip.h b/kernel/drivers/xenomai/net/drivers/tulip/tulip.h
new file mode 120000
index 0000000..097fe09
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/tulip.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c b/kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c
new file mode 120000
index 0000000..e894d6d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/tulip/tulip_core.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip_core.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/drivers/via-rhine.c b/kernel/drivers/xenomai/net/drivers/via-rhine.c
new file mode 120000
index 0000000..393ebe0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/drivers/via-rhine.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/via-rhine.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/Kconfig b/kernel/drivers/xenomai/net/stack/Kconfig
new file mode 120000
index 0000000..9b30843
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/Makefile b/kernel/drivers/xenomai/net/stack/Makefile
new file mode 120000
index 0000000..00de54e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/corectl.c b/kernel/drivers/xenomai/net/stack/corectl.c
new file mode 120000
index 0000000..673509b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/corectl.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/corectl.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/eth.c b/kernel/drivers/xenomai/net/stack/eth.c
new file mode 120000
index 0000000..37a7b72
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/eth.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/eth.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ethernet/eth.h b/kernel/drivers/xenomai/net/stack/include/ethernet/eth.h
new file mode 120000
index 0000000..b9481c0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ethernet/eth.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ethernet/eth.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h b/kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h
new file mode 120000
index 0000000..00c836d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/af_inet.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/af_inet.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/arp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/arp.h
new file mode 120000
index 0000000..da86d2b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/arp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/arp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h
new file mode 120000
index 0000000..a0fb11c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/icmp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/icmp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h
new file mode 120000
index 0000000..1c11a2b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_fragment.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_fragment.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h
new file mode 120000
index 0000000..62c806d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_input.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_input.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h
new file mode 120000
index 0000000..e808ec3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_output.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_output.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h
new file mode 120000
index 0000000..d90d3a2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/ip_sock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_sock.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h b/kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h
new file mode 120000
index 0000000..643408a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/protocol.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/protocol.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/route.h b/kernel/drivers/xenomai/net/stack/include/ipv4/route.h
new file mode 120000
index 0000000..d289da0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/route.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/route.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h
new file mode 120000
index 0000000..43f4287
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/tcp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/tcp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4/udp.h b/kernel/drivers/xenomai/net/stack/include/ipv4/udp.h
new file mode 120000
index 0000000..b65b654
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4/udp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/udp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h b/kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h
new file mode 120000
index 0000000..b31a416
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/ipv4_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h b/kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h
new file mode 120000
index 0000000..366c1b2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/nomac_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/nomac_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h
new file mode 120000
index 0000000..7e2a1fd
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h
new file mode 120000
index 0000000..45e3024
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_client_event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_client_event.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h
new file mode 120000
index 0000000..2339ba4
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_conn_event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_conn_event.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h
new file mode 120000
index 0000000..7e12020
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_event.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h
new file mode 120000
index 0000000..86f06b7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_file.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_file.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h
new file mode 120000
index 0000000..6e4e6f9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_frame.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_frame.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h
new file mode 120000
index 0000000..17fe163
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_ioctl.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_ioctl.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h
new file mode 120000
index 0000000..804f5f3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_proc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_proc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h
new file mode 120000
index 0000000..57e6e2b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg/rtcfg_timer.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_timer.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h b/kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h
new file mode 120000
index 0000000..a8453a6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtcfg_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtdev.h b/kernel/drivers/xenomai/net/stack/include/rtdev.h
new file mode 120000
index 0000000..3c4d68d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h b/kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h
new file mode 120000
index 0000000..e594fb0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtdev_mgr.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev_mgr.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac.h b/kernel/drivers/xenomai/net/stack/include/rtmac.h
new file mode 120000
index 0000000..476f312
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h
new file mode 120000
index 0000000..88578ef
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h
new file mode 120000
index 0000000..569606f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_dev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_dev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h
new file mode 120000
index 0000000..9a0ad0a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_ioctl.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_ioctl.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h
new file mode 120000
index 0000000..284e21b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/nomac/nomac_proto.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_proto.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h
new file mode 120000
index 0000000..e562573
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_disc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_disc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h
new file mode 120000
index 0000000..3876ab3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h
new file mode 120000
index 0000000..88fee6b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_proto.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proto.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h
new file mode 120000
index 0000000..504f7a5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/rtmac_vnic.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_vnic.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h
new file mode 120000
index 0000000..a332fc0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h
new file mode 120000
index 0000000..00e9e92
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_dev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_dev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h
new file mode 120000
index 0000000..4d3d2db
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_ioctl.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_ioctl.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h
new file mode 120000
index 0000000..d93a3dc
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_proto.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_proto.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h
new file mode 120000
index 0000000..38db4a3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtmac/tdma/tdma_worker.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_worker.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h b/kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h
new file mode 120000
index 0000000..ff42b33
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtnet_checksum.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_checksum.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h b/kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h
new file mode 120000
index 0000000..d22aed3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtnet_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_internal.h b/kernel/drivers/xenomai/net/stack/include/rtnet_internal.h
new file mode 120000
index 0000000..424b223
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtnet_internal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_internal.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h b/kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h
new file mode 120000
index 0000000..4c1e22b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtnet_iovec.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_iovec.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_port.h b/kernel/drivers/xenomai/net/stack/include/rtnet_port.h
new file mode 120000
index 0000000..eface39
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtnet_port.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_port.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h b/kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h
new file mode 120000
index 0000000..cc7a107
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtnet_rtpc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_rtpc.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtnet_socket.h b/kernel/drivers/xenomai/net/stack/include/rtnet_socket.h
new file mode 120000
index 0000000..20aeea1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtnet_socket.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_socket.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtskb.h b/kernel/drivers/xenomai/net/stack/include/rtskb.h
new file mode 120000
index 0000000..3289f85
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtskb.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h b/kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h
new file mode 120000
index 0000000..418a2f5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtskb_fifo.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb_fifo.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtwlan.h b/kernel/drivers/xenomai/net/stack/include/rtwlan.h
new file mode 120000
index 0000000..560fc39
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtwlan.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/rtwlan_io.h b/kernel/drivers/xenomai/net/stack/include/rtwlan_io.h
new file mode 120000
index 0000000..e90bc6e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/rtwlan_io.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan_io.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/stack_mgr.h b/kernel/drivers/xenomai/net/stack/include/stack_mgr.h
new file mode 120000
index 0000000..a8caad8
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/stack_mgr.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/stack_mgr.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h b/kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h
new file mode 120000
index 0000000..c6e60fc
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/include/tdma_chrdev.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/tdma_chrdev.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/iovec.c b/kernel/drivers/xenomai/net/stack/iovec.c
new file mode 120000
index 0000000..45a892b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/iovec.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/iovec.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/Kconfig b/kernel/drivers/xenomai/net/stack/ipv4/Kconfig
new file mode 120000
index 0000000..7f181fb
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/Makefile b/kernel/drivers/xenomai/net/stack/ipv4/Makefile
new file mode 120000
index 0000000..bc89433
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/af_inet.c b/kernel/drivers/xenomai/net/stack/ipv4/af_inet.c
new file mode 120000
index 0000000..d887393
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/af_inet.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/af_inet.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/arp.c b/kernel/drivers/xenomai/net/stack/ipv4/arp.c
new file mode 120000
index 0000000..81a5aa3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/arp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/arp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/icmp.c b/kernel/drivers/xenomai/net/stack/ipv4/icmp.c
new file mode 120000
index 0000000..8673d88
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/icmp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/icmp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c
new file mode 120000
index 0000000..805c5b4
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_fragment.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_fragment.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_input.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_input.c
new file mode 120000
index 0000000..981f17c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_input.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_input.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_output.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_output.c
new file mode 120000
index 0000000..75cecb0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_output.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_output.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c b/kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c
new file mode 120000
index 0000000..8b59934
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/ip_sock.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_sock.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/protocol.c b/kernel/drivers/xenomai/net/stack/ipv4/protocol.c
new file mode 120000
index 0000000..634f4e4
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/protocol.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/protocol.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/route.c b/kernel/drivers/xenomai/net/stack/ipv4/route.c
new file mode 120000
index 0000000..94f66a2
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/route.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/route.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig
new file mode 120000
index 0000000..b7e9ae1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile
new file mode 120000
index 0000000..3d49e2d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c b/kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c
new file mode 120000
index 0000000..23bd53e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/tcp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/tcp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c
new file mode 120000
index 0000000..0bf7b27
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h
new file mode 120000
index 0000000..5b8a527
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/tcp/timerwheel.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig b/kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig
new file mode 120000
index 0000000..cf6793b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/udp/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile b/kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile
new file mode 120000
index 0000000..9e6bdf6
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/udp/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c b/kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c
new file mode 120000
index 0000000..ac053dc
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/ipv4/udp/udp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/udp.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/packet/Kconfig b/kernel/drivers/xenomai/net/stack/packet/Kconfig
new file mode 120000
index 0000000..7eed8ca
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/packet/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/packet/Makefile b/kernel/drivers/xenomai/net/stack/packet/Makefile
new file mode 120000
index 0000000..8542ff3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/packet/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/packet/af_packet.c b/kernel/drivers/xenomai/net/stack/packet/af_packet.c
new file mode 120000
index 0000000..c460e64
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/packet/af_packet.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/af_packet.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/Kconfig b/kernel/drivers/xenomai/net/stack/rtcfg/Kconfig
new file mode 120000
index 0000000..2f4060e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/Makefile b/kernel/drivers/xenomai/net/stack/rtcfg/Makefile
new file mode 120000
index 0000000..98efb5e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c
new file mode 120000
index 0000000..e1443a5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_client_event.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_client_event.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c
new file mode 120000
index 0000000..c87c3be
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_conn_event.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_conn_event.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c
new file mode 120000
index 0000000..40b0e21
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_event.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_event.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c
new file mode 120000
index 0000000..a283f7d
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_file.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_file.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c
new file mode 120000
index 0000000..47b9f15
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_frame.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_frame.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c
new file mode 120000
index 0000000..3b8a27f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_ioctl.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_ioctl.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c
new file mode 120000
index 0000000..fe38d0b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_module.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_module.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c
new file mode 120000
index 0000000..0bfb928
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_proc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_proc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c
new file mode 120000
index 0000000..de1d308
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtcfg/rtcfg_timer.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_timer.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtdev.c b/kernel/drivers/xenomai/net/stack/rtdev.c
new file mode 120000
index 0000000..d8c935c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtdev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtdev_mgr.c b/kernel/drivers/xenomai/net/stack/rtdev_mgr.c
new file mode 120000
index 0000000..fdcb124
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtdev_mgr.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev_mgr.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/Kconfig b/kernel/drivers/xenomai/net/stack/rtmac/Kconfig
new file mode 120000
index 0000000..a603e39
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/Makefile b/kernel/drivers/xenomai/net/stack/rtmac/Makefile
new file mode 120000
index 0000000..b2433a5
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig
new file mode 120000
index 0000000..11eabd8
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile
new file mode 120000
index 0000000..a268292
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c
new file mode 120000
index 0000000..8cacc9a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_dev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_dev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c
new file mode 120000
index 0000000..a5429a9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_ioctl.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_ioctl.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c
new file mode 120000
index 0000000..49f992e
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_module.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_module.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c
new file mode 120000
index 0000000..4717f17
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/nomac/nomac_proto.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_proto.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c
new file mode 120000
index 0000000..7e5725b
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_disc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_disc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c
new file mode 120000
index 0000000..2905432
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_module.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_module.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c
new file mode 120000
index 0000000..406357f
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c
new file mode 120000
index 0000000..d4095f8
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_proto.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proto.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c
new file mode 120000
index 0000000..13ba402
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_syms.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_syms.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c
new file mode 120000
index 0000000..d6e8a3c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/rtmac_vnic.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_vnic.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig
new file mode 120000
index 0000000..5cec7e1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile
new file mode 120000
index 0000000..50923b3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c
new file mode 120000
index 0000000..2c0fbb0
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_dev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_dev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c
new file mode 120000
index 0000000..f9a94c9
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_ioctl.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_ioctl.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c
new file mode 120000
index 0000000..824d271
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_module.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_module.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c
new file mode 120000
index 0000000..9b337d7
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_proto.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_proto.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c
new file mode 120000
index 0000000..0896aee
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtmac/tdma/tdma_worker.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_worker.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtnet_chrdev.c b/kernel/drivers/xenomai/net/stack/rtnet_chrdev.c
new file mode 120000
index 0000000..9803648
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtnet_chrdev.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_chrdev.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtnet_module.c b/kernel/drivers/xenomai/net/stack/rtnet_module.c
new file mode 120000
index 0000000..446daa3
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtnet_module.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_module.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtnet_rtpc.c b/kernel/drivers/xenomai/net/stack/rtnet_rtpc.c
new file mode 120000
index 0000000..dd7b978
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtnet_rtpc.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_rtpc.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtskb.c b/kernel/drivers/xenomai/net/stack/rtskb.c
new file mode 120000
index 0000000..34fda4a
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtskb.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtskb.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/rtwlan.c b/kernel/drivers/xenomai/net/stack/rtwlan.c
new file mode 120000
index 0000000..b8b1f7c
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/rtwlan.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtwlan.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/socket.c b/kernel/drivers/xenomai/net/stack/socket.c
new file mode 120000
index 0000000..901d381
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/socket.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/socket.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/net/stack/stack_mgr.c b/kernel/drivers/xenomai/net/stack/stack_mgr.c
new file mode 120000
index 0000000..a8e5ad1
--- /dev/null
+++ b/kernel/drivers/xenomai/net/stack/stack_mgr.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/stack_mgr.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/16550A.c b/kernel/drivers/xenomai/serial/16550A.c
new file mode 120000
index 0000000..1a3cb18
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/16550A.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/16550A_io.h b/kernel/drivers/xenomai/serial/16550A_io.h
new file mode 120000
index 0000000..42d9558
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/16550A_io.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_io.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/16550A_pci.h b/kernel/drivers/xenomai/serial/16550A_pci.h
new file mode 120000
index 0000000..006336c
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/16550A_pci.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pci.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/16550A_pnp.h b/kernel/drivers/xenomai/serial/16550A_pnp.h
new file mode 120000
index 0000000..ba90fce
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/16550A_pnp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pnp.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/Kconfig b/kernel/drivers/xenomai/serial/Kconfig
new file mode 120000
index 0000000..9366f46
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/Makefile b/kernel/drivers/xenomai/serial/Makefile
new file mode 120000
index 0000000..eb08892
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/mpc52xx_uart.c b/kernel/drivers/xenomai/serial/mpc52xx_uart.c
new file mode 120000
index 0000000..4c548ef
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/mpc52xx_uart.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/mpc52xx_uart.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/serial/rt_imx_uart.c b/kernel/drivers/xenomai/serial/rt_imx_uart.c
new file mode 120000
index 0000000..cc37903
--- /dev/null
+++ b/kernel/drivers/xenomai/serial/rt_imx_uart.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/serial/rt_imx_uart.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/Kconfig b/kernel/drivers/xenomai/spi/Kconfig
new file mode 120000
index 0000000..d0bf131
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/Makefile b/kernel/drivers/xenomai/spi/Makefile
new file mode 120000
index 0000000..a62ea3b
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/spi-bcm2835.c b/kernel/drivers/xenomai/spi/spi-bcm2835.c
new file mode 120000
index 0000000..8fb2df6
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/spi-bcm2835.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-bcm2835.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/spi-device.c b/kernel/drivers/xenomai/spi/spi-device.c
new file mode 120000
index 0000000..47dc016
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/spi-device.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/spi-device.h b/kernel/drivers/xenomai/spi/spi-device.h
new file mode 120000
index 0000000..1dd400a
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/spi-device.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/spi-master.c b/kernel/drivers/xenomai/spi/spi-master.c
new file mode 120000
index 0000000..d6f160d
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/spi-master.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/spi-master.h b/kernel/drivers/xenomai/spi/spi-master.h
new file mode 120000
index 0000000..343d478
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/spi-master.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.h
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c b/kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c
new file mode 120000
index 0000000..82aec04
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/spi-omap2-mcspi-rt.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-omap2-mcspi-rt.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/spi/spi-sun6i.c b/kernel/drivers/xenomai/spi/spi-sun6i.c
new file mode 120000
index 0000000..885e4e0
--- /dev/null
+++ b/kernel/drivers/xenomai/spi/spi-sun6i.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-sun6i.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/testing/Kconfig b/kernel/drivers/xenomai/testing/Kconfig
new file mode 120000
index 0000000..6c82f99
--- /dev/null
+++ b/kernel/drivers/xenomai/testing/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/testing/Makefile b/kernel/drivers/xenomai/testing/Makefile
new file mode 120000
index 0000000..137da61
--- /dev/null
+++ b/kernel/drivers/xenomai/testing/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/testing/heapcheck.c b/kernel/drivers/xenomai/testing/heapcheck.c
new file mode 120000
index 0000000..b912a63
--- /dev/null
+++ b/kernel/drivers/xenomai/testing/heapcheck.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/heapcheck.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/testing/rtdmtest.c b/kernel/drivers/xenomai/testing/rtdmtest.c
new file mode 120000
index 0000000..873a071
--- /dev/null
+++ b/kernel/drivers/xenomai/testing/rtdmtest.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/rtdmtest.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/testing/switchtest.c b/kernel/drivers/xenomai/testing/switchtest.c
new file mode 120000
index 0000000..cd9cbf8
--- /dev/null
+++ b/kernel/drivers/xenomai/testing/switchtest.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/switchtest.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/testing/timerbench.c b/kernel/drivers/xenomai/testing/timerbench.c
new file mode 120000
index 0000000..8568fdb
--- /dev/null
+++ b/kernel/drivers/xenomai/testing/timerbench.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/testing/timerbench.c
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/udd/Kconfig b/kernel/drivers/xenomai/udd/Kconfig
new file mode 120000
index 0000000..1cf8e77
--- /dev/null
+++ b/kernel/drivers/xenomai/udd/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/udd/Kconfig
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/udd/Makefile b/kernel/drivers/xenomai/udd/Makefile
new file mode 120000
index 0000000..daeb2ea
--- /dev/null
+++ b/kernel/drivers/xenomai/udd/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/udd/Makefile
\ No newline at end of file
diff --git a/kernel/drivers/xenomai/udd/udd.c b/kernel/drivers/xenomai/udd/udd.c
new file mode 120000
index 0000000..b5e71ff
--- /dev/null
+++ b/kernel/drivers/xenomai/udd/udd.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/drivers/udd/udd.c
\ No newline at end of file
diff --git a/kernel/fs/eventfd.c b/kernel/fs/eventfd.c
index 4a14295..cb99868 100644
--- a/kernel/fs/eventfd.c
+++ b/kernel/fs/eventfd.c
@@ -266,17 +266,17 @@
 	return sizeof(ucnt);
 }
 
-static ssize_t eventfd_write(struct file *file, const char __user *buf, size_t count,
-			     loff_t *ppos)
+static ssize_t eventfd_write(struct kiocb *iocb, struct iov_iter *from)
 {
+	struct file *file = iocb->ki_filp;
 	struct eventfd_ctx *ctx = file->private_data;
 	ssize_t res;
 	__u64 ucnt;
 	DECLARE_WAITQUEUE(wait, current);
 
-	if (count < sizeof(ucnt))
+	if (iov_iter_count(from) < sizeof(ucnt))
 		return -EINVAL;
-	if (copy_from_user(&ucnt, buf, sizeof(ucnt)))
+	if (copy_from_iter(&ucnt, sizeof(ucnt), from) != sizeof(ucnt))
 		return -EFAULT;
 	if (ucnt == ULLONG_MAX)
 		return -EINVAL;
@@ -333,7 +333,7 @@
 	.release	= eventfd_release,
 	.poll		= eventfd_poll,
 	.read_iter	= eventfd_read,
-	.write		= eventfd_write,
+	.write_iter	= eventfd_write,
 	.llseek		= noop_llseek,
 };
 
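
Editor's note: the eventfd hunk above converts the write side from a plain .write handler to .write_iter, mirroring the read_iter conversion already in place. For reference, a minimal userspace sketch of the 8-byte counter protocol this handler parses (bump_eventfd() is a hypothetical name):

#include <stdint.h>
#include <unistd.h>

/* Add 1 to the eventfd counter; mirrors the checks in eventfd_write():
 * writes shorter than 8 bytes fail with EINVAL, as does ULLONG_MAX. */
static int bump_eventfd(int efd)
{
	uint64_t ucnt = 1;

	return write(efd, &ucnt, sizeof(ucnt)) == sizeof(ucnt) ? 0 : -1;
}
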
diff --git a/kernel/fs/exec.c b/kernel/fs/exec.c
index b798885..30f75c0 100644
--- a/kernel/fs/exec.c
+++ b/kernel/fs/exec.c
@@ -34,6 +34,7 @@
 #include <linux/swap.h>
 #include <linux/string.h>
 #include <linux/init.h>
+#include <linux/irq_pipeline.h>
 #include <linux/sched/mm.h>
 #include <linux/sched/coredump.h>
 #include <linux/sched/signal.h>
@@ -68,6 +69,7 @@
 #include <linux/uaccess.h>
 #include <asm/mmu_context.h>
 #include <asm/tlb.h>
+#include <asm/dovetail.h>
 
 #include <trace/events/task.h>
 #include "internal.h"
@@ -981,6 +983,7 @@
 	struct task_struct *tsk;
 	struct mm_struct *old_mm, *active_mm;
 	int ret;
+	unsigned long flags;
 
 	/* Notify parent that we're no longer interested in the old VM */
 	tsk = current;
@@ -1013,6 +1016,7 @@
 
 	local_irq_disable();
 	active_mm = tsk->active_mm;
+	protect_inband_mm(flags);
 	tsk->active_mm = mm;
 	tsk->mm = mm;
 	/*
@@ -1021,10 +1025,17 @@
 	 * lazy tlb mm refcounting when these are updated by context
 	 * switches. Not all architectures can handle irqs off over
 	 * activate_mm yet.
+	 *
+	 * irq_pipeline: activate_mm() must support being called with
+	 * irqs off, e.g. TLB shootdown must not involve IPIs. We
+	 * ensure protect_inband_mm() is in effect while switching in
+	 * and activating the new mm by forcing
+	 * CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM on.
 	 */
 	if (!IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
 		local_irq_enable();
 	activate_mm(active_mm, mm);
+	unprotect_inband_mm(flags);
 	if (IS_ENABLED(CONFIG_ARCH_WANT_IRQS_OFF_ACTIVATE_MM))
 		local_irq_enable();
 	tsk->mm->vmacache_seqnum = 0;
@@ -1302,6 +1313,9 @@
 	if (retval)
 		goto out_unlock;
 
+	/* Tell Dovetail about the ongoing exec(). */
+	arch_dovetail_exec_prepare();
+
 	/*
 	 * Ensure that the uaccess routines can actually operate on userspace
 	 * pointers:
diff --git a/kernel/fs/fcntl.c b/kernel/fs/fcntl.c
index fcf34f8..0b131a8 100644
--- a/kernel/fs/fcntl.c
+++ b/kernel/fs/fcntl.c
@@ -1044,7 +1044,7 @@
 	 * Exceptions: O_NONBLOCK is a two bit define on parisc; O_NDELAY
 	 * is defined as O_NONBLOCK on some platforms and not on others.
 	 */
-	BUILD_BUG_ON(21 - 1 /* for O_RDONLY being 0 */ !=
+	BUILD_BUG_ON(22 - 1 /* for O_RDONLY being 0 */ !=
 		HWEIGHT32(
 			(VALID_OPEN_FLAGS & ~(O_NONBLOCK | O_NDELAY)) |
 			__FMODE_EXEC | __FMODE_NONOTIFY));
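
Editor's note: HWEIGHT32() is a population count, so the BUILD_BUG_ON() above asserts that the hand-maintained number of valid open-flag bits (21 before this series, 22 after it adds one out-of-band open flag, presumably O_OOB; the flag definition is outside this hunk) still matches VALID_OPEN_FLAGS. A standalone illustration of the invariant being checked, with made-up flag values:

#include <stdio.h>

int main(void)
{
	unsigned int valid_flags = 0x3 | 0x40 | 0x200;	/* 4 bits set */

	/* __builtin_popcount() plays the role of HWEIGHT32() here. */
	printf("%d distinct flag bits\n", __builtin_popcount(valid_flags));
	return 0;
}
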
diff --git a/kernel/fs/file.c b/kernel/fs/file.c
index 97a0cd3..6d84335 100644
--- a/kernel/fs/file.c
+++ b/kernel/fs/file.c
@@ -429,6 +429,7 @@
 			if (set & 1) {
 				struct file * file = xchg(&fdt->fd[i], NULL);
 				if (file) {
+					uninstall_inband_fd(i, file, files);
 					filp_close(file, files);
 					cond_resched();
 				}
@@ -646,6 +647,7 @@
 		fdt = files_fdtable(files);
 		BUG_ON(fdt->fd[fd] != NULL);
 		rcu_assign_pointer(fdt->fd[fd], file);
+		install_inband_fd(fd, file, files);
 		spin_unlock(&files->file_lock);
 		return;
 	}
@@ -654,6 +656,7 @@
 	fdt = rcu_dereference_sched(files->fdt);
 	BUG_ON(fdt->fd[fd] != NULL);
 	rcu_assign_pointer(fdt->fd[fd], file);
+	install_inband_fd(fd, file, files);
 	rcu_read_unlock_sched();
 }
 
@@ -682,6 +685,7 @@
 		goto out_unlock;
 	rcu_assign_pointer(fdt->fd[fd], NULL);
 	__put_unused_fd(files, fd);
+	uninstall_inband_fd(fd, file, files);
 
 out_unlock:
 	spin_unlock(&files->file_lock);
@@ -799,6 +803,8 @@
 		goto out_err;
 	rcu_assign_pointer(fdt->fd[fd], NULL);
 	__put_unused_fd(files, fd);
+	uninstall_inband_fd(fd, file, files);
+	spin_unlock(&files->file_lock);
 	get_file(file);
 	*res = file;
 	return 0;
@@ -850,6 +856,7 @@
 				continue;
 			rcu_assign_pointer(fdt->fd[fd], NULL);
 			__put_unused_fd(files, fd);
+			uninstall_inband_fd(fd, file, files);
 			spin_unlock(&files->file_lock);
 			filp_close(file, files);
 			cond_resched();
@@ -1088,6 +1095,7 @@
 		__set_close_on_exec(fd, fdt);
 	else
 		__clear_close_on_exec(fd, fdt);
+	replace_inband_fd(fd, file, files);
 	spin_unlock(&files->file_lock);
 
 	if (tofree)
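
Editor's note: the three hooks spliced into fs/file.c above let a companion core mirror every fd table update. Their prototypes appear in include/linux/dovetail.h later in this patch; with CONFIG_DOVETAIL disabled they are expected to compile away to empty inlines, along the lines of this sketch (an assumption, not part of this hunk):

static inline void install_inband_fd(unsigned int fd, struct file *file,
				     struct files_struct *files) { }

static inline void uninstall_inband_fd(unsigned int fd, struct file *file,
				       struct files_struct *files) { }

static inline void replace_inband_fd(unsigned int fd, struct file *file,
				     struct files_struct *files) { }
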
diff --git a/kernel/fs/ioctl.c b/kernel/fs/ioctl.c
index ac3b386..ad1d738 100644
--- a/kernel/fs/ioctl.c
+++ b/kernel/fs/ioctl.c
@@ -790,6 +790,22 @@
 }
 EXPORT_SYMBOL(compat_ptr_ioctl);
 
+/**
+ * compat_ptr_oob_ioctl - generic implementation of .compat_oob_ioctl file operation
+ *
+ * The equivalent of compat_ptr_ioctl, dealing with out-of-band ioctl
+ * calls. Management of this handler is delegated to the code
+ * implementing the out-of-band ioctl() syscall in the companion core.
+ */
+long compat_ptr_oob_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	if (!file->f_op->oob_ioctl)
+		return -ENOIOCTLCMD;
+
+	return file->f_op->oob_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+EXPORT_SYMBOL(compat_ptr_oob_ioctl);
+
 COMPAT_SYSCALL_DEFINE3(ioctl, unsigned int, fd, unsigned int, cmd,
 		       compat_ulong_t, arg)
 {
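
Editor's note: usage sketch for the helper documented above. A driver exposing an out-of-band ioctl path would point .compat_oob_ioctl at compat_ptr_oob_ioctl the same way .compat_ioctl is commonly set to compat_ptr_ioctl; the foo_* names are hypothetical, and the oob_ioctl/compat_oob_ioctl file_operations members are added elsewhere in this series:

static long foo_oob_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	return 0;	/* out-of-band request handling goes here */
}

static const struct file_operations foo_fops = {
	.owner		  = THIS_MODULE,
	.oob_ioctl	  = foo_oob_ioctl,
	.compat_oob_ioctl = compat_ptr_oob_ioctl,
};
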
diff --git a/kernel/fs/udf/inode.c b/kernel/fs/udf/inode.c
index d32b836..e94a18b 100644
--- a/kernel/fs/udf/inode.c
+++ b/kernel/fs/udf/inode.c
@@ -438,6 +438,12 @@
 		iinfo->i_next_alloc_goal++;
 	}
 
+	/*
+	 * Block beyond EOF and prealloc extents? Just discard preallocation
+	 * as it is not useful and complicates things.
+	 */
+	if (((loff_t)block) << inode->i_blkbits > iinfo->i_lenExtents)
+		udf_discard_prealloc(inode);
 	udf_clear_extent_cache(inode);
 	phys = inode_getblk(inode, block, &err, &new);
 	if (!phys)
@@ -487,8 +493,6 @@
 	uint32_t add;
 	int count = 0, fake = !(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
 	struct super_block *sb = inode->i_sb;
-	struct kernel_lb_addr prealloc_loc = {};
-	uint32_t prealloc_len = 0;
 	struct udf_inode_info *iinfo;
 	int err;
 
@@ -507,19 +511,6 @@
 		iinfo->i_lenExtents =
 			(iinfo->i_lenExtents + sb->s_blocksize - 1) &
 			~(sb->s_blocksize - 1);
-	}
-
-	/* Last extent are just preallocated blocks? */
-	if ((last_ext->extLength & UDF_EXTENT_FLAG_MASK) ==
-						EXT_NOT_RECORDED_ALLOCATED) {
-		/* Save the extent so that we can reattach it to the end */
-		prealloc_loc = last_ext->extLocation;
-		prealloc_len = last_ext->extLength;
-		/* Mark the extent as a hole */
-		last_ext->extLength = EXT_NOT_RECORDED_NOT_ALLOCATED |
-			(last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
-		last_ext->extLocation.logicalBlockNum = 0;
-		last_ext->extLocation.partitionReferenceNum = 0;
 	}
 
 	/* Can we merge with the previous extent? */
@@ -549,7 +540,7 @@
 		 * more extents, we may need to enter possible following
 		 * empty indirect extent.
 		 */
-		if (new_block_bytes || prealloc_len)
+		if (new_block_bytes)
 			udf_next_aext(inode, last_pos, &tmploc, &tmplen, 0);
 	}
 
@@ -583,17 +574,6 @@
 	}
 
 out:
-	/* Do we have some preallocated blocks saved? */
-	if (prealloc_len) {
-		err = udf_add_aext(inode, last_pos, &prealloc_loc,
-				   prealloc_len, 1);
-		if (err)
-			return err;
-		last_ext->extLocation = prealloc_loc;
-		last_ext->extLength = prealloc_len;
-		count++;
-	}
-
 	/* last_pos should point to the last written extent... */
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
 		last_pos->offset -= sizeof(struct short_ad);
@@ -609,13 +589,17 @@
 static void udf_do_extend_final_block(struct inode *inode,
 				      struct extent_position *last_pos,
 				      struct kernel_long_ad *last_ext,
-				      uint32_t final_block_len)
+				      uint32_t new_elen)
 {
-	struct super_block *sb = inode->i_sb;
 	uint32_t added_bytes;
 
-	added_bytes = final_block_len -
-		      (last_ext->extLength & (sb->s_blocksize - 1));
+	/*
+	 * Extent already large enough? It may be already rounded up to block
+	 * size...
+	 */
+	if (new_elen <= (last_ext->extLength & UDF_EXTENT_LENGTH_MASK))
+		return;
+	added_bytes = new_elen - (last_ext->extLength & UDF_EXTENT_LENGTH_MASK);
 	last_ext->extLength += added_bytes;
 	UDF_I(inode)->i_lenExtents += added_bytes;
 
@@ -632,12 +616,12 @@
 	int8_t etype;
 	struct super_block *sb = inode->i_sb;
 	sector_t first_block = newsize >> sb->s_blocksize_bits, offset;
-	unsigned long partial_final_block;
+	loff_t new_elen;
 	int adsize;
 	struct udf_inode_info *iinfo = UDF_I(inode);
 	struct kernel_long_ad extent;
 	int err = 0;
-	int within_final_block;
+	bool within_last_ext;
 
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
 		adsize = sizeof(struct short_ad);
@@ -646,8 +630,17 @@
 	else
 		BUG();
 
+	/*
+	 * When creating hole in file, just don't bother with preserving
+	 * preallocation. It likely won't be very useful anyway.
+	 */
+	udf_discard_prealloc(inode);
+
 	etype = inode_bmap(inode, first_block, &epos, &eloc, &elen, &offset);
-	within_final_block = (etype != -1);
+	within_last_ext = (etype != -1);
+	/* We don't expect extents past EOF... */
+	WARN_ON_ONCE(within_last_ext &&
+		     elen > ((loff_t)offset + 1) << inode->i_blkbits);
 
 	if ((!epos.bh && epos.offset == udf_file_entry_alloc_offset(inode)) ||
 	    (epos.bh && epos.offset == sizeof(struct allocExtDesc))) {
@@ -663,19 +656,17 @@
 		extent.extLength |= etype << 30;
 	}
 
-	partial_final_block = newsize & (sb->s_blocksize - 1);
+	new_elen = ((loff_t)offset << inode->i_blkbits) |
+					(newsize & (sb->s_blocksize - 1));
 
 	/* File has extent covering the new size (could happen when extending
 	 * inside a block)?
 	 */
-	if (within_final_block) {
+	if (within_last_ext) {
 		/* Extending file within the last file block */
-		udf_do_extend_final_block(inode, &epos, &extent,
-					  partial_final_block);
+		udf_do_extend_final_block(inode, &epos, &extent, new_elen);
 	} else {
-		loff_t add = ((loff_t)offset << sb->s_blocksize_bits) |
-			     partial_final_block;
-		err = udf_do_extend_file(inode, &epos, &extent, add);
+		err = udf_do_extend_file(inode, &epos, &extent, new_elen);
 	}
 
 	if (err < 0)
@@ -776,10 +767,11 @@
 		goto out_free;
 	}
 
-	/* Are we beyond EOF? */
+	/* Are we beyond EOF and preallocated extent? */
 	if (etype == -1) {
 		int ret;
 		loff_t hole_len;
+
 		isBeyondEOF = true;
 		if (count) {
 			if (c)
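
Editor's note, a worked example for the new_elen computation introduced above, with assumed values: a 2048-byte block size gives i_blkbits == 11, so with offset == 3 whole blocks and newsize ending 712 bytes into its final block, new_elen = (3 << 11) | 712 = 6144 + 712 = 6856. The OR is plain addition here because the block-aligned part and the sub-block remainder never overlap in bits.
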
diff --git a/kernel/fs/udf/truncate.c b/kernel/fs/udf/truncate.c
index 532cda9..036ebd8 100644
--- a/kernel/fs/udf/truncate.c
+++ b/kernel/fs/udf/truncate.c
@@ -120,60 +120,42 @@
 
 void udf_discard_prealloc(struct inode *inode)
 {
-	struct extent_position epos = { NULL, 0, {0, 0} };
+	struct extent_position epos = {};
+	struct extent_position prev_epos = {};
 	struct kernel_lb_addr eloc;
 	uint32_t elen;
 	uint64_t lbcount = 0;
 	int8_t etype = -1, netype;
-	int adsize;
 	struct udf_inode_info *iinfo = UDF_I(inode);
+	int bsize = 1 << inode->i_blkbits;
 
 	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_IN_ICB ||
-	    inode->i_size == iinfo->i_lenExtents)
+	    ALIGN(inode->i_size, bsize) == ALIGN(iinfo->i_lenExtents, bsize))
 		return;
-
-	if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_SHORT)
-		adsize = sizeof(struct short_ad);
-	else if (iinfo->i_alloc_type == ICBTAG_FLAG_AD_LONG)
-		adsize = sizeof(struct long_ad);
-	else
-		adsize = 0;
 
 	epos.block = iinfo->i_location;
 
 	/* Find the last extent in the file */
-	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 1)) != -1) {
-		etype = netype;
+	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
+		brelse(prev_epos.bh);
+		prev_epos = epos;
+		if (prev_epos.bh)
+			get_bh(prev_epos.bh);
+
+		etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
 		lbcount += elen;
 	}
 	if (etype == (EXT_NOT_RECORDED_ALLOCATED >> 30)) {
-		epos.offset -= adsize;
 		lbcount -= elen;
-		extent_trunc(inode, &epos, &eloc, etype, elen, 0);
-		if (!epos.bh) {
-			iinfo->i_lenAlloc =
-				epos.offset -
-				udf_file_entry_alloc_offset(inode);
-			mark_inode_dirty(inode);
-		} else {
-			struct allocExtDesc *aed =
-				(struct allocExtDesc *)(epos.bh->b_data);
-			aed->lengthAllocDescs =
-				cpu_to_le32(epos.offset -
-					    sizeof(struct allocExtDesc));
-			if (!UDF_QUERY_FLAG(inode->i_sb, UDF_FLAG_STRICT) ||
-			    UDF_SB(inode->i_sb)->s_udfrev >= 0x0201)
-				udf_update_tag(epos.bh->b_data, epos.offset);
-			else
-				udf_update_tag(epos.bh->b_data,
-					       sizeof(struct allocExtDesc));
-			mark_buffer_dirty_inode(epos.bh, inode);
-		}
+		udf_delete_aext(inode, prev_epos);
+		udf_free_blocks(inode->i_sb, inode, &eloc, 0,
+				DIV_ROUND_UP(elen, 1 << inode->i_blkbits));
 	}
 	/* This inode entry is in-memory only and thus we don't have to mark
 	 * the inode dirty */
 	iinfo->i_lenExtents = lbcount;
 	brelse(epos.bh);
+	brelse(prev_epos.bh);
 }
 
 static void udf_update_alloc_ext_desc(struct inode *inode,
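
Editor's note: the rewritten udf_discard_prealloc() above replaces the manual descriptor surgery with a trailing-cursor walk. An annotated restatement of that loop, since the buffer-head refcounting is easy to misread:

	/* Peek: is there another extent after the current position? */
	while ((netype = udf_next_aext(inode, &epos, &eloc, &elen, 0)) != -1) {
		brelse(prev_epos.bh);		/* drop the old snapshot */
		prev_epos = epos;		/* snapshot the position in
						   front of this extent */
		if (prev_epos.bh)
			get_bh(prev_epos.bh);	/* pin its buffer head */

		/* Now actually step over the extent. */
		etype = udf_next_aext(inode, &epos, &eloc, &elen, 1);
		lbcount += elen;
	}
	/* prev_epos now addresses the last extent, so udf_delete_aext()
	 * can remove it if it turns out to be unrecorded preallocation. */
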
diff --git a/kernel/include/asm-generic/atomic.h b/kernel/include/asm-generic/atomic.h
index 11f96f4..34100ba 100644
--- a/kernel/include/asm-generic/atomic.h
+++ b/kernel/include/asm-generic/atomic.h
@@ -76,9 +76,9 @@
 {									\
 	unsigned long flags;						\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	v->counter = v->counter c_op i;					\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 }
 
 #define ATOMIC_OP_RETURN(op, c_op)					\
@@ -87,9 +87,9 @@
 	unsigned long flags;						\
 	int ret;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	ret = (v->counter = v->counter c_op i);				\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return ret;							\
 }
@@ -100,10 +100,10 @@
 	unsigned long flags;						\
 	int ret;							\
 									\
-	raw_local_irq_save(flags);					\
+	flags = hard_local_irq_save();					\
 	ret = v->counter;						\
 	v->counter = v->counter c_op i;					\
-	raw_local_irq_restore(flags);					\
+	hard_local_irq_restore(flags);					\
 									\
 	return ret;							\
 }
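
Editor's note: the raw_local_irq_save() -> hard_local_irq_save() substitutions here, and the analogous ones in cmpxchg-local.h, cmpxchg.h and percpu.h below, all follow one rule: once interrupts are pipelined, the raw/local variants only stall the in-band stage, so out-of-band IRQs could still preempt these read-modify-write sequences. Expanded form of one of the generic ops above, for clarity:

/* ATOMIC_OP(add, +) from above, expanded: the CPU is masked for real. */
static inline void generic_atomic_add(int i, atomic_t *v)
{
	unsigned long flags;

	flags = hard_local_irq_save();	/* hardware-level masking */
	v->counter = v->counter + i;
	hard_local_irq_restore(flags);
}
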
diff --git a/kernel/include/asm-generic/cmpxchg-local.h b/kernel/include/asm-generic/cmpxchg-local.h
index f17f14f..67d712f 100644
--- a/kernel/include/asm-generic/cmpxchg-local.h
+++ b/kernel/include/asm-generic/cmpxchg-local.h
@@ -23,7 +23,7 @@
 	if (size == 8 && sizeof(unsigned long) != 8)
 		wrong_size_cmpxchg(ptr);
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	switch (size) {
 	case 1: prev = *(u8 *)ptr;
 		if (prev == old)
@@ -44,7 +44,7 @@
 	default:
 		wrong_size_cmpxchg(ptr);
 	}
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	return prev;
 }
 
@@ -57,11 +57,11 @@
 	u64 prev;
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	prev = *(u64 *)ptr;
 	if (prev == old)
 		*(u64 *)ptr = new;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	return prev;
 }
 
diff --git a/kernel/include/asm-generic/cmpxchg.h b/kernel/include/asm-generic/cmpxchg.h
index 9a24510..475206b 100644
--- a/kernel/include/asm-generic/cmpxchg.h
+++ b/kernel/include/asm-generic/cmpxchg.h
@@ -32,10 +32,10 @@
 #ifdef __xchg_u8
 		return __xchg_u8(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u8 *)ptr;
 		*(volatile u8 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u8 */
 
@@ -43,10 +43,10 @@
 #ifdef __xchg_u16
 		return __xchg_u16(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u16 *)ptr;
 		*(volatile u16 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u16 */
 
@@ -54,10 +54,10 @@
 #ifdef __xchg_u32
 		return __xchg_u32(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u32 *)ptr;
 		*(volatile u32 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u32 */
 
@@ -66,10 +66,10 @@
 #ifdef __xchg_u64
 		return __xchg_u64(x, ptr);
 #else
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 		ret = *(volatile u64 *)ptr;
 		*(volatile u64 *)ptr = x;
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		return ret;
 #endif /* __xchg_u64 */
 #endif /* CONFIG_64BIT */
diff --git a/kernel/include/asm-generic/irq_pipeline.h b/kernel/include/asm-generic/irq_pipeline.h
new file mode 100644
index 0000000..0f81ed0
--- /dev/null
+++ b/kernel/include/asm-generic/irq_pipeline.h
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef __ASM_GENERIC_IRQ_PIPELINE_H
+#define __ASM_GENERIC_IRQ_PIPELINE_H
+
+#include <linux/kconfig.h>
+#include <linux/types.h>
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+unsigned long inband_irq_save(void);
+void inband_irq_restore(unsigned long flags);
+void inband_irq_enable(void);
+void inband_irq_disable(void);
+int inband_irqs_disabled(void);
+
+#define hard_cond_local_irq_enable()		hard_local_irq_enable()
+#define hard_cond_local_irq_disable()		hard_local_irq_disable()
+#define hard_cond_local_irq_save()		hard_local_irq_save()
+#define hard_cond_local_irq_restore(__flags)	hard_local_irq_restore(__flags)
+
+#define hard_local_irq_save()			native_irq_save()
+#define hard_local_irq_restore(__flags)		native_irq_restore(__flags)
+#define hard_local_irq_enable()			native_irq_enable()
+#define hard_local_irq_disable()		native_irq_disable()
+#define hard_local_save_flags()			native_save_flags()
+
+#define hard_irqs_disabled()			native_irqs_disabled()
+#define hard_irqs_disabled_flags(__flags)	native_irqs_disabled_flags(__flags)
+
+void irq_pipeline_nmi_enter(void);
+void irq_pipeline_nmi_exit(void);
+
+/* Swap then merge virtual and hardware interrupt states. */
+#define irqs_merge_flags(__flags, __stalled)				\
+	({								\
+		unsigned long __combo =					\
+			arch_irqs_virtual_to_native_flags(__stalled) |	\
+			arch_irqs_native_to_virtual_flags(__flags);	\
+		__combo;						\
+	})
+
+/* Extract, then swap back, the virtual and hardware interrupt states. */
+#define irqs_split_flags(__combo, __stall_r)				\
+	({								\
+		unsigned long __virt = (__combo);			\
+		*(__stall_r) = hard_irqs_disabled_flags(__combo);	\
+		__virt &= ~arch_irqs_virtual_to_native_flags(*(__stall_r)); \
+		arch_irqs_virtual_to_native_flags(__virt);		\
+	})
+
+#define hard_local_irq_sync()			native_irq_sync()
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+#define hard_local_save_flags()			({ unsigned long __flags; \
+						raw_local_save_flags(__flags); __flags; })
+#define hard_local_irq_enable()			raw_local_irq_enable()
+#define hard_local_irq_disable()		raw_local_irq_disable()
+#define hard_local_irq_save()			({ unsigned long __flags; \
+						raw_local_irq_save(__flags); __flags; })
+#define hard_local_irq_restore(__flags)		raw_local_irq_restore(__flags)
+
+#define hard_cond_local_irq_enable()		do { } while(0)
+#define hard_cond_local_irq_disable()		do { } while(0)
+#define hard_cond_local_irq_save()		0
+#define hard_cond_local_irq_restore(__flags)	do { (void)(__flags); } while(0)
+
+#define hard_irqs_disabled()			irqs_disabled()
+#define hard_irqs_disabled_flags(__flags)	raw_irqs_disabled_flags(__flags)
+
+static inline void irq_pipeline_nmi_enter(void) { }
+static inline void irq_pipeline_nmi_exit(void) { }
+
+#define hard_local_irq_sync()			do { } while (0)
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+void check_inband_stage(void);
+#define check_hard_irqs_disabled()		\
+	WARN_ON_ONCE(!hard_irqs_disabled())
+#else
+static inline void check_inband_stage(void) { }
+static inline int check_hard_irqs_disabled(void) { return 0; }
+#endif
+
+extern bool irq_pipeline_oopsing;
+
+static __always_inline bool irqs_pipelined(void)
+{
+	return IS_ENABLED(CONFIG_IRQ_PIPELINE);
+}
+
+static __always_inline bool irq_pipeline_debug(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_IRQ_PIPELINE) &&
+		!irq_pipeline_oopsing;
+}
+
+static __always_inline bool irq_pipeline_debug_locking(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_HARD_LOCKS);
+}
+
+#endif /* __ASM_GENERIC_IRQ_PIPELINE_H */
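
Editor's note: a usage sketch for the conditional helpers declared above. Code shared between pipelined and non-pipelined builds can use hard_cond_local_irq_save()/restore(), which mask the CPU when CONFIG_IRQ_PIPELINE is enabled and cost nothing otherwise (my_state is a hypothetical example type):

struct my_state {
	unsigned long counter;
};

static inline void my_state_bump(struct my_state *s)
{
	unsigned long flags;

	flags = hard_cond_local_irq_save();
	s->counter++;			/* must not race the oob stage */
	hard_cond_local_irq_restore(flags);
}
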
diff --git a/kernel/include/asm-generic/percpu.h b/kernel/include/asm-generic/percpu.h
index 6432a7f..8a35f48 100644
--- a/kernel/include/asm-generic/percpu.h
+++ b/kernel/include/asm-generic/percpu.h
@@ -125,9 +125,9 @@
 ({									\
 	typeof(pcp) ___ret;						\
 	unsigned long ___flags;						\
-	raw_local_irq_save(___flags);					\
+	___flags = hard_local_irq_save();				\
 	___ret = raw_cpu_generic_read(pcp);				\
-	raw_local_irq_restore(___flags);				\
+	hard_local_irq_restore(___flags);				\
 	___ret;								\
 })
 
@@ -144,9 +144,9 @@
 #define this_cpu_generic_to_op(pcp, val, op)				\
 do {									\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	raw_cpu_generic_to_op(pcp, val, op);				\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 } while (0)
 
 
@@ -154,9 +154,9 @@
 ({									\
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_add_return(pcp, val);			\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@@ -164,9 +164,9 @@
 ({									\
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_xchg(pcp, nval);			\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@@ -174,9 +174,9 @@
 ({									\
 	typeof(pcp) __ret;						\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_cmpxchg(pcp, oval, nval);		\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
@@ -184,10 +184,10 @@
 ({									\
 	int __ret;							\
 	unsigned long __flags;						\
-	raw_local_irq_save(__flags);					\
+	__flags = hard_local_irq_save();				\
 	__ret = raw_cpu_generic_cmpxchg_double(pcp1, pcp2,		\
 			oval1, oval2, nval1, nval2);			\
-	raw_local_irq_restore(__flags);					\
+	hard_local_irq_restore(__flags);				\
 	__ret;								\
 })
 
diff --git a/kernel/include/asm-generic/xenomai/dovetail/thread.h b/kernel/include/asm-generic/xenomai/dovetail/thread.h
new file mode 120000
index 0000000..3e37870
--- /dev/null
+++ b/kernel/include/asm-generic/xenomai/dovetail/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h
\ No newline at end of file
diff --git a/kernel/include/asm-generic/xenomai/ipipe/thread.h b/kernel/include/asm-generic/xenomai/ipipe/thread.h
new file mode 120000
index 0000000..e113f79
--- /dev/null
+++ b/kernel/include/asm-generic/xenomai/ipipe/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h
\ No newline at end of file
diff --git a/kernel/include/asm-generic/xenomai/machine.h b/kernel/include/asm-generic/xenomai/machine.h
new file mode 120000
index 0000000..0b119c7
--- /dev/null
+++ b/kernel/include/asm-generic/xenomai/machine.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h
\ No newline at end of file
diff --git a/kernel/include/asm-generic/xenomai/pci_ids.h b/kernel/include/asm-generic/xenomai/pci_ids.h
new file mode 120000
index 0000000..fb9916d
--- /dev/null
+++ b/kernel/include/asm-generic/xenomai/pci_ids.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h
\ No newline at end of file
diff --git a/kernel/include/asm-generic/xenomai/syscall.h b/kernel/include/asm-generic/xenomai/syscall.h
new file mode 120000
index 0000000..7f6597c
--- /dev/null
+++ b/kernel/include/asm-generic/xenomai/syscall.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h
\ No newline at end of file
diff --git a/kernel/include/asm-generic/xenomai/syscall32.h b/kernel/include/asm-generic/xenomai/syscall32.h
new file mode 120000
index 0000000..8f1d676
--- /dev/null
+++ b/kernel/include/asm-generic/xenomai/syscall32.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h
\ No newline at end of file
diff --git a/kernel/include/asm-generic/xenomai/wrappers.h b/kernel/include/asm-generic/xenomai/wrappers.h
new file mode 120000
index 0000000..07d4764
--- /dev/null
+++ b/kernel/include/asm-generic/xenomai/wrappers.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h
\ No newline at end of file
diff --git a/kernel/include/dovetail/irq.h b/kernel/include/dovetail/irq.h
new file mode 100644
index 0000000..ac8b531
--- /dev/null
+++ b/kernel/include/dovetail/irq.h
@@ -0,0 +1,11 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_IRQ_H
+#define _DOVETAIL_IRQ_H
+
+/* Placeholders for pre- and post-IRQ handling. */
+
+static inline void irq_enter_pipeline(void) { }
+
+static inline void irq_exit_pipeline(void) { }
+
+#endif /* !_DOVETAIL_IRQ_H */
diff --git a/kernel/include/dovetail/mm_info.h b/kernel/include/dovetail/mm_info.h
new file mode 100644
index 0000000..504bd1d
--- /dev/null
+++ b/kernel/include/dovetail/mm_info.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_MM_INFO_H
+#define _DOVETAIL_MM_INFO_H
+
+/*
+ * Placeholder for per-mm state information defined by the co-kernel.
+ */
+
+struct oob_mm_state {
+};
+
+#endif /* !_DOVETAIL_MM_INFO_H */
diff --git a/kernel/include/dovetail/netdevice.h b/kernel/include/dovetail/netdevice.h
new file mode 100644
index 0000000..06e8205
--- /dev/null
+++ b/kernel/include/dovetail/netdevice.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_NETDEVICE_H
+#define _DOVETAIL_NETDEVICE_H
+
+/*
+ * Placeholder for per-device state information defined by the
+ * out-of-band network stack.
+ */
+
+struct oob_netdev_state {
+};
+
+#endif /* !_DOVETAIL_NETDEVICE_H */
diff --git a/kernel/include/dovetail/poll.h b/kernel/include/dovetail/poll.h
new file mode 100644
index 0000000..d15b14f
--- /dev/null
+++ b/kernel/include/dovetail/poll.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_POLL_H
+#define _DOVETAIL_POLL_H
+
+/*
+ * Placeholder for the out-of-band poll operation descriptor.
+ */
+
+struct oob_poll_wait {
+};
+
+#endif /* !_DOVETAIL_POLL_H */
diff --git a/kernel/include/dovetail/spinlock.h b/kernel/include/dovetail/spinlock.h
new file mode 100644
index 0000000..381031a
--- /dev/null
+++ b/kernel/include/dovetail/spinlock.h
@@ -0,0 +1,21 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_SPINLOCK_H
+#define _DOVETAIL_SPINLOCK_H
+
+/* Placeholders for hard/hybrid spinlock modifiers. */
+
+struct raw_spinlock;
+
+static inline void hard_spin_lock_prepare(struct raw_spinlock *lock)
+{ }
+
+static inline void hard_spin_unlock_finish(struct raw_spinlock *lock)
+{ }
+
+static inline void hard_spin_trylock_prepare(struct raw_spinlock *lock)
+{ }
+
+static inline void hard_spin_trylock_fail(struct raw_spinlock *lock)
+{ }
+
+#endif /* !_DOVETAIL_SPINLOCK_H */
diff --git a/kernel/include/dovetail/thread_info.h b/kernel/include/dovetail/thread_info.h
new file mode 100644
index 0000000..4dea8bf
--- /dev/null
+++ b/kernel/include/dovetail/thread_info.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _DOVETAIL_THREAD_INFO_H
+#define _DOVETAIL_THREAD_INFO_H
+
+/*
+ * Placeholder for per-thread state information defined by the
+ * co-kernel.
+ */
+
+struct oob_thread_state {
+};
+
+#endif /* !_DOVETAIL_THREAD_INFO_H */
diff --git a/kernel/include/linux/clockchips.h b/kernel/include/linux/clockchips.h
index 8ae9a95..bda5d7d 100644
--- a/kernel/include/linux/clockchips.h
+++ b/kernel/include/linux/clockchips.h
@@ -15,6 +15,7 @@
 # include <linux/cpumask.h>
 # include <linux/ktime.h>
 # include <linux/notifier.h>
+# include <linux/irqstage.h>
 
 struct clock_event_device;
 struct module;
@@ -31,6 +32,7 @@
  *		from DETACHED or SHUTDOWN.
  * ONESHOT_STOPPED: Device was programmed in ONESHOT mode and is temporarily
  *		    stopped.
+ * RESERVED:	Device is controlled by an out-of-band core via a proxy.
  */
 enum clock_event_state {
 	CLOCK_EVT_STATE_DETACHED,
@@ -38,6 +40,7 @@
 	CLOCK_EVT_STATE_PERIODIC,
 	CLOCK_EVT_STATE_ONESHOT,
 	CLOCK_EVT_STATE_ONESHOT_STOPPED,
+	CLOCK_EVT_STATE_RESERVED,
 };
 
 /*
@@ -67,6 +70,17 @@
  */
 # define CLOCK_EVT_FEAT_HRTIMER		0x000080
 
+/*
+ * Interrupt pipeline support:
+ *
+ * - Clockevent device can work with pipelined timer events (i.e. proxied).
+ * - Device currently delivers high-precision events via out-of-band interrupts.
+ * - Device acts as a proxy for timer interrupt pipelining.
+ */
+# define CLOCK_EVT_FEAT_PIPELINE	0x000100
+# define CLOCK_EVT_FEAT_OOB		0x000200
+# define CLOCK_EVT_FEAT_PROXY		0x000400
+
 /**
  * struct clock_event_device - clock event device descriptor
  * @event_handler:	Assigned by the framework to be called by the low
@@ -91,7 +105,7 @@
  * @max_delta_ticks:	maximum delta value in ticks stored for reconfiguration
  * @name:		ptr to clock event name
  * @rating:		variable to rate clock event devices
- * @irq:		IRQ number (only for non CPU local devices)
+ * @irq:		IRQ number (only for non CPU local devices, or pipelined timers)
  * @bound_on:		Bound on CPU
  * @cpumask:		cpumask to indicate for which CPUs this device works
  * @list:		list head for the management code
@@ -137,6 +151,11 @@
 	return dev->state_use_accessors == CLOCK_EVT_STATE_DETACHED;
 }
 
+static inline bool clockevent_state_reserved(struct clock_event_device *dev)
+{
+	return dev->state_use_accessors == CLOCK_EVT_STATE_RESERVED;
+}
+
 static inline bool clockevent_state_shutdown(struct clock_event_device *dev)
 {
 	return dev->state_use_accessors == CLOCK_EVT_STATE_SHUTDOWN;
@@ -155,6 +174,11 @@
 static inline bool clockevent_state_oneshot_stopped(struct clock_event_device *dev)
 {
 	return dev->state_use_accessors == CLOCK_EVT_STATE_ONESHOT_STOPPED;
+}
+
+static inline bool clockevent_is_oob(struct clock_event_device *dev)
+{
+	return !!(dev->features & CLOCK_EVT_FEAT_OOB);
 }
 
 /*
@@ -186,6 +210,8 @@
 extern void clockevents_config_and_register(struct clock_event_device *dev,
 					    u32 freq, unsigned long min_delta,
 					    unsigned long max_delta);
+extern void clockevents_switch_state(struct clock_event_device *dev,
+				     enum clock_event_state state);
 
 extern int clockevents_update_freq(struct clock_event_device *ce, u32 freq);
 
@@ -215,6 +241,49 @@
 static inline void tick_setup_hrtimer_broadcast(void) { }
 # endif
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+struct clock_proxy_device {
+	struct clock_event_device proxy_device;
+	struct clock_event_device *real_device;
+	void (*handle_oob_event)(struct clock_event_device *dev);
+	void (*__setup_handler)(struct clock_proxy_device *dev);
+	void (*__original_handler)(struct clock_event_device *dev);
+};
+
+void tick_notify_proxy(void);
+
+static inline
+void clockevents_handle_event(struct clock_event_device *ced)
+{
+	/*
+	 * If called from the in-band stage, or for delivering a
+	 * high-precision timer event to the out-of-band stage, call
+	 * the event handler immediately.
+	 *
+	 * Otherwise, ced is still the in-band tick device for the
+	 * current CPU, so just relay the incoming tick to the in-band
+	 * stage via tick_notify_proxy().  This situation can happen
+	 * when all CPUs receive the same out-of-band IRQ from a given
+	 * clock event device, but only a subset of the online CPUs has
+	 * enabled a proxy.
+	 */
+	if (clockevent_is_oob(ced) || running_inband())
+		ced->event_handler(ced);
+	else
+		tick_notify_proxy();
+}
+
+#else
+
+static inline
+void clockevents_handle_event(struct clock_event_device *ced)
+{
+	ced->event_handler(ced);
+}
+
+#endif	/* !CONFIG_IRQ_PIPELINE */
+
 #else /* !CONFIG_GENERIC_CLOCKEVENTS: */
 
 static inline void clockevents_suspend(void) { }
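
Editor's note: a sketch of how a companion core is expected to use the proxy device declared above. A setup handler fills in handle_oob_event, which then receives ticks from the out-of-band stage, while clockevents_handle_event() relays any tick meant for the in-band stage via tick_notify_proxy(). The registration entry point is not part of this hunk, so the my_* names below are hypothetical:

static void my_oob_tick(struct clock_event_device *proxy_dev)
{
	/* Runs from the out-of-band stage on each proxied event. */
}

static void my_setup_proxy(struct clock_proxy_device *dev)
{
	dev->handle_oob_event = my_oob_tick;
	/* dev->real_device still drives the hardware underneath. */
}
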
diff --git a/kernel/include/linux/clocksource.h b/kernel/include/linux/clocksource.h
index 83a3ebf..9665974 100644
--- a/kernel/include/linux/clocksource.h
+++ b/kernel/include/linux/clocksource.h
@@ -13,12 +13,15 @@
 #include <linux/timex.h>
 #include <linux/time.h>
 #include <linux/list.h>
+#include <linux/hashtable.h>
 #include <linux/cache.h>
 #include <linux/timer.h>
+#include <linux/cdev.h>
 #include <linux/init.h>
 #include <linux/of.h>
 #include <asm/div64.h>
 #include <asm/io.h>
+#include <uapi/linux/clocksource.h>
 
 struct clocksource;
 struct module;
@@ -28,7 +31,14 @@
 #include <asm/clocksource.h>
 #endif
 
+
 #include <vdso/clocksource.h>
+
+enum clocksource_vdso_type {
+	CLOCKSOURCE_VDSO_NONE = 0,
+	CLOCKSOURCE_VDSO_ARCHITECTED,
+	CLOCKSOURCE_VDSO_MMIO,	/* <= Must be last. */
+};
 
 /**
  * struct clocksource - hardware abstraction for a free running counter
@@ -101,6 +111,7 @@
 	struct list_head	list;
 	int			rating;
 	enum vdso_clock_mode	vdso_clock_mode;
+	enum clocksource_vdso_type vdso_type;
 	unsigned long		flags;
 
 	int			(*enable)(struct clocksource *cs);
@@ -118,6 +129,36 @@
 	u64			wd_last;
 #endif
 	struct module		*owner;
+};
+
+struct clocksource_mmio {
+	void __iomem *reg;
+	struct clocksource clksrc;
+};
+
+struct clocksource_user_mmio {
+	struct clocksource_mmio mmio;
+	void __iomem *reg_upper;
+	unsigned int bits_lower;
+	unsigned int mask_lower;
+	unsigned int mask_upper;
+	enum clksrc_user_mmio_type type;
+	unsigned long phys_lower;
+	unsigned long phys_upper;
+	unsigned int id;
+	struct device *dev;
+	struct cdev cdev;
+	DECLARE_HASHTABLE(mappings, 10);
+	struct spinlock lock;
+	struct list_head link;
+};
+
+struct clocksource_mmio_regs {
+	void __iomem *reg_upper;
+	void __iomem *reg_lower;
+	unsigned int bits_upper;
+	unsigned int bits_lower;
+	unsigned long (*revmap)(void *);
 };
 
 /*
@@ -264,10 +305,21 @@
 extern u64 clocksource_mmio_readl_down(struct clocksource *);
 extern u64 clocksource_mmio_readw_up(struct clocksource *);
 extern u64 clocksource_mmio_readw_down(struct clocksource *);
+extern u64 clocksource_dual_mmio_readw_up(struct clocksource *);
+extern u64 clocksource_dual_mmio_readl_up(struct clocksource *);
 
 extern int clocksource_mmio_init(void __iomem *, const char *,
 	unsigned long, int, unsigned, u64 (*)(struct clocksource *));
 
+extern int clocksource_user_mmio_init(struct clocksource_user_mmio *ucs,
+				      const struct clocksource_mmio_regs *regs,
+				      unsigned long hz);
+
+extern int clocksource_user_single_mmio_init(
+	void __iomem *base, const char *name,
+	unsigned long hz, int rating, unsigned int bits,
+	u64 (*read)(struct clocksource *));
+
 extern int clocksource_i8253_init(void);
 
 #define TIMER_OF_DECLARE(name, compat, fn) \
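
Editor's note: a usage sketch for the user-mappable MMIO clocksource interface added above, based only on the prototypes in this hunk. A 32-bit up-counting timer could be registered with the single-register variant; the base address, 24 MHz rate and 300 rating are made-up values:

static int __init my_timer_clocksource_init(void __iomem *base)
{
	/* Free-running counter, 32 valid bits, read through the
	 * clocksource_mmio_readl_up() accessor declared above. */
	return clocksource_user_single_mmio_init(base, "my-timer",
						 24000000, 300, 32,
						 clocksource_mmio_readl_up);
}
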
diff --git a/kernel/include/linux/console.h b/kernel/include/linux/console.h
index 4b1e26c..1413a45 100644
--- a/kernel/include/linux/console.h
+++ b/kernel/include/linux/console.h
@@ -141,6 +141,7 @@
 struct console {
 	char	name[16];
 	void	(*write)(struct console *, const char *, unsigned);
+	void	(*write_raw)(struct console *, const char *, unsigned);
 	int	(*read)(struct console *, char *, unsigned);
 	struct tty_driver *(*device)(struct console *, int *);
 	void	(*unblank)(void);
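
Editor's note: the new write_raw() method gives a console an output path usable when taking the regular console machinery is unsafe, e.g. from the out-of-band stage. A hedged sketch of a UART console wiring it up; the my_uart_* helpers are hypothetical polling primitives:

static void my_uart_write(struct console *con, const char *s, unsigned count);
static void my_uart_poll_putchar(char c);

static void my_uart_write_raw(struct console *con, const char *s,
			      unsigned count)
{
	while (count--)
		my_uart_poll_putchar(*s++);	/* busy-wait, lock-free */
}

static struct console my_console = {
	.name		= "myuart",
	.write		= my_uart_write,	/* normal path */
	.write_raw	= my_uart_write_raw,	/* raw/oob path */
	.flags		= CON_PRINTBUFFER,
	.index		= -1,
};
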
diff --git a/kernel/include/linux/context_tracking_state.h b/kernel/include/linux/context_tracking_state.h
index 65a60d3..814b57d 100644
--- a/kernel/include/linux/context_tracking_state.h
+++ b/kernel/include/linux/context_tracking_state.h
@@ -28,7 +28,7 @@
 
 static __always_inline bool context_tracking_enabled(void)
 {
-	return static_branch_unlikely(&context_tracking_key);
+	return static_branch_unlikely(&context_tracking_key) && running_inband();
 }
 
 static __always_inline bool context_tracking_enabled_cpu(int cpu)
diff --git a/kernel/include/linux/dmaengine.h b/kernel/include/linux/dmaengine.h
index 08537ef..e8bc400 100644
--- a/kernel/include/linux/dmaengine.h
+++ b/kernel/include/linux/dmaengine.h
@@ -62,6 +62,7 @@
 	DMA_ASYNC_TX,
 	DMA_SLAVE,
 	DMA_CYCLIC,
+	DMA_OOB,
 	DMA_INTERLEAVE,
 	DMA_COMPLETION_NO_ORDER,
 	DMA_REPEAT,
@@ -191,6 +192,13 @@
  *  transaction is marked with DMA_PREP_REPEAT will cause the new transaction
  *  to never be processed and stay in the issued queue forever. The flag is
  *  ignored if the previous transaction is not a repeated transaction.
+ * @DMA_OOB_INTERRUPT - if DMA_OOB is supported, handle the completion
+ *  interrupt for this transaction from the out-of-band stage (implies
+ *  DMA_PREP_INTERRUPT). This includes calling the completion callback routine
+ *  from such context if defined for the transaction.
+ * @DMA_OOB_PULSE - if DMA_OOB is supported, (slave) transactions on the
+ *  out-of-band channel should be triggered manually by a call to
+ *  dma_pulse_oob() (implies DMA_OOB_INTERRUPT).
  */
 enum dma_ctrl_flags {
 	DMA_PREP_INTERRUPT = (1 << 0),
@@ -203,6 +211,8 @@
 	DMA_PREP_CMD = (1 << 7),
 	DMA_PREP_REPEAT = (1 << 8),
 	DMA_PREP_LOAD_EOT = (1 << 9),
+	DMA_OOB_INTERRUPT = (1 << 10),
+	DMA_OOB_PULSE = (1 << 11),
 };
 
 /**
@@ -940,6 +950,7 @@
 					    dma_cookie_t cookie,
 					    struct dma_tx_state *txstate);
 	void (*device_issue_pending)(struct dma_chan *chan);
+	int (*device_pulse_oob)(struct dma_chan *chan);
 	void (*device_release)(struct dma_device *dev);
 	/* debugfs support */
 #ifdef CONFIG_DEBUG_FS
@@ -983,11 +994,22 @@
 						  dir, flags, NULL);
 }
 
+static inline bool dmaengine_oob_valid(struct dma_chan *chan,
+				unsigned long flags)
+{
+	return !(dovetailing() &&
+		flags & (DMA_OOB_INTERRUPT|DMA_OOB_PULSE) &&
+		!test_bit(DMA_OOB, chan->device->cap_mask.bits));
+}
+
 static inline struct dma_async_tx_descriptor *dmaengine_prep_slave_sg(
 	struct dma_chan *chan, struct scatterlist *sgl,	unsigned int sg_len,
 	enum dma_transfer_direction dir, unsigned long flags)
 {
 	if (!chan || !chan->device || !chan->device->device_prep_slave_sg)
+		return NULL;
+
+	if (!dmaengine_oob_valid(chan, flags))
 		return NULL;
 
 	return chan->device->device_prep_slave_sg(chan, sgl, sg_len,
@@ -1015,6 +1037,9 @@
 		unsigned long flags)
 {
 	if (!chan || !chan->device || !chan->device->device_prep_dma_cyclic)
+		return NULL;
+
+	if (!dmaengine_oob_valid(chan, flags))
 		return NULL;
 
 	return chan->device->device_prep_dma_cyclic(chan, buf_addr, buf_len,
@@ -1422,6 +1447,22 @@
 }
 
 /**
+ * dma_pulse_oob - manual trigger of an out-of-band transaction
+ * @chan: target DMA channel
+ *
+ * Trigger the next out-of-band transaction immediately.
+ */
+static inline int dma_pulse_oob(struct dma_chan *chan)
+{
+	int ret = -ENOTSUPP;
+
+	if (chan->device->device_pulse_oob)
+		ret = chan->device->device_pulse_oob(chan);
+
+	return ret;
+}
+
+/**
  * dma_async_is_tx_complete - poll for transaction completion
  * @chan: DMA channel
  * @cookie: transaction identifier to check status of
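
Editor's note, putting the new out-of-band DMA pieces together: a client that wants completion interrupts handled out-of-band and manual period triggering would pass the new flags at prep time, then kick transfers with dma_pulse_oob(). A sketch under the assumption that chan, buf (a dma_addr_t), len and period were set up by the usual slave-DMA calls:

	struct dma_async_tx_descriptor *d;
	dma_cookie_t cookie;

	d = dmaengine_prep_dma_cyclic(chan, buf, len, period, DMA_MEM_TO_DEV,
				      DMA_OOB_INTERRUPT | DMA_OOB_PULSE);
	if (!d)
		return -EINVAL;	/* e.g. channel lacks the DMA_OOB cap */

	cookie = dmaengine_submit(d);
	dma_async_issue_pending(chan);

	/* Later, typically from oob context: fire the next period. */
	return dma_pulse_oob(chan);
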
diff --git a/kernel/include/linux/dovetail.h b/kernel/include/linux/dovetail.h
new file mode 100644
index 0000000..9dcbfc5
--- /dev/null
+++ b/kernel/include/linux/dovetail.h
@@ -0,0 +1,325 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _LINUX_DOVETAIL_H
+#define _LINUX_DOVETAIL_H
+
+#ifdef CONFIG_DOVETAIL
+
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/thread_info.h>
+#include <linux/irqstage.h>
+#include <uapi/asm-generic/dovetail.h>
+#include <asm/dovetail.h>
+
+struct pt_regs;
+struct task_struct;
+struct file;
+struct files_struct;
+
+enum inband_event_type {
+	INBAND_TASK_SIGNAL,
+	INBAND_TASK_MIGRATION,
+	INBAND_TASK_EXIT,
+	INBAND_TASK_RETUSER,
+	INBAND_TASK_PTSTEP,
+	INBAND_TASK_PTSTOP,
+	INBAND_TASK_PTCONT,
+	INBAND_PROCESS_CLEANUP,
+};
+
+struct dovetail_migration_data {
+	struct task_struct *task;
+	int dest_cpu;
+};
+
+struct dovetail_altsched_context {
+	struct task_struct *task;
+	struct mm_struct *active_mm;
+	bool borrowed_mm;
+};
+
+#define protect_inband_mm(__flags)			\
+	do {						\
+		(__flags) = hard_cond_local_irq_save();	\
+		barrier();				\
+	} while (0)					\
+
+#define unprotect_inband_mm(__flags)			\
+	do {						\
+		barrier();				\
+		hard_cond_local_irq_restore(__flags);	\
+	} while (0)					\
+
+void inband_task_init(struct task_struct *p);
+
+int pipeline_syscall(unsigned int nr, struct pt_regs *regs);
+
+void __oob_trap_notify(unsigned int exception,
+		       struct pt_regs *regs);
+
+static __always_inline void oob_trap_notify(unsigned int exception,
+					struct pt_regs *regs)
+{
+	if (running_oob() && !test_thread_local_flags(_TLF_OOBTRAP))
+		__oob_trap_notify(exception, regs);
+}
+
+void __oob_trap_unwind(unsigned int exception,
+		struct pt_regs *regs);
+
+static __always_inline void oob_trap_unwind(unsigned int exception,
+					struct pt_regs *regs)
+{
+	if (test_thread_local_flags(_TLF_OOBTRAP))
+		__oob_trap_unwind(exception, regs);
+}
+
+void inband_event_notify(enum inband_event_type,
+			 void *data);
+
+void inband_clock_was_set(void);
+
+static inline void inband_signal_notify(struct task_struct *p)
+{
+	if (test_ti_local_flags(task_thread_info(p), _TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_SIGNAL, p);
+}
+
+static inline void inband_migration_notify(struct task_struct *p, int cpu)
+{
+	if (test_ti_local_flags(task_thread_info(p), _TLF_DOVETAIL)) {
+		struct dovetail_migration_data d = {
+			.task = p,
+			.dest_cpu = cpu,
+		};
+		inband_event_notify(INBAND_TASK_MIGRATION, &d);
+	}
+}
+
+static inline void inband_exit_notify(void)
+{
+	inband_event_notify(INBAND_TASK_EXIT, NULL);
+}
+
+static inline void inband_cleanup_notify(struct mm_struct *mm)
+{
+	/*
+	 * Notify regardless of _TLF_DOVETAIL: current may have
+	 * resources to clean up although it might not be interested
+	 * in other kernel events.
+	 */
+	inband_event_notify(INBAND_PROCESS_CLEANUP, mm);
+}
+
+static inline void inband_ptstop_notify(void)
+{
+	if (test_thread_local_flags(_TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_PTSTOP, current);
+}
+
+static inline void inband_ptcont_notify(void)
+{
+	if (test_thread_local_flags(_TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_PTCONT, current);
+}
+
+static inline void inband_ptstep_notify(struct task_struct *tracee)
+{
+	if (test_ti_local_flags(task_thread_info(tracee), _TLF_DOVETAIL))
+		inband_event_notify(INBAND_TASK_PTSTEP, tracee);
+}
+
+static inline
+void prepare_inband_switch(struct task_struct *next)
+{
+	struct task_struct *prev = current;
+
+	if (test_ti_local_flags(task_thread_info(next), _TLF_DOVETAIL))
+		__this_cpu_write(irq_pipeline.rqlock_owner, prev);
+}
+
+void inband_retuser_notify(void);
+
+bool inband_switch_tail(void);
+
+void oob_trampoline(void);
+
+void arch_inband_task_init(struct task_struct *p);
+
+int dovetail_start(void);
+
+void dovetail_stop(void);
+
+void dovetail_init_altsched(struct dovetail_altsched_context *p);
+
+void dovetail_start_altsched(void);
+
+void dovetail_stop_altsched(void);
+
+__must_check int dovetail_leave_inband(void);
+
+static inline void dovetail_leave_oob(void)
+{
+	clear_thread_local_flags(_TLF_OOB|_TLF_OFFSTAGE);
+	clear_thread_flag(TIF_MAYDAY);
+}
+
+void dovetail_resume_inband(void);
+
+bool dovetail_context_switch(struct dovetail_altsched_context *out,
+			struct dovetail_altsched_context *in,
+			bool leave_inband);
+
+static inline
+struct oob_thread_state *dovetail_current_state(void)
+{
+	return &current_thread_info()->oob_state;
+}
+
+static inline
+struct oob_thread_state *dovetail_task_state(struct task_struct *p)
+{
+	return &task_thread_info(p)->oob_state;
+}
+
+static inline
+struct oob_mm_state *dovetail_mm_state(void)
+{
+	if (current->flags & PF_KTHREAD)
+		return NULL;
+
+	return &current->mm->oob_state;
+}
+
+void dovetail_call_mayday(struct pt_regs *regs);
+
+static inline void dovetail_send_mayday(struct task_struct *castaway)
+{
+	struct thread_info *ti = task_thread_info(castaway);
+
+	if (test_ti_local_flags(ti, _TLF_DOVETAIL))
+		set_ti_thread_flag(ti, TIF_MAYDAY);
+}
+
+static inline void dovetail_request_ucall(struct task_struct *task)
+{
+	struct thread_info *ti = task_thread_info(task);
+
+	if (test_ti_local_flags(ti, _TLF_DOVETAIL))
+		set_ti_thread_flag(ti, TIF_RETUSER);
+}
+
+static inline void dovetail_clear_ucall(void)
+{
+	if (test_thread_flag(TIF_RETUSER))
+		clear_thread_flag(TIF_RETUSER);
+}
+
+void install_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files);
+
+void uninstall_inband_fd(unsigned int fd, struct file *file,
+			 struct files_struct *files);
+
+void replace_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files);
+
+#else	/* !CONFIG_DOVETAIL */
+
+/* We may have arch-specific placeholders. */
+#include <asm/dovetail.h>
+
+struct files_struct;
+
+#define protect_inband_mm(__flags)	\
+	do { (void)(__flags); } while (0)
+
+#define unprotect_inband_mm(__flags)	\
+	do { (void)(__flags); } while (0)
+
+static inline
+void inband_task_init(struct task_struct *p) { }
+
+static inline void arch_dovetail_exec_prepare(void)
+{ }
+
+/*
+ * Keep the trap helpers as macros, we might not be able to resolve
+ * trap numbers if CONFIG_DOVETAIL is off.
+ */
+#define oob_trap_notify(__exception, __regs)	do { } while (0)
+#define oob_trap_unwind(__exception, __regs)	do { } while (0)
+
+static inline
+int pipeline_syscall(unsigned int nr, struct pt_regs *regs)
+{
+	return 0;
+}
+
+static inline void inband_signal_notify(struct task_struct *p) { }
+
+static inline
+void inband_migration_notify(struct task_struct *p, int cpu) { }
+
+static inline void inband_exit_notify(void) { }
+
+static inline void inband_cleanup_notify(struct mm_struct *mm) { }
+
+static inline void inband_retuser_notify(void) { }
+
+static inline void inband_ptstop_notify(void) { }
+
+static inline void inband_ptcont_notify(void) { }
+
+static inline void inband_ptstep_notify(struct task_struct *tracee) { }
+
+static inline void oob_trampoline(void) { }
+
+static inline void prepare_inband_switch(struct task_struct *next) { }
+
+static inline bool inband_switch_tail(void)
+{
+	/* Matches converse disabling in prepare_task_switch(). */
+	hard_cond_local_irq_enable();
+	return false;
+}
+
+static inline void dovetail_request_ucall(struct task_struct *task) { }
+
+static inline void dovetail_clear_ucall(void) { }
+
+static inline void inband_clock_was_set(void) { }
+
+static inline
+void install_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files) { }
+
+static inline
+void uninstall_inband_fd(unsigned int fd, struct file *file,
+			 struct files_struct *files) { }
+
+static inline
+void replace_inband_fd(unsigned int fd, struct file *file,
+		       struct files_struct *files) { }
+
+#endif	/* !CONFIG_DOVETAIL */
+
+static __always_inline bool dovetailing(void)
+{
+	return IS_ENABLED(CONFIG_DOVETAIL);
+}
+
+static __always_inline bool dovetail_debug(void)
+{
+	return IS_ENABLED(CONFIG_DEBUG_DOVETAIL);
+}
+
+#ifndef arch_dovetail_is_syscall
+#define arch_dovetail_is_syscall(__nr)	((__nr) == __NR_prctl)
+#endif
+
+#endif /* _LINUX_DOVETAIL_H */
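
A minimal sketch of how a companion core might consume the interface above;
every my_* identifier is illustrative and not part of this patch:

/* Hypothetical bring-up of Dovetail services by a companion core. */
static struct dovetail_altsched_context my_altsched;

static int my_core_enable(void)
{
	int ret;

	ret = dovetail_start();		/* start dispatching events to us */
	if (ret)
		return ret;

	/* Per-thread setup, before the first stage switch. */
	dovetail_init_altsched(&my_altsched);
	dovetail_start_altsched();

	/* Move the current task out-of-band; returns running oob. */
	return dovetail_leave_inband();
}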
diff --git a/kernel/include/linux/dw_apb_timer.h b/kernel/include/linux/dw_apb_timer.h
index 82ebf92..d69dbd0 100644
--- a/kernel/include/linux/dw_apb_timer.h
+++ b/kernel/include/linux/dw_apb_timer.h
@@ -30,7 +30,7 @@
 
 struct dw_apb_clocksource {
 	struct dw_apb_timer			timer;
-	struct clocksource			cs;
+	struct clocksource_user_mmio		ummio;
 };
 
 void dw_apb_clockevent_register(struct dw_apb_clock_event_device *dw_ced);
diff --git a/kernel/include/linux/entry-common.h b/kernel/include/linux/entry-common.h
index 46c4247..3d7d78c 100644
--- a/kernel/include/linux/entry-common.h
+++ b/kernel/include/linux/entry-common.h
@@ -72,6 +72,14 @@
 	 _TIF_NEED_RESCHED | _TIF_PATCH_PENDING | _TIF_NOTIFY_SIGNAL |	\
 	 ARCH_EXIT_TO_USER_MODE_WORK)
 
+/*
+ * Status codes returned on syscall entry when Dovetail is enabled.
+ * They must not conflict with valid syscall numbers, nor with -1,
+ * which seccomp uses to skip a syscall.
+ */
+#define EXIT_SYSCALL_OOB	(-2)
+#define EXIT_SYSCALL_TAIL	(-3)
+
 /**
  * arch_check_user_regs - Architecture specific sanity check for user mode regs
 * @regs:	Pointer to current's pt_regs
@@ -181,7 +189,7 @@
 #ifndef local_irq_enable_exit_to_user
 static inline void local_irq_enable_exit_to_user(unsigned long ti_work)
 {
-	local_irq_enable();
+	local_irq_enable_full();
 }
 #endif
 
@@ -196,7 +204,7 @@
 #ifndef local_irq_disable_exit_to_user
 static inline void local_irq_disable_exit_to_user(void)
 {
-	local_irq_disable();
+	local_irq_disable_full();
 }
 #endif
 
@@ -341,6 +349,12 @@
  */
 void irqentry_exit_to_user_mode(struct pt_regs *regs);
 
+enum irqentry_info {
+	IRQENTRY_INBAND_UNSTALLED = 0,
+	IRQENTRY_INBAND_STALLED,
+	IRQENTRY_OOB,
+};
+
 #ifndef irqentry_state
 /**
  * struct irqentry_state - Opaque object for exception state storage
@@ -348,6 +362,7 @@
  *            exit path has to invoke rcu_irq_exit().
  * @lockdep: Used exclusively in the irqentry_nmi_*() calls; ensures that
  *           lockdep state is restored correctly on exit from nmi.
+ * @stage_info: Information about pipeline state and current stage on IRQ entry.
  *
  * This opaque object is filled in by the irqentry_*_enter() functions and
  * must be passed back into the corresponding irqentry_*_exit() functions
@@ -362,6 +377,9 @@
 		bool	exit_rcu;
 		bool	lockdep;
 	};
+#ifdef CONFIG_IRQ_PIPELINE
+	enum irqentry_info stage_info;
+#endif
 } irqentry_state_t;
 #endif
 
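
As a hedged sketch of how these codes might be consumed, an arch entry path
could branch on the value returned by pipeline_syscall(); the wrapper below
is illustrative only:

/* Illustrative: deciding whether the inband syscall handler may be skipped. */
static __always_inline bool my_syscall_taken_over(unsigned int nr,
						  struct pt_regs *regs)
{
	int ret = pipeline_syscall(nr, regs);

	if (ret == EXIT_SYSCALL_OOB)
		return true;	/* handled from the oob stage */
	if (ret == EXIT_SYSCALL_TAIL)
		return true;	/* skip the handler, run the exit tail only */

	return false;		/* regular inband handling */
}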
diff --git a/kernel/include/linux/fcntl.h b/kernel/include/linux/fcntl.h
index 766fcd9..5cb2aa2 100644
--- a/kernel/include/linux/fcntl.h
+++ b/kernel/include/linux/fcntl.h
@@ -10,7 +10,7 @@
 	(O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
 	 O_APPEND | O_NDELAY | O_NONBLOCK | __O_SYNC | O_DSYNC | \
 	 FASYNC	| O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
-	 O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
+	 O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE | O_OOB)
 
 /* List of all valid flags for the how->upgrade_mask argument: */
 #define VALID_UPGRADE_FLAGS \
diff --git a/kernel/include/linux/fs.h b/kernel/include/linux/fs.h
index 7297765..d167e43 100644
--- a/kernel/include/linux/fs.h
+++ b/kernel/include/linux/fs.h
@@ -56,6 +56,7 @@
 struct kobject;
 struct pipe_inode_info;
 struct poll_table_struct;
+struct oob_poll_wait;
 struct kstatfs;
 struct vm_area_struct;
 struct vfsmount;
@@ -963,6 +964,7 @@
 #endif
 	/* needed for tty driver, and maybe others */
 	void			*private_data;
+	void			*oob_data;
 
 #ifdef CONFIG_EPOLL
 	/* Used by fs/eventpoll.c to link all the hooks to this file */
@@ -1800,8 +1802,11 @@
 #ifdef CONFIG_COMPAT
 extern long compat_ptr_ioctl(struct file *file, unsigned int cmd,
 					unsigned long arg);
+extern long compat_ptr_oob_ioctl(struct file *file, unsigned int cmd,
+				 unsigned long arg);
 #else
 #define compat_ptr_ioctl NULL
+#define compat_ptr_oob_ioctl NULL
 #endif
 
 /*
@@ -1888,6 +1893,11 @@
 	__poll_t (*poll) (struct file *, struct poll_table_struct *);
 	long (*unlocked_ioctl) (struct file *, unsigned int, unsigned long);
 	long (*compat_ioctl) (struct file *, unsigned int, unsigned long);
+	ssize_t (*oob_read) (struct file *, char __user *, size_t);
+	ssize_t (*oob_write) (struct file *, const char __user *, size_t);
+	long (*oob_ioctl) (struct file *, unsigned int, unsigned long);
+	long (*compat_oob_ioctl) (struct file *, unsigned int, unsigned long);
+	__poll_t (*oob_poll) (struct file *, struct oob_poll_wait *);
 	int (*mmap) (struct file *, struct vm_area_struct *);
 	unsigned long mmap_supported_flags;
 	int (*open) (struct inode *, struct file *);
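
For illustration, a driver exposing both in-band and out-of-band I/O could
fill the new slots as below; the my_* handlers are assumed to be implemented
elsewhere in the driver:

/* Hypothetical file_operations wiring the oob entry points. */
static ssize_t my_oob_read(struct file *filp, char __user *buf, size_t count);
static long my_oob_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
static __poll_t my_oob_poll(struct file *filp, struct oob_poll_wait *wait);

static const struct file_operations my_rtdev_fops = {
	.owner		  = THIS_MODULE,
	.oob_read	  = my_oob_read,
	.oob_ioctl	  = my_oob_ioctl,
	.compat_oob_ioctl = compat_ptr_oob_ioctl,
	.oob_poll	  = my_oob_poll,
};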
diff --git a/kernel/include/linux/hardirq.h b/kernel/include/linux/hardirq.h
index 754f67a..955b6ce 100644
--- a/kernel/include/linux/hardirq.h
+++ b/kernel/include/linux/hardirq.h
@@ -7,6 +7,7 @@
 #include <linux/lockdep.h>
 #include <linux/ftrace_irq.h>
 #include <linux/vtime.h>
+#include <asm-generic/irq_pipeline.h>
 #include <asm/hardirq.h>
 
 extern void synchronize_irq(unsigned int irq);
@@ -122,6 +123,7 @@
 
 #define nmi_enter()						\
 	do {							\
+		irq_pipeline_nmi_enter();			\
 		__nmi_enter();					\
 		lockdep_hardirq_enter();			\
 		rcu_nmi_enter();				\
@@ -147,6 +149,22 @@
 		rcu_nmi_exit();					\
 		lockdep_hardirq_exit();				\
 		__nmi_exit();					\
+		irq_pipeline_nmi_exit();			\
 	} while (0)
 
+static inline bool start_irq_flow(void)
+{
+	return !irqs_pipelined() || in_pipeline();
+}
+
+static inline bool on_pipeline_entry(void)
+{
+	return irqs_pipelined() && in_pipeline();
+}
+
+static inline bool in_hard_irq(void)
+{
+	return irqs_pipelined() ? in_pipeline() : in_irq();
+}
+
 #endif /* LINUX_HARDIRQ_H */
diff --git a/kernel/include/linux/intel-iommu.h b/kernel/include/linux/intel-iommu.h
index 142ec79..c1be3c0 100644
--- a/kernel/include/linux/intel-iommu.h
+++ b/kernel/include/linux/intel-iommu.h
@@ -576,7 +576,7 @@
 	u64		ecap;
 	u64		vccap;
 	u32		gcmd; /* Holds TE, EAFL. Don't need SRTP, SFL, WBF */
-	raw_spinlock_t	register_lock; /* protect register handling */
+	hard_spinlock_t	register_lock; /* protect register handling */
 	int		seq_id;	/* sequence id of the iommu */
 	int		agaw; /* agaw of this iommu */
 	int		msagaw; /* max sagaw of this iommu */
diff --git a/kernel/include/linux/interrupt.h b/kernel/include/linux/interrupt.h
index 386ddf4..c89728f 100644
--- a/kernel/include/linux/interrupt.h
+++ b/kernel/include/linux/interrupt.h
@@ -61,6 +61,12 @@
  *                interrupt handler after suspending interrupts. For system
  *                wakeup devices users need to implement wakeup detection in
  *                their interrupt handlers.
+ * IRQF_OOB - Interrupt is attached to an out-of-band handler living
+ *            on the heading stage of the interrupt pipeline
+ *            (CONFIG_IRQ_PIPELINE).  It may be delivered to the
+ *            handler any time interrupts are enabled in the CPU,
+ *            regardless of the (virtualized) interrupt state
+ *            maintained by local_irq_save/disable().
  */
 #define IRQF_SHARED		0x00000080
 #define IRQF_PROBE_SHARED	0x00000100
@@ -74,6 +80,7 @@
 #define IRQF_NO_THREAD		0x00010000
 #define IRQF_EARLY_RESUME	0x00020000
 #define IRQF_COND_SUSPEND	0x00040000
+#define IRQF_OOB		0x00080000
 
 #define IRQF_TIMER		(__IRQF_TIMER | IRQF_NO_SUSPEND | IRQF_NO_THREAD)
 
@@ -514,9 +521,29 @@
  * to ensure that after a local_irq_disable(), interrupts have
  * really been disabled in hardware. Such architectures need to
  * implement the following hook.
+ *
+ * Those cases also apply when interrupt pipelining is in effect,
+ * since we are virtualizing the interrupt disable state here too.
  */
 #ifndef hard_irq_disable
-#define hard_irq_disable()	do { } while(0)
+#define hard_irq_disable()	hard_cond_local_irq_disable()
+#endif
+
+/*
+ * Unlike what other virtualized interrupt disabling schemes may
+ * assume, we can't expect local_irq_restore() to turn hard interrupts
+ * on when pipelining.  hard_irq_enable() is introduced to be paired with
+ * hard_irq_disable(), for unconditionally turning them on. The only
+ * sane sequence mixing virtual and real disable state manipulation
+ * is:
+ *
+ * 1. local_irq_save/disable
+ * 2. hard_irq_disable
+ * 3. hard_irq_enable
+ * 4. local_irq_restore/enable
+ */
+#ifndef hard_irq_enable
+#define hard_irq_enable()	hard_cond_local_irq_enable()
 #endif
 
 /* PLEASE, avoid to allocate new softirqs, if you need not _really_ high
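
Two short sketches of the semantics documented above, with made-up device
names; both assume CONFIG_IRQ_PIPELINE is enabled:

/* Illustrative: attaching an out-of-band handler. */
static irqreturn_t my_oob_handler(int irq, void *dev_id)
{
	/* May run while the inband stage is stalled. */
	return IRQ_HANDLED;
}

static int my_attach(unsigned int irq, void *dev_id)
{
	return request_irq(irq, my_oob_handler, IRQF_OOB, "my-oob-dev", dev_id);
}

/* Illustrative: the only sane mixed disable sequence (steps 1-4 above). */
static void my_fully_atomic_section(void)
{
	unsigned long flags;

	local_irq_save(flags);		/* 1. virtual disable */
	hard_irq_disable();		/* 2. real disable */
	/* ... section atomic wrt both stages ... */
	hard_irq_enable();		/* 3. real enable */
	local_irq_restore(flags);	/* 4. virtual restore */
}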
diff --git a/kernel/include/linux/irq.h b/kernel/include/linux/irq.h
index b2b956d..44bd457 100644
--- a/kernel/include/linux/irq.h
+++ b/kernel/include/linux/irq.h
@@ -16,6 +16,7 @@
 #include <linux/irqhandler.h>
 #include <linux/irqreturn.h>
 #include <linux/irqnr.h>
+#include <linux/irq_work.h>
 #include <linux/topology.h>
 #include <linux/io.h>
 #include <linux/slab.h>
@@ -73,6 +74,11 @@
  * IRQ_DISABLE_UNLAZY		- Disable lazy irq disable
  * IRQ_HIDDEN			- Don't show up in /proc/interrupts
  * IRQ_RAW			- Skip tick management and irqtime accounting
+ * IRQ_OOB                      - Interrupt can be delivered to the out-of-band handler
+ *                                when pipelining is enabled (CONFIG_IRQ_PIPELINE),
+ *                                regardless of the (virtualized) interrupt state
+ *                                maintained by local_irq_save/disable().
+ * IRQ_CHAINED                  - Interrupt is chained.
  */
 enum {
 	IRQ_TYPE_NONE		= 0x00000000,
@@ -101,13 +107,15 @@
 	IRQ_DISABLE_UNLAZY	= (1 << 19),
 	IRQ_HIDDEN		= (1 << 20),
 	IRQ_RAW			= (1 << 21),
+	IRQ_OOB			= (1 << 22),
+	IRQ_CHAINED		= (1 << 23),
 };
 
 #define IRQF_MODIFY_MASK	\
 	(IRQ_TYPE_SENSE_MASK | IRQ_NOPROBE | IRQ_NOREQUEST | \
 	 IRQ_NOAUTOEN | IRQ_MOVE_PCNTXT | IRQ_LEVEL | IRQ_NO_BALANCING | \
 	 IRQ_PER_CPU | IRQ_NESTED_THREAD | IRQ_NOTHREAD | IRQ_PER_CPU_DEVID | \
-	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN)
+	 IRQ_IS_POLLED | IRQ_DISABLE_UNLAZY | IRQ_HIDDEN | IRQ_OOB)
 
 #define IRQ_NO_BALANCING_MASK	(IRQ_PER_CPU | IRQ_NO_BALANCING)
 
@@ -173,6 +181,7 @@
  *			irq_domain
  * @chip_data:		platform-specific per-chip private data for the chip
  *			methods, to allow shared chip implementations
+ * @move_work:		irq_work for setaffinity deferral when pipelining irqs
  */
 struct irq_data {
 	u32			mask;
@@ -183,6 +192,9 @@
 	struct irq_domain	*domain;
 #ifdef	CONFIG_IRQ_DOMAIN_HIERARCHY
 	struct irq_data		*parent_data;
+#endif
+#if defined(CONFIG_IRQ_PIPELINE) && defined(CONFIG_GENERIC_PENDING_IRQ)
+	struct irq_work		move_work;
 #endif
 	void			*chip_data;
 };
@@ -221,6 +233,7 @@
  *				  irq_chip::irq_set_affinity() when deactivated.
  * IRQD_IRQ_ENABLED_ON_SUSPEND	- Interrupt is enabled on suspend by irq pm if
  *				  irqchip have flag IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND set.
+ * IRQD_SETAFFINITY_BLOCKED	- Pending affinity setting on hold (IRQ_PIPELINE)
  */
 enum {
 	IRQD_TRIGGER_MASK		= 0xf,
@@ -247,6 +260,7 @@
 	IRQD_HANDLE_ENFORCE_IRQCTX	= (1 << 28),
 	IRQD_AFFINITY_ON_ACTIVATE	= (1 << 29),
 	IRQD_IRQ_ENABLED_ON_SUSPEND	= (1 << 30),
+	IRQD_SETAFFINITY_BLOCKED	= (1 << 31),
 };
 
 #define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -254,6 +268,21 @@
 static inline bool irqd_is_setaffinity_pending(struct irq_data *d)
 {
 	return __irqd_to_state(d) & IRQD_SETAFFINITY_PENDING;
+}
+
+static inline void irqd_set_move_blocked(struct irq_data *d)
+{
+	__irqd_to_state(d) |= IRQD_SETAFFINITY_BLOCKED;
+}
+
+static inline void irqd_clr_move_blocked(struct irq_data *d)
+{
+	__irqd_to_state(d) &= ~IRQD_SETAFFINITY_BLOCKED;
+}
+
+static inline bool irqd_is_setaffinity_blocked(struct irq_data *d)
+{
+	return irqs_pipelined() && __irqd_to_state(d) & IRQD_SETAFFINITY_BLOCKED;
 }
 
 static inline bool irqd_is_per_cpu(struct irq_data *d)
@@ -570,6 +599,7 @@
  * IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND:  Invokes __enable_irq()/__disable_irq() for wake irqs
  *                                    in the suspend path if they are in disabled state
  * IRQCHIP_AFFINITY_PRE_STARTUP:      Default affinity update before startup
+ * IRQCHIP_PIPELINE_SAFE:             Chip can work in pipelined mode
  */
 enum {
 	IRQCHIP_SET_TYPE_MASKED			= (1 <<  0),
@@ -583,6 +613,7 @@
 	IRQCHIP_SUPPORTS_NMI			= (1 <<  8),
 	IRQCHIP_ENABLE_WAKEUP_ON_SUSPEND	= (1 <<  9),
 	IRQCHIP_AFFINITY_PRE_STARTUP		= (1 << 10),
+	IRQCHIP_PIPELINE_SAFE			= (1 << 11),
 };
 
 #include <linux/irqdesc.h>
@@ -660,6 +691,7 @@
 extern void handle_percpu_devid_irq(struct irq_desc *desc);
 extern void handle_bad_irq(struct irq_desc *desc);
 extern void handle_nested_irq(unsigned int irq);
+extern void handle_synthetic_irq(struct irq_desc *desc);
 
 extern void handle_fasteoi_nmi(struct irq_desc *desc);
 extern void handle_percpu_devid_fasteoi_nmi(struct irq_desc *desc);
@@ -813,7 +845,13 @@
 extern int irq_set_msi_desc(unsigned int irq, struct msi_desc *entry);
 extern int irq_set_msi_desc_off(unsigned int irq_base, unsigned int irq_offset,
 				struct msi_desc *entry);
-extern struct irq_data *irq_get_irq_data(unsigned int irq);
+
+static inline struct irq_data *irq_get_irq_data(unsigned int irq)
+{
+	struct irq_desc *desc = irq_to_desc(irq);
+
+	return desc ? &desc->irq_data : NULL;
+}
 
 static inline struct irq_chip *irq_get_chip(unsigned int irq)
 {
@@ -1056,7 +1094,7 @@
  * different flow mechanisms (level/edge) for it.
  */
 struct irq_chip_generic {
-	raw_spinlock_t		lock;
+	hard_spinlock_t		lock;
 	void __iomem		*reg_base;
 	u32			(*reg_readl)(void __iomem *addr);
 	void			(*reg_writel)(u32 val, void __iomem *addr);
@@ -1183,6 +1221,12 @@
 
 #define IRQ_MSK(n) (u32)((n) < 32 ? ((1 << (n)) - 1) : UINT_MAX)
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+int irq_switch_oob(unsigned int irq, bool on);
+
+#endif	/* CONFIG_IRQ_PIPELINE */
+
 #ifdef CONFIG_SMP
 static inline void irq_gc_lock(struct irq_chip_generic *gc)
 {
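
As a sketch, a driver could move an interrupt line to out-of-band delivery
and back with the call declared above; error handling is elided and the
wrappers are illustrative:

/* Illustrative: toggling oob delivery for an interrupt line. */
static int my_switch_to_oob(unsigned int irq)
{
	return irq_switch_oob(irq, true);
}

static void my_switch_back(unsigned int irq)
{
	irq_switch_oob(irq, false);
}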
diff --git a/kernel/include/linux/irq_pipeline.h b/kernel/include/linux/irq_pipeline.h
new file mode 100644
index 0000000..cbeb010
--- /dev/null
+++ b/kernel/include/linux/irq_pipeline.h
@@ -0,0 +1,145 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2002 Philippe Gerum  <rpm@xenomai.org>.
+ *               2006 Gilles Chanteperdrix.
+ *               2007 Jan Kiszka.
+ */
+#ifndef _LINUX_IRQ_PIPELINE_H
+#define _LINUX_IRQ_PIPELINE_H
+
+struct cpuidle_device;
+struct cpuidle_state;
+struct irq_desc;
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#include <linux/compiler.h>
+#include <linux/irqdomain.h>
+#include <linux/percpu.h>
+#include <linux/interrupt.h>
+#include <linux/irqstage.h>
+#include <linux/thread_info.h>
+#include <asm/irqflags.h>
+
+void irq_pipeline_init_early(void);
+
+void irq_pipeline_init(void);
+
+void arch_irq_pipeline_init(void);
+
+void generic_pipeline_irq_desc(struct irq_desc *desc,
+			       struct pt_regs *regs);
+
+int irq_inject_pipeline(unsigned int irq);
+
+void synchronize_pipeline(void);
+
+static __always_inline void synchronize_pipeline_on_irq(void)
+{
+	/*
+	 * Optimize if we preempted the high priority oob stage: we
+	 * don't need to synchronize the pipeline unless there is a
+	 * pending interrupt for it.
+	 */
+	if (running_inband() ||
+	    stage_irqs_pending(this_oob_staged()))
+		synchronize_pipeline();
+}
+
+bool handle_oob_irq(struct irq_desc *desc);
+
+void arch_do_IRQ_pipelined(struct irq_desc *desc);
+
+#ifdef CONFIG_SMP
+void irq_send_oob_ipi(unsigned int ipi,
+		const struct cpumask *cpumask);
+#endif	/* CONFIG_SMP */
+
+void irq_pipeline_oops(void);
+
+bool irq_cpuidle_enter(struct cpuidle_device *dev,
+		       struct cpuidle_state *state);
+
+int run_oob_call(int (*fn)(void *arg), void *arg);
+
+extern bool irq_pipeline_active;
+
+static inline bool inband_unsafe(void)
+{
+	return running_oob() ||
+		(hard_irqs_disabled() && irq_pipeline_active);
+}
+
+static inline bool inband_irq_pending(void)
+{
+	check_hard_irqs_disabled();
+
+	return stage_irqs_pending(this_inband_staged());
+}
+
+struct irq_stage_data *
+handle_irq_pipelined_prepare(struct pt_regs *regs);
+
+int handle_irq_pipelined_finish(struct irq_stage_data *prevd,
+				struct pt_regs *regs);
+
+int handle_irq_pipelined(struct pt_regs *regs);
+
+void sync_inband_irqs(void);
+
+extern struct irq_domain *synthetic_irq_domain;
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+#include <linux/irqstage.h>
+#include <linux/hardirq.h>
+
+static inline
+void irq_pipeline_init_early(void) { }
+
+static inline
+void irq_pipeline_init(void) { }
+
+static inline
+void irq_pipeline_oops(void) { }
+
+static inline void
+generic_pipeline_irq_desc(struct irq_desc *desc,
+			struct pt_regs *regs) { }
+
+static inline bool handle_oob_irq(struct irq_desc *desc)
+{
+	return false;
+}
+
+static inline bool irq_cpuidle_enter(struct cpuidle_device *dev,
+				     struct cpuidle_state *state)
+{
+	return true;
+}
+
+static inline bool inband_unsafe(void)
+{
+	return false;
+}
+
+static inline bool inband_irq_pending(void)
+{
+	return false;
+}
+
+static inline void sync_inband_irqs(void) { }
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#if !defined(CONFIG_IRQ_PIPELINE) || !defined(CONFIG_SPARSE_IRQ)
+static inline void uncache_irq_desc(unsigned int irq) { }
+#else
+void uncache_irq_desc(unsigned int irq);
+#endif
+
+#endif /* _LINUX_IRQ_PIPELINE_H */
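
A small sketch of run_oob_call(), which the declarations above make
available to inband code; the routine and counter are illustrative:

/* Illustrative: run a routine from the oob stage, hard irqs off. */
static int my_bump(void *arg)
{
	int *counter = arg;

	return ++(*counter);
}

static int my_synced_bump(int *counter)
{
	return run_oob_call(my_bump, counter);
}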
diff --git a/kernel/include/linux/irqdesc.h b/kernel/include/linux/irqdesc.h
index 5745491..f134909 100644
--- a/kernel/include/linux/irqdesc.h
+++ b/kernel/include/linux/irqdesc.h
@@ -68,7 +68,7 @@
 	unsigned int		irqs_unhandled;
 	atomic_t		threads_handled;
 	int			threads_handled_last;
-	raw_spinlock_t		lock;
+	hybrid_spinlock_t	lock;
 	struct cpumask		*percpu_enabled;
 	const struct cpumask	*percpu_affinity;
 #ifdef CONFIG_SMP
@@ -154,6 +154,8 @@
 
 int generic_handle_irq(unsigned int irq);
 
+void generic_pipeline_irq(unsigned int irq, struct pt_regs *regs);
+
 #ifdef CONFIG_HANDLE_DOMAIN_IRQ
 /*
 * Convert a HW interrupt number to a logical one using an IRQ domain,
@@ -164,11 +166,26 @@
 int __handle_domain_irq(struct irq_domain *domain, unsigned int hwirq,
 			bool lookup, struct pt_regs *regs);
 
+#ifdef CONFIG_IRQ_PIPELINE
+unsigned int irq_find_mapping(struct irq_domain *host,
+			irq_hw_number_t hwirq);
+
+static inline int handle_domain_irq(struct irq_domain *domain,
+				    unsigned int hwirq, struct pt_regs *regs)
+{
+	unsigned int irq = irq_find_mapping(domain, hwirq);
+
+	generic_pipeline_irq(irq, regs);
+
+	return 0;
+}
+#else
 static inline int handle_domain_irq(struct irq_domain *domain,
 				    unsigned int hwirq, struct pt_regs *regs)
 {
 	return __handle_domain_irq(domain, hwirq, true, regs);
 }
+#endif	/* !CONFIG_IRQ_PIPELINE */
 
 #ifdef CONFIG_IRQ_DOMAIN
 int handle_domain_nmi(struct irq_domain *domain, unsigned int hwirq,
@@ -252,6 +269,14 @@
 	return desc->status_use_accessors & IRQ_PER_CPU_DEVID;
 }
 
+static inline int irq_is_oob(unsigned int irq)
+{
+	struct irq_desc *desc;
+
+	desc = irq_to_desc(irq);
+	return desc->status_use_accessors & IRQ_OOB;
+}
+
 static inline void
 irq_set_lockdep_class(unsigned int irq, struct lock_class_key *lock_class,
 		      struct lock_class_key *request_class)
diff --git a/kernel/include/linux/irqflags.h b/kernel/include/linux/irqflags.h
index 3ed4e87..051c727 100644
--- a/kernel/include/linux/irqflags.h
+++ b/kernel/include/linux/irqflags.h
@@ -13,6 +13,7 @@
 #define _LINUX_TRACE_IRQFLAGS_H
 
 #include <linux/typecheck.h>
+#include <asm-generic/irq_pipeline.h>
 #include <asm/irqflags.h>
 #include <asm/percpu.h>
 
@@ -52,7 +53,9 @@
 extern void trace_hardirqs_on_prepare(void);
 extern void trace_hardirqs_off_finish(void);
 extern void trace_hardirqs_on(void);
+extern void trace_hardirqs_on_pipelined(void);
 extern void trace_hardirqs_off(void);
+extern void trace_hardirqs_off_pipelined(void);
 
 # define lockdep_hardirq_context()	(raw_cpu_read(hardirq_context))
 # define lockdep_softirq_context(p)	((p)->softirq_context)
@@ -122,7 +125,9 @@
 # define trace_hardirqs_on_prepare()		do { } while (0)
 # define trace_hardirqs_off_finish()		do { } while (0)
 # define trace_hardirqs_on()			do { } while (0)
+# define trace_hardirqs_on_pipelined()		do { } while (0)
 # define trace_hardirqs_off()			do { } while (0)
+# define trace_hardirqs_off_pipelined()		do { } while (0)
 # define lockdep_hardirq_context()		0
 # define lockdep_softirq_context(p)		0
 # define lockdep_hardirqs_enabled()		0
@@ -228,6 +233,38 @@
 
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
+#ifdef CONFIG_IRQ_PIPELINE
+#define local_irq_enable_full()			\
+	do {					\
+		hard_local_irq_enable();	\
+		local_irq_enable();		\
+	} while (0)
+
+#define local_irq_disable_full()		\
+	do {					\
+		hard_local_irq_disable();	\
+		local_irq_disable();		\
+	} while (0)
+
+#define local_irq_save_full(__flags)		\
+	do {					\
+		hard_local_irq_disable();	\
+		local_irq_save(__flags);	\
+	} while (0)
+
+#define local_irq_restore_full(__flags)			\
+	do {						\
+		if (!irqs_disabled_flags(__flags))	\
+			hard_local_irq_enable();	\
+		local_irq_restore(__flags);		\
+	} while (0)
+#else
+#define local_irq_enable_full()		local_irq_enable()
+#define local_irq_disable_full()	local_irq_disable()
+#define local_irq_save_full(__flags)	local_irq_save(__flags)
+#define local_irq_restore_full(__flags)	local_irq_restore(__flags)
+#endif
+
 #define local_save_flags(flags)	raw_local_save_flags(flags)
 
 /*
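
A minimal sketch of the *_full forms in use, for a section that must stay
atomic with respect to both stages; the state being protected is made up:

/* Illustrative: hard+virtual interrupt protection around shared state. */
static void my_update_shared_state(void)
{
	unsigned long flags;

	local_irq_save_full(flags);	/* hard disable, then virtual save */
	/* ... touch state also visible to oob handlers ... */
	local_irq_restore_full(flags);
}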
diff --git a/kernel/include/linux/irqstage.h b/kernel/include/linux/irqstage.h
new file mode 100644
index 0000000..46bfb84
--- /dev/null
+++ b/kernel/include/linux/irqstage.h
@@ -0,0 +1,398 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016, 2019 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef _LINUX_IRQSTAGE_H
+#define _LINUX_IRQSTAGE_H
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+#include <linux/percpu.h>
+#include <linux/bitops.h>
+#include <linux/preempt.h>
+#include <linux/sched.h>
+#include <asm/irq_pipeline.h>
+
+struct kvm_oob_notifier;
+
+struct irq_stage {
+	int index;
+	const char *name;
+};
+
+extern struct irq_stage inband_stage;
+
+extern struct irq_stage oob_stage;
+
+struct irq_event_map;
+
+struct irq_log {
+	unsigned long index_0;
+	struct irq_event_map *map;
+};
+
+/* Per-CPU, per-stage data. */
+struct irq_stage_data {
+	struct irq_log log;
+	struct irq_stage *stage;
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+	int cpu;
+#endif
+};
+
+/* Per-CPU pipeline descriptor. */
+struct irq_pipeline_data {
+	struct irq_stage_data stages[2];
+	struct pt_regs tick_regs;
+#ifdef CONFIG_DOVETAIL
+	struct task_struct *task_inflight;
+	struct task_struct *rqlock_owner;
+#ifdef CONFIG_KVM
+	struct kvm_oob_notifier *vcpu_notify;
+#endif
+#endif
+};
+
+DECLARE_PER_CPU(struct irq_pipeline_data, irq_pipeline);
+
+/*
+ * The low-level stall bit accessors. These should be used by the
+ * Dovetail core implementation exclusively; the inband_irq_*() and
+ * oob_irq_*() accessors are available to common code.
+ */
+
+#define INBAND_STALL_BIT  0
+#define OOB_STALL_BIT     1
+
+static __always_inline void init_task_stall_bits(struct task_struct *p)
+{
+	__set_bit(INBAND_STALL_BIT, &p->stall_bits);
+	__clear_bit(OOB_STALL_BIT, &p->stall_bits);
+}
+
+static __always_inline void stall_inband_nocheck(void)
+{
+	__set_bit(INBAND_STALL_BIT, &current->stall_bits);
+	barrier();
+}
+
+static __always_inline void stall_inband(void)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
+	stall_inband_nocheck();
+}
+
+static __always_inline void unstall_inband_nocheck(void)
+{
+	barrier();
+	__clear_bit(INBAND_STALL_BIT, &current->stall_bits);
+}
+
+static __always_inline void unstall_inband(void)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
+	unstall_inband_nocheck();
+}
+
+static __always_inline int test_and_stall_inband_nocheck(void)
+{
+	return __test_and_set_bit(INBAND_STALL_BIT, &current->stall_bits);
+}
+
+static __always_inline int test_and_stall_inband(void)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && running_oob());
+	return test_and_stall_inband_nocheck();
+}
+
+static __always_inline int test_inband_stall(void)
+{
+	return test_bit(INBAND_STALL_BIT, &current->stall_bits);
+}
+
+static __always_inline void stall_oob(void)
+{
+	__set_bit(OOB_STALL_BIT, &current->stall_bits);
+	barrier();
+}
+
+static __always_inline void unstall_oob(void)
+{
+	barrier();
+	__clear_bit(OOB_STALL_BIT, &current->stall_bits);
+}
+
+static __always_inline int test_and_stall_oob(void)
+{
+	return __test_and_set_bit(OOB_STALL_BIT, &current->stall_bits);
+}
+
+static __always_inline int test_oob_stall(void)
+{
+	return test_bit(OOB_STALL_BIT, &current->stall_bits);
+}
+
+/**
+ * this_staged - IRQ stage data on the current CPU
+ *
+ * Return the address of @stage's data on the current CPU. IRQs must
+ * be hard disabled to prevent CPU migration.
+ */
+static __always_inline
+struct irq_stage_data *this_staged(struct irq_stage *stage)
+{
+	return &raw_cpu_ptr(irq_pipeline.stages)[stage->index];
+}
+
+/**
+ * percpu_inband_staged - IRQ stage data on specified CPU
+ *
+ * Return the address of @stage's data on @cpu.
+ *
+ * This is the slowest accessor; use it carefully. Prefer
+ * this_staged() for requests referring to the current
+ * CPU. Additionally, if the target stage is known at build time,
+ * consider using this_{inband, oob}_staged() instead.
+ */
+static __always_inline
+struct irq_stage_data *percpu_inband_staged(struct irq_stage *stage, int cpu)
+{
+	return &per_cpu(irq_pipeline.stages, cpu)[stage->index];
+}
+
+/**
+ * this_inband_staged - return the address of the pipeline context
+ * data for the inband stage on the current CPU. CPU migration must be
+ * disabled.
+ *
+ * This accessor is recommended when the stage we refer to is known at
+ * build time to be the inband one.
+ */
+static __always_inline struct irq_stage_data *this_inband_staged(void)
+{
+	return raw_cpu_ptr(&irq_pipeline.stages[0]);
+}
+
+/**
+ * this_oob_staged - return the address of the pipeline context data
+ * for the registered oob stage on the current CPU. CPU migration must
+ * be disabled.
+ *
+ * This accessor is recommended when the stage we refer to is known at
+ * build time to be the registered oob stage. This address is always
+ * different from the context data of the inband stage, even in
+ * absence of registered oob stage.
+ */
+static __always_inline struct irq_stage_data *this_oob_staged(void)
+{
+	return raw_cpu_ptr(&irq_pipeline.stages[1]);
+}
+
+static __always_inline struct irq_stage_data *__current_irq_staged(void)
+{
+	return &raw_cpu_ptr(irq_pipeline.stages)[stage_level()];
+}
+
+/**
+ * current_irq_staged - return the address of the pipeline context
+ * data for the current stage. CPU migration must be disabled.
+ */
+#define current_irq_staged __current_irq_staged()
+
+static __always_inline
+void check_staged_locality(struct irq_stage_data *pd)
+{
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+	/*
+	 * Setting our context with another processor's is a really
+	 * bad idea; our caller definitely went loopy.
+	 */
+	WARN_ON_ONCE(raw_smp_processor_id() != pd->cpu);
+#endif
+}
+
+/**
+ * switch_oob(), switch_inband() - switch the current CPU to the
+ * specified stage context. CPU migration must be disabled.
+ *
+ * Calling these routines is the only sane and safe way to change the
+ * interrupt stage for the current CPU. Don't bypass them, ever.
+ * Really.
+ */
+static __always_inline
+void switch_oob(struct irq_stage_data *pd)
+{
+	check_staged_locality(pd);
+	if (!(preempt_count() & STAGE_MASK))
+		preempt_count_add(STAGE_OFFSET);
+}
+
+static __always_inline
+void switch_inband(struct irq_stage_data *pd)
+{
+	check_staged_locality(pd);
+	if (preempt_count() & STAGE_MASK)
+		preempt_count_sub(STAGE_OFFSET);
+}
+
+static __always_inline
+void set_current_irq_staged(struct irq_stage_data *pd)
+{
+	if (pd->stage == &inband_stage)
+		switch_inband(pd);
+	else
+		switch_oob(pd);
+}
+
+static __always_inline struct irq_stage *__current_irq_stage(void)
+{
+	/*
+	 * We don't have to hard disable irqs while accessing the
+	 * per-CPU stage data here, because there is no way we could
+	 * switch stage and CPU at the same time.
+	 */
+	return __current_irq_staged()->stage;
+}
+
+#define current_irq_stage	__current_irq_stage()
+
+static __always_inline bool oob_stage_present(void)
+{
+	return oob_stage.index != 0;
+}
+
+/**
+ * stage_irqs_pending() - Whether we have interrupts pending
+ * (i.e. logged) on the current CPU for the given stage. Hard IRQs
+ * must be disabled.
+ */
+static __always_inline int stage_irqs_pending(struct irq_stage_data *pd)
+{
+	return pd->log.index_0 != 0;
+}
+
+void sync_current_irq_stage(void);
+
+void sync_irq_stage(struct irq_stage *top);
+
+void irq_post_stage(struct irq_stage *stage,
+		    unsigned int irq);
+
+static __always_inline void irq_post_oob(unsigned int irq)
+{
+	irq_post_stage(&oob_stage, irq);
+}
+
+static __always_inline void irq_post_inband(unsigned int irq)
+{
+	irq_post_stage(&inband_stage, irq);
+}
+
+static __always_inline void oob_irq_disable(void)
+{
+	hard_local_irq_disable();
+	stall_oob();
+}
+
+static __always_inline unsigned long oob_irq_save(void)
+{
+	hard_local_irq_disable();
+	return test_and_stall_oob();
+}
+
+static __always_inline int oob_irqs_disabled(void)
+{
+	return test_oob_stall();
+}
+
+void oob_irq_enable(void);
+
+void __oob_irq_restore(unsigned long x);
+
+static __always_inline void oob_irq_restore(unsigned long x)
+{
+	if ((x ^ test_oob_stall()) & 1)
+		__oob_irq_restore(x);
+}
+
+bool stage_disabled(void);
+
+unsigned long test_and_lock_stage(int *irqsoff);
+
+void unlock_stage(unsigned long irqstate);
+
+#define stage_save_flags(__irqstate)					\
+	do {								\
+	  unsigned long __flags = hard_local_save_flags();		\
+	  (__irqstate) = irqs_merge_flags(__flags,			\
+					  irqs_disabled());		\
+	} while (0)
+
+int enable_oob_stage(const char *name);
+
+int arch_enable_oob_stage(void);
+
+void disable_oob_stage(void);
+
+#else /* !CONFIG_IRQ_PIPELINE */
+
+#include <linux/irqflags.h>
+
+void call_is_nop_without_pipelining(void);
+
+static __always_inline void stall_inband(void) { }
+
+static __always_inline void unstall_inband(void) { }
+
+static __always_inline int test_and_stall_inband(void)
+{
+	return false;
+}
+
+static __always_inline int test_inband_stall(void)
+{
+	return false;
+}
+
+static __always_inline bool oob_stage_present(void)
+{
+	return false;
+}
+
+static __always_inline bool stage_disabled(void)
+{
+	return irqs_disabled();
+}
+
+static __always_inline void irq_post_inband(unsigned int irq)
+{
+	call_is_nop_without_pipelining();
+}
+
+#define test_and_lock_stage(__irqsoff)				\
+	({							\
+		unsigned long __flags;				\
+		raw_local_irq_save(__flags);			\
+		*(__irqsoff) = irqs_disabled_flags(__flags);	\
+		__flags;					\
+	})
+
+#define unlock_stage(__flags)		raw_local_irq_restore(__flags)
+
+#define stage_save_flags(__flags)	raw_local_save_flags(__flags)
+
+static __always_inline void stall_inband_nocheck(void)
+{ }
+
+static __always_inline void unstall_inband_nocheck(void)
+{ }
+
+static __always_inline int test_and_stall_inband_nocheck(void)
+{
+	return irqs_disabled();
+}
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
+#endif	/* !_LINUX_IRQSTAGE_H */
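
A short sketch combining the oob stall accessors with interrupt posting;
the virtual interrupt number is illustrative:

/* Illustrative: an oob critical section which logs an interrupt. */
static void my_post_from_oob(unsigned int virq)
{
	unsigned long flags;

	flags = oob_irq_save();		/* hard disable + stall oob stage */
	irq_post_oob(virq);		/* log virq for the oob stage */
	oob_irq_restore(flags);		/* may sync pending oob interrupts */
}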
diff --git a/kernel/include/linux/kernel.h b/kernel/include/linux/kernel.h
index c333dc6..b45aaf3 100644
--- a/kernel/include/linux/kernel.h
+++ b/kernel/include/linux/kernel.h
@@ -15,6 +15,7 @@
 #include <linux/typecheck.h>
 #include <linux/printk.h>
 #include <linux/build_bug.h>
+#include <asm-generic/irq_pipeline.h>
 #include <asm/byteorder.h>
 #include <asm/div64.h>
 #include <uapi/linux/kernel.h>
@@ -195,9 +196,12 @@
 
 #ifdef CONFIG_PREEMPT_VOLUNTARY
 extern int _cond_resched(void);
-# define might_resched() _cond_resched()
+# define might_resched() do { \
+		check_inband_stage(); \
+		_cond_resched(); \
+	} while (0)
 #else
-# define might_resched() do { } while (0)
+# define might_resched() check_inband_stage()
 #endif
 
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
diff --git a/kernel/include/linux/kvm_host.h b/kernel/include/linux/kvm_host.h
index 9cb0a3d..36d741e 100644
--- a/kernel/include/linux/kvm_host.h
+++ b/kernel/include/linux/kvm_host.h
@@ -14,6 +14,7 @@
 #include <linux/mm.h>
 #include <linux/mmu_notifier.h>
 #include <linux/preempt.h>
+#include <linux/dovetail.h>
 #include <linux/msi.h>
 #include <linux/slab.h>
 #include <linux/vmalloc.h>
@@ -260,10 +261,23 @@
 	unsigned len;
 };
 
+/*
+ * Called when the host is about to leave the inband stage. Typically
+ * used for switching the current vcpu out of guest mode before a
+ * companion core reinstates an oob task context.
+ */
+struct kvm_oob_notifier {
+	void (*handler)(struct kvm_oob_notifier *nfy);
+	bool put_vcpu;
+};
+
 struct kvm_vcpu {
 	struct kvm *kvm;
 #ifdef CONFIG_PREEMPT_NOTIFIERS
 	struct preempt_notifier preempt_notifier;
+#endif
+#ifdef CONFIG_DOVETAIL
+	struct kvm_oob_notifier oob_notifier;
 #endif
 	int cpu;
 	int vcpu_id; /* id given by userspace at creation */
@@ -1502,6 +1516,47 @@
 }
 #endif /* CONFIG_HAVE_KVM_VCPU_RUN_PID_CHANGE */
 
+#if defined(CONFIG_DOVETAIL) && defined(CONFIG_KVM)
+static inline void inband_init_vcpu(struct kvm_vcpu *vcpu,
+		void (*preempt_handler)(struct kvm_oob_notifier *nfy))
+{
+	vcpu->oob_notifier.handler = preempt_handler;
+	vcpu->oob_notifier.put_vcpu = false;
+}
+
+static inline void inband_enter_guest(struct kvm_vcpu *vcpu)
+{
+	struct irq_pipeline_data *p = raw_cpu_ptr(&irq_pipeline);
+	WRITE_ONCE(p->vcpu_notify, &vcpu->oob_notifier);
+}
+
+static inline void inband_exit_guest(void)
+{
+	struct irq_pipeline_data *p = raw_cpu_ptr(&irq_pipeline);
+	WRITE_ONCE(p->vcpu_notify, NULL);
+}
+
+static inline void inband_set_vcpu_release_state(struct kvm_vcpu *vcpu,
+						bool pending)
+{
+	vcpu->oob_notifier.put_vcpu = pending;
+}
+#else
+static inline void inband_init_vcpu(struct kvm_vcpu *vcpu,
+		void (*preempt_handler)(struct kvm_oob_notifier *nfy))
+{ }
+
+static inline void inband_enter_guest(struct kvm_vcpu *vcpu)
+{ }
+
+static inline void inband_exit_guest(void)
+{ }
+
+static inline void inband_set_vcpu_release_state(struct kvm_vcpu *vcpu,
+						bool pending)
+{ }
+#endif
+
 typedef int (*kvm_vm_thread_fn_t)(struct kvm *kvm, uintptr_t data);
 
 int kvm_vm_create_worker_thread(struct kvm *kvm, kvm_vm_thread_fn_t thread_fn,
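
A hedged sketch of the vcpu notifier hooks above as a hypervisor might use
them; the handler body and call sites are illustrative:

/* Illustrative: keeping a vcpu and an oob core out of each other's way. */
static void my_vcpu_preempt(struct kvm_oob_notifier *nfy)
{
	/* Switch the current vcpu out of guest mode before oob preemption. */
}

static void my_vcpu_create(struct kvm_vcpu *vcpu)
{
	inband_init_vcpu(vcpu, my_vcpu_preempt);
}

static void my_vcpu_run(struct kvm_vcpu *vcpu)
{
	inband_enter_guest(vcpu);	/* arm the per-CPU vcpu_notify hook */
	/* ... enter and run the guest ... */
	inband_exit_guest();
}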
diff --git a/kernel/include/linux/lockdep.h b/kernel/include/linux/lockdep.h
index 2c25863..3a6b855 100644
--- a/kernel/include/linux/lockdep.h
+++ b/kernel/include/linux/lockdep.h
@@ -214,29 +214,30 @@
  * of dependencies wrong: they are either too broad (they need a class-split)
  * or they are too narrow (they suffer from a false class-split):
  */
-#define lockdep_set_class(lock, key)				\
-	lockdep_init_map_type(&(lock)->dep_map, #key, key, 0,	\
-			      (lock)->dep_map.wait_type_inner,	\
-			      (lock)->dep_map.wait_type_outer,	\
-			      (lock)->dep_map.lock_type)
+#define lockdep_set_class(lock, key)					\
+	lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), #key, key, 0,	\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner,	\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer,	\
+			LOCKDEP_ALT_DEPMAP(lock)->lock_type)
 
-#define lockdep_set_class_and_name(lock, key, name)		\
-	lockdep_init_map_type(&(lock)->dep_map, name, key, 0,	\
-			      (lock)->dep_map.wait_type_inner,	\
-			      (lock)->dep_map.wait_type_outer,	\
-			      (lock)->dep_map.lock_type)
+#define lockdep_set_class_and_name(lock, key, name)			\
+	lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), name, key, 0,	\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner,	\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer,	\
+			LOCKDEP_ALT_DEPMAP(lock)->lock_type)
 
-#define lockdep_set_class_and_subclass(lock, key, sub)		\
-	lockdep_init_map_type(&(lock)->dep_map, #key, key, sub,	\
-			      (lock)->dep_map.wait_type_inner,	\
-			      (lock)->dep_map.wait_type_outer,	\
-			      (lock)->dep_map.lock_type)
+#define lockdep_set_class_and_subclass(lock, key, sub)			\
+	lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), #key, key, sub,	\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner,	\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer,	\
+			LOCKDEP_ALT_DEPMAP(lock)->lock_type)
 
 #define lockdep_set_subclass(lock, sub)					\
-	lockdep_init_map_type(&(lock)->dep_map, #lock, (lock)->dep_map.key, sub,\
-			      (lock)->dep_map.wait_type_inner,		\
-			      (lock)->dep_map.wait_type_outer,		\
-			      (lock)->dep_map.lock_type)
+	lockdep_init_map_type(LOCKDEP_ALT_DEPMAP(lock), #lock,		\
+			LOCKDEP_ALT_DEPMAP(lock)->key, sub,		\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_inner,	\
+			LOCKDEP_ALT_DEPMAP(lock)->wait_type_outer,	\
+			LOCKDEP_ALT_DEPMAP(lock)->lock_type)
 
 #define lockdep_set_novalidate_class(lock) \
 	lockdep_set_class_and_name(lock, &__lockdep_no_validate__, #lock)
@@ -244,7 +245,8 @@
 /*
  * Compare locking classes
  */
-#define lockdep_match_class(lock, key) lockdep_match_key(&(lock)->dep_map, key)
+#define lockdep_match_class(lock, key) \
+	lockdep_match_key(LOCKDEP_ALT_DEPMAP(lock), key)
 
 static inline int lockdep_match_key(struct lockdep_map *lock,
 				    struct lock_class_key *key)
@@ -282,8 +284,8 @@
 	return lock_is_held_type(lock, -1);
 }
 
-#define lockdep_is_held(lock)		lock_is_held(&(lock)->dep_map)
-#define lockdep_is_held_type(lock, r)	lock_is_held_type(&(lock)->dep_map, (r))
+#define lockdep_is_held(lock)		lock_is_held(LOCKDEP_ALT_DEPMAP(lock))
+#define lockdep_is_held_type(lock, r)	lock_is_held_type(LOCKDEP_ALT_DEPMAP(lock), (r))
 
 extern void lock_set_class(struct lockdep_map *lock, const char *name,
 			   struct lock_class_key *key, unsigned int subclass,
@@ -306,26 +308,27 @@
 #define lockdep_depth(tsk)	(debug_locks ? (tsk)->lockdep_depth : 0)
 
 #define lockdep_assert_held(l)	do {				\
-		WARN_ON(debug_locks && !lockdep_is_held(l));	\
+		WARN_ON(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held(l))); \
 	} while (0)
 
 #define lockdep_assert_held_write(l)	do {			\
-		WARN_ON(debug_locks && !lockdep_is_held_type(l, 0));	\
+		WARN_ON(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held_type(l, 0))); \
 	} while (0)
 
 #define lockdep_assert_held_read(l)	do {				\
-		WARN_ON(debug_locks && !lockdep_is_held_type(l, 1));	\
+		WARN_ON(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held_type(l, 1))); \
 	} while (0)
 
 #define lockdep_assert_held_once(l)	do {				\
-		WARN_ON_ONCE(debug_locks && !lockdep_is_held(l));	\
+		WARN_ON_ONCE(debug_locks && !LOCKDEP_HARD_DEBUG_RET(l, 1, lockdep_is_held(l))); \
 	} while (0)
 
 #define lockdep_recursing(tsk)	((tsk)->lockdep_recursion)
 
-#define lockdep_pin_lock(l)	lock_pin_lock(&(l)->dep_map)
-#define lockdep_repin_lock(l,c)	lock_repin_lock(&(l)->dep_map, (c))
-#define lockdep_unpin_lock(l,c)	lock_unpin_lock(&(l)->dep_map, (c))
+#define lockdep_pin_lock(l)	LOCKDEP_HARD_DEBUG_RET(l, ({ struct pin_cookie cookie; cookie;} ), \
+							lock_pin_lock(LOCKDEP_ALT_DEPMAP(l)))
+#define lockdep_repin_lock(l,c)	LOCKDEP_HARD_DEBUG(l,, lock_repin_lock(LOCKDEP_ALT_DEPMAP(l), (c)))
+#define lockdep_unpin_lock(l,c)	LOCKDEP_HARD_DEBUG(l,, lock_unpin_lock(LOCKDEP_ALT_DEPMAP(l), (c)))
 
 #else /* !CONFIG_LOCKDEP */
 
@@ -552,22 +555,22 @@
 #ifdef CONFIG_PROVE_LOCKING
 # define might_lock(lock)						\
 do {									\
-	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
-	lock_acquire(&(lock)->dep_map, 0, 0, 0, 1, NULL, _THIS_IP_);	\
-	lock_release(&(lock)->dep_map, _THIS_IP_);			\
+	typecheck(struct lockdep_map *, LOCKDEP_ALT_DEPMAP(lock));	\
+	lock_acquire(LOCKDEP_ALT_DEPMAP(lock), 0, 0, 0, 1, NULL, _THIS_IP_);	\
+	lock_release(LOCKDEP_ALT_DEPMAP(lock), _THIS_IP_);			\
 } while (0)
 # define might_lock_read(lock)						\
 do {									\
-	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
-	lock_acquire(&(lock)->dep_map, 0, 0, 1, 1, NULL, _THIS_IP_);	\
-	lock_release(&(lock)->dep_map, _THIS_IP_);			\
+	typecheck(struct lockdep_map *, LOCKDEP_ALT_DEPMAP(lock));	\
+	lock_acquire(LOCKDEP_ALT_DEPMAP(lock), 0, 0, 1, 1, NULL, _THIS_IP_); \
+	lock_release(LOCKDEP_ALT_DEPMAP(lock), _THIS_IP_);		\
 } while (0)
 # define might_lock_nested(lock, subclass)				\
 do {									\
-	typecheck(struct lockdep_map *, &(lock)->dep_map);		\
-	lock_acquire(&(lock)->dep_map, subclass, 0, 1, 1, NULL,		\
+	typecheck(struct lockdep_map *, LOCKDEP_ALT_DEPMAP(lock));	\
+	lock_acquire(LOCKDEP_ALT_DEPMAP(lock), subclass, 0, 1, 1, NULL,	\
 		     _THIS_IP_);					\
-	lock_release(&(lock)->dep_map, _THIS_IP_);			\
+	lock_release(LOCKDEP_ALT_DEPMAP(lock), _THIS_IP_);		\
 } while (0)
 
 DECLARE_PER_CPU(int, hardirqs_enabled);
@@ -576,14 +579,32 @@
 
 #define __lockdep_enabled	(debug_locks && !this_cpu_read(lockdep_recursion))
 
+#define __lockdep_check_irqs_enabled()					\
+	({ !hard_irqs_disabled() &&					\
+		(running_oob() || this_cpu_read(hardirqs_enabled)); })
+
 #define lockdep_assert_irqs_enabled()					\
-do {									\
-	WARN_ON_ONCE(__lockdep_enabled && !this_cpu_read(hardirqs_enabled)); \
-} while (0)
+	do {								\
+		WARN_ON_ONCE(__lockdep_enabled &&			\
+			!__lockdep_check_irqs_enabled());		\
+	} while (0)
+
+#define __lockdep_check_irqs_disabled()					\
+	({ hard_irqs_disabled() ||					\
+		(running_inband() && !this_cpu_read(hardirqs_enabled)); })
 
 #define lockdep_assert_irqs_disabled()					\
+	  do {								\
+		  WARN_ON_ONCE(__lockdep_enabled &&			\
+			  !__lockdep_check_irqs_disabled());		\
+	  } while (0)
+
+#define lockdep_read_irqs_state()					\
+	({ this_cpu_read(hardirqs_enabled); })
+
+#define lockdep_write_irqs_state(__state)				\
 do {									\
-	WARN_ON_ONCE(__lockdep_enabled && this_cpu_read(hardirqs_enabled)); \
+	this_cpu_write(hardirqs_enabled, __state);			\
 } while (0)
 
 #define lockdep_assert_in_irq()						\
@@ -596,7 +617,7 @@
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     __lockdep_enabled			&&		\
 		     (preempt_count() != 0		||		\
-		      !this_cpu_read(hardirqs_enabled)));		\
+		     __lockdep_check_irqs_disabled()));			\
 } while (0)
 
 #define lockdep_assert_preemption_disabled()				\
@@ -604,7 +625,7 @@
 	WARN_ON_ONCE(IS_ENABLED(CONFIG_PREEMPT_COUNT)	&&		\
 		     __lockdep_enabled			&&		\
 		     (preempt_count() == 0		&&		\
-		      this_cpu_read(hardirqs_enabled)));		\
+		     __lockdep_check_irqs_enabled()));			\
 } while (0)
 
 #else
@@ -614,6 +635,8 @@
 
 # define lockdep_assert_irqs_enabled() do { } while (0)
 # define lockdep_assert_irqs_disabled() do { } while (0)
+# define lockdep_read_irqs_state() 0
+# define lockdep_write_irqs_state(__state) do { (void)(__state); } while (0)
 # define lockdep_assert_in_irq() do { } while (0)
 
 # define lockdep_assert_preemption_enabled() do { } while (0)
diff --git a/kernel/include/linux/mm.h b/kernel/include/linux/mm.h
index dfefcfa..7e23752 100644
--- a/kernel/include/linux/mm.h
+++ b/kernel/include/linux/mm.h
@@ -20,6 +20,7 @@
 #include <linux/pfn.h>
 #include <linux/percpu-refcount.h>
 #include <linux/bit_spinlock.h>
+#include <linux/dovetail.h>
 #include <linux/shrinker.h>
 #include <linux/resource.h>
 #include <linux/page_ext.h>
diff --git a/kernel/include/linux/mm_types.h b/kernel/include/linux/mm_types.h
index c853f61..5820ea3 100644
--- a/kernel/include/linux/mm_types.h
+++ b/kernel/include/linux/mm_types.h
@@ -19,6 +19,8 @@
 
 #include <asm/mmu.h>
 
+#include <dovetail/mm_info.h>
+
 #ifndef AT_VECTOR_SIZE_ARCH
 #define AT_VECTOR_SIZE_ARCH 0
 #endif
@@ -593,6 +595,9 @@
 #ifdef CONFIG_HUGETLB_PAGE
 		atomic_long_t hugetlb_usage;
 #endif
+#ifdef CONFIG_DOVETAIL
+		struct oob_mm_state oob_state;
+#endif
 		struct work_struct async_put_work;
 
 #ifdef CONFIG_IOMMU_SUPPORT
diff --git a/kernel/include/linux/net.h b/kernel/include/linux/net.h
index e201a7f..f963695 100644
--- a/kernel/include/linux/net.h
+++ b/kernel/include/linux/net.h
@@ -79,6 +79,7 @@
 #ifndef SOCK_NONBLOCK
 #define SOCK_NONBLOCK	O_NONBLOCK
 #endif
+#define SOCK_OOB	O_OOB
 
 #endif /* ARCH_HAS_SOCKET_TYPES */
 
diff --git a/kernel/include/linux/netdevice.h b/kernel/include/linux/netdevice.h
index 7e7a003..d25e9aa 100644
--- a/kernel/include/linux/netdevice.h
+++ b/kernel/include/linux/netdevice.h
@@ -41,6 +41,7 @@
 #endif
 #include <net/netprio_cgroup.h>
 #include <net/xdp.h>
+#include <net/netoob.h>
 
 #include <linux/netdev_features.h>
 #include <linux/neighbour.h>
@@ -296,6 +297,7 @@
 	__LINK_STATE_LINKWATCH_PENDING,
 	__LINK_STATE_DORMANT,
 	__LINK_STATE_TESTING,
+	__LINK_STATE_OOB,
 };
 
 
@@ -1534,6 +1536,13 @@
 	ANDROID_KABI_RESERVE(6);
 	ANDROID_KABI_RESERVE(7);
 	ANDROID_KABI_RESERVE(8);
+#ifdef CONFIG_NET_OOB
+	struct sk_buff *	(*ndo_alloc_oob_skb)(struct net_device *dev,
+						     dma_addr_t *dma_addr);
+	void			(*ndo_free_oob_skb)(struct net_device *dev,
+						    struct sk_buff *skb,
+						    dma_addr_t dma_addr);
+#endif
 };
 
 /**
@@ -1725,6 +1734,7 @@
  *	@tlsdev_ops:	Transport Layer Security offload operations
  *	@header_ops:	Includes callbacks for creating,parsing,caching,etc
  *			of Layer 2 headers.
+ *	@oob_context:	Out-of-band networking context (oob stage diversion)
  *
  *	@flags:		Interface flags (a la BSD)
  *	@priv_flags:	Like 'flags' but invisible to userspace,
@@ -1982,6 +1992,10 @@
 
 #if IS_ENABLED(CONFIG_TLS_DEVICE)
 	const struct tlsdev_ops *tlsdev_ops;
+#endif
+
+#ifdef CONFIG_NET_OOB
+	struct oob_netdev_context  oob_context;
 #endif
 
 	const struct header_ops *header_ops;
@@ -4190,6 +4204,86 @@
 
 void netif_device_attach(struct net_device *dev);
 
+#ifdef CONFIG_NET_OOB
+
+static inline bool netif_oob_diversion(const struct net_device *dev)
+{
+	return test_bit(__LINK_STATE_OOB, &dev->state);
+}
+
+static inline void netif_enable_oob_diversion(struct net_device *dev)
+{
+	return set_bit(__LINK_STATE_OOB, &dev->state);
+}
+
+static inline void netif_disable_oob_diversion(struct net_device *dev)
+{
+	clear_bit(__LINK_STATE_OOB, &dev->state);
+	smp_mb__after_atomic();
+}
+
+int netif_xmit_oob(struct sk_buff *skb);
+
+static inline bool netdev_is_oob_capable(struct net_device *dev)
+{
+	return !!(dev->oob_context.flags & IFF_OOB_CAPABLE);
+}
+
+static inline void netdev_enable_oob_port(struct net_device *dev)
+{
+	dev->oob_context.flags |= IFF_OOB_PORT;
+}
+
+static inline void netdev_disable_oob_port(struct net_device *dev)
+{
+	dev->oob_context.flags &= ~IFF_OOB_PORT;
+}
+
+static inline bool netdev_is_oob_port(struct net_device *dev)
+{
+	return !!(dev->oob_context.flags & IFF_OOB_PORT);
+}
+
+static inline struct sk_buff *netdev_alloc_oob_skb(struct net_device *dev,
+						   dma_addr_t *dma_addr)
+{
+	return dev->netdev_ops->ndo_alloc_oob_skb(dev, dma_addr);
+}
+
+static inline void netdev_free_oob_skb(struct net_device *dev,
+				       struct sk_buff *skb,
+				       dma_addr_t dma_addr)
+{
+	dev->netdev_ops->ndo_free_oob_skb(dev, skb, dma_addr);
+}
+
+#else
+
+static inline bool netif_oob_diversion(const struct net_device *dev)
+{
+	return false;
+}
+
+static inline bool netdev_is_oob_capable(struct net_device *dev)
+{
+	return false;
+}
+
+static inline void netdev_enable_oob_port(struct net_device *dev)
+{
+}
+
+static inline void netdev_disable_oob_port(struct net_device *dev)
+{
+}
+
+static inline bool netdev_is_oob_port(struct net_device *dev)
+{
+	return false;
+}
+
+#endif
+
 /*
  * Network interface message level settings
  */
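
As an illustration of the helpers above, an oob networking core might gate
its transmit path as follows; the function is a sketch, not part of the patch:

/* Illustrative: oob transmission through an oob-capable port. */
static int my_oob_xmit(struct net_device *dev, struct sk_buff *skb)
{
	if (!netdev_is_oob_capable(dev) || !netdev_is_oob_port(dev))
		return -EAGAIN;	/* fall back to the inband stack */

	return netif_xmit_oob(skb);
}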
diff --git a/kernel/include/linux/poll.h b/kernel/include/linux/poll.h
index 1cdc32b..2701db2 100644
--- a/kernel/include/linux/poll.h
+++ b/kernel/include/linux/poll.h
@@ -10,6 +10,7 @@
 #include <linux/fs.h>
 #include <linux/sysctl.h>
 #include <linux/uaccess.h>
+#include <dovetail/poll.h>
 #include <uapi/linux/poll.h>
 #include <uapi/linux/eventpoll.h>
 
diff --git a/kernel/include/linux/preempt.h b/kernel/include/linux/preempt.h
index 7d9c1c0..58c21bc 100644
--- a/kernel/include/linux/preempt.h
+++ b/kernel/include/linux/preempt.h
@@ -27,17 +27,23 @@
  *         SOFTIRQ_MASK:	0x0000ff00
  *         HARDIRQ_MASK:	0x000f0000
  *             NMI_MASK:	0x00f00000
+ *         PIPELINE_MASK:	0x01000000
+ *         STAGE_MASK:		0x02000000
  * PREEMPT_NEED_RESCHED:	0x80000000
  */
 #define PREEMPT_BITS	8
 #define SOFTIRQ_BITS	8
 #define HARDIRQ_BITS	4
 #define NMI_BITS	4
+#define PIPELINE_BITS	1
+#define STAGE_BITS	1
 
 #define PREEMPT_SHIFT	0
 #define SOFTIRQ_SHIFT	(PREEMPT_SHIFT + PREEMPT_BITS)
 #define HARDIRQ_SHIFT	(SOFTIRQ_SHIFT + SOFTIRQ_BITS)
 #define NMI_SHIFT	(HARDIRQ_SHIFT + HARDIRQ_BITS)
+#define PIPELINE_SHIFT	(NMI_SHIFT + NMI_BITS)
+#define STAGE_SHIFT	(PIPELINE_SHIFT + PIPELINE_BITS)
 
 #define __IRQ_MASK(x)	((1UL << (x))-1)
 
@@ -45,11 +51,15 @@
 #define SOFTIRQ_MASK	(__IRQ_MASK(SOFTIRQ_BITS) << SOFTIRQ_SHIFT)
 #define HARDIRQ_MASK	(__IRQ_MASK(HARDIRQ_BITS) << HARDIRQ_SHIFT)
 #define NMI_MASK	(__IRQ_MASK(NMI_BITS)     << NMI_SHIFT)
+#define PIPELINE_MASK	(__IRQ_MASK(PIPELINE_BITS) << PIPELINE_SHIFT)
+#define STAGE_MASK	(__IRQ_MASK(STAGE_BITS) << STAGE_SHIFT)
 
 #define PREEMPT_OFFSET	(1UL << PREEMPT_SHIFT)
 #define SOFTIRQ_OFFSET	(1UL << SOFTIRQ_SHIFT)
 #define HARDIRQ_OFFSET	(1UL << HARDIRQ_SHIFT)
 #define NMI_OFFSET	(1UL << NMI_SHIFT)
+#define PIPELINE_OFFSET	(1UL << PIPELINE_SHIFT)
+#define STAGE_OFFSET	(1UL << STAGE_SHIFT)
 
 #define SOFTIRQ_DISABLE_OFFSET	(2 * SOFTIRQ_OFFSET)
 
@@ -82,6 +92,9 @@
 #define irq_count()	(preempt_count() & (HARDIRQ_MASK | SOFTIRQ_MASK \
 				 | NMI_MASK))
 
+/* The current IRQ stage level: 0=inband, 1=oob */
+#define stage_level()	((preempt_count() & STAGE_MASK) >> STAGE_SHIFT)
+
 /*
  * Are we doing bottom half or hardware interrupt processing?
  *
@@ -91,6 +104,7 @@
  * in_serving_softirq() - We're in softirq context
  * in_nmi()       - We're in NMI context
  * in_task()	  - We're in task context
+ * in_pipeline()  - We're on pipeline entry
  *
  * Note: due to the BH disabled confusion: in_softirq(),in_interrupt() really
  *       should not be used in new code.
@@ -102,6 +116,7 @@
 #define in_nmi()		(preempt_count() & NMI_MASK)
 #define in_task()		(!(preempt_count() & \
 				   (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+#define in_pipeline()		(preempt_count() & PIPELINE_MASK)
 
 /*
  * The preempt_count offset after preempt_disable();
@@ -180,7 +195,8 @@
 
 #define preempt_enable_no_resched() sched_preempt_enable_no_resched()
 
-#define preemptible()	(preempt_count() == 0 && !irqs_disabled())
+#define preemptible()	(preempt_count() == 0 && \
+			 !hard_irqs_disabled() && !irqs_disabled())
 
 #ifdef CONFIG_PREEMPTION
 #define preempt_enable() \
@@ -352,4 +368,43 @@
 	preempt_enable();
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+static __always_inline bool running_inband(void)
+{
+	return stage_level() == 0;
+}
+
+static __always_inline bool running_oob(void)
+{
+	return !running_inband();
+}
+
+unsigned long hard_preempt_disable(void);
+void hard_preempt_enable(unsigned long flags);
+
+#else
+
+static __always_inline bool running_inband(void)
+{
+	return true;
+}
+
+static __always_inline bool running_oob(void)
+{
+	return false;
+}
+
+#define hard_preempt_disable()		\
+({					\
+	preempt_disable();		\
+	0;				\
+})
+#define hard_preempt_enable(__flags)	\
+	do {				\
+		preempt_enable();	\
+		(void)(__flags);	\
+	} while (0)
+#endif
+
 #endif /* __LINUX_PREEMPT_H */
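
A brief sketch of stage-aware branching built on the predicates above;
the helper is illustrative:

/* Illustrative: shared code taking different paths per stage. */
static void my_stage_aware_helper(void)
{
	unsigned long flags;

	if (running_oob()) {
		flags = hard_preempt_disable(); /* returns hard irq state */
		/* ... oob-safe work only ... */
		hard_preempt_enable(flags);
	} else {
		/* Running inband: full kernel services are available. */
	}
}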
diff --git a/kernel/include/linux/printk.h b/kernel/include/linux/printk.h
index 14d13ec..270fa48 100644
--- a/kernel/include/linux/printk.h
+++ b/kernel/include/linux/printk.h
@@ -160,7 +160,22 @@
 static inline void printk_nmi_direct_exit(void) { }
 #endif /* PRINTK_NMI */
 
 struct dev_printk_info;
+
+#ifdef CONFIG_RAW_PRINTK
+void raw_puts(const char *s, size_t len);
+void raw_vprintk(const char *fmt, va_list ap);
+asmlinkage __printf(1, 2)
+void raw_printk(const char *fmt, ...);
+#else
+static inline __cold
+void raw_puts(const char *s, size_t len) { }
+static inline __cold
+void raw_vprintk(const char *s, va_list ap) { }
+static inline __printf(1, 2) __cold
+void raw_printk(const char *s, ...) { }
+#endif
 
 #ifdef CONFIG_PRINTK
 asmlinkage __printf(4, 0)
@@ -512,7 +527,7 @@
 				      DEFAULT_RATELIMIT_INTERVAL,	\
 				      DEFAULT_RATELIMIT_BURST);		\
 									\
-	if (__ratelimit(&_rs))						\
+	if (running_oob() || __ratelimit(&_rs))				\
 		printk(fmt, ##__VA_ARGS__);				\
 })
 #else
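
A tiny sketch of stage-aware logging with the raw console path declared
above; the message is illustrative:

/* Illustrative: raw_printk() is meant for contexts where printk() can't run. */
static void my_report(unsigned int irq)
{
	if (running_oob())
		raw_printk("oob: irq %u fired\n", irq);
	else
		printk(KERN_INFO "irq %u fired\n", irq);
}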
diff --git a/kernel/include/linux/rcupdate.h b/kernel/include/linux/rcupdate.h
index 095b3b3..a4388ef 100644
--- a/kernel/include/linux/rcupdate.h
+++ b/kernel/include/linux/rcupdate.h
@@ -118,6 +118,14 @@
 static inline void rcu_nocb_flush_deferred_wakeup(void) { }
 #endif /* #else #ifdef CONFIG_RCU_NOCB_CPU */
 
+#ifdef CONFIG_IRQ_PIPELINE
+void rcu_oob_prepare_lock(void);
+void rcu_oob_finish_lock(void);
+#else
+#define rcu_oob_prepare_lock()	 do { } while (0)
+#define rcu_oob_finish_lock()	 do { } while (0)
+#endif
+
 /**
  * RCU_NONIDLE - Indicate idle-loop code that needs RCU readers
  * @a: Code that RCU needs to pay attention to.
@@ -290,7 +298,7 @@
 
 static inline int rcu_read_lock_sched_held(void)
 {
-	return !preemptible();
+	return !running_inband() || !preemptible();
 }
 
 static inline int rcu_read_lock_any_held(void)
@@ -646,6 +654,7 @@
  */
 static __always_inline void rcu_read_lock(void)
 {
+	rcu_oob_prepare_lock();
 	__rcu_read_lock();
 	__acquire(RCU);
 	rcu_lock_acquire(&rcu_lock_map);
@@ -702,6 +711,7 @@
 			 "rcu_read_unlock() used illegally while idle");
 	__release(RCU);
 	__rcu_read_unlock();
+	rcu_oob_finish_lock();
 	rcu_lock_release(&rcu_lock_map); /* Keep acq info for rls diags. */
 }
 
@@ -755,6 +765,7 @@
 static inline void rcu_read_lock_sched(void)
 {
 	preempt_disable();
+	rcu_oob_prepare_lock();
 	__acquire(RCU_SCHED);
 	rcu_lock_acquire(&rcu_sched_lock_map);
 	RCU_LOCKDEP_WARN(!rcu_is_watching(),
@@ -779,6 +790,7 @@
 			 "rcu_read_unlock_sched() used illegally while idle");
 	rcu_lock_release(&rcu_sched_lock_map);
 	__release(RCU_SCHED);
+	rcu_oob_finish_lock();
 	preempt_enable();
 }
 
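
Read-side callers need no change for the hooks above; rcu_read_lock() and rcu_read_unlock() invoke rcu_oob_prepare_lock()/rcu_oob_finish_lock() transparently. A minimal sketch with hypothetical names:

struct sample_data { int field; };
static struct sample_data __rcu *sample_ptr;

static int sample_read_field(void)
{
	struct sample_data *p;
	int val = -1;

	rcu_read_lock();		/* also runs rcu_oob_prepare_lock() */
	p = rcu_dereference(sample_ptr);
	if (p)
		val = p->field;
	rcu_read_unlock();		/* also runs rcu_oob_finish_lock() */

	return val;
}
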
diff --git a/kernel/include/linux/regmap.h b/kernel/include/linux/regmap.h
index 751ca38..4df7952 100644
--- a/kernel/include/linux/regmap.h
+++ b/kernel/include/linux/regmap.h
@@ -369,6 +369,7 @@
 	int (*reg_write)(void *context, unsigned int reg, unsigned int val);
 
 	bool fast_io;
+	bool oob_io;
 
 	unsigned int max_register;
 	const struct regmap_access_table *wr_table;
diff --git a/kernel/include/linux/sched.h b/kernel/include/linux/sched.h
index d3cc279..38cac8c 100644
--- a/kernel/include/linux/sched.h
+++ b/kernel/include/linux/sched.h
@@ -119,6 +119,12 @@
 
 #define task_is_stopped_or_traced(task)	((task->state & (__TASK_STOPPED | __TASK_TRACED)) != 0)
 
+#ifdef CONFIG_DOVETAIL
+#define task_is_off_stage(task)		test_ti_local_flags(task_thread_info(task), _TLF_OFFSTAGE)
+#else
+#define task_is_off_stage(task)		0
+#endif
+
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 
 /*
@@ -1052,6 +1058,10 @@
 	int				irq_config;
 #endif
 
+#ifdef CONFIG_IRQ_PIPELINE
+	unsigned long			stall_bits;
+#endif
+
 #ifdef CONFIG_LOCKDEP
 # define MAX_LOCK_DEPTH			48UL
 	u64				curr_chain_key;
diff --git a/kernel/include/linux/sched/coredump.h b/kernel/include/linux/sched/coredump.h
index dfd82ea..0b06940 100644
--- a/kernel/include/linux/sched/coredump.h
+++ b/kernel/include/linux/sched/coredump.h
@@ -74,6 +74,7 @@
 #define MMF_OOM_REAP_QUEUED	26	/* mm was queued for oom_reaper */
 #define MMF_MULTIPROCESS	27	/* mm is shared between processes */
 #define MMF_DISABLE_THP_MASK	(1 << MMF_DISABLE_THP)
+#define MMF_DOVETAILED		31	/* mm belongs to a dovetailed process */
 
 #define MMF_INIT_MASK		(MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
 				 MMF_DISABLE_THP_MASK)
diff --git a/kernel/include/linux/skbuff.h b/kernel/include/linux/skbuff.h
index f73efb3..3556cf6 100644
--- a/kernel/include/linux/skbuff.h
+++ b/kernel/include/linux/skbuff.h
@@ -793,6 +793,12 @@
 #ifdef CONFIG_SKB_EXTENSIONS
 	__u8			active_extensions;
 #endif
+#ifdef CONFIG_NET_OOB
+	__u8			oob:1;
+	__u8			oob_clone:1;
+	__u8			oob_cloned:1;
+#endif
+
 	/* fields enclosed in headers_start/headers_end are copied
 	 * using a single memcpy() in __copy_skb_header()
 	 */
@@ -1102,6 +1108,69 @@
 struct sk_buff *build_skb(void *data, unsigned int frag_size);
 struct sk_buff *build_skb_around(struct sk_buff *skb,
 				 void *data, unsigned int frag_size);
+#ifdef CONFIG_NET_OOB
+
+static inline void __skb_oob_copy(struct sk_buff *new,
+				const struct sk_buff *old)
+{
+	new->oob = old->oob;
+	new->oob_clone = old->oob_clone;
+	new->oob_cloned = old->oob_cloned;
+}
+
+static inline bool skb_is_oob(const struct sk_buff *skb)
+{
+	return skb->oob;
+}
+
+static inline bool skb_is_oob_clone(const struct sk_buff *skb)
+{
+	return skb->oob_clone;
+}
+
+static inline bool skb_has_oob_clone(const struct sk_buff *skb)
+{
+	return skb->oob_cloned;
+}
+
+struct sk_buff *__netdev_alloc_oob_skb(struct net_device *dev,
+				size_t len, size_t headroom,
+				gfp_t gfp_mask);
+void __netdev_free_oob_skb(struct net_device *dev, struct sk_buff *skb);
+void netdev_reset_oob_skb(struct net_device *dev, struct sk_buff *skb,
+			size_t headroom);
+struct sk_buff *skb_alloc_oob_head(gfp_t gfp_mask);
+void skb_morph_oob_skb(struct sk_buff *n, struct sk_buff *skb);
+bool skb_release_oob_skb(struct sk_buff *skb, int *dref);
+
+static inline bool recycle_oob_skb(struct sk_buff *skb)
+{
+	bool skb_oob_recycle(struct sk_buff *skb);
+
+	if (!skb->oob)
+		return false;
+
+	return skb_oob_recycle(skb);
+}
+
+#else  /* !CONFIG_NET_OOB */
+
+static inline void __skb_oob_copy(struct sk_buff *new,
+				const struct sk_buff *old)
+{
+}
+
+static inline bool skb_is_oob(const struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline bool recycle_oob_skb(struct sk_buff *skb)
+{
+	return false;
+}
+
+#endif	/* !CONFIG_NET_OOB */
 
 /**
  * alloc_skb - allocate a network buffer
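
A short sketch of the helpers above with a hypothetical caller: the oob/oob_clone/oob_cloned bits sit before headers_start, outside the single-memcpy copy region, so a copy path has to carry them over explicitly via __skb_oob_copy().

static void sample_carry_oob_state(struct sk_buff *to,
				const struct sk_buff *from)
{
	if (skb_is_oob(from))
		__skb_oob_copy(to, from);
}
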
diff --git a/kernel/include/linux/smp.h b/kernel/include/linux/smp.h
index 7ce15c3..44888c0 100644
--- a/kernel/include/linux/smp.h
+++ b/kernel/include/linux/smp.h
@@ -241,6 +241,21 @@
 #define get_cpu()		({ preempt_disable(); __smp_processor_id(); })
 #define put_cpu()		preempt_enable()
 
+#ifdef CONFIG_IRQ_PIPELINE
+#define hard_get_cpu(flags)	({			\
+		(flags) = hard_preempt_disable();	\
+		raw_smp_processor_id();			\
+	})
+#define hard_put_cpu(flags)	hard_preempt_enable(flags)
+#else
+#define hard_get_cpu(flags)	({ (void)(flags); get_cpu(); })
+#define hard_put_cpu(flags)	\
+	do {			\
+		(void)(flags);	\
+		put_cpu();	\
+	} while (0)
+#endif
+
 /*
  * Callback to arch code if there's nosmp or maxcpus=0 on the
  * boot command line:
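
A usage sketch for the pair above (names hypothetical): same shape as get_cpu()/put_cpu(), but with the pipeline enabled the section also keeps hard interrupts off, so it cannot be preempted from the out-of-band stage.

static void sample_percpu_section(void)
{
	unsigned long flags;
	int cpu;

	cpu = hard_get_cpu(flags);	/* hard preemption off, cpu id */
	(void)cpu;	/* ... access per-CPU data of 'cpu' here ... */
	hard_put_cpu(flags);
}
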
diff --git a/kernel/include/linux/socket.h b/kernel/include/linux/socket.h
index c3b35d1..eb8bf6a 100644
--- a/kernel/include/linux/socket.h
+++ b/kernel/include/linux/socket.h
@@ -223,8 +223,9 @@
 				 * reuses AF_INET address family
 				 */
 #define AF_XDP		44	/* XDP sockets			*/
+#define AF_OOB		45	/* Out-of-band domain sockets */
 
-#define AF_MAX		45	/* For now.. */
+#define AF_MAX		46	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
@@ -274,6 +275,7 @@
 #define PF_QIPCRTR	AF_QIPCRTR
 #define PF_SMC		AF_SMC
 #define PF_XDP		AF_XDP
+#define PF_OOB		AF_OOB
 #define PF_MAX		AF_MAX
 
 /* Maximum queue length specifiable by listen.  */
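
A userspace sketch for the new domain (the type and protocol values are placeholders; what the out-of-band stack actually accepts is defined by the oob networking code, not by this header):

#include <sys/socket.h>

int sample_open_oob_socket(void)
{
	return socket(AF_OOB, SOCK_RAW, 0);
}
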
diff --git a/kernel/include/linux/spi/spi.h b/kernel/include/linux/spi/spi.h
index f50c766..04f35c4 100644
--- a/kernel/include/linux/spi/spi.h
+++ b/kernel/include/linux/spi/spi.h
@@ -9,6 +9,7 @@
 #include <linux/device.h>
 #include <linux/mod_devicetable.h>
 #include <linux/slab.h>
+#include <linux/dmaengine.h>
 #include <linux/kthread.h>
 #include <linux/completion.h>
 #include <linux/scatterlist.h>
@@ -253,6 +254,7 @@
 
 struct spi_message;
 struct spi_transfer;
+struct spi_oob_transfer;
 
 /**
  * struct spi_driver - Host side "protocol" driver
@@ -352,6 +354,7 @@
  * @io_mutex: mutex for physical bus access
  * @bus_lock_spinlock: spinlock for SPI bus locking
  * @bus_lock_mutex: mutex for exclusion of multiple callers
+ * @bus_oob_lock_sem: semaphore for exclusion during oob operations
  * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
  * @setup: updates the device mode and clocking records used by a
  *	device's SPI controller; protocol code may call this.  This
@@ -534,6 +537,10 @@
 	spinlock_t		bus_lock_spinlock;
 	struct mutex		bus_lock_mutex;
 
+#ifdef CONFIG_SPI_OOB
+	struct semaphore	bus_oob_lock_sem;
+#endif
+
 	/* flag indicating that the SPI bus is locked for exclusive use */
 	bool			bus_lock_flag;
 
@@ -626,6 +633,14 @@
 	int (*unprepare_message)(struct spi_controller *ctlr,
 				 struct spi_message *message);
 	int (*slave_abort)(struct spi_controller *ctlr);
+	int (*prepare_oob_transfer)(struct spi_controller *ctlr,
+				struct spi_oob_transfer *xfer);
+	void (*start_oob_transfer)(struct spi_controller *ctlr,
+				struct spi_oob_transfer *xfer);
+	void (*pulse_oob_transfer)(struct spi_controller *ctlr,
+				struct spi_oob_transfer *xfer);
+	void (*terminate_oob_transfer)(struct spi_controller *ctlr,
+				struct spi_oob_transfer *xfer);
 
 	/*
 	 * These hooks are for drivers that use a generic implementation
@@ -1137,6 +1152,90 @@
 	kfree(m);
 }
 
+struct spi_oob_transfer {
+	struct spi_device *spi;
+	dma_addr_t dma_addr;
+	size_t aligned_frame_len;
+	void *io_buffer;	/* 2 x aligned_frame_len */
+	struct dma_async_tx_descriptor *txd;
+	struct dma_async_tx_descriptor *rxd;
+	u32 effective_speed_hz;
+	/*
+	 * Caller-defined settings for the transfer.
+	 */
+	struct spi_oob_setup {
+		u32 frame_len;
+		u32 speed_hz;
+		u8 bits_per_word;
+		dma_async_tx_callback xfer_done;
+	} setup;
+};
+
+static inline off_t spi_get_oob_rxoff(struct spi_oob_transfer *xfer)
+{
+	/* RX area is in first half of the I/O buffer. */
+	return 0;
+}
+
+static inline off_t spi_get_oob_txoff(struct spi_oob_transfer *xfer)
+{
+	/* TX area is in second half of the I/O buffer. */
+	return xfer->aligned_frame_len;
+}
+
+static inline size_t spi_get_oob_iolen(struct spi_oob_transfer *xfer)
+{
+	return xfer->aligned_frame_len * 2;
+}
+
+#ifdef CONFIG_SPI_OOB
+
+struct vm_area_struct;
+
+int spi_prepare_oob_transfer(struct spi_device *spi,
+			struct spi_oob_transfer *xfer);
+
+void spi_start_oob_transfer(struct spi_oob_transfer *xfer);
+
+int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer);
+
+void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer);
+
+int spi_mmap_oob_transfer(struct vm_area_struct *vma,
+			struct spi_oob_transfer *xfer);
+
+#else
+
+static inline
+int spi_prepare_oob_transfer(struct spi_device *spi,
+			struct spi_oob_transfer *xfer)
+{
+	return -ENOTSUPP;
+}
+
+static inline
+void spi_start_oob_transfer(struct spi_oob_transfer *xfer)
+{ }
+
+static inline
+int spi_pulse_oob_transfer(struct spi_oob_transfer *xfer)
+{
+	return -EIO;
+}
+
+static inline
+void spi_terminate_oob_transfer(struct spi_oob_transfer *xfer)
+{ }
+
+static inline
+int spi_mmap_oob_transfer(struct vm_area_struct *vma,
+			struct spi_oob_transfer *xfer)
+{
+	return -ENXIO;
+}
+
+#endif
+
 extern int spi_set_cs_timing(struct spi_device *spi,
 			     struct spi_delay *setup,
 			     struct spi_delay *hold,
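
A hedged driver-side sketch of the out-of-band transfer interface above (device handling and setup values are placeholders): the caller fills in spi_oob_setup, prepares the transfer, then starts the cyclic I/O.

static struct spi_oob_transfer sample_xfer;

static int sample_setup_oob(struct spi_device *spi)
{
	int ret;

	sample_xfer.setup.frame_len = 32;	/* bytes per frame */
	sample_xfer.setup.speed_hz = 10000000;
	sample_xfer.setup.bits_per_word = 8;
	sample_xfer.setup.xfer_done = NULL;	/* or a dma_async_tx_callback */

	ret = spi_prepare_oob_transfer(spi, &sample_xfer);
	if (ret)
		return ret;

	spi_start_oob_transfer(&sample_xfer);
	return 0;
}
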
diff --git a/kernel/include/linux/spinlock.h b/kernel/include/linux/spinlock.h
index 7989784..311854f 100644
--- a/kernel/include/linux/spinlock.h
+++ b/kernel/include/linux/spinlock.h
@@ -97,21 +97,27 @@
 				   struct lock_class_key *key, short inner);
 
 # define raw_spin_lock_init(lock)					\
+	LOCK_ALTERNATIVES(lock,	spin_lock_init,				\
 do {									\
 	static struct lock_class_key __key;				\
 									\
-	__raw_spin_lock_init((lock), #lock, &__key, LD_WAIT_SPIN);	\
-} while (0)
+	__raw_spin_lock_init(__RAWLOCK(lock), #lock, &__key, LD_WAIT_SPIN); \
+} while (0))
 
 #else
 # define raw_spin_lock_init(lock)				\
-	do { *(lock) = __RAW_SPIN_LOCK_UNLOCKED(lock); } while (0)
+	LOCK_ALTERNATIVES(lock,	spin_lock_init,			\
+	do { *(__RAWLOCK(lock)) = __RAW_SPIN_LOCK_UNLOCKED(__RAWLOCK(lock)); } while (0))
 #endif
 
-#define raw_spin_is_locked(lock)	arch_spin_is_locked(&(lock)->raw_lock)
+#define raw_spin_is_locked(lock)		\
+	LOCK_ALTERNATIVES_RET(lock, spin_is_locked,	\
+	      arch_spin_is_locked(&(__RAWLOCK(lock))->raw_lock))
 
 #ifdef arch_spin_is_contended
-#define raw_spin_is_contended(lock)	arch_spin_is_contended(&(lock)->raw_lock)
+#define raw_spin_is_contended(lock)			\
+	LOCK_ALTERNATIVES_RET(lock, spin_is_contended,	\
+	      arch_spin_is_contended(&(__RAWLOCK(lock))->raw_lock))
 #else
 #define raw_spin_is_contended(lock)	(((void)(lock), 0))
 #endif /*arch_spin_is_contended*/
@@ -220,13 +226,19 @@
  * various methods are defined as nops in the case they are not
  * required.
  */
-#define raw_spin_trylock(lock)	__cond_lock(lock, _raw_spin_trylock(lock))
+#define raw_spin_trylock(lock)			\
+	__cond_lock(lock,			\
+		    LOCK_ALTERNATIVES_RET(lock,	\
+		    spin_trylock, _raw_spin_trylock(__RAWLOCK(lock))))
 
-#define raw_spin_lock(lock)	_raw_spin_lock(lock)
+#define raw_spin_lock(lock)	\
+	LOCK_ALTERNATIVES(lock, spin_lock, _raw_spin_lock(__RAWLOCK(lock)))
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
+
 # define raw_spin_lock_nested(lock, subclass) \
-	_raw_spin_lock_nested(lock, subclass)
+	LOCK_ALTERNATIVES(lock, spin_lock_nested, \
+		_raw_spin_lock_nested(__RAWLOCK(lock), subclass), subclass)
 
 # define raw_spin_lock_nest_lock(lock, nest_lock)			\
 	 do {								\
@@ -239,18 +251,20 @@
  * warns about set-but-not-used variables when building with
  * CONFIG_DEBUG_LOCK_ALLOC=n and with W=1.
  */
-# define raw_spin_lock_nested(lock, subclass)		\
-	_raw_spin_lock(((void)(subclass), (lock)))
+# define raw_spin_lock_nested(lock, subclass)	\
+	LOCK_ALTERNATIVES(lock, spin_lock_nested, \
+		_raw_spin_lock(((void)(subclass), __RAWLOCK(lock))), subclass)
 # define raw_spin_lock_nest_lock(lock, nest_lock)	_raw_spin_lock(lock)
 #endif
 
 #if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
 
-#define raw_spin_lock_irqsave(lock, flags)			\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		flags = _raw_spin_lock_irqsave(lock);	\
-	} while (0)
+#define raw_spin_lock_irqsave(lock, flags)				\
+	LOCK_ALTERNATIVES(lock, spin_lock_irqsave,			\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		flags = _raw_spin_lock_irqsave(__RAWLOCK(lock));	\
+	} while (0), flags)
 
 #ifdef CONFIG_DEBUG_LOCK_ALLOC
 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)		\
@@ -268,45 +282,55 @@
 
 #else
 
-#define raw_spin_lock_irqsave(lock, flags)		\
-	do {						\
-		typecheck(unsigned long, flags);	\
-		_raw_spin_lock_irqsave(lock, flags);	\
-	} while (0)
+#define raw_spin_lock_irqsave(lock, flags)			\
+	LOCK_ALTERNATIVES(lock, spin_lock_irqsave,		\
+	do {							\
+		typecheck(unsigned long, flags);		\
+		_raw_spin_lock_irqsave(__RAWLOCK(lock), flags);	\
+	} while (0), flags)
 
 #define raw_spin_lock_irqsave_nested(lock, flags, subclass)	\
 	raw_spin_lock_irqsave(lock, flags)
 
 #endif
 
-#define raw_spin_lock_irq(lock)		_raw_spin_lock_irq(lock)
+#define raw_spin_lock_irq(lock)		       \
+	LOCK_ALTERNATIVES(lock, spin_lock_irq, \
+			  _raw_spin_lock_irq(__RAWLOCK(lock)))
 #define raw_spin_lock_bh(lock)		_raw_spin_lock_bh(lock)
-#define raw_spin_unlock(lock)		_raw_spin_unlock(lock)
-#define raw_spin_unlock_irq(lock)	_raw_spin_unlock_irq(lock)
+#define raw_spin_unlock(lock)		     \
+	LOCK_ALTERNATIVES(lock, spin_unlock, \
+			  _raw_spin_unlock(__RAWLOCK(lock)))
+#define raw_spin_unlock_irq(lock)	\
+	LOCK_ALTERNATIVES(lock, spin_unlock_irq, \
+			  _raw_spin_unlock_irq(__RAWLOCK(lock)))
 
-#define raw_spin_unlock_irqrestore(lock, flags)		\
-	do {							\
-		typecheck(unsigned long, flags);		\
-		_raw_spin_unlock_irqrestore(lock, flags);	\
-	} while (0)
+#define raw_spin_unlock_irqrestore(lock, flags)				\
+	LOCK_ALTERNATIVES(lock, spin_unlock_irqrestore,			\
+	do {								\
+		typecheck(unsigned long, flags);			\
+		_raw_spin_unlock_irqrestore(__RAWLOCK(lock), flags);	\
+	} while (0), flags)
 #define raw_spin_unlock_bh(lock)	_raw_spin_unlock_bh(lock)
 
 #define raw_spin_trylock_bh(lock) \
 	__cond_lock(lock, _raw_spin_trylock_bh(lock))
 
 #define raw_spin_trylock_irq(lock) \
+	LOCK_ALTERNATIVES_RET(lock, spin_trylock_irq, \
 ({ \
 	local_irq_disable(); \
-	raw_spin_trylock(lock) ? \
+	raw_spin_trylock(__RAWLOCK(lock)) ?	\
 	1 : ({ local_irq_enable(); 0;  }); \
-})
+}))
 
 #define raw_spin_trylock_irqsave(lock, flags) \
+	LOCK_ALTERNATIVES_RET(lock, spin_trylock_irqsave, \
 ({ \
 	local_irq_save(flags); \
-	raw_spin_trylock(lock) ? \
+	raw_spin_trylock(__RAWLOCK(lock)) ?	\
 	1 : ({ local_irq_restore(flags); 0; }); \
-})
+}), flags)
 
 /* Include rwlock functions */
 #include <linux/rwlock.h>
@@ -320,12 +344,20 @@
 # include <linux/spinlock_api_up.h>
 #endif
 
+/* Pull the lock types specific to the IRQ pipeline. */
+#ifdef CONFIG_IRQ_PIPELINE
+#include <linux/spinlock_pipeline.h>
+#else
+static inline void check_spinlock_context(void) { }
+#endif
+
 /*
  * Map the spin_lock functions to the raw variants for PREEMPT_RT=n
  */
 
 static __always_inline raw_spinlock_t *spinlock_check(spinlock_t *lock)
 {
+	check_spinlock_context();
 	return &lock->rlock;
 }
 
diff --git a/kernel/include/linux/spinlock_api_up.h b/kernel/include/linux/spinlock_api_up.h
index d0d1888..6895779 100644
--- a/kernel/include/linux/spinlock_api_up.h
+++ b/kernel/include/linux/spinlock_api_up.h
@@ -30,20 +30,32 @@
 #define __LOCK(lock) \
   do { preempt_disable(); ___LOCK(lock); } while (0)
 
+#define __HARD_LOCK(lock) \
+  do { ___LOCK(lock); } while (0)
+
 #define __LOCK_BH(lock) \
   do { __local_bh_disable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); ___LOCK(lock); } while (0)
 
 #define __LOCK_IRQ(lock) \
   do { local_irq_disable(); __LOCK(lock); } while (0)
 
+#define __HARD_LOCK_IRQ(lock) \
+  do { hard_local_irq_disable(); __HARD_LOCK(lock); } while (0)
+
 #define __LOCK_IRQSAVE(lock, flags) \
   do { local_irq_save(flags); __LOCK(lock); } while (0)
+
+#define __HARD_LOCK_IRQSAVE(lock, flags) \
+  do { flags = hard_local_irq_save(); __HARD_LOCK(lock); } while (0)
 
 #define ___UNLOCK(lock) \
   do { __release(lock); (void)(lock); } while (0)
 
 #define __UNLOCK(lock) \
   do { preempt_enable(); ___UNLOCK(lock); } while (0)
+
+#define __HARD_UNLOCK(lock) \
+  do { ___UNLOCK(lock); } while (0)
 
 #define __UNLOCK_BH(lock) \
   do { __local_bh_enable_ip(_THIS_IP_, SOFTIRQ_LOCK_OFFSET); \
@@ -52,9 +64,15 @@
 #define __UNLOCK_IRQ(lock) \
   do { local_irq_enable(); __UNLOCK(lock); } while (0)
 
+#define __HARD_UNLOCK_IRQ(lock) \
+  do { hard_local_irq_enable(); __HARD_UNLOCK(lock); } while (0)
+
 #define __UNLOCK_IRQRESTORE(lock, flags) \
   do { local_irq_restore(flags); __UNLOCK(lock); } while (0)
 
+#define __HARD_UNLOCK_IRQRESTORE(lock, flags) \
+  do { hard_local_irq_restore(flags); __HARD_UNLOCK(lock); } while (0)
+
 #define _raw_spin_lock(lock)			__LOCK(lock)
 #define _raw_spin_lock_nested(lock, subclass)	__LOCK(lock)
 #define _raw_read_lock(lock)			__LOCK(lock)
diff --git a/kernel/include/linux/spinlock_pipeline.h b/kernel/include/linux/spinlock_pipeline.h
new file mode 100644
index 0000000..1652735
--- /dev/null
+++ b/kernel/include/linux/spinlock_pipeline.h
@@ -0,0 +1,387 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#ifndef __LINUX_SPINLOCK_PIPELINE_H
+#define __LINUX_SPINLOCK_PIPELINE_H
+
+#ifndef __LINUX_SPINLOCK_H
+# error "Please don't include this file directly. Use spinlock.h."
+#endif
+
+#include <dovetail/spinlock.h>
+
+#define hard_spin_lock_irqsave(__rlock, __flags)		\
+	do {							\
+		(__flags) = __hard_spin_lock_irqsave(__rlock);	\
+	} while (0)
+
+#define hard_spin_trylock_irqsave(__rlock, __flags)			\
+	({								\
+		int __locked;						\
+		(__flags) = __hard_spin_trylock_irqsave(__rlock, &__locked); \
+		__locked;						\
+	})
+
+#define hybrid_spin_lock_init(__rlock)	hard_spin_lock_init(__rlock)
+
+/*
+ * CAUTION: We don't want the hand-coded irq-enable of
+ * do_raw_spin_lock_flags(); hard locked sections assume that
+ * interrupts are not re-enabled during lock acquisition.
+ */
+#define hard_lock_acquire(__rlock, __try, __ip)				\
+	do {								\
+		hard_spin_lock_prepare(__rlock);			\
+		if (irq_pipeline_debug_locking()) {			\
+			spin_acquire(&(__rlock)->dep_map, 0, __try, __ip); \
+			LOCK_CONTENDED(__rlock, do_raw_spin_trylock, do_raw_spin_lock); \
+		} else {						\
+			do_raw_spin_lock(__rlock);			\
+		}							\
+	} while (0)
+
+#define hard_lock_acquire_nested(__rlock, __subclass, __ip)		\
+	do {								\
+		hard_spin_lock_prepare(__rlock);			\
+		if (irq_pipeline_debug_locking()) {			\
+			spin_acquire(&(__rlock)->dep_map, __subclass, 0, __ip); \
+			LOCK_CONTENDED(__rlock, do_raw_spin_trylock, do_raw_spin_lock); \
+		} else {						\
+			do_raw_spin_lock(__rlock);			\
+		}							\
+	} while (0)
+
+#define hard_trylock_acquire(__rlock, __try, __ip)			\
+	do {								\
+		if (irq_pipeline_debug_locking())			\
+			spin_acquire(&(__rlock)->dep_map, 0, __try, __ip); \
+	} while (0)
+
+#define hard_lock_release(__rlock, __ip)				\
+	do {								\
+		if (irq_pipeline_debug_locking())			\
+			spin_release(&(__rlock)->dep_map, __ip);	\
+		do_raw_spin_unlock(__rlock);				\
+		hard_spin_unlock_finish(__rlock);			\
+	} while (0)
+
+#if defined(CONFIG_SMP) || defined(CONFIG_DEBUG_SPINLOCK)
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+/*
+ * Hard spinlocks are not checked for invalid wait context on the
+ * in-band side (LD_WAIT_INV). We could be smarter and handle a
+ * specific wait type for them, so as to detect e.g. hard_spin_lock ->
+ * {raw_}spin_lock nesting, but we already have check_inband_stage()
+ * calls all over the place in the latter API, so that kind of misuse
+ * would be detected regardless.
+ */
+#define hard_spin_lock_init(__lock)				\
+	do {							\
+		static struct lock_class_key __key;		\
+		__raw_spin_lock_init((raw_spinlock_t *)__lock, #__lock, &__key, LD_WAIT_INV); \
+	} while (0)
+#else
+#define hard_spin_lock_init(__rlock)				\
+	do { *(__rlock) = __RAW_SPIN_LOCK_UNLOCKED(__rlock); } while (0)
+#endif
+
+/*
+ * XXX: no preempt_enable/disable when hard locking.
+ */
+
+static inline
+void hard_spin_lock(struct raw_spinlock *rlock)
+{
+	hard_lock_acquire(rlock, 0, _THIS_IP_);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline
+void hard_spin_lock_nested(struct raw_spinlock *rlock, int subclass)
+{
+	hard_lock_acquire_nested(rlock, subclass, _THIS_IP_);
+}
+#else
+static inline
+void hard_spin_lock_nested(struct raw_spinlock *rlock, int subclass)
+{
+	hard_spin_lock(rlock);
+}
+#endif
+
+static inline
+void hard_spin_unlock(struct raw_spinlock *rlock)
+{
+	hard_lock_release(rlock, _THIS_IP_);
+}
+
+static inline
+void hard_spin_lock_irq(struct raw_spinlock *rlock)
+{
+	hard_local_irq_disable();
+	hard_lock_acquire(rlock, 0, _THIS_IP_);
+}
+
+static inline
+void hard_spin_unlock_irq(struct raw_spinlock *rlock)
+{
+	hard_lock_release(rlock, _THIS_IP_);
+	hard_local_irq_enable();
+}
+
+static inline
+void hard_spin_unlock_irqrestore(struct raw_spinlock *rlock,
+				 unsigned long flags)
+{
+	hard_lock_release(rlock, _THIS_IP_);
+	hard_local_irq_restore(flags);
+}
+
+static inline
+unsigned long __hard_spin_lock_irqsave(struct raw_spinlock *rlock)
+{
+	unsigned long flags = hard_local_irq_save();
+
+	hard_lock_acquire(rlock, 0, _THIS_IP_);
+
+	return flags;
+}
+
+static inline
+int hard_spin_trylock(struct raw_spinlock *rlock)
+{
+	hard_spin_trylock_prepare(rlock);
+
+	if (do_raw_spin_trylock(rlock)) {
+		hard_trylock_acquire(rlock, 1, _THIS_IP_);
+		return 1;
+	}
+
+	hard_spin_trylock_fail(rlock);
+
+	return 0;
+}
+
+static inline
+unsigned long __hard_spin_trylock_irqsave(struct raw_spinlock *rlock,
+					  int *locked)
+{
+	unsigned long flags = hard_local_irq_save();
+	*locked = hard_spin_trylock(rlock);
+	return *locked ? flags : ({ hard_local_irq_restore(flags); flags; });
+}
+
+static inline
+int hard_spin_trylock_irq(struct raw_spinlock *rlock)
+{
+	hard_local_irq_disable();
+	return hard_spin_trylock(rlock) ? : ({ hard_local_irq_enable(); 0; });
+}
+
+static inline
+int hard_spin_is_locked(struct raw_spinlock *rlock)
+{
+	return arch_spin_is_locked(&rlock->raw_lock);
+}
+
+static inline
+int hard_spin_is_contended(struct raw_spinlock *rlock)
+{
+#ifdef CONFIG_GENERIC_LOCKBREAK
+	return rlock->break_lock;
+#elif defined(arch_spin_is_contended)
+	return arch_spin_is_contended(&rlock->raw_lock);
+#else
+	return 0;
+#endif
+}
+
+#else  /* !SMP && !DEBUG_SPINLOCK */
+
+#define hard_spin_lock_init(__rlock)	do { (void)(__rlock); } while (0)
+#define hard_spin_lock(__rlock)		__HARD_LOCK(__rlock)
+#define hard_spin_lock_nested(__rlock, __subclass)  \
+	do { __HARD_LOCK(__rlock); (void)(__subclass); } while (0)
+#define hard_spin_unlock(__rlock)	__HARD_UNLOCK(__rlock)
+#define hard_spin_lock_irq(__rlock)	__HARD_LOCK_IRQ(__rlock)
+#define hard_spin_unlock_irq(__rlock)	__HARD_UNLOCK_IRQ(__rlock)
+#define hard_spin_unlock_irqrestore(__rlock, __flags)	\
+	__HARD_UNLOCK_IRQRESTORE(__rlock, __flags)
+#define __hard_spin_lock_irqsave(__rlock)		\
+	({						\
+		unsigned long __flags;			\
+		__HARD_LOCK_IRQSAVE(__rlock, __flags);	\
+		__flags;				\
+	})
+#define __hard_spin_trylock_irqsave(__rlock, __locked)	\
+	({						\
+		unsigned long __flags;			\
+		__HARD_LOCK_IRQSAVE(__rlock, __flags);	\
+		*(__locked) = 1;			\
+		__flags;				\
+	})
+#define hard_spin_trylock(__rlock)	({ __HARD_LOCK(__rlock); 1; })
+#define hard_spin_trylock_irq(__rlock)	({ __HARD_LOCK_IRQ(__rlock); 1; })
+#define hard_spin_is_locked(__rlock)	((void)(__rlock), 0)
+#define hard_spin_is_contended(__rlock)	((void)(__rlock), 0)
+#endif	/* !SMP && !DEBUG_SPINLOCK */
+
+/*
+ * In the pipeline entry context, the regular preemption and root
+ * stall logic do not apply since we may actually have preempted any
+ * critical section of the kernel which is protected by regular
+ * locking (spin or stall), or we may even have preempted the
+ * out-of-band stage. Therefore, we just need to grab the raw spinlock
+ * underlying a hybrid spinlock to exclude other CPUs.
+ *
+ * NOTE: When entering the pipeline, IRQs are already hard disabled.
+ */
+
+void __hybrid_spin_lock(struct raw_spinlock *rlock);
+void __hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass);
+
+static inline void hybrid_spin_lock(struct raw_spinlock *rlock)
+{
+	if (in_pipeline())
+		hard_lock_acquire(rlock, 0, _THIS_IP_);
+	else
+		__hybrid_spin_lock(rlock);
+}
+
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static inline
+void hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass)
+{
+	if (in_pipeline())
+		hard_lock_acquire_nested(rlock, subclass, _THIS_IP_);
+	else
+		__hybrid_spin_lock_nested(rlock, subclass);
+}
+#else
+static inline
+void hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass)
+{
+	hybrid_spin_lock(rlock);
+}
+#endif
+
+void __hybrid_spin_unlock(struct raw_spinlock *rlock);
+
+static inline void hybrid_spin_unlock(struct raw_spinlock *rlock)
+{
+	if (in_pipeline())
+		hard_lock_release(rlock, _THIS_IP_);
+	else
+		__hybrid_spin_unlock(rlock);
+}
+
+void __hybrid_spin_lock_irq(struct raw_spinlock *rlock);
+
+static inline void hybrid_spin_lock_irq(struct raw_spinlock *rlock)
+{
+	if (in_pipeline())
+		hard_lock_acquire(rlock, 0, _THIS_IP_);
+	else
+		__hybrid_spin_lock_irq(rlock);
+}
+
+void __hybrid_spin_unlock_irq(struct raw_spinlock *rlock);
+
+static inline void hybrid_spin_unlock_irq(struct raw_spinlock *rlock)
+{
+	if (in_pipeline())
+		hard_lock_release(rlock, _THIS_IP_);
+	else
+		__hybrid_spin_unlock_irq(rlock);
+}
+
+unsigned long __hybrid_spin_lock_irqsave(struct raw_spinlock *rlock);
+
+#define hybrid_spin_lock_irqsave(__rlock, __flags)			\
+	do {								\
+		if (in_pipeline()) {					\
+			hard_lock_acquire(__rlock, 0, _THIS_IP_);	\
+			(__flags) = hard_local_save_flags();		\
+		} else							\
+			(__flags) = __hybrid_spin_lock_irqsave(__rlock); \
+	} while (0)
+
+void __hybrid_spin_unlock_irqrestore(struct raw_spinlock *rlock,
+				      unsigned long flags);
+
+static inline void hybrid_spin_unlock_irqrestore(struct raw_spinlock *rlock,
+						  unsigned long flags)
+{
+
+	if (in_pipeline())
+		hard_lock_release(rlock, _THIS_IP_);
+	else
+		__hybrid_spin_unlock_irqrestore(rlock, flags);
+}
+
+int __hybrid_spin_trylock(struct raw_spinlock *rlock);
+
+static inline int hybrid_spin_trylock(struct raw_spinlock *rlock)
+{
+	if (in_pipeline()) {
+		hard_spin_trylock_prepare(rlock);
+		if (do_raw_spin_trylock(rlock)) {
+			hard_trylock_acquire(rlock, 1, _THIS_IP_);
+			return 1;
+		}
+		hard_spin_trylock_fail(rlock);
+		return 0;
+	}
+
+	return __hybrid_spin_trylock(rlock);
+}
+
+int __hybrid_spin_trylock_irqsave(struct raw_spinlock *rlock,
+				   unsigned long *flags);
+
+#define hybrid_spin_trylock_irqsave(__rlock, __flags)			\
+	({								\
+		int __ret = 1;						\
+		if (in_pipeline()) {					\
+			hard_spin_trylock_prepare(__rlock);		\
+			if (do_raw_spin_trylock(__rlock)) {		\
+				hard_trylock_acquire(__rlock, 1, _THIS_IP_); \
+				(__flags) = hard_local_save_flags();	\
+			} else {					\
+				hard_spin_trylock_fail(__rlock);	\
+				__ret = 0;				\
+			}						\
+		} else {						\
+			__ret = __hybrid_spin_trylock_irqsave(__rlock, &(__flags)); \
+		}							\
+		__ret;							\
+	})
+
+static inline int hybrid_spin_trylock_irq(struct raw_spinlock *rlock)
+{
+	unsigned long flags;
+	return hybrid_spin_trylock_irqsave(rlock, flags);
+}
+
+static inline
+int hybrid_spin_is_locked(struct raw_spinlock *rlock)
+{
+	return hard_spin_is_locked(rlock);
+}
+
+static inline
+int hybrid_spin_is_contended(struct raw_spinlock *rlock)
+{
+	return hard_spin_is_contended(rlock);
+}
+
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+void check_spinlock_context(void);
+#else
+static inline void check_spinlock_context(void) { }
+#endif
+
+#endif /* __LINUX_SPINLOCK_PIPELINE_H */
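
A minimal sketch of the hard lock API defined above (names hypothetical). There is no preempt_disable()/preempt_enable() on these paths: keeping hard interrupts off for the whole critical section is the protection.

static raw_spinlock_t sample_rlock;

static void sample_init(void)
{
	hard_spin_lock_init(&sample_rlock);
}

static void sample_hard_update(void)
{
	unsigned long flags;

	hard_spin_lock_irqsave(&sample_rlock, flags);
	/* ... state shared with out-of-band handlers ... */
	hard_spin_unlock_irqrestore(&sample_rlock, flags);
}
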
diff --git a/kernel/include/linux/spinlock_types.h b/kernel/include/linux/spinlock_types.h
index b981caa..c385825 100644
--- a/kernel/include/linux/spinlock_types.h
+++ b/kernel/include/linux/spinlock_types.h
@@ -43,9 +43,15 @@
 		.name = #lockname,			\
 		.wait_type_inner = LD_WAIT_CONFIG,	\
 	}
+# define HARD_SPIN_DEP_MAP_INIT(lockname)		\
+	.dep_map = {					\
+		.name = #lockname,			\
+		.wait_type_inner = LD_WAIT_INV,		\
+	}
 #else
 # define RAW_SPIN_DEP_MAP_INIT(lockname)
 # define SPIN_DEP_MAP_INIT(lockname)
+# define HARD_SPIN_DEP_MAP_INIT(lockname)
 #endif
 
 #ifdef CONFIG_DEBUG_SPINLOCK
@@ -96,6 +102,154 @@
 
 #define DEFINE_SPINLOCK(x)	spinlock_t x = __SPIN_LOCK_UNLOCKED(x)
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+void __bad_spinlock_type(void);
+
+#define __RAWLOCK(x) ((struct raw_spinlock *)(x))
+
+#define LOCK_ALTERNATIVES(__lock, __base_op, __raw_form, __args...)	\
+	do {								\
+		if (__builtin_types_compatible_p(typeof(__lock),	\
+						 raw_spinlock_t *))	\
+			__raw_form;					\
+		else if (__builtin_types_compatible_p(typeof(__lock),	\
+						 hard_spinlock_t *))	\
+			hard_ ## __base_op(__RAWLOCK(__lock), ##__args); \
+		else if (__builtin_types_compatible_p(typeof(__lock),	\
+						 hybrid_spinlock_t *))	\
+			hybrid_ ## __base_op(__RAWLOCK(__lock), ##__args); \
+		else							\
+			__bad_spinlock_type();				\
+	} while (0)
+
+#define LOCK_ALTERNATIVES_RET(__lock, __base_op, __raw_form, __args...) \
+	({								\
+		long __ret = 0;						\
+		if (__builtin_types_compatible_p(typeof(__lock),	\
+						 raw_spinlock_t *))	\
+			__ret = __raw_form;				\
+		else if (__builtin_types_compatible_p(typeof(__lock),	\
+						 hard_spinlock_t *))	\
+			__ret = hard_ ## __base_op(__RAWLOCK(__lock), ##__args); \
+		else if (__builtin_types_compatible_p(typeof(__lock),	\
+						 hybrid_spinlock_t *))	\
+			__ret = hybrid_ ## __base_op(__RAWLOCK(__lock), ##__args); \
+		else							\
+			__bad_spinlock_type();				\
+		__ret;							\
+	})
+
+#define LOCKDEP_ALT_DEPMAP(__lock)					\
+	({								\
+		struct lockdep_map *__ret;				\
+		if (__builtin_types_compatible_p(typeof(&(__lock)->dep_map), \
+						 struct phony_lockdep_map *)) \
+			__ret = &__RAWLOCK(__lock)->dep_map;		\
+		else							\
+			__ret = (struct lockdep_map *)(&(__lock)->dep_map); \
+		__ret;							\
+	})
+
+#define LOCKDEP_HARD_DEBUG(__lock, __nodebug, __debug)	\
+	do {						\
+		if (__builtin_types_compatible_p(typeof(__lock),	\
+						raw_spinlock_t *) ||	\
+			irq_pipeline_debug_locking()) {			\
+			__debug;			\
+		} else {				\
+			__nodebug;			\
+		}					\
+	} while (0)
+
+#define LOCKDEP_HARD_DEBUG_RET(__lock, __nodebug, __debug)	\
+	({						\
+		typeof(__nodebug) __ret;		\
+		if (__builtin_types_compatible_p(typeof(__lock),	\
+						raw_spinlock_t *) ||	\
+			irq_pipeline_debug_locking()) {			\
+			__ret = (__debug);		\
+		} else {				\
+			__ret = (__nodebug);		\
+		}					\
+		__ret;					\
+	})
+
+#define __HARD_SPIN_LOCK_INITIALIZER(x)	{			\
+		.rlock = {					\
+			.raw_lock = __ARCH_SPIN_LOCK_UNLOCKED,	\
+			SPIN_DEBUG_INIT(x)			\
+			HARD_SPIN_DEP_MAP_INIT(x)		\
+		}						\
+	}
+
+#define __HARD_SPIN_LOCK_UNLOCKED(x)	\
+	(hard_spinlock_t) __HARD_SPIN_LOCK_INITIALIZER(x)
+
+#define DEFINE_HARD_SPINLOCK(x)	hard_spinlock_t x = __HARD_SPIN_LOCK_UNLOCKED(x)
+
+#define DECLARE_HARD_SPINLOCK(x)	hard_spinlock_t x
+
+/*
+ * The presence of a phony depmap is tested by LOCKDEP_ALT_DEPMAP() to
+ * locate the real depmap without enumerating every spinlock type
+ * which may contain one.
+ */
+struct phony_lockdep_map { };
+
+typedef struct hard_spinlock {
+	/* XXX: offsetof(struct hard_spinlock, rlock) == 0 */
+	struct raw_spinlock rlock;
+	struct phony_lockdep_map dep_map;
+} hard_spinlock_t;
+
+#define DEFINE_MUTABLE_SPINLOCK(x)	hybrid_spinlock_t x = {	\
+		.rlock = __RAW_SPIN_LOCK_UNLOCKED(x),			\
+	}
+
+#define DECLARE_MUTABLE_SPINLOCK(x)	hybrid_spinlock_t x
+
+typedef struct hybrid_spinlock {
+	/* XXX: offsetof(struct hybrid_spinlock, rlock) == 0 */
+	struct raw_spinlock rlock;
+	unsigned long hwflags;
+	struct phony_lockdep_map dep_map;
+} hybrid_spinlock_t;
+
+#else
+
+typedef raw_spinlock_t hard_spinlock_t;
+
+typedef raw_spinlock_t hybrid_spinlock_t;
+
+#define LOCK_ALTERNATIVES(__lock, __base_op, __raw_form, __args...)	\
+	__raw_form
+
+#define LOCK_ALTERNATIVES_RET(__lock, __base_op, __raw_form, __args...) \
+	__raw_form
+
+#define LOCKDEP_ALT_DEPMAP(__lock)	(&(__lock)->dep_map)
+
+#define LOCKDEP_HARD_DEBUG(__lock, __nodebug, __debug)		do { __debug; } while (0)
+
+#define LOCKDEP_HARD_DEBUG_RET(__lock, __nodebug, __debug)	({ __debug; })
+
+#define DEFINE_HARD_SPINLOCK(x)		DEFINE_RAW_SPINLOCK(x)
+
+#define DECLARE_HARD_SPINLOCK(x)	raw_spinlock_t x
+
+#define DEFINE_MUTABLE_SPINLOCK(x)	DEFINE_RAW_SPINLOCK(x)
+
+#define DECLARE_MUTABLE_SPINLOCK(x)	raw_spinlock_t x
+
+#define __RAWLOCK(x) (x)
+
+#define __HARD_SPIN_LOCK_UNLOCKED(__lock)	__RAW_SPIN_LOCK_UNLOCKED(__lock)
+
+#define __HARD_SPIN_LOCK_INITIALIZER(__lock)	__RAW_SPIN_LOCK_UNLOCKED(__lock)
+
+#endif	/* CONFIG_IRQ_PIPELINE */
+
 #include <linux/rwlock_types.h>
 
 #endif /* __LINUX_SPINLOCK_TYPES_H */
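
A sketch of the compile-time dispatch above (lock name hypothetical): the regular raw_spin_* calls work unchanged on a hard spinlock, with LOCK_ALTERNATIVES() routing them to the hard_spin_* variants under CONFIG_IRQ_PIPELINE and collapsing to the plain raw forms otherwise.

static DEFINE_HARD_SPINLOCK(sample_hlock);

static void sample_locked_op(void)
{
	unsigned long flags;

	/* Dispatches to hard_spin_lock_irqsave() for hard_spinlock_t. */
	raw_spin_lock_irqsave(&sample_hlock, flags);
	/* ... hard irqs stay off across both stages here ... */
	raw_spin_unlock_irqrestore(&sample_hlock, flags);
}
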
diff --git a/kernel/include/linux/stop_machine.h b/kernel/include/linux/stop_machine.h
index ddafb3c..82c56df 100644
--- a/kernel/include/linux/stop_machine.h
+++ b/kernel/include/linux/stop_machine.h
@@ -6,6 +6,7 @@
 #include <linux/cpumask.h>
 #include <linux/smp.h>
 #include <linux/list.h>
+#include <linux/interrupt.h>
 
 /*
  * stop_cpu[s]() is simplistic per-cpu maximum priority cpu
@@ -143,7 +144,9 @@
 	unsigned long flags;
 	int ret;
 	local_irq_save(flags);
+	hard_irq_disable();
 	ret = fn(data);
+	hard_irq_enable();
 	local_irq_restore(flags);
 	return ret;
 }
diff --git a/kernel/include/linux/thread_info.h b/kernel/include/linux/thread_info.h
index f3040b0..83187b9 100644
--- a/kernel/include/linux/thread_info.h
+++ b/kernel/include/linux/thread_info.h
@@ -170,6 +170,72 @@
 static inline void arch_setup_new_exec(void) { }
 #endif
 
+#ifdef ti_local_flags
+/*
+ * If the arch defines a set of per-thread synchronous flags, provide
+ * generic accessors to them.
+ */
+static __always_inline
+void set_ti_local_flags(struct thread_info *ti, unsigned int mask)
+{
+	ti_local_flags(ti) |= mask;
+}
+
+static __always_inline void set_thread_local_flags(unsigned int mask)
+{
+	set_ti_local_flags(current_thread_info(), mask);
+}
+
+static __always_inline
+int test_and_set_ti_local_flags(struct thread_info *ti, unsigned int mask)
+{
+	int old = ti_local_flags(ti) & mask;
+	ti_local_flags(ti) |= mask;
+	return old != 0;
+}
+
+static __always_inline int test_and_set_thread_local_flags(unsigned int mask)
+{
+	return test_and_set_ti_local_flags(current_thread_info(), mask);
+}
+
+static __always_inline
+void clear_ti_local_flags(struct thread_info *ti, unsigned int mask)
+{
+	ti_local_flags(ti) &= ~mask;
+}
+
+static __always_inline
+int test_and_clear_ti_local_flags(struct thread_info *ti, unsigned int mask)
+{
+	int old = ti_local_flags(ti) & mask;
+	ti_local_flags(ti) &= ~mask;
+	return old != 0;
+}
+
+static __always_inline int test_and_clear_thread_local_flags(unsigned int mask)
+{
+	return test_and_clear_ti_local_flags(current_thread_info(), mask);
+}
+
+static __always_inline void clear_thread_local_flags(unsigned int mask)
+{
+	clear_ti_local_flags(current_thread_info(), mask);
+}
+
+static __always_inline
+bool test_ti_local_flags(struct thread_info *ti, unsigned int mask)
+{
+	return (ti_local_flags(ti) & mask) != 0;
+}
+
+static __always_inline bool test_thread_local_flags(unsigned int mask)
+{
+	return test_ti_local_flags(current_thread_info(), mask);
+}
+
+#endif	/* ti_local_flags */
+
 #endif	/* __KERNEL__ */
 
 #endif /* _LINUX_THREAD_INFO_H */
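
These accessors back the earlier linux/sched.h change: task_is_off_stage() is test_ti_local_flags() applied to the Dovetail _TLF_OFFSTAGE bit. A sketch of the same pattern (function name hypothetical):

static bool sample_thread_is_offstage(struct task_struct *p)
{
	/* _TLF_OFFSTAGE: thread has left the in-band stage. */
	return test_ti_local_flags(task_thread_info(p), _TLF_OFFSTAGE);
}
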
diff --git a/kernel/include/linux/tick.h b/kernel/include/linux/tick.h
index 7340613..b9896c0 100644
--- a/kernel/include/linux/tick.h
+++ b/kernel/include/linux/tick.h
@@ -20,6 +20,14 @@
 extern void tick_resume_local(void);
 extern void tick_handover_do_timer(void);
 extern void tick_cleanup_dead_cpu(int cpu);
+
+#ifdef CONFIG_IRQ_PIPELINE
+int tick_install_proxy(void (*setup_proxy)(struct clock_proxy_device *dev),
+		const struct cpumask *cpumask);
+void tick_uninstall_proxy(const struct cpumask *cpumask);
+void tick_notify_proxy(void);
+#endif
+
 #else /* CONFIG_GENERIC_CLOCKEVENTS */
 static inline void tick_init(void) { }
 static inline void tick_suspend_local(void) { }
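
A hedged sketch of the proxy tick interface above (the setup callback body is a placeholder; struct clock_proxy_device is defined by the pipeline code elsewhere):

static void sample_setup_proxy(struct clock_proxy_device *dev)
{
	/* Wire up the proxy device handlers here (placeholder). */
}

static int sample_install_proxy(void)
{
	return tick_install_proxy(sample_setup_proxy, cpu_online_mask);
}
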
diff --git a/kernel/include/linux/tracepoint.h b/kernel/include/linux/tracepoint.h
index c51a002..c8629cd 100644
--- a/kernel/include/linux/tracepoint.h
+++ b/kernel/include/linux/tracepoint.h
@@ -175,6 +175,10 @@
  * The reason for this is to handle the "void" prototype. If a tracepoint
  * has a "void" prototype, then it is invalid to declare a function
  * as "(void *, void)".
+ *
+ * IRQ pipeline: we may not depend on RCU for data which may be
+ * manipulated from the out-of-band stage, so rcuidle has to be false
+ * if running_oob().
  */
 #define __DO_TRACE(name, proto, args, cond, rcuidle)			\
 	do {								\
@@ -223,7 +227,7 @@
 			__DO_TRACE(name,				\
 				TP_PROTO(data_proto),			\
 				TP_ARGS(data_args),			\
-				TP_CONDITION(cond), 1);			\
+				TP_CONDITION(cond), running_inband());	\
 	}
 #else
 #define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
diff --git a/kernel/include/linux/vmalloc.h b/kernel/include/linux/vmalloc.h
index 167a953..761ed17 100644
--- a/kernel/include/linux/vmalloc.h
+++ b/kernel/include/linux/vmalloc.h
@@ -244,6 +244,7 @@
 
 int register_vmap_purge_notifier(struct notifier_block *nb);
 int unregister_vmap_purge_notifier(struct notifier_block *nb);
+void arch_advertise_page_mapping(unsigned long start, unsigned long end);
 
 /* Allow disabling lazy TLB flushing */
 extern bool lazy_vunmap_enable;
diff --git a/kernel/include/linux/wakeup_reason.h b/kernel/include/linux/wakeup_reason.h
index 54f5caa..2fe0fd0 100644
--- a/kernel/include/linux/wakeup_reason.h
+++ b/kernel/include/linux/wakeup_reason.h
@@ -20,7 +20,7 @@
 
 #define MAX_SUSPEND_ABORT_LEN 256
 
-#ifdef CONFIG_SUSPEND
+#if IS_ENABLED(CONFIG_SUSPEND) && !IS_ENABLED(CONFIG_DOVETAIL)
 void log_irq_wakeup_reason(int irq);
 void log_threaded_irq_wakeup_reason(int irq, int parent_irq);
 void log_suspend_abort_reason(const char *fmt, ...);
diff --git a/kernel/include/linux/xenomai/wrappers.h b/kernel/include/linux/xenomai/wrappers.h
new file mode 120000
index 0000000..3cdb0a9
--- /dev/null
+++ b/kernel/include/linux/xenomai/wrappers.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h
\ No newline at end of file
diff --git a/kernel/include/net/netoob.h b/kernel/include/net/netoob.h
new file mode 100644
index 0000000..907376a
--- /dev/null
+++ b/kernel/include/net/netoob.h
@@ -0,0 +1,17 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _NET_OOBNET_H
+#define _NET_OOBNET_H
+
+#include <dovetail/netdevice.h>
+
+/* Device supports direct out-of-band operations (RX & TX) */
+#define IFF_OOB_CAPABLE		BIT(0)
+/* Device is an out-of-band port */
+#define IFF_OOB_PORT		BIT(1)
+
+struct oob_netdev_context {
+	int flags;
+	struct oob_netdev_state dev_state;
+};
+
+#endif /* !_NET_OOBNET_H */
diff --git a/kernel/include/net/sock.h b/kernel/include/net/sock.h
index c604052..ea5f2fa 100644
--- a/kernel/include/net/sock.h
+++ b/kernel/include/net/sock.h
@@ -540,6 +540,9 @@
 	ANDROID_KABI_RESERVE(8);
 
 	ANDROID_OEM_DATA(1);
+#ifdef CONFIG_NET_OOB
+	void			*oob_data;
+#endif
 };
 
 enum sk_pacing {
diff --git a/kernel/include/trace/events/cobalt-core.h b/kernel/include/trace/events/cobalt-core.h
new file mode 120000
index 0000000..735e8e8
--- /dev/null
+++ b/kernel/include/trace/events/cobalt-core.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h
\ No newline at end of file
diff --git a/kernel/include/trace/events/cobalt-posix.h b/kernel/include/trace/events/cobalt-posix.h
new file mode 120000
index 0000000..9dc0fe2
--- /dev/null
+++ b/kernel/include/trace/events/cobalt-posix.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h
\ No newline at end of file
diff --git a/kernel/include/trace/events/cobalt-rtdm.h b/kernel/include/trace/events/cobalt-rtdm.h
new file mode 120000
index 0000000..79c5693
--- /dev/null
+++ b/kernel/include/trace/events/cobalt-rtdm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h
\ No newline at end of file
diff --git a/kernel/include/trace/events/irq.h b/kernel/include/trace/events/irq.h
index bb70f46..4642f45 100644
--- a/kernel/include/trace/events/irq.h
+++ b/kernel/include/trace/events/irq.h
@@ -100,6 +100,48 @@
 		  __entry->irq, __entry->ret ? "handled" : "unhandled")
 );
 
+/**
+ * irq_pipeline_entry - called when an external irq enters the pipeline
+ * @irq: irq number
+ */
+TRACE_EVENT(irq_pipeline_entry,
+
+	TP_PROTO(int irq),
+
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(	int,	irq		)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("irq=%d", __entry->irq)
+);
+
+/**
+ * irq_pipeline_exit - called when an external irq leaves the pipeline
+ * @irq: irq number
+ */
+TRACE_EVENT(irq_pipeline_exit,
+
+	TP_PROTO(int irq),
+
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(	int,	irq		)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("irq=%d", __entry->irq)
+);
+
 DECLARE_EVENT_CLASS(softirq,
 
 	TP_PROTO(unsigned int vec_nr),
diff --git a/kernel/include/uapi/asm-generic/dovetail.h b/kernel/include/uapi/asm-generic/dovetail.h
new file mode 100644
index 0000000..795aa38
--- /dev/null
+++ b/kernel/include/uapi/asm-generic/dovetail.h
@@ -0,0 +1,7 @@
+/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note */
+#ifndef __ASM_GENERIC_DOVETAIL_H
+#define __ASM_GENERIC_DOVETAIL_H
+
+#define __OOB_SYSCALL_BIT	0x10000000
+
+#endif /* !__ASM_GENERIC_DOVETAIL_H */
diff --git a/kernel/include/uapi/asm-generic/fcntl.h b/kernel/include/uapi/asm-generic/fcntl.h
index 9dc0bf0..11415c6 100644
--- a/kernel/include/uapi/asm-generic/fcntl.h
+++ b/kernel/include/uapi/asm-generic/fcntl.h
@@ -89,6 +89,15 @@
 #define __O_TMPFILE	020000000
 #endif
 
+/*
+ * Tells the open call that out-of-band operations should be enabled
+ * for the file (if supported). Can also be passed along to socket(2)
+ * via the type argument as SOCK_OOB.
+ */
+#ifndef O_OOB
+#define O_OOB		010000000000
+#endif
+
 /* a horrid kludge trying to make sure that this will fail on old kernels */
 #define O_TMPFILE (__O_TMPFILE | O_DIRECTORY)
 #define O_TMPFILE_MASK (__O_TMPFILE | O_DIRECTORY | O_CREAT)      
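
A userspace sketch of the flag above (whether a given driver honors the bit is device-specific):

#include <fcntl.h>

int sample_open_oob(const char *path)
{
	return open(path, O_RDWR | O_OOB);
}
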
diff --git a/kernel/include/uapi/linux/clocksource.h b/kernel/include/uapi/linux/clocksource.h
new file mode 100644
index 0000000..a0a1c27
--- /dev/null
+++ b/kernel/include/uapi/linux/clocksource.h
@@ -0,0 +1,33 @@
+/*
+ * Definitions for user-mappable clock sources.
+ *
+ * Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+#ifndef _UAPI_LINUX_CLOCKSOURCE_H
+#define _UAPI_LINUX_CLOCKSOURCE_H
+
+enum clksrc_user_mmio_type {
+	CLKSRC_MMIO_L_UP,
+	CLKSRC_MMIO_L_DOWN,
+	CLKSRC_MMIO_W_UP,
+	CLKSRC_MMIO_W_DOWN,
+	CLKSRC_DMMIO_L_UP,
+	CLKSRC_DMMIO_W_UP,
+
+	CLKSRC_MMIO_TYPE_NR,
+};
+
+struct clksrc_user_mmio_info {
+	enum clksrc_user_mmio_type type;
+	void *reg_lower;
+	unsigned int mask_lower;
+	unsigned int bits_lower;
+	void *reg_upper;
+	unsigned int mask_upper;
+};
+
+#define CLKSRC_USER_MMIO_MAX 16
+
+#define CLKSRC_USER_MMIO_MAP _IOWR(0xC1, 0, struct clksrc_user_mmio_info)
+
+#endif /* _UAPI_LINUX_CLOCKSOURCE_H */
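
A userspace sketch of the mapping ioctl above (the device path is whatever node the clocksource driver exposes; error handling kept minimal):

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/clocksource.h>

int sample_map_clocksource(const char *devpath,
			struct clksrc_user_mmio_info *info)
{
	int fd = open(devpath, O_RDWR);

	if (fd < 0)
		return -1;
	if (ioctl(fd, CLKSRC_USER_MMIO_MAP, info) < 0) {
		close(fd);
		return -1;
	}
	return fd;	/* keep fd open while the mapping is in use */
}
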
diff --git a/kernel/include/vdso/datapage.h b/kernel/include/vdso/datapage.h
index 73eb622..e2cd1e8 100644
--- a/kernel/include/vdso/datapage.h
+++ b/kernel/include/vdso/datapage.h
@@ -106,9 +106,34 @@
 	u32			hrtimer_res;
 	u32			__unused;
 
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	u32			cs_type_seq;
+	char			cs_mmdev[16];
+#endif
+
 	struct arch_vdso_data	arch_data;
 };
 
+#if defined(CONFIG_GENERIC_CLOCKSOURCE_VDSO) && !defined(ENABLE_COMPAT_VDSO)
+
+#include <linux/clocksource.h>
+
+struct clksrc_info;
+
+typedef u64 vdso_read_cycles_t(const struct clksrc_info *info);
+
+struct clksrc_info {
+	vdso_read_cycles_t *read_cycles;
+	struct clksrc_user_mmio_info mmio;
+};
+
+struct vdso_priv {
+	u32 current_cs_type_seq;
+	struct clksrc_info clksrc_info[CLOCKSOURCE_VDSO_MMIO + CLKSRC_USER_MMIO_MAX];
+};
+
+#endif	/* CONFIG_GENERIC_CLOCKSOURCE_VDSO && !ENABLE_COMPAT_VDSO */
+
 /*
  * We use the hidden visibility to prevent the compiler from generating a GOT
  * relocation. Not only is going through a GOT useless (the entry couldn't and
diff --git a/kernel/include/xenomai/cobalt/kernel/ancillaries.h b/kernel/include/xenomai/cobalt/kernel/ancillaries.h
new file mode 120000
index 0000000..b1779f7
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/ancillaries.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/arith.h b/kernel/include/xenomai/cobalt/kernel/arith.h
new file mode 120000
index 0000000..02a5eaa
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/arith.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/assert.h b/kernel/include/xenomai/cobalt/kernel/assert.h
new file mode 120000
index 0000000..4764f28
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/assert.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/bufd.h b/kernel/include/xenomai/cobalt/kernel/bufd.h
new file mode 120000
index 0000000..dea345e
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/bufd.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/clock.h b/kernel/include/xenomai/cobalt/kernel/clock.h
new file mode 120000
index 0000000..bb75117
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/clock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/compat.h b/kernel/include/xenomai/cobalt/kernel/compat.h
new file mode 120000
index 0000000..806f38c
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/compat.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/heap.h b/kernel/include/xenomai/cobalt/kernel/heap.h
new file mode 120000
index 0000000..bf74265
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/heap.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/init.h b/kernel/include/xenomai/cobalt/kernel/init.h
new file mode 120000
index 0000000..769d0be
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/init.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/intr.h b/kernel/include/xenomai/cobalt/kernel/intr.h
new file mode 120000
index 0000000..82c1f25
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/intr.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/list.h b/kernel/include/xenomai/cobalt/kernel/list.h
new file mode 120000
index 0000000..811ee1d
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/list.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/lock.h b/kernel/include/xenomai/cobalt/kernel/lock.h
new file mode 120000
index 0000000..8513e93
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/lock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/map.h b/kernel/include/xenomai/cobalt/kernel/map.h
new file mode 120000
index 0000000..114ea04
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/map.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/pipe.h b/kernel/include/xenomai/cobalt/kernel/pipe.h
new file mode 120000
index 0000000..dda8199
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/pipe.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/ppd.h b/kernel/include/xenomai/cobalt/kernel/ppd.h
new file mode 120000
index 0000000..7afa5ef
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/ppd.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/registry.h b/kernel/include/xenomai/cobalt/kernel/registry.h
new file mode 120000
index 0000000..e92a257
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/registry.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/sched-idle.h b/kernel/include/xenomai/cobalt/kernel/sched-idle.h
new file mode 120000
index 0000000..c882e34
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/sched-idle.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/sched-quota.h b/kernel/include/xenomai/cobalt/kernel/sched-quota.h
new file mode 120000
index 0000000..96dd8fa
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/sched-quota.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/sched-rt.h b/kernel/include/xenomai/cobalt/kernel/sched-rt.h
new file mode 120000
index 0000000..c70900d
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/sched-rt.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/sched-sporadic.h b/kernel/include/xenomai/cobalt/kernel/sched-sporadic.h
new file mode 120000
index 0000000..c4c1024
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/sched-sporadic.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/sched-tp.h b/kernel/include/xenomai/cobalt/kernel/sched-tp.h
new file mode 120000
index 0000000..3ad87af
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/sched-tp.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/sched-weak.h b/kernel/include/xenomai/cobalt/kernel/sched-weak.h
new file mode 120000
index 0000000..bba38c1
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/sched-weak.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/sched.h b/kernel/include/xenomai/cobalt/kernel/sched.h
new file mode 120000
index 0000000..1f6c51f
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/sched.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/schedparam.h b/kernel/include/xenomai/cobalt/kernel/schedparam.h
new file mode 120000
index 0000000..bf1c35a
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/schedparam.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/schedqueue.h b/kernel/include/xenomai/cobalt/kernel/schedqueue.h
new file mode 120000
index 0000000..8c2abcb
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/schedqueue.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/select.h b/kernel/include/xenomai/cobalt/kernel/select.h
new file mode 120000
index 0000000..37337d9
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/select.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/stat.h b/kernel/include/xenomai/cobalt/kernel/stat.h
new file mode 120000
index 0000000..21ad687
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/stat.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/synch.h b/kernel/include/xenomai/cobalt/kernel/synch.h
new file mode 120000
index 0000000..df9eaf7
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/synch.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/thread.h b/kernel/include/xenomai/cobalt/kernel/thread.h
new file mode 120000
index 0000000..35fb1f0
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/time.h b/kernel/include/xenomai/cobalt/kernel/time.h
new file mode 120000
index 0000000..d85138d
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/time.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/timer.h b/kernel/include/xenomai/cobalt/kernel/timer.h
new file mode 120000
index 0000000..b32c1d2
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/timer.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/trace.h b/kernel/include/xenomai/cobalt/kernel/trace.h
new file mode 120000
index 0000000..e27bfc3
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/trace.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/tree.h b/kernel/include/xenomai/cobalt/kernel/tree.h
new file mode 120000
index 0000000..bad47ea
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/tree.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/vdso.h b/kernel/include/xenomai/cobalt/kernel/vdso.h
new file mode 120000
index 0000000..7cec828
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/vdso.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/kernel/vfile.h b/kernel/include/xenomai/cobalt/kernel/vfile.h
new file mode 120000
index 0000000..63e86ab
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/kernel/vfile.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h b/kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h
new file mode 120000
index 0000000..c44382c
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/asm-generic/arith.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/asm-generic/features.h b/kernel/include/xenomai/cobalt/uapi/asm-generic/features.h
new file mode 120000
index 0000000..b2baff9
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/asm-generic/features.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h b/kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h
new file mode 120000
index 0000000..54b1276
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/asm-generic/syscall.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/cond.h b/kernel/include/xenomai/cobalt/uapi/cond.h
new file mode 120000
index 0000000..52c870b
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/cond.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/corectl.h b/kernel/include/xenomai/cobalt/uapi/corectl.h
new file mode 120000
index 0000000..b6747f6
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/corectl.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/event.h b/kernel/include/xenomai/cobalt/uapi/event.h
new file mode 120000
index 0000000..dae845f
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/heap.h b/kernel/include/xenomai/cobalt/uapi/kernel/heap.h
new file mode 120000
index 0000000..4b7b7e7
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/heap.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/limits.h b/kernel/include/xenomai/cobalt/uapi/kernel/limits.h
new file mode 120000
index 0000000..b2d6b1a
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/limits.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/pipe.h b/kernel/include/xenomai/cobalt/uapi/kernel/pipe.h
new file mode 120000
index 0000000..29e61ce
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/pipe.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/synch.h b/kernel/include/xenomai/cobalt/uapi/kernel/synch.h
new file mode 120000
index 0000000..96af408
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/synch.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/thread.h b/kernel/include/xenomai/cobalt/uapi/kernel/thread.h
new file mode 120000
index 0000000..41c6343
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/trace.h b/kernel/include/xenomai/cobalt/uapi/kernel/trace.h
new file mode 120000
index 0000000..f18e012
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/trace.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/types.h b/kernel/include/xenomai/cobalt/uapi/kernel/types.h
new file mode 120000
index 0000000..740cd9e
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/types.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/urw.h b/kernel/include/xenomai/cobalt/uapi/kernel/urw.h
new file mode 120000
index 0000000..88675ba
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/urw.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/kernel/vdso.h b/kernel/include/xenomai/cobalt/uapi/kernel/vdso.h
new file mode 120000
index 0000000..3679dc4
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/kernel/vdso.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/monitor.h b/kernel/include/xenomai/cobalt/uapi/monitor.h
new file mode 120000
index 0000000..1fef3c2
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/monitor.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/mutex.h b/kernel/include/xenomai/cobalt/uapi/mutex.h
new file mode 120000
index 0000000..aaea1b2
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/mutex.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/sched.h b/kernel/include/xenomai/cobalt/uapi/sched.h
new file mode 120000
index 0000000..4b96766
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/sched.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/sem.h b/kernel/include/xenomai/cobalt/uapi/sem.h
new file mode 120000
index 0000000..2284303
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/sem.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/signal.h b/kernel/include/xenomai/cobalt/uapi/signal.h
new file mode 120000
index 0000000..8330646
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/signal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/syscall.h b/kernel/include/xenomai/cobalt/uapi/syscall.h
new file mode 120000
index 0000000..f3c6f55
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/syscall.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/thread.h b/kernel/include/xenomai/cobalt/uapi/thread.h
new file mode 120000
index 0000000..23043e0
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/cobalt/uapi/time.h b/kernel/include/xenomai/cobalt/uapi/time.h
new file mode 120000
index 0000000..60b0c9c
--- /dev/null
+++ b/kernel/include/xenomai/cobalt/uapi/time.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/linux/stdarg.h b/kernel/include/xenomai/linux/stdarg.h
new file mode 120000
index 0000000..dc30f53
--- /dev/null
+++ b/kernel/include/xenomai/linux/stdarg.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/clock.h b/kernel/include/xenomai/pipeline/clock.h
new file mode 120000
index 0000000..0605361
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/clock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/inband_work.h b/kernel/include/xenomai/pipeline/inband_work.h
new file mode 120000
index 0000000..8c96199
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/inband_work.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/irq.h b/kernel/include/xenomai/pipeline/irq.h
new file mode 120000
index 0000000..db68bdf
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/irq.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/kevents.h b/kernel/include/xenomai/pipeline/kevents.h
new file mode 120000
index 0000000..c441eb1
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/kevents.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/lock.h b/kernel/include/xenomai/pipeline/lock.h
new file mode 120000
index 0000000..f0a97be
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/lock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/machine.h b/kernel/include/xenomai/pipeline/machine.h
new file mode 120000
index 0000000..2bf5b78
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/machine.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/pipeline.h b/kernel/include/xenomai/pipeline/pipeline.h
new file mode 120000
index 0000000..e050cd6
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/pipeline.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/sched.h b/kernel/include/xenomai/pipeline/sched.h
new file mode 120000
index 0000000..fcb384a
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/sched.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/sirq.h b/kernel/include/xenomai/pipeline/sirq.h
new file mode 120000
index 0000000..0335464
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/sirq.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/thread.h b/kernel/include/xenomai/pipeline/thread.h
new file mode 120000
index 0000000..bdc0546
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/tick.h b/kernel/include/xenomai/pipeline/tick.h
new file mode 120000
index 0000000..5c20516
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/tick.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/trace.h b/kernel/include/xenomai/pipeline/trace.h
new file mode 120000
index 0000000..74b9e80
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/trace.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/vdso_fallback.h b/kernel/include/xenomai/pipeline/vdso_fallback.h
new file mode 120000
index 0000000..4ca7a0d
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/vdso_fallback.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/pipeline/wrappers.h b/kernel/include/xenomai/pipeline/wrappers.h
new file mode 120000
index 0000000..7f1efd0
--- /dev/null
+++ b/kernel/include/xenomai/pipeline/wrappers.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/buffer.h b/kernel/include/xenomai/rtdm/analogy/buffer.h
new file mode 120000
index 0000000..c75cce3
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/buffer.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/channel_range.h b/kernel/include/xenomai/rtdm/analogy/channel_range.h
new file mode 120000
index 0000000..bec9757
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/channel_range.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/command.h b/kernel/include/xenomai/rtdm/analogy/command.h
new file mode 120000
index 0000000..6f997c9
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/command.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/context.h b/kernel/include/xenomai/rtdm/analogy/context.h
new file mode 120000
index 0000000..fd74ca2
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/context.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/device.h b/kernel/include/xenomai/rtdm/analogy/device.h
new file mode 120000
index 0000000..bf80883
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/device.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/driver.h b/kernel/include/xenomai/rtdm/analogy/driver.h
new file mode 120000
index 0000000..aa75656
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/driver.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/instruction.h b/kernel/include/xenomai/rtdm/analogy/instruction.h
new file mode 120000
index 0000000..e82a550
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/instruction.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h b/kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h
new file mode 120000
index 0000000..a0e1b96
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/rtdm_helpers.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/subdevice.h b/kernel/include/xenomai/rtdm/analogy/subdevice.h
new file mode 120000
index 0000000..a11623e
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/subdevice.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/analogy/transfer.h b/kernel/include/xenomai/rtdm/analogy/transfer.h
new file mode 120000
index 0000000..df69c91
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/analogy/transfer.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/autotune.h b/kernel/include/xenomai/rtdm/autotune.h
new file mode 120000
index 0000000..ba8efcb
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/autotune.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/can.h b/kernel/include/xenomai/rtdm/can.h
new file mode 120000
index 0000000..8195f8a
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/can.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/cobalt.h b/kernel/include/xenomai/rtdm/cobalt.h
new file mode 120000
index 0000000..b7bbe77
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/cobalt.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/compat.h b/kernel/include/xenomai/rtdm/compat.h
new file mode 120000
index 0000000..23cff61
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/compat.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/driver.h b/kernel/include/xenomai/rtdm/driver.h
new file mode 120000
index 0000000..bd8e46a
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/driver.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/fd.h b/kernel/include/xenomai/rtdm/fd.h
new file mode 120000
index 0000000..804d905
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/fd.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/gpio.h b/kernel/include/xenomai/rtdm/gpio.h
new file mode 120000
index 0000000..c808633
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/gpio.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/gpiopwm.h b/kernel/include/xenomai/rtdm/gpiopwm.h
new file mode 120000
index 0000000..967bde3
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/gpiopwm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/ipc.h b/kernel/include/xenomai/rtdm/ipc.h
new file mode 120000
index 0000000..4c62921
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/ipc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/net.h b/kernel/include/xenomai/rtdm/net.h
new file mode 120000
index 0000000..7eaab5c
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/net.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/rtdm.h b/kernel/include/xenomai/rtdm/rtdm.h
new file mode 120000
index 0000000..097a4e3
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/rtdm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/serial.h b/kernel/include/xenomai/rtdm/serial.h
new file mode 120000
index 0000000..9552598
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/serial.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/testing.h b/kernel/include/xenomai/rtdm/testing.h
new file mode 120000
index 0000000..2b183c3
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/testing.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/analogy.h b/kernel/include/xenomai/rtdm/uapi/analogy.h
new file mode 120000
index 0000000..827e76a
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/analogy.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/autotune.h b/kernel/include/xenomai/rtdm/uapi/autotune.h
new file mode 120000
index 0000000..a5ac10a
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/autotune.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/can.h b/kernel/include/xenomai/rtdm/uapi/can.h
new file mode 120000
index 0000000..af8f0cc
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/can.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/gpio.h b/kernel/include/xenomai/rtdm/uapi/gpio.h
new file mode 120000
index 0000000..2526036
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/gpio.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/gpiopwm.h b/kernel/include/xenomai/rtdm/uapi/gpiopwm.h
new file mode 120000
index 0000000..cbf4b3c
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/gpiopwm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/ipc.h b/kernel/include/xenomai/rtdm/uapi/ipc.h
new file mode 120000
index 0000000..dcc43c0
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/ipc.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/net.h b/kernel/include/xenomai/rtdm/uapi/net.h
new file mode 120000
index 0000000..155c861
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/net.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/rtdm.h b/kernel/include/xenomai/rtdm/uapi/rtdm.h
new file mode 120000
index 0000000..d6262f9
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/rtdm.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/serial.h b/kernel/include/xenomai/rtdm/uapi/serial.h
new file mode 120000
index 0000000..bd64996
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/serial.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/spi.h b/kernel/include/xenomai/rtdm/uapi/spi.h
new file mode 120000
index 0000000..dfa76d6
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/spi.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/testing.h b/kernel/include/xenomai/rtdm/uapi/testing.h
new file mode 120000
index 0000000..eb092d8
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/testing.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/uapi/udd.h b/kernel/include/xenomai/rtdm/uapi/udd.h
new file mode 120000
index 0000000..0cba891
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/uapi/udd.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/rtdm/udd.h b/kernel/include/xenomai/rtdm/udd.h
new file mode 120000
index 0000000..f822979
--- /dev/null
+++ b/kernel/include/xenomai/rtdm/udd.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h
\ No newline at end of file
diff --git a/kernel/include/xenomai/version.h b/kernel/include/xenomai/version.h
new file mode 120000
index 0000000..74a7bf3
--- /dev/null
+++ b/kernel/include/xenomai/version.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/include/xenomai/version.h
\ No newline at end of file
diff --git a/kernel/init/Kconfig b/kernel/init/Kconfig
index 2c92f94..39812ff 100644
--- a/kernel/init/Kconfig
+++ b/kernel/init/Kconfig
@@ -1531,7 +1531,19 @@
 config PRINTK_NMI
 	def_bool y
 	depends on PRINTK
-	depends on HAVE_NMI
+	depends on HAVE_NMI || IRQ_PIPELINE
+
+config RAW_PRINTK
+       bool "Enable support for raw printk"
+       default n
+       help
+         This option enables a printk variant called raw_printk() for
+         writing all output unmodified to a raw console channel
+         immediately, without any header or preparation whatsoever,
+         usable from any context.
+
+         Unlike early_printk() console devices, raw_printk() devices
+         can live past the boot sequence.
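
A quick usage sketch (not part of this patch): with CONFIG_RAW_PRINTK=y and a
console driver registered on the raw channel, any stage may emit diagnostics
directly; raw_printk() takes a printk()-style format string:

    /* delta_ns is a hypothetical local; output bypasses the log
     * buffer and headers, so this is safe from oob context too. */
    raw_printk("oob overrun on cpu %d: %ld ns late\n",
               raw_smp_processor_id(), delta_ns);
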
 
 config BUG
 	bool "BUG() support" if EXPERT
@@ -2449,3 +2461,52 @@
 if !ROCKCHIP_MINI_KERNEL
 source "init/Kconfig.gki"
 endif
+menuconfig XENOMAI
+	depends on X86_TSC || !X86
+	bool "Xenomai/cobalt"
+	select IPIPE if HAVE_IPIPE_SUPPORT
+	select IPIPE_WANT_APIREV_2 if IPIPE
+	select DOVETAIL if HAVE_DOVETAIL
+	select DOVETAIL_LEGACY_SYSCALL_RANGE if HAVE_DOVETAIL
+	default y
+	help
+	  Xenomai's Cobalt core is a real-time extension to the Linux
+	  kernel, which exhibits very short interrupt and scheduling
+	  latency, without affecting the regular kernel services.
+
+	  This option enables the set of extended kernel services
+	  required to run real-time applications in user-space, on
+	  top of the Xenomai libraries.
+
+	  Please visit http://xenomai.org for more information.
+
+if XENOMAI
+source "arch/arm64/xenomai/Kconfig"
+endif
+
+if MIGRATION
+comment "WARNING! Page migration (CONFIG_MIGRATION) may increase"
+comment "latency."
+endif
+
+if APM || CPU_FREQ || ACPI_PROCESSOR || INTEL_IDLE
+comment "WARNING! At least one of APM, CPU frequency scaling, ACPI 'processor'"
+comment "or CPU idle features is enabled. Any of these options may"
+comment "cause troubles with Xenomai. You should disable them."
+endif
+
+config XENO_VERSION_MAJOR
+       int
+       default 3
+
+config XENO_VERSION_MINOR
+       int
+       default 2
+
+config XENO_REVISION_LEVEL
+       int
+       default 4
+
+config XENO_VERSION_STRING
+       string
+       default "3.2.4"
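
For orientation, the fragment below sketches what these entries are expected
to yield in a .config on an arm64 target with Dovetail support (values assumed
for illustration, not taken from a real build):

    CONFIG_XENOMAI=y
    CONFIG_DOVETAIL=y
    CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE=y
    CONFIG_IRQ_PIPELINE=y
    CONFIG_XENO_VERSION_STRING="3.2.4"
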
diff --git a/kernel/init/Makefile b/kernel/init/Makefile
index 6bc37f6..ce21edc 100644
--- a/kernel/init/Makefile
+++ b/kernel/init/Makefile
@@ -34,4 +34,4 @@
 	@$($(quiet)chk_compile.h)
 	$(Q)$(CONFIG_SHELL) $(srctree)/scripts/mkcompile_h $@	\
 	"$(UTS_MACHINE)" "$(CONFIG_SMP)" "$(CONFIG_PREEMPT)"	\
-	"$(CONFIG_PREEMPT_RT)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
+	"$(CONFIG_PREEMPT_RT)" "$(CONFIG_IRQ_PIPELINE)" $(CONFIG_CC_VERSION_TEXT) "$(LD)"
diff --git a/kernel/init/main.c b/kernel/init/main.c
index 45ca352..6443f34 100644
--- a/kernel/init/main.c
+++ b/kernel/init/main.c
@@ -52,6 +52,7 @@
 #include <linux/tick.h>
 #include <linux/sched/isolation.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/taskstats_kern.h>
 #include <linux/delayacct.h>
 #include <linux/unistd.h>
@@ -847,13 +848,14 @@
 	char *command_line;
 	char *after_dashes;
 
+	stall_inband_nocheck();
 	set_task_stack_end_magic(&init_task);
 	smp_setup_processor_id();
 	debug_objects_early_init();
 
 	cgroup_init_early();
 
-	local_irq_disable();
+	local_irq_disable_full();
 	early_boot_irqs_disabled = true;
 
 	/*
@@ -913,6 +915,7 @@
 	setup_log_buf(0);
 	vfs_caches_init_early();
 	sort_main_extable();
+	irq_pipeline_init_early();
 	trap_init();
 	mm_init();
 
@@ -958,6 +961,7 @@
 	/* init some links before init_ISA_irqs() */
 	early_irq_init();
 	init_IRQ();
+	irq_pipeline_init();
 	tick_init();
 	rcu_init_nohz();
 	init_timers();
@@ -983,7 +987,7 @@
 	WARN(!irqs_disabled(), "Interrupts were enabled early\n");
 
 	early_boot_irqs_disabled = false;
-	local_irq_enable();
+	local_irq_enable_full();
 
 	kmem_cache_init_late();
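
The _full variants introduced above couple the virtual (in-band) interrupt
state with the hardware one. A sketch of their assumed expansion under
CONFIG_IRQ_PIPELINE (ordering as recalled from Dovetail; verify against
include/linux/irqflags.h in the tree):

    /* Assumed expansion, for illustration only. */
    #define local_irq_disable_full()              \
            do {                                  \
                    local_irq_disable();          \
                    hard_local_irq_disable();     \
            } while (0)

    #define local_irq_enable_full()               \
            do {                                  \
                    hard_local_irq_enable();      \
                    local_irq_enable();           \
            } while (0)
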
 
diff --git a/kernel/kernel/Kconfig.dovetail b/kernel/kernel/Kconfig.dovetail
new file mode 100644
index 0000000..c9ec30d
--- /dev/null
+++ b/kernel/kernel/Kconfig.dovetail
@@ -0,0 +1,23 @@
+
+# DOVETAIL dual-kernel interface
+config HAVE_DOVETAIL
+	bool
+
+# Selecting ARCH_WANT_IRQS_OFF_ACTIVATE_MM in this generic Kconfig
+# portion is ugly, but the whole ARCH_WANT_IRQS_OFF_ACTIVATE_MM logic
+# is a temporary kludge which is meant to disappear anyway. See
+# the related comments in exec_mmap() for details.
+config DOVETAIL
+	bool "Dovetail interface"
+	depends on HAVE_DOVETAIL
+	select IRQ_PIPELINE
+	select ARCH_WANT_IRQS_OFF_ACTIVATE_MM
+	default n
+	help
+	  Activate this option if you want to enable the interface for
+	  running a secondary kernel side-by-side with Linux (aka
+	  "dual kernel" configuration).
+
+config DOVETAIL_LEGACY_SYSCALL_RANGE
+       depends on DOVETAIL
+       def_bool y
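
HAVE_DOVETAIL is an architecture capability bit: an arch advertises it once
its entry code provides the required hooks, as in this hypothetical fragment
(the arm64 case is implied by the XENOMAI entry sourcing
arch/arm64/xenomai/Kconfig above):

    # Hypothetical arch-side fragment advertising Dovetail support
    config ARM64
            select HAVE_IRQ_PIPELINE
            select HAVE_DOVETAIL
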
diff --git a/kernel/kernel/Makefile b/kernel/kernel/Makefile
index 6ee614d..cafe18f 100644
--- a/kernel/kernel/Makefile
+++ b/kernel/kernel/Makefile
@@ -106,6 +106,7 @@
 obj-$(CONFIG_RING_BUFFER) += trace/
 obj-$(CONFIG_TRACEPOINTS) += trace/
 obj-$(CONFIG_IRQ_WORK) += irq_work.o
+obj-$(CONFIG_DOVETAIL) += dovetail.o
 obj-$(CONFIG_CPU_PM) += cpu_pm.o
 obj-$(CONFIG_BPF) += bpf/
 obj-$(CONFIG_KCSAN) += kcsan/
@@ -155,3 +156,5 @@
 	$(call cmd,genikh)
 
 clean-files := kheaders_data.tar.xz kheaders.md5
+
+obj-$(CONFIG_XENOMAI) += xenomai/
diff --git a/kernel/kernel/debug/debug_core.c b/kernel/kernel/debug/debug_core.c
index 0f31b22..6e034f0 100644
--- a/kernel/kernel/debug/debug_core.c
+++ b/kernel/kernel/debug/debug_core.c
@@ -111,8 +111,8 @@
  */
 atomic_t			kgdb_active = ATOMIC_INIT(-1);
 EXPORT_SYMBOL_GPL(kgdb_active);
-static DEFINE_RAW_SPINLOCK(dbg_master_lock);
-static DEFINE_RAW_SPINLOCK(dbg_slave_lock);
+static DEFINE_HARD_SPINLOCK(dbg_master_lock);
+static DEFINE_HARD_SPINLOCK(dbg_slave_lock);
 
 /*
  * We use NR_CPUs not PERCPU, in case kgdb is used to debug early
@@ -612,7 +612,7 @@
 	 * Interrupts will be restored by the 'trap return' code, except when
 	 * single stepping.
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	cpu = ks->cpu;
 	kgdb_info[cpu].debuggerinfo = regs;
@@ -666,7 +666,7 @@
 			smp_mb__before_atomic();
 			atomic_dec(&slaves_in_kgdb);
 			dbg_touch_watchdogs();
-			local_irq_restore(flags);
+			hard_local_irq_restore(flags);
 			rcu_read_unlock();
 			return 0;
 		}
@@ -685,7 +685,7 @@
 		atomic_set(&kgdb_active, -1);
 		raw_spin_unlock(&dbg_master_lock);
 		dbg_touch_watchdogs();
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 		rcu_read_unlock();
 
 		goto acquirelock;
@@ -721,8 +721,11 @@
 		atomic_set(ks->send_ready, 1);
 
 	/* Signal the other CPUs to enter kgdb_wait() */
-	else if ((!kgdb_single_step) && kgdb_do_roundup)
+	else if ((!kgdb_single_step) && kgdb_do_roundup && running_inband()) {
+		hard_cond_local_irq_enable();
 		kgdb_roundup_cpus();
+		hard_cond_local_irq_disable();
+	}
 #endif
 
 	/*
@@ -834,7 +837,7 @@
 	atomic_set(&kgdb_active, -1);
 	raw_spin_unlock(&dbg_master_lock);
 	dbg_touch_watchdogs();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	rcu_read_unlock();
 
 	return kgdb_info[cpu].ret_state;
@@ -957,7 +960,7 @@
 	if (!kgdb_connected || atomic_read(&kgdb_active) != -1 || dbg_kdb_mode)
 		return;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	gdbstub_msg_write(s, count);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
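
The conversion above relies on hard spinlocks, which spin with hardware IRQs
off and may therefore be shared between in-band and out-of-band code; the
raw_spin_* API still applies, as the hunks show. A minimal sketch of the
pattern:

    /* Sketch only: a lock shared between in-band and oob code. */
    static DEFINE_HARD_SPINLOCK(shared_lock);

    static void touch_shared_state(void)
    {
            unsigned long flags;

            /* Spins with hard IRQs off on the local CPU; keep the
             * section short, it adds to out-of-band latency. */
            raw_spin_lock_irqsave(&shared_lock, flags);
            /* ... critical section ... */
            raw_spin_unlock_irqrestore(&shared_lock, flags);
    }
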
diff --git a/kernel/kernel/dovetail.c b/kernel/kernel/dovetail.c
new file mode 100644
index 0000000..4d1e4c3
--- /dev/null
+++ b/kernel/kernel/dovetail.c
@@ -0,0 +1,450 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/timekeeper_internal.h>
+#include <linux/sched/signal.h>
+#include <linux/irq_pipeline.h>
+#include <linux/dovetail.h>
+#include <asm/unistd.h>
+#include <asm/syscall.h>
+#include <uapi/asm-generic/dovetail.h>
+
+static bool dovetail_enabled;
+
+void __weak arch_inband_task_init(struct task_struct *p)
+{
+}
+
+void inband_task_init(struct task_struct *p)
+{
+	struct thread_info *ti = task_thread_info(p);
+
+	clear_ti_local_flags(ti, _TLF_DOVETAIL|_TLF_OOB|_TLF_OFFSTAGE);
+	arch_inband_task_init(p);
+}
+
+void dovetail_init_altsched(struct dovetail_altsched_context *p)
+{
+	struct task_struct *tsk = current;
+	struct mm_struct *mm = tsk->mm;
+
+	check_inband_stage();
+	p->task = tsk;
+	p->active_mm = mm;
+	p->borrowed_mm = false;
+
+	/*
+	 * Make sure the current process will not share any private
+	 * page with its child upon fork(), sparing it the random
+	 * latency induced by COW. MMF_DOVETAILED is never cleared once
+	 * set. We serialize with dup_mmap() which holds the mm write
+	 * lock.
+	 */
+	if (!(tsk->flags & PF_KTHREAD) &&
+		!test_bit(MMF_DOVETAILED, &mm->flags)) {
+		mmap_write_lock(mm);
+		__set_bit(MMF_DOVETAILED, &mm->flags);
+		mmap_write_unlock(mm);
+	}
+}
+EXPORT_SYMBOL_GPL(dovetail_init_altsched);
+
+void dovetail_start_altsched(void)
+{
+	check_inband_stage();
+	set_thread_local_flags(_TLF_DOVETAIL);
+}
+EXPORT_SYMBOL_GPL(dovetail_start_altsched);
+
+void dovetail_stop_altsched(void)
+{
+	clear_thread_local_flags(_TLF_DOVETAIL);
+	clear_thread_flag(TIF_MAYDAY);
+}
+EXPORT_SYMBOL_GPL(dovetail_stop_altsched);
+
+int __weak handle_oob_syscall(struct pt_regs *regs)
+{
+	return 0;
+}
+
+int __weak handle_pipelined_syscall(struct irq_stage *stage,
+				    struct pt_regs *regs)
+{
+	return 0;	/* i.e. propagate to in-band handler. */
+}
+
+void __weak handle_oob_mayday(struct pt_regs *regs)
+{
+}
+
+static inline
+void call_mayday(struct thread_info *ti, struct pt_regs *regs)
+{
+	clear_ti_thread_flag(ti, TIF_MAYDAY);
+	handle_oob_mayday(regs);
+}
+
+void dovetail_call_mayday(struct pt_regs *regs)
+{
+	struct thread_info *ti = current_thread_info();
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+	call_mayday(ti, regs);
+	hard_local_irq_restore(flags);
+}
+
+void inband_retuser_notify(void)
+{
+	clear_thread_flag(TIF_RETUSER);
+	inband_event_notify(INBAND_TASK_RETUSER, current);
+	/* CAUTION: we might have switched out-of-band here. */
+}
+
+int __pipeline_syscall(struct pt_regs *regs)
+{
+	struct thread_info *ti = current_thread_info();
+	struct irq_stage *caller_stage, *target_stage;
+	struct irq_stage_data *p, *this_context;
+	unsigned long flags;
+	int ret = 0;
+
+	/*
+	 * We should definitely not pipeline a syscall through the
+	 * slow path with IRQs off.
+	 */
+	WARN_ON_ONCE(dovetail_debug() && hard_irqs_disabled());
+
+	if (!dovetail_enabled)
+		return 0;
+
+	flags = hard_local_irq_save();
+	caller_stage = current_irq_stage;
+	this_context = current_irq_staged;
+	target_stage = &oob_stage;
+next:
+	p = this_staged(target_stage);
+	set_current_irq_staged(p);
+	hard_local_irq_restore(flags);
+	ret = handle_pipelined_syscall(caller_stage, regs);
+	flags = hard_local_irq_save();
+	/*
+	 * Be careful about stage switching _and_ CPU migration that
+	 * might have happened as a result of handing over the syscall
+	 * to the out-of-band handler.
+	 *
+	 * - if a stage migration is detected, fetch the new
+	 * per-stage, per-CPU context pointer.
+	 *
+	 * - if no stage migration happened, switch back to the
+	 * initial call stage, on a possibly different CPU though.
+	 */
+	if (current_irq_stage != target_stage) {
+		this_context = current_irq_staged;
+	} else {
+		p = this_staged(this_context->stage);
+		set_current_irq_staged(p);
+	}
+
+	if (this_context->stage == &inband_stage) {
+		if (target_stage != &inband_stage && ret == 0) {
+			target_stage = &inband_stage;
+			goto next;
+		}
+		p = this_inband_staged();
+		if (stage_irqs_pending(p))
+			sync_current_irq_stage();
+	} else {
+		if (test_ti_thread_flag(ti, TIF_MAYDAY))
+			call_mayday(ti, regs);
+	}
+
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static inline bool maybe_oob_syscall(unsigned int nr, struct pt_regs *regs)
+{
+	/*
+	 * Check whether the companion core might be interested in this
+	 * syscall. If the legacy syscall range is supported, pass the
+	 * request to the core if __OOB_SYSCALL_BIT is set in
+	 * @nr. Otherwise, only check whether an oob syscall is folded
+	 * into a prctl() request.
+	 */
+	if (IS_ENABLED(CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE)) {
+		if (nr & __OOB_SYSCALL_BIT)
+			return true;
+	}
+
+	return arch_dovetail_is_syscall(nr) && syscall_get_arg0(regs) & __OOB_SYSCALL_BIT;
+}
+
+int pipeline_syscall(unsigned int nr, struct pt_regs *regs)
+{
+	struct thread_info *ti = current_thread_info();
+	unsigned long local_flags = READ_ONCE(ti_local_flags(ti));
+	int ret;
+
+	WARN_ON_ONCE(dovetail_debug() && hard_irqs_disabled());
+
+	/*
+	 * If the syscall signature belongs to the out-of-band syscall
+	 * set and we are running out-of-band, pass the request
+	 * directly to the companion core by calling the oob syscall
+	 * handler.
+	 *
+	 * Otherwise, if this is an out-of-band syscall or alternate
+	 * scheduling is enabled for the caller, propagate the syscall
+	 * through the pipeline stages, so that:
+	 *
+	 * - the core can manipulate the current execution stage for
+	 * handling the request, which includes switching the current
+	 * thread back to the in-band context if the syscall is a
+	 * native one, or promoting it to the oob stage if handling an
+	 * oob syscall requires this.
+	 *
+	 * - the core can receive the initial oob syscall a thread
+	 * might have to emit for enabling dovetailing from the
+	 * in-band stage.
+	 *
+	 * Native syscalls from common (non-dovetailed) threads are
+	 * not subject to pipelining, but flow down to the in-band
+	 * system call handler directly.
+	 *
+	 * Sanity check: we bark on returning from a syscall on a
+	 * stalled in-band stage, which, combined with hard irqs
+	 * being on, might cause interrupts to linger in the log
+	 * after exiting to user.
+	 */
+
+	if ((local_flags & _TLF_OOB) && maybe_oob_syscall(nr, regs)) {
+		ret = handle_oob_syscall(regs);
+		if (!IS_ENABLED(CONFIG_DOVETAIL_LEGACY_SYSCALL_RANGE))
+			WARN_ON_ONCE(dovetail_debug() && !ret);
+		local_flags = READ_ONCE(ti_local_flags(ti));
+		if (likely(ret)) {
+			if (local_flags & _TLF_OOB) {
+				if (test_ti_thread_flag(ti, TIF_MAYDAY))
+					dovetail_call_mayday(regs);
+				return 1; /* don't pass down, no tail work. */
+			} else {
+				WARN_ON_ONCE(dovetail_debug() && irqs_disabled());
+				return -1; /* don't pass down, do tail work. */
+			}
+		}
+	}
+
+	if ((local_flags & _TLF_DOVETAIL) || maybe_oob_syscall(nr, regs)) {
+		ret = __pipeline_syscall(regs);
+		local_flags = READ_ONCE(ti_local_flags(ti));
+		if (local_flags & _TLF_OOB)
+			return 1; /* don't pass down, no tail work. */
+		if (ret) {
+			WARN_ON_ONCE(dovetail_debug() && irqs_disabled());
+			return -1; /* don't pass down, do tail work. */
+		}
+	}
+
+	return 0; /* pass syscall down to the in-band dispatcher. */
+}
+
+void __weak handle_oob_trap_entry(unsigned int trapnr, struct pt_regs *regs)
+{
+}
+
+noinstr void __oob_trap_notify(unsigned int exception,
+			       struct pt_regs *regs)
+{
+	unsigned long flags;
+
+	/*
+	 * We send a notification about exceptions raised over a
+	 * registered oob stage only. The trap_entry handler expects
+	 * hard irqs off on entry. It may demote the current context
+	 * to the in-band stage, and may return with hard irqs on.
+	 */
+	if (dovetail_enabled) {
+		set_thread_local_flags(_TLF_OOBTRAP);
+		flags = hard_local_irq_save();
+		instrumentation_begin();
+		handle_oob_trap_entry(exception, regs);
+		instrumentation_end();
+		hard_local_irq_restore(flags);
+	}
+}
+
+void __weak handle_oob_trap_exit(unsigned int trapnr, struct pt_regs *regs)
+{
+}
+
+noinstr void __oob_trap_unwind(unsigned int exception, struct pt_regs *regs)
+{
+	/*
+	 * The trap_exit handler runs only if trap_entry was called
+	 * for the same trap occurrence. It expects hard irqs off on
+	 * entry, may switch the current context back to the oob
+	 * stage. Must return with hard irqs off.
+	 */
+	hard_local_irq_disable();
+	clear_thread_local_flags(_TLF_OOBTRAP);
+	instrumentation_begin();
+	handle_oob_trap_exit(exception, regs);
+	instrumentation_end();
+}
+
+void __weak handle_inband_event(enum inband_event_type event, void *data)
+{
+}
+
+void inband_event_notify(enum inband_event_type event, void *data)
+{
+	check_inband_stage();
+
+	if (dovetail_enabled)
+		handle_inband_event(event, data);
+}
+
+void __weak resume_oob_task(struct task_struct *p)
+{
+}
+
+static void finalize_oob_transition(void) /* hard IRQs off */
+{
+	struct irq_pipeline_data *pd;
+	struct irq_stage_data *p;
+	struct thread_info *ti;
+	struct task_struct *t;
+
+	pd = raw_cpu_ptr(&irq_pipeline);
+	t = pd->task_inflight;
+	if (t == NULL)
+		return;
+
+	/*
+	 * @t which is in flight to the oob stage might have received
+	 * a signal while waiting in off-stage state to be actually
+	 * scheduled out. We can't act upon that signal safely from
+	 * here, we simply let the task complete the migration process
+	 * to the oob stage. The pending signal will be handled when
+	 * the task eventually exits the out-of-band context by the
+	 * converse migration.
+	 */
+	pd->task_inflight = NULL;
+	ti = task_thread_info(t);
+
+	/*
+	 * The transition handler in the companion core assumes the
+	 * oob stage is stalled, fix this up.
+	 */
+	stall_oob();
+	resume_oob_task(t);
+	unstall_oob();
+	p = this_oob_staged();
+	if (stage_irqs_pending(p))
+		/* Current stage (in-band) != p->stage (oob). */
+		sync_irq_stage(p->stage);
+}
+
+void oob_trampoline(void)
+{
+	unsigned long flags;
+
+	check_inband_stage();
+	flags = hard_local_irq_save();
+	finalize_oob_transition();
+	hard_local_irq_restore(flags);
+}
+
+bool inband_switch_tail(void)
+{
+	bool oob;
+
+	check_hard_irqs_disabled();
+
+	/*
+	 * We may run this code either over the inband or oob
+	 * contexts. If inband, we may have a thread blocked in
+	 * dovetail_leave_inband(), waiting for the companion core to
+	 * schedule it back in over the oob context, in which case
+	 * finalize_oob_transition() should take care of it. If oob,
+	 * the core just switched us back, and we may update the
+	 * context markers before returning to context_switch().
+	 *
+	 * Since the preemption count does not reflect the active
+	 * stage yet upon inband -> oob transition, we figure out
+	 * which one we are on by testing _TLF_OFFSTAGE. Having this
+	 * bit set when running the inband switch tail code means that
+	 * we are completing such transition for the current task,
+	 * switched in by dovetail_context_switch() over the oob
+	 * stage. If so, update the context markers appropriately.
+	 */
+	oob = test_thread_local_flags(_TLF_OFFSTAGE);
+	if (oob) {
+		/*
+		 * The companion core assumes a stalled stage on exit
+		 * from dovetail_leave_inband().
+		 */
+		stall_oob();
+		set_thread_local_flags(_TLF_OOB);
+		if (!IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT)) {
+			WARN_ON_ONCE(dovetail_debug() &&
+				(preempt_count() & STAGE_MASK));
+			preempt_count_add(STAGE_OFFSET);
+		}
+	} else {
+		finalize_oob_transition();
+		hard_local_irq_enable();
+	}
+
+	return oob;
+}
+
+void __weak inband_clock_was_set(void)
+{
+}
+
+void __weak install_inband_fd(unsigned int fd, struct file *file,
+			      struct files_struct *files)
+{
+}
+
+void __weak uninstall_inband_fd(unsigned int fd, struct file *file,
+				struct files_struct *files)
+{
+}
+
+void __weak replace_inband_fd(unsigned int fd, struct file *file,
+			      struct files_struct *files)
+{
+}
+
+int dovetail_start(void)
+{
+	check_inband_stage();
+
+	if (dovetail_enabled)
+		return -EBUSY;
+
+	if (!oob_stage_present())
+		return -EAGAIN;
+
+	dovetail_enabled = true;
+	smp_wmb();
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(dovetail_start);
+
+void dovetail_stop(void)
+{
+	check_inband_stage();
+
+	dovetail_enabled = false;
+	smp_wmb();
+}
+EXPORT_SYMBOL_GPL(dovetail_stop);
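
Taken together, the entry points above suggest a bring-up sequence along these
lines for a companion core (a sketch; enable_oob_stage() is assumed to be the
pipeline call that registers the oob stage checked by oob_stage_present()):

    static int __init companion_core_init(void)
    {
            int ret;

            /* Register the high-priority stage with the pipeline. */
            ret = enable_oob_stage("companion");    /* assumed API */
            if (ret)
                    return ret;

            /* Start accepting events and dovetailed syscalls;
             * returns -EAGAIN if no oob stage is present. */
            return dovetail_start();
    }
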
diff --git a/kernel/kernel/entry/common.c b/kernel/kernel/entry/common.c
index 09f5885..5f043bb 100644
--- a/kernel/kernel/entry/common.c
+++ b/kernel/kernel/entry/common.c
@@ -2,6 +2,7 @@
 
 #include <linux/context_tracking.h>
 #include <linux/entry-common.h>
+#include <linux/irq_pipeline.h>
 #include <linux/livepatch.h>
 #include <linux/audit.h>
 
@@ -71,10 +72,45 @@
 	return ret ? : syscall;
 }
 
+static __always_inline void
+syscall_enter_from_user_enable_irqs(void)
+{
+	if (running_inband()) {
+		/*
+		 * If pipelining interrupts, prepare for emulating a
+		 * stall -> unstall transition (we are currently
+		 * unstalled), fixing up the IRQ trace state in order
+		 * to keep lockdep happy (and silent).
+		 */
+		stall_inband_nocheck();
+		hard_cond_local_irq_enable();
+		local_irq_enable();
+	} else {
+		/*
+		 * We are running on the out-of-band stage, don't mess
+		 * with the in-band interrupt state. This is none of
+		 * our business. We may manipulate the hardware state
+		 * only.
+		 */
+		hard_local_irq_enable();
+	}
+}
+
 static __always_inline long
 __syscall_enter_from_user_work(struct pt_regs *regs, long syscall)
 {
 	unsigned long ti_work;
+	int ret;
+
+	/*
+	 * Pipeline the syscall to the companion core if the current
+	 * task wants this. Compiled out if not dovetailing.
+	 */
+	ret = pipeline_syscall(syscall, regs);
+	if (ret > 0)	/* out-of-band, bail out. */
+		return EXIT_SYSCALL_OOB;
+	if (ret < 0)		/* in-band, tail work only. */
+		return EXIT_SYSCALL_TAIL;
 
 	ti_work = READ_ONCE(current_thread_info()->flags);
 	if (ti_work & SYSCALL_ENTER_WORK)
@@ -95,7 +131,7 @@
 	enter_from_user_mode(regs);
 
 	instrumentation_begin();
-	local_irq_enable();
+	syscall_enter_from_user_enable_irqs();
 	ret = __syscall_enter_from_user_work(regs, syscall);
 	instrumentation_end();
 
@@ -106,7 +142,7 @@
 {
 	enter_from_user_mode(regs);
 	instrumentation_begin();
-	local_irq_enable();
+	syscall_enter_from_user_enable_irqs();
 	instrumentation_end();
 }
 
@@ -121,6 +157,7 @@
  * 3) Invoke architecture specific last minute exit code, e.g. speculation
  *    mitigations, etc.
  * 4) Tell lockdep that interrupts are enabled
+ * 5) Unstall the in-band stage of the pipeline if running in-band
  */
 static __always_inline void exit_to_user_mode(void)
 {
@@ -132,6 +169,8 @@
 	user_enter_irqoff();
 	arch_exit_to_user_mode();
 	lockdep_hardirqs_on(CALLER_ADDR0);
+	if (running_inband())
+		unstall_inband();
 }
 
 /* Workaround to allow gradual conversion of architecture code */
@@ -155,6 +194,12 @@
 	while (ti_work & EXIT_TO_USER_MODE_WORK) {
 
 		local_irq_enable_exit_to_user(ti_work);
+
+		/*
+		 * Check that local_irq_enable_exit_to_user() does the
+		 * right thing when pipelining.
+		 */
+		WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
 
 		if (ti_work & _TIF_NEED_RESCHED)
 			schedule();
@@ -182,6 +227,7 @@
 		 * enabled above.
 		 */
 		local_irq_disable_exit_to_user();
+		WARN_ON_ONCE(irq_pipeline_debug() && !hard_irqs_disabled());
 		ti_work = READ_ONCE(current_thread_info()->flags);
 	}
 
@@ -189,16 +235,36 @@
 	return ti_work;
 }
 
+static inline bool do_retuser(unsigned long ti_work)
+{
+	if (dovetailing() && (ti_work & _TIF_RETUSER)) {
+		hard_local_irq_enable();
+		inband_retuser_notify();
+		hard_local_irq_disable();
+		/* RETUSER might have switched oob */
+		return running_inband();
+	}
+
+	return false;
+}
+
 static void exit_to_user_mode_prepare(struct pt_regs *regs)
 {
-	unsigned long ti_work = READ_ONCE(current_thread_info()->flags);
+	unsigned long ti_work;
+
+	check_hard_irqs_disabled();
 
 	lockdep_assert_irqs_disabled();
+again:
+	ti_work = READ_ONCE(current_thread_info()->flags);
 
 	if (unlikely(ti_work & EXIT_TO_USER_MODE_WORK))
 		ti_work = exit_to_user_mode_loop(regs, ti_work);
 
 	arch_exit_to_user_mode_prepare(regs, ti_work);
+
+	if (do_retuser(ti_work))
+		goto again;
 
 	/* Ensure that the address limit is intact and no locks are held */
 	addr_limit_user_check();
@@ -252,7 +318,7 @@
 
 	if (IS_ENABLED(CONFIG_PROVE_LOCKING)) {
 		if (WARN(irqs_disabled(), "syscall %lu left IRQs disabled", nr))
-			local_irq_enable();
+			local_irq_enable_full();
 	}
 
 	rseq_syscall(regs);
@@ -261,8 +327,15 @@
 	 * Do one-time syscall specific work. If these work items are
 	 * enabled, we want to run them exactly once per syscall exit with
 	 * interrupts enabled.
+	 *
+	 * Dovetail: if this does not look like an in-band syscall, it
+	 * has to belong to the companion core. Typically,
+	 * __OOB_SYSCALL_BIT would be set in this value. Skip the
+	 * work for those syscalls.
 	 */
-	if (unlikely(cached_flags & SYSCALL_EXIT_WORK))
+	if (unlikely((cached_flags & SYSCALL_EXIT_WORK) &&
+		(!irqs_pipelined() ||
+			syscall_get_nr(current, regs) < NR_syscalls)))
 		syscall_exit_work(regs, cached_flags);
 }
 
@@ -278,6 +351,8 @@
 
 noinstr void irqentry_enter_from_user_mode(struct pt_regs *regs)
 {
+	WARN_ON_ONCE(irq_pipeline_debug() && irqs_disabled());
+	stall_inband_nocheck();
 	enter_from_user_mode(regs);
 }
 
@@ -293,12 +368,36 @@
 {
 	irqentry_state_t ret = {
 		.exit_rcu = false,
+#ifdef CONFIG_IRQ_PIPELINE
+		.stage_info = IRQENTRY_INBAND_STALLED,
+#endif
 	};
 
+#ifdef CONFIG_IRQ_PIPELINE
+	if (running_oob()) {
+		WARN_ON_ONCE(irq_pipeline_debug() && oob_irqs_disabled());
+		ret.stage_info = IRQENTRY_OOB;
+		return ret;
+	}
+#endif
+
 	if (user_mode(regs)) {
+#ifdef CONFIG_IRQ_PIPELINE
+		ret.stage_info = IRQENTRY_INBAND_UNSTALLED;
+#endif
 		irqentry_enter_from_user_mode(regs);
 		return ret;
 	}
+
+#ifdef CONFIG_IRQ_PIPELINE
+	/*
+	 * IRQ pipeline: If we trapped from kernel space, the virtual
+	 * state may or may not match the hardware state. Since hard
+	 * irqs are off on entry, we have to stall the in-band stage.
+	 */
+	if (!test_and_stall_inband_nocheck())
+		ret.stage_info = IRQENTRY_INBAND_UNSTALLED;
+#endif
 
 	/*
 	 * If this entry hit the idle task invoke rcu_irq_enter() whether
@@ -366,14 +465,91 @@
 	}
 }
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+static inline
+bool irqexit_may_preempt_schedule(irqentry_state_t state,
+				struct pt_regs *regs)
+{
+	return state.stage_info == IRQENTRY_INBAND_UNSTALLED;
+}
+
+#else
+
+static inline
+bool irqexit_may_preempt_schedule(irqentry_state_t state,
+				struct pt_regs *regs)
+{
+	return !regs_irqs_disabled(regs);
+}
+
+#endif
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+static bool irqentry_syncstage(irqentry_state_t state) /* hard irqs off */
+{
+	/*
+	 * If pipelining interrupts, enable in-band IRQs then
+	 * synchronize the interrupt log on exit if:
+	 *
+	 * - irqentry_enter() stalled the stage in order to mirror the
+	 * hardware state.
+	 *
+	 * - we were coming from oob, thus went through a stage migration
+	 * that was caused by taking a CPU exception, e.g., a fault.
+	 *
+	 * We run before preempt_schedule_irq() may be called later on
+	 * by preemptible kernels, so that any rescheduling request
+	 * triggered by in-band IRQ handlers is considered.
+	 */
+	if (state.stage_info == IRQENTRY_INBAND_UNSTALLED ||
+		state.stage_info == IRQENTRY_OOB) {
+		unstall_inband_nocheck();
+		synchronize_pipeline_on_irq();
+		stall_inband_nocheck();
+		return true;
+	}
+
+	return false;
+}
+
+static void irqentry_unstall(void)
+{
+	unstall_inband_nocheck();
+}
+
+#else
+
+static bool irqentry_syncstage(irqentry_state_t state)
+{
+	return false;
+}
+
+static void irqentry_unstall(void)
+{
+}
+
+#endif
+
 noinstr void irqentry_exit(struct pt_regs *regs, irqentry_state_t state)
 {
+	bool synchronized = false;
+
+	if (running_oob())
+		return;
+
 	lockdep_assert_irqs_disabled();
 
 	/* Check whether this returns to user mode */
 	if (user_mode(regs)) {
 		irqentry_exit_to_user_mode(regs);
-	} else if (!regs_irqs_disabled(regs)) {
+		return;
+	}
+
+	synchronized = irqentry_syncstage(state);
+
+	if (irqexit_may_preempt_schedule(state, regs)) {
 		/*
 		 * If RCU was not watching on entry this needs to be done
 		 * carefully and needs the same ordering of lockdep/tracing
@@ -387,7 +563,7 @@
 			instrumentation_end();
 			rcu_irq_exit();
 			lockdep_hardirqs_on(CALLER_ADDR0);
-			return;
+			goto out;
 		}
 
 		instrumentation_begin();
@@ -404,6 +580,12 @@
 		if (state.exit_rcu)
 			rcu_irq_exit();
 	}
+
+out:
+	if (synchronized)
+		irqentry_unstall();
+
+	return;
 }
 
 irqentry_state_t noinstr irqentry_nmi_enter(struct pt_regs *regs)
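
EXIT_SYSCALL_OOB and EXIT_SYSCALL_TAIL returned above are meant to be consumed
by the arch-level dispatcher; a hedged sketch of that consumption follows (the
dispatcher shape and the invoke_syscall() helper are assumptions, not taken
from this patch):

    static void arch_do_syscall(struct pt_regs *regs, long nr)
    {
            nr = syscall_enter_from_user_mode(regs, nr);
            if (nr == EXIT_SYSCALL_OOB)
                    return;  /* handled out-of-band, skip tail work */
            if (nr != EXIT_SYSCALL_TAIL)
                    invoke_syscall(regs, nr);  /* in-band handling, assumed helper */
            syscall_exit_to_user_mode(regs);   /* tail work runs here */
    }
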
diff --git a/kernel/kernel/exit.c b/kernel/kernel/exit.c
index 86e4031..9150d66 100644
--- a/kernel/kernel/exit.c
+++ b/kernel/kernel/exit.c
@@ -14,6 +14,7 @@
 #include <linux/sched/task_stack.h>
 #include <linux/sched/cputime.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 #include <linux/module.h>
 #include <linux/capability.h>
 #include <linux/completion.h>
@@ -767,6 +768,7 @@
 
 	io_uring_files_cancel();
 	exit_signals(tsk);  /* sets PF_EXITING */
+	inband_exit_notify();
 
 	/* sync mm's RSS info before statistics gathering */
 	if (tsk->mm)
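
inband_exit_notify() funnels into the weak handle_inband_event() hook added in
kernel/dovetail.c, which a companion core overrides to track in-band task
events. A sketch (only INBAND_TASK_RETUSER is visible in this patch; other
enum values are assumed from dovetail.h):

    void handle_inband_event(enum inband_event_type event, void *data)
    {
            switch (event) {
            case INBAND_TASK_RETUSER:
                    /* @data is the task returning to user mode */
                    break;
            default:
                    break;
            }
    }
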
diff --git a/kernel/kernel/fork.c b/kernel/kernel/fork.c
index f73e3e6..978ecbf 100644
--- a/kernel/kernel/fork.c
+++ b/kernel/kernel/fork.c
@@ -48,6 +48,7 @@
 #include <linux/cpu.h>
 #include <linux/cgroup.h>
 #include <linux/security.h>
+#include <linux/dovetail.h>
 #include <linux/hugetlb.h>
 #include <linux/seccomp.h>
 #include <linux/swap.h>
@@ -938,6 +939,7 @@
 #endif
 
 	setup_thread_stack(tsk, orig);
+	inband_task_init(tsk);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
 	set_task_stack_end_magic(tsk);
@@ -1083,6 +1085,9 @@
 #endif
 	mm_init_uprobes_state(mm);
 	hugetlb_count_init(mm);
+#ifdef CONFIG_DOVETAIL
+	memset(&mm->oob_state, 0, sizeof(mm->oob_state));
+#endif
 
 	if (current->mm) {
 		mm->flags = current->mm->flags & MMF_INIT_MASK;
@@ -1131,6 +1136,7 @@
 	exit_aio(mm);
 	ksm_exit(mm);
 	khugepaged_exit(mm); /* must run before exit_mmap */
+	inband_cleanup_notify(mm); /* ditto. */
 	exit_mmap(mm);
 	mm_put_huge_zero_page(mm);
 	set_mm_exe_file(mm, NULL);
diff --git a/kernel/kernel/irq/Kconfig b/kernel/kernel/irq/Kconfig
index 1bd144e..d170936 100644
--- a/kernel/kernel/irq/Kconfig
+++ b/kernel/kernel/irq/Kconfig
@@ -142,6 +142,19 @@
 
 	  If you don't know what to do here, say N.
 
+# Interrupt pipeline
+config HAVE_IRQ_PIPELINE
+	bool
+
+config IRQ_PIPELINE
+	bool "Interrupt pipeline"
+	depends on HAVE_IRQ_PIPELINE
+	select IRQ_DOMAIN
+	default n
+	help
+	  Activate this option if you want the interrupt pipeline to be
+	  compiled in.
+
 endmenu
 
 config GENERIC_IRQ_MULTI_HANDLER
diff --git a/kernel/kernel/irq/Makefile b/kernel/kernel/irq/Makefile
index b4f5371..b6e43ec 100644
--- a/kernel/kernel/irq/Makefile
+++ b/kernel/kernel/irq/Makefile
@@ -9,6 +9,8 @@
 obj-$(CONFIG_GENERIC_IRQ_PROBE) += autoprobe.o
 obj-$(CONFIG_IRQ_DOMAIN) += irqdomain.o
 obj-$(CONFIG_IRQ_SIM) += irq_sim.o
+obj-$(CONFIG_IRQ_PIPELINE) += pipeline.o
+obj-$(CONFIG_IRQ_PIPELINE_TORTURE_TEST) += irqptorture.o
 obj-$(CONFIG_PROC_FS) += proc.o
 obj-$(CONFIG_GENERIC_PENDING_IRQ) += migration.o
 obj-$(CONFIG_GENERIC_IRQ_MIGRATION) += cpuhotplug.o
diff --git a/kernel/kernel/irq/chip.c b/kernel/kernel/irq/chip.c
index 520b9fa..13edfa8 100644
--- a/kernel/kernel/irq/chip.c
+++ b/kernel/kernel/irq/chip.c
@@ -15,6 +15,7 @@
 #include <linux/kernel_stat.h>
 #include <linux/irqdomain.h>
 #include <linux/wakeup_reason.h>
+#include <linux/irq_pipeline.h>
 
 #include <trace/events/irq.h>
 
@@ -49,6 +50,10 @@
 
 	if (!chip)
 		chip = &no_irq_chip;
+	else
+		WARN_ONCE(irqs_pipelined() &&
+			  (chip->flags & IRQCHIP_PIPELINE_SAFE) == 0,
+			  "irqchip %s is not pipeline-safe!", chip->name);
 
 	desc->irq_data.chip = chip;
 	irq_put_desc_unlock(desc, flags);
@@ -155,14 +160,6 @@
 	return 0;
 }
 EXPORT_SYMBOL(irq_set_chip_data);
-
-struct irq_data *irq_get_irq_data(unsigned int irq)
-{
-	struct irq_desc *desc = irq_to_desc(irq);
-
-	return desc ? &desc->irq_data : NULL;
-}
-EXPORT_SYMBOL_GPL(irq_get_irq_data);
 
 static void irq_state_clr_disabled(struct irq_desc *desc)
 {
@@ -386,7 +383,8 @@
  */
 void irq_disable(struct irq_desc *desc)
 {
-	__irq_disable(desc, irq_settings_disable_unlazy(desc));
+	__irq_disable(desc,
+	      irq_settings_disable_unlazy(desc) || irqs_pipelined());
 }
 
 void irq_percpu_enable(struct irq_desc *desc, unsigned int cpu)
@@ -532,8 +530,22 @@
 	 * If the interrupt is an armed wakeup source, mark it pending
 	 * and suspended, disable it and notify the pm core about the
 	 * event.
+	 *
+	 * When pipelining, the logic is as follows:
+	 *
+	 * - from a pipeline entry context, we might have preempted
+	 * the oob stage, or irqs might be [virtually] off, so we may
+	 * not run the in-band PM code. Just make sure any wakeup
+	 * interrupt is detected later on when the flow handler
+	 * re-runs from the in-band stage.
+	 *
+	 * - from the in-band context, run the PM wakeup check.
 	 */
-	if (irq_pm_check_wakeup(desc))
+	if (irqs_pipelined()) {
+		WARN_ON_ONCE(irq_pipeline_debug() && !in_pipeline());
+		if (irqd_is_wakeup_armed(&desc->irq_data))
+			return true;
+	} else if (irq_pm_check_wakeup(desc))
 		return false;
 
 	/*
@@ -557,8 +569,13 @@
 {
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (start_irq_flow() && !irq_may_run(desc))
 		goto out_unlock;
+
+	if (on_pipeline_entry()) {
+		handle_oob_irq(desc);
+		goto out_unlock;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -594,8 +611,13 @@
 
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (start_irq_flow() && !irq_may_run(desc))
 		goto out_unlock;
+
+	if (on_pipeline_entry()) {
+		handle_oob_irq(desc);
+		goto out_unlock;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -617,6 +639,20 @@
 	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_untracked_irq);
+
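+/*
+ * Send the EOI for the outstanding event now, unless the chip defers
+ * EOI to the threaded handler (IRQCHIP_EOI_THREADED).
+ */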
+static inline void cond_eoi_irq(struct irq_desc *desc)
+{
+	struct irq_chip *chip = desc->irq_data.chip;
+
+	if (!(chip->flags & IRQCHIP_EOI_THREADED))
+		chip->irq_eoi(&desc->irq_data);
+}
+
+static inline void mask_cond_eoi_irq(struct irq_desc *desc)
+{
+	mask_irq(desc);
+	cond_eoi_irq(desc);
+}
 
 /*
  * Called unconditionally from handle_level_irq() and only for oneshot
@@ -648,10 +684,19 @@
 void handle_level_irq(struct irq_desc *desc)
 {
 	raw_spin_lock(&desc->lock);
-	mask_ack_irq(desc);
 
-	if (!irq_may_run(desc))
+	if (start_irq_flow()) {
+		mask_ack_irq(desc);
+
+		if (!irq_may_run(desc))
+			goto out_unlock;
+	}
+
+	if (on_pipeline_entry()) {
+		if (handle_oob_irq(desc))
+			goto out_unmask;
 		goto out_unlock;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -666,7 +711,7 @@
 
 	kstat_incr_irqs_this_cpu(desc);
 	handle_irq_event(desc);
-
+out_unmask:
 	cond_unmask_irq(desc);
 
 out_unlock:
@@ -677,7 +722,10 @@
 static void cond_unmask_eoi_irq(struct irq_desc *desc, struct irq_chip *chip)
 {
 	if (!(desc->istate & IRQS_ONESHOT)) {
-		chip->irq_eoi(&desc->irq_data);
+		if (!irqs_pipelined())
+			chip->irq_eoi(&desc->irq_data);
+		else if (!irqd_irq_disabled(&desc->irq_data))
+			unmask_irq(desc);
 		return;
 	}
 	/*
@@ -688,9 +736,11 @@
 	 */
 	if (!irqd_irq_disabled(&desc->irq_data) &&
 	    irqd_irq_masked(&desc->irq_data) && !desc->threads_oneshot) {
-		chip->irq_eoi(&desc->irq_data);
+		if (!irqs_pipelined())
+			chip->irq_eoi(&desc->irq_data);
 		unmask_irq(desc);
-	} else if (!(chip->flags & IRQCHIP_EOI_THREADED)) {
+	} else if (!irqs_pipelined() &&
+		   !(chip->flags & IRQCHIP_EOI_THREADED)) {
 		chip->irq_eoi(&desc->irq_data);
 	}
 }
@@ -710,8 +760,16 @@
 
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (start_irq_flow() && !irq_may_run(desc))
 		goto out;
+
+	if (on_pipeline_entry()) {
+		if (handle_oob_irq(desc))
+			chip->irq_eoi(&desc->irq_data);
+		else
+			mask_cond_eoi_irq(desc);
+		goto out_unlock;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -726,13 +784,13 @@
 	}
 
 	kstat_incr_irqs_this_cpu(desc);
-	if (desc->istate & IRQS_ONESHOT)
+	if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT))
 		mask_irq(desc);
 
 	handle_irq_event(desc);
 
 	cond_unmask_eoi_irq(desc, chip);
-
+out_unlock:
 	raw_spin_unlock(&desc->lock);
 	return;
 out:
@@ -792,30 +850,42 @@
  */
 void handle_edge_irq(struct irq_desc *desc)
 {
+	struct irq_chip *chip = irq_desc_get_chip(desc);
+
 	raw_spin_lock(&desc->lock);
 
-	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+	if (start_irq_flow()) {
+		desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
-	if (!irq_may_run(desc)) {
-		desc->istate |= IRQS_PENDING;
-		mask_ack_irq(desc);
-		goto out_unlock;
+		if (!irq_may_run(desc)) {
+			desc->istate |= IRQS_PENDING;
+			mask_ack_irq(desc);
+			goto out_unlock;
+		}
+
+		/*
+		 * If it's disabled or no action is available, then mask
+		 * it and get out of here.
+		 */
+		if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
+			desc->istate |= IRQS_PENDING;
+			mask_ack_irq(desc);
+			goto out_unlock;
+		}
 	}
 
-	/*
-	 * If its disabled or no action available then mask it and get
-	 * out of here.
-	 */
-	if (irqd_irq_disabled(&desc->irq_data) || !desc->action) {
-		desc->istate |= IRQS_PENDING;
-		mask_ack_irq(desc);
+	if (on_pipeline_entry()) {
+		chip->irq_ack(&desc->irq_data);
+		desc->istate |= IRQS_EDGE;
+		handle_oob_irq(desc);
 		goto out_unlock;
 	}
 
 	kstat_incr_irqs_this_cpu(desc);
 
 	/* Start handling the irq */
-	desc->irq_data.chip->irq_ack(&desc->irq_data);
+	if (!irqs_pipelined())
+		chip->irq_ack(&desc->irq_data);
 
 	do {
 		if (unlikely(!desc->action)) {
@@ -840,6 +910,8 @@
 		 !irqd_irq_disabled(&desc->irq_data));
 
 out_unlock:
+	if (on_pipeline_entry())
+		desc->istate &= ~IRQS_EDGE;
 	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL(handle_edge_irq);
@@ -858,11 +930,20 @@
 
 	raw_spin_lock(&desc->lock);
 
-	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
+	if (start_irq_flow()) {
+		desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
-	if (!irq_may_run(desc)) {
-		desc->istate |= IRQS_PENDING;
-		goto out_eoi;
+		if (!irq_may_run(desc)) {
+			desc->istate |= IRQS_PENDING;
+			goto out_eoi;
+		}
+	}
+
+	if (on_pipeline_entry()) {
+		desc->istate |= IRQS_EDGE;
+		if (handle_oob_irq(desc))
+			goto out_eoi;
+		goto out;
 	}
 
 	/*
@@ -887,6 +968,9 @@
 
 out_eoi:
 	chip->irq_eoi(&desc->irq_data);
+out:
+	if (on_pipeline_entry())
+		desc->istate &= ~IRQS_EDGE;
 	raw_spin_unlock(&desc->lock);
 }
 #endif
@@ -900,6 +984,18 @@
 void handle_percpu_irq(struct irq_desc *desc)
 {
 	struct irq_chip *chip = irq_desc_get_chip(desc);
+	bool handled;
+
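+	/*
+	 * On pipeline entry, ack and EOI the event right away. If no
+	 * out-of-band handler claimed it (i.e. it was logged for the
+	 * in-band stage instead), mask the line until the in-band
+	 * stage replays and unmasks it.
+	 */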
+	if (on_pipeline_entry()) {
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+		handled = handle_oob_irq(desc);
+		if (chip->irq_eoi)
+			chip->irq_eoi(&desc->irq_data);
+		if (!handled && chip->irq_mask)
+			chip->irq_mask(&desc->irq_data);
+		return;
+	}
 
 	/*
 	 * PER CPU interrupts are not serialized. Do not touch
@@ -907,13 +1003,17 @@
 	 */
 	__kstat_incr_irqs_this_cpu(desc);
 
-	if (chip->irq_ack)
-		chip->irq_ack(&desc->irq_data);
-
-	handle_irq_event_percpu(desc);
-
-	if (chip->irq_eoi)
-		chip->irq_eoi(&desc->irq_data);
+	if (irqs_pipelined()) {
+		handle_irq_event_percpu(desc);
+		if (chip->irq_unmask)
+			chip->irq_unmask(&desc->irq_data);
+	} else {
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+		handle_irq_event_percpu(desc);
+		if (chip->irq_eoi)
+			chip->irq_eoi(&desc->irq_data);
+	}
 }
 
 /**
@@ -933,6 +1033,18 @@
 	struct irqaction *action = desc->action;
 	unsigned int irq = irq_desc_get_irq(desc);
 	irqreturn_t res;
+	bool handled;
+
+	if (on_pipeline_entry()) {
+		if (chip->irq_ack)
+			chip->irq_ack(&desc->irq_data);
+		handled = handle_oob_irq(desc);
+		if (chip->irq_eoi)
+			chip->irq_eoi(&desc->irq_data);
+		if (!handled && chip->irq_mask)
+			chip->irq_mask(&desc->irq_data);
+		return;
+	}
 
 	/*
 	 * PER CPU interrupts are not serialized. Do not touch
@@ -940,7 +1052,7 @@
 	 */
 	__kstat_incr_irqs_this_cpu(desc);
 
-	if (chip->irq_ack)
+	if (!irqs_pipelined() && chip->irq_ack)
 		chip->irq_ack(&desc->irq_data);
 
 	if (likely(action)) {
@@ -958,8 +1070,11 @@
 			    enabled ? " and unmasked" : "", irq, cpu);
 	}
 
-	if (chip->irq_eoi)
-		chip->irq_eoi(&desc->irq_data);
+	if (irqs_pipelined()) {
+		if (chip->irq_unmask)
+			chip->irq_unmask(&desc->irq_data);
+	} else if (chip->irq_eoi)
+			chip->irq_eoi(&desc->irq_data);
 }
 
 /**
@@ -979,10 +1094,21 @@
 	unsigned int irq = irq_desc_get_irq(desc);
 	irqreturn_t res;
 
-	__kstat_incr_irqs_this_cpu(desc);
-
 	if (chip->irq_eoi)
 		chip->irq_eoi(&desc->irq_data);
+
+	if (on_pipeline_entry()) {
+		handle_oob_irq(desc);
+		return;
+	}
+
+	/* Trap spurious IPIs if pipelined. */
+	if (irqs_pipelined() && !action) {
+		print_irq_desc(irq, desc);
+		return;
+	}
+
+	__kstat_incr_irqs_this_cpu(desc);
 
 	trace_irq_handler_entry(irq, action);
 	res = action->handler(irq, raw_cpu_ptr(action->percpu_dev_id));
@@ -1076,6 +1202,7 @@
 			desc->handle_irq = handle;
 		}
 
+		irq_settings_set_chained(desc);
 		irq_settings_set_noprobe(desc);
 		irq_settings_set_norequest(desc);
 		irq_settings_set_nothread(desc);
@@ -1251,8 +1378,17 @@
 
 	raw_spin_lock(&desc->lock);
 
-	if (!irq_may_run(desc))
+	if (start_irq_flow() && !irq_may_run(desc))
 		goto out;
+
+	if (on_pipeline_entry()) {
+		chip->irq_ack(&desc->irq_data);
+		if (handle_oob_irq(desc))
+			chip->irq_eoi(&desc->irq_data);
+		else
+			mask_cond_eoi_irq(desc);
+		goto out_unlock;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -1267,11 +1403,13 @@
 	}
 
 	kstat_incr_irqs_this_cpu(desc);
-	if (desc->istate & IRQS_ONESHOT)
-		mask_irq(desc);
+	if (!irqs_pipelined()) {
+		if (desc->istate & IRQS_ONESHOT)
+			mask_irq(desc);
 
-	/* Start handling the irq */
-	desc->irq_data.chip->irq_ack(&desc->irq_data);
+		/* Start handling the irq */
+		chip->irq_ack(&desc->irq_data);
+	}
 
 	handle_irq_event(desc);
 
@@ -1282,6 +1420,7 @@
 out:
 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
 		chip->irq_eoi(&desc->irq_data);
+out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_fasteoi_ack_irq);
@@ -1301,10 +1440,21 @@
 	struct irq_chip *chip = desc->irq_data.chip;
 
 	raw_spin_lock(&desc->lock);
-	mask_ack_irq(desc);
 
-	if (!irq_may_run(desc))
-		goto out;
+	if (start_irq_flow()) {
+		mask_ack_irq(desc);
+
+		if (!irq_may_run(desc))
+			goto out;
+	}
+
+	if (on_pipeline_entry()) {
+		if (handle_oob_irq(desc))
+			chip->irq_eoi(&desc->irq_data);
+		else
+			cond_eoi_irq(desc);
+		goto out_unlock;
+	}
 
 	desc->istate &= ~(IRQS_REPLAY | IRQS_WAITING);
 
@@ -1319,7 +1469,7 @@
 	}
 
 	kstat_incr_irqs_this_cpu(desc);
-	if (desc->istate & IRQS_ONESHOT)
+	if (!irqs_pipelined() && (desc->istate & IRQS_ONESHOT))
 		mask_irq(desc);
 
 	handle_irq_event(desc);
@@ -1331,6 +1481,7 @@
 out:
 	if (!(chip->flags & IRQCHIP_EOI_IF_HANDLED))
 		chip->irq_eoi(&desc->irq_data);
+out_unlock:
 	raw_spin_unlock(&desc->lock);
 }
 EXPORT_SYMBOL_GPL(handle_fasteoi_mask_irq);
diff --git a/kernel/kernel/irq/cpuhotplug.c b/kernel/kernel/irq/cpuhotplug.c
index cf8d4f7..867db10 100644
--- a/kernel/kernel/irq/cpuhotplug.c
+++ b/kernel/kernel/irq/cpuhotplug.c
@@ -156,6 +156,9 @@
 {
 	struct irq_desc *desc;
 	unsigned int irq;
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
 
 	for_each_active_irq(irq) {
 		bool affinity_broken;
@@ -170,6 +173,8 @@
 					    irq, smp_processor_id());
 		}
 	}
+
+	hard_local_irq_restore(flags);
 }
 
 static bool hk_should_isolate(struct irq_data *data, unsigned int cpu)
diff --git a/kernel/kernel/irq/debug.h b/kernel/kernel/irq/debug.h
index 8ccb326..40f7268 100644
--- a/kernel/kernel/irq/debug.h
+++ b/kernel/kernel/irq/debug.h
@@ -33,6 +33,8 @@
 	___P(IRQ_NOREQUEST);
 	___P(IRQ_NOTHREAD);
 	___P(IRQ_NOAUTOEN);
+	___P(IRQ_OOB);
+	___P(IRQ_CHAINED);
 
 	___PS(IRQS_AUTODETECT);
 	___PS(IRQS_REPLAY);
diff --git a/kernel/kernel/irq/dummychip.c b/kernel/kernel/irq/dummychip.c
index 0b0cdf2..7bf8cbe 100644
--- a/kernel/kernel/irq/dummychip.c
+++ b/kernel/kernel/irq/dummychip.c
@@ -43,7 +43,7 @@
 	.irq_enable	= noop,
 	.irq_disable	= noop,
 	.irq_ack	= ack_bad,
-	.flags		= IRQCHIP_SKIP_SET_WAKE,
+	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE,
 };
 
 /*
@@ -59,6 +59,6 @@
 	.irq_ack	= noop,
 	.irq_mask	= noop,
 	.irq_unmask	= noop,
-	.flags		= IRQCHIP_SKIP_SET_WAKE,
+	.flags		= IRQCHIP_SKIP_SET_WAKE | IRQCHIP_PIPELINE_SAFE,
 };
 EXPORT_SYMBOL_GPL(dummy_irq_chip);
diff --git a/kernel/kernel/irq/generic-chip.c b/kernel/kernel/irq/generic-chip.c
index 79cb6d0..6ca043e 100644
--- a/kernel/kernel/irq/generic-chip.c
+++ b/kernel/kernel/irq/generic-chip.c
@@ -16,7 +16,7 @@
 #include "internals.h"
 
 static LIST_HEAD(gc_list);
-static DEFINE_RAW_SPINLOCK(gc_lock);
+static DEFINE_HARD_SPINLOCK(gc_lock);
 
 /**
  * irq_gc_noop - NOOP function
diff --git a/kernel/kernel/irq/handle.c b/kernel/kernel/irq/handle.c
index 8806444..e4fe9c6 100644
--- a/kernel/kernel/irq/handle.c
+++ b/kernel/kernel/irq/handle.c
@@ -32,9 +32,16 @@
 {
 	unsigned int irq = irq_desc_get_irq(desc);
 
+	/* Let the in-band stage report the issue. */
+	if (on_pipeline_entry()) {
+		ack_bad_irq(irq);
+		return;
+	}
+
 	print_irq_desc(irq, desc);
 	kstat_incr_irqs_this_cpu(desc);
-	ack_bad_irq(irq);
+	if (!irqs_pipelined())
+		ack_bad_irq(irq);
 }
 EXPORT_SYMBOL_GPL(handle_bad_irq);
 
diff --git a/kernel/kernel/irq/internals.h b/kernel/kernel/irq/internals.h
index e58342a..341c8f6 100644
--- a/kernel/kernel/irq/internals.h
+++ b/kernel/kernel/irq/internals.h
@@ -52,6 +52,7 @@
  * IRQS_PENDING			- irq is pending and replayed later
  * IRQS_SUSPENDED		- irq is suspended
  * IRQS_NMI			- irq line is used to deliver NMIs
+ * IRQS_EDGE			- irq line received an edge event
  */
 enum {
 	IRQS_AUTODETECT		= 0x00000001,
@@ -64,6 +65,7 @@
 	IRQS_SUSPENDED		= 0x00000800,
 	IRQS_TIMINGS		= 0x00001000,
 	IRQS_NMI		= 0x00002000,
+	IRQS_EDGE		= 0x00004000,
 };
 
 #include "debug.h"
diff --git a/kernel/kernel/irq/irqdesc.c b/kernel/kernel/irq/irqdesc.c
index 2f35de3..846c2c8 100644
--- a/kernel/kernel/irq/irqdesc.c
+++ b/kernel/kernel/irq/irqdesc.c
@@ -16,6 +16,7 @@
 #include <linux/bitmap.h>
 #include <linux/irqdomain.h>
 #include <linux/sysfs.h>
+#include <linux/irq_pipeline.h>
 
 #include "internals.h"
 
@@ -453,6 +454,7 @@
 	 * irq_sysfs_init() as well.
 	 */
 	irq_sysfs_del(desc);
+	uncache_irq_desc(irq);
 	delete_irq_desc(irq);
 
 	/*
@@ -633,9 +635,12 @@
 #endif /* !CONFIG_SPARSE_IRQ */
 
 /**
- * generic_handle_irq - Invoke the handler for a particular irq
+ * generic_handle_irq - Handle a particular irq
  * @irq:	The irq number to handle
  *
+ * The handler is invoked unless we are entering the interrupt
+ * pipeline, in which case the incoming IRQ is only scheduled for
+ * deferred delivery.
  */
 int generic_handle_irq(unsigned int irq)
 {
@@ -646,7 +651,7 @@
 		return -EINVAL;
 
 	data = irq_desc_get_irq_data(desc);
-	if (WARN_ON_ONCE(!in_irq() && handle_enforce_irqctx(data)))
+	if (WARN_ON_ONCE(!in_hard_irq() && handle_enforce_irqctx(data)))
 		return -EPERM;
 
 	generic_handle_irq_desc(desc);
diff --git a/kernel/kernel/irq/irqptorture.c b/kernel/kernel/irq/irqptorture.c
new file mode 100644
index 0000000..2518c47
--- /dev/null
+++ b/kernel/kernel/irq/irqptorture.c
@@ -0,0 +1,254 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2017 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/kernel.h>
+#include <linux/torture.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <linux/tick.h>
+#include <linux/smp.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+#include <linux/stop_machine.h>
+#include <linux/irq_work.h>
+#include <linux/completion.h>
+#include <linux/slab.h>
+#include "settings.h"
+
+static void torture_event_handler(struct clock_event_device *dev)
+{
+	/*
+	 * We are running on the oob stage, in NMI-like mode. Schedule
+	 * a tick on the proxy device to satisfy the corresponding
+	 * timing request asap.
+	 */
+	tick_notify_proxy();
+}
+
+static void setup_proxy(struct clock_proxy_device *dev)
+{
+	dev->handle_oob_event = torture_event_handler;
+}
+
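+/*
+ * Put a proxy in front of the tick device on each online CPU, so
+ * that timer events first reach torture_event_handler() on the oob
+ * stage; the real tick devices are handed back when the test stops.
+ */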
+static int start_tick_takeover_test(void)
+{
+	return tick_install_proxy(setup_proxy, cpu_online_mask);
+}
+
+static void stop_tick_takeover_test(void)
+{
+	tick_uninstall_proxy(cpu_online_mask);
+}
+
+struct stop_machine_p_data {
+	int origin_cpu;
+	cpumask_var_t disable_mask;
+};
+
+static int stop_machine_handler(void *arg)
+{
+	struct stop_machine_p_data *p = arg;
+	int cpu = raw_smp_processor_id();
+
+	/*
+	 * The stop_machine() handler must run with hard
+	 * IRQs off, note the current state in the result mask.
+	 */
+	if (hard_irqs_disabled())
+		cpumask_set_cpu(cpu, p->disable_mask);
+
+	if (cpu != p->origin_cpu)
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d responds to stop_machine()\n", cpu);
+	return 0;
+}
+
+/*
+ * We test stop_machine() as a way to validate IPI handling in a
+ * pipelined interrupt context.
+ */
+static int test_stop_machine(void)
+{
+	struct stop_machine_p_data d;
+	cpumask_var_t tmp_mask;
+	int ret = -ENOMEM, cpu;
+
+	if (!zalloc_cpumask_var(&d.disable_mask, GFP_KERNEL)) {
+		WARN_ON(1);
+		return ret;
+	}
+
+	if (!alloc_cpumask_var(&tmp_mask, GFP_KERNEL)) {
+		WARN_ON(1);
+		goto fail;
+	}
+
+	ret = -EINVAL;
+	d.origin_cpu = raw_smp_processor_id();
+	pr_alert("irq_pipeline" TORTURE_FLAG
+		 " CPU%d initiates stop_machine()\n",
+		 d.origin_cpu);
+
+	ret = stop_machine(stop_machine_handler, &d, cpu_online_mask);
+	WARN_ON(ret);
+	if (ret)
+		goto fail;
+
+	/*
+	 * Check whether all handlers did run with hard IRQs off. If
+	 * some of them did not, then we have a problem with the stop
+	 * IRQ delivery.
+	 */
+	cpumask_xor(tmp_mask, cpu_online_mask, d.disable_mask);
+	if (!cpumask_empty(tmp_mask)) {
+		for_each_cpu(cpu, tmp_mask)
+			pr_alert("irq_pipeline" TORTURE_FLAG
+				 " CPU%d: hard IRQs ON in stop_machine()"
+				 " handler!\n", cpu);
+	}
+
+	free_cpumask_var(tmp_mask);
+fail:
+	free_cpumask_var(d.disable_mask);
+
+	return ret;
+}
+
+static struct irq_work_tester {
+	struct irq_work work;
+	struct completion done;
+} irq_work_tester;
+
+static void irq_work_handler(struct irq_work *work)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (!running_inband()) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d: irq_work handler not running on"
+			 " in-band stage?!\n", cpu);
+		return;
+	}
+
+	if (work != &irq_work_tester.work)
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d: irq_work handler received broken"
+			 " arg?!\n", cpu);
+	else {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d: irq_work handled\n", cpu);
+		complete(&irq_work_tester.done);
+	}
+}
+
+static int trigger_oob_work(void *arg)
+{
+	int cpu = raw_smp_processor_id();
+
+	if (!running_oob()) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d: escalated request not running on"
+			 " oob stage?!\n", cpu);
+		return -EINVAL;
+	}
+
+	if ((struct irq_work_tester *)arg != &irq_work_tester) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d: escalation handler received broken"
+			 " arg?!\n", cpu);
+		return -EINVAL;
+	}
+
+	irq_work_queue(&irq_work_tester.work);
+	pr_alert("irq_pipeline" TORTURE_FLAG
+		 " CPU%d: stage escalation request works\n",
+		 cpu);
+
+	return 0;
+}
+
+static int test_interstage_work_injection(void)
+{
+	struct irq_work_tester *p = &irq_work_tester;
+	int ret, cpu = raw_smp_processor_id();
+	unsigned long rem;
+
+	init_completion(&p->done);
+	init_irq_work(&p->work, irq_work_handler);
+
+	/* Trigger over the in-band stage. */
+	irq_work_queue(&p->work);
+	rem = wait_for_completion_timeout(&p->done, HZ / 10);
+	if (!rem) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d: irq_work trigger from in-band stage not handled!\n",
+			 cpu);
+		return -EINVAL;
+	}
+
+	pr_alert("irq_pipeline" TORTURE_FLAG
+		 " CPU%d: in-band->in-band irq_work trigger works\n", cpu);
+
+	reinit_completion(&p->done);
+
+	/* Now try over the oob stage. */
+	ret = run_oob_call(trigger_oob_work, p);
+	if (ret)
+		return ret;
+
+	rem = wait_for_completion_timeout(&p->done, HZ / 10);
+	if (!rem) {
+		pr_alert("irq_pipeline" TORTURE_FLAG
+			 " CPU%d: irq_work trigger from oob"
+			 " stage not handled!\n", cpu);
+		return -EINVAL;
+	}
+
+	pr_alert("irq_pipeline" TORTURE_FLAG
+		 " CPU%d: oob->in-band irq_work trigger works\n",
+		 cpu);
+
+	return 0;
+}
+
+static int __init irqp_torture_init(void)
+{
+	int ret;
+
+	pr_info("Starting IRQ pipeline tests...\n");
+
+	ret = enable_oob_stage("torture");
+	if (ret) {
+		if (ret == -EBUSY)
+			pr_alert("irq_pipeline" TORTURE_FLAG
+				 " won't run, oob stage '%s' is already installed",
+				 oob_stage.name);
+
+		return ret;
+	}
+
+	ret = test_stop_machine();
+	if (ret)
+		goto out;
+
+	ret = start_tick_takeover_test();
+	if (ret)
+		goto out;
+
+	ret = test_interstage_work_injection();
+	if (!ret)
+		msleep(1000);
+
+	stop_tick_takeover_test();
+out:
+	disable_oob_stage();
+	pr_info("IRQ pipeline tests %s.\n", ret ? "FAILED" : "OK");
+
+	return 0;
+}
+late_initcall(irqp_torture_init);
diff --git a/kernel/kernel/irq/manage.c b/kernel/kernel/irq/manage.c
index 76da8de..4b06f5a 100644
--- a/kernel/kernel/irq/manage.c
+++ b/kernel/kernel/irq/manage.c
@@ -10,6 +10,7 @@
 
 #include <linux/irq.h>
 #include <linux/kthread.h>
+#include <linux/kconfig.h>
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
@@ -829,6 +830,50 @@
 }
 EXPORT_SYMBOL(irq_set_irq_wake);
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+/**
+ *	irq_switch_oob - Control out-of-band setting for a registered IRQ descriptor
+ *	@irq:	interrupt to control
+ *	@on:	enable/disable pipelining
+ *
+ *	Enable/disable out-of-band handling for an IRQ. At least one
+ *	action must have been previously registered for such
+ *	interrupt.
+ *
+ *      The previously registered action(s) need not bear the
+ *      IRQF_OOB flag for the IRQ to be switched to out-of-band
+ *      handling. This call enables switching pre-installed IRQs from
+ *      in-band to out-of-band handling.
+ *
+ *      NOTE: This routine affects all action handlers sharing the
+ *      IRQ.
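+ *
+ *      A minimal usage sketch (hypothetical driver snippet, error
+ *      checking elided), assuming an action was installed earlier
+ *      with request_irq():
+ *
+ *          irq_switch_oob(irq, true);    handle from the oob stage
+ *          ...
+ *          irq_switch_oob(irq, false);   revert to in-band handling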
+ */
+int irq_switch_oob(unsigned int irq, bool on)
+{
+	struct irq_desc *desc;
+	unsigned long flags;
+	int ret = 0;
+
+	desc = irq_get_desc_lock(irq, &flags, 0);
+	if (!desc)
+		return -EINVAL;
+
+	if (!desc->action)
+		ret = -EINVAL;
+	else if (on)
+		irq_settings_set_oob(desc);
+	else
+		irq_settings_clr_oob(desc);
+
+	irq_put_desc_unlock(desc, flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(irq_switch_oob);
+
+#endif /* CONFIG_IRQ_PIPELINE */
+
 /*
  * Internal function that tells the architecture code whether a
  * particular irq has been exclusively allocated or is available
@@ -845,7 +890,8 @@
 
 	if (irq_settings_can_request(desc)) {
 		if (!desc->action ||
-		    irqflags & desc->action->flags & IRQF_SHARED)
+		    ((irqflags & desc->action->flags & IRQF_SHARED) &&
+		     !((irqflags ^ desc->action->flags) & IRQF_OOB)))
 			canrequest = 1;
 	}
 	irq_put_desc_unlock(desc, flags);
@@ -1419,6 +1465,21 @@
 
 	new->irq = irq;
 
+	ret = -EINVAL;
+	/*
+	 * Out-of-band interrupts can be shared but not threaded. We
+	 * silently ignore the OOB setting if interrupt pipelining is
+	 * disabled.
+	 */
+	if (!irqs_pipelined())
+		new->flags &= ~IRQF_OOB;
+	else if (new->flags & IRQF_OOB) {
+		if (new->thread_fn)
+			goto out_mput;
+		new->flags |= IRQF_NO_THREAD;
+		new->flags &= ~IRQF_ONESHOT;
+	}
+
 	/*
 	 * If the trigger type is not specified by the caller,
 	 * then use the default for this interrupt.
@@ -1432,10 +1493,8 @@
 	 */
 	nested = irq_settings_is_nested_thread(desc);
 	if (nested) {
-		if (!new->thread_fn) {
-			ret = -EINVAL;
+		if (!new->thread_fn)
 			goto out_mput;
-		}
 		/*
 		 * Replace the primary handler which was provided from
 		 * the driver for non nested interrupt handling by the
@@ -1519,7 +1578,7 @@
 		 * the same type (level, edge, polarity). So both flag
 		 * fields must have IRQF_SHARED set and the bits which
 		 * set the trigger type must match. Also all must
-		 * agree on ONESHOT.
+		 * agree on ONESHOT and OOB.
 		 * Interrupt lines used for NMIs cannot be shared.
 		 */
 		unsigned int oldtype;
@@ -1544,7 +1603,7 @@
 
 		if (!((old->flags & new->flags) & IRQF_SHARED) ||
 		    (oldtype != (new->flags & IRQF_TRIGGER_MASK)) ||
-		    ((old->flags ^ new->flags) & IRQF_ONESHOT))
+		    ((old->flags ^ new->flags) & (IRQF_OOB|IRQF_ONESHOT)))
 			goto mismatch;
 
 		/* All handlers must agree on per-cpuness */
@@ -1661,6 +1720,9 @@
 
 		if (new->flags & IRQF_ONESHOT)
 			desc->istate |= IRQS_ONESHOT;
+
+		if (new->flags & IRQF_OOB)
+			irq_settings_set_oob(desc);
 
 		/* Exclude IRQ from balancing if requested */
 		if (new->flags & IRQF_NOBALANCING) {
@@ -1809,6 +1871,8 @@
 		irq_settings_clr_disable_unlazy(desc);
 		/* Only shutdown. Deactivate after synchronize_hardirq() */
 		irq_shutdown(desc);
+		/* Turn off OOB handling (after shutdown). */
+		irq_settings_clr_oob(desc);
 	}
 
 #ifdef CONFIG_SMP
@@ -1845,14 +1909,15 @@
 
 #ifdef CONFIG_DEBUG_SHIRQ
 	/*
-	 * It's a shared IRQ -- the driver ought to be prepared for an IRQ
-	 * event to happen even now it's being freed, so let's make sure that
-	 * is so by doing an extra call to the handler ....
+	 * It's a shared IRQ (with in-band handler) -- the driver
+	 * ought to be prepared for an IRQ event to happen even now
+	 * it's being freed, so let's make sure that is so by doing an
+	 * extra call to the handler ....
 	 *
 	 * ( We do this after actually deregistering it, to make sure that a
 	 *   'real' IRQ doesn't run in parallel with our fake. )
 	 */
-	if (action->flags & IRQF_SHARED) {
+	if ((action->flags & (IRQF_SHARED|IRQF_OOB)) == IRQF_SHARED) {
 		local_irq_save(flags);
 		action->handler(irq, dev_id);
 		local_irq_restore(flags);
@@ -2473,7 +2538,7 @@
  *	__request_percpu_irq - allocate a percpu interrupt line
  *	@irq: Interrupt line to allocate
  *	@handler: Function to be called when the IRQ occurs.
- *	@flags: Interrupt type flags (IRQF_TIMER only)
+ *	@flags: Interrupt type flags (IRQF_TIMER and/or IRQF_OOB only)
  *	@devname: An ascii name for the claiming device
  *	@dev_id: A percpu cookie passed back to the handler function
  *
@@ -2502,7 +2567,7 @@
 	    !irq_settings_is_per_cpu_devid(desc))
 		return -EINVAL;
 
-	if (flags && flags != IRQF_TIMER)
+	if (flags & ~(IRQF_TIMER|IRQF_OOB))
 		return -EINVAL;
 
 	action = kzalloc(sizeof(struct irqaction), GFP_KERNEL);
diff --git a/kernel/kernel/irq/msi.c b/kernel/kernel/irq/msi.c
index b47d95b..8fc1d87 100644
--- a/kernel/kernel/irq/msi.c
+++ b/kernel/kernel/irq/msi.c
@@ -272,6 +272,9 @@
 	struct irq_chip *chip = info->chip;
 
 	BUG_ON(!chip || !chip->irq_mask || !chip->irq_unmask);
+	WARN_ONCE(IS_ENABLED(CONFIG_IRQ_PIPELINE) &&
+		  (chip->flags & IRQCHIP_PIPELINE_SAFE) == 0,
+		  "MSI domain irqchip %s is not pipeline-safe!", chip->name);
 	if (!chip->irq_set_affinity)
 		chip->irq_set_affinity = msi_domain_set_affinity;
 }
diff --git a/kernel/kernel/irq/pipeline.c b/kernel/kernel/irq/pipeline.c
new file mode 100644
index 0000000..f64d731
--- /dev/null
+++ b/kernel/kernel/irq/pipeline.c
@@ -0,0 +1,1764 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/irq_pipeline.h>
+#include <linux/irq_work.h>
+#include <linux/jhash.h>
+#include <linux/debug_locks.h>
+#include <linux/dovetail.h>
+#include <dovetail/irq.h>
+#include <trace/events/irq.h>
+#include "internals.h"
+
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+#define trace_on_debug
+#else
+#define trace_on_debug  notrace
+#endif
+
+struct irq_stage inband_stage = {
+	.name = "Linux",
+};
+EXPORT_SYMBOL_GPL(inband_stage);
+
+struct irq_stage oob_stage;
+EXPORT_SYMBOL_GPL(oob_stage);
+
+struct irq_domain *synthetic_irq_domain;
+EXPORT_SYMBOL_GPL(synthetic_irq_domain);
+
+bool irq_pipeline_oopsing;
+EXPORT_SYMBOL_GPL(irq_pipeline_oopsing);
+
+bool irq_pipeline_active;
+EXPORT_SYMBOL_GPL(irq_pipeline_active);
+
+#define IRQ_L1_MAPSZ	BITS_PER_LONG
+#define IRQ_L2_MAPSZ	(BITS_PER_LONG * BITS_PER_LONG)
+#define IRQ_FLAT_MAPSZ	DIV_ROUND_UP(IRQ_BITMAP_BITS, BITS_PER_LONG)
+
+#if IRQ_FLAT_MAPSZ > IRQ_L2_MAPSZ
+#define __IRQ_STAGE_MAP_LEVELS	4	/* up to 4/16M vectors */
+#elif IRQ_FLAT_MAPSZ > IRQ_L1_MAPSZ
+#define __IRQ_STAGE_MAP_LEVELS	3	/* up to 64/256M vectors */
+#else
+#define __IRQ_STAGE_MAP_LEVELS	2	/* up to 1024/4096 vectors */
+#endif
+
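+/*
+ * The per-stage event log is a multi-level bitmap: the per-stage
+ * index_0 word, plus the index_1/index_2 arrays when present, flag
+ * which words of the flat map contain at least one pending IRQ, so
+ * that pull_next_irq() can locate the next event with a few __ffs()
+ * lookups instead of scanning the whole flat array.
+ */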
+struct irq_event_map {
+#if __IRQ_STAGE_MAP_LEVELS >= 3
+	unsigned long index_1[IRQ_L1_MAPSZ];
+#if __IRQ_STAGE_MAP_LEVELS >= 4
+	unsigned long index_2[IRQ_L2_MAPSZ];
+#endif
+#endif
+	unsigned long flat[IRQ_FLAT_MAPSZ];
+};
+
+#ifdef CONFIG_SMP
+
+static struct irq_event_map bootup_irq_map __initdata;
+
+static DEFINE_PER_CPU(struct irq_event_map, irq_map_array[2]);
+
+DEFINE_PER_CPU(struct irq_pipeline_data, irq_pipeline) = {
+	.stages = {
+		[0] = {
+			.log = {
+				.map = &bootup_irq_map,
+			},
+			.stage = &inband_stage,
+		},
+	},
+};
+
+#else /* !CONFIG_SMP */
+
+static struct irq_event_map inband_irq_map;
+
+static struct irq_event_map oob_irq_map;
+
+DEFINE_PER_CPU(struct irq_pipeline_data, irq_pipeline) = {
+	.stages = {
+		[0] = {
+			.log = {
+				.map = &inband_irq_map,
+			},
+			.stage = &inband_stage,
+		},
+		[1] = {
+			.log = {
+				.map = &oob_irq_map,
+			},
+		},
+	},
+};
+
+#endif /* !CONFIG_SMP */
+
+EXPORT_PER_CPU_SYMBOL(irq_pipeline);
+
+static void sirq_noop(struct irq_data *data) { }
+
+/* Virtual interrupt controller for synthetic IRQs. */
+static struct irq_chip sirq_chip = {
+	.name		= "SIRQC",
+	.irq_enable	= sirq_noop,
+	.irq_disable	= sirq_noop,
+	.flags		= IRQCHIP_PIPELINE_SAFE | IRQCHIP_SKIP_SET_WAKE,
+};
+
+static int sirq_map(struct irq_domain *d, unsigned int irq,
+		    irq_hw_number_t hwirq)
+{
+	irq_set_percpu_devid(irq);
+	irq_set_chip_and_handler(irq, &sirq_chip, handle_synthetic_irq);
+
+	return 0;
+}
+
+static struct irq_domain_ops sirq_domain_ops = {
+	.map	= sirq_map,
+};
+
+#ifdef CONFIG_SPARSE_IRQ
+/*
+ * The performance of the radix tree in sparse mode degrades badly
+ * under mm stress on some hardware; use a local descriptor cache to
+ * ease the pain.
+ */
+#define DESC_CACHE_SZ  128
+
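+/*
+ * Slots are read and updated locklessly: a NULL slot, or one holding
+ * a descriptor for a different IRQ, merely causes a fallback to a
+ * regular irq_to_desc() lookup.
+ */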
+static struct irq_desc *desc_cache[DESC_CACHE_SZ] __cacheline_aligned;
+
+static inline u32 hash_irq(unsigned int irq)
+{
+	return jhash(&irq, sizeof(irq), irq) % DESC_CACHE_SZ;
+}
+
+static __always_inline
+struct irq_desc *irq_to_cached_desc(unsigned int irq)
+{
+	int hval = hash_irq(irq);
+	struct irq_desc *desc = desc_cache[hval];
+
+	if (unlikely(desc == NULL || irq_desc_get_irq(desc) != irq)) {
+		desc = irq_to_desc(irq);
+		desc_cache[hval] = desc;
+	}
+
+	return desc;
+}
+
+void uncache_irq_desc(unsigned int irq)
+{
+	int hval = hash_irq(irq);
+
+	desc_cache[hval] = NULL;
+}
+
+#else
+
+static struct irq_desc *irq_to_cached_desc(unsigned int irq)
+{
+	return irq_to_desc(irq);
+}
+
+#endif
+
+/**
+ *	handle_synthetic_irq -  synthetic irq handler
+ *	@desc:	the interrupt description structure for this irq
+ *
+ *	Handles synthetic interrupts flowing down the IRQ pipeline
+ *	with per-CPU semantics.
+ *
+ *      CAUTION: synthetic IRQs may be used to map hardware-generated
+ *      events (e.g. IPIs or traps), so we must start handling them
+ *      as common interrupts.
+ */
+void handle_synthetic_irq(struct irq_desc *desc)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irqaction *action;
+	irqreturn_t ret;
+	void *dev_id;
+
+	if (on_pipeline_entry()) {
+		handle_oob_irq(desc);
+		return;
+	}
+
+	action = desc->action;
+	if (action == NULL) {
+		if (printk_ratelimit())
+			printk(KERN_WARNING
+			       "CPU%d: WARNING: synthetic IRQ%d has no action.\n",
+			       smp_processor_id(), irq);
+		return;
+	}
+
+	__kstat_incr_irqs_this_cpu(desc);
+	trace_irq_handler_entry(irq, action);
+	dev_id = raw_cpu_ptr(action->percpu_dev_id);
+	ret = action->handler(irq, dev_id);
+	trace_irq_handler_exit(irq, action, ret);
+}
+
+void sync_irq_stage(struct irq_stage *top)
+{
+	struct irq_stage_data *p;
+	struct irq_stage *stage;
+
+	/* We must enter over the inband stage with hardirqs off. */
+	if (irq_pipeline_debug()) {
+		WARN_ON_ONCE(!hard_irqs_disabled());
+		WARN_ON_ONCE(current_irq_stage != &inband_stage);
+	}
+
+	stage = top;
+
+	for (;;) {
+		if (stage == &inband_stage) {
+			if (test_inband_stall())
+				break;
+		} else {
+			if (test_oob_stall())
+				break;
+		}
+
+		p = this_staged(stage);
+		if (stage_irqs_pending(p)) {
+			if (stage == &inband_stage)
+				sync_current_irq_stage();
+			else {
+				/* Switch to oob before synchronizing. */
+				switch_oob(p);
+				sync_current_irq_stage();
+				/* Then back to the inband stage. */
+				switch_inband(this_inband_staged());
+			}
+		}
+
+		if (stage == &inband_stage)
+			break;
+
+		stage = &inband_stage;
+	}
+}
+
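+/*
+ * Synchronize the interrupt logs starting from the topmost stage
+ * (oob when present, inband otherwise), skipping stalled stages.
+ */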
+void synchronize_pipeline(void) /* hardirqs off */
+{
+	struct irq_stage *top = &oob_stage;
+	int stalled = test_oob_stall();
+
+	if (unlikely(!oob_stage_present())) {
+		top = &inband_stage;
+		stalled = test_inband_stall();
+	}
+
+	if (current_irq_stage != top)
+		sync_irq_stage(top);
+	else if (!stalled)
+		sync_current_irq_stage();
+}
+
+static void __inband_irq_enable(void)
+{
+	struct irq_stage_data *p;
+	unsigned long flags;
+
+	check_inband_stage();
+
+	flags = hard_local_irq_save();
+
+	unstall_inband_nocheck();
+
+	p = this_inband_staged();
+	if (unlikely(stage_irqs_pending(p) && !in_pipeline())) {
+		sync_current_irq_stage();
+		hard_local_irq_restore(flags);
+		preempt_check_resched();
+	} else {
+		hard_local_irq_restore(flags);
+	}
+}
+
+/**
+ *	inband_irq_enable - enable interrupts for the inband stage
+ *
+ *	Enable interrupts for the inband stage, allowing interrupts to
+ *	preempt the in-band code. If in-band IRQs are pending for the
+ *	inband stage in the per-CPU log at the time of this call, they
+ *	are played back.
+ *
+ *      The caller is expected to tell the tracer about the change, by
+ *      calling trace_hardirqs_on().
+ */
+notrace void inband_irq_enable(void)
+{
+	/*
+	 * We are NOT supposed to enter this code with hard IRQs off.
+	 * If we do, then the caller might be wrongly assuming that
+	 * invoking local_irq_enable() implies enabling hard
+	 * interrupts like the legacy I-pipe did, which is not the
+	 * case anymore. Relax this requirement when oopsing, since
+	 * the kernel may be in a weird state.
+	 */
+	WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
+	__inband_irq_enable();
+}
+EXPORT_SYMBOL(inband_irq_enable);
+
+/**
+ *	inband_irq_disable - disable interrupts for the inband stage
+ *
+ *	Disable interrupts for the inband stage, disabling in-band
+ *	interrupts. Out-of-band interrupts can still be taken and
+ *	delivered to their respective handlers though.
+ */
+notrace void inband_irq_disable(void)
+{
+	check_inband_stage();
+	stall_inband_nocheck();
+}
+EXPORT_SYMBOL(inband_irq_disable);
+
+/**
+ *	inband_irqs_disabled - test the virtual interrupt state
+ *
+ *	Returns non-zero if interrupts are currently disabled for the
+ *	inband stage, zero otherwise.
+ *
+ *	May be used from the oob stage too (e.g. for tracing
+ *	purposes).
+ */
+noinstr int inband_irqs_disabled(void)
+{
+	return test_inband_stall();
+}
+EXPORT_SYMBOL(inband_irqs_disabled);
+
+/**
+ *	inband_irq_save - test and disable (virtual) interrupts
+ *
+ *	Save the virtual interrupt state, then disable interrupts for
+ *	the inband stage.
+ *
+ *      Returns the original interrupt state.
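+ *
+ *      Typical pairing (sketch):
+ *
+ *          flags = inband_irq_save();
+ *          ... virtually interrupt-free section ...
+ *          inband_irq_restore(flags);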
+ */
+trace_on_debug unsigned long inband_irq_save(void)
+{
+	check_inband_stage();
+	return test_and_stall_inband_nocheck();
+}
+EXPORT_SYMBOL(inband_irq_save);
+
+/**
+ *	inband_irq_restore - restore the (virtual) interrupt state
+ *      @x:	Interrupt state to restore
+ *
+ *	Restore the virtual interrupt state from x. If the inband
+ *	stage is unstalled as a consequence of this operation, any
+ *	interrupt pending for the inband stage in the per-CPU log is
+ *	played back.
+ */
+trace_on_debug void inband_irq_restore(unsigned long flags)
+{
+	if (flags)
+		inband_irq_disable();
+	else
+		__inband_irq_enable();
+}
+EXPORT_SYMBOL(inband_irq_restore);
+
+/**
+ *	oob_irq_enable - enable interrupts in the CPU
+ *
+ *	Enable interrupts in the CPU, allowing out-of-band interrupts
+ *	to preempt any code. If out-of-band IRQs are pending in the
+ *	per-CPU log for the oob stage at the time of this call, they
+ *	are played back.
+ */
+trace_on_debug void oob_irq_enable(void)
+{
+	struct irq_stage_data *p;
+
+	hard_local_irq_disable();
+
+	unstall_oob();
+
+	p = this_oob_staged();
+	if (unlikely(stage_irqs_pending(p)))
+		synchronize_pipeline();
+
+	hard_local_irq_enable();
+}
+EXPORT_SYMBOL(oob_irq_enable);
+
+/**
+ *	oob_irq_restore - restore the hardware interrupt state
+ *      @x:	Interrupt state to restore
+ *
+ *	Restore the hardware interrupt state from x. If the oob stage
+ *	is unstalled as a consequence of this operation, any interrupt
+ *	pending for the oob stage in the per-CPU log is played back
+ *	prior to turning IRQs on.
+ *
+ *      NOTE: Stalling the oob stage must always be paired with
+ *      disabling hard irqs and conversely when calling
+ *      oob_irq_restore(), otherwise the latter would badly misbehave
+ *      in unbalanced conditions.
+ */
+trace_on_debug void __oob_irq_restore(unsigned long flags) /* hw interrupt off */
+{
+	struct irq_stage_data *p = this_oob_staged();
+
+	check_hard_irqs_disabled();
+
+	if (!flags) {
+		unstall_oob();
+		if (unlikely(stage_irqs_pending(p)))
+			synchronize_pipeline();
+		hard_local_irq_enable();
+	}
+}
+EXPORT_SYMBOL(__oob_irq_restore);
+
+/**
+ *	stage_disabled - test the interrupt state of the current stage
+ *
+ *	Returns non-zero if interrupts are currently disabled for the
+ *	current interrupt stage, zero otherwise.
+ *      In other words, returns non-zero either if:
+ *      - interrupts are disabled for the OOB context (i.e. hard disabled),
+ *      - the inband stage is current and inband interrupts are disabled.
+ */
+noinstr bool stage_disabled(void)
+{
+	bool ret = true;
+
+	if (!hard_irqs_disabled()) {
+		ret = false;
+		if (running_inband())
+			ret = test_inband_stall();
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(stage_disabled);
+
+/**
+ *	test_and_lock_stage - test and disable interrupts for the current stage
+ *	@irqsoff:	Pointer to boolean denoting stage_disabled()
+ *                      on entry
+ *
+ *	Fully disables interrupts for the current stage. When the
+ *	inband stage is current, the stall bit is raised and hardware
+ *	IRQs are masked as well. Only the latter operation is
+ *	performed when the oob stage is current.
+ *
+ *      Returns the combined interrupt state on entry including the
+ *      real/hardware (in CPU) and virtual (inband stage) states. For
+ *      this reason, [test_and_]lock_stage() must be paired with
+ *      unlock_stage() exclusively. The combined irq state returned by
+ *      the former may NOT be passed to hard_local_irq_restore().
+ *
+ *      The interrupt state of the current stage in the return value
+ *      (i.e. stall bit for the inband stage, hardware interrupt bit
+ *      for the oob stage) must be testable using
+ *      arch_irqs_disabled_flags().
+ *
+ *	Notice that test_and_lock_stage() and unlock_stage() are raw
+ *	level ops, which substitute for raw_local_irq_save(),
+ *	raw_local_irq_restore() in lockdep code. Therefore, changes to
+ *	the in-band stall bit must not be propagated to the tracing
+ *	core (i.e. no trace_hardirqs_*() annotations).
+ */
+noinstr unsigned long test_and_lock_stage(int *irqsoff)
+{
+	unsigned long flags;
+	int stalled, dummy;
+
+	if (irqsoff == NULL)
+		irqsoff = &dummy;
+
+	/*
+	 * Combine the hard irq flag and the stall bit into a single
+	 * state word. We need to fill in the stall bit only if the
+	 * inband stage is current, otherwise it is not relevant.
+	 */
+	flags = hard_local_irq_save();
+	*irqsoff = hard_irqs_disabled_flags(flags);
+	if (running_inband()) {
+		stalled = test_and_stall_inband_nocheck();
+		flags = irqs_merge_flags(flags, stalled);
+		if (stalled)
+			*irqsoff = 1;
+	}
+
+	/*
+	 * CAUTION: don't ever pass this verbatim to
+	 * hard_local_irq_restore(). Only unlock_stage() knows how to
+	 * decode and use a combined state word.
+	 */
+	return flags;
+}
+EXPORT_SYMBOL_GPL(test_and_lock_stage);
+
+/**
+ *	unlock_stage - restore interrupts for the current stage
+ *	@flags: 	Combined interrupt state to restore as received from
+ *              	test_and_lock_stage()
+ *
+ *	Restore the virtual interrupt state if the inband stage is
+ *      current, and the hardware interrupt state unconditionally.
+ *      The per-CPU log is not played for any stage.
+ */
+noinstr void unlock_stage(unsigned long irqstate)
+{
+	unsigned long flags = irqstate;
+	int stalled;
+
+	WARN_ON_ONCE(irq_pipeline_debug_locking() && !hard_irqs_disabled());
+
+	if (running_inband()) {
+		flags = irqs_split_flags(irqstate, &stalled);
+		if (!stalled)
+			unstall_inband_nocheck();
+	}
+
+	/*
+	 * The hardware interrupt bit is the only flag which may be
+	 * present in the combined state at this point, all other
+	 * status bits have been cleared by irqs_merge_flags(), so
+	 * don't ever try to reload the hardware status register with
+	 * such value directly!
+	 */
+	if (!hard_irqs_disabled_flags(flags))
+		hard_local_irq_enable();
+}
+EXPORT_SYMBOL_GPL(unlock_stage);
+
+/**
+ * sync_inband_irqs	- Synchronize the inband log
+ *
+ * Play any deferred interrupt which might have been logged for the
+ * in-band stage while running with hard irqs on but stalled.
+ *
+ * Called from the unstalled in-band stage. Returns with hard irqs off.
+ */
+void sync_inband_irqs(void)
+{
+	struct irq_stage_data *p;
+
+	check_inband_stage();
+	WARN_ON_ONCE(irq_pipeline_debug() && irqs_disabled());
+
+	if (!hard_irqs_disabled())
+		hard_local_irq_disable();
+
+	p = this_inband_staged();
+	if (unlikely(stage_irqs_pending(p))) {
+		/* Do not pile up preemption frames. */
+		preempt_disable_notrace();
+		sync_current_irq_stage();
+		preempt_enable_no_resched_notrace();
+	}
+}
+
+static inline bool irq_post_check(struct irq_stage *stage, unsigned int irq)
+{
+	if (irq_pipeline_debug()) {
+		if (WARN_ONCE(!hard_irqs_disabled(),
+				"hard irqs on posting IRQ%u to %s\n",
+				irq, stage->name))
+			return true;
+		if (WARN_ONCE(irq >= IRQ_BITMAP_BITS,
+				"cannot post invalid IRQ%u to %s\n",
+				irq, stage->name))
+			return true;
+	}
+
+	return false;
+}
+
+#if __IRQ_STAGE_MAP_LEVELS == 4
+
+/* Must be called hard irqs off. */
+void irq_post_stage(struct irq_stage *stage, unsigned int irq)
+{
+	struct irq_stage_data *p = this_staged(stage);
+	int l0b, l1b, l2b;
+
+	if (irq_post_check(stage, irq))
+		return;
+
+	l0b = irq / (BITS_PER_LONG * BITS_PER_LONG * BITS_PER_LONG);
+	l1b = irq / (BITS_PER_LONG * BITS_PER_LONG);
+	l2b = irq / BITS_PER_LONG;
+
+	__set_bit(irq, p->log.map->flat);
+	__set_bit(l2b, p->log.map->index_2);
+	__set_bit(l1b, p->log.map->index_1);
+	__set_bit(l0b, &p->log.index_0);
+}
+EXPORT_SYMBOL_GPL(irq_post_stage);
+
+#define ltob_1(__n)  ((__n) * BITS_PER_LONG)
+#define ltob_2(__n)  (ltob_1(__n) * BITS_PER_LONG)
+#define ltob_3(__n)  (ltob_2(__n) * BITS_PER_LONG)
+
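+/*
+ * Walk the bitmap hierarchy top-down with __ffs() to find the lowest
+ * pending IRQ, clear its bit in the flat map, then trim the upper
+ * index levels as their sections become empty. Returns -1 if the log
+ * is empty.
+ */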
+static inline int pull_next_irq(struct irq_stage_data *p)
+{
+	unsigned long l0m, l1m, l2m, l3m;
+	int l0b, l1b, l2b, l3b;
+	unsigned int irq;
+
+	l0m = p->log.index_0;
+	if (l0m == 0)
+		return -1;
+	l0b = __ffs(l0m);
+	irq = ltob_3(l0b);
+
+	l1m = p->log.map->index_1[l0b];
+	if (unlikely(l1m == 0)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+	l1b = __ffs(l1m);
+	irq += ltob_2(l1b);
+
+	l2m = p->log.map->index_2[ltob_1(l0b) + l1b];
+	if (unlikely(l2m == 0)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+	l2b = __ffs(l2m);
+	irq += ltob_1(l2b);
+
+	l3m = p->log.map->flat[ltob_2(l0b) + ltob_1(l1b) + l2b];
+	if (unlikely(l3m == 0))
+		return -1;
+	l3b = __ffs(l3m);
+	irq += l3b;
+
+	__clear_bit(irq, p->log.map->flat);
+	if (p->log.map->flat[irq / BITS_PER_LONG] == 0) {
+		__clear_bit(l2b, &p->log.map->index_2[ltob_1(l0b) + l1b]);
+		if (p->log.map->index_2[ltob_1(l0b) + l1b] == 0) {
+			__clear_bit(l1b, &p->log.map->index_1[l0b]);
+			if (p->log.map->index_1[l0b] == 0)
+				__clear_bit(l0b, &p->log.index_0);
+		}
+	}
+
+	return irq;
+}
+
+#elif __IRQ_STAGE_MAP_LEVELS == 3
+
+/* Must be called hard irqs off. */
+void irq_post_stage(struct irq_stage *stage, unsigned int irq)
+{
+	struct irq_stage_data *p = this_staged(stage);
+	int l0b, l1b;
+
+	if (irq_post_check(stage, irq))
+		return;
+
+	l0b = irq / (BITS_PER_LONG * BITS_PER_LONG);
+	l1b = irq / BITS_PER_LONG;
+
+	__set_bit(irq, p->log.map->flat);
+	__set_bit(l1b, p->log.map->index_1);
+	__set_bit(l0b, &p->log.index_0);
+}
+EXPORT_SYMBOL_GPL(irq_post_stage);
+
+static inline int pull_next_irq(struct irq_stage_data *p)
+{
+	unsigned long l0m, l1m, l2m;
+	int l0b, l1b, l2b, irq;
+
+	l0m = p->log.index_0;
+	if (unlikely(l0m == 0))
+		return -1;
+
+	l0b = __ffs(l0m);
+	l1m = p->log.map->index_1[l0b];
+	if (l1m == 0)
+		return -1;
+
+	l1b = __ffs(l1m) + l0b * BITS_PER_LONG;
+	l2m = p->log.map->flat[l1b];
+	if (unlikely(l2m == 0)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	l2b = __ffs(l2m);
+	irq = l1b * BITS_PER_LONG + l2b;
+
+	__clear_bit(irq, p->log.map->flat);
+	if (p->log.map->flat[l1b] == 0) {
+		__clear_bit(l1b, p->log.map->index_1);
+		if (p->log.map->index_1[l0b] == 0)
+			__clear_bit(l0b, &p->log.index_0);
+	}
+
+	return irq;
+}
+
+#else /* __IRQ_STAGE_MAP_LEVELS == 2 */
+
+/* Must be called hard irqs off. */
+void irq_post_stage(struct irq_stage *stage, unsigned int irq)
+{
+	struct irq_stage_data *p = this_staged(stage);
+	int l0b = irq / BITS_PER_LONG;
+
+	if (irq_post_check(stage, irq))
+		return;
+
+	__set_bit(irq, p->log.map->flat);
+	__set_bit(l0b, &p->log.index_0);
+}
+EXPORT_SYMBOL_GPL(irq_post_stage);
+
+static inline int pull_next_irq(struct irq_stage_data *p)
+{
+	unsigned long l0m, l1m;
+	int l0b, l1b;
+
+	l0m = p->log.index_0;
+	if (l0m == 0)
+		return -1;
+
+	l0b = __ffs(l0m);
+	l1m = p->log.map->flat[l0b];
+	if (unlikely(l1m == 0)) {
+		WARN_ON_ONCE(1);
+		return -1;
+	}
+
+	l1b = __ffs(l1m);
+	__clear_bit(l1b, &p->log.map->flat[l0b]);
+	if (p->log.map->flat[l0b] == 0)
+		__clear_bit(l0b, &p->log.index_0);
+
+	return l0b * BITS_PER_LONG + l1b;
+}
+
+#endif  /* __IRQ_STAGE_MAP_LEVELS == 2 */
+
+/**
+ *	hard_preempt_disable - Disable preemption the hard way
+ *
+ *      Disable hardware interrupts in the CPU, and disable preemption
+ *      if currently running in-band code on the inband stage.
+ *
+ *      Return the hardware interrupt state.
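+ *
+ *      Meant to be paired with hard_preempt_enable(), e.g.:
+ *
+ *          flags = hard_preempt_disable();
+ *          ... hard atomic section ...
+ *          hard_preempt_enable(flags);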
+ */
+unsigned long hard_preempt_disable(void)
+{
+	unsigned long flags = hard_local_irq_save();
+
+	if (running_inband())
+		preempt_disable();
+
+	return flags;
+}
+EXPORT_SYMBOL_GPL(hard_preempt_disable);
+
+/**
+ *	hard_preempt_enable - Enable preemption the hard way
+ *
+ *      Enable preemption if currently running in-band code on the
+ *      inband stage, restoring the hardware interrupt state in the CPU.
+ *      The per-CPU log is not played for the oob stage.
+ */
+void hard_preempt_enable(unsigned long flags)
+{
+	if (running_inband()) {
+		preempt_enable_no_resched();
+		hard_local_irq_restore(flags);
+		if (!hard_irqs_disabled_flags(flags))
+			preempt_check_resched();
+	} else
+		hard_local_irq_restore(flags);
+}
+EXPORT_SYMBOL_GPL(hard_preempt_enable);
+
+static void handle_unexpected_irq(struct irq_desc *desc, irqreturn_t ret)
+{
+	unsigned int irq = irq_desc_get_irq(desc);
+	struct irqaction *action;
+
+	/*
+	 * Since IRQ_HANDLED was not received from any handler, we may
+	 * have a problem dealing with an OOB interrupt. The error
+	 * detection logic is as follows:
+	 *
+	 * - check and complain about any bogus return value from a
+	 * out-of-band IRQ handler: we only allow IRQ_HANDLED and
+	 * IRQ_NONE from those routines.
+	 *
+	 * - filter out spurious IRQs which may have been due to bus
+	 * asynchronicity; those tend to happen infrequently and
+	 * should not cause us to pull the brake (see
+	 * note_interrupt()).
+	 *
+	 * - otherwise, stop pipelining the IRQ line after a thousand
+	 * consecutive unhandled events.
+	 *
+	 * NOTE: we should already be holding desc->lock for non
+	 * per-cpu IRQs, since we should only get here from the
+	 * pipeline entry context.
+	 */
+
+	WARN_ON_ONCE(irq_pipeline_debug() &&
+		     !irq_settings_is_per_cpu(desc) &&
+		     !raw_spin_is_locked(&desc->lock));
+
+	if (ret != IRQ_NONE) {
+		printk(KERN_ERR "out-of-band irq event %d: bogus return value %x\n",
+		       irq, ret);
+		for_each_action_of_desc(desc, action)
+			printk(KERN_ERR "[<%p>] %pf",
+			       action->handler, action->handler);
+		printk(KERN_CONT "\n");
+		return;
+	}
+
+	if (time_after(jiffies, desc->last_unhandled + HZ/10))
+		desc->irqs_unhandled = 0;
+	else
+		desc->irqs_unhandled++;
+
+	desc->last_unhandled = jiffies;
+
+	if (unlikely(desc->irqs_unhandled > 1000)) {
+		printk(KERN_ERR "out-of-band irq %d: stuck or unexpected\n", irq);
+		irq_settings_clr_oob(desc);
+		desc->istate |= IRQS_SPURIOUS_DISABLED;
+		irq_disable(desc);
+	}
+}
+
+static inline void incr_irq_kstat(struct irq_desc *desc)
+{
+	if (irq_settings_is_per_cpu_devid(desc))
+		__kstat_incr_irqs_this_cpu(desc);
+	else
+		kstat_incr_irqs_this_cpu(desc);
+}
+
+/*
+ * do_oob_irq() - Handles interrupts over the oob stage. Hard irqs
+ * off.
+ */
+static void do_oob_irq(struct irq_desc *desc)
+{
+	bool percpu_devid = irq_settings_is_per_cpu_devid(desc);
+	unsigned int irq = irq_desc_get_irq(desc);
+	irqreturn_t ret = IRQ_NONE, res;
+	struct irqaction *action;
+	void *dev_id;
+
+	action = desc->action;
+	if (unlikely(action == NULL))
+		goto done;
+
+	if (percpu_devid) {
+		trace_irq_handler_entry(irq, action);
+		dev_id = raw_cpu_ptr(action->percpu_dev_id);
+		ret = action->handler(irq, dev_id);
+		trace_irq_handler_exit(irq, action, ret);
+	} else {
+		desc->istate &= ~IRQS_PENDING;
+		if (unlikely(irqd_irq_disabled(&desc->irq_data)))
+			return;
+		irqd_set(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+		raw_spin_unlock(&desc->lock);
+		for_each_action_of_desc(desc, action) {
+			trace_irq_handler_entry(irq, action);
+			dev_id = action->dev_id;
+			res = action->handler(irq, dev_id);
+			trace_irq_handler_exit(irq, action, res);
+			ret |= res;
+		}
+		raw_spin_lock(&desc->lock);
+		irqd_clear(&desc->irq_data, IRQD_IRQ_INPROGRESS);
+	}
+done:
+	incr_irq_kstat(desc);
+
+	if (likely(ret & IRQ_HANDLED)) {
+		desc->irqs_unhandled = 0;
+		return;
+	}
+
+	handle_unexpected_irq(desc, ret);
+}
+
+/*
+ * Over the inband stage, IRQs must be dispatched by the arch-specific
+ * arch_do_IRQ_pipelined() routine.
+ *
+ * Entered with hardirqs on, inband stalled.
+ */
+static inline
+void do_inband_irq(struct irq_desc *desc)
+{
+	arch_do_IRQ_pipelined(desc);
+	WARN_ON_ONCE(irq_pipeline_debug() && !irqs_disabled());
+}
+
+static inline bool is_active_edge_event(struct irq_desc *desc)
+{
+	return (desc->istate & IRQS_PENDING) &&
+		!irqd_irq_disabled(&desc->irq_data);
+}
+
+bool handle_oob_irq(struct irq_desc *desc) /* hardirqs off */
+{
+	struct irq_stage_data *oobd = this_oob_staged();
+	unsigned int irq = irq_desc_get_irq(desc);
+	int stalled;
+
+	/*
+	 * Flow handlers of chained interrupts have no business
+	 * running here: they should decode the event, invoking
+	 * generic_handle_irq() for each cascaded IRQ.
+	 */
+	if (WARN_ON_ONCE(irq_pipeline_debug() &&
+			 irq_settings_is_chained(desc)))
+		return false;
+
+	/*
+	 * If no oob stage is present, all interrupts must go to the
+	 * inband stage through the interrupt log. Otherwise,
+	 * out-of-band IRQs are immediately delivered to the oob
+	 * stage, while in-band IRQs still go through the inband stage
+	 * log.
+	 *
+	 * This routine returns a boolean status telling the caller
+	 * whether an out-of-band interrupt was delivered.
+	 */
+	if (!oob_stage_present() || !irq_settings_is_oob(desc)) {
+		irq_post_stage(&inband_stage, irq);
+		return false;
+	}
+
+	if (WARN_ON_ONCE(irq_pipeline_debug() && running_inband()))
+		return false;
+
+	stalled = test_and_stall_oob();
+
+	if (unlikely(desc->istate & IRQS_EDGE)) {
+		do {
+			if (is_active_edge_event(desc))  {
+				if (irqd_irq_masked(&desc->irq_data))
+					unmask_irq(desc);
+			}
+			do_oob_irq(desc);
+		} while (is_active_edge_event(desc));
+	} else {
+		do_oob_irq(desc);
+	}
+
+	/*
+	 * Cascaded interrupts enter handle_oob_irq() on the stalled
+	 * out-of-band stage during the parent invocation. Make sure
+	 * to restore the stall bit accordingly.
+	 */
+	if (likely(!stalled))
+		unstall_oob();
+
+	/*
+	 * CPU migration and/or stage switching over the handler are
+	 * NOT allowed. These should take place over
+	 * irq_exit_pipeline().
+	 */
+	if (irq_pipeline_debug()) {
+		/* No CPU migration allowed. */
+		WARN_ON_ONCE(this_oob_staged() != oobd);
+		/* No stage migration allowed. */
+		WARN_ON_ONCE(current_irq_staged != oobd);
+	}
+
+	return true;
+}
+
+static inline
+void copy_timer_regs(struct irq_desc *desc, struct pt_regs *regs)
+{
+	struct irq_pipeline_data *p;
+
+	if (desc->action == NULL || !(desc->action->flags & __IRQF_TIMER))
+		return;
+	/*
+	 * Given our deferred dispatching model for regular IRQs, we
+	 * record the preempted context registers only for the latest
+	 * timer interrupt, so that the regular tick handler charges
+	 * CPU times properly. It is assumed that no other interrupt
+	 * CPU time properly. It is assumed that no other interrupt
+	 */
+	p = raw_cpu_ptr(&irq_pipeline);
+	arch_save_timer_regs(&p->tick_regs, regs);
+}
+
+static __always_inline
+struct irq_stage_data *switch_stage_on_irq(void)
+{
+	struct irq_stage_data *prevd = current_irq_staged, *nextd;
+
+	if (oob_stage_present()) {
+		nextd = this_oob_staged();
+		if (prevd != nextd)
+			switch_oob(nextd);
+	}
+
+	return prevd;
+}
+
+static __always_inline
+void restore_stage_on_irq(struct irq_stage_data *prevd)
+{
+	/*
+	 * CPU migration and/or stage switching over
+	 * irq_exit_pipeline() are allowed.  Our exit logic is as
+	 * follows:
+	 *
+	 *    ENTRY      EXIT      EPILOGUE
+	 *
+	 *    oob        oob       nop
+	 *    inband     oob       switch inband
+	 *    oob        inband    nop
+	 *    inband     inband    nop
+	 */
+	if (prevd->stage == &inband_stage &&
+		current_irq_staged == this_oob_staged())
+		switch_inband(this_inband_staged());
+}
+
+/**
+ *	generic_pipeline_irq_desc - Pass an IRQ to the pipeline
+ *	@desc:	Descriptor of the IRQ to pass
+ *	@regs:	Register file coming from the low-level handling code
+ *
+ *	Inject an IRQ into the pipeline from a CPU interrupt or trap
+ *	context.  A flow handler runs next for this IRQ.
+ *
+ *      Hard irqs must be off on entry. Caller should have pushed the
+ *      IRQ regs using set_irq_regs().
+ */
+void generic_pipeline_irq_desc(struct irq_desc *desc, struct pt_regs *regs)
+{
+	int irq = irq_desc_get_irq(desc);
+
+	if (irq_pipeline_debug() && !hard_irqs_disabled()) {
+		hard_local_irq_disable();
+		pr_err("IRQ pipeline: interrupts enabled on entry (IRQ%u)\n", irq);
+	}
+
+	trace_irq_pipeline_entry(irq);
+	copy_timer_regs(desc, regs);
+	generic_handle_irq_desc(desc);
+	trace_irq_pipeline_exit(irq);
+}
+
+void generic_pipeline_irq(unsigned int irq, struct pt_regs *regs)
+{
+	struct irq_desc *desc = irq_to_cached_desc(irq);
+	struct pt_regs *old_regs;
+
+	old_regs = set_irq_regs(regs);
+	generic_pipeline_irq_desc(desc, regs);
+	set_irq_regs(old_regs);
+}
+
+struct irq_stage_data *handle_irq_pipelined_prepare(struct pt_regs *regs)
+{
+	struct irq_stage_data *prevd;
+
+	/*
+	 * Running with the oob stage stalled implies hardirqs off.
+	 * For this reason, if the oob stage is stalled when we
+	 * receive an interrupt from the hardware, something is badly
+	 * broken in our interrupt state. Try fixing up, but without
+	 * great hopes.
+	 */
+	if (irq_pipeline_debug()) {
+		if (test_oob_stall()) {
+			pr_err("IRQ pipeline: out-of-band stage stalled on IRQ entry\n");
+			unstall_oob();
+		}
+		WARN_ON(on_pipeline_entry());
+	}
+
+	/*
+	 * Switch early on to the out-of-band stage if present,
+	 * anticipating a companion kernel is going to handle the
+	 * incoming event. If not, never mind, we will switch back
+	 * in-band before synchronizing interrupts.
+	 */
+	prevd = switch_stage_on_irq();
+
+	/* Tell the companion core about the entry. */
+	irq_enter_pipeline();
+
+	/*
+	 * Invariant: IRQs may not pile up in the section covered by
+	 * the PIPELINE_OFFSET marker, because:
+	 *
+	 * - out-of-band handlers called from handle_oob_irq() may NOT
+	 * re-enable hard interrupts. Ever.
+	 *
+	 * - synchronizing the in-band log with hard interrupts
+	 * enabled is done outside of this section.
+	 */
+	preempt_count_add(PIPELINE_OFFSET);
+
+	/*
+	 * From the standpoint of the in-band context when pipelining
+	 * is in effect, an interrupt entry is unsafe in a similar way
+	 * an NMI is, since it may preempt almost anywhere as IRQs are
+	 * only virtually masked most of the time, including inside
+	 * (virtually) interrupt-free sections. Declare an NMI entry
+	 * so that the low-level handling code is allowed to enter RCU
+	 * read sides (e.g. handle_domain_irq() needs this to resolve
+	 * IRQ mappings).
+	 */
+	rcu_nmi_enter();
+
+	return prevd;
+}
+
+int handle_irq_pipelined_finish(struct irq_stage_data *prevd,
+				struct pt_regs *regs)
+{
+	/*
+	 * Leave the (pseudo-)NMI entry for RCU before the out-of-band
+	 * core gets a chance to reschedule from irq_exit_pipeline(),
+	 * after which interrupts might be hard enabled again on this
+	 * CPU as a result of switching context.
+	 */
+	rcu_nmi_exit();
+
+	/*
+	 * Make sure to leave the pipeline entry context before
+	 * allowing the companion core to reschedule, and eventually
+	 * synchronizing interrupts.
+	 */
+	preempt_count_sub(PIPELINE_OFFSET);
+
+	/* Allow the companion core to reschedule. */
+	irq_exit_pipeline();
+
+	/* Back to the preempted stage. */
+	restore_stage_on_irq(prevd);
+
+	/*
+	 * We have to synchronize interrupts because some might have
+	 * been logged while we were busy handling an out-of-band
+	 * event coming from the hardware:
+	 *
+	 * - as a result of calling an out-of-band handler which in
+	 * turn posted them.
+	 *
+	 * - because we posted them directly for scheduling the
+	 * interrupt to happen from the in-band stage.
+	 */
+	synchronize_pipeline_on_irq();
+
+#ifdef CONFIG_DOVETAIL
+	/*
+	 * Sending MAYDAY is a rare event, so prefer a test followed
+	 * by a conditional clear over test_and_clear.
+	 */
+	if (user_mode(regs) && test_thread_flag(TIF_MAYDAY))
+		dovetail_call_mayday(regs);
+#endif
+
+	return running_inband() && !irqs_disabled();
+}
+
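+/*
+ * All-in-one helper for arch IRQ entry code: prepare the pipeline,
+ * dispatch the event via handle_arch_irq(), then run the exit
+ * epilogue. Returns non-zero when resuming over the in-band stage
+ * with interrupts enabled, in which case the regular irq exit work
+ * is presumably safe to run.
+ */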
+int handle_irq_pipelined(struct pt_regs *regs)
+{
+	struct irq_stage_data *prevd;
+
+	prevd = handle_irq_pipelined_prepare(regs);
+	handle_arch_irq(regs);
+	return handle_irq_pipelined_finish(prevd, regs);
+}
+
+/**
+ *	irq_inject_pipeline - Inject a software-generated IRQ into the pipeline
+ *	@irq: IRQ to inject
+ *
+ *	Inject an IRQ into the pipeline by software as if such a
+ *	hardware event had happened on the current CPU.
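+ *
+ *	Returns 0 on success, or -EINVAL if @irq maps to no valid
+ *	descriptor.
+ *
+ *	A minimal usage sketch, e.g. for resending an interrupt by
+ *	software (the IRQ number is illustrative):
+ *
+ *	    if (irq_inject_pipeline(virq))
+ *		pr_warn("IRQ%u: cannot inject\n", virq);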
+ */
+int irq_inject_pipeline(unsigned int irq)
+{
+	struct irq_stage_data *oobd, *prevd;
+	struct irq_desc *desc;
+	unsigned long flags;
+
+	desc = irq_to_cached_desc(irq);
+	if (desc == NULL)
+		return -EINVAL;
+
+	flags = hard_local_irq_save();
+
+	/*
+	 * Handle the case of an IRQ sent to a stalled oob stage here,
+	 * which allows us to trap the same condition in
+	 * handle_oob_irq() in a debug check (see comment there).
+	 */
+	oobd = this_oob_staged();
+	if (oob_stage_present() &&
+		irq_settings_is_oob(desc) &&
+		test_oob_stall()) {
+		irq_post_stage(&oob_stage, irq);
+	} else {
+		prevd = switch_stage_on_irq();
+		irq_enter_pipeline();
+		handle_oob_irq(desc);
+		irq_exit_pipeline();
+		restore_stage_on_irq(prevd);
+		synchronize_pipeline_on_irq();
+	}
+
+	hard_local_irq_restore(flags);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(irq_inject_pipeline);
+
+/*
+ * sync_current_irq_stage() -- Flush the pending IRQs for the current
+ * stage (and processor). This routine flushes the interrupt log (see
+ * "Optimistic interrupt protection" from D. Stodolsky et al. for more
+ * on the deferred interrupt scheme). Every interrupt which has
+ * occurred while the pipeline was stalled gets played.
+ *
+ * CAUTION: CPU migration may occur over this routine if running over
+ * the inband stage.
+ */
+void sync_current_irq_stage(void) /* hard irqs off */
+{
+	struct irq_stage_data *p;
+	struct irq_stage *stage;
+	struct irq_desc *desc;
+	int irq;
+
+	WARN_ON_ONCE(irq_pipeline_debug() && on_pipeline_entry());
+	check_hard_irqs_disabled();
+
+	p = current_irq_staged;
+respin:
+	stage = p->stage;
+	if (stage == &inband_stage) {
+		/*
+		 * Since we manipulate the stall bit directly, we have
+		 * to open code the IRQ state tracing.
+		 */
+		stall_inband_nocheck();
+		trace_hardirqs_off();
+	} else {
+		stall_oob();
+	}
+
+	for (;;) {
+		irq = pull_next_irq(p);
+		if (irq < 0)
+			break;
+		/*
+		 * Prevent the compiler from reordering, so that all
+		 * updates to the event maps are done before the
+		 * handler gets called.
+		 */
+		barrier();
+
+		desc = irq_to_cached_desc(irq);
+
+		if (stage == &inband_stage) {
+			hard_local_irq_enable();
+			do_inband_irq(desc);
+			hard_local_irq_disable();
+		} else {
+			do_oob_irq(desc);
+		}
+
+		/*
+		 * We might have switched from the oob stage to the
+		 * in-band one on return from the handler, in which
+		 * case we might also have migrated to a different CPU
+		 * (the converse in-band -> oob switch is NOT allowed
+		 * though). Reload the current per-cpu context
+		 * pointer, so that we further pull pending interrupts
+		 * from the proper in-band log.
+		 */
+		p = current_irq_staged;
+		if (p->stage != stage) {
+			if (WARN_ON_ONCE(irq_pipeline_debug() &&
+					stage == &inband_stage))
+				break;
+			goto respin;
+		}
+	}
+
+	if (stage == &inband_stage) {
+		trace_hardirqs_on();
+		unstall_inband_nocheck();
+	} else {
+		unstall_oob();
+	}
+}
+
+#ifndef CONFIG_GENERIC_ENTRY
+
+/*
+ * These helpers are normally called from the kernel entry/exit code
+ * in the asm section by architectures which do not use the generic
+ * kernel entry code, in order to save the interrupt and lockdep
+ * states for the in-band stage on entry, restoring them when leaving
+ * the kernel.  The per-architecture arch_kentry_set/get_irqstate()
+ * calls determine where this information should be kept while running
+ * in kernel context, indexed on the current register frame.
+ */
+
+#define KENTRY_STALL_BIT      BIT(0) /* Tracks INBAND_STALL_BIT */
+#define KENTRY_LOCKDEP_BIT    BIT(1) /* Tracks hardirqs_enabled */
+
+asmlinkage __visible noinstr void kentry_enter_pipelined(struct pt_regs *regs)
+{
+	long irqstate = 0;
+
+	WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled());
+
+	if (!running_inband())
+		return;
+
+	if (lockdep_read_irqs_state())
+		irqstate |= KENTRY_LOCKDEP_BIT;
+
+	if (irqs_disabled())
+		irqstate |= KENTRY_STALL_BIT;
+	else
+		trace_hardirqs_off();
+
+	arch_kentry_set_irqstate(regs, irqstate);
+}
+
+asmlinkage void __visible noinstr kentry_exit_pipelined(struct pt_regs *regs)
+{
+	long irqstate;
+
+	WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled());
+
+	if (!running_inband())
+		return;
+
+	/*
+	 * If the in-band stage of the kernel is current but the IRQ
+	 * is not going to be delivered because the latter is stalled,
+	 * keep the tracing logic unaware of the receipt, so that no
+	 * false positive is triggered in lockdep (e.g. IN-HARDIRQ-W
+	 * -> HARDIRQ-ON-W). In this case, we still have to restore
+	 * the lockdep irq state independently, since it might not be
+	 * in sync with the stall bit (e.g. raw_local_irq_disable/save
+	 * do flip the stall bit, but are not tracked by lockdep).
+	 */
+
+	irqstate = arch_kentry_get_irqstate(regs);
+	if (!(irqstate & KENTRY_STALL_BIT)) {
+		stall_inband_nocheck();
+		trace_hardirqs_on();
+		unstall_inband_nocheck();
+	} else {
+		lockdep_write_irqs_state(!!(irqstate & KENTRY_LOCKDEP_BIT));
+	}
+}
+
+#endif /* !CONFIG_GENERIC_ENTRY */
+
+/**
+ *      run_oob_call - Escalate a function call to the oob stage
+ *      @fn:    address of routine
+ *      @arg:   routine argument
+ *
+ *      Make the specified function run on the oob stage, switching
+ *      the current stage accordingly if needed. The escalated call is
+ *      allowed to perform a stage migration in the process.
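+ *
+ *      Returns the value returned by @fn.
+ *
+ *      A minimal sketch (do_flip() is an illustrative name); @fn is
+ *      entered on the oob stage with hard irqs off:
+ *
+ *          static int do_flip(void *arg)
+ *          {
+ *                  return 0;
+ *          }
+ *
+ *          ret = run_oob_call(do_flip, NULL);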
+ */
+int notrace run_oob_call(int (*fn)(void *arg), void *arg)
+{
+	struct irq_stage_data *p, *old;
+	struct irq_stage *oob;
+	unsigned long flags;
+	int ret, s;
+
+	flags = hard_local_irq_save();
+
+	/* Switch to the oob stage if not current. */
+	p = this_oob_staged();
+	oob = p->stage;
+	old = current_irq_staged;
+	if (old != p)
+		switch_oob(p);
+
+	s = test_and_stall_oob();
+	barrier();
+	ret = fn(arg);
+	hard_local_irq_disable();
+	if (!s)
+		unstall_oob();
+
+	/*
+	 * The exit logic is as follows:
+	 *
+	 *    ON-ENTRY  AFTER-CALL  EPILOGUE
+	 *
+	 *    oob       oob         sync current stage if !stalled
+	 *    inband    oob         switch to inband + sync all stages
+	 *    oob       inband      sync all stages
+	 *    inband    inband      sync all stages
+	 *
+	 * Each path which has stalled the oob stage while running on
+	 * the inband stage at some point during the escalation
+	 * process must synchronize all stages of the pipeline on
+	 * exit. Otherwise, we may restrict the synchronization scope
+	 * to the current stage when the whole sequence ran on the oob
+	 * stage.
+	 */
+	p = this_oob_staged();
+	if (likely(current_irq_staged == p)) {
+		if (old->stage == oob) {
+			if (!s && stage_irqs_pending(p))
+				sync_current_irq_stage();
+			goto out;
+		}
+		switch_inband(this_inband_staged());
+	}
+
+	sync_irq_stage(oob);
+out:
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(run_oob_call);
+
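+/*
+ * enable_oob_stage() - Install the out-of-band stage on all CPUs.
+ * @name: Name under which the stage is advertised; a companion core
+ * such as Xenomai's Cobalt would typically pass its own here.
+ *
+ * Returns 0 on success, -EBUSY if an oob stage is already present,
+ * or the error propagated from arch_enable_oob_stage().
+ */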
+int enable_oob_stage(const char *name)
+{
+	struct irq_event_map *map;
+	struct irq_stage_data *p;
+	int cpu, ret;
+
+	if (oob_stage_present())
+		return -EBUSY;
+
+	/* Set up the out-of-band interrupt stage on all CPUs. */
+
+	for_each_possible_cpu(cpu) {
+		p = &per_cpu(irq_pipeline.stages, cpu)[1];
+		map = p->log.map; /* save/restore after memset(). */
+		memset(p, 0, sizeof(*p));
+		p->stage = &oob_stage;
+		memset(map, 0, sizeof(struct irq_event_map));
+		p->log.map = map;
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+		p->cpu = cpu;
+#endif
+	}
+
+	ret = arch_enable_oob_stage();
+	if (ret)
+		return ret;
+
+	oob_stage.name = name;
+	smp_wmb();
+	oob_stage.index = 1;
+
+	pr_info("IRQ pipeline: high-priority %s stage added.\n", name);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(enable_oob_stage);
+
+void disable_oob_stage(void)
+{
+	const char *name = oob_stage.name;
+
+	WARN_ON(!running_inband() || !oob_stage_present());
+
+	oob_stage.index = 0;
+	smp_wmb();
+
+	pr_info("IRQ pipeline: %s stage removed.\n", name);
+}
+EXPORT_SYMBOL_GPL(disable_oob_stage);
+
+void irq_pipeline_oops(void)
+{
+	irq_pipeline_oopsing = true;
+	local_irq_disable_full();
+}
+
+/*
+ * Used to save/restore the status bits of the inband stage across runs
+ * of NMI-triggered code, so that we can restore the original pipeline
+ * state before leaving NMI context.
+ */
+static DEFINE_PER_CPU(unsigned long, nmi_saved_stall_bits);
+
+noinstr void irq_pipeline_nmi_enter(void)
+{
+	raw_cpu_write(nmi_saved_stall_bits, current->stall_bits);
+}
+EXPORT_SYMBOL(irq_pipeline_nmi_enter);
+
+noinstr void irq_pipeline_nmi_exit(void)
+{
+	current->stall_bits = raw_cpu_read(nmi_saved_stall_bits);
+}
+EXPORT_SYMBOL(irq_pipeline_nmi_exit);
+
+bool __weak irq_cpuidle_control(struct cpuidle_device *dev,
+				struct cpuidle_state *state)
+{
+	/*
+	 * Allow entering the idle state by default, matching the
+	 * original behavior when CPU_IDLE is turned on.
+	 * irq_cpuidle_control() may be overridden by out-of-band code
+	 * to determine whether the CPU may actually enter the idle
+	 * state.
+	 */
+	return true;
+}
+
+/**
+ *	irq_cpuidle_enter - Prepare for entering the next idle state
+ *	@dev: CPUIDLE device
+ *	@state: CPUIDLE state to be entered
+ *
+ *	Flush the in-band interrupt log before the caller idles, so
+ *	that no event lingers while we wait for the next IRQ; if some
+ *	event is pending, we ask the caller to abort the idling
+ *	process altogether. The companion core is also given the
+ *	opportunity to block the idling process by having
+ *	irq_cpuidle_control() return @false.
+ *
+ *	Returns @true if the caller may proceed with idling, @false
+ *	otherwise. The in-band log is guaranteed empty on return, with
+ *	hard irqs left off so that no event might sneak in until the
+ *	caller actually idles.
+ */
+bool irq_cpuidle_enter(struct cpuidle_device *dev,
+		       struct cpuidle_state *state)
+{
+	WARN_ON_ONCE(irq_pipeline_debug() && !irqs_disabled());
+
+	hard_local_irq_disable();
+
+	if (stage_irqs_pending(this_inband_staged())) {
+		unstall_inband_nocheck();
+		synchronize_pipeline();
+		stall_inband_nocheck();
+		trace_hardirqs_off();
+		return false;
+	}
+
+	return irq_cpuidle_control(dev, state);
+}
+
+static unsigned int inband_work_sirq;
+
+static irqreturn_t inband_work_interrupt(int sirq, void *dev_id)
+{
+	irq_work_run();
+
+	return IRQ_HANDLED;
+}
+
+static struct irqaction inband_work = {
+	.handler = inband_work_interrupt,
+	.name = "in-band work",
+	.flags = IRQF_NO_THREAD,
+};
+
+void irq_local_work_raise(void)
+{
+	unsigned long flags;
+
+	/*
+	 * irq_work_queue() may also be called from the in-band stage
+	 * when we want to delay a work item until hard irqs are on
+	 * again, so we may only sync the in-band log when unstalled,
+	 * with hard irqs on.
+	 */
+	flags = hard_local_irq_save();
+	irq_post_inband(inband_work_sirq);
+	if (running_inband() &&
+	    !hard_irqs_disabled_flags(flags) && !irqs_disabled())
+		sync_current_irq_stage();
+	hard_local_irq_restore(flags);
+}
+
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+
+#ifdef CONFIG_LOCKDEP
+static inline bool lockdep_on_error(void)
+{
+	return !debug_locks;
+}
+#else
+static inline bool lockdep_on_error(void)
+{
+	return false;
+}
+#endif
+
+notrace void check_inband_stage(void)
+{
+	struct irq_stage *this_stage;
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+
+	this_stage = current_irq_stage;
+	if (likely(this_stage == &inband_stage && !test_oob_stall())) {
+		hard_local_irq_restore(flags);
+		return;
+	}
+
+	if (in_nmi() || irq_pipeline_oopsing || lockdep_on_error()) {
+		hard_local_irq_restore(flags);
+		return;
+	}
+
+	/*
+	 * This will disable all further pipeline debug checks, since
+	 * a wrecked interrupt state is likely to trigger many of
+	 * them, ending up in a terrible mess. IOW, the current
+	 * situation must be fixed prior to investigating any
+	 * subsequent issue that might still exist.
+	 */
+	irq_pipeline_oopsing = true;
+
+	hard_local_irq_restore(flags);
+
+	if (this_stage != &inband_stage)
+		pr_err("IRQ pipeline: some code running in oob context '%s'\n"
+		       "              called an in-band only routine\n",
+		       this_stage->name);
+	else
+		pr_err("IRQ pipeline: oob stage found stalled while modifying in-band\n"
+		       "              interrupt state and/or running sleeping code\n");
+
+	dump_stack();
+}
+EXPORT_SYMBOL(check_inband_stage);
+
+void check_spinlock_context(void)
+{
+	WARN_ON_ONCE(in_pipeline() || running_oob());
+}
+EXPORT_SYMBOL(check_spinlock_context);
+
+#endif /* CONFIG_DEBUG_IRQ_PIPELINE */
+
+static inline void fixup_percpu_data(void)
+{
+#ifdef CONFIG_SMP
+	struct irq_pipeline_data *p;
+	int cpu;
+
+	/*
+	 * A temporary event log is used by the inband stage during
+	 * early boot (bootup_irq_map), until the per-cpu areas have
+	 * been set up.
+	 *
+	 * Obviously, this code must run over the boot CPU, before SMP
+	 * operations start, with hard IRQs off so that nothing can
+	 * change under our feet.
+	 */
+	WARN_ON(!hard_irqs_disabled());
+
+	memcpy(&per_cpu(irq_map_array, 0)[0], &bootup_irq_map,
+	       sizeof(struct irq_event_map));
+
+	for_each_possible_cpu(cpu) {
+		p = &per_cpu(irq_pipeline, cpu);
+		p->stages[0].stage = &inband_stage;
+		p->stages[0].log.map = &per_cpu(irq_map_array, cpu)[0];
+		p->stages[1].log.map = &per_cpu(irq_map_array, cpu)[1];
+#ifdef CONFIG_DEBUG_IRQ_PIPELINE
+		p->stages[0].cpu = cpu;
+		p->stages[1].cpu = cpu;
+#endif
+	}
+#endif
+}
+
+void __init irq_pipeline_init_early(void)
+{
+	/*
+	 * This is called early from start_kernel(), even before the
+	 * actual number of IRQs is known. We are running on the boot
+	 * CPU, hw interrupts are off, and secondary CPUs are still
+	 * lost in space. Careful.
+	 */
+	fixup_percpu_data();
+}
+
+/**
+ *	irq_pipeline_init - Main pipeline core inits
+ *
+ *	This is step #2 of the 3-step pipeline initialization, which
+ *	should happen right after init_IRQ() has run. The internal
+ *	service interrupts are created along with the synthetic IRQ
+ *	domain, and the arch-specific init chores are performed too.
+ *
+ *	Interrupt pipelining should be fully functional when this
+ *	routine returns.
+ */
+void __init irq_pipeline_init(void)
+{
+	WARN_ON(!hard_irqs_disabled());
+
+	synthetic_irq_domain = irq_domain_add_nomap(NULL, ~0,
+						    &sirq_domain_ops,
+						    NULL);
+	inband_work_sirq = irq_create_direct_mapping(synthetic_irq_domain);
+	setup_percpu_irq(inband_work_sirq, &inband_work);
+
+	/*
+	 * We are running on the boot CPU, hw interrupts are off, and
+	 * secondary CPUs are still lost in space. Now we may run
+	 * arch-specific code for enabling the pipeline.
+	 */
+	arch_irq_pipeline_init();
+
+	irq_pipeline_active = true;
+
+	pr_info("IRQ pipeline enabled\n");
+}
+
+#ifndef CONFIG_SPARSE_IRQ
+EXPORT_SYMBOL_GPL(irq_desc);
+#endif
diff --git a/kernel/kernel/irq/proc.c b/kernel/kernel/irq/proc.c
index 0459b69..cad725f 100644
--- a/kernel/kernel/irq/proc.c
+++ b/kernel/kernel/irq/proc.c
@@ -518,6 +518,9 @@
 #ifdef CONFIG_GENERIC_IRQ_SHOW_LEVEL
 	seq_printf(p, " %-8s", irqd_is_level_type(&desc->irq_data) ? "Level" : "Edge");
 #endif
+#ifdef CONFIG_IRQ_PIPELINE
+	seq_printf(p, " %-3s", irq_settings_is_oob(desc) ? "oob" : "");
+#endif
 	if (desc->name)
 		seq_printf(p, "-%-8s", desc->name);
 
diff --git a/kernel/kernel/irq/resend.c b/kernel/kernel/irq/resend.c
index 8ccd32a..01b9f23 100644
--- a/kernel/kernel/irq/resend.c
+++ b/kernel/kernel/irq/resend.c
@@ -16,10 +16,11 @@
 #include <linux/module.h>
 #include <linux/random.h>
 #include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
 
 #include "internals.h"
 
-#ifdef CONFIG_HARDIRQS_SW_RESEND
+#if defined(CONFIG_HARDIRQS_SW_RESEND) && !defined(CONFIG_IRQ_PIPELINE)
 
 /* Bitmap to handle software resend of interrupts: */
 static DECLARE_BITMAP(irqs_resend, IRQ_BITMAP_BITS);
@@ -82,7 +83,12 @@
 #else
 static int irq_sw_resend(struct irq_desc *desc)
 {
+#if defined(CONFIG_HARDIRQS_SW_RESEND) && defined(CONFIG_IRQ_PIPELINE)
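+	/*
+	 * With IRQ pipelining, a software resend goes through the
+	 * pipeline, so that the event is logged and played on the
+	 * proper stage as if the hardware had raised it again.
+	 */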
+	irq_inject_pipeline(irq_desc_get_irq(desc));
+	return 0;
+#else
 	return -EINVAL;
+#endif
 }
 #endif
 
diff --git a/kernel/kernel/irq/settings.h b/kernel/kernel/irq/settings.h
index 0033d45..aa97556 100644
--- a/kernel/kernel/irq/settings.h
+++ b/kernel/kernel/irq/settings.h
@@ -19,6 +19,8 @@
 	_IRQ_DISABLE_UNLAZY	= IRQ_DISABLE_UNLAZY,
 	_IRQ_HIDDEN		= IRQ_HIDDEN,
 	_IRQ_RAW		= IRQ_RAW,
+	_IRQ_OOB		= IRQ_OOB,
+	_IRQ_CHAINED		= IRQ_CHAINED,
 	_IRQF_MODIFY_MASK	= IRQF_MODIFY_MASK,
 };
 
@@ -35,6 +37,8 @@
 #define IRQ_DISABLE_UNLAZY	GOT_YOU_MORON
 #define IRQ_HIDDEN		GOT_YOU_MORON
 #define IRQ_RAW			GOT_YOU_MORON
+#define IRQ_OOB			GOT_YOU_MORON
+#define IRQ_CHAINED		GOT_YOU_MORON
 #undef IRQF_MODIFY_MASK
 #define IRQF_MODIFY_MASK	GOT_YOU_MORON
 
@@ -195,3 +199,33 @@
 	WARN_ON_ONCE(1);
 	return false;
 }
+
+static inline bool irq_settings_is_oob(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_OOB;
+}
+
+static inline void irq_settings_clr_oob(struct irq_desc *desc)
+{
+	desc->status_use_accessors &= ~_IRQ_OOB;
+}
+
+static inline void irq_settings_set_oob(struct irq_desc *desc)
+{
+	desc->status_use_accessors |= _IRQ_OOB;
+}
+
+static inline bool irq_settings_is_chained(struct irq_desc *desc)
+{
+	return desc->status_use_accessors & _IRQ_CHAINED;
+}
+
+static inline void irq_settings_set_chained(struct irq_desc *desc)
+{
+	desc->status_use_accessors |= _IRQ_CHAINED;
+}
+
+static inline void irq_settings_clr_chained(struct irq_desc *desc)
+{
+	desc->status_use_accessors &= ~_IRQ_CHAINED;
+}
diff --git a/kernel/kernel/irq_work.c b/kernel/kernel/irq_work.c
index e0ed16d..19417ce 100644
--- a/kernel/kernel/irq_work.c
+++ b/kernel/kernel/irq_work.c
@@ -49,6 +49,11 @@
 	 */
 }
 
+void __weak irq_local_work_raise(void)
+{
+	arch_irq_work_raise();
+}
+
 /* Enqueue on current CPU, work must already be claimed and preempt disabled */
 static void __irq_work_queue_local(struct irq_work *work)
 {
@@ -56,10 +61,10 @@
 	if (atomic_read(&work->flags) & IRQ_WORK_LAZY) {
 		if (llist_add(&work->llnode, this_cpu_ptr(&lazy_list)) &&
 		    tick_nohz_tick_stopped())
-			arch_irq_work_raise();
+			irq_local_work_raise();
 	} else {
 		if (llist_add(&work->llnode, this_cpu_ptr(&raised_list)))
-			arch_irq_work_raise();
+			irq_local_work_raise();
 	}
 }
 
diff --git a/kernel/kernel/kthread.c b/kernel/kernel/kthread.c
index 9d736f5..896383b 100644
--- a/kernel/kernel/kthread.c
+++ b/kernel/kernel/kthread.c
@@ -14,6 +14,7 @@
 #include <linux/sched/mm.h>
 #include <linux/sched/task.h>
 #include <linux/kthread.h>
+#include <linux/irq_pipeline.h>
 #include <linux/completion.h>
 #include <linux/err.h>
 #include <linux/cgroup.h>
@@ -1331,6 +1332,7 @@
 {
 	struct mm_struct *active_mm;
 	struct task_struct *tsk = current;
+	unsigned long flags;
 
 	WARN_ON_ONCE(!(tsk->flags & PF_KTHREAD));
 	WARN_ON_ONCE(tsk->mm);
@@ -1339,12 +1341,14 @@
 	/* Hold off tlb flush IPIs while switching mm's */
 	local_irq_disable();
 	active_mm = tsk->active_mm;
+	protect_inband_mm(flags);
 	if (active_mm != mm) {
 		mmgrab(mm);
 		tsk->active_mm = mm;
 	}
 	tsk->mm = mm;
 	switch_mm_irqs_off(active_mm, mm, tsk);
+	unprotect_inband_mm(flags);
 	local_irq_enable();
 	task_unlock(tsk);
 #ifdef finish_arch_post_lock_switch
diff --git a/kernel/kernel/locking/Makefile b/kernel/kernel/locking/Makefile
index 6d11cfb..c491040 100644
--- a/kernel/kernel/locking/Makefile
+++ b/kernel/kernel/locking/Makefile
@@ -28,6 +28,7 @@
 obj-$(CONFIG_DEBUG_RT_MUTEXES) += rtmutex-debug.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock.o
 obj-$(CONFIG_DEBUG_SPINLOCK) += spinlock_debug.o
+obj-$(CONFIG_IRQ_PIPELINE) += pipeline.o
 obj-$(CONFIG_QUEUED_RWLOCKS) += qrwlock.o
 obj-$(CONFIG_LOCK_TORTURE_TEST) += locktorture.o
 obj-$(CONFIG_WW_MUTEX_SELFTEST) += test-ww_mutex.o
diff --git a/kernel/kernel/locking/lockdep.c b/kernel/kernel/locking/lockdep.c
index 6cbd2b4..48f5a6b 100644
--- a/kernel/kernel/locking/lockdep.c
+++ b/kernel/kernel/locking/lockdep.c
@@ -42,6 +42,7 @@
 #include <linux/stacktrace.h>
 #include <linux/debug_locks.h>
 #include <linux/irqflags.h>
+#include <linux/irqstage.h>
 #include <linux/utsname.h>
 #include <linux/hash.h>
 #include <linux/ftrace.h>
@@ -104,9 +105,56 @@
 static arch_spinlock_t __lock = (arch_spinlock_t)__ARCH_SPIN_LOCK_UNLOCKED;
 static struct task_struct *__owner;
 
+static __always_inline bool lockdep_stage_disabled(void)
+{
+	return stage_disabled();
+}
+
+#ifdef CONFIG_IRQ_PIPELINE
+/*
+ * If LOCKDEP is enabled, we want irqs to be disabled for both stages
+ * when traversing the lockdep code for hard and mutable locks (at the
+ * expense of massive latency overhead though).
+ */
+static __always_inline unsigned long lockdep_stage_test_and_disable(int *irqsoff)
+{
+	return test_and_lock_stage(irqsoff);
+}
+
+static __always_inline unsigned long lockdep_stage_disable(void)
+{
+	return lockdep_stage_test_and_disable(NULL);
+}
+
+static __always_inline void lockdep_stage_restore(unsigned long flags)
+{
+	unlock_stage(flags);
+}
+
+#else
+
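+/*
+ * Without CONFIG_IRQ_PIPELINE, the stage helpers devolve to the
+ * regular raw irq state accessors, since only the in-band stage
+ * exists.
+ */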
+#define lockdep_stage_test_and_disable(__irqsoff)		\
+	({							\
+		unsigned long __flags;				\
+		raw_local_irq_save(__flags);			\
+		*(__irqsoff) = irqs_disabled_flags(__flags);	\
+		__flags;					\
+	})
+
+#define lockdep_stage_disable()					\
+	({							\
+		unsigned long __flags;				\
+		raw_local_irq_save(__flags);			\
+		__flags;					\
+	})
+
+#define lockdep_stage_restore(__flags)		raw_local_irq_restore(__flags)
+
+#endif /* !CONFIG_IRQ_PIPELINE */
+
 static inline void lockdep_lock(void)
 {
-	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+	DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
 
 	__this_cpu_inc(lockdep_recursion);
 	arch_spin_lock(&__lock);
@@ -115,7 +163,7 @@
 
 static inline void lockdep_unlock(void)
 {
-	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+	DEBUG_LOCKS_WARN_ON(!hard_irqs_disabled());
 
 	if (debug_locks && DEBUG_LOCKS_WARN_ON(__owner != current))
 		return;
@@ -882,7 +930,7 @@
 	/*
 	 * We do an RCU walk of the hash, see lockdep_free_key_range().
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
 		return NULL;
 
 	hlist_for_each_entry_rcu_notrace(class, hash_head, hash_entry) {
@@ -1179,7 +1227,7 @@
 		return;
 	hash_head = keyhashentry(key);
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	if (!graph_lock())
 		goto restore_irqs;
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -1190,7 +1238,7 @@
 out_unlock:
 	graph_unlock();
 restore_irqs:
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_register_key);
 
@@ -1239,7 +1287,7 @@
 	struct lock_class *class;
 	int idx;
 
-	DEBUG_LOCKS_WARN_ON(!irqs_disabled());
+	DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled());
 
 	class = look_up_lock_class(lock, subclass);
 	if (likely(class))
@@ -2035,11 +2083,11 @@
 
 	__bfs_init_root(&this, class);
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	ret = __lockdep_count_forward_deps(&this);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	return ret;
 }
@@ -2061,11 +2109,11 @@
 
 	__bfs_init_root(&this, class);
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	ret = __lockdep_count_backward_deps(&this);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	return ret;
 }
@@ -4170,6 +4218,8 @@
  */
 void lockdep_hardirqs_on_prepare(unsigned long ip)
 {
+	unsigned long flags;
+
 	if (unlikely(!debug_locks))
 		return;
 
@@ -4192,38 +4242,43 @@
 		return;
 	}
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * We're enabling irqs and according to our state above irqs weren't
 	 * already enabled, yet we find the hardware thinks they are in fact
 	 * enabled.. someone messed up their IRQ state tracing.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
+	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+		goto out;
 
 	/*
 	 * See the fine text that goes along with this variable definition.
 	 */
 	if (DEBUG_LOCKS_WARN_ON(early_boot_irqs_disabled))
-		return;
+		goto out;
 
 	/*
 	 * Can't allow enabling interrupts while in an interrupt handler,
 	 * that's general bad form and such. Recursion, limited stack etc..
 	 */
-	if (DEBUG_LOCKS_WARN_ON(lockdep_hardirq_context()))
-		return;
+	if (DEBUG_LOCKS_WARN_ON(running_inband() && lockdep_hardirq_context()))
+		goto out;
 
 	current->hardirq_chain_key = current->curr_chain_key;
 
 	lockdep_recursion_inc();
 	__trace_hardirqs_on_caller();
 	lockdep_recursion_finish();
+out:
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on_prepare);
 
 void noinstr lockdep_hardirqs_on(unsigned long ip)
 {
 	struct irqtrace_events *trace = &current->irqtrace;
+	unsigned long flags;
 
 	if (unlikely(!debug_locks))
 		return;
@@ -4261,13 +4316,15 @@
 		return;
 	}
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * We're enabling irqs and according to our state above irqs weren't
 	 * already enabled, yet we find the hardware thinks they are in fact
 	 * enabled.. someone messed up their IRQ state tracing.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
+	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+		goto out;
 
 	/*
 	 * Ensure the lock stack remained unchanged between
@@ -4282,6 +4339,8 @@
 	trace->hardirq_enable_ip = ip;
 	trace->hardirq_enable_event = ++trace->irq_events;
 	debug_atomic_inc(hardirqs_on_events);
+out:
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_on);
 
@@ -4290,6 +4349,8 @@
  */
 void noinstr lockdep_hardirqs_off(unsigned long ip)
 {
+	unsigned long flags;
+
 	if (unlikely(!debug_locks))
 		return;
 
@@ -4304,12 +4365,14 @@
 	} else if (__this_cpu_read(lockdep_recursion))
 		return;
 
+	flags = hard_cond_local_irq_save();
+
 	/*
 	 * So we're supposed to get called after you mask local IRQs, but for
 	 * some reason the hardware doesn't quite think you did a proper job.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
+	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+		goto out;
 
 	if (lockdep_hardirqs_enabled()) {
 		struct irqtrace_events *trace = &current->irqtrace;
@@ -4324,6 +4387,8 @@
 	} else {
 		debug_atomic_inc(redundant_hardirqs_off);
 	}
+out:
+	hard_cond_local_irq_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lockdep_hardirqs_off);
 
@@ -4333,20 +4398,23 @@
 void lockdep_softirqs_on(unsigned long ip)
 {
 	struct irqtrace_events *trace = &current->irqtrace;
+	unsigned long flags;
 
 	if (unlikely(!lockdep_enabled()))
 		return;
+
+	flags = hard_cond_local_irq_save();
 
 	/*
 	 * We fancy IRQs being disabled here, see softirq.c, avoids
 	 * funny state and nesting things.
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
+	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+		goto out;
 
 	if (current->softirqs_enabled) {
 		debug_atomic_inc(redundant_softirqs_on);
-		return;
+		goto out;
 	}
 
 	lockdep_recursion_inc();
@@ -4365,6 +4433,8 @@
 	if (lockdep_hardirqs_enabled())
 		mark_held_locks(current, LOCK_ENABLED_SOFTIRQ);
 	lockdep_recursion_finish();
+out:
+	hard_cond_local_irq_restore(flags);
 }
 
 /*
@@ -4372,14 +4442,18 @@
  */
 void lockdep_softirqs_off(unsigned long ip)
 {
+	unsigned long flags;
+
 	if (unlikely(!lockdep_enabled()))
 		return;
+
+	flags = hard_cond_local_irq_save();
 
 	/*
 	 * We fancy IRQs being disabled here, see softirq.c
 	 */
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
-		return;
+	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
+		goto out;
 
 	if (current->softirqs_enabled) {
 		struct irqtrace_events *trace = &current->irqtrace;
@@ -4397,6 +4471,8 @@
 		DEBUG_LOCKS_WARN_ON(!softirq_count());
 	} else
 		debug_atomic_inc(redundant_softirqs_off);
+out:
+	hard_cond_local_irq_restore(flags);
 }
 
 static int
@@ -4751,11 +4827,11 @@
 		if (DEBUG_LOCKS_WARN_ON(!lockdep_enabled()))
 			return;
 
-		raw_local_irq_save(flags);
+		flags = lockdep_stage_disable();
 		lockdep_recursion_inc();
 		register_lock_class(lock, subclass, 1);
 		lockdep_recursion_finish();
-		raw_local_irq_restore(flags);
+		lockdep_stage_restore(flags);
 	}
 }
 EXPORT_SYMBOL_GPL(lockdep_init_map_type);
@@ -5085,7 +5161,7 @@
 	struct held_lock *hlock;
 	int first_idx = idx;
 
-	if (DEBUG_LOCKS_WARN_ON(!irqs_disabled()))
+	if (DEBUG_LOCKS_WARN_ON(!lockdep_stage_disabled()))
 		return 0;
 
 	for (hlock = curr->held_locks + idx; idx < depth; idx++, hlock++) {
@@ -5397,7 +5473,13 @@
 static noinstr void check_flags(unsigned long flags)
 {
 #if defined(CONFIG_PROVE_LOCKING) && defined(CONFIG_DEBUG_LOCKDEP)
-	if (!debug_locks)
+	/*
+	 * irq_pipeline: we can't and don't want to check the
+	 * consistency of the irq tracer when running over the
+	 * pipeline entry or oob stage contexts, since the inband
+	 * stall bit does not reflect the current irq state there.
+	 */
+	if (on_pipeline_entry() || running_oob() || !debug_locks)
 		return;
 
 	/* Get the warning out..  */
@@ -5444,13 +5526,13 @@
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_set_class(lock, name, key, subclass, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_set_class);
 
@@ -5461,13 +5543,13 @@
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_recursion_inc();
 	check_flags(flags);
 	if (__lock_downgrade(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_downgrade);
 
@@ -5532,6 +5614,7 @@
 			  struct lockdep_map *nest_lock, unsigned long ip)
 {
 	unsigned long flags;
+	int irqsoff;
 
 	trace_lock_acquire(lock, subclass, trylock, read, check, nest_lock, ip);
 
@@ -5558,14 +5641,14 @@
 		return;
 	}
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_test_and_disable(&irqsoff);
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	__lock_acquire(lock, subclass, trylock, read, check,
-		       irqs_disabled_flags(flags), nest_lock, ip, 0, 0);
+		       irqsoff, nest_lock, ip, 0, 0);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_acquire);
 
@@ -5578,14 +5661,14 @@
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	if (__lock_release(lock, ip))
 		check_chain_key(current);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_release);
 
@@ -5597,13 +5680,13 @@
 	if (unlikely(!lockdep_enabled()))
 		return 1; /* avoid false negative lockdep_assert_held() */
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	ret = __lock_is_held(lock, read);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	return ret;
 }
@@ -5618,13 +5701,13 @@
 	if (unlikely(!lockdep_enabled()))
 		return cookie;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	cookie = __lock_pin_lock(lock);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	return cookie;
 }
@@ -5637,13 +5720,13 @@
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	__lock_repin_lock(lock, cookie);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_repin_lock);
 
@@ -5654,13 +5737,13 @@
 	if (unlikely(!lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 
 	lockdep_recursion_inc();
 	__lock_unpin_lock(lock, cookie);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_unpin_lock);
 
@@ -5790,12 +5873,12 @@
 	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 	lockdep_recursion_inc();
 	__lock_contended(lock, ip);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_contended);
 
@@ -5808,12 +5891,12 @@
 	if (unlikely(!lock_stat || !lockdep_enabled()))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	check_flags(flags);
 	lockdep_recursion_inc();
 	__lock_acquired(lock, ip);
 	lockdep_recursion_finish();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(lock_acquired);
 #endif
@@ -5828,7 +5911,7 @@
 	unsigned long flags;
 	int i;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_init_task(current);
 	memset(current->held_locks, 0, MAX_LOCK_DEPTH*sizeof(struct held_lock));
 	nr_hardirq_chains = 0;
@@ -5837,7 +5920,7 @@
 	debug_locks = 1;
 	for (i = 0; i < CHAINHASH_SIZE; i++)
 		INIT_HLIST_HEAD(chainhash_table + i);
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 /* Remove a class from a lock chain. Must be called with the graph lock held. */
@@ -6014,7 +6097,7 @@
 	if (WARN_ON_ONCE(ch != &delayed_free.rcu_head))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 
 	/* closed head */
@@ -6028,7 +6111,7 @@
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 /*
@@ -6071,13 +6154,13 @@
 
 	init_data_structures_once();
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	/*
 	 * Wait for any possible iterators from look_up_lock_class() to pass
@@ -6097,12 +6180,12 @@
 
 	init_data_structures_once();
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	__lockdep_free_key_range(pf, start, size);
 	__free_zapped_classes(pf);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 void lockdep_free_key_range(void *start, unsigned long size)
@@ -6173,7 +6256,7 @@
 	unsigned long flags;
 	int locked;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	locked = graph_lock();
 	if (!locked)
 		goto out_irq;
@@ -6184,7 +6267,7 @@
 
 	graph_unlock();
 out_irq:
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 /*
@@ -6196,12 +6279,12 @@
 	struct pending_free *pf = delayed_free.pf;
 	unsigned long flags;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 	__lockdep_reset_lock(pf, lock);
 	__free_zapped_classes(pf);
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 
 void lockdep_reset_lock(struct lockdep_map *lock)
@@ -6234,7 +6317,7 @@
 	if (WARN_ON_ONCE(static_obj(key)))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	lockdep_lock();
 
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
@@ -6251,7 +6334,7 @@
 		call_rcu_zapped(pf);
 	}
 	lockdep_unlock();
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
 	synchronize_rcu();
@@ -6342,7 +6425,7 @@
 	if (unlikely(!debug_locks))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = lockdep_stage_disable();
 	for (i = 0; i < curr->lockdep_depth; i++) {
 		hlock = curr->held_locks + i;
 
@@ -6353,7 +6436,7 @@
 		print_freed_lock_bug(curr, mem_from, mem_from + mem_len, hlock);
 		break;
 	}
-	raw_local_irq_restore(flags);
+	lockdep_stage_restore(flags);
 }
 EXPORT_SYMBOL_GPL(debug_check_no_locks_freed);
 
diff --git a/kernel/kernel/locking/lockdep_internals.h b/kernel/kernel/locking/lockdep_internals.h
index bbe9000..6f78acc 100644
--- a/kernel/kernel/locking/lockdep_internals.h
+++ b/kernel/kernel/locking/lockdep_internals.h
@@ -213,12 +213,12 @@
 	this_cpu_inc(lockdep_stats.ptr);
 
 #define debug_atomic_inc(ptr)			{		\
-	WARN_ON_ONCE(!irqs_disabled());				\
+	WARN_ON_ONCE(!hard_irqs_disabled() && !irqs_disabled());\
 	__this_cpu_inc(lockdep_stats.ptr);			\
 }
 
 #define debug_atomic_dec(ptr)			{		\
-	WARN_ON_ONCE(!irqs_disabled());				\
+	WARN_ON_ONCE(!hard_irqs_disabled() && !irqs_disabled());\
 	__this_cpu_dec(lockdep_stats.ptr);			\
 }
 
diff --git a/kernel/kernel/locking/pipeline.c b/kernel/kernel/locking/pipeline.c
new file mode 100644
index 0000000..fde458e
--- /dev/null
+++ b/kernel/kernel/locking/pipeline.c
@@ -0,0 +1,231 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2016 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/linkage.h>
+#include <linux/preempt.h>
+#include <linux/spinlock.h>
+#include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
+#include <linux/kconfig.h>
+
+/*
+ * A hybrid spinlock behaves in different ways depending on the
+ * current interrupt stage on entry.
+ *
+ * Such a spinlock always leaves hard IRQs disabled once locked. In
+ * addition, it stalls the in-band stage when protecting a critical
+ * section there, disabling preemption like regular spinlocks do as
+ * well. This combination preserves the regular locking logic when
+ * called from the in-band stage, while fully disabling preemption by
+ * other interrupt stages.
+ *
+ * When taken from the pipeline entry context, a hybrid lock behaves
+ * like a hard spinlock, assuming that hard IRQs are already disabled.
+ *
+ * The irq descriptor lock (struct irq_desc) is a typical example of
+ * such lock, which properly serializes accesses regardless of the
+ * calling context.
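+ *
+ * The __hybrid_* entry points below are not meant to be called
+ * directly: they are assumed to back the regular lock API for locks
+ * declared with the hybrid type.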
+ */
+void __hybrid_spin_lock(struct raw_spinlock *rlock)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+
+	if (running_inband())
+		preempt_disable();
+
+	__flags = hard_local_irq_save();
+	hard_lock_acquire(rlock, 0, _RET_IP_);
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	lock->hwflags = __flags;
+}
+EXPORT_SYMBOL(__hybrid_spin_lock);
+
+void __hybrid_spin_lock_nested(struct raw_spinlock *rlock, int subclass)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+
+	if (running_inband())
+		preempt_disable();
+
+	__flags = hard_local_irq_save();
+	hard_lock_acquire_nested(rlock, subclass, _RET_IP_);
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	lock->hwflags = __flags;
+}
+EXPORT_SYMBOL(__hybrid_spin_lock_nested);
+
+void __hybrid_spin_unlock(struct raw_spinlock *rlock)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+
+	/* Pick the flags before releasing the lock. */
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	__flags = lock->hwflags;
+	hard_lock_release(rlock, _RET_IP_);
+	hard_local_irq_restore(__flags);
+
+	if (running_inband())
+		preempt_enable();
+}
+EXPORT_SYMBOL(__hybrid_spin_unlock);
+
+void __hybrid_spin_lock_irq(struct raw_spinlock *rlock)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+
+	__flags = hard_local_irq_save();
+
+	if (running_inband()) {
+		stall_inband();
+		trace_hardirqs_off();
+		preempt_disable();
+	}
+
+	hard_lock_acquire(rlock, 0, _RET_IP_);
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	lock->hwflags = __flags;
+}
+EXPORT_SYMBOL(__hybrid_spin_lock_irq);
+
+void __hybrid_spin_unlock_irq(struct raw_spinlock *rlock)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+
+	/* Pick the flags before releasing the lock. */
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	__flags = lock->hwflags;
+	hard_lock_release(rlock, _RET_IP_);
+
+	if (running_inband()) {
+		trace_hardirqs_on();
+		unstall_inband_nocheck();
+		hard_local_irq_restore(__flags);
+		preempt_enable();
+		return;
+	}
+
+	hard_local_irq_restore(__flags);
+}
+EXPORT_SYMBOL(__hybrid_spin_unlock_irq);
+
+unsigned long __hybrid_spin_lock_irqsave(struct raw_spinlock *rlock)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags, flags;
+
+	__flags = flags = hard_local_irq_save();
+
+	if (running_inband()) {
+		flags = test_and_stall_inband();
+		trace_hardirqs_off();
+		preempt_disable();
+	}
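+	/*
+	 * In-band, the returned flags carry the previous virtual
+	 * stall state; otherwise they carry the hard interrupt state
+	 * saved on entry.
+	 */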
+
+	hard_lock_acquire(rlock, 0, _RET_IP_);
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	lock->hwflags = __flags;
+
+	return flags;
+}
+EXPORT_SYMBOL(__hybrid_spin_lock_irqsave);
+
+void __hybrid_spin_unlock_irqrestore(struct raw_spinlock *rlock,
+				      unsigned long flags)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+
+	/* Pick the flags before releasing the lock. */
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	__flags = lock->hwflags;
+	hard_lock_release(rlock, _RET_IP_);
+
+	if (running_inband()) {
+		if (!flags) {
+			trace_hardirqs_on();
+			unstall_inband_nocheck();
+		}
+		hard_local_irq_restore(__flags);
+		preempt_enable();
+		return;
+	}
+
+	hard_local_irq_restore(__flags);
+}
+EXPORT_SYMBOL(__hybrid_spin_unlock_irqrestore);
+
+int __hybrid_spin_trylock(struct raw_spinlock *rlock)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+
+	if (running_inband())
+		preempt_disable();
+
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	__flags = hard_local_irq_save();
+
+	hard_spin_trylock_prepare(rlock);
+	if (do_raw_spin_trylock(rlock)) {
+		lock->hwflags = __flags;
+		hard_trylock_acquire(rlock, 1, _RET_IP_);
+		return 1;
+	}
+
+	hard_spin_trylock_fail(rlock);
+	hard_local_irq_restore(__flags);
+
+	if (running_inband())
+		preempt_enable();
+
+	return 0;
+}
+EXPORT_SYMBOL(__hybrid_spin_trylock);
+
+int __hybrid_spin_trylock_irqsave(struct raw_spinlock *rlock,
+				   unsigned long *flags)
+{
+	struct hybrid_spinlock *lock;
+	unsigned long __flags;
+	bool inband;
+
+	inband = running_inband();
+
+	__flags = *flags = hard_local_irq_save();
+
+	lock = container_of(rlock, struct hybrid_spinlock, rlock);
+	if (inband) {
+		*flags = test_and_stall_inband();
+		trace_hardirqs_off();
+		preempt_disable();
+	}
+
+	hard_spin_trylock_prepare(rlock);
+	if (do_raw_spin_trylock(rlock)) {
+		hard_trylock_acquire(rlock, 1, _RET_IP_);
+		lock->hwflags = __flags;
+		return 1;
+	}
+
+	hard_spin_trylock_fail(rlock);
+
+	if (inband && !*flags) {
+		trace_hardirqs_on();
+		unstall_inband_nocheck();
+	}
+
+	hard_local_irq_restore(__flags);
+
+	if (inband)
+		preempt_enable();
+
+	return 0;
+}
+EXPORT_SYMBOL(__hybrid_spin_trylock_irqsave);
diff --git a/kernel/kernel/locking/spinlock_debug.c b/kernel/kernel/locking/spinlock_debug.c
index b9d9308..0bcc39f 100644
--- a/kernel/kernel/locking/spinlock_debug.c
+++ b/kernel/kernel/locking/spinlock_debug.c
@@ -114,6 +114,7 @@
 	mmiowb_spin_lock();
 	debug_spin_lock_after(lock);
 }
+EXPORT_SYMBOL_GPL(do_raw_spin_lock);
 
 int do_raw_spin_trylock(raw_spinlock_t *lock)
 {
@@ -131,6 +132,7 @@
 #endif
 	return ret;
 }
+EXPORT_SYMBOL_GPL(do_raw_spin_trylock);
 
 void do_raw_spin_unlock(raw_spinlock_t *lock)
 {
@@ -138,6 +140,7 @@
 	debug_spin_unlock(lock);
 	arch_spin_unlock(&lock->raw_lock);
 }
+EXPORT_SYMBOL_GPL(do_raw_spin_unlock);
 
 static void rwlock_bug(rwlock_t *lock, const char *msg)
 {
diff --git a/kernel/kernel/notifier.c b/kernel/kernel/notifier.c
index 1b019cb..b116e14 100644
--- a/kernel/kernel/notifier.c
+++ b/kernel/kernel/notifier.c
@@ -213,6 +213,9 @@
 {
 	int ret;
 
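+	/*
+	 * From out-of-band context, run the chain while skipping the
+	 * RCU read-side section, which is not usable from there.
+	 */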
+	if (!running_inband())
+		return notifier_call_chain(&nh->head, val, v, -1, NULL);
+
 	rcu_read_lock();
 	ret = notifier_call_chain(&nh->head, val, v, -1, NULL);
 	rcu_read_unlock();
diff --git a/kernel/kernel/panic.c b/kernel/kernel/panic.c
index 332736a..f4a05dd 100644
--- a/kernel/kernel/panic.c
+++ b/kernel/kernel/panic.c
@@ -27,6 +27,7 @@
 #include <linux/sysrq.h>
 #include <linux/init.h>
 #include <linux/nmi.h>
+#include <linux/irq_pipeline.h>
 #include <linux/console.h>
 #include <linux/bug.h>
 #include <linux/ratelimit.h>
@@ -49,7 +50,7 @@
 	IS_ENABLED(CONFIG_GCC_PLUGIN_RANDSTRUCT) ? (1 << TAINT_RANDSTRUCT) : 0;
 static int pause_on_oops;
 static int pause_on_oops_flag;
-static DEFINE_SPINLOCK(pause_on_oops_lock);
+static DEFINE_HARD_SPINLOCK(pause_on_oops_lock);
 bool crash_kexec_post_notifiers;
 int panic_on_warn __read_mostly;
 unsigned long panic_on_taint;
@@ -189,8 +190,9 @@
 	 * there is nothing to prevent an interrupt handler (that runs
 	 * after setting panic_cpu) from invoking panic() again.
 	 */
-	local_irq_disable();
+	hard_local_irq_disable();
 	preempt_disable_notrace();
+	irq_pipeline_oops();
 
 	/*
 	 * It's possible to come here directly from a panic-assertion and
@@ -267,9 +269,12 @@
 
 	/*
 	 * Run any panic handlers, including those that might need to
-	 * add information to the kmsg dump output.
+	 * add information to the kmsg dump output. Skip panic
+	 * handlers if running over the oob stage, as they would most
+	 * likely break.
 	 */
-	atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
+	if (running_inband())
+		atomic_notifier_call_chain(&panic_notifier_list, 0, buf);
 
 	/* Call flush even twice. It tries harder with a single online CPU */
 	printk_safe_flush_on_panic();
@@ -474,7 +479,7 @@
 	if (!pause_on_oops)
 		return;
 
-	spin_lock_irqsave(&pause_on_oops_lock, flags);
+	raw_spin_lock_irqsave(&pause_on_oops_lock, flags);
 	if (pause_on_oops_flag == 0) {
 		/* This CPU may now print the oops message */
 		pause_on_oops_flag = 1;
@@ -484,21 +489,21 @@
 			/* This CPU gets to do the counting */
 			spin_counter = pause_on_oops;
 			do {
-				spin_unlock(&pause_on_oops_lock);
+				raw_spin_unlock(&pause_on_oops_lock);
 				spin_msec(MSEC_PER_SEC);
-				spin_lock(&pause_on_oops_lock);
+				raw_spin_lock(&pause_on_oops_lock);
 			} while (--spin_counter);
 			pause_on_oops_flag = 0;
 		} else {
 			/* This CPU waits for a different one */
 			while (spin_counter) {
-				spin_unlock(&pause_on_oops_lock);
+				raw_spin_unlock(&pause_on_oops_lock);
 				spin_msec(1);
-				spin_lock(&pause_on_oops_lock);
+				raw_spin_lock(&pause_on_oops_lock);
 			}
 		}
 	}
-	spin_unlock_irqrestore(&pause_on_oops_lock, flags);
+	raw_spin_unlock_irqrestore(&pause_on_oops_lock, flags);
 }
 
 /*
@@ -528,6 +533,7 @@
 {
 	tracing_off();
 	/* can't trust the integrity of the kernel anymore: */
+	irq_pipeline_oops();
 	debug_locks_off();
 	do_oops_enter_exit();
 
diff --git a/kernel/kernel/power/Makefile b/kernel/kernel/power/Makefile
index 9770575..bab1221 100644
--- a/kernel/kernel/power/Makefile
+++ b/kernel/kernel/power/Makefile
@@ -17,5 +17,7 @@
 
 obj-$(CONFIG_MAGIC_SYSRQ)	+= poweroff.o
 
+ifndef CONFIG_DOVETAIL
 obj-$(CONFIG_SUSPEND)		+= wakeup_reason.o
+endif
 obj-$(CONFIG_ENERGY_MODEL)	+= energy_model.o
diff --git a/kernel/kernel/power/hibernate.c b/kernel/kernel/power/hibernate.c
index b6875eb..bc68e0f 100644
--- a/kernel/kernel/power/hibernate.c
+++ b/kernel/kernel/power/hibernate.c
@@ -302,6 +302,7 @@
 		goto Enable_cpus;
 
 	local_irq_disable();
+	hard_cond_local_irq_disable();
 
 	system_state = SYSTEM_SUSPEND;
 
@@ -469,6 +470,7 @@
 
 	local_irq_disable();
 	system_state = SYSTEM_SUSPEND;
+	hard_cond_local_irq_disable();
 
 	error = syscore_suspend();
 	if (error)
@@ -590,6 +592,7 @@
 
 	local_irq_disable();
 	system_state = SYSTEM_SUSPEND;
+	hard_cond_local_irq_disable();
 	syscore_suspend();
 	if (pm_wakeup_pending()) {
 		error = -EAGAIN;
diff --git a/kernel/kernel/printk/printk.c b/kernel/kernel/printk/printk.c
index e253475..db67ef3 100644
--- a/kernel/kernel/printk/printk.c
+++ b/kernel/kernel/printk/printk.c
@@ -47,6 +47,7 @@
 #include <linux/sched/clock.h>
 #include <linux/sched/debug.h>
 #include <linux/sched/task_stack.h>
+#include <linux/irqstage.h>
 
 #include <linux/uaccess.h>
 #include <asm/sections.h>
@@ -2188,6 +2189,73 @@
 }
 #endif
 
+#ifdef CONFIG_RAW_PRINTK
+static struct console *raw_console;
+static DEFINE_HARD_SPINLOCK(raw_console_lock);
+
+void raw_puts(const char *s, size_t len)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&raw_console_lock, flags);
+	if (raw_console)
+		raw_console->write_raw(raw_console, s, len);
+	raw_spin_unlock_irqrestore(&raw_console_lock, flags);
+}
+EXPORT_SYMBOL(raw_puts);
+
+void raw_vprintk(const char *fmt, va_list ap)
+{
+	char buf[256];
+	size_t n;
+
+	if (raw_console == NULL || console_suspended)
+		return;
+
+	touch_nmi_watchdog();
+	n = vscnprintf(buf, sizeof(buf), fmt, ap);
+	raw_puts(buf, n);
+}
+EXPORT_SYMBOL(raw_vprintk);
+
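+/*
+ * printk-like formatting with the output sent synchronously to the
+ * current raw console. Since raw_puts() only relies on a hard
+ * spinlock, this path is meant to be usable from any stage,
+ * including contexts where the regular printk() machinery cannot
+ * run.
+ */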
+asmlinkage __visible void raw_printk(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	raw_vprintk(fmt, ap);
+	va_end(ap);
+}
+EXPORT_SYMBOL(raw_printk);
+
+static inline void register_raw_console(struct console *newcon)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&raw_console_lock, flags);
+	if (newcon->write_raw)
+		raw_console = newcon;
+	raw_spin_unlock_irqrestore(&raw_console_lock, flags);
+}
+
+static inline void unregister_raw_console(struct console *oldcon)
+{
+	unsigned long flags;
+
+	raw_spin_lock_irqsave(&raw_console_lock, flags);
+	if (oldcon == raw_console)
+		raw_console = NULL;
+	raw_spin_unlock_irqrestore(&raw_console_lock, flags);
+}
+
+#else
+
+static inline void register_raw_console(struct console *newcon) { }
+
+static inline void unregister_raw_console(struct console *oldcon) { }
+
+#endif
+
 static int __add_preferred_console(char *name, int idx, char *options,
 				   char *brl_options, bool user_specified)
 {
@@ -2854,6 +2922,9 @@
 	if (err || newcon->flags & CON_BRL)
 		return;
 
+	/* The latest raw console to register is current. */
+	register_raw_console(newcon);
+
 	/*
 	 * If we have a bootconsole, and are switching to a real console,
 	 * don't print everything out again, since when the boot console, and
@@ -2938,6 +3009,8 @@
 		(console->flags & CON_BOOT) ? "boot" : "" ,
 		console->name, console->index);
 
+	unregister_raw_console(console);
+
 	res = _braille_unregister_console(console);
 	if (res < 0)
 		return res;
diff --git a/kernel/kernel/printk/printk_safe.c b/kernel/kernel/printk/printk_safe.c
index 2e9e3ed..9c065e4 100644
--- a/kernel/kernel/printk/printk_safe.c
+++ b/kernel/kernel/printk/printk_safe.c
@@ -9,6 +9,7 @@
 #include <linux/kdb.h>
 #include <linux/smp.h>
 #include <linux/cpumask.h>
+#include <linux/irq_pipeline.h>
 #include <linux/irq_work.h>
 #include <linux/printk.h>
 #include <linux/kprobes.h>
@@ -374,6 +375,8 @@
 	if (unlikely(kdb_trap_printk && kdb_printf_cpu < 0))
 		return vkdb_printf(KDB_MSGSRC_PRINTK, fmt, args);
 #endif
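+	/*
+	 * Contexts which are unsafe for running the inband log code
+	 * are redirected to the NMI path.
+	 */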
+	if (inband_unsafe())
+		return vprintk_nmi(fmt, args);
 
 	/*
 	 * Try to use the main logbuf even in NMI. But avoid calling console
diff --git a/kernel/kernel/ptrace.c b/kernel/kernel/ptrace.c
index aab480e..18f26ae 100644
--- a/kernel/kernel/ptrace.c
+++ b/kernel/kernel/ptrace.c
@@ -854,10 +854,12 @@
 		if (unlikely(!arch_has_block_step()))
 			return -EIO;
 		user_enable_block_step(child);
+		inband_ptstep_notify(child);
 	} else if (is_singlestep(request) || is_sysemu_singlestep(request)) {
 		if (unlikely(!arch_has_single_step()))
 			return -EIO;
 		user_enable_single_step(child);
+		inband_ptstep_notify(child);
 	} else {
 		user_disable_single_step(child);
 	}
diff --git a/kernel/kernel/rcu/tree.c b/kernel/kernel/rcu/tree.c
index b10d6bc..7e71bc8 100644
--- a/kernel/kernel/rcu/tree.c
+++ b/kernel/kernel/rcu/tree.c
@@ -232,6 +232,11 @@
 	return 0;
 }
 
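+/*
+ * Pipeline entry runs with hard irqs off and may preempt almost
+ * anywhere, much like an NMI, so RCU treats both cases alike.
+ */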
+static inline bool rcu_in_nonmaskable(void)
+{
+	return on_pipeline_entry() || in_nmi();
+}
+
 void rcu_softirq_qs(void)
 {
 	rcu_qs();
@@ -710,6 +715,7 @@
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	instrumentation_begin();
+
 	/*
 	 * Check for ->dynticks_nmi_nesting underflow and bad ->dynticks.
 	 * (We are exiting an NMI handler, so RCU better be paying attention
@@ -735,7 +741,7 @@
 	trace_rcu_dyntick(TPS("Startirq"), rdp->dynticks_nmi_nesting, 0, atomic_read(&rdp->dynticks));
 	WRITE_ONCE(rdp->dynticks_nmi_nesting, 0); /* Avoid store tearing. */
 
-	if (!in_nmi())
+	if (!rcu_in_nonmaskable())
 		rcu_prepare_for_idle();
 
 	// instrumentation for the noinstr rcu_dynticks_eqs_enter()
@@ -746,7 +752,7 @@
 	rcu_dynticks_eqs_enter();
 	// ... but is no longer watching here.
 
-	if (!in_nmi())
+	if (!rcu_in_nonmaskable())
 		rcu_dynticks_task_enter();
 }
 
@@ -935,7 +941,7 @@
 	struct rcu_data *rdp = this_cpu_ptr(&rcu_data);
 
 	// If we're here from NMI there's nothing to do.
-	if (in_nmi())
+	if (rcu_in_nonmaskable())
 		return;
 
 	RCU_LOCKDEP_WARN(rcu_dynticks_curr_cpu_in_eqs(),
@@ -996,14 +1002,14 @@
 	 */
 	if (rcu_dynticks_curr_cpu_in_eqs()) {
 
-		if (!in_nmi())
+		if (!rcu_in_nonmaskable())
 			rcu_dynticks_task_exit();
 
 		// RCU is not watching here ...
 		rcu_dynticks_eqs_exit();
 		// ... but is watching here.
 
-		if (!in_nmi()) {
+		if (!rcu_in_nonmaskable()) {
 			instrumentation_begin();
 			rcu_cleanup_after_idle();
 			instrumentation_end();
@@ -1016,7 +1022,7 @@
 		instrument_atomic_write(&rdp->dynticks, sizeof(rdp->dynticks));
 
 		incby = 1;
-	} else if (!in_nmi()) {
+	} else if (!rcu_in_nonmaskable()) {
 		instrumentation_begin();
 		rcu_irq_enter_check_tick();
 	} else  {
@@ -1094,10 +1100,10 @@
 /**
  * rcu_is_watching - see if RCU thinks that the current CPU is not idle
  *
- * Return true if RCU is watching the running CPU, which means that this
- * CPU can safely enter RCU read-side critical sections.  In other words,
- * if the current CPU is not in its idle loop or is in an interrupt or
- * NMI handler, return true.
+ * Return true if RCU is watching the running CPU, which means that
+ * this CPU can safely enter RCU read-side critical sections.  In
+ * other words, if the current CPU is not in its idle loop or is in an
+ * interrupt or NMI handler, return true.
  *
  * Make notrace because it can be called by the internal functions of
  * ftrace, and making this notrace removes unnecessary recursion calls.
@@ -1106,6 +1112,9 @@
 {
 	bool ret;
 
+	if (on_pipeline_entry())
+		return true;
+
 	preempt_disable_notrace();
 	ret = !rcu_dynticks_curr_cpu_in_eqs();
 	preempt_enable_notrace();
@@ -1152,7 +1161,7 @@
 	struct rcu_node *rnp;
 	bool ret = false;
 
-	if (in_nmi() || !rcu_scheduler_fully_active)
+	if (rcu_in_nonmaskable() || !rcu_scheduler_fully_active)
 		return true;
 	preempt_disable_notrace();
 	rdp = this_cpu_ptr(&rcu_data);
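
The recurring substitution in this file treats pipeline entry like NMI
context: out-of-band IRQs may preempt the kernel anywhere, so none of the
in-band-only RCU services (idle preparation, task hooks) may run there. A
minimal sketch of the resulting guard pattern, using the predicate this
hunk introduces (the caller shown is illustrative):

	if (!rcu_in_nonmaskable()) {
		/* Maskable, in-band context: the idle/task hooks are
		 * safe to run. */
		rcu_prepare_for_idle();
	}
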
diff --git a/kernel/kernel/rcu/tree_plugin.h b/kernel/kernel/rcu/tree_plugin.h
index f5ba074..7d9ec7a 100644
--- a/kernel/kernel/rcu/tree_plugin.h
+++ b/kernel/kernel/rcu/tree_plugin.h
@@ -790,7 +790,8 @@
 	struct rcu_data *rdp;
 
 	if (!IS_ENABLED(CONFIG_RCU_STRICT_GRACE_PERIOD) ||
-	   irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
+	    on_pipeline_entry() || running_oob() ||
+	    irqs_disabled() || preempt_count() || !rcu_state.gp_kthread)
 		return;
 	rdp = this_cpu_ptr(&rcu_data);
 	rcu_report_qs_rdp(rdp);
diff --git a/kernel/kernel/rcu/update.c b/kernel/kernel/rcu/update.c
index 849f0aa9..8e4b48b 100644
--- a/kernel/kernel/rcu/update.c
+++ b/kernel/kernel/rcu/update.c
@@ -99,6 +99,11 @@
  */
 static bool rcu_read_lock_held_common(bool *ret)
 {
+	if (irqs_pipelined() &&
+	    (hard_irqs_disabled() || running_oob())) {
+		*ret = true;
+		return true;
+	}
 	if (!debug_lockdep_rcu_enabled()) {
 		*ret = true;
 		return true;
@@ -209,6 +214,32 @@
 
 #endif /* #ifndef CONFIG_TINY_RCU */
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+/*
+ * Prepare for taking the RCU read lock when running out-of-band. Nop
+ * otherwise.
+ */
+void rcu_oob_prepare_lock(void)
+{
+	if (!on_pipeline_entry() && running_oob())
+		rcu_nmi_enter();
+}
+EXPORT_SYMBOL_GPL(rcu_oob_prepare_lock);
+
+/*
+ * Converse to rcu_oob_prepare_lock(), after dropping the RCU read
+ * lock.
+ */
+void rcu_oob_finish_lock(void)
+{
+	if (!on_pipeline_entry() && running_oob())
+		rcu_nmi_exit();
+}
+EXPORT_SYMBOL_GPL(rcu_oob_finish_lock);
+
+#endif	/* CONFIG_IRQ_PIPELINE */
+
 /*
  * Test each non-SRCU synchronous grace-period wait API.  This is
  * useful just after a change in mode for these primitives, and
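
A usage sketch for the two helpers above, as seen from a companion core
(the handler name is illustrative, not part of this patch): an
out-of-band handler brackets its read-side section so that RCU enters an
NMI-like watching state while the oob stage traverses protected data.

	static void my_oob_handler(void) /* runs on the oob stage */
	{
		rcu_oob_prepare_lock();	/* rcu_nmi_enter() when oob */
		rcu_read_lock();
		/* ... walk an RCU-protected structure ... */
		rcu_read_unlock();
		rcu_oob_finish_lock();	/* rcu_nmi_exit() when oob */
	}
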
diff --git a/kernel/kernel/sched/core.c b/kernel/kernel/sched/core.c
index 7359375..b14a6fb 100644
--- a/kernel/kernel/sched/core.c
+++ b/kernel/kernel/sched/core.c
@@ -2045,6 +2045,7 @@
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
 
+	inband_migration_notify(p, dest_cpu);
 	if (task_running(rq, p) || p->state == TASK_WAKING) {
 		struct migration_arg arg = { p, dest_cpu };
 		/* Need help from migration thread: drop lock and wait. */
@@ -3065,7 +3066,7 @@
 		 *  - we're serialized against set_special_state() by virtue of
 		 *    it disabling IRQs (this allows not taking ->pi_lock).
 		 */
-		if (!(p->state & state))
+		if (!(p->state & state) || task_is_off_stage(p))
 			goto out;
 
 		success = 1;
@@ -3083,7 +3084,7 @@
 	 */
 	raw_spin_lock_irqsave(&p->pi_lock, flags);
 	smp_mb__after_spinlock();
-	if (!(p->state & state))
+	if (!(p->state & state) || task_is_off_stage(p))
 		goto unlock;
 
 #ifdef CONFIG_FREEZER
@@ -3348,6 +3349,9 @@
 	init_numa_balancing(clone_flags, p);
 #ifdef CONFIG_SMP
 	p->wake_entry.u_flags = CSD_TYPE_TTWU;
+#endif
+#ifdef CONFIG_IRQ_PIPELINE
+	init_task_stall_bits(p);
 #endif
 }
 
@@ -3816,6 +3820,13 @@
 	rseq_preempt(prev);
 	fire_sched_out_preempt_notifiers(prev, next);
 	prepare_task(next);
+	prepare_inband_switch(next);
+	/*
+	 * Do not fold the following hard irqs disabling into
+	 * prepare_inband_switch(), this is required when pipelining
+	 * interrupts, not only by alternate scheduling.
+	 */
+	hard_cond_local_irq_disable();
 	prepare_arch_switch(next);
 }
 
@@ -3973,8 +3984,19 @@
 	 * finish_task_switch() will drop rq->lock() and lower preempt_count
 	 * and the preempt_enable() will end up enabling preemption (on
 	 * PREEMPT_COUNT kernels).
+	 *
+	 * If interrupts are pipelined, we may enable hard irqs since
+	 * the in-band stage is stalled. If dovetailing is enabled
+	 * too, schedule_tail() is the place where transitions of
+	 * tasks from the in-band to the oob stage complete. The
+	 * companion core is notified that 'prev' is now suspended in
+	 * the in-band stage, and can be safely resumed in the oob
+	 * stage.
 	 */
 
+	WARN_ON_ONCE(irq_pipeline_debug() && !irqs_disabled());
+	hard_cond_local_irq_enable();
+	oob_trampoline();
 	rq = finish_task_switch(prev);
 	balance_callback(rq);
 	preempt_enable();
@@ -4028,6 +4050,20 @@
 		 */
 		switch_mm_irqs_off(prev->active_mm, next->mm, next);
 
+		/*
+		 * If dovetail is enabled, insert a short window of
+		 * opportunity for preemption by out-of-band IRQs
+		 * before finalizing the context switch.
+		 * dovetail_context_switch() can deal with preempting
+		 * partially switched in-band contexts.
+		 */
+		if (dovetailing()) {
+			struct mm_struct *oldmm = prev->active_mm;
+			prev->active_mm = next->mm;
+			hard_local_irq_sync();
+			prev->active_mm = oldmm;
+		}
+
 		if (!prev->mm) {                        // from kernel
 			/* will mmdrop() in finish_task_switch(). */
 			rq->prev_mm = prev->active_mm;
@@ -4042,6 +4078,15 @@
 	/* Here we just switch the register state and the stack. */
 	switch_to(prev, next, prev);
 	barrier();
+
+	/*
+	 * If 'next' is on its way to the oob stage, don't run the
+	 * context switch epilogue just yet. We will do that at some
+	 * point later, when the task switches back to the in-band
+	 * stage.
+	 */
+	if (unlikely(inband_switch_tail()))
+		return NULL;
 
 	return finish_task_switch(prev);
 }
@@ -4557,6 +4602,8 @@
 		panic("corrupted shadow stack detected inside scheduler\n");
 #endif
 
+	check_inband_stage();
+
 #ifdef CONFIG_DEBUG_ATOMIC_SLEEP
 	if (!preempt && prev->state && prev->non_block_count) {
 		printk(KERN_ERR "BUG: scheduling in a non-blocking section: %s/%d/%i\n",
@@ -4682,7 +4729,7 @@
  *
  * WARNING: must be called with preemption disabled!
  */
-static void __sched notrace __schedule(bool preempt)
+static int __sched notrace __schedule(bool preempt)
 {
 	struct task_struct *prev, *next;
 	unsigned long *switch_count;
@@ -4802,12 +4849,17 @@
 
 		/* Also unlocks the rq: */
 		rq = context_switch(rq, prev, next, &rf);
+		if (dovetailing() && rq == NULL)
+			/* Task moved to the oob stage. */
+			return 1;
 	} else {
 		rq->clock_update_flags &= ~(RQCF_ACT_SKIP|RQCF_REQ_SKIP);
 		rq_unlock_irq(rq, &rf);
 	}
 
 	balance_callback(rq);
+
+	return 0;
 }
 
 void __noreturn do_task_dead(void)
@@ -4879,7 +4931,8 @@
 	sched_submit_work(tsk);
 	do {
 		preempt_disable();
-		__schedule(false);
+		if (__schedule(false))
+			return;
 		sched_preempt_enable_no_resched();
 	} while (need_resched());
 	sched_update_worker(tsk);
@@ -4960,7 +5013,8 @@
 		 */
 		preempt_disable_notrace();
 		preempt_latency_start(1);
-		__schedule(true);
+		if (__schedule(true))
+			return;
 		preempt_latency_stop(1);
 		preempt_enable_no_resched_notrace();
 
@@ -4982,7 +5036,7 @@
 	 * If there is a non-zero preempt_count or interrupts are disabled,
 	 * we do not want to preempt the current task. Just return..
 	 */
-	if (likely(!preemptible()))
+	if (likely(!running_inband() || !preemptible()))
 		return;
 
 	preempt_schedule_common();
@@ -5008,7 +5062,7 @@
 {
 	enum ctx_state prev_ctx;
 
-	if (likely(!preemptible()))
+	if (likely(!running_inband() || !preemptible()))
 		return;
 
 	do {
@@ -5049,23 +5103,41 @@
  * off of irq context.
  * Note, that this is called and return with irqs disabled. This will
  * protect us against recursive calling from irq.
+ *
+ * IRQ pipeline: we are called with hard irqs off, synchronize the
+ * pipeline, then return the same way, so that the in-band log is
+ * guaranteed empty and further interrupt delivery is postponed by the
+ * hardware until we have exited the kernel.
  */
 asmlinkage __visible void __sched preempt_schedule_irq(void)
 {
 	enum ctx_state prev_state;
+
+	if (irq_pipeline_debug()) {
+		/* Catch any weirdness in pipelined entry code. */
+		if (WARN_ON_ONCE(!running_inband()))
+			return;
+		WARN_ON_ONCE(!hard_irqs_disabled());
+	}
+
+	hard_cond_local_irq_enable();
 
 	/* Catch callers which need to be fixed */
 	BUG_ON(preempt_count() || !irqs_disabled());
 
 	prev_state = exception_enter();
 
-	do {
+	for (;;) {
 		preempt_disable();
 		local_irq_enable();
 		__schedule(true);
+		sync_inband_irqs();
 		local_irq_disable();
 		sched_preempt_enable_no_resched();
-	} while (need_resched());
+		if (!need_resched())
+			break;
+		hard_cond_local_irq_enable();
+	}
 
 	exception_exit(prev_state);
 }
@@ -8892,6 +8964,233 @@
 
 #endif	/* CONFIG_CGROUP_SCHED */
 
+#ifdef CONFIG_DOVETAIL
+
+int dovetail_leave_inband(void)
+{
+	struct task_struct *p = current;
+	struct irq_pipeline_data *pd;
+	unsigned long flags;
+
+	preempt_disable();
+
+	pd = raw_cpu_ptr(&irq_pipeline);
+
+	if (WARN_ON_ONCE(dovetail_debug() && pd->task_inflight))
+		goto out;	/* Paranoid. */
+
+	raw_spin_lock_irqsave(&p->pi_lock, flags);
+	pd->task_inflight = p;
+	/*
+	 * The scope of the off-stage state is broader than _TLF_OOB,
+	 * in that it includes the transition path from the in-band
+	 * context to the oob stage.
+	 */
+	set_thread_local_flags(_TLF_OFFSTAGE);
+	set_current_state(TASK_INTERRUPTIBLE);
+	raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+	sched_submit_work(p);
+	/*
+	 * The current task is scheduled out from the inband stage,
+	 * before resuming on the oob stage. Since this code stands
+	 * for the scheduling tail of the oob scheduler,
+	 * arch_dovetail_switch_finish() is called to perform
+	 * architecture-specific fixups (e.g. fpu context reload).
+	 */
+	if (likely(__schedule(false))) {
+		arch_dovetail_switch_finish(false);
+		return 0;
+	}
+
+	clear_thread_local_flags(_TLF_OFFSTAGE);
+	pd->task_inflight = NULL;
+out:
+	preempt_enable();
+
+	return -ERESTARTSYS;
+}
+EXPORT_SYMBOL_GPL(dovetail_leave_inband);
+
+void dovetail_resume_inband(void)
+{
+	struct task_struct *p;
+	struct rq *rq;
+
+	p = __this_cpu_read(irq_pipeline.rqlock_owner);
+	if (WARN_ON_ONCE(dovetail_debug() && p == NULL))
+		return;
+
+	if (WARN_ON_ONCE(dovetail_debug() && (preempt_count() & STAGE_MASK)))
+		return;
+
+	rq = finish_task_switch(p);
+	balance_callback(rq);
+	preempt_enable();
+	oob_trampoline();
+}
+EXPORT_SYMBOL_GPL(dovetail_resume_inband);
+
+#ifdef CONFIG_KVM
+
+#include <linux/kvm_host.h>
+
+static inline void notify_guest_preempt(void)
+{
+	struct kvm_oob_notifier *nfy;
+	struct irq_pipeline_data *p;
+
+	p = raw_cpu_ptr(&irq_pipeline);
+	nfy = p->vcpu_notify;
+	if (unlikely(nfy))
+		nfy->handler(nfy);
+}
+#else
+static inline void notify_guest_preempt(void)
+{ }
+#endif
+
+bool dovetail_context_switch(struct dovetail_altsched_context *out,
+			struct dovetail_altsched_context *in,
+			bool leave_inband)
+{
+	unsigned long pc __maybe_unused, lockdep_irqs;
+	struct task_struct *next, *prev, *last;
+	struct mm_struct *prev_mm, *next_mm;
+	bool inband_tail = false;
+
+	WARN_ON_ONCE(dovetail_debug() && on_pipeline_entry());
+
+	if (leave_inband) {
+		struct task_struct *tsk = current;
+		/*
+		 * We are about to leave the current inband context
+		 * for switching to an out-of-band task, save the
+		 * preempted context information.
+		 */
+		out->task = tsk;
+		out->active_mm = tsk->active_mm;
+		/*
+		 * Switching out-of-band may require some housekeeping
+		 * from a kernel VM which might currently run guest
+		 * code, notify it about the upcoming preemption.
+		 */
+		notify_guest_preempt();
+	}
+
+	arch_dovetail_switch_prepare(leave_inband);
+
+	next = in->task;
+	prev = out->task;
+	prev_mm = out->active_mm;
+	next_mm = in->active_mm;
+
+	if (next_mm == NULL) {
+		in->active_mm = prev_mm;
+		in->borrowed_mm = true;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		switch_oob_mm(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the inband context
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In such a case, a lazy TLB
+		 * state is expected when leaving the mm.
+		 */
+		if (next->mm == NULL)
+			enter_lazy_tlb(prev_mm, next);
+	}
+
+	if (out->borrowed_mm) {
+		out->borrowed_mm = false;
+		out->active_mm = NULL;
+	}
+
+	/*
+	 * Tasks running out-of-band may alter the (in-band)
+	 * preemption count as long as they don't trigger an in-band
+	 * rescheduling, which Dovetail properly blocks.
+	 *
+	 * If the preemption count is not stack-based but a global
+	 * per-cpu variable instead, changing it has a globally
+	 * visible side-effect though, which is a problem if the
+	 * out-of-band task is preempted and schedules away before the
+	 * change is rolled back: this may cause the in-band context
+	 * to later resume with a broken preemption count.
+	 *
+	 * For this reason, the preemption count of any context which
+	 * blocks from the out-of-band stage is carried over and
+	 * restored across switches, emulating a stack-based
+	 * storage.
+	 *
+	 * Eventually, the count is reset to FORK_PREEMPT_COUNT upon
+	 * transition from out-of-band to in-band stage, reinstating
+	 * the value in effect when the converse transition happened
+	 * at some point before.
+	 */
+	if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT))
+		pc = preempt_count();
+
+	/*
+	 * Like the preemption count and for the same reason, the irq
+	 * state maintained by lockdep must be preserved across
+	 * switches.
+	 */
+	lockdep_irqs = lockdep_read_irqs_state();
+
+	switch_to(prev, next, last);
+	barrier();
+
+	if (check_hard_irqs_disabled())
+		hard_local_irq_disable();
+
+	/*
+	 * If we entered this routine for switching to an out-of-band
+	 * task but don't have _TLF_OOB set for the current context
+	 * when resuming, this portion of code is the switch tail of
+	 * the inband schedule() routine, finalizing a transition to
+	 * the inband stage for the current task. Update the stage
+	 * level as/if required.
+	 */
+	if (unlikely(!leave_inband && !test_thread_local_flags(_TLF_OOB))) {
+		if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT))
+			preempt_count_set(FORK_PREEMPT_COUNT);
+		else if (unlikely(dovetail_debug() &&
+					!(preempt_count() & STAGE_MASK)))
+			WARN_ON_ONCE(1);
+		else
+			preempt_count_sub(STAGE_OFFSET);
+
+		lockdep_write_irqs_state(lockdep_irqs);
+
+		/*
+		 * Fixup the interrupt state conversely to what
+		 * inband_switch_tail() does for the opposite stage
+		 * switching direction.
+		 */
+		stall_inband();
+		trace_hardirqs_off();
+		inband_tail = true;
+	} else {
+		if (IS_ENABLED(CONFIG_HAVE_PERCPU_PREEMPT_COUNT))
+			preempt_count_set(pc);
+
+		lockdep_write_irqs_state(lockdep_irqs);
+	}
+
+	arch_dovetail_switch_finish(leave_inband);
+
+	/*
+	 * inband_tail is true whenever we are finalizing a transition
+	 * to the inband stage from the oob context for current. See
+	 * above.
+	 */
+	return inband_tail;
+}
+EXPORT_SYMBOL_GPL(dovetail_context_switch);
+
+#endif /* CONFIG_DOVETAIL */
+
 void dump_cpu_task(int cpu)
 {
 	pr_info("Task dump for CPU %d:\n", cpu);
diff --git a/kernel/kernel/sched/idle.c b/kernel/kernel/sched/idle.c
index 6dc7d9a..cc2710e 100644
--- a/kernel/kernel/sched/idle.c
+++ b/kernel/kernel/sched/idle.c
@@ -80,6 +80,7 @@
 void __weak arch_cpu_idle(void)
 {
 	cpu_idle_force_poll = 1;
+	hard_local_irq_enable();
 	raw_local_irq_enable();
 }
 
@@ -87,13 +88,18 @@
  * default_idle_call - Default CPU idle routine.
  *
  * To use when the cpuidle framework cannot be used.
+ *
+ * When interrupts are pipelined, this call is entered with hard irqs
+ * on and the in-band stage stalled. It returns with hard irqs on,
+ * in-band stage stalled. irq_cpuidle_enter() then turns off hard irqs
+ * before synchronizing irqs, making sure we have no event lingering
+ * in the interrupt log as we go for a nap.
  */
 void __cpuidle default_idle_call(void)
 {
 	if (current_clr_polling_and_test()) {
-		local_irq_enable();
-	} else {
-
+		local_irq_enable_full();
+	} else if (irq_cpuidle_enter(NULL, NULL)) { /* hard irqs off now */
 		trace_cpu_idle(1, smp_processor_id());
 		stop_critical_timings();
 
@@ -127,6 +133,8 @@
 
 		start_critical_timings();
 		trace_cpu_idle(PWR_EVENT_EXIT, smp_processor_id());
+	} else {
+		local_irq_enable_full();
 	}
 }
 
@@ -249,6 +257,13 @@
 	__current_set_polling();
 
 	/*
+	 * Catch mishandling of the CPU's interrupt disable flag when
+	 * pipelining IRQs.
+	 */
+	if (WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled()))
+		hard_local_irq_enable();
+
+	/*
 	 * It is up to the idle functions to reenable local interrupts
 	 */
 	if (WARN_ON_ONCE(irqs_disabled()))
@@ -300,6 +315,7 @@
 			cpu_idle_poll();
 		} else {
 			cpuidle_idle_call();
+			WARN_ON_ONCE(irq_pipeline_debug() && hard_irqs_disabled());
 		}
 		arch_cpu_idle_exit();
 	}
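
The idle entry protocol with pipelined IRQs, condensed from the code
above (a control-flow sketch, not additional kernel code):

	if (current_clr_polling_and_test()) {
		local_irq_enable_full();   /* resched pending, bail out */
	} else if (irq_cpuidle_enter(NULL, NULL)) {
		/* hard irqs are now off and the in-band log is empty,
		 * so the nap cannot miss a wakeup event */
		arch_cpu_idle();
	} else {
		local_irq_enable_full();   /* event lingering, retry */
	}
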
diff --git a/kernel/kernel/sched/sched.h b/kernel/kernel/sched/sched.h
index 9e798c5..2f1c1ab 100644
--- a/kernel/kernel/sched/sched.h
+++ b/kernel/kernel/sched/sched.h
@@ -52,6 +52,8 @@
 #include <linux/membarrier.h>
 #include <linux/migrate.h>
 #include <linux/mmu_context.h>
+#include <linux/irq_pipeline.h>
+#include <linux/dovetail.h>
 #include <linux/nmi.h>
 #include <linux/proc_fs.h>
 #include <linux/prefetch.h>
diff --git a/kernel/kernel/sched/wait.c b/kernel/kernel/sched/wait.c
index c4f324a..c3a42d9 100644
--- a/kernel/kernel/sched/wait.c
+++ b/kernel/kernel/sched/wait.c
@@ -71,6 +71,8 @@
 	wait_queue_entry_t *curr, *next;
 	int cnt = 0;
 
+	check_inband_stage();
+
 	lockdep_assert_held(&wq_head->lock);
 
 	if (bookmark && (bookmark->flags & WQ_FLAG_BOOKMARK)) {
diff --git a/kernel/kernel/signal.c b/kernel/kernel/signal.c
index f6ecd01..afadec2 100644
--- a/kernel/kernel/signal.c
+++ b/kernel/kernel/signal.c
@@ -763,6 +763,10 @@
 void signal_wake_up_state(struct task_struct *t, unsigned int state)
 {
 	set_tsk_thread_flag(t, TIF_SIGPENDING);
+
+	/* TIF_SIGPENDING must be set prior to notifying. */
+	inband_signal_notify(t);
+
 	/*
 	 * TASK_WAKEKILL also means wake it up in the stopped/traced/killable
 	 * case. We don't check t->state here because there is a race with it
@@ -984,8 +988,11 @@
 	if (sig == SIGKILL)
 		return true;
 
-	if (task_is_stopped_or_traced(p))
+	if (task_is_stopped_or_traced(p)) {
+		if (!signal_pending(p))
+			inband_signal_notify(p);
 		return false;
+	}
 
 	return task_curr(p) || !task_sigpending(p);
 }
@@ -2145,6 +2152,7 @@
 	 * schedule() will not sleep if there is a pending signal that
 	 * can awaken the task.
 	 */
+	inband_ptstop_notify();
 	set_special_state(TASK_TRACED);
 
 	/*
@@ -2238,6 +2246,8 @@
 		read_unlock(&tasklist_lock);
 	}
 
+	inband_ptcont_notify();
+
 	/*
 	 * We are back.  Now reacquire the siglock before touching
 	 * last_siginfo, so that we are sure to have synchronized with
diff --git a/kernel/kernel/stop_machine.c b/kernel/kernel/stop_machine.c
index c65cfb7..49c00e4 100644
--- a/kernel/kernel/stop_machine.c
+++ b/kernel/kernel/stop_machine.c
@@ -207,8 +207,8 @@
 			curstate = newstate;
 			switch (curstate) {
 			case MULTI_STOP_DISABLE_IRQ:
-				local_irq_disable();
 				hard_irq_disable();
+				local_irq_disable();
 				break;
 			case MULTI_STOP_RUN:
 				if (is_active)
@@ -229,6 +229,7 @@
 		rcu_momentary_dyntick_idle();
 	} while (curstate != MULTI_STOP_EXIT);
 
+	hard_irq_enable();
 	local_irq_restore(flags);
 	return err;
 }
@@ -629,6 +630,7 @@
 		local_irq_save(flags);
 		hard_irq_disable();
 		ret = (*fn)(data);
+		hard_irq_enable();
 		local_irq_restore(flags);
 
 		return ret;
diff --git a/kernel/kernel/time/Makefile b/kernel/kernel/time/Makefile
index c8f0016..14cb45c 100644
--- a/kernel/kernel/time/Makefile
+++ b/kernel/kernel/time/Makefile
@@ -16,6 +16,7 @@
 endif
 obj-$(CONFIG_GENERIC_SCHED_CLOCK)		+= sched_clock.o
 obj-$(CONFIG_TICK_ONESHOT)			+= tick-oneshot.o tick-sched.o
+obj-$(CONFIG_IRQ_PIPELINE)			+= tick-proxy.o
 obj-$(CONFIG_HAVE_GENERIC_VDSO)			+= vsyscall.o
 obj-$(CONFIG_DEBUG_FS)				+= timekeeping_debug.o
 obj-$(CONFIG_TEST_UDELAY)			+= test_udelay.o
diff --git a/kernel/kernel/time/clockevents.c b/kernel/kernel/time/clockevents.c
index f549022..da6735d 100644
--- a/kernel/kernel/time/clockevents.c
+++ b/kernel/kernel/time/clockevents.c
@@ -97,6 +97,7 @@
 	/* Transition with new state-specific callbacks */
 	switch (state) {
 	case CLOCK_EVT_STATE_DETACHED:
+	case CLOCK_EVT_STATE_RESERVED:
 		/* The clockevent device is getting replaced. Shut it down. */
 
 	case CLOCK_EVT_STATE_SHUTDOWN:
@@ -437,6 +438,69 @@
 }
 EXPORT_SYMBOL_GPL(clockevents_unbind_device);
 
+#ifdef CONFIG_IRQ_PIPELINE
+
+/**
+ * clockevents_register_proxy - register a proxy device on the current CPU
+ * @dev:	proxy to register
+ */
+int clockevents_register_proxy(struct clock_proxy_device *dev)
+{
+	struct clock_event_device *proxy_dev, *real_dev;
+	unsigned long flags;
+	u32 freq;
+	int ret;
+
+	raw_spin_lock_irqsave(&clockevents_lock, flags);
+
+	ret = tick_setup_proxy(dev);
+	if (ret)  {
+		raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+		return ret;
+	}
+
+	proxy_dev = &dev->proxy_device;
+	clockevent_set_state(proxy_dev, CLOCK_EVT_STATE_DETACHED);
+
+	list_add(&proxy_dev->list, &clockevent_devices);
+	tick_check_new_device(proxy_dev);
+	clockevents_notify_released();
+
+	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+
+	real_dev = dev->real_device;
+	freq = (1000000000ULL * real_dev->mult) >> real_dev->shift;
+	printk(KERN_INFO "CPU%d: proxy tick device registered (%u.%02uMHz)\n",
+		 smp_processor_id(), freq / 1000000, (freq / 10000) % 100);
+
+	return ret;
+}
+
+void clockevents_unregister_proxy(struct clock_proxy_device *dev)
+{
+	unsigned long flags;
+	int ret;
+
+	clockevents_register_device(dev->real_device);
+	clockevents_switch_state(dev->real_device, CLOCK_EVT_STATE_DETACHED);
+
+	/*
+	 *  Pop the proxy device, do not give it back to the
+	 *  framework.
+	 */
+	raw_spin_lock_irqsave(&clockevents_lock, flags);
+	ret = clockevents_replace(&dev->proxy_device);
+	raw_spin_unlock_irqrestore(&clockevents_lock, flags);
+
+	if (WARN_ON(ret))
+		return;
+
+	printk(KERN_INFO "CPU%d: proxy tick device unregistered\n",
+		smp_processor_id());
+}
+
+#endif
+
 /**
  * clockevents_register_device - register a clock event device
  * @dev:	device to register
@@ -575,9 +639,18 @@
 	 */
 	if (old) {
 		module_put(old->owner);
-		clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
+		/*
+		 * Do not move the device backing a proxy tick device
+		 * to the release list, keep it around but mark it as
+		 * reserved.
+		 */
 		list_del(&old->list);
-		list_add(&old->list, &clockevents_released);
+		if (tick_check_is_proxy(new)) {
+			clockevents_switch_state(old, CLOCK_EVT_STATE_RESERVED);
+		} else {
+			clockevents_switch_state(old, CLOCK_EVT_STATE_DETACHED);
+			list_add(&old->list, &clockevents_released);
+		}
 	}
 
 	if (new) {
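
The registration message derives the timer frequency back from the
clockevent mult/shift pair: clockevents convert nanoseconds to cycles as
cycles = ns * mult >> shift, so feeding one second through the same
formula recovers Hz. A worked example (values illustrative of a 24MHz
timer with shift = 32):

	u32 mult = 103079216, shift = 32; /* ~= (24e6 << 32) / 1e9 */
	u64 freq = (1000000000ULL * mult) >> shift; /* == 24000000 */
	/* logged as "(24.00MHz)" by the pr_info() above */
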
diff --git a/kernel/kernel/time/clocksource.c b/kernel/kernel/time/clocksource.c
index 74492f0..2921e18 100644
--- a/kernel/kernel/time/clocksource.c
+++ b/kernel/kernel/time/clocksource.c
@@ -1007,8 +1007,8 @@
 
 	clocksource_update_max_deferment(cs);
 
-	pr_info("%s: mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
-		cs->name, cs->mask, cs->max_cycles, cs->max_idle_ns);
+	pr_info("%s: freq: %Lu Hz, mask: 0x%llx max_cycles: 0x%llx, max_idle_ns: %lld ns\n",
+		cs->name, (u64)freq * scale, cs->mask, cs->max_cycles, cs->max_idle_ns);
 }
 EXPORT_SYMBOL_GPL(__clocksource_update_freq_scale);
 
@@ -1275,10 +1275,36 @@
 }
 static DEVICE_ATTR_RO(available_clocksource);
 
+/**
+ * vdso_clocksource_show - sysfs interface for vDSO type of
+ *      current clocksource
+ * @dev:	unused
+ * @attr:	unused
+ * @buf:	char buffer to be filled with vDSO type
+ */
+static ssize_t vdso_clocksource_show(struct device *dev,
+				struct device_attribute *attr,
+				char *buf)
+{
+	ssize_t count = 0, type;
+
+	mutex_lock(&clocksource_mutex);
+	type = curr_clocksource->vdso_type;
+	count = snprintf(buf, PAGE_SIZE, "%s\n",
+			type == CLOCKSOURCE_VDSO_NONE ? "none" :
+			type == CLOCKSOURCE_VDSO_ARCHITECTED ? "architected" :
+			"mmio");
+	mutex_unlock(&clocksource_mutex);
+
+	return count;
+}
+static DEVICE_ATTR_RO(vdso_clocksource);
+
 static struct attribute *clocksource_attrs[] = {
 	&dev_attr_current_clocksource.attr,
 	&dev_attr_unbind_clocksource.attr,
 	&dev_attr_available_clocksource.attr,
+	&dev_attr_vdso_clocksource.attr,
 	NULL
 };
 ATTRIBUTE_GROUPS(clocksource);
diff --git a/kernel/kernel/time/hrtimer.c b/kernel/kernel/time/hrtimer.c
index 544ce87..e52623c 100644
--- a/kernel/kernel/time/hrtimer.c
+++ b/kernel/kernel/time/hrtimer.c
@@ -873,6 +873,7 @@
 	on_each_cpu(retrigger_next_event, NULL, 1);
 #endif
 	timerfd_clock_was_set();
+	inband_clock_was_set();
 }
 
 static void clock_was_set_work(struct work_struct *work)
diff --git a/kernel/kernel/time/tick-broadcast.c b/kernel/kernel/time/tick-broadcast.c
index 086d36b..e3d15b3 100644
--- a/kernel/kernel/time/tick-broadcast.c
+++ b/kernel/kernel/time/tick-broadcast.c
@@ -796,6 +796,23 @@
 	int ret = 0;
 	ktime_t now;
 
+	/*
+	 * If there is no broadcast device, tell the caller not to go
+	 * into deep idle.
+	 */
+	if (!tick_broadcast_device.evtdev)
+		return -EBUSY;
+
+	dev = this_cpu_ptr(&tick_cpu_device)->evtdev;
+
+	/*
+	 * If proxying the hardware timer for high-precision tick
+	 * delivery to the out-of-band stage, the whole broadcast
+	 * dance is a no-go. Deny entering deep idle.
+	 */
+	if (dev->features & CLOCK_EVT_FEAT_PROXY)
+		return -EBUSY;
+
 	raw_spin_lock(&tick_broadcast_lock);
 	bc = tick_broadcast_device.evtdev;
 
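
The -EBUSY result reaches cpuidle via tick_broadcast_enter(), which makes
the governor fall back to a state that keeps the per-CPU timer ticking (a
sketch simplified from the cpuidle core, not part of this patch):

	if (state->flags & CPUIDLE_FLAG_TIMER_STOP &&
	    tick_broadcast_enter())
		/* No broadcast device, or the tick is proxied to the
		 * oob stage: pick a shallower C-state instead. */
		return -EBUSY;
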
diff --git a/kernel/kernel/time/tick-common.c b/kernel/kernel/time/tick-common.c
index 572b4c0..3f4b7ec 100644
--- a/kernel/kernel/time/tick-common.c
+++ b/kernel/kernel/time/tick-common.c
@@ -248,7 +248,8 @@
 	} else {
 		handler = td->evtdev->event_handler;
 		next_event = td->evtdev->next_event;
-		td->evtdev->event_handler = clockevents_handle_noop;
+		if (!clockevent_state_reserved(td->evtdev))
+			td->evtdev->event_handler = clockevents_handle_noop;
 	}
 
 	td->evtdev = newdev;
@@ -330,6 +331,12 @@
 bool tick_check_replacement(struct clock_event_device *curdev,
 			    struct clock_event_device *newdev)
 {
+	/*
+	 * Never replace an active proxy except when unregistering it.
+	 */
+	if (tick_check_is_proxy(curdev))
+		return false;
+
 	if (!tick_check_percpu(curdev, newdev, smp_processor_id()))
 		return false;
 
@@ -350,6 +357,9 @@
 	td = &per_cpu(tick_cpu_device, cpu);
 	curdev = td->evtdev;
 
+	if (tick_check_is_proxy(curdev))
+		goto out_bc;
+
 	/* cpu local device ? */
 	if (!tick_check_percpu(curdev, newdev, cpu))
 		goto out_bc;
@@ -367,7 +377,12 @@
 	 * not give it back to the clockevents layer !
 	 */
 	if (tick_is_broadcast_device(curdev)) {
-		clockevents_shutdown(curdev);
+		if (tick_check_is_proxy(newdev)) {
+			list_del(&curdev->list);
+			clockevents_switch_state(curdev, CLOCK_EVT_STATE_RESERVED);
+		} else {
+			clockevents_shutdown(curdev);
+		}
 		curdev = NULL;
 	}
 	clockevents_exchange_device(curdev, newdev);
diff --git a/kernel/kernel/time/tick-internal.h b/kernel/kernel/time/tick-internal.h
index ab9cb68..1fc4bfc 100644
--- a/kernel/kernel/time/tick-internal.h
+++ b/kernel/kernel/time/tick-internal.h
@@ -48,15 +48,26 @@
 	dev->state_use_accessors = state;
 }
 
+static inline bool tick_check_is_proxy(struct clock_event_device *dev)
+{
+	if (!irqs_pipelined())
+		return false;
+
+	return dev && dev->features & CLOCK_EVT_FEAT_PROXY;
+}
+
 extern void clockevents_shutdown(struct clock_event_device *dev);
 extern void clockevents_exchange_device(struct clock_event_device *old,
 					struct clock_event_device *new);
-extern void clockevents_switch_state(struct clock_event_device *dev,
-				     enum clock_event_state state);
 extern int clockevents_program_event(struct clock_event_device *dev,
 				     ktime_t expires, bool force);
 extern void clockevents_handle_noop(struct clock_event_device *dev);
 extern int __clockevents_update_freq(struct clock_event_device *dev, u32 freq);
+#ifdef CONFIG_IRQ_PIPELINE
+extern int clockevents_register_proxy(struct clock_proxy_device *dev);
+extern void clockevents_unregister_proxy(struct clock_proxy_device *dev);
+extern int tick_setup_proxy(struct clock_proxy_device *dev);
+#endif
 extern ssize_t sysfs_get_uname(const char *buf, char *dst, size_t cnt);
 
 /* Broadcasting support */
diff --git a/kernel/kernel/time/tick-proxy.c b/kernel/kernel/time/tick-proxy.c
new file mode 100644
index 0000000..5a87798
--- /dev/null
+++ b/kernel/kernel/time/tick-proxy.c
@@ -0,0 +1,465 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2017 Philippe Gerum  <rpm@xenomai.org>.
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/printk.h>
+#include <linux/delay.h>
+#include <linux/smp.h>
+#include <linux/err.h>
+#include <linux/cpumask.h>
+#include <linux/clockchips.h>
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/irq_pipeline.h>
+#include <linux/stop_machine.h>
+#include <linux/slab.h>
+#include "tick-internal.h"
+
+static unsigned int proxy_tick_irq;
+
+static DEFINE_MUTEX(proxy_mutex);
+
+static DEFINE_PER_CPU(struct clock_proxy_device, proxy_tick_device);
+
+static inline struct clock_event_device *
+get_real_tick_device(struct clock_event_device *proxy_dev)
+{
+	return container_of(proxy_dev, struct clock_proxy_device, proxy_device)->real_device;
+}
+
+static void proxy_event_handler(struct clock_event_device *real_dev)
+{
+	struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device);
+	struct clock_event_device *proxy_dev = &dev->proxy_device;
+
+	proxy_dev->event_handler(proxy_dev);
+}
+
+static int proxy_set_state_oneshot(struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+	int ret;
+
+	flags = hard_local_irq_save();
+	ret = real_dev->set_state_oneshot(real_dev);
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static int proxy_set_state_periodic(struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+	int ret;
+
+	flags = hard_local_irq_save();
+	ret = real_dev->set_state_periodic(real_dev);
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static int proxy_set_state_oneshot_stopped(struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+	int ret;
+
+	flags = hard_local_irq_save();
+	ret = real_dev->set_state_oneshot_stopped(real_dev);
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static int proxy_set_state_shutdown(struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+	int ret;
+
+	flags = hard_local_irq_save();
+	ret = real_dev->set_state_shutdown(real_dev);
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static void proxy_suspend(struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+	real_dev->suspend(real_dev);
+	hard_local_irq_restore(flags);
+}
+
+static void proxy_resume(struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+	real_dev->resume(real_dev);
+	hard_local_irq_restore(flags);
+}
+
+static int proxy_tick_resume(struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+	int ret;
+
+	flags = hard_local_irq_save();
+	ret = real_dev->tick_resume(real_dev);
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static void proxy_broadcast(const struct cpumask *mask)
+{
+	struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device);
+	struct clock_event_device *real_dev = dev->real_device;
+	unsigned long flags;
+
+	flags = hard_local_irq_save();
+	real_dev->broadcast(mask);
+	hard_local_irq_restore(flags);
+}
+
+static int proxy_set_next_event(unsigned long delay,
+				struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+	int ret;
+
+	flags = hard_local_irq_save();
+	ret = real_dev->set_next_event(delay, real_dev);
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static int proxy_set_next_ktime(ktime_t expires,
+				struct clock_event_device *dev)
+{
+	struct clock_event_device *real_dev = get_real_tick_device(dev);
+	unsigned long flags;
+	int ret;
+
+	flags = hard_local_irq_save();
+	ret = real_dev->set_next_ktime(expires, real_dev);
+	hard_local_irq_restore(flags);
+
+	return ret;
+}
+
+static irqreturn_t proxy_irq_handler(int sirq, void *dev_id)
+{
+	struct clock_event_device *evt;
+
+	/*
+	 * Tricky: we may end up running this in-band IRQ handler
+	 * because tick_notify_proxy() was posted either:
+	 *
+	 * - from the out-of-band stage via ->handle_oob_event() for
+	 * emulating an in-band tick.  In this case, the active tick
+	 * device for the in-band timing core is the proxy device,
+	 * whose event handler is still the same as the real tick
+	 * device's.
+	 *
+	 * - directly by the clock chip driver on the local CPU via
+	 * clockevents_handle_event(), for propagating a tick to the
+	 * in-band stage nobody from the out-of-band stage is
+	 * interested on i.e. no proxy device was registered on the
+	 * receiving CPU, which was excluded from @cpumask in the call
+	 * to tick_install_proxy(). In this case, the active tick
+	 * device for the in-band timing core is a real clock event
+	 * device.
+	 *
+	 * In both cases, we are running on the in-band stage, and we
+	 * should fire the event handler of the currently active tick
+	 * device for the in-band timing core.
+	 */
+	evt = raw_cpu_ptr(&tick_cpu_device)->evtdev;
+	evt->event_handler(evt);
+
+	return IRQ_HANDLED;
+}
+
+#define interpose_proxy_handler(__proxy, __real, __h)		\
+	do {							\
+		if ((__real)->__h)				\
+			(__proxy)->__h = proxy_ ## __h;		\
+	} while (0)
+
+/*
+ * Setup a proxy which is about to override the tick device on the
+ * current CPU. Called with clockevents_lock held and irqs off so that
+ * the tick device does not change under our feet.
+ */
+int tick_setup_proxy(struct clock_proxy_device *dev)
+{
+	struct clock_event_device *proxy_dev, *real_dev;
+
+	real_dev = raw_cpu_ptr(&tick_cpu_device)->evtdev;
+	if ((real_dev->features &
+			(CLOCK_EVT_FEAT_PIPELINE|CLOCK_EVT_FEAT_ONESHOT))
+		!= (CLOCK_EVT_FEAT_PIPELINE|CLOCK_EVT_FEAT_ONESHOT)) {
+		WARN(1, "cannot use clockevent device %s in proxy mode!",
+			real_dev->name);
+		return -ENODEV;
+	}
+
+	/*
+	 * The assumption is that neither we nor clockevents_register_proxy()
+	 * can fail afterwards, so it is ok to advertise the new proxy as
+	 * built by setting dev->real_device early.
+	 */
+	dev->real_device = real_dev;
+	dev->__original_handler = real_dev->event_handler;
+
+	/*
+	 * Inherit the feature bits since the proxy device has the
+	 * same capabilities as the real one we are overriding
+	 * (including CLOCK_EVT_FEAT_C3STOP if present).
+	 */
+	proxy_dev = &dev->proxy_device;
+	memset(proxy_dev, 0, sizeof(*proxy_dev));
+	proxy_dev->features = real_dev->features |
+		CLOCK_EVT_FEAT_PERCPU | CLOCK_EVT_FEAT_PROXY;
+	proxy_dev->name = "proxy";
+	proxy_dev->irq = real_dev->irq;
+	proxy_dev->bound_on = -1;
+	proxy_dev->cpumask = cpumask_of(smp_processor_id());
+	proxy_dev->rating = real_dev->rating + 1;
+	proxy_dev->mult = real_dev->mult;
+	proxy_dev->shift = real_dev->shift;
+	proxy_dev->max_delta_ticks = real_dev->max_delta_ticks;
+	proxy_dev->min_delta_ticks = real_dev->min_delta_ticks;
+	proxy_dev->max_delta_ns = real_dev->max_delta_ns;
+	proxy_dev->min_delta_ns = real_dev->min_delta_ns;
+	/*
+	 * Interpose default handlers which are safe wrt preemption by
+	 * the out-of-band stage.
+	 */
+	interpose_proxy_handler(proxy_dev, real_dev, set_state_oneshot);
+	interpose_proxy_handler(proxy_dev, real_dev, set_state_oneshot_stopped);
+	interpose_proxy_handler(proxy_dev, real_dev, set_state_periodic);
+	interpose_proxy_handler(proxy_dev, real_dev, set_state_shutdown);
+	interpose_proxy_handler(proxy_dev, real_dev, suspend);
+	interpose_proxy_handler(proxy_dev, real_dev, resume);
+	interpose_proxy_handler(proxy_dev, real_dev, tick_resume);
+	interpose_proxy_handler(proxy_dev, real_dev, broadcast);
+	interpose_proxy_handler(proxy_dev, real_dev, set_next_event);
+	interpose_proxy_handler(proxy_dev, real_dev, set_next_ktime);
+
+	dev->__setup_handler(dev);
+
+	return 0;
+}
+
+static int enable_oob_timer(void *arg) /* hard_irqs_disabled() */
+{
+	struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device);
+	struct clock_event_device *real_dev;
+
+	/*
+	 * Install the out-of-band handler on this CPU's real clock
+	 * device, then turn on out-of-band mode for the associated
+	 * IRQ (duplicates are silently ignored if the IRQ is common
+	 * to multiple CPUs).
+	 */
+	real_dev = dev->real_device;
+	real_dev->event_handler = dev->handle_oob_event;
+	real_dev->features |= CLOCK_EVT_FEAT_OOB;
+	barrier();
+
+	/*
+	 * irq_switch_oob() grabs the IRQ descriptor lock which is
+	 * hybrid, so that is fine to invoke this routine with hard
+	 * IRQs off.
+	 */
+	irq_switch_oob(real_dev->irq, true);
+
+	return 0;
+}
+
+struct proxy_install_arg {
+	void (*setup_proxy)(struct clock_proxy_device *dev);
+	int result;
+};
+
+static void register_proxy_device(void *arg) /* irqs_disabled() */
+{
+	struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device);
+	struct proxy_install_arg *req = arg;
+	int ret;
+
+	dev->__setup_handler = req->setup_proxy;
+	ret = clockevents_register_proxy(dev);
+	if (ret) {
+		if (!req->result)
+			req->result = ret;
+	} else {
+		dev->real_device->event_handler = proxy_event_handler;
+	}
+}
+
+int tick_install_proxy(void (*setup_proxy)(struct clock_proxy_device *dev),
+		const struct cpumask *cpumask)
+{
+	struct proxy_install_arg arg;
+	int ret, sirq;
+
+	mutex_lock(&proxy_mutex);
+
+	ret = -EAGAIN;
+	if (proxy_tick_irq)
+		goto out;
+
+	sirq = irq_create_direct_mapping(synthetic_irq_domain);
+	if (WARN_ON(sirq == 0))
+		goto out;
+
+	ret = __request_percpu_irq(sirq, proxy_irq_handler,
+				   IRQF_NO_THREAD, /* no IRQF_TIMER here. */
+				   "proxy tick",
+				   &proxy_tick_device);
+	if (WARN_ON(ret)) {
+		irq_dispose_mapping(sirq);
+		goto out;
+	}
+
+	proxy_tick_irq = sirq;
+	barrier();
+
+	/*
+	 * Install a proxy tick device on each CPU. As the proxy
+	 * device is picked, the previous (real) tick device is
+	 * switched to reserved state by the clockevent core.
+	 * Immediately after, the proxy device starts controlling the
+	 * real device under the hood to carry out the timing requests
+	 * it receives.
+	 *
+	 * For a short period of time, after the proxy device is
+	 * installed, and until the real device IRQ is switched to
+	 * out-of-band mode, the flow is as follows:
+	 *
+	 *    [inband timing request]
+	 *        proxy_dev->set_next_event(proxy_dev)
+	 *            oob_program_event(proxy_dev)
+	 *                real_dev->set_next_event(real_dev)
+	 *        ...
+	 *        <tick event>
+	 *        original_timer_handler() [in-band stage]
+	 *            clockevents_handle_event(real_dev)
+	 *               proxy_event_handler(real_dev)
+	 *                  inband_event_handler(proxy_dev)
+	 *
+	 * Eventually, we substitute the original (in-band) clock
+	 * event handler with the out-of-band handler for the real
+	 * clock event device, then turn on out-of-band mode for the
+	 * timer IRQ associated to the latter. These two steps are
+	 * performed over a stop_machine() context, so that no tick
+	 * can race with this code while we swap handlers.
+	 *
+	 * Once the hand over is complete, the flow is as follows:
+	 *
+	 *    [inband timing request]
+	 *        proxy_dev->set_next_event(proxy_dev)
+	 *            oob_program_event(proxy_dev)
+	 *                real_dev->set_next_event(real_dev)
+	 *        ...
+	 *        <tick event>
+	 *        inband_event_handler() [out-of-band stage]
+	 *            clockevents_handle_event(real_dev)
+	 *                handle_oob_event(proxy_dev)
+	 *                    ...(inband tick emulation)...
+	 *                         tick_notify_proxy()
+	 *        ...
+	 *        proxy_irq_handler(proxy_dev) [in-band stage]
+	 *            clockevents_handle_event(proxy_dev)
+	 *                inband_event_handler(proxy_dev)
+	 */
+	arg.setup_proxy = setup_proxy;
+	arg.result = 0;
+	on_each_cpu_mask(cpumask, register_proxy_device, &arg, true);
+	if (arg.result) {
+		mutex_unlock(&proxy_mutex);
+		tick_uninstall_proxy(cpumask);
+		return arg.result;
+	}
+
+	/*
+	 * Start ticking from the out-of-band interrupt stage upon
+	 * receipt of out-of-band timer events.
+	 */
+	stop_machine(enable_oob_timer, NULL, cpumask);
+out:
+	mutex_unlock(&proxy_mutex);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(tick_install_proxy);
+
+static int disable_oob_timer(void *arg) /* hard_irqs_disabled() */
+{
+	struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device);
+	struct clock_event_device *real_dev;
+
+	real_dev = dev->real_device;
+	real_dev->event_handler = dev->__original_handler;
+	real_dev->features &= ~CLOCK_EVT_FEAT_OOB;
+	barrier();
+
+	irq_switch_oob(real_dev->irq, false);
+
+	return 0;
+}
+
+static void unregister_proxy_device(void *arg) /* irqs_disabled() */
+{
+	struct clock_proxy_device *dev = raw_cpu_ptr(&proxy_tick_device);
+
+	if (dev->real_device) {
+		clockevents_unregister_proxy(dev);
+		dev->real_device = NULL;
+	}
+}
+
+void tick_uninstall_proxy(const struct cpumask *cpumask)
+{
+	/*
+	 * Undo all we did in tick_install_proxy(), handing over
+	 * control of the tick device back to the inband code.
+	 */
+	mutex_lock(&proxy_mutex);
+	stop_machine(disable_oob_timer, NULL, cpu_online_mask);
+	on_each_cpu_mask(cpumask, unregister_proxy_device, NULL, true);
+	free_percpu_irq(proxy_tick_irq, &proxy_tick_device);
+	irq_dispose_mapping(proxy_tick_irq);
+	proxy_tick_irq = 0;
+	mutex_unlock(&proxy_mutex);
+}
+EXPORT_SYMBOL_GPL(tick_uninstall_proxy);
+
+void tick_notify_proxy(void)
+{
+	/*
+	 * Schedule a tick on the proxy device to occur from the
+	 * in-band stage, which will trigger proxy_irq_handler() at
+	 * some point (i.e. when the in-band stage is back in control
+	 * and not stalled). Note that we might be called from the
+	 * in-band stage in some cases (see proxy_irq_handler()).
+	 */
+	irq_post_inband(proxy_tick_irq);
+}
+EXPORT_SYMBOL_GPL(tick_notify_proxy);
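
A client of this interface only provides the setup callback wired in by
tick_setup_proxy(); the proxy machinery handles the rest. A hedged sketch
(handler names are illustrative):

	static void my_oob_tick(struct clock_event_device *dev)
	{
		/* Runs on the oob stage on each tick of the real
		 * device: do out-of-band timing work, then relay an
		 * in-band tick when one is due. */
		tick_notify_proxy();
	}

	static void my_setup_proxy(struct clock_proxy_device *dev)
	{
		dev->handle_oob_event = my_oob_tick;
	}

	ret = tick_install_proxy(my_setup_proxy, cpu_online_mask);
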
diff --git a/kernel/kernel/time/vsyscall.c b/kernel/kernel/time/vsyscall.c
index 88e6b8e..2b9b786 100644
--- a/kernel/kernel/time/vsyscall.c
+++ b/kernel/kernel/time/vsyscall.c
@@ -69,15 +69,41 @@
 	vdso_ts->nsec	= tk->tkr_mono.xtime_nsec;
 }
 
+static void update_generic_mmio(struct vdso_data *vdata, struct timekeeper *tk)
+{
+#ifdef CONFIG_GENERIC_CLOCKSOURCE_VDSO
+	const struct clocksource *cs = tk->tkr_mono.clock;
+	u16 seq;
+
+	if (cs->vdso_type == (vdata->cs_type_seq >> 16))
+		return;
+
+	seq = vdata->cs_type_seq;
+	if (++seq == 0)
+		seq = 1;
+
+	vdata->cs_type_seq = cs->vdso_type << 16 | seq;
+
+	if (cs->vdso_type >= CLOCKSOURCE_VDSO_MMIO)
+		snprintf(vdata->cs_mmdev, sizeof(vdata->cs_mmdev),
+			"/dev/ucs/%u", cs->vdso_type - CLOCKSOURCE_VDSO_MMIO);
+#endif
+}
+
 void update_vsyscall(struct timekeeper *tk)
 {
 	struct vdso_data *vdata = __arch_get_k_vdso_data();
 	struct vdso_timestamp *vdso_ts;
+	unsigned long flags;
 	s32 clock_mode;
 	u64 nsec;
 
+	flags = hard_cond_local_irq_save();
+
 	/* copy vsyscall data */
 	vdso_write_begin(vdata);
+
+	update_generic_mmio(vdata, tk);
 
 	clock_mode = tk->tkr_mono.clock->vdso_clock_mode;
 	vdata[CS_HRES_COARSE].clock_mode	= clock_mode;
@@ -110,13 +136,16 @@
 	 * If the current clocksource is not VDSO capable, then spare the
 	 * update of the high reolution parts.
 	 */
-	if (clock_mode != VDSO_CLOCKMODE_NONE)
+	if (IS_ENABLED(CONFIG_GENERIC_CLOCKSOURCE_VDSO) ||
+	    clock_mode != VDSO_CLOCKMODE_NONE)
 		update_vdso_data(vdata, tk);
 
 	__arch_update_vsyscall(vdata, tk);
 
 	vdso_write_end(vdata);
 
+	hard_cond_local_irq_restore(flags);
+
 	__arch_sync_vdso_data(vdata);
 }
 
diff --git a/kernel/kernel/trace/ftrace.c b/kernel/kernel/trace/ftrace.c
index d97c189..1e39489 100644
--- a/kernel/kernel/trace/ftrace.c
+++ b/kernel/kernel/trace/ftrace.c
@@ -6271,10 +6271,10 @@
 	 * reason to cause large interrupt latencies while we do it.
 	 */
 	if (!mod)
-		local_irq_save(flags);
+		flags = hard_local_irq_save();
 	ftrace_update_code(mod, start_pg);
 	if (!mod)
-		local_irq_restore(flags);
+		hard_local_irq_restore(flags);
 	ret = 0;
  out:
 	mutex_unlock(&ftrace_lock);
@@ -6865,9 +6865,9 @@
 	unsigned long count, flags;
 	int ret;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	ret = ftrace_dyn_arch_init();
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 	if (ret)
 		goto failed;
 
@@ -7022,7 +7022,15 @@
 		}
 	} while_for_each_ftrace_op(op);
 out:
-	preempt_enable_notrace();
+	if (irqs_pipelined() && (hard_irqs_disabled() || !running_inband()))
+		/*
+		 * Nothing urgent to schedule here. At latest the
+		 * timer tick will pick up whatever the tracing
+		 * functions kicked off.
+		 */
+		preempt_enable_no_resched_notrace();
+	else
+		preempt_enable_notrace();
 	trace_clear_recursion(bit);
 }
 
diff --git a/kernel/kernel/trace/ring_buffer.c b/kernel/kernel/trace/ring_buffer.c
index 49ebb8c..391151f 100644
--- a/kernel/kernel/trace/ring_buffer.c
+++ b/kernel/kernel/trace/ring_buffer.c
@@ -3165,8 +3165,8 @@
 static __always_inline int
 trace_recursive_lock(struct ring_buffer_per_cpu *cpu_buffer)
 {
-	unsigned int val = cpu_buffer->current_context;
-	unsigned long pc = preempt_count();
+	unsigned int val;
+	unsigned long pc = preempt_count(), flags;
 	int bit;
 
 	if (!(pc & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
@@ -3175,6 +3175,10 @@
 		bit = pc & NMI_MASK ? RB_CTX_NMI :
 			pc & HARDIRQ_MASK ? RB_CTX_IRQ : RB_CTX_SOFTIRQ;
 
+	flags = hard_cond_local_irq_save();
+
+	val = cpu_buffer->current_context;
+
 	if (unlikely(val & (1 << (bit + cpu_buffer->nest)))) {
 		/*
 		 * It is possible that this was called by transitioning
@@ -3182,12 +3186,16 @@
 		 * been updated yet. In this case, use the TRANSITION bit.
 		 */
 		bit = RB_CTX_TRANSITION;
-		if (val & (1 << (bit + cpu_buffer->nest)))
+		if (val & (1 << (bit + cpu_buffer->nest))) {
+			hard_cond_local_irq_restore(flags);
 			return 1;
+		}
 	}
 
 	val |= (1 << (bit + cpu_buffer->nest));
 	cpu_buffer->current_context = val;
+
+	hard_cond_local_irq_restore(flags);
 
 	return 0;
 }
@@ -3195,8 +3203,12 @@
 static __always_inline void
 trace_recursive_unlock(struct ring_buffer_per_cpu *cpu_buffer)
 {
+	unsigned long flags;
+
+	flags = hard_cond_local_irq_save();
 	cpu_buffer->current_context &=
 		cpu_buffer->current_context - (1 << cpu_buffer->nest);
+	hard_cond_local_irq_restore(flags);
 }
 
 /* The recursive locking above uses 5 bits */
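
The pattern used in both hunks fences a non-atomic read-modify-write of
per-CPU state which oob interrupts may now preempt; the hard_cond_*
variants compile away when CONFIG_IRQ_PIPELINE is off, preserving the
vanilla fast path (a condensed sketch of the idiom):

	unsigned long flags;

	flags = hard_cond_local_irq_save(); /* hard irqs off if pipelined */
	val = cpu_buffer->current_context;  /* the RMW below cannot be */
	cpu_buffer->current_context = val | bit; /* torn by an oob IRQ */
	hard_cond_local_irq_restore(flags);
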
diff --git a/kernel/kernel/trace/trace.c b/kernel/kernel/trace/trace.c
index 8b1f74e..f339a56 100644
--- a/kernel/kernel/trace/trace.c
+++ b/kernel/kernel/trace/trace.c
@@ -1129,9 +1129,9 @@
 		return;
 	}
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	update_max_tr(tr, current, smp_processor_id(), cond_data);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void tracing_snapshot_instance(struct trace_array *tr)
@@ -1822,7 +1822,7 @@
 	if (tr->stop_count)
 		return;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON_ONCE(!hard_irqs_disabled());
 
 	if (!tr->allocated_snapshot) {
 		/* Only the nop tracer should hit this when disabling */
@@ -1866,7 +1866,7 @@
 	if (tr->stop_count)
 		return;
 
-	WARN_ON_ONCE(!irqs_disabled());
+	WARN_ON_ONCE(!hard_irqs_disabled());
 	if (!tr->allocated_snapshot) {
 		/* Only the nop tracer should hit this when disabling */
 		WARN_ON_ONCE(tr->current_trace != &nop_trace);
@@ -2626,12 +2626,14 @@
 	entry->flags =
 #ifdef CONFIG_TRACE_IRQFLAGS_SUPPORT
 		(irqs_disabled_flags(flags) ? TRACE_FLAG_IRQS_OFF : 0) |
-#else
+		(hard_irqs_disabled() ? TRACE_FLAG_IRQS_HARDOFF : 0) |
+#elif !defined(CONFIG_IRQ_PIPELINE)
 		TRACE_FLAG_IRQS_NOSUPPORT |
 #endif
 		((pc & NMI_MASK    ) ? TRACE_FLAG_NMI     : 0) |
 		((pc & HARDIRQ_MASK) ? TRACE_FLAG_HARDIRQ : 0) |
 		((pc & SOFTIRQ_OFFSET) ? TRACE_FLAG_SOFTIRQ : 0) |
+		(running_oob() ? TRACE_FLAG_OOB_STAGE : 0) |
 		(tif_need_resched() ? TRACE_FLAG_NEED_RESCHED : 0) |
 		(test_preempt_need_resched() ? TRACE_FLAG_PREEMPT_RESCHED : 0);
 }
@@ -7085,13 +7087,13 @@
 			ret = tracing_alloc_snapshot_instance(tr);
 		if (ret < 0)
 			break;
-		local_irq_disable();
+		hard_local_irq_disable();
 		/* Now, we're going to swap */
 		if (iter->cpu_file == RING_BUFFER_ALL_CPUS)
 			update_max_tr(tr, current, smp_processor_id(), NULL);
 		else
 			update_max_tr_single(tr, current, iter->cpu_file);
-		local_irq_enable();
+		hard_local_irq_enable();
 		break;
 	default:
 		if (tr->allocated_snapshot) {
diff --git a/kernel/kernel/trace/trace.h b/kernel/kernel/trace/trace.h
index 8d67f7f..c838a90 100644
--- a/kernel/kernel/trace/trace.h
+++ b/kernel/kernel/trace/trace.h
@@ -139,11 +139,14 @@
 /*
  * trace_flag_type is an enumeration that holds different
  * states when a trace occurs. These are:
- *  IRQS_OFF		- interrupts were disabled
+ *  IRQS_OFF		- interrupts were off (only virtually if pipelining)
  *  IRQS_NOSUPPORT	- arch does not support irqs_disabled_flags
  *  NEED_RESCHED	- reschedule is requested
  *  HARDIRQ		- inside an interrupt handler
  *  SOFTIRQ		- inside a softirq handler
+ *  IRQS_HARDOFF	- interrupts were hard disabled
+ *  OOB_STAGE		- running over the oob stage (assume IRQ tracing
+ *                        support is always available w/ pipelining).
  */
 enum trace_flag_type {
 	TRACE_FLAG_IRQS_OFF		= 0x01,
@@ -153,6 +156,8 @@
 	TRACE_FLAG_SOFTIRQ		= 0x10,
 	TRACE_FLAG_PREEMPT_RESCHED	= 0x20,
 	TRACE_FLAG_NMI			= 0x40,
+	TRACE_FLAG_IRQS_HARDOFF		= 0x80,
+	TRACE_FLAG_OOB_STAGE		= TRACE_FLAG_IRQS_NOSUPPORT,
 };
 
 #define TRACE_BUF_SIZE		1024
diff --git a/kernel/kernel/trace/trace_branch.c b/kernel/kernel/trace/trace_branch.c
index eff0991..e9e754f 100644
--- a/kernel/kernel/trace/trace_branch.c
+++ b/kernel/kernel/trace/trace_branch.c
@@ -53,7 +53,7 @@
 	if (unlikely(!tr))
 		return;
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 	current->trace_recursion |= TRACE_BRANCH_BIT;
 	data = this_cpu_ptr(tr->array_buffer.data);
 	if (atomic_read(&data->disabled))
@@ -87,7 +87,7 @@
 
  out:
 	current->trace_recursion &= ~TRACE_BRANCH_BIT;
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static inline
diff --git a/kernel/kernel/trace/trace_clock.c b/kernel/kernel/trace/trace_clock.c
index 4702efb..79a4cc1 100644
--- a/kernel/kernel/trace/trace_clock.c
+++ b/kernel/kernel/trace/trace_clock.c
@@ -97,7 +97,7 @@
 	int this_cpu;
 	u64 now, prev_time;
 
-	raw_local_irq_save(flags);
+	flags = hard_local_irq_save();
 
 	this_cpu = raw_smp_processor_id();
 
@@ -139,7 +139,7 @@
 		arch_spin_unlock(&trace_clock_struct.lock);
 	}
  out:
-	raw_local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return now;
 }
diff --git a/kernel/kernel/trace/trace_functions.c b/kernel/kernel/trace/trace_functions.c
index 93e20ed..c4d0338 100644
--- a/kernel/kernel/trace/trace_functions.c
+++ b/kernel/kernel/trace/trace_functions.c
@@ -196,7 +196,7 @@
 	 * Need to use raw, since this must be called before the
 	 * recursive protection is performed.
 	 */
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
@@ -208,7 +208,7 @@
 	}
 
 	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 static struct tracer_opt func_opts[] = {
diff --git a/kernel/kernel/trace/trace_functions_graph.c b/kernel/kernel/trace/trace_functions_graph.c
index 60d6627..8a3d156 100644
--- a/kernel/kernel/trace/trace_functions_graph.c
+++ b/kernel/kernel/trace/trace_functions_graph.c
@@ -169,7 +169,7 @@
 	if (tracing_thresh)
 		return 1;
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
@@ -181,7 +181,7 @@
 	}
 
 	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 
 	return ret;
 }
@@ -250,7 +250,7 @@
 		return;
 	}
 
-	local_irq_save(flags);
+	flags = hard_local_irq_save();
 	cpu = raw_smp_processor_id();
 	data = per_cpu_ptr(tr->array_buffer.data, cpu);
 	disabled = atomic_inc_return(&data->disabled);
@@ -259,7 +259,7 @@
 		__trace_graph_return(tr, trace, flags, pc);
 	}
 	atomic_dec(&data->disabled);
-	local_irq_restore(flags);
+	hard_local_irq_restore(flags);
 }
 
 void set_graph_array(struct trace_array *tr)
diff --git a/kernel/kernel/trace/trace_irqsoff.c b/kernel/kernel/trace/trace_irqsoff.c
index ee4571b..92a816d 100644
--- a/kernel/kernel/trace/trace_irqsoff.c
+++ b/kernel/kernel/trace/trace_irqsoff.c
@@ -14,6 +14,7 @@
 #include <linux/uaccess.h>
 #include <linux/module.h>
 #include <linux/ftrace.h>
+#include <linux/irqstage.h>
 #include <linux/kprobes.h>
 
 #include "trace.h"
@@ -26,7 +27,7 @@
 
 static DEFINE_PER_CPU(int, tracing_cpu);
 
-static DEFINE_RAW_SPINLOCK(max_trace_lock);
+static DEFINE_HARD_SPINLOCK(max_trace_lock);
 
 enum {
 	TRACER_IRQS_OFF		= (1 << 1),
@@ -44,7 +45,7 @@
 static inline int
 preempt_trace(int pc)
 {
-	return ((trace_type & TRACER_PREEMPT_OFF) && pc);
+	return (running_inband() && (trace_type & TRACER_PREEMPT_OFF) && pc);
 }
 #else
 # define preempt_trace(pc) (0)
@@ -55,7 +56,7 @@
 irq_trace(void)
 {
 	return ((trace_type & TRACER_IRQS_OFF) &&
-		irqs_disabled());
+		(hard_irqs_disabled() || (running_inband() && irqs_disabled())));
 }
 #else
 # define irq_trace() (0)
@@ -393,7 +394,7 @@
 	data->preempt_timestamp = ftrace_now(cpu);
 	data->critical_start = parent_ip ? : ip;
 
-	local_save_flags(flags);
+	stage_save_flags(flags);
 
 	__trace_function(tr, ip, parent_ip, flags, pc);
 
@@ -428,7 +429,7 @@
 
 	atomic_inc(&data->disabled);
 
-	local_save_flags(flags);
+	stage_save_flags(flags);
 	__trace_function(tr, ip, parent_ip, flags, pc);
 	check_critical_timing(tr, data, parent_ip ? : ip, cpu);
 	data->critical_start = 0;
diff --git a/kernel/kernel/trace/trace_output.c b/kernel/kernel/trace/trace_output.c
index 000e9dc..3858934 100644
--- a/kernel/kernel/trace/trace_output.c
+++ b/kernel/kernel/trace/trace_output.c
@@ -445,14 +445,19 @@
 	int hardirq;
 	int softirq;
 	int nmi;
+	int oob;
 
 	nmi = entry->flags & TRACE_FLAG_NMI;
 	hardirq = entry->flags & TRACE_FLAG_HARDIRQ;
 	softirq = entry->flags & TRACE_FLAG_SOFTIRQ;
+	oob = irqs_pipelined() && (entry->flags & TRACE_FLAG_OOB_STAGE);
 
 	irqs_off =
+		(entry->flags & (TRACE_FLAG_IRQS_OFF|TRACE_FLAG_IRQS_HARDOFF)) ==
+		(TRACE_FLAG_IRQS_OFF|TRACE_FLAG_IRQS_HARDOFF) ? '*' :
+		(entry->flags & TRACE_FLAG_IRQS_HARDOFF) ? 'D' :
 		(entry->flags & TRACE_FLAG_IRQS_OFF) ? 'd' :
-		(entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
+		!irqs_pipelined() && (entry->flags & TRACE_FLAG_IRQS_NOSUPPORT) ? 'X' :
 		'.';
 
 	switch (entry->flags & (TRACE_FLAG_NEED_RESCHED |
@@ -472,6 +477,8 @@
 	}
 
 	hardsoft_irq =
+		(nmi && oob)  ? '#' :
+		oob           ? '~' :
 		(nmi && hardirq)     ? 'Z' :
 		nmi                  ? 'z' :
 		(hardirq && softirq) ? 'H' :
diff --git a/kernel/kernel/trace/trace_preemptirq.c b/kernel/kernel/trace/trace_preemptirq.c
index 4593f16..42b1790 100644
--- a/kernel/kernel/trace/trace_preemptirq.c
+++ b/kernel/kernel/trace/trace_preemptirq.c
@@ -10,6 +10,7 @@
 #include <linux/module.h>
 #include <linux/ftrace.h>
 #include <linux/kprobes.h>
+#include <linux/irq_pipeline.h>
 #include "trace.h"
 
 #define CREATE_TRACE_POINTS
@@ -133,6 +134,57 @@
 }
 EXPORT_SYMBOL(trace_hardirqs_off_caller);
 NOKPROBE_SYMBOL(trace_hardirqs_off_caller);
+
+#ifdef CONFIG_IRQ_PIPELINE
+
+void trace_hardirqs_off_pipelined(void)
+{
+	WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled());
+
+	if (running_inband())
+		trace_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off_pipelined);
+NOKPROBE_SYMBOL(trace_hardirqs_off_pipelined);
+
+void trace_hardirqs_on_pipelined(void)
+{
+	WARN_ON(irq_pipeline_debug() && !hard_irqs_disabled());
+
+	/*
+	 * If the in-band stage of the kernel is current but the IRQ
+	 * was not delivered because the latter is stalled, keep the
+	 * tracing logic unaware of the receipt, so that no false
+	 * positive is triggered in lockdep (e.g. IN-HARDIRQ-W ->
+	 * HARDIRQ-ON-W).
+	 */
+	if (running_inband() && !irqs_disabled()) {
+		stall_inband();
+		trace_hardirqs_on();
+		unstall_inband_nocheck();
+	}
+}
+EXPORT_SYMBOL(trace_hardirqs_on_pipelined);
+NOKPROBE_SYMBOL(trace_hardirqs_on_pipelined);
+
+#else
+
+void trace_hardirqs_off_pipelined(void)
+{
+	trace_hardirqs_off();
+}
+EXPORT_SYMBOL(trace_hardirqs_off_pipelined);
+NOKPROBE_SYMBOL(trace_hardirqs_off_pipelined);
+
+void trace_hardirqs_on_pipelined(void)
+{
+	trace_hardirqs_on();
+}
+EXPORT_SYMBOL(trace_hardirqs_on_pipelined);
+NOKPROBE_SYMBOL(trace_hardirqs_on_pipelined);
+
+#endif
+
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 #ifdef CONFIG_TRACE_PREEMPT_TOGGLE
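
For context, a minimal sketch of how an architecture's IRQ entry path
would consume these wrappers; the dispatcher name below is hypothetical,
and only the trace_hardirqs_*_pipelined() calls come from this patch:

	/* Entered with hard IRQs off; either stage may be current. */
	void arch_handle_irq(struct pt_regs *regs)
	{
		trace_hardirqs_off_pipelined(); /* traces only if in-band */
		generic_pipeline_irq(regs);     /* hypothetical dispatcher */
		trace_hardirqs_on_pipelined();  /* skipped if stage stalled */
	}
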
diff --git a/kernel/kernel/trace/trace_sched_wakeup.c b/kernel/kernel/trace/trace_sched_wakeup.c
index 97b10bb..f7637f9 100644
--- a/kernel/kernel/trace/trace_sched_wakeup.c
+++ b/kernel/kernel/trace/trace_sched_wakeup.c
@@ -486,7 +486,9 @@
 
 	if (likely(!is_tracing_stopped())) {
 		wakeup_trace->max_latency = delta;
+		hard_local_irq_disable();
 		update_max_tr(wakeup_trace, wakeup_task, wakeup_cpu, NULL);
+		hard_local_irq_enable();
 	}
 
 out_unlock:
diff --git a/kernel/kernel/trace/trace_stack.c b/kernel/kernel/trace/trace_stack.c
index c408423..16392f4 100644
--- a/kernel/kernel/trace/trace_stack.c
+++ b/kernel/kernel/trace/trace_stack.c
@@ -171,8 +171,9 @@
 	if (!object_is_on_stack(stack))
 		return;
 
-	/* Can't do this from NMI context (can cause deadlocks) */
-	if (in_nmi())
+	/* Can't do this from NMI or oob stage contexts (it can
+	 * cause deadlocks) */
+	if (in_nmi() || !running_inband())
 		return;
 
 	local_irq_save(flags);
diff --git a/kernel/kernel/xenomai/Kconfig b/kernel/kernel/xenomai/Kconfig
new file mode 120000
index 0000000..e5347bf
--- /dev/null
+++ b/kernel/kernel/xenomai/Kconfig
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/Makefile b/kernel/kernel/xenomai/Makefile
new file mode 120000
index 0000000..07e1a0e
--- /dev/null
+++ b/kernel/kernel/xenomai/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/arith.c b/kernel/kernel/xenomai/arith.c
new file mode 120000
index 0000000..f4ad084
--- /dev/null
+++ b/kernel/kernel/xenomai/arith.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/bufd.c b/kernel/kernel/xenomai/bufd.c
new file mode 120000
index 0000000..4237c0f
--- /dev/null
+++ b/kernel/kernel/xenomai/bufd.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/clock.c b/kernel/kernel/xenomai/clock.c
new file mode 120000
index 0000000..1f5852f
--- /dev/null
+++ b/kernel/kernel/xenomai/clock.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/debug.c b/kernel/kernel/xenomai/debug.c
new file mode 120000
index 0000000..ecb182b
--- /dev/null
+++ b/kernel/kernel/xenomai/debug.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/debug.h b/kernel/kernel/xenomai/debug.h
new file mode 120000
index 0000000..50a1185
--- /dev/null
+++ b/kernel/kernel/xenomai/debug.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/heap.c b/kernel/kernel/xenomai/heap.c
new file mode 120000
index 0000000..5295d18
--- /dev/null
+++ b/kernel/kernel/xenomai/heap.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/init.c b/kernel/kernel/xenomai/init.c
new file mode 120000
index 0000000..32bd592
--- /dev/null
+++ b/kernel/kernel/xenomai/init.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/init.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/lock.c b/kernel/kernel/xenomai/lock.c
new file mode 120000
index 0000000..7a952d9
--- /dev/null
+++ b/kernel/kernel/xenomai/lock.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/map.c b/kernel/kernel/xenomai/map.c
new file mode 120000
index 0000000..4f5c1ee
--- /dev/null
+++ b/kernel/kernel/xenomai/map.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/map.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipe.c b/kernel/kernel/xenomai/pipe.c
new file mode 120000
index 0000000..bf8638c
--- /dev/null
+++ b/kernel/kernel/xenomai/pipe.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipeline/Makefile b/kernel/kernel/xenomai/pipeline/Makefile
new file mode 120000
index 0000000..d77990a
--- /dev/null
+++ b/kernel/kernel/xenomai/pipeline/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipeline/init.c b/kernel/kernel/xenomai/pipeline/init.c
new file mode 120000
index 0000000..fb9680a
--- /dev/null
+++ b/kernel/kernel/xenomai/pipeline/init.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipeline/intr.c b/kernel/kernel/xenomai/pipeline/intr.c
new file mode 120000
index 0000000..b461d69
--- /dev/null
+++ b/kernel/kernel/xenomai/pipeline/intr.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipeline/kevents.c b/kernel/kernel/xenomai/pipeline/kevents.c
new file mode 120000
index 0000000..ea6afff
--- /dev/null
+++ b/kernel/kernel/xenomai/pipeline/kevents.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipeline/sched.c b/kernel/kernel/xenomai/pipeline/sched.c
new file mode 120000
index 0000000..e64ea1a
--- /dev/null
+++ b/kernel/kernel/xenomai/pipeline/sched.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipeline/syscall.c b/kernel/kernel/xenomai/pipeline/syscall.c
new file mode 120000
index 0000000..0969e5b
--- /dev/null
+++ b/kernel/kernel/xenomai/pipeline/syscall.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/pipeline/tick.c b/kernel/kernel/xenomai/pipeline/tick.c
new file mode 120000
index 0000000..a008287
--- /dev/null
+++ b/kernel/kernel/xenomai/pipeline/tick.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/Makefile b/kernel/kernel/xenomai/posix/Makefile
new file mode 120000
index 0000000..b251962
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/clock.c b/kernel/kernel/xenomai/posix/clock.c
new file mode 120000
index 0000000..a519dae
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/clock.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/clock.h b/kernel/kernel/xenomai/posix/clock.h
new file mode 120000
index 0000000..c22aef0
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/clock.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/compat.c b/kernel/kernel/xenomai/posix/compat.c
new file mode 120000
index 0000000..11292df
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/compat.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/cond.c b/kernel/kernel/xenomai/posix/cond.c
new file mode 120000
index 0000000..50eef82
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/cond.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/cond.h b/kernel/kernel/xenomai/posix/cond.h
new file mode 120000
index 0000000..cfb1e6e
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/cond.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/corectl.c b/kernel/kernel/xenomai/posix/corectl.c
new file mode 120000
index 0000000..0daec86
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/corectl.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/corectl.h b/kernel/kernel/xenomai/posix/corectl.h
new file mode 120000
index 0000000..798a76d
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/corectl.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/event.c b/kernel/kernel/xenomai/posix/event.c
new file mode 120000
index 0000000..0dbd0e8
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/event.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/event.h b/kernel/kernel/xenomai/posix/event.h
new file mode 120000
index 0000000..fd94213
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/event.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/extension.h b/kernel/kernel/xenomai/posix/extension.h
new file mode 120000
index 0000000..12fa756
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/extension.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/gen-syscall-entries.sh b/kernel/kernel/xenomai/posix/gen-syscall-entries.sh
new file mode 120000
index 0000000..51c613a
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/gen-syscall-entries.sh
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/internal.h b/kernel/kernel/xenomai/posix/internal.h
new file mode 120000
index 0000000..90e2524
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/internal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/io.c b/kernel/kernel/xenomai/posix/io.c
new file mode 120000
index 0000000..55b863b
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/io.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/io.h b/kernel/kernel/xenomai/posix/io.h
new file mode 120000
index 0000000..2bafb42
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/io.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/memory.c b/kernel/kernel/xenomai/posix/memory.c
new file mode 120000
index 0000000..5f70b5d
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/memory.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/memory.h b/kernel/kernel/xenomai/posix/memory.h
new file mode 120000
index 0000000..1799ae1
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/memory.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/monitor.c b/kernel/kernel/xenomai/posix/monitor.c
new file mode 120000
index 0000000..7ea9e29
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/monitor.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/monitor.h b/kernel/kernel/xenomai/posix/monitor.h
new file mode 120000
index 0000000..5504660
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/monitor.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/mqueue.c b/kernel/kernel/xenomai/posix/mqueue.c
new file mode 120000
index 0000000..bde04fe
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/mqueue.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/mqueue.h b/kernel/kernel/xenomai/posix/mqueue.h
new file mode 120000
index 0000000..ed5ae91
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/mqueue.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/mutex.c b/kernel/kernel/xenomai/posix/mutex.c
new file mode 120000
index 0000000..24f63a2
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/mutex.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/mutex.h b/kernel/kernel/xenomai/posix/mutex.h
new file mode 120000
index 0000000..996eab0
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/mutex.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/nsem.c b/kernel/kernel/xenomai/posix/nsem.c
new file mode 120000
index 0000000..8f07ded
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/nsem.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/process.c b/kernel/kernel/xenomai/posix/process.c
new file mode 120000
index 0000000..9ce4ea4
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/process.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/process.h b/kernel/kernel/xenomai/posix/process.h
new file mode 120000
index 0000000..ed01222
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/process.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/sched.c b/kernel/kernel/xenomai/posix/sched.c
new file mode 120000
index 0000000..389173c
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/sched.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/sched.h b/kernel/kernel/xenomai/posix/sched.h
new file mode 120000
index 0000000..cd48370
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/sched.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/sem.c b/kernel/kernel/xenomai/posix/sem.c
new file mode 120000
index 0000000..1540be1
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/sem.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/sem.h b/kernel/kernel/xenomai/posix/sem.h
new file mode 120000
index 0000000..134f807
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/sem.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/signal.c b/kernel/kernel/xenomai/posix/signal.c
new file mode 120000
index 0000000..c3a7793
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/signal.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/signal.h b/kernel/kernel/xenomai/posix/signal.h
new file mode 120000
index 0000000..6dfed5d
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/signal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/syscall.c b/kernel/kernel/xenomai/posix/syscall.c
new file mode 120000
index 0000000..5eab046
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/syscall.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/syscall.h b/kernel/kernel/xenomai/posix/syscall.h
new file mode 120000
index 0000000..8760e9b
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/syscall.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/syscall32.c b/kernel/kernel/xenomai/posix/syscall32.c
new file mode 120000
index 0000000..036ba80
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/syscall32.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/syscall32.h b/kernel/kernel/xenomai/posix/syscall32.h
new file mode 120000
index 0000000..7202c21
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/syscall32.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/syscall_entries.h b/kernel/kernel/xenomai/posix/syscall_entries.h
new file mode 100644
index 0000000..8572390
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/syscall_entries.h
@@ -0,0 +1,232 @@
+#define __COBALT_CALL_ENTRIES \
+	__COBALT_CALL_ENTRY(migrate) \
+	__COBALT_CALL_ENTRY(trace) \
+	__COBALT_CALL_ENTRY(ftrace_puts) \
+	__COBALT_CALL_ENTRY(archcall) \
+	__COBALT_CALL_ENTRY(get_current) \
+	__COBALT_CALL_ENTRY(backtrace) \
+	__COBALT_CALL_ENTRY(serialdbg) \
+	__COBALT_CALL_ENTRY(bind) \
+	__COBALT_CALL_ENTRY(extend) \
+	__COBALT_CALL_ENTRY(sched_minprio) \
+	__COBALT_CALL_ENTRY(sched_maxprio) \
+	__COBALT_CALL_ENTRY(sched_yield) \
+	__COBALT_CALL_ENTRY(sched_setconfig_np) \
+	__COBALT_CALL_ENTRY(sched_getconfig_np) \
+	__COBALT_CALL_ENTRY(sched_weightprio) \
+	__COBALT_CALL_ENTRY(sched_setscheduler_ex) \
+	__COBALT_CALL_ENTRY(sched_getscheduler_ex) \
+	__COBALT_CALL_ENTRY(timerfd_create) \
+	__COBALT_CALL_ENTRY(timerfd_settime) \
+	__COBALT_CALL_ENTRY(timerfd_gettime) \
+	__COBALT_CALL_ENTRY(open) \
+	__COBALT_CALL_ENTRY(socket) \
+	__COBALT_CALL_ENTRY(close) \
+	__COBALT_CALL_ENTRY(fcntl) \
+	__COBALT_CALL_ENTRY(ioctl) \
+	__COBALT_CALL_ENTRY(read) \
+	__COBALT_CALL_ENTRY(write) \
+	__COBALT_CALL_ENTRY(recvmsg) \
+	__COBALT_CALL_ENTRY(recvmmsg) \
+	__COBALT_CALL_ENTRY(recvmmsg64) \
+	__COBALT_CALL_ENTRY(sendmsg) \
+	__COBALT_CALL_ENTRY(sendmmsg) \
+	__COBALT_CALL_ENTRY(mmap) \
+	__COBALT_CALL_ENTRY(select) \
+	__COBALT_CALL_ENTRY(sem_init) \
+	__COBALT_CALL_ENTRY(sem_post) \
+	__COBALT_CALL_ENTRY(sem_wait) \
+	__COBALT_CALL_ENTRY(sem_timedwait) \
+	__COBALT_CALL_ENTRY(sem_timedwait64) \
+	__COBALT_CALL_ENTRY(sem_trywait) \
+	__COBALT_CALL_ENTRY(sem_getvalue) \
+	__COBALT_CALL_ENTRY(sem_destroy) \
+	__COBALT_CALL_ENTRY(sem_broadcast_np) \
+	__COBALT_CALL_ENTRY(sem_inquire) \
+	__COBALT_CALL_ENTRY(mutex_check_init) \
+	__COBALT_CALL_ENTRY(mutex_init) \
+	__COBALT_CALL_ENTRY(mutex_destroy) \
+	__COBALT_CALL_ENTRY(mutex_trylock) \
+	__COBALT_CALL_ENTRY(mutex_lock) \
+	__COBALT_CALL_ENTRY(mutex_timedlock) \
+	__COBALT_CALL_ENTRY(mutex_timedlock64) \
+	__COBALT_CALL_ENTRY(mutex_unlock) \
+	__COBALT_CALL_ENTRY(event_init) \
+	__COBALT_CALL_ENTRY(event_wait) \
+	__COBALT_CALL_ENTRY(event_wait64) \
+	__COBALT_CALL_ENTRY(event_sync) \
+	__COBALT_CALL_ENTRY(event_destroy) \
+	__COBALT_CALL_ENTRY(event_inquire) \
+	__COBALT_CALL_ENTRY(mq_notify) \
+	__COBALT_CALL_ENTRY(mq_open) \
+	__COBALT_CALL_ENTRY(mq_close) \
+	__COBALT_CALL_ENTRY(mq_unlink) \
+	__COBALT_CALL_ENTRY(mq_getattr) \
+	__COBALT_CALL_ENTRY(mq_timedsend) \
+	__COBALT_CALL_ENTRY(mq_timedsend64) \
+	__COBALT_CALL_ENTRY(mq_timedreceive) \
+	__COBALT_CALL_ENTRY(mq_timedreceive64) \
+	__COBALT_CALL_ENTRY(sigwait) \
+	__COBALT_CALL_ENTRY(sigtimedwait) \
+	__COBALT_CALL_ENTRY(sigtimedwait64) \
+	__COBALT_CALL_ENTRY(sigwaitinfo) \
+	__COBALT_CALL_ENTRY(sigpending) \
+	__COBALT_CALL_ENTRY(kill) \
+	__COBALT_CALL_ENTRY(sigqueue) \
+	__COBALT_CALL_ENTRY(corectl) \
+	__COBALT_CALL_ENTRY(cond_init) \
+	__COBALT_CALL_ENTRY(cond_destroy) \
+	__COBALT_CALL_ENTRY(cond_wait_prologue) \
+	__COBALT_CALL_ENTRY(cond_wait_epilogue) \
+	__COBALT_CALL_ENTRY(sem_open) \
+	__COBALT_CALL_ENTRY(sem_close) \
+	__COBALT_CALL_ENTRY(sem_unlink) \
+	__COBALT_CALL_ENTRY(monitor_init) \
+	__COBALT_CALL_ENTRY(monitor_enter) \
+	__COBALT_CALL_ENTRY(monitor_wait) \
+	__COBALT_CALL_ENTRY(monitor_wait64) \
+	__COBALT_CALL_ENTRY(monitor_sync) \
+	__COBALT_CALL_ENTRY(monitor_exit) \
+	__COBALT_CALL_ENTRY(monitor_destroy) \
+	__COBALT_CALL_ENTRY(clock_getres) \
+	__COBALT_CALL_ENTRY(clock_getres64) \
+	__COBALT_CALL_ENTRY(clock_gettime) \
+	__COBALT_CALL_ENTRY(clock_gettime64) \
+	__COBALT_CALL_ENTRY(clock_settime) \
+	__COBALT_CALL_ENTRY(clock_settime64) \
+	__COBALT_CALL_ENTRY(clock_adjtime) \
+	__COBALT_CALL_ENTRY(clock_adjtime64) \
+	__COBALT_CALL_ENTRY(clock_nanosleep) \
+	__COBALT_CALL_ENTRY(clock_nanosleep64) \
+	__COBALT_CALL_ENTRY(thread_setschedparam_ex) \
+	__COBALT_CALL_ENTRY(thread_getschedparam_ex) \
+	__COBALT_CALL_ENTRY(thread_setschedprio) \
+	__COBALT_CALL_ENTRY(thread_create) \
+	__COBALT_CALL_ENTRY(thread_setmode) \
+	__COBALT_CALL_ENTRY(thread_setname) \
+	__COBALT_CALL_ENTRY(thread_kill) \
+	__COBALT_CALL_ENTRY(thread_join) \
+	__COBALT_CALL_ENTRY(thread_getpid) \
+	__COBALT_CALL_ENTRY(thread_getstat) \
+	__COBALT_CALL_ENTRY(timer_delete) \
+	__COBALT_CALL_ENTRY(timer_create) \
+	__COBALT_CALL_ENTRY(timer_settime) \
+	__COBALT_CALL_ENTRY(timer_gettime) \
+	__COBALT_CALL_ENTRY(timer_getoverrun) \
+	/* end */
+#define __COBALT_CALL_MODES \
+	__COBALT_MODE(migrate, current) \
+	__COBALT_MODE(trace, current) \
+	__COBALT_MODE(ftrace_puts, current) \
+	__COBALT_MODE(archcall, current) \
+	__COBALT_MODE(get_current, current) \
+	__COBALT_MODE(backtrace, lostage) \
+	__COBALT_MODE(serialdbg, current) \
+	__COBALT_MODE(bind, lostage) \
+	__COBALT_MODE(extend, lostage) \
+	__COBALT_MODE(sched_minprio, current) \
+	__COBALT_MODE(sched_maxprio, current) \
+	__COBALT_MODE(sched_yield, primary) \
+	__COBALT_MODE(sched_setconfig_np, conforming) \
+	__COBALT_MODE(sched_getconfig_np, conforming) \
+	__COBALT_MODE(sched_weightprio, current) \
+	__COBALT_MODE(sched_setscheduler_ex, conforming) \
+	__COBALT_MODE(sched_getscheduler_ex, current) \
+	__COBALT_MODE(timerfd_create, lostage) \
+	__COBALT_MODE(timerfd_settime, primary) \
+	__COBALT_MODE(timerfd_gettime, current) \
+	__COBALT_MODE(open, lostage) \
+	__COBALT_MODE(socket, lostage) \
+	__COBALT_MODE(close, lostage) \
+	__COBALT_MODE(fcntl, current) \
+	__COBALT_MODE(ioctl, handover) \
+	__COBALT_MODE(read, handover) \
+	__COBALT_MODE(write, handover) \
+	__COBALT_MODE(recvmsg, handover) \
+	__COBALT_MODE(recvmmsg, primary) \
+	__COBALT_MODE(recvmmsg64, primary) \
+	__COBALT_MODE(sendmsg, handover) \
+	__COBALT_MODE(sendmmsg, primary) \
+	__COBALT_MODE(mmap, lostage) \
+	__COBALT_MODE(select, primary) \
+	__COBALT_MODE(sem_init, current) \
+	__COBALT_MODE(sem_post, current) \
+	__COBALT_MODE(sem_wait, primary) \
+	__COBALT_MODE(sem_timedwait, primary) \
+	__COBALT_MODE(sem_timedwait64, primary) \
+	__COBALT_MODE(sem_trywait, primary) \
+	__COBALT_MODE(sem_getvalue, current) \
+	__COBALT_MODE(sem_destroy, current) \
+	__COBALT_MODE(sem_broadcast_np, current) \
+	__COBALT_MODE(sem_inquire, current) \
+	__COBALT_MODE(mutex_check_init, current) \
+	__COBALT_MODE(mutex_init, current) \
+	__COBALT_MODE(mutex_destroy, current) \
+	__COBALT_MODE(mutex_trylock, primary) \
+	__COBALT_MODE(mutex_lock, primary) \
+	__COBALT_MODE(mutex_timedlock, primary) \
+	__COBALT_MODE(mutex_timedlock64, primary) \
+	__COBALT_MODE(mutex_unlock, nonrestartable) \
+	__COBALT_MODE(event_init, current) \
+	__COBALT_MODE(event_wait, primary) \
+	__COBALT_MODE(event_wait64, primary) \
+	__COBALT_MODE(event_sync, current) \
+	__COBALT_MODE(event_destroy, current) \
+	__COBALT_MODE(event_inquire, current) \
+	__COBALT_MODE(mq_notify, primary) \
+	__COBALT_MODE(mq_open, lostage) \
+	__COBALT_MODE(mq_close, lostage) \
+	__COBALT_MODE(mq_unlink, lostage) \
+	__COBALT_MODE(mq_getattr, current) \
+	__COBALT_MODE(mq_timedsend, primary) \
+	__COBALT_MODE(mq_timedsend64, primary) \
+	__COBALT_MODE(mq_timedreceive, primary) \
+	__COBALT_MODE(mq_timedreceive64, primary) \
+	__COBALT_MODE(sigwait, primary) \
+	__COBALT_MODE(sigtimedwait, nonrestartable) \
+	__COBALT_MODE(sigtimedwait64, nonrestartable) \
+	__COBALT_MODE(sigwaitinfo, nonrestartable) \
+	__COBALT_MODE(sigpending, primary) \
+	__COBALT_MODE(kill, conforming) \
+	__COBALT_MODE(sigqueue, conforming) \
+	__COBALT_MODE(corectl, probing) \
+	__COBALT_MODE(cond_init, current) \
+	__COBALT_MODE(cond_destroy, current) \
+	__COBALT_MODE(cond_wait_prologue, nonrestartable) \
+	__COBALT_MODE(cond_wait_epilogue, primary) \
+	__COBALT_MODE(sem_open, lostage) \
+	__COBALT_MODE(sem_close, lostage) \
+	__COBALT_MODE(sem_unlink, lostage) \
+	__COBALT_MODE(monitor_init, current) \
+	__COBALT_MODE(monitor_enter, primary) \
+	__COBALT_MODE(monitor_wait, nonrestartable) \
+	__COBALT_MODE(monitor_wait64, nonrestartable) \
+	__COBALT_MODE(monitor_sync, nonrestartable) \
+	__COBALT_MODE(monitor_exit, primary) \
+	__COBALT_MODE(monitor_destroy, primary) \
+	__COBALT_MODE(clock_getres, current) \
+	__COBALT_MODE(clock_getres64, current) \
+	__COBALT_MODE(clock_gettime, current) \
+	__COBALT_MODE(clock_gettime64, current) \
+	__COBALT_MODE(clock_settime, current) \
+	__COBALT_MODE(clock_settime64, current) \
+	__COBALT_MODE(clock_adjtime, current) \
+	__COBALT_MODE(clock_adjtime64, current) \
+	__COBALT_MODE(clock_nanosleep, primary) \
+	__COBALT_MODE(clock_nanosleep64, primary) \
+	__COBALT_MODE(thread_setschedparam_ex, conforming) \
+	__COBALT_MODE(thread_getschedparam_ex, current) \
+	__COBALT_MODE(thread_setschedprio, conforming) \
+	__COBALT_MODE(thread_create, init) \
+	__COBALT_MODE(thread_setmode, primary) \
+	__COBALT_MODE(thread_setname, current) \
+	__COBALT_MODE(thread_kill, conforming) \
+	__COBALT_MODE(thread_join, primary) \
+	__COBALT_MODE(thread_getpid, current) \
+	__COBALT_MODE(thread_getstat, current) \
+	__COBALT_MODE(timer_delete, current) \
+	__COBALT_MODE(timer_create, current) \
+	__COBALT_MODE(timer_settime, primary) \
+	__COBALT_MODE(timer_gettime, current) \
+	__COBALT_MODE(timer_getoverrun, current) \
+	/* end */
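
Both lists follow the X-macro pattern: a consumer includes this generated
header and defines __COBALT_CALL_ENTRY() and/or __COBALT_MODE() to stamp
out its own tables. A minimal sketch of one plausible consumer follows;
the handler type and the sc_cobalt_*/CoBaLt_* names are assumptions here,
as the real expansion lives in the Cobalt syscall dispatch code:

	typedef int (*cobalt_syshand)(unsigned long arg1, ...);

	/* Build a handler table indexed by sc_cobalt_* syscall numbers. */
	#define __COBALT_CALL_ENTRY(__name) \
		[sc_cobalt_ ## __name] = (cobalt_syshand)CoBaLt_ ## __name,

	static const cobalt_syshand cobalt_syscalls[] = {
		__COBALT_CALL_ENTRIES
	};
	#undef __COBALT_CALL_ENTRY
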
diff --git a/kernel/kernel/xenomai/posix/thread.c b/kernel/kernel/xenomai/posix/thread.c
new file mode 120000
index 0000000..b163b92
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/thread.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/thread.h b/kernel/kernel/xenomai/posix/thread.h
new file mode 120000
index 0000000..e887d4f
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/thread.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/timer.c b/kernel/kernel/xenomai/posix/timer.c
new file mode 120000
index 0000000..237cc02
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/timer.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/timer.h b/kernel/kernel/xenomai/posix/timer.h
new file mode 120000
index 0000000..dd06406
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/timer.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/timerfd.c b/kernel/kernel/xenomai/posix/timerfd.c
new file mode 120000
index 0000000..f76a00a
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/timerfd.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/posix/timerfd.h b/kernel/kernel/xenomai/posix/timerfd.h
new file mode 120000
index 0000000..7d95e73
--- /dev/null
+++ b/kernel/kernel/xenomai/posix/timerfd.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/procfs.c b/kernel/kernel/xenomai/procfs.c
new file mode 120000
index 0000000..4056fd1
--- /dev/null
+++ b/kernel/kernel/xenomai/procfs.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/procfs.h b/kernel/kernel/xenomai/procfs.h
new file mode 120000
index 0000000..fd1d435
--- /dev/null
+++ b/kernel/kernel/xenomai/procfs.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/registry.c b/kernel/kernel/xenomai/registry.c
new file mode 120000
index 0000000..d33780e
--- /dev/null
+++ b/kernel/kernel/xenomai/registry.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/rtdm/Makefile b/kernel/kernel/xenomai/rtdm/Makefile
new file mode 120000
index 0000000..ba00841
--- /dev/null
+++ b/kernel/kernel/xenomai/rtdm/Makefile
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/rtdm/core.c b/kernel/kernel/xenomai/rtdm/core.c
new file mode 120000
index 0000000..bb434a7
--- /dev/null
+++ b/kernel/kernel/xenomai/rtdm/core.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/rtdm/device.c b/kernel/kernel/xenomai/rtdm/device.c
new file mode 120000
index 0000000..4fc6518
--- /dev/null
+++ b/kernel/kernel/xenomai/rtdm/device.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/rtdm/drvlib.c b/kernel/kernel/xenomai/rtdm/drvlib.c
new file mode 120000
index 0000000..2583b7b
--- /dev/null
+++ b/kernel/kernel/xenomai/rtdm/drvlib.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/rtdm/fd.c b/kernel/kernel/xenomai/rtdm/fd.c
new file mode 120000
index 0000000..3ea703a
--- /dev/null
+++ b/kernel/kernel/xenomai/rtdm/fd.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/rtdm/internal.h b/kernel/kernel/xenomai/rtdm/internal.h
new file mode 120000
index 0000000..7a64daa
--- /dev/null
+++ b/kernel/kernel/xenomai/rtdm/internal.h
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/rtdm/wrappers.c b/kernel/kernel/xenomai/rtdm/wrappers.c
new file mode 120000
index 0000000..75b2d7a
--- /dev/null
+++ b/kernel/kernel/xenomai/rtdm/wrappers.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/sched-idle.c b/kernel/kernel/xenomai/sched-idle.c
new file mode 120000
index 0000000..696b902
--- /dev/null
+++ b/kernel/kernel/xenomai/sched-idle.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/sched-quota.c b/kernel/kernel/xenomai/sched-quota.c
new file mode 120000
index 0000000..7069986
--- /dev/null
+++ b/kernel/kernel/xenomai/sched-quota.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/sched-rt.c b/kernel/kernel/xenomai/sched-rt.c
new file mode 120000
index 0000000..598d169
--- /dev/null
+++ b/kernel/kernel/xenomai/sched-rt.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/sched-sporadic.c b/kernel/kernel/xenomai/sched-sporadic.c
new file mode 120000
index 0000000..e9c647e
--- /dev/null
+++ b/kernel/kernel/xenomai/sched-sporadic.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/sched-tp.c b/kernel/kernel/xenomai/sched-tp.c
new file mode 120000
index 0000000..63f2fd3
--- /dev/null
+++ b/kernel/kernel/xenomai/sched-tp.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/sched-weak.c b/kernel/kernel/xenomai/sched-weak.c
new file mode 120000
index 0000000..795eb2b
--- /dev/null
+++ b/kernel/kernel/xenomai/sched-weak.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/sched.c b/kernel/kernel/xenomai/sched.c
new file mode 120000
index 0000000..501961b
--- /dev/null
+++ b/kernel/kernel/xenomai/sched.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/select.c b/kernel/kernel/xenomai/select.c
new file mode 120000
index 0000000..df93574
--- /dev/null
+++ b/kernel/kernel/xenomai/select.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/select.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/synch.c b/kernel/kernel/xenomai/synch.c
new file mode 120000
index 0000000..9ac2df7
--- /dev/null
+++ b/kernel/kernel/xenomai/synch.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/thread.c b/kernel/kernel/xenomai/thread.c
new file mode 120000
index 0000000..339f26e
--- /dev/null
+++ b/kernel/kernel/xenomai/thread.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/time.c b/kernel/kernel/xenomai/time.c
new file mode 120000
index 0000000..c414ee1
--- /dev/null
+++ b/kernel/kernel/xenomai/time.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/time.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/timer.c b/kernel/kernel/xenomai/timer.c
new file mode 120000
index 0000000..2aad719
--- /dev/null
+++ b/kernel/kernel/xenomai/timer.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/tree.c b/kernel/kernel/xenomai/tree.c
new file mode 120000
index 0000000..64e1e4e
--- /dev/null
+++ b/kernel/kernel/xenomai/tree.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c
\ No newline at end of file
diff --git a/kernel/kernel/xenomai/vfile.c b/kernel/kernel/xenomai/vfile.c
new file mode 120000
index 0000000..4d992e0
--- /dev/null
+++ b/kernel/kernel/xenomai/vfile.c
@@ -0,0 +1 @@
+/home/data/hc/rk3588_linux/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c
\ No newline at end of file
diff --git a/kernel/lib/Kconfig.debug b/kernel/lib/Kconfig.debug
index 8608332..77c03b3 100644
--- a/kernel/lib/Kconfig.debug
+++ b/kernel/lib/Kconfig.debug
@@ -897,6 +897,38 @@
 	  is currently disabled). Drivers need to handle this correctly. Some
 	  don't and need to be caught.
 
+config DEBUG_IRQ_PIPELINE
+	bool "Debug IRQ pipeline"
+	depends on IRQ_PIPELINE && DEBUG_KERNEL
+	help
+	  Turn on this option to enable debug checks related to
+	  interrupt pipelining, such as interrupt state consistency and
+	  proper context isolation between the in-band and oob stages.
+
+	  If unsure, say N.
+
+config IRQ_PIPELINE_TORTURE_TEST
+	bool "Torture tests for IRQ pipeline"
+	depends on DEBUG_IRQ_PIPELINE
+	select TORTURE_TEST
+	default n
+	help
+	  This option provides a kernel module that runs torture tests
+	  on the IRQ pipeline mechanism.
+
+	  Say Y here if you want the IRQ pipeline torture tests to run
+	  when the kernel starts. Say N if you are unsure.
+
+config DEBUG_DOVETAIL
+	bool "Debug Dovetail interface"
+	depends on DOVETAIL && DEBUG_KERNEL
+	select DEBUG_IRQ_PIPELINE
+	help
+	  Turn on this option to enable debug checks related to
+	  running a dual kernel configuration, a.k.a. dovetailing. This
+	  option implicitly enables the interrupt pipeline debugging
+	  features.
+
 menu "Debug Oops, Lockups and Hangs"
 
 config PANIC_ON_OOPS
@@ -1315,6 +1347,27 @@
 	 spin_lock_init()/mutex_init()/etc., or whether there is any lock
 	 held during task exit.
 
+config DEBUG_HARD_LOCKS
+	bool "Debug hard spinlocks"
+	depends on DEBUG_IRQ_PIPELINE && LOCKDEP && EXPERT
+	help
+	  Turn on this option to enable LOCKDEP for the hard spinlock
+	  types used in interrupt pipelining.
+
+	  Keep in mind that enabling this feature will ruin the
+	  latency figures for any out-of-band code; it is merely
+	  useful for proving the correctness of the locking scheme of
+	  such code, without any consideration for real-time
+	  guarantees. You have been warned.
+
+	  If unsure, say N.
+
+if DEBUG_HARD_LOCKS
+comment "WARNING! DEBUG_HARD_LOCKS induces **massive** latency"
+comment "overhead for the code running on the out-of-band"
+comment "interrupt stage."
+endif
+
 config LOCKDEP
 	bool
 	depends on DEBUG_KERNEL && LOCK_DEBUGGING_SUPPORT
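
Assuming IRQ_PIPELINE and DOVETAIL are already enabled in the tree, a
debug build would typically select the new options through a .config
fragment such as the one below, leaving DEBUG_HARD_LOCKS off per the
latency warning above:

	CONFIG_DEBUG_IRQ_PIPELINE=y
	CONFIG_DEBUG_DOVETAIL=y
	# CONFIG_IRQ_PIPELINE_TORTURE_TEST is not set
	# CONFIG_DEBUG_HARD_LOCKS is not set
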
diff --git a/kernel/lib/atomic64.c b/kernel/lib/atomic64.c
index e98c85a..bf7d040 100644
--- a/kernel/lib/atomic64.c
+++ b/kernel/lib/atomic64.c
@@ -25,15 +25,15 @@
  * Ensure each lock is in a separate cacheline.
  */
 static union {
-	raw_spinlock_t lock;
+	hard_spinlock_t lock;
 	char pad[L1_CACHE_BYTES];
 } atomic64_lock[NR_LOCKS] __cacheline_aligned_in_smp = {
 	[0 ... (NR_LOCKS - 1)] = {
-		.lock =  __RAW_SPIN_LOCK_UNLOCKED(atomic64_lock.lock),
+		.lock =  __HARD_SPIN_LOCK_INITIALIZER(atomic64_lock.lock),
 	},
 };
 
-static inline raw_spinlock_t *lock_addr(const atomic64_t *v)
+static inline hard_spinlock_t *lock_addr(const atomic64_t *v)
 {
 	unsigned long addr = (unsigned long) v;
 
@@ -45,7 +45,7 @@
 s64 atomic64_read(const atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -58,7 +58,7 @@
 void atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 
 	raw_spin_lock_irqsave(lock, flags);
 	v->counter = i;
@@ -70,7 +70,7 @@
 void atomic64_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	hard_spinlock_t *lock = lock_addr(v);				\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
 	v->counter c_op a;						\
@@ -82,7 +82,7 @@
 s64 atomic64_##op##_return(s64 a, atomic64_t *v)			\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	hard_spinlock_t *lock = lock_addr(v);				\
 	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
@@ -96,7 +96,7 @@
 s64 atomic64_fetch_##op(s64 a, atomic64_t *v)				\
 {									\
 	unsigned long flags;						\
-	raw_spinlock_t *lock = lock_addr(v);				\
+	hard_spinlock_t *lock = lock_addr(v);				\
 	s64 val;							\
 									\
 	raw_spin_lock_irqsave(lock, flags);				\
@@ -133,7 +133,7 @@
 s64 atomic64_dec_if_positive(atomic64_t *v)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -148,7 +148,7 @@
 s64 atomic64_cmpxchg(atomic64_t *v, s64 o, s64 n)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -163,7 +163,7 @@
 s64 atomic64_xchg(atomic64_t *v, s64 new)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
@@ -177,7 +177,7 @@
 s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
 {
 	unsigned long flags;
-	raw_spinlock_t *lock = lock_addr(v);
+	hard_spinlock_t *lock = lock_addr(v);
 	s64 val;
 
 	raw_spin_lock_irqsave(lock, flags);
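
The point of the type switch: on a pipelined kernel, taking a
hard_spinlock_t via raw_spin_lock_irqsave() masks interrupts in the CPU
instead of merely stalling the in-band stage, so these generic atomic64
fallbacks stay usable from oob context as well. The same pattern in
driver code, sketched below with illustrative names:

	static DEFINE_HARD_SPINLOCK(stats_lock);	/* oob-safe lock */

	void bump_counter(unsigned long *counter)
	{
		unsigned long flags;

		/* Masks hard (physical) IRQs when CONFIG_IRQ_PIPELINE=y. */
		raw_spin_lock_irqsave(&stats_lock, flags);
		(*counter)++;
		raw_spin_unlock_irqrestore(&stats_lock, flags);
	}
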
diff --git a/kernel/lib/dump_stack.c b/kernel/lib/dump_stack.c
index b9acd9c..aed74e3 100644
--- a/kernel/lib/dump_stack.c
+++ b/kernel/lib/dump_stack.c
@@ -9,9 +9,11 @@
 #include <linux/sched.h>
 #include <linux/sched/debug.h>
 #include <linux/smp.h>
+#include <linux/irqstage.h>
 #include <linux/atomic.h>
 #include <linux/kexec.h>
 #include <linux/utsname.h>
+#include <linux/hardirq.h>
 
 static char dump_stack_arch_desc_str[128];
 
@@ -56,6 +58,11 @@
 		printk("%sHardware name: %s\n",
 		       log_lvl, dump_stack_arch_desc_str);
 
+#ifdef CONFIG_IRQ_PIPELINE
+	printk("%sIRQ stage: %s\n",
+	       log_lvl, current_irq_stage->name);
+#endif
+
 	print_worker_info(log_lvl, current);
 }
 
@@ -85,6 +92,29 @@
 #ifdef CONFIG_SMP
 static atomic_t dump_lock = ATOMIC_INIT(-1);
 
+static unsigned long disable_local_irqs(void)
+{
+	unsigned long flags = 0; /* only to silence UMR (uninitialized read) detection */
+
+	/*
+	 * We neither need nor want to disable in-band IRQs over the
+	 * oob stage, where CPU migration can't happen. Conversely, we
+	 * neither need nor want to disable hard IRQs from the oob
+	 * stage, so that latency won't skyrocket as a result of
+	 * dumping the stack backtrace.
+	 */
+	if (running_inband() && !on_pipeline_entry())
+		local_irq_save(flags);
+
+	return flags;
+}
+
+static void restore_local_irqs(unsigned long flags)
+{
+	if (running_inband() && !on_pipeline_entry())
+		local_irq_restore(flags);
+}
+
 asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
 {
 	unsigned long flags;
@@ -97,7 +127,7 @@
 	 * against other CPUs
 	 */
 retry:
-	local_irq_save(flags);
+	flags = disable_local_irqs();
 	cpu = smp_processor_id();
 	old = atomic_cmpxchg(&dump_lock, -1, cpu);
 	if (old == -1) {
@@ -105,7 +135,7 @@
 	} else if (old == cpu) {
 		was_locked = 1;
 	} else {
-		local_irq_restore(flags);
+		restore_local_irqs(flags);
 		/*
 		 * Wait for the lock to release before jumping to
 		 * atomic_cmpxchg() in order to mitigate the thundering herd
@@ -120,7 +150,7 @@
 	if (!was_locked)
 		atomic_set(&dump_lock, -1);
 
-	local_irq_restore(flags);
+	restore_local_irqs(flags);
 }
 #else
 asmlinkage __visible void dump_stack_lvl(const char *log_lvl)
diff --git a/kernel/lib/smp_processor_id.c b/kernel/lib/smp_processor_id.c
index 2916606..952e2ad 100644
--- a/kernel/lib/smp_processor_id.c
+++ b/kernel/lib/smp_processor_id.c
@@ -7,12 +7,16 @@
 #include <linux/export.h>
 #include <linux/kprobes.h>
 #include <linux/sched.h>
+#include <linux/irqstage.h>
 
 noinstr static
 unsigned int check_preemption_disabled(const char *what1, const char *what2)
 {
 	int this_cpu = raw_smp_processor_id();
 
+	if (hard_irqs_disabled() || !running_inband())
+		goto out;
+
 	if (likely(preempt_count()))
 		goto out;
 
diff --git a/kernel/lib/vdso/Kconfig b/kernel/lib/vdso/Kconfig
index d883ac2..7b327e1 100644
--- a/kernel/lib/vdso/Kconfig
+++ b/kernel/lib/vdso/Kconfig
@@ -30,4 +30,12 @@
 	  Selected by architectures which support time namespaces in the
 	  VDSO
 
+config GENERIC_CLOCKSOURCE_VDSO
+	bool
+	depends on ARM || ARM64
+	select CLKSRC_MMIO
+	help
+	  Enables access to clocksources via the vDSO based on
+	  generic MMIO operations.
+
 endif
diff --git a/kernel/lib/vdso/gettimeofday.c b/kernel/lib/vdso/gettimeofday.c
index c6f6dee..57a5627 100644
--- a/kernel/lib/vdso/gettimeofday.c
+++ b/kernel/lib/vdso/gettimeofday.c
@@ -5,6 +5,245 @@
 #include <vdso/datapage.h>
 #include <vdso/helpers.h>
 
+static int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
+			struct __kernel_timespec *ts);
+
+#ifndef vdso_clocksource_ok
+static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
+{
+	return vd->clock_mode != VDSO_CLOCKMODE_NONE;
+}
+#endif
+
+#ifndef vdso_cycles_ok
+static inline bool vdso_cycles_ok(u64 cycles)
+{
+	return true;
+}
+#endif
+
+#if defined(CONFIG_GENERIC_CLOCKSOURCE_VDSO) && !defined(BUILD_VDSO32)
+
+#include <linux/fcntl.h>
+#include <linux/io.h>
+#include <linux/ioctl.h>
+#include <uapi/linux/clocksource.h>
+
+static notrace u64 readl_mmio_up(const struct clksrc_info *vinfo)
+{
+	const struct clksrc_user_mmio_info *info = &vinfo->mmio;
+	return readl_relaxed(info->reg_lower);
+}
+
+static notrace u64 readl_mmio_down(const struct clksrc_info *vinfo)
+{
+	const struct clksrc_user_mmio_info *info = &vinfo->mmio;
+	return ~(u64)readl_relaxed(info->reg_lower) & info->mask_lower;
+}
+
+static notrace u64 readw_mmio_up(const struct clksrc_info *vinfo)
+{
+	const struct clksrc_user_mmio_info *info = &vinfo->mmio;
+	return readw_relaxed(info->reg_lower);
+}
+
+static notrace u64 readw_mmio_down(const struct clksrc_info *vinfo)
+{
+	const struct clksrc_user_mmio_info *info = &vinfo->mmio;
+	return ~(u64)readw_relaxed(info->reg_lower) & info->mask_lower;
+}
+
+static notrace u64 readl_dmmio_up(const struct clksrc_info *vinfo)
+{
+	const struct clksrc_user_mmio_info *info = &vinfo->mmio;
+	void __iomem *reg_lower, *reg_upper;
+	u32 upper, old_upper, lower;
+
+	reg_lower = info->reg_lower;
+	reg_upper = info->reg_upper;
+
+	upper = readl_relaxed(reg_upper);
+	do {
+		old_upper = upper;
+		lower = readl_relaxed(reg_lower);
+		upper = readl_relaxed(reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << info->bits_lower) | lower;
+}
+
+static notrace u64 readw_dmmio_up(const struct clksrc_info *vinfo)
+{
+	const struct clksrc_user_mmio_info *info = &vinfo->mmio;
+	void __iomem *reg_lower, *reg_upper;
+	u16 upper, old_upper, lower;
+
+	reg_lower = info->reg_lower;
+	reg_upper = info->reg_upper;
+
+	upper = readw_relaxed(reg_upper);
+	do {
+		old_upper = upper;
+		lower = readw_relaxed(reg_lower);
+		upper = readw_relaxed(reg_upper);
+	} while (upper != old_upper);
+
+	return (((u64)upper) << info->bits_lower) | lower;
+}
+
+static notrace __cold vdso_read_cycles_t *get_mmio_read_cycles(unsigned int type)
+{
+	switch (type) {
+	case CLKSRC_MMIO_L_UP:
+		return &readl_mmio_up;
+	case CLKSRC_MMIO_L_DOWN:
+		return &readl_mmio_down;
+	case CLKSRC_MMIO_W_UP:
+		return &readw_mmio_up;
+	case CLKSRC_MMIO_W_DOWN:
+		return &readw_mmio_down;
+	case CLKSRC_DMMIO_L_UP:
+		return &readl_dmmio_up;
+	case CLKSRC_DMMIO_W_UP:
+		return &readw_dmmio_up;
+	default:
+		return NULL;
+	}
+}
+
+static __always_inline u16 to_cs_type(u32 cs_type_seq)
+{
+	return cs_type_seq >> 16;
+}
+
+static __always_inline u16 to_seq(u32 cs_type_seq)
+{
+	return cs_type_seq;
+}
+
+static __always_inline u32 to_cs_type_seq(u16 type, u16 seq)
+{
+	return (u32)type << 16U | seq;
+}
+
+static notrace noinline __cold
+void map_clocksource(const struct vdso_data *vd, struct vdso_priv *vp,
+		     u32 seq, u32 new_cs_type_seq)
+{
+	vdso_read_cycles_t *read_cycles = NULL;
+	u32 new_cs_seq, new_cs_type;
+	struct clksrc_info *info;
+	int fd, ret;
+
+	new_cs_seq = to_seq(new_cs_type_seq);
+	new_cs_type = to_cs_type(new_cs_type_seq);
+	info = &vp->clksrc_info[new_cs_type];
+
+	if (new_cs_type < CLOCKSOURCE_VDSO_MMIO)
+		goto done;
+
+	fd = clock_open_device(vd->cs_mmdev, O_RDONLY);
+	if (fd < 0)
+		goto fallback_to_syscall;
+
+	if (vdso_read_retry(vd, seq)) {
+		vdso_read_begin(vd);
+		if (to_seq(vd->cs_type_seq) != new_cs_seq) {
+			/*
+			 * cs_mmdev no longer corresponds to
+			 * vd->cs_type_seq.
+			 */
+			clock_close_device(fd);
+			return;
+		}
+	}
+
+	ret = clock_ioctl_device(fd, CLKSRC_USER_MMIO_MAP, (long)&info->mmio);
+	clock_close_device(fd);
+	if (ret < 0)
+		goto fallback_to_syscall;
+
+	read_cycles = get_mmio_read_cycles(info->mmio.type);
+	if (read_cycles == NULL) /* Mmhf, misconfigured. */
+		goto fallback_to_syscall;
+done:
+	info->read_cycles = read_cycles;
+	smp_wmb();
+	new_cs_type_seq = to_cs_type_seq(new_cs_type, new_cs_seq);
+	WRITE_ONCE(vp->current_cs_type_seq, new_cs_type_seq);
+
+	return;
+
+fallback_to_syscall:
+	new_cs_type = CLOCKSOURCE_VDSO_NONE;
+	info = &vp->clksrc_info[new_cs_type];
+	goto done;
+}
+
+static inline notrace
+bool get_hw_counter(const struct vdso_data *vd, u32 *r_seq, u64 *cycles)
+{
+	const struct clksrc_info *info;
+	struct vdso_priv *vp;
+	u32 seq, cs_type_seq;
+	unsigned int cs;
+
+	vp = __arch_get_vdso_priv();
+
+	for (;;) {
+		seq = vdso_read_begin(vd);
+		cs_type_seq = READ_ONCE(vp->current_cs_type_seq);
+		if (likely(to_seq(cs_type_seq) == to_seq(vd->cs_type_seq)))
+			break;
+
+		map_clocksource(vd, vp, seq, vd->cs_type_seq);
+	}
+
+	switch (to_cs_type(cs_type_seq)) {
+	case CLOCKSOURCE_VDSO_NONE:
+		return false; /* Use fallback. */
+	case CLOCKSOURCE_VDSO_ARCHITECTED:
+		if (unlikely(!vdso_clocksource_ok(vd)))
+			return false;
+		*cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+		if (unlikely(!vdso_cycles_ok(*cycles)))
+			return false;
+		break;
+	default:
+		cs = to_cs_type(READ_ONCE(cs_type_seq));
+		info = &vp->clksrc_info[cs];
+		*cycles = info->read_cycles(info);
+		break;
+	}
+
+	*r_seq = seq;
+
+	return true;
+}
+
+#else
+
+static inline notrace
+bool get_hw_counter(const struct vdso_data *vd, u32 *r_seq, u64 *cycles)
+{
+	*r_seq = vdso_read_begin(vd);
+
+	/*
+	 * CAUTION: checking the clocksource mode must happen inside
+	 * the seqlocked section.
+	 */
+	if (unlikely(!vdso_clocksource_ok(vd)))
+		return false;
+
+	*cycles = __arch_get_hw_counter(vd->clock_mode, vd);
+	if (unlikely(!vdso_cycles_ok(*cycles)))
+		return false;
+
+	return true;
+}
+
+#endif /* CONFIG_GENERIC_CLOCKSOURCE_VDSO */
+
 #ifndef vdso_calc_delta
 /*
  * Default implementation which works for all sane clocksources. That
@@ -31,20 +270,6 @@
 }
 #endif
 
-#ifndef vdso_clocksource_ok
-static inline bool vdso_clocksource_ok(const struct vdso_data *vd)
-{
-	return vd->clock_mode != VDSO_CLOCKMODE_NONE;
-}
-#endif
-
-#ifndef vdso_cycles_ok
-static inline bool vdso_cycles_ok(u64 cycles)
-{
-	return true;
-}
-#endif
-
 #ifdef CONFIG_TIME_NS
 static __always_inline int do_hres_timens(const struct vdso_data *vdns, clockid_t clk,
 					  struct __kernel_timespec *ts)
@@ -63,13 +288,7 @@
 	vdso_ts = &vd->basetime[clk];
 
 	do {
-		seq = vdso_read_begin(vd);
-
-		if (unlikely(!vdso_clocksource_ok(vd)))
-			return -1;
-
-		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
-		if (unlikely(!vdso_cycles_ok(cycles)))
+		if (!get_hw_counter(vd, &seq, &cycles))
 			return -1;
 		ns = vdso_ts->nsec;
 		last = vd->cycle_last;
@@ -117,30 +336,29 @@
 
 	do {
 		/*
-		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time namespace
-		 * enabled tasks have a special VVAR page installed which
-		 * has vd->seq set to 1 and vd->clock_mode set to
-		 * VDSO_CLOCKMODE_TIMENS. For non time namespace affected tasks
-		 * this does not affect performance because if vd->seq is
-		 * odd, i.e. a concurrent update is in progress the extra
+		 * Open coded to handle VDSO_CLOCKMODE_TIMENS. Time
+		 * namespace enabled tasks have a special VVAR page
+		 * installed which has vd->seq set to 1 and
+		 * vd->clock_mode set to VDSO_CLOCKMODE_TIMENS. For
+		 * non time namespace affected tasks this does not
+		 * affect performance because if vd->seq is odd,
+		 * i.e. a concurrent update is in progress, the extra
 		 * check for vd->clock_mode is just a few extra
-		 * instructions while spin waiting for vd->seq to become
-		 * even again.
+		 * instructions while spin waiting for vd->seq to
+		 * become even again.
 		 */
 		while (unlikely((seq = READ_ONCE(vd->seq)) & 1)) {
 			if (IS_ENABLED(CONFIG_TIME_NS) &&
-			    vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
+				vd->clock_mode == VDSO_CLOCKMODE_TIMENS)
 				return do_hres_timens(vd, clk, ts);
 			cpu_relax();
 		}
+
 		smp_rmb();
 
-		if (unlikely(!vdso_clocksource_ok(vd)))
+		if (!get_hw_counter(vd, &seq, &cycles))
 			return -1;
 
-		cycles = __arch_get_hw_counter(vd->clock_mode, vd);
-		if (unlikely(!vdso_cycles_ok(cycles)))
-			return -1;
 		ns = vdso_ts->nsec;
 		last = vd->cycle_last;
 		ns += vdso_calc_delta(cycles, last, vd->mask, vd->mult);
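
The net effect for user space is an unchanged API with a faster path:
the vDSO's clock_gettime() reads the MMIO clocksource directly once
map_clocksource() has mapped it, and falls back to the syscall whenever
get_hw_counter() returns false. A sketch of a caller; nothing in it is
specific to this patch:

	#include <stdio.h>
	#include <time.h>

	int main(void)
	{
		struct timespec ts;

		/* resolved through the vDSO fast path when available */
		clock_gettime(CLOCK_MONOTONIC, &ts);
		printf("%lld.%09ld\n", (long long)ts.tv_sec, ts.tv_nsec);
		return 0;
	}
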
diff --git a/kernel/mm/ioremap.c b/kernel/mm/ioremap.c
index 5fa1ab4..4071aa6 100644
--- a/kernel/mm/ioremap.c
+++ b/kernel/mm/ioremap.c
@@ -241,6 +241,7 @@
 			break;
 	} while (pgd++, phys_addr += (next - addr), addr = next, addr != end);
 
+	arch_advertise_page_mapping(start, end);
 	flush_cache_vmap(start, end);
 
 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
diff --git a/kernel/mm/kasan/report.c b/kernel/mm/kasan/report.c
index 8fff182..ea779a4 100644
--- a/kernel/mm/kasan/report.c
+++ b/kernel/mm/kasan/report.c
@@ -73,7 +73,7 @@
 			info->access_addr, current->comm, task_pid_nr(current));
 }
 
-static DEFINE_SPINLOCK(report_lock);
+static DEFINE_HARD_SPINLOCK(report_lock);
 
 static void start_report(unsigned long *flags)
 {
@@ -81,7 +81,7 @@
 	 * Make sure we don't end up in loop.
 	 */
 	kasan_disable_current();
-	spin_lock_irqsave(&report_lock, *flags);
+	raw_spin_lock_irqsave(&report_lock, *flags);
 	pr_err("==================================================================\n");
 }
 
@@ -91,7 +91,7 @@
 		trace_error_report_end(ERROR_DETECTOR_KASAN, addr);
 	pr_err("==================================================================\n");
 	add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
-	spin_unlock_irqrestore(&report_lock, *flags);
+	raw_spin_unlock_irqrestore(&report_lock, *flags);
 	if (panic_on_warn && !test_bit(KASAN_BIT_MULTI_SHOT, &kasan_flags)) {
 		/*
 		 * This thread may hit another WARN() in the panic path.
diff --git a/kernel/mm/memory.c b/kernel/mm/memory.c
index 834078a..51ec122 100644
--- a/kernel/mm/memory.c
+++ b/kernel/mm/memory.c
@@ -845,6 +845,14 @@
 		return 1;
 
 	/*
+	 * If the source mm belongs to a Dovetail-enabled process, we
+	 * don't want to impose the COW-induced latency on it: make
+	 * sure the child gets its own copy of the page.
+	 */
+	if (dovetailing() && test_bit(MMF_DOVETAILED, &src_mm->flags))
+		goto do_copy;
+
+	/*
 	 * What we want to do is to check whether this page may
 	 * have been pinned by the parent process.  If so,
 	 * instead of wrprotect the pte on both sides, we copy
@@ -862,6 +870,7 @@
 	if (likely(!page_maybe_dma_pinned(page)))
 		return 1;
 
+do_copy:
 	/*
 	 * The vma->anon_vma of the child process may be NULL
 	 * because the entire vma does not contain anonymous pages.
@@ -5696,6 +5705,15 @@
 void __might_fault(const char *file, int line)
 {
 	/*
+	 * When running over the oob stage (e.g. some co-kernel's own
+	 * thread), we should only make sure to run with hw IRQs
+	 * enabled before accessing the memory.
+	 */
+	if (running_oob()) {
+		WARN_ON_ONCE(hard_irqs_disabled());
+		return;
+	}
+	/*
 	 * Some code (nfs/sunrpc) uses socket ops on kernel memory while
 	 * holding the mmap_lock, this is safe because kernel memory doesn't
 	 * get paged out, therefore we'll never actually fault, and the
diff --git a/kernel/mm/mprotect.c b/kernel/mm/mprotect.c
index c1c3315..2015be9 100644
--- a/kernel/mm/mprotect.c
+++ b/kernel/mm/mprotect.c
@@ -41,7 +41,7 @@
 {
 	pte_t *pte, oldpte;
 	spinlock_t *ptl;
-	unsigned long pages = 0;
+	unsigned long pages = 0, flags;
 	int target_node = NUMA_NO_NODE;
 	bool dirty_accountable = cp_flags & MM_CP_DIRTY_ACCT;
 	bool prot_numa = cp_flags & MM_CP_PROT_NUMA;
@@ -113,6 +113,7 @@
 					continue;
 			}
 
+			flags = hard_local_irq_save();
 			oldpte = ptep_modify_prot_start(vma, addr, pte);
 			ptent = pte_modify(oldpte, newprot);
 			if (preserve_write)
@@ -138,6 +139,7 @@
 				ptent = pte_mkwrite(ptent);
 			}
 			ptep_modify_prot_commit(vma, addr, pte, oldpte, ptent);
+			hard_local_irq_restore(flags);
 			pages++;
 		} else if (is_swap_pte(oldpte)) {
 			swp_entry_t entry = pte_to_swp_entry(oldpte);
diff --git a/kernel/mm/vmalloc.c b/kernel/mm/vmalloc.c
index 3b56c30..7fccdee 100644
--- a/kernel/mm/vmalloc.c
+++ b/kernel/mm/vmalloc.c
@@ -272,6 +272,10 @@
 	return 0;
 }
 
+void __weak arch_advertise_page_mapping(unsigned long start, unsigned long end)
+{
+}
+
 /**
  * map_kernel_range_noflush - map kernel VM area with the specified pages
  * @addr: start of the VM area to map
@@ -315,6 +319,8 @@
 	if (mask & ARCH_PAGE_TABLE_SYNC_MASK)
 		arch_sync_kernel_mappings(start, end);
 
+	arch_advertise_page_mapping(start, end);
+
 	return 0;
 }
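arch_advertise_page_mapping() is a weak hook letting architecture or co-kernel code learn about freshly mapped kernel VM ranges, e.g. to keep a companion core's view of the kernel page tables in sync. A hypothetical strong override (cokernel_sync_kernel_ptes() is made up for illustration):

    void arch_advertise_page_mapping(unsigned long start, unsigned long end)
    {
            /* Propagate the new kernel mapping to the companion core. */
            cokernel_sync_kernel_ptes(start, end);
    }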
 
diff --git a/kernel/modules-only.symvers b/kernel/modules-only.symvers
index 59b0cfa..c3863bf 100644
--- a/kernel/modules-only.symvers
+++ b/kernel/modules-only.symvers
@@ -1,18 +1,15 @@
 0x00000000	stv0288_attach	drivers/media/dvb-frontends/stv0288	EXPORT_SYMBOL	
 0x00000000	cx24123_attach	drivers/media/dvb-frontends/cx24123	EXPORT_SYMBOL	
-0x00000000	rtkm_kzalloc	drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm	EXPORT_SYMBOL	
 0x00000000	au8522_led_ctrl	drivers/media/dvb-frontends/au8522_common	EXPORT_SYMBOL	
 0x00000000	dib0070_wbd_offset	drivers/media/dvb-frontends/dib0070	EXPORT_SYMBOL	
 0x00000000	stv0910_attach	drivers/media/dvb-frontends/stv0910	EXPORT_SYMBOL_GPL	
 0x00000000	stv6110_attach	drivers/media/dvb-frontends/stv6110	EXPORT_SYMBOL	
 0x00000000	lnbp22_attach	drivers/media/dvb-frontends/lnbp22	EXPORT_SYMBOL	
 0x00000000	s921_attach	drivers/media/dvb-frontends/s921	EXPORT_SYMBOL	
-0x00000000	dhd_wlan_mem_prealloc	drivers/net/wireless/rockchip_wlan/rkwifi/bcmdhd/dhd_static_buf	EXPORT_SYMBOL	
 0x00000000	zl10036_attach	drivers/media/dvb-frontends/zl10036	EXPORT_SYMBOL	
 0x00000000	s5h1411_attach	drivers/media/dvb-frontends/s5h1411	EXPORT_SYMBOL	
 0x00000000	zd1301_demod_get_dvb_frontend	drivers/media/dvb-frontends/zd1301_demod	EXPORT_SYMBOL	
 0x00000000	dib3000mc_pid_parse	drivers/media/dvb-frontends/dib3000mc	EXPORT_SYMBOL	
-0x00000000	rtkm_kmalloc	drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm	EXPORT_SYMBOL	
 0x00000000	m88ds3103_attach	drivers/media/dvb-frontends/m88ds3103	EXPORT_SYMBOL	
 0x00000000	dib9000_set_gpio	drivers/media/dvb-frontends/dib9000	EXPORT_SYMBOL	
 0x00000000	xc4000_attach	drivers/media/tuners/xc4000	EXPORT_SYMBOL	
@@ -20,7 +17,6 @@
 0x00000000	au8522_release_state	drivers/media/dvb-frontends/au8522_common	EXPORT_SYMBOL	
 0x00000000	lgs8gl5_attach	drivers/media/dvb-frontends/lgs8gl5	EXPORT_SYMBOL	
 0x00000000	dib0090_update_rframp_7090	drivers/media/dvb-frontends/dib0090	EXPORT_SYMBOL	
-0x00000000	rtkm_prealloc_destroy	drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm	EXPORT_SYMBOL	
 0x00000000	cxd2820r_attach	drivers/media/dvb-frontends/cxd2820r	EXPORT_SYMBOL	
 0x00000000	zl10039_attach	drivers/media/dvb-frontends/zl10039	EXPORT_SYMBOL	
 0x00000000	dib7000p_attach	drivers/media/dvb-frontends/dib7000p	EXPORT_SYMBOL	
@@ -98,7 +94,6 @@
 0x00000000	dib9000_set_slave_frontend	drivers/media/dvb-frontends/dib9000	EXPORT_SYMBOL	
 0x00000000	ds3000_attach	drivers/media/dvb-frontends/ds3000	EXPORT_SYMBOL	
 0x00000000	dib0070_ctrl_agc_filter	drivers/media/dvb-frontends/dib0070	EXPORT_SYMBOL	
-0x00000000	rtkm_dump_mstatus	drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm	EXPORT_SYMBOL	
 0x00000000	dib0090_set_tune_state	drivers/media/dvb-frontends/dib0090	EXPORT_SYMBOL	
 0x00000000	dib0090_get_tune_state	drivers/media/dvb-frontends/dib0090	EXPORT_SYMBOL	
 0x00000000	stv0297_attach	drivers/media/dvb-frontends/stv0297	EXPORT_SYMBOL	
@@ -138,7 +133,6 @@
 0x00000000	tda8083_attach	drivers/media/dvb-frontends/tda8083	EXPORT_SYMBOL	
 0x00000000	dib0090_get_wbd_target	drivers/media/dvb-frontends/dib0090	EXPORT_SYMBOL	
 0x00000000	lgs8gxx_attach	drivers/media/dvb-frontends/lgs8gxx	EXPORT_SYMBOL	
-0x00000000	rtkm_kfree	drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm	EXPORT_SYMBOL	
 0x00000000	nxt200x_attach	drivers/media/dvb-frontends/nxt200x	EXPORT_SYMBOL	
 0x00000000	tda826x_attach	drivers/media/dvb-frontends/tda826x	EXPORT_SYMBOL	
 0x00000000	dib9000_fw_pid_filter	drivers/media/dvb-frontends/dib9000	EXPORT_SYMBOL	
@@ -147,7 +141,6 @@
 0x00000000	au8522_readreg	drivers/media/dvb-frontends/au8522_common	EXPORT_SYMBOL	
 0x00000000	stv6111_attach	drivers/media/dvb-frontends/stv6111	EXPORT_SYMBOL_GPL	
 0x00000000	or51132_attach	drivers/media/dvb-frontends/or51132	EXPORT_SYMBOL	
-0x00000000	rtkm_prealloc_init	drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm	EXPORT_SYMBOL	
 0x00000000	dib9000_get_tuner_interface	drivers/media/dvb-frontends/dib9000	EXPORT_SYMBOL	
 0x00000000	dibx000_reset_i2c_master	drivers/media/dvb-frontends/dibx000_common	EXPORT_SYMBOL	
 0x00000000	cx24110_attach	drivers/media/dvb-frontends/cx24110	EXPORT_SYMBOL	
@@ -163,7 +156,6 @@
 0x00000000	dib0090_register	drivers/media/dvb-frontends/dib0090	EXPORT_SYMBOL	
 0x00000000	cx24113_attach	drivers/media/dvb-frontends/cx24113	EXPORT_SYMBOL	
 0x00000000	fc0013_rc_cal_reset	drivers/media/tuners/fc0013	EXPORT_SYMBOL	
-0x00000000	rtkm_set_trace	drivers/net/wireless/rockchip_wlan/rtl8852be/rtkm	EXPORT_SYMBOL	
 0x00000000	stv0900_attach	drivers/media/dvb-frontends/stv0900	EXPORT_SYMBOL	
 0x00000000	stb6000_attach	drivers/media/dvb-frontends/stb6000	EXPORT_SYMBOL	
 0x00000000	lnbp21_attach	drivers/media/dvb-frontends/lnbp21	EXPORT_SYMBOL	
diff --git a/kernel/modules.builtin.modinfo b/kernel/modules.builtin.modinfo
index 9e0d13e..7f461d2 100644
--- a/kernel/modules.builtin.modinfo
+++ b/kernel/modules.builtin.modinfo
Binary files differ
diff --git a/kernel/net/Kconfig b/kernel/net/Kconfig
index d656716..0d39d1f 100644
--- a/kernel/net/Kconfig
+++ b/kernel/net/Kconfig
@@ -58,6 +58,9 @@
 config SKB_EXTENSIONS
 	bool
 
+config NET_OOB
+	bool
+
 menu "Networking options"
 
 source "net/packet/Kconfig"
diff --git a/kernel/net/core/dev.c b/kernel/net/core/dev.c
index bc5dcf5..01d2396 100644
--- a/kernel/net/core/dev.c
+++ b/kernel/net/core/dev.c
@@ -3111,6 +3111,10 @@
 	} else if (likely(!refcount_dec_and_test(&skb->users))) {
 		return;
 	}
+
+	if (recycle_oob_skb(skb))
+		return;
+
 	get_kfree_skb_cb(skb)->reason = reason;
 	local_irq_save(flags);
 	skb->next = __this_cpu_read(softnet_data.completion_queue);
@@ -3584,7 +3588,12 @@
 	unsigned int len;
 	int rc;
 
-	if (dev_nit_active(dev))
+	/*
+	 * Clone-relay the outgoing packet to listening taps. Taps
+	 * interested in out-of-band traffic are the companion core's
+	 * business, so oob packets are not passed to in-band taps here.
+	 */
+	if (dev_nit_active(dev) && !skb_is_oob(skb))
 		dev_queue_xmit_nit(skb, dev);
 
 	len = skb->len;
@@ -4797,6 +4806,81 @@
 }
 EXPORT_SYMBOL_GPL(do_xdp_generic);
 
+#ifdef CONFIG_NET_OOB
+
+__weak bool netif_oob_deliver(struct sk_buff *skb)
+{
+	return false;
+}
+
+__weak int netif_xmit_oob(struct sk_buff *skb)
+{
+	return NET_XMIT_DROP;
+}
+
+static bool netif_receive_oob(struct sk_buff *skb)
+{
+	struct net_device *dev = skb->dev;
+
+	if (dev && netif_oob_diversion(dev))
+		return netif_oob_deliver(skb);
+
+	return false;
+}
+
+static bool netif_receive_oob_list(struct list_head *head)
+{
+	struct sk_buff *skb, *next;
+	struct net_device *dev;
+
+	if (list_empty(head))
+		return false;
+
+	dev = list_first_entry(head, struct sk_buff, list)->dev;
+	if (!dev || !netif_oob_diversion(dev))
+		return false;
+
+	/* Callee dequeues every skb it consumes. */
+	list_for_each_entry_safe(skb, next, head, list)
+		netif_oob_deliver(skb);
+
+	return list_empty(head);
+}
+
+__weak void netif_oob_run(struct net_device *dev)
+{ }
+
+static void napi_complete_oob(struct napi_struct *n)
+{
+	struct net_device *dev = n->dev;
+
+	if (netif_oob_diversion(dev))
+		netif_oob_run(dev);
+}
+
+__weak void skb_inband_xmit_backlog(void)
+{ }
+
+#else
+
+static inline bool netif_receive_oob(struct sk_buff *skb)
+{
+	return false;
+}
+
+static inline bool netif_receive_oob_list(struct list_head *head)
+{
+	return false;
+}
+
+static inline void napi_complete_oob(struct napi_struct *n)
+{ }
+
+static inline void skb_inband_xmit_backlog(void)
+{ }
+
+#endif
+
 static int netif_rx_internal(struct sk_buff *skb)
 {
 	int ret;
@@ -4895,6 +4979,8 @@
 static __latent_entropy void net_tx_action(struct softirq_action *h)
 {
 	struct softnet_data *sd = this_cpu_ptr(&softnet_data);
+
+	skb_inband_xmit_backlog();
 
 	if (sd->completion_queue) {
 		struct sk_buff *clist;
@@ -5639,6 +5725,9 @@
 {
 	int ret;
 
+	if (netif_receive_oob(skb))
+		return NET_RX_SUCCESS;
+
 	trace_netif_receive_skb_entry(skb);
 
 	ret = netif_receive_skb_internal(skb);
@@ -5662,6 +5751,8 @@
 {
 	struct sk_buff *skb;
 
+	if (netif_receive_oob_list(head))
+		return;
 	if (list_empty(head))
 		return;
 	if (trace_netif_receive_skb_list_entry_enabled()) {
@@ -6152,6 +6243,9 @@
 {
 	gro_result_t ret;
 
+	if (netif_receive_oob(skb))
+		return GRO_NORMAL;
+
 	skb_mark_napi_id(skb, napi);
 	trace_napi_gro_receive_entry(skb);
 
@@ -6489,6 +6583,8 @@
 	unsigned long flags, val, new, timeout = 0;
 	bool ret = true;
 
+	napi_complete_oob(n);
+
 	/*
 	 * 1) Don't let napi dequeue from the cpu poll list
 	 *    just in case its running on a different cpu.
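All of the netif_*_oob() entry points above default to weak no-ops; a companion stack enables diversion per device (see the oob_port sysfs attribute below) and provides strong definitions. A hypothetical override sketch (oob_protocol_match() and oob_rx_queue_push() are made up for illustration; in the list path a real implementation must also dequeue every skb it consumes, as noted above):

    bool netif_oob_deliver(struct sk_buff *skb)
    {
            if (!oob_protocol_match(skb))
                    return false;           /* keep in-band delivery */

            oob_rx_queue_push(skb);         /* hand off to the oob stack */
            return true;
    }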
diff --git a/kernel/net/core/net-sysfs.c b/kernel/net/core/net-sysfs.c
index 989b3f7..a467fca 100644
--- a/kernel/net/core/net-sysfs.c
+++ b/kernel/net/core/net-sysfs.c
@@ -386,6 +386,54 @@
 }
 NETDEVICE_SHOW_RW(tx_queue_len, fmt_dec);
 
+#ifdef CONFIG_NET_OOB
+
+__weak int netif_oob_switch_port(struct net_device *dev, bool enabled)
+{
+	return 0;
+}
+
+__weak bool netif_oob_get_port(struct net_device *dev)
+{
+	return false;
+}
+
+__weak ssize_t netif_oob_query_pool(struct net_device *dev, char *buf)
+{
+	return -EIO;
+}
+
+static int switch_oob_port(struct net_device *dev, unsigned long enable)
+{
+	return netif_oob_switch_port(dev, (bool)enable);
+}
+
+static ssize_t oob_port_store(struct device *dev, struct device_attribute *attr,
+			const char *buf, size_t len)
+{
+	return netdev_store(dev, attr, buf, len, switch_oob_port);
+}
+
+static ssize_t oob_port_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+
+	return sprintf(buf, fmt_dec, netif_oob_get_port(netdev));
+}
+static DEVICE_ATTR_RW(oob_port);
+
+static ssize_t oob_pool_show(struct device *dev,
+			struct device_attribute *attr, char *buf)
+{
+	struct net_device *netdev = to_net_dev(dev);
+
+	return netif_oob_query_pool(netdev, buf);
+}
+static DEVICE_ATTR_RO(oob_pool);
+
+#endif
+
 static int change_gro_flush_timeout(struct net_device *dev, unsigned long val)
 {
 	WRITE_ONCE(dev->gro_flush_timeout, val);
@@ -619,6 +667,10 @@
 	&dev_attr_proto_down.attr,
 	&dev_attr_carrier_up_count.attr,
 	&dev_attr_carrier_down_count.attr,
+#ifdef CONFIG_NET_OOB
+	&dev_attr_oob_port.attr,
+	&dev_attr_oob_pool.attr,
+#endif
 	NULL,
 };
 ATTRIBUTE_GROUPS(net_class);
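With these attributes registered, diversion can presumably be controlled per interface from sysfs once a companion stack overrides the weak stubs: writing 1 to /sys/class/net/<dev>/oob_port switches the port to out-of-band mode, and reading /sys/class/net/<dev>/oob_pool queries the oob buffer pool. With only the stubs in place, the former is a no-op and the latter fails with -EIO.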
diff --git a/kernel/net/core/skbuff.c b/kernel/net/core/skbuff.c
index 382dbdc..ba8222f 100644
--- a/kernel/net/core/skbuff.c
+++ b/kernel/net/core/skbuff.c
@@ -291,6 +291,108 @@
 	return skb;
 }
 
+#ifdef CONFIG_NET_OOB
+
+struct sk_buff *__netdev_alloc_oob_skb(struct net_device *dev, size_t len,
+				size_t headroom, gfp_t gfp_mask)
+{
+	struct sk_buff *skb;
+
+	headroom = ALIGN(NET_SKB_PAD + headroom, NET_SKB_PAD);
+	skb = __alloc_skb(len + headroom, gfp_mask,
+			SKB_ALLOC_RX, NUMA_NO_NODE);
+	if (!skb)
+		return NULL;
+
+	skb_reserve(skb, headroom);
+	skb->dev = dev;
+	skb->oob = true;
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(__netdev_alloc_oob_skb);
+
+void __netdev_free_oob_skb(struct net_device *dev, struct sk_buff *skb)
+{
+	skb->oob = false;
+	skb->oob_clone = false;
+	dev_kfree_skb(skb);
+}
+EXPORT_SYMBOL_GPL(__netdev_free_oob_skb);
+
+void netdev_reset_oob_skb(struct net_device *dev, struct sk_buff *skb,
+			size_t headroom)
+{
+	unsigned char *data = skb->head; /* Always from kmalloc_reserve(). */
+
+	if (WARN_ON_ONCE(!skb->oob || skb->oob_clone))
+		return;
+
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	__build_skb_around(skb, data, 0);
+	headroom = ALIGN(NET_SKB_PAD + headroom, NET_SKB_PAD);
+	skb_reserve(skb, headroom);
+	skb->oob = true;
+	skb->dev = dev;
+}
+EXPORT_SYMBOL_GPL(netdev_reset_oob_skb);
+
+struct sk_buff *skb_alloc_oob_head(gfp_t gfp_mask)
+{
+	struct sk_buff *skb = kmem_cache_alloc(skbuff_head_cache, gfp_mask);
+
+	if (!skb)
+		return NULL;
+
+	/*
+	 * skb heads allocated for out-of-band traffic are reserved
+	 * for clones, so this memset is strictly redundant:
+	 * skb_morph_oob_skb() is expected to follow the allocation
+	 * and reinitialize the head anyway.
+	 */
+	memset(skb, 0, offsetof(struct sk_buff, tail));
+	refcount_set(&skb->users, 1);
+	skb->oob_clone = true;
+	skb_set_kcov_handle(skb, kcov_common_handle());
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(skb_alloc_oob_head);
+
+static struct sk_buff *__skb_clone(struct sk_buff *n, struct sk_buff *skb);
+
+void skb_morph_oob_skb(struct sk_buff *n, struct sk_buff *skb)
+{
+	__skb_clone(n, skb);
+	n->oob = true;
+	n->oob_clone = true;
+	skb->oob_cloned = true;
+}
+EXPORT_SYMBOL_GPL(skb_morph_oob_skb);
+
+bool skb_release_oob_skb(struct sk_buff *skb, int *dref)
+{
+	struct skb_shared_info *shinfo = skb_shinfo(skb);
+
+	if (!skb_unref(skb))
+		return false;
+
+	/*
+	 * ->nohdr is never set for oob shells, so we always refcount
+	 * the full data (header + payload) when cloned.
+	 */
+	*dref = skb->cloned ? atomic_sub_return(1, &shinfo->dataref) : 0;
+
+	return true;
+}
+EXPORT_SYMBOL_GPL(skb_release_oob_skb);
+
+__weak bool skb_oob_recycle(struct sk_buff *skb)
+{
+	return false;
+}
+
+#endif	/* CONFIG_NET_OOB */
+
 /**
  * __build_skb - build a network buffer
  * @data: data buffer provided by caller
@@ -691,6 +793,9 @@
 
 void __kfree_skb(struct sk_buff *skb)
 {
+	if (recycle_oob_skb(skb))
+		return;
+
 	skb_release_all(skb);
 	kfree_skbmem(skb);
 }
@@ -884,6 +989,9 @@
 	struct napi_alloc_cache *nc = this_cpu_ptr(&napi_alloc_cache);
 
 	/* drop skb->head and call any destructors for packet */
+	if (recycle_oob_skb(skb))
+		return;
+
 	skb_release_all(skb);
 
 	/* record skb to CPU local list */
@@ -903,6 +1011,9 @@
 }
 void __kfree_skb_defer(struct sk_buff *skb)
 {
+	if (recycle_oob_skb(skb))
+		return;
+
 	_kfree_skb_defer(skb);
 }
 
@@ -926,6 +1037,9 @@
 		return;
 	}
 
+	if (recycle_oob_skb(skb))
+		return;
+
 	_kfree_skb_defer(skb);
 }
 EXPORT_SYMBOL(napi_consume_skb);
@@ -946,6 +1060,7 @@
 	skb_dst_copy(new, old);
 	__skb_ext_copy(new, old);
 	__nf_copy(new, old, false);
+	__skb_oob_copy(new, old);
 
 	/* Note : this field could be in headers_start/headers_end section
 	 * It is not yet because we do not want to have a 16 bit hole
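Taken together, the helpers above implement a recyclable buffer scheme: an oob skb is allocated once, handed to the companion stack, and reset in place instead of being freed and reallocated. A usage sketch under those assumptions (RX_SIZE and RX_HEADROOM are placeholders, error handling elided):

    struct sk_buff *skb;

    skb = __netdev_alloc_oob_skb(dev, RX_SIZE, RX_HEADROOM, GFP_KERNEL);
    /* ... the oob stack consumes the payload, then recycles it: */
    netdev_reset_oob_skb(dev, skb, RX_HEADROOM);
    /* ... or drops it for good: */
    __netdev_free_oob_skb(dev, skb);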
diff --git a/kernel/net/packet/af_packet.c b/kernel/net/packet/af_packet.c
index eaa030e..9a19b4a 100644
--- a/kernel/net/packet/af_packet.c
+++ b/kernel/net/packet/af_packet.c
@@ -3309,6 +3309,7 @@
 	po = pkt_sk(sk);
 	init_completion(&po->skb_completion);
 	sk->sk_family = PF_PACKET;
+	sk->sk_protocol	= protocol;
 	po->num = proto;
 	po->xmit = dev_queue_xmit;
 
diff --git a/kernel/net/sched/Kconfig b/kernel/net/sched/Kconfig
index bc4e5da..1bcf1b9 100644
--- a/kernel/net/sched/Kconfig
+++ b/kernel/net/sched/Kconfig
@@ -117,6 +117,29 @@
 	  To compile this code as a module, choose M here: the
 	  module will be called sch_multiq.
 
+config NET_SCH_OOB
+	tristate "Out-of-band packet queuing (OOB)"
+	depends on NET_OOB
+	help
+	  Say Y here if you want to use a Dovetail-aware packet
+	  scheduler for prioritizing egress traffic between the
+	  regular (in-band) network stack and a companion core. This
+	  scheduler helps in two cases:
+
+	  - for sending high priority packets originating from the
+	    out-of-band stage to NICs which cannot handle outgoing
+	    packets from that stage directly. In this case, these
+	    packets take precedence over regular traffic for
+	    transmission.
+
+	  - for sharing an out-of-band capable interface between the
+	    in-band and out-of-band network stacks, proxying regular
+	    traffic originating from the in-band stage to NICs which
+	    will be processing all packets from the out-of-band stage.
+
+	  To compile this code as a module, choose M here: the
+	  module will be called sch_oob.
+
 config NET_SCH_RED
 	tristate "Random Early Detection (RED)"
 	help
diff --git a/kernel/net/sched/Makefile b/kernel/net/sched/Makefile
index 66bbf9a..20fc082 100644
--- a/kernel/net/sched/Makefile
+++ b/kernel/net/sched/Makefile
@@ -45,6 +45,7 @@
 obj-$(CONFIG_NET_SCH_TEQL)	+= sch_teql.o
 obj-$(CONFIG_NET_SCH_PRIO)	+= sch_prio.o
 obj-$(CONFIG_NET_SCH_MULTIQ)	+= sch_multiq.o
+obj-$(CONFIG_NET_SCH_OOB)	+= sch_oob.o
 obj-$(CONFIG_NET_SCH_ATM)	+= sch_atm.o
 obj-$(CONFIG_NET_SCH_NETEM)	+= sch_netem.o
 obj-$(CONFIG_NET_SCH_DRR)	+= sch_drr.o
diff --git a/kernel/net/sched/sch_oob.c b/kernel/net/sched/sch_oob.c
new file mode 100644
index 0000000..22373e8
--- /dev/null
+++ b/kernel/net/sched/sch_oob.c
@@ -0,0 +1,294 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <linux/netdevice.h>
+#include <linux/skbuff.h>
+#include <net/pkt_sched.h>
+#include <net/pkt_cls.h>
+
+/*
+ * Two-band setup: qdisc[0] queues out-of-band fallback traffic,
+ * qdisc[1] queues in-band traffic. Users can graft whatever qdisc
+ * they want on these slots; they are preset to pfifo and pfifo_fast
+ * respectively. skb->oob determines which band eventually handles
+ * the packet.
+ */
+
+struct oob_qdisc_priv {
+	struct Qdisc *qdisc[2];	/* 0=oob_fallback, 1=in-band */
+	struct tcf_proto __rcu *filter_list;
+	struct tcf_block *block;
+};
+
+static int oob_enqueue(struct sk_buff *skb, struct Qdisc *sch,
+		struct sk_buff **to_free)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	struct net_device *dev = skb->dev;
+	struct Qdisc *qdisc;
+	int ret;
+
+	/*
+	 * If the device accepts oob traffic and can handle it
+	 * directly from the oob stage, pass the outgoing packet to
+	 * the transmit handler of the oob stack. This makes sure that
+	 * all traffic, including the in-band one, flows through the
+	 * oob stack which may implement its own queuing discipline.
+	 *
+	 * netif_xmit_oob() might fail handling the packet, in which
+	 * case we leave it to the in-band packet scheduler, applying
+	 * a best-effort strategy by giving higher priority to oob
+	 * packets over mere in-band traffic.
+	 */
+	if (dev && netif_oob_diversion(dev) && netdev_is_oob_capable(dev)) {
+		ret = netif_xmit_oob(skb);
+		if (ret == NET_XMIT_SUCCESS)
+			return NET_XMIT_SUCCESS;
+	}
+
+	/*
+	 * Out-of-band fast lane is closed. Best effort: use a special
+	 * 'high priority' queue for oob packets we handle from
+	 * in-band context the usual way through the common stack.
+	 */
+	qdisc = skb->oob ? p->qdisc[0] : p->qdisc[1];
+	ret = qdisc_enqueue(skb, qdisc, to_free);
+	if (ret == NET_XMIT_SUCCESS) {
+		sch->q.qlen++;
+		return NET_XMIT_SUCCESS;
+	}
+
+	if (net_xmit_drop_count(ret))
+		qdisc_qstats_drop(sch);
+
+	return ret;
+}
+
+static struct sk_buff *oob_dequeue(struct Qdisc *sch)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	struct sk_buff *skb;
+	struct Qdisc *qdisc;
+	int band;
+
+	/*
+	 * First try to dequeue pending out-of-band packets. If none,
+	 * then check for in-band traffic.
+	 */
+	for (band = 0; band < 2; band++) {
+		qdisc = p->qdisc[band];
+		skb = qdisc->dequeue(qdisc);
+		if (skb) {
+			qdisc_bstats_update(sch, skb);
+			sch->q.qlen--;
+			return skb;
+		}
+	}
+
+	return NULL;
+}
+
+static struct sk_buff *oob_peek(struct Qdisc *sch)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	struct sk_buff *skb;
+	struct Qdisc *qdisc;
+	int band;
+
+	for (band = 0; band < 2; band++) {
+		qdisc = p->qdisc[band];
+		skb = qdisc->ops->peek(qdisc);
+		if (skb)
+			return skb;
+	}
+
+	return NULL;
+}
+
+static int oob_init(struct Qdisc *sch, struct nlattr *opt,
+		struct netlink_ext_ack *extack)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	int ret;
+
+	ret = tcf_block_get(&p->block, &p->filter_list, sch, extack);
+	if (ret)
+		return ret;
+
+	p->qdisc[0] = qdisc_create_dflt(sch->dev_queue,
+					&pfifo_qdisc_ops, sch->handle,
+					extack);
+	p->qdisc[1] = qdisc_create_dflt(sch->dev_queue,
+					&pfifo_fast_ops, sch->handle,
+					extack);
+	if (!p->qdisc[0] || !p->qdisc[1]) {
+		/* ->destroy() runs on failure; noop_qdisc is put-safe. */
+		p->qdisc[0] = p->qdisc[0] ?: &noop_qdisc;
+		p->qdisc[1] = p->qdisc[1] ?: &noop_qdisc;
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static void oob_reset(struct Qdisc *sch)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+
+	qdisc_reset(p->qdisc[0]);
+	qdisc_reset(p->qdisc[1]);
+	sch->q.qlen = 0;
+}
+
+static void oob_destroy(struct Qdisc *sch)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+
+	tcf_block_put(p->block);
+	qdisc_put(p->qdisc[0]);
+	qdisc_put(p->qdisc[1]);
+}
+
+static int oob_tune(struct Qdisc *sch, struct nlattr *opt,
+		struct netlink_ext_ack *extack)
+{
+	return 0;
+}
+
+static int oob_dump(struct Qdisc *sch, struct sk_buff *skb)
+{
+	return skb->len;
+}
+
+static int oob_graft(struct Qdisc *sch, unsigned long arg, struct Qdisc *new,
+		struct Qdisc **old, struct netlink_ext_ack *extack)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	unsigned long band = arg - 1;
+
+	if (new == NULL)
+		new = &noop_qdisc;
+
+	*old = qdisc_replace(sch, new, &p->qdisc[band]);
+
+	return 0;
+}
+
+static struct Qdisc *
+oob_leaf(struct Qdisc *sch, unsigned long arg)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	unsigned long band = arg - 1;
+
+	return p->qdisc[band];
+}
+
+static unsigned long oob_find(struct Qdisc *sch, u32 classid)
+{
+	unsigned long band = TC_H_MIN(classid);
+
+	return band - 1 >= 2 ? 0 : band;
+}
+
+static int oob_dump_class(struct Qdisc *sch, unsigned long cl,
+			struct sk_buff *skb, struct tcmsg *tcm)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+
+	tcm->tcm_handle |= TC_H_MIN(cl);
+	tcm->tcm_info = p->qdisc[cl - 1]->handle;
+
+	return 0;
+}
+
+static int oob_dump_class_stats(struct Qdisc *sch, unsigned long cl,
+				struct gnet_dump *d)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+	struct Qdisc *cl_q = p->qdisc[cl - 1];
+
+	if (gnet_stats_copy_basic(qdisc_root_sleeping_running(sch),
+				  d, cl_q->cpu_bstats, &cl_q->bstats) < 0 ||
+	    qdisc_qstats_copy(d, cl_q) < 0)
+		return -1;
+
+	return 0;
+}
+
+static void oob_walk(struct Qdisc *sch, struct qdisc_walker *arg)
+{
+	int band;
+
+	if (arg->stop)
+		return;
+
+	for (band = 0; band < 2; band++) {
+		if (arg->count < arg->skip) {
+			arg->count++;
+			continue;
+		}
+		if (arg->fn(sch, band + 1, arg) < 0) {
+			arg->stop = 1;
+			break;
+		}
+		arg->count++;
+	}
+}
+
+static unsigned long oob_tcf_bind(struct Qdisc *sch, unsigned long parent,
+				 u32 classid)
+{
+	return oob_find(sch, classid);
+}
+
+static void oob_tcf_unbind(struct Qdisc *q, unsigned long cl)
+{
+}
+
+static struct tcf_block *oob_tcf_block(struct Qdisc *sch, unsigned long cl,
+				       struct netlink_ext_ack *extack)
+{
+	struct oob_qdisc_priv *p = qdisc_priv(sch);
+
+	if (cl)
+		return NULL;
+
+	return p->block;
+}
+
+static const struct Qdisc_class_ops oob_class_ops = {
+	.graft		=	oob_graft,
+	.leaf		=	oob_leaf,
+	.find		=	oob_find,
+	.walk		=	oob_walk,
+	.dump		=	oob_dump_class,
+	.dump_stats	=	oob_dump_class_stats,
+	.tcf_block	=	oob_tcf_block,
+	.bind_tcf	=	oob_tcf_bind,
+	.unbind_tcf	=	oob_tcf_unbind,
+};
+
+static struct Qdisc_ops oob_qdisc_ops __read_mostly = {
+	.cl_ops		=	&oob_class_ops,
+	.id		=	"oob",
+	.priv_size	=	sizeof(struct oob_qdisc_priv),
+	.enqueue	=	oob_enqueue,
+	.dequeue	=	oob_dequeue,
+	.peek		=	oob_peek,
+	.init		=	oob_init,
+	.reset		=	oob_reset,
+	.destroy	=	oob_destroy,
+	.change		=	oob_tune,
+	.dump		=	oob_dump,
+	.owner		=	THIS_MODULE,
+};
+
+static int __init oob_module_init(void)
+{
+	return register_qdisc(&oob_qdisc_ops);
+}
+
+static void __exit oob_module_exit(void)
+{
+	unregister_qdisc(&oob_qdisc_ops);
+}
+
+module_init(oob_module_init)
+module_exit(oob_module_exit)
+
+MODULE_LICENSE("GPL");
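Since the discipline registers under the id "oob", it would typically be attached as the root qdisc with something like "tc qdisc add dev eth0 root oob" (assuming standard tc syntax; the module must be built in or loaded first), after which its two bands appear as classes :1 (out-of-band fallback) and :2 (in-band) and can be grafted with custom child qdiscs.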
diff --git a/kernel/net/socket.c b/kernel/net/socket.c
index 938ab3a..ac19dee 100644
--- a/kernel/net/socket.c
+++ b/kernel/net/socket.c
@@ -141,6 +141,95 @@
 #define sock_show_fdinfo NULL
 #endif
 
+#ifdef CONFIG_NET_OOB
+
+static inline bool sock_oob_capable(struct socket *sock)
+{
+	return sock->sk && sock->sk->oob_data;
+}
+
+int __weak sock_oob_attach(struct socket *sock)
+{
+	return 0;
+}
+
+void __weak sock_oob_detach(struct socket *sock)
+{
+}
+
+int __weak sock_oob_bind(struct socket *sock, struct sockaddr *addr, int len)
+{
+	return 0;
+}
+
+long __weak sock_inband_ioctl_redirect(struct socket *sock,
+				unsigned int cmd, unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+long __weak sock_oob_ioctl(struct file *file,
+			unsigned int cmd, unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+ssize_t __weak sock_oob_write(struct file *filp,
+				const char __user *u_buf, size_t count)
+{
+	return -EOPNOTSUPP;
+}
+
+ssize_t __weak sock_oob_read(struct file *filp,
+			char __user *u_buf, size_t count)
+{
+	return -EOPNOTSUPP;
+}
+
+__poll_t __weak sock_oob_poll(struct file *filp,
+				struct oob_poll_wait *wait)
+{
+	return -EOPNOTSUPP;
+}
+
+#define compat_sock_oob_ioctl compat_ptr_oob_ioctl
+
+#else	/* !CONFIG_NET_OOB */
+
+static inline bool sock_oob_capable(struct socket *sock)
+{
+	return false;
+}
+
+static inline int sock_oob_attach(struct socket *sock)
+{
+	return 0;
+}
+
+static inline void sock_oob_detach(struct socket *sock)
+{
+}
+
+static inline int sock_oob_bind(struct socket *sock,
+			struct sockaddr *addr, int len)
+{
+	return 0;
+}
+
+static inline long sock_inband_ioctl_redirect(struct socket *sock,
+					unsigned int cmd, unsigned long arg)
+{
+	return -ENOTTY;
+}
+
+#define sock_oob_ioctl		NULL
+#define sock_oob_write		NULL
+#define sock_oob_read		NULL
+#define sock_oob_poll		NULL
+#define compat_sock_oob_ioctl	NULL
+
+#endif	/* !CONFIG_NET_OOB */
+
 /*
  *	Socket files have a set of 'special' operations as well as the generic file ones. These don't appear
  *	in the operation structures but are done directly via the socketcall() multiplexor.
@@ -153,8 +242,13 @@
 	.write_iter =	sock_write_iter,
 	.poll =		sock_poll,
 	.unlocked_ioctl = sock_ioctl,
+	.oob_ioctl =	sock_oob_ioctl,
+	.oob_write =	sock_oob_write,
+	.oob_read =	sock_oob_read,
+	.oob_poll =	sock_oob_poll,
 #ifdef CONFIG_COMPAT
 	.compat_ioctl = compat_sock_ioctl,
+	.compat_oob_ioctl = compat_sock_oob_ioctl,
 #endif
 	.mmap =		sock_mmap,
 	.release =	sock_close,
@@ -427,7 +521,7 @@
 static int sock_map_fd(struct socket *sock, int flags)
 {
 	struct file *newfile;
-	int fd = get_unused_fd_flags(flags);
+	int fd = get_unused_fd_flags(flags), ret;
 	if (unlikely(fd < 0)) {
 		sock_release(sock);
 		return fd;
@@ -435,6 +529,14 @@
 
 	newfile = sock_alloc_file(sock, flags, NULL);
 	if (!IS_ERR(newfile)) {
+		if (IS_ENABLED(CONFIG_NET_OOB) && (flags & SOCK_OOB)) {
+			ret = sock_oob_attach(sock);
+			if (ret < 0) {
+				put_unused_fd(fd);
+				sock_release(sock);
+				return ret;
+			}
+		}
 		fd_install(fd, newfile);
 		return fd;
 	}
@@ -589,6 +691,9 @@
 
 static void __sock_release(struct socket *sock, struct inode *inode)
 {
+	if (sock_oob_capable(sock))
+		sock_oob_detach(sock);
+
 	if (sock->ops) {
 		struct module *owner = sock->ops->owner;
 
@@ -1185,6 +1290,11 @@
 						   false);
 			break;
 		default:
+			if (sock_oob_capable(sock)) {
+				err = sock_inband_ioctl_redirect(sock, cmd, arg);
+				if (!err || err != -ENOIOCTLCMD)
+					break;
+			}
 			err = sock_do_ioctl(net, sock, cmd, arg);
 			break;
 		}
@@ -1498,10 +1608,18 @@
 	BUILD_BUG_ON((SOCK_MAX | SOCK_TYPE_MASK) != SOCK_TYPE_MASK);
 	BUILD_BUG_ON(SOCK_CLOEXEC & SOCK_TYPE_MASK);
 	BUILD_BUG_ON(SOCK_NONBLOCK & SOCK_TYPE_MASK);
+	BUILD_BUG_ON(SOCK_OOB & SOCK_TYPE_MASK);
 
 	flags = type & ~SOCK_TYPE_MASK;
-	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK))
+	if (flags & ~(SOCK_CLOEXEC | SOCK_NONBLOCK | SOCK_OOB))
 		return -EINVAL;
+	/*
+	 * Not every protocol family supports out-of-band operations,
+	 * but PF_OOB certainly does: force SOCK_OOB in, so that
+	 * sock_oob_attach() runs for this socket.
+	 */
+	if (IS_ENABLED(CONFIG_NET_OOB) && family == AF_OOB)
+		flags |= SOCK_OOB;
 	type &= SOCK_TYPE_MASK;
 
 	if (SOCK_NONBLOCK != O_NONBLOCK && (flags & SOCK_NONBLOCK))
@@ -1511,7 +1629,7 @@
 	if (retval < 0)
 		return retval;
 
-	return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK));
+	return sock_map_fd(sock, flags & (O_CLOEXEC | O_NONBLOCK | O_OOB));
 }
 
 SYSCALL_DEFINE3(socket, int, family, int, type, int, protocol)
@@ -1642,6 +1760,9 @@
 			err = security_socket_bind(sock,
 						   (struct sockaddr *)&address,
 						   addrlen);
+			if (sock_oob_capable(sock) && !err)
+				err = sock_oob_bind(sock, (struct sockaddr *)
+						&address, addrlen);
 			if (!err)
 				err = sock->ops->bind(sock,
 						      (struct sockaddr *)
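From userspace, the expected entry point is a PF_OOB socket: AF_OOB forces SOCK_OOB, which triggers sock_oob_attach() at creation time and routes reads, writes, ioctls and polls through the new f_op->oob_* handlers. A sketch (the SOCK_DGRAM type is an assumption; the type and protocol actually supported are up to the companion core that registers AF_OOB):

    #include <sys/socket.h>

    #ifndef AF_OOB
    #define AF_OOB 45       /* value added by this patch */
    #endif

    int fd = socket(AF_OOB, SOCK_DGRAM, 0); /* SOCK_OOB is implied */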
diff --git a/kernel/scripts/mkcompile_h b/kernel/scripts/mkcompile_h
index a72b154..5a34fe4 100755
--- a/kernel/scripts/mkcompile_h
+++ b/kernel/scripts/mkcompile_h
@@ -6,8 +6,9 @@
 SMP=$3
 PREEMPT=$4
 PREEMPT_RT=$5
-CC_VERSION="$6"
-LD=$7
+IRQPIPE=$6
+CC_VERSION="$7"
+LD=$8
 
 vecho() { [ "${quiet}" = "silent_" ] || echo "$@" ; }
 
@@ -45,6 +46,7 @@
 if [ -n "$SMP" ] ; then CONFIG_FLAGS="SMP"; fi
 if [ -n "$PREEMPT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT"; fi
 if [ -n "$PREEMPT_RT" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS PREEMPT_RT"; fi
+if [ -n "$IRQPIPE" ] ; then CONFIG_FLAGS="$CONFIG_FLAGS IRQPIPE"; fi
 
 # Truncate to maximum length
 UTS_LEN=64
diff --git a/kernel/security/selinux/hooks.c b/kernel/security/selinux/hooks.c
index 1235854..765efd0 100644
--- a/kernel/security/selinux/hooks.c
+++ b/kernel/security/selinux/hooks.c
@@ -1280,7 +1280,9 @@
 			return SECCLASS_SMC_SOCKET;
 		case PF_XDP:
 			return SECCLASS_XDP_SOCKET;
-#if PF_MAX > 45
+		case PF_OOB:
+			return SECCLASS_OOB_SOCKET;
+#if PF_MAX > 46
 #error New address family defined, please update this function.
 #endif
 		}
diff --git a/kernel/security/selinux/include/classmap.h b/kernel/security/selinux/include/classmap.h
index 955e8c8..79ea017 100644
--- a/kernel/security/selinux/include/classmap.h
+++ b/kernel/security/selinux/include/classmap.h
@@ -247,6 +247,8 @@
 	    NULL } },
 	{ "xdp_socket",
 	  { COMMON_SOCK_PERMS, NULL } },
+	{ "oob_socket",
+	  { COMMON_SOCK_PERMS, NULL } },
 	{ "perf_event",
 	  { "open", "cpu", "kernel", "tracepoint", "read", "write", NULL } },
 	{ "anon_inode",
@@ -254,6 +256,6 @@
 	{ NULL }
   };
 
-#if PF_MAX > 45
+#if PF_MAX > 46
 #error New address family defined, please update secclass_map.
 #endif
diff --git a/kernel/tools/perf/trace/beauty/include/linux/socket.h b/kernel/tools/perf/trace/beauty/include/linux/socket.h
index 9aa530d..93b104e 100644
--- a/kernel/tools/perf/trace/beauty/include/linux/socket.h
+++ b/kernel/tools/perf/trace/beauty/include/linux/socket.h
@@ -223,8 +223,9 @@
 				 * reuses AF_INET address family
 				 */
 #define AF_XDP		44	/* XDP sockets			*/
+#define AF_OOB		45	/* Out-of-band domain sockets */
 
-#define AF_MAX		45	/* For now.. */
+#define AF_MAX		46	/* For now.. */
 
 /* Protocol families, same as address families. */
 #define PF_UNSPEC	AF_UNSPEC
diff --git a/kernel/vmlinux.symvers b/kernel/vmlinux.symvers
index 62cce50..e37a20b 100644
--- a/kernel/vmlinux.symvers
+++ b/kernel/vmlinux.symvers
@@ -46,6 +46,7 @@
 0x00000000	ZSTD_initCStream_usingCDict	vmlinux	EXPORT_SYMBOL	
 0x00000000	__kfifo_alloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	reclaim_shmem_address_space	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	irq_inject_pipeline	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_log_register	vmlinux	EXPORT_SYMBOL	
 0x00000000	serial8250_do_shutdown	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	phy_reset	vmlinux	EXPORT_SYMBOL_GPL	
@@ -90,6 +91,7 @@
 0x00000000	fb_validate_mode	vmlinux	EXPORT_SYMBOL	
 0x00000000	errseq_sample	vmlinux	EXPORT_SYMBOL	
 0x00000000	__unregister_chrdev	vmlinux	EXPORT_SYMBOL	
+0x00000000	xntimer_get_date	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tracing_alloc_snapshot	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	srcu_notifier_chain_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	g_token_size	vmlinux	EXPORT_SYMBOL_GPL	
@@ -98,6 +100,7 @@
 0x00000000	__traceiter_vb2_buf_done	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	badblocks_store	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	filp_open_block	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnheap_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	raw_notifier_call_chain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_ct_delete	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__dev_direct_xmit	vmlinux	EXPORT_SYMBOL	
@@ -170,6 +173,7 @@
 0x00000000	tty_register_device	vmlinux	EXPORT_SYMBOL	
 0x00000000	simple_write_to_buffer	vmlinux	EXPORT_SYMBOL	
 0x00000000	__srcu_read_unlock	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__hybrid_spin_unlock	vmlinux	EXPORT_SYMBOL	
 0x00000000	reset_devices	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_find_all_nodes	vmlinux	EXPORT_SYMBOL	
 0x00000000	mmc_app_cmd	vmlinux	EXPORT_SYMBOL_GPL	
@@ -225,6 +229,7 @@
 0x00000000	scatterwalk_copychunks	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pin_user_pages_unlocked	vmlinux	EXPORT_SYMBOL	
 0x00000000	ktime_get_real_fast_ns	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	stage_disabled	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_irq_alloc_generic_chip	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	check_preempt_curr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_free	vmlinux	EXPORT_SYMBOL_GPL	
@@ -280,6 +285,7 @@
 0x00000000	__kfifo_dma_in_prepare_r	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_mq_stop_hw_queues	vmlinux	EXPORT_SYMBOL	
 0x00000000	__fat_fs_error	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_toseq_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_preload_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	prof_on	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_free_irq	vmlinux	EXPORT_SYMBOL	
@@ -308,6 +314,7 @@
 0x00000000	devm_ioremap_resource	vmlinux	EXPORT_SYMBOL	
 0x00000000	sg_miter_start	vmlinux	EXPORT_SYMBOL	
 0x00000000	disk_has_partitions	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	freq_qos_remove_request	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	udp_encap_disable	vmlinux	EXPORT_SYMBOL	
 0x00000000	tso_build_data	vmlinux	EXPORT_SYMBOL	
@@ -315,7 +322,6 @@
 0x00000000	snd_card_free	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_tm_issue_prefetches	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_m2m_decoder_cmd	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_disable_interrupts	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_buf_map_attachment	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_mali_pm_status	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tty_mode_ioctl	vmlinux	EXPORT_SYMBOL_GPL	
@@ -324,11 +330,11 @@
 0x00000000	debugfs_lookup_and_remove	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	load_nls	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	nfs_show_stats	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnvfile_get_blob	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rfkill_set_hw_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_flush_gc	vmlinux	EXPORT_SYMBOL	
 0x00000000	rtc_year_days	vmlinux	EXPORT_SYMBOL	
 0x00000000	ehci_cf_port_reset_rwsem	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_hw_keysetmac	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_host_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ata_link_offline	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	scsi_normalize_sense	vmlinux	EXPORT_SYMBOL	
@@ -368,6 +374,7 @@
 0x00000000	seq_escape_mem_ascii	vmlinux	EXPORT_SYMBOL	
 0x00000000	vmf_insert_mixed_prot	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_rpm_suspend	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__hybrid_spin_lock	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_state_add	vmlinux	EXPORT_SYMBOL	
 0x00000000	ipt_unregister_table_exit	vmlinux	EXPORT_SYMBOL	
 0x00000000	ping_rcv	vmlinux	EXPORT_SYMBOL_GPL	
@@ -375,11 +382,11 @@
 0x00000000	of_property_read_u64_index	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_property_read_u32_index	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_ctrl_notify	vmlinux	EXPORT_SYMBOL	
-0x00000000	ar9003_mci_send_wlan_channels	vmlinux	EXPORT_SYMBOL	
 0x00000000	cn_del_callback	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regulator_is_enabled	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iov_iter_bvec	vmlinux	EXPORT_SYMBOL	
 0x00000000	iov_iter_kvec	vmlinux	EXPORT_SYMBOL	
+0x00000000	__xnselect_signal	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rt5640_sel_asrc_clk_src	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	led_get_default_pattern	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dvb_register_device	vmlinux	EXPORT_SYMBOL	
@@ -444,11 +451,11 @@
 0x00000000	drm_dp_atomic_release_vcpi_slots	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_bus_write_config_dword	vmlinux	EXPORT_SYMBOL	
 0x00000000	net_prio_cgrp_subsys_enabled_key	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__hybrid_spin_trylock	vmlinux	EXPORT_SYMBOL	
 0x00000000	dst_cache_get_ip6	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_lower_state_changed	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_pcm_lib_default_mmap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iio_buffer_get	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_mci_cleanup	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dp_aux_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_helper_prepare_planes	vmlinux	EXPORT_SYMBOL	
 0x00000000	tty_hung_up_p	vmlinux	EXPORT_SYMBOL	
@@ -461,7 +468,6 @@
 0x00000000	dev_pm_opp_get_voltage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_clk_put	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_subdev_get_fwnode_pad_1_to_1	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_setmcastfilter	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_dev_info_list_add_keyed	vmlinux	EXPORT_SYMBOL	
 0x00000000	syscore_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pcim_enable_device	vmlinux	EXPORT_SYMBOL	
@@ -475,6 +481,8 @@
 0x00000000	proc_mkdir_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mpage_writepages	vmlinux	EXPORT_SYMBOL	
 0x00000000	write_one_page	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_harden	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsynch_try_acquire	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	async_synchronize_cookie	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_notify_new_peer_candidate	vmlinux	EXPORT_SYMBOL	
 0x00000000	udp6_set_csum	vmlinux	EXPORT_SYMBOL	
@@ -557,7 +565,6 @@
 0x00000000	clocksource_mmio_readl_up	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mmc_can_discard	vmlinux	EXPORT_SYMBOL	
 0x00000000	rk628_control_assert	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath_is_mybeacon	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_flush_work	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	idr_destroy	vmlinux	EXPORT_SYMBOL	
 0x00000000	ida_destroy	vmlinux	EXPORT_SYMBOL	
@@ -626,6 +633,7 @@
 0x00000000	idr_alloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	jbd2_journal_update_sb_errno	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	vfs_unlink	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	xnsched_set_policy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_compat_sigset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_task_exe_file	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_stop_tx_ba_cb_irqsafe	vmlinux	EXPORT_SYMBOL	
@@ -638,7 +646,6 @@
 0x00000000	snd_dmaengine_pcm_open_request_chan	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iio_trigger_generic_data_rdy_poll	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_get_child_by_name	vmlinux	EXPORT_SYMBOL	
-0x00000000	dt_init_idle_driver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpufreq_frequency_table_get_index	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	phy_aneg_done	vmlinux	EXPORT_SYMBOL	
 0x00000000	key_revoke	vmlinux	EXPORT_SYMBOL	
@@ -666,8 +673,6 @@
 0x00000000	ieee80211_sched_scan_stopped	vmlinux	EXPORT_SYMBOL	
 0x00000000	dst_cache_set_ip6	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rtnl_lock	vmlinux	EXPORT_SYMBOL	
-0x00000000	rockchip_dmcfreq_vop_bandwidth_update	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_addrxbuf_edma	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_scsi_slave_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_crtc_vblank_restore	vmlinux	EXPORT_SYMBOL	
 0x00000000	__drm_atomic_helper_connector_reset	vmlinux	EXPORT_SYMBOL	
@@ -685,7 +690,6 @@
 0x00000000	udp_push_pending_frames	vmlinux	EXPORT_SYMBOL	
 0x00000000	llc_add_pack	vmlinux	EXPORT_SYMBOL	
 0x00000000	hidinput_connect	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_deinit	vmlinux	EXPORT_SYMBOL	
 0x00000000	__platform_driver_probe	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_csf_find_queue_group	vmlinux	EXPORT_SYMBOL	
 0x00000000	clk_unregister_fixed_factor	vmlinux	EXPORT_SYMBOL_GPL	
@@ -709,12 +713,13 @@
 0x00000000	ieee80211_nan_func_terminated	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_get_by_index	vmlinux	EXPORT_SYMBOL	
 0x00000000	hid_compare_device_paths	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_abortpcurecv	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_buf_unmap_attachment	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	serial8250_rpm_get_tx	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xa_delete_node	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	match_strlcpy	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_nrtsig_pend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__bpf_call_base	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	tick_uninstall_proxy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_freq_khz_to_channel	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet6_hash	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	init_net	vmlinux	EXPORT_SYMBOL	
@@ -737,7 +742,6 @@
 0x00000000	swake_up_one	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_soc_calc_bclk	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_interval_ranges	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_register_governor	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_fence_remove_callback	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_helper_connector_tv_reset	vmlinux	EXPORT_SYMBOL	
 0x00000000	gpiod_direction_output_raw	vmlinux	EXPORT_SYMBOL_GPL	
@@ -832,6 +836,8 @@
 0x00000000	cdc_ncm_tx_fixup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nanddev_bbt_set_block_status	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nanddev_bbt_get_block_status	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	at24_mac3_read	vmlinux	EXPORT_SYMBOL	
+0x00000000	at24_mac2_read	vmlinux	EXPORT_SYMBOL	
 0x00000000	at24_mac1_read	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_pm_qos_update_request	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	generic_device_group	vmlinux	EXPORT_SYMBOL_GPL	
@@ -972,7 +978,6 @@
 0x00000000	mmc_cqe_post_req	vmlinux	EXPORT_SYMBOL	
 0x00000000	ptp_schedule_worker	vmlinux	EXPORT_SYMBOL	
 0x00000000	ohci_hub_status_data	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_check_alive	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_buf_cache_map_attachment	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_device_add_group	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_context_mmap	vmlinux	EXPORT_SYMBOL	
@@ -996,8 +1001,6 @@
 0x00000000	register_fib_notifier	vmlinux	EXPORT_SYMBOL	
 0x00000000	rt5645_sel_asrc_clk_src	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_component_nc_pin_unlocked	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpuidle_disable_device	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_setopmode	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_client_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_helper_check	vmlinux	EXPORT_SYMBOL	
 0x00000000	serial8250_resume_port	vmlinux	EXPORT_SYMBOL	
@@ -1026,6 +1029,7 @@
 0x00000000	LZ4_setStreamDecode	vmlinux	EXPORT_SYMBOL	
 0x00000000	del_gendisk	vmlinux	EXPORT_SYMBOL	
 0x00000000	block_write_full_page	vmlinux	EXPORT_SYMBOL	
+0x00000000	xntimer_set_gravity	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpufreq_this_cpu_can_update	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_register_type	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_invert_tuple	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1056,6 +1060,7 @@
 0x00000000	drm_property_create_bool	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_map_voltage_iterate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	list_lru_isolate	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnheap_alloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_prog_alloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_tdls_oper_request	vmlinux	EXPORT_SYMBOL	
 0x00000000	km_new_mapping	vmlinux	EXPORT_SYMBOL	
@@ -1075,6 +1080,7 @@
 0x00000000	prandom_seed_full_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_queue_write_cache	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	generic_block_bmap	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_fd_recvmsg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__symbol_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ktime_get_boot_fast_ns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpuhp_tasks_frozen	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1119,6 +1125,7 @@
 0x00000000	xa_destroy	vmlinux	EXPORT_SYMBOL	
 0x00000000	bdev_read_only	vmlinux	EXPORT_SYMBOL	
 0x00000000	put_pages_list	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_cpu_affinity	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	from_kprojid	vmlinux	EXPORT_SYMBOL	
 0x00000000	skb_copy_datagram_iter	vmlinux	EXPORT_SYMBOL	
 0x00000000	sock_wfree	vmlinux	EXPORT_SYMBOL	
@@ -1130,6 +1137,7 @@
 0x00000000	utf8nfdicf	vmlinux	EXPORT_SYMBOL	
 0x00000000	utf8version_is_supported	vmlinux	EXPORT_SYMBOL	
 0x00000000	__posix_acl_create	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_resume	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	round_jiffies	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__task_rq_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_proto_csum_replace16	vmlinux	EXPORT_SYMBOL	
@@ -1156,7 +1164,6 @@
 0x00000000	nf_confirm	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mm_account_pinned_pages	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devfreq_unregister_opp_notifier	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_stop_dma_queue	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_rkvdec_driver	vmlinux	EXPORT_SYMBOL	
 0x00000000	no_hash_pointers	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	strpbrk	vmlinux	EXPORT_SYMBOL	
@@ -1191,11 +1198,11 @@
 0x00000000	regulator_set_mode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	lzo1x_1_compress	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	set_disk_ro	vmlinux	EXPORT_SYMBOL	
+0x00000000	__hybrid_spin_lock_nested	vmlinux	EXPORT_SYMBOL	
 0x00000000	svc_alien_sock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm6_rcv_tnl	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_zone_dflt	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_limit_volume	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	rockchip_dmcfreq_lock	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_read_arg_group	vmlinux	EXPORT_SYMBOL	
 0x00000000	rkcif_subdev_driver	vmlinux	EXPORT_SYMBOL	
 0x00000000	bus_find_device	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1225,15 +1232,14 @@
 0x00000000	iio_device_attach_buffer	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cec_notifier_set_phys_addr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_set_data_role	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_btcoex_init_2wire	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_mem_pool_set_max_size	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_connector_list_iter_begin	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_helper_mode_fill_fb_struct	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_bulk_enable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_block_rq_insert	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fuse_conn_put	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__xntimer_stop	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_irq_setup_generic_chip	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	irq_get_irq_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	atomic_notifier_call_chain_robust	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_frag_pull_head	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_seq_next	vmlinux	EXPORT_SYMBOL	
@@ -1267,6 +1273,7 @@
 0x00000000	__kernel_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cma_get_name	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_kmalloc_node	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsynch_flush	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kthread_cancel_work_sync	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_update_owe_info_event	vmlinux	EXPORT_SYMBOL	
 0x00000000	svc_fill_write_vector	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1274,19 +1281,15 @@
 0x00000000	release_sock	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_pm_opp_disable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__i2c_first_dynamic_bus_num	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_mci_set_bt_version	vmlinux	EXPORT_SYMBOL	
-0x00000000	ar9003_is_paprd_enabled	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_validate_pause	vmlinux	EXPORT_SYMBOL	
 0x00000000	ipvlan_link_setup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_clk_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_i2c_encoder_init	vmlinux	EXPORT_SYMBOL	
-0x00000000	log_threaded_irq_wakeup_reason	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__netlink_ns_capable	vmlinux	EXPORT_SYMBOL	
 0x00000000	__sk_receive_skb	vmlinux	EXPORT_SYMBOL	
 0x00000000	__snd_rawmidi_transmit_peek	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_pcm_hw_constraint_msbits	vmlinux	EXPORT_SYMBOL	
 0x00000000	usbnet_status_stop	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_btcoex_set_concur_txprio	vmlinux	EXPORT_SYMBOL	
 0x00000000	mdiobus_setup_mdiodev_from_board_info	vmlinux	EXPORT_SYMBOL	
 0x00000000	clk_set_rate_range	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sync_inode	vmlinux	EXPORT_SYMBOL	
@@ -1303,6 +1306,7 @@
 0x00000000	crypto_inst_setname	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__invalidate_device	vmlinux	EXPORT_SYMBOL	
 0x00000000	seq_put_decimal_ull	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_remove_config_chain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_barrier_tasks_rude	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_ct_remove_expect	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_midi_event_reset_encode	vmlinux	EXPORT_SYMBOL	
@@ -1357,6 +1361,7 @@
 0x00000000	sata_async_notification	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__bforget	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	generic_file_write_iter	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_vfroot	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_set_ullong	vmlinux	EXPORT_SYMBOL	
 0x00000000	xdr_reserve_space	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet6_add_protocol	vmlinux	EXPORT_SYMBOL	
@@ -1380,6 +1385,7 @@
 0x00000000	get_random_bytes	vmlinux	EXPORT_SYMBOL	
 0x00000000	gic_pmr_sync	vmlinux	EXPORT_SYMBOL	
 0x00000000	ZSTD_findFrameCompressedSize	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_prepare_wait	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wakeme_after_rcu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	probe_irq_off	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_alloc_hw_nm	vmlinux	EXPORT_SYMBOL	
@@ -1431,6 +1437,7 @@
 0x00000000	__arch_copy_from_user	vmlinux	EXPORT_SYMBOL	
 0x00000000	__page_mapcount	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mempool_kfree	vmlinux	EXPORT_SYMBOL	
+0x00000000	__rtdm_dev_open	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nsecs_to_jiffies64	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_graph_get_remote_port_parent	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_wakeup_enabled_descendants	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1452,7 +1459,6 @@
 0x00000000	iio_enum_read	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vb2_wait_for_all_buffers	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l_disable_media_source	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_rxprocdesc	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_register_fixup	vmlinux	EXPORT_SYMBOL	
 0x00000000	device_bind_driver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	device_store_ulong	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1460,6 +1466,7 @@
 0x00000000	radix_tree_maybe_preload	vmlinux	EXPORT_SYMBOL	
 0x00000000	iomap_writepage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	file_update_time	vmlinux	EXPORT_SYMBOL	
+0x00000000	__rtdm_dev_socket	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kthread_bind_mask	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regulatory_set_wiphy_regd_sync_rtnl	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_logger_put	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1479,6 +1486,7 @@
 0x00000000	perf_trace_run_bpf_submit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	relay_file_operations	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	init_timer_key	vmlinux	EXPORT_SYMBOL	
+0x00000000	irq_pipeline_oopsing	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	_raw_read_unlock	vmlinux	EXPORT_SYMBOL	
 0x00000000	rpc_wake_up_status	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_set_mac_address_user	vmlinux	EXPORT_SYMBOL	
@@ -1714,6 +1722,7 @@
 0x00000000	mark_mounts_for_expiry	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bdi_put	vmlinux	EXPORT_SYMBOL	
 0x00000000	file_check_and_advance_wb_err	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_thread_find	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_ipi_raise	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_csk_reqsk_queue_drop_and_put	vmlinux	EXPORT_SYMBOL	
 0x00000000	page_pool_update_nid	vmlinux	EXPORT_SYMBOL	
@@ -1764,7 +1773,6 @@
 0x00000000	skb_coalesce_rx_frag	vmlinux	EXPORT_SYMBOL	
 0x00000000	rk_cryptodev_unregister_dev	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	power_supply_am_i_supplied	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_reset_tsf	vmlinux	EXPORT_SYMBOL	
 0x00000000	regcache_mark_dirty	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	transport_configure_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nla_put	vmlinux	EXPORT_SYMBOL	
@@ -1793,8 +1801,6 @@
 0x00000000	netdev_walk_all_lower_dev_rcu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_vb2_v4l2_buf_done	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	alloc_ep_req	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_setrxfilter	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_getrxfilter	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_attr_unload_heads	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_modeset_backoff	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_get_iommu_base	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1803,6 +1809,8 @@
 0x00000000	locks_end_grace	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	file_modified	vmlinux	EXPORT_SYMBOL	
 0x00000000	kernel_write	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	__vfile_hostlock_put	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__vfile_hostlock_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__devm_irq_alloc_descs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_put_sb_net	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rtnl_af_unregister	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1841,6 +1849,7 @@
 0x00000000	inode_set_bytes	vmlinux	EXPORT_SYMBOL	
 0x00000000	inode_sub_bytes	vmlinux	EXPORT_SYMBOL	
 0x00000000	unpin_user_page	vmlinux	EXPORT_SYMBOL	
+0x00000000	do_raw_spin_unlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_iter_combinations	vmlinux	EXPORT_SYMBOL	
 0x00000000	svc_unreg_xprt_class	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_prepare_reply_pages	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1853,14 +1862,13 @@
 0x00000000	pps_register_source	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_ctrl_new_std	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_serial_generic_write	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_paprd_is_done	vmlinux	EXPORT_SYMBOL	
-0x00000000	ar9003_get_pll_sqsum_dvc	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_remove_va_region	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_desc_list_voltage_linear_range	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	clkdev_hw_create	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pci_release_selected_regions	vmlinux	EXPORT_SYMBOL	
 0x00000000	crypto_comp_decompress	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	debugfs_real_fops	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_sem_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	_raw_read_unlock_irqrestore	vmlinux	EXPORT_SYMBOL	
 0x00000000	qword_add	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_max_bc_payload	vmlinux	EXPORT_SYMBOL_GPL	
@@ -1945,7 +1953,6 @@
 0x00000000	of_property_count_elems_of_size	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sdhci_enable_v4_mode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_wwan_close	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_getnf	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_rescan_device	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_modeset_lock_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_gem_put_pages	vmlinux	EXPORT_SYMBOL	
@@ -1958,6 +1965,7 @@
 0x00000000	forget_cached_acl	vmlinux	EXPORT_SYMBOL	
 0x00000000	bd_unlink_disk_holder	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	task_handoff_unregister	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rcu_oob_finish_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	down_killable	vmlinux	EXPORT_SYMBOL	
 0x00000000	netdev_master_upper_dev_get_rcu	vmlinux	EXPORT_SYMBOL	
 0x00000000	napi_gro_receive	vmlinux	EXPORT_SYMBOL	
@@ -2049,7 +2057,6 @@
 0x00000000	__netdev_alloc_skb	vmlinux	EXPORT_SYMBOL	
 0x00000000	_snd_pcm_lib_alloc_vmalloc_buffer	vmlinux	EXPORT_SYMBOL	
 0x00000000	fsg_show_file	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_computetxtime	vmlinux	EXPORT_SYMBOL	
 0x00000000	pm_clk_resume	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_reset_gpu_wait	vmlinux	EXPORT_SYMBOL	
 0x00000000	pcie_capability_read_word	vmlinux	EXPORT_SYMBOL	
@@ -2062,7 +2069,6 @@
 0x00000000	rkcif_rockit_pause_stream	vmlinux	EXPORT_SYMBOL	
 0x00000000	cec_s_phys_addr_from_edid	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_s_ext_ctrls	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_init_btcoex_hw	vmlinux	EXPORT_SYMBOL	
 0x00000000	nvme_sync_queues	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dw_hdmi_set_plugged_cb	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tty_kref_put	vmlinux	EXPORT_SYMBOL	
@@ -2081,7 +2087,6 @@
 0x00000000	snd_dmaengine_pcm_open	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fill_inquiry_response	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
 0x00000000	usb_match_one_id	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_paprd_create_curve	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_delay_exec	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sata_deb_timing_long	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_buf_move_notify	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2093,7 +2098,6 @@
 0x00000000	ip6_route_output_flags_noref	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sk_free	vmlinux	EXPORT_SYMBOL	
 0x00000000	asoc_simple_init_jack	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_gpio_get	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_is_sdev_device	vmlinux	EXPORT_SYMBOL	
 0x00000000	regmap_check_range_table	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fwnode_graph_get_port_parent	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2142,6 +2146,7 @@
 0x00000000	devm_drm_panel_bridge_add_typed	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_register_system_status_notifier	vmlinux	EXPORT_SYMBOL	
 0x00000000	gpiochip_disable_irq	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsynch_release	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_policy_insert	vmlinux	EXPORT_SYMBOL	
 0x00000000	skb_segment	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_suspended	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2288,6 +2293,8 @@
 0x00000000	debugfs_file_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	debugfs_file_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	d_find_any_alias	vmlinux	EXPORT_SYMBOL	
+0x00000000	__xntimer_get_timeout	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	dovetail_stop_altsched	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sk_detach_filter	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sk_attach_filter	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_rawmidi_transmit_empty	vmlinux	EXPORT_SYMBOL	
@@ -2337,8 +2344,6 @@
 0x00000000	rpc_run_task	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	skb_flow_dissect_tunnel_info	vmlinux	EXPORT_SYMBOL	
 0x00000000	efivar_entry_set_safe	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpufreq_dbs_governor_exit	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpufreq_dbs_governor_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_get_dev_t	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_i2c_subdev_set_name	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_altmode_update_active	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2467,19 +2472,20 @@
 0x00000000	pci_disable_rom	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_phy_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_phy_get	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnbufd_copy_from_kmem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	svc_wake_up	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xprt_write_space	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_ct_helper_expectfn_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sip_smc_lastlog_request	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fsg_ss_bulk_out_comp_desc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_usb_get_phy	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_setpower	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_do_ioctl_running	vmlinux	EXPORT_SYMBOL	
 0x00000000	mtd_blktrans_cease_background	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	scsi_mode_sense	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_kcpu_fence_signal_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	fb_mode_is_equal	vmlinux	EXPORT_SYMBOL	
 0x00000000	fs_param_is_blockdev	vmlinux	EXPORT_SYMBOL	
+0x00000000	___xnlock_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_gp_is_normal	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__init_waitqueue_head	vmlinux	EXPORT_SYMBOL	
 0x00000000	ns_capable_setid	vmlinux	EXPORT_SYMBOL	
@@ -2502,6 +2508,7 @@
 0x00000000	gpiochip_reqres_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip_compute_csum	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_mq_kick_requeue_list	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_fd_unlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_all_qs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	suspend_set_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dst_alloc	vmlinux	EXPORT_SYMBOL	
@@ -2515,6 +2522,7 @@
 0x00000000	proc_create_mount_point	vmlinux	EXPORT_SYMBOL	
 0x00000000	single_release	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_user_pages	vmlinux	EXPORT_SYMBOL	
+0x00000000	enable_oob_stage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_suspend_global_flags	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	housekeeping_affine	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip6_append_data	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2544,6 +2552,7 @@
 0x00000000	cfb_copyarea	vmlinux	EXPORT_SYMBOL	
 0x00000000	__bitmap_xor	vmlinux	EXPORT_SYMBOL	
 0x00000000	mnt_want_write	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnselect_bind	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	panic_reboot_mode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__dev_remove_pack	vmlinux	EXPORT_SYMBOL	
 0x00000000	secure_ipv6_port_ephemeral	vmlinux	EXPORT_SYMBOL	
@@ -2600,6 +2609,7 @@
 0x00000000	invalidate_bh_lrus	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vfs_iter_read	vmlinux	EXPORT_SYMBOL	
 0x00000000	unregister_trace_event	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	irq_pipeline_nmi_enter	vmlinux	EXPORT_SYMBOL	
 0x00000000	can_rx_register	vmlinux	EXPORT_SYMBOL	
 0x00000000	xt_check_match	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_nat_helper_register	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2734,6 +2744,7 @@
 0x00000000	pcim_iomap_regions_request_all	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_mq_rq_cpu	vmlinux	EXPORT_SYMBOL	
 0x00000000	dentry_open	vmlinux	EXPORT_SYMBOL	
+0x00000000	___xnsched_run	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	freq_qos_add_notifier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_sched_scan_results	vmlinux	EXPORT_SYMBOL	
 0x00000000	unregister_netdevice_queue	vmlinux	EXPORT_SYMBOL	
@@ -2750,9 +2761,7 @@
 0x00000000	nf_nat_setup_info	vmlinux	EXPORT_SYMBOL	
 0x00000000	ethtool_convert_legacy_u32_to_link_mode	vmlinux	EXPORT_SYMBOL	
 0x00000000	led_trigger_set_default	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpuidle_pause_and_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	thermal_zone_of_sensor_unregister	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_setup_statusring	vmlinux	EXPORT_SYMBOL	
 0x00000000	sata_link_debounce	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	amba_bustype	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	jbd2_journal_get_undo_access	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
@@ -2785,6 +2794,7 @@
 0x00000000	fs_umode_to_ftype	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fiemap_fill_next_extent	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	swap_alloc_cluster	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnselector_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_event_ignore_this_pid	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_queue_xmit_accel	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_tm_unlock	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2797,6 +2807,7 @@
 0x00000000	crypto_ahash_digest	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	crypto_remove_final	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_user_pages_unlocked	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsched_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	svc_authenticate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rtnl_set_sk_err	vmlinux	EXPORT_SYMBOL	
 0x00000000	__sock_queue_rcv_skb	vmlinux	EXPORT_SYMBOL	
@@ -2804,7 +2815,6 @@
 0x00000000	snd_pcm_create_iec958_consumer_default	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_cdc_wdm_register	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_hcd_pci_remove	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_set_tx_filter	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_device_remove	vmlinux	EXPORT_SYMBOL	
 0x00000000	class_remove_file_ns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_atomic_helper_wait_for_vblanks	vmlinux	EXPORT_SYMBOL	
@@ -2870,6 +2880,7 @@
 0x00000000	clkdev_drop	vmlinux	EXPORT_SYMBOL	
 0x00000000	kblockd_schedule_work	vmlinux	EXPORT_SYMBOL	
 0x00000000	mem_section	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_task_join	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	send_sig_info	vmlinux	EXPORT_SYMBOL	
 0x00000000	iounmap	vmlinux	EXPORT_SYMBOL	
 0x00000000	cache_seq_next_rcu	vmlinux	EXPORT_SYMBOL_GPL	
@@ -2940,11 +2951,11 @@
 0x00000000	xa_load	vmlinux	EXPORT_SYMBOL	
 0x00000000	ucs2_as_utf8	vmlinux	EXPORT_SYMBOL	
 0x00000000	set_cached_acl	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mmap_to_user	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_ops_ulong	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_ctrl_find	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_ctrl_fill	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l_printk_ioctl	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_cmn_init_crypto	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_package_join	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_i2c_encoder_commit	vmlinux	EXPORT_SYMBOL	
 0x00000000	tty_buffer_set_limit	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3010,6 +3021,7 @@
 0x00000000	blk_mq_flush_busy_ctxs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dcookie_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	locks_mandatory_area	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsynch_sleep_on	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xt_table_unlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sock_alloc_send_pskb	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_soc_link_compr_shutdown	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3067,13 +3079,13 @@
 0x00000000	crypto_shash_tfm_digest	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	flow_rule_match_basic	vmlinux	EXPORT_SYMBOL	
 0x00000000	call_fib_notifiers	vmlinux	EXPORT_SYMBOL	
-0x00000000	dfs_pattern_detector_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_free_phy_pages_helper_locked	vmlinux	EXPORT_SYMBOL	
 0x00000000	__drm_atomic_helper_connector_destroy_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_reset_controller_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regulator_bulk_force_disable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kstrdup_quotable_cmdline	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__kfifo_out_peek_r	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnintr_affinity	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip6_frag_next	vmlinux	EXPORT_SYMBOL	
 0x00000000	netlink_ns_capable	vmlinux	EXPORT_SYMBOL	
 0x00000000	eth_commit_mac_addr_change	vmlinux	EXPORT_SYMBOL	
@@ -3166,6 +3178,7 @@
 0x00000000	fuse_dev_install	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_acl	vmlinux	EXPORT_SYMBOL	
 0x00000000	set_page_dirty_lock	vmlinux	EXPORT_SYMBOL	
+0x00000000	irq_pipeline_nmi_exit	vmlinux	EXPORT_SYMBOL	
 0x00000000	cache_unregister_net	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_v4_conn_request	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_sock_set_keepidle	vmlinux	EXPORT_SYMBOL	
@@ -3186,7 +3199,6 @@
 0x00000000	tcp_v4_syn_recv_sock	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_vb2_v4l2_dqbuf	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_put_dev	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_loadnf	vmlinux	EXPORT_SYMBOL	
 0x00000000	dw_hdmi_qp_set_sample_rate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_panel_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dp_link_rate_to_bw_code	vmlinux	EXPORT_SYMBOL	
@@ -3219,13 +3231,13 @@
 0x00000000	fd_install	vmlinux	EXPORT_SYMBOL	
 0x00000000	_raw_read_lock_bh	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_nat_packet	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	rockchip_dmcfreq_lock_nested	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_bufio_get_block_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__phy_resume	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_mem_get_name	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bio_devname	vmlinux	EXPORT_SYMBOL	
 0x00000000	vfs_fsync_range	vmlinux	EXPORT_SYMBOL	
 0x00000000	memory_cgrp_subsys	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_signal_send_pid	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_suspend_resume	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	disable_irq	vmlinux	EXPORT_SYMBOL	
 0x00000000	bit_wait_io_timeout	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3280,6 +3292,9 @@
 0x00000000	__sg_alloc_table_from_pages	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_finish_plug	vmlinux	EXPORT_SYMBOL	
 0x00000000	vmalloc_32_user	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_drv_set_sysclass	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_set_periodic	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnclock_core_read_monotonic	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rfkill_get_wifi_power_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	ping_recvmsg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_opp_unregister_set_opp_helper	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3319,8 +3334,6 @@
 0x00000000	rkcif_sditf_disconnect	vmlinux	EXPORT_SYMBOL	
 0x00000000	media_device_pci_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usbnet_open	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_putrxbuf	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_puttxbuf	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_test_config_bits	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_atomic_add_encoder_bridges	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dp_dsc_sink_max_slice_count	vmlinux	EXPORT_SYMBOL	
@@ -3362,6 +3375,7 @@
 0x00000000	drm_atomic_set_crtc_for_plane	vmlinux	EXPORT_SYMBOL	
 0x00000000	splice_direct_to_actor	vmlinux	EXPORT_SYMBOL	
 0x00000000	do_traversal_all_lruvec	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsynch_acquire	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_release_client	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	br_fdb_find_port	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	genl_lock	vmlinux	EXPORT_SYMBOL	
@@ -3378,10 +3392,8 @@
 0x00000000	unlock_buffer	vmlinux	EXPORT_SYMBOL	
 0x00000000	simple_transaction_release	vmlinux	EXPORT_SYMBOL	
 0x00000000	rcu_read_unlock_strict	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	iwe_stream_add_value	vmlinux	EXPORT_SYMBOL	
 0x00000000	extcon_find_edev_by_node	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mmc_abort_tuning	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_regd_find_country_by_name	vmlinux	EXPORT_SYMBOL	
 0x00000000	regmap_irq_get_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	subsys_find_device_by_id	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__devm_drm_dev_alloc	vmlinux	EXPORT_SYMBOL	
@@ -3446,7 +3458,6 @@
 0x00000000	sip_fiq_debugger_enable_fiq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mmc_erase	vmlinux	EXPORT_SYMBOL	
 0x00000000	cpufreq_quick_get	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_setantenna	vmlinux	EXPORT_SYMBOL	
 0x00000000	can_dlc2len	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_mode_equal	vmlinux	EXPORT_SYMBOL	
 0x00000000	percpu_counter_set	vmlinux	EXPORT_SYMBOL	
@@ -3467,6 +3478,8 @@
 0x00000000	radix_tree_gang_lookup_tag	vmlinux	EXPORT_SYMBOL	
 0x00000000	klist_add_head	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	uuid_gen	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_timer_handler	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	dovetail_context_switch	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	flush_dcache_page	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_soc_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_querymenu	vmlinux	EXPORT_SYMBOL	
@@ -3491,7 +3504,6 @@
 0x00000000	dma_supported	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_ctrl_sub_ev_ops	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_alloc_urb	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_releasetxqueue	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_driver_is_genphy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	spi_setup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rockchip_connector_update_vfp_for_vrr	vmlinux	EXPORT_SYMBOL	
@@ -3499,6 +3511,7 @@
 0x00000000	serial8250_get_port	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	phy_optional_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inode_init_once	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	__xnclock_ratelimit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	uprobe_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tracing_snapshot	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_change_overwrite	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3516,6 +3529,7 @@
 0x00000000	raid6_datap_recov	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	scsi_cmd_ioctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	page_cache_async_ra	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_signal	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	arm64_mm_context_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	arm64_mm_context_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_tx_mlme_mgmt	vmlinux	EXPORT_SYMBOL	
@@ -3545,13 +3559,13 @@
 0x00000000	clk_bulk_prepare	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fb_prepare_logo	vmlinux	EXPORT_SYMBOL	
 0x00000000	memset32	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_fd_ioctl	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_sched_overutilized_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blocking_notifier_chain_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_is_element_inherited	vmlinux	EXPORT_SYMBOL	
 0x00000000	neigh_table_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	rk_emmc_transfer	vmlinux	EXPORT_SYMBOL	
 0x00000000	mmc_register_driver	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_register_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	input_ff_effect_from_user	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcpm_cc_change	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mfd_cell_enable	vmlinux	EXPORT_SYMBOL	
@@ -3564,6 +3578,7 @@
 0x00000000	__traceiter_block_rq_issue	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	acomp_request_alloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sysfs_create_link	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_get_iov_flatlen	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	task_active_pid_ns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	svc_set_client	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_fastopen_defer_connect	vmlinux	EXPORT_SYMBOL	
@@ -3595,6 +3610,7 @@
 0x00000000	mb_cache_entry_delete	vmlinux	EXPORT_SYMBOL	
 0x00000000	seq_release	vmlinux	EXPORT_SYMBOL	
 0x00000000	insert_inode_locked	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mmap_vmem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_size	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tick_broadcast_control	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3630,6 +3646,8 @@
 0x00000000	pstore_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fs_context_for_submount	vmlinux	EXPORT_SYMBOL	
 0x00000000	kfree_link	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mutex_unlock	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xenomai_personality	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	up_write	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_pelt_cfs_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpu_all_bits	vmlinux	EXPORT_SYMBOL	
@@ -3660,6 +3678,7 @@
 0x00000000	jbd2_log_start_commit	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	remove_proc_subtree	vmlinux	EXPORT_SYMBOL	
 0x00000000	rcu_is_watching	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__hybrid_spin_lock_irq	vmlinux	EXPORT_SYMBOL	
 0x00000000	asoc_simple_startup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_component_enable_pin	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_of_parse_card_name	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3687,7 +3706,6 @@
 0x00000000	neigh_lookup_nodev	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_m2m_get_curr_priv	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_stor_disconnect	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
-0x00000000	ath_is_world_regd	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_change_queue_depth	vmlinux	EXPORT_SYMBOL	
 0x00000000	fwnode_get_mac_address	vmlinux	EXPORT_SYMBOL	
 0x00000000	dw_pcie_read	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3695,7 +3713,6 @@
 0x00000000	mpi_read_raw_from_sgl	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mpi_ec_deinit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	crc16	vmlinux	EXPORT_SYMBOL	
-0x00000000	log_suspend_abort_reason	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sched_show_task	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	work_on_cpu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_nvmem_cell_put	vmlinux	EXPORT_SYMBOL	
@@ -3709,6 +3726,7 @@
 0x00000000	rhltable_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bsearch	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_put_request	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_munmap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	async_synchronize_full_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dst_blackhole_update_pmtu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sock_zerocopy_put_abort	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3716,7 +3734,6 @@
 0x00000000	input_get_timestamp	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_serial_generic_throttle	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cdrom_mode_select	vmlinux	EXPORT_SYMBOL	
-0x00000000	ar9003_paprd_enable	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_syncobj_create	vmlinux	EXPORT_SYMBOL	
 0x00000000	ZSTD_compress_usingCDict	vmlinux	EXPORT_SYMBOL	
 0x00000000	__tracepoint_android_fs_datawrite_end	vmlinux	EXPORT_SYMBOL	
@@ -3738,17 +3755,18 @@
 0x00000000	rhashtable_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blk_limits_io_min	vmlinux	EXPORT_SYMBOL	
 0x00000000	sysfs_remove_files	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	dovetail_start_altsched	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	percpu_up_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_setbufsize	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	unix_table_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usbnet_purge_paused_rxq	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_set_gpio	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_internal_device_unblock_nowait	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_hdmi_avi_infoframe_bars	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dp_downstream_444_to_420_conversion	vmlinux	EXPORT_SYMBOL	
 0x00000000	color_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_clk_hw_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	put_disk	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnheap_vmalloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	add_timer	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_irq_handler_entry	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	can_proto_register	vmlinux	EXPORT_SYMBOL	
@@ -3839,6 +3857,7 @@
 0x00000000	fuse_dequeue_forget	vmlinux	EXPORT_SYMBOL	
 0x00000000	__traceiter_android_fs_dataread_start	vmlinux	EXPORT_SYMBOL	
 0x00000000	swapcache_free_entries	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_event_select	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	clockevents_register_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cache_check	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fib_rules_seq_read	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3896,6 +3915,7 @@
 0x00000000	jbd2_journal_unlock_updates	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	sysctl_vfs_cache_pressure	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_mm_vmscan_direct_reclaim_end	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_unregister_personality	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_sync_single_for_cpu	vmlinux	EXPORT_SYMBOL	
 0x00000000	panic	vmlinux	EXPORT_SYMBOL	
 0x00000000	xprt_wake_up_backlog	vmlinux	EXPORT_SYMBOL_GPL	
@@ -3911,12 +3931,12 @@
 0x00000000	__traceiter_nfs_fsync_enter	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__kmalloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	__vmalloc	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnintr_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xdr_truncate_encode	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_set_keepalive	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_opp_of_get_sharing_cpus	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ohci_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usbnet_get_ethernet_addr	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_btcoex_deinit	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_sas_port_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_property_replace_blob	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_bridge_chain_enable	vmlinux	EXPORT_SYMBOL	
@@ -3939,6 +3959,7 @@
 0x00000000	pci_find_ht_capability	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pci_find_next_ht_capability	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	gf128mul_init_64k_bbe	vmlinux	EXPORT_SYMBOL	
+0x00000000	__xntimer_migrate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_find_vendor_elem	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_ie_split_ric	vmlinux	EXPORT_SYMBOL	
 0x00000000	xt_find_match	vmlinux	EXPORT_SYMBOL	
@@ -3978,7 +3999,6 @@
 0x00000000	of_translate_address	vmlinux	EXPORT_SYMBOL	
 0x00000000	power_supply_property_is_writeable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cec_unregister_adapter	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_hw_disable_phy_restart	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_encoder_cleanup	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_vdpu2_driver	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_vdpu1_driver	vmlinux	EXPORT_SYMBOL	
@@ -4093,6 +4113,7 @@
 0x00000000	pwmchip_remove	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	simple_nosetlease	vmlinux	EXPORT_SYMBOL	
 0x00000000	mount_nodev	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_irq_request	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_find_acq	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_nat_ipv6_unregister_fn	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	eth_mac_addr	vmlinux	EXPORT_SYMBOL	
@@ -4113,7 +4134,6 @@
 0x00000000	allocate_resource	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_channel_to_freq_khz	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet6addr_notifier_call_chain	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	phy_10gbit_features	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_schedule_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	clk_bulk_get_all	vmlinux	EXPORT_SYMBOL	
@@ -4204,8 +4224,6 @@
 0x00000000	mbox_client_peek_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cec_s_log_addrs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rndis_set_host_mac	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_regd_init	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_wait	vmlinux	EXPORT_SYMBOL	
 0x00000000	unregister_mtd_chip_driver	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_has_full_constraints	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	gpiod_get_raw_array_value_cansleep	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4277,6 +4295,8 @@
 0x00000000	crypto_unregister_acomps	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	key_link	vmlinux	EXPORT_SYMBOL	
 0x00000000	read_cache_page	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_fd_put_iovec	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_fd_get_iovec	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tick_broadcast_oneshot_control	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_unexpedite_gp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_pelt_irq_tp	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4302,13 +4322,13 @@
 0x00000000	sbitmap_queue_wake_all	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nla_put_nohdr	vmlinux	EXPORT_SYMBOL	
 0x00000000	copy_to_user_nofault	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__rtdm_synch_flush	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pid_nr_ns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bt_sock_unregister	vmlinux	EXPORT_SYMBOL	
 0x00000000	napi_gro_flush	vmlinux	EXPORT_SYMBOL	
 0x00000000	sip_fiq_control	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	power_supply_temp2resist_simple	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rkcif_plat_drv	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_setuptxqueue	vmlinux	EXPORT_SYMBOL	
 0x00000000	nanddev_bbt_in_flash_update	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ahci_platform_suspend_host	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	scsi_set_medium_removal	vmlinux	EXPORT_SYMBOL	
@@ -4343,7 +4363,6 @@
 0x00000000	nf_ct_helper_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sdhci_pltfm_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vb2_ops_wait_finish	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_set_sta_beacon_timers	vmlinux	EXPORT_SYMBOL	
 0x00000000	midgard__mali_profiling_control	vmlinux	EXPORT_SYMBOL	
 0x00000000	tpm_is_tpm2	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sysrq_mask	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4362,6 +4381,8 @@
 0x00000000	add_device_randomness	vmlinux	EXPORT_SYMBOL	
 0x00000000	fb_find_nearest_mode	vmlinux	EXPORT_SYMBOL	
 0x00000000	gpiod_get_array	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_clock_register	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnheap_check_block	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_put_event_file	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_count_pfkey_auth_supported	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	secure_tcpv6_seq	vmlinux	EXPORT_SYMBOL	
@@ -4435,7 +4456,6 @@
 0x00000000	snd_pcm_stop_xrun	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_hwdep_new	vmlinux	EXPORT_SYMBOL	
 0x00000000	ffs_lock	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_rx_accept	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_csf_scheduler_pm_suspend	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_suspend_disable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pci_bus_resource_n	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4444,6 +4464,7 @@
 0x00000000	bio_free_pages	vmlinux	EXPORT_SYMBOL	
 0x00000000	__ksize	vmlinux	EXPORT_SYMBOL	
 0x00000000	down_interruptible	vmlinux	EXPORT_SYMBOL	
+0x00000000	dovetail_resume_inband	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_abort	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netif_carrier_off	vmlinux	EXPORT_SYMBOL	
 0x00000000	mmc_request_done	vmlinux	EXPORT_SYMBOL	
@@ -4478,7 +4499,6 @@
 0x00000000	inet_csk_prepare_forced_close	vmlinux	EXPORT_SYMBOL	
 0x00000000	cqhci_pltfm_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_vb2_v4l2_buf_done	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_btcoex_set_weight	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_mali_page_fault_insert_pages	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dw_hdmi_phy_setup_hpd	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__rb_erase_color	vmlinux	EXPORT_SYMBOL	
@@ -4518,6 +4538,7 @@
 0x00000000	filp_open	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	check_cache_active	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_clock_jiffies	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	inband_irq_disable	vmlinux	EXPORT_SYMBOL	
 0x00000000	smpboot_register_percpu_thread	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_calg_get_byname	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip_vs_tcp_conn_listen	vmlinux	EXPORT_SYMBOL	
@@ -4630,7 +4651,6 @@
 0x00000000	sdio_register_driver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_kill_urb	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_get_intf	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_stopdmarecv	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_pm_genpd_set_performance_state	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_crtc_vblank_reset	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_get_voltage_rdev	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4664,6 +4684,7 @@
 0x00000000	skcipher_walk_complete	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	crypto_remove_spawns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_empty_cpu	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	irq_post_stage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip6_input	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip6tun_encaps	vmlinux	EXPORT_SYMBOL	
 0x00000000	xt_replace_table	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4674,6 +4695,7 @@
 0x00000000	pci_free_host_bridge	vmlinux	EXPORT_SYMBOL	
 0x00000000	__percpu_counter_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_user_pages_remote	vmlinux	EXPORT_SYMBOL	
+0x00000000	__cobalt_sigqueue	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_get_domain_generic_chip	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_set_affinity_notifier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_hashinfo_init	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4707,6 +4729,7 @@
 0x00000000	aes_encrypt	vmlinux	EXPORT_SYMBOL	
 0x00000000	sg_miter_next	vmlinux	EXPORT_SYMBOL	
 0x00000000	zero_pfn	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnintr_disable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_get_ushort	vmlinux	EXPORT_SYMBOL	
 0x00000000	svc_sock_update_bufs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_sock_set_cork	vmlinux	EXPORT_SYMBOL	
@@ -4791,6 +4814,7 @@
 0x00000000	hid_setup_resolution_multiplier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ata_do_dev_read_id	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_csf_firmware_csg_output	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_irq_request_affine	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__srcu_read_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_domain_alloc_irqs_parent	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip_sock_set_pktinfo	vmlinux	EXPORT_SYMBOL	
@@ -4799,7 +4823,6 @@
 0x00000000	nvmem_add_cell_table	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_clk_get	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_function_unregister	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_mci_send_message	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_fence_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	__drm_atomic_helper_set_config	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_block_split	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4810,7 +4833,6 @@
 0x00000000	mr_vif_seq_idx	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_mc_add_excl	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_uc_add_excl	vmlinux	EXPORT_SYMBOL	
-0x00000000	rockchip_dmcfreq_unlock	vmlinux	EXPORT_SYMBOL	
 0x00000000	media_request_object_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	i2c_bus_type	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	spi_mem_adjust_op_size	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4861,6 +4883,7 @@
 0x00000000	blk_queue_io_min	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_writeback_update_inode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nobh_truncate_page	vmlinux	EXPORT_SYMBOL	
+0x00000000	__rtdm_nrtsig_execute	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_generic_frame_duration	vmlinux	EXPORT_SYMBOL	
 0x00000000	xprt_lock_connect	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_unregister_dai	vmlinux	EXPORT_SYMBOL_GPL	
@@ -4909,7 +4932,6 @@
 0x00000000	__skb_free_datagram_locked	vmlinux	EXPORT_SYMBOL	
 0x00000000	lock_sock_fast	vmlinux	EXPORT_SYMBOL	
 0x00000000	sdhci_resume_host	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_disable_mib_counters	vmlinux	EXPORT_SYMBOL	
 0x00000000	cn_add_callback	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	alloc_iova_fast	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ilookup	vmlinux	EXPORT_SYMBOL	
@@ -4999,8 +5021,10 @@
 0x00000000	cpufreq_boost_enabled	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_get_hcd	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_ht_find_item	vmlinux	EXPORT_SYMBOL	
+0x00000000	fbcon_set_bitops	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_update_request	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	jbd2_journal_put_journal_head	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	__oob_irq_restore	vmlinux	EXPORT_SYMBOL	
 0x00000000	__wake_up_bit	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_task_cred	vmlinux	EXPORT_SYMBOL	
 0x00000000	xdr_encode_array2	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5071,7 +5095,6 @@
 0x00000000	udp_tunnel6_xmit_skb	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	flow_block_cb_alloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	sysctl_max_skb_frags	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_phy_disable	vmlinux	EXPORT_SYMBOL	
 0x00000000	mtd_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_resv_test_signaled_rcu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_gem_object_free	vmlinux	EXPORT_SYMBOL	
@@ -5169,7 +5192,6 @@
 0x00000000	param_get_int	vmlinux	EXPORT_SYMBOL	
 0x00000000	skb_ext_add	vmlinux	EXPORT_SYMBOL	
 0x00000000	typec_set_mode	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_process_rxdesc_edma	vmlinux	EXPORT_SYMBOL	
 0x00000000	vxlan_dev_create	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_buf_dynamic_attach	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	start_tty	vmlinux	EXPORT_SYMBOL	
@@ -5178,6 +5200,7 @@
 0x00000000	shash_ahash_finup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	user_update	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	configfs_unregister_group	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnvfile_init_regular	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ktime_get_with_offset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	resched_curr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_ct_get_tuplepr	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5235,6 +5258,7 @@
 0x00000000	radix_tree_gang_lookup_tag_slot	vmlinux	EXPORT_SYMBOL	
 0x00000000	fuse_file_poll	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mnt_want_write_file	vmlinux	EXPORT_SYMBOL_GPL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	rtdm_sem_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pause_cpus	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	hci_register_cb	vmlinux	EXPORT_SYMBOL	
 0x00000000	ip_ct_attach	vmlinux	EXPORT_SYMBOL	
@@ -5252,7 +5276,6 @@
 0x00000000	snd_pcm_fill_iec958_consumer	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_fwnode_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_opp_get_opp_count	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_gettsf32	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_pm_disable_interrupts	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_set_soft_start_regmap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regulator_get_error_flags	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5261,10 +5284,8 @@
 0x00000000	svc_seq_show	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_br_fdb_update	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	asoc_simple_canonicalize_platform	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	od_unregister_powersave_bias_handler	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vb2_core_queue_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_amd_prefetch_quirk	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_key_delete	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_free_host_dev	vmlinux	EXPORT_SYMBOL	
 0x00000000	pm_print_active_wakeup_sources	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	driver_register	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5288,7 +5309,6 @@
 0x00000000	of_dma_is_coherent	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	input_ff_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_wwan_chars_in_buffer	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath_is_49ghz_allowed	vmlinux	EXPORT_SYMBOL	
 0x00000000	mfd_cell_disable	vmlinux	EXPORT_SYMBOL	
 0x00000000	platform_get_irq_byname	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ZSTD_decompressContinue	vmlinux	EXPORT_SYMBOL	
@@ -5309,6 +5329,7 @@
 0x00000000	scsi_verify_blk_ioctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	configfs_register_group	vmlinux	EXPORT_SYMBOL	
 0x00000000	pagecache_isize_extended	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_fd_read	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__warn_printk	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_beacon_get_tim	vmlinux	EXPORT_SYMBOL	
 0x00000000	ipv6_dup_options	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5324,7 +5345,6 @@
 0x00000000	ieee80211_tkip_add_iv	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_conntrack_locks	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iio_read_channel_offset	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_hw_setbssidmask	vmlinux	EXPORT_SYMBOL	
 0x00000000	configfs_remove_default_groups	vmlinux	EXPORT_SYMBOL	
 0x00000000	zap_vma_ptes	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_pelt_rt_tp	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5349,8 +5369,6 @@
 0x00000000	rpc_init_pipe_dir_object	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip_mc_leave_group	vmlinux	EXPORT_SYMBOL	
 0x00000000	netif_skb_features	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_settsf64	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_gettsf64	vmlinux	EXPORT_SYMBOL	
 0x00000000	pm_runtime_set_memalloc_noio	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iommu_fwspec_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_gpiod_get_array	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5410,7 +5428,6 @@
 0x00000000	dev_addr_flush	vmlinux	EXPORT_SYMBOL	
 0x00000000	nvmem_device_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	thermal_notify_framework	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_init_global_settings	vmlinux	EXPORT_SYMBOL	
 0x00000000	alloc_can_skb	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	recover_lost_locks	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	register_nfs_version	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5472,13 +5489,13 @@
 0x00000000	usb_wwan_write	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_unlink_anchored_urbs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usbnet_write_cmd	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_getchan_noise	vmlinux	EXPORT_SYMBOL	
 0x00000000	xas_nomem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	zlib_inflateEnd	vmlinux	EXPORT_SYMBOL	
 0x00000000	linear_range_values_in_range_array	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__blkdev_issue_discard	vmlinux	EXPORT_SYMBOL	
 0x00000000	__tracepoint_task_rename	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	contig_page_data	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnselect_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_num_bc_slots	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xt_check_proc_name	vmlinux	EXPORT_SYMBOL	
 0x00000000	xt_unregister_target	vmlinux	EXPORT_SYMBOL	
@@ -5511,6 +5528,7 @@
 0x00000000	part_end_io_acct	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	simple_transaction_read	vmlinux	EXPORT_SYMBOL	
 0x00000000	mempool_free_slab	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tick_nohz_get_sleep_length	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__memset_io	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_ctl_register_ioctl_compat	vmlinux	EXPORT_SYMBOL	
@@ -5551,7 +5569,6 @@
 0x00000000	snd_dmaengine_pcm_close	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_table_event	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_watchdog_register_device	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_abort_tx_dma	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_controller_dma_map_mem_op_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ata_pack_xfermask	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	class_dev_iter_next	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5596,6 +5613,7 @@
 0x00000000	noop_direct_IO	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vm_unmapped_area	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	shmem_file_setup_with_mnt	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	dovetail_init_altsched	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_force_quiescent_state	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_domain_push_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_make_synack	vmlinux	EXPORT_SYMBOL	
@@ -5670,6 +5688,7 @@
 0x00000000	__tracepoint_block_rq_remap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	proc_mkdir_mode	vmlinux	EXPORT_SYMBOL	
 0x00000000	tag_pages_for_writeback	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnregistry_bind	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_check_station_change	vmlinux	EXPORT_SYMBOL	
 0x00000000	rpcauth_destroy_credcache	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	flow_indr_dev_register	vmlinux	EXPORT_SYMBOL	
@@ -5690,7 +5709,6 @@
 0x00000000	cfg80211_assoc_timeout	vmlinux	EXPORT_SYMBOL	
 0x00000000	__napi_alloc_skb	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_disable_xhci_ports	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_hw_get_listen_time	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_report_opcode	vmlinux	EXPORT_SYMBOL	
 0x00000000	pm_clk_create	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_bridge_chain_enable	vmlinux	EXPORT_SYMBOL	
@@ -5713,6 +5731,7 @@
 0x00000000	nfs_pageio_init_read	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sysfs_unbreak_active_protection	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__mnt_is_readonly	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_task_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_cpu_idle	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_close	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet_add_offload	vmlinux	EXPORT_SYMBOL	
@@ -5723,7 +5742,6 @@
 0x00000000	get_thermal_instance	vmlinux	EXPORT_SYMBOL	
 0x00000000	config_ep_by_speed_and_alt	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_register_dev	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_mci_get_interrupt	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_dma_map	vmlinux	EXPORT_SYMBOL	
 0x00000000	bprintf	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xz_dec_init	vmlinux	EXPORT_SYMBOL	
@@ -5738,7 +5756,6 @@
 0x00000000	ping_getfrag	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	skb_csum_hwoffload_help	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_root	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_cmn_get_channel	vmlinux	EXPORT_SYMBOL	
 0x00000000	__tracepoint_mali_pm_status	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_edid_duplicate	vmlinux	EXPORT_SYMBOL	
 0x00000000	__drm_mm_interval_first	vmlinux	EXPORT_SYMBOL	
@@ -5748,6 +5765,8 @@
 0x00000000	register_framebuffer	vmlinux	EXPORT_SYMBOL	
 0x00000000	crypto_unregister_templates	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	migrate_page_states	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsynch_init	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	___xnlock_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_pelt_irq_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_pelt_cfs_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_sk_storage_diag_put	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5780,7 +5799,6 @@
 0x00000000	nfs_debug	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_replay_seqhi	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_enter_cwr	vmlinux	EXPORT_SYMBOL	
-0x00000000	rockchip_dmcfreq_vop_bandwidth_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	hidraw_report_event	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_bitset_del	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_dv_timings_aspect_ratio	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5790,6 +5808,7 @@
 0x00000000	mtd_unlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nvme_put_ns	vmlinux	EXPORT_SYMBOL_GPL	NVME_TARGET_PASSTHRU
 0x00000000	sbitmap_del_wait_queue	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_thread_find_local	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	memunmap	vmlinux	EXPORT_SYMBOL	
 0x00000000	memremap	vmlinux	EXPORT_SYMBOL	
 0x00000000	call_rcu	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5827,6 +5846,7 @@
 0x00000000	drm_client_framebuffer_delete	vmlinux	EXPORT_SYMBOL	
 0x00000000	find_next_zero_bit	vmlinux	EXPORT_SYMBOL	
 0x00000000	inc_nlink	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_relax	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	smpboot_unregister_percpu_thread	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wiphy_free	vmlinux	EXPORT_SYMBOL	
 0x00000000	__netlink_dump_start	vmlinux	EXPORT_SYMBOL	
@@ -5840,6 +5860,7 @@
 0x00000000	mnt_drop_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vmf_insert_pfn_prot	vmlinux	EXPORT_SYMBOL	
 0x00000000	__alloc_percpu_gfp	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_fd_get_setsockaddr_args	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_uaddr2sockaddr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xt_compat_unlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpufreq_freq_transition_begin	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5858,7 +5879,6 @@
 0x00000000	perf_event_pause	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	synchronize_srcu_expedited	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_cong_avoid_ai	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpuidle_resume_and_unlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_vb2_qbuf	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_altnum_to_altsetting	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_clk_runtime_resume	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5869,6 +5889,7 @@
 0x00000000	nfs_close_context	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	follow_down	vmlinux	EXPORT_SYMBOL	
 0x00000000	filemap_write_and_wait_range	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnintr_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_rpm_resume	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_rpm_idle	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_cpu_idle	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5915,7 +5936,6 @@
 0x00000000	sip_smc_request_share_mem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	thermal_cooling_device_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_dwc3_complete_trb	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_gen_timer_free	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_framebuffer_plane_width	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_gem_dmabuf_vunmap	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_helper_connector_reset	vmlinux	EXPORT_SYMBOL	
@@ -5934,6 +5954,7 @@
 0x00000000	pcie_update_link_speed	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blk_mq_start_request	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_init_cinfo	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_clock_deregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	hci_suspend_dev	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_v4_send_check	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_expect_find_get	vmlinux	EXPORT_SYMBOL_GPL	
@@ -5988,7 +6009,6 @@
 0x00000000	rockchip_cpufreq_opp_set_rate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_opp_set_clkname	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_usb_gadget_connect	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_set_interrupts	vmlinux	EXPORT_SYMBOL	
 0x00000000	__tracepoint_spi_transfer_start	vmlinux	EXPORT_SYMBOL	
 0x00000000	ahci_reset_controller	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_resv_get_singleton	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6062,7 +6082,6 @@
 0x00000000	perf_aux_output_end	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_prog_select_runtime	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kmsg_dump_unregister	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpu_latency_qos_add_request	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	override_creds	vmlinux	EXPORT_SYMBOL	
 0x00000000	system_long_wq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__ip_queue_xmit	vmlinux	EXPORT_SYMBOL	
@@ -6095,6 +6114,8 @@
 0x00000000	__bio_clone_fast	vmlinux	EXPORT_SYMBOL	
 0x00000000	kern_unmount	vmlinux	EXPORT_SYMBOL	
 0x00000000	total_swapcache_pages	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_join	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	nksched	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip_vs_proto_get	vmlinux	EXPORT_SYMBOL	
 0x00000000	__traceiter_br_fdb_external_learn_add	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_ctrl_query_fill	vmlinux	EXPORT_SYMBOL	
@@ -6138,6 +6159,7 @@
 0x00000000	nfs4_schedule_lease_recovery	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_android_fs_datawrite_start	vmlinux	EXPORT_SYMBOL	
 0x00000000	kvrealloc	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnregistry_vfreg_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_sta_opmode_change_notify	vmlinux	EXPORT_SYMBOL	
 0x00000000	__xfrm_state_destroy	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_soc_lookup_component	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6149,6 +6171,7 @@
 0x00000000	pm_genpd_remove_subdomain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tty_port_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	complete_request_key	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_machine_cpudata	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_sched_util_est_cfs_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	panic_timeout	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_queue	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6156,7 +6179,6 @@
 0x00000000	extcon_dev_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	extcon_set_state_sync	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_hcd_resume_root_hub	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_paprd_init_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	ZSTD_initDStream	vmlinux	EXPORT_SYMBOL	
 0x00000000	ZSTD_initCStream	vmlinux	EXPORT_SYMBOL	
 0x00000000	aead_init_geniv	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6164,7 +6186,6 @@
 0x00000000	jbd2_journal_grab_journal_head	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	noop_fsync	vmlinux	EXPORT_SYMBOL	
 0x00000000	list_lru_destroy	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpu_latency_qos_update_request	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_ibss_joined	vmlinux	EXPORT_SYMBOL	
 0x00000000	rpc_count_iostats	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_destroy_pipe_data	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6178,6 +6199,7 @@
 0x00000000	crypto_hash_walk_first	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	alloc_anon_inode	vmlinux	EXPORT_SYMBOL	
 0x00000000	vfs_mkobj	vmlinux	EXPORT_SYMBOL	
+0x00000000	tick_notify_proxy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_sendpage	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_set_allmulti	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_get_by_index_rcu	vmlinux	EXPORT_SYMBOL	
@@ -6191,6 +6213,8 @@
 0x00000000	xxh32_digest	vmlinux	EXPORT_SYMBOL	
 0x00000000	__bitmap_intersects	vmlinux	EXPORT_SYMBOL	
 0x00000000	ksize	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnvfile_get_string	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_killall	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kstat	vmlinux	EXPORT_SYMBOL	
 0x00000000	reg_query_regdb_wmm	vmlinux	EXPORT_SYMBOL	
 0x00000000	hci_conn_security	vmlinux	EXPORT_SYMBOL	
@@ -6275,6 +6299,7 @@
 0x00000000	sg_free_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	gf128mul_free_64k	vmlinux	EXPORT_SYMBOL	
 0x00000000	locks_remove_posix	vmlinux	EXPORT_SYMBOL	
+0x00000000	registry_obj_slots	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_iter_reset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__skb_recv_udp	vmlinux	EXPORT_SYMBOL	
 0x00000000	__phy_read_mmd	vmlinux	EXPORT_SYMBOL	
@@ -6346,6 +6371,7 @@
 0x00000000	key_instantiate_and_link	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_check_flags	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dentry_path_raw	vmlinux	EXPORT_SYMBOL	
+0x00000000	__hybrid_spin_lock_irqsave	vmlinux	EXPORT_SYMBOL	
 0x00000000	ipv6_sock_mc_drop	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_sad_getinfo	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_logger_find_get	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6361,6 +6387,7 @@
 0x00000000	simple_open	vmlinux	EXPORT_SYMBOL	
 0x00000000	single_open	vmlinux	EXPORT_SYMBOL	
 0x00000000	__page_file_mapping	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnclock_apply_offset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm6_find_1stfragopt	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_request_card	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_graph_get_port_parent	vmlinux	EXPORT_SYMBOL	
@@ -6407,6 +6434,8 @@
 0x00000000	rockchip_nvmem_cell_read_u8	vmlinux	EXPORT_SYMBOL	
 0x00000000	dmaengine_desc_set_metadata_len	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	utf32_to_utf8	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mutex_destroy	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_task_busy_sleep	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blk_trace_remove	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cleanup_srcu_struct	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	groups_free	vmlinux	EXPORT_SYMBOL	
@@ -6421,17 +6450,18 @@
 0x00000000	gpiod_set_transitory	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	strreplace	vmlinux	EXPORT_SYMBOL	
 0x00000000	gf128mul_64k_bbe	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_add_state_chain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_stop_rx_ba_session	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet6addr_validator_notifier_call_chain	vmlinux	EXPORT_SYMBOL	
 0x00000000	ip_sock_set_recverr	vmlinux	EXPORT_SYMBOL	
 0x00000000	ip_vs_scheduler_err	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_soc_link_compr_startup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_dai_set_bclk_ratio	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_push_personality	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	_raw_read_unlock_bh	vmlinux	EXPORT_SYMBOL	
 0x00000000	rfkill_register	vmlinux	EXPORT_SYMBOL	
 0x00000000	cfg80211_cqm_beacon_loss_notify	vmlinux	EXPORT_SYMBOL	
 0x00000000	neigh_parms_alloc	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_unregister_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_btree_cursor_begin	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_os_desc_prepare_interf_dir	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_disabled	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6453,6 +6483,7 @@
 0x00000000	iommu_uapi_sva_bind_gpasid	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	asymmetric_key_id_partial	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_prog_inc	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	do_raw_spin_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpupri_find_fitness	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	io_schedule	vmlinux	EXPORT_SYMBOL	
 0x00000000	remove_cpu	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6466,7 +6497,6 @@
 0x00000000	tee_client_close_context	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iio_device_claim_direct_mode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sdhci_runtime_resume_host	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_beaconinit	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_regmap_del_irq_chip	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_regmap_add_irq_chip	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_wakeup_ws_event	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6541,13 +6571,13 @@
 0x00000000	fb_destroy_modedb	vmlinux	EXPORT_SYMBOL	
 0x00000000	__sg_page_iter_start	vmlinux	EXPORT_SYMBOL	
 0x00000000	fsstack_copy_attr_all	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnintr_enable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_init_replay	vmlinux	EXPORT_SYMBOL	
 0x00000000	__nf_ct_try_assign_helper	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sock_zerocopy_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_jack_report	vmlinux	EXPORT_SYMBOL	
 0x00000000	sdio_signal_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_opp_get_opp_table	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_regd_get_band_ctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_command_normalize_sense	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_kmalloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__drm_atomic_helper_disable_plane	vmlinux	EXPORT_SYMBOL	
@@ -6608,6 +6638,8 @@
 0x00000000	crypto_register_shashes	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_pageio_reset_read_mds	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_anon_bdev	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_event_destroy	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsched_class_rt	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_event_length	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_gtk_rekey_notify	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sock_kfree_s	vmlinux	EXPORT_SYMBOL	
@@ -6624,6 +6656,7 @@
 0x00000000	empty_aops	vmlinux	EXPORT_SYMBOL	
 0x00000000	zs_malloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_kfree	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnregistry_vfsnap_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ktime_get_coarse_real_ts64	vmlinux	EXPORT_SYMBOL	
 0x00000000	irq_domain_pop_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__task_pid_nr_ns	vmlinux	EXPORT_SYMBOL	
@@ -6717,6 +6750,7 @@
 0x00000000	sgl_alloc_order	vmlinux	EXPORT_SYMBOL	
 0x00000000	aead_exit_geniv	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	d_move	vmlinux	EXPORT_SYMBOL	
+0x00000000	dovetail_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_read_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_wake_up_first	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ping_common_sendmsg	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6725,6 +6759,7 @@
 0x00000000	touch_atime	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	evict_inodes	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_cpu_idle_time_us	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__hybrid_spin_unlock_irq	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_unregister_hw	vmlinux	EXPORT_SYMBOL	
 0x00000000	kernel_sendmsg_locked	vmlinux	EXPORT_SYMBOL	
 0x00000000	device_set_wakeup_capable	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6794,11 +6829,9 @@
 0x00000000	rtnl_link_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	make_flow_keys_digest	vmlinux	EXPORT_SYMBOL	
 0x00000000	sk_set_memalloc	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	gov_update_cpu_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	thermal_zone_device_enable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	intlog2	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_gadget_activate	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_setrxabort	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_buf_export	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	clk_unregister_gate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	platform_irqchip_probe	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6837,7 +6870,6 @@
 0x00000000	__set_page_dirty_buffers	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	unlock_two_nondirectories	vmlinux	EXPORT_SYMBOL	
 0x00000000	__wake_up_locked	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	uclamp_eff_value	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bt_sock_poll	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_pre_changeaddr_notify	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_queue_xmit_nit	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6851,7 +6883,6 @@
 0x00000000	put_nfs_open_context	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mod_zone_page_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	perf_event_refresh	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	sched_uclamp_used	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mini_qdisc_pair_swap	vmlinux	EXPORT_SYMBOL	
 0x00000000	netdev_reset_tc	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_property_read_string_helper	vmlinux	EXPORT_SYMBOL_GPL	
@@ -6977,6 +7008,7 @@
 0x00000000	gpiod_put_array	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_kmalloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	pcpu_base_addr	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_get_timeout	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_conntrack_helper_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_seq_root	vmlinux	EXPORT_SYMBOL	
 0x00000000	media_graph_walk_start	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7074,7 +7106,6 @@
 0x00000000	sock_no_sendpage	vmlinux	EXPORT_SYMBOL	
 0x00000000	iio_read_channel_raw	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	led_put	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_startpcureceive	vmlinux	EXPORT_SYMBOL	
 0x00000000	__ctzdi2	vmlinux	EXPORT_SYMBOL	
 0x00000000	__clzdi2	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_pre_runtime_suspend	vmlinux	EXPORT_SYMBOL	
@@ -7139,6 +7170,7 @@
 0x00000000	dm_bio_prison_destroy_v2	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	video_device_alloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_pm_context_active	vmlinux	EXPORT_SYMBOL	
+0x00000000	xntimer_get_overruns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpus_read_trylock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	udp_disconnect	vmlinux	EXPORT_SYMBOL	
 0x00000000	ethnl_cable_test_pulse	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7148,7 +7180,6 @@
 0x00000000	devm_nvmem_cell_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_choose_configuration	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_free_streams	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_btcoex_init_scheme	vmlinux	EXPORT_SYMBOL	
 0x00000000	ubi_do_get_device_info	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regcache_cache_only	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pinctrl_gpio_set_config	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7156,7 +7187,6 @@
 0x00000000	addrconf_prefix_rcv_add_addr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xt_compat_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	input_set_timestamp	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_gen_timer_stop	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_set_asym_pause	vmlinux	EXPORT_SYMBOL	
 0x00000000	mtd_add_partition	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fwnode_graph_get_remote_port	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7183,7 +7213,6 @@
 0x00000000	svc_max_payload	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_rawmidi_new	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_of_platform_populate	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_updatetxtriglevel	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_pci_problems	vmlinux	EXPORT_SYMBOL	
 0x00000000	pcie_capability_read_dword	vmlinux	EXPORT_SYMBOL	
 0x00000000	drop_super_exclusive	vmlinux	EXPORT_SYMBOL	
@@ -7206,6 +7235,9 @@
 0x00000000	regulator_set_current_limit_regmap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xas_find_conflict	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	proc_set_size	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_get_context	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_sem_up	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnvfile_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_alloc_pages	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_sched_scan_stopped	vmlinux	EXPORT_SYMBOL	
 0x00000000	netif_rx_any_context	vmlinux	EXPORT_SYMBOL	
@@ -7257,6 +7289,7 @@
 0x00000000	ZSTD_findDecompressedSize	vmlinux	EXPORT_SYMBOL	
 0x00000000	bio_list_copy_data	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_commitdata_release	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_schedule_nrt_work	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tracing_on	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sock_gen_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tee_client_open_session	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7293,6 +7326,7 @@
 0x00000000	blk_queue_update_readahead	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	always_delete_dentry	vmlinux	EXPORT_SYMBOL	
 0x00000000	setattr_copy	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_set_schedparam	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	em_dev_unregister_perf_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_probe_status	vmlinux	EXPORT_SYMBOL	
 0x00000000	br_multicast_list_adjacent	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7379,6 +7413,7 @@
 0x00000000	mipi_dsi_packet_format_is_long	vmlinux	EXPORT_SYMBOL	
 0x00000000	pcie_has_flr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	utf8s_to_utf16s	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsched_unlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pvclock_gtod_register_notifier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ns_capable	vmlinux	EXPORT_SYMBOL	
 0x00000000	cfg80211_report_obss_beacon_khz	vmlinux	EXPORT_SYMBOL	
@@ -7388,12 +7423,12 @@
 0x00000000	dm_cell_error	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_bufio_read	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_control_msg_recv	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_bus_type_strings	vmlinux	EXPORT_SYMBOL	
 0x00000000	clk_hw_register_clkdev	vmlinux	EXPORT_SYMBOL	
 0x00000000	fb_deferred_io_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	debugfs_attr_read	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	slash_name	vmlinux	EXPORT_SYMBOL	
 0x00000000	memblock_end_of_DRAM	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_cancel	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ktime_get_coarse_with_offset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sched_trace_cfs_rq_avg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kernel_sigaction	vmlinux	EXPORT_SYMBOL	
@@ -7433,14 +7468,13 @@
 0x00000000	v4l2_i2c_tuner_addrs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_altmode_get_plug	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_dwc3_event	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_init_channels_rates	vmlinux	EXPORT_SYMBOL	
 0x00000000	mdio_device_reset	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_statistics_add_transfer_stats	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drmm_kfree	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dp_channel_eq_ok	vmlinux	EXPORT_SYMBOL	
 0x00000000	fb_sys_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fuse_dev_alloc_install	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpu_latency_qos_remove_request	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsynch_peek_pendq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_ops_long	vmlinux	EXPORT_SYMBOL	
 0x00000000	rpc_add_pipe_dir_object	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_csk_update_pmtu	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7472,7 +7506,6 @@
 0x00000000	dev_pm_opp_free_cpufreq_table	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_opp_enable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_string_ids_n	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_hw_bb_watchdog_check	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_save_page	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_runtime_force_resume	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_pci_remap_cfgspace	vmlinux	EXPORT_SYMBOL	
@@ -7481,6 +7514,7 @@
 0x00000000	vli_cmp	vmlinux	EXPORT_SYMBOL	
 0x00000000	iomap_page_mkwrite	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	user_path_create	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_signal_send	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kthread_bind	vmlinux	EXPORT_SYMBOL	
 0x00000000	xprt_disconnect_done	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ipv6_setsockopt	vmlinux	EXPORT_SYMBOL	
@@ -7497,7 +7531,6 @@
 0x00000000	inet_frag_find	vmlinux	EXPORT_SYMBOL	
 0x00000000	inetdev_by_index	vmlinux	EXPORT_SYMBOL	
 0x00000000	__sk_backlog_rcv	vmlinux	EXPORT_SYMBOL	
-0x00000000	rockchip_dmcfreq_write_unlock	vmlinux	EXPORT_SYMBOL	
 0x00000000	fwnode_usb_role_switch_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_poison_anchored_urbs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	con_is_visible	vmlinux	EXPORT_SYMBOL	
@@ -7550,7 +7583,6 @@
 0x00000000	__alloc_disk_node	vmlinux	EXPORT_SYMBOL	
 0x00000000	vm_unmap_aliases	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rockchip_wifi_reset	vmlinux	EXPORT_SYMBOL	
-0x00000000	wireless_nlevent_flush	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	br_ip6_fragment	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_state_lookup_byspi	vmlinux	EXPORT_SYMBOL	
 0x00000000	netlink_capable	vmlinux	EXPORT_SYMBOL	
@@ -7594,10 +7626,10 @@
 0x00000000	dmi_get_bios_year	vmlinux	EXPORT_SYMBOL	
 0x00000000	power_supply_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_assign_descriptors	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_btcoex_enable	vmlinux	EXPORT_SYMBOL	
 0x00000000	serial8250_register_8250_port	vmlinux	EXPORT_SYMBOL	
 0x00000000	__tracepoint_kmalloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	perf_event_release_kernel	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	inband_irqs_disabled	vmlinux	EXPORT_SYMBOL	
 0x00000000	destroy_workqueue	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	idle_notifier_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_twsk_put	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7620,6 +7652,7 @@
 0x00000000	nfs_pgio_header_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fat_get_dotdot_entry	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	writeback_inodes_sb	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnclock_deregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_printk_init_buffers	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_get_uint	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_wchan	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7629,13 +7662,11 @@
 0x00000000	compat_ptr_ioctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	page_put_link	vmlinux	EXPORT_SYMBOL	
 0x00000000	release_pages	vmlinux	EXPORT_SYMBOL	
-0x00000000	schedutil_cpu_util	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_sleep_on	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bt_sock_stream_recvmsg	vmlinux	EXPORT_SYMBOL	
 0x00000000	mmc_cqe_start_req	vmlinux	EXPORT_SYMBOL	
 0x00000000	cec_notifier_cec_adap_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rk628_audio_fifoints_enabled	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_gettxbuf	vmlinux	EXPORT_SYMBOL	
 0x00000000	fixed_phy_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ata_id_c_string	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dpm_resume_start	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7686,7 +7717,6 @@
 0x00000000	vb2_thread_stop	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	i2c_match_id	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_gadget_ep_match_desc	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_gen_timer_start	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dp_mst_topology_mgr_destroy	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_hwrng_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tty_wait_until_sent	vmlinux	EXPORT_SYMBOL	
@@ -7708,7 +7738,6 @@
 0x00000000	alarm_forward	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__netif_schedule	vmlinux	EXPORT_SYMBOL	
 0x00000000	mmc_retune_unpause	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_get_driver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_attach_device_to_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	clk_fractional_divider_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	generic_fh_to_parent	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7717,7 +7746,6 @@
 0x00000000	param_set_invbool	vmlinux	EXPORT_SYMBOL	
 0x00000000	__inet_hash	vmlinux	EXPORT_SYMBOL	
 0x00000000	iio_trigger_alloc	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_unregister_driver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	i2c_register_driver	vmlinux	EXPORT_SYMBOL	
 0x00000000	__traceiter_xhci_dbg_quirks	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fixed_phy_unregister	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7765,6 +7793,7 @@
 0x00000000	amba_request_regions	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_fwnode_gpiod_get_index	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inode_dio_wait	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	rtdm_dev_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_chip_request_resources_parent	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snmp_fold_field	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_register_card	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7807,6 +7836,7 @@
 0x00000000	jbd2_journal_force_commit_nested	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	seq_pad	vmlinux	EXPORT_SYMBOL	
 0x00000000	seq_release_private	vmlinux	EXPORT_SYMBOL	
+0x00000000	oob_stage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ww_mutex_lock	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_ctl_remove	vmlinux	EXPORT_SYMBOL	
 0x00000000	__mdiobus_modify_changed	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7832,6 +7862,7 @@
 0x00000000	drm_i2c_encoder_mode_set	vmlinux	EXPORT_SYMBOL	
 0x00000000	pcix_get_mmrbc	vmlinux	EXPORT_SYMBOL	
 0x00000000	jbd2_journal_check_used_features	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	xnthread_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	system_highpri_wq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_send_eosp_nullfunc	vmlinux	EXPORT_SYMBOL	
 0x00000000	auth_domain_lookup	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7846,7 +7877,6 @@
 0x00000000	fb_get_mode	vmlinux	EXPORT_SYMBOL	
 0x00000000	__SCK__tp_func_tcp_send_reset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mmc_detect_change	vmlinux	EXPORT_SYMBOL	
-0x00000000	gov_attr_set_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcpm_is_toggling	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_debugfs_remove_files	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_mode_convert_to_umode	vmlinux	EXPORT_SYMBOL_GPL	
@@ -7869,6 +7899,7 @@
 0x00000000	midgard_kbase_instr_hwcnt_request_dump	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_fs_type	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_server_insert_lists	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_sem_select	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_work_queue	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	resume_cpus	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_disconnect	vmlinux	EXPORT_SYMBOL	
@@ -7931,7 +7962,6 @@
 0x00000000	v4l2_m2m_create_bufs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rndis_borrow_net	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_composite_setup_continue	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_mci_get_next_gpm_offset	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_attr_sw_activity	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__vfs_setxattr_locked	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mangle_path	vmlinux	EXPORT_SYMBOL	
@@ -8019,6 +8049,7 @@
 0x00000000	tcpm_pd_transmit_complete	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	device_init_wakeup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	clkdev_hw_alloc	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mutex_timedlock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	avenrun	vmlinux	EXPORT_SYMBOL	
 0x00000000	system_wq	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_extend_register	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8030,6 +8061,7 @@
 0x00000000	drm_gem_dmabuf_mmap	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_clk_of_add_provider	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mark_page_accessed	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnvfile_init_snapshot	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ktime_get_raw_ts64	vmlinux	EXPORT_SYMBOL	
 0x00000000	in6addr_any	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet_protos	vmlinux	EXPORT_SYMBOL	
@@ -8044,7 +8076,6 @@
 0x00000000	__snd_usbmidi_create	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_get_next_available_child	vmlinux	EXPORT_SYMBOL	
 0x00000000	hid_output_report	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpuidle_get_cpu_driver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__usb_create_hcd	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	analogix_dp_audio_hw_params	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mpi_cmpabs	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8083,7 +8114,6 @@
 0x00000000	file_ns_capable	vmlinux	EXPORT_SYMBOL	
 0x00000000	iio_trigger_notify_done	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_m2m_streamoff	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_numtxpending	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_xfer_mode2shift	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_clk_set_defaults	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pci_find_next_capability	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8122,6 +8152,7 @@
 0x00000000	mpi_alloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	af_alg_get_rsgl	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mempool_create_node	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_set_slice	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tracepoint_probe_register_prio_may_exist	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_gc_set_wake	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	find_get_pid	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8129,8 +8160,6 @@
 0x00000000	dev_set_promiscuity	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_pcm_hw_limit_rates	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_stor_transparent_scsi_command	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
-0x00000000	ath9k_hw_btcoex_init_3wire	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_gpio_request_in	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_bios_ptable	vmlinux	EXPORT_SYMBOL	
 0x00000000	dmabuf_page_pool_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_generic_resume	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8168,7 +8197,6 @@
 0x00000000	usb_add_function	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_ep_autoconfig_release	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_phy_roothub_calibrate	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_key_config	vmlinux	EXPORT_SYMBOL	
 0x00000000	genphy_c45_pma_setup_forced	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_generic_poweroff	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	analogix_dp_remove	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8213,7 +8241,6 @@
 0x00000000	v4l2_src_change_event_subdev_subscribe	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	i2c_verify_client	vmlinux	EXPORT_SYMBOL	
 0x00000000	input_allocate_device	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_set_txpowerlimit	vmlinux	EXPORT_SYMBOL	
 0x00000000	pm_runtime_barrier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_printk	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dp_dsc_sink_supported_input_bpcs	vmlinux	EXPORT_SYMBOL	
@@ -8224,6 +8251,7 @@
 0x00000000	bitmap_parselist	vmlinux	EXPORT_SYMBOL	
 0x00000000	__insert_inode_hash	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	kfree_const	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnclock_tick	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_cpu_frequency	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	revert_creds	vmlinux	EXPORT_SYMBOL	
 0x00000000	fs_overflowgid	vmlinux	EXPORT_SYMBOL	
@@ -8248,6 +8276,7 @@
 0x00000000	pcie_get_speed_cap	vmlinux	EXPORT_SYMBOL	
 0x00000000	debug_locks_silent	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_client_init_is_complete	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_call_state_chain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blocking_notifier_call_chain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ping_hash	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__napi_schedule_irqoff	vmlinux	EXPORT_SYMBOL	
@@ -8332,7 +8361,6 @@
 0x00000000	skb_set_owner_w	vmlinux	EXPORT_SYMBOL	
 0x00000000	thermal_zone_of_sensor_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vb2_queue_init	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_rxbuf_alloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_heap_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_ipa_control_handle_gpu_sleep_enter	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_mm_scan_remove_block	vmlinux	EXPORT_SYMBOL	
@@ -8369,6 +8397,7 @@
 0x00000000	__dec_node_page_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	__inc_node_page_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	__mod_node_page_state	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_set_clock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_frag_rbtree_purge	vmlinux	EXPORT_SYMBOL	
 0x00000000	skb_queue_tail	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_bitset_set_bit	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8381,6 +8410,8 @@
 0x00000000	__register_nls	vmlinux	EXPORT_SYMBOL	
 0x00000000	register_sysctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	vfs_test_lock	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_event_signal	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnarch_generic_full_divmod64	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	do_exit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	l3mdev_master_upper_ifindex_by_index_rcu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_sta_register_airtime	vmlinux	EXPORT_SYMBOL	
@@ -8398,7 +8429,6 @@
 0x00000000	v4l2_set_edid_phys_addr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	media_create_intf_link	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_xhci_urb_enqueue	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_resume_interrupts	vmlinux	EXPORT_SYMBOL	
 0x00000000	__put_mtd_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__get_mtd_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mtd_wunit_to_pairing_info	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8411,6 +8441,7 @@
 0x00000000	simple_dentry_operations	vmlinux	EXPORT_SYMBOL	
 0x00000000	__traceiter_device_pm_callback_end	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_cpu_frequency	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	irq_switch_oob	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	oops_in_progress	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_helper_log	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_lower_dev_get_private	vmlinux	EXPORT_SYMBOL	
@@ -8436,7 +8467,6 @@
 0x00000000	vb2_dma_sg_memops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vb2_cma_sg_memops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	create_function_device	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_beacon_config_adhoc	vmlinux	EXPORT_SYMBOL	
 0x00000000	software_node_register_node_group	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	attribute_container_classdev_to_container	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_put_dev	vmlinux	EXPORT_SYMBOL	
@@ -8452,7 +8482,6 @@
 0x00000000	kill_pid_usb_asyncio	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_find_sta_by_ifaddr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_dapm_info_pin_switch	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	store_sampling_rate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_bufio_write_dirty_buffers_async	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_switch_set	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_switch_put	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8474,6 +8503,7 @@
 0x00000000	__SCK__tp_func_block_rq_issue	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_file_llseek	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_kmalloc_node	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mmap_iomem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_seq_bitmask	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sched_clock_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	schedule_timeout	vmlinux	EXPORT_SYMBOL	
@@ -8514,7 +8544,6 @@
 0x00000000	iio_get_debugfs_dentry	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	hid_open_report	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_opp_of_cpumask_remove_table	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_check_nav	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_advertise_supported	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_schedule_eh	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regmap_attach_dev	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8528,7 +8557,6 @@
 0x00000000	ping_init_sock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_update_features	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_get_port_parent_id	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_btcoex_disable	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_async	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ata_sas_port_resume	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regmap_mmio_detach_clk	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8558,6 +8586,7 @@
 0x00000000	kfree_strarray	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	llist_del_first	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dec_zone_page_state	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnvfile_init_link	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	timespec64_to_jiffies	vmlinux	EXPORT_SYMBOL	
 0x00000000	sunrpc_cache_pipe_upcall_timeout	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nf_conntrack_max	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8586,8 +8615,6 @@
 0x00000000	video_unregister_device	vmlinux	EXPORT_SYMBOL	
 0x00000000	__i2c_smbus_xfer	vmlinux	EXPORT_SYMBOL	
 0x00000000	typec_find_power_role	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_enable_interrupts	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_write_associd	vmlinux	EXPORT_SYMBOL	
 0x00000000	dw_hdmi_phy_reset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mipi_dsi_packet_format_is_short	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_helper_dirtyfb	vmlinux	EXPORT_SYMBOL	
@@ -8597,6 +8624,7 @@
 0x00000000	blk_queue_update_dma_alignment	vmlinux	EXPORT_SYMBOL	
 0x00000000	fsnotify_wait_marks_destroyed	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	hrtimer_try_to_cancel	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	oob_irq_enable	vmlinux	EXPORT_SYMBOL	
 0x00000000	irq_chip_get_parent_state	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_chip_set_parent_state	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	_raw_read_lock_irq	vmlinux	EXPORT_SYMBOL	
@@ -8613,6 +8641,7 @@
 0x00000000	blk_mq_quiesce_queue	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	register_asymmetric_key_parser	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mod_node_page_state	vmlinux	EXPORT_SYMBOL	
+0x00000000	__rtdm_task_sleep	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_dispose_mapping	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pm_suspend	vmlinux	EXPORT_SYMBOL	
 0x00000000	usermodehelper_read_unlock	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8650,7 +8679,6 @@
 0x00000000	skb_seq_read	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_ctrl_request_setup	vmlinux	EXPORT_SYMBOL	
 0x00000000	i2c_transfer	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_beaconq_setup	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_device_lookup	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_buf_get_flags	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_i2c_encoder_prepare	vmlinux	EXPORT_SYMBOL	
@@ -8659,6 +8687,7 @@
 0x00000000	bitmap_find_next_zero_area_off	vmlinux	EXPORT_SYMBOL	
 0x00000000	skcipher_walk_aead_decrypt	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wbc_detach_inode	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnheap_set_name	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cgroup_path_ns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_set_int	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_wifi_country_code	vmlinux	EXPORT_SYMBOL	
@@ -8681,10 +8710,10 @@
 0x00000000	tty_port_register_device_attr_serdev	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blk_mq_init_queue	vmlinux	EXPORT_SYMBOL	
 0x00000000	rsa_parse_pub_key	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	unlock_stage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_sched_stat_wait	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_set_short	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_ctrl_handler_free	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_cmn_beacon_config_ap	vmlinux	EXPORT_SYMBOL	
 0x00000000	can_rx_offload_queue_tail	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_connector_oob_hotplug_event	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_dsc_pps_payload_pack	vmlinux	EXPORT_SYMBOL	
@@ -8746,6 +8775,7 @@
 0x00000000	pci_dev_run_wake	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_sync_inode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	generic_fadvise	vmlinux	EXPORT_SYMBOL	
+0x00000000	__hybrid_spin_unlock_irqrestore	vmlinux	EXPORT_SYMBOL	
 0x00000000	rpc_alloc_iostats	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__fib6_flush_trees	vmlinux	EXPORT_SYMBOL	
 0x00000000	flow_rule_match_enc_ipv6_addrs	vmlinux	EXPORT_SYMBOL	
@@ -8763,6 +8793,7 @@
 0x00000000	phy_power_off	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mpi_sub_ui	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	shmem_mark_page_lazyfree	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	hard_preempt_enable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mr_mfc_seq_next	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_prot	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_tmpl_alloc	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8772,7 +8803,6 @@
 0x00000000	dvb_frontend_sleep_until	vmlinux	EXPORT_SYMBOL	
 0x00000000	media_entity_get_fwnode_pad	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rndis_msg_parser	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_reload_chainmask	vmlinux	EXPORT_SYMBOL	
 0x00000000	root_device_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mipi_dsi_turn_on_peripheral	vmlinux	EXPORT_SYMBOL	
 0x00000000	fuse_direct_io	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8794,7 +8824,6 @@
 0x00000000	mempool_free	vmlinux	EXPORT_SYMBOL	
 0x00000000	llc_sap_open	vmlinux	EXPORT_SYMBOL	
 0x00000000	neigh_carrier_down	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_governor_latency_req	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_unregister_altmode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_stor_Bulk_reset	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
 0x00000000	attribute_container_unregister	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8840,7 +8869,6 @@
 0x00000000	dvb_ringbuffer_write_user	vmlinux	EXPORT_SYMBOL	
 0x00000000	i2c_smbus_write_byte_data	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_unlink_urb	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_hw_cycle_counters_update	vmlinux	EXPORT_SYMBOL	
 0x00000000	phylink_ethtool_nway_reset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_pm_domain_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_csf_firmware_trace_buffer_update_trace_enable_bit	vmlinux	EXPORT_SYMBOL	
@@ -8912,6 +8940,7 @@
 0x00000000	seq_hlist_next_percpu	vmlinux	EXPORT_SYMBOL	
 0x00000000	kfree_sensitive	vmlinux	EXPORT_SYMBOL	
 0x00000000	clear_page_dirty_for_io	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnselect_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_sched_cpu_capacity_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	srcu_init_notifier_head	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__memcpy_fromio	vmlinux	EXPORT_SYMBOL	
@@ -8945,6 +8974,7 @@
 0x00000000	sha224_zero_message_hash	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	write_dirty_buffer	vmlinux	EXPORT_SYMBOL	
 0x00000000	noop_llseek	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_timer_stop	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_array_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_entries_cpu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kstat_irqs_cpu	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8987,6 +9017,7 @@
 0x00000000	jbd2_wait_inode_data	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	proc_mkdir	vmlinux	EXPORT_SYMBOL	
 0x00000000	lookup_positive_unlocked	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnheap_vfree	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dummy_irq_chip	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpu_mitigations_auto_nosmt	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_usb_get_phy_by_phandle	vmlinux	EXPORT_SYMBOL_GPL	
@@ -8996,6 +9027,7 @@
 0x00000000	phy_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_cached_acl_rcu	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_mem_cgroup_from_mm	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_get_period	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	alarm_start_relative	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	proc_dointvec_jiffies	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_peek_len	vmlinux	EXPORT_SYMBOL	
@@ -9014,6 +9046,7 @@
 0x00000000	kstrtos16_from_user	vmlinux	EXPORT_SYMBOL	
 0x00000000	__bitmap_or	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_dcookie	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_timer_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_event_raw_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	relay_late_setup_files	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_sched_switch	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9025,12 +9058,12 @@
 0x00000000	sata_lpm_ignore_phy_events	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	strspn	vmlinux	EXPORT_SYMBOL	
 0x00000000	strlen	vmlinux	EXPORT_SYMBOL	
+0x00000000	xntimer_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_array_set_clr_event	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rfkill_destroy	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_rx_ba_timer_expired	vmlinux	EXPORT_SYMBOL	
 0x00000000	hci_recv_frame	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_m2m_ctx_release	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_count_streams	vmlinux	EXPORT_SYMBOL	
 0x00000000	stmmac_init_tstamp_counter	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	scsi_host_block	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_vma_offset_add	vmlinux	EXPORT_SYMBOL	
@@ -9094,6 +9127,7 @@
 0x00000000	drm_edid_block_valid	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_options	vmlinux	EXPORT_SYMBOL	
 0x00000000	vfs_iter_write	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsynch_forget_sleeper	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ktime_get_coarse_ts64	vmlinux	EXPORT_SYMBOL	
 0x00000000	udp_sock_create6	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fib_new_table	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9107,6 +9141,7 @@
 0x00000000	iomap_dio_iopoll	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bh_uptodate_or_lock	vmlinux	EXPORT_SYMBOL	
 0x00000000	mem_cgroup_from_task	vmlinux	EXPORT_SYMBOL	
+0x00000000	disable_oob_stage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_chswitch_done	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_state_walk_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	udp4_hwcsum	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9123,11 +9158,11 @@
 0x00000000	param_get_hexint	vmlinux	EXPORT_SYMBOL	
 0x00000000	__sock_cmsg_send	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_pci_quirk_lookup	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_cmn_process_rate	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_can_transceiver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_mdio_find_bus	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_bus_write_config_word	vmlinux	EXPORT_SYMBOL	
 0x00000000	sg_alloc_table	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_signal_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	handle_simple_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	async_synchronize_cookie_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	call_usermodehelper_exec	vmlinux	EXPORT_SYMBOL	
@@ -9136,7 +9171,6 @@
 0x00000000	power_supply_batinfo_ocv2cap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_async_subdev_notifier_register	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_ctrl_handler_init_class	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_reset_calvalid	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_gem_prime_fd_to_handle	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_scdc_write	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_release_region	vmlinux	EXPORT_SYMBOL	
@@ -9156,7 +9190,10 @@
 0x00000000	__clk_get_name	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	gen_pool_dma_alloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	seq_list_start	vmlinux	EXPORT_SYMBOL	
+0x00000000	__xntimer_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_find_matching_fwspec	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__hybrid_spin_trylock_irqsave	vmlinux	EXPORT_SYMBOL	
+0x00000000	do_raw_spin_trylock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_sched_update_nr_running_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kthread_queue_work	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_workqueue_execute_end	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9165,9 +9202,9 @@
 0x00000000	km_state_notify	vmlinux	EXPORT_SYMBOL	
 0x00000000	register_ip_vs_scheduler	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_pcm_set_ops	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_enable_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_run_dependencies	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sbitmap_prepare_to_wait	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_mmap_kmem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_prog_add	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_seq_putmem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	_raw_write_lock	vmlinux	EXPORT_SYMBOL	
@@ -9242,7 +9279,6 @@
 0x00000000	nvmem_device_read	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dvb_generic_release	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_clear_halt	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_setuprxdesc	vmlinux	EXPORT_SYMBOL	
 0x00000000	swphy_read_reg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ubi_sync	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_bridge_hpd_notify	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9336,6 +9372,8 @@
 0x00000000	pci_choose_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_access_add_cache	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	delete_from_page_cache	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_timer_destroy	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnvfile_init_dir	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ktime_get_seconds	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wait_for_completion_killable	vmlinux	EXPORT_SYMBOL	
 0x00000000	put_task_stack	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9404,6 +9442,8 @@
 0x00000000	kbase_reg_read	vmlinux	EXPORT_SYMBOL	
 0x00000000	clk_multiplier_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nla_put_64bit	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnintr_detach	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnintr_attach	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	unregister_ftrace_export	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wait_for_completion_timeout	vmlinux	EXPORT_SYMBOL	
 0x00000000	set_current_groups	vmlinux	EXPORT_SYMBOL	
@@ -9415,7 +9455,6 @@
 0x00000000	rk628_i2c_register	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_stor_probe2	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
 0x00000000	usb_stor_probe1	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
-0x00000000	ar9003_hw_bb_watchdog_dbg_info	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_reg_write	vmlinux	EXPORT_SYMBOL	
 0x00000000	update_region	vmlinux	EXPORT_SYMBOL	
 0x00000000	dma_request_chan_by_mask	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9423,6 +9462,7 @@
 0x00000000	public_key_signature_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_show_options	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	page_frag_free	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_clock_find	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sprint_symbol	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_pelt_se_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_pelt_dl_tp	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9439,6 +9479,8 @@
 0x00000000	kstrtobool_from_user	vmlinux	EXPORT_SYMBOL	
 0x00000000	fuse_len_args	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	shrink_dcache_parent	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mutex_init	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_kick	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_seq_vprintf	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	synchronize_net	vmlinux	EXPORT_SYMBOL	
 0x00000000	netdev_adjacent_change_abort	vmlinux	EXPORT_SYMBOL	
@@ -9461,6 +9503,7 @@
 0x00000000	lease_unregister_notifier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kern_path_create	vmlinux	EXPORT_SYMBOL	
 0x00000000	init_on_free	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_yield	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_create_mapping_affinity	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	_raw_write_lock_bh	vmlinux	EXPORT_SYMBOL	
 0x00000000	xdr_terminate_string	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9510,7 +9553,6 @@
 0x00000000	devm_power_supply_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rk628_txphy_get_bus_width	vmlinux	EXPORT_SYMBOL	
 0x00000000	rk628_txphy_set_bus_width	vmlinux	EXPORT_SYMBOL	
-0x00000000	ar9003_mci_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_start_machine	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_mdiobus_alloc_size	vmlinux	EXPORT_SYMBOL	
 0x00000000	__pm_runtime_suspend	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9518,6 +9560,7 @@
 0x00000000	drm_atomic_helper_update_plane	vmlinux	EXPORT_SYMBOL	
 0x00000000	crc_t10dif	vmlinux	EXPORT_SYMBOL	
 0x00000000	kvasprintf_const	vmlinux	EXPORT_SYMBOL	
+0x00000000	compat_ptr_oob_ioctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	param_get_charp	vmlinux	EXPORT_SYMBOL	
 0x00000000	xdp_rxq_info_unreg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_next_lower_dev_rcu	vmlinux	EXPORT_SYMBOL	
@@ -9547,6 +9590,7 @@
 0x00000000	sysfs_create_mount_point	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mount_bdev	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	can_do_mlock	vmlinux	EXPORT_SYMBOL	
+0x00000000	__xnthread_discard	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_barrier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ndisc_mc_map	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_nat_masquerade_ipv6	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9614,7 +9658,6 @@
 0x00000000	irq_do_set_affinity	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_mkpipe_dentry	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ipt_unregister_table_pre_exit	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_ani_monitor	vmlinux	EXPORT_SYMBOL	
 0x00000000	ahci_stop_engine	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rockchip_init_opp_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_release_regions	vmlinux	EXPORT_SYMBOL	
@@ -9639,7 +9682,10 @@
 0x00000000	siphash_1u32	vmlinux	EXPORT_SYMBOL	
 0x00000000	generic_check_addressable	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_tree_nodev	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_event_init	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnbufd_copy_to_kmem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	perf_event_read_value	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	dovetail_stop	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__refrigerator	vmlinux	EXPORT_SYMBOL	
 0x00000000	irqchip_fwnode_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet6_csk_addr2sockaddr	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9649,7 +9695,6 @@
 0x00000000	input_mt_init_slots	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_disable_lpm	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_disable_ltm	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_gen_timer_alloc	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_10gbit_full_features	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ata_link_printk	vmlinux	EXPORT_SYMBOL	
 0x00000000	devres_close_group	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9686,6 +9731,7 @@
 0x00000000	drm_panel_bridge_add	vmlinux	EXPORT_SYMBOL	
 0x00000000	iommu_unregister_device_fault_handler	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	proc_symlink	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_kernel_ppd	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_br_fdb_external_learn_add	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__rtnl_link_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rtnl_kfree_skbs	vmlinux	EXPORT_SYMBOL	
@@ -9731,6 +9777,7 @@
 0x00000000	drm_mode_create	vmlinux	EXPORT_SYMBOL	
 0x00000000	memchr	vmlinux	EXPORT_SYMBOL	
 0x00000000	kern_unmount_array	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsynch_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet6_release	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_expect_hsize	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_get_parent	vmlinux	EXPORT_SYMBOL	
@@ -9785,7 +9832,6 @@
 0x00000000	tcp_mtup_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	perf_pmu_name	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mmc_cqe_request_done	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpuidle_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__media_remove_intf_links	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_match_altmode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_clk_unregister	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9800,7 +9846,6 @@
 0x00000000	snd_soc_jack_report	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_dapm_put_enum_double	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_dapm_get_enum_double	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	dbs_update	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_tm_commit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_pm_set_policy	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_pm_get_policy	vmlinux	EXPORT_SYMBOL	
@@ -9819,7 +9864,6 @@
 0x00000000	mmc_set_timing	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_accept_partial_bio	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usbnet_get_endpoints	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_set_tsfadjust	vmlinux	EXPORT_SYMBOL	
 0x00000000	phylink_set_pcs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nvme_sync_io_queues	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pci_bus_read_dev_vendor_id	vmlinux	EXPORT_SYMBOL	
@@ -9884,6 +9928,7 @@
 0x00000000	usb_alloc_coherent	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_tree_mtd	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	crypto_larval_kill	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	test_and_lock_stage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_set_default_host	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_csk_get_port	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_is_rx_handler_busy	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9926,6 +9971,7 @@
 0x00000000	jbd2_journal_inode_ranged_wait	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	__SCK__tp_func_android_fs_dataread_end	vmlinux	EXPORT_SYMBOL	
 0x00000000	freeze_super	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_runstate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__devm_request_region	vmlinux	EXPORT_SYMBOL	
 0x00000000	l3mdev_fib_table_by_index	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	hid_connect	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9949,7 +9995,6 @@
 0x00000000	cfg80211_auth_timeout	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_snd_soc_register_dai	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_ctrl_new_std_menu	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_bstuck_nfcal	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_common_sdev_attrs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_add_edid_modes	vmlinux	EXPORT_SYMBOL	
 0x00000000	fsl8250_handle_irq	vmlinux	EXPORT_SYMBOL_GPL	
@@ -9997,6 +10042,7 @@
 0x00000000	seq_hlist_start_head_rcu	vmlinux	EXPORT_SYMBOL	
 0x00000000	current_time	vmlinux	EXPORT_SYMBOL	
 0x00000000	user_path_at_empty	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_dev_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	page_pool_release_page	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_mc_flush	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_uc_flush	vmlinux	EXPORT_SYMBOL	
@@ -10021,6 +10067,8 @@
 0x00000000	dma_fence_release	vmlinux	EXPORT_SYMBOL	
 0x00000000	dw_hdmi_qp_set_audio_infoframe	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_client_register	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_personality	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsynch_wakeup_one_sleeper	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_rpm_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	do_trace_rcu_torture_read	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpufreq_update_util_data	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10032,8 +10080,6 @@
 0x00000000	__tracepoint_rpm_idle	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bt_procfs_cleanup	vmlinux	EXPORT_SYMBOL	
 0x00000000	mr_fill_mroute	vmlinux	EXPORT_SYMBOL	
-0x00000000	gov_attr_set_put	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	gov_attr_set_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	media_device_usb_allocate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wakeup_source_remove	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	stop_tty	vmlinux	EXPORT_SYMBOL	
@@ -10047,7 +10093,6 @@
 0x00000000	nfs_pageio_reset_write_mds	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	unlock_page_memcg	vmlinux	EXPORT_SYMBOL	
 0x00000000	si_swapinfo	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	iwe_stream_add_point	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_policy_destroy	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet_del_offload	vmlinux	EXPORT_SYMBOL	
 0x00000000	sysctl_fb_tunnels_only_for_init_net	vmlinux	EXPORT_SYMBOL	
@@ -10132,7 +10177,6 @@
 0x00000000	kfree_skb_list	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_soc_of_parse_audio_simple_widgets	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ohci_setup	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_kill_interrupts	vmlinux	EXPORT_SYMBOL	
 0x00000000	deregister_mtd_blktrans	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_dp_psr_setup_time	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_simple_encoder_init	vmlinux	EXPORT_SYMBOL	
@@ -10173,6 +10217,7 @@
 0x00000000	gpiochip_add_pin_range	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	generate_random_uuid	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_mem_cgroup_from_page	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnheap_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_policy_hash_rebuild	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_tcp_seqadj_set	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_net_ns_by_pid	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10199,7 +10244,6 @@
 0x00000000	xfrm_input_register_afinfo	vmlinux	EXPORT_SYMBOL	
 0x00000000	udp_ioctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_acct_add	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	__mdiobus_write	vmlinux	EXPORT_SYMBOL	
 0x00000000	genphy_loopback	vmlinux	EXPORT_SYMBOL	
 0x00000000	tty_port_default_client_ops	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10219,6 +10263,7 @@
 0x00000000	pwm_set_chip_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blk_queue_max_write_same_sectors	vmlinux	EXPORT_SYMBOL	
 0x00000000	may_umount	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnbufd_map_umem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pids_cgrp_subsys_enabled_key	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_switch_client_transport	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mmc_hsq_suspend	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10257,6 +10302,7 @@
 0x00000000	regulator_bulk_disable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	memweight	vmlinux	EXPORT_SYMBOL	
 0x00000000	jbd2_journal_ack_err	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	nkvdso	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_sched_stat_blocked	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	emergency_restart	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_upper_dev_link	vmlinux	EXPORT_SYMBOL	
@@ -10279,7 +10325,6 @@
 0x00000000	cpufreq_driver_target	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_post_suspending	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vb2_fop_read	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_get_tsf_offset	vmlinux	EXPORT_SYMBOL	
 0x00000000	mtd_panic_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	soc_device_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tpm_transmit_cmd	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10299,9 +10344,6 @@
 0x00000000	ohci_restart	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_phy_get_charger_current	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_phy_set_charger_current	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_get_hw_crypto_keytype	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_btcoex_bt_stomp	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_txstart	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_get_eee_err	vmlinux	EXPORT_SYMBOL	
 0x00000000	nvme_reset_ctrl	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_self_refresh_helper_init	vmlinux	EXPORT_SYMBOL	
@@ -10318,7 +10360,6 @@
 0x00000000	dm_shift_arg	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_async_unregister_subdev	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_stor_resume	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
-0x00000000	ath9k_hw_gpio_request_out	vmlinux	EXPORT_SYMBOL	
 0x00000000	open_candev	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_mdiobus_register	vmlinux	EXPORT_SYMBOL	
 0x00000000	ubi_open_volume	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10329,7 +10370,6 @@
 0x00000000	__generic_file_write_iter	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_lookup	vmlinux	EXPORT_SYMBOL	
 0x00000000	skb_store_bits	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath_reg_notifier_apply	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_sas_scsi_ioctl	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_resv_fini	vmlinux	EXPORT_SYMBOL	
 0x00000000	plist_add	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10343,7 +10383,6 @@
 0x00000000	snd_soc_put_volsw	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_get_volsw	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devm_devfreq_register_opp_notifier	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpufreq_dbs_governor_stop	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_bufio_new	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	i2c_smbus_read_byte	vmlinux	EXPORT_SYMBOL	
 0x00000000	i2c_probe_func_quick_read	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10383,7 +10422,6 @@
 0x00000000	nfs_file_fsync	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mark_buffer_write_io_error	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	profile_event_register	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpu_latency_qos_request_active	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__nf_conntrack_helper_find	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	gether_register_netdev	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ppp_input_error	vmlinux	EXPORT_SYMBOL	
@@ -10492,6 +10530,7 @@
 0x00000000	usbnet_manage_power	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_mmu_report_mcu_as_fault_and_reset	vmlinux	EXPORT_SYMBOL	
 0x00000000	clk_hw_register	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsynch_wakeup_many_sleepers	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	disable_irq_nosync	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_tx_prepare_skb	vmlinux	EXPORT_SYMBOL	
 0x00000000	cfg80211_nan_match	vmlinux	EXPORT_SYMBOL	
@@ -10515,6 +10554,7 @@
 0x00000000	extcon_unregister_notifier_all	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	media_entity_remote_pad	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	platform_get_resource_byname	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	fbcon_update_vcs	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_find_node_by_type	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_stor_adjust_quirks	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
 0x00000000	dwc3_stop_active_transfer	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10539,17 +10579,18 @@
 0x00000000	pci_bus_write_config_byte	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_gpiod_put_array	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	free_anon_bdev	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_sem_timeddown	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	raw_hash_sk	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_seq_start	vmlinux	EXPORT_SYMBOL	
 0x00000000	dst_cache_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rps_sock_flow_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	iio_trigger_poll	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_stor_control_msg	vmlinux	EXPORT_SYMBOL_GPL	USB_STORAGE
-0x00000000	ath9k_hw_reset	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_res_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_genpd_add_subdomain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__drm_get_edid_firmware_path	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_mq_complete_request	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_fd_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_exp_batches_completed	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	request_any_context_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_clnt_swap_activate	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10640,6 +10681,7 @@
 0x00000000	drm_modeset_lock	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_walk_bus	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	block_is_partially_uptodate	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	xnthread_wait_period	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_overruns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_workqueue_execute_end	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	l3mdev_table_lookup_unregister	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10650,6 +10692,7 @@
 0x00000000	drm_atomic_helper_commit_hw_done	vmlinux	EXPORT_SYMBOL	
 0x00000000	uart_resume_port	vmlinux	EXPORT_SYMBOL	
 0x00000000	gpiod_get_value_cansleep	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__xnthread_test_cancel	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sysctl_rmem_max	vmlinux	EXPORT_SYMBOL	
 0x00000000	sysctl_wmem_max	vmlinux	EXPORT_SYMBOL	
 0x00000000	iio_trigger_using_own	vmlinux	EXPORT_SYMBOL	
@@ -10664,6 +10707,7 @@
 0x00000000	crypto_grab_spawn	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	jbd2_journal_errno	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	register_vmap_purge_notifier	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnclock_divrem_billion	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_rss_key_fill	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_bitset_cursor_next	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	input_free_minor	vmlinux	EXPORT_SYMBOL	
@@ -10719,6 +10763,7 @@
 0x00000000	drm_mode_prune_invalid	vmlinux	EXPORT_SYMBOL	
 0x00000000	mpi_clear	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vfs_readlink	vmlinux	EXPORT_SYMBOL	
+0x00000000	inband_irq_enable	vmlinux	EXPORT_SYMBOL	
 0x00000000	complete_and_exit	vmlinux	EXPORT_SYMBOL	
 0x00000000	cfg80211_unregister_wdev	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet6_lookup	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10777,6 +10822,7 @@
 0x00000000	crypto_register_acomp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs4_disable_idmapping	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	d_genocide	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_event_clear	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tracing_generic_entry_update	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__module_put_and_exit	vmlinux	EXPORT_SYMBOL	
 0x00000000	ns_to_kernel_old_timeval	vmlinux	EXPORT_SYMBOL	
@@ -10807,7 +10853,6 @@
 0x00000000	d_rehash	vmlinux	EXPORT_SYMBOL	
 0x00000000	d_drop	vmlinux	EXPORT_SYMBOL	
 0x00000000	file_open_root	vmlinux	EXPORT_SYMBOL	
-0x00000000	iwe_stream_add_event	vmlinux	EXPORT_SYMBOL	
 0x00000000	tso_build_hdr	vmlinux	EXPORT_SYMBOL	
 0x00000000	sock_recv_errqueue	vmlinux	EXPORT_SYMBOL	
 0x00000000	__mmc_claim_host	vmlinux	EXPORT_SYMBOL	
@@ -10817,7 +10862,6 @@
 0x00000000	pci_get_class	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_initiate_pgio	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_create_rpc_client	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	log_abnormal_wakeup_reason	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	region_intersects	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netif_rx	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_pcm_hw_refine	vmlinux	EXPORT_SYMBOL	
@@ -10848,6 +10892,7 @@
 0x00000000	gpiochip_get_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	reciprocal_value	vmlinux	EXPORT_SYMBOL	
 0x00000000	unregister_binfmt	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnregistry_vlink_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	make_kgid	vmlinux	EXPORT_SYMBOL	
 0x00000000	_raw_read_trylock	vmlinux	EXPORT_SYMBOL	
 0x00000000	housekeeping_enabled	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10874,16 +10919,18 @@
 0x00000000	dw_hdmi_qp_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	klist_del	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__lock_buffer	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_event_pulse	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	memory_cgrp_subsys_enabled_key	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_openreq_init_rwin	vmlinux	EXPORT_SYMBOL	
 0x00000000	xdp_rxq_info_reg_mem_model	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devfreq_remove_governor	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_copy_name_and_uuid	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_paprd_populate_single_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	analogix_dp_phy_test	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_detach_device_from_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	debug_locks	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	aead_geniv_alloc	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_fd_put	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_fd_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	perf_event_update_userpage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	trace_print_bitmask_seq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fib6_check_nexthop	vmlinux	EXPORT_SYMBOL_GPL	
@@ -10931,7 +10978,6 @@
 0x00000000	__nla_validate	vmlinux	EXPORT_SYMBOL	
 0x00000000	vfs_dedupe_file_range	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_proberesp_get	vmlinux	EXPORT_SYMBOL	
-0x00000000	wireless_send_event	vmlinux	EXPORT_SYMBOL	
 0x00000000	xprt_release_xprt_cong	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ipv6_recv_error	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	udp_sk_rx_dst_set	vmlinux	EXPORT_SYMBOL	
@@ -11049,6 +11095,7 @@
 0x00000000	pwmchip_add	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blk_queue_max_discard_segments	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pkcs7_parse_message	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_sem_down	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip_fib_metrics_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	flow_block_cb_decref	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_prop_next_string	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11066,13 +11113,12 @@
 0x00000000	backlight_device_set_brightness	vmlinux	EXPORT_SYMBOL	
 0x00000000	pcie_capability_write_word	vmlinux	EXPORT_SYMBOL	
 0x00000000	copy_page_to_iter	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_call_mayday	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	css_next_descendant_pre	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ioremap_cache	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_device_get_state	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_find_device_by_node	vmlinux	EXPORT_SYMBOL	
 0x00000000	vb2_streamoff	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_setup_ht_cap	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_gpio_free	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_port_pbar_desc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__blkdev_issue_zeroout	vmlinux	EXPORT_SYMBOL	
 0x00000000	kblockd_mod_delayed_work_on	vmlinux	EXPORT_SYMBOL	
@@ -11100,7 +11146,6 @@
 0x00000000	tcp_rtx_synack	vmlinux	EXPORT_SYMBOL	
 0x00000000	skb_put	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_m2m_ioctl_reqbufs	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_name	vmlinux	EXPORT_SYMBOL	
 0x00000000	ubi_leb_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mtd_concat_create	vmlinux	EXPORT_SYMBOL	
 0x00000000	scsi_host_put	vmlinux	EXPORT_SYMBOL	
@@ -11118,6 +11163,7 @@
 0x00000000	pinctrl_get	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	configfs_depend_item	vmlinux	EXPORT_SYMBOL	
 0x00000000	generic_block_fiemap	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_mutex_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	add_wait_queue	vmlinux	EXPORT_SYMBOL	
 0x00000000	param_ops_invbool	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_add_pack	vmlinux	EXPORT_SYMBOL	
@@ -11176,7 +11222,6 @@
 0x00000000	skb_tstamp_tx	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	devfreq_monitor_resume	vmlinux	EXPORT_SYMBOL	
 0x00000000	sip_hdcp_config	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_hw_keyreset	vmlinux	EXPORT_SYMBOL	
 0x00000000	mdio_xpcs_get_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__scsi_format_command	vmlinux	EXPORT_SYMBOL	
 0x00000000	tty_set_operations	vmlinux	EXPORT_SYMBOL	
@@ -11207,7 +11252,6 @@
 0x00000000	asoc_simple_parse_clk	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	media_graph_walk_next	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	media_graph_walk_init	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_rx_skb_postprocess	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_mode_validate_driver	vmlinux	EXPORT_SYMBOL	
 0x00000000	fb_deferred_io_fsync	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pci_alloc_irq_vectors_affinity	vmlinux	EXPORT_SYMBOL	
@@ -11260,6 +11304,7 @@
 0x00000000	xas_store	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	crypto_register_scomps	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sysfs_emit	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xntimer_format_time	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_powernv_throttle	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_get_fils_discovery_tmpl	vmlinux	EXPORT_SYMBOL	
 0x00000000	net_namespace_list	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11297,7 +11342,6 @@
 0x00000000	perf_event_period	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rfc1042_header	vmlinux	EXPORT_SYMBOL	
 0x00000000	iio_buffer_init	vmlinux	EXPORT_SYMBOL	
-0x00000000	rockchip_dmcfreq_vop_bandwidth_request	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_devfreq_unregister_notifier	vmlinux	EXPORT_SYMBOL	
 0x00000000	sdio_get_host_pm_caps	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	media_device_register_entity_notify	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11308,6 +11352,7 @@
 0x00000000	hdmi_vendor_infoframe_pack_only	vmlinux	EXPORT_SYMBOL	
 0x00000000	rockchip_pcie_get_phys	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	add_swap_extent	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_heap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_sched_stat_iowait	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_rx_mgmt_khz	vmlinux	EXPORT_SYMBOL	
 0x00000000	svc_rpcb_cleanup	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11362,6 +11407,7 @@
 0x00000000	bioset_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	bioset_exit	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_sb_deactive	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnsynch_wakeup_this_sleeper	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	irq_alloc_generic_chip	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	udp_destruct_sock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_hashinfo2_init_mod	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11395,7 +11441,6 @@
 0x00000000	snd_seq_autoload_exit	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_seq_autoload_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	__devm_iio_device_register	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	rockchip_dmcfreq_write_trylock	vmlinux	EXPORT_SYMBOL	
 0x00000000	vb2_mmap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rk628_rxphy_power_on	vmlinux	EXPORT_SYMBOL	
 0x00000000	ahci_do_softreset	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11466,6 +11511,7 @@
 0x00000000	kbasep_find_enclosing_cpu_mapping_offset	vmlinux	EXPORT_SYMBOL	
 0x00000000	pci_root_buses	vmlinux	EXPORT_SYMBOL	
 0x00000000	blk_freeze_queue_start	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	run_oob_call	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_syn_ack_timeout	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_log_unregister	vmlinux	EXPORT_SYMBOL	
 0x00000000	iio_alloc_pollfunc	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11478,6 +11524,7 @@
 0x00000000	gpiochip_irqchip_add_key	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_block_bio_remap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	d_hash_and_lookup	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_demote	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_irq_handler_exit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_bss_iter	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_rawmidi_drain_input	vmlinux	EXPORT_SYMBOL	
@@ -11514,6 +11561,7 @@
 0x00000000	drm_legacy_ioremap_wc	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_reconfigure	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	vzalloc	vmlinux	EXPORT_SYMBOL	
+0x00000000	nkclock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_contiguous_default_area	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_pelt_thermal_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_free_hw	vmlinux	EXPORT_SYMBOL	
@@ -11528,6 +11576,7 @@
 0x00000000	ZSTD_CStreamWorkspaceBound	vmlinux	EXPORT_SYMBOL	
 0x00000000	unregister_key_type	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_commit_inode	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_put_iovec	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_shutdown_client	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	skb_mpls_pop	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iio_buffer_set_attrs	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11554,6 +11603,7 @@
 0x00000000	gpiod_is_active_low	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__kfifo_len_r	vmlinux	EXPORT_SYMBOL	
 0x00000000	vfs_create_mount	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_iomap_to_user	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_iter_advance	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	srcutorture_get_gp_data	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	read_sanitised_ftr_reg	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11579,6 +11629,7 @@
 0x00000000	block_truncate_page	vmlinux	EXPORT_SYMBOL	
 0x00000000	vfs_tmpfile	vmlinux	EXPORT_SYMBOL	
 0x00000000	generic_write_checks	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_pop_personality	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kthread_park	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_dev_state_flush	vmlinux	EXPORT_SYMBOL	
 0x00000000	netdev_increment_features	vmlinux	EXPORT_SYMBOL	
@@ -11672,6 +11723,7 @@
 0x00000000	drm_atomic_get_bridge_state	vmlinux	EXPORT_SYMBOL	
 0x00000000	of_phy_simple_xlate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mpi_ec_get_affine	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_fd_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_can_mmap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wake_up_process	vmlinux	EXPORT_SYMBOL	
 0x00000000	fs_overflowuid	vmlinux	EXPORT_SYMBOL	
@@ -11704,6 +11756,7 @@
 0x00000000	blk_mq_delay_run_hw_queue	vmlinux	EXPORT_SYMBOL	
 0x00000000	crypto_alg_tested	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iomap_seek_hole	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_event_timedwait	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sprint_symbol_no_offset	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_pelt_thermal_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	led_init_core	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11793,6 +11846,7 @@
 0x00000000	pci_scan_root_bus	vmlinux	EXPORT_SYMBOL	
 0x00000000	create_empty_buffers	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	init_special_inode	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	rtdm_fd_fcntl	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	on_each_cpu_cond	vmlinux	EXPORT_SYMBOL	
 0x00000000	cfg80211_mgmt_tx_status	vmlinux	EXPORT_SYMBOL	
 0x00000000	__xfrm_dst_lookup	vmlinux	EXPORT_SYMBOL	
@@ -11956,6 +12010,8 @@
 0x00000000	gpiochip_add_pingroup_range	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	klist_node_attached	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rhashtable_walk_exit	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnselect	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnheap_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	sched_trace_rd_span	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	flush_work	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	udp_tunnel_notify_add_rx_port	vmlinux	EXPORT_SYMBOL_GPL	
@@ -11983,6 +12039,7 @@
 0x00000000	pci_msi_unmask_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dump_stack	vmlinux	EXPORT_SYMBOL	
 0x00000000	mpage_writepage	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_pipeline	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_record_off	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	set_security_override	vmlinux	EXPORT_SYMBOL	
 0x00000000	raw_seq_start	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12005,6 +12062,7 @@
 0x00000000	crypto_unregister_shash	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	crypto_unregister_ahash	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kmalloc_order_trace	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnregistry_enter	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	stop_one_cpu_nowait	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_hrtimer_expire_exit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_sched_stat_runtime	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12013,7 +12071,6 @@
 0x00000000	tcp_sock_set_user_timeout	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_ctl_find_numid	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_devfreq_add_device	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpufreq_dbs_governor_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	policy_has_boost_freq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dm_bio_detain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_port_register_altmodes	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12080,6 +12137,8 @@
 0x00000000	gpiod_set_raw_value_cansleep	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__sg_alloc_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	shrink_dcache_sb	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnthread_set_mode	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnregistry_remove	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__tracepoint_xdp_bulk_tx	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	percpu_free_rwsem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_ops_int	vmlinux	EXPORT_SYMBOL	
@@ -12096,7 +12155,6 @@
 0x00000000	page_cache_sync_ra	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	raw_v4_hashinfo	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_bytes_info_ext	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpuidle_driver_state_disabled	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	typec_register_plug	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_gem_object_init	vmlinux	EXPORT_SYMBOL	
 0x00000000	devm_clk_hw_get_clk	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12190,7 +12248,6 @@
 0x00000000	nf_conntrack_helper_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xdp_rxq_info_unreg_mem_model	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	i2c_for_each_dev	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_mci_setup	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_driver_string	vmlinux	EXPORT_SYMBOL	
 0x00000000	corestack_driver_control	vmlinux	EXPORT_SYMBOL	
 0x00000000	pcie_get_readrq	vmlinux	EXPORT_SYMBOL	
@@ -12240,13 +12297,13 @@
 0x00000000	iunique	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	try_lookup_one_len	vmlinux	EXPORT_SYMBOL	
 0x00000000	__traceiter_kmem_cache_free	vmlinux	EXPORT_SYMBOL	
+0x00000000	cobalt_remove_state_chain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	unregister_die_notifier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xfrm_policy_walk_done	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet_dgram_ops	vmlinux	EXPORT_SYMBOL	
 0x00000000	media_pipeline_start	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	input_release_device	vmlinux	EXPORT_SYMBOL	
 0x00000000	usb_add_gadget	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_btcoex_init_mci	vmlinux	EXPORT_SYMBOL	
 0x00000000	ata_dev_next	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_format_info	vmlinux	EXPORT_SYMBOL	
 0x00000000	__do_once_done	vmlinux	EXPORT_SYMBOL	
@@ -12269,8 +12326,6 @@
 0x00000000	hwmon_device_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_interface_id	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_store_new_id	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_get_txq_props	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_set_txq_props	vmlinux	EXPORT_SYMBOL	
 0x00000000	sdev_evt_send_simple	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_find_mipi_dsi_host_by_node	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_fb_helper_blank	vmlinux	EXPORT_SYMBOL	
@@ -12281,6 +12336,7 @@
 0x00000000	seq_hex_dump	vmlinux	EXPORT_SYMBOL	
 0x00000000	frame_vector_create	vmlinux	EXPORT_SYMBOL	
 0x00000000	truncate_inode_pages_final	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnvfile_get_integer	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpu_pm_enter	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	freezer_cgrp_subsys	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__wake_up_locked_key	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12342,6 +12398,8 @@
 0x00000000	sgl_free_order	vmlinux	EXPORT_SYMBOL	
 0x00000000	debugfs_create_file	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	new_inode	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnbufd_unmap_kwrite	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnbufd_unmap_uwrite	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_restart_call_prepare	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	build_skb_around	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_ctl_find_id	vmlinux	EXPORT_SYMBOL	
@@ -12377,6 +12435,7 @@
 0x00000000	nfs_refresh_inode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	generic_remap_file_range_prep	vmlinux	EXPORT_SYMBOL	
 0x00000000	wbc_attach_and_unlock_inode	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_timer_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dynevent_create	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ndo_dflt_fdb_del	vmlinux	EXPORT_SYMBOL	
 0x00000000	sdio_release_host	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12402,6 +12461,7 @@
 0x00000000	__tracepoint_block_split	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	jbd2_journal_release_jbd_inode	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	wbc_account_cgroup_owner	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_fd_close	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	stack_trace_save	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inet_csk_reset_keepalive_timer	vmlinux	EXPORT_SYMBOL	
 0x00000000	__traceiter_tcp_send_reset	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12429,9 +12489,9 @@
 0x00000000	of_get_cpu_state_node	vmlinux	EXPORT_SYMBOL	
 0x00000000	v4l2_m2m_dqbuf	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	i2c_mux_alloc	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ar9003_paprd_setup_gain_table	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_gem_dumb_destroy	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_helper_check_plane_damage	vmlinux	EXPORT_SYMBOL	
+0x00000000	soft_cursor	vmlinux	EXPORT_SYMBOL	
 0x00000000	__rht_bucket_nested	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fsnotify_init_mark	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ping_seq_stop	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12550,6 +12610,7 @@
 0x00000000	key_put	vmlinux	EXPORT_SYMBOL	
 0x00000000	bmap	vmlinux	EXPORT_SYMBOL	
 0x00000000	kmemdup	vmlinux	EXPORT_SYMBOL	
+0x00000000	nklock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_free_pages	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	param_set_hexint	vmlinux	EXPORT_SYMBOL	
 0x00000000	show_regs	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12562,6 +12623,7 @@
 0x00000000	__drm_atomic_helper_plane_reset	vmlinux	EXPORT_SYMBOL	
 0x00000000	prandom_bytes	vmlinux	EXPORT_SYMBOL	
 0x00000000	generic_parse_monolithic	vmlinux	EXPORT_SYMBOL	
+0x00000000	inband_stage	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_stop_iface	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_gro_complete	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_register_congestion_control	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12619,7 +12681,6 @@
 0x00000000	snd_soc_close_delayed_work	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tee_device_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	iio_trigger_free	vmlinux	EXPORT_SYMBOL	
-0x00000000	governor_sysfs_ops	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_ipa_term_model	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_atomic_helper_commit_modeset_enables	vmlinux	EXPORT_SYMBOL	
 0x00000000	__tracepoint_block_rq_issue	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12640,6 +12701,7 @@
 0x00000000	memory_read_from_buffer	vmlinux	EXPORT_SYMBOL	
 0x00000000	seq_read_iter	vmlinux	EXPORT_SYMBOL	
 0x00000000	alarmtimer_get_rtcdev	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	irq_pipeline_active	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_probereq_get	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_send_bar	vmlinux	EXPORT_SYMBOL	
 0x00000000	xprt_force_disconnect	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12695,10 +12757,10 @@
 0x00000000	refresh_frequency_limits	vmlinux	EXPORT_SYMBOL	
 0x00000000	typec_altmode_exit	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_property_create_range	vmlinux	EXPORT_SYMBOL	
+0x00000000	tick_install_proxy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	getboottime64	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dequeue_signal	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_role_switch_register	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_beacon_config_sta	vmlinux	EXPORT_SYMBOL	
 0x00000000	clocks_calc_mult_shift	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_iter_keys	vmlinux	EXPORT_SYMBOL	
 0x00000000	flow_rule_match_mpls	vmlinux	EXPORT_SYMBOL	
@@ -12722,6 +12784,7 @@
 0x00000000	pci_reset_function_locked	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pinctrl_add_gpio_ranges	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	phy_pm_runtime_forbid	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	synthetic_irq_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rfkill_blocked	vmlinux	EXPORT_SYMBOL	
 0x00000000	ip6_push_pending_frames	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dev_change_net_namespace	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12733,6 +12796,7 @@
 0x00000000	blk_queue_rq_timeout	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blk_queue_flag_clear	vmlinux	EXPORT_SYMBOL	
 0x00000000	nfs_path	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rcu_oob_prepare_lock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_sched_stat_runtime	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cache_seq_stop_rcu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xdr_init_decode_pages	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12767,6 +12831,7 @@
 0x00000000	__SCK__tp_func_kmem_cache_free	vmlinux	EXPORT_SYMBOL	
 0x00000000	ring_buffer_dropped_events_cpu	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	posix_clock_register	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	inband_irq_save	vmlinux	EXPORT_SYMBOL	
 0x00000000	async_schedule_node_domain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_calc_tx_airtime	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usbnet_read_cmd_nopm	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12782,7 +12847,6 @@
 0x00000000	unix_tot_inflight	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_ealg_get_byid	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xt_copy_counters	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_set_rx_bufsize	vmlinux	EXPORT_SYMBOL	
 0x00000000	syscon_regmap_lookup_by_compatible	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	request_partial_firmware_into_buf	vmlinux	EXPORT_SYMBOL	
 0x00000000	device_pm_wait_for_dev	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12809,6 +12873,7 @@
 0x00000000	mipi_dsi_dcs_write	vmlinux	EXPORT_SYMBOL	
 0x00000000	iommu_dma_enable_best_fit_algo	vmlinux	EXPORT_SYMBOL	
 0x00000000	linear_range_get_max_value	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	rtdm_fd_sendmsg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	housekeeping_overridden	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ip_tunnel_encap_del_ops	vmlinux	EXPORT_SYMBOL	
 0x00000000	dst_blackhole_redirect	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12835,6 +12900,7 @@
 0x00000000	xa_find	vmlinux	EXPORT_SYMBOL	
 0x00000000	swap_type_to_swap_info	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	atomic_dec_and_mutex_lock	vmlinux	EXPORT_SYMBOL	
+0x00000000	dovetail_leave_inband	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kthread_delayed_work_timer_fn	vmlinux	EXPORT_SYMBOL	
 0x00000000	__traceiter_irq_handler_entry	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_ap_probereq_get	vmlinux	EXPORT_SYMBOL	
@@ -12855,6 +12921,7 @@
 0x00000000	crypto_grab_ahash	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kernel_read_file_from_path_initns	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	inode_set_flags	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
+0x00000000	cobalt_signal_alloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	dma_mmap_attrs	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_resume_disconnect	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bt_accept_enqueue	vmlinux	EXPORT_SYMBOL	
@@ -12934,7 +13001,6 @@
 0x00000000	flush_rcu_work	vmlinux	EXPORT_SYMBOL	
 0x00000000	__cpuhp_state_remove_instance	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	unregister_inetaddr_notifier	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_cmn_update_txpow	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_csf_firmware_global_output	vmlinux	EXPORT_SYMBOL	
 0x00000000	mipi_dsi_compression_mode	vmlinux	EXPORT_SYMBOL	
 0x00000000	amba_release_regions	vmlinux	EXPORT_SYMBOL	
@@ -12944,9 +13010,9 @@
 0x00000000	af_alg_free_sg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	af_alg_make_sg	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_kernel_pages	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_register_personality	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	perf_event_enable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nexthop_free_rcu	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	od_register_powersave_bias_handler	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_dwc3_readl	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	phy_reset_after_clk_enable	vmlinux	EXPORT_SYMBOL	
 0x00000000	errno_to_blk_status	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12958,8 +13024,6 @@
 0x00000000	nf_hook_entries_delete_raw	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netlink_ack	vmlinux	EXPORT_SYMBOL	
 0x00000000	sock_no_bind	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath_printk	vmlinux	EXPORT_SYMBOL	
-0x00000000	ath9k_hw_disable	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_unregister_fixup_for_uid	vmlinux	EXPORT_SYMBOL	
 0x00000000	dmaengine_unmap_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	clk_bulk_unprepare	vmlinux	EXPORT_SYMBOL_GPL	
@@ -12972,7 +13036,6 @@
 0x00000000	dm_tm_create_with_sm	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	v4l2_event_unsubscribe_all	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	xhci_check_bandwidth	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_resettxqueue	vmlinux	EXPORT_SYMBOL	
 0x00000000	kbase_mem_pool_grow	vmlinux	EXPORT_SYMBOL	
 0x00000000	dw_hdmi_cec_wake_ops_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	analogix_dp_probe	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13011,7 +13074,9 @@
 0x00000000	__SCK__tp_func_device_pm_callback_end	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	is_module_sig_enforced	vmlinux	EXPORT_SYMBOL	
 0x00000000	get_state_synchronize_rcu	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	irq_pipeline	vmlinux	EXPORT_SYMBOL	
 0x00000000	vprintk	vmlinux	EXPORT_SYMBOL	
+0x00000000	irq_send_oob_ipi	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ipv6_sock_mc_join	vmlinux	EXPORT_SYMBOL	
 0x00000000	__udp4_lib_lookup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tcp_poll	vmlinux	EXPORT_SYMBOL	
@@ -13036,9 +13101,9 @@
 0x00000000	pwm_request_from_chip	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	register_key_type	vmlinux	EXPORT_SYMBOL	
 0x00000000	grab_cache_page_write_begin	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_fd_get_setsockopt_args	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	lock_system_sleep	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rt_mutex_lock_interruptible	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	clocksource_mmio_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rk_get_temperature	vmlinux	EXPORT_SYMBOL	
 0x00000000	vb2_core_reqbufs	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nanddev_init	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13062,6 +13127,8 @@
 0x00000000	simple_write_begin	vmlinux	EXPORT_SYMBOL	
 0x00000000	redirty_page_for_writepage	vmlinux	EXPORT_SYMBOL	
 0x00000000	bdi_set_max_ratio	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnsynch_requeue_sleeper	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	inband_irq_restore	vmlinux	EXPORT_SYMBOL	
 0x00000000	ieee80211_ctstoself_get	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_conntrack_htable_size	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	netdev_emerg	vmlinux	EXPORT_SYMBOL	
@@ -13090,6 +13157,8 @@
 0x00000000	blk_check_plugged	vmlinux	EXPORT_SYMBOL	
 0x00000000	debugfs_initialized	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	lookup_one_len_unlocked	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_event_wait	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnclock_set_wallclock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_map_put	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	tracing_snapshot_cond_enable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	task_cputime_adjusted	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13126,6 +13195,7 @@
 0x00000000	fb_get_buffer_offset	vmlinux	EXPORT_SYMBOL	
 0x00000000	key_payload_reserve	vmlinux	EXPORT_SYMBOL	
 0x00000000	pin_user_pages_fast_only	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnthread_unblock	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ftrace_set_notrace	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	blocking_notifier_chain_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_get_key_rx_seq	vmlinux	EXPORT_SYMBOL	
@@ -13187,6 +13257,7 @@
 0x00000000	drm_atomic_helper_commit_planes_on_crtc	vmlinux	EXPORT_SYMBOL	
 0x00000000	regulator_list_hardware_vsel	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kmem_cache_size	vmlinux	EXPORT_SYMBOL	
+0x00000000	rtdm_get_iovec	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	regulatory_hint	vmlinux	EXPORT_SYMBOL	
 0x00000000	wiphy_rfkill_start_polling	vmlinux	EXPORT_SYMBOL	
 0x00000000	inet_ioctl	vmlinux	EXPORT_SYMBOL	
@@ -13235,6 +13306,7 @@
 0x00000000	iomap_file_buffered_write	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	get_cached_acl	vmlinux	EXPORT_SYMBOL	
 0x00000000	wait_for_stable_page	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	__xntimer_set_affinity	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	event_triggers_post_call	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cpu_bit_bitmap	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rpc_free_iostats	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13250,6 +13322,7 @@
 0x00000000	crypto_register_kpp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	touch_buffer	vmlinux	EXPORT_SYMBOL	
 0x00000000	alloc_file_pseudo	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnclock_register	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	prepare_to_swait_event	vmlinux	EXPORT_SYMBOL	
 0x00000000	__kthread_should_park	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	wiphy_apply_custom_regulatory	vmlinux	EXPORT_SYMBOL	
@@ -13292,6 +13365,7 @@
 0x00000000	phy_pm_runtime_put_sync	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fat_update_time	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	reclaim_pages	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_thread_lookup	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	rcu_inkernel_boot_has_ended	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	try_wait_for_completion	vmlinux	EXPORT_SYMBOL	
 0x00000000	param_set_ulong	vmlinux	EXPORT_SYMBOL	
@@ -13299,7 +13373,6 @@
 0x00000000	tcp_mmap	vmlinux	EXPORT_SYMBOL	
 0x00000000	nf_ct_expect_alloc	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	of_alias_get_alias_list	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_hw_intrpend	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_unregister_device	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_fb_helper_deferred_io	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_kms_helper_poll_init	vmlinux	EXPORT_SYMBOL	
@@ -13335,9 +13408,9 @@
 0x00000000	__SCK__tp_func_nfs_xdr_status	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_kmem_cache_alloc_node	vmlinux	EXPORT_SYMBOL	
 0x00000000	register_user_hw_breakpoint	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	hard_preempt_disable	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	cfg80211_rx_control_port	vmlinux	EXPORT_SYMBOL	
 0x00000000	xfrm_policy_walk_init	vmlinux	EXPORT_SYMBOL	
-0x00000000	cpufreq_dbs_governor_limits	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	driver_attach	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	gpiochip_irq_map	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ZSTD_copyDCtx	vmlinux	EXPORT_SYMBOL	
@@ -13393,6 +13466,7 @@
 0x00000000	nfs_may_open	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	jbd2_journal_dirty_metadata	vmlinux	EXPORT_SYMBOL	VFS_internal_I_am_really_a_filesystem_and_am_NOT_a_driver
 0x00000000	account_locked_vm	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	cobalt_add_config_chain	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	io_cgrp_subsys_enabled_key	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	handle_fasteoi_irq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	task_rq_lock	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13442,6 +13516,7 @@
 0x00000000	drm_vblank_work_schedule	vmlinux	EXPORT_SYMBOL	
 0x00000000	drm_display_mode_to_videomode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_dp_downstream_420_passthrough	vmlinux	EXPORT_SYMBOL	
+0x00000000	fbcon_modechange_possible	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fb_get_options	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kasprintf	vmlinux	EXPORT_SYMBOL	
@@ -13532,7 +13607,6 @@
 0x00000000	dev_pm_opp_of_register_em	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	gserial_suspend	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	usb_intf_get_dma_device	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath_gen_timer_isr	vmlinux	EXPORT_SYMBOL	
 0x00000000	dev_pm_qos_expose_latency_tolerance	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kbase_gator_hwcnt_init_names	vmlinux	EXPORT_SYMBOL	
 0x00000000	tty_put_char	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13569,6 +13643,7 @@
 0x00000000	dw_pcie_find_ext_capability	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	nfs_free_inode	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	seq_list_next	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnselector_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__irq_alloc_domain_generic_chips	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__SCK__tp_func_pelt_dl_tp	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	unregister_inet6addr_notifier	vmlinux	EXPORT_SYMBOL	
@@ -13658,7 +13733,6 @@
 0x00000000	nvmem_register_notifier	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	mmc_of_parse_voltage	vmlinux	EXPORT_SYMBOL	
 0x00000000	dm_array_cursor_end	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	ath9k_cmn_process_rssi	vmlinux	EXPORT_SYMBOL	
 0x00000000	phy_mii_ioctl	vmlinux	EXPORT_SYMBOL	
 0x00000000	spi_bitbang_init	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	drm_atomic_helper_plane_destroy_state	vmlinux	EXPORT_SYMBOL	
@@ -13713,6 +13787,8 @@
 0x00000000	unregister_asymmetric_key_parser	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	fuse_dev_release	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	default_llseek	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnbufd_unmap_kread	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xnbufd_unmap_uread	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ring_buffer_oldest_event_ts	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ipi_get_hwirq	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	svc_bind	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13727,12 +13803,12 @@
 0x00000000	dw_hdmi_probe	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	pinconf_generic_dt_subnode_to_map	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	guid_parse	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnbufd_map_kmem	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	io_cgrp_subsys_on_dfl_key	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ieee80211_get_unsol_bcast_probe_resp_tmpl	vmlinux	EXPORT_SYMBOL	
 0x00000000	rpc_peeraddr	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_dmaengine_pcm_unregister	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	snd_soc_dapm_nc_pin_unlocked	vmlinux	EXPORT_SYMBOL_GPL	
-0x00000000	cpuidle_register_driver	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	ptp_cancel_worker_sync	vmlinux	EXPORT_SYMBOL	
 0x00000000	phylink_mii_c22_pcs_set_advertisement	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	scsi_eh_prep_cmnd	vmlinux	EXPORT_SYMBOL	
@@ -13759,6 +13835,7 @@
 0x00000000	vfs_fsync	vmlinux	EXPORT_SYMBOL	
 0x00000000	poll_freewait	vmlinux	EXPORT_SYMBOL	
 0x00000000	si_meminfo	vmlinux	EXPORT_SYMBOL	
+0x00000000	xnbufd_invalidate	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	kthread_create_on_node	vmlinux	EXPORT_SYMBOL	
 0x00000000	snd_timer_pause	vmlinux	EXPORT_SYMBOL	
 0x00000000	typec_cable_set_identity	vmlinux	EXPORT_SYMBOL_GPL	
@@ -13804,6 +13881,7 @@
 0x00000000	__SCK__tp_func_usb_gadget_disconnect	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	property_entries_free	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	gpio_to_desc	vmlinux	EXPORT_SYMBOL_GPL	
+0x00000000	xntimer_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	bpf_offload_dev_destroy	vmlinux	EXPORT_SYMBOL_GPL	
 0x00000000	__traceiter_module_get	vmlinux	EXPORT_SYMBOL	
 0x00000000	tcp_ca_openreq_child	vmlinux	EXPORT_SYMBOL_GPL	
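The entries above export the Cobalt core and RTDM driver services
(rtdm_event_*, rtdm_sem_down, rtdm_mutex_lock, rtdm_fd_*, rtdm_timer_init,
xnthread_*, ...) so that out-of-tree real-time drivers can link against them.
As an illustration only (not part of the patch), the following minimal sketch
shows how an RTDM driver might consume three of these symbols; all demo_*
names and the event itself are hypothetical:

/*
 * Illustration only: a minimal RTDM handler pair built on
 * rtdm_event_init()/rtdm_event_wait()/rtdm_event_pulse(), three of
 * the services exported above.  All demo_* names are hypothetical.
 */
#include <rtdm/driver.h>

static rtdm_event_t demo_tx_done;	/* pulsed from a (hypothetical) IRQ handler */

static int demo_open(struct rtdm_fd *fd, int oflags)
{
	rtdm_event_init(&demo_tx_done, 0);
	return 0;
}

static ssize_t demo_write_rt(struct rtdm_fd *fd,
			     const void __user *buf, size_t len)
{
	/* Block the real-time caller until the IRQ handler calls
	 * rtdm_event_pulse(&demo_tx_done). */
	int ret = rtdm_event_wait(&demo_tx_done);

	return ret ? ret : (ssize_t)len;
}

rtdm_event_pulse() would be invoked from the interrupt handler to release the
waiter; the full driver-side API lives under include/rtdm/ in the tree this
patch adds.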
diff --git a/kernel/xenomai-v3.2.4/.clang-format b/kernel/xenomai-v3.2.4/.clang-format
new file mode 100755
index 0000000..2ffd69a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/.clang-format
@@ -0,0 +1,493 @@
+# SPDX-License-Identifier: GPL-2.0
+#
+# clang-format configuration file. Intended for clang-format >= 4.
+#
+# For more information, see:
+#
+#   Documentation/process/clang-format.rst
+#   https://clang.llvm.org/docs/ClangFormat.html
+#   https://clang.llvm.org/docs/ClangFormatStyleOptions.html
+#
+---
+AccessModifierOffset: -4
+AlignAfterOpenBracket: Align
+AlignConsecutiveAssignments: false
+AlignConsecutiveDeclarations: false
+#AlignEscapedNewlines: Left # Unknown to clang-format-4.0
+AlignOperands: true
+AlignTrailingComments: false
+AllowAllParametersOfDeclarationOnNextLine: false
+AllowShortBlocksOnASingleLine: false
+AllowShortCaseLabelsOnASingleLine: false
+AllowShortFunctionsOnASingleLine: None
+AllowShortIfStatementsOnASingleLine: false
+AllowShortLoopsOnASingleLine: false
+AlwaysBreakAfterDefinitionReturnType: None
+AlwaysBreakAfterReturnType: None
+AlwaysBreakBeforeMultilineStrings: false
+AlwaysBreakTemplateDeclarations: false
+BinPackArguments: true
+BinPackParameters: true
+BraceWrapping:
+  AfterClass: false
+  AfterControlStatement: false
+  AfterEnum: false
+  AfterFunction: true
+  AfterNamespace: true
+  AfterObjCDeclaration: false
+  AfterStruct: false
+  AfterUnion: false
+  #AfterExternBlock: false # Unknown to clang-format-5.0
+  BeforeCatch: false
+  BeforeElse: false
+  IndentBraces: false
+  #SplitEmptyFunction: true # Unknown to clang-format-4.0
+  #SplitEmptyRecord: true # Unknown to clang-format-4.0
+  #SplitEmptyNamespace: true # Unknown to clang-format-4.0
+BreakBeforeBinaryOperators: None
+BreakBeforeBraces: Custom
+#BreakBeforeInheritanceComma: false # Unknown to clang-format-4.0
+BreakBeforeTernaryOperators: false
+BreakConstructorInitializersBeforeComma: false
+#BreakConstructorInitializers: BeforeComma # Unknown to clang-format-4.0
+BreakAfterJavaFieldAnnotations: false
+BreakStringLiterals: false
+ColumnLimit: 80
+CommentPragmas: '^ IWYU pragma:'
+#CompactNamespaces: false # Unknown to clang-format-4.0
+ConstructorInitializerAllOnOneLineOrOnePerLine: false
+ConstructorInitializerIndentWidth: 8
+ContinuationIndentWidth: 8
+Cpp11BracedListStyle: false
+DerivePointerAlignment: false
+DisableFormat: false
+ExperimentalAutoDetectBinPacking: false
+#FixNamespaceComments: false # Unknown to clang-format-4.0
+
+# Taken from:
+#   git grep -h '^#define [^[:space:]]*for_each[^[:space:]]*(' include/ \
+#   | sed "s,^#define \([^[:space:]]*for_each[^[:space:]]*\)(.*$,  - '\1'," \
+#   | sort | uniq
+ForEachMacros:
+  - 'apei_estatus_for_each_section'
+  - 'ata_for_each_dev'
+  - 'ata_for_each_link'
+  - '__ata_qc_for_each'
+  - 'ata_qc_for_each'
+  - 'ata_qc_for_each_raw'
+  - 'ata_qc_for_each_with_internal'
+  - 'ax25_for_each'
+  - 'ax25_uid_for_each'
+  - '__bio_for_each_bvec'
+  - 'bio_for_each_bvec'
+  - 'bio_for_each_integrity_vec'
+  - '__bio_for_each_segment'
+  - 'bio_for_each_segment'
+  - 'bio_for_each_segment_all'
+  - 'bio_list_for_each'
+  - 'bip_for_each_vec'
+  - 'blkg_for_each_descendant_post'
+  - 'blkg_for_each_descendant_pre'
+  - 'blk_queue_for_each_rl'
+  - 'bond_for_each_slave'
+  - 'bond_for_each_slave_rcu'
+  - 'bpf_for_each_spilled_reg'
+  - 'btree_for_each_safe128'
+  - 'btree_for_each_safe32'
+  - 'btree_for_each_safe64'
+  - 'btree_for_each_safel'
+  - 'card_for_each_dev'
+  - 'cgroup_taskset_for_each'
+  - 'cgroup_taskset_for_each_leader'
+  - 'cpufreq_for_each_entry'
+  - 'cpufreq_for_each_entry_idx'
+  - 'cpufreq_for_each_valid_entry'
+  - 'cpufreq_for_each_valid_entry_idx'
+  - 'css_for_each_child'
+  - 'css_for_each_descendant_post'
+  - 'css_for_each_descendant_pre'
+  - 'device_for_each_child_node'
+  - 'drm_atomic_crtc_for_each_plane'
+  - 'drm_atomic_crtc_state_for_each_plane'
+  - 'drm_atomic_crtc_state_for_each_plane_state'
+  - 'drm_atomic_for_each_plane_damage'
+  - 'drm_connector_for_each_possible_encoder'
+  - 'drm_for_each_connector_iter'
+  - 'drm_for_each_crtc'
+  - 'drm_for_each_encoder'
+  - 'drm_for_each_encoder_mask'
+  - 'drm_for_each_fb'
+  - 'drm_for_each_legacy_plane'
+  - 'drm_for_each_plane'
+  - 'drm_for_each_plane_mask'
+  - 'drm_for_each_privobj'
+  - 'drm_mm_for_each_hole'
+  - 'drm_mm_for_each_node'
+  - 'drm_mm_for_each_node_in_range'
+  - 'drm_mm_for_each_node_safe'
+  - 'flow_action_for_each'
+  - 'for_each_active_drhd_unit'
+  - 'for_each_active_iommu'
+  - 'for_each_available_child_of_node'
+  - 'for_each_bio'
+  - 'for_each_board_func_rsrc'
+  - 'for_each_bvec'
+  - 'for_each_card_components'
+  - 'for_each_card_links'
+  - 'for_each_card_links_safe'
+  - 'for_each_card_prelinks'
+  - 'for_each_card_rtds'
+  - 'for_each_card_rtds_safe'
+  - 'for_each_cgroup_storage_type'
+  - 'for_each_child_of_node'
+  - 'for_each_clear_bit'
+  - 'for_each_clear_bit_from'
+  - 'for_each_cmsghdr'
+  - 'for_each_compatible_node'
+  - 'for_each_component_dais'
+  - 'for_each_component_dais_safe'
+  - 'for_each_comp_order'
+  - 'for_each_console'
+  - 'for_each_cpu'
+  - 'for_each_cpu_and'
+  - 'for_each_cpu_not'
+  - 'for_each_cpu_wrap'
+  - 'for_each_dev_addr'
+  - 'for_each_dma_cap_mask'
+  - 'for_each_dpcm_be'
+  - 'for_each_dpcm_be_rollback'
+  - 'for_each_dpcm_be_safe'
+  - 'for_each_dpcm_fe'
+  - 'for_each_drhd_unit'
+  - 'for_each_dss_dev'
+  - 'for_each_efi_memory_desc'
+  - 'for_each_efi_memory_desc_in_map'
+  - 'for_each_element'
+  - 'for_each_element_extid'
+  - 'for_each_element_id'
+  - 'for_each_endpoint_of_node'
+  - 'for_each_evictable_lru'
+  - 'for_each_fib6_node_rt_rcu'
+  - 'for_each_fib6_walker_rt'
+  - 'for_each_free_mem_range'
+  - 'for_each_free_mem_range_reverse'
+  - 'for_each_func_rsrc'
+  - 'for_each_hstate'
+  - 'for_each_if'
+  - 'for_each_iommu'
+  - 'for_each_ip_tunnel_rcu'
+  - 'for_each_irq_nr'
+  - 'for_each_link_codecs'
+  - 'for_each_lru'
+  - 'for_each_matching_node'
+  - 'for_each_matching_node_and_match'
+  - 'for_each_memblock'
+  - 'for_each_memblock_type'
+  - 'for_each_memcg_cache_index'
+  - 'for_each_mem_pfn_range'
+  - 'for_each_mem_range'
+  - 'for_each_mem_range_rev'
+  - 'for_each_migratetype_order'
+  - 'for_each_msi_entry'
+  - 'for_each_msi_entry_safe'
+  - 'for_each_net'
+  - 'for_each_netdev'
+  - 'for_each_netdev_continue'
+  - 'for_each_netdev_continue_rcu'
+  - 'for_each_netdev_feature'
+  - 'for_each_netdev_in_bond_rcu'
+  - 'for_each_netdev_rcu'
+  - 'for_each_netdev_reverse'
+  - 'for_each_netdev_safe'
+  - 'for_each_net_rcu'
+  - 'for_each_new_connector_in_state'
+  - 'for_each_new_crtc_in_state'
+  - 'for_each_new_mst_mgr_in_state'
+  - 'for_each_new_plane_in_state'
+  - 'for_each_new_private_obj_in_state'
+  - 'for_each_node'
+  - 'for_each_node_by_name'
+  - 'for_each_node_by_type'
+  - 'for_each_node_mask'
+  - 'for_each_node_state'
+  - 'for_each_node_with_cpus'
+  - 'for_each_node_with_property'
+  - 'for_each_of_allnodes'
+  - 'for_each_of_allnodes_from'
+  - 'for_each_of_cpu_node'
+  - 'for_each_of_pci_range'
+  - 'for_each_old_connector_in_state'
+  - 'for_each_old_crtc_in_state'
+  - 'for_each_old_mst_mgr_in_state'
+  - 'for_each_oldnew_connector_in_state'
+  - 'for_each_oldnew_crtc_in_state'
+  - 'for_each_oldnew_mst_mgr_in_state'
+  - 'for_each_oldnew_plane_in_state'
+  - 'for_each_oldnew_plane_in_state_reverse'
+  - 'for_each_oldnew_private_obj_in_state'
+  - 'for_each_old_plane_in_state'
+  - 'for_each_old_private_obj_in_state'
+  - 'for_each_online_cpu'
+  - 'for_each_online_node'
+  - 'for_each_online_pgdat'
+  - 'for_each_pci_bridge'
+  - 'for_each_pci_dev'
+  - 'for_each_pci_msi_entry'
+  - 'for_each_populated_zone'
+  - 'for_each_possible_cpu'
+  - 'for_each_present_cpu'
+  - 'for_each_prime_number'
+  - 'for_each_prime_number_from'
+  - 'for_each_process'
+  - 'for_each_process_thread'
+  - 'for_each_property_of_node'
+  - 'for_each_registered_fb'
+  - 'for_each_reserved_mem_region'
+  - 'for_each_rtd_codec_dai'
+  - 'for_each_rtd_codec_dai_rollback'
+  - 'for_each_rtdcom'
+  - 'for_each_rtdcom_safe'
+  - 'for_each_set_bit'
+  - 'for_each_set_bit_from'
+  - 'for_each_sg'
+  - 'for_each_sg_dma_page'
+  - 'for_each_sg_page'
+  - 'for_each_sibling_event'
+  - 'for_each_subelement'
+  - 'for_each_subelement_extid'
+  - 'for_each_subelement_id'
+  - '__for_each_thread'
+  - 'for_each_thread'
+  - 'for_each_zone'
+  - 'for_each_zone_zonelist'
+  - 'for_each_zone_zonelist_nodemask'
+  - 'fwnode_for_each_available_child_node'
+  - 'fwnode_for_each_child_node'
+  - 'fwnode_graph_for_each_endpoint'
+  - 'gadget_for_each_ep'
+  - 'genradix_for_each'
+  - 'genradix_for_each_from'
+  - 'hash_for_each'
+  - 'hash_for_each_possible'
+  - 'hash_for_each_possible_rcu'
+  - 'hash_for_each_possible_rcu_notrace'
+  - 'hash_for_each_possible_safe'
+  - 'hash_for_each_rcu'
+  - 'hash_for_each_safe'
+  - 'hctx_for_each_ctx'
+  - 'hlist_bl_for_each_entry'
+  - 'hlist_bl_for_each_entry_rcu'
+  - 'hlist_bl_for_each_entry_safe'
+  - 'hlist_for_each'
+  - 'hlist_for_each_entry'
+  - 'hlist_for_each_entry_continue'
+  - 'hlist_for_each_entry_continue_rcu'
+  - 'hlist_for_each_entry_continue_rcu_bh'
+  - 'hlist_for_each_entry_from'
+  - 'hlist_for_each_entry_from_rcu'
+  - 'hlist_for_each_entry_rcu'
+  - 'hlist_for_each_entry_rcu_bh'
+  - 'hlist_for_each_entry_rcu_notrace'
+  - 'hlist_for_each_entry_safe'
+  - '__hlist_for_each_rcu'
+  - 'hlist_for_each_safe'
+  - 'hlist_nulls_for_each_entry'
+  - 'hlist_nulls_for_each_entry_from'
+  - 'hlist_nulls_for_each_entry_rcu'
+  - 'hlist_nulls_for_each_entry_safe'
+  - 'i3c_bus_for_each_i2cdev'
+  - 'i3c_bus_for_each_i3cdev'
+  - 'ide_host_for_each_port'
+  - 'ide_port_for_each_dev'
+  - 'ide_port_for_each_present_dev'
+  - 'idr_for_each_entry'
+  - 'idr_for_each_entry_continue'
+  - 'idr_for_each_entry_ul'
+  - 'inet_bind_bucket_for_each'
+  - 'inet_lhash2_for_each_icsk_rcu'
+  - 'key_for_each'
+  - 'key_for_each_safe'
+  - 'klp_for_each_func'
+  - 'klp_for_each_func_safe'
+  - 'klp_for_each_func_static'
+  - 'klp_for_each_object'
+  - 'klp_for_each_object_safe'
+  - 'klp_for_each_object_static'
+  - 'kvm_for_each_memslot'
+  - 'kvm_for_each_vcpu'
+  - 'list_for_each'
+  - 'list_for_each_codec'
+  - 'list_for_each_codec_safe'
+  - 'list_for_each_entry'
+  - 'list_for_each_entry_continue'
+  - 'list_for_each_entry_continue_rcu'
+  - 'list_for_each_entry_continue_reverse'
+  - 'list_for_each_entry_from'
+  - 'list_for_each_entry_from_rcu'
+  - 'list_for_each_entry_from_reverse'
+  - 'list_for_each_entry_lockless'
+  - 'list_for_each_entry_rcu'
+  - 'list_for_each_entry_reverse'
+  - 'list_for_each_entry_safe'
+  - 'list_for_each_entry_safe_continue'
+  - 'list_for_each_entry_safe_from'
+  - 'list_for_each_entry_safe_reverse'
+  - 'list_for_each_prev'
+  - 'list_for_each_prev_safe'
+  - 'list_for_each_safe'
+  - 'llist_for_each'
+  - 'llist_for_each_entry'
+  - 'llist_for_each_entry_safe'
+  - 'llist_for_each_safe'
+  - 'media_device_for_each_entity'
+  - 'media_device_for_each_intf'
+  - 'media_device_for_each_link'
+  - 'media_device_for_each_pad'
+  - 'mp_bvec_for_each_page'
+  - 'mp_bvec_for_each_segment'
+  - 'nanddev_io_for_each_page'
+  - 'netdev_for_each_lower_dev'
+  - 'netdev_for_each_lower_private'
+  - 'netdev_for_each_lower_private_rcu'
+  - 'netdev_for_each_mc_addr'
+  - 'netdev_for_each_uc_addr'
+  - 'netdev_for_each_upper_dev_rcu'
+  - 'netdev_hw_addr_list_for_each'
+  - 'nft_rule_for_each_expr'
+  - 'nla_for_each_attr'
+  - 'nla_for_each_nested'
+  - 'nlmsg_for_each_attr'
+  - 'nlmsg_for_each_msg'
+  - 'nr_neigh_for_each'
+  - 'nr_neigh_for_each_safe'
+  - 'nr_node_for_each'
+  - 'nr_node_for_each_safe'
+  - 'of_for_each_phandle'
+  - 'of_property_for_each_string'
+  - 'of_property_for_each_u32'
+  - 'pci_bus_for_each_resource'
+  - 'ping_portaddr_for_each_entry'
+  - 'plist_for_each'
+  - 'plist_for_each_continue'
+  - 'plist_for_each_entry'
+  - 'plist_for_each_entry_continue'
+  - 'plist_for_each_entry_safe'
+  - 'plist_for_each_safe'
+  - 'pnp_for_each_card'
+  - 'pnp_for_each_dev'
+  - 'protocol_for_each_card'
+  - 'protocol_for_each_dev'
+  - 'queue_for_each_hw_ctx'
+  - 'radix_tree_for_each_slot'
+  - 'radix_tree_for_each_tagged'
+  - 'rbtree_postorder_for_each_entry_safe'
+  - 'rdma_for_each_port'
+  - 'resource_list_for_each_entry'
+  - 'resource_list_for_each_entry_safe'
+  - 'rhl_for_each_entry_rcu'
+  - 'rhl_for_each_rcu'
+  - 'rht_for_each'
+  - 'rht_for_each_from'
+  - 'rht_for_each_entry'
+  - 'rht_for_each_entry_from'
+  - 'rht_for_each_entry_rcu'
+  - 'rht_for_each_entry_rcu_from'
+  - 'rht_for_each_entry_safe'
+  - 'rht_for_each_rcu'
+  - 'rht_for_each_rcu_from'
+  - '__rq_for_each_bio'
+  - 'rq_for_each_bvec'
+  - 'rq_for_each_segment'
+  - 'scsi_for_each_prot_sg'
+  - 'scsi_for_each_sg'
+  - 'sctp_for_each_hentry'
+  - 'sctp_skb_for_each'
+  - 'shdma_for_each_chan'
+  - '__shost_for_each_device'
+  - 'shost_for_each_device'
+  - 'sk_for_each'
+  - 'sk_for_each_bound'
+  - 'sk_for_each_entry_offset_rcu'
+  - 'sk_for_each_from'
+  - 'sk_for_each_rcu'
+  - 'sk_for_each_safe'
+  - 'sk_nulls_for_each'
+  - 'sk_nulls_for_each_from'
+  - 'sk_nulls_for_each_rcu'
+  - 'snd_array_for_each'
+  - 'snd_pcm_group_for_each_entry'
+  - 'snd_soc_dapm_widget_for_each_path'
+  - 'snd_soc_dapm_widget_for_each_path_safe'
+  - 'snd_soc_dapm_widget_for_each_sink_path'
+  - 'snd_soc_dapm_widget_for_each_source_path'
+  - 'tb_property_for_each'
+  - 'tcf_exts_for_each_action'
+  - 'udp_portaddr_for_each_entry'
+  - 'udp_portaddr_for_each_entry_rcu'
+  - 'usb_hub_for_each_child'
+  - 'v4l2_device_for_each_subdev'
+  - 'v4l2_m2m_for_each_dst_buf'
+  - 'v4l2_m2m_for_each_dst_buf_safe'
+  - 'v4l2_m2m_for_each_src_buf'
+  - 'v4l2_m2m_for_each_src_buf_safe'
+  - 'virtio_device_for_each_vq'
+  - 'xa_for_each'
+  - 'xa_for_each_marked'
+  - 'xa_for_each_start'
+  - 'xas_for_each'
+  - 'xas_for_each_conflict'
+  - 'xas_for_each_marked'
+  - 'zorro_for_each_dev'
+
+#IncludeBlocks: Preserve # Unknown to clang-format-5.0
+IncludeCategories:
+  - Regex: '.*'
+    Priority: 1
+IncludeIsMainRegex: '(Test)?$'
+IndentCaseLabels: false
+#IndentPPDirectives: None # Unknown to clang-format-5.0
+IndentWidth: 8
+IndentWrappedFunctionNames: false
+JavaScriptQuotes: Leave
+JavaScriptWrapImports: true
+KeepEmptyLinesAtTheStartOfBlocks: false
+MacroBlockBegin: ''
+MacroBlockEnd: ''
+MaxEmptyLinesToKeep: 1
+NamespaceIndentation: Inner
+#ObjCBinPackProtocolList: Auto # Unknown to clang-format-5.0
+ObjCBlockIndentWidth: 8
+ObjCSpaceAfterProperty: true
+ObjCSpaceBeforeProtocolList: true
+
+# Taken from git's rules
+#PenaltyBreakAssignment: 10 # Unknown to clang-format-4.0
+PenaltyBreakBeforeFirstCallParameter: 30
+PenaltyBreakComment: 10
+PenaltyBreakFirstLessLess: 0
+PenaltyBreakString: 10
+PenaltyExcessCharacter: 100
+PenaltyReturnTypeOnItsOwnLine: 60
+
+PointerAlignment: Right
+ReflowComments: false
+SortIncludes: false
+#SortUsingDeclarations: false # Unknown to clang-format-4.0
+SpaceAfterCStyleCast: false
+SpaceAfterTemplateKeyword: true
+SpaceBeforeAssignmentOperators: true
+#SpaceBeforeCtorInitializerColon: true # Unknown to clang-format-5.0
+#SpaceBeforeInheritanceColon: true # Unknown to clang-format-5.0
+SpaceBeforeParens: ControlStatements
+#SpaceBeforeRangeBasedForLoopColon: true # Unknown to clang-format-5.0
+SpaceInEmptyParentheses: false
+SpacesBeforeTrailingComments: 1
+SpacesInAngles: false
+SpacesInContainerLiterals: false
+SpacesInCStyleCastParentheses: false
+SpacesInParentheses: false
+SpacesInSquareBrackets: false
+Standard: Cpp03
+TabWidth: 8
+UseTab: Always
+...
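The configuration above mirrors the Linux kernel style: 8-column tabs, an
80-column limit, the opening brace on its own line after function definitions,
and cuddled braces on control statements. Purely as an illustration (not part
of the patch), a fragment formatted under these settings looks like this:

/* Illustration only: the shape this .clang-format produces. */
static int demo_count_bits(unsigned long word)
{
	int bits = 0;

	while (word) {
		bits += word & 1;
		word >>= 1;
	}

	return bits;
}

Running clang-format -style=file from anywhere inside the tree picks this
configuration up automatically.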
diff --git a/kernel/xenomai-v3.2.4/.gitignore b/kernel/xenomai-v3.2.4/.gitignore
new file mode 100644
index 0000000..b1d6820
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/.gitignore
@@ -0,0 +1,17 @@
+Makefile.in
+include/xeno_config.h.in*
+config/compile
+config/config.guess
+config/config.sub
+config/depcomp
+config/install-sh
+config/libtool.m4
+config/ltmain.sh
+config/ltoptions.m4
+config/ltsugar.m4
+config/ltversion.m4
+config/lt~obsolete.m4
+config/missing
+configure
+aclocal.m4
+autom4te.cache
diff --git a/kernel/xenomai-v3.2.4/CONTRIBUTING.md b/kernel/xenomai-v3.2.4/CONTRIBUTING.md
new file mode 100644
index 0000000..b55949e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/CONTRIBUTING.md
@@ -0,0 +1,118 @@
+Contributing to Xenomai
+=======================
+
+Contributions to Xenomai are always welcome. This document explains the general
+requirements for contributions and the recommended preparation steps. It also
+sketches the typical integration process for patches.
+
+
+Contribution Checklist
+----------------------
+
+- use git to manage your changes [*recommended*]
+
+- follow Linux Kernel coding style [**required**]
+    - see also [Linux kernel coding style](https://www.kernel.org/doc/html/latest/process/coding-style.html)
+    - try out the checkpatch.pl script from the Linux kernel
+
+- add the required copyright header to each new file introduced [**required**]
+
+- structure patches logically, in small steps [**required**]
+    - one separable functionality/fix/refactoring = one patch
+    - do not mix those three into a single patch (e.g. first refactor, then
+      add new functionality that builds on the refactoring)
+    - after each patch, the tree still has to build and work, i.e. do not add
+      even temporary breakages inside a patch series (helps when tracking down
+      bugs)
+    - use `git rebase -i` to restructure a patch series
+
+- base patches on top of latest master or - if there are dependencies - on next
+  (note: next is an integration branch that may change non-linearly) [**required**]
+
+- test patches sufficiently AFTER the last edit (obvious, but...) [**required**]
+
+- add signed-off to all patches [**required**]
+    - to certify the "Developer's Certificate of Origin", see below
+    - check with your employer when not working on your own!
+
+- indicate if you think a patch fixes a bug present in a stable branch as well [*recommended*]
+    - add a note to the cover letter of the patch series
+    - or add some remark after the "---" separator of the patch itself
+
+- post patches to mailing list [**required**]
+    - use `git format-patch/send-email` if possible
+    - send patches inline, do not append them
+    - no HTML emails!
+    - CC people who you think should look at the patches, e.g.
+      - affected maintainers
+      - someone who wrote a change that is fixed or reverted by you now
+      - someone who commented on related changes in the recent past
+      - someone who otherwise has expertise and is interested in the topic
+    - pull requests on gitlab are optional
+
+- post follow-up version(s) if feedback requires this [**required**]
+
+- send a reminder if nothing has happened after about two weeks [*recommended*]
+
+
+Developer's Certificate of Origin 1.1
+-------------------------------------
+
+When signing off a patch for this project like this
+
+    Signed-off-by: Random J Developer <random@developer.example.org>
+
+using your real name (no pseudonyms or anonymous contributions), you declare the
+following:
+
+    By making a contribution to this project, I certify that:
+
+        (a) The contribution was created in whole or in part by me and I
+            have the right to submit it under the open source license
+            indicated in the file; or
+
+        (b) The contribution is based upon previous work that, to the best
+            of my knowledge, is covered under an appropriate open source
+            license and I have the right under that license to submit that
+            work with modifications, whether created in whole or in part
+            by me, under the same open source license (unless I am
+            permitted to submit under a different license), as indicated
+            in the file; or
+
+        (c) The contribution was provided directly to me by some other
+            person who certified (a), (b) or (c) and I have not modified
+            it.
+
+        (d) I understand and agree that this project and the contribution
+            are public and that a record of the contribution (including all
+            personal information I submit with it, including my sign-off) is
+            maintained indefinitely and may be redistributed consistent with
+            this project or the open source license(s) involved.
+
+See also [Sign your work - the Developer’s Certificate of Origin](https://www.kernel.org/doc/html/latest/process/submitting-patches.html#sign-your-work-the-developer-s-certificate-of-origin)
+for further background on this process, which was adopted from the Linux kernel.
+
+
+Contribution Integration Process
+--------------------------------
+
+1. patch reviews performed on mailing list
+    * at least by maintainers, but everyone is invited
+    * feedback has to consider design, functionality and style
+    * simpler and clearer code preferred, even if original code works fine
+
+2. accepted patches merged into next branch
+
+3. further testing done by the community, including CI build tests, code
+   analyzer runs, and on-target tests
+
+4. if no new problems or discussions show up, acceptance into master
+    * grace period for master: about 3 days
+    * urgent fixes may be applied sooner
+
+5. a stable-relevant patch is applied to the related stable branch after it was
+   merged into master (except for patches that are stable-specific)
+
+gitlab facilities are not used for the review process so that people can follow
+all changes and related discussions in a single place, the mailing list. This
+may change in the future if gitlab improves its email integration.
diff --git a/kernel/xenomai-v3.2.4/Makefile.am b/kernel/xenomai-v3.2.4/Makefile.am
new file mode 100644
index 0000000..6046442
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/Makefile.am
@@ -0,0 +1,70 @@
+ACLOCAL_AMFLAGS=-I config
+
+SUBDIRS = 		\
+	doc		\
+	lib 		\
+	config		\
+	include		\
+	scripts		\
+	utils
+
+if XENO_ENABLE_DEMO
+SUBDIRS += 		\
+	demo
+endif
+
+if XENO_ENABLE_TESTSUITE
+SUBDIRS += 		\
+	testsuite
+endif
+
+EXTRA_DIST = kernel debian
+
+DIST_SUBDIRS =		\
+	config		\
+	demo		\
+	doc		\
+	include		\
+	lib 		\
+	scripts		\
+	testsuite	\
+	utils
+
+doc/%: FORCE
+	$(MAKE) -C doc/ $*
+
+dist-hook:
+	rm -fr `find $(distdir) -name '.svn' -o -name CVS  -o -name '.#*' \
+		-o -name '*~' -o -name autom4te.cache`
+
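+# Install the Cobalt udev rules: append them to a legacy monolithic
+# udev.rules file if one exists (skipping rules already present),
+# otherwise copy the individual .rules files into
+# $(sysconfdir)/udev/rules.d/.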
+install-udev-rules:
+if XENO_COBALT
+	if test -r $(DESTDIR)/$(sysconfdir)/udev/udev.rules ; then \
+	    for f in $(srcdir)/kernel/cobalt/udev/*.rules ; do \
+		b=`basename $$f` ; \
+		grep -q Xenomai:`basename $$b .rules` $(DESTDIR)/$(sysconfdir)/udev/udev.rules || \
+		( echo ; cat $$f ) >> $(DESTDIR)/$(sysconfdir)/udev/udev.rules ; \
+	    done ; \
+	else \
+	    $(mkinstalldirs) $(DESTDIR)/$(sysconfdir)/udev/rules.d; \
+	    for f in $(srcdir)/kernel/cobalt/udev/*.rules ; do \
+		$(INSTALL_DATA) $$f $(DESTDIR)/$(sysconfdir)/udev/rules.d/ ; \
+	    done ; \
+	fi
+endif
+
+uninstall-udev-rules:
+if XENO_COBALT
+	cd $(srcdir)/kernel/cobalt/udev && for f in *.rules; do \
+	    rm -f $(DESTDIR)/$(sysconfdir)/udev/rules.d/$$f ; \
+	done
+endif
+
+# legacy alias
+install-user: install
+
+install-exec-local: install-udev-rules
+
+uninstall-local: uninstall-udev-rules
+
+.PHONY: FORCE
diff --git a/kernel/xenomai-v3.2.4/README b/kernel/xenomai-v3.2.4/README
new file mode 100644
index 0000000..d7241a1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/README
@@ -0,0 +1,74 @@
+
+Where to start from?
+====================
+
+http://xenomai.org/start-here/ is the best place to start learning
+about Xenomai 3.
+
+Also, make sure to read the per-architecture README files, i.e.:
+kernel/cobalt/arch/*/README
+
+Documentation
+=============
+
+The Xenomai 3.x documentation can be built then installed this way:
+
+xenomai-3.x.y/configure --enable-doc-build --prefix=<install-dir>
+
+Asciidoc, Doxygen, W3M and Dot packages are required for building the
+documentation.
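+
+For instance (a sketch; the build directory and prefix are examples):
+
+  mkdir build && cd build
+  ../xenomai-3.x.y/configure --enable-doc-build --prefix=/opt/xenomai
+  make
+  make install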
+
+Online documentation
+====================
+
+The online version of the documentation is available from our website
+for the current release:
+
+http://xenomai.org/installing-xenomai-3-x/
+http://xenomai.org/building-applications-with-xenomai-3-x/
+http://xenomai.org/running-applications-with-xenomai-3-x/
+http://xenomai.org/migrating-from-xenomai-2-x-to-3-x/
+http://xenomai.org/documentation/xenomai-3/html/xeno3prm/index.html
+http://xenomai.org/troubleshooting-a-dual-kernel-configuration/
+http://xenomai.org/troubleshooting-a-single-kernel-configuration/
+
+Building from sources
+=====================
+
+Detailed instructions for building from sources are available at:
+http://xenomai.org/installing-xenomai-3-x/
+
+- GIT clone:
+
+  git://git.xenomai.org/xenomai-3.git
+  http://git.xenomai.org/xenomai-3.git
+
+  Once the repository is cloned, make sure to bootstrap the autoconf
+  system in the top-level directory by running scripts/bootstrap.  In
+  order to do this, you will need the GNU autotools installed on your
+  workstation.
+
+  If you intend to update the Xenomai code base, you may want to pass
+  --enable-maintainer-mode to the configure script for building, so
+  that autoconf/automake output files are automatically regenerated at
+  the next (re)build in case the corresponding templates have changed.
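+
+  For instance, a first build from a fresh clone could look like this
+  (a sketch; the configure options shown are examples only):
+
+    git clone http://git.xenomai.org/xenomai-3.git
+    cd xenomai-3
+    ./scripts/bootstrap
+    ./configure --with-core=cobalt
+    make && make install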
+
+- Tarballs:
+
+  http://xenomai.org/downloads/xenomai/
+
+  Source tarballs are self-contained and ready for building.
+
+Licensing terms
+===============
+
+Source files which implement the Xenomai software system generally
+include a copyright notice and license header. In absence of license
+header in a particular file, the terms and conditions stated by the
+COPYING or LICENSE file present in the top-level directory of the
+relevant package apply.
+
+For instance, lib/cobalt/COPYING states the licensing terms and
+conditions applicable to the source files present in the hierarchy
+rooted at lib/cobalt.
diff --git a/kernel/xenomai-v3.2.4/config/INSTALL b/kernel/xenomai-v3.2.4/config/INSTALL
new file mode 100644
index 0000000..54caf7c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/INSTALL
@@ -0,0 +1,229 @@
+Copyright (C) 1994, 1995, 1996, 1999, 2000, 2001, 2002 Free Software
+Foundation, Inc.
+
+   This file is free documentation; the Free Software Foundation gives
+unlimited permission to copy, distribute and modify it.
+
+Basic Installation
+==================
+
+   These are generic installation instructions.
+
+   The `configure' shell script attempts to guess correct values for
+various system-dependent variables used during compilation.  It uses
+those values to create a `Makefile' in each directory of the package.
+It may also create one or more `.h' files containing system-dependent
+definitions.  Finally, it creates a shell script `config.status' that
+you can run in the future to recreate the current configuration, and a
+file `config.log' containing compiler output (useful mainly for
+debugging `configure').
+
+   It can also use an optional file (typically called `config.cache'
+and enabled with `--cache-file=config.cache' or simply `-C') that saves
+the results of its tests to speed up reconfiguring.  (Caching is
+disabled by default to prevent problems with accidental use of stale
+cache files.)
+
+   If you need to do unusual things to compile the package, please try
+to figure out how `configure' could check whether to do them, and mail
+diffs or instructions to the address given in the `README' so they can
+be considered for the next release.  If you are using the cache, and at
+some point `config.cache' contains results you don't want to keep, you
+may remove or edit it.
+
+   The file `configure.ac' (or `configure.in') is used to create
+`configure' by a program called `autoconf'.  You only need
+`configure.ac' if you want to change it or regenerate `configure' using
+a newer version of `autoconf'.
+
+The simplest way to compile this package is:
+
+  1. `cd' to the directory containing the package's source code and type
+     `./configure' to configure the package for your system.  If you're
+     using `csh' on an old version of System V, you might need to type
+     `sh ./configure' instead to prevent `csh' from trying to execute
+     `configure' itself.
+
+     Running `configure' takes a while.  While running, it prints some
+     messages telling which features it is checking for.
+
+  2. Type `make' to compile the package.
+
+  3. Optionally, type `make check' to run any self-tests that come with
+     the package.
+
+  4. Type `make install' to install the programs and any data files and
+     documentation.
+
+  5. You can remove the program binaries and object files from the
+     source code directory by typing `make clean'.  To also remove the
+     files that `configure' created (so you can compile the package for
+     a different kind of computer), type `make distclean'.  There is
+     also a `make maintainer-clean' target, but that is intended mainly
+     for the package's developers.  If you use it, you may have to get
+     all sorts of other programs in order to regenerate files that came
+     with the distribution.
+
+Compilers and Options
+=====================
+
+   Some systems require unusual options for compilation or linking that
+the `configure' script does not know about.  Run `./configure --help'
+for details on some of the pertinent environment variables.
+
+   You can give `configure' initial values for configuration parameters
+by setting variables in the command line or in the environment.  Here
+is an example:
+
+     ./configure CC=c89 CFLAGS=-O2 LIBS=-lposix
+
+   *Note Defining Variables::, for more details.
+
+Compiling For Multiple Architectures
+====================================
+
+   You can compile the package for more than one kind of computer at the
+same time, by placing the object files for each architecture in their
+own directory.  To do this, you must use a version of `make' that
+supports the `VPATH' variable, such as GNU `make'.  `cd' to the
+directory where you want the object files and executables to go and run
+the `configure' script.  `configure' automatically checks for the
+source code in the directory that `configure' is in and in `..'.
+
+   If you have to use a `make' that does not support the `VPATH'
+variable, you have to compile the package for one architecture at a
+time in the source code directory.  After you have installed the
+package for one architecture, use `make distclean' before reconfiguring
+for another architecture.
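+
+   For example, a VPATH build in a separate object directory might look
+like this (directory names are illustrative):
+
+     mkdir ../obj-sun4 && cd ../obj-sun4
+     ../package-1.0/configure
+     make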
+
+Installation Names
+==================
+
+   By default, `make install' will install the package's files in
+`/usr/local/bin', `/usr/local/man', etc.  You can specify an
+installation prefix other than `/usr/local' by giving `configure' the
+option `--prefix=PATH'.
+
+   You can specify separate installation prefixes for
+architecture-specific files and architecture-independent files.  If you
+give `configure' the option `--exec-prefix=PATH', the package will use
+PATH as the prefix for installing programs and libraries.
+Documentation and other data files will still use the regular prefix.
+
+   In addition, if you use an unusual directory layout you can give
+options like `--bindir=PATH' to specify different values for particular
+kinds of files.  Run `configure --help' for a list of the directories
+you can set and what kinds of files go in them.
+
+   If the package supports it, you can cause programs to be installed
+with an extra prefix or suffix on their names by giving `configure' the
+option `--program-prefix=PREFIX' or `--program-suffix=SUFFIX'.
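+
+   For example (the paths shown are illustrative):
+
+     ./configure --prefix=$HOME/local --exec-prefix=$HOME/local/sparc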
+
+Optional Features
+=================
+
+   Some packages pay attention to `--enable-FEATURE' options to
+`configure', where FEATURE indicates an optional part of the package.
+They may also pay attention to `--with-PACKAGE' options, where PACKAGE
+is something like `gnu-as' or `x' (for the X Window System).  The
+`README' should mention any `--enable-' and `--with-' options that the
+package recognizes.
+
+   For packages that use the X Window System, `configure' can usually
+find the X include and library files automatically, but if it doesn't,
+you can use the `configure' options `--x-includes=DIR' and
+`--x-libraries=DIR' to specify their locations.
+
+Specifying the System Type
+==========================
+
+   There may be some features `configure' cannot figure out
+automatically, but needs to determine by the type of machine the package
+will run on.  Usually, assuming the package is built to be run on the
+_same_ architectures, `configure' can figure that out, but if it prints
+a message saying it cannot guess the machine type, give it the
+`--build=TYPE' option.  TYPE can either be a short name for the system
+type, such as `sun4', or a canonical name which has the form:
+
+     CPU-COMPANY-SYSTEM
+
+where SYSTEM can have one of these forms:
+
+     OS KERNEL-OS
+
+   See the file `config.sub' for the possible values of each field.  If
+`config.sub' isn't included in this package, then this package doesn't
+need to know the machine type.
+
+   If you are _building_ compiler tools for cross-compiling, you should
+use the `--target=TYPE' option to select the type of system they will
+produce code for.
+
+   If you want to _use_ a cross compiler, that generates code for a
+platform different from the build platform, you should specify the
+"host" platform (i.e., that on which the generated programs will
+eventually be run) with `--host=TYPE'.
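+
+   For example, configuring for cross compilation might look like this
+(the system triplets are illustrative):
+
+     ./configure --build=i686-pc-linux-gnu --host=sparc-sun-solaris2.7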
+
+Sharing Defaults
+================
+
+   If you want to set default values for `configure' scripts to share,
+you can create a site shell script called `config.site' that gives
+default values for variables like `CC', `cache_file', and `prefix'.
+`configure' looks for `PREFIX/share/config.site' if it exists, then
+`PREFIX/etc/config.site' if it exists.  Or, you can set the
+`CONFIG_SITE' environment variable to the location of the site script.
+A warning: not all `configure' scripts look for a site script.
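+
+   For example, a minimal `config.site' might contain (values are
+illustrative):
+
+     prefix=/opt/pkg
+     CC=gcc
+     CFLAGS='-O2 -g'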
+
+Defining Variables
+==================
+
+   Variables not defined in a site shell script can be set in the
+environment passed to `configure'.  However, some packages may run
+configure again during the build, and the customized values of these
+variables may be lost.  In order to avoid this problem, you should set
+them in the `configure' command line, using `VAR=value'.  For example:
+
+     ./configure CC=/usr/local2/bin/gcc
+
+will cause the specified gcc to be used as the C compiler (unless it is
+overridden in the site shell script).
+
+`configure' Invocation
+======================
+
+   `configure' recognizes the following options to control how it
+operates.
+
+`--help'
+`-h'
+     Print a summary of the options to `configure', and exit.
+
+`--version'
+`-V'
+     Print the version of Autoconf used to generate the `configure'
+     script, and exit.
+
+`--cache-file=FILE'
+     Enable the cache: use and save the results of the tests in FILE,
+     traditionally `config.cache'.  FILE defaults to `/dev/null' to
+     disable caching.
+
+`--config-cache'
+`-C'
+     Alias for `--cache-file=config.cache'.
+
+`--quiet'
+`--silent'
+`-q'
+     Do not print messages saying which checks are being made.  To
+     suppress all normal output, redirect it to `/dev/null' (any error
+     messages will still be shown).
+
+`--srcdir=DIR'
+     Look for the package's source code in directory DIR.  Usually
+     `configure' can determine that directory automatically.
+
+`configure' also accepts some other, not widely useful, options.  Run
+`configure --help' for more details.
+
diff --git a/kernel/xenomai-v3.2.4/config/Makefile.am b/kernel/xenomai-v3.2.4/config/Makefile.am
new file mode 100644
index 0000000..9ce4a9c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/Makefile.am
@@ -0,0 +1,7 @@
+EXTRA_DIST= 		\
+	acinclude.m4 	\
+	docbook.m4	\
+	apirev		\
+	version-label	\
+	version-code	\
+	ac_prog_cc_for_build.m4
diff --git a/kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4 b/kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4
new file mode 100644
index 0000000..8cba249
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/ac_prog_cc_for_build.m4
@@ -0,0 +1,108 @@
+dnl Available from the GNU Autoconf Macro Archive at:
+dnl http://www.gnu.org/software/ac-archive/htmldoc/ac_prog_cc_for_build.html
+dnl
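+dnl Usage sketch (not part of the original archive text): invoke
+dnl AC_PROG_CC_FOR_BUILD once from configure.ac, after AC_PROG_CC, to
+dnl detect a native compiler for build-time helper tools; it provides
+dnl CC_FOR_BUILD and substitutes CFLAGS_FOR_BUILD, CPPFLAGS_FOR_BUILD,
+dnl BUILD_EXEEXT and BUILD_OBJEXT.
+dnl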
+AC_DEFUN([AC_PROG_CC_FOR_BUILD], [dnl
+AC_REQUIRE([AC_PROG_CC])dnl
+AC_REQUIRE([AC_PROG_CPP])dnl
+AC_REQUIRE([AC_EXEEXT])dnl
+AC_REQUIRE([AC_CANONICAL_SYSTEM])dnl
+dnl
+pushdef([AC_TRY_COMPILER], [
+cat > conftest.$ac_ext << EOF
+#line __oline__ "configure"
+#include "confdefs.h"
+[$1]
+EOF
+# If we can't run a trivial program, we are probably using a cross
+# compiler.
+# Fail miserably.
+if AC_TRY_EVAL(ac_link) && test -s conftest${ac_exeext} && (./conftest; exit) 2>/dev/null; then
+  [$2]=yes
+else
+  echo "configure: failed program was:" >&AC_FD_CC
+  cat conftest.$ac_ext >&AC_FD_CC
+  [$2]=no
+fi
+[$3]=no
+rm -fr conftest*])dnl
+
+dnl Use the standard macros, but make them use other variable names
+dnl
+pushdef([cross_compiling], [#])dnl
+pushdef([ac_cv_prog_CPP], ac_cv_build_prog_CPP)dnl
+pushdef([ac_cv_prog_gcc], ac_cv_build_prog_gcc)dnl
+pushdef([ac_cv_prog_cc_works], ac_cv_build_prog_cc_works)dnl
+pushdef([ac_cv_prog_cc_cross], ac_cv_build_prog_cc_cross)dnl
+pushdef([ac_cv_prog_cc_g], ac_cv_build_prog_cc_g)dnl
+pushdef([ac_cv_exeext], ac_cv_build_exeext)dnl
+pushdef([ac_cv_objext], ac_cv_build_objext)dnl
+pushdef([ac_exeext], ac_build_exeext)dnl
+pushdef([ac_objext], ac_build_objext)dnl
+pushdef([CC], CC_FOR_BUILD)dnl
+pushdef([CPP], CPP_FOR_BUILD)dnl
+pushdef([CFLAGS], CFLAGS_FOR_BUILD)dnl
+pushdef([CPPFLAGS], CPPFLAGS_FOR_BUILD)dnl
+pushdef([host], build)dnl
+pushdef([host_alias], build_alias)dnl
+pushdef([host_cpu], build_cpu)dnl
+pushdef([host_vendor], build_vendor)dnl
+pushdef([host_os], build_os)dnl
+pushdef([ac_cv_host], ac_cv_build)dnl
+pushdef([ac_cv_host_alias], ac_cv_build_alias)dnl
+pushdef([ac_cv_host_cpu], ac_cv_build_cpu)dnl
+pushdef([ac_cv_host_vendor], ac_cv_build_vendor)dnl
+pushdef([ac_cv_host_os], ac_cv_build_os)dnl
+pushdef([ac_cpp], ac_build_cpp)dnl
+pushdef([ac_compile], ac_build_compile)dnl
+pushdef([ac_link], ac_build_link)dnl
+
+dnl dnl Defeat the anti-duplication mechanism
+dnl dnl
+dnl undefine([AC_PROVIDE_AC_PROG_CPP])dnl
+dnl undefine([AC_PROVIDE_AC_PROG_C])dnl
+dnl undefine([AC_PROVIDE_AC_EXEEXT])dnl
+
+AC_PROG_CC
+AC_PROG_CPP
+AC_EXEEXT
+
+dnl Restore the old definitions
+dnl
+popdef([AC_TRY_COMPILER])dnl
+popdef([ac_link])dnl
+popdef([ac_compile])dnl
+popdef([ac_cpp])dnl
+popdef([ac_cv_host_os])dnl
+popdef([ac_cv_host_vendor])dnl
+popdef([ac_cv_host_cpu])dnl
+popdef([ac_cv_host_alias])dnl
+popdef([ac_cv_host])dnl
+popdef([host_os])dnl
+popdef([host_vendor])dnl
+popdef([host_cpu])dnl
+popdef([host_alias])dnl
+popdef([host])dnl
+popdef([CPPFLAGS])dnl
+popdef([CFLAGS])dnl
+popdef([CPP])dnl
+popdef([CC])dnl
+popdef([ac_objext])dnl
+popdef([ac_exeext])dnl
+popdef([ac_cv_objext])dnl
+popdef([ac_cv_exeext])dnl
+popdef([ac_cv_prog_cc_g])dnl
+popdef([ac_cv_prog_cc_works])dnl
+popdef([ac_cv_prog_cc_cross])dnl
+popdef([ac_cv_prog_gcc])dnl
+popdef([cross_compiling])dnl
+
+dnl Finally, set Makefile variables
+dnl
+BUILD_EXEEXT=$ac_build_exeext
+BUILD_OBJEXT=$ac_build_objext
+AC_SUBST(BUILD_EXEEXT)dnl
+AC_SUBST(BUILD_OBJEXT)dnl
+AC_SUBST([CFLAGS_FOR_BUILD])dnl
+AC_SUBST([CPPFLAGS_FOR_BUILD])dnl
+])
diff --git a/kernel/xenomai-v3.2.4/config/acinclude.m4 b/kernel/xenomai-v3.2.4/config/acinclude.m4
new file mode 100644
index 0000000..9521613
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/acinclude.m4
@@ -0,0 +1,579 @@
+dnl AC_PATH_XREQUIRED() requires X libs. This frag has been
+dnl lifted nearly "as is" from Postgresql's configure.in script.
+
+AC_DEFUN([AC_PATH_XREQUIRED],
+[
+	save_LIBS="$LIBS"
+	save_CFLAGS="$CFLAGS"
+	save_CPPFLAGS="$CPPFLAGS"
+	save_LDFLAGS="$LDFLAGS"
+
+	AC_PATH_X
+	AC_PATH_XTRA
+
+	LIBS="$LIBS $X_EXTRA_LIBS"
+	CFLAGS="$CFLAGS $X_CFLAGS"
+	CPPFLAGS="$CPPFLAGS $X_CFLAGS"
+	LDFLAGS="$LDFLAGS $X_LIBS"
+
+	dnl Check for X library
+
+	X11_LIBS=""
+	AC_CHECK_LIB(X11, XOpenDisplay, X11_LIBS="-lX11",,${X_PRE_LIBS})
+	if test "$X11_LIBS" = ""; then
+		dnl Not having X is bad news, period. Let the user fix this.
+		AC_MSG_ERROR([The X11 library '-lX11' could not be found,
+ so I won't go further. Please use the configure
+ options '--x-includes=DIR' and '--x-libraries=DIR'
+ to specify the X location. See the file 'config.log'
+ for further diagnostics.])
+	fi
+	AC_SUBST(X_LIBS)
+	AC_SUBST(X11_LIBS)
+	AC_SUBST(X_PRE_LIBS)
+
+	LIBS="$save_LIBS"
+	CFLAGS="$save_CFLAGS"
+	CPPFLAGS="$save_CPPFLAGS"
+	LDFLAGS="$save_LDFLAGS"
+])
+
+dnl AC_POSIX_SIGHANDLER() determines whether
+dnl signal handlers are posix compliant. This frag
+dnl has been adapted from readline's aclocal.m4.
+
+AC_DEFUN([AC_POSIX_SIGHANDLER],
+[AC_MSG_CHECKING([if signal handlers are posix compliant])
+AC_CACHE_VAL(ac_cv_posix_sighandler,
+[AC_TRY_COMPILE([#include <sys/types.h>
+#include <signal.h>
+#ifdef signal
+#undef signal
+#endif
+#ifdef __cplusplus
+extern "C"
+#endif
+void (*signal(void))(void);],
+[int i;], ac_cv_posix_sighandler=no, ac_cv_posix_sighandler=yes)])dnl
+AC_MSG_RESULT($ac_cv_posix_sighandler)
+if test $ac_cv_posix_sighandler = yes; then
+AC_DEFINE(HAVE_POSIX_SIGHANDLER,1,[Kconfig])
+fi
+])
+
+#------------------------------------------------------------------------
+# SC_PATH_TCLCONFIG --
+#
+#	Locate the tclConfig.sh file and perform a sanity check on
+#	the Tcl compile flags
+#
+# Arguments:
+#	none
+#
+# Results:
+#
+#	Adds the following arguments to configure:
+#		--with-tcl=...
+#
+#	Defines the following vars:
+#		TCL_BIN_DIR	Full path to the directory containing
+#				the tclConfig.sh file
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_PATH_TCLCONFIG], [
+    #
+    # Ok, let's find the tcl configuration
+    # First, look for one uninstalled.
+    # the alternative search directory is invoked by --with-tcl
+    #
+
+    if test x"${no_tcl}" = x ; then
+	# we reset no_tcl in case something fails here
+	no_tcl=true
+	AC_ARG_WITH(tcl, [  --with-tcl              directory containing tcl configuration (tclConfig.sh)], with_tclconfig=${withval})
+	AC_MSG_CHECKING([for Tcl configuration])
+	AC_CACHE_VAL(ac_cv_c_tclconfig,[
+
+	    # First check to see if --with-tcl was specified.
+	    if test x"${with_tclconfig}" != x ; then
+		if test -f "${with_tclconfig}/tclConfig.sh" ; then
+		    ac_cv_c_tclconfig=`(cd ${with_tclconfig}; pwd)`
+		else
+		    AC_MSG_ERROR([${with_tclconfig} directory doesn't contain tclConfig.sh])
+		fi
+	    fi
+
+	    # then check for a private Tcl installation
+	    if test x"${ac_cv_c_tclconfig}" = x ; then
+		for i in \
+			../tcl \
+			`ls -dr ../tcl[[8-9]].[[0-9]]* 2>/dev/null` \
+			../../tcl \
+			`ls -dr ../../tcl[[8-9]].[[0-9]]* 2>/dev/null` \
+			../../../tcl \
+			`ls -dr ../../../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/unix/tclConfig.sh" ; then
+			ac_cv_c_tclconfig=`(cd $i/unix; pwd)`
+			break
+		    fi
+		done
+	    fi
+
+	    # check in a few common install locations
+	    if test x"${ac_cv_c_tclconfig}" = x ; then
+		for i in ${prefix}/lib /usr/local/lib /usr/pkg/lib /usr/lib \
+			`ls -dr /usr/lib/tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/tclConfig.sh" ; then
+			ac_cv_c_tclconfig=`(cd $i; pwd)`
+			break
+		    fi
+		done
+	    fi
+
+	    # check in a few other private locations
+	    if test x"${ac_cv_c_tclconfig}" = x ; then
+		for i in \
+			${srcdir}/../tcl \
+			`ls -dr ${srcdir}/../tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/unix/tclConfig.sh" ; then
+		    ac_cv_c_tclconfig=`(cd $i/unix; pwd)`
+		    break
+		fi
+		done
+	    fi
+	])
+
+	if test x"${ac_cv_c_tclconfig}" = x ; then
+	    TCL_BIN_DIR="# no Tcl configs found"
+	    AC_MSG_WARN(Can't find Tcl configuration definitions)
+	    exit 1
+	else
+	    no_tcl=
+	    TCL_BIN_DIR=${ac_cv_c_tclconfig}
+	    AC_MSG_RESULT(found $TCL_BIN_DIR/tclConfig.sh)
+	fi
+    fi
+])
+
+#------------------------------------------------------------------------
+# SC_PATH_TKCONFIG --
+#
+#	Locate the tkConfig.sh file
+#
+# Arguments:
+#	none
+#
+# Results:
+#
+#	Adds the following arguments to configure:
+#		--with-tk=...
+#
+#	Defines the following vars:
+#		TK_BIN_DIR	Full path to the directory containing
+#				the tkConfig.sh file
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_PATH_TKCONFIG], [
+    #
+    # Ok, let's find the tk configuration
+    # First, look for one uninstalled.
+    # the alternative search directory is invoked by --with-tk
+    #
+
+    if test x"${no_tk}" = x ; then
+	# we reset no_tk in case something fails here
+	no_tk=true
+	AC_ARG_WITH(tk, [  --with-tk               directory containing tk configuration (tkConfig.sh)], with_tkconfig=${withval})
+	AC_MSG_CHECKING([for Tk configuration])
+	AC_CACHE_VAL(ac_cv_c_tkconfig,[
+
+	    # First check to see if --with-tkconfig was specified.
+	    if test x"${with_tkconfig}" != x ; then
+		if test -f "${with_tkconfig}/tkConfig.sh" ; then
+		    ac_cv_c_tkconfig=`(cd ${with_tkconfig}; pwd)`
+		else
+		    AC_MSG_ERROR([${with_tkconfig} directory doesn't contain tkConfig.sh])
+		fi
+	    fi
+
+	    # then check for a private Tk library
+	    if test x"${ac_cv_c_tkconfig}" = x ; then
+		for i in \
+			../tk \
+			`ls -dr ../tk[[8-9]].[[0-9]]* 2>/dev/null` \
+			../../tk \
+			`ls -dr ../../tk[[8-9]].[[0-9]]* 2>/dev/null` \
+			../../../tk \
+			`ls -dr ../../../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/unix/tkConfig.sh" ; then
+			ac_cv_c_tkconfig=`(cd $i/unix; pwd)`
+			break
+		    fi
+		done
+	    fi
+	    # check in a few common install locations
+	    if test x"${ac_cv_c_tkconfig}" = x ; then
+		for i in ${prefix}/lib /usr/local/lib /usr/pkg/lib /usr/lib \
+			`ls -dr /usr/lib/tk[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/tkConfig.sh" ; then
+			ac_cv_c_tkconfig=`(cd $i; pwd)`
+			break
+		    fi
+		done
+	    fi
+	    # check in a few other private locations
+	    if test x"${ac_cv_c_tkconfig}" = x ; then
+		for i in \
+			${srcdir}/../tk \
+			`ls -dr ${srcdir}/../tk[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/unix/tkConfig.sh" ; then
+			ac_cv_c_tkconfig=`(cd $i/unix; pwd)`
+			break
+		    fi
+		done
+	    fi
+	])
+	if test x"${ac_cv_c_tkconfig}" = x ; then
+	    TK_BIN_DIR="# no Tk configs found"
+	    AC_MSG_WARN(Can't find Tk configuration definitions)
+	    exit 1
+	else
+	    no_tk=
+	    TK_BIN_DIR=${ac_cv_c_tkconfig}
+	    AC_MSG_RESULT(found $TK_BIN_DIR/tkConfig.sh)
+	fi
+    fi
+
+])
+
+#------------------------------------------------------------------------
+# SC_LOAD_TCLCONFIG --
+#
+#	Load the tclConfig.sh file
+#
+# Arguments:
+#	
+#	Requires the following vars to be set:
+#		TCL_BIN_DIR
+#
+# Results:
+#
+#	Subst the following vars:
+#		TCL_BIN_DIR
+#		TCL_SRC_DIR
+#		TCL_LIB_FILE
+#
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_LOAD_TCLCONFIG], [
+    AC_MSG_CHECKING([for existence of $TCL_BIN_DIR/tclConfig.sh])
+
+    if test -f "$TCL_BIN_DIR/tclConfig.sh" ; then
+        AC_MSG_RESULT([loading])
+	. $TCL_BIN_DIR/tclConfig.sh
+    else
+        AC_MSG_ERROR([not found])
+    fi
+
+    AC_PATH_PROG(TCL_SCRIPT, tclsh${TCL_VERSION}, tclsh)
+
+    AC_SUBST(TCL_BIN_DIR)
+    AC_SUBST(TCL_SRC_DIR)
+    AC_SUBST(TCL_LIB_FILE)
+    AC_SUBST(TCL_LIBS)
+    AC_SUBST(TCL_DEFS)
+    AC_SUBST(TCL_SHLIB_LD_LIBS)
+    AC_SUBST(TCL_EXTRA_CFLAGS)
+    AC_SUBST(TCL_LD_FLAGS)
+    AC_SUBST(TCL_LIB_FILE)
+    AC_SUBST(TCL_STUB_LIB_FILE)
+    AC_SUBST(TCL_LIB_SPEC)
+    AC_SUBST(TCL_BUILD_LIB_SPEC)
+    AC_SUBST(TCL_STUB_LIB_SPEC)
+    AC_SUBST(TCL_BUILD_STUB_LIB_SPEC)
+    AC_SUBST(TCL_DBGX)
+])
+
+#------------------------------------------------------------------------
+# SC_LOAD_TKCONFIG --
+#
+#	Load the tkConfig.sh file
+#
+# Arguments:
+#	
+#	Requires the following vars to be set:
+#		TK_BIN_DIR
+#
+# Results:
+#
+#	Sets the following vars that should be in tkConfig.sh:
+#		TK_BIN_DIR
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_LOAD_TKCONFIG], [
+    AC_MSG_CHECKING([for existence of $TK_BIN_DIR/tkConfig.sh])
+
+    if test -f "$TK_BIN_DIR/tkConfig.sh" ; then
+        AC_MSG_RESULT([loading])
+	. $TK_BIN_DIR/tkConfig.sh
+    else
+        AC_MSG_ERROR([not found])
+    fi
+
+    AC_SUBST(TK_BIN_DIR)
+    AC_SUBST(TK_SRC_DIR)
+    AC_SUBST(TK_LIB_FILE)
+    AC_SUBST(TK_LIB_FLAG)
+    AC_SUBST(TK_LIB_SPEC)
+    AC_SUBST(TK_DBGX)
+])
+
+#------------------------------------------------------------------------
+# SC_PATH_TIX --
+#
+#	Locate the Tix installation.
+#
+# Arguments:
+#	None.
+#
+# Results:
+#
+#	Substs the following vars:
+#		TIX_TCL_LIB
+#		TIX_LIB_SPEC
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_PATH_TIX], [
+    AC_MSG_CHECKING(for Tix's Tcl library)
+
+    AC_ARG_WITH(tixlibrary, [  --with-tixlibrary      directory containing the Tix library files.], with_tixlibrary=${withval})
+
+    if test x"${with_tixlibrary}" != x ; then
+	if test -f "${with_tixlibrary}/Init.tcl" ; then
+	    ac_cv_tix_libdir=${with_tixlibrary}
+	else
+	    AC_MSG_ERROR([${with_tixlibrary} directory does not contain Tix's init file Init.tcl])
+	fi
+    else
+	AC_CACHE_VAL(ac_cv_tix_libdir, [
+	    for d in \
+	    `ls -dr /usr/local/lib/tix[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/local/share/tix[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/pkg/lib/tix[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/lib/tix[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/share/tix[[0-9]]* 2>/dev/null ` ; do
+		if test -f "$d/Init.tcl" ; then
+		ac_cv_tix_libdir=$d
+	        break
+	        fi
+	    done
+        ])
+    fi
+
+    AC_MSG_RESULT($ac_cv_tix_libdir)
+    TIX_TCL_LIB=$ac_cv_tix_libdir
+    AC_SUBST(TIX_TCL_LIB)
+
+    SC_LIB_SPEC(tix)
+    TIX_LIB_SPEC=$tix_LIB_SPEC
+    AC_SUBST(TIX_LIB_SPEC)
+])
+
+#------------------------------------------------------------------------
+# SC_LIB_SPEC --
+#
+#	Compute the name of an existing object library located in libdir
+#	from the given base name and produce the appropriate linker flags.
+#
+# Arguments:
+#	basename	The base name of the library without version
+#			numbers, extensions, or "lib" prefixes.
+#
+#	Requires:
+#
+# Results:
+#
+#	Defines the following vars:
+#		${basename}_LIB_NAME	The computed library name.
+#		${basename}_LIB_SPEC	The computed linker flags.
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_LIB_SPEC], [
+    AC_MSG_CHECKING(for $1 library)
+    eval "sc_lib_name_dir=${libdir}"
+    for i in \
+	    `ls -dr ${sc_lib_name_dir}/$1[[0-9]]*.lib 2>/dev/null ` \
+	    `ls -dr ${sc_lib_name_dir}/lib$1.* 2>/dev/null ` \
+	    `ls -dr ${sc_lib_name_dir}/lib$1[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/pkg/*/lib$1.so 2>/dev/null ` \
+	    `ls -dr /usr/pkg/*/lib$1[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/pkg/lib/lib$1.so 2>/dev/null ` \
+	    `ls -dr /usr/pkg/lib/lib$1[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/lib/$1[[0-9]]*.lib 2>/dev/null ` \
+	    `ls -dr /usr/lib/lib$1.so 2>/dev/null ` \
+	    `ls -dr /usr/lib/lib$1[[0-9]]* 2>/dev/null ` \
+	    `ls -dr /usr/local/lib/$1[[0-9]]*.lib 2>/dev/null ` \
+	    `ls -dr /usr/local/lib/lib$1.so 2>/dev/null ` \
+	    `ls -dr /usr/local/lib/lib$1[[0-9]]* 2>/dev/null ` ; do
+	if test -f "$i" ; then
+	    sc_lib_name_dir=`dirname $i`
+	    $1_LIB_NAME=`basename $i`
+	    break
+	fi
+    done
+
+    case "`uname -s`" in
+	*win32* | *WIN32* | *CYGWIN_NT*)
+	    $1_LIB_SPEC=${$1_LIB_NAME}
+	    ;;
+	*)
+	    # Strip off the leading "lib" and trailing ".a" or ".so"
+	    sc_lib_name_lib=`echo ${$1_LIB_NAME}|sed -e 's/^lib//' -e 's/\.so.*$//' -e 's/\.a$//'`
+	    $1_LIB_SPEC="-L${sc_lib_name_dir} -l${sc_lib_name_lib}"
+	    ;;
+    esac
+    if test "x${sc_lib_name_lib}" = x ; then
+	AC_MSG_ERROR(not found)
+    else
+	AC_MSG_RESULT(${$1_LIB_SPEC})
+    fi
+])
+
+#------------------------------------------------------------------------
+# SC_PUBLIC_TCL_HEADERS --
+#
+#	Locate the installed public Tcl header files
+#
+# Arguments:
+#	None.
+#
+# Requires:
+#
+# Results:
+#
+#	Adds a --with-tclinclude switch to configure.
+#	Result is cached.
+#
+#	Substs the following vars:
+#		TCL_INCLUDES
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_PUBLIC_TCL_HEADERS], [
+    AC_MSG_CHECKING(for Tcl public headers)
+
+    AC_ARG_WITH(tclinclude, [  --with-tclinclude      directory containing the public Tcl header files.], with_tclinclude=${withval})
+
+    if test x"${with_tclinclude}" != x ; then
+	if test -f "${with_tclinclude}/tcl.h" ; then
+	    ac_cv_c_tclh=${with_tclinclude}
+	else
+	    AC_MSG_ERROR([${with_tclinclude} directory does not contain Tcl public header file tcl.h])
+	fi
+    else
+	AC_CACHE_VAL(ac_cv_c_tclh, [
+	    # Use the value from --with-tclinclude, if it was given
+
+	    if test x"${with_tclinclude}" != x ; then
+		ac_cv_c_tclh=${with_tclinclude}
+	    else
+		# Check in the includedir, if --prefix was specified
+
+		eval "temp_includedir=${includedir}"
+		for i in \
+			${temp_includedir} /usr/local/include /usr/include /usr/pkg/include \
+			`ls -dr /usr/include/tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/tcl.h" ; then
+			ac_cv_c_tclh=$i
+			break
+		    fi
+		done
+	    fi
+	])
+    fi
+
+    # Print a message based on how we determined the include path
+
+    if test x"${ac_cv_c_tclh}" = x ; then
+	AC_MSG_ERROR(tcl.h not found.  Please specify its location with --with-tclinclude)
+    else
+	AC_MSG_RESULT(${ac_cv_c_tclh})
+    fi
+
+    # Convert to a native path and substitute into the output files.
+
+    INCLUDE_DIR_NATIVE=`echo ${ac_cv_c_tclh}`
+
+    TCL_INCLUDES="-I${INCLUDE_DIR_NATIVE}"
+
+    AC_SUBST(TCL_INCLUDES)
+])
+
+#------------------------------------------------------------------------
+# SC_PUBLIC_TK_HEADERS --
+#
+#	Locate the installed public Tk header files
+#
+# Arguments:
+#	None.
+#
+# Requires:
+#
+# Results:
+#
+#	Adds a --with-tkinclude switch to configure.
+#	Result is cached.
+#
+#	Substs the following vars:
+#		TK_INCLUDES
+#------------------------------------------------------------------------
+
+AC_DEFUN([SC_PUBLIC_TK_HEADERS], [
+    AC_MSG_CHECKING(for Tk public headers)
+
+    AC_ARG_WITH(tkinclude, [  --with-tkinclude      directory containing the public Tk header files.], with_tkinclude=${withval})
+
+    if test x"${with_tkinclude}" != x ; then
+	if test -f "${with_tkinclude}/tk.h" ; then
+	    ac_cv_c_tkh=${with_tkinclude}
+	else
+	    AC_MSG_ERROR([${with_tkinclude} directory does not contain Tk public header file tk.h])
+	fi
+    else
+	AC_CACHE_VAL(ac_cv_c_tkh, [
+	    # Use the value from --with-tkinclude, if it was given
+
+	    if test x"${with_tkinclude}" != x ; then
+		ac_cv_c_tkh=${with_tkinclude}
+	    else
+		# Check in the includedir, if --prefix was specified
+
+		eval "temp_includedir=${includedir}"
+		for i in \
+			${temp_includedir} /usr/local/include /usr/include /usr/pkg/include \
+			`ls -dr /usr/include/tk[[8-9]].[[0-9]]* 2>/dev/null` \
+			`ls -dr /usr/include/tcl[[8-9]].[[0-9]]* 2>/dev/null` ; do
+		    if test -f "$i/tk.h" ; then
+			ac_cv_c_tkh=$i
+			break
+		    fi
+		done
+	    fi
+	])
+    fi
+
+    # Print a message based on how we determined the include path
+
+    if test x"${ac_cv_c_tkh}" = x ; then
+	AC_MSG_ERROR(tk.h not found.  Please specify its location with --with-tkinclude)
+    else
+	AC_MSG_RESULT(${ac_cv_c_tkh})
+    fi
+
+    # Convert to a native path and substitute into the output files.
+
+    INCLUDE_DIR_NATIVE=`echo ${ac_cv_c_tkh}`
+
+    TK_INCLUDES="-I${INCLUDE_DIR_NATIVE}"
+
+    AC_SUBST(TK_INCLUDES)
+])
diff --git a/kernel/xenomai-v3.2.4/config/apirev b/kernel/xenomai-v3.2.4/config/apirev
new file mode 100644
index 0000000..60d3b2f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/apirev
@@ -0,0 +1 @@
+15
diff --git a/kernel/xenomai-v3.2.4/config/docbook.m4 b/kernel/xenomai-v3.2.4/config/docbook.m4
new file mode 100644
index 0000000..f0391c1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/docbook.m4
@@ -0,0 +1,170 @@
+#                                               -*- Autoconf -*-
+# Building Docbook-XML documents with the autotools.
+#
+# Check whether needed tools for generating Docbook XML doc are installed and
+# running. 
+#  - "docbook-root" is the name of the source tree subdirectory which is the
+# docbook documentation root. It is expected to contain at least : 
+#      catalog.in, used to match DTD generic addresses to their local copy ;
+#      pictures, with all the pictures referenced by the XML documents ;
+#      css, with the CSS referenced by the html documents.
+#
+#  - "generated-doc-root" is the name of the source tree subdirectory which 
+# contains the generated documentation. It is expected to contain at least :
+#      html/pictures with the pictures needed by the html documents ;
+#      html/css with the css needed by the html documents ;
+#      html/* one directory by html document ;
+#      pdf/*.pdf one pdf file by pdf document ;
+#
+#  - "docbook-dtd-version" is the version of the Docbook-XML DTD used.
+#
+# DBX_DOC_INIT(docbook-root, generated-doc-root, docbook-dtd-version)
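+#
+# A typical invocation from a configure.ac might be (the arguments shown
+# are illustrative, not mandated by this macro):
+#
+#     DBX_DOC_INIT(doc/docbook, doc/generated, 4.2)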
+# ------------------------------------------------------------------------------
+AC_DEFUN([DBX_DOC_INIT], 
+[
+DBX_DOC_ROOT="$1"
+AC_SUBST(DBX_DOC_ROOT)
+
+AC_MSG_CHECKING(whether compiling Docbook XML documentation)
+AC_ARG_ENABLE(dbx, 
+        AS_HELP_STRING([--enable-dbx],[Build Docbook XML documentation.]),
+	[case "$enableval" in
+	y | ye | yes) DBX_DOC=yes;;
+	*) DBX_DOC="" ;;
+	esac])
+
+if test \! -f "$srcdir/$1/catalog.in"; then
+    if test x$DBX_DOC = xyes;
+    then
+        AC_MSG_ERROR([$1/catalog.in could not be found in the source tree,
+DocBook documentation can not be generated.])
+    fi
+    AC_MSG_RESULT([not present.])
+else
+    AC_MSG_RESULT(${DBX_DOC:-no})
+fi
+AM_CONDITIONAL(DBX_DOC,[test "$DBX_DOC" = yes])
+
+# 
+DBX_GEN_DOC_ROOT="$2"
+AC_SUBST(DBX_GEN_DOC_ROOT)
+
+# First: search for needed tools.
+AC_CHECK_PROG(DBX_LINT, xmllint, xmllint)
+if test x"$DBX_LINT" = x -a -n "$DBX_DOC"; then
+   AC_MSG_ERROR([xmllint was not found. Check your PATH variable and try again.])
+fi
+AC_SUBST(DBX_LINT)
+
+
+AC_CHECK_PROG(DBX_XSLTPROC, xsltproc, xsltproc)
+if test x"$DBX_XSLTPROC" = x -a -n "$DBX_DOC"; then
+   AC_MSG_ERROR([xsltproc was not found. Check your PATH variable and try 
+again.])
+fi
+AC_SUBST(DBX_XSLTPROC)
+
+
+AC_CHECK_PROG(DBX_FOP, fop, fop)
+if test x"$DBX_FOP" = x -a -n "$DBX_DOC"; then
+   AC_MSG_ERROR([fop was not found. Check your PATH variable and try again.])
+fi
+AC_SUBST(DBX_FOP)
+
+# Second: search for DTD and XSL stylesheets.
+DBX_DTD_VERSION="$3"
+
+AC_MSG_CHECKING(whether Docbook XML documentation generation can use network.)
+AC_ARG_ENABLE(dbx-network,
+        AS_HELP_STRING([--enable-dbx-network],[Try to access Docbook DTD and
+XSL stylesheets through network (default is to die if local installation can not
+be found by configure).]),
+        [ case "$enable_dbx_network" in 
+           y | ye | yes )
+                DBX_NET=yes;;
+           n | no )
+                DBX_NET="";;
+          esac
+        ])
+
+# Do not define the --nonet xsltproc flag if the option --enable-dbx-network was
+# passed
+AC_MSG_RESULT(${DBX_NET:-no})
+if test -n "$DBX_NET"; then
+    unset DBX_MAYBE_NONET
+else
+    DBX_MAYBE_NONET=--nonet
+fi
+AC_SUBST(DBX_MAYBE_NONET)
+
+
+AC_MSG_CHECKING(for docbook-xml root dir)
+AC_ARG_WITH(dbx-root,
+        AS_HELP_STRING([--with-dbx-root],[specify the Docbook XML root (that 
+is, the directory where docbookx.dtd should be found). Default is to use
+well-known locations (or network if --enable-dbx-network was passed).]),
+        [DBX_ROOT="$withval"])
+if test x"$DBX_ROOT" = x; then
+   # Still not found, we will hence look for it using the "well-known"
+   # places (well... for the moment, only the Debian package directory) 
+   for dir in \
+       /usr/share/sgml/docbook/dtd/xml/$DBX_DTD_VERSION
+   do
+        if test -e $dir/docbookx.dtd; then
+           DBX_ROOT="$dir"; 
+           break;
+        fi
+   done
+fi
+AC_MSG_RESULT(${DBX_ROOT:-network})
+if test x"$DBX_ROOT" = x; then
+   if test x"$enable_dbx_network" != x  -a -n "$DBX_DOC"; then 
+        AC_MSG_ERROR([The Docbook XML DTD was not found, and accessing it
+through network is forbidden.])
+   fi
+   DBX_ROOT="http://www.oasis-open.org/docbook/xml/$DBX_DTD_VERSION/"
+else
+   DBX_ROOT="file://$DBX_ROOT"
+fi
+AC_SUBST(DBX_ROOT)
+
+
+AC_MSG_CHECKING(for docbook-xsl root dir)
+AC_ARG_WITH(docbook-xsl-root,
+	AS_HELP_STRING([--with-dbx-xsl-root],[specify the Docbook XML XSL
+stylesheet root. Default is to use well-known locations (or network if
+--enable-dbx-network was passed)]),
+        [ DBX_XSL_ROOT="$withval" ])
+if test x"$DBX_XSL_ROOT" = x; then
+   # Still not found, we will hence look for it using the "well-known"
+   # places (well... for the moment, only the Debian standard directory) 
+   for dir in \
+       /usr/share/sgml/docbook/stylesheet/xsl/nwalsh
+   do
+        if test -e "$dir/html/docbook.xsl"; then
+           DBX_XSL_ROOT="$dir"; 
+           break;
+        fi
+   done
+fi
+AC_MSG_RESULT(${DBX_XSL_ROOT:-network})
+if test x"$DBX_XSL_ROOT" = x; then
+   if test x"$enable_dbx_network" != x -a -n "$DBX_DOC"; then
+        AC_MSG_ERROR([The Docbook XSL stylesheets were not found, and
+accessing them through network is forbidden.])
+   fi
+   DBX_XSL_ROOT="http://http://docbook.sourceforge.net/release/xsl/current"
+else
+   DBX_XSL_ROOT="file://$DBX_XSL_ROOT"
+fi
+AC_SUBST(DBX_XSL_ROOT)
+
+
+DBX_ABS_SRCDIR=`case $srcdir in
+  [\\/]* | ?:[\\/]* ) echo : ;;
+  *) echo false ;;
+esac`
+AM_CONDITIONAL(DBX_ABS_SRCDIR, $DBX_ABS_SRCDIR)
+
+
+])
diff --git a/kernel/xenomai-v3.2.4/config/version-code b/kernel/xenomai-v3.2.4/config/version-code
new file mode 100644
index 0000000..351227f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/version-code
@@ -0,0 +1 @@
+3.2.4
diff --git a/kernel/xenomai-v3.2.4/config/version-label b/kernel/xenomai-v3.2.4/config/version-label
new file mode 100644
index 0000000..351227f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/config/version-label
@@ -0,0 +1 @@
+3.2.4
diff --git a/kernel/xenomai-v3.2.4/configure.ac b/kernel/xenomai-v3.2.4/configure.ac
new file mode 100644
index 0000000..e7a1701
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/configure.ac
@@ -0,0 +1,1062 @@
+dnl Process this file with autoconf to produce a configure script.
+AC_PREREQ(2.62)
+
+# The config/version-code file defines the general versioning data
+# as: <major>.<minor>.<subrev>, giving the full Xenomai version stamp.
+# config/apirev defines the revision level of the user API we
+# implement (which actually expresses the revision level of the
+# Copperplate library).  The kernel ABI is Cobalt-specific and is
+# defined for each architecture in the asm/features.h file.
+AC_INIT([Xenomai],m4_normalize(m4_include([config/version-label])),xenomai@xenomai.org)
+
+AC_CONFIG_HEADERS(include/xeno_config.h)
+AC_CONFIG_AUX_DIR(config)
+AC_CONFIG_MACRO_DIR([config])
+AC_CONFIG_SRCDIR(lib/cobalt/thread.c)
+AC_PREFIX_DEFAULT(/usr/xenomai)
+# We want $prefix to be set for the configure script
+if test x$prefix = xNONE; then
+   prefix=$ac_default_prefix
+fi
+
+version_code=`cat $srcdir/config/version-code`
+CONFIG_XENO_VERSION_MAJOR=`expr $version_code : '\([[0-9]]*\)'`
+CONFIG_XENO_VERSION_MINOR=`expr $version_code : '[[0-9]]*\.\([[0-9]]*\)'`
+CONFIG_XENO_REVISION_LEVEL=`expr $version_code : '[[0-9]]*\.[[0-9]]*\.\([[0-9]]*\)'`
+CONFIG_XENO_UAPI_LEVEL=`cat $srcdir/config/apirev`
+CONFIG_XENO_VERSION_STRING="$PACKAGE_VERSION"
+topdir=`cd $srcdir && pwd`
+
+dnl Find out whether we build for Cobalt core, i.e. dual kernel mode,
+dnl or Mercury (single image kernel). Defaults to Cobalt.
+rtcore_type=cobalt
+AC_MSG_CHECKING([whether we build for Cobalt or Mercury core])
+AC_ARG_WITH(core,
+    AS_HELP_STRING([--with-core=<cobalt | mercury>],[build for dual kernel or single image]),
+    [
+	case "$withval" in
+	"" | y | ye | yes | n | no)
+	    AC_MSG_ERROR([You must supply an argument to --with-core])
+	  ;;
+	cobalt|mercury)
+	   rtcore_type=$withval
+	   ;;
+	*)
+	    AC_MSG_ERROR([--with-core=<cobalt | mercury>])
+	esac
+    ])
+AC_MSG_RESULT($rtcore_type)
+
+AM_CONDITIONAL(XENO_COBALT,[test x$rtcore_type = xcobalt])
+test x$rtcore_type = xcobalt && AC_DEFINE(CONFIG_XENO_COBALT,1,[config])
+AM_CONDITIONAL(XENO_MERCURY,[test x$rtcore_type = xmercury])
+test x$rtcore_type = xmercury && AC_DEFINE(CONFIG_XENO_MERCURY,1,[config])
+XENO_TARGET_CORE=$rtcore_type
+
+if test "x$CFLAGS" = "x"; then
+	XENO_EMPTY_CFLAGS=true
+else
+	XENO_EMPTY_CFLAGS=false
+fi
+
+if eval test $includedir = /usr/include; then
+  AC_MSG_ERROR([Using /usr/include as includedir is not supported. Please change your --prefix or specify another --includedir])
+fi
+
+AC_CANONICAL_BUILD
+AC_CANONICAL_HOST
+AC_PROG_INSTALL
+
+AC_ARG_WITH(cc,
+    AS_HELP_STRING([--with-cc=compiler],[use specific C compiler]),
+    [
+	case "$withval" in
+	"" | y | ye | yes | n | no)
+	    AC_MSG_ERROR([You must supply an argument to --with-cc])
+	  ;;
+	esac
+	CC="$withval"
+    ])
+AC_PROG_CC
+
+# Do not let autoconf set the default value of CFLAGS
+if $XENO_EMPTY_CFLAGS; then
+	CFLAGS=""
+fi
+
+AC_PROG_CC_FOR_BUILD
+AC_PROG_GREP
+LT_PATH_NM
+
+XENO_SYMBOL_PREFIX=
+LT_SYS_SYMBOL_USCORE
+if test \! x$sys_symbol_underscore = xno; then
+   XENO_SYMBOL_PREFIX=_
+fi
+AC_SUBST(XENO_SYMBOL_PREFIX)
+
+AC_DEFINE_UNQUOTED(CONFIG_XENO_BUILD_STRING,"$build",[Build system alias])
+XENO_BUILD_STRING="$build"
+AC_DEFINE_UNQUOTED(CONFIG_XENO_HOST_STRING,"$host",[Host system alias])
+XENO_HOST_STRING="$host"
+XENO_BUILD_COMPILER="`$CC -v 2>&1 | tail -n 1`"
+AC_DEFINE_UNQUOTED(CONFIG_XENO_COMPILER,"$XENO_BUILD_COMPILER",[Compiler])
+
+AM_INIT_AUTOMAKE([foreign no-exeext dist-bzip2 tar-ustar subdir-objects])
+m4_ifdef([AM_SILENT_RULES],[AM_SILENT_RULES([yes])])
+AM_MAINTAINER_MODE
+AM_PROG_AS
+AM_PROG_LEX
+
+XENO_BUILD_ARGS="$ac_configure_args"
+
+AC_MSG_CHECKING([for target architecture])
+
+if test x$host_alias = x; then
+  build_for=$host
+else
+  build_for=$host_alias
+fi
+
+use_tls=no
+case "$build_for" in
+ i*86*-*)
+	use_tls=yes
+	target_cpu_arch=x86
+	CONFIG_XENO_DEFAULT_PERIOD=100000
+	;;
+ ppc-*|powerpc-*)
+	use_tls=yes
+	target_cpu_arch=powerpc
+	CONFIG_XENO_DEFAULT_PERIOD=100000
+	;;
+ arm*-*)
+	target_cpu_arch=arm
+	CONFIG_XENO_DEFAULT_PERIOD=1000000
+	;;
+ aarch64-*)
+	target_cpu_arch=arm64
+	CONFIG_XENO_DEFAULT_PERIOD=1000000
+	;;
+ x86_64-*|amd64-*)
+	use_tls=yes
+	target_cpu_arch=x86
+	CONFIG_XENO_DEFAULT_PERIOD=100000
+	;;
+ *)
+	if test $rtcore_type = cobalt; then
+	   echo ""
+	   echo "**********************************************"
+	   echo "Cobalt not supported over $build_for."
+	   echo "**********************************************"
+	   echo ""
+	   exit 1
+	else
+	   CONFIG_XENO_DEFAULT_PERIOD=100000
+	   target_cpu_arch=`echo $build_for|cut -d- -f1`
+	fi
+	;;
+esac
+
+AC_MSG_RESULT([$target_cpu_arch])
+XENO_TARGET_ARCH=$target_cpu_arch
+AC_ENABLE_SHARED
+AC_PROG_LIBTOOL
+
+dnl
+dnl Parse options
+dnl
+
+dnl Debug build (default: off, no symbols)
+
+debug_mode=
+debug_symbols=
+AC_MSG_CHECKING(whether to enable debug mode)
+AC_ARG_ENABLE(debug,
+	AS_HELP_STRING([--enable-debug], [Enable debug mode in programs]),
+	[case "$enableval" in
+	symbols)
+		debug_symbols=y
+		;;
+	y | yes | partial)
+		debug_mode=partial
+		debug_symbols=y
+		;;
+	full)
+		debug_mode=full
+		debug_symbols=y
+		;;
+	n | no)
+		debug_mode=
+		debug_symbols=
+		;;
+	*)
+		 AC_MSG_ERROR([invalid debug level $enableval])
+		 ;;
+	esac])
+AC_MSG_RESULT(${debug_mode:-no})
+AM_CONDITIONAL(XENO_DEBUG,[test \! x$debug_mode = x])
+test \! x$debug_mode = x && AC_DEFINE(CONFIG_XENO_DEBUG,1,[config])
+AM_CONDITIONAL(XENO_DEBUG_FULL,[test x$debug_mode = xfull])
+test x$debug_mode = xfull && AC_DEFINE(CONFIG_XENO_DEBUG_FULL,1,[config])
+
+dnl Demo (default: on)
+
+AC_ARG_ENABLE(demo,
+	AS_HELP_STRING([--disable-demo], [Disable demonstration code]))
+AM_CONDITIONAL(XENO_ENABLE_DEMO,[test x$enable_demo != xno])
+
+dnl Testsuite (default: on)
+
+AC_ARG_ENABLE(testsuite,
+	AS_HELP_STRING([--disable-testsuite], [Disable testsuite]))
+AM_CONDITIONAL(XENO_ENABLE_TESTSUITE,[test x$enable_testsuite != xno])
+
+dnl Low resolution clock (default: off)
+
+unset lores_clock
+AC_MSG_CHECKING(whether to enable the low resolution clock)
+AC_ARG_ENABLE(lores-clock,
+	AS_HELP_STRING([--enable-lores-clock], [Enable low resolution clock]),
+	[case "$enableval" in
+	y | yes) lores_clock=y ;;
+	*) unset lores_clock ;;
+	esac])
+AC_MSG_RESULT(${lores_clock:-no})
+if test x$lores_clock = x; then
+	AC_DEFINE(CONFIG_XENO_LORES_CLOCK_DISABLED,1,[config])
+fi
+
+dnl Raw monotonic clock (default: cobalt=on, mercury=off)
+
+if test $rtcore_type = cobalt; then
+   raw_monotonic_clock=y
+else
+   raw_monotonic_clock=
+fi
+AC_MSG_CHECKING(whether we may use CLOCK_MONOTONIC_RAW)
+AC_ARG_ENABLE(clock-monotonic-raw,
+	AS_HELP_STRING([--enable-clock-monotonic-raw], [Use CLOCK_MONOTONIC_RAW for timings]),
+	[case "$enableval" in
+	y | yes) raw_monotonic_clock=y ;;
+	*) unset raw_monotonic_clock ;;
+	esac])
+AC_MSG_RESULT(${raw_monotonic_clock:-no})
+if test x$raw_monotonic_clock = xy; then
+	AC_DEFINE(CONFIG_XENO_RAW_CLOCK_ENABLED,1,[config])
+fi
+
+checkflags="-nostdinc -isystem \$(SYSROOT)/usr/include -Wbitwise -Wno-transparent-union -D_GNU_SOURCE -D_XOPEN_SOURCE=500 -D_REENTRANT \$(DEFS) \$(DEFAULT_INCLUDES) \$(INCLUDES) \$(AM_CPPFLAGS) \$(CPPFLAGS) -I\$(top_srcdir)/include -isystem \$(shell \$(CC) -print-file-name=include) -include \$(top_builddir)/include/xeno_config.h \$(shell \$(CC) -dM -E -xc /dev/null|sed -e 's/^\\#define /-D/' -e \"s/ /=\'/\" -e \"s/\$\$/\'/\")"
+
+dnl Used with sparse
+AC_SUBST(CHECKFLAGS, $checkflags)
+
+dnl Enable assertions (default: depends on debug mode)
+
+test x$debug_mode = x || use_assert=y
+AC_MSG_CHECKING(whether assertions should be enabled)
+AC_ARG_ENABLE(assert,
+	AS_HELP_STRING([--enable-assert], [Enable runtime assertions]),
+	[case "$enableval" in
+	y | yes) use_assert=y ;;
+	*) unset use_assert ;;
+	esac])
+AC_MSG_RESULT(${use_assert:-no})
+
+dnl Enable asynchronous cancellation (default: off)
+
+async_cancel=
+AC_MSG_CHECKING(whether asynchronous cancellation of threads is enabled)
+AC_ARG_ENABLE(async-cancel,
+	AS_HELP_STRING([--enable-async-cancel], [Enable asynchronous cancellation]),
+	[case "$enableval" in
+	y | yes) async_cancel=y ;;
+	n | no) unset async_cancel ;;
+	esac])
+AC_MSG_RESULT(${async_cancel:-no})
+
+if test x$async_cancel = xy; then
+	AC_DEFINE(CONFIG_XENO_ASYNC_CANCEL,1,[config])
+fi
+
+dnl Work-around for broken PI with condvars on Mercury (default: off)
+
+unset workaround_condvar_pi
+AC_MSG_CHECKING(whether to enable the workaround for broken PI with condvars)
+AC_ARG_ENABLE(condvar-workaround,
+	AS_HELP_STRING([--enable-condvar-workaround], [Enable workaround for broken PI with condvars in glibc]),
+	[case "$enableval" in
+	y | yes) workaround_condvar_pi=y ;;
+	*) unset workaround_condvar_pi ;;
+	esac])
+AC_MSG_RESULT(${workaround_condvar_pi:-no})
+if test x$workaround_condvar_pi = xy; then
+   if test $rtcore_type = mercury; then
+	AC_DEFINE(CONFIG_XENO_WORKAROUND_CONDVAR_PI,1,[config])
+   else
+        AC_MSG_WARN([PI workaround for condvars useless over Cobalt - ignoring])
+   fi
+fi
+
+dnl Lazy schedparam propagation for Cobalt (default: off)
+
+unset lazy_setsched_update
+AC_MSG_CHECKING(whether to enable lazy scheduling parameter update)
+AC_ARG_ENABLE(lazy-setsched,
+	AS_HELP_STRING([--enable-lazy-setsched], [Enable lazy scheduling parameter update]),
+	[case "$enableval" in
+	y | yes) lazy_setsched_update=y ;;
+	*) unset lazy_setsched_update ;;
+	esac])
+AC_MSG_RESULT(${lazy_setsched_update:-no})
+if test x$lazy_setsched_update = xy; then
+   if test x$rtcore_type = xcobalt; then
+	AC_DEFINE(CONFIG_XENO_LAZY_SETSCHED,1,[config])
+   else
+        AC_MSG_WARN([No lazy scheduling parameter updates over Mercury - ignoring])
+   fi
+fi
+
+dnl Enable shared multi-processing (default: off)
+
+use_pshared=
+AC_MSG_CHECKING(whether shared multi-processing should be supported)
+AC_ARG_ENABLE(pshared,
+	AS_HELP_STRING([--enable-pshared], [Enable shared multi-processing for capable skins]),
+	[case "$enableval" in
+	y | yes) use_pshared=y ;;
+	*) unset use_pshared ;;
+	esac])
+AC_MSG_RESULT(${use_pshared:-no})
+
+if test x$use_pshared = xy; then
+	AC_DEFINE(CONFIG_XENO_PSHARED,1,[config])
+fi
+AM_CONDITIONAL(XENO_PSHARED,[test x$use_pshared = xy])
+
+dnl Allocator selection
+
+localmem_allocator=heapmem
+AC_MSG_CHECKING([for process-local memory allocator])
+AC_ARG_WITH(localmem,
+    AS_HELP_STRING([--with-localmem=<heapmem | tlsf>],[Select process-local memory allocator]),
+    [
+	case "$withval" in
+	"" | y | ye | yes | n | no)
+	    AC_MSG_ERROR([You must supply an argument to --with-localmem])
+	  ;;
+	heapmem|tlsf)
+	   localmem_allocator=$withval
+	   ;;
+	*)
+	    AC_MSG_ERROR([--with-localmem=<heapmem | tlsf>])
+	esac
+    ])
+AC_MSG_RESULT($localmem_allocator)
+
+dnl Registry support in user-space (FUSE-based, default: off)
+
+use_registry=
+registry_root=
+AC_MSG_CHECKING(whether the registry should be enabled)
+AC_ARG_ENABLE(registry,
+	AS_HELP_STRING([--enable-registry], [Export real-time objects to a registry]),
+	[case "$enableval" in
+	y | yes) use_registry=y; registry_root=/var/run/xenomai ;;
+	/*) use_registry=y; registry_root=$enableval ;;
+	*) unset use_registry ;;
+	esac])
+AC_MSG_RESULT(${use_registry:-no}${registry_root:+[,] mounted on ${registry_root}})
+
+if test x$use_registry = xy; then
+	PKG_CHECK_MODULES(FUSE, fuse)
+	FUSE_CFLAGS="$FUSE_CFLAGS -DFUSE_USE_VERSION=25"
+	AC_DEFINE(CONFIG_XENO_REGISTRY,1,[config])
+	AC_DEFINE_UNQUOTED(CONFIG_XENO_REGISTRY_ROOT,"$registry_root",[config])
+fi
+AM_CONDITIONAL(XENO_REGISTRY,[test x$use_registry = xy])
+
+dnl SMP support (default: on for cobalt/x86, off otherwise)
+
+CONFIG_SMP=
+if test $target_cpu_arch = x86 -a $rtcore_type = cobalt; then
+	CONFIG_SMP=y
+fi
+AC_MSG_CHECKING(for SMP support)
+AC_ARG_ENABLE(smp,
+	AS_HELP_STRING([--enable-smp], [Enable SMP support]),
+	[case "$enableval" in
+	y | yes) CONFIG_SMP=y ;;
+	*) unset CONFIG_SMP ;;
+	esac])
+AC_MSG_RESULT(${CONFIG_SMP:-no})
+
+dnl Runtime sanity checks (default: on)
+
+CONFIG_XENO_SANITY=y
+AC_MSG_CHECKING(whether to enable sanity checks)
+AC_ARG_ENABLE(sanity,
+	AS_HELP_STRING([--enable-sanity], [Enable sanity checks at runtime]),
+	[case "$enableval" in
+	y | yes) CONFIG_XENO_SANITY=y ;;
+	*) unset CONFIG_XENO_SANITY ;;
+	esac])
+AC_MSG_RESULT(${CONFIG_XENO_SANITY:-no})
+
+if test x$CONFIG_XENO_SANITY = xy; then
+  AC_DEFINE(CONFIG_XENO_SANITY,1,[config])
+else
+  AC_DEFINE(CONFIG_XENO_SANITY,0,[config])
+fi
+
+dnl VSYSCALL (default: enabled) for Cobalt/x86
+
+if test $target_cpu_arch = x86 -a $rtcore_type = cobalt; then
+  CONFIG_XENO_X86_VSYSCALL=y
+  AC_MSG_CHECKING(for x86 VSYSCALL availability)
+  AC_ARG_ENABLE(x86-vsyscall,
+	AS_HELP_STRING([--enable-x86-vsyscall], [Assume VSYSCALL enabled for issuing syscalls]),
+	[case "$enableval" in
+	y | yes) CONFIG_XENO_X86_VSYSCALL=y ;;
+	*) unset CONFIG_XENO_X86_VSYSCALL ;;
+	esac])
+  AC_MSG_RESULT(${CONFIG_XENO_X86_VSYSCALL:-no})
+fi
+
+dnl Documentation package.
+
+XENO_BUILD_DOC=
+XENO_DOC_GIT=
+AC_MSG_CHECKING(whether to build documentation)
+AC_ARG_ENABLE(doc-build,
+	AS_HELP_STRING([--enable-doc-build], [Build Xenomai documentation]),
+	[case "$enableval" in
+	y | yes) XENO_BUILD_DOC=y ;;
+	n | no) ;;
+	*) if test \! x$enableval = x; then
+	      XENO_BUILD_DOC=y
+	      XENO_DOC_GIT=$enableval
+	   fi
+	   ;;
+	esac])
+AM_CONDITIONAL(XENO_BUILD_DOC,[test "$XENO_BUILD_DOC" = y])
+AC_SUBST(XENO_DOC_GIT)
+
+AC_CHECK_PROG(DOXYGEN, doxygen, doxygen)
+
+if test x${XENO_BUILD_DOC} = xy -a x"$DOXYGEN" = x ; then
+   AC_MSG_ERROR([Missing the Doxygen tools to build the documentation])
+fi
+
+AC_CHECK_PROG(DOXYGEN_HAVE_DOT, dot, YES, NO)
+if test x"$DOXYGEN_HAVE_DOT" = xYES ; then
+   DOXYGEN_SHOW_INCLUDE_FILES=NO
+else
+   DOXYGEN_SHOW_INCLUDE_FILES=YES
+fi
+
+LATEX_BATCHMODE=YES
+LATEX_MODE=batch
+AC_MSG_CHECKING(for LaTeX mode)
+AC_ARG_ENABLE(verbose-latex,
+	AS_HELP_STRING([--enable-verbose-latex], [Disable LaTeX non-stop mode]),
+	[case "$enableval" in
+	y | yes)
+	   LATEX_BATCHMODE=NO
+	   LATEX_MODE=non-stop
+	   ;;
+	*) ;;
+	esac])
+AC_MSG_RESULT(${LATEX_MODE})
+
+AC_CHECK_PROG(ASCIIDOC, asciidoc, asciidoc)
+if test x${XENO_BUILD_DOC} = xy -a x"$ASCIIDOC" = x ; then
+   AC_MSG_ERROR([Missing the asciidoc tool to build the documentation])
+fi
+AC_CHECK_PROG(A2X, a2x, a2x)
+if test x${XENO_BUILD_DOC} = xy -a x"$A2X" = x ; then
+   AC_MSG_ERROR([Missing the a2x tool to build the documentation])
+fi
+AC_CHECK_PROG(W3M, w3m, w3m)
+if test x${XENO_BUILD_DOC} = xy -a x"$W3M" = x ; then
+   AC_MSG_ERROR([Missing the w3m tool to build the documentation])
+fi
+
+dnl Set better default values for pdfdir, mandir and htmldir
+dnl This won't override user settings, unless the user wants
+dnl the default values, which we ban...
+
+if test x$pdfdir = x'${docdir}'; then
+   pdfdir='${docdir}/pdf'
+fi
+AC_SUBST(pdfdir)
+if test x$mandir = x'${docdir}'; then
+   mandir='${docdir}/man'
+fi
+AC_SUBST(mandir)
+if test x$htmldir = x'${docdir}'; then
+   htmldir='${docdir}/html'
+fi
+AC_SUBST(htmldir)
+
+dnl Check for Valgrind client API support.
+dnl Some GCC releases produce broken assembly code for Valgrind
+dnl client calls, so we check this too. --disable-valgrind-client
+dnl may be used to forcibly turn this API off.
+
+AC_CHECK_HEADER(valgrind/valgrind.h,CONFIG_XENO_VALGRIND_API=y)
+
+AC_MSG_CHECKING(for Valgrind client API)
+AC_ARG_ENABLE(valgrind-client,
+	AS_HELP_STRING([--enable-valgrind-client], [Enable Valgrind client API]),
+	[case "$enableval" in
+	n | no) unset CONFIG_XENO_VALGRIND_API ;;
+	esac])
+AC_MSG_RESULT(${CONFIG_XENO_VALGRIND_API:-no})
+
+if test \! x$CONFIG_XENO_VALGRIND_API = x ; then
+   AC_MSG_CHECKING([whether GCC emits sane code for Valgrind calls])
+   AC_COMPILE_IFELSE([AC_LANG_PROGRAM([[#include <valgrind/valgrind.h>]],
+   				      [[return RUNNING_ON_VALGRIND;]])],
+				      [ac_cv_valgrind_client=yes],
+				      [ac_cv_valgrind_client="no (DISABLING)"])
+   if [[ \! "$ac_cv_valgrind_client" = yes ]]; then
+      unset CONFIG_XENO_VALGRIND_API
+   fi
+   AC_MSG_RESULT([$ac_cv_valgrind_client])
+fi
+
+test x$CONFIG_XENO_VALGRIND_API = xy && AC_DEFINE(CONFIG_XENO_VALGRIND_API,1,[config])
+
+dnl Check for obstack support in *libc
+AC_CHECK_HEADERS(obstack.h,libc_has_obstack=y)
+AM_CONDITIONAL(XENO_PRIVATE_OBSTACK,[test x$libc_has_obstack = x])
+
+dnl Check for presence of some headers
+AC_CHECK_HEADERS(mqueue.h)
+
+dnl Check for presence of some routines we need
+save_LIBS="$LIBS"
+LIBS="$LIBS -lrt -lpthread"
+AC_CHECK_FUNCS([pthread_mutexattr_setprotocol	\
+		pthread_mutexattr_getprotocol	\
+		pthread_mutexattr_getprioceiling \
+		pthread_mutexattr_setprioceiling \
+		pthread_mutexattr_setrobust	\
+		pthread_mutexattr_setrobust_np	\
+		pthread_mutex_getprioceiling	\
+		pthread_mutex_setprioceiling	\
+		pthread_condattr_getclock	\
+		pthread_condattr_setclock	\
+		pthread_spin_lock fork		\
+		pthread_attr_setaffinity_np	\
+		pthread_setaffinity_np		\
+		pthread_getattr_np		\
+		pthread_atfork			\
+		pthread_setname_np		\
+		pthread_setschedprio		\
+		sched_getcpu			\
+		clock_nanosleep			\
+		shm_open			\
+		shm_unlink			\
+		backtrace])
+LIBS="$save_LIBS"
+
+save_CPPFLAGS="$CPPFLAGS"
+CPPFLAGS="$CPPFLAGS -D_GNU_SOURCE"
+AC_CHECK_DECLS([PTHREAD_PRIO_NONE], [], [], [#include <pthread.h>])
+AC_CHECK_DECLS([PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP], [], [], [#include <pthread.h>])
+AC_CHECK_DECLS([PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP], [], [], [#include <pthread.h>])
+CPPFLAGS=$save_CPPFLAGS
+
+dnl If we can't set the clock for condvar timeouts, then
+dnl we have to restrict the Copperplate clock to CLOCK_REALTIME over
+dnl Mercury unconditionally. Cobalt is different: although we may not
+dnl have pthread_condattr_setclock() available from the threading library,
+dnl Copperplate is still able to attach Cobalt condvars to specific clocks
+dnl internally, therefore we don't have to use a restricted clock in
+dnl Copperplate.
+dnl
+dnl In effect this means that updating the host system date may affect
+dnl wait times of all blocking services implemented by Copperplate over
+dnl Mercury, but will only affect explicit calls to pthread_cond_timedwait()
+dnl over Cobalt.
+dnl
+dnl This is a provision for running over legacy threading libraries
+dnl such as linuxthreads.
+dnl
+dnl CAUTION: the CLOCK_COPPERPLATE value is part of the ABI between
+dnl the Xenomai core libraries and the applications. Therefore it shall
+dnl remain stable even if applications depend on a different libc
+dnl than Xenomai libraries were built against originally. Hence the
+dnl built-in CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED flag, which
+dnl won't vary for a given Xenomai installation.
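+dnl For example, a Mercury build over a threading library lacking
+dnl pthread_condattr_setclock() (e.g. linuxthreads) gets
+dnl CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED baked in, pinning
+dnl CLOCK_COPPERPLATE to CLOCK_REALTIME for every application built
+dnl on that installation.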
+
+if test $rtcore_type = mercury; then
+   AC_CHECK_FUNC(pthread_condattr_setclock,,
+	 [AC_DEFINE(CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED, 1,[config])])
+fi
+
+dnl Check that Copperplate can implement a shared heap if
+dnl --enable-pshared was given.
+if test x$use_pshared = xy; then
+  AC_CHECK_FUNC(shm_open,,
+      [AC_MSG_ERROR([shm_open() is missing, --disable-pshared is required])])
+fi
+
+dnl
+dnl Produce the info needed to build xeno_config.h
+dnl
+
+AC_DEFINE_UNQUOTED(CONFIG_XENO_VERSION_MAJOR,$CONFIG_XENO_VERSION_MAJOR,[config])
+AC_DEFINE_UNQUOTED(CONFIG_XENO_VERSION_MINOR,$CONFIG_XENO_VERSION_MINOR,[config])
+AC_DEFINE_UNQUOTED(CONFIG_XENO_REVISION_LEVEL,$CONFIG_XENO_REVISION_LEVEL,[config])
+AC_DEFINE_UNQUOTED(CONFIG_XENO_UAPI_LEVEL,$CONFIG_XENO_UAPI_LEVEL,[config])
+AC_DEFINE_UNQUOTED(CONFIG_XENO_VERSION_STRING,"$CONFIG_XENO_VERSION_STRING",[config])
+AC_DEFINE_UNQUOTED(CONFIG_XENO_PREFIX,"$prefix",[config])
+AC_DEFINE_UNQUOTED(CONFIG_XENO_BUILD_ARGS,"$XENO_BUILD_ARGS",[config])
+
+dnl
+dnl Features we enabled and likely want to find at kernel level.
+dnl When applicable, we reuse the kernel option symbol so that we
+dnl don't need to make particular cases with kernel code which may
+dnl also be compiled in user-space libs.
+dnl
+
+test x$CONFIG_XENO_X86_VSYSCALL = xy && AC_DEFINE(CONFIG_XENO_X86_VSYSCALL,1,[config])
+test x$CONFIG_SMP = xy && AC_DEFINE(CONFIG_SMP,1,[config])
+
+dnl
+dnl Userland may want to know about MMU availability on the target.
+dnl For now, we assume that having fork() means having an MMU.
+dnl
+test x$ac_cv_func_fork = xyes && AC_DEFINE(CONFIG_MMU,1,[config])
+
+AM_CONDITIONAL(CONFIG_XENO_SHARED,[test "$enable_shared" = 'yes'])
+
+# Default sampling period (ns) used in various tests
+AC_DEFINE_UNQUOTED(CONFIG_XENO_DEFAULT_PERIOD,$CONFIG_XENO_DEFAULT_PERIOD,[config])
+
+dnl Allocator for Copperplate. Note: in dual kernel mode, we don't
+dnl want malloc, no matter what: pick either heapmem or tlsf, defaults
+dnl to heapmem. Force switch to malloc over the Mercury core in debug
+dnl mode, to ease debugging with valgrind, instrumented glibc etc.
+
+if test $rtcore_type = cobalt -o x$debug_mode = x; then
+   case $localmem_allocator in
+   heapmem)
+	    AC_DEFINE(CONFIG_XENO_HEAPMEM,1,[config])
+	    use_heapmem=y
+	    use_tlsf=
+	    ;;
+   tlsf)
+	    AC_DEFINE(CONFIG_XENO_TLSF,1,[config])
+	    use_tlsf=y
+	    use_heapmem=
+	    ;;
+   esac
+else
+	use_heapmem=
+	use_tlsf=
+	AC_MSG_WARN([using malloc() for private memory in debug mode])
+fi
+AM_CONDITIONAL(XENO_TLSF,[test x$use_tlsf = xy])
+AM_CONDITIONAL(XENO_HEAPMEM,[test x$use_heapmem = xy])
+
+dnl Check for atomic builtins. For now we only check for the legacy
+dnl interface, i.e. __sync_*.
+
+AC_CACHE_CHECK([whether the compiler provides atomic builtins], ac_cv_atomic_builtins, [
+save_LIBS="$LIBS"
+LIBS=
+AC_TRY_LINK([
+int atomic_sub(int i) { return __sync_sub_and_fetch(&i, 1); }
+int atomic_add(int i) { return __sync_add_and_fetch(&i, 1); }
+], [], ac_cv_atomic_builtins="yes")
+LIBS="$save_LIBS"
+])
+if test "$ac_cv_atomic_builtins" != "yes"; then
+   AC_MSG_ERROR([compiler does not support atomic builtins])
+fi
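+
+dnl The probe above deliberately links calls such as
+dnl __sync_add_and_fetch(&i, 1) with an empty LIBS, so the builtins
+dnl must not drag in an external library such as libatomic.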
+
+unset want_fortify
+AC_MSG_CHECKING(for fortify support)
+AC_ARG_ENABLE([fortify],
+	      AS_HELP_STRING([--enable-fortify],
+			     [Enable _FORTIFY_SOURCE]),
+	      [case "$enableval" in
+	      y | yes) want_fortify=yes;;
+	      *) want_fortify=no;;
+	      esac])
+AC_MSG_RESULT(${want_fortify:-autodetect})
+AC_CHECK_FUNC(__vfprintf_chk,
+      [AC_DEFINE(CONFIG_XENO_FORTIFY, 1,[config])],
+      [if test x"$want_fortify" = "xyes"; then
+      AC_MSG_ERROR([Fortify support enabled but not available in *libc])
+      fi])
+
+dnl Exported CFLAGS and LDFLAGS, shared with internal flags
+XENO_USER_APP_CFLAGS="-D_GNU_SOURCE -D_REENTRANT -fasynchronous-unwind-tables"
+XENO_USER_APP_LDFLAGS=
+
+if test x$use_registry = xy; then
+   XENO_FUSE_CFLAGS=$FUSE_CFLAGS
+   XENO_USER_APP_LDFLAGS="$XENO_USER_APP_LDFLAGS $FUSE_LIBS"
+fi
+
+dnl Internal CFLAGS and LDFLAGS, may be enhanced per-arch below
+XENO_USER_CFLAGS="$XENO_USER_APP_CFLAGS -pipe -fstrict-aliasing \
+-Wall -Wstrict-prototypes -Wmissing-prototypes -Wno-long-long \
+-Wno-unused-parameter -Wno-format-truncation -Werror -Wformat-security \
+-D__XENO__ -D__IN_XENO__"
+if test x$want_fortify = xyes -a x$debug_mode != xfull; then
+   XENO_USER_CFLAGS="$XENO_USER_CFLAGS -U_FORTIFY_SOURCE -D_FORTIFY_SOURCE=2"
+fi
+XENO_USER_LDADD="$XENO_USER_APP_LDFLAGS"
+
+dnl Add any flags forced on the command line, but only
+dnl for building apps.
+XENO_USER_APP_CFLAGS="$CFLAGS $XENO_USER_APP_CFLAGS"
+XENO_USER_APP_LDFLAGS="$LDFLAGS  $XENO_USER_APP_LDFLAGS"
+
+if test x$debug_mode = xpartial; then
+   XENO_USER_CFLAGS="-g -O2 $XENO_USER_CFLAGS"
+elif test x$debug_mode = xfull; then
+   XENO_USER_CFLAGS="-g -O0 $XENO_USER_CFLAGS"
+elif test x$debug_symbols = xy; then
+   XENO_USER_CFLAGS="-g -O2 $XENO_USER_CFLAGS"
+else
+   XENO_USER_CFLAGS="-O2 $XENO_USER_CFLAGS"
+fi
+
+if test x$use_assert = x; then
+   XENO_USER_CFLAGS="-DNDEBUG $XENO_USER_CFLAGS"
+fi
+
+XENO_USER_CFLAGS_STDLIB="$XENO_USER_CFLAGS"
+XENO_USER_CFLAGS="$XENO_USER_CFLAGS -I$topdir/include/$rtcore_type"
+
+AC_MSG_CHECKING([whether ld supports @file])
+AC_CACHE_VAL(ac_cv_ld_file_option,
+  AC_LANG_SAVE
+  AC_LANG_C
+  save_LDFLAGS="$LDFLAGS"
+  [LDFLAGS="-Wl,@/dev/null"]
+  AC_LINK_IFELSE([AC_LANG_SOURCE([main(){}])],
+    [ac_cv_ld_file_option=yes],
+    [ac_cv_ld_file_option=no])
+  LDFLAGS="$save_LDFLAGS"
+  AC_LANG_RESTORE)
+AC_MSG_RESULT(${ac_cv_ld_file_option:-no})
+LD_FILE_OPTION=$ac_cv_ld_file_option
+AC_SUBST(LD_FILE_OPTION)
+
+AC_MSG_CHECKING(whether to enable dlopening of Xenomai libraries)
+AC_ARG_ENABLE(dlopen-libs,
+	AS_HELP_STRING([--enable-dlopen-libs], [Allow dynamic loading of Xenomai libraries]),
+	[case "$enableval" in
+	y | yes) CONFIG_XENO_LIBS_DLOPEN=y ;;
+	*) CONFIG_XENO_LIBS_DLOPEN=$enableval ;;
+	esac])
+AC_MSG_RESULT(${CONFIG_XENO_LIBS_DLOPEN:-no})
+if test x$CONFIG_XENO_LIBS_DLOPEN = xy; then
+	AC_DEFINE(CONFIG_XENO_LIBS_DLOPEN,1,[config])
+	AC_DEFINE_UNQUOTED(CONFIG_XENO_TLS_MODEL,"global-dynamic",[TLS model])
+	XENO_LIB_LDFLAGS="-Wl,-z -Wl,nodelete"
+else
+	AC_DEFINE_UNQUOTED(CONFIG_XENO_TLS_MODEL,"initial-exec",[TLS model])
+	XENO_LIB_LDFLAGS="-Wl,-z -Wl,nodlopen"
+fi
+AM_CONDITIONAL(CONFIG_XENO_LIBS_DLOPEN,[test x$CONFIG_XENO_LIBS_DLOPEN = xy])
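+
+dnl Rationale: dlopened libraries cannot rely on the faster
+dnl initial-exec TLS model, so global-dynamic is selected whenever
+dnl --enable-dlopen-libs is given; -z nodelete (resp. -z nodlopen)
+dnl enforces the matching runtime constraint on the DSOs.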
+
+AC_MSG_CHECKING(whether to enable TLS support)
+AC_ARG_ENABLE([tls],
+	    AS_HELP_STRING([--enable-tls],
+			   [Enable thread local storage]),
+	    [use_tls=$enableval])
+AC_MSG_RESULT(${use_tls:-autodetect})
+
+dnl Check whether the compiler supports the __thread keyword.
+if test "x$use_tls" != xno; then
+	AC_CACHE_CHECK([for __thread keyword], libc_cv_gcc_tls,
+	[cat > conftest.c <<\EOF
+__thread int a __attribute__ ((tls_model ("initial-exec"))) = 42;
+__thread int b __attribute__ ((tls_model ("global-dynamic"))) = 12;
+EOF
+	if AC_TRY_COMMAND([${CC-cc} $CFLAGS $CPPFLAGS -c -Werror conftest.c >&AS_MESSAGE_LOG_FD]); then
+		libc_cv_gcc_tls=yes
+	else
+		libc_cv_gcc_tls=no
+	fi
+	rm -f conftest*])
+	if test "$libc_cv_gcc_tls" = yes; then
+		AC_DEFINE(HAVE_TLS,1,[config])
+	fi
+fi
+
+AC_MSG_CHECKING(location for test executables)
+AC_ARG_WITH(testdir,
+    AS_HELP_STRING([--with-testdir=<test-exec-dir>],[location for test executables (defaults to $bindir)]),
+    [
+	case "$withval" in
+	"" | y | ye | yes | n | no)
+	    AC_MSG_ERROR([You must supply an argument to --with-testdir])
+	  ;;
+	esac
+	XENO_TEST_DIR="$withval"
+    ], [XENO_TEST_DIR=$bindir])
+AC_MSG_RESULT($XENO_TEST_DIR)
+
+demodir='${exec_prefix}/demo'
+AC_MSG_CHECKING(location for demo programs)
+AC_ARG_WITH(demodir,
+    AS_HELP_STRING([--with-demodir=<demo-program-dir>],[location for demo programs (defaults to $exec_prefix/demo)]),
+    [
+	case "$withval" in
+	"" | y | ye | yes | n | no)
+	    AC_MSG_ERROR([You must supply an argument to --with-demodir])
+	  ;;
+	esac
+	XENO_DEMO_DIR="$withval"
+    ], [XENO_DEMO_DIR=$demodir])
+AC_MSG_RESULT($XENO_DEMO_DIR)
+
+AC_MSG_CHECKING([for test source generation])
+AC_RUN_IFELSE([AC_LANG_PROGRAM([[ ]], [[ ]])],
+    [AC_MSG_RESULT(ok)], [AC_MSG_RESULT(failed)], [AC_MSG_RESULT(untestable)])
+
+dnl CAUTION: We need to have the CONFIG_XENO_XX symbols always
+dnl defined when the configuration header is read, but we want the
+dnl Autoconf-produced symbols to be defined only when compiling
+dnl Xenomai. This way, we won't pollute the namespace with the latter
+dnl when our configuration header is indirectly included by a client
+dnl application. To achieve this, we ask autoheader to produce the
+dnl following header structure:
+dnl #define CONFIG_XX
+dnl #define CONFIG_XX ...
+dnl #ifdef __IN_XENO__
+dnl <Autoconf-defined symbols>
+dnl #endif /* __IN_XENO__ */
+dnl This is quite a hack since we have to rely on the fact that
+dnl all Autoconf-generated symbols are lexicographically sorted
+dnl after CONFIG_XENO_XX ones, but, well...
+dnl Use a key which will cause the verbatim string to be put after
+dnl all CONFIG_XENO_XX symbols, but still before any Autoconf-generated
+dnl symbol, hence CONFIG_XENO___.
+AH_VERBATIM(CONFIG_XENO___,[#ifdef __IN_XENO__])
+
+dnl Now we can close the conditional section, right after all
+dnl Autoconf-generated symbols have been listed.
+AH_BOTTOM([#endif /* __IN_XENO__ */])
+
+if test $rtcore_type = cobalt; then
+   XENO_USER_CFLAGS="-I$topdir/lib/cobalt/arch/$target_cpu_arch/include -I$topdir/kernel/cobalt/arch/$target_cpu_arch/include $XENO_USER_CFLAGS"
+   XENO_COBALT_CFLAGS="$XENO_USER_CFLAGS"
+   case "$build_for" in
+    i*86*-*) XENO_COBALT_CFLAGS="$XENO_COBALT_CFLAGS -fno-omit-frame-pointer";;
+    esac
+
+dnl Build wrapping information. XENO_POSIX_WRAPPERS lists all wrapping
+dnl directives in a format the linker understands, for building the
+dnl in-tree executables which require POSIX symbol wrapping.
+
+   modechk_wrappers="$topdir/lib/cobalt/modechk.wrappers"
+   cobalt_wrappers="$topdir/lib/cobalt/cobalt.wrappers"
+   if [[ $ac_cv_ld_file_option = yes ]]; then
+	XENO_POSIX_WRAPPERS="-Wl,@$modechk_wrappers -Wl,@$cobalt_wrappers"
+   else
+	XENO_POSIX_WRAPPERS=`cat $modechk_wrappers $cobalt_wrappers | \
+			while read wrap_option symbol ; do \
+				echo -n "-Wl,$wrap_option,$symbol " ; \
+			done`
+   fi
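+
+dnl For instance, a wrappers file line of the form "--wrap open"
+dnl either reaches ld verbatim through @file, or is rewritten by
+dnl the loop above into "-Wl,--wrap,open" when @file is unsupported.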
+
+   AC_SUBST(XENO_POSIX_WRAPPERS)
+   AC_SUBST([CONFIG_STATUS_DEPENDENCIES], ["$modechk_wrappers $cobalt_wrappers"])
+fi
+
+dnl Multi-library support.
+AC_MSG_CHECKING([whether to enable soname suffix for libraries])
+AC_ARG_ENABLE([so-suffix],
+   [AS_HELP_STRING([--enable-so-suffix],
+      [enable soname suffix (for Mercury only)])],
+   [enable_so_suffix=$enableval],
+   [enable_so_suffix="no"])
+AC_MSG_RESULT(${enable_so_suffix})
+if test "$enable_so_suffix" = "yes"; then
+   if test "$rtcore_type" != mercury; then
+      AC_MSG_ERROR([soname suffix is only allowed for Mercury core])
+   else
+      CORE="_$rtcore_type"
+   fi
+fi
+
+dnl
+dnl Build the Makefiles
+dnl
+
+XENO_AUTOINIT_LDFLAGS='$(top_builddir)/lib/boilerplate/init/bootstrap-internal.o'" -Wl,--wrap=main -Wl,--dynamic-list=$topdir/scripts/dynlist.ld"
+AC_SUBST(XENO_AUTOINIT_LDFLAGS)
+
+XENO_CORE_LDADD="\$(top_builddir)/lib/$rtcore_type/lib${rtcore_type}.la"
+if test $rtcore_type = cobalt; then
+   XENO_CORE_LDADD="$XENO_CORE_LDADD \$(top_builddir)/lib/cobalt/libmodechk.la"
+fi
+AC_SUBST(XENO_CORE_LDADD)
+
+AC_SUBST(DOXYGEN_SHOW_INCLUDE_FILES)
+AC_SUBST(DOXYGEN_HAVE_DOT)
+AC_SUBST(DOXYGEN)
+AC_SUBST(LATEX_BATCHMODE)
+AC_SUBST(LATEX_MODE)
+
+AC_SUBST(ASCIIDOC)
+AC_SUBST(A2X)
+AC_SUBST(W3M)
+
+AC_SUBST(XENO_TARGET_CORE)
+AC_SUBST(XENO_TARGET_ARCH)
+AC_SUBST(XENO_BUILD_STRING)
+AC_SUBST(XENO_HOST_STRING)
+AC_SUBST(XENO_COBALT_CFLAGS)
+AC_SUBST(XENO_LIB_LDFLAGS)
+AC_SUBST(XENO_USER_CFLAGS)
+AC_SUBST(XENO_USER_CFLAGS_STDLIB)
+AC_SUBST(XENO_USER_LDADD)
+AC_SUBST(XENO_USER_APP_CFLAGS)
+AC_SUBST(XENO_USER_APP_LDFLAGS)
+AC_SUBST(XENO_FUSE_CFLAGS)
+AC_SUBST(XENO_TEST_DIR)
+AC_SUBST(XENO_DEMO_DIR)
+AC_SUBST(XENO_BUILD_COMPILER)
+AC_SUBST(XENO_BUILD_ARGS)
+AC_SUBST(CORE)
+
+AC_CONFIG_FILES([ \
+	Makefile \
+	config/Makefile \
+	scripts/Makefile \
+	scripts/xeno-config:scripts/xeno-config-$rtcore_type.in \
+	scripts/xeno \
+	lib/Makefile \
+	lib/boilerplate/Makefile \
+	lib/boilerplate/init/Makefile \
+	lib/cobalt/Makefile \
+	lib/cobalt/arch/Makefile \
+	lib/cobalt/arch/arm/Makefile \
+	lib/cobalt/arch/arm/include/Makefile \
+	lib/cobalt/arch/arm/include/asm/Makefile \
+	lib/cobalt/arch/arm/include/asm/xenomai/Makefile \
+	lib/cobalt/arch/arm64/Makefile \
+	lib/cobalt/arch/arm64/include/Makefile \
+	lib/cobalt/arch/arm64/include/asm/Makefile \
+	lib/cobalt/arch/arm64/include/asm/xenomai/Makefile \
+	lib/cobalt/arch/powerpc/Makefile \
+	lib/cobalt/arch/powerpc/include/Makefile \
+	lib/cobalt/arch/powerpc/include/asm/Makefile \
+	lib/cobalt/arch/powerpc/include/asm/xenomai/Makefile \
+	lib/cobalt/arch/x86/Makefile \
+	lib/cobalt/arch/x86/include/Makefile \
+	lib/cobalt/arch/x86/include/asm/Makefile \
+	lib/cobalt/arch/x86/include/asm/xenomai/Makefile \
+	lib/mercury/Makefile \
+	lib/copperplate/Makefile \
+	lib/copperplate/regd/Makefile \
+	lib/alchemy/Makefile \
+	lib/vxworks/Makefile \
+	lib/psos/Makefile \
+	lib/analogy/Makefile \
+	lib/smokey/Makefile \
+	lib/trank/Makefile \
+	testsuite/Makefile \
+	testsuite/latency/Makefile \
+	testsuite/switchtest/Makefile \
+	testsuite/gpiotest/Makefile \
+	testsuite/gpiobench/Makefile \
+	testsuite/spitest/Makefile \
+	testsuite/smokey/Makefile \
+	testsuite/smokey/arith/Makefile \
+	testsuite/smokey/dlopen/Makefile \
+	testsuite/smokey/sched-quota/Makefile \
+	testsuite/smokey/sched-tp/Makefile \
+	testsuite/smokey/setsched/Makefile \
+	testsuite/smokey/rtdm/Makefile \
+	testsuite/smokey/vdso-access/Makefile \
+	testsuite/smokey/posix-cond/Makefile \
+	testsuite/smokey/posix-mutex/Makefile \
+	testsuite/smokey/posix-clock/Makefile \
+	testsuite/smokey/posix-fork/Makefile \
+	testsuite/smokey/posix-select/Makefile \
+	testsuite/smokey/xddp/Makefile \
+	testsuite/smokey/iddp/Makefile \
+	testsuite/smokey/bufp/Makefile \
+	testsuite/smokey/sigdebug/Makefile \
+	testsuite/smokey/timerfd/Makefile \
+	testsuite/smokey/tsc/Makefile \
+	testsuite/smokey/leaks/Makefile \
+	testsuite/smokey/memcheck/Makefile \
+	testsuite/smokey/memory-coreheap/Makefile \
+	testsuite/smokey/memory-heapmem/Makefile \
+	testsuite/smokey/memory-tlsf/Makefile \
+	testsuite/smokey/memory-pshared/Makefile \
+	testsuite/smokey/fpu-stress/Makefile \
+	testsuite/smokey/net_udp/Makefile \
+	testsuite/smokey/net_packet_dgram/Makefile \
+	testsuite/smokey/net_packet_raw/Makefile \
+	testsuite/smokey/net_common/Makefile \
+	testsuite/smokey/cpu-affinity/Makefile \
+	testsuite/smokey/gdb/Makefile \
+	testsuite/smokey/y2038/Makefile \
+	testsuite/clocktest/Makefile \
+	testsuite/xeno-test/Makefile \
+	utils/Makefile \
+	utils/hdb/Makefile \
+	utils/can/Makefile \
+	utils/analogy/Makefile \
+	utils/ps/Makefile \
+	utils/slackspot/Makefile \
+	utils/corectl/Makefile \
+	utils/autotune/Makefile \
+	utils/net/rtnet \
+	utils/net/rtnet.conf \
+	utils/net/Makefile \
+	utils/chkkconf/Makefile \
+	demo/Makefile \
+	demo/posix/Makefile \
+	demo/posix/cyclictest/Makefile \
+	demo/posix/cobalt/Makefile \
+	demo/alchemy/Makefile \
+	demo/alchemy/cobalt/Makefile \
+	include/Makefile \
+	include/cobalt/uapi/Makefile \
+	include/cobalt/uapi/asm-generic/Makefile \
+	include/cobalt/uapi/kernel/Makefile \
+	include/cobalt/Makefile \
+	include/cobalt/sys/Makefile \
+	include/cobalt/kernel/Makefile \
+	include/cobalt/kernel/rtdm/Makefile \
+	include/cobalt/kernel/rtdm/analogy/Makefile \
+	include/cobalt/boilerplate/Makefile \
+	include/rtdm/Makefile \
+	include/rtdm/uapi/Makefile \
+	include/mercury/Makefile \
+	include/mercury/boilerplate/Makefile \
+	include/boilerplate/Makefile \
+	include/copperplate/Makefile \
+	include/alchemy/Makefile \
+	include/vxworks/Makefile \
+	include/psos/Makefile \
+	include/smokey/Makefile \
+	include/trank/Makefile \
+	include/trank/posix/Makefile \
+	include/trank/native/Makefile \
+	include/trank/rtdm/Makefile \
+	include/xenomai/Makefile \
+	doc/Makefile \
+	doc/doxygen/Makefile \
+	doc/doxygen/xeno3prm-common.conf \
+	doc/doxygen/xeno3prm-html.conf \
+	doc/doxygen/xeno3prm-latex.conf \
+	doc/gitdoc/Makefile \
+	doc/asciidoc/Makefile \
+	])
+
+AC_OUTPUT()
diff --git a/kernel/xenomai-v3.2.4/demo/Makefile.am b/kernel/xenomai-v3.2.4/demo/Makefile.am
new file mode 100644
index 0000000..fe5107c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = posix alchemy
diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am b/kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am
new file mode 100644
index 0000000..fed5c1c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/alchemy/Makefile.am
@@ -0,0 +1,32 @@
+demodir = @XENO_DEMO_DIR@
+
+demo_PROGRAMS = altency
+
+if XENO_COBALT
+SUBDIRS = cobalt
+endif
+
+cppflags = 				\
+	$(XENO_USER_CFLAGS)		\
+	-I$(top_srcdir)/include
+
+ldadd = 					\
+	../../lib/alchemy/libalchemy@CORE@.la		\
+	../../lib/copperplate/libcopperplate@CORE@.la	\
+	@XENO_CORE_LDADD@ 			\
+	@XENO_USER_LDADD@			\
+	-lpthread -lrt -lm
+
+altency_SOURCES = altency.c
+altency_CPPFLAGS = $(cppflags)
+altency_LDADD = $(ldadd)
+altency_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+# This demo mixes the Alchemy and Xenomai-enabled POSIX APIs over
+# Cobalt, so we ask for both set of flags. --posix along with
+# --ldflags will get us the linker switches causing the symbol
+# wrapping for open/read/write/ioctl and friends.  Over Mercury,
+# --posix is ignored since it's implicitly enabled.
+#
+# CFLAGS =  $(shell xeno-config --alchemy --posix --cflags)
+# LDFLAGS = $(shell xeno-config --alchemy --posix --ldflags)
diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/altency.c b/kernel/xenomai-v3.2.4/demo/alchemy/altency.c
new file mode 100644
index 0000000..e7c31d7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/alchemy/altency.c
@@ -0,0 +1,699 @@
+/*
+ * The alternate latency measurement program based on the Alchemy API.
+ *
+ * Licensed under the LGPL v2.1.
+ */
+#include <stdlib.h>
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <sched.h>
+#include <time.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <signal.h>
+#include <alchemy/task.h>
+#include <alchemy/timer.h>
+#include <alchemy/sem.h>
+#include <rtdm/testing.h>
+#include <boilerplate/trace.h>
+#include <xenomai/init.h>
+
+RT_TASK latency_task, display_task;
+
+RT_SEM display_sem;
+
+#define TEN_MILLIONS    10000000
+
+unsigned max_relaxed;
+int32_t minjitter, maxjitter, avgjitter;
+int32_t gminjitter = TEN_MILLIONS, gmaxjitter = -TEN_MILLIONS, goverrun = 0;
+int64_t gavgjitter = 0;
+
+long long period_ns = 0;
+int test_duration = 0;		/* sec of testing, via -T <sec>, 0 is inf */
+int data_lines = 21;		/* data lines per header line, -l <lines> to change */
+int quiet = 0;			/* suppress printing of RTH, RTD lines when -T given */
+int devfd = -1;
+int freeze_max = 0;
+int priority = T_HIPRIO;
+int stop_upon_switch = 0;
+sig_atomic_t sampling_relaxed = 0;
+
+#define USER_TASK       0
+#define KERNEL_TASK     1
+#define TIMER_HANDLER   2
+
+int test_mode = USER_TASK;
+const char *test_mode_names[] = {
+	"periodic user-mode task",
+	"in-kernel periodic task",
+	"in-kernel timer handler"
+};
+
+time_t test_start, test_end;	/* report test duration */
+int test_loops = 0;		/* outer loop count */
+
+/* Warmup time: avoids spurious cache effects on low-end machines. */
+#define WARMUP_TIME 1
+#define HISTOGRAM_CELLS 300
+int histogram_size = HISTOGRAM_CELLS;
+int32_t *histogram_avg = NULL, *histogram_max = NULL, *histogram_min = NULL;
+
+char *do_gnuplot = NULL;
+int do_histogram = 0, do_stats = 0, finished = 0;
+int bucketsize = 1000;		/* default = 1000ns, -B <size> to override */
+
+#define need_histo() (do_histogram || do_stats || do_gnuplot)
+
+static inline void add_histogram(int32_t *histogram, int32_t addval)
+{
+	/* bucketsize steps */
+	int inabs = (addval >= 0 ? addval : -addval) / bucketsize;
+	histogram[inabs < histogram_size ? inabs : histogram_size - 1]++;
+}
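+
+/*
+ * Example: with the default bucketsize of 1000 ns, a 3.2 us jitter
+ * sample lands in histogram[3]; samples beyond the last bucket are
+ * accumulated in histogram[histogram_size - 1].
+ */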
+
+static void latency(void *cookie)
+{
+	RTIME expected_ns, start_ns, fault_threshold;
+	unsigned int old_relaxed = 0, new_relaxed;
+	int ret, count, nsamples, warmup = 1;
+	int32_t minj, maxj, dt, overrun, sumj;
+	unsigned long ov;
+
+	fault_threshold = CONFIG_XENO_DEFAULT_PERIOD;
+	nsamples = (long long)ONE_BILLION / period_ns;
+	start_ns = rt_timer_read() + 1000000; /* 1ms from now */
+	expected_ns = start_ns;
+
+	ret = rt_task_set_periodic(NULL, start_ns, period_ns);
+	if (ret) {
+		fprintf(stderr, "altency: failed to set periodic, code %d\n",
+			ret);
+		return;
+	}
+
+	for (;;) {
+		minj = TEN_MILLIONS;
+		maxj = -TEN_MILLIONS;
+		overrun = 0;
+		test_loops++;
+
+		for (count = sumj = 0; count < nsamples; count++) {
+			ret = rt_task_wait_period(&ov);
+			dt = (int32_t)(rt_timer_read() - expected_ns);
+			new_relaxed = sampling_relaxed;
+			if (dt > maxj) {
+				if (new_relaxed != old_relaxed
+				    && dt > fault_threshold)
+					max_relaxed +=
+						new_relaxed - old_relaxed;
+				maxj = dt;
+			}
+			old_relaxed = new_relaxed;
+			if (dt < minj)
+				minj = dt;
+			sumj += dt;
+
+			if (ret) {
+				if (ret != -ETIMEDOUT) {
+					fprintf(stderr,
+						"altency: wait period failed, code %d\n",
+						ret);
+					exit(EXIT_FAILURE); /* Timer stopped. */
+				}
+				overrun += ov;
+				expected_ns += period_ns * ov;
+			}
+			expected_ns += period_ns;
+
+			if (freeze_max && (dt > gmaxjitter)
+			    && !(finished || warmup)) {
+				xntrace_user_freeze(dt, 0);
+				gmaxjitter = dt;
+			}
+
+			if (!(finished || warmup) && need_histo())
+				add_histogram(histogram_avg, dt);
+		}
+
+		if (!warmup) {
+			if (!finished && need_histo()) {
+				add_histogram(histogram_max, maxj);
+				add_histogram(histogram_min, minj);
+			}
+
+			minjitter = minj;
+			if (minj < gminjitter)
+				gminjitter = minj;
+
+			maxjitter = maxj;
+			if (maxj > gmaxjitter)
+				gmaxjitter = maxj;
+
+			avgjitter = sumj / nsamples;
+			gavgjitter += avgjitter;
+			goverrun += overrun;
+			rt_sem_v(&display_sem);
+		}
+
+		if (warmup && test_loops == WARMUP_TIME) {
+			test_loops = 0;
+			warmup = 0;
+		}
+	}
+}
+
+static void display(void *cookie)
+{
+	char sem_name[16];
+	int ret, n = 0;
+	time_t start;
+
+	if (test_mode == USER_TASK) {
+		snprintf(sem_name, sizeof(sem_name), "dispsem-%d", getpid());
+		ret = rt_sem_create(&display_sem, sem_name, 0, S_FIFO);
+		if (ret) {
+			fprintf(stderr,
+				"altency: cannot create semaphore: %s\n",
+				strerror(-ret));
+			return;
+		}
+
+	} else {
+		struct rttst_tmbench_config config;
+
+		if (test_mode == KERNEL_TASK)
+			config.mode = RTTST_TMBENCH_TASK;
+		else
+			config.mode = RTTST_TMBENCH_HANDLER;
+
+		config.period = period_ns;
+		config.priority = priority;
+		config.warmup_loops = WARMUP_TIME;
+		config.histogram_size = need_histo() ? histogram_size : 0;
+		config.histogram_bucketsize = bucketsize;
+		config.freeze_max = freeze_max;
+
+		ret = ioctl(devfd, RTTST_RTIOC_TMBENCH_START, &config);
+		if (ret) {
+			fprintf(stderr,
+				"altency: failed to start in-kernel timer benchmark, code %d\n",
+				ret);
+			return;
+		}
+	}
+
+	time(&start);
+
+	if (WARMUP_TIME)
+		printf("warming up...\n");
+
+	if (quiet)
+		fprintf(stderr, "running quietly for %d seconds\n",
+			test_duration);
+
+	for (;;) {
+		int32_t minj, gminj, maxj, gmaxj, avgj;
+
+		if (test_mode == USER_TASK) {
+			ret = rt_sem_p(&display_sem, TM_INFINITE);
+			if (ret) {
+				if (ret != -EIDRM)
+					fprintf(stderr,
+						"altency: failed to pend on semaphore, code %d\n",
+						ret);
+
+				return;
+			}
+
+			minj = minjitter;
+			gminj = gminjitter;
+			avgj = avgjitter;
+			maxj = maxjitter;
+			gmaxj = gmaxjitter;
+
+		} else {
+			struct rttst_interm_bench_res result;
+
+			ret = ioctl(devfd, RTTST_RTIOC_INTERM_BENCH_RES, &result);
+			if (ret) {
+				if (ret != -EIDRM)
+					fprintf(stderr,
+					"altency: failed to call RTTST_RTIOC_INTERM_BENCH_RES, %m\n");
+
+				return;
+			}
+
+			minj = result.last.min;
+			gminj = result.overall.min;
+			avgj = result.last.avg;
+			maxj = result.last.max;
+			gmaxj = result.overall.max;
+			goverrun = result.overall.overruns;
+		}
+
+		if (!quiet) {
+			if (data_lines && (n++ % data_lines) == 0) {
+				time_t now, dt;
+				time(&now);
+				dt = now - start - WARMUP_TIME;
+				printf
+				    ("RTT|  %.2ld:%.2ld:%.2ld  (%s, %Ld us period, "
+				     "priority %d)\n", dt / 3600,
+				     (dt / 60) % 60, dt % 60,
+				     test_mode_names[test_mode],
+				     period_ns / 1000, priority);
+				printf("RTH|%11s|%11s|%11s|%8s|%6s|%11s|%11s\n",
+				       "----lat min", "----lat avg",
+				       "----lat max", "-overrun", "---msw",
+				       "---lat best", "--lat worst");
+			}
+			printf("RTD|%11.3f|%11.3f|%11.3f|%8d|%6u|%11.3f|%11.3f\n",
+			       (double)minj / 1000,
+			       (double)avgj / 1000,
+			       (double)maxj / 1000,
+			       goverrun,
+			       max_relaxed,
+			       (double)gminj / 1000, (double)gmaxj / 1000);
+		}
+	}
+}
+
+static double dump_histogram(int32_t *histogram, char *kind)
+{
+	int n, total_hits = 0;
+	double avg = 0;		/* used to sum hits 1st */
+
+	if (do_histogram)
+		printf("---|--param|----range-|--samples\n");
+
+	for (n = 0; n < histogram_size; n++) {
+		int32_t hits = histogram[n];
+
+		if (hits) {
+			total_hits += hits;
+			avg += n * hits;
+			if (do_histogram)
+				printf("HSD|    %s| %3d -%3d | %8d\n",
+				       kind, n, n + 1, hits);
+		}
+	}
+
+	avg /= total_hits;	/* compute avg, reuse variable */
+
+	return avg;
+}
+
+static void dump_histo_gnuplot(int32_t *histogram)
+{
+	unsigned start, stop;
+	FILE *f;
+	int n;
+
+	f = fopen(do_gnuplot, "w");
+	if (!f)
+		return;
+
+	for (n = 0; n < histogram_size && histogram[n] == 0L; n++)
+		;
+	start = n;
+
+	for (n = histogram_size - 1; n >= 0 && histogram[n] == 0L; n--)
+		;
+	stop = n;
+
+	fprintf(f, "%g 1\n", start * bucketsize / 1000.0);
+	for (n = start; n <= stop; n++)
+		fprintf(f, "%g %d\n",
+			(n + 0.5) * bucketsize / 1000.0, histogram[n] + 1);
+	fprintf(f, "%g 1\n", (stop + 1) * bucketsize / 1000.0);
+
+	fclose(f);
+}
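+
+/*
+ * The dump is a two-column series (bucket center in microseconds,
+ * hit count biased by one so a logarithmic y axis stays defined),
+ * bracketed by two sentinel points. A hypothetical session could
+ * plot it with:
+ *   gnuplot> set logscale y
+ *   gnuplot> plot "plot.dat" with histeps
+ */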
+
+static void dump_stats(int32_t *histogram, char *kind, double avg)
+{
+	int n, total_hits = 0;
+	double variance = 0;
+
+	for (n = 0; n < histogram_size; n++) {
+		int32_t hits = histogram[n];
+
+		if (hits) {
+			total_hits += hits;
+			variance += hits * (n - avg) * (n - avg);
+		}
+	}
+
+	/* compute std-deviation (unbiased form) */
+	if (total_hits > 1) {
+		variance /= total_hits - 1;
+		variance = sqrt(variance);
+	} else
+		variance = 0;
+
+	printf("HSS|    %s| %9d| %10.3f| %10.3f\n",
+	       kind, total_hits, avg, variance);
+}
+
+static void dump_hist_stats(void)
+{
+	double minavg, maxavg, avgavg;
+
+	/* max is last, where it's visible without scrolling */
+	minavg = dump_histogram(histogram_min, "min");
+	avgavg = dump_histogram(histogram_avg, "avg");
+	maxavg = dump_histogram(histogram_max, "max");
+
+	printf("HSH|--param|--samples-|--average--|---stddev--\n");
+
+	dump_stats(histogram_min, "min", minavg);
+	dump_stats(histogram_avg, "avg", avgavg);
+	dump_stats(histogram_max, "max", maxavg);
+
+	if (do_gnuplot)
+		dump_histo_gnuplot(histogram_avg);
+}
+
+static void cleanup(void)
+{
+	time_t actual_duration;
+	int32_t gmaxj, gminj, gavgj;
+
+	if (test_mode == USER_TASK) {
+		rt_sem_delete(&display_sem);
+
+		gavgjitter /= (test_loops > 1 ? test_loops : 2) - 1;
+
+		gminj = gminjitter;
+		gmaxj = gmaxjitter;
+		gavgj = gavgjitter;
+	} else {
+		struct rttst_overall_bench_res overall;
+
+		overall.histogram_min = histogram_min;
+		overall.histogram_max = histogram_max;
+		overall.histogram_avg = histogram_avg;
+		ioctl(devfd, RTTST_RTIOC_TMBENCH_STOP, &overall);
+		gminj = overall.result.min;
+		gmaxj = overall.result.max;
+		gavgj = overall.result.avg;
+		goverrun = overall.result.overruns;
+	}
+
+	if (devfd >= 0)
+		close(devfd);
+
+	if (need_histo())
+		dump_hist_stats();
+
+	time(&test_end);
+	actual_duration = test_end - test_start - WARMUP_TIME;
+	if (!test_duration)
+		test_duration = actual_duration;
+
+	printf
+	    ("---|-----------|-----------|-----------|--------|------|-------------------------\n"
+	     "RTS|%11.3f|%11.3f|%11.3f|%8d|%6u|    %.2ld:%.2ld:%.2ld/%.2d:%.2d:%.2d\n",
+	     (double)gminj / 1000, (double)gavgj / 1000, (double)gmaxj / 1000,
+	     goverrun, max_relaxed, actual_duration / 3600, (actual_duration / 60) % 60,
+	     actual_duration % 60, test_duration / 3600,
+	     (test_duration / 60) % 60, test_duration % 60);
+	if (max_relaxed > 0)
+		printf(
+"Warning! some latency peaks may have been due to involuntary mode switches.\n"
+"Please contact xenomai@xenomai.org\n");
+
+	if (histogram_avg)
+		free(histogram_avg);
+	if (histogram_max)
+		free(histogram_max);
+	if (histogram_min)
+		free(histogram_min);
+
+	exit(0);
+}
+
+static void faulthand(int sig)
+{
+	xntrace_user_freeze(0, 1);
+	signal(sig, SIG_DFL);
+	__STD(kill(getpid(), sig));
+}
+
+#ifdef CONFIG_XENO_COBALT
+
+static const char *reason_str[] = {
+	[SIGDEBUG_UNDEFINED] = "received SIGDEBUG for unknown reason",
+	[SIGDEBUG_MIGRATE_SIGNAL] = "received signal",
+	[SIGDEBUG_MIGRATE_SYSCALL] = "invoked syscall",
+	[SIGDEBUG_MIGRATE_FAULT] = "triggered fault",
+	[SIGDEBUG_MIGRATE_PRIOINV] = "affected by priority inversion",
+	[SIGDEBUG_NOMLOCK] = "process memory not locked",
+	[SIGDEBUG_WATCHDOG] = "watchdog triggered (period too short?)",
+	[SIGDEBUG_LOCK_BREAK] = "scheduler lock break",
+};
+
+static void sigdebug(int sig, siginfo_t *si, void *context)
+{
+	const char fmt[] = "%s, aborting.\n"
+		"(enabling CONFIG_XENO_OPT_DEBUG_TRACE_RELAX may help)\n";
+	unsigned int reason = sigdebug_reason(si);
+	int n __attribute__ ((unused));
+	static char buffer[256];
+
+	if (!stop_upon_switch) {
+		++sampling_relaxed;
+		return;
+	}
+
+	if (reason > SIGDEBUG_WATCHDOG)
+		reason = SIGDEBUG_UNDEFINED;
+
+	switch(reason) {
+	case SIGDEBUG_UNDEFINED:
+	case SIGDEBUG_NOMLOCK:
+	case SIGDEBUG_WATCHDOG:
+		n = snprintf(buffer, sizeof(buffer), "altency: %s\n",
+			     reason_str[reason]);
+		n = write(STDERR_FILENO, buffer, n);
+		exit(EXIT_FAILURE);
+	}
+
+	n = snprintf(buffer, sizeof(buffer), fmt, reason_str[reason]);
+	n = write(STDERR_FILENO, buffer, n);
+	signal(sig, SIG_DFL);
+	__STD(kill(getpid(), sig));
+}
+
+#endif /* CONFIG_XENO_COBALT */
+
+void application_usage(void)
+{
+	fprintf(stderr, "usage: %s [options]\n", get_program_name());
+	fprintf(stderr,
+		"-h                              print histograms of min, avg, max latencies\n"
+		"-g <file>                       dump histogram to <file> in gnuplot format\n"
+		"-s                              print statistics of min, avg, max latencies\n"
+		"-H <histogram-size>             default = 300, increase if your last bucket is full\n"
+		"-B <bucket-size>                default = 1000ns, decrease for more resolution\n"
+		"-p <period_us>                  sampling period\n"
+		"-l <data-lines per header>      default=21, 0 to suppress headers\n"
+		"-T <test_duration_seconds>      default=0, so ^C to end\n"
+		"-q                              suppresses RTD, RTH lines if -T is used\n"
+		"-D <testing_device_no>          number of testing device, default=0\n"
+		"-t <test_mode>                  0=user task (default), 1=kernel task, 2=timer IRQ\n"
+		"-f                              freeze trace for each new max latency\n"
+		"-c <cpu>                        pin measuring task down to given CPU\n"
+		"-P <priority>                   task priority (test mode 0 and 1 only)\n"
+		"-b                              break upon mode switch\n"
+		);
+}
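+
+/*
+ * Typical invocations (parameter values are only illustrative):
+ *   altency -p 100 -T 60 -s     100 us period, 60 s run, final stats
+ *   altency -t 1 -h             in-kernel task mode, with histograms
+ */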
+
+int main(int argc, char *const *argv)
+{
+	struct sigaction sa __attribute__((unused));
+	int c, ret, sig, cpu = 0;
+	char task_name[32];
+	cpu_set_t cpus;
+	sigset_t mask;
+
+	while ((c = getopt(argc, argv, "g:hp:l:T:qH:B:sD:t:fc:P:b")) != EOF)
+		switch (c) {
+		case 'g':
+			do_gnuplot = strdup(optarg);
+			break;
+		case 'h':
+			do_histogram = 1;
+			break;
+		case 's':
+			do_stats = 1;
+			break;
+		case 'H':
+			histogram_size = atoi(optarg);
+			break;
+		case 'B':
+			bucketsize = atoi(optarg);
+			break;
+		case 'p':
+			period_ns = atoi(optarg) * 1000LL;
+			if (period_ns > ONE_BILLION) {
+				fprintf(stderr, "altency: invalid period (> 1s).\n");
+				exit(2);
+			}
+			break;
+		case 'l':
+			data_lines = atoi(optarg);
+			break;
+		case 'T':
+			test_duration = atoi(optarg);
+			alarm(test_duration + WARMUP_TIME);
+			break;
+		case 'q':
+			quiet = 1;
+			break;
+		case 't':
+			test_mode = atoi(optarg);
+			break;
+		case 'f':
+			freeze_max = 1;
+			break;
+		case 'c':
+			cpu = atoi(optarg);
+			if (cpu < 0 || cpu >= CPU_SETSIZE) {
+				fprintf(stderr, "altency: invalid CPU #%d\n", cpu);
+				return 1;
+			}
+			break;
+		case 'P':
+			priority = atoi(optarg);
+			break;
+		case 'b':
+			stop_upon_switch = 1;
+			break;
+		default:
+			xenomai_usage();
+			exit(2);
+		}
+
+	if (!test_duration && quiet) {
+		fprintf(stderr,
+			"altency: -q only works if -T has been given.\n");
+		quiet = 0;
+	}
+
+	if ((test_mode < USER_TASK) || (test_mode > TIMER_HANDLER)) {
+		fprintf(stderr, "altency: invalid test mode.\n");
+		exit(2);
+	}
+
+	time(&test_start);
+
+	histogram_avg = calloc(histogram_size, sizeof(int32_t));
+	histogram_max = calloc(histogram_size, sizeof(int32_t));
+	histogram_min = calloc(histogram_size, sizeof(int32_t));
+
+	if (!(histogram_avg && histogram_max && histogram_min))
+		cleanup();
+
+	if (period_ns == 0)
+		period_ns = CONFIG_XENO_DEFAULT_PERIOD;	/* ns */
+
+	if (priority <= T_LOPRIO)
+		priority = T_LOPRIO + 1;
+	else if (priority > T_HIPRIO)
+		priority = T_HIPRIO;
+
+	sigemptyset(&mask);
+	sigaddset(&mask, SIGINT);
+	sigaddset(&mask, SIGTERM);
+	sigaddset(&mask, SIGHUP);
+	sigaddset(&mask, SIGALRM);
+	pthread_sigmask(SIG_BLOCK, &mask, NULL);
+
+#ifdef CONFIG_XENO_COBALT
+	sigemptyset(&sa.sa_mask);
+	sa.sa_sigaction = sigdebug;
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGDEBUG, &sa, NULL);
+#endif
+
+	if (freeze_max) {
+		/* If something goes wrong, we want to freeze the current
+		   trace path to help debugging. */
+		signal(SIGSEGV, faulthand);
+		signal(SIGBUS, faulthand);
+	}
+
+	setlinebuf(stdout);
+
+	printf("== Sampling period: %Ld us\n"
+	       "== Test mode: %s\n"
+	       "== All results in microseconds\n",
+	       period_ns / 1000, test_mode_names[test_mode]);
+
+	if (test_mode != USER_TASK) {
+		devfd = open("/dev/rtdm/timerbench", O_RDWR);
+		if (devfd < 0) {
+			fprintf(stderr,
+				"altency: failed to open timerbench device, %m\n"
+				"(modprobe xeno_timerbench?)\n");
+			return 0;
+		}
+	}
+
+	snprintf(task_name, sizeof(task_name), "alt-display-%d", getpid());
+	ret = rt_task_create(&display_task, task_name, 0, 0, 0);
+	if (ret) {
+		fprintf(stderr,
+			"altency: failed to create display task, code %d\n",
+			ret);
+		return 0;
+	}
+
+	ret = rt_task_start(&display_task, &display, NULL);
+	if (ret) {
+		fprintf(stderr,
+			"altency: failed to start display task, code %d\n",
+			ret);
+		return 0;
+	}
+
+	if (test_mode == USER_TASK) {
+		snprintf(task_name, sizeof(task_name), "alt-sampling-%d", getpid());
+		ret = rt_task_create(&latency_task, task_name, 0, priority,
+				     T_WARNSW);
+		if (ret) {
+			fprintf(stderr,
+				"altency: failed to create sampling task, code %d\n",
+				ret);
+			return 0;
+		}
+
+		CPU_ZERO(&cpus);
+		CPU_SET(cpu, &cpus);
+		ret = rt_task_set_affinity(&latency_task, &cpus);
+		if (ret) {
+			fprintf(stderr,
+				"altency: failed to set CPU affinity, code %d\n",
+				ret);
+			return 0;
+		}
+
+		ret = rt_task_start(&latency_task, latency, NULL);
+		if (ret) {
+			fprintf(stderr,
+				"altency: failed to start sampling task, code %d\n",
+				ret);
+			return 0;
+		}
+	}
+
+	__STD(sigwait(&mask, &sig));
+	finished = 1;
+
+	cleanup();
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am
new file mode 100644
index 0000000..ae2edd7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/Makefile.am
@@ -0,0 +1,22 @@
+demodir = @XENO_DEMO_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+demo_PROGRAMS = cross-link
+
+cppflags = 			\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+ldadd = 					\
+	@XENO_AUTOINIT_LDFLAGS@			\
+	$(XENO_POSIX_WRAPPERS)			\
+	../../../lib/alchemy/libalchemy@CORE@.la 	\
+	../../../lib/copperplate/libcopperplate@CORE@.la \
+	@XENO_CORE_LDADD@			\
+	@XENO_USER_LDADD@			\
+	-lrt -lpthread -lm
+
+cross_link_SOURCES = cross-link.c
+cross_link_CPPFLAGS = $(cppflags)
+cross_link_LDADD = $(ldadd)
diff --git a/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c
new file mode 100644
index 0000000..9b1fb34
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/alchemy/cobalt/cross-link.c
@@ -0,0 +1,328 @@
+/*
+ * cross-link.c
+ *
+ * Userspace test program (Xenomai alchemy skin) for RTDM-based UART drivers
+ * Copyright 2005 by Joerg Langenberg <joergel75@gmx.net>
+ *
+ * Updates by Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <alchemy/task.h>
+#include <alchemy/timer.h>
+#include <rtdm/serial.h>
+
+#define MAIN_PREFIX   "main : "
+#define WTASK_PREFIX  "write_task: "
+#define RTASK_PREFIX  "read_task: "
+
+#define WRITE_FILE    "/dev/rtdm/rtser0"
+#define READ_FILE     "/dev/rtdm/rtser1"
+
+int read_fd  = -1;
+int write_fd = -1;
+
+#define STATE_FILE_OPENED         1
+#define STATE_TASK_CREATED        2
+
+unsigned int read_state = 0;
+unsigned int write_state = 0;
+
+/*                           --s-ms-us-ns */
+RTIME write_task_period_ns =    100000000llu;
+RT_TASK write_task;
+RT_TASK read_task;
+
+static const struct rtser_config read_config = {
+	.config_mask       = 0xFFFF,
+	.baud_rate         = 115200,
+	.parity            = RTSER_DEF_PARITY,
+	.data_bits         = RTSER_DEF_BITS,
+	.stop_bits         = RTSER_DEF_STOPB,
+	.handshake         = RTSER_DEF_HAND,
+	.fifo_depth        = RTSER_DEF_FIFO_DEPTH,
+	.rx_timeout        = RTSER_DEF_TIMEOUT,
+	.tx_timeout        = RTSER_DEF_TIMEOUT,
+	.event_timeout     = 1000000000, /* 1 s */
+	.timestamp_history = RTSER_RX_TIMESTAMP_HISTORY,
+	.event_mask        = RTSER_EVENT_RXPEND,
+};
+
+static const struct rtser_config write_config = {
+	.config_mask       = RTSER_SET_BAUD | RTSER_SET_TIMESTAMP_HISTORY,
+	.baud_rate         = 115200,
+	.timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY,
+	/* the rest implicitly remains default */
+};
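+
+/*
+ * Only the fields flagged in config_mask are applied by
+ * RTSER_RTIOC_SET_CONFIG. As a minimal sketch (values illustrative
+ * only), changing nothing but the baud rate would read:
+ *
+ *	struct rtser_config cfg = {
+ *		.config_mask = RTSER_SET_BAUD,
+ *		.baud_rate   = 57600,
+ *	};
+ *	ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg);
+ */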
+
+static int close_file(int fd, char *name)
+{
+	int err, i = 0;
+
+	do {
+		i++;
+		err = close(fd);
+		switch (err) {
+		case -EAGAIN:
+			printf(MAIN_PREFIX "%s -> EAGAIN (%d times)\n",
+			       name, i);
+			rt_task_sleep(50000); /* wait 50us */
+			break;
+		case 0:
+			printf(MAIN_PREFIX "%s -> closed\n", name);
+			break;
+		default:
+			printf(MAIN_PREFIX "%s -> %s\n", name,
+			       strerror(errno));
+			break;
+		}
+	} while (err == -EAGAIN && i < 10);
+
+	return err;
+}
+
+static void cleanup_all(void)
+{
+	if (read_state & STATE_FILE_OPENED) {
+		close_file(read_fd, READ_FILE" (read)");
+		read_state &= ~STATE_FILE_OPENED;
+	}
+
+	if (write_state & STATE_FILE_OPENED) {
+		close_file(write_fd, WRITE_FILE " (write)");
+		write_state &= ~STATE_FILE_OPENED;
+	}
+
+	if (write_state & STATE_TASK_CREATED) {
+		printf(MAIN_PREFIX "delete write_task\n");
+		rt_task_delete(&write_task);
+		write_state &= ~STATE_TASK_CREATED;
+	}
+
+	if (read_state & STATE_TASK_CREATED) {
+		printf(MAIN_PREFIX "delete read_task\n");
+		rt_task_delete(&read_task);
+		read_state &= ~STATE_TASK_CREATED;
+	}
+}
+
+static void catch_signal(int sig)
+{
+	cleanup_all();
+	printf(MAIN_PREFIX "exit\n");
+}
+
+static void write_task_proc(void *arg)
+{
+	int err;
+	RTIME write_time;
+	ssize_t sz = sizeof(RTIME);
+	int written = 0;
+
+	err = rt_task_set_periodic(NULL, TM_NOW,
+				   rt_timer_ns2ticks(write_task_period_ns));
+	if (err) {
+		printf(WTASK_PREFIX "error on set periodic, %s\n",
+		       strerror(-err));
+		goto exit_write_task;
+	}
+
+	while (1) {
+		err = rt_task_wait_period(NULL);
+		if (err) {
+			printf(WTASK_PREFIX
+			       "error on rt_task_wait_period, %s\n",
+			       strerror(-err));
+			break;
+		}
+
+		write_time = rt_timer_read();
+
+		written = write(write_fd, &write_time, sz);
+		if (written < 0) {
+			printf(WTASK_PREFIX "error on write, %s\n",
+			       strerror(errno));
+			break;
+		} else if (written != sz) {
+			printf(WTASK_PREFIX "only %d / %zd byte transmitted\n",
+			       written, sz);
+			break;
+		}
+	}
+
+ exit_write_task:
+	if ((write_state & STATE_FILE_OPENED) &&
+	    close_file(write_fd, WRITE_FILE " (write)") == 0)
+		write_state &= ~STATE_FILE_OPENED;
+
+	printf(WTASK_PREFIX "exit\n");
+}
+
+static void read_task_proc(void *arg)
+{
+	int err;
+	int nr = 0;
+	RTIME read_time  = 0;
+	RTIME write_time = 0;
+	RTIME irq_time   = 0;
+	ssize_t sz = sizeof(RTIME);
+	int rd = 0;
+	struct rtser_event rx_event;
+
+	printf(" Nr |   write->irq    |    irq->read    |   write->read   |\n");
+	printf("-----------------------------------------------------------\n");
+
+	/*
+	 * We are in secondary mode now due to printf, the next
+	 * blocking Xenomai or driver call will switch us back
+	 * (here: RTSER_RTIOC_WAIT_EVENT).
+	 */
+
+	while (1) {
+		/* waiting for event */
+		err = ioctl(read_fd, RTSER_RTIOC_WAIT_EVENT, &rx_event);
+		if (err) {
+			printf(RTASK_PREFIX
+			       "error on RTSER_RTIOC_WAIT_EVENT, %s\n",
+			       strerror(errno));
+			if (err == -ETIMEDOUT)
+				continue;
+			break;
+		}
+
+		irq_time = rx_event.rxpend_timestamp;
+		rd = read(read_fd, &write_time, sz);
+		if (rd == sz) {
+			read_time = rt_timer_read();
+			printf("%3d |%16llu |%16llu |%16llu\n", nr,
+			       irq_time  - write_time,
+			       read_time - irq_time,
+			       read_time - write_time);
+			nr++;
+		} else if (rd < 0) {
+			printf(RTASK_PREFIX "error on read, %s\n",
+			       strerror(errno));
+			break;
+		} else {
+			printf(RTASK_PREFIX "only %d / %zd bytes received\n",
+			       rd, sz);
+			break;
+		}
+	}
+
+	if ((read_state & STATE_FILE_OPENED) &&
+	    close_file(read_fd, READ_FILE " (read)") == 0)
+		read_state &= ~STATE_FILE_OPENED;
+
+	printf(RTASK_PREFIX "exit\n");
+}
+
+int main(int argc, char* argv[])
+{
+	int err = 0;
+
+	signal(SIGTERM, catch_signal);
+	signal(SIGINT, catch_signal);
+
+	/* open rtser0 */
+	write_fd = open(WRITE_FILE, 0);
+	if (write_fd < 0) {
+		printf(MAIN_PREFIX "can't open %s (write), %s\n", WRITE_FILE,
+		       strerror(errno));
+		goto error;
+	}
+	write_state |= STATE_FILE_OPENED;
+	printf(MAIN_PREFIX "write-file opened\n");
+
+	/* writing write-config */
+	err = ioctl(write_fd, RTSER_RTIOC_SET_CONFIG, &write_config);
+	if (err) {
+		printf(MAIN_PREFIX "error while RTSER_RTIOC_SET_CONFIG, %s\n",
+		       strerror(errno));
+		goto error;
+	}
+	printf(MAIN_PREFIX "write-config written\n");
+
+	/* open rtser1 */
+	read_fd = open(READ_FILE, 0);
+	if (read_fd < 0) {
+		printf(MAIN_PREFIX "can't open %s (read), %s\n", READ_FILE,
+		       strerror(errno));
+		goto error;
+	}
+	read_state |= STATE_FILE_OPENED;
+	printf(MAIN_PREFIX "read-file opened\n");
+
+	/* writing read-config */
+	err = ioctl(read_fd, RTSER_RTIOC_SET_CONFIG, &read_config);
+	if (err) {
+		printf(MAIN_PREFIX "error while ioctl, %s\n",
+		       strerror(errno));
+		goto error;
+	}
+	printf(MAIN_PREFIX "read-config written\n");
+
+	/* create write_task */
+	err = rt_task_create(&write_task, "write_task", 0, 50, 0);
+	if (err) {
+		printf(MAIN_PREFIX "failed to create write_task, %s\n",
+		       strerror(-err));
+		goto error;
+	}
+	write_state |= STATE_TASK_CREATED;
+	printf(MAIN_PREFIX "write-task created\n");
+
+	/* create read_task */
+	err = rt_task_create(&read_task, "read_task", 0, 51, 0);
+	if (err) {
+		printf(MAIN_PREFIX "failed to create read_task, %s\n",
+		       strerror(-err));
+		goto error;
+	}
+	read_state |= STATE_TASK_CREATED;
+	printf(MAIN_PREFIX "read-task created\n");
+
+	/* start write_task */
+	printf(MAIN_PREFIX "starting write-task\n");
+	err = rt_task_start(&write_task, &write_task_proc, NULL);
+	if (err) {
+		printf(MAIN_PREFIX "failed to start write_task, %s\n",
+		       strerror(-err));
+		goto error;
+	}
+
+	/* start read_task */
+	printf(MAIN_PREFIX "starting read-task\n");
+	err = rt_task_start(&read_task, &read_task_proc, NULL);
+	if (err) {
+		printf(MAIN_PREFIX "failed to start read_task, %s\n",
+		       strerror(-err));
+		goto error;
+	}
+
+	for (;;)
+		pause();
+
+	return 0;
+
+ error:
+	cleanup_all();
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/Makefile.am b/kernel/xenomai-v3.2.4/demo/posix/Makefile.am
new file mode 100644
index 0000000..0dd66a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/Makefile.am
@@ -0,0 +1,8 @@
+
+SUBDIRS = cyclictest
+
+if XENO_COBALT
+SUBDIRS += cobalt
+endif
+
+DIST_SUBDIRS = cyclictest cobalt
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am b/kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am
new file mode 100644
index 0000000..2a22967
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/Makefile.am
@@ -0,0 +1,76 @@
+demodir = @XENO_DEMO_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+demo_PROGRAMS = 	\
+	gpiopwm		\
+	bufp-label	\
+	bufp-readwrite	\
+	can_rtt		\
+	eth_p_all	\
+	iddp-label	\
+	iddp-sendrecv	\
+	xddp-echo	\
+	xddp-label	\
+	xddp-stream
+
+cppflags = 			\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+ldflags = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+ldadd = 					\
+	 @XENO_CORE_LDADD@			\
+	 @XENO_USER_LDADD@			\
+	-lpthread -lrt
+
+gpiopwm_SOURCES = gpiopwm.c
+gpiopwm_CPPFLAGS = $(cppflags) -I$(top_srcdir)/include/rtdm/uapi
+gpiopwm_LDFLAGS = $(ldflags)
+gpiopwm_LDADD = $(ldadd)
+
+bufp_label_SOURCES = bufp-label.c
+bufp_label_CPPFLAGS = $(cppflags)
+bufp_label_LDFLAGS = $(ldflags)
+bufp_label_LDADD = $(ldadd)
+
+bufp_readwrite_SOURCES = bufp-readwrite.c
+bufp_readwrite_CPPFLAGS = $(cppflags)
+bufp_readwrite_LDFLAGS = $(ldflags)
+bufp_readwrite_LDADD = $(ldadd)
+
+can_rtt_SOURCES = can-rtt.c
+can_rtt_CPPFLAGS = $(cppflags)
+can_rtt_LDFLAGS = $(ldflags)
+can_rtt_LDADD = $(ldadd)
+
+eth_p_all_SOURCES = eth_p_all.c
+eth_p_all_CPPFLAGS = $(cppflags)
+eth_p_all_LDFLAGS = $(ldflags)
+eth_p_all_LDADD = $(ldadd)
+
+iddp_label_SOURCES = iddp-label.c
+iddp_label_CPPFLAGS = $(cppflags)
+iddp_label_LDFLAGS = $(ldflags)
+iddp_label_LDADD = $(ldadd)
+
+iddp_sendrecv_SOURCES = iddp-sendrecv.c
+iddp_sendrecv_CPPFLAGS = $(cppflags)
+iddp_sendrecv_LDFLAGS = $(ldflags)
+iddp_sendrecv_LDADD = $(ldadd)
+
+xddp_echo_SOURCES = xddp-echo.c
+xddp_echo_CPPFLAGS = $(cppflags)
+xddp_echo_LDFLAGS = $(ldflags)
+xddp_echo_LDADD = $(ldadd)
+
+xddp_label_SOURCES = xddp-label.c
+xddp_label_CPPFLAGS = $(cppflags)
+xddp_label_LDFLAGS = $(ldflags)
+xddp_label_LDADD = $(ldadd)
+
+xddp_stream_SOURCES = xddp-stream.c
+xddp_stream_CPPFLAGS = $(cppflags)
+xddp_stream_LDFLAGS = $(ldflags)
+xddp_stream_LDADD = $(ldadd)
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c
new file mode 100644
index 0000000..1141c89
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-label.c
@@ -0,0 +1,221 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ *
+ * BUFP-based client/server demo, using the read(2)/write(2)
+ * system calls to exchange data over a socket.
+ *
+ * In this example, two sockets are created.  A server thread (reader)
+ * is bound to a real-time port and receives a stream of bytes sent to
+ * this port from a client thread (writer).
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
+#include <errno.h>
+#include <time.h>
+#include <rtdm/ipc.h>
+
+pthread_t svtid, cltid;
+
+#define BUFP_PORT_LABEL  "bufp-demo"
+
+static const char *msg[] = {
+	"Surfing With The Alien",
+	"Lords of Karma",
+	"Banana Mango",
+	"Psycho Monkey",
+	"Luminous Flesh Giants",
+	"Moroccan Sunset",
+	"Satch Boogie",
+	"Flying In A Blue Dream",
+	"Ride",
+	"Summer Song",
+	"Speed Of Light",
+	"Crystal Planet",
+	"Raspberry Jam Delta-V",
+	"Champagne?",
+	"Clouds Race Across The Sky",
+	"Engines Of Creation"
+};
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *server(void *arg)
+{
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc saddr;
+	char buf[128];
+	size_t bufsz;
+	int ret, s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+	if (s < 0)
+		fail("socket");
+
+	/*
+	 * Set a 16k buffer for the server endpoint. This
+	 * configuration must be done prior to binding the socket to a
+	 * port.
+	 */
+	bufsz = 16384; /* bytes */
+	ret = setsockopt(s, SOL_BUFP, BUFP_BUFSZ,
+			 &bufsz, sizeof(bufsz));
+	if (ret)
+		fail("setsockopt");
+
+	/*
+	 * Set a port label. This name will be registered when
+	 * binding, in addition to the port number (if given).
+	 */
+	strcpy(plabel.label, BUFP_PORT_LABEL);
+	ret = setsockopt(s, SOL_BUFP, BUFP_LABEL,
+			 &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+
+	/*
+	 * Bind the socket to the port. Assign that port a label, so
+	 * that peers may use a descriptive information to locate
+	 * it. Labeled ports will appear in the
+	 * /proc/xenomai/registry/rtipc/bufp directory once the socket
+	 * is bound.
+	 *
+	 * saddr.sipc_port specifies the port number to use. If -1 is
+	 * passed, the BUFP driver will auto-select an idle port.
+	 */
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = -1;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	for (;;) {
+		ret = read(s, buf, sizeof(buf));
+		if (ret < 0) {
+			close(s);
+			fail("read");
+		}
+		printf("%s: received %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, buf);
+	}
+
+	return NULL;
+}
+
+static void *client(void *arg)
+{
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc svsaddr;
+	int ret, s, n = 0, len;
+	struct timespec ts;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+	if (s < 0)
+		fail("socket");
+
+	/*
+	 * Set the port label. This name will be used to find the peer
+	 * when connecting, instead of the port number. Since this
+	 * socket is never bound, the label is only used for the
+	 * lookup and is not registered for a port of its own (unlike
+	 * what the server thread did).
+	 */
+	strcpy(plabel.label, BUFP_PORT_LABEL);
+	ret = setsockopt(s, SOL_BUFP, BUFP_LABEL,
+			 &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+
+	memset(&svsaddr, 0, sizeof(svsaddr));
+	svsaddr.sipc_family = AF_RTIPC;
+	svsaddr.sipc_port = -1;	/* Tell BUFP to search by label. */
+	ret = connect(s, (struct sockaddr *)&svsaddr, sizeof(svsaddr));
+	if (ret)
+		fail("connect");
+
+	for (;;) {
+		len = strlen(msg[n]);
+		ret = write(s, msg[n], len);
+		if (ret < 0) {
+			close(s);
+			fail("write");
+		}
+		printf("%s: sent %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, msg[n]);
+		n = (n + 1) % (sizeof(msg) / sizeof(msg[0]));
+		/*
+		 * We run in full real-time mode (i.e. primary mode),
+		 * so we have to let the system breathe between two
+		 * iterations.
+		 */
+		ts.tv_sec = 0;
+		ts.tv_nsec = 500000000; /* 500 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+int main(int argc, char **argv)
+{
+	struct sched_param svparam = {.sched_priority = 71 };
+	struct sched_param clparam = {.sched_priority = 70 };
+	pthread_attr_t svattr, clattr;
+	sigset_t set;
+	int sig;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+	sigaddset(&set, SIGTERM);
+	sigaddset(&set, SIGHUP);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	pthread_attr_init(&svattr);
+	pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&svattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&svattr, &svparam);
+
+	errno = pthread_create(&svtid, &svattr, &server, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&clattr);
+	pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&clattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&clattr, &clparam);
+
+	errno = pthread_create(&cltid, &clattr, &client, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	__STD(sigwait(&set, &sig));
+	pthread_cancel(svtid);
+	pthread_cancel(cltid);
+	pthread_join(svtid, NULL);
+	pthread_join(cltid, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c
new file mode 100644
index 0000000..34d761a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/bufp-readwrite.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ *
+ * BUFP-based client/server demo, using the read(2)/write(2)
+ * system calls to exchange data over a socket.
+ *
+ * In this example, two sockets are created.  A server thread (reader)
+ * is bound to a real-time port and receives a stream of bytes sent to
+ * this port from a client thread (writer).
+ */
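+/*
+ * Build sketch (a hypothetical invocation; the actual flags come from
+ * the local Xenomai installation via its xeno-config helper):
+ *
+ *   cc -o bufp-readwrite bufp-readwrite.c \
+ *      $(xeno-config --posix --cflags) $(xeno-config --posix --ldflags)
+ */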
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
+#include <errno.h>
+#include <rtdm/ipc.h>
+
+pthread_t svtid, cltid;
+
+#define BUFP_SVPORT 12
+
+static const char *msg[] = {
+	"Surfing With The Alien",
+	"Lords of Karma",
+	"Banana Mango",
+	"Psycho Monkey",
+	"Luminous Flesh Giants",
+	"Moroccan Sunset",
+	"Satch Boogie",
+	"Flying In A Blue Dream",
+	"Ride",
+	"Summer Song",
+	"Speed Of Light",
+	"Crystal Planet",
+	"Raspberry Jam Delta-V",
+	"Champagne?",
+	"Clouds Race Across The Sky",
+	"Engines Of Creation"
+};
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *server(void *arg)
+{
+	struct sockaddr_ipc saddr;
+	char buf[128];
+	size_t bufsz;
+	int ret, s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+	if (s < 0)
+		fail("socket");
+
+	/*
+	 * Set a 16k buffer for the server endpoint. This
+	 * configuration must be done prior to binding the socket to a
+	 * port.
+	 */
+	bufsz = 16384; /* bytes */
+	ret = setsockopt(s, SOL_BUFP, BUFP_BUFSZ,
+			 &bufsz, sizeof(bufsz));
+	if (ret)
+		fail("setsockopt");
+
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = BUFP_SVPORT;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	for (;;) {
+		ret = read(s, buf, sizeof(buf));
+		if (ret < 0) {
+			close(s);
+			fail("read");
+		}
+		printf("%s: received %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, buf);
+	}
+
+	return NULL;
+}
+
+static void *client(void *arg)
+{
+	struct sockaddr_ipc svsaddr;
+	int ret, s, n = 0, len;
+	struct timespec ts;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+	if (s < 0)
+		fail("socket");
+
+	memset(&svsaddr, 0, sizeof(svsaddr));
+	svsaddr.sipc_family = AF_RTIPC;
+	svsaddr.sipc_port = BUFP_SVPORT;
+	ret = connect(s, (struct sockaddr *)&svsaddr, sizeof(svsaddr));
+	if (ret)
+		fail("connect");
+
+	for (;;) {
+		len = strlen(msg[n]);
+		ret = write(s, msg[n], len);
+		if (ret < 0) {
+			close(s);
+			fail("write");
+		}
+		printf("%s: sent %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, msg[n]);
+		n = (n + 1) % (sizeof(msg) / sizeof(msg[0]));
+		/*
+		 * We run in full real-time mode (i.e. primary mode),
+		 * so we have to let the system breathe between two
+		 * iterations.
+		 */
+		ts.tv_sec = 0;
+		ts.tv_nsec = 500000000; /* 500 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+int main(int argc, char **argv)
+{
+	struct sched_param svparam = {.sched_priority = 71 };
+	struct sched_param clparam = {.sched_priority = 70 };
+	pthread_attr_t svattr, clattr;
+	sigset_t set;
+	int sig;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+	sigaddset(&set, SIGTERM);
+	sigaddset(&set, SIGHUP);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	pthread_attr_init(&svattr);
+	pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&svattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&svattr, &svparam);
+
+	errno = pthread_create(&svtid, &svattr, &server, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&clattr);
+	pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&clattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&clattr, &clparam);
+
+	errno = pthread_create(&cltid, &clattr, &client, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	__STD(sigwait(&set, &sig));
+	pthread_cancel(svtid);
+	pthread_cancel(cltid);
+	pthread_join(svtid, NULL);
+	pthread_join(cltid, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c
new file mode 100644
index 0000000..dd212d8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/can-rtt.c
@@ -0,0 +1,408 @@
+/*
+ * Round-Trip-Time Test - sends and receives messages and measures the
+ *                        time in between.
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Based on RTnet's examples/xenomai/posix/rtt-sender.c.
+ *
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2002 Marc Kleine-Budde <kleine-budde@gmx.de>
+ *               2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *
+ * The program sends out CAN messages periodically and copies the current
+ * time-stamp to the payload. At reception, that time-stamp is compared
+ * with the current time to determine the round-trip time. The jitter
+ * values are printed out regularly. Concurrent tests can be carried out
+ * by starting the program with different message identifiers. It is also
+ * possible to use this program on a remote system as a simple repeater to
+ * loopback messages.
+ */
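+
+/*
+ * Example invocations (a sketch; the interface names are assumptions,
+ * substitute the rtcan devices present on the target):
+ *
+ *   can-rtt rtcan0 rtcan1             # measure RTT between two ports
+ *   can-rtt --repeater rtcan0 rtcan0  # loop received frames back
+ */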
+
+#include <errno.h>
+#include <mqueue.h>
+#include <signal.h>
+#include <pthread.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <limits.h>
+#include <getopt.h>
+#include <memory.h>
+#include <netinet/in.h>
+#include <net/if.h>
+#include <sys/ioctl.h>
+#include <rtdm/can.h>
+#include <xenomai/init.h>
+
+#define NSEC_PER_SEC 1000000000
+
+static unsigned int cycle = 10000; /* 10 ms */
+static canid_t can_id = 0x1;
+
+static pthread_t txthread, rxthread;
+static int txsock, rxsock;
+static mqd_t mq;
+static int txcount, rxcount;
+static int overruns;
+static int repeater;
+
+struct rtt_stat {
+    long long rtt;
+    long long rtt_min;
+    long long rtt_max;
+    long long rtt_sum;
+    long long rtt_sum_last;
+    int counts_per_sec;
+};
+
+void application_usage(void)
+{
+    fprintf(stderr, "usage: %s [options] <tx-can-interface> <rx-can-interface>:\n",
+	    get_program_name());
+    fprintf(stderr,
+	    " -r, --repeater			Repeater, send back received messages\n"
+	    " -i, --id=ID			CAN Identifier (default = 0x1)\n"
+	    " -c, --cycle			Cycle time in us (default = 10000us)\n");
+}
+
+static void *transmitter(void *arg)
+{
+    struct sched_param  param = { .sched_priority = 80 };
+    struct timespec next_period;
+    struct timespec time;
+    struct can_frame frame;
+    long long *rtt_time = (long long *)&frame.data, t;
+
+    /* Pre-fill CAN frame */
+    frame.can_id = can_id;
+    frame.can_dlc = sizeof(*rtt_time);
+
+    pthread_setname_np(pthread_self(), "rtcan_rtt_transmitter");
+    pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
+    clock_gettime(CLOCK_MONOTONIC, &next_period);
+
+    while(1) {
+	next_period.tv_nsec += cycle * 1000;
+	while (next_period.tv_nsec >= NSEC_PER_SEC) {
+		next_period.tv_nsec -= NSEC_PER_SEC;
+		next_period.tv_sec++;
+	}
+
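+	/*
+	 * Sleep until the absolute release point: TIMER_ABSTIME keeps
+	 * the period free of the drift that chained relative delays
+	 * would accumulate.
+	 */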
+	clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next_period, NULL);
+
+	if (rxcount != txcount) {
+	    overruns++;
+	    continue;
+	}
+
+	clock_gettime(CLOCK_MONOTONIC, &time);
+	t = (long long)time.tv_sec * NSEC_PER_SEC + time.tv_nsec;
+	memcpy(rtt_time, &t, sizeof(t));
+
+	/* Transmit the message containing the local time */
+	if (send(txsock, (void *)&frame, sizeof(struct can_frame), 0) < 0) {
+	    if (errno == EBADF)
+		printf("terminating transmitter thread\n");
+	    else
+		perror("send failed");
+	    return NULL;
+	}
+	txcount++;
+    }
+}
+
+
+static void *receiver(void *arg)
+{
+    struct sched_param param = { .sched_priority = 82 };
+    struct timespec time;
+    struct can_frame frame;
+    long long *rtt_time = (long long *)frame.data, t;
+    struct rtt_stat rtt_stat = {0, 1000000000000000000LL, -1000000000000000000LL,
+				0, 0, 0};
+
+    pthread_setname_np(pthread_self(), "rtcan_rtt_receiver");
+    pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
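+    /*
+     * cycle is given in microseconds, so this yields the number of
+     * messages received per second, i.e. one statistics sample is
+     * queued to main() per second.
+     */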
+    rtt_stat.counts_per_sec = 1000000 / cycle;
+
+    while (1) {
+	if (recv(rxsock, (void *)&frame, sizeof(struct can_frame), 0) < 0) {
+	    if (errno == EBADF)
+		printf("terminating receiver thread\n");
+	    else
+		perror("recv failed");
+	    return NULL;
+	}
+	if (repeater) {
+	    /* Transmit the message back as is */
+	    if (send(txsock, (void *)&frame, sizeof(struct can_frame), 0) < 0) {
+		if (errno == EBADF)
+		    printf("terminating transmitter thread\n");
+		else
+		    perror("send failed");
+		return NULL;
+	    }
+	    txcount++;
+	} else {
+	    memcpy(&t, rtt_time, sizeof(t));
+	    clock_gettime(CLOCK_MONOTONIC, &time);
+	    if (rxcount > 0) {
+		rtt_stat.rtt = ((long long)time.tv_sec * 1000000000LL +
+				time.tv_nsec - t);
+		rtt_stat.rtt_sum += rtt_stat.rtt;
+		if (rtt_stat.rtt <  rtt_stat.rtt_min)
+		    rtt_stat.rtt_min = rtt_stat.rtt;
+		if (rtt_stat.rtt > rtt_stat.rtt_max)
+		    rtt_stat.rtt_max = rtt_stat.rtt;
+	    }
+	}
+	rxcount++;
+
+	if ((rxcount % rtt_stat.counts_per_sec) == 0) {
+	    mq_send(mq, (char *)&rtt_stat, sizeof(rtt_stat), 0);
+	    rtt_stat.rtt_sum_last = rtt_stat.rtt_sum;
+	}
+    }
+}
+
+static void catch_signal(int sig)
+{
+    mq_close(mq);
+    close(rxsock);
+    close(txsock);
+}
+
+
+int main(int argc, char *argv[])
+{
+    struct sched_param param = { .sched_priority = 1 };
+    pthread_attr_t thattr;
+    struct mq_attr mqattr;
+    struct sockaddr_can rxaddr, txaddr;
+    struct can_filter rxfilter[1];
+    struct rtt_stat rtt_stat;
+    char mqname[32];
+    char *txdev, *rxdev;
+    struct can_ifreq ifr;
+    int ret, opt;
+
+    struct option long_options[] = {
+	{ "id", required_argument, 0, 'i'},
+	{ "cycle", required_argument, 0, 'c'},
+	{ "repeater", no_argument, 0, 'r'},
+	{ 0, 0, 0, 0},
+    };
+
+    while ((opt = getopt_long(argc, argv, "ri:c:",
+			      long_options, NULL)) != -1) {
+	switch (opt) {
+	case 'c':
+	    cycle = atoi(optarg);
+	    break;
+
+	case 'i':
+	    can_id = strtoul(optarg, NULL, 0);
+	    break;
+
+	case 'r':
+	    repeater = 1;
+	    break;
+
+	default:
+	    fprintf(stderr, "Unknown option %c\n", opt);
+	    exit(-1);
+	}
+    }
+
+    printf("%d %d\n", optind, argc);
+    if (optind + 2 != argc) {
+	xenomai_usage();
+	exit(0);
+    }
+
+    txdev = argv[optind];
+    rxdev = argv[optind + 1];
+
+    /* Create and configure RX socket */
+    if ((rxsock = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) {
+	perror("RX socket failed");
+	return -1;
+    }
+
+    namecpy(ifr.ifr_name, rxdev);
+    printf("RX rxsock=%d, ifr_name=%s\n", rxsock, ifr.ifr_name);
+
+    if (ioctl(rxsock, SIOCGIFINDEX, &ifr) < 0) {
+	perror("RX ioctl SIOCGIFINDEX failed");
+	goto failure1;
+    }
+
+    /* We only want to receive our own messages */
+    rxfilter[0].can_id = can_id;
+    rxfilter[0].can_mask = 0x3ff;
+    if (setsockopt(rxsock, SOL_CAN_RAW, CAN_RAW_FILTER,
+		   &rxfilter, sizeof(struct can_filter)) < 0) {
+	perror("RX setsockopt CAN_RAW_FILTER failed");
+	goto failure1;
+    }
+    memset(&rxaddr, 0, sizeof(rxaddr));
+    rxaddr.can_ifindex = ifr.ifr_ifindex;
+    rxaddr.can_family = AF_CAN;
+    if (bind(rxsock, (struct sockaddr *)&rxaddr, sizeof(rxaddr)) < 0) {
+	perror("RX bind failed\n");
+	goto failure1;
+    }
+
+    /* Create and configure TX socket */
+
+    if (strcmp(rxdev, txdev) == 0) {
+	txsock = rxsock;
+    } else {
+	if ((txsock = socket(PF_CAN, SOCK_RAW, CAN_RAW)) < 0) {
+	    perror("TX socket failed");
+	    goto failure1;
+	}
+
+	namecpy(ifr.ifr_name, txdev);
+	printf("TX txsock=%d, ifr_name=%s\n", txsock, ifr.ifr_name);
+
+	if (ioctl(txsock, SIOCGIFINDEX, &ifr) < 0) {
+	    perror("TX ioctl SIOCGIFINDEX failed");
+	    goto failure2;
+	}
+
+	/* Suppress definition of a default receive filter list */
+	if (setsockopt(txsock, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0) < 0) {
+	    perror("TX setsockopt CAN_RAW_FILTER failed");
+	    goto failure2;
+	}
+
+	memset(&txaddr, 0, sizeof(txaddr));
+	txaddr.can_ifindex = ifr.ifr_ifindex;
+	txaddr.can_family = AF_CAN;
+
+	if (bind(txsock, (struct sockaddr *)&txaddr, sizeof(txaddr)) < 0) {
+		perror("TX bind failed\n");
+		goto failure2;
+	}
+    }
+
+    signal(SIGTERM, catch_signal);
+    signal(SIGINT, catch_signal);
+    signal(SIGHUP, catch_signal);
+
+    printf("Round-Trip-Time test %s -> %s with CAN ID 0x%x\n",
+	   argv[optind], argv[optind + 1], can_id);
+    printf("Cycle time: %d us\n", cycle);
+    printf("All RTT timing figures are in us.\n");
+
+    /* Create statistics message queue */
+    snprintf(mqname, sizeof(mqname), "/rtcan_rtt-%d", getpid());
+    mqattr.mq_flags   = 0;
+    mqattr.mq_maxmsg  = 100;
+    mqattr.mq_msgsize = sizeof(struct rtt_stat);
+    mq = mq_open(mqname, O_RDWR | O_CREAT | O_EXCL, 0600, &mqattr);
+    if (mq == (mqd_t)-1) {
+	perror("opening mqueue failed");
+	goto failure2;
+    }
+
+    /* Create receiver RT-thread */
+    pthread_attr_init(&thattr);
+    pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+    ret = pthread_create(&rxthread, &thattr, &receiver, NULL);
+    if (ret) {
+	fprintf(stderr, "%s: pthread_create(receiver) failed\n",
+		strerror(ret));
+	goto failure3;
+    }
+
+    if (!repeater) {
+	/* Create transmitter RT-thread */
+	ret = pthread_create(&txthread, &thattr, &transmitter, NULL);
+	if (ret) {
+	    fprintf(stderr, "%s: pthread_create(transmitter) failed\n",
+		    strerror(ret));
+	    goto failure4;
+	}
+    }
+
+    pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
+    if (repeater)
+	printf("Messages\n");
+    else
+	printf("Messages RTTlast RTT_avg RTT_min RTT_max Overruns\n");
+
+    while (1) {
+	long long rtt_avg;
+
+	ret = mq_receive(mq, (char *)&rtt_stat, sizeof(rtt_stat), NULL);
+	if (ret != sizeof(rtt_stat)) {
+	    if (ret < 0) {
+		if (errno == EBADF)
+		    printf("terminating mq_receive\n");
+		else
+		    perror("mq_receive failed");
+	    } else
+		fprintf(stderr,
+			"mq_receive returned invalid length %d\n", ret);
+	    break;
+	}
+
+	if (repeater) {
+	    printf("%8d\n", rxcount);
+	} else {
+	    rtt_avg = ((rtt_stat.rtt_sum - rtt_stat.rtt_sum_last) /
+		       rtt_stat.counts_per_sec);
+	    printf("%8d %7ld %7ld %7ld %7ld %8d\n", rxcount,
+		   (long)(rtt_stat.rtt / 1000), (long)(rtt_avg / 1000),
+		   (long)(rtt_stat.rtt_min / 1000),
+		   (long)(rtt_stat.rtt_max / 1000),
+		   overruns);
+	}
+    }
+
+    /* This call also leaves primary mode, required for socket cleanup. */
+    printf("shutting down\n");
+
+    /* Important: First close the sockets! */
+    close(rxsock);
+    close(txsock);
+    pthread_join(txthread, NULL);
+    pthread_cancel(rxthread);
+    pthread_join(rxthread, NULL);
+
+    return 0;
+
+ failure4:
+    pthread_cancel(rxthread);
+    pthread_join(rxthread, NULL);
+ failure3:
+    mq_close(mq);
+ failure2:
+    close(txsock);
+ failure1:
+    close(rxsock);
+
+    return 1;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c
new file mode 100644
index 0000000..c4cf0d6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/eth_p_all.c
@@ -0,0 +1,108 @@
+/***
+ *
+ *  demo/posix/cobalt/eth_p_all.c
+ *
+ *  ETH_P_ALL receiver - listens for all incoming packets and dumps them
+ *
+ *  Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  RTnet - real-time networking example
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+
+#include <signal.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <sys/socket.h>
+#include <sys/ioctl.h>
+#include <netpacket/packet.h>
+#include <net/ethernet.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+#include <netinet/ether.h>
+
+char buffer[10*1024];
+int sock;
+
+
+static void catch_signal(int sig)
+{
+	close(sock);
+}
+
+
+int main(int argc, char *argv[])
+{
+	struct sched_param param = { .sched_priority = 1 };
+	ssize_t len;
+	struct sockaddr_ll addr;
+	struct ether_header *eth = (struct ether_header *)buffer;
+
+
+	signal(SIGTERM, catch_signal);
+	signal(SIGINT, catch_signal);
+	signal(SIGHUP, catch_signal);
+	mlockall(MCL_CURRENT|MCL_FUTURE);
+
+	if ((sock = socket(PF_PACKET, SOCK_RAW, htons(ETH_P_ALL))) < 0) {
+		perror("socket cannot be created");
+		return EXIT_FAILURE;
+	}
+
+	if (argc > 1) {
+		struct ifreq ifr;
+
+		snprintf(ifr.ifr_name, IFNAMSIZ, "%s", argv[1]);
+		if (ioctl(sock, SIOCGIFINDEX, &ifr) < 0) {
+			perror("cannot get interface index");
+			close(sock);
+			return EXIT_FAILURE;
+		}
+
+		addr.sll_family	  = AF_PACKET;
+		addr.sll_protocol = htons(ETH_P_ALL);
+		addr.sll_ifindex  = ifr.ifr_ifindex;
+
+		if (bind(sock, (struct sockaddr *)&addr, sizeof(addr)) < 0) {
+			perror("cannot bind to local ip/port");
+			close(sock);
+			return EXIT_FAILURE;
+		}
+	}
+
+	pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
+	while (1) {
+		len = recv(sock, buffer, sizeof(buffer), 0);
+		if (len < 0)
+			break;
+
+		printf("from: %s type: %04x length=%zd\n",
+			ether_ntoa((struct ether_addr *)eth->ether_shost),
+			ntohs(eth->ether_type), len);
+	}
+
+	printf("shutting down\n");
+
+	return EXIT_SUCCESS;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c
new file mode 100644
index 0000000..b195d7e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/gpiopwm.c
@@ -0,0 +1,504 @@
+#include <xenomai/init.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <signal.h>
+#include <rtdm/gpiopwm.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <errno.h>
+#include <error.h>
+#include <stdio.h>
+#include <time.h>
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <netdb.h>
+#include <sys/types.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+#include <linux/netdevice.h>
+
+#define MIN_DUTY_CYCLE				(0)
+#define MAX_DUTY_CYCLE				(100)
+
+typedef void *(*gpiopwm_control_thread)(void *cookie);
+#define DEVICE_NAME "/dev/rtdm/gpiopwm"
+char *device_name;
+int dev;
+
+static sem_t synch;
+static sem_t setup;
+static int stop;
+static int step = 1;
+static int port = 66666;
+
+#define MAX_IP_INTERFACES 			(9)
+static char *ip_str[MAX_IP_INTERFACES + 1];
+static int last_ip;
+
+
+#define GPIO_PWM_SERVO_CONFIG			\
+{						\
+	.duty_cycle	=	50,		\
+	.range_min	=	950,		\
+	.range_max	=	2050,		\
+	.period		=	20000000,	\
+	.gpio		=	1,		\
+}
+
+static struct gpiopwm config = GPIO_PWM_SERVO_CONFIG;
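+
+/*
+ * The same settings can be requested from the command line; with the
+ * (hypothetical) device instance 0, the defaults above map to:
+ *
+ *   gpiopwm --config=0:950:2050:20000000:1:50 --manual
+ */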
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void sem_sync(sem_t *sem)
+{
+	int ret;
+
+	for (;;) {
+		ret = sem_wait(sem);
+		if (ret == 0)
+			return;
+		if (errno != EINTR)
+			fail("sem_wait");
+	}
+}
+
+static inline void clear_screen(void)
+{
+	const char* cmd = "\e[1;1H\e[2J";
+	int ret;
+
+	ret = write(2, cmd, strlen(cmd));
+	if (ret < 0)
+		error(1, errno, "clear screen error");
+}
+
+static inline void print_config(char *str)
+{
+	int i;
+
+	printf("Config: %s\n", str);
+	for (i = 0; i < last_ip ; i++)
+		printf("%s", ip_str[i]);
+	printf(" device     : %s\n", device_name);
+	printf(" range      : [%d, %d]\n", config.range_min, config.range_max);
+	printf(" period     : %d nsec\n", config.period);
+	printf(" gpio pin   : %d\n", config.gpio);
+	printf(" duty cycle : %d\n", config.duty_cycle);
+}
+
+static inline void input_message(void)
+{
+	print_config("");
+	printf("\n GPIO PWM Control\n");
+	printf( "  Enter duty_cycle [0-100] : ");
+}
+
+static void get_ip_addresses(void)
+{
+	char ip[INET_ADDRSTRLEN];
+	struct sockaddr_in *s_in;
+	struct ifconf ifconf;
+	struct ifreq ifr[10];
+	int ret;
+	int ifs;
+	int i;
+	int s;
+
+	s = socket(AF_INET, SOCK_STREAM, 0);
+	if (s < 0)
+		return;
+
+	ifconf.ifc_buf = (char *) ifr;
+	ifconf.ifc_len = sizeof(ifr);
+
+	if (ioctl(s, SIOCGIFCONF, &ifconf) == -1) {
+		close(s);
+		return;
+	}
+
+	ifs = ifconf.ifc_len / sizeof(ifr[0]);
+
+	/* we won't _display_ more than MAX_IP_INTERFACES */
+	if (ifs > MAX_IP_INTERFACES)
+		ifs = MAX_IP_INTERFACES;
+
+	last_ip = ifs + 1;
+
+	for (i = 0; i < ifs; i++) {
+		s_in = (struct sockaddr_in *) &ifr[i].ifr_addr;
+		if (!inet_ntop(AF_INET, &s_in->sin_addr, ip, sizeof(ip))) {
+			close(s);
+			return;
+		}
+		ret = asprintf(&ip_str[i]," ip      : %s\n", ip);
+		if (ret < 0)
+			perror("asprintf");
+	}
+
+	ret = asprintf(&ip_str[i]," port    : %d\n\n", port);
+	if (ret < 0)
+		perror("asprintf");
+
+	close(s);
+}
+
+static void setup_sched_parameters(pthread_attr_t *attr, int prio)
+{
+	struct sched_param p;
+	int ret;
+
+	ret = pthread_attr_init(attr);
+	if (ret)
+		error(1, ret, "pthread_attr_init()");
+
+	ret = pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
+	if (ret)
+		error(1, ret, "pthread_attr_setinheritsched()");
+
+	ret = pthread_attr_setschedpolicy(attr, prio ? SCHED_FIFO : SCHED_OTHER);
+	if (ret)
+		error(1, ret, "pthread_attr_setschedpolicy()");
+
+	p.sched_priority = prio;
+	ret = pthread_attr_setschedparam(attr, &p);
+	if (ret)
+		error(1, ret, "pthread_attr_setschedparam()");
+}
+
+static void *gpiopwm_init_thread(void *cookie)
+{
+	int ret;
+
+	pthread_setname_np(pthread_self(), "gpio-pwm-handler");
+	ret = ioctl(dev, GPIOPWM_RTIOC_SET_CONFIG, &config);
+	if (ret)
+		error(1, ret, "failed to set config");
+
+	ioctl(dev, GPIOPWM_RTIOC_START);
+
+	/* setup completed: allow handler to run */
+	sem_post(&setup);
+
+	/* wait for completion */
+	sem_sync(&synch);
+	ioctl(dev, GPIOPWM_RTIOC_STOP);
+
+	return NULL;
+}
+
+/*
+ * Controls the motor by receiving the duty cycle over UDP,
+ * e.g.: echo -n <duty_cycle> | nc -w1 -u <ipaddr> <port>
+ */
+static void *gpiopwm_udp_ctrl_thread(void *cookie)
+{
+	struct sockaddr_in saddr;
+	struct sockaddr_in caddr;
+	unsigned int duty_cycle;
+	const int blen = 4;
+	int optval = 1;
+	socklen_t clen;
+	char buf[blen];
+	int sockfd;
+	int ret;
+
+	pthread_setname_np(pthread_self(), "gpio-pwm.netcat");
+
+	sockfd = socket(AF_INET, SOCK_DGRAM, 0);
+	if (sockfd < 0)
+		perror("socket");
+
+	setsockopt(sockfd, SOL_SOCKET, SO_REUSEADDR, &optval, sizeof(int));
+
+	bzero((char *) &saddr, sizeof(saddr));
+	saddr.sin_addr.s_addr = htonl(INADDR_ANY);
+	saddr.sin_port = htons(port);
+	saddr.sin_family = AF_INET;
+
+	if (bind(sockfd, (struct sockaddr *)&saddr, sizeof(saddr)) < 0)
+		perror("bind");
+
+	clen = sizeof(caddr);
+	sem_sync(&setup);
+	for (;;) {
+
+		clear_screen();
+		print_config("UDP Server\n");
+
+		memset(buf,'\0', blen);
+		ret = recvfrom(sockfd, buf, blen - 1, 0, (struct sockaddr *)&caddr, &clen);
+		if (ret < 0)
+			perror("recvfrom");
+
+		/* Range-check as signed: a negative input would wrap
+		 * if parsed into the unsigned duty_cycle directly. */
+		long val = strtol(buf, NULL, 10);
+		if (val < MIN_DUTY_CYCLE || val > MAX_DUTY_CYCLE)
+			continue;
+		duty_cycle = val;
+
+		ret = ioctl(dev, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, duty_cycle);
+		if (ret)
+			break;
+
+		config.duty_cycle = duty_cycle;
+	}
+
+	return NULL;
+}
+
+/*
+ * Manual control of the pwm duty cycle.
+ */
+static void *gpiopwm_manual_ctrl_thread(void *cookie)
+{
+	unsigned int duty_cycle;
+	size_t len = 4;
+	char *in;
+	int ret;
+
+	pthread_setname_np(pthread_self(), "gpio-pwm.manual");
+
+	in = malloc(len * sizeof(*in));
+	if (!in)
+		goto err;
+
+	sem_sync(&setup);
+	for (;;) {
+		clear_screen();
+		input_message();
+
+		len = getline(&in, &len, stdin);
+		if (len == -1 || len == 1)
+			break;
+
+		duty_cycle = atoi(in);
+		if (!duty_cycle && strncmp(in, "000", len - 1) != 0)
+			break;
+
+		ret = ioctl(dev, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, duty_cycle);
+		if (ret) {
+			fprintf(stderr, "invalid duty cycle %d\n", duty_cycle);
+			break;
+		}
+
+		config.duty_cycle = duty_cycle;
+	}
+
+	free(in);
+err:
+	sem_post(&synch);
+
+	return NULL;
+}
+
+/*
+ * Continuously sweep all duty cycles 0..100 and 100..0.
+ * No mode switches should occur.
+ */
+static void *gpiopwm_sweep_ctrl_thread(void *cookie)
+{
+	struct timespec delay;
+	struct duty_values {
+		enum { fwd, bck} direction;
+		int x;
+	} values;
+	int ret;
+
+	pthread_setname_np(pthread_self(), "gpio-pwm.sweep");
+
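+	/* Pause ten base periods between duty-cycle steps, i.e. 200 ms
+	 * with the default 20 ms period configured above. */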
+	delay = (struct timespec) {.tv_sec = 0, .tv_nsec = 10 * config.period};
+	values = (struct duty_values) {.direction = fwd, .x = MIN_DUTY_CYCLE};
+
+	sem_sync(&setup);
+	for (;;) {
+		if (stop)
+			break;
+
+		ret = ioctl(dev, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, values.x);
+		if (ret) {
+			fprintf(stderr, "invalid duty cycle %d\n", values.x);
+			break;
+		}
+
+		nanosleep(&delay, NULL);
+
+		if (values.direction == bck) {
+			if (values.x - (step - 1) > MIN_DUTY_CYCLE)
+				values.x -= step;
+			else {
+				values.direction = fwd;
+				values.x = MIN_DUTY_CYCLE;
+				continue;
+			}
+		}
+
+		if (values.direction == fwd) {
+			if (values.x + (step - 1) < MAX_DUTY_CYCLE)
+				values.x += step;
+			else {
+				values.direction = bck;
+				values.x = MAX_DUTY_CYCLE;
+			}
+		}
+	}
+	sem_post(&synch);
+
+	return NULL;
+}
+
+static void gpiopwm_sweep_sig_handler(int sig)
+{
+	stop = 1;
+}
+
+static const struct option options[] = {
+	{
+#define help_opt		0
+		.name = "help",
+		.has_arg = 0,
+		.flag = NULL,
+	},
+	{
+#define sweep_range_opt		1
+		.name = "sweep",
+		.has_arg = 1,
+		.flag = NULL,
+	},
+	{
+#define manual_opt		2
+		.name = "manual",
+		.has_arg = 0,
+		.flag = NULL,
+	},
+	{
+#define config_opt		3
+		.name = "config",
+		.has_arg = 1,
+		.flag = NULL,
+	},
+	{
+#define udp_opt			4
+		.name = "udp",
+		.has_arg = 1,
+		.flag = NULL,
+	},
+	{
+		.name = NULL,
+	}
+};
+
+static void usage(void)
+{
+	fprintf(stderr, "Usage:\n"
+		"gpiopwm --config=dev:min:max:period:gpio:duty [--sweep=<step> | --udp=<port> | --manual]\n\n"
+		"--config=<..>\n"
+		"	dev:		/dev/rtdm/gpio-pwm id [0..7]\n"
+		"	min:		min active period in usec\n"
+		"	max:		max active period in usec\n"
+		"	period:		base signal period in nsec\n"
+		"	gpio:		gpio pin number\n"
+		"	duty:		default duty cycle [0..100]\n"
+		"--sweep=<step>\n"
+		"			sweep all duty cycle ranges in a loop\n"
+		"			in step increments [default 1]\n"
+		"--manual		input duty cycle from the command line\n"
+		"--udp=<port>		receive duty cycle from the network\n"
+		"			ie: echo -n <duty_cycle> | nc  -w1 -u <ipaddr> <port>\n"
+		);
+}
+
+int main(int argc, char *argv[])
+{
+	gpiopwm_control_thread handler = NULL;
+	pthread_t pwm_task, ctrl_task;
+	int opt, lindex, device = 0;
+	pthread_attr_t tattr;
+	char *p;
+	int ret;
+
+	for (;;) {
+		lindex = -1;
+		opt = getopt_long_only(argc, argv, "", options, &lindex);
+		if (opt == EOF)
+			break;
+
+		switch (lindex) {
+		case sweep_range_opt:
+			handler = gpiopwm_sweep_ctrl_thread;
+			signal(SIGINT, gpiopwm_sweep_sig_handler);
+			step = atoi(optarg);
+			step = step < 1 ? 1 : step;
+			break;
+		case manual_opt:
+			handler = gpiopwm_manual_ctrl_thread;
+			signal(SIGINT, SIG_IGN);
+			break;
+		case udp_opt:
+			handler = gpiopwm_udp_ctrl_thread;
+			port = atoi(optarg);
+			get_ip_addresses();
+			break;
+		case config_opt:
+			p = strtok(optarg,":");
+			device = p ? atoi(p): -1;
+			p = strtok(NULL,":");
+			config.range_min = p ? atoi(p): -1;
+			p = strtok(NULL,":");
+			config.range_max = p ? atoi(p): -1;
+			p = strtok(NULL,":");
+			config.period = p ? atoi(p): -1;
+			p = strtok(NULL,":");
+			config.gpio = p ? atoi(p): -1;
+			p = strtok(NULL,"");
+			config.duty_cycle = p ? atoi(p): -1;
+			break;
+		case help_opt:
+		default:
+			usage();
+			exit(1);
+		}
+	}
+
+	if (handler == NULL) {
+		usage();
+		exit(1);
+	}
+
+	ret = sem_init(&synch, 0, 0);
+	if (ret < 0)
+		error(1, errno, "can't create synch semaphore");
+
+	ret = sem_init(&setup, 0, 0);
+	if (ret < 0)
+		error(1, errno, "can't create setup semaphore");
+
+	ret = asprintf(&device_name, "%s%d", DEVICE_NAME, device);
+	if (ret < 0)
+		error(1, EINVAL, "can't create device name");
+
+	dev = open(device_name, O_RDWR);
+	if (dev < 0)
+		error(1, EINVAL, "can't open %s", device_name);
+
+	setup_sched_parameters(&tattr, 99);
+	ret = pthread_create(&ctrl_task, &tattr, handler, NULL);
+	if (ret)
+		error(1, ret, "pthread_create(ctrl_handler)");
+
+	setup_sched_parameters(&tattr, 98);
+	ret = pthread_create(&pwm_task, &tattr, gpiopwm_init_thread, NULL);
+	if (ret)
+		error(1, ret, "pthread_create(init thread)");
+
+	pthread_join(pwm_task, NULL);
+	pthread_join(ctrl_task, NULL);
+
+	pthread_attr_destroy(&tattr);
+
+	ret = close(dev);
+	if (ret < 0)
+		error(1, EINVAL, "can't close");
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c
new file mode 100644
index 0000000..06fc881
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-label.c
@@ -0,0 +1,238 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ *
+ * IDDP-based client/server demo, using the write(2)/recvfrom(2)
+ * system calls to exchange data over a socket.
+ *
+ * In this example, two sockets are created.  A server thread (reader)
+ * is bound to a labeled real-time port and receives datagrams sent to
+ * this port from a client thread (writer). The client thread attaches
+ * to the port opened by the server using a labeled connection
+ * request. The client socket is bound to a different port, only to
+ * provide a valid peer name; this is optional.
+ *
+ * ASCII labels can be attached to bound ports, in order to connect
+ * sockets to them in a more descriptive way than using plain numeric
+ * port values.
+ */
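+/*
+ * Once the server has bound its labeled port, it should appear under
+ * /proc/xenomai/registry/rtipc/iddp/ as "iddp-demo" (a path sketch,
+ * inferred from the binding comments below).
+ */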
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
+#include <errno.h>
+#include <rtdm/ipc.h>
+
+pthread_t svtid, cltid;
+
+#define IDDP_CLPORT  27
+
+#define IDDP_PORT_LABEL  "iddp-demo"
+
+static const char *msg[] = {
+    "Surfing With The Alien",
+    "Lords of Karma",
+    "Banana Mango",
+    "Psycho Monkey",
+    "Luminous Flesh Giants",
+    "Moroccan Sunset",
+    "Satch Boogie",
+    "Flying In A Blue Dream",
+    "Ride",
+    "Summer Song",
+    "Speed Of Light",
+    "Crystal Planet",
+    "Raspberry Jam Delta-V",
+    "Champagne?",
+    "Clouds Race Across The Sky",
+    "Engines Of Creation"
+};
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *server(void *arg)
+{
+	struct sockaddr_ipc saddr, claddr;
+	struct rtipc_port_label plabel;
+	socklen_t addrlen;
+	char buf[128];
+	int ret, s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+	if (s < 0)
+		fail("socket");
+
+	/*
+	 * We will use Xenomai's system heap for datagrams, so no
+	 * IDDP_POOLSZ is required here.
+	 */
+
+	/*
+	 * Set a port label. This name will be registered when
+	 * binding, in addition to the port number (if given).
+	 */
+	strcpy(plabel.label, IDDP_PORT_LABEL);
+	ret = setsockopt(s, SOL_IDDP, IDDP_LABEL,
+			 &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+
+	/*
+	 * Bind the socket to the port. Assign that port a label, so
+	 * that peers may use descriptive information to locate
+	 * it. Labeled ports will appear in the
+	 * /proc/xenomai/registry/rtipc/iddp directory once the socket
+	 * is bound.
+	 *
+	 * saddr.sipc_port specifies the port number to use. If -1 is
+	 * passed, the IDDP driver will auto-select an idle port.
+	 */
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = -1;	/* Pick next free */
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	for (;;) {
+		addrlen = sizeof(saddr);
+		ret = recvfrom(s, buf, sizeof(buf), 0,
+			       (struct sockaddr *)&claddr, &addrlen);
+		if (ret < 0) {
+			close(s);
+			fail("recvfrom");
+		}
+		printf("%s: received %d bytes, \"%.*s\" from port %d\n",
+			  __FUNCTION__, ret, ret, buf, claddr.sipc_port);
+	}
+
+	return NULL;
+}
+
+static void *client(void *arg)
+{
+	struct sockaddr_ipc svsaddr, clsaddr;
+	struct rtipc_port_label plabel;
+	int ret, s, n = 0, len;
+	struct timespec ts;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+	if (s < 0)
+		fail("socket");
+
+	/*
+	 * Set a name on the client socket. This is strictly optional,
+	 * and only done here for the purpose of getting back a
+	 * different port number in recvfrom().
+	 */
+	clsaddr.sipc_family = AF_RTIPC;
+	clsaddr.sipc_port = IDDP_CLPORT;
+	ret = bind(s, (struct sockaddr *)&clsaddr, sizeof(clsaddr));
+	if (ret)
+		fail("bind");
+
+	/*
+	 * Set the port label. This name will be used to find the peer
+	 * when connecting, instead of the port number. The label must
+	 * be set _after_ the socket is bound to the port, so that
+	 * IDDP does not try to register this label for the client
+	 * port as well (like the server thread did).
+	 */
+	strcpy(plabel.label, IDDP_PORT_LABEL);
+	ret = setsockopt(s, SOL_IDDP, IDDP_LABEL,
+			 &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+
+	memset(&svsaddr, 0, sizeof(svsaddr));
+	svsaddr.sipc_family = AF_RTIPC;
+	svsaddr.sipc_port = -1;	/* Tell IDDP to search by label. */
+	ret = connect(s, (struct sockaddr *)&svsaddr, sizeof(svsaddr));
+	if (ret)
+		fail("connect");
+
+	for (;;) {
+		len = strlen(msg[n]);
+		/* Send to default destination we connected to. */
+		ret = write(s, msg[n], len);
+		if (ret < 0) {
+			close(s);
+			fail("sendto");
+		}
+		printf("%s: sent %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, msg[n]);
+		n = (n + 1) % (sizeof(msg) / sizeof(msg[0]));
+		/*
+		 * We run in full real-time mode (i.e. primary mode),
+		 * so we have to let the system breathe between two
+		 * iterations.
+		 */
+		ts.tv_sec = 0;
+		ts.tv_nsec = 500000000; /* 500 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+int main(int argc, char **argv)
+{
+	struct sched_param svparam = {.sched_priority = 71 };
+	struct sched_param clparam = {.sched_priority = 70 };
+	pthread_attr_t svattr, clattr;
+	sigset_t set;
+	int sig;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+	sigaddset(&set, SIGTERM);
+	sigaddset(&set, SIGHUP);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	pthread_attr_init(&svattr);
+	pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&svattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&svattr, &svparam);
+
+	errno = pthread_create(&svtid, &svattr, &server, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&clattr);
+	pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&clattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&clattr, &clparam);
+
+	errno = pthread_create(&cltid, &clattr, &client, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	__STD(sigwait(&set, &sig));
+	pthread_cancel(svtid);
+	pthread_cancel(cltid);
+	pthread_join(svtid, NULL);
+	pthread_join(cltid, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c
new file mode 100644
index 0000000..31ee10f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/iddp-sendrecv.c
@@ -0,0 +1,194 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ *
+ * IDDP-based client/server demo, using the sendto(2)/recvfrom(2)
+ * system calls to exchange data over a socket.
+ *
+ * In this example, two sockets are created.  A server thread (reader)
+ * is bound to a real-time port and receives datagrams sent to this
+ * port from a client thread (writer). The client socket is bound to a
+ * different port, only to provide a valid peer name; this is
+ * optional.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
+#include <errno.h>
+#include <rtdm/ipc.h>
+
+pthread_t svtid, cltid;
+
+#define IDDP_SVPORT 12
+#define IDDP_CLPORT 13
+
+static const char *msg[] = {
+	"Surfing With The Alien",
+	"Lords of Karma",
+	"Banana Mango",
+	"Psycho Monkey",
+	"Luminous Flesh Giants",
+	"Moroccan Sunset",
+	"Satch Boogie",
+	"Flying In A Blue Dream",
+	"Ride",
+	"Summer Song",
+	"Speed Of Light",
+	"Crystal Planet",
+	"Raspberry Jam Delta-V",
+	"Champagne?",
+	"Clouds Race Across The Sky",
+	"Engines Of Creation"
+};
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *server(void *arg)
+{
+	struct sockaddr_ipc saddr, claddr;
+	socklen_t addrlen;
+	char buf[128];
+	size_t poolsz;
+	int ret, s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+	if (s < 0)
+		fail("socket");
+
+	/*
+	 * Set a local 32k pool for the server endpoint. Memory needed
+	 * to convey datagrams will be pulled from this pool, instead
+	 * of Xenomai's system pool.
+	 */
+	poolsz = 32768; /* bytes */
+	ret = setsockopt(s, SOL_IDDP, IDDP_POOLSZ,
+			 &poolsz, sizeof(poolsz));
+	if (ret)
+		fail("setsockopt");
+
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = IDDP_SVPORT;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	for (;;) {
+		addrlen = sizeof(saddr);
+		ret = recvfrom(s, buf, sizeof(buf), 0,
+			       (struct sockaddr *)&claddr, &addrlen);
+		if (ret < 0) {
+			close(s);
+			fail("recvfrom");
+		}
+		printf("%s: received %d bytes, \"%.*s\" from port %d\n",
+		       __FUNCTION__, ret, ret, buf, claddr.sipc_port);
+	}
+
+	return NULL;
+}
+
+static void *client(void *arg)
+{
+	struct sockaddr_ipc svsaddr, clsaddr;
+	int ret, s, n = 0, len;
+	struct timespec ts;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+	if (s < 0)
+		fail("socket");
+
+	clsaddr.sipc_family = AF_RTIPC;
+	clsaddr.sipc_port = IDDP_CLPORT;
+	ret = bind(s, (struct sockaddr *)&clsaddr, sizeof(clsaddr));
+	if (ret)
+		fail("bind");
+
+	svsaddr.sipc_family = AF_RTIPC;
+	svsaddr.sipc_port = IDDP_SVPORT;
+	for (;;) {
+		len = strlen(msg[n]);
+		ret = sendto(s, msg[n], len, 0,
+			     (struct sockaddr *)&svsaddr, sizeof(svsaddr));
+		if (ret < 0) {
+			close(s);
+			fail("sendto");
+		}
+		printf("%s: sent %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, msg[n]);
+		n = (n + 1) % (sizeof(msg) / sizeof(msg[0]));
+		/*
+		 * We run in full real-time mode (i.e. primary mode),
+		 * so we have to let the system breathe between two
+		 * iterations.
+		 */
+		ts.tv_sec = 0;
+		ts.tv_nsec = 500000000; /* 500 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+int main(int argc, char **argv)
+{
+	struct sched_param svparam = {.sched_priority = 71 };
+	struct sched_param clparam = {.sched_priority = 70 };
+	pthread_attr_t svattr, clattr;
+	sigset_t set;
+	int sig;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+	sigaddset(&set, SIGTERM);
+	sigaddset(&set, SIGHUP);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	pthread_attr_init(&svattr);
+	pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&svattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&svattr, &svparam);
+
+	errno = pthread_create(&svtid, &svattr, &server, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&clattr);
+	pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&clattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&clattr, &clparam);
+
+	errno = pthread_create(&cltid, &clattr, &client, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	__STD(sigwait(&set, &sig));
+	pthread_cancel(svtid);
+	pthread_cancel(cltid);
+	pthread_join(svtid, NULL);
+	pthread_join(cltid, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c
new file mode 100644
index 0000000..ba85582
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-echo.c
@@ -0,0 +1,248 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ *
+ * XDDP-based RT/NRT threads communication demo.
+ *
+ * Real-time Xenomai threads and regular Linux threads may want to
+ * exchange data in a way that does not require the former to leave
+ * the real-time domain (i.e. secondary mode). Message pipes - as
+ * implemented by the RTDM-based XDDP protocol - are provided for this
+ * purpose.
+ *
+ * On the Linux domain side, pseudo-device files named /dev/rtp<minor>
+ * give regular POSIX threads access to non real-time communication
+ * endpoints, via the standard character-based I/O interface. On the
+ * Xenomai domain side, sockets may be bound to XDDP ports, which act
+ * as proxies to send and receive data to/from the associated
+ * pseudo-device files. Ports and pseudo-device minor numbers are
+ * paired, meaning that e.g. port 7 will proxy the traffic for
+ * /dev/rtp7. Therefore, port numbers may range from 0 to
+ * CONFIG_XENO_OPT_PIPE_NRDEV - 1.
+ *
+ * All data sent through a bound/connected XDDP socket via sendto(2) or
+ * write(2) will be passed to the peer endpoint in the Linux domain,
+ * and made available for reading via the standard read(2) system
+ * call. Conversely, all data sent using write(2) through the non
+ * real-time endpoint will be conveyed to the real-time socket
+ * endpoint, and made available to the recvfrom(2) or read(2) system
+ * calls.
+ *
+ * Both threads can use the bi-directional data path to send and
+ * receive datagrams in a FIFO manner, as illustrated by the simple
+ * echoing process implemented by this program.
+ *
+ * realtime_thread------------------------------>-------+
+ *   =>  get socket                                     |
+ *   =>  bind socket to port 0                          v
+ *   =>  write traffic to NRT domain via sendto()       |
+ *   =>  read traffic from NRT domain via recvfrom() <--|--+
+ *                                                      |  |
+ * regular_thread---------------------------------------+  |
+ *   =>  open /dev/rtp0                                 |  ^
+ *   =>  read traffic from RT domain via read()         |  |
+ *   =>  echo traffic back to RT domain via write()     +--+
+ */
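+/*
+ * Since the NRT endpoint is a plain character device, the echo path
+ * can also be exercised from a shell in place of regular_thread(),
+ * e.g. (a sketch, assuming XDDP_PORT 0 as defined below):
+ *
+ *   cat /dev/rtp0          # dump traffic coming from the RT domain
+ *   echo hi > /dev/rtp0    # inject a datagram towards the RT domain
+ */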
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <rtdm/ipc.h>
+
+pthread_t rt, nrt;
+
+#define XDDP_PORT 0	/* [0..CONFIG_XENO_OPT_PIPE_NRDEV - 1] */
+
+static const char *msg[] = {
+	"Surfing With The Alien",
+	"Lords of Karma",
+	"Banana Mango",
+	"Psycho Monkey",
+	"Luminous Flesh Giants",
+	"Moroccan Sunset",
+	"Satch Boogie",
+	"Flying In A Blue Dream",
+	"Ride",
+	"Summer Song",
+	"Speed Of Light",
+	"Crystal Planet",
+	"Raspberry Jam Delta-V",
+	"Champagne?",
+	"Clouds Race Across The Sky",
+	"Engines Of Creation"
+};
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *realtime_thread(void *arg)
+{
+	struct sockaddr_ipc saddr;
+	int ret, s, n = 0, len;
+	struct timespec ts;
+	size_t poolsz;
+	char buf[128];
+
+	/*
+	 * Get a datagram socket to bind to the RT endpoint. Each
+	 * endpoint is represented by a port number within the XDDP
+	 * protocol namespace.
+	 */
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	if (s < 0) {
+		perror("socket");
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Set a local 16k pool for the RT endpoint. Memory needed to
+	 * convey datagrams will be pulled from this pool, instead of
+	 * Xenomai's system pool.
+	 */
+	poolsz = 16384; /* bytes */
+	ret = setsockopt(s, SOL_XDDP, XDDP_POOLSZ,
+			 &poolsz, sizeof(poolsz));
+	if (ret)
+		fail("setsockopt");
+
+	/*
+	 * Bind the socket to the port, to setup a proxy to channel
+	 * traffic to/from the Linux domain.
+	 *
+	 * saddr.sipc_port specifies the port number to use.
+	 */
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = XDDP_PORT;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	for (;;) {
+		len = strlen(msg[n]);
+		/*
+		 * Send a datagram to the NRT endpoint via the proxy.
+		 * We may pass a NULL destination address, since a
+		 * bound socket is assigned a default destination
+		 * address matching the binding address (unless
+		 * connect(2) was issued before bind(2), in which case
+		 * the former would prevail).
+		 */
+		ret = sendto(s, msg[n], len, 0, NULL, 0);
+		if (ret != len)
+			fail("sendto");
+
+		printf("%s: sent %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, msg[n]);
+
+		/* Read back packets echoed by the regular thread */
+		ret = recvfrom(s, buf, sizeof(buf), 0, NULL, 0);
+		if (ret <= 0)
+			fail("recvfrom");
+
+		printf("   => \"%.*s\" echoed by peer\n", ret, buf);
+
+		n = (n + 1) % (sizeof(msg) / sizeof(msg[0]));
+		/*
+		 * We run in full real-time mode (i.e. primary mode),
+		 * so we have to let the system breathe between two
+		 * iterations.
+		 */
+		ts.tv_sec = 0;
+		ts.tv_nsec = 500000000; /* 500 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+static void *regular_thread(void *arg)
+{
+	char buf[128], *devname;
+	int fd, ret;
+
+	if (asprintf(&devname, "/dev/rtp%d", XDDP_PORT) < 0)
+		fail("asprintf");
+
+	fd = open(devname, O_RDWR);
+	free(devname);
+	if (fd < 0)
+		fail("open");
+
+	for (;;) {
+		/* Get the next message from realtime_thread. */
+		ret = read(fd, buf, sizeof(buf));
+		if (ret <= 0)
+			fail("read");
+
+		/* Echo the message back to realtime_thread. */
+		ret = write(fd, buf, ret);
+		if (ret <= 0)
+			fail("write");
+	}
+
+	return NULL;
+}
+
+int main(int argc, char **argv)
+{
+	struct sched_param rtparam = { .sched_priority = 42 };
+	pthread_attr_t rtattr, regattr;
+	sigset_t set;
+	int sig;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+	sigaddset(&set, SIGTERM);
+	sigaddset(&set, SIGHUP);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	pthread_attr_init(&rtattr);
+	pthread_attr_setdetachstate(&rtattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&rtattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&rtattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&rtattr, &rtparam);
+
+	errno = pthread_create(&rt, &rtattr, &realtime_thread, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&regattr);
+	pthread_attr_setdetachstate(&regattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&regattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&regattr, SCHED_OTHER);
+
+	errno = pthread_create(&nrt, &regattr, &regular_thread, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	__STD(sigwait(&set, &sig));
+	pthread_cancel(rt);
+	pthread_cancel(nrt);
+	pthread_join(rt, NULL);
+	pthread_join(nrt, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c
new file mode 100644
index 0000000..9de31fd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-label.c
@@ -0,0 +1,329 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ *
+ * XDDP-based RT/NRT threads communication demo.
+ *
+ * Real-time Xenomai threads and regular Linux threads may want to
+ * exchange data in a way that does not require the former to leave
+ * the real-time domain (i.e. secondary mode). Message pipes - as
+ * implemented by the RTDM-based XDDP protocol - are provided for this
+ * purpose.
+ *
+ * On the Linux domain side, pseudo-device files named /dev/rtp<minor>
+ * give regular POSIX threads access to non real-time communication
+ * endpoints, via the standard character-based I/O interface. On the
+ * Xenomai domain side, sockets may be bound to XDDP ports, which act
+ * as proxies to send and receive data to/from the associated
+ * pseudo-device files. Ports and pseudo-device minor numbers are
+ * paired, meaning that e.g. port 7 will proxy the traffic for
+ * /dev/rtp7. Therefore, port numbers may range from 0 to
+ * CONFIG_XENO_OPT_PIPE_NRDEV - 1.
+ *
+ * All data sent through a bound/connected XDDP socket via sendto(2) or
+ * write(2) will be passed to the peer endpoint in the Linux domain,
+ * and made available for reading via the standard read(2) system
+ * call. Conversely, all data sent using write(2) through the non
+ * real-time endpoint will be conveyed to the real-time socket
+ * endpoint, and made available to the recvfrom(2) or read(2) system
+ * calls.
+ *
+ * ASCII labels can be attached to bound ports, in order to connect
+ * sockets to them in a more descriptive way than using plain numeric
+ * port values.
+ *
+ * The example code below illustrates the following process:
+ *
+ * realtime_thread1----------------------------->----------+
+ *   =>  get socket                                        |
+ *   =>  bind socket to port "xddp-demo                    |
+ *   =>  read traffic from NRT domain via recvfrom()    <--+--+
+ *                                                         |  |
+ * realtime_thread2----------------------------------------+  |
+ *   =>  get socket                                        |  |
+ *   =>  connect socket to port "xddp-demo"                |  |
+ *   =>  write traffic to NRT domain via sendto()          v  |
+ *                                                         |  ^
+ * regular_thread------------------------------------------+  |
+ *   =>  open /proc/xenomai/registry/rtipc/xddp/xddp-demo  |  |
+ *   =>  read traffic from RT domain via read()            |  |
+ *   =>  mirror traffic to RT domain via write()           +--+
+ */
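+/*
+ * Discovery is done by label, so the demo does not depend on which
+ * port number the XDDP driver picks. From a shell, the registry path
+ * can be used like a /dev/rtp node once realtime_thread1 is bound,
+ * e.g. (a sketch, path taken from the diagram above):
+ *
+ *   cat /proc/xenomai/registry/rtipc/xddp/xddp-demo
+ */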
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <rtdm/ipc.h>
+
+pthread_t rt1, rt2, nrt;
+
+#define XDDP_PORT_LABEL  "xddp-demo"
+
+static const char *msg[] = {
+	"Surfing With The Alien",
+	"Lords of Karma",
+	"Banana Mango",
+	"Psycho Monkey",
+	"Luminous Flesh Giants",
+	"Moroccan Sunset",
+	"Satch Boogie",
+	"Flying In A Blue Dream",
+	"Ride",
+	"Summer Song",
+	"Speed Of Light",
+	"Crystal Planet",
+	"Raspberry Jam Delta-V",
+	"Champagne?",
+	"Clouds Race Across The Sky",
+	"Engines Of Creation"
+};
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *realtime_thread1(void *arg)
+{
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc saddr;
+	char buf[128];
+	int ret, s;
+
+	/*
+	 * Get a datagram socket to bind to the RT endpoint. Each
+	 * endpoint is represented by a port number within the XDDP
+	 * protocol namespace.
+	 */
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	if (s < 0) {
+		perror("socket");
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Set a port label. This name will be registered when
+	 * binding, in addition to the port number (if given).
+	 */
+	strcpy(plabel.label, XDDP_PORT_LABEL);
+	ret = setsockopt(s, SOL_XDDP, XDDP_LABEL,
+			 &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+	/*
+	 * Bind the socket to the port, to setup a proxy to channel
+	 * traffic to/from the Linux domain. Assign that port a label,
+	 * so that peers may use descriptive information to locate
+	 * it. For instance, the pseudo-device matching our RT
+	 * endpoint will appear as
+	 * /proc/xenomai/registry/rtipc/xddp/<XDDP_PORT_LABEL> in the
+	 * Linux domain, once the socket is bound.
+	 *
+	 * saddr.sipc_port specifies the port number to use. If -1 is
+	 * passed, the XDDP driver will auto-select an idle port.
+	 */
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = -1;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	for (;;) {
+		/* Get packets relayed by the regular thread */
+		ret = recvfrom(s, buf, sizeof(buf), 0, NULL, 0);
+		if (ret <= 0)
+			fail("recvfrom");
+
+		printf("%s: \"%.*s\" relayed by peer\n", __FUNCTION__, ret, buf);
+	}
+
+	return NULL;
+}
+
+static void *realtime_thread2(void *arg)
+{
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc saddr;
+	int ret, s, n = 0, len;
+	struct timespec ts;
+	struct timeval tv;
+	socklen_t addrlen;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	if (s < 0) {
+		perror("socket");
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Set the socket timeout; it will apply when attempting to
+	 * connect to a labeled port, and to recvfrom() calls.  The
+	 * following setup tells the XDDP driver to wait at most one
+	 * second for a socket to be bound to a port using the same
+	 * label, returning a timeout error otherwise.
+	 */
+	tv.tv_sec = 1;
+	tv.tv_usec = 0;
+	ret = setsockopt(s, SOL_SOCKET, SO_RCVTIMEO,
+			 &tv, sizeof(tv));
+	if (ret)
+		fail("setsockopt");
+
+	/*
+	 * Set a port label. This name will be used to find the peer
+	 * when connecting, instead of the port number.
+	 */
+	strcpy(plabel.label, XDDP_PORT_LABEL);
+	ret = setsockopt(s, SOL_XDDP, XDDP_LABEL,
+			 &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = -1;	/* Tell XDDP to search by label. */
+	ret = connect(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("connect");
+
+	/*
+	 * We succeeded in making the port our default destination
+	 * address by using its label, but we don't know its actual
+	 * port number yet. Use getpeername() to retrieve it.
+	 */
+	addrlen = sizeof(saddr);
+	ret = getpeername(s, (struct sockaddr *)&saddr, &addrlen);
+	if (ret || addrlen != sizeof(saddr))
+		fail("getpeername");
+
+	printf("%s: NRT peer is reading from /dev/rtp%d\n",
+	       __FUNCTION__, saddr.sipc_port);
+
+	for (;;) {
+		len = strlen(msg[n]);
+		/*
+		 * Send a datagram to the NRT endpoint via the proxy.
+		 * We may pass a NULL destination address, since the
+		 * socket was successfully assigned the proper default
+		 * address via connect(2).
+		 */
+		ret = sendto(s, msg[n], len, 0, NULL, 0);
+		if (ret != len)
+			fail("sendto");
+
+		printf("%s: sent %d bytes, \"%.*s\"\n",
+		       __FUNCTION__, ret, ret, msg[n]);
+
+		n = (n + 1) % (sizeof(msg) / sizeof(msg[0]));
+		/*
+		 * We run in full real-time mode (i.e. primary mode),
+		 * so we have to let the system breathe between two
+		 * iterations.
+		 */
+		ts.tv_sec = 0;
+		ts.tv_nsec = 500000000; /* 500 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+static void *regular_thread(void *arg)
+{
+	char buf[128], *devname;
+	int fd, ret;
+
+	if (asprintf(&devname,
+		     "/proc/xenomai/registry/rtipc/xddp/%s",
+		     XDDP_PORT_LABEL) < 0)
+		fail("asprintf");
+
+	fd = open(devname, O_RDWR);
+	free(devname);
+	if (fd < 0)
+		fail("open");
+
+	for (;;) {
+		/* Get the next message from realtime_thread2. */
+		ret = read(fd, buf, sizeof(buf));
+		if (ret <= 0)
+			fail("read");
+
+		/* Relay the message to realtime_thread1. */
+		ret = write(fd, buf, ret);
+		if (ret <= 0)
+			fail("write");
+	}
+
+	return NULL;
+}
+
+int main(int argc, char **argv)
+{
+	struct sched_param rtparam = { .sched_priority = 42 };
+	pthread_attr_t rtattr, regattr;
+	sigset_t set;
+	int sig;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+	sigaddset(&set, SIGTERM);
+	sigaddset(&set, SIGHUP);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	pthread_attr_init(&rtattr);
+	pthread_attr_setdetachstate(&rtattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&rtattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&rtattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&rtattr, &rtparam);
+
+	/* Both real-time threads have the same attribute set. */
+
+	errno = pthread_create(&rt1, &rtattr, &realtime_thread1, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	errno = pthread_create(&rt2, &rtattr, &realtime_thread2, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&regattr);
+	pthread_attr_setdetachstate(&regattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&regattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&regattr, SCHED_OTHER);
+
+	errno = pthread_create(&nrt, &regattr, &regular_thread, NULL);
+	if (errno)
+		fail("pthread_create");
+
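+	/*
+	 * Wait for a termination signal. The __STD() prefix ensures
+	 * the plain glibc sigwait() is called here rather than a
+	 * Xenomai-wrapped version, since this code runs in regular
+	 * Linux context.
+	 */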
+	__STD(sigwait(&set, &sig));
+	pthread_cancel(rt1);
+	pthread_cancel(rt2);
+	pthread_cancel(nrt);
+	pthread_join(rt1, NULL);
+	pthread_join(rt2, NULL);
+	pthread_join(nrt, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c
new file mode 100644
index 0000000..e537294
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cobalt/xddp-stream.c
@@ -0,0 +1,254 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ *
+ * XDDP-based RT/NRT threads communication demo.
+ *
+ * Real-time Xenomai threads and regular Linux threads may want to
+ * exchange data in a way that does not require the former to leave
+ * the real-time domain (i.e. secondary mode). Message pipes - as
+ * implemented by the RTDM-based XDDP protocol - are provided for this
+ * purpose.
+ *
+ * On the Linux domain side, pseudo-device files named /dev/rtp<minor>
+ * give regular POSIX threads access to non real-time communication
+ * endpoints, via the standard character-based I/O interface. On the
+ * Xenomai domain side, sockets may be bound to XDDP ports, which act
+ * as proxies to send and receive data to/from the associated
+ * pseudo-device files. Ports and pseudo-device minor numbers are
+ * paired, meaning that e.g. port 7 will proxy the traffic for
+ * /dev/rtp7. Therefore, port numbers may range from 0 to
+ * CONFIG_XENO_OPT_PIPE_NRDEV - 1.
+ *
+ * All data sent through a bound/connected XDDP socket via sendto(2) or
+ * write(2) will be passed to the peer endpoint in the Linux domain,
+ * and made available for reading via the standard read(2) system
+ * call. Conversely, all data sent using write(2) through the non
+ * real-time endpoint will be conveyed to the real-time socket
+ * endpoint, and made available to the recvfrom(2) or read(2) system
+ * calls.
+ *
+ * In addition to sending datagrams, real-time threads may stream data
+ * in a byte-oriented mode through the proxy as well. This increases
+ * bandwidth and reduces overhead when a lot of data has to flow
+ * down to the Linux domain and message boundaries need not be
+ * preserved. The example code below illustrates such use.
+ *
+ * realtime_thread-------------------------------------->----------+
+ *   =>  get socket                                                |
+ *   =>  bind socket to port 0                                     v
+ *   =>  write scattered traffic to NRT domain via sendto()        |
+ *   =>  read traffic from NRT domain via recvfrom()            <--|--+
+ *                                                                 |  |
+ * regular_thread--------------------------------------------------+  |
+ *   =>  open /dev/rtp0                                            |  ^
+ *   =>  read traffic from RT domain via read()                    |  |
+ *   =>  echo traffic back to RT domain via write()                +--+
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <rtdm/ipc.h>
+
+pthread_t rt, nrt;
+
+#define XDDP_PORT 0	/* [0..CONFIG_XENO_OPT_PIPE_NRDEV - 1] */
+
+static const char *msg[] = {
+	"Surfing With The Alien",
+	"Lords of Karma",
+	"Banana Mango",
+	"Psycho Monkey",
+	"Luminous Flesh Giants",
+	"Moroccan Sunset",
+	"Satch Boogie",
+	"Flying In A Blue Dream",
+	"Ride",
+	"Summer Song",
+	"Speed Of Light",
+	"Crystal Planet",
+	"Raspberry Jam Delta-V",
+	"Champagne?",
+	"Clouds Race Across The Sky",
+	"Engines Of Creation"
+};
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *realtime_thread(void *arg)
+{
+	struct sockaddr_ipc saddr;
+	int ret, s, n = 0, len, b;
+	struct timespec ts;
+	size_t streamsz;
+	char buf[128];
+
+	/*
+	 * Get a datagram socket to bind to the RT endpoint. Each
+	 * endpoint is represented by a port number within the XDDP
+	 * protocol namespace.
+	 */
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	if (s < 0) {
+		perror("socket");
+		exit(EXIT_FAILURE);
+	}
+
+	/*
+	 * Tell the XDDP driver that we will use the streaming
+	 * capabilities on this socket. To this end, we have to
+	 * specify the size of the streaming buffer, as a count of
+	 * bytes. The real-time output will be buffered up to that
+	 * amount, and sent as a single datagram to the NRT endpoint
+	 * when fully gathered, or when another source port attempts
+	 * to send data to the same endpoint. Passing a null size
+	 * would disable streaming.
+	 */
+	streamsz = 1024; /* bytes */
+	ret = setsockopt(s, SOL_XDDP, XDDP_BUFSZ,
+			 &streamsz, sizeof(streamsz));
+	if (ret)
+		fail("setsockopt");
+	/*
+	 * Bind the socket to the port, to setup a proxy to channel
+	 * traffic to/from the Linux domain.
+	 *
+	 * saddr.sipc_port specifies the port number to use.
+	 */
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = XDDP_PORT;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	for (;;) {
+		len = strlen(msg[n]);
+		/*
+		 * Send a datagram to the NRT endpoint via the proxy.
+		 * The output is artificially scattered in separate
+		 * one-byte sendings, to illustrate the use of
+		 * MSG_MORE.
+		 */
+		for (b = 0; b < len; b++) {
+			ret = sendto(s, msg[n] + b, 1, MSG_MORE, NULL, 0);
+			if (ret != 1)
+				fail("sendto");
+		}
+
+		printf("%s: sent (scattered) %d-bytes message, \"%.*s\"\n",
+		       __FUNCTION__, len, len, msg[n]);
+
+		/* Read back packets echoed by the regular thread */
+		ret = recvfrom(s, buf, sizeof(buf), 0, NULL, 0);
+		if (ret <= 0)
+			fail("recvfrom");
+
+		printf("   => \"%.*s\" echoed by peer\n", ret, buf);
+
+		n = (n + 1) % (sizeof(msg) / sizeof(msg[0]));
+		/*
+		 * We run in full real-time mode (i.e. primary mode),
+		 * so we have to let the system breathe between two
+		 * iterations.
+		 */
+		ts.tv_sec = 0;
+		ts.tv_nsec = 500000000; /* 500 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+static void *regular_thread(void *arg)
+{
+	char buf[128], *devname;
+	int fd, ret;
+
+	if (asprintf(&devname, "/dev/rtp%d", XDDP_PORT) < 0)
+		fail("asprintf");
+
+	fd = open(devname, O_RDWR);
+	free(devname);
+	if (fd < 0)
+		fail("open");
+
+	for (;;) {
+		/* Get the next message from realtime_thread. */
+		ret = read(fd, buf, sizeof(buf));
+		if (ret <= 0)
+			fail("read");
+
+		/* Echo the message back to realtime_thread. */
+		ret = write(fd, buf, ret);
+		if (ret <= 0)
+			fail("write");
+	}
+
+	return NULL;
+}
+
+int main(int argc, char **argv)
+{
+	struct sched_param rtparam = { .sched_priority = 42 };
+	pthread_attr_t rtattr, regattr;
+	sigset_t set;
+	int sig;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+	sigaddset(&set, SIGTERM);
+	sigaddset(&set, SIGHUP);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	pthread_attr_init(&rtattr);
+	pthread_attr_setdetachstate(&rtattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&rtattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&rtattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&rtattr, &rtparam);
+
+	errno = pthread_create(&rt, &rtattr, &realtime_thread, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&regattr);
+	pthread_attr_setdetachstate(&regattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&regattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&regattr, SCHED_OTHER);
+
+	errno = pthread_create(&nrt, &regattr, &regular_thread, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	__STD(sigwait(&set, &sig));
+	pthread_cancel(rt);
+	pthread_cancel(nrt);
+	pthread_join(rt, NULL);
+	pthread_join(nrt, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am
new file mode 100644
index 0000000..248e406
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/Makefile.am
@@ -0,0 +1,33 @@
+demodir = @XENO_DEMO_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+VERSION_STRING = 0.92
+
+demo_PROGRAMS = cyclictest
+
+cyclictest_CPPFLAGS = 				\
+	$(XENO_USER_CFLAGS)			\
+	-I$(top_srcdir)/include			\
+	-DVERSION_STRING=$(VERSION_STRING)	\
+	-Wno-strict-prototypes			\
+	-Wno-implicit-function-declaration	\
+	-Wno-missing-prototypes			\
+	-Wno-nonnull				\
+	-Wno-unused-function
+
+cyclictest_SOURCES =	\
+	cyclictest.c	\
+	error.c		\
+	error.h		\
+	rt_numa.h	\
+	rt-sched.h	\
+	rt-utils.c	\
+	rt-utils.h
+
+cyclictest_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+cyclictest_LDADD =		\
+	@XENO_CORE_LDADD@	\
+	@XENO_USER_LDADD@ 	\
+	-lpthread -lrt -lm
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/README b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/README
new file mode 100644
index 0000000..8186b04
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/README
@@ -0,0 +1,5 @@
+
+This is the original cyclictest program from the PREEMPT-RT test
+suite as of version 0.92.
+
+See git://git.kernel.org/pub/scm/linux/kernel/git/clrkwllms/rt-tests.git
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c
new file mode 100644
index 0000000..b92596c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/cyclictest.c
@@ -0,0 +1,2269 @@
+/*
+ * High resolution timer test software
+ *
+ * (C) 2013      Clark Williams <williams@redhat.com>
+ * (C) 2013      John Kacur <jkacur@redhat.com>
+ * (C) 2008-2012 Clark Williams <williams@redhat.com>
+ * (C) 2005-2007 Thomas Gleixner <tglx@linutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License Version
+ * 2 as published by the Free Software Foundation.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <getopt.h>
+#include <pthread.h>
+#include <signal.h>
+#include <sched.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include <limits.h>
+#include <linux/unistd.h>
+
+#include <sys/prctl.h>
+#include <sys/stat.h>
+#include <sys/sysinfo.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <sys/utsname.h>
+#include <sys/mman.h>
+#include "rt_numa.h"
+
+#include "rt-utils.h"
+
+#define DEFAULT_INTERVAL 1000
+#define DEFAULT_DISTANCE 500
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+/* Ugly, but .... */
+#define gettid() syscall(__NR_gettid)
+#define sigev_notify_thread_id _sigev_un._tid
+
+#define USEC_PER_SEC		1000000
+#define NSEC_PER_SEC		1000000000
+
+#define HIST_MAX		1000000
+
+#define MODE_CYCLIC		0
+#define MODE_CLOCK_NANOSLEEP	1
+#define MODE_SYS_ITIMER		2
+#define MODE_SYS_NANOSLEEP	3
+#define MODE_SYS_OFFSET		2
+
+#define TIMER_RELTIME		0
+
+/* Must be power of 2 ! */
+#define VALBUF_SIZE		16384
+
+#define KVARS			32
+#define KVARNAMELEN		32
+#define KVALUELEN		32
+
+int enable_events;
+
+static char *policyname(int policy);
+
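+/*
+ * write(2) is declared warn_unused_result in recent glibc; capturing
+ * the result into a dummy variable keeps the compiler quiet while
+ * deliberately ignoring short or failed writes.
+ */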
+#define write_check(__fd, __buf, __len)			\
+	do {						\
+		int __ret = write(__fd, __buf, __len);	\
+		(void)__ret;				\
+	} while (0)
+
+enum {
+	NOTRACE,
+	CTXTSWITCH,
+	IRQSOFF,
+	PREEMPTOFF,
+	PREEMPTIRQSOFF,
+	WAKEUP,
+	WAKEUPRT,
+	LATENCY,
+	FUNCTION,
+	CUSTOM,
+};
+
+/* Struct to transfer parameters to the thread */
+struct thread_param {
+	int prio;
+	int policy;
+	int mode;
+	int timermode;
+	int signal;
+	int clock;
+	unsigned long max_cycles;
+	struct thread_stat *stats;
+	int bufmsk;
+	unsigned long interval;
+	int cpu;
+	int node;
+	int tnum;
+};
+
+/* Struct for statistics */
+struct thread_stat {
+	unsigned long cycles;
+	unsigned long cyclesread;
+	long min;
+	long max;
+	long act;
+	double avg;
+	long *values;
+	long *hist_array;
+	long *outliers;
+	pthread_t thread;
+	int threadstarted;
+	int tid;
+	long reduce;
+	long redmax;
+	long cycleofmax;
+	long hist_overflow;
+	long num_outliers;
+};
+
+static int shutdown;
+static int tracelimit = 0;
+static int notrace = 0;
+static int ftrace = 0;
+static int kernelversion;
+static int verbose = 0;
+static int oscope_reduction = 1;
+static int lockall = 0;
+static int tracetype = NOTRACE;
+static int histogram = 0;
+static int histofall = 0;
+static int duration = 0;
+static int use_nsecs = 0;
+static int refresh_on_max;
+static int force_sched_other;
+static int priospread = 0;
+static int check_clock_resolution;
+static int ct_debug;
+static int use_fifo = 0;
+static pthread_t fifo_threadid;
+static int aligned = 0;
+static int secaligned = 0;
+static int offset = 0;
+static int laptop = 0;
+
+static pthread_cond_t refresh_on_max_cond = PTHREAD_COND_INITIALIZER;
+static pthread_mutex_t refresh_on_max_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static pthread_mutex_t break_thread_id_lock = PTHREAD_MUTEX_INITIALIZER;
+static pid_t break_thread_id = 0;
+static uint64_t break_thread_value = 0;
+
+static struct timespec globalt;
+
+/* Backup of kernel variables that we modify */
+static struct kvars {
+	char name[KVARNAMELEN];
+	char value[KVALUELEN];
+} kv[KVARS];
+
+static char *procfileprefix = "/proc/sys/kernel/";
+static char *fileprefix;
+static char tracer[MAX_PATH];
+static char fifopath[MAX_PATH];
+static char **traceptr;
+static int traceopt_count;
+static int traceopt_size;
+
+static struct thread_param **parameters;
+static struct thread_stat **statistics;
+
+static void print_stat(FILE *fp, struct thread_param *par, int index, int verbose, int quiet);
+
+static int latency_target_fd = -1;
+static int32_t latency_target_value = 0;
+
+/* Latency trick
+ * if the file /dev/cpu_dma_latency exists,
+ * open it and write a zero into it. This will tell
+ * the power management system not to transition to
+ * a deep C-state (in fact, the system acts like idle=poll)
+ * When the fd to /dev/cpu_dma_latency is closed, the behavior
+ * goes back to the system default.
+ *
+ * Documentation/power/pm_qos_interface.txt
+ */
+static void set_latency_target(void)
+{
+	struct stat s;
+	int err;
+
+	if (laptop) {
+		warn("not setting cpu_dma_latency to save battery power\n");
+		return;
+	}
+
+	errno = 0;
+	err = stat("/dev/cpu_dma_latency", &s);
+	if (err == -1) {
+		err_msg_n(errno, "WARN: stat /dev/cpu_dma_latency failed");
+		return;
+	}
+
+	errno = 0;
+	latency_target_fd = open("/dev/cpu_dma_latency", O_RDWR);
+	if (latency_target_fd == -1) {
+		err_msg_n(errno, "WARN: open /dev/cpu_dma_latency");
+		return;
+	}
+
+	errno = 0;
+	err = write(latency_target_fd, &latency_target_value, 4);
+	if (err < 1) {
+		err_msg_n(errno, "# error setting cpu_dma_latency to %d!", latency_target_value);
+		close(latency_target_fd);
+		return;
+	}
+	printf("# /dev/cpu_dma_latency set to %dus\n", latency_target_value);
+}
+
+
+enum kernelversion {
+	KV_NOT_SUPPORTED,
+	KV_26_LT18,
+	KV_26_LT24,
+	KV_26_33,
+	KV_30
+};
+
+enum {
+	ERROR_GENERAL	= -1,
+	ERROR_NOTFOUND	= -2,
+};
+
+static char functiontracer[MAX_PATH];
+static char traceroptions[MAX_PATH];
+
+static int trace_fd     = -1;
+static int tracemark_fd = -1;
+
+static int kernvar(int mode, const char *name, char *value, size_t sizeofvalue)
+{
+	char filename[128];
+	int retval = 1;
+	int path;
+	size_t len_prefix = strlen(fileprefix), len_name = strlen(name);
+
+	if (len_prefix + len_name + 1 > sizeof(filename)) {
+		errno = ENOMEM;
+		return 1;
+	}
+
+	memcpy(filename, fileprefix, len_prefix);
+	memcpy(filename + len_prefix, name, len_name + 1);
+
+	path = open(filename, mode);
+	if (path >= 0) {
+		if (mode == O_RDONLY) {
+			int got;
+			if ((got = read(path, value, sizeofvalue)) > 0) {
+				retval = 0;
+				value[got-1] = '\0';
+			}
+		} else if (mode == O_WRONLY) {
+			if (write(path, value, sizeofvalue) == sizeofvalue)
+				retval = 0;
+		}
+		close(path);
+	}
+	return retval;
+}
+
+static void setkernvar(const char *name, char *value)
+{
+	int i;
+	char oldvalue[KVALUELEN];
+
+	if (kernelversion < KV_26_33) {
+		if (kernvar(O_RDONLY, name, oldvalue, sizeof(oldvalue)))
+			fprintf(stderr, "could not retrieve %s\n", name);
+		else {
+			for (i = 0; i < KVARS; i++) {
+				if (!strcmp(kv[i].name, name))
+					break;
+				if (kv[i].name[0] == '\0') {
+					strncpy(kv[i].name, name,
+						sizeof(kv[i].name));
+					strncpy(kv[i].value, oldvalue,
+					    sizeof(kv[i].value));
+					break;
+				}
+			}
+			if (i == KVARS)
+				fprintf(stderr, "could not backup %s (%s)\n",
+					name, oldvalue);
+		}
+	}
+	if (kernvar(O_WRONLY, name, value, strlen(value)))
+		fprintf(stderr, "could not set %s to %s\n", name, value);
+
+}
+
+static void restorekernvars(void)
+{
+	int i;
+
+	for (i = 0; i < KVARS; i++) {
+		if (kv[i].name[0] != '\0') {
+			if (kernvar(O_WRONLY, kv[i].name, kv[i].value,
+			    strlen(kv[i].value)))
+				fprintf(stderr, "could not restore %s to %s\n",
+					kv[i].name, kv[i].value);
+		}
+	}
+}
+
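+/* Normalize a timespec after arithmetic so that tv_nsec < NSEC_PER_SEC. */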
+static inline void tsnorm(struct timespec *ts)
+{
+	while (ts->tv_nsec >= NSEC_PER_SEC) {
+		ts->tv_nsec -= NSEC_PER_SEC;
+		ts->tv_sec++;
+	}
+}
+
+static inline int tsgreater(struct timespec *a, struct timespec *b)
+{
+	return ((a->tv_sec > b->tv_sec) ||
+		(a->tv_sec == b->tv_sec && a->tv_nsec > b->tv_nsec));
+}
+
+static inline int64_t calcdiff_ns(struct timespec t1, struct timespec t2)
+{
+	struct timespec r;
+
+	r.tv_sec = t1.tv_sec - t2.tv_sec;
+	r.tv_nsec = t1.tv_nsec - t2.tv_nsec;
+	if (r.tv_nsec < 0) {
+		r.tv_sec--;
+		r.tv_nsec += NSEC_PER_SEC;
+	}
+
+	return r.tv_sec * NSEC_PER_SEC + r.tv_nsec;
+}
+
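+/* Same as calcdiff_ns(), but returns the difference in microseconds. */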
+static inline int64_t calcdiff(struct timespec t1, struct timespec t2)
+{
+	return calcdiff_ns(t1, t2) / 1000;
+}
+
+void traceopt(char *option)
+{
+	char *ptr;
+	if (traceopt_count + 1 > traceopt_size) {
+		traceopt_size += 16;
+		printf("expanding traceopt buffer to %d entries\n", traceopt_size);
+		traceptr = realloc(traceptr, sizeof(char*) * traceopt_size);
+		if (traceptr == NULL)
+			fatal ("Error allocating space for %d trace options\n",
+			       traceopt_count+1);
+	}
+	ptr = malloc(strlen(option)+1);
+	if (ptr == NULL)
+		fatal("error allocating space for trace option %s\n", option);
+	printf("adding traceopt %s\n", option);
+	strcpy(ptr, option);
+	traceptr[traceopt_count++] = ptr;
+}
+
+static int trace_file_exists(char *name)
+{
+	struct stat sbuf;
+	char *tracing_prefix = get_debugfileprefix();
+	char path[MAX_PATH];
+	strcat(strcpy(path, tracing_prefix), name);
+	return stat(path, &sbuf) ? 0 : 1;
+}
+
+#define TRACEBUFSIZ 1024
+static __thread char tracebuf[TRACEBUFSIZ];
+
+static void tracemark(char *fmt, ...) __attribute__((format(printf, 1, 2)));
+static void tracemark(char *fmt, ...)
+{
+	va_list ap;
+	int len;
+
+	/* bail out if we're not tracing */
+	/* or if the kernel doesn't support trace_mark */
+	if (tracemark_fd < 0)
+		return;
+
+	va_start(ap, fmt);
+	len = vsnprintf(tracebuf, TRACEBUFSIZ, fmt, ap);
+	va_end(ap);
+	write_check(tracemark_fd, tracebuf, len);
+}
+
+
+
+void tracing(int on)
+{
+	if (on) {
+		switch (kernelversion) {
+		case KV_26_LT18: gettimeofday(0,(struct timezone *)1); break;
+		case KV_26_LT24: prctl(0, 1); break;
+		case KV_26_33:
+		case KV_30:
+			write_check(trace_fd, "1", 1);
+			break;
+		default:	 break;
+		}
+	} else {
+		switch (kernelversion) {
+		case KV_26_LT18: gettimeofday(0,0); break;
+		case KV_26_LT24: prctl(0, 0); break;
+		case KV_26_33:
+		case KV_30:
+			write_check(trace_fd, "0", 1);
+			break;
+		default:	break;
+		}
+	}
+}
+
+static int settracer(char *tracer)
+{
+	if (valid_tracer(tracer)) {
+		setkernvar("current_tracer", tracer);
+		return 0;
+	}
+	return -1;
+}
+
+static void setup_tracer(void)
+{
+	if (!tracelimit || notrace)
+		return;
+
+	if (mount_debugfs(NULL))
+		fatal("could not mount debugfs");
+
+	if (kernelversion >= KV_26_33) {
+		fileprefix = get_debugfileprefix();
+		if (!trace_file_exists("tracing_enabled") &&
+		    !trace_file_exists("tracing_on"))
+			warn("tracing_enabled or tracing_on not found\n"
+			    "debugfs not mounted, "
+			    "TRACERs not configured?\n");
+	} else
+		fileprefix = procfileprefix;
+
+	if (kernelversion >= KV_26_33) {
+		int ret;
+
+		if (trace_file_exists("tracing_enabled") &&
+		    !trace_file_exists("tracing_on"))
+			setkernvar("tracing_enabled", "1");
+
+		/* ftrace_enabled is a sysctl variable */
+		/* turn it on if you're doing anything but nop or event tracing */
+
+		fileprefix = procfileprefix;
+		if (tracetype)
+			setkernvar("ftrace_enabled", "1");
+		else
+			setkernvar("ftrace_enabled", "0");
+		fileprefix = get_debugfileprefix();
+
+		/*
+		 * Set the default tracer to nop; this also has the
+		 * nice side effect of clearing out old traces.
+		 */
+		ret = settracer("nop");
+
+		switch (tracetype) {
+		case NOTRACE:
+			/* no tracer specified, use events */
+			enable_events = 1;
+			break;
+		case FUNCTION:
+			ret = settracer("function");
+			break;
+		case IRQSOFF:
+			ret = settracer("irqsoff");
+			break;
+		case PREEMPTOFF:
+			ret = settracer("preemptoff");
+			break;
+		case PREEMPTIRQSOFF:
+			ret = settracer("preemptirqsoff");
+			break;
+		case CTXTSWITCH:
+			if (valid_tracer("sched_switch"))
+			    ret = settracer("sched_switch");
+			else {
+				if ((ret = event_enable("sched/sched_wakeup")))
+					break;
+				ret = event_enable("sched/sched_switch");
+			}
+			break;
+		case WAKEUP:
+			ret = settracer("wakeup");
+			break;
+		case WAKEUPRT:
+			ret = settracer("wakeup_rt");
+			break;
+		default:
+			if (strlen(tracer)) {
+				ret = settracer(tracer);
+				if (strcmp(tracer, "events") == 0 && ftrace)
+					ret = settracer(functiontracer);
+			}
+			else {
+				printf("cyclictest: unknown tracer!\n");
+				ret = 0;
+			}
+			break;
+		}
+
+		if (enable_events)
+			/* turn on all events */
+			event_enable_all();
+
+		if (ret)
+			fprintf(stderr, "Requested tracer '%s' not available\n", tracer);
+
+		setkernvar(traceroptions, "print-parent");
+		setkernvar(traceroptions, "latency-format");
+		if (verbose) {
+			setkernvar(traceroptions, "sym-offset");
+			setkernvar(traceroptions, "sym-addr");
+			setkernvar(traceroptions, "verbose");
+		} else {
+			setkernvar(traceroptions, "nosym-offset");
+			setkernvar(traceroptions, "nosym-addr");
+			setkernvar(traceroptions, "noverbose");
+		}
+		if (traceopt_count) {
+			int i;
+			for (i = 0; i < traceopt_count; i++)
+				setkernvar(traceroptions, traceptr[i]);
+		}
+		setkernvar("tracing_max_latency", "0");
+		if (trace_file_exists("latency_hist"))
+			setkernvar("latency_hist/wakeup/reset", "1");
+
+		/* open the tracing on file descriptor */
+		if (trace_fd == -1) {
+			char path[MAX_PATH];
+			strcpy(path, fileprefix);
+			if (trace_file_exists("tracing_on"))
+				strcat(path, "tracing_on");
+			else
+				strcat(path, "tracing_enabled");
+			if ((trace_fd = open(path, O_WRONLY)) == -1)
+				fatal("unable to open %s for tracing", path);
+		}
+
+		/* open the tracemark file descriptor */
+		if (tracemark_fd == -1) {
+			char path[MAX_PATH];
+			strcat(strcpy(path, fileprefix), "trace_marker");
+			if ((tracemark_fd = open(path, O_WRONLY)) == -1)
+				warn("unable to open trace_marker file: %s\n", path);
+		}
+
+	} else {
+		setkernvar("trace_all_cpus", "1");
+		setkernvar("trace_freerunning", "1");
+		setkernvar("trace_print_on_crash", "0");
+		setkernvar("trace_user_triggered", "1");
+		setkernvar("trace_user_trigger_irq", "-1");
+		setkernvar("trace_verbose", "0");
+		setkernvar("preempt_thresh", "0");
+		setkernvar("wakeup_timing", "0");
+		setkernvar("preempt_max_latency", "0");
+		if (ftrace)
+			setkernvar("mcount_enabled", "1");
+		setkernvar("trace_enabled", "1");
+		setkernvar("latency_hist/wakeup_latency/reset", "1");
+	}
+
+	tracing(1);
+}
+
+/*
+ * parse an input value as a base10 value followed by an optional
+ * suffix. The input value is presumed to be in seconds, unless
+ * followed by a modifier suffix: m=minutes, h=hours, d=days
+ *
+ * the return value is a value in seconds
+ */
+int parse_time_string(char *val)
+{
+	char *end;
+	int t = strtol(val, &end, 10);
+	if (end) {
+		switch (*end) {
+		case 'm':
+		case 'M':
+			t *= 60;
+			break;
+
+		case 'h':
+		case 'H':
+			t *= 60*60;
+			break;
+
+		case 'd':
+		case 'D':
+			t *= 24*60*60;
+			break;
+
+		}
+	}
+	return t;
+}
+
+/*
+ * Raise the soft priority limit up to prio, if that is less than or
+ * equal to the hard limit. Returns 0 on success, the errno value if
+ * a call fails, and -1 if the hard limit does not allow the raise.
+ */
+static int raise_soft_prio(int policy, const struct sched_param *param)
+{
+	int err;
+	int policy_max;	/* max for scheduling policy such as SCHED_FIFO */
+	int soft_max;
+	int hard_max;
+	int prio;
+	struct rlimit rlim;
+
+	prio = param->sched_priority;
+
+	policy_max = sched_get_priority_max(policy);
+	if (policy_max == -1) {
+		err = errno;
+		err_msg("WARN: no such policy\n");
+		return err;
+	}
+
+	err = getrlimit(RLIMIT_RTPRIO, &rlim);
+	if (err) {
+		err = errno;
+		err_msg_n(err, "WARN: getrlimit failed");
+		return err;
+	}
+
+	soft_max = (rlim.rlim_cur == RLIM_INFINITY) ? policy_max : rlim.rlim_cur;
+	hard_max = (rlim.rlim_max == RLIM_INFINITY) ? policy_max : rlim.rlim_max;
+
+	if (prio > soft_max && prio <= hard_max) {
+		rlim.rlim_cur = prio;
+		err = setrlimit(RLIMIT_RTPRIO, &rlim);
+		if (err) {
+			err = errno;
+			err_msg_n(err, "WARN: setrlimit failed");
+			/* return err; */
+		}
+	} else {
+		err = -1;
+	}
+
+	return err;
+}
+
+/*
+ * Check the error status of sched_setscheduler
+ * If an error can be corrected by raising the soft limit priority to
+ * a priority less than or equal to the hard limit, then do so.
+ */
+static int setscheduler(pid_t pid, int policy, const struct sched_param *param)
+{
+	int err = 0;
+
+try_again:
+	err = sched_setscheduler(pid, policy, param);
+	if (err) {
+		err = errno;
+		if (err == EPERM) {
+			int err1;
+			err1 = raise_soft_prio(policy, param);
+			if (!err1) goto try_again;
+		}
+	}
+
+	return err;
+}
+
+/* Work around lack of barriers in oldish uClibc-based toolchains. */
+
+static struct thread_barrier {
+	pthread_mutex_t lock;
+	pthread_cond_t wait;
+	unsigned int count;
+} align_barr, globalt_barr;
+
+static inline
+void barrier_init(struct thread_barrier *__restrict barrier,
+		 unsigned int count)
+{
+	pthread_mutex_init(&barrier->lock, NULL);
+	pthread_cond_init(&barrier->wait, NULL);
+	barrier->count = count;
+}
+
+static inline void barrier_destroy(struct thread_barrier *barrier)
+{
+	pthread_mutex_destroy(&barrier->lock);
+	pthread_cond_destroy(&barrier->wait);
+}
+
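+/*
+ * Block until all expected threads have arrived. The barrier is
+ * one-shot: once the count drops to zero, later waiters pass
+ * through immediately.
+ */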
+static inline void barrier_wait(struct thread_barrier *barrier)
+{
+	pthread_mutex_lock(&barrier->lock);
+
+	if (barrier->count > 0) {
+		barrier->count--;
+		pthread_cond_broadcast(&barrier->wait);
+		while (barrier->count > 0)
+			pthread_cond_wait(&barrier->wait, &barrier->lock);
+	}
+
+	pthread_mutex_unlock(&barrier->lock);
+}
+
+/*
+ * timer thread
+ *
+ * Modes:
+ * - clock_nanosleep based
+ * - cyclic timer based
+ *
+ * Clock:
+ * - CLOCK_MONOTONIC
+ * - CLOCK_REALTIME
+ *
+ */
+void *timerthread(void *param)
+{
+	struct thread_param *par = param;
+	struct sched_param schedp;
+	struct sigevent sigev;
+	sigset_t sigset;
+	timer_t timer;
+	struct timespec now, next, interval, stop;
+	struct itimerval itimer;
+	struct itimerspec tspec;
+	struct thread_stat *stat = par->stats;
+	int stopped = 0;
+	cpu_set_t mask;
+	pthread_t thread;
+
+	/* if we're running in numa mode, set our memory node */
+	if (par->node != -1)
+		rt_numa_set_numa_run_on_node(par->node, par->cpu);
+
+	if (par->cpu != -1) {
+		CPU_ZERO(&mask);
+		CPU_SET(par->cpu, &mask);
+		thread = pthread_self();
+		if (pthread_setaffinity_np(thread, sizeof(mask), &mask) != 0)
+			warn("Could not set CPU affinity to CPU #%d\n", par->cpu);
+	}
+
+	interval.tv_sec = par->interval / USEC_PER_SEC;
+	interval.tv_nsec = (par->interval % USEC_PER_SEC) * 1000;
+
+	stat->tid = gettid();
+
+	sigemptyset(&sigset);
+	sigaddset(&sigset, par->signal);
+	sigprocmask(SIG_BLOCK, &sigset, NULL);
+
+	if (par->mode == MODE_CYCLIC) {
+		sigev.sigev_notify = SIGEV_THREAD_ID | SIGEV_SIGNAL;
+		sigev.sigev_signo = par->signal;
+		sigev.sigev_notify_thread_id = stat->tid;
+		timer_create(par->clock, &sigev, &timer);
+		tspec.it_interval = interval;
+	}
+
+	memset(&schedp, 0, sizeof(schedp));
+	schedp.sched_priority = par->prio;
+	if (pthread_setschedparam(pthread_self(), par->policy, &schedp))
+		fatal("timerthread%d: failed to set priority to %d\n", par->cpu, par->prio);
+
+	/* Get current time */
+	if (aligned || secaligned) {
+		barrier_wait(&globalt_barr);
+		if (par->tnum == 0) {
+			clock_gettime(par->clock, &globalt);
+			if (secaligned) {
+				/* Ensure that the thread start timestamp is not
+				   in the past */
+				if (globalt.tv_nsec > 900000000)
+					globalt.tv_sec += 2;
+				else
+					globalt.tv_sec++;
+				globalt.tv_nsec = 0;
+			}
+		}
+		barrier_wait(&align_barr);
+		now = globalt;
+		if(offset) {
+			if (aligned)
+				now.tv_nsec += offset * par->tnum;
+			else
+				now.tv_nsec += offset;
+			tsnorm(&now);
+		}
+	}
+	else
+		clock_gettime(par->clock, &now);
+
+	next = now;
+	next.tv_sec += interval.tv_sec;
+	next.tv_nsec += interval.tv_nsec;
+	tsnorm(&next);
+
+	memset(&stop, 0, sizeof(stop)); /* grrr */
+
+	if (duration) {
+		stop = now;
+		stop.tv_sec += duration;
+	}
+	if (par->mode == MODE_CYCLIC) {
+		if (par->timermode == TIMER_ABSTIME)
+			tspec.it_value = next;
+		else {
+			tspec.it_value = interval;
+		}
+		timer_settime(timer, par->timermode, &tspec, NULL);
+	}
+
+	if (par->mode == MODE_SYS_ITIMER) {
+		itimer.it_interval.tv_sec = interval.tv_sec;
+		itimer.it_interval.tv_usec = interval.tv_nsec / 1000;
+		itimer.it_value = itimer.it_interval;
+		setitimer (ITIMER_REAL, &itimer, NULL);
+	}
+
+	stat->threadstarted++;
+
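+	/*
+	 * On Cobalt, PTHREAD_WARNSW arms SIGDEBUG notification: the
+	 * thread gets warned if it inadvertently switches to secondary
+	 * (non real-time) mode inside the timing loop below.
+	 */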
+#ifdef CONFIG_XENO_COBALT
+	if (pthread_setmode_np(0, PTHREAD_WARNSW, NULL))
+		fatal("pthread_setmode_np()");
+#endif
+	while (!shutdown) {
+
+		uint64_t diff;
+		int sigs, ret;
+
+		/* Wait for next period */
+		switch (par->mode) {
+		case MODE_CYCLIC:
+		case MODE_SYS_ITIMER:
+			if (sigwait(&sigset, &sigs) < 0)
+				goto out;
+			break;
+
+		case MODE_CLOCK_NANOSLEEP:
+			if (par->timermode == TIMER_ABSTIME) {
+				if ((ret = clock_nanosleep(par->clock, TIMER_ABSTIME, &next, NULL))) {
+					if (ret != EINTR)
+						warn("clock_nanosleep failed. errno: %d\n", errno);
+					goto out;
+				}
+			} else {
+				if ((ret = clock_gettime(par->clock, &now))) {
+					if (ret != EINTR)
+						warn("clock_gettime() failed: %s", strerror(errno));
+					goto out;
+				}
+				if ((ret = clock_nanosleep(par->clock, TIMER_RELTIME, &interval, NULL))) {
+					if (ret != EINTR)
+						warn("clock_nanosleep() failed. errno: %d\n", errno);
+					goto out;
+				}
+				next.tv_sec = now.tv_sec + interval.tv_sec;
+				next.tv_nsec = now.tv_nsec + interval.tv_nsec;
+				tsnorm(&next);
+			}
+			break;
+
+		case MODE_SYS_NANOSLEEP:
+			if ((ret = clock_gettime(par->clock, &now))) {
+				if (ret != EINTR)
+					warn("clock_gettime() failed: errno %d\n", errno);
+				goto out;
+			}
+			if (nanosleep(&interval, NULL)) {
+				if (errno != EINTR)
+					warn("nanosleep failed. errno: %d\n", errno);
+				goto out;
+			}
+			next.tv_sec = now.tv_sec + interval.tv_sec;
+			next.tv_nsec = now.tv_nsec + interval.tv_nsec;
+			tsnorm(&next);
+			break;
+		}
+
+		if ((ret = clock_gettime(par->clock, &now))) {
+			if (ret != EINTR)
+				warn("clock_getttime() failed. errno: %d\n", errno);
+			goto out;
+		}
+
+		if (use_nsecs)
+			diff = calcdiff_ns(now, next);
+		else
+			diff = calcdiff(now, next);
+		if (diff < stat->min)
+			stat->min = diff;
+		if (diff > stat->max) {
+			stat->max = diff;
+			if (refresh_on_max)
+				pthread_cond_signal(&refresh_on_max_cond);
+		}
+		stat->avg += (double) diff;
+
+		if (duration && (calcdiff(now, stop) >= 0))
+			shutdown++;
+
+		if (!stopped && tracelimit && (diff > tracelimit)) {
+			stopped++;
+			tracemark("hit latency threshold (%llu > %d)",
+				  (unsigned long long) diff, tracelimit);
+			tracing(0);
+			shutdown++;
+			pthread_mutex_lock(&break_thread_id_lock);
+			if (break_thread_id == 0)
+				break_thread_id = stat->tid;
+			break_thread_value = diff;
+			pthread_mutex_unlock(&break_thread_id_lock);
+		}
+		stat->act = diff;
+
+		if (par->bufmsk)
+			stat->values[stat->cycles & par->bufmsk] = diff;
+
+		/* Update the histogram */
+		if (histogram) {
+			if (diff >= histogram) {
+				stat->hist_overflow++;
+				if (stat->num_outliers < histogram)
+					stat->outliers[stat->num_outliers++] = stat->cycles;
+			}
+			else
+				stat->hist_array[diff]++;
+		}
+
+		stat->cycles++;
+
+		next.tv_sec += interval.tv_sec;
+		next.tv_nsec += interval.tv_nsec;
+		if (par->mode == MODE_CYCLIC) {
+			int overrun_count = timer_getoverrun(timer);
+			next.tv_sec += overrun_count * interval.tv_sec;
+			next.tv_nsec += overrun_count * interval.tv_nsec;
+		}
+		tsnorm(&next);
+
+		while (tsgreater(&now, &next)) {
+			next.tv_sec += interval.tv_sec;
+			next.tv_nsec += interval.tv_nsec;
+			tsnorm(&next);
+		}
+
+		if (par->max_cycles && par->max_cycles == stat->cycles)
+			break;
+	}
+
+out:
+#ifdef CONFIG_XENO_COBALT
+	if (pthread_setmode_np(PTHREAD_WARNSW, 0, NULL))
+		fatal("pthread_setmode_np()");
+#endif
+	if (par->mode == MODE_CYCLIC)
+		timer_delete(timer);
+
+	if (par->mode == MODE_SYS_ITIMER) {
+		itimer.it_value.tv_sec = 0;
+		itimer.it_value.tv_usec = 0;
+		itimer.it_interval.tv_sec = 0;
+		itimer.it_interval.tv_usec = 0;
+		setitimer (ITIMER_REAL, &itimer, NULL);
+	}
+
+	/* switch to normal */
+	schedp.sched_priority = 0;
+	sched_setscheduler(0, SCHED_OTHER, &schedp);
+
+	stat->threadstarted = -1;
+
+	return NULL;
+}
+
+
+/* Print usage information */
+static void display_help(int error)
+{
+	char tracers[MAX_PATH];
+	char *prefix;
+
+	prefix = get_debugfileprefix();
+	if (prefix[0] == '\0')
+		strcpy(tracers, "unavailable (debugfs not mounted)");
+	else {
+		fileprefix = prefix;
+		if (kernvar(O_RDONLY, "available_tracers", tracers, sizeof(tracers)))
+			strcpy(tracers, "none");
+	}
+
+	printf("cyclictest V %1.2f\n", VERSION_STRING);
+	printf("Usage:\n"
+	       "cyclictest <options>\n\n"
+#if LIBNUMA_API_VERSION >= 2
+	       "-a [CPUSET] --affinity     Run thread #N on processor #N, if possible, or if CPUSET\n"
+	       "                           given, pin threads to that set of processors in round-\n"
+	       "                           robin order.  E.g. -a 2 pins all threads to CPU 2,\n"
+	       "                           but -a 3-5,0 -t 5 will run the first and fifth\n"
+	       "                           threads on CPU (0),thread #2 on CPU 3, thread #3\n"
+	       "                           on CPU 4, and thread #5 on CPU 5.\n"
+#else
+	       "-a [NUM] --affinity        run thread #N on processor #N, if possible\n"
+	       "                           with NUM pin all threads to the processor NUM\n"
+#endif
+	       "-A USEC  --aligned=USEC    align thread wakeups to a specific offset\n"
+	       "-b USEC  --breaktrace=USEC send break trace command when latency > USEC\n"
+	       "-B       --preemptirqs     both preempt and irqsoff tracing (used with -b)\n"
+	       "-c CLOCK --clock=CLOCK     select clock\n"
+	       "                           0 = CLOCK_MONOTONIC (default)\n"
+	       "                           1 = CLOCK_REALTIME\n"
+	       "-C       --context         context switch tracing (used with -b)\n"
+	       "-d DIST  --distance=DIST   distance of thread intervals in us default=500\n"
+	       "-D       --duration=t      specify a length for the test run\n"
+	       "                           default is in seconds, but 'm', 'h', or 'd' maybe added\n"
+	       "                           to modify value to minutes, hours or days\n"
+	       "	 --latency=PM_QOS  write PM_QOS to /dev/cpu_dma_latency\n"
+	       "-E       --event           event tracing (used with -b)\n"
+	       "-f       --ftrace          function trace (when -b is active)\n"
+	       "-F       --fifo=<path>     create a named pipe at path and write stats to it\n"
+	       "-h       --histogram=US    dump a latency histogram to stdout after the run\n"
+	       "                           (with same priority about many threads)\n"
+	       "                           US is the max time to be be tracked in microseconds\n"
+	       "-H       --histofall=US    same as -h except with an additional summary column\n"
+	       "-i INTV  --interval=INTV   base interval of thread in us default=1000\n"
+	       "-I       --irqsoff         Irqsoff tracing (used with -b)\n"
+	       "-l LOOPS --loops=LOOPS     number of loops: default=0(endless)\n"
+	       "	 --laptop	   Save battery when running cyclictest\n"
+	       "			   This will give you poorer realtime results\n"
+	       "			   but will not drain your battery so quickly\n"
+	       "-m       --mlockall        lock current and future memory allocations\n"
+	       "-M       --refresh_on_max  delay updating the screen until a new max latency is hit\n"
+	       "-n       --nanosleep       use clock_nanosleep\n"
+	       "	 --notrace	   suppress tracing\n"
+	       "-N       --nsecs           print results in ns instead of us (default us)\n"
+	       "-o RED   --oscope=RED      oscilloscope mode, reduce verbose output by RED\n"
+	       "-O TOPT  --traceopt=TOPT   trace option\n"
+	       "-p PRIO  --prio=PRIO       priority of highest prio thread (defaults to 99)\n"
+	       "-P       --preemptoff      Preempt off tracing (used with -b)\n"
+	       "-q       --quiet           print only a summary on exit\n"
+	       "	 --priospread       spread priority levels starting at specified value\n"
+	       "-r       --relative        use relative timer instead of absolute\n"
+	       "-R       --resolution      check clock resolution, calling clock_gettime() many\n"
+	       "                           times.  list of clock_gettime() values will be\n"
+	       "                           reported with -X\n"
+	       "         --secaligned [USEC] align thread wakeups to the next full second,\n"
+	       "                           and apply the optional offset\n"
+	       "-s       --system          use sys_nanosleep and sys_setitimer\n"
+	       "-S       --smp             Standard SMP testing: options -a -t -n and\n"
+	       "                           same priority of all threads\n"
+	       "-t       --threads         one thread per available processor\n"
+	       "-t [NUM] --threads=NUM     number of threads:\n"
+	       "                           without NUM, threads = max_cpus\n"
+	       "                           without -t default = 1\n"
+	       "-T TRACE --tracer=TRACER   set tracing function\n"
+	       "    configured tracers: %s\n"
+	       "-u       --unbuffered      force unbuffered output for live processing\n"
+#ifdef NUMA
+	       "-U       --numa            Standard NUMA testing (similar to SMP option)\n"
+	       "                           thread data structures allocated from local node\n"
+#endif
+	       "-v       --verbose         output values on stdout for statistics\n"
+	       "                           format: n:c:v n=tasknum c=count v=value in us\n"
+	       "-w       --wakeup          task wakeup tracing (used with -b)\n"
+	       "-W       --wakeuprt        rt task wakeup tracing (used with -b)\n"
+	       "	 --dbg_cyclictest  print info useful for debugging cyclictest\n"
+	       "	 --policy=POLI     policy of realtime thread, POLI may be fifo(default) or rr\n"
+	       "                           format: --policy=fifo(default) or --policy=rr\n",
+	       tracers
+		);
+	if (error)
+		exit(EXIT_FAILURE);
+	exit(EXIT_SUCCESS);
+}
+
+void application_usage(void)
+{
+	display_help(0);
+}
+
+static int use_nanosleep;
+static int timermode = TIMER_ABSTIME;
+static int use_system;
+static int priority = 99;
+static int policy = SCHED_FIFO;	/* default policy if not specified */
+static int num_threads = 1;
+static int max_cycles;
+static int clocksel = 0;
+static int quiet;
+static int interval = DEFAULT_INTERVAL;
+static int distance = -1;
+static struct bitmask *affinity_mask = NULL;
+static int smp = 0;
+
+enum {
+	AFFINITY_UNSPECIFIED,
+	AFFINITY_SPECIFIED,
+	AFFINITY_USEALL
+};
+static int setaffinity = AFFINITY_UNSPECIFIED;
+
+static int clocksources[] = {
+	CLOCK_MONOTONIC,
+	CLOCK_REALTIME,
+};
+
+static unsigned int is_cpumask_zero(const struct bitmask *mask)
+{
+	return (rt_numa_bitmask_count(mask) == 0);
+}
+
+static int cpu_for_thread(int thread_num, int max_cpus)
+{
+	unsigned int m, cpu, i, num_cpus;
+	num_cpus = rt_numa_bitmask_count(affinity_mask);
+
+	m = thread_num % num_cpus;
+
+	/* there are num_cpus bits set, we want position of m'th one */
+	for (i = 0, cpu = 0; i < max_cpus; i++) {
+		if (rt_numa_bitmask_isbitset(affinity_mask, i)) {
+			if (cpu == m)
+				return i;
+			cpu++;
+		}
+	}
+	fprintf(stderr, "Bug in cpu mask handling code.\n");
+	return 0;
+}
+
+
+static void parse_cpumask(const char *option, const int max_cpus)
+{
+	affinity_mask = rt_numa_parse_cpustring(option, max_cpus);
+	if (affinity_mask) {
+		if (is_cpumask_zero(affinity_mask)) {
+			rt_bitmask_free(affinity_mask);
+			affinity_mask = NULL;
+		}
+	}
+	if (!affinity_mask)
+		display_help(1);
+
+	if (verbose) {
+		printf("%s: Using %u cpus.\n", __func__,
+			rt_numa_bitmask_count(affinity_mask));
+	}
+}
+
+
+static void handlepolicy(char *polname)
+{
+	if (strncasecmp(polname, "other", 5) == 0)
+		policy = SCHED_OTHER;
+	else if (strncasecmp(polname, "batch", 5) == 0)
+		policy = SCHED_BATCH;
+	else if (strncasecmp(polname, "idle", 4) == 0)
+		policy = SCHED_IDLE;
+	else if (strncasecmp(polname, "fifo", 4) == 0)
+		policy = SCHED_FIFO;
+	else if (strncasecmp(polname, "rr", 2) == 0)
+		policy = SCHED_RR;
+	else	/* default policy if we don't recognize the request */
+		policy = SCHED_OTHER;
+}
+
+static char *policyname(int policy)
+{
+	char *policystr = "";
+
+	switch(policy) {
+	case SCHED_OTHER:
+		policystr = "other";
+		break;
+	case SCHED_FIFO:
+		policystr = "fifo";
+		break;
+	case SCHED_RR:
+		policystr = "rr";
+		break;
+	case SCHED_BATCH:
+		policystr = "batch";
+		break;
+	case SCHED_IDLE:
+		policystr = "idle";
+		break;
+	}
+	return policystr;
+}
+
+
+enum option_values {
+	OPT_AFFINITY=1, OPT_NOTRACE, OPT_BREAKTRACE, OPT_PREEMPTIRQ, OPT_CLOCK,
+	OPT_CONTEXT, OPT_DISTANCE, OPT_DURATION, OPT_LATENCY, OPT_EVENT,
+	OPT_FTRACE, OPT_FIFO, OPT_HISTOGRAM, OPT_HISTOFALL, OPT_INTERVAL,
+	OPT_IRQSOFF, OPT_LOOPS, OPT_MLOCKALL, OPT_REFRESH, OPT_NANOSLEEP,
+	OPT_NSECS, OPT_OSCOPE, OPT_TRACEOPT, OPT_PRIORITY, OPT_PREEMPTOFF,
+	OPT_QUIET, OPT_PRIOSPREAD, OPT_RELATIVE, OPT_RESOLUTION, OPT_SYSTEM,
+	OPT_SMP, OPT_THREADS, OPT_TRACER, OPT_UNBUFFERED, OPT_NUMA, OPT_VERBOSE,
+	OPT_WAKEUP, OPT_WAKEUPRT, OPT_DBGCYCLIC, OPT_POLICY, OPT_HELP, OPT_NUMOPTS,
+	OPT_ALIGNED, OPT_LAPTOP, OPT_SECALIGNED,
+};
+
+/* Process commandline options */
+static void process_options (int argc, char *argv[], int max_cpus)
+{
+	int error = 0;
+	int option_affinity = 0;
+
+	for (;;) {
+		int option_index = 0;
+		/*
+		 * Options for getopt
+		 * Ordered alphabetically by single letter name
+		 */
+		static struct option long_options[] = {
+			{"affinity",         optional_argument, NULL, OPT_AFFINITY},
+			{"notrace",          no_argument,       NULL, OPT_NOTRACE },
+			{"aligned",          optional_argument, NULL, OPT_ALIGNED },
+			{"breaktrace",       required_argument, NULL, OPT_BREAKTRACE },
+			{"preemptirqs",      no_argument,       NULL, OPT_PREEMPTIRQ },
+			{"clock",            required_argument, NULL, OPT_CLOCK },
+			{"context",          no_argument,       NULL, OPT_CONTEXT },
+			{"distance",         required_argument, NULL, OPT_DISTANCE },
+			{"duration",         required_argument, NULL, OPT_DURATION },
+			{"latency",          required_argument, NULL, OPT_LATENCY },
+			{"event",            no_argument,       NULL, OPT_EVENT },
+			{"ftrace",           no_argument,       NULL, OPT_FTRACE },
+			{"fifo",             required_argument, NULL, OPT_FIFO },
+			{"histogram",        required_argument, NULL, OPT_HISTOGRAM },
+			{"histofall",        required_argument, NULL, OPT_HISTOFALL },
+			{"interval",         required_argument, NULL, OPT_INTERVAL },
+			{"irqsoff",          no_argument,       NULL, OPT_IRQSOFF },
+			{"laptop",	     no_argument,	NULL, OPT_LAPTOP },
+			{"loops",            required_argument, NULL, OPT_LOOPS },
+			{"mlockall",         no_argument,       NULL, OPT_MLOCKALL },
+			{"refresh_on_max",   no_argument,       NULL, OPT_REFRESH },
+			{"nanosleep",        no_argument,       NULL, OPT_NANOSLEEP },
+			{"nsecs",            no_argument,       NULL, OPT_NSECS },
+			{"oscope",           required_argument, NULL, OPT_OSCOPE },
+			{"traceopt",         required_argument, NULL, OPT_TRACEOPT },
+			{"priority",         required_argument, NULL, OPT_PRIORITY },
+			{"preemptoff",       no_argument,       NULL, OPT_PREEMPTOFF },
+			{"quiet",            no_argument,       NULL, OPT_QUIET },
+			{"priospread",       no_argument,       NULL, OPT_PRIOSPREAD },
+			{"relative",         no_argument,       NULL, OPT_RELATIVE },
+			{"resolution",       no_argument,       NULL, OPT_RESOLUTION },
+			{"secaligned",       optional_argument, NULL, OPT_SECALIGNED },
+			{"system",           no_argument,       NULL, OPT_SYSTEM },
+			{"smp",              no_argument,       NULL, OPT_SMP },
+			{"threads",          optional_argument, NULL, OPT_THREADS },
+			{"tracer",           required_argument, NULL, OPT_TRACER },
+			{"unbuffered",       no_argument,       NULL, OPT_UNBUFFERED },
+			{"numa",             no_argument,       NULL, OPT_NUMA },
+			{"verbose",          no_argument,       NULL, OPT_VERBOSE },
+			{"wakeup",           no_argument,       NULL, OPT_WAKEUP },
+			{"wakeuprt",         no_argument,       NULL, OPT_WAKEUPRT },
+			{"dbg_cyclictest",   no_argument,       NULL, OPT_DBGCYCLIC },
+			{"policy",           required_argument, NULL, OPT_POLICY },
+			{"help",             no_argument,       NULL, OPT_HELP },
+			{NULL, 0, NULL, 0}
+		};
+		int c = getopt_long(argc, argv, "a::A::b:Bc:Cd:D:EfF:h:H:i:Il:MnNo:O:p:PmqrRsSt::uUvwWT:",
+				    long_options, &option_index);
+		if (c == -1)
+			break;
+		switch (c) {
+		case 'a':
+		case OPT_AFFINITY:
+			option_affinity = 1;
+			if (smp || numa)
+				break;
+			if (optarg != NULL) {
+				parse_cpumask(optarg, max_cpus);
+				setaffinity = AFFINITY_SPECIFIED;
+			} else if (optind<argc && atoi(argv[optind])) {
+				parse_cpumask(argv[optind], max_cpus);
+				setaffinity = AFFINITY_SPECIFIED;
+			} else {
+				setaffinity = AFFINITY_USEALL;
+			}
+			break;
+		case 'A':
+		case OPT_ALIGNED:
+			aligned=1;
+			if (optarg != NULL)
+				offset = atoi(optarg) * 1000;
+			else if (optind<argc && atoi(argv[optind]))
+				offset = atoi(argv[optind]) * 1000;
+			else
+				offset = 0;
+			break;
+		case 'b':
+		case OPT_BREAKTRACE:
+			tracelimit = atoi(optarg); break;
+		case 'B':
+		case OPT_PREEMPTIRQ:
+			tracetype = PREEMPTIRQSOFF; break;
+		case 'c':
+		case OPT_CLOCK:
+			clocksel = atoi(optarg); break;
+		case 'C':
+		case OPT_CONTEXT:
+			tracetype = CTXTSWITCH; break;
+		case 'd':
+		case OPT_DISTANCE:
+			distance = atoi(optarg); break;
+		case 'D':
+		case OPT_DURATION:
+			duration = parse_time_string(optarg); break;
+		case 'E':
+		case OPT_EVENT:
+			enable_events = 1; break;
+		case 'f':
+		case OPT_FTRACE:
+			tracetype = FUNCTION; ftrace = 1; break;
+		case 'F':
+		case OPT_FIFO:
+			use_fifo = 1;
+			strncpy(fifopath, optarg, sizeof(fifopath) - 1);
+			break;
+
+		case 'H':
+		case OPT_HISTOFALL:
+			histofall = 1; /* fall through */
+		case 'h':
+		case OPT_HISTOGRAM:
+			histogram = atoi(optarg); break;
+		case 'i':
+		case OPT_INTERVAL:
+			interval = atoi(optarg); break;
+		case 'I':
+		case OPT_IRQSOFF:
+			if (tracetype == PREEMPTOFF) {
+				tracetype = PREEMPTIRQSOFF;
+				strncpy(tracer, "preemptirqsoff", sizeof(tracer));
+			} else {
+				tracetype = IRQSOFF;
+				strncpy(tracer, "irqsoff", sizeof(tracer));
+			}
+			break;
+		case 'l':
+		case OPT_LOOPS:
+			max_cycles = atoi(optarg); break;
+		case 'm':
+		case OPT_MLOCKALL:
+			lockall = 1; break;
+		case 'M':
+		case OPT_REFRESH:
+			refresh_on_max = 1; break;
+		case 'n':
+		case OPT_NANOSLEEP:
+			use_nanosleep = MODE_CLOCK_NANOSLEEP; break;
+		case 'N':
+		case OPT_NSECS:
+			use_nsecs = 1; break;
+		case 'o':
+		case OPT_OSCOPE:
+			oscope_reduction = atoi(optarg); break;
+		case 'O':
+		case OPT_TRACEOPT:
+			traceopt(optarg); break;
+		case 'p':
+		case OPT_PRIORITY:
+			priority = atoi(optarg);
+			if (policy != SCHED_FIFO && policy != SCHED_RR)
+				policy = SCHED_FIFO;
+			break;
+		case 'P':
+		case OPT_PREEMPTOFF:
+			if (tracetype == IRQSOFF) {
+				tracetype = PREEMPTIRQSOFF;
+				strncpy(tracer, "preemptirqsoff", sizeof(tracer));
+			} else {
+				tracetype = PREEMPTOFF;
+				strncpy(tracer, "preemptoff", sizeof(tracer));
+			}
+			break;
+		case 'q':
+		case OPT_QUIET:
+			quiet = 1; break;
+		case 'r':
+		case OPT_RELATIVE:
+			timermode = TIMER_RELTIME; break;
+		case 'R':
+		case OPT_RESOLUTION:
+			check_clock_resolution = 1; break;
+		case OPT_SECALIGNED:
+			secaligned = 1;
+			if (optarg != NULL)
+				offset = atoi(optarg) * 1000;
+			else if (optind < argc && atoi(argv[optind]))
+				offset = atoi(argv[optind]) * 1000;
+			else
+				offset = 0;
+			break;
+		case 's':
+		case OPT_SYSTEM:
+			use_system = MODE_SYS_OFFSET; break;
+		case 'S':
+		case OPT_SMP: /* SMP testing */
+			if (numa)
+				fatal("numa and smp options are mutually exclusive\n");
+			smp = 1;
+			num_threads = max_cpus;
+			setaffinity = AFFINITY_USEALL;
+			use_nanosleep = MODE_CLOCK_NANOSLEEP;
+			break;
+		case 't':
+		case OPT_THREADS:
+			if (smp) {
+				warn("-t ignored due to --smp\n");
+				break;
+			}
+			if (optarg != NULL)
+				num_threads = atoi(optarg);
+			else if (optind<argc && atoi(argv[optind]))
+				num_threads = atoi(argv[optind]);
+			else
+				num_threads = max_cpus;
+			break;
+		case 'T':
+		case OPT_TRACER:
+			tracetype = CUSTOM;
+			strncpy(tracer, optarg, sizeof(tracer) - 1);
+			break;
+		case 'u':
+		case OPT_UNBUFFERED:
+			setvbuf(stdout, NULL, _IONBF, 0); break;
+		case 'U':
+		case OPT_NUMA: /* NUMA testing */
+			if (smp)
+				fatal("numa and smp options are mutually exclusive\n");
+#ifdef NUMA
+			if (numa_available() == -1)
+				fatal("NUMA functionality not available!");
+			numa = 1;
+			num_threads = max_cpus;
+			setaffinity = AFFINITY_USEALL;
+			use_nanosleep = MODE_CLOCK_NANOSLEEP;
+#else
+			warn("cyclictest was not built with the numa option\n");
+			warn("ignoring --numa or -U\n");
+#endif
+			break;
+		case 'v':
+		case OPT_VERBOSE: verbose = 1; break;
+		case 'w':
+		case OPT_WAKEUP:
+			tracetype = WAKEUP; break;
+		case 'W':
+		case OPT_WAKEUPRT:
+			tracetype = WAKEUPRT; break;
+		case '?':
+		case OPT_HELP:
+			display_help(0); break;
+
+		/* long only options */
+		case OPT_PRIOSPREAD:
+			priospread = 1; break;
+		case OPT_LATENCY:
+			/* power management latency target value, default 0 */
+			latency_target_value = atoi(optarg);
+			if (latency_target_value < 0)
+				latency_target_value = 0;
+			break;
+		case OPT_NOTRACE:
+			notrace = 1; break;
+		case OPT_POLICY:
+			handlepolicy(optarg); break;
+		case OPT_DBGCYCLIC:
+			ct_debug = 1; break;
+		case OPT_LAPTOP:
+			laptop = 1; break;
+		}
+	}
+
+	if (option_affinity) {
+		if (smp) {
+			warn("-a ignored due to --smp\n");
+		} else if (numa) {
+			warn("-a ignored due to --numa\n");
+		}
+	}
+
+	if (tracelimit)
+		fileprefix = procfileprefix;
+
+	if (clocksel < 0 || clocksel >= ARRAY_SIZE(clocksources))
+		error = 1;
+
+	if (oscope_reduction < 1)
+		error = 1;
+
+	if (oscope_reduction > 1 && !verbose) {
+		warn("-o option is only meaningful with verbose (-v)\n");
+		error = 1;
+	}
+
+	if (histogram < 0)
+		error = 1;
+
+	if (histogram > HIST_MAX)
+		histogram = HIST_MAX;
+
+	if (histogram && distance != -1)
+		warn("distance is ignored and set to 0 if histogram is enabled\n");
+	if (distance == -1)
+		distance = DEFAULT_DISTANCE;
+
+	if (priority < 0 || priority > 99)
+		error = 1;
+
+	if (priospread && priority == 0) {
+		fprintf(stderr, "defaulting realtime priority to %d\n",
+			num_threads+1);
+		priority = num_threads+1;
+	}
+
+	if (priority && (policy != SCHED_FIFO && policy != SCHED_RR)) {
+		fprintf(stderr, "policy and priority don't match: setting policy to SCHED_FIFO\n");
+		policy = SCHED_FIFO;
+	}
+
+	if ((policy == SCHED_FIFO || policy == SCHED_RR) && priority == 0) {
+		fprintf(stderr, "defaulting realtime priority to %d\n",
+			num_threads+1);
+		priority = num_threads+1;
+	}
+
+	if (num_threads < 1)
+		error = 1;
+
+	if (aligned && secaligned)
+		error = 1;
+
+	if (aligned || secaligned) {
+		barrier_init(&globalt_barr, num_threads);
+		barrier_init(&align_barr, num_threads);
+	}
+
+	if (error) {
+		if (affinity_mask)
+			rt_bitmask_free(affinity_mask);
+		display_help(1);
+	}
+}
+
+static int check_kernel(void)
+{
+	struct utsname kname;
+	int maj, min, sub, kv, ret;
+
+	ret = uname(&kname);
+	if (ret) {
+		fprintf(stderr, "uname failed: %s. Assuming not 2.6\n",
+				strerror(errno));
+		return KV_NOT_SUPPORTED;
+	}
+	sscanf(kname.release, "%d.%d.%d", &maj, &min, &sub);
+	if (maj == 2 && min == 6) {
+		if (sub < 18)
+			kv = KV_26_LT18;
+		else if (sub < 24)
+			kv = KV_26_LT24;
+		else if (sub < 28) {
+			kv = KV_26_33;
+			strcpy(functiontracer, "ftrace");
+			strcpy(traceroptions, "iter_ctrl");
+		} else {
+			kv = KV_26_33;
+			strcpy(functiontracer, "function");
+			strcpy(traceroptions, "trace_options");
+		}
+	} else if (maj >= 3) {
+		kv = KV_30;
+		strcpy(functiontracer, "function");
+		strcpy(traceroptions, "trace_options");
+
+	} else
+		kv = KV_NOT_SUPPORTED;
+
+	return kv;
+}
+
+static int check_timer(void)
+{
+	struct timespec ts;
+
+	if (clock_getres(CLOCK_MONOTONIC, &ts))
+		return 1;
+
+	return (ts.tv_sec != 0 || ts.tv_nsec != 1);
+}
+
+static void sighand(int sig)
+{
+	if (sig == SIGUSR1) {
+		int i;
+		int oldquiet = quiet;
+
+		quiet = 0;
+		fprintf(stderr, "#---------------------------\n");
+		fprintf(stderr, "# cyclictest current status:\n");
+		for (i = 0; i < num_threads; i++)
+			print_stat(stderr, parameters[i], i, 0, 0);
+		fprintf(stderr, "#---------------------------\n");
+		quiet = oldquiet;
+		return;
+	}
+	shutdown = 1;
+	if (refresh_on_max)
+		pthread_cond_signal(&refresh_on_max_cond);
+	if (tracelimit && !notrace)
+		tracing(0);
+}
+
+static void print_tids(struct thread_param *par[], int nthreads)
+{
+	int i;
+
+	printf("# Thread Ids:");
+	for (i = 0; i < nthreads; i++)
+		printf(" %05d", par[i]->stats->tid);
+	printf("\n");
+}
+
+static void print_hist(struct thread_param *par[], int nthreads)
+{
+	int i, j;
+	unsigned long long int log_entries[nthreads+1];
+	unsigned long maxmax, alloverflows;
+
+	bzero(log_entries, sizeof(log_entries));
+
+	printf("# Histogram\n");
+	for (i = 0; i < histogram; i++) {
+		unsigned long long int allthreads = 0;
+
+		printf("%06d ", i);
+
+		for (j = 0; j < nthreads; j++) {
+			unsigned long curr_latency=par[j]->stats->hist_array[i];
+			printf("%06lu", curr_latency);
+			if (j < nthreads - 1)
+				printf("\t");
+			log_entries[j] += curr_latency;
+			allthreads += curr_latency;
+		}
+		if (histofall && nthreads > 1) {
+			printf("\t%06llu", allthreads);
+			log_entries[nthreads] += allthreads;
+		}
+		printf("\n");
+	}
+	printf("# Total:");
+	for (j = 0; j < nthreads; j++)
+		printf(" %09llu", log_entries[j]);
+	if (histofall && nthreads > 1)
+		printf(" %09llu", log_entries[nthreads]);
+	printf("\n");
+	printf("# Min Latencies:");
+	for (j = 0; j < nthreads; j++)
+		printf(" %05lu", par[j]->stats->min);
+	printf("\n");
+	printf("# Avg Latencies:");
+	for (j = 0; j < nthreads; j++)
+		printf(" %05lu", par[j]->stats->cycles ?
+		       (long)(par[j]->stats->avg/par[j]->stats->cycles) : 0);
+	printf("\n");
+	printf("# Max Latencies:");
+	maxmax = 0;
+	for (j = 0; j < nthreads; j++) {
+		printf(" %05lu", par[j]->stats->max);
+		if (par[j]->stats->max > maxmax)
+			maxmax = par[j]->stats->max;
+	}
+	if (histofall && nthreads > 1)
+		printf(" %05lu", maxmax);
+	printf("\n");
+	printf("# Histogram Overflows:");
+	alloverflows = 0;
+	for (j = 0; j < nthreads; j++) {
+		printf(" %05lu", par[j]->stats->hist_overflow);
+		alloverflows += par[j]->stats->hist_overflow;
+	}
+	if (histofall && nthreads > 1)
+		printf(" %05lu", alloverflows);
+	printf("\n");
+
+	printf("# Histogram Overflow at cycle number:\n");
+	for (i = 0; i < nthreads; i++) {
+		printf("# Thread %d:", i);
+		for (j = 0; j < par[i]->stats->num_outliers; j++)
+			printf(" %05lu", par[i]->stats->outliers[j]);
+		if (par[i]->stats->num_outliers < par[i]->stats->hist_overflow)
+			printf(" # %05lu others", par[i]->stats->hist_overflow - par[i]->stats->num_outliers);
+		printf("\n");
+	}
+	printf("\n");
+}
+
+static void print_stat(FILE *fp, struct thread_param *par, int index, int verbose, int quiet)
+{
+	struct thread_stat *stat = par->stats;
+
+	if (!verbose) {
+		if (quiet != 1) {
+			char *fmt;
+			if (use_nsecs)
+				fmt = "T:%2d (%5d) P:%2d I:%ld C:%7lu "
+					"Min:%7ld Act:%8ld Avg:%8ld Max:%8ld\n";
+			else
+				fmt = "T:%2d (%5d) P:%2d I:%ld C:%7lu "
+					"Min:%7ld Act:%5ld Avg:%5ld Max:%8ld\n";
+			fprintf(fp, fmt, index, stat->tid, par->prio,
+				par->interval, stat->cycles, stat->min, stat->act,
+				stat->cycles ?
+				(long)(stat->avg/stat->cycles) : 0, stat->max);
+		}
+	} else {
+		while (stat->cycles != stat->cyclesread) {
+			long diff = stat->values
+			    [stat->cyclesread & par->bufmsk];
+
+			if (diff > stat->redmax) {
+				stat->redmax = diff;
+				stat->cycleofmax = stat->cyclesread;
+			}
+			if (++stat->reduce == oscope_reduction) {
+				fprintf(fp, "%8d:%8lu:%8ld\n", index,
+					stat->cycleofmax, stat->redmax);
+				stat->reduce = 0;
+				stat->redmax = 0;
+			}
+			stat->cyclesread++;
+		}
+	}
+}
+
+
+/*
+ * thread that creates a named fifo and hands out run stats when someone
+ * reads from the fifo.
+ */
+void *fifothread(void *param)
+{
+	int ret;
+	int fd;
+	FILE *fp;
+	int i;
+
+	if (use_fifo == 0)
+		return NULL;
+
+	unlink(fifopath);
+	ret = mkfifo(fifopath, 0666);
+	if (ret) {
+		fprintf(stderr, "Error creating fifo %s: %s\n", fifopath, strerror(errno));
+		return NULL;
+	}
+	while (!shutdown) {
+		fd = open(fifopath, O_WRONLY|O_NONBLOCK);
+		if (fd < 0) {
+			usleep(500000);
+			continue;
+		}
+		fp = fdopen(fd, "w");
+		for (i=0; i < num_threads; i++)
+			print_stat(fp, parameters[i], i, 0, 0);
+		fclose(fp);
+		usleep(250);
+	}
+	unlink(fifopath);
+	return NULL;
+}
+
+#ifdef CONFIG_XENO_COBALT
+
+static const char *reason_str[] = {
+	[SIGDEBUG_UNDEFINED] = "received SIGDEBUG for unknown reason",
+	[SIGDEBUG_MIGRATE_SIGNAL] = "received signal",
+	[SIGDEBUG_MIGRATE_SYSCALL] = "invoked syscall",
+	[SIGDEBUG_MIGRATE_FAULT] = "triggered fault",
+	[SIGDEBUG_MIGRATE_PRIOINV] = "affected by priority inversion",
+	[SIGDEBUG_NOMLOCK] = "process memory not locked",
+	[SIGDEBUG_WATCHDOG] = "watchdog triggered (period too short?)",
+	[SIGDEBUG_LOCK_BREAK] = "scheduler lock break",
+};
+
+static void sigdebug(int sig, siginfo_t *si, void *context)
+{
+	const char fmt[] = "%s, aborting.\n"
+		"(enabling CONFIG_XENO_OPT_DEBUG_TRACE_RELAX may help)\n";
+	unsigned int reason = sigdebug_reason(si);
+	int n __attribute__ ((unused));
+	static char buffer[256];
+
+	if (reason > SIGDEBUG_WATCHDOG)
+		reason = SIGDEBUG_UNDEFINED;
+
+	switch(reason) {
+	case SIGDEBUG_UNDEFINED:
+	case SIGDEBUG_NOMLOCK:
+	case SIGDEBUG_WATCHDOG:
+		n = snprintf(buffer, sizeof(buffer), "latency: %s\n",
+			     reason_str[reason]);
+		write_check(STDERR_FILENO, buffer, n);
+		exit(EXIT_FAILURE);
+	}
+
+	n = snprintf(buffer, sizeof(buffer), fmt, reason_str[reason]);
+	write_check(STDERR_FILENO, buffer, n);
+	signal(sig, SIG_DFL);
+	kill(getpid(), sig);
+}
+
+#endif
+
+int main(int argc, char **argv)
+{
+	struct sigaction sa __attribute__((unused));
+	sigset_t sigset;
+	int signum = SIGALRM;
+	int mode;
+	int max_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+	int i, ret = -1;
+	int status;
+
+	process_options(argc, argv, max_cpus);
+
+	if (check_privs())
+		exit(EXIT_FAILURE);
+
+	if (verbose)
+		printf("Max CPUs = %d\n", max_cpus);
+
+	/* Checks if numa is on, program exits if numa on but not available */
+	numa_on_and_available();
+
+	/* lock all memory (prevent swapping) */
+	if (lockall)
+		if (mlockall(MCL_CURRENT|MCL_FUTURE) == -1) {
+			perror("mlockall");
+			goto out;
+		}
+
+	/* use the /dev/cpu_dma_latency trick if it's there */
+	set_latency_target();
+
+	kernelversion = check_kernel();
+
+	if (kernelversion == KV_NOT_SUPPORTED)
+		warn("Running on unknown kernel version...YMMV\n");
+
+	setup_tracer();
+
+	if (check_timer())
+		warn("High resolution timers not available\n");
+
+	if (check_clock_resolution) {
+		int clock;
+		uint64_t diff;
+		int k;
+		uint64_t min_non_zero_diff = UINT64_MAX;
+		struct timespec now;
+		struct timespec prev;
+		uint64_t reported_resolution = UINT64_MAX;
+		struct timespec res;
+		struct timespec *time;
+		int times;
+
+		clock = clocksources[clocksel];
+
+		if (clock_getres(clock, &res)) {
+			warn("clock_getres failed");
+		} else {
+			reported_resolution = (NSEC_PER_SEC * res.tv_sec) + res.tv_nsec;
+		}
+
+
+		/*
+		 * Calculate how many calls to clock_gettime are needed.
+		 * Then call it that many times.
+		 * Goal is to collect timestamps for ~ 0.001 sec.
+		 * This will reliably capture resolution <= 500 usec.
+		 */
+		times = 1000;
+		clock_gettime(clock, &prev);
+		for (k=0; k < times; k++) {
+			clock_gettime(clock, &now);
+		}
+
+		diff = calcdiff_ns(now, prev);
+		if (diff == 0) {
+			/*
+			 * No clock rollover occurred.
+			 * Use the default value for times.
+			 */
+			times = -1;
+		} else {
+			int call_time;
+			call_time = diff / times;         /* duration 1 call */
+			times = NSEC_PER_SEC / call_time; /* calls per second */
+			times /= 1000;                    /* calls per msec */
+			if (times < 1000)
+				times = 1000;
+		}
+		/* sanity check */
+		if ((times <= 0) || (times > 100000))
+			times = 100000;
+
+		time = calloc(times, sizeof(*time));
+
+		for (k=0; k < times; k++) {
+			clock_gettime(clock, &time[k]);
+		}
+
+		if (ct_debug) {
+			info("For %d consecutive calls to clock_gettime():\n", times);
+			info("time, delta time (nsec)\n");
+		}
+
+		prev = time[0];
+		for (k=1; k < times; k++) {
+
+			diff = calcdiff_ns(time[k], prev);
+			prev = time[k];
+
+			if (diff && (diff < min_non_zero_diff)) {
+				min_non_zero_diff = diff;
+			}
+
+			if (ct_debug)
+				info("%ld.%06ld  %5llu\n",
+				     time[k].tv_sec, time[k].tv_nsec,
+				     (unsigned long long)diff);
+		}
+
+		free(time);
+
+
+		if (verbose ||
+		    (min_non_zero_diff && (min_non_zero_diff > reported_resolution))) {
+			/*
+			 * Measured clock resolution includes the time to call
+			 * clock_gettime(), so it will be slightly larger than
+			 * actual resolution.
+			 */
+			warn("reported clock resolution: %llu nsec\n",
+			     (unsigned long long)reported_resolution);
+			warn("measured clock resolution approximately: %llu nsec\n",
+			     (unsigned long long)min_non_zero_diff);
+		}
+
+	}
+
+	mode = use_nanosleep + use_system;
+
+	sigemptyset(&sigset);
+	sigaddset(&sigset, signum);
+	sigprocmask (SIG_BLOCK, &sigset, NULL);
+
+	signal(SIGINT, sighand);
+	signal(SIGTERM, sighand);
+	signal(SIGUSR1, sighand);
+#ifdef CONFIG_XENO_COBALT
+	sigemptyset(&sa.sa_mask);
+	sa.sa_sigaction = sigdebug;
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGDEBUG, &sa, NULL);
+#endif
+
+	parameters = calloc(num_threads, sizeof(struct thread_param *));
+	if (!parameters)
+		goto out;
+	statistics = calloc(num_threads, sizeof(struct thread_stat *));
+	if (!statistics)
+		goto outpar;
+
+	for (i = 0; i < num_threads; i++) {
+		pthread_attr_t attr;
+		int node;
+		struct thread_param *par;
+		struct thread_stat *stat;
+
+		status = pthread_attr_init(&attr);
+		if (status != 0)
+			fatal("error from pthread_attr_init for thread %d: %s\n", i, strerror(status));
+
+		node = -1;
+		if (numa) {
+			void *stack;
+			void *currstk;
+			size_t stksize;
+
+			/* find the memory node associated with the cpu i */
+			node = rt_numa_numa_node_of_cpu(i);
+
+			/* get the stack size set for this thread */
+			if (pthread_attr_getstack(&attr, &currstk, &stksize))
+				fatal("failed to get stack size for thread %d\n", i);
+
+			/* if the stack size is zero, set a default */
+			if (stksize == 0)
+				stksize = PTHREAD_STACK_MIN * 2;
+
+			/*  allocate memory for a stack on appropriate node */
+			stack = rt_numa_numa_alloc_onnode(stksize, node, i);
+
+			/* set the thread's stack */
+			if (pthread_attr_setstack(&attr, stack, stksize))
+				fatal("failed to set stack addr for thread %d to %p\n",
+				      i, stack+stksize);
+		}
+
+		/* allocate the thread's parameter block  */
+		parameters[i] = par = threadalloc(sizeof(struct thread_param), node);
+		if (par == NULL)
+			fatal("error allocating thread_param struct for thread %d\n", i);
+		memset(par, 0, sizeof(struct thread_param));
+
+		/* allocate the thread's statistics block */
+		statistics[i] = stat = threadalloc(sizeof(struct thread_stat), node);
+		if (stat == NULL)
+			fatal("error allocating thread status struct for thread %d\n", i);
+		memset(stat, 0, sizeof(struct thread_stat));
+
+		/* allocate the histogram if requested */
+		if (histogram) {
+			int bufsize = histogram * sizeof(long);
+
+			stat->hist_array = threadalloc(bufsize, node);
+			stat->outliers = threadalloc(bufsize, node);
+			if (stat->hist_array == NULL || stat->outliers == NULL)
+				fatal("failed to allocate histogram of size %d on node %d\n",
+				      histogram, node);
+			memset(stat->hist_array, 0, bufsize);
+			memset(stat->outliers, 0, bufsize);
+		}
+
+		if (verbose) {
+			int bufsize = VALBUF_SIZE * sizeof(long);
+			stat->values = threadalloc(bufsize, node);
+			if (!stat->values)
+				goto outall;
+			memset(stat->values, 0, bufsize);
+			par->bufmsk = VALBUF_SIZE - 1;
+		}
+
+		par->prio = priority;
+		if (priority && (policy == SCHED_FIFO || policy == SCHED_RR))
+			par->policy = policy;
+		else {
+			par->policy = SCHED_OTHER;
+			force_sched_other = 1;
+		}
+		if (priospread)
+			priority--;
+		par->clock = clocksources[clocksel];
+		par->mode = mode;
+		par->timermode = timermode;
+		par->signal = signum;
+		par->interval = interval;
+		if (!histogram) /* same interval on CPUs */
+			interval += distance;
+		if (verbose)
+			printf("Thread %d Interval: %d\n", i, interval);
+		par->max_cycles = max_cycles;
+		par->stats = stat;
+		par->node = node;
+		par->tnum = i;
+		switch (setaffinity) {
+		case AFFINITY_UNSPECIFIED: par->cpu = -1; break;
+		case AFFINITY_SPECIFIED:
+			par->cpu = cpu_for_thread(i, max_cpus);
+			if (verbose)
+				printf("Thread %d using cpu %d.\n", i,
+					par->cpu);
+			break;
+		case AFFINITY_USEALL: par->cpu = i % max_cpus; break;
+		}
+		stat->min = 1000000;
+		stat->max = 0;
+		stat->avg = 0.0;
+		stat->threadstarted = 1;
+		status = pthread_create(&stat->thread, &attr, timerthread, par);
+		if (status)
+			fatal("failed to create thread %d: %s\n", i, strerror(status));
+
+	}
+	if (use_fifo)
+		status = pthread_create(&fifo_threadid, NULL, fifothread, NULL);
+
+	while (!shutdown) {
+		char lavg[256];
+		int fd, len, allstopped = 0;
+		static char *policystr = NULL;
+		static char *slash = NULL;
+		static char *policystr2;
+
+		if (!policystr)
+			policystr = policyname(policy);
+
+		if (!slash) {
+			if (force_sched_other) {
+				slash = "/";
+				policystr2 = policyname(SCHED_OTHER);
+			} else
+				slash = policystr2 = "";
+		}
+		if (!verbose && !quiet) {
+			fd = open("/proc/loadavg", O_RDONLY, 0666);
+			len = fd >= 0 ? read(fd, lavg, 255) : 0;
+			if (fd >= 0)
+				close(fd);
+			lavg[len > 0 ? len - 1 : 0] = 0x0;
+			printf("policy: %s%s%s: loadavg: %s          \n\n",
+			       policystr, slash, policystr2, lavg);
+		}
+
+		for (i = 0; i < num_threads; i++) {
+
+			print_stat(stdout, parameters[i], i, verbose, quiet);
+			if(max_cycles && statistics[i]->cycles >= max_cycles)
+				allstopped++;
+		}
+
+		usleep(10000);
+		if (shutdown || allstopped)
+			break;
+		if (!verbose && !quiet)
+			printf("\033[%dA", num_threads + 2);
+
+		if (refresh_on_max) {
+			pthread_mutex_lock(&refresh_on_max_lock);
+			pthread_cond_wait(&refresh_on_max_cond,
+					  &refresh_on_max_lock);
+			pthread_mutex_unlock(&refresh_on_max_lock);
+		}
+	}
+	ret = EXIT_SUCCESS;
+
+ outall:
+	shutdown = 1;
+	usleep(50000);
+
+	if (quiet)
+		quiet = 2;
+	for (i = 0; i < num_threads; i++) {
+		if (statistics[i]->threadstarted > 0)
+			pthread_kill(statistics[i]->thread, SIGTERM);
+		if (statistics[i]->threadstarted) {
+			pthread_join(statistics[i]->thread, NULL);
+			if (quiet && !histogram)
+				print_stat(stdout, parameters[i], i, 0, 0);
+		}
+		if (statistics[i]->values)
+			threadfree(statistics[i]->values, VALBUF_SIZE*sizeof(long), parameters[i]->node);
+	}
+
+	if (histogram) {
+		print_hist(parameters, num_threads);
+		for (i = 0; i < num_threads; i++) {
+			threadfree(statistics[i]->hist_array, histogram*sizeof(long), parameters[i]->node);
+			threadfree(statistics[i]->outliers, histogram*sizeof(long), parameters[i]->node);
+		}
+	}
+
+	if (tracelimit) {
+		print_tids(parameters, num_threads);
+		if (break_thread_id) {
+			printf("# Break thread: %d\n", break_thread_id);
+			printf("# Break value: %llu\n", (unsigned long long)break_thread_value);
+		}
+	}
+
+
+	for (i=0; i < num_threads; i++) {
+		if (!statistics[i])
+			continue;
+		threadfree(statistics[i], sizeof(struct thread_stat), parameters[i]->node);
+	}
+
+ outpar:
+	for (i = 0; i < num_threads; i++) {
+		if (!parameters[i])
+			continue;
+		threadfree(parameters[i], sizeof(struct thread_param), parameters[i]->node);
+	}
+ out:
+	/* ensure that the tracer is stopped */
+	if (tracelimit && !notrace)
+		tracing(0);
+
+
+	/* close any tracer file descriptors */
+	if (tracemark_fd >= 0)
+		close(tracemark_fd);
+	if (trace_fd >= 0)
+		close(trace_fd);
+
+	if (enable_events)
+		/* turn off all events */
+		event_disable_all();
+
+	/* turn off the function tracer */
+	fileprefix = procfileprefix;
+	if (tracetype && !notrace)
+		setkernvar("ftrace_enabled", "0");
+	fileprefix = get_debugfileprefix();
+
+	/* unlock everything */
+	if (lockall)
+		munlockall();
+
+	/* Be a nice program, cleanup */
+	if (kernelversion < KV_26_33)
+		restorekernvars();
+
+	/* close the latency_target_fd if it's open */
+	if (latency_target_fd >= 0)
+		close(latency_target_fd);
+
+	if (affinity_mask)
+		rt_bitmask_free(affinity_mask);
+
+	exit(ret);
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c
new file mode 100644
index 0000000..b32aa02
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.c
@@ -0,0 +1,96 @@
+/*
+ * Copyright (C) 2009 John Kacur <jkacur@redhat.com>
+ *
+ * error routines, similar to those found in
+ * Advanced Programming in the UNIX Environment 2nd ed.
+ */
+#include "error.h"
+
+/* Print an error message, plus a message for err and exit with error err */
+void err_exit(int err, char *fmt, ...)
+{
+	va_list ap;
+	va_start(ap, fmt);
+	err_doit(err, fmt, ap);
+	va_end(ap);
+	exit(err);
+}
+
+/* print an error message and return */
+void err_msg(char *fmt, ...)
+{
+	va_list ap;
+	va_start(ap, fmt);
+	err_doit(0, fmt, ap);
+	va_end(ap);
+	return;
+}
+
+/* Print an error message, plus a message for err, and return */
+void err_msg_n(int err, char *fmt, ...)
+{
+	va_list ap;
+	va_start(ap, fmt);
+	err_doit(err, fmt, ap);
+	va_end(ap);
+	return;
+}
+
+/* print an error message and quit */
+void err_quit(char *fmt, ...)
+{
+	va_list ap;
+	va_start(ap, fmt);
+	err_doit(0, fmt, ap);
+	va_end(ap);
+	exit(1);
+}
+
+void debug(char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	fputs("DEBUG: ", stderr);
+	err_doit(0, fmt, ap);
+	va_end(ap);
+}
+
+void info(char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	fputs("INFO: ", stderr);
+	err_doit(0, fmt, ap);
+	va_end(ap);
+}
+
+void warn(char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	fputs("WARN: ", stderr);
+	err_doit(0, fmt, ap);
+	va_end(ap);
+}
+
+void fatal(char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	fputs("FATAL: ", stderr);
+	err_doit(0, fmt, ap);
+	va_end(ap);
+	exit(EXIT_FAILURE);
+}
+
+void err_doit(int err, const char *fmt, va_list ap)
+{
+	vfprintf(stderr, fmt, ap);
+	if (err)
+		fprintf(stderr, ": %s\n", strerror(err));
+	return;
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h
new file mode 100644
index 0000000..ae05a2e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/error.h
@@ -0,0 +1,19 @@
+#ifndef __ERROR_H
+#define __ERROR_H
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <string.h>
+
+void err_exit(int err, char *fmt, ...);
+void err_msg(char *fmt, ...);
+void err_msg_n(int err, char *fmt, ...);
+void err_quit(char *fmt, ...);
+void debug(char *fmt, ...);
+void info(char *fmt, ...);
+void warn(char *fmt, ...);
+void fatal(char *fmt, ...);
+void err_doit(int err, const char *fmt, va_list ap);
+
+#endif	/* __ERROR_H */
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h
new file mode 100644
index 0000000..064e51c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-sched.h
@@ -0,0 +1,61 @@
+/*
+   rt-sched.h - sched_setattr() and sched_getattr() API
+
+   (C) Dario Faggioli <raistlin@linux.it>, 2009, 2010
+   Copyright (C) 2014 BMW Car IT GmbH, Daniel Wagner <daniel.wagner@bmw-carit.de>
+
+   This program is free software; you can redistribute it and/or modify
+   it under the terms of the GNU General Public License as published by
+   the Free Software Foundation; either version 2 of the License, or
+   (at your option) any later version.
+
+   This program is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+   GNU General Public License for more details.
+
+   You should have received a copy of the GNU General Public License
+   along with this program; if not, write to the Free Software
+   Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301
+   USA */
+
+/* This file is based on Dario Faggioli's libdl. Eventually it will be
+   replaced by a proper implementation of this API. */
+
+#ifndef __RT_SCHED_H__
+#define __RT_SCHED_H__
+
+#include <stdint.h>
+#include <sys/types.h>
+
+#ifndef SCHED_DEADLINE
+#define SCHED_DEADLINE 6
+#endif
+
+struct sched_attr {
+	uint32_t size;
+	uint32_t sched_policy;
+	uint64_t sched_flags;
+
+	/* SCHED_NORMAL, SCHED_BATCH */
+	int32_t sched_nice;
+
+	/* SCHED_FIFO, SCHED_RR */
+	uint32_t sched_priority;
+
+	/* SCHED_DEADLINE */
+	uint64_t sched_runtime;
+	uint64_t sched_deadline;
+	uint64_t sched_period;
+};
+
+int sched_setattr(pid_t pid,
+		  const struct sched_attr *attr,
+		  unsigned int flags);
+
+int sched_getattr(pid_t pid,
+		  struct sched_attr *attr,
+		  unsigned int size,
+		  unsigned int flags);
+
+#endif /* __RT_SCHED_H__ */
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c
new file mode 100644
index 0000000..3882d23
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.c
@@ -0,0 +1,319 @@
+/*
+ * Copyright (C) 2009 Carsten Emde <carsten.emde@osadl.org>
+ * Copyright (C) 2010 Clark Williams <williams@redhat.com>
+ *
+ * based on functions from cyclictest that has
+ * (C) 2008-2009 Clark Williams <williams@redhat.com>
+ * (C) 2005-2007 Thomas Gleixner <tglx@linutronix.de>
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <sched.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <sys/syscall.h> /* For SYS_gettid definitions */
+#include "rt-utils.h"
+#include "rt-sched.h"
+#include "error.h"
+
+static char debugfileprefix[MAX_PATH];
+
+/*
+ * Finds the tracing directory in a mounted debugfs
+ */
+char *get_debugfileprefix(void)
+{
+	char type[100];
+	FILE *fp;
+	int size;
+	int found = 0;
+	struct stat s;
+
+	if (debugfileprefix[0] != '\0')
+		goto out;
+
+	/* look in the "standard" mount point first */
+	if ((stat("/sys/kernel/debug/tracing", &s) == 0) && S_ISDIR(s.st_mode)) {
+		strcpy(debugfileprefix, "/sys/kernel/debug/tracing/");
+		goto out;
+	}
+
+	/* now look in the "other standard" place */
+	if ((stat("/debug/tracing", &s) == 0) && S_ISDIR(s.st_mode)) {
+		strcpy(debugfileprefix, "/debug/tracing/");
+		goto out;
+	}
+	
+	/* oh well, parse /proc/mounts and see if it's there */
+	if ((fp = fopen("/proc/mounts","r")) == NULL)
+		goto out;
+
+	while (fscanf(fp, "%*s %"
+		      STR(MAX_PATH)
+		      "s %99s %*s %*d %*d\n",
+		      debugfileprefix, type) == 2) {
+		if (strcmp(type, "debugfs") == 0) {
+			found = 1;
+			break;
+		}
+		/* stupid check for systemd-style autofs mount */
+		if ((strcmp(debugfileprefix, "/sys/kernel/debug") == 0) &&
+		    (strcmp(type, "systemd") == 0)) {
+			found = 1;
+			break;
+		}
+	}
+	fclose(fp);
+
+	if (!found) {
+		debugfileprefix[0] = '\0';
+		goto out;
+	}
+
+	size = sizeof(debugfileprefix) - strlen(debugfileprefix) - 1;
+	strncat(debugfileprefix, "/tracing/", size);
+
+out:
+	return debugfileprefix;
+}
+
+int mount_debugfs(char *path)
+{
+	char *mountpoint = path;
+	char cmd[MAX_PATH];
+	char *prefix;
+	int ret;
+
+	/* if it's already mounted just return */
+	prefix = get_debugfileprefix();
+	if (strlen(prefix) != 0) {
+		info("debugfs mountpoint: %s\n", prefix);
+		return 0;
+	}
+	if (!mountpoint)
+		mountpoint = "/sys/kernel/debug";
+	
+	sprintf(cmd, "mount -t debugfs debugfs %s", mountpoint);
+	ret = system(cmd);
+	if (ret != 0) {
+		fprintf(stderr, "Error mounting debugfs at %s: %s\n", mountpoint, strerror(errno));
+		return -1;
+	}
+	return 0;
+		
+}
+
+static char **tracer_list;
+static char *tracer_buffer;
+static int num_tracers;
+#define CHUNKSZ   1024
+
+/*
+ * return a list of the tracers configured into the running kernel
+ */
+
+int get_tracers(char ***list)
+{
+	int ret;
+	FILE *fp;
+	char buffer[CHUNKSZ];
+	char *prefix = get_debugfileprefix();
+	char *tmpbuf = NULL;
+	char *ptr;
+	int tmpsz = 0;
+
+	/* if we've already parsed it, return what we have */
+	if (tracer_list) {
+		*list = tracer_list;
+		return num_tracers;
+	}
+
+	/* open the tracing file available_tracers */
+	sprintf(buffer, "%savailable_tracers", prefix);
+	if ((fp = fopen(buffer, "r")) == NULL)
+		fatal ("Can't open %s for reading\n", buffer);
+
+	/* allocate initial buffer */
+	ptr = tmpbuf = malloc(CHUNKSZ);
+	if (ptr == NULL)
+		fatal("error allocating initial space for tracer list\n");
+
+	/* read in the list of available tracers */
+	while ((ret = fread(buffer, sizeof(char), CHUNKSZ, fp))) {
+		if ((ptr+ret+1) > (tmpbuf+tmpsz)) {
+			size_t used = ptr - tmpbuf;
+			tmpbuf = realloc(tmpbuf, tmpsz + CHUNKSZ);
+			if (tmpbuf == NULL)
+				fatal("error allocating space for list of valid tracers\n");
+			tmpsz += CHUNKSZ;
+			/* realloc may have moved the block */
+			ptr = tmpbuf + used;
+		}
+		strncpy(ptr, buffer, ret);
+		ptr += ret;
+	}
+	*ptr = '\0';	/* terminate the buffer before strtok() parsing */
+	if (tmpsz == 0)
+		fatal("error reading available tracers\n");
+	
+	tracer_buffer = tmpbuf;
+
+	/* get a buffer for the pointers to tracers */
+	if (!(tracer_list = malloc(sizeof(char *))))
+		fatal("error allocating tracer list buffer\n");
+
+	/* parse the buffer */
+	ptr = strtok(tmpbuf, " \t\n\r");
+	do {
+		tracer_list[num_tracers++] = ptr;
+		tracer_list = realloc(tracer_list, sizeof(char*)*(num_tracers+1));
+		tracer_list[num_tracers] = NULL;
+	} while ((ptr = strtok(NULL, " \t\n\r")) != NULL);
+
+	/* return the list and number of tracers */
+	*list = tracer_list;
+	return num_tracers;
+}
+
+
+/* 
+ * return zero if tracername is not a valid tracer, non-zero if it is 
+ */
+
+int valid_tracer(char *tracername)
+{
+	char **list;
+	int ntracers;
+	int i;
+
+	ntracers = get_tracers(&list);
+	if (ntracers == 0 || tracername == NULL)
+		return 0;
+	for (i = 0; i < ntracers; i++)
+		if (strncmp(list[i], tracername, strlen(list[i])) == 0)
+			return 1;
+	return 0;
+}
+
+/*
+ * enable event tracepoint
+ */
+int setevent(char *event, char *val)
+{
+	char *prefix = get_debugfileprefix();
+	char buffer[MAX_PATH];
+	int fd;
+	int ret;
+
+	sprintf(buffer, "%s%s", prefix, event);
+	if ((fd = open(buffer, O_WRONLY)) < 0) {
+		warn("unable to open %s\n", buffer);
+		return -1;
+	}
+	if ((ret = write(fd, val, strlen(val))) < 0) {
+		warn("unable to write %s to %s\n", val, buffer);
+		close(fd);
+		return -1;
+	}
+	close(fd);
+	return 0;
+}
+
+int event_enable_all(void)
+{
+	return setevent("events/enable", "1");
+}
+
+int event_disable_all(void)
+{
+	return setevent("events/enable", "0");
+}
+
+int event_enable(char *event) 
+{
+	char path[MAX_PATH];
+	sprintf(path, "events/%s/enable", event);
+	return setevent(path, "1");
+}
+
+int event_disable(char *event)
+{
+	char path[MAX_PATH];
+	sprintf(path, "events/%s/enable", event);
+	return setevent(path, "0");
+}
+	
+int check_privs(void)
+{
+	int policy = sched_getscheduler(0);
+	struct sched_param param, old_param;
+
+	/* if we're already running a realtime scheduler
+	 * then we *should* be able to change things later
+	 */
+	if (policy == SCHED_FIFO || policy == SCHED_RR)
+		return 0;
+
+	/* first get the current parameters */
+	if (sched_getparam(0, &old_param)) {
+		fprintf(stderr, "unable to get scheduler parameters\n");
+		return 1;
+	}
+	param = old_param;
+
+	/* try to change to SCHED_FIFO */
+	param.sched_priority = 1;
+	if (sched_setscheduler(0, SCHED_FIFO, &param)) {
+		fprintf(stderr, "Unable to change scheduling policy!\n");
+		fprintf(stderr, "either run as root or join realtime group\n");
+		return 1;
+	}
+
+	/* we're good; change back and return success */
+	return sched_setscheduler(0, policy, &old_param);
+}
+
+const char *policy_to_string(int policy)
+{
+	switch (policy) {
+	case SCHED_OTHER:
+		return "SCHED_OTHER";
+	case SCHED_FIFO:
+		return "SCHED_FIFO";
+	case SCHED_RR:
+		return "SCHED_RR";
+	case SCHED_BATCH:
+		return "SCHED_BATCH";
+	case SCHED_IDLE:
+		return "SCHED_IDLE";
+	case SCHED_DEADLINE:
+		return "SCHED_DEADLINE";
+	}
+
+	return "unknown";
+}
+
+uint32_t string_to_policy(const char *str)
+{
+	if (!strcmp(str, "other"))
+		return SCHED_OTHER;
+	else if (!strcmp(str, "fifo"))
+		return SCHED_FIFO;
+	else if (!strcmp(str, "rr"))
+		return SCHED_RR;
+	else if (!strcmp(str, "batch"))
+		return SCHED_BATCH;
+	else if (!strcmp(str, "idle"))
+		return SCHED_IDLE;
+	else if (!strcmp(str, "deadline"))
+		return SCHED_DEADLINE;
+
+	return 0;
+}
+
+pid_t gettid(void)
+{
+	return syscall(SYS_gettid);
+}
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h
new file mode 100644
index 0000000..a7c7640
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt-utils.h
@@ -0,0 +1,32 @@
+#ifndef __RT_UTILS_H
+#define __RT_UTILS_H
+
+#include <stdint.h>
+#include <linux/sched.h>
+
+#ifndef SCHED_NORMAL
+#define SCHED_NORMAL SCHED_OTHER
+#endif
+
+#define _STR(x) #x
+#define STR(x) _STR(x)
+#define MAX_PATH 256
+
+int check_privs(void);
+char *get_debugfileprefix(void);
+int mount_debugfs(char *);
+int get_tracers(char ***);
+int valid_tracer(char *);
+
+int setevent(char *event, char *val);
+int event_enable(char *event);
+int event_disable(char *event);
+int event_enable_all(void);
+int event_disable_all(void);
+
+const char *policy_to_string(int policy);
+uint32_t string_to_policy(const char *str);
+
+pid_t gettid(void);
+
+#endif	/* __RT_UTILS_H */
diff --git a/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h
new file mode 100644
index 0000000..98e7d0f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/demo/posix/cyclictest/rt_numa.h
@@ -0,0 +1,277 @@
+/*
+ * A numa library for cyclictest.
+ * The functions here are designed to work whether cyclictest has been
+ * compiled with numa support or not, and whether the user uses the --numa
+ * option or not.
+ * They should also work correctly with older versions of the numactl lib
+ * such as the one found on RHEL5, or with the newer version 2 and above.
+ *
+ * The difference in behavior hinges on whether LIBNUMA_API_VERSION >= 2,
+ * in which case we will employ the bitmask affinity behavior -or-
+ * either LIBNUMA_API_VERSION < 2 or NUMA support is missing altogether,
+ * in which case we retain the older affinity behavior which can either
+ * specify a single CPU core or else use all cores.
+ *
+ * (C) 2010 John Kacur <jkacur@redhat.com>
+ * (C) 2010 Clark Williams <williams@redhat.com>
+ *
+ */
+
+#ifndef _RT_NUMA_H
+#define _RT_NUMA_H
+
+#include "rt-utils.h"
+#include "error.h"
+
+static int numa = 0;
+
+#ifdef NUMA
+#include <numa.h>
+
+#ifndef LIBNUMA_API_VERSION
+#define LIBNUMA_API_VERSION 1
+#endif
+
+static void *
+threadalloc(size_t size, int node)
+{
+	if (node == -1)
+		return malloc(size);
+	return numa_alloc_onnode(size, node);
+}
+
+static void
+threadfree(void *ptr, size_t size, int node)
+{
+	if (node == -1)
+		free(ptr);
+	else
+		numa_free(ptr, size);
+}
+
+static void rt_numa_set_numa_run_on_node(int node, int cpu)
+{
+	int res;
+	res = numa_run_on_node(node);
+	if (res)
+		warn("Could not set NUMA node %d for thread %d: %s\n",
+				node, cpu, strerror(errno));
+	return;
+}
+
+static void *rt_numa_numa_alloc_onnode(size_t size, int node, int cpu)
+{
+	void *stack;
+	stack = numa_alloc_onnode(size, node);
+	if (stack == NULL)
+		fatal("failed to allocate %d bytes on node %d for cpu %d\n",
+				size, node, cpu);
+	return stack;
+}
+
+#if LIBNUMA_API_VERSION >= 2
+
+/*
+ * Use new bit mask CPU affinity behavior
+ */
+static int rt_numa_numa_node_of_cpu(int cpu)
+{
+	int node;
+	node = numa_node_of_cpu(cpu);
+	if (node == -1)
+		fatal("invalid cpu passed to numa_node_of_cpu(%d)\n", cpu);
+	return node;
+}
+
+static inline unsigned int rt_numa_bitmask_isbitset( const struct bitmask *mask,
+	unsigned long i)
+{
+	return numa_bitmask_isbitset(mask,i);
+}
+
+static inline struct bitmask* rt_numa_parse_cpustring(const char* s,
+	int max_cpus)
+{
+#ifdef HAVE_PARSE_CPUSTRING_ALL		/* Currently not defined anywhere.  No
+					   autotools build. */
+	return numa_parse_cpustring_all(s);
+#else
+	/* We really need numa_parse_cpustring_all(), so we can assign threads
+	 * to cores which are part of an isolcpus set, but early 2.x versions of
+	 * libnuma do not have this function.  A work around should be to run
+	 * your command with e.g. taskset -c 9-15 <command>
+	 */
+	return numa_parse_cpustring((char *)s);
+#endif
+}
+
+static inline void rt_bitmask_free(struct bitmask *mask)
+{
+	numa_bitmask_free(mask);
+}
+
+#else	/* LIBNUMA_API_VERSION == 1 */
+
+struct bitmask {
+	unsigned long size; /* number of bits in the map */
+	unsigned long *maskp;
+};
+#define BITS_PER_LONG	(8*sizeof(long))
+
+/*
+ * Map legacy CPU affinity behavior onto bit mask infrastructure
+ */
+static int rt_numa_numa_node_of_cpu(int cpu)
+{
+	unsigned char cpumask[256];
+	int node, idx, bit;
+	int max_node, max_cpus;
+
+	max_node = numa_max_node();
+	max_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+
+	if (cpu >= max_cpus) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	/* calculate bitmask index and relative bit position of cpu */
+	idx = cpu / 8;
+	bit = cpu % 8;
+
+	for (node = 0; node <= max_node; node++) {
+		if (numa_node_to_cpus(node, (void *) cpumask, sizeof(cpumask)))
+			return -1;
+
+		if (cpumask[idx] & (1<<bit))
+			return node;
+	}
+	errno = EINVAL;
+	return -1;
+}
+
+static inline unsigned int rt_numa_bitmask_isbitset( const struct bitmask *mask,
+	unsigned long i)
+{
+	long bit = mask->maskp[i/BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG));
+	return (bit != 0);
+}
+
+static inline struct bitmask* rt_numa_parse_cpustring(const char* s,
+	int max_cpus)
+{
+	int cpu;
+	struct bitmask *mask = NULL;
+	cpu = atoi(s);
+	if (0 <= cpu && cpu < max_cpus) {
+		mask = malloc(sizeof(*mask));
+		if (mask) {
+			/* Round up to integral number of longs to contain
+			 * max_cpus bits */
+			int nlongs = (max_cpus+BITS_PER_LONG-1)/BITS_PER_LONG;
+
+			mask->maskp = calloc(nlongs, sizeof(long));
+			if (mask->maskp) {
+				mask->maskp[cpu/BITS_PER_LONG] |=
+					(1UL << (cpu % BITS_PER_LONG));
+				mask->size = max_cpus;
+			} else {
+				free(mask);
+				mask = NULL;
+			}
+		}
+	}
+	return mask;
+}
+
+static inline void rt_bitmask_free(struct bitmask *mask)
+{
+	free(mask->maskp);
+	free(mask);
+}
+
+#endif	/* LIBNUMA_API_VERSION */
+
+static void numa_on_and_available(void)
+{
+	if (numa && (numa_available() == -1))
+		fatal("--numa specified and numa functions not available.\n");
+}
+
+#else /* ! NUMA */
+
+struct bitmask {
+    unsigned long size; /* number of bits in the map */
+    unsigned long *maskp;
+};
+#define BITS_PER_LONG    (8*sizeof(long))
+
+static inline void *threadalloc(size_t size, int n) { return malloc(size); }
+static inline void threadfree(void *ptr, size_t s, int n) { free(ptr); }
+static inline void rt_numa_set_numa_run_on_node(int n, int c) { }
+static inline int rt_numa_numa_node_of_cpu(int cpu) { return -1; }
+static void *rt_numa_numa_alloc_onnode(size_t s, int n, int c) { return NULL; }
+
+/*
+ * Map legacy CPU affinity behavior onto bit mask infrastructure
+ */
+static inline unsigned int rt_numa_bitmask_isbitset( const struct bitmask *mask,
+	unsigned long i)
+{
+	long bit = mask->maskp[i/BITS_PER_LONG] & (1UL << (i % BITS_PER_LONG));
+	return (bit != 0);
+}
+
+static inline struct bitmask* rt_numa_parse_cpustring(const char* s,
+	int max_cpus)
+{
+	int cpu;
+	struct bitmask *mask = NULL;
+	cpu = atoi(s);
+	if (0 <= cpu && cpu < max_cpus) {
+		mask = malloc(sizeof(*mask));
+		if (mask) {
+			/* Round up to integral number of longs to contain
+			 * max_cpus bits */
+			int nlongs = (max_cpus+BITS_PER_LONG-1)/BITS_PER_LONG;
+
+			mask->maskp = calloc(nlongs, sizeof(long));
+			if (mask->maskp) {
+				mask->maskp[cpu/BITS_PER_LONG] |=
+					(1UL << (cpu % BITS_PER_LONG));
+				mask->size = max_cpus;
+			} else {
+				free(mask);
+				mask = NULL;
+			}
+		}
+	}
+	return mask;
+}
+
+static inline void rt_bitmask_free(struct bitmask *mask)
+{
+	free(mask->maskp);
+	free(mask);
+}
+
+static inline void numa_on_and_available(void) { }
+
+#endif	/* NUMA */
+
+/*
+ * Any behavioral differences above are transparent to these functions
+ */
+/** Returns number of bits set in mask. */
+static inline unsigned int rt_numa_bitmask_count(const struct bitmask *mask)
+{
+	unsigned int num_bits = 0, i;
+	for (i = 0; i < mask->size; i++) {
+		if (rt_numa_bitmask_isbitset(mask, i))
+			num_bits++;
+	}
+	/* Could stash this instead of recomputing every time. */
+	return num_bits;
+}
+
+#endif	/* _RT_NUMA_H */
diff --git a/kernel/xenomai-v3.2.4/doc/Makefile.am b/kernel/xenomai-v3.2.4/doc/Makefile.am
new file mode 100644
index 0000000..3d3f5ec
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/Makefile.am
@@ -0,0 +1,6 @@
+SUBDIRS=gitdoc doxygen asciidoc
+
+gitdoc doxygen asciidoc: FORCE
+	$(MAKE) -C $@
+
+.PHONY: FORCE
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc
new file mode 100644
index 0000000..dce7f40
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/MIGRATION.adoc
@@ -0,0 +1,1935 @@
+Migrating from Xenomai 2.x to 3.x
+=================================
+
+== Configuration ==
+
+=== User programs and libraries ===
+
+Changes in +xeno-config+::
+
+As with Xenomai 2.x, +xeno-config+ is available for retrieving the
+compilation and link flags for building Xenomai 3.x applications. This
+script works for both the Cobalt and Mercury environments alike.
+
+ * Each +--skin=<api>+ option specifier can be abbreviated as
+ +--<api>+. For instance, +--psos+ is a shorthand for +--skin=psos+ on
+ the command line.
+
+ * Over Cobalt, only *xeno-config --posix --ldflags* (or *--rtdm* as
+ an alias) returns the proper linker flags to cause POSIX routines
+ invoked by the application to be redirected to their respective
+ Xenomai implementations. No other API implies such wrapping. For this
+ reason, *--cobalt --ldflags* should be used for linking exclusively
+ against the Cobalt library (i.e. +libcobalt.so+) *without* symbol
+ wrapping. Conversely, mentioning *--posix* along with other API
+ switches with *--ldflags* will cause POSIX symbol wrapping to take
+ place, e.g. use *--posix --alchemy --ldflags* for mixed API support
+ with POSIX symbol wrapping.
+
+ * Over _Mercury_, +--posix+ and +--rtdm+ are ignored placeholders,
+   since the full POSIX API is available with the glibc and the
+   threading library.
+
+ * +--[skin=]alchemy+ replaces the former +--skin=native+ switch.
+
+ * +--core+ can be used to retrieve the name of the Xenomai core system
+  for which +xeno-config+ was generated. Possible output values are
+  +cobalt+ and +mercury+.
+
+ * +--ccld+ retrieves a C compiler command suitable for linking a
+   Xenomai 3.x application.
+
+ * +--info+ retrieves the current system information, including the
+   Xenomai release detected on the platform.
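+
+As an illustration, a GNU make fragment for building a single-file
+Alchemy application might look like this (sketch only; file and
+target names are arbitrary):
+
+------------------------------------------------------------
+CFLAGS  := $(shell xeno-config --alchemy --cflags)
+LDFLAGS := $(shell xeno-config --alchemy --ldflags)
+CC      := $(shell xeno-config --ccld)
+
+app: app.c
+	$(CC) -o $@ $< $(CFLAGS) $(LDFLAGS)
+------------------------------------------------------------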
+
+[[auto-init]]
+Auto-initialization::
+
++--no-auto-init+ can be passed to disable automatic initialization of
+the Copperplate library when the application process enters the
++main()+ routine.
+
+In such a case, the application code using any API based on the
+Copperplate layer shall call the +copperplate_init(int *argcp, char
+*const **argvp)+ routine manually, as part of its initialization
+process, _before_ any real-time service is invoked.
+
+This routine takes the addresses of the argument count and the
+argument vector passed to the main() routine. copperplate_init()
+handles the Xenomai options present in the argument vector and strips
+them out, so that on return the vector contains only the unprocessed
+options, with the count updated accordingly.
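+
+A minimal sketch of the manual initialization sequence follows; the
+header location is an assumption, check your installation:
+
+---------------------------------------------------------------
+#include <copperplate/init.h>	/* assumed header path */
+
+int main(int argc, char *const argv[])
+{
+	/* Parse and strip the Xenomai options from argc/argv. */
+	copperplate_init(&argc, &argv);
+
+	/* Real-time services may be invoked from this point on. */
+	...
+}
+---------------------------------------------------------------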
+
++xeno-config+ enables the Copperplate auto-init feature by default.
+
+x86 vsyscall support::
+
+The +--enable-x86-sep+ configuration switch was renamed to
++--enable-x86-vsyscall+ to fix a misnomer. This option should be left
+enabled (default), unless *linuxthreads* are used instead of *NPTL*.
+
+=== Kernel parameters (Cobalt) ===
+
+System parameters renamed::
+
+* xeno_hal.supported_cpus -> xenomai.supported_cpus
+* xeno_hal.disable -> xenomai.state=disabled
+* xeno_hal.cpufreq -> xenomai.cpufreq
+* xeno_nucleus.watchdog_timeout -> xenomai.watchdog_timeout
+* xeno_nucleus.xenomai_gid -> xenomai.allowed_group
+* xeno_nucleus.sysheap_size -> xenomai.sysheap_size
+* xeno_hal.smi (x86 only) -> xenomai.smi
+* xeno_hal.smi_mask (x86 only) -> xenomai.smi_mask
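+
+For instance, a Xenomai 2.x kernel command line fragment and its 3.x
+equivalent would read (illustrative values):
+
+------------------------------------------------------------
+    /* Xenomai 2.x */
+xeno_nucleus.watchdog_timeout=10 xeno_hal.supported_cpus=0x3
+    /* Xenomai 3.x */
+xenomai.watchdog_timeout=10 xenomai.supported_cpus=0x3
+------------------------------------------------------------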
+
+Obsolete parameters dropped::
+
+* xeno_rtdm.tick_arg
+* rtdm.devname_hashtab_size
+* rtdm.protocol_hashtab_size
+
+.Rationale
+**********************************************************************
+Periodic timing is directly handled from the API layer in
+user-space. Cobalt kernel timing is tickless.
+**********************************************************************
+
+== Getting the system state ==
+
+When running Copperplate-based APIs (i.e. all but pure POSIX),
+querying the state of the real-time system should be done via the new
+Xenomai registry interface available with Xenomai 3.x, which is
+turned on when +--enable-registry+ is passed to the configuration
+script for building the Xenomai libraries and programs.
+
+The new registry support is common to the Cobalt and Mercury cores,
+with only marginal differences due to the presence (or absence) of a
+co-kernel in the system.
+
+=== New FUSE-based registry interface ===
+
+The Xenomai system state is now fully exported via a FUSE-based
+filesystem.  The hierarchy of the Xenomai registry is organized as
+follows:
+
+----------------------------------------------------------------------------    
+/mount-point              /* registry fs root, defaults to /var/run/xenomai */
+ /user                    /* user name */
+    /session              /* shared session name or anon@<pid> */
+      /pid                /* application (main) pid */
+        /skin             /* API name: alchemy/vxworks/psos/... */
+          /family         /* object class (task, semaphore, ...) */
+             { exported objects... }
+      /system             /* session-wide information */
+----------------------------------------------------------------------------    
+    
+Each leaf entry under a session hierarchy is normally viewable, for
+retrieving the information attached to the corresponding object, such
+as its state and/or value. There can be multiple sessions hosted
+under a single registry mount point.
+    
+The /system hierarchy provides information about the current state of
+the Xenomai core, aggregating data from all processes which belong to
+the parent session. Typically, the status of all threads and heaps
+created by the session can be retrieved.
+    
+The registry daemon is a companion tool managing exactly one registry
+mount point, which is specified by the --root option on the command
+line. This daemon is automatically spawned by the registry support
+code as required. There is normally no action required from users for
+managing it.
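+
+For instance, with the default mount point, the state of an Alchemy
+task could be inspected with a command along these lines (the user,
+session, pid and object names below are hypothetical):
+
+---------------------------------------------------------------------
+$ cat /var/run/xenomai/joe/mysession/2712/alchemy/tasks/worker
+---------------------------------------------------------------------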
+    
+=== /proc/xenomai interface ===
+
+The /proc/xenomai interface is still available when running over the
+Cobalt core, mainly for pure POSIX-based applications. The following
+changes took place:
+
+Thread status::
+
+All pseudo-files reporting the various thread states moved under the
+new +sched/+ hierarchy, i.e.
+
++{sched, stat, acct}+ -> +sched/{threads, stat, acct}+
+
+Clocks::
+
+With the introduction of dynamic clock registration in the Cobalt
+core, the +clock/+ hierarchy was added, to reflect the current state
+of all timers from the registered Xenomai clocks.
+
+There is no kernel-based time base management anymore with Xenomai
+{xenover}. Functionally speaking, only the former _master_ time base
+remains, periodic timing is now controlled locally from the Xenomai
+libraries in user-space.
+
+Xenomai {xenover} defines a built-in clock named _coreclk_, which has
+the same properties as the former _master_ time base available with
+Xenomai 2.x (i.e. tickless with nanosecond resolution).
+
+The settings of existing clocks can be read from entries under the new
+clock/ hierarchy. Active timers for each clock can be read from
+entries under the new +timer/+ hierarchy.
+
+As a consequence of these changes:
+
+  * the information previously available from the +timer+ entry is now
+obtained by reading +clock/coreclk+.
+
+  * the information previously available from +timerstat/master+ is now
+obtained by reading +timer/coreclk+.
+
+// break list
+Core clock gravity::
+
+The gravity value for a Xenomai clock gives the amount of time by
+which the next timer shot should be anticipated. This is a static
+adjustment value, to account for the basic latency of the target
+system for responding to external events. Such latency may be
+introduced by hardware effects (e.g. bus or cache latency), or
+software issues (e.g. code running with interrupts disabled).
+
+The clock gravity management departs from Xenomai 2.x as follows:
+
+  * different gravity values are applied, depending on which context a
+  timer activates. This may be a real-time IRQ handler (_irq_), a RTDM
+  driver task (_kernel_), or a Xenomai application thread running in
+  user-space (_user_). Xenomai 2.x does not differentiate, only
+  applying a global gravity value regardless of the activated context.
+
+  * in addition to the legacy +latency+ file which now reports
+  the _user_ timer gravity (in nanoseconds), i.e. used for timers
+  activating user-space threads, the full gravity triplet applied to
+  timers running on the core clock can be accessed by reading
+  +clock/coreclk+ (also in nanoseconds).
+
+  * at reset, the _user_ gravity for the core clock now represents the
+sum of the scheduling *and* hardware timer reprogramming time as a
+count of nanoseconds. This departs from Xenomai 2.x for which only the
+former was accounted for as a global gravity value, regardless of the
+target context for the timer.
+
+The following command reports the current gravity triplet for the
+target system, along with the setup information for the core timer:
+
+--------------------------------------------
+# cat xenomai/clock/coreclk
+gravity: irq=848 kernel=8272 user=35303
+devices: timer=decrementer, clock=timebase
+ status: on+watchdog
+  setup: 151
+  ticks: 220862243033
+--------------------------------------------
+    
+Conversely, writing to this file manually changes the gravity values
+of the Xenomai core clock:
+    
+------------------------------------------------------
+    /* change the user gravity (default) */
+# echo 3000 > /proc/xenomai/clock/coreclk
+    /* change the IRQ gravity */
+# echo 1000i > /proc/xenomai/clock/coreclk
+    /* change the user and kernel gravities */
+# echo "2000u 1000k" > /proc/xenomai/clock/coreclk
+------------------------------------------------------
+
++interfaces+ removed::
+
+Only the POSIX and RTDM APIs remain implemented directly in kernel
+space, and are always present when the Cobalt core is enabled in the
+configuration. All other APIs are implemented in user-space over the
+Copperplate layer. This makes the former +interfaces+ contents
+basically useless, since the corresponding information for the
+POSIX/RTDM interfaces can be obtained via +sched/threads+
+unconditionally.
+
++registry/usage+ changed format::
+
+The new output format is <used slot count>/<total slot count>.
+
+== Binary object features ==
+
+=== Loading Xenomai libraries dynamically ===
+
+The new +--enable-dlopen-libs+ configuration switch must be turned on
+to allow Xenomai libraries to be dynamically loaded via dlopen(3).
+
+This replaces the former +--enable-dlopen-skins+ switch. Unlike the
+latter, +--enable-dlopen-libs+ does not implicitly disable support for
+thread local storage, but rather selects a suitable TLS model
+(i.e. _global-dynamic_).
+
+=== Thread local storage ===
+
+The former +--with-__thread+ configuration switch was renamed
++--enable-tls+.
+
+As mentioned earlier, TLS is now available to dynamically loaded
+Xenomai libraries, e.g. +--enable-tls --enable-dlopen-libs+ on a
+configuration line is valid. This would select the _global-dynamic_
+TLS model instead of _initial-exec_, to make sure all thread-local
+variables may be accessed from any code module.
+
+== Process-level management ==
+
+=== Main thread shadowing ===
+
+Any application linked against +libcobalt+ has its main thread
+attached to the real-time system automatically, this operation is
+called _auto-shadowing_. As a side-effect, the entire process's memory
+is locked, for current and future mappings
+(i.e. +mlockall(MCL_CURRENT|MCL_FUTURE)+).
+
+=== Shadow signal handler ===
+
+Xenomai's +libcobalt+ installs a handler for the SIGWINCH (aka
+_SIGSHADOW_) signal. This signal may be sent by the Cobalt core to any
+real-time application, for handling internal duties.
+
+Applications are allowed to interpose on the SIGSHADOW handler,
+provided they first forward all signal notifications to this routine,
+then eventually handle all events the Xenomai handler won't process.
+
+This handler was renamed from `xeno_sigwinch_handler()` (Xenomai 2.x)
+to `cobalt_sigshadow_handler()` in Xenomai 3.x. The function prototype
+did not change though, i.e.:
+
+----------------------------------------------------------------
+int cobalt_sigshadow_handler(int sig, siginfo_t *si, void *ctxt)
+----------------------------------------------------------------
+
+A non-zero value is returned whenever the event was handled internally
+by the Xenomai system.
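+
+An application interposing on SIGSHADOW could chain the handlers as
+sketched below (illustrative only):
+
+----------------------------------------------------------------
+static void app_sigshadow(int sig, siginfo_t *si, void *ctxt)
+{
+	/* Forward to the Xenomai handler first, as required. */
+	if (cobalt_sigshadow_handler(sig, si, ctxt))
+		return;	/* the event was handled internally */
+
+	/* ... handle the remaining events here ... */
+}
+----------------------------------------------------------------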
+
+=== Debug signal handler ===
+
+Xenomai's +libcobalt+ installs a handler for the SIGXCPU (aka
+_SIGDEBUG_) signal. This signal may be sent by the Cobalt core to any
+real-time application, for notifying various debug events.
+
+Applications are allowed to interpose on the SIGDEBUG handler,
+provided they eventually forward all signal notifications they won't
+process to the Xenomai handler.
+
+This handler was renamed from `xeno_handle_mlock_alert()` (Xenomai
+2.x) to `cobalt_sigdebug_handler()` in Xenomai 3.x. The function
+prototype did not change though, i.e.:
+
++void cobalt_sigdebug_handler(int sig, siginfo_t *si, void *ctxt)+
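+
+A sketch of an interposed SIGDEBUG handler could read as follows
+(illustrative only; it handles one event and forwards the rest):
+
+----------------------------------------------------------------
+static void app_sigdebug(int sig, siginfo_t *si, void *ctxt)
+{
+	if (sigdebug_reason(si) == SIGDEBUG_MIGRATE_SYSCALL) {
+		/* ... application-specific processing ... */
+		return;
+	}
+	/* Not processed here: forward to the Xenomai handler. */
+	cobalt_sigdebug_handler(sig, si, ctxt);
+}
+----------------------------------------------------------------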
+
+=== Copperplate auto-initialization ===
+
+Copperplate is a library layer which mediates between the real-time
+core services available on the platform, and the API exposed to the
+application. It provides typical programming abstractions for
+emulating real-time APIs. All non-POSIX APIs are based on Copperplate
+services (e.g. _alchemy_, _psos_, _vxworks_).
+
+When Copperplate is built for running over the Cobalt core, it sits on
+top of the +libcobalt+ library. Conversely, it is directly stacked on
+top of the *glibc* or *uClibc* when built for running over the Mercury
+core.
+
+Normally, Copperplate should initialize from a call issued by the
++main()+ application routine. To make this process transparent for the
+user, the +xeno-config+ script emits link flags which temporarily
+override the +main()+ routine with a Copperplate-based replacement,
+running the proper initialization code as required, before branching
+back to the user-defined application entry point.
+
+This behavior may be disabled by passing the
+<<auto-init,+--no-auto-init+>> option.
+
+== RTDM interface changes ==
+
+=== Files renamed ===
+
+- Redundant prefixes were removed from the following files:
+
+[normal]
+rtdm/rtdm_driver.h -> rtdm/driver.h
+[normal]
+rtdm/rtcan.h -> rtdm/can.h
+[normal]
+rtdm/rtserial.h -> rtdm/serial.h
+[normal]
+rtdm/rttesting.h -> rtdm/testing.h
+[normal]
+rtdm/rtipc.h -> rtdm/ipc.h
+
+=== Driver API ===
+
+==== New device description model ====
+
+Several changes have taken place in the device description passed to
++rtdm_dev_register()+ (i.e. +struct rtdm_device+). Aside from fixing
+consistency issues, the bulk of changes is aimed at narrowing the gap
+between the regular Linux device driver model and RTDM.
+
+To this end, RTDM in Xenomai 3 shares the Linux namespace for named
+devices, which are now backed by common character device objects from
+the regular Linux device model. As a consequence of this, file
+descriptors obtained on named RTDM devices are regular file
+descriptors, visible from the +/proc/<pid>/fd+ interface.
+
+===== Named device description =====
+
+The major change required for supporting this closer integration of
+RTDM into the regular Linux driver model involved splitting the device
+driver properties from the device instance definitions, which used to
+be combined in Xenomai 2.x into the +rtdm_device+ descriptor.
+
+.Xenomai 2.x named device description
+---------------------------------------------
+static struct rtdm_device foo_device0 = {
+	.struct_version		=	RTDM_DEVICE_STRUCT_VER,
+	.device_flags		=	RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_id		=	0,
+	.context_size		=	sizeof(struct foo_context),
+	.ops = {
+		.open		=	foo_open,
+		.ioctl_rt	=	foo_ioctl_rt,
+		.ioctl_nrt	=	foo_ioctl_nrt,
+		.close		=	foo_close,
+	},
+	.device_class		=	RTDM_CLASS_EXPERIMENTAL,
+	.device_sub_class	=	RTDM_SUBCLASS_FOO,
+	.profile_version	=	42,
+	.device_name		=	"foo0",
+	.driver_name		=	"foo driver",
+	.driver_version		=	RTDM_DRIVER_VER(1, 0, 0),
+	.peripheral_name	=	"Ultra-void IV board driver",
+	.proc_name		=	device.device_name,
+	.provider_name		=	"Whoever",
+};
+
+static struct rtdm_device foo_device1 = {
+	.struct_version		=	RTDM_DEVICE_STRUCT_VER,
+	.device_flags		=	RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_id		=	1,
+	.context_size		=	sizeof(struct foo_context),
+	.ops = {
+		.open		=	foo_open,
+		.ioctl_rt	=	foo_ioctl_rt,
+		.ioctl_nrt	=	foo_ioctl_nrt,
+		.close		=	foo_close,
+	},
+	.device_class		=	RTDM_CLASS_EXPERIMENTAL,
+	.device_sub_class	=	RTDM_SUBCLASS_FOO,
+	.profile_version	=	42,
+	.device_name		=	"foo1",
+	.device_data 		=	NULL,
+	.driver_name		=	"foo driver",
+	.driver_version		=	RTDM_DRIVER_VER(1, 0, 0),
+	.peripheral_name	=	"Ultra-void IV board driver",
+	.proc_name		=	device.device_name,
+	.provider_name		=	"Whoever",
+};
+
+foo_device0.device_data = &some_driver_data0;
+ret = rtdm_dev_register(&foo_device0);
+...
+foo_device1.device_data = &some_driver_data1;
+ret = rtdm_dev_register(&foo_device1);
+
+---------------------------------------------
+
+The legacy description above would only create "virtual" device
+entries, private to the RTDM device namespace, with no visible
+counterparts into the Linux device namespace.
+
+.Xenomai 3.x named device description
+---------------------------------------------
+
+static struct rtdm_driver foo_driver = {
+	.profile_info		=	RTDM_PROFILE_INFO(foo,
+							  RTDM_CLASS_EXPERIMENTAL,
+							  RTDM_SUBCLASS_FOO,
+							  42),
+	.device_flags		=	RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_count		=	2,
+	.context_size		=	sizeof(struct foo_context),
+	.ops = {
+		.open		=	foo_open,
+		.ioctl_rt	=	foo_ioctl_rt,
+		.ioctl_nrt	=	foo_ioctl_nrt,
+		.close		=	foo_close,
+	},
+};
+
+static struct rtdm_device foo_devices[2] = {
+	[ 0 ... 1 ] = {
+		.driver = &foo_driver,
+		.label = "foo%d",
+	},
+};
+
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("Ultra-void IV board driver");
+MODULE_AUTHOR("Whoever");
+
+foo_devices[0].device_data = &some_driver_data0;
+ret = rtdm_dev_register(&foo_devices[0]);
+...
+foo_devices[1].device_data = &some_driver_data1;
+ret = rtdm_dev_register(&foo_devices[1]);
+
+---------------------------------------------
+
+The current description above will cause the device nodes
+/dev/rtdm/foo0 and /dev/rtdm/foo1 to be created in the Linux device
+namespace. Applications may open these device nodes for interacting
+with the RTDM driver, as they would do with any regular _chrdev_
+driver.
+
+===== Protocol device description =====
+
+Similarly, the registration data for protocol devices have been
+changed to follow the new generic layout:
+
+.Xenomai 2.x protocol device description
+---------------------------------------------
+static struct rtdm_device foo_device = {
+	.struct_version =	RTDM_DEVICE_STRUCT_VER,
+	.device_flags	=	RTDM_PROTOCOL_DEVICE,
+	.context_size	=	sizeof(struct foo_context),
+	.device_name	=	"foo",
+	.protocol_family=	PF_FOO,
+	.socket_type	=	SOCK_DGRAM,
+	.socket_nrt	=	foo_socket,
+	.ops = {
+		.close_nrt	=	foo_close,
+		.recvmsg_rt	=	foo_recvmsg,
+		.sendmsg_rt	=	foo_sendmsg,
+		.ioctl_rt	=	foo_ioctl,
+		.ioctl_nrt	=	foo_ioctl,
+		.read_rt	=	foo_read,
+		.write_rt	=	foo_write,
+		.select_bind	=	foo_select,
+	},
+	.device_class		=	RTDM_CLASS_EXPERIMENTAL,
+	.device_sub_class	=	RTDM_SUBCLASS_FOO,
+	.profile_version	=	1,
+	.driver_name		=	"foo",
+	.driver_version		=	RTDM_DRIVER_VER(1, 0, 0),
+	.peripheral_name	=	"Unexpected protocol driver",
+	.proc_name		=	device.device_name,
+	.provider_name		=	"Whoever",
+	.device_data		=	&some_driver_data,
+};
+
+ret = rtdm_dev_register(&foo_device);
+...
+
+---------------------------------------------
+
+.Xenomai 3.x protocol device description
+---------------------------------------------
+static struct rtdm_driver foo_driver = {
+	.profile_info		=	RTDM_PROFILE_INFO(foo,
+							  RTDM_CLASS_EXPERIMENTAL,
+							  RTDM_SUBCLASS_FOO,
+							  1),
+	.device_flags		=	RTDM_PROTOCOL_DEVICE,
+	.device_count		=	1,
+	.context_size		=	sizeof(struct foo_context),
+	.protocol_family	=	PF_FOO,
+	.socket_type		=	SOCK_DGRAM,
+	.ops = {
+		.socket		=	foo_socket,
+		.close		=	foo_close,
+		.recvmsg_rt	=	foo_recvmsg,
+		.sendmsg_rt	=	foo_sendmsg,
+		.ioctl_rt	=	foo_ioctl,
+		.ioctl_nrt	=	foo_ioctl,
+		.read_rt	=	foo_read,
+		.write_rt	=	foo_write,
+		.select		=	foo_select,
+	},
+};
+
+static struct rtdm_device foo_device = {
+	.driver = &foo_driver,
+	.label = "foo",
+	.device_data = &some_driver_data,
+};
+
+ret = rtdm_dev_register(&foo_device);
+...
+
+MODULE_VERSION("1.0.0");
+MODULE_DESCRIPTION("Unexpected protocol driver");
+MODULE_AUTHOR("Whoever");
+
+---------------------------------------------
+
+* +.device_count+ has been added to reflect the (maximum) number of
+  device instances which may be managed by the driver. This
+  information is used to dynamically reserve a range of major/minor
+  numbers in the Linux device namespace for the named RTDM devices a
+  particular driver manages. Device minors are assigned to RTDM
+  device instances in order of registration, starting from minor #0,
+  unless RTDM_FIXED_MINOR is present in the device flags. In the
+  latter case, rtdm_device.minor is used verbatim by the RTDM core
+  when registering the device.
+
+* +.device_id+ was removed from the device description, as the minor
+  number it was most commonly holding is now available from a call to
+  rtdm_fd_minor(). Drivers should use +.device_data+ for storing
+  private information attached to device instances.
+
+* +.struct_version+ was dropped, as it provided no additional feature
+  to the standard module versioning scheme.
+
+* +.proc_name+ was dropped, as it is redundant with the device
+  name. Above all, using a /proc information label different from the
+  actual device name is unlikely to be a good idea.
+
+* +.device_class+, +.device_sub_class+ and +.profile_version+ numbers
+  have been grouped in a dedicated profile information descriptor
+  (+struct rtdm_profile_info+), which *must* be initialized using the
+  +RTDM_PROFILE_INFO()+ macro.
+
+* +.driver_name+ was dropped, as it adds no value to the plain module
+  name (unless the module name is deliberately obfuscated, that is).
+
+* +.peripheral_name+ was dropped, as this information should be
+  conveyed by MODULE_DESCRIPTION().
+
+* +.provider_name+ was dropped, as this information should be conveyed
+  by MODULE_AUTHOR().
+
+* +.driver_version+ was dropped, as this information should be
+  conveyed by MODULE_VERSION().
+
+==== Introduction of file descriptors ====
+
+Xenomai 3 introduces a file descriptor abstraction for RTDM
+drivers. For this reason, all RTDM driver handlers and services which
+used to receive a `user_info` opaque argument describing the calling
+context, now receive a `rtdm_fd` pointer standing for the target file
+descriptor for the operation.
+
+As a consequence of this:
+
+- The +rtdm_context_get/put()+ call pair has been replaced by
+  +rtdm_fd_get/put()+.
+
+- Likewise, the +rtdm_context_lock/unlock()+ call pair has been
+  replaced by +rtdm_fd_lock/unlock()+.
+
+- +rtdm_fd_to_private()+ is available to fetch the context-private
+  memory allocated by the driver for a particular RTDM file
+  descriptor. Conversely, +rtdm_private_to_fd()+ returns the file
+  descriptor owning a particular context-private memory area.
+
+- +rtdm_fd_minor()+ retrieves the minor number assigned to the
+  current named device instance using its file descriptor (see the
+  sketch below).
+
+- +xenomai/rtdm/open_files+ and +xenomai/rtdm/fildes+ now solely
+  report file descriptors obtained using the driver-to-driver API.
+  RTDM file descriptors obtained from applications appear under the
+  regular /proc/<pid>/fd hierarchy. All RTDM file descriptors obtained
+  by an application are automatically released when the latter exits.
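+
+The following minimal sketch illustrates these calls from a named
+device +open+ handler (the +foo+ names are hypothetical):
+
+-----------------------------------------------------------------------
+struct foo_context {
+	int minor;
+	/* ... per-connection state ... */
+};
+
+static int foo_open(struct rtdm_fd *fd, int oflags)
+{
+	struct foo_context *ctx = rtdm_fd_to_private(fd);
+
+	/* Which device instance is being opened? */
+	ctx->minor = rtdm_fd_minor(fd);
+
+	/* rtdm_private_to_fd(ctx) would return fd back. */
+	return 0;
+}
+-----------------------------------------------------------------------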
+
+[CAUTION]
+Because RTDM file descriptors may be released and destroyed
+asynchronously, rtdm_fd_get() and rtdm_fd_lock() may return -EIDRM if
+a file descriptor fetched from some driver-private registry becomes
+stale prior to calling these services. Typically, this may happen if
+the descriptor is released from the ->close() handler implemented by
+the driver. Therefore, make sure to always carefully check the return
+value of these services.
+
+[NOTE]
+Unlike Xenomai 2.x, RTDM file descriptors returned to Xenomai 3
+applications fall within the regular Linux range. Each open RTDM
+connection is actually mapped over a regular file descriptor, which
+RTDM services from _libcobalt_ recognize and handle.
+
+==== Updated device operation descriptor ====
+
+As visible from the previous illustrations, a few handlers have been
+moved to the device operation descriptor, some dropped, others
+renamed, mainly for the sake of consistency:
+
+* +.select_bind+ was renamed as +.select+ in the device operation
+  descriptor.
+
+* +.open_rt+ was dropped, and +.open_nrt+ renamed as +.open+.  Opening
+  a named device instance always happens from secondary mode. In
+  addition, the new handler is now part of the device operation
+  descriptor +.ops+.
+
+.Rationale
+**********************************************************************
+Opening a device instance most often requires allocating resources
+managed by the Linux kernel (memory mappings, DMA, etc.), which is
+only possible from a regular calling context.
+**********************************************************************
+
+* Likewise, +.socket_rt+ was dropped, and +.socket_nrt+ renamed as
+  +.socket+. Opening a protocol device instance always happens from
+  secondary mode. In addition, the new handler is now part of the
+  device operation descriptor +.ops+.
+
+* As a consequence of the previous changes, +.close_rt+ was dropped,
+  and +.close_nrt+ renamed as +.close+. Closing a device instance
+  always happens from secondary mode.
+
+* The +.open+, +.socket+ and +.close+ handlers have become optional
+  in Xenomai 3.x.
+
+[[rtdm-mmap]]
+* The device operation descriptor +.ops+ shows two new members, namely
+  +.mmap+ for handling memory mapping requests to the RTDM driver, and
+  +.get_unmapped_area+, mainly for supporting such memory mapping
+  operations in MMU-less configurations. These handlers - named after
+  the similar handlers defined in the regular file_operations
+  descriptor - always operate from secondary mode on behalf of the
+  calling task context, so that they may invoke regular kernel
+  services safely.
+
+[NOTE]
+See the documentation in the
+http://xenomai.org/documentation/xenomai-3/html/xeno3prm/[Programming
+Reference Manual] covering the device registration and operation
+handlers for a complete description.
+
+==== Changes to RTDM services ====
+
+- rtdm_dev_unregister() loses the poll_delay argument, along with its
+  return value. Instead, this service waits indefinitely for all
+  ongoing connections to be dropped prior to unregistering the
+  device. The new prototype is therefore:
+
+------------------
+void rtdm_dev_unregister(struct rtdm_device *device);
+------------------
+
+.Rationale
+**********************************************************************
+Drivers are most often not willing to deal with receiving a device
+busy condition from a module exit routine (which is the place devices
+should be unregistered from).  Drivers which really want to deal with
+such a condition should simply use module refcounting in their own code.
+**********************************************************************
+
+- rtdm_task_init() shall be called from secondary mode.
+
+.Rationale
+**********************************************************************
+Since Xenomai 3, rtdm_task_init() involves creating a regular kernel
+thread, which will be given real-time capabilities, such as running
+under the control of the Cobalt kernel. In order to invoke standard
+kernel services, rtdm_task_init() must be called from a regular Linux
+kernel context.
+**********************************************************************
+
+- rtdm_task_join() has been introduced to wait for the termination of
+  a RTDM task regardless of the caller's execution mode, which may be
+  primary or secondary. In addition, rtdm_task_join() does not need
+  to poll for this event, unlike rtdm_task_join_nrt().
+
+.Rationale
+**********************************************************************
+rtdm_task_join() supersedes rtdm_task_join_nrt() feature-wise, with
+fewer usage restrictions, which makes the latter redundant. It is
+therefore deprecated and will be phased out in the next release.
+**********************************************************************
+
+- A RTDM task cannot be forcibly removed from the scheduler by another
+  thread for immediate deletion. Instead, the RTDM task is notified
+  about a pending cancellation request, which it should act upon when
+  detected. To this end, RTDM driver tasks should call the new
+  +rtdm_task_should_stop()+ service to detect such notification from
+  their work loop, and exit accordingly, as sketched below.
+
+.Rationale
+**********************************************************************
+Since Xenomai 3, a RTDM task is based on a regular kernel thread with
+real-time capabilities when controlled by the Cobalt kernel. The Linux
+kernel requires kernel threads to exit at their earliest convenience
+upon notification, which therefore applies to RTDM tasks as well.
+**********************************************************************
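+
+A minimal work-loop sketch for a periodic driver task (hypothetical
+foo names; error handling elided):
+
+-----------------------------------------------------------------------
+static void foo_task_proc(void *arg)
+{
+	int ret;
+
+	while (!rtdm_task_should_stop()) {
+		ret = rtdm_task_wait_period(NULL);
+		if (ret && ret != -ETIMEDOUT)
+			break;	/* e.g. unblocked for deletion */
+		/* ... run one cycle of periodic work ... */
+	}
+}
+-----------------------------------------------------------------------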
+
+- +rtdm_task_set_period()+ now accepts a start date for the periodic
+timeline. Zero can be passed to emulate the previous call form,
+setting the first release point when the first period after the
+current date elapses.
+
+- +rtdm_task_wait_period()+ now copies back the count of overruns into
+a user-provided variable if -ETIMEDOUT is returned. NULL can be passed
+to emulate the previous call form, discarding this information.
+
+- Both +rtdm_task_set_period()+ and +rtdm_task_wait_period()+ may be
+  invoked over a Cobalt thread context.
+
+- RTDM_EXECUTE_ATOMICALLY() is deprecated and will be phased out in
+  the next release. Drivers should prefer the newly introduced RTDM
+  wait queues, or switch to the Cobalt-specific
+  cobalt_atomic_enter/leave() call pair, depending on the use case.
+
+.Rationale
+*******************************************************************
+This construct is not portable to a native implementation of RTDM, and
+may be replaced by other means. The usage patterns of
+RTDM_EXECUTE_ATOMICALLY() used to be:
+
+- somewhat abusing the big nucleus lock (i.e. nklock) grabbed by
+  RTDM_EXECUTE_ATOMICALLY(), for serializing access to a section that
+  should be given its own lock instead, improving concurrency in the
+  same move. Such a section does not call services from the Xenomai
+  core, and does NOT specifically require the nucleus lock to be
+  held. In this case, a RTDM lock (rtdm_lock_t) should be used to
+  protect the section instead of RTDM_EXECUTE_ATOMICALLY().
+
+- protecting a section which calls into the Xenomai core, which
+  exhibits one or more of the following characteristics:
+
+    * Some callee within the section may require the nucleus lock to
+      be held on entry (e.g. Cobalt registry lookup). In what has to
+      be a Cobalt-specific case, the new cobalt_atomic_enter/leave()
+      call pair can replace RTDM_EXECUTE_ATOMICALLY(). However, this
+      construct remains by definition non-portable to Mercury.
+
+    * A set-condition-and-wakeup pattern has to be carried out
+      atomically. In this case, RTDM_EXECUTE_ATOMICALLY() can be
+      replaced by the wakeup side of a RTDM wait queue introduced in
+      Xenomai 3 (e.g. rtdm_waitqueue_signal/broadcast()).
+
+    * A test-condition-and-wait pattern has to be carried out
+      atomically. In this case, RTDM_EXECUTE_ATOMICALLY() can be
+      replaced by the wait side of a RTDM wait queue introduced in
+      Xenomai 3 (e.g. rtdm_wait_condition()).
+
+Please refer to kernel/drivers/ipc/iddp.c for an illustration of the
+RTDM wait queue usage.
+*******************************************************************
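+
+For illustration, a sketch of the wait queue based replacement for a
+set/test-condition pattern (hypothetical foo names):
+
+-----------------------------------------------------------------------
+static DEFINE_RTDM_WAITQUEUE(foo_wq);
+static int foo_condition;
+
+/* set-condition-and-wakeup side */
+foo_condition = 1;
+rtdm_waitqueue_broadcast(&foo_wq);
+
+/* test-condition-and-wait side */
+ret = rtdm_wait_condition(&foo_wq, foo_condition != 0);
+-----------------------------------------------------------------------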
+
+- The rtdm_irq_request/free() and rtdm_irq_enable/disable() call
+  pairs must be called from a Linux task context, a restriction which
+  did not exist with Xenomai 2.x.
+
+.Rationale
+*******************************************************************
+Recent evolutions of the Linux kernel with respect to IRQ management
+involve complex processing for basic operations
+(e.g. enabling/disabling the interrupt line) with some interrupt types
+like MSI. Such processing cannot be made dual-kernel safe at a
+reasonable cost, without incurring measurable latency or significant
+code updates in the kernel.
+
+Since allocating, releasing, enabling or disabling real-time
+interrupts is most commonly done from driver initialization/cleanup
+context already, the Cobalt core has simply inherited those
+requirements from the Linux kernel.
+*******************************************************************
+
+- The leading _user_info_ argument to rtdm_munmap() has been
+  removed.
+
+.Rationale
+*********************************************************************
+With the introduction of RTDM file descriptors (see above) replacing
+all _user_info_ context pointers, this argument has become irrelevant,
+since this operation is not related to any file descriptor, but rather
+to the current address space.
+*********************************************************************
+
+The new prototype for this routine is therefore:
+
+---------------------------------------
+int rtdm_munmap(void *ptr, size_t len);
+---------------------------------------
+
+- Additional memory mapping calls
+
+The following new routines are available to RTDM drivers for mapping
+memory over a user address space. They are intended to be called from
+a ->mmap() handler:
+
+* rtdm_mmap_kmem() for mapping logical kernel memory (i.e. having
+  a direct physical mapping).
+
+* rtdm_mmap_vmem() for mapping purely virtual memory (i.e. with no
+  direct physical mapping).
+
+* rtdm_mmap_iomem() for mapping I/O memory.
+
+------------------------------------------------------------
+static int foo_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	...
+	switch (memory_type) {
+	case MEM_PHYSICAL:
+		ret = rtdm_mmap_iomem(vma, addr);
+		break;
+	case MEM_LOGICAL:
+		ret = rtdm_mmap_kmem(vma, (void *)addr);
+		break;
+	case MEM_VIRTUAL:
+		ret = rtdm_mmap_vmem(vma, (void *)addr);
+		break;
+	default:
+		return -EINVAL;
+	}
+	...
+}
+------------------------------------------------------------
+
+- The rtdm_nrtsig API has changed: the rtdm_nrtsig_init() function no
+  longer returns errors, and now has a void return type. The
+  rtdm_nrtsig_t type has changed from an integer to a structure. As a
+  consequence, the first argument of the nrtsig handler is now a
+  pointer to the rtdm_nrtsig_t structure.
+
+.Rationale
+************************************************************************
+Recent versions of the I-pipe patch provide the ipipe_post_work_root()
+service, which has the advantage over the VIRQ support that it does
+not require allocating a distinct VIRQ for each handler. As a
+consequence, drivers may use as many rtdm_nrtsig_t structures as they
+like, with no risk of running out of VIRQs.
+************************************************************************
+
+  The new relevant prototypes are therefore:
+
+-------------------------------------------------------------------------
+typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg);
+
+void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
+     rtdm_nrtsig_handler_t handler, void *arg);
+-------------------------------------------------------------------------
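+
+A minimal usage sketch (hypothetical foo names), assuming the
+rtdm_nrtsig_pend() service is used to trigger the handler from
+real-time context:
+
+-------------------------------------------------------------------------
+static void foo_nrt_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	/* Runs from the regular Linux domain. */
+}
+
+static rtdm_nrtsig_t foo_nrtsig;
+
+rtdm_nrtsig_init(&foo_nrtsig, foo_nrt_handler, NULL);
+
+/* Later, from primary mode: */
+rtdm_nrtsig_pend(&foo_nrtsig);
+-------------------------------------------------------------------------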
+
+- A new rtdm_schedule_nrt_work() service was added to allow
+  scheduling a Linux workqueue from primary mode.
+
+.Rationale
+************************************************************************
+Scheduling a Linux workqueue may be a convenient way for a driver to
+recover from an error which requires synchronization with Linux.
+Typically, recovering from a PCI error may involve accessing the PCI
+config space, which requires taking a Linux spinlock, and therefore
+cannot be done from primary mode.
+************************************************************************
+
+  The prototype of this new service is:
+
+------------------------------------------------------
+void rtdm_schedule_nrt_work(struct work_struct *work);
+------------------------------------------------------
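+
+A minimal sketch of deferring such recovery (hypothetical foo names):
+
+------------------------------------------------------
+static void foo_recover(struct work_struct *work)
+{
+	/* Runs from a regular Linux workqueue context. */
+}
+
+static DECLARE_WORK(foo_nrt_work, foo_recover);
+
+/* From primary mode, e.g. a real-time IRQ handler: */
+rtdm_schedule_nrt_work(&foo_nrt_work);
+------------------------------------------------------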
+
+==== Adaptive syscalls ====
+
++ioctl()+, +read()+, +write()+, +recvmsg()+ and +sendmsg()+ have
+become conforming RTDM calls, which means that Xenomai threads running
+over the Cobalt core will be automatically switched to primary mode
+prior to running the driver handler for the corresponding request.
+
+.Rationale
+**********************************************************************
+Real-time handlers from RTDM drivers serve time-critical requests by
+definition, which makes them preferred targets of adaptive calls over
+non real-time handlers.
+**********************************************************************
+
+[NOTE]
+This behavior departs from Xenomai 2.x, which would run the call from
+the originating context instead (e.g. +ioctl_nrt()+ would be fired for
+a caller running in secondary mode, and conversely +ioctl_rt()+ would
+be called for a request issued from primary mode).
+
+[TIP]
+RTDM drivers implementing differentiated +ioctl()+ support for both
+domains should serve all real-time only requests from +ioctl_rt()+,
+returning +-ENOSYS+ for any unrecognized request, which will cause the
+adaptive switch to take place automatically to the +ioctl_nrt()+
+handler. The +ioctl_nrt()+ should then implement all requests which
+may be valid from the regular Linux domain exclusively.
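+
+A sketch of this pattern (the FOO_RTIOC_* request is hypothetical):
+
+------------------------------------------------------
+static int foo_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	switch (request) {
+	case FOO_RTIOC_TRIGGER:
+		/* ... serve time-critical request ... */
+		return 0;
+	default:
+		/* Adaptive switch to foo_ioctl_nrt(). */
+		return -ENOSYS;
+	}
+}
+------------------------------------------------------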
+
+=== Application interface ===
+
+Unlike with Xenomai 2.x, named RTDM device nodes in Xenomai 3 are
+visible from the Linux device namespace. These nodes are automatically
+created by the _hotplug_ kernel facility. Applications must open
+these device nodes for interacting with RTDM drivers, as they would
+do with any regular _chrdev_ driver.
+
+All RTDM device nodes are created under the +rtdm/+ sub-root from the
+standard +/dev+ hierarchy, to eliminate potential name clashes with
+standard drivers.
+
+[IMPORTANT]
+Enabling DEVTMPFS in the target kernel is recommended so that the
+standard +/dev+ tree immediately reflects updates to the RTDM device
+namespace. You may want to enable CONFIG_DEVTMPFS and
+CONFIG_DEVTMPFS_MOUNT.
+
+.Opening a named device instance with Xenomai 2.x
+--------------------------------------------------
+fd = open("foo", O_RDWR);
+   or
+fd = open("/dev/foo", O_RDWR);
+--------------------------------------------------
+
+.Opening a named device instance with Xenomai 3
+-----------------------------------------------
+fd = open("/dev/rtdm/foo", O_RDWR);
+-----------------------------------------------
+
+[TIP]
+Enabling the CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE option in the kernel
+configuration makes legacy pathnames available for named RTDM
+devices, allowing applications to open them using the naming scheme
+used by Xenomai 2.x.
+
+==== Retrieving device information ====
+
+Device information can be retrieved via _sysfs_, instead of _procfs_
+as with Xenomai 2.x. As a result of this change, +/proc/xenomai/rtdm+
+disappeared entirely. Instead, the RTDM device information can now be
+reached as follows:
+
+- /sys/devices/virtual/rtdm contains entries for all RTDM devices
+present in the system (including named and protocol device types).
+This directory is aliased to /sys/class/rtdm.
+
+- each /sys/devices/virtual/rtdm/<device-name> directory gives access
+  to device information, available from virtual files:
+
+  * reading +profile+ returns the class and subclass ids.
+
+  * reading +refcount+ returns the current count of outstanding
+    connections to the device driver.
+
+  * reading +flags+ returns the device flags as defined by the device
+    driver.
+
+  * reading +type+ returns the device type (_named_ or _protocol_).
+
+=== Inter-Driver API ===
+
+The legacy (and redundant) rt_dev_*() API for calling the I/O services
+exposed by a RTDM driver from another driver was dropped, in favor of
+a direct use of the existing rtdm_*() API in kernel space. For
+instance, calls to +rt_dev_open()+ should be converted to
++rtdm_open()+, +rt_dev_socket()+ to +rtdm_socket()+ and so on.
+
+.Rationale
+******************************************************************
+Having two APIs for exactly the same purpose is uselessly confusing,
+particularly for kernel programming. Since the user-space version of
+the rt_dev_*() API was also dropped in favor of the regular POSIX I/O
+calls exposed by +libcobalt+, the choice was made to retain the most
+straightforward naming for the RTDM-to-RTDM API, keeping the +rtdm_+
+prefix.
+******************************************************************
+
+== Analogy interface changes ==
+
+=== Files renamed ===
+
+- DAQ drivers in kernel space now pull all Analogy core header files
+  from <rtdm/analogy/*.h>. In addition:
+
+[normal]
+analogy/analogy_driver.h -> rtdm/analogy/driver.h
+[normal]
+analogy/driver.h -> rtdm/analogy/driver.h
+[normal]
+analogy/analogy.h -> rtdm/analogy.h
+
+- DAQ drivers in kernel space should include <rtdm/analogy/device.h>
+  instead of <rtdm/analogy/driver.h>.
+
+- Applications need to include only a single file for pulling all
+  routine declarations and constant definitions required for invoking
+  the Analogy services from user-space, namely <rtdm/analogy.h>, i.e.
+
+[normal]
+analogy/types.h
+analogy/command.h
+analogy/device.h
+analogy/subdevice.h
+analogy/instruction.h
+analogy/ioctl.h -> all files merged into rtdm/analogy.h
+
+As a consequence of these changes, the former include/analogy/ file
+tree has been entirely removed.
+
+== RTnet changes ==
+
+RTnet is integrated into Xenomai 3, but some of its behaviour and
+interfaces were changed in an attempt to simplify it.
+
+- a network driver kernel module cannot be unloaded as long as the
+  network interface it implements is up
+
+- the RTnet drivers API changed, to make it simpler, and closer to
+  the mainline API
+
+  * module refcounting is now automatically done by the stack, no
+    call is necessary to RTNET_SET_MODULE_OWNER, RTNET_MOD_INC_USE_COUNT,
+    RTNET_MOD_DEC_USE_COUNT
+
+  * per-driver skb receive pools were removed from drivers, they are
+    now handled by the RTnet stack. In consequence, drivers now need
+    to pass an additional argument to the rt_alloc_etherdev() service:
+    the number of buffers in the pool. The new prototype is:
+
+------------------------------------------------------------------------------------
+struct rtnet_device *rt_alloc_etherdev(unsigned sizeof_priv, unsigned rx_pool_size);
+------------------------------------------------------------------------------------
+
+  * in consequence, any explicit call to rtskb_pool_init() can be
+    removed. In addition, drivers should now use
+    rtnetdev_alloc_rtskb() to allocate buffers from the network device
+    receive pool; much like its counterpart netdev_alloc_skb(), it takes
+    as first argument a pointer to a network device structure. Its
+    prototype is:
+
+--------------------------------------------------------------------------------
+struct rtskb *rtnetdev_alloc_rtskb(struct rtnet_device *dev, unsigned int size);
+--------------------------------------------------------------------------------
+
+  * for drivers which wish to explicitly handle skb pools, the
+    signature of rtskb_pool_init() has changed: it takes an additional
+    pointer to a structure containing callbacks invoked when the first
+    buffer is allocated and when the last buffer is returned, so that
+    the rtskb pool can implicitly lock a parent structure. The new
+    prototype is:
+
+-----------------------------------------------------------------------
+struct rtskb_pool_lock_ops {
+    int (*trylock)(void *cookie);
+    void (*unlock)(void *cookie);
+};
+
+unsigned int rtskb_pool_init(struct rtskb_pool *pool,
+			  unsigned int initial_size,
+			  const struct rtskb_pool_lock_ops *lock_ops,
+			  void *lock_cookie);
+-----------------------------------------------------------------------
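+
+For instance, a sketch of lock operations pinning the owning module
+(which is what rtskb_module_pool_init() below does implicitly):
+
+-----------------------------------------------------------------------
+static int foo_pool_trylock(void *cookie)
+{
+	return try_module_get(THIS_MODULE);
+}
+
+static void foo_pool_unlock(void *cookie)
+{
+	module_put(THIS_MODULE);
+}
+
+static const struct rtskb_pool_lock_ops foo_pool_ops = {
+	.trylock = foo_pool_trylock,
+	.unlock = foo_pool_unlock,
+};
+
+ret = rtskb_pool_init(&foo_pool, 16, &foo_pool_ops, NULL);
+-----------------------------------------------------------------------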
+
+  * for the typical case where an skb pool locks the containing
+    module, the rtskb_module_pool_init() function was added, which has
+    the same interface as the old rtskb_pool_init() function. Its
+    prototype is:
+
+-----------------------------------------------------------------------
+unsigned int rtskb_module_pool_init(struct rtskb_pool *pool,
+					unsigned int initial_size);
+-----------------------------------------------------------------------
+
+
+  * in order to ease the port of recent drivers, the following
+    services were added, which work much like their Linux counterpart:
+    rtnetdev_priv(), rtdev_emerg(), rtdev_alert(), rtdev_crit(),
+    rtdev_err(), rtdev_warn(), rtdev_notice(), rtdev_info(),
+    rtdev_dbg(), rtdev_vdbg(), RTDEV_TX_OK, RTDEV_TX_BUSY,
+    rtskb_tx_timestamp(). Their declarations are equivalent to:
+
+-----------------------------------------------------------------------
+#define RTDEV_TX_OK	0
+#define RTDEV_TX_BUSY	1
+
+void *rtnetdev_priv(struct rtnet_device *dev);
+
+void rtdev_emerg(struct rtnet_device *dev, const char *format, ...);
+void rtdev_alert(struct rtnet_device *dev, const char *format, ...);
+void rtdev_crit(struct rtnet_device *dev, const char *format, ...);
+void rtdev_err(struct rtnet_device *dev, const char *format, ...);
+void rtdev_warn(struct rtnet_device *dev, const char *format, ...);
+void rtdev_notice(struct rtnet_device *dev, const char *format, ...);
+void rtdev_info(struct rtnet_device *dev, const char *format, ...);
+void rtdev_dbg(struct rtnet_device *dev, const char *format, ...);
+void rtdev_vdbg(struct rtnet_device *dev, const char *format, ...);
+
+void rtskb_tx_timestamp(struct rtskb *skb);
+-----------------------------------------------------------------------
+
+
+== POSIX interface changes ==
+
+As mentioned earlier, the former *POSIX skin* is known as the *Cobalt
+API* in Xenomai 3.x, available as +libcobalt.{so,a}+. The Cobalt API
+also includes the code of the former +libxenomai+, which is no longer
+a standalone library.
+
++libcobalt+ exposes the set of POSIX and ISO/C standard features
+specifically implemented by Xenomai to honor real-time requirements
+using the Cobalt core.
+
+=== Interrupt management ===
+
+- The former +pthread_intr+ API once provided by Xenomai 2.x is gone.
+
+[[irqhandling]]
+  
+.Rationale
+**********************************************************************
+Handling real-time interrupt events from user-space can be done safely
+only if some top-half code exists for acknowledging the issuing device
+request from kernel space, particularly when the interrupt line is
+shared. This should be done via a RTDM driver, exposing a +read(2)+ or
++ioctl(2)+ interface, for waiting for interrupt events from
+applications running in user-space.
+**********************************************************************
+
+Failing this, the low-level interrupt service code in user-space
+would be sensitive to external thread management actions, such as
+being stopped by GDB/ptrace(2) interaction. Unfortunately,
+preventing the device acknowledge code from running upon interrupt
+request may cause unfixable breakage (e.g. typically an IRQ storm).
+
+Since the application should provide proper top-half code in a
+dedicated RTDM driver for synchronizing on IRQ receipt, the RTDM API
+available in user-space is sufficient.
+
+Removing the +pthread_intr+ API should be considered a strong hint
+for keeping driver code in kernel space, where it naturally belongs.
+
+[TIP]
+[[userirqtip]]
+This said, in the rare cases where running a device driver in
+user-space is the best option, one may rely on the RTDM-based UDD
+framework shipped with Xenomai 3. UDD stands for _User-space Device
+Driver_, enabling interrupt control and I/O memory access interfaces
+to applications in a safe manner. It is reminiscent of the UIO
+framework available with the Linux kernel, adapted to the dual
+kernel Cobalt environment.
+
+=== Scheduling ===
+
+- Cobalt implements the following POSIX.1-2001 services not present in
+  Xenomai 2.x: +sched_setscheduler(2)+, +sched_getscheduler(2)+.
+
+- The +SCHED_FIFO+, +SCHED_RR+, +SCHED_SPORADIC+ and +SCHED_TP+
+  classes now support up to 256 priority levels, instead of 99 as
+  previously with Xenomai 2.x. However, +sched_get_priority_max(2)+
+  still returns 99. Only the Cobalt extended call forms
+  (e.g. +pthread_attr_setschedparam_ex()+, +pthread_create_ex()+)
+  recognize these additional levels.
+
+- The new +sched_get_priority_min_ex()+ and
+  +sched_get_priority_max_ex()+ services should be used for querying
+  the static priority range of Cobalt policies.
+
+- `pthread_setschedparam(3)` may cause a secondary mode switch for the
+  caller, but will not cause any mode switch for the target thread
+  unlike with Xenomai 2.x.
+
+[normal]
+  This is a requirement for maintaining both the *glibc* and the
+  Xenomai scheduler in sync, with respect to thread priorities, since
+  the former maintains a process-local priority cache for the threads
+  it knows about. Therefore, an explicit call to the regular
+  `pthread_setschedparam(3)` shall be issued upon each priority change
+  Xenomai-wise, to maintain consistency.
+
+[normal]
+  In the Xenomai 2.x implementation, the thread being set a new
+  priority would receive a SIGSHADOW signal, triggering a call to
+  `pthread_setschedparam(3)` immediately.
+
+.Rationale
+**********************************************************************
+The target Xenomai thread may hold a mutex or any resource which may
+only be held in primary mode, in which case switching to secondary
+mode for applying the priority change at any random location over a
+signal handler may create a pathological issue. In addition,
+`pthread_setschedparam(3)` is not async-safe, which makes the former
+method fragile.
+**********************************************************************
+
+[normal]
+  Conversely, a thread which calls +pthread_setschedparam(3)+ does know
+  unambiguously whether the current calling context is safe for the
+  incurred migration.
+
+- A new SCHED_WEAK class is available to POSIX threads, which may be
+  optionally turned on using the +CONFIG_XENO_OPT_SCHED_WEAK+ kernel
+  configuration switch.
+
+[normal]
+  By this feature, Xenomai now accepts Linux real-time scheduling
+  policies (SCHED_FIFO, SCHED_RR) to be weakly scheduled by the Cobalt
+  core, within a low priority scheduling class (i.e. below the Xenomai
+  real-time classes, but still above the idle class).
+
+[normal]
+  Xenomai 2.x already had a limited form of such policy, based on
+  scheduling SCHED_OTHER threads at the special SCHED_FIFO,0 priority
+  level in the Xenomai core. SCHED_WEAK is a generalization of such
+  policy, which provides for 99 priority levels, to cope with the full
+  extent of the regular Linux SCHED_FIFO/RR priority range.
+
+[normal]
+  For instance, a (non real-time) Xenomai thread within the SCHED_WEAK
+  class at priority level 20 in the Cobalt core, may be scheduled with
+  policy SCHED_FIFO/RR at priority 20, by the Linux kernel. The code
+  fragment below would set the scheduling parameters accordingly,
+  assuming the Cobalt version of +pthread_setschedparam(3)+ is invoked:
+
+----------------------------------------------------------------------
+	struct sched_param param = {
+	       .sched_priority = -20,
+	};
+
+	pthread_setschedparam(tid, SCHED_FIFO, &param);
+----------------------------------------------------------------------
+
+[normal]
+  Switching a thread to the SCHED_WEAK class can be done by negating
+  the priority level in the scheduling parameters sent to the Cobalt
+  core. For instance, SCHED_FIFO, prio=-7 would be scheduled as
+  SCHED_WEAK, prio=7 by the Cobalt core.
+
+[normal]
+  SCHED_OTHER for a Xenomai-enabled thread is scheduled as
+  SCHED_WEAK,0 by the Cobalt core. When the SCHED_WEAK support is
+  disabled in the kernel configuration, only SCHED_OTHER is available
+  for weak scheduling of threads by the Cobalt core.
+
+- A new SCHED_QUOTA class is available to POSIX threads, which may be
+  optionally turned on using the +CONFIG_XENO_OPT_SCHED_QUOTA+ kernel
+  configuration switch.
+
+[normal]
+  This policy enforces a limitation on the CPU consumption of
+  threads over a globally defined period, known as the quota
+  interval. This is done by pooling threads with common requirements
+  in groups, and giving each group a share of the global period (see
+  CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+
+[normal]
+  When threads have entirely consumed the quota allotted to the group
+  they belong to, the latter is suspended as a whole, until the next
+  quota interval starts. At this point, a new runtime budget is given
+  to each group, in accordance with its share.
+
+- When called from primary mode, sched_yield(2) now delays the caller
+  for a short while *only in case* no context switch happened as a
+  result of the manual round-robin. The delay ends next time the
+  regular Linux kernel switches tasks, or a kernel (virtual) tick has
+  elapsed (TICK_NSEC), whichever comes first.
+
+[normal]
+  Typically, a Xenomai thread undergoing the SCHED_FIFO or SCHED_RR
+  policy with no contender at the same priority level would still be
+  delayed for a while. 
+
+.Rationale
+**********************************************************************
+In most cases, it is unwanted that sched_yield(2) does not cause any
+context switch, since this service is commonly used for implementing a
+poor man's cooperative scheduling. A typical use case involves a
+Xenomai thread running in primary mode which needs to yield the CPU to
+another thread running in secondary mode. By waiting for a context
+switch to happen in the regular kernel, we guarantee that the manual
+round-robin takes place between both threads, despite the execution
+mode mismatch. By limiting the incurred delay, we prevent a regular
+high priority SCHED_FIFO thread stuck in a tight loop, from locking
+out the delayed Xenomai thread indefinitely.
+**********************************************************************
+
+=== Thread management ===
+
+- The minimum and default stack size is set to `max(64k,
+  PTHREAD_STACK_MIN)`.
+
+- pthread_set_name_np() has been renamed to pthread_setname_np() with
+  the same arguments, to conform with the GNU extension equivalent.
+
+- pthread_set_mode_np() has been renamed to pthread_setmode_np() for
+  naming consistency with pthread_setname_np(). In addition, the call
+  introduces the PTHREAD_DISABLE_LOCKBREAK mode flag, which disallows
+  breaking the scheduler lock.
+
+[normal]
+  When unset (default case), a thread which holds the scheduler lock
+  drops it temporarily while sleeping.  When set, any attempt to block
+  while holding the scheduler lock will cause a break condition to be
+  immediately raised, with the caller receiving EINTR.
+
+[WARNING]
+A Xenomai thread running with PTHREAD_DISABLE_LOCKBREAK and
+PTHREAD_LOCK_SCHED both set may enter a runaway loop when attempting
+to sleep on a resource or synchronization object (e.g. mutex or
+condition variable).
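+
+A minimal sketch of setting these mode bits with the Cobalt extension
+(assuming pthread_setmode_np()'s three-argument form):
+
+------------------------------------------------------
+int oldmode, ret;
+
+/* Lock the scheduler; sleeping will then raise EINTR. */
+ret = pthread_setmode_np(0, PTHREAD_LOCK_SCHED|
+			 PTHREAD_DISABLE_LOCKBREAK, &oldmode);
+------------------------------------------------------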
+
+=== Semaphores ===
+
+- With Cobalt, sem_wait(3), sem_trywait(3), sem_timedwait(3), and
+  sem_post(3) have gained fast acquisition/release operations not
+  requiring any system call, unless a contention exists on the
+  resource. As a consequence, those services may not systematically
+  switch callers executing in relaxed mode to real-time mode, unlike
+  with Xenomai 2.x.
+
+=== Process management ===
+
+- In a +fork(2)+ -> +exec(2)+ sequence, all Cobalt API objects created
+  by the child process before it calls +exec(2)+ are automatically
+  flushed by the Xenomai core.
+
+[[real-time-signals]]
+=== Real-time signals ===
+
+- Support for Xenomai real-time signals is available.
+
+[normal]
+Cobalt replacements for +sigwait(3)+, +sigwaitinfo(2)+,
++sigtimedwait(2)+, +sigqueue(3)+ and +kill(2)+ are
+available. +pthread_kill(3)+ was changed to send thread-directed
+Xenomai signals (instead of regular Linux signals).
+
+[normal]
+Cobalt-based signals are strictly real-time. Both the sender and
+receiver sides work exclusively from the primary domain. However, only
+synchronous handling is available, with a thread waiting explicitly
+for a set of signals, using one of the +sigwait+ calls. There is no
+support for asynchronous delivery of signals to handlers. For this
+reason, there is no provision in the Cobalt API for masking signals,
+as Cobalt signals are implicitly blocked for a thread until the latter
+invokes one of the +sigwait+ calls.
+
+[normal]
+Signals from SIGRTMIN..SIGRTMAX are queued.
+
+[normal]
+COBALT_DELAYMAX is defined as the maximum number of overruns which can
+be reported by the Cobalt core in the siginfo.si_overrun field, for
+any signal.
+
+- Cobalt's +kill(2)+ implementation supports group signaling.
+
+[normal]
+Cobalt's implementation of kill(2) behaves identically to the regular
+system call for non thread-directed signals (i.e. pid <= 0). In this
+case, the caller switches to secondary mode.
+
+[normal]
+Otherwise, Cobalt first attempts to deliver a thread-directed signal
+to the thread whose kernel TID matches the given process id. If this
+thread is not waiting for signals at the time of the call, kill(2) then
+attempts to deliver the signal to a thread from the same process,
+which currently waits for a signal.
+
+- +pthread_kill(3)+ is a conforming call.
+
+[normal]
+When Cobalt's replacement for +pthread_kill(3)+ is invoked, a
+Xenomai-enabled caller is automatically switched to primary mode on
+its way to sending the signal, under the control of the real-time
+co-kernel. Otherwise, the caller keeps running under the control of
+the regular Linux kernel.
+
+[normal]
+This behavior also applies to the new Cobalt-based replacement for the
++kill(2)+ system call.
+
+=== Timers ===
+
+- POSIX timers are no longer dropped when the creator thread
+  exits. However, they are dropped when the container process exits.
+
+- If the thread signaled by a POSIX timer exits, the timer is
+  automatically stopped at the first subsequent timeout which fails
+  sending the notification. The timer lingers until it is deleted by a
+  call to +timer_delete(2)+ or when the process exits, whichever comes
+  first.
+
+- timer_settime(2) may be called from a regular thread (i.e. one
+  which is not Xenomai-enabled).
+
+- EPERM is not returned anymore by POSIX timer calls. EINVAL is
+  substituted in the corresponding situation.
+
+- Cobalt replacements for +timerfd_create(2)+, +timerfd_settime(2)+ and
++timerfd_gettime(2)+ have been introduced. The implementation delivers
+I/O notifications to RTDM file descriptors upon Cobalt-originated
+real-time signals.
+
+- `pthread_make_periodic_np()` and `pthread_wait_np()` have been
+removed from the API.
+
+.Rationale
+**********************************************************************
+With the introduction of services to support real-time signals, those
+two non-portable calls have become redundant. Instead, Cobalt-based
+applications should set up a periodic timer using the
+`timer_create(2)`+`timer_settime(2)` call pair, then wait for release
+points via `sigwaitinfo(2)`. Overruns can be detected by looking at the
+siginfo.si_overrun field.
+    
+Alternatively, applications may obtain a file descriptor referring to
+a Cobalt timer via the `timerfd_create(2)` call, and `read(2)` from it to wait
+for timeouts.
+    
+In addition, applications may include a timer in a synchronous
+multiplexing operation involving other event sources, by passing a
+file descriptor returned by the `timerfd_create(2)` service to a `select(2)`
+call.
+**********************************************************************
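+
+A minimal sketch of the suggested replacement pattern, using standard
+POSIX calls (error checking elided; the 1ms period is arbitrary):
+
+----------------------------------------------------------------------
+timer_t tm;
+struct sigevent sev;
+struct itimerspec its;
+sigset_t set;
+siginfo_t si;
+
+sigemptyset(&set);
+sigaddset(&set, SIGRTMIN);
+
+memset(&sev, 0, sizeof(sev));
+sev.sigev_notify = SIGEV_SIGNAL;
+sev.sigev_signo = SIGRTMIN;
+timer_create(CLOCK_MONOTONIC, &sev, &tm);
+
+memset(&its, 0, sizeof(its));
+its.it_value.tv_nsec = 1000000;	   /* first release point */
+its.it_interval.tv_nsec = 1000000; /* period */
+timer_settime(tm, 0, &its, NULL);
+
+for (;;) {
+	sigwaitinfo(&set, &si);	/* wait for the next release point */
+	/* si.si_overrun reports any overruns */
+}
+----------------------------------------------------------------------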
+
+[TIP]
+A limited emulation of the pthread_make_periodic_np() and
+pthread_wait_np() calls is available from the <<trank,Transition
+Kit>>.
+
+=== Clocks ===
+
+- The internal identifier of CLOCK_HOST_REALTIME has changed from 42
+  to 8.
+
+[CAUTION]
+This information should normally remain opaque to applications, as it
+is subject to change with ABI revisions.
+
+=== Message queues ===
+
+- +mq_open(3)+ default attributes align on the regular kernel values,
+  i.e. 10 msg x 8192 bytes (instead of 128 x 128).
+
+- +mq_send(3)+ now enforces a maximum priority value for messages
+  (32768).
+
+=== POSIX I/O services ===
+
+- A Cobalt replacement for mmap(2) has been introduced. The
+  implementation invokes the <<rtdm-mmap, +.mmap+ operation handler>>
+  from the appropriate RTDM driver the file descriptor is connected
+  to.
+
+- A Cobalt replacement for fcntl(2) has been introduced. The
+  implementation currently deals with the O_NONBLOCK flag exclusively.
+
+- Cobalt's select(2) service is not automatically restarted anymore
+  upon Linux signal receipt, conforming to the POSIX standard (see man
+  signal(7)). In such an event, -1 is returned and errno is set to
+  EINTR.
+
+- The former +include/rtdk.h+ header is gone in Xenomai
+3.x. Applications should include +include/stdio.h+ instead.
+Similarly, the real-time suitable STDIO routines are now part of
++libcobalt+.
+
+== Alchemy interface (formerly _native API_) ==
+
+=== General ===
+
+- The API calls supporting a wait operation may return the -EIDRM
+error code only when the target object was deleted while
+pending. Otherwise, passing a deleted object identifier to an API call
+will result in -EINVAL being returned.
+
+=== Interrupt management ===
+
+- The +RT_INTR+ API is gone. Please see the <<irqhandling,rationale>>
+  for not handling low-level interrupt service code from user-space.
+
+[TIP]
+It is still possible to have the application wait for interrupt
+receipts, as explained <<userirqtip,here>>.
+
+=== I/O regions ===
+
+- The RT_IOREGION API is gone. I/O memory resources should be
+  controlled from a RTDM driver instead.
+
+[TIP]
+<<userirqtip,UDD>> provides a simple way to implement mini-drivers
+exposing any kind of memory regions to applications in user-space, via
+Cobalt's mmap(2) call.
+
+=== Timing services ===
+
+- +rt_timer_tsc()+, +rt_timer_ns2tsc()+ and +rt_timer_tsc2ns()+ have
+  been removed from the API.
+
+.Rationale
+**********************************************************************
+Due to the accumulation of rounding errors, using raw timestamp values
+from the underlying clock source hardware for measuring long
+timespans may yield (increasingly) wrong results.
+    
+Either we guarantee stable computations with counts of nanoseconds
+from within the application, or with raw timestamps instead,
+regardless of the clock source frequency, but we can't provide such a
+guarantee for both. From an API standpoint, the nanosecond unit is
+definitely the best option as the meaning won't vary between clock
+sources.
+    
+Avoiding the overhead of the tsc->ns conversion as a justification to
+use raw TSC counts does not fly anymore, as all architectures
+implement fast arithmetics for this operation over Cobalt, and
+Mercury's (virtual) timestamp counter is actually mapped over
+CLOCK_MONOTONIC.
+**********************************************************************
+
+[TIP]
+Alchemy users should measure timespans (or get timestamps) as counts
+of nanoseconds as returned by rt_timer_read() instead.
+
+- +rt_timer_inquire()+ now has a void return type, instead of always
+  returning zero as previously. As a consequence of the previously
+  documented change regarding TSC values, the current TSC count is no
+  longer returned in the RT_TIMER_INFO structure.
+
+- +rt_timer_set_mode()+ is obsolete. The clock resolution has become a
+per-process setting, which should be set using the
++--alchemy-clock-resolution+ switch on the command line.
+
+[TIP]
+Tick-based timing can be obtained by setting the resolution of the
+Alchemy clock for the application, here to one millisecond (the
+argument expresses a count of nanoseconds per tick).  As a result of
+this, all timeout and date values passed to Alchemy API calls will be
+interpreted as counts of milliseconds.
+----------------------------------------------------------
+# xenomai-application --alchemy-clock-resolution=1000000
+----------------------------------------------------------
+
+[normal]
+By default, the Alchemy API sets the clock resolution for the new
+process to one nanosecond (i.e. tickless, highest resolution).
+
+- TM_INFINITE also means infinite wait with all +rt_*_until()+ call
+  forms.
+
+- +rt_task_set_periodic()+ does not suspend the target task anymore.
+If a start date is specified, then +rt_task_wait_period()+ will apply
+the initial delay.
+
+.Rationale
+**********************************************************************
+A periodic Alchemy task has to call +rt_task_wait_period()+ from
+within its work loop for sleeping until the next release point is
+reached. Since waiting for the initial and subsequent release points
+will most often happen at the same code location in the application,
+the semantics of rt_task_set_periodic() can be simplified so that only
+rt_task_wait_period() may block the caller.
+**********************************************************************
+
+[TIP]
+In the unusual case where you do need to have the current task wait
+for the initial release point outside of its periodic work loop, you
+can issue a call to +rt_task_wait_period()+ separately, exclusively
+for this purpose, i.e.
+---------------------------------------------------------------
+              /* wait for the initial release point. */
+              ret = rt_task_wait_period(&overruns);
+	      /* ...more preparation work... */
+	      for (;;) {
+	       	       /* wait for the next release point. */
+	               ret = rt_task_wait_period(&overruns);
+		       /* ...do periodic work... */
+	      }
+---------------------------------------------------------------
+However, this workaround won't work if the caller is not the target
+task of rt_task_set_periodic(), which is fortunately unusual for most
+applications.
+
+[normal]
++rt_task_set_periodic()+ still switches the caller to primary mode
+over Cobalt, as previously. However, it does not return -EWOULDBLOCK
+anymore.
+
+- TM_ONESHOT was dropped, because the operation mode of the hardware
+  timer has no meaning for the application. The core Xenomai system
+  always operates the available timer chip in oneshot mode anyway.
+  A tickless clock has a period of one nanosecond.
+
+- Unlike with Xenomai 2.x, the target task to +rt_task_set_periodic()+
+  must be local to the current process.
+
+[TIP]
+A limited emulation of the deprecated rt_task_set_periodic() behavior
+is available from the <<trank,Transition Kit>>.
+
+=== Mutexes ===
+
+- For consistency with the standard glibc implementation, deleting a
+  RT_MUTEX object in locked state is no longer a valid operation.
+
+- +rt_mutex_inquire()+ does not return the count of waiters anymore.
+
+.Rationale
+**********************************************************************
+Obtaining the current count of waiters only makes sense for debugging
+purposes. Keeping it in the API would introduce a significant
+overhead to maintain internal consistency.
+**********************************************************************
+
+[normal]
+The +owner+ field of a RT_MUTEX_INFO structure now reports the owner's
+task handle, instead of its name. When the mutex is unlocked, a NULL
+handle is returned, which has the same meaning as a zero value in the
+former +locked+ field.
+
+=== Condition variables ===
+
+- For consistency with the standard glibc implementation, deleting a
+  RT_COND object currently pended by other tasks is no longer a valid
+  operation.
+
+- Like +rt_mutex_inquire()+, +rt_cond_inquire()+ does not return the
+count of waiting tasks anymore.
+
+=== Events ===
+
+- Event flags (RT_EVENT) are represented by a regular integer, instead
+  of a long integer as with Xenomai 2.x. This change impacts the
+  following calls:
+
+  * rt_event_create()
+  * rt_event_signal()
+  * rt_event_clear()
+  * rt_event_wait()
+  * rt_event_wait_until()
+
+.Rationale
+**********************************************************************
+Using long integers for representing event bit masks potentially
+creates a portability issue for applications between 32 and 64bit CPU
+architectures. This issue is solved by using 32bit integers on 32/64
+bit machines, which is normally more than enough for encoding the set
+of events received by a single RT_EVENT object.
+**********************************************************************
+
+[TIP]
+These changes are covered by the <<trank,Transition Kit>>.
+
+=== Task management ===
+
+- +rt_task_notify()+ and +rt_task_catch()+ have been removed. They are
+  meaningless in a userland-only context.
+
+- As a consequence of the previous change, the T_NOSIG flag to
+  +rt_task_set_mode()+ was dropped in the same move.
+
+- T_SUSP cannot be passed to rt_task_create() or rt_task_spawn()
+  anymore.
+
+.Rationale
+**********************************************************************
+This behavior can be achieved by not calling +rt_task_start()+
+immediately after +rt_task_create()+, or by calling
++rt_task_suspend()+ before +rt_task_start()+.
+**********************************************************************
+
+- T_FPU is obsolete. FPU management is automatically enabled for
+  Alchemy tasks if the hardware supports it, disabled otherwise.
+
+- +rt_task_shadow()+ now accepts T_LOCK, T_WARNSW.
+
+- +rt_task_create()+ now accepts T_LOCK, T_WARNSW and T_JOINABLE.
+
+- The RT_TASK_INFO structure returned by +rt_task_inquire()+ has
+  changed:
+   * fields +relpoint+ and +cprio+ have been removed, since the
+     corresponding information is too short-lived to be valuable to
+     the caller. The task's base priority is still available from
+     the +prio+ field.
+   * new field +pid+ represents the Linux kernel task identifier for
+     the Alchemy task, as obtained from syscall(__NR_gettid).
+   * other fields representing runtime statistics are now available
+     from a core-specific +stat+ field sub-structure.
+
+- New +rt_task_send_until()+, +rt_task_receive_until()+ calls are
+  available, as variants of +rt_task_send()+ and +rt_task_receive()+
+  respectively, with absolute timeout specification.
+
+- rt_task_receive() does not inherit the priority of the sender
+  anymore, although requests are still queued by sender priority.
+
+[normal]
+The application now decides about the server priority, instead of the
+real-time core applying implicit dynamic boosts.
+
+- +rt_task_slice()+ now returns -EINVAL if the caller currently holds
+  the scheduler lock, or attempts to change the round-robin settings
+  of a thread which does not belong to the current process.
+
+- T_CPU disappears from the +rt_task_create()+ mode flags. The new
+  +rt_task_set_affinity()+ service is available for setting the CPU
+  affinity of a task.
+
+[TIP]
+An emulation of rt_task_create() and rt_task_spawn() accepting the
+deprecated flags is available from the <<trank,Transition Kit>>.
+
+- +rt_task_sleep_until()+ does not return -ETIMEDOUT anymore. Waiting
+  for a date in the past blocks the caller indefinitely.
+
+=== Message queues ===
+
+- As Alchemy-based applications run in user-space, the following
+  +rt_queue_create()+ mode bits from the former _native_ API are
+  obsolete:
+
+   * Q_SHARED
+   * Q_DMA
+
+[TIP]
+Placeholders for those deprecated definitions are available from the
+<<trank,Transition Kit>>.
+
+=== Heaps ===
+
+- As Alchemy-based applications run in user-space, the following
+  +rt_heap_create()+ mode bits from the former _native_ API are
+  obsolete:
+
+   * H_MAPPABLE
+   * H_SHARED
+   * H_NONCACHED
+   * H_DMA
+
+[TIP]
+If you need to allocate a chunk of DMA-suitable memory, then you
+should create a RTDM driver for this purpose.
+
+- +rt_heap_alloc_until()+ is a new call for waiting for a memory
+  chunk, specifying an absolute timeout date.
+
+- With the removal of H_DMA, returning a physical address (phys_addr)
+  from +rt_heap_inquire()+ does not apply anymore.
+
+[TIP]
+Placeholders for those deprecated definitions are available from the
+<<trank,Transition Kit>>.
+
+=== Alarms ===
+
+- +rt_alarm_wait()+ has been removed.
+
+.Rationale
+**************************************************************
+An alarm handler can be passed to +rt_alarm_create()+ instead.
+**************************************************************
+
+- The RT_ALARM_INFO structure returned by +rt_alarm_inquire()+ has
+  changed:
+   * field +expiration+ has been removed, since the corresponding
+     information is too short-lived to be valuable to the caller.
+
+   * field +active+ has been added, to reflect the current state of
+     the alarm object. If non-zero, the alarm is enabled
+     (i.e. started).
+
+[TIP]
+An emulation of rt_alarm_wait() is available from the
+<<trank,Transition Kit>>.
+
+=== Message pipes ===
+
+- +rt_pipe_create()+ now returns the minor number assigned to the
+  connection, matching the /dev/rtp<minor> device usable by the
+  regular threads. As a consequence, any return value greater than
+  or equal to zero denotes a successful operation, while a negative
+  return denotes an error.
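+
+[normal]
+As a sketch (hypothetical helper name), the new return convention can
+be used to derive the device path regular threads should open:
+
+.Deriving the /dev/rtp<minor> path from rt_pipe_create()
+------------------------------------------------------------
+#include <stdio.h>
+#include <alchemy/pipe.h>
+
+static int open_pipe_endpoint(RT_PIPE *pipe, char *path, size_t len)
+{
+	int minor = rt_pipe_create(pipe, "logPipe", P_MINOR_AUTO, 0);
+
+	if (minor < 0)	/* a negative return denotes an error */
+		return minor;
+
+	snprintf(path, len, "/dev/rtp%d", minor);
+	return 0;
+}
+------------------------------------------------------------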
+
+- Writing to a message pipe is allowed from all contexts, including
+  from alarm handlers.
+
+- +rt_pipe_read_until()+ is a new call for waiting for input from a
+  pipe, specifying an absolute timeout date.
+
+== pSOS interface changes ==
+
+=== Memory regions ===
+
+- +rn_create()+ may return ERR_NOSEG if the region control block
+  cannot be allocated internally.
+
+=== Scheduling ===
+
+- The emulator converts priority levels between the core POSIX and
+  pSOS scales using normalization (pSOS -> POSIX) and denormalization
+  (POSIX -> pSOS) handlers.
+
+[normal]
+Applications may override the default priority
+normalization/denormalization handlers by implementing the following
+routines.
+
+------------------------------------------------------------
+int psos_task_normalize_priority(unsigned long psos_prio);
+
+unsigned long psos_task_denormalize_priority(int core_prio);
+------------------------------------------------------------
+
+[normal]
+Over Cobalt, the POSIX scale is extended to 257 levels, which allows
+mapping the pSOS priority scale 1:1 onto the POSIX scale, leaving the
+normalization/denormalization handlers as no-ops by default.
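+
+[normal]
+For instance, overriding the handlers with an explicit identity
+mapping (shown for illustration only; this is equivalent to the
+built-in no-op behavior over Cobalt) would read:
+
+.Identity priority mapping handlers
+------------------------------------------------------------
+int psos_task_normalize_priority(unsigned long psos_prio)
+{
+	return (int)psos_prio;		/* pSOS -> POSIX */
+}
+
+unsigned long psos_task_denormalize_priority(int core_prio)
+{
+	return (unsigned long)core_prio;	/* POSIX -> pSOS */
+}
+------------------------------------------------------------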
+
+== VxWorks interface changes ==
+
+=== Task management ===
+
+- +WIND_*+ status bits are synced to the user-visible TCB only as a
+result of a call to +taskTcb()+ or +taskGetInfo()+.
+
+[normal]
+As a consequence of this change, any reference to a user-visible TCB
+should be refreshed by calling +taskTcb()+ anew, each time reading the
++status+ field is required.
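+
+[normal]
+As a sketch (hypothetical helper; the +WIND_SUSPEND+ test is an
+assumed example), fetching a fresh TCB on each status check reads:
+
+.Refreshing the TCB before reading the status field
+------------------------------------------------------------
+#include <vxworks/taskLib.h>
+
+/* A stale TCB pointer may carry outdated WIND_* status bits. */
+static int task_is_suspended(TASK_ID tid)
+{
+	WIND_TCB *tcb = taskTcb(tid);
+
+	return tcb != NULL && (tcb->status & WIND_SUSPEND) != 0;
+}
+------------------------------------------------------------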
+
+=== Scheduling ===
+
+- The emulator converts priority levels between the core POSIX and
+  VxWorks scales using normalization (VxWorks -> POSIX) and
+  denormalization (POSIX -> VxWorks) handlers.
+
+[normal]
+Applications may override the default priority
+normalization/denormalization handlers by implementing the following
+routines.
+
+------------------------------------------------------------
+int wind_task_normalize_priority(int wind_prio);
+
+int wind_task_denormalize_priority(int core_prio);
+------------------------------------------------------------
+
+[[trank]]
+== Using the Transition Kit ==
+
+Xenomai 2 applications in user-space may use a library and a set of
+compatibility headers, aimed at easing the process of transitioning to
+Xenomai 3.
+
+Enabling this compatibility layer is done by passing specific
+compilation and linker flags when building the
+application. +xeno-config+ can retrieve those flags using the
++--cflags+ and +--ldflags+ switches as usual, with the addition of the
++--compat+ flag. Alternatively, passing the +--[skin=]native+ switch
+to +xeno-config+ implicitly turns on the compatibility mode for the
+Alchemy API.
+
+[NOTE]
+The transition kit does not cover _all_ the changes introduced in
+Xenomai 3 yet, but it covers a significant subset of them
+nevertheless.
+
+.A typical Makefile fragment implicitly turning on backward compatibility
+------------------------------------------------------------
+PREFIX := /usr/xenomai
+CONFIG_CMD := $(PREFIX)/bin/xeno-config
+CFLAGS= $(shell $(CONFIG_CMD) --skin=native --cflags) -g
+LDFLAGS= $(shell $(CONFIG_CMD) --skin=native --ldflags)
+CC = $(shell $(CONFIG_CMD) --cc)
+------------------------------------------------------------
+
+.Another example for using with the POSIX API
+------------------------------------------------------------
+PREFIX := /usr/xenomai
+CONFIG_CMD := $(PREFIX)/bin/xeno-config
+CFLAGS= $(shell $(CONFIG_CMD) --skin=posix --cflags --compat) -g
+LDFLAGS= $(shell $(CONFIG_CMD) --skin=posix --ldflags --compat)
+CC = $(shell $(CONFIG_CMD) --cc)
+------------------------------------------------------------
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am b/kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am
new file mode 100644
index 0000000..2bacb7c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/Makefile.am
@@ -0,0 +1,136 @@
+HTML_DOCS = 					\
+	html/MIGRATION				\
+	html/README.APPLICATIONS		\
+	html/README.INSTALL			\
+	html/TROUBLESHOOTING.COBALT		\
+	html/TROUBLESHOOTING.MERCURY		\
+	html/asciidoc-icons			\
+	html/asciidoc-icons/callouts		\
+	html/man1/autotune			\
+	html/man1/chkkconf			\
+	html/man1/clocktest			\
+	html/man1/corectl			\
+	html/man1/dohell			\
+	html/man1/latency			\
+	html/man1/rtcanconfig			\
+	html/man1/rtcanrecv			\
+	html/man1/rtcansend			\
+	html/man1/slackspot			\
+	html/man1/switchtest			\
+	html/man1/xeno				\
+	html/man1/xeno-config			\
+	html/man1/xeno-test
+
+PDF_DOCS = 				\
+	MIGRATION.pdf			\
+	README.APPLICATIONS.pdf 	\
+	README.INSTALL.pdf 		\
+	TROUBLESHOOTING.COBALT.pdf	\
+	TROUBLESHOOTING.MERCURY.pdf
+
+TXT_DOCS =				\
+	MIGRATION.txt			\
+	README.APPLICATIONS.txt 	\
+	README.INSTALL.txt		\
+	TROUBLESHOOTING.COBALT.txt	\
+	TROUBLESHOOTING.MERCURY.txt
+
+MAN1_DOCS = 			\
+	man1/autotune.1		\
+	man1/chkkconf.1 	\
+	man1/clocktest.1 	\
+	man1/corectl.1	 	\
+	man1/cyclictest.1 	\
+	man1/dohell.1		\
+	man1/latency.1 		\
+	man1/rtcanconfig.1 	\
+	man1/rtcanrecv.1 	\
+	man1/rtcansend.1 	\
+	man1/slackspot.1	\
+	man1/switchtest.1 	\
+	man1/xeno-config.1 	\
+	man1/xeno-test.1 	\
+	man1/xeno.1
+
+EXTRA_DIST :=				\
+	MIGRATION.adoc 			\
+	README.APPLICATIONS.adoc	\
+	README.INSTALL.adoc 		\
+	TROUBLESHOOTING.COBALT.adoc	\
+	TROUBLESHOOTING.MERCURY.adoc 	\
+	plaintext.conf 			\
+	plaintext.xsl			\
+	plaintext_postproc.awk		\
+	$(MAN1_DOCS:%.1=%.adoc)
+
+if XENO_BUILD_DOC
+
+HTML_DOCSDIR = ./
+PDF_DOCSDIR = ./
+MAN_DOCSDIR = ./
+
+ASCIIDOC_HTML_OPTS=-a icons -a iconsdir=../asciidoc-icons \
+	-a toc -a toclevels=3 -a max-width=55em -a xenover=$(PACKAGE_VERSION)
+
+ASCIIDOC_PDF_OPTS=-a icons -a toc -a toclevels=3 -a xenover=$(PACKAGE_VERSION)
+
+ASCIIDOC_MAN_OPTS=-a xenover=$(PACKAGE_VERSION)
+
+ASCIIDOC_TXT_OPTS=-a xenover=$(PACKAGE_VERSION) -a encoding=ascii
+
+tmpdir=adoc_plaintext
+
+all-local: $(HTML_DOCS) $(PDF_DOCS) $(TXT_DOCS) $(MAN1_DOCS)
+
+html/%: %.adoc Makefile
+	@$(mkdir_p) $@
+	$(ASCIIDOC) -n -b xhtml11 $(ASCIIDOC_HTML_OPTS) -o $@/index.html $<
+
+%.1: %.adoc Makefile
+	@$(mkdir_p) man1
+	$(A2X) -f manpage -D man1 $(ASCIIDOC_MAN_OPTS) $<
+
+%.pdf: %.adoc Makefile
+	$(A2X) -f pdf -D . $(ASCIIDOC_PDF_OPTS) $<
+
+$(tmpdir)/%.txt: %.adoc Makefile plaintext.conf plaintext.xsl
+	@$(mkdir_p) $(tmpdir)
+	$(ASCIIDOC) --backend docbook -f $(srcdir)/plaintext.conf \
+		--doctype article $(ASCIIDOC_TXT_OPTS) \
+		--out-file $(tmpdir)/$*.xml $<
+	xsltproc --stringparam toc.section.depth 3 --nonet \
+		--output $(tmpdir)/$*.html $(srcdir)/plaintext.xsl \
+		$(tmpdir)/$*.xml
+	w3m -cols 80 -dump -T text/html -no-graph $(tmpdir)/$*.html > $@
+
+%.txt: $(tmpdir)/%.txt Makefile plaintext_postproc.awk
+	awk -f $(srcdir)/plaintext_postproc.awk $(tmpdir)/$*.txt > $@
+
+html/asciidoc-icons:
+	$(RM) -R asciidoc-icons
+	@if test -d /usr/share/doc/asciidoc/images/; then \
+		cp -a /usr/share/doc/asciidoc/images/icons/ html/asciidoc-icons; \
+	elif test -d /usr/share/asciidoc/images/icons/; then \
+		cp -a /usr/share/asciidoc/images/icons/ html/asciidoc-icons; \
+	else \
+		cp -a /etc/asciidoc/images/icons/ html/asciidoc-icons; \
+	fi
+
+html/asciidoc-icons/callouts: html/asciidoc-icons
+
+.PHONY: html/asciidoc-icons
+
+include $(top_srcdir)/doc/install.rules
+
+install-data-local: install-docs-local
+uninstall-local: uninstall-docs
+
+else
+install-data-local:
+uninstall-local:
+endif
+
+distclean-local: clean-local
+
+clean-local:
+	$(RM) -R $(HTML_DOCS) $(PDF_DOCS) $(TXT_DOCS) $(tmpdir)
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc
new file mode 100644
index 0000000..4447bc0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/README.APPLICATIONS.adoc
@@ -0,0 +1,78 @@
+Running applications with Xenomai 3.x
+=====================================
+
+Running a Xenomai 3 application
+-------------------------------
+
+For _Cobalt_, you will need the real-time core built into the target
+Linux kernel as described in link:installing-xenomai-3-x[this
+document].
+
+For _Mercury_, you need no Xenomai-specific kernel support so far,
+beyond what your host Linux kernel already provides. Your kernel
+should at least provide high resolution timer support
+(+CONFIG_HIGH_RES_TIMERS+), and likely complete preemption
+(_PREEMPT_RT_) if your application requires short and bounded
+latencies.
+
+Any Xenomai-based application recognizes a set of standard options
+that may be passed on the command line, described in
+link:application-setup-and-init#Standard_Xenomai_command_line_options[this document].
+
+In addition, the *Alchemy*, *pSOS (TM)* and *VxWorks (TM)* APIs running
+over the Xenomai core can define the clock resolution to be used,
+given as a count of nanoseconds, i.e. HZ=(1000000000 / ns), by the
++--{alchemy/psos/vxworks}-clock-resolution=<ns>+ option.
+
+If your application combines multiple APIs, you may pass several
+clock-resolution switches to set them all.
+
+The default value depends on the API being considered. For instance,
+the VxWorks (TM) and pSOS (TM) emulators default to millisecond clock
+rates. The Alchemy API is tickless by default,
+i.e. +--alchemy-clock-resolution=1+.
+
+[CAUTION]
+Specifying a resolution greater than 1 nanosecond requires the low
+resolution clock support to be available from the Xenomai libraries
+(see the +--enable-lores-clock+
+link:installing-xenomai-3-x#Generic_configuration_options_both_cores[configuration
+switch]).
+
+Valgrind support
+----------------
+
+Running Xenomai applications over _Valgrind_ is currently available to
+the _Mercury_ core only.
+
+When the Valgrind API is available to the application process, the
+configuration symbol CONFIG_XENO_VALGRIND_API is defined at build
+time, and may be tested for existence by the application code. See the
+tool documentation at
+http://valgrind.org/docs/manual/manual-core-adv.html#manual-core-adv.clientreq/[this address].
+
+The Xenomai autoconf script will automatically detect the Valgrind
+core header (i.e. /usr/include/valgrind/valgrind.h) on the build
+system, and define this symbol accordingly.
+
+[NOTE]
+You may need to install the Valgrind development package on your build
+system to provide for the core header files. For instance, such a
+package is called _valgrind-devel_ on Fedora.
+
+Available real-time APIs
+------------------------
+
+[horizontal]
+*Alchemy*::
+		This is a re-implementation from scratch of Xenomai's
+		2.x _native_ API, fully rebased on the new RTOS
+		abstraction interface.
+
+*pSOS*::
+		http://www.windriver.com[pSOS (TM)] is a registered
+		trademark of Wind River Systems, Inc.
+
+*VxWorks*::
+		http://www.windriver.com[VxWorks (TM)] is a registered
+		trademark of Wind River Systems, Inc.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc
new file mode 100644
index 0000000..da96686
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/README.INSTALL.adoc
@@ -0,0 +1,879 @@
+Installing Xenomai 3.x
+======================
+
+Introduction
+------------
+
+Xenomai 3 is the new architecture of the Xenomai real-time framework,
+which can run seamlessly side-by-side with Linux as a co-kernel system,
+natively over mainline Linux kernels.  In the latter case, the
+mainline kernel can be supplemented by the
+https://www.kernel.org/pub/linux/kernel/projects/rt/[PREEMPT-RT patch]
+to meet stricter response time requirements than standard kernel
+preemption would bring.
+
+One of the two available real-time cores is selected at build
+time. The dual kernel core is codenamed _Cobalt_, the native Linux
+implementation is called _Mercury_.
+
+[NOTE]
+If you are looking for detailed information about installing a legacy
+Xenomai 2.x release, please refer to link:installing-xenomai-2.x[this
+document] instead. Please note that Xenomai 2.x is discontinued and
+not maintained anymore.
+
+Installation steps
+------------------
+
+Xenomai follows a split source model, decoupling the kernel space
+support from the user-space libraries.
+
+To this end, kernel and user-space Xenomai components are respectively
+available under the `kernel/` and `lib/` sub-trees. Other top-level
+directories, such as `scripts/`, `testsuite/` and `utils/`, provide
+additional scripts and programs to be used on either the build host,
+or the runtime target.
+
+The `kernel/` sub-tree which implements the in-kernel support code is
+seen as a built-in extension of the Linux kernel.  Therefore, the
+standard Linux kernel configuration process should be used to define
+the various settings for the Xenomai kernel components. All of the
+kernel code Xenomai currently introduces implements the _Cobalt_ core
+(i.e. dual kernel configuration). As of today, the _Mercury_ core
+needs no Xenomai-specific code in kernel space.
+
+The `lib/` sub-tree contains the various user-space libraries exported
+by the Xenomai framework to the applications. This tree is built
+separately from the kernel support. Libraries are built in order to
+support the selected core, either _Cobalt_ or _Mercury_.
+
+[[cobalt-core-install]]
+Installing the _Cobalt_ core
+----------------------------
+Preparing the _Cobalt_ kernel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+_Xenomai/cobalt_ provides a real-time extension kernel seamlessly
+integrated to Linux, therefore the first step is to build it as part
+of the target kernel. To this end, `scripts/prepare-kernel.sh` is a
+shell script which sets up the target kernel properly. The syntax is
+as follows:
+
+------------------------------------------------------------------------------
+$ scripts/prepare-kernel.sh [--linux=<linux-srctree>]
+[--ipipe=<ipipe-patch>] [--arch=<target-arch>]
+------------------------------------------------------------------------------
+
+`--linux`:: specifies the path of the target kernel source tree. The
+    kernel tree may or may not be configured already; both cases are
+    handled. This path defaults to $PWD.
+
+`--ipipe`:: specifies the path of the interrupt pipeline (aka I-pipe)
+    patch to apply against the kernel tree. Suitable patches are
+    available from the project's link:/downloads/ipipe/[download
+    area].  This parameter can be omitted if the I-pipe has already
+    been patched in; otherwise the script shall suggest an appropriate
+    one. The script will detect whether the interrupt pipeline code is
+    already present in the kernel tree, and skip this operation if
+    so.
+
+`--arch`:: tells the script about the target architecture. If
+    unspecified, the build host architecture is suggested as a
+    reasonable default.
+
+For instance, the following command would prepare the Linux tree
+located at `/home/me/linux-3.10` in order to patch the Xenomai
+support in:
+
+------------------------------------------------------------------------------
+$ cd xenomai-3
+$ scripts/prepare-kernel.sh --linux=/home/me/linux-3.10
+------------------------------------------------------------------------------
+
+Note: The script will infer the location of the Xenomai kernel code
+from its own location within the Xenomai source tree. For instance, if
+`/home/me/xenomai-3/scripts/prepare-kernel.sh` is executing, then
+the Xenomai kernel code available from
+`/home/me/xenomai-3/kernel/cobalt` will be patched in the target
+Linux kernel.
+
+
+Configuring and compiling the _Cobalt_ kernel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Once prepared, the target kernel can be configured as usual. All
+Xenomai configuration options are available from the "Xenomai"
+toplevel Kconfig menu.
+
+There are several important kernel configuration options, documented
+in the link:troubleshooting-a-dual-kernel-configuration#kconf[TROUBLESHOOTING]
+guide.
+
+Once configured, the kernel can be compiled as usual.
+
+If you want several different configs/builds at hand, you may reuse
+the same source by adding `O=../build-<target>` to each make
+invocation.
+
+In order to cross-compile the Linux kernel, pass an ARCH and
+CROSS_COMPILE variable on make command line. See sections
+<<cobalt-core-arm,"Building a _Cobalt/arm_ kernel">>,
+<<cobalt-core-powerpc,"Building a _Cobalt/powerpc_ kernel">>,
+<<cobalt-core-x86,"Building a _Cobalt/x86_ kernel">>,
+for examples.
+
+[[cobalt-kernel-parameters]]
+_Cobalt_ kernel parameters
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The Cobalt kernel accepts the following set of parameters, which
+should be passed on the kernel command line by the boot loader.
+
+[options="header",grid="cols",frame="topbot",cols="2,3,1"]
+|============================================================================
+^|NAME              ^|DESCRIPTION                    ^|DEFAULT
+
+|xenomai.allowed_group=<gid> | Enable non-root access to Xenomai
+services from user-space.  <gid> is the ID of the Linux user group
+whose members should be allowed such access by the Cobalt core. | None
+
+|xenomai.sysheap_size=<kbytes> | Set the size of the memory heap used
+internally by the Cobalt core to allocate runtime objects.  This value
+is expressed in kilo-bytes. | 256
+		
+|xenomai.state=<state> | Set the initial state of the Cobalt core at
+boot up, which may be _enabled_, _stopped_ or _disabled_. See the
+documentation about the
+link:../documentation/xenomai-3/html/man1/corectl/index.html[corectl(1)]
+utility for a description of these states. | enabled
+
+|xenomai.smi=<state> | *x86-specific*: Set the state of the SMI
+workaround. The possible values are _disabled_, _detect_ and
+_enabled_. See the discussion about link:dealing-with-x86-SMI[SMIs]
+for a description of these states.| detect
+
+|xenomai.smi_mask=<source-mask> | *x86-specific*: Set of bits to mask
+in the SMI control register. | 1 (=global disable)
+
+|============================================================================
+
+[[cobalt-build-examples]]
+Examples of building the _Cobalt_ kernel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The examples in following sections use the following conventions:
+
+`$linux_tree`:: path to the target kernel sources
+`$xenomai_root`:: path to the Xenomai sources
+
+
+[[cobalt-core-x86]]
+Building a _Cobalt/x86_ kernel (32/64bit)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Building _Xenomai/cobalt_ for x86 is almost the same for 32bit and 64bit
+platforms. You should note, however, that it is not possible to run
+Xenomai libraries compiled for x86_32 on a kernel compiled for x86_64,
+and conversely.
+
+Assuming that you want to build natively for an x86_64 system (x86_32
+cross-build options from x86_64 appear between brackets), you would
+typically run:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=x86 \
+  --ipipe=ipipe-core-X.Y.Z-x86-NN.patch
+$ make [ARCH=i386] xconfig/gconfig/menuconfig
+------------------------------------------------------------------------------
+...configure the kernel (see also the recommended settings
+link:configuring-for-x86-based-dual-kernels[here]).
+
+Enable Xenomai options, then build with:
+------------------------------------------------------------------------------
+$ make [ARCH=i386] bzImage modules
+------------------------------------------------------------------------------
+
+Now, let's say that you really want to build Xenomai for a
+Pentium-based x86 32bit platform, using the native host toolchain; the
+typical steps would be as follows:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=i386 \
+  --ipipe=ipipe-core-X.Y.Z-x86-NN.patch
+$ make xconfig/gconfig/menuconfig
+------------------------------------------------------------------------------
+...configure the kernel (see also the recommended settings
+link:configuring-for-x86-based-dual-kernels[here]).
+
+Enable Xenomai options, then build with:
+------------------------------------------------------------------------------
+$ make bzImage modules
+------------------------------------------------------------------------------
+
+Similarly, for a 64bit platform, you would use:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=x86_64 \
+  --ipipe=ipipe-core-X.Y.Z-x86-NN.patch
+$ make xconfig/gconfig/menuconfig
+------------------------------------------------------------------------------
+...configure the kernel (see also the recommended settings
+link:configuring-for-x86-based-dual-kernels[here]).
+
+Enable Xenomai options, then build with:
+------------------------------------------------------------------------------
+$ make bzImage modules
+------------------------------------------------------------------------------
+
+The remaining examples illustrate how to cross-compile a
+_Cobalt_-enabled kernel for various architectures. Of course, you would
+have to install the proper cross-compilation toolchain for the target
+system first.
+
+[[cobalt-core-powerpc]]
+Building a _Cobalt/powerpc_ kernel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+A typical cross-compilation setup, in order to build Xenomai for a
+ppc-6xx architecture running a 3.10.32 kernel, using the DENX ELDK
+cross-compiler:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=powerpc \
+  --ipipe=ipipe-core-3.10.32-powerpc-1.patch
+$ make ARCH=powerpc CROSS_COMPILE=ppc_6xx- xconfig/gconfig/menuconfig
+------------------------------------------------------------------------------
+...select the kernel and Xenomai options, save the configuration
+------------------------------------------------------------------------------
+$ make ARCH=powerpc CROSS_COMPILE=ppc_6xx- uImage modules
+------------------------------------------------------------------------------
+...manually install the kernel image and modules to the proper location
+
+[[cobalt-core-arm]]
+Building a _Cobalt/arm_ kernel
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+Using codesourcery toolchain named `arm-none-linux-gnueabi-gcc` and
+compiling for a CSB637 board (AT91RM9200 based), a typical compilation
+will look like:
+
+------------------------------------------------------------------------------
+$ cd $linux_tree
+$ $xenomai_root/scripts/prepare-kernel.sh --arch=arm \
+  --ipipe=ipipe-core-X.Y.Z-arm-NN.patch
+$ mkdir -p $build_root/linux
+$ make ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- O=$build_root/linux \
+  csb637_defconfig
+$ make ARCH=arm CROSS_COMPILE=arm-none-linux-gnueabi- O=$build_root/linux \
+  zImage modules
+------------------------------------------------------------------------------
+...manually install the kernel image, system map and modules to the proper location
+
+
+[[mercury-core-install]]
+Installing the _Mercury_ core
+-----------------------------
+
+For _Mercury_, you need no Xenomai-specific kernel support so far,
+beyond what your host Linux kernel already provides. Your kernel
+should at least provide high resolution timer support
+(`CONFIG_HIGH_RES_TIMERS`), and likely complete preemption
+(_PREEMPT_RT_) if your application requires short and bounded
+latencies.
+
+Kernels with no real-time support can be used too, likely for basic
+debugging tasks, and/or running applications which do not have strict
+response time requirements.
+
+Therefore, unlike with _Cobalt_, there are no additional steps for
+preparing and/or configuring the kernel for _Mercury_.
+
+[[library-install]]
+Installing the Xenomai libraries and tools
+------------------------------------------
+
+Prerequisites
+~~~~~~~~~~~~~
+
+Generic requirements (both cores)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- GCC must have support for legacy atomic builtins (__sync form).
+
+- GCC should preferably have sane/working support for TLS,
+although this is not mandatory if building with `--disable-tls`.
+
+- If you plan to enable the user-space registry support
+  (i.e. +--enable-registry+), then CONFIG_FUSE_FS must be enabled in
+  the target kernel running the real-time applications. In addition,
+  the FUSE development libraries must be available from the toolchain.
+
+- If you plan to build from the sources available from the Xenomai GIT
+  tree (git.xenomai.org), the autoconf (>= 2.62), automake and libtool
+  packages must be available on your build system. This is not
+  required when building from a source tree extracted from a
+  link:/downloads/xenomai/[release tarball].
+
+_Cobalt_-specific requirements
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- The kernel version must be 3.10 or better.
+
+- An interrupt pipeline (I-pipe) patch must be available for your
+  target kernel. You can find the official patches issued by the
+  Xenomai project link:/downloads/ipipe/[there].
+  Only patches from the *ipipe-core* series are appropriate, legacy
+  patches from the *adeos-ipipe* series are not.
+
+- A timestamp counter (TSC) is required when running on x86_32
+  hardware. Unlike with Xenomai 2.x, TSC-emulation using a PIT
+  register is not available.
+
+_Mercury_-specific requirement
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+- There is no particular requirement for Mercury setups, although
+  using an NPTL-based glibc or uClibc is recommended.
+
+Configuring
+~~~~~~~~~~~
+
+If building the source obtained from the Xenomai GIT tree
+(git.xenomai.org), the `configure` script and Makefiles must be
+generated in the Xenomai source tree. The recommended way is to run
+the automatic reconfiguration script shipped, from the top of the
+source tree:
+
+---------------------
+$ ./scripts/bootstrap
+---------------------
+
+If building from a link:/downloads/xenomai/[release tarball], a set of
+autoconf-generated files will be readily available from the extracted
+source tree, and therefore reconfiguring will not be required.
+
+When run, the generated `configure` script prepares for building the
+libraries and programs, for both the _Cobalt_ and _Mercury_ cores. The
+core-specific code which may be needed internally is automatically and
+transparently selected at compilation-time by the build process.
+
+The options listed below can be passed to this script.
+
+Generic configuration options (both cores)
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+[horizontal]
+*--with-core=<type>*::
+
+	Indicates which real-time core you want to build the support
+	libraries for, namely _cobalt_ or _mercury_. This option
+	defaults to _cobalt_.
+
+*--prefix=<dir>*:: 
+
+	Specifies the root installation path for libraries, include
+	files, scripts and executables. Running `$ make install`
+	installs these files to `$DESTDIR/<dir>`.  This directory
+	defaults to /usr/xenomai.
+
+*--enable-debug[=partial]*::
+
+	This switch controls the debug level. Three levels are
+	available, with varying overhead:
+
+	- _symbols_ enables debug symbols to be compiled in the
+	libraries and executables, still turning on the optimizer
+	(-O2). This option has no overhead, it is useful to get
+	meaningful backtraces using gdb while running the application
+	at nominal speed.
+
+	- _partial_ includes _symbols_, and also turns on internal
+	consistency checks within the Xenomai code (mostly present in
+	the Copperplate layer). The `CONFIG_XENO_DEBUG` macro is
+	defined, for both the Xenomai libraries and the applications
+	getting their C compilation flags from the `xeno-config`
+	script (i.e. `xeno-config --cflags`). The partial debug mode
+	implicitly turns on `--enable-assert`. A measurable overhead
+	is introduced by this level.  This is the default level when
+	`--enable-debug` is mentioned with no level specification.
+
+	- _full_ includes _partial_ settings, but the optimizer is
+	disabled (-O0), and even more consistency checks may be
+	performed.  In addition to `CONFIG_XENO_DEBUG`, the macro
+	`CONFIG_XENO_DEBUG_FULL` is defined. This level introduces the
+	most overhead, which may triple the worst-case latency, or
+	even more.
+
+[normal]
+	Over the _Mercury_ core, enabling _partial_ or _full_ debug
+	modes also causes the standard malloc interface to be used
+	internally instead of a fast real-time allocator (TLSF). This
+	allows debugging memory-related issues with the help of
+	_Valgrind_ or other dynamic memory analysers.
+
+*--disable-debug*::
+
+	 Fully turns off all consistency checks and assertions, turns
+         on the optimizer and disables debug symbol generation.
+
+*--enable-assert*::
+
+	A number of debug assertion statements are present in the
+	Xenomai libraries, checking the internal consistency of the
+	runtime system dynamically (see _man assert(3)_). Passing
+	`--disable-assert` to the _configure_ script disables built-in
+	assertions unconditionally. By default, assertions are enabled
+	in partial or full debug modes, disabled otherwise.
+
+*--enable-pshared*::
+
+	Enable shared multi-processing. When enabled, this option
+	allows multiple processes to share real-time objects
+	(e.g. tasks, semaphores).
+
+*--enable-registry[=/registry-root-path]*::
+
+	Xenomai APIs can export their internal state through a
+	pseudo-filesystem, which files may be read to obtain
+	information about the existing real-time objects, such as
+	tasks, semaphores, message queues and so on.  This feature is
+	supported by http://fuse.sourceforge.net/[FUSE], which must be
+	available on the target system. Building the Xenomai libraries
+	with the registry support requires the FUSE development
+	libraries to be available from the toolchain. In addition,
+	CONFIG_FUSE_FS must be enabled in the target kernel.
+
+[normal]
+When this option is enabled, the system creates a file hierarchy at
+`<user>/<session>/<pid>` under the registry root path, where you can
+access the internal state of the active real-time objects. The
+session label is obtained from the --session runtime switch; if no
+session name is specified, `anon@<pid>` will be used. Unless a
+different root is given to the configuration switch, the registry
+root path defaults to +/var/run/xenomai+. E.g. looking at the
+properties of a VxWorks task could be done as follows:
+
+--------------------------------------------------------------------
+$ cat /var/run/xenomai/root/anon@12656/12656/vxworks/tasks/windTask 
+name       = windTask
+errno      = 0
+status     = ready
+priority   = 70
+lock_depth = 0
+--------------------------------------------------------------------
+
+[normal]
+	You may override the default root of the registry hierarchy
+	either statically at build time by passing the desired root
+	path to the --enable-registry configuration switch, or
+	dynamically by using the `--registry-root` runtime option
+	passed to the application.
+
+[NOTE]
+When running over _Xenomai/cobalt_, the `/proc/xenomai` interface is
+also available for inspecting the core system state.
+
+*--enable-lores-clock*::
+
+	Enables support for low resolution clocks. By default,
+	libraries are built with no support for tick-based timing. If
+	you need such support (e.g. for pSOS (TM) or VxWorks (TM)
+	APIs), then you can turn it on using this option.
+
+[NOTE]
+The POSIX API does not support tick-based timing. Alchemy may use it
+optionally.
+
+*--enable-clock-monotonic-raw*::
+
+	The Xenomai libraries require a monotonic clock to be
+	available from the underlying POSIX interface. When
+	`CLOCK_MONOTONIC_RAW` is available on your system, you may
+	want to pass this switch, otherwise `CLOCK_MONOTONIC` will be
+	used by default.
+
+[NOTE]
+The _Cobalt_ core implements `CLOCK_MONOTONIC_RAW`, so this switch is
+turned on by default when building with `--with-core=cobalt`. On the
+contrary, this option is turned off by default when building for the
+_Mercury_ core, since we don't know in advance whether this feature
+does exist on the target kernel.
+
+*--enable-tls*::
+
+	Xenomai can use GCC's thread local storage extension (TLS) to
+	speed up the retrieval of the per-thread information it uses
+	internally. This switch enables TLS; use the converse
+	`--disable-tls` to prevent this.
+
+[normal]
+	Due to GCC bugs regarding this feature with some
+	release/architecture combinations, whether TLS is turned on by
+	default is a per-architecture decision. Currently, this
+	feature is enabled for x86 and powerpc by default, other
+	architectures will require `--enable-tls` to be passed to the
+	_configure_ script explicitly.
+
+[normal]
+	Unless `--enable-dlopen-libs` is present, the _initial-exec_
+	TLS model is selected.
+
+[normal]
+	When TLS is disabled, POSIX's thread-specific data management
+	services are used internally (i.e. pthread_set/getspecific()).
+
+*--enable-dlopen-libs*::
+
+	This switch allows programs to load Xenomai-based libraries
+	dynamically, using the `dlopen(3)` routine. Enabling dynamic
+	loading introduces some overhead in TLS accesses when enabled
+	(see `--enable-tls`), which might be noticeable depending on
+	the architecture.
+
+[normal]
+	To support dynamic loading when `--enable-tls` is turned on,
+	the _global-dynamic_ TLS model is automatically selected.
+
+[normal]
+	Dynamic loading of Xenomai-based libraries is disabled by
+	default.
+
+*--enable-async-cancel*::
+
+	Enables fully asynchronous cancellation of Xenomai threads
+	created by the real-time APIs, making provision to protect the
+	Xenomai implementation code accordingly.
+[normal]
+	When disabled, Xenomai assumes that threads may exit due to
+	cancellation requests only when they reach cancellation points
+	(like system calls). Asynchronous cancellation is disabled
+	by default.
+
+[CAUTION]
+Fully asynchronous cancellation can easily lead to resource leakage,
+silent corruption, safety issues and all sorts of rampant bugs. The
+only reason to turn this feature on would be aimed at cancelling
+threads which run significantly long, syscall-less busy loops with no
+explicit exit condition, which should probably be revisited anyway.
+
+*--enable-smp*::
+
+	Turns on SMP support for Xenomai libraries.
+
+[CAUTION]
+SMP support must be enabled in Xenomai libraries when the
+client applications are running over an SMP-capable kernel.
+
+*--disable-sanity*::
+
+	Turns off the sanity checks performed at application startup
+	by the Xenomai libraries. This option sets a default, which
+	can later be overridden using the --[no-]sanity options passed
+	to a Copperplate-based Xenomai application. Sanity checks are
+	enabled by default when configuring.
+
+*--enable-fortify*::
+
+	Enables `_FORTIFY_SOURCE` when building the Xenomai code
+	unless --enable-debug=full is also given on the command line,
+	in which case --enable-fortify is silently ignored.
+
+*--disable-valgrind-client*::
+
+	Turns off the Valgrind client support, forcing
+	`CONFIG_XENO_VALGRIND_API` off in the Xenomai configuration
+	header.
+
+*--enable-doc-build*::
+
+	Causes the inline Xenomai documentation based on the
+	http://doxygen.org[Doxygen markup language] to be produced as
+	PDF and HTML documents. Additional documentation like manpages
+	based on the http://asciidoc.org/[Asciidoc markup language] is
+	produced too.
+
+_Cobalt_-specific configuration options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+[options="header",grid="cols",frame="topbot",cols="m,2*d"]
+|============================================================================
+^|NAME              ^|DESCRIPTION                    ^|DEFAULT
+|--enable-x86-vsyscall |Use the x86/vsyscall interface
+		        for issuing syscalls. If disabled,
+			the legacy 0x80 vector will be used.
+		        Turning on this option requires NPTL. |enabled
+
+|--enable-arm-tsc      |Enable ARM TSC emulation.
+		        footnote:[In the unusual
+		        situation where Xenomai
+		        does not support the kuser generic
+		        emulation for the target SOC, use
+			this option to specify another tsc
+			emulation method.
+		        See `--help` for a list of valid
+		        values.]                              |kuser
+
+|--enable-arm-quirks   |Enable quirks for specific ARM
+		        SOCs. Currently sa1100 and
+		        xscale3 are supported.	              |disabled
+|============================================================================
+
+_Mercury_-specific configuration options
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+[options="header",grid="cols",frame="topbot",cols="m,2*d"]
+|============================================================================
+^|NAME              ^|DESCRIPTION                    ^|DEFAULT
+|--enable-condvar-workaround | Enable workaround for broken priority
+        inheritance with condition variables in glibc. This option
+	adds some overhead to RTOS API emulators.     |disabled
+|============================================================================
+
+footnoteref:[disable,Each option enabled by default can be forcibly
+disabled by passing `--disable-<option>` to the _configure_ script]
+
+Cross-compilation
+~~~~~~~~~~~~~~~~~
+
+In order to cross-compile the Xenomai libraries and programs, you will
+need to pass a `--host` and `--build` option to the _configure_
+script. The `--host` option selects the architecture for which the
+libraries and programs are built. The `--build` option selects the
+architecture on which the compilation tools are run, i.e. the system
+running the _configure_ script.
+
+Since cross-compiling requires specific tools, such tools are
+generally prefixed with the host architecture name; for example, a
+compiler for the PowerPC architecture may be named
+`powerpc-linux-gcc`.
+
+When passing `--host=powerpc-linux` to configure, it will
+automatically use `powerpc-linux-` as a prefix to all compilation
+tool names, and infer the host architecture name from this prefix. If
+configure is unable to infer the architecture name from the
+cross-compilation tool prefix, you will have to pass the names of all
+compilation tools manually, using at least the CC and LD variables on
+the configure command line.
+
+The easiest way to build a GNU cross-compiler might involve using
+crosstool-ng, available http://crosstool-ng.org/[here].
+
+If you want to avoid building your own cross-compiler, you might
+find it easier to use the ELDK. It includes the GNU cross development
+tools, such as the compilers, binutils, gdb, etc., and a number of
+pre-built target tools and libraries required on the target
+system. See http://www.denx.de/wiki/DULG/ELDK[here] for further
+details.
+
+Some other pre-built toolchains:
+
+- Mentor Sourcery CodeBench Lite Edition, available
+http://www.mentor.com/embedded-software/sourcery-tools/sourcery-codebench/editions/lite-edition/[here];
+- Linaro toolchain (for the ARM architecture), available
+https://launchpad.net/linaro-toolchain-binaries[here].
+
+
+[[library-install-examples]]
+Examples of building the Xenomai libraries and tools
+----------------------------------------------------
+
+The examples in following sections use the following conventions:
+
+`$xenomai_root`:: path to the Xenomai sources
+`$build_root`:: path to a clean build directory
+`$staging_dir`:: path to a directory that will hold the installed files
+ temporarily before they are moved to their final location; when used
+ in a cross-compilation setup, it is usually a NFS mount point from
+ the target's root directory to the local build host, as a
+ consequence of which running `make{nbsp}DESTDIR=$staging_dir{nbsp}install` on
+ the host immediately updates the target system with the installed
+ programs and libraries.
+
+CAUTION: In the examples below, make sure to add `--enable-smp` to the
+_configure_ script options if building for an SMP-enabled kernel.
+
+Building the x86 libraries (32/64bit)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Assuming that you want to build the _Mercury_ libraries natively for
+an x86_64/SMP system, enabling shared multi-processing support, you
+would typically run:
+
+------------------------------------------------------------------------------
+$ mkdir $build_root && cd $build_root
+$ $xenomai_root/configure --with-core=mercury --enable-smp --enable-pshared
+$ make install
+------------------------------------------------------------------------------
+
+Conversely, cross-building the _Cobalt_ libraries from x86_64 with the
+same feature set, for running on x86_32 could be:
+
+------------------------------------------------------------------------------
+$ mkdir $build_root && cd $build_root
+$ $xenomai_root/configure --with-core=cobalt --enable-smp --enable-pshared \
+  --host=i686-linux CFLAGS="-m32 -O2" LDFLAGS="-m32"
+$ make install
+------------------------------------------------------------------------------
+
+After installing the build tree (i.e. using "make install"), the
+installation root should be populated with the libraries, programs
+and header files you can use to build Xenomai-based real-time
+applications.  This directory path defaults to `/usr/xenomai`.
+
+The remaining examples illustrate how to cross-compile Xenomai for
+various architectures. Of course, you would have to install the proper
+cross-compilation toolchain for the target system first.
+
+Building the PPC32 libraries
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+A typical cross-compilation setup, in order to build the _Cobalt_
+libraries for a ppc-6xx architecture. In that example, we want the
+debug symbols to be generated for the executable, with no runtime
+overhead though. We use the DENX ELDK cross-compiler:
+
+------------------------------------------------------------------------------
+$ cd $build_root
+$ $xenomai_root/configure --host=powerpc-linux --with-core=cobalt \
+  --enable-debug=symbols
+$ make DESTDIR=$staging_dir install
+------------------------------------------------------------------------------
+
+Building the ARM libraries
+~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+Using codesourcery toolchain named `arm-none-linux-gnueabi-gcc` and
+compiling for a CSB637 board (AT91RM9200 based), a typical cross-compilation
+from a x86_32 desktop would look like:
+
+------------------------------------------------------------------------------
+$ mkdir $build_root/xenomai && cd $build_root/xenomai
+$ $xenomai_root/configure CFLAGS="-march=armv4t" LDFLAGS="-march=armv4t" \
+  --build=i686-pc-linux-gnu --host=arm-none-linux-gnueabi --with-core=cobalt
+$ make DESTDIR=$staging_dir install
+------------------------------------------------------------------------------
+
+IMPORTANT: Unlike previous releases, Xenomai no longer passes any ARM
+architecture-specific flags, or FPU flags, to gcc; so users are
+expected to pass them using the CFLAGS and LDFLAGS variables as
+demonstrated above, where the AT91RM9200 is based on the ARM920T core,
+implementing the `armv4` architecture. The following table summarizes
+the CFLAGS and configure options which were automatically passed in
+previous revisions and which now need to be explicitly passed to
+configure, for the supported SOCs:
+
+.ARM configure options and compilation flags
+[options="header",frame="topbot",grid="cols",cols="2*d,m"]
+|======================================================================
+^|SOC       ^| CFLAGS                           ^| configure options
+|at91rm9200  | `-march=armv4t -msoft-float`        |
+|at91sam9x   | `-march=armv5 -msoft-float`         |
+|imx1        | `-march=armv4t -msoft-float`        |
+|imx21       | `-march=armv5 -msoft-float`         |
+|imx31       | `-march=armv6 -mfpu=vfp`            |
+|imx51/imx53 | `-march=armv7-a -mfpu=vfp3`
+		footnoteref:[armv7,Depending on the
+		gcc versions the flag for armv7
+		may be `-march=armv7-a` or
+		`-march=armv7a`]|
+|imx6q	     | `-march=armv7-a -mfpu=vfp3` footnoteref:[armv7] | --enable-smp
+|ixp4xx      | `-march=armv5 -msoft-float`         | `--enable-arm-tsc=ixp4xx`
+|omap3       | `-march=armv7-a -mfpu=vfp3` footnoteref:[armv7] |
+|omap4       | `-march=armv7-a -mfpu=vfp3` footnoteref:[armv7] | --enable-smp
+|orion       | `-march=armv5 -mfpu=vfp`            |
+|pxa         | `-march=armv5 -msoft-float`         |
+|pxa3xx      | `-march=armv5 -msoft-float`         | --enable-arm-quirks=xscale3
+|s3c24xx     | `-march=armv4t -msoft-float`        |
+|sa1100      | `-march=armv4t -msoft-float`        | --enable-arm-quirks=sa1100
+|======================================================================
+
+It is possible to build for an older architecture version (v6 instead
+of v7, or v4 instead of v5) if your toolchain does not support the
+target architecture, the only restriction being that if SMP is
+enabled, the architecture should not be older than v6.
+
+
+Testing the installation
+------------------------
+
+Booting the _Cobalt_ kernel
+~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+In order to test the Xenomai installation over _Cobalt_, you should
+first try to boot the patched kernel. Check the kernel boot log for
+messages like these:
+
+------------------------------------------------------------------------------
+$ dmesg | grep -i xenomai
+I-pipe: head domain Xenomai registered.
+[Xenomai] Cobalt vX.Y.Z enabled
+------------------------------------------------------------------------------
+
+
+If the kernel fails booting, or the log messages indicates an error
+status instead, see the
+link:troubleshooting-a-dual-kernel-configuration#Kernel_log_displays_Xenomai_or_I-pipe_error_messages[TROUBLESHOOTING]
+guide.
+
+
+Testing the real-time system (both cores)
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+First, run the latency test:
+
+------------------------------------------------------------------------------
+$ /usr/xenomai/bin/latency
+------------------------------------------------------------------------------
+
+The latency test should display a message every second with minimum,
+maximum and average latency values. If this test displays an error
+message, hangs, or displays unexpected values, see the
+link:troubleshooting-a-dual-kernel-configuration#the_latency_test_shows_high_latencies[TROUBLESHOOTING]
+guide.
+
+If the latency test succeeds, you should try next to run the
+`xeno-test` test in order to assess the worst-case latency of your
+system. Try:
+
+------------------------------------------------------------------------------
+$ xeno-test --help
+------------------------------------------------------------------------------
+
+Calibrating the _Cobalt_ core timer
+~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+
+The accuracy of the Cobalt timing services depends on proper
+calibration of its core timer. Sound factory-default calibration
+values are defined for each platform Xenomai supports, but it is
+recommended to calibrate the core timer specifically for the target
+system.
+
+See the documentation about the
+link:../documentation/xenomai-3/html/man1/autotune/index.html[autotune(1)]
+utility.
+
+Building and running Xenomai 3 applications
+-------------------------------------------
+
+Once the latency test behaves as expected on your target system, it is
+deemed ready to run real-time applications.
+
+You may want to have a look at
+link:building-applications-with-xenomai-3.x/[this
+document] for details about the application build process.
+
+In addition, you may refer to
+link:running-applications-with-xenomai-3.x/[this document] to learn
+about the command line options available with Xenomai 3 applications.
+
+Migrating applications to Xenomai 3
+-----------------------------------
+
+If you plan to port an existing application based on Xenomai 2.x to
+Xenomai 3.x, you should have a look at
+link:migrating-from-xenomai-2.x-to-3.x/[this migration guide].
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc
new file mode 100644
index 0000000..778e74f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.COBALT.adoc
@@ -0,0 +1,573 @@
+Troubleshooting a dual kernel configuration
+===========================================
+
+This page is a troubleshooting guide enumerating known issues
+with dual kernel Xenomai configurations.
+
+[TIP]
+If running any release from the Xenomai 2 series, or a Xenomai 3
+release using the *Cobalt* real-time core, then you are using a dual
+kernel configuration, and this document was meant for you.  Xenomai 3
+over the *Mercury* core stands for a single kernel configuration
+instead, for which you can find specific
+link:troubleshooting-a-single-kernel-configuration/[troubleshooting
+information here].
+
+== Kernel-related issues
+
+[[kconf]]
+=== Common kernel configuration issues
+
+When configuring the Linux kernel, some options should be avoided.
+
+CONFIG_CPU_FREQ:: This allows the CPU frequency to be modulated with
+workload, but many CPUs change the TSC counting frequency also, which
+makes it useless for accurate timing when the CPU clock can
+change. Also some CPUs can take several milliseconds to ramp up to
+full speed.
+
+CONFIG_CPU_IDLE:: Allows the CPU to enter deep sleep states,
+increasing the time it takes to get out of these sleep states, hence
+the latency of an idle system. Also, on some CPUs, entering these deep
+sleep states causes the timers used by Xenomai to stop functioning.
+
+CONFIG_KGDB:: This option should not be enabled, except with x86.
+
+CONFIG_CONTEXT_TRACKING_FORCE:: This option, which appeared in kernel
+3.8, is forced off by I-pipe patches from 3.14 onward, as it is
+incompatible with interrupt pipelining, and has no upside for regular
+users. However, you have to manually disable it for older kernels when
+present. Common effects observed with this feature enabled include
+RCU-related kernel warnings during real-time activities, and
+pathologically high latencies.
+
+=== Kernel hangs after "Uncompressing Linux... done, booting the kernel."
+
+This means that the kernel crashes before the console is enabled. You
+should enable the +CONFIG_EARLY_PRINTK+ option. For some architectures
+(x86, arm), enabling this option also requires passing the
++earlyprintk+ parameter on the kernel command line. See
+'Documentation/kernel-parameters.txt' for possible values.
+
+For the ARM architecture, you have to enable +CONFIG_DEBUG_KERNEL+ and
++CONFIG_DEBUG_LL+ in order to be able to enable +CONFIG_EARLY_PRINTK+.
+
+=== Kernel OOPSes
+
+Please make sure to check the <<kconf,"Kernel configuration">> section
+first.
+
+If nothing seems wrong there, try capturing the OOPS information using
+a _serial console_ or _netconsole_, then post it to the
+mailto:xenomai@xenomai.org[xenomai mailing list], along with the
+kernel configuration file (aka `.config`) matching the kernel build.
+
+=== Kernel boots but does not print any message
+
+Your distribution may be configured to pass the +quiet+ option on the
+kernel command line. In this case, the kernel does not print all the
+log messages, however, they are still available using the +dmesg+
+command.
+
+[[kerror]]
+=== Kernel log displays Xenomai or I-pipe error messages
+
+[[no-timer]]
+==== I-pipe: could not find timer for cpu #N
+
+The most probable reason is that no hardware timer chip is available
+for Xenomai timing operations.
+
+Check that you did not enable some of the conflicting options listed
+in the <<kconf,"Kernel configuration">> section.
+
+With AMD x86_64 CPUs:: You will most likely also see the following
+message:
+--------------------------------------------
+I-pipe: cannot use LAPIC as a tick device
+I-pipe: disable C1E power state in your BIOS
+--------------------------------------------
+The interrupt pipeline outputs this message if the C1E option is enabled
+in the BIOS. To fix this issue, disable C1E support in the BIOS. In
+some Award BIOS this option is located in the +Advanced BIOS
+Features->+ menu (+AMD C1E Support+).
+
+[WARNING]
+Disabling the +AMD K8 Cool&Quiet+ feature in the BIOS will *NOT* solve
+this problem.
+
+With other CPU architectures:: The interrupt pipeline implementation
+may lack a registration for a hardware timer available to Xenomai
+timing operations (e.g. a call to +ipipe_timer_register()+).
+
+If you are working on porting the interrupt pipeline to some ARM SoC,
+you may want to have a look at this
+link:porting-xenomai-to-a-new-arm-soc/#The_general_case[detailed
+information].
+
+[[SMI]]
+==== SMI-enabled chipset found, but SMI workaround disabled
+
+You may have an issue with System Management Interrupts on your x86
+platform. You may want to look at
+link:dealing-with-x86-smi-troubles/[this document].
+
+=== Xenomai and Linux devices share the same IRQ vector
+
+This x86-specific issue might still happen on legacy hardware with no
+MSI support. See
+link:what-if-xenomai-and-linux-devices-share-the-same-IRQ[this
+article] from the Knowledge Base.
+
+=== Kernel issues specific to the Xenomai 2.x series
+
+==== system init failed, code -19
+
+See <<no-timer, this entry>>.
+
+==== Local APIC absent or disabled!
+
+The Xenomai 2.x _nucleus_ issues this warning if the kernel
+configuration enables the local APIC support
+(+CONFIG_X86_LOCAL_APIC+), but the processor status gathered at boot
+time by the kernel says that no local APIC support is available.
+There are two options for fixing this issue:
+
+* either your CPU really has _no_ local APIC hardware, in which case
+you need to rebuild a kernel with LAPIC support disabled.
+
+* or it does have a local APIC but the kernel boot parameters did not
+specify to activate it using the _lapic_ option. The latter is
+required since 2.6.9-rc4 for boxes whose APIC hardware is disabled by
+default by the BIOS. You may want to look at the file
+'Documentation/kernel-parameters.txt' from the Linux source tree, for
+more information about this parameter.
+
+== Application-level issues
+
+[[vsyscall]]
+=== --enable-x86-sep needs NPTL and Linux 2.6.x or higher
+or,
+
+=== --enable-x86-vsyscall requires NPTL ...
+
+This message may happen when starting a Xenomai 2.x or 3.x application
+respectively. On the x86 architecture, the configure script option
+mentioned allows Xenomai to use the _vsyscall_ mechanism for issuing
+system calls, based on the most efficient method determined by the
+kernel for the current system. This mechanism is only available from
+NPTL-enabled glibc releases.
+
+Turn off this feature for other libc flavours.
+
+=== Cobalt core not enabled in kernel
+
+As mentioned in the message, the target kernel is lacking Cobalt
+support.  See
+link:installing-xenomai-3-x/#Installing_the_Cobalt_core[this document]
+for detailed information about installing Cobalt.
+
+=== binding failed: Function not implemented
+
+Another symptom of the previous issue, i.e. the Cobalt core is not
+enabled in the target kernel.
+
+=== binding failed: Operation not permitted
+
+This is the result of an attempt to run a Xenomai application as an
+unprivileged user, which fails because invoking Xenomai services
+requires +CAP_SYS_NICE+. However, you may allow a specific group of
+users to access Xenomai services, by following the instructions on
+link:running-a-Xenomai-application-as-a-regular-user[this page].
+
+=== incompatible ABI revision level
+
+Same as below:
+
+=== ABI mismatch
+
+The ABI concerned by this message is the system call binary interface
+between the Xenomai libraries and the real-time kernel services it
+invokes (e.g. +libcobalt+ and the Cobalt kernel with Xenomai
+3.x). This ABI may evolve over time, only between major Xenomai
+releases or testing candidate releases (i.e. -rc series) though. When
+this happens, the ABI level required by the application linked against
+Xenomai libraries may not match the ABI exposed by the Xenomai
+co-kernel implementation on the target machine, which is the situation
+this message reports.
+
+To fix this issue, just make sure to rebuild both the Xenomai kernel
+support and the user-space binaries for your target system. If however
+you did install the appropriate Xenomai binaries on your target
+system, chances are that stale files from a previous Xenomai
+installation still exist on your system, causing the mismatch.
+
+Each major Xenomai release (e.g. 2.1.x, 2.2.x ... 2.6.x, 3.0.x ...)
+defines such kernel/user ABI, which remains stable across minor update
+releases (e.g. 2.6.0 -> 2.6.4). This guarantee makes partial updates
+possible with production systems (i.e. kernel and/or user support).
+For instance, any application built over the Xenomai 2.6.0 binaries
+can run over a Xenomai 2.6.4 kernel support, and conversely.
+
+[TIP]
+Debian-based distributions (notably Ubuntu) may ship with
+pre-installed Xenomai libraries. Make sure that these files don't get
+in the way if you plan to install a more recent Xenomai kernel
+support.
+
+=== <program>: not found
+
+Although the program in question may be present, this message may
+happen on ARM platforms when a mismatch exists between the kernel and
+user library configurations with respect to EABI support. Typically,
+if user libraries are compiled with a toolchain generating OABI code,
+the result won't run over a kernel not enabling the
++CONFIG_OABI_COMPAT+ option. Conversely, the product of a compilation
+with an EABI toolchain won't run on a kernel not enabling the
++CONFIG_AEABI+ option.
+
+=== incompatible feature set
+
+When a Xenomai application starts, the set of core features it
+requires is compared to the feature set the kernel provides. This
+message denotes a mismatch between both sets, which can be solved by
+fixing the kernel and/or user build configuration.  Further details
+are available from link:installing-xenomai-3-x[this page] for Xenomai
+3, and link:installing-xenomai-2-x[this page] for Xenomai 2.
+
+==== feature mismatch: missing="smp/nosmp"
+
+On SMP-capable architectures, both kernel and user-space components
+(i.e. Xenomai libraries) must be compiled with the same setting with
+respect to SMP support.
+
+SMP support in the kernel is controlled via the +CONFIG_SMP+ option.
+The +--enable-smp+ configuration switch enables this feature for the
+Xenomai libraries (conversely, +--disable-smp+ disables it).
+
+[CAUTION]
+Using Xenomai libraries built for a single-processor configuration
+(i.e. +--disable-smp+) over a SMP kernel (i.e. +CONFIG_SMP=y+) is
+*NOT* valid. On the other hand, using Xenomai libraries built with SMP
+support enabled over a single-processor kernel is fine.
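+
+For instance, a sketch matching a +CONFIG_SMP=y+ kernel from the
+user-space side (add your usual configure switches):
+
+-------------------------------------------------------------------------------
+/path/to/xenomai/configure --enable-smp
+-------------------------------------------------------------------------------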
+
+=== Application-level issues specific to the Xenomai 2.x series
+
+The following feature mismatches can be detected with the 2.x series:
+
+==== feature mismatch: missing="kuser_tsc"
+
+See the <<arm-tsc, "ARM tsc emulation issues">> section.
+
+[NOTE]
+This issue does not affect Xenomai 3.x as the latter requires modern
+I-pipe series which must provide _KUSER_TSC_ support on the ARM
+architecture.
+
+==== feature mismatch: missing="sep"
+
+This error is specific to the x86 architecture on Xenomai 2.x, for
+pre-Pentium CPUs which do not provide the _sysenter/sysexit_
+instruction pair. See <<vsyscall, this section>>.
+
+[NOTE]
+This issue does not affect Xenomai 3.x as the latter does not
+support pre-Pentium systems in the first place.
+
+==== feature mismatch: missing="tsc"
+
+This error is specific to the x86 architecture on Xenomai 2.x, for
+pre-Pentium CPUs which do not provide the _rdtsc_ instruction. In this
+particular case, +--enable-x86-tsc+ cannot be mentioned in the
+configuration options for building the user libraries, since the
+processor does not support this feature.
+
+The rule of thumb is to pick the *exact* processor model for your x86
+platform when configuring the kernel, or at the very least the most
+specific model close to the target CPU, not a generic placeholder
+such as _i586_, for which _rdtsc_ is not available.
+
+If your processor does not provide the _rdtsc_ instruction, you have
+to pass the +--disable-x86-tsc+ option to the configure script for
+building the user libraries. In this case, Xenomai will provide a
+(much slower) emulation of the hardware TSC.
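+
+For instance, a sketch for such a pre-Pentium target:
+
+-------------------------------------------------------------------------------
+/path/to/xenomai/configure --disable-x86-tsc
+-------------------------------------------------------------------------------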
+
+[NOTE]
+This issue does not affect Xenomai 3.x as the latter does not
+support pre-Pentium systems in the first place.
+
+[[arm-tsc]]
+==== ARM tsc emulation issues
+
+In order to allow applications to measure short durations with as
+little overhead as possible, Xenomai uses a 64-bit high-resolution
+counter. On x86, the counter used for this purpose is the time-stamp
+counter, readable by the dedicated _rdtsc_ instruction.
+
+ARM processors generally do not have a 64-bit high-resolution counter
+available in user-space, so this counter is emulated by reading
+whatever high-resolution counter the processor provides (the same one
+used as clock source in kernel-space), extending it to 64 bits by
+using data shared with the kernel. If the Xenomai libraries are
+compiled without emulated tsc support, system calls are used instead,
+which have a much higher overhead than the emulated tsc code.
+
+In recent versions of the I-pipe patch, SOCs generally select the
++CONFIG_IPIPE_ARM_KUSER_TSC+ option, which means that the code for
+reading this counter is provided by the kernel at a predetermined
+address (in the vector page, a page which is mapped at the same
+address in every process). This code is used if you pass neither the
++--enable-arm-tsc+ nor the +--disable-arm-tsc+ option to configure,
+or if you pass +--enable-arm-tsc=kuser+.
+
+This default should be fine with recent patches and most ARM
+SOCs.
+
+However, if you see the following message:
+-------------------------------------------------------------------------------
+incompatible feature set
+(userland requires "kuser_tsc...", kernel provides..., missing="kuser_tsc")
+-------------------------------------------------------------------------------
+
+It means that you are either using an old patch, or that the SOC you
+are using does not select the +CONFIG_IPIPE_ARM_KUSER_TSC+ option.
+
+So you should resort to what Xenomai did before branch 2.6: select the
+tsc emulation code when compiling Xenomai user-space support by using
+the +--enable-arm-tsc+ option. The parameter passed to this option is
+the name of the SOC or SOC family for which you are compiling Xenomai.
+Typing:
+-------------------------------------------------------------------------------
+/path/to/xenomai/configure --help
+-------------------------------------------------------------------------------
+
+will return the list of valid values for this option.
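+
+For instance, a hedged sketch assuming an AT91-based board (the value
+is illustrative; pick yours from the list returned above):
+
+-------------------------------------------------------------------------------
+/path/to/xenomai/configure --enable-arm-tsc=at91
+-------------------------------------------------------------------------------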
+
+If, after having enabled this option and recompiled, you see the
+following message when starting the latency test:
+-------------------------------------------------------------------------------
+kernel/user tsc emulation mismatch
+-------------------------------------------------------------------------------
+or
+-------------------------------------------------------------------------------
+Hardware tsc is not a fast wrapping one
+-------------------------------------------------------------------------------
+
+It means that you selected the wrong SOC or SOC family; reconfigure
+the Xenomai user-space support passing the right parameter to
++--enable-arm-tsc+, then recompile.
+
+The following message:
+-------------------------------------------------------------------------------
+Your board/configuration does not allow tsc emulation
+-------------------------------------------------------------------------------
+
+means that the kernel-space support for the SOC you are using does not
+provide support for tsc emulation in user-space. In that case, you
+should recompile Xenomai user-space support passing the
++--disable-arm-tsc+ option.
+
+==== hardware tsc is not a fast wrapping one
+or,
+
+==== kernel/user tsc emulation mismatch
+or,
+
+==== board/configuration does not allow tsc emulation
+
+See the <<arm-tsc, "ARM tsc emulation issues">> section.
+
+==== native skin or CONFIG_XENO_OPT_PERVASIVE disabled
+
+Possible reasons for this error are:
+
+* you booted a kernel without Xenomai or I-pipe support, a kernel with
+I-pipe and Xenomai support should have a '/proc/ipipe/version' and
+'/proc/xenomai/version' files;
+
+* the kernel you booted does not have the +CONFIG_XENO_SKIN_NATIVE+ and
++CONFIG_XENO_OPT_PERVASIVE+ options enabled;
+
+* Xenomai failed to start, check the <<kerror,"Xenomai or I-pipe error
+in the kernel log">> section;
+
+* you are trying to run Xenomai user-space support compiled for x86_32
+on an x86_64 kernel.
+
+==== "warning: <service> is deprecated" while compiling kernel code
+
+Where <service> is a thread creation service, one of:
+
+* +cre_tsk+
+* +pthread_create+
+* +rt_task_create+
+* +sc_tecreate+ or +sc_tcreate+
+* +taskSpawn+ or +taskInit+
+* +t_create+
+
+Starting with Xenomai 3, APIs are not usable from kernel modules
+anymore, with the notable exception of the RTDM device driver API,
+which by essence must be used from kernel space for writing real-time
+device drivers. Those warnings are there to remind you that
+application code should run in user-space context instead, so that it
+can be ported to Xenomai 3.
+
+You may switch those warnings off by enabling the
++CONFIG_XENO_OPT_NOWARN_DEPRECATED+ option in your kernel
+configuration, but nevertheless, you have been *WARNED*.
+
+==== a Xenomai system call fails with code -38 (ENOSYS)
+
+Possible reasons for this error are:
+
+* you booted a kernel without Xenomai or I-pipe support, a kernel with
+I-pipe and Xenomai support should have a '/proc/ipipe/version' and
+'/proc/xenomai/version' files;
+
+* the kernel you booted does not have the +CONFIG_XENO_SKIN_*+ option
+enabled for the skin you use, or +CONFIG_XENO_OPT_PERVASIVE+ is
+disabled;
+
+* Xenomai failed to start, check the <<kerror,"Xenomai or I-pipe error
+in the kernel log">> section;
+
+* you are trying to run Xenomai user-space support compiled for x86_32
+on an x86_64 kernel.
+
+==== the application overconsumes system memory
+
+Your user-space application unexpectedly commits a lot of virtual
+memory, as reported by "+top+" or '/proc/<pid>/maps'. Sometimes OOM
+situations may even appear during runtime on systems with limited
+memory.
+
+The reason is that Xenomai threads are underlaid by regular POSIX
+threads, for which the POSIX threading library commonly reserves a
+large default amount of stack space (8MiB per thread with the
+_glibc_). Therefore, the kernel will commit as much as
+_8MiB{nbsp}*{nbsp}nr_threads_ bytes of RAM to the application, as a
+side-effect of calling the +mlockall()+ service to lock the process
+memory, as Xenomai requires.
+
+This behaviour can be controlled in two ways:
+
+- via the _stacksize_ parameter passed to the various thread creation
+routines, or +pthread_attr_setstacksize()+ directly when using the
+POSIX API.
+
+- by setting a lower user-limit for the initial stack allocation from
+the application's parent shell which all threads from the child
+process inherit, as illustrated below:
+
+---------------------------------------------------------------------
+ulimit -s <initial-size-in-kbytes>
+---------------------------------------------------------------------
+
+==== freeze or machine lockup
+
+Possible reasons may be:
+
+- A stack space overflow biting some real-time kernel thread.
+
+- Spurious delay/timeout values computed by the application
+(specifically: too short).
+
+- A system call failing inside a loop, with its return value never
+checked, which is a classic cause of freezes.
+
+On x86, whenever the nucleus watchdog does not trigger, you may want
+to try disabling CONFIG_X86_UP_IOAPIC while keeping CONFIG_X86_UP_APIC,
+and arming the kernel NMI watchdog on the LAPIC (nmi_watchdog=2). With
+some luck, you will get a backtrace after the freeze. Enabling all the
+nucleus debug options may catch something too.
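+
+For instance, a hedged kernel command line fragment arming the NMI
+watchdog (the other parameters are illustrative):
+
+-------------------------------------------------------------------------------
+linux /boot/vmlinuz root=/dev/sda1 nmi_watchdog=2
+-------------------------------------------------------------------------------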
+
+== Issues when running Xenomai test programs
+
+[[latency]]
+=== Issues when running the _latency_ test
+
+The first test to run to see if Xenomai is running correctly on your
+platform is the latency test. The following sections describe the
+usual reasons for this test not to run correctly.
+
+==== failed to open benchmark device
+
+You have launched +latency -t 1+ or +latency -t 2+ which both require
+the kernel to have been configured with the
++CONFIG_XENO_DRIVERS_TIMERBENCH+ option.
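+
+You can verify this from the kernel configuration file, e.g. (path
+illustrative):
+
+-------------------------------------------------------------------------------
+grep CONFIG_XENO_DRIVERS_TIMERBENCH /path/to/kernel-sources/.config
+-------------------------------------------------------------------------------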
+
+==== the _latency_ test hangs
+
+The most common reason for this issue is a too short period passed
+with the +-p+ option; try increasing the period. If you enable the
+watchdog (option +CONFIG_XENO_OPT_WATCHDOG+, in your kernel
+configuration), you should see the <<short-period, "watchdog triggered
+(period too short?)">> message.
+
+[[short-period]]
+==== watchdog triggered (period too short?)
+
+The built-in Xenomai watchdog has stopped the _latency_ test because
+it was using all the CPU in pure real-time mode (aka _primary
+mode_). This is likely due to a too short period.  Run the _latency_
+test again, passing a longer period using the +-p+ option this time.
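+
+For instance, re-running the test with a 1000 us sampling period:
+
+-------------------------------------------------------------------------------
+latency -p 1000
+-------------------------------------------------------------------------------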
+
+==== the _latency_ test shows high latencies
+
+The _latency_ test runs, but you are seeing high latencies.
+
+* make sure that you carefully followed the <<kconf,"Kernel
+configuration" section>>.
+
+* if running on a Raspberry Pi SBC, make sure you don't hit a firmware
+issue, see https://github.com/raspberrypi/firmware/issues/497.
+
+* if running on a x86 platform, make sure that you do not have an
+issue with SMIs, see the <<SMI, section about SMIs>>.
+
+* if running on a x86 platform with a _legacy USB_ switch available
+from the BIOS configuration, try disabling it.
+
+* if you do not have this option at BIOS configuration level, it does
+not necessarily mean that there is no support for it, and thus no
+potential for high latencies; this support might just be forcibly
+enabled at boot time. To solve this, in case your machine has some USB
+controller hardware, make sure to enable the corresponding host
+controller driver support in your kernel configuration. For instance,
+UHCI-compliant hardware needs +CONFIG_USB_UHCI_HCD+. As part of its
+init chores, the driver should reset the host controller properly,
+kicking the BIOS off the concerned hardware, and deactivating the
+USB legacy mode in the same move if set.
+
+* if you observe high latencies while running X-window, try disabling
+hardware acceleration in the X-window server configuration. With
+recent versions of X-window, try using the 'fbdev' driver. Install it
+(Debian package named 'xserver-xorg-video-fbdev' for instance), then
+modify the +Device+ section of '/etc/X11/xorg.conf' to use this
+driver, as in:
+-------------------------------------------------------------------------------
+Section "Device"
+	Identifier  "Card0"
+	Driver      "fbdev"
+EndSection
+-------------------------------------------------------------------------------
+With older versions of X-window, keep the existing driver, but
+add the following line to the +Device+ section:
+-------------------------------------------------------------------------------
+	Option "NoAccel"
+-------------------------------------------------------------------------------
+
+=== Issues when running the _switchtest_ program
+
+==== pthread_create: Resource temporarily unavailable
+
+The switchtest test creates many kernel threads, an operation which
+consumes memory taken from internal pools managed by the Xenomai
+real-time core.
+
+Xenomai 2.x and 3.x series require +CONFIG_XENO_OPT_SYS_HEAPSZ+ to be
+large enough in the kernel configuration settings, to cope with the
+allocation requests.
+
+Xenomai 2.x may also require increasing the
++CONFIG_XENO_OPT_SYS_STACKPOOLSZ+ setting.
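+
+As a sketch, the corresponding kernel configuration fragment might
+read as follows (sizes are expressed in kilobytes; the values shown
+are illustrative only):
+
+-------------------------------------------------------------------------------
+CONFIG_XENO_OPT_SYS_HEAPSZ=4096
+# Xenomai 2.x only:
+CONFIG_XENO_OPT_SYS_STACKPOOLSZ=2048
+-------------------------------------------------------------------------------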
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc
new file mode 100644
index 0000000..a952e15
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc
@@ -0,0 +1,25 @@
+Troubleshooting a single kernel configuration
+=============================================
+
+This page is a troubleshooting guide enumerating known issues with
+single kernel Xenomai configurations.
+
+[TIP]
+If running Xenomai 3 over the *Mercury* core, then you are using a
+single kernel configuration, and this document was meant for
+you. Otherwise, if you are running any release from the Xenomai 2
+series, or a Xenomai 3 release using the *Cobalt* real-time core, then
+you are using a dual kernel configuration, for which you can find
+specific
+link:troubleshooting-a-dual-kernel-configuration/[troubleshooting information here].
+
+*No entry yet.*
+
+== Application-level issues
+
+=== WARNING: [main] failed to lock memory
+
+Your application needs the CAP_SYS_NICE and CAP_IPC_LOCK capabilities
+to be granted access to Xenomai services (see
+capabilities(7)). Running the application with root privileges is a
+way to gain those capabilities.
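+
+Alternatively, a hedged sketch using the +setcap+ utility from libcap
+to grant only those capabilities to the executable (file name
+illustrative):
+
+-------------------------------------------------------------------------------
+sudo setcap cap_sys_nice,cap_ipc_lock+ep ./myapp
+-------------------------------------------------------------------------------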
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc
new file mode 100644
index 0000000..3462e6b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/autotune.adoc
@@ -0,0 +1,155 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for autotune
+//
+// Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+AUTOTUNE(1)
+==========
+:doctype: manpage
+:revdate: 2014/08/03
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+autotune - Calibrate the Xenomai core clock timer
+
+SYNOPSIS
+---------
+*autotune* [ options ]
+
+DESCRIPTION
+------------
+*autotune* is a utility to determine the best calibration values (aka
+ _gravity triplet_) for the core clock timer.
+
+The time spent traversing the kernel code, from the low-level Xenomai
+timer handler until the kernel-based client handler is invoked, is
+shorter than the time required to schedule in a kernel thread
+instead. It takes even more time to switch in a user-space thread,
+which entails changing the current memory address space and performing
+potentially time-consuming MMU-related operations.
+
+For this reason, Xenomai differentiates timers by the target context
+they activate, among IRQ (handler), kernel and user threads,
+anticipating the next timer shot accordingly, so that such context is
+activated as close as possible to the ideal time. This anticipation is
+called the _gravity_ of the clock serving the timer, which is actually
+a triplet representing the three possible types of contexts the timer
+can activate.
+
+Therefore, the gravity is a static adjustment value to account for the
+basic latency of the target system for responding to timer events, as
+perceived by the client code waiting for the wake up events. Such
+latency is increased by additional factors, such as:
+
+- bus or CPU cache latency,
+- delay required to program the timer chip for the next shot,
+- code running with interrupts disabled on the CPU to receive the IRQ,
+- inter-processor serialization (_spinlocks_).
+
+*autotune* runs a series of internal calibration tests for estimating
+the most appropriate gravity values for its real-time clock timer,
+retaining the final values.
+
+[IMPORTANT]
+*autotune* requires the *CONFIG_XENO_OPT_AUTOTUNE* option to be
+ enabled in the kernel configuration.
+
+OPTIONS
+--------
+*autotune* accepts the following options:
+
+*--irq*::
+Estimate the IRQ gravity value, which is the shortest time the
+platform needs to deliver an IRQ to a Xenomai interrupt handler in
+kernel space.
+
+*--kernel*::
+Estimate the kernel gravity value, which is the shortest time the
+platform needs to deliver an IRQ to a RTDM task running in kernel
+space. This delay includes the context switching time.
+
+*--user*::
+Estimate the user gravity value, which is the shortest time the
+platform needs to deliver an IRQ to a user-space task/thread running
+in a Xenomai application process. This delay includes the context
+switching time.
+
+*--period <ns>*::
+Set the sampling period to the given count of nanoseconds. The
+estimation is performed by measuring the jitter between the ideal time
+at which a timer tick should be received, and the actual time it is
+eventually received, for a series of ticks. This value expresses the
+delay between two of these ticks. If too short, a lockup might
+occur. A commonly observed result is that the larger the delay, the higher
+the latency, due to CPU cache effects (i.e. the real-time code/data is
+more likely to get evicted from the cachelines as the non real-time
+activity can slip in, treading over a larger address space).
+
+*--reset*::
+Reset the gravity values to their factory defaults. These defaults
+are statically defined by the Xenomai platform code.
+
+*--noload*::
+Disable load generation while auto-tuning. *autotune* runs a load
+generator internally in parallel with the latency estimation, in
+order to eliminate irregular delays which tend to appear on fully
+idle systems. Therefore, keeping the load generation enabled most
+often leads to a more accurate estimation.
+
+*--verbose[=level]*::
+Set verbosity to the desired level, 1 means almost quiet (default), 2
+means fully verbose.
+
+*--help*::
+Display a short help.
+
+If none of +--irq+, +--kernel+ or +--user+ is given, an estimation is
+done for each of them in sequence.
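+
+For instance, a sketch estimating all three gravity values with a
+200000 ns sampling period (the period value is illustrative):
+
+-------------------------------------------------------------------------------
+autotune --period=200000
+-------------------------------------------------------------------------------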
+
+VERSIONS
+--------
+*autotune* appeared in Xenomai 3.0 for the _Cobalt_ real-time core.
+
+NOTES
+-----
+The auto-tuning process may take some time to complete the
+estimation. Although this delay may vary across hardware platforms,
+running for 30 seconds is common.
+
+Once the gravity values are known for a particular hardware platform,
+one may write them to +/proc/xenomai/clock/coreclck+ from some system
+init script to set up the Xenomai core clock accordingly, instead of
+running the auto-tuner after each boot, e.g.:
+------------------------------------------------------
+    /* change the user gravity to 1728 ns (default) */
+# echo 1728 > /proc/xenomai/clock/coreclck
+    /* change the IRQ gravity to 129 ns */
+# echo 129i > /proc/xenomai/clock/coreclck
+    /* change the user and kernel gravities to 1728 and 907 ns resp. */
+# echo "1728u 907k" > /proc/xenomai/clock/coreclck
+------------------------------------------------------
+
+Alternatively, the gravity values can be statically defined in the
+kernel configuration of the target kernel:
+
+- CONFIG_XENO_OPT_TIMING_SCHEDLAT should be assigned the user gravity
+  value.
+
+- CONFIG_XENO_OPT_TIMING_KSCHEDLAT should be assigned the kernel
+  gravity value.
+
+- CONFIG_XENO_OPT_TIMING_IRQLAT should be assigned the IRQ gravity
+  value.
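+
+For instance, matching the sample gravity values used above, the
+kernel configuration fragment would read:
+
+-------------------------------------------------------------------------------
+CONFIG_XENO_OPT_TIMING_SCHEDLAT=1728
+CONFIG_XENO_OPT_TIMING_KSCHEDLAT=907
+CONFIG_XENO_OPT_TIMING_IRQLAT=129
+-------------------------------------------------------------------------------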
+
+AUTHOR
+-------
+*autotune* was written by Philippe Gerum <rpm@xenomai.org>.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc
new file mode 100644
index 0000000..5a3c9a6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/chkkconf.adoc
@@ -0,0 +1,117 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for chkkconf
+//
+// Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CHKKCONF(1)
+==========
+:doctype: manpage
+:revdate: 2021/09/23
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+chkkconf - Check kernel .config
+
+SYNOPSIS
+---------
+*chkkconf* [ options ]
+
+DESCRIPTION
+------------
+*chkkconf* is a utility which checks a kernel configuration against a
+specified checklist. The kernel configuration to verify is a regular
+.config file which contains all the settings for building a kernel
+image. The checklist contains a series of single-line assertions
+which are tested against the contents of the kernel configuration.
+The default checklist file +kconf-checklist+ under $datarootdir
+(/usr/xenomai/share/ by default) contains assertions about settings
+which may influence latency with Xenomai. With the default checklist,
+the utility therefore checks a kernel configuration for common issues
+which may increase latency.
+
+
+OPTIONS
+--------
+*chkkconf* accepts the following options:
+
+*--file*:: Specify a regular .config file. If none is specified,
+the command defaults to reading /proc/config.gz on the current
+machine. If this fails because any of CONFIG_IKCONFIG or
+CONFIG_IKCONFIG_PROC was disabled in the running kernel, the
+command fails.
+
+*--check-list*:: Specify a file that contains a series of single-line
+assertions which are tested against the contents of the kernel
+configuration. If none is specified, a default check-list is loaded
+from $datarootdir/kconf-checklist (/usr/xenomai/share/kconf-checklist
+by default). Each assertion follows the BNF-like syntax below:
+
+-------------------------------------------------------------------------------
+assertion   : expr conditions
+            | "!" expr conditions
+
+expr        : symbol             /* matches =y and =m */
+            | symbol "=" tristate
+
+tristate    : "y"
+            | "m"
+            | "n"
+
+conditions  : dependency
+            | dependency arch
+
+dependency  : "if" symbol        /* true if set as y/m */
+
+arch        : "on" cputype
+
+cputype     : $(uname -m)
+-------------------------------------------------------------------------------
+
+For instance:
+
+- "CONFIG_FOO must be set whenever CONFIG_BAR is unset" can be
+written as +CONFIG_FOO if !CONFIG_BAR+.
+
+- "CONFIG_FOO must not be set" can be written as +!CONFIG_FOO+, or
+conversely +CONFIG_FOO=n+.
+
+- "CONFIG_FOO must be built as module on aarch32 or aarch64" can be
+written as +CONFIG_FOO=m on aarch+.
+
+- "CONFIG_FOO must not be built-in on aarch64 if CONFIG_BAR is set"
+can be written as +!CONFIG_FOO=y if CONFIG_BAR on aarch+.
+
+Assertions in the check list may apply to a particular CPU architecture.
+Normally, the command should be able to figure out which architecture
+the kernel configuration file applies to by inspecting the first lines,
+looking for the “Linux/” pattern. However, you might have to specify
+this information manually to the command using the -a option if the file
+referred to by the -f option does not contain such information.
+The architecture name (cputype) should match the output of $(uname -m)
+or some abbreviated portion of it. However, arm64 and arm are automatically
+translated to aarch64 and aarch32 when found in an assertion or passed to
+the -a option.
+
+*--arch*:: Specify the CPU architecture to check for.
+
+*--hash-size*:: Set the hash table size.
+
+*--quiet*:: Suppress output.
+
+*--help*::
+Display a short help.
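+
+For instance, a hedged invocation checking a cross-built arm64
+configuration against the default checklist (the .config path is
+illustrative):
+
+-------------------------------------------------------------------------------
+chkkconf --file=/path/to/linux/.config --arch=arm64
+-------------------------------------------------------------------------------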
+
+VERSIONS
+--------
+*chkkconf* appeared in Xenomai 3.2 for checking kernel '.config' files.
+
+AUTHOR
+-------
+*chkkconf* was written by Philippe Gerum <rpm@xenomai.org> and ported
+by Hongzhan Chen <hongzhan.chen@intel.com> from xenomai4.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc
new file mode 100644
index 0000000..f82df79
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/clocktest.adoc
@@ -0,0 +1,53 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for clocktest
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CLOCKTEST(1)
+============
+:doctype: manpage
+:revdate: 2008/04/01
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+clocktest - Xenomai Clock Test
+
+SYNOPSIS
+--------
+*clocktest* ['OPTIONS']
+
+DESCRIPTION
+-----------
+*clocktest* is part of the Xenomai test suite and tests the selected clock. For each
+CPU, it repeatedly prints a time offset (compared to the reference
+gettimeofday()), a drift value, the number of warps and the maximum warp in
+microseconds.
+
+For this program to work, you need to run a suitable Xenomai enabled kernel
+with the respective module (xeno_posix).
+
+OPTIONS
+-------
+*-C <clock_id>*::
+	clock to be tested, default=0 (CLOCK_REALTIME=0, CLOCK_MONOTONIC=1, 
+CLOCK_HOST_REALTIME=42)
+
+*-T <test_duration_seconds>*::
+	default=0 (Never stop, ^C to end)
+
+*-D*::
+	print extra diagnostics for CLOCK_HOST_REALTIME
+
+AUTHOR
+------
+*clocktest* was written by Jan Kiszka. This man page
+was written by Roland Stigge.
+
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc
new file mode 100644
index 0000000..83f7758
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/corectl.adoc
@@ -0,0 +1,106 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for corectl
+//
+// Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CORECTL(1)
+==========
+:doctype: manpage
+:revdate: 2015/02/14
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+corectl - Cobalt core control interface
+
+SYNOPSIS
+---------
+*corectl* [ options ]
+
+DESCRIPTION
+------------
+*corectl* is a utility to control the state of the Cobalt core,
+switching the real-time services on or off, or retrieving the
+current core status.
+
+OPTIONS
+--------
+*corectl* accepts the following options:
+
+*--stop [<grace-seconds>]*:: Stop the real-time services. The
+following actions are taken in sequence:
+
+- termination of all Xenomai threads running in user-space, waiting
+for them to exit for at most +grace-seconds+ if specified, or
+indefinitely otherwise.
+
+- active RTDM drivers are notified of the transition to the stopped
+state.
+
+- termination of lingering RTDM threads (i.e. running in kernel
+space), waiting for them to exit for at most 3 seconds.
+
+- deactivation of the real-time timing services; control of the
+hardware timer on all real-time CPUs is fully released to the host
+kernel.
+
+Once stopped, the Cobalt core rejects all connection requests from
+regular applications.
+
+*--start*:: Start the real-time services. The following actions are
+taken in sequence:
+
+- activation of the real-time timing services; the Cobalt core takes
+full control over the hardware timer on all real-time CPUs.
+
+- loaded RTDM drivers are notified of the transition to the running
+state.
+
+Once started, the Cobalt core accepts all connection requests from
+regular applications anew.
+
+*--status*:: Display the current Cobalt core status. The following
+statuses can be returned:
+
+- _disabled_ denotes a fully inoperative core. This state cannot be
+reached using the *corectl* command, but only by passing the
++xenomai.state=disabled+ option on the kernel command line. A disabled
+core cannot be started dynamically using *corectl*.
+
+- _stopped_ means that no further connection request will be accepted
+from applications; the real-time services are currently
+unavailable. The Cobalt core can be stopped at boot time by passing
+the +xenomai.state=stopped+ option on the kernel command line. A
+stopped core can be started dynamically using *corectl --start*,
+switching it to the _running_ state.
+
+- _running_ denotes an active state of the real-time core, application
+requests are processed normally. This is the default state entered at
+boot time, which corresponds to passing the +xenomai.state=enabled+
+option on the kernel command line.
+
+- _teardown_ denotes a real-time system in the process of stopping all
+services. This transient status should not be seen unless some threads
+unexpectedly linger although a termination request was issued.
+
+- _warmup_ denotes a real-time system in the process of starting all
+services. This transient status should not be seen unless an RTDM
+driver gets stuck while switching to active mode.
+
+*--help*::
+Display a short help.
+
+If no option is passed, +--status+ is assumed by default.
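+
+For instance, a hedged stop/start cycle, allowing application threads
+five seconds to exit:
+
+-------------------------------------------------------------------------------
+corectl --stop 5
+corectl --status    # should report "stopped"
+corectl --start
+-------------------------------------------------------------------------------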
+
+VERSIONS
+--------
+*corectl* appeared in Xenomai 3.0 for the _Cobalt_ real-time core.
+
+AUTHOR
+-------
+*corectl* was written by Philippe Gerum <rpm@xenomai.org>.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc
new file mode 100644
index 0000000..06e6dad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/cyclictest.adoc
@@ -0,0 +1,80 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for cyclictest
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+CYCLICTEST(1)
+=============
+:doctype: manpage
+:revdate: 2008/04/01
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+cyclictest - Xenomai high resolution timer test
+
+SYNOPSIS
+---------
+// The general command line
+*cyclictest* [options]
+
+DESCRIPTION
+------------
+*cyclictest* is part of the Xenomai test suite and tests the POSIX skin of Xenomai with a cyclic timer test.
+
+For this program to work, you need to run a suitable Xenomai enabled kernel with the respective module (xeno_posix).
+
+OPTIONS
+--------
+*cyclictest* accepts the following options:
+
+*-b USEC, --breaktrace=USEC*::
+send break trace command when latency > USEC
+
+*-c CLOCK, --clock=CLOCK*::
+select clock:
+0 = CLOCK_MONOTONIC (default)
+1 = CLOCK_REALTIME
+
+*-d DIST, --distance=DIST*::
+distance of thread intervals in us, default=500
+
+*-i INTV, --interval=INTV*::
+base interval of thread in us, default=1000
+
+*-l LOOPS, --loops=LOOPS*::
+number of loops: default=0 (endless)
+
+*-n, --nanosleep*::
+use clock_nanosleep
+
+*-p PRIO, --prio=PRIO*::
+priority of highest prio thread
+
+*-q, --quiet*::
+print only a summary on exit
+
+*-r, --relative*::
+use relative timer instead of absolute
+
+//.B -s, --system
+//use sys_nanosleep and sys_setitimer
+
+*-t NUM, --threads=NUM*::
+number of threads: default=1
+
+*-v, --verbose*::
+output values on stdout for statistics +
+format: n:c:v n=tasknum c=count v=value in us
+
+AUTHOR
+-------
+*cyclictest* was written by Thomas Gleixner. This man page
+was written by Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc
new file mode 100644
index 0000000..b37c358
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/dohell.adoc
@@ -0,0 +1,55 @@
+DOHELL(1)
+=========
+:doctype: manpage
+:revdate: 2013/08/25
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+dohell - Generate load, in parallel with the latency test
+
+SYNOPSIS
+---------
+*dohell* [ -b <path> ] [ -s <server> ] [ -p <port> ] [ -m <path> ] [ -l <path> | <duration> ]
+
+DESCRIPTION
+------------
+
+*dohell* generates some load, using commonly available commands, in
+parallel with the link:../latency/index.html[latency(1)] test or as
+part of link:../xeno-test/index.html[xeno-test(1)].
+
+OPTIONS
+--------
+
+*dohell* accepts the following options: 
+
+*-b*:: run the hackbench test repetitively during the run-time of the
+*dohell* script;
+
+*-s*:: run nc to continuously send data to a server through network;
+
+*-p*:: if *-s* is used, specify the port to which to send data; if
+not specified, port 9 (aka discard) is used;
+
+*-m <path>*:: run dd to write data to the *<path>* directory;
+
+*-l <path>*:: mutually exclusive with *<duration>*; the dohell script runs 
+during two runs of the LTP script *runalltests.sh* found in the *<path>* 
+directory;
+
+*<duration>*:: mutually exclusive with *-l*; run dohell for the given duration 
+in seconds.
+
+SEE ALSO
+--------
+
+*link:../xeno-test/index.html[xeno-test(1)], link:../latency/index.html[latency(1)]*.
+
+EXAMPLE
+--------
+--------------------------------------------------------------------------------
+dohell -s 192.168.0.5 -m /mnt -l /ltp
+--------------------------------------------------------------------------------
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc
new file mode 100644
index 0000000..bd32ea8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/gpiobench.adoc
@@ -0,0 +1,70 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for gpiobench
+//
+// Copyright (C) 2020 song chen <chensong@tj.kylinos.cn>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+GPIOBENCH(1)
+==========
+:doctype: manpage
+:revdate: 2020/08/03
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+gpiobench - Xenomai gpio latency benchmark
+
+SYNOPSIS
+---------
+// The general command line
+*gpiobench* [ options ]
+
+DESCRIPTION
+------------
+*gpiobench* is part of the Xenomai test suite. It is a gpio latency
+benchmark program.  The system must run a suitable Xenomai enabled kernel with
+the respective module (xeno_timerbench).
+
+OPTIONS
+--------
+*gpiobench* accepts the following options:
+
+*-h <histogram-size>*::
+default = 100, increase if your last bucket is full
+
+*-l <num-of-loops>*::
+default=1000, number of loops to run the test
+
+*-q <quiet>*::
+print only a summary on exit
+
+*-m <test-mode>*::
+0 = loopback (default), 1 = react
+
+*-c <pin-controller>*::
+name of pin controller
+
+*-o <output-pin>*::
+number of gpio pin as output
+
+*-i <interrupt-pin>*::
+number of gpio pin as interrupt
+
+*-p <priority>*::
+default = 99, task priority
+
+*-b <breaktrace>*::
+default = 1000, send break trace command when latency > breaktrace
+
+
+
+AUTHOR
+-------
+*gpiobench* was written by song chen. This man page
+was written by song chen.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc
new file mode 100644
index 0000000..42a1ae1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/latency.adoc
@@ -0,0 +1,85 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for latency
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+LATENCY(1)
+==========
+:doctype: manpage
+:revdate: 2008/04/19
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+latency - Xenomai timer latency benchmark
+
+SYNOPSIS
+---------
+// The general command line
+*latency* [ options ]
+
+DESCRIPTION
+------------
+*latency* is part of the Xenomai test suite. It is a timer latency
+benchmark program.  The system must run a suitable Xenomai enabled kernel with
+the respective module (xeno_timerbench).
+
+OPTIONS
+--------
+*latency* accepts the following options:
+
+*-h*::
+print histograms of min, avg, max latencies
+
+*-g <file>*::
+dump histogram to <file> in a format easily readable with gnuplot. An
+example script for gnuplot may be found in scripts/histo.gp in Xenomai
+sources distribution
+
+*-s*::
+print statistics of min, avg, max latencies
+
+*-H <histogram-size>*::
+default = 200, increase if your last bucket is full
+
+*-B <bucket-size>*::
+default = 1000ns, decrease for more resolution
+
+*-p <period_us>*::
+sampling period
+
+*-l <data-lines per header>*::
+default=21, 0 to suppress headers
+
+*-T <test_duration_seconds>*::
+default=0, so ^C to end
+
+*-q*::
+suppresses RTD, RTH lines if -T is used
+
+*-t <test_mode>*::
+0=user task (default), 1=kernel task, 2=timer IRQ
+
+*-f*::
+freeze trace for each new max latency
+
+*-c <cpu>*::
+pin measuring task down to given CPU
+
+*-P <priority>*::
+task priority (test mode 0 and 1 only)
+
+*-b*::
+break upon mode switch
+
+AUTHOR
+-------
+*latency* was written by Philippe Gerum. This man page
+was written by Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc
new file mode 100644
index 0000000..4d3255e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanconfig.adoc
@@ -0,0 +1,63 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for rtcanconfig
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+RTCANCONFIG(1)
+==============
+:doctype: manpage
+:revdate: 2008/04/19
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+rtcanconfig - Xenomai tool for configuring the CAN controller
+
+SYNOPSIS
+---------
+// The general command line
+*rtcanconfig* <can-interface> [options] [up|down|start|stop|sleep]
+
+DESCRIPTION
+------------
+*rtcanconfig* is part of Xenomai. It is used to configure the CAN
+controller. The system must run a suitable Xenomai enabled kernel with the
+respective module (CAN).
+
+OPTIONS
+--------
+*rtcanconfig* accepts the following options:
+
+*-v, --verbose*::
+be verbose
+
+*-h, --help*::
+a usage description
+
+*-c, --ctrlmode=CTRLMODE*::
+listenonly, loopback or none
+
+*-b, --baudrate=BPS*::
+baudrate in bits/sec
+
+*-B, --bittime=BTR0:BTR1*::
+BTR or standard bit-time
+
+*-B, --bittime=BRP:PROP_SEG:PHASE_SEG1:PHASE_SEG2:SJW:SAM*
+
+SEE ALSO
+--------
+*link:../rtcanrecv/index.html[rtcanrecv(1)], link:../rtcansend/index.html[rtcansend(1)]*.
+
+AUTHORS
+--------
+*rtcanconfig* was written by Wolfgang Grandegger and
+Sebastian Smolorz. This man page was
+written by Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc
new file mode 100644
index 0000000..570e5b4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcanrecv.adoc
@@ -0,0 +1,72 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for rtcanrecv
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+RTCANRECV(1)
+============
+:doctype: manpage
+:revdate: 2008/04/19
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+rtcanrecv - Xenomai tool for receiving CAN messages
+
+SYNOPSIS
+---------
+// The general command line
+*rtcanrecv* [<can-interface>] [Options]
+
+DESCRIPTION
+------------
+*rtcanrecv* is part of Xenomai. It is used to receive messages via a CAN
+interface. The system must run a suitable Xenomai enabled kernel with the
+respective module (xeno_native and the CAN driver).
+
+OPTIONS
+--------
+<can-interface> is the CAN interface file.
+
+*rtcanrecv* accepts the following options:
+
+*-f, --filter=id:mask[:id:mask]...*::
+apply filter
+
+*-e, --error=mask*::
+receive error messages
+
+*-t, --timeout=MS*::
+timeout in ms
+
+*-T, --timestamp*::
+with absolute timestamp
+
+*-R, --timestamp-rel*::
+with relative timestamp
+
+*-v, --verbose*::
+be verbose
+
+*-p, --print=MODULO*::
+print every MODULO message
+
+*-h, --help*::
+this help
+
+SEE ALSO
+--------
+*link:../rtcanconfig/index.html[rtcanconfig(1)], link:../rtcansend/index.html[rtcansend(1)]*.
+
+AUTHORS
+--------
+*rtcanrecv* was written by Wolfgang Grandegger, Jan
+Kiszka and Philippe Gerum. This man page
+was written by Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc
new file mode 100644
index 0000000..990f574
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/rtcansend.adoc
@@ -0,0 +1,86 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for rtcansend
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+RTCANSEND(1)
+============
+:doctype: manpage
+:revdate: 2008/04/19
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+rtcansend - Xenomai tool for sending CAN messages
+
+SYNOPSIS
+---------
+// The general command line
+*rtcansend* <can-interface> [Options] <can-msg>
+
+DESCRIPTION
+------------
+*rtcansend* is part of Xenomai. It is used to send messages via a CAN
+interface. The system must run a suitable Xenomai enabled kernel with the
+respective module (xeno_native and the CAN driver).
+
+OPTIONS
+--------
+<can-interface> is the CAN interface file.
+
+<can-msg> can consist of up to 8 bytes given as a space separated list.
+
+*rtcansend* accepts the following options:
+
+*-i, --identifier=ID*::
+CAN Identifier (default = 1)
+
+*-r, --rtr*::
+send remote request
+
+*-e, --extended*::
+send extended frame
+
+*-l, --loop=COUNT*::
+send message COUNT times
+
+*-c, --count*::
+message count in data[0-3]
+
+*-d, --delay=MS*::
+delay in ms (default = 1ms)
+
+*-s, --send*::
+use send instead of sendto
+
+*-t, --timeout=MS*::
+timeout in ms
+
+*-L, --loopback=0|1*::
+switch local loopback off or on
+
+*-v, --verbose*::
+be verbose
+
+*-p, --print=MODULO*::
+print every MODULO message
+
+*-h, --help*::
+a usage description
+
+SEE ALSO
+--------
+*link:../rtcanconfig/index.html[rtcanconfig(1)], link:../rtcanrecv/index.html[rtcanrecv(1)]*.
+
+AUTHORS
+--------
+*rtcansend* was written by Wolfgang Grandegger, Jan
+Kiszka and Philippe Gerum. This man page
+was written by Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc
new file mode 100644
index 0000000..b59b992
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/slackspot.adoc
@@ -0,0 +1,138 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for slackspot
+//
+// Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+SLACKSPOT(1)
+==========
+:doctype: manpage
+:revdate: 2014/06/26
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+----
+slackspot - Trace secondary mode switches
+
+SYNOPSIS
+---------
+*slackspot* [ options ]
+
+DESCRIPTION
+------------
+*slackspot* is a utility to decode the trace data collected by the
+Cobalt core when CONFIG_XENO_OPT_DEBUG_TRACE_RELAX is enabled in the
+kernel configuration.
+
+This data describes each call hierarchy causing migration to secondary
+mode (i.e. _relaxes_) within the application. *slackspot* presents
+such data in a human-readable format as symbolic stack backtraces, for
+helping in debugging spurious relaxes.
+
+OPTIONS
+--------
+*slackspot* accepts the following options:
+
+*--file <trace-file>*::
+Read the trace information to decode from _trace-file_. By default,
+trace data is read from +/proc/xenomai/debug/relax+ unless the
+standard input stream was redirected, in which case +stdin+ is read.
+In addition, the dash character "-" is interpreted as a placeholder
+for +stdin+.
+
+*--path <dir[:dir...]>*::
+Search the given directory list for executables and dynamic
+libraries. Directories are separated by a colon within the
+list. Each directory may be scanned for binary executables when
+resolving symbols found in stack backtraces.
+
+*--filter-in <name=exp[,name=...]>*::
+Only retain backtraces matching the given filters in the output. Each
+filter is specified by a _name=<expr>_ pair, where _name_ identifies
+the information field to be matched in the backtrace, and _expr_ is a
+regular expression which should match such data. Filters are separated
+by a comma within the list. The available filters are as follows:
+
+   * _thread_ matches the thread name.
+   * _pid_ matches the kernel task identifier, i.e. per-task _pid_.
+   * _exe_ matches the name of the main executable being traced.
+   * _function_ matches the name of the function being traced.
+   * _file_ matches the path of the source file being traced.
+   * _map_ matches the path of the mapped executable being traced.
+
+*--filter <name=exp[,name=...]>*::
+A shorthand for *--filter-in*.
+
+*--filter-out <name=exp[,name=...]>*::
+Only retain backtraces NOT matching the given filters in the
+output. This option inverts the sense of matching defined by
+*--filter-in*.
+
+*CROSS_COMPILE=<toolchain-prefix>*::
+A cross-compilation toolchain prefix should be specified for decoding
+the data obtained from a target system, on a build/development
+machine. When present, the value of CROSS_COMPILE will be prepended to
++gcc+ and +addr2line+ for running the corresponding utilities on the
+development system.
+
+VERSIONS
+--------
+
+*slackspot* appeared in Xenomai 3.0 for the _Cobalt_ real-time core.
+
+EXAMPLE
+-------
+
+In the following scenario, the _target_ system built with the
+CONFIG_XENO_OPT_DEBUG_TRACE_RELAX feature enabled in the kernel
+configuration, just ran the _/bin/relax_ program.
+
+This program caused a transition to secondary mode of the
+current task (_Task 2_) as a result of calling +putchar()+. The Cobalt
+core saved the corresponding backtrace information, which is now
+available from +/proc/xenomai/debug/relax+ on the target system.
+
+Since the target system has limited horsepower, and doesn't have
+access to the binary utilities required for decoding the trace data,
+we will send such data over the network to the _host_ system, in order
+for the latter to do the decoding and display the call stacks.
+
+We use the standard +netcat+ utility to send and receive the contents
+of +/proc/xenomai/debug/relax+ over the wire between the target and
+host systems. The host will also have to mention where the
+cross-compilation toolchain can be found, by setting the CROSS_COMPILE
+variable appropriately. The example assumes that
++/opt/rootfs/MPC5200/lib+ is the host-based location of the system
+libraries mounted over NFS onto the target file hierarchy.
+
+.On the target system:
+---------------------------------------------------------------------------
+target> netcat -l -p 67676 -c < /proc/xenomai/debug/relax
+---------------------------------------------------------------------------
+.On the host system:
+---------------------------------------------------------------------------
+host> netcat target 67676 | CROSS_COMPILE=ppc_6xx- slackspot
+      --path=/opt/rootfs/MPC5200/lib:$HOME/frags/relax --filter thread=Task*
+Thread[828] "Task 2" started by /bin/relax:
+   #0  0xfff00000 ???
+   #1  0x000001bb ???
+   #2  0x00064393 _IO_file_doallocate() in ??:?
+   #3  0x00073d6f _IO_doallocbuf() in ??:?
+   #4  0x00072d87 _IO_file_overflow() in ??:?
+   #5  0x00075f83 __overflow() in ??:?
+   #6  0x0006997b putchar() in ??:?
+   #7  0x100017db task2_func() in /home/rpm/frags/relax/relax.c:23
+   #8  0x000078d7 task_entry() in /home/rpm/git/xenomai-forge/lib/alchemy/task.c:235
+   #9  0x00005a6b start_thread() in pthread_create.c:?
+   #10 0x000d389f __clone() in ??:?
+---------------------------------------------------------------------------
+
+AUTHOR
+-------
+*slackspot* was written by Philippe Gerum <rpm@xenomai.org>.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc
new file mode 100644
index 0000000..aa93a29
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/switchtest.adoc
@@ -0,0 +1,106 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for switchtest
+//
+// Copyright (C) 2008 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+SWITCHTEST(1)
+=============
+:doctype: manpage
+:revdate: 2008/04/19
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+switchtest - Xenomai context switch test
+
+SYNOPSIS
+---------
+// The general command line
+*switchtest* [options] threadspec threadspec...
+
+DESCRIPTION
+------------
+*switchtest* is part of Xenomai. It can be used to test thread context
+switches. *switchtest* creates threads of various types and attempts to
+switch context between these threads, printing the count of context switches
+every second. A suitable Xenomai enabled kernel with the respective module
+(xeno_posix) must be installed.
+
+OPTIONS
+--------
+Each threadspec specifies the characteristics of a thread to be created:
+
+threadspec = (rtk|rtup|rtus|rtuo)(_fp|_ufpp|_ufps)\*[0-9]*
+
+*rtk*::
+for a kernel-space real-time thread
+
+*rtup*::
+for a user-space real-time thread running in primary mode
+
+*rtus*::
+for a user-space real-time thread running in secondary mode
+
+*rtuo*::
+for a user-space real-time thread oscillating between primary and secondary mode
+
+*_fp*::
+means that the created thread will have the XNFPU bit armed (only valid for rtk)
+
+*_ufpp*::
+means that the created thread will use the FPU when in primary mode (invalid for rtus)
+
+*_ufps*::
+means that the created thread will use the FPU when in secondary mode (invalid for rtk and rtup)
+
+*[0-9]*::
+specifies the ID of the CPU where the created thread will run, 0 if unspecified
+
+Passing no
+*threadspec*
+is equivalent to running:
+
+switchtest rtkN rtkN rtk_fpN rtk_fpN rtk_fp_ufppN rtk_fp_ufppN rtupN rtupN rtup_ufppN
+rtup_ufppN rtusN rtusN rtus_ufpsN rtus_ufpsN rtuoN rtuoN rtuo_ufppN rtuo_ufppN rtuo_ufpsN
+rtuo_ufpsN rtuo_ufpp_ufpsN rtuo_ufpp_ufpsN
+
+with N=1,...,nr_cpus, i.e. occurrences of all the arguments for each CPU
+
+Passing only the --nofpu or -n argument is equivalent to running:
+
+switchtest rtkN rtkN rtupN rtupN rtusN rtusN rtuoN rtuoN
+
+similar to the above.
+
+*switchtest* accepts the following options:
+
+*--help, -h*::
+print usage information and exit
+
+*--lines <lines>, -l <lines>*::
+print headers every <lines> lines
+
+*--quiet or -q*::
+prevent this program from printing the count of context switches
+every second
+
+*--really-quiet or -Q*::
+prevent this program from printing any output
+
+*--timeout <duration>, -T <duration>*::
+limit the test duration to <duration> seconds
+
+*--nofpu, -n*::
+disable any use of FPU instructions
+
+AUTHOR
+-------
+*switchtest* was written by Philippe Gerum and Gilles
+Chanteperdrix. This man page was written by
+Roland Stigge.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc
new file mode 100644
index 0000000..81f67ad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-config.adoc
@@ -0,0 +1,217 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for xeno-config
+//
+// Copyright (C) 2005, 2006 Romain Lenglet <rlenglet@users.forge.objectweb.org>
+// Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+XENO-CONFIG(1)
+==============
+:doctype: manpage
+:revdate: 2014/08/03
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+xeno-config - Retrieve Xenomai build flags and configuration
+
+SYNOPSIS
+---------
+*xeno-config*
+
+*xeno-config* *--v* | *--verbose*
+
+*xeno-config* *--help*
+
+*xeno-config* *--info*
+
+*xeno-config* *--core*
+
+*xeno-config* *--version*
+
+*xeno-config* [*--cc*] [*--ccld*] [*--arch*] [*--prefix*] [*--posix|alchemy|rtdm|psos|vxworks|smokey*] [*--compat*] [*--auto-init*|*no-auto-init*] [*--auto-init-solib*] [*--mode-check*|*no-mode-check*] [*--cflags*] [*--ldflags*] [*--library-dir*|*libdir*|*user-libdir*]
+
+DESCRIPTION
+------------
+*xeno-config* is a shell script which is aimed at retrieving the
+Xenomai build configuration data, such as the compiler and linker
+flags required for building applications. For this reason,
+*xeno-config* is typically used in Makefiles.
+
+*xeno-config --verbose* dumps the build configuration data in a
+human-readable format.
+
+Invoking *xeno-config* without any options is equivalent to running
+*xeno-config --verbose --help*.
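+
+For instance, a minimal sketch building a POSIX-skin application over
+Cobalt (file names are illustrative):
+
+-------------------------------------------------------------------------------
+cc -o app app.c $(xeno-config --posix --cflags) $(xeno-config --posix --ldflags)
+-------------------------------------------------------------------------------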
+
+OPTIONS
+--------
+*--v, --verbose*::
+Output all configuration information in a human-readable format.
+
+*--help*:: Output the list of available command-line options. The
+command exits immediately after completion.
+
+*--version*::
+Output the Xenomai version.
+
+*--cc*::
+Output the path to the C compiler command used to build the Xenomai
+libraries and utilities, which is therefore suitable for compiling a
+Xenomai application.
+
+*--ccld*::
+Output a C compiler command suitable for linking a Xenomai
+application.
+
+*--arch*::
+Output the target CPU architecture Xenomai was compiled for, e.g. arm,
+x86, powerpc etc.  This may differ from the CPU architecture of the
+current system, if cross-compiling.
+
+*--prefix*::
+Output the absolute path to the Xenomai installation directory.
+
+*--[skin=]{posix, alchemy, rtdm, psos, vxworks, smokey, cobalt}*::
+Select the API/skin for which *xeno-config* should print the
+information required. The *skin=* prefix is optional and may be
+omitted, e.g. *--posix* is equivalent to *--skin=posix*, selecting the
+POSIX API.
+
+[NOTE]
+*--native* and *--skin=native* are accepted for backward compatibility
+purposes. They are strictly equivalent to passing *--alchemy --compat*.
+Likewise, passing *--rtdm* or *--skin=rtdm* is strictly equivalent to
+passing *--posix*, enabling POSIX I/O routines to be wrapped to their
+respective Xenomai implementation.
+
+[CAUTION]
+Over Cobalt, only *xeno-config --posix --ldflags* (or *--rtdm* as an
+alias) returns the proper linker flags to cause POSIX routines invoked
+by the application to be wrapped to their respective Xenomai
+implementation. No other API will imply such wrapping. For this
+reason, *--cobalt --ldflags* should be used for linking exclusively
+against the Cobalt library (i.e. +libcobalt.so+) *without* symbol
+wrapping. Conversely, mentioning *--posix* along with other API
+switches with *--ldflags* will cause POSIX symbol wrapping to take
+place, e.g. use *--posix --alchemy --ldflags* for mixed API support
+with POSIX symbol wrapping.
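+
+The following sketch contrasts the three cases (a hedged example;
++app.o+ is a placeholder object file):
+
+--------------------------------------------------------------------------------
+# POSIX routines wrapped to their Xenomai implementation:
+$ cc -o app app.o $(xeno-config --posix --ldflags)
+# Link against libcobalt only, no symbol wrapping:
+$ cc -o app app.o $(xeno-config --cobalt --ldflags)
+# Mixed Alchemy + POSIX support, with POSIX symbol wrapping:
+$ cc -o app app.o $(xeno-config --posix --alchemy --ldflags)
+--------------------------------------------------------------------------------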
+
+*--cflags*::
+Output the C compiler command-line options (_CFLAGS_) which are required
+to compile applications based on the selected Xenomai API/skin.
+
+*--ldflags*::
+Output the C compiler command-line options (_LDFLAGS_) which are
+required to link applications based on the selected Xenomai API/skin.
+
+*--library-dir, --libdir, --user-libdir*::
+These switches are synonyms; they retrieve the absolute path to the
+Xenomai libraries.
+
+*--auto-init*::
+*--no-auto-init*::
+
+By default, a process started from an executable linked with flags
+returned by *xeno-config --ldflags* performs Xenomai-related inits
+automatically, before the +main()+ routine is entered.
+
+Building with *--no-auto-init* disables such initialization. In this
+case, the application code shall call the +xenomai_init()+ routine
+manually, as part of its initialization chores on behalf of the
++main()+ routine, *before* any real-time service is invoked. See
++include/xenomai/init.h+.
+
+NOTE: This flag only makes sense when passed along with --ldflags.
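+
+A hedged sketch of the manual initialization path (+app.o+ is a
+placeholder; the +xenomai_init()+ call lives in the application code):
+
+--------------------------------------------------------------------------------
+# Disable the automatic bootstrap at link time:
+$ cc -o app app.o $(xeno-config --posix --no-auto-init --ldflags)
+# app's main() is then expected to run xenomai_init(&argc, &argv)
+# before invoking any real-time service.
+--------------------------------------------------------------------------------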
+
+*--mode-check*::
+*--no-mode-check*::
+
+Over Cobalt, a set of standard routines which may invoke regular Linux
+system calls can trigger an assertion failure on entry, if the caller
+must leave the real-time mode (aka "secondary mode switch") to execute
+such a routine.
+
+The assertion failure is triggered if the calling thread has set the
+PTHREAD_WARNSW flag by a call to +pthread_setmode_np()+.
+
+By default, the mode checking routines are substituted for the original
+ones using the symbol wrapping mechanism also used for interposing on
+POSIX services. *--no-mode-check* disables such substitution.
+
+These flags only make sense when passed along with *--ldflags*.
+
+*--auto-init-solib*::
+
+This switch enables the auto-initialization feature described above
+for a shared library target instead of a pure executable. The main
+difference is that position-independent (PIC) glue code is used for
+bootstrapping the initialization.
+
+The bootstrap code runs when the shared library is attached to a
+running executable, either because it appears in the static
+dependencies of this executable, or when loaded dynamically via the
++dlopen()+ interface.
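+
+For example, a shared library may be built as follows (an illustrative
+sketch; +libfoo.so+ and +foo.c+ are placeholders):
+
+--------------------------------------------------------------------------------
+$ cc -shared -fPIC -o libfoo.so foo.c \
+      $(xeno-config --posix --auto-init-solib --ldflags)
+--------------------------------------------------------------------------------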
+
+*--core*::
+Output the name of the real-time core the current Xenomai installation
+was built for. The possible values are _cobalt_ or _mercury_,
+depending on the configuration switch *--with-core* used for building
+the Xenomai libraries and utilities.
+
+*--compat*::
+Enable the Xenomai 2.x compatibility mode for the API/skin
+selected. This switch affects the _Alchemy_ and POSIX APIs, turning on
+a set of source-level compatibility wrappers when present.
+
+*--info*::
+Dump information about the running Xenomai-enabled system. Unlike most
+other options, *--info* is aimed at being used on the target system
+running Xenomai, for retrieving the current setup information. The
+output of this command is valuable information when reporting any
+runtime issue to mailto:xenomai@xenomai.org[the Xenomai mailing
+list]. The command exits immediately after completion.
+
+ENVIRONMENT VARIABLES
+---------------------
+
+*DESTDIR*::
+
+Xenomai's handling of *DESTDIR* conforms to the GNU coding and
+installation standards, for generating pathnames rooted at some
+staging area on the build system. Such a staging area is commonly
+NFS-mounted from the target system running Xenomai.
+
+If the *DESTDIR* variable is set in the environment of *xeno-config*,
+its content is prepended to all directory and file names based on the
+Xenomai installation root which may be output by the command.
+
+If *DESTDIR* was set when installing Xenomai, typically after
+cross-compiling, *DESTDIR* must be set to the same value before
+calling *xeno-config* for accessing the target-based directories and
+files from the build system.
+
+For example:
+
+----------------------------------------------------------------------------
+$ configure --prefix=/usr --includedir=/usr/include/xenomai
+$ make install DESTDIR=/nfsroot/target
+$ DESTDIR=/nfsroot/target /nfsroot/target/bin/xeno-config --alchemy --cflags
+-I/nfsroot/target/usr/include/xenomai/cobalt
+-I/nfsroot/target/usr/include/xenomai -D_GNU_SOURCE
+-D_REENTRANT -D__COBALT__
+-I/nfsroot/target/usr/include/xenomai/alchemy
+----------------------------------------------------------------------------
+
+EXIT STATUS
+-----------
+
+*0*:: Success.
+
+*non-zero*:: Error.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc
new file mode 100644
index 0000000..3c9eb54
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno-test.adoc
@@ -0,0 +1,47 @@
+XENO-TEST(1)
+============
+:doctype: manpage
+:revdate: 2013/08/25
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+xeno-test - Run latency test under load
+
+SYNOPSIS
+---------
+*xeno-test* [ -l loadscript ] [ latency options ]
+
+DESCRIPTION
+------------
+
+*xeno-test* runs a series of tests, finishing with the latency test
+run under a user-provided load script, in order to measure the
+best-case and worst-case latencies. The default command used to
+generate load is "dohell 900".
+
+OPTIONS
+--------
+*xeno-test* accepts the following options:
+
+*-l <loadscript>*::
+Run <loadscript> while the latency test is running, in order to measure
+latency under load. The link:../dohell/index.html[dohell(1)] script is
+provided for this purpose; see its link:../dohell/index.html[manual page]
+for more details.
+
+*other options*::
+Any other options are passed to the latency test; see
+link:../latency/index.html[latency(1)] for the list of supported options.
+
+SEE ALSO
+--------
+
+*link:../dohell/index.html[dohell(1)], link:../latency/index.html[latency(1)]*.
+
+EXAMPLE
+--------
+--------------------------------------------------------------------------------
+xeno-test -l "dohell -s 192.168.0.5 -m /mnt -l /ltp" -p 100 -g histo
+--------------------------------------------------------------------------------
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc
new file mode 100644
index 0000000..5c5cd7c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/man1/xeno.adoc
@@ -0,0 +1,37 @@
+// ** The above line should force tbl to be a preprocessor **
+// Man page for xeno
+//
+// Copyright (C) 2010 Roland Stigge <stigge@antcom.de>
+//
+// You may distribute under the terms of the GNU General Public
+// License as specified in the file COPYING that comes with the
+// Xenomai distribution.
+//
+//
+XENO(1)
+=======
+:doctype: manpage
+:revdate: 2010/02/14
+:man source: Xenomai
+:man version: {xenover}
+:man manual: Xenomai Manual
+
+NAME
+-----
+xeno - Wrapper for Xenomai executables
+
+SYNOPSIS
+---------
+*xeno* [xenomai command]
+
+DESCRIPTION
+------------
+*xeno*
+is a wrapper script that hides distribution-specific installation
+prefixes when running standard Xenomai commands.
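+
+For example, assuming the Xenomai test programs are installed, the
+latency test may be started through the wrapper (illustrative
+invocation):
+
+--------------------------------------------------------------------------------
+$ xeno latency -p 100
+--------------------------------------------------------------------------------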
+
+AUTHOR
+-------
+The wrapper script xeno and this manpage were initially written by
+Roland Stigge for the Debian project but may be
+used elsewhere.
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf
new file mode 100644
index 0000000..3b1b6d6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.conf
@@ -0,0 +1,12 @@
+[http-inlinemacro]
+<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}"
+[https-inlinemacro]
+<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}"
+[ftp-inlinemacro]
+<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}"
+[irc-inlinemacro]
+<ulink url="{name}:{target}">{0={name}:{target}}: </ulink>"{name}:{target}"
+[mailto-inlinemacro]
+<ulink url="mailto:{target}">{0={target}}: </ulink>"{target}"
+[callto-inlinemacro]
+<ulink url="{name}:{target}">{0={target}}: </ulink>"{target}"
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl
new file mode 100644
index 0000000..ddfb960
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext.xsl
@@ -0,0 +1,5 @@
+<?xml version='1.0'?>
+<xsl:stylesheet  xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+<xsl:import href="/etc/asciidoc/docbook-xsl/xhtml.xsl"/>
+<xsl:output method="html" encoding="ascii" indent="no"/>
+</xsl:stylesheet>
diff --git a/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk
new file mode 100644
index 0000000..a1f2a5a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/asciidoc/plaintext_postproc.awk
@@ -0,0 +1,64 @@
+# Join URLs that the plaintext renderer wrapped across lines, and turn
+# numbered section titles into underlined plaintext headings.
+BEGIN {
+	# A quoted URL that opens on this line but is not closed before EOL.
+	link_re="\"(http|file|https|ftp|irc|mailto):[^#\"]*$"
+}
+
+# Start of the table of contents: copy it through verbatim.
+/Table of Contents/ {
+	in_toc=1
+	print $0
+	next
+}
+
+in_toc && /^([ \t]*[0-9]\.|$)/ {
+	print $0
+	next
+}
+
+in_toc {
+	in_toc=0
+}
+
+# A URL was split across lines: print the text before it and save the
+# unfinished part for reassembly.
+$0 ~ link_re {
+	i = match($0, link_re)
+	print substr($0, 1, i - 1)
+	unfinished_url=substr($0, i)
+	next
+}
+
+# Closing quote found: emit the reassembled URL.
+unfinished_url && /"/ {
+	sub(/^[ \t]*/,"")
+	print unfinished_url$0
+	unfinished_url=0
+	next
+}
+
+unfinished_url {
+	sub(/^[ \t]*/,"")
+	unfinished_url=unfinished_url$0
+	next
+}
+
+# Remember a numbered section title; it is reprinted underlined below.
+/^[0-9]\.[0-9.]*/ {
+	title=$0
+	next
+}
+
+title && /^[ \t]*$/ {
+	print ""
+	print title
+	gsub(/./, "-", title)
+	print title
+	print $0
+	title=0
+	next
+}
+
+title {
+	print title
+	print $0
+	title=0
+	next
+}
+
+{
+	print $0
+}
diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am b/kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am
new file mode 100644
index 0000000..964dfdf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/doxygen/Makefile.am
@@ -0,0 +1,43 @@
+HTML_DOCS = html/xeno3prm html/xeno3prm/search
+PDF_DOCS  = xeno3prm.pdf
+EXTRA_DIST = xeno3prm-common.conf.in xeno3prm-html.conf.in xeno3prm-latex.conf.in
+
+if XENO_BUILD_DOC
+
+HTML_DOCSDIR = ./
+PDF_DOCSDIR = ./
+
+all-local: html pdf
+
+html/xeno3prm/search: html/xeno3prm
+
+html: $(HTML_DOCS)
+
+pdf: $(PDF_DOCS)
+
+html/xeno3prm latex/xeno3prm: FORCE
+	@mkdir -p $@
+	$(DOXYGEN) $(@F)-$(@D).conf
+
+%.pdf: latex/%
+	$(MAKE) -C $< refman.pdf
+	mv $</refman.pdf $@
+
+distclean-local:
+	for dir in *-html *-latex; do \
+	    if test -d $$dir ; then $(RM) -R $$dir ; fi ; \
+	done
+
+.PHONY: FORCE
+
+.DELETE_ON_ERROR:
+
+include $(top_srcdir)/doc/install.rules
+
+install-data-local: install-docs-local
+uninstall-local: uninstall-docs
+
+else
+install-data-local:
+uninstall-local:
+endif
diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in
new file mode 100644
index 0000000..cd77650
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-common.conf.in
@@ -0,0 +1,879 @@
+# Doxyfile 1.3.4
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = "Xenomai"
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or 
+# if some version control system is used.
+
+PROJECT_NUMBER         = @PACKAGE_VERSION@
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
+# base path where the generated documentation will be put. 
+# If a relative path is entered, it will be relative to the location 
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = .
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
+# documentation generated by doxygen is written. Doxygen will use this 
+# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: 
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, 
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en 
+# (Japanese with English messages), Korean, Norwegian, Polish, Portuguese, 
+# Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE        = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
+# include brief member descriptions after the members that are listed in 
+# the file and class documentation (similar to JavaDoc). 
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
+# the brief description of a member or function before the detailed description. 
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
+# Doxygen will generate a detailed section even if there is only a brief 
+# description.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited 
+# members of a class in the documentation of that class as if those members were 
+# ordinary class members. Constructors, destructors and assignment operators of 
+# the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
+# path before files name in the file list and in the header files. If set 
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
+# can be used to strip a user-defined part of the path. Stripping is 
+# only done if one of the specified strings matches the left-hand part of 
+# the path. It is allowed to use relative paths in the argument list.
+
+STRIP_FROM_PATH        = @top_srcdir@/
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
+# (but less readable) file names. This can be useful if your file system 
+# doesn't support long names, like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like the Qt-style comments (thus requiring an 
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF      = YES
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
+# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
+# comments) as a brief description. This used to be the default behaviour. 
+# The new default is to treat a multi-line C++ comment block as a detailed 
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
+# member inherits the documentation from any documented member that it 
+# reimplements.
+
+INHERIT_DOCS           = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
+# tag is set to YES, then doxygen will reuse the documentation of the first 
+# member in the group (if any) for the other members of the group. By default 
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = YES
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that acts 
+# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
+# put the command \sideeffect (or @sideeffect) in the documentation, which 
+# will result in a user-defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                = \
+ 		       "coretags{1}=@par Tags^^@ref cobalt-core-tags \"\1\""	\
+ 		       "apitags{1}=@par Tags^^@ref api-tags \"\1\""	\
+		       "sideeffect=@par Side effects^^"
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources 
+# only. Doxygen will then generate output that is more tailored for C. 
+# For instance, some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources 
+# only. Doxygen will then generate output that is more tailored for Java. 
+# For instance, namespaces will be presented as packages, qualified scopes 
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
+# the same type (for instance a group of public functions) to be put as a 
+# subgroup of that type (e.g. under the Public Functions section). Set it to 
+# NO to prevent subgrouping. Alternatively, this can be done per class using 
+# the \nosubgrouping command.
+
+SUBGROUPING            = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in 
+# documentation are documented, even if no documentation was available. 
+# Private class members and static file members will be hidden unless 
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL            = NO
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
+# will be included in the documentation.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file 
+# will be included in the documentation.
+
+EXTRACT_STATIC         = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
+# defined locally in source files will be included in the documentation. 
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
+# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the 
+# various overviews, but no documentation section is generated. 
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = YES
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
+# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these classes will be included in the various 
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = YES
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
+# friend (class|struct|union) declarations. 
+# If set to NO (the default) these declarations will be included in the 
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
+# documentation blocks found inside the body of a function. 
+# If set to NO (the default) these blocks will be appended to the 
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation 
+# that is typed after a \internal command is included. If the tag is set 
+# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = YES
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
+# file names in lower-case letters. If set to YES upper-case letters are also 
+# allowed. This is useful if you have classes or files whose names only differ 
+# in case and if your file system supports case sensitive file names. Windows 
+# users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
+# will show members with their full class and namespace scopes in the 
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
+# will put a list of the files that are included by a file in the documentation 
+# of that file.
+
+SHOW_INCLUDE_FILES     = @DOXYGEN_SHOW_INCLUDE_FILES@
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
+# will sort the (detailed) documentation of file and class members 
+# alphabetically by member name. If set to NO the members will appear in 
+# declaration order.
+
+SORT_MEMBER_DOCS       = YES
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or 
+# disable (NO) the todo list. This list is created by putting \todo 
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or 
+# disable (NO) the test list. This list is created by putting \test 
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or 
+# disable (NO) the bug list. This list is created by putting \bug 
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
+# disable (NO) the deprecated list. This list is created by putting 
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional 
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       = 
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
+# the initial value of a variable or define consists of for it to appear in 
+# the documentation. If the initializer consists of more lines than specified 
+# here it will be hidden. Use a value of 0 to hide initializers completely. 
+# The appearance of the initializer of individual variables and defines in the 
+# documentation can be controlled using \showinitializer or \hideinitializer 
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
+# at the bottom of the documentation of classes and structs. If set to YES the 
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated 
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are 
+# generated by doxygen. Possible values are YES and NO. If left blank 
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = NO
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
+# potential errors in the documentation, such as not documenting some 
+# parameters in a documented function, or documenting parameters that 
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that 
+# doxygen can produce. The string should contain the $file, $line, and $text 
+# tags, which will be replaced by the file and line number from which the 
+# warning originated and the warning text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning 
+# and error messages should be written. If left blank the output is written 
+# to stderr.
+
+WARN_LOGFILE           = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# If the value of the INPUT tag contains directories, you can use the 
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank the following patterns are tested: 
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp 
+# *.h++ *.idl *.odl *.cs *.php *.php3 *.inc
+
+FILE_PATTERNS          = *.c \
+                         *.h \
+                         *.dox
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories 
+# should be searched for input files as well. Possible values are YES and NO. 
+# If left blank NO is used.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should 
+# be excluded from the INPUT source files. This way you can easily exclude a 
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE                =
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or directories 
+# that are symbolic links (a Unix filesystem feature) are excluded from the input.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the 
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
+# certain files from those directories.
+
+EXCLUDE_PATTERNS       = 
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp 
+# and *.h) to filter out the source-files in the directories. If left 
+# blank all files are included.
+
+EXAMPLE_PATTERNS       = *.c
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
+# searched for input files to be used with the \include or \dontinclude 
+# commands irrespective of the value of the RECURSIVE tag. 
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = YES
+
+# The IMAGE_PATH tag can be used to specify one or more files or 
+# directories that contain images that are included in the documentation (see 
+# the \image command).
+
+IMAGE_PATH             = 
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should 
+# invoke to filter for each input file. Doxygen will invoke the filter program 
+# by executing (via popen()) the command <filter> <input-file>, where <filter> 
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
+# input file. Doxygen will then use the output that the filter program writes 
+# to standard output.
+
+INPUT_FILTER           =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
+# INPUT_FILTER) will be used to filter the input files when producing source 
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
+# be generated. Documented entities will be cross-referenced with these sources.
+
+SOURCE_BROWSER         = NO
+
+# Setting the INLINE_SOURCES tag to YES will include the body 
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
+# doxygen to hide any special comment blocks from generated source code 
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default) 
+# then for each documented function all documented 
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default) 
+# then for each documented function all documented entities 
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = YES
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
+# will generate a verbatim copy of the header file for each class for 
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
+# of all compounds will be generated. Enable this if the project 
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = NO
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all 
+# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX          = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output 
+# The RTF output is optimised for Word 97 and may not look very pretty with 
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
+# RTF documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
+# will contain hyperlink fields. The RTF file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other 
+# programs which support those fields. 
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's 
+# config file, i.e. a series of assignments. You only have to provide 
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    = 
+
+# Set optional variables used in the generation of an rtf document. 
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will 
+# generate man pages
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will 
+# generate an XML file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT             = xml
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
+# generate an AutoGen Definitions (see autogen.sf.net) file 
+# that captures the structure of the code including all 
+# documentation. Note that this feature is still experimental 
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
+# generate a Perl module file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
+# nicely formatted so it can be parsed by a human reader.  This is useful 
+# if you want to understand what is going on.  On the other hand, if this 
+# tag is set to NO the size of the Perl module output will be much smaller 
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file 
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same 
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX = 
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = YES
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF     = YES
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files 
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
+INCLUDE_FILE_PATTERNS  = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed.
+
+PREDEFINED = DOXYGEN_CPP			\
+	CONFIG_SMP				\
+        "dref_type(T)=opaque"			\
+        "EXPORT_SYMBOL_GPL(symbol)=//"		\
+        "DECLARE_BITMAP(symbol, nr)=unsigned long symbol[BITS_TO_LONGS(nr)]" \
+	"COBALT_IMPL(T,I,A)=T I A"		\
+	"COBALT_DECL(T,P)=T P"			\
+	"COBALT_SYSCALL(N,M,T,A)=T N A"		\
+	"COBALT_SYSCALL_DECL(N,T,A)=T N A"
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line, have an all uppercase name, and do not end with a semicolon. Such 
+# function macros are typically used for boiler-plate code, and will confuse the 
+# parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. 
+# Optionally an initial location of the external documentation 
+# can be added for each tagfile. The format of a tag file without 
+# this location is as follows: 
+#   TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: 
+#   TAGFILES = file1=loc1 "file2 = loc2" ... 
+# where "loc1" and "loc2" can be relative or absolute paths or 
+# URLs. If a location is present for each tag, the installdox tool 
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen 
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES               = 
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       = 
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
+# in the class index. If set to NO only the inherited external classes 
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
+# in the modules index. If set to NO, only the current project's groups will 
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or 
+# super classes. Setting the tag to NO turns the diagrams off. Note that this 
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is 
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide 
+# inheritance and usage relations if the target is undocumented 
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
+# available from the path. This tool is part of Graphviz, a graph visualization 
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT               = @DOXYGEN_HAVE_DOT@
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect inheritance relations. Setting this tag to YES will force the 
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect implementation dependencies (inheritance, containment, and 
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
+# collaboration diagrams in a style similar to the OMG's Unified Modeling 
+# Language.
+
+UML_LOOK               = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the 
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
+# tags are set to YES then doxygen will generate a graph for each documented 
+# file showing the direct and indirect include dependencies of the file with 
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
+# documented header file showing the documented files that directly or 
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will 
+# generate a call dependency graph for every global function or class method. 
+# Note that enabling this option will significantly increase the time of a run. 
+# So in most cases it will be better to enable call graphs for selected 
+# functions only using the \callgraph command.
+
+CALL_GRAPH             = NO
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT       = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH               = 
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that 
+# contain dot files that are included in the documentation (see the 
+# \dotfile command).
+
+DOTFILE_DIRS           = 
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
+# graphs generated by dot. A depth value of 3 means that only nodes reachable 
+# from the root by following a path via at most 3 edges will be shown. Nodes 
+# that lay further from the root node will be omitted. Note that setting this 
+# option to 1 or 2 may greatly reduce the computation time needed for large 
+# code bases. Also note that a graph may be further truncated if the graph's 
+# image dimensions are not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH 
+# and MAX_DOT_GRAPH_HEIGHT). If 0 is used for the depth value (the default), 
+# the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent 
+# background. This is disabled by default, which results in a white background. 
+# Warning: Depending on the platform used, enabling this option may lead to 
+# badly anti-aliased labels on the edges of a graph (i.e. they become hard to 
+# read).
+
+DOT_TRANSPARENT        = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES allow dot to generate multiple output 
+# files in one run (i.e. multiple -o and -T options on the command line). This 
+# makes dot run faster, but since only newer versions of dot (>1.8.10) 
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS      = YES
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
+# generate a legend page explaining the meaning of the various boxes and 
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
+# remove the intermediate dot files that are used to generate 
+# the various graphs.
+
+DOT_CLEANUP            = YES
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT                  =						\
+ 		       @top_srcdir@/include				\
+ 		       @top_srcdir@/kernel/cobalt			\
+ 		       @top_srcdir@/kernel/drivers			\
+ 		       @top_srcdir@/lib/cobalt				\
+ 		       @top_srcdir@/lib/copperplate			\
+ 		       @top_srcdir@/lib/smokey				\
+ 		       @top_srcdir@/lib/analogy				\
+ 		       @top_srcdir@/lib/alchemy				\
+ 		       @top_srcdir@/lib/vxworks				\
+ 		       @top_srcdir@/lib/psos				\
+ 		       @top_srcdir@/lib/trank
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH           =				\
+		       @top_srcdir@/demo/posix		\
+		       @top_srcdir@/demo/alchemy	\
+		       @top_srcdir@/utils
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH           = @top_srcdir@/include		\
+		       	 @top_srcdir@/include/cobalt	\
+		       	 @top_srcdir@/include/mercury
diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in
new file mode 100644
index 0000000..3936a13
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-html.conf.in
@@ -0,0 +1,196 @@
+@INCLUDE = xeno3prm-common.conf
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = html/xeno3prm
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet
+
+HTML_STYLESHEET        =
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output dir.
+
+CHM_FILE               =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW      = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE           = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantage is that it is more difficult to set up 
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH    = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         = pxfonts
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode 
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = @LATEX_BATCHMODE@
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
diff --git a/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in
new file mode 100644
index 0000000..6a25bf7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/doxygen/xeno3prm-latex.conf.in
@@ -0,0 +1,186 @@
+@INCLUDE = xeno3prm-common.conf
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML          = NO
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            =
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER            =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER            =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet.
+
+HTML_STYLESHEET        =
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output dir.
+
+CHM_FILE               =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls whether a separate .chi index file is generated (YES) or
+# included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+,
+# Netscape 6.0+, Internet explorer 5.0+, or Konqueror). Windows users are
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW      = YES
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE           = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate LaTeX output.
+
+GENERATE_LATEX         = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex/xeno3prm
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, a4wide, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         = pxfonts
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = @LATEX_BATCHMODE@
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
diff --git a/kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am b/kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am
new file mode 100644
index 0000000..d1f605c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/gitdoc/Makefile.am
@@ -0,0 +1,43 @@
+if XENO_BUILD_DOC
+git-src-check: FORCE
+	@if test \! -e $(top_srcdir)/.git ; then \
+		echo "$@ wants $(top_srcdir) to be a GIT working tree." ; \
+		/bin/false; \
+	fi
+else
+git-src-check:
+endif
+
+INPUT_DOCS = 							\
+	asciidoc/pages/Installing_Xenomai_3.x.adoc		\
+	asciidoc/pages/Running_Apps_with_Xenomai_3.x.adoc	\
+	asciidoc/pages/Migrating_to_Xenomai_3.x.adoc		\
+	asciidoc/pages/Troubleshooting_dual_kernel.adoc		\
+	asciidoc/pages/Troubleshooting_single_kernel.adoc
+
+OUTPUT_DOCS = 						\
+	doc/asciidoc/README.INSTALL.adoc		\
+	doc/asciidoc/README.APPLICATIONS.adoc		\
+	doc/asciidoc/MIGRATION.adoc			\
+	doc/asciidoc/TROUBLESHOOTING.COBALT.adoc	\
+	doc/asciidoc/TROUBLESHOOTING.MERCURY.adoc
+
+all-local: git-src-check
+	@set -e; if test \! x$(XENO_DOC_GIT) = x; then	\
+		if test -d doc.git; then \
+			(cd doc.git && git pull --quiet --force); \
+		else \
+			git clone --branch master --depth 1 --quiet \
+			$(XENO_DOC_GIT) doc.git; \
+		fi; \
+		set -- $(OUTPUT_DOCS); \
+		for doc in $(INPUT_DOCS); do \
+			cp doc.git/$$doc $(top_srcdir)/$$1; \
+			shift; \
+		done; \
+	fi
+
+clean-local:
+	$(RM) -R doc.git
+
+.PHONY: FORCE
diff --git a/kernel/xenomai-v3.2.4/doc/install.rules b/kernel/xenomai-v3.2.4/doc/install.rules
new file mode 100644
index 0000000..10eaed0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/doc/install.rules
@@ -0,0 +1,56 @@
+# -*- makefile -*-
+# Generic rules for installation and distribution of documentation.
+#
+# Parameters :
+# HTML_DOCS list of html documentation directories
+# PDF_DOCS list of pdf files
+# MAN1_DOCS list of man1 files
+#
+# HTML_DOCSDIR: root of generated HTML files
+# PDF_DOCSDIR: root of generated PDF files
+# MAN_DOCSDIR: root of generated manN sub-directories
+
+install-htmldocs: $(HTML_DOCS:%=$(HTML_DOCSDIR)%)
+	docs="$(HTML_DOCS)"; abs_builddir=$$PWD; \
+	for dir in $$docs; do \
+	    dest=$(DESTDIR)$(htmldir)/$$dir; \
+	    $(mkinstalldirs) $$dest; \
+	    abs_dest=`cd $$dest && pwd` ; \
+	    cd $(HTML_DOCSDIR)$$dir || exit 1; \
+	    for f in * ; do \
+		case $$f in \
+		    *~|CVS|.svn|[mM]akefile*|GNUmakefile*);; \
+		    *) $(INSTALL_DATA) $$f $$abs_dest/$$f;; \
+		esac; \
+	    done; cd $$abs_builddir; \
+	done
+
+install-pdfdocs: $(PDF_DOCS:%=$(PDF_DOCSDIR)%)
+	docs="$^"; dest=$(DESTDIR)$(pdfdir); \
+	$(mkinstalldirs) $$dest && \
+	for f in $$docs; do \
+	    $(INSTALL_DATA) $$f $$dest; \
+	done
+
+install-man1: $(MAN1_DOCS:%=$(MAN_DOCSDIR)%)
+	docs="$^"; dest=$(DESTDIR)$(mandir)/man1; \
+	$(mkinstalldirs) $$dest && \
+	for f in $$docs; do \
+	    $(INSTALL_DATA) $$f $$dest; \
+	done
+
+install-mandocs: install-man1
+
+install-docs-local: install-htmldocs install-pdfdocs install-mandocs
+
+# To make distcheck happy.
+uninstall-docs:
+	if test -n "$(HTML_DOCS)" -o -n "$(PDF_DOCS)" -o -n "$(MAN1_DOCS)"; then \
+	    targets="$(HTML_DOCS:%=$(DESTDIR)$(htmldir)/%) \
+		$(PDF_DOCS:%=$(DESTDIR)$(pdfdir)/%) \
+		$(MAN1_DOCS:%=$(DESTDIR)$(mandir)/%)"; \
+	    for t in $$targets; do \
+		if test -d $$t; then $(RM) $$t/*; $(RM) -r $$t; \
+		else $(RM) $$t; fi; \
+	    done; \
+	fi
diff --git a/kernel/xenomai-v3.2.4/include/COPYING b/kernel/xenomai-v3.2.4/include/COPYING
new file mode 100644
index 0000000..e6afb50
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/COPYING
@@ -0,0 +1,305 @@
+
+As a special exception to the following license, the Xenomai
+project gives permission for additional uses of the header files
+contained in this directory.
+
+The exception is that, if you include these header files unmodified to
+produce application programs executing in user-space that use
+Xenomai services by normal Xenomai system calls, this does not
+by itself cause the resulting executable to be covered by the GNU
+General Public License. This is merely considered normal use of the
+Xenomai system, and does not fall under the heading of "derived
+work".
+
+This exception does not however invalidate any other reasons why the
+executable file might be covered by the GNU General Public License. In
+any case, this exception never applies when the application code is
+built as a static or dynamically loadable portion of the Linux kernel.
+
+This exception applies only to the code released by the Xenomai
+project under the name Xenomai and bearing this exception notice.
+If you copy code from other sources into a copy of Xenomai, the
+exception does not apply to the code that you add in this way.
+
+----------------------------------------------------------------------
+
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/include/Makefile.am b/kernel/xenomai-v3.2.4/include/Makefile.am
new file mode 100644
index 0000000..1e9fe02
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/Makefile.am
@@ -0,0 +1,31 @@
+nodist_include_HEADERS=$(CONFIG_HEADER)
+
+SUBDIRS = 		\
+	boilerplate	\
+	copperplate	\
+	smokey		\
+	alchemy		\
+	psos 		\
+	rtdm		\
+	trank		\
+	vxworks		\
+	xenomai
+
+if XENO_COBALT
+SUBDIRS += cobalt
+else
+SUBDIRS += mercury
+endif
+
+DIST_SUBDIRS = 		\
+	alchemy		\
+	boilerplate	\
+	cobalt 		\
+	copperplate	\
+	mercury		\
+	psos 		\
+	rtdm		\
+	smokey		\
+	trank		\
+	vxworks		\
+	xenomai
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/Makefile.am b/kernel/xenomai-v3.2.4/include/alchemy/Makefile.am
new file mode 100644
index 0000000..8cbc3b8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/Makefile.am
@@ -0,0 +1,15 @@
+includesubdir = $(includedir)/alchemy
+
+includesub_HEADERS =	\
+	alarm.h		\
+	buffer.h	\
+	compat.h	\
+	cond.h		\
+	event.h		\
+	heap.h		\
+	mutex.h		\
+	pipe.h		\
+	queue.h		\
+	sem.h		\
+	task.h		\
+	timer.h
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/alarm.h b/kernel/xenomai-v3.2.4/include/alchemy/alarm.h
new file mode 100644
index 0000000..b57197a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/alarm.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_ALARM_H
+#define _XENOMAI_ALCHEMY_ALARM_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+#include <alchemy/compat.h>
+
+/**
+ * @addtogroup alchemy_alarm
+ * @{
+ */
+
+struct RT_ALARM {
+	uintptr_t handle;
+};
+
+typedef struct RT_ALARM RT_ALARM;
+
+/**
+ * @brief Alarm status descriptor
+ * @anchor RT_ALARM_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a real-time alarm, returned by a call to rt_alarm_inquire().
+ */
+struct RT_ALARM_INFO {
+	/**
+	 * Number of past expiries.
+	 */
+	unsigned long expiries;
+	/**
+	 * Name of alarm object.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+	/**
+	 * Active flag.
+	 */
+	int active;
+};
+
+typedef struct RT_ALARM_INFO RT_ALARM_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+CURRENT_DECL(int, rt_alarm_create(RT_ALARM *alarm,
+				  const char *name,
+				  void (*handler)(void *arg),
+				  void *arg));
+
+CURRENT_DECL(int, rt_alarm_delete(RT_ALARM *alarm));
+
+int rt_alarm_start(RT_ALARM *alarm,
+		   RTIME value,
+		   RTIME interval);
+
+int rt_alarm_stop(RT_ALARM *alarm);
+
+int rt_alarm_inquire(RT_ALARM *alarm,
+		     RT_ALARM_INFO *info);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_ALARM_H */
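The declarations above cover the whole alarm life cycle: create, start/stop,
inquire, delete. A minimal usage sketch of this API (illustrative only, not
part of the patch; the names tick and run_alarm_demo are hypothetical, and
RTIME values are assumed to be in nanoseconds):

	#include <alchemy/alarm.h>

	static void tick(void *arg)
	{
		/* invoked on each alarm expiry */
	}

	int run_alarm_demo(void)
	{
		RT_ALARM alarm;
		int ret;

		ret = rt_alarm_create(&alarm, "demo-alarm", tick, NULL);
		if (ret)
			return ret;

		/* first expiry after 1 ms, then periodically every 1 ms */
		ret = rt_alarm_start(&alarm, 1000000, 1000000);
		if (ret)
			return ret;

		/* ... */
		rt_alarm_stop(&alarm);
		return rt_alarm_delete(&alarm);
	}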
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/buffer.h b/kernel/xenomai-v3.2.4/include/alchemy/buffer.h
new file mode 100644
index 0000000..9c0c4e6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/buffer.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_BUFFER_H
+#define _XENOMAI_ALCHEMY_BUFFER_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+
+/**
+ * @addtogroup alchemy_buffer
+ * @{
+ */
+
+/** Creation flags. */
+#define B_PRIO  0x1	/* Pend by task priority order. */
+#define B_FIFO  0x0	/* Pend by FIFO order. */
+
+struct RT_BUFFER {
+	uintptr_t handle;
+};
+
+typedef struct RT_BUFFER RT_BUFFER;
+
+/**
+ * @brief Buffer status descriptor
+ * @anchor RT_BUFFER_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a real-time buffer, returned by a call to rt_buffer_inquire().
+ */
+struct RT_BUFFER_INFO {
+	/**
+	 * Number of tasks waiting on the read side of the buffer for
+	 * input data.
+	 */
+	int iwaiters;
+	/**
+	 * Number of tasks waiting on the write side of the buffer for
+	 * sending out data.
+	 */
+	int owaiters;
+	/**
+	 * Overall size of buffer (in bytes).
+	 */
+	size_t totalmem;
+	/**
+	 * Amount of memory currently available for holding more data.
+	 */
+	size_t availmem;
+	/**
+	 * Name of the buffer.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+};
+
+typedef struct RT_BUFFER_INFO RT_BUFFER_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int rt_buffer_create(RT_BUFFER *bf,
+		     const char *name,
+		     size_t bufsz,
+		     int mode);
+
+int rt_buffer_delete(RT_BUFFER *bf);
+
+ssize_t rt_buffer_write_timed(RT_BUFFER *bf,
+			      const void *ptr, size_t size,
+			      const struct timespec *abs_timeout);
+
+static inline
+ssize_t rt_buffer_write_until(RT_BUFFER *bf,
+			      const void *ptr, size_t size,
+			      RTIME timeout)
+{
+	struct timespec ts;
+	return rt_buffer_write_timed(bf, ptr, size,
+				     alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+ssize_t rt_buffer_write(RT_BUFFER *bf,
+			const void *ptr, size_t size,
+			RTIME timeout)
+{
+	struct timespec ts;
+	return rt_buffer_write_timed(bf, ptr, size,
+				     alchemy_rel_timeout(timeout, &ts));
+}
+
+ssize_t rt_buffer_read_timed(RT_BUFFER *bf,
+			     void *ptr, size_t size,
+			     const struct timespec *abs_timeout);
+
+static inline
+ssize_t rt_buffer_read_until(RT_BUFFER *bf,
+			     void *ptr, size_t size,
+			     RTIME timeout)
+{
+	struct timespec ts;
+	return rt_buffer_read_timed(bf, ptr, size,
+				    alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+ssize_t rt_buffer_read(RT_BUFFER *bf,
+		       void *ptr, size_t size,
+		       RTIME timeout)
+{
+	struct timespec ts;
+	return rt_buffer_read_timed(bf, ptr, size,
+				    alchemy_rel_timeout(timeout, &ts));
+}
+
+int rt_buffer_clear(RT_BUFFER *bf);
+
+int rt_buffer_inquire(RT_BUFFER *bf,
+		      RT_BUFFER_INFO *info);
+
+int rt_buffer_bind(RT_BUFFER *bf,
+		   const char *name, RTIME timeout);
+
+int rt_buffer_unbind(RT_BUFFER *bf);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_BUFFER_H */
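The inline wrappers above simply map relative (rt_buffer_read/write) or
absolute (*_until) RTIME timeouts onto the *_timed primitives. A minimal
round-trip sketch (illustrative, not part of the patch; TM_INFINITE is the
block-forever timeout constant from alchemy/timer.h, names are hypothetical):

	#include <alchemy/buffer.h>

	int run_buffer_demo(void)
	{
		RT_BUFFER bf;
		char out[8] = "hello", in[8];
		ssize_t n;
		int ret;

		ret = rt_buffer_create(&bf, "demo-buffer", 4096, B_FIFO);
		if (ret)
			return ret;

		/* relative-timeout wrapper; would block while the buffer is full */
		n = rt_buffer_write(&bf, out, sizeof(out), TM_INFINITE);
		if (n > 0)
			n = rt_buffer_read(&bf, in, sizeof(in), TM_INFINITE);

		return rt_buffer_delete(&bf);
	}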
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/compat.h b/kernel/xenomai-v3.2.4/include/alchemy/compat.h
new file mode 100644
index 0000000..0113879
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/compat.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_COMPAT_H
+#define _XENOMAI_ALCHEMY_COMPAT_H
+
+#include <trank/trank.h>
+
+#endif /* _XENOMAI_ALCHEMY_COMPAT_H */
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/cond.h b/kernel/xenomai-v3.2.4/include/alchemy/cond.h
new file mode 100644
index 0000000..7043179
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/cond.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_COND_H
+#define _XENOMAI_ALCHEMY_COND_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+#include <alchemy/mutex.h>
+
+/**
+ * @addtogroup alchemy_cond
+ * @{
+ */
+
+struct RT_COND {
+	uintptr_t handle;
+};
+
+typedef struct RT_COND RT_COND;
+
+/**
+ * @brief Condition variable status descriptor
+ * @anchor RT_COND_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a condition variable, returned by a call to rt_cond_inquire().
+ */
+struct RT_COND_INFO {
+	/**
+	 * Name of condition variable.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+};
+
+typedef struct RT_COND_INFO RT_COND_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int rt_cond_create(RT_COND *cond,
+		   const char *name);
+
+int rt_cond_delete(RT_COND *cond);
+
+int rt_cond_signal(RT_COND *cond);
+
+int rt_cond_broadcast(RT_COND *cond);
+
+int rt_cond_wait_timed(RT_COND *cond,
+		       RT_MUTEX *mutex,
+		       const struct timespec *abs_timeout);
+static inline
+int rt_cond_wait_until(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_cond_wait_timed(cond, mutex,
+				  alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+int rt_cond_wait(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_cond_wait_timed(cond, mutex,
+				  alchemy_rel_timeout(timeout, &ts));
+}
+
+int rt_cond_inquire(RT_COND *cond,
+		    RT_COND_INFO *info);
+
+int rt_cond_bind(RT_COND *cond,
+		 const char *name, RTIME timeout);
+
+int rt_cond_unbind(RT_COND *cond);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_COND_H */
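As with POSIX condition variables, the intended pattern is to test a
predicate under the mutex and re-test it after every wakeup. A sketch
(illustrative, not part of the patch; assumes lock and cond were set up at
init time with rt_mutex_create()/rt_cond_create()):

	#include <alchemy/cond.h>

	static RT_MUTEX lock;
	static RT_COND cond;
	static int ready;

	void wait_for_ready(void)		/* waiter side */
	{
		rt_mutex_acquire(&lock, TM_INFINITE);
		while (!ready)			/* re-test after each wakeup */
			rt_cond_wait(&cond, &lock, TM_INFINITE);
		rt_mutex_release(&lock);
	}

	void set_ready(void)			/* signaler side */
	{
		rt_mutex_acquire(&lock, TM_INFINITE);
		ready = 1;
		rt_cond_signal(&cond);
		rt_mutex_release(&lock);
	}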
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/event.h b/kernel/xenomai-v3.2.4/include/alchemy/event.h
new file mode 100644
index 0000000..1e8cb4d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/event.h
@@ -0,0 +1,130 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_EVENT_H
+#define _XENOMAI_ALCHEMY_EVENT_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+#include <alchemy/compat.h>
+
+/**
+ * @addtogroup alchemy_event
+ * @{
+ */
+
+/** Creation flags. */
+#define EV_PRIO  0x1	/* Pend by task priority order. */
+#define EV_FIFO  0x0	/* Pend by FIFO order. */
+
+/** Operation flags. */
+#define EV_ANY  0x1	/* Disjunctive wait. */
+#define EV_ALL  0x0	/* Conjunctive wait. */
+
+struct RT_EVENT {
+	uintptr_t handle;
+};
+
+typedef struct RT_EVENT RT_EVENT;
+
+/**
+ * @brief Event status descriptor
+ * @anchor RT_EVENT_INFO
+ *
+ * This structure reports various static and runtime information about
+ * an event flag group, returned by a call to rt_event_inquire().
+ */
+struct RT_EVENT_INFO {
+	/**
+	 * Current value of the event flag group.
+	 */
+	unsigned int value;
+	/**
+	 * Number of tasks currently waiting for events.
+	 */
+	int nwaiters;
+	/**
+	 * Name of event flag group.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+};
+
+typedef struct RT_EVENT_INFO RT_EVENT_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+CURRENT_DECL(int, rt_event_create(RT_EVENT *event,
+				  const char *name,
+				  unsigned int ivalue,
+				  int mode));
+
+int rt_event_delete(RT_EVENT *event);
+
+CURRENT_DECL(int, rt_event_signal(RT_EVENT *event,
+				  unsigned int mask));
+
+int rt_event_wait_timed(RT_EVENT *event,
+			unsigned int mask,
+			unsigned int *mask_r,
+			int mode,
+			const struct timespec *abs_timeout);
+
+#ifndef __XENO_COMPAT__
+
+static inline
+int rt_event_wait_until(RT_EVENT *event,
+			unsigned int mask, unsigned int *mask_r,
+			int mode, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_event_wait_timed(event, mask, mask_r, mode,
+				   alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+int rt_event_wait(RT_EVENT *event,
+		  unsigned int mask, unsigned int *mask_r,
+		  int mode, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_event_wait_timed(event, mask, mask_r, mode,
+				   alchemy_rel_timeout(timeout, &ts));
+}
+
+#endif	/* !__XENO_COMPAT__ */
+
+CURRENT_DECL(int, rt_event_clear(RT_EVENT *event,
+				 unsigned int mask,
+				 unsigned int *mask_r));
+
+int rt_event_inquire(RT_EVENT *event,
+		     RT_EVENT_INFO *info);
+
+int rt_event_bind(RT_EVENT *event,
+		  const char *name, RTIME timeout);
+
+int rt_event_unbind(RT_EVENT *event);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_EVENT_H */
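EV_ANY and EV_ALL select disjunctive versus conjunctive waits on the flag
group. A sketch (illustrative, not part of the patch; the EVT_* flag names
are hypothetical):

	#include <alchemy/event.h>

	#define EVT_RX	0x1
	#define EVT_TX	0x2

	int run_event_demo(void)
	{
		RT_EVENT group;
		unsigned int mask;
		int ret;

		ret = rt_event_create(&group, "demo-events", 0, EV_PRIO);
		if (ret)
			return ret;

		rt_event_signal(&group, EVT_RX);

		/* EV_ANY: satisfied as soon as either flag is raised */
		ret = rt_event_wait(&group, EVT_RX | EVT_TX, &mask,
				    EV_ANY, TM_INFINITE);

		return rt_event_delete(&group);
	}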
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/heap.h b/kernel/xenomai-v3.2.4/include/alchemy/heap.h
new file mode 100644
index 0000000..ade2f47
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/heap.h
@@ -0,0 +1,137 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_HEAP_H
+#define _XENOMAI_ALCHEMY_HEAP_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+
+/**
+ * @addtogroup alchemy_heap
+ * @{
+ */
+
+/** Creation flags. */
+#define H_PRIO    0x1	/* Pend by task priority order. */
+#define H_FIFO    0x0	/* Pend by FIFO order. */
+#define H_SINGLE  0x4	/* Manage as single-block area. */
+
+struct RT_HEAP {
+	uintptr_t handle;
+};
+
+typedef struct RT_HEAP RT_HEAP;
+
+/**
+ * @brief Heap status descriptor
+ * @anchor RT_HEAP_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a real-time heap, returned by a call to rt_heap_inquire().
+ */
+struct RT_HEAP_INFO {
+	/**
+	 * Number of tasks waiting for available memory in
+	 * rt_heap_alloc().
+	 */
+	int nwaiters;
+	/**
+	 * Creation mode flags as given to rt_heap_create().
+	 */
+	int mode;
+	/**
+	 * Size of heap (in bytes) as given to rt_heap_create(). The
+	 * maximum amount of memory available from this heap may be
+	 * larger, due to internal padding.
+	 */
+	size_t heapsize;
+	/**
+	 * Maximum amount of memory available from the heap. This
+	 * value accounts for the overhead of internal data structures
+	 * required to maintain the heap.
+	 */
+	size_t usablemem;
+	/**
+	 * Amount of heap memory currently consumed. info.usablemem -
+	 * info.usedmem computes the current amount of free memory in
+	 * the relevant heap.
+	 */
+	size_t usedmem;
+	/**
+	 * Name of heap.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+};
+
+typedef struct RT_HEAP_INFO RT_HEAP_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int rt_heap_create(RT_HEAP *heap,
+		   const char *name,
+		   size_t heapsize,
+		   int mode);
+
+int rt_heap_delete(RT_HEAP *heap);
+
+int rt_heap_alloc_timed(RT_HEAP *heap,
+			size_t size,
+			const struct timespec *abs_timeout,
+			void **blockp);
+
+static inline
+int rt_heap_alloc_until(RT_HEAP *heap,
+			size_t size, RTIME timeout, void **blockp)
+{
+	struct timespec ts;
+	return rt_heap_alloc_timed(heap, size,
+				   alchemy_abs_timeout(timeout, &ts),
+				   blockp);
+}
+
+static inline
+int rt_heap_alloc(RT_HEAP *heap,
+		  size_t size, RTIME timeout, void **blockp)
+{
+	struct timespec ts;
+	return rt_heap_alloc_timed(heap, size,
+				   alchemy_rel_timeout(timeout, &ts),
+				   blockp);
+}
+
+int rt_heap_free(RT_HEAP *heap,
+		 void *block);
+
+int rt_heap_inquire(RT_HEAP *heap,
+		    RT_HEAP_INFO *info);
+
+int rt_heap_bind(RT_HEAP *heap,
+		 const char *name,
+		 RTIME timeout);
+
+int rt_heap_unbind(RT_HEAP *heap);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_HEAP_H */
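Tying this to the RT_HEAP_INFO fields documented above, the free space in a
heap is usablemem - usedmem. A sketch (illustrative, not part of the patch;
names are hypothetical):

	#include <alchemy/heap.h>

	int run_heap_demo(void)
	{
		RT_HEAP heap;
		RT_HEAP_INFO info;
		size_t avail;
		void *block;
		int ret;

		ret = rt_heap_create(&heap, "demo-heap", 8192, H_PRIO);
		if (ret)
			return ret;

		ret = rt_heap_alloc(&heap, 256, TM_INFINITE, &block);
		if (ret == 0) {
			rt_heap_inquire(&heap, &info);
			avail = info.usablemem - info.usedmem; /* free bytes */
			(void)avail;
			rt_heap_free(&heap, block);
		}

		return rt_heap_delete(&heap);
	}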
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/mutex.h b/kernel/xenomai-v3.2.4/include/alchemy/mutex.h
new file mode 100644
index 0000000..2c4212f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/mutex.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_MUTEX_H
+#define _XENOMAI_ALCHEMY_MUTEX_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+#include <alchemy/task.h>
+
+/**
+ * @addtogroup alchemy_mutex
+ * @{
+ */
+
+struct RT_MUTEX {
+	uintptr_t handle;
+};
+
+typedef struct RT_MUTEX RT_MUTEX;
+
+/**
+ * @brief Mutex status descriptor
+ * @anchor RT_MUTEX_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a mutex, returned by a call to rt_mutex_inquire().
+ */
+struct RT_MUTEX_INFO {
+	/**
+	 * Current mutex owner, or NO_ALCHEMY_TASK if unlocked. This
+	 * information is in essence transient, and may not be valid
+	 * anymore once used by the caller.
+	 */
+	RT_TASK owner;
+	/**
+	 * Name of mutex.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+};
+
+typedef struct RT_MUTEX_INFO RT_MUTEX_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int rt_mutex_create(RT_MUTEX *mutex,
+		    const char *name);
+
+int rt_mutex_delete(RT_MUTEX *mutex);
+
+int rt_mutex_acquire_timed(RT_MUTEX *mutex,
+			   const struct timespec *abs_timeout);
+
+static inline
+int rt_mutex_acquire_until(RT_MUTEX *mutex, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_mutex_acquire_timed(mutex,
+				      alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+int rt_mutex_acquire(RT_MUTEX *mutex, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_mutex_acquire_timed(mutex,
+				      alchemy_rel_timeout(timeout, &ts));
+}
+
+int rt_mutex_release(RT_MUTEX *mutex);
+
+int rt_mutex_inquire(RT_MUTEX *mutex,
+		     RT_MUTEX_INFO *info);
+
+int rt_mutex_bind(RT_MUTEX *mutex,
+		  const char *name, RTIME timeout);
+
+int rt_mutex_unbind(RT_MUTEX *mutex);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_MUTEX_H */
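Since rt_mutex_acquire() takes a relative timeout, a bounded wait falls out
naturally. A sketch (illustrative, not part of the patch; assumes nanosecond
RTIME values and a negative error return, e.g. -ETIMEDOUT, when the wait
expires):

	#include <alchemy/mutex.h>

	static RT_MUTEX lock;

	int update_shared_state(void)
	{
		int ret;

		/* bounded wait: give up after 1 ms */
		ret = rt_mutex_acquire(&lock, 1000000);
		if (ret)
			return ret;

		/* ... critical section ... */

		return rt_mutex_release(&lock);
	}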
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/pipe.h b/kernel/xenomai-v3.2.4/include/alchemy/pipe.h
new file mode 100644
index 0000000..4ae24f9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/pipe.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_PIPE_H
+#define _XENOMAI_ALCHEMY_PIPE_H
+
+#include <stdint.h>
+#include <cobalt/uapi/kernel/pipe.h>
+#include <alchemy/timer.h>
+#include <alchemy/compat.h>
+
+/**
+ * @addtogroup alchemy_pipe
+ * @{
+ */
+
+/** Creation flags. */
+#define P_MINOR_AUTO	XNPIPE_MINOR_AUTO
+
+/** Operation flags. */
+#define P_URGENT	XNPIPE_URGENT
+#define P_NORMAL	XNPIPE_NORMAL
+
+struct RT_PIPE {
+	uintptr_t handle;
+};
+
+typedef struct RT_PIPE RT_PIPE;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+CURRENT_DECL(int, rt_pipe_create(RT_PIPE *pipe,
+				 const char *name,
+				 int minor, size_t poolsize));
+
+int rt_pipe_delete(RT_PIPE *pipe);
+
+ssize_t rt_pipe_read_timed(RT_PIPE *pipe,
+			   void *buf, size_t size,
+			   const struct timespec *abs_timeout);
+
+static inline
+ssize_t rt_pipe_read_until(RT_PIPE *pipe,
+			   void *buf, size_t size, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_pipe_read_timed(pipe, buf, size,
+				  alchemy_abs_timeout(timeout, &ts));
+}
+
+ssize_t rt_pipe_read(RT_PIPE *pipe,
+		     void *buf, size_t size, RTIME timeout);
+
+ssize_t rt_pipe_write(RT_PIPE *pipe,
+		      const void *buf, size_t size, int mode);
+
+ssize_t rt_pipe_stream(RT_PIPE *pipe,
+		       const void *buf, size_t size);
+
+int rt_pipe_bind(RT_PIPE *pipe,
+		 const char *name, RTIME timeout);
+
+int rt_pipe_unbind(RT_PIPE *pipe);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_PIPE_H */
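Message pipes connect the real-time side to a regular character device on
the Linux side. A sketch (illustrative, not part of the patch; the pool size
and the assumption that the paired node appears as /dev/rtpN come from
common Xenomai usage, not from this header):

	#include <alchemy/pipe.h>

	int run_pipe_demo(void)
	{
		RT_PIPE pipe;
		ssize_t n;
		int ret;

		/* P_MINOR_AUTO: let the system pick the device minor */
		ret = rt_pipe_create(&pipe, "demo-pipe", P_MINOR_AUTO, 8192);
		if (ret < 0)
			return ret;

		/* P_NORMAL queues at the tail; P_URGENT would jump the queue */
		n = rt_pipe_write(&pipe, "ping", 4, P_NORMAL);
		(void)n;

		return rt_pipe_delete(&pipe);
	}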
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/queue.h b/kernel/xenomai-v3.2.4/include/alchemy/queue.h
new file mode 100644
index 0000000..4cd2d70
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/queue.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_QUEUE_H
+#define _XENOMAI_ALCHEMY_QUEUE_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+
+/**
+ * @addtogroup alchemy_queue
+ * @{
+ */
+
+/** Creation flags. */
+#define Q_PRIO  0x1	/* Pend by task priority order. */
+#define Q_FIFO  0x0	/* Pend by FIFO order. */
+
+#define Q_UNLIMITED 0	/* No size limit. */
+
+/*
+ * Operation flags.
+ */
+#define Q_NORMAL     0x0
+#define Q_URGENT     0x1
+#define Q_BROADCAST  0x2
+
+struct RT_QUEUE {
+	uintptr_t handle;
+};
+
+typedef struct RT_QUEUE RT_QUEUE;
+
+/**
+ * @brief Queue status descriptor
+ * @anchor RT_QUEUE_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a real-time queue, returned by a call to rt_queue_inquire().
+ */
+struct RT_QUEUE_INFO {
+	/**
+	 * Number of tasks currently waiting on the queue for
+	 * messages.
+	 */
+	int nwaiters;
+	/**
+	 * Number of messages pending in queue.
+	 */
+	int nmessages;
+	/**
+	 * Queue mode bits, as given to rt_queue_create().
+	 */
+	int mode;
+	/**
+	 * Maximum number of messages in queue, zero if unlimited.
+	 */
+	size_t qlimit;
+	/**
+	 * Size of memory pool for holding message buffers (in bytes).
+	 */
+	size_t poolsize;
+	/**
+	 * Amount of memory consumed from the buffer pool.
+	 */
+	size_t usedmem;
+	/**
+	 * Name of message queue.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+};
+
+typedef struct RT_QUEUE_INFO RT_QUEUE_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int rt_queue_create(RT_QUEUE *queue,
+		    const char *name,
+		    size_t poolsize, size_t qlimit, int mode);
+
+int rt_queue_delete(RT_QUEUE *queue);
+
+void *rt_queue_alloc(RT_QUEUE *queue,
+		     size_t size);
+
+int rt_queue_free(RT_QUEUE *queue,
+		  void *buf);
+
+int rt_queue_send(RT_QUEUE *queue,
+		  const void *buf, size_t size, int mode);
+
+int rt_queue_write(RT_QUEUE *queue,
+		   const void *buf, size_t size, int mode);
+
+ssize_t rt_queue_receive_timed(RT_QUEUE *queue,
+			       void **bufp,
+			       const struct timespec *abs_timeout);
+
+static inline
+ssize_t rt_queue_receive_until(RT_QUEUE *queue,
+			       void **bufp, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_queue_receive_timed(queue, bufp,
+				      alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+ssize_t rt_queue_receive(RT_QUEUE *queue,
+			 void **bufp, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_queue_receive_timed(queue, bufp,
+				      alchemy_rel_timeout(timeout, &ts));
+}
+
+ssize_t rt_queue_read_timed(RT_QUEUE *queue,
+			    void *buf, size_t size,
+			    const struct timespec *abs_timeout);
+
+static inline
+ssize_t rt_queue_read_until(RT_QUEUE *queue,
+			    void *buf, size_t size, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_queue_read_timed(queue, buf, size,
+				   alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+ssize_t rt_queue_read(RT_QUEUE *queue,
+		      void *buf, size_t size, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_queue_read_timed(queue, buf, size,
+				   alchemy_rel_timeout(timeout, &ts));
+}
+
+int rt_queue_flush(RT_QUEUE *queue);
+
+int rt_queue_inquire(RT_QUEUE *queue,
+		     RT_QUEUE_INFO *info);
+
+int rt_queue_bind(RT_QUEUE *queue,
+		  const char *name,
+		  RTIME timeout);
+
+int rt_queue_unbind(RT_QUEUE *queue);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_QUEUE_H */
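rt_queue_alloc()/rt_queue_send() form a zero-copy send path: the message is
built directly inside the queue's buffer pool, then handed over. A sketch
(illustrative, not part of the patch; names are hypothetical):

	#include <alchemy/queue.h>
	#include <string.h>

	int run_queue_demo(void)
	{
		RT_QUEUE q;
		void *msg, *rcv;
		ssize_t n;
		int ret;

		ret = rt_queue_create(&q, "demo-queue", 4096,
				      Q_UNLIMITED, Q_FIFO);
		if (ret)
			return ret;

		msg = rt_queue_alloc(&q, 5);	/* zero-copy: pool buffer */
		if (msg) {
			memcpy(msg, "ping", 5);
			rt_queue_send(&q, msg, 5, Q_NORMAL);
		}

		n = rt_queue_receive(&q, &rcv, TM_INFINITE);
		if (n >= 0)
			rt_queue_free(&q, rcv);	/* release pool buffer */

		return rt_queue_delete(&q);
	}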
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/sem.h b/kernel/xenomai-v3.2.4/include/alchemy/sem.h
new file mode 100644
index 0000000..8f86824
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/sem.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_SEM_H
+#define _XENOMAI_ALCHEMY_SEM_H
+
+#include <stdint.h>
+#include <alchemy/timer.h>
+
+/**
+ * @addtogroup alchemy_sem
+ * @{
+ */
+
+/** Creation flags. */
+#define S_PRIO  0x1	/* Pend by task priority order. */
+#define S_FIFO  0x0	/* Pend by FIFO order. */
+#define S_PULSE 0x2	/* Enable pulse mode. */
+
+struct RT_SEM {
+	uintptr_t handle;
+};
+
+typedef struct RT_SEM RT_SEM;
+
+/**
+ * @brief Semaphore status descriptor
+ * @anchor RT_SEM_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a semaphore, returned by a call to rt_sem_inquire().
+ */
+struct RT_SEM_INFO {
+	/**
+	 * Current semaphore value.
+	 */
+	unsigned long count;
+	/**
+	 * Number of tasks waiting on the semaphore.
+	 */
+	int nwaiters;
+	/**
+	 * Name of semaphore.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+};
+
+typedef struct RT_SEM_INFO RT_SEM_INFO;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int rt_sem_create(RT_SEM *sem,
+		  const char *name,
+		  unsigned long icount,
+		  int mode);
+
+int rt_sem_delete(RT_SEM *sem);
+
+int rt_sem_p_timed(RT_SEM *sem,
+		   const struct timespec *abs_timeout);
+
+static inline int rt_sem_p_until(RT_SEM *sem, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_sem_p_timed(sem, alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline int rt_sem_p(RT_SEM *sem, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_sem_p_timed(sem, alchemy_rel_timeout(timeout, &ts));
+}
+
+int rt_sem_v(RT_SEM *sem);
+
+int rt_sem_broadcast(RT_SEM *sem);
+
+int rt_sem_inquire(RT_SEM *sem,
+		   RT_SEM_INFO *info);
+
+int rt_sem_bind(RT_SEM *sem,
+		const char *name, RTIME timeout);
+
+int rt_sem_unbind(RT_SEM *sem);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_SEM_H */
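A short sketch of the counting-semaphore API above (illustrative only, not part of the patch):

#include <alchemy/sem.h>

static RT_SEM sem;

int init_sync(void)
{
	/* Counting semaphore, initial count 0, FIFO pend order. */
	return rt_sem_create(&sem, "demo-sem", 0, S_FIFO);
}

void post(void)
{
	rt_sem_v(&sem);	/* signal one waiter */
}

int wait_one_second(void)
{
	/*
	 * rt_sem_p() takes a relative delay in clock ticks;
	 * rt_timer_ns2ticks() converts from nanoseconds (a 1:1
	 * mapping with the default 1 ns clock resolution).
	 */
	return rt_sem_p(&sem, rt_timer_ns2ticks(1000000000));
}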
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/task.h b/kernel/xenomai-v3.2.4/include/alchemy/task.h
new file mode 100644
index 0000000..685d478
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/task.h
@@ -0,0 +1,213 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_TASK_H
+#define _XENOMAI_ALCHEMY_TASK_H
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <xeno_config.h>
+#include <boilerplate/list.h>
+#include <copperplate/threadobj.h>
+#include <alchemy/timer.h>
+#include <alchemy/compat.h>
+
+/**
+ * @addtogroup alchemy_task
+ * @{
+ */
+
+/** Task priorities. */
+#define T_LOPRIO  0
+#define T_HIPRIO  99
+
+/** Task mode bits. */
+#define T_LOCK		__THREAD_M_LOCK
+/** Cobalt only; a no-op over Mercury. */
+#define T_WARNSW	__THREAD_M_WARNSW
+#define T_CONFORMING	__THREAD_M_CONFORMING
+#define T_JOINABLE	__THREAD_M_SPARE0
+
+struct RT_TASK {
+	uintptr_t handle;
+	pthread_t thread;
+};
+
+typedef struct RT_TASK RT_TASK;
+
+struct RT_TASK_MCB {
+	int flowid;
+	int opcode;
+	union {
+		dref_type(void *) __dref;
+		void *data;
+	};
+	ssize_t size;
+};
+
+typedef struct RT_TASK_MCB RT_TASK_MCB;
+
+/**
+ * @brief Task status descriptor
+ * @anchor RT_TASK_INFO
+ *
+ * This structure reports various static and runtime information about
+ * a real-time task, returned by a call to rt_task_inquire().
+ */
+struct RT_TASK_INFO {
+	/**
+	 * Task priority.
+	 */
+	int prio;
+	/**
+	 * Task status.
+	 */
+	struct threadobj_stat stat;
+	/**
+	 * Name of task.
+	 */
+	char name[XNOBJECT_NAME_LEN];
+	/**
+	 * Host pid.
+	 */
+	pid_t pid;
+};
+
+typedef struct RT_TASK_INFO RT_TASK_INFO;
+
+#define NO_ALCHEMY_TASK	((RT_TASK){ 0, 0 })
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+CURRENT_DECL(int, rt_task_create(RT_TASK *task,
+				 const char *name,
+				 int stksize,
+				 int prio,
+				 int mode));
+
+int rt_task_delete(RT_TASK *task);
+
+int rt_task_set_affinity(RT_TASK *task,
+			 const cpu_set_t *cpus);
+
+int rt_task_start(RT_TASK *task,
+		  void (*entry)(void *arg),
+		  void *arg);
+
+CURRENT_DECL(int, rt_task_spawn(RT_TASK *task, const char *name,
+				int stksize, int prio, int mode,
+				void (*entry)(void *arg),
+				void *arg));
+
+int rt_task_shadow(RT_TASK *task,
+		   const char *name,
+		   int prio,
+		   int mode);
+
+int rt_task_join(RT_TASK *task);
+
+CURRENT_DECL(int, rt_task_set_periodic(RT_TASK *task,
+				       RTIME idate, RTIME period));
+
+int rt_task_wait_period(unsigned long *overruns_r);
+
+int rt_task_sleep(RTIME delay);
+
+int rt_task_sleep_until(RTIME date);
+
+int rt_task_same(RT_TASK *task1, RT_TASK *task2);
+
+int rt_task_suspend(RT_TASK *task);
+
+int rt_task_resume(RT_TASK *task);
+
+RT_TASK *rt_task_self(void);
+
+int rt_task_set_priority(RT_TASK *task, int prio);
+
+int rt_task_set_mode(int clrmask, int setmask,
+		     int *mode_r);
+
+int rt_task_yield(void);
+
+int rt_task_unblock(RT_TASK *task);
+
+int rt_task_slice(RT_TASK *task, RTIME quantum);
+
+int rt_task_inquire(RT_TASK *task,
+		    RT_TASK_INFO *info);
+
+ssize_t rt_task_send_timed(RT_TASK *task,
+			   RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r,
+			   const struct timespec *abs_timeout);
+
+static inline
+ssize_t rt_task_send_until(RT_TASK *task,
+			   RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r,
+			   RTIME timeout)
+{
+	struct timespec ts;
+	return rt_task_send_timed(task, mcb_s, mcb_r,
+				  alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+ssize_t rt_task_send(RT_TASK *task,
+		     RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r,
+		     RTIME timeout)
+{
+	struct timespec ts;
+	return rt_task_send_timed(task, mcb_s, mcb_r,
+				  alchemy_rel_timeout(timeout, &ts));
+}
+
+int rt_task_receive_timed(RT_TASK_MCB *mcb_r,
+			  const struct timespec *abs_timeout);
+
+static inline
+int rt_task_receive_until(RT_TASK_MCB *mcb_r, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_task_receive_timed(mcb_r,
+				     alchemy_abs_timeout(timeout, &ts));
+}
+
+static inline
+int rt_task_receive(RT_TASK_MCB *mcb_r, RTIME timeout)
+{
+	struct timespec ts;
+	return rt_task_receive_timed(mcb_r,
+				     alchemy_rel_timeout(timeout, &ts));
+}
+
+int rt_task_reply(int flowid,
+		  RT_TASK_MCB *mcb_s);
+
+int rt_task_bind(RT_TASK *task,
+		 const char *name, RTIME timeout);
+
+int rt_task_unbind(RT_TASK *task);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_TASK_H */
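A sketch of a periodic worker built on the task API above (illustrative only; it assumes the documented Alchemy conventions that a zero stack size selects the library default and that rt_task_set_periodic(NULL, ...) applies to the calling task):

#include <alchemy/task.h>

static RT_TASK worker;

static void worker_body(void *arg)
{
	unsigned long overruns;

	/* Become periodic from now on (TM_NOW), with a 1 ms period. */
	rt_task_set_periodic(NULL, TM_NOW, rt_timer_ns2ticks(1000000));

	for (;;) {
		if (rt_task_wait_period(&overruns))
			break;
		/* ... one cycle of real-time work ... */
	}
}

int start_worker(void)
{
	int ret = rt_task_create(&worker, "worker", 0, 50, T_JOINABLE);
	if (ret)
		return ret;

	return rt_task_start(&worker, worker_body, NULL);
}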
diff --git a/kernel/xenomai-v3.2.4/include/alchemy/timer.h b/kernel/xenomai-v3.2.4/include/alchemy/timer.h
new file mode 100644
index 0000000..7b3837b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/alchemy/timer.h
@@ -0,0 +1,125 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_ALCHEMY_TIMER_H
+#define _XENOMAI_ALCHEMY_TIMER_H
+
+#include <stddef.h>
+#include <copperplate/clockobj.h>
+
+/**
+ * @addtogroup alchemy_timer
+ * @{
+ */
+
+typedef ticks_t RTIME;
+
+typedef sticks_t SRTIME;
+
+#define TM_INFINITE  0
+#define TM_NOW       0
+#define TM_NONBLOCK  ((RTIME)-1ULL)
+
+/**
+ * @brief Timer status descriptor
+ * @anchor RT_TIMER_INFO
+ *
+ * This structure reports information about the Alchemy clock,
+ * returned by a call to rt_timer_inquire().
+ */
+typedef struct rt_timer_info {
+	/**
+	 * Clock resolution in nanoseconds.
+	 */
+	RTIME period;
+	/**
+	 * Current monotonic date expressed in clock ticks. The
+	 * duration of a tick depends on the Alchemy clock resolution
+	 * for the process (see the --alchemy-clock-resolution option,
+	 * which defaults to 1 nanosecond).
+	 */
+	RTIME date;
+} RT_TIMER_INFO;
+
+extern struct clockobj alchemy_clock;
+
+#define alchemy_abs_timeout(__t, __ts)					\
+	({								\
+		(__t) == TM_INFINITE ? NULL :				\
+		(__t) == TM_NONBLOCK ?					\
+		({ (__ts)->tv_sec = (__ts)->tv_nsec = 0; (__ts); }) :	\
+		({ clockobj_ticks_to_timespec(&alchemy_clock, (__t), (__ts)); \
+			(__ts); });					\
+	})
+
+#define alchemy_rel_timeout(__t, __ts)					\
+	({								\
+		(__t) == TM_INFINITE ? NULL :				\
+		(__t) == TM_NONBLOCK ?					\
+		({ (__ts)->tv_sec = (__ts)->tv_nsec = 0; (__ts); }) :	\
+		({ clockobj_ticks_to_timeout(&alchemy_clock, (__t), (__ts)); \
+			(__ts); });					\
+	})
+
+static inline
+int alchemy_poll_mode(const struct timespec *abs_timeout)
+{
+	return abs_timeout &&
+		abs_timeout->tv_sec == 0 &&
+		abs_timeout->tv_nsec == 0;
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * @fn RTIME rt_timer_read(void)
+ * @brief Return the current system time.
+ *
+ * Return the current time maintained by the Xenomai core clock.
+ *
+ * @return The current time expressed in clock ticks (see note).
+ *
+ * @apitags{unrestricted}
+ *
+ * @note The @a time value is a multiple of the Alchemy clock
+ * resolution (see the --alchemy-clock-resolution option, which
+ * defaults to 1 nanosecond).
+ */
+static inline RTIME rt_timer_read(void)
+{
+	return clockobj_get_time(&alchemy_clock);
+}
+
+SRTIME rt_timer_ns2ticks(SRTIME ns);
+
+SRTIME rt_timer_ticks2ns(SRTIME ticks);
+
+void rt_timer_inquire(RT_TIMER_INFO *info);
+
+void rt_timer_spin(RTIME ns);
+
+#ifdef __cplusplus
+}
+#endif
+
+/** @} */
+
+#endif /* _XENOMAI_ALCHEMY_TIMER_H */
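To make the timeout plumbing above concrete, here is a sketch of how the inline wrappers feed the *_timed() services (illustrative only; it links against the copperplate layer that provides alchemy_clock):

#include <stdio.h>
#include <alchemy/timer.h>

void describe_timeout(RTIME timeout)
{
	struct timespec ts;
	const struct timespec *p = alchemy_rel_timeout(timeout, &ts);

	if (p == NULL)
		printf("TM_INFINITE: block until completion\n");
	else if (alchemy_poll_mode(p))
		printf("TM_NONBLOCK: fail immediately if not ready\n");
	else
		printf("bounded wait: %llu ticks\n",
		       (unsigned long long)timeout);
}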
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am b/kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am
new file mode 100644
index 0000000..0642560
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/Makefile.am
@@ -0,0 +1,23 @@
+includesubdir = $(includedir)/boilerplate
+
+includesub_HEADERS =	\
+	ancillaries.h	\
+	atomic.h	\
+	avl.h		\
+	shavl.h		\
+	avl-inner.h	\
+	compiler.h	\
+	debug.h		\
+	hash.h		\
+	heapmem.h	\
+	libc.h		\
+	list.h		\
+	lock.h		\
+	namegen.h	\
+	obstack.h	\
+	private-list.h	\
+	scope.h		\
+	setup.h		\
+	shared-list.h	\
+	time.h		\
+	tunables.h
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h b/kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h
new file mode 100644
index 0000000..319d22f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/ancillaries.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_ANCILLARIES_H
+#define _BOILERPLATE_ANCILLARIES_H
+
+#include <stdarg.h>
+#include <time.h>
+#include <pthread.h>
+#include <sched.h>
+#include <string.h>
+
+struct error_frame;
+
+#define ONE_BILLION  1000000000
+
+void __namecpy_requires_character_array_as_destination(void);
+
+#define namecpy(__dst, __src)						\
+	({								\
+		if (!__builtin_types_compatible_p(typeof(__dst), char[])) \
+			__namecpy_requires_character_array_as_destination();	\
+		strncpy((__dst), __src, sizeof(__dst));			\
+		__dst[sizeof(__dst) - 1] = '\0';			\
+		__dst;							\
+	 })
+
+#define early_panic(__fmt, __args...)		\
+	__early_panic(__func__, __fmt, ##__args)
+
+#define panic(__fmt, __args...)			\
+	__panic(__func__, __fmt, ##__args)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void __printout(const char *name,
+		const char *header,
+		const char *fmt, va_list ap);
+
+void __noreturn __early_panic(const char *fn,
+			      const char *fmt, ...);
+
+void __noreturn ___panic(const char *fn,
+			 const char *name,
+			 const char *fmt, va_list ap);
+
+void __noreturn __panic(const char *fn,
+			const char *fmt, ...);
+
+void __warning(const char *name,
+	       const char *fmt, va_list ap);
+
+void early_warning(const char *fmt, ...);
+
+void warning(const char *fmt, ...);
+
+void __notice(const char *name,
+	      const char *fmt, va_list ap);
+
+void early_notice(const char *fmt, ...);
+
+void notice(const char *fmt, ...);
+
+void __boilerplate_init(void);
+
+const char *symerror(int errnum);
+
+void error_hook(struct error_frame *ef);
+
+int get_static_cpu_count(void);
+
+int get_online_cpu_set(cpu_set_t *cpuset);
+
+int get_realtime_cpu_set(cpu_set_t *cpuset);
+
+int get_current_cpu(void);
+
+pid_t get_thread_pid(void);
+
+char *lookup_command(const char *cmd);
+
+size_t get_mem_size(const char *arg);
+
+extern const char *config_strings[];
+
+extern pthread_mutex_t __printlock;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BOILERPLATE_ANCILLARIES_H */
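For illustration, a sketch of the namecpy() guard declared above; the type check is assumed to surface as a build-time failure through the deliberately unresolvable guard function:

#include <boilerplate/ancillaries.h>

struct object {
	char name[32];	/* a real char array, as namecpy() requires */
};

void set_name(struct object *obj, const char *label)
{
	/*
	 * Unlike raw strncpy(), namecpy() always NUL-terminates the
	 * destination; passing a plain char * instead of an array
	 * keeps the call to the guard function in the emitted code,
	 * so the mistake shows up when building.
	 */
	namecpy(obj->name, label);
}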
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/atomic.h b/kernel/xenomai-v3.2.4/include/boilerplate/atomic.h
new file mode 100644
index 0000000..4ee5f39
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/atomic.h
@@ -0,0 +1,89 @@
+/**
+ *   Copyright &copy; 2011 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *   Copyright &copy; 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_ATOMIC_H
+#define _BOILERPLATE_ATOMIC_H
+
+#include <xeno_config.h>
+
+typedef struct { int v; } atomic_t;
+
+typedef struct { long v; } atomic_long_t;
+
+#define ATOMIC_INIT(__n) { (__n) }
+
+static inline long atomic_long_read(const atomic_long_t *ptr)
+{
+	return ptr->v;
+}
+
+static inline void atomic_long_set(atomic_long_t *ptr, long v)
+{
+	ptr->v = v;
+}
+
+static inline int atomic_read(const atomic_t *ptr)
+{
+	return ptr->v;
+}
+
+static inline void atomic_set(atomic_t *ptr, long v)
+{
+	ptr->v = v;
+}
+
+#ifndef atomic_cmpxchg
+#define atomic_cmpxchg(__ptr, __old, __new)  \
+	__sync_val_compare_and_swap(&(__ptr)->v, __old, __new)
+#endif
+
+#ifndef atomic_sub_fetch
+#define atomic_sub_fetch(__ptr, __n)	\
+	__sync_sub_and_fetch(&(__ptr)->v, __n)
+#endif
+
+#ifndef atomic_add_fetch
+#define atomic_add_fetch(__ptr, __n)	\
+	__sync_add_and_fetch(&(__ptr)->v, __n)
+#endif
+
+#ifdef CONFIG_SMP
+#ifndef smp_mb
+#define smp_mb()	__sync_synchronize()
+#endif
+#ifndef smp_rmb
+#define smp_rmb()	smp_mb()
+#endif
+#ifndef smp_wmb
+#define smp_wmb()	smp_mb()
+#endif
+#else  /* !CONFIG_SMP */
+#define smp_mb()	do { } while (0)
+#define smp_rmb()	do { } while (0)
+#define smp_wmb()	do { } while (0)
+#endif /* !CONFIG_SMP */
+
+#define ACCESS_ONCE(x) (*(volatile __typeof__(x) *)&(x))
+
+#define compiler_barrier()	__asm__ __volatile__("": : :"memory")
+
+#ifndef cpu_relax
+#define cpu_relax() __sync_synchronize()
+#endif
+
+#endif /* _BOILERPLATE_ATOMIC_H */
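A small compare-and-swap sketch built on the helpers above (illustrative only, not part of the patch):

#include <boilerplate/atomic.h>

static atomic_t owner = ATOMIC_INIT(0);

/*
 * Claim a shared resource for thread id 'tid' (non-zero): succeeds
 * only if nobody owned it. atomic_cmpxchg() returns the value seen
 * before the exchange, so reading back the expected old value means
 * the swap took place.
 */
int try_claim(int tid)
{
	return atomic_cmpxchg(&owner, 0, tid) == 0;
}

void release(void)
{
	smp_mb();	/* order prior writes before publishing the release */
	atomic_set(&owner, 0);
}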
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h b/kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h
new file mode 100644
index 0000000..9c05762
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/avl-inner.h
@@ -0,0 +1,522 @@
+/*
+ * Copyright (c) 2015 Gilles Chanteperdrix
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#if (!defined(_BOILERPLATE_AVL_INNER_H) && !defined(AVL_PSHARED)) || \
+    (!defined(_BOILERPLATE_AVL_SHARED_INNER_H) && defined(AVL_PSHARED)) /* Yeah, well... */
+
+#if !defined(_BOILERPLATE_AVL_H) && !defined(_BOILERPLATE_SHAVL_H)
+#error "Do not include this file directly. Use <boilerplate/avl.h> or <boilerplate/shavl.h> instead."
+#endif
+
+#include <stddef.h>
+#include <stdio.h>
+
+#ifdef AVL_PSHARED
+#define __AVL(__decl)	shavl_ ## __decl
+#define __AVLH(__decl)	shavlh_ ## __decl
+#define __AVL_T(__type)	sh ## __type
+#define _BOILERPLATE_AVL_SHARED_INNER_H
+#else
+#define __AVL(__decl)	avl_ ## __decl
+#define __AVLH(__decl)	avlh_ ## __decl
+#define __AVL_T(__type)	__type
+#define _BOILERPLATE_AVL_INNER_H
+#endif
+
+struct __AVL_T(avlh) {
+#define AVLH_APP_BITS 28
+	unsigned int flags: AVLH_APP_BITS;
+	int type: 2;
+	int balance: 2;
+	union {
+		ptrdiff_t offset;
+		struct __AVL_T(avlh) *ptr;
+	} link[3];
+};
+
+struct __AVL_T(avl);
+
+/*
+ * Comparison function: should return -1 if left is less than right,
+ * 0 if they are equal, and 1 if left is greater than right. You can
+ * use the avl_sign() helper, which converts a difference to -1, 0 or
+ * 1; beware of overflow, however. You can also use avl_cmp_sign(),
+ * which does not have any such problem.
+ */
+typedef int __AVL_T(avlh_cmp_t)(const struct __AVL_T(avlh) *const,
+				const struct __AVL_T(avlh) *const);
+
+typedef struct __AVL_T(avlh) *
+__AVL_T(avl_search_t)(const struct __AVL_T(avl) *,
+		      const struct __AVL_T(avlh) *, int *, int);
+
+typedef int __AVL_T(avlh_prn_t)(char *, size_t,
+				const struct __AVL_T(avlh) *const);
+
+struct __AVL_T(avl_searchops) {
+	__AVL_T(avl_search_t) *search;
+	__AVL_T(avlh_cmp_t) *cmp;
+};
+
+struct __AVL_T(avl) {
+	struct __AVL_T(avlh) anchor;
+	union {
+		ptrdiff_t offset;
+		struct __AVL_T(avlh) *ptr;
+	} end[3];
+	unsigned int count;
+	unsigned int height;
+};
+
+#define AVL_LEFT	     -1
+#define AVL_UP		      0
+#define AVL_RIGHT	      1
+/* Maps AVL_LEFT to AVL_RIGHT and vice versa. */
+#define avl_opposite(type)   (-(type))
+/* Maps AVL_LEFT and AVL_RIGHT to array indices (or bit positions). */
+#define avl_type2index(type) ((type)+1)
+
+#define AVL_THR_LEFT  (1 << avl_type2index(AVL_LEFT))
+#define AVL_THR_RIGHT (1 << avl_type2index(AVL_RIGHT))
+
+#ifdef AVL_PSHARED
+
+static inline struct shavlh *
+shavlh_link(const struct shavl *const avl,
+	    const struct shavlh *const holder, unsigned int dir)
+{
+	ptrdiff_t offset = holder->link[avl_type2index(dir)].offset;
+	return offset == (ptrdiff_t)-1 ? NULL : (void *)avl + offset;
+}
+
+static inline void
+shavlh_set_link(struct shavl *const avl, struct shavlh *lhs,
+		int dir, struct shavlh *rhs)
+{
+	ptrdiff_t offset = rhs ? (void *)rhs - (void *)avl : (ptrdiff_t)-1;
+	lhs->link[avl_type2index(dir)].offset = offset;
+}
+
+static inline
+struct shavlh *shavl_end(const struct shavl *const avl, int dir)
+{
+	ptrdiff_t offset = avl->end[avl_type2index(dir)].offset;
+	return offset == (ptrdiff_t)-1 ? NULL : (void *)avl + offset;
+}
+
+static inline void
+shavl_set_end(struct shavl *const avl, int dir, struct shavlh *holder)
+{
+	ptrdiff_t offset = holder ? (void *)holder - (void *)avl : (ptrdiff_t)-1;
+	avl->end[avl_type2index(dir)].offset = offset;
+}
+
+#define shavl_count(avl)	((avl)->count)
+#define shavl_height(avl)	((avl)->height)
+#define shavl_anchor(avl)	(&(avl)->anchor)
+
+#define shavlh_up(avl, holder)			\
+	shavlh_link((avl), (holder), AVL_UP)
+#define shavlh_left(avl, holder)		\
+	shavlh_link((avl), (holder), AVL_LEFT)
+#define shavlh_right(avl, holder)		\
+	shavlh_link((avl), (holder), AVL_RIGHT)
+
+#define shavlh_thr_tst(avl, holder, side)	\
+	(shavlh_link(avl, holder, side) == NULL)
+#define shavlh_child(avl, holder, side)		\
+	(shavlh_link((avl),(holder),(side)))
+#define shavlh_has_child(avl, holder, side)	\
+	(!shavlh_thr_tst(avl, holder, side))
+
+#define shavl_top(avl)	  (shavlh_right(avl, shavl_anchor(avl)))
+#define shavl_head(avl)	  (shavl_end((avl), AVL_LEFT))
+#define shavl_tail(avl)	  (shavl_end((avl), AVL_RIGHT))
+
+/*
+ * Search for a node in a pshared AVL; if the node cannot be found,
+ * its would-be parent is returned instead.
+ */
+#define DECLARE_SHAVL_SEARCH(__search_fn, __cmp)			\
+	struct shavlh *__search_fn(const struct shavl *const avl,	\
+				   const struct shavlh *const node,	\
+				   int *const pdelta, int dir)		\
+	{								\
+		int delta = AVL_RIGHT;					\
+		struct shavlh *holder = shavl_top(avl), *next;		\
+									\
+		if (holder == NULL)					\
+			goto done;					\
+									\
+		for (;;) {						\
+			delta = __cmp(node, holder);			\
+			/*						\
+			 * Handle duplicate keys here, according to	\
+			 * "dir"; if dir is:				\
+			 * - AVL_LEFT, the leftmost node is returned,	\
+			 * - AVL_RIGHT, the rightmost node is returned,	\
+			 * - 0, the first match is returned.		\
+			 */						\
+			if (!(delta ?: dir))				\
+				break;					\
+			next = shavlh_child(avl, holder, delta ?: dir); \
+			if (next == NULL)				\
+				break;					\
+			holder = next;					\
+		}							\
+									\
+	  done:								\
+		*pdelta = delta;					\
+		return holder;						\
+	}
+
+#else  /* !AVL_PSHARED */
+
+#define avlh_link(avl, holder, dir) ((holder)->link[avl_type2index(dir)].ptr)
+
+#define avl_end(avl, dir) ((avl)->end[avl_type2index(dir)].ptr)
+
+static inline void
+avlh_set_link(struct avl *const avl, struct avlh *lhs, int dir, struct avlh *rhs)
+{
+	avlh_link(avl, lhs, dir) = rhs;
+}
+
+static inline void
+avl_set_end(struct avl *const avl, int dir, struct avlh *holder)
+{
+	avl_end(avl, dir) = holder;
+}
+
+#define avl_count(avl)	  ((avl)->count)
+#define avl_height(avl)	  ((avl)->height)
+#define avl_anchor(avl)	  (&(avl)->anchor)
+
+#define avlh_up(avl, holder)	avlh_link((avl), (holder), AVL_UP)
+#define avlh_left(avl, holder)	avlh_link((avl), (holder), AVL_LEFT)
+#define avlh_right(avl, holder)	avlh_link((avl), (holder), AVL_RIGHT)
+
+#define avlh_thr_tst(avl, holder, side) (avlh_link(avl, holder, side) == NULL)
+#define avlh_child(avl, holder, side) (avlh_link((avl),(holder),(side)))
+#define avlh_has_child(avl, holder, side) (!avlh_thr_tst(avl, holder, side))
+
+#define avl_top(avl)	  (avlh_right(avl, avl_anchor(avl)))
+#define avl_head(avl)	  (avl_end((avl), AVL_LEFT))
+#define avl_tail(avl)	  (avl_end((avl), AVL_RIGHT))
+
+/*
+ * Search for a node in a private AVL; if the node cannot be found,
+ * its would-be parent is returned instead.
+ */
+#define DECLARE_AVL_SEARCH(__search_fn, __cmp)				\
+	struct avlh *__search_fn(const struct avl *const avl,		\
+				 const struct avlh *const node,		\
+				 int *const pdelta, int dir)		\
+	{								\
+		int delta = AVL_RIGHT;					\
+		struct avlh *holder = avl_top(avl), *next;		\
+									\
+		if (holder == NULL)					\
+			goto done;					\
+									\
+		for (;;) {						\
+			delta = __cmp(node, holder);			\
+			/*						\
+			 * Handle duplicate keys here, according to	\
+			 * "dir"; if dir is:				\
+			 * - AVL_LEFT, the leftmost node is returned,	\
+			 * - AVL_RIGHT, the rightmost node is returned,	\
+			 * - 0, the first match is returned.		\
+			 */						\
+			if (!(delta ?: dir))				\
+				break;					\
+			next = avlh_child(avl, holder, delta ?: dir);	\
+			if (next == NULL)				\
+				break;					\
+			holder = next;					\
+		}							\
+									\
+	  done:								\
+		*pdelta = delta;					\
+		return holder;						\
+	}
+
+#endif	/* !AVL_PSHARED */
+
+/*
+ * From "Bit twiddling hacks", returns v < 0 ? -1 : (v > 0 ? 1 : 0)
+ */
+#define avl_sign(v)				\
+	({					\
+		typeof(v) _v = (v);		\
+		((_v) > 0) - ((_v) < 0);	\
+	})
+
+/*
+ * Variation on the same theme.
+ */
+#define avl_cmp_sign(l, r)			\
+	({					\
+		typeof(l) _l = (l);		\
+		typeof(r) _r = (r);		\
+		(_l > _r) - (_l < _r);		\
+	})
+
+static inline struct __AVL_T(avlh) *
+__AVL(search_inner)(const struct __AVL_T(avl) *const avl,
+		    const struct __AVL_T(avlh) *n, int *delta,
+		    const struct __AVL_T(avl_searchops) *ops)
+{
+	return ops->search(avl, n, delta, 0);
+}
+
+static inline
+struct __AVL_T(avlh) *__AVL(gettop)(const struct __AVL_T(avl) *const avl)
+{
+	return __AVL(top)(avl);
+}
+
+static inline
+struct __AVL_T(avlh) *__AVL(gethead)(const struct __AVL_T(avl) *const avl)
+{
+	return __AVL(head)(avl);
+}
+
+static inline
+struct __AVL_T(avlh) *__AVL(gettail)(const struct __AVL_T(avl) *const avl)
+{
+	return __AVL(tail)(avl);
+}
+
+static inline
+unsigned int __AVL(getcount)(const struct __AVL_T(avl) *const avl)
+{
+	return __AVL(count)(avl);
+}
+
+struct __AVL_T(avlh) *__AVL(inorder)(const struct __AVL_T(avl) *const avl,
+				     struct __AVL_T(avlh) *holder,
+				     const int dir);
+
+struct __AVL_T(avlh) *__AVL(postorder)(const struct __AVL_T(avl) *const avl,
+				       struct __AVL_T(avlh) *const holder,
+				       const int dir);
+
+struct __AVL_T(avlh) *__AVL(preorder)(const struct __AVL_T(avl) *const avl,
+				      struct __AVL_T(avlh) *holder,
+				      const int dir);
+
+static inline struct __AVL_T(avlh) *
+__AVL(next)(const struct __AVL_T(avl) *const avl,
+	    struct __AVL_T(avlh) *const holder)
+{
+	return __AVL(inorder)(avl, holder, AVL_RIGHT);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(prev)(const struct __AVL_T(avl) *const avl,
+	    struct __AVL_T(avlh) *const holder)
+{
+	return __AVL(inorder)(avl, holder, AVL_LEFT);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(postorder_next)(const struct __AVL_T(avl) *const avl,
+		      struct __AVL_T(avlh) *const holder)
+{
+	return __AVL(postorder)(avl, holder, AVL_RIGHT);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(postorder_prev)(const struct __AVL_T(avl) *const avl,
+		      struct __AVL_T(avlh) *const holder)
+{
+	return __AVL(postorder)(avl, holder, AVL_LEFT);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(preorder_next)(const struct __AVL_T(avl) *const avl,
+		     struct __AVL_T(avlh) *const holder)
+{
+	return __AVL(preorder)(avl, holder, AVL_RIGHT);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(preorder_prev)(const struct __AVL_T(avl) *const avl,
+		     struct __AVL_T(avlh) *const holder)
+{
+	return __AVL(preorder)(avl, holder, AVL_LEFT);
+}
+
+static inline void __AVLH(init)(struct __AVL_T(avlh) *const holder)
+{
+	holder->balance = 0;
+	holder->type = 0;
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(search)(const struct __AVL_T(avl) *const avl,
+	      const struct __AVL_T(avlh) *node,
+	      const struct __AVL_T(avl_searchops) *ops)
+{
+	struct __AVL_T(avlh) *holder;
+	int delta;
+
+	holder = __AVL(search_inner)(avl, node, &delta, ops);
+	if (!delta)
+		return holder;
+
+	return NULL;
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(search_nearest)(const struct __AVL_T(avl) *const avl,
+		      const struct __AVL_T(avlh) *node, int dir,
+		      const struct __AVL_T(avl_searchops) *ops)
+{
+	struct __AVL_T(avlh) *holder;
+	int delta;
+
+	holder = __AVL(search_inner)(avl, node, &delta, ops);
+	if (!holder || delta != dir)
+		return holder;
+
+	return __AVL(inorder)(avl, holder, dir);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(search_le)(const struct __AVL_T(avl) *const avl,
+		 const struct __AVL_T(avlh) *node,
+		 const struct __AVL_T(avl_searchops) *ops)
+{
+	return __AVL(search_nearest)(avl, node, AVL_LEFT, ops);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(search_ge)(const struct __AVL_T(avl) *const avl,
+		 const struct __AVL_T(avlh) *node,
+		 const struct __AVL_T(avl_searchops) *ops)
+{
+	return __AVL(search_nearest)(avl, node, AVL_RIGHT, ops);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(search_multi)(const struct __AVL_T(avl) *const avl,
+		    const struct __AVL_T(avlh) *node, int dir,
+		    const struct __AVL_T(avl_searchops) *ops)
+{
+	struct __AVL_T(avlh) *holder;
+	int delta;
+
+	holder = ops->search(avl, node, &delta, dir);
+	if (!delta)
+		return holder;
+
+	if (!holder)
+		return NULL;
+
+	return __AVL(inorder)(avl, holder, -dir);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(search_first)(const struct __AVL_T(avl) *const avl,
+		    const struct __AVL_T(avlh) *node,
+		    const struct __AVL_T(avl_searchops) *ops)
+{
+	return __AVL(search_multi)(avl, node, AVL_LEFT, ops);
+}
+
+static inline struct __AVL_T(avlh) *
+__AVL(search_last)(const struct __AVL_T(avl) *const avl,
+		   const struct __AVL_T(avlh) *node,
+		   const struct __AVL_T(avl_searchops) *ops)
+{
+	return __AVL(search_multi)(avl, node, AVL_RIGHT, ops);
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+void __AVL(init)(struct __AVL_T(avl) *const avl);
+
+void __AVL(destroy)(struct __AVL_T(avl) *const avl);
+
+int __AVL(insert)(struct __AVL_T(avl) *const avl,
+		  struct __AVL_T(avlh) *const holder,
+		  const struct __AVL_T(avl_searchops) *ops);
+
+int __AVL(insert_front)(struct __AVL_T(avl) *avl,
+			struct __AVL_T(avlh) *holder,
+			const struct __AVL_T(avl_searchops) *ops);
+
+int __AVL(insert_back)(struct __AVL_T(avl) *avl,
+		       struct __AVL_T(avlh) *holder,
+		       const struct __AVL_T(avl_searchops) *ops);
+
+int __AVL(insert_at)(struct __AVL_T(avl) *const avl,
+		     struct __AVL_T(avlh) *parent, int dir,
+		     struct __AVL_T(avlh) *child);
+
+int __AVL(prepend)(struct __AVL_T(avl) *const avl,
+		   struct __AVL_T(avlh) *const holder,
+		   const struct __AVL_T(avl_searchops) *ops);
+
+int __AVL(append)(struct __AVL_T(avl) *const avl,
+		  struct __AVL_T(avlh) *const holder,
+		  const struct __AVL_T(avl_searchops) *ops);
+
+int __AVL(delete)(struct __AVL_T(avl) *const avl,
+		  struct __AVL_T(avlh) *node);
+
+int __AVL(replace)(struct __AVL_T(avl) *avl,
+		   struct __AVL_T(avlh) *oldh,
+		   struct __AVL_T(avlh) *newh,
+		   const struct __AVL_T(avl_searchops) *ops);
+
+struct __AVL_T(avlh) *__AVL(update)(struct __AVL_T(avl) *const avl,
+				    struct __AVL_T(avlh) *const holder,
+				    const struct __AVL_T(avl_searchops) *ops);
+
+struct __AVL_T(avlh) *__AVL(set)(struct __AVL_T(avl) *const avl,
+				 struct __AVL_T(avlh) *const holder,
+				 const struct __AVL_T(avl_searchops) *ops);
+
+void __AVL(clear)(struct __AVL_T(avl) *const avl,
+		  void (*destruct)(struct __AVL_T(avlh) *));
+
+int __AVL(check)(const struct __AVL_T(avl) *avl,
+		 const struct __AVL_T(avl_searchops) *ops);
+
+void __AVL(dump)(FILE *file, const struct __AVL_T(avl) *const avl,
+		 __AVL_T(avlh_prn_t) *prn, unsigned int indent,
+		 unsigned int len);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#undef __AVL
+#undef __AVLH
+#undef __AVL_T
+
+#endif /* !_BOILERPLATE_AVL_INNER_H */
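Putting the pieces above together, a sketch of a private (non-pshared) AVL keyed by an int (illustrative only; container_of() comes from boilerplate/compiler.h, and the tree is assumed to have been set up with avl_init()):

#include <boilerplate/avl.h>
#include <boilerplate/compiler.h>

struct keyed {
	struct avlh link;
	int key;
};

static int keyed_cmp(const struct avlh *const l, const struct avlh *const r)
{
	/* avl_cmp_sign() avoids the overflow a plain subtraction risks. */
	return avl_cmp_sign(container_of(l, struct keyed, link)->key,
			    container_of(r, struct keyed, link)->key);
}

/* Expands to the definition of keyed_search() using keyed_cmp(). */
static DECLARE_AVL_SEARCH(keyed_search, keyed_cmp)

static const struct avl_searchops keyed_ops = {
	.search = keyed_search,
	.cmp = keyed_cmp,
};

int add_key(struct avl *tree, struct keyed *node)
{
	avlh_init(&node->link);
	return avl_insert(tree, &node->link, &keyed_ops);
}

struct keyed *find_key(struct avl *tree, int key)
{
	struct keyed tmp = { .key = key };
	struct avlh *h = avl_search(tree, &tmp.link, &keyed_ops);

	return h ? container_of(h, struct keyed, link) : NULL;
}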
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/avl.h b/kernel/xenomai-v3.2.4/include/boilerplate/avl.h
new file mode 100644
index 0000000..57d8379
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/avl.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (c) 2018 Philippe Gerum
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _BOILERPLATE_AVL_H
+#define _BOILERPLATE_AVL_H
+
+#include <boilerplate/avl-inner.h>
+
+#endif /* !_BOILERPLATE_AVL_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/compiler.h b/kernel/xenomai-v3.2.4/include/boilerplate/compiler.h
new file mode 100644
index 0000000..263af6b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/compiler.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_COMPILER_H
+#define _BOILERPLATE_COMPILER_H
+
+#include <stddef.h>
+
+#define container_of(ptr, type, member)					\
+	({								\
+		const __typeof__(((type *)0)->member) *__mptr = (ptr);	\
+		(type *)((char *)__mptr - offsetof(type, member));	\
+	})
+
+#define __stringify_1(x...)	#x
+#define __stringify(x...)	__stringify_1(x)
+
+#ifndef __noreturn
+#define __noreturn	__attribute__((__noreturn__))
+#endif
+
+#ifndef __must_check
+#define __must_check	__attribute__((__warn_unused_result__))
+#endif
+
+#ifndef __weak
+#define __weak		__attribute__((__weak__))
+#endif
+
+#ifndef __maybe_unused
+#define __maybe_unused	__attribute__((__unused__))
+#endif
+
+#ifndef __aligned
+#define __aligned(__n)	__attribute__((aligned (__n)))
+#endif
+
+#ifndef __deprecated
+#define __deprecated	__attribute__((__deprecated__))
+#endif
+
+#ifndef __packed
+#define __packed	__attribute__((__packed__))
+#endif
+
+#ifndef __alloc_size
+#define __alloc_size(__args)	__attribute__((__alloc_size__(__args)))
+#endif
+
+#define __align_to(__size, __al)  (((__size) + (__al) - 1) & (~((__al) - 1)))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#define xenomai_count_trailing_zeros(x)					\
+	((x) == 0 ? (int)(sizeof(x) * __CHAR_BIT__)			\
+	: sizeof(x) <= sizeof(unsigned int) ?				\
+		__builtin_ctz((unsigned int)x)				\
+	: sizeof(x) <= sizeof(unsigned long) ?				\
+		__builtin_ctzl((unsigned long)x)			\
+	: __builtin_ctzll(x))
+
+#define xenomai_count_leading_zeros(x)					\
+	((x) == 0 ? (int)(sizeof(x) * __CHAR_BIT__)			\
+	: sizeof(x) <= sizeof(unsigned int) ?				\
+		__builtin_clz((unsigned int)x) +			\
+			(int)(sizeof(unsigned int) - sizeof(x))		\
+	: sizeof(x) <= sizeof(unsigned long) ?				\
+		__builtin_clzl((unsigned long)x)			\
+	: __builtin_clzll(x))
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BOILERPLATE_COMPILER_H */
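Two of the helpers above in action (illustrative only, not part of the patch):

#include <stdio.h>
#include <boilerplate/compiler.h>

struct packet {
	int seq;
	char payload[32];
};

int main(void)
{
	struct packet p;
	int *field = &p.seq;

	/* Recover the enclosing structure from a member pointer. */
	struct packet *back = container_of(field, struct packet, seq);

	/* __align_to() rounds up to a power-of-two boundary: 33 -> 48. */
	printf("same object: %d, aligned: %lu\n",
	       back == &p, (unsigned long)__align_to(33, 16));
	return 0;
}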
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/debug.h b/kernel/xenomai-v3.2.4/include/boilerplate/debug.h
new file mode 100644
index 0000000..248cb90
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/debug.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_DEBUG_H
+#define _BOILERPLATE_DEBUG_H
+
+#include <stdint.h>
+#include <stddef.h>
+#include <xeno_config.h>
+
+#ifdef CONFIG_XENO_DEBUG
+
+#include <pthread.h>
+#include <boilerplate/compiler.h>
+
+static inline int must_check(void)
+{
+	return 1;
+}
+
+struct error_frame {
+	int retval;
+	int lineno;
+	const char *fn;
+	const char *file;
+	struct error_frame *next;
+};
+
+struct backtrace_data {
+	const char *name;
+	struct error_frame *inner;
+	pthread_mutex_t lock;
+	char eundef[16];
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void backtrace_init_context(struct backtrace_data *btd,
+			    const char *name);
+
+void backtrace_destroy_context(struct backtrace_data *btd);
+
+void backtrace_dump(struct backtrace_data *btd);
+
+void backtrace_log(int retval, const char *fn,
+		   const char *file, int lineno);
+
+void backtrace_check(void);
+
+void __debug(const char *name, const char *fmt, ...);
+
+char *__get_error_buf(size_t *sizep);
+
+void debug_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#define __bt(__exp)						\
+	({							\
+		typeof(__exp) __ret = (__exp);			\
+		if (__ret < 0)					\
+			backtrace_log((int)__ret, __FUNCTION__,	\
+				      __FILE__, __LINE__);	\
+		__ret;						\
+	})
+
+#define __bterrno(__exp)					\
+	({							\
+		typeof(__exp) __ret = (__exp);			\
+		if (__ret < 0)					\
+			backtrace_log(-errno, __FUNCTION__,	\
+				      __FILE__, __LINE__);	\
+		__ret;						\
+	})
+
+#else /* !CONFIG_XENO_DEBUG */
+
+static inline int must_check(void)
+{
+	return 0;
+}
+
+struct backtrace_data {
+};
+
+#define __bt(__exp)			(__exp)
+
+#define __bterrno(__exp)		(__exp)
+
+#define backtrace_init_context(btd, name)	\
+	do { (void)(btd); (void)(name); } while (0)
+
+#define backtrace_destroy_context(btd)	\
+	do { (void)(btd); } while (0)
+
+#define backtrace_dump(btd)		\
+	do { (void)(btd); } while (0)
+
+#define backtrace_check()		\
+	do { } while (0)
+/*
+ * XXX: We have no thread-private backtrace context in non-debug mode,
+ * so there is a potential race if multiple threads want to write to
+ * this buffer. This looks acceptable though, since this is primarily
+ * a debug feature, and the race won't damage the system anyway.
+ */
+#define __get_error_buf(sizep)			\
+	({					\
+		static char __buf[16];		\
+		*(sizep) = sizeof(__buf);	\
+		__buf;				\
+	})
+
+#define debug_init()	do { } while (0)
+
+#endif /* !CONFIG_XENO_DEBUG */
+
+static inline int bad_pointer(const void *ptr)
+{
+	return ptr == NULL ||
+		((intptr_t)ptr & (intptr_t)(sizeof(intptr_t)-1)) != 0;
+}
+
+#endif /* _BOILERPLATE_DEBUG_H */
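A sketch of the __bt() helper above (illustrative only; with CONFIG_XENO_DEBUG disabled, it compiles down to the bare expression):

#include <errno.h>
#include <boilerplate/debug.h>

static int open_device(void)
{
	return -ENODEV;		/* hypothetical failure */
}

int init_stage(void)
{
	/*
	 * In debug builds, any negative return value is logged with
	 * function/file/line, so nested __bt() wrappers accumulate a
	 * poor man's backtrace along the error path.
	 */
	return __bt(open_device());
}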
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/hash.h b/kernel/xenomai-v3.2.4/include/boilerplate/hash.h
new file mode 100644
index 0000000..ef62e13
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/hash.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _BOILERPLATE_HASH_H
+#define _BOILERPLATE_HASH_H
+
+#include <pthread.h>
+#include <boilerplate/list.h>
+
+#define HASHSLOTS  (1<<8)
+
+struct hashobj {
+	dref_type(const void *) key;
+#ifdef CONFIG_XENO_PSHARED
+	char static_key[16];
+#endif
+	size_t len;
+	struct holder link;
+};
+
+struct hash_bucket {
+	struct listobj obj_list;
+};
+
+struct hash_table {
+	struct hash_bucket table[HASHSLOTS];
+	pthread_mutex_t lock;
+};
+
+struct hash_operations {
+	int (*compare)(const void *l,
+		       const void *r,
+		       size_t len);
+#ifdef CONFIG_XENO_PSHARED
+	int (*probe)(struct hashobj *oldobj);
+	void *(*alloc)(size_t len);
+	void (*free)(void *key);
+#endif
+};
+
+typedef int (*hash_walk_op)(struct hash_table *t,
+			    struct hashobj *obj,
+			    void *arg);
+
+#ifdef CONFIG_XENO_PSHARED
+
+/* Private version - h-table is not shareable between processes. */
+
+struct pvhashobj {
+	const void *key;
+	size_t len;
+	struct pvholder link;
+};
+
+struct pvhash_bucket {
+	struct pvlistobj obj_list;
+};
+
+struct pvhash_table {
+	struct pvhash_bucket table[HASHSLOTS];
+	pthread_mutex_t lock;
+};
+
+struct pvhash_operations {
+	int (*compare)(const void *l,
+		       const void *r,
+		       size_t len);
+};
+
+typedef int (*pvhash_walk_op)(struct pvhash_table *t,
+			      struct pvhashobj *obj,
+			      void *arg);
+
+#else /* !CONFIG_XENO_PSHARED */
+#define pvhashobj		hashobj
+#define pvhash_bucket		hash_bucket
+#define pvhash_table		hash_table
+#define pvhash_walk_op		hash_walk_op
+#endif /* !CONFIG_XENO_PSHARED */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+unsigned int __hash_key(const void *key,
+			size_t length, unsigned int c);
+
+void __hash_init(void *heap, struct hash_table *t);
+
+int __hash_enter(struct hash_table *t,
+		 const void *key, size_t len,
+		 struct hashobj *newobj,
+		 const struct hash_operations *hops,
+		 int nodup);
+
+static inline void hash_init(struct hash_table *t)
+{
+	__hash_init(__main_heap, t);
+}
+
+void hash_destroy(struct hash_table *t);
+
+static inline int hash_enter(struct hash_table *t,
+			     const void *key, size_t len,
+			     struct hashobj *newobj,
+			     const struct hash_operations *hops)
+{
+	return __hash_enter(t, key, len, newobj, hops, 1);
+}
+
+static inline int hash_enter_dup(struct hash_table *t,
+				 const void *key, size_t len,
+				 struct hashobj *newobj,
+				 const struct hash_operations *hops)
+{
+	return __hash_enter(t, key, len, newobj, hops, 0);
+}
+
+int hash_remove(struct hash_table *t, struct hashobj *delobj,
+		const struct hash_operations *hops);
+
+struct hashobj *hash_search(struct hash_table *t,
+			    const void *key, size_t len,
+			    const struct hash_operations *hops);
+
+int hash_walk(struct hash_table *t,
+	      hash_walk_op walk, void *arg);
+
+#ifdef CONFIG_XENO_PSHARED
+
+int __hash_enter_probe(struct hash_table *t,
+		       const void *key, size_t len,
+		       struct hashobj *newobj,
+		       const struct hash_operations *hops,
+		       int nodup);
+
+int __pvhash_enter(struct pvhash_table *t,
+		   const void *key, size_t len,
+		   struct pvhashobj *newobj,
+		   const struct pvhash_operations *hops,
+		   int nodup);
+
+static inline
+int hash_enter_probe(struct hash_table *t,
+		     const void *key, size_t len,
+		     struct hashobj *newobj,
+		     const struct hash_operations *hops)
+{
+	return __hash_enter_probe(t, key, len, newobj, hops, 1);
+}
+
+static inline
+int hash_enter_probe_dup(struct hash_table *t,
+			 const void *key, size_t len,
+			 struct hashobj *newobj,
+			 const struct hash_operations *hops)
+{
+	return __hash_enter_probe(t, key, len, newobj, hops, 0);
+}
+
+struct hashobj *hash_search_probe(struct hash_table *t,
+				  const void *key, size_t len,
+				  const struct hash_operations *hops);
+
+void pvhash_init(struct pvhash_table *t);
+
+static inline
+int pvhash_enter(struct pvhash_table *t,
+		 const void *key, size_t len,
+		 struct pvhashobj *newobj,
+		 const struct pvhash_operations *hops)
+{
+	return __pvhash_enter(t, key, len, newobj, hops, 1);
+}
+
+static inline
+int pvhash_enter_dup(struct pvhash_table *t,
+		     const void *key, size_t len,
+		     struct pvhashobj *newobj,
+		     const struct pvhash_operations *hops)
+{
+	return __pvhash_enter(t, key, len, newobj, hops, 0);
+}
+
+int pvhash_remove(struct pvhash_table *t, struct pvhashobj *delobj,
+		  const struct pvhash_operations *hops);
+
+struct pvhashobj *pvhash_search(struct pvhash_table *t,
+				const void *key, size_t len,
+				const struct pvhash_operations *hops);
+
+int pvhash_walk(struct pvhash_table *t,
+		pvhash_walk_op walk, void *arg);
+
+#else /* !CONFIG_XENO_PSHARED */
+#define pvhash_init		hash_init
+#define pvhash_enter		hash_enter
+#define pvhash_enter_dup	hash_enter_dup
+#define pvhash_remove		hash_remove
+#define pvhash_search		hash_search
+#define pvhash_walk		hash_walk
+#define pvhash_operations	hash_operations
+#endif /* !CONFIG_XENO_PSHARED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BOILERPLATE_HASH_H */
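A sketch of the table API above for a private (non-pshared) build, where plain memcmp() is a sufficient compare hook and the pshared-only alloc/probe/free hooks are not needed (illustrative only):

#include <string.h>
#include <boilerplate/hash.h>
#include <boilerplate/compiler.h>

struct item {
	struct hashobj hobj;
	int value;
};

static const struct hash_operations hops = {
	.compare = memcmp,	/* byte-wise key comparison */
};

static struct hash_table index_table;

void setup_index(void)
{
	hash_init(&index_table);
}

int add_item(struct item *it, const char *key)
{
	/* The nodup variant rejects keys already present. */
	return hash_enter(&index_table, key, strlen(key), &it->hobj, &hops);
}

struct item *find_item(const char *key)
{
	struct hashobj *obj =
		hash_search(&index_table, key, strlen(key), &hops);

	return obj ? container_of(obj, struct item, hobj) : NULL;
}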
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h b/kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h
new file mode 100644
index 0000000..0ddd1ce
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/heapmem.h
@@ -0,0 +1,155 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_HEAPMEM_H
+#define _BOILERPLATE_HEAPMEM_H
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <limits.h>
+#include <boilerplate/list.h>
+#include <boilerplate/lock.h>
+#include <boilerplate/avl.h>
+
+#define HEAPMEM_PAGE_SHIFT	9 /* 2^9 => 512 bytes */
+#define HEAPMEM_PAGE_SIZE	(1UL << HEAPMEM_PAGE_SHIFT)
+#define HEAPMEM_PAGE_MASK	(~(HEAPMEM_PAGE_SIZE - 1))
+#define HEAPMEM_MIN_LOG2	4 /* 16 bytes */
+/*
+ * Use bucketed memory for sizes between 2^HEAPMEM_MIN_LOG2 and
+ * 2^(HEAPMEM_PAGE_SHIFT-1).
+ */
+#define HEAPMEM_MAX		(HEAPMEM_PAGE_SHIFT - HEAPMEM_MIN_LOG2)
+#define HEAPMEM_MIN_ALIGN	(1U << HEAPMEM_MIN_LOG2)
+/* Max size of an extent (4GB - HEAPMEM_PAGE_SIZE). */
+#define HEAPMEM_MAX_EXTSZ	(4294967295U - HEAPMEM_PAGE_SIZE + 1)
+/* Bits we need for encoding a page # */
+#define HEAPMEM_PGENT_BITS      (32 - HEAPMEM_PAGE_SHIFT)
+
+/* Each page is represented by a page map entry. */
+#define HEAPMEM_PGMAP_BYTES	sizeof(struct heapmem_pgentry)
+
+struct heapmem_pgentry {
+	/* Linkage in bucket list. */
+	unsigned int prev : HEAPMEM_PGENT_BITS;
+	unsigned int next : HEAPMEM_PGENT_BITS;
+	/* Block class: page_list, or log2 of the bucket block size. */
+	unsigned int type : 6;
+	/*
+	 * We hold either a spatial map of busy blocks within the page
+	 * for bucketed memory (up to 32 blocks per page), or the
+	 * overall size of the multi-page block if entry.type ==
+	 * page_list.
+	 */
+	union {
+		uint32_t map;
+		uint32_t bsize;
+	};
+};
+
+/*
+ * A range descriptor is stored at the beginning of the first page of
+ * a range of free pages. heapmem_range.size is nrpages *
+ * HEAPMEM_PAGE_SIZE. Ranges are indexed by address and size in AVL
+ * trees.
+ */
+struct heapmem_range {
+	struct avlh addr_node;
+	struct avlh size_node;
+	size_t size;
+};
+
+struct heapmem_extent {
+	struct pvholder next;
+	void *membase;		/* Base of page array */
+	void *memlim;		/* Limit of page array */
+	struct avl addr_tree;
+	struct avl size_tree;
+	struct heapmem_pgentry pagemap[0]; /* Start of page entries[] */
+};
+
+struct heap_memory {
+	pthread_mutex_t lock;
+	struct pvlistobj extents;
+	size_t arena_size;
+	size_t usable_size;
+	size_t used_size;
+	/* Heads of page lists for log2-sized blocks. */
+	uint32_t buckets[HEAPMEM_MAX];
+};
+
+#define __HEAPMEM_MAP_SIZE(__nrpages)					\
+	((__nrpages) * HEAPMEM_PGMAP_BYTES)
+
+#define __HEAPMEM_ARENA_SIZE(__size)					\
+	(__size +							\
+	 __align_to(sizeof(struct heapmem_extent) +			\
+		    __HEAPMEM_MAP_SIZE((__size) >> HEAPMEM_PAGE_SHIFT),	\
+		    HEAPMEM_MIN_ALIGN))
+
+/*
+ * Calculate the minimal size of the memory arena needed to contain a
+ * heap of __user_size bytes, including our meta data for managing it.
+ * Usable at build time if __user_size is constant.
+ */
+#define HEAPMEM_ARENA_SIZE(__user_size)					\
+	__HEAPMEM_ARENA_SIZE(__align_to(__user_size, HEAPMEM_PAGE_SIZE))
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int heapmem_init(struct heap_memory *heap,
+		 void *mem, size_t size);
+
+int heapmem_extend(struct heap_memory *heap,
+		   void *mem, size_t size);
+
+void heapmem_destroy(struct heap_memory *heap);
+
+void *heapmem_alloc(struct heap_memory *heap,
+		    size_t size) __alloc_size(2);
+
+int heapmem_free(struct heap_memory *heap,
+		 void *block);
+
+static inline
+size_t heapmem_arena_size(const struct heap_memory *heap)
+{
+	return heap->arena_size;
+}
+
+static inline
+size_t heapmem_usable_size(const struct heap_memory *heap)
+{
+	return heap->usable_size;
+}
+
+static inline
+size_t heapmem_used_size(const struct heap_memory *heap)
+{
+	return heap->used_size;
+}
+
+ssize_t heapmem_check(struct heap_memory *heap,
+		      void *block);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BOILERPLATE_HEAPMEM_H */
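A sketch of carving a fixed-size heap out of a static arena with the allocator above (illustrative only, not part of the patch):

#include <boilerplate/heapmem.h>

/*
 * HEAPMEM_ARENA_SIZE() adds the room the allocator needs for its own
 * extent header and page map, so the requested 64k stays usable.
 */
static char arena[HEAPMEM_ARENA_SIZE(64 * 1024)];
static struct heap_memory heap;

int setup_heap(void)
{
	return heapmem_init(&heap, arena, sizeof(arena));
}

void *grab(size_t size)
{
	return heapmem_alloc(&heap, size);	/* NULL when exhausted */
}

void drop(void *block)
{
	heapmem_free(&heap, block);	/* status ignored in this sketch */
}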
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/libc.h b/kernel/xenomai-v3.2.4/include/boilerplate/libc.h
new file mode 100644
index 0000000..44ddad5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/libc.h
@@ -0,0 +1,296 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_LIBC_H
+#define _BOILERPLATE_LIBC_H
+
+#include <limits.h>
+
+#ifdef __IN_XENO__
+/*
+ * Quirks for dealing with outdated libc* issues.  This header will be
+ * parsed by the Xenomai implementation only; applications based on it
+ * have to provide their own set of wrappers, since they should decide
+ * for themselves what to do when a feature is missing.
+ */
+#include <xeno_config.h>
+#include <errno.h>
+#include <boilerplate/compiler.h>
+
+#if !HAVE_DECL_PTHREAD_PRIO_NONE
+enum {
+	PTHREAD_PRIO_NONE,
+	PTHREAD_PRIO_INHERIT,
+	PTHREAD_PRIO_PROTECT
+};
+#endif /* !HAVE_DECL_PTHREAD_PRIO_NONE */
+
+#ifndef HAVE_FORK
+static inline int fork(void)
+{
+	errno = ENOSYS;
+	return -1;
+}
+#endif
+
+#ifndef HAVE_PTHREAD_ATFORK
+#ifndef HAVE_FORK
+static inline
+int pthread_atfork(void (*prepare)(void), void (*parent)(void),
+		   void (*child)(void))
+{
+	return 0;
+}
+#else
+#error "fork() without pthread_atfork()"
+#endif
+#endif /* !HAVE_PTHREAD_ATFORK */
+
+#ifndef HAVE_PTHREAD_GETATTR_NP
+static inline
+int pthread_getattr_np(pthread_t th, pthread_attr_t *attr)
+{
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_GETATTR_NP */
+
+#ifndef HAVE_PTHREAD_CONDATTR_SETCLOCK
+static inline
+int pthread_condattr_setclock(pthread_condattr_t *__restrict__ attr,
+			      clockid_t clock_id)
+{
+	return clock_id == CLOCK_REALTIME ? 0 : ENOSYS;
+}
+#endif	/* !HAVE_PTHREAD_CONDATTR_SETCLOCK */
+
+#ifndef HAVE_PTHREAD_CONDATTR_GETCLOCK
+static inline
+int pthread_condattr_getclock(const pthread_condattr_t *__restrict__ attr,
+			      clockid_t *__restrict__ clock_id)
+{
+	*clock_id = CLOCK_REALTIME;
+
+	return 0;
+}
+#endif	/* !HAVE_PTHREAD_CONDATTR_GETCLOCK */
+
+#ifndef HAVE_PTHREAD_MUTEXATTR_SETPROTOCOL
+static inline
+int pthread_mutexattr_setprotocol(pthread_mutexattr_t *__restrict__ attr,
+				  int protocol)
+{
+	return protocol == PTHREAD_PRIO_NONE ? 0 : ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_MUTEXATTR_SETPROTOCOL */
+
+#ifndef HAVE_PTHREAD_MUTEXATTR_GETPROTOCOL
+static inline
+int pthread_mutexattr_getprotocol(const pthread_mutexattr_t *
+				  __restrict__ attr, int *__restrict__ protocol)
+{
+	*protocol = PTHREAD_PRIO_NONE;
+
+	return 0;
+}
+#endif /* !HAVE_PTHREAD_MUTEXATTR_GETPROTOCOL */
+
+#ifndef HAVE_PTHREAD_MUTEXATTR_SETPRIOCEILING
+static inline
+int pthread_mutexattr_setprioceiling(pthread_mutexattr_t *attr,
+				     int prioceiling)
+{
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_MUTEXATTR_SETPRIOCEILING */
+
+#ifndef HAVE_PTHREAD_MUTEXATTR_GETPRIOCEILING
+static inline
+int pthread_mutexattr_getprioceiling(const pthread_mutexattr_t *
+				      __restrict attr,
+				     int *__restrict prioceiling)
+{
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_MUTEXATTR_GETPRIOCEILING */
+
+#ifndef HAVE_PTHREAD_MUTEX_SETPRIOCEILING
+static inline
+int pthread_mutex_setprioceiling(pthread_mutex_t *__restrict attr,
+				 int prioceiling,
+				 int *__restrict old_ceiling)
+{
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_MUTEXATTR_SETPRIOCEILING */
+
+#ifndef HAVE_PTHREAD_MUTEX_GETPRIOCEILING
+static inline
+int pthread_mutex_getprioceiling(pthread_mutex_t *__restrict attr,
+				 int *__restrict prioceiling)
+{
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_MUTEXATTR_GETPRIOCEILING */
+
+#ifndef HAVE_PTHREAD_ATTR_SETAFFINITY_NP
+#include <sched.h>
+static inline
+int pthread_attr_setaffinity_np(pthread_attr_t *attr,
+				size_t cpusetsize, const cpu_set_t *cpuset)
+{
+	if (CPU_ISSET(0, cpuset) && CPU_COUNT(cpuset) == 1)
+		return 0;
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_ATTR_SETAFFINITY_NP */
+
+#ifndef HAVE_PTHREAD_SETAFFINITY_NP
+static inline
+int pthread_setaffinity_np(pthread_t thread, size_t cpusetsize,
+			   const cpu_set_t *cpuset)
+{
+	if (CPU_ISSET(0, cpuset) && CPU_COUNT(cpuset) == 1)
+		return 0;
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_SETAFFINITY_NP */
+
+#ifndef HAVE_PTHREAD_SETSCHEDPRIO
+
+static inline
+int pthread_setschedprio(pthread_t thread, int prio)
+{
+	struct sched_param param;
+	int policy, ret;
+
+	ret = pthread_getschedparam(thread, &policy, &param);
+	if (ret)
+		return ret;
+
+	param.sched_priority = prio;
+
+	return pthread_setschedparam(thread, policy, &param);
+}
+
+#endif /* !HAVE_PTHREAD_SETSCHEDPRIO */
+
+#if !defined(HAVE_CLOCK_NANOSLEEP) && defined(CONFIG_XENO_MERCURY)
+/*
+ * Best effort for a Mercury setup based on an outdated libc lacking
+ * "advanced" real-time support.  Too bad if the system clock is set
+ * during sleep time. This is a non-issue for Cobalt, as the libcobalt
+ * implementation will always be picked instead.
+ */
+__weak int clock_nanosleep(clockid_t clock_id, int flags,
+			   const struct timespec *request,
+			   struct timespec *remain)
+{
+	struct timespec now, tmp;
+
+	tmp = *request;
+	if (flags) {
+		clock_gettime(CLOCK_REALTIME, &now);
+		tmp.tv_sec -= now.tv_sec;
+		tmp.tv_nsec -= now.tv_nsec;
+		if (tmp.tv_nsec < 0) {
+			tmp.tv_sec--;
+			tmp.tv_nsec += 1000000000;
+		}
+	}
+
+	return nanosleep(&tmp, remain);
+}
+#endif /* !HAVE_CLOCK_NANOSLEEP && MERCURY */
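+
+/*
+ * Usage sketch (illustrative; deadline is a hypothetical local
+ * variable): sleep until an absolute point in time, which the
+ * fallback above converts into a relative delay.
+ *
+ *	struct timespec deadline;
+ *
+ *	clock_gettime(CLOCK_REALTIME, &deadline);
+ *	deadline.tv_sec++;	// one second from now
+ *	clock_nanosleep(CLOCK_REALTIME, TIMER_ABSTIME, &deadline, NULL);
+ */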
+
+#ifndef HAVE_SCHED_GETCPU
+/*
+ * Might be declared in uClibc headers but not actually implemented,
+ * so we make the placeholder a weak symbol.
+ */
+__weak int sched_getcpu(void)
+{
+	return 0;   /* outdated uClibc: assume uniprocessor. */
+}
+#endif /* !HAVE_SCHED_GETCPU */
+
+#ifndef HAVE_SHM_OPEN
+__weak int shm_open(const char *name, int oflag, mode_t mode)
+{
+	errno = ENOSYS;
+	return -1;
+}
+#endif	/* !HAVE_SHM_OPEN */
+
+#ifndef HAVE_SHM_UNLINK
+__weak int shm_unlink(const char *name)
+{
+	errno = ENOSYS;
+	return -1;
+}
+#endif	/* !HAVE_SHM_UNLINK */
+
+#ifndef HAVE_PTHREAD_MUTEXATTR_SETROBUST
+#ifdef HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP
+#define pthread_mutexattr_setrobust	pthread_mutexattr_setrobust_np
+#else
+static inline
+int pthread_mutexattr_setrobust(pthread_mutexattr_t *attr,
+				int robustness)
+{
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_MUTEXATTR_SETROBUST_NP */
+#endif /* !HAVE_PTHREAD_MUTEXATTR_SETROBUST */
+
+#if !defined(HAVE_PTHREAD_SETNAME_NP) && defined(CONFIG_XENO_MERCURY)
+static inline
+int pthread_setname_np(pthread_t thread, const char *name)
+{
+	return ENOSYS;
+}
+#endif /* !HAVE_PTHREAD_SETNAME_NP && MERCURY */
+
+#endif /* __IN_XENO__ */
+
+#if defined(__COBALT_WRAP__) || defined(__IN_XENO__)
+/*
+ * clock_nanosleep() and pthread_setname_np() must be declared when the
+ * libc does not declare them, both for compiling Xenomai and for
+ * compiling applications which wrap these symbols to the libcobalt
+ * versions.
+ */
+#ifndef HAVE_CLOCK_NANOSLEEP
+int clock_nanosleep(clockid_t clock_id, int flags,
+		    const struct timespec *request,
+		    struct timespec *remain);
+#endif /* !HAVE_CLOCK_NANOSLEEP */
+
+#ifndef HAVE_PTHREAD_SETNAME_NP
+int pthread_setname_np(pthread_t thread, const char *name);
+#endif /* !HAVE_PTHREAD_SETNAME_NP */
+#endif /* __COBALT_WRAP__ || __IN_XENO__ */
+
+#ifndef PTHREAD_STACK_DEFAULT
+#define PTHREAD_STACK_DEFAULT			\
+	({					\
+		int __ret = PTHREAD_STACK_MIN;	\
+		if (__ret < 65536)		\
+			__ret = 65536;		\
+		__ret;				\
+	})
+#endif /* !PTHREAD_STACK_DEFAULT */
+
+#endif /* _BOILERPLATE_LIBC_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/list.h b/kernel/xenomai-v3.2.4/include/boilerplate/list.h
new file mode 100644
index 0000000..97fbc12
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/list.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_LIST_H
+#define _BOILERPLATE_LIST_H
+
+#include <assert.h>
+#include <boilerplate/scope.h>
+#include <boilerplate/compiler.h>
+#include <boilerplate/shared-list.h>
+#include <boilerplate/private-list.h>
+
+/*
+ * WARNING: callers assume that ALL list services are free from POSIX
+ * cancellation points, which allows the *_nocancel() locking forms to
+ * be used (see boilerplate/lock.h).
+ *
+ * Keep this in mind when adding any debug instrumentation invoking
+ * printf() and the like.
+ */
+
+#endif /* !_BOILERPLATE_LIST_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/lock.h b/kernel/xenomai-v3.2.4/include/boilerplate/lock.h
new file mode 100644
index 0000000..df3469d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/lock.h
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _BOILERPLATE_LOCK_H
+#define _BOILERPLATE_LOCK_H
+
+#include <pthread.h>
+#include <boilerplate/wrappers.h>
+#include <boilerplate/debug.h>
+
+/*
+ * CANCEL_DEFER/RESTORE() should enclose any emulator code which is
+ * about to change the system state while holding a lock, or by
+ * invoking inner boilerplate/copperplate services (which usually take
+ * such locks). A proper cleanup handler should be pushed prior to
+ * acquiring such a lock.
+ *
+ * These macros ensure that the cancellation type is switched to
+ * deferred mode while the section is traversed, then restored to its
+ * original value upon exit.
+ *
+ * WARNING: inner services MAY ASSUME that cancellability is deferred
+ * for the caller, so you really want to define protected sections as
+ * required in the higher interface layers.
+ */
+struct service {
+	int cancel_type;
+};
+
+#ifdef CONFIG_XENO_ASYNC_CANCEL
+
+#define CANCEL_DEFER(__s)					\
+	do {								\
+		pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED,		\
+				      &(__s).cancel_type);		\
+	} while (0)
+
+#define CANCEL_RESTORE(__s)					\
+	do {								\
+		pthread_setcanceltype((__s).cancel_type, NULL);		\
+		backtrace_check();					\
+	} while (0)
+
+#else  /* !CONFIG_XENO_ASYNC_CANCEL */
+
+#define CANCEL_DEFER(__s)	do { (void)(__s); } while (0)
+
+#define CANCEL_RESTORE(__s)	do { } while (0)
+
+#endif  /* !CONFIG_XENO_ASYNC_CANCEL */
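+
+/*
+ * Usage sketch (illustrative; lock is a hypothetical pthread_mutex_t):
+ * bracket a state-changing section so that an asynchronous
+ * cancellation request cannot fire while the lock is held.
+ *
+ *	struct service svc;
+ *
+ *	CANCEL_DEFER(svc);
+ *	write_lock_nocancel(&lock);
+ *	... update the system state, no cancellation point here ...
+ *	write_unlock(&lock);
+ *	CANCEL_RESTORE(svc);
+ */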
+
+struct cleanup_block {
+	pthread_mutex_t *lock;
+	void (*handler)(void *arg);
+	void *arg;
+};
+
+#define __push_cleanup_args(__cb, __lock, __fn, __arg)	\
+	((__cb)->lock = (__lock)),			\
+	((__cb)->handler = (void (*)(void *))(__fn)),	\
+	((__cb)->arg = (__arg))
+
+#define push_cleanup_handler(__cb, __lock, __fn, __arg)			\
+	pthread_cleanup_push((void (*)(void *))__run_cleanup_block,	\
+			     (__push_cleanup_args(__cb, __lock, __fn, __arg), (__cb)))
+
+#define pop_cleanup_handler(__cb)	\
+	pthread_cleanup_pop(0)
+
+#define push_cleanup_lock(__lock)	\
+	pthread_cleanup_push((void (*)(void *))__RT(pthread_mutex_unlock), (__lock))
+
+#define pop_cleanup_lock(__lock)	\
+	pthread_cleanup_pop(0)
+
+#ifdef CONFIG_XENO_DEBUG
+int __check_cancel_type(const char *locktype);
+#else
+#define __check_cancel_type(__locktype)				\
+	({ (void)__locktype; 0; })
+#endif
+
+#define __do_lock(__lock, __op)					\
+	({							\
+		int __ret;					\
+		__ret = -__RT(pthread_mutex_##__op(__lock));	\
+		__ret;						\
+	})
+
+#define __do_lock_nocancel(__lock, __type, __op)			\
+	({								\
+		__bt(__check_cancel_type(#__op "_nocancel"));		\
+		__do_lock(__lock, __op);				\
+	})
+
+#define __do_unlock(__lock)					\
+	({							\
+		int __ret;					\
+		__ret = -__RT(pthread_mutex_unlock(__lock));	\
+		__ret;						\
+	})
+
+/*
+ * Macros to enter/leave critical sections within inner
+ * routines. Actually, they are mainly aimed at self-documenting the
+ * code, by specifying basic assumption(s) about the code being
+ * traversed. In effect, they are currently aliases to the standard
+ * pthread_mutex_* API, except for the _safe form.
+ *
+ * The _nocancel suffix indicates that no cancellation point is
+ * traversed by the protected code, therefore we don't need any
+ * cleanup handler since we are guaranteed to run in deferred cancel
+ * mode after CANCEL_DEFER(). A runtime check is inserted in
+ * debug mode, which triggers when cancellability is not in deferred
+ * mode while an attempt is made to acquire a _nocancel lock.
+ *
+ * read/write_lock() forms must be enclosed within the scope of a
+ * cleanup handler since the protected code may reach cancellation
+ * points. push_cleanup_lock() is a simple shorthand to push
+ * pthread_mutex_unlock as the cleanup handler.
+ */
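+
+/*
+ * Usage sketch (illustrative; lock is a hypothetical pthread_mutex_t)
+ * for the forms below: the section may reach a cancellation point, so
+ * the unlock handler is pushed first.
+ *
+ *	push_cleanup_lock(&lock);
+ *	write_lock(&lock);
+ *	... code which may hit a cancellation point ...
+ *	write_unlock(&lock);
+ *	pop_cleanup_lock(&lock);
+ */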
+#define read_lock(__lock)			\
+	__do_lock(__lock, lock)
+
+#define read_trylock(__lock)			\
+	__do_lock(__lock, trylock)
+
+#define read_lock_nocancel(__lock)		\
+	__do_lock_nocancel(__lock, read_lock, lock)
+
+#define read_trylock_nocancel(__lock)		\
+	__do_lock_nocancel(__lock, read_trylock, trylock)
+
+#define read_unlock(__lock)			\
+	__do_unlock(__lock)
+
+#define write_lock(__lock)			\
+	__do_lock(__lock, lock)
+
+#define write_trylock(__lock)			\
+	__do_lock(__lock, trylock)
+
+#define write_lock_nocancel(__lock)		\
+	__do_lock_nocancel(__lock, write_lock, lock)
+
+#define write_trylock_nocancel(__lock)		\
+	__do_lock_nocancel(__lock, write_trylock, trylock)
+
+#define write_unlock(__lock)			\
+	__do_unlock(__lock)
+
+#define __do_lock_safe(__lock, __state, __op)				\
+	({								\
+		int __ret, __oldstate;					\
+		__bt(__check_cancel_type(#__op "_safe"));		\
+		pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &__oldstate); \
+		__ret = -__RT(pthread_mutex_##__op(__lock));		\
+		if (__ret)						\
+			pthread_setcancelstate(__oldstate, NULL);	\
+		__state = __oldstate;					\
+		__ret;							\
+	})
+
+#define __do_unlock_safe(__lock, __state)				\
+	({								\
+		int __ret, __restored_state = __state;			\
+		__ret = -__RT(pthread_mutex_unlock(__lock));		\
+		pthread_setcancelstate(__restored_state, NULL);		\
+		__ret;							\
+	})
+
+/*
+ * The _safe call form is available when undoing the changes from an
+ * update section upon cancellation using a cleanup handler is not an
+ * option (e.g. too complex), or in situations where the protected
+ * code shall fully run; in such cases, cancellation is disabled
+ * throughout the section.
+ */
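+
+/*
+ * Usage sketch (illustrative; lock is a hypothetical pthread_mutex_t):
+ * cancellation stays disabled from lock acquisition until
+ * write_unlock_safe() restores the previous cancel state.
+ *
+ *	int state;
+ *
+ *	write_lock_safe(&lock, state);
+ *	... code which must run to completion ...
+ *	write_unlock_safe(&lock, state);
+ */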
+
+#define write_lock_safe(__lock, __state)	\
+	__do_lock_safe(__lock, __state, lock)
+
+#define write_trylock_safe(__lock, __state)	\
+	__do_lock_safe(__lock, __state, trylock)
+
+#define write_unlock_safe(__lock, __state)	\
+	__do_unlock_safe(__lock, __state)
+
+#define read_lock_safe(__lock, __state)	\
+	__do_lock_safe(__lock, __state, lock)
+
+#define read_unlock_safe(__lock, __state)	\
+	__do_unlock_safe(__lock, __state)
+
+#ifdef CONFIG_XENO_DEBUG
+#define mutex_type_attribute PTHREAD_MUTEX_ERRORCHECK
+#else
+#define mutex_type_attribute PTHREAD_MUTEX_NORMAL
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void __run_cleanup_block(struct cleanup_block *cb);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BOILERPLATE_LOCK_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/namegen.h b/kernel/xenomai-v3.2.4/include/boilerplate/namegen.h
new file mode 100644
index 0000000..2395869
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/namegen.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_NAMEGEN_H
+#define _BOILERPLATE_NAMEGEN_H
+
+#include <boilerplate/atomic.h>
+
+struct name_generator {
+	const char *radix;
+	int length;
+	atomic_t serial;
+};
+
+#define DEFINE_NAME_GENERATOR(__name, __radix, __type, __member)	\
+	struct name_generator __name = {				\
+		.radix = __radix,					\
+		.length = sizeof ((__type *)0)->__member,		\
+		.serial = ATOMIC_INIT(0),				\
+	}
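+
+/*
+ * Usage sketch (illustrative; struct task and its name[] member are
+ * hypothetical). generate_name() fills buf, deriving a fresh name
+ * from the generator's radix and serial counter when no explicit one
+ * is passed (exact semantics per the matching namegen.c):
+ *
+ *	DEFINE_NAME_GENERATOR(task_namegen, "task", struct task, name);
+ *
+ *	char buf[32];
+ *	generate_name(buf, NULL, &task_namegen);
+ */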
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+char *generate_name(char *buf, const char *radix,
+		    struct name_generator *ngen);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _BOILERPLATE_NAMEGEN_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/obstack.h b/kernel/xenomai-v3.2.4/include/boilerplate/obstack.h
new file mode 100644
index 0000000..95eb792
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/obstack.h
@@ -0,0 +1,515 @@
+/* obstack.h - object stack macros
+   Copyright (C) 1988-1994,1996-1999,2003,2004,2005
+	Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+   Boston, MA 02110-1301, USA.  */
+
+/* Summary:
+
+All the apparent functions defined here are macros. The idea
+is that you would use these pre-tested macros to solve a
+very specific set of problems, and they would run fast.
+Caution: no side-effects in arguments please!! They may be
+evaluated MANY times!!
+
+These macros operate a stack of objects.  Each object starts life
+small, and may grow to maturity.  (Consider building a word syllable
+by syllable.)  An object can move while it is growing.  Once it has
+been "finished" it never changes address again.  So the "top of the
+stack" is typically an immature growing object, while the rest of the
+stack is of mature, fixed size and fixed address objects.
+
+These routines grab large chunks of memory, using a function you
+supply, called `obstack_chunk_alloc'.  On occasion, they free chunks,
+by calling `obstack_chunk_free'.  You must define them and declare
+them before using any obstack macros.
+
+Each independent stack is represented by a `struct obstack'.
+Each of the obstack macros expects a pointer to such a structure
+as the first argument.
+
+One motivation for this package is the problem of growing char strings
+in symbol tables.  Unless you are "fascist pig with a read-only mind"
+--Gosper's immortal quote from HAKMEM item 154, out of context--you
+would not like to put any arbitrary upper limit on the length of your
+symbols.
+
+In practice this often means you will build many short symbols and a
+few long symbols.  At the time you are reading a symbol you don't know
+how long it is.  One traditional method is to read a symbol into a
+buffer, realloc()ating the buffer every time you try to read a symbol
+that is longer than the buffer.  This is beaut, but you still will
+want to copy the symbol from the buffer to a more permanent
+symbol-table entry say about half the time.
+
+With obstacks, you can work differently.  Use one obstack for all symbol
+names.  As you read a symbol, grow the name in the obstack gradually.
+When the name is complete, finalize it.  Then, if the symbol exists already,
+free the newly read name.
+
+The way we do this is to take a large chunk, allocating memory from
+low addresses.  When you want to build a symbol in the chunk you just
+add chars above the current "high water mark" in the chunk.  When you
+have finished adding chars, because you got to the end of the symbol,
+you know how long the chars are, and you can create a new object.
+Mostly the chars will not burst over the highest address of the chunk,
+because you would typically expect a chunk to be (say) 100 times as
+long as an average object.
+
+In case that isn't clear, when we have enough chars to make up
+the object, THEY ARE ALREADY CONTIGUOUS IN THE CHUNK (guaranteed)
+so we just point to it where it lies.  No moving of chars is
+needed and this is the second win: potentially long strings need
+never be explicitly shuffled. Once an object is formed, it does not
+change its address during its lifetime.
+
+When the chars burst over a chunk boundary, we allocate a larger
+chunk, and then copy the partly formed object from the end of the old
+chunk to the beginning of the new larger chunk.  We then carry on
+accreting characters to the end of the object as we normally would.
+
+A special macro is provided to add a single char at a time to a
+growing object.  This allows the use of register variables, which
+break the ordinary 'growth' macro.
+
+Summary:
+	We allocate large chunks.
+	We carve out one object at a time from the current chunk.
+	Once carved, an object never moves.
+	We are free to append data of any size to the currently
+	  growing object.
+	Exactly one object is growing in an obstack at any one time.
+	You can run one obstack per control block.
+	You may have as many control blocks as you dare.
+	Because of the way we do it, you can `unwind' an obstack
+	  back to a previous state. (You may remove objects much
+	  as you would with a stack.)
+*/
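+
+/* Usage sketch (illustrative): grow a string one char at a time, then
+   finish it into an object with a stable address.  As stated above,
+   obstack_chunk_alloc/obstack_chunk_free must be defined first.
+
+	#define obstack_chunk_alloc	malloc
+	#define obstack_chunk_free	free
+
+	struct obstack pool;
+	const char *p;
+	char *name;
+
+	obstack_init (&pool);
+	for (p = "symbol"; *p; p++)
+	  obstack_1grow (&pool, *p);
+	obstack_1grow (&pool, '\0');
+	name = obstack_finish (&pool);	(address is now fixed)
+*/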
+
+
+/* Don't do the contents of this file more than once.  */
+
+#ifndef _BOILERPLATE_OBSTACK_H
+#define _BOILERPLATE_OBSTACK_H 1
+
+#ifdef HAVE_OBSTACK_H
+#include_next <obstack.h>
+#else
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/* We need the type of a pointer subtraction.  If __PTRDIFF_TYPE__ is
+   defined, as with GNU C, use that; that way we don't pollute the
+   namespace with <stddef.h>'s symbols.  Otherwise, include <stddef.h>
+   and use ptrdiff_t.  */
+
+#ifdef __PTRDIFF_TYPE__
+# define PTR_INT_TYPE __PTRDIFF_TYPE__
+#else
+# include <stddef.h>
+# define PTR_INT_TYPE ptrdiff_t
+#endif
+
+/* If B is the base of an object addressed by P, return the result of
+   aligning P to the next multiple of A + 1.  B and P must be of type
+   char *.  A + 1 must be a power of 2.  */
+
+#define __BPTR_ALIGN(B, P, A) ((B) + (((P) - (B) + (A)) & ~(A)))
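+
+/* For instance, __BPTR_ALIGN (B, P, 7) rounds P up to the next
+   8-byte boundary relative to B.  */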
+
+/* Similar to __BPTR_ALIGN (B, P, A), except optimize the common case
+   where pointers can be converted to integers, aligned as integers,
+   and converted back again.  If PTR_INT_TYPE is narrower than a
+   pointer (e.g., the AS/400), play it safe and compute the alignment
+   relative to B.  Otherwise, use the faster strategy of computing the
+   alignment relative to 0.  */
+
+#define __PTR_ALIGN(B, P, A)						    \
+  __BPTR_ALIGN (sizeof (PTR_INT_TYPE) < sizeof (void *) ? (B) : (char *) 0, \
+		P, A)
+
+#include <string.h>
+
+struct _obstack_chunk		/* Lives at front of each chunk. */
+{
+  char  *limit;			/* 1 past end of this chunk */
+  struct _obstack_chunk *prev;	/* address of prior chunk or NULL */
+  char	contents[4];		/* objects begin here */
+};
+
+struct obstack		/* control current object in current chunk */
+{
+  long	chunk_size;		/* preferred size to allocate chunks in */
+  struct _obstack_chunk *chunk;	/* address of current struct obstack_chunk */
+  char	*object_base;		/* address of object we are building */
+  char	*next_free;		/* where to add next char to current object */
+  char	*chunk_limit;		/* address of char after current chunk */
+  union
+  {
+    PTR_INT_TYPE tempint;
+    void *tempptr;
+  } temp;			/* Temporary for some macros.  */
+  int   alignment_mask;		/* Mask of alignment for each object. */
+  /* These prototypes vary based on `use_extra_arg', and we use
+     casts to the prototypeless function type in all assignments,
+     but having prototypes here quiets -Wstrict-prototypes.  */
+  struct _obstack_chunk *(*chunkfun) (void *, long);
+  void (*freefun) (void *, struct _obstack_chunk *);
+  void *extra_arg;		/* first arg for chunk alloc/dealloc funcs */
+  unsigned use_extra_arg:1;	/* chunk alloc/dealloc funcs take extra arg */
+  unsigned maybe_empty_object:1;/* There is a possibility that the current
+				   chunk contains a zero-length object.  This
+				   prevents freeing the chunk if we allocate
+				   a bigger chunk to replace it. */
+  unsigned alloc_failed:1;	/* No longer used, as we now call the failed
+				   handler on error, but retained for binary
+				   compatibility.  */
+};
+
+/* Declare the external functions we use; they are in obstack.c.  */
+
+extern void _obstack_newchunk (struct obstack *, int);
+extern int _obstack_begin (struct obstack *, int, int,
+			    void *(*) (long), void (*) (void *));
+extern int _obstack_begin_1 (struct obstack *, int, int,
+			     void *(*) (void *, long),
+			     void (*) (void *, void *), void *);
+extern int _obstack_memory_used (struct obstack *);
+
+void obstack_free (struct obstack *obstack, void *block);
+
+
+/* Error handler called when `obstack_chunk_alloc' failed to allocate
+   more memory.  This can be set to a user defined function which
+   should either abort gracefully or use longjump - but shouldn't
+   return.  The default action is to print a message and abort.  */
+extern void (*obstack_alloc_failed_handler) (void);
+
+/* Exit value used when `print_and_abort' is used.  */
+extern int obstack_exit_failure;
+
+/* Pointer to beginning of object being allocated or to be allocated next.
+   Note that this might not be the final address of the object
+   because a new chunk might be needed to hold the final size.  */
+
+#define obstack_base(h) ((void *) (h)->object_base)
+
+/* Size for allocating ordinary chunks.  */
+
+#define obstack_chunk_size(h) ((h)->chunk_size)
+
+/* Pointer to next byte not yet allocated in current chunk.  */
+
+#define obstack_next_free(h)	((h)->next_free)
+
+/* Mask specifying low bits that should be clear in address of an object.  */
+
+#define obstack_alignment_mask(h) ((h)->alignment_mask)
+
+/* To prevent prototype warnings provide complete argument list.  */
+#define obstack_init(h)						\
+  _obstack_begin ((h), 0, 0,					\
+		  (void *(*) (long)) obstack_chunk_alloc,	\
+		  (void (*) (void *)) obstack_chunk_free)
+
+#define obstack_begin(h, size)					\
+  _obstack_begin ((h), (size), 0,				\
+		  (void *(*) (long)) obstack_chunk_alloc,	\
+		  (void (*) (void *)) obstack_chunk_free)
+
+#define obstack_specify_allocation(h, size, alignment, chunkfun, freefun)  \
+  _obstack_begin ((h), (size), (alignment),				   \
+		  (void *(*) (long)) (chunkfun),			   \
+		  (void (*) (void *)) (freefun))
+
+#define obstack_specify_allocation_with_arg(h, size, alignment, chunkfun, freefun, arg) \
+  _obstack_begin_1 ((h), (size), (alignment),				\
+		    (void *(*) (void *, long)) (chunkfun),		\
+		    (void (*) (void *, void *)) (freefun), (arg))
+
+#define obstack_chunkfun(h, newchunkfun) \
+  ((h) -> chunkfun = (struct _obstack_chunk *(*)(void *, long)) (newchunkfun))
+
+#define obstack_freefun(h, newfreefun) \
+  ((h) -> freefun = (void (*)(void *, struct _obstack_chunk *)) (newfreefun))
+
+#define obstack_1grow_fast(h,achar) (*((h)->next_free)++ = (achar))
+
+#define obstack_blank_fast(h,n) ((h)->next_free += (n))
+
+#define obstack_memory_used(h) _obstack_memory_used (h)
+
+#if defined __GNUC__ && defined __STDC__ && __STDC__
+/* NextStep 2.0 cc is really gcc 1.93 but it defines __GNUC__ = 2 and
+   does not implement __extension__.  But that compiler doesn't define
+   __GNUC_MINOR__.  */
+# if __GNUC__ < 2 || (__NeXT__ && !__GNUC_MINOR__)
+#  define __extension__
+# endif
+
+/* For GNU C, if not -traditional,
+   we can define these macros to compute all args only once
+   without using a global variable.
+   Also, we can avoid using the `temp' slot, to make faster code.  */
+
+# define obstack_object_size(OBSTACK)					\
+  __extension__								\
+  ({ struct obstack const *__o = (OBSTACK);				\
+     (unsigned) (__o->next_free - __o->object_base); })
+
+# define obstack_room(OBSTACK)						\
+  __extension__								\
+  ({ struct obstack const *__o = (OBSTACK);				\
+     (unsigned) (__o->chunk_limit - __o->next_free); })
+
+# define obstack_make_room(OBSTACK,length)				\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   int __len = (length);						\
+   if (__o->chunk_limit - __o->next_free < __len)			\
+     _obstack_newchunk (__o, __len);					\
+   (void) 0; })
+
+# define obstack_empty_p(OBSTACK)					\
+  __extension__								\
+  ({ struct obstack const *__o = (OBSTACK);				\
+     (__o->chunk->prev == 0						\
+      && __o->next_free == __PTR_ALIGN ((char *) __o->chunk,		\
+					__o->chunk->contents,		\
+					__o->alignment_mask)); })
+
+# define obstack_grow(OBSTACK,where,length)				\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   int __len = (length);						\
+   if (__o->next_free + __len > __o->chunk_limit)			\
+     _obstack_newchunk (__o, __len);					\
+   memcpy (__o->next_free, where, __len);				\
+   __o->next_free += __len;						\
+   (void) 0; })
+
+# define obstack_grow0(OBSTACK,where,length)				\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   int __len = (length);						\
+   if (__o->next_free + __len + 1 > __o->chunk_limit)			\
+     _obstack_newchunk (__o, __len + 1);				\
+   memcpy (__o->next_free, where, __len);				\
+   __o->next_free += __len;						\
+   *(__o->next_free)++ = 0;						\
+   (void) 0; })
+
+# define obstack_1grow(OBSTACK,datum)					\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   if (__o->next_free + 1 > __o->chunk_limit)				\
+     _obstack_newchunk (__o, 1);					\
+   obstack_1grow_fast (__o, datum);					\
+   (void) 0; })
+
+/* These assume that the obstack alignment is good enough for pointers
+   or ints, and that the data added so far to the current object
+   shares that much alignment.  */
+
+# define obstack_ptr_grow(OBSTACK,datum)				\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   if (__o->next_free + sizeof (void *) > __o->chunk_limit)		\
+     _obstack_newchunk (__o, sizeof (void *));				\
+   obstack_ptr_grow_fast (__o, datum); })
+
+# define obstack_int_grow(OBSTACK,datum)				\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   if (__o->next_free + sizeof (int) > __o->chunk_limit)		\
+     _obstack_newchunk (__o, sizeof (int));				\
+   obstack_int_grow_fast (__o, datum); })
+
+# define obstack_ptr_grow_fast(OBSTACK,aptr)				\
+__extension__								\
+({ struct obstack *__o1 = (OBSTACK);					\
+   *(const void **) __o1->next_free = (aptr);				\
+   __o1->next_free += sizeof (const void *);				\
+   (void) 0; })
+
+# define obstack_int_grow_fast(OBSTACK,aint)				\
+__extension__								\
+({ struct obstack *__o1 = (OBSTACK);					\
+   *(int *) __o1->next_free = (aint);					\
+   __o1->next_free += sizeof (int);					\
+   (void) 0; })
+
+# define obstack_blank(OBSTACK,length)					\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   int __len = (length);						\
+   if (__o->chunk_limit - __o->next_free < __len)			\
+     _obstack_newchunk (__o, __len);					\
+   obstack_blank_fast (__o, __len);					\
+   (void) 0; })
+
+# define obstack_alloc(OBSTACK,length)					\
+__extension__								\
+({ struct obstack *__h = (OBSTACK);					\
+   obstack_blank (__h, (length));					\
+   obstack_finish (__h); })
+
+# define obstack_copy(OBSTACK,where,length)				\
+__extension__								\
+({ struct obstack *__h = (OBSTACK);					\
+   obstack_grow (__h, (where), (length));				\
+   obstack_finish (__h); })
+
+# define obstack_copy0(OBSTACK,where,length)				\
+__extension__								\
+({ struct obstack *__h = (OBSTACK);					\
+   obstack_grow0 (__h, (where), (length));				\
+   obstack_finish (__h); })
+
+/* The local variable is named __o1 to avoid a name conflict
+   when obstack_blank is called.  */
+# define obstack_finish(OBSTACK)					\
+__extension__								\
+({ struct obstack *__o1 = (OBSTACK);					\
+   void *__value = (void *) __o1->object_base;				\
+   if (__o1->next_free == __value)					\
+     __o1->maybe_empty_object = 1;					\
+   __o1->next_free							\
+     = __PTR_ALIGN (__o1->object_base, __o1->next_free,			\
+		    __o1->alignment_mask);				\
+   if (__o1->next_free - (char *)__o1->chunk				\
+       > __o1->chunk_limit - (char *)__o1->chunk)			\
+     __o1->next_free = __o1->chunk_limit;				\
+   __o1->object_base = __o1->next_free;					\
+   __value; })
+
+# define obstack_free(OBSTACK, OBJ)					\
+__extension__								\
+({ struct obstack *__o = (OBSTACK);					\
+   void *__obj = (OBJ);							\
+   if (__obj > (void *)__o->chunk && __obj < (void *)__o->chunk_limit)  \
+     __o->next_free = __o->object_base = (char *)__obj;			\
+   else (obstack_free) (__o, __obj); })
+
+#else /* not __GNUC__ or not __STDC__ */
+
+# define obstack_object_size(h) \
+ (unsigned) ((h)->next_free - (h)->object_base)
+
+# define obstack_room(h)		\
+ (unsigned) ((h)->chunk_limit - (h)->next_free)
+
+# define obstack_empty_p(h) \
+ ((h)->chunk->prev == 0							\
+  && (h)->next_free == __PTR_ALIGN ((char *) (h)->chunk,		\
+				    (h)->chunk->contents,		\
+				    (h)->alignment_mask))
+
+/* Note that the call to _obstack_newchunk is enclosed in (..., 0)
+   so that we can avoid having void expressions
+   in the arms of the conditional expression.
+   Casting the third operand to void was tried before,
+   but some compilers won't accept it.  */
+
+# define obstack_make_room(h,length)					\
+( (h)->temp.tempint = (length),						\
+  (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		\
+   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0))
+
+# define obstack_grow(h,where,length)					\
+( (h)->temp.tempint = (length),						\
+  (((h)->next_free + (h)->temp.tempint > (h)->chunk_limit)		\
+   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		\
+  memcpy ((h)->next_free, where, (h)->temp.tempint),			\
+  (h)->next_free += (h)->temp.tempint)
+
+# define obstack_grow0(h,where,length)					\
+( (h)->temp.tempint = (length),						\
+  (((h)->next_free + (h)->temp.tempint + 1 > (h)->chunk_limit)		\
+   ? (_obstack_newchunk ((h), (h)->temp.tempint + 1), 0) : 0),		\
+  memcpy ((h)->next_free, where, (h)->temp.tempint),			\
+  (h)->next_free += (h)->temp.tempint,					\
+  *((h)->next_free)++ = 0)
+
+# define obstack_1grow(h,datum)						\
+( (((h)->next_free + 1 > (h)->chunk_limit)				\
+   ? (_obstack_newchunk ((h), 1), 0) : 0),				\
+  obstack_1grow_fast (h, datum))
+
+# define obstack_ptr_grow(h,datum)					\
+( (((h)->next_free + sizeof (char *) > (h)->chunk_limit)		\
+   ? (_obstack_newchunk ((h), sizeof (char *)), 0) : 0),		\
+  obstack_ptr_grow_fast (h, datum))
+
+# define obstack_int_grow(h,datum)					\
+( (((h)->next_free + sizeof (int) > (h)->chunk_limit)			\
+   ? (_obstack_newchunk ((h), sizeof (int)), 0) : 0),			\
+  obstack_int_grow_fast (h, datum))
+
+# define obstack_ptr_grow_fast(h,aptr)					\
+  (((const void **) ((h)->next_free += sizeof (void *)))[-1] = (aptr))
+
+# define obstack_int_grow_fast(h,aint)					\
+  (((int *) ((h)->next_free += sizeof (int)))[-1] = (aint))
+
+# define obstack_blank(h,length)					\
+( (h)->temp.tempint = (length),						\
+  (((h)->chunk_limit - (h)->next_free < (h)->temp.tempint)		\
+   ? (_obstack_newchunk ((h), (h)->temp.tempint), 0) : 0),		\
+  obstack_blank_fast (h, (h)->temp.tempint))
+
+# define obstack_alloc(h,length)					\
+ (obstack_blank ((h), (length)), obstack_finish ((h)))
+
+# define obstack_copy(h,where,length)					\
+ (obstack_grow ((h), (where), (length)), obstack_finish ((h)))
+
+# define obstack_copy0(h,where,length)					\
+ (obstack_grow0 ((h), (where), (length)), obstack_finish ((h)))
+
+# define obstack_finish(h)						\
+( ((h)->next_free == (h)->object_base					\
+   ? (((h)->maybe_empty_object = 1), 0)					\
+   : 0),								\
+  (h)->temp.tempptr = (h)->object_base,					\
+  (h)->next_free							\
+    = __PTR_ALIGN ((h)->object_base, (h)->next_free,			\
+		   (h)->alignment_mask),				\
+  (((h)->next_free - (char *) (h)->chunk				\
+    > (h)->chunk_limit - (char *) (h)->chunk)				\
+   ? ((h)->next_free = (h)->chunk_limit) : 0),				\
+  (h)->object_base = (h)->next_free,					\
+  (h)->temp.tempptr)
+
+# define obstack_free(h,obj)						\
+( (h)->temp.tempint = (char *) (obj) - (char *) (h)->chunk,		\
+  ((((h)->temp.tempint > 0						\
+    && (h)->temp.tempint < (h)->chunk_limit - (char *) (h)->chunk))	\
+   ? (int) ((h)->next_free = (h)->object_base				\
+	    = (h)->temp.tempint + (char *) (h)->chunk)			\
+   : (((obstack_free) ((h), (h)->temp.tempint + (char *) (h)->chunk), 0), 0)))
+
+#endif /* not __GNUC__ or not __STDC__ */
+
+#ifdef __cplusplus
+}	/* C++ */
+#endif
+
+#endif /* !HAVE_OBSTACK_H */
+
+#endif /* _BOILERPLATE_OBSTACK_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/private-list.h b/kernel/xenomai-v3.2.4/include/boilerplate/private-list.h
new file mode 100644
index 0000000..72f1e4c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/private-list.h
@@ -0,0 +1,217 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _BOILERPLATE_PRIVATE_LIST_H
+#define _BOILERPLATE_PRIVATE_LIST_H
+
+#ifndef _BOILERPLATE_LIST_H
+#error "Do not include this file directly. Use <boilerplate/list.h> instead."
+#endif
+
+struct pvholder {
+	struct pvholder *next;
+	struct pvholder *prev;
+};
+
+struct pvlistobj {
+	struct pvholder head;
+};
+
+#define PRIVATE_LIST_INITIALIZER(__name) \
+	{ .head = { .next = &((__name).head), .prev = &((__name).head) } }
+
+#define DEFINE_PRIVATE_LIST(__name) \
+	struct pvlistobj __name = PRIVATE_LIST_INITIALIZER(__name)
+
+static inline void initpvh(struct pvholder *holder)
+{
+	holder->next = holder;
+	holder->prev = holder;
+}
+
+static inline void atpvh(struct pvholder *head, struct pvholder *holder)
+{
+	/* Inserts the new element right after the heading one. */
+	holder->prev = head;
+	holder->next = head->next;
+	holder->next->prev = holder;
+	head->next = holder;
+}
+
+static inline void dtpvh(struct pvholder *holder)
+{
+	holder->prev->next = holder->next;
+	holder->next->prev = holder->prev;
+}
+
+static inline void pvlist_init(struct pvlistobj *list)
+{
+	initpvh(&list->head);
+}
+
+static inline void pvholder_init(struct pvholder *holder)
+{
+	initpvh(holder);
+}
+
+/*
+ * XXX: pvholder_init() is mandatory if you later want to use this
+ * predicate.
+ */
+static inline int pvholder_linked(const struct pvholder *holder)
+{
+	return !(holder->prev == holder->next &&
+		 holder->prev == holder);
+}
+
+static inline void pvlist_prepend(struct pvholder *holder, struct pvlistobj *list)
+{
+	atpvh(&list->head, holder);
+}
+
+static inline void pvlist_append(struct pvholder *holder, struct pvlistobj *list)
+{
+	atpvh(list->head.prev, holder);
+}
+
+static inline void pvlist_insert(struct pvholder *next, struct pvholder *prev)
+{
+	atpvh(prev, next);
+}
+
+static inline void pvlist_join(struct pvlistobj *lsrc, struct pvlistobj *ldst)
+{
+	struct pvholder *headsrc = lsrc->head.next;
+	struct pvholder *tailsrc = lsrc->head.prev;
+	struct pvholder *headdst = &ldst->head;
+
+	headsrc->prev->next = tailsrc->next;
+	tailsrc->next->prev = headsrc->prev;
+	headsrc->prev = headdst;
+	tailsrc->next = headdst->next;
+	headdst->next->prev = tailsrc;
+	headdst->next = headsrc;
+}
+
+static inline void pvlist_remove(struct pvholder *holder)
+{
+	dtpvh(holder);
+}
+
+static inline void pvlist_remove_init(struct pvholder *holder)
+{
+	dtpvh(holder);
+	initpvh(holder);
+}
+
+static inline int pvlist_empty(const struct pvlistobj *list)
+{
+	return list->head.next == &list->head;
+}
+
+static inline struct pvholder *pvlist_pop(struct pvlistobj *list)
+{
+	struct pvholder *holder = list->head.next;
+	pvlist_remove(holder);
+	return holder;
+}
+
+static inline int pvlist_heading_p(const struct pvholder *holder,
+				   const struct pvlistobj *list)
+{
+	return list->head.next == holder;
+}
+
+#define pvlist_entry(ptr, type, member)				\
+	container_of(ptr, type, member)
+
+#define pvlist_first_entry(list, type, member)			\
+	pvlist_entry((list)->head.next, type, member)
+
+#define pvlist_last_entry(list, type, member)			\
+	pvlist_entry((list)->head.prev, type, member)
+
+#define pvlist_prev_entry(pos, list, member)				\
+	({								\
+		typeof(*pos) *__prev = NULL;				\
+		if ((list)->head.next != &(pos)->member)		\
+			__prev = pvlist_entry((pos)->member.prev,	\
+					      typeof(*pos), member);	\
+		__prev;							\
+	})
+
+#define pvlist_next_entry(pos, list, member)				\
+	({								\
+		typeof(*pos) *__next = NULL;				\
+		if ((list)->head.prev != &(pos)->member)		\
+			__next = pvlist_entry((pos)->member.next,	\
+					      typeof(*pos), member);	\
+		__next;							\
+	})
+
+#define pvlist_pop_entry(list, type, member) ({				\
+			struct pvholder *__holder = pvlist_pop(list);	\
+			pvlist_entry(__holder, type, member); })
+
+#define pvlist_for_each(pos, list)					\
+	for (pos = (list)->head.next;					\
+	     pos != &(list)->head; pos = (pos)->next)
+
+#define pvlist_for_each_reverse(pos, list)				\
+	for (pos = (list)->head.prev;					\
+	     pos != &(list)->head; pos = (pos)->prev)
+
+#define pvlist_for_each_safe(pos, tmp, list)				\
+	for (pos = (list)->head.next,					\
+		     tmp = (pos)->next;					\
+	     pos != &(list)->head;					\
+	     pos = tmp, tmp = (pos)->next)
+
+#define pvlist_for_each_entry(pos, list, member)			\
+	for (pos = pvlist_entry((list)->head.next,			\
+			      typeof(*pos), member);			\
+	     &(pos)->member != &(list)->head;				\
+	     pos = pvlist_entry((pos)->member.next,			\
+			      typeof(*pos), member))
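+
+/*
+ * Usage sketch (illustrative; struct item and do_something() are
+ * hypothetical):
+ *
+ *	struct item {
+ *		int value;
+ *		struct pvholder link;
+ *	};
+ *
+ *	DEFINE_PRIVATE_LIST(items);
+ *	struct item a = { .value = 42 }, *pos;
+ *
+ *	pvholder_init(&a.link);
+ *	pvlist_append(&a.link, &items);
+ *	pvlist_for_each_entry(pos, &items, link)
+ *		do_something(pos->value);
+ */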
+
+#define pvlist_for_each_entry_safe(pos, tmp, list, member)		\
+	for (pos = pvlist_entry((list)->head.next,			\
+			      typeof(*pos), member),			\
+		     tmp = pvlist_entry((pos)->member.next,		\
+				      typeof(*pos), member);		\
+	     &(pos)->member != &(list)->head;				\
+	     pos = tmp, tmp = pvlist_entry((pos)->member.next,		\
+					 typeof(*pos), member))
+
+#define pvlist_for_each_entry_reverse(pos, list, member)		\
+	for (pos = pvlist_entry((list)->head.prev,			\
+			      typeof(*pos), member);			\
+	     &pos->member != &(list)->head;				\
+	     pos = pvlist_entry(pos->member.prev,			\
+			      typeof(*pos), member))
+
+#define pvlist_for_each_entry_reverse_safe(pos, tmp, list, member)	\
+	for (pos = pvlist_entry((list)->head.prev,			\
+			      typeof(*pos), member),			\
+		     tmp = pvlist_entry((pos)->member.prev,		\
+				      typeof(*pos), member);		\
+	     &(pos)->member != &(list)->head;				\
+	     pos = tmp, tmp = pvlist_entry((pos)->member.prev,		\
+					 typeof(*pos), member))
+
+#endif /* !_BOILERPLATE_PRIVATE_LIST_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/scope.h b/kernel/xenomai-v3.2.4/include/boilerplate/scope.h
new file mode 100644
index 0000000..ded6a2c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/scope.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_SCOPE_H
+#define _BOILERPLATE_SCOPE_H
+
+#include <sys/types.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <xeno_config.h>
+
+typedef uintptr_t memoff_t;
+
+#ifdef CONFIG_XENO_PSHARED
+
+extern void *__main_heap;
+
+int pshared_check(void *heap, void *addr);
+
+#define dref_type(t)	memoff_t
+
+#define __memoff(__base, __addr)	((memoff_t)((caddr_t)(__addr) - (caddr_t)(__base)))
+#define __memptr(__base, __off)		((void *)((caddr_t)(__base) + (__off)))
+#define __memchk(__base, __addr)	pshared_check(__base, __addr)
+
+#define __moff(__p)		__memoff(__main_heap, __p)
+#define __moff_nullable(__p)	(__p ? __memoff(__main_heap, __p) : 0)
+#define __mptr(__off)		__memptr(__main_heap, __off)
+#define __mptr_nullable(__off)	(__off ? __memptr(__main_heap, __off) : NULL)
+#define __mchk(__p)		__memchk(__main_heap, __p)
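+
+/*
+ * A pointer into the main shared heap thus round-trips through its
+ * offset: __mptr(__moff(p)) == p for any p within that heap.
+ */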
+
+#define mutex_scope_attribute	PTHREAD_PROCESS_SHARED
+#define sem_scope_attribute	1
+#ifdef CONFIG_XENO_COBALT
+#define monitor_scope_attribute	COBALT_MONITOR_SHARED
+#define event_scope_attribute	COBALT_EVENT_SHARED
+#endif
+
+#else /* !CONFIG_XENO_PSHARED */
+
+#define __main_heap	NULL
+
+#define dref_type(t)	__typeof__(t)
+
+#define __memoff(__base, __addr)	(__addr)
+#define __memptr(__base, __off)		(__off)
+#define __memchk(__base, __addr)	1
+
+#define __moff(__p)		(__p)
+#define __moff_nullable(__p)	(__p)
+#define __mptr(__off)		(__off)
+#define __mptr_nullable(__off)	(__off)
+#define __mchk(__p)		1
+
+#define mutex_scope_attribute	PTHREAD_PROCESS_PRIVATE
+#define sem_scope_attribute	0
+#ifdef CONFIG_XENO_COBALT
+#define monitor_scope_attribute	0
+#define event_scope_attribute	0
+#endif
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+#endif /* _BOILERPLATE_SCOPE_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/setup.h b/kernel/xenomai-v3.2.4/include/boilerplate/setup.h
new file mode 100644
index 0000000..7df3cfe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/setup.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_SETUP_H
+#define _BOILERPLATE_SETUP_H
+
+#include <boilerplate/list.h>
+#include <boilerplate/wrappers.h>
+#include <string.h>
+#include <sched.h>
+
+struct base_setup_data {
+	cpu_set_t cpu_affinity;
+	int no_mlock;
+	int no_sanity;
+	int verbosity_level;
+	int trace_level;
+	const char *arg0;
+};
+
+struct option;
+
+struct setup_descriptor {
+	const char *name;
+	int (*tune)(void);
+	int (*parse_option)(int optnum, const char *optarg);
+	void (*help)(void);
+	int (*init)(void);
+	const struct option *options;
+	struct {
+		int id;
+		int opt_start;
+		int opt_end;
+		struct pvholder next;
+		int done;
+	} __reserved;
+};
+
+/*
+ * We have three pre-defined constructor priorities:
+ *
+ * - One for setup calls (__setup_ctor), which are guaranteed to run
+ * prior to the bootstrap code. You should use setup calls for
+ * implementing initialization hooks which depend on a particular call
+ * order. Each Xenomai interface layer is initialized via a dedicated
+ * setup call.
+ *
+ * - A second priority is assigned to early init calls (__early_ctor),
+ * which are also guaranteed to run prior to the bootstrap
+ * code. Whether such early code runs before OR after any setup code
+ * is __UNSPECIFIED__. By design, such code may not invoke any Xenomai
+ * service, and generally speaking, should have no dependencies on
+ * anything else.
+ *
+ * - The last priority level is used for the bootstrap code
+ * (__bootstrap_ctor), which is guaranteed to run after any
+ * setup/early code, provided such bootstrap code is part of the main
+ * executable.
+ *
+ * The guarantees on the init order don't go beyond what is stated
+ * here; do NOT assume more than this.
+ */
+#define __setup_ctor		__attribute__ ((constructor(200)))
+#define __early_ctor		__attribute__ ((constructor(210)))
+#define __bootstrap_ctor	__attribute__ ((constructor(220)))
+
+#define __setup_call(__name, __id)			\
+static __setup_ctor void __declare_ ## __name(void)	\
+{							\
+	__register_setup_call(&(__name), __id);		\
+}
+
+#define core_setup_call(__name)		__setup_call(__name, 0)
+#define boilerplate_setup_call(__name)	__setup_call(__name, 1)
+#define copperplate_setup_call(__name)	__setup_call(__name, 2)
+#define interface_setup_call(__name)	__setup_call(__name, 3)
+#define post_setup_call(__name)		__setup_call(__name, 4)
+#define user_setup_call(__name)		__setup_call(__name, 5)
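+
+/*
+ * Usage sketch (illustrative; my_setup and my_init are hypothetical):
+ * register a descriptor so that its init() handler runs from a setup
+ * call, i.e. ahead of the bootstrap code.
+ *
+ *	static int my_init(void)
+ *	{
+ *		return 0;
+ *	}
+ *
+ *	static struct setup_descriptor my_setup = {
+ *		.name = "my-layer",
+ *		.init = my_init,
+ *	};
+ *	user_setup_call(my_setup);
+ */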
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void __register_setup_call(struct setup_descriptor *p, int id);
+
+extern pid_t __node_id;
+
+extern int __config_done;
+
+extern struct base_setup_data __base_setup_data;
+
+const char *get_program_name(void);
+
+void __trace_me(const char *fmt, ...);
+
+#define trace_me(__fmt, __args...)			\
+	do {						\
+		if (__base_setup_data.trace_level > 0)	\
+			__trace_me(__fmt, ##__args);	\
+	} while (0)
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_BOILERPLATE_SETUP_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h b/kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h
new file mode 100644
index 0000000..741dfc0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/shared-list.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_SHARED_LIST_H
+#define _BOILERPLATE_SHARED_LIST_H
+
+#ifndef _BOILERPLATE_LIST_H
+#error "Do not include this file directly. Use <boilerplate/list.h> instead."
+#endif
+
+#define __hoff(h, a)  __memoff(h, a)
+#define __hptr(h, v)  ((struct holder *)__memptr(h, v))
+#define __hchk(h, a)  __memchk(h, a)
+
+struct holder {
+	dref_type(struct holder *) next;
+	dref_type(struct holder *) prev;
+};
+
+struct listobj {
+	struct holder head;
+};
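+
+/*
+ * Unlike the private (pvlist) variant, next/prev links are stored as
+ * dref_type values, i.e. offsets from the main heap base when
+ * CONFIG_XENO_PSHARED is enabled, so a list kept in shared memory
+ * stays valid in every process mapping that heap, wherever the
+ * mapping lands; __hoff()/__hptr() convert between the two forms.
+ */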
+
+static inline void __inith_nocheck(void *heap, struct holder *holder)
+{
+	holder->next = __hoff(heap, holder);
+	holder->prev = __hoff(heap, holder);
+}
+
+static inline void __inith(void *heap, struct holder *holder)
+{
+	assert(__hchk(heap, holder));
+	__inith_nocheck(heap, holder);
+}
+
+static inline void inith(struct holder *holder)
+{
+	__inith(__main_heap, holder);
+}
+
+static inline void __ath(void *heap, struct holder *head,
+			 struct holder *holder)
+{
+	/* Inserts the new element right after the heading one. */
+	holder->prev = __hoff(heap, head);
+	holder->next = head->next;
+	__hptr(heap, holder->next)->prev = __hoff(heap, holder);
+	head->next = __hoff(heap, holder);
+}
+
+static inline void ath(struct holder *head, struct holder *holder)
+{
+	__ath(__main_heap, head, holder);
+}
+
+static inline void __dth(void *heap, struct holder *holder)
+{
+	__hptr(heap, holder->prev)->next = holder->next;
+	__hptr(heap, holder->next)->prev = holder->prev;
+}
+
+static inline void dth(struct holder *holder)
+{
+	__dth(__main_heap, holder);
+}
+
+static inline void __list_init(void *heap, struct listobj *list)
+{
+	__inith(heap, &list->head);
+}
+
+static inline void __list_init_nocheck(void *heap, struct listobj *list)
+{
+	__inith_nocheck(heap, &list->head);
+}
+
+static inline void list_init(struct listobj *list)
+{
+	__list_init(__main_heap, list);
+}
+
+static inline void __holder_init(void *heap, struct holder *holder)
+{
+	__inith(heap, holder);
+}
+
+static inline void __holder_init_nocheck(void *heap, struct holder *holder)
+{
+	__inith_nocheck(heap, holder);
+}
+
+static inline void holder_init(struct holder *holder)
+{
+	inith(holder);
+}
+
+static inline int __holder_linked(void *heap, const struct holder *holder)
+{
+	return !(holder->prev == holder->next &&
+		 holder->prev == __hoff(heap, holder));
+}
+
+/*
+ * XXX: holder_init() is mandatory if you later want to use this
+ * predicate.
+ */
+static inline int holder_linked(const struct holder *holder)
+{
+	return __holder_linked(__main_heap, holder);
+}
+
+static inline void __list_prepend(void *heap, struct holder *holder,
+				  struct listobj *list)
+{
+	__ath(heap, &list->head, holder);
+}
+
+static inline void list_prepend(struct holder *holder, struct listobj *list)
+{
+	__list_prepend(__main_heap, holder, list);
+}
+
+static inline void __list_append(void *heap, struct holder *holder,
+				 struct listobj *list)
+{
+	__ath(heap, __hptr(heap, list->head.prev), holder);
+}
+
+static inline void list_append(struct holder *holder, struct listobj *list)
+{
+	__list_append(__main_heap, holder, list);
+}
+
+static inline void __list_insert(void *heap, struct holder *next, struct holder *prev)
+{
+	__ath(heap, prev, next);
+}
+
+static inline void list_insert(struct holder *next, struct holder *prev)
+{
+	__list_insert(__main_heap, next, prev);
+}
+
+static inline void __list_join(void *heap, struct listobj *lsrc,
+			       struct listobj *ldst)
+{
+	struct holder *headsrc = __hptr(heap, lsrc->head.next);
+	struct holder *tailsrc = __hptr(heap, lsrc->head.prev);
+	struct holder *headdst = &ldst->head;
+
+	__hptr(heap, headsrc->prev)->next = tailsrc->next;
+	__hptr(heap, tailsrc->next)->prev = headsrc->prev;
+	headsrc->prev = __hoff(heap, headdst);
+	tailsrc->next = headdst->next;
+	__hptr(heap, headdst->next)->prev = __hoff(heap, tailsrc);
+	headdst->next = __hoff(heap, headsrc);
+}
+
+static inline void list_join(struct listobj *lsrc, struct listobj *ldst)
+{
+	__list_join(__main_heap, lsrc, ldst);
+}
+
+static inline void __list_remove(void *heap, struct holder *holder)
+{
+	__dth(heap, holder);
+}
+
+static inline void list_remove(struct holder *holder)
+{
+	__list_remove(__main_heap, holder);
+}
+
+static inline void __list_remove_init(void *heap, struct holder *holder)
+{
+	__dth(heap, holder);
+	__inith(heap, holder);
+}
+
+static inline void list_remove_init(struct holder *holder)
+{
+	__list_remove_init(__main_heap, holder);
+}
+
+static inline int __list_empty(void *heap, const struct listobj *list)
+{
+	return list->head.next == __hoff(heap, &list->head);
+}
+
+static inline int list_empty(const struct listobj *list)
+{
+	return __list_empty(__main_heap, list);
+}
+
+static inline struct holder *__list_pop(void *heap, struct listobj *list)
+{
+	struct holder *holder = __hptr(heap, list->head.next);
+	__list_remove(heap, holder);
+	return holder;
+}
+
+static inline struct holder *list_pop(struct listobj *list)
+{
+	return __list_pop(__main_heap, list);
+}
+
+static inline int __list_heading_p(void *heap, const struct holder *holder,
+				   const struct listobj *list)
+{
+	return list->head.next == __hoff(heap, holder);
+}
+
+static inline int list_heading_p(const struct holder *holder,
+				 const struct listobj *list)
+{
+	return __list_heading_p(__main_heap, holder, list);
+}
+
+#define list_entry(ptr, type, member)				\
+	container_of(ptr, type, member)
+
+#define __list_first_entry(heap, list, type, member)		\
+	list_entry(__hptr((heap), (list)->head.next), type, member)
+
+#define list_first_entry(list, type, member)			\
+	__list_first_entry(__main_heap, list, type, member)
+
+#define __list_last_entry(heap, list, type, member)		\
+	list_entry(__hptr((heap), (list)->head.prev), type, member)
+
+#define list_last_entry(list, type, member)			\
+	__list_last_entry(__main_heap, list, type, member)
+
+#define __list_prev_entry(heap, pos, list, member)			\
+	({								\
+		typeof(*pos) *__prev = NULL;				\
+		if ((list)->head.next != __hoff(heap, &(pos)->member))	\
+			__prev = list_entry(__hptr((heap),		\
+			   (pos)->member.prev), typeof(*pos), member);	\
+		__prev;							\
+	})
+
+#define list_prev_entry(pos, list, member)				\
+	__list_prev_entry(__main_heap, pos, list, member)
+
+#define __list_next_entry(heap, pos, list, member)			\
+	({								\
+		typeof(*pos) *__next = NULL;				\
+		if ((list)->head.prev != __hoff(heap, &(pos)->member))	\
+			__next = list_entry(__hptr((heap),		\
+			   (pos)->member.next), typeof(*pos), member);	\
+		__next;							\
+	})
+
+#define list_next_entry(pos, list, member)				\
+	__list_next_entry(__main_heap, pos, list, member)
+
+#define __list_pop_entry(heap, list, type, member) ({			\
+			struct holder *__holder = __list_pop((heap), list); \
+			list_entry(__holder, type, member); })
+
+#define list_pop_entry(list, type, member)				\
+	__list_pop_entry(__main_heap, list, type, member)
+
+#define __list_for_each(heap, pos, list)				\
+	for (pos = __hptr((heap), (list)->head.next);			\
+	     pos != &(list)->head; pos = __hptr((heap), (pos)->next))
+
+#define list_for_each(pos, list)					\
+	__list_for_each(__main_heap, pos, list)
+
+#define __list_for_each_reverse(heap, pos, list)			\
+	for (pos = __hptr((heap), (list)->head.prev);			\
+	     pos != &(list)->head; pos = __hptr((heap), (pos)->prev))
+
+#define list_for_each_reverse(pos, list)				\
+	__list_for_each_reverse(__main_heap, pos, list)
+
+#define __list_for_each_safe(heap, pos, tmp, list)			\
+	for (pos = __hptr((heap), (list)->head.next),			\
+		     tmp = __hptr((heap), (pos)->next);			\
+	     pos != &(list)->head;					\
+	     pos = tmp, tmp = __hptr((heap), (pos)->next))
+
+#define list_for_each_safe(pos, tmp, list)				\
+	__list_for_each_safe(__main_heap, pos, tmp, list)
+
+#define __list_for_each_entry(heap, pos, list, member)			\
+	for (pos = list_entry(__hptr((heap), (list)->head.next),	\
+			      typeof(*pos), member);			\
+	     &(pos)->member != &(list)->head;				\
+	     pos = list_entry(__hptr((heap), (pos)->member.next),	\
+			      typeof(*pos), member))
+
+#define list_for_each_entry(pos, list, member)				\
+	__list_for_each_entry(__main_heap, pos, list, member)
+
+#define __list_for_each_entry_safe(heap, pos, tmp, list, member)	\
+	for (pos = list_entry(__hptr((heap), (list)->head.next),	\
+			      typeof(*pos), member),			\
+		     tmp = list_entry(__hptr((heap), (pos)->member.next), \
+				      typeof(*pos), member);		\
+	     &(pos)->member != &(list)->head;				\
+	     pos = tmp, tmp = list_entry(__hptr((heap), (pos)->member.next), \
+					 typeof(*pos), member))
+
+#define __list_for_each_entry_reverse_safe(heap, pos, tmp, list, member) \
+	for (pos = list_entry(__hptr((heap), (list)->head.prev),	\
+			      typeof(*pos), member),			\
+		     tmp = list_entry(__hptr((heap), (pos)->member.prev), \
+				      typeof(*pos), member);		\
+	     &(pos)->member != &(list)->head;				\
+	     pos = tmp, tmp = list_entry(__hptr((heap), (pos)->member.prev), \
+					 typeof(*pos), member))
+
+#define list_for_each_entry_safe(pos, tmp, list, member)		\
+	__list_for_each_entry_safe(__main_heap, pos, tmp, list, member)
+
+#define __list_for_each_entry_reverse(heap, pos, list, member)		\
+	for (pos = list_entry(__hptr((heap), (list)->head.prev),	\
+			      typeof(*pos), member);			\
+	     &pos->member != &(list)->head;				\
+	     pos = list_entry(__hptr((heap), pos->member.prev),		\
+			      typeof(*pos), member))
+
+#define list_for_each_entry_reverse(pos, list, member)			\
+	__list_for_each_entry_reverse(__main_heap, pos, list, member)
+
+#define list_for_each_entry_reverse_safe(pos, tmp, list, member)	\
+	__list_for_each_entry_reverse_safe(__main_heap, pos, tmp, list, member)
+
+#endif /* !_BOILERPLATE_SHARED_LIST_H */
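Usage sketch: the macros above store heap offsets in the holders rather than
raw pointers, so a list can be shared by processes mapping the same heap at
different base addresses; __hoff()/__hptr() perform the translation against a
given heap (__main_heap by default). A minimal fragment, assuming struct
listobj and struct holder from the surrounding boilerplate support (struct
item and dump_items() are hypothetical):

    struct item {
            struct holder link;     /* allocated from the shared heap */
            int value;
    };

    static void dump_items(struct listobj *q)
    {
            struct item *pos;

            /* Each step resolves a stored offset back to a pointer. */
            list_for_each_entry(pos, q, link)
                    printf("%d\n", pos->value);
    }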
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/shavl.h b/kernel/xenomai-v3.2.4/include/boilerplate/shavl.h
new file mode 100644
index 0000000..a15a4db
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/shavl.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2018 Philippe Gerum
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#ifndef _BOILERPLATE_SHAVL_H
+#define _BOILERPLATE_SHAVL_H
+
+#define AVL_PSHARED
+
+#include <boilerplate/avl-inner.h>
+
+#endif /* !_BOILERPLATE_SHAVL_H */
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/time.h b/kernel/xenomai-v3.2.4/include/boilerplate/time.h
new file mode 100644
index 0000000..8d317e7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/time.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_TIME_H
+#define _BOILERPLATE_TIME_H
+
+#include <time.h>
+
+typedef unsigned long long ticks_t;
+
+typedef long long sticks_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void timespec_sub(struct timespec *__restrict r,
+		  const struct timespec *__restrict t1,
+		  const struct timespec *__restrict t2);
+
+void timespec_subs(struct timespec *__restrict r,
+		   const struct timespec *__restrict t1,
+		   sticks_t t2);
+
+void timespec_add(struct timespec *__restrict r,
+		  const struct timespec *__restrict t1,
+		  const struct timespec *__restrict t2);
+
+void timespec_adds(struct timespec *__restrict r,
+		   const struct timespec *__restrict t1,
+		   sticks_t t2);
+
+void timespec_sets(struct timespec *__restrict r,
+		   ticks_t ns);
+
+#ifdef __cplusplus
+}
+#endif
+
+static inline sticks_t timespec_scalar(const struct timespec *__restrict t)
+{
+	return t->tv_sec * 1000000000LL + t->tv_nsec;
+}
+
+static inline int __attribute__ ((always_inline))
+timespec_before(const struct timespec *__restrict t1,
+		const struct timespec *__restrict t2)
+{
+	if (t1->tv_sec < t2->tv_sec)
+		return 1;
+
+	if (t1->tv_sec == t2->tv_sec &&
+	    t1->tv_nsec < t2->tv_nsec)
+		return 1;
+
+	return 0;
+}
+
+static inline int __attribute__ ((always_inline))
+timespec_before_or_same(const struct timespec *__restrict t1,
+			const struct timespec *__restrict t2)
+{
+	if (t1->tv_sec < t2->tv_sec)
+		return 1;
+
+	if (t1->tv_sec == t2->tv_sec &&
+	    t1->tv_nsec <= t2->tv_nsec)
+		return 1;
+
+	return 0;
+}
+
+static inline int __attribute__ ((always_inline))
+timespec_after(const struct timespec *__restrict t1,
+	       const struct timespec *__restrict t2)
+{
+	return !timespec_before_or_same(t1, t2);
+}
+
+static inline int __attribute__ ((always_inline))
+timespec_after_or_same(const struct timespec *__restrict t1,
+		       const struct timespec *__restrict t2)
+{
+	return !timespec_before(t1, t2);
+}
+
+#endif /* _BOILERPLATE_TIME_H */
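Usage sketch: the helpers above implement normalized timespec arithmetic and
ordering tests. For instance, computing an elapsed interval (report_delay()
is hypothetical):

    static void report_delay(const struct timespec *start,
                             const struct timespec *end)
    {
            struct timespec delta;

            if (timespec_before(end, start))
                    return;         /* non-monotonic input */

            timespec_sub(&delta, end, start);       /* delta = end - start */
            printf("delay: %lld ns\n", (long long)timespec_scalar(&delta));
    }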
diff --git a/kernel/xenomai-v3.2.4/include/boilerplate/tunables.h b/kernel/xenomai-v3.2.4/include/boilerplate/tunables.h
new file mode 100644
index 0000000..397ffc8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/boilerplate/tunables.h
@@ -0,0 +1,126 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _BOILERPLATE_TUNABLES_H
+#define _BOILERPLATE_TUNABLES_H
+
+#include <assert.h>
+#include <boilerplate/setup.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline int __may_change_config_tunable(void)
+{
+	return !__config_done;
+}
+
+#define __tunable_set_call(__name, __scope)	\
+	__assign_ ## __name ## _ ## __scope
+
+#define __tunable_get_call(__name, __scope)	\
+	__read_ ## __name ## _ ## __scope
+
+#define __define_tunable(__name, __type, __val, __scope)	\
+	void __tunable_set_call(__name, __scope)(__typeof__(__type) __val)
+
+#define __read_tunable(__name, __type, __scope)	\
+	__typeof__(__type) __tunable_get_call(__name, __scope)(void)
+
+#define define_config_tunable(__name, __type, __val)	\
+	__define_tunable(__name, __type, __val, config)
+
+#define define_runtime_tunable(__name, __type, __val)	\
+	__define_tunable(__name, __type, __val, runtime)
+
+#define read_config_tunable(__name, __type)		\
+	__read_tunable(__name, __type, config)
+
+#define read_runtime_tunable(__name, __type)		\
+	__read_tunable(__name, __type, runtime)
+
+#define set_config_tunable(__name, __val)			\
+	do {							\
+		assert(__may_change_config_tunable());		\
+		__tunable_set_call(__name, config)(__val);	\
+	} while (0)
+
+#define get_config_tunable(__name)		\
+	__tunable_get_call(__name, config)()
+
+#define set_runtime_tunable(__name, __val)	\
+	__tunable_set_call(__name, runtime)(__val)
+
+#define get_runtime_tunable(__name)		\
+	__tunable_get_call(__name, runtime)()
+
+static inline define_config_tunable(cpu_affinity, cpu_set_t, cpus)
+{
+	__base_setup_data.cpu_affinity = cpus;
+}
+
+static inline read_config_tunable(cpu_affinity, cpu_set_t)
+{
+	return __base_setup_data.cpu_affinity;
+}
+
+static inline define_config_tunable(no_mlock, int, nolock)
+{
+	__base_setup_data.no_mlock = nolock;
+}
+
+static inline read_config_tunable(no_mlock, int)
+{
+	return __base_setup_data.no_mlock;
+}
+
+static inline define_config_tunable(no_sanity, int, nosanity)
+{
+	__base_setup_data.no_sanity = nosanity;
+}
+
+static inline read_config_tunable(no_sanity, int)
+{
+	return __base_setup_data.no_sanity;
+}
+
+static inline define_runtime_tunable(verbosity_level, int, level)
+{
+	__base_setup_data.verbosity_level = level;
+}
+
+static inline read_runtime_tunable(verbosity_level, int)
+{
+	return __base_setup_data.verbosity_level;
+}
+
+static inline define_runtime_tunable(trace_level, int, level)
+{
+	__base_setup_data.trace_level = level;
+}
+
+static inline read_runtime_tunable(trace_level, int)
+{
+	return __base_setup_data.trace_level;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_BOILERPLATE_TUNABLES_H */
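Usage sketch: the macros expand to __assign_<name>_<scope>() and
__read_<name>_<scope>() accessors, and set_config_tunable() asserts that
__config_done is still clear, i.e. config tunables may only change before the
setup code completes. A new tunable follows the pattern of the built-in ones
(mem_pool_size is hypothetical):

    static size_t mem_pool_size;

    static inline define_config_tunable(mem_pool_size, size_t, size)
    {
            mem_pool_size = size;
    }

    static inline read_config_tunable(mem_pool_size, size_t)
    {
            return mem_pool_size;
    }

    /* Before init completes: set_config_tunable(mem_pool_size, sz); */
    /* Any time afterwards:   get_config_tunable(mem_pool_size);     */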
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/Makefile.am
new file mode 100644
index 0000000..19e9611
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/Makefile.am
@@ -0,0 +1,27 @@
+includesubdir = $(includedir)/cobalt
+
+includesub_HEADERS =	\
+	fcntl.h		\
+	mqueue.h	\
+	pthread.h	\
+	sched.h		\
+	semaphore.h	\
+	signal.h	\
+	stdio.h		\
+	stdlib.h	\
+	syslog.h	\
+	ticks.h		\
+	time.h		\
+	trace.h		\
+	tunables.h	\
+	unistd.h	\
+	wrappers.h
+
+noinst_HEADERS =	\
+	arith.h
+
+SUBDIRS =		\
+	boilerplate	\
+	kernel		\
+	sys		\
+	uapi
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/arith.h b/kernel/xenomai-v3.2.4/include/cobalt/arith.h
new file mode 100644
index 0000000..6313924
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/arith.h
@@ -0,0 +1,45 @@
+/**
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARITH_H
+#define _COBALT_ARITH_H
+
+#include <stddef.h>
+#include <endian.h>
+
+#if __BYTE_ORDER == __BIG_ENDIAN
+#define endianstruct { unsigned int _h; unsigned int _l; }
+#else /* __BYTE_ORDER == __LITTLE_ENDIAN */
+#define endianstruct { unsigned int _l; unsigned int _h; }
+#endif /* __BYTE_ORDER == __LITTLE_ENDIAN */
+
+static inline unsigned xnarch_do_div(unsigned long long *a, unsigned d)
+{
+	unsigned int r = *a % d;
+	*a /= d;
+
+	return r;
+}
+
+#define do_div(a, d) xnarch_do_div(&(a), (d))
+
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/uapi/arith.h>
+
+#endif /* !_COBALT_ARITH_H */
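Usage sketch: do_div() divides a 64-bit value in place and returns the
remainder, mirroring the kernel-side helper of the same name:

    unsigned long long ns = 1000000123ULL;
    unsigned int rem = do_div(ns, 1000000000U);
    /* ns == 1 (seconds), rem == 123 (leftover nanoseconds) */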
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am
new file mode 100644
index 0000000..ec3c2fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/Makefile.am
@@ -0,0 +1,8 @@
+includesubdir = $(includedir)/cobalt/boilerplate
+
+includesub_HEADERS =	\
+	sched.h		\
+	limits.h	\
+	signal.h	\
+	trace.h		\
+	wrappers.h
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h
new file mode 100644
index 0000000..ae49324
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/limits.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_BOILERPLATE_LIMITS_H
+#define _COBALT_BOILERPLATE_LIMITS_H
+
+#include <cobalt/uapi/kernel/limits.h>
+
+#endif /* _COBALT_BOILERPLATE_LIMITS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h
new file mode 100644
index 0000000..d23a8da
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/sched.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_BOILERPLATE_SCHED_H
+#define _COBALT_BOILERPLATE_SCHED_H
+
+#include <cobalt/sched.h>
+
+#endif /* _COBALT_BOILERPLATE_SCHED_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h
new file mode 100644
index 0000000..40c6c78
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/signal.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_BOILERPLATE_SIGNAL_H
+#define _COBALT_BOILERPLATE_SIGNAL_H
+
+#include <cobalt/signal.h>
+
+/* Generates reserved signal numbers for Boilerplate/Copperplate. */
+#define __SIGRSVD(n)	(SIGRTMIN + 8 + (n))
+
+#define SIGAGENT	__SIGRSVD(0) /* Request to remote agent */
+#define SIGPERIOD	__SIGRSVD(1) /* Periodic signal */
+
+/* Generates private signal numbers for clients, up to SIGRTMAX. */
+#define __SIGPRIV(n)	__SIGRSVD(8 + (n))
+
+#define SIGSAFE_LOCK_ENTRY(__safelock)					\
+	do {								\
+		push_cleanup_lock(__safelock);				\
+		write_lock(__safelock);
+
+#define SIGSAFE_LOCK_EXIT(__safelock)					\
+		write_unlock(__safelock);				\
+		pop_cleanup_lock(&__safelock);				\
+	} while (0)
+
+#endif /* _COBALT_BOILERPLATE_SIGNAL_H */
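Usage sketch: SIGSAFE_LOCK_ENTRY() opens a do { ... } block which
SIGSAFE_LOCK_EXIT() closes, so the two must bracket the same critical section
within one function, with a cleanup handler pushed while the lock is held.
Assuming state_lock is a lock managed by the copperplate locking support
(push_cleanup_lock()/write_lock() and friends are defined there):

    static void update_shared_state(void)
    {
            SIGSAFE_LOCK_ENTRY(&state_lock);
            /* ... signal-safe critical section ... */
            SIGSAFE_LOCK_EXIT(&state_lock);
    }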
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h
new file mode 100644
index 0000000..c6cff8b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/trace.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_BOILERPLATE_TRACE_H
+#define _COBALT_BOILERPLATE_TRACE_H
+
+#include <cobalt/trace.h>
+
+#endif /* _COBALT_BOILERPLATE_TRACE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h
new file mode 100644
index 0000000..fc3a59d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/boilerplate/wrappers.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_BOILERPLATE_WRAPPERS_H
+#define _COBALT_BOILERPLATE_WRAPPERS_H
+
+#include <cobalt/wrappers.h>
+
+#endif /* !_COBALT_BOILERPLATE_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/fcntl.h b/kernel/xenomai-v3.2.4/include/cobalt/fcntl.h
new file mode 100644
index 0000000..f1052c2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/fcntl.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <fcntl.h>
+
+#ifndef _COBALT_FCNTL_H
+#define _COBALT_FCNTL_H
+
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, open(const char *path, int oflag, ...));
+
+COBALT_DECL(int, open64(const char *path, int oflag, ...));
+
+COBALT_DECL(int, __open_2(const char *path, int oflag));
+
+COBALT_DECL(int, __open64_2(const char *path, int oflag));
+
+COBALT_DECL(int, fcntl(int fd, int cmd, ...));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_FCNTL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am
new file mode 100644
index 0000000..6413481
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/Makefile.am
@@ -0,0 +1,37 @@
+
+noinst_HEADERS =	\
+	ancillaries.h	\
+	arith.h		\
+	assert.h	\
+	bufd.h		\
+	clock.h		\
+	compat.h	\
+	heap.h		\
+	init.h		\
+	intr.h		\
+	list.h		\
+	lock.h		\
+	map.h		\
+	pipe.h		\
+	ppd.h		\
+	registry.h	\
+	sched.h		\
+	sched-idle.h	\
+	schedparam.h	\
+	schedqueue.h	\
+	sched-quota.h	\
+	sched-rt.h	\
+	sched-sporadic.h	\
+	sched-tp.h	\
+	sched-weak.h	\
+	select.h	\
+	stat.h		\
+	synch.h		\
+	thread.h	\
+	timer.h		\
+	trace.h		\
+	tree.h		\
+	vdso.h		\
+	vfile.h
+
+SUBDIRS = rtdm
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h
new file mode 100644
index 0000000..b957310
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ancillaries.h
@@ -0,0 +1,68 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ANCILLARIES_H
+#define _COBALT_KERNEL_ANCILLARIES_H
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/uidgid.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+#define ksformat(__dst, __len, __fmt, __args...)			\
+	({								\
+		size_t __ret;						\
+		__ret = snprintf(__dst, __len, __fmt, ##__args);	\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kasformat(__fmt, __args...)					\
+	({								\
+		kasprintf(GFP_KERNEL, __fmt, ##__args);			\
+	})
+
+#define kvsformat(__dst, __len, __fmt, __ap)				\
+	({								\
+		size_t __ret;						\
+		__ret = vsnprintf(__dst, __len, __fmt, __ap);		\
+		if (__ret >= __len)					\
+			__dst[__len-1] = '\0';				\
+		__ret;							\
+	})
+
+#define kvasformat(__fmt, __ap)						\
+	({								\
+		kvasprintf(GFP_KERNEL, __fmt, __ap);			\
+	})
+
+void __knamecpy_requires_character_array_as_destination(void);
+
+#define knamecpy(__dst, __src)						\
+	({								\
+		if (!__builtin_types_compatible_p(typeof(__dst), char[])) \
+			/* undefined symbol => link-time failure */	\
+			__knamecpy_requires_character_array_as_destination();	\
+		strncpy((__dst), __src, sizeof(__dst));			\
+		__dst[sizeof(__dst) - 1] = '\0';			\
+		__dst;							\
+	 })
+
+#define get_current_uuid() from_kuid_munged(current_user_ns(), current_uid())
+
+#endif /* !_COBALT_KERNEL_ANCILLARIES_H */
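Usage sketch: ksformat() behaves like snprintf() but guarantees NUL
termination on truncation, and knamecpy() rejects any destination that is not
a genuine character array, by referencing the deliberately undefined function
declared above when the type check fails, so sizeof(__dst) is always
meaningful. Typical use for fixed-size object names (struct foo and
set_foo_name() are hypothetical):

    struct foo {
            char name[32];
    };

    static void set_foo_name(struct foo *f, int id)
    {
            ksformat(f->name, sizeof(f->name), "foo/%d", id);
            /* or, copying an existing name bounded by the array size: */
            /* knamecpy(f->name, other_name); */
    }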
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h
new file mode 100644
index 0000000..a343dfd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/arith.h
@@ -0,0 +1,35 @@
+/*
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ARITH_H
+#define _COBALT_KERNEL_ARITH_H
+
+#include <asm/byteorder.h>
+#include <asm/div64.h>
+
+#ifdef __BIG_ENDIAN
+#define endianstruct { unsigned int _h; unsigned int _l; }
+#else /* __LITTLE_ENDIAN */
+#define endianstruct { unsigned int _l; unsigned int _h; }
+#endif
+
+#include <asm/xenomai/uapi/arith.h>
+
+#endif /* _COBALT_KERNEL_ARITH_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h
new file mode 100644
index 0000000..98218ce
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/assert.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2006 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_ASSERT_H
+#define _COBALT_KERNEL_ASSERT_H
+
+#include <linux/kconfig.h>
+
+#define XENO_INFO	KERN_INFO    "[Xenomai] "
+#define XENO_WARNING	KERN_WARNING "[Xenomai] "
+#define XENO_ERR	KERN_ERR     "[Xenomai] "
+
+#define XENO_DEBUG(__subsys)				\
+	IS_ENABLED(CONFIG_XENO_OPT_DEBUG_##__subsys)
+#define XENO_ASSERT(__subsys, __cond)			\
+	(!WARN_ON(XENO_DEBUG(__subsys) && !(__cond)))
+#define XENO_BUG(__subsys)				\
+	BUG_ON(XENO_DEBUG(__subsys))
+#define XENO_BUG_ON(__subsys, __cond)			\
+	BUG_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN(__subsys, __cond, __fmt...)		\
+	WARN(XENO_DEBUG(__subsys) && (__cond), __fmt)
+#define XENO_WARN_ON(__subsys, __cond)			\
+	WARN_ON(XENO_DEBUG(__subsys) && (__cond))
+#define XENO_WARN_ON_ONCE(__subsys, __cond)		\
+	WARN_ON_ONCE(XENO_DEBUG(__subsys) && (__cond))
+#ifdef CONFIG_SMP
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	XENO_BUG_ON(__subsys, __cond)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	XENO_WARN_ON(__subsys, __cond)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	XENO_WARN_ON_ONCE(__subsys, __cond)
+#else
+#define XENO_BUG_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_SMP(__subsys, __cond)		\
+	do { } while (0)
+#define XENO_WARN_ON_ONCE_SMP(__subsys, __cond)		\
+	do { } while (0)
+#endif
+
+#define primary_mode_only()	XENO_BUG_ON(CONTEXT, is_secondary_domain())
+#define secondary_mode_only()	XENO_BUG_ON(CONTEXT, !is_secondary_domain())
+#define interrupt_only()	XENO_BUG_ON(CONTEXT, !xnsched_interrupt_p())
+#define realtime_cpu_only()	XENO_BUG_ON(CONTEXT, !xnsched_supported_cpu(raw_smp_processor_id()))
+#define thread_only()		XENO_BUG_ON(CONTEXT, xnsched_interrupt_p())
+#define irqoff_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+#define atomic_only()		XENO_BUG_ON(CONTEXT, (xnlock_is_owner(&nklock) && hard_irqs_disabled()) == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, xnlock_is_owner(&nklock) || hard_irqs_disabled())
+#else
+#define atomic_only()		XENO_BUG_ON(CONTEXT, hard_irqs_disabled() == 0)
+#define preemptible_only()	XENO_BUG_ON(CONTEXT, hard_irqs_disabled() != 0)
+#endif
+
+#endif /* !_COBALT_KERNEL_ASSERT_H */
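Usage sketch: every check is gated on its debug switch, so e.g.
XENO_BUG_ON(CONTEXT, ...) compiles away unless CONFIG_XENO_OPT_DEBUG_CONTEXT
is enabled. A caller-context guard in driver code might read (do_oob_work()
and cond are hypothetical):

    static void do_oob_work(void)
    {
            primary_mode_only();    /* must run from the oob stage */
            XENO_WARN_ON_ONCE(COBALT, cond);
            /* ... */
    }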
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h
new file mode 100644
index 0000000..92a4078
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/bufd.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_BUFD_H
+#define _COBALT_KERNEL_BUFD_H
+
+#include <linux/types.h>
+
+/**
+ * @addtogroup cobalt_core_bufd
+ *
+ * @{
+ */
+
+struct mm_struct;
+
+struct xnbufd {
+	caddr_t b_ptr;		/* src/dst buffer address */
+	size_t b_len;		/* total length of buffer */
+	off_t b_off;		/* # of bytes read/written */
+	struct mm_struct *b_mm;	/* src/dst address space */
+	caddr_t b_carry;	/* pointer to carry over area */
+	char b_buf[64];		/* fast carry over area */
+};
+
+void xnbufd_map_umem(struct xnbufd *bufd,
+		     void __user *ptr, size_t len);
+
+static inline void xnbufd_map_uread(struct xnbufd *bufd,
+				    const void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, (void __user *)ptr, len);
+}
+
+static inline void xnbufd_map_uwrite(struct xnbufd *bufd,
+				     void __user *ptr, size_t len)
+{
+	xnbufd_map_umem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_uread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd);
+
+void xnbufd_map_kmem(struct xnbufd *bufd,
+		     void *ptr, size_t len);
+
+static inline void xnbufd_map_kread(struct xnbufd *bufd,
+				    const void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, (void *)ptr, len);
+}
+
+static inline void xnbufd_map_kwrite(struct xnbufd *bufd,
+				     void *ptr, size_t len)
+{
+	xnbufd_map_kmem(bufd, ptr, len);
+}
+
+ssize_t xnbufd_unmap_kread(struct xnbufd *bufd);
+
+ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd);
+
+ssize_t xnbufd_copy_to_kmem(void *ptr,
+			    struct xnbufd *bufd, size_t len);
+
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd,
+			      void *from, size_t len);
+
+void xnbufd_invalidate(struct xnbufd *bufd);
+
+static inline void xnbufd_reset(struct xnbufd *bufd)
+{
+	bufd->b_off = 0;
+}
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_BUFD_H */
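Usage sketch: a buffer descriptor abstracts the source/destination address
space, so one copy path serves both user and kernel buffers. Reading a
message from user memory into a kernel buffer (fetch_from_user() is
hypothetical):

    static ssize_t fetch_from_user(void *kmem, void __user *u_buf,
                                   size_t len)
    {
            struct xnbufd bufd;
            ssize_t ret;

            xnbufd_map_uread(&bufd, u_buf, len);
            ret = xnbufd_copy_to_kmem(kmem, &bufd, len);
            xnbufd_unmap_uread(&bufd);

            return ret;
    }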
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h
new file mode 100644
index 0000000..2f7b714
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/clock.h
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2006,2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_CLOCK_H
+#define _COBALT_KERNEL_CLOCK_H
+
+#include <pipeline/pipeline.h>
+#include <pipeline/clock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_clock
+ * @{
+ */
+
+struct xnsched;
+struct xntimerdata;
+struct __kernel_timex;
+
+struct xnclock_gravity {
+	unsigned long irq;
+	unsigned long kernel;
+	unsigned long user;
+};
+
+struct xnclock {
+	/** (ns) */
+	xnsticks_t wallclock_offset;
+	/** (ns) */
+	xnticks_t resolution;
+	/** (raw clock ticks). */
+	struct xnclock_gravity gravity;
+	/** Clock name. */
+	const char *name;
+	struct {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+		xnticks_t (*read_raw)(struct xnclock *clock);
+		xnticks_t (*read_monotonic)(struct xnclock *clock);
+		int (*set_time)(struct xnclock *clock,
+				const struct timespec64 *ts);
+		xnsticks_t (*ns_to_ticks)(struct xnclock *clock,
+					  xnsticks_t ns);
+		xnsticks_t (*ticks_to_ns)(struct xnclock *clock,
+					  xnsticks_t ticks);
+		xnsticks_t (*ticks_to_ns_rounded)(struct xnclock *clock,
+						  xnsticks_t ticks);
+		void (*program_local_shot)(struct xnclock *clock,
+					   struct xnsched *sched);
+		void (*program_remote_shot)(struct xnclock *clock,
+					    struct xnsched *sched);
+#endif
+		int (*adjust_time)(struct xnclock *clock,
+				   struct __kernel_timex *tx);
+		int (*set_gravity)(struct xnclock *clock,
+				   const struct xnclock_gravity *p);
+		void (*reset_gravity)(struct xnclock *clock);
+#ifdef CONFIG_XENO_OPT_VFILE
+		void (*print_status)(struct xnclock *clock,
+				     struct xnvfile_regular_iterator *it);
+#endif
+	} ops;
+	/* Private section. */
+	struct xntimerdata *timerdata;
+	int id;
+#ifdef CONFIG_SMP
+	/** Possible CPU affinity of clock beat. */
+	cpumask_t affinity;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	struct xnvfile_snapshot timer_vfile;
+	struct xnvfile_rev_tag timer_revtag;
+	struct list_head timerq;
+	int nrtimers;
+#endif /* CONFIG_XENO_OPT_STATS */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_regular vfile;
+#endif
+};
+
+struct xnclock_ratelimit_state {
+	xnticks_t interval;
+	xnticks_t begin;
+	int burst;
+	int printed;
+	int missed;
+};
+
+extern struct xnclock nkclock;
+
+int xnclock_register(struct xnclock *clock,
+		     const cpumask_t *affinity);
+
+void xnclock_deregister(struct xnclock *clock);
+
+void xnclock_tick(struct xnclock *clock);
+
+void xnclock_core_local_shot(struct xnsched *sched);
+
+void xnclock_core_remote_shot(struct xnsched *sched);
+
+xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns);
+
+xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks);
+
+xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks);
+
+xnticks_t xnclock_core_read_monotonic(void);
+
+static inline xnticks_t xnclock_core_read_raw(void)
+{
+	return pipeline_read_cycle_counter();
+}
+
+/* We use the Linux defaults */
+#define XN_RATELIMIT_INTERVAL	5000000000LL
+#define XN_RATELIMIT_BURST	10
+
+int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func);
+
+#define xnclock_ratelimit()	({					\
+	static struct xnclock_ratelimit_state __state = {		\
+		.interval	= XN_RATELIMIT_INTERVAL,		\
+		.burst		= XN_RATELIMIT_BURST,			\
+	};								\
+	__xnclock_ratelimit(&__state, __func__);			\
+})
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	if (likely(clock == &nkclock))
+		xnclock_core_local_shot(sched);
+	else if (clock->ops.program_local_shot)
+		clock->ops.program_local_shot(clock, sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	if (likely(clock == &nkclock))
+		xnclock_core_remote_shot(sched);
+	else if (clock->ops.program_remote_shot)
+		clock->ops.program_remote_shot(clock, sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_raw();
+
+	return clock->ops.read_raw(clock);
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ns_to_ticks(ns);
+
+	return clock->ops.ns_to_ticks(clock, ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns(ticks);
+
+	return clock->ops.ticks_to_ns(clock, ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_ticks_to_ns_rounded(ticks);
+
+	return clock->ops.ticks_to_ns_rounded(clock, ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return xnclock_core_read_monotonic();
+
+	return clock->ops.read_monotonic(clock);
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	if (likely(clock == &nkclock))
+		return -EINVAL;
+
+	return clock->ops.set_time(clock, ts);
+}
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline void xnclock_program_shot(struct xnclock *clock,
+					struct xnsched *sched)
+{
+	xnclock_core_local_shot(sched);
+}
+
+static inline void xnclock_remote_shot(struct xnclock *clock,
+				       struct xnsched *sched)
+{
+#ifdef CONFIG_SMP
+	xnclock_core_remote_shot(sched);
+#endif
+}
+
+static inline xnticks_t xnclock_read_raw(struct xnclock *clock)
+{
+	return xnclock_core_read_raw();
+}
+
+static inline xnsticks_t xnclock_ns_to_ticks(struct xnclock *clock,
+					     xnsticks_t ns)
+{
+	return xnclock_core_ns_to_ticks(ns);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns(struct xnclock *clock,
+					     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns(ticks);
+}
+
+static inline xnsticks_t xnclock_ticks_to_ns_rounded(struct xnclock *clock,
+						     xnsticks_t ticks)
+{
+	return xnclock_core_ticks_to_ns_rounded(ticks);
+}
+
+static inline xnticks_t xnclock_read_monotonic(struct xnclock *clock)
+{
+	return xnclock_core_read_monotonic();
+}
+
+static inline int xnclock_set_time(struct xnclock *clock,
+				   const struct timespec64 *ts)
+{
+	/*
+	 * There is no way to change the core clock's idea of time.
+	 */
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline int xnclock_adjust_time(struct xnclock *clock,
+				      struct __kernel_timex *tx)
+{
+	if (clock->ops.adjust_time == NULL)
+		return -EOPNOTSUPP;
+
+	return clock->ops.adjust_time(clock, tx);
+}
+
+static inline xnticks_t xnclock_get_offset(struct xnclock *clock)
+{
+	return clock->wallclock_offset;
+}
+
+static inline xnticks_t xnclock_get_resolution(struct xnclock *clock)
+{
+	return clock->resolution; /* ns */
+}
+
+static inline void xnclock_set_resolution(struct xnclock *clock,
+					  xnticks_t resolution)
+{
+	clock->resolution = resolution; /* ns */
+}
+
+static inline int xnclock_set_gravity(struct xnclock *clock,
+				      const struct xnclock_gravity *gravity)
+{
+	if (clock->ops.set_gravity)
+		return clock->ops.set_gravity(clock, gravity);
+
+	return -EINVAL;
+}
+
+static inline void xnclock_reset_gravity(struct xnclock *clock)
+{
+	if (clock->ops.reset_gravity)
+		clock->ops.reset_gravity(clock);
+}
+
+#define xnclock_get_gravity(__clock, __type)  ((__clock)->gravity.__type)
+
+static inline xnticks_t xnclock_read_realtime(struct xnclock *clock)
+{
+	if (likely(clock == &nkclock))
+		return pipeline_read_wallclock();
+	/*
+	 * Return an adjusted value of the monotonic time with the
+	 * translated system wallclock offset.
+	 */
+	return xnclock_read_monotonic(clock) + xnclock_get_offset(clock);
+}
+
+void xnclock_apply_offset(struct xnclock *clock,
+			  xnsticks_t delta_ns);
+
+void xnclock_set_wallclock(xnticks_t epoch_ns);
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+void xnclock_init_proc(void);
+
+void xnclock_cleanup_proc(void);
+
+static inline void xnclock_print_status(struct xnclock *clock,
+					struct xnvfile_regular_iterator *it)
+{
+	if (clock->ops.print_status)
+		clock->ops.print_status(clock, it);
+}
+
+#else
+static inline void xnclock_init_proc(void) { }
+static inline void xnclock_cleanup_proc(void) { }
+#endif
+
+int xnclock_init(void);
+
+void xnclock_cleanup(void);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_CLOCK_H */
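Usage sketch: xnclock_ratelimit() instantiates one static rate-limit state
per call site, allowing bursts of up to XN_RATELIMIT_BURST messages every
XN_RATELIMIT_INTERVAL nanoseconds (5 s). E.g., throttling a diagnostic (cpu
is hypothetical):

    if (xnclock_ratelimit())
            printk(XENO_WARNING "timer overrun on CPU%d\n", cpu);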
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h
new file mode 100644
index 0000000..275735d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/compat.h
@@ -0,0 +1,167 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_COMPAT_H
+#define _COBALT_KERNEL_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <linux/compat.h>
+#include <net/compat.h>
+#include <asm/xenomai/wrappers.h>
+#include <cobalt/uapi/sched.h>
+
+struct mq_attr;
+
+struct __compat_sched_ss_param {
+	int __sched_low_priority;
+	struct old_timespec32 __sched_repl_period;
+	struct old_timespec32 __sched_init_budget;
+	int __sched_max_repl;
+};
+
+struct __compat_sched_rr_param {
+	struct old_timespec32 __sched_rr_quantum;
+};
+
+struct compat_sched_param_ex {
+	int sched_priority;
+	union {
+		struct __compat_sched_ss_param ss;
+		struct __compat_sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
+
+struct compat_mq_attr {
+	compat_long_t mq_flags;
+	compat_long_t mq_maxmsg;
+	compat_long_t mq_msgsize;
+	compat_long_t mq_curmsgs;
+};
+
+struct compat_sched_tp_window {
+	struct old_timespec32 offset;
+	struct old_timespec32 duration;
+	int ptid;
+};
+
+struct __compat_sched_config_tp {
+	int op;
+	int nr_windows;
+	struct compat_sched_tp_window windows[0];
+};
+
+union compat_sched_config {
+	struct __compat_sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
+
+#define compat_sched_tp_confsz(nr_win) \
+  (sizeof(struct __compat_sched_config_tp) + nr_win * sizeof(struct compat_sched_tp_window))
+
+typedef struct {
+	compat_ulong_t fds_bits[__FD_SETSIZE / (8 * sizeof(compat_long_t))];
+} compat_fd_set;
+
+struct compat_rtdm_mmap_request {
+	u64 offset;
+	compat_size_t length;
+	int prot;
+	int flags;
+};
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *cts);
+
+int sys32_put_timespec(struct old_timespec32 __user *cts,
+		       const struct timespec64 *ts);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx);
+
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize);
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize);
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent *__user u_cev);
+
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		     const struct mmsghdr *mmsg);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *ciov,
+		    int ciovlen);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen);
+
+#endif /* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_KERNEL_COMPAT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h
new file mode 100644
index 0000000..cdb0dac
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/clock.h
@@ -0,0 +1,74 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_CLOCK_H
+#define _COBALT_KERNEL_DOVETAIL_CLOCK_H
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/kernel/assert.h>
+#include <linux/ktime.h>
+#include <linux/errno.h>
+
+struct timespec64;
+
+static inline u64 pipeline_read_cycle_counter(void)
+{
+	/*
+	 * With Dovetail, our idea of time is directly based on a
+	 * refined count of nanoseconds since the epoch; the hardware
+	 * time counter is transparent to us. For this reason,
+	 * xnclock_ticks_to_ns() and xnclock_ns_to_ticks() are
+	 * identity operations when building for Dovetail.
+	 */
+	return ktime_get_mono_fast_ns();
+}
+
+static inline xnticks_t pipeline_read_wallclock(void)
+{
+	return ktime_get_real_fast_ns();
+}
+
+static inline int pipeline_set_wallclock(xnticks_t epoch_ns)
+{
+	return -EOPNOTSUPP;
+}
+
+void pipeline_set_timer_shot(unsigned long cycles);
+
+const char *pipeline_timer_name(void);
+
+static inline const char *pipeline_clock_name(void)
+{
+	return "<Linux clocksource>";
+}
+
+static inline int pipeline_get_host_time(struct timespec64 *tp)
+{
+	/* Convert ktime_get_real_fast_ns() to timespec. */
+	*tp = ktime_to_timespec64(ktime_get_real_fast_ns());
+
+	return 0;
+}
+
+static inline void pipeline_init_clock(void)
+{
+	/* N/A */
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks)
+{
+	return ticks;
+}
+
+static inline xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns)
+{
+	return ns;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_CLOCK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h
new file mode 100644
index 0000000..af3d70f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/inband_work.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+#define _COBALT_KERNEL_DOVETAIL_INBAND_WORK_H
+
+#include <linux/irq_work.h>
+
+/*
+ * This field must be named inband_work and appear first in the
+ * container work struct.
+ */
+struct pipeline_inband_work {
+	struct irq_work work;
+};
+
+#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler)		\
+	{								\
+		.work = IRQ_WORK_INIT((void (*)(struct irq_work *))__handler), \
+	}
+
+#define pipeline_post_inband_work(__work)				\
+			irq_work_queue(&(__work)->inband_work.work)
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_INBAND_WORK_H */
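Usage sketch: per the comment above, the descriptor must be the first member
and be named inband_work, so the irq_work callback address maps back to the
container (relay_work and relay_handler are hypothetical):

    struct relay_work {
            struct pipeline_inband_work inband_work; /* must be first */
            int payload;
    };

    static void relay_handler(struct pipeline_inband_work *work)
    {
            struct relay_work *rw =
                    container_of(work, struct relay_work, inband_work);
            /* runs on the in-band stage */
            (void)rw;
    }

    static struct relay_work rw = {
            .inband_work = PIPELINE_INBAND_WORK_INITIALIZER(rw, relay_handler),
    };

    /* From out-of-band context: pipeline_post_inband_work(&rw); */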
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h
new file mode 100644
index 0000000..55d9b8f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/irq.h
@@ -0,0 +1,24 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_IRQ_H
+#define _COBALT_KERNEL_DOVETAIL_IRQ_H
+
+static inline void xnintr_init_proc(void)
+{
+	/* N/A */
+}
+
+static inline void xnintr_cleanup_proc(void)
+{
+	/* N/A */
+}
+
+static inline int xnintr_mount(void)
+{
+	/* N/A */
+	return 0;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_IRQ_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h
new file mode 100644
index 0000000..fa47f03
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/kevents.h
@@ -0,0 +1,36 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+#define _COBALT_KERNEL_DOVETAIL_KEVENTS_H
+
+#define KEVENT_PROPAGATE   0
+#define KEVENT_STOP        1
+
+struct cobalt_process;
+struct cobalt_thread;
+
+static inline
+int pipeline_attach_process(struct cobalt_process *process)
+{
+	return 0;
+}
+
+static inline
+void pipeline_detach_process(struct cobalt_process *process)
+{ }
+
+int pipeline_prepare_current(void);
+
+void pipeline_attach_current(struct xnthread *thread);
+
+int pipeline_trap_kevents(void);
+
+void pipeline_enable_kevents(void);
+
+void pipeline_cleanup_process(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_KEVENTS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h
new file mode 100644
index 0000000..8866c92
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/lock.h
@@ -0,0 +1,28 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_LOCK_H
+#define _COBALT_KERNEL_DOVETAIL_LOCK_H
+
+#include <linux/spinlock.h>
+
+typedef hard_spinlock_t pipeline_spinlock_t;
+
+#define PIPELINE_SPIN_LOCK_UNLOCKED(__name)  __HARD_SPIN_LOCK_INITIALIZER(__name)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+/* Disable UP-over-SMP kernel optimization in debug mode. */
+#define __locking_active__  1
+
+#else
+
+#ifdef CONFIG_SMP
+#define __locking_active__  1
+#else
+#define __locking_active__  IS_ENABLED(CONFIG_SMP)
+#endif
+
+#endif
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_LOCK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h
new file mode 100644
index 0000000..4f3dd95
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/machine.h
@@ -0,0 +1,51 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_MACHINE_H
+#define _COBALT_KERNEL_DOVETAIL_MACHINE_H
+
+#include <linux/percpu.h>
+
+#ifdef CONFIG_FTRACE
+#define boot_lat_trace_notice "[LTRACE]"
+#else
+#define boot_lat_trace_notice ""
+#endif
+
+struct vm_area_struct;
+
+struct cobalt_machine {
+	const char *name;
+	int (*init)(void);
+	int (*late_init)(void);
+	void (*cleanup)(void);
+	void (*prefault)(struct vm_area_struct *vma);
+	const char *const *fault_labels;
+};
+
+extern struct cobalt_machine cobalt_machine;
+
+struct cobalt_machine_cpudata {
+	unsigned int faults[32];
+};
+
+DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+
+struct cobalt_pipeline {
+#ifdef CONFIG_SMP
+	cpumask_t supported_cpus;
+#endif
+};
+
+int pipeline_init(void);
+
+int pipeline_late_init(void);
+
+void pipeline_cleanup(void);
+
+extern struct cobalt_pipeline cobalt_pipeline;
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h
new file mode 100644
index 0000000..2ee7b32
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/pipeline.h
@@ -0,0 +1,109 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+#define _COBALT_KERNEL_DOVETAIL_PIPELINE_H
+
+#include <linux/irq_pipeline.h>
+#include <linux/cpumask.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/syscall.h>
+#include <pipeline/machine.h>
+
+typedef unsigned long spl_t;
+
+/*
+ * We only keep the LSB when testing in SMP mode in order to strip off
+ * the recursion marker (0x2) the nklock may store there.
+ */
+#define splhigh(x)  ((x) = oob_irq_save() & 1)
+#ifdef CONFIG_SMP
+#define splexit(x)  oob_irq_restore(x & 1)
+#else /* !CONFIG_SMP */
+#define splexit(x)  oob_irq_restore(x)
+#endif /* !CONFIG_SMP */
+#define splmax()    oob_irq_disable()
+#define splnone()   oob_irq_enable()
+#define spltest()   oob_irqs_disabled()
+
+#define is_secondary_domain()	running_inband()
+#define is_primary_domain()	running_oob()
+
+#ifdef CONFIG_SMP
+
+irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id);
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	if (num_possible_cpus() == 1)
+		return 0;
+
+	/* Trap the out-of-band rescheduling interrupt. */
+	return __request_percpu_irq(RESCHEDULE_OOB_IPI,
+			pipeline_reschedule_ipi_handler,
+			IRQF_OOB,
+			"Xenomai reschedule",
+			&cobalt_machine_cpudata);
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+	if (num_possible_cpus() > 1)
+		/* Release the out-of-band rescheduling interrupt. */
+		free_percpu_irq(RESCHEDULE_OOB_IPI, &cobalt_machine_cpudata);
+}
+
+static inline void pipeline_send_resched_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band rescheduling interrupt on remote
+	 * CPU(s).
+	 */
+	irq_send_oob_ipi(RESCHEDULE_OOB_IPI, dest);
+}
+
+static inline void pipeline_send_timer_ipi(const struct cpumask *dest)
+{
+	/*
+	 * Trigger the out-of-band timer interrupt on remote CPU(s).
+	 */
+	irq_send_oob_ipi(TIMER_OOB_IPI, dest);
+}
+
+#else  /* !CONFIG_SMP */
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	return 0;
+}
+
+
+static inline void pipeline_free_resched_ipi(void)
+{
+}
+
+#endif	/* CONFIG_SMP */
+
+static inline void pipeline_prepare_panic(void)
+{
+	/* N/A */
+}
+
+static inline void pipeline_collect_features(struct cobalt_featinfo *f)
+{
+	f->clock_freq = 0;	/* N/A */
+}
+
+#ifndef pipeline_get_syscall_args
+static inline void pipeline_get_syscall_args(struct task_struct *task,
+					     struct pt_regs *regs,
+					     unsigned long *args)
+{
+	syscall_get_arguments(task, regs, args);
+}
+#endif	/* !pipeline_get_syscall_args */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_PIPELINE_H */
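Usage sketch: the spl helpers bracket short sections that must not be
preempted by out-of-band interrupts:

    spl_t s;

    splhigh(s);     /* mask oob IRQs, save previous state in s */
    /* ... short critical section ... */
    splexit(s);     /* restore the saved masking state */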
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h
new file mode 100644
index 0000000..45512b9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sched.h
@@ -0,0 +1,62 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SCHED_H
+#define _COBALT_KERNEL_DOVETAIL_SCHED_H
+
+#include <cobalt/kernel/lock.h>
+
+struct xnthread;
+struct xnsched;
+struct task_struct;
+
+void pipeline_init_shadow_tcb(struct xnthread *thread);
+
+void pipeline_init_root_tcb(struct xnthread *thread);
+
+int ___xnsched_run(struct xnsched *sched);
+
+static inline int pipeline_schedule(struct xnsched *sched)
+{
+	return run_oob_call((int (*)(void *))___xnsched_run, sched);
+}
+
+static inline void pipeline_prep_switch_oob(struct xnthread *root)
+{
+	/* N/A */
+}
+
+bool pipeline_switch_to(struct xnthread *prev,
+			struct xnthread *next,
+			bool leaving_inband);
+
+int pipeline_leave_inband(void);
+
+int pipeline_leave_oob_prepare(void);
+
+static inline void pipeline_leave_oob_unlock(void)
+{
+	/*
+	 * We may not re-enable hard irqs due to the specifics of
+	 * stage escalation via run_oob_call(), to prevent breaking
+	 * the (virtual) interrupt state.
+	 */
+	xnlock_put(&nklock);
+}
+
+void pipeline_leave_oob_finish(void);
+
+static inline
+void pipeline_finalize_thread(struct xnthread *thread)
+{
+	/* N/A */
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk);
+
+void pipeline_clear_mayday(void);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SCHED_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h
new file mode 100644
index 0000000..1da9d13
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/sirq.h
@@ -0,0 +1,80 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_SIRQ_H
+#define _COBALT_KERNEL_DOVETAIL_SIRQ_H
+
+#include <linux/irq_pipeline.h>
+#include <cobalt/kernel/assert.h>
+
+/*
+ * Wrappers to create "synthetic IRQs" the Dovetail way. Those
+ * interrupt channels can only be triggered by software, in order to run
+ * a handler on the in-band execution stage.
+ */
+
+static inline
+int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id))
+{
+	/*
+	 * Allocate an IRQ from the synthetic interrupt domain then
+	 * trap it to @handler, to be fired from the in-band stage.
+	 */
+	int sirq, ret;
+
+	sirq = irq_create_direct_mapping(synthetic_irq_domain);
+	if (sirq == 0)
+		return -EAGAIN;
+
+	ret = __request_percpu_irq(sirq,
+			handler,
+			IRQF_NO_THREAD,
+			"Inband sirq",
+			&cobalt_machine_cpudata);
+
+	if (ret) {
+		irq_dispose_mapping(sirq);
+		return ret;
+	}
+
+	return sirq;
+}
+
+static inline
+void pipeline_delete_inband_sirq(int sirq)
+{
+	/*
+	 * Free the synthetic IRQ then deallocate it to its
+	 * originating domain.
+	 */
+	free_percpu_irq(sirq,
+		&cobalt_machine_cpudata);
+
+	irq_dispose_mapping(sirq);
+}
+
+static inline void pipeline_post_sirq(int sirq)
+{
+	/* Trigger the synthetic IRQ */
+	irq_post_inband(sirq);
+}
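+
+/*
+ * Lifecycle sketch (illustrative; my_inband_handler is an assumed
+ * caller-side routine): allocate a synthetic IRQ once, post it
+ * whenever in-band work must be kicked, dispose of it on cleanup:
+ *
+ *	int sirq = pipeline_create_inband_sirq(my_inband_handler);
+ *
+ *	if (sirq < 0)
+ *		return sirq;
+ *	...
+ *	pipeline_post_sirq(sirq);
+ *	...
+ *	pipeline_delete_inband_sirq(sirq);
+ */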
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_SIRQ_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h
new file mode 100644
index 0000000..1e6b0f0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/thread.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_THREAD_H
+#define _COBALT_KERNEL_DOVETAIL_THREAD_H
+
+#include <linux/dovetail.h>
+
+struct xnthread;
+
+#define cobalt_threadinfo oob_thread_state
+
+static inline struct cobalt_threadinfo *pipeline_current(void)
+{
+	return dovetail_current_state();
+}
+
+static inline
+struct xnthread *pipeline_thread_from_task(struct task_struct *p)
+{
+	return dovetail_task_state(p)->thread;
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h
new file mode 100644
index 0000000..372d832
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/tick.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_DOVETAIL_TICK_H
+#define _COBALT_KERNEL_DOVETAIL_TICK_H
+
+int pipeline_install_tick_proxy(void);
+
+void pipeline_uninstall_tick_proxy(void);
+
+struct xnsched;
+
+bool pipeline_must_force_program_tick(struct xnsched *sched);
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_TICK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h
new file mode 100644
index 0000000..306dd54
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/trace.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_TRACE_H
+#define _COBALT_KERNEL_DOVETAIL_TRACE_H
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <cobalt/uapi/kernel/trace.h>
+#include <trace/events/cobalt-core.h>
+#include <cobalt/kernel/assert.h>
+
+static inline int xntrace_max_begin(unsigned long v)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_max_end(unsigned long v)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_max_reset(void)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_user_start(void)
+{
+	trace_cobalt_trigger("user-start");
+	return 0;
+}
+
+static inline int xntrace_user_stop(unsigned long v)
+{
+	trace_cobalt_trace_longval(0, v);
+	trace_cobalt_trigger("user-stop");
+	return 0;
+}
+
+static inline int xntrace_user_freeze(unsigned long v, int once)
+{
+	trace_cobalt_trace_longval(0, v);
+	trace_cobalt_trigger("user-freeze");
+	return 0;
+}
+
+static inline void xntrace_latpeak_freeze(int delay)
+{
+	trace_cobalt_latpeak(delay);
+	trace_cobalt_trigger("latency-freeze");
+}
+
+static inline int xntrace_special(unsigned char id, unsigned long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_special_u64(unsigned char id,
+				unsigned long long v)
+{
+	trace_cobalt_trace_longval(id, v);
+	return 0;
+}
+
+static inline int xntrace_pid(pid_t pid, short prio)
+{
+	trace_cobalt_trace_pid(pid, prio);
+	return 0;
+}
+
+static inline int xntrace_tick(unsigned long delay_ticks) /* ns */
+{
+	trace_cobalt_tick_shot(delay_ticks);
+	return 0;
+}
+
+static inline bool xntrace_enabled(void)
+{
+	return IS_ENABLED(CONFIG_DOVETAIL_TRACE);
+}
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_TRACE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h
new file mode 100644
index 0000000..07de643
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/vdso_fallback.h
@@ -0,0 +1,73 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ * Copyright (c) Siemens AG, 2021
+ */
+
+#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/clock.h>
+
+#define is_clock_gettime(__nr)		((__nr) == __NR_clock_gettime)
+
+#ifndef __NR_clock_gettime64
+#define is_clock_gettime64(__nr)	0
+#else
+#define is_clock_gettime64(__nr)	((__nr) == __NR_clock_gettime64)
+#endif
+
+static __always_inline bool
+pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs)
+{
+	struct __kernel_old_timespec __user *u_old_ts;
+	struct __kernel_timespec uts, __user *u_uts;
+	struct __kernel_old_timespec old_ts;
+	struct timespec64 ts64;
+	int clock_id, ret = 0;
+	unsigned long args[6];
+
+	if (!is_clock_gettime(nr) && !is_clock_gettime64(nr))
+		return false;
+
+	/*
+	 * We need to fetch the args again because not all archs use the same
+	 * calling convention for Linux and Xenomai syscalls.
+	 */
+	syscall_get_arguments(current, regs, args);
+
+	clock_id = (int)args[0];
+	switch (clock_id) {
+	case CLOCK_MONOTONIC:
+		ns2ts(&ts64, xnclock_read_monotonic(&nkclock));
+		break;
+	case CLOCK_REALTIME:
+		ns2ts(&ts64, xnclock_read_realtime(&nkclock));
+		break;
+	default:
+		return false;
+	}
+
+	if (is_clock_gettime(nr)) {
+		old_ts.tv_sec = (__kernel_old_time_t)ts64.tv_sec;
+		old_ts.tv_nsec = ts64.tv_nsec;
+		u_old_ts = (struct __kernel_old_timespec __user *)args[1];
+		if (raw_copy_to_user(u_old_ts, &old_ts, sizeof(old_ts)))
+			ret = -EFAULT;
+	} else if (is_clock_gettime64(nr)) {
+		uts.tv_sec = ts64.tv_sec;
+		uts.tv_nsec = ts64.tv_nsec;
+		u_uts = (struct __kernel_timespec __user *)args[1];
+		if (raw_copy_to_user(u_uts, &uts, sizeof(uts)))
+			ret = -EFAULT;
+	}
+
+	__xn_status_return(regs, ret);
+
+	return true;
+}
+
+#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h
new file mode 100644
index 0000000..133aaca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/dovetail/pipeline/wrappers.h
@@ -0,0 +1,9 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+#define _COBALT_KERNEL_DOVETAIL_WRAPPERS_H
+
+/* No wrapper needed so far. */
+
+#endif /* !_COBALT_KERNEL_DOVETAIL_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h
new file mode 100644
index 0000000..09c982f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/heap.h
@@ -0,0 +1,197 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_HEAP_H
+#define _COBALT_KERNEL_HEAP_H
+
+#include <linux/string.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/heap.h>
+
+/**
+ * @addtogroup cobalt_core_heap
+ * @{
+ */
+
+#define XNHEAP_PAGE_SHIFT	9 /* 2^9 => 512 bytes */
+#define XNHEAP_PAGE_SIZE	(1UL << XNHEAP_PAGE_SHIFT)
+#define XNHEAP_PAGE_MASK	(~(XNHEAP_PAGE_SIZE - 1))
+#define XNHEAP_MIN_LOG2		4 /* 16 bytes */
+/*
+ * Use bucketed memory for sizes between 2^XNHEAP_MIN_LOG2 and
+ * 2^(XNHEAP_PAGE_SHIFT-1).
+ */
+#define XNHEAP_MAX_BUCKETS	(XNHEAP_PAGE_SHIFT - XNHEAP_MIN_LOG2)
+#define XNHEAP_MIN_ALIGN	(1U << XNHEAP_MIN_LOG2)
+/* Maximum size of a heap (4GB - PAGE_SIZE). */
+#define XNHEAP_MAX_HEAPSZ	(4294967295U - PAGE_SIZE + 1)
+/* Bits we need for encoding a page # */
+#define XNHEAP_PGENT_BITS      (32 - XNHEAP_PAGE_SHIFT)
+/* Each page is represented by a page map entry. */
+#define XNHEAP_PGMAP_BYTES	sizeof(struct xnheap_pgentry)
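+
+/*
+ * Worked example (illustrative): with XNHEAP_PAGE_SHIFT = 9 and
+ * XNHEAP_MIN_LOG2 = 4, bucketed sizes are 16, 32, 64, 128 and 256
+ * bytes. A 100-byte request is rounded up to the 128-byte bucket,
+ * while a 600-byte one exceeds half a page and is served from whole
+ * pages (entry.type == page_list).
+ */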
+
+struct xnheap_pgentry {
+	/* Linkage in bucket list. */
+	unsigned int prev : XNHEAP_PGENT_BITS;
+	unsigned int next : XNHEAP_PGENT_BITS;
+	/* page_list or log2. */
+	unsigned int type : 6;
+	/*
+	 * We hold either a spatial map of busy blocks within the page
+	 * for bucketed memory (up to 32 blocks per page), or the
+	 * overall size of the multi-page block if entry.type ==
+	 * page_list.
+	 */
+	union {
+		u32 map;
+		u32 bsize;
+	};
+};
+
+/*
+ * A range descriptor is stored at the beginning of the first page of
+ * a range of free pages. xnheap_range.size is nrpages *
+ * XNHEAP_PAGE_SIZE. Ranges are indexed by address and size in
+ * rbtrees.
+ */
+struct xnheap_range {
+	struct rb_node addr_node;
+	struct rb_node size_node;
+	size_t size;
+};
+
+struct xnheap {
+	void *membase;
+	struct rb_root addr_tree;
+	struct rb_root size_tree;
+	struct xnheap_pgentry *pagemap;
+	size_t usable_size;
+	size_t used_size;
+	u32 buckets[XNHEAP_MAX_BUCKETS];
+	char name[XNOBJECT_NAME_LEN];
+	DECLARE_XNLOCK(lock);
+	struct list_head next;
+};
+
+extern struct xnheap cobalt_heap;
+
+#define xnmalloc(size)     xnheap_alloc(&cobalt_heap, size)
+#define xnfree(ptr)        xnheap_free(&cobalt_heap, ptr)
+
+static inline void *xnheap_get_membase(const struct xnheap *heap)
+{
+	return heap->membase;
+}
+
+static inline
+size_t xnheap_get_size(const struct xnheap *heap)
+{
+	return heap->usable_size;
+}
+
+static inline
+size_t xnheap_get_used(const struct xnheap *heap)
+{
+	return heap->used_size;
+}
+
+static inline
+size_t xnheap_get_free(const struct xnheap *heap)
+{
+	return heap->usable_size - heap->used_size;
+}
+
+int xnheap_init(struct xnheap *heap,
+		void *membase, size_t size);
+
+void xnheap_destroy(struct xnheap *heap);
+
+void *xnheap_alloc(struct xnheap *heap, size_t size);
+
+void xnheap_free(struct xnheap *heap, void *block);
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block);
+
+void xnheap_set_name(struct xnheap *heap,
+		     const char *name, ...);
+
+void *xnheap_vmalloc(size_t size);
+
+void xnheap_vfree(void *p);
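+
+/*
+ * Typical setup sketch (illustrative; size is an assumed caller
+ * variable, error handling elided): back a heap with vmalloc'ed
+ * memory, carve blocks out of it, then tear it down:
+ *
+ *	void *mem = xnheap_vmalloc(size);
+ *	struct xnheap heap;
+ *	void *p;
+ *
+ *	xnheap_init(&heap, mem, size);
+ *	p = xnheap_alloc(&heap, 64);
+ *	...
+ *	xnheap_free(&heap, p);
+ *	xnheap_destroy(&heap);
+ *	xnheap_vfree(mem);
+ */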
+
+static inline void *xnheap_zalloc(struct xnheap *heap, size_t size)
+{
+	void *p;
+
+	p = xnheap_alloc(heap, size);
+	if (p)
+		memset(p, 0, size);
+
+	return p;
+}
+
+static inline char *xnstrdup(const char *s)
+{
+	char *p;
+
+	p = xnmalloc(strlen(s) + 1);
+	if (p == NULL)
+		return NULL;
+
+	return strcpy(p, s);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+void xnheap_init_proc(void);
+void xnheap_cleanup_proc(void);
+#else /* !CONFIG_XENO_OPT_VFILE */
+static inline void xnheap_init_proc(void) { }
+static inline void xnheap_cleanup_proc(void) { }
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_HEAP_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h
new file mode 100644
index 0000000..41dd531
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/init.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INIT_H
+#define _COBALT_KERNEL_INIT_H
+
+#include <linux/atomic.h>
+#include <linux/notifier.h>
+#include <cobalt/uapi/corectl.h>
+
+extern atomic_t cobalt_runstate;
+
+static inline enum cobalt_run_states realtime_core_state(void)
+{
+	return atomic_read(&cobalt_runstate);
+}
+
+static inline int realtime_core_enabled(void)
+{
+	return atomic_read(&cobalt_runstate) != COBALT_STATE_DISABLED;
+}
+
+static inline int realtime_core_running(void)
+{
+	return atomic_read(&cobalt_runstate) == COBALT_STATE_RUNNING;
+}
+
+static inline void set_realtime_core_state(enum cobalt_run_states state)
+{
+	atomic_set(&cobalt_runstate, state);
+}
+
+void cobalt_add_state_chain(struct notifier_block *nb);
+
+void cobalt_remove_state_chain(struct notifier_block *nb);
+
+void cobalt_call_state_chain(enum cobalt_run_states newstate);
+
+#endif /* !_COBALT_KERNEL_INIT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h
new file mode 100644
index 0000000..393ad96
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/intr.h
@@ -0,0 +1,171 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_INTR_H
+#define _COBALT_KERNEL_INTR_H
+
+#include <linux/spinlock.h>
+#include <cobalt/kernel/stat.h>
+#include <pipeline/irq.h>
+
+/**
+ * @addtogroup cobalt_core_irq
+ * @{
+ */
+
+/* Possible return values of a handler. */
+#define XN_IRQ_NONE	 0x1
+#define XN_IRQ_HANDLED	 0x2
+#define XN_IRQ_STATMASK	 (XN_IRQ_NONE|XN_IRQ_HANDLED)
+#define XN_IRQ_PROPAGATE 0x100
+#define XN_IRQ_DISABLE   0x200
+
+/* Init flags. */
+#define XN_IRQTYPE_SHARED  0x1
+#define XN_IRQTYPE_EDGE    0x2
+
+/* Status bits. */
+#define XN_IRQSTAT_ATTACHED   0
+#define _XN_IRQSTAT_ATTACHED  (1 << XN_IRQSTAT_ATTACHED)
+#define XN_IRQSTAT_DISABLED   1
+#define _XN_IRQSTAT_DISABLED  (1 << XN_IRQSTAT_DISABLED)
+
+struct xnintr;
+struct xnsched;
+
+typedef int (*xnisr_t)(struct xnintr *intr);
+
+typedef void (*xniack_t)(unsigned irq, void *arg);
+
+struct xnirqstat {
+	/** Number of handled receipts since attachment. */
+	xnstat_counter_t hits;
+	/** Runtime accounting entity */
+	xnstat_exectime_t account;
+	/** Accumulated accounting entity */
+	xnstat_exectime_t sum;
+};
+
+struct xnintr {
+#ifdef CONFIG_XENO_OPT_SHIRQ
+	/** Next object in the IRQ-sharing chain. */
+	struct xnintr *next;
+#endif
+	/** Number of consecutive unhandled interrupts. */
+	unsigned int unhandled;
+	/** Interrupt service routine. */
+	xnisr_t isr;
+	/** User-defined cookie value. */
+	void *cookie;
+	/** runtime status */
+	unsigned long status;
+	/** Creation flags. */
+	int flags;
+	/** IRQ number. */
+	unsigned int irq;
+	/** Interrupt acknowledge routine. */
+	xniack_t iack;
+	/** Symbolic name. */
+	const char *name;
+	/** Descriptor maintenance lock. */
+	raw_spinlock_t lock;
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	/** Statistics. */
+	struct xnirqstat *stats;
+#endif
+};
+
+struct xnintr_iterator {
+    int cpu;		/** Current CPU in iteration. */
+    unsigned long hits;	/** Current hit counter. */
+    xnticks_t exectime_period;	/** Used CPU time in current accounting period. */
+    xnticks_t account_period; /** Length of accounting period. */
+    xnticks_t exectime_total;	/** Overall CPU time consumed. */
+    int list_rev;	/** System-wide xnintr list revision (internal use). */
+    struct xnintr *prev;	/** Previously visited xnintr object (internal use). */
+};
+
+void xnintr_core_clock_handler(void);
+
+void xnintr_host_tick(struct xnsched *sched);
+
+/* Public interface. */
+
+int xnintr_init(struct xnintr *intr,
+		const char *name,
+		unsigned irq,
+		xnisr_t isr,
+		xniack_t iack,
+		int flags);
+
+void xnintr_destroy(struct xnintr *intr);
+
+int xnintr_attach(struct xnintr *intr,
+		  void *cookie, const cpumask_t *cpumask);
+
+void xnintr_detach(struct xnintr *intr);
+
+void xnintr_enable(struct xnintr *intr);
+
+void xnintr_disable(struct xnintr *intr);
+
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask);
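+
+/*
+ * Attachment sketch (illustrative; my_isr, myintr, irq and cookie
+ * are assumed caller-side names, error handling elided):
+ *
+ *	static int my_isr(struct xnintr *intr)
+ *	{
+ *		return XN_IRQ_HANDLED;
+ *	}
+ *
+ *	xnintr_init(&myintr, "mydev", irq, my_isr, NULL, 0);
+ *	xnintr_attach(&myintr, cookie, NULL);
+ *	xnintr_enable(&myintr);
+ */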
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+
+int xnintr_query_init(struct xnintr_iterator *iterator);
+
+int xnintr_get_query_lock(void);
+
+void xnintr_put_query_lock(void);
+
+int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
+		      char *name_buf);
+
+#else /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+static inline int xnintr_query_init(struct xnintr_iterator *iterator)
+{
+	return 0;
+}
+
+static inline int xnintr_get_query_lock(void)
+{
+	return 0;
+}
+
+static inline void xnintr_put_query_lock(void) {}
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_INTR_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h
new file mode 100644
index 0000000..a06d1aa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/clock.h
@@ -0,0 +1,45 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_CLOCK_H
+#define _COBALT_KERNEL_IPIPE_CLOCK_H
+
+#include <linux/ipipe_tickdev.h>
+#include <cobalt/uapi/kernel/types.h>
+
+struct timespec64;
+
+static inline u64 pipeline_read_cycle_counter(void)
+{
+	u64 t;
+	ipipe_read_tsc(t);
+	return t;
+}
+
+xnticks_t pipeline_read_wallclock(void);
+
+int pipeline_set_wallclock(xnticks_t epoch_ns);
+
+static inline void pipeline_set_timer_shot(unsigned long cycles)
+{
+	ipipe_timer_set(cycles);
+}
+
+static inline const char *pipeline_timer_name(void)
+{
+	return ipipe_timer_name();
+}
+
+static inline const char *pipeline_clock_name(void)
+{
+	return ipipe_clock_name();
+}
+
+int pipeline_get_host_time(struct timespec64 *tp);
+
+void pipeline_update_clock_freq(unsigned long long freq);
+
+void pipeline_init_clock(void);
+
+#endif /* !_COBALT_KERNEL_IPIPE_CLOCK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h
new file mode 100644
index 0000000..12ef07b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/inband_work.h
@@ -0,0 +1,49 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_INBAND_WORK_H
+#define _COBALT_KERNEL_IPIPE_INBAND_WORK_H
+
+#include <linux/ipipe.h>
+
+/*
+ * This field must be named inband_work and appear first in the
+ * container work struct.
+ */
+struct pipeline_inband_work {
+	struct ipipe_work_header work;
+};
+
+#define PIPELINE_INBAND_WORK_INITIALIZER(__work, __handler)		\
+	{								\
+		.work = {						\
+			.size = sizeof(__work),				\
+			.handler = (void (*)(struct ipipe_work_header *)) \
+			__handler,					\
+		},							\
+	}
+
+#define pipeline_post_inband_work(__work)	\
+	ipipe_post_work_root(__work, inband_work.work)
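+
+/*
+ * Container sketch (illustrative; my_handler is an assumed caller
+ * routine): per the rule above, the wrapped header comes first and
+ * is named inband_work:
+ *
+ *	struct my_work {
+ *		struct pipeline_inband_work inband_work;
+ *		int payload;
+ *	};
+ *
+ *	struct my_work w = {
+ *		.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(w, my_handler),
+ *		.payload = 42,
+ *	};
+ *	pipeline_post_inband_work(&w);
+ */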
+
+#endif /* !_COBALT_KERNEL_IPIPE_INBAND_WORK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h
new file mode 100644
index 0000000..a2db772
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/irq.h
@@ -0,0 +1,14 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_IRQ_H
+#define _COBALT_KERNEL_IPIPE_IRQ_H
+
+void xnintr_init_proc(void);
+
+void xnintr_cleanup_proc(void);
+
+int xnintr_mount(void);
+
+#endif /* !_COBALT_KERNEL_IPIPE_IRQ_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h
new file mode 100644
index 0000000..f3f0c2f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/kevents.h
@@ -0,0 +1,37 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_KEVENTS_H
+#define _COBALT_KERNEL_IPIPE_KEVENTS_H
+
+#define KEVENT_PROPAGATE   0
+#define KEVENT_STOP        1
+
+struct cobalt_process;
+struct cobalt_thread;
+struct xnthread;
+
+static inline
+int pipeline_attach_process(struct cobalt_process *process)
+{
+	return 0;
+}
+
+static inline
+void pipeline_detach_process(struct cobalt_process *process)
+{ }
+
+int pipeline_prepare_current(void);
+
+void pipeline_attach_current(struct xnthread *thread);
+
+int pipeline_trap_kevents(void);
+
+void pipeline_enable_kevents(void);
+
+void pipeline_cleanup_process(void);
+
+#endif /* !_COBALT_KERNEL_IPIPE_KEVENTS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h
new file mode 100644
index 0000000..f33b041
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/lock.h
@@ -0,0 +1,21 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_LOCK_H
+#define _COBALT_KERNEL_IPIPE_LOCK_H
+
+#include <pipeline/pipeline.h>
+
+typedef ipipe_spinlock_t pipeline_spinlock_t;
+
+#define PIPELINE_SPIN_LOCK_UNLOCKED(__name)  IPIPE_SPIN_LOCK_UNLOCKED
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+/* Disable UP-over-SMP kernel optimization in debug mode. */
+#define __locking_active__  1
+#else
+#define __locking_active__  ipipe_smp_p
+#endif
+
+#endif /* !_COBALT_KERNEL_IPIPE_LOCK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h
new file mode 100644
index 0000000..062722a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/machine.h
@@ -0,0 +1,55 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_MACHINE_H
+#define _COBALT_KERNEL_IPIPE_MACHINE_H
+
+#include <linux/ipipe.h>
+#include <linux/percpu.h>
+
+#ifdef CONFIG_IPIPE_TRACE
+#define boot_lat_trace_notice "[LTRACE]"
+#else
+#define boot_lat_trace_notice ""
+#endif
+
+struct vm_area_struct;
+
+struct cobalt_machine {
+	const char *name;
+	int (*init)(void);
+	int (*late_init)(void);
+	void (*cleanup)(void);
+	void (*prefault)(struct vm_area_struct *vma);
+	const char *const *fault_labels;
+};
+
+extern struct cobalt_machine cobalt_machine;
+
+struct cobalt_machine_cpudata {
+	unsigned int faults[IPIPE_NR_FAULTS];
+};
+
+DECLARE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+
+struct cobalt_pipeline {
+	struct ipipe_domain domain;
+	unsigned long clock_freq;
+	unsigned int escalate_virq;
+#ifdef CONFIG_SMP
+	cpumask_t supported_cpus;
+#endif
+};
+
+int pipeline_init(void);
+
+int pipeline_late_init(void);
+
+void pipeline_cleanup(void);
+
+extern struct cobalt_pipeline cobalt_pipeline;
+
+#endif /* !_COBALT_KERNEL_IPIPE_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h
new file mode 100644
index 0000000..ac9c92b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/pipeline.h
@@ -0,0 +1,95 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_PIPELINE_H
+#define _COBALT_KERNEL_IPIPE_PIPELINE_H
+
+#ifdef CONFIG_IPIPE_LEGACY
+#error "CONFIG_IPIPE_LEGACY must be switched off"
+#endif
+
+#include <pipeline/machine.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+
+#define xnsched_primary_domain  cobalt_pipeline.domain
+
+#define PIPELINE_NR_IRQS  IPIPE_NR_IRQS
+
+typedef unsigned long spl_t;
+
+#define splhigh(x)  ((x) = ipipe_test_and_stall_head() & 1)
+#ifdef CONFIG_SMP
+#define splexit(x)  ipipe_restore_head(x & 1)
+#else /* !CONFIG_SMP */
+#define splexit(x)  ipipe_restore_head(x)
+#endif /* !CONFIG_SMP */
+#define splmax()    ipipe_stall_head()
+#define splnone()   ipipe_unstall_head()
+#define spltest()   ipipe_test_head()
+
+#define is_secondary_domain()	ipipe_root_p
+#define is_primary_domain()	(!ipipe_root_p)
+
+#ifdef CONFIG_SMP
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	return ipipe_request_irq(&cobalt_pipeline.domain,
+				IPIPE_RESCHEDULE_IPI,
+				(ipipe_irq_handler_t)handler,
+				NULL, NULL);
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+	ipipe_free_irq(&cobalt_pipeline.domain,
+		IPIPE_RESCHEDULE_IPI);
+}
+
+static inline void pipeline_send_resched_ipi(const struct cpumask *dest)
+{
+	ipipe_send_ipi(IPIPE_RESCHEDULE_IPI, *dest);
+}
+
+static inline void pipeline_send_timer_ipi(const struct cpumask *dest)
+{
+	ipipe_send_ipi(IPIPE_HRTIMER_IPI, *dest);
+}
+
+#else  /* !CONFIG_SMP */
+
+static inline int pipeline_request_resched_ipi(void (*handler)(void))
+{
+	return 0;
+}
+
+static inline void pipeline_free_resched_ipi(void)
+{
+}
+
+#endif	/* CONFIG_SMP */
+
+static inline void pipeline_prepare_panic(void)
+{
+	ipipe_prepare_panic();
+}
+
+static inline void pipeline_collect_features(struct cobalt_featinfo *f)
+{
+	f->clock_freq = cobalt_pipeline.clock_freq;
+}
+
+static inline void pipeline_get_syscall_args(struct task_struct *task,
+					     struct pt_regs *regs,
+					     unsigned long *args)
+{
+	*args++ = __xn_reg_arg1(regs);
+	*args++ = __xn_reg_arg2(regs);
+	*args++ = __xn_reg_arg3(regs);
+	*args++ = __xn_reg_arg4(regs);
+	*args   = __xn_reg_arg5(regs);
+}
+
+#endif /* !_COBALT_KERNEL_IPIPE_PIPELINE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h
new file mode 100644
index 0000000..9d7bf88
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sched.h
@@ -0,0 +1,58 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_SCHED_H
+#define _COBALT_KERNEL_IPIPE_SCHED_H
+
+#include <cobalt/kernel/lock.h>
+
+struct xnthread;
+struct xnsched;
+struct task_struct;
+
+void pipeline_init_shadow_tcb(struct xnthread *thread);
+
+void pipeline_init_root_tcb(struct xnthread *thread);
+
+int pipeline_schedule(struct xnsched *sched);
+
+void pipeline_prep_switch_oob(struct xnthread *root);
+
+bool pipeline_switch_to(struct xnthread *prev,
+			struct xnthread *next,
+			bool leaving_inband);
+
+int pipeline_leave_inband(void);
+
+int pipeline_leave_oob_prepare(void);
+
+static inline void pipeline_leave_oob_unlock(void)
+{
+	/*
+	 * Introduce an opportunity for interrupt delivery right
+	 * before switching context, which shortens the
+	 * uninterruptible code path.
+	 *
+	 * We have to shut irqs off before __xnsched_run() is called
+	 * next though: if an interrupt could preempt us right after
+	 * xnarch_escalate() is passed but before the nklock is
+	 * grabbed, we would enter the critical section in
+	 * ___xnsched_run() from the root domain, which would defeat
+	 * the purpose of escalating the request.
+	 */
+	xnlock_clear_irqon(&nklock);
+	splmax();
+}
+
+void pipeline_leave_oob_finish(void);
+
+void pipeline_finalize_thread(struct xnthread *thread);
+
+void pipeline_raise_mayday(struct task_struct *tsk);
+
+void pipeline_clear_mayday(void);
+
+#endif /* !_COBALT_KERNEL_IPIPE_SCHED_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h
new file mode 100644
index 0000000..1a16776
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/sirq.h
@@ -0,0 +1,59 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_SIRQ_H
+#define _COBALT_KERNEL_IPIPE_SIRQ_H
+
+#include <linux/ipipe.h>
+#include <pipeline/machine.h>
+
+/*
+ * Wrappers to create "synthetic IRQs" the I-pipe way (used to be
+ * called "virtual IRQs" there). Those interrupt channels can only be
+ * triggered by software; they have per-CPU semantics. We use them to
+ * schedule handlers to be run on the in-band execution stage, meaning
+ * "secondary mode" in the Cobalt jargon.
+ */
+
+static inline
+int pipeline_create_inband_sirq(irqreturn_t (*handler)(int irq, void *dev_id))
+{
+	int sirq, ret;
+
+	sirq = ipipe_alloc_virq();
+	if (sirq == 0)
+		return -EAGAIN;
+
+	/*
+	 * ipipe_irq_handler_t is close enough to the signature of a
+	 * regular IRQ handler: use the latter in the generic code
+	 * shared with Dovetail.  The extraneous return code will be
+	 * ignored by the I-pipe core.
+	 */
+	ret = ipipe_request_irq(ipipe_root_domain, sirq,
+				(ipipe_irq_handler_t)handler,
+				NULL, NULL);
+	if (ret) {
+		ipipe_free_virq(sirq);
+		return ret;
+	}
+
+	return sirq;
+}
+
+static inline
+void pipeline_delete_inband_sirq(int sirq)
+{
+	ipipe_free_irq(ipipe_root_domain, sirq);
+	ipipe_free_virq(sirq);
+}
+
+static inline void pipeline_post_sirq(int sirq)
+{
+	ipipe_post_irq_root(sirq);
+}
+
+#endif /* !_COBALT_KERNEL_IPIPE_SIRQ_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h
new file mode 100644
index 0000000..30a8853
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/thread.h
@@ -0,0 +1,27 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_THREAD_H
+#define _COBALT_KERNEL_IPIPE_THREAD_H
+
+#include <linux/ipipe.h>
+#include <linux/sched.h>
+
+struct xnthread;
+
+#define cobalt_threadinfo ipipe_threadinfo
+
+static inline struct cobalt_threadinfo *pipeline_current(void)
+{
+	return ipipe_current_threadinfo();
+}
+
+static inline struct xnthread *pipeline_thread_from_task(struct task_struct *p)
+{
+	return ipipe_task_threadinfo(p)->thread;
+}
+
+#endif /* !_COBALT_KERNEL_IPIPE_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h
new file mode 100644
index 0000000..41347f7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/tick.h
@@ -0,0 +1,18 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_IPIPE_TICK_H
+#define _COBALT_KERNEL_IPIPE_TICK_H
+
+int pipeline_install_tick_proxy(void);
+
+void pipeline_uninstall_tick_proxy(void);
+
+struct xnsched;
+static inline bool pipeline_must_force_program_tick(struct xnsched *sched)
+{
+	return false;
+}
+
+#endif /* !_COBALT_KERNEL_IPIPE_TICK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h
new file mode 100644
index 0000000..a28b83a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/trace.h
@@ -0,0 +1,116 @@
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_IPIPE_TRACE_H
+#define _COBALT_KERNEL_IPIPE_TRACE_H
+
+#include <linux/types.h>
+#include <linux/kconfig.h>
+#include <linux/ipipe_trace.h>
+#include <cobalt/uapi/kernel/trace.h>
+
+static inline int xntrace_max_begin(unsigned long v)
+{
+	ipipe_trace_begin(v);
+	return 0;
+}
+
+static inline int xntrace_max_end(unsigned long v)
+{
+	ipipe_trace_end(v);
+	return 0;
+}
+
+static inline int xntrace_max_reset(void)
+{
+	ipipe_trace_max_reset();
+	return 0;
+}
+
+static inline int xntrace_user_start(void)
+{
+	return ipipe_trace_frozen_reset();
+}
+
+static inline int xntrace_user_stop(unsigned long v)
+{
+	ipipe_trace_freeze(v);
+	return 0;
+}
+
+static inline int xntrace_user_freeze(unsigned long v, int once)
+{
+	int ret = 0;
+
+	if (!once)
+		ret = ipipe_trace_frozen_reset();
+
+	ipipe_trace_freeze(v);
+
+	return ret;
+}
+
+static inline void xntrace_latpeak_freeze(int delay)
+{
+	xntrace_user_freeze(delay, 0);
+}
+
+static inline int xntrace_special(unsigned char id, unsigned long v)
+{
+	ipipe_trace_special(id, v);
+	return 0;
+}
+
+static inline int xntrace_special_u64(unsigned char id,
+				      unsigned long long v)
+{
+	ipipe_trace_special(id, (unsigned long)(v >> 32));
+	ipipe_trace_special(id, (unsigned long)(v & 0xFFFFFFFF));
+	return 0;
+}
+
+static inline int xntrace_pid(pid_t pid, short prio)
+{
+	ipipe_trace_pid(pid, prio);
+	return 0;
+}
+
+static inline int xntrace_tick(unsigned long delay_ticks)
+{
+	ipipe_trace_event(0, delay_ticks);
+	return 0;
+}
+
+static inline int xntrace_panic_freeze(void)
+{
+	ipipe_trace_panic_freeze();
+	return 0;
+}
+
+static inline int xntrace_panic_dump(void)
+{
+	ipipe_trace_panic_dump();
+	return 0;
+}
+
+static inline bool xntrace_enabled(void)
+{
+	return IS_ENABLED(CONFIG_IPIPE_TRACE);
+}
+
+#endif /* !_COBALT_KERNEL_IPIPE_TRACE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h
new file mode 100644
index 0000000..f9ea388
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/vdso_fallback.h
@@ -0,0 +1,16 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (c) Siemens AG, 2021
+ */
+
+#ifndef _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+#define _COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H
+
+static __always_inline bool
+pipeline_handle_vdso_fallback(int nr, struct pt_regs *regs)
+{
+	return false;
+}
+
+#endif /* !_COBALT_KERNEL_PIPELINE_VDSO_FALLBACK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h
new file mode 100644
index 0000000..dcf021e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ipipe/pipeline/wrappers.h
@@ -0,0 +1,26 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+#ifndef _COBALT_KERNEL_IPIPE_WRAPPERS_H
+#define _COBALT_KERNEL_IPIPE_WRAPPERS_H
+
+#include <linux/version.h>
+#include <linux/ipipe.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#define cobalt_set_task_state(tsk, state_value)	\
+	set_task_state(tsk, state_value)
+#else
+/*
+ * The co-kernel can still set the current task state safely if it
+ * runs on the head stage.
+ */
+#define cobalt_set_task_state(tsk, state_value)	\
+	smp_store_mb((tsk)->state, (state_value))
+#endif
+
+#ifndef ipipe_root_nr_syscalls
+#define ipipe_root_nr_syscalls(ti)	NR_syscalls
+#endif
+
+#endif /* !_COBALT_KERNEL_IPIPE_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h
new file mode 100644
index 0000000..ec029ef
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/list.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LIST_H
+#define _COBALT_KERNEL_LIST_H
+
+#include <linux/list.h>
+
+#define __list_add_pri(__new, __head, __member_pri, __member_next, __relop)	\
+do {										\
+	typeof(*__new) *__pos;							\
+	if (list_empty(__head))							\
+		list_add(&(__new)->__member_next, __head);		 	\
+	else {									\
+		list_for_each_entry_reverse(__pos, __head, __member_next) {	\
+			if ((__new)->__member_pri __relop __pos->__member_pri)	\
+				break;						\
+		}								\
+		list_add(&(__new)->__member_next, &__pos->__member_next); 	\
+	}									\
+} while (0)
+
+#define list_add_priff(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <=)
+
+#define list_add_prilf(__new, __head, __member_pri, __member_next)		\
+	__list_add_pri(__new, __head, __member_pri, __member_next, <)
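+
+/*
+ * Usage sketch (illustrative): keep a queue sorted by descending
+ * priority, FIFO among equal priorities:
+ *
+ *	struct item {
+ *		int prio;
+ *		struct list_head next;
+ *	};
+ *
+ *	list_add_priff(newitem, &queue, prio, next);
+ */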
+
+#define list_get_entry(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del(&__item->__member);				\
+	  __item;						\
+  })
+
+#define list_get_entry_init(__head, __type, __member)		\
+  ({								\
+	  __type *__item;					\
+	  __item = list_first_entry(__head, __type, __member);	\
+	  list_del_init(&__item->__member);			\
+	  __item;						\
+  })
+
+#ifndef list_next_entry
+#define list_next_entry(__item, __member)			\
+	list_entry((__item)->__member.next, typeof(*(__item)), __member)
+#endif
+
+#endif /* !_COBALT_KERNEL_LIST_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h
new file mode 100644
index 0000000..185f6e7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/lock.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2001-2008,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_LOCK_H
+#define _COBALT_KERNEL_LOCK_H
+
+#include <pipeline/lock.h>
+#include <linux/percpu.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/pipeline.h>
+
+/**
+ * @addtogroup cobalt_core_lock
+ *
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+	const char *file;
+	const char *function;
+	unsigned int line;
+	int cpu;
+	unsigned long long spin_time;
+	unsigned long long lock_date;
+};
+
+struct xnlockinfo {
+	unsigned long long spin_time;
+	unsigned long long lock_time;
+	const char *file;
+	const char *function;
+	unsigned int line;
+};
+
+#define XNARCH_LOCK_UNLOCKED (struct xnlock) {	\
+	~0,					\
+	__ARCH_SPIN_LOCK_UNLOCKED,		\
+	NULL,					\
+	NULL,					\
+	0,					\
+	-1,					\
+	0LL,					\
+	0LL,					\
+}
+
+#define XNLOCK_DBG_CONTEXT		, __FILE__, __LINE__, __FUNCTION__
+#define XNLOCK_DBG_CONTEXT_ARGS					\
+	, const char *file, int line, const char *function
+#define XNLOCK_DBG_PASS_CONTEXT		, file, line, function
+
+void xnlock_dbg_prepare_acquire(unsigned long long *start);
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit);
+void xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+			 unsigned long long *start,
+			 const char *file, int line,
+			 const char *function);
+int xnlock_dbg_release(struct xnlock *lock,
+			 const char *file, int line,
+			 const char *function);
+
+DECLARE_PER_CPU(struct xnlockinfo, xnlock_stats);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+struct xnlock {
+	unsigned owner;
+	arch_spinlock_t alock;
+};
+
+#define XNARCH_LOCK_UNLOCKED			\
+	(struct xnlock) {			\
+		~0,				\
+		__ARCH_SPIN_LOCK_UNLOCKED,	\
+	}
+
+#define XNLOCK_DBG_CONTEXT
+#define XNLOCK_DBG_CONTEXT_ARGS
+#define XNLOCK_DBG_PASS_CONTEXT
+
+static inline
+void xnlock_dbg_prepare_acquire(unsigned long long *start)
+{
+}
+
+static inline
+void xnlock_dbg_prepare_spin(unsigned int *spin_limit)
+{
+}
+
+static inline void
+xnlock_dbg_acquired(struct xnlock *lock, int cpu,
+		    unsigned long long *start)
+{
+}
+
+static inline int xnlock_dbg_release(struct xnlock *lock)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+
+#define xnlock_get(lock)		__xnlock_get(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_put(lock)		__xnlock_put(lock  XNLOCK_DBG_CONTEXT)
+#define xnlock_get_irqsave(lock,x) \
+	((x) = __xnlock_get_irqsave(lock  XNLOCK_DBG_CONTEXT))
+#define xnlock_put_irqrestore(lock,x) \
+	__xnlock_put_irqrestore(lock,x  XNLOCK_DBG_CONTEXT)
+#define xnlock_clear_irqoff(lock)	xnlock_put_irqrestore(lock, 1)
+#define xnlock_clear_irqon(lock)	xnlock_put_irqrestore(lock, 0)
+
+static inline void xnlock_init(struct xnlock *lock)
+{
+	*lock = XNARCH_LOCK_UNLOCKED;
+}
+
+#define DECLARE_XNLOCK(lock)		struct xnlock lock
+#define DECLARE_EXTERN_XNLOCK(lock)	extern struct xnlock lock
+#define DEFINE_XNLOCK(lock)		struct xnlock lock = XNARCH_LOCK_UNLOCKED
+#define DEFINE_PRIVATE_XNLOCK(lock)	static DEFINE_XNLOCK(lock)
+
+static inline int ____xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	int cpu = raw_smp_processor_id();
+	unsigned long long start;
+
+	if (lock->owner == cpu)
+		return 2;
+
+	xnlock_dbg_prepare_acquire(&start);
+
+	arch_spin_lock(&lock->alock);
+	lock->owner = cpu;
+
+	xnlock_dbg_acquired(lock, cpu, &start /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void ____xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (xnlock_dbg_release(lock /*, */ XNLOCK_DBG_PASS_CONTEXT))
+		return;
+
+	lock->owner = ~0U;
+	arch_spin_unlock(&lock->alock);
+}
+
+#ifndef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
+#define ___xnlock_get ____xnlock_get
+#define ___xnlock_put ____xnlock_put
+#else /* out of line xnlock */
+int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+
+void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS);
+#endif /* out of line xnlock */
+
+static inline spl_t
+__xnlock_get_irqsave(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	unsigned long flags;
+
+	splhigh(flags);
+
+	if (__locking_active__)
+		flags |= ___xnlock_get(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return flags;
+}
+
+static inline void __xnlock_put_irqrestore(struct xnlock *lock, spl_t flags
+					   /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	/* Only release the lock if we didn't take it recursively. */
+	if (__locking_active__ && !(flags & 2))
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+
+	splexit(flags & 1);
+}
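+
+/*
+ * Note on the flags encoding (illustrative walkthrough): bit 0
+ * carries the virtual interrupt state saved by splhigh(), while bit
+ * 1 is the recursion marker ____xnlock_get() returns when the
+ * current CPU already owns the lock, so nested sections unwind
+ * safely:
+ *
+ *	spl_t s, t;
+ *
+ *	xnlock_get_irqsave(&nklock, s);
+ *	xnlock_get_irqsave(&nklock, t);		t has bit 1 set
+ *	xnlock_put_irqrestore(&nklock, t);	keeps the lock held
+ *	xnlock_put_irqrestore(&nklock, s);	releases it for real
+ */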
+
+static inline int xnlock_is_owner(struct xnlock *lock)
+{
+	if (__locking_active__)
+		return lock->owner == raw_smp_processor_id();
+
+	return 1;
+}
+
+static inline int __xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		return ___xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+
+	return 0;
+}
+
+static inline void __xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	if (__locking_active__)
+		___xnlock_put(lock /*, */ XNLOCK_DBG_PASS_CONTEXT);
+}
+
+#undef __locking_active__
+
+#else /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+#define xnlock_init(lock)		do { } while(0)
+#define xnlock_get(lock)		do { } while(0)
+#define xnlock_put(lock)		do { } while(0)
+#define xnlock_get_irqsave(lock,x)	splhigh(x)
+#define xnlock_put_irqrestore(lock,x)	splexit(x)
+#define xnlock_clear_irqoff(lock)	splmax()
+#define xnlock_clear_irqon(lock)	splnone()
+#define xnlock_is_owner(lock)		1
+
+#define DECLARE_XNLOCK(lock)
+#define DECLARE_EXTERN_XNLOCK(lock)
+#define DEFINE_XNLOCK(lock)
+#define DEFINE_PRIVATE_XNLOCK(lock)
+
+#endif /* !(CONFIG_SMP || CONFIG_XENO_OPT_DEBUG_LOCKING) */
+
+DECLARE_EXTERN_XNLOCK(nklock);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_LOCK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h
new file mode 100644
index 0000000..a402df5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/map.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_MAP_H
+#define _COBALT_KERNEL_MAP_H
+
+#include <asm/bitsperlong.h>
+
+/**
+ * @addtogroup cobalt_core_map
+ * @{
+ */
+
+#define XNMAP_MAX_KEYS	(BITS_PER_LONG * BITS_PER_LONG)
+
+struct xnmap {
+    int nkeys;
+    int ukeys;
+    int offset;
+    unsigned long himask;
+    unsigned long himap;
+#define __IDMAP_LONGS	((XNMAP_MAX_KEYS+BITS_PER_LONG-1)/BITS_PER_LONG)
+    unsigned long lomap[__IDMAP_LONGS];
+#undef __IDMAP_LONGS
+    void *objarray[1];
+};
+
+struct xnmap *xnmap_create(int nkeys,
+			   int reserve,
+			   int offset);
+
+void xnmap_delete(struct xnmap *map);
+
+int xnmap_enter(struct xnmap *map,
+		int key,
+		void *objaddr);
+
+int xnmap_remove(struct xnmap *map,
+		 int key);
+
+static inline void *xnmap_fetch_nocheck(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+	return map->objarray[ofkey];
+}
+
+static inline void *xnmap_fetch(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset;
+
+	if (ofkey < 0 || ofkey >= map->nkeys)
+		return NULL;
+
+	return map->objarray[ofkey];
+}
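+
+/*
+ * Usage sketch (illustrative, error handling elided): map a small
+ * integer key to an object, look it up, then drop it:
+ *
+ *	struct xnmap *map = xnmap_create(256, 0, 0);
+ *
+ *	xnmap_enter(map, 1, obj);
+ *	obj = xnmap_fetch(map, 1);
+ *	xnmap_remove(map, 1);
+ *	xnmap_delete(map);
+ */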
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_MAP_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h
new file mode 100644
index 0000000..8a82c7b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/pipe.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
+ * 02139, USA; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PIPE_H
+#define _COBALT_KERNEL_PIPE_H
+
+#include <linux/types.h>
+#include <linux/poll.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/kernel/pipe.h>
+
+#define XNPIPE_NDEVS      CONFIG_XENO_OPT_PIPE_NRDEV
+#define XNPIPE_DEV_MAJOR  150
+
+#define XNPIPE_KERN_CONN         0x1
+#define XNPIPE_KERN_LCLOSE       0x2
+#define XNPIPE_USER_CONN         0x4
+#define XNPIPE_USER_SIGIO        0x8
+#define XNPIPE_USER_WREAD        0x10
+#define XNPIPE_USER_WREAD_READY  0x20
+#define XNPIPE_USER_WSYNC        0x40
+#define XNPIPE_USER_WSYNC_READY  0x80
+#define XNPIPE_USER_LCONN        0x100
+
+#define XNPIPE_USER_ALL_WAIT \
+(XNPIPE_USER_WREAD|XNPIPE_USER_WSYNC)
+
+#define XNPIPE_USER_ALL_READY \
+(XNPIPE_USER_WREAD_READY|XNPIPE_USER_WSYNC_READY)
+
+struct xnpipe_mh {
+	size_t size;
+	size_t rdoff;
+	struct list_head link;
+};
+
+struct xnpipe_state;
+
+struct xnpipe_operations {
+	void (*output)(struct xnpipe_mh *mh, void *xstate);
+	int (*input)(struct xnpipe_mh *mh, int retval, void *xstate);
+	void *(*alloc_ibuf)(size_t size, void *xstate);
+	void (*free_ibuf)(void *buf, void *xstate);
+	void (*free_obuf)(void *buf, void *xstate);
+	void (*release)(void *xstate);
+};
+
+struct xnpipe_state {
+	struct list_head slink;	/* Link on sleep queue */
+	struct list_head alink;	/* Link on async queue */
+
+	struct list_head inq;		/* From user-space to kernel */
+	int nrinq;
+	struct list_head outq;		/* From kernel to user-space */
+	int nroutq;
+	struct xnsynch synchbase;
+	struct xnpipe_operations ops;
+	void *xstate;		/* Extra state managed by caller */
+
+	/* Linux kernel part */
+	unsigned long status;
+	struct fasync_struct *asyncq;
+	wait_queue_head_t readq;	/* open/read/poll waiters */
+	wait_queue_head_t syncq;	/* sync waiters */
+	int wcount;			/* number of waiters on this minor */
+	size_t ionrd;
+};
+
+extern struct xnpipe_state xnpipe_states[];
+
+#define xnminor_from_state(s) ((s) - xnpipe_states)
+
+#ifdef CONFIG_XENO_OPT_PIPE
+int xnpipe_mount(void);
+void xnpipe_umount(void);
+#else /* !CONFIG_XENO_OPT_PIPE */
+static inline int xnpipe_mount(void) { return 0; }
+static inline void xnpipe_umount(void) { }
+#endif /* !CONFIG_XENO_OPT_PIPE */
+
+/* Entry points of the kernel interface. */
+
+int xnpipe_connect(int minor,
+		   struct xnpipe_operations *ops, void *xstate);
+
+int xnpipe_disconnect(int minor);
+
+ssize_t xnpipe_send(int minor,
+		    struct xnpipe_mh *mh, size_t size, int flags);
+
+ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size);
+
+ssize_t xnpipe_recv(int minor,
+		    struct xnpipe_mh **pmh, xnticks_t timeout);
+
+int xnpipe_flush(int minor, int mode);
+
+int xnpipe_pollstate(int minor, unsigned int *mask_r);
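+
+/*
+ * Connection sketch (illustrative; callback and variable names are
+ * assumed): a kernel-side user binds a minor to its callbacks, then
+ * exchanges messages with the user-space endpoint:
+ *
+ *	struct xnpipe_operations ops = {
+ *		.output = my_output,
+ *		.input = my_input,
+ *	};
+ *
+ *	minor = xnpipe_connect(7, &ops, mystate);
+ *	xnpipe_send(minor, mh, size, 0);
+ *	...
+ *	xnpipe_disconnect(minor);
+ */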
+
+static inline unsigned int __xnpipe_pollstate(int minor)
+{
+	struct xnpipe_state *state = xnpipe_states + minor;
+	unsigned int mask = POLLOUT;
+
+	if (!list_empty(&state->inq))
+		mask |= POLLIN;
+
+	return mask;
+}
+
+static inline char *xnpipe_m_data(struct xnpipe_mh *mh)
+{
+	return (char *)(mh + 1);
+}
+
+#define xnpipe_m_size(mh) ((mh)->size)
+
+#define xnpipe_m_rdoff(mh) ((mh)->rdoff)
+
+#endif /* !_COBALT_KERNEL_PIPE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h
new file mode 100644
index 0000000..f0079fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/ppd.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright &copy; 2006 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_PPD_H
+#define _COBALT_KERNEL_PPD_H
+
+#include <linux/types.h>
+#include <linux/atomic.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/heap.h>
+
+struct cobalt_umm {
+	struct xnheap heap;
+	atomic_t refcount;
+	void (*release)(struct cobalt_umm *umm);
+};
+
+struct cobalt_ppd {
+	struct cobalt_umm umm;
+	atomic_t refcnt;
+	char *exe_path;
+	struct rb_root fds;
+};
+
+extern struct cobalt_ppd cobalt_kernel_ppd;
+
+#endif /* _COBALT_KERNEL_PPD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h
new file mode 100644
index 0000000..a459da5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/registry.h
@@ -0,0 +1,202 @@
+/*
+ * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_REGISTRY_H
+#define _COBALT_KERNEL_REGISTRY_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/vfile.h>
+
+/**
+ * @addtogroup cobalt_core_registry
+ *
+ * @{
+ */
+struct xnpnode;
+
+struct xnobject {
+	void *objaddr;
+	const char *key;	  /* !< Hash key. May be NULL if anonymous. */
+	unsigned long cstamp;		  /* !< Creation stamp. */
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnpnode *pnode;	/* !< v-file information class. */
+	union {
+		struct {
+			struct xnvfile_rev_tag tag;
+			struct xnvfile_snapshot file;
+		} vfsnap; /* !< virtual snapshot file. */
+		struct xnvfile_regular vfreg; /* !< virtual regular file */
+		struct xnvfile_link link;     /* !< virtual link. */
+	} vfile_u;
+	struct xnvfile *vfilp;
+#endif /* CONFIG_XENO_OPT_VFILE */
+	struct hlist_node hlink; /* !< Link in h-table */
+	struct list_head link;
+};
+
+int xnregistry_init(void);
+
+void xnregistry_cleanup(void);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#define XNOBJECT_EXPORT_SCHEDULED  ((struct xnvfile *)1L)
+#define XNOBJECT_EXPORT_INPROGRESS ((struct xnvfile *)2L)
+#define XNOBJECT_EXPORT_ABORTED    ((struct xnvfile *)3L)
+
+struct xnptree {
+	const char *dirname;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+#define DEFINE_XNPTREE(__var, __name)		\
+	struct xnptree __var = {		\
+		.dirname = __name,		\
+		.entries = 0,			\
+		.vdir = xnvfile_nodir,		\
+	}
+
+struct xnpnode_ops {
+	int (*export)(struct xnobject *object, struct xnpnode *pnode);
+	void (*unexport)(struct xnobject *object, struct xnpnode *pnode);
+	void (*touch)(struct xnobject *object);
+};
+
+struct xnpnode {
+	const char *dirname;
+	struct xnptree *root;
+	struct xnpnode_ops *ops;
+	/* hidden */
+	int entries;
+	struct xnvfile_directory vdir;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+	struct xnvfile_snapshot_template vfile;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+	struct xnvfile_regular_template vfile;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+	char *(*target)(void *obj);
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define DEFINE_XNPTREE(__var, __name);
+
+/* Placeholders. */
+
+struct xnpnode {
+	const char *dirname;
+};
+
+struct xnpnode_snapshot {
+	struct xnpnode node;
+};
+
+struct xnpnode_regular {
+	struct xnpnode node;
+};
+
+struct xnpnode_link {
+	struct xnpnode node;
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/* Public interface. */
+
+extern struct xnobject *registry_obj_slots;
+
+static inline struct xnobject *xnregistry_validate(xnhandle_t handle)
+{
+	struct xnobject *object;
+	/*
+	 * Careful: a removed object which is still in flight to be
+	 * unexported carries a NULL objaddr, so we have to check this
+	 * as well.
+	 */
+	handle = xnhandle_get_index(handle);
+	if (likely(handle && handle < CONFIG_XENO_OPT_REGISTRY_NRSLOTS)) {
+		object = &registry_obj_slots[handle];
+		return object->objaddr ? object : NULL;
+	}
+
+	return NULL;
+}
+
+static inline const char *xnregistry_key(xnhandle_t handle)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+	return object ? object->key : NULL;
+}
+
+int xnregistry_enter(const char *key,
+		     void *objaddr,
+		     xnhandle_t *phandle,
+		     struct xnpnode *pnode);
+
+static inline int
+xnregistry_enter_anon(void *objaddr, xnhandle_t *phandle)
+{
+	return xnregistry_enter(NULL, objaddr, phandle, NULL);
+}
+
+int xnregistry_bind(const char *key,
+		    xnticks_t timeout,
+		    int timeout_mode,
+		    xnhandle_t *phandle);
+
+int xnregistry_remove(xnhandle_t handle);
+
+static inline
+void *xnregistry_lookup(xnhandle_t handle,
+			unsigned long *cstamp_r)
+{
+	struct xnobject *object = xnregistry_validate(handle);
+
+	if (object == NULL)
+		return NULL;
+
+	if (cstamp_r)
+		*cstamp_r = object->cstamp;
+
+	return object->objaddr;
+}
+
+int xnregistry_unlink(const char *key);
+
+unsigned xnregistry_hash_size(void);
+
+extern struct xnpnode_ops xnregistry_vfsnap_ops;
+
+extern struct xnpnode_ops xnregistry_vlink_ops;
+
+extern struct xnpnode_ops xnregistry_vfreg_ops;
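+
+/*
+ * Illustrative sketch (not part of the original header): exporting an
+ * object under a key, then resolving the handle back to the object
+ * address. The key string is hypothetical.
+ */
+static inline int example_export(void *objaddr)
+{
+	xnhandle_t handle;
+	int ret;
+
+	ret = xnregistry_enter("example/obj0", objaddr, &handle, NULL);
+	if (ret)
+		return ret;
+
+	/* The handle now resolves back to the object address */
+	if (xnregistry_lookup(handle, NULL) != objaddr)
+		return -EINVAL;
+
+	return xnregistry_remove(handle);
+}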
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_REGISTRY_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am
new file mode 100644
index 0000000..fe2e4d5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/Makefile.am
@@ -0,0 +1,17 @@
+
+noinst_HEADERS =	\
+	autotune.h	\
+	can.h		\
+	cobalt.h	\
+	compat.h	\
+	driver.h	\
+	fd.h		\
+	gpio.h		\
+	ipc.h		\
+	net.h		\
+	rtdm.h		\
+	serial.h	\
+	testing.h	\
+	udd.h
+
+SUBDIRS = analogy
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am
new file mode 100644
index 0000000..9b2b34f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/Makefile.am
@@ -0,0 +1,12 @@
+
+noinst_HEADERS =	\
+	buffer.h	\
+	channel_range.h	\
+	command.h	\
+	context.h	\
+	device.h	\
+	driver.h	\
+	instruction.h	\
+	rtdm_helpers.h	\
+	subdevice.h	\
+	transfer.h
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h
new file mode 100644
index 0000000..e1a0cc9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/buffer.h
@@ -0,0 +1,461 @@
+/*
+ * Analogy for Linux, buffer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_BUFFER_H
+#define _COBALT_RTDM_ANALOGY_BUFFER_H
+
+#include <linux/version.h>
+#include <linux/mm.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/subdevice.h>
+
+/* --- Events bits / flags --- */
+
+#define A4L_BUF_EOBUF_NR 0
+#define A4L_BUF_EOBUF (1 << A4L_BUF_EOBUF_NR)
+
+#define A4L_BUF_ERROR_NR 1
+#define A4L_BUF_ERROR (1 << A4L_BUF_ERROR_NR)
+
+#define A4L_BUF_EOA_NR 2
+#define A4L_BUF_EOA (1 << A4L_BUF_EOA_NR)
+
+/* --- Status bits / flags --- */
+
+#define A4L_BUF_BULK_NR 8
+#define A4L_BUF_BULK (1 << A4L_BUF_BULK_NR)
+
+#define A4L_BUF_MAP_NR 9
+#define A4L_BUF_MAP (1 << A4L_BUF_MAP_NR)
+
+
+/* Buffer descriptor structure */
+struct a4l_buffer {
+
+	/* Added by the structure update */
+	struct a4l_subdevice *subd;
+
+	/* Buffer's first virtual page pointer */
+	void *buf;
+
+	/* Buffer's global size */
+	unsigned long size;
+	/* Tab containing pointers to the buffer's pages */
+	unsigned long *pg_list;
+
+	/* RT/NRT synchronization element */
+	struct a4l_sync sync;
+
+	/* Counters needed for transfer */
+	unsigned long end_count;
+	unsigned long prd_count;
+	unsigned long cns_count;
+	unsigned long tmp_count;
+
+	/* Status + events occurring during transfer */
+	unsigned long flags;
+
+	/* Command in progress */
+	struct a4l_cmd_desc *cur_cmd;
+
+	/* Munge counter */
+	unsigned long mng_count;
+
+	/* Threshold below which the user process should not be
+	   awakened */
+	unsigned long wake_count;
+};
+
+static inline void __dump_buffer_counters(struct a4l_buffer *buf)
+{
+	__a4l_dbg(1, core_dbg, "a4l_buffer=0x%p, p=0x%p \n", buf, buf->buf);
+	__a4l_dbg(1, core_dbg, "end=%06ld, prd=%06ld, cns=%06ld, tmp=%06ld \n",
+		buf->end_count, buf->prd_count, buf->cns_count, buf->tmp_count);
+}
+
+/* --- Static inline functions related to
+   user<->kernel data transfers --- */
+
+/* The function __produce is an inline function which copies data into
+   the asynchronous buffer and takes care of the non-contiguous issue
+   when looping. This function is used in read and write operations */
+static inline int __produce(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pin, unsigned long count)
+{
+	unsigned long start_ptr = (buf->prd_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(buf->buf + start_ptr, pin, blk_size);
+		else
+			ret = rtdm_safe_copy_from_user(fd,
+						       buf->buf + start_ptr,
+						       pin, blk_size);
+
+		/* Update pointers/counts */
+		pin += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __consume is an inline function which copies data from
+   the asynchronous buffer and takes care of the non-contiguous issue
+   when looping. This function is used in read and write operations */
+static inline int __consume(struct a4l_device_context *cxt,
+			    struct a4l_buffer *buf, void *pout, unsigned long count)
+{
+	unsigned long start_ptr = (buf->cns_count % buf->size);
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	unsigned long tmp_cnt = count;
+	int ret = 0;
+
+	while (ret == 0 && tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the copy */
+		if (cxt == NULL)
+			memcpy(pout, buf->buf + start_ptr, blk_size);
+		else
+			ret = rtdm_safe_copy_to_user(fd,
+						     pout,
+						     buf->buf + start_ptr,
+						     blk_size);
+
+		/* Update pointers/counts */
+		pout += blk_size;
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+
+	return ret;
+}
+
+/* The function __munge is an inline function which calls the
+   subdevice specific munge callback on contiguous windows within the
+   whole buffer. This function is used in read and write operations */
+static inline void __munge(struct a4l_subdevice * subd,
+			   void (*munge) (struct a4l_subdevice *,
+					  void *, unsigned long),
+			   struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long start_ptr = (buf->mng_count % buf->size);
+	unsigned long tmp_cnt = count;
+
+	while (tmp_cnt != 0) {
+		/* Check the data copy can be performed contiguously */
+		unsigned long blk_size = (start_ptr + tmp_cnt > buf->size) ?
+			buf->size - start_ptr : tmp_cnt;
+
+		/* Perform the munge operation */
+		munge(subd, buf->buf + start_ptr, blk_size);
+
+		/* Update the start pointer and the count */
+		tmp_cnt -= blk_size;
+		start_ptr = 0;
+	}
+}
+
+/* The function __handle_event can only be called from process context
+   (not interrupt service routine). It allows the client process to
+   retrieve the buffer status which has been updated by the driver */
+static inline int __handle_event(struct a4l_buffer * buf)
+{
+	int ret = 0;
+
+	/* The event "End of acquisition" must not be cleaned
+	   before the complete flush of the buffer */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		ret = -ENOENT;
+
+	if (test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ret = -EPIPE;
+
+	return ret;
+}
+
+/* --- Counters management functions --- */
+
+/* Here, we may wonder why we need more than two counters / pointers.
+
+   Theoretically, we only need two counters (or two pointers):
+   - one which tells where the reader should be within the buffer
+   - one which tells where the writer should be within the buffer
+
+   With these two counters (or pointers), we just have to check that
+   the writer does not overtake the reader inside the ring buffer
+   BEFORE any read / write operations.
+
+   However, if one element is a DMA controller, we have to be more
+   careful. Generally a DMA transfer occurs like this:
+   DMA shot
+      |-> then DMA interrupt
+	 |-> then DMA soft handler which checks the counter
+
+   So, the checkings occur AFTER the write operations.
+
+   Let's take an example: the reader is a software task and the writer
+   is a DMA controller. At the end of the DMA shot, the write counter
+   is higher than the read counter. Unfortunately, a read operation
+   occurs between the DMA shot and the DMA interrupt, so the handler
+   will not notice that an overflow occurred.
+
+   That is why tmp_count comes into play: tmp_count records the
+   read/consumer current counter before the next DMA shot and once the
+   next DMA shot is done, we check that the updated writer/producer
+   counter is not higher than tmp_count. Thus we are sure that the DMA
+   writer has not overtaken the reader because it was not able to
+   overtake the n-1 value. */
+
+static inline int __pre_abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	if (count - buf->tmp_count > buf->size) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+	buf->tmp_count = buf->cns_count;
+
+	return 0;
+}
+
+static inline int __pre_put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_put(buf, buf->tmp_count + count);
+}
+
+static inline int __pre_abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	/* The first time, we expect the buffer to be properly filled
+	before the trigger occurrence; by the way, we need tmp_count to
+	have been initialized and tmp_count is updated right here */
+	if (buf->tmp_count == 0 || buf->cns_count == 0)
+		goto out;
+
+	/* At the end of the acquisition, the user application has
+	written the defined amount of data into the buffer; so the
+	last time, the DMA channel can easily overtake the tmp
+	frontier because no more data were sent from user space;
+	therefore no useless alarm should be sent */
+	if (buf->end_count != 0 && (long)(count - buf->end_count) > 0)
+		goto out;
+
+	/* Once the exceptions are passed, we check that the DMA
+	transfer has not overtaken the last record of the production
+	count (tmp_count was updated with prd_count the last time
+	__pre_abs_get was called). We must understand that we cannot
+	compare the current DMA count with the current production
+	count because even if, right now, the production count is
+	higher than the DMA count, it does not mean that the DMA count
+	was not greater a few cycles before; in such case, the DMA
+	channel would have retrieved the wrong data */
+	if ((long)(count - buf->tmp_count) > 0) {
+		set_bit(A4L_BUF_ERROR_NR, &buf->flags);
+		return -EPIPE;
+	}
+
+out:
+	buf->tmp_count = buf->prd_count;
+
+	return 0;
+}
+
+static inline int __pre_get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __pre_abs_get(buf, buf->tmp_count + count);
+}
+
+static inline int __abs_put(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->prd_count;
+
+	if ((long)(buf->prd_count - count) >= 0)
+		return -EINVAL;
+
+	buf->prd_count = count;
+
+	if ((old / buf->size) != (count / buf->size))
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __put(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_put(buf, buf->prd_count + count);
+}
+
+static inline int __abs_get(struct a4l_buffer * buf, unsigned long count)
+{
+	unsigned long old = buf->cns_count;
+
+	if ((long)(buf->cns_count - count) >= 0)
+		return -EINVAL;
+
+	buf->cns_count = count;
+
+	if ((old / buf->size) != count / buf->size)
+		set_bit(A4L_BUF_EOBUF_NR, &buf->flags);
+
+	if (buf->end_count != 0 && (long)(count - buf->end_count) >= 0)
+		set_bit(A4L_BUF_EOA_NR, &buf->flags);
+
+	return 0;
+}
+
+static inline int __get(struct a4l_buffer * buf, unsigned long count)
+{
+	return __abs_get(buf, buf->cns_count + count);
+}
+
+static inline unsigned long __count_to_put(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	if ((long) (buf->size + buf->cns_count - buf->prd_count) > 0)
+		ret = buf->size + buf->cns_count - buf->prd_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_get(struct a4l_buffer * buf)
+{
+	unsigned long ret;
+
+	/* If the acquisition is unlimited (end_count == 0), we must
+	   not take into account end_count */
+	if (buf->end_count == 0 || (long)(buf->end_count - buf->prd_count) > 0)
+		ret = buf->prd_count;
+	else
+		ret = buf->end_count;
+
+	if ((long)(ret - buf->cns_count) > 0)
+		ret -= buf->cns_count;
+	else
+		ret = 0;
+
+	return ret;
+}
+
+static inline unsigned long __count_to_end(struct a4l_buffer * buf)
+{
+	unsigned long ret = buf->end_count - buf->cns_count;
+
+	if (buf->end_count == 0)
+		return ULONG_MAX;
+
+	return ((long)ret) < 0 ? 0 : ret;
+}
+
+/* --- Buffer internal functions --- */
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size);
+
+void a4l_free_buffer(struct a4l_buffer *buf_desc);
+
+void a4l_init_buffer(struct a4l_buffer * buf_desc);
+
+void a4l_cleanup_buffer(struct a4l_buffer * buf_desc);
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd);
+
+void a4l_cancel_buffer(struct a4l_device_context *cxt);
+
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absput(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_put(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_put(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_put(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd,
+			   unsigned long count);
+
+int a4l_buf_commit_absget(struct a4l_subdevice *subd,
+			  unsigned long count);
+
+int a4l_buf_prepare_get(struct a4l_subdevice *subd,
+			unsigned long count);
+
+int a4l_buf_commit_get(struct a4l_subdevice *subd,
+		       unsigned long count);
+
+int a4l_buf_get(struct a4l_subdevice *subd,
+		void *bufdata, unsigned long count);
+
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
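+
+/*
+ * Illustrative sketch (not part of the original header): pushing
+ * freshly acquired samples from a driver into the asynchronous buffer,
+ * then waking up waiters. Raising A4L_BUF_ERROR on a failed put is an
+ * assumption made for the sake of the example.
+ */
+static inline void example_push_samples(struct a4l_subdevice *subd,
+					void *samples, unsigned long nbytes)
+{
+	if (a4l_buf_put(subd, samples, nbytes) < 0) {
+		/* Could not store the samples: raise the error event */
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+		return;
+	}
+
+	/* Notify any reader waiting on this subdevice's buffer */
+	a4l_buf_evt(subd, 0);
+}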
+
+/* --- Current Command management function --- */
+
+static inline struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice *subd)
+{
+	return (subd->buf) ? subd->buf->cur_cmd : NULL;
+}
+
+/* --- Munge related function --- */
+
+int a4l_get_chan(struct a4l_subdevice *subd);
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_mmap(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg);
+ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes);
+ssize_t a4l_write_buffer(struct a4l_device_context * cxt, const void *bufdata, size_t nbytes);
+int a4l_select(struct a4l_device_context *cxt,
+	       rtdm_selector_t *selector,
+	       enum rtdm_selecttype type, unsigned fd_index);
+
+#endif /* !_COBALT_RTDM_ANALOGY_BUFFER_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h
new file mode 100644
index 0000000..2a16e30
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/channel_range.h
@@ -0,0 +1,272 @@
+/**
+ * @file
+ * Analogy for Linux, channel, range related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+#define _COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H
+
+#include <rtdm/uapi/analogy.h>
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_channel_range Channels and ranges
+ *
+ * Channels
+ *
+ * According to the Analogy nomenclature, the channel is the elementary
+ * acquisition entity. One channel is supposed to acquire one sample at
+ * a time. A channel can be:
+ * - an analog input or an analog output;
+ * - a digital input or a digital output;
+ *
+ * Channels are defined by their type and by some other
+ * characteristics like:
+ * - their resolutions for analog channels (which usually ranges from
+     8 to 32 bits);
+ * - their references;
+ *
+ * Such parameters must be declared for each channel composing a
+ * subdevice. The structure a4l_channel (struct a4l_channel) is used to
+ * define one channel.
+ *
+ * Another structure named a4l_channels_desc (struct a4l_channels_desc)
+ * gathers all channels for a specific subdevice. This latter
+ * structure also stores:
+ * - the channels count;
+ * - the channels declaration mode (A4L_CHAN_GLOBAL_CHANDESC or
+     A4L_CHAN_PERCHAN_CHANDESC): if all the channels composing a
+     subdevice are identical, there is no need to declare the
+     parameters for each channel; the global declaration mode eases
+     the structure composition.
+ *
+ * Usually the channels descriptor looks like this:
+ * <tt> @verbatim
+struct a4l_channels_desc example_chan = {
+	mode: A4L_CHAN_GLOBAL_CHANDESC, -> Global declaration
+					      mode is set
+	length: 8, -> 8 channels
+	chans: {
+		{A4L_CHAN_AREF_GROUND, 16}, -> Each channel is 16 bits
+						  wide with the ground as
+						  reference
+	},
+};
+@endverbatim </tt>
+ *
+ * Ranges
+ *
+ * So as to perform conversion from logical values acquired by the
+ * device to physical units, some range structure(s) must be declared
+ * on the driver side.
+ *
+ * Such structures contain:
+ * - the physical unit type (Volt, Ampere, none);
+ * - the minimal and maximal values;
+ *
+ * These range structures must be associated with the channels at
+ * subdevice registration time as a channel can work with many
+ * ranges. At configuration time (thanks to an Analogy command), one
+ * range will be selected for each enabled channel.
+ *
+ * Consequently, for each channel, the developer must declare all the
+ * possible ranges in a structure called struct a4l_rngtab. Here is an
+ * example:
+ * <tt> @verbatim
+struct a4l_rngtab example_tab = {
+    length: 2,
+    rngs: {
+	RANGE_V(-5,5),
+	RANGE_V(-10,10),
+    },
+};
+@endverbatim </tt>
+ *
+ * For each subdevice, a specific structure is designed to gather all
+ * the ranges tabs of all the channels. In this structure, called
+ * struct a4l_rngdesc, three fields must be filled:
+ * - the declaration mode (A4L_RNG_GLOBAL_RNGDESC or
+ *   A4L_RNG_PERCHAN_RNGDESC);
+ * - the number of range tabs;
+ * - the tab of pointers to range tabs;
+ *
+ * Most of the time, the channels which belong to the same subdevice
+ * use the same set of ranges. So, there is no need to declare the
+ * same ranges for each channel. A macro is defined to prevent
+ * redundant declarations: RNG_GLOBAL().
+ *
+ * Here is an example:
+ * <tt> @verbatim
+struct a4l_rngdesc example_rng = RNG_GLOBAL(example_tab);
+@endverbatim </tt>
+ *
+ * @{
+ */
+
+
+/* --- Channel section --- */
+
+/*!
+ * @anchor A4L_CHAN_AREF_xxx @name Channel reference
+ * @brief Flags to define the channel's reference
+ * @{
+ */
+
+/**
+ * Ground reference
+ */
+#define A4L_CHAN_AREF_GROUND 0x1
+/**
+ * Common reference
+ */
+#define A4L_CHAN_AREF_COMMON 0x2
+/**
+ * Differential reference
+ */
+#define A4L_CHAN_AREF_DIFF 0x4
+/**
+ * Misc reference
+ */
+#define A4L_CHAN_AREF_OTHER 0x8
+
+	  /*! @} A4L_CHAN_AREF_xxx */
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_CHAN_GLOBAL 0x10
+
+/*!
+ * @brief Structure describing some channel's characteristics
+ */
+
+struct a4l_channel {
+	unsigned long flags; /*!< Channel flags to define the reference. */
+	unsigned long nb_bits; /*!< Channel resolution. */
+};
+
+/*!
+ * @anchor A4L_CHAN_xxx @name Channels declaration mode
+ * @brief Constant to define whether the channels in a descriptor are
+ * identical
+ * @{
+ */
+
+/**
+ * Global declaration, the set contains channels with similar
+ * characteristics
+ */
+#define A4L_CHAN_GLOBAL_CHANDESC 0
+/**
+ * Per channel declaration, the descriptor gathers different channels
+ */
+#define A4L_CHAN_PERCHAN_CHANDESC 1
+
+	  /*! @} A4L_CHAN_xxx */
+
+/*!
+ * @brief Structure describing a channels set
+ */
+
+struct a4l_channels_desc {
+	unsigned long mode; /*!< Declaration mode (global or per channel) */
+	unsigned long length; /*!< Channels count */
+	struct a4l_channel chans[]; /*!< Channels tab */
+};
+
+/**
+ * Internal use flag (must not be used by driver developer)
+ */
+#define A4L_RNG_GLOBAL 0x8
+
+/*!
+ * @brief Structure describing a (unique) range
+ */
+
+struct a4l_range {
+	long min; /*!< Minimal value */
+	long max; /*!< Maximal value */
+	unsigned long flags; /*!< Range flags (unit, etc.) */
+};
+
+/**
+ * Macro to declare a (unique) range with no unit defined
+ */
+#define RANGE(x,y) {(x * A4L_RNG_FACTOR), (y * A4L_RNG_FACTOR),	\
+			A4L_RNG_NO_UNIT}
+/**
+ * Macro to declare a (unique) range in Volt
+ */
+#define RANGE_V(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_VOLT_UNIT}
+/**
+ * Macro to declare a (unique) range in milliAmpere
+ */
+#define RANGE_mA(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_MAMP_UNIT}
+/**
+ * Macro to declare a (unique) range in some external reference
+ */
+#define RANGE_ext(x,y) {(x * A4L_RNG_FACTOR),(y * A4L_RNG_FACTOR), \
+			A4L_RNG_EXT_UNIT}
+
+
+/* Ranges tab descriptor */
+struct a4l_rngtab {
+	unsigned char length;
+	struct a4l_range rngs[];
+};
+
+/**
+ * Constant to define a ranges descriptor as global (inter-channel)
+ */
+#define A4L_RNG_GLOBAL_RNGDESC 0
+/**
+ * Constant to define a ranges descriptor as specific for a channel
+ */
+#define A4L_RNG_PERCHAN_RNGDESC 1
+
+/* Global ranges descriptor */
+struct a4l_rngdesc {
+	unsigned char mode;
+	unsigned char length;
+	struct a4l_rngtab *rngtabs[];
+};
+
+/**
+ * Macro to declare a ranges global descriptor in one line
+ */
+#define RNG_GLOBAL(x) {			\
+	.mode = A4L_RNG_GLOBAL_RNGDESC,	\
+	.length =  1,			\
+	.rngtabs = {&(x)},		\
+}
+
+extern struct a4l_rngdesc a4l_range_bipolar10;
+extern struct a4l_rngdesc a4l_range_bipolar5;
+extern struct a4l_rngdesc a4l_range_unipolar10;
+extern struct a4l_rngdesc a4l_range_unipolar5;
+extern struct a4l_rngdesc a4l_range_unknown;
+extern struct a4l_rngdesc a4l_range_fake;
+
+#define range_digital a4l_range_unipolar5
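+
+/*
+ * Illustrative sketch (not part of the original header): combining the
+ * macros above to declare the descriptors of a hypothetical 8-channel,
+ * 16-bit analog input subdevice.
+ */
+static struct a4l_rngtab example_ai_rngtab = {
+	.length = 2,
+	.rngs = {
+		RANGE_V(-10, 10),
+		RANGE_V(-5, 5),
+	},
+};
+
+/* All channels share the same two ranges */
+static struct a4l_rngdesc example_ai_rngdesc = RNG_GLOBAL(example_ai_rngtab);
+
+/* All 8 channels are identical: 16 bits, ground-referenced */
+static struct a4l_channels_desc example_ai_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};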
+
+/*! @} channelrange */
+
+#endif /* !_COBALT_RTDM_ANALOGY_CHANNEL_RANGE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h
new file mode 100644
index 0000000..89f7cca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/command.h
@@ -0,0 +1,35 @@
+/**
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_COMMAND_H
+#define _COBALT_RTDM_ANALOGY_COMMAND_H
+
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy/context.h>
+
+#define CR_CHAN(a) CHAN(a)
+#define CR_RNG(a) (((a)>>16)&0xff)
+#define CR_AREF(a) (((a)>>24)&0xf)
+
+/* --- Command related function --- */
+void a4l_free_cmddesc(struct a4l_cmd_desc * desc);
+
+/* --- Upper layer functions --- */
+int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc);
+int a4l_ioctl_cmd(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_COMMAND_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h
new file mode 100644
index 0000000..f619f9c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/context.h
@@ -0,0 +1,48 @@
+/*
+ * Analogy for Linux, context structure / macros declarations
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_CONTEXT_H
+#define _COBALT_RTDM_ANALOGY_CONTEXT_H
+
+#include <rtdm/driver.h>
+
+struct a4l_device;
+struct a4l_buffer;
+
+struct a4l_device_context {
+	/* The device pointer (retrieved via the minor
+	   number at open time) */
+	struct a4l_device *dev;
+
+	/* The buffer structure contains everything to transfer data
+	   from asynchronous acquisition operations on a specific
+	   subdevice */
+	struct a4l_buffer *buffer;
+};
+
+static inline int a4l_get_minor(struct a4l_device_context *cxt)
+{
+	/* Get a pointer on the container structure */
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	/* Get the minor index */
+	return rtdm_fd_minor(fd);
+}
+
+#endif /* !_COBALT_RTDM_ANALOGY_CONTEXT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h
new file mode 100644
index 0000000..93ecf66
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/device.h
@@ -0,0 +1,67 @@
+/*
+ * Analogy for Linux, device related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DEVICE_H
+#define _COBALT_RTDM_ANALOGY_DEVICE_H
+
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/transfer.h>
+#include <rtdm/analogy/driver.h>
+
+#define A4L_NB_DEVICES 10
+
+#define A4L_DEV_ATTACHED_NR 0
+
+struct a4l_device {
+
+	/* Spinlock for global device use */
+	rtdm_lock_t lock;
+
+	/* Device specific flags */
+	unsigned long flags;
+
+	/* Driver assigned to this device during the attach
+	   procedure */
+	struct a4l_driver *driver;
+
+	/* Hidden description stuff */
+	struct list_head subdvsq;
+
+	/* Context-dependent stuff */
+	struct a4l_transfer transfer;
+
+	/* Private data for the driver's own use */
+	void *priv;
+};
+
+/* --- Devices tab related functions --- */
+void a4l_init_devs(void);
+int a4l_check_cleanup_devs(void);
+int a4l_rdproc_devs(struct seq_file *p, void *data);
+
+/* --- Context related function / macro --- */
+void a4l_set_dev(struct a4l_device_context *cxt);
+#define a4l_get_dev(x) ((x)->dev)
+
+/* --- Upper layer functions --- */
+int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_DEVICE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h
new file mode 100644
index 0000000..08a7546
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/driver.h
@@ -0,0 +1,74 @@
+/**
+ * @file
+ * Analogy for Linux, driver facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_DRIVER_H
+#define _COBALT_RTDM_ANALOGY_DRIVER_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/rtdm_helpers.h>
+#include <rtdm/analogy/context.h>
+#include <rtdm/analogy/buffer.h>
+
+struct seq_file;
+struct a4l_link_desc;
+struct a4l_device;
+
+/** Structure containing driver declaration data. */
+/* Analogy driver descriptor */
+struct a4l_driver {
+
+	/* List stuff */
+	struct list_head list;
+			   /**< List stuff */
+
+	/* Visible description stuff */
+	struct module *owner;
+	               /**< Pointer to module containing the code */
+	unsigned int flags;
+	               /**< Type / status driver's flags */
+	char *board_name;
+		       /**< Board name */
+	char *driver_name;
+	               /**< Driver name */
+	int privdata_size;
+		       /**< Size of the driver's private data */
+
+	/* Init/destroy procedures */
+	int (*attach) (struct a4l_device *, struct a4l_link_desc *);
+								      /**< Attach procedure */
+	int (*detach) (struct a4l_device *);
+				   /**< Detach procedure */
+
+};
+
+/* Driver list related functions */
+
+int a4l_register_drv(struct a4l_driver * drv);
+int a4l_unregister_drv(struct a4l_driver * drv);
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio);
+#ifdef CONFIG_PROC_FS
+int a4l_rdproc_drvs(struct seq_file *p, void *data);
+#endif /* CONFIG_PROC_FS */
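+
+/*
+ * Illustrative sketch (not part of the original header): a minimal
+ * driver declaration and module hooks. The attach/detach stubs stand
+ * in for real probing code; all names are hypothetical.
+ */
+static int example_attach(struct a4l_device *dev,
+			  struct a4l_link_desc *arg)
+{
+	return 0;	/* map I/O regions, declare subdevices, ... */
+}
+
+static int example_detach(struct a4l_device *dev)
+{
+	return 0;	/* release resources */
+}
+
+static struct a4l_driver example_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "example_board",
+	.driver_name = "example_drv",
+	.attach = example_attach,
+	.detach = example_detach,
+	.privdata_size = 0,
+};
+
+static int __init example_init(void)
+{
+	return a4l_register_drv(&example_drv);
+}
+
+static void __exit example_exit(void)
+{
+	a4l_unregister_drv(&example_drv);
+}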
+
+#endif /* !_COBALT_RTDM_ANALOGY_DRIVER_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h
new file mode 100644
index 0000000..2e8245a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/instruction.h
@@ -0,0 +1,45 @@
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+#define _COBALT_RTDM_ANALOGY_INSTRUCTION_H
+
+struct a4l_kernel_instruction {
+	unsigned int type;
+	unsigned int idx_subd;
+	unsigned int chan_desc;
+	unsigned int data_size;
+	void *data;
+	void *__udata;
+};
+
+struct a4l_kernel_instruction_list {
+	unsigned int count;
+	struct a4l_kernel_instruction *insns;
+	a4l_insn_t *__uinsns;
+};
+
+/* Instruction related functions */
+
+/* Upper layer functions */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_INSTRUCTION_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h
new file mode 100644
index 0000000..1de219f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/rtdm_helpers.h
@@ -0,0 +1,143 @@
+/*
+ * Analogy for Linux, operating system facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+#define _COBALT_RTDM_ANALOGY_RTDM_HELPERS_H
+
+#include <linux/fs.h>
+#include <linux/spinlock.h>
+#include <linux/sched.h>
+#include <linux/time.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/uaccess.h>
+#include <rtdm/driver.h>
+
+/* --- Trace section  --- */
+#define A4L_PROMPT "Analogy: "
+
+#define RTDM_SUBCLASS_ANALOGY 0
+
+#define __a4l_err(fmt, args...)  rtdm_printk(KERN_ERR A4L_PROMPT fmt, ##args)
+#define __a4l_warn(fmt, args...) rtdm_printk(KERN_WARNING A4L_PROMPT fmt, ##args)
+
+#ifdef  CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_info(fmt, args...) trace_printk(fmt, ##args)
+#else
+#define __a4l_info(fmt, args...) 						\
+        rtdm_printk(KERN_INFO A4L_PROMPT "%s: " fmt, __FUNCTION__, ##args)
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG
+#ifdef CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+#define __a4l_dbg(level, debug, fmt, args...)				\
+	do {								\
+	if ((debug) >= (level))						\
+		trace_printk(fmt, ##args); 				\
+	} while (0)
+#else
+#define __a4l_dbg(level, debug, fmt, args...)						\
+	do {										\
+	if ((debug) >= (level))								\
+		rtdm_printk(KERN_DEBUG A4L_PROMPT "%s: " fmt, __FUNCTION__ , ##args);	\
+	} while (0)
+#endif
+
+#define core_dbg CONFIG_XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+#define drv_dbg CONFIG_XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dbg(level, debug, fmt, args...)
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_DEBUG */
+
+#define __a4l_dev_name(dev) 						\
+	(dev->driver == NULL) ? "unattached dev" : dev->driver->board_name
+
+#define a4l_err(dev, fmt, args...) 					\
+	__a4l_err("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_warn(dev, fmt, args...) 					\
+	__a4l_warn("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_info(dev, fmt, args...) 					\
+	__a4l_info("%s: " fmt, __a4l_dev_name(dev), ##args)
+
+#define a4l_dbg(level, debug, dev, fmt, args...)			\
+	__a4l_dbg(level, debug, "%s: " fmt, __a4l_dev_name(dev), ##args)
+
+
+/* --- Time section --- */
+static inline void a4l_udelay(unsigned int us)
+{
+	rtdm_task_busy_sleep(((nanosecs_rel_t) us) * 1000);
+}
+
+/* Function which gives absolute time */
+nanosecs_abs_t a4l_get_time(void);
+
+/* Function for setting up the absolute time recovery */
+void a4l_init_time(void);
+
+/* --- IRQ section --- */
+#define A4L_IRQ_DISABLED 0
+
+typedef int (*a4l_irq_hdlr_t) (unsigned int irq, void *d);
+
+struct a4l_irq_descriptor {
+	/* These fields are needed to invoke the IRQ trampoline;
+	   that is why a dedicated structure is defined */
+	a4l_irq_hdlr_t handler;
+	unsigned int irq;
+	void *cookie;
+	rtdm_irq_t rtdm_desc;
+};
+
+int __a4l_request_irq(struct a4l_irq_descriptor * dsc,
+		      unsigned int irq,
+		      a4l_irq_hdlr_t handler,
+		      unsigned long flags, void *cookie);
+int __a4l_free_irq(struct a4l_irq_descriptor * dsc);
+
+/* --- Synchronization section --- */
+#define __NRT_WAITER 1
+#define __RT_WAITER 2
+#define __EVT_PDING 3
+
+struct a4l_sync {
+	unsigned long status;
+	rtdm_event_t rtdm_evt;
+	rtdm_nrtsig_t nrt_sig;
+	wait_queue_head_t wq;
+};
+
+#define a4l_select_sync(snc, slr, type, fd) \
+	rtdm_event_select(&((snc)->rtdm_evt), slr, type, fd)
+
+int a4l_init_sync(struct a4l_sync * snc);
+void a4l_cleanup_sync(struct a4l_sync * snc);
+void a4l_flush_sync(struct a4l_sync * snc);
+int a4l_wait_sync(struct a4l_sync * snc, int rt);
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout);
+void a4l_signal_sync(struct a4l_sync * snc);
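+
+/*
+ * Illustrative sketch (not part of the original header): a consumer
+ * blocking on the RT/NRT synchronization element until a producer
+ * signals it. The 1 s timeout is arbitrary.
+ */
+static inline int example_wait_for_data(struct a4l_sync *snc)
+{
+	/* Works from both RT and NRT contexts */
+	return a4l_timedwait_sync(snc, rtdm_in_rt_context(), 1000000000ULL);
+}
+
+static inline void example_data_ready(struct a4l_sync *snc)
+{
+	a4l_signal_sync(snc);	/* wake up the waiter */
+}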
+
+#endif /* !_COBALT_RTDM_ANALOGY_RTDM_HELPERS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h
new file mode 100644
index 0000000..21c09df
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/subdevice.h
@@ -0,0 +1,118 @@
+/**
+ * @file
+ * Analogy for Linux, subdevice related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+#define _COBALT_RTDM_ANALOGY_SUBDEVICE_H
+
+#include <linux/list.h>
+#include <rtdm/analogy/instruction.h>
+#include <rtdm/analogy/command.h>
+#include <rtdm/analogy/channel_range.h>
+
+/* --- Subdevice descriptor structure --- */
+
+struct a4l_device;
+struct a4l_buffer;
+
+/*!
+ * @brief Structure describing the subdevice
+ * @see a4l_add_subd()
+ */
+
+struct a4l_subdevice {
+
+	struct list_head list;
+			   /**< List stuff */
+
+	struct a4l_device *dev;
+			       /**< Containing device */
+
+	unsigned int idx;
+		      /**< Subdevice index */
+
+	struct a4l_buffer *buf;
+			       /**< Linked buffer */
+
+	/* Subdevice's status (busy, linked?) */
+	unsigned long status;
+			     /**< Subdevice's status */
+
+	/* Descriptors stuff */
+	unsigned long flags;
+			 /**< Type flags */
+	struct a4l_channels_desc *chan_desc;
+				/**< Tab of channels descriptors pointers */
+	struct a4l_rngdesc *rng_desc;
+				/**< Tab of ranges descriptors pointers */
+	struct a4l_cmd_desc *cmd_mask;
+			    /**< Command capabilities mask */
+
+	/* Functions stuff */
+	int (*insn_read) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "read" */
+	int (*insn_write) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							 /**< Callback for the instruction "write" */
+	int (*insn_bits) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							/**< Callback for the instruction "bits" */
+	int (*insn_config) (struct a4l_subdevice *, struct a4l_kernel_instruction *);
+							  /**< Callback for the configuration instruction */
+	int (*do_cmd) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+					/**< Callback for command handling */
+	int (*do_cmdtest) (struct a4l_subdevice *, struct a4l_cmd_desc *);
+						       /**< Callback for command checking */
+	void (*cancel) (struct a4l_subdevice *);
+					 /**< Callback for asynchronous transfer cancellation */
+	void (*munge) (struct a4l_subdevice *, void *, unsigned long);
+								/**< Callback for munge operation */
+	int (*trigger) (struct a4l_subdevice *, lsampl_t);
+					      /**< Callback for trigger operation */
+
+	char priv[0];
+		  /**< Private data */
+};
+
+/* --- Subdevice related functions and macros --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice * sb, int idx);
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice * sb, int chidx, int rngidx);
+int a4l_check_chanlist(struct a4l_subdevice * subd,
+		       unsigned char nb_chan, unsigned int *chans);
+
+#define a4l_subd_is_input(x) ((A4L_SUBD_MASK_READ & (x)->flags) != 0)
+/* The following macro treats a DIO subdevice primarily as an
+   output subdevice */
+#define a4l_subd_is_output(x) \
+	((A4L_SUBD_MASK_WRITE & (x)->flags) != 0 || \
+	 (A4L_SUBD_DIO & (x)->flags) != 0)
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_get_subd(struct a4l_device *dev, int idx);
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+			    void (*setup)(struct a4l_subdevice *));
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice * subd);
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg);
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg);
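+
+/*
+ * Illustrative sketch (not part of the original header): allocating
+ * and registering a subdevice from a driver attach() routine. The
+ * setup callback body is left empty here; a4l_add_subd() is assumed
+ * to return the subdevice index on success.
+ */
+static void example_setup(struct a4l_subdevice *subd)
+{
+	/* Fill in subd->flags, chan_desc, rng_desc, callbacks... */
+}
+
+static inline int example_declare_subd(struct a4l_device *dev)
+{
+	struct a4l_subdevice *subd = a4l_alloc_subd(0, example_setup);
+
+	if (subd == NULL)
+		return -ENOMEM;
+
+	return a4l_add_subd(dev, subd);
+}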
+
+#endif /* !_COBALT_RTDM_ANALOGY_SUBDEVICE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h
new file mode 100644
index 0000000..c62c22a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/analogy/transfer.h
@@ -0,0 +1,78 @@
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_ANALOGY_TRANSFER_H
+#define _COBALT_RTDM_ANALOGY_TRANSFER_H
+
+#include <rtdm/analogy/buffer.h>
+
+/* IRQ types */
+#define A4L_IRQ_DISABLED 0
+
+/* Fields init values */
+#define A4L_IRQ_UNUSED (unsigned int)((unsigned short)(~0))
+#define A4L_IDX_UNUSED (unsigned int)(~0)
+
+/* TODO: IRQ handling must leave transfer for os_facilities */
+
+struct a4l_device;
+/* Analogy transfer descriptor */
+struct a4l_transfer {
+
+	/* Subdevices desc */
+	unsigned int nb_subd;
+	struct a4l_subdevice **subds;
+
+	/* Buffer stuff: the default size */
+	unsigned int default_bufsize;
+
+	/* IRQ in use */
+	/* TODO: irq_desc should vanish */
+	struct a4l_irq_descriptor irq_desc;
+};
+
+/* --- Proc function --- */
+
+int a4l_rdproc_transfer(struct seq_file *p, void *data);
+
+/* --- Upper layer functions --- */
+
+void a4l_presetup_transfer(struct a4l_device_context * cxt);
+int a4l_setup_transfer(struct a4l_device_context * cxt);
+int a4l_precleanup_transfer(struct a4l_device_context * cxt);
+int a4l_cleanup_transfer(struct a4l_device_context * cxt);
+int a4l_reserve_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_init_transfer(struct a4l_device_context * cxt, struct a4l_cmd_desc * cmd);
+int a4l_cancel_transfer(struct a4l_device_context * cxt, int idx_subd);
+int a4l_cancel_transfers(struct a4l_device_context * cxt);
+
+ssize_t a4l_put(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+ssize_t a4l_get(struct a4l_device_context * cxt, void *buf, size_t nbytes);
+
+int a4l_request_irq(struct a4l_device *dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie);
+int a4l_free_irq(struct a4l_device *dev, unsigned int irq);
+unsigned int a4l_get_irq(struct a4l_device *dev);
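+
+/*
+ * Illustrative sketch (not part of the original header): hooking a
+ * hypothetical interrupt line from an attach() routine, matching the
+ * a4l_irq_hdlr_t prototype from rtdm_helpers.h.
+ */
+static int example_irq_handler(unsigned int irq, void *cookie)
+{
+	struct a4l_device *dev = cookie;
+
+	/* acknowledge the hardware, push samples, ... */
+	(void)dev;
+	return 0;
+}
+
+static inline int example_hook_irq(struct a4l_device *dev, unsigned int irq)
+{
+	return a4l_request_irq(dev, irq, example_irq_handler, 0, dev);
+}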
+
+int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg);
+
+#endif /* !_COBALT_RTDM_ANALOGY_TRANSFER_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h
new file mode 100644
index 0000000..885a237
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/autotune.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_AUTOTUNE_H
+#define _COBALT_RTDM_AUTOTUNE_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/autotune.h>
+
+#endif /* !_COBALT_RTDM_AUTOTUNE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h
new file mode 100644
index 0000000..73268e3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/can.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                    <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_CAN_H
+#define _COBALT_RTDM_CAN_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/can.h>
+
+#endif /* _COBALT_RTDM_CAN_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h
new file mode 100644
index 0000000..d60cfc5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/cobalt.h
@@ -0,0 +1,33 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COBALT_H
+#define _COBALT_RTDM_COBALT_H
+
+#include <xenomai/posix/process.h>
+#include <xenomai/posix/extension.h>
+#include <xenomai/posix/thread.h>
+#include <xenomai/posix/signal.h>
+#include <xenomai/posix/timer.h>
+#include <xenomai/posix/clock.h>
+#include <xenomai/posix/event.h>
+#include <xenomai/posix/monitor.h>
+#include <xenomai/posix/corectl.h>
+
+#endif /* !_COBALT_RTDM_COBALT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h
new file mode 100644
index 0000000..2c81a33
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/compat.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_COMPAT_H
+#define _COBALT_RTDM_COMPAT_H
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <cobalt/kernel/compat.h>
+#include <rtdm/rtdm.h>
+
+struct compat_rtdm_getsockopt_args {
+	int level;
+	int optname;
+	compat_uptr_t optval;
+	compat_uptr_t optlen;
+};
+
+struct compat_rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const compat_uptr_t optval;
+	socklen_t optlen;
+};
+
+struct compat_rtdm_getsockaddr_args {
+	compat_uptr_t addr;
+	compat_uptr_t addrlen;
+};
+
+struct compat_rtdm_setsockaddr_args {
+	const compat_uptr_t addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x20,	\
+					     struct compat_rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x21,	\
+					     struct compat_rtdm_setsockopt_args)
+#define _RTIOC_BIND_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x22,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x23,	\
+					     struct compat_rtdm_setsockaddr_args)
+#define _RTIOC_ACCEPT_COMPAT		_IOW(RTIOC_TYPE_COMMON, 0x25,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x26,	\
+					     struct compat_rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME_COMPAT	_IOW(RTIOC_TYPE_COMMON, 0x27,	\
+					     struct compat_rtdm_getsockaddr_args)
+
+#define __COMPAT_CASE(__op)		: case __op
+
+#else	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define __COMPAT_CASE(__op)
+
+#endif	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+#define COMPAT_CASE(__op)	case __op __COMPAT_CASE(__op  ## _COMPAT)
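+
+/*
+ * Usage sketch (editor's illustration, not part of the original
+ * header): COMPAT_CASE() expands to one or two case labels, so an
+ * ioctl demultiplexer can match the native request code and, when
+ * CONFIG_XENO_ARCH_SYS3264 is enabled, its 32-bit variant in a single
+ * arm. Assuming a request pair such as _RTIOC_GETSOCKOPT[_COMPAT]:
+ *
+ *	switch (request) {
+ *	COMPAT_CASE(_RTIOC_GETSOCKOPT):
+ *		return do_getsockopt(fd, arg);
+ *	}
+ */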
+
+#endif /* !_COBALT_RTDM_COMPAT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h
new file mode 100644
index 0000000..2a68c3e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/driver.h
@@ -0,0 +1,1361 @@
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, driver API header
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * @ingroup driverapi
+ */
+#ifndef _COBALT_RTDM_DRIVER_H
+#define _COBALT_RTDM_DRIVER_H
+
+#include <asm/atomic.h>
+#include <linux/cpumask.h>
+#include <linux/list.h>
+#include <linux/module.h>
+#include <linux/cdev.h>
+#include <linux/wait.h>
+#include <linux/notifier.h>
+#include <pipeline/lock.h>
+#include <pipeline/inband_work.h>
+#include <xenomai/version.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/init.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <cobalt/kernel/tree.h>
+#include <rtdm/fd.h>
+#include <rtdm/rtdm.h>
+
+/* debug support */
+#include <cobalt/kernel/assert.h>
+#include <trace/events/cobalt-rtdm.h>
+#ifdef CONFIG_PCI
+#include <asm-generic/xenomai/pci_ids.h>
+#endif /* CONFIG_PCI */
+#include <asm/xenomai/syscall.h>
+
+struct class;
+typedef struct xnselector rtdm_selector_t;
+enum rtdm_selecttype;
+
+/*!
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/*!
+ * @anchor dev_flags @name Device Flags
+ * Static flags describing an RTDM device
+ * @{
+ */
+/** If set, only a single instance of the device can be requested by an
+ *  application. */
+#define RTDM_EXCLUSIVE			0x0001
+
+/**
+ * Use fixed minor provided in the rtdm_device description for
+ * registering. If this flag is absent, the RTDM core assigns minor
+ * numbers to devices managed by a driver in order of registration.
+ */
+#define RTDM_FIXED_MINOR		0x0002
+
+/** If set, the device is addressed via a clear-text name. */
+#define RTDM_NAMED_DEVICE		0x0010
+
+/** If set, the device is addressed via a combination of protocol ID and
+ *  socket type. */
+#define RTDM_PROTOCOL_DEVICE		0x0020
+
+/** Mask selecting the device type. */
+#define RTDM_DEVICE_TYPE_MASK		0x00F0
+
+/** Flag indicating a secure variant of RTDM (not supported here) */
+#define RTDM_SECURE_DEVICE		0x80000000
+/** @} Device Flags */
+
+/** Maximum number of named devices per driver. */
+#define RTDM_MAX_MINOR	4096
+
+/** @} rtdm_device_register */
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_SELECTTYPE_xxx   @name RTDM_SELECTTYPE_xxx
+ * Event types that select() can bind to
+ * @{
+ */
+enum rtdm_selecttype {
+	/** Select input data availability events */
+	RTDM_SELECTTYPE_READ = XNSELECT_READ,
+
+	/** Select output buffer availability events */
+	RTDM_SELECTTYPE_WRITE = XNSELECT_WRITE,
+
+	/** Select exceptional events */
+	RTDM_SELECTTYPE_EXCEPT = XNSELECT_EXCEPT
+};
+/** @} RTDM_SELECTTYPE_xxx */
+
+/** @} rtdm_sync */
+
+/**
+ * @brief Device context
+ *
+ * A device context structure is associated with every open device instance.
+ * RTDM takes care of its creation and destruction and passes it to the
+ * operation handlers when being invoked.
+ *
+ * Drivers can attach arbitrary data immediately after the official
+ * structure.  The size of this data is provided via
+ * rtdm_driver.context_size during device registration.
+ */
+struct rtdm_dev_context {
+	struct rtdm_fd fd;
+
+	/** Reference to owning device */
+	struct rtdm_device *device;
+
+	/** Start of the driver-defined context data structure */
+	char dev_private[0];
+};
+
+static inline struct rtdm_dev_context *rtdm_fd_to_context(struct rtdm_fd *fd)
+{
+	return container_of(fd, struct rtdm_dev_context, fd);
+}
+
+/**
+ * Locate the driver private area associated with a device context structure
+ *
+ * @param[in] fd File descriptor structure associated with opened
+ * device instance
+ *
+ * @return The address of the driver private area associated with @a
+ * fd.
+ */
+static inline void *rtdm_fd_to_private(struct rtdm_fd *fd)
+{
+	return &rtdm_fd_to_context(fd)->dev_private[0];
+}
+
+/**
+ * Locate a device file descriptor structure from its driver private area
+ *
+ * @param[in] dev_private Address of a private context area
+ *
+ * @return The address of the file descriptor structure defining @a
+ * dev_private.
+ */
+static inline struct rtdm_fd *rtdm_private_to_fd(void *dev_private)
+{
+	struct rtdm_dev_context *ctx;
+	ctx = container_of(dev_private, struct rtdm_dev_context, dev_private);
+	return &ctx->fd;
+}
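+
+/*
+ * Round-trip sketch (editor's illustration; the foo_* names are
+ * hypothetical): a driver typically resolves its per-connection state
+ * from the file descriptor passed to an I/O handler, and may recover
+ * the descriptor from that state later on:
+ *
+ *	struct foo_context {
+ *		int unit;
+ *	};
+ *
+ *	static ssize_t foo_read_rt(struct rtdm_fd *fd,
+ *				   void __user *buf, size_t size)
+ *	{
+ *		struct foo_context *ctx = rtdm_fd_to_private(fd);
+ *
+ *		return do_read(rtdm_private_to_fd(ctx), ctx->unit, buf, size);
+ *	}
+ */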
+
+/**
+ * Tell whether the passed file descriptor belongs to an application.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return true if passed file descriptor belongs to an application,
+ * false otherwise.
+ */
+static inline bool rtdm_fd_is_user(struct rtdm_fd *fd)
+{
+	return rtdm_fd_owner(fd) != &cobalt_kernel_ppd;
+}
+
+/**
+ * Locate a device structure from a file descriptor.
+ *
+ * @param[in] fd File descriptor
+ *
+ * @return The address of the device structure to which this file
+ * descriptor is attached.
+ */
+static inline struct rtdm_device *rtdm_fd_device(struct rtdm_fd *fd)
+{
+	return rtdm_fd_to_context(fd)->device;
+}
+
+/**
+ * @brief RTDM profile information
+ *
+ * This descriptor details the profile information associated with an
+ * RTDM class of device managed by a driver.
+ *
+ * @anchor rtdm_profile_info
+ */
+struct rtdm_profile_info {
+	/** Device class name */
+	const char *name;
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int class_id;
+	/** Device sub-class, see RTDM_SUBCLASS_xxx definition in the
+	    @ref rtdm_profiles "Device Profiles" */
+	int subclass_id;
+	/** Supported device profile version */
+	int version;
+	/** Reserved */
+	unsigned int magic;
+	struct module *owner;
+	struct class *kdev_class;
+};
+
+struct rtdm_driver;
+
+/**
+ * @brief RTDM state management handlers
+ */
+struct rtdm_sm_ops {
+	/** Handler called upon transition to COBALT_STATE_WARMUP */ 
+	int (*start)(struct rtdm_driver *drv);
+	/** Handler called upon transition to COBALT_STATE_TEARDOWN */ 
+	int (*stop)(struct rtdm_driver *drv);
+};
+
+/**
+ * @brief RTDM driver
+ *
+ * This descriptor describes an RTDM device driver. The structure
+ * holds runtime data and must therefore reside in writable memory.
+ */
+struct rtdm_driver {
+	/**
+	 * Class profile information. The RTDM_PROFILE_INFO() macro @b
+	 * must be used for filling up this field.
+	 * @anchor rtdm_driver_profile
+	 */
+	struct rtdm_profile_info profile_info;
+	/**
+	 * Device flags, see @ref dev_flags "Device Flags" for details
+	 * @anchor rtdm_driver_flags
+	 */
+	int device_flags;
+	/**
+	 * Size of the private memory area the core should
+	 * automatically allocate for each open file descriptor, which
+	 * is usable for storing the context data associated with each
+	 * connection. The allocated memory is zero-initialized. The
+	 * start of this area can be retrieved by a call to
+	 * rtdm_fd_to_private().
+	 */
+	size_t context_size;
+	/** Protocol device identification: protocol family (PF_xxx) */
+	int protocol_family;
+	/** Protocol device identification: socket type (SOCK_xxx) */
+	int socket_type;
+	/** I/O operation handlers */
+	struct rtdm_fd_ops ops;
+	/** State management handlers */
+	struct rtdm_sm_ops smops;
+	/**
+	 * Count of devices this driver manages. This value is used to
+	 * allocate a chrdev region for named devices.
+	 */
+	int device_count;
+	/** Base minor for named devices. */
+	int base_minor;
+	/** Reserved area */
+	struct {
+		union {
+			struct {
+				struct cdev cdev;
+				int major;
+			} named;
+		};
+		atomic_t refcount;
+		struct notifier_block nb_statechange;
+		DECLARE_BITMAP(minor_map, RTDM_MAX_MINOR);
+	};
+};
+
+#define RTDM_CLASS_MAGIC	0x8284636c
+
+/**
+ * @brief Initializer for class profile information.
+ *
+ * This macro must be used to fill in the @ref rtdm_profile_info
+ * "class profile information" field from a RTDM driver.
+ *
+ * @param __name Class name (unquoted).
+ *
+ * @param __id Class major identification number
+ * (profile_version.class_id).
+ *
+ * @param __subid Class minor identification number
+ * (profile_version.subclass_id).
+ *
+ * @param __version Profile version number.
+ *
+ * @note See @ref rtdm_profiles "Device Profiles".
+ */
+#define RTDM_PROFILE_INFO(__name, __id, __subid, __version)	\
+{								\
+	.name = ( # __name ),					\
+	.class_id = (__id),					\
+	.subclass_id = (__subid),				\
+	.version = (__version),					\
+	.magic = ~RTDM_CLASS_MAGIC,				\
+	.owner = THIS_MODULE,					\
+	.kdev_class = NULL,					\
+}
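+
+/*
+ * Initialization sketch (editor's illustration; the class/subclass
+ * and version values are made up): filling in the profile field of a
+ * driver descriptor with the mandatory macro:
+ *
+ *	static struct rtdm_driver foo_driver = {
+ *		.profile_info = RTDM_PROFILE_INFO(foo,
+ *						  RTDM_CLASS_EXPERIMENTAL,
+ *						  0, 1),
+ *		.device_flags = RTDM_NAMED_DEVICE,
+ *		.device_count = 1,
+ *	};
+ */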
+
+int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls);
+
+/**
+ * @brief RTDM device
+ *
+ * This descriptor describes an RTDM device instance. The structure
+ * holds runtime data and must therefore reside in writable memory.
+ */
+struct rtdm_device {
+	/** Device driver. */
+	struct rtdm_driver *driver;
+	/** Driver definable device data */
+	void *device_data;
+	/**
+	 * Device label template for composing the device name. A
+	 * limited printf-like format string is assumed, with a
+	 * provision for replacing the first %d/%i placeholder found
+	 * in the string by the device minor number.  It is up to the
+	 * driver to actually mention this placeholder or not,
+	 * depending on the naming convention for its devices.  For
+	 * named devices, the corresponding device node will
+	 * automatically appear in the /dev/rtdm hierarchy with
+	 * hotplug-enabled device filesystems (DEVTMPFS).
+	 */
+	const char *label;
+	/**
+	 * Minor number of the device. If RTDM_FIXED_MINOR is present
+	 * in the driver flags, the value stored in this field is used
+	 * verbatim by rtdm_dev_register(). Otherwise, the RTDM core
+	 * automatically assigns minor numbers to all devices managed
+	 * by the driver referred to by @a driver, in order of
+	 * registration, storing the resulting values into this field.
+	 *
+	 * Device nodes created for named devices in the Linux /dev
+	 * hierarchy are assigned this minor number.
+	 *
+	 * The minor number of the current device handling an I/O
+	 * request can be retrieved by a call to rtdm_fd_minor().
+	 */
+	int minor;
+	/** Reserved area. */
+	struct {
+		unsigned int magic;
+		char *name;
+		union {
+			struct {
+				xnhandle_t handle;
+			} named;
+			struct {
+				struct xnid id;
+			} proto;
+		};
+		dev_t rdev;
+		struct device *kdev;
+		struct class *kdev_class;
+		atomic_t refcount;
+		struct rtdm_fd_ops ops;
+		wait_queue_head_t putwq;
+		struct list_head openfd_list;
+	};
+};
+
+/* --- device registration --- */
+
+int rtdm_dev_register(struct rtdm_device *device);
+
+void rtdm_dev_unregister(struct rtdm_device *device);
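+
+/*
+ * Registration sketch (editor's illustration, hypothetical "foo"
+ * driver): a named device picks up the %d placeholder in its label to
+ * compose /dev/rtdm/foo0, /dev/rtdm/foo1, and so on:
+ *
+ *	static struct rtdm_device foo_device = {
+ *		.driver = &foo_driver,
+ *		.label = "foo%d",
+ *	};
+ *
+ *	ret = rtdm_dev_register(&foo_device);
+ *	if (ret)
+ *		return ret;
+ *	...
+ *	rtdm_dev_unregister(&foo_device);
+ */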
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+
+static inline struct device *rtdm_dev_to_kdev(struct rtdm_device *device)
+{
+	return device->kdev;
+}
+
+/* --- clock services --- */
+static inline nanosecs_abs_t rtdm_clock_read(void)
+{
+	return xnclock_read_realtime(&nkclock);
+}
+
+static inline nanosecs_abs_t rtdm_clock_read_monotonic(void)
+{
+	return xnclock_read_monotonic(&nkclock);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- timeout sequences */
+
+typedef nanosecs_abs_t rtdm_toseq_t;
+
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout);
+
+/*!
+ * @addtogroup rtdm_sync
+ * @{
+ */
+
+/*!
+ * @defgroup rtdm_sync_biglock Big dual kernel lock
+ * @{
+ */
+
+/**
+ * @brief Enter atomic section (dual kernel only)
+ *
+ * This call opens a fully atomic section, serializing execution with
+ * respect to all interrupt handlers (including for real-time IRQs)
+ * and Xenomai threads running on all CPUs.
+ *
+ * @param __context name of local variable to store the context
+ * in. This variable, updated by the real-time core, will hold the
+ * information required to leave the atomic section properly.
+ *
+ * @note Atomic sections may be nested. The caller is allowed to sleep
+ * on a blocking Xenomai service from primary mode within an atomic
+ * section delimited by cobalt_atomic_enter/cobalt_atomic_leave calls.
+ * By contrast, sleeping on a regular Linux kernel service while
+ * holding such a lock is NOT valid.
+ *
+ * @note Since the strongest lock is acquired by this service, it can
+ * be used to synchronize real-time and non-real-time contexts.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases, mainly for the
+ * purpose of porting existing dual-kernel drivers which still depend
+ * on the obsolete RTDM_EXECUTE_ATOMICALLY() construct.
+ */
+#define cobalt_atomic_enter(__context)				\
+	do {							\
+		xnlock_get_irqsave(&nklock, (__context));	\
+		xnsched_lock();					\
+	} while (0)
+
+/**
+ * @brief Leave atomic section (dual kernel only)
+ *
+ * This call closes an atomic section previously opened by a call to
+ * cobalt_atomic_enter(), restoring the preemption and interrupt state
+ * which prevailed prior to entering the exited section.
+ *
+ * @param __context name of local variable which stored the context.
+ *
+ * @warning This service is not portable to the Mercury core, and
+ * should be restricted to Cobalt-specific use cases.
+ */
+#define cobalt_atomic_leave(__context)				\
+	do {							\
+		xnsched_unlock();				\
+		xnlock_put_irqrestore(&nklock, (__context));	\
+	} while (0)
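+
+/*
+ * Pairing sketch (editor's illustration): the context variable handed
+ * to cobalt_atomic_enter() must be passed back verbatim to
+ * cobalt_atomic_leave():
+ *
+ *	spl_t s;
+ *
+ *	cobalt_atomic_enter(s);
+ *	...update state shared with IRQ handlers and other CPUs...
+ *	cobalt_atomic_leave(s);
+ */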
+
+/**
+ * @brief Execute code block atomically (DEPRECATED)
+ *
+ * Generally, it is illegal to suspend the current task by calling
+ * rtdm_task_sleep(), rtdm_event_wait(), etc. while holding a spinlock. In
+ * contrast, this macro allows several operations, including potentially
+ * rescheduling calls, to be combined in a code block which executes
+ * atomically with respect to other RTDM_EXECUTE_ATOMICALLY() blocks. The
+ * macro is a light-weight alternative to protecting code blocks via
+ * mutexes, and it can even be used to synchronise real-time and
+ * non-real-time contexts.
+ *
+ * @param code_block Commands to be executed atomically
+ *
+ * @note It is not allowed to leave the code block explicitly by using
+ * @c break, @c return, @c goto, etc. Doing so would leave the global
+ * lock held beyond the code block, in an inconsistent state. Moreover,
+ * do not embed complex operations into the code block. Consider that
+ * they will be executed under the preemption lock with interrupts
+ * switched off. Also note that invoking rescheduling calls may break
+ * the atomicity until the task gains the CPU again.
+ *
+ * @coretags{unrestricted}
+ *
+ * @deprecated This construct will be phased out in Xenomai
+ * 3.0. Please use rtdm_waitqueue services instead.
+ *
+ * @see cobalt_atomic_enter().
+ */
+#ifdef DOXYGEN_CPP /* Beautify doxygen output */
+#define RTDM_EXECUTE_ATOMICALLY(code_block)	\
+{						\
+	<ENTER_ATOMIC_SECTION>			\
+	code_block;				\
+	<LEAVE_ATOMIC_SECTION>			\
+}
+#else /* This is how it really works */
+static inline __attribute__((deprecated)) void
+rtdm_execute_atomically(void) { }
+
+#define RTDM_EXECUTE_ATOMICALLY(code_block)		\
+{							\
+	spl_t __rtdm_s;					\
+							\
+	rtdm_execute_atomically();			\
+	xnlock_get_irqsave(&nklock, __rtdm_s);		\
+	xnsched_lock();					\
+	code_block;					\
+	xnsched_unlock();				\
+	xnlock_put_irqrestore(&nklock, __rtdm_s);	\
+}
+#endif
+
+/** @} Big dual kernel lock */
+
+/**
+ * @defgroup rtdm_sync_spinlock Spinlock with preemption deactivation
+ * @{
+ */
+
+/**
+ * Static lock initialisation
+ */
+#define RTDM_LOCK_UNLOCKED(__name)	PIPELINE_SPIN_LOCK_UNLOCKED(__name)
+
+#define DEFINE_RTDM_LOCK(__name)		\
+	rtdm_lock_t __name = RTDM_LOCK_UNLOCKED(__name)
+
+/** Lock variable */
+typedef pipeline_spinlock_t rtdm_lock_t;
+
+/** Variable to save the context while holding a lock */
+typedef unsigned long rtdm_lockctx_t;
+
+/**
+ * Dynamic lock initialisation
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{task-unrestricted}
+ */
+static inline void rtdm_lock_init(rtdm_lock_t *lock)
+{
+	raw_spin_lock_init(lock);
+}
+
+/**
+ * Acquire lock from non-preemptible contexts
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted}
+ */
+static inline void rtdm_lock_get(rtdm_lock_t *lock)
+{
+	XENO_BUG_ON(COBALT, !spltest());
+	raw_spin_lock(lock);
+	xnsched_lock();
+}
+
+/**
+ * Release lock without preemption restoration
+ *
+ * @param lock Address of lock variable
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+static inline void rtdm_lock_put(rtdm_lock_t *lock)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+}
+
+/**
+ * Acquire lock and disable preemption, by stalling the head domain.
+ *
+ * @param __lock Address of lock variable
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_get_irqsave(__lock, __context)	\
+	((__context) = __rtdm_lock_get_irqsave(__lock))
+
+static inline rtdm_lockctx_t __rtdm_lock_get_irqsave(rtdm_lock_t *lock)
+{
+	rtdm_lockctx_t context;
+
+	splhigh(context);
+	raw_spin_lock(lock);
+	xnsched_lock();
+
+	return context;
+}
+
+/**
+ * Release lock and restore preemption state
+ *
+ * @param lock Address of lock variable
+ * @param context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+static inline
+void rtdm_lock_put_irqrestore(rtdm_lock_t *lock, rtdm_lockctx_t context)
+{
+	raw_spin_unlock(lock);
+	xnsched_unlock();
+	splexit(context);
+}
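+
+/*
+ * Locking sketch (editor's illustration): guarding driver data which
+ * is also touched from interrupt context calls for the irqsave
+ * variant:
+ *
+ *	static DEFINE_RTDM_LOCK(foo_lock);
+ *	rtdm_lockctx_t c;
+ *
+ *	rtdm_lock_get_irqsave(&foo_lock, c);
+ *	...critical section...
+ *	rtdm_lock_put_irqrestore(&foo_lock, c);
+ */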
+
+/**
+ * Disable preemption locally
+ *
+ * @param __context name of local variable to store the context in
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqsave(__context)	\
+	splhigh(__context)
+
+/**
+ * Restore preemption state
+ *
+ * @param __context name of local variable which stored the context
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_lock_irqrestore(__context)	\
+	splexit(__context)
+
+/** @} Spinlock with Preemption Deactivation */
+
+#ifndef DOXYGEN_CPP
+
+struct rtdm_waitqueue {
+	struct xnsynch wait;
+};
+typedef struct rtdm_waitqueue rtdm_waitqueue_t;
+
+#define RTDM_WAITQUEUE_INITIALIZER(__name) {		 \
+	    .wait = XNSYNCH_WAITQUEUE_INITIALIZER((__name).wait), \
+	}
+
+#define DEFINE_RTDM_WAITQUEUE(__name)				\
+	struct rtdm_waitqueue __name = RTDM_WAITQUEUE_INITIALIZER(__name)
+
+#define DEFINE_RTDM_WAITQUEUE_ONSTACK(__name)	\
+	DEFINE_RTDM_WAITQUEUE(__name)
+
+static inline void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+{
+	*wq = (struct rtdm_waitqueue)RTDM_WAITQUEUE_INITIALIZER(*wq);
+}
+
+static inline void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+{
+	xnsynch_destroy(&wq->wait);
+}
+
+static inline int __rtdm_dowait(struct rtdm_waitqueue *wq,
+				nanosecs_rel_t timeout, xntmode_t timeout_mode)
+{
+	int ret;
+	
+	ret = xnsynch_sleep_on(&wq->wait, timeout, timeout_mode);
+	if (ret & XNBREAK)
+		return -EINTR;
+	if (ret & XNTIMEO)
+		return -ETIMEDOUT;
+	if (ret & XNRMID)
+		return -EIDRM;
+	return 0;
+}
+
+static inline int __rtdm_timedwait(struct rtdm_waitqueue *wq,
+				   nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+{
+	if (toseq && timeout > 0)
+		return __rtdm_dowait(wq, *toseq, XN_ABSOLUTE);
+
+	return __rtdm_dowait(wq, timeout, XN_RELATIVE);
+}
+
+#define rtdm_timedwait_condition_locked(__wq, __cond, __timeout, __toseq) \
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_timedwait(__wq, __timeout, __toseq); \
+		__ret;							\
+	})
+
+#define rtdm_wait_condition_locked(__wq, __cond)			\
+	({								\
+		int __ret = 0;						\
+		while (__ret == 0 && !(__cond))				\
+			__ret = __rtdm_dowait(__wq,			\
+					      XN_INFINITE, XN_RELATIVE); \
+		__ret;							\
+	})
+
+#define rtdm_timedwait_condition(__wq, __cond, __timeout, __toseq)	\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_timedwait_condition_locked(__wq, __cond,	\
+					      __timeout, __toseq);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_timedwait(__wq, __timeout, __toseq)			\
+	__rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_timedwait_locked(__wq, __timeout, __toseq)			\
+	rtdm_timedwait(__wq, __timeout, __toseq)
+
+#define rtdm_wait_condition(__wq, __cond)				\
+	({								\
+		spl_t __s;						\
+		int __ret;						\
+		xnlock_get_irqsave(&nklock, __s);			\
+		__ret = rtdm_wait_condition_locked(__wq, __cond);	\
+		xnlock_put_irqrestore(&nklock, __s);			\
+		__ret;							\
+	})
+
+#define rtdm_wait(__wq)							\
+	__rtdm_dowait(__wq, XN_INFINITE, XN_RELATIVE)
+
+#define rtdm_wait_locked(__wq)  rtdm_wait(__wq)
+
+#define rtdm_waitqueue_lock(__wq, __context)  cobalt_atomic_enter(__context)
+
+#define rtdm_waitqueue_unlock(__wq, __context)  cobalt_atomic_leave(__context)
+
+#define rtdm_waitqueue_signal(__wq)					\
+	({								\
+		struct xnthread *__waiter;				\
+		__waiter = xnsynch_wakeup_one_sleeper(&(__wq)->wait);	\
+		xnsched_run();						\
+		__waiter != NULL;					\
+	})
+
+#define __rtdm_waitqueue_flush(__wq, __reason)				\
+	({								\
+		int __ret;						\
+		__ret = xnsynch_flush(&(__wq)->wait, __reason);		\
+		xnsched_run();						\
+		__ret == XNSYNCH_RESCHED;				\
+	})
+
+#define rtdm_waitqueue_broadcast(__wq)	\
+	__rtdm_waitqueue_flush(__wq, 0)
+
+#define rtdm_waitqueue_flush(__wq)	\
+	__rtdm_waitqueue_flush(__wq, XNBREAK)
+
+#define rtdm_waitqueue_wakeup(__wq, __waiter)				\
+	do {								\
+		xnsynch_wakeup_this_sleeper(&(__wq)->wait, __waiter);	\
+		xnsched_run();						\
+	} while (0)
+
+#define rtdm_for_each_waiter(__pos, __wq)		\
+	xnsynch_for_each_sleeper(__pos, &(__wq)->wait)
+
+#define rtdm_for_each_waiter_safe(__pos, __tmp, __wq)	\
+	xnsynch_for_each_sleeper_safe(__pos, __tmp, &(__wq)->wait)
+
+#endif /* !DOXYGEN_CPP */
+
+/** @} rtdm_sync */
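+
+/*
+ * Waitqueue sketch (editor's illustration; data_ready is a
+ * hypothetical condition): a consumer blocks until a producer posts,
+ * with both sides serializing on the waitqueue lock:
+ *
+ *	static DEFINE_RTDM_WAITQUEUE(foo_wq);
+ *	spl_t s;
+ *
+ *	rtdm_waitqueue_lock(&foo_wq, s);
+ *	ret = rtdm_wait_condition_locked(&foo_wq, data_ready);
+ *	rtdm_waitqueue_unlock(&foo_wq, s);
+ *
+ * and on the producer side:
+ *
+ *	rtdm_waitqueue_lock(&foo_wq, s);
+ *	data_ready = true;
+ *	rtdm_waitqueue_signal(&foo_wq);
+ *	rtdm_waitqueue_unlock(&foo_wq, s);
+ */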
+
+/* --- Interrupt management services --- */
+/*!
+ * @addtogroup rtdm_irq
+ * @{
+ */
+
+typedef struct xnintr rtdm_irq_t;
+
+/*!
+ * @anchor RTDM_IRQTYPE_xxx   @name RTDM_IRQTYPE_xxx
+ * Interrupt registration flags
+ * @{
+ */
+/** Enable IRQ-sharing with other real-time drivers */
+#define RTDM_IRQTYPE_SHARED		XN_IRQTYPE_SHARED
+/** Mark IRQ as edge-triggered, relevant for correct handling of shared
+ *  edge-triggered IRQs */
+#define RTDM_IRQTYPE_EDGE		XN_IRQTYPE_EDGE
+/** @} RTDM_IRQTYPE_xxx */
+
+/**
+ * Interrupt handler
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 or a combination of @ref RTDM_IRQ_xxx flags
+ */
+typedef int (*rtdm_irq_handler_t)(rtdm_irq_t *irq_handle);
+
+/*!
+ * @anchor RTDM_IRQ_xxx   @name RTDM_IRQ_xxx
+ * Return flags of interrupt handlers
+ * @{
+ */
+/** Unhandled interrupt */
+#define RTDM_IRQ_NONE			XN_IRQ_NONE
+/** Denote handled interrupt */
+#define RTDM_IRQ_HANDLED		XN_IRQ_HANDLED
+/** Request interrupt disabling on exit */
+#define RTDM_IRQ_DISABLE		XN_IRQ_DISABLE
+/** @} RTDM_IRQ_xxx */
+
+/**
+ * Retrieve IRQ handler argument
+ *
+ * @param irq_handle IRQ handle
+ * @param type Type of the pointer to return
+ *
+ * @return The argument pointer registered on rtdm_irq_request() is returned,
+ * type-casted to the specified @a type.
+ *
+ * @coretags{unrestricted}
+ */
+#define rtdm_irq_get_arg(irq_handle, type)	((type *)irq_handle->cookie)
+/** @} rtdm_irq */
+
+int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
+		     rtdm_irq_handler_t handler, unsigned long flags,
+		     const char *device_name, void *arg);
+
+int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no,
+			    rtdm_irq_handler_t handler, unsigned long flags,
+			    const char *device_name, void *arg,
+			    const cpumask_t *cpumask);
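+
+/*
+ * IRQ sketch (editor's illustration; the foo_* helpers are
+ * hypothetical): a handler retrieves its cookie, checks whether its
+ * device raised the interrupt, and reports the outcome:
+ *
+ *	static int foo_isr(rtdm_irq_t *irqh)
+ *	{
+ *		struct foo_state *s = rtdm_irq_get_arg(irqh, struct foo_state);
+ *
+ *		if (!foo_irq_pending(s))
+ *			return RTDM_IRQ_NONE;
+ *
+ *		foo_ack_irq(s);
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ *
+ *	ret = rtdm_irq_request(&s->irqh, irq_no, foo_isr, 0, "foo", s);
+ */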
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_irq_free(rtdm_irq_t *irq_handle)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+	xnintr_destroy(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_enable(rtdm_irq_t *irq_handle)
+{
+	xnintr_enable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_disable(rtdm_irq_t *irq_handle)
+{
+	xnintr_disable(irq_handle);
+	return 0;
+}
+
+static inline int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle,
+					const cpumask_t *cpumask)
+{
+	return xnintr_affinity(irq_handle, cpumask);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- non-real-time signalling services --- */
+
+/*!
+ * @addtogroup rtdm_nrtsignal
+ * @{
+ */
+
+typedef struct rtdm_nrtsig rtdm_nrtsig_t;
+/**
+ * Non-real-time signal handler
+ *
+ * @param[in] nrt_sig Signal handle pointer as passed to rtdm_nrtsig_init()
+ * @param[in] arg Argument as passed to rtdm_nrtsig_init()
+ *
+ * @note The signal handler will run in the soft-IRQ context of the
+ * non-real-time subsystem. Note the implications of this context,
+ * e.g. blocking operations must not be invoked.
+ */
+typedef void (*rtdm_nrtsig_handler_t)(rtdm_nrtsig_t *nrt_sig, void *arg);
+
+struct rtdm_nrtsig {
+	struct pipeline_inband_work inband_work; /* Must be first */
+	rtdm_nrtsig_handler_t handler;
+	void *arg;
+};
+
+void rtdm_schedule_nrt_work(struct work_struct *lostage_work);
+/** @} rtdm_nrtsignal */
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work);
+
+static inline void rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig,
+				    rtdm_nrtsig_handler_t handler, void *arg)
+{
+	nrt_sig->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*nrt_sig,
+						 __rtdm_nrtsig_execute);
+	nrt_sig->handler = handler;
+	nrt_sig->arg = arg;
+}
+
+static inline void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig)
+{
+	nrt_sig->handler = NULL;
+	nrt_sig->arg = NULL;
+}
+
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig);
+#endif /* !DOXYGEN_CPP */
+
+/* --- timer services --- */
+
+/*!
+ * @addtogroup rtdm_timer
+ * @{
+ */
+
+typedef struct xntimer rtdm_timer_t;
+
+/**
+ * Timer handler
+ *
+ * @param[in] timer Timer handle as returned by rtdm_timer_init()
+ */
+typedef void (*rtdm_timer_handler_t)(rtdm_timer_t *timer);
+
+/*!
+ * @anchor RTDM_TIMERMODE_xxx   @name RTDM_TIMERMODE_xxx
+ * Timer operation modes
+ * @{
+ */
+enum rtdm_timer_mode {
+	/** Monotonic timer with relative timeout */
+	RTDM_TIMERMODE_RELATIVE = XN_RELATIVE,
+
+	/** Monotonic timer with absolute timeout */
+	RTDM_TIMERMODE_ABSOLUTE = XN_ABSOLUTE,
+
+	/** Adjustable timer with absolute timeout */
+	RTDM_TIMERMODE_REALTIME = XN_REALTIME
+};
+/** @} RTDM_TIMERMODE_xxx */
+
+/** @} rtdm_timer */
+
+int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler,
+		    const char *name);
+
+void rtdm_timer_destroy(rtdm_timer_t *timer);
+
+int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+		     nanosecs_rel_t interval, enum rtdm_timer_mode mode);
+
+void rtdm_timer_stop(rtdm_timer_t *timer);
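+
+/*
+ * Timer sketch (editor's illustration): arming a periodic timer which
+ * fires every millisecond, starting one millisecond from now:
+ *
+ *	static rtdm_timer_t foo_timer;
+ *
+ *	static void foo_timer_fn(rtdm_timer_t *timer)
+ *	{
+ *		...runs on each expiry...
+ *	}
+ *
+ *	ret = rtdm_timer_init(&foo_timer, foo_timer_fn, "foo");
+ *	if (ret == 0)
+ *		ret = rtdm_timer_start(&foo_timer, 1000000, 1000000,
+ *				       RTDM_TIMERMODE_RELATIVE);
+ */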
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline int rtdm_timer_start_in_handler(rtdm_timer_t *timer,
+					      nanosecs_abs_t expiry,
+					      nanosecs_rel_t interval,
+					      enum rtdm_timer_mode mode)
+{
+	return xntimer_start(timer, expiry, interval, (xntmode_t)mode);
+}
+
+static inline void rtdm_timer_stop_in_handler(rtdm_timer_t *timer)
+{
+	xntimer_stop(timer);
+}
+#endif /* !DOXYGEN_CPP */
+
+/* --- task services --- */
+/*!
+ * @addtogroup rtdm_task
+ * @{
+ */
+
+typedef struct xnthread rtdm_task_t;
+
+/**
+ * Real-time task procedure
+ *
+ * @param[in,out] arg argument as passed to rtdm_task_init()
+ */
+typedef void (*rtdm_task_proc_t)(void *arg);
+
+/**
+ * @anchor rtdmtaskprio @name Task Priority Range
+ * Maximum and minimum task priorities
+ * @{ */
+#define RTDM_TASK_LOWEST_PRIORITY	0
+#define RTDM_TASK_HIGHEST_PRIORITY	99
+/** @} Task Priority Range */
+
+/**
+ * @anchor rtdmchangetaskprio @name Task Priority Modification
+ * Raise or lower task priorities by one level
+ * @{ */
+#define RTDM_TASK_RAISE_PRIORITY	(+1)
+#define RTDM_TASK_LOWER_PRIORITY	(-1)
+/** @} Task Priority Modification */
+
+/** @} rtdm_task */
+
+int rtdm_task_init(rtdm_task_t *task, const char *name,
+		   rtdm_task_proc_t task_proc, void *arg,
+		   int priority, nanosecs_rel_t period);
+int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode);
+void rtdm_task_busy_sleep(nanosecs_rel_t delay);
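+
+/*
+ * Task sketch (editor's illustration): spawning a periodic real-time
+ * task with a 100 us period which exits cleanly upon cancellation:
+ *
+ *	static rtdm_task_t foo_task;
+ *
+ *	static void foo_task_fn(void *arg)
+ *	{
+ *		while (!rtdm_task_should_stop())
+ *			rtdm_task_wait_period(NULL);
+ *	}
+ *
+ *	ret = rtdm_task_init(&foo_task, "foo", foo_task_fn, NULL,
+ *			     RTDM_TASK_HIGHEST_PRIORITY, 100000);
+ */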
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void rtdm_task_destroy(rtdm_task_t *task)
+{
+	xnthread_cancel(task);
+	xnthread_join(task, true);
+}
+
+static inline int rtdm_task_should_stop(void)
+{
+	return xnthread_test_info(xnthread_current(), XNCANCELD);
+}
+
+void rtdm_task_join(rtdm_task_t *task);
+
+static inline void __deprecated rtdm_task_join_nrt(rtdm_task_t *task,
+						   unsigned int poll_delay)
+{
+	rtdm_task_join(task);
+}
+
+static inline void rtdm_task_set_priority(rtdm_task_t *task, int priority)
+{
+	union xnsched_policy_param param = { .rt = { .prio = priority } };
+	spl_t s;
+
+	splhigh(s);
+	xnthread_set_schedparam(task, &xnsched_class_rt, &param);
+	xnsched_run();
+	splexit(s);
+}
+
+static inline int rtdm_task_set_period(rtdm_task_t *task,
+				       nanosecs_abs_t start_date,
+				       nanosecs_rel_t period)
+{
+	if (period < 0)
+		period = 0;
+	if (start_date == 0)
+		start_date = XN_INFINITE;
+
+	return xnthread_set_periodic(task, start_date, XN_ABSOLUTE, period);
+}
+
+static inline int rtdm_task_unblock(rtdm_task_t *task)
+{
+	spl_t s;
+	int res;
+
+	splhigh(s);
+	res = xnthread_unblock(task);
+	xnsched_run();
+	splexit(s);
+
+	return res;
+}
+
+static inline rtdm_task_t *rtdm_task_current(void)
+{
+	return xnthread_current();
+}
+
+static inline int rtdm_task_wait_period(unsigned long *overruns_r)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+	return xnthread_wait_period(overruns_r);
+}
+
+static inline int rtdm_task_sleep(nanosecs_rel_t delay)
+{
+	return __rtdm_task_sleep(delay, XN_RELATIVE);
+}
+
+static inline int
+rtdm_task_sleep_abs(nanosecs_abs_t wakeup_date, enum rtdm_timer_mode mode)
+{
+	/* For the sake of a consistent API usage... */
+	if (mode != RTDM_TIMERMODE_ABSOLUTE && mode != RTDM_TIMERMODE_REALTIME)
+		return -EINVAL;
+	return __rtdm_task_sleep(wakeup_date, (xntmode_t)mode);
+}
+
+/* rtdm_task_sleep_abs shall be used instead */
+static inline int __deprecated rtdm_task_sleep_until(nanosecs_abs_t wakeup_time)
+{
+	return __rtdm_task_sleep(wakeup_time, XN_REALTIME);
+}
+
+#define rtdm_task_busy_wait(__condition, __spin_ns, __sleep_ns)			\
+	({									\
+		__label__ done;							\
+		nanosecs_abs_t __end;						\
+		int __ret = 0;							\
+		for (;;) {							\
+			__end = rtdm_clock_read_monotonic() + __spin_ns;	\
+			for (;;) {						\
+				if (__condition)				\
+					goto done;				\
+				if (rtdm_clock_read_monotonic() >= __end)	\
+					break;					\
+			}							\
+			__ret = rtdm_task_sleep(__sleep_ns);			\
+			if (__ret)						\
+				break;						\
+		}								\
+	done:									\
+		__ret;								\
+	})
+
+#define rtdm_wait_context	xnthread_wait_context
+
+static inline
+void rtdm_wait_complete(struct rtdm_wait_context *wc)
+{
+	xnthread_complete_wait(wc);
+}
+
+static inline
+int rtdm_wait_is_completed(struct rtdm_wait_context *wc)
+{
+	return xnthread_wait_complete_p(wc);
+}
+
+static inline void rtdm_wait_prepare(struct rtdm_wait_context *wc)
+{
+	xnthread_prepare_wait(wc);
+}
+
+static inline
+struct rtdm_wait_context *rtdm_wait_get_context(rtdm_task_t *task)
+{
+	return xnthread_get_wait_context(task);
+}
+
+#endif /* !DOXYGEN_CPP */
+
+/* --- event services --- */
+
+typedef struct rtdm_event {
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_event_t;
+
+#define RTDM_EVENT_PENDING		XNSYNCH_SPARE1
+
+void rtdm_event_init(rtdm_event_t *event, unsigned long pending);
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_event_wait(rtdm_event_t *event);
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_event_signal(rtdm_event_t *event);
+
+void rtdm_event_clear(rtdm_event_t *event);
+
+void rtdm_event_pulse(rtdm_event_t *event);
+
+void rtdm_event_destroy(rtdm_event_t *event);
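+
+/*
+ * Event sketch (editor's illustration): an interrupt handler signals
+ * completion to a task blocked on the event:
+ *
+ *	static rtdm_event_t foo_done;
+ *
+ *	rtdm_event_init(&foo_done, 0);
+ *
+ *	...in the interrupt handler...
+ *	rtdm_event_signal(&foo_done);
+ *
+ *	...in the waiting task...
+ *	ret = rtdm_event_wait(&foo_done);
+ */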
+
+/* --- semaphore services --- */
+
+typedef struct rtdm_sem {
+	unsigned long value;
+	struct xnsynch synch_base;
+	DECLARE_XNSELECT(select_block);
+} rtdm_sem_t;
+
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value);
+int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector,
+		    enum rtdm_selecttype type, unsigned fd_index);
+int rtdm_sem_down(rtdm_sem_t *sem);
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq);
+void rtdm_sem_up(rtdm_sem_t *sem);
+
+void rtdm_sem_destroy(rtdm_sem_t *sem);
+
+/* --- mutex services --- */
+
+typedef struct rtdm_mutex {
+	struct xnsynch synch_base;
+	atomic_t fastlock;
+} rtdm_mutex_t;
+
+void rtdm_mutex_init(rtdm_mutex_t *mutex);
+int rtdm_mutex_lock(rtdm_mutex_t *mutex);
+int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq);
+void rtdm_mutex_unlock(rtdm_mutex_t *mutex);
+void rtdm_mutex_destroy(rtdm_mutex_t *mutex);
+
+/* --- utility functions --- */
+
+#define rtdm_printk(format, ...)	printk(format, ##__VA_ARGS__)
+
+#define rtdm_printk_ratelimited(fmt, ...)  do {				\
+	if (xnclock_ratelimit())					\
+		printk(fmt, ##__VA_ARGS__);				\
+} while (0)
+
+#ifndef DOXYGEN_CPP /* Avoid static inline tags for RTDM in doxygen */
+static inline void *rtdm_malloc(size_t size)
+{
+	return xnmalloc(size);
+}
+
+static inline void rtdm_free(void *ptr)
+{
+	xnfree(ptr);
+}
+
+int rtdm_mmap_to_user(struct rtdm_fd *fd,
+		      void *src_addr, size_t len,
+		      int prot, void **pptr,
+		      struct vm_operations_struct *vm_ops,
+		      void *vm_private_data);
+
+int rtdm_iomap_to_user(struct rtdm_fd *fd,
+		       phys_addr_t src_addr, size_t len,
+		       int prot, void **pptr,
+		       struct vm_operations_struct *vm_ops,
+		       void *vm_private_data);
+
+int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va);
+
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa);
+
+int rtdm_munmap(void *ptr, size_t len);
+
+static inline int rtdm_read_user_ok(struct rtdm_fd *fd,
+				    const void __user *ptr, size_t size)
+{
+	return access_rok(ptr, size);
+}
+
+static inline int rtdm_rw_user_ok(struct rtdm_fd *fd,
+				  const void __user *ptr, size_t size)
+{
+	return access_wok(ptr, size);
+}
+
+static inline int rtdm_copy_from_user(struct rtdm_fd *fd,
+				      void *dst, const void __user *src,
+				      size_t size)
+{
+	return __xn_copy_from_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_from_user(struct rtdm_fd *fd,
+					   void *dst, const void __user *src,
+					   size_t size)
+{
+	return cobalt_copy_from_user(dst, src, size);
+}
+
+static inline int rtdm_copy_to_user(struct rtdm_fd *fd,
+				    void __user *dst, const void *src,
+				    size_t size)
+{
+	return __xn_copy_to_user(dst, src, size) ? -EFAULT : 0;
+}
+
+static inline int rtdm_safe_copy_to_user(struct rtdm_fd *fd,
+					 void __user *dst, const void *src,
+					 size_t size)
+{
+	return cobalt_copy_to_user(dst, src, size);
+}
+
+static inline int rtdm_strncpy_from_user(struct rtdm_fd *fd,
+					 char *dst,
+					 const char __user *src, size_t count)
+{
+	return cobalt_strncpy_from_user(dst, src, count);
+}
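+
+/*
+ * Copy sketch (editor's illustration; foo_config and arg are
+ * hypothetical): an ioctl handler fetching a configuration block from
+ * user space with the checked helper:
+ *
+ *	struct foo_config cfg;
+ *	int ret;
+ *
+ *	ret = rtdm_safe_copy_from_user(fd, &cfg, arg, sizeof(cfg));
+ *	if (ret)
+ *		return ret;
+ */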
+
+static inline bool rtdm_available(void)
+{
+	return realtime_core_enabled();
+}
+
+static inline int rtdm_rt_capable(struct rtdm_fd *fd)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p()))
+		return 0;
+
+	if (!rtdm_fd_is_user(fd))
+		return !xnsched_root_p();
+
+	return xnthread_current() != NULL;
+}
+
+static inline int rtdm_in_rt_context(void)
+{
+	return is_primary_domain();
+}
+
+#define RTDM_IOV_FASTMAX  16
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast);
+
+static inline
+void rtdm_drop_iovec(struct iovec *iov, struct iovec *iov_fast)
+{
+	if (iov != iov_fast)
+		xnfree(iov);
+}
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen);
+
+#endif /* !DOXYGEN_CPP */
+
+#endif /* _COBALT_RTDM_DRIVER_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h
new file mode 100644
index 0000000..176c67e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/fd.h
@@ -0,0 +1,431 @@
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008,2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_FD_H
+#define _COBALT_KERNEL_FD_H
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/file.h>
+#include <cobalt/kernel/tree.h>
+#include <asm-generic/xenomai/syscall.h>
+
+struct vm_area_struct;
+struct rtdm_fd;
+struct _rtdm_mmap_request;
+struct _rtdm_setsockaddr_args;
+struct _rtdm_setsockopt_args;
+struct xnselector;
+struct cobalt_ppd;
+struct rtdm_device;
+
+/**
+ * @file
+ * @anchor File operation handlers
+ * @addtogroup rtdm_device_register
+ * @{
+ */
+
+/**
+ * Open handler for named devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] oflags Open flags as passed by the user
+ *
+ * The file descriptor carries the device minor number, which can be
+ * retrieved by a call to rtdm_fd_minor(fd). The minor number can be
+ * used for distinguishing devices managed by a driver.
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_open_handler(struct rtdm_fd *fd, int oflags);
+
+/**
+ * Socket creation handler for protocol devices
+ *
+ * @param[in] fd File descriptor associated with opened device instance
+ * @param[in] protocol Protocol number as passed by the user
+ *
+ * @return 0 on success. On failure, a negative error code is returned.
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_socket_handler(struct rtdm_fd *fd, int protocol);
+
+/**
+ * Close handler
+ *
+ * @param[in] fd File descriptor associated with opened
+ * device instance.
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+void rtdm_close_handler(struct rtdm_fd *fd);
+
+/**
+ * IOCTL handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] request Request number as passed by the user
+ * @param[in,out] arg Request argument as passed by the user
+ *
+ * @return A positive value or 0 on success. On failure return either
+ * -ENOSYS, to request that the function be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+int rtdm_ioctl_handler(struct rtdm_fd *fd, unsigned int request, void __user *arg);
+
+/**
+ * Read handler
+ *
+ * @param[in] fd File descriptor
+ * @param[out] buf Input buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to read
+ *
+ * @return On success, the number of bytes read. On failure return either
+ * -ENOSYS, to request that this handler be called again from the opposite
+ * realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c read() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_read_handler(struct rtdm_fd *fd, void __user *buf, size_t size);
+
+/**
+ * Write handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] buf Output buffer as passed by the user
+ * @param[in] size Number of bytes the user requests to write
+ *
+ * @return On success, the number of bytes written. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c write() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_write_handler(struct rtdm_fd *fd, const void __user *buf, size_t size);
+
+/**
+ * Receive message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in,out] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes received. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c recvmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_recvmsg_handler(struct rtdm_fd *fd, struct user_msghdr *msg, int flags);
+
+/**
+ * Transmit message handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] msg Message descriptor as passed by the user, automatically
+ * mirrored to safe kernel memory in case of user mode call
+ * @param[in] flags Message flags as passed by the user
+ *
+ * @return On success, the number of bytes transmitted. On failure return
+ * either -ENOSYS, to request that this handler be called again from the
+ * opposite realtime/non-realtime context, or another negative error code.
+ *
+ * @see @c sendmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ */
+ssize_t rtdm_sendmsg_handler(struct rtdm_fd *fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * Select handler
+ *
+ * @param[in] fd File descriptor
+ * @param selector Pointer to the selector structure
+ * @param type Type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
+ * XNSELECT_EXCEPT)
+ * @param index Index of the file descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c select() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/007908799/xsh/select.html
+ */
+int rtdm_select_handler(struct rtdm_fd *fd, struct xnselector *selector,
+			unsigned int type, unsigned int index);
+
+/**
+ * Memory mapping handler
+ *
+ * @param[in] fd File descriptor
+ * @param[in] vma Virtual memory area descriptor
+ *
+ * @return 0 on success. On failure, a negative error code is
+ * returned.
+ *
+ * @see @c mmap() in POSIX.1-2001,
+ * http://pubs.opengroup.org/onlinepubs/7908799/xsh/mmap.html
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM.
+ */
+int rtdm_mmap_handler(struct rtdm_fd *fd, struct vm_area_struct *vma);
+
+/**
+ * Allocate mapping region in address space
+ *
+ * When present, this optional handler should return the start address
+ * of a free region in the process's address space, large enough to
+ * cover the ongoing mmap() operation. If unspecified, the default
+ * architecture-defined handler is invoked.
+ *
+ * Most drivers can omit this handler, except on MMU-less platforms
+ * (see second note).
+ *
+ * @param[in] fd File descriptor
+ * @param[in] len Length of the requested region
+ * @param[in] pgoff Page frame number to map to (see second note).
+ * @param[in] flags Requested mapping flags
+ *
+ * @return The start address of the mapping region on success. On
+ * failure, a negative error code should be returned, with -ENOSYS
+ * meaning that the driver does not want to provide such information,
+ * in which case the ongoing mmap() operation will fail.
+ *
+ * @note The address hint passed to the mmap() request is deliberately
+ * ignored by RTDM, and therefore not passed to this handler.
+ *
+ * @note On MMU-less platforms, this handler is required because RTDM
+ * issues mapping requests over a shareable character device
+ * internally. In that context, the RTDM core may pass a null @a pgoff
+ * argument to the handler to probe for the logical start address of
+ * the memory region to map to. Otherwise, when @a pgoff is non-zero,
+ * pgoff << PAGE_SHIFT is usually returned.
+ */
+unsigned long
+rtdm_get_unmapped_area_handler(struct rtdm_fd *fd,
+			       unsigned long len, unsigned long pgoff,
+			       unsigned long flags);
+/**
+ * @anchor rtdm_fd_ops
+ * @brief RTDM file operation descriptor.
+ *
+ * This structure describes the operations available with an RTDM
+ * device, defining handlers for submitting I/O requests. Those
+ * handlers are implemented by RTDM device drivers.
+ */
+struct rtdm_fd_ops {
+	/** See rtdm_open_handler(). */
+	int (*open)(struct rtdm_fd *fd, int oflags);
+	/** See rtdm_socket_handler(). */
+	int (*socket)(struct rtdm_fd *fd, int protocol);
+	/** See rtdm_close_handler(). */
+	void (*close)(struct rtdm_fd *fd);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_rt)(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg);
+	/** See rtdm_ioctl_handler(). */
+	int (*ioctl_nrt)(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_rt)(struct rtdm_fd *fd,
+			   void __user *buf, size_t size);
+	/** See rtdm_read_handler(). */
+	ssize_t (*read_nrt)(struct rtdm_fd *fd,
+			    void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_rt)(struct rtdm_fd *fd,
+			    const void __user *buf, size_t size);
+	/** See rtdm_write_handler(). */
+	ssize_t (*write_nrt)(struct rtdm_fd *fd,
+			     const void __user *buf, size_t size);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_rt)(struct rtdm_fd *fd,
+			      struct user_msghdr *msg, int flags);
+	/** See rtdm_recvmsg_handler(). */
+	ssize_t (*recvmsg_nrt)(struct rtdm_fd *fd,
+			       struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_rt)(struct rtdm_fd *fd,
+			      const struct user_msghdr *msg, int flags);
+	/** See rtdm_sendmsg_handler(). */
+	ssize_t (*sendmsg_nrt)(struct rtdm_fd *fd,
+			       const struct user_msghdr *msg, int flags);
+	/** See rtdm_select_handler(). */
+	int (*select)(struct rtdm_fd *fd,
+		      struct xnselector *selector,
+		      unsigned int type, unsigned int index);
+	/** See rtdm_mmap_handler(). */
+	int (*mmap)(struct rtdm_fd *fd,
+		    struct vm_area_struct *vma);
+	/** See rtdm_get_unmapped_area_handler(). */
+	unsigned long (*get_unmapped_area)(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags);
+};
+
+/** @} File operation handlers */
+
+struct rtdm_fd {
+	unsigned int magic;
+	struct rtdm_fd_ops *ops;
+	struct cobalt_ppd *owner;
+	unsigned int refs;
+	int ufd;
+	int minor;
+	int oflags;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	int compat;
+#endif
+	bool stale;
+	struct list_head cleanup;
+	struct list_head next;	/* in dev->openfd_list */
+};
+
+#define RTDM_FD_MAGIC 0x52544446
+
+#define RTDM_FD_COMPAT	__COBALT_COMPAT_BIT
+#define RTDM_FD_COMPATX	__COBALT_COMPATX_BIT
+
+int __rtdm_anon_getfd(const char *name, int flags);
+
+void __rtdm_anon_putfd(int ufd);
+
+static inline struct cobalt_ppd *rtdm_fd_owner(const struct rtdm_fd *fd)
+{
+	return fd->owner;
+}
+
+static inline int rtdm_fd_ufd(const struct rtdm_fd *fd)
+{
+	return fd->ufd;
+}
+
+static inline int rtdm_fd_minor(const struct rtdm_fd *fd)
+{
+	return fd->minor;
+}
+
+static inline int rtdm_fd_flags(const struct rtdm_fd *fd)
+{
+	return fd->oflags;
+}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return fd->compat;
+}
+#else
+static inline int rtdm_fd_is_compat(const struct rtdm_fd *fd)
+{
+	return 0;
+}
+#endif
+
+int rtdm_fd_enter(struct rtdm_fd *rtdm_fd, int ufd,
+		  unsigned int magic, struct rtdm_fd_ops *ops);
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd);
+
+struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic);
+
+int rtdm_fd_lock(struct rtdm_fd *fd);
+
+void rtdm_fd_put(struct rtdm_fd *fd);
+
+void rtdm_fd_unlock(struct rtdm_fd *fd);
+
+int rtdm_fd_fcntl(int ufd, int cmd, ...);
+
+int rtdm_fd_ioctl(int ufd, unsigned int request, ...);
+
+ssize_t rtdm_fd_read(int ufd, void __user *buf, size_t size);
+
+ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size);
+
+int rtdm_fd_close(int ufd, unsigned int magic);
+
+ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags);
+
+int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags, void __user *u_timeout,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg),
+		       int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts));
+
+int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen,
+			 unsigned int flags, void __user *u_timeout,
+			 int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+			 int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg,
+			int flags);
+
+int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg));
+
+int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma,
+		 void **u_addrp);
+
+int rtdm_fd_valid_p(int ufd);
+
+int rtdm_fd_select(int ufd, struct xnselector *selector,
+		   unsigned int type);
+
+int rtdm_fd_get_setsockaddr_args(struct rtdm_fd *fd,
+				 struct _rtdm_setsockaddr_args *dst,
+				 const void *src);
+
+int rtdm_fd_get_setsockopt_args(struct rtdm_fd *fd,
+				struct _rtdm_setsockopt_args *dst,
+				const void *src);
+
+int rtdm_fd_get_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		      const struct user_msghdr *msg, bool rw);
+
+int rtdm_fd_put_iovec(struct rtdm_fd *fd, const struct iovec *iov,
+		      const struct user_msghdr *msg);
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+		struct rtdm_device *dev);
+
+void rtdm_device_flush_fds(struct rtdm_device *dev);
+
+void rtdm_fd_cleanup(struct cobalt_ppd *p);
+
+void rtdm_fd_init(void);
+
+#endif /* _COBALT_KERNEL_FD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h
new file mode 100644
index 0000000..b621a71
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpio.h
@@ -0,0 +1,82 @@
+/**
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_GPIO_H
+#define _COBALT_RTDM_GPIO_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/gpio.h>
+
+struct class;
+struct device_node;
+struct gpio_desc;
+
+struct rtdm_gpio_pin {
+	struct rtdm_device dev;
+	struct list_head next;
+	rtdm_irq_t irqh;
+	rtdm_event_t event;
+	char *name;
+	struct gpio_desc *desc;
+	nanosecs_abs_t timestamp;
+	bool monotonic_timestamp;
+};
+
+struct rtdm_gpio_chip {
+	struct gpio_chip *gc;
+	struct rtdm_driver driver;
+	struct class *devclass;
+	struct list_head next;
+	rtdm_lock_t lock;
+	struct rtdm_gpio_pin pins[0];
+};
+
+int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc,
+		      struct gpio_chip *gc,
+		      int gpio_subclass);
+
+struct rtdm_gpio_chip *
+rtdm_gpiochip_alloc(struct gpio_chip *gc,
+		    int gpio_subclass);
+
+void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc);
+
+int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc,
+			      const char *label, int gpio_subclass);
+
+int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc,
+			     unsigned int offset);
+
+int rtdm_gpiochip_find(struct device_node *from, const char *label, int type);
+
+int rtdm_gpiochip_array_find(struct device_node *from, const char *label[],
+			     int nentries, int type);
+
+#ifdef CONFIG_OF
+
+int rtdm_gpiochip_scan_of(struct device_node *from,
+			  const char *compat, int type);
+
+int rtdm_gpiochip_scan_array_of(struct device_node *from,
+				const char *compat[],
+				int nentries, int type);
+#endif
+
+void rtdm_gpiochip_remove_by_type(int type);
+
+#endif /* !_COBALT_RTDM_GPIO_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h
new file mode 100644
index 0000000..e38d241
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/gpiopwm.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_PWM_H
+#define _COBALT_RTDM_PWM_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/gpiopwm.h>
+
+#endif /* !_COBALT_RTDM_PWM_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h
new file mode 100644
index 0000000..5eefccd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/ipc.h
@@ -0,0 +1,30 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_RTDM_IPC_H
+#define _COBALT_RTDM_IPC_H
+
+#include <linux/net.h>
+#include <linux/socket.h>
+#include <linux/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/ipc.h>
+
+#endif /* !_COBALT_RTDM_IPC_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h
new file mode 100644
index 0000000..07198f8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/net.h
@@ -0,0 +1,45 @@
+/*
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef _COBALT_RTDM_NET_H
+#define _COBALT_RTDM_NET_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/net.h>
+#include <rtdm/driver.h>
+
+struct rtnet_callback {
+    void    (*func)(struct rtdm_fd *, void *);
+    void    *arg;
+};
+
+#define RTNET_RTIOC_CALLBACK    _IOW(RTIOC_TYPE_NETWORK, 0x12, \
+				     struct rtnet_callback)
+
+/* utility functions */
+
+/* provided by rt_ipv4 */
+unsigned long rt_inet_aton(const char *ip);
+
+/* provided by rt_packet */
+int rt_eth_aton(unsigned char *addr_buf, const char *mac);
+
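+/*
+ * A small usage sketch for the two helpers above (addresses are
+ * illustrative values only):
+ *
+ * @code
+ * unsigned long ip = rt_inet_aton("192.168.0.10");
+ * unsigned char mac[6];
+ *
+ * rt_eth_aton(mac, "00:11:22:33:44:55");
+ * @endcode
+ */
+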
+#define RTNET_RTDM_VER 914
+
+#endif  /* _COBALT_RTDM_NET_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h
new file mode 100644
index 0000000..b937df2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/rtdm.h
@@ -0,0 +1,218 @@
+/*
+ * Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_RTDM_H
+#define _COBALT_RTDM_RTDM_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/ioctl.h>
+#include <linux/sched.h>
+#include <linux/socket.h>
+#include <cobalt/kernel/ppd.h>
+#include <rtdm/fd.h>
+
+typedef __u32 socklen_t;
+
+#include <rtdm/uapi/rtdm.h>
+
+int __rtdm_dev_open(const char *path, int oflag);
+
+int __rtdm_dev_socket(int protocol_family,
+		      int socket_type, int protocol);
+
+static inline int rtdm_open(const char *path, int oflag, ...)
+{
+	return __rtdm_dev_open(path, oflag);
+}
+
+static inline int rtdm_socket(int protocol_family,
+			      int socket_type, int protocol)
+{
+	return __rtdm_dev_socket(protocol_family, socket_type, protocol);
+}
+
+static inline int rtdm_close(int fd)
+{
+	return rtdm_fd_close(fd, RTDM_FD_MAGIC);
+}
+
+#define rtdm_fcntl(__fd, __cmd, __args...)	\
+	rtdm_fd_fcntl(__fd, __cmd, ##__args)
+
+#define rtdm_ioctl(__fd, __request, __args...)	\
+	rtdm_fd_ioctl(__fd, __request, ##__args)
+
+static inline ssize_t rtdm_read(int fd, void *buf, size_t count)
+{
+	return rtdm_fd_read(fd, buf, count);
+}
+
+static inline ssize_t rtdm_write(int fd, const void *buf, size_t count)
+{
+	return rtdm_fd_write(fd, buf, count);
+}
+
+static inline ssize_t rtdm_recvmsg(int s, struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_recvmsg(s, msg, flags);
+}
+
+static inline ssize_t rtdm_sendmsg(int s, const struct user_msghdr *msg, int flags)
+{
+	return rtdm_fd_sendmsg(s, msg, flags);
+}
+
+static inline
+ssize_t rtdm_recvfrom(int s, void *buf, size_t len, int flags,
+		      struct sockaddr *from,
+		      socklen_t *fromlen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+	ssize_t ret;
+
+	iov.iov_base = buf;
+	iov.iov_len = len;
+	msg.msg_name = from;
+	msg.msg_namelen = from ? *fromlen : 0;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	ret = rtdm_recvmsg(s, &msg, flags);
+	if (ret < 0)
+		return ret;
+
+	if (from)
+		*fromlen = msg.msg_namelen;
+
+	return ret;
+}
+
+static inline ssize_t rtdm_recv(int s, void *buf, size_t len, int flags)
+{
+	return rtdm_recvfrom(s, buf, len, flags, NULL, NULL);
+}
+
+static inline ssize_t rtdm_sendto(int s, const void *buf, size_t len,
+				  int flags, const struct sockaddr *to,
+				  socklen_t tolen)
+{
+	struct user_msghdr msg;
+	struct iovec iov;
+
+	iov.iov_base = (void *)buf;
+	iov.iov_len = len;
+	msg.msg_name = (struct sockaddr *)to;
+	msg.msg_namelen = tolen;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	return rtdm_sendmsg(s, &msg, flags);
+}
+
+static inline ssize_t rtdm_send(int s, const void *buf, size_t len, int flags)
+{
+	return rtdm_sendto(s, buf, len, flags, NULL, 0);
+}
+
+static inline int rtdm_getsockopt(int s, int level, int optname,
+				  void *optval, socklen_t *optlen)
+{
+	struct _rtdm_getsockopt_args args = {
+		level, optname, optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKOPT, &args);
+}
+
+static inline int rtdm_setsockopt(int s, int level, int optname,
+				  const void *optval, socklen_t optlen)
+{
+	struct _rtdm_setsockopt_args args = {
+		level, optname, (void *)optval, optlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_SETSOCKOPT, &args);
+}
+
+static inline int rtdm_bind(int s, const struct sockaddr *my_addr,
+			    socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		my_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_BIND, &args);
+}
+
+static inline int rtdm_connect(int s, const struct sockaddr *serv_addr,
+			       socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = {
+		serv_addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_CONNECT, &args);
+}
+
+static inline int rtdm_listen(int s, int backlog)
+{
+	return rtdm_ioctl(s, _RTIOC_LISTEN, backlog);
+}
+
+static inline int rtdm_accept(int s, struct sockaddr *addr,
+			      socklen_t *addrlen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		addr, addrlen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_ACCEPT, &args);
+}
+
+static inline int rtdm_getsockname(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETSOCKNAME, &args);
+}
+
+static inline int rtdm_getpeername(int s, struct sockaddr *name,
+				   socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = {
+		name, namelen
+	};
+
+	return rtdm_ioctl(s, _RTIOC_GETPEERNAME, &args);
+}
+
+static inline int rtdm_shutdown(int s, int how)
+{
+	return rtdm_ioctl(s, _RTIOC_SHUTDOWN, how);
+}
+
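+/*
+ * Illustrative kernel-side usage of the socket wrappers above; a
+ * hedged sketch which assumes an RTnet IPv4 stack is present, with
+ * error handling elided:
+ *
+ * @code
+ * struct sockaddr_in sin = {
+ *	.sin_family = AF_INET,
+ *	.sin_port = htons(7000),
+ *	.sin_addr.s_addr = INADDR_ANY,
+ * };
+ * char buf[64];
+ * int s;
+ *
+ * s = rtdm_socket(AF_INET, SOCK_DGRAM, 0);
+ * rtdm_bind(s, (struct sockaddr *)&sin, sizeof(sin));
+ * rtdm_recv(s, buf, sizeof(buf), 0);
+ * rtdm_close(s);
+ * @endcode
+ */
+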
+#endif /* _COBALT_RTDM_RTDM_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h
new file mode 100644
index 0000000..0b557b4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/serial.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_SERIAL_H
+#define _COBALT_RTDM_SERIAL_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/serial.h>
+
+#endif /* !_COBALT_RTDM_SERIAL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h
new file mode 100644
index 0000000..d2669e1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/testing.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_TESTING_H
+#define _COBALT_RTDM_TESTING_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/testing.h>
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+#include <rtdm/compat.h>
+
+struct compat_rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	compat_uptr_t histogram_avg;
+	compat_uptr_t histogram_min;
+	compat_uptr_t histogram_max;
+};
+
+struct compat_rttst_heap_stathdr {
+	int nrstats;
+	compat_uptr_t buf;
+};
+
+#define RTTST_RTIOC_TMBENCH_STOP_COMPAT \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct compat_rttst_overall_bench_res)
+
+#endif	/* CONFIG_XENO_ARCH_SYS3264 */
+
+#endif /* !_COBALT_RTDM_TESTING_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h
new file mode 100644
index 0000000..bc2a68d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/rtdm/udd.h
@@ -0,0 +1,340 @@
+/**
+ * @file
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_RTDM_UDD_H
+#define _COBALT_RTDM_UDD_H
+
+#include <linux/list.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/udd.h>
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_udd User-space driver core
+ *
+ * This profile includes all mini-drivers sitting on top of the
+ * User-space Device Driver framework (UDD). The generic UDD core
+ * driver enables interrupt control and I/O memory access interfaces
+ * to user-space device drivers, as defined by the mini-drivers when
+ * registering.
+ *
+ * A mini-driver supplements the UDD core with ancillary functions for
+ * dealing with @ref udd_memory_region "memory mappings" and @ref
+ * udd_irq_handler "interrupt control" for a particular I/O
+ * card/device.
+ *
+ * UDD-compliant mini-drivers only have to provide the basic support
+ * for dealing with the interrupt sources present in the device, so
+ * that most of the device requests can be handled from a Xenomai
+ * application running in user-space. Typically, a mini-driver would
+ * handle the interrupt top-half, and the user-space application would
+ * handle the bottom-half.
+ *
+ * This profile is reminiscent of the UIO framework available with the
+ * Linux kernel, adapted to the dual kernel Cobalt environment.
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_irq_special
+ * Special IRQ values for udd_device.irq
+ *
+ * @{
+ */
+/**
+ * No IRQ managed. Passing this code implicitly disables all
+ * interrupt-related services, including control (disable/enable) and
+ * notification.
+ */
+#define UDD_IRQ_NONE     0
+/**
+ * IRQ directly managed from the mini-driver on top of the UDD
+ * core. The mini-driver is in charge of attaching the handler(s) to
+ * the IRQ(s) it manages, notifying the Cobalt threads waiting for IRQ
+ * events by calling the udd_notify_event() service.
+ */
+#define UDD_IRQ_CUSTOM   (-1)
+/** @} */
+
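+/*
+ * With UDD_IRQ_CUSTOM, the mini-driver attaches its own real-time
+ * handler(s) and forwards events by itself. A hedged sketch;
+ * "ack_device()" stands for device-specific acknowledge logic:
+ *
+ * @code
+ * static int custom_isr(rtdm_irq_t *irqh)
+ * {
+ *	struct udd_device *udd = rtdm_irq_get_arg(irqh, struct udd_device);
+ *
+ *	ack_device(udd);
+ *	udd_notify_event(udd);
+ *	return RTDM_IRQ_HANDLED;
+ * }
+ * @endcode
+ */
+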
+/**
+ * @anchor udd_memory_types  @name Memory types for mapping
+ * Types of memory for mapping
+ *
+ * The UDD core implements a default ->mmap() handler which first
+ * attempts to hand over the request to the corresponding handler
+ * defined by the mini-driver. If not present, the UDD core
+ * establishes the mapping automatically, depending on the memory
+ * type defined for the region.
+ *
+ * @{
+ */
+/**
+ * No memory region. Use this type code to disable an entry in the
+ * array of memory mappings, i.e. udd_device.mem_regions[].
+ */
+#define UDD_MEM_NONE     0
+/**
+ * Physical I/O memory region. By default, the UDD core maps such
+ * memory to a virtual user range by calling the rtdm_mmap_iomem()
+ * service.
+ */
+#define UDD_MEM_PHYS     1
+/**
+ * Kernel logical memory region (e.g. kmalloc()). By default, the UDD
+ * core maps such memory to a virtual user range by calling the
+ * rtdm_mmap_kmem() service.
+ */
+#define UDD_MEM_LOGICAL  2
+/**
+ * Virtual memory region with no direct physical mapping
+ * (e.g. vmalloc()). By default, the UDD core maps such memory to a
+ * virtual user range by calling the rtdm_mmap_vmem() service.
+ */
+#define UDD_MEM_VIRTUAL  3
+/** @} */
+
+#define UDD_NR_MAPS  5
+
+/**
+ * @anchor udd_memory_region
+ * UDD memory region descriptor.
+ *
+ * This descriptor defines the characteristics of a memory region
+ * declared to the UDD core by the mini-driver. All valid regions
+ * should be declared in the udd_device.mem_regions[] array;
+ * invalid/unassigned ones should bear the UDD_MEM_NONE type.
+ *
+ * The UDD core exposes each region via the mmap(2) interface to the
+ * application. To this end, a companion mapper device is created
+ * automatically when registering the mini-driver.
+ *
+ * The mapper device creates special files in the RTDM namespace for
+ * reaching the individual regions, which the application can open and
+ * then map to its address space via the mmap(2) system call.
+ *
+ * For instance, declaring a region of physical memory at index #2 of
+ * the memory region array could be done as follows:
+ *
+ * @code
+ * static struct udd_device udd;
+ *
+ * static int foocard_pci_probe(struct pci_dev *dev, const struct pci_device_id *id)
+ * {
+ *      udd.device_name = "foocard";
+ *      ...
+ *      udd.mem_regions[2].name = "ADC";
+ *      udd.mem_regions[2].addr = pci_resource_start(dev, 1);
+ *      udd.mem_regions[2].len = pci_resource_len(dev, 1);
+ *      udd.mem_regions[2].type = UDD_MEM_PHYS;
+ *      ...
+ *      return udd_register_device(&udd);
+ * }
+ * @endcode
+ *
+ * This makes the region accessible via the mapper device, through the
+ * default ->mmap() handler from the UDD core, using the following
+ * sequence of code (see note):
+ *
+ * @code
+ * int fd, fdm;
+ * void *p;
+ *
+ * fd = open("/dev/rtdm/foocard", O_RDWR);
+ * fdm = open("/dev/rtdm/foocard,mapper2", O_RDWR);
+ * p = mmap(NULL, 4096, PROT_READ|PROT_WRITE, MAP_SHARED, fdm, 0);
+ * @endcode
+ *
+ * If no valid region has been declared in the
+ * udd_device.mem_regions[] array, no mapper device is created.
+ *
+ * @note The example code assumes that @ref cobalt_api POSIX symbol
+ * wrapping is in effect, so that RTDM performs the memory mapping
+ * operation (not the regular kernel).
+ */
+struct udd_memregion {
+	/** Name of the region (informational but required) */
+	const char *name;
+	/**
+	 * Start address of the region. This may be a physical or
+	 * virtual address, depending on the @ref udd_memory_types
+	 * "memory type".
+	 */
+	unsigned long addr;
+	/**
+	 * Length (in bytes) of the region. This value must be
+	 * PAGE_SIZE aligned.
+	 */
+	size_t len;
+	/**
+	 * Type of the region. See the discussion about @ref
+	 * udd_memory_types "UDD memory types" for possible values.
+	 */
+	int type;
+};
+
+/**
+ * @anchor udd_device
+ * UDD device descriptor.
+ *
+ * This descriptor defines the characteristics of a UDD-based
+ * mini-driver when registering via a call to udd_register_device().
+ */
+struct udd_device {
+	/**
+	 * Name of the device managed by the mini-driver, appears
+	 * automatically in the /dev/rtdm namespace upon creation.
+	 */
+	const char *device_name;
+	/**
+	 * Additional device flags (e.g. RTDM_EXCLUSIVE;
+	 * RTDM_NAMED_DEVICE may be omitted).
+	 */
+	int device_flags;
+	/**
+	 * Subclass code of the device managed by the mini-driver (see
+	 * RTDM_SUBCLASS_xxx definition in the @ref rtdm_profiles
+	 * "Device Profiles"). The main class code is pre-set to
+	 * RTDM_CLASS_UDD.
+	 */
+	int device_subclass;
+	struct {
+		/**
+		 * Ancillary open() handler, optional. See
+		 * rtdm_open_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*open)(struct rtdm_fd *fd, int oflags);
+		/**
+		 * Ancillary close() handler, optional. See
+		 * rtdm_close_handler().
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		void (*close)(struct rtdm_fd *fd);
+		/**
+		 * Ancillary ioctl() handler, optional. See
+		 * rtdm_ioctl_handler().
+		 *
+		 * If this routine returns -ENOSYS, the default action
+		 * implemented by the UDD core for the corresponding
+		 * request will be applied, as if no ioctl handler had
+		 * been defined.
+		 *
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*ioctl)(struct rtdm_fd *fd,
+			     unsigned int request, void *arg);
+		/**
+		 * Ancillary mmap() handler for the mapper device,
+		 * optional. See rtdm_mmap_handler(). The mapper
+		 * device operates on a valid region defined in the @a
+		 * mem_regions[] array. A pointer to the region can be
+		 * obtained by a call to udd_get_region().
+		 *
+		 * If this handler is NULL, the UDD core establishes
+		 * the mapping automatically, depending on the memory
+		 * type defined for the region.
+		 *
+		 * @note This handler is called from secondary mode
+		 * only.
+		 */
+		int (*mmap)(struct rtdm_fd *fd,
+			    struct vm_area_struct *vma);
+		/**
+		 * @anchor udd_irq_handler
+		 *
+		 * Ancillary handler for receiving interrupts. This
+		 * handler must be provided if the mini-driver hands
+		 * over IRQ handling to the UDD core, by setting the
+		 * @a irq field to a valid value, different from
+		 * UDD_IRQ_CUSTOM and UDD_IRQ_NONE.
+		 *
+		 * The ->interrupt() handler shall return one of the
+		 * following status codes:
+		 *
+		 * - RTDM_IRQ_HANDLED, if the mini-driver successfully
+		 * handled the IRQ. This flag can be combined with
+		 * RTDM_IRQ_DISABLE to prevent the Cobalt kernel from
+		 * re-enabling the interrupt line upon return,
+		 * otherwise it is re-enabled automatically.
+		 *
+		 * - RTDM_IRQ_NONE, if the interrupt does not match
+		 * any IRQ the mini-driver can handle.
+		 *
+		 * Once the ->interrupt() handler has returned, the
+		 * UDD core notifies user-space Cobalt threads waiting
+		 * for IRQ events (if any).
+		 *
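+		 * A minimal handler sketch; "regs", IRQ_STAT and
+		 * IRQ_PENDING are hypothetical device-specific
+		 * definitions:
+		 *
+		 * @code
+		 * static int foocard_interrupt(struct udd_device *udd)
+		 * {
+		 *	u32 stat = readl(regs + IRQ_STAT);
+		 *
+		 *	if (!(stat & IRQ_PENDING))
+		 *		return RTDM_IRQ_NONE;
+		 *
+		 *	writel(stat, regs + IRQ_STAT);
+		 *	return RTDM_IRQ_HANDLED;
+		 * }
+		 * @endcode
+		 *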
+		 * @note This handler is called from primary mode
+		 * only.
+		 */
+		int (*interrupt)(struct udd_device *udd);
+	} ops;
+	/**
+	 * IRQ number. If valid, the UDD core manages the
+	 * corresponding interrupt line, installing a base handler.
+	 * Otherwise, a special value can be passed for declaring
+	 * @ref udd_irq_special "unmanaged IRQs".
+	 */
+	int irq;
+	/**
+	 * Array of memory regions defined by the device. The array
+	 * can be sparse, with some entries bearing the UDD_MEM_NONE
+	 * type interleaved with valid ones.  See the discussion about
+	 * @ref udd_memory_region "UDD memory regions".
+	 */
+	struct udd_memregion mem_regions[UDD_NR_MAPS];
+	/** Reserved to the UDD core. */
+	struct udd_reserved {
+		rtdm_irq_t irqh;
+		u32 event_count;
+		struct udd_signotify signfy;
+		struct rtdm_event pulse;
+		struct rtdm_driver driver;
+		struct rtdm_device device;
+		struct rtdm_driver mapper_driver;
+		struct udd_mapper {
+			struct udd_device *udd;
+			struct rtdm_device dev;
+		} mapdev[UDD_NR_MAPS];
+		char *mapper_name;
+		int nr_maps;
+	} __reserved;
+};
+
+int udd_register_device(struct udd_device *udd);
+
+int udd_unregister_device(struct udd_device *udd);
+
+struct udd_device *udd_get_device(struct rtdm_fd *fd);
+
+void udd_notify_event(struct udd_device *udd);
+
+void udd_enable_irq(struct udd_device *udd,
+		    rtdm_event_t *done);
+
+void udd_disable_irq(struct udd_device *udd,
+		     rtdm_event_t *done);
+
+/** @} */
+
+#endif /* !_COBALT_RTDM_UDD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h
new file mode 100644
index 0000000..75efdec
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-idle.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_IDLE_H
+#define _COBALT_KERNEL_SCHED_IDLE_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-idle.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Idle priority level - actually never used for indexing. */
+#define XNSCHED_IDLE_PRIO	-1
+
+extern struct xnsched_class xnsched_class_idle;
+
+static inline bool __xnsched_idle_setparam(struct xnthread *thread,
+					   const union xnsched_policy_param *p)
+{
+	xnthread_clear_state(thread, XNWEAK);
+	return xnsched_set_effective_priority(thread, p->idle.prio);
+}
+
+static inline void __xnsched_idle_getparam(struct xnthread *thread,
+					   union xnsched_policy_param *p)
+{
+	p->idle.prio = thread->cprio;
+}
+
+static inline void __xnsched_idle_trackprio(struct xnthread *thread,
+					    const union xnsched_policy_param *p)
+{
+	if (p)
+		/* Inheriting a priority-less class makes no sense. */
+		XENO_WARN_ON_ONCE(COBALT, 1);
+	else
+		thread->cprio = XNSCHED_IDLE_PRIO;
+}
+
+static inline void __xnsched_idle_protectprio(struct xnthread *thread, int prio)
+{
+	XENO_WARN_ON_ONCE(COBALT, 1);
+}
+
+static inline int xnsched_idle_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_IDLE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h
new file mode 100644
index 0000000..57a46a9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-quota.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_QUOTA_H
+#define _COBALT_KERNEL_SCHED_QUOTA_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-quota.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+#define XNSCHED_QUOTA_MIN_PRIO	1
+#define XNSCHED_QUOTA_MAX_PRIO	255
+#define XNSCHED_QUOTA_NR_PRIO	\
+	(XNSCHED_QUOTA_MAX_PRIO - XNSCHED_QUOTA_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_quota;
+
+struct xnsched_quota_group {
+	struct xnsched *sched;
+	xnticks_t quota_ns;
+	xnticks_t quota_peak_ns;
+	xnticks_t run_start_ns;
+	xnticks_t run_budget_ns;
+	xnticks_t run_credit_ns;
+	struct list_head members;
+	struct list_head expired;
+	struct list_head next;
+	int nr_active;
+	int nr_threads;
+	int tgid;
+	int quota_percent;
+	int quota_peak_percent;
+};
+
+struct xnsched_quota {
+	xnticks_t period_ns;
+	struct xntimer refill_timer;
+	struct xntimer limit_timer;
+	struct list_head groups;
+};
+
+static inline int xnsched_quota_init_thread(struct xnthread *thread)
+{
+	thread->quota = NULL;
+	INIT_LIST_HEAD(&thread->quota_expired);
+
+	return 0;
+}
+
+int xnsched_quota_create_group(struct xnsched_quota_group *tg,
+			       struct xnsched *sched,
+			       int *quota_sum_r);
+
+int xnsched_quota_destroy_group(struct xnsched_quota_group *tg,
+				int force,
+				int *quota_sum_r);
+
+void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
+			     int quota_percent, int quota_peak_percent,
+			     int *quota_sum_r);
+
+struct xnsched_quota_group *
+xnsched_quota_find_group(struct xnsched *sched, int tgid);
+
+int xnsched_quota_sum_all(struct xnsched *sched);
+
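+/*
+ * A hedged sketch of group management (error handling elided; the
+ * locking context required by the scheduler core is assumed to be
+ * established by the caller):
+ *
+ * @code
+ * static struct xnsched_quota_group tg;
+ * int quota_sum;
+ *
+ * xnsched_quota_create_group(&tg, sched, &quota_sum);
+ * xnsched_quota_set_limit(&tg, 20, 30, &quota_sum);	// 20% quota, 30% peak
+ * @endcode
+ */
+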
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_QUOTA_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h
new file mode 100644
index 0000000..992a5ba
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-rt.h
@@ -0,0 +1,150 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_RT_H
+#define _COBALT_KERNEL_SCHED_RT_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-rt.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/*
+ * Global priority scale for Xenomai's core scheduling class,
+ * available to SCHED_COBALT members.
+ */
+#define XNSCHED_CORE_MIN_PRIO	0
+#define XNSCHED_CORE_MAX_PRIO	259
+#define XNSCHED_CORE_NR_PRIO	\
+	(XNSCHED_CORE_MAX_PRIO - XNSCHED_CORE_MIN_PRIO + 1)
+
+/*
+ * Priority range for SCHED_FIFO, and all other classes Cobalt
+ * implements except SCHED_COBALT.
+ */
+#define XNSCHED_FIFO_MIN_PRIO	1
+#define XNSCHED_FIFO_MAX_PRIO	256
+
+#if XNSCHED_CORE_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+  (defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&			\
+   XNSCHED_CORE_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "XNSCHED_MLQ_LEVELS is too low"
+#endif
+
+extern struct xnsched_class xnsched_class_rt;
+
+static inline void __xnsched_rt_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->sched->rt.runnable, thread);
+}
+
+static inline void __xnsched_rt_track_weakness(struct xnthread *thread)
+{
+	/*
+	 * We have to track threads exiting weak scheduling, i.e. any
+	 * thread leaving the WEAK class code if compiled in, or
+	 * assigned a zero priority if weak threads are hosted by the
+	 * RT class.
+	 *
+	 * CAUTION: since we need to check the effective priority
+	 * level for determining the weakness state, this can only
+	 * apply to non-boosted threads.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK) || thread->cprio)
+		xnthread_clear_state(thread, XNWEAK);
+	else
+		xnthread_set_state(thread, XNWEAK);
+}
+
+static inline bool __xnsched_rt_setparam(struct xnthread *thread,
+					 const union xnsched_policy_param *p)
+{
+	bool ret = xnsched_set_effective_priority(thread, p->rt.prio);
+
+	if (!xnthread_test_state(thread, XNBOOST))
+		__xnsched_rt_track_weakness(thread);
+
+	return ret;
+}
+
+static inline void __xnsched_rt_getparam(struct xnthread *thread,
+					 union xnsched_policy_param *p)
+{
+	p->rt.prio = thread->cprio;
+}
+
+static inline void __xnsched_rt_trackprio(struct xnthread *thread,
+					  const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->rt.prio; /* Force update. */
+	else {
+		thread->cprio = thread->bprio;
+		/* Leaving PI/PP, so non-boosted by definition. */
+		__xnsched_rt_track_weakness(thread);
+	}
+}
+
+static inline void __xnsched_rt_protectprio(struct xnthread *thread, int prio)
+{
+	/*
+	 * The RT class supports the widest priority range from
+	 * XNSCHED_CORE_MIN_PRIO to XNSCHED_CORE_MAX_PRIO inclusive,
+	 * no need to cap the input value which is guaranteed to be in
+	 * the range [1..XNSCHED_CORE_MAX_PRIO].
+	 */
+	thread->cprio = prio;
+}
+
+static inline void __xnsched_rt_forget(struct xnthread *thread)
+{
+}
+
+static inline int xnsched_rt_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+struct xnthread *xnsched_rt_pick(struct xnsched *sched);
+#else
+static inline struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	return xnsched_getq(&sched->rt.runnable);
+}
+#endif
+
+void xnsched_rt_tick(struct xnsched *sched);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_RT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h
new file mode 100644
index 0000000..50ca406
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-sporadic.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_SPORADIC_H
+#define _COBALT_KERNEL_SCHED_SPORADIC_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-sporadic.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+
+#define XNSCHED_SPORADIC_MIN_PRIO	1
+#define XNSCHED_SPORADIC_MAX_PRIO	255
+#define XNSCHED_SPORADIC_NR_PRIO	\
+	(XNSCHED_SPORADIC_MAX_PRIO - XNSCHED_SPORADIC_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_sporadic;
+
+struct xnsched_sporadic_repl {
+	xnticks_t date;
+	xnticks_t amount;
+};
+
+struct xnsched_sporadic_data {
+	xnticks_t resume_date;
+	xnticks_t budget;
+	int repl_in;
+	int repl_out;
+	int repl_pending;
+	struct xntimer repl_timer;
+	struct xntimer drop_timer;
+	struct xnsched_sporadic_repl repl_data[CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL];
+	struct xnsched_sporadic_param param;
+	struct xnthread *thread;
+};
+
+struct xnsched_sporadic {
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	unsigned long drop_retries;
+#endif
+};
+
+static inline int xnsched_sporadic_init_thread(struct xnthread *thread)
+{
+	thread->pss = NULL;
+
+	return 0;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_SPORADIC_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h
new file mode 100644
index 0000000..6ae5ff8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-tp.h
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_TP_H
+#define _COBALT_KERNEL_SCHED_TP_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-tp.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+
+#define XNSCHED_TP_MIN_PRIO	1
+#define XNSCHED_TP_MAX_PRIO	255
+#define XNSCHED_TP_NR_PRIO	\
+	(XNSCHED_TP_MAX_PRIO - XNSCHED_TP_MIN_PRIO + 1)
+
+extern struct xnsched_class xnsched_class_tp;
+
+struct xnsched_tp_window {
+	xnticks_t w_offset;
+	int w_part;
+};
+
+struct xnsched_tp_schedule {
+	int pwin_nr;
+	xnticks_t tf_duration;
+	atomic_t refcount;
+	struct xnsched_tp_window pwins[];
+};
+
+struct xnsched_tp {
+	struct xnsched_tpslot {
+		/** Per-partition runqueue. */
+		xnsched_queue_t runnable;
+	} partitions[CONFIG_XENO_OPT_SCHED_TP_NRPART];
+	/** Idle slot for passive windows. */
+	struct xnsched_tpslot idle;
+	/** Active partition slot */
+	struct xnsched_tpslot *tps;
+	/** Time frame timer */
+	struct xntimer tf_timer;
+	/** Global partition schedule */
+	struct xnsched_tp_schedule *gps;
+	/** Window index of next partition */
+	int wnext;
+	/** Start of next time frame */
+	xnticks_t tf_start;
+	/** Assigned thread queue */
+	struct list_head threads;
+};
+
+static inline int xnsched_tp_init_thread(struct xnthread *thread)
+{
+	thread->tps = NULL;
+
+	return 0;
+}
+
+struct xnsched_tp_schedule *
+xnsched_tp_set_schedule(struct xnsched *sched,
+			struct xnsched_tp_schedule *gps);
+
+void xnsched_tp_start_schedule(struct xnsched *sched);
+
+void xnsched_tp_stop_schedule(struct xnsched *sched);
+
+int xnsched_tp_get_partition(struct xnsched *sched);
+
+struct xnsched_tp_schedule *
+xnsched_tp_get_schedule(struct xnsched *sched);
+
+void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps);
+
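+/*
+ * A hedged sketch of installing a two-window schedule; the nanosecond
+ * time unit, the refcount initialization and the allocation size
+ * arithmetic are assumptions, and error handling is elided:
+ *
+ * @code
+ * struct xnsched_tp_schedule *gps;
+ *
+ * gps = kzalloc(sizeof(*gps) + 2 * sizeof(gps->pwins[0]), GFP_KERNEL);
+ * gps->pwin_nr = 2;
+ * gps->tf_duration = 1000000;		// 1 ms time frame
+ * gps->pwins[0].w_offset = 0;		// partition 0 runs first
+ * gps->pwins[0].w_part = 0;
+ * gps->pwins[1].w_offset = 500000;	// partition 1 after 500 us
+ * gps->pwins[1].w_part = 1;
+ * atomic_set(&gps->refcount, 1);
+ * xnsched_tp_set_schedule(sched, gps);
+ * xnsched_tp_start_schedule(sched);
+ * @endcode
+ */
+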
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_TP_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h
new file mode 100644
index 0000000..400aa73
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched-weak.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_WEAK_H
+#define _COBALT_KERNEL_SCHED_WEAK_H
+
+#ifndef _COBALT_KERNEL_SCHED_H
+#error "please don't include cobalt/kernel/sched-weak.h directly"
+#endif
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+
+#define XNSCHED_WEAK_MIN_PRIO	0
+#define XNSCHED_WEAK_MAX_PRIO	99
+#define XNSCHED_WEAK_NR_PRIO	\
+	(XNSCHED_WEAK_MAX_PRIO - XNSCHED_WEAK_MIN_PRIO + 1)
+
+#if XNSCHED_WEAK_NR_PRIO > XNSCHED_CLASS_WEIGHT_FACTOR ||	\
+	(defined(CONFIG_XENO_OPT_SCALABLE_SCHED) &&		\
+	 XNSCHED_WEAK_NR_PRIO > XNSCHED_MLQ_LEVELS)
+#error "WEAK class has too many priority levels"
+#endif
+
+extern struct xnsched_class xnsched_class_weak;
+
+struct xnsched_weak {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+static inline int xnsched_weak_init_thread(struct xnthread *thread)
+{
+	return 0;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_WEAK */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_WEAK_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h
new file mode 100644
index 0000000..aa24d54
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/sched.h
@@ -0,0 +1,674 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHED_H
+#define _COBALT_KERNEL_SCHED_H
+
+#include <linux/percpu.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/schedqueue.h>
+#include <cobalt/kernel/sched-tp.h>
+#include <cobalt/kernel/sched-weak.h>
+#include <cobalt/kernel/sched-sporadic.h>
+#include <cobalt/kernel/sched-quota.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/machine.h>
+#include <pipeline/sched.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+/* Sched status flags */
+#define XNRESCHED	0x10000000	/* Needs rescheduling */
+#define XNINSW		0x20000000	/* In context switch */
+#define XNINTCK		0x40000000	/* In master tick handler context */
+
+/* Sched local flags */
+#define XNIDLE		0x00010000	/* Idle (no outstanding timer) */
+#define XNHTICK		0x00008000	/* Host tick pending  */
+#define XNINIRQ		0x00004000	/* In IRQ handling context */
+#define XNHDEFER	0x00002000	/* Host tick deferred */
+
+/*
+ * Hardware timer is stopped.
+ */
+#define XNTSTOP		0x00000800
+
+struct xnsched_rt {
+	xnsched_queue_t runnable;	/*!< Runnable thread queue. */
+};
+
+/*!
+ * \brief Scheduling information structure.
+ */
+
+struct xnsched {
+	/*!< Scheduler specific status bitmask. */
+	unsigned long status;
+	/*!< Scheduler specific local flags bitmask. */
+	unsigned long lflags;
+	/*!< Current thread. */
+	struct xnthread *curr;
+#ifdef CONFIG_SMP
+	/*!< Owner CPU id. */
+	int cpu;
+	/*!< Mask of CPUs needing rescheduling. */
+	cpumask_t resched;
+#endif
+	/*!< Context of built-in real-time class. */
+	struct xnsched_rt rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	/*!< Context of weak scheduling class. */
+	struct xnsched_weak weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	/*!< Context of TP class. */
+	struct xnsched_tp tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	/*!< Context of sporadic scheduling class. */
+	struct xnsched_sporadic pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	/*!< Context of runtime quota scheduling. */
+	struct xnsched_quota quota;
+#endif
+	/*!< Interrupt nesting level. */
+	volatile unsigned inesting;
+	/*!< Host timer. */
+	struct xntimer htimer;
+	/*!< Round-robin timer. */
+	struct xntimer rrbtimer;
+	/*!< Root thread control block. */
+	struct xnthread rootcb;
+#ifdef CONFIG_XENO_ARCH_FPU
+	/*!< Thread owning the current FPU context. */
+	struct xnthread *fpuholder;
+#endif
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	/*!< Watchdog timer object. */
+	struct xntimer wdtimer;
+#endif
+#ifdef CONFIG_XENO_OPT_STATS
+	/*!< Last account switch date (ticks). */
+	xnticks_t last_account_switch;
+	/*!< Currently active account */
+	xnstat_exectime_t *current_account;
+#endif
+};
+
+DECLARE_PER_CPU(struct xnsched, nksched);
+
+extern cpumask_t cobalt_cpu_affinity;
+
+extern struct list_head nkthreadq;
+
+extern int cobalt_nrthreads;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_rev_tag nkthreadlist_tag;
+#endif
+
+union xnsched_policy_param;
+
+struct xnsched_class {
+	void (*sched_init)(struct xnsched *sched);
+	void (*sched_enqueue)(struct xnthread *thread);
+	void (*sched_dequeue)(struct xnthread *thread);
+	void (*sched_requeue)(struct xnthread *thread);
+	struct xnthread *(*sched_pick)(struct xnsched *sched);
+	void (*sched_tick)(struct xnsched *sched);
+	void (*sched_rotate)(struct xnsched *sched,
+			     const union xnsched_policy_param *p);
+	void (*sched_migrate)(struct xnthread *thread,
+			      struct xnsched *sched);
+	int (*sched_chkparam)(struct xnthread *thread,
+			      const union xnsched_policy_param *p);
+	/**
+	 * Set base scheduling parameters. This routine is indirectly
+	 * called upon a change of base scheduling settings through
+	 * __xnthread_set_schedparam() -> xnsched_set_policy(),
+	 * exclusively.
+	 *
+	 * The scheduling class implementation should do the necessary
+	 * housekeeping to comply with the new settings.
+	 * thread->base_class is up to date before the call is made,
+	 * and should be considered for the new weighted priority
+	 * calculation. On the contrary, thread->sched_class should
+	 * NOT be referred to by this handler.
+	 *
+	 * sched_setparam() is NEVER involved in PI or PP
+	 * management. However, it must deny a priority update if it
+	 * contradicts an ongoing boost for @a thread. This is
+	 * typically what the xnsched_set_effective_priority() helper
+	 * does for such a handler.
+	 *
+	 * @param thread Affected thread.
+	 * @param p New base policy settings.
+	 *
+	 * @return True if the effective priority was updated
+	 * (thread->cprio).
+	 */
+	bool (*sched_setparam)(struct xnthread *thread,
+			       const union xnsched_policy_param *p);
+	void (*sched_getparam)(struct xnthread *thread,
+			       union xnsched_policy_param *p);
+	void (*sched_trackprio)(struct xnthread *thread,
+				const union xnsched_policy_param *p);
+	void (*sched_protectprio)(struct xnthread *thread, int prio);
+	int (*sched_declare)(struct xnthread *thread,
+			     const union xnsched_policy_param *p);
+	void (*sched_forget)(struct xnthread *thread);
+	void (*sched_kick)(struct xnthread *thread);
+#ifdef CONFIG_XENO_OPT_VFILE
+	int (*sched_init_vfile)(struct xnsched_class *schedclass,
+				struct xnvfile_directory *vfroot);
+	void (*sched_cleanup_vfile)(struct xnsched_class *schedclass);
+#endif
+	int nthreads;
+	struct xnsched_class *next;
+	int weight;
+	int policy;
+	const char *name;
+};
+
+#define XNSCHED_CLASS_WEIGHT(n)		((n) * XNSCHED_CLASS_WEIGHT_FACTOR)
+
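+/*
+ * A minimal class skeleton (illustrative only; handler names are
+ * hypothetical and the subset shown is an assumption, not a statement
+ * of which handlers are strictly required):
+ *
+ * @code
+ * struct xnsched_class my_sched_class = {
+ *	.sched_init	 = my_init,
+ *	.sched_pick	 = my_pick,
+ *	.sched_enqueue	 = my_enqueue,
+ *	.sched_dequeue	 = my_dequeue,
+ *	.sched_requeue	 = my_requeue,
+ *	.sched_setparam	 = my_setparam,
+ *	.sched_getparam	 = my_getparam,
+ *	.sched_trackprio = my_trackprio,
+ *	.weight		 = XNSCHED_CLASS_WEIGHT(1),
+ *	.policy		 = SCHED_FIFO,
+ *	.name		 = "my",
+ * };
+ * @endcode
+ */
+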
+/* Placeholder for current thread priority */
+#define XNSCHED_RUNPRIO   0x80000000
+
+#define xnsched_for_each_thread(__thread)	\
+	list_for_each_entry(__thread, &nkthreadq, glink)
+
+#ifdef CONFIG_SMP
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return sched->cpu;
+}
+#else /* !CONFIG_SMP */
+static inline int xnsched_cpu(struct xnsched *sched)
+{
+	return 0;
+}
+#endif /* CONFIG_SMP */
+
+static inline struct xnsched *xnsched_struct(int cpu)
+{
+	return &per_cpu(nksched, cpu);
+}
+
+static inline struct xnsched *xnsched_current(void)
+{
+	/* IRQs off */
+	return raw_cpu_ptr(&nksched);
+}
+
+static inline struct xnthread *xnsched_current_thread(void)
+{
+	return xnsched_current()->curr;
+}
+
+/* Test resched flag of given sched. */
+static inline int xnsched_resched_p(struct xnsched *sched)
+{
+	return sched->status & XNRESCHED;
+}
+
+/* Set self resched flag for the current scheduler. */
+static inline void xnsched_set_self_resched(struct xnsched *sched)
+{
+	sched->status |= XNRESCHED;
+}
+
+/* Set resched flag for the given scheduler. */
+#ifdef CONFIG_SMP
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	struct xnsched *current_sched = xnsched_current();
+
+	if (current_sched == sched)
+		current_sched->status |= XNRESCHED;
+	else if (!xnsched_resched_p(sched)) {
+		cpumask_set_cpu(xnsched_cpu(sched), &current_sched->resched);
+		sched->status |= XNRESCHED;
+		current_sched->status |= XNRESCHED;
+	}
+}
+
+#define xnsched_realtime_cpus    cobalt_pipeline.supported_cpus
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &xnsched_realtime_cpus);
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return cpumask_test_cpu(cpu, &cobalt_cpu_affinity);
+}
+
+#else /* !CONFIG_SMP */
+
+static inline void xnsched_set_resched(struct xnsched *sched)
+{
+	xnsched_set_self_resched(sched);
+}
+
+#define xnsched_realtime_cpus CPU_MASK_ALL
+
+static inline int xnsched_supported_cpu(int cpu)
+{
+	return 1;
+}
+
+static inline int xnsched_threading_cpu(int cpu)
+{
+	return 1;
+}
+
+#endif /* !CONFIG_SMP */
+
+#define for_each_realtime_cpu(cpu)		\
+	for_each_online_cpu(cpu)		\
+		if (xnsched_supported_cpu(cpu))	\
+
+int ___xnsched_run(struct xnsched *sched);
+
+void __xnsched_run_handler(void);
+
+static inline int __xnsched_run(struct xnsched *sched)
+{
+	/*
+	 * Reschedule if XNRESCHED is pending, but never over an IRQ
+	 * handler or in the middle of unlocked context switch.
+	 */
+	if (((sched->status|sched->lflags) &
+	     (XNINIRQ|XNINSW|XNRESCHED)) != XNRESCHED)
+		return 0;
+
+	return pipeline_schedule(sched);
+}
+
+static inline int xnsched_run(void)
+{
+	struct xnsched *sched = xnsched_current();
+	/*
+	 * sched->curr is shared locklessly with ___xnsched_run().
+	 * READ_ONCE() makes sure the compiler never uses load tearing
+	 * for reading this pointer piecemeal, so that multiple stores
+	 * occurring concurrently on remote CPUs never yield a
+	 * spurious merged value on the local one.
+	 */
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	/*
+	 * If running over the root thread, hard irqs must be off
+	 * (asserted out of line in ___xnsched_run()).
+	 */
+	return curr->lock_count > 0 ? 0 : __xnsched_run(sched);
+}
+
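+/*
+ * Typical calling pattern: commit a pending rescheduling after a
+ * wakeup. A hedged sketch; the nucleus locking shown is assumed to
+ * match the caller's context:
+ *
+ * @code
+ * spl_t s;
+ *
+ * xnlock_get_irqsave(&nklock, s);
+ * xnthread_resume(thread, XNDELAY);
+ * xnsched_run();
+ * xnlock_put_irqrestore(&nklock, s);
+ * @endcode
+ */
+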
+void xnsched_lock(void);
+
+void xnsched_unlock(void);
+
+static inline int xnsched_interrupt_p(void)
+{
+	return xnsched_current()->lflags & XNINIRQ;
+}
+
+static inline int xnsched_root_p(void)
+{
+	return xnthread_test_state(xnsched_current_thread(), XNROOT);
+}
+
+static inline int xnsched_unblockable_p(void)
+{
+	return xnsched_interrupt_p() || xnsched_root_p();
+}
+
+static inline int xnsched_primary_p(void)
+{
+	return !xnsched_unblockable_p();
+}
+
+bool xnsched_set_effective_priority(struct xnthread *thread,
+				    int prio);
+
+#include <cobalt/kernel/sched-idle.h>
+#include <cobalt/kernel/sched-rt.h>
+
+int xnsched_init_proc(void);
+
+void xnsched_cleanup_proc(void);
+
+void xnsched_register_classes(void);
+
+void xnsched_init_all(void);
+
+void xnsched_destroy_all(void);
+
+struct xnthread *xnsched_pick_next(struct xnsched *sched);
+
+void xnsched_putback(struct xnthread *thread);
+
+int xnsched_set_policy(struct xnthread *thread,
+		       struct xnsched_class *sched_class,
+		       const union xnsched_policy_param *p);
+
+void xnsched_track_policy(struct xnthread *thread,
+			  struct xnthread *target);
+
+void xnsched_protect_priority(struct xnthread *thread,
+			      int prio);
+
+void xnsched_migrate(struct xnthread *thread,
+		     struct xnsched *sched);
+
+void xnsched_migrate_passive(struct xnthread *thread,
+			     struct xnsched *sched);
+
+/**
+ * @fn void xnsched_rotate(struct xnsched *sched, struct xnsched_class *sched_class, const union xnsched_policy_param *sched_param)
+ * @brief Rotate a scheduler runqueue.
+ *
+ * The specified scheduling class is requested to rotate its runqueue
+ * for the given scheduler. Rotation is performed according to the
+ * scheduling parameter specified by @a sched_param.
+ *
+ * @note The nucleus supports round-robin scheduling for the members
+ * of the RT class.
+ *
+ * @param sched The per-CPU scheduler hosting the target scheduling
+ * class.
+ *
+ * @param sched_class The scheduling class which should rotate its
+ * runqueue.
+ *
+ * @param sched_param The scheduling parameter providing rotation
+ * information to the specified scheduling class.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline void xnsched_rotate(struct xnsched *sched,
+				  struct xnsched_class *sched_class,
+				  const union xnsched_policy_param *sched_param)
+{
+	sched_class->sched_rotate(sched, sched_param);
+}
+
+static inline int xnsched_init_thread(struct xnthread *thread)
+{
+	int ret = 0;
+
+	xnsched_idle_init_thread(thread);
+	xnsched_rt_init_thread(thread);
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	ret = xnsched_tp_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_TP */
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	ret = xnsched_sporadic_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_SPORADIC */
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	ret = xnsched_quota_init_thread(thread);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+	return ret;
+}
+
+static inline int xnsched_root_priority(struct xnsched *sched)
+{
+	return sched->rootcb.cprio;
+}
+
+static inline struct xnsched_class *xnsched_root_class(struct xnsched *sched)
+{
+	return sched->rootcb.sched_class;
+}
+
+static inline void xnsched_tick(struct xnsched *sched)
+{
+	struct xnthread *curr = sched->curr;
+	struct xnsched_class *sched_class = curr->sched_class;
+	/*
+	 * A thread that undergoes round-robin scheduling only
+	 * consumes its time slice when it runs within its own
+	 * scheduling class, which excludes temporary PI boosts, and
+	 * does not hold the scheduler lock.
+	 */
+	if (sched_class == curr->base_class &&
+	    sched_class->sched_tick &&
+	    xnthread_test_state(curr, XNTHREAD_BLOCK_BITS|XNRRB) == XNRRB &&
+	    curr->lock_count == 0)
+		sched_class->sched_tick(sched);
+}
+
+static inline int xnsched_chkparam(struct xnsched_class *sched_class,
+				   struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	if (sched_class->sched_chkparam)
+		return sched_class->sched_chkparam(thread, p);
+
+	return 0;
+}
+
+static inline int xnsched_declare(struct xnsched_class *sched_class,
+				  struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	int ret;
+
+	if (sched_class->sched_declare) {
+		ret = sched_class->sched_declare(thread, p);
+		if (ret)
+			return ret;
+	}
+	if (sched_class != thread->base_class)
+		sched_class->nthreads++;
+
+	return 0;
+}
+
+static inline int xnsched_calc_wprio(struct xnsched_class *sched_class,
+				     int prio)
+{
+	return prio + sched_class->weight;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		sched_class->sched_requeue(thread);
+}
+
+static inline
+bool xnsched_setparam(struct xnthread *thread,
+		      const union xnsched_policy_param *p)
+{
+	return thread->base_class->sched_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	thread->sched_class->sched_trackprio(thread, p);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	thread->sched_class->sched_protectprio(thread, prio);
+	thread->wprio = xnsched_calc_wprio(thread->sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	--sched_class->nthreads;
+
+	if (sched_class->sched_forget)
+		sched_class->sched_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	xnthread_set_info(thread, XNKICKED);
+
+	if (sched_class->sched_kick)
+		sched_class->sched_kick(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/*
+ * If only the RT and IDLE scheduling classes are compiled in, we can
+ * fully inline common helpers for dealing with those.
+ */
+
+static inline void xnsched_enqueue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_enqueue(thread);
+}
+
+static inline void xnsched_dequeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_dequeue(thread);
+}
+
+static inline void xnsched_requeue(struct xnthread *thread)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class != &xnsched_class_idle)
+		__xnsched_rt_requeue(thread);
+}
+
+static inline bool xnsched_setparam(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->base_class;
+
+	if (sched_class == &xnsched_class_idle)
+		return __xnsched_idle_setparam(thread, p);
+
+	return __xnsched_rt_setparam(thread, p);
+}
+
+static inline void xnsched_getparam(struct xnthread *thread,
+				    union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_getparam(thread, p);
+	else
+		__xnsched_rt_getparam(thread, p);
+}
+
+static inline void xnsched_trackprio(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_trackprio(thread, p);
+	else
+		__xnsched_rt_trackprio(thread, p);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_protectprio(struct xnthread *thread, int prio)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (sched_class == &xnsched_class_idle)
+		__xnsched_idle_protectprio(thread, prio);
+	else
+		__xnsched_rt_protectprio(thread, prio);
+
+	thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+}
+
+static inline void xnsched_forget(struct xnthread *thread)
+{
+	--thread->base_class->nthreads;
+	__xnsched_rt_forget(thread);
+}
+
+static inline void xnsched_kick(struct xnthread *thread)
+{
+	xnthread_set_info(thread, XNKICKED);
+	xnsched_set_resched(thread->sched);
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHED_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h
new file mode 100644
index 0000000..9da95aa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedparam.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDPARAM_H
+#define _COBALT_KERNEL_SCHEDPARAM_H
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+struct xnsched_idle_param {
+	int prio;
+};
+
+struct xnsched_weak_param {
+	int prio;
+};
+
+struct xnsched_rt_param {
+	int prio;
+};
+
+struct xnsched_tp_param {
+	int prio;
+	int ptid;	/* partition id. */
+};
+
+struct xnsched_sporadic_param {
+	xnticks_t init_budget;
+	xnticks_t repl_period;
+	int max_repl;
+	int low_prio;
+	int normal_prio;
+	int current_prio;
+};
+
+struct xnsched_quota_param {
+	int prio;
+	int tgid;	/* thread group id. */
+};
+
+union xnsched_policy_param {
+	struct xnsched_idle_param idle;
+	struct xnsched_rt_param rt;
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	struct xnsched_weak_param weak;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tp_param tp;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_param pss;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_param quota;
+#endif
+};
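+
+/*
+ * Usage sketch (illustrative, not part of the original code): a
+ * caller selecting a policy fills in the matching union member
+ * before handing it over to the scheduler core, e.g. for the RT
+ * class:
+ *
+ *	union xnsched_policy_param param;
+ *
+ *	param.rt.prio = 50;
+ *	xnsched_set_policy(thread, &xnsched_class_rt, &param);
+ */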
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDPARAM_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h
new file mode 100644
index 0000000..f7e87a3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/schedqueue.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SCHEDQUEUE_H
+#define _COBALT_KERNEL_SCHEDQUEUE_H
+
+#include <cobalt/kernel/list.h>
+
+/**
+ * @addtogroup cobalt_core_sched
+ * @{
+ */
+
+#define XNSCHED_CLASS_WEIGHT_FACTOR	1024
+
+#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
+
+#include <linux/bitmap.h>
+
+/*
+ * Multi-level priority queue, suitable for handling the runnable
+ * thread queue of the core scheduling class with O(1) property. We
+ * only manage a descending queuing order, i.e. highest numbered
+ * priorities come first.
+ */
+#define XNSCHED_MLQ_LEVELS  260	/* i.e. XNSCHED_CORE_NR_PRIO */
+
+struct xnsched_mlq {
+	int elems;
+	DECLARE_BITMAP(prio_map, XNSCHED_MLQ_LEVELS);
+	struct list_head heads[XNSCHED_MLQ_LEVELS];
+};
+
+struct xnthread;
+
+void xnsched_initq(struct xnsched_mlq *q);
+
+void xnsched_addq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+void xnsched_addq_tail(struct xnsched_mlq *q,
+		       struct xnthread *thread);
+
+void xnsched_delq(struct xnsched_mlq *q,
+		  struct xnthread *thread);
+
+struct xnthread *xnsched_getq(struct xnsched_mlq *q);
+
+static inline int xnsched_emptyq_p(struct xnsched_mlq *q)
+{
+	return q->elems == 0;
+}
+
+static inline int xnsched_weightq(struct xnsched_mlq *q)
+{
+	return find_first_bit(q->prio_map, XNSCHED_MLQ_LEVELS);
+}
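+
+/*
+ * find_first_bit() returns the lowest set bit index, so the O(1)
+ * highest-priority lookup above assumes that the run queue code maps
+ * priorities to bitmap levels in reverse order (presumably
+ * level = XNSCHED_MLQ_LEVELS - prio - 1) when queuing threads.
+ */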
+
+typedef struct xnsched_mlq xnsched_queue_t;
+
+#else /* ! CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+typedef struct list_head xnsched_queue_t;
+
+#define xnsched_initq(__q)			INIT_LIST_HEAD(__q)
+#define xnsched_emptyq_p(__q)			list_empty(__q)
+#define xnsched_addq(__q, __t)			list_add_prilf(__t, __q, cprio, rlink)
+#define xnsched_addq_tail(__q, __t)		list_add_priff(__t, __q, cprio, rlink)
+#define xnsched_delq(__q, __t)			(void)(__q), list_del(&(__t)->rlink)
+#define xnsched_getq(__q)							\
+	({									\
+		struct xnthread *__t = NULL;					\
+		if (!list_empty(__q))						\
+			__t = list_get_entry(__q, struct xnthread, rlink);	\
+		__t;								\
+	})
+#define xnsched_weightq(__q)						\
+	({								\
+		struct xnthread *__t;					\
+		__t = list_first_entry(__q, struct xnthread, rlink);	\
+		__t->cprio;						\
+	})
+
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *xnsched_findq(xnsched_queue_t *q, int prio);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SCHEDQUEUE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h
new file mode 100644
index 0000000..1bac45a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/select.h
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2008 Efixo <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SELECT_H
+#define _COBALT_KERNEL_SELECT_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_select
+ * @{
+ */
+
+#define XNSELECT_READ      0
+#define XNSELECT_WRITE     1
+#define XNSELECT_EXCEPT    2
+#define XNSELECT_MAX_TYPES 3
+
+struct xnselector {
+	struct xnsynch synchbase;
+	struct fds {
+		fd_set expected;
+		fd_set pending;
+	} fds[XNSELECT_MAX_TYPES];
+	struct list_head destroy_link;
+	struct list_head bindings; /* only used by xnselector_destroy */
+};
+
+#define __NFDBITS__	(8 * sizeof(unsigned long))
+#define __FDSET_LONGS__	(__FD_SETSIZE/__NFDBITS__)
+#define	__FDELT__(d)	((d) / __NFDBITS__)
+#define	__FDMASK__(d)	(1UL << ((d) % __NFDBITS__))
+
+static inline void __FD_SET__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS__;
+	unsigned long __rem = __fd % __NFDBITS__;
+
+	__fdsetp->fds_bits[__tmp] |= (1UL << __rem);
+}
+
+static inline void __FD_CLR__(unsigned long __fd, __kernel_fd_set *__fdsetp)
+{
+	unsigned long __tmp = __fd / __NFDBITS__;
+	unsigned long __rem = __fd % __NFDBITS__;
+
+	__fdsetp->fds_bits[__tmp] &= ~(1UL << __rem);
+}
+
+static inline int __FD_ISSET__(unsigned long __fd, const __kernel_fd_set *__p)
+{
+	unsigned long __tmp = __fd / __NFDBITS__;
+	unsigned long __rem = __fd % __NFDBITS__;
+
+	return (__p->fds_bits[__tmp] & (1UL << __rem)) != 0;
+}
+
+static inline void __FD_ZERO__(__kernel_fd_set *__p)
+{
+	unsigned long *__tmp = __p->fds_bits;
+	int __i;
+
+	__i = __FDSET_LONGS__;
+	while (__i) {
+		__i--;
+		*__tmp = 0;
+		__tmp++;
+	}
+}
+
+struct xnselect {
+	struct list_head bindings;
+};
+
+#define DECLARE_XNSELECT(name) struct xnselect name
+
+struct xnselect_binding {
+	struct xnselector *selector;
+	struct xnselect *fd;
+	unsigned int type;
+	unsigned int bit_index;
+	struct list_head link;  /* link in selected fds list. */
+	struct list_head slink; /* link in selector list */
+};
+
+void xnselect_init(struct xnselect *select_block);
+
+int xnselect_bind(struct xnselect *select_block,
+		  struct xnselect_binding *binding,
+		  struct xnselector *selector,
+		  unsigned int type,
+		  unsigned int bit_index,
+		  unsigned int state);
+
+int __xnselect_signal(struct xnselect *select_block, unsigned int state);
+
+/**
+ * Signal a file descriptor state change.
+ *
+ * @param select_block pointer to an @a xnselect structure representing the file
+ * descriptor whose state changed;
+ * @param state new value of the state.
+ *
+ * @retval 1 if rescheduling is needed;
+ * @retval 0 otherwise.
+ */
+static inline int
+xnselect_signal(struct xnselect *select_block, unsigned int state)
+{
+	if (!list_empty(&select_block->bindings))
+		return __xnselect_signal(select_block, state);
+
+	return 0;
+}
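+
+/*
+ * Usage sketch (illustrative, not from the original code): a driver
+ * which just made data available on a select()-able file descriptor
+ * would signal the state change, then trigger rescheduling whenever
+ * a selector was actually woken up (fd_read_block is a placeholder,
+ * xnsched_run() is assumed to be available to the caller):
+ *
+ *	if (xnselect_signal(&fd_read_block, 1))
+ *		xnsched_run();
+ */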
+
+void xnselect_destroy(struct xnselect *select_block);
+
+int xnselector_init(struct xnselector *selector);
+
+int xnselect(struct xnselector *selector,
+	     fd_set *out_fds[XNSELECT_MAX_TYPES],
+	     fd_set *in_fds[XNSELECT_MAX_TYPES],
+	     int nfds,
+	     xnticks_t timeout, xntmode_t timeout_mode);
+
+void xnselector_destroy(struct xnselector *selector);
+
+int xnselect_mount(void);
+
+int xnselect_umount(void);
+
+/** @} */
+
+#endif /* _COBALT_KERNEL_SELECT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h
new file mode 100644
index 0000000..3c059a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/stat.h
@@ -0,0 +1,152 @@
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2006 Dmitry Adamushko <dmitry.adamushko@gmail.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_STAT_H
+#define _COBALT_KERNEL_STAT_H
+
+#include <cobalt/kernel/clock.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_stat Thread runtime statistics
+ * @{
+ */
+#ifdef CONFIG_XENO_OPT_STATS
+
+typedef struct xnstat_exectime {
+	xnticks_t start;	/* Start of execution time accumulation */
+	xnticks_t total;	/* Accumulated execution time */
+} xnstat_exectime_t;
+
+/* Return current date which can be passed to other xnstat services for
+   immediate or lazy accounting. */
+#define xnstat_exectime_now() xnclock_core_read_raw()
+
+/* Accumulate exectime of the current account until the given date. */
+#define xnstat_exectime_update(sched, date) \
+do { \
+	xnticks_t __date = date; \
+	(sched)->current_account->total += \
+		__date - (sched)->last_account_switch; \
+	(sched)->last_account_switch = __date; \
+	/* All changes must be committed before changing the current_account \
+	   reference in sched (required for xnintr_sync_stat_references) */ \
+	smp_wmb(); \
+} while (0)
+
+/* Update the current account reference, returning the previous one. */
+#define xnstat_exectime_set_current(sched, new_account) \
+({ \
+	xnstat_exectime_t *__prev; \
+	__prev = (xnstat_exectime_t *) \
+		atomic_long_xchg((atomic_long_t *)&(sched)->current_account, \
+				 (long)(new_account)); \
+	__prev; \
+})
+
+/* Return the currently active accounting entity. */
+#define xnstat_exectime_get_current(sched) ((sched)->current_account)
+
+/* Finalize an account (no need to accumulate the exectime, just mark the
+   switch date and set the new account). */
+#define xnstat_exectime_finalize(sched, new_account) \
+do { \
+	(sched)->last_account_switch = xnclock_core_read_raw(); \
+	(sched)->current_account = (new_account); \
+} while (0)
+
+/* Obtain content of xnstat_exectime_t */
+#define xnstat_exectime_get_start(account)	((account)->start)
+#define xnstat_exectime_get_total(account)	((account)->total)
+
+/* Obtain last account switch date of considered sched */
+#define xnstat_exectime_get_last_switch(sched)	((sched)->last_account_switch)
+
+/* Reset statistics from inside the accounted entity (e.g. after CPU
+   migration). */
+#define xnstat_exectime_reset_stats(stat) \
+do { \
+	(stat)->total = 0; \
+	(stat)->start = xnclock_core_read_raw(); \
+} while (0)
+
+typedef struct xnstat_counter {
+	unsigned long counter;
+} xnstat_counter_t;
+
+static inline unsigned long xnstat_counter_inc(xnstat_counter_t *c)
+{
+	return c->counter++;
+}
+
+static inline unsigned long xnstat_counter_get(xnstat_counter_t *c)
+{
+	return c->counter;
+}
+
+static inline void xnstat_counter_set(xnstat_counter_t *c, unsigned long value)
+{
+	c->counter = value;
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+typedef struct xnstat_exectime {
+} xnstat_exectime_t;
+
+#define xnstat_exectime_now()					({ 0; })
+#define xnstat_exectime_update(sched, date)			do { } while (0)
+#define xnstat_exectime_set_current(sched, new_account)		({ (void)sched; NULL; })
+#define xnstat_exectime_get_current(sched)			({ (void)sched; NULL; })
+#define xnstat_exectime_finalize(sched, new_account)		do { } while (0)
+#define xnstat_exectime_get_start(account)			({ 0; })
+#define xnstat_exectime_get_total(account)			({ 0; })
+#define xnstat_exectime_get_last_switch(sched)			({ 0; })
+#define xnstat_exectime_reset_stats(account)			do { } while (0)
+
+typedef struct xnstat_counter {
+} xnstat_counter_t;
+
+#define xnstat_counter_inc(c) ({ do { } while(0); 0; })
+#define xnstat_counter_get(c) ({ 0; })
+#define xnstat_counter_set(c, value) do { } while (0)
+#endif /* CONFIG_XENO_OPT_STATS */
+
+/* Account the exectime of the current account until now, switch to
+   new_account, and return the previous one. */
+#define xnstat_exectime_switch(sched, new_account) \
+({ \
+	xnstat_exectime_update(sched, xnstat_exectime_now()); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
+
+/* Account the exectime of the current account until given start time, switch
+   to new_account, and return the previous one. */
+#define xnstat_exectime_lazy_switch(sched, new_account, date) \
+({ \
+	xnstat_exectime_update(sched, date); \
+	xnstat_exectime_set_current(sched, new_account); \
+})
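+
+/*
+ * Usage sketch (illustrative): switching the accounting entity over
+ * to the incoming thread at context-switch time, the way a scheduler
+ * core would (next is an assumed thread pointer):
+ *
+ *	xnstat_exectime_switch(sched, &next->stat.account);
+ */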
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_STAT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h
new file mode 100644
index 0000000..a2bf80d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/synch.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_SYNCH_H
+#define _COBALT_KERNEL_SYNCH_H
+
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/uapi/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+
+/**
+ * @addtogroup cobalt_core_synch
+ * @{
+ */
+#define XNSYNCH_CLAIMED  0x100	/* Claimed by other thread(s) (PI) */
+#define XNSYNCH_CEILING  0x200	/* Actively boosting (PP) */
+
+/* Spare flags usable by upper interfaces */
+#define XNSYNCH_SPARE0  0x01000000
+#define XNSYNCH_SPARE1  0x02000000
+#define XNSYNCH_SPARE2  0x04000000
+#define XNSYNCH_SPARE3  0x08000000
+#define XNSYNCH_SPARE4  0x10000000
+#define XNSYNCH_SPARE5  0x20000000
+#define XNSYNCH_SPARE6  0x40000000
+#define XNSYNCH_SPARE7  0x80000000
+
+/* Statuses */
+#define XNSYNCH_DONE    0	/* Resource available / operation complete */
+#define XNSYNCH_WAIT    1	/* Calling thread blocked -- start rescheduling */
+#define XNSYNCH_RESCHED 2	/* Force rescheduling */
+
+struct xnthread;
+struct xnsynch;
+
+struct xnsynch {
+	/** wait (weighted) prio in thread->boosters */
+	int wprio;
+	/** thread->boosters */
+	struct list_head next;
+	/**
+	 *  &variable holding the current priority ceiling value
+	 *  (xnsched_class_rt-based, [1..255], XNSYNCH_PP).
+	 */
+	u32 *ceiling_ref;
+	/** Status word */
+	unsigned long status;
+	/** Pending threads */
+	struct list_head pendq;
+	/** Thread which owns the resource */
+	struct xnthread *owner;
+	 /** Pointer to fast lock word */
+	atomic_t *fastlock;
+	/* Cleanup handler */
+	void (*cleanup)(struct xnsynch *synch);
+};
+
+#define XNSYNCH_WAITQUEUE_INITIALIZER(__name) {		\
+		.status = XNSYNCH_PRIO,			\
+		.wprio = -1,				\
+		.pendq = LIST_HEAD_INIT((__name).pendq),	\
+		.owner = NULL,				\
+		.cleanup = NULL,			\
+		.fastlock = NULL,			\
+	}
+
+#define DEFINE_XNWAITQ(__name)	\
+	struct xnsynch __name = XNSYNCH_WAITQUEUE_INITIALIZER(__name)
+
+static inline void xnsynch_set_status(struct xnsynch *synch, int bits)
+{
+	synch->status |= bits;
+}
+
+static inline void xnsynch_clear_status(struct xnsynch *synch, int bits)
+{
+	synch->status &= ~bits;
+}
+
+#define xnsynch_for_each_sleeper(__pos, __synch)		\
+	list_for_each_entry(__pos, &(__synch)->pendq, plink)
+
+#define xnsynch_for_each_sleeper_safe(__pos, __tmp, __synch)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__synch)->pendq, plink)
+
+static inline int xnsynch_pended_p(struct xnsynch *synch)
+{
+	return !list_empty(&synch->pendq);
+}
+
+static inline struct xnthread *xnsynch_owner(struct xnsynch *synch)
+{
+	return synch->owner;
+}
+
+#define xnsynch_fastlock(synch)		((synch)->fastlock)
+#define xnsynch_fastlock_p(synch)	((synch)->fastlock != NULL)
+#define xnsynch_owner_check(synch, thread) \
+	xnsynch_fast_owner_check((synch)->fastlock, thread->handle)
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper);
+
+void xnsynch_detect_boosted_relax(struct xnthread *owner);
+
+#else /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+static inline void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper) { }
+
+static inline void xnsynch_detect_boosted_relax(struct xnthread *owner) { }
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+void xnsynch_init(struct xnsynch *synch, int flags,
+		  atomic_t *fastlock);
+
+void xnsynch_init_protect(struct xnsynch *synch, int flags,
+			  atomic_t *fastlock, u32 *ceiling_ref);
+
+int xnsynch_destroy(struct xnsynch *synch);
+
+void xnsynch_commit_ceiling(struct xnthread *curr);
+
+static inline void xnsynch_register_cleanup(struct xnsynch *synch,
+					    void (*handler)(struct xnsynch *))
+{
+	synch->cleanup = handler;
+}
+
+int __must_check xnsynch_sleep_on(struct xnsynch *synch,
+				  xnticks_t timeout,
+				  xntmode_t timeout_mode);
+
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr);
+
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch,
+				 struct xnthread *sleeper);
+
+int __must_check xnsynch_acquire(struct xnsynch *synch,
+				 xnticks_t timeout,
+				 xntmode_t timeout_mode);
+
+int __must_check xnsynch_try_acquire(struct xnsynch *synch);
+
+bool xnsynch_release(struct xnsynch *synch, struct xnthread *thread);
+
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+
+int xnsynch_flush(struct xnsynch *synch, int reason);
+
+void xnsynch_requeue_sleeper(struct xnthread *thread);
+
+void xnsynch_forget_sleeper(struct xnthread *thread);
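+
+/*
+ * Usage sketch (illustrative, not from the original code): a minimal
+ * wait/wakeup sequence on a wait queue, both sides assumed to run
+ * with the nklock held, xnsched_run() assumed available:
+ *
+ *	DEFINE_XNWAITQ(waitq);
+ *
+ *	waiter:
+ *		ret = xnsynch_sleep_on(&waitq, XN_INFINITE, XN_RELATIVE);
+ *
+ *	waker:
+ *		if (xnsynch_wakeup_one_sleeper(&waitq) != NULL)
+ *			xnsched_run();
+ */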
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_SYNCH_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h
new file mode 100644
index 0000000..b79cb84
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/thread.h
@@ -0,0 +1,581 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_THREAD_H
+#define _COBALT_KERNEL_THREAD_H
+
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#include <pipeline/thread.h>
+#include <pipeline/inband_work.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/schedparam.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/kernel/thread.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/machine.h>
+#include <asm/xenomai/thread.h>
+
+/**
+ * @addtogroup cobalt_core_thread
+ * @{
+ */
+#define XNTHREAD_BLOCK_BITS   (XNSUSP|XNPEND|XNDELAY|XNDORMANT|XNRELAX|XNHELD|XNDBGSTOP)
+#define XNTHREAD_MODE_BITS    (XNRRB|XNWARN|XNTRAPLB)
+
+#define XNTHREAD_SIGDEBUG		0
+#define XNTHREAD_SIGSHADOW_HARDEN	1
+#define XNTHREAD_SIGSHADOW_BACKTRACE	2
+#define XNTHREAD_SIGSHADOW_HOME		3
+#define XNTHREAD_SIGTERM		4
+#define XNTHREAD_MAX_SIGNALS		5
+
+struct xnthread;
+struct xnsched;
+struct xnselector;
+struct xnsched_class;
+struct xnsched_tpslot;
+struct xnthread_personality;
+struct completion;
+
+struct lostage_signal {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+	int signo, sigval;
+	struct lostage_signal *self; /* Revisit: I-pipe requirement */
+};
+
+struct xnthread_init_attr {
+	struct xnthread_personality *personality;
+	cpumask_t affinity;
+	int flags;
+	const char *name;
+};
+
+struct xnthread_start_attr {
+	int mode;
+	void (*entry)(void *cookie);
+	void *cookie;
+};
+
+struct xnthread_wait_context {
+	int posted;
+};
+
+struct xnthread_personality {
+	const char *name;
+	unsigned int magic;
+	int xid;
+	atomic_t refcnt;
+	struct {
+		void *(*attach_process)(void);
+		void (*detach_process)(void *arg);
+		void (*map_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*relax_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*harden_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*move_thread)(struct xnthread *thread,
+							    int dest_cpu);
+		struct xnthread_personality *(*exit_thread)(struct xnthread *thread);
+		struct xnthread_personality *(*finalize_thread)(struct xnthread *thread);
+	} ops;
+	struct module *module;
+};
+
+struct xnthread {
+	struct xnarchtcb tcb;	/* Architecture-dependent block */
+
+	__u32 state;		/* Thread state flags */
+	__u32 info;		/* Thread information flags */
+	__u32 local_info;	/* Local thread information flags */
+
+	struct xnsched *sched;		/* Thread scheduler */
+	struct xnsched_class *sched_class; /* Current scheduling class */
+	struct xnsched_class *base_class; /* Base scheduling class */
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	struct xnsched_tpslot *tps;	/* Current partition slot for TP scheduling */
+	struct list_head tp_link;	/* Link in per-sched TP thread queue */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	struct xnsched_sporadic_data *pss; /* Sporadic scheduling data. */
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_group *quota; /* Quota scheduling group. */
+	struct list_head quota_expired;
+	struct list_head quota_next;
+#endif
+	cpumask_t affinity;	/* Processor affinity. */
+
+	/** Base priority (before PI/PP boost) */
+	int bprio;
+
+	/** Current (effective) priority */
+	int cprio;
+
+	/**
+	 * Weighted priority (cprio + scheduling class weight).
+	 */
+	int wprio;
+
+	int lock_count;	/**< Scheduler lock count. */
+
+	/**
+	 * Thread holder in xnsched run queue. Ordered by
+	 * thread->cprio.
+	 */
+	struct list_head rlink;
+
+	/**
+	 * Thread holder in xnsynch pendq. Prioritized by
+	 * thread->cprio + scheduling class weight.
+	 */
+	struct list_head plink;
+
+	/** Thread holder in global queue. */
+	struct list_head glink;
+
+	/**
+	 * List of xnsynch owned by this thread which cause a priority
+	 * boost due to one of the following reasons:
+	 *
+	 * - they are currently claimed by other thread(s) when
+	 * enforcing the priority inheritance protocol (XNSYNCH_PI).
+	 *
+	 * - they require immediate priority ceiling (XNSYNCH_PP).
+	 *
+	 * This list is ordered by decreasing (weighted) thread
+	 * priorities.
+	 */
+	struct list_head boosters;
+
+	struct xnsynch *wchan;		/* Resource the thread pends on */
+
+	struct xnsynch *wwake;		/* Wait channel the thread was resumed from */
+
+	int res_count;			/* Held resources count */
+
+	struct xntimer rtimer;		/* Resource timer */
+
+	struct xntimer ptimer;		/* Periodic timer */
+
+	xnticks_t rrperiod;		/* Allotted round-robin period (ns) */
+
+	struct xnthread_wait_context *wcontext;	/* Active wait context. */
+
+	struct {
+		xnstat_counter_t ssw;	/* Primary -> secondary mode switch count */
+		xnstat_counter_t csw;	/* Context switches (includes secondary -> primary switches) */
+		xnstat_counter_t xsc;	/* Xenomai syscalls */
+		xnstat_counter_t pf;	/* Number of page faults */
+		xnstat_exectime_t account; /* Execution time accounting entity */
+		xnstat_exectime_t lastperiod; /* Interval marker for execution time reports */
+	} stat;
+
+	struct xnselector *selector;    /* For select. */
+
+	xnhandle_t handle;	/* Handle in registry */
+
+	char name[XNOBJECT_NAME_LEN]; /* Symbolic name of thread */
+
+	void (*entry)(void *cookie); /* Thread entry routine */
+	void *cookie;		/* Cookie to pass to the entry routine */
+
+	/**
+	 * Thread data visible from userland through a window on the
+	 * global heap.
+	 */
+	struct xnthread_user_window *u_window;
+
+	struct xnthread_personality *personality;
+
+	struct completion exited;
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+	const char *exe_path;	/* Executable path */
+	u32 proghash;		/* Hash value for exe_path */
+#endif
+	struct lostage_signal sigarray[XNTHREAD_MAX_SIGNALS];
+};
+
+static inline int xnthread_get_state(const struct xnthread *thread)
+{
+	return thread->state;
+}
+
+static inline int xnthread_test_state(struct xnthread *thread, int bits)
+{
+	return thread->state & bits;
+}
+
+static inline void xnthread_set_state(struct xnthread *thread, int bits)
+{
+	thread->state |= bits;
+}
+
+static inline void xnthread_clear_state(struct xnthread *thread, int bits)
+{
+	thread->state &= ~bits;
+}
+
+static inline int xnthread_test_info(struct xnthread *thread, int bits)
+{
+	return thread->info & bits;
+}
+
+static inline void xnthread_set_info(struct xnthread *thread, int bits)
+{
+	thread->info |= bits;
+}
+
+static inline void xnthread_clear_info(struct xnthread *thread, int bits)
+{
+	thread->info &= ~bits;
+}
+
+static inline int xnthread_test_localinfo(struct xnthread *curr, int bits)
+{
+	return curr->local_info & bits;
+}
+
+static inline void xnthread_set_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info |= bits;
+}
+
+static inline void xnthread_clear_localinfo(struct xnthread *curr, int bits)
+{
+	curr->local_info &= ~bits;
+}
+
+static inline struct xnarchtcb *xnthread_archtcb(struct xnthread *thread)
+{
+	return &thread->tcb;
+}
+
+static inline int xnthread_base_priority(const struct xnthread *thread)
+{
+	return thread->bprio;
+}
+
+static inline int xnthread_current_priority(const struct xnthread *thread)
+{
+	return thread->cprio;
+}
+
+static inline struct task_struct *xnthread_host_task(struct xnthread *thread)
+{
+	return xnarch_host_task(xnthread_archtcb(thread));
+}
+
+#define xnthread_for_each_booster(__pos, __thread)		\
+	list_for_each_entry(__pos, &(__thread)->boosters, next)
+
+#define xnthread_for_each_booster_safe(__pos, __tmp, __thread)	\
+	list_for_each_entry_safe(__pos, __tmp, &(__thread)->boosters, next)
+
+#define xnthread_run_handler(__t, __h, __a...)				\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		if ((__p__)->ops.__h)					\
+			(__p__)->ops.__h(__t, ##__a);			\
+	} while (0)
+
+#define xnthread_run_handler_stack(__t, __h, __a...)			\
+	do {								\
+		struct xnthread_personality *__p__ = (__t)->personality;	\
+		do {							\
+			if ((__p__)->ops.__h == NULL)			\
+				break;					\
+			__p__ = (__p__)->ops.__h(__t, ##__a);		\
+		} while (__p__);					\
+	} while (0)
+
+static inline
+struct xnthread_wait_context *xnthread_get_wait_context(struct xnthread *thread)
+{
+	return thread->wcontext;
+}
+
+static inline
+int xnthread_register(struct xnthread *thread, const char *name)
+{
+	return xnregistry_enter(name, thread, &thread->handle, NULL);
+}
+
+static inline
+struct xnthread *xnthread_lookup(xnhandle_t threadh)
+{
+	struct xnthread *thread = xnregistry_lookup(threadh, NULL);
+	return thread && thread->handle == xnhandle_get_index(threadh) ? thread : NULL;
+}
+
+static inline void xnthread_sync_window(struct xnthread *thread)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_clear_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state & ~state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline
+void xnthread_set_sync_window(struct xnthread *thread, int state_bits)
+{
+	if (thread->u_window) {
+		thread->u_window->state = thread->state | state_bits;
+		thread->u_window->info = thread->info;
+	}
+}
+
+static inline int normalize_priority(int prio)
+{
+	return prio < MAX_RT_PRIO ? prio : MAX_RT_PRIO - 1;
+}
+
+int __xnthread_init(struct xnthread *thread,
+		    const struct xnthread_init_attr *attr,
+		    struct xnsched *sched,
+		    struct xnsched_class *sched_class,
+		    const union xnsched_policy_param *sched_param);
+
+void __xnthread_test_cancel(struct xnthread *curr);
+
+void __xnthread_cleanup(struct xnthread *curr);
+
+void __xnthread_discard(struct xnthread *thread);
+
+/**
+ * @fn struct xnthread *xnthread_current(void)
+ * @brief Retrieve the current Cobalt core TCB.
+ *
+ * Returns the address of the current Cobalt core thread descriptor,
+ * or NULL if running over a regular Linux task. This call is not
+ * affected by the current runtime mode of the core thread.
+ *
+ * @note The returned value may differ from xnsched_current_thread()
+ * called from the same context, since the latter returns the root
+ * thread descriptor for the current CPU if the caller is running in
+ * secondary mode.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_current(void)
+{
+	return pipeline_current()->thread;
+}
+
+/**
+ * @fn struct xnthread *xnthread_from_task(struct task_struct *p)
+ * @brief Retrieve the Cobalt core TCB attached to a Linux task.
+ *
+ * Returns the address of the Cobalt core thread descriptor attached
+ * to the Linux task @a p, or NULL if @a p is a regular Linux
+ * task. This call is not affected by the current runtime mode of the
+ * core thread.
+ *
+ * @coretags{unrestricted}
+ */
+static inline struct xnthread *xnthread_from_task(struct task_struct *p)
+{
+	return pipeline_thread_from_task(p);
+}
+
+/**
+ * @fn void xnthread_test_cancel(void)
+ * @brief Introduce a thread cancellation point.
+ *
+ * Terminates the current thread if a cancellation request is pending
+ * for it, i.e. if xnthread_cancel() was called.
+ *
+ * @coretags{mode-unrestricted}
+ */
+static inline void xnthread_test_cancel(void)
+{
+	struct xnthread *curr = xnthread_current();
+
+	if (curr && xnthread_test_info(curr, XNCANCELD))
+		__xnthread_test_cancel(curr);
+}
+
+static inline
+void xnthread_complete_wait(struct xnthread_wait_context *wc)
+{
+	wc->posted = 1;
+}
+
+static inline
+int xnthread_wait_complete_p(struct xnthread_wait_context *wc)
+{
+	return wc->posted;
+}
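+
+/*
+ * Sketch of the wait-context protocol (illustrative, assumed to run
+ * with the nklock held): the sleeper publishes a context before
+ * blocking, the wakeup side marks it complete via
+ * xnthread_complete_wait(), and the sleeper checks the outcome upon
+ * return:
+ *
+ *	struct xnthread_wait_context wc;
+ *
+ *	xnthread_prepare_wait(&wc);
+ *	ret = xnsynch_sleep_on(&synch, timeout, XN_RELATIVE);
+ *	if (ret == 0 && xnthread_wait_complete_p(&wc))
+ *		... the wait was posted, not merely unblocked ...
+ */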
+
+#ifdef CONFIG_XENO_ARCH_FPU
+void xnthread_switch_fpu(struct xnsched *sched);
+#else
+static inline void xnthread_switch_fpu(struct xnsched *sched) { }
+#endif /* CONFIG_XENO_ARCH_FPU */
+
+void xnthread_deregister(struct xnthread *thread);
+
+char *xnthread_format_status(unsigned long status,
+			     char *buf, int size);
+
+pid_t xnthread_host_pid(struct xnthread *thread);
+
+int xnthread_set_clock(struct xnthread *thread,
+		       struct xnclock *newclock);
+
+xnticks_t xnthread_get_timeout(struct xnthread *thread,
+			       xnticks_t ns);
+
+xnticks_t xnthread_get_period(struct xnthread *thread);
+
+void xnthread_prepare_wait(struct xnthread_wait_context *wc);
+
+int xnthread_init(struct xnthread *thread,
+		  const struct xnthread_init_attr *attr,
+		  struct xnsched_class *sched_class,
+		  const union xnsched_policy_param *sched_param);
+
+int xnthread_start(struct xnthread *thread,
+		   const struct xnthread_start_attr *attr);
+
+int xnthread_set_mode(int clrmask,
+		      int setmask);
+
+void xnthread_suspend(struct xnthread *thread,
+		      int mask,
+		      xnticks_t timeout,
+		      xntmode_t timeout_mode,
+		      struct xnsynch *wchan);
+
+void xnthread_resume(struct xnthread *thread,
+		     int mask);
+
+int xnthread_unblock(struct xnthread *thread);
+
+int xnthread_set_periodic(struct xnthread *thread,
+			  xnticks_t idate,
+			  xntmode_t timeout_mode,
+			  xnticks_t period);
+
+int xnthread_wait_period(unsigned long *overruns_r);
+
+int xnthread_set_slice(struct xnthread *thread,
+		       xnticks_t quantum);
+
+void xnthread_cancel(struct xnthread *thread);
+
+int xnthread_join(struct xnthread *thread, bool uninterruptible);
+
+int xnthread_harden(void);
+
+void xnthread_relax(int notify, int reason);
+
+void __xnthread_kick(struct xnthread *thread);
+
+void xnthread_kick(struct xnthread *thread);
+
+void __xnthread_demote(struct xnthread *thread);
+
+void xnthread_demote(struct xnthread *thread);
+
+void __xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_signal(struct xnthread *thread, int sig, int arg);
+
+void xnthread_pin_initial(struct xnthread *thread);
+
+void xnthread_call_mayday(struct xnthread *thread, int reason);
+
+static inline void xnthread_get_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK|XNDEBUG))
+		curr->res_count++;
+}
+
+static inline int xnthread_put_resource(struct xnthread *curr)
+{
+	if (xnthread_test_state(curr, XNWEAK) ||
+	    IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP)) {
+		if (unlikely(curr->res_count == 0)) {
+			if (xnthread_test_state(curr, XNWARN))
+				xnthread_signal(curr, SIGDEBUG,
+						SIGDEBUG_RESCNT_IMBALANCE);
+			return -EPERM;
+		}
+		curr->res_count--;
+	}
+
+	return 0;
+}
+
+static inline void xnthread_commit_ceiling(struct xnthread *curr)
+{
+	if (curr->u_window->pp_pending)
+		xnsynch_commit_ceiling(curr);
+}
+
+#ifdef CONFIG_SMP
+
+void xnthread_migrate_passive(struct xnthread *thread,
+			      struct xnsched *sched);
+#else
+
+static inline void xnthread_migrate_passive(struct xnthread *thread,
+					    struct xnsched *sched)
+{ }
+
+#endif
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+			      struct xnsched_class *sched_class,
+			      const union xnsched_policy_param *sched_param);
+
+int xnthread_set_schedparam(struct xnthread *thread,
+			    struct xnsched_class *sched_class,
+			    const union xnsched_policy_param *sched_param);
+
+int xnthread_killall(int grace, int mask);
+
+void __xnthread_propagate_schedparam(struct xnthread *curr);
+
+static inline void xnthread_propagate_schedparam(struct xnthread *curr)
+{
+	if (xnthread_test_info(curr, XNSCHEDP))
+		__xnthread_propagate_schedparam(curr);
+}
+
+extern struct xnthread_personality xenomai_personality;
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h
new file mode 100644
index 0000000..e48022f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/time.h
@@ -0,0 +1,31 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _COBALT_KERNEL_TIME_H
+#define _COBALT_KERNEL_TIME_H
+
+#include <linux/time.h>
+#include <linux/time64.h>
+
+/**
+ * Read struct __kernel_timespec from userspace and convert to
+ * struct timespec64
+ *
+ * @param ts The destination, will be filled
+ * @param uts The source, provided by an application
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_get_timespec64(struct timespec64 *ts,
+			  const struct __kernel_timespec __user *uts);
+
+/**
+ * Convert struct timespec64 to struct __kernel_timespec
+ * and copy to userspace
+ *
+ * @param ts The source, provided by kernel
+ * @param uts The destination, will be filled
+ * @return 0 on success, -EFAULT otherwise
+ */
+int cobalt_put_timespec64(const struct timespec64 *ts,
+			   struct __kernel_timespec __user *uts);
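+
+/*
+ * Usage sketch (illustrative): a syscall handler reading a timespec
+ * from userland and writing back an updated value (u_ts is an
+ * assumed __user pointer argument):
+ *
+ *	struct timespec64 ts;
+ *
+ *	if (cobalt_get_timespec64(&ts, u_ts))
+ *		return -EFAULT;
+ *	... work on ts ...
+ *	return cobalt_put_timespec64(&ts, u_ts);
+ */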
+
+#endif /* !_COBALT_KERNEL_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h
new file mode 100644
index 0000000..703a135
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/timer.h
@@ -0,0 +1,551 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_TIMER_H
+#define _COBALT_KERNEL_TIMER_H
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @addtogroup cobalt_core_timer
+ * @{
+ */
+#define XN_INFINITE   ((xnticks_t)0)
+#define XN_NONBLOCK   ((xnticks_t)-1)
+
+/* Timer modes */
+typedef enum xntmode {
+	XN_RELATIVE,
+	XN_ABSOLUTE,
+	XN_REALTIME
+} xntmode_t;
+
+/* Timer status */
+#define XNTIMER_DEQUEUED  0x00000001
+#define XNTIMER_KILLED    0x00000002
+#define XNTIMER_PERIODIC  0x00000004
+#define XNTIMER_REALTIME  0x00000008
+#define XNTIMER_FIRED     0x00000010
+#define XNTIMER_RUNNING   0x00000020
+#define XNTIMER_KGRAVITY  0x00000040
+#define XNTIMER_UGRAVITY  0x00000080
+#define XNTIMER_IGRAVITY  0	     /* most conservative */
+
+#define XNTIMER_GRAVITY_MASK	(XNTIMER_KGRAVITY|XNTIMER_UGRAVITY)
+#define XNTIMER_INIT_MASK	XNTIMER_GRAVITY_MASK
+
+/* These flags are available to the real-time interfaces */
+#define XNTIMER_SPARE0  0x01000000
+#define XNTIMER_SPARE1  0x02000000
+#define XNTIMER_SPARE2  0x04000000
+#define XNTIMER_SPARE3  0x08000000
+#define XNTIMER_SPARE4  0x10000000
+#define XNTIMER_SPARE5  0x20000000
+#define XNTIMER_SPARE6  0x40000000
+#define XNTIMER_SPARE7  0x80000000
+
+/* Timer priorities */
+#define XNTIMER_LOPRIO  (-999999999)
+#define XNTIMER_STDPRIO 0
+#define XNTIMER_HIPRIO  999999999
+
+struct xntlholder {
+	struct list_head link;
+	xnticks_t key;
+	int prio;
+};
+
+#define xntlholder_date(h)	((h)->key)
+#define xntlholder_prio(h)	((h)->prio)
+#define xntlist_init(q)		INIT_LIST_HEAD(q)
+#define xntlist_empty(q)	list_empty(q)
+
+static inline struct xntlholder *xntlist_head(struct list_head *q)
+{
+	if (list_empty(q))
+		return NULL;
+
+	return list_first_entry(q, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_next(struct list_head *q,
+					      struct xntlholder *h)
+{
+	if (list_is_last(&h->link, q))
+		return NULL;
+
+	return list_entry(h->link.next, struct xntlholder, link);
+}
+
+static inline struct xntlholder *xntlist_second(struct list_head *q,
+	struct xntlholder *h)
+{
+	return xntlist_next(q, h);
+}
+
+static inline void xntlist_insert(struct list_head *q, struct xntlholder *holder)
+{
+	struct xntlholder *p;
+
+	if (list_empty(q)) {
+		list_add(&holder->link, q);
+		return;
+	}
+
+	/*
+	 * Insert the new timer at the proper place in the single
+	 * queue. O(N) here, but this is the price for the increased
+	 * flexibility...
+	 */
+	list_for_each_entry_reverse(p, q, link) {
+		if ((xnsticks_t) (holder->key - p->key) > 0 ||
+		    (holder->key == p->key && holder->prio <= p->prio))
+			break;
+	}
+
+	list_add(&holder->link, &p->link);
+}
+
+#define xntlist_remove(q, h)			\
+	do {					\
+		(void)(q);			\
+		list_del(&(h)->link);		\
+	} while (0)
+
+#if defined(CONFIG_XENO_OPT_TIMER_RBTREE)
+
+#include <linux/rbtree.h>
+
+typedef struct {
+	unsigned long long date;
+	unsigned prio;
+	struct rb_node link;
+} xntimerh_t;
+
+#define xntimerh_date(h) ((h)->date)
+#define xntimerh_prio(h) ((h)->prio)
+#define xntimerh_init(h) do { } while (0)
+
+typedef struct {
+	struct rb_root root;
+	xntimerh_t *head;
+} xntimerq_t;
+
+#define xntimerq_init(q)			\
+	({					\
+		xntimerq_t *_q = (q);		\
+		_q->root = RB_ROOT;		\
+		_q->head = NULL;		\
+	})
+
+#define xntimerq_destroy(q) do { } while (0)
+#define xntimerq_empty(q) ((q)->head == NULL)
+
+#define xntimerq_head(q) ((q)->head)
+
+#define xntimerq_next(q, h)						\
+	({								\
+		struct rb_node *_node = rb_next(&(h)->link);		\
+		_node ? (container_of(_node, xntimerh_t, link)) : NULL; \
+	})
+
+#define xntimerq_second(q, h) xntimerq_next(q, h)
+
+void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder);
+
+static inline void xntimerq_remove(xntimerq_t *q, xntimerh_t *holder)
+{
+	if (holder == q->head)
+		q->head = xntimerq_second(q, holder);
+
+	rb_erase(&holder->link, &q->root);
+}
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)	((void) (i), xntimerq_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntimerq_next((q),(h)))
+
+#else /* CONFIG_XENO_OPT_TIMER_LIST */
+
+typedef struct xntlholder xntimerh_t;
+
+#define xntimerh_date(h)       xntlholder_date(h)
+#define xntimerh_prio(h)       xntlholder_prio(h)
+#define xntimerh_init(h)       do { } while (0)
+
+typedef struct list_head xntimerq_t;
+
+#define xntimerq_init(q)        xntlist_init(q)
+#define xntimerq_destroy(q)     do { } while (0)
+#define xntimerq_empty(q)       xntlist_empty(q)
+#define xntimerq_head(q)        xntlist_head(q)
+#define xntimerq_second(q, h)   xntlist_second((q),(h))
+#define xntimerq_insert(q, h)   xntlist_insert((q),(h))
+#define xntimerq_remove(q, h)   xntlist_remove((q),(h))
+
+typedef struct { } xntimerq_it_t;
+
+#define xntimerq_it_begin(q,i)  ((void) (i), xntlist_head(q))
+#define xntimerq_it_next(q,i,h) ((void) (i), xntlist_next((q),(h)))
+
+#endif /* CONFIG_XENO_OPT_TIMER_LIST */
+
+struct xnsched;
+
+struct xntimerdata {
+	xntimerq_t q;
+};
+
+static inline struct xntimerdata *
+xnclock_percpu_timerdata(struct xnclock *clock, int cpu)
+{
+	return per_cpu_ptr(clock->timerdata, cpu);
+}
+
+static inline struct xntimerdata *
+xnclock_this_timerdata(struct xnclock *clock)
+{
+	return raw_cpu_ptr(clock->timerdata);
+}
+
+struct xntimer {
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *clock;
+#endif
+	/** Link in timers list. */
+	xntimerh_t aplink;
+	struct list_head adjlink;
+	/** Timer status. */
+	unsigned long status;
+	/** Periodic interval (clock ticks, 0 == one shot). */
+	xnticks_t interval;
+	/** Periodic interval (nanoseconds, 0 == one shot). */
+	xnticks_t interval_ns;
+	/** Count of timer ticks in periodic mode. */
+	xnticks_t periodic_ticks;
+	/** First tick date in periodic mode. */
+	xnticks_t start_date;
+	/** Date of next periodic release point (timer ticks). */
+	xnticks_t pexpect_ticks;
+	/** Sched structure to which the timer is attached. */
+	struct xnsched *sched;
+	/** Timeout handler. */
+	void (*handler)(struct xntimer *timer);
+#ifdef CONFIG_XENO_OPT_STATS
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	struct xnclock *tracker;
+#endif
+	/** Timer name to be displayed. */
+	char name[XNOBJECT_NAME_LEN];
+	/** Timer holder in timebase. */
+	struct list_head next_stat;
+	/** Number of timer schedules. */
+	xnstat_counter_t scheduled;
+	/** Number of timer events. */
+	xnstat_counter_t fired;
+#endif /* CONFIG_XENO_OPT_STATS */
+};
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return timer->clock;
+}
+
+void xntimer_set_clock(struct xntimer *timer,
+		       struct xnclock *newclock);
+
+#else /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+static inline struct xnclock *xntimer_clock(struct xntimer *timer)
+{
+	return &nkclock;
+}
+
+static inline void xntimer_set_clock(struct xntimer *timer,
+				     struct xnclock *newclock)
+{
+	XENO_BUG_ON(COBALT, newclock != &nkclock);
+}
+
+#endif /* !CONFIG_XENO_OPT_EXTCLOCK */
+
+#ifdef CONFIG_SMP
+static inline struct xnsched *xntimer_sched(struct xntimer *timer)
+{
+	return timer->sched;
+}
+#else /* !CONFIG_SMP */
+#define xntimer_sched(t)	xnsched_current()
+#endif /* !CONFIG_SMP */
+
+#define xntimer_percpu_queue(__timer)					\
+	({								\
+		struct xntimerdata *tmd;				\
+		int cpu = xnsched_cpu((__timer)->sched);		\
+		tmd = xnclock_percpu_timerdata(xntimer_clock(__timer), cpu); \
+		&tmd->q;						\
+	})
+
+static inline unsigned long xntimer_gravity(struct xntimer *timer)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+
+	if (timer->status & XNTIMER_KGRAVITY)
+		return clock->gravity.kernel;
+
+	if (timer->status & XNTIMER_UGRAVITY)
+		return clock->gravity.user;
+
+	return clock->gravity.irq;
+}
+
+static inline void xntimer_update_date(struct xntimer *timer)
+{
+	xntimerh_date(&timer->aplink) = timer->start_date
+		+ xnclock_ns_to_ticks(xntimer_clock(timer),
+			timer->periodic_ticks * timer->interval_ns)
+		- xntimer_gravity(timer);
+}
+
+static inline xnticks_t xntimer_pexpect(struct xntimer *timer)
+{
+	return timer->start_date +
+		xnclock_ns_to_ticks(xntimer_clock(timer),
+				timer->pexpect_ticks * timer->interval_ns);
+}
+
+static inline void xntimer_set_priority(struct xntimer *timer,
+					int prio)
+{
+	xntimerh_prio(&timer->aplink) = prio;
+}
+
+static inline int xntimer_active_p(struct xntimer *timer)
+{
+	return timer->sched != NULL;
+}
+
+static inline int xntimer_running_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_RUNNING) != 0;
+}
+
+static inline int xntimer_fired_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_FIRED) != 0;
+}
+
+static inline int xntimer_periodic_p(struct xntimer *timer)
+{
+	return (timer->status & XNTIMER_PERIODIC) != 0;
+}
+
+void __xntimer_init(struct xntimer *timer,
+		    struct xnclock *clock,
+		    void (*handler)(struct xntimer *timer),
+		    struct xnsched *sched,
+		    int flags);
+
+void xntimer_set_gravity(struct xntimer *timer,
+			 int gravity);
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+#define xntimer_init(__timer, __clock, __handler, __sched, __flags)	\
+do {									\
+	__xntimer_init(__timer, __clock, __handler, __sched, __flags);	\
+	xntimer_set_name(__timer, #__handler);				\
+} while (0)
+
+static inline void xntimer_reset_stats(struct xntimer *timer)
+{
+	xnstat_counter_set(&timer->scheduled, 0);
+	xnstat_counter_set(&timer->fired, 0);
+}
+
+static inline void xntimer_account_scheduled(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->scheduled);
+}
+
+static inline void xntimer_account_fired(struct xntimer *timer)
+{
+	xnstat_counter_inc(&timer->fired);
+}
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name)
+{
+	knamecpy(timer->name, name);
+}
+
+#else /* !CONFIG_XENO_OPT_STATS */
+
+#define xntimer_init	__xntimer_init
+
+static inline void xntimer_reset_stats(struct xntimer *timer) { }
+
+static inline void xntimer_account_scheduled(struct xntimer *timer) { }
+
+static inline void xntimer_account_fired(struct xntimer *timer) { }
+
+static inline void xntimer_set_name(struct xntimer *timer, const char *name) { }
+
+#endif /* !CONFIG_XENO_OPT_STATS */
+
+#if defined(CONFIG_XENO_OPT_EXTCLOCK) && defined(CONFIG_XENO_OPT_STATS)
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock);
+#else
+static inline
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock) { }
+#endif
+
+void xntimer_destroy(struct xntimer *timer);
+
+/**
+ * @fn xnticks_t xntimer_interval(struct xntimer *timer)
+ *
+ * @brief Return the timer interval value.
+ *
+ * Return the timer interval value in nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The duration of a period in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled or
+ * one shot.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+static inline xnticks_t xntimer_interval(struct xntimer *timer)
+{
+	return timer->interval_ns;
+}
+
+static inline xnticks_t xntimer_expiry(struct xntimer *timer)
+{
+	/* Real expiry date in ticks without anticipation (no gravity) */
+	return xntimerh_date(&timer->aplink) + xntimer_gravity(timer);
+}
+
+int xntimer_start(struct xntimer *timer,
+		xnticks_t value,
+		xnticks_t interval,
+		xntmode_t mode);
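+
+/*
+ * Usage sketch (illustrative, my_handler is a placeholder): arming a
+ * 1 ms periodic timer on the core clock; passing a NULL sched is
+ * assumed to bind the timer to the current CPU:
+ *
+ *	static void my_handler(struct xntimer *timer) { ... }
+ *
+ *	struct xntimer t;
+ *
+ *	xntimer_init(&t, &nkclock, my_handler, NULL, XNTIMER_IGRAVITY);
+ *	xntimer_start(&t, 1000000, 1000000, XN_RELATIVE);
+ */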
+
+void __xntimer_stop(struct xntimer *timer);
+
+xnticks_t xntimer_get_date(struct xntimer *timer);
+
+xnticks_t __xntimer_get_timeout(struct xntimer *timer);
+
+xnticks_t xntimer_get_interval(struct xntimer *timer);
+
+int xntimer_heading_p(struct xntimer *timer);
+
+static inline void xntimer_stop(struct xntimer *timer)
+{
+	if (timer->status & XNTIMER_RUNNING)
+		__xntimer_stop(timer);
+}
+
+static inline xnticks_t xntimer_get_timeout(struct xntimer *timer)
+{
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return __xntimer_get_timeout(timer);
+}
+
+static inline xnticks_t xntimer_get_timeout_stopped(struct xntimer *timer)
+{
+	return __xntimer_get_timeout(timer);
+}
+
+static inline void xntimer_enqueue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_insert(q, &timer->aplink);
+	timer->status &= ~XNTIMER_DEQUEUED;
+	xntimer_account_scheduled(timer);
+}
+
+static inline void xntimer_dequeue(struct xntimer *timer,
+				   xntimerq_t *q)
+{
+	xntimerq_remove(q, &timer->aplink);
+	timer->status |= XNTIMER_DEQUEUED;
+}
+
+unsigned long long xntimer_get_overruns(struct xntimer *timer,
+					struct xnthread *waiter,
+					xnticks_t now);
+
+#ifdef CONFIG_SMP
+
+void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched);
+
+static inline
+void xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	if (timer->sched != sched)
+		__xntimer_migrate(timer, sched);
+}
+
+void __xntimer_set_affinity(struct xntimer *timer,
+			    struct xnsched *sched);
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	if (sched != xntimer_sched(timer))
+		__xntimer_set_affinity(timer, sched);
+}
+
+#else /* ! CONFIG_SMP */
+
+static inline void xntimer_migrate(struct xntimer *timer,
+				   struct xnsched *sched)
+{
+	timer->sched = sched;
+}
+
+static inline void xntimer_set_affinity(struct xntimer *timer,
+					struct xnsched *sched)
+{
+	xntimer_migrate(timer, sched);
+}
+
+#endif /* CONFIG_SMP */
+
+char *xntimer_format_time(xnticks_t ns,
+			  char *buf, size_t bufsz);
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_TIMER_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h
new file mode 100644
index 0000000..e46dd4e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/trace.h
@@ -0,0 +1,10 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#ifndef _COBALT_KERNEL_TRACE_H
+#define _COBALT_KERNEL_TRACE_H
+
+#include <pipeline/trace.h>
+
+#endif /* !_COBALT_KERNEL_TRACE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h
new file mode 100644
index 0000000..c52ee32
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/tree.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_TREE_H
+#define _COBALT_KERNEL_TREE_H
+
+#include <linux/errno.h>
+#include <linux/rbtree.h>
+#include <cobalt/kernel/assert.h>
+
+typedef unsigned long long xnkey_t;
+
+static inline xnkey_t PTR_KEY(void *p)
+{
+	return (xnkey_t)(long)p;
+}
+
+struct xnid {
+	xnkey_t key;
+	struct rb_node link;
+};
+
+#define xnid_entry(ptr, type, member)					\
+	({								\
+		typeof(ptr) _ptr = (ptr);				\
+		(_ptr ? container_of(_ptr, type, member.link) : NULL);	\
+	})
+
+#define xnid_next_entry(ptr, member)				\
+	xnid_entry(rb_next(&ptr->member.link), typeof(*ptr), member)
+
+static inline void xntree_init(struct rb_root *t)
+{
+	*t = RB_ROOT;
+}
+
+#define xntree_for_each_entry(pos, root, member)			\
+	for (pos = xnid_entry(rb_first(root), typeof(*pos), member);	\
+	     pos; pos = xnid_next_entry(pos, member))
+
+void xntree_cleanup(struct rb_root *t, void *cookie,
+		void (*destroy)(void *cookie, struct xnid *id));
+
+int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key);
+
+static inline xnkey_t xnid_key(struct xnid *i)
+{
+	return i->key;
+}
+
+static inline
+struct xnid *xnid_fetch(struct rb_root *t, xnkey_t key)
+{
+	struct rb_node *node = t->rb_node;
+
+	while (node) {
+		struct xnid *i = container_of(node, struct xnid, link);
+
+		if (key < i->key)
+			node = node->rb_left;
+		else if (key > i->key)
+			node = node->rb_right;
+		else
+			return i;
+	}
+
+	return NULL;
+}
+
+static inline int xnid_remove(struct rb_root *t, struct xnid *xnid)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	if (xnid_fetch(t, xnid->key) != xnid)
+		return -ENOENT;
+#endif
+	rb_erase(&xnid->link, t);
+	return 0;
+}
+
+#endif /* _COBALT_KERNEL_TREE_H */
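As a usage illustration of the xnid API above, consider indexing hypothetical
session objects by an integer cookie. This is a sketch: the names are
invented, and the duplicate-key behavior of xnid_enter() is assumed rather
than documented here.

struct session {
	struct xnid id;
	int cookie;
};

static struct rb_root session_tree = RB_ROOT;

static int session_register(struct session *s)
{
	/* Assumed to fail if s->cookie is already indexed. */
	return xnid_enter(&session_tree, &s->id, s->cookie);
}

static struct session *session_find(xnkey_t key)
{
	struct xnid *id = xnid_fetch(&session_tree, key);

	return id ? container_of(id, struct session, id) : NULL;
}

Whole-tree traversal would use xntree_for_each_entry(pos, &session_tree, id),
with pos declared as struct session *.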
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h
new file mode 100644
index 0000000..7da88a7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vdso.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_KERNEL_VDSO_H
+#define _COBALT_KERNEL_VDSO_H
+
+#include <linux/time.h>
+#include <asm/barrier.h>
+#include <asm/atomic.h>
+#include <asm/processor.h>
+#include <cobalt/uapi/kernel/vdso.h>
+
+extern struct xnvdso *nkvdso;
+
+/*
+ * Define the available feature set here. We have a single feature
+ * defined for now, only in the I-pipe case.
+ */
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+
+#define XNVDSO_FEATURES XNVDSO_FEAT_HOST_REALTIME
+
+static inline struct xnvdso_hostrt_data *get_hostrt_data(void)
+{
+	return &nkvdso->hostrt_data;
+}
+
+#else
+
+#define XNVDSO_FEATURES 0
+
+#endif
+
+#endif /* _COBALT_KERNEL_VDSO_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h
new file mode 100644
index 0000000..a53c237
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/kernel/vfile.h
@@ -0,0 +1,667 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_KERNEL_VFILE_H
+#define _COBALT_KERNEL_VFILE_H
+
+#if defined(CONFIG_XENO_OPT_VFILE) || defined(DOXYGEN_CPP)
+
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <cobalt/kernel/lock.h>
+
+/**
+ * @addtogroup cobalt_core_vfile
+ * @{
+ */
+
+struct xnvfile_directory;
+struct xnvfile_regular_iterator;
+struct xnvfile_snapshot_iterator;
+struct xnvfile_lock_ops;
+
+struct xnvfile {
+	struct proc_dir_entry *pde;
+	struct file *file;
+	struct xnvfile_lock_ops *lockops;
+	int refcnt;
+	void *private;
+};
+
+/**
+ * @brief Vfile locking operations
+ * @anchor vfile_lockops
+ *
+ * This structure describes the operations to be provided for
+ * implementing locking support on vfiles. They apply to both
+ * snapshot-driven and regular vfiles.
+ */
+struct xnvfile_lock_ops {
+	/**
+	 * @anchor lockops_get
+	 * This handler should grab the desired lock.
+	 *
+	 * @param vfile A pointer to the virtual file which needs
+	 * locking.
+	 *
+	 * @return zero should be returned if the call
+	 * succeeds. Otherwise, a negative error code can be returned;
+	 * upon error, the current vfile operation is aborted, and the
+	 * user-space caller is passed back the error value.
+	 */
+	int (*get)(struct xnvfile *vfile);
+	/**
+	 * @anchor lockops_put This handler should release the lock
+	 * previously grabbed by the @ref lockops_get "get() handler".
+	 *
+	 * @param vfile A pointer to the virtual file which currently
+	 * holds the lock to release.
+	 */
+	void (*put)(struct xnvfile *vfile);
+};
+
+struct xnvfile_hostlock_class {
+	struct xnvfile_lock_ops ops;
+	struct mutex mutex;
+};
+
+struct xnvfile_nklock_class {
+	struct xnvfile_lock_ops ops;
+	spl_t s;
+};
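To make the lock classes concrete: a vfile whose data is only touched from
regular Linux context can serialize readers and writers with the mutex-based
class, conveniently built by the DEFINE_VFILE_HOSTLOCK() helper found at the
bottom of this header. A sketch with hypothetical names:

static DEFINE_VFILE_HOSTLOCK(stats_lock);

/* Attach before registering the vfile; the get()/put() handlers
 * will then take and release stats_lock.mutex around accesses. */
static void stats_attach_lock(struct xnvfile *vfile)
{
	vfile->lockops = &stats_lock.ops;
}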
+
+struct xnvfile_input {
+	const char __user *u_buf;
+	size_t size;
+	struct xnvfile *vfile;
+};
+
+/**
+ * @brief Regular vfile operation descriptor
+ * @anchor regular_ops
+ *
+ * This structure describes the operations available with a regular
+ * vfile. It defines handlers for sending back formatted kernel data
+ * upon a user-space read request, and for obtaining user data upon a
+ * user-space write request.
+ */
+struct xnvfile_regular_ops {
+	/**
+	 * @anchor regular_rewind This handler is called only once,
+	 * when the virtual file is opened, before the @ref
+	 * regular_begin "begin() handler" is invoked.
+	 *
+	 * @param it A pointer to the vfile iterator which will be
+	 * used to read the file contents.
+	 *
+	 * @return Zero should be returned upon success. Otherwise, a
+	 * negative error code aborts the operation, and is passed
+	 * back to the reader.
+	 *
+	 * @note This handler is optional. It should not be used to
+	 * allocate resources but rather to perform consistency
+	 * checks, since no closure call is issued in case the open
+	 * sequence eventually fails.
+	 */
+	int (*rewind)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_begin
+	 * This handler should prepare for iterating over the records
+	 * upon a read request, starting from the specified position.
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * first record to output. This handler may be called multiple
+	 * times with different position requests.
+	 *
+	 * @return A pointer to the first record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - VFILE_SEQ_START, a special value indicating that @ref
+	 * regular_show "the show() handler" should receive a NULL
+	 * data pointer first, in order to output a header.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the @ref
+	 * regular_show "show() handler" will be called only once
+	 * for a read operation, with a NULL @a data parameter. This
+	 * particular setting is convenient for simple regular vfiles
+	 * having a single, fixed record to output.
+	 */
+	void *(*begin)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_next
+	 * This handler should return the address of the next record
+	 * to format and output by the @ref regular_show "show()
+	 * handler".
+	 *
+	 * @param it A pointer to the current vfile iterator. On
+	 * entry, it->pos is set to the (0-based) position of the
+	 * next record to output.
+	 *
+	 * @return A pointer to the next record to format and output,
+	 * to be passed to the @ref regular_show "show() handler" as
+	 * its @a data parameter, if the call succeeds. Otherwise:
+	 *
+	 * - NULL in case no record is available, in which case the
+	 * read operation will terminate immediately with no output.
+	 *
+	 * - ERR_PTR(errno), where errno is a negative error code;
+	 * upon error, the current operation will be aborted
+	 * immediately.
+	 *
+	 * @note This handler is optional; if none is given in the
+	 * operation descriptor (i.e. NULL value), the read operation
+	 * will stop after the first invocation of the @ref regular_show
+	 * "show() handler".
+	 */
+	void *(*next)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_end
+	 * This handler is called after all records have been output.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @note This handler is optional and the pointer may be NULL.
+	 */
+	void (*end)(struct xnvfile_regular_iterator *it);
+	/**
+	 * @anchor regular_show
+	 * This handler should format and output a record.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current vfile iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler may receive a NULL @a
+	 * data pointer, depending on the presence and return value of
+	 * the @ref regular_begin "begin() handler"; the show handler
+	 * should test this special value to output any header that
+	 * fits, prior to receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_regular_iterator *it, void *data);
+	/**
+	 * @anchor regular_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating some kernel setting, or triggering any other
+	 * action which fits. This is the only handler which deals
+	 * with the write-side of a vfile.  It is called when writing
+	 * to the /proc entry of the vfile from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * an opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref regular_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
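Tying the handlers above together, the simplest useful case is a
single-record, read/write regular vfile: with no begin()/next() handlers,
show() runs once with a NULL data pointer, and store() parses whatever is
written to the /proc entry. A hypothetical sketch, using the parsing and
registration helpers declared further down this header:

static int threshold;	/* hypothetical tunable */

static int threshold_show(struct xnvfile_regular_iterator *it, void *data)
{
	xnvfile_printf(it, "%d\n", threshold);
	return 0;
}

static ssize_t threshold_store(struct xnvfile_input *input)
{
	long val;
	ssize_t ret;

	ret = xnvfile_get_integer(input, &val);
	if (ret < 0)
		return ret;

	threshold = (int)val;
	return ret;	/* number of bytes consumed */
}

static struct xnvfile_regular_ops threshold_ops = {
	.show = threshold_show,
	.store = threshold_store,
};

static struct xnvfile_regular threshold_vfile = {
	.ops = &threshold_ops,
};

Registration would then amount to xnvfile_init_regular("threshold",
&threshold_vfile, &cobalt_vfroot), or any other parent directory.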
+
+struct xnvfile_regular {
+	struct xnvfile entry;
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+};
+
+struct xnvfile_regular_template {
+	size_t privsz;
+	struct xnvfile_regular_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Regular vfile iterator
+ * @anchor regular_iterator
+ *
+ * This structure defines an iterator over a regular vfile.
+ */
+struct xnvfile_regular_iterator {
+	/** Current record position while iterating. */
+	loff_t pos;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_regular *vfile;
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+/**
+ * @brief Snapshot vfile operation descriptor
+ * @anchor snapshot_ops
+ *
+ * This structure describes the operations available with a
+ * snapshot-driven vfile. It defines handlers for returning a
+ * printable snapshot of some Xenomai object contents upon a
+ * user-space read request, and for updating this object upon a
+ * user-space write request.
+ */
+struct xnvfile_snapshot_ops {
+	/**
+	 * @anchor snapshot_rewind
+	 * This handler (re-)initializes the data collection, moving
+	 * the seek pointer at the first record. When the file
+	 * revision tag is touched while collecting data, the current
+	 * reading is aborted, all collected data dropped, and the
+	 * vfile is eventually rewound.
+	 *
+	 * @param it A pointer to the current snapshot iterator. Two
+	 * useful pieces of information can be retrieved from this
+	 * iterator in this context:
+	 *
+	 * - it->vfile is a pointer to the descriptor of the virtual
+	 * file being rewound.
+	 *
+	 * - xnvfile_iterator_priv(it) returns a pointer to the
+	 * private data area available from the descriptor, whose
+	 * size is vfile->privsz. If that size is zero, the
+	 * returned pointer is meaningless and should not be used.
+	 *
+	 * @return A negative error code aborts the data collection,
+	 * and is passed back to the reader. Otherwise:
+	 *
+	 * - a strictly positive value is interpreted as the total
+	 * number of records which will be returned by the @ref
+	 * snapshot_next "next() handler" during the data collection
+	 * phase. If no @ref snapshot_begin "begin() handler" is
+	 * provided in the @ref snapshot_ops "operation descriptor",
+	 * this value is used to allocate the snapshot buffer
+	 * internally. The size of this buffer would then be
+	 * vfile->datasz * value.
+	 *
+	 * - zero leaves the allocation to the @ref snapshot_begin
+	 * "begin() handler" if present, or indicates that no record
+	 * is to be output in case such handler is not given.
+	 *
+	 * @note This handler is optional; a NULL value indicates that
+	 * nothing needs to be done for rewinding the vfile.  It is
+	 * called with the vfile lock held.
+	 */
+	int (*rewind)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_begin
+	 * This handler should allocate the snapshot buffer to hold
+	 * records during the data collection phase.  When specified,
+	 * all records collected via the @ref snapshot_next "next()
+	 * handler" will be written to a cell from the memory area
+	 * returned by begin().
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @return A pointer to the record buffer, if the call
+	 * succeeds. Otherwise:
+	 *
+	 * - NULL in case of allocation error. This will abort the data
+	 * collection, and return -ENOMEM to the reader.
+	 *
+	 * - VFILE_SEQ_EMPTY, a special value indicating that no
+	 * record will be output. In such a case, the @ref
+	 * snapshot_next "next() handler" will not be called, and the
+	 * data collection will stop immediately. However, the @ref
+	 * snapshot_show "show() handler" will still be called once,
+	 * with a NULL data pointer (i.e. header display request).
+	 *
+	 * @note This handler is optional; if none is given, the
+	 * buffer is allocated internally, sized from the record
+	 * count returned by the @ref snapshot_rewind "rewind()
+	 * handler".
+	 */
+	void *(*begin)(struct xnvfile_snapshot_iterator *it);
+	/**
+	 * @anchor snapshot_end
+	 * This handler releases the memory buffer previously obtained
+	 * from begin(). It is usually called after the snapshot data
+	 * has been output by show(), but it may also be called before
+	 * rewinding the vfile after a revision change, to release the
+	 * dropped buffer.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param buf A pointer to the buffer to release.
+	 *
+	 * @note This routine is optional and the pointer may be
+	 * NULL. It is not needed upon internal buffer allocation;
+	 * see the description of the @ref snapshot_rewind "rewind()
+	 * handler".
+	 */
+	void (*end)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * @anchor snapshot_next
+	 * This handler fetches the next record, as part of the
+	 * snapshot data to be sent back to the reader via the
+	 * @ref snapshot_show "show() handler".
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to fill in.
+	 *
+	 * @return a strictly positive value, if the call succeeds and
+	 * leaves a valid record in @a data, which should be passed
+	 * to the @ref snapshot_show "show() handler" during the
+	 * formatting and output phase. Otherwise:
+	 *
+	 * - A negative error code. This will abort the data
+	 * collection, and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped. In such a case, the @a
+	 * data pointer is not advanced to the next position before
+	 * the @ref snapshot_next "next() handler" is called anew.
+	 *
+	 * @note This handler is called with the vfile lock
+	 * held. Before each invocation of this handler, the vfile
+	 * core checks whether the revision tag has been touched, in
+	 * which case the data collection is restarted from scratch. A
+	 * data collection phase succeeds whenever all records can be
+	 * fetched via the @ref snapshot_next "next() handler", while
+	 * the revision tag remains unchanged, which indicates that a
+	 * consistent snapshot of the object state was taken.
+	 */
+	int (*next)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_show
+	 * This handler should format and output a record from the
+	 * collected data.
+	 *
+	 * xnvfile_printf(), xnvfile_write(), xnvfile_puts() and
+	 * xnvfile_putc() are available to format and/or emit the
+	 * output. All routines take the iterator argument @a it as
+	 * their first parameter.
+	 *
+	 * @param it A pointer to the current snapshot iterator.
+	 *
+	 * @param data A pointer to the record to format then
+	 * output. The first call to the handler is always passed a
+	 * NULL @a data pointer; the show handler should test this
+	 * special value to output any header that fits, prior to
+	 * receiving more calls with actual records.
+	 *
+	 * @return zero if the call succeeds, also indicating that the
+	 * handler should be called for the next record if
+	 * any. Otherwise:
+	 *
+	 * - A negative error code. This will abort the output phase,
+	 * and return this status to the reader.
+	 *
+	 * - VFILE_SEQ_SKIP, a special value indicating that the
+	 * current record should be skipped and will not be output.
+	 */
+	int (*show)(struct xnvfile_snapshot_iterator *it, void *data);
+	/**
+	 * @anchor snapshot_store
+	 * This handler receives data written to the vfile, likely for
+	 * updating the associated Xenomai object's state, or
+	 * triggering any other action which fits. This is the only
+	 * handler which deals with the write-side of a vfile.  It is
+	 * called when writing to the /proc entry of the vfile
+	 * from a user-space process.
+	 *
+	 * The input data is described by a descriptor passed to the
+	 * handler, which may be subsequently passed to parsing helper
+	 * routines.  For instance, xnvfile_get_string() will accept
+	 * the input descriptor for returning the written data as a
+	 * null-terminated character string. On the other hand,
+	 * xnvfile_get_integer() will attempt to return a long integer
+	 * from the input data.
+	 *
+	 * @param input A pointer to an input descriptor. It refers to
+	 * an opaque data from the handler's standpoint.
+	 *
+	 * @return the number of bytes read from the input descriptor
+	 * if the call succeeds. Otherwise, a negative error code.
+	 * Return values from parsing helper routines are commonly
+	 * passed back to the caller by the @ref snapshot_store
+	 * "store() handler".
+	 *
+	 * @note This handler is optional, and may be omitted for
+	 * read-only vfiles.
+	 */
+	ssize_t (*store)(struct xnvfile_input *input);
+};
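For contrast with the regular model, here is a sketch of a snapshot vfile
over a hypothetical fixed-size table. rewind() returns the record count so
the core sizes the snapshot buffer itself (datasz * count, as described
above); next() copies out one record per call, keeping its cursor in the
iterator's private area; show() formats each record, emitting a header on
the initial NULL data pointer. All names are invented, and the assumption
that returning 0 from next() ends the collection is this sketch's, not the
text's.

struct demo_rec {
	int value;
};

struct demo_priv {
	int curr;
};

static struct demo_rec demo_table[16];

static struct xnvfile_rev_tag demo_tag;

static int demo_rewind(struct xnvfile_snapshot_iterator *it)
{
	struct demo_priv *priv = xnvfile_iterator_priv(it);

	priv->curr = 0;
	return ARRAY_SIZE(demo_table);	/* record count */
}

static int demo_next(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct demo_priv *priv = xnvfile_iterator_priv(it);
	struct demo_rec *p = data;

	if (priv->curr >= ARRAY_SIZE(demo_table))
		return 0;	/* defensive: collection complete */

	*p = demo_table[priv->curr++];
	return 1;	/* one valid record stored */
}

static int demo_show(struct xnvfile_snapshot_iterator *it, void *data)
{
	struct demo_rec *p = data;

	if (p == NULL)	/* initial call: emit the header */
		xnvfile_printf(it, "%8s\n", "VALUE");
	else
		xnvfile_printf(it, "%8d\n", p->value);

	return 0;
}

static struct xnvfile_snapshot_ops demo_ops = {
	.rewind = demo_rewind,
	.next = demo_next,
	.show = demo_show,
};

static struct xnvfile_snapshot demo_vfile = {
	.privsz = sizeof(struct demo_priv),
	.datasz = sizeof(struct demo_rec),
	.tag = &demo_tag,
	.ops = &demo_ops,
};

A writer updating demo_table would call xnvfile_touch(&demo_vfile) (defined
below) so that any in-flight collection restarts on fresh data.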
+
+/**
+ * @brief Snapshot revision tag
+ * @anchor revision_tag
+ *
+ * This structure defines a revision tag to be used with @ref
+ * snapshot_vfile "snapshot-driven vfiles".
+ */
+struct xnvfile_rev_tag {
+	/** Current revision number. */
+	int rev;
+};
+
+struct xnvfile_snapshot_template {
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+	struct xnvfile_lock_ops *lockops;
+};
+
+/**
+ * @brief Snapshot vfile descriptor
+ * @anchor snapshot_vfile
+ *
+ * This structure describes a snapshot-driven vfile.  Reading from
+ * such a vfile involves a preliminary data collection phase under
+ * lock protection, and a subsequent formatting and output phase of
+ * the collected data records. Locking is done in a way that does not
+ * increase worst-case latency, regardless of the number of records to
+ * be collected for output.
+ */
+struct xnvfile_snapshot {
+	struct xnvfile entry;
+	size_t privsz;
+	size_t datasz;
+	struct xnvfile_rev_tag *tag;
+	struct xnvfile_snapshot_ops *ops;
+};
+
+/**
+ * @brief Snapshot-driven vfile iterator
+ * @anchor snapshot_iterator
+ *
+ * This structure defines an iterator over a snapshot-driven vfile.
+ */
+struct xnvfile_snapshot_iterator {
+	/** Number of collected records. */
+	int nrdata;
+	/** Address of record buffer. */
+	caddr_t databuf;
+	/** Backlink to the host sequential file supporting the vfile. */
+	struct seq_file *seq;
+	/** Backlink to the vfile being read. */
+	struct xnvfile_snapshot *vfile;
+	/** Buffer release handler. */
+	void (*endfn)(struct xnvfile_snapshot_iterator *it, void *buf);
+	/**
+	 * Start of private area. Use xnvfile_iterator_priv() to
+	 * address it.
+	 */
+	char private[0];
+};
+
+struct xnvfile_directory {
+	struct xnvfile entry;
+};
+
+struct xnvfile_link {
+	struct xnvfile entry;
+};
+
+/* vfile.begin()=> */
+#define VFILE_SEQ_EMPTY			((void *)-1)
+/* =>vfile.show() */
+#define VFILE_SEQ_START			SEQ_START_TOKEN
+/* vfile.next/show()=> */
+#define VFILE_SEQ_SKIP			2
+
+#define xnvfile_printf(it, args...)	seq_printf((it)->seq, ##args)
+#define xnvfile_write(it, data, len)	seq_write((it)->seq, (data),(len))
+#define xnvfile_puts(it, s)		seq_puts((it)->seq, (s))
+#define xnvfile_putc(it, c)		seq_putc((it)->seq, (c))
+
+static inline void xnvfile_touch_tag(struct xnvfile_rev_tag *tag)
+{
+	tag->rev++;
+}
+
+static inline void xnvfile_touch(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_touch_tag(vfile->tag);
+}
+
+#define xnvfile_noentry			\
+	{				\
+		.pde = NULL,		\
+		.private = NULL,	\
+		.file = NULL,		\
+		.refcnt = 0,		\
+	}
+
+#define xnvfile_nodir	{ .entry = xnvfile_noentry }
+#define xnvfile_nolink	{ .entry = xnvfile_noentry }
+#define xnvfile_nofile	{ .entry = xnvfile_noentry }
+
+#define xnvfile_priv(e)			((e)->entry.private)
+#define xnvfile_nref(e)			((e)->entry.refcnt)
+#define xnvfile_file(e)			((e)->entry.file)
+#define xnvfile_iterator_priv(it)	((void *)(&(it)->private))
+
+extern struct xnvfile_nklock_class xnvfile_nucleus_lock;
+
+extern struct xnvfile_directory cobalt_vfroot;
+
+int xnvfile_init_root(void);
+
+void xnvfile_destroy_root(void);
+
+int xnvfile_init_snapshot(const char *name,
+			  struct xnvfile_snapshot *vfile,
+			  struct xnvfile_directory *parent);
+
+int xnvfile_init_regular(const char *name,
+			 struct xnvfile_regular *vfile,
+			 struct xnvfile_directory *parent);
+
+int xnvfile_init_dir(const char *name,
+		     struct xnvfile_directory *vdir,
+		     struct xnvfile_directory *parent);
+
+int xnvfile_init_link(const char *from,
+		      const char *to,
+		      struct xnvfile_link *vlink,
+		      struct xnvfile_directory *parent);
+
+void xnvfile_destroy(struct xnvfile *vfile);
+
+ssize_t xnvfile_get_blob(struct xnvfile_input *input,
+			 void *data, size_t size);
+
+ssize_t xnvfile_get_string(struct xnvfile_input *input,
+			   char *s, size_t maxlen);
+
+ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp);
+
+int __vfile_hostlock_get(struct xnvfile *vfile);
+
+void __vfile_hostlock_put(struct xnvfile *vfile);
+
+static inline
+void xnvfile_destroy_snapshot(struct xnvfile_snapshot *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_regular(struct xnvfile_regular *vfile)
+{
+	xnvfile_destroy(&vfile->entry);
+}
+
+static inline
+void xnvfile_destroy_dir(struct xnvfile_directory *vdir)
+{
+	xnvfile_destroy(&vdir->entry);
+}
+
+static inline
+void xnvfile_destroy_link(struct xnvfile_link *vlink)
+{
+	xnvfile_destroy(&vlink->entry);
+}
+
+#define DEFINE_VFILE_HOSTLOCK(name)					\
+	struct xnvfile_hostlock_class name = {				\
+		.ops = {						\
+			.get = __vfile_hostlock_get,			\
+			.put = __vfile_hostlock_put,			\
+		},							\
+		.mutex = __MUTEX_INITIALIZER(name.mutex),		\
+	}
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+#define xnvfile_touch_tag(tag)	do { } while (0)
+
+#define xnvfile_touch(vfile)	do { } while (0)
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+/** @} */
+
+#endif /* !_COBALT_KERNEL_VFILE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/mqueue.h b/kernel/xenomai-v3.2.4/include/cobalt/mqueue.h
new file mode 100644
index 0000000..496632d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/mqueue.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <mqueue.h>
+
+#ifndef _COBALT_MQUEUE_H
+#define _COBALT_MQUEUE_H
+
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(mqd_t, mq_open(const char *name,
+			   int oflags,
+			   ...));
+
+COBALT_DECL(int, mq_close(mqd_t qd));
+
+COBALT_DECL(int, mq_unlink(const char *name));
+
+COBALT_DECL(int, mq_getattr(mqd_t qd,
+			    struct mq_attr *attr));
+
+COBALT_DECL(int, mq_setattr(mqd_t qd,
+			    const struct mq_attr *__restrict__ attr,
+			    struct mq_attr *__restrict__ oattr));
+
+COBALT_DECL(int, mq_send(mqd_t qd,
+			 const char *buffer,
+			 size_t len,
+			 unsigned prio));
+
+COBALT_DECL(int, mq_timedsend(mqd_t q,
+			      const char * buffer,
+			      size_t len,
+			      unsigned prio,
+			      const struct timespec *timeout));
+
+COBALT_DECL(ssize_t, mq_receive(mqd_t q,
+				char *buffer,
+				size_t len,
+				unsigned *prio));
+
+COBALT_DECL(ssize_t, mq_timedreceive(mqd_t q,
+				     char *__restrict__ buffer,
+				     size_t len,
+				     unsigned *__restrict__ prio,
+				     const struct timespec *__restrict__ timeout));
+
+COBALT_DECL(int, mq_notify(mqd_t q,
+			   const struct sigevent *evp));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_MQUEUE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/pthread.h b/kernel/xenomai-v3.2.4/include/cobalt/pthread.h
new file mode 100644
index 0000000..3e9bd47
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/pthread.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <pthread.h>
+
+#ifndef _COBALT_PTHREAD_H
+#define _COBALT_PTHREAD_H
+
+#include <boilerplate/libc.h>
+#include <cobalt/wrappers.h>
+#include <cobalt/uapi/thread.h>
+
+typedef struct pthread_attr_ex {
+	pthread_attr_t std;
+	struct {
+		int personality;
+		int sched_policy;
+		struct sched_param_ex sched_param;
+	} nonstd;
+} pthread_attr_ex_t;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, pthread_attr_init(pthread_attr_t *attr));
+
+COBALT_DECL(int, pthread_create(pthread_t *ptid_r,
+				const pthread_attr_t *attr,
+				void *(*start) (void *),
+				void *arg));
+
+COBALT_DECL(int, pthread_getschedparam(pthread_t thread,
+				       int *policy,
+				       struct sched_param *param));
+
+COBALT_DECL(int, pthread_setschedparam(pthread_t thread,
+				       int policy,
+				       const struct sched_param *param));
+
+COBALT_DECL(int, pthread_setschedprio(pthread_t thread, int prio));
+
+COBALT_DECL(int, pthread_mutex_init(pthread_mutex_t *mutex,
+				    const pthread_mutexattr_t *attr));
+
+COBALT_DECL(int, pthread_mutex_destroy(pthread_mutex_t *mutex));
+
+COBALT_DECL(int, pthread_mutex_lock(pthread_mutex_t *mutex));
+
+COBALT_DECL(int, pthread_mutex_timedlock(pthread_mutex_t *mutex,
+					 const struct timespec *to));
+
+COBALT_DECL(int, pthread_mutex_trylock(pthread_mutex_t *mutex));
+
+COBALT_DECL(int, pthread_mutex_unlock(pthread_mutex_t *mutex));
+
+COBALT_DECL(int, pthread_mutex_setprioceiling(pthread_mutex_t *__restrict mutex,
+					      int prioceiling,
+					      int *__restrict old_ceiling));
+
+COBALT_DECL(int, pthread_mutex_getprioceiling(pthread_mutex_t *__restrict mutex,
+					      int *__restrict old_ceiling));
+
+COBALT_DECL(int, pthread_cond_init (pthread_cond_t *cond,
+				    const pthread_condattr_t *attr));
+
+COBALT_DECL(int, pthread_cond_destroy(pthread_cond_t *cond));
+
+COBALT_DECL(int, pthread_cond_wait(pthread_cond_t *cond,
+				   pthread_mutex_t *mutex));
+
+COBALT_DECL(int, pthread_cond_timedwait(pthread_cond_t *cond,
+					pthread_mutex_t *mutex,
+					const struct timespec *abstime));
+
+COBALT_DECL(int, pthread_cond_signal(pthread_cond_t *cond));
+
+COBALT_DECL(int, pthread_cond_broadcast(pthread_cond_t *cond));
+
+COBALT_DECL(int, pthread_kill(pthread_t ptid, int sig));
+
+COBALT_DECL(int, pthread_join(pthread_t ptid, void **retval));
+
+#ifndef pthread_yield
+/*
+ * linuxthreads wraps pthread_yield() to sched_yield() via a
+ * preprocessor macro, which confuses the compiler with
+ * COBALT_DECL(). Since Cobalt also routes pthread_yield() to its own
+ * sched_yield() implementation internally, we can live with this
+ * wrapping.
+ */
+COBALT_DECL(int, pthread_yield(void));
+#endif
+
+int pthread_setmode_np(int clrmask, int setmask,
+		       int *mask_r);
+
+COBALT_DECL(int, pthread_setname_np(pthread_t thread, const char *name));
+
+int pthread_create_ex(pthread_t *ptid_r,
+		      const pthread_attr_ex_t *attr_ex,
+		      void *(*start)(void *),
+		      void *arg);
+
+int pthread_getschedparam_ex(pthread_t ptid,
+			     int *pol,
+			     struct sched_param_ex *par);
+
+int pthread_setschedparam_ex(pthread_t ptid,
+			     int pol,
+			     const struct sched_param_ex *par);
+
+int pthread_attr_init_ex(pthread_attr_ex_t *attr_ex);
+
+int pthread_attr_destroy_ex(pthread_attr_ex_t *attr_ex);
+
+int pthread_attr_setschedpolicy_ex(pthread_attr_ex_t *attr_ex,
+				   int policy);
+
+int pthread_attr_getschedpolicy_ex(const pthread_attr_ex_t *attr_ex,
+				   int *policy);
+
+int pthread_attr_setschedparam_ex(pthread_attr_ex_t *attr_ex,
+				  const struct sched_param_ex *param_ex);
+
+int pthread_attr_getschedparam_ex(const pthread_attr_ex_t *attr_ex,
+				  struct sched_param_ex *param_ex);
+
+int pthread_attr_getinheritsched_ex(const pthread_attr_ex_t *attr_ex,
+				    int *inheritsched);
+
+int pthread_attr_setinheritsched_ex(pthread_attr_ex_t *attr_ex,
+				    int inheritsched);
+
+int pthread_attr_getdetachstate_ex(const pthread_attr_ex_t *attr_ex,
+				   int *detachstate);
+
+int pthread_attr_setdetachstate_ex(pthread_attr_ex_t *attr_ex,
+				   int detachstate);
+
+int pthread_attr_getstacksize_ex(const pthread_attr_ex_t *attr_ex,
+				 size_t *stacksize);
+
+int pthread_attr_setstacksize_ex(pthread_attr_ex_t *attr_ex,
+				 size_t stacksize);
+
+int pthread_attr_getscope_ex(const pthread_attr_ex_t *attr_ex,
+			     int *scope);
+
+int pthread_attr_setscope_ex(pthread_attr_ex_t *attr_ex,
+			     int scope);
+
+int pthread_attr_getpersonality_ex(const pthread_attr_ex_t *attr_ex,
+				   int *personality);
+
+int pthread_attr_setpersonality_ex(pthread_attr_ex_t *attr_ex,
+				   int personality);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_PTHREAD_H */
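As a user-space illustration of the extended attribute API above, the
following sketch spawns a SCHED_FIFO thread. It assumes a program built and
linked against libcobalt; the priority value is arbitrary.

static void *worker(void *arg)
{
	return NULL;
}

static int spawn_rt_worker(pthread_t *tid)
{
	pthread_attr_ex_t attr_ex;
	struct sched_param_ex param_ex = { .sched_priority = 50 };
	int ret;

	pthread_attr_init_ex(&attr_ex);
	pthread_attr_setinheritsched_ex(&attr_ex, PTHREAD_EXPLICIT_SCHED);
	pthread_attr_setschedpolicy_ex(&attr_ex, SCHED_FIFO);
	pthread_attr_setschedparam_ex(&attr_ex, &param_ex);

	ret = pthread_create_ex(tid, &attr_ex, worker, NULL);
	pthread_attr_destroy_ex(&attr_ex);

	return ret;
}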
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/sched.h
new file mode 100644
index 0000000..7c5b26e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sched.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <sched.h>
+
+#ifndef _COBALT_SCHED_H
+#define _COBALT_SCHED_H
+
+#include <sys/types.h>
+#include <cobalt/wrappers.h>
+#include <cobalt/uapi/sched.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, sched_yield(void));
+
+COBALT_DECL(int, sched_get_priority_min(int policy));
+
+COBALT_DECL(int, sched_get_priority_max(int policy));
+
+COBALT_DECL(int, sched_setscheduler(pid_t pid, int policy,
+				    const struct sched_param *param));
+
+COBALT_DECL(int, sched_getscheduler(pid_t pid));
+
+int sched_get_priority_min_ex(int policy);
+
+int sched_get_priority_max_ex(int policy);
+
+int sched_setscheduler_ex(pid_t pid, int policy,
+			  const struct sched_param_ex *param_ex);
+
+int sched_getscheduler_ex(pid_t pid, int *policy_r,
+			  struct sched_param_ex *param_ex);
+
+int sched_setconfig_np(int cpu, int policy,
+		       const union sched_config *config, size_t len);
+
+ssize_t sched_getconfig_np(int cpu, int policy,
+			   union sched_config *config, size_t *len_r);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SCHED_H */
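A sketch of the extended scheduler call above, which mirrors its POSIX
counterpart but takes a struct sched_param_ex (assumed here to expose the
usual sched_priority member); the values are arbitrary:

static int make_fifo(pid_t pid)
{
	struct sched_param_ex param_ex = {
		.sched_priority = 30,
	};

	return sched_setscheduler_ex(pid, SCHED_FIFO, &param_ex);
}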
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/semaphore.h b/kernel/xenomai-v3.2.4/include/cobalt/semaphore.h
new file mode 100644
index 0000000..a7714fd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/semaphore.h
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <semaphore.h>
+
+#ifndef _COBALT_SEMAPHORE_H
+#define _COBALT_SEMAPHORE_H
+
+#include <boilerplate/atomic.h>
+#include <cobalt/uapi/sem.h>
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, sem_init(sem_t *sem,
+			  int pshared,
+			  unsigned int value));
+
+COBALT_DECL(int, sem_destroy(sem_t *sem));
+
+COBALT_DECL(int, sem_post(sem_t *sem));
+
+COBALT_DECL(int, sem_wait(sem_t *sem));
+
+COBALT_DECL(int, sem_timedwait(sem_t *sem,
+			       const struct timespec *abs_timeout));
+
+COBALT_DECL(int, sem_trywait(sem_t *sem));
+
+COBALT_DECL(int, sem_getvalue(sem_t *sem, int *value));
+
+COBALT_DECL(sem_t *, sem_open(const char *name, int oflags, ...));
+
+COBALT_DECL(int, sem_close(sem_t *sem));
+
+COBALT_DECL(int, sem_unlink(const char *name));
+
+int sem_init_np(sem_t *sem,
+		int flags,
+		unsigned int value);
+
+int sem_broadcast_np(sem_t *sem);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SEMAPHORE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/signal.h b/kernel/xenomai-v3.2.4/include/cobalt/signal.h
new file mode 100644
index 0000000..62694f9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/signal.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <signal.h>
+
+#ifndef _COBALT_SIGNAL_H
+#define _COBALT_SIGNAL_H
+
+/* Re-read in case we came from a selective __need* block. */
+#include_next <signal.h>
+#include <cobalt/wrappers.h>
+#include <cobalt/uapi/signal.h>
+
+#ifndef sigev_notify_thread_id
+#define sigev_notify_thread_id	 _sigev_un._tid
+#endif
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int cobalt_sigshadow_handler(int sig, siginfo_t *si,
+			     void *ctxt);
+
+void cobalt_sigdebug_handler(int sig, siginfo_t *si,
+			     void *context);
+
+COBALT_DECL(int, sigpending(sigset_t *set));
+
+COBALT_DECL(int, sigwait(const sigset_t *set, int *sig));
+
+COBALT_DECL(int, sigwaitinfo(const sigset_t *set, siginfo_t *si));
+
+COBALT_DECL(int, sigtimedwait(const sigset_t *set, siginfo_t *si,
+			      const struct timespec *timeout));
+
+COBALT_DECL(int, kill(pid_t pid, int sig));
+
+COBALT_DECL(int, sigqueue(pid_t pid, int sig,
+			  const union sigval value));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SIGNAL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/stdio.h b/kernel/xenomai-v3.2.4/include/cobalt/stdio.h
new file mode 100644
index 0000000..5b9df80
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/stdio.h
@@ -0,0 +1,129 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <stdio.h>
+
+#ifndef _COBALT_STDIO_H
+#define _COBALT_STDIO_H
+
+#include <stddef.h>
+#include <stdarg.h>
+#include <xeno_config.h>
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+COBALT_DECL(int, vfprintf(FILE *stream, const char *fmt, va_list args));
+
+#ifdef CONFIG_XENO_FORTIFY
+
+COBALT_DECL(int, __vfprintf_chk(FILE *stream, int level,
+				const char *fmt, va_list ap));
+
+COBALT_DECL(int, __vprintf_chk(int flag,
+			       const char *fmt, va_list ap));
+
+COBALT_DECL(int, __printf_chk(int flag, const char *fmt, ...));
+
+COBALT_DECL(int, __fprintf_chk(FILE *fp, int flag, const char *fmt, ...));
+
+int __rt_vfprintf_chk(FILE *stream, int level,
+		      const char *fmt, va_list args);
+
+void __rt_vsyslog_chk(int priority, int level,
+		      const char *fmt, va_list args);
+
+#endif	/* CONFIG_XENO_FORTIFY */
+
+COBALT_DECL(int, vprintf(const char *fmt, va_list args));
+
+COBALT_DECL(int, fprintf(FILE *stream, const char *fmt, ...));
+
+COBALT_DECL(int, printf(const char *fmt, ...));
+
+COBALT_DECL(int, puts(const char *s));
+
+COBALT_DECL(int, fputs(const char *s, FILE *stream));
+
+#ifndef putchar
+COBALT_DECL(int, putchar(int c));
+#else
+static inline int __real_putchar(int c)
+{
+	return putchar(c);
+}
+int __wrap_putchar(int c);
+int __cobalt_putchar(int c);
+#undef putchar
+#define putchar putchar
+#endif
+
+#ifndef fputc
+COBALT_DECL(int, fputc(int c, FILE *stream));
+#else
+static inline int __real_fputc(int c, FILE *stream)
+{
+	return fputc(c, stream);
+}
+int __wrap_fputc(int c, FILE *stream);
+int __cobalt_fputc(int c, FILE *stream);
+#undef fputc
+#define fputc fputc
+#endif
+
+COBALT_DECL(size_t,
+	    fwrite(const void *ptr, size_t sz, size_t nmemb, FILE *stream));
+
+COBALT_DECL(int, fclose(FILE *stream));
+
+int rt_vfprintf(FILE *stream, const char *format, va_list args);
+
+int rt_vprintf(const char *format, va_list args);
+
+int rt_fprintf(FILE *stream, const char *format, ...);
+
+int rt_printf(const char *format, ...);
+
+int rt_puts(const char *s);
+
+int rt_fputs(const char *s, FILE *stream);
+
+int rt_fputc(int c, FILE *stream);
+
+int rt_putchar(int c);
+
+size_t rt_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream);
+
+void rt_syslog(int priority, const char *format, ...);
+
+void rt_vsyslog(int priority, const char *format, va_list args);
+
+int rt_print_init(size_t buffer_size, const char *name);
+
+const char *rt_print_buffer_name(void);
+
+void rt_print_flush_buffers(void);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* !_COBALT_STDIO_H */
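The rt_*() variants above are meant to be callable from real-time context:
output goes through a relay buffer (sized per caller with rt_print_init())
and drained outside the time-critical path, rather than entering the regular
kernel from the caller. Under that assumption, a sketch:

static void logging_setup(void)
{
	/* Hypothetical sizing: a 16 KiB relay buffer named "worker". */
	rt_print_init(16384, "worker");
}

static void report_cycle(int n)
{
	/* Does not force the calling thread out of primary mode. */
	rt_printf("cycle %d complete\n", n);
}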
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/stdlib.h b/kernel/xenomai-v3.2.4/include/cobalt/stdlib.h
new file mode 100644
index 0000000..6b664f5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/stdlib.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <stdlib.h>
+
+#ifndef _COBALT_STDLIB_H
+#define _COBALT_STDLIB_H
+
+/* Re-read in case we came from a selective __need* block. */
+#include_next <stdlib.h>
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+COBALT_DECL(void, free(void *ptr));
+
+COBALT_DECL(void *, malloc(size_t size));
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* !_COBALT_STDLIB_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am
new file mode 100644
index 0000000..099cc27
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/Makefile.am
@@ -0,0 +1,10 @@
+includesubdir = $(includedir)/cobalt/sys
+
+includesub_HEADERS =	\
+	cobalt.h	\
+	ioctl.h		\
+	mman.h		\
+	select.h	\
+	socket.h	\
+	time.h		\
+	timerfd.h
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h
new file mode 100644
index 0000000..46096e8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/cobalt.h
@@ -0,0 +1,145 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_SYS_COBALT_H
+#define _COBALT_SYS_COBALT_H
+
+#include <sys/types.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sched.h>
+#include <semaphore.h>
+#include <errno.h>
+#include <stdio.h>
+#include <time.h>
+#include <boilerplate/atomic.h>
+#include <boilerplate/list.h>
+#include <cobalt/uapi/kernel/synch.h>
+#include <cobalt/uapi/kernel/vdso.h>
+#include <cobalt/uapi/corectl.h>
+#include <cobalt/uapi/mutex.h>
+#include <cobalt/uapi/event.h>
+#include <cobalt/uapi/monitor.h>
+#include <cobalt/uapi/thread.h>
+#include <cobalt/uapi/cond.h>
+#include <cobalt/uapi/sem.h>
+#include <cobalt/ticks.h>
+
+#define cobalt_commit_memory(p) __cobalt_commit_memory(p, sizeof(*p))
+
+struct cobalt_tsd_hook {
+	void (*create_tsd)(void);
+	void (*delete_tsd)(void);
+	struct pvholder next;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int cobalt_extend(unsigned int magic);
+
+int cobalt_corectl(int request, void *buf, size_t bufsz);
+
+int cobalt_thread_stat(pid_t pid,
+		       struct cobalt_threadstat *stat);
+
+int cobalt_serial_debug(const char *fmt, ...);
+
+void __cobalt_commit_memory(void *p, size_t len);
+
+void cobalt_thread_harden(void);
+
+void cobalt_thread_relax(void);
+
+int cobalt_thread_join(pthread_t thread);
+
+pid_t cobalt_thread_pid(pthread_t thread);
+
+int cobalt_thread_mode(void);
+
+int cobalt_monitor_init(cobalt_monitor_t *mon,
+			clockid_t clk_id, int flags);
+
+int cobalt_monitor_destroy(cobalt_monitor_t *mon);
+
+int cobalt_monitor_enter(cobalt_monitor_t *mon);
+
+int cobalt_monitor_exit(cobalt_monitor_t *mon);
+
+int cobalt_monitor_wait(cobalt_monitor_t *mon, int event,
+			const struct timespec *ts);
+
+void cobalt_monitor_grant(cobalt_monitor_t *mon,
+			  struct xnthread_user_window *u_window);
+
+int cobalt_monitor_grant_sync(cobalt_monitor_t *mon,
+			      struct xnthread_user_window *u_window);
+
+void cobalt_monitor_grant_all(cobalt_monitor_t *mon);
+
+int cobalt_monitor_grant_all_sync(cobalt_monitor_t *mon);
+
+void cobalt_monitor_drain(cobalt_monitor_t *mon);
+
+int cobalt_monitor_drain_sync(cobalt_monitor_t *mon);
+
+void cobalt_monitor_drain_all(cobalt_monitor_t *mon);
+
+int cobalt_monitor_drain_all_sync(cobalt_monitor_t *mon);
+
+int cobalt_event_init(cobalt_event_t *event,
+		      unsigned int value,
+		      int flags);
+
+int cobalt_event_post(cobalt_event_t *event,
+		      unsigned int bits);
+
+int cobalt_event_wait(cobalt_event_t *event,
+		      unsigned int bits,
+		      unsigned int *bits_r,
+		      int mode,
+		      const struct timespec *timeout);
+
+unsigned long cobalt_event_clear(cobalt_event_t *event,
+				 unsigned int bits);
+
+int cobalt_event_inquire(cobalt_event_t *event,
+			 struct cobalt_event_info *info,
+			 pid_t *waitlist, size_t waitsz);
+
+int cobalt_event_destroy(cobalt_event_t *event);
+
+int cobalt_sem_inquire(sem_t *sem, struct cobalt_sem_info *info,
+		       pid_t *waitlist, size_t waitsz);
+
+int cobalt_sched_weighted_prio(int policy,
+			       const struct sched_param_ex *param_ex);
+
+void cobalt_register_tsd_hook(struct cobalt_tsd_hook *th);
+
+void cobalt_assert_nrt(void);
+
+unsigned long long cobalt_read_tsc(void);
+
+extern int __cobalt_control_bind;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SYS_COBALT_H */
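As one worked example of the services above, waiting on an event group could
look like the sketch below. The bit values are invented, COBALT_EVENT_ANY is
assumed from <cobalt/uapi/event.h> (included above), and treating a NULL
timeout as "wait indefinitely" is an assumption of this illustration.

#define DEMO_EV_READY	0x1
#define DEMO_EV_ERROR	0x2

static int wait_for_ready(cobalt_event_t *event)
{
	unsigned int bits;
	int ret;

	ret = cobalt_event_wait(event, DEMO_EV_READY | DEMO_EV_ERROR,
				&bits, COBALT_EVENT_ANY, NULL);
	if (ret == 0 && (bits & DEMO_EV_ERROR))
		ret = -EIO;

	return ret;
}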
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h
new file mode 100644
index 0000000..553aa56
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/ioctl.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <sys/ioctl.h>
+
+#ifndef _COBALT_SYS_IOCTL_H
+#define _COBALT_SYS_IOCTL_H
+
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, ioctl(int fildes, unsigned int request, ...));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SYS_IOCTL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h
new file mode 100644
index 0000000..75a00da
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/mman.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <sys/mman.h>
+
+#ifndef _COBALT_SYS_MMAN_H
+#define _COBALT_SYS_MMAN_H
+
+#include <sys/types.h>
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(void *, mmap(void *addr, size_t length, int prot, int flags,
+			 int fd, off_t offset));
+
+#if defined(_LARGEFILE64_SOURCE) || defined(_GNU_SOURCE)
+COBALT_DECL(void *, mmap64(void *addr, size_t length, int prot, int flags,
+			   int fd, off64_t offset));
+#endif
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SYS_MMAN_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/select.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/select.h
new file mode 100644
index 0000000..76e8476
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/select.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <sys/select.h>
+
+#ifndef _COBALT_SYS_SELECT_H
+#define _COBALT_SYS_SELECT_H
+
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, select(int __nfds, fd_set *__restrict __readfds,
+			fd_set *__restrict __writefds,
+			fd_set *__restrict __exceptfds,
+			struct timeval *__restrict __timeout));
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SYS_SELECT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h
new file mode 100644
index 0000000..156b493
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/socket.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <sys/socket.h>
+
+#ifndef _COBALT_SYS_SOCKET_H
+#define _COBALT_SYS_SOCKET_H
+
+#include <cobalt/wrappers.h>
+
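+/*
+ * COBALT_DECL() is the wrapping helper from <cobalt/wrappers.h>; for
+ * each prototype below it is expected to declare the Cobalt
+ * (__cobalt_*), original (__real_*) and linker-wrapper (__wrap_*)
+ * entry points of the wrapped libc symbol, so callers can be routed
+ * to the real-time or the regular implementation at link time.
+ */
+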
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, socket(int protocol_family,
+			int socket_type, int protocol));
+
+COBALT_DECL(ssize_t, recvmsg(int fd,
+			     struct msghdr *msg, int flags));
+
+COBALT_DECL(int, recvmmsg(int fd,
+			  struct mmsghdr *msgvec, unsigned int vlen,
+			  unsigned int flags, struct timespec *timeout));
+
+COBALT_DECL(ssize_t, sendmsg(int fd,
+			     const struct msghdr *msg, int flags));
+
+COBALT_DECL(int, sendmmsg(int fd,
+			  struct mmsghdr *msgvec, unsigned int vlen,
+			  unsigned int flags));
+
+COBALT_DECL(ssize_t, recvfrom(int fd, void *buf, size_t len, int flags,
+			      struct sockaddr *from, socklen_t *fromlen));
+
+COBALT_DECL(ssize_t, sendto(int fd, const void *buf, size_t len, int flags,
+			    const struct sockaddr *to, socklen_t tolen));
+
+COBALT_DECL(ssize_t, recv(int fd, void *buf,
+			  size_t len, int flags));
+
+COBALT_DECL(ssize_t, send(int fd, const void *buf,
+			  size_t len, int flags));
+
+COBALT_DECL(int, getsockopt(int fd, int level, int optname,
+			    void *optval, socklen_t *optlen));
+
+COBALT_DECL(int, setsockopt(int fd, int level, int optname,
+			    const void *optval, socklen_t optlen));
+
+COBALT_DECL(int, bind(int fd, const struct sockaddr *my_addr,
+		      socklen_t addrlen));
+
+COBALT_DECL(int, connect(int fd, const struct sockaddr *serv_addr,
+			 socklen_t addrlen));
+
+COBALT_DECL(int, listen(int fd, int backlog));
+
+COBALT_DECL(int, accept(int fd, struct sockaddr *addr,
+			socklen_t *addrlen));
+
+COBALT_DECL(int, getsockname(int fd, struct sockaddr *name,
+			     socklen_t *namelen));
+
+COBALT_DECL(int, getpeername(int fd, struct sockaddr *name,
+			     socklen_t *namelen));
+
+COBALT_DECL(int, shutdown(int fd, int how));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SYS_SOCKET_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/time.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/time.h
new file mode 100644
index 0000000..38f5a34
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/time.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <sys/time.h>
+
+#ifndef _COBALT_SYS_TIME_H
+#define _COBALT_SYS_TIME_H
+
+#include <cobalt/wrappers.h>
+
+struct timezone;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(int, gettimeofday(struct timeval *tv,
+			      struct timezone *tz));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_SYS_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h b/kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h
new file mode 100644
index 0000000..a7df836
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/sys/timerfd.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_SYS_TIMERFD_H
+#define _COBALT_SYS_TIMERFD_H
+
+#pragma GCC system_header
+#include_next <sys/timerfd.h>
+#include <cobalt/wrappers.h>
+#include <cobalt/uapi/time.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+COBALT_DECL(int, timerfd_create(int clockid, int flags));
+
+COBALT_DECL(int, timerfd_settime(int fd, int flags,
+		const struct itimerspec *new_value,
+		struct itimerspec *old_value));
+
+COBALT_DECL(int, timerfd_gettime(int fd, struct itimerspec *curr_value));
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* _COBALT_SYS_TIMERFD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/syslog.h b/kernel/xenomai-v3.2.4/include/cobalt/syslog.h
new file mode 100644
index 0000000..236c8a2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/syslog.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <syslog.h>
+
+#ifndef _COBALT_SYSLOG_H
+#define _COBALT_SYSLOG_H
+
+#include <stdarg.h>
+#include <xeno_config.h>
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+COBALT_DECL(void, syslog(int priority, const char *fmt, ...));
+
+COBALT_DECL(void, vsyslog(int priority,
+			  const char *fmt, va_list ap));
+
+#ifdef CONFIG_XENO_FORTIFY
+
+COBALT_DECL(void, __vsyslog_chk(int priority, int level,
+				const char *fmt, va_list ap));
+
+COBALT_DECL(void, __syslog_chk(int pri, int flag,
+			       const char *fmt, ...));
+
+#endif /* CONFIG_XENO_FORTIFY */
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* !_COBALT_SYSLOG_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/ticks.h b/kernel/xenomai-v3.2.4/include/cobalt/ticks.h
new file mode 100644
index 0000000..e59d86d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/ticks.h
@@ -0,0 +1,94 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_TICKS_H
+#define _COBALT_TICKS_H
+
+#include <stdbool.h>
+#include <cobalt/uapi/kernel/types.h>
+
+/*
+ * Depending on the underlying pipeline support, we may represent time
+ * stamps as a count of nanoseconds (Dovetail), or as values of the
+ * hardware tick counter (aka TSC) available with the platform
+ * (I-pipe). In the latter - legacy - case, we need to convert from
+ * TSC values to nanoseconds and back via scaled maths. This
+ * indirection will go away once support for the I-pipe is removed.
+ */
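+
+/*
+ * Minimal usage sketch (illustrative only): callers normalize raw
+ * time stamps to nanoseconds before doing arithmetic on them, e.g.
+ *
+ *	xnsticks_t delta_ns = cobalt_ticks_to_ns(t1 - t0);
+ *
+ * and convert back with cobalt_ns_to_ticks() when programming a
+ * delay, so the same code runs unchanged over Dovetail (ticks are
+ * nanoseconds) and the legacy I-pipe (ticks are TSC counts).
+ */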
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern unsigned long long __cobalt_tsc_clockfreq;
+
+static inline bool cobalt_use_legacy_tsc(void)
+{
+	return !!__cobalt_tsc_clockfreq;
+}
+
+xnsticks_t __cobalt_tsc_to_ns(xnsticks_t ticks);
+
+xnsticks_t __cobalt_tsc_to_ns_rounded(xnsticks_t ticks);
+
+xnsticks_t __cobalt_ns_to_tsc(xnsticks_t ns);
+
+static inline
+xnsticks_t cobalt_ns_to_ticks(xnsticks_t ns)
+{
+	if (cobalt_use_legacy_tsc())
+		return __cobalt_ns_to_tsc(ns);
+
+	return ns;
+}
+
+static inline
+xnsticks_t cobalt_ticks_to_ns(xnsticks_t ticks)
+{
+	if (cobalt_use_legacy_tsc())
+		return __cobalt_tsc_to_ns(ticks);
+
+	return ticks;
+}
+
+static inline
+xnsticks_t cobalt_ticks_to_ns_rounded(xnsticks_t ticks)
+{
+	if (cobalt_use_legacy_tsc())
+		return __cobalt_tsc_to_ns_rounded(ticks);
+
+	return ticks;
+}
+
+unsigned long long cobalt_divrem_billion(unsigned long long value,
+					 unsigned long *rem);
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_TICKS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/time.h b/kernel/xenomai-v3.2.4/include/cobalt/time.h
new file mode 100644
index 0000000..e3f355c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/time.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <time.h>
+
+#ifndef _COBALT_TIME_H
+#define _COBALT_TIME_H
+
+/* Re-read in case we came from a selective __need* block. */
+#include_next <time.h>
+#include <cobalt/wrappers.h>
+#include <cobalt/uapi/time.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+struct timex;
+
+COBALT_DECL(int, clock_getres(clockid_t clock_id,
+			      struct timespec *tp));
+
+COBALT_DECL(int, clock_gettime(clockid_t clock_id,
+			       struct timespec *tp));
+
+COBALT_DECL(int, clock_settime(clockid_t clock_id,
+			       const struct timespec *tp));
+
+COBALT_DECL(int, clock_adjtime(clockid_t clock_id,
+			       struct timex *tx));
+
+COBALT_DECL(int, clock_nanosleep(clockid_t clock_id,
+				 int flags,
+				 const struct timespec *rqtp,
+				 struct timespec *rmtp));
+
+COBALT_DECL(time_t, time(time_t *t));
+
+COBALT_DECL(int, nanosleep(const struct timespec *rqtp,
+			   struct timespec *rmtp));
+
+COBALT_DECL(int, timer_create(clockid_t clockid,
+			      const struct sigevent *__restrict__ evp,
+			      timer_t * __restrict__ timerid));
+
+COBALT_DECL(int, timer_delete(timer_t timerid));
+
+COBALT_DECL(int, timer_settime(timer_t timerid,
+			       int flags,
+			       const struct itimerspec *value,
+			       struct itimerspec *ovalue));
+
+COBALT_DECL(int, timer_gettime(timer_t timerid,
+			       struct itimerspec *value));
+
+COBALT_DECL(int, timer_getoverrun(timer_t timerid));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/trace.h
new file mode 100644
index 0000000..b2f9d95
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/trace.h
@@ -0,0 +1,52 @@
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_TRACE_H
+#define _COBALT_TRACE_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#include <stdarg.h>
+
+int xntrace_max_begin(unsigned long v);
+
+int xntrace_max_end(unsigned long v);
+
+int xntrace_max_reset(void);
+
+int xntrace_user_start(void);
+
+int xntrace_user_stop(unsigned long v);
+
+int xntrace_user_freeze(unsigned long v, int once);
+
+int xntrace_special(unsigned char id, unsigned long v);
+
+int xntrace_special_u64(unsigned char id, unsigned long long v);
+
+void xntrace_latpeak_freeze(int delay);
+
+int xnftrace_vprintf(const char *format, va_list args);
+int xnftrace_printf(const char *format, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_TRACE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/tunables.h b/kernel/xenomai-v3.2.4/include/cobalt/tunables.h
new file mode 100644
index 0000000..67ac77a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/tunables.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_TUNABLES_H
+#define _COBALT_TUNABLES_H
+
+#include <boilerplate/tunables.h>
+#include <sys/cobalt.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern int __cobalt_main_prio;
+
+extern int __cobalt_print_bufsz;
+
+extern int __cobalt_print_bufcount;
+
+extern int __cobalt_print_syncdelay;
+
+static inline define_config_tunable(main_prio, int, prio)
+{
+	__cobalt_main_prio = prio;
+}
+
+static inline read_config_tunable(main_prio, int)
+{
+	return __cobalt_main_prio;
+}
+
+static inline define_config_tunable(print_buffer_size, int, size)
+{
+	__cobalt_print_bufsz = size;
+}
+
+static inline read_config_tunable(print_buffer_size, int)
+{
+	return __cobalt_print_bufsz;
+}
+
+static inline define_config_tunable(print_buffer_count, int, count)
+{
+	__cobalt_print_bufcount = count;
+}
+
+static inline read_config_tunable(print_buffer_count, int)
+{
+	return __cobalt_print_bufcount;
+}
+
+static inline define_config_tunable(print_sync_delay, int, delay_ms)
+{
+	__cobalt_print_syncdelay = delay_ms;
+}
+
+static inline read_config_tunable(print_sync_delay, int)
+{
+	return __cobalt_print_syncdelay;
+}
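+
+/*
+ * Note (descriptive only): each define_config_tunable() /
+ * read_config_tunable() pair above is expected to expand to the
+ * set/read handler which the tunables framework from
+ * <boilerplate/tunables.h> invokes when the corresponding tunable is
+ * set or queried at init time.
+ */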
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_TUNABLES_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am
new file mode 100644
index 0000000..d887213
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/Makefile.am
@@ -0,0 +1,18 @@
+includesubdir = $(includedir)/cobalt/uapi
+
+includesub_HEADERS =	\
+	cond.h		\
+	corectl.h	\
+	event.h		\
+	monitor.h	\
+	mutex.h		\
+	sched.h		\
+	sem.h		\
+	signal.h	\
+	thread.h	\
+	time.h
+
+noinst_HEADERS =	\
+	syscall.h
+
+SUBDIRS = asm-generic kernel
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am
new file mode 100644
index 0000000..9e7b0d4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/Makefile.am
@@ -0,0 +1,5 @@
+
+noinst_HEADERS =	\
+	arith.h		\
+	features.h	\
+	syscall.h
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h
new file mode 100644
index 0000000..d01d01e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/arith.h
@@ -0,0 +1,381 @@
+/**
+ *   Generic arithmetic/conversion routines.
+ *   Copyright &copy; 2005 Stelian Pop.
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_ARITH_H
+#define _COBALT_UAPI_ASM_GENERIC_ARITH_H
+
+#ifndef xnarch_u64tou32
+#define xnarch_u64tou32(ull, h, l) ({		\
+      union {					\
+	      unsigned long long _ull;		\
+	      struct endianstruct _s;		\
+      } _u;					\
+      _u._ull = (ull);				\
+      (h) = _u._s._h;				\
+      (l) = _u._s._l;				\
+})
+#endif /* !xnarch_u64tou32 */
+
+#ifndef xnarch_u64fromu32
+#define xnarch_u64fromu32(h, l) ({		\
+	union {					\
+		unsigned long long _ull;	\
+		struct endianstruct _s;		\
+	} _u;					\
+	_u._s._h = (h);				\
+	_u._s._l = (l);				\
+	_u._ull;				\
+})
+#endif /* !xnarch_u64fromu32 */
+
+#ifndef xnarch_ullmul
+static inline __attribute__((__const__)) unsigned long long
+xnarch_generic_ullmul(const unsigned m0, const unsigned m1)
+{
+	return (unsigned long long) m0 * m1;
+}
+#define xnarch_ullmul(m0,m1) xnarch_generic_ullmul((m0),(m1))
+#endif /* !xnarch_ullmul */
+
+#ifndef xnarch_ulldiv
+static inline unsigned long long xnarch_generic_ulldiv (unsigned long long ull,
+							const unsigned uld,
+							unsigned long *const rp)
+{
+	const unsigned r = do_div(ull, uld);
+
+	if (rp)
+		*rp = r;
+
+	return ull;
+}
+#define xnarch_ulldiv(ull,uld,rp) xnarch_generic_ulldiv((ull),(uld),(rp))
+#endif /* !xnarch_ulldiv */
+
+#ifndef xnarch_uldivrem
+#define xnarch_uldivrem(ull,ul,rp) ((unsigned) xnarch_ulldiv((ull),(ul),(rp)))
+#endif /* !xnarch_uldivrem */
+
+#ifndef xnarch_divmod64
+static inline unsigned long long
+xnarch_generic_divmod64(unsigned long long a,
+			unsigned long long b,
+			unsigned long long *rem)
+{
+	unsigned long long q;
+#if defined(__KERNEL__) && BITS_PER_LONG < 64
+	unsigned long long
+		xnarch_generic_full_divmod64(unsigned long long a,
+					     unsigned long long b,
+					     unsigned long long *rem);
+	if (b <= 0xffffffffULL) {
+		unsigned long r;
+		q = xnarch_ulldiv(a, b, &r);
+		if (rem)
+			*rem = r;
+	} else {
+		if (a < b) {
+			if (rem)
+				*rem = a;
+			return 0;
+		}
+
+		return xnarch_generic_full_divmod64(a, b, rem);
+	}
+#else /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	q = a / b;
+	if (rem)
+		*rem = a % b;
+#endif  /* !(__KERNEL__ && BITS_PER_LONG < 64) */
+	return q;
+}
+#define xnarch_divmod64(a,b,rp) xnarch_generic_divmod64((a),(b),(rp))
+#endif /* !xnarch_divmod64 */
+
+#ifndef xnarch_imuldiv
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv(int i,
+								    int mult,
+								    int div)
+{
+	/* (int)i = (unsigned long long)i*(unsigned)(mult)/(unsigned)div. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull, div, NULL);
+}
+#define xnarch_imuldiv(i,m,d) xnarch_generic_imuldiv((i),(m),(d))
+#endif /* !xnarch_imuldiv */
+
+#ifndef xnarch_imuldiv_ceil
+static inline __attribute__((__const__)) int xnarch_generic_imuldiv_ceil(int i,
+									 int mult,
+									 int div)
+{
+	/* Same as xnarch_generic_imuldiv, rounding up. */
+	const unsigned long long ull = xnarch_ullmul(i, mult);
+	return xnarch_uldivrem(ull + (unsigned)div - 1, div, NULL);
+}
+#define xnarch_imuldiv_ceil(i,m,d) xnarch_generic_imuldiv_ceil((i),(m),(d))
+#endif /* !xnarch_imuldiv_ceil */
+
+/* Division of an unsigned 96-bit value ((h << 32) + l) by an unsigned
+   32-bit divisor. Building block for llimd. Without the const
+   qualifiers, gcc reloads registers after each call to uldivrem. */
+static inline unsigned long long
+xnarch_generic_div96by32(const unsigned long long h,
+			 const unsigned l,
+			 const unsigned d,
+			 unsigned long *const rp)
+{
+	unsigned long rh;
+	const unsigned qh = xnarch_uldivrem(h, d, &rh);
+	const unsigned long long t = xnarch_u64fromu32(rh, l);
+	const unsigned ql = xnarch_uldivrem(t, d, rp);
+
+	return xnarch_u64fromu32(qh, ql);
+}
+
+#ifndef xnarch_llimd
+static inline __attribute__((__const__))
+unsigned long long xnarch_generic_ullimd(const unsigned long long op,
+					 const unsigned m,
+					 const unsigned d)
+{
+	unsigned int oph, opl, tlh, tll;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_ullmul(oph, m);
+	th += tlh;
+
+	return xnarch_generic_div96by32(th, tll, d, NULL);
+}
+
+static inline __attribute__((__const__)) long long
+xnarch_generic_llimd (long long op, unsigned m, unsigned d)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_generic_ullimd(op, m, d);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_llimd(ll,m,d) xnarch_generic_llimd((ll),(m),(d))
+#endif /* !xnarch_llimd */
+
+#ifndef xnarch_u96shift
+#define xnarch_u96shift(h, m, l, s) ({		\
+	unsigned int _l = (l);			\
+	unsigned int _m = (m);			\
+	unsigned int _s = (s);			\
+	_l >>= _s;				\
+	_l |= (_m << (32 - _s));		\
+	_m >>= _s;				\
+	_m |= ((h) << (32 - _s));		\
+	xnarch_u64fromu32(_m, _l);		\
+})
+#endif /* !xnarch_u96shift */
+
+static inline long long xnarch_llmi(int i, int j)
+{
+	/* Fast 32x32->64 signed multiplication */
+	return (long long) i * j;
+}
+
+#ifndef xnarch_llmulshft
+/* Fast scaled-math-based replacement for long long multiply-divide */
+static inline long long
+xnarch_generic_llmulshft(const long long op,
+			  const unsigned m,
+			  const unsigned s)
+{
+	unsigned int oph, opl, tlh, tll, thh, thl;
+	unsigned long long th, tl;
+
+	xnarch_u64tou32(op, oph, opl);
+	tl = xnarch_ullmul(opl, m);
+	xnarch_u64tou32(tl, tlh, tll);
+	th = xnarch_llmi(oph, m);
+	th += tlh;
+	xnarch_u64tou32(th, thh, thl);
+
+	return xnarch_u96shift(thh, thl, tll, s);
+}
+#define xnarch_llmulshft(ll, m, s) xnarch_generic_llmulshft((ll), (m), (s))
+#endif /* !xnarch_llmulshft */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+/* Representation of a 32 bits fraction. */
+struct xnarch_u32frac {
+	unsigned long long frac;
+	unsigned integ;
+};
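+
+/*
+ * The pair encodes the ratio m/d as integ + frac/2^64: integ holds
+ * the integer part, frac the fractional part scaled by 2^64, so
+ * scaling by m/d afterwards only takes shifts and 32x32->64
+ * multiplies instead of divisions.
+ */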
+
+static inline void xnarch_init_u32frac(struct xnarch_u32frac *const f,
+				       const unsigned m,
+				       const unsigned d)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in
+	 * when d is known at compile-time. This function's
+	 * performance is not critical, it only runs at init time.
+	 */
+	volatile unsigned vol_d = d;
+	f->integ = m / d;
+	f->frac = xnarch_generic_div96by32
+		(xnarch_u64fromu32(m % d, 0), 0, vol_d, NULL);
+}
+
+#ifndef xnarch_nodiv_imuldiv
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv(unsigned op, const struct xnarch_u32frac f)
+{
+	return (xnarch_ullmul(op, f.frac >> 32) >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv(op, f) xnarch_generic_nodiv_imuldiv((op),(f))
+#endif /* xnarch_nodiv_imuldiv */
+
+#ifndef xnarch_nodiv_imuldiv_ceil
+static inline __attribute__((__const__)) unsigned
+xnarch_generic_nodiv_imuldiv_ceil(unsigned op, const struct xnarch_u32frac f)
+{
+	unsigned long long full = xnarch_ullmul(op, f.frac >> 32) + ~0U;
+	return (full >> 32) + f.integ * op;
+}
+#define xnarch_nodiv_imuldiv_ceil(op, f) \
+	xnarch_generic_nodiv_imuldiv_ceil((op),(f))
+#endif /* xnarch_nodiv_imuldiv_ceil */
+
+#ifndef xnarch_nodiv_ullimd
+
+#ifndef xnarch_add96and64
+#error "xnarch_add96and64 must be implemented."
+#endif
+
+static inline __attribute__((__const__)) unsigned long long
+xnarch_mul64by64_high(const unsigned long long op, const unsigned long long m)
+{
+	/* Compute high 64 bits of multiplication 64 bits x 64 bits. */
+	register unsigned long long t0, t1, t2, t3;
+	register unsigned int oph, opl, mh, ml, t0h, t0l, t1h, t1l, t2h, t2l, t3h, t3l;
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(m, mh, ml);
+	t0 = xnarch_ullmul(opl, ml);
+	xnarch_u64tou32(t0, t0h, t0l);
+	t3 = xnarch_ullmul(oph, mh);
+	xnarch_u64tou32(t3, t3h, t3l);
+	xnarch_add96and64(t3h, t3l, t0h, 0, t0l >> 31);
+	t1 = xnarch_ullmul(oph, ml);
+	xnarch_u64tou32(t1, t1h, t1l);
+	xnarch_add96and64(t3h, t3l, t0h, t1h, t1l);
+	t2 = xnarch_ullmul(opl, mh);
+	xnarch_u64tou32(t2, t2h, t2l);
+	xnarch_add96and64(t3h, t3l, t0h, t2h, t2l);
+
+	return xnarch_u64fromu32(t3h, t3l);
+}
+
+static inline unsigned long long
+xnarch_generic_nodiv_ullimd(const unsigned long long op,
+			    const unsigned long long frac,
+			    unsigned int integ)
+{
+	return xnarch_mul64by64_high(op, frac) + integ * op;
+}
+#define xnarch_nodiv_ullimd(op, f, i)  xnarch_generic_nodiv_ullimd((op),(f), (i))
+#endif /* !xnarch_nodiv_ullimd */
+
+#ifndef xnarch_nodiv_llimd
+static inline __attribute__((__const__)) long long
+xnarch_generic_nodiv_llimd(long long op, unsigned long long frac,
+			   unsigned int integ)
+{
+	long long ret;
+	int sign = 0;
+
+	if (op < 0LL) {
+		sign = 1;
+		op = -op;
+	}
+	ret = xnarch_nodiv_ullimd(op, frac, integ);
+
+	return sign ? -ret : ret;
+}
+#define xnarch_nodiv_llimd(ll,frac,integ) xnarch_generic_nodiv_llimd((ll),(frac),(integ))
+#endif /* !xnarch_nodiv_llimd */
+
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+static inline void xnarch_init_llmulshft(const unsigned m_in,
+					 const unsigned d_in,
+					 unsigned *m_out,
+					 unsigned *s_out)
+{
+	/*
+	 * Prevent clever compiler optimizations from kicking in
+	 * when the divisor is known at compile-time. This function's
+	 * performance is not critical, it only runs at init time.
+	 */
+	volatile unsigned int vol_d = d_in;
+	unsigned long long mult;
+
+	*s_out = 31;
+	while (1) {
+		mult = ((unsigned long long)m_in) << *s_out;
+		do_div(mult, vol_d);
+		if (mult <= 0x7FFFFFFF)
+			break;
+		(*s_out)--;
+	}
+	*m_out = (unsigned int)mult;
+}
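+
+/*
+ * Worked example (hypothetical figures): converting nanoseconds to
+ * ticks of a 400 MHz time base means multiplying by 0.4. A one-time
+ * xnarch_init_llmulshft(400000000, 1000000000, &m, &s) picks the
+ * largest shift s (31 here) such that m = 0.4 * 2^s still fits in
+ * 31 bits, then each xnarch_llmulshft(ns, m, s) approximates
+ * ns * 2 / 5 with a single multiply and shift.
+ */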
+
+#define xnarch_ullmod(ull,uld,rem)   ({ xnarch_ulldiv(ull,uld,rem); (*rem); })
+#define xnarch_uldiv(ull, d)         xnarch_uldivrem(ull, d, NULL)
+#define xnarch_ulmod(ull, d)         ({ unsigned long _rem;	\
+					xnarch_uldivrem(ull,d,&_rem); _rem; })
+
+#define xnarch_div64(a,b)            xnarch_divmod64((a),(b),NULL)
+#define xnarch_mod64(a,b)            ({ unsigned long long _rem; \
+					xnarch_divmod64((a),(b),&_rem); _rem; })
+
+#endif /* _COBALT_UAPI_ASM_GENERIC_ARITH_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h
new file mode 100644
index 0000000..8a4927c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/features.h
@@ -0,0 +1,122 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+#define _COBALT_UAPI_ASM_GENERIC_FEATURES_H
+
+#include <linux/types.h>
+
+#define XNFEAT_STRING_LEN 64
+
+struct cobalt_featinfo {
+	/** Real-time clock frequency */
+	__u64 clock_freq;
+	/** Offset of nkvdso in the sem heap. */
+	__u32 vdso_offset;
+	/** ABI revision level. */
+	__u32 feat_abirev;
+	/** Available feature set. */
+	__u32 feat_all;
+	/** Mandatory features (when requested). */
+	__u32 feat_man;
+	/** Requested feature set. */
+	__u32 feat_req;
+	/** Missing features. */
+	__u32 feat_mis;
+	char feat_all_s[XNFEAT_STRING_LEN];
+	char feat_man_s[XNFEAT_STRING_LEN];
+	char feat_req_s[XNFEAT_STRING_LEN];
+	char feat_mis_s[XNFEAT_STRING_LEN];
+	/* Architecture-specific features. */
+	struct cobalt_featinfo_archdep feat_arch;
+};
+
+#define __xn_feat_smp         0x80000000
+#define __xn_feat_nosmp       0x40000000
+#define __xn_feat_fastsynch   0x20000000
+#define __xn_feat_nofastsynch 0x10000000
+#define __xn_feat_control     0x08000000
+#define __xn_feat_prioceiling 0x04000000
+
+#ifdef CONFIG_SMP
+#define __xn_feat_smp_mask __xn_feat_smp
+#else
+#define __xn_feat_smp_mask __xn_feat_nosmp
+#endif
+
+/*
+ * Revisit: all archs currently support fast locking, and there is no
+ * reason for any future port not to provide this. This will be
+ * written in stone at the next ABI update, when fastsynch support is
+ * dropped from the optional feature set.
+ */
+#define __xn_feat_fastsynch_mask __xn_feat_fastsynch
+
+/* List of generic features kernel or userland may support */
+#define __xn_feat_generic_mask			\
+	(__xn_feat_smp_mask		|	\
+	 __xn_feat_fastsynch_mask 	|	\
+	 __xn_feat_prioceiling)
+
+/*
+ * List of features both sides have to agree on: If userland supports
+ * it, the kernel has to provide it, too. This means backward
+ * compatibility between older userland and newer kernel may be
+ * supported for those features, but forward compatibility between
+ * newer userland and older kernel cannot.
+ */
+#define __xn_feat_generic_man_mask		\
+	(__xn_feat_fastsynch		|	\
+	 __xn_feat_nofastsynch		|	\
+	 __xn_feat_nosmp		|	\
+	 __xn_feat_prioceiling)
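+
+/*
+ * Illustration (a sketch, not the actual binding code): with
+ * feat_req the set userland was built for and feat_all the set the
+ * kernel provides, the mismatch a bind request has to reject is
+ * (feat_req & ~feat_all) & __xn_feat_generic_man_mask, which is
+ * what feat_mis reports in struct cobalt_featinfo.
+ */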
+
+static inline
+const char *get_generic_feature_label(unsigned int feature)
+{
+	switch (feature) {
+	case __xn_feat_smp:
+		return "smp";
+	case __xn_feat_nosmp:
+		return "nosmp";
+	case __xn_feat_fastsynch:
+		return "fastsynch";
+	case __xn_feat_nofastsynch:
+		return "nofastsynch";
+	case __xn_feat_control:
+		return "control";
+	case __xn_feat_prioceiling:
+		return "prioceiling";
+	default:
+		return 0;
+	}
+}
+
+static inline int check_abi_revision(unsigned long abirev)
+{
+	return abirev == XENOMAI_ABI_REV;
+}
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h
new file mode 100644
index 0000000..b38b241
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/asm-generic/syscall.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+#define _COBALT_UAPI_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <asm/xenomai/uapi/features.h>
+#include <asm/xenomai/uapi/syscall.h>
+
+#define __COBALT_SYSCALL_BIT	0x10000000
+
+struct cobalt_bindreq {
+	/** Features userland requires. */
+	__u32 feat_req;
+	/** ABI revision userland uses. */
+	__u32 abi_rev;
+	/** Features the Cobalt core provides. */
+	struct cobalt_featinfo feat_ret;
+};
+
+#define COBALT_SECONDARY  0
+#define COBALT_PRIMARY    1
+
+#endif /* !_COBALT_UAPI_ASM_GENERIC_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h
new file mode 100644
index 0000000..b1106c7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/cond.h
@@ -0,0 +1,39 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_COND_H
+#define _COBALT_UAPI_COND_H
+
+#include <cobalt/uapi/mutex.h>
+
+#define COBALT_COND_MAGIC 0x86860505
+
+struct cobalt_cond_state {
+	__u32 pending_signals;
+	__u32 mutex_state_offset;
+};
+
+union cobalt_cond_union {
+	pthread_cond_t native_cond;
+	struct cobalt_cond_shadow {
+		__u32 magic;
+		__u32 state_offset;
+		xnhandle_t handle;
+	} shadow_cond;
+};
+
+#endif /* !_COBALT_UAPI_COND_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h
new file mode 100644
index 0000000..98d989d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/corectl.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_CORECTL_H
+#define _COBALT_UAPI_CORECTL_H
+
+#define _CC_COBALT_GET_VERSION		0
+#define _CC_COBALT_GET_NR_PIPES		1
+#define _CC_COBALT_GET_NR_TIMERS	2
+
+#define _CC_COBALT_GET_DEBUG			3
+#   define _CC_COBALT_DEBUG_ASSERT		1
+#   define _CC_COBALT_DEBUG_CONTEXT		2
+#   define _CC_COBALT_DEBUG_LOCKING		4
+#   define _CC_COBALT_DEBUG_USER		8
+#   define _CC_COBALT_DEBUG_MUTEX_RELAXED	16
+#   define _CC_COBALT_DEBUG_MUTEX_SLEEP		32
+/* bit 6 (64) formerly used for DEBUG_POSIX_SYNCHRO */
+#   define _CC_COBALT_DEBUG_LEGACY		128
+#   define _CC_COBALT_DEBUG_TRACE_RELAX		256
+#   define _CC_COBALT_DEBUG_NET			512
+
+#define _CC_COBALT_GET_POLICIES		4
+#   define _CC_COBALT_SCHED_FIFO	1
+#   define _CC_COBALT_SCHED_RR		2
+#   define _CC_COBALT_SCHED_WEAK	4
+#   define _CC_COBALT_SCHED_SPORADIC	8
+#   define _CC_COBALT_SCHED_QUOTA	16
+#   define _CC_COBALT_SCHED_TP		32
+
+#define _CC_COBALT_GET_WATCHDOG		5
+#define _CC_COBALT_GET_CORE_STATUS	6
+#define _CC_COBALT_START_CORE		7
+#define _CC_COBALT_STOP_CORE		8
+
+#define _CC_COBALT_GET_NET_CONFIG	9
+#   define _CC_COBALT_NET		0x00000001
+#   define _CC_COBALT_NET_ETH_P_ALL	0x00000002
+#   define _CC_COBALT_NET_IPV4		0x00000004
+#   define _CC_COBALT_NET_ICMP		0x00000008
+#   define _CC_COBALT_NET_NETROUTING	0x00000010
+#   define _CC_COBALT_NET_ROUTER	0x00000020
+#   define _CC_COBALT_NET_UDP		0x00000040
+#   define _CC_COBALT_NET_AF_PACKET	0x00000080
+#   define _CC_COBALT_NET_TDMA		0x00000100
+#   define _CC_COBALT_NET_NOMAC		0x00000200
+#   define _CC_COBALT_NET_CFG		0x00000400
+#   define _CC_COBALT_NET_CAP		0x00000800
+#   define _CC_COBALT_NET_PROXY		0x00001000
+
+
+enum cobalt_run_states {
+	COBALT_STATE_DISABLED,
+	COBALT_STATE_RUNNING,
+	COBALT_STATE_STOPPED,
+	COBALT_STATE_TEARDOWN,
+	COBALT_STATE_WARMUP,
+};
+
+#endif /* !_COBALT_UAPI_CORECTL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h
new file mode 100644
index 0000000..8710e8e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/event.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_EVENT_H
+#define _COBALT_UAPI_EVENT_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_event_state {
+	__u32 value;
+	__u32 flags;
+#define COBALT_EVENT_PENDED  0x1
+	__u32 nwaiters;
+};
+
+struct cobalt_event;
+
+/* Creation flags. */
+#define COBALT_EVENT_FIFO    0x0
+#define COBALT_EVENT_PRIO    0x1
+#define COBALT_EVENT_SHARED  0x2
+
+/* Wait mode. */
+#define COBALT_EVENT_ALL  0x0
+#define COBALT_EVENT_ANY  0x1
+
+struct cobalt_event_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+};
+
+struct cobalt_event_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+typedef struct cobalt_event_shadow cobalt_event_t;
+
+#endif /* !_COBALT_UAPI_EVENT_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am
new file mode 100644
index 0000000..12e1b37
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/Makefile.am
@@ -0,0 +1,12 @@
+includesubdir = $(includedir)/cobalt/uapi/kernel
+
+includesub_HEADERS =	\
+	heap.h		\
+	limits.h	\
+	pipe.h		\
+	synch.h		\
+	thread.h	\
+	trace.h		\
+	types.h		\
+	urw.h		\
+	vdso.h
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h
new file mode 100644
index 0000000..75e7289
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/heap.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_HEAP_H
+#define _COBALT_UAPI_KERNEL_HEAP_H
+
+#include <linux/types.h>
+
+#define COBALT_MEMDEV_PRIVATE  "memdev-private"
+#define COBALT_MEMDEV_SHARED   "memdev-shared"
+#define COBALT_MEMDEV_SYS      "memdev-sys"
+
+struct cobalt_memdev_stat {
+	__u32 size;
+	__u32 free;
+};
+
+#define MEMDEV_RTIOC_STAT	_IOR(RTDM_CLASS_MEMORY, 0, struct cobalt_memdev_stat)
+
+#endif /* !_COBALT_UAPI_KERNEL_HEAP_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h
new file mode 100644
index 0000000..22017c5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/limits.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_LIMITS_H
+#define _COBALT_UAPI_KERNEL_LIMITS_H
+
+#define XNOBJECT_NAME_LEN 32
+
+#endif /* !_COBALT_UAPI_KERNEL_LIMITS_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h
new file mode 100644
index 0000000..688ee0c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/pipe.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_PIPE_H
+#define _COBALT_UAPI_KERNEL_PIPE_H
+
+#define	XNPIPE_IOCTL_BASE	'p'
+
+#define XNPIPEIOC_GET_NRDEV	_IOW(XNPIPE_IOCTL_BASE, 0, int)
+#define XNPIPEIOC_IFLUSH	_IO(XNPIPE_IOCTL_BASE, 1)
+#define XNPIPEIOC_OFLUSH	_IO(XNPIPE_IOCTL_BASE, 2)
+#define XNPIPEIOC_FLUSH		XNPIPEIOC_OFLUSH
+#define XNPIPEIOC_SETSIG	_IO(XNPIPE_IOCTL_BASE, 3)
+
+#define XNPIPE_NORMAL	0x0
+#define XNPIPE_URGENT	0x1
+
+#define XNPIPE_IFLUSH	0x1
+#define XNPIPE_OFLUSH	0x2
+
+#define XNPIPE_MINOR_AUTO  (-1)
+
+#endif /* !_COBALT_UAPI_KERNEL_PIPE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h
new file mode 100644
index 0000000..a7cb9fb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/synch.h
@@ -0,0 +1,84 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_SYNCH_H
+#define _COBALT_UAPI_KERNEL_SYNCH_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/* Creation flags */
+#define XNSYNCH_FIFO    0x0
+#define XNSYNCH_PRIO    0x1
+#define XNSYNCH_PI      0x2
+#define XNSYNCH_DREORD  0x4
+#define XNSYNCH_OWNER   0x8
+#define XNSYNCH_PP      0x10
+
+/* Fast lock API */
+static inline int xnsynch_fast_is_claimed(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_FLCLAIM) != 0;
+}
+
+static inline xnhandle_t xnsynch_fast_claimed(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCLAIM;
+}
+
+static inline xnhandle_t xnsynch_fast_ceiling(xnhandle_t handle)
+{
+	return handle | XNSYNCH_FLCEIL;
+}
+
+static inline int
+xnsynch_fast_owner_check(atomic_t *fastlock, xnhandle_t ownerh)
+{
+	return (xnhandle_get_id((xnhandle_t)atomic_read(fastlock)) == ownerh) ?
+		0 : -EPERM;
+}
+
+static inline
+int xnsynch_fast_acquire(atomic_t *fastlock, xnhandle_t new_ownerh)
+{
+	xnhandle_t h;
+
+	h = (xnhandle_t)atomic_cmpxchg(fastlock, XN_NO_HANDLE, new_ownerh);
+	if (h != XN_NO_HANDLE) {
+		if (xnhandle_get_id(h) == new_ownerh)
+			return -EBUSY;
+
+		return -EAGAIN;
+	}
+
+	return 0;
+}
+
+static inline
+int xnsynch_fast_release(atomic_t *fastlock, xnhandle_t cur_ownerh)
+{
+	return (xnhandle_t)atomic_cmpxchg(fastlock, cur_ownerh, XN_NO_HANDLE)
+		== cur_ownerh;
+}
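+
+/*
+ * Typical fast path (sketch only; the do_syscall_*() placeholders
+ * are hypothetical, the actual slow path lives in libcobalt and the
+ * core): an uncontended lock is acquired and released from userland
+ * without entering the kernel, e.g.
+ *
+ *	if (xnsynch_fast_acquire(fastlock, threadh))
+ *		do_syscall_to_wait();
+ *	...
+ *	if (!xnsynch_fast_release(fastlock, threadh))
+ *		do_syscall_to_wake_waiters();
+ */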
+
+/* Local/shared property */
+static inline int xnsynch_is_shared(xnhandle_t handle)
+{
+	return (handle & XNSYNCH_PSHARED) != 0;
+}
+
+#endif /* !_COBALT_UAPI_KERNEL_SYNCH_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h
new file mode 100644
index 0000000..664def0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/thread.h
@@ -0,0 +1,115 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_THREAD_H
+#define _COBALT_UAPI_KERNEL_THREAD_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_states Thread state flags
+ * @brief Bits reporting permanent or transient states of threads
+ * @{
+ */
+
+/* State flags (shared) */
+
+#define XNSUSP    0x00000001 /**< Suspended. */
+#define XNPEND    0x00000002 /**< Sleep-wait for a resource. */
+#define XNDELAY   0x00000004 /**< Delayed */
+#define XNREADY   0x00000008 /**< Linked to the ready queue. */
+#define XNDORMANT 0x00000010 /**< Not started yet */
+#define XNZOMBIE  0x00000020 /**< Zombie thread in deletion process */
+#define XNMAPPED  0x00000040 /**< Thread is mapped to a linux task */
+#define XNRELAX   0x00000080 /**< Relaxed shadow thread (blocking bit) */
+#define XNHELD    0x00000200 /**< Thread is held to process emergency. */
+#define XNBOOST   0x00000400 /**< PI/PP boost undergoing */
+#define XNSSTEP   0x00000800 /**< Single-stepped by debugger */
+#define XNLOCK    0x00001000 /**< Scheduler lock control (pseudo-bit, not in ->state) */
+#define XNRRB     0x00002000 /**< Undergoes a round-robin scheduling */
+#define XNWARN    0x00004000 /**< Issue SIGDEBUG on error detection */
+#define XNFPU     0x00008000 /**< Thread uses FPU */
+#define XNROOT    0x00010000 /**< Root thread (that is, Linux/IDLE) */
+#define XNWEAK    0x00020000 /**< Non real-time shadow (from the WEAK class) */
+#define XNUSER    0x00040000 /**< Shadow thread running in userland */
+#define XNJOINED  0x00080000 /**< Another thread waits for joining this thread */
+#define XNTRAPLB  0x00100000 /**< Trap lock break (i.e. may not sleep with sched lock) */
+#define XNDEBUG   0x00200000 /**< User-level debugging enabled */
+#define XNDBGSTOP 0x00400000 /**< Stopped for synchronous debugging */
+
+/** @} */
+
+/**
+ * @ingroup cobalt_core_thread
+ * @defgroup cobalt_core_thread_info Thread information flags
+ * @brief Bits reporting events notified to threads
+ * @{
+ */
+
+/* Information flags (shared) */
+
+#define XNTIMEO   0x00000001 /**< Woken up due to a timeout condition */
+#define XNRMID    0x00000002 /**< Pending on a removed resource */
+#define XNBREAK   0x00000004 /**< Forcibly awakened from a wait state */
+#define XNKICKED  0x00000008 /**< Forced out of primary mode */
+#define XNWAKEN   0x00000010 /**< Thread woken up upon resource availability */
+#define XNROBBED  0x00000020 /**< Robbed from resource ownership */
+#define XNCANCELD 0x00000040 /**< Cancellation request is pending */
+#define XNPIALERT 0x00000080 /**< Priority inversion alert (SIGDEBUG sent) */
+#define XNSCHEDP  0x00000100 /**< schedparam propagation is pending */
+#define XNCONTHI  0x00000200 /**< Continue in primary mode after debugging */
+
+/* Local information flags (private to current thread) */
+
+#define XNMOVED   0x00000001 /**< CPU migration in primary mode occurred */
+#define XNLBALERT 0x00000002 /**< Scheduler lock break alert (SIGDEBUG sent) */
+#define XNDESCENT 0x00000004 /**< Adaptive transitioning to secondary mode */
+#define XNSYSRST  0x00000008 /**< Thread awaiting syscall restart after signal */
+#define XNHICCUP  0x00000010 /**< Just left from ptracing */
+
+/** @} */
+
+/*
+ * Must follow strictly the declaration order of the state flags
+ * defined above. Status symbols are defined as follows:
+ *
+ * 'S' -> Forcibly suspended.
+ * 'w'/'W' -> Waiting for a resource, with or without timeout.
+ * 'D' -> Delayed (without any other wait condition).
+ * 'R' -> Runnable.
+ * 'U' -> Unstarted or dormant.
+ * 'X' -> Relaxed shadow.
+ * 'H' -> Held in emergency.
+ * 'b' -> Priority boost undergoing.
+ * 'T' -> Ptraced and stopped.
+ * 'l' -> Locks scheduler.
+ * 'r' -> Undergoes round-robin.
+ * 't' -> Runtime mode errors notified.
+ * 'L' -> Lock breaks trapped.
+ * 's' -> Ptraced, stopped synchronously.
+ */
+#define XNTHREAD_STATE_LABELS  "SWDRU..X.HbTlrt.....L.s"
+
+struct xnthread_user_window {
+	__u32 state;
+	__u32 info;
+	__u32 grant_value;
+	__u32 pp_pending;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h
new file mode 100644
index 0000000..a1add30
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/trace.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TRACE_H
+#define _COBALT_UAPI_KERNEL_TRACE_H
+
+#define __xntrace_op_max_begin		0
+#define __xntrace_op_max_end		1
+#define __xntrace_op_max_reset		2
+#define __xntrace_op_user_start		3
+#define __xntrace_op_user_stop		4
+#define __xntrace_op_user_freeze	5
+#define __xntrace_op_special		6
+#define __xntrace_op_special_u64	7
+#define __xntrace_op_latpeak_freeze	8
+
+#endif /* !_COBALT_UAPI_KERNEL_TRACE_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h
new file mode 100644
index 0000000..2c931c2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/types.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_TYPES_H
+#define _COBALT_UAPI_KERNEL_TYPES_H
+
+#include <linux/types.h>
+#include <cobalt/uapi/kernel/limits.h>
+
+typedef __u64 xnticks_t;
+
+typedef __s64 xnsticks_t;
+
+typedef __u32 xnhandle_t;
+
+#define XN_NO_HANDLE		((xnhandle_t)0)
+#define XN_HANDLE_INDEX_MASK	((xnhandle_t)0xf0000000)
+
+/* Fixed bits (part of the identifier) */
+#define XNSYNCH_PSHARED		((xnhandle_t)0x40000000)
+
+/* Transient bits (expressing a status) */
+#define XNSYNCH_FLCLAIM		((xnhandle_t)0x80000000) /* Contended. */
+#define XNSYNCH_FLCEIL		((xnhandle_t)0x20000000) /* Ceiling active. */
+
+#define XN_HANDLE_TRANSIENT_MASK	(XNSYNCH_FLCLAIM|XNSYNCH_FLCEIL)
+
+/*
+ * Strip all special bits from the handle, only retaining the object
+ * index value in the registry.
+ */
+static inline xnhandle_t xnhandle_get_index(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_INDEX_MASK;
+}
+
+/*
+ * Strip the transient bits from the handle, only retaining the fixed
+ * part making the identifier.
+ */
+static inline xnhandle_t xnhandle_get_id(xnhandle_t handle)
+{
+	return handle & ~XN_HANDLE_TRANSIENT_MASK;
+}
+
+/*
+ * Our representation of time specs at the kernel<->user interface
+ * boundary at the moment, until we have fully transitioned to a
+ * y2038-safe implementation in libcobalt. Once done, those legacy
+ * types will be removed.
+ */
+struct __user_old_timespec {
+	long  tv_sec;
+	long  tv_nsec;
+};
+
+struct __user_old_itimerspec {
+	struct __user_old_timespec it_interval;
+	struct __user_old_timespec it_value;
+};
+
+struct __user_old_timeval {
+	long  tv_sec;
+	long  tv_usec;
+};
+
+/* Lifted from include/uapi/linux/timex.h. */
+struct __user_old_timex {
+	unsigned int modes;	/* mode selector */
+	__kernel_long_t offset;	/* time offset (usec) */
+	__kernel_long_t freq;	/* frequency offset (scaled ppm) */
+	__kernel_long_t maxerror;/* maximum error (usec) */
+	__kernel_long_t esterror;/* estimated error (usec) */
+	int status;		/* clock command/status */
+	__kernel_long_t constant;/* pll time constant */
+	__kernel_long_t precision;/* clock precision (usec) (read only) */
+	__kernel_long_t tolerance;/* clock frequency tolerance (ppm)
+				   * (read only)
+				   */
+	struct __user_old_timeval time;	/* (read only, except for ADJ_SETOFFSET) */
+	__kernel_long_t tick;	/* (modified) usecs between clock ticks */
+
+	__kernel_long_t ppsfreq;/* pps frequency (scaled ppm) (ro) */
+	__kernel_long_t jitter; /* pps jitter (us) (ro) */
+	int shift;              /* interval duration (s) (shift) (ro) */
+	__kernel_long_t stabil;            /* pps stability (scaled ppm) (ro) */
+	__kernel_long_t jitcnt; /* jitter limit exceeded (ro) */
+	__kernel_long_t calcnt; /* calibration intervals (ro) */
+	__kernel_long_t errcnt; /* calibration errors (ro) */
+	__kernel_long_t stbcnt; /* stability limit exceeded (ro) */
+
+	int tai;		/* TAI offset (ro) */
+
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32; int  :32;
+	int  :32; int  :32; int  :32;
+};
+
+#endif /* !_COBALT_UAPI_KERNEL_TYPES_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h
new file mode 100644
index 0000000..fcfde21
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/urw.h
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_URW_H
+#define _COBALT_UAPI_KERNEL_URW_H
+
+#include <linux/types.h>
+
+/*
+ * A restricted version of the kernel seqlocks with a slightly
+ * different interface, allowing for unsynced reads with concurrent
+ * write detection, without serializing writers.  Callers should
+ * provide proper locking to deal with concurrent updates.
+ *
+ * urw_t lock = URW_INITIALIZER;
+ * urwstate_t tmp;
+ *
+ * unsynced_read_block(&tmp, &lock) {
+ *          (will redo until clean read)...
+ * }
+ *
+ * unsynced_write_block(&tmp, &lock) {
+ *          ...
+ * }
+ *
+ * This code was inspired by Wolfgang Mauerer's linux/seqlock.h
+ * adaptation for Xenomai 2.6 to support the VDSO feature.
+ */
+
+typedef struct {
+	__u32 sequence;
+} urw_t;
+
+typedef struct {
+	__u32 token;
+	__u32 dirty;
+} urwstate_t;
+
+#define URW_INITIALIZER     { 0 }
+#define DEFINE_URW(__name)  urw_t __name = URW_INITIALIZER
+
+#ifndef READ_ONCE
+#define READ_ONCE ACCESS_ONCE
+#endif
+
+static inline void __try_read_start(const urw_t *urw, urwstate_t *tmp)
+{
+	__u32 token;
+repeat:
+	token = READ_ONCE(urw->sequence);
+	smp_rmb();
+	if (token & 1) {
+		cpu_relax();
+		goto repeat;
+	}
+
+	tmp->token = token;
+	tmp->dirty = 1;
+}
+
+static inline void __try_read_end(const urw_t *urw, urwstate_t *tmp)
+{
+	smp_rmb();
+	if (urw->sequence != tmp->token) {
+		__try_read_start(urw, tmp);
+		return;
+	}
+
+	tmp->dirty = 0;
+}
+
+static inline void __do_write_start(urw_t *urw, urwstate_t *tmp)
+{
+	urw->sequence++;
+	tmp->dirty = 1;
+	smp_wmb();
+}
+
+static inline void __do_write_end(urw_t *urw, urwstate_t *tmp)
+{
+	smp_wmb();
+	tmp->dirty = 0;
+	urw->sequence++;
+}
+
+static inline void unsynced_rw_init(urw_t *urw)
+{
+	urw->sequence = 0;
+}
+
+#define unsynced_read_block(__tmp, __urw)		\
+	for (__try_read_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __try_read_end(__urw, __tmp))
+
+#define unsynced_write_block(__tmp, __urw)		\
+	for (__do_write_start(__urw, __tmp);		\
+	     (__tmp)->dirty; __do_write_end(__urw, __tmp))
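+
+/*
+ * Hedged usage sketch (illustration only): publishing a two-word
+ * value which readers may sample without holding a lock; the read
+ * block re-runs until it observes an even, unchanged sequence, hence
+ * a consistent pair. Writers still need external serialization when
+ * there are several of them, as stated above.
+ *
+ *   static struct { __u32 sec, nsec; } stamp;
+ *   static DEFINE_URW(stamp_lock);
+ *
+ *   void writer(__u32 sec, __u32 nsec)
+ *   {
+ *           urwstate_t tmp;
+ *
+ *           unsynced_write_block(&tmp, &stamp_lock) {
+ *                   stamp.sec = sec;
+ *                   stamp.nsec = nsec;
+ *           }
+ *   }
+ *
+ *   void reader(__u32 *sec, __u32 *nsec)
+ *   {
+ *           urwstate_t tmp;
+ *
+ *           unsynced_read_block(&tmp, &stamp_lock) {
+ *                   *sec = stamp.sec;
+ *                   *nsec = stamp.nsec;
+ *           }
+ *   }
+ */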
+
+#endif /* !_COBALT_UAPI_KERNEL_URW_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h
new file mode 100644
index 0000000..5b9b1b6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/kernel/vdso.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2009 Wolfgang Mauerer <wolfgang.mauerer@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_KERNEL_VDSO_H
+#define _COBALT_UAPI_KERNEL_VDSO_H
+
+#include <cobalt/uapi/kernel/urw.h>
+
+/*
+ * I-pipe only. Dovetail enables the common vDSO for getting
+ * CLOCK_REALTIME timestamps from the out-of-band stage
+ * (XNVDSO_FEAT_HOST_REALTIME is cleared in this case).
+ */
+struct xnvdso_hostrt_data {
+	__u64 wall_sec;
+	__u64 wtom_sec;
+	__u64 cycle_last;
+	__u64 mask;
+	__u32 wall_nsec;
+	__u32 wtom_nsec;
+	__u32 mult;
+	__u32 shift;
+	__u32 live;
+	urw_t lock;
+};
+
+/*
+ * Data shared between the Cobalt kernel and applications, which lives
+ * in the shared memory heap (COBALT_MEMDEV_SHARED).
+ * xnvdso.features tells which data is present. Notice that
+ * struct xnvdso may only grow, but never shrink.
+ */
+struct xnvdso {
+	__u64 features;
+	/* XNVDSO_FEAT_HOST_REALTIME */
+	struct xnvdso_hostrt_data hostrt_data;
+	/* XNVDSO_FEAT_WALLCLOCK_OFFSET */
+	__u64 wallclock_offset;
+};
+
+/* For each shared feature, add a flag below. */
+
+#define XNVDSO_FEAT_HOST_REALTIME	0x0000000000000001ULL
+#define XNVDSO_FEAT_WALLCLOCK_OFFSET	0x0000000000000002ULL
+
+static inline int xnvdso_test_feature(struct xnvdso *vdso,
+				      __u64 feature)
+{
+	return (vdso->features & feature) != 0;
+}
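+
+/*
+ * Hedged usage sketch (illustration only): an application which
+ * mapped the shared vDSO area should test for a feature before
+ * touching the corresponding data, e.g.
+ *
+ *   if (xnvdso_test_feature(vdso, XNVDSO_FEAT_HOST_REALTIME))
+ *           read_hostrt(&vdso->hostrt_data);
+ *
+ * where read_hostrt() is a hypothetical helper sampling the host
+ * real-time clock data under the urw lock.
+ */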
+
+#endif /* !_COBALT_UAPI_KERNEL_VDSO_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h
new file mode 100644
index 0000000..6e54daf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/monitor.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MONITOR_H
+#define _COBALT_UAPI_MONITOR_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct cobalt_monitor_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MONITOR_GRANTED    0x01
+#define COBALT_MONITOR_DRAINED    0x02
+#define COBALT_MONITOR_SIGNALED   0x03 /* i.e. GRANTED or DRAINED */
+#define COBALT_MONITOR_BROADCAST  0x04
+#define COBALT_MONITOR_PENDED     0x08
+};
+
+struct cobalt_monitor;
+
+struct cobalt_monitor_shadow {
+	__u32 state_offset;
+	__u32 flags;
+	xnhandle_t handle;
+#define COBALT_MONITOR_SHARED     0x1
+#define COBALT_MONITOR_WAITGRANT  0x0
+#define COBALT_MONITOR_WAITDRAIN  0x1
+};
+
+typedef struct cobalt_monitor_shadow cobalt_monitor_t;
+
+#endif /* !_COBALT_UAPI_MONITOR_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h
new file mode 100644
index 0000000..75e34f9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/mutex.h
@@ -0,0 +1,44 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_MUTEX_H
+#define _COBALT_UAPI_MUTEX_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_MUTEX_MAGIC  0x86860303
+
+struct cobalt_mutex_state {
+	atomic_t owner;
+	__u32 flags;
+#define COBALT_MUTEX_COND_SIGNAL 0x00000001
+#define COBALT_MUTEX_ERRORCHECK  0x00000002
+	__u32 ceiling;
+};
+
+union cobalt_mutex_union {
+	pthread_mutex_t native_mutex;
+	struct cobalt_mutex_shadow {
+		__u32 magic;
+		__u32 lockcnt;
+		__u32 state_offset;
+		xnhandle_t handle;
+		struct cobalt_mutexattr attr;
+	} shadow_mutex;
+};
+
+#endif /* !_COBALT_UAPI_MUTEX_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h
new file mode 100644
index 0000000..1409587
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sched.h
@@ -0,0 +1,138 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SCHED_H
+#define _COBALT_UAPI_SCHED_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define SCHED_COBALT		42
+#define SCHED_WEAK		43
+
+#ifndef SCHED_SPORADIC
+#define SCHED_SPORADIC		10
+#define sched_ss_low_priority	sched_u.ss.__sched_low_priority
+#define sched_ss_repl_period	sched_u.ss.__sched_repl_period
+#define sched_ss_init_budget	sched_u.ss.__sched_init_budget
+#define sched_ss_max_repl	sched_u.ss.__sched_max_repl
+#endif	/* !SCHED_SPORADIC */
+
+struct __sched_ss_param {
+	int __sched_low_priority;
+	struct __user_old_timespec __sched_repl_period;
+	struct __user_old_timespec __sched_init_budget;
+	int __sched_max_repl;
+};
+
+#define sched_rr_quantum	sched_u.rr.__sched_rr_quantum
+
+struct __sched_rr_param {
+	struct __user_old_timespec __sched_rr_quantum;
+};
+
+#ifndef SCHED_TP
+#define SCHED_TP		11
+#define sched_tp_partition	sched_u.tp.__sched_partition
+#endif	/* !SCHED_TP */
+
+struct __sched_tp_param {
+	int __sched_partition;
+};
+
+struct sched_tp_window {
+	struct __user_old_timespec offset;
+	struct __user_old_timespec duration;
+	int ptid;
+};
+
+enum {
+	sched_tp_install,
+	sched_tp_uninstall,
+	sched_tp_start,
+	sched_tp_stop,
+};
+
+struct __sched_config_tp {
+	int op;
+	int nr_windows;
+	struct sched_tp_window windows[0];
+};
+
+#define sched_tp_confsz(nr_win) \
+  (sizeof(struct __sched_config_tp) + (nr_win) * sizeof(struct sched_tp_window))
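+
+/*
+ * Hedged allocation sketch (illustration only): building a TP
+ * configuration with two windows; the variable-length layout is why
+ * sched_tp_confsz() exists.
+ *
+ *   int nr_win = 2;
+ *   struct __sched_config_tp *tp = malloc(sched_tp_confsz(nr_win));
+ *
+ *   tp->op = sched_tp_install;
+ *   tp->nr_windows = nr_win;
+ *   tp->windows[0].offset = (struct __user_old_timespec){ 0, 0 };
+ *   tp->windows[0].duration = (struct __user_old_timespec){ 0, 100000000 };
+ *   tp->windows[0].ptid = 0;
+ *   ...
+ */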
+
+#ifndef SCHED_QUOTA
+#define SCHED_QUOTA		12
+#define sched_quota_group	sched_u.quota.__sched_group
+#endif	/* !SCHED_QUOTA */
+
+struct __sched_quota_param {
+	int __sched_group;
+};
+
+enum {
+	sched_quota_add,
+	sched_quota_remove,
+	sched_quota_force_remove,
+	sched_quota_set,
+	sched_quota_get,
+};
+
+struct __sched_config_quota {
+	int op;
+	union {
+		struct {
+			int pshared;
+		} add;
+		struct {
+			int tgid;
+		} remove;
+		struct {
+			int tgid;
+			int quota;
+			int quota_peak;
+		} set;
+		struct {
+			int tgid;
+		} get;
+	};
+	struct __sched_quota_info {
+		int tgid;
+		int quota;
+		int quota_peak;
+		int quota_sum;
+	} info;
+};
+
+#define sched_quota_confsz()  sizeof(struct __sched_config_quota)
+
+struct sched_param_ex {
+	int sched_priority;
+	union {
+		struct __sched_ss_param ss;
+		struct __sched_rr_param rr;
+		struct __sched_tp_param tp;
+		struct __sched_quota_param quota;
+	} sched_u;
+};
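+
+/*
+ * Hedged usage sketch (illustration only): filling the extended
+ * parameter block with sporadic scheduling settings through the
+ * POSIX-style accessors defined above.
+ *
+ *   struct sched_param_ex p = { .sched_priority = 10 };
+ *
+ *   p.sched_ss_low_priority = 1;
+ *   p.sched_ss_repl_period = (struct __user_old_timespec){ 1, 0 };
+ *   p.sched_ss_init_budget = (struct __user_old_timespec){ 0, 500000000 };
+ *   p.sched_ss_max_repl = 4;
+ */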
+
+union sched_config {
+	struct __sched_config_tp tp;
+	struct __sched_config_quota quota;
+};
+
+#endif /* !_COBALT_UAPI_SCHED_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h
new file mode 100644
index 0000000..01a9b55
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/sem.h
@@ -0,0 +1,56 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SEM_H
+#define _COBALT_UAPI_SEM_H
+
+#include <cobalt/uapi/kernel/types.h>
+
+#define COBALT_SEM_MAGIC (0x86860707)
+#define COBALT_NAMED_SEM_MAGIC (0x86860D0D)
+
+struct cobalt_sem;
+
+struct cobalt_sem_state {
+	atomic_t value;
+	__u32 flags;
+};
+
+union cobalt_sem_union {
+	sem_t native_sem;
+	struct cobalt_sem_shadow {
+		__u32 magic;
+		__s32 state_offset;
+		xnhandle_t handle;
+	} shadow_sem;
+};
+
+struct cobalt_sem_info {
+	unsigned int value;
+	int flags;
+	int nrwait;
+};
+
+#define SEM_FIFO       0x1
+#define SEM_PULSE      0x2
+#define SEM_PSHARED    0x4
+#define SEM_REPORT     0x8
+#define SEM_WARNDEL    0x10
+#define SEM_RAWCLOCK   0x20
+#define SEM_NOBUSYDEL  0x40
+
+#endif /* !_COBALT_UAPI_SEM_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h
new file mode 100644
index 0000000..8a7ea15
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/signal.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SIGNAL_H
+#define _COBALT_UAPI_SIGNAL_H
+
+/*
+ * Those are pseudo-signals only available with pthread_kill() to
+ * suspend/resume/unblock threads synchronously, force them out of
+ * primary mode or even demote them to the SCHED_OTHER class via the
+ * low-level nucleus interface. Can't block those signals, queue them,
+ * or even set them in a sigset. Those are nasty, strictly anti-POSIX
+ * things; we do provide them nevertheless only because we are mean
+ * people doing harmful code for no valid reason. Can't go against
+ * your nature, right?  Nah... (this said, don't blame us for POSIX,
+ * we are not _that_ mean).
+ */
+#define SIGSUSP (SIGRTMAX + 1)
+#define SIGRESM (SIGRTMAX + 2)
+#define SIGRELS (SIGRTMAX + 3)
+#define SIGKICK (SIGRTMAX + 4)
+#define SIGDEMT (SIGRTMAX + 5)
+
+/*
+ * Regular POSIX signals with specific handling by Xenomai.
+ */
+#define SIGSHADOW			SIGWINCH
+#define sigshadow_action(code)		((code) & 0xff)
+#define sigshadow_arg(code)		(((code) >> 8) & 0xff)
+#define sigshadow_int(action, arg)	((action) | ((arg) << 8))
+
+/* SIGSHADOW action codes. */
+#define SIGSHADOW_ACTION_HARDEN		1
+#define SIGSHADOW_ACTION_BACKTRACE	2
+#define SIGSHADOW_ACTION_HOME		3
+#define SIGSHADOW_BACKTRACE_DEPTH	16
+
+#define SIGDEBUG			SIGXCPU
+#define sigdebug_code(si)		((si)->si_value.sival_int)
+#define sigdebug_reason(si)		(sigdebug_code(si) & 0xff)
+#define sigdebug_marker			0xfccf0000
+#define sigdebug_marked(si)		\
+	((sigdebug_code(si) & 0xffff0000) == sigdebug_marker)
+
+/* Possible values of sigdebug_reason() */
+#define SIGDEBUG_UNDEFINED		0
+#define SIGDEBUG_MIGRATE_SIGNAL		1
+#define SIGDEBUG_MIGRATE_SYSCALL	2
+#define SIGDEBUG_MIGRATE_FAULT		3
+#define SIGDEBUG_MIGRATE_PRIOINV	4
+#define SIGDEBUG_NOMLOCK		5
+#define SIGDEBUG_WATCHDOG		6
+#define SIGDEBUG_RESCNT_IMBALANCE	7
+#define SIGDEBUG_LOCK_BREAK		8
+#define SIGDEBUG_MUTEX_SLEEP		9
+
+#define COBALT_DELAYMAX			2147483647U
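+
+/*
+ * Hedged usage sketch (illustration only): a SIGDEBUG handler
+ * decoding the notification through the accessors above, e.g. to
+ * log unwanted migrations to secondary mode.
+ *
+ *   static void sigdebug_handler(int sig, siginfo_t *si, void *ctx)
+ *   {
+ *           if (sigdebug_marked(si) &&
+ *               sigdebug_reason(si) == SIGDEBUG_MIGRATE_SYSCALL)
+ *                   log_migration(si);  // hypothetical logger
+ *   }
+ */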
+
+/*
+ * Internal accessors to extra siginfo/sigevent fields, extending some
+ * existing base field. The extra data should be grouped in a
+ * dedicated struct type. The extra space is taken from the padding
+ * area available from the original structure definitions.
+ *
+ * e.g. getting the address of the following extension to
+ * _sifields._rt from siginfo_t,
+ *
+ * struct bar {
+ *    int foo;
+ * };
+ *
+ * would be noted as:
+ *
+ * siginfo_t si;
+ * struct bar *p = __cobalt_si_extra(&si, _rt, struct bar);
+ *
+ * This code is shared between kernel and user space. Proper
+ * definitions of siginfo_t and sigevent_t should have been read prior
+ * to including this file.
+ *
+ * CAUTION: this macro does not handle alignment issues for the extra
+ * data. The extra type definition should take care of this.
+ */
+#ifdef __OPTIMIZE__
+extern void *__siginfo_overflow(void);
+static inline
+const void *__check_si_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	siginfo_t *si __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(si->_sifields))
+		return p;
+
+	return __siginfo_overflow();
+}
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)__check_si_overflow(sizeof(__si->_sifields.__basefield),	\
+	       sizeof(__type), &(__si->_sifields.__basefield) + 1))
+#else
+#define __cobalt_si_extra(__si, __basefield, __type)				\
+	((__type *)((&__si->_sifields.__basefield) + 1))
+#endif
+
+/* Same approach, this time for extending sigevent_t. */
+
+#ifdef __OPTIMIZE__
+extern void *__sigevent_overflow(void);
+static inline
+const void *__check_sev_overflow(size_t fldsz, size_t extrasz, const void *p)
+{
+	sigevent_t *sev __attribute__((unused));
+
+	if (fldsz + extrasz <= sizeof(sev->_sigev_un))
+		return p;
+
+	return __sigevent_overflow();
+}
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)__check_sev_overflow(sizeof(__sev->_sigev_un.__basefield),	\
+	       sizeof(__type), &(__sev->_sigev_un.__basefield) + 1))
+#else
+#define __cobalt_sev_extra(__sev, __basefield, __type)				\
+	((__type *)((&__sev->_sigev_un.__basefield) + 1))
+#endif
+
+#endif /* !_COBALT_UAPI_SIGNAL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h
new file mode 100644
index 0000000..1523ddd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/syscall.h
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_SYSCALL_H
+#define _COBALT_UAPI_SYSCALL_H
+
+#include <cobalt/uapi/asm-generic/syscall.h>
+
+#define sc_cobalt_bind				0
+#define sc_cobalt_thread_create			1
+#define sc_cobalt_thread_getpid			2
+#define sc_cobalt_thread_setmode		3
+#define sc_cobalt_thread_setname		4
+#define sc_cobalt_thread_join			5
+#define sc_cobalt_thread_kill			6
+#define sc_cobalt_thread_setschedparam_ex	7
+#define sc_cobalt_thread_getschedparam_ex	8
+#define sc_cobalt_thread_getstat		9
+#define sc_cobalt_sem_init			10
+#define sc_cobalt_sem_destroy			11
+#define sc_cobalt_sem_post			12
+#define sc_cobalt_sem_wait			13
+#define sc_cobalt_sem_trywait			14
+#define sc_cobalt_sem_getvalue			15
+#define sc_cobalt_sem_open			16
+#define sc_cobalt_sem_close			17
+#define sc_cobalt_sem_unlink			18
+#define sc_cobalt_sem_timedwait			19
+#define sc_cobalt_sem_inquire			20
+#define sc_cobalt_sem_broadcast_np		21
+#define sc_cobalt_clock_getres			22
+#define sc_cobalt_clock_gettime			23
+#define sc_cobalt_clock_settime			24
+#define sc_cobalt_clock_nanosleep		25
+#define sc_cobalt_mutex_init			26
+#define sc_cobalt_mutex_check_init		27
+#define sc_cobalt_mutex_destroy			28
+#define sc_cobalt_mutex_lock			29
+#define sc_cobalt_mutex_timedlock		30
+#define sc_cobalt_mutex_trylock			31
+#define sc_cobalt_mutex_unlock			32
+#define sc_cobalt_cond_init			33
+#define sc_cobalt_cond_destroy			34
+#define sc_cobalt_cond_wait_prologue		35
+#define sc_cobalt_cond_wait_epilogue		36
+#define sc_cobalt_mq_open			37
+#define sc_cobalt_mq_close			38
+#define sc_cobalt_mq_unlink			39
+#define sc_cobalt_mq_getattr			40
+#define sc_cobalt_mq_timedsend			41
+#define sc_cobalt_mq_timedreceive		42
+#define sc_cobalt_mq_notify			43
+#define sc_cobalt_sched_minprio			44
+#define sc_cobalt_sched_maxprio			45
+#define sc_cobalt_sched_weightprio		46
+#define sc_cobalt_sched_yield			47
+#define sc_cobalt_sched_setscheduler_ex		48
+#define sc_cobalt_sched_getscheduler_ex		49
+#define sc_cobalt_sched_setconfig_np		50
+#define sc_cobalt_sched_getconfig_np		51
+#define sc_cobalt_timer_create			52
+#define sc_cobalt_timer_delete			53
+#define sc_cobalt_timer_settime			54
+#define sc_cobalt_timer_gettime			55
+#define sc_cobalt_timer_getoverrun		56
+#define sc_cobalt_timerfd_create		57
+#define sc_cobalt_timerfd_settime		58
+#define sc_cobalt_timerfd_gettime		59
+#define sc_cobalt_sigwait			60
+#define sc_cobalt_sigwaitinfo			61
+#define sc_cobalt_sigtimedwait			62
+#define sc_cobalt_sigpending			63
+#define sc_cobalt_kill				64
+#define sc_cobalt_sigqueue			65
+#define sc_cobalt_monitor_init			66
+#define sc_cobalt_monitor_destroy		67
+#define sc_cobalt_monitor_enter			68
+#define sc_cobalt_monitor_wait			69
+#define sc_cobalt_monitor_sync			70
+#define sc_cobalt_monitor_exit			71
+#define sc_cobalt_event_init			72
+#define sc_cobalt_event_wait			73
+#define sc_cobalt_event_sync			74
+#define sc_cobalt_event_destroy			75
+#define sc_cobalt_event_inquire			76
+#define sc_cobalt_open				77
+#define sc_cobalt_socket			78
+#define sc_cobalt_close				79
+#define sc_cobalt_ioctl				80
+#define sc_cobalt_read				81
+#define sc_cobalt_write				82
+#define sc_cobalt_recvmsg			83
+#define sc_cobalt_sendmsg			84
+#define sc_cobalt_mmap				85
+#define sc_cobalt_select			86
+#define sc_cobalt_fcntl				87
+#define sc_cobalt_migrate			88
+#define sc_cobalt_archcall			89
+#define sc_cobalt_trace				90
+#define sc_cobalt_corectl			91
+#define sc_cobalt_get_current			92
+/* 93: formerly mayday */
+#define sc_cobalt_backtrace			94
+#define sc_cobalt_serialdbg			95
+#define sc_cobalt_extend			96
+#define sc_cobalt_ftrace_puts			97
+#define sc_cobalt_recvmmsg			98
+#define sc_cobalt_sendmmsg			99
+#define sc_cobalt_clock_adjtime			100
+#define sc_cobalt_thread_setschedprio		101
+#define sc_cobalt_sem_timedwait64		102
+#define sc_cobalt_clock_gettime64		103
+#define sc_cobalt_clock_settime64		104
+#define sc_cobalt_clock_nanosleep64		105
+#define sc_cobalt_clock_getres64		106
+#define sc_cobalt_clock_adjtime64		107
+#define sc_cobalt_mutex_timedlock64		108
+#define sc_cobalt_mq_timedsend64		109
+#define sc_cobalt_mq_timedreceive64		110
+#define sc_cobalt_sigtimedwait64		111
+#define sc_cobalt_monitor_wait64		112
+#define sc_cobalt_event_wait64			113
+#define sc_cobalt_recvmmsg64			114
+
+#define __NR_COBALT_SYSCALLS			128 /* Power of 2 */
+
+#endif /* !_COBALT_UAPI_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h
new file mode 100644
index 0000000..07602db
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/thread.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_THREAD_H
+#define _COBALT_UAPI_THREAD_H
+
+#include <cobalt/uapi/kernel/thread.h>
+
+#define PTHREAD_WARNSW             XNWARN
+#define PTHREAD_LOCK_SCHED         XNLOCK
+#define PTHREAD_DISABLE_LOCKBREAK  XNTRAPLB
+#define PTHREAD_CONFORMING     0
+
+struct cobalt_mutexattr {
+	int type : 3;
+	int protocol : 3;
+	int pshared : 1;
+	int __pad : 1;
+	int ceiling : 8;  /* prio-1, (XN)SCHED_FIFO range. */
+};
+
+struct cobalt_condattr {
+	int clock : 7;
+	int pshared : 1;
+};
+
+struct cobalt_threadstat {
+	__u64 xtime;
+	__u64 timeout;
+	__u64 msw;
+	__u64 csw;
+	__u64 xsc;
+	__u32 status;
+	__u32 pf;
+	int cpu;
+	int cprio;
+	char name[XNOBJECT_NAME_LEN];
+	char personality[XNOBJECT_NAME_LEN];
+};
+
+#endif /* !_COBALT_UAPI_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h b/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h
new file mode 100644
index 0000000..411baf5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/uapi/time.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_UAPI_TIME_H
+#define _COBALT_UAPI_TIME_H
+
+#ifndef CLOCK_MONOTONIC_RAW
+#define CLOCK_MONOTONIC_RAW  4
+#endif
+
+/*
+ * The additional clock ids we manage must not collide with any of
+ * the POSIX and Linux kernel definitions, so that no ambiguity
+ * arises when porting applications in either direction.
+ *
+ * 0  .. 31   regular POSIX/linux clock ids.
+ * 32 .. 63   statically reserved Cobalt clocks
+ * 64 .. 127  dynamically registered Cobalt clocks (external)
+ *
+ * CAUTION: clock ids must fit within a 7bit value, see
+ * include/cobalt/uapi/thread.h (e.g. cobalt_condattr).
+ */
+#define __COBALT_CLOCK_STATIC(nr)	((clockid_t)((nr) + 32))
+
+#define CLOCK_HOST_REALTIME  __COBALT_CLOCK_STATIC(0)
+
+#define COBALT_MAX_EXTCLOCKS  64
+
+#define __COBALT_CLOCK_EXT(nr)		((clockid_t)(nr) | (1 << 6))
+#define __COBALT_CLOCK_EXT_P(id)	((int)(id) >= 64 && (int)(id) < 128)
+#define __COBALT_CLOCK_EXT_INDEX(id)	((int)(id) & ~(1 << 6))
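+
+/*
+ * Hedged example (illustration only): __COBALT_CLOCK_EXT(3) encodes
+ * to clock id 67 (64 | 3); __COBALT_CLOCK_EXT_P(67) is true since 67
+ * lies within [64..127], and __COBALT_CLOCK_EXT_INDEX(67) recovers
+ * the external clock slot, 3.
+ */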
+
+/*
+ * Additional timerfd defines
+ *
+ * When passing TFD_WAKEUP to timerfd_settime(), any timer expiration
+ * unblocks the thread which issued the call.
+ */
+#define TFD_WAKEUP	(1 << 2)
+
+#endif /* !_COBALT_UAPI_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/unistd.h b/kernel/xenomai-v3.2.4/include/cobalt/unistd.h
new file mode 100644
index 0000000..fe3992a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/unistd.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <unistd.h>
+
+#ifndef _COBALT_UNISTD_H
+#define _COBALT_UNISTD_H
+
+#include <cobalt/wrappers.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COBALT_DECL(ssize_t, read(int fd, void *buf, size_t nbyte));
+
+COBALT_DECL(ssize_t, write(int fd, const void *buf, size_t nbyte));
+
+COBALT_DECL(int, close(int fildes));
+
+COBALT_DECL(unsigned int, sleep(unsigned int seconds));
+
+COBALT_DECL(int, usleep(useconds_t usec));
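+
+/*
+ * Hedged usage note (illustration only): with linker-level wrapping
+ * in effect, a plain call such as read() resolves to __wrap_read();
+ * a specific flavor can be forced explicitly:
+ *
+ *   __RT(write(fd, buf, len));    // Cobalt implementation
+ *   __STD(write(fd, buf, len));   // original libc implementation
+ */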
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COBALT_UNISTD_H */
diff --git a/kernel/xenomai-v3.2.4/include/cobalt/wrappers.h b/kernel/xenomai-v3.2.4/include/cobalt/wrappers.h
new file mode 100644
index 0000000..7e061ca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/cobalt/wrappers.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_WRAPPERS_H
+#define _COBALT_WRAPPERS_H
+
+#include <boilerplate/compiler.h>
+
+#define __stringify_1(x...)	#x
+#define __stringify(x...)	__stringify_1(x)
+
+#define __WRAP(call)		__wrap_ ## call
+#define __STD(call)		__real_ ## call
+#define __COBALT(call)		__cobalt_ ## call
+#define __RT(call)		__COBALT(call)
+#define COBALT_DECL(T, P)	\
+	__typeof__(T) __RT(P);	\
+	__typeof__(T) __STD(P); \
+	__typeof__(T) __WRAP(P)
+
+/*
+ * Each "foo" Cobalt routine shadowing a POSIX service may be
+ * overridden by an external library (see the --with-cobalt-override
+ * option), in which case we generate the following symbols:
+ *
+ * __real_foo() => Original POSIX implementation.
+ * __cobalt_foo() => Cobalt implementation.
+ * __wrap_foo() => Weak alias to __cobalt_foo(), may be overridden.
+ *
+ * In the latter case, the external library shall provide its own
+ * implementation of __wrap_foo(), overriding Cobalt's foo()
+ * version. The original Cobalt implementation can still be
+ * referenced as __COBALT(foo).
+ */
+#define COBALT_IMPL(T, I, A)								\
+__typeof__(T) __wrap_ ## I A __attribute__((alias("__cobalt_" __stringify(I)), weak));	\
+__typeof__(T) __cobalt_ ## I A
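+
+/*
+ * Hedged expansion sketch (illustration only): a definition such as
+ *
+ *   COBALT_IMPL(int, close, (int fd))
+ *   {
+ *           return do_cobalt_close(fd);  // hypothetical body
+ *   }
+ *
+ * emits __wrap_close(int fd) as a weak alias to __cobalt_close(),
+ * then opens the definition of __cobalt_close(int fd), which the
+ * braced body completes.
+ */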
+
+#endif /* !_COBALT_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/Makefile.am b/kernel/xenomai-v3.2.4/include/copperplate/Makefile.am
new file mode 100644
index 0000000..5baa09b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/Makefile.am
@@ -0,0 +1,19 @@
+includesubdir = $(includedir)/copperplate
+
+includesub_HEADERS =		\
+	clockobj.h		\
+	cluster.h		\
+	debug.h			\
+	eventobj.h		\
+	heapobj.h		\
+	reference.h		\
+	registry.h		\
+	semobj.h		\
+	syncobj.h		\
+	threadobj.h		\
+	timerobj.h		\
+	traceobj.h		\
+	tunables.h
+
+noinst_HEADERS =		\
+	registry-obstack.h
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/clockobj.h b/kernel/xenomai-v3.2.4/include/copperplate/clockobj.h
new file mode 100644
index 0000000..dde18bd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/clockobj.h
@@ -0,0 +1,263 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_CLOCKOBJ_H
+#define _COPPERPLATE_CLOCKOBJ_H
+
+#include <pthread.h>
+#include <xeno_config.h>
+#include <boilerplate/time.h>
+#include <boilerplate/list.h>
+#include <boilerplate/lock.h>
+#include <boilerplate/limits.h>
+
+/*
+ * The Copperplate clock shall be monotonic, unless the threading
+ * library cannot support this over Mercury.
+ *
+ * In the normal case, this means that ongoing delays and timeouts
+ * won't be affected when the host system date is changed. In the
+ * restricted case by contrast, ongoing delays and timeouts may be
+ * impacted by changes to the host system date.
+ *
+ * The implementation maintains a per-clock epoch value, so that
+ * different emulators can have different (virtual) system dates.
+ */
+#ifdef CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED
+#define CLOCK_COPPERPLATE  CLOCK_REALTIME
+#else
+#define CLOCK_COPPERPLATE  CLOCK_MONOTONIC
+#endif
+
+struct clockobj {
+	pthread_mutex_t lock;
+	struct timespec epoch;
+	struct timespec offset;
+#ifndef CONFIG_XENO_LORES_CLOCK_DISABLED
+	unsigned int resolution;
+	unsigned int frequency;
+#endif
+};
+
+#define zero_time	((struct timespec){ .tv_sec = 0, .tv_nsec = 0 })
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void clockobj_set_date(struct clockobj *clkobj, ticks_t ticks);
+
+void clockobj_get_date(struct clockobj *clkobj, ticks_t *pticks);
+
+ticks_t clockobj_get_time(struct clockobj *clkobj);
+
+void clockobj_get_distance(struct clockobj *clkobj,
+			   const struct itimerspec *itm,
+			   struct timespec *delta);
+
+void clockobj_caltime_to_timeout(struct clockobj *clkobj, const struct tm *tm,
+				 unsigned long rticks, struct timespec *ts);
+
+void clockobj_caltime_to_ticks(struct clockobj *clkobj, const struct tm *tm,
+			       unsigned long rticks, ticks_t *pticks);
+
+void clockobj_ticks_to_caltime(struct clockobj *clkobj,
+			       ticks_t ticks,
+			       struct tm *tm,
+			       unsigned long *rticks);
+
+void clockobj_convert_clocks(struct clockobj *clkobj,
+			     const struct timespec *in,
+			     clockid_t clk_id,
+			     struct timespec *out);
+
+int clockobj_set_resolution(struct clockobj *clkobj,
+			    unsigned int resolution_ns);
+
+int clockobj_init(struct clockobj *clkobj,
+		  unsigned int resolution_ns);
+
+int clockobj_destroy(struct clockobj *clkobj);
+
+#ifndef CONFIG_XENO_LORES_CLOCK_DISABLED
+
+void __clockobj_ticks_to_timeout(struct clockobj *clkobj, clockid_t clk_id,
+				 ticks_t ticks, struct timespec *ts);
+
+void __clockobj_ticks_to_timespec(struct clockobj *clkobj,
+				  ticks_t ticks, struct timespec *ts);
+#endif /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/ticks.h>
+#include <cobalt/sys/cobalt.h>
+
+/*
+ * The Cobalt core exclusively deals with aperiodic timings, so a
+ * Cobalt _tick_ is actually a _TSC_ unit. In contrast, Copperplate
+ * deals with _TSC_ units and periodic _ticks_ whose duration depends
+ * on the clock resolution. Therefore, Cobalt ticks are strictly
+ * equivalent to Copperplate TSC units, and Copperplate ticks are
+ * periods of the reference clockobj which Cobalt does not know about.
+ */
+
+static inline ticks_t clockobj_get_tsc(void)
+{
+	/* Guaranteed to be the source of CLOCK_COPPERPLATE. */
+	return cobalt_read_tsc();
+}
+
+static inline sticks_t clockobj_ns_to_tsc(sticks_t ns)
+{
+	return cobalt_ns_to_ticks(ns);
+}
+
+static inline sticks_t clockobj_tsc_to_ns(sticks_t tsc)
+{
+	return cobalt_ticks_to_ns(tsc);
+}
+
+static inline
+void clockobj_ns_to_timespec(ticks_t ns, struct timespec *ts)
+{
+	unsigned long rem;
+
+	ts->tv_sec = (time_t)cobalt_divrem_billion(ns, &rem);
+	ts->tv_nsec = (long)rem;
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
+ticks_t clockobj_get_tsc(void);
+
+static inline sticks_t clockobj_ns_to_tsc(sticks_t ns)
+{
+	return ns;
+}
+
+static inline sticks_t clockobj_tsc_to_ns(sticks_t tsc)
+{
+	return tsc;
+}
+
+static inline
+void clockobj_ns_to_timespec(ticks_t ns, struct timespec *ts)
+{
+	ts->tv_sec = ns / 1000000000ULL;
+	ts->tv_nsec = ns - (ts->tv_sec * 1000000000ULL);
+}
+
+#endif /* CONFIG_XENO_MERCURY */
+
+#ifdef CONFIG_XENO_LORES_CLOCK_DISABLED
+
+static inline
+void __clockobj_ticks_to_timeout(struct clockobj *clkobj,
+				 clockid_t clk_id,
+				 ticks_t ticks, struct timespec *ts)
+{
+	struct timespec now, delta;
+
+	__RT(clock_gettime(clk_id, &now));
+	clockobj_ns_to_timespec(ticks, &delta);
+	timespec_add(ts, &now, &delta);
+}
+
+static inline
+void __clockobj_ticks_to_timespec(struct clockobj *clkobj,
+				  ticks_t ticks, struct timespec *ts)
+{
+	clockobj_ns_to_timespec(ticks, ts);
+}
+
+static inline
+void clockobj_ticks_to_timespec(struct clockobj *clkobj,
+				ticks_t ticks, struct timespec *ts)
+{
+	__clockobj_ticks_to_timespec(clkobj, ticks, ts);
+}
+
+static inline
+unsigned int clockobj_get_resolution(struct clockobj *clkobj)
+{
+	return 1;
+}
+
+static inline
+unsigned int clockobj_get_frequency(struct clockobj *clkobj)
+{
+	return 1000000000;
+}
+
+static inline sticks_t clockobj_ns_to_ticks(struct clockobj *clkobj,
+					    sticks_t ns)
+{
+	return ns;
+}
+
+static inline sticks_t clockobj_ticks_to_ns(struct clockobj *clkobj,
+					    sticks_t ticks)
+{
+	return ticks;
+}
+
+#else /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+static inline
+void clockobj_ticks_to_timespec(struct clockobj *clkobj,
+				ticks_t ticks, struct timespec *ts)
+{
+	__clockobj_ticks_to_timespec(clkobj, ticks, ts);
+}
+
+static inline
+unsigned int clockobj_get_resolution(struct clockobj *clkobj)
+{
+	return clkobj->resolution;
+}
+
+static inline
+unsigned int clockobj_get_frequency(struct clockobj *clkobj)
+{
+	return clkobj->frequency;
+}
+
+sticks_t clockobj_ns_to_ticks(struct clockobj *clkobj,
+			      sticks_t ns);
+
+static inline sticks_t clockobj_ticks_to_ns(struct clockobj *clkobj,
+					    sticks_t ticks)
+{
+	return ticks * clkobj->resolution;
+}
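+
+/*
+ * Hedged example (illustration only): with a 1 us clockobj
+ * resolution (clkobj->resolution == 1000),
+ * clockobj_ticks_to_ns(clkobj, 2500) yields 2500000 ns, i.e. 2.5 ms.
+ */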
+
+#endif /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+static inline
+void clockobj_ticks_to_timeout(struct clockobj *clkobj,
+			       ticks_t ticks, struct timespec *ts)
+{
+	__clockobj_ticks_to_timeout(clkobj, CLOCK_COPPERPLATE, ticks, ts);
+}
+
+#endif /* _COPPERPLATE_CLOCKOBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/cluster.h b/kernel/xenomai-v3.2.4/include/copperplate/cluster.h
new file mode 100644
index 0000000..2ca07c6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/cluster.h
@@ -0,0 +1,308 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_CLUSTER_H
+#define _COPPERPLATE_CLUSTER_H
+
+#include <boilerplate/hash.h>
+#include <copperplate/syncobj.h>
+
+#ifdef CONFIG_XENO_PSHARED
+
+struct clusterobj {
+	pid_t cnode;
+	struct hashobj hobj;
+};
+
+struct dictionary {
+	struct hash_table table;
+	struct hashobj hobj;
+};
+
+struct cluster {
+	struct dictionary *d;
+};
+
+struct syndictionary {
+	struct hash_table table;
+	struct syncobj sobj;
+	struct hashobj hobj;
+};
+
+struct syncluster {
+	struct syndictionary *d;
+};
+
+struct pvclusterobj {
+	struct pvhashobj hobj;
+};
+
+struct pvcluster {
+	struct pvhash_table table;
+};
+
+struct pvsyncluster {
+	struct pvcluster c;
+	struct syncobj sobj;
+};
+
+static inline
+const void *clusterobj_key(const struct clusterobj *cobj)
+{
+	return __memptr(__main_heap, cobj->hobj.key);
+}
+
+static inline
+size_t clusterobj_keylen(const struct clusterobj *cobj)
+{
+	return cobj->hobj.len;
+}
+
+static inline
+pid_t clusterobj_cnode(const struct clusterobj *cobj)
+{
+	return cobj->cnode;
+}
+
+static inline
+const void *pvclusterobj_key(const struct pvclusterobj *cobj)
+{
+	return cobj->hobj.key;
+}
+
+static inline
+size_t pvclusterobj_keylen(const struct pvclusterobj *cobj)
+{
+	return cobj->hobj.len;
+}
+
+static inline
+pid_t pvclusterobj_cnode(const struct pvclusterobj *cobj)
+{
+	return -1;
+}
+
+#else /* !CONFIG_XENO_PSHARED */
+
+struct clusterobj {
+	struct pvhashobj hobj;
+};
+
+struct cluster {
+	struct pvhash_table table;
+};
+
+struct syncluster {
+	struct cluster c;
+	struct syncobj sobj;
+};
+
+#define pvclusterobj  clusterobj
+#define pvcluster     cluster
+#define pvsyncluster  syncluster
+
+static inline
+const void *clusterobj_key(const struct pvclusterobj *cobj)
+{
+	return cobj->hobj.key;
+}
+
+static inline
+size_t clusterobj_keylen(const struct pvclusterobj *cobj)
+{
+	return cobj->hobj.len;
+}
+
+static inline
+pid_t clusterobj_cnode(const struct pvclusterobj *cobj)
+{
+	return -1;
+}
+
+static inline
+const void *pvclusterobj_key(const struct pvclusterobj *cobj)
+{
+	return clusterobj_key(cobj);
+}
+
+static inline
+size_t pvclusterobj_keylen(const struct pvclusterobj *cobj)
+{
+	return clusterobj_keylen(cobj);
+}
+
+static inline
+pid_t pvclusterobj_cnode(const struct pvclusterobj *cobj)
+{
+	return clusterobj_cnode(cobj);
+}
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+struct syncluster_wait_struct {
+	union {
+		dref_type(char *) name_ref;
+		const char *name;
+	};
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int pvcluster_init(struct pvcluster *c, const char *name);
+
+void pvcluster_destroy(struct pvcluster *c);
+
+int pvcluster_addobj(struct pvcluster *c, const char *name,
+		     struct pvclusterobj *cobj);
+
+int pvcluster_addobj_dup(struct pvcluster *c, const char *name,
+			 struct pvclusterobj *cobj);
+
+int pvcluster_delobj(struct pvcluster *c,
+		     struct pvclusterobj *cobj);
+
+struct pvclusterobj *pvcluster_findobj(struct pvcluster *c,
+				       const char *name);
+
+int pvcluster_walk(struct pvcluster *c,
+		   int (*walk)(struct pvcluster *c,
+			       struct pvclusterobj *cobj));
+
+int pvsyncluster_init(struct pvsyncluster *sc, const char *name);
+
+void pvsyncluster_destroy(struct pvsyncluster *sc);
+
+int pvsyncluster_addobj(struct pvsyncluster *sc, const char *name,
+			struct pvclusterobj *cobj);
+
+int pvsyncluster_delobj(struct pvsyncluster *sc,
+			struct pvclusterobj *cobj);
+
+int pvsyncluster_findobj(struct pvsyncluster *sc,
+			 const char *name,
+			 const struct timespec *timeout,
+			 struct pvclusterobj **cobjp) __must_check;
+
+#ifdef CONFIG_XENO_PSHARED
+
+int cluster_init(struct cluster *c, const char *name);
+
+int cluster_addobj(struct cluster *c, const char *name,
+		   struct clusterobj *cobj);
+
+int cluster_addobj_dup(struct cluster *c, const char *name,
+		       struct clusterobj *cobj);
+
+int cluster_delobj(struct cluster *c,
+		   struct clusterobj *cobj);
+
+struct clusterobj *cluster_findobj(struct cluster *c,
+				   const char *name);
+
+int cluster_walk(struct cluster *c,
+		 int (*walk)(struct cluster *c,
+			     struct clusterobj *cobj));
+
+int syncluster_init(struct syncluster *sc, const char *name);
+
+int syncluster_addobj(struct syncluster *sc, const char *name,
+		      struct clusterobj *cobj);
+
+int syncluster_delobj(struct syncluster *sc,
+		      struct clusterobj *cobj);
+
+int syncluster_findobj(struct syncluster *sc,
+		       const char *name,
+		       const struct timespec *timeout,
+		       struct clusterobj **cobjp) __must_check;
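+
+/*
+ * Hedged usage sketch (illustration only): registering a named
+ * object into a synchronized cluster, then retrieving it; a NULL
+ * timeout is assumed here to mean an indefinite wait.
+ *
+ *   struct syncluster sc;
+ *   struct clusterobj obj, *found;
+ *
+ *   syncluster_init(&sc, "demo.objects");
+ *   syncluster_addobj(&sc, "foo", &obj);
+ *   ...
+ *   syncluster_findobj(&sc, "foo", NULL, &found);
+ */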
+
+#else /* !CONFIG_XENO_PSHARED */
+
+static inline int cluster_init(struct cluster *c, const char *name)
+{
+	return pvcluster_init(c, name);
+}
+
+static inline int cluster_addobj(struct cluster *c, const char *name,
+				 struct clusterobj *cobj)
+{
+	return pvcluster_addobj(c, name, cobj);
+}
+
+static inline int cluster_addobj_dup(struct cluster *c, const char *name,
+				     struct clusterobj *cobj)
+{
+	return pvcluster_addobj_dup(c, name, cobj);
+}
+
+static inline int cluster_delobj(struct cluster *c,
+				 struct clusterobj *cobj)
+{
+	return pvcluster_delobj(c, cobj);
+}
+
+static inline struct clusterobj *cluster_findobj(struct cluster *c,
+						 const char *name)
+{
+	return pvcluster_findobj(c, name);
+}
+
+static inline int cluster_walk(struct cluster *c,
+			       int (*walk)(struct cluster *c,
+					   struct clusterobj *cobj))
+{
+	return pvcluster_walk(c, walk);
+}
+
+static inline int syncluster_init(struct syncluster *sc,
+				  const char *name)
+{
+	return pvsyncluster_init(sc, name);
+}
+
+static inline int syncluster_addobj(struct syncluster *sc,
+				    const char *name,
+				    struct clusterobj *cobj)
+{
+	return pvsyncluster_addobj(sc, name, cobj);
+}
+
+static inline int syncluster_delobj(struct syncluster *sc,
+				    struct clusterobj *cobj)
+{
+	return pvsyncluster_delobj(sc, cobj);
+}
+
+static inline __must_check
+int syncluster_findobj(struct syncluster *sc,
+		       const char *name,
+		       const struct timespec *timeout,
+		       struct clusterobj **cobjp)
+{
+	return pvsyncluster_findobj(sc, name, timeout, cobjp);
+}
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _COPPERPLATE_CLUSTER_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/debug.h b/kernel/xenomai-v3.2.4/include/copperplate/debug.h
new file mode 100644
index 0000000..027f12c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/debug.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_DEBUG_H
+#define _COPPERPLATE_DEBUG_H
+
+#include <boilerplate/debug.h>
+
+#ifdef CONFIG_XENO_DEBUG
+
+struct threadobj;
+
+#define debug(__fmt, __args...)						\
+	do {								\
+		struct threadobj *__thobj = threadobj_current();	\
+		if (__thobj == NULL ||					\
+		    (__thobj->status & __THREAD_S_DEBUG) != 0)		\
+		  __debug(__thobj ? __thobj->name : NULL, __fmt, ##__args); \
+	} while (0)
+
+#else /* !CONFIG_XENO_DEBUG */
+
+#define debug(fmt, args...)  do { } while (0)
+
+#endif /* !CONFIG_XENO_DEBUG */
+
+#endif /* _COPPERPLATE_DEBUG_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/eventobj.h b/kernel/xenomai-v3.2.4/include/copperplate/eventobj.h
new file mode 100644
index 0000000..3fc9416
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/eventobj.h
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_EVENTOBJ_H
+#define _COPPERPLATE_EVENTOBJ_H
+
+#include <boilerplate/compiler.h>
+#include <copperplate/reference.h>
+
+struct eventobj_waitentry {
+	pid_t pid;
+	char name[32];
+};
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/uapi/event.h>
+
+struct eventobj_corespec {
+	cobalt_event_t event;
+};
+
+struct eventobj_wait_struct {
+};
+
+#define EVOBJ_FIFO  COBALT_EVENT_FIFO
+#define EVOBJ_PRIO  COBALT_EVENT_PRIO
+
+#define EVOBJ_ALL   COBALT_EVENT_ALL
+#define EVOBJ_ANY   COBALT_EVENT_ANY
+
+#else  /* CONFIG_XENO_MERCURY */
+
+#include <copperplate/syncobj.h>
+
+struct eventobj_corespec {
+	struct syncobj sobj;
+	unsigned int value;
+	int flags;
+};
+
+struct eventobj_wait_struct {
+	unsigned int value;
+	int mode;
+};
+
+#define EVOBJ_FIFO  0x0
+#define EVOBJ_PRIO  0x1
+
+#define EVOBJ_ALL   0x0
+#define EVOBJ_ANY   0x1
+
+#endif /* CONFIG_XENO_MERCURY */
+
+struct eventobj {
+	struct eventobj_corespec core;
+	fnref_type(void (*)(struct eventobj *evobj)) finalizer;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int eventobj_init(struct eventobj *evobj,
+		  unsigned int value, int flags,
+		  fnref_type(void (*)(struct eventobj *evobj)) finalizer) __must_check;
+
+int eventobj_destroy(struct eventobj *evobj);
+
+void eventobj_uninit(struct eventobj *evobj);
+
+int eventobj_post(struct eventobj *evobj,
+		  unsigned int bits);
+
+int eventobj_wait(struct eventobj *evobj,
+		  unsigned int bits,
+		  unsigned int *bits_r,
+		  int mode,
+		  const struct timespec *timeout) __must_check;
+
+int eventobj_clear(struct eventobj *evobj,
+		   unsigned int bits,
+		   unsigned int *bits_r);
+
+int eventobj_inquire(struct eventobj *evobj, size_t waitsz,
+		     struct eventobj_waitentry *waitlist,
+		     unsigned int *bits_r);
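+
+/*
+ * Hedged usage sketch (illustration only): waiting for any of two
+ * event bits; "finalizer" stands for a registered fnref, and a NULL
+ * timeout is assumed to mean an unbounded wait.
+ *
+ *   struct eventobj ev;
+ *   unsigned int bits;
+ *
+ *   eventobj_init(&ev, 0, EVOBJ_PRIO, finalizer);
+ *   ...
+ *   eventobj_post(&ev, 0x3);
+ *   eventobj_wait(&ev, 0x3, &bits, EVOBJ_ANY, NULL);
+ */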
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _COPPERPLATE_EVENTOBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/heapobj.h b/kernel/xenomai-v3.2.4/include/copperplate/heapobj.h
new file mode 100644
index 0000000..f8d14a3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/heapobj.h
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2008-2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_HEAPOBJ_H
+#define _COPPERPLATE_HEAPOBJ_H
+
+#include <sys/types.h>
+#include <stdint.h>
+#include <string.h>
+#include <assert.h>
+#include <errno.h>
+#include <pthread.h>
+#include <xeno_config.h>
+#include <boilerplate/wrappers.h>
+#include <boilerplate/list.h>
+#include <copperplate/reference.h>
+#include <boilerplate/lock.h>
+#include <copperplate/debug.h>
+
+struct heapobj {
+	union {
+		dref_type(void *) pool_ref;
+		void *pool;
+	};
+	size_t size;
+	char name[32];
+#ifdef CONFIG_XENO_PSHARED
+	char fsname[256];
+#endif
+};
+
+struct sysgroup {
+	int thread_count;
+	struct listobj thread_list;
+	int heap_count;
+	struct listobj heap_list;
+	pthread_mutex_t lock;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int heapobj_pkg_init_private(void);
+
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+			   size_t size, void *mem);
+
+int heapobj_init_array_private(struct heapobj *hobj, const char *name,
+			       size_t size, int elems);
+#ifdef __cplusplus
+}
+#endif
+
+#ifdef CONFIG_XENO_TLSF
+
+size_t get_used_size(void *pool);
+void destroy_memory_pool(void *pool);
+size_t add_new_area(void *pool, size_t size, void *mem);
+void *malloc_ex(size_t size, void *pool);
+void free_ex(void *ptr, void *pool);
+void *tlsf_malloc(size_t size);
+void tlsf_free(void *ptr);
+size_t malloc_usable_size_ex(void *ptr, void *pool);
+
+static inline
+void pvheapobj_destroy(struct heapobj *hobj)
+{
+	destroy_memory_pool(hobj->pool);
+}
+
+static inline
+int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
+{
+	hobj->size = add_new_area(hobj->pool, size, mem);
+	if (hobj->size == (size_t)-1)
+		return __bt(-EINVAL);
+
+	return 0;
+}
+
+static inline
+void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
+{
+	return malloc_ex(size, hobj->pool);
+}
+
+static inline
+void pvheapobj_free(struct heapobj *hobj, void *ptr)
+{
+	free_ex(ptr, hobj->pool);
+}
+
+static inline
+size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
+{
+	return malloc_usable_size_ex(ptr, hobj->pool);
+}
+
+static inline
+size_t pvheapobj_inquire(struct heapobj *hobj)
+{
+	return get_used_size(hobj->pool);
+}
+
+static inline void *pvmalloc(size_t size)
+{
+	return tlsf_malloc(size);
+}
+
+static inline void pvfree(void *ptr)
+{
+	tlsf_free(ptr);
+}
+
+static inline char *pvstrdup(const char *ptr)
+{
+	char *str;
+
+	str = (char *)pvmalloc(strlen(ptr) + 1);
+	if (str == NULL)
+		return NULL;
+
+	return strcpy(str, ptr);
+}
+
+#elif defined(CONFIG_XENO_HEAPMEM)
+
+#include <stdlib.h>
+#include <boilerplate/heapmem.h>
+
+extern struct heap_memory heapmem_main;
+
+static inline
+void pvheapobj_destroy(struct heapobj *hobj)
+{
+	heapmem_destroy((struct heap_memory *)hobj->pool);
+	if (hobj->pool != (void *)&heapmem_main)
+		__STD(free(hobj->pool));
+}
+
+static inline
+int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
+{
+	return heapmem_extend((struct heap_memory *)hobj->pool,
+			      mem, size);
+}
+
+static inline
+void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
+{
+	return heapmem_alloc((struct heap_memory *)hobj->pool, size);
+}
+
+static inline
+void pvheapobj_free(struct heapobj *hobj, void *ptr)
+{
+	heapmem_free((struct heap_memory *)hobj->pool, ptr);
+}
+
+static inline
+size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
+{
+	ssize_t size = heapmem_check((struct heap_memory *)hobj->pool, ptr);
+	return size < 0 ? 0 : size;
+}
+
+static inline
+size_t pvheapobj_inquire(struct heapobj *hobj)
+{
+	return heapmem_used_size((struct heap_memory *)hobj->pool);
+}
+
+static inline void *pvmalloc(size_t size)
+{
+	return heapmem_alloc(&heapmem_main, size);
+}
+
+static inline void pvfree(void *ptr)
+{
+	heapmem_free(&heapmem_main, ptr);
+}
+
+static inline char *pvstrdup(const char *ptr)
+{
+	char *str;
+
+	str = (char *)pvmalloc(strlen(ptr) + 1);
+	if (str == NULL)
+		return NULL;
+
+	return strcpy(str, ptr);
+}
+
+#else /* !CONFIG_XENO_HEAPMEM, i.e. malloc */
+
+#include <stdlib.h>
+
+static inline void *pvmalloc(size_t size)
+{
+	/*
+	 * NOTE: We don't want debug _nrt assertions to trigger when
+	 * running over Cobalt if the user picked this allocator, so
+	 * we make sure to call the glibc directly, not the Cobalt
+	 * wrappers.
+	 */
+	return __STD(malloc(size));
+}
+
+static inline void pvfree(void *ptr)
+{
+	__STD(free(ptr));
+}
+
+static inline char *pvstrdup(const char *ptr)
+{
+	return strdup(ptr);
+}
+
+void pvheapobj_destroy(struct heapobj *hobj);
+
+int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem);
+
+void *pvheapobj_alloc(struct heapobj *hobj, size_t size);
+
+void pvheapobj_free(struct heapobj *hobj, void *ptr);
+
+size_t pvheapobj_inquire(struct heapobj *hobj);
+
+size_t pvheapobj_validate(struct heapobj *hobj, void *ptr);
+
+#endif /* !CONFIG_XENO_HEAPMEM */
+
+#ifdef CONFIG_XENO_PSHARED
+
+extern void *__main_heap;
+
+extern struct hash_table *__main_catalog;
+#define main_catalog	(*((struct hash_table *)__main_catalog))
+
+extern struct sysgroup *__main_sysgroup;
+
+struct sysgroup_memspec {
+	/** next member in sysgroup list. */
+	struct holder next;
+};
+
+static inline void *mainheap_ptr(memoff_t off)
+{
+	return off ? (void *)__memptr(__main_heap, off) : NULL;
+}
+
+static inline memoff_t mainheap_off(void *addr)
+{
+	return addr ? (memoff_t)__memoff(__main_heap, addr) : 0;
+}
+
+/*
+ * ptr shall point to a block of memory allocated within the main heap
+ * if non-null; such address is always 8-byte aligned. Handles of
+ * shared heap pointers are returned with bit #0 set, which serves as
+ * a special tag detected in mainheap_deref(). A NULL pointer maps to
+ * a tagged null offset, which mainheap_deref() translates back to NULL.
+ */
+#define mainheap_ref(ptr, type)						\
+	({								\
+		type handle;						\
+		assert(__builtin_types_compatible_p(typeof(type), unsigned long) || \
+		       __builtin_types_compatible_p(typeof(type), uintptr_t)); \
+		assert(ptr == NULL || __memchk(__main_heap, ptr));	\
+		handle = (type)mainheap_off(ptr);			\
+		handle|1;						\
+	})
+/*
+ * Handles of shared heap-based pointers have bit #0 set. Other values
+ * are not translated, and the return value is the original handle
+ * cast to a pointer. A null handle is always returned unchanged.
+ */
+#define mainheap_deref(handle, type)					\
+	({								\
+		type *ptr;						\
+		assert(__builtin_types_compatible_p(typeof(handle), unsigned long) || \
+		       __builtin_types_compatible_p(typeof(handle), uintptr_t)); \
+		ptr = (handle & 1) ? (type *)mainheap_ptr(handle & ~1UL) : (type *)handle; \
+		ptr;							\
+	})
+
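+/*
+ * Usage sketch (illustrative only): round-tripping a main-heap
+ * pointer through the handle encoding above. mainheap_ref() tags the
+ * offset with bit #0, and mainheap_deref() detects that tag to
+ * translate the offset back into a local pointer. The input must
+ * have been allocated from the main heap, or the assertion in
+ * mainheap_ref() fires in debug builds.
+ */
+static inline void *mainheap_example_roundtrip(void *p)
+{
+	unsigned long h = mainheap_ref(p, unsigned long);
+	return mainheap_deref(h, void);
+}
+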
+static inline void
+__sysgroup_add(struct sysgroup_memspec *obj, struct listobj *q, int *countp)
+{
+	write_lock_nocancel(&__main_sysgroup->lock);
+	(*countp)++;
+	list_append(&obj->next, q);
+	write_unlock(&__main_sysgroup->lock);
+}
+
+#define sysgroup_add(__group, __obj)	\
+	__sysgroup_add(__obj, &(__main_sysgroup->__group ## _list),	\
+		       &(__main_sysgroup->__group ## _count))
+
+static inline void
+__sysgroup_remove(struct sysgroup_memspec *obj, int *countp)
+{
+	write_lock_nocancel(&__main_sysgroup->lock);
+	(*countp)--;
+	list_remove(&obj->next);
+	write_unlock(&__main_sysgroup->lock);
+}
+
+#define sysgroup_remove(__group, __obj)	\
+	__sysgroup_remove(__obj, &(__main_sysgroup->__group ## _count))
+
+static inline void sysgroup_lock(void)
+{
+	read_lock_nocancel(&__main_sysgroup->lock);
+}
+
+static inline void sysgroup_unlock(void)
+{
+	read_unlock(&__main_sysgroup->lock);
+}
+
+#define sysgroup_count(__group)	\
+	(__main_sysgroup->__group ## _count)
+
+#define for_each_sysgroup(__obj, __tmp, __group)	\
+	list_for_each_entry_safe(__obj, __tmp, &(__main_sysgroup->__group ## _list), next)
+
+int heapobj_pkg_init_shared(void);
+
+int heapobj_init(struct heapobj *hobj, const char *name,
+		 size_t size);
+
+static inline int __heapobj_init(struct heapobj *hobj, const char *name,
+				 size_t size, void *unused)
+{
+	/* Can't work on user-defined memory in shared mode. */
+	return heapobj_init(hobj, name, size);
+}
+
+int heapobj_init_array(struct heapobj *hobj, const char *name,
+		       size_t size, int elems);
+
+void heapobj_destroy(struct heapobj *hobj);
+
+int heapobj_extend(struct heapobj *hobj,
+		   size_t size, void *mem);
+
+void *heapobj_alloc(struct heapobj *hobj,
+		    size_t size);
+
+void heapobj_free(struct heapobj *hobj,
+		  void *ptr);
+
+size_t heapobj_validate(struct heapobj *hobj,
+			void *ptr);
+
+size_t heapobj_inquire(struct heapobj *hobj);
+
+size_t heapobj_get_size(struct heapobj *hobj);
+
+int heapobj_bind_session(const char *session);
+
+void heapobj_unbind_session(void);
+
+int heapobj_unlink_session(const char *session);
+
+void *xnmalloc(size_t size);
+
+void xnfree(void *ptr);
+
+char *xnstrdup(const char *ptr);
+
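+/*
+ * Usage sketch (illustrative only): the canonical lifetime of a
+ * heapobj through the API above. Name and sizes are arbitrary; the
+ * same call sequence works unchanged with the private fallbacks
+ * below, which keep identical signatures.
+ */
+static inline int heapobj_example_lifetime(void)
+{
+	struct heapobj hobj;
+	void *p;
+	int ret;
+
+	ret = heapobj_init(&hobj, "example", 8192);
+	if (ret)
+		return ret;
+
+	p = heapobj_alloc(&hobj, 128);
+	if (p)
+		heapobj_free(&hobj, p);
+
+	heapobj_destroy(&hobj);
+
+	return 0;
+}
+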
+#else /* !CONFIG_XENO_PSHARED */
+
+struct sysgroup_memspec {
+};
+
+/*
+ * Tells whether an object lies within some shared heap. This is
+ * never the case when pshared mode is disabled.
+ */
+static inline int pshared_check(void *heap, void *addr)
+{
+	return 0;
+}
+
+#ifdef __cplusplus
+#define __check_ref_width(__dst, __src)			\
+	({						\
+		assert(sizeof(__dst) >= sizeof(__src));	\
+		(typeof(__dst))__src;			\
+	})
+#else
+#define __check_ref_width(__dst, __src)					\
+	__builtin_choose_expr(						\
+		sizeof(__dst) >= sizeof(__src), (typeof(__dst))__src,	\
+		((void)0))
+#endif
+
+#define mainheap_ref(ptr, type)						\
+	({								\
+		type handle;						\
+		handle = __check_ref_width(handle, ptr);		\
+		assert(ptr == NULL || __memchk(__main_heap, ptr));	\
+		handle;							\
+	})
+#define mainheap_deref(handle, type)					\
+	({								\
+		type *ptr;						\
+		ptr = __check_ref_width(ptr, handle);			\
+		ptr;							\
+	})
+
+#define sysgroup_add(__group, __obj)	do { } while (0)
+#define sysgroup_remove(__group, __obj)	do { } while (0)
+
+static inline int heapobj_pkg_init_shared(void)
+{
+	return 0;
+}
+
+static inline int __heapobj_init(struct heapobj *hobj, const char *name,
+				 size_t size, void *mem)
+{
+	return __heapobj_init_private(hobj, name, size, mem);
+}
+
+static inline int heapobj_init(struct heapobj *hobj, const char *name,
+			       size_t size)
+{
+	return __heapobj_init_private(hobj, name, size, NULL);
+}
+
+static inline int heapobj_init_array(struct heapobj *hobj, const char *name,
+				     size_t size, int elems)
+{
+	return heapobj_init_array_private(hobj, name, size, elems);
+}
+
+static inline void heapobj_destroy(struct heapobj *hobj)
+{
+	pvheapobj_destroy(hobj);
+}
+
+static inline int heapobj_extend(struct heapobj *hobj,
+				 size_t size, void *mem)
+{
+	return pvheapobj_extend(hobj, size, mem);
+}
+
+static inline void *heapobj_alloc(struct heapobj *hobj,
+				  size_t size)
+{
+	return pvheapobj_alloc(hobj, size);
+}
+
+static inline void heapobj_free(struct heapobj *hobj,
+				void *ptr)
+{
+	pvheapobj_free(hobj, ptr);
+}
+
+static inline size_t heapobj_validate(struct heapobj *hobj,
+				      void *ptr)
+{
+	return pvheapobj_validate(hobj, ptr);
+}
+
+static inline size_t heapobj_inquire(struct heapobj *hobj)
+{
+	return pvheapobj_inquire(hobj);
+}
+
+static inline int heapobj_bind_session(const char *session)
+{
+	return -ENOSYS;
+}
+
+static inline int heapobj_unlink_session(const char *session)
+{
+	return 0;
+}
+
+static inline void heapobj_unbind_session(void) { }
+
+static inline void *xnmalloc(size_t size)
+{
+	return pvmalloc(size);
+}
+
+static inline void xnfree(void *ptr)
+{
+	pvfree(ptr);
+}
+
+static inline char *xnstrdup(const char *ptr)
+{
+	return pvstrdup(ptr);
+}
+
+#endif	/* !CONFIG_XENO_PSHARED */
+
+static inline const char *heapobj_name(struct heapobj *hobj)
+{
+	return hobj->name;
+}
+
+static inline size_t heapobj_size(struct heapobj *hobj)
+{
+	return hobj->size;
+}
+
+#endif /* _COPPERPLATE_HEAPOBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/reference.h b/kernel/xenomai-v3.2.4/include/copperplate/reference.h
new file mode 100644
index 0000000..8f6f76c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/reference.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COPPERPLATE_REFERENCE_H
+#define _COPPERPLATE_REFERENCE_H
+
+#include <boilerplate/limits.h>
+#include <boilerplate/scope.h>
+#include <boilerplate/setup.h>
+
+#define libcopperplate_tag  0	/* Library tag - unique and constant. */
+#define libcopperplate_cbi  1	/* Callback binary interface level. */
+
+#ifdef CONFIG_XENO_PSHARED
+/*
+ * Layout of a function reference handle in shared memory (32-bit
+ * value):
+ *
+ * xxHHHHHHHHHHHHHHHHHHHHLLLLLPPPPP
+ *
+ * where: 'P' => function index in the per-library array
+ *        'L' => library tag
+ *        'H' => symbol hash value (symname + cbi)
+ *        'x' => unassigned
+ *
+ * NOTE: handle value -1 is kept for representing a NULL function
+ * pointer; bit #31 should remain unassigned and cleared for this
+ * purpose.
+ */
+
+struct __fnref {
+	void (*fn)(void);
+	unsigned int hash;
+};
+
+#define __refvar(l, s)		__ ## l ## __ref__ ## s
+#define __refmangle(l, h, p)	(((h & 0xfffff) << 10)|((l & 0x1f) << 5)|(p & 0x1f))
+#define __refhash(r)		(((r) >> 10) & 0xfffffU)
+#define __reftag(r)		(((r) >> 5) & 0x1f)
+#define __refpos(r)		((r) & 0x1f)
+#define __refchk(v, r)							\
+	({								\
+		int __tag = __reftag(r), __pos = __refpos(r);		\
+		typeof(v) __p = (typeof(v))__fnrefs[__tag][__pos].fn;	\
+		assert(__fnrefs[__tag][__pos].hash == __refhash(r));	\
+		assert(__p != NULL);					\
+		__p;							\
+	})
+#define fnref_type(t)		int
+#define fnref_null		-1
+static inline int __fnref_nofn(void *fnaddr)
+{
+	return fnaddr == NULL;
+}
+#define fnref_put(l, s)		(__fnref_nofn((void *)(s)) ? fnref_null : __refvar(l, s))
+#define fnref_get(v, r)		((v) = (r) < 0 ? NULL :	__refchk(v, r))
+#define fnref_register(l, s)						\
+	int __refvar(l, s);						\
+	static void __early_ctor __ifnref_ ## s(void)			\
+	{								\
+		__refvar(l, s) = __fnref_register(#l, l ## _tag,	\
+						  l ## _cbi,		\
+						  #s, (void (*)(void))s); \
+	}
+#define fnref_declare(l, s)	extern int __refvar(l, s)
+
+#define MAX_FNLIBS  16		/* max=32 */
+#define MAX_FNREFS  16		/* max=32 */
+
+extern struct __fnref __fnrefs[MAX_FNLIBS][MAX_FNREFS];
+
+int __fnref_register(const char *libname,
+		     int libtag, int cbirev,
+		     const char *symname, void (*fn)(void));
+
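+/*
+ * Usage sketch (illustrative only): the packing macros above are
+ * lossless within the stated field widths (20-bit hash, 5-bit
+ * library tag, 5-bit position), as this round-trip illustrates.
+ */
+static inline int __fnref_example_roundtrip(void)
+{
+	int r = __refmangle(3, 0xabcde, 7);
+
+	return __reftag(r) == 3 && __refhash(r) == 0xabcde &&
+		__refpos(r) == 7;	/* always true */
+}
+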
+#else /* !CONFIG_XENO_PSHARED */
+
+#define fnref_type(t)		__typeof__(t)
+#define fnref_null		NULL
+#define fnref_put(l, s)		(s)
+#define fnref_get(v, r)		((v) = (r))
+#define fnref_register(l, s)
+#define fnref_declare(l, s)
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+#endif /* _COPPERPLATE_REFERENCE_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h b/kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h
new file mode 100644
index 0000000..f3d1a17
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/registry-obstack.h
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COPPERPLATE_REGISTRY_OBSTACK_H
+#define _COPPERPLATE_REGISTRY_OBSTACK_H
+
+#include <copperplate/registry.h>
+
+#ifdef CONFIG_XENO_REGISTRY
+
+#include <stdlib.h>
+#include <boilerplate/obstack.h>
+#include <copperplate/heapobj.h>
+
+struct threadobj;
+struct syncobj;
+
+/*
+ * Obstacks are grown from handlers called by the fusefs server
+ * thread, which has no real-time requirement: malloc/free is fine for
+ * memory management.
+ */
+#define obstack_chunk_alloc	malloc
+#define obstack_chunk_free	free
+
+struct threadobj;
+
+struct fsobstack {
+	struct obstack obstack;
+	void *data;
+	size_t len;
+};
+
+struct fsobstack_syncops {
+	int (*prepare_cache)(struct fsobstack *o,
+			     struct obstack *cache, int item_count);
+	size_t (*collect_data)(void *p, struct threadobj *thobj);
+	size_t (*format_data)(struct fsobstack *o, void *p);
+};
+
+struct syncobj;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void fsobstack_grow_string(struct fsobstack *o,
+			   const char *s);
+
+void fsobstack_grow_char(struct fsobstack *o,
+			 char c);
+
+int fsobstack_grow_format(struct fsobstack *o,
+			  const char *fmt, ...);
+
+int fsobstack_grow_file(struct fsobstack *o,
+			const char *path);
+
+int fsobstack_grow_syncobj_grant(struct fsobstack *o,
+				 struct syncobj *sobj,
+				 struct fsobstack_syncops *ops);
+
+int fsobstack_grow_syncobj_drain(struct fsobstack *o,
+				 struct syncobj *sobj,
+				 struct fsobstack_syncops *ops);
+
+ssize_t fsobstack_pull(struct fsobstack *o,
+		       char *buf, size_t size);
+
+ssize_t fsobj_obstack_read(struct fsobj *fsobj,
+			   char *buf, size_t size, off_t offset,
+			   void *priv);
+
+int fsobj_obstack_release(struct fsobj *fsobj, void *priv);
+
+#ifdef __cplusplus
+}
+#endif
+
+static inline void fsobstack_init(struct fsobstack *o)
+{
+	obstack_init(&o->obstack);
+	o->data = NULL;
+	o->len = 0;
+}
+
+static inline void fsobstack_destroy(struct fsobstack *o)
+{
+	obstack_free(&o->obstack, NULL);
+}
+
+static inline void fsobstack_finish(struct fsobstack *o)
+{
+	o->len = obstack_object_size(&o->obstack);
+	o->data = obstack_finish(&o->obstack);
+}
+
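+/*
+ * Usage sketch (illustrative only): the grow/finish/pull sequence a
+ * registry read handler is expected to follow. Since this runs in
+ * the fusefs server thread, the malloc-backed obstack growth set up
+ * above is acceptable. The "answer" payload is made up.
+ */
+static inline ssize_t fsobstack_example_format(char *buf, size_t size)
+{
+	struct fsobstack o;
+	ssize_t ret;
+
+	fsobstack_init(&o);
+	fsobstack_grow_format(&o, "answer=%d\n", 42);
+	fsobstack_finish(&o);
+	ret = fsobstack_pull(&o, buf, size);
+	fsobstack_destroy(&o);
+
+	return ret;
+}
+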
+static inline
+void registry_init_file_obstack(struct fsobj *fsobj,
+				const struct registry_operations *ops)
+{
+	registry_init_file(fsobj, ops, sizeof(struct fsobstack));
+}
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static inline
+void registry_init_file_obstack(struct fsobj *fsobj,
+				const struct registry_operations *ops)
+{ }
+
+#endif /* !CONFIG_XENO_REGISTRY */
+
+#endif /* !_COPPERPLATE_REGISTRY_OBSTACK_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/registry.h b/kernel/xenomai-v3.2.4/include/copperplate/registry.h
new file mode 100644
index 0000000..c94c902
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/registry.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_REGISTRY_H
+#define _COPPERPLATE_REGISTRY_H
+
+#include <sys/types.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <boilerplate/list.h>
+#include <boilerplate/hash.h>
+#include <boilerplate/obstack.h>
+
+struct fsobj;
+
+#define REGISTRY_SHARED  1
+#define REGISTRY_ANON    2
+
+#ifdef CONFIG_XENO_REGISTRY
+
+struct registry_operations {
+	int (*open)(struct fsobj *fsobj, void *priv);
+	int (*release)(struct fsobj *fsobj, void *priv);
+	ssize_t (*read)(struct fsobj *fsobj,
+			char *buf, size_t size, off_t offset,
+			void *priv);
+	ssize_t (*write)(struct fsobj *fsobj,
+			 const char *buf, size_t size, off_t offset,
+			 void *priv);
+};
+
+struct regfs_dir;
+
+struct fsobj {
+	pthread_mutex_t lock;
+	char *path;
+	const char *basename;
+	int mode;
+	size_t privsz;
+	struct regfs_dir *dir;
+	struct timespec ctime;
+	struct timespec mtime;
+	const struct registry_operations *ops;
+	struct pvholder link;
+	struct pvhashobj hobj;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int registry_add_dir(const char *fmt, ...);
+
+int registry_init_file(struct fsobj *fsobj,
+		       const struct registry_operations *ops,
+		       size_t privsz);
+
+int registry_add_file(struct fsobj *fsobj,
+		      int mode,
+		      const char *fmt, ...);
+
+void registry_destroy_file(struct fsobj *fsobj);
+
+void registry_touch_file(struct fsobj *fsobj);
+
+int __registry_pkg_init(const char *arg0,
+			char *mountpt,
+			int flags);
+
+int registry_pkg_init(const char *arg0,
+		      int flags);
+
+void registry_pkg_destroy(void);
+
+#ifdef __cplusplus
+}
+#endif
+
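+/*
+ * Usage sketch (illustrative only): exporting a read-only registry
+ * node with no private state. The "/example" path is made up, and
+ * passing O_RDONLY as the mode is an assumption based on the
+ * <fcntl.h> include above.
+ */
+static inline int registry_example_export(struct fsobj *fsobj,
+					  const struct registry_operations *ops)
+{
+	int ret = registry_init_file(fsobj, ops, 0);
+	if (ret)
+		return ret;
+
+	return registry_add_file(fsobj, O_RDONLY, "/example");
+}
+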
+#else /* !CONFIG_XENO_REGISTRY */
+
+struct fsobj {
+};
+
+struct registry_operations {
+};
+
+static inline
+int registry_add_dir(const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline
+void registry_init_file(struct fsobj *fsobj,
+			const struct registry_operations *ops,
+			size_t privsz)
+{
+}
+
+static inline
+int registry_add_file(struct fsobj *fsobj,
+		      int mode,
+		      const char *fmt, ...)
+{
+	return 0;
+}
+
+static inline
+void registry_destroy_file(struct fsobj *fsobj)
+{
+}
+
+static inline
+void registry_touch_file(struct fsobj *fsobj)
+{
+}
+
+static inline
+int __registry_pkg_init(const char *arg0,
+			char *mountpt, int flags)
+{
+	return 0;
+}
+
+static inline
+int registry_pkg_init(const char *arg0,
+		      int flags)
+{
+	return 0;
+}
+
+static inline
+void registry_pkg_destroy(void)
+{
+}
+
+#endif /* !CONFIG_XENO_REGISTRY */
+
+#endif /* !_COPPERPLATE_REGISTRY_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/semobj.h b/kernel/xenomai-v3.2.4/include/copperplate/semobj.h
new file mode 100644
index 0000000..4d9e0a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/semobj.h
@@ -0,0 +1,88 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_SEMOBJ_H
+#define _COPPERPLATE_SEMOBJ_H
+
+#include <boilerplate/compiler.h>
+#include <copperplate/reference.h>
+
+struct semobj_waitentry {
+	pid_t pid;
+	char name[32];
+};
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <semaphore.h>
+
+struct semobj_corespec {
+	sem_t sem;
+};
+
+#else  /* CONFIG_XENO_MERCURY */
+
+#include <copperplate/syncobj.h>
+
+struct semobj_corespec {
+	struct syncobj sobj;
+	int flags;
+	int value;
+};
+
+#endif /* CONFIG_XENO_MERCURY */
+
+struct semobj {
+	struct semobj_corespec core;
+	fnref_type(void (*)(struct semobj *smobj)) finalizer;
+};
+
+#define SEMOBJ_PRIO	0x1
+#define SEMOBJ_PULSE	0x2
+#define SEMOBJ_WARNDEL	0x4
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int semobj_init(struct semobj *smobj,
+		int flags, int value,
+		fnref_type(void (*)(struct semobj *smobj)) finalizer);
+
+int semobj_destroy(struct semobj *smobj);
+
+void semobj_uninit(struct semobj *smobj);
+
+int semobj_post(struct semobj *smobj);
+
+int semobj_broadcast(struct semobj *smobj);
+
+int semobj_wait(struct semobj *smobj,
+		const struct timespec *timeout) __must_check;
+
+int semobj_getvalue(struct semobj *smobj, int *sval);
+
+int semobj_inquire(struct semobj *smobj, size_t waitsz,
+		   struct semobj_waitentry *waitlist,
+		   int *val_r);
+
+#ifdef __cplusplus
+}
+#endif
+
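+/*
+ * Usage sketch (illustrative only): a blocking take followed by a
+ * give, i.e. the classical semaphore P/V pair. A NULL timeout is
+ * assumed to request an unbounded wait, in line with the other
+ * copperplate wait services.
+ */
+static inline int semobj_example_take_give(struct semobj *smobj)
+{
+	int ret = semobj_wait(smobj, NULL);
+	if (ret)
+		return ret;
+
+	return semobj_post(smobj);
+}
+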
+#endif /* _COPPERPLATE_SEMOBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/syncobj.h b/kernel/xenomai-v3.2.4/include/copperplate/syncobj.h
new file mode 100644
index 0000000..66dee02
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/syncobj.h
@@ -0,0 +1,233 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_SYNCOBJ_H
+#define _COPPERPLATE_SYNCOBJ_H
+
+#include <pthread.h>
+#include <time.h>
+#include <boilerplate/list.h>
+#include <boilerplate/lock.h>
+#include <copperplate/reference.h>
+
+/* syncobj->flags */
+#define SYNCOBJ_FIFO	0x0
+#define SYNCOBJ_PRIO	0x1
+#define SYNCOBJ_LOCKED	0x2
+
+/* threadobj->wait_status */
+#define SYNCOBJ_FLUSHED		0x1
+#define SYNCOBJ_SIGNALED	0x2
+#define SYNCOBJ_DRAINWAIT	0x4
+
+#define SYNCOBJ_MAGIC  0xf9f99f9f
+
+struct threadobj;
+
+struct syncstate {
+	int state;
+};
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <boilerplate/atomic.h>
+#include <cobalt/uapi/monitor.h>
+
+struct syncobj_corespec {
+	cobalt_monitor_t monitor;
+};
+
+#else  /* CONFIG_XENO_MERCURY */
+
+struct syncobj_corespec {
+	pthread_mutex_t lock;
+	pthread_cond_t drain_sync;
+};
+
+#endif /* CONFIG_XENO_MERCURY */
+
+struct syncobj {
+	unsigned int magic;
+	int flags;
+	int wait_count;
+	struct listobj grant_list;
+	int grant_count;
+	struct listobj drain_list;
+	int drain_count;
+	struct syncobj_corespec core;
+	fnref_type(void (*)(struct syncobj *sobj)) finalizer;
+};
+
+#define syncobj_for_each_grant_waiter(sobj, pos)		\
+	list_for_each_entry(pos, &(sobj)->grant_list, wait_link)
+
+#define syncobj_for_each_grant_waiter_safe(sobj, pos, tmp)	\
+	list_for_each_entry_safe(pos, tmp, &(sobj)->grant_list, wait_link)
+
+#define syncobj_for_each_drain_waiter(sobj, pos)		\
+	list_for_each_entry(pos, &(sobj)->drain_list, wait_link)
+
+#define syncobj_for_each_drain_waiter_safe(sobj, pos, tmp)	\
+	list_for_each_entry_safe(pos, tmp, &(sobj)->drain_list, wait_link)
+
+void __syncobj_cleanup_wait(struct syncobj *sobj,
+			    struct threadobj *thobj);
+
+#ifdef CONFIG_XENO_DEBUG
+
+static inline void __syncobj_tag_locked(struct syncobj *sobj)
+{
+	sobj->flags |= SYNCOBJ_LOCKED;
+}
+
+static inline void __syncobj_tag_unlocked(struct syncobj *sobj)
+{
+	assert(sobj->flags & SYNCOBJ_LOCKED);
+	sobj->flags &= ~SYNCOBJ_LOCKED;
+}
+
+static inline void __syncobj_check_locked(struct syncobj *sobj)
+{
+	assert(sobj->flags & SYNCOBJ_LOCKED);
+}
+
+#else /* !CONFIG_XENO_DEBUG */
+
+static inline void __syncobj_tag_locked(struct syncobj *sobj)
+{
+}
+
+static inline void __syncobj_tag_unlocked(struct syncobj *sobj)
+{
+}
+
+static inline void __syncobj_check_locked(struct syncobj *sobj)
+{
+}
+
+#endif /* !CONFIG_XENO_DEBUG */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int __syncobj_broadcast_drain(struct syncobj *sobj, int reason);
+
+int __syncobj_broadcast_grant(struct syncobj *sobj, int reason);
+
+int syncobj_init(struct syncobj *sobj, clockid_t clk_id, int flags,
+		 fnref_type(void (*)(struct syncobj *sobj)) finalizer) __must_check;
+
+int syncobj_wait_grant(struct syncobj *sobj,
+		 const struct timespec *timeout,
+		 struct syncstate *syns) __must_check;
+
+struct threadobj *syncobj_grant_one(struct syncobj *sobj);
+
+void syncobj_grant_to(struct syncobj *sobj,
+		      struct threadobj *thobj);
+
+struct threadobj *syncobj_peek_grant(struct syncobj *sobj);
+
+struct threadobj *syncobj_peek_drain(struct syncobj *sobj);
+
+int syncobj_lock(struct syncobj *sobj,
+		 struct syncstate *syns) __must_check;
+
+void syncobj_unlock(struct syncobj *sobj,
+		    struct syncstate *syns);
+
+int syncobj_wait_drain(struct syncobj *sobj,
+		       const struct timespec *timeout,
+		       struct syncstate *syns) __must_check;
+
+int syncobj_destroy(struct syncobj *sobj,
+		    struct syncstate *syns);
+
+void syncobj_uninit(struct syncobj *sobj);
+
+static inline int syncobj_grant_wait_p(struct syncobj *sobj)
+{
+	__syncobj_check_locked(sobj);
+
+	return !list_empty(&sobj->grant_list);
+}
+
+static inline int syncobj_count_grant(struct syncobj *sobj)
+{
+	__syncobj_check_locked(sobj);
+
+	return sobj->grant_count;
+}
+
+static inline int syncobj_count_drain(struct syncobj *sobj)
+{
+	__syncobj_check_locked(sobj);
+
+	return sobj->drain_count;
+}
+
+static inline int syncobj_drain_wait_p(struct syncobj *sobj)
+{
+	__syncobj_check_locked(sobj);
+
+	return !list_empty(&sobj->drain_list);
+}
+
+static inline int syncobj_drain(struct syncobj *sobj)
+{
+	int ret = 0;
+
+	__syncobj_check_locked(sobj);
+
+	if (sobj->drain_count > 0)
+		ret = __syncobj_broadcast_drain(sobj, SYNCOBJ_SIGNALED);
+
+	return ret;
+}
+
+static inline int syncobj_grant_all(struct syncobj *sobj)
+{
+	int ret = 0;
+
+	__syncobj_check_locked(sobj);
+
+	if (sobj->grant_count > 0)
+		ret = __syncobj_broadcast_grant(sobj, SYNCOBJ_SIGNALED);
+
+	return ret;
+}
+
+static inline int syncobj_flush(struct syncobj *sobj)
+{
+	__syncobj_check_locked(sobj);
+
+	if (sobj->grant_count > 0)
+		__syncobj_broadcast_grant(sobj, SYNCOBJ_FLUSHED);
+
+	if (sobj->drain_count > 0)
+		__syncobj_broadcast_drain(sobj, SYNCOBJ_FLUSHED);
+
+	return sobj->wait_count;
+}
+
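+/*
+ * Usage sketch (illustrative only): the monitor-like pattern this
+ * API supports. The caller locks the object, tests a condition of
+ * its own, then sleeps on the grant list; syncobj_wait_grant() is
+ * assumed to drop and reacquire the lock across the wait, like a
+ * condition variable, so the condition is re-tested on wakeup.
+ */
+static inline int syncobj_example_wait_condition(struct syncobj *sobj,
+						 int (*condition)(void))
+{
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(sobj, &syns);
+	if (ret)
+		return ret;
+
+	while (!condition()) {
+		ret = syncobj_wait_grant(sobj, NULL, &syns);
+		if (ret)	/* e.g. flushed or interrupted */
+			break;
+	}
+
+	syncobj_unlock(sobj, &syns);
+
+	return ret;
+}
+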
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _COPPERPLATE_SYNCOBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/threadobj.h b/kernel/xenomai-v3.2.4/include/copperplate/threadobj.h
new file mode 100644
index 0000000..c836341
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/threadobj.h
@@ -0,0 +1,589 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_THREADOBJ_H
+#define _COPPERPLATE_THREADOBJ_H
+
+#include <time.h>
+#include <semaphore.h>
+#include <signal.h>
+#include <pthread.h>
+#include <stdint.h>
+#include <boilerplate/list.h>
+#include <boilerplate/lock.h>
+#include <boilerplate/sched.h>
+#include <copperplate/clockobj.h>
+#include <copperplate/heapobj.h>
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/uapi/kernel/types.h>
+
+struct xnthread_user_window;
+
+struct threadobj_corespec {
+	xnhandle_t handle;
+	union {
+		__u32 u_winoff;
+		struct xnthread_user_window *u_window;
+	};
+};
+
+struct threadobj_stat {
+	/** Execution time in primary mode (ns). */
+	ticks_t xtime;
+	/** Current timeout value (ns). */
+	ticks_t timeout;
+	/** Number of primary->secondary mode switches. */
+	uint64_t msw;
+	/** Number of context switches. */
+	uint64_t csw;
+	/** Number of Xenomai syscalls. */
+	uint64_t xsc;
+	/** Current CPU for thread. */
+	int cpu;
+	/** Scheduler lock nesting count. */
+	int schedlock;
+	/** Cobalt thread status bits. */
+	unsigned int status;
+	/** Number of page faults. */
+	uint32_t pf;
+};
+
+#define SCHED_CORE  SCHED_COBALT
+
+static inline
+void threadobj_save_timeout(struct threadobj_corespec *corespec,
+			    const struct timespec *timeout)
+{
+	/*
+	 * We retrieve this information from the nucleus directly via
+	 * cobalt_thread_stat().
+	 */
+}
+
+#ifdef CONFIG_XENO_PSHARED
+
+static inline struct xnthread_user_window *
+threadobj_get_window(struct threadobj_corespec *corespec)
+{
+	extern void *cobalt_umm_shared;
+	return (struct xnthread_user_window *)
+		((caddr_t)cobalt_umm_shared + corespec->u_winoff);
+}
+
+#else /* !CONFIG_XENO_PSHARED */
+
+static inline struct xnthread_user_window *
+threadobj_get_window(struct threadobj_corespec *corespec)
+{
+	return corespec->u_window;
+}
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+#else  /* CONFIG_XENO_MERCURY */
+
+#include <sys/time.h>
+
+struct threadobj_corespec {
+	pthread_cond_t grant_sync;
+	int policy_unlocked;
+	struct sched_param_ex schedparam_unlocked;
+	timer_t rr_timer;
+	/** Timeout reported by sysregd. */
+	struct timespec timeout;
+#ifdef CONFIG_XENO_WORKAROUND_CONDVAR_PI
+	int policy_unboosted;
+	struct sched_param_ex schedparam_unboosted;
+#endif
+};
+
+struct threadobj_stat {
+	/** Current timeout value (ns). */
+	ticks_t timeout;
+	/** Current CPU for thread. */
+	int cpu;
+	/** Scheduler lock nesting count. */
+	int schedlock;
+	/** Mercury thread status bits. */
+	unsigned int status;
+};
+
+#define SCHED_CORE  SCHED_FIFO
+
+static inline
+void threadobj_save_timeout(struct threadobj_corespec *corespec,
+			    const struct timespec *timeout)
+{
+	if (timeout)
+		corespec->timeout = *timeout;
+}
+
+#endif /* CONFIG_XENO_MERCURY */
+
+/*
+ * threadobj->status, updated with ->lock held.
+ */
+#define __THREAD_S_STARTED	(1 << 0)	/* threadobj_start() called. */
+#define __THREAD_S_WARMUP	(1 << 1)	/* threadobj_prologue() not called yet. */
+#define __THREAD_S_ABORTED	(1 << 2)	/* Cancelled before start. */
+#define __THREAD_S_LOCKED	(1 << 3)	/* threadobj_lock() granted (debug only). */
+#define __THREAD_S_ACTIVE	(1 << 4)	/* Running user code. */
+#define __THREAD_S_SUSPENDED	(1 << 5)	/* Suspended via threadobj_suspend(). */
+#define __THREAD_S_SAFE		(1 << 6)	/* TCB release deferred. */
+#define __THREAD_S_PERIODIC	(1 << 7)	/* Periodic timer set. */
+#define __THREAD_S_DEBUG	(1 << 31)	/* Debug mode enabled. */
+/*
+ * threadobj->run_state, locklessly updated by "current", merged
+ * with ->status bits by threadobj_get_status().
+ */
+#define __THREAD_S_RUNNING	0
+#define __THREAD_S_DORMANT	(1 << 16)
+#define __THREAD_S_WAIT		(1 << 17)
+#define __THREAD_S_TIMEDWAIT	(1 << 18)
+#define __THREAD_S_DELAYED	(1 << 19)
+#define __THREAD_S_BREAK	(__THREAD_S_DELAYED|(1 << 20))
+
+/* threadobj mode bits */
+#define __THREAD_M_LOCK		(1 << 0) /* Toggle scheduler lock. */
+#define __THREAD_M_WARNSW	(1 << 1) /* Toggle switch warning bit. */
+#define __THREAD_M_CONFORMING	(1 << 2) /* Switch to conforming mode. */
+#define __THREAD_M_SPARE0	(1 << 16)
+#define __THREAD_M_SPARE1	(1 << 17)
+#define __THREAD_M_SPARE2	(1 << 18)
+#define __THREAD_M_SPARE3	(1 << 19)
+#define __THREAD_M_SPARE4	(1 << 20)
+#define __THREAD_M_SPARE5	(1 << 21)
+#define __THREAD_M_SPARE6	(1 << 22)
+#define __THREAD_M_SPARE7	(1 << 23)
+
+/*
+ * We need to use a valid address here. The object will never be dereferenced
+ * when it is identified as IRQ context, so the pthread key itself is fine.
+ */
+#define THREADOBJ_IRQCONTEXT	((struct threadobj *)&threadobj_tskey)
+
+struct traceobj;
+struct syncobj;
+
+struct threadobj {
+	unsigned int magic;	/* Must be first. */
+	pthread_t ptid;
+	pthread_mutex_t lock;
+
+	int schedlock_depth;
+	int cancel_state;
+	int status;
+	int run_state;
+	int policy;
+	struct sched_param_ex schedparam;
+	int global_priority;
+	pid_t cnode;
+	pid_t pid;
+	char name[32];
+
+	void (*finalizer)(struct threadobj *thobj);
+	int core_offset;
+	int *errno_pointer;
+	/* Those members belong exclusively to the syncobj code. */
+	struct syncobj *wait_sobj;
+	struct holder wait_link;
+	int wait_status;
+	int wait_prio;
+	dref_type(void *) wait_union;
+	size_t wait_size;
+	timer_t periodic_timer;
+
+	struct threadobj_corespec core;
+	struct timespec tslice;
+	pthread_cond_t barrier;
+	struct traceobj *tracer;
+	sem_t *cancel_sem;
+	struct sysgroup_memspec memspec;
+	struct backtrace_data btd;
+};
+
+struct threadobj_init_data {
+	unsigned int magic;
+	cpu_set_t affinity;
+	int policy;
+	struct sched_param_ex param_ex;
+	void (*finalizer)(struct threadobj *thobj);
+};
+
+extern int threadobj_high_prio;
+
+extern int threadobj_irq_prio;
+
+extern pthread_key_t threadobj_tskey;
+
+#ifdef HAVE_TLS
+
+extern __thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+struct threadobj *__threadobj_current;
+
+static inline void threadobj_set_current(struct threadobj *thobj)
+{
+	__threadobj_current = thobj;
+	pthread_setspecific(threadobj_tskey, thobj);
+}
+
+static inline struct threadobj *__threadobj_get_current(void)
+{
+	return __threadobj_current;
+}
+
+#else /* !HAVE_TLS */
+
+static inline void threadobj_set_current(struct threadobj *thobj)
+{
+	pthread_setspecific(threadobj_tskey, thobj);
+}
+
+static inline struct threadobj *__threadobj_get_current(void)
+{
+	return (struct threadobj *)pthread_getspecific(threadobj_tskey);
+}
+
+#endif /* !HAVE_TLS */
+
+static inline struct threadobj *threadobj_current(void)
+{
+	struct threadobj *thobj = __threadobj_get_current();
+	return thobj == NULL || thobj == THREADOBJ_IRQCONTEXT ? NULL : thobj;
+}
+
+#ifdef CONFIG_XENO_DEBUG
+
+static inline void __threadobj_tag_locked(struct threadobj *thobj)
+{
+	thobj->status |= __THREAD_S_LOCKED;
+}
+
+static inline void __threadobj_tag_unlocked(struct threadobj *thobj)
+{
+	assert(thobj->status & __THREAD_S_LOCKED);
+	thobj->status &= ~__THREAD_S_LOCKED;
+}
+
+static inline void __threadobj_check_locked(struct threadobj *thobj)
+{
+	assert(thobj->status & __THREAD_S_LOCKED);
+}
+
+#else /* !CONFIG_XENO_DEBUG */
+
+static inline void __threadobj_tag_locked(struct threadobj *thobj)
+{
+}
+
+static inline void __threadobj_tag_unlocked(struct threadobj *thobj)
+{
+}
+
+static inline void __threadobj_check_locked(struct threadobj *thobj)
+{
+}
+
+#endif /* !CONFIG_XENO_DEBUG */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void *__threadobj_alloc(size_t tcb_struct_size,
+			size_t wait_union_size,
+			int thobj_offset);
+
+static inline void __threadobj_free(void *p)
+{
+	xnfree(p);
+}
+
+static inline void threadobj_free(struct threadobj *thobj)
+{
+	__threadobj_free((unsigned char *)thobj - thobj->core_offset);
+}
+
+int threadobj_init(struct threadobj *thobj,
+		   struct threadobj_init_data *idata) __must_check;
+
+int threadobj_start(struct threadobj *thobj) __must_check;
+
+int threadobj_shadow(struct threadobj *thobj,
+		     const char *name);
+
+int threadobj_prologue(struct threadobj *thobj,
+		       const char *name);
+
+void threadobj_wait_start(void);
+
+void threadobj_notify_entry(void);
+
+int threadobj_cancel(struct threadobj *thobj);
+
+void threadobj_uninit(struct threadobj *thobj);
+
+int threadobj_suspend(struct threadobj *thobj);
+
+int threadobj_resume(struct threadobj *thobj);
+
+int threadobj_unblock(struct threadobj *thobj);
+
+int __threadobj_lock_sched(struct threadobj *current);
+
+int threadobj_lock_sched(void);
+
+int __threadobj_unlock_sched(struct threadobj *current);
+
+int threadobj_unlock_sched(void);
+
+int threadobj_set_schedparam(struct threadobj *thobj, int policy,
+			     const struct sched_param_ex *param_ex);
+
+int threadobj_set_schedprio(struct threadobj *thobj, int priority);
+
+int threadobj_set_mode(int clrmask, int setmask, int *mode_r);
+
+int threadobj_set_periodic(struct threadobj *thobj,
+			   const struct timespec *__restrict__ idate,
+			   const struct timespec *__restrict__ period);
+
+int threadobj_wait_period(unsigned long *overruns_r) __must_check;
+
+void threadobj_spin(ticks_t ns);
+
+int threadobj_stat(struct threadobj *thobj,
+		   struct threadobj_stat *stat);
+
+int threadobj_sleep(const struct timespec *ts);
+
+void threadobj_set_current_name(const char *name);
+
+#ifdef CONFIG_XENO_PSHARED
+
+static inline int threadobj_local_p(struct threadobj *thobj)
+{
+	extern pid_t __node_id;
+	return thobj->cnode == __node_id;
+}
+
+#else /* !CONFIG_XENO_PSHARED */
+
+static inline int threadobj_local_p(struct threadobj *thobj)
+{
+	return 1;
+}
+
+#endif	/* !CONFIG_XENO_PSHARED */
+
+void threadobj_init_key(void);
+
+int threadobj_pkg_init(int anon_session);
+
+#ifdef __cplusplus
+}
+#endif
+
+#define threadobj_alloc(T, __mptr, W)					\
+	({								\
+		void *__p;						\
+		__p = __threadobj_alloc(sizeof(T), sizeof(W), offsetof(T, __mptr)); \
+		__p;							\
+	})
+
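+/*
+ * Usage sketch (illustrative only): how an API personality would
+ * instantiate its TCB through the macro above. struct example_task
+ * and union example_wait_union are made-up names; the point is that
+ * the offset of the embedded threadobj core is handed over to the
+ * allocator, so it can locate the core within the enclosing TCB.
+ */
+struct example_task {
+	int api_cookie;
+	struct threadobj thobj;		/* embedded core TCB */
+};
+
+union example_wait_union {
+	struct { int reason; } sleep_wait;
+};
+
+static inline struct example_task *example_task_alloc(void)
+{
+	return (struct example_task *)
+		threadobj_alloc(struct example_task, thobj,
+				union example_wait_union);
+}
+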
+static inline int threadobj_get_policy(struct threadobj *thobj)
+{
+	return thobj->policy;
+}
+
+static inline int threadobj_get_priority(struct threadobj *thobj)
+{
+	return thobj->schedparam.sched_priority;
+}
+
+static inline void threadobj_copy_schedparam(struct sched_param_ex *param_ex,
+					     const struct threadobj *thobj)
+{
+	*param_ex = thobj->schedparam;
+}
+
+static inline int threadobj_lock(struct threadobj *thobj)
+{
+	int ret;
+
+	ret = write_lock_safe(&thobj->lock, thobj->cancel_state);
+	if (ret)
+		return ret;
+
+	__threadobj_tag_locked(thobj);
+
+	return 0;
+}
+
+static inline int threadobj_trylock(struct threadobj *thobj)
+{
+	int ret;
+
+	ret = write_trylock_safe(&thobj->lock, thobj->cancel_state);
+	if (ret)
+		return ret;
+
+	__threadobj_tag_locked(thobj);
+
+	return 0;
+}
+
+static inline int threadobj_unlock(struct threadobj *thobj)
+{
+	__threadobj_check_locked(thobj);
+	__threadobj_tag_unlocked(thobj);
+	return write_unlock_safe(&thobj->lock, thobj->cancel_state);
+}
+
+static inline int threadobj_irq_p(void)
+{
+	struct threadobj *current = __threadobj_get_current();
+	return current == THREADOBJ_IRQCONTEXT;
+}
+
+static inline int threadobj_current_p(void)
+{
+	return threadobj_current() != NULL;
+}
+
+static inline int __threadobj_lock_sched_once(struct threadobj *current)
+{
+	if (current->schedlock_depth == 0)
+		return __threadobj_lock_sched(current);
+
+	return -EBUSY;
+}
+
+static inline int threadobj_lock_sched_once(void)
+{
+	struct threadobj *current = threadobj_current();
+
+	if (current->schedlock_depth == 0)
+		return threadobj_lock_sched();
+
+	return -EBUSY;
+}
+
+static inline void threadobj_yield(void)
+{
+	__RT(sched_yield());
+}
+
+static inline unsigned int threadobj_get_magic(struct threadobj *thobj)
+{
+	return thobj->magic;
+}
+
+static inline void threadobj_set_magic(struct threadobj *thobj,
+				       unsigned int magic)
+{
+	thobj->magic = magic;
+}
+
+static inline int threadobj_get_lockdepth(struct threadobj *thobj)
+{
+	return thobj->schedlock_depth;
+}
+
+static inline int threadobj_get_status(struct threadobj *thobj)
+{
+	return thobj->status | thobj->run_state;
+}
+
+static inline int threadobj_get_errno(struct threadobj *thobj)
+{
+	return *thobj->errno_pointer;
+}
+
+#define threadobj_prepare_wait(T)					\
+	({								\
+		struct threadobj *__thobj = threadobj_current();	\
+		assert(__thobj != NULL);				\
+		assert(sizeof(typeof(T)) <= __thobj->wait_size);	\
+		__mptr(__thobj->wait_union);				\
+	})
+
+#define threadobj_finish_wait()		do { } while (0)
+
+static inline void *threadobj_get_wait(struct threadobj *thobj)
+{
+	return __mptr(thobj->wait_union);
+}
+
+static inline const char *threadobj_get_name(struct threadobj *thobj)
+{
+	return thobj->name;
+}
+
+static inline pid_t threadobj_get_pid(struct threadobj *thobj)
+{
+	return thobj->pid;
+}
+
+#ifdef CONFIG_XENO_WORKAROUND_CONDVAR_PI
+
+int threadobj_cond_timedwait(pthread_cond_t *cond,
+			     pthread_mutex_t *lock,
+			     const struct timespec *timeout);
+
+int threadobj_cond_wait(pthread_cond_t *cond,
+			pthread_mutex_t *lock);
+
+int threadobj_cond_signal(pthread_cond_t *cond);
+
+int threadobj_cond_broadcast(pthread_cond_t *cond);
+
+#else
+
+static inline
+int threadobj_cond_timedwait(pthread_cond_t *cond,
+			     pthread_mutex_t *lock,
+			     const struct timespec *timeout)
+{
+	return __RT(pthread_cond_timedwait(cond, lock, timeout));
+}
+
+static inline
+int threadobj_cond_wait(pthread_cond_t *cond,
+			pthread_mutex_t *lock)
+{
+	return __RT(pthread_cond_wait(cond, lock));
+}
+
+static inline
+int threadobj_cond_signal(pthread_cond_t *cond)
+{
+	return __RT(pthread_cond_signal(cond));
+}
+
+static inline
+int threadobj_cond_broadcast(pthread_cond_t *cond)
+{
+	return __RT(pthread_cond_broadcast(cond));
+}
+
+#endif /* !CONFIG_XENO_WORKAROUND_CONDVAR_PI */
+
+#endif /* _COPPERPLATE_THREADOBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/timerobj.h b/kernel/xenomai-v3.2.4/include/copperplate/timerobj.h
new file mode 100644
index 0000000..66e2e5c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/timerobj.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_TIMEROBJ_H
+#define _COPPERPLATE_TIMEROBJ_H
+
+#include <pthread.h>
+#include <time.h>
+#include <boilerplate/list.h>
+#include <boilerplate/lock.h>
+
+struct timerobj {
+	struct itimerspec itspec;
+	void (*handler)(struct timerobj *tmobj);
+	timer_t timer;
+	pthread_mutex_t lock;
+	int cancel_state;
+	struct pvholder next;
+};
+
+static inline int timerobj_lock(struct timerobj *tmobj)
+{
+	return write_lock_safe(&tmobj->lock, tmobj->cancel_state);
+}
+
+static inline int timerobj_unlock(struct timerobj *tmobj)
+{
+	return write_unlock_safe(&tmobj->lock, tmobj->cancel_state);
+}
+
+static inline int timerobj_enabled(const struct timerobj *tmobj)
+{
+	return tmobj->handler != NULL;
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int timerobj_init(struct timerobj *tmobj);
+
+void timerobj_destroy(struct timerobj *tmobj);
+
+int timerobj_start(struct timerobj *tmobj,
+		   void (*handler)(struct timerobj *tmobj),
+		   struct itimerspec *it);
+
+int timerobj_stop(struct timerobj *tmobj);
+
+int timerobj_pkg_init(void);
+
+#ifdef __cplusplus
+}
+#endif
+
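+/*
+ * Usage sketch (illustrative only): arming a periodic timer with a
+ * 1 ms period. The itimerspec follows the usual POSIX conventions:
+ * it_value sets the first shot, it_interval the recurrence.
+ */
+static inline int timerobj_example_start_1ms(struct timerobj *tmobj,
+					     void (*handler)(struct timerobj *tmobj))
+{
+	struct itimerspec it = {
+		.it_value = { .tv_sec = 0, .tv_nsec = 1000000 },
+		.it_interval = { .tv_sec = 0, .tv_nsec = 1000000 },
+	};
+
+	return timerobj_start(tmobj, handler, &it);
+}
+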
+#endif /* _COPPERPLATE_TIMEROBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/traceobj.h b/kernel/xenomai-v3.2.4/include/copperplate/traceobj.h
new file mode 100644
index 0000000..be660aa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/traceobj.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _COPPERPLATE_TRACEOBJ_H
+#define _COPPERPLATE_TRACEOBJ_H
+
+#include <pthread.h>
+
+struct threadobj;
+
+struct traceobj {
+	pthread_mutex_t lock;
+	pthread_cond_t join;
+	const char *label;
+	int nr_marks;
+	int cur_mark;
+	struct tracemark *marks;
+	int nr_threads;
+};
+
+#define traceobj_assert(trobj, cond)					\
+do {									\
+	int __ret = (cond);						\
+	if (!__ret)							\
+		__traceobj_assert_failed(trobj, __FILE__, __LINE__, __STRING(cond)); \
+} while(0)
+
+#define traceobj_check(__trobj, __status, __expected)			\
+do {									\
+	if (__status != __expected)					\
+		__traceobj_check_abort(__trobj, __FILE__, __LINE__,	\
+				       __status, __expected);		\
+} while(0)
+
+#define traceobj_check_warn(__trobj, __status, __expected)		\
+do {									\
+	if (__status != __expected)					\
+		__traceobj_check_warn(__trobj, __FILE__, __LINE__,	\
+				      __status, __expected);		\
+} while(0)
+
+#define traceobj_mark(trobj, mark)	\
+	__traceobj_mark(trobj, __FILE__, __LINE__, mark)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int traceobj_init(struct traceobj *trobj,
+		  const char *label, int nr_marks);
+
+void traceobj_verify(struct traceobj *trobj, int tseq[], int nr_seq);
+
+void traceobj_destroy(struct traceobj *trobj);
+
+void traceobj_enter(struct traceobj *trobj);
+
+void traceobj_exit(struct traceobj *trobj);
+
+void traceobj_unwind(struct traceobj *trobj);
+
+void traceobj_join(struct traceobj *trobj);
+
+void __traceobj_assert_failed(struct traceobj *trobj,
+			      const char *file, int line, const char *cond);
+
+void __traceobj_check_abort(struct traceobj *trobj,
+			    const char *file, int line,
+			    int received, int expected);
+
+void __traceobj_check_warn(struct traceobj *trobj,
+			   const char *file, int line,
+			   int received, int expected);
+
+void __traceobj_mark(struct traceobj *trobj,
+		     const char *file, int line, int mark);
+
+#ifdef __cplusplus
+}
+#endif
+
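+/*
+ * Usage sketch (illustrative only): the intended test flow, squashed
+ * into a single thread for brevity. Real tests drop marks from the
+ * traced threads between traceobj_enter() and traceobj_exit(), then
+ * the main thread joins and verifies the observed sequence.
+ */
+static inline void traceobj_example_selftest(struct traceobj *trobj)
+{
+	static int expected_seq[] = { 1, 2 };
+
+	traceobj_init(trobj, "example", 2);
+	traceobj_mark(trobj, 1);
+	traceobj_mark(trobj, 2);
+	traceobj_verify(trobj, expected_seq, 2);
+	traceobj_destroy(trobj);
+}
+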
+#endif /* _COPPERPLATE_TRACEOBJ_H */
diff --git a/kernel/xenomai-v3.2.4/include/copperplate/tunables.h b/kernel/xenomai-v3.2.4/include/copperplate/tunables.h
new file mode 100644
index 0000000..7b45c51
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/copperplate/tunables.h
@@ -0,0 +1,100 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COPPERPLATE_TUNABLES_H
+#define _COPPERPLATE_TUNABLES_H
+
+#include <boilerplate/tunables.h>
+
+struct copperplate_setup_data {
+	const char *session_root;
+	const char *session_label;
+	const char *registry_root;
+	int no_registry;
+	int shared_registry;
+	size_t mem_pool;
+	gid_t session_gid;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+extern struct copperplate_setup_data __copperplate_setup_data;
+
+define_config_tunable(session_label, const char *, label);
+
+static inline read_config_tunable(session_label, const char *)
+{
+	return __copperplate_setup_data.session_label;
+}
+
+static inline define_config_tunable(registry_root, const char *, root)
+{
+	__copperplate_setup_data.registry_root = root;
+}
+
+static inline read_config_tunable(registry_root, const char *)
+{
+	return __copperplate_setup_data.registry_root;
+}
+
+static inline define_config_tunable(no_registry, int, noreg)
+{
+	__copperplate_setup_data.no_registry = noreg;
+}
+
+static inline read_config_tunable(no_registry, int)
+{
+	return __copperplate_setup_data.no_registry;
+}
+
+static inline define_config_tunable(shared_registry, int, shared)
+{
+	__copperplate_setup_data.shared_registry = shared;
+}
+
+static inline read_config_tunable(shared_registry, int)
+{
+	return __copperplate_setup_data.shared_registry;
+}
+
+static inline define_config_tunable(mem_pool_size, size_t, size)
+{
+	__copperplate_setup_data.mem_pool = size;
+}
+
+static inline read_config_tunable(mem_pool_size, size_t)
+{
+	return __copperplate_setup_data.mem_pool;
+}
+
+static inline define_config_tunable(session_gid, gid_t, gid)
+{
+	__copperplate_setup_data.session_gid = gid;
+}
+
+static inline read_config_tunable(session_gid, gid_t)
+{
+	return __copperplate_setup_data.session_gid;
+}
+
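+/*
+ * Usage sketch (illustrative only): the accessors above are thin
+ * wrappers around __copperplate_setup_data, so setup code may also
+ * consult the descriptor fields directly once they are populated.
+ */
+static inline int copperplate_example_registry_enabled(void)
+{
+	return !__copperplate_setup_data.no_registry;
+}
+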
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_COPPERPLATE_TUNABLES_H */
diff --git a/kernel/xenomai-v3.2.4/include/mercury/Makefile.am b/kernel/xenomai-v3.2.4/include/mercury/Makefile.am
new file mode 100644
index 0000000..f1af838
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/Makefile.am
@@ -0,0 +1,4 @@
+
+SUBDIRS = boilerplate
+
+noinst_HEADERS = pthread.h
diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am
new file mode 100644
index 0000000..63cf2f6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/Makefile.am
@@ -0,0 +1,8 @@
+includesubdir = $(includedir)/mercury/boilerplate
+
+includesub_HEADERS =	\
+	sched.h		\
+	limits.h	\
+	signal.h	\
+	trace.h		\
+	wrappers.h
diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h
new file mode 100644
index 0000000..ef62334
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/limits.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _MERCURY_BOILERPLATE_LIMITS_H
+#define _MERCURY_BOILERPLATE_LIMITS_H
+
+#define XNOBJECT_NAME_LEN  32
+
+#endif /* _MERCURY_BOILERPLATE_LIMITS_H */
diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h
new file mode 100644
index 0000000..30dde60
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/sched.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _MERCURY_BOILERPLATE_SCHED_H
+#define _MERCURY_BOILERPLATE_SCHED_H
+
+#include <time.h>
+#include <sched.h>
+
+struct __sched_rr_param {
+	struct timespec __sched_rr_quantum;
+};
+
+struct sched_param_ex {
+	int sched_priority;
+	union {
+		struct __sched_rr_param rr;
+	} sched_u;
+};
+
+#define sched_rr_quantum	sched_u.rr.__sched_rr_quantum
+
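+/*
+ * Usage sketch (illustrative only): filling an extended parameter
+ * block for a round-robin thread; the sched_rr_quantum alias defined
+ * above reaches into the union member.
+ */
+static inline void sched_param_ex_example_fill(struct sched_param_ex *p)
+{
+	p->sched_priority = 10;			/* arbitrary priority */
+	p->sched_rr_quantum.tv_sec = 0;
+	p->sched_rr_quantum.tv_nsec = 1000000;	/* 1 ms time slice */
+}
+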
+#endif /* _MERCURY_BOILERPLATE_SCHED_H */
diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h
new file mode 100644
index 0000000..0405481
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/signal.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _MERCURY_BOILERPLATE_SIGNAL_H
+#define _MERCURY_BOILERPLATE_SIGNAL_H
+
+#include <signal.h>
+
+#ifndef sigev_notify_thread_id
+#define sigev_notify_thread_id	 _sigev_un._tid
+#endif
+
+/* Generates reserved signal numbers for Boilerplate/Copperplate. */
+#define __SIGRSVD(n)	(SIGRTMIN + 8 + (n))
+
+#define SIGSUSP		__SIGRSVD(0)	/* Suspend request */
+#define SIGRESM		__SIGRSVD(1)	/* Resume request */
+#define SIGRELS		__SIGRSVD(2)	/* Syscall abort */
+#define SIGRRB		__SIGRSVD(3)	/* Round-robin event */
+#define SIGAGENT	__SIGRSVD(4)	/* Request to remote agent */
+#define SIGPERIOD	__SIGRSVD(5)	/* Periodic signal */
+
+/* Generates private signal numbers for clients, up to SIGRTMAX. */
+#define __SIGPRIV(n)	__SIGRSVD(8 + (n))
+
+#define SIGSAFE_LOCK_ENTRY(__safelock)					\
+	do {								\
+		sigset_t __safeset, __oldsafeset;			\
+		sigemptyset(&__safeset);				\
+		sigaddset(&__safeset, SIGSUSP);				\
+		pthread_sigmask(SIG_BLOCK, &__safeset, &__oldsafeset);	\
+		push_cleanup_lock(__safelock);				\
+		write_lock(__safelock);
+
+#define SIGSAFE_LOCK_EXIT(__safelock)					\
+		write_unlock(__safelock);				\
+		pop_cleanup_lock(&__safelock);				\
+		pthread_sigmask(SIG_SETMASK, &__oldsafeset, NULL);	\
+	} while (0)
+
+#endif /* _MERCURY_BOILERPLATE_SIGNAL_H */
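+
+/*
+ * Usage sketch (illustration only): SIGSAFE_LOCK_ENTRY() blocks
+ * SIGSUSP and takes the lock, so a suspend request cannot preempt
+ * the lock owner; SIGSAFE_LOCK_EXIT() releases the lock and restores
+ * the previous signal mask. "some_lock" is a hypothetical lock.
+ *
+ *	SIGSAFE_LOCK_ENTRY(&some_lock);
+ *	// ... touch state shared with the suspend/resume machinery ...
+ *	SIGSAFE_LOCK_EXIT(&some_lock);
+ */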
diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h
new file mode 100644
index 0000000..787b088
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/trace.h
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _MERCURY_BOILERPLATE_TRACE_H
+#define _MERCURY_BOILERPLATE_TRACE_H
+
+#include <errno.h>
+
+static inline int xntrace_max_begin(unsigned long v)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_max_end(unsigned long v)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_max_reset(void)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_user_start(void)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_user_stop(unsigned long v)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_user_freeze(unsigned long v, int once)
+{
+	return -ENOSYS;
+}
+
+static inline void xntrace_latpeak_freeze(int delay)
+{
+}
+
+static inline int xntrace_special(unsigned char id, unsigned long v)
+{
+	return -ENOSYS;
+}
+
+static inline int xntrace_special_u64(unsigned char id, unsigned long long v)
+{
+	return -ENOSYS;
+}
+
+#endif /* _MERCURY_BOILERPLATE_TRACE_H */
diff --git a/kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h
new file mode 100644
index 0000000..a62ccb7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/boilerplate/wrappers.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _MERCURY_BOILERPLATE_WRAPPERS_H
+#define _MERCURY_BOILERPLATE_WRAPPERS_H
+
+#define __RT(call)	call
+#define __STD(call)	call
+
+#endif /* _MERCURY_BOILERPLATE_WRAPPERS_H */
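+
+/*
+ * Usage sketch (illustration only): over Mercury both decorations are
+ * pass-through, so code shared with Cobalt can wrap calls in
+ * __RT()/__STD() unconditionally and still compile to plain libc
+ * calls here. The device path below is hypothetical.
+ *
+ *	fd = __RT(open("/dev/rtdm/somedev", O_RDWR));
+ *	__STD(printf("fd=%d\n", fd));
+ */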
diff --git a/kernel/xenomai-v3.2.4/include/mercury/pthread.h b/kernel/xenomai-v3.2.4/include/mercury/pthread.h
new file mode 100644
index 0000000..dd94305
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/mercury/pthread.h
@@ -0,0 +1,21 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <pthread.h>
+
+#include <boilerplate/libc.h>
diff --git a/kernel/xenomai-v3.2.4/include/psos/Makefile.am b/kernel/xenomai-v3.2.4/include/psos/Makefile.am
new file mode 100644
index 0000000..ee7c5c8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/psos/Makefile.am
@@ -0,0 +1,5 @@
+includesubdir = $(includedir)/psos
+
+includesub_HEADERS = \
+		psos.h \
+		tunables.h
diff --git a/kernel/xenomai-v3.2.4/include/psos/psos.h b/kernel/xenomai-v3.2.4/include/psos/psos.h
new file mode 100644
index 0000000..1554244
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/psos/psos.h
@@ -0,0 +1,376 @@
+/*
+ * Copyright (C) 2001-2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a pSOS-like API built upon the copperplate library.
+ *
+ * pSOS and pSOS+ are registered trademarks of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_PSOS_PSOS_H
+#define _XENOMAI_PSOS_PSOS_H
+
+#include <sys/types.h>
+
+#ifndef SUCCESS
+#define SUCCESS 0
+#endif
+
+#define T_NOPREEMPT   0x0001
+#define T_PREEMPT     0x0000
+#define T_TSLICE      0x0002
+#define T_NOTSLICE    0x0000
+#define T_NOASR       0x0004
+#define T_ASR         0x0000
+#define T_SUPV        0x2000
+#define T_USER        0x0000
+#define T_LEVELMASK0  0x0000
+#define T_LEVELMASK1  0x0100
+#define T_LEVELMASK2  0x0200
+#define T_LEVELMASK3  0x0300
+#define T_LEVELMASK4  0x0400
+#define T_LEVELMASK5  0x0500
+#define T_LEVELMASK6  0x0600
+#define T_LEVELMASK7  0x0700
+#define T_NOISR       0x0700
+#define T_ISR         0x0000
+#define T_GLOBAL      0x0001
+#define T_LOCAL       0x0000
+#define T_NOFPU       0x0000
+#define T_FPU         0x0002
+
+#define RN_PRIOR      0x0002
+#define RN_FIFO       0x0000
+#define RN_DEL        0x0004
+#define RN_NODEL      0x0000
+#define RN_NOWAIT     0x0001
+#define RN_WAIT       0x0000
+
+#define SM_GLOBAL     0x0001
+#define SM_LOCAL      0x0000
+#define SM_PRIOR      0x0002
+#define SM_FIFO       0x0000
+#define SM_NOWAIT     0x0001
+#define SM_WAIT       0x0000
+
+#define EV_NOWAIT     0x0001
+#define EV_WAIT       0x0000
+#define EV_ANY        0x0002
+#define EV_ALL        0x0000
+
+#define K_GLOBAL      0x0001
+#define K_LOCAL       0x0000
+
+#define PT_GLOBAL     0x0001
+#define PT_LOCAL      0x0000
+#define PT_DEL        0x0004
+#define PT_NODEL      0x0000
+
+#define Q_GLOBAL      0x0001
+#define Q_LOCAL       0x0000
+#define Q_PRIOR       0x0002
+#define Q_FIFO        0x0000
+#define Q_LIMIT       0x0004
+#define Q_NOLIMIT     0x0000
+#define Q_PRIBUF      0x0008
+#define Q_SYSBUF      0x0000
+#define Q_NOWAIT      0x0001
+#define Q_WAIT        0x0000
+
+#define ERR_TIMEOUT  0x01
+#define ERR_SSFN     0x03
+#define ERR_NODENO   0x04
+#define ERR_OBJDEL   0x05
+#define ERR_OBJID    0x06
+#define ERR_OBJTYPE  0x07
+#define ERR_OBJTFULL 0x08
+#define ERR_OBJNF    0x09
+
+#define ERR_NOTCB    0x0E
+#define ERR_NOSTK    0x0F
+#define ERR_TINYSTK  0x10
+#define ERR_PRIOR    0x11
+#define ERR_ACTIVE   0x12
+#define ERR_NACTIVE  0x13
+#define ERR_SUSP     0x14
+#define ERR_NOTSUSP  0x15
+#define ERR_SETPRI   0x16
+#define ERR_REGNUM   0x17
+
+#define ERR_RNADDR   0x1B
+#define ERR_UNITSIZE 0x1C
+#define ERR_TINYUNIT 0x1D
+#define ERR_TINYRN   0x1E
+#define ERR_SEGINUSE 0x1F
+#define ERR_TOOBIG   0x21
+#define ERR_NOSEG    0x22
+#define ERR_NOTINRN  0x23
+#define ERR_SEGADDR  0x24
+#define ERR_SEGFREE  0x25
+#define ERR_RNKILLD  0x26
+#define ERR_TATRNDEL 0x27
+
+#define ERR_PTADDR   0x28
+#define ERR_BUFSIZE  0x29
+#define ERR_TINYPT   0x2A
+#define ERR_BUFINUSE 0x2B
+#define ERR_NOBUF    0x2C
+#define ERR_BUFADDR  0x2D
+#define ERR_BUFFREE  0x2F
+
+#define ERR_MSGSIZ   0x31
+#define ERR_BUFSIZ   0x32
+#define ERR_NOQCB    0x33
+#define ERR_NOMGB    0x34
+#define ERR_QFULL    0x35
+#define ERR_QKILLD   0x36
+#define ERR_NOMSG    0x37
+#define ERR_TATQDEL  0x38
+#define ERR_MATQDEL  0x39
+#define ERR_VARQ     0x3A
+#define ERR_NOTVARQ  0x3B
+
+#define ERR_NOEVS    0x3C
+#define ERR_NOTINASR 0x3E
+#define ERR_NOASR    0x3F
+
+#define ERR_NOSCB    0x41
+#define ERR_NOSEM    0x42
+#define ERR_SKILLD   0x43
+#define ERR_TATSDEL  0x44
+
+#define ERR_NOTIME   0x47
+#define ERR_ILLDATE  0x48
+#define ERR_ILLTIME  0x49
+#define ERR_ILLTICKS 0x4A
+#define ERR_NOTIMERS 0x4B
+#define ERR_BADTMID  0x4C
+#define ERR_TMNOTSET 0x4D
+#define ERR_TOOLATE  0x4E
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+u_long ev_receive(u_long events,
+		  u_long flags,
+		  u_long timeout,
+		  u_long *events_r);
+
+u_long ev_send(u_long tid,
+	       u_long events);
+
+u_long pt_create(const char *name,
+		 void *paddr,
+		 void *laddr,
+		 u_long psize,
+		 u_long bsize,
+		 u_long flags,
+		 u_long *tid_r,
+		 u_long *nbuf_r);
+
+u_long pt_delete(u_long tid);
+
+u_long pt_getbuf(u_long tid,
+		 void **bufaddr);
+
+u_long pt_ident(const char *name,
+		u_long node,
+		u_long *ptid_r);
+
+u_long pt_retbuf(u_long tid,
+		 void *buf);
+
+u_long q_broadcast(u_long qid,
+		   u_long msgbuf[4],
+		   u_long *count_r);
+
+u_long q_create(const char *name,
+		u_long count,
+		u_long flags,
+		u_long *qid_r);
+
+u_long q_delete(u_long qid);
+
+u_long q_ident(const char *name,
+	       u_long node,
+	       u_long *qid_r);
+
+u_long q_receive(u_long qid,
+		 u_long flags,
+		 u_long timeout,
+		 u_long msgbuf[4]);
+
+u_long q_send(u_long qid,
+	      u_long msgbuf[4]);
+
+u_long q_urgent(u_long qid,
+		u_long msgbuf[4]);
+
+u_long q_vcreate(const char *name,
+		 u_long flags,
+		 u_long count,
+		 u_long maxlen,
+		 u_long *qid_r);
+
+u_long q_vdelete(u_long qid);
+
+u_long q_vident(const char *name,
+		u_long node,
+		u_long *qid_r);
+
+u_long q_vreceive(u_long qid,
+		  u_long flags,
+		  u_long timeout,
+		  void *msgbuf,
+		  u_long msglen,
+		  u_long *msglen_r);
+
+u_long q_vsend(u_long qid,
+	       void *msgbuf,
+	       u_long msglen);
+
+u_long q_vurgent(u_long qid,
+		 void *msgbuf,
+		 u_long msglen);
+
+u_long q_vbroadcast(u_long qid,
+		    void *msgbuf,
+		    u_long msglen,
+		    u_long *count_r);
+
+u_long rn_create(const char *name,
+		 void *saddr,
+		 u_long rnsize,
+		 u_long usize,
+		 u_long flags,
+		 u_long *rnid_r,
+		 u_long *asize_r);
+
+u_long rn_delete(u_long rnid);
+
+u_long rn_getseg(u_long rnid,
+		 u_long size,
+		 u_long flags,
+		 u_long timeout,
+		 void **segaddr);
+
+u_long rn_ident(const char *name,
+		u_long *rnid_r);
+
+u_long rn_retseg(u_long rnid,
+		 void *segaddr);
+
+u_long sm_create(const char *name,
+		 u_long count,
+		 u_long flags,
+		 u_long *smid_r);
+
+u_long sm_delete(u_long smid);
+
+u_long sm_ident(const char *name,
+		u_long node,
+		u_long *smid_r);
+
+u_long sm_p(u_long smid,
+	    u_long flags,
+	    u_long timeout);
+
+u_long sm_v(u_long smid);
+
+u_long t_create(const char *name,
+		u_long prio,
+		u_long sstack,
+		u_long ustack,
+		u_long flags,
+		u_long *tid_r);
+
+u_long t_delete(u_long tid);
+
+u_long t_getreg(u_long tid,
+		u_long regnum,
+		u_long *regvalue_r);
+
+u_long t_ident(const char *name,
+	       u_long node,
+	       u_long *tid_r);
+
+u_long t_mode(u_long mask,
+	      u_long newmask,
+	      u_long *oldmode_r);
+
+u_long t_resume(u_long tid);
+
+u_long t_setpri(u_long tid,
+		u_long newprio,
+		u_long *oldprio_r);
+
+u_long t_setreg(u_long tid,
+		u_long regnum,
+		u_long regvalue);
+
+u_long t_start(u_long tid,
+	       u_long mode,
+	       void (*entry)(u_long a0,
+			     u_long a1,
+			     u_long a2,
+			     u_long a3),
+	       u_long args[]);
+
+u_long t_suspend(u_long tid);
+
+u_long tm_cancel(u_long tmid);
+
+u_long tm_evafter(u_long ticks,
+		  u_long events,
+		  u_long *tmid_r);
+
+u_long tm_evevery(u_long ticks,
+		  u_long events,
+		  u_long *tmid_r);
+
+u_long tm_evwhen(u_long date,
+		 u_long time,
+		 u_long ticks,
+		 u_long events,
+		 u_long *tmid_r);
+
+u_long tm_get(u_long *date_r,
+	      u_long *time_r,
+	      u_long *ticks_r);
+
+u_long tm_set(u_long date,
+	      u_long time,
+	      u_long ticks);
+
+u_long tm_getm(unsigned long long *ns);
+
+u_long tm_wkafter(u_long ticks);
+
+u_long tm_wkwhen(u_long date,
+		 u_long time,
+		 u_long ticks);
+
+int psos_task_normalize_priority(u_long psos_prio);
+
+u_long psos_task_denormalize_priority(int core_prio);
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* !_XENOMAI_PSOS_PSOS_H */
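+
+/*
+ * Usage sketch (illustration only), using only the declarations
+ * above: create and start a pSOS task, then post the semaphore it
+ * pends on. "task_entry" is a hypothetical entry routine; names,
+ * priority and stack sizes are arbitrary, and return codes are
+ * ignored for brevity.
+ *
+ *	u_long tid, smid, args[4] = { 0, 0, 0, 0 };
+ *
+ *	sm_create("SEM0", 0, SM_PRIOR, &smid);
+ *	t_create("TSK0", 50, 0, 0, T_PREEMPT, &tid);
+ *	t_start(tid, T_PREEMPT | T_TSLICE, task_entry, args);
+ *	sm_v(smid); // wakes up task_entry blocked in sm_p(smid, SM_WAIT, 0)
+ */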
diff --git a/kernel/xenomai-v3.2.4/include/psos/tunables.h b/kernel/xenomai-v3.2.4/include/psos/tunables.h
new file mode 100644
index 0000000..2dbd3f0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/psos/tunables.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2001-2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a pSOS-like API built upon the copperplate library.
+ *
+ * pSOS and pSOS+ are registered trademarks of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_PSOS_TUNABLES_H
+#define _XENOMAI_PSOS_TUNABLES_H
+
+#include <boilerplate/tunables.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif /* __cplusplus */
+
+extern int psos_long_names;
+
+static inline define_config_tunable(long_names, int, on)
+{
+	psos_long_names = on;
+}
+
+static inline read_config_tunable(long_names, int)
+{
+	return psos_long_names;
+}
+
+#ifdef __cplusplus
+}
+#endif /* __cplusplus */
+
+#endif /* !_XENOMAI_PSOS_TUNABLES_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/Makefile.am b/kernel/xenomai-v3.2.4/include/rtdm/Makefile.am
new file mode 100644
index 0000000..989c46f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/Makefile.am
@@ -0,0 +1,20 @@
+includesubdir = $(includedir)/rtdm
+
+includesub_HEADERS = rtdm.h
+
+if XENO_COBALT
+includesub_HEADERS +=	\
+	analogy.h	\
+	autotune.h	\
+	can.h		\
+	gpio.h		\
+	gpiopwm.h	\
+	ipc.h		\
+	net.h		\
+	serial.h	\
+	spi.h		\
+	testing.h	\
+	udd.h
+endif
+
+SUBDIRS = uapi
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/analogy.h b/kernel/xenomai-v3.2.4/include/rtdm/analogy.h
new file mode 100644
index 0000000..066d05a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/analogy.h
@@ -0,0 +1,264 @@
+/**
+ * @file
+ * Analogy for Linux, library facilities
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_ANALOGY_H
+#define _RTDM_ANALOGY_H
+
+#include <stdio.h>
+#include <sys/types.h>
+#include <rtdm/uapi/analogy.h>
+
+#include "boilerplate/list.h"
+
+/*!
+  @addtogroup analogy_lib_descriptor
+  @{
+ */
+
+/*!
+ * @anchor ANALOGY_xxx_DESC   @name ANALOGY_xxx_DESC
+ * @brief Constants used as arguments to define the description
+ * depth to retrieve
+ * @{
+ */
+
+/**
+ * BSC stands for basic descriptor (device data)
+ */
+#define A4L_BSC_DESC 0x0
+
+/**
+ * CPLX stands for complex descriptor (subdevice + channel + range
+ * data)
+ */
+#define A4L_CPLX_DESC 0x1
+
+	  /*! @} ANALOGY_xxx_DESC */
+
+/* --- Descriptor structure --- */
+
+/*!
+ * @brief Structure containing device information useful to users
+ * @see a4l_get_desc()
+ */
+
+struct a4l_descriptor {
+	char board_name[A4L_NAMELEN];
+				     /**< Board name. */
+	char driver_name[A4L_NAMELEN];
+				     /**< Driver name. */
+	int nb_subd;
+		 /**< Subdevices count. */
+	int idx_read_subd;
+		       /**< Input subdevice index. */
+	int idx_write_subd;
+			/**< Output subdevice index. */
+	int fd;
+	    /**< File descriptor. */
+	unsigned int magic;
+			/**< Opaque field. */
+	int sbsize;
+		/**< Data buffer size. */
+	void *sbdata;
+		 /**< Data buffer pointer. */
+};
+typedef struct a4l_descriptor a4l_desc_t;
+
+/*! @} analogy_lib_descriptor */
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef DOXYGEN_CPP
+
+/* --- Level 0 API (not intended for direct use) --- */
+
+int a4l_sys_open(const char *fname);
+
+int a4l_sys_close(int fd);
+
+int a4l_sys_read(int fd, void *buf, size_t nbyte);
+
+int a4l_sys_write(int fd, void *buf, size_t nbyte);
+
+int a4l_sys_attach(int fd, a4l_lnkdesc_t *arg);
+
+int a4l_sys_detach(int fd);
+
+int a4l_sys_bufcfg(int fd, unsigned int idx_subd, unsigned long size);
+
+int a4l_sys_desc(int fd, a4l_desc_t *dsc, int pass);
+
+int a4l_sys_devinfo(int fd, a4l_dvinfo_t *info);
+
+int a4l_sys_subdinfo(int fd, a4l_sbinfo_t *info);
+
+int a4l_sys_nbchaninfo(int fd, unsigned int idx_subd, unsigned int *nb);
+
+int a4l_sys_chaninfo(int fd,
+		     unsigned int idx_subd, a4l_chinfo_t *info);
+
+int a4l_sys_nbrnginfo(int fd,
+		      unsigned int idx_subd,
+		      unsigned int idx_chan, unsigned int *nb);
+
+int a4l_sys_rnginfo(int fd,
+		    unsigned int idx_subd,
+		    unsigned int idx_chan, a4l_rnginfo_t *info);
+
+/* --- Level 1 API (intended for normal use) --- */
+
+int a4l_get_desc(int fd, a4l_desc_t *dsc, int pass);
+
+int a4l_open(a4l_desc_t *dsc, const char *fname);
+
+int a4l_close(a4l_desc_t *dsc);
+
+int a4l_fill_desc(a4l_desc_t *dsc);
+
+int a4l_get_subdinfo(a4l_desc_t *dsc,
+		     unsigned int subd, a4l_sbinfo_t **info);
+
+int a4l_get_chinfo(a4l_desc_t *dsc,
+		   unsigned int subd,
+		   unsigned int chan, a4l_chinfo_t **info);
+
+#define a4l_get_chan_max(x) (1ULL << (x)->nb_bits)
+
+#define a4l_is_chan_global(x) ((x)->chan_flags & A4L_CHAN_GLOBAL)
+
+int a4l_get_rnginfo(a4l_desc_t *dsc,
+		    unsigned int subd,
+		    unsigned int chan,
+		    unsigned int rng, a4l_rnginfo_t **info);
+
+#define a4l_is_rng_global(x) ((x)->flags & A4L_RNG_GLOBAL)
+
+int a4l_snd_command(a4l_desc_t *dsc, struct a4l_cmd_desc *cmd);
+
+int a4l_snd_cancel(a4l_desc_t *dsc, unsigned int idx_subd);
+
+int a4l_set_bufsize(a4l_desc_t *dsc,
+		    unsigned int idx_subd, unsigned long size);
+
+int a4l_get_bufsize(a4l_desc_t *dsc,
+		    unsigned int idx_subd, unsigned long *size);
+
+int a4l_set_wakesize(a4l_desc_t *dsc, unsigned long size);
+
+int a4l_get_wakesize(a4l_desc_t *dsc, unsigned long *size);
+
+int a4l_mark_bufrw(a4l_desc_t *dsc,
+		   unsigned int idx_subd,
+		   unsigned long cur, unsigned long *newp);
+
+int a4l_poll(a4l_desc_t *dsc,
+	     unsigned int idx_subd, unsigned long ms_timeout);
+
+int a4l_mmap(a4l_desc_t *dsc,
+	     unsigned int idx_subd, unsigned long size, void **ptr);
+
+int a4l_async_read(a4l_desc_t *dsc,
+		   void *buf, size_t nbyte, unsigned long ms_timeout);
+
+int a4l_async_write(a4l_desc_t *dsc,
+		    void *buf, size_t nbyte, unsigned long ms_timeout);
+
+int a4l_snd_insnlist(a4l_desc_t *dsc, a4l_insnlst_t *arg);
+
+int a4l_snd_insn(a4l_desc_t *dsc, a4l_insn_t *arg);
+
+/* --- Level 2 API (intended for normal use) --- */
+
+int a4l_sync_write(a4l_desc_t *dsc,
+		   unsigned int idx_subd,
+		   unsigned int chan_desc,
+		   unsigned int delay, void *buf, size_t nbyte);
+
+int a4l_sync_read(a4l_desc_t *dsc,
+		  unsigned int idx_subd,
+		  unsigned int chan_desc,
+		  unsigned int delay, void *buf, size_t nbyte);
+
+int a4l_config_subd(a4l_desc_t *dsc,
+		    unsigned int idx_subd, unsigned int type, ...);
+
+int a4l_sync_dio(a4l_desc_t *dsc,
+		 unsigned int idx_subd, void *mask, void *buf);
+
+int a4l_sizeof_chan(a4l_chinfo_t *chan);
+
+int a4l_sizeof_subd(a4l_sbinfo_t *subd);
+
+int a4l_find_range(a4l_desc_t *dsc,
+		   unsigned int idx_subd,
+		   unsigned int idx_chan,
+		   unsigned long unit,
+		   double min, double max, a4l_rnginfo_t **rng);
+
+int a4l_rawtoul(a4l_chinfo_t *chan, unsigned long *dst, void *src, int cnt);
+
+int a4l_rawtof(a4l_chinfo_t *chan,
+	       a4l_rnginfo_t *rng, float *dst, void *src, int cnt);
+
+int a4l_rawtod(a4l_chinfo_t *chan,
+	       a4l_rnginfo_t *rng, double *dst, void *src, int cnt);
+
+int a4l_ultoraw(a4l_chinfo_t *chan, void *dst, unsigned long *src, int cnt);
+
+int a4l_ftoraw(a4l_chinfo_t *chan,
+	       a4l_rnginfo_t *rng, void *dst, float *src, int cnt);
+
+int a4l_dtoraw(a4l_chinfo_t *chan,
+	       a4l_rnginfo_t *rng, void *dst, double *src, int cnt);
+
+int a4l_read_calibration_file(char *name, struct a4l_calibration_data *data);
+
+int a4l_get_softcal_converter(struct a4l_polynomial *converter,
+	                      int subd, int chan, int range,
+	                      struct a4l_calibration_data *data);
+
+int a4l_rawtodcal(a4l_chinfo_t *chan, double *dst, void *src,
+		  int cnt, struct a4l_polynomial *converter);
+int a4l_dcaltoraw(a4l_chinfo_t *chan, void *dst, double *src, int cnt,
+		  struct a4l_polynomial *converter);
+
+int a4l_math_polyfit(unsigned order, double *r, double orig,
+	             const unsigned dim, double *x, double *y);
+
+void a4l_math_mean(double *pmean, double *val, unsigned nr);
+
+void a4l_math_stddev(double *pstddev,
+	             double mean, double *val, unsigned nr);
+
+void a4l_math_stddev_of_mean(double *pstddevm,
+	                     double mean, double *val, unsigned nr);
+
+
+#endif /* !DOXYGEN_CPP */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_RTDM_ANALOGY_H */
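+
+/*
+ * Usage sketch (illustration only) of the level 1/2 services declared
+ * above: attach to an Analogy device, complete its descriptor, then
+ * run a synchronous acquisition on the input subdevice. "analogy0" is
+ * an assumed device name and error checking is elided.
+ *
+ *	a4l_desc_t dsc;
+ *	sampl_t buf[64];
+ *
+ *	a4l_open(&dsc, "analogy0");
+ *	dsc.sbdata = malloc(dsc.sbsize); // room for the complex descriptor
+ *	a4l_fill_desc(&dsc);
+ *	a4l_sync_read(&dsc, dsc.idx_read_subd,
+ *		      PACK(0, 0, AREF_GROUND), 0, buf, sizeof(buf));
+ *	a4l_close(&dsc);
+ */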
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/autotune.h b/kernel/xenomai-v3.2.4/include/rtdm/autotune.h
new file mode 100644
index 0000000..6a73cf7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/autotune.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_AUTOTUNE_H
+#define _RTDM_AUTOTUNE_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/autotune.h>
+
+#endif /* !_RTDM_AUTOTUNE_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/can.h b/kernel/xenomai-v3.2.4/include/rtdm/can.h
new file mode 100644
index 0000000..837692b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/can.h
@@ -0,0 +1,239 @@
+/**
+ * @file
+ * @note Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * @note Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_CAN_H
+#define _RTDM_CAN_H
+
+#include <net/if.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/can.h>
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_can CAN Devices
+ *
+ * This is the common interface an RTDM-compliant CAN device has to provide.
+ * Feel free to report bugs and comments on this profile to the "Socketcan"
+ * mailing list (Socketcan-core@lists.berlios.de) or directly to the authors
+ * (wg@grandegger.com or Sebastian.Smolorz@stud.uni-hannover.de).
+ *
+ * @b Profile @b Revision: 2
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @n
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n
+ * @n
+ * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_CAN @n
+ * @n
+ * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_RAW @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_CAN @n
+ * @n
+ *
+ * @par Supported Operations
+ * @n
+ * @b Socket @n
+ * @coretags{secondary-only}
+ * @n
+ * Specific return values:
+ * - -EPROTONOSUPPORT (Protocol is not supported by the driver.
+ *                     See @ref CAN_PROTO "CAN protocols"
+ *                     for possible protocols.)
+ * .
+ * @n
+ * @n
+ * @b Close @n
+ * Blocking calls to any of the @ref Send or @ref Recv "Receive" functions
+ * will be unblocked when the socket is closed and return with an error. @n
+ * @n
+ * @coretags{secondary-only}
+ * @n
+ * Specific return values: none @n
+ * @n
+ * @n
+ * @b IOCTL @n
+ * @coretags{task-unrestricted}. See @ref CANIOCTLs "below" @n
+ * Specific return values: see @ref CANIOCTLs "below" @n
+ * @n
+ * @n
+ * @anchor Bind
+ * @b Bind @n
+ * Binds a socket to one or all CAN devices (see struct sockaddr_can). If
+ * a filter list has been defined with setsockopt (see @ref Sockopts),
+ * it will be used upon reception of CAN frames to decide whether the
+ * bound socket will receive a frame. If no filter has been defined, the
+ * socket will receive @b all CAN frames on the specified interface(s). @n
+ * @n
+ * Binding to special interface index @c 0 will make the socket receive
+ * CAN frames from all CAN interfaces. @n
+ * @n
+ * Binding to an interface index is also relevant for the @ref Send functions
+ * because they will transmit a message over the interface the socket is
+ * bound to when no socket address is given to them. @n
+ * @n
+ * @n
+ * @coretags{secondary-only}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -ENOMEM (Not enough memory to fulfill the operation)
+ * - -EINVAL (Invalid address family, or invalid length of address structure)
+ * - -ENODEV (Invalid CAN interface index)
+ * - -ENOSPC (Not enough space for filter list)
+ * - -EBADF  (Socket is about to be closed)
+ * - -EAGAIN (Too many receivers. Old binding (if any) is still active.
+ *            Close some sockets and try again.)
+ * .
+ * @n
+ * @n
+ * @anchor Sockopts
+ * <b>Setsockopt, Getsockopt</b>@n
+ * These functions set and get various socket options. Currently,
+ * only CAN raw sockets are supported. @n
+ * @n
+ * Supported Levels and Options:
+ * @n
+ * - Level @b SOL_CAN_RAW : CAN RAW protocol (see @ref CAN_RAW)
+ *   - Option @ref CAN_RAW_FILTER : CAN filter list
+ *   - Option @ref CAN_RAW_ERR_FILTER : CAN error mask
+ *   - Option @ref CAN_RAW_LOOPBACK : CAN TX loopback to local sockets
+ *   .
+ * .
+ * @n
+ * @coretags{task-unrestricted}
+ * Specific return values: see links to options above. @n
+ * @n
+ * @n
+ * @anchor Recv
+ * <b>Recv, Recvfrom, Recvmsg</b> @n
+ * These functions receive CAN messages from a socket. Only one
+ * message per call can be received, so exactly one buffer of the
+ * correct length must be passed. For @c SOCK_RAW, this is the size
+ * of struct can_frame. @n
+ * @n
+ * Unlike a call to one of the @ref Send functions, a Recv function will not
+ * return with an error if an interface is down (due to bus-off or setting
+ * of stop mode) or in sleep mode. Moreover, in such a case there may still
+ * be some CAN messages in the socket buffer which could be read out
+ * successfully. @n
+ * @n
+ * It is possible to receive a high precision timestamp with every CAN
+ * message. The condition is a former instruction to the socket via
+ * @ref RTCAN_RTIOC_TAKE_TIMESTAMP. The timestamp will be copied to the
+ * @c msg_control buffer of <TT>struct msghdr</TT> if it points to a valid
+ * memory location with size of @ref nanosecs_abs_t. If this
+ * is a NULL pointer the timestamp will be discarded silently. @n
+ * @n
+ * @b Note: A @c msg_controllen of @c 0 upon completion of the function call
+ *          indicates that no timestamp is available for that message.
+ * @n
+ * @n
+ * Supported Flags [in]:
+ * - MSG_DONTWAIT (By setting this flag the operation will only succeed if
+ *                 it would not block, i.e. if there is a message in the
+ *                 socket buffer. This flag takes precedence over a timeout
+ *                 specified by @ref RTCAN_RTIOC_RCV_TIMEOUT.)
+ * - MSG_PEEK     (Receive a message but leave it in the socket buffer. The
+ *                 next receive operation will get that message again.)
+ * .
+ * @n
+ * Supported Flags [out]: none @n
+ * @n
+ * @coretags{mode-unrestricted}
+ * @n
+ * Specific return values:
+ * - Non-negative value (Indicating the successful reception of a CAN message.
+ *   For @c SOCK_RAW, this is the size of struct can_frame regardless of
+ *   the actual size of the payload.)
+ * - -EFAULT (It was not possible to access user space memory area at one
+ *            of the specified addresses.)
+ * - -EINVAL (Unsupported flag detected, or invalid length of socket address
+ *            buffer, or invalid length of message control buffer)
+ * - -EMSGSIZE (Zero or more than one iovec buffer passed, or buffer too
+ *              small)
+ * - -EAGAIN (No data available in non-blocking mode)
+ * - -EBADF (Socket was closed.)
+ * - -EINTR (Operation was interrupted explicitly or by signal.)
+ * - -ETIMEDOUT (Timeout)
+ * .
+ * @n
+ * @n
+ * @anchor Send
+ * <b>Send, Sendto, Sendmsg</b> @n
+ * These functions send out CAN messages. Only one message per call
+ * can be transmitted, so exactly one buffer of the correct length
+ * must be passed.
+ * For @c SOCK_RAW, this is the size of struct can_frame. @n
+ * @n
+ * The following only applies to @c SOCK_RAW: If a socket address of
+ * struct sockaddr_can is given, only @c can_ifindex is used. It is also
+ * possible to omit the socket address. Then the interface the socket is
+ * bound to will be used for sending messages. @n
+ * @n
+ * If an interface goes down (due to bus-off or setting of stop mode) all
+ * senders that were blocked on this interface will be woken up. @n
+ * @n
+ * @n
+ * Supported Flags:
+ * - MSG_DONTWAIT (By setting this flag the transmit operation will only
+ *                 succeed if it would not block. This flag takes precedence
+ *                 over a timeout specified by @ref RTCAN_RTIOC_SND_TIMEOUT.)
+ * .
+ * @coretags{mode-unrestricted}
+ * @n
+ * Specific return values:
+ * - Non-negative value equal to given buffer size (Indicating the
+ *   successful completion of the function call. See also note.)
+ * - -EOPNOTSUPP (MSG_OOB flag is not supported.)
+ * - -EINVAL (Unsupported flag detected @e or: Invalid length of socket
+ *            address @e or: Invalid address family @e or: Data length code
+ *            of CAN frame not between 0 and 15 @e or: CAN standard frame
+ *            has an ID not between 0 and 2031)
+ * - -EMSGSIZE (Zero or more than one buffer passed or invalid size of buffer)
+ * - -EFAULT (It was not possible to access user space memory area at one
+ *            of the specified addresses.)
+ * - -ENXIO (Invalid CAN interface index - @c 0 is not allowed here - or
+ *           socket not bound, or bound to all interfaces.)
+ * - -ENETDOWN (Controller is bus-off or in stopped state.)
+ * - -ECOMM (Controller is sleeping)
+ * - -EAGAIN (Cannot transmit without blocking but a non-blocking
+ *            call was requested.)
+ * - -EINTR (Operation was interrupted explicitly or by signal)
+ * - -EBADF (Socket was closed.)
+ * - -ETIMEDOUT (Timeout)
+ * .
+ * @b Note: Successful completion of the function call does not imply
+ *          successful transmission of the message.
+ *
+ * @{
+ *
+ * @anchor CANutils @name CAN example and utility programs
+ * @{
+ * @example rtcanconfig.c
+ * @example rtcansend.c
+ * @example rtcanrecv.c
+ * @example can-rtt.c
+ * @}
+ *
+ * @}
+ */
+#endif /* !_RTDM_CAN_H */
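+
+/*
+ * Usage sketch (illustration only) of the profile documented above: a
+ * RAW CAN socket bound to a single interface, followed by a blocking
+ * receive. "rtcan0" is an assumed interface name; the sockaddr_can
+ * and can_frame layouts come from <rtdm/uapi/can.h>; error checking
+ * is elided.
+ *
+ *	struct sockaddr_can addr;
+ *	struct can_frame frame;
+ *	struct ifreq ifr;
+ *	int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+ *
+ *	strncpy(ifr.ifr_name, "rtcan0", IFNAMSIZ);
+ *	ioctl(s, SIOCGIFINDEX, &ifr); // resolve the interface index
+ *	addr.can_family = AF_CAN;
+ *	addr.can_ifindex = ifr.ifr_ifindex;
+ *	bind(s, (struct sockaddr *)&addr, sizeof(addr));
+ *	recv(s, &frame, sizeof(frame), 0);
+ */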
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/gpio.h b/kernel/xenomai-v3.2.4/include/rtdm/gpio.h
new file mode 100644
index 0000000..c61f229
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/gpio.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_GPIO_H
+#define _RTDM_GPIO_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/gpio.h>
+
+#endif /* !_RTDM_GPIO_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h b/kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h
new file mode 100644
index 0000000..28cdfc5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/gpiopwm.h
@@ -0,0 +1,24 @@
+/**
+ * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_PWM_H
+#define _RTDM_PWM_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/gpiopwm.h>
+
+#endif /* !_RTDM_PWM_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/ipc.h b/kernel/xenomai-v3.2.4/include/rtdm/ipc.h
new file mode 100644
index 0000000..4f92d47
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/ipc.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_IPC_H
+#define _RTDM_IPC_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/ipc.h>
+
+#endif /* !_RTDM_IPC_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/net.h b/kernel/xenomai-v3.2.4/include/rtdm/net.h
new file mode 100644
index 0000000..1a667bd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/net.h
@@ -0,0 +1,38 @@
+/***
+ *
+ *  rtdm/net.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  As a special exception to the GNU General Public license, the RTnet
+ *  project allows you to use this header file in unmodified form to produce
+ *  application programs executing in user-space which use RTnet services by
+ *  normal system calls. The resulting executable will not be covered by the
+ *  GNU General Public License merely as a result of this header file use.
+ *  Instead, this header file use will be considered normal use of RTnet and
+ *  not a "derived work" in the sense of the GNU General Public License.
+ *
+ */
+
+#ifndef _RTDM_NET_H
+#define _RTDM_NET_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/net.h>
+
+#endif  /* !_RTDM_NET_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/rtdm.h b/kernel/xenomai-v3.2.4/include/rtdm/rtdm.h
new file mode 100644
index 0000000..01f07fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/rtdm.h
@@ -0,0 +1,59 @@
+/**
+ * @file
+ * @note Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_RTDM_H
+#define _RTDM_RTDM_H
+
+#include <linux/types.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <fcntl.h>
+#include <stddef.h>
+#include <stdint.h>
+#include <errno.h>
+#include <unistd.h>
+#include <boilerplate/wrappers.h>
+
+/**
+ * @defgroup rtdm RTDM
+ *
+ * The Real-Time Driver Model (RTDM) provides a unified interface to
+ * both users and developers of real-time device
+ * drivers. Specifically, it addresses the constraints of mixed
+ * RT/non-RT systems like Xenomai. RTDM conforms to POSIX
+ * semantics (IEEE Std 1003.1) where available and applicable.
+ *
+ * @b API @b Revision: 8
+ */
+
+/**
+ * @ingroup rtdm
+ * @defgroup rtdm_user_api RTDM User API
+ *
+ * Application interface to RTDM services
+ *
+ * This is the upper interface of RTDM provided to application
+ * programs both in kernel and user space. Note that certain functions
+ * may not be implemented by every device. Refer to the @ref
+ * rtdm_profiles "Device Profiles" for precise information.
+ */
+
+#include <rtdm/uapi/rtdm.h>
+
+#endif /* !_RTDM_RTDM_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/serial.h b/kernel/xenomai-v3.2.4/include/rtdm/serial.h
new file mode 100644
index 0000000..232e96d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/serial.h
@@ -0,0 +1,79 @@
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, serial device profile header
+ *
+ * @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_SERIAL_H
+#define _RTDM_SERIAL_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/serial.h>
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_serial Serial Devices
+ *
+ * This is the common interface an RTDM-compliant serial device has to
+ * provide.  Feel free to comment on this profile via the Xenomai
+ * mailing list <xenomai@xenomai.org> or directly to the author
+ * <jan.kiszka@web.de>.
+ *
+ * @b Profile @b Revision: 3
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_NAMED_DEVICE, @c RTDM_EXCLUSIVE @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_SERIAL @n
+ * @n
+ * Device Name: @c "/dev/rtdm/rtser<N>", N >= 0 @n
+ * @n
+ *
+ * @par Supported Operations
+ * @b Open @n
+ * @coretags{secondary-only}
+ * Specific return values: none @n
+ * @n
+ * @b Close @n
+ * @coretags{secondary-only}
+ * Specific return values: none @n
+ * @n
+ * @b IOCTL @n
+ * @coretags{task-unrestricted}. See @ref SERIOCTLs "below" @n
+ * Specific return values: see @ref SERIOCTLs "below" @n
+ * @n
+ * @b Read @n
+ * @coretags{mode-unrestricted}
+ * Specific return values:
+ * - -ETIMEDOUT
+ * - -EINTR (interrupted explicitly or by signal)
+ * - -EAGAIN (no data available in non-blocking mode)
+ * - -EBADF (device has been closed while reading)
+ * - -EIO (hardware error or broken bit stream)
+ * .
+ * @n
+ * @b Write @n
+ * @coretags{mode-unrestricted}
+ * Specific return values:
+ * - -ETIMEDOUT
+ * - -EINTR (interrupted explicitly or by signal)
+ * - -EAGAIN (no data written in non-blocking mode)
+ * - -EBADF (device has been closed while writing)
+ */
+
+#endif /* !_RTDM_SERIAL_H */
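+
+/*
+ * Usage sketch (illustration only) matching the profile above: open
+ * the first serial device and issue a blocking one-byte read, which
+ * may fail with any of the errors listed in the Read section. Error
+ * checking is elided.
+ *
+ *	char c;
+ *	int fd = open("/dev/rtdm/rtser0", O_RDWR);
+ *
+ *	read(fd, &c, 1);
+ *	close(fd);
+ */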
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/spi.h b/kernel/xenomai-v3.2.4/include/rtdm/spi.h
new file mode 100644
index 0000000..339a862
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/spi.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_SPI_H
+#define _RTDM_SPI_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/spi.h>
+
+#endif /* !_RTDM_SPI_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/testing.h b/kernel/xenomai-v3.2.4/include/rtdm/testing.h
new file mode 100644
index 0000000..2eb8135
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/testing.h
@@ -0,0 +1,59 @@
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, testing device profile header
+ *
+ * @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_TESTING_H
+#define _RTDM_TESTING_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/testing.h>
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_testing Testing Devices
+ *
+ * This group of devices is intended to provide in-kernel testing
+ * results.  Feel free to comment on this profile via the Xenomai
+ * mailing list <xenomai@xenomai.org> or directly to the author
+ * <jan.kiszka@web.de>.
+ *
+ * @b Profile @b Revision: 2
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_NAMED_DEVICE @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_TESTING @n
+ * @n
+ *
+ * @par Supported Operations
+ * @b Open @n
+ * @coretags{secondary-only}
+ * Specific return values: none @n
+ * @n
+ * @b Close @n
+ * @coretags{secondary-only}
+ * Specific return values: none @n
+ * @n
+ * @b IOCTL @n
+ * @coretags{task-unrestricted}. See @ref TSTIOCTLs below @n
+ * Specific return values: see @ref TSTIOCTLs below @n
+ */
+
+#endif /* _RTDM_TESTING_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am b/kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am
new file mode 100644
index 0000000..726eb1c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/Makefile.am
@@ -0,0 +1,18 @@
+includesubdir = $(includedir)/rtdm/uapi
+
+includesub_HEADERS = rtdm.h
+
+if XENO_COBALT
+includesub_HEADERS +=	\
+	analogy.h	\
+	autotune.h	\
+	can.h		\
+	gpio.h		\
+	gpiopwm.h	\
+	ipc.h		\
+	net.h		\
+	serial.h	\
+	spi.h		\
+	testing.h	\
+	udd.h
+endif
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h
new file mode 100644
index 0000000..2d53168
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/analogy.h
@@ -0,0 +1,743 @@
+/**
+ * @file
+ * Analogy for Linux, UAPI bits
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_ANALOGY_H
+#define _RTDM_UAPI_ANALOGY_H
+
+/* --- Misc precompilation constant --- */
+#define A4L_NAMELEN 20
+
+#define A4L_INFINITE 0
+#define A4L_NONBLOCK (-1)
+
+/* --- Common Analogy types --- */
+
+typedef unsigned short sampl_t;
+typedef unsigned long lsampl_t;
+
+/* MMAP ioctl argument structure */
+struct a4l_mmap_arg {
+	unsigned int idx_subd;
+	unsigned long size;
+	void *ptr;
+};
+typedef struct a4l_mmap_arg a4l_mmap_t;
+
+/* Constants related to the buffer size
+   (may be used with the BUFCFG ioctl) */
+#define A4L_BUF_MAXSIZE 0x1000000
+#define A4L_BUF_DEFSIZE 0x10000
+#define A4L_BUF_DEFMAGIC 0xffaaff55
+
+/* BUFCFG ioctl argument structure */
+struct a4l_buffer_config {
+	/* NOTE: with the latest buffer implementation, the field
+	   idx_subd became useless; buffers are now per-context, so
+	   the buffer size configuration is specific to an opened
+	   device. There is one exception: a default buffer size can
+	   be defined for a device. So far, a hack is used to
+	   implement the configuration of the default buffer size. */
+	unsigned int idx_subd;
+	unsigned long buf_size;
+};
+typedef struct a4l_buffer_config a4l_bufcfg_t;
+
+/* BUFINFO ioctl argument structure */
+struct a4l_buffer_info {
+	unsigned int idx_subd;
+	unsigned long buf_size;
+	unsigned long rw_count;
+};
+typedef struct a4l_buffer_info a4l_bufinfo_t;
+
+/* BUFCFG2 / BUFINFO2 ioctl argument structure */
+struct a4l_buffer_config2 {
+	unsigned long wake_count;
+	unsigned long reserved[3];
+};
+typedef struct a4l_buffer_config2 a4l_bufcfg2_t;
+
+/* POLL ioctl argument structure */
+struct a4l_poll {
+	unsigned int idx_subd;
+	unsigned long arg;
+};
+typedef struct a4l_poll a4l_poll_t;
+
+/* DEVCFG ioctl argument structure */
+struct a4l_link_desc {
+	unsigned char bname_size;
+	char *bname;
+	unsigned int opts_size;
+	void *opts;
+};
+typedef struct a4l_link_desc a4l_lnkdesc_t;
+
+/* DEVINFO ioctl argument structure */
+struct a4l_dev_info {
+	char board_name[A4L_NAMELEN];
+	char driver_name[A4L_NAMELEN];
+	int nb_subd;
+	int idx_read_subd;
+	int idx_write_subd;
+};
+typedef struct a4l_dev_info a4l_dvinfo_t;
+
+#define CIO 'd'
+#define A4L_DEVCFG _IOW(CIO,0,a4l_lnkdesc_t)
+#define A4L_DEVINFO _IOR(CIO,1,a4l_dvinfo_t)
+#define A4L_SUBDINFO _IOR(CIO,2,a4l_sbinfo_t)
+#define A4L_CHANINFO _IOR(CIO,3,a4l_chinfo_arg_t)
+#define A4L_RNGINFO _IOR(CIO,4,a4l_rnginfo_arg_t)
+#define A4L_CMD _IOWR(CIO,5,a4l_cmd_t)
+#define A4L_CANCEL _IOR(CIO,6,unsigned int)
+#define A4L_INSNLIST _IOR(CIO,7,unsigned int)
+#define A4L_INSN _IOR(CIO,8,unsigned int)
+#define A4L_BUFCFG _IOR(CIO,9,a4l_bufcfg_t)
+#define A4L_BUFINFO _IOWR(CIO,10,a4l_bufinfo_t)
+#define A4L_POLL _IOR(CIO,11,unsigned int)
+#define A4L_MMAP _IOWR(CIO,12,unsigned int)
+#define A4L_NBCHANINFO _IOR(CIO,13,a4l_chinfo_arg_t)
+#define A4L_NBRNGINFO _IOR(CIO,14,a4l_rnginfo_arg_t)
+
+/* These IOCTLs are bound to be merged with A4L_BUFCFG and A4L_BUFINFO
+   at the next major release */
+#define A4L_BUFCFG2 _IOR(CIO,15,a4l_bufcfg_t)
+#define A4L_BUFINFO2 _IOWR(CIO,16,a4l_bufcfg_t)
+
+/*!
+ * @addtogroup analogy_lib_async1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_CMD_xxx @name ANALOGY_CMD_xxx
+ * @brief Common command flags definitions
+ * @{
+ */
+
+/**
+ * Do not execute the command, just check it
+ */
+#define A4L_CMD_SIMUL 0x1
+/**
+ * Perform data recovery / transmission in bulk mode
+ */
+#define A4L_CMD_BULK 0x2
+/**
+ * Perform a command which will write data to the device
+ */
+#define A4L_CMD_WRITE 0x4
+
+	  /*! @} ANALOGY_CMD_xxx */
+
+/*!
+ * @anchor TRIG_xxx @name TRIG_xxx
+ * @brief Command triggers flags definitions
+ * @{
+ */
+
+/**
+ * Never trigger
+ */
+#define TRIG_NONE	0x00000001
+/**
+ * Trigger now + N ns
+ */
+#define TRIG_NOW	0x00000002
+/**
+ * Trigger on next lower level trig
+ */
+#define TRIG_FOLLOW	0x00000004
+/**
+ * Trigger at time N ns
+ */
+#define TRIG_TIME	0x00000008
+/**
+ * Trigger at rate N ns
+ */
+#define TRIG_TIMER	0x00000010
+/**
+ * Trigger when count reaches N
+ */
+#define TRIG_COUNT	0x00000020
+/**
+ * Trigger on external signal N
+ */
+#define TRIG_EXT	0x00000040
+/**
+ * Trigger on analogy-internal signal N
+ */
+#define TRIG_INT	0x00000080
+/**
+ * Driver defined trigger
+ */
+#define TRIG_OTHER	0x00000100
+/**
+ * Wake up on end-of-scan
+ */
+#define TRIG_WAKE_EOS	0x0020
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_MASK 0x00030000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_NEAREST 0x00000000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_DOWN 0x00010000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP 0x00020000
+/**
+ * Trigger not implemented yet
+ */
+#define TRIG_ROUND_UP_NEXT 0x00030000
+
+	  /*! @} TRIG_xxx */
+
+/*!
+ * @anchor CHAN_RNG_AREF @name Channel macros
+ * @brief Specific precompilation macros and constants useful for the
+ * channels descriptors tab located in the command structure
+ * @{
+ */
+
+/**
+ * Channel indication macro
+ */
+#define CHAN(a) ((a) & 0xffff)
+/**
+ * Range definition macro
+ */
+#define RNG(a) (((a) & 0xff) << 16)
+/**
+ * Reference definition macro
+ */
+#define AREF(a) (((a) & 0x03) << 24)
+/**
+ * Flags definition macro
+ */
+#define FLAGS(a) ((a) & CR_FLAGS_MASK)
+/**
+ * Channel + range + reference definition macro
+ */
+#define PACK(a, b, c) ((a) | RNG(b) | AREF(c))
+/**
+ * Channel + range + reference + flags definition macro
+ */
+#define PACK_FLAGS(a, b, c, d) (PACK(a, b, c) | FLAGS(d))
+
+/**
+ * Analog reference is analog ground
+ */
+#define AREF_GROUND 0x00
+/**
+ * Analog reference is analog common
+ */
+#define AREF_COMMON 0x01
+/**
+ * Analog reference is differential
+ */
+#define AREF_DIFF 0x02
+/**
+ * Analog reference is undefined
+ */
+#define AREF_OTHER 0x03
+
+	  /*! @} CHAN_RNG_AREF */
+
+#if !defined(DOXYGEN_CPP)
+
+#define CR_FLAGS_MASK 0xfc000000
+#define CR_ALT_FILTER (1<<26)
+#define CR_DITHER CR_ALT_FILTER
+#define CR_DEGLITCH CR_ALT_FILTER
+#define CR_ALT_SOURCE (1<<27)
+#define CR_EDGE	(1<<30)
+#define CR_INVERT (1<<31)
+
+#endif /* !DOXYGEN_CPP */
+
+/*!
+ * @brief Structure describing the asynchronous instruction
+ * @see a4l_snd_command()
+ */
+
+struct a4l_cmd_desc {
+	unsigned char idx_subd;
+			       /**< Subdevice to which the command will be applied. */
+
+	unsigned long flags;
+			       /**< Command flags */
+
+	/* Command trigger characteristics */
+	unsigned int start_src;
+			       /**< Start trigger type */
+	unsigned int start_arg;
+			       /**< Start trigger argument */
+	unsigned int scan_begin_src;
+			       /**< Scan begin trigger type */
+	unsigned int scan_begin_arg;
+			       /**< Scan begin trigger argument */
+	unsigned int convert_src;
+			       /**< Convert trigger type */
+	unsigned int convert_arg;
+			       /**< Convert trigger argument */
+	unsigned int scan_end_src;
+			       /**< Scan end trigger type */
+	unsigned int scan_end_arg;
+			       /**< Scan end trigger argument */
+	unsigned int stop_src;
+			       /**< Stop trigger type */
+	unsigned int stop_arg;
+			   /**< Stop trigger argument */
+
+	unsigned char nb_chan;
+			   /**< Count of channels related to the command */
+	unsigned int *chan_descs;
+			    /**< Array containing the channel descriptors */
+
+	/* Driver specific fields */
+	unsigned int valid_simul_stages;
+			   /**< Command simulation valid stages (driver dependent) */
+
+	unsigned int data_len;
+			   /**< Driver specific buffer size */
+	sampl_t *data;
+	                   /**< Driver specific buffer pointer */
+};
+typedef struct a4l_cmd_desc a4l_cmd_t;
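+
+/*
+ * A possible way to fill a command descriptor for a two-channel
+ * asynchronous acquisition paced by an internal timer. This is an
+ * illustrative sketch only; all numeric values are arbitrary examples.
+ *
+ * @code
+ * unsigned int chans[2] = { PACK(0, 0, AREF_GROUND), PACK(1, 0, AREF_GROUND) };
+ *
+ * struct a4l_cmd_desc cmd = {
+ *	.idx_subd = 0,                // assumed to be an AI subdevice
+ *	.flags = 0,
+ *	.start_src = TRIG_NOW,        // start immediately...
+ *	.start_arg = 0,               // ...with no delay
+ *	.scan_begin_src = TRIG_TIMER, // scans paced by a timer
+ *	.scan_begin_arg = 1000000,    // 1 ms scan period, in ns
+ *	.convert_src = TRIG_TIMER,
+ *	.convert_arg = 500000,        // 500 us between conversions
+ *	.scan_end_src = TRIG_COUNT,
+ *	.scan_end_arg = 2,            // a scan ends after both channels
+ *	.stop_src = TRIG_NONE,        // run until cancelled
+ *	.stop_arg = 0,
+ *	.nb_chan = 2,
+ *	.chan_descs = chans,
+ * };
+ * @endcode
+ */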
+
+/*! @} analogy_lib_async1 */
+
+/* --- Range section --- */
+
+/** Constant for internal use only (must not be used by driver
+    developer).  */
+#define A4L_RNG_FACTOR 1000000
+
+/**
+ * Volt unit range flag
+ */
+#define A4L_RNG_VOLT_UNIT 0x0
+/**
+ * MilliAmpere unit range flag
+ */
+#define A4L_RNG_MAMP_UNIT 0x1
+/**
+ * No unit range flag
+ */
+#define A4L_RNG_NO_UNIT 0x2
+/**
+ * External unit range flag
+ */
+#define A4L_RNG_EXT_UNIT 0x4
+
+/**
+ * Macro to retrieve the range unit from the range flags
+ */
+#define A4L_RNG_UNIT(x) (x & (A4L_RNG_VOLT_UNIT |	\
+			      A4L_RNG_MAMP_UNIT |	\
+			      A4L_RNG_NO_UNIT |		\
+			      A4L_RNG_EXT_UNIT))
+
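+
+/*
+ * Since range boundaries are stored as integers scaled by
+ * A4L_RNG_FACTOR (e.g. volts * 1000000 for A4L_RNG_VOLT_UNIT), a raw
+ * sample can be converted to a physical value along these lines. This
+ * is an illustrative sketch assuming an unsigned sample of nb_bits
+ * bits spanning the range linearly.
+ *
+ * @code
+ * double to_phys(unsigned long raw, long rng_min, long rng_max, int nb_bits)
+ * {
+ *	double min = (double)rng_min / A4L_RNG_FACTOR;
+ *	double max = (double)rng_max / A4L_RNG_FACTOR;
+ *	double full = (double)((1UL << nb_bits) - 1);
+ *
+ *	return min + (max - min) * ((double)raw / full);
+ * }
+ * @endcode
+ */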
+/* --- Subdevice flags desc stuff --- */
+
+/* TODO: replace ANALOGY_SUBD_AI with ANALOGY_SUBD_ANALOG
+   and ANALOGY_SUBD_INPUT */
+
+/* Subdevice types masks */
+#define A4L_SUBD_MASK_READ 0x80000000
+#define A4L_SUBD_MASK_WRITE 0x40000000
+#define A4L_SUBD_MASK_SPECIAL 0x20000000
+
+/*!
+ * @addtogroup analogy_subdevice
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_SUBD_xxx @name Subdevice types
+ * @brief Flags to define the subdevice type
+ * @{
+ */
+
+/**
+ * Unused subdevice
+ */
+#define A4L_SUBD_UNUSED (A4L_SUBD_MASK_SPECIAL|0x1)
+/**
+ * Analog input subdevice
+ */
+#define A4L_SUBD_AI (A4L_SUBD_MASK_READ|0x2)
+/**
+ * Analog output subdevice
+ */
+#define A4L_SUBD_AO (A4L_SUBD_MASK_WRITE|0x4)
+/**
+ * Digital input subdevice
+ */
+#define A4L_SUBD_DI (A4L_SUBD_MASK_READ|0x8)
+/**
+ * Digital output subdevice
+ */
+#define A4L_SUBD_DO (A4L_SUBD_MASK_WRITE|0x10)
+/**
+ * Digital input/output subdevice
+ */
+#define A4L_SUBD_DIO (A4L_SUBD_MASK_SPECIAL|0x20)
+/**
+ * Counter subdevice
+ */
+#define A4L_SUBD_COUNTER (A4L_SUBD_MASK_SPECIAL|0x40)
+/**
+ * Timer subdevice
+ */
+#define A4L_SUBD_TIMER (A4L_SUBD_MASK_SPECIAL|0x80)
+/**
+ * Memory, EEPROM, DPRAM
+ */
+#define A4L_SUBD_MEMORY (A4L_SUBD_MASK_SPECIAL|0x100)
+/**
+ * Calibration subdevice (DACs)
+ */
+#define A4L_SUBD_CALIB (A4L_SUBD_MASK_SPECIAL|0x200)
+/**
+ * Processor, DSP
+ */
+#define A4L_SUBD_PROC (A4L_SUBD_MASK_SPECIAL|0x400)
+/**
+ * Serial IO subdevice
+ */
+#define A4L_SUBD_SERIAL (A4L_SUBD_MASK_SPECIAL|0x800)
+/**
+ * Mask which gathers all the types
+ */
+#define A4L_SUBD_TYPES (A4L_SUBD_UNUSED |	 \
+			   A4L_SUBD_AI |	 \
+			   A4L_SUBD_AO |	 \
+			   A4L_SUBD_DI |	 \
+			   A4L_SUBD_DO |	 \
+			   A4L_SUBD_DIO |	 \
+			   A4L_SUBD_COUNTER | \
+			   A4L_SUBD_TIMER |	 \
+			   A4L_SUBD_MEMORY |	 \
+			   A4L_SUBD_CALIB |	 \
+			   A4L_SUBD_PROC |	 \
+			   A4L_SUBD_SERIAL)
+
+/*! @} ANALOGY_SUBD_xxx */
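+
+/*
+ * The type of a subdevice can be recovered from its flags by masking
+ * with A4L_SUBD_TYPES; a sketch (illustrative only):
+ *
+ * @code
+ * int is_analog_input(unsigned long subd_flags)
+ * {
+ *	return (subd_flags & A4L_SUBD_TYPES) == A4L_SUBD_AI;
+ * }
+ * @endcode
+ */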
+
+/*!
+ * @anchor ANALOGY_SUBD_FT_xxx @name Subdevice features
+ * @brief Flags to define the subdevice's capabilities
+ * @{
+ */
+
+/* Subdevice capabilities */
+/**
+ * The subdevice can handle commands (i.e. it can perform asynchronous
+ * acquisition)
+ */
+#define A4L_SUBD_CMD 0x1000
+/**
+ * The subdevice supports mmap operations (technically, any driver can
+ * do it; however, the developer may prefer that the driver be accessed
+ * only through read / write)
+ */
+#define A4L_SUBD_MMAP 0x8000
+
+/*! @} ANALOGY_SUBD_FT_xxx */
+
+/*!
+ * @anchor ANALOGY_SUBD_ST_xxx @name Subdevice status
+ * @brief Flags to define the subdevice's status
+ * @{
+ */
+
+/* Subdevice status flag(s) */
+/**
+ * The subdevice is busy, a synchronous or an asynchronous acquisition
+ * is occurring
+ */
+#define A4L_SUBD_BUSY_NR 0
+#define A4L_SUBD_BUSY (1 << A4L_SUBD_BUSY_NR)
+
+/**
+ * The subdevice is about to be cleaned in the middle of the detach
+ * procedure
+ */
+#define A4L_SUBD_CLEAN_NR 1
+#define A4L_SUBD_CLEAN (1 << A4L_SUBD_CLEAN_NR)
+
+
+/*! @} ANALOGY_SUBD_ST_xxx */
+
+/* --- Subdevice related IOCTL arguments structures --- */
+
+/* SUBDINFO IOCTL argument */
+struct a4l_subd_info {
+	unsigned long flags;
+	unsigned long status;
+	unsigned char nb_chan;
+};
+typedef struct a4l_subd_info a4l_sbinfo_t;
+
+/* CHANINFO / NBCHANINFO IOCTL arguments */
+struct a4l_chan_info {
+	unsigned long chan_flags;
+	unsigned char nb_rng;
+	unsigned char nb_bits;
+};
+typedef struct a4l_chan_info a4l_chinfo_t;
+
+struct a4l_chinfo_arg {
+	unsigned int idx_subd;
+	void *info;
+};
+typedef struct a4l_chinfo_arg a4l_chinfo_arg_t;
+
+/* RNGINFO / NBRNGINFO IOCTL arguments */
+struct a4l_rng_info {
+	long min;
+	long max;
+	unsigned long flags;
+};
+typedef struct a4l_rng_info a4l_rnginfo_t;
+
+struct a4l_rng_info_arg {
+	unsigned int idx_subd;
+	unsigned int idx_chan;
+	void *info;
+};
+typedef struct a4l_rng_info_arg a4l_rnginfo_arg_t;
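+
+/*
+ * An illustrative sketch of passing such an argument block to the
+ * NBRNGINFO request; fd is assumed to be a descriptor opened on an
+ * Analogy device, and the exact fill-in convention is defined by the
+ * Analogy core:
+ *
+ * @code
+ * struct a4l_rng_info rng;
+ * struct a4l_rng_info_arg arg = {
+ *	.idx_subd = 0,   // subdevice index (example value)
+ *	.idx_chan = 0,   // channel index (example value)
+ *	.info = &rng,
+ * };
+ * int err;
+ *
+ * err = ioctl(fd, A4L_NBRNGINFO, &arg);
+ * @endcode
+ */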
+
+/*! @} */
+
+#define A4L_INSN_MASK_READ 0x8000000
+#define A4L_INSN_MASK_WRITE 0x4000000
+#define A4L_INSN_MASK_SPECIAL 0x2000000
+
+/*!
+ * @addtogroup analogy_lib_sync1
+ * @{
+ */
+
+/*!
+ * @anchor ANALOGY_INSN_xxx @name Instruction type
+ * @brief Flags to define the type of instruction
+ * @{
+ */
+
+/**
+ * Read instruction
+ */
+#define A4L_INSN_READ (0 | A4L_INSN_MASK_READ)
+/**
+ * Write instruction
+ */
+#define A4L_INSN_WRITE (1 | A4L_INSN_MASK_WRITE)
+/**
+ * "Bits" instruction
+ */
+#define A4L_INSN_BITS (2 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_WRITE)
+/**
+ * Configuration instruction
+ */
+#define A4L_INSN_CONFIG (3 | A4L_INSN_MASK_READ | \
+			 A4L_INSN_MASK_WRITE)
+/**
+ * Get time instruction
+ */
+#define A4L_INSN_GTOD (4 | A4L_INSN_MASK_READ | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Wait instruction
+ */
+#define A4L_INSN_WAIT (5 | A4L_INSN_MASK_WRITE | \
+		       A4L_INSN_MASK_SPECIAL)
+/**
+ * Trigger instruction (to start asynchronous acquisition)
+ */
+#define A4L_INSN_INTTRIG (6 | A4L_INSN_MASK_WRITE | \
+			  A4L_INSN_MASK_SPECIAL)
+
+	  /*! @} ANALOGY_INSN_xxx */
+
+/**
+ * Maximal wait duration
+ */
+#define A4L_INSN_WAIT_MAX 100000
+
+/*!
+ * @anchor INSN_CONFIG_xxx @name Configuration instruction type
+ * @brief Values to define the type of configuration instruction
+ * @{
+ */
+
+#define A4L_INSN_CONFIG_DIO_INPUT		0
+#define A4L_INSN_CONFIG_DIO_OUTPUT		1
+#define A4L_INSN_CONFIG_DIO_OPENDRAIN		2
+#define A4L_INSN_CONFIG_ANALOG_TRIG		16
+#define A4L_INSN_CONFIG_ALT_SOURCE		20
+#define A4L_INSN_CONFIG_DIGITAL_TRIG		21
+#define A4L_INSN_CONFIG_BLOCK_SIZE		22
+#define A4L_INSN_CONFIG_TIMER_1			23
+#define A4L_INSN_CONFIG_FILTER			24
+#define A4L_INSN_CONFIG_CHANGE_NOTIFY		25
+#define A4L_INSN_CONFIG_SERIAL_CLOCK		26
+#define A4L_INSN_CONFIG_BIDIRECTIONAL_DATA	27
+#define A4L_INSN_CONFIG_DIO_QUERY		28
+#define A4L_INSN_CONFIG_PWM_OUTPUT		29
+#define A4L_INSN_CONFIG_GET_PWM_OUTPUT		30
+#define A4L_INSN_CONFIG_ARM			31
+#define A4L_INSN_CONFIG_DISARM			32
+#define A4L_INSN_CONFIG_GET_COUNTER_STATUS	33
+#define A4L_INSN_CONFIG_RESET			34
+#define A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR	1001	/* Use CTR as single pulse generator */
+#define A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR	1002	/* Use CTR as pulse train generator */
+#define A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER	1003	/* Use the counter as encoder */
+#define A4L_INSN_CONFIG_SET_GATE_SRC		2001	/* Set gate source */
+#define A4L_INSN_CONFIG_GET_GATE_SRC		2002	/* Get gate source */
+#define A4L_INSN_CONFIG_SET_CLOCK_SRC		2003	/* Set master clock source */
+#define A4L_INSN_CONFIG_GET_CLOCK_SRC		2004	/* Get master clock source */
+#define A4L_INSN_CONFIG_SET_OTHER_SRC		2005	/* Set other source */
+#define A4L_INSN_CONFIG_SET_COUNTER_MODE	4097
+#define A4L_INSN_CONFIG_SET_ROUTING		4099
+#define A4L_INSN_CONFIG_GET_ROUTING		4109
+
+/*! @} INSN_CONFIG_xxx */
+
+/*!
+ * @anchor ANALOGY_COUNTER_xxx @name Counter status bits
+ * @brief Status bits for INSN_CONFIG_GET_COUNTER_STATUS
+ * @{
+ */
+
+#define A4L_COUNTER_ARMED		0x1
+#define A4L_COUNTER_COUNTING		0x2
+#define A4L_COUNTER_TERMINAL_COUNT	0x4
+
+	  /*! @} ANALOGY_COUNTER_xxx */
+
+/*!
+ * @anchor ANALOGY_IO_DIRECTION @name IO direction
+ * @brief Values to define the IO polarity
+ * @{
+ */
+
+#define A4L_INPUT	0
+#define A4L_OUTPUT	1
+#define A4L_OPENDRAIN	2
+
+	  /*! @} ANALOGY_IO_DIRECTION */
+
+
+/*!
+ * @anchor ANALOGY_EV_xxx @name Events types
+ * @brief Values to define the Analogy events. They may be used to send
+ * specific events through the instruction interface.
+ * @{
+ */
+
+#define A4L_EV_START		0x00040000
+#define A4L_EV_SCAN_BEGIN	0x00080000
+#define A4L_EV_CONVERT		0x00100000
+#define A4L_EV_SCAN_END		0x00200000
+#define A4L_EV_STOP		0x00400000
+
+/*! @} ANALOGY_EV_xxx */
+
+/*!
+ * @brief Structure describing the synchronous instruction
+ * @see a4l_snd_insn()
+ */
+
+struct a4l_instruction {
+	unsigned int type;
+		       /**< Instruction type */
+	unsigned int idx_subd;
+			   /**< Subdevice to which the instruction will be applied. */
+	unsigned int chan_desc;
+			    /**< Channel descriptor */
+	unsigned int data_size;
+			    /**< Size of the instruction data */
+	void *data;
+		    /**< Instruction data */
+};
+typedef struct a4l_instruction a4l_insn_t;
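+
+/*
+ * A synchronous read of a few samples from an analog input channel
+ * might be described this way (illustrative sketch; values are
+ * arbitrary examples):
+ *
+ * @code
+ * sampl_t buf[16];
+ *
+ * struct a4l_instruction insn = {
+ *	.type = A4L_INSN_READ,
+ *	.idx_subd = 0,                        // assumed AI subdevice
+ *	.chan_desc = PACK(0, 0, AREF_GROUND), // channel 0, range 0
+ *	.data_size = sizeof(buf),
+ *	.data = buf,
+ * };
+ * @endcode
+ */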
+
+/*!
+ * @brief Structure describing the list of synchronous instructions
+ * @see a4l_snd_insnlist()
+ */
+
+struct a4l_instruction_list {
+	unsigned int count;
+			/**< Instructions count */
+	a4l_insn_t *insns;
+			  /**< Array containing pointers to the instructions */
+};
+typedef struct a4l_instruction_list a4l_insnlst_t;
+
+/*! @} analogy_lib_sync1 */
+
+struct a4l_calibration_subdev {
+	a4l_sbinfo_t *info;
+	char *name;
+	int slen;
+	int idx;
+};
+
+struct a4l_calibration_subdev_data {
+	int index;
+	int channel;
+	int range;
+	int expansion;
+	int nb_coeff;
+	double *coeff;
+
+};
+
+struct a4l_calibration_data {
+	char *driver_name;
+	char *board_name;
+	int nb_ai;
+	struct a4l_calibration_subdev_data *ai;
+	int nb_ao;
+	struct a4l_calibration_subdev_data *ao;
+};
+
+struct a4l_polynomial {
+	int expansion;
+	int order;
+	int nb_coeff;
+	double *coeff;
+};
+
+
+#endif /* _RTDM_UAPI_ANALOGY_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h
new file mode 100644
index 0000000..ab6cab1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/autotune.h
@@ -0,0 +1,40 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_AUTOTUNE_H
+#define _RTDM_UAPI_AUTOTUNE_H
+
+#include <linux/types.h>
+
+#define RTDM_CLASS_AUTOTUNE		RTDM_CLASS_MISC
+#define RTDM_SUBCLASS_AUTOTUNE		0
+
+struct autotune_setup {
+	__u32 period;
+	__u32 quiet;
+};
+
+#define AUTOTUNE_RTIOC_IRQ		_IOW(RTDM_CLASS_AUTOTUNE, 0, struct autotune_setup)
+#define AUTOTUNE_RTIOC_KERN		_IOW(RTDM_CLASS_AUTOTUNE, 1, struct autotune_setup)
+#define AUTOTUNE_RTIOC_USER		_IOW(RTDM_CLASS_AUTOTUNE, 2, struct autotune_setup)
+#define AUTOTUNE_RTIOC_PULSE		_IOW(RTDM_CLASS_AUTOTUNE, 3, __u64)
+#define AUTOTUNE_RTIOC_RUN		_IOR(RTDM_CLASS_AUTOTUNE, 4, __u32)
+#define AUTOTUNE_RTIOC_RESET		_IO(RTDM_CLASS_AUTOTUNE, 5)
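+
+/*
+ * An illustrative tuning sequence (not part of this header); the
+ * device node name and the use of the plain ioctl(2) entry point are
+ * assumptions made for the example:
+ *
+ * @code
+ * struct autotune_setup setup = {
+ *	.period = 100000,	// 100 us sampling period (example value)
+ *	.quiet = 1,		// limit diagnostic output
+ * };
+ * __u32 gravity;
+ * int fd;
+ *
+ * fd = open("/dev/rtdm/autotune", O_RDWR);	// assumed device node
+ * ioctl(fd, AUTOTUNE_RTIOC_IRQ, &setup);	// tune the IRQ stage
+ * ioctl(fd, AUTOTUNE_RTIOC_RUN, &gravity);	// run, collect the result
+ * @endcode
+ */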
+
+#endif /* !_RTDM_UAPI_AUTOTUNE_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h
new file mode 100644
index 0000000..8d0d837
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/can.h
@@ -0,0 +1,905 @@
+/**
+ * @file
+ * Real-Time Driver Model for RT-Socket-CAN, CAN device profile header
+ *
+ * @note Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * @note Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * This RTDM CAN device profile header is based on:
+ *
+ * include/linux/can.h, include/linux/socket.h, net/can/pf_can.h in
+ * linux-can.patch, a CAN socket framework for Linux
+ *
+ * Copyright (C) 2004, 2005,
+ * Robert Schwebel, Benedikt Spranger, Marc Kleine-Budde, Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_CAN_H
+#define _RTDM_UAPI_CAN_H
+
+/**
+ * @addtogroup rtdm_can
+ * @{
+ */
+
+#define RTCAN_PROFILE_VER  2
+
+#ifndef AF_CAN
+
+/** CAN address family */
+#define AF_CAN	29
+
+/** CAN protocol family */
+#define PF_CAN	AF_CAN
+
+#endif
+
+/** CAN socket levels
+ *
+ *  Used for @ref Sockopts for the particular protocols.
+ */
+#define SOL_CAN_RAW  103
+
+/** Type of CAN id (see @ref CAN_xxx_MASK and @ref CAN_xxx_FLAG) */
+typedef uint32_t can_id_t;
+typedef uint32_t canid_t;
+
+/** Type of CAN error mask */
+typedef can_id_t can_err_mask_t;
+
+/*!
+ * @anchor CAN_xxx_MASK @name CAN ID masks
+ * Bit masks for masking CAN IDs
+ * @{ */
+
+/** Bit mask for extended CAN IDs */
+#define CAN_EFF_MASK  0x1FFFFFFF
+
+/** Bit mask for standard CAN IDs */
+#define CAN_SFF_MASK  0x000007FF
+
+/** @} */
+
+/*!
+ * @anchor CAN_xxx_FLAG @name CAN ID flags
+ * Flags within a CAN ID indicating special CAN frame attributes
+ * @{ */
+/** Extended frame */
+#define CAN_EFF_FLAG  0x80000000
+/** Remote transmission frame */
+#define CAN_RTR_FLAG  0x40000000
+/** Error frame (see @ref Errors), not valid in struct can_filter */
+#define CAN_ERR_FLAG  0x20000000
+/** Invert CAN filter definition, only valid in struct can_filter */
+#define CAN_INV_FILTER CAN_ERR_FLAG
+
+/** @} */
+
+/*!
+ * @anchor CAN_PROTO @name Particular CAN protocols
+ * Possible protocols for the PF_CAN protocol family
+ *
+ * Currently only the RAW protocol is supported.
+ * @{ */
+/** Raw protocol of @c PF_CAN, applicable to socket type @c SOCK_RAW */
+#define CAN_RAW  1
+/** @} */
+
+#define CAN_BAUDRATE_UNKNOWN       ((uint32_t)-1)
+#define CAN_BAUDRATE_UNCONFIGURED  0
+
+/**
+ * Baudrate definition in bits per second
+ */
+typedef uint32_t can_baudrate_t;
+
+/**
+ * Supported CAN bit-time types
+ */
+enum CAN_BITTIME_TYPE {
+	/** Standard bit-time definition according to Bosch */
+	CAN_BITTIME_STD,
+	/** Hardware-specific BTR bit-time definition */
+	CAN_BITTIME_BTR
+};
+
+/**
+ * See @ref CAN_BITTIME_TYPE
+ */
+typedef enum CAN_BITTIME_TYPE can_bittime_type_t;
+
+/**
+ * Standard bit-time parameters according to Bosch
+ */
+struct can_bittime_std {
+	uint32_t brp;		/**< Baud rate prescaler */
+	uint8_t prop_seg;	/**< from 1 to 8 */
+	uint8_t phase_seg1;	/**< from 1 to 8 */
+	uint8_t phase_seg2;	/**< from 1 to 8 */
+	uint8_t sjw:7;		/**< from 1 to 4 */
+	uint8_t sam:1;		/**< 1 - enable triple sampling */
+};
+
+/**
+ * Hardware-specific BTR bit-times
+ */
+struct can_bittime_btr {
+
+	uint8_t btr0;		/**< Bus timing register 0 */
+	uint8_t btr1;		/**< Bus timing register 1 */
+};
+
+/**
+ * Custom CAN bit-time definition
+ */
+struct can_bittime {
+	/** Type of bit-time definition */
+	can_bittime_type_t type;
+
+	union {
+		/** Standard bit-time */
+		struct can_bittime_std std;
+		/** Hardware-specific BTR bit-time */
+		struct can_bittime_btr btr;
+	};
+};
+
+/*!
+ * @anchor CAN_MODE @name CAN operation modes
+ * Modes into which CAN controllers can be set
+ * @{ */
+enum CAN_MODE {
+	/*! Set controller in Stop mode (no reception / transmission possible) */
+	CAN_MODE_STOP = 0,
+
+	/*! Set controller into normal operation. @n
+	 *  Coming from stopped mode or bus off, the controller begins with no
+	 *  errors in @ref CAN_STATE_ACTIVE. */
+	CAN_MODE_START,
+
+	/*! Set controller into Sleep mode. @n
+	 *  This is only possible if the controller is not stopped or bus-off. @n
+	 *  Notice that sleep mode will only be entered when there is no bus
+	 *  activity. If the controller detects bus activity while "sleeping"
+	 *  it will go into operating mode again. @n
+	 *  To actively leave sleep mode again trigger @c CAN_MODE_START. */
+	CAN_MODE_SLEEP
+};
+/** @} */
+
+/** See @ref CAN_MODE */
+typedef enum CAN_MODE can_mode_t;
+
+/*!
+ * @anchor CAN_CTRLMODE @name CAN controller modes
+ * Special CAN controllers modes, which can be or'ed together.
+ *
+ * @note These modes are hardware-dependent. Please consult the hardware
+ * manual of the CAN controller for more detailed information.
+ *
+ * @{ */
+
+/*! Listen-Only mode
+ *
+ *  In this mode the CAN controller gives no acknowledgment on the CAN bus,
+ *  even if a message is received successfully, and does not transmit
+ *  messages. This mode might be useful for bus monitoring, hot-plugging
+ *  or throughput analysis. */
+#define CAN_CTRLMODE_LISTENONLY 0x1
+
+/*! Loopback mode
+ *
+ * In this mode the CAN controller performs an internal loop-back: a message
+ * is transmitted and simultaneously received. This mode can be used for
+ * self-test operation. */
+#define CAN_CTRLMODE_LOOPBACK   0x2
+
+/*! Triple sampling mode
+ *
+ * In this mode the CAN controller uses Triple sampling. */
+#define CAN_CTRLMODE_3_SAMPLES  0x4
+
+/** @} */
+
+/** See @ref CAN_CTRLMODE */
+typedef int can_ctrlmode_t;
+
+/*!
+ * @anchor CAN_STATE @name CAN controller states
+ * States a CAN controller can be in.
+ * @{ */
+enum CAN_STATE {
+	/** CAN controller is error active */
+	CAN_STATE_ERROR_ACTIVE = 0,
+	/** CAN controller is active */
+	CAN_STATE_ACTIVE = 0,
+
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_ERROR_WARNING = 1,
+	/** CAN controller is error active, warning level is reached */
+	CAN_STATE_BUS_WARNING = 1,
+
+	/** CAN controller is error passive */
+	CAN_STATE_ERROR_PASSIVE = 2,
+	/** CAN controller is error passive */
+	CAN_STATE_BUS_PASSIVE = 2,
+
+	/** CAN controller went into Bus Off */
+	CAN_STATE_BUS_OFF,
+
+	/** CAN controller is scanning to get the baudrate */
+	CAN_STATE_SCANNING_BAUDRATE,
+
+	/** CAN controller is in stopped mode */
+	CAN_STATE_STOPPED,
+
+	/** CAN controller is in Sleep mode */
+	CAN_STATE_SLEEPING,
+};
+/** @} */
+
+/** See @ref CAN_STATE */
+typedef enum CAN_STATE can_state_t;
+
+#define CAN_STATE_OPERATING(state) ((state) < CAN_STATE_BUS_OFF)
+
+/**
+ * Filter for reception of CAN messages.
+ *
+ * This filter works as follows:
+ * A received CAN ID is AND'ed bitwise with @c can_mask and then compared to
+ * @c can_id. This also includes the @ref CAN_EFF_FLAG and @ref CAN_RTR_FLAG
+ * of @ref CAN_xxx_FLAG. If this comparison is true, the message will be
+ * received by the socket. The logic can be inverted with the @c can_id flag
+ * @ref CAN_INV_FILTER :
+ *
+ * @code
+ * if (can_id & CAN_INV_FILTER) {
+ *    if ((received_can_id & can_mask) != (can_id & ~CAN_INV_FILTER))
+ *       accept-message;
+ * } else {
+ *    if ((received_can_id & can_mask) == can_id)
+ *       accept-message;
+ * }
+ * @endcode
+ *
+ * Multiple filters can be arranged in a filter list and set with
+ * @ref Sockopts. If one of these filters matches a CAN ID upon reception
+ * of a CAN frame, this frame is accepted.
+ *
+ */
+typedef struct can_filter {
+	/** CAN ID which must match with incoming IDs after passing the mask.
+	 *  The filter logic can be inverted with the flag @ref CAN_INV_FILTER. */
+	uint32_t can_id;
+
+	/** Mask which is applied to incoming IDs. See @ref CAN_xxx_MASK
+	 *  "CAN ID masks" if exactly one CAN ID should come through. */
+	uint32_t can_mask;
+} can_filter_t;
+
+/**
+ * Socket address structure for the CAN address family
+ */
+struct sockaddr_can {
+	/** CAN address family, must be @c AF_CAN */
+	sa_family_t can_family;
+
+	/** Interface index of CAN controller. See @ref SIOCGIFINDEX. */
+	int can_ifindex;
+};
+
+/**
+ * Raw CAN frame
+ *
+ * Central structure for receiving and sending CAN frames.
+ */
+typedef struct can_frame {
+	/** CAN ID of the frame
+	 *
+	 *  See @ref CAN_xxx_FLAG "CAN ID flags" for special bits.
+	 */
+	can_id_t can_id;
+
+	/** Size of the payload in bytes */
+	uint8_t can_dlc;
+
+	/** Payload data bytes */
+	uint8_t data[8] __attribute__ ((aligned(8)));
+} can_frame_t;
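+
+/*
+ * Building a standard data frame and an extended remote frame; an
+ * illustrative sketch, ID values are arbitrary:
+ *
+ * @code
+ * struct can_frame frame = {
+ *	.can_id = 0x123,	// standard 11-bit ID
+ *	.can_dlc = 2,
+ *	.data = { 0xde, 0xad },
+ * };
+ *
+ * struct can_frame rtr = {
+ *	.can_id = (0x1234567 & CAN_EFF_MASK) | CAN_EFF_FLAG | CAN_RTR_FLAG,
+ *	.can_dlc = 0,		// remote frames carry no payload here
+ * };
+ * @endcode
+ */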
+
+/**
+ * CAN interface request descriptor
+ *
+ * Parameter block for submitting CAN control requests.
+ */
+struct can_ifreq {
+	union {
+		char	ifrn_name[IFNAMSIZ];
+	} ifr_ifrn;
+	
+	union {
+		struct can_bittime bittime;
+		can_baudrate_t baudrate;
+		can_ctrlmode_t ctrlmode;
+		can_mode_t mode;
+		can_state_t state;
+		int ifru_ivalue;
+	} ifr_ifru;
+};
+
+/*!
+ * @anchor RTCAN_TIMESTAMPS   @name Timestamp switches
+ * Arguments to pass to @ref RTCAN_RTIOC_TAKE_TIMESTAMP
+ * @{ */
+#define RTCAN_TAKE_NO_TIMESTAMPS	0  /**< Switch off taking timestamps */
+#define RTCAN_TAKE_TIMESTAMPS		1  /**< Do take timestamps */
+/** @} */
+
+#define RTIOC_TYPE_CAN  RTDM_CLASS_CAN
+
+/*!
+ * @anchor Rawsockopts @name RAW socket options
+ * Setting and getting CAN RAW socket options.
+ * @{ */
+
+/**
+ * CAN filter definition
+ *
+ * A CAN raw filter list with elements of struct can_filter can be installed
+ * with @c setsockopt. This list is used upon reception of CAN frames to
+ * decide whether the bound socket will receive a frame. An empty filter list
+ * can also be defined using optlen = 0, which is recommended for write-only
+ * sockets.
+ * @n
+ * If the socket was already bound with @ref Bind, the old filter list
+ * gets replaced with the new one. Be aware that already received, but
+ * not read out CAN frames may stay in the socket buffer.
+ * @n
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_FILTER
+ *
+ * @param [in] optval Pointer to array of struct can_filter.
+ *
+ * @param [in] optlen Size of filter list: count * sizeof(struct can_filter).
+ * @n
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -ENOMEM (Not enough memory to fulfill the operation)
+ * - -EINVAL (Invalid length "optlen")
+ * - -ENOSPC (No space to store filter list, check RT-Socket-CAN kernel
+ *            parameters)
+ * .
+ */
+#define CAN_RAW_FILTER		0x1
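+
+/*
+ * An illustrative sketch of installing a two-element filter list,
+ * assuming sockfd was obtained via socket(PF_CAN, SOCK_RAW, CAN_RAW):
+ *
+ * @code
+ * struct can_filter flist[2] = {
+ *	{ .can_id = 0x123, .can_mask = CAN_SFF_MASK },	// exactly ID 0x123
+ *	{ .can_id = 0x200, .can_mask = 0x700 },		// IDs 0x200 - 0x2ff
+ * };
+ *
+ * setsockopt(sockfd, SOL_CAN_RAW, CAN_RAW_FILTER, flist, sizeof(flist));
+ * @endcode
+ */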
+
+/**
+ * CAN error mask
+ *
+ * A CAN error mask (see @ref Errors) can be set with @c setsockopt. This
+ * mask is then used to decide if error frames are delivered to this socket
+ * in case of error conditions. The error frames are marked with the
+ * @ref CAN_ERR_FLAG of @ref CAN_xxx_FLAG and must be handled by the
+ * application properly. A detailed description of the errors can be
+ * found in the @c can_id and the @c data fields of struct can_frame
+ * (see @ref Errors for further details).
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_ERR_FILTER
+ *
+ * @param [in] optval Pointer to error mask of type can_err_mask_t.
+ *
+ * @param [in] optlen Size of error mask: sizeof(can_err_mask_t).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * .
+ */
+#define CAN_RAW_ERR_FILTER	0x2
+
+/**
+ * CAN TX loopback
+ *
+ * The TX loopback to other local sockets can be selected with this
+ * @c setsockopt.
+ *
+ * @note The TX loopback feature must be enabled in the kernel and then
+ * the loopback to other local TX sockets is enabled by default.
+ *
+ * @n
+ * @param [in] level @b SOL_CAN_RAW
+ *
+ * @param [in] optname @b CAN_RAW_LOOPBACK
+ *
+ * @param [in] optval Pointer to integer value.
+ *
+ * @param [in] optlen Size of int: sizeof(int).
+ *
+ * @coretags{task-unrestricted}
+ * @n
+ * Specific return values:
+ * - -EFAULT (It was not possible to access user space memory area at the
+ *            specified address.)
+ * - -EINVAL (Invalid length "optlen")
+ * - -EOPNOTSUPP (not supported, check RT-Socket-CAN kernel parameters).
+ */
+#define CAN_RAW_LOOPBACK	0x3
+
+/**
+ * CAN receive own messages
+ *
+ * Not supported by RT-Socket-CAN, but defined for compatibility with
+ * Socket-CAN.
+ */
+#define CAN_RAW_RECV_OWN_MSGS   0x4
+
+/** @} */
+
+/*!
+ * @anchor CANIOCTLs @name IOCTLs
+ * CAN device IOCTLs
+ *
+ * @deprecated Passing \c struct \c ifreq as a request descriptor
+ * for CAN IOCTLs is still accepted for backward compatibility,
+ * however it is recommended to switch to \c struct \c can_ifreq at
+ * the first opportunity.
+ *
+ * @{ */
+
+/**
+ * Get CAN interface index by name
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                     (<TT>struct can_ifreq</TT>). If
+ *                     <TT>ifr_name</TT> holds a valid CAN interface
+ *                     name, <TT>ifr_ifindex</TT> will be filled with
+ *                     the corresponding interface index.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted}
+ */
+#ifdef DOXYGEN_CPP /* For Doxygen only, already defined by kernel headers */
+#define SIOCGIFINDEX defined_by_kernel_header_file
+#endif
+
+/**
+ * Set baud rate
+ *
+ * The baudrate must be specified in bits per second. The driver will
+ * try to calculate reasonable CAN bit-timing parameters. You can use
+ * @ref SIOCSCANCUSTOMBITTIME to set custom bit-timing.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EDOM  : Baud rate not possible.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the baud rate is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANBAUDRATE	_IOW(RTIOC_TYPE_CAN, 0x01, struct can_ifreq)
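+
+/*
+ * An illustrative sketch, assuming sockfd is a CAN socket descriptor;
+ * interface name and rate are example values:
+ *
+ * @code
+ * struct can_ifreq ifr;
+ *
+ * strncpy(ifr.ifr_ifrn.ifrn_name, "rtcan0", IFNAMSIZ);
+ * ifr.ifr_ifru.baudrate = 500000;	// 500 kbit/s
+ * ioctl(sockfd, SIOCSCANBAUDRATE, &ifr);
+ * @endcode
+ */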
+
+/**
+ * Get baud rate
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_baudrate_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANBAUDRATE	_IOWR(RTIOC_TYPE_CAN, 0x02, struct can_ifreq)
+
+/**
+ * Set custom bit time parameter
+ *
+ * Custom bit-times can be defined in various formats (see
+ * struct can_bittime).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting the bit-time is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCUSTOMBITTIME	_IOW(RTIOC_TYPE_CAN, 0x03, struct can_ifreq)
+
+/**
+ * Get custom bit-time parameters
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    struct can_bittime.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define SIOCGCANCUSTOMBITTIME	_IOWR(RTIOC_TYPE_CAN, 0x04, struct can_ifreq)
+
+/**
+ * Set operation mode of CAN controller
+ *
+ * See @ref CAN_MODE "CAN controller modes" for available modes.
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EAGAIN: (@ref CAN_MODE_START, @ref CAN_MODE_STOP) Could not successfully
+ *            set mode, hardware is busy. Try again.
+ * - -EINVAL: (@ref CAN_MODE_START) Cannot start controller,
+ *            set baud rate first.
+ * - -ENETDOWN: (@ref CAN_MODE_SLEEP) Cannot go into sleep mode because
+		controller is stopped or bus off.
+ * - -EOPNOTSUPP: unknown mode
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting a CAN controller into normal operation after a bus-off can
+ * take some time (128 occurrences of 11 consecutive recessive bits).
+ * In such a case, although this IOCTL will return immediately with success
+ * and @ref SIOCGCANSTATE will report @ref CAN_STATE_ACTIVE,
+ * bus-off recovery may still be in progress. @n
+ * If a controller is bus-off, setting it into stop mode will return no error
+ * but the controller remains bus-off.
+ */
+#define SIOCSCANMODE		_IOW(RTIOC_TYPE_CAN, 0x05, struct can_ifreq)
+
+/**
+ * Get current state of CAN controller
+ *
+ * States are divided into main states and additional error indicators. A CAN
+ * controller is always in exactly one main state. CAN bus errors are
+ * registered by the CAN hardware and collected by the driver. There is one
+ * error indicator (bit) per error type. If this IOCTL is triggered the error
+ * types which occurred since the last call of this IOCTL are reported and
+ * thereafter the error indicators are cleared. See also
+ * @ref CAN_STATE "CAN controller states".
+ *
+ * @param [in,out] arg Pointer to interface request structure buffer
+ *                    (<TT>struct can_ifreq</TT>).
+ *                    <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                    <TT>ifr_ifru</TT> will be filled with an instance of
+ *                    @ref can_mode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANSTATE		_IOWR(RTIOC_TYPE_CAN, 0x06, struct can_ifreq)
+
+/**
+ * Set special controller modes
+ *
+ * Various special controller modes could be or'ed together (see
+ * @ref CAN_CTRLMODE for further information).
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No valid baud rate, see @ref can_baudrate_t.
+ * - -EAGAIN: Request could not be successfully fulfilled. Try again.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note Setting special controller modes is a configuration task. It should
+ * be done deliberately or otherwise CAN messages will likely be lost.
+ */
+#define SIOCSCANCTRLMODE	_IOW(RTIOC_TYPE_CAN, 0x07, struct can_ifreq)
+
+/**
+ * Get special controller modes
+ *
+ *
+ * @param [in] arg Pointer to interface request structure buffer
+ *                 (<TT>struct can_ifreq</TT>).
+ *                 <TT>ifr_name</TT> must hold a valid CAN interface name,
+ *                 <TT>ifr_ifru</TT> must be filled with an instance of
+ *                 @ref can_ctrlmode_t.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ * - -ENODEV: No device with specified name exists.
+ * - -EINVAL: No baud rate was set yet.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+#define SIOCGCANCTRLMODE	_IOWR(RTIOC_TYPE_CAN, 0x08, struct can_ifreq)
+
+/**
+ * Enable or disable storing a high precision timestamp upon reception of
+ * a CAN frame.
+ *
+ * A newly created socket takes no timestamps by default.
+ *
+ * @param [in] arg int variable, see @ref RTCAN_TIMESTAMPS "Timestamp switches"
+ *
+ * @return 0 on success.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Activating timestamps only affects newly received CAN messages
+ * from the bus. Frames already in the socket buffer carry no timestamps
+ * if timestamping was deactivated before. See @ref Recv "Receive"
+ * for more details.
+ */
+#define RTCAN_RTIOC_TAKE_TIMESTAMP _IOW(RTIOC_TYPE_CAN, 0x09, int)
+
+/**
+ * Specify a reception timeout for a socket
+ *
+ * Defines a timeout for all receive operations via a
+ * socket which will take effect when one of the @ref Recv "receive functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before receiving messages from the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_RCV_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0A, nanosecs_rel_t)
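+
+/*
+ * An illustrative sketch, assuming sockfd is a CAN socket descriptor
+ * and limiting blocking receives to 10 ms (the value is an example):
+ *
+ * @code
+ * nanosecs_rel_t timeout = 10000000;	// 10 ms, in ns
+ *
+ * ioctl(sockfd, RTCAN_RTIOC_RCV_TIMEOUT, &timeout);
+ * @endcode
+ */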
+
+/**
+ * Specify a transmission timeout for a socket
+ *
+ * Defines a timeout for all send operations via a
+ * socket which will take effect when one of the @ref Send "send functions"
+ * is called without the @c MSG_DONTWAIT flag set.
+ *
+ * The default value for a newly created socket is an infinite timeout.
+ *
+ * @note The setting of the timeout value is not done atomically to avoid
+ * locks. Please set the value before sending messages to the socket.
+ *
+ * @param [in] arg Pointer to @ref nanosecs_rel_t variable. The value is
+ *                interpreted as relative timeout in nanoseconds in case
+ *                of a positive value.
+ *                See @ref RTDM_TIMEOUT_xxx "Timeouts" for special timeouts.
+ *
+ * @return 0 on success, otherwise:
+ * - -EFAULT: It was not possible to access user space memory area at the
+ *            specified address.
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTCAN_RTIOC_SND_TIMEOUT	_IOW(RTIOC_TYPE_CAN, 0x0B, nanosecs_rel_t)
+/** @} */
+
+#define CAN_ERR_DLC  8	/* dlc for error frames */
+
+/*!
+ * @anchor Errors @name Error mask
+ * Error class (mask) in @c can_id field of struct can_frame to
+ * be used with @ref CAN_RAW_ERR_FILTER.
+ *
+ * @b Note: Error reporting is hardware dependent and most CAN controllers
+ * report less detailed error conditions than the SJA1000.
+ *
+ * @b Note: In case of a bus-off error condition (@ref CAN_ERR_BUSOFF), the
+ * CAN controller is @b not restarted automatically. It is the application's
+ * responsibility to react appropriately, e.g. calling @ref CAN_MODE_START.
+ *
+ * @b Note: Bus error interrupts (@ref CAN_ERR_BUSERROR) are enabled when an
+ * application is calling a @ref Recv function on a socket listening
+ * on bus errors (using @ref CAN_RAW_ERR_FILTER). After one bus error has
+ * occured, the interrupt will be disabled to allow the application time for
+ * error processing and to efficiently avoid bus error interrupt flooding.
+ * @{ */
+
+/** TX timeout (netdevice driver) */
+#define CAN_ERR_TX_TIMEOUT	0x00000001U
+
+/** Lost arbitration (see @ref Error0 "data[0]") */
+#define CAN_ERR_LOSTARB		0x00000002U
+
+/** Controller problems (see @ref Error1 "data[1]") */
+#define CAN_ERR_CRTL		0x00000004U
+
+/** Protocol violations (see @ref Error2 "data[2]",
+			     @ref Error3 "data[3]") */
+#define CAN_ERR_PROT		0x00000008U
+
+/** Transceiver status (see @ref Error4 "data[4]")    */
+#define CAN_ERR_TRX		0x00000010U
+
+/** Received no ACK on transmission */
+#define CAN_ERR_ACK		0x00000020U
+
+/** Bus off */
+#define CAN_ERR_BUSOFF		0x00000040U
+
+/** Bus error (may flood!) */
+#define CAN_ERR_BUSERROR	0x00000080U
+
+/** Controller restarted */
+#define CAN_ERR_RESTARTED	0x00000100U
+
+/** Omit EFF, RTR, ERR flags */
+#define CAN_ERR_MASK		0x1FFFFFFFU
+
+/** @} */
+
+/*!
+ * @anchor Error0 @name Arbitration lost error
+ * Error in the data[0] field of struct can_frame.
+ * @{ */
+/* arbitration lost in bit ... / data[0] */
+#define CAN_ERR_LOSTARB_UNSPEC	0x00 /**< unspecified */
+				     /**< else bit number in bitstream */
+/** @} */
+
+/*!
+ * @anchor Error1 @name Controller problems
+ * Error in the data[1] field of struct can_frame.
+ * @{ */
+/* error status of CAN-controller / data[1] */
+#define CAN_ERR_CRTL_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_CRTL_RX_OVERFLOW 0x01 /**< RX buffer overflow */
+#define CAN_ERR_CRTL_TX_OVERFLOW 0x02 /**< TX buffer overflow */
+#define CAN_ERR_CRTL_RX_WARNING	 0x04 /**< reached warning level for RX errors */
+#define CAN_ERR_CRTL_TX_WARNING	 0x08 /**< reached warning level for TX errors */
+#define CAN_ERR_CRTL_RX_PASSIVE	 0x10 /**< reached passive level for RX errors */
+#define CAN_ERR_CRTL_TX_PASSIVE	 0x20 /**< reached passive level for TX errors */
+/** @} */
+
+/*!
+ * @anchor Error2 @name Protocol error type
+ * Error in the data[2] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (type) / data[2] */
+#define CAN_ERR_PROT_UNSPEC	0x00 /**< unspecified */
+#define CAN_ERR_PROT_BIT	0x01 /**< single bit error */
+#define CAN_ERR_PROT_FORM	0x02 /**< frame format error */
+#define CAN_ERR_PROT_STUFF	0x04 /**< bit stuffing error */
+#define CAN_ERR_PROT_BIT0	0x08 /**< unable to send dominant bit */
+#define CAN_ERR_PROT_BIT1	0x10 /**< unable to send recessive bit */
+#define CAN_ERR_PROT_OVERLOAD	0x20 /**< bus overload */
+#define CAN_ERR_PROT_ACTIVE	0x40 /**< active error announcement */
+#define CAN_ERR_PROT_TX		0x80 /**< error occured on transmission */
+/** @} */
+
+/*!
+ * @anchor Error3 @name Protocol error location
+ * Error in the data[3] field of struct can_frame.
+ * @{ */
+/* error in CAN protocol (location) / data[3] */
+#define CAN_ERR_PROT_LOC_UNSPEC	 0x00 /**< unspecified */
+#define CAN_ERR_PROT_LOC_SOF	 0x03 /**< start of frame */
+#define CAN_ERR_PROT_LOC_ID28_21 0x02 /**< ID bits 28 - 21 (SFF: 10 - 3) */
+#define CAN_ERR_PROT_LOC_ID20_18 0x06 /**< ID bits 20 - 18 (SFF: 2 - 0 )*/
+#define CAN_ERR_PROT_LOC_SRTR	 0x04 /**< substitute RTR (SFF: RTR) */
+#define CAN_ERR_PROT_LOC_IDE	 0x05 /**< identifier extension */
+#define CAN_ERR_PROT_LOC_ID17_13 0x07 /**< ID bits 17-13 */
+#define CAN_ERR_PROT_LOC_ID12_05 0x0F /**< ID bits 12-5 */
+#define CAN_ERR_PROT_LOC_ID04_00 0x0E /**< ID bits 4-0 */
+#define CAN_ERR_PROT_LOC_RTR	 0x0C /**< RTR */
+#define CAN_ERR_PROT_LOC_RES1	 0x0D /**< reserved bit 1 */
+#define CAN_ERR_PROT_LOC_RES0	 0x09 /**< reserved bit 0 */
+#define CAN_ERR_PROT_LOC_DLC	 0x0B /**< data length code */
+#define CAN_ERR_PROT_LOC_DATA	 0x0A /**< data section */
+#define CAN_ERR_PROT_LOC_CRC_SEQ 0x08 /**< CRC sequence */
+#define CAN_ERR_PROT_LOC_CRC_DEL 0x18 /**< CRC delimiter */
+#define CAN_ERR_PROT_LOC_ACK	 0x19 /**< ACK slot */
+#define CAN_ERR_PROT_LOC_ACK_DEL 0x1B /**< ACK delimiter */
+#define CAN_ERR_PROT_LOC_EOF	 0x1A /**< end of frame */
+#define CAN_ERR_PROT_LOC_INTERM	 0x12 /**< intermission */
+/** @} */
+
+/*!
+ * @anchor Error4 @name Protocol error location
+ * Error in the data[4] field of struct can_frame.
+ * @{ */
+/* error status of CAN-transceiver / data[4] */
+/*                                               CANH CANL */
+#define CAN_ERR_TRX_UNSPEC		0x00 /**< 0000 0000 */
+#define CAN_ERR_TRX_CANH_NO_WIRE	0x04 /**< 0000 0100 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_BAT	0x05 /**< 0000 0101 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_VCC	0x06 /**< 0000 0110 */
+#define CAN_ERR_TRX_CANH_SHORT_TO_GND	0x07 /**< 0000 0111 */
+#define CAN_ERR_TRX_CANL_NO_WIRE	0x40 /**< 0100 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_BAT	0x50 /**< 0101 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_VCC	0x60 /**< 0110 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_GND	0x70 /**< 0111 0000 */
+#define CAN_ERR_TRX_CANL_SHORT_TO_CANH	0x80 /**< 1000 0000 */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_CAN_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h
new file mode 100644
index 0000000..82612a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpio.h
@@ -0,0 +1,43 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_GPIO_H
+#define _RTDM_UAPI_GPIO_H
+
+struct rtdm_gpio_readout {
+	nanosecs_abs_t timestamp;
+	__s32 value;
+};
+
+#define GPIO_RTIOC_DIR_OUT	_IOW(RTDM_CLASS_GPIO, 0, int)
+#define GPIO_RTIOC_DIR_IN	_IO(RTDM_CLASS_GPIO, 1)
+#define GPIO_RTIOC_IRQEN	_IOW(RTDM_CLASS_GPIO, 2, int) /* GPIO trigger */
+#define GPIO_RTIOC_IRQDIS	_IO(RTDM_CLASS_GPIO, 3)
+#define GPIO_RTIOC_REQS		_IO(RTDM_CLASS_GPIO, 4)
+#define GPIO_RTIOC_RELS		_IO(RTDM_CLASS_GPIO, 5)
+#define GPIO_RTIOC_TS_MONO	_IOR(RTDM_CLASS_GPIO, 7, int)
+#define GPIO_RTIOC_TS_REAL	_IOR(RTDM_CLASS_GPIO, 8, int)
+#define GPIO_RTIOC_TS		GPIO_RTIOC_TS_REAL
+
+#define GPIO_TRIGGER_NONE		0x0 /* unspecified */
+#define GPIO_TRIGGER_EDGE_RISING	0x1
+#define GPIO_TRIGGER_EDGE_FALLING	0x2
+#define GPIO_TRIGGER_LEVEL_HIGH		0x4
+#define GPIO_TRIGGER_LEVEL_LOW		0x8
+#define GPIO_TRIGGER_MASK		0xf
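+
+/*
+ * An illustrative sketch (not part of this header): arming a
+ * rising-edge interrupt with monotonic timestamping, then reading
+ * timestamped pin values. The device node path is a placeholder.
+ *
+ * @code
+ * struct rtdm_gpio_readout rdo;
+ * int on = 1, trigger = GPIO_TRIGGER_EDGE_RISING;
+ * int fd;
+ *
+ * fd = open("/dev/rtdm/<gpiochip>/gpioN", O_RDWR);	// placeholder path
+ * ioctl(fd, GPIO_RTIOC_TS_MONO, &on);		// timestamp on the monotonic clock
+ * ioctl(fd, GPIO_RTIOC_IRQEN, &trigger);	// arm rising-edge events
+ *
+ * read(fd, &rdo, sizeof(rdo));	// blocks until the next edge
+ * // rdo.value holds the pin state, rdo.timestamp the event time
+ * @endcode
+ */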
+
+#endif /* !_RTDM_UAPI_GPIO_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h
new file mode 100644
index 0000000..512c89c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/gpiopwm.h
@@ -0,0 +1,56 @@
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, pwm header
+ *
+ * @note Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_PWM_H
+#define _RTDM_UAPI_PWM_H
+
+#include <linux/types.h>
+
+#define RTPWM_PROFILE_VER			1
+
+struct gpiopwm {
+	unsigned int duty_cycle;
+	unsigned int range_min;
+	unsigned int range_max;
+	unsigned int period;
+	unsigned int gpio;
+};
+
+#define RTIOC_TYPE_PWM		RTDM_CLASS_PWM
+
+#define GPIOPWM_RTIOC_SET_CONFIG \
+	_IOW(RTIOC_TYPE_PWM, 0x00, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_GET_CONFIG \
+	_IOR(RTIOC_TYPE_PWM, 0x10, struct gpiopwm)
+
+#define GPIOPWM_RTIOC_START \
+	_IO(RTIOC_TYPE_PWM, 0x20)
+
+#define GPIOPWM_RTIOC_STOP \
+	_IO(RTIOC_TYPE_PWM, 0x30)
+
+#define GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE \
+	_IOW(RTIOC_TYPE_PWM, 0x40, unsigned int)
+
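+/*
+ * An illustrative sketch (not part of this header): configuring and
+ * starting a PWM, assuming fd is a descriptor opened on the gpiopwm
+ * device. Field values are placeholders and their exact interpretation
+ * is driver-defined.
+ *
+ * @code
+ * struct gpiopwm conf = {
+ *	.gpio = 23,		// pin number (example)
+ *	.period = 20000000,	// 20 ms period, in ns
+ *	.range_min = 0,
+ *	.range_max = 100,
+ *	.duty_cycle = 50,	// mid-scale within [range_min, range_max]
+ * };
+ *
+ * ioctl(fd, GPIOPWM_RTIOC_SET_CONFIG, &conf);
+ * ioctl(fd, GPIOPWM_RTIOC_START);
+ * @endcode
+ */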
+
+#endif /* !_RTDM_UAPI_PWM_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h
new file mode 100644
index 0000000..432cd9b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/ipc.h
@@ -0,0 +1,881 @@
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _RTDM_UAPI_IPC_H
+#define _RTDM_UAPI_IPC_H
+
+/**
+ * @ingroup rtdm_profiles
+ * @defgroup rtdm_ipc Real-time IPC
+ *
+ * @b Profile @b Revision: 1
+ * @n
+ * @n
+ * @par Device Characteristics
+ * @n
+ * @ref rtdm_driver_flags "Device Flags": @c RTDM_PROTOCOL_DEVICE @n
+ * @n
+ * @ref rtdm_driver.protocol_family "Protocol Family": @c PF_RTIPC @n
+ * @n
+ * @ref rtdm_driver.socket_type "Socket Type": @c SOCK_DGRAM @n
+ * @n
+ * @ref rtdm_driver_profile "Device Class": @c RTDM_CLASS_RTIPC @n
+ * @n
+ * @{
+ *
+ * @anchor rtipc_operations @name Supported operations
+ * Standard socket operations supported by the RTIPC protocols.
+ * @{
+ */
+
+/** Create an endpoint for communication in the AF_RTIPC domain.
+ *
+ * @param[in] domain The communication domain. Must be AF_RTIPC.
+ *
+ * @param[in] type The socket type. Must be SOCK_DGRAM.
+ *
+ * @param [in] protocol Any of @ref IPCPROTO_XDDP, @ref IPCPROTO_IDDP,
+ * or @ref IPCPROTO_BUFP. @ref IPCPROTO_IPC is also valid, and refers
+ * to the default RTIPC protocol, namely @ref IPCPROTO_IDDP.
+ *
+ * @return In addition to the standard error codes for @c socket(2),
+ * the following specific error code may be returned:
+ * - -ENOPROTOOPT (Protocol is known, but not compiled in the RTIPC driver).
+ *   See @ref RTIPC_PROTO "RTIPC protocols"
+ *   for available protocols.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int socket__AF_RTIPC(int domain =AF_RTIPC, int type =SOCK_DGRAM, int protocol);
+#endif
+
+/**
+ * Close a RTIPC socket descriptor.
+ *
+ * Blocking calls to any of the @ref sendmsg__AF_RTIPC "sendmsg" or @ref
+ * recvmsg__AF_RTIPC "recvmsg" functions will be unblocked when the socket
+ * is closed and return with an error.
+ *
+ * @param[in] sockfd The socket descriptor to close.
+ *
+ * @return In addition to the standard error codes for @c close(2),
+ * the following specific error code may be returned:
+ * none
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int close__AF_RTIPC(int sockfd);
+#endif
+
+/**
+ * Bind a RTIPC socket to a port.
+ *
+ * Bind the socket to a destination port.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to bind the socket to (see struct
+ * sockaddr_ipc). The meaning of such address depends on the RTIPC
+ * protocol in use for the socket:
+ *
+ * - IPCPROTO_XDDP
+ *
+ *   This action creates an endpoint for channelling traffic between
+ *   the Xenomai and Linux domains.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_PIPE_NRDEV-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned automatically.
+ *
+ *   Upon success, the pseudo-device /dev/rtp@em N will be reserved
+ *   for this communication channel, where @em N is the assigned port
+ *   number. The non real-time side shall open this device to exchange
+ *   data over the bound socket.
+ *
+ * @anchor xddp_label_binding
+ *   If a label was assigned (see @ref XDDP_LABEL) prior to
+ *   binding the socket to a port, a registry link referring to the
+ *   created pseudo-device will be automatically set up as
+ *   @c /proc/xenomai/registry/rtipc/xddp/@em label, where @em label is the
+ *   label string passed to setsockopt() for the @ref XDDP_LABEL option.
+ *
+ * - IPCPROTO_IDDP
+ *
+ *   This action creates an endpoint for exchanging datagrams within
+ *   the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and
+ *   CONFIG_XENO_OPT_IDDP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, a free port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor iddp_label_binding
+ *   If a label was assigned (see @ref IDDP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/iddp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @ref IDDP_LABEL
+ *   option.
+ *
+ * - IPCPROTO_BUFP
+ *
+ *   This action creates an endpoint for a one-way byte
+ *   stream within the Xenomai domain.
+ *
+ *   @em sipc_family must be AF_RTIPC, @em sipc_port is either -1,
+ *   or a valid free port number between 0 and CONFIG_XENO_OPT_BUFP_NRPORT-1.
+ *
+ *   If @em sipc_port is -1, an available port will be assigned
+ *   automatically. The real-time peer shall connect to the same port
+ *   for exchanging data over the bound socket.
+ *
+ * @anchor bufp_label_binding
+ *   If a label was assigned (see @ref BUFP_LABEL) prior to binding
+ *   the socket to a port, a registry link referring to the assigned
+ *   port number will be automatically set up as @c
+ *   /proc/xenomai/registry/rtipc/bufp/@em label, where @em label is
+ *   the label string passed to setsockopt() for the @a BUFP_LABEL
+ *   option.
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c
+ * bind(2), the following specific error code may be returned:
+ *   - -EFAULT (Invalid data address given)
+ *   - -ENOMEM (Not enough memory)
+ *   - -EINVAL (Invalid parameter)
+ *   - -EADDRINUSE (Socket already bound to a port, or no port available)
+ *   - -EAGAIN (no registry slot available, check/raise
+ *     CONFIG_XENO_OPT_REGISTRY_NRSLOTS) .
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int bind__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		   socklen_t addrlen);
+#endif
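+
+/*
+ * An illustrative sketch: binding an XDDP socket to a fixed port
+ * (the port number is an example value):
+ *
+ * @code
+ * struct sockaddr_ipc saddr;
+ * int s;
+ *
+ * s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+ *
+ * memset(&saddr, 0, sizeof(saddr));
+ * saddr.sipc_family = AF_RTIPC;
+ * saddr.sipc_port = 7;	// /dev/rtp7 will show up on the Linux side
+ * bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+ * @endcode
+ */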
+
+/**
+ * Initiate a connection on a RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param [in] addr The address to connect the socket to (see struct
+ * sockaddr_ipc).
+ *
+ * - If sipc_port is a valid port for the protocol, it is used
+ * verbatim and the connection succeeds immediately, regardless of
+ * whether the destination is bound at the time of the call.
+ *
+ * - If sipc_port is -1 and a label was assigned to the socket,
+ * connect() blocks for the requested amount of time (see @ref
+ * SO_RCVTIMEO) until a socket is bound to the same label via @c
+ * bind(2) (see @ref XDDP_LABEL, @ref IDDP_LABEL, @ref BUFP_LABEL), in
+ * which case a connection is established between both endpoints.
+ *
+ * - If sipc_port is -1 and no label was assigned to the socket, the
+ * default destination address is cleared, meaning that any subsequent
+ * write to the socket will return -EDESTADDRREQ, until a valid
+ * destination address is set via @c connect(2) or @c bind(2).
+ *
+ * @param[in] addrlen The size in bytes of the structure pointed to by
+ * @a addr.
+ *
+ * @return In addition to the standard error codes for @c connect(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int connect__AF_RTIPC(int sockfd, const struct sockaddr_ipc *addr,
+		      socklen_t addrlen);
+#endif
+
+/**
+ * Set options on RTIPC sockets.
+ *
+ * These functions allow setting various socket options.
+ * Supported Levels and Options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * setsockopt(2), the following specific error code may
+ * be returned:
+ * follow the option links above.
+ *
+ * @par Calling context:
+ * non-RT
+ */
+#ifdef DOXYGEN_CPP
+int setsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 const void *optval, socklen_t optlen);
+#endif
+/**
+ * Get options on RTIPC sockets.
+ *
+ * These functions allow getting various socket options.
+ * Supported levels and options:
+ *
+ * - Level @ref sockopts_socket "SOL_SOCKET"
+ * - Level @ref sockopts_xddp "SOL_XDDP"
+ * - Level @ref sockopts_iddp "SOL_IDDP"
+ * - Level @ref sockopts_bufp "SOL_BUFP"
+ * .
+ *
+ * @return In addition to the standard error codes for @c
+ * getsockopt(2), the specific error codes documented for each
+ * option may be returned; follow the option links above.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockopt__AF_RTIPC(int sockfd, int level, int optname,
+			 void *optval, socklen_t *optlen);
+#endif
+
+/**
+ * Send a message on an RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[in] msg The address of the message header conveying the
+ * datagram.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_OOB Send out-of-band message.  For all RTIPC protocols except
+ *   @ref IPCPROTO_BUFP, sending out-of-band data actually means
+ *   pushing them to the head of the receiving queue, so that the
+ *   reader will always receive them before normal messages. @ref
+ *   IPCPROTO_BUFP does not support out-of-band sending.
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever the message cannot be sent immediately at the
+ *   time of the call (e.g. memory shortage), but will rather return
+ *   with -EWOULDBLOCK. Unlike other RTIPC protocols, @ref
+ *   IPCPROTO_XDDP accepts but never considers MSG_DONTWAIT since
+ *   writing to a real-time XDDP endpoint is inherently a non-blocking
+ *   operation.
+ *
+ * - MSG_MORE Accumulate data before sending. This flag is accepted by
+ *   the @ref IPCPROTO_XDDP protocol only, and tells the send service
+ *   to accumulate the outgoing data into an internal streaming
+ *   buffer, instead of issuing a datagram immediately for it. See
+ *   @ref XDDP_BUFSZ for more.
+ *
+ * @note No RTIPC protocol allows for short writes, and only complete
+ * messages are sent to the peer.
+ *
+ * @return In addition to the standard error codes for @c sendmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t sendmsg__AF_RTIPC(int sockfd, const struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Receive a message from an RTIPC socket.
+ *
+ * @param[in] sockfd The RTDM file descriptor obtained from the socket
+ * creation call.
+ *
+ * @param[out] msg The address where the message header will be copied.
+ *
+ * @param [in] flags Operation flags:
+ *
+ * - MSG_DONTWAIT Non-blocking I/O operation. The caller will not be
+ *   blocked whenever no message is immediately available for receipt
+ *   at the time of the call, but will rather return with
+ *   -EWOULDBLOCK.
+ *
+ * @note @ref IPCPROTO_BUFP does not allow for short reads and always
+ * returns the requested number of bytes, except in one situation:
+ * whenever a writer is blocked waiting to send data on a buffer-full
+ * condition, while the caller would have to wait to receive a
+ * complete message.  This is usually the sign of a pathological use
+ * of the BUFP socket, such as an incorrect buffer size set via @ref
+ * BUFP_BUFSZ. In that case, a short read is allowed to prevent a
+ * deadlock.
+ *
+ * @return In addition to the standard error codes for @c recvmsg(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT
+ */
+#ifdef DOXYGEN_CPP
+ssize_t recvmsg__AF_RTIPC(int sockfd, struct msghdr *msg, int flags);
+#endif
+
+/**
+ * Get socket name.
+ *
+ * The name of the local endpoint for the socket is copied back (see
+ * struct sockaddr_ipc).
+ *
+ * @return In addition to the standard error codes for @c getsockname(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getsockname__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/**
+ * Get socket peer.
+ *
+ * The name of the remote endpoint for the socket is copied back (see
+ * struct sockaddr_ipc). This is the default destination address for
+ * messages sent on the socket. It can be set either explicitly via @c
+ * connect(2), or implicitly via @c bind(2) if no @c connect(2) was
+ * called prior to binding the socket to a port, in which case both
+ * the local and remote names are equal.
+ *
+ * @return In addition to the standard error codes for @c getpeername(2),
+ * the following specific error code may be returned:
+ * none.
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#ifdef DOXYGEN_CPP
+int getpeername__AF_RTIPC(int sockfd, struct sockaddr_ipc *addr, socklen_t *addrlen);
+#endif
+
+/** @} */
+
+#include <cobalt/uapi/kernel/types.h>
+#include <cobalt/uapi/kernel/pipe.h>
+#include <rtdm/rtdm.h>
+
+/* Address family */
+#define AF_RTIPC		111
+
+/* Protocol family */
+#define PF_RTIPC		AF_RTIPC
+
+/**
+ * @anchor RTIPC_PROTO @name RTIPC protocol list
+ * protocols for the PF_RTIPC protocol family
+ *
+ * @{ */
+enum {
+/** Default protocol (IDDP) */
+	IPCPROTO_IPC  = 0,
+/**
+ * Cross-domain datagram protocol (RT <-> non-RT).
+ *
+ * Real-time Xenomai threads and regular Linux threads may want to
+ * exchange data in a way that does not require the former to leave
+ * the real-time domain (i.e. primary mode). The RTDM-based XDDP
+ * protocol is available for this purpose.
+ *
+ * On the Linux domain side, pseudo-device files named /dev/rtp@em \<minor\>
+ * give regular POSIX threads access to non real-time communication
+ * endpoints, via the standard character-based I/O interface. On the
+ * Xenomai domain side, sockets may be bound to XDDP ports, which act
+ * as proxies to send and receive data to/from the associated
+ * pseudo-device files. Ports and pseudo-device minor numbers are
+ * paired, meaning that e.g. socket port 7 will proxy the traffic to/from
+ * /dev/rtp7.
+ *
+ * All data sent through a bound/connected XDDP socket via @c
+ * sendto(2) or @c write(2) will be passed to the peer endpoint in the
+ * Linux domain, and made available for reading via the standard @c
+ * read(2) system call. Conversely, all data sent using @c write(2)
+ * through the non real-time endpoint will be conveyed to the
+ * real-time socket endpoint, and made available to the @c recvfrom(2)
+ * or @c read(2) system calls.
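+ *
+ * For instance (a minimal sketch added for illustration, error
+ * checking omitted), with the real-time endpoint bound to port 7:
+ * @code
+ * // Xenomai domain side
+ * s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+ * saddr.sipc_family = AF_RTIPC;
+ * saddr.sipc_port = 7;
+ * bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+ * sendto(s, "hello", 5, 0, NULL, 0);
+ *
+ * // Linux domain side
+ * fd = open("/dev/rtp7", O_RDWR);
+ * read(fd, buf, sizeof(buf));	// receives the "hello" datagram
+ * @endcode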
+ */
+	IPCPROTO_XDDP = 1,
+/**
+ * Intra-domain datagram protocol (RT <-> RT).
+ *
+ * The RTDM-based IDDP protocol enables real-time threads to exchange
+ * datagrams within the Xenomai domain, via socket endpoints.
+ */
+	IPCPROTO_IDDP = 2,
+/**
+ * Buffer protocol (RT <-> RT, byte-oriented).
+ *
+ * The RTDM-based BUFP protocol implements a lightweight,
+ * byte-oriented, one-way Producer-Consumer data path. All messages
+ * written are buffered into a single memory area in strict FIFO
+ * order, until read by the consumer.
+ *
+ * This protocol always prevents short writes, and only allows short
+ * reads when a potential deadlock situation arises (i.e. readers and
+ * writers waiting for each other indefinitely).
+ */
+	IPCPROTO_BUFP = 3,
+	IPCPROTO_MAX
+};
+/** @} */
+
+/**
+ * Port number type for the RTIPC address family.
+ */
+typedef int16_t rtipc_port_t;
+
+/**
+ * Port label information structure.
+ */
+struct rtipc_port_label {
+	/** Port label string, null-terminated. */
+	char label[XNOBJECT_NAME_LEN];
+};
+
+/**
+ * Socket address structure for the RTIPC address family.
+ */
+struct sockaddr_ipc {
+	/** RTIPC address family, must be @c AF_RTIPC */
+	sa_family_t sipc_family;
+	/** Port number. */
+	rtipc_port_t sipc_port;
+};
+
+#define SOL_XDDP		311
+/**
+ * @anchor sockopts_xddp @name XDDP socket options
+ * Setting and getting XDDP socket options.
+ * @{ */
+/**
+ * XDDP label assignment
+ *
+ * ASCII label strings can be attached to XDDP ports, so that opening
+ * the non-RT endpoint can be done by specifying this symbolic device
+ * name rather than referring to a raw pseudo-device entry
+ * (i.e. /dev/rtp@em N).
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref xddp_label_binding
+ * "XDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
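+ *
+ * @par Example
+ * A minimal sketch (added for illustration, error checking omitted):
+ * @code
+ * struct rtipc_port_label plabel;
+ *
+ * strcpy(plabel.label, "xddp-demo");
+ * setsockopt(s, SOL_XDDP, XDDP_LABEL, &plabel, sizeof(plabel));
+ * // after bind(2), the non-RT side may open
+ * // /proc/xenomai/registry/rtipc/xddp/xddp-demo instead of /dev/rtpN
+ * @endcode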
+ */
+#define XDDP_LABEL		1
+/**
+ * XDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note The pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define XDDP_POOLSZ		2
+/**
+ * XDDP streaming buffer size configuration
+ *
+ * In addition to sending datagrams, real-time threads may stream data
+ * in a byte-oriented mode through the port as well. This increases
+ * the bandwidth and reduces the overhead when the overall data bound
+ * for the Linux domain is collected in small chunks, and keeping the
+ * message boundaries is not required.
+ *
+ * This feature is enabled when a non-zero buffer size is set for the
+ * socket. In that case, the real-time data accumulates into the
+ * streaming buffer when MSG_MORE is passed to any of the @ref
+ * sendmsg__AF_RTIPC "send functions", until:
+ *
+ * - the receiver from the Linux domain wakes up and consumes it,
+ * - a different source port attempts to send data to the same
+ *   destination port,
+ * - MSG_MORE is absent from the send flags,
+ * - the buffer is full,
+ * .
+ * whichever comes first.
+ *
+ * Setting *@a optval to zero disables the streaming buffer, in which
+ * case every write is conveyed in a separate datagram, regardless of
+ * MSG_MORE.
+ *
+ * @note only a single streaming buffer exists per socket. When this
+ * buffer is full, the real-time data stops accumulating and sending
+ * operations resume in plain datagram mode. Accumulation may happen
+ * again after some or all data in the streaming buffer is consumed
+ * from the Linux domain endpoint.
+ *
+ * The streaming buffer size may be adjusted multiple times during the
+ * socket lifetime; the latest configuration change will take effect
+ * when the accumulation resumes after the previous buffer was
+ * flushed.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the streaming buffer
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -ENOMEM (Not enough memory)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
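+ *
+ * @par Example
+ * A minimal sketch (added for illustration), assuming @c s is a
+ * connected XDDP socket: enable a 1 KB streaming buffer, then
+ * accumulate small chunks before flushing them to the reader:
+ * @code
+ * size_t streamsz = 1024;
+ *
+ * setsockopt(s, SOL_XDDP, XDDP_BUFSZ, &streamsz, sizeof(streamsz));
+ * send(s, "foo", 3, MSG_MORE);	// accumulated, no datagram yet
+ * send(s, "bar", 3, MSG_MORE);	// still accumulating
+ * send(s, "!", 1, 0);		// flushes the accumulated bytes
+ * @endcode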
+ */
+#define XDDP_BUFSZ		3
+/**
+ * XDDP monitoring callback
+ *
+ * Other RTDM drivers may install a user-defined callback via the @ref
+ * rtdm_setsockopt call from the inter-driver API, in order to collect
+ * particular events occurring on the channel.
+ *
+ * This notification mechanism is particularly useful to monitor a
+ * channel asynchronously while performing other tasks.
+ *
+ * The user-provided routine will be passed the RTDM file descriptor
+ * of the socket receiving the event, the event code, and an optional
+ * argument.  Four events are currently defined, see @ref XDDP_EVENTS.
+ *
+ * The XDDP_EVTIN and XDDP_EVTOUT events are fired on behalf of a
+ * fully atomic context; therefore, care must be taken to keep their
+ * overhead low. In those cases, the Xenomai services that may be
+ * called from the callback are restricted to the set allowed to a
+ * real-time interrupt handler.
+ *
+ * @param [in] level @ref sockopts_xddp "SOL_XDDP"
+ * @param [in] optname @b XDDP_MONITOR
+ * @param [in] optval Pointer to a pointer to a function of type int
+ *             (*)(int fd, int event, long arg), containing the address of the
+ *             user-defined callback. Passing a NULL callback pointer
+ *             in @a optval disables monitoring.
+ * @param [in] optlen sizeof(int (*)(int fd, int event, long arg))
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EPERM (Operation not allowed from user-space)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT, kernel space only
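+ *
+ * @par Example
+ * A minimal sketch (added for illustration, kernel space only);
+ * @c handle_event() stands for some driver-defined routine:
+ * @code
+ * static int xddp_monitor(int fd, int event, long arg)
+ * {
+ * 	if (event == XDDP_EVTOUT)
+ * 		handle_event(arg);	// arg is the message size
+ * 	return 0;
+ * }
+ *
+ * int (*monitor)(int, int, long) = xddp_monitor;
+ * rtdm_setsockopt(fd, SOL_XDDP, XDDP_MONITOR, &monitor, sizeof(monitor));
+ * @endcode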
+ */
+#define XDDP_MONITOR		4
+/** @} */
+
+/**
+ * @anchor XDDP_EVENTS @name XDDP events
+ * Specific events occurring on XDDP channels, which can be monitored
+ * via the @ref XDDP_MONITOR socket option.
+ *
+ * @{ */
+/**
+ * @ref XDDP_MONITOR "Monitor" writes to the non real-time endpoint.
+ *
+ * XDDP_EVTIN is sent when data is written to the non real-time
+ * endpoint the socket is bound to (i.e. via /dev/rtp@em N), which
+ * means that some input is pending for the real-time endpoint. The
+ * argument is the size of the incoming message.
+ */
+#define XDDP_EVTIN		1
+/**
+ * @ref XDDP_MONITOR "Monitor" reads from the non real-time endpoint.
+ *
+ * XDDP_EVTOUT is sent when the non real-time endpoint successfully
+ * reads a complete message (i.e. via /dev/rtp@em N). The argument is
+ * the size of the outgoing message.
+ */
+#define XDDP_EVTOUT		2
+/**
+ * @ref XDDP_MONITOR "Monitor" close from the non real-time endpoint.
+ *
+ * XDDP_EVTDOWN is sent when the non real-time endpoint is closed. The
+ * argument is always 0.
+ */
+#define XDDP_EVTDOWN		3
+/**
+ * @ref XDDP_MONITOR "Monitor" memory shortage for non real-time
+ * datagrams.
+ *
+ * XDDP_EVTNOBUF is sent when no memory is available from the pool to
+ * hold the message currently sent from the non real-time
+ * endpoint. The argument is the size of the failed allocation. Upon
+ * return from the callback, the caller will block and retry until
+ * enough space is available from the pool; during that process, the
+ * callback might be invoked multiple times, each time a new attempt
+ * to get the required memory fails.
+ */
+#define XDDP_EVTNOBUF		4
+/** @} */
+
+#define SOL_IDDP		312
+/**
+ * @anchor sockopts_iddp @name IDDP socket options
+ * Setting and getting IDDP socket options.
+ * @{ */
+/**
+ * IDDP label assignment
+ *
+ * ASCII label strings can be attached to IDDP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref iddp_label_binding
+ * "IDDP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_LABEL		1
+/**
+ * IDDP local pool size configuration
+ *
+ * By default, the memory needed to convey the data is pulled from
+ * Xenomai's system pool. Setting a local pool size overrides this
+ * default for the socket.
+ *
+ * If a non-zero size was configured, a local pool is allocated at
+ * binding time. This pool will provide storage for pending datagrams.
+ *
+ * It is not allowed to configure a local pool size after the socket
+ * was bound. However, multiple configuration calls are allowed prior
+ * to the binding; the last value set will be used.
+ *
+ * @note The pool memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_iddp "SOL_IDDP"
+ * @param [in] optname @b IDDP_POOLSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the local pool to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define IDDP_POOLSZ		2
+/** @} */
+
+#define SOL_BUFP		313
+/**
+ * @anchor sockopts_bufp @name BUFP socket options
+ * Setting and getting BUFP socket options.
+ * @{ */
+/**
+ * BUFP label assignment
+ *
+ * ASCII label strings can be attached to BUFP ports, in order to
+ * connect sockets to them in a more descriptive way than using plain
+ * numeric port values.
+ *
+ * When available, this label will be registered when binding, in
+ * addition to the port number (see @ref bufp_label_binding
+ * "BUFP port binding").
+ *
+ * It is not allowed to assign a label after the socket was
+ * bound. However, multiple assignment calls are allowed prior to the
+ * binding; the last label set will be used.
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_LABEL
+ * @param [in] optval Pointer to struct rtipc_port_label
+ * @param [in] optlen sizeof(struct rtipc_port_label)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
+ */
+#define BUFP_LABEL		1
+/**
+ * BUFP buffer size configuration
+ *
+ * All messages written to a BUFP socket are buffered in a single
+ * per-socket memory area. Configuring the size of such a buffer prior
+ * to binding the socket to a destination port is mandatory.
+ *
+ * It is not allowed to configure a buffer size after the socket was
+ * bound. However, multiple configuration calls are allowed prior to
+ * the binding; the last value set will be used.
+ *
+ * @note The buffer memory is obtained from the host allocator by the
+ * @ref bind__AF_RTIPC "bind call".
+ *
+ * @param [in] level @ref sockopts_bufp "SOL_BUFP"
+ * @param [in] optname @b BUFP_BUFSZ
+ * @param [in] optval Pointer to a variable of type size_t, containing
+ * the required size of the buffer to reserve at binding time
+ * @param [in] optlen sizeof(size_t)
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EFAULT (Invalid data address given)
+ * - -EALREADY (socket already bound)
+ * - -EINVAL (@a optlen is invalid or *@a optval is zero)
+ * .
+ *
+ * @par Calling context:
+ * RT/non-RT
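+ *
+ * @par Example
+ * A minimal sketch (added for illustration): the buffer size must be
+ * set before the socket may be bound:
+ * @code
+ * size_t bufsz = 32768;
+ *
+ * setsockopt(s, SOL_BUFP, BUFP_BUFSZ, &bufsz, sizeof(bufsz));
+ * // bind(2) would fail without a prior BUFP_BUFSZ setting
+ * @endcode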
+ */
+#define BUFP_BUFSZ		2
+/** @} */
+
+/**
+ * @anchor sockopts_socket @name Socket level options
+ * Setting and getting supported standard socket level options.
+ * @{ */
+/**
+ *
+ * @ref IPCPROTO_IDDP and @ref IPCPROTO_BUFP protocols support the
+ * standard SO_SNDTIMEO socket option, from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_SNDTIMEO defined_by_kernel_header_file
+#endif
+/**
+ *
+ * All RTIPC protocols support the standard SO_RCVTIMEO socket option,
+ * from the @c SOL_SOCKET level.
+ *
+ * @see @c setsockopt(), @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399/
+ */
+#ifdef DOXYGEN_CPP
+#define SO_RCVTIMEO defined_by_kernel_header_file
+#endif
+/** @} */
+
+/**
+ * @anchor rtdm_ipc_examples @name RTIPC examples
+ * @{ */
+/** @example bufp-readwrite.c */
+/** @example bufp-label.c */
+/** @example iddp-label.c */
+/** @example iddp-sendrecv.c */
+/** @example xddp-echo.c */
+/** @example xddp-label.c */
+/** @example xddp-stream.c */
+/** @} */
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_IPC_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h
new file mode 100644
index 0000000..65a0e79
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/net.h
@@ -0,0 +1,75 @@
+/***
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2005-2011 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  As a special exception to the GNU General Public license, the RTnet
+ *  project allows you to use this header file in unmodified form to produce
+ *  application programs executing in user-space which use RTnet services by
+ *  normal system calls. The resulting executable will not be covered by the
+ *  GNU General Public License merely as a result of this header file use.
+ *  Instead, this header file use will be considered normal use of RTnet and
+ *  not a "derived work" in the sense of the GNU General Public License.
+ *
+ *  This exception does not apply when the application code is built as a
+ *  static or dynamically loadable portion of the Linux kernel nor does the
+ *  exception override other reasons justifying application of the GNU General
+ *  Public License.
+ *
+ *  This exception applies only to the code released by the RTnet project
+ *  under the name RTnet and bearing this exception notice. If you copy code
+ *  from other sources into a copy of RTnet, the exception does not apply to
+ *  the code that you add in this way.
+ *
+ */
+
+#ifndef _RTDM_UAPI_NET_H
+#define _RTDM_UAPI_NET_H
+
+/* sub-classes: RTDM_CLASS_NETWORK */
+#define RTDM_SUBCLASS_RTNET     0
+
+#define RTIOC_TYPE_NETWORK      RTDM_CLASS_NETWORK
+
+/* RTnet-specific IOCTLs */
+#define RTNET_RTIOC_XMITPARAMS  _IOW(RTIOC_TYPE_NETWORK, 0x10, unsigned int)
+#define RTNET_RTIOC_PRIORITY    RTNET_RTIOC_XMITPARAMS  /* legacy */
+#define RTNET_RTIOC_TIMEOUT     _IOW(RTIOC_TYPE_NETWORK, 0x11, int64_t)
+/* RTNET_RTIOC_CALLBACK         _IOW(RTIOC_TYPE_NETWORK, 0x12, ...
+ * IOCTL only usable inside the kernel. */
+/* RTNET_RTIOC_NONBLOCK         _IOW(RTIOC_TYPE_NETWORK, 0x13, unsigned int)
+ * This IOCTL is no longer supported (and it was buggy anyway).
+ * Use RTNET_RTIOC_TIMEOUT with any negative timeout value instead. */
+#define RTNET_RTIOC_EXTPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x14, unsigned int)
+#define RTNET_RTIOC_SHRPOOL     _IOW(RTIOC_TYPE_NETWORK, 0x15, unsigned int)
+
+/* socket transmission priorities */
+#define SOCK_MAX_PRIO           0
+#define SOCK_DEF_PRIO           SOCK_MAX_PRIO + \
+				    (SOCK_MIN_PRIO-SOCK_MAX_PRIO+1)/2
+#define SOCK_MIN_PRIO           SOCK_NRT_PRIO - 1
+#define SOCK_NRT_PRIO           31
+
+/* socket transmission channels */
+#define SOCK_DEF_RT_CHANNEL     0           /* default rt xmit channel     */
+#define SOCK_DEF_NRT_CHANNEL    1           /* default non-rt xmit channel */
+#define SOCK_USER_CHANNEL       2           /* first user-defined channel  */
+
+/* argument construction for RTNET_RTIOC_XMITPARAMS */
+#define SOCK_XMIT_PARAMS(priority, channel) ((priority) | ((channel) << 16))
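+
+/*
+ * Illustrative sketch (not part of the original header): selecting a
+ * transmission priority and channel for an RTnet socket, e.g.
+ *
+ *   unsigned int xmit = SOCK_XMIT_PARAMS(SOCK_MAX_PRIO, SOCK_DEF_RT_CHANNEL);
+ *   ioctl(fd, RTNET_RTIOC_XMITPARAMS, &xmit);
+ */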
+
+#endif  /* !_RTDM_UAPI_NET_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h
new file mode 100644
index 0000000..80c789a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/rtdm.h
@@ -0,0 +1,203 @@
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, user API header.
+ *
+ * @note Copyright (C) 2005, 2006 Jan Kiszka <jan.kiszka@web.de>
+ * @note Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ * @ingroup rtdm_user_api
+ */
+#ifndef _RTDM_UAPI_RTDM_H
+#define _RTDM_UAPI_RTDM_H
+
+/*!
+ * @addtogroup rtdm
+ * @{
+ */
+
+/*!
+ * @anchor rtdm_api_versioning @name API Versioning
+ * @{ */
+/** Common user and driver API version */
+#define RTDM_API_VER			9
+
+/** Minimum API revision compatible with the current release */
+#define RTDM_API_MIN_COMPAT_VER		9
+/** @} API Versioning */
+
+/** RTDM type for representing absolute dates. Its base type is a 64 bit
+ *  unsigned integer. The unit is 1 nanosecond. */
+typedef uint64_t nanosecs_abs_t;
+
+/** RTDM type for representing relative intervals. Its base type is a 64 bit
+ *  signed integer. The unit is 1 nanosecond. Relative intervals can also
+ *  encode the special timeouts "infinite" and "non-blocking", see
+ *  @ref RTDM_TIMEOUT_xxx. */
+typedef int64_t nanosecs_rel_t;
+
+/*!
+ * @anchor RTDM_TIMEOUT_xxx @name RTDM_TIMEOUT_xxx
+ * Special timeout values
+ * @{ */
+/** Block forever. */
+#define RTDM_TIMEOUT_INFINITE		0
+
+/** Any negative timeout means non-blocking. */
+#define RTDM_TIMEOUT_NONE		(-1)
+/** @} RTDM_TIMEOUT_xxx */
+/** @} rtdm */
+
+/*!
+ * @addtogroup rtdm_profiles
+ * @{
+ */
+
+/*!
+ * @anchor RTDM_CLASS_xxx   @name RTDM_CLASS_xxx
+ * Device classes
+ * @{ */
+#define RTDM_CLASS_PARPORT		1
+#define RTDM_CLASS_SERIAL		2
+#define RTDM_CLASS_CAN			3
+#define RTDM_CLASS_NETWORK		4
+#define RTDM_CLASS_RTMAC		5
+#define RTDM_CLASS_TESTING		6
+#define RTDM_CLASS_RTIPC		7
+#define RTDM_CLASS_COBALT		8
+#define RTDM_CLASS_UDD			9
+#define RTDM_CLASS_MEMORY		10
+#define RTDM_CLASS_GPIO			11
+#define RTDM_CLASS_SPI			12
+#define RTDM_CLASS_PWM			13
+
+#define RTDM_CLASS_MISC			223
+#define RTDM_CLASS_EXPERIMENTAL		224
+#define RTDM_CLASS_MAX			255
+/** @} RTDM_CLASS_xxx */
+
+#define RTDM_SUBCLASS_GENERIC		(-1)
+
+#define RTIOC_TYPE_COMMON		0
+
+/*!
+ * @anchor device_naming    @name Device Naming
+ * Maximum length of device names (excluding the final null character)
+ * @{
+ */
+#define RTDM_MAX_DEVNAME_LEN		31
+/** @} Device Naming */
+
+/**
+ * Device information
+ */
+typedef struct rtdm_device_info {
+	/** Device flags, see @ref dev_flags "Device Flags" for details */
+	int device_flags;
+
+	/** Device class ID, see @ref RTDM_CLASS_xxx */
+	int device_class;
+
+	/** Device sub-class, either RTDM_SUBCLASS_GENERIC or an
+	 *  RTDM_SUBCLASS_xxx definition of the related @ref rtdm_profiles
+	 *  "Device Profile" */
+	int device_sub_class;
+
+	/** Supported device profile version */
+	int profile_version;
+} rtdm_device_info_t;
+
+/*!
+ * @anchor RTDM_PURGE_xxx_BUFFER    @name RTDM_PURGE_xxx_BUFFER
+ * Flags selecting buffers to be purged
+ * @{ */
+#define RTDM_PURGE_RX_BUFFER		0x0001
+#define RTDM_PURGE_TX_BUFFER		0x0002
+/** @} RTDM_PURGE_xxx_BUFFER*/
+
+/*!
+ * @anchor common_IOCTLs    @name Common IOCTLs
+ * The following IOCTLs are common to all device profiles (see @ref
+ * rtdm_profiles).
+ * @{
+ */
+
+/**
+ * Retrieve information about a device or socket.
+ * @param[out] arg Pointer to information buffer (struct rtdm_device_info)
+ */
+#define RTIOC_DEVICE_INFO \
+	_IOR(RTIOC_TYPE_COMMON, 0x00, struct rtdm_device_info)
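+
+/*
+ * Illustrative sketch (not part of the original header): querying the
+ * class of an open RTDM device, e.g.
+ *
+ *   struct rtdm_device_info info;
+ *
+ *   if (ioctl(fd, RTIOC_DEVICE_INFO, &info) == 0 &&
+ *       info.device_class == RTDM_CLASS_SERIAL)
+ *           configure_serial_port(fd);	// hypothetical helper
+ */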
+
+/**
+ * Purge internal device or socket buffers.
+ * @param[in] arg Purge mask, see @ref RTDM_PURGE_xxx_BUFFER
+ */
+#define RTIOC_PURGE		_IOW(RTIOC_TYPE_COMMON, 0x10, int)
+/** @} Common IOCTLs */
+/** @} rtdm_profiles */
+
+/* Internally used for mapping socket functions on IOCTLs */
+struct _rtdm_getsockopt_args {
+	int level;
+	int optname;
+	void *optval;
+	socklen_t *optlen;
+};
+
+struct _rtdm_setsockopt_args {
+	int level;
+	int optname;
+	const void *optval;
+	socklen_t optlen;
+};
+
+struct _rtdm_getsockaddr_args {
+	struct sockaddr *addr;
+	socklen_t *addrlen;
+};
+
+struct _rtdm_setsockaddr_args {
+	const struct sockaddr *addr;
+	socklen_t addrlen;
+};
+
+#define _RTIOC_GETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x20,		\
+				     struct _rtdm_getsockopt_args)
+#define _RTIOC_SETSOCKOPT	_IOW(RTIOC_TYPE_COMMON, 0x21,		\
+				     struct _rtdm_setsockopt_args)
+#define _RTIOC_BIND		_IOW(RTIOC_TYPE_COMMON, 0x22,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_CONNECT		_IOW(RTIOC_TYPE_COMMON, 0x23,		\
+				     struct _rtdm_setsockaddr_args)
+#define _RTIOC_LISTEN		_IOW(RTIOC_TYPE_COMMON, 0x24,		\
+				     int)
+#define _RTIOC_ACCEPT		_IOW(RTIOC_TYPE_COMMON, 0x25,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETSOCKNAME	_IOW(RTIOC_TYPE_COMMON, 0x26,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_GETPEERNAME	_IOW(RTIOC_TYPE_COMMON, 0x27,		\
+				     struct _rtdm_getsockaddr_args)
+#define _RTIOC_SHUTDOWN		_IOW(RTIOC_TYPE_COMMON, 0x28,		\
+				     int)
+
+/* Internally used for mmap() */
+struct _rtdm_mmap_request {
+	__u64 offset;
+	size_t length;
+	int prot;
+	int flags;
+};
+
+#endif /* !_RTDM_UAPI_RTDM_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h
new file mode 100644
index 0000000..9ac691b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/serial.h
@@ -0,0 +1,407 @@
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, serial device profile header
+ *
+ * @note Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rtserial
+ */
+#ifndef _RTDM_UAPI_SERIAL_H
+#define _RTDM_UAPI_SERIAL_H
+
+#define RTSER_PROFILE_VER		3
+
+/*!
+ * @anchor RTSER_DEF_BAUD   @name RTSER_DEF_BAUD
+ * Default baud rate
+ * @{ */
+#define RTSER_DEF_BAUD			9600
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_PARITY   @name RTSER_xxx_PARITY
+ * Number of parity bits
+ * @{ */
+#define RTSER_NO_PARITY			0x00
+#define RTSER_ODD_PARITY		0x01
+#define RTSER_EVEN_PARITY		0x03
+#define RTSER_DEF_PARITY		RTSER_NO_PARITY
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_BITS   @name RTSER_xxx_BITS
+ * Number of data bits
+ * @{ */
+#define RTSER_5_BITS			0x00
+#define RTSER_6_BITS			0x01
+#define RTSER_7_BITS			0x02
+#define RTSER_8_BITS			0x03
+#define RTSER_DEF_BITS			RTSER_8_BITS
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_STOPB   @name RTSER_xxx_STOPB
+ * Number of stop bits
+ * @{ */
+#define RTSER_1_STOPB			0x00
+/** valid only in combination with 5 data bits */
+#define RTSER_1_5_STOPB			0x01
+#define RTSER_2_STOPB			0x01
+#define RTSER_DEF_STOPB			RTSER_1_STOPB
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_HAND   @name RTSER_xxx_HAND
+ * Handshake mechanisms
+ * @{ */
+#define RTSER_NO_HAND			0x00
+#define RTSER_RTSCTS_HAND		0x01
+#define RTSER_DEF_HAND			RTSER_NO_HAND
+/** @} */
+
+/*!
+ * @anchor RTSER_RS485_xxx   @name RTSER_RS485_xxx
+ * RS485 mode with automatic RTS handling
+ * @{ */
+#define RTSER_RS485_DISABLE		0x00
+#define RTSER_RS485_ENABLE		0x01
+#define RTSER_DEF_RS485			RTSER_RS485_DISABLE
+/** @} */
+
+/*!
+ * @anchor RTSER_FIFO_xxx   @name RTSER_FIFO_xxx
+ * Reception FIFO interrupt threshold
+ * @{ */
+#define RTSER_FIFO_DEPTH_1		0x00
+#define RTSER_FIFO_DEPTH_4		0x40
+#define RTSER_FIFO_DEPTH_8		0x80
+#define RTSER_FIFO_DEPTH_14		0xC0
+#define RTSER_DEF_FIFO_DEPTH		RTSER_FIFO_DEPTH_1
+/** @} */
+
+/*!
+ * @anchor RTSER_TIMEOUT_xxx   @name RTSER_TIMEOUT_xxx
+ * Special timeout values, see also @ref RTDM_TIMEOUT_xxx
+ * @{ */
+#define RTSER_TIMEOUT_INFINITE		RTDM_TIMEOUT_INFINITE
+#define RTSER_TIMEOUT_NONE		RTDM_TIMEOUT_NONE
+#define RTSER_DEF_TIMEOUT		RTDM_TIMEOUT_INFINITE
+/** @} */
+
+/*!
+ * @anchor RTSER_xxx_TIMESTAMP_HISTORY   @name RTSER_xxx_TIMESTAMP_HISTORY
+ * Timestamp history control
+ * @{ */
+#define RTSER_RX_TIMESTAMP_HISTORY	0x01
+#define RTSER_DEF_TIMESTAMP_HISTORY	0x00
+/** @} */
+
+/*!
+ * @anchor RTSER_EVENT_xxx   @name RTSER_EVENT_xxx
+ * Event bits
+ * @{ */
+#define RTSER_EVENT_RXPEND		0x01
+#define RTSER_EVENT_ERRPEND		0x02
+#define RTSER_EVENT_MODEMHI		0x04
+#define RTSER_EVENT_MODEMLO		0x08
+#define RTSER_EVENT_TXEMPTY		0x10
+#define RTSER_DEF_EVENT_MASK		0x00
+/** @} */
+
+
+/*!
+ * @anchor RTSER_SET_xxx   @name RTSER_SET_xxx
+ * Configuration mask bits
+ * @{ */
+#define RTSER_SET_BAUD			0x0001
+#define RTSER_SET_PARITY		0x0002
+#define RTSER_SET_DATA_BITS		0x0004
+#define RTSER_SET_STOP_BITS		0x0008
+#define RTSER_SET_HANDSHAKE		0x0010
+#define RTSER_SET_FIFO_DEPTH		0x0020
+#define RTSER_SET_TIMEOUT_RX		0x0100
+#define RTSER_SET_TIMEOUT_TX		0x0200
+#define RTSER_SET_TIMEOUT_EVENT		0x0400
+#define RTSER_SET_TIMESTAMP_HISTORY	0x0800
+#define RTSER_SET_EVENT_MASK		0x1000
+#define RTSER_SET_RS485			0x2000
+/** @} */
+
+
+/*!
+ * @anchor RTSER_LSR_xxx   @name RTSER_LSR_xxx
+ * Line status bits
+ * @{ */
+#define RTSER_LSR_DATA			0x01
+#define RTSER_LSR_OVERRUN_ERR		0x02
+#define RTSER_LSR_PARITY_ERR		0x04
+#define RTSER_LSR_FRAMING_ERR		0x08
+#define RTSER_LSR_BREAK_IND		0x10
+#define RTSER_LSR_THR_EMTPY		0x20
+#define RTSER_LSR_TRANSM_EMPTY		0x40
+#define RTSER_LSR_FIFO_ERR		0x80
+#define RTSER_SOFT_OVERRUN_ERR		0x0100
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MSR_xxx   @name RTSER_MSR_xxx
+ * Modem status bits
+ * @{ */
+#define RTSER_MSR_DCTS			0x01
+#define RTSER_MSR_DDSR			0x02
+#define RTSER_MSR_TERI			0x04
+#define RTSER_MSR_DDCD			0x08
+#define RTSER_MSR_CTS			0x10
+#define RTSER_MSR_DSR			0x20
+#define RTSER_MSR_RI			0x40
+#define RTSER_MSR_DCD			0x80
+/** @} */
+
+
+/*!
+ * @anchor RTSER_MCR_xxx   @name RTSER_MCR_xxx
+ * Modem control bits
+ * @{ */
+#define RTSER_MCR_DTR			0x01
+#define RTSER_MCR_RTS			0x02
+#define RTSER_MCR_OUT1			0x04
+#define RTSER_MCR_OUT2			0x08
+#define RTSER_MCR_LOOP			0x10
+/** @} */
+
+
+/*!
+ * @anchor RTSER_BREAK_xxx   @name RTSER_BREAK_xxx
+ * Break control
+ * @{ */
+#define RTSER_BREAK_CLR			0x00
+#define RTSER_BREAK_SET			0x01
+/** @} */
+
+
+/**
+ * Serial device configuration
+ */
+typedef struct rtser_config {
+	/** mask specifying valid fields, see @ref RTSER_SET_xxx */
+	int		config_mask;
+
+	/** baud rate, default @ref RTSER_DEF_BAUD */
+	int		baud_rate;
+
+	/** number of parity bits, see @ref RTSER_xxx_PARITY */
+	int		parity;
+
+	/** number of data bits, see @ref RTSER_xxx_BITS */
+	int		data_bits;
+
+	/** number of stop bits, see @ref RTSER_xxx_STOPB */
+	int		stop_bits;
+
+	/** handshake mechanisms, see @ref RTSER_xxx_HAND */
+	int		handshake;
+
+	/** reception FIFO interrupt threshold, see @ref RTSER_FIFO_xxx */
+	int		fifo_depth;
+
+	int		reserved;
+
+	/** reception timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	rx_timeout;
+
+	/** transmission timeout, see @ref RTSER_TIMEOUT_xxx for special
+	 *  values */
+	nanosecs_rel_t	tx_timeout;
+
+	/** event timeout, see @ref RTSER_TIMEOUT_xxx for special values */
+	nanosecs_rel_t	event_timeout;
+
+	/** enable timestamp history, see @ref RTSER_xxx_TIMESTAMP_HISTORY */
+	int		timestamp_history;
+
+	/** event mask to be used with @ref RTSER_RTIOC_WAIT_EVENT, see
+	 *  @ref RTSER_EVENT_xxx */
+	int		event_mask;
+
+	/** enable RS485 mode, see @ref RTSER_RS485_xxx */
+	int		rs485;
+} rtser_config_t;
+
+/**
+ * Serial device status
+ */
+typedef struct rtser_status {
+	/** line status register, see @ref RTSER_LSR_xxx */
+	int		line_status;
+
+	/** modem status register, see @ref RTSER_MSR_xxx */
+	int		modem_status;
+} rtser_status_t;
+
+/**
+ * Additional information about serial device events
+ */
+typedef struct rtser_event {
+	/** signalled events, see @ref RTSER_EVENT_xxx */
+	int		events;
+
+	/** number of pending input characters */
+	int		rx_pending;
+
+	/** last interrupt timestamp */
+	nanosecs_abs_t	last_timestamp;
+
+	/** reception timestamp of oldest character in input queue */
+	nanosecs_abs_t	rxpend_timestamp;
+} rtser_event_t;
+
+
+#define RTIOC_TYPE_SERIAL		RTDM_CLASS_SERIAL
+
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_SERIAL
+ * @{ */
+#define RTDM_SUBCLASS_16550A		0
+/** @} */
+
+
+/*!
+ * @anchor SERIOCTLs @name IOCTLs
+ * Serial device IOCTLs
+ * @{ */
+
+/**
+ * Get serial device configuration
+ *
+ * @param[out] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONFIG	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x00, struct rtser_config)
+
+/**
+ * Set serial device configuration
+ *
+ * @param[in] arg Pointer to configuration buffer (struct rtser_config)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EPERM is returned if the caller's context is invalid, see note below.
+ *
+ * - -ENOMEM is returned if a new history buffer for timestamps cannot be
+ * allocated.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note If rtser_config contains a valid timestamp_history and the
+ * addressed device has been opened in non-real-time context, this IOCTL must
+ * be issued in non-real-time context as well. Otherwise, this command will
+ * fail.
+ */
+#define RTSER_RTIOC_SET_CONFIG	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x01, struct rtser_config)
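+
+/*
+ * Illustrative sketch (not part of the original header): updating the
+ * baud rate and RX timeout only, leaving other settings untouched:
+ *
+ *   struct rtser_config cfg = {
+ *           .config_mask = RTSER_SET_BAUD | RTSER_SET_TIMEOUT_RX,
+ *           .baud_rate = 115200,
+ *           .rx_timeout = 1000000,	// 1 ms, in nanoseconds
+ *   };
+ *
+ *   ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg);
+ */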
+
+/**
+ * Get serial device status
+ *
+ * @param[out] arg Pointer to status buffer (struct rtser_status)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note The error states @c RTSER_LSR_OVERRUN_ERR, @c RTSER_LSR_PARITY_ERR,
+ * @c RTSER_LSR_FRAMING_ERR, and @c RTSER_SOFT_OVERRUN_ERR that may have
+ * occurred during previous read accesses to the device will be saved for
+ * reporting via this IOCTL. Upon return from @c RTSER_RTIOC_GET_STATUS, the
+ * saved state will be cleared.
+ */
+#define RTSER_RTIOC_GET_STATUS	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x02, struct rtser_status)
+
+/**
+ * Get serial device's modem control register
+ *
+ * @param[out] arg Pointer to variable receiving the content (int, see
+ *             @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_GET_CONTROL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x03, int)
+
+/**
+ * Set serial device's modem control register
+ *
+ * @param[in] arg New control register content (int, see @ref RTSER_MCR_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+#define RTSER_RTIOC_SET_CONTROL	\
+	_IOW(RTIOC_TYPE_SERIAL, 0x04, int)
+
+/**
+ * Wait on serial device events according to previously set mask
+ *
+ * @param[out] arg Pointer to event information buffer (struct rtser_event)
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EBUSY is returned if another task is already waiting on events of this
+ * device.
+ *
+ * - -EBADF is returned if the file descriptor is invalid or the device has
+ * just been closed.
+ *
+ * @coretags{mode-unrestricted}
+ */
+#define RTSER_RTIOC_WAIT_EVENT	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x05, struct rtser_event)
+
+/**
+ * Set or clear break on UART output line
+ *
+ * @param[in] arg @c RTSER_BREAK_SET or @c RTSER_BREAK_CLR (int)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note A set break condition may also be cleared on UART line
+ * reconfiguration.
+ */
+#define RTSER_RTIOC_BREAK_CTL	\
+	_IOR(RTIOC_TYPE_SERIAL, 0x06, int)
+/** @} */
+
+/*!
+ * @anchor SERutils @name RT Serial example and utility programs
+ * @{ */
+/** @example cross-link.c */
+/** @} */
+
+#endif /* !_RTDM_UAPI_SERIAL_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h
new file mode 100644
index 0000000..184a2b0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/spi.h
@@ -0,0 +1,42 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_UAPI_SPI_H
+#define _RTDM_UAPI_SPI_H
+
+#include <linux/types.h>
+
+struct rtdm_spi_config {
+	__u32 speed_hz;
+	__u16 mode;
+	__u8 bits_per_word;
+};
+
+struct rtdm_spi_iobufs {
+	__u32 io_len;
+	__u32 i_offset;
+	__u32 o_offset;
+	__u32 map_len;
+};
+
+#define SPI_RTIOC_SET_CONFIG		_IOW(RTDM_CLASS_SPI, 0, struct rtdm_spi_config)
+#define SPI_RTIOC_GET_CONFIG		_IOR(RTDM_CLASS_SPI, 1, struct rtdm_spi_config)
+#define SPI_RTIOC_SET_IOBUFS		_IOR(RTDM_CLASS_SPI, 2, struct rtdm_spi_iobufs)
+#define SPI_RTIOC_TRANSFER		_IO(RTDM_CLASS_SPI, 3)
+#define SPI_RTIOC_TRANSFER_N		_IOR(RTDM_CLASS_SPI, 4, int)
+
+#endif /* !_RTDM_UAPI_SPI_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h
new file mode 100644
index 0000000..40512c9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/testing.h
@@ -0,0 +1,212 @@
+/**
+ * @file
+ * Real-Time Driver Model for Xenomai, testing device profile header
+ *
+ * @note Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * @ingroup rttesting
+ */
+#ifndef _RTDM_UAPI_TESTING_H
+#define _RTDM_UAPI_TESTING_H
+
+#include <linux/types.h>
+
+#define RTTST_PROFILE_VER		2
+
+typedef struct rttst_bench_res {
+	__s32 avg;
+	__s32 min;
+	__s32 max;
+	__s32 overruns;
+	__s32 test_loops;
+} rttst_bench_res_t;
+
+typedef struct rttst_interm_bench_res {
+	struct rttst_bench_res last;
+	struct rttst_bench_res overall;
+} rttst_interm_bench_res_t;
+
+typedef struct rttst_overall_bench_res {
+	struct rttst_bench_res result;
+	__s32 *histogram_avg;
+	__s32 *histogram_min;
+	__s32 *histogram_max;
+} rttst_overall_bench_res_t;
+
+#define RTTST_TMBENCH_INVALID		-1 /* internal use only */
+#define RTTST_TMBENCH_TASK		0
+#define RTTST_TMBENCH_HANDLER		1
+
+typedef struct rttst_tmbench_config {
+	int mode;
+	int priority;
+	__u64 period;
+	int warmup_loops;
+	int histogram_size;
+	int histogram_bucketsize;
+	int freeze_max;
+} rttst_tmbench_config_t;
+
+struct rttst_swtest_task {
+	unsigned int index;
+	unsigned int flags;
+};
+
+/* Possible values for struct rttst_swtest_task::flags. */
+#define RTTST_SWTEST_FPU		0x1
+#define RTTST_SWTEST_USE_FPU		0x2 /* Only for kernel-space tasks. */
+#define RTTST_SWTEST_FREEZE		0x4 /* Only for kernel-space tasks. */
+
+/**
+ * @brief parameter for the RTTST_RTIOC_SWTEST_SWITCH_TO syscall
+ * @anchor rttst_swtest_dir
+ *
+ * This structure tells the RTTST_RTIOC_SWTEST_SWITCH_TO syscall
+ * which threads should be exchanged, and whether the mode
+ * (primary/secondary) of the @a from thread should be switched.
+ */
+struct rttst_swtest_dir {
+	/** Index of the thread that should be replaced. */
+	unsigned int from;
+
+	/** Index of the thread that should run. */
+	unsigned int to;
+
+	/** Whether the mode should be switched: 0 for no switch, 1 to switch. */
+	unsigned int switch_mode;
+};
+
+struct rttst_swtest_error {
+	struct rttst_swtest_dir last_switch;
+	unsigned int fp_val;
+};
+
+#define RTTST_RTDM_NORMAL_CLOSE		0
+#define RTTST_RTDM_DEFER_CLOSE_CONTEXT	1
+
+#define RTTST_RTDM_MAGIC_PRIMARY	0xfefbfefb
+#define RTTST_RTDM_MAGIC_SECONDARY	0xa5b9a5b9
+
+#define RTTST_HEAPCHECK_ZEROOVRD   1
+#define RTTST_HEAPCHECK_SHUFFLE    2
+#define RTTST_HEAPCHECK_PATTERN    4
+#define RTTST_HEAPCHECK_HOT        8
+
+struct rttst_heap_parms {
+	__u64 heap_size;
+	__u64 block_size;
+	int flags;
+	int nrstats;
+};
+
+struct rttst_heap_stats {
+	__u64 heap_size;
+	__u64 user_size;
+	__u64 block_size;
+	__s64 alloc_avg_ns;
+	__s64 alloc_max_ns;
+	__s64 free_avg_ns;
+	__s64 free_max_ns;
+	__u64 maximum_free;
+	__u64 largest_free;
+	int nrblocks;
+	int flags;
+};
+
+struct rttst_heap_stathdr {
+	int nrstats;
+	struct rttst_heap_stats *buf;
+};
+
+#define RTIOC_TYPE_TESTING		RTDM_CLASS_TESTING
+
+/*!
+ * @name Sub-Classes of RTDM_CLASS_TESTING
+ * @{ */
+/** subclass name: "timerbench" */
+#define RTDM_SUBCLASS_TIMERBENCH	0
+/** subclass name: "irqbench" */
+#define RTDM_SUBCLASS_IRQBENCH		1
+/** subclass name: "switchtest" */
+#define RTDM_SUBCLASS_SWITCHTEST	2
+/** subclass name: "rtdm" */
+#define RTDM_SUBCLASS_RTDMTEST		3
+/** subclass name: "heapcheck" */
+#define RTDM_SUBCLASS_HEAPCHECK		4
+/** @} */
+
+/*!
+ * @anchor TSTIOCTLs @name IOCTLs
+ * Testing device IOCTLs
+ * @{ */
+#define RTTST_RTIOC_INTERM_BENCH_RES \
+	_IOWR(RTIOC_TYPE_TESTING, 0x00, struct rttst_interm_bench_res)
+
+#define RTTST_RTIOC_TMBENCH_START \
+	_IOW(RTIOC_TYPE_TESTING, 0x10, struct rttst_tmbench_config)
+
+#define RTTST_RTIOC_TMBENCH_STOP \
+	_IOWR(RTIOC_TYPE_TESTING, 0x11, struct rttst_overall_bench_res)
+
+#define RTTST_RTIOC_SWTEST_SET_TASKS_COUNT \
+	_IOW(RTIOC_TYPE_TESTING, 0x30, __u32)
+
+#define RTTST_RTIOC_SWTEST_SET_CPU \
+	_IOW(RTIOC_TYPE_TESTING, 0x31, __u32)
+
+#define RTTST_RTIOC_SWTEST_REGISTER_UTASK \
+	_IOW(RTIOC_TYPE_TESTING, 0x32, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_CREATE_KTASK \
+	_IOWR(RTIOC_TYPE_TESTING, 0x33, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_PEND \
+	_IOR(RTIOC_TYPE_TESTING, 0x34, struct rttst_swtest_task)
+
+#define RTTST_RTIOC_SWTEST_SWITCH_TO \
+	_IOR(RTIOC_TYPE_TESTING, 0x35, struct rttst_swtest_dir)
+
+#define RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT \
+	_IOR(RTIOC_TYPE_TESTING, 0x36, __u32)
+
+#define RTTST_RTIOC_SWTEST_GET_LAST_ERROR \
+	_IOR(RTIOC_TYPE_TESTING, 0x37, struct rttst_swtest_error)
+
+#define RTTST_RTIOC_SWTEST_SET_PAUSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x38, __u32)
+
+#define RTTST_RTIOC_RTDM_DEFER_CLOSE \
+	_IOW(RTIOC_TYPE_TESTING, 0x40, __u32)
+
+#define RTTST_RTIOC_RTDM_ACTOR_GET_CPU \
+	_IOR(RTIOC_TYPE_TESTING, 0x41, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_PRIMARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x42, __u32)
+
+#define RTTST_RTIOC_RTDM_PING_SECONDARY \
+	_IOR(RTIOC_TYPE_TESTING, 0x43, __u32)
+
+#define RTTST_RTIOC_HEAP_CHECK \
+	_IOR(RTIOC_TYPE_TESTING, 0x44, struct rttst_heap_parms)
+
+#define RTTST_RTIOC_HEAP_STAT_COLLECT \
+	_IOR(RTIOC_TYPE_TESTING, 0x45, int)
+
+/** @} */
+
+#endif /* !_RTDM_UAPI_TESTING_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h b/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h
new file mode 100644
index 0000000..065df12
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/uapi/udd.h
@@ -0,0 +1,98 @@
+/**
+ * @file
+ * This file is part of the Xenomai project.
+ *
+ * @author Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UAPI_UDD_H
+#define _RTDM_UAPI_UDD_H
+
+/**
+ * @addtogroup rtdm_udd
+ *
+ * @{
+ */
+
+/**
+ * @anchor udd_signotify
+ * @brief UDD event notification descriptor
+ *
+ * This structure shall be used to pass the information required to
+ * enable/disable the notification by signal upon interrupt receipt.
+ *
+ * If PID is zero or negative, the notification is disabled.
+ * Otherwise, the Cobalt thread whose PID is given will receive the
+ * Cobalt signal also mentioned, along with the count of interrupts at
+ * the time of the receipt stored in siginfo.si_int. A Cobalt thread
+ * must explicitly wait for notifications using the sigwaitinfo() or
+ * sigtimedwait() services (no asynchronous mode available).
+ */
+struct udd_signotify {
+	/**
+	 * PID of the Cobalt thread to notify upon interrupt
+	 * receipt. If @a pid is zero or negative, the notification is
+	 * disabled.
+	 */
+	pid_t pid;
+	/**
+	 * Signal number to send to PID for notifying, which must be
+	 * in the range [SIGRTMIN .. SIGRTMAX] inclusive. This value
+	 * is not considered if @a pid is zero or negative.
+	 */
+	int sig;
+};
+
+/**
+ * @anchor udd_ioctl_codes @name UDD_IOCTL
+ * IOCTL requests
+ *
+ * @{
+ */
+
+/**
+ * Enable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core enables the interrupt line in the
+ * interrupt controller before returning to the caller.
+ */
+#define UDD_RTIOC_IRQEN		_IO(RTDM_CLASS_UDD, 0)
+/**
+ * Disable the interrupt line. The UDD-class mini-driver should handle
+ * this request when received through its ->ioctl() handler if
+ * provided. Otherwise, the UDD core disables the interrupt line in
+ * the interrupt controller before returning to the caller.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQEN request for a
+ * custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQDIS	_IO(RTDM_CLASS_UDD, 1)
+/**
+ * Enable/Disable signal notification upon interrupt event. A valid
+ * @ref udd_signotify "notification descriptor" must be passed along
+ * with this request, which is handled by the UDD core directly.
+ *
+ * @note The mini-driver must handle the UDD_RTIOC_IRQDIS request for
+ * a custom IRQ from its ->ioctl() handler, otherwise such request
+ * receives -EIO from the UDD core.
+ */
+#define UDD_RTIOC_IRQSIG	_IOW(RTDM_CLASS_UDD, 2, struct udd_signotify)
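+
+/*
+ * Illustrative sketch (not part of the original header): requesting a
+ * SIGRTMIN notification upon each interrupt; waiter_tid stands for the
+ * kernel TID of the Cobalt thread to be signaled:
+ *
+ *   struct udd_signotify sn;
+ *
+ *   sn.pid = waiter_tid;
+ *   sn.sig = SIGRTMIN;
+ *   ioctl(fd, UDD_RTIOC_IRQSIG, &sn);
+ */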
+
+/** @} */
+/** @} */
+
+#endif /* !_RTDM_UAPI_UDD_H */
diff --git a/kernel/xenomai-v3.2.4/include/rtdm/udd.h b/kernel/xenomai-v3.2.4/include/rtdm/udd.h
new file mode 100644
index 0000000..41e028f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/rtdm/udd.h
@@ -0,0 +1,26 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _RTDM_UDD_H
+#define _RTDM_UDD_H
+
+#include <rtdm/rtdm.h>
+#include <rtdm/uapi/udd.h>
+
+#endif /* !_RTDM_UDD_H */
diff --git a/kernel/xenomai-v3.2.4/include/smokey/Makefile.am b/kernel/xenomai-v3.2.4/include/smokey/Makefile.am
new file mode 100644
index 0000000..a0074fb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/smokey/Makefile.am
@@ -0,0 +1,3 @@
+includesubdir = $(includedir)/smokey
+
+includesub_HEADERS = smokey.h
diff --git a/kernel/xenomai-v3.2.4/include/smokey/smokey.h b/kernel/xenomai-v3.2.4/include/smokey/smokey.h
new file mode 100644
index 0000000..0ac1e8d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/smokey/smokey.h
@@ -0,0 +1,274 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_SMOKEY_SMOKEY_H
+#define _XENOMAI_SMOKEY_SMOKEY_H
+
+#include <stdarg.h>
+#include <pthread.h>
+#include <boilerplate/list.h>
+#include <boilerplate/libc.h>
+#include <copperplate/clockobj.h>
+#include <xenomai/init.h>
+
+#ifdef HAVE_FORK
+#define do_fork fork
+#else
+#define do_fork vfork
+#endif
+
+#define SMOKEY_INT(__name) {		\
+	 .name = # __name,		\
+	 .parser = smokey_int,		\
+	 .matched = 0,			\
+	 }
+
+#define SMOKEY_BOOL(__name) {		\
+	 .name = # __name,		\
+	 .parser = smokey_bool,		\
+	 .matched = 0,			\
+	 }
+
+#define SMOKEY_STRING(__name) {		\
+	 .name = # __name,		\
+	 .parser = smokey_string,	\
+	 .matched = 0,			\
+	 }
+
+#define SMOKEY_SIZE(__name) {		\
+	 .name = # __name,		\
+	 .parser = smokey_size,		\
+	 .matched = 0,			\
+	 }
+
+#define SMOKEY_ARGLIST(__args...)  ((struct smokey_arg[]){ __args })
+
+#define SMOKEY_NOARGS  (((struct smokey_arg[]){ { .name = NULL } }))
+
+struct smokey_arg {
+	const char *name;
+	int (*parser)(const char *s,
+		      struct smokey_arg *arg);
+	union {
+		int n_val;
+		char *s_val;
+		size_t l_val;
+	} u;
+	int matched;
+};
+
+struct smokey_test {
+	const char *name;
+	struct smokey_arg *args;
+	int nargs;
+	const char *description;
+	int (*run)(struct smokey_test *t,
+		   int argc, char *const argv[]);
+	struct {
+		int id;
+		struct pvholder next;
+	} __reserved;
+};
+
+#define for_each_smokey_test(__pos)	\
+	pvlist_for_each_entry((__pos), &smokey_test_list, __reserved.next)
+
+#define __smokey_arg_count(__args)	\
+	(sizeof(__args) / sizeof(__args[0]))
+
+#define smokey_test_plugin(__plugin, __args, __desc)			\
+	static int run_ ## __plugin(struct smokey_test *t,		\
+				    int argc, char *const argv[]);	\
+	static struct smokey_test __plugin = {				\
+		.name = #__plugin,					\
+		.args = (__args),					\
+		.nargs = __smokey_arg_count(__args),			\
+		.description = (__desc),				\
+		.run = run_ ## __plugin,				\
+	};								\
+	__early_ctor void smokey_plugin_ ## __plugin(void);		\
+	void smokey_plugin_ ## __plugin(void)				\
+	{								\
+		smokey_register_plugin(&(__plugin));			\
+	}
+
+#define SMOKEY_ARG(__plugin, __arg)	   (smokey_lookup_arg(&(__plugin), # __arg))
+#define SMOKEY_ARG_ISSET(__plugin, __arg)  (SMOKEY_ARG(__plugin, __arg)->matched)
+#define SMOKEY_ARG_INT(__plugin, __arg)	   (SMOKEY_ARG(__plugin, __arg)->u.n_val)
+#define SMOKEY_ARG_BOOL(__plugin, __arg)   (!!SMOKEY_ARG_INT(__plugin, __arg))
+#define SMOKEY_ARG_STRING(__plugin, __arg) (SMOKEY_ARG(__plugin, __arg)->u.s_val)
+#define SMOKEY_ARG_SIZE(__plugin, __arg)   (SMOKEY_ARG(__plugin, __arg)->u.l_val)
+
+#define smokey_arg_isset(__t, __name)      (smokey_lookup_arg(__t, __name)->matched)
+#define smokey_arg_int(__t, __name)	   (smokey_lookup_arg(__t, __name)->u.n_val)
+#define smokey_arg_bool(__t, __name)       (!!smokey_arg_int(__t, __name))
+#define smokey_arg_string(__t, __name)     (smokey_lookup_arg(__t, __name)->u.s_val)
+#define smokey_arg_size(__t, __name)       (smokey_lookup_arg(__t, __name)->u.l_val)
+
+#define smokey_check_errno(__expr)					\
+	({                                                              \
+		int __ret = (__expr);					\
+		if (__ret < 0) {					\
+			__ret = -errno;					\
+			__smokey_warning(__FILE__, __LINE__, "%s: %s",	\
+					 #__expr, strerror(errno));	\
+		}							\
+		__ret;							\
+	})
+
+#define smokey_check_status(__expr)					\
+	({                                                              \
+		int __ret = (__expr);					\
+		if (__ret) {						\
+			__smokey_warning(__FILE__, __LINE__, "%s: %s",	\
+					 #__expr, strerror(__ret));	\
+			__ret = -__ret;					\
+		}							\
+		__ret;							\
+	})
+
+#define smokey_assert(__expr)						\
+	({                                                              \
+		int __ret = (__expr);					\
+		if (!__ret) 						\
+			__smokey_warning(__FILE__, __LINE__,		\
+					 "assertion failed: %s", #__expr); \
+		__ret;							\
+	})
+
+#define smokey_warning(__fmt, __args...)	\
+	__smokey_warning(__FILE__, __LINE__, __fmt, ##__args)
+
+#define __T(__ret, __action)					\
+	({							\
+		(__ret) = (__action);				\
+		if (__ret) {					\
+			if ((__ret) > 0)			\
+				(__ret) = -(__ret);		\
+			smokey_warning("FAILED: %s (=%s)",	\
+				       __stringify(__action),	\
+				       symerror(__ret));	\
+		}						\
+		(__ret) == 0;					\
+	})
+
+#define __F(__ret, __action)					\
+	({							\
+		(__ret) = (__action);				\
+		if ((__ret) == 0)				\
+			smokey_warning("FAILED: %s (=0)",	\
+				       __stringify(__action));	\
+		else if ((__ret) > 0)				\
+			(__ret) = -(__ret);			\
+		(__ret) != 0;					\
+	})
+
+#define __Terrno(__ret, __action)				\
+	({							\
+		(__ret) = (__action);				\
+		if (__ret) {					\
+			(__ret) = -errno;			\
+			smokey_warning("FAILED: %s (=%s)",	\
+				       __stringify(__action),	\
+				       symerror(__ret));	\
+		}						\
+		(__ret) == 0;					\
+	})
+
+#define __Tassert(__expr)					\
+	({							\
+		int __ret = !!(__expr);				\
+		if (!__ret)					\
+			smokey_warning("FAILED: %s (=false)",	\
+				       __stringify(__expr));	\
+		__ret;						\
+	})
+
+#define __Fassert(__expr)					\
+	({							\
+		int __ret = (__expr);				\
+		if (__ret)					\
+			smokey_warning("FAILED: %s (=true)",	\
+				       __stringify(__expr));	\
+		!__ret;						\
+	})
+
+struct smokey_barrier {
+	pthread_mutex_t lock;
+	pthread_cond_t barrier;
+	int signaled;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void smokey_register_plugin(struct smokey_test *t);
+
+int smokey_int(const char *s, struct smokey_arg *arg);
+
+int smokey_bool(const char *s, struct smokey_arg *arg);
+
+int smokey_string(const char *s, struct smokey_arg *arg);
+
+int smokey_size(const char *s, struct smokey_arg *arg);
+
+struct smokey_arg *smokey_lookup_arg(struct smokey_test *t,
+				     const char *arg);
+
+int smokey_parse_args(struct smokey_test *t,
+		      int argc, char *const argv[]);
+
+void smokey_vatrace(const char *fmt, va_list ap);
+
+void smokey_trace(const char *fmt, ...);
+
+void smokey_note(const char *fmt, ...);
+
+void __smokey_warning(const char *file, int lineno,
+		      const char *fmt, ...);
+
+int smokey_barrier_init(struct smokey_barrier *b);
+
+void smokey_barrier_destroy(struct smokey_barrier *b);
+
+int smokey_barrier_wait(struct smokey_barrier *b);
+
+int smokey_barrier_timedwait(struct smokey_barrier *b,
+			     struct timespec *ts);
+
+void smokey_barrier_release(struct smokey_barrier *b);
+
+int smokey_fork_exec(const char *path, const char *arg);
+
+int smokey_modprobe(const char *name, bool silent);
+
+int smokey_rmmod(const char *name);
+
+#ifdef __cplusplus
+}
+#endif
+
+extern struct pvlistobj smokey_test_list;
+
+extern int smokey_keep_going;
+
+extern int smokey_verbose_mode;
+
+extern int smokey_on_vm;
+
+#endif /* _XENOMAI_SMOKEY_SMOKEY_H */
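/*
 * Illustrative sketch, not part of the patch: a minimal test plugin
 * built with the macros above. The test name "nap_check" and its
 * "delay_ms" argument are made up; the smokey harness is assumed to
 * parse the arguments before invoking the run handler.
 */
#include <time.h>
#include <smokey/smokey.h>

smokey_test_plugin(nap_check,
		   SMOKEY_ARGLIST(
			   SMOKEY_INT(delay_ms)
		   ),
		   "Sleep briefly, fail if clock_nanosleep() reports an error.");

static int run_nap_check(struct smokey_test *t,
			 int argc, char *const argv[])
{
	struct timespec ts = { .tv_sec = 0, .tv_nsec = 1000000 };
	int ret;

	if (SMOKEY_ARG_ISSET(nap_check, delay_ms))
		ts.tv_nsec = SMOKEY_ARG_INT(nap_check, delay_ms) * 1000000L;

	/* smokey_check_status() warns and yields -code on failure. */
	ret = smokey_check_status(clock_nanosleep(CLOCK_MONOTONIC, 0,
						  &ts, NULL));
	if (ret)
		return ret;

	smokey_trace("slept %ld ns", ts.tv_nsec);

	return 0;
}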
diff --git a/kernel/xenomai-v3.2.4/include/trank/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/Makefile.am
new file mode 100644
index 0000000..69721d9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/Makefile.am
@@ -0,0 +1,10 @@
+includesubdir = $(includedir)/trank
+
+includesub_HEADERS = trank.h
+
+if XENO_COBALT
+includesub_HEADERS += rtdk.h
+SUBDIRS = posix native rtdm
+endif
+
+DIST_SUBDIRS = posix native rtdm
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/native/Makefile.am
new file mode 100644
index 0000000..98dc104
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/Makefile.am
@@ -0,0 +1,16 @@
+includesubdir = $(includedir)/trank/native
+
+includesub_HEADERS =	\
+	alarm.h		\
+	buffer.h	\
+	cond.h		\
+	event.h		\
+	heap.h		\
+	misc.h		\
+	mutex.h		\
+	pipe.h		\
+	queue.h		\
+	sem.h		\
+	task.h		\
+	timer.h		\
+	types.h
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/alarm.h b/kernel/xenomai-v3.2.4/include/trank/native/alarm.h
new file mode 100644
index 0000000..688d625
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/alarm.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_ALARM_H
+#define _XENOMAI_TRANK_NATIVE_ALARM_H
+
+#include <trank/trank.h>
+#include <alchemy/alarm.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COMPAT_DECL(int, rt_alarm_create(RT_ALARM *alarm, const char *name));
+
+COMPAT_DECL(int, rt_alarm_wait(RT_ALARM *alarm));
+
+COMPAT_DECL(int, rt_alarm_delete(RT_ALARM *alarm));
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_NATIVE_ALARM_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/buffer.h b/kernel/xenomai-v3.2.4/include/trank/native/buffer.h
new file mode 100644
index 0000000..b32affc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/buffer.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_BUFFER_H
+#define _XENOMAI_TRANK_NATIVE_BUFFER_H
+
+#include <alchemy/buffer.h>
+
+#endif /* _XENOMAI_TRANK_NATIVE_BUFFER_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/cond.h b/kernel/xenomai-v3.2.4/include/trank/native/cond.h
new file mode 100644
index 0000000..49e1e40
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/cond.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_COND_H
+#define _XENOMAI_TRANK_NATIVE_COND_H
+
+#include <alchemy/cond.h>
+
+#endif /* _XENOMAI_TRANK_NATIVE_COND_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/event.h b/kernel/xenomai-v3.2.4/include/trank/native/event.h
new file mode 100644
index 0000000..800cbca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/event.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_EVENT_H
+#define _XENOMAI_TRANK_NATIVE_EVENT_H
+
+#include <alchemy/event.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COMPAT_DECL(int, rt_event_create(RT_EVENT *event, const char *name,
+				 unsigned long ivalue, int mode));
+
+COMPAT_DECL(int, rt_event_signal(RT_EVENT *event, unsigned long mask));
+
+COMPAT_DECL(int, rt_event_clear(RT_EVENT *event, unsigned long mask,
+				unsigned long *mask_r));
+
+#ifdef __XENO_COMPAT__
+
+static inline
+int rt_event_wait_until(RT_EVENT *event,
+			unsigned long mask, unsigned long *mask_r,
+			int mode, RTIME timeout)
+{
+	struct timespec ts;
+	unsigned int _mask;
+	int ret;
+
+	ret = rt_event_wait_timed(event, mask, &_mask, mode,
+				  alchemy_abs_timeout(timeout, &ts));
+	if (ret)
+		return ret;
+
+	*mask_r = _mask;
+
+	return 0;
+}
+
+static inline
+int rt_event_wait(RT_EVENT *event,
+		  unsigned long mask, unsigned long *mask_r,
+		  int mode, RTIME timeout)
+{
+	struct timespec ts;
+	unsigned int _mask;
+	int ret;
+
+	ret = rt_event_wait_timed(event, mask, &_mask, mode,
+				  alchemy_rel_timeout(timeout, &ts));
+	if (ret)
+		return ret;
+
+	*mask_r = _mask;
+
+	return 0;
+}
+
+#endif /* __XENO_COMPAT__ */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_NATIVE_EVENT_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/heap.h b/kernel/xenomai-v3.2.4/include/trank/native/heap.h
new file mode 100644
index 0000000..8beed18
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/heap.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_HEAP_H
+#define _XENOMAI_TRANK_NATIVE_HEAP_H
+
+#include <alchemy/heap.h>
+
+#define H_MAPPABLE  0
+#define H_SHARED    0
+#define H_NONCACHED 0
+#define H_DMA	    0
+
+#endif /* _XENOMAI_TRANK_NATIVE_HEAP_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/misc.h b/kernel/xenomai-v3.2.4/include/trank/native/misc.h
new file mode 100644
index 0000000..ea242d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/misc.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_MISC_H
+#define _XENOMAI_TRANK_NATIVE_MISC_H
+
+#include <stdint.h>
+#include <errno.h>
+#include <trank/trank.h>
+
+#define IORN_IOPORT  0
+#define IORN_IOMEM   0
+
+typedef struct rt_ioregion {
+} RT_IOREGION;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+__deprecated
+static inline int rt_io_get_region(RT_IOREGION *iorn,
+				   const char *name,
+				   uint64_t start,
+				   uint64_t len,
+				   int flags)
+{
+	trank_warning("service should be provided by a RTDM driver");
+	return -ENOSYS;
+}
+
+__deprecated
+static inline int rt_io_put_region(RT_IOREGION *iorn)
+{
+	trank_warning("service should be provided by an RTDM driver");
+	return -ENOSYS;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_NATIVE_MISC_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/mutex.h b/kernel/xenomai-v3.2.4/include/trank/native/mutex.h
new file mode 100644
index 0000000..9c8683b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/mutex.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_MUTEX_H
+#define _XENOMAI_TRANK_NATIVE_MUTEX_H
+
+#include <alchemy/mutex.h>
+
+#endif /* _XENOMAI_TRANK_NATIVE_MUTEX_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/pipe.h b/kernel/xenomai-v3.2.4/include/trank/native/pipe.h
new file mode 100644
index 0000000..42f56a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/pipe.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_PIPE_H
+#define _XENOMAI_TRANK_NATIVE_PIPE_H
+
+#include <alchemy/pipe.h>
+#include <trank/trank.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+COMPAT_DECL(int, rt_pipe_create(RT_PIPE *pipe,
+				const char *name,
+				int minor, size_t poolsize));
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_NATIVE_PIPE_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/queue.h b/kernel/xenomai-v3.2.4/include/trank/native/queue.h
new file mode 100644
index 0000000..d144a22
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/queue.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_QUEUE_H
+#define _XENOMAI_TRANK_NATIVE_QUEUE_H
+
+#include <alchemy/queue.h>
+
+#define Q_SHARED  0
+#define Q_DMA     0
+
+#endif /* _XENOMAI_TRANK_NATIVE_QUEUE_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/sem.h b/kernel/xenomai-v3.2.4/include/trank/native/sem.h
new file mode 100644
index 0000000..34494a4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/sem.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_SEM_H
+#define _XENOMAI_TRANK_NATIVE_SEM_H
+
+#include <alchemy/sem.h>
+
+#endif /* _XENOMAI_TRANK_NATIVE_SEM_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/task.h b/kernel/xenomai-v3.2.4/include/trank/native/task.h
new file mode 100644
index 0000000..85c3cd4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/task.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_TASK_H
+#define _XENOMAI_TRANK_NATIVE_TASK_H
+
+#include <errno.h>
+#include <alchemy/task.h>
+#include <trank/trank.h>
+#include <trank/native/types.h>
+
+#define T_FPU    0
+#define T_NOSIG  0
+#define T_SUSP	 __THREAD_M_SPARE7
+
+/* Bits #24 onward are otherwise unused. */
+#define T_CPU(cpu) (1 << (24 + (cpu & 7)))
+#define T_CPUMASK  0xff000000
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+__deprecated
+static inline int rt_task_notify(RT_TASK *task, rt_sigset_t sigs)
+{
+	trank_warning("in-kernel native API is gone, rebase over RTDM");
+	return -ENOSYS;
+}
+
+COMPAT_DECL(int, rt_task_create(RT_TASK *task, const char *name,
+				int stksize, int prio, int mode));
+
+COMPAT_DECL(int, rt_task_spawn(RT_TASK *task, const char *name,
+			       int stksize, int prio, int mode,
+			       void (*entry)(void *arg), void *arg));
+
+COMPAT_DECL(int, rt_task_set_periodic(RT_TASK *task,
+				      RTIME idate, RTIME period));
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_NATIVE_TASK_H */
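/*
 * Illustrative sketch, not part of the patch: pinning a legacy task to
 * CPUs 0 and 1 through the affinity bits defined above. All names are
 * made up for the example.
 */
#include <stddef.h>
#include <trank/native/task.h>

static RT_TASK worker;

static void worker_body(void *arg)
{
	/* periodic work would go here */
}

int start_worker(void)
{
	/* Stack size 0 picks the default; priority 50. */
	return rt_task_spawn(&worker, "worker", 0, 50,
			     T_CPU(0) | T_CPU(1), worker_body, NULL);
}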
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/timer.h b/kernel/xenomai-v3.2.4/include/trank/native/timer.h
new file mode 100644
index 0000000..9cb606d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/timer.h
@@ -0,0 +1,47 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_TIMER_H
+#define _XENOMAI_TRANK_NATIVE_TIMER_H
+
+#include <errno.h>
+#include <trank/trank.h>
+#include <alchemy/timer.h>
+
+#define TM_ONESHOT  0
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+__deprecated
+static inline int rt_timer_set_mode(RTIME nstick)
+{
+#ifdef CONFIG_XENO_LORES_CLOCK_DISABLED
+	if (nstick != TM_ONESHOT) {
+		trank_warning("start program with --alchemy-clock-resolution option instead");
+		return -ENODEV;
+	}
+#endif
+	return 0;
+}
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_NATIVE_TIMER_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/native/types.h b/kernel/xenomai-v3.2.4/include/trank/native/types.h
new file mode 100644
index 0000000..f525c15
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/native/types.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_NATIVE_TYPES_H
+#define _XENOMAI_TRANK_NATIVE_TYPES_H
+
+#include <alchemy/timer.h>
+
+typedef unsigned long rt_sigset_t;
+
+#endif /* _XENOMAI_TRANK_NATIVE_TYPES_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am
new file mode 100644
index 0000000..87b4500
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/posix/Makefile.am
@@ -0,0 +1,3 @@
+includesubdir = $(includedir)/trank/posix
+
+includesub_HEADERS = pthread.h
diff --git a/kernel/xenomai-v3.2.4/include/trank/posix/pthread.h b/kernel/xenomai-v3.2.4/include/trank/posix/pthread.h
new file mode 100644
index 0000000..a7364c5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/posix/pthread.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#pragma GCC system_header
+#include_next <pthread.h>
+
+#ifndef _XENOMAI_TRANK_POSIX_PTHREAD_H
+#define _XENOMAI_TRANK_POSIX_PTHREAD_H
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/**
+ * Set the mode of the current thread (compatibility service)
+ *
+ * This service is a non-portable extension of the POSIX interface.
+ *
+ * @param clrmask set of bits to be cleared;
+ *
+ * @param setmask set of bits to be set.
+ *
+ * @param mode_r If non-NULL, @a mode_r must be a pointer to a memory
+ * location which will be written upon success with the previous set
+ * of active mode bits. If NULL, the previous set of active mode bits
+ * will not be returned.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EINVAL, some bit in @a clrmask or @a setmask is invalid.
+ *
+ * @note Setting @a clrmask and @a setmask to zero leads to a nop,
+ * only returning the previous mode if @a mode_r is a valid address.
+ *
+ * @deprecated This service is an alias to pthread_setmode_np() for
+ * source compatibility with Xenomai 2.x.
+ */
+static inline int pthread_set_mode_np(int clrmask, int setmask,
+				      int *mode_r)
+{
+	return pthread_setmode_np(clrmask, setmask, mode_r);
+}
+
+/**
+ * Set a thread name (compatibility service)
+ *
+ * This service sets the name of @a thread to @a name. This name is
+ * used when displaying information in /proc/xenomai/sched.
+ *
+ * This service is a non-portable extension of the POSIX interface.
+ *
+ * @param thread target thread;
+ *
+ * @param name name of the thread.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid.
+ *
+ * @deprecated This service is an alias to pthread_setname_np() for
+ * source compatibility with Xenomai 2.x.
+ */
+static inline int pthread_set_name_np(pthread_t thread,
+				      const char *name)
+{
+	return pthread_setname_np(thread, name);
+}
+
+int pthread_make_periodic_np(pthread_t thread,
+			     struct timespec *starttp,
+			     struct timespec *periodtp);
+
+int pthread_wait_np(unsigned long *overruns_r);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_POSIX_PTHREAD_H */
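/*
 * Illustrative sketch, not part of the patch: legacy Xenomai 2.x code
 * kept compiling through the wrappers above. The thread name is
 * arbitrary.
 */
#include <trank/posix/pthread.h>

static void legacy_setup(void)
{
	int oldmode;

	pthread_set_name_np(pthread_self(), "legacy-worker");

	/* Zero masks change nothing; just read back the mode bits. */
	pthread_set_mode_np(0, 0, &oldmode);
}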
diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdk.h b/kernel/xenomai-v3.2.4/include/trank/rtdk.h
new file mode 100644
index 0000000..e8bb6d1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/rtdk.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_RTDK_H
+#define _XENOMAI_TRANK_RTDK_H
+
+#include <stdio.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+static inline void rt_print_auto_init(int enable)
+{
+	/* stdio support is automatically enabled by libcobalt. */
+}
+
+static inline void rt_print_cleanup(void) { }
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_RTDK_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am b/kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am
new file mode 100644
index 0000000..2eae0f6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/Makefile.am
@@ -0,0 +1,8 @@
+includesubdir = $(includedir)/trank/rtdm
+
+includesub_HEADERS =	\
+	rtcan.h		\
+	rtdm.h		\
+	rtipc.h		\
+	rtserial.h	\
+	rttesting.h
diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h
new file mode 100644
index 0000000..e87e5e1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtcan.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_RTDM_RTCAN_H
+#define _XENOMAI_TRANK_RTDM_RTCAN_H
+
+#include <rtdm/can.h>
+
+#endif /* _XENOMAI_TRANK_RTDM_RTCAN_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h
new file mode 100644
index 0000000..80d874a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtdm.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_RTDM_RTDM_H
+#define _XENOMAI_TRANK_RTDM_RTDM_H
+
+#include_next <rtdm/rtdm.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+#ifndef RTDM_NO_DEFAULT_USER_API
+
+#define rt_dev_call(__call, __args...)	\
+({					\
+	int __ret;			\
+	__ret = __RT(__call(__args));	\
+	__ret < 0 ? -errno : __ret;	\
+})
+
+#define rt_dev_open(__args...)		rt_dev_call(open, __args)
+#define rt_dev_ioctl(__args...)		rt_dev_call(ioctl, __args)
+#define rt_dev_recvfrom(__args...)	rt_dev_call(recvfrom, __args)
+
+static inline int rt_dev_socket(int domain, int type, int protocol)
+{
+	return rt_dev_call(socket, domain, type, protocol);
+}
+
+static inline ssize_t rt_dev_recvmsg(int fd, struct msghdr *msg, int flags)
+{
+	return rt_dev_call(recvmsg, fd, msg, flags);
+}
+
+static inline ssize_t rt_dev_recv(int fd, void *buf, size_t len, int flags)
+{
+	return rt_dev_call(recvfrom, fd, buf, len, flags, NULL, NULL);
+}
+
+static inline ssize_t rt_dev_sendmsg(int fd, const struct msghdr *msg, int flags)
+{
+	return rt_dev_call(sendmsg, fd, msg, flags);
+}
+
+static inline ssize_t rt_dev_sendto(int fd, const void *buf, size_t len,
+				    int flags, const struct sockaddr *to,
+				    socklen_t tolen)
+{
+	struct iovec iov;
+	struct msghdr msg;
+
+	iov.iov_base = (void *)buf;
+	iov.iov_len = len;
+
+	msg.msg_name = (struct sockaddr *)to;
+	msg.msg_namelen = tolen;
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_control = NULL;
+	msg.msg_controllen = 0;
+
+	return rt_dev_call(sendmsg, fd, &msg, flags);
+}
+
+static inline ssize_t rt_dev_send(int fd, const void *buf, size_t len,
+				  int flags)
+{
+	return rt_dev_call(sendto, fd, buf, len, flags, NULL, 0);
+}
+
+static inline int rt_dev_getsockopt(int fd, int level, int optname,
+				    void *optval, socklen_t *optlen)
+{
+	struct _rtdm_getsockopt_args args = {
+		level, optname, optval, optlen
+	};
+
+	return rt_dev_call(ioctl, fd, _RTIOC_GETSOCKOPT, &args);
+}
+
+static inline int rt_dev_setsockopt(int fd, int level, int optname,
+				    const void *optval, socklen_t optlen)
+{
+	struct _rtdm_setsockopt_args args = {
+		level, optname, (void *)optval, optlen
+	};
+
+	return rt_dev_call(ioctl, fd, _RTIOC_SETSOCKOPT, &args);
+}
+
+static inline int rt_dev_bind(int fd, const struct sockaddr *my_addr,
+			      socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = { my_addr, addrlen };
+
+	return rt_dev_call(ioctl, fd, _RTIOC_BIND, &args);
+}
+
+static inline int rt_dev_connect(int fd, const struct sockaddr *serv_addr,
+				 socklen_t addrlen)
+{
+	struct _rtdm_setsockaddr_args args = { serv_addr, addrlen };
+
+	return rt_dev_call(ioctl, fd, _RTIOC_CONNECT, &args);
+}
+
+static inline int rt_dev_listen(int fd, int backlog)
+{
+	return rt_dev_call(ioctl, fd, _RTIOC_LISTEN, backlog);
+}
+
+static inline int rt_dev_accept(int fd, struct sockaddr *addr,
+				socklen_t *addrlen)
+{
+	struct _rtdm_getsockaddr_args args = { addr, addrlen };
+
+	return rt_dev_call(ioctl, fd, _RTIOC_ACCEPT, &args);
+}
+
+static inline int rt_dev_getsockname(int fd, struct sockaddr *name,
+				     socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = { name, namelen };
+
+	return rt_dev_call(ioctl, fd, _RTIOC_GETSOCKNAME, &args);
+}
+
+static inline int rt_dev_getpeername(int fd, struct sockaddr *name,
+				     socklen_t *namelen)
+{
+	struct _rtdm_getsockaddr_args args = { name, namelen };
+
+	return rt_dev_call(ioctl, fd, _RTIOC_GETPEERNAME, &args);
+}
+
+static inline int rt_dev_shutdown(int fd, int how)
+{
+	return rt_dev_call(ioctl, fd, _RTIOC_SHUTDOWN, how);
+}
+
+static inline int rt_dev_close(int fd)
+{
+	return rt_dev_call(close, fd);
+}
+
+static inline ssize_t rt_dev_write(int fd, const void *buf, size_t len)
+{
+	return rt_dev_call(write, fd, buf, len);
+}
+
+static inline ssize_t rt_dev_read(int fd, void *buf, size_t len)
+{
+	return rt_dev_call(read, fd, buf, len);
+}
+
+#endif /* !RTDM_NO_DEFAULT_USER_API */
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_TRANK_RTDM_RTDM_H */
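/*
 * Illustrative sketch, not part of the patch: Xenomai 2.x rt_dev_*()
 * calls mapping onto POSIX-style I/O against an RTDM device. The
 * device name is hypothetical.
 */
#include <fcntl.h>
#include <trank/rtdm/rtdm.h>

ssize_t send_frame(const void *buf, size_t len)
{
	ssize_t ret;
	int fd;

	fd = rt_dev_open("/dev/rtdm/rtser0", O_RDWR);
	if (fd < 0)
		return fd;	/* already a -errno value */

	ret = rt_dev_write(fd, buf, len);
	rt_dev_close(fd);

	return ret;
}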
diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h
new file mode 100644
index 0000000..4e626f8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtipc.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_RTDM_RTIPC_H
+#define _XENOMAI_TRANK_RTDM_RTIPC_H
+
+#include <rtdm/ipc.h>
+
+#endif /* _XENOMAI_TRANK_RTDM_RTIPC_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h
new file mode 100644
index 0000000..7f2a7fb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rtserial.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_RTDM_RTSERIAL_H
+#define _XENOMAI_TRANK_RTDM_RTSERIAL_H
+
+#include <rtdm/serial.h>
+
+#endif /* _XENOMAI_TRANK_RTDM_RTSERIAL_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h b/kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h
new file mode 100644
index 0000000..b774fd0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/rtdm/rttesting.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_RTDM_RTTESTING_H
+#define _XENOMAI_TRANK_RTDM_RTTESTING_H
+
+#include <rtdm/testing.h>
+
+#endif /* _XENOMAI_TRANK_RTDM_RTTESTING_H */
diff --git a/kernel/xenomai-v3.2.4/include/trank/trank.h b/kernel/xenomai-v3.2.4/include/trank/trank.h
new file mode 100644
index 0000000..cc68837
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/trank/trank.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TRANK_TRANK_H
+#define _XENOMAI_TRANK_TRANK_H
+
+#include <boilerplate/compiler.h>
+
+#ifdef __XENO_COMPAT__
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void warning(const char *fmt, ...);
+
+#ifdef __cplusplus
+}
+#endif
+
+#define trank_warning(__fmt, __args...)	\
+	warning("%s: " __fmt, __func__, ##__args)
+
+#define __CURRENT(call)		__current_ ## call
+
+#define COMPAT_DECL(T, P)	__typeof__(T) P
+#define CURRENT_DECL(T, P)	__typeof__(T) __CURRENT(P)
+
+#else /* !__XENO_COMPAT__ */
+
+#define __CURRENT(call)		call
+
+#define COMPAT_DECL(T, P)
+#define CURRENT_DECL(T, P)	__typeof__(T) P;	\
+				__typeof__(T) __current_ ## P
+
+#define CURRENT_IMPL(T, I, A)		\
+__typeof__(T) I A __attribute__((alias("__current_" __stringify(I)), weak)); \
+__typeof__(T) __current_ ## I A
+
+#endif /* !__XENO_COMPAT__ */
+
+#endif /* _XENOMAI_TRANK_TRANK_H */
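/*
 * Illustrative sketch, not part of the patch: how a library would use
 * the non-compat (!__XENO_COMPAT__) side of these macros.
 * rt_foo_start() is a made-up service.
 */
#include <errno.h>
#include <trank/trank.h>

/* In a header: declares rt_foo_start() and __current_rt_foo_start(). */
CURRENT_DECL(int, rt_foo_start(int arg));

/*
 * In the implementation: defines __current_rt_foo_start() and makes
 * rt_foo_start() a weak alias for it, so a compat library may override
 * the legacy name while new code binds to the current symbol.
 */
CURRENT_IMPL(int, rt_foo_start, (int arg))
{
	return arg ? 0 : -EINVAL;
}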
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/Makefile.am b/kernel/xenomai-v3.2.4/include/vxworks/Makefile.am
new file mode 100644
index 0000000..849fbdf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/Makefile.am
@@ -0,0 +1,18 @@
+includesubdir = $(includedir)/vxworks
+
+includesub_HEADERS =	\
+	errnoLib.h	\
+	intLib.h	\
+	kernLib.h	\
+	lstLib.h	\
+	memPartLib.h	\
+	msgQLib.h	\
+	rngLib.h	\
+	semLib.h	\
+	sysLib.h	\
+	taskHookLib.h	\
+	taskInfo.h	\
+	taskLib.h	\
+	tickLib.h	\
+	types.h		\
+	wdLib.h
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h b/kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h
new file mode 100644
index 0000000..f434927
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/errnoLib.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_ERRNOLIB_H
+#define _XENOMAI_VXWORKS_ERRNOLIB_H
+
+#include <errno.h>
+#include <vxworks/taskLib.h>
+
+#define OK	0
+#define ERROR	(-1)
+
+#define WIND_TASK_ERR_BASE	0x00030000
+#define WIND_MEM_ERR_BASE	0x00110000
+#define WIND_SEM_ERR_BASE	0x00160000
+#define WIND_OBJ_ERR_BASE	0x003d0000
+#define WIND_MSGQ_ERR_BASE	0x00410000
+#define WIND_INT_ERR_BASE	0x00430000
+
+#define S_objLib_OBJ_ID_ERROR		(WIND_OBJ_ERR_BASE + 0x0001)
+#define S_objLib_OBJ_UNAVAILABLE	(WIND_OBJ_ERR_BASE + 0x0002)
+#define S_objLib_OBJ_DELETED		(WIND_OBJ_ERR_BASE + 0x0003)
+#define S_objLib_OBJ_TIMEOUT		(WIND_OBJ_ERR_BASE + 0x0004)
+#define S_objLib_OBJ_NO_METHOD		(WIND_OBJ_ERR_BASE + 0x0005)
+
+#define S_taskLib_NAME_NOT_FOUND	(WIND_TASK_ERR_BASE + 0x0065)
+#define S_taskLib_TASK_HOOK_TABLE_FULL	(WIND_TASK_ERR_BASE + 0x0066)
+#define S_taskLib_TASK_HOOK_NOT_FOUND	(WIND_TASK_ERR_BASE + 0x0067)
+#define S_taskLib_ILLEGAL_PRIORITY	(WIND_TASK_ERR_BASE + 0x006d)
+
+#define S_semLib_INVALID_STATE		(WIND_SEM_ERR_BASE + 0x0065)
+#define S_semLib_INVALID_OPTION		(WIND_SEM_ERR_BASE + 0x0066)
+#define S_semLib_INVALID_QUEUE_TYPE	(WIND_SEM_ERR_BASE + 0x0067)
+#define S_semLib_INVALID_OPERATION	(WIND_SEM_ERR_BASE + 0x0068)
+
+#define S_msgQLib_INVALID_MSG_LENGTH		(WIND_MSGQ_ERR_BASE + 0x0001)
+#define S_msgQLib_NON_ZERO_TIMEOUT_AT_INT_LEVEL	(WIND_MSGQ_ERR_BASE + 0x0002)
+#define S_msgQLib_INVALID_QUEUE_TYPE		(WIND_MSGQ_ERR_BASE + 0x0003)
+
+#define S_intLib_NOT_ISR_CALLABLE	(WIND_INT_ERR_BASE + 0x0001)
+
+#define S_memLib_NOT_ENOUGH_MEMORY	(WIND_MEM_ERR_BASE + 0x0001)
+#define S_memLib_INVALID_NBYTES		(WIND_MEM_ERR_BASE + 0x0002)
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void printErrno(int status);
+
+STATUS errnoSet(int status);
+
+int errnoGet(void);
+
+int errnoOfTaskGet(TASK_ID task_id);
+
+STATUS errnoOfTaskSet(TASK_ID task_id, int status);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_ERRNOLIB_H */
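/*
 * Illustrative sketch, not part of the patch: the usual VxWorks
 * error-reporting idiom these declarations support.
 */
#include <vxworks/errnoLib.h>

STATUS take_object(void)
{
	/* ... an invalid object identifier was detected ... */
	errnoSet(S_objLib_OBJ_ID_ERROR);
	return ERROR;
}

void report(void)
{
	if (take_object() == ERROR)
		printErrno(errnoGet());
}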
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/intLib.h b/kernel/xenomai-v3.2.4/include/vxworks/intLib.h
new file mode 100644
index 0000000..9fd0e76
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/intLib.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_INTLIB_H
+#define _XENOMAI_VXWORKS_INTLIB_H
+
+#include <vxworks/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+BOOL intContext(void);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_INTLIB_H */
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/kernLib.h b/kernel/xenomai-v3.2.4/include/vxworks/kernLib.h
new file mode 100644
index 0000000..dd9d5d3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/kernLib.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_KERNLIB_H
+#define _XENOMAI_VXWORKS_KERNLIB_H
+
+#include <vxworks/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+STATUS kernelTimeSlice(int ticks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_KERNLIB_H */
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/lstLib.h b/kernel/xenomai-v3.2.4/include/vxworks/lstLib.h
new file mode 100644
index 0000000..9644ca5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/lstLib.h
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_LSTLIB_H
+#define _XENOMAI_VXWORKS_LSTLIB_H
+
+#include <boilerplate/list.h>
+#include <vxworks/types.h>
+
+typedef struct LIST {
+	struct pvlistobj list;
+	int count;
+} LIST;
+
+typedef struct NODE {
+	struct pvholder link;
+	struct LIST *list;
+} NODE;
+
+static inline void lstInit(LIST *l)
+{
+	pvlist_init(&l->list);
+	l->count = 0;
+}
+
+static inline void lstAdd(LIST *l, NODE *n)
+{
+	pvholder_init(&n->link);
+	pvlist_append(&n->link, &l->list);
+	n->list = l;
+	l->count++;
+}
+
+static inline int lstCount(LIST *l)
+{
+	return l->count;
+}
+
+static inline void lstDelete(LIST *l, NODE *n)
+{
+	pvlist_remove(&n->link);
+	n->list = NULL;
+	l->count--;
+}
+
+static inline NODE *lstFirst(LIST *l)
+{
+	if (l == NULL || pvlist_empty(&l->list))
+		return NULL;
+
+	return pvlist_first_entry(&l->list, struct NODE, link);
+}
+
+static inline NODE *lstGet(LIST *l)
+{
+	struct NODE *n;
+
+	if (l == NULL || pvlist_empty(&l->list))
+		return NULL;
+
+	n = pvlist_pop_entry(&l->list, struct NODE, link);
+	n->list = NULL;
+	l->count--;
+
+	return n;
+}
+
+static inline void lstInsert(LIST *l, NODE *nprev, NODE *n)
+{
+	pvholder_init(&n->link);
+
+	if (nprev == NULL)
+		pvlist_prepend(&n->link, &l->list);
+	else
+		pvlist_insert(&n->link, &nprev->link);
+
+	n->list = l;
+	l->count++;
+}
+
+static inline NODE *lstLast(LIST *l)
+{
+	if (l == NULL || pvlist_empty(&l->list))
+		return NULL;
+
+	return pvlist_last_entry(&l->list, struct NODE, link);
+}
+
+static inline NODE *lstNext(NODE *n)
+{
+	if (n->list == NULL || &n->link == n->list->list.head.prev)
+		return NULL;
+
+	return container_of(n->link.next, struct NODE, link);
+}
+
+static inline NODE *lstPrevious(NODE *n)
+{
+	if (n->list == NULL || &n->link == n->list->list.head.next)
+		return NULL;
+
+	return container_of(n->link.prev, struct NODE, link);
+}
+
+static inline void lstFree(LIST *l)
+{
+	lstInit(l);
+}
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void lstExtract(LIST *lsrc, NODE *nstart, NODE *nend, LIST *ldst);
+
+NODE *lstNth(LIST *l, int nodenum);
+
+NODE *lstNStep(NODE *n, int steps);
+
+int lstFind(LIST *l, NODE *n);
+
+void lstConcat(LIST *ldst, LIST *lsrc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_LSTLIB_H */
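/*
 * Illustrative sketch, not part of the patch: the typical VxWorks idiom
 * of embedding the NODE first in a user structure, so a NODE pointer
 * casts back to its container.
 */
#include <stdio.h>
#include <vxworks/lstLib.h>

struct item {
	NODE node;		/* must come first for the cast below */
	int value;
};

void drain_demo(void)
{
	LIST q;
	struct item a = { .value = 1 }, b = { .value = 2 };
	struct item *it;

	lstInit(&q);
	lstAdd(&q, &a.node);	/* lstAdd() appends to the tail */
	lstAdd(&q, &b.node);

	/* lstGet() unlinks and returns the head node: FIFO order. */
	while ((it = (struct item *)lstGet(&q)) != NULL)
		printf("%d\n", it->value);
}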
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h b/kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h
new file mode 100644
index 0000000..139714c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/memPartLib.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_MEMPARTLIB_H
+#define _XENOMAI_VXWORKS_MEMPARTLIB_H
+
+#include <vxworks/types.h>
+
+typedef uintptr_t PART_ID;
+
+struct wind_part_stats {
+	unsigned long numBytesFree;
+	unsigned long numBlocksFree;
+	unsigned long numBytesAlloc;
+	unsigned long numBlocksAlloc;
+	unsigned long maxBytesAlloc;
+};
+
+typedef struct wind_part_stats MEM_PART_STATS;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+PART_ID memPartCreate(char *pPool, unsigned int poolSize);
+
+STATUS memPartAddToPool(PART_ID  partId,
+			char *pPool, unsigned int poolSize);
+
+void *memPartAlignedAlloc(PART_ID  partId,
+			  unsigned int nBytes, unsigned int alignment);
+
+void *memPartAlloc(PART_ID  partId, unsigned int nBytes);
+
+STATUS memPartFree(PART_ID partId, char *pBlock);
+
+void memAddToPool(char *pPool, unsigned int poolSize);
+
+STATUS memPartInfoGet(PART_ID partId,
+		      MEM_PART_STATS *ppartStats);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_MEMPARTLIB_H */
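/*
 * Illustrative sketch, not part of the patch: carving a private
 * allocator out of a static buffer, assuming memPartCreate() returns a
 * null id on failure.
 */
#include <stddef.h>
#include <vxworks/memPartLib.h>

static char pool[8192];

void *grab_block(unsigned int nbytes)
{
	static PART_ID part;

	if (part == 0)
		part = memPartCreate(pool, sizeof(pool));
	if (part == 0)
		return NULL;

	/* 64-byte aligned allocation from the private pool. */
	return memPartAlignedAlloc(part, nbytes, 64);
}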
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h b/kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h
new file mode 100644
index 0000000..e8e44a6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/msgQLib.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_MSGQLIB_H
+#define _XENOMAI_VXWORKS_MSGQLIB_H
+
+#include <vxworks/types.h>
+
+typedef uintptr_t MSG_Q_ID;
+
+#define MSG_PRI_NORMAL   0
+#define MSG_PRI_URGENT   1
+
+#define MSG_Q_FIFO       0x0
+#define MSG_Q_PRIORITY   0x1
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+MSG_Q_ID msgQCreate(int maxMsgs, int maxMsgLength, int options);
+
+STATUS msgQDelete(MSG_Q_ID msgQId);
+
+int msgQNumMsgs(MSG_Q_ID msgQId);
+
+int msgQReceive(MSG_Q_ID msgQId, char *buf, UINT bytes, int timeout);
+
+STATUS msgQSend(MSG_Q_ID msgQId, const char *buf, UINT bytes,
+		int timeout, int prio);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_MSGQLIB_H */
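To illustrate the queue API above, a minimal send/receive round trip. MSG_Q_FIFO, MSG_PRI_NORMAL, NO_WAIT and WAIT_FOREVER come straight from these headers; msgQReceive() is assumed to return the number of bytes copied on success, per the usual VxWorks semantics:

    #include <vxworks/msgQLib.h>

    static void demo(void)
    {
            MSG_Q_ID q = msgQCreate(16, 64, MSG_Q_FIFO);  /* 16 messages of up to 64 bytes */
            char buf[64];
            int n;

            msgQSend(q, "hello", sizeof("hello"), NO_WAIT, MSG_PRI_NORMAL);

            n = msgQReceive(q, buf, sizeof(buf), WAIT_FOREVER);
            if (n > 0) {
                    /* buf[0..n-1] holds the message */
            }

            msgQDelete(q);
    }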
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/rngLib.h b/kernel/xenomai-v3.2.4/include/vxworks/rngLib.h
new file mode 100644
index 0000000..561358d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/rngLib.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2008 Niklaus Giger <niklaus.giger@member.fsf.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_RNGLIB_H
+#define _XENOMAI_VXWORKS_RNGLIB_H
+
+#include <vxworks/types.h>
+
+typedef uintptr_t RING_ID;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+RING_ID rngCreate(int nbytes);
+
+void rngDelete(RING_ID ringId);
+
+void rngFlush(RING_ID ringId);
+
+int rngBufGet(RING_ID rngId, char *buffer, int maxbytes);
+
+int rngBufPut(RING_ID rngId, char *buffer, int nbytes);
+
+BOOL rngIsEmpty(RING_ID ringId);
+
+BOOL rngIsFull(RING_ID ringId);
+
+int rngFreeBytes(RING_ID ringId);
+
+int rngNBytes(RING_ID ringId);
+
+void rngPutAhead(RING_ID ringId, char byte, int offset);
+
+void rngMoveAhead(RING_ID ringId, int n);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif	/* !_XENOMAI_VXWORKS_RNGLIB_H */
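A byte-stream sketch for the ring API above; rngBufPut()/rngBufGet() are assumed to return the number of bytes actually transferred, as in classic rngLib:

    #include <vxworks/rngLib.h>

    static void demo(void)
    {
            RING_ID ring = rngCreate(1024);   /* 1 KiB ring buffer */
            char out[16];
            int n;

            rngBufPut(ring, "abc", 3);

            if (!rngIsEmpty(ring)) {
                    n = rngBufGet(ring, out, sizeof(out));
                    (void)n;        /* n == 3 here: the get drains what the put stored */
            }

            rngDelete(ring);
    }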
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/semLib.h b/kernel/xenomai-v3.2.4/include/vxworks/semLib.h
new file mode 100644
index 0000000..1ff9f97
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/semLib.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_SEMLIB_H
+#define _XENOMAI_VXWORKS_SEMLIB_H
+
+#include <vxworks/types.h>
+
+#define SEM_Q_FIFO           0x0
+#define SEM_Q_PRIORITY       0x1
+#define SEM_DELETE_SAFE      0x4
+#define SEM_INVERSION_SAFE   0x8
+
+typedef uintptr_t SEM_ID;
+
+typedef enum {
+    SEM_EMPTY = 0,
+    SEM_FULL
+} SEM_B_STATE;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+STATUS semGive(SEM_ID sem_id);
+
+STATUS semTake(SEM_ID sem_id, int timeout);
+
+STATUS semFlush(SEM_ID sem_id);
+
+STATUS semDelete(SEM_ID sem_id);
+
+SEM_ID semBCreate(int flags, SEM_B_STATE state);
+
+SEM_ID semMCreate(int flags);
+
+SEM_ID semCCreate(int flags, int count);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_SEMLIB_H */
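A binary-semaphore sketch for the API above; semBCreate() starts the semaphore in the given state, and semTake() with a tick-based timeout is assumed to return OK (0) when the semaphore was acquired:

    #include <vxworks/semLib.h>

    static void demo(void)
    {
            SEM_ID sem = semBCreate(SEM_Q_PRIORITY, SEM_EMPTY);

            semGive(sem);                   /* signal: empty -> full */

            if (semTake(sem, 100) == 0) {   /* wait up to 100 ticks */
                    /* event handling or critical section */
            }

            semFlush(sem);                  /* wake up all current waiters, if any */
            semDelete(sem);
    }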
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/sysLib.h b/kernel/xenomai-v3.2.4/include/vxworks/sysLib.h
new file mode 100644
index 0000000..caa7e03
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/sysLib.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_SYSLIB_H
+#define _XENOMAI_VXWORKS_SYSLIB_H
+
+#include <vxworks/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+int sysClkRateGet(void);
+
+STATUS sysClkRateSet(int hz);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_SYSLIB_H */
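The clock-rate pair above is typically used to convert between wall-clock delays and ticks; a minimal sketch:

    #include <vxworks/sysLib.h>

    /* Convert a millisecond delay into clock ticks, rounding up. */
    static int ms_to_ticks(int ms)
    {
            int hz = sysClkRateGet();

            return (ms * hz + 999) / 1000;
    }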
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h b/kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h
new file mode 100644
index 0000000..132786e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/taskHookLib.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_TASKHOOKLIB_H
+#define _XENOMAI_VXWORKS_TASKHOOKLIB_H
+
+#include <vxworks/types.h>
+#include <vxworks/taskLib.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+STATUS taskCreateHookAdd(FUNCPTR createHook);
+
+STATUS taskCreateHookDelete(FUNCPTR createHook);
+
+STATUS taskDeleteHookAdd(FUNCPTR deleteHook);
+
+STATUS taskDeleteHookDelete(FUNCPTR deleteHook);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_TASKHOOKLIB_H */
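A sketch of hook registration with the API above. The exact arguments passed to a hook are not specified in this header, so the handler below merely matches the FUNCPTR signature and ignores them:

    #include <vxworks/taskHookLib.h>

    static void on_create(long arg, ...)
    {
            /* runs whenever a task is created; arguments left unused here */
    }

    static void demo(void)
    {
            taskCreateHookAdd(on_create);
            /* ... tasks spawned here trigger on_create ... */
            taskCreateHookDelete(on_create);
    }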
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h b/kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h
new file mode 100644
index 0000000..0416efc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/taskInfo.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_TASKINFO_H
+#define _XENOMAI_VXWORKS_TASKINFO_H
+
+#include <vxworks/types.h>
+#include <vxworks/taskLib.h>
+
+typedef struct TASK_DESC {
+	TASK_ID td_tid;
+	int	td_priority;
+	int	td_status;
+	int	td_flags;
+	char	td_name[32];
+	FUNCPTR	td_entry;
+	int	td_errorStatus;
+
+	int	td_stacksize;
+	char	*td_pStackBase;
+	char	*td_pStackEnd;
+} TASK_DESC;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+const char *taskName(TASK_ID task_id);
+
+TASK_ID taskNameToId(const char *name);
+
+TASK_ID taskIdDefault(TASK_ID task_id);
+
+BOOL taskIsReady(TASK_ID task_id);
+
+BOOL taskIsSuspended(TASK_ID task_id);
+
+STATUS taskGetInfo(TASK_ID task_id, TASK_DESC *desc);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_TASKINFO_H */
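A sketch querying the descriptor above for the current task, assuming STATUS follows the usual OK (0) convention; taskIdSelf() comes from taskLib.h, which this header includes:

    #include <stdio.h>
    #include <vxworks/taskInfo.h>

    static void show_self(void)
    {
            TASK_DESC desc;

            if (taskGetInfo(taskIdSelf(), &desc) == 0)
                    printf("%s: prio=%d status=%#x stack=%d\n",
                           desc.td_name, desc.td_priority,
                           desc.td_status, desc.td_stacksize);
    }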
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/taskLib.h b/kernel/xenomai-v3.2.4/include/vxworks/taskLib.h
new file mode 100644
index 0000000..ebbe7c6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/taskLib.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_TASKLIB_H
+#define _XENOMAI_VXWORKS_TASKLIB_H
+
+#include <pthread.h>
+#include <vxworks/types.h>
+
+/* Task options: none applicable - only there for code compatibility. */
+#define VX_UNBREAKABLE   0x0002
+#define VX_FP_TASK       0x0008
+#define VX_PRIVATE_ENV   0x0080
+#define VX_NO_STACK_FILL 0x0100
+
+#define WIND_READY	0x0
+#define WIND_SUSPEND	0x1
+#define WIND_PEND	0x2
+#define WIND_DELAY	0x4
+#define WIND_DEAD	0x8
+#define WIND_STOP	0x10	/* Never reported. */
+
+typedef uintptr_t TASK_ID;
+
+typedef void (*FUNCPTR)(long arg, ...);
+
+typedef struct WIND_TCB {
+	void *opaque;
+	int status;
+	int safeCnt;
+	int flags;
+	FUNCPTR entry;
+} WIND_TCB;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+TASK_ID taskSpawn(const char *name,
+		  int prio,
+		  int flags,
+		  int stacksize,
+		  FUNCPTR entry,
+		  long arg0, long arg1, long arg2, long arg3, long arg4,
+		  long arg5, long arg6, long arg7, long arg8, long arg9);
+
+STATUS taskInit(WIND_TCB *pTcb,
+		const char *name,
+		int prio,
+		int flags,
+		char * stack __attribute__ ((unused)),
+		int stacksize,
+		FUNCPTR entry,
+		long arg0, long arg1, long arg2, long arg3, long arg4,
+		long arg5, long arg6, long arg7, long arg8, long arg9);
+
+STATUS taskActivate(TASK_ID tid);
+
+STATUS taskDelete(TASK_ID tid);
+
+STATUS taskDeleteForce(TASK_ID tid);
+
+STATUS taskSuspend(TASK_ID tid);
+
+STATUS taskResume(TASK_ID tid);
+
+STATUS taskPrioritySet(TASK_ID tid,
+		       int prio);
+
+STATUS taskPriorityGet(TASK_ID tid,
+		       int *pprio);
+
+void taskExit(int code);
+
+STATUS taskLock(void);
+
+STATUS taskUnlock(void);
+
+TASK_ID taskIdSelf(void);
+
+STATUS taskSafe(void);
+
+STATUS taskUnsafe(void);
+
+STATUS taskDelay(int ticks);
+
+STATUS taskIdVerify(TASK_ID tid);
+
+struct WIND_TCB *taskTcb(TASK_ID tid);
+
+int wind_task_normalize_priority(int wind_prio);
+
+int wind_task_denormalize_priority(int core_prio);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_TASKLIB_H */
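A spawn/delete sketch for the task API above. Note that taskSpawn() takes exactly ten long arguments after the entry point, used or not; taskIdVerify() is assumed to return OK (0) for a live task:

    #include <vxworks/taskLib.h>

    static void worker(long period, ...)
    {
            for (;;)
                    taskDelay((int)period);   /* periodic loop, period in ticks */
    }

    static void demo(void)
    {
            TASK_ID tid = taskSpawn("worker", 50, 0, 8192, worker,
                                    10, 0, 0, 0, 0, 0, 0, 0, 0, 0);

            if (taskIdVerify(tid) == 0) {
                    taskSuspend(tid);
                    taskResume(tid);
                    taskDelete(tid);
            }
    }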
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/tickLib.h b/kernel/xenomai-v3.2.4/include/vxworks/tickLib.h
new file mode 100644
index 0000000..52c96f0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/tickLib.h
@@ -0,0 +1,41 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_TICKLIB_H
+#define _XENOMAI_VXWORKS_TICKLIB_H
+
+#include <vxworks/types.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+ULONG tickGet(void);
+
+void tickSet(ULONG ticks);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_TICKLIB_H */
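Combined with sysClkRateGet() and taskDelay() from the sibling headers, tickGet() gives a simple elapsed-time measurement; a sketch:

    #include <vxworks/tickLib.h>
    #include <vxworks/sysLib.h>
    #include <vxworks/taskLib.h>

    static ULONG measure(void)
    {
            ULONG t0 = tickGet();

            taskDelay(sysClkRateGet());     /* sleep roughly one second */

            return tickGet() - t0;          /* elapsed ticks, ~ one clock rate */
    }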
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/types.h b/kernel/xenomai-v3.2.4/include/vxworks/types.h
new file mode 100644
index 0000000..0790831
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/types.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_TYPES_H
+#define _XENOMAI_VXWORKS_TYPES_H
+
+#include <stdint.h>
+
+#undef STATUS
+typedef int STATUS;
+typedef int BOOL;
+
+typedef unsigned int UINT;
+
+typedef unsigned long ULONG;
+
+#define NO_WAIT		0
+#define WAIT_FOREVER	(-1)
+
+#endif /* !_XENOMAI_VXWORKS_TYPES_H */
diff --git a/kernel/xenomai-v3.2.4/include/vxworks/wdLib.h b/kernel/xenomai-v3.2.4/include/vxworks/wdLib.h
new file mode 100644
index 0000000..bbe76aa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/vxworks/wdLib.h
@@ -0,0 +1,50 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This file satisfies the references within the emulator code
+ * mimicking a VxWorks-like API built upon the copperplate library.
+ *
+ * VxWorks is a registered trademark of Wind River Systems, Inc.
+ */
+
+#ifndef _XENOMAI_VXWORKS_WDLIB_H
+#define _XENOMAI_VXWORKS_WDLIB_H
+
+#include <vxworks/types.h>
+
+typedef uintptr_t WDOG_ID;
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+WDOG_ID wdCreate(void);
+
+STATUS wdDelete(WDOG_ID wdog_id);
+
+STATUS wdStart(WDOG_ID wdog_id,
+	       int delay,
+	       void (*handler)(long),
+	       long arg);
+
+STATUS wdCancel(WDOG_ID wdog_id);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* !_XENOMAI_VXWORKS_WDLIB_H */
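A one-shot timer sketch for the watchdog API above; the handler runs after the given tick delay unless wdCancel() disarms it first:

    #include <vxworks/wdLib.h>
    #include <vxworks/sysLib.h>

    static void on_timeout(long arg)
    {
            /* delay elapsed; arg is whatever was passed to wdStart() */
    }

    static void demo(void)
    {
            WDOG_ID wd = wdCreate();

            wdStart(wd, sysClkRateGet() / 10, on_timeout, 0);  /* ~100 ms */

            wdCancel(wd);   /* disarm before it fires */
            wdDelete(wd);
    }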
diff --git a/kernel/xenomai-v3.2.4/include/xenomai/Makefile.am b/kernel/xenomai-v3.2.4/include/xenomai/Makefile.am
new file mode 100644
index 0000000..f4d0c16
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/xenomai/Makefile.am
@@ -0,0 +1,6 @@
+includesubdir = $(includedir)/xenomai
+
+includesub_HEADERS =	\
+	init.h		\
+	tunables.h	\
+	version.h
diff --git a/kernel/xenomai-v3.2.4/include/xenomai/init.h b/kernel/xenomai-v3.2.4/include/xenomai/init.h
new file mode 100644
index 0000000..598bf53
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/xenomai/init.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_INIT_H
+#define _XENOMAI_INIT_H
+
+#include <boilerplate/setup.h>
+#include <boilerplate/ancillaries.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void xenomai_init(int *argcp, char *const **argvp);
+
+void xenomai_init_dso(int *argcp, char *const **argvp);
+
+int xenomai_main(int argc, char *const argv[]);
+
+void xenomai_usage(void);
+
+void application_usage(void);
+
+void application_version(void);
+
+extern const char *xenomai_version_string;
+
+extern const int xenomai_auto_bootstrap;
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _XENOMAI_INIT_H */
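When auto-bootstrap is not used, applications call xenomai_init() by hand before using any Xenomai service; a minimal sketch:

    #include <xenomai/init.h>

    int main(int argc, char *const argv[])
    {
            /* Consumes Xenomai's own command-line options and
               brings the runtime libraries up. */
            xenomai_init(&argc, &argv);

            /* Xenomai services may be used from this point on. */
            return 0;
    }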
diff --git a/kernel/xenomai-v3.2.4/include/xenomai/tunables.h b/kernel/xenomai-v3.2.4/include/xenomai/tunables.h
new file mode 100644
index 0000000..9eebc22
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/xenomai/tunables.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_TUNABLES_H
+#define _XENOMAI_TUNABLES_H
+
+#include <boilerplate/tunables.h>
+#include <copperplate/tunables.h>
+
+#endif /* !_XENOMAI_TUNABLES_H */
diff --git a/kernel/xenomai-v3.2.4/include/xenomai/version.h b/kernel/xenomai-v3.2.4/include/xenomai/version.h
new file mode 100644
index 0000000..bf603be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/include/xenomai/version.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _XENOMAI_VERSION_H
+#define _XENOMAI_VERSION_H
+
+#ifndef __KERNEL__
+#include <xeno_config.h>
+#include <boilerplate/compiler.h>
+#endif
+
+#define XENO_VERSION(maj, min, rev)  (((maj)<<16)|((min)<<8)|(rev))
+
+#define XENO_VERSION_CODE	XENO_VERSION(CONFIG_XENO_VERSION_MAJOR,	\
+					     CONFIG_XENO_VERSION_MINOR,	\
+					     CONFIG_XENO_REVISION_LEVEL)
+
+#define XENO_VERSION_STRING	CONFIG_XENO_VERSION_STRING
+
+#endif /* _XENOMAI_VERSION_H */
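XENO_VERSION_CODE packs major/minor/revision into a single comparable integer, which makes compile-time feature gating straightforward:

    #include <xenomai/version.h>

    #if XENO_VERSION_CODE >= XENO_VERSION(3, 2, 0)
    /* Build paths relying on 3.2-era interfaces go here. */
    #endif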
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/COPYING b/kernel/xenomai-v3.2.4/kernel/cobalt/COPYING
new file mode 100644
index 0000000..0d72637
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/COPYING
@@ -0,0 +1,281 @@
+
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig
new file mode 100644
index 0000000..3233de1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/Kconfig
@@ -0,0 +1,489 @@
+menu "Core features"
+
+config XENO_OPT_SCHED_CLASSES
+	bool "Extra scheduling classes"
+	default n
+	help
+	The Cobalt kernel implements a set of scheduling classes.
+	Each scheduling class defines its own set of rules for
+	determining when and how to select a new thread to run.
+
+	Cobalt has a built-in real-time class, which supports both
+	preemptive fixed-priority FIFO, and round-robin scheduling.
+
+	Enabling CONFIG_XENO_OPT_SCHED_CLASSES allows you to select
+	additional scheduling classes to enable in the Cobalt kernel.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_WEAK
+	bool "Weak scheduling class"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option creates a Cobalt scheduling class for mapping
+	members of the regular POSIX SCHED_FIFO/RR policies to a low
+	priority class of the Cobalt kernel, providing no real-time
+	guarantee. Therefore, up to a hundred non-real-time priority
+	levels are available from the SCHED_WEAK policy.
+
+	When CONFIG_XENO_OPT_SCHED_WEAK is disabled, Cobalt still
+	supports a single non-real-time priority level (i.e. zero
+	priority), assigned to members of the SCHED_OTHER class.
+
+	SCHED_WEAK/SCHED_OTHER threads can access Cobalt resources and
+	wait on Cobalt synchronization objects, but cannot compete for
+	the CPU with members of the real-time Cobalt classes.
+
+	Since Cobalt assumes no real-time requirement for
+	SCHED_WEAK/SCHED_OTHER threads, they are automatically moved
+	back to secondary mode upon return from any Cobalt syscall if
+	necessary, unless they hold a Cobalt mutex, which would defer
+	the transition until such mutex is released.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_TP
+	bool "Temporal partitioning"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables support for temporal partitioning.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_TP_NRPART
+	int "Number of partitions"
+	default 4
+	range 1 1024
+	depends on XENO_OPT_SCHED_TP
+	help
+	Define here the maximum number of temporal partitions the TP
+	scheduler may have to handle.
+
+config XENO_OPT_SCHED_SPORADIC
+	bool "Sporadic scheduling"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables support for the sporadic scheduling policy
+	in the Cobalt kernel (SCHED_SPORADIC), also known as POSIX
+	sporadic server.
+
+	It can be used to enforce a capped limit on the execution time
+	of a thread within a given period of time.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_SPORADIC_MAXREPL
+	int "Maximum number of pending replenishments"
+	default 8
+	range 4 16
+	depends on XENO_OPT_SCHED_SPORADIC
+	help
+	For performance reasons, the budget replenishment information
+	is statically stored on a per-thread basis. This parameter
+	defines the maximum number of replenishment requests that can
+	be pending concurrently for any given thread that undergoes
+	sporadic scheduling (system minimum is 4).
+
+config XENO_OPT_SCHED_QUOTA
+	bool "Thread groups with runtime quota"
+	default n
+	depends on XENO_OPT_SCHED_CLASSES
+	help
+	This option enables the SCHED_QUOTA scheduling policy in the
+	Cobalt kernel.
+
+	This policy enforces a limitation on the CPU consumption of
+	threads over a globally defined period, known as the quota
+	interval. This is done by pooling threads with common
+	requirements in groups, and giving each group a share of the
+	global period (see CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+
+	When threads have entirely consumed the quota allotted to the
+	group they belong to, the latter is suspended as a whole,
+	until the next quota interval starts. At this point, a new
+	runtime budget is given to each group, in accordance with its
+	share.
+
+	If in doubt, say N.
+
+config XENO_OPT_SCHED_QUOTA_PERIOD
+	int "Quota interval (us)"
+	default 10000
+	range 100 1000000000
+	depends on XENO_OPT_SCHED_QUOTA
+	help
+	The global period thread groups can get a share of.
+
+config XENO_OPT_SCHED_QUOTA_NR_GROUPS
+	int "Number of thread groups"
+	default 32
+	range 1 1024
+	depends on XENO_OPT_SCHED_QUOTA
+	help
+	The overall number of thread groups which may be defined
+	across all CPUs.
+
+config XENO_OPT_STATS
+	bool "Runtime statistics"
+	depends on XENO_OPT_VFILE
+	default y
+	help
+	This option causes the Cobalt kernel to collect various
+	per-thread runtime statistics, which are accessible through
+	the /proc/xenomai/sched/stat interface.
+
+config XENO_OPT_STATS_IRQS
+	bool "Account IRQ handlers separatly"
+	depends on XENO_OPT_STATS && IPIPE
+	default y
+	help
+	When enabled, the runtime of interrupt handlers is accounted
+	separately from the threads they interrupt. Also, the
+	occurrence of shared interrupts is accounted on a per-handler
+	basis.
+
+	This option is available to legacy I-pipe builds only.
+
+config XENO_OPT_SHIRQ
+	bool "Shared interrupts"
+	help
+	Enables support for both level- and edge-triggered shared
+	interrupts, so that multiple real-time interrupt handlers
+	are allowed to control dedicated hardware devices which are
+	configured to share the same interrupt line.
+
+config XENO_OPT_RR_QUANTUM
+	int "Round-robin quantum (us)"
+	default 1000
+	help
+	This parameter defines the duration of the default round-robin
+	time quantum, expressed in microseconds. This value may be
+	overridden internally by Cobalt services which provide their
+	own round-robin interval.
+
+config XENO_OPT_AUTOTUNE
+        tristate "Auto-tuning"
+        default y
+	select XENO_DRIVERS_AUTOTUNE
+        help
+	Enable auto-tuning capabilities. Auto-tuning is used for
+	adjusting the core timing services to the intrinsic latency of
+	the platform.
+
+config XENO_OPT_SCALABLE_SCHED
+	bool "O(1) scheduler"
+	help
+	This option causes a multi-level priority queue to be used in
+	the real-time scheduler, so that it operates in constant-time
+	regardless of the number of _concurrently runnable_ threads
+	(which might be much lower than the total number of active
+	threads).
+
+	Its use is recommended for large multi-threaded systems
+	involving more than 10 such threads; otherwise, the default
+	linear method usually performs better with lower memory
+	footprints.
+
+choice
+	prompt "Timer indexing method"
+	default XENO_OPT_TIMER_LIST if !X86_64
+	default XENO_OPT_TIMER_RBTREE if X86_64
+	help
+	This option selects the underlying data structure used for
+	ordering the outstanding software timers managed by the Cobalt
+	kernel.
+
+config XENO_OPT_TIMER_LIST
+	bool "Linear"
+	help
+	Use a linked list. Albeit O(N), this simple data structure is
+	particularly efficient when only a few timers (< 10) may be
+	concurrently outstanding at any point in time.
+
+config XENO_OPT_TIMER_RBTREE
+	bool "Tree"
+	help
+	Use a red-black tree. This data structure is efficient when a
+	high number of software timers may be concurrently
+	outstanding at any point in time.
+
+endchoice
+
+config XENO_OPT_PIPE
+	bool
+
+config XENO_OPT_MAP
+	bool
+
+config XENO_OPT_EXTCLOCK
+       bool
+
+config XENO_OPT_COBALT_EXTENSION
+       bool
+
+config XENO_OPT_VFILE
+       bool
+       depends on PROC_FS
+       default y
+
+endmenu
+
+menu "Sizes and static limits"
+
+config XENO_OPT_PIPE_NRDEV
+	int "Number of pipe devices"
+	depends on XENO_OPT_PIPE
+	default 32
+	help
+	Message pipes are bi-directional FIFO communication channels
+	allowing data exchange between Cobalt threads and regular
+	POSIX threads. Pipes natively preserve message boundaries, but
+	can also be used in byte streaming mode from kernel to
+	user-space.
+
+	This option sets the maximum number of pipe devices supported
+	in the system. Pipe devices are named /dev/rtpN where N is a
+	device minor number ranging from 0 to XENO_OPT_PIPE_NRDEV - 1.
+
+config XENO_OPT_REGISTRY_NRSLOTS
+	int "Number of registry slots"
+	default 512
+	help
+	The registry is used by the Cobalt kernel to export named
+	resources to user-space programs via the /proc interface.
+	Each named resource occupies a registry slot. This option sets
+	the maximum number of resources the registry can handle.
+
+config XENO_OPT_SYS_HEAPSZ
+	int "Size of system heap (Kb)"
+	default 4096
+	help
+	The system heap is used for various internal allocations by
+	the Cobalt kernel. The size is expressed in Kilobytes.
+
+config XENO_OPT_PRIVATE_HEAPSZ
+	int "Size of private heap (Kb)"
+	default 256
+	help
+	The Cobalt kernel implements fast IPC mechanisms within the
+	scope of a process which require a private kernel memory heap
+	to be mapped in the address space of each Xenomai application
+	process. This option can be used to set the size of this
+	per-process heap.
+
+	64k is considered a large enough size for common use cases.
+
+config XENO_OPT_SHARED_HEAPSZ
+	int "Size of shared heap (Kb)"
+	default 256
+	help
+	The Cobalt kernel implements fast IPC mechanisms between
+	processes which require a shared kernel memory heap to be
+	mapped in the address space of all Xenomai application
+	processes. This option can be used to set the size of this
+	system-wide heap.
+
+	64k is considered a large enough size for common use cases.
+
+config XENO_OPT_NRTIMERS
+       int "Maximum number of POSIX timers per process"
+       default 256
+       help
+       This tunable controls how many POSIX timers can exist at any
+       given time for each Cobalt process (a timer is created by a
+       call to the timer_create() service of the Cobalt/POSIX API).
+
+config XENO_OPT_DEBUG_TRACE_LOGSZ
+       int "Trace log size"
+       depends on XENO_OPT_DEBUG_TRACE_RELAX
+       default 16
+       help
+       The size (kilobytes) of the trace log of relax requests. Once
+       this limit is reached, subsequent traces will be silently
+       discarded.
+
+       Writing to /proc/xenomai/debug/relax empties the trace log.
+
+endmenu
+
+menu "Latency settings"
+
+config XENO_OPT_TIMING_SCHEDLAT
+	int "User scheduling latency (ns)"
+	default 0
+	help
+	The user scheduling latency is the time between the
+	termination of an interrupt handler and the execution of the
+	first instruction of the real-time application thread this
+	handler resumes. A default value of 0 (recommended) will cause
+	a pre-calibrated value to be used.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+config XENO_OPT_TIMING_KSCHEDLAT
+	int "Intra-kernel scheduling latency (ns)"
+	default 0
+	help
+	The intra-kernel scheduling latency is the time between the
+	termination of an interrupt handler and the execution of the
+	first instruction of the RTDM kernel thread this handler
+	resumes. A default value of 0 (recommended) will cause a
+	pre-calibrated value to be used.
+
+	Intra-kernel latency is usually significantly lower than user
+	scheduling latency on MMU-enabled platforms, due to CPU cache
+	latency.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+config XENO_OPT_TIMING_IRQLAT
+	int "Interrupt latency (ns)"
+	default 0
+	help
+	The interrupt latency is the time between the occurrence of an
+	IRQ and the first instruction of the interrupt handler which
+	will service it. A default value of 0 (recommended) will cause
+	a pre-calibrated value to be used.
+
+	If the auto-tuner is enabled, this value will be used as the
+	factory default when running "autotune --reset".
+
+endmenu
+
+menuconfig XENO_OPT_DEBUG
+	depends on XENO_OPT_VFILE
+	bool "Debug support"
+	help
+	  When enabled, various debugging features can be switched
+	  on. They can help to find problems in applications, drivers,
+	  and the Cobalt kernel. XENO_OPT_DEBUG by itself does not have
+	  any impact on the generated code.
+
+if XENO_OPT_DEBUG
+
+config XENO_OPT_DEBUG_COBALT
+	bool "Cobalt runtime assertions"
+	help
+	  This option activates various assertions inside the Cobalt
+	  kernel. This option has limited overhead.
+
+config XENO_OPT_DEBUG_MEMORY
+	bool "Cobalt memory checks"
+	help
+	  This option enables memory debug checks inside the Cobalt
+	  kernel. This option may induce significant overhead with large
+	  heaps.
+
+config XENO_OPT_DEBUG_CONTEXT
+       bool "Check for calling context"
+       help
+         This option enables checks for the calling context in the
+         Cobalt kernel, aimed at detecting when regular Linux routines
+         are entered from a real-time context, and conversely.
+
+config XENO_OPT_DEBUG_LOCKING
+	bool "Spinlock debugging support"
+	default y if SMP
+	help
+	  This option activates runtime assertions, and measurements
+	  of spinlocks spinning time and duration in the Cobalt
+	  kernel. It helps finding latency spots due to interrupt
+	  masked sections. Statistics about the longest masked section
+	  can be found in /proc/xenomai/debug/lock.
+
+	  This option may induce a measurable overhead on low end
+	  machines.
+
+config XENO_OPT_DEBUG_USER
+	bool "User consistency checks"
+	help
+	  This option enables a set of consistency checks for
+	  detecting wrong runtime behavior in user applications.
+
+	  With some of the debug categories, threads can ask for
+	  notification when a problem is detected, by turning on the
+	  PTHREAD_WARNSW mode bit with pthread_setmode_np().  Cobalt
+	  sends the Linux-originated SIGDEBUG signal for notifying
+	  threads, along with a reason code passed into the associated
+	  siginfo data (see pthread_setmode_np()).
+
+	  Some of these runtime checks may induce overhead, enable
+	  them for debugging purposes only.
+
+if XENO_OPT_DEBUG_USER
+
+config XENO_OPT_DEBUG_MUTEX_RELAXED
+       bool "Detect relaxed mutex owner"
+       default y
+       help
+         A thread which attempts to acquire a mutex currently owned by
+         another thread running in secondary/relaxed mode will suffer
+         unwanted latencies, due to a priority inversion. If debug
+         notifications are enabled for such a thread, it receives a
+         SIGDEBUG signal.
+
+         This option has some overhead in real-time mode over
+         contended mutexes.
+
+config XENO_OPT_DEBUG_MUTEX_SLEEP
+       bool "Detect sleeping with mutex"
+       default y
+       help
+         A thread which goes sleeping while holding a mutex is prone
+         to cause unwanted latencies to other threads serialized by
+         the same lock. If debug notifications are enabled for such a
+         thread, it receives a SIGDEBUG signal right before entering
+         sleep.
+
+         This option has noticeable overhead in real-time mode as it
+         disables the normal fast mutex operations from user-space,
+         causing a system call for each mutex acquisition/release.
+
+config XENO_OPT_DEBUG_LEGACY
+        bool "Detect usage of legacy constructs/features"
+	default n
+	help
+	    Turns on detection of legacy API usage.
+
+endif # XENO_OPT_DEBUG_USER
+
+config XENO_OPT_DEBUG_TRACE_RELAX
+	bool "Trace relax requests"
+	default n
+	help
+	  This option enables recording of unwanted relax requests from
+	  user-space applications leaving the real-time domain, logging
+	  the thread information and code location involved. All records
+	  are readable from /proc/xenomai/debug/relax, and can be
+	  decoded using the "slackspot" utility.
+
+config XENO_OPT_WATCHDOG
+	bool "Watchdog support"
+	default y
+	help
+	  This option activates a watchdog aimed at detecting runaway
+	  Cobalt threads. If enabled, the watchdog triggers after a
+	  given period of uninterrupted real-time activity has elapsed
+	  without Linux interaction in the meantime.
+
+	  In such an event, the current thread is moved out of the
+	  real-time domain, receiving a SIGDEBUG signal from the Linux
+	  kernel immediately after.
+
+	  The timeout value of the watchdog can be set using the
+	  XENO_OPT_WATCHDOG_TIMEOUT parameter.
+
+config XENO_OPT_WATCHDOG_TIMEOUT
+	depends on XENO_OPT_WATCHDOG
+	int "Watchdog timeout"
+	default 4
+	range 1 60
+	help
+	  Watchdog timeout value (in seconds).
+
+endif # XENO_OPT_DEBUG
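For reference, a .config fragment selecting a few of the options defined above (values are illustrative only; CONFIG_XENOMAI itself is assumed to be defined by the surrounding kernel integration):

    CONFIG_XENOMAI=y
    CONFIG_XENO_OPT_SCHED_CLASSES=y
    CONFIG_XENO_OPT_SCHED_WEAK=y
    CONFIG_XENO_OPT_TIMER_RBTREE=y
    CONFIG_XENO_OPT_WATCHDOG=y
    CONFIG_XENO_OPT_WATCHDOG_TIMEOUT=4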
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile
new file mode 100644
index 0000000..6cd2d59
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/Makefile
@@ -0,0 +1,27 @@
+obj-$(CONFIG_XENOMAI) += pipeline/ xenomai.o rtdm/ posix/
+
+xenomai-y :=	arith.o		\
+		bufd.o		\
+		clock.o		\
+		heap.o		\
+		init.o		\
+		lock.o		\
+		registry.o	\
+		sched-idle.o	\
+		sched-rt.o	\
+		sched.o		\
+		select.o	\
+		synch.o		\
+		thread.o	\
+		time.o		\
+		timer.o		\
+		tree.o
+
+xenomai-$(CONFIG_XENO_OPT_SCHED_QUOTA) += sched-quota.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_WEAK) += sched-weak.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_SPORADIC) += sched-sporadic.o
+xenomai-$(CONFIG_XENO_OPT_SCHED_TP) += sched-tp.o
+xenomai-$(CONFIG_XENO_OPT_DEBUG) += debug.o
+xenomai-$(CONFIG_XENO_OPT_PIPE) += pipe.o
+xenomai-$(CONFIG_XENO_OPT_MAP) += map.o
+xenomai-$(CONFIG_PROC_FS) += vfile.o procfs.o
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig
new file mode 100644
index 0000000..b0cbdc3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/Kconfig
@@ -0,0 +1,16 @@
+source "kernel/xenomai/Kconfig"
+source "drivers/xenomai/Kconfig"
+
+config XENO_ARCH_FPU
+	def_bool VFP
+
+config XENO_ARCH_SYS3264
+	def_bool n
+
+config XENO_ARCH_OUTOFLINE_XNLOCK
+	bool
+	default y
+
+config XENO_ARCH_IPIPE_COMPAT
+	def_bool DOVETAIL
+	select IPIPE_COMPAT
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile
new file mode 100644
index 0000000..13cbf84
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+xenomai-y := machine.o
+
+ccflags-y := -I$(srctree)/arch/arm/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h
new file mode 100644
index 0000000..3cf5825
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/calibration.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2001-2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_CALIBRATION_H
+#define _COBALT_ARM_DOVETAIL_CALIBRATION_H
+
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned int sched_latency;
+
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#else
+	sched_latency = 5000;
+#endif
+	p->user = xnclock_ns_to_ticks(&nkclock, sched_latency);
+	p->kernel = xnclock_ns_to_ticks(&nkclock,
+					CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_ARM_DOVETAIL_CALIBRATION_H */
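
The three gravity values computed above are anticipation offsets. As a
conceptual sketch only (not the core's actual code), a timer shot would be
shortened by the gravity matching the wakeup target; anticipated_shot()
and the wake_target enum are illustrative names:

enum wake_target { WAKE_IRQ, WAKE_KERNEL, WAKE_USER };

/* Program the shot earlier by the applicable gravity, so the woken
 * context resumes close to its theoretical deadline despite the
 * measured wakeup latency.
 */
static inline unsigned long long
anticipated_shot(unsigned long long deadline,
		 const struct xnclock_gravity *g, enum wake_target t)
{
	unsigned long gravity = t == WAKE_USER ? g->user :
		t == WAKE_KERNEL ? g->kernel : g->irq;

	return deadline > gravity ? deadline - gravity : 0;
}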
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h
new file mode 100644
index 0000000..9c0af20
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/features.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_FEATURES_H
+#define _COBALT_ARM_DOVETAIL_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_ARM_DOVETAIL_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..4cc0752
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/fptest.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_FPTEST_H
+#define _COBALT_ARM_DOVETAIL_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/hwcap.h>
+
+#ifdef CONFIG_VFP
+#define have_vfp (elf_hwcap & HWCAP_VFP)
+#else /* !CONFIG_VFP */
+#define have_vfp 0
+#endif /* !CONFIG_VFP */
+
+#include <asm/xenomai/uapi/fptest.h>
+
+static inline int fp_kernel_supported(void)
+{
+	return 0;
+}
+
+static inline void fp_init(void)
+{
+}
+
+static inline int fp_linux_begin(void)
+{
+	return -ENOSYS;
+}
+
+static inline void fp_linux_end(void)
+{
+}
+
+static inline int fp_detect(void)
+{
+	return have_vfp ? __COBALT_HAVE_VFP : 0;
+}
+
+#endif /* _COBALT_ARM_DOVETAIL_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..a694a78
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/machine.h
@@ -0,0 +1,72 @@
+/**
+ *   Copyright &copy; 2002-2004 Philippe Gerum.
+ *
+ *   ARM port
+ *     Copyright (C) 2005 Stelian Pop
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Xenomai; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_MACHINE_H
+#define _COBALT_ARM_DOVETAIL_MACHINE_H
+
+#include <linux/version.h>
+#include <asm/byteorder.h>
+#include <asm/cacheflush.h>
+
+#define xnarch_cache_aliasing() cache_is_vivt()
+
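+/*
+ * ffnz(): find-first-nonzero, returning the index of the least
+ * significant set bit, e.g. ffnz(0x28) == 3. Pre-ARMv5 falls back to
+ * a binary search; ARMv5+ isolates the lowest set bit with (x & -x)
+ * and derives its index from the CLZ instruction.
+ */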
+#if __LINUX_ARM_ARCH__ < 5
+static inline __attribute_const__ unsigned long ffnz(unsigned long x)
+{
+	int r = 0;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff)) {
+		x >>= 16;
+		r += 16;
+	}
+	if (!(x & 0xff)) {
+		x >>= 8;
+		r += 8;
+	}
+	if (!(x & 0xf)) {
+		x >>= 4;
+		r += 4;
+	}
+	if (!(x & 3)) {
+		x >>= 2;
+		r += 2;
+	}
+	if (!(x & 1)) {
+		x >>= 1;
+		r += 1;
+	}
+	return r;
+}
+#else
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	int __r;
+	__asm__("clz\t%0, %1" : "=r" (__r) : "r"(ul & (-ul)) : "cc");
+	return 31 - __r;
+}
+#endif
+
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_ARM_DOVETAIL_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..d41b257
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_SYSCALL_H
+#define _COBALT_ARM_DOVETAIL_SYSCALL_H
+
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+/*
+ * Cobalt syscall numbers can be fetched from ARM_ORIG_r0 with ARM_r7
+ * containing the Xenomai syscall marker, Linux syscalls directly from
+ * ARM_r7. Since we have to work with Dovetail whilst remaining binary
+ * compatible with applications built for the I-pipe, we retain the
+ * old syscall signature based on receiving XENO_ARM_SYSCALL in
+ * ARM_r7, possibly ORed with __COBALT_SYSCALL_BIT by Dovetail
+ * (IPIPE_COMPAT mode).
+ *
+ * FIXME: We also have __COBALT_SYSCALL_BIT (equal to
+ * __OOB_SYSCALL_BIT) present in the actual syscall number in r0,
+ * which is pretty much useless. Oh, well...  When support for the
+ * I-pipe is dropped, we may switch back to the regular convention
+ * Dovetail abides by, with the actual syscall number in r7 ORed
+ * with __OOB_SYSCALL_BIT, freeing r0 for passing a call argument.
+ */
+#define __xn_reg_sys(__regs)	((__regs)->ARM_ORIG_r0)
+#define __xn_syscall_p(__regs)	(((__regs)->ARM_r7 & ~__COBALT_SYSCALL_BIT) == XENO_ARM_SYSCALL)
+#define __xn_syscall(__regs)	(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT)
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)					\
+	({								\
+		*(__code) = (__regs)->ARM_r7;				\
+		*(__code) < NR_syscalls || *(__code) >= __ARM_NR_BASE;	\
+	})
+
+#define __xn_reg_rval(__regs)	((__regs)->ARM_r0)
+#define __xn_reg_pc(__regs)	((__regs)->ARM_ip)
+#define __xn_reg_sp(__regs)	((__regs)->ARM_sp)
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+static inline
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			unsigned long a3, unsigned long a4,
+			unsigned long a5)
+{
+	/* We need none of these with Dovetail. */
+	return -ENOSYS;
+}
+
+#define pipeline_get_syscall_args pipeline_get_syscall_args
+static inline void pipeline_get_syscall_args(struct task_struct *task,
+					     struct pt_regs *regs,
+					     unsigned long *args)
+{
+	args[0] = regs->ARM_r1;
+	args[1] = regs->ARM_r2;
+	args[2] = regs->ARM_r3;
+	args[3] = regs->ARM_r4;
+	args[4] = regs->ARM_r5;
+}
+
+#endif /* !_COBALT_ARM_DOVETAIL_SYSCALL_H */
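
A sketch of how these predicates could be combined at syscall entry;
handle_cobalt_syscall() is a hypothetical stand-in, not a function of the
Cobalt core:

void handle_cobalt_syscall(unsigned long nr);	/* hypothetical */

static void classify_syscall(struct pt_regs *regs)
{
	unsigned long code;

	if (__xn_syscall_p(regs))
		/* Cobalt request: marker in r7, number from ORIG_r0. */
		handle_cobalt_syscall(__xn_syscall(regs));
	else if (__xn_rootcall_p(regs, &code))
		/* Regular Linux syscall 'code': the root stage runs it. */
		;
}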
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h
new file mode 100644
index 0000000..95c5a11
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/syscall32.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_SYSCALL32_H
+#define _COBALT_ARM_ASM_SYSCALL32_H
+
+#include <asm-generic/xenomai/syscall32.h>
+
+#endif /* !_COBALT_ARM_ASM_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h
new file mode 100644
index 0000000..792a3d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/thread.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_DOVETAIL_THREAD_H
+#define _COBALT_ARM_DOVETAIL_THREAD_H
+
+#include <asm-generic/xenomai/dovetail/thread.h>
+#include <asm/traps.h>
+
+#define xnarch_fault_pc(__regs)	((__regs)->ARM_pc - (thumb_mode(__regs) ? 2 : 4))
+#define xnarch_fault_pf_p(__nr)	((__nr) == ARM_TRAP_ACCESS)
+#define xnarch_fault_bp_p(__nr)	((current->ptrace & PT_PTRACED) &&	\
+					((__nr) == ARM_TRAP_BREAK ||	\
+						(__nr) == ARM_TRAP_UNDEFINSTR))
+#define xnarch_fault_notify(__nr) (!xnarch_fault_bp_p(__nr))
+
+#endif /* !_COBALT_ARM_DOVETAIL_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h
new file mode 100644
index 0000000..fe59896
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/include/asm/xenomai/wrappers.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_WRAPPERS_H
+#define _COBALT_ARM_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */
+
+#define __put_user_inatomic __put_user
+#define __get_user_inatomic __get_user
+
+#endif /* _COBALT_ARM_ASM_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c
new file mode 100644
index 0000000..bc32f17
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/dovetail/machine.c
@@ -0,0 +1,44 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2021 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <linux/mm.h>
+#include <asm/xenomai/machine.h>
+
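+/*
+ * Fault in every page of the VMA ahead of time, so that first-touch
+ * accesses from real-time context do not trigger minor faults later
+ * on (assumed intent of the .prefault hook this is wired to below).
+ */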
+static void mach_arm_prefault(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	unsigned int flags;
+
+	if ((vma->vm_flags & VM_MAYREAD)) {
+		flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0;
+		for (addr = vma->vm_start;
+		     addr != vma->vm_end; addr += PAGE_SIZE)
+			handle_mm_fault(vma, addr, flags, NULL);
+	}
+}
+
+static const char *const fault_labels[] = {
+	[ARM_TRAP_ACCESS] = "Data or instruction access",
+	[ARM_TRAP_SECTION] = "Section fault",
+	[ARM_TRAP_DABT] = "Generic data abort",
+	[ARM_TRAP_PABT] = "Prefetch abort",
+	[ARM_TRAP_BREAK] = "Instruction breakpoint",
+	[ARM_TRAP_FPU] = "Floating point exception",
+	[ARM_TRAP_VFP] = "VFP Floating point exception",
+	[ARM_TRAP_UNDEFINSTR] = "Undefined instruction",
+	[ARM_TRAP_ALIGNMENT] = "Unaligned access exception",
+	[31] = NULL
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "arm",
+	.init = NULL,
+	.late_init = NULL,
+	.cleanup = NULL,
+	.prefault = mach_arm_prefault,
+	.fault_labels = fault_labels,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h
new file mode 100644
index 0000000..cb7fb4a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/arith.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_ARITH_H
+#define _COBALT_ARM_ASM_UAPI_ARITH_H
+
+#include <asm/xenomai/uapi/features.h>
+
+#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE))
+static inline __attribute__((__const__)) unsigned long long
+mach_arm_nodiv_ullimd(const unsigned long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ);
+
+#define xnarch_nodiv_ullimd(op, frac, integ) \
+	mach_arm_nodiv_ullimd((op), (frac), (integ))
+
+static inline __attribute__((__const__)) long long
+mach_arm_nodiv_llimd(const long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ);
+
+#define xnarch_nodiv_llimd(op, frac, integ) \
+	mach_arm_nodiv_llimd((op), (frac), (integ))
+#else /* arm <= v3 */
+#define xnarch_add96and64(l0, l1, l2, s0, s1)		\
+	do {						\
+		__asm__ ("adds %2, %2, %4\n\t"		\
+			 "adcs %1, %1, %3\n\t"		\
+			 "adc %0, %0, #0\n\t"		\
+			 : "+r"(l0), "+r"(l1), "+r"(l2)	\
+			 : "r"(s0), "r"(s1): "cc");	\
+	} while (0)
+#endif /* arm <= v3 */
+
+#include <cobalt/uapi/asm-generic/arith.h>
+
+#if __LINUX_ARM_ARCH__ >= 4 && (!defined(CONFIG_THUMB2_KERNEL) || !defined(CONFIG_FTRACE))
+#define mach_arm_nodiv_ullimd_str			\
+	"umull %[tl], %[rl], %[opl], %[fracl]\n\t"	\
+	"umull %[rm], %[rh], %[oph], %[frach]\n\t"	\
+	"adds %[rl], %[rl], %[tl], lsr #31\n\t"		\
+	"adcs %[rm], %[rm], #0\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umull %[tl], %[th], %[oph], %[fracl]\n\t"	\
+	"adds %[rl], %[rl], %[tl]\n\t"			\
+	"adcs %[rm], %[rm], %[th]\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umull %[tl], %[th], %[opl], %[frach]\n\t"	\
+	"adds %[rl], %[rl], %[tl]\n\t"			\
+	"adcs %[rm], %[rm], %[th]\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umlal %[rm], %[rh], %[opl], %[integ]\n\t"	\
+	"mla %[rh], %[oph], %[integ], %[rh]\n\t"
+
+static inline __attribute__((__const__)) unsigned long long
+mach_arm_nodiv_ullimd(const unsigned long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ)
+{
+	register unsigned rl __asm__("r5");
+	register unsigned rm __asm__("r0");
+	register unsigned rh __asm__("r1");
+	register unsigned fracl __asm__ ("r2");
+	register unsigned frach __asm__ ("r3");
+	register unsigned integ __asm__("r4") = rhs_integ;
+	register unsigned opl __asm__ ("r6");
+	register unsigned oph __asm__ ("r8");
+	register unsigned tl __asm__("r9");
+	register unsigned th __asm__("r10");
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(frac, frach, fracl);
+
+	__asm__ (mach_arm_nodiv_ullimd_str
+		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
+		   [tl]"=r"(tl), [th]"=r"(th)
+		 : [opl]"r"(opl), [oph]"r"(oph),
+		   [fracl]"r"(fracl), [frach]"r"(frach),
+		   [integ]"r"(integ)
+		 : "cc");
+
+	return xnarch_u64fromu32(rh, rm);
+}
+
+static inline __attribute__((__const__)) long long
+mach_arm_nodiv_llimd(const long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ)
+{
+	register unsigned rl __asm__("r5");
+	register unsigned rm __asm__("r0");
+	register unsigned rh __asm__("r1");
+	register unsigned fracl __asm__ ("r2");
+	register unsigned frach __asm__ ("r3");
+	register unsigned integ __asm__("r4") = rhs_integ;
+	register unsigned opl __asm__ ("r6");
+	register unsigned oph __asm__ ("r8");
+	register unsigned tl __asm__("r9");
+	register unsigned th __asm__("r10");
+	register unsigned s __asm__("r12");
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(frac, frach, fracl);
+
+	__asm__ ("movs %[s], %[oph], lsr #30\n\t"
+		 "beq 1f\n\t"
+		 "rsbs  %[opl], %[opl], #0\n\t"
+		 "sbc  %[oph], %[oph], %[oph], lsl #1\n"
+		 "1:\t"
+		 mach_arm_nodiv_ullimd_str
+		 "teq %[s], #0\n\t"
+		 "beq 2f\n\t"
+		 "rsbs  %[rm], %[rm], #0\n\t"
+		 "sbc  %[rh], %[rh], %[rh], lsl #1\n"
+		 "2:\t"
+		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
+		   [tl]"=r"(tl), [th]"=r"(th), [s]"=r"(s)
+		 : [opl]"r"(opl), [oph]"r"(oph),
+		   [fracl]"r"(fracl), [frach]"r"(frach),
+		   [integ]"r"(integ)
+		 : "cc");
+
+	return xnarch_u64fromu32(rh, rm);
+}
+#endif /* arm >= v4 */
+
+#endif /* _COBALT_ARM_ASM_UAPI_ARITH_H */
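
For reference, an approximate C model of what the division-free multiply
above computes, assuming a compiler with __uint128_t (which 32-bit ARM
lacks, hence the hand-coded assembly); it also ignores the rounding the
assembly applies to the discarded low 64 bits:

#include <stdint.h>

/* op * (integ + frac / 2^64), with no runtime division */
static inline uint64_t
nodiv_ullimd_model(uint64_t op, uint64_t frac, uint32_t integ)
{
	return (uint64_t)(((__uint128_t)op * frac) >> 64)
		+ op * (uint64_t)integ;
}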
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h
new file mode 100644
index 0000000..43b7afb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/features.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_FEATURES_H
+#define _COBALT_ARM_ASM_UAPI_FEATURES_H
+
+/* The ABI revision level we use on this arch. */
+#define XENOMAI_ABI_REV   18UL
+
+#define XENOMAI_FEAT_DEP (__xn_feat_generic_mask)
+
+#define XENOMAI_FEAT_MAN (__xn_feat_generic_man_mask)
+
+#define XNARCH_HAVE_LLMULSHFT    1
+#define XNARCH_HAVE_NODIV_LLIMD  1
+
+struct cobalt_featinfo_archdep { /* no arch-specific feature */ };
+
+#include <cobalt/uapi/asm-generic/features.h>
+
+static inline const char *get_feature_label(unsigned int feature)
+{
+	return get_generic_feature_label(feature);
+}
+
+#endif /* !_COBALT_ARM_ASM_UAPI_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h
new file mode 100644
index 0000000..d5c2c75
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/fptest.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_FPTEST_H
+#define _COBALT_ARM_ASM_UAPI_FPTEST_H
+
+#define __COBALT_HAVE_VFP  0x1
+
+static inline void fp_regs_set(int features, unsigned int val)
+{
+	unsigned long long e[16];
+	unsigned int i;
+
+	if (features & __COBALT_HAVE_VFP) {
+		for (i = 0; i < 16; i++)
+			e[i] = val;
+
+		/* vldm %0!, {d0-d15},
+		   AKA fldmiax %0!, {d0-d15} */
+		__asm__ __volatile__("ldc p11, cr0, [%0],#32*4":
+				     "=r"(i)
+				     : "0"(&e[0])
+				     : "d0", "d1", "d2", "d3", "d4", "d5",
+				       "d6", "d7", "d8", "d9", "d10", "d11",
+				       "d12", "d13", "d14", "d15",
+				       "memory");
+	}
+}
+
+static inline unsigned int fp_regs_check(int features, unsigned int val,
+					 int (*report)(const char *fmt, ...))
+{
+	unsigned int result = val, i;
+	unsigned long long e[16];
+
+	if (features & __COBALT_HAVE_VFP) {
+		/* vstm %0!, {d0-d15},
+		   AKA fstmiax %0!, {d0-d15} */
+		__asm__ __volatile__("stc p11, cr0, [%0],#32*4":
+				     "=r"(i): "0"(&e[0]): "memory");
+
+		for (i = 0; i < 16; i++)
+			if (e[i] != val) {
+				report("d%d: %llu != %u\n", i, e[i], val);
+				result = e[i];
+			}
+	}
+
+	return result;
+}
+
+#endif /* !_COBALT_ARM_ASM_UAPI_FPTEST_H */
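
A sketch of the intended usage pattern (the real consumer lives in the
Xenomai testsuite; force_context_switch() is a placeholder for whatever
the test does to get preempted):

#include <stdio.h>

void force_context_switch(void);	/* placeholder */

static int vfp_bank_preserved(int features)
{
	fp_regs_set(features, 0x5a5a5a5a);
	force_context_switch();
	return fp_regs_check(features, 0x5a5a5a5a, printf) == 0x5a5a5a5a;
}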
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h
new file mode 100644
index 0000000..c079a35
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/syscall.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_SYSCALL_H
+#define _COBALT_ARM_ASM_UAPI_SYSCALL_H
+
+#define __xn_syscode(__nr)	(__COBALT_SYSCALL_BIT | (__nr))
+
+#define XENO_ARM_SYSCALL        0x000F0042	/* carefully chosen... */
+
+#define XENOMAI_SYSARCH_TSCINFO      4
+
+#endif /* !_COBALT_ARM_ASM_UAPI_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h
new file mode 100644
index 0000000..b17cfb2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/include/asm/xenomai/uapi/tsc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM_ASM_UAPI_TSC_H
+#define _COBALT_ARM_ASM_UAPI_TSC_H
+
+struct __xn_tscinfo {
+	volatile unsigned int *counter;
+};
+
+#endif /* !_COBALT_ARM_ASM_UAPI_TSC_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile
new file mode 100644
index 0000000..c482fb3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-$(CONFIG_IPIPE) := machine.o thread.o switch.o syscall.o
+
+ccflags-y := -I$(srctree)/arch/arm/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README
new file mode 100644
index 0000000..80f954a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/README
@@ -0,0 +1,3 @@
+Get the interrupt pipeline code for the target kernel from
+http://xenomai.org/downloads/ipipe/, or
+git://git.xenomai.org/ipipe.git
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h
new file mode 100644
index 0000000..d227cae
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/calibration.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_CALIBRATION_H
+#define _COBALT_ARM_ASM_CALIBRATION_H
+
+unsigned int omap_rev(void);
+#define cpu_is_omap44xx() ((omap_rev() & 0xff) == 0x44)
+
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned int ulat;
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	ulat = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#elif defined(CONFIG_ARCH_AT91RM9200)
+	ulat = 8500;
+#elif defined(CONFIG_ARCH_AT91SAM9263)
+	ulat = 11000;
+#elif defined(CONFIG_SOC_IMX6Q)
+	ulat = 6000;
+#elif defined(CONFIG_ARCH_MX51)
+	ulat = 5000;
+#elif defined(CONFIG_ARCH_MX53)
+	ulat = 5000;
+#elif defined(CONFIG_ARCH_MX6)
+	ulat = 2000;
+#elif defined(CONFIG_SOC_IMX7)
+	ulat = 2000;
+#elif defined(CONFIG_SOC_LS1021A)
+	ulat = 2800;
+#elif defined(CONFIG_ARCH_OMAP)
+	ulat = cpu_is_omap44xx() ? 2500 : 5000;
+#elif defined(CONFIG_ARCH_STI)
+	ulat = 6000;
+#elif defined(CONFIG_ARCH_SOCFPGA)
+	ulat = 4500;
+#else
+	ulat = 9500;	/* XXX sane? */
+#endif
+	p->user = xnclock_ns_to_ticks(&nkclock, ulat);
+	p->kernel = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_ARM_ASM_CALIBRATION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h
new file mode 100644
index 0000000..d485286
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/features.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_FEATURES_H
+#define _COBALT_ARM_ASM_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_ARM_ASM_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..d3f335f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/fptest.h
@@ -0,0 +1,56 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_FPTEST_H
+#define _COBALT_ARM_ASM_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/hwcap.h>
+
+#ifdef CONFIG_VFP
+#define have_vfp (ELF_HWCAP & HWCAP_VFP)
+#else /* !CONFIG_VFP */
+#define have_vfp 0
+#endif /* !CONFIG_VFP */
+
+#include <asm/xenomai/uapi/fptest.h>
+
+static inline int fp_kernel_supported(void)
+{
+	return 1;
+}
+
+static inline void fp_init(void)
+{
+}
+
+static inline int fp_linux_begin(void)
+{
+	return -ENOSYS;
+}
+
+static inline void fp_linux_end(void)
+{
+}
+
+static inline int fp_detect(void)
+{
+	return have_vfp ? __COBALT_HAVE_VFP : 0;
+}
+
+#endif /* _COBALT_ARM_ASM_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..d6e965f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/machine.h
@@ -0,0 +1,85 @@
+/**
+ *   Copyright &copy; 2002-2004 Philippe Gerum.
+ *
+ *   ARM port
+ *     Copyright (C) 2005 Stelian Pop
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Xenomai; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_MACHINE_H
+#define _COBALT_ARM_ASM_MACHINE_H
+
+#include <linux/version.h>
+#include <asm/byteorder.h>
+
+#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq
+
+#include <asm/barrier.h>
+#include <asm/compiler.h>
+#include <asm/cmpxchg.h>
+#include <asm/switch_to.h>
+#include <asm/system_info.h>
+#include <asm/system_misc.h>
+#include <asm/timex.h>
+#include <asm/processor.h>
+#include <asm/ipipe.h>
+#include <asm/mach/irq.h>
+#include <asm/cacheflush.h>
+
+#define xnarch_cache_aliasing() cache_is_vivt()
+
+#if __LINUX_ARM_ARCH__ < 5
+static inline __attribute_const__ unsigned long ffnz(unsigned long x)
+{
+	int r = 0;
+
+	if (!x)
+		return 0;
+	if (!(x & 0xffff)) {
+		x >>= 16;
+		r += 16;
+	}
+	if (!(x & 0xff)) {
+		x >>= 8;
+		r += 8;
+	}
+	if (!(x & 0xf)) {
+		x >>= 4;
+		r += 4;
+	}
+	if (!(x & 3)) {
+		x >>= 2;
+		r += 2;
+	}
+	if (!(x & 1)) {
+		x >>= 1;
+		r += 1;
+	}
+	return r;
+}
+#else
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	int __r;
+	__asm__("clz\t%0, %1" : "=r" (__r) : "r"(ul & (-ul)) : "cc");
+	return 31 - __r;
+}
+#endif
+
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_ARM_ASM_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..362f0eb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall.h
@@ -0,0 +1,91 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_SYSCALL_H
+#define _COBALT_ARM_ASM_SYSCALL_H
+
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+#ifndef __ARM_NR_ipipe
+/* Legacy pipelines do not define this. */
+#define __ARM_NR_ipipe	(__NR_SYSCALL_BASE + XENO_ARM_SYSCALL)
+#endif
+
+/*
+ * Cobalt syscall numbers can be fetched from ARM_ORIG_r0 with ARM_r7
+ * containing the Xenomai syscall marker, Linux syscalls directly from
+ * ARM_r7 (may require the OABI tweak).
+ */
+#define __xn_reg_sys(__regs)	((__regs)->ARM_ORIG_r0)
+/* In OABI_COMPAT mode, handle both OABI and EABI userspace syscalls */
+#ifdef CONFIG_OABI_COMPAT
+#define __xn_syscall_p(__regs)	(((__regs)->ARM_r7 == __NR_OABI_SYSCALL_BASE + XENO_ARM_SYSCALL) || \
+				 ((__regs)->ARM_r7 == __ARM_NR_ipipe))
+#define __xn_abi_decode(__regs) ((__regs)->ARM_r7 - __NR_OABI_SYSCALL_BASE)
+#else /* !CONFIG_OABI_COMPAT */
+#define __xn_syscall_p(__regs)	((__regs)->ARM_r7 == __ARM_NR_ipipe)
+#define __xn_abi_decode(__regs) ((__regs)->ARM_r7)
+#endif /* !CONFIG_OABI_COMPAT */
+#define __xn_syscall(__regs)	(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT)
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)					\
+	({								\
+		*(__code) = __xn_abi_decode(__regs);			\
+		*(__code) < NR_syscalls || *(__code) >= __ARM_NR_BASE;	\
+	})
+
+#define __xn_reg_rval(__regs)	((__regs)->ARM_r0)
+#define __xn_reg_arg1(__regs)	((__regs)->ARM_r1)
+#define __xn_reg_arg2(__regs)	((__regs)->ARM_r2)
+#define __xn_reg_arg3(__regs)	((__regs)->ARM_r3)
+#define __xn_reg_arg4(__regs)	((__regs)->ARM_r4)
+#define __xn_reg_arg5(__regs)	((__regs)->ARM_r5)
+#define __xn_reg_pc(__regs)	((__regs)->ARM_ip)
+#define __xn_reg_sp(__regs)	((__regs)->ARM_sp)
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5);
+
+#endif /* !_COBALT_ARM_ASM_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h
new file mode 100644
index 0000000..95c5a11
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/syscall32.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_SYSCALL32_H
+#define _COBALT_ARM_ASM_SYSCALL32_H
+
+#include <asm-generic/xenomai/syscall32.h>
+
+#endif /* !_COBALT_ARM_ASM_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h
new file mode 100644
index 0000000..93346fd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/thread.h
@@ -0,0 +1,119 @@
+/*
+ * Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_THREAD_H
+#define _COBALT_ARM_ASM_THREAD_H
+
+#include <asm-generic/xenomai/ipipe/thread.h>
+
+#ifdef CONFIG_XENO_ARCH_FPU
+#ifdef CONFIG_VFP
+#include <asm/vfp.h>
+#endif /* CONFIG_VFP */
+#endif /* !CONFIG_XENO_ARCH_FPU */
+
+struct xnarchtcb {
+	struct xntcb core;
+#ifdef CONFIG_XENO_ARCH_FPU
+#ifdef CONFIG_VFP
+	union vfp_state *fpup;
+#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
+#else
+#define xnarch_fpu_ptr(tcb)     NULL
+#endif
+#endif
+};
+
+#define xnarch_fault_regs(d)	((d)->regs)
+#define xnarch_fault_trap(d)	((d)->exception)
+#define xnarch_fault_code(d)	(0)
+#define xnarch_fault_pc(d)	((d)->regs->ARM_pc - (thumb_mode((d)->regs) ? 2 : 4)) /* XXX ? */
+
+#define xnarch_fault_pf_p(d)	((d)->exception == IPIPE_TRAP_ACCESS)
+#define xnarch_fault_bp_p(d)	((current->ptrace & PT_PTRACED) &&	\
+				 ((d)->exception == IPIPE_TRAP_BREAK ||	\
+				  (d)->exception == IPIPE_TRAP_UNDEFINSTR))
+
+#define xnarch_fault_notify(d) (!xnarch_fault_bp_p(d))
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->core.host_task;
+}
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in);
+
+static inline void xnarch_enter_root(struct xnthread *root) { }
+
+static inline int xnarch_escalate(void)
+{
+	if (ipipe_root_p) {
+		ipipe_raise_irq(cobalt_pipeline.escalate_virq);
+		return 1;
+	}
+
+	return 0;
+}
+
+#if defined(CONFIG_XENO_ARCH_FPU) && defined(CONFIG_VFP)
+
+void xnarch_init_root_tcb(struct xnthread *thread);
+
+void xnarch_init_shadow_tcb(struct xnthread *thread);
+
+int xnarch_fault_fpu_p(struct ipipe_trap_data *d);
+
+void xnarch_leave_root(struct xnthread *root);
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *thread);
+
+int xnarch_handle_fpu_fault(struct xnthread *from, 
+			struct xnthread *to, struct ipipe_trap_data *d);
+
+#else /* !CONFIG_XENO_ARCH_FPU || !CONFIG_VFP */
+
+static inline void xnarch_init_root_tcb(struct xnthread *thread) { }
+static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { }
+
+/*
+ * Userland may raise FPU faults with FPU-enabled kernels, regardless
+ * of whether real-time threads actually use FPU, so we simply ignore
+ * these faults.
+ */
+static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+{
+	return 0;
+}
+
+static inline void xnarch_leave_root(struct xnthread *root) { }
+
+static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { }
+
+static inline int xnarch_handle_fpu_fault(struct xnthread *from, 
+					struct xnthread *to, struct ipipe_trap_data *d)
+{
+	return 0;
+}
+#endif /*  !CONFIG_XENO_ARCH_FPU || !CONFIG_VFP */
+
+static inline void xnarch_enable_kfpu(void) { }
+
+static inline void xnarch_disable_kfpu(void) { }
+
+#endif /* !_COBALT_ARM_ASM_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h
new file mode 100644
index 0000000..fe59896
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/include/asm/xenomai/wrappers.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM_ASM_WRAPPERS_H
+#define _COBALT_ARM_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */
+
+#define __put_user_inatomic __put_user
+#define __get_user_inatomic __get_user
+
+#endif /* _COBALT_ARM_ASM_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c
new file mode 100644
index 0000000..0fd48ca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/machine.c
@@ -0,0 +1,63 @@
+/**
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+
+#include <linux/mm.h>
+#include <asm/xenomai/machine.h>
+
+static void mach_arm_prefault(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	unsigned int flags;
+
+	if ((vma->vm_flags & VM_MAYREAD)) {
+		flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0;
+		for (addr = vma->vm_start;
+		     addr != vma->vm_end; addr += PAGE_SIZE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+			handle_mm_fault(vma->vm_mm, vma, addr, flags);
+#else
+			handle_mm_fault(vma, addr, flags);
+#endif
+	}
+}
+
+static const char *const fault_labels[] = {
+	[IPIPE_TRAP_ACCESS] = "Data or instruction access",
+	[IPIPE_TRAP_SECTION] = "Section fault",
+	[IPIPE_TRAP_DABT] = "Generic data abort",
+	[IPIPE_TRAP_UNKNOWN] = "Unknown exception",
+	[IPIPE_TRAP_BREAK] = "Instruction breakpoint",
+	[IPIPE_TRAP_FPU] = "Floating point exception",
+	[IPIPE_TRAP_VFP] = "VFP Floating point exception",
+	[IPIPE_TRAP_UNDEFINSTR] = "Undefined instruction",
+#ifdef IPIPE_TRAP_ALIGNMENT
+	[IPIPE_TRAP_ALIGNMENT] = "Unaligned access exception",
+#endif /* IPIPE_TRAP_ALIGNMENT */
+	[IPIPE_NR_FAULTS] = NULL
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "arm",
+	.init = NULL,
+	.late_init = NULL,
+	.cleanup = NULL,
+	.prefault = mach_arm_prefault,
+	.fault_labels = fault_labels,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S
new file mode 100644
index 0000000..9fc847a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/switch.S
@@ -0,0 +1,219 @@
+/*
+ * Copyright (C) 2005 Stelian Pop.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/linkage.h>
+#include <linux/version.h>
+#include <asm/assembler.h>
+#include <asm/asm-offsets.h>
+#include <asm/tls.h>
+#ifdef CONFIG_VFP
+#include <asm/vfpmacros.h>
+#endif
+
+	.macro fpu_switch tmp
+#ifdef CONFIG_VFP
+#if __LINUX_ARM_ARCH__ <= 6
+#ifdef CONFIG_JUMP_LABEL
+9998:	nop
+	.pushsection __jump_table, "aw"
+	.word	9998b, 9999f, __xeno_vfp_key
+	.popsection
+#else
+	ldr	\tmp, =elf_hwcap
+	ldr	\tmp, [\tmp]
+	tst	\tmp, #HWCAP_VFP
+	beq	9999f
+#endif
+#endif
+	@ Always disable VFP so we can lazily save/restore the old
+	@ state. This occurs in the context of the previous thread.
+	VFPFMRX \tmp, FPEXC
+	bic     \tmp, \tmp, #FPEXC_EN
+	VFPFMXR FPEXC, \tmp
+#if __LINUX_ARM_ARCH__ <= 6
+9999:
+#endif
+#endif
+	.endm
+
+	.text
+
+#if defined(CONFIG_VFP) && defined(CONFIG_XENO_ARCH_FPU)
+/* Copied from vfp_save_state in arch/arm/vfp/vfphw.S
+ * r0 = pointer to union vfp_state, r1 = fpexc
+ */
+ENTRY(__asm_vfp_save)
+	VFPFSTMIA	r0, r2		@ save the working registers
+	VFPFMRX		r2, FPSCR	@ current status
+	tst		r1, #FPEXC_EX	@ is there additional state to save?
+	beq		1f
+	VFPFMRX		r3, FPINST	@ FPINST (only if FPEXC.EX is set)
+	tst		r1, #FPEXC_FP2V	@ is there an FPINST2 to read?
+	beq		1f
+	VFPFMRX		r12, FPINST2	@ FPINST2 if needed (and present)
+1:
+	stmia		r0, {r1, r2, r3, r12}	@ save FPEXC, FPSCR, FPINST, FPINST2
+	mov		pc, lr
+ENDPROC(__asm_vfp_save)
+
+/* Copied from no_old_VFP_process in arch/arm/vfp/vfphw.S
+ * r0 = pointer to union vfp_state
+ * r1 = current cpu
+ */
+ENTRY(__asm_vfp_load)
+#ifdef CONFIG_SMP
+	str		r1, [r0, #VFP_CPU]
+#endif
+	VFPFLDMIA	r0, r2		@ reload the working registers while
+					@ FPEXC is in a safe state
+	ldmia		r0, {r1, r2, r3, r12}	@ load FPEXC, FPSCR, FPINST, FPINST2
+	tst		r1, #FPEXC_EX	@ is there additional state to restore?
+	beq		1f
+	VFPFMXR		FPINST, r3	@ restore FPINST (only if FPEXC.EX is set)
+	tst		r1, #FPEXC_FP2V	@ is there an FPINST2 to write?
+	beq		1f
+	VFPFMXR		FPINST2, r12	@ FPINST2 if needed (and present)
+1:
+	VFPFMXR		FPSCR, r2	@ restore status
+	mov		pc, lr
+ENDPROC(__asm_vfp_load)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3, 11, 0)
+	.macro load_tls base, tp, tpuser
+	ldr     \tp, [\base, #TI_TP_VALUE]
+	.endm
+
+	.macro switch_tls base, tp, tpuser, tmp1, tmp2
+	set_tls \tp, \tmp1, \tmp2
+	.endm
+#else
+	.macro load_tls base, tp, tpuser
+	ldr	\tp, [\base, #TI_TP_VALUE]
+	ldr	\tpuser, [\base, #TI_TP_VALUE + 4]
+	.endm
+#endif
+
+/*
+ * Switch context routine.
+ *
+ * Registers according to the ARM procedure call standard:
+ *   Reg    Description
+ *   r0-r3  argument/scratch registers
+ *   r4-r9  variable register
+ *   r10=sl stack limit/variable register
+ *   r11=fp frame pointer/variable register
+ *   r12=ip intra-procedure-call scratch register
+ *   r13=sp stack pointer (auto preserved)
+ *   r14=lr link register
+ *   r15=pc program counter (auto preserved)
+ *
+ * Copied from __switch_to, arch/arm/kernel/entry-armv.S.
+ * Right now it is identical, but who knows what the
+ * future has in store for us...
+ *
+ * XXX: All the following config options are NOT tested:
+ *      CONFIG_IWMMXT
+ *
+ *  Calling args:
+ * r0 = previous thread_info, r1 = next thread_info
+ *
+ * FIXME: this is ugly, at some point we should stop duplicating
+ * what __switch_to() does, dropping specific fpu management from
+ * Cobalt in the same move; this will have to wait until Dovetail
+ * is substituted for the I-pipe though, since the former already
+ * provides the support we need for this. --rpm
+ */
+ENTRY(__asm_thread_switch)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+	add     ip, r0, #TI_CPU_SAVE
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
+	load_tls r1, r4, r5
+#ifdef CONFIG_CPU_USE_DOMAINS
+	ldr     r6, [r1, #TI_CPU_DOMAIN]
+#endif
+	switch_tls r0, r4, r5, r3, r7
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	ldr	r7, [r1, #TI_TASK]
+	ldr	r8, =__stack_chk_guard
+	ldr	r7, [r7, #TSK_STACK_CANARY]
+#endif
+#ifdef CONFIG_CPU_USE_DOMAINS
+	mcr     p15, 0, r6, c3, c0, 0           @ Set domain register
+#endif
+	fpu_switch r4
+#if defined(CONFIG_CC_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	str	r7, [r8]
+#endif
+ ARM(	add	r4, r1, #TI_CPU_SAVE	   )
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	add	ip, r1, #TI_CPU_SAVE	   )
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
+ENDPROC(__asm_thread_switch)
+
+#else /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) */
+
+#include <asm/unwind.h>
+
+ UNWIND(.fnstart	)
+ UNWIND(.cantunwind	)
+	add	ip, r0, #TI_CPU_SAVE
+ ARM(	stmia	ip!, {r4 - sl, fp, sp, lr} )	@ Store most regs on stack
+ THUMB(	stmia	ip!, {r4 - sl, fp}	   )	@ Store most regs on stack
+ THUMB(	str	sp, [ip], #4		   )
+ THUMB(	str	lr, [ip], #4		   )
+	ldr	r4, [r1, #TI_TP_VALUE]
+	ldr	r5, [r1, #TI_TP_VALUE + 4]
+#ifdef CONFIG_CPU_USE_DOMAINS
+	mrc	p15, 0, r6, c3, c0, 0		@ Get domain register
+	str	r6, [r0, #TI_CPU_DOMAIN]	@ Save old domain register
+	ldr	r6, [r1, #TI_CPU_DOMAIN]
+#endif
+	switch_tls r0, r4, r5, r3, r7
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	ldr	r7, [r1, #TI_TASK]
+	ldr	r8, =__stack_chk_guard
+	.if (TSK_STACK_CANARY > IMM12_MASK)
+	add	r7, r7, #TSK_STACK_CANARY & ~IMM12_MASK
+	.endif
+	ldr	r7, [r7, #TSK_STACK_CANARY & IMM12_MASK]
+#endif
+#ifdef CONFIG_CPU_USE_DOMAINS
+	mcr	p15, 0, r6, c3, c0, 0		@ Set domain register
+#endif
+	mov	r5, r0
+	fpu_switch r4
+	add	r4, r1, #TI_CPU_SAVE
+#if defined(CONFIG_STACKPROTECTOR) && !defined(CONFIG_SMP)
+	str	r7, [r8]
+#endif
+ THUMB(	mov	ip, r4			   )
+	mov	r0, r5
+ ARM(	ldmia	r4, {r4 - sl, fp, sp, pc}  )	@ Load all regs saved previously
+ THUMB(	ldmia	ip!, {r4 - sl, fp}	   )	@ Load all regs saved previously
+ THUMB(	ldr	sp, [ip], #4		   )
+ THUMB(	ldr	pc, [ip]		   )
+ UNWIND(.fnend		)
+ENDPROC(__asm_thread_switch)
+
+#endif /* LINUX_VERSION_CODE >= KERNEL_VERSION(4, 19, 0) */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c
new file mode 100644
index 0000000..ee78243
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/syscall.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2005 Stelian Pop
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/ipipe.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/uapi/tsc.h>
+
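+/*
+ * XENOMAI_SYSARCH_TSCINFO exports the I-pipe tsc descriptor to
+ * userland: libcobalt reads the returned counter address directly to
+ * get timestamps without issuing a syscall each time.
+ */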
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5)
+{
+	struct ipipe_sysinfo ipipe_info;
+	struct __ipipe_tscinfo *p = &ipipe_info.arch.tsc;
+	struct __xn_tscinfo info;
+	int ret;
+
+	if (a1 != XENOMAI_SYSARCH_TSCINFO)
+		return -EINVAL;
+
+	ret = ipipe_get_sysinfo(&ipipe_info);
+	if (ret)
+		return ret;
+
+	switch (p->type) {
+	case IPIPE_TSC_TYPE_DECREMENTER:
+		info.counter = p->u.dec.counter;
+		break;
+	case IPIPE_TSC_TYPE_NONE:
+		return -ENOSYS;
+	default:
+		info.counter = p->u.fr.counter;
+		break;
+	}
+
+	return cobalt_copy_to_user((void *)a2, &info, sizeof(info));
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c
new file mode 100644
index 0000000..c68b5e3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm/ipipe/thread.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+#include <linux/mm.h>
+#include <linux/jump_label.h>
+#include <asm/mmu_context.h>
+#include <cobalt/kernel/thread.h>
+
+struct static_key __xeno_vfp_key = STATIC_KEY_INIT_TRUE;
+
+asmlinkage void __asm_thread_switch(struct thread_info *out,
+				    struct thread_info *in);
+
+asmlinkage void __asm_thread_trampoline(void);
+
+#if defined(CONFIG_XENO_ARCH_FPU) && defined(CONFIG_VFP)
+
+static unsigned int vfp_checked;
+static DEFINE_MUTEX(vfp_check_lock);
+
+asmlinkage void __asm_vfp_save(union vfp_state *vfp, unsigned int fpexc);
+
+asmlinkage void __asm_vfp_load(union vfp_state *vfp, unsigned int cpu);
+
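+/*
+ * FMRX/FMXR spelled as generic coprocessor (p10) accesses, so that
+ * these helpers assemble even when VFP support is not enabled in the
+ * assembler.
+ */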
+#define do_vfp_fmrx(_vfp_)						\
+	({								\
+		u32 __v;						\
+		asm volatile("mrc p10, 7, %0, " __stringify(_vfp_)	\
+			     ", cr0, 0 @ fmrx %0, " #_vfp_:		\
+			     "=r" (__v));				\
+		__v;							\
+	})
+
+#define do_vfp_fmxr(_vfp_,_var_)				\
+	asm volatile("mcr p10, 7, %0, " __stringify(_vfp_)	\
+		     ", cr0, 0 @ fmxr " #_vfp_ ", %0":		\
+		     /* */ : "r" (_var_))
+
+extern union vfp_state *vfp_current_hw_state[NR_CPUS];
+
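+/*
+ * Return the VFP context currently loaded in the hardware registers,
+ * or NULL when the unit is disabled or the context was last loaded
+ * on another CPU.
+ */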
+static inline union vfp_state *get_fpu_owner(void)
+{
+	union vfp_state *vfp_owner;
+	unsigned int cpu;
+#ifdef CONFIG_SMP
+	unsigned int fpexc;
+#endif
+
+#if __LINUX_ARM_ARCH__ <= 6
+	if (!static_key_true(&__xeno_vfp_key))
+		return NULL;
+#endif
+
+#ifdef CONFIG_SMP
+	fpexc = do_vfp_fmrx(FPEXC);
+	if (!(fpexc & FPEXC_EN))
+		return NULL;
+#endif
+
+	cpu = raw_smp_processor_id();
+	vfp_owner = vfp_current_hw_state[cpu];
+	if (!vfp_owner)
+		return NULL;
+
+#ifdef CONFIG_SMP
+	if (vfp_owner->hard.cpu != cpu)
+		return NULL;
+#endif /* SMP */
+
+	return vfp_owner;
+}
+
+#define do_disable_vfp(fpexc)					\
+	do_vfp_fmxr(FPEXC, fpexc & ~FPEXC_EN)
+
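+/*
+ * Exception status bits to be cleared when (re)enabling the VFP, so
+ * that no pending exception gets spuriously replayed on behalf of
+ * the incoming context.
+ */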
+#define XNARCH_VFP_ANY_EXC						\
+	(FPEXC_EX|FPEXC_DEX|FPEXC_FP2V|FPEXC_VV|FPEXC_TRAP_MASK)
+
+#define do_enable_vfp()							\
+	({								\
+		unsigned _fpexc = do_vfp_fmrx(FPEXC) | FPEXC_EN;	\
+		do_vfp_fmxr(FPEXC, _fpexc & ~XNARCH_VFP_ANY_EXC);	\
+		_fpexc;							\
+	})
+
+int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+{
+	/* Decode the faulting instruction the same way "call_fpe"
+	   does in arch/arm/kernel/entry-armv.S. */
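+	/* Trap type indexed by coprocessor number: CP1/CP2 belong to
+	   FPE, CP4-CP6 to Crunch, CP10/CP11 to VFP/NEON. */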
+	static unsigned copro_to_exc[16] = {
+		IPIPE_TRAP_UNDEFINSTR,
+		/* FPE */
+		IPIPE_TRAP_FPU, IPIPE_TRAP_FPU,
+		IPIPE_TRAP_UNDEFINSTR,
+#ifdef CONFIG_CRUNCH
+		IPIPE_TRAP_FPU, IPIPE_TRAP_FPU, IPIPE_TRAP_FPU,
+#else /* !CONFIG_CRUNCH */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#endif /* !CONFIG_CRUNCH */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#ifdef CONFIG_VFP
+		IPIPE_TRAP_VFP, IPIPE_TRAP_VFP,
+#else /* !CONFIG_VFP */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+#endif /* !CONFIG_VFP */
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+		IPIPE_TRAP_UNDEFINSTR, IPIPE_TRAP_UNDEFINSTR,
+	};
+	unsigned instr, exc, cp;
+	char *pc;
+
+	if (d->exception == IPIPE_TRAP_FPU)
+		return 1;
+
+	if (d->exception == IPIPE_TRAP_VFP)
+		goto trap_vfp;
+
+	if (d->exception != IPIPE_TRAP_UNDEFINSTR)
+		return 0;
+
+	pc = (char *) xnarch_fault_pc(d);
+	if (unlikely(thumb_mode(d->regs))) {
+		unsigned short thumbh, thumbl;
+
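+		/*
+		 * 32-bit Thumb-2 coprocessor encodings only exist on
+		 * ARMv7: unless the kernel is built for such a core
+		 * (CONFIG_CPU_V7 with Thumb support), the conditional
+		 * below reduces to a plain "return 0".
+		 */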
+#if defined(CONFIG_ARM_THUMB) && __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_CPU_V7)
+#if __LINUX_ARM_ARCH__ < 7
+		if (cpu_architecture() < CPU_ARCH_ARMv7)
+#else
+		if (0)
+#endif /* arch < 7 */
+#endif /* thumb && arch >= 6 && cpu_v7 */
+			return 0;
+
+		thumbh = *(unsigned short *) pc;
+		thumbl = *((unsigned short *) pc + 1);
+
+		if ((thumbh & 0x0000f800) < 0x0000e800)
+			return 0;
+		instr = (thumbh << 16) | thumbl;
+
+#ifdef CONFIG_NEON
+		if ((instr & 0xef000000) == 0xef000000
+		    || (instr & 0xff100000) == 0xf9000000)
+			goto trap_vfp;
+#endif
+	} else {
+		instr = *(unsigned *) pc;
+
+#ifdef CONFIG_NEON
+		if ((instr & 0xfe000000) == 0xf2000000
+		    || (instr & 0xff100000) == 0xf4000000)
+			goto trap_vfp;
+#endif
+	}
+
+	if ((instr & 0x0c000000) != 0x0c000000)
+		return 0;
+
+	cp = (instr & 0x00000f00) >> 8;
+#ifdef CONFIG_IWMMXT
+	/* We need something equivalent to _TIF_USING_IWMMXT for Xenomai kernel
+	   threads */
+	if (cp <= 1) {
+		d->exception = IPIPE_TRAP_FPU;
+		return 1;
+	}
+#endif
+
+	exc = copro_to_exc[cp];
+	if (exc == IPIPE_TRAP_VFP) {
+	  trap_vfp:
+		/* If an exception is pending, the VFP fault is not really an
+		   "FPU unavailable" fault, so we return undefinstr in that
+		   case; the nucleus will then let Linux handle the fault. */
+		exc = do_vfp_fmrx(FPEXC);
+		if (exc & (FPEXC_EX|FPEXC_DEX)
+		    || ((exc & FPEXC_EN) && do_vfp_fmrx(FPSCR) & FPSCR_IXE))
+			exc = IPIPE_TRAP_UNDEFINSTR;
+		else
+			exc = IPIPE_TRAP_VFP;
+	}
+
+	d->exception = exc;
+	return exc != IPIPE_TRAP_UNDEFINSTR;
+}
+
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *rootcb = xnthread_archtcb(root);
+	rootcb->fpup = get_fpu_owner();
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	union vfp_state *const from_fpup = from ? from->tcb.fpup : NULL;
+	unsigned cpu = raw_smp_processor_id();
+
+	if (xnthread_test_state(to, XNROOT) == 0) {
+		union vfp_state *const to_fpup = to->tcb.fpup;
+		unsigned fpexc = do_enable_vfp();
+
+		if (from_fpup == to_fpup)
+			return;
+
+		if (from_fpup)
+			__asm_vfp_save(from_fpup, fpexc);
+
+		__asm_vfp_load(to_fpup, cpu);
+	} else {
+		/*
+		 * We are restoring the Linux current thread. Leave
+		 * the FPU disabled, so that a fault occurs if the
+		 * newly switched thread uses the FPU; the kernel
+		 * handler can then pick the correct FPU context, and
+		 * save the last used RT context in the same move.
+		 */
+		vfp_current_hw_state[cpu] = from_fpup;
+#ifdef CONFIG_SMP
+		/*
+		 * On SMP, returning to FPU-disabled mode means the
+		 * FPU state has to be saved; skip the save when the
+		 * current FPU context already belongs to the task we
+		 * are switching to.
+		 */
+		if (from_fpup) {
+			union vfp_state *const current_task_fpup =
+				&to->tcb.core.tip->vfpstate;
+			const unsigned fpdis = do_vfp_fmrx(FPEXC);
+			const unsigned fpen = fpdis | FPEXC_EN;
+
+			do_vfp_fmxr(FPEXC, fpen & ~XNARCH_VFP_ANY_EXC);
+			if (from_fpup == current_task_fpup)
+				return;
+
+			__asm_vfp_save(from_fpup, fpen);
+			do_vfp_fmxr(FPEXC, fpdis);
+		}
+#endif
+	}
+}
+
+int xnarch_handle_fpu_fault(struct xnthread *from,
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	if (xnthread_test_state(to, XNFPU))
+		/* FPU is already enabled, probably an exception */
+		return 0;
+
+#if __LINUX_ARM_ARCH__ <= 6
+	if (!static_key_true(&__xeno_vfp_key))
+		/* A VFP instruction was issued on a CPU without VFP
+		   support: this is an error. */
+		return 0;
+#endif
+
+	xnlock_get(&nklock);
+	xnthread_set_state(to, XNFPU);
+	xnlock_put(&nklock);
+
+	xnarch_switch_fpu(from, to);
+
+	/* Retry faulting instruction */
+	d->regs->ARM_pc = xnarch_fault_pc(d);
+	return 1;
+}
+
+void xnarch_init_shadow_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+
+	tcb->fpup = &task_thread_info(tcb->core.host_task)->vfpstate;
+
+	if (vfp_checked == 0) {
+		mutex_lock(&vfp_check_lock);
+		if (vfp_checked == 0) {
+			if ((elf_hwcap & HWCAP_VFP) == 0)
+				static_key_slow_dec(&__xeno_vfp_key);
+			vfp_checked = 1;
+		}
+		mutex_unlock(&vfp_check_lock);
+	}
+
+	/* XNFPU is set upon first FPU fault */
+	xnthread_clear_state(thread, XNFPU);
+}
+
+void xnarch_init_root_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = &thread->tcb;
+	tcb->fpup = NULL;
+}
+
+#endif /* CONFIG_XENO_ARCH_FPU && CONFIG_VFP */
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
+{
+	struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb;
+	struct mm_struct *prev_mm, *next_mm;
+	struct task_struct *next;
+
+	next = in_tcb->core.host_task;
+	prev_mm = out_tcb->core.active_mm;
+
+	next_mm = in_tcb->core.mm;
+	if (next_mm == NULL) {
+		in_tcb->core.active_mm = prev_mm;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		ipipe_switch_mm_head(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the root thread,
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In that particular case, the
+		 * kernel expects a lazy TLB state for leaving the mm.
+		 */
+		if (next->mm == NULL)
+			enter_lazy_tlb(prev_mm, next);
+	}
+
+	__asm_thread_switch(out_tcb->core.tip, in_tcb->core.tip);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig
new file mode 100644
index 0000000..bdf5f16
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/Kconfig
@@ -0,0 +1,12 @@
+source "kernel/xenomai/Kconfig"
+source "drivers/xenomai/Kconfig"
+
+config XENO_ARCH_FPU
+	def_bool y
+
+config XENO_ARCH_SYS3264
+	def_bool n
+
+config XENO_ARCH_OUTOFLINE_XNLOCK
+	bool
+	default y
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile
new file mode 100644
index 0000000..6c872fd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y := machine.o
+
+ccflags-y := -I$(srctree)/arch/arm64/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h
new file mode 100644
index 0000000..cd9496b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/calibration.h
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_ARM64_DOVETAIL_CALIBRATION_H
+#define _COBALT_ARM64_DOVETAIL_CALIBRATION_H
+
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned int sched_latency;
+
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#else
+	sched_latency = 5000;
+#endif
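+	/*
+	 * The gravity values tell the clock core how early timer
+	 * shots should be programmed to compensate for the expected
+	 * wakeup latency of user threads, kernel threads and IRQ
+	 * handlers, respectively.
+	 */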
+	p->user = xnclock_ns_to_ticks(&nkclock, sched_latency);
+	p->kernel = xnclock_ns_to_ticks(&nkclock,
+					CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_ARM64_DOVETAIL_CALIBRATION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h
new file mode 100644
index 0000000..d5a438b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/features.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_ARM64_DOVETAIL_FEATURES_H
+#define _COBALT_ARM64_DOVETAIL_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_ARM64_DOVETAIL_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..8c4228d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/fptest.h
@@ -0,0 +1,39 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#ifndef _COBALT_ARM64_DOVETAIL_FPTEST_H
+#define _COBALT_ARM64_DOVETAIL_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/xenomai/uapi/fptest.h>
+#include <asm/hwcap.h>
+
+#define have_fp (ELF_HWCAP & HWCAP_FP)
+
+static inline int fp_kernel_supported(void)
+{
+	return 0;
+}
+
+static inline void fp_init(void)
+{
+}
+
+static inline int fp_linux_begin(void)
+{
+	return -ENOSYS;
+}
+
+static inline void fp_linux_end(void)
+{
+}
+
+static inline int fp_detect(void)
+{
+	return have_fp ? __COBALT_HAVE_FPU : 0;
+}
+
+#endif /* !_COBALT_ARM64_DOVETAIL_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..e71a5b7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/machine.h
@@ -0,0 +1,33 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com>
+ */
+
+#ifndef _COBALT_ARM64_DOVETAIL_MACHINE_H
+#define _COBALT_ARM64_DOVETAIL_MACHINE_H
+
+#include <linux/version.h>
+#include <asm/byteorder.h>
+#include <cobalt/kernel/assert.h>
+
+/* D-side always behaves as PIPT on AArch64 (see arch/arm64/include/asm/cachetype.h) */
+#define xnarch_cache_aliasing() 0
+
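+/*
+ * Find the index of the least significant set bit: rbit reverses the
+ * bit order, then clz counts the leading zeroes of the reversed
+ * word; e.g. ffnz(0x8) == 3.
+ */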
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	int __r;
+
+	/* zero input is not valid */
+	XENO_WARN_ON(COBALT, ul == 0);
+
+	__asm__ ("rbit\t%0, %1\n"
+	         "clz\t%0, %0\n"
+	        : "=r" (__r) : "r"(ul) : "cc");
+
+	return __r;
+}
+
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_ARM64_DOVETAIL_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..96871e2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall.h
@@ -0,0 +1,63 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com>
+ * Copyright (C) 2021 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_ARM64_DOVETAIL_SYSCALL_H
+#define _COBALT_ARM64_DOVETAIL_SYSCALL_H
+
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+/*
+ * Cobalt and Linux syscall numbers can be fetched from syscallno,
+ * masking out the __COBALT_SYSCALL_BIT marker.
+ */
+#define __xn_reg_sys(__regs)	((unsigned long)(__regs)->syscallno)
+#define __xn_syscall_p(regs)	((__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT) != 0)
+#define __xn_syscall(__regs)	((unsigned long)(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT))
+
+#define __xn_reg_rval(__regs)	((__regs)->regs[0])
+#define __xn_reg_pc(__regs)	((__regs)->pc)
+#define __xn_reg_sp(__regs)	((__regs)->sp)
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)			\
+	({						\
+		*(__code) = __xn_syscall(__regs);	\
+		*(__code) < NR_syscalls;		\
+	})
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+static inline
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			unsigned long a3, unsigned long a4,
+			unsigned long a5)
+{
+	/* We need none of these with Dovetail. */
+	return -ENOSYS;
+}
+
+#endif /* !_COBALT_ARM64_DOVETAIL_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h
new file mode 100644
index 0000000..cd0f392
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/syscall32.h
@@ -0,0 +1,12 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2014 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_ARM64_DOVETAIL_SYSCALL32_H
+#define _COBALT_ARM64_DOVETAIL_SYSCALL32_H
+
+#include <asm-generic/xenomai/syscall32.h>
+
+#endif /* !_COBALT_ARM64_DOVETAIL_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h
new file mode 100644
index 0000000..5b60ff3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/thread.h
@@ -0,0 +1,22 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com>
+ * Copyright (C) 2021 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_ARM64_DOVETAIL_THREAD_H
+#define _COBALT_ARM64_DOVETAIL_THREAD_H
+
+#include <asm-generic/xenomai/dovetail/thread.h>
+#include <asm/dovetail.h>
+
+#define xnarch_fault_pc(__regs)	((unsigned long)((__regs)->pc - 4)) /* XXX ? */
+
+#define xnarch_fault_pf_p(__nr)	((__nr) == ARM64_TRAP_ACCESS)
+#define xnarch_fault_bp_p(__nr)	((current->ptrace & PT_PTRACED) &&	\
+				 ((__nr) == ARM64_TRAP_DEBUG || (__nr) == ARM64_TRAP_UNDI))
+
+#define xnarch_fault_notify(__nr) (!xnarch_fault_bp_p(__nr))
+
+#endif /* !_COBALT_ARM64_DOVETAIL_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h
new file mode 100644
index 0000000..7a1122f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/include/asm/xenomai/wrappers.h
@@ -0,0 +1,15 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2005 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_ARM64_ASM_WRAPPERS_H
+#define _COBALT_ARM64_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */
+
+#define __put_user_inatomic __put_user
+#define __get_user_inatomic __get_user
+
+#endif /* _COBALT_ARM64_ASM_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c
new file mode 100644
index 0000000..e03d7b9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/dovetail/machine.c
@@ -0,0 +1,41 @@
+// SPDX-License-Identifier: GPL-2.0
+// Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+// Copyright (C) 2021 Philippe Gerum  <rpm@xenomai.org>
+
+#include <linux/mm.h>
+#include <asm/xenomai/machine.h>
+
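+/*
+ * Touch every page of a memory area being pinned ahead of time, so
+ * that real-time threads do not take minor faults on that range once
+ * they run in primary mode.
+ */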
+static void mach_arm64_prefault(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	unsigned int flags;
+
+	if ((vma->vm_flags & VM_MAYREAD)) {
+		flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0;
+		for (addr = vma->vm_start;
+		     addr != vma->vm_end; addr += PAGE_SIZE)
+			handle_mm_fault(vma, addr, flags, NULL);
+	}
+}
+
+static const char *const fault_labels[] = {
+	[ARM64_TRAP_ACCESS] = "Data or instruction abort",
+	[ARM64_TRAP_ALIGN] = "SP/PC alignment abort",
+	[ARM64_TRAP_SEA] = "Synchronous external abort",
+	[ARM64_TRAP_DEBUG] = "Debug trap",
+	[ARM64_TRAP_UNDI] = "Undefined instruction",
+	[ARM64_TRAP_UNDSE] = "Undefined synchronous exception",
+	[ARM64_TRAP_FPE] = "FPSIMD exception",
+	[ARM64_TRAP_SVE] = "SVE access trap",
+	[ARM64_TRAP_BTI] = "Branch target identification trap",
+	[31] = NULL
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "arm64",
+	.init = NULL,
+	.late_init = NULL,
+	.cleanup = NULL,
+	.prefault = mach_arm64_prefault,
+	.fault_labels = fault_labels,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h
new file mode 100644
index 0000000..3d81f6e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/arith.h
@@ -0,0 +1,142 @@
+/*
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM64_ASM_UAPI_ARITH_H
+#define _COBALT_ARM64_ASM_UAPI_ARITH_H
+
+#include <asm/xenomai/uapi/features.h>
+
+#if !defined(CONFIG_FTRACE)
+static inline __attribute__((__const__)) unsigned long long
+mach_arm_nodiv_ullimd(const unsigned long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ);
+
+#define xnarch_nodiv_ullimd(op, frac, integ) \
+	mach_arm_nodiv_ullimd((op), (frac), (integ))
+
+static inline __attribute__((__const__)) long long
+mach_arm_nodiv_llimd(const long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ);
+
+#define xnarch_nodiv_llimd(op, frac, integ) \
+	mach_arm_nodiv_llimd((op), (frac), (integ))
+#else /* CONFIG_FTRACE */
+#define xnarch_add96and64(l0, l1, l2, s0, s1)		\
+	do {						\
+		__asm__ ("adds %2, %2, %4\n\t"		\
+			 "adcs %1, %1, %3\n\t"		\
+			 "adc %0, %0, #0\n\t"		\
+			 : "+r"(l0), "+r"(l1), "+r"(l2)	\
+			 : "r"(s0), "r"(s1): "cc");	\
+	} while (0)
+#endif /* CONFIG_FTRACE */
+
+#include <cobalt/uapi/asm-generic/arith.h>
+
+#if !defined(CONFIG_FTRACE)
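+/*
+ * Compute op * (integ + frac / 2^64) without a 64-bit division: the
+ * partial products are accumulated with umull/adcs into a 96-bit
+ * intermediate, and the topmost discarded bit is folded back in as
+ * rounding.
+ */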
+#define mach_arm_nodiv_ullimd_str			\
+	"umull %[tl], %[rl], %[opl], %[fracl]\n\t"	\
+	"umull %[rm], %[rh], %[oph], %[frach]\n\t"	\
+	"adds %[rl], %[rl], %[tl], lsr #31\n\t"		\
+	"adcs %[rm], %[rm], #0\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umull %[tl], %[th], %[oph], %[fracl]\n\t"	\
+	"adds %[rl], %[rl], %[tl]\n\t"			\
+	"adcs %[rm], %[rm], %[th]\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umull %[tl], %[th], %[opl], %[frach]\n\t"	\
+	"adds %[rl], %[rl], %[tl]\n\t"			\
+	"adcs %[rm], %[rm], %[th]\n\t"			\
+	"adc %[rh], %[rh], #0\n\t"			\
+	"umlal %[rm], %[rh], %[opl], %[integ]\n\t"	\
+	"mla %[rh], %[oph], %[integ], %[rh]\n\t"
+
+static inline __attribute__((__const__)) unsigned long long
+mach_arm_nodiv_ullimd(const unsigned long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ)
+{
+	register unsigned rl __asm__("r5");
+	register unsigned rm __asm__("r0");
+	register unsigned rh __asm__("r1");
+	register unsigned fracl __asm__ ("r2");
+	register unsigned frach __asm__ ("r3");
+	register unsigned integ __asm__("r4") = rhs_integ;
+	register unsigned opl __asm__ ("r6");
+	register unsigned oph __asm__ ("r7");
+	register unsigned tl __asm__("r8");
+	register unsigned th __asm__("r9");
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(frac, frach, fracl);
+
+	__asm__ (mach_arm_nodiv_ullimd_str
+		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
+		   [tl]"=r"(tl), [th]"=r"(th)
+		 : [opl]"r"(opl), [oph]"r"(oph),
+		   [fracl]"r"(fracl), [frach]"r"(frach),
+		   [integ]"r"(integ)
+		 : "cc");
+
+	return xnarch_u64fromu32(rh, rm);
+}
+
+static inline __attribute__((__const__)) long long
+mach_arm_nodiv_llimd(const long long op,
+		       const unsigned long long frac,
+		       const unsigned rhs_integ)
+{
+	register unsigned rl __asm__("r5");
+	register unsigned rm __asm__("r0");
+	register unsigned rh __asm__("r1");
+	register unsigned fracl __asm__ ("r2");
+	register unsigned frach __asm__ ("r3");
+	register unsigned integ __asm__("r4") = rhs_integ;
+	register unsigned opl __asm__ ("r6");
+	register unsigned oph __asm__ ("r7");
+	register unsigned tl __asm__("r8");
+	register unsigned th __asm__("r9");
+	register unsigned s __asm__("r10");
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(frac, frach, fracl);
+
+	__asm__ ("movs %[s], %[oph], lsr #30\n\t"
+		 "beq 1f\n\t"
+		 "rsbs  %[opl], %[opl], #0\n\t"
+		 "sbc  %[oph], %[oph], %[oph], lsl #1\n"
+		 "1:\t"
+		 mach_arm_nodiv_ullimd_str
+		 "teq %[s], #0\n\t"
+		 "beq 2f\n\t"
+		 "rsbs  %[rm], %[rm], #0\n\t"
+		 "sbc  %[rh], %[rh], %[rh], lsl #1\n"
+		 "2:\t"
+		 : [rl]"=r"(rl), [rm]"=r"(rm), [rh]"=r"(rh),
+		   [tl]"=r"(tl), [th]"=r"(th), [s]"=r"(s)
+		 : [opl]"r"(opl), [oph]"r"(oph),
+		   [fracl]"r"(fracl), [frach]"r"(frach),
+		   [integ]"r"(integ)
+		 : "cc");
+
+	return xnarch_u64fromu32(rh, rm);
+}
+#endif /* !CONFIG_FTRACE */
+
+#endif /* _COBALT_ARM64_ASM_UAPI_ARITH_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h
new file mode 100644
index 0000000..b98a963
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/features.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM64_ASM_UAPI_FEATURES_H
+#define _COBALT_ARM64_ASM_UAPI_FEATURES_H
+
+/* The ABI revision level we use on this arch. */
+#define XENOMAI_ABI_REV   2UL
+
+#define XENOMAI_FEAT_DEP (__xn_feat_generic_mask)
+
+#define XENOMAI_FEAT_MAN (__xn_feat_generic_man_mask)
+
+#undef XNARCH_HAVE_LLMULSHFT
+
+#undef XNARCH_HAVE_NODIV_LLIMD
+
+struct cobalt_featinfo_archdep { /* no arch-specific feature */ };
+
+#include <cobalt/uapi/asm-generic/features.h>
+
+static inline const char *get_feature_label(unsigned int feature)
+{
+	return get_generic_feature_label(feature);
+}
+
+#endif /* !_COBALT_ARM64_ASM_UAPI_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h
new file mode 100644
index 0000000..7a2cb92
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/fptest.h
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM64_ASM_UAPI_FPTEST_H
+#define _COBALT_ARM64_ASM_UAPI_FPTEST_H
+
+#define __COBALT_HAVE_FPU  0x1
+
+/*
+ * CAUTION: keep this code strictly inlined in macros: we don't want
+ * GCC to apply any callee-saved logic to fpsimd registers in
+ * fp_regs_set() before fp_regs_check() can verify their contents, but
+ * we still want GCC to know about the registers we have clobbered.
+ */
+
+#define fp_regs_set(__features, __val)					\
+	do {								\
+		unsigned long long __e[32];				\
+		unsigned int __i;					\
+									\
+		if (__features & __COBALT_HAVE_FPU) {			\
+									\
+			for (__i = 0; __i < 32; __i++)			\
+				__e[__i] = (__val);			\
+									\
+			__asm__ __volatile__("ldp  d0, d1, [%0, #8 * 0] \n"	\
+					     "ldp  d2, d3, [%0, #8 * 2] \n"	\
+					     "ldp  d4, d5, [%0, #8 * 4]\n"	\
+					     "ldp  d6, d7, [%0, #8 * 6]\n"	\
+					     "ldp  d8, d9, [%0, #8 * 8]\n"	\
+					     "ldp  d10, d11, [%0, #8 * 10]\n"	\
+					     "ldp  d12, d13, [%0, #8 * 12]\n"	\
+					     "ldp  d14, d15, [%0, #8 * 14]\n"	\
+					     "ldp  d16, d17, [%0, #8 * 16]\n"	\
+					     "ldp  d18, d19, [%0, #8 * 18]\n"	\
+					     "ldp  d20, d21, [%0, #8 * 20]\n"	\
+					     "ldp  d22, d23, [%0, #8 * 22]\n"	\
+					     "ldp  d24, d25, [%0, #8 * 24]\n"	\
+					     "ldp  d26, d27, [%0, #8 * 26]\n"	\
+					     "ldp  d28, d29, [%0, #8 * 28]\n"	\
+					     "ldp  d30, d31, [%0, #8 * 30]\n"	\
+					     : /* No outputs. */	\
+					     : "r"(&__e[0])		\
+					     : "d0", "d1", "d2", "d3", "d4", "d5", "d6",	\
+					       "d7", "d8", "d9", "d10", "d11", "d12", "d13",	\
+					       "d14", "d15", "d16", "d17", "d18", "d19",	\
+					       "d20", "d21", "d22", "d23", "d24", "d25",	\
+					       "d26", "d27", "d28", "d29", "d30", "d31",	\
+					       "memory");		\
+		}							\
+	} while (0)
+
+#define fp_regs_check(__features, __val, __report)			\
+	({								\
+		unsigned int __result = (__val), __i;			\
+		unsigned long long __e[32];				\
+									\
+		if (__features & __COBALT_HAVE_FPU) {			\
+									\
+			__asm__ __volatile__("stp  d0, d1, [%0, #8 * 0] \n"	\
+					     "stp  d2, d3, [%0, #8 * 2] \n"	\
+					     "stp  d4, d5, [%0, #8 * 4]\n"	\
+					     "stp  d6, d7, [%0, #8 * 6]\n"	\
+					     "stp  d8, d9, [%0, #8 * 8]\n"	\
+					     "stp  d10, d11, [%0, #8 * 10]\n"	\
+					     "stp  d12, d13, [%0, #8 * 12]\n"	\
+					     "stp  d14, d15, [%0, #8 * 14]\n"	\
+					     "stp  d16, d17, [%0, #8 * 16]\n"	\
+					     "stp  d18, d19, [%0, #8 * 18]\n"	\
+					     "stp  d20, d21, [%0, #8 * 20]\n"	\
+					     "stp  d22, d23, [%0, #8 * 22]\n"	\
+					     "stp  d24, d25, [%0, #8 * 24]\n"	\
+					     "stp  d26, d27, [%0, #8 * 26]\n"	\
+					     "stp  d28, d29, [%0, #8 * 28]\n"	\
+					     "stp  d30, d31, [%0, #8 * 30]\n"	\
+					     :  /* No outputs. */	\
+					     : "r"(&__e[0])		\
+					     : "memory");		\
+									\
+			for (__i = 0; __i < 32; __i++)			\
+				if (__e[__i] != __val) {		\
+					__report("d%d: %llu != %u\n",	\
+						 __i, __e[__i], __val); \
+					__result = __e[__i];		\
+				}					\
+		}							\
+									\
+		__result;						\
+	})
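+
+/*
+ * Typical use (sketch; "report" stands for any printf-like routine
+ * supplied by the caller):
+ *
+ *	fp_regs_set(features, seed);
+ *	... run through a context switch ...
+ *	if (fp_regs_check(features, seed, report) != seed)
+ *		... the FPU registers were corrupted ...
+ */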
+
+#endif /* !_COBALT_ARM64_ASM_UAPI_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h
new file mode 100644
index 0000000..5b319d6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/syscall.h
@@ -0,0 +1,28 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM64_ASM_UAPI_SYSCALL_H
+#define _COBALT_ARM64_ASM_UAPI_SYSCALL_H
+
+#define __xn_syscode(__nr)	(__COBALT_SYSCALL_BIT | (__nr))
+
+#define XENOMAI_SYSARCH_TSCINFO		0
+
+#endif /* !_COBALT_ARM64_ASM_UAPI_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h
new file mode 100644
index 0000000..20a4eaa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/include/asm/xenomai/uapi/tsc.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_ARM64_ASM_UAPI_TSC_H
+#define _COBALT_ARM64_ASM_UAPI_TSC_H
+
+struct __xn_tscinfo {
+	volatile unsigned int *counter;
+};
+
+#endif /* !_COBALT_ARM64_ASM_UAPI_TSC_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile
new file mode 100644
index 0000000..cf12a18
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/Makefile
@@ -0,0 +1,5 @@
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y := machine.o thread.o syscall.o
+
+ccflags-y := -I$(srctree)/arch/arm64/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README
new file mode 100644
index 0000000..80f954a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/README
@@ -0,0 +1,3 @@
+Get the interrupt pipeline code for the target kernel from
+http://xenomai.org/downloads/ipipe/, or
+git://git.xenomai.org/ipipe.git
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h
new file mode 100644
index 0000000..e85521e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/calibration.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_CALIBRATION_H
+#define _COBALT_ARM64_ASM_CALIBRATION_H
+
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned int ulat;
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	ulat = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#else
+	ulat = 4000;
+#endif
+	p->user = xnclock_ns_to_ticks(&nkclock, ulat);
+	p->kernel = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_ARM64_ASM_CALIBRATION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h
new file mode 100644
index 0000000..112408f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/features.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_FEATURES_H
+#define _COBALT_ARM64_ASM_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_ARM64_ASM_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..39903a0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/fptest.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_FPTEST_H
+#define _COBALT_ARM64_ASM_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/xenomai/uapi/fptest.h>
+#include <asm/hwcap.h>
+
+#define have_fp (ELF_HWCAP & HWCAP_FP)
+
+static inline int fp_kernel_supported(void)
+{
+	return 0;
+}
+
+static inline void fp_init(void)
+{
+}
+
+static inline int fp_linux_begin(void)
+{
+	return -ENOSYS;
+}
+
+static inline void fp_linux_end(void)
+{
+}
+
+static inline int fp_detect(void)
+{
+	return have_fp ? __COBALT_HAVE_FPU : 0;
+}
+
+#endif /* _COBALT_ARM64_ASM_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..c91c8f5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/machine.h
@@ -0,0 +1,68 @@
+/**
+ *   Copyright &copy; 2002-2004 Philippe Gerum.
+ *
+ *   ARM port
+ *     Copyright (C) 2005 Stelian Pop
+ *
+ *   ARM64 port
+ *     Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com>
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Xenomai; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_MACHINE_H
+#define _COBALT_ARM64_ASM_MACHINE_H
+
+#include <linux/version.h>
+#include <asm/byteorder.h>
+
+#define XNARCH_HOST_TICK_IRQ __ipipe_hrtimer_irq
+
+#include <asm/barrier.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#include <asm/compiler.h>
+#endif
+
+#include <asm/cmpxchg.h>
+#include <asm/switch_to.h>
+#include <asm/system_misc.h>
+#include <asm/timex.h>
+#include <asm/processor.h>
+#include <asm/ipipe.h>
+#include <asm/cacheflush.h>
+#include <cobalt/kernel/assert.h>
+
+/* D-side always behaves as PIPT on AArch64 (see arch/arm64/include/asm/cachetype.h) */
+#define xnarch_cache_aliasing() 0
+
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	int __r;
+
+	/* zero input is not valid */
+	XENO_WARN_ON(COBALT, ul == 0);
+
+	__asm__ ("rbit\t%0, %1\n"
+	         "clz\t%0, %0\n"
+	        : "=r" (__r) : "r"(ul) : "cc");
+
+	return __r;
+}
+
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_ARM64_ASM_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..6b8b71d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall.h
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_SYSCALL_H
+#define _COBALT_ARM64_ASM_SYSCALL_H
+
+#include <linux/errno.h>
+#include <linux/uaccess.h>
+#include <asm/unistd.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+/*
+ * Cobalt and Linux syscall numbers can be fetched from syscallno,
+ * masking out the __COBALT_SYSCALL_BIT marker.
+ */
+#define __xn_reg_sys(__regs)	((unsigned long)(__regs)->syscallno)
+#define __xn_syscall_p(regs)	((__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT) != 0)
+#define __xn_syscall(__regs)	((unsigned long)(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT))
+
+#define __xn_reg_rval(__regs)	((__regs)->regs[0])
+#define __xn_reg_arg1(__regs)	((__regs)->regs[0])
+#define __xn_reg_arg2(__regs)	((__regs)->regs[1])
+#define __xn_reg_arg3(__regs)	((__regs)->regs[2])
+#define __xn_reg_arg4(__regs)	((__regs)->regs[3])
+#define __xn_reg_arg5(__regs)	((__regs)->regs[4])
+#define __xn_reg_pc(__regs)	((__regs)->pc)
+#define __xn_reg_sp(__regs)	((__regs)->sp)
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)			\
+	({						\
+		*(__code) = __xn_syscall(__regs);	\
+		*(__code) < NR_syscalls;		\
+	})
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5);
+
+#endif /* !_COBALT_ARM64_ASM_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h
new file mode 100644
index 0000000..a66ddd6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/syscall32.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_SYSCALL32_H
+#define _COBALT_ARM64_ASM_SYSCALL32_H
+
+#include <asm-generic/xenomai/syscall32.h>
+
+#endif /* !_COBALT_ARM64_ASM_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h
new file mode 100644
index 0000000..7899a49
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/thread.h
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2005 Stelian Pop
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_THREAD_H
+#define _COBALT_ARM64_ASM_THREAD_H
+
+#include <linux/version.h>
+#include <asm-generic/xenomai/ipipe/thread.h>
+
+#if defined(CONFIG_XENO_ARCH_FPU) && LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)
+#define ARM64_XENO_OLD_SWITCH
+#endif
+
+struct xnarchtcb {
+	struct xntcb core;
+#ifdef ARM64_XENO_OLD_SWITCH
+	struct fpsimd_state xnfpsimd_state;
+	struct fpsimd_state *fpup;
+#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
+#endif
+};
+
+#define xnarch_fault_regs(d)	((d)->regs)
+#define xnarch_fault_trap(d)	((d)->exception)
+#define xnarch_fault_code(d)	(0)
+#define xnarch_fault_pc(d)	((unsigned long)((d)->regs->pc - 4)) /* XXX ? */
+
+#define xnarch_fault_pf_p(d)	((d)->exception == IPIPE_TRAP_ACCESS)
+#define xnarch_fault_bp_p(d)	((current->ptrace & PT_PTRACED) &&	\
+				 ((d)->exception == IPIPE_TRAP_BREAK ||	\
+				  (d)->exception == IPIPE_TRAP_UNDEFINSTR))
+
+#define xnarch_fault_notify(d) (!xnarch_fault_bp_p(d))
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->core.host_task;
+}
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in);
+
+static inline void xnarch_enter_root(struct xnthread *root) { }
+
+int xnarch_escalate(void);
+
+#ifdef ARM64_XENO_OLD_SWITCH
+
+void xnarch_init_root_tcb(struct xnthread *thread);
+
+void xnarch_init_shadow_tcb(struct xnthread *thread);
+
+void xnarch_leave_root(struct xnthread *root);
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to);
+
+#else /* !ARM64_XENO_OLD_SWITCH */
+
+static inline void xnarch_init_root_tcb(struct xnthread *thread) { }
+static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { }
+static inline void xnarch_leave_root(struct xnthread *root) { }
+static inline void xnarch_switch_fpu(struct xnthread *f, struct xnthread *t) { }
+
+#endif /*  !ARM64_XENO_OLD_SWITCH */
+
+static inline int xnarch_fault_fpu_p(struct ipipe_trap_data *d)
+{
+	return xnarch_fault_trap(d) == IPIPE_TRAP_FPU_ACC;
+}
+
+static inline int
+xnarch_handle_fpu_fault(struct xnthread *from,
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	return 0;
+}
+
+static inline void xnarch_enable_kfpu(void) { }
+
+static inline void xnarch_disable_kfpu(void) { }
+
+#endif /* !_COBALT_ARM64_ASM_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h
new file mode 100644
index 0000000..5a5754f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/include/asm/xenomai/wrappers.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ARM64_ASM_WRAPPERS_H
+#define _COBALT_ARM64_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */
+
+#define __put_user_inatomic __put_user
+#define __get_user_inatomic __get_user
+
+#endif /* _COBALT_ARM64_ASM_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c
new file mode 100644
index 0000000..521b734
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/machine.c
@@ -0,0 +1,66 @@
+/**
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ *   ARM64 port
+ *     Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com>
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+
+#include <linux/mm.h>
+#include <asm/xenomai/machine.h>
+
+static void mach_arm_prefault(struct vm_area_struct *vma)
+{
+	unsigned long addr;
+	unsigned int flags;
+
+	if ((vma->vm_flags & VM_MAYREAD)) {
+		flags = (vma->vm_flags & VM_MAYWRITE) ? FAULT_FLAG_WRITE : 0;
+		for (addr = vma->vm_start;
+		     addr != vma->vm_end; addr += PAGE_SIZE)
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+			handle_mm_fault(vma->vm_mm, vma, addr, flags);
+#else
+			handle_mm_fault(vma, addr, flags);
+#endif
+	}
+}
+
+static const char *const fault_labels[] = {
+	[IPIPE_TRAP_ACCESS] = "Data or instruction access",
+	[IPIPE_TRAP_SECTION] = "Section fault",
+	[IPIPE_TRAP_DABT] = "Generic data abort",
+	[IPIPE_TRAP_UNKNOWN] = "Unknown exception",
+	[IPIPE_TRAP_BREAK] = "Instruction breakpoint",
+	[IPIPE_TRAP_FPU_ACC] = "Floating point access",
+	[IPIPE_TRAP_FPU_EXC] = "Floating point exception",
+	[IPIPE_TRAP_UNDEFINSTR] = "Undefined instruction",
+#ifdef IPIPE_TRAP_ALIGNMENT
+	[IPIPE_TRAP_ALIGNMENT] = "Unaligned access exception",
+#endif /* IPIPE_TRAP_ALIGNMENT */
+	[IPIPE_NR_FAULTS] = NULL
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "arm64",
+	.init = NULL,
+	.late_init = NULL,
+	.cleanup = NULL,
+	.prefault = mach_arm_prefault,
+	.fault_labels = fault_labels,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c
new file mode 100644
index 0000000..ee78243
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/syscall.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2005 Stelian Pop
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/ipipe.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/uapi/tsc.h>
+
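+/*
+ * The only arch-local request implemented here exports the clocksource
+ * layout, so libcobalt can read timestamps without a kernel entry. A
+ * hedged sketch of the calling side (names illustrative):
+ *
+ *	struct __xn_tscinfo info;
+ *	XENOMAI_SYSCALL2(sc_cobalt_archcall, XENOMAI_SYSARCH_TSCINFO, &info);
+ */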
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5)
+{
+	struct ipipe_sysinfo ipipe_info;
+	struct __ipipe_tscinfo *p = &ipipe_info.arch.tsc;
+	struct __xn_tscinfo info;
+	int ret;
+
+	if (a1 != XENOMAI_SYSARCH_TSCINFO)
+		return -EINVAL;
+
+	ret = ipipe_get_sysinfo(&ipipe_info);
+	if (ret)
+		return ret;
+
+	switch (p->type) {
+	case IPIPE_TSC_TYPE_DECREMENTER:
+		info.counter = p->u.dec.counter;
+		break;
+	case IPIPE_TSC_TYPE_NONE:
+		return -ENOSYS;
+	default:
+		info.counter = p->u.fr.counter;
+		break;
+	}
+
+	return cobalt_copy_to_user((void *)a2, &info, sizeof(info));
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c
new file mode 100644
index 0000000..1068f80
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/arm64/ipipe/thread.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ * 
+ * ARM64 port
+ *   Copyright (C) 2015 Dmitriy Cherkasov <dmitriy@mperpetuo.com>
+ *   Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+#include <linux/mm.h>
+#include <linux/jump_label.h>
+#include <asm/mmu_context.h>
+#include <cobalt/kernel/thread.h>
+#include <asm/fpsimd.h>
+#include <asm/processor.h>
+#include <asm/hw_breakpoint.h>
+
+#ifdef ARM64_XENO_OLD_SWITCH
+
+#define FPSIMD_EN (0x3 << 20)
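+
+/*
+ * CPACR_EL1.FPEN occupies bits [21:20]; setting both bits disables
+ * trapping of FP/SIMD instructions at EL0 and EL1, which is what
+ * enable_fpsimd() relies on below.
+ */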
+
+static inline unsigned long get_cpacr(void)
+{
+	unsigned long result;
+	__asm__ ("mrs %0, cpacr_el1": "=r"(result));
+	return result;
+}
+
+static inline void set_cpacr(long val)
+{
+	__asm__ __volatile__ (
+		"msr cpacr_el1, %0\n\t"
+		"isb"
+		: /* */ : "r"(val));
+}
+
+static inline void enable_fpsimd(void)
+{
+	set_cpacr(get_cpacr() | FPSIMD_EN);
+}
+
+static inline struct fpsimd_state *get_fpu_owner(struct xnarchtcb *rootcb)
+{
+	struct task_struct *curr = rootcb->core.host_task;
+
+	if (test_ti_thread_flag(task_thread_info(curr), TIF_FOREIGN_FPSTATE))
+		/* Foreign fpu state, use auxiliary backup area */
+		return &rootcb->xnfpsimd_state;
+
+	return &curr->thread.fpsimd_state;
+}
+
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *rootcb = xnthread_archtcb(root);
+	rootcb->fpup = get_fpu_owner(rootcb);
+}
+
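+/*
+ * The save/restore pair is skipped whenever both threads reference the
+ * same FPU backup area, e.g. when neither side ever touched FPSIMD.
+ */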
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	struct fpsimd_state *const from_fpup = from ? from->tcb.fpup : NULL;
+	struct fpsimd_state *const to_fpup = to->tcb.fpup;
+
+	enable_fpsimd();
+
+	if (from_fpup == to_fpup)
+		return;
+
+	if (from_fpup)
+		fpsimd_save_state(from_fpup);
+
+	fpsimd_load_state(to_fpup);
+	to_fpup->cpu = raw_smp_processor_id();
+}
+
+void xnarch_init_shadow_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+	tcb->fpup = &tcb->core.host_task->thread.fpsimd_state;
+}
+
+void xnarch_init_root_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = &thread->tcb;
+	tcb->fpup = NULL;
+}
+
+#endif /* ARM64_XENO_OLD_SWITCH */
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
+{
+	struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb;
+	struct task_struct *prev, *next, *last;
+	struct mm_struct *prev_mm, *next_mm;
+
+	next = in_tcb->core.host_task;
+	prev = out_tcb->core.host_task;
+	prev_mm = out_tcb->core.active_mm;
+
+	next_mm = in_tcb->core.mm;
+	if (next_mm == NULL) {
+		in_tcb->core.active_mm = prev_mm;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		ipipe_switch_mm_head(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the root thread,
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In that particular case, the
+		 * kernel expects a lazy TLB state for leaving the mm.
+		 */
+		if (next->mm == NULL)
+			enter_lazy_tlb(prev_mm, next);
+	}
+
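+
+	/*
+	 * The tail of the switch is version-specific: pre-4.9 I-pipe
+	 * kernels provide ipipe_switch_to(), later ones go through the
+	 * regular switch_to(), and 4.19 onwards must restore the
+	 * in-band FPSIMD state explicitly afterwards.
+	 */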
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,9,0)
+	ipipe_switch_to(prev, next);
+	(void)last;
+#else
+	switch_to(prev, next, last);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,19,0)
+	fpsimd_restore_current_state();
+#endif
+#endif
+}
+
+int xnarch_escalate(void)
+{
+	if (ipipe_root_p) {
+		ipipe_raise_irq(cobalt_pipeline.escalate_virq);
+		return 1;
+	}
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig
new file mode 100644
index 0000000..6ce3440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/Kconfig
@@ -0,0 +1,8 @@
+source "kernel/xenomai/Kconfig"
+source "drivers/xenomai/Kconfig"
+
+config XENO_ARCH_FPU
+	def_bool PPC_FPU
+
+config XENO_ARCH_SYS3264
+	def_bool n
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h
new file mode 100644
index 0000000..160a7d8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/arith.h
@@ -0,0 +1,34 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_UAPI_ARITH_H
+#define _COBALT_POWERPC_ASM_UAPI_ARITH_H
+
+#include <asm/xenomai/uapi/features.h>
+
+#define xnarch_add96and64(l0, l1, l2, s0, s1)		\
+	do {						\
+		__asm__ ("addc %2, %2, %4\n\t"		\
+			 "adde %1, %1, %3\n\t"		\
+			 "addze %0, %0\n\t"		\
+			 : "+r"(l0), "+r"(l1), "+r"(l2)	\
+			 : "r"(s0), "r"(s1) : "cc");	\
+	} while (0)
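+
+/*
+ * Illustration (variable names arbitrary): add a 64-bit quantity split
+ * into (s0, s1) to a 96-bit accumulator kept in (l0, l1, l2), most
+ * significant word first; addc sets the carry from the low words, adde
+ * folds it into the middle word, and addze pushes the final carry into
+ * the top word:
+ *
+ *	unsigned long hi = 0, mid = 0, lo = 0;
+ *	xnarch_add96and64(hi, mid, lo, delta_hi, delta_lo);
+ */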
+
+#include <cobalt/uapi/asm-generic/arith.h>
+
+#endif /* _COBALT_POWERPC_ASM_UAPI_ARITH_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h
new file mode 100644
index 0000000..ed54882
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/features.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_UAPI_FEATURES_H
+#define _COBALT_POWERPC_ASM_UAPI_FEATURES_H
+
+/* The ABI revision level we use on this arch. */
+#define XENOMAI_ABI_REV   18UL
+
+#define XENOMAI_FEAT_DEP  __xn_feat_generic_mask
+
+#define XENOMAI_FEAT_MAN  __xn_feat_generic_man_mask
+
+#define XNARCH_HAVE_LLMULSHFT    1
+#define XNARCH_HAVE_NODIV_LLIMD  1
+
+struct cobalt_featinfo_archdep { /* no arch-specific feature */ };
+
+#include <cobalt/uapi/asm-generic/features.h>
+
+static inline const char *get_feature_label(unsigned feature)
+{
+	return get_generic_feature_label(feature);
+}
+
+#endif /* !_COBALT_POWERPC_ASM_UAPI_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h
new file mode 100644
index 0000000..e6f89c9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/fptest.h
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_UAPI_FPTEST_H
+#define _COBALT_POWERPC_ASM_UAPI_FPTEST_H
+
+#ifndef __NO_FPRS__		/* i.e. has FPU, not SPE */
+
+static inline void fp_regs_set(int features, unsigned int val)
+{
+	unsigned long long fpval = val;
+
+	__asm__ __volatile__("lfd	0, %0\n"
+			     "	fmr	1, 0\n"
+			     "	fmr	2, 0\n"
+			     "	fmr	3, 0\n"
+			     "	fmr	4, 0\n"
+			     "	fmr	5, 0\n"
+			     "	fmr	6, 0\n"
+			     "	fmr	7, 0\n"
+			     "	fmr	8, 0\n"
+			     "	fmr	9, 0\n"
+			     "	fmr	10, 0\n"
+			     "	fmr	11, 0\n"
+			     "	fmr	12, 0\n"
+			     "	fmr	13, 0\n"
+			     "	fmr	14, 0\n"
+			     "	fmr	15, 0\n"
+			     "	fmr	16, 0\n"
+			     "	fmr	17, 0\n"
+			     "	fmr	18, 0\n"
+			     "	fmr	19, 0\n"
+			     "	fmr	20, 0\n"
+			     "	fmr	21, 0\n"
+			     "	fmr	22, 0\n"
+			     "	fmr	23, 0\n"
+			     "	fmr	24, 0\n"
+			     "	fmr	25, 0\n"
+			     "	fmr	26, 0\n"
+			     "	fmr	27, 0\n"
+			     "	fmr	28, 0\n"
+			     "	fmr	29, 0\n"
+			     "	fmr	30, 0\n"
+			     "	fmr	31, 0\n"::"m"(fpval));
+}
+
+#define FPTEST_REGVAL(n) {						\
+    	unsigned long long t;						\
+	__asm__ __volatile__("	stfd	" #n ", %0" : "=m" (t));	\
+	e[n] = (unsigned)t;						\
+	}
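+
+/*
+ * FPTEST_REGVAL(n) spills FPR n to memory with stfd and keeps the low
+ * 32 bits, matching the 32-bit pattern fp_regs_set() loaded into every
+ * register.
+ */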
+
+static inline unsigned int fp_regs_check(int features, unsigned int val,
+					 int (*report)(const char *fmt, ...))
+{
+	unsigned int i, result = val;
+	unsigned int e[32];
+
+	FPTEST_REGVAL(0);
+	FPTEST_REGVAL(1);
+	FPTEST_REGVAL(2);
+	FPTEST_REGVAL(3);
+	FPTEST_REGVAL(4);
+	FPTEST_REGVAL(5);
+	FPTEST_REGVAL(6);
+	FPTEST_REGVAL(7);
+	FPTEST_REGVAL(8);
+	FPTEST_REGVAL(9);
+	FPTEST_REGVAL(10);
+	FPTEST_REGVAL(11);
+	FPTEST_REGVAL(12);
+	FPTEST_REGVAL(13);
+	FPTEST_REGVAL(14);
+	FPTEST_REGVAL(15);
+	FPTEST_REGVAL(16);
+	FPTEST_REGVAL(17);
+	FPTEST_REGVAL(18);
+	FPTEST_REGVAL(19);
+	FPTEST_REGVAL(20);
+	FPTEST_REGVAL(21);
+	FPTEST_REGVAL(22);
+	FPTEST_REGVAL(23);
+	FPTEST_REGVAL(24);
+	FPTEST_REGVAL(25);
+	FPTEST_REGVAL(26);
+	FPTEST_REGVAL(27);
+	FPTEST_REGVAL(28);
+	FPTEST_REGVAL(29);
+	FPTEST_REGVAL(30);
+	FPTEST_REGVAL(31);
+
+	for (i = 0; i < 32; i++)
+		if (e[i] != val) {
+			report("r%d: %u != %u\n", i, e[i], val);
+			result = e[i];
+		}
+
+	return result;
+}
+
+#else	/* __NO_FPRS__ */
+
+static inline void fp_regs_set(int features, unsigned int val) { }
+
+static inline unsigned int fp_regs_check(int features, unsigned int val,
+					 int (*report)(const char *fmt, ...))
+{
+	return val;
+}
+
+#endif	/* __NO_FPRS__ */
+
+#endif /* !_COBALT_POWERPC_ASM_UAPI_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h
new file mode 100644
index 0000000..243aeab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/include/asm/xenomai/uapi/syscall.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_UAPI_SYSCALL_H
+#define _COBALT_POWERPC_ASM_UAPI_SYSCALL_H
+
+#define __xn_syscode(__nr)	(__COBALT_SYSCALL_BIT | (__nr))
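+
+/*
+ * Illustration: __xn_syscode(sc_cobalt_thread_create) tags a Cobalt
+ * syscall number with __COBALT_SYSCALL_BIT, so the pipeline can tell
+ * Cobalt requests apart from Linux ones sharing the same trap.
+ */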
+
+#endif /* !_COBALT_POWERPC_ASM_UAPI_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile
new file mode 100644
index 0000000..e175d0a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/Makefile
@@ -0,0 +1,8 @@
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y := machine.o thread.o
+
+xenomai-$(CONFIG_XENO_ARCH_FPU) += fpu.o
+
+ccflags-y := -I$(srctree)/arch/powerpc/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README
new file mode 100644
index 0000000..80f954a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/README
@@ -0,0 +1,3 @@
+Get the interrupt pipeline code for the target kernel from
+http://xenomai.org/downloads/ipipe/, or
+git://git.xenomai.org/ipipe.git
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S
new file mode 100644
index 0000000..186e922
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/fpu.S
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2004-2009 Philippe Gerum.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <asm/processor.h>
+#include <asm/cputable.h>
+#include <asm/ppc_asm.h>
+#include <asm/asm-offsets.h>
+
+#ifdef THREAD_FPSTATE
+#define FIXUP_REG_FPSTATE(__reg)	addi	__reg, __reg, THREAD_FPSTATE
+#else
+/*
+ * v3.10 -> v3.13 do not have THREAD_FPSTATE yet, but still
+ * define THREAD_FPSCR.
+ */
+#define FIXUP_REG_FPSTATE(__reg)
+#define	FPSTATE_FPSCR(__base)		THREAD_FPSCR(__base)
+#endif
+
+/* r3 = &thread_struct (tcb->fpup) */
+_GLOBAL(__asm_save_fpu)
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+	SYNC
+	MTMSRD(r5)
+	isync
+	FIXUP_REG_FPSTATE(r3)
+	SAVE_32FPRS(0,r3)
+	mffs	fr0
+	stfd	fr0,FPSTATE_FPSCR(r3)
+	blr
+
+/* r3 = &thread_struct */
+_GLOBAL(__asm_init_fpu)
+	mfmsr	r5
+	ori	r5,r5,MSR_FP|MSR_FE0|MSR_FE1
+	SYNC
+	MTMSRD(r5)
+
+	/* Fall through into __asm_restore_fpu is intended. */
+
+/* r3 = &thread_struct (tcb->fpup) */
+_GLOBAL(__asm_restore_fpu)
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+	SYNC
+	MTMSRD(r5)
+	isync
+	FIXUP_REG_FPSTATE(r3)
+	lfd	fr0,FPSTATE_FPSCR(r3)
+	MTFSF_L(fr0)
+	REST_32FPRS(0,r3)
+	blr
+
+_GLOBAL(__asm_disable_fpu)
+	mfmsr	r5
+	li	r3,MSR_FP
+	andc	r5,r5,r3
+	SYNC
+	MTMSRD(r5)
+	isync
+	blr
+
+_GLOBAL(__asm_enable_fpu)
+	mfmsr	r5
+	ori	r5,r5,MSR_FP
+	SYNC
+	MTMSRD(r5)
+	isync
+	blr
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h
new file mode 100644
index 0000000..9f06c3f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/calibration.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * 64-bit PowerPC adoption
+ *   copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_CALIBRATION_H
+#define _COBALT_POWERPC_ASM_CALIBRATION_H
+
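+/*
+ * The clock "gravity" triplet tells the core how early to program a
+ * timer shot to compensate for wake-up latency, per target context
+ * (user thread, kernel thread, IRQ handler). The per-board values
+ * below are worst-case scheduling latencies in nanoseconds.
+ */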
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+#define __sched_latency CONFIG_XENO_OPT_TIMING_SCHEDLAT
+#elif defined(CONFIG_PPC_PASEMI)
+#define __sched_latency 1000
+#elif defined(CONFIG_WALNUT)
+#define __sched_latency 11000
+#elif defined(CONFIG_YOSEMITE)
+#define __sched_latency 2000
+#elif defined(CONFIG_BUBINGA)
+#define __sched_latency 8000
+#elif defined(CONFIG_SYCAMORE)
+#define __sched_latency 8000
+#elif defined(CONFIG_SEQUOIA)
+#define __sched_latency 3000
+#elif defined(CONFIG_LWMON5)
+#define __sched_latency 2800
+#elif defined(CONFIG_OCOTEA)
+#define __sched_latency 2700
+#elif defined(CONFIG_BAMBOO)
+#define __sched_latency 4000
+#elif defined(CONFIG_TAISHAN)
+#define __sched_latency 1800
+#elif defined(CONFIG_RAINIER)
+#define __sched_latency 2300
+#elif defined(CONFIG_YUCCA)
+#define __sched_latency 2780
+#elif defined(CONFIG_YELLOWSTONE)
+#define __sched_latency 2700
+#elif defined(CONFIG_MPC8349_ITX)
+#define __sched_latency 2500
+#elif defined(CONFIG_MPC836x_MDS)
+#define __sched_latency 2900
+#elif defined(CONFIG_MPC5121_ADS)
+#define __sched_latency 4000
+#elif defined(CONFIG_MPC8272_ADS)
+#define __sched_latency 5500
+#elif defined(CONFIG_MPC85xx_RDB)
+#define __sched_latency 2000
+#elif defined(CONFIG_MVME7100)
+#define __sched_latency 1500
+#elif defined(CONFIG_TQM8548)
+#define __sched_latency 500
+#elif defined(CONFIG_TQM8560)
+#define __sched_latency 1000
+#elif defined(CONFIG_TQM8555)
+#define __sched_latency 2000
+#elif defined(CONFIG_KUP4K)
+#define __sched_latency 22000
+#elif defined(CONFIG_P1022_DS)
+#define __sched_latency 3000
+/*
+ * Check for the most generic configs at the bottom of this list, so
+ * that the most specific choices available are picked first.
+ */
+#elif defined(CONFIG_CORENET_GENERIC)
+#define __sched_latency 2800
+#elif defined(CONFIG_MPC85xx) || defined(CONFIG_PPC_85xx)
+#define __sched_latency 1000
+#elif defined(CONFIG_405GPR)
+#define __sched_latency 9000
+#elif defined(CONFIG_PPC_MPC52xx)
+#define __sched_latency 4500
+#elif defined(CONFIG_PPC_8xx)
+#define __sched_latency 25000
+#endif
+
+#ifndef __sched_latency
+/* Platform is unknown: pick a default value. */
+#define __sched_latency 4000
+#endif
+	p->user = xnclock_ns_to_ticks(&nkclock, __sched_latency);
+	p->kernel = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#undef __sched_latency
+
+#endif /* !_COBALT_POWERPC_ASM_CALIBRATION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h
new file mode 100644
index 0000000..03f93a2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/features.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_FEATURES_H
+#define _COBALT_POWERPC_ASM_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_POWERPC_ASM_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..a9d93fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/fptest.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_FPTEST_H
+#define _COBALT_POWERPC_ASM_FPTEST_H
+
+#include <linux/errno.h>
+#include <linux/printk.h>
+#include <asm/xenomai/uapi/fptest.h>
+
+static inline int fp_kernel_supported(void)
+{
+/*
+ * CAUTION: some architectures have a hardware FP unit, but a
+ * restricted set of supported FP instructions. Those may enable
+ * CONFIG_MATH_EMULATION and MATH_EMULATION_HW_UNIMPLEMENTED at the
+ * same time to provide an emulation of the missing instruction set.
+ */
+#ifdef CONFIG_PPC_FPU
+	return 1;
+#else
+#ifdef CONFIG_MATH_EMULATION
+	printk_once(XENO_WARNING "kernel-based FPU support is disabled\n");
+#endif	/* CONFIG_MATH_EMULATION */
+	return 0;
+#endif	/* !CONFIG_PPC_FPU */
+}
+
+static inline void fp_init(void)
+{
+}
+
+static inline int fp_linux_begin(void)
+{
+	return -ENOSYS;
+}
+
+static inline void fp_linux_end(void)
+{
+}
+
+static inline int fp_detect(void)
+{
+	return 0;
+}
+
+#endif /* !_COBALT_POWERPC_ASM_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..0e41fd6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/machine.h
@@ -0,0 +1,39 @@
+/**
+ *   Copyright &copy; 2002-2004 Philippe Gerum.
+ *
+ *   64-bit PowerPC adoption
+ *     copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with Xenomai; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_MACHINE_H
+#define _COBALT_POWERPC_ASM_MACHINE_H
+
+#include <linux/compiler.h>
+
+#define XNARCH_HOST_TICK_IRQ	__ipipe_hrtimer_irq
+
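+/*
+ * ffnz() returns the position of the least significant set bit of a
+ * nonzero argument: ul & (-ul) isolates that bit, cntlzw counts its
+ * leading zeros, and 31 - clz turns the count into a bit index, so
+ * e.g. ffnz(0x90) == 4.
+ */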
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	__asm__ ("cntlzw %0, %1":"=r"(ul):"r"(ul & (-ul)));
+	return 31 - ul;
+}
+
+/* Read this last to enable default settings. */
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_POWERPC_ASM_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..9b166ad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * 64-bit PowerPC adoption
+ *   copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_SYSCALL_H
+#define _COBALT_POWERPC_ASM_SYSCALL_H
+
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+/*
+ * Cobalt and Linux syscall numbers can be fetched from GPR0, masking
+ * out the __COBALT_SYSCALL_BIT marker.
+ */
+#define __xn_reg_sys(__regs)	((__regs)->gpr[0])
+#define __xn_syscall_p(__regs)	(__xn_reg_sys(__regs) & __COBALT_SYSCALL_BIT)
+#define __xn_syscall(__regs)	(__xn_reg_sys(__regs) & ~__COBALT_SYSCALL_BIT)
+
+#define __xn_reg_rval(__regs)	((__regs)->gpr[3])
+#define __xn_reg_arg1(__regs)	((__regs)->gpr[3])
+#define __xn_reg_arg2(__regs)	((__regs)->gpr[4])
+#define __xn_reg_arg3(__regs)	((__regs)->gpr[5])
+#define __xn_reg_arg4(__regs)	((__regs)->gpr[6])
+#define __xn_reg_arg5(__regs)	((__regs)->gpr[7])
+#define __xn_reg_pc(__regs)	((__regs)->nip)
+#define __xn_reg_sp(__regs)	((__regs)->gpr[1])
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)			\
+	({						\
+		*(__code) = __xn_syscall(__regs);	\
+		*(__code) < NR_syscalls;		\
+	})
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	/*
+	 * We currently never set the SO bit for marking errors, even
+	 * if we always test it upon syscall return.
+	 */
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+static inline
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5)
+{
+	return -ENOSYS;
+}
+
+#endif /* !_COBALT_POWERPC_ASM_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h
new file mode 100644
index 0000000..15c977c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/syscall32.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_SYSCALL32_H
+#define _COBALT_POWERPC_ASM_SYSCALL32_H
+
+#include <asm-generic/xenomai/syscall32.h>
+
+#endif /* !_COBALT_POWERPC_ASM_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h
new file mode 100644
index 0000000..f91e26b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/thread.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2004-2013 Philippe Gerum.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_THREAD_H
+#define _COBALT_POWERPC_ASM_THREAD_H
+
+#include <asm-generic/xenomai/ipipe/thread.h>
+
+struct xnarchtcb {
+	struct xntcb core;
+#ifdef CONFIG_XENO_ARCH_FPU
+	struct thread_struct *fpup;
+#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
+#else
+#define xnarch_fpu_ptr(tcb)     NULL
+#endif
+};
+
+#define xnarch_fault_regs(d)	((d)->regs)
+#define xnarch_fault_trap(d)    ((unsigned int)(d)->regs->trap)
+#define xnarch_fault_code(d)    ((d)->regs->dar)
+#define xnarch_fault_pc(d)      ((d)->regs->nip)
+#define xnarch_fault_fpu_p(d)   0
+#define xnarch_fault_pf_p(d)   ((d)->exception == IPIPE_TRAP_ACCESS)
+#define xnarch_fault_bp_p(d)   ((current->ptrace & PT_PTRACED) &&	\
+				((d)->exception == IPIPE_TRAP_IABR ||	\
+				 (d)->exception == IPIPE_TRAP_SSTEP ||	\
+				 (d)->exception == IPIPE_TRAP_DEBUG))
+#define xnarch_fault_notify(d) (xnarch_fault_bp_p(d) == 0)
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->core.host_task;
+}
+
+static inline void xnarch_enter_root(struct xnthread *root) { }
+
+#ifdef CONFIG_XENO_ARCH_FPU
+
+void xnarch_init_root_tcb(struct xnthread *thread);
+
+void xnarch_init_shadow_tcb(struct xnthread *thread);
+
+void xnarch_leave_root(struct xnthread *root);
+
+#else  /* !CONFIG_XENO_ARCH_FPU */
+
+static inline void xnarch_init_root_tcb(struct xnthread *thread) { }
+static inline void xnarch_init_shadow_tcb(struct xnthread *thread) { }
+static inline void xnarch_leave_root(struct xnthread *root) { }
+
+#endif  /* !CONFIG_XENO_ARCH_FPU */
+
+static inline int
+xnarch_handle_fpu_fault(struct xnthread *from,
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	return 0;
+}
+
+static inline int xnarch_escalate(void)
+{
+	if (ipipe_root_p) {
+		ipipe_raise_irq(cobalt_pipeline.escalate_virq);
+		return 1;
+	}
+
+	return 0;
+}
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in);
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to);
+
+#endif /* !_COBALT_POWERPC_ASM_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h
new file mode 100644
index 0000000..f0ae0e4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/include/asm/xenomai/wrappers.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_POWERPC_ASM_WRAPPERS_H
+#define _COBALT_POWERPC_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h>	/* Read the generic portion. */
+
+#endif /* _COBALT_POWERPC_ASM_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c
new file mode 100644
index 0000000..14e2c4f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/machine.c
@@ -0,0 +1,67 @@
+/**
+ *   Copyright (C) 2004-2006 Philippe Gerum.
+ *
+ *   64-bit PowerPC adoption
+ *     copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#include <linux/stddef.h>
+#include <asm/cputable.h>
+#include <asm/xenomai/machine.h>
+
+static int mach_powerpc_init(void)
+{
+#ifdef CONFIG_ALTIVEC
+	if (!cpu_has_feature(CPU_FTR_ALTIVEC)) {
+		printk("Xenomai: ALTIVEC support enabled in kernel but no hardware found.\n"
+		       "         Disable CONFIG_ALTIVEC in the kernel configuration.\n");
+		return -ENODEV;
+	}
+#endif /* CONFIG_ALTIVEC */
+
+	return 0;
+}
+
+static const char *const fault_labels[] = {
+	[0] = "Data or instruction access",
+	[1] = "Alignment",
+	[2] = "Altivec unavailable",
+	[3] = "Program check exception",
+	[4] = "Machine check exception",
+	[5] = "Unknown",
+	[6] = "Instruction breakpoint",
+	[7] = "Run mode exception",
+	[8] = "Single-step exception",
+	[9] = "Non-recoverable exception",
+	[10] = "Software emulation",
+	[11] = "Debug",
+	[12] = "SPE",
+	[13] = "Altivec assist",
+	[14] = "Cache-locking exception",
+	[15] = "Kernel FP unavailable",
+	[16] = NULL
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "powerpc",
+	.init = mach_powerpc_init,
+	.late_init = NULL,
+	.cleanup = NULL,
+	.prefault = NULL,
+	.fault_labels = fault_labels,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c
new file mode 100644
index 0000000..6ce2787
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/powerpc/ipipe/thread.c
@@ -0,0 +1,174 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * 64-bit PowerPC adoption
+ *   copyright (C) 2005 Taneli Vähäkangas and Heikki Lindholm
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+#include <linux/mm.h>
+#include <asm/mmu_context.h>
+#include <cobalt/kernel/thread.h>
+
+asmlinkage struct task_struct *
+_switch(struct thread_struct *prev, struct thread_struct *next);
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
+{
+	struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb;
+	struct mm_struct *prev_mm, *next_mm;
+	struct task_struct *next;
+
+	next = in_tcb->core.host_task;
+	prev_mm = out_tcb->core.active_mm;
+
+	next_mm = in_tcb->core.mm;
+	if (next_mm == NULL) {
+		in_tcb->core.active_mm = prev_mm;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		ipipe_switch_mm_head(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the root thread,
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In that particular case, the
+		 * kernel expects a lazy TLB state for leaving the mm.
+		 */
+		if (next->mm == NULL)
+			enter_lazy_tlb(prev_mm, next);
+	}
+
+	hard_local_irq_disable();
+	_switch(out_tcb->core.tsp, in_tcb->core.tsp);
+}
+
+#ifdef CONFIG_XENO_ARCH_FPU
+
+asmlinkage void __asm_init_fpu(struct thread_struct *ts);
+
+asmlinkage void __asm_save_fpu(struct thread_struct *ts);
+
+asmlinkage void __asm_restore_fpu(struct thread_struct *ts);
+
+asmlinkage void __asm_disable_fpu(void);
+
+asmlinkage void __asm_enable_fpu(void);
+
+#if !defined(CONFIG_SMP) && LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)
+#define get_fpu_owner(cur) last_task_used_math
+#else /* CONFIG_SMP || LINUX >= v4.4 */
+#define get_fpu_owner(cur) ({						\
+	struct task_struct *_cur = (cur);				\
+	((_cur->thread.regs && (_cur->thread.regs->msr & MSR_FP))	\
+	 ? _cur : NULL);						\
+})
+#endif /* CONFIG_SMP || LINUX >= v4.4 */
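+
+/*
+ * A task owns the FPU when MSR_FP is set in its saved register image;
+ * UP kernels older than v4.4 track the owner in last_task_used_math
+ * instead, which the first definition above reuses.
+ */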
+
+static void xnarch_enable_fpu(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+	struct task_struct *task = tcb->core.host_task;
+
+	if (task && task != tcb->core.user_fpu_owner)
+		__asm_disable_fpu();
+	else
+		__asm_enable_fpu();
+}
+
+static void do_save_fpu(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+
+	if (tcb->fpup) {
+		__asm_save_fpu(tcb->fpup);
+
+		if (tcb->core.user_fpu_owner &&
+		    tcb->core.user_fpu_owner->thread.regs)
+			tcb->core.user_fpu_owner->thread.regs->msr &= ~(MSR_FP|MSR_FE0|MSR_FE1);
+	}
+}
+
+static void xnarch_restore_fpu(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+	struct thread_struct *ts;
+	struct pt_regs *regs;
+
+	if (tcb->fpup) {
+		__asm_restore_fpu(tcb->fpup);
+		/*
+		 * Note: only re-enable FP in the MSR if it was enabled
+		 * when we saved the FPU state.
+		 */
+		if (tcb->core.user_fpu_owner) {
+			ts = &tcb->core.user_fpu_owner->thread;
+			regs = ts->regs;
+			if (regs) {
+				regs->msr &= ~(MSR_FE0|MSR_FE1);
+				regs->msr |= (MSR_FP|ts->fpexc_mode);
+			}
+		}
+	}
+	/*
+	 * FIXME: We restore FPU "as it was" when Xenomai preempted Linux,
+	 * whereas we could be much lazier.
+	 */
+	if (tcb->core.host_task &&
+	    tcb->core.host_task != tcb->core.user_fpu_owner)
+		__asm_disable_fpu();
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	if (from == to ||
+	    xnarch_fpu_ptr(xnthread_archtcb(from)) ==
+	    xnarch_fpu_ptr(xnthread_archtcb(to))) {
+		xnarch_enable_fpu(to);
+		return;
+	}
+
+	if (from)
+		do_save_fpu(from);
+
+	xnarch_restore_fpu(to);
+}
+
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *rootcb = xnthread_archtcb(root);
+	rootcb->core.user_fpu_owner = get_fpu_owner(rootcb->core.host_task);
+	/* So that do_save_fpu() operates on the right FPU area. */
+	rootcb->fpup = rootcb->core.user_fpu_owner ?
+		&rootcb->core.user_fpu_owner->thread : NULL;
+}
+
+void xnarch_init_root_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = &thread->tcb;
+	tcb->fpup = NULL;
+}
+
+void xnarch_init_shadow_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = &thread->tcb;
+	tcb->fpup = &tcb->core.host_task->thread;
+}
+
+#endif /* CONFIG_XENO_ARCH_FPU */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig
new file mode 100644
index 0000000..9adbbf7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/Kconfig
@@ -0,0 +1,8 @@
+config XENO_ARCH_FPU
+	def_bool y
+
+config XENO_ARCH_SYS3264
+	def_bool IA32_EMULATION
+
+source "kernel/xenomai/Kconfig"
+source "drivers/xenomai/Kconfig"
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile
new file mode 100644
index 0000000..93929b6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+xenomai-y := machine.o smi.o c1e.o
+
+ccflags-y := -I$(srctree)/arch/x86/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c
new file mode 120000
index 0000000..5dc924e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/c1e.c
@@ -0,0 +1 @@
+../ipipe/c1e.c
\ No newline at end of file
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h
new file mode 100644
index 0000000..7f2dde7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/calibration.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_CALIBRATION_H
+#define _COBALT_X86_ASM_CALIBRATION_H
+
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned long sched_latency;
+
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#else /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */
+	sched_latency = num_online_cpus() > 1 ? 3350 : 2000;
+#endif /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */
+
+	p->user = xnclock_ns_to_ticks(&nkclock, sched_latency);
+	p->kernel = xnclock_ns_to_ticks(&nkclock,
+					CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_X86_ASM_CALIBRATION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..463d9d3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/fptest.h
@@ -0,0 +1,74 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_FPTEST_H
+#define _COBALT_X86_ASM_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/xenomai/uapi/fptest.h>
+
+/*
+ * We do NOT support out-of-band FPU operations in kernel space for a
+ * reason: this is a mess. Out-of-band FPU is just fine and makes a
+ * lot of sense for many real-time applications, but you have to do
+ * that from userland.
+ */
+static inline int fp_kernel_supported(void)
+{
+	return 0;
+}
+
+static inline void fp_init(void)
+{
+}
+
+static inline int fp_linux_begin(void)
+{
+	kernel_fpu_begin();
+	/*
+	 * We need a clean context for testing the sanity of the FPU
+	 * register stack across switches in fp_regs_check()
+	 * (fildl->fistpl), which kernel_fpu_begin() does not
+	 * guarantee us. Force this manually.
+	 */
+	asm volatile("fninit");
+
+	return true;
+}
+
+static inline void fp_linux_end(void)
+{
+	kernel_fpu_end();
+}
+
+static inline int fp_detect(void)
+{
+	int features = 0;
+
+	if (boot_cpu_has(X86_FEATURE_XMM2))
+		features |= __COBALT_HAVE_SSE2;
+
+	if (boot_cpu_has(X86_FEATURE_AVX))
+		features |= __COBALT_HAVE_AVX;
+
+	return features;
+}
+
+#endif /* _COBALT_X86_ASM_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..56b1c48
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/machine.h
@@ -0,0 +1,34 @@
+/**
+ * Copyright (C) 2007-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_MACHINE_H
+#define _COBALT_X86_ASM_MACHINE_H
+
+#include <linux/compiler.h>
+
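+/*
+ * ffnz() expects a nonzero argument: bsfq yields the index of the
+ * least significant set bit directly (its result is undefined for 0),
+ * so e.g. ffnz(0x90UL) == 4.
+ */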
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	__asm__("bsfq %1, %0":"=r" (ul) : "rm" (ul));
+
+	return ul;
+}
+
+/* Read this last to enable default settings. */
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_X86_ASM_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..b2e1582
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/syscall.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_SYSCALL_H
+#define _COBALT_X86_ASM_SYSCALL_H
+
+#include <linux/errno.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+/*
+ * Cobalt and Linux syscall numbers can be fetched from ORIG_AX,
+ * masking out the __COBALT_SYSCALL_BIT marker.
+ */
+#define __xn_reg_sys(regs)    ((regs)->orig_ax)
+#define __xn_reg_rval(regs)   ((regs)->ax)
+#define __xn_reg_pc(regs)     ((regs)->ip)
+#define __xn_reg_sp(regs)     ((regs)->sp)
+
+#define __xn_syscall_p(regs)  (__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT)
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#define __xn_syscall(regs)    __COBALT_CALL32_SYSNR(__xn_reg_sys(regs)	\
+				    & ~__COBALT_SYSCALL_BIT)
+#else
+#define __xn_syscall(regs)    (__xn_reg_sys(regs) & ~__COBALT_SYSCALL_BIT)
+#endif
+
+#ifdef CONFIG_IA32_EMULATION
+#define __xn_nr_root_syscalls						\
+	({								\
+		struct thread_info *__ti = current_thread_info();	\
+		__ti->status & TS_COMPAT ? IA32_NR_syscalls : NR_syscalls; \
+	})
+#else
+#define __xn_nr_root_syscalls	NR_syscalls
+#endif
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)			\
+	({						\
+		*(__code) = __xn_reg_sys(__regs);	\
+		*(__code) < __xn_nr_root_syscalls;	\
+	})
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+static inline
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5)
+{
+	return -ENOSYS;
+}
+
+#endif /* !_COBALT_X86_ASM_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h
new file mode 100644
index 0000000..6eb71e2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/include/asm/xenomai/thread.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004-2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_THREAD_H
+#define _COBALT_X86_ASM_THREAD_H
+
+#include <asm-generic/xenomai/dovetail/thread.h>
+#include <asm/traps.h>
+
+#define xnarch_fault_pc(__regs)		((__regs)->ip)
+#define xnarch_fault_pf_p(__nr)		((__nr) == X86_TRAP_PF)
+#define xnarch_fault_bp_p(__nr)		((current->ptrace & PT_PTRACED) &&	\
+					 ((__nr) == X86_TRAP_DB || (__nr) == X86_TRAP_BP))
+#define xnarch_fault_notify(__nr)	(!xnarch_fault_bp_p(__nr))
+
+#endif /* !_COBALT_X86_ASM_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c
new file mode 100644
index 0000000..562de40
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/machine.c
@@ -0,0 +1,70 @@
+/**
+ *   Copyright (C) 2007-2012 Philippe Gerum.
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+
+#include <asm/xenomai/machine.h>
+#include <asm/xenomai/smi.h>
+#include <asm/xenomai/c1e.h>
+
+static int mach_x86_init(void)
+{
+	mach_x86_c1e_disable();
+	mach_x86_smi_init();
+	mach_x86_smi_disable();
+
+	return 0;
+}
+
+static void mach_x86_cleanup(void)
+{
+	mach_x86_smi_restore();
+}
+
+static const char *const fault_labels[] = {
+    [0] = "Divide error",
+    [1] = "Debug",
+    [2] = "",   /* NMI is not pipelined. */
+    [3] = "Int3",
+    [4] = "Overflow",
+    [5] = "Bounds",
+    [6] = "Invalid opcode",
+    [7] = "FPU not available",
+    [8] = "Double fault",
+    [9] = "FPU segment overrun",
+    [10] = "Invalid TSS",
+    [11] = "Segment not present",
+    [12] = "Stack segment",
+    [13] = "General protection",
+    [14] = "Page fault",
+    [15] = "Spurious interrupt",
+    [16] = "FPU error",
+    [17] = "Alignment check",
+    [18] = "Machine check",
+    [19] = "SIMD error",
+    [20] = NULL,
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "x86",
+	.init = mach_x86_init,
+	.late_init = NULL,
+	.cleanup = mach_x86_cleanup,
+	.prefault = NULL,
+	.fault_labels = fault_labels,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c
new file mode 120000
index 0000000..8d19721
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/dovetail/smi.c
@@ -0,0 +1 @@
+../ipipe/smi.c
\ No newline at end of file
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h
new file mode 100644
index 0000000..7e06014
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/c1e.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef C1E_H
+#define C1E_H
+
+void mach_x86_c1e_disable(void);
+
+#endif /* C1E_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h
new file mode 100644
index 0000000..a37c186
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/features.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2005-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_FEATURES_H
+#define _COBALT_X86_ASM_FEATURES_H
+
+struct cobalt_featinfo;
+static inline void collect_arch_features(struct cobalt_featinfo *p) { }
+
+#include <asm/xenomai/uapi/features.h>
+
+#endif /* !_COBALT_X86_ASM_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h
new file mode 100644
index 0000000..1ea90fb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/smi.h
@@ -0,0 +1,32 @@
+/**
+ *   Copyright &copy; 2005 Gilles Chanteperdrix.
+ *
+ *   SMI workaround for x86.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_SMI_H
+#define _COBALT_X86_ASM_SMI_H
+
+#ifndef _COBALT_X86_ASM_MACHINE_H
+#error "please don't include asm/smi.h directly"
+#endif
+
+void mach_x86_smi_disable(void);
+void mach_x86_smi_restore(void);
+void mach_x86_smi_init(void);
+
+#endif /* !_COBALT_X86_ASM_SMI_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h
new file mode 100644
index 0000000..3986b22
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32-table.h
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_SYSCALL32_TABLE_H
+#define _COBALT_X86_ASM_SYSCALL32_TABLE_H
+
+/*
+ * CAUTION: This file is read verbatim into the main syscall
+ * table. Only preprocessor stuff and syscall entries here.
+ */
+
+__COBALT_CALL32emu_THUNK(thread_create)
+__COBALT_CALL32emu_THUNK(thread_setschedparam_ex)
+__COBALT_CALL32emu_THUNK(thread_getschedparam_ex)
+__COBALT_CALL32emu_THUNK(thread_setschedprio)
+__COBALT_CALL32emu_THUNK(sem_open)
+__COBALT_CALL32emu_THUNK(sem_timedwait)
+__COBALT_CALL32emu_THUNK(clock_getres)
+__COBALT_CALL32emu_THUNK(clock_gettime)
+__COBALT_CALL32emu_THUNK(clock_settime)
+__COBALT_CALL32emu_THUNK(clock_nanosleep)
+__COBALT_CALL32emu_THUNK(mutex_timedlock)
+__COBALT_CALL32emu_THUNK(cond_wait_prologue)
+__COBALT_CALL32emu_THUNK(mq_open)
+__COBALT_CALL32emu_THUNK(mq_getattr)
+__COBALT_CALL32emu_THUNK(mq_timedsend)
+__COBALT_CALL32emu_THUNK(mq_timedreceive)
+__COBALT_CALL32emu_THUNK(mq_notify)
+__COBALT_CALL32emu_THUNK(sched_weightprio)
+__COBALT_CALL32emu_THUNK(sched_setconfig_np)
+__COBALT_CALL32emu_THUNK(sched_getconfig_np)
+__COBALT_CALL32emu_THUNK(sched_setscheduler_ex)
+__COBALT_CALL32emu_THUNK(sched_getscheduler_ex)
+__COBALT_CALL32emu_THUNK(timer_create)
+__COBALT_CALL32emu_THUNK(timer_settime)
+__COBALT_CALL32emu_THUNK(timer_gettime)
+__COBALT_CALL32emu_THUNK(timerfd_settime)
+__COBALT_CALL32emu_THUNK(timerfd_gettime)
+__COBALT_CALL32emu_THUNK(sigwait)
+__COBALT_CALL32emu_THUNK(sigtimedwait)
+__COBALT_CALL32emu_THUNK(sigwaitinfo)
+__COBALT_CALL32emu_THUNK(sigpending)
+__COBALT_CALL32emu_THUNK(sigqueue)
+__COBALT_CALL32emu_THUNK(monitor_wait)
+__COBALT_CALL32emu_THUNK(event_wait)
+__COBALT_CALL32emu_THUNK(select)
+__COBALT_CALL32emu_THUNK(recvmsg)
+__COBALT_CALL32emu_THUNK(sendmsg)
+__COBALT_CALL32emu_THUNK(mmap)
+__COBALT_CALL32emu_THUNK(backtrace)
+__COBALT_CALL32emu_THUNK(mq_timedreceive64)
+__COBALT_CALL32emu_THUNK(sigtimedwait64)
+__COBALT_CALL32emu_THUNK(recvmmsg64)
+
+#endif /* !_COBALT_X86_ASM_SYSCALL32_TABLE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h
new file mode 100644
index 0000000..f023de3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/syscall32.h
@@ -0,0 +1,102 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_SYSCALL32_H
+#define _COBALT_X86_ASM_SYSCALL32_H
+
+#include <asm/unistd.h>
+
+#ifdef CONFIG_IA32_EMULATION
+
+#define __COBALT_IA32_BASE		256 /* Power of two. */
+
+#define __COBALT_SYSNR32emu(__reg)					\
+	({								\
+		long __nr = __reg;					\
+		if (in_ia32_syscall())					\
+			__nr += __COBALT_IA32_BASE;			\
+		__nr;							\
+	})
+
+#define __COBALT_COMPAT32emu(__reg)					\
+	(in_ia32_syscall() ? __COBALT_COMPAT_BIT : 0)
+
+#if __NR_COBALT_SYSCALLS > __COBALT_IA32_BASE
+#error "__NR_COBALT_SYSCALLS > __COBALT_IA32_BASE"
+#endif
+
+#define __syshand32emu__(__name)	\
+	((cobalt_syshand)(void (*)(void))(CoBaLt32emu_ ## __name))
+
+#define __COBALT_CALL32emu_INITHAND(__handler)	\
+	[__COBALT_IA32_BASE ... __COBALT_IA32_BASE + __NR_COBALT_SYSCALLS-1] = __handler,
+
+#define __COBALT_CALL32emu_INITMODE(__mode)	\
+	[__COBALT_IA32_BASE ... __COBALT_IA32_BASE + __NR_COBALT_SYSCALLS-1] = __mode,
+
+/* ia32 default entry (no thunk) */
+#define __COBALT_CALL32emu_ENTRY(__name, __handler)		\
+	[sc_cobalt_ ## __name + __COBALT_IA32_BASE] = __handler,
+
+/* ia32 thunk installation */
+#define __COBALT_CALL32emu_THUNK(__name)	\
+	__COBALT_CALL32emu_ENTRY(__name, __syshand32emu__(__name))
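+
+/*
+ * Illustrative expansion: with __COBALT_IA32_BASE being 256,
+ * __COBALT_CALL32emu_THUNK(mmap) boils down to
+ *   [sc_cobalt_mmap + 256] = (cobalt_syshand)CoBaLt32emu_mmap,
+ * i.e. it fills the ia32-emulation slot of the main syscall table.
+ */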
+
+/* ia32 thunk implementation. */
+#define COBALT_SYSCALL32emu(__name, __mode, __args)	\
+	long CoBaLt32emu_ ## __name __args
+
+/* ia32 thunk declaration. */
+#define COBALT_SYSCALL32emu_DECL(__name, __args)	\
+	long CoBaLt32emu_ ## __name __args
+
+#else /* !CONFIG_IA32_EMULATION */
+
+/* ia32 emulation support disabled. */
+
+#define __COBALT_SYSNR32emu(__reg)	(__reg)
+
+#define __COBALT_COMPAT32emu(__reg)	0
+
+#define __COBALT_CALL32emu_INITHAND(__handler)
+
+#define __COBALT_CALL32emu_INITMODE(__mode)
+
+#define __COBALT_CALL32emu_ENTRY(__name, __handler)
+
+#define __COBALT_CALL32emu_THUNK(__name)
+
+#define COBALT_SYSCALL32emu_DECL(__name, __args)
+
+#endif /* !CONFIG_IA32_EMULATION */
+
+#define __COBALT_CALL32_ENTRY(__name, __handler)	\
+	__COBALT_CALL32emu_ENTRY(__name, __handler)
+
+#define __COBALT_CALL32_INITHAND(__handler)	\
+	__COBALT_CALL32emu_INITHAND(__handler)
+
+#define __COBALT_CALL32_INITMODE(__mode)	\
+	__COBALT_CALL32emu_INITMODE(__mode)
+
+/* Already checked for __COBALT_SYSCALL_BIT */
+#define __COBALT_CALL32_SYSNR(__reg)	__COBALT_SYSNR32emu(__reg)
+
+#define __COBALT_CALL_COMPAT(__reg)	__COBALT_COMPAT32emu(__reg)
+
+#endif /* !_COBALT_X86_ASM_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h
new file mode 100644
index 0000000..3682736
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/arith.h
@@ -0,0 +1,243 @@
+/**
+ *   Arithmetic/conversion routines for x86.
+ *
+ *   Copyright &copy; 2005 Gilles Chanteperdrix, 32bit version.
+ *   Copyright &copy; 2007 Jan Kiszka, 64bit version.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_X86_ASM_UAPI_ARITH_H
+#define _COBALT_X86_ASM_UAPI_ARITH_H
+
+#include <asm/xenomai/uapi/features.h>
+
+#ifdef __i386__
+
+#define xnarch_u64tou32(ull, h, l) ({          \
+    unsigned long long _ull = (ull);            \
+    (l) = _ull & 0xffffffff;                    \
+    (h) = _ull >> 32;                           \
+})
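+/* e.g. xnarch_u64tou32(0x1122334455667788ULL, h, l) yields
+   h == 0x11223344 and l == 0x55667788. */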
+
+#define xnarch_u64fromu32(h, l) ({             \
+    unsigned long long _ull;                    \
+    asm ( "": "=A"(_ull) : "d"(h), "a"(l));     \
+    _ull;                                       \
+})
+
+/* const helper for xnarch_uldivrem, so that the compiler can eliminate
+   multiple calls with the same arguments, at no additional cost. */
+static inline __attribute__((__const__)) unsigned long long
+__mach_x86_32_uldivrem(const unsigned long long ull, const unsigned long d)
+{
+    unsigned long long ret;
+    __asm__ ("divl %1" : "=A,A"(ret) : "r,?m"(d), "A,A"(ull));
+    /* Exception if quotient does not fit on unsigned long. */
+    return ret;
+}
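+
+/*
+ * divl divides the 64-bit EDX:EAX pair (the "A" constraint) by a
+ * 32-bit operand, leaving the quotient in EAX and the remainder in
+ * EDX; mach_x86_32_uldivrem() below splits that pair back apart.
+ */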
+
+/* Fast long long division: when the quotient and remainder fit on 32 bits. */
+static inline unsigned long mach_x86_32_uldivrem(unsigned long long ull,
+						 const unsigned d,
+						 unsigned long *const rp)
+{
+    unsigned long q, r;
+    ull = __mach_x86_32_uldivrem(ull, d);
+    __asm__ ( "": "=d"(r), "=a"(q) : "A"(ull));
+    if(rp)
+	*rp = r;
+    return q;
+}
+#define xnarch_uldivrem(ull, d, rp) mach_x86_32_uldivrem((ull),(d),(rp))
+
+/* Division of an unsigned 96 bits ((h << 32) + l) by an unsigned 32 bits.
+   Building block for ulldiv. */
+static inline unsigned long long mach_x86_32_div96by32(const unsigned long long h,
+						       const unsigned long l,
+						       const unsigned long d,
+						       unsigned long *const rp)
+{
+    unsigned long rh;
+    const unsigned long qh = xnarch_uldivrem(h, d, &rh);
+    const unsigned long long t = xnarch_u64fromu32(rh, l);
+    const unsigned long ql = xnarch_uldivrem(t, d, rp);
+
+    return xnarch_u64fromu32(qh, ql);
+}
+
+/* Slow long long division. Uses xnarch_uldivrem, hence has the same property:
+   the compiler removes redundant calls. */
+static inline unsigned long long
+mach_x86_32_ulldiv(const unsigned long long ull,
+		   const unsigned d,
+		   unsigned long *const rp)
+{
+    unsigned long h, l;
+    xnarch_u64tou32(ull, h, l);
+    return mach_x86_32_div96by32(h, l, d, rp);
+}
+#define xnarch_ulldiv(ull,d,rp) mach_x86_32_ulldiv((ull),(d),(rp))
+
+/* Fast scaled-math-based replacement for long long multiply-divide */
+#define xnarch_llmulshft(ll, m, s)					\
+({									\
+	long long __ret;						\
+	unsigned __lo, __hi;						\
+									\
+	__asm__ (							\
+		/* HI = HIWORD(ll) * m */				\
+		"mov  %%eax,%%ecx\n\t"					\
+		"mov  %%edx,%%eax\n\t"					\
+		"imull %[__m]\n\t"					\
+		"mov  %%eax,%[__lo]\n\t"				\
+		"mov  %%edx,%[__hi]\n\t"				\
+									\
+		/* LO = LOWORD(ll) * m */				\
+		"mov  %%ecx,%%eax\n\t"					\
+		"mull %[__m]\n\t"					\
+									\
+		/* ret = (HI << 32) + LO */				\
+		"add  %[__lo],%%edx\n\t"				\
+		"adc  $0,%[__hi]\n\t"					\
+									\
+		/* ret = ret >> s */					\
+		"mov  %[__s],%%ecx\n\t"					\
+		"shrd %%cl,%%edx,%%eax\n\t"				\
+		"shrd %%cl,%[__hi],%%edx\n\t"				\
+		: "=A" (__ret), [__lo] "=&r" (__lo), [__hi] "=&r" (__hi) \
+		: "A" (ll), [__m] "m" (m), [__s] "m" (s)		\
+		: "ecx");						\
+	__ret;								\
+})
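+
+/*
+ * In other words, llmulshft computes (ll * m) >> s over a 96-bit
+ * intermediate: choosing m and s so that m / 2^s approximates a
+ * conversion ratio replaces a costly 64-bit division with a
+ * multiply-and-shift.
+ */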
+
+static inline __attribute__((const)) unsigned long long
+mach_x86_32_nodiv_ullimd(const unsigned long long op,
+			 const unsigned long long frac,
+			 unsigned rhs_integ)
+{
+	register unsigned rl __asm__("ecx");
+	register unsigned rm __asm__("esi");
+	register unsigned rh __asm__("edi");
+	unsigned fracl, frach, opl, oph;
+	volatile unsigned integ = rhs_integ;
+	register unsigned long long t;
+
+	xnarch_u64tou32(op, oph, opl);
+	xnarch_u64tou32(frac, frach, fracl);
+
+	__asm__ ("mov %[oph], %%eax\n\t"
+		 "mull %[frach]\n\t"
+		 "mov %%eax, %[rm]\n\t"
+		 "mov %%edx, %[rh]\n\t"
+		 "mov %[opl], %%eax\n\t"
+		 "mull %[fracl]\n\t"
+		 "mov %%edx, %[rl]\n\t"
+		 "shl $1, %%eax\n\t"
+		 "adc $0, %[rl]\n\t"
+		 "adc $0, %[rm]\n\t"
+		 "adc $0, %[rh]\n\t"
+		 "mov %[oph], %%eax\n\t"
+		 "mull %[fracl]\n\t"
+		 "add %%eax, %[rl]\n\t"
+		 "adc %%edx, %[rm]\n\t"
+		 "adc $0, %[rh]\n\t"
+		 "mov %[opl], %%eax\n\t"
+		 "mull %[frach]\n\t"
+		 "add %%eax, %[rl]\n\t"
+		 "adc %%edx, %[rm]\n\t"
+		 "adc $0, %[rh]\n\t"
+		 "mov %[opl], %%eax\n\t"
+		 "mull %[integ]\n\t"
+		 "add %[rm], %%eax\n\t"
+		 "adc %%edx, %[rh]\n\t"
+		 "mov %[oph], %%edx\n\t"
+		 "imul %[integ], %%edx\n\t"
+		 "add %[rh], %%edx\n\t"
+		 : [rl]"=&c"(rl), [rm]"=&S"(rm), [rh]"=&D"(rh), "=&A"(t)
+		 : [opl]"m"(opl), [oph]"m"(oph),
+		   [fracl]"m"(fracl), [frach]"m"(frach), [integ]"m"(integ)
+		 : "cc");
+
+	return t;
+}
+
+#define xnarch_nodiv_ullimd(op, frac, integ) \
+	mach_x86_32_nodiv_ullimd((op), (frac), (integ))
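+
+/*
+ * The routine above computes op * (integ + frac/2^64) without any
+ * division: the 32x32 partial products of op * frac are accumulated
+ * with explicit carries, the low bits being kept only so that their
+ * carries (plus the rounding performed by the shl/adc pair on the
+ * lowest partial product) propagate into the retained high word,
+ * then op * integ is added on top.
+ */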
+
+#else /* x86_64 */
+
+static inline __attribute__((__const__)) long long
+mach_x86_64_llimd (long long op, unsigned m, unsigned d)
+{
+	long long result;
+
+	__asm__ (
+		"imul %[m]\n\t"
+		"idiv %[d]\n\t"
+		: "=a" (result)
+		: "a" (op), [m] "r" ((unsigned long long)m),
+		  [d] "r" ((unsigned long long)d)
+		: "rdx");
+
+	return result;
+}
+#define xnarch_llimd(ll,m,d) mach_x86_64_llimd((ll),(m),(d))
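+
+/*
+ * On x86_64, single-operand imul/idiv work on the 128-bit RDX:RAX
+ * pair, so op * m cannot overflow before the division by d happens.
+ */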
+
+static inline __attribute__((__const__)) long long
+mach_x86_64_llmulshft(long long op, unsigned m, unsigned s)
+{
+	long long result;
+
+	__asm__ (
+		"imulq %[m]\n\t"
+		"shrd %%cl,%%rdx,%%rax\n\t"
+		: "=a,a" (result)
+		: "a,a" (op), [m] "m,r" ((unsigned long long)m),
+		  "c,c" (s)
+		: "rdx");
+
+	return result;
+}
+#define xnarch_llmulshft(op, m, s) mach_x86_64_llmulshft((op), (m), (s))
+
+static inline __attribute__((__const__)) unsigned long long
+mach_x86_64_nodiv_ullimd(unsigned long long op,
+			    unsigned long long frac, unsigned rhs_integ)
+{
+	register unsigned long long rl __asm__("rax") = frac;
+	register unsigned long long rh __asm__("rdx");
+	register unsigned long long integ __asm__("rsi") = rhs_integ;
+	register unsigned long long t __asm__("r8") = 0x80000000ULL;
+
+	__asm__ ("mulq %[op]\n\t"
+		 "addq %[t], %[rl]\n\t"
+		 "adcq $0, %[rh]\n\t"
+		 "imulq %[op], %[integ]\n\t"
+		 "leaq (%[integ], %[rh], 1),%[rl]":
+		 [rh]"=&d"(rh), [rl]"+&a"(rl), [integ]"+S"(integ):
+		 [op]"D"(op), [t]"r"(t): "cc");
+
+	return rl;
+}
+
+#define xnarch_nodiv_ullimd(op, frac, integ) \
+	mach_x86_64_nodiv_ullimd((op), (frac), (integ))
+
+#endif /* x86_64 */
+
+#include <cobalt/uapi/asm-generic/arith.h>
+
+#endif /* _COBALT_X86_ASM_UAPI_ARITH_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h
new file mode 100644
index 0000000..65f8164
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/features.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2005-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_X86_ASM_UAPI_FEATURES_H
+#define _COBALT_X86_ASM_UAPI_FEATURES_H
+
+/* The ABI revision level we use on this arch. */
+#define XENOMAI_ABI_REV   18UL
+
+#define XENOMAI_FEAT_DEP  __xn_feat_generic_mask
+
+#define XENOMAI_FEAT_MAN  __xn_feat_generic_man_mask
+
+#define XNARCH_HAVE_LLMULSHFT    1
+#define XNARCH_HAVE_NODIV_LLIMD  1
+
+struct cobalt_featinfo_archdep { /* no arch-specific feature */ };
+
+#include <cobalt/uapi/asm-generic/features.h>
+
+static inline const char *get_feature_label(unsigned int feature)
+{
+	return get_generic_feature_label(feature);
+}
+
+#endif /* !_COBALT_X86_ASM_UAPI_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h
new file mode 100644
index 0000000..d406cc3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/fptest.h
@@ -0,0 +1,132 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_X86_ASM_UAPI_FPTEST_H
+#define _COBALT_X86_ASM_UAPI_FPTEST_H
+
+#define __COBALT_HAVE_SSE2	0x1
+#define __COBALT_HAVE_AVX	0x2
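+
+/*
+ * Test protocol: fp_regs_set() loads each x87 register (and the
+ * SSE2/AVX banks when available) with a known value; once the
+ * scheduler has been exercised, fp_regs_check() reads the registers
+ * back and reports any mismatch, which would expose a broken FPU
+ * context switch.
+ */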
+
+static inline void fp_regs_set(int features, unsigned int val)
+{
+	unsigned long long vec[4] = { val, 0, val, 0 };
+	unsigned i;
+
+	for (i = 0; i < 8; i++)
+		__asm__ __volatile__("fildl %0": /* no output */ :"m"(val));
+
+	if (features & __COBALT_HAVE_AVX) {
+		__asm__ __volatile__(
+			"vmovupd %0,%%ymm0;"
+			"vmovupd %0,%%ymm1;"
+			"vmovupd %0,%%ymm2;"
+			"vmovupd %0,%%ymm3;"
+			"vmovupd %0,%%ymm4;"
+			"vmovupd %0,%%ymm5;"
+			"vmovupd %0,%%ymm6;"
+			"vmovupd %0,%%ymm7;"
+			: : "m"(vec[0]), "m"(vec[1]), "m"(vec[2]), "m"(vec[3]));
+	} else if (features & __COBALT_HAVE_SSE2) {
+		__asm__ __volatile__(
+			"movupd %0,%%xmm0;"
+			"movupd %0,%%xmm1;"
+			"movupd %0,%%xmm2;"
+			"movupd %0,%%xmm3;"
+			"movupd %0,%%xmm4;"
+			"movupd %0,%%xmm5;"
+			"movupd %0,%%xmm6;"
+			"movupd %0,%%xmm7;"
+			: : "m"(vec[0]), "m"(vec[1]), "m"(vec[2]), "m"(vec[3]));
+	}
+}
+
+static inline unsigned int fp_regs_check(int features, unsigned int val,
+					 int (*report)(const char *fmt, ...))
+{
+	unsigned long long vec[8][4];
+	unsigned int i, result = val;
+	unsigned e[8];
+
+	for (i = 0; i < 8; i++)
+		__asm__ __volatile__("fistpl %0":"=m"(e[7 - i]));
+
+	if (features & __COBALT_HAVE_AVX) {
+		__asm__ __volatile__(
+			"vmovupd %%ymm0,%0;"
+			"vmovupd %%ymm1,%1;"
+			"vmovupd %%ymm2,%2;"
+			"vmovupd %%ymm3,%3;"
+			"vmovupd %%ymm4,%4;"
+			"vmovupd %%ymm5,%5;"
+			"vmovupd %%ymm6,%6;"
+			"vmovupd %%ymm7,%7;"
+			: "=m" (vec[0][0]), "=m" (vec[1][0]),
+			  "=m" (vec[2][0]), "=m" (vec[3][0]),
+			  "=m" (vec[4][0]), "=m" (vec[5][0]),
+			  "=m" (vec[6][0]), "=m" (vec[7][0]));
+	} else if (features & __COBALT_HAVE_SSE2) {
+		__asm__ __volatile__(
+			"movupd %%xmm0,%0;"
+			"movupd %%xmm1,%1;"
+			"movupd %%xmm2,%2;"
+			"movupd %%xmm3,%3;"
+			"movupd %%xmm4,%4;"
+			"movupd %%xmm5,%5;"
+			"movupd %%xmm6,%6;"
+			"movupd %%xmm7,%7;"
+			: "=m" (vec[0][0]), "=m" (vec[1][0]),
+			  "=m" (vec[2][0]), "=m" (vec[3][0]),
+			  "=m" (vec[4][0]), "=m" (vec[5][0]),
+			  "=m" (vec[6][0]), "=m" (vec[7][0]));
+	}
+
+	for (i = 0; i < 8; i++)
+		if (e[i] != val) {
+			report("r%d: %u != %u\n", i, e[i], val);
+			result = e[i];
+		}
+
+	if (features & __COBALT_HAVE_AVX) {
+		for (i = 0; i < 8; i++) {
+			int error = 0;
+			if (vec[i][0] != val) {
+				result = vec[i][0];
+				error = 1;
+			}
+			if (vec[i][2] != val) {
+				result = vec[i][2];
+				error = 1;
+			}
+			if (error)
+				report("ymm%d: %llu/%llu != %u/%u\n",
+				       i, (unsigned long long)vec[i][0],
+				       (unsigned long long)vec[i][2],
+				       val, val);
+		}
+	} else if (features & __COBALT_HAVE_SSE2) {
+		for (i = 0; i < 8; i++)
+			if (vec[i][0] != val) {
+				report("xmm%d: %llu != %u\n",
+				       i, (unsigned long long)vec[i][0], val);
+				result = vec[i][0];
+			}
+	}
+
+	return result;
+}
+
+#endif /* _COBALT_X86_ASM_UAPI_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h
new file mode 100644
index 0000000..500d169
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/uapi/syscall.h
@@ -0,0 +1,23 @@
+/*
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COBALT_X86_ASM_UAPI_SYSCALL_H
+#define _COBALT_X86_ASM_UAPI_SYSCALL_H
+
+#define __xn_syscode(__nr)	(__COBALT_SYSCALL_BIT | (__nr))
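+
+/*
+ * e.g. userland issues __xn_syscode(sc_cobalt_clock_gettime) as the
+ * syscall number; the kernel strips __COBALT_SYSCALL_BIT again when
+ * demultiplexing the request.
+ */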
+
+#endif /* !_COBALT_X86_ASM_UAPI_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
new file mode 100644
index 0000000..f873277
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/include/asm/xenomai/wrappers.h
@@ -0,0 +1,66 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_WRAPPERS_H
+#define _COBALT_X86_ASM_WRAPPERS_H
+
+#include <asm-generic/xenomai/wrappers.h> /* Read the generic portion. */
+
+#define __get_user_inatomic __get_user
+#define __put_user_inatomic __put_user
+
+#if LINUX_VERSION_CODE > KERNEL_VERSION(4,9,108) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(4,10,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+#if LINUX_VERSION_CODE > KERNEL_VERSION(4,4,137) && \
+    LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+#define IPIPE_X86_FPU_EAGER
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0) && \
+      LINUX_VERSION_CODE < KERNEL_VERSION(5,16,0)
+#include <asm/fpu/internal.h>
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
+
+static inline void kernel_fpu_disable(void)
+{
+	__thread_clear_has_fpu(current);
+}
+
+static inline void kernel_fpu_enable(void)
+{
+}
+
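+/*
+ * With lazy FPU switching, kernel FPU usage counts as disabled when
+ * the current task does not own the FPU register state although
+ * CR0.TS is clear, which is precisely the state kernel_fpu_disable()
+ * installs above.
+ */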
+static inline bool kernel_fpu_disabled(void)
+{
+	return __thread_has_fpu(current) == 0 && (read_cr0() & X86_CR0_TS) == 0;
+}
+#endif /* linux < 4.0 */
+
+#endif /* _COBALT_X86_ASM_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile
new file mode 100644
index 0000000..1ef407c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/Makefile
@@ -0,0 +1,5 @@
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+xenomai-y := machine.o thread.o smi.o c1e.o
+
+ccflags-y := -I$(srctree)/arch/x86/xenomai/include -I$(srctree)/include/xenomai
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README
new file mode 100644
index 0000000..80f954a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/README
@@ -0,0 +1,3 @@
+Get the interrupt pipeline (I-pipe) code matching the target kernel
+from http://xenomai.org/downloads/ipipe/, or from the git repository
+at git://git.xenomai.org/ipipe.git
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c
new file mode 100644
index 0000000..9bd4e92
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/c1e.c
@@ -0,0 +1,72 @@
+/*
+ * Disable Intel automatic promotion to C1E mode.
+ * Lifted from drivers/idle/intel_idle.c
+ * Copyright (c) 2013, Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/smp.h>
+#include <asm/processor.h>
+#include <asm/cpu_device_id.h>
+#include <asm/msr.h>
+
+#define ICPU(model) \
+	{ X86_VENDOR_INTEL, 6, model, X86_FEATURE_MWAIT, 1UL }
+
+static const struct x86_cpu_id c1e_ids[] = {
+	ICPU(0x1a),
+	ICPU(0x1e),
+	ICPU(0x1f),
+	ICPU(0x25),
+	ICPU(0x2c),
+	ICPU(0x2e),
+	ICPU(0x2f),
+	ICPU(0x2a),
+	ICPU(0x2d),
+	ICPU(0x3a),
+	ICPU(0x3e),
+	ICPU(0x3c),
+	ICPU(0x3f),
+	ICPU(0x45),
+	ICPU(0x46),
+	ICPU(0x4D),
+	{}
+};
+
+#undef ICPU
+
+static void c1e_promotion_disable(void *dummy)
+{
+	unsigned long long msr_bits;
+
+	rdmsrl(MSR_IA32_POWER_CTL, msr_bits);
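+	/* Bit 1 of MSR_IA32_POWER_CTL is the C1E promotion enable bit. */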
+	msr_bits &= ~0x2;
+	wrmsrl(MSR_IA32_POWER_CTL, msr_bits);
+}
+
+void mach_x86_c1e_disable(void)
+{
+	const struct x86_cpu_id *id;
+
+	id = x86_match_cpu(c1e_ids);
+	if (id) {
+		printk("[Xenomai] disabling automatic C1E state promotion on Intel processor\n");
+		/*
+		 * cpu uses C1E, disable this feature (copied from
+		 * intel_idle driver)
+		 */
+		on_each_cpu(c1e_promotion_disable, NULL, 1);
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h
new file mode 100644
index 0000000..eaecc48
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/calibration.h
@@ -0,0 +1,71 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_CALIBRATION_H
+#define _COBALT_X86_ASM_CALIBRATION_H
+
+#include <asm/processor.h>
+
+static inline unsigned long __get_bogomips(void)
+{
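+	/* The kernel's stock BogoMIPS formula. */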
+	return this_cpu_read(cpu_info.loops_per_jiffy)/(500000/HZ);
+}
+
+static inline void xnarch_get_latencies(struct xnclock_gravity *p)
+{
+	unsigned long sched_latency;
+
+#if CONFIG_XENO_OPT_TIMING_SCHEDLAT != 0
+	sched_latency = CONFIG_XENO_OPT_TIMING_SCHEDLAT;
+#else /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */
+
+	if (strcmp(ipipe_timer_name(), "lapic") == 0) {
+#ifdef CONFIG_SMP
+		if (num_online_cpus() > 1)
+			sched_latency = 3350;
+		else
+			sched_latency = 2000;
+#else /* !SMP */
+		sched_latency = 1000;
+#endif /* !SMP */
+	} else if (strcmp(ipipe_timer_name(), "pit")) { /* not "pit", i.e. HPET */
+#ifdef CONFIG_SMP
+		if (num_online_cpus() > 1)
+			sched_latency = 3350;
+		else
+			sched_latency = 1500;
+#else /* !SMP */
+		sched_latency = 1000;
+#endif /* !SMP */
+	} else {
+		sched_latency = (__get_bogomips() < 250 ? 17000 :
+				 __get_bogomips() < 2500 ? 4200 :
+				 3500);
+#ifdef CONFIG_SMP
+		sched_latency += 1000;
+#endif /* CONFIG_SMP */
+	}
+#endif /* !CONFIG_XENO_OPT_TIMING_SCHEDLAT */
+
+	p->user = xnclock_ns_to_ticks(&nkclock, sched_latency);
+	p->kernel = xnclock_ns_to_ticks(&nkclock,
+					CONFIG_XENO_OPT_TIMING_KSCHEDLAT);
+	p->irq = xnclock_ns_to_ticks(&nkclock, CONFIG_XENO_OPT_TIMING_IRQLAT);
+}
+
+#endif /* !_COBALT_X86_ASM_CALIBRATION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h
new file mode 100644
index 0000000..7a2b17d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/fptest.h
@@ -0,0 +1,75 @@
+/*
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_FPTEST_H
+#define _COBALT_X86_ASM_FPTEST_H
+
+#include <linux/errno.h>
+#include <asm/processor.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/xenomai/uapi/fptest.h>
+
+static inline int fp_kernel_supported(void)
+{
+	return 1;
+}
+
+static inline void fp_init(void)
+{
+	__asm__ __volatile__("fninit");
+}
+
+static inline int fp_linux_begin(void)
+{
+	kernel_fpu_begin();
+	/* kernel_fpu_begin() does not re-initialize the FPU context, but
+	   fp_regs_set() implicitly expects an initialized FPU context, so
+	   initialize it here. */
+	fp_init();
+	return 0;
+}
+
+static inline void fp_linux_end(void)
+{
+	kernel_fpu_end();
+}
+
+static inline int fp_detect(void)
+{
+	int features = 0;
+
+#ifndef cpu_has_xmm2
+#ifdef cpu_has_sse2
+#define cpu_has_xmm2 cpu_has_sse2
+#else
+#define cpu_has_xmm2 0
+#endif
+#endif
+	if (cpu_has_xmm2)
+		features |= __COBALT_HAVE_SSE2;
+
+#ifndef cpu_has_avx
+#define cpu_has_avx 0
+#endif
+	if (cpu_has_avx)
+		features |= __COBALT_HAVE_AVX;
+
+	return features;
+}
+
+#endif /* _COBALT_X86_ASM_FPTEST_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h
new file mode 100644
index 0000000..750eb1e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/machine.h
@@ -0,0 +1,35 @@
+/**
+ * Copyright (C) 2007-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_MACHINE_H
+#define _COBALT_X86_ASM_MACHINE_H
+
+#include <linux/compiler.h>
+
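+/*
+ * Find-first-nonzero: returns the bit position of the least
+ * significant set bit. bsfq leaves its output undefined for a zero
+ * input, hence the "nz" part of the contract.
+ */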
+static inline __attribute_const__ unsigned long ffnz(unsigned long ul)
+{
+	__asm__("bsfq %1, %0":"=r" (ul) : "rm" (ul));
+	return ul;
+}
+
+#define XNARCH_HOST_TICK_IRQ	__ipipe_hrtimer_irq
+
+/* Read this last to enable default settings. */
+#include <asm-generic/xenomai/machine.h>
+
+#endif /* !_COBALT_X86_ASM_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..f889f5f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/syscall.h
@@ -0,0 +1,81 @@
+/*
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_SYSCALL_H
+#define _COBALT_X86_ASM_SYSCALL_H
+
+#include <linux/errno.h>
+#include <asm/ptrace.h>
+#include <asm-generic/xenomai/syscall.h>
+
+/*
+ * Cobalt and Linux syscall numbers can be fetched from ORIG_AX,
+ * masking out the __COBALT_SYSCALL_BIT marker.
+ */
+#define __xn_reg_sys(regs)    ((regs)->orig_ax)
+#define __xn_reg_rval(regs)   ((regs)->ax)
+#define __xn_reg_arg1(regs)   ((regs)->di)
+#define __xn_reg_arg2(regs)   ((regs)->si)
+#define __xn_reg_arg3(regs)   ((regs)->dx)
+#define __xn_reg_arg4(regs)   ((regs)->r10)
+#define __xn_reg_arg5(regs)   ((regs)->r8)
+#define __xn_reg_pc(regs)     ((regs)->ip)
+#define __xn_reg_sp(regs)     ((regs)->sp)
+
+#define __xn_syscall_p(regs)  (__xn_reg_sys(regs) & __COBALT_SYSCALL_BIT)
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#define __xn_syscall(regs)    __COBALT_CALL32_SYSNR(__xn_reg_sys(regs)	\
+				    & ~__COBALT_SYSCALL_BIT)
+#else
+#define __xn_syscall(regs)    (__xn_reg_sys(regs) & ~__COBALT_SYSCALL_BIT)
+#endif
+
+/*
+ * Root syscall number with predicate (valid only if
+ * !__xn_syscall_p(__regs)).
+ */
+#define __xn_rootcall_p(__regs, __code)			\
+	({						\
+		*(__code) = __xn_reg_sys(__regs);	\
+		*(__code) < ipipe_root_nr_syscalls(current_thread_info()); \
+	})
+
+static inline void __xn_error_return(struct pt_regs *regs, int v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline void __xn_status_return(struct pt_regs *regs, long v)
+{
+	__xn_reg_rval(regs) = v;
+}
+
+static inline int __xn_interrupted_p(struct pt_regs *regs)
+{
+	return __xn_reg_rval(regs) == -EINTR;
+}
+
+static inline
+int xnarch_local_syscall(unsigned long a1, unsigned long a2,
+			 unsigned long a3, unsigned long a4,
+			 unsigned long a5)
+{
+	return -ENOSYS;
+}
+
+#endif /* !_COBALT_X86_ASM_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h
new file mode 100644
index 0000000..a1a79bb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/include/asm/xenomai/thread.h
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004-2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_X86_ASM_THREAD_H
+#define _COBALT_X86_ASM_THREAD_H
+
+#include <asm-generic/xenomai/ipipe/thread.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/traps.h>
+
+#ifndef IPIPE_X86_FPU_EAGER
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,4,0)
+typedef union thread_xstate x86_fpustate;
+#define x86_fpustate_ptr(t) ((t)->fpu.state)
+#else
+typedef union fpregs_state x86_fpustate;
+#define x86_fpustate_ptr(t) ((t)->fpu.active_state)
+#endif
+#endif
+
+struct xnarchtcb {
+	struct xntcb core;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+	unsigned long sp;
+	unsigned long *spp;
+	unsigned long ip;
+	unsigned long *ipp;
+#endif
+#ifdef IPIPE_X86_FPU_EAGER
+	struct fpu *kfpu;
+#else
+	x86_fpustate *fpup;
+	unsigned int root_used_math: 1;
+	x86_fpustate *kfpu_state;
+#endif
+	unsigned int root_kfpu: 1;
+};
+
+#define xnarch_fpu_ptr(tcb)     ((tcb)->fpup)
+
+#define xnarch_fault_regs(d)	((d)->regs)
+#define xnarch_fault_trap(d)	((d)->exception)
+#define xnarch_fault_code(d)	((d)->regs->orig_ax)
+#define xnarch_fault_pc(d)	((d)->regs->ip)
+#define xnarch_fault_fpu_p(d)	((d)->exception == X86_TRAP_NM)
+#define xnarch_fault_pf_p(d)	((d)->exception == X86_TRAP_PF)
+#define xnarch_fault_bp_p(d)	((current->ptrace & PT_PTRACED) &&	\
+				 ((d)->exception == X86_TRAP_DB || (d)->exception == X86_TRAP_BP))
+#define xnarch_fault_notify(d)	(!xnarch_fault_bp_p(d))
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->core.host_task;
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to);
+
+int xnarch_handle_fpu_fault(struct xnthread *from,
+			    struct xnthread *to, struct ipipe_trap_data *d);
+
+void xnarch_leave_root(struct xnthread *root);
+
+void xnarch_init_root_tcb(struct xnthread *thread);
+
+void xnarch_init_shadow_tcb(struct xnthread *thread);
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in);
+
+static inline void xnarch_enter_root(struct xnthread *root) { }
+
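+/*
+ * When called over the root domain, raise the escalation VIRQ so the
+ * pended work is redone from the head domain, and return 1 to tell
+ * the caller to bail out.
+ */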
+static inline int xnarch_escalate(void)
+{
+	if (ipipe_root_p) {
+		ipipe_raise_irq(cobalt_pipeline.escalate_virq);
+		return 1;
+	}
+
+	return 0;
+}
+
+int mach_x86_thread_init(void);
+void mach_x86_thread_cleanup(void);
+
+#endif /* !_COBALT_X86_ASM_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c
new file mode 100644
index 0000000..d51a91f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/machine.c
@@ -0,0 +1,78 @@
+/**
+ *   Copyright (C) 2007-2012 Philippe Gerum.
+ *
+ *   Xenomai is free software; you can redistribute it and/or
+ *   modify it under the terms of the GNU General Public License as
+ *   published by the Free Software Foundation, Inc., 675 Mass Ave,
+ *   Cambridge MA 02139, USA; either version 2 of the License, or (at
+ *   your option) any later version.
+ *
+ *   Xenomai is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ *   General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ *   02111-1307, USA.
+ */
+#include <asm/xenomai/machine.h>
+#include <asm/xenomai/thread.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/smi.h>
+#include <asm/xenomai/c1e.h>
+
+static int mach_x86_init(void)
+{
+	int ret;
+
+	ret = mach_x86_thread_init();
+	if (ret)
+		return ret;
+
+	mach_x86_c1e_disable();
+	mach_x86_smi_init();
+	mach_x86_smi_disable();
+
+	return 0;
+}
+
+static void mach_x86_cleanup(void)
+{
+	mach_x86_smi_restore();
+	mach_x86_thread_cleanup();
+}
+
+static const char *const fault_labels[] = {
+    [0] = "Divide error",
+    [1] = "Debug",
+    [2] = "",   /* NMI is not pipelined. */
+    [3] = "Int3",
+    [4] = "Overflow",
+    [5] = "Bounds",
+    [6] = "Invalid opcode",
+    [7] = "FPU not available",
+    [8] = "Double fault",
+    [9] = "FPU segment overrun",
+    [10] = "Invalid TSS",
+    [11] = "Segment not present",
+    [12] = "Stack segment",
+    [13] = "General protection",
+    [14] = "Page fault",
+    [15] = "Spurious interrupt",
+    [16] = "FPU error",
+    [17] = "Alignment check",
+    [18] = "Machine check",
+    [19] = "SIMD error",
+    [20] = NULL,
+};
+
+struct cobalt_machine cobalt_machine = {
+	.name = "x86",
+	.init = mach_x86_init,
+	.late_init = NULL,
+	.cleanup = mach_x86_cleanup,
+	.prefault = NULL,
+	.fault_labels = fault_labels,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c
new file mode 100644
index 0000000..f28af9a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/smi.c
@@ -0,0 +1,168 @@
+/**
+ *   SMI workaround for x86.
+ *
+ *   Cut/Pasted from Vitor Angelo "smi" module.
+ *   Adapted by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ *   This program is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ctype.h>
+#include <linux/pci.h>
+#include <linux/pci_ids.h>
+#include <linux/reboot.h>
+#include <cobalt/kernel/assert.h>
+#include <asm-generic/xenomai/pci_ids.h>
+#include <asm/xenomai/machine.h>
+
+#define DEVFN		0xf8	/* device 31, function 0 */
+
+#define PMBASE_B0	0x40
+#define PMBASE_B1	0x41
+
+#define SMI_CTRL_ADDR	0x30
+
+static int smi_state;
+static char smi_state_arg[16] = "detect";
+module_param_string(smi, smi_state_arg, sizeof(smi_state_arg), 0444);
+
+static unsigned int smi_masked_bits = 1; /* Global disable bit */
+module_param_named(smi_mask, smi_masked_bits, int, 0400);
+
+static unsigned int smi_saved_bits;
+static unsigned short smi_en_addr;
+
+#define mask_bits(v, p) outl(inl(p)&~(v),(p))
+#define set_bits(v, p)  outl(inl(p)|(v), (p))
+
+static int smi_reboot(struct notifier_block *nb, ulong event, void *buf);
+
+static struct notifier_block smi_notifier = {
+	.notifier_call = smi_reboot
+};
+
+static int smi_reboot(struct notifier_block *nb, ulong event, void *buf)
+{
+	if (((event == SYS_RESTART) || (event == SYS_HALT) ||
+	     (event == SYS_POWER_OFF)) && smi_en_addr)
+		set_bits(smi_saved_bits, smi_en_addr);
+
+	return NOTIFY_DONE;
+}
+
+void mach_x86_smi_disable(void)
+{
+	if (smi_en_addr == 0)
+		return;
+
+	smi_saved_bits = inl(smi_en_addr) & smi_masked_bits;
+	mask_bits(smi_masked_bits, smi_en_addr);
+
+	if (inl(smi_en_addr) & smi_masked_bits)
+		printk(XENO_WARNING "SMI workaround failed!\n");
+	else
+		printk(XENO_INFO "SMI workaround enabled\n");
+
+	register_reboot_notifier(&smi_notifier);
+}
+
+void mach_x86_smi_restore(void)
+{
+	if (smi_en_addr == 0)
+		return;
+
+	printk(XENO_INFO "SMI configuration restored\n");
+
+	set_bits(smi_saved_bits, smi_en_addr);
+
+	unregister_reboot_notifier(&smi_notifier);
+}
+
+static unsigned short get_smi_en_addr(struct pci_dev *dev)
+{
+	u_int8_t byte0, byte1;
+
+	pci_read_config_byte(dev, PMBASE_B0, &byte0);
+	pci_read_config_byte(dev, PMBASE_B1, &byte1);
+	return SMI_CTRL_ADDR + (((byte1 << 1) | (byte0 >> 7)) << 7); /* PMBASE bits 15:7 */
+}
+
+static const char *smi_state_labels[] = {
+	"disabled",
+	"detect",
+	"enabled",
+};
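+
+/*
+ * The "smi" module parameter accepts the labels above or, for
+ * backward compatibility, their numeric equivalents -1 (disabled),
+ * 0 (detect) and 1 (enabled).
+ */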
+
+static void setup_smi_state(void)
+{
+	static char warn_bad_state[] =
+		XENO_WARNING "invalid SMI state '%s'\n";
+	char *p;
+	int n;
+
+	/* Backward compat with legacy state specifiers. */
+	n = simple_strtol(smi_state_arg, &p, 10);
+	if (*p == '\0') {
+		smi_state = n;
+		return;
+	}
+
+	for (n = 0; n < ARRAY_SIZE(smi_state_labels); n++)
+		if (strcmp(smi_state_labels[n], smi_state_arg) == 0) {
+			smi_state = n - 1;
+			return;
+		}
+
+	printk(warn_bad_state, smi_state_arg);
+}
+
+void mach_x86_smi_init(void)
+{
+	struct pci_dev *dev = NULL;
+
+	setup_smi_state();
+
+	if (smi_state < 0)
+		return;
+
+	/*
+	 * Do not use pci_register_driver, pci_enable_device, ...
+	 * Just register the used ports.
+	 */
+	dev = pci_get_class(PCI_CLASS_BRIDGE_ISA << 8, NULL);
+	if (dev == NULL || dev->bus->number ||
+	    dev->devfn != DEVFN || dev->vendor != PCI_VENDOR_ID_INTEL) {
+		pci_dev_put(dev);
+		return;
+	}
+
+	if (smi_state == 0) {
+		printk(XENO_WARNING "SMI-enabled chipset found, but SMI workaround disabled\n"
+		       "          (see xenomai.smi parameter). You might encounter\n"
+		       "          high latencies!\n");
+		pci_dev_put(dev);
+		return;
+	}
+
+	printk(XENO_INFO "SMI-enabled chipset found\n");
+	smi_en_addr = get_smi_en_addr(dev);
+
+	pci_dev_put(dev);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c
new file mode 100644
index 0000000..46c47af
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arch/x86/ipipe/thread.c
@@ -0,0 +1,522 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004-2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#include <linux/sched.h>
+#include <linux/ipipe.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <cobalt/kernel/thread.h>
+#include <asm/mmu_context.h>
+#include <asm/processor.h>
+
+static struct kmem_cache *xstate_cache;
+
+#ifdef IPIPE_X86_FPU_EAGER
+#define fpu_kernel_xstate_size sizeof(struct fpu)
+#else
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,7,0)
+#define fpu_kernel_xstate_size xstate_size
+#endif
+#endif /* IPIPE_X86_FPU_EAGER */
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,6,0)
+#define cpu_has_xmm boot_cpu_has(X86_FEATURE_XMM)
+#define cpu_has_fxsr boot_cpu_has(X86_FEATURE_FXSR)
+#define cpu_has_xsave boot_cpu_has(X86_FEATURE_XSAVE)
+#endif
+
+#ifndef IPIPE_X86_FPU_EAGER
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,2,0)
+#include <asm/i387.h>
+#include <asm/fpu-internal.h>
+#define x86_fpregs_active(t)		__thread_has_fpu(t)
+#define x86_fpregs_deactivate(t)	__thread_clear_has_fpu(t)
+#define x86_fpregs_activate(t)		__thread_set_has_fpu(t)
+#define x86_xstate_alignment		__alignof__(union thread_xstate)
+#else
+#include <asm/fpu/internal.h>
+
+static inline int x86_fpregs_active(struct task_struct *t)
+{
+	return t->thread.fpu.fpregs_active;
+}
+
+static inline void x86_fpregs_deactivate(struct task_struct *t)
+{
+	if (x86_fpregs_active(t))
+		__fpregs_deactivate(&t->thread.fpu);
+}
+
+static inline void x86_fpregs_activate(struct task_struct *t)
+{
+	if (!x86_fpregs_active(t))
+		__fpregs_activate(&t->thread.fpu);
+}
+
+#define x86_xstate_alignment		__alignof__(union fpregs_state)
+
+#endif
+#else /* IPIPE_X86_FPU_EAGER */
+#define x86_xstate_alignment		__alignof__(union fpregs_state)
+#endif /* ! IPIPE_X86_FPU_EAGER */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+/*
+ * This is obsolete context switch code uselessly duplicating
+ * mainline's.
+ */
+#define __SWITCH_CLOBBER_LIST  , "r8", "r9", "r10", "r11", "r12", "r13", "r14", "r15"
+
+#ifdef CONFIG_CC_STACKPROTECTOR
+
+#define __CANARY_OUTPUT							\
+	, [gs_canary] "=m" (irq_stack_union.stack_canary)
+
+#define __CANARY_INPUT							\
+	, [task_canary] "i" (offsetof(struct task_struct, stack_canary)) \
+	, [current_task] "m" (current_task)
+
+#define __CANARY_SWITCH							\
+  	"movq "__percpu_arg([current_task])",%%rsi\n\t"			\
+	"movq %P[task_canary](%%rsi),%%r8\n\t"				\
+	"movq %%r8,"__percpu_arg([gs_canary])"\n\t"
+
+#else /* !CONFIG_CC_STACKPROTECTOR */
+
+#define __CANARY_OUTPUT
+#define __CANARY_INPUT
+#define __CANARY_SWITCH
+
+#endif /* !CONFIG_CC_STACKPROTECTOR */
+
+#define do_switch_threads(prev, next, p_rsp, n_rsp, p_rip, n_rip)	\
+	({								\
+		long __rdi, __rsi, __rax, __rbx, __rcx, __rdx;		\
+									\
+		__asm__ __volatile__("pushfq\n\t"			\
+			     "pushq	%%rbp\n\t"			\
+			     "movq	%%rsi, %%rbp\n\t"		\
+			     "movq	%%rsp, (%%rdx)\n\t"		\
+			     "movq	$1f, (%%rax)\n\t"		\
+			     "movq	(%%rcx), %%rsp\n\t"		\
+			     "pushq	(%%rbx)\n\t"			\
+			     "jmp	__switch_to\n\t"		\
+			     "1:\n\t"					\
+			     __CANARY_SWITCH				\
+			     "movq	%%rbp, %%rsi\n\t"		\
+			     "popq	%%rbp\n\t"			\
+			     "popfq\n\t"				\
+			     : "=S" (__rsi), "=D" (__rdi), "=a"	(__rax), \
+			       "=b" (__rbx), "=c" (__rcx), "=d" (__rdx)	\
+			       __CANARY_OUTPUT				\
+			     : "0" (next), "1" (prev), "5" (p_rsp), "4" (n_rsp), \
+			       "2" (p_rip), "3" (n_rip)			\
+			       __CANARY_INPUT				\
+			     : "memory", "cc" __SWITCH_CLOBBER_LIST);	\
+	})
+
+#else /* LINUX_VERSION_CODE >= 4.8 */
+
+#include <asm/switch_to.h>
+
+#endif /* LINUX_VERSION_CODE >= 4.8 */
+
+void xnarch_switch_to(struct xnthread *out, struct xnthread *in)
+{
+	struct xnarchtcb *out_tcb = &out->tcb, *in_tcb = &in->tcb;
+	struct task_struct *prev, *next, *last;
+	struct mm_struct *prev_mm, *next_mm;
+
+	prev = out_tcb->core.host_task;
+#ifndef IPIPE_X86_FPU_EAGER
+	if (x86_fpregs_active(prev))
+		/*
+		 * __switch_to will try to use __unlazy_fpu, so we
+		 * need to clear the TS bit.
+		 */
+		clts();
+#endif /* ! IPIPE_X86_FPU_EAGER */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+	if (!xnthread_test_state(out, XNROOT | XNUSER) &&
+	    !test_thread_flag(TIF_NEED_FPU_LOAD)) {
+		/*
+		 * This compensates that switch_fpu_prepare ignores kernel
+		 * threads.
+		 */
+		struct fpu *prev_fpu = &prev->thread.fpu;
+
+		if (!copy_fpregs_to_fpstate(prev_fpu))
+			prev_fpu->last_cpu = -1;
+		else
+			prev_fpu->last_cpu = raw_smp_processor_id();
+	}
+#endif
+
+	next = in_tcb->core.host_task;
+#ifndef IPIPE_X86_FPU_EAGER
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,2,0)
+	next->thread.fpu.counter = 0;
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(3,13,0)
+	next->thread.fpu_counter = 0;
+#else
+	next->fpu_counter = 0;
+#endif
+#endif /* ! IPIPE_X86_FPU_EAGER */
+	prev_mm = out_tcb->core.active_mm;
+	next_mm = in_tcb->core.mm;
+	if (next_mm == NULL) {
+		in_tcb->core.active_mm = prev_mm;
+		enter_lazy_tlb(prev_mm, next);
+	} else {
+		ipipe_switch_mm_head(prev_mm, next_mm, next);
+		/*
+		 * We might be switching back to the root thread,
+		 * which we preempted earlier, shortly after "current"
+		 * dropped its mm context in the do_exit() path
+		 * (next->mm == NULL). In that particular case, the
+		 * kernel expects a lazy TLB state for leaving the mm.
+		 */
+		if (next->mm == NULL)
+			enter_lazy_tlb(prev_mm, next);
+	}
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+	do_switch_threads(prev, next,
+			  out_tcb->spp, in_tcb->spp,
+			  out_tcb->ipp, in_tcb->ipp);
+	(void)last;
+#else /* LINUX_VERSION_CODE >= 4.8 */
+	switch_to(prev, next, last);
+#endif /* LINUX_VERSION_CODE >= 4.8 */
+
+#ifndef IPIPE_X86_FPU_EAGER
+	stts();
+#endif /* ! IPIPE_X86_FPU_EAGER */
+}
+
+#ifndef IPIPE_X86_FPU_EAGER
+
+#define XSAVE_PREFIX	"0x48,"
+#define XSAVE_SUFFIX	"q"
+
+static inline void __do_save_fpu_state(x86_fpustate *fpup)
+{
+#ifdef cpu_has_xsave
+	if (cpu_has_xsave) {
+#ifdef CONFIG_AS_AVX
+		__asm__ __volatile__("xsave" XSAVE_SUFFIX " %0"
+			     : "=m" (fpup->xsave) : "a" (-1), "d" (-1)
+			     : "memory");
+#else /* !CONFIG_AS_AVX */
+		__asm __volatile__(".byte " XSAVE_PREFIX "0x0f,0xae,0x27"
+			     : : "D" (&fpup->xsave), "m" (fpup->xsave),
+			         "a" (-1), "d" (-1)
+			     : "memory");
+#endif /* !CONFIG_AS_AVX */
+		return;
+	}
+#endif /* cpu_has_xsave */
+#ifdef CONFIG_AS_FXSAVEQ
+	__asm __volatile__("fxsaveq %0" : "=m" (fpup->fxsave));
+#else /* !CONFIG_AS_FXSAVEQ */
+	__asm__ __volatile__("rex64/fxsave (%[fx])"
+		     : "=m" (fpup->fxsave)
+		     : [fx] "R" (&fpup->fxsave));
+#endif /* !CONFIG_AS_FXSAVEQ */
+}
+
+static inline void __do_restore_fpu_state(x86_fpustate *fpup)
+{
+#ifdef cpu_has_xsave
+	if (cpu_has_xsave) {
+#ifdef CONFIG_AS_AVX
+		__asm__ __volatile__("xrstor" XSAVE_SUFFIX " %0"
+			     : : "m" (fpup->xsave), "a" (-1), "d" (-1)
+			     : "memory");
+#else /* !CONFIG_AS_AVX */
+		__asm__ __volatile__(".byte " XSAVE_PREFIX "0x0f,0xae,0x2f"
+			     : : "D" (&fpup->xsave), "m" (fpup->xsave),
+			         "a" (-1), "d" (-1)
+			     : "memory");
+#endif /* !CONFIG_AS_AVX */
+		return;
+	}
+#endif /* cpu_has_xsave */
+#ifdef CONFIG_AS_FXSAVEQ
+	__asm__ __volatile__("fxrstorq %0" : : "m" (fpup->fxsave));
+#else /* !CONFIG_AS_FXSAVEQ */
+	__asm__ __volatile__("rex64/fxrstor (%0)"
+		     : : "R" (&fpup->fxsave), "m" (fpup->fxsave));
+#endif /* !CONFIG_AS_FXSAVEQ */
+}
+
+int xnarch_handle_fpu_fault(struct xnthread *from, 
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(to);
+	struct task_struct *p = tcb->core.host_task;
+
+	if (x86_fpregs_active(p))
+		return 0;
+
+	if (!(p->flags & PF_USED_MATH)) {
+		/*
+		 * The faulting task is a shadow using the FPU for the first
+		 * time, initialize the FPU context and tell linux about it.
+		 */
+		__asm__ __volatile__("clts; fninit");
+
+		if (cpu_has_xmm) {
+			unsigned long __mxcsr = 0x1f80UL & 0xffbfUL;
+			__asm__ __volatile__("ldmxcsr %0"::"m"(__mxcsr));
+		}
+		p->flags |= PF_USED_MATH;
+	} else {
+		/*
+		 * The faulting task already used FPU in secondary
+		 * mode.
+		 */
+		clts();
+		__do_restore_fpu_state(tcb->fpup);
+	}
+		
+	x86_fpregs_activate(p);
+
+	xnlock_get(&nklock);
+	xnthread_set_state(to, XNFPU);
+	xnlock_put(&nklock);
+
+	return 1;
+}
+#else /* IPIPE_X86_FPU_EAGER */
+
+int xnarch_handle_fpu_fault(struct xnthread *from,
+			struct xnthread *to, struct ipipe_trap_data *d)
+{
+	/* in eager mode there are no such faults */
+	BUG_ON(1);
+
+	return 0;
+}
+#endif /* ! IPIPE_X86_FPU_EAGER */
+
+#define current_task_used_kfpu() kernel_fpu_disabled()
+
+#define tcb_used_kfpu(t) ((t)->root_kfpu)
+
+#ifndef IPIPE_X86_FPU_EAGER
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *const rootcb = xnthread_archtcb(root);
+	struct task_struct *const p = current;
+	x86_fpustate *const current_task_fpup = x86_fpustate_ptr(&p->thread);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+	rootcb->spp = &p->thread.sp;
+	rootcb->ipp = &p->thread.rip;
+#endif
+	if (!current_task_used_kfpu()) {
+		rootcb->root_kfpu = 0;
+		rootcb->fpup = x86_fpregs_active(p) ? current_task_fpup : NULL;
+		return;
+	}
+
+	/*
+	 * We need to save the kernel FPU context before preempting,
+	 * store it in our root control block.
+	 */
+	rootcb->root_kfpu = 1;
+	rootcb->fpup = current_task_fpup;
+	rootcb->root_used_math = !!(p->flags & PF_USED_MATH);
+	x86_fpustate_ptr(&p->thread) = rootcb->kfpu_state;
+	x86_fpregs_activate(p);
+	p->flags |= PF_USED_MATH;
+	kernel_fpu_enable();
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	x86_fpustate *const prev_fpup = from ? from->tcb.fpup : NULL;
+	struct xnarchtcb *const tcb = xnthread_archtcb(to);
+	struct task_struct *const p = tcb->core.host_task;
+	x86_fpustate *const next_task_fpup = x86_fpustate_ptr(&p->thread);
+
+	/* Restore lazy mode only if root fpu owner is not current. */
+	if (xnthread_test_state(to, XNROOT) &&
+	    prev_fpup != next_task_fpup &&
+	    !tcb_used_kfpu(tcb))
+		return;
+
+	clts();
+	/*
+	 * The only case where we can skip restoring the FPU is:
+	 * - the fpu context of the next task is the current fpu
+	 * context;
+	 * - root thread has not used fpu in kernel-space;
+	 * - cpu has fxsr (because if it does not, last context switch
+	 * reinitialized fpu)
+	 */
+	if (prev_fpup != next_task_fpup || !cpu_has_fxsr)
+		__do_restore_fpu_state(next_task_fpup);
+
+	if (!tcb_used_kfpu(tcb)) {
+		x86_fpregs_activate(p);
+		return;
+	}
+	kernel_fpu_disable();
+
+	x86_fpustate_ptr(&p->thread) = to->tcb.fpup;
+	if (!tcb->root_used_math) {
+		x86_fpregs_deactivate(p);
+		p->flags &= ~PF_USED_MATH;
+	}
+}
+#else /* IPIPE_X86_FPU_EAGER */
+void xnarch_leave_root(struct xnthread *root)
+{
+	struct xnarchtcb *const rootcb = xnthread_archtcb(root);
+
+	rootcb->root_kfpu = current_task_used_kfpu();
+
+	if (!tcb_used_kfpu(rootcb))
+		return;
+
+	/* save fpregs from in-kernel use */
+	copy_fpregs_to_fpstate(rootcb->kfpu);
+	kernel_fpu_enable();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+	/* restore current's fpregs */
+	__cpu_invalidate_fpregs_state();
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,4,182)
+	switch_fpu_finish(current);
+#elif LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+	switch_fpu_finish(&current->thread.fpu);
+#else
+	switch_fpu_finish(&current->thread.fpu, raw_smp_processor_id());
+#endif
+#else
+	/* mark current thread as not owning the FPU anymore */
+	if (fpregs_active())
+		fpregs_deactivate(&current->thread.fpu);
+#endif
+}
+
+void xnarch_switch_fpu(struct xnthread *from, struct xnthread *to)
+{
+	struct xnarchtcb *const to_tcb = xnthread_archtcb(to);
+
+	if (tcb_used_kfpu(to_tcb)) {
+		copy_kernel_to_fpregs(&to_tcb->kfpu->state);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+		/* redo the invalidation done by kernel_fpu_begin */
+		__cpu_invalidate_fpregs_state();
+#endif
+		kernel_fpu_disable();
+	}
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+	else if (!xnthread_test_state(to, XNROOT) &&
+		 test_thread_flag(TIF_NEED_FPU_LOAD)) {
+		/*
+		 * This is open-coded switch_fpu_return but without a test for
+		 * PF_KTHREAD, i.e including kernel threads.
+		 */
+		struct fpu *fpu = &current->thread.fpu;
+		int cpu = raw_smp_processor_id();
+
+		if (!fpregs_state_valid(fpu, cpu)) {
+			copy_kernel_to_fpregs(&fpu->state);
+			fpregs_activate(fpu);
+			fpu->last_cpu = cpu;
+		}
+		clear_thread_flag(TIF_NEED_FPU_LOAD);
+	}
+#endif
+}
+#endif /* ! IPIPE_X86_FPU_EAGER */
+
+void xnarch_init_root_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+	tcb->sp = 0;
+	tcb->spp = &tcb->sp;
+	tcb->ipp = &tcb->ip;
+#endif	
+#ifndef IPIPE_X86_FPU_EAGER
+	tcb->fpup = NULL;
+	tcb->kfpu_state = kmem_cache_zalloc(xstate_cache, GFP_KERNEL);
+#else /* IPIPE_X86_FPU_EAGER */
+	tcb->kfpu = kmem_cache_zalloc(xstate_cache, GFP_KERNEL);
+#endif /* ! IPIPE_X86_FPU_EAGER */
+	tcb->root_kfpu = 0;
+}
+
+void xnarch_init_shadow_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+	struct task_struct *p = tcb->core.host_task;
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,8,0)
+	tcb->sp = 0;
+	tcb->spp = &p->thread.sp;
+	tcb->ipp = &p->thread.rip; /* <!> raw naming intended. */
+#endif
+#ifndef IPIPE_X86_FPU_EAGER
+	tcb->fpup = x86_fpustate_ptr(&p->thread);
+	tcb->kfpu_state = NULL;
+#else /* IPIPE_X86_FPU_EAGER */
+	tcb->kfpu = NULL;
+#endif /* ! IPIPE_X86_FPU_EAGER */
+	tcb->root_kfpu = 0;
+
+#ifndef IPIPE_X86_FPU_EAGER
+	/* XNFPU is set upon first FPU fault */
+	xnthread_clear_state(thread, XNFPU);
+#else /* IPIPE_X86_FPU_EAGER */
+	/* XNFPU is always set */
+	xnthread_set_state(thread, XNFPU);
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+	fpu__activate_fpstate_read(&p->thread.fpu);
+#else
+	fpu__initialize(&p->thread.fpu);
+#endif
+#endif /* ! IPIPE_X86_FPU_EAGER */
+}
+
+int mach_x86_thread_init(void)
+{
+	xstate_cache = kmem_cache_create("cobalt_x86_xstate",
+					 fpu_kernel_xstate_size,
+					 x86_xstate_alignment,
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,14,0)
+					 SLAB_NOTRACK,
+#else
+					 0,
+#endif
+					 NULL);
+	if (xstate_cache == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+void mach_x86_thread_cleanup(void)
+{
+	kmem_cache_destroy(xstate_cache);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c b/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c
new file mode 100644
index 0000000..5603c2d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/arith.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2005 Gilles Chanteperdrix.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_arith In-kernel arithmetic
+ *
+ * A collection of helpers performing arithmetic operations not
+ * implicitly available from kernel context via GCC helpers. Many of
+ * these routines enable 64bit arithmetic on 32bit systems. Xenomai
+ * architecture ports normally implement the performance critical ones
+ * in hand-crafted assembly code (see
+ * kernel/cobalt/arch/\<arch\>/include/asm/xenomai/uapi/arith.h).
+ * @{
+ */
+
+/**
+ * Architecture-independent div64 operation with remainder.
+ *
+ * @param a dividend
+ *
+ * @param b divisor
+ *
+ * @param rem if non-NULL, a pointer to a 64bit variable for
+ * collecting the remainder from the division.
+ */
+unsigned long long xnarch_generic_full_divmod64(unsigned long long a,
+						unsigned long long b,
+						unsigned long long *rem)
+{
+	unsigned long long q = 0, r = a;
+	int i;
+
+	for (i = fls(a >> 32) - fls(b >> 32), b <<= i; i >= 0; i--, b >>= 1) {
+		q <<= 1;
+		if (b <= r) {
+			r -= b;
+			q++;
+		}
+	}
+
+	if (rem)
+		*rem = r;
+	return q;
+}
+EXPORT_SYMBOL_GPL(xnarch_generic_full_divmod64);
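+
+/*
+ * Usage sketch (hypothetical caller): full 64bit division with
+ * remainder on a 32bit system, e.g. splitting a tick count into
+ * whole periods plus a leftover:
+ *
+ *   unsigned long long nperiods, rem;
+ *
+ *   nperiods = xnarch_generic_full_divmod64(ticks, period, &rem);
+ *   // now ticks == nperiods * period + rem, with rem < period
+ */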
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c b/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c
new file mode 100644
index 0000000..3b79505
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/bufd.c
@@ -0,0 +1,653 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/bufd.h>
+#include <cobalt/kernel/assert.h>
+#include <asm/xenomai/syscall.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_bufd Buffer descriptor
+ *
+ * Abstraction for copying data to/from different address spaces
+ *
+ * A buffer descriptor is a simple abstraction dealing with copy
+ * operations to/from memory buffers which may belong to different
+ * address spaces.
+ *
+ * To this end, the buffer descriptor library provides a small set of
+ * copy routines which are aware of address space restrictions when
+ * moving data, and a generic container type which can hold a
+ * reference to - or cover - a particular memory area, either present
+ * in kernel space, or in any of the existing user memory contexts.
+ *
+ * The goal of the buffer descriptor abstraction is to hide address
+ * space specifics from Xenomai services dealing with memory areas,
+ * allowing them to operate on multiple address spaces seamlessly.
+ *
+ * The common usage patterns are as follows:
+ *
+ * - Implementing a Xenomai syscall returning a bulk of data to the
+ *   caller, which may have to be copied back to either kernel or user
+ *   space:
+ *
+ *   @code
+ *   [Syscall implementation]
+ *   ssize_t rt_bulk_read_inner(struct xnbufd *bufd)
+ *   {
+ *       ssize_t ret;
+ *       size_t len;
+ *       void *bulk;
+ *
+ *       bulk = get_next_readable_bulk(&len);
+ *       ret = xnbufd_copy_from_kmem(bufd, bulk, min(bufd->b_len, len));
+ *       free_bulk(bulk);
+ *
+ *       ret = this_may_fail();
+ *       if (ret)
+ *	       xnbufd_invalidate(bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Kernel wrapper for in-kernel calls]
+ *   int rt_bulk_read(void *ptr, size_t len)
+ *   {
+ *       struct xnbufd bufd;
+ *       ssize_t ret;
+ *
+ *       xnbufd_map_kwrite(&bufd, ptr, len);
+ *       ret = rt_bulk_read_inner(&bufd);
+ *       xnbufd_unmap_kwrite(&bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Userland trampoline for user syscalls]
+ *   int __rt_bulk_read(struct pt_regs *regs)
+ *   {
+ *       struct xnbufd bufd;
+ *       void __user *ptr;
+ *       ssize_t ret;
+ *       size_t len;
+ *
+ *       ptr = (void __user *)__xn_reg_arg1(regs);
+ *       len = __xn_reg_arg2(regs);
+ *
+ *       xnbufd_map_uwrite(&bufd, ptr, len);
+ *       ret = rt_bulk_read_inner(&bufd);
+ *       xnbufd_unmap_uwrite(&bufd);
+ *
+ *       return ret;
+ *   }
+ *   @endcode
+ *
+ * - Implementing a Xenomai syscall receiving a bulk of data from the
+ *   caller, which may have to be read from either kernel or user
+ *   space:
+ *
+ *   @code
+ *   [Syscall implementation]
+ *   ssize_t rt_bulk_write_inner(struct xnbufd *bufd)
+ *   {
+ *       void *bulk = get_free_bulk(bufd->b_len);
+ *       return xnbufd_copy_to_kmem(bulk, bufd, bufd->b_len);
+ *   }
+ *
+ *   [Kernel wrapper for in-kernel calls]
+ *   int rt_bulk_write(const void *ptr, size_t len)
+ *   {
+ *       struct xnbufd bufd;
+ *       ssize_t ret;
+ *
+ *       xnbufd_map_kread(&bufd, ptr, len);
+ *       ret = rt_bulk_write_inner(&bufd);
+ *       xnbufd_unmap_kread(&bufd);
+ *
+ *       return ret;
+ *   }
+ *
+ *   [Userland trampoline for user syscalls]
+ *   int __rt_bulk_write(struct pt_regs *regs)
+ *   {
+ *       struct xnbufd bufd;
+ *       void __user *ptr;
+ *       ssize_t ret;
+ *       size_t len;
+ *
+ *       ptr = (void __user *)__xn_reg_arg1(regs);
+ *       len = __xn_reg_arg2(regs);
+ *
+ *       xnbufd_map_uread(&bufd, ptr, len);
+ *       ret = rt_bulk_write_inner(&bufd);
+ *       xnbufd_unmap_uread(&bufd);
+ *
+ *       return ret;
+ *   }
+ *   @endcode
+ *
+ *@{*/
+
+/**
+ * @fn void xnbufd_map_kread(struct xnbufd *bufd, const void *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for reading from kernel memory.
+ *
+ * The new buffer descriptor may be used to copy data from kernel
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_kread().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes kernel memory area, starting from @a ptr.
+ *
+ * @param ptr The start of the kernel buffer to map.
+ *
+ * @param len The length of the kernel buffer starting at @a ptr.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_map_kwrite(struct xnbufd *bufd, void *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for writing to kernel memory.
+ *
+ * The new buffer descriptor may be used to copy data to kernel
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_kwrite().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes kernel memory area, starting from @a ptr.
+ *
+ * @param ptr The start of the kernel buffer to map.
+ *
+ * @param len The length of the kernel buffer starting at @a ptr.
+ *
+ * @coretags{unrestricted}
+ */
+void xnbufd_map_kmem(struct xnbufd *bufd, void *ptr, size_t len)
+{
+	bufd->b_ptr = ptr;
+	bufd->b_len = len;
+	bufd->b_mm = NULL;
+	bufd->b_off = 0;
+	bufd->b_carry = NULL;
+}
+EXPORT_SYMBOL_GPL(xnbufd_map_kmem);
+
+/**
+ * @fn void xnbufd_map_uread(struct xnbufd *bufd, const void __user *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for reading from user memory.
+ *
+ * The new buffer descriptor may be used to copy data from user
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_uread().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes user memory area, starting from @a ptr. @a ptr is
+ * never dereferenced directly, since it may refer to a buffer that
+ * lives in another address space.
+ *
+ * @param ptr The start of the user buffer to map.
+ *
+ * @param len The length of the user buffer starting at @a ptr.
+ *
+ * @coretags{task-unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_map_uwrite(struct xnbufd *bufd, void __user *ptr, size_t len)
+ * @brief Initialize a buffer descriptor for writing to user memory.
+ *
+ * The new buffer descriptor may be used to copy data to user
+ * memory. This routine should be used in pair with
+ * xnbufd_unmap_uwrite().
+ *
+ * @param bufd The address of the buffer descriptor which will map a
+ * @a len bytes user memory area, starting from @a ptr. @a ptr is
+ * never dereferenced directly, since it may refer to a buffer that
+ * lives in another address space.
+ *
+ * @param ptr The start of the user buffer to map.
+ *
+ * @param len The length of the user buffer starting at @a ptr.
+ *
+ * @coretags{task-unrestricted}
+ */
+
+void xnbufd_map_umem(struct xnbufd *bufd, void __user *ptr, size_t len)
+{
+	bufd->b_ptr = ptr;
+	bufd->b_len = len;
+	bufd->b_mm = current->mm;
+	bufd->b_off = 0;
+	bufd->b_carry = NULL;
+}
+EXPORT_SYMBOL_GPL(xnbufd_map_umem);
+
+/**
+ * @fn ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len)
+ * @brief Copy memory covered by a buffer descriptor to kernel memory.
+ *
+ * This routine copies @a len bytes from the area referred to by the
+ * buffer descriptor @a bufd to the kernel memory area @a to.
+ * xnbufd_copy_to_kmem() tracks the read offset within the source
+ * memory internally, so that it may be called several times in a
+ * loop, until the entire memory area is loaded.
+ *
+ * The source address space is dealt with according to the following
+ * rules:
+ *
+ * - if @a bufd refers to readable kernel area (i.e. see
+ *   xnbufd_map_kread()), the copy is immediately and fully performed
+ *   with no restriction.
+ *
+ * - if @a bufd refers to a readable user area (i.e. see
+ *   xnbufd_map_uread()), the copy is performed only if that area
+ *   lives in the currently active address space, and only if the
+ *   caller may sleep Linux-wise to process any potential page fault
+ *   which may arise while reading from that memory.
+ *
+ * - any attempt to read from @a bufd from an unsuitable context is
+ *   considered a bug, and raises a panic assertion when the
+ *   nucleus is compiled in debug mode.
+ *
+ * @param to The start address of the kernel memory to copy to.
+ *
+ * @param bufd The address of the buffer descriptor covering the user
+ * memory to copy data from.
+ *
+ * @param len The length of the user memory to copy from @a bufd.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd. Otherwise:
+ *
+ * - -EINVAL is returned upon attempt to read from the user area from
+ *   an invalid context. This error is only returned when the debug
+ *   mode is disabled; otherwise a panic assertion is raised.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while reading from the user area. For that reason,
+ * xnbufd_copy_to_kmem() may only be called from a preemptible section
+ * (Linux-wise).
+ */
+ssize_t xnbufd_copy_to_kmem(void *to, struct xnbufd *bufd, size_t len)
+{
+	caddr_t from;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	from = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a source buffer living in the
+	 * kernel address space, we may read from it directly.
+	 */
+	if (bufd->b_mm == NULL) {
+		memcpy(to, from, len);
+		goto advance_offset;
+	}
+
+	/*
+	 * We want to read data from user-space, check whether:
+	 * 1) the source buffer lies in the current address space,
+	 * 2) we may fault while reading from the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while reading data from it, copy_from_user() is
+	 * not an option and we have a bug somewhere, since there is
+	 * no way we could fetch the data to kernel space immediately.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here, since the source buffer would live in kernel space in
+	 * such a case.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_from_user(to, (void __user *)from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	XENO_BUG(COBALT);
+
+	return -EINVAL;
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_to_kmem);
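+
+/*
+ * Usage sketch (hypothetical caller): since the read offset is
+ * maintained internally, a bulk may be drained in fixed-size chunks;
+ * the chunk size and the consume() helper below are made up for the
+ * example.
+ *
+ *   char chunk[64];
+ *   ssize_t done = 0;
+ *   size_t n;
+ *
+ *   while ((size_t)done < bufd->b_len) {
+ *       n = min(bufd->b_len - (size_t)done, sizeof(chunk));
+ *       done = xnbufd_copy_to_kmem(chunk, bufd, n);
+ *       if (done < 0)
+ *           break;
+ *       consume(chunk, n);
+ *   }
+ */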
+
+/**
+ * @fn ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+ * @brief Copy kernel memory to the area covered by a buffer descriptor.
+ *
+ * This routine copies @a len bytes from the kernel memory starting at
+ * @a from to the area referred to by the buffer descriptor @a
+ * bufd. xnbufd_copy_from_kmem() tracks the write offset within the
+ * destination memory internally, so that it may be called several
+ * times in a loop, until the entire memory area is stored.
+ *
+ * The destination address space is dealt with according to the
+ * following rules:
+ *
+ * - if @a bufd refers to a writable kernel area (i.e. see
+ *   xnbufd_map_kwrite()), the copy is immediately and fully performed
+ *   with no restriction.
+ *
+ * - if @a bufd refers to a writable user area (i.e. see
+ *   xnbufd_map_uwrite()), the copy is performed only if that area
+ *   lives in the currently active address space, and only if the
+ *   caller may sleep Linux-wise to process any potential page fault
+ *   which may arise while writing to that memory.
+ *
+ * - if @a bufd refers to a user area which may not be immediately
+ *   written to from the current context, the copy is postponed until
+ *   xnbufd_unmap_uwrite() is invoked for @a bufd, at which point the
+ *   copy will take place. In such a case, the source memory is
+ *   transferred to a carry over buffer allocated internally; this
+ *   operation may require allocating dynamic memory from the nucleus
+ *   heap if @a len is greater than 64 bytes.
+ *
+ * @param bufd The address of the buffer descriptor covering the user
+ * memory to copy data to.
+ *
+ * @param from The start address of the kernel memory to copy from.
+ *
+ * @param len The length of the kernel memory to copy to @a bufd.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd. Otherwise,
+ *
+ * - -ENOMEM is returned when no memory is available from the nucleus
+ *    heap to allocate the carry over buffer.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ *
+ * This routine may switch the caller to secondary mode if a page
+ * fault occurs while writing to the user area. For that reason,
+ * xnbufd_copy_from_kmem() may only be called from a preemptible
+ * section (Linux-wise).
+ */
+ssize_t xnbufd_copy_from_kmem(struct xnbufd *bufd, void *from, size_t len)
+{
+	caddr_t to;
+
+	thread_only();
+
+	if (len == 0)
+		goto out;
+
+	to = bufd->b_ptr + bufd->b_off;
+
+	/*
+	 * If the descriptor covers a destination buffer living in the
+	 * kernel address space, we may copy to it directly.
+	 */
+	if (bufd->b_mm == NULL)
+		goto direct_copy;
+
+	/*
+	 * We want to pass data to user-space, check whether:
+	 * 1) the destination buffer lies in the current address space,
+	 * 2) we may fault while writing to the buffer directly.
+	 *
+	 * If we can't reach the buffer, or the current context may
+	 * not fault while copying data to it, copy_to_user() is not
+	 * an option and we have to convey the data from kernel memory
+	 * through the carry over buffer.
+	 *
+	 * Note that we don't check for non-preemptible Linux context
+	 * here: feeding a RT activity with data from a non-RT context
+	 * is wrong in the first place, so never mind.
+	 */
+	if (current->mm == bufd->b_mm) {
+		preemptible_only();
+		if (cobalt_copy_to_user((void __user *)to, from, len))
+			return -EFAULT;
+		goto advance_offset;
+	}
+
+	/*
+	 * We need a carry over buffer to convey the data to
+	 * user-space. xnbufd_unmap_uwrite() should be called on the
+	 * way back to user-space to update the destination buffer
+	 * from the carry over area.
+	 */
+	if (bufd->b_carry == NULL) {
+		/*
+		 * Try to use the fast carry over area available
+		 * directly from the descriptor for short messages, to
+		 * save a dynamic allocation request.
+		 */
+		if (bufd->b_len <= sizeof(bufd->b_buf))
+			bufd->b_carry = bufd->b_buf;
+		else {
+			bufd->b_carry = xnmalloc(bufd->b_len);
+			if (bufd->b_carry == NULL)
+				return -ENOMEM;
+		}
+		to = bufd->b_carry;
+	} else
+		to = bufd->b_carry + bufd->b_off;
+
+direct_copy:
+	memcpy(to, from, len);
+
+advance_offset:
+	bufd->b_off += len;
+out:
+	return (ssize_t)bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_copy_from_kmem);
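+
+/*
+ * Usage sketch (hypothetical caller): replying to a request whose
+ * destination buffer lives in a foreign address space. The data is
+ * staged into the carry over area here, then flushed to user memory
+ * when xnbufd_unmap_uwrite() eventually runs on behalf of the
+ * requester.
+ *
+ *   ret = xnbufd_copy_from_kmem(bufd, &reply, sizeof(reply));
+ *   ...
+ *   [later, from the requester's context]
+ *   ret = xnbufd_unmap_uwrite(bufd);
+ */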
+
+/**
+ * @fn void xnbufd_unmap_uread(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_uread().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_uread(), to read data from a user area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ */
+ssize_t xnbufd_unmap_uread(struct xnbufd *bufd)
+{
+	preemptible_only();
+
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_uread);
+
+/**
+ * @fn void xnbufd_unmap_uwrite(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_uwrite().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_uwrite(), to write data to a user area.
+ *
+ * The main action taken is to write the contents of the kernel memory
+ * area passed to xnbufd_copy_from_kmem() whenever the copy operation
+ * was postponed at that time; the carry over buffer is eventually
+ * released as needed. If xnbufd_copy_from_kmem() was allowed to copy
+ * to the destination user memory at once, then xnbufd_unmap_uwrite()
+ * boils down to a no-op.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note Calling this routine while holding the nklock and/or running
+ * with interrupts disabled is invalid, and doing so will trigger a
+ * debug assertion.
+ */
+ssize_t xnbufd_unmap_uwrite(struct xnbufd *bufd)
+{
+	ssize_t ret = 0;
+	void __user *to;
+	void *from;
+	size_t len;
+
+	preemptible_only();
+
+	len = bufd->b_off;
+
+	if (bufd->b_carry == NULL)
+		/* Copy took place directly. Fine. */
+		goto done;
+
+	/*
+	 * Something was written to the carry over area, copy the
+	 * contents to user-space, then release the area if needed.
+	 */
+	to = (void __user *)bufd->b_ptr;
+	from = bufd->b_carry;
+	ret = cobalt_copy_to_user(to, from, len);
+
+	if (bufd->b_len > sizeof(bufd->b_buf))
+		xnfree(bufd->b_carry);
+done:
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return ret ?: (ssize_t)len;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_uwrite);
+
+/**
+ * @fn void xnbufd_reset(struct xnbufd *bufd)
+ * @brief Reset a buffer descriptor.
+ *
+ * The buffer descriptor is reset, so that all data already copied is
+ * forgotten. Any carry over buffer allocated is kept, though.
+ *
+ * @param bufd The address of the buffer descriptor to reset.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void xnbufd_invalidate(struct xnbufd *bufd)
+ * @brief Invalidate a buffer descriptor.
+ *
+ * The buffer descriptor is invalidated, making it unusable for
+ * further copy operations. If an outstanding carry over buffer was
+ * allocated by a previous call to xnbufd_copy_from_kmem(), it is
+ * immediately freed so that no data transfer will happen when the
+ * descriptor is finalized.
+ *
+ * The only action that may subsequently be performed on an
+ * invalidated descriptor is calling the relevant unmapping routine
+ * for it. For that reason, xnbufd_invalidate() should be invoked on
+ * the error path when data may have been transferred to the carry
+ * over buffer.
+ *
+ * @param bufd The address of the buffer descriptor to invalidate.
+ *
+ * @coretags{unrestricted}
+ */
+void xnbufd_invalidate(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	if (bufd->b_carry) {
+		if (bufd->b_len > sizeof(bufd->b_buf))
+			xnfree(bufd->b_carry);
+		bufd->b_carry = NULL;
+	}
+	bufd->b_off = 0;
+}
+EXPORT_SYMBOL_GPL(xnbufd_invalidate);
+
+/**
+ * @fn void xnbufd_unmap_kread(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_kread().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_kread(), to read data from a kernel area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes read so far from the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ */
+ssize_t xnbufd_unmap_kread(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_kread);
+
+/**
+ * @fn void xnbufd_unmap_kwrite(struct xnbufd *bufd)
+ * @brief Finalize a buffer descriptor obtained from xnbufd_map_kwrite().
+ *
+ * This routine finalizes a buffer descriptor previously initialized
+ * by a call to xnbufd_map_kwrite(), to write data to a kernel area.
+ *
+ * @param bufd The address of the buffer descriptor to finalize.
+ *
+ * @return The number of bytes written so far to the memory area
+ * covered by @a bufd.
+ *
+ * @coretags{task-unrestricted}
+ */
+ssize_t xnbufd_unmap_kwrite(struct xnbufd *bufd)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	bufd->b_ptr = (caddr_t)-1;
+#endif
+	return bufd->b_off;
+}
+EXPORT_SYMBOL_GPL(xnbufd_unmap_kwrite);
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c
new file mode 100644
index 0000000..2b9efad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/clock.c
@@ -0,0 +1,830 @@
+/*
+ * Copyright (C) 2006-2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/percpu.h>
+#include <linux/errno.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/uapi/time.h>
+#include <asm/xenomai/calibration.h>
+#include <trace/events/cobalt-core.h>
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_clock Clock services
+ *
+ * @{
+ */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+static struct xnarch_u32frac bln_frac;
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem)
+{
+	unsigned long long q;
+	unsigned r;
+
+	q = xnarch_nodiv_ullimd(value, bln_frac.frac, bln_frac.integ);
+	r = value - q * 1000000000;
+	if (r >= 1000000000) {
+		++q;
+		r -= 1000000000;
+	}
+	*rem = r;
+	return q;
+}
+
+#else
+
+unsigned long long xnclock_divrem_billion(unsigned long long value,
+					  unsigned long *rem)
+{
+	return xnarch_ulldiv(value, 1000000000, rem);
+
+}
+
+#endif /* !XNARCH_HAVE_NODIV_LLIMD */
+
+EXPORT_SYMBOL_GPL(xnclock_divrem_billion);
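+
+/*
+ * Usage sketch (hypothetical caller): splitting a nanosecond count
+ * into seconds and leftover nanoseconds without resorting to a slow
+ * 64bit division on 32bit systems:
+ *
+ *   unsigned long long ns = 2500000000ULL, sec;
+ *   unsigned long rem;
+ *
+ *   sec = xnclock_divrem_billion(ns, &rem);
+ *   // sec == 2, rem == 500000000
+ */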
+
+DEFINE_PRIVATE_XNLOCK(ratelimit_lock);
+
+int __xnclock_ratelimit(struct xnclock_ratelimit_state *rs, const char *func)
+{
+	spl_t s;
+	int ret;
+
+	if (!rs->interval)
+		return 1;
+
+	xnlock_get_irqsave(&ratelimit_lock, s);
+
+	if (!rs->begin)
+		rs->begin = xnclock_read_realtime(&nkclock);
+	if (xnclock_read_realtime(&nkclock) >= rs->begin + rs->interval) {
+		if (rs->missed)
+			printk(KERN_WARNING "%s: %d callbacks suppressed\n",
+			       func, rs->missed);
+		rs->begin   = 0;
+		rs->printed = 0;
+		rs->missed  = 0;
+	}
+	if (rs->burst && rs->burst > rs->printed) {
+		rs->printed++;
+		ret = 1;
+	} else {
+		rs->missed++;
+		ret = 0;
+	}
+	xnlock_put_irqrestore(&ratelimit_lock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__xnclock_ratelimit);
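+
+/*
+ * Usage sketch (hypothetical caller): throttling a diagnostic issued
+ * from a hot path. Intervals are expressed on the core clock time
+ * base (nanoseconds); the values below are made up.
+ *
+ *   static struct xnclock_ratelimit_state rs = {
+ *       .interval = 5000000000ULL,  // 5s
+ *       .burst = 10,
+ *   };
+ *
+ *   if (__xnclock_ratelimit(&rs, __func__))
+ *       printk(XENO_WARNING "something looks wrong\n");
+ */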
+
+void xnclock_core_local_shot(struct xnsched *sched)
+{
+	struct xntimerdata *tmd;
+	struct xntimer *timer;
+	xnsticks_t delay;
+	xntimerh_t *h;
+
+	/*
+	 * Do not reprogram locally when inside the tick handler -
+	 * will be done on exit anyway. Also exit if there is no
+	 * pending timer.
+	 */
+	if (sched->status & XNINTCK)
+		return;
+
+	/*
+	 * Assume the core clock device always has percpu semantics in
+	 * SMP.
+	 */
+	tmd = xnclock_this_timerdata(&nkclock);
+	h = xntimerq_head(&tmd->q);
+	if (h == NULL) {
+		sched->lflags |= XNIDLE;
+		return;
+	}
+
+	/*
+	 * Here we try to defer the host tick heading the timer queue,
+	 * so that it does not preempt a real-time activity uselessly,
+	 * in two cases:
+	 *
+	 * 1) a rescheduling is pending for the current CPU. We may
+	 * assume that a real-time thread is about to resume, so we
+	 * want to move the host tick out of the way until the host
+	 * kernel resumes, unless there are no other outstanding
+	 * timers.
+	 *
+	 * 2) the current thread is running in primary mode, in which
+	 * case we may also defer the host tick until the host kernel
+	 * resumes.
+	 *
+	 * The host tick deferral is cleared whenever Xenomai is about
+	 * to yield control to the host kernel (see ___xnsched_run()),
+	 * or a timer with an earlier timeout date is scheduled,
+	 * whichever comes first.
+	 */
+	sched->lflags &= ~(XNHDEFER|XNIDLE|XNTSTOP);
+	timer = container_of(h, struct xntimer, aplink);
+	if (unlikely(timer == &sched->htimer)) {
+		if (xnsched_resched_p(sched) ||
+		    !xnthread_test_state(sched->curr, XNROOT)) {
+			h = xntimerq_second(&tmd->q, h);
+			if (h) {
+				sched->lflags |= XNHDEFER;
+				timer = container_of(h, struct xntimer, aplink);
+			}
+		}
+	}
+
+	delay = xntimerh_date(&timer->aplink) - xnclock_core_read_raw();
+	if (delay < 0)
+		delay = 0;
+	else if (delay > ULONG_MAX)
+		delay = ULONG_MAX;
+
+	xntrace_tick((unsigned)delay);
+
+	pipeline_set_timer_shot(delay);
+}
+
+#ifdef CONFIG_SMP
+void xnclock_core_remote_shot(struct xnsched *sched)
+{
+	pipeline_send_timer_ipi(cpumask_of(xnsched_cpu(sched)));
+}
+#endif
+
+static void adjust_timer(struct xntimer *timer, xntimerq_t *q,
+			 xnsticks_t delta)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xnticks_t period, div;
+	xnsticks_t diff;
+
+	xntimerh_date(&timer->aplink) -= delta;
+
+	if (xntimer_periodic_p(timer) == 0)
+		goto enqueue;
+
+	timer->start_date -= delta;
+	period = xntimer_interval(timer);
+	diff = xnclock_ticks_to_ns(clock,
+		xnclock_read_raw(clock) - xntimer_expiry(timer));
+
+	if ((xnsticks_t)(diff - period) >= 0) {
+		/*
+		 * The timer should have ticked several times by now.
+		 * Instead of calling timer->handler that many times,
+		 * we change the timer date without changing its
+		 * pexpect, so that the timer ticks only once and the
+		 * lost ticks are counted as overruns.
+		 */
+		div = xnarch_div64(diff, period);
+		timer->periodic_ticks += div;
+		xntimer_update_date(timer);
+	} else if (delta < 0
+		   && (timer->status & XNTIMER_FIRED)
+		   && (xnsticks_t) (diff + period) <= 0) {
+		/*
+		 * The timer is periodic and NOT waiting for its first
+		 * shot, so we make it tick sooner than its original
+		 * date. This avoids the case where, after the clock is
+		 * set back to an earlier date, real-time periodic
+		 * timers would not tick until the original date has
+		 * passed.
+		 */
+		div = xnarch_div64(-diff, period);
+		timer->periodic_ticks -= div;
+		timer->pexpect_ticks -= div;
+		xntimer_update_date(timer);
+	}
+
+enqueue:
+	xntimer_enqueue(timer, q);
+}
+
+void xnclock_apply_offset(struct xnclock *clock, xnsticks_t delta_ns)
+{
+	struct xntimer *timer, *tmp;
+	struct list_head adjq;
+	struct xnsched *sched;
+	xnsticks_t delta;
+	xntimerq_it_t it;
+	unsigned int cpu;
+	xntimerh_t *h;
+	xntimerq_t *q;
+
+	atomic_only();
+
+	/*
+	 * The (real-time) epoch just changed for the clock. Since
+	 * timeout dates of timers are expressed as monotonic ticks
+	 * internally, we need to apply the new offset to the
+	 * monotonic clock to all outstanding timers based on the
+	 * affected clock.
+	 */
+	INIT_LIST_HEAD(&adjq);
+	delta = xnclock_ns_to_ticks(clock, delta_ns);
+
+	for_each_online_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		q = &xnclock_percpu_timerdata(clock, cpu)->q;
+
+		for (h = xntimerq_it_begin(q, &it); h;
+		     h = xntimerq_it_next(q, &it, h)) {
+			timer = container_of(h, struct xntimer, aplink);
+			if (timer->status & XNTIMER_REALTIME)
+				list_add_tail(&timer->adjlink, &adjq);
+		}
+
+		if (list_empty(&adjq))
+			continue;
+
+		list_for_each_entry_safe(timer, tmp, &adjq, adjlink) {
+			list_del(&timer->adjlink);
+			xntimer_dequeue(timer, q);
+			adjust_timer(timer, q, delta);
+		}
+
+		if (sched != xnsched_current())
+			xnclock_remote_shot(clock, sched);
+		else
+			xnclock_program_shot(clock, sched);
+	}
+}
+EXPORT_SYMBOL_GPL(xnclock_apply_offset);
+
+void xnclock_set_wallclock(xnticks_t epoch_ns)
+{
+	xnsticks_t old_offset_ns, offset_ns;
+	spl_t s;
+
+	/*
+	 * The epoch of CLOCK_REALTIME just changed. Since timeouts
+	 * are expressed as monotonic ticks, we need to apply the
+	 * wallclock-to-monotonic offset to all outstanding timers
+	 * based on this clock.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	old_offset_ns = nkclock.wallclock_offset;
+	offset_ns = (xnsticks_t)(epoch_ns - xnclock_core_read_monotonic());
+	nkclock.wallclock_offset = offset_ns;
+	nkvdso->wallclock_offset = offset_ns;
+	xnclock_apply_offset(&nkclock, offset_ns - old_offset_ns);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnclock_set_wallclock);
+
+xnticks_t xnclock_core_read_monotonic(void)
+{
+	return xnclock_core_ticks_to_ns(xnclock_core_read_raw());
+}
+EXPORT_SYMBOL_GPL(xnclock_core_read_monotonic);
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static struct xnvfile_directory timerlist_vfroot;
+
+static struct xnvfile_snapshot_ops timerlist_ops;
+
+struct vfile_clock_priv {
+	struct xntimer *curr;
+};
+
+struct vfile_clock_data {
+	int cpu;
+	unsigned int scheduled;
+	unsigned int fired;
+	xnticks_t timeout;
+	xnticks_t interval;
+	unsigned long status;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static int timerlist_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_clock_priv *priv = xnvfile_iterator_priv(it);
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+
+	if (list_empty(&clock->timerq))
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&clock->timerq, struct xntimer, next_stat);
+
+	return clock->nrtimers;
+}
+
+static int timerlist_next(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_clock_priv *priv = xnvfile_iterator_priv(it);
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+	struct vfile_clock_data *p = data;
+	struct xntimer *timer;
+
+	if (priv->curr == NULL)
+		return 0;
+
+	timer = priv->curr;
+	if (list_is_last(&timer->next_stat, &clock->timerq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_entry(timer->next_stat.next,
+					struct xntimer, next_stat);
+
+	if (clock == &nkclock && xnstat_counter_get(&timer->scheduled) == 0)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(xntimer_sched(timer));
+	p->scheduled = xnstat_counter_get(&timer->scheduled);
+	p->fired = xnstat_counter_get(&timer->fired);
+	p->timeout = xntimer_get_timeout(timer);
+	p->interval = xntimer_interval(timer);
+	p->status = timer->status;
+	knamecpy(p->name, timer->name);
+
+	return 1;
+}
+
+static int timerlist_show(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_clock_data *p = data;
+	char timeout_buf[]  = "-         ";
+	char interval_buf[] = "-         ";
+	char hit_buf[32];
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-20s  %-10s  %-10s  %s\n",
+			       "CPU", "SCHED/SHOT", "TIMEOUT",
+			       "INTERVAL", "NAME");
+	else {
+		if (p->status & XNTIMER_RUNNING)
+			xntimer_format_time(p->timeout, timeout_buf,
+					    sizeof(timeout_buf));
+		if (p->status & XNTIMER_PERIODIC)
+			xntimer_format_time(p->interval, interval_buf,
+					    sizeof(interval_buf));
+		ksformat(hit_buf, sizeof(hit_buf), "%u/%u",
+			 p->scheduled, p->fired);
+		xnvfile_printf(it,
+			       "%-3u  %-20s  %-10s  %-10s  %s\n",
+			       p->cpu, hit_buf, timeout_buf,
+			       interval_buf, p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops timerlist_ops = {
+	.rewind = timerlist_rewind,
+	.next = timerlist_next,
+	.show = timerlist_show,
+};
+
+static void init_timerlist_proc(struct xnclock *clock)
+{
+	memset(&clock->timer_vfile, 0, sizeof(clock->timer_vfile));
+	clock->timer_vfile.privsz = sizeof(struct vfile_clock_priv);
+	clock->timer_vfile.datasz = sizeof(struct vfile_clock_data);
+	clock->timer_vfile.tag = &clock->timer_revtag;
+	clock->timer_vfile.ops = &timerlist_ops;
+
+	xnvfile_init_snapshot(clock->name, &clock->timer_vfile, &timerlist_vfroot);
+	xnvfile_priv(&clock->timer_vfile) = clock;
+}
+
+static void cleanup_timerlist_proc(struct xnclock *clock)
+{
+	xnvfile_destroy_snapshot(&clock->timer_vfile);
+}
+
+void init_timerlist_root(void)
+{
+	xnvfile_init_dir("timer", &timerlist_vfroot, &cobalt_vfroot);
+}
+
+void cleanup_timerlist_root(void)
+{
+	xnvfile_destroy_dir(&timerlist_vfroot);
+}
+
+#else  /* !CONFIG_XENO_OPT_STATS */
+
+static inline void init_timerlist_root(void) { }
+
+static inline void cleanup_timerlist_root(void) { }
+
+static inline void init_timerlist_proc(struct xnclock *clock) { }
+
+static inline void cleanup_timerlist_proc(struct xnclock *clock) { }
+
+#endif	/* !CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_directory clock_vfroot;
+
+void print_core_clock_status(struct xnclock *clock,
+			     struct xnvfile_regular_iterator *it)
+{
+	const char *wd_status = "off";
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	wd_status = "on";
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+
+	xnvfile_printf(it, "%8s: timer=%s, clock=%s\n",
+		       "devices", pipeline_timer_name(), pipeline_clock_name());
+	xnvfile_printf(it, "%8s: %s\n", "watchdog", wd_status);
+}
+
+static int clock_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct xnclock *clock = xnvfile_priv(it->vfile);
+	xnticks_t now = xnclock_read_raw(clock);
+
+	if (clock->id >= 0)	/* External clock, print id. */
+		xnvfile_printf(it, "%7s: %d\n", "id", __COBALT_CLOCK_EXT(clock->id));
+
+	xnvfile_printf(it, "%7s: irq=%Ld kernel=%Ld user=%Ld\n", "gravity",
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, irq)),
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, kernel)),
+		       xnclock_ticks_to_ns(clock, xnclock_get_gravity(clock, user)));
+
+	xnclock_print_status(clock, it);
+
+	xnvfile_printf(it, "%7s: %Lu (%.4Lx %.4x)\n", "ticks",
+		       now, now >> 32, (u32)(now & -1U));
+
+	return 0;
+}
+
+static ssize_t clock_store(struct xnvfile_input *input)
+{
+	char buf[128], *args = buf, *p;
+	struct xnclock_gravity gravity;
+	struct xnvfile_regular *vfile;
+	unsigned long ns, ticks;
+	struct xnclock *clock;
+	ssize_t nbytes;
+	int ret;
+
+	nbytes = xnvfile_get_string(input, buf, sizeof(buf));
+	if (nbytes < 0)
+		return nbytes;
+
+	vfile = container_of(input->vfile, struct xnvfile_regular, entry);
+	clock = xnvfile_priv(vfile);
+	gravity = clock->gravity;
+
+	while ((p = strsep(&args, " \t:/,")) != NULL) {
+		if (*p == '\0')
+			continue;
+		ns = simple_strtol(p, &p, 10);
+		ticks = xnclock_ns_to_ticks(clock, ns);
+		switch (*p) {
+		case 'i':
+			gravity.irq = ticks;
+			break;
+		case 'k':
+			gravity.kernel = ticks;
+			break;
+		case 'u':
+		case '\0':
+			gravity.user = ticks;
+			break;
+		default:
+			return -EINVAL;
+		}
+		ret = xnclock_set_gravity(clock, &gravity);
+		if (ret)
+			return ret;
+	}
+
+	return nbytes;
+}
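+
+/*
+ * Usage sketch: gravity values may be tuned at runtime through the
+ * clock vfile, assuming the usual /proc/xenomai mount point. Each
+ * value is given in nanoseconds, tagged with an i(rq), k(ernel) or
+ * u(ser) suffix; an untagged value applies to the user gravity.
+ *
+ *   # echo "2000i 3000k 5000u" > /proc/xenomai/clock/coreclk
+ */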
+
+static struct xnvfile_regular_ops clock_ops = {
+	.show = clock_show,
+	.store = clock_store,
+};
+
+static void init_clock_proc(struct xnclock *clock)
+{
+	memset(&clock->vfile, 0, sizeof(clock->vfile));
+	clock->vfile.ops = &clock_ops;
+	xnvfile_init_regular(clock->name, &clock->vfile, &clock_vfroot);
+	xnvfile_priv(&clock->vfile) = clock;
+	init_timerlist_proc(clock);
+}
+
+static void cleanup_clock_proc(struct xnclock *clock)
+{
+	cleanup_timerlist_proc(clock);
+	xnvfile_destroy_regular(&clock->vfile);
+}
+
+void xnclock_init_proc(void)
+{
+	xnvfile_init_dir("clock", &clock_vfroot, &cobalt_vfroot);
+	init_timerlist_root();
+}
+
+void xnclock_cleanup_proc(void)
+{
+	xnvfile_destroy_dir(&clock_vfroot);
+	cleanup_timerlist_root();
+}
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static inline void init_clock_proc(struct xnclock *clock) { }
+
+static inline void cleanup_clock_proc(struct xnclock *clock) { }
+
+#endif	/* !CONFIG_XENO_OPT_VFILE */
+
+/**
+ * @brief Register a Xenomai clock.
+ *
+ * This service installs a new clock which may be used to drive
+ * Xenomai timers.
+ *
+ * @param clock The new clock to register.
+ *
+ * @param affinity The set of CPUs we may expect the backing clock
+ * device to tick on. As a special case, passing a NULL affinity mask
+ * means that timer IRQs cannot be seen as percpu events, in which
+ * case all outstanding timers will be maintained into a single global
+ * queue instead of percpu timer queues.
+ *
+ * @coretags{secondary-only}
+ */
+int xnclock_register(struct xnclock *clock, const cpumask_t *affinity)
+{
+	struct xntimerdata *tmd;
+	int cpu;
+
+	secondary_mode_only();
+
+#ifdef CONFIG_SMP
+	/*
+	 * A CPU affinity set may be defined for each clock,
+	 * enumerating the CPUs which can receive ticks from the
+	 * backing clock device.  When given, this set must be a
+	 * subset of the real-time CPU set.
+	 */
+	if (affinity) {
+		cpumask_and(&clock->affinity, affinity, &xnsched_realtime_cpus);
+		if (cpumask_empty(&clock->affinity))
+			return -EINVAL;
+	} else	/* Device is global without particular IRQ affinity. */
+		cpumask_clear(&clock->affinity);
+#endif
+
+	/* Allocate the percpu timer queue slot. */
+	clock->timerdata = alloc_percpu(struct xntimerdata);
+	if (clock->timerdata == NULL)
+		return -ENOMEM;
+
+	/*
+	 * POLA: init all timer slots for the new clock, although some
+	 * of them might remain unused depending on the CPU affinity
+	 * of the event source(s). If the clock device is global
+	 * without any particular IRQ affinity, all timers will be
+	 * queued to CPU0.
+	 */
+	for_each_online_cpu(cpu) {
+		tmd = xnclock_percpu_timerdata(clock, cpu);
+		xntimerq_init(&tmd->q);
+	}
+
+#ifdef CONFIG_XENO_OPT_STATS
+	INIT_LIST_HEAD(&clock->timerq);
+#endif /* CONFIG_XENO_OPT_STATS */
+
+	init_clock_proc(clock);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnclock_register);
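+
+/*
+ * Usage sketch (hypothetical driver): registering an external clock
+ * expected to tick on every real-time CPU; only a couple of relevant
+ * fields are shown in the initializer.
+ *
+ *   static struct xnclock ext_clock = {
+ *       .name = "extclk",
+ *       .resolution = 1,  // nanosecond
+ *   };
+ *
+ *   ret = xnclock_register(&ext_clock, &xnsched_realtime_cpus);
+ */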
+
+/**
+ * @fn void xnclock_deregister(struct xnclock *clock)
+ * @brief Deregister a Xenomai clock.
+ *
+ * This service uninstalls a Xenomai clock previously registered with
+ * xnclock_register().
+ *
+ * This service may be called once all timers driven by @a clock have
+ * been stopped.
+ *
+ * @param clock The clock to deregister.
+ *
+ * @coretags{secondary-only}
+ */
+void xnclock_deregister(struct xnclock *clock)
+{
+	struct xntimerdata *tmd;
+	int cpu;
+
+	secondary_mode_only();
+
+	cleanup_clock_proc(clock);
+
+	for_each_online_cpu(cpu) {
+		tmd = xnclock_percpu_timerdata(clock, cpu);
+		XENO_BUG_ON(COBALT, !xntimerq_empty(&tmd->q));
+		xntimerq_destroy(&tmd->q);
+	}
+
+	free_percpu(clock->timerdata);
+}
+EXPORT_SYMBOL_GPL(xnclock_deregister);
+
+/**
+ * @fn void xnclock_tick(struct xnclock *clock)
+ * @brief Process a clock tick.
+ *
+ * This routine processes an incoming @a clock event, firing elapsed
+ * timers as appropriate.
+ *
+ * @param clock The clock for which a new event was received.
+ *
+ * @coretags{coreirq-only, atomic-entry}
+ *
+ * @note The current CPU must be part of the real-time affinity set
+ * unless the clock device has no particular IRQ affinity, otherwise
+ * weird things may happen.
+ */
+void xnclock_tick(struct xnclock *clock)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xntimer *timer;
+	xnsticks_t delta;
+	xntimerq_t *tmq;
+	xnticks_t now;
+	xntimerh_t *h;
+
+	atomic_only();
+
+#ifdef CONFIG_SMP
+	/*
+	 * Some external clock devices may be global without any
+	 * particular IRQ affinity, in which case the associated
+	 * timers will be queued to CPU0.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_EXTCLOCK) &&
+	    clock != &nkclock &&
+	    !cpumask_test_cpu(xnsched_cpu(sched), &clock->affinity))
+		tmq = &xnclock_percpu_timerdata(clock, 0)->q;
+	else
+#endif
+		tmq = &xnclock_this_timerdata(clock)->q;
+
+	/*
+	 * Optimisation: any local timer reprogramming triggered by
+	 * invoked timer handlers can wait until we leave the tick
+	 * handler. Use this status flag as hint to xntimer_start().
+	 */
+	sched->status |= XNINTCK;
+
+	now = xnclock_read_raw(clock);
+	while ((h = xntimerq_head(tmq)) != NULL) {
+		timer = container_of(h, struct xntimer, aplink);
+		delta = (xnsticks_t)(xntimerh_date(&timer->aplink) - now);
+		if (delta > 0)
+			break;
+
+		trace_cobalt_timer_expire(timer);
+
+		xntimer_dequeue(timer, tmq);
+		xntimer_account_fired(timer);
+
+		/*
+		 * By postponing the propagation of the low-priority
+		 * host tick to the interrupt epilogue (see
+		 * xnintr_irq_handler()), we save some I-cache, which
+		 * translates into precious microsecs on low-end hw.
+		 */
+		if (unlikely(timer == &sched->htimer)) {
+			sched->lflags |= XNHTICK;
+			sched->lflags &= ~XNHDEFER;
+			if (timer->status & XNTIMER_PERIODIC)
+				goto advance;
+			continue;
+		}
+
+		timer->handler(timer);
+		now = xnclock_read_raw(clock);
+		timer->status |= XNTIMER_FIRED;
+		/*
+		 * Only requeue periodic timers which have not been
+		 * requeued, stopped or killed.
+		 */
+		if ((timer->status &
+		     (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_KILLED|XNTIMER_RUNNING)) !=
+		    (XNTIMER_PERIODIC|XNTIMER_DEQUEUED|XNTIMER_RUNNING))
+			continue;
+	advance:
+		do {
+			timer->periodic_ticks++;
+			xntimer_update_date(timer);
+		} while (xntimerh_date(&timer->aplink) < now);
+
+#ifdef CONFIG_SMP
+		/*
+		 * If the timer was migrated over its timeout handler,
+		 * xntimer_migrate() re-queued it already.
+		 */
+		if (unlikely(timer->sched != sched))
+			continue;
+#endif
+		xntimer_enqueue(timer, tmq);
+	}
+
+	sched->status &= ~XNINTCK;
+
+	xnclock_program_shot(clock, sched);
+}
+EXPORT_SYMBOL_GPL(xnclock_tick);
+
+static int set_core_clock_gravity(struct xnclock *clock,
+				  const struct xnclock_gravity *p)
+{
+	nkclock.gravity = *p;
+
+	return 0;
+}
+
+static void reset_core_clock_gravity(struct xnclock *clock)
+{
+	struct xnclock_gravity gravity;
+
+	xnarch_get_latencies(&gravity);
+	if (gravity.kernel == 0)
+		gravity.kernel = gravity.user;
+	set_core_clock_gravity(clock, &gravity);
+}
+
+struct xnclock nkclock = {
+	.name = "coreclk",
+	.resolution = 1,	/* nanosecond. */
+	.ops = {
+		.set_gravity = set_core_clock_gravity,
+		.reset_gravity = reset_core_clock_gravity,
+#ifdef CONFIG_XENO_OPT_VFILE
+		.print_status = print_core_clock_status,
+#endif
+	},
+	.id = -1,
+};
+EXPORT_SYMBOL_GPL(nkclock);
+
+void xnclock_cleanup(void)
+{
+	xnclock_deregister(&nkclock);
+}
+
+int __init xnclock_init(void)
+{
+	spl_t s;
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	xnarch_init_u32frac(&bln_frac, 1, 1000000000);
+#endif
+	pipeline_init_clock();
+	xnclock_reset_gravity(&nkclock);
+	xnlock_get_irqsave(&nklock, s);
+	nkclock.wallclock_offset = pipeline_read_wallclock() -
+		xnclock_core_read_monotonic();
+	xnlock_put_irqrestore(&nklock, s);
+	xnclock_register(&nkclock, &xnsched_realtime_cpus);
+
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c
new file mode 100644
index 0000000..db0ecf0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.c
@@ -0,0 +1,657 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/limits.h>
+#include <linux/ctype.h>
+#include <linux/jhash.h>
+#include <linux/mm.h>
+#include <linux/signal.h>
+#include <linux/vmalloc.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/syscall.h>
+#include "posix/process.h"
+#include "debug.h"
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_debug Debugging services
+ * @{
+ */
+struct xnvfile_directory cobalt_debug_vfroot;
+EXPORT_SYMBOL_GPL(cobalt_debug_vfroot);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX
+
+#define SYMBOL_HSLOTS	(1 << 8)
+
+struct hashed_symbol {
+	struct hashed_symbol *next;
+	char symbol[0];
+};
+
+static struct hashed_symbol *symbol_jhash[SYMBOL_HSLOTS];
+
+static struct xnheap memory_pool;
+
+/*
+ * This is permanent storage for ASCII strings, which comes in handy
+ * for getting a unique and constant reference to a symbol while
+ * preserving storage space. Hashed symbols have infinite lifetime
+ * and are never flushed.
+ */
+DEFINE_PRIVATE_XNLOCK(symbol_lock);
+
+static const char *hash_symbol(const char *symbol)
+{
+	struct hashed_symbol *p, **h;
+	const char *str;
+	size_t len;
+	u32 hash;
+	spl_t s;
+
+	len = strlen(symbol);
+	hash = jhash(symbol, len, 0);
+
+	xnlock_get_irqsave(&symbol_lock, s);
+
+	h = &symbol_jhash[hash & (SYMBOL_HSLOTS - 1)];
+	p = *h;
+	while (p &&
+	       (*p->symbol != *symbol ||
+		strcmp(p->symbol + 1, symbol + 1)))
+	       p = p->next;
+
+	if (p)
+		goto done;
+
+	p = xnheap_alloc(&memory_pool, sizeof(*p) + len + 1);
+	if (p == NULL) {
+		str = NULL;
+		goto out;
+	}
+
+	strcpy(p->symbol, symbol);
+	p->next = *h;
+	*h = p;
+done:
+	str = p->symbol;
+out:
+	xnlock_put_irqrestore(&symbol_lock, s);
+
+	return str;
+}
+
+/*
+ * We define a static limit (RELAX_SPOTNR) on spot records to bound
+ * memory consumption (record memory is pulled from the dedicated
+ * debug heap). The current value should be reasonable enough unless
+ * the application is extremely insane, given that we only keep
+ * unique spots. Said differently, if the application has more than
+ * RELAX_SPOTNR distinct code locations doing spurious relaxes, then
+ * the first issue to address is likely PEBKAC.
+ */
+#define RELAX_SPOTNR	128
+#define RELAX_HSLOTS	(1 << 8)
+
+struct relax_record {
+	/* Number of hits for this location */
+	u32 hits;
+	struct relax_spot {
+		/* Faulty thread name. */
+		char thread[XNOBJECT_NAME_LEN];
+		/* call stack the relax originates from. */
+		int depth;
+		struct backtrace {
+			unsigned long pc;
+			const char *mapname;
+		} backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+		/* Program hash value of the caller. */
+		u32 proghash;
+		/* Pid of the caller. */
+		pid_t pid;
+		/* Reason for relaxing. */
+		int reason;
+	} spot;
+	struct relax_record *r_next;
+	struct relax_record *h_next;
+	const char *exe_path;
+};
+
+static struct relax_record *relax_jhash[RELAX_HSLOTS];
+
+static struct relax_record *relax_record_list;
+
+static int relax_overall, relax_queued;
+
+DEFINE_PRIVATE_XNLOCK(relax_lock);
+
+/*
+ * The motivation to centralize tracing information about relaxes
+ * directly into kernel space is fourfold:
+ *
+ * - it allows gathering all the trace data into a single location
+ * and keeping it safe there, with no external log file involved.
+ *
+ * - enabling the tracing does not impose any requirement on the
+ * application (aside from being compiled with debug symbols, for
+ * best interpretation of that information). We only need a kernel
+ * config switch for this (i.e. CONFIG_XENO_OPT_DEBUG_TRACE_RELAX).
+ *
+ * - the data is collected and can be made available exactly the same
+ * way regardless of which application emitted the relax requests, or
+ * whether it is still alive when the trace data are displayed.
+ *
+ * - the kernel is able to provide accurate and detailed trace
+ * information, such as the relative offset of instructions causing
+ * relax requests within dynamic shared objects, without having to
+ * guess it roughly from /proc/pid/maps, or relying on ldd's
+ * --function-relocs feature, both of which require running on the
+ * target system to get the needed information. Instead, we allow a
+ * build host to use a cross-compilation toolchain later to extract
+ * the source location from the raw data the kernel has provided on
+ * the target system.
+ *
+ * However, collecting the call frames within the application to
+ * determine the full context of a relax spot is not something we can
+ * do purely from kernel space, notably because it depends on build
+ * options we just don't know about (e.g. frame pointer availability
+ * for the app, or other nitty-gritty details depending on the
+ * toolchain). To solve this, we ask the application to send us a
+ * complete backtrace taken from the context of a specific signal
+ * handler, which we know is stacked over the relax spot. That
+ * information is then stored by the kernel after some
+ * post-processing, along with other data identifying the caller, and
+ * made available through the /proc/xenomai/debug/relax vfile.
+ *
+ * Implementation-wise, xndebug_notify_relax and xndebug_trace_relax
+ * routines are paired: first, xndebug_notify_relax sends a SIGSHADOW
+ * request to userland when a relax spot is detected from
+ * xnthread_relax, which should then trigger a call back to
+ * xndebug_trace_relax with the complete backtrace information, as
+ * seen from userland (via the internal sc_cobalt_backtrace
+ * syscall). All this runs on behalf of the relaxing thread, so we can
+ * make a number of convenient assumptions (such as being able to scan
+ * the current vma list to get detailed information about the
+ * executable mappings that could be involved).
+ */
+
+void xndebug_notify_relax(struct xnthread *thread, int reason)
+{
+	xnthread_signal(thread, SIGSHADOW,
+			  sigshadow_int(SIGSHADOW_ACTION_BACKTRACE, reason));
+}
+
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason)
+{
+	struct relax_record *p, **h;
+	struct vm_area_struct *vma;
+	struct xnthread *thread;
+	struct relax_spot spot;
+	struct mm_struct *mm;
+	struct file *file;
+	unsigned long pc;
+	char *mapname;
+	int n, depth;
+	char *tmp;
+	u32 hash;
+	spl_t s;
+
+	thread = xnthread_current();
+	if (thread == NULL)
+		return;		/* Can't be, right? What a mess. */
+
+	/*
+	 * We compute PC values relative to the base of the shared
+	 * executable mappings we find in the backtrace, which makes
+	 * it possible for the slackspot utility to match the
+	 * corresponding source code locations from unrelocated file
+	 * offsets.
+	 */
+
+	tmp = (char *)__get_free_page(GFP_KERNEL);
+	if (tmp == NULL)
+		/*
+		 * The situation looks really bad, but we can't do
+		 * anything about it. Just bail out.
+		 */
+		return;
+
+	memset(&spot, 0, sizeof(spot));
+	mm = get_task_mm(current);
+	mmap_read_lock(mm);
+
+	for (n = 0, depth = 0; n < nr; n++) {
+		pc = backtrace[n];
+
+		vma = find_vma(mm, pc);
+		if (vma == NULL)
+			continue;
+
+		/*
+		 * Interpreter-generated executable mappings are not
+		 * file-backed. Use this to determine when $pc should be fixed
+		 * up by subtracting the mapping base address in the DSO case.
+		 */
+		file = vma->vm_file;
+		if (file != NULL)
+			pc -= vma->vm_start;
+
+		spot.backtrace[depth].pc = pc;
+
+		/*
+		 * Even in case we can't fetch the map name, we still
+		 * record the PC value, which may still give some hint
+		 * downstream.
+		 */
+		if (file == NULL)
+			goto next_frame;
+
+		mapname = d_path(&file->f_path, tmp, PAGE_SIZE);
+		if (IS_ERR(mapname))
+			goto next_frame;
+
+		spot.backtrace[depth].mapname = hash_symbol(mapname);
+	next_frame:
+		depth++;
+	}
+
+	mmap_read_unlock(mm);
+	mmput(mm);
+	free_page((unsigned long)tmp);
+
+	/*
+	 * Most of the time we will be sent duplicates, since the odds
+	 * of seeing the same thread running the same code making the
+	 * same mistake all over again are high. So we probe the hash
+	 * table for an identical spot first, before going for a
+	 * complete record allocation from the system heap if no match
+	 * was found. Otherwise, we just take the fast exit path.
+	 */
+	spot.depth = depth;
+	spot.proghash = thread->proghash;
+	spot.pid = xnthread_host_pid(thread);
+	spot.reason = reason;
+	strcpy(spot.thread, thread->name);
+	hash = jhash2((u32 *)&spot, sizeof(spot) / sizeof(u32), 0);
+
+	xnlock_get_irqsave(&relax_lock, s);
+
+	h = &relax_jhash[hash & (RELAX_HSLOTS - 1)];
+	p = *h;
+	while (p &&
+	       /* Try quick guesses first, then memcmp */
+	       (p->spot.depth != spot.depth ||
+		p->spot.pid != spot.pid ||
+		memcmp(&p->spot, &spot, sizeof(spot))))
+	       p = p->h_next;
+
+	if (p) {
+		p->hits++;
+		goto out;	/* Spot already recorded. */
+	}
+
+	if (relax_queued >= RELAX_SPOTNR)
+		goto out;	/* No more space -- ignore. */
+	/*
+	 * We can only compete with other shadows which have just
+	 * switched to secondary mode like us. So holding the
+	 * relax_lock a bit longer is not an issue. This allows us to
+	 * probe and update the hash table, then allocate the record
+	 * memory, all in a single locked section.
+	 */
+	p = xnheap_alloc(&memory_pool, sizeof(*p));
+	if (p == NULL)
+		goto out;      /* Something is about to go wrong... */
+
+	memcpy(&p->spot, &spot, sizeof(p->spot));
+	p->exe_path = hash_symbol(thread->exe_path);
+	p->hits = 1;
+	p->h_next = *h;
+	*h = p;
+	p->r_next = relax_record_list;
+	relax_record_list = p;
+	relax_queued++;
+out:
+	relax_overall++;
+
+	xnlock_put_irqrestore(&relax_lock, s);
+}
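+
+/*
+ * Userland counterpart (sketch, not built): what the SIGSHADOW
+ * handler conceptually does upon SIGSHADOW_ACTION_BACKTRACE, before
+ * xndebug_trace_relax() runs. libcobalt implements the real thing;
+ * collect_backtrace_arg() and the handler name are placeholders.
+ */
+#if 0
+static void sigshadow_handler(int sig, siginfo_t *si, void *context)
+{
+	void *frames[SIGSHADOW_BACKTRACE_DEPTH];
+	int nr, reason = collect_backtrace_arg(si);
+
+	/* Collect the call frames from the relax spot upward... */
+	nr = backtrace(frames, SIGSHADOW_BACKTRACE_DEPTH);
+	/* ...then hand them over to the core via sc_cobalt_backtrace. */
+	XENOMAI_SYSCALL3(sc_cobalt_backtrace, nr, frames, reason);
+}
+#endif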
+
+static DEFINE_VFILE_HOSTLOCK(relax_mutex);
+
+struct relax_vfile_priv {
+	int queued;
+	int overall;
+	int ncurr;
+	struct relax_record *head;
+	struct relax_record *curr;
+};
+
+static void *relax_vfile_begin(struct xnvfile_regular_iterator *it)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p;
+	spl_t s;
+	int n;
+
+	/*
+	 * Snapshot the counters under lock, to make sure they remain
+	 * mutually consistent even though we dump the record list in a
+	 * lock-less manner. Additionally, the vfile layer already
+	 * holds the relax_mutex lock for us, so that we can't race
+	 * with ->store().
+	 */
+	xnlock_get_irqsave(&relax_lock, s);
+
+	if (relax_queued == 0 || it->pos > relax_queued) {
+		xnlock_put_irqrestore(&relax_lock, s);
+		return NULL;
+	}
+	priv->overall = relax_overall;
+	priv->queued = relax_queued;
+	priv->head = relax_record_list;
+
+	xnlock_put_irqrestore(&relax_lock, s);
+
+	if (it->pos == 0) {
+		priv->curr = NULL;
+		priv->ncurr = -1;
+		return VFILE_SEQ_START;
+	}
+
+	for (n = 1, p = priv->head; n < it->pos; n++)
+		p = p->r_next;
+
+	priv->curr = p;
+	priv->ncurr = n;
+
+	return p;
+}
+
+static void *relax_vfile_next(struct xnvfile_regular_iterator *it)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p;
+	int n;
+
+	if (it->pos > priv->queued)
+		return NULL;
+
+	if (it->pos == priv->ncurr + 1)
+		p = priv->curr->r_next;
+	else {
+		for (n = 1, p = priv->head; n < it->pos; n++)
+			p = p->r_next;
+	}
+
+	priv->curr = p;
+	priv->ncurr = it->pos;
+
+	return p;
+}
+
+static const char *reason_str[] = {
+    [SIGDEBUG_UNDEFINED] = "undefined",
+    [SIGDEBUG_MIGRATE_SIGNAL] = "signal",
+    [SIGDEBUG_MIGRATE_SYSCALL] = "syscall",
+    [SIGDEBUG_MIGRATE_FAULT] = "fault",
+    [SIGDEBUG_MIGRATE_PRIOINV] = "pi-error",
+    [SIGDEBUG_NOMLOCK] = "mlock-check",
+    [SIGDEBUG_WATCHDOG] = "runaway-break",
+    [SIGDEBUG_RESCNT_IMBALANCE] = "resource-count-imbalance",
+    [SIGDEBUG_MUTEX_SLEEP] = "sleep-holding-mutex",
+    [SIGDEBUG_LOCK_BREAK] = "scheduler-lock-break",
+};
+
+static int relax_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct relax_vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct relax_record *p = data;
+	int n;
+
+	/*
+	 * No need to grab any lock to read a record from a previously
+	 * validated index: the data must be there and won't be
+	 * touched anymore.
+	 */
+	if (p == NULL) {
+		xnvfile_printf(it, "%d\n", priv->overall);
+		return 0;
+	}
+
+	xnvfile_printf(it, "%s\n", p->exe_path ?: "?");
+	xnvfile_printf(it, "%d %d %s %s\n", p->spot.pid, p->hits,
+		       reason_str[p->spot.reason], p->spot.thread);
+
+	for (n = 0; n < p->spot.depth; n++)
+		xnvfile_printf(it, "0x%lx %s\n",
+			       p->spot.backtrace[n].pc,
+			       p->spot.backtrace[n].mapname ?: "?");
+
+	xnvfile_printf(it, ".\n");
+
+	return 0;
+}
+
+static ssize_t relax_vfile_store(struct xnvfile_input *input)
+{
+	struct relax_record *p, *np;
+	spl_t s;
+
+	/*
+	 * Flush out all records. Races with ->show() are prevented
+	 * using the relax_mutex lock. The vfile layer takes care of
+	 * this internally.
+	 */
+	xnlock_get_irqsave(&relax_lock, s);
+	p = relax_record_list;
+	relax_record_list = NULL;
+	relax_overall = 0;
+	relax_queued = 0;
+	memset(relax_jhash, 0, sizeof(relax_jhash));
+	xnlock_put_irqrestore(&relax_lock, s);
+
+	while (p) {
+		np = p->r_next;
+		xnheap_free(&memory_pool, p);
+		p = np;
+	}
+
+	return input->size;
+}
+
+static struct xnvfile_regular_ops relax_vfile_ops = {
+	.begin = relax_vfile_begin,
+	.next = relax_vfile_next,
+	.show = relax_vfile_show,
+	.store = relax_vfile_store,
+};
+
+static struct xnvfile_regular relax_vfile = {
+	.privsz = sizeof(struct relax_vfile_priv),
+	.ops = &relax_vfile_ops,
+	.entry = { .lockops = &relax_mutex.ops },
+};
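+
+/*
+ * Resulting layout of /proc/xenomai/debug/relax, as emitted by
+ * relax_vfile_show() above (all values made up for illustration):
+ *
+ *   42                           <- overall relax count
+ *   /usr/bin/myapp               <- executable path of the offender
+ *   1276 3 syscall worker        <- pid, hits, reason, thread name
+ *   0x1f4c /usr/lib/libfoo.so    <- relative PC, backing mapping
+ *   0x88d0 /usr/bin/myapp
+ *   .                            <- end of record
+ *
+ * Writing anything to the file flushes all records, e.g.:
+ *
+ *   echo flush > /proc/xenomai/debug/relax
+ */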
+
+static inline int init_trace_relax(void)
+{
+	u32 size = CONFIG_XENO_OPT_DEBUG_TRACE_LOGSZ * 1024;
+	void *p;
+	int ret;
+
+	p = vmalloc(size);
+	if (p == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&memory_pool, p, size);
+	if (ret)
+		return ret;
+
+	xnheap_set_name(&memory_pool, "debug log");
+
+	ret = xnvfile_init_regular("relax", &relax_vfile, &cobalt_debug_vfroot);
+	if (ret) {
+		xnheap_destroy(&memory_pool);
+		vfree(p);
+	}
+
+	return ret;
+}
+
+static inline void cleanup_trace_relax(void)
+{
+	void *p;
+
+	xnvfile_destroy_regular(&relax_vfile);
+	p = xnheap_get_membase(&memory_pool);
+	xnheap_destroy(&memory_pool);
+	vfree(p);
+}
+
+#else /* !CONFIG_XENO_OPT_DEBUG_TRACE_RELAX */
+
+static inline int init_trace_relax(void)
+{
+	return 0;
+}
+
+static inline void cleanup_trace_relax(void)
+{
+}
+
+static inline void init_thread_relax_trace(struct xnthread *thread)
+{
+}
+
+#endif /* !XENO_OPT_DEBUG_TRACE_RELAX */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+void xnlock_dbg_prepare_acquire(unsigned long long *start)
+{
+	*start = xnclock_read_raw(&nkclock);
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_prepare_acquire);
+
+void xnlock_dbg_acquired(struct xnlock *lock, int cpu, unsigned long long *start,
+			 const char *file, int line, const char *function)
+{
+	lock->lock_date = *start;
+	lock->spin_time = xnclock_read_raw(&nkclock) - *start;
+	lock->file = file;
+	lock->function = function;
+	lock->line = line;
+	lock->cpu = cpu;
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_acquired);
+
+int xnlock_dbg_release(struct xnlock *lock,
+		       const char *file, int line, const char *function)
+{
+	unsigned long long lock_time;
+	struct xnlockinfo *stats;
+	int cpu;
+
+	lock_time = xnclock_read_raw(&nkclock) - lock->lock_date;
+	cpu = raw_smp_processor_id();
+	stats = &per_cpu(xnlock_stats, cpu);
+
+	if (lock->file == NULL) {
+		lock->file = "??";
+		lock->line = 0;
+		lock->function = "invalid";
+	}
+
+	if (unlikely(lock->owner != cpu)) {
+		pipeline_prepare_panic();
+		printk(XENO_ERR "lock %p already unlocked on CPU #%d\n"
+				"          last owner = %s:%u (%s(), CPU #%d)\n",
+		       lock, cpu, lock->file, lock->line, lock->function,
+		       lock->cpu);
+		dump_stack();
+		return 1;
+	}
+
+	/* File that we released it. */
+	lock->cpu = -lock->cpu;
+	lock->file = file;
+	lock->line = line;
+	lock->function = function;
+
+	if (lock_time > stats->lock_time) {
+		stats->lock_time = lock_time;
+		stats->spin_time = lock->spin_time;
+		stats->file = lock->file;
+		stats->function = lock->function;
+		stats->line = lock->line;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnlock_dbg_release);
+
+#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+void xndebug_shadow_init(struct xnthread *thread)
+{
+	struct cobalt_ppd *sys_ppd;
+	size_t len;
+
+	sys_ppd = cobalt_ppd_get(0);
+	/*
+	 * The caller is current, so we know for sure that sys_ppd
+	 * will still be valid after we drop the lock.
+	 *
+	 * NOTE: Kernel shadows all share the system global ppd
+	 * descriptor with no refcounting.
+	 */
+	thread->exe_path = sys_ppd->exe_path ?: "(unknown)";
+	/*
+	 * The program hash value is a unique token debug features may
+	 * use to identify all threads which belong to a given
+	 * executable file. Using this value for quick probes is often
+	 * handier and more efficient than testing the whole exe_path.
+	 */
+	len = strlen(thread->exe_path);
+	thread->proghash = jhash(thread->exe_path, len, 0);
+}
+
+int xndebug_init(void)
+{
+	int ret;
+
+	ret = init_trace_relax();
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+void xndebug_cleanup(void)
+{
+	cleanup_trace_relax();
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h
new file mode 100644
index 0000000..24dc354
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/debug.h
@@ -0,0 +1,72 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+
+#ifndef _KERNEL_COBALT_DEBUG_H
+#define _KERNEL_COBALT_DEBUG_H
+
+#include <cobalt/kernel/assert.h>
+
+struct xnthread;
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+
+int xndebug_init(void);
+
+void xndebug_cleanup(void);
+
+void xndebug_shadow_init(struct xnthread *thread);
+
+extern struct xnvfile_directory cobalt_debug_vfroot;
+
+#else  /* !XENO_OPT_DEBUG */
+
+static inline int xndebug_init(void)
+{
+	return 0;
+}
+
+static inline void xndebug_cleanup(void)
+{
+}
+
+static inline void xndebug_shadow_init(struct xnthread *thread)
+{
+}
+
+#endif  /* !XENO_OPT_DEBUG */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_TRACE_RELAX
+void xndebug_notify_relax(struct xnthread *thread,
+			  int reason);
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason);
+#else
+static inline
+void xndebug_notify_relax(struct xnthread *thread, int reason)
+{
+}
+static inline
+void xndebug_trace_relax(int nr, unsigned long *backtrace,
+			 int reason)
+{
+	/* Simply ignore. */
+}
+#endif
+
+#endif /* !_KERNEL_COBALT_DEBUG_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile
new file mode 100644
index 0000000..f49d3a0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/kernel
+
+obj-y +=	pipeline.o
+
+pipeline-y :=	init.o kevents.o sched.o tick.o syscall.o intr.o
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c
new file mode 100644
index 0000000..bc891b4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/init.c
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <linux/init.h>
+#include <pipeline/machine.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/assert.h>
+
+int __init pipeline_init(void)
+{
+	int ret;
+
+	if (cobalt_machine.init) {
+		ret = cobalt_machine.init();
+		if (ret)
+			return ret;
+	}
+
+	/* Enable the Xenomai out-of-band stage */
+	enable_oob_stage("Xenomai");
+
+	ret = xnclock_init();
+	if (ret)
+		goto fail_clock;
+
+	return 0;
+
+fail_clock:
+	if (cobalt_machine.cleanup)
+		cobalt_machine.cleanup();
+
+	return ret;
+}
+
+int __init pipeline_late_init(void)
+{
+	if (cobalt_machine.late_init)
+		return cobalt_machine.late_init();
+
+	return 0;
+}
+
+__init void pipeline_cleanup(void)
+{
+	/* Disable the Xenomai stage */
+	disable_oob_stage();
+
+	xnclock_cleanup();
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c
new file mode 100644
index 0000000..88116c7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/intr.c
@@ -0,0 +1,146 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq_pipeline.h>
+#include <linux/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/intr.h>
+
+void xnintr_host_tick(struct xnsched *sched) /* hard irqs off */
+{
+	sched->lflags &= ~XNHTICK;
+	tick_notify_proxy();
+}
+
+/*
+ * Low-level core clock irq handler. This one forwards ticks from the
+ * Xenomai platform timer to nkclock exclusively.
+ */
+void xnintr_core_clock_handler(void)
+{
+	struct xnsched *sched;
+
+	xnlock_get(&nklock);
+	xnclock_tick(&nkclock);
+	xnlock_put(&nklock);
+
+	/*
+	 * If the core clock interrupt preempted a real-time thread,
+	 * any transition to the root thread has already triggered a
+	 * host tick propagation from xnsched_run(), so at this point,
+	 * we only need to propagate the host tick in case the
+	 * interrupt preempted the root thread.
+	 */
+	sched = xnsched_current();
+	if ((sched->lflags & XNHTICK) &&
+	    xnthread_test_state(sched->curr, XNROOT))
+		xnintr_host_tick(sched);
+}
+
+static irqreturn_t xnintr_irq_handler(int irq, void *dev_id)
+{
+	struct xnintr *intr = dev_id;
+	int ret;
+
+	ret = intr->isr(intr);
+	XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0);
+
+	if (ret & XN_IRQ_DISABLE)
+		disable_irq(irq);
+	else if (ret & XN_IRQ_PROPAGATE)
+		irq_post_inband(irq);
+
+	return ret & XN_IRQ_NONE ? IRQ_NONE : IRQ_HANDLED;
+}
+
+int xnintr_init(struct xnintr *intr, const char *name,
+		unsigned int irq, xnisr_t isr, xniack_t iack,
+		int flags)
+{
+	secondary_mode_only();
+
+	intr->irq = irq;
+	intr->isr = isr;
+	intr->iack = NULL;	/* unused */
+	intr->cookie = NULL;
+	intr->name = name ? : "<unknown>";
+	intr->flags = flags;
+	intr->status = 0;
+	intr->unhandled = 0;	/* unused */
+	raw_spin_lock_init(&intr->lock); /* unused */
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnintr_init);
+
+void xnintr_destroy(struct xnintr *intr)
+{
+	secondary_mode_only();
+	xnintr_detach(intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_destroy);
+
+int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask)
+{
+	cpumask_t tmp_mask, *effective_mask;
+	int ret;
+
+	secondary_mode_only();
+
+	intr->cookie = cookie;
+
+	if (!cpumask) {
+		effective_mask = &xnsched_realtime_cpus;
+	} else {
+		effective_mask = &tmp_mask;
+		cpumask_and(effective_mask, &xnsched_realtime_cpus, cpumask);
+		if (cpumask_empty(effective_mask))
+			return -EINVAL;
+	}
+#ifdef CONFIG_SMP
+	ret = irq_set_affinity_hint(intr->irq, effective_mask);
+	if (ret)
+		return ret;
+#endif
+
+	return request_irq(intr->irq, xnintr_irq_handler, IRQF_OOB,
+			intr->name, intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_attach);
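+
+/*
+ * Illustrative sketch (not built): typical driver-side usage of the
+ * xnintr API. MY_DEV_IRQ and the handler body are assumptions made
+ * for the example only.
+ */
+#if 0
+static struct xnintr my_intr;
+
+static int my_isr(struct xnintr *intr)
+{
+	/* Acknowledge the device here, then report the IRQ status. */
+	return XN_IRQ_HANDLED;
+}
+
+static int my_driver_init(void)
+{
+	int ret;
+
+	ret = xnintr_init(&my_intr, "mydev", MY_DEV_IRQ, my_isr, NULL, 0);
+	if (ret)
+		return ret;
+
+	/* A NULL cpumask defaults to all real-time CPUs. */
+	return xnintr_attach(&my_intr, NULL, NULL);
+}
+#endif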
+
+void xnintr_detach(struct xnintr *intr)
+{
+	secondary_mode_only();
+#ifdef CONFIG_SMP
+	irq_set_affinity_hint(intr->irq, NULL);
+#endif
+	free_irq(intr->irq, intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_detach);
+
+void xnintr_enable(struct xnintr *intr)
+{
+}
+EXPORT_SYMBOL_GPL(xnintr_enable);
+
+void xnintr_disable(struct xnintr *intr)
+{
+}
+EXPORT_SYMBOL_GPL(xnintr_disable);
+
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask)
+{
+	cpumask_t effective_mask;
+
+	secondary_mode_only();
+
+	cpumask_and(&effective_mask, &xnsched_realtime_cpus, cpumask);
+	if (cpumask_empty(&effective_mask))
+		return -EINVAL;
+
+	return irq_set_affinity_hint(intr->irq, &effective_mask);
+}
+EXPORT_SYMBOL_GPL(xnintr_affinity);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c
new file mode 100644
index 0000000..4da4f51
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/kevents.c
@@ -0,0 +1,351 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org>
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org>
+ */
+
+#include <linux/ptrace.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/init.h>
+#include <rtdm/driver.h>
+#include <trace/events/cobalt-core.h>
+#include "../posix/process.h"
+#include "../posix/thread.h"
+#include "../posix/memory.h"
+
+void arch_inband_task_init(struct task_struct *tsk)
+{
+	struct cobalt_threadinfo *p = dovetail_task_state(tsk);
+
+	p->thread = NULL;
+	p->process = NULL;
+}
+
+void handle_oob_trap_entry(unsigned int trapnr, struct pt_regs *regs)
+{
+	struct xnthread *thread;
+	struct xnsched *sched;
+	spl_t s;
+
+	sched = xnsched_current();
+	thread = sched->curr;
+
+	/*
+	 * Enable back tracing.
+	 */
+	trace_cobalt_thread_fault(xnarch_fault_pc(regs), trapnr);
+
+	if (xnthread_test_state(thread, XNROOT))
+		return;
+
+	if (xnarch_fault_bp_p(trapnr) && user_mode(regs)) {
+		XENO_WARN_ON(CORE, xnthread_test_state(thread, XNRELAX));
+		xnlock_get_irqsave(&nklock, s);
+		xnthread_set_info(thread, XNCONTHI);
+		dovetail_request_ucall(current);
+		cobalt_stop_debugged_process(thread);
+		xnlock_put_irqrestore(&nklock, s);
+		xnsched_run();
+	}
+
+	/*
+	 * If we experienced a trap on behalf of a shadow thread
+	 * running in primary mode, move it to the Linux domain,
+	 * letting the regular kernel process the exception.
+	 */
+#if defined(CONFIG_XENO_OPT_DEBUG_COBALT) || defined(CONFIG_XENO_OPT_DEBUG_USER)
+	if (!user_mode(regs)) {
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u in "
+		       "kernel-space at 0x%lx (pid %d)\n", thread->name,
+		       trapnr,
+		       xnarch_fault_pc(regs),
+		       xnthread_host_pid(thread));
+	} else if (xnarch_fault_notify(trapnr)) /* Don't report debug traps */
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u from "
+		       "user-space at 0x%lx (pid %d)\n", thread->name,
+		       trapnr,
+		       xnarch_fault_pc(regs),
+		       xnthread_host_pid(thread));
+#endif
+
+	if (xnarch_fault_pf_p(trapnr))
+		/*
+		 * The page fault counter is not SMP-safe, but it's a
+		 * simple indicator that something went wrong wrt
+		 * memory locking anyway.
+		 */
+		xnstat_counter_inc(&thread->stat.pf);
+
+	xnthread_relax(xnarch_fault_notify(trapnr), SIGDEBUG_MIGRATE_FAULT);
+}
+
+static inline int handle_setaffinity_event(struct dovetail_migration_data *d)
+{
+	return cobalt_handle_setaffinity_event(d->task);
+}
+
+static inline int handle_taskexit_event(struct task_struct *p)
+{
+	return cobalt_handle_taskexit_event(p);
+}
+
+static inline int handle_user_return(struct task_struct *task)
+{
+	return cobalt_handle_user_return(task);
+}
+
+void handle_oob_mayday(struct pt_regs *regs)
+{
+	XENO_BUG_ON(COBALT, !xnthread_test_state(xnthread_current(), XNUSER));
+
+	xnthread_relax(0, 0);
+}
+
+static int handle_sigwake_event(struct task_struct *p)
+{
+	struct xnthread *thread;
+	sigset_t pending;
+	spl_t s;
+
+	thread = xnthread_from_task(p);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * CAUTION: __TASK_TRACED is not set in p->state yet. This
+	 * state bit will be set right after we return, when the task
+	 * is woken up.
+	 */
+	if ((p->ptrace & PT_PTRACED) && !xnthread_test_state(thread, XNSSTEP)) {
+		/* We already own the siglock. */
+		sigorsets(&pending,
+			  &p->pending.signal,
+			  &p->signal->shared_pending.signal);
+
+		if (sigismember(&pending, SIGTRAP) ||
+		    sigismember(&pending, SIGSTOP) ||
+		    sigismember(&pending, SIGINT))
+			cobalt_register_debugged_thread(thread);
+	}
+
+	if (xnthread_test_state(thread, XNRELAX))
+		goto out;
+
+	/*
+	 * Allow a thread stopped for debugging to resume briefly in order to
+	 * migrate to secondary mode. xnthread_relax will reapply XNDBGSTOP.
+	 */
+	if (xnthread_test_state(thread, XNDBGSTOP))
+		xnthread_resume(thread, XNDBGSTOP);
+
+	__xnthread_kick(thread);
+out:
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return KEVENT_PROPAGATE;
+}
+
+static inline int handle_cleanup_event(struct mm_struct *mm)
+{
+	return cobalt_handle_cleanup_event(mm);
+}
+
+void pipeline_cleanup_process(void)
+{
+	dovetail_stop_altsched();
+}
+
+int handle_ptrace_resume(struct task_struct *tracee)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	thread = xnthread_from_task(tracee);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	if (xnthread_test_state(thread, XNSSTEP)) {
+		xnlock_get_irqsave(&nklock, s);
+
+		xnthread_resume(thread, XNDBGSTOP);
+		cobalt_unregister_debugged_thread(thread);
+
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return KEVENT_PROPAGATE;
+}
+
+static void handle_ptrace_cont(void)
+{
+	struct xnthread *curr = xnthread_current();
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(curr, XNSSTEP)) {
+		if (!xnthread_test_info(curr, XNCONTHI))
+			cobalt_unregister_debugged_thread(curr);
+
+		xnthread_set_localinfo(curr, XNHICCUP);
+
+		dovetail_request_ucall(current);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+void handle_inband_event(enum inband_event_type event, void *data)
+{
+	switch (event) {
+	case INBAND_TASK_SIGNAL:
+		handle_sigwake_event(data);
+		break;
+	case INBAND_TASK_MIGRATION:
+		handle_setaffinity_event(data);
+		break;
+	case INBAND_TASK_EXIT:
+		if (xnthread_current())
+			handle_taskexit_event(current);
+		break;
+	case INBAND_TASK_RETUSER:
+		handle_user_return(data);
+		break;
+	case INBAND_TASK_PTSTEP:
+		handle_ptrace_resume(data);
+		break;
+	case INBAND_TASK_PTCONT:
+		handle_ptrace_cont();
+		break;
+	case INBAND_TASK_PTSTOP:
+		break;
+	case INBAND_PROCESS_CLEANUP:
+		handle_cleanup_event(data);
+		break;
+	}
+}
+
+/*
+ * Called by the in-band kernel when the CLOCK_REALTIME epoch changes.
+ */
+void inband_clock_was_set(void)
+{
+	if (realtime_core_enabled())
+		xnclock_set_wallclock(ktime_get_real_fast_ns());
+}
+
+#ifdef CONFIG_MMU
+
+int pipeline_prepare_current(void)
+{
+	struct task_struct *p = current;
+	kernel_siginfo_t si;
+
+	if ((p->mm->def_flags & VM_LOCKED) == 0) {
+		memset(&si, 0, sizeof(si));
+		si.si_signo = SIGDEBUG;
+		si.si_code = SI_QUEUE;
+		si.si_int = SIGDEBUG_NOMLOCK | sigdebug_marker;
+		send_sig_info(SIGDEBUG, &si, p);
+	}
+
+	return 0;
+}
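+
+/*
+ * Userland counterpart (sketch): an application avoids the
+ * SIGDEBUG_NOMLOCK notification sent above by committing its memory
+ * upfront, which libcobalt normally does at startup:
+ *
+ *	#include <sys/mman.h>
+ *
+ *	if (mlockall(MCL_CURRENT | MCL_FUTURE)) {
+ *		perror("mlockall");
+ *		exit(EXIT_FAILURE);
+ *	}
+ */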
+
+static inline int get_mayday_prot(void)
+{
+	return PROT_READ|PROT_EXEC;
+}
+
+#else /* !CONFIG_MMU */
+
+int pipeline_prepare_current(void)
+{
+	return 0;
+}
+
+static inline int get_mayday_prot(void)
+{
+	/*
+	 * Until we stop backing /dev/mem with the mayday page, we
+	 * can't ask for PROT_EXEC since that device does not define
+	 * mmap capabilities, and the default ones won't allow an
+	 * executable mapping with MAP_SHARED. In the NOMMU case, this
+	 * is (currently) not an issue.
+	 */
+	return PROT_READ;
+}
+
+#endif /* !CONFIG_MMU */
+
+void resume_oob_task(struct task_struct *p) /* inband, oob stage stalled */
+{
+	struct xnthread *thread = xnthread_from_task(p);
+
+	xnlock_get(&nklock);
+
+	/*
+	 * We fire the handler before the thread is migrated, so that
+	 * thread->sched does not change between paired invocations of
+	 * relax_thread/harden_thread handlers.
+	 */
+	xnthread_run_handler_stack(thread, harden_thread);
+
+	cobalt_adjust_affinity(p);
+
+	xnthread_resume(thread, XNRELAX);
+
+	/*
+	 * In case we migrated independently of the user return notifier, clear
+	 * XNCONTHI here and also disable the notifier - we are already done.
+	 */
+	if (unlikely(xnthread_test_info(thread, XNCONTHI))) {
+		xnthread_clear_info(thread, XNCONTHI);
+		dovetail_clear_ucall();
+	}
+
+	/* Unregister as debugged thread in case we postponed this. */
+	if (unlikely(xnthread_test_state(thread, XNSSTEP)))
+		cobalt_unregister_debugged_thread(thread);
+
+	xnlock_put(&nklock);
+
+	xnsched_run();
+}
+
+void pipeline_attach_current(struct xnthread *thread)
+{
+	struct cobalt_threadinfo *p;
+
+	p = pipeline_current();
+	p->thread = thread;
+	p->process = cobalt_search_process(current->mm);
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+}
+
+int pipeline_trap_kevents(void)
+{
+	dovetail_start();
+	return 0;
+}
+
+void pipeline_enable_kevents(void)
+{
+	dovetail_start_altsched();
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c
new file mode 100644
index 0000000..01ea442
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/sched.c
@@ -0,0 +1,99 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2020 Philippe Gerum <rpm@xenomai.org>.
+ */
+
+#include <linux/cpuidle.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/sched.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+
+/* in-band stage, hard_irqs_disabled() */
+bool irq_cpuidle_control(struct cpuidle_device *dev,
+			struct cpuidle_state *state)
+{
+	/*
+	 * Deny entering sleep state if this entails stopping the
+	 * timer (i.e. C3STOP misfeature).
+	 */
+	if (state && (state->flags & CPUIDLE_FLAG_TIMER_STOP))
+		return false;
+
+	return true;
+}
+
+bool pipeline_switch_to(struct xnthread *prev, struct xnthread *next,
+			bool leaving_inband)
+{
+	return dovetail_context_switch(&xnthread_archtcb(prev)->altsched,
+			&xnthread_archtcb(next)->altsched, leaving_inband);
+}
+
+void pipeline_init_shadow_tcb(struct xnthread *thread)
+{
+	/*
+	 * Initialize the alternate scheduling control block.
+	 */
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+
+	trace_cobalt_shadow_map(thread);
+}
+
+void pipeline_init_root_tcb(struct xnthread *thread)
+{
+	/*
+	 * Initialize the alternate scheduling control block.
+	 */
+	dovetail_init_altsched(&xnthread_archtcb(thread)->altsched);
+}
+
+int pipeline_leave_inband(void)
+{
+	return dovetail_leave_inband();
+}
+
+int pipeline_leave_oob_prepare(void)
+{
+	int suspmask = XNRELAX;
+	struct xnthread *curr = xnthread_current();
+
+	dovetail_leave_oob();
+	/*
+	 * If current is being debugged, record that it should migrate
+	 * back in case it resumes in userspace. If it resumes in
+	 * kernel space, i.e.  over a restarting syscall, the
+	 * associated hardening will clear XNCONTHI.
+	 */
+	if (xnthread_test_state(curr, XNSSTEP)) {
+		xnthread_set_info(curr, XNCONTHI);
+		dovetail_request_ucall(current);
+		suspmask |= XNDBGSTOP;
+	}
+	return suspmask;
+}
+
+void pipeline_leave_oob_finish(void)
+{
+	dovetail_resume_inband();
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk)
+{
+	dovetail_send_mayday(tsk);
+}
+
+void pipeline_clear_mayday(void) /* May solely affect current. */
+{
+	clear_thread_flag(TIF_MAYDAY);
+}
+
+irqreturn_t pipeline_reschedule_ipi_handler(int irq, void *dev_id)
+{
+	trace_cobalt_schedule_remote(xnsched_current());
+
+	/* Will reschedule from irq_exit_pipeline(). */
+
+	return IRQ_HANDLED;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c
new file mode 100644
index 0000000..440c069
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/syscall.c
@@ -0,0 +1,25 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <linux/irqstage.h>
+#include <pipeline/pipeline.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/assert.h>
+#include <xenomai/posix/syscall.h>
+
+int handle_pipelined_syscall(struct irq_stage *stage, struct pt_regs *regs)
+{
+	if (unlikely(running_inband()))
+		return handle_root_syscall(regs);
+
+	return handle_head_syscall(stage == &inband_stage, regs);
+}
+
+int handle_oob_syscall(struct pt_regs *regs)
+{
+	return handle_head_syscall(false, regs);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c
new file mode 100644
index 0000000..873b624
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/dovetail/tick.c
@@ -0,0 +1,190 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <linux/tick.h>
+#include <linux/clockchips.h>
+#include <cobalt/kernel/intr.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+
+static DEFINE_PER_CPU(struct clock_proxy_device *, proxy_device);
+
+const char *pipeline_timer_name(void)
+{
+	struct clock_proxy_device *dev = per_cpu(proxy_device, 0);
+	struct clock_event_device *real_dev = dev->real_device;
+
+	/*
+	 * Return the name of the current clock event chip, which is
+	 * the real device controlled by the proxy tick device.
+	 */
+	return real_dev->name;
+}
+
+void pipeline_set_timer_shot(unsigned long delay) /* ns */
+{
+	struct clock_proxy_device *dev = __this_cpu_read(proxy_device);
+	struct clock_event_device *real_dev = dev->real_device;
+	u64 cycles;
+	ktime_t t;
+	int ret;
+
+	if (real_dev->features & CLOCK_EVT_FEAT_KTIME) {
+		t = ktime_add(delay, xnclock_core_read_raw());
+		real_dev->set_next_ktime(t, real_dev);
+	} else {
+		if (delay <= 0) {
+			delay = real_dev->min_delta_ns;
+		} else {
+			delay = min_t(int64_t, delay,
+				real_dev->max_delta_ns);
+			delay = max_t(int64_t, delay,
+				real_dev->min_delta_ns);
+		}
+		cycles = ((u64)delay * real_dev->mult) >> real_dev->shift;
+		ret = real_dev->set_next_event(cycles, real_dev);
+		if (ret)
+			real_dev->set_next_event(real_dev->min_delta_ticks,
+						real_dev);
+	}
+}
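+
+/*
+ * Worked example of the ns-to-cycles conversion above, with made-up
+ * device parameters: a 100 MHz timer could advertise
+ * mult = 429496730 and shift = 32, so a 10000 ns delay yields
+ * (10000 * 429496730) >> 32 = 1000 cycles, i.e. one cycle per 10 ns
+ * as expected. Delays are clamped to [min_delta_ns, max_delta_ns]
+ * beforehand.
+ */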
+
+static int proxy_set_next_ktime(ktime_t expires,
+				struct clock_event_device *proxy_dev) /* hard irqs on/off */
+{
+	struct xnsched *sched;
+	unsigned long flags;
+	ktime_t delta;
+	int ret;
+
+	/*
+	 * Expiration dates of in-band timers are based on the common
+	 * monotonic time base. If the timeout date has already
+	 * elapsed, make sure xntimer_start() does not fail with
+	 * -ETIMEDOUT but programs the hardware for ticking
+	 * immediately instead.
+	 */
+	delta = ktime_sub(expires, ktime_get());
+	if (delta < 0)
+		delta = 0;
+
+	xnlock_get_irqsave(&nklock, flags);
+	sched = xnsched_current();
+	ret = xntimer_start(&sched->htimer, delta, XN_INFINITE, XN_RELATIVE);
+	xnlock_put_irqrestore(&nklock, flags);
+
+	return ret ? -ETIME : 0;
+}
+
+bool pipeline_must_force_program_tick(struct xnsched *sched)
+{
+	return sched->lflags & XNTSTOP;
+}
+
+static int proxy_set_oneshot_stopped(struct clock_event_device *proxy_dev)
+{
+	struct clock_event_device *real_dev;
+	struct clock_proxy_device *dev;
+	struct xnsched *sched;
+	spl_t s;
+
+	dev = container_of(proxy_dev, struct clock_proxy_device, proxy_device);
+
+	/*
+	 * In-band wants to disable the clock hardware on entering a
+	 * tickless state, so we have to stop our in-band tick
+	 * emulation. Propagate the request for shutting down the
+	 * hardware to the real device only if we have no outstanding
+	 * OOB timers. CAUTION: the in-band timer is counted when
+	 * assessing the XNIDLE condition, so we need to stop it
+	 * prior to testing the latter.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_current();
+	xntimer_stop(&sched->htimer);
+	sched->lflags |= XNTSTOP;
+
+	if (sched->lflags & XNIDLE) {
+		real_dev = dev->real_device;
+		real_dev->set_state_oneshot_stopped(real_dev);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static void setup_proxy(struct clock_proxy_device *dev)
+{
+	struct clock_event_device *proxy_dev = &dev->proxy_device;
+
+	dev->handle_oob_event = (typeof(dev->handle_oob_event))
+		xnintr_core_clock_handler;
+	proxy_dev->features |= CLOCK_EVT_FEAT_KTIME;
+	proxy_dev->set_next_ktime = proxy_set_next_ktime;
+	if (proxy_dev->set_state_oneshot_stopped)
+		proxy_dev->set_state_oneshot_stopped = proxy_set_oneshot_stopped;
+	__this_cpu_write(proxy_device, dev);
+}
+
+#ifdef CONFIG_SMP
+static irqreturn_t tick_ipi_handler(int irq, void *dev_id)
+{
+	xnintr_core_clock_handler();
+
+	return IRQ_HANDLED;
+}
+#endif
+
+int pipeline_install_tick_proxy(void)
+{
+	int ret;
+
+#ifdef CONFIG_SMP
+	/*
+	 * We may be running an SMP kernel on a uniprocessor machine
+	 * whose interrupt controller provides no IPI: attempt to hook
+	 * the timer IPI only if the hardware can support multiple
+	 * CPUs.
+	 */
+	if (num_possible_cpus() > 1) {
+		ret = __request_percpu_irq(TIMER_OOB_IPI,
+					tick_ipi_handler,
+					IRQF_OOB, "Xenomai timer IPI",
+					&cobalt_machine_cpudata);
+		if (ret)
+			return ret;
+	}
+#endif
+
+	/* Install the proxy tick device */
+	ret = tick_install_proxy(setup_proxy, &xnsched_realtime_cpus);
+	if (ret)
+		goto fail_proxy;
+
+	return 0;
+
+fail_proxy:
+#ifdef CONFIG_SMP
+	if (num_possible_cpus() > 1)
+		free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata);
+#endif
+
+	return ret;
+}
+
+void pipeline_uninstall_tick_proxy(void)
+{
+	/* Uninstall the proxy tick device. */
+	tick_uninstall_proxy(&xnsched_realtime_cpus);
+
+#ifdef CONFIG_SMP
+	if (num_possible_cpus() > 1)
+		free_percpu_irq(TIMER_OOB_IPI, &cobalt_machine_cpudata);
+#endif
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c b/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c
new file mode 100644
index 0000000..f106d5d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/heap.c
@@ -0,0 +1,863 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/log2.h>
+#include <linux/bitops.h>
+#include <linux/mm.h>
+#include <asm/pgtable.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/ancillaries.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_heap Dynamic memory allocation services
+ *
+ * This code implements a variant of the allocator described in
+ * "Design of a General Purpose Memory Allocator for the 4.3BSD Unix
+ * Kernel" by Marshall K. McKusick and Michael J. Karels (USENIX
+ * 1988), see http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf.
+ * The free page list is maintained in rbtrees for fast lookups of
+ * multi-page memory ranges, and pages holding bucketed memory have a
+ * fast allocation bitmap to manage their blocks internally.
+ *@{
+ */
+struct xnheap cobalt_heap;		/* System heap */
+EXPORT_SYMBOL_GPL(cobalt_heap);
+
+static LIST_HEAD(heapq);	/* Heap list for v-file dump */
+
+static int nrheaps;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_rev_tag vfile_tag;
+
+static struct xnvfile_snapshot_ops vfile_ops;
+
+struct vfile_priv {
+	struct xnheap *curr;
+};
+
+struct vfile_data {
+	size_t all_mem;
+	size_t free_mem;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static struct xnvfile_snapshot vfile = {
+	.privsz = sizeof(struct vfile_priv),
+	.datasz = sizeof(struct vfile_data),
+	.tag = &vfile_tag,
+	.ops = &vfile_ops,
+};
+
+static int vfile_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_priv *priv = xnvfile_iterator_priv(it);
+
+	if (list_empty(&heapq)) {
+		priv->curr = NULL;
+		return 0;
+	}
+
+	priv->curr = list_first_entry(&heapq, struct xnheap, next);
+
+	return nrheaps;
+}
+
+static int vfile_next(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_data *p = data;
+	struct xnheap *heap;
+
+	if (priv->curr == NULL)
+		return 0;	/* We are done. */
+
+	heap = priv->curr;
+	if (list_is_last(&heap->next, &heapq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_entry(heap->next.next,
+					struct xnheap, next);
+
+	p->all_mem = xnheap_get_size(heap);
+	p->free_mem = xnheap_get_free(heap);
+	knamecpy(p->name, heap->name);
+
+	return 1;
+}
+
+static int vfile_show(struct xnvfile_snapshot_iterator *it, void *data)
+{
+	struct vfile_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it, "%9s %9s  %s\n",
+			       "TOTAL", "FREE", "NAME");
+	else
+		xnvfile_printf(it, "%9zu %9zu  %s\n",
+			       p->all_mem,
+			       p->free_mem,
+			       p->name);
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_ops = {
+	.rewind = vfile_rewind,
+	.next = vfile_next,
+	.show = vfile_show,
+};
+
+void xnheap_init_proc(void)
+{
+	xnvfile_init_snapshot("heap", &vfile, &cobalt_vfroot);
+}
+
+void xnheap_cleanup_proc(void)
+{
+	xnvfile_destroy_snapshot(&vfile);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+enum xnheap_pgtype {
+	page_free = 0,
+	page_cont = 1,
+	page_list = 2
+};
+
+static __always_inline u32
+gen_block_mask(int log2size)
+{
+	return -1U >> (32 - (XNHEAP_PAGE_SIZE >> log2size));
+}
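+
+/*
+ * Worked example, assuming XNHEAP_PAGE_SIZE = 512: for log2size = 7
+ * (128-byte blocks), a page holds 512 >> 7 = 4 blocks, so the mask
+ * is -1U >> (32 - 4) = 0xf, i.e. one bit per block within the page.
+ */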
+
+static __always_inline
+int addr_to_pagenr(struct xnheap *heap, void *p)
+{
+	return ((void *)p - heap->membase) >> XNHEAP_PAGE_SHIFT;
+}
+
+static __always_inline
+void *pagenr_to_addr(struct xnheap *heap, int pg)
+{
+	return heap->membase + (pg << XNHEAP_PAGE_SHIFT);
+}
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MEMORY
+/*
+ * Setting page_cont/page_free in the page map is only required for
+ * enabling full checking of the block address in free requests, which
+ * may be extremely time-consuming when deallocating huge blocks
+ * spanning thousands of pages. We only do such marking when running
+ * in memory debug mode.
+ */
+static inline bool
+page_is_valid(struct xnheap *heap, int pg)
+{
+	switch (heap->pagemap[pg].type) {
+	case page_free:
+	case page_cont:
+		return false;
+	case page_list:
+	default:
+		return true;
+	}
+}
+
+static void mark_pages(struct xnheap *heap,
+		       int pg, int nrpages,
+		       enum xnheap_pgtype type)
+{
+	while (nrpages-- > 0)
+		heap->pagemap[pg].type = type;
+}
+
+#else
+
+static inline bool
+page_is_valid(struct xnheap *heap, int pg)
+{
+	return true;
+}
+
+static void mark_pages(struct xnheap *heap,
+		       int pg, int nrpages,
+		       enum xnheap_pgtype type)
+{ }
+
+#endif
+
+static struct xnheap_range *
+search_size_ge(struct rb_root *t, size_t size)
+{
+	struct rb_node *rb, *deepest = NULL;
+	struct xnheap_range *r;
+	
+	/*
+	 * We first try to find an exact match. If that fails, we walk
+	 * the tree in logical order by increasing size value from the
+	 * deepest node traversed until we find the first successor to
+	 * that node, or nothing beyond it, whichever comes first.
+	 */
+	rb = t->rb_node;
+	while (rb) {
+		deepest = rb;
+		r = rb_entry(rb, struct xnheap_range, size_node);
+		if (size < r->size) {
+			rb = rb->rb_left;
+			continue;
+		}
+		if (size > r->size) {
+			rb = rb->rb_right;
+			continue;
+		}
+		return r;
+	}
+
+	rb = deepest;
+	while (rb) {
+		r = rb_entry(rb, struct xnheap_range, size_node);
+		if (size <= r->size)
+			return r;
+		rb = rb_next(rb);
+	}
+
+	return NULL;
+}
+
+static struct xnheap_range *
+search_left_mergeable(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node *node = heap->addr_tree.rb_node;
+	struct xnheap_range *p;
+
+  	while (node) {
+		p = rb_entry(node, struct xnheap_range, addr_node);
+		if ((void *)p + p->size == (void *)r)
+			return p;
+		if (&r->addr_node < node)
+  			node = node->rb_left;
+		else
+  			node = node->rb_right;
+	}
+
+	return NULL;
+}
+
+static struct xnheap_range *
+search_right_mergeable(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node *node = heap->addr_tree.rb_node;
+	struct xnheap_range *p;
+
+  	while (node) {
+		p = rb_entry(node, struct xnheap_range, addr_node);
+		if ((void *)r + r->size == (void *)p)
+			return p;
+		if (&r->addr_node < node)
+  			node = node->rb_left;
+		else
+  			node = node->rb_right;
+	}
+
+	return NULL;
+}
+
+static void insert_range_bysize(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node **new = &heap->size_tree.rb_node, *parent = NULL;
+	struct xnheap_range *p;
+
+  	while (*new) {
+  		p = container_of(*new, struct xnheap_range, size_node);
+		parent = *new;
+  		if (r->size <= p->size)
+  			new = &((*new)->rb_left);
+  		else
+  			new = &((*new)->rb_right);
+  	}
+
+  	rb_link_node(&r->size_node, parent, new);
+  	rb_insert_color(&r->size_node, &heap->size_tree);
+}
+
+static void insert_range_byaddr(struct xnheap *heap, struct xnheap_range *r)
+{
+  	struct rb_node **new = &heap->addr_tree.rb_node, *parent = NULL;
+	struct xnheap_range *p;
+
+  	while (*new) {
+  		p = container_of(*new, struct xnheap_range, addr_node);
+		parent = *new;
+  		if (r < p)
+  			new = &((*new)->rb_left);
+  		else
+  			new = &((*new)->rb_right);
+  	}
+
+  	rb_link_node(&r->addr_node, parent, new);
+  	rb_insert_color(&r->addr_node, &heap->addr_tree);
+}
+
+static int reserve_page_range(struct xnheap *heap, size_t size)
+{
+	struct xnheap_range *new, *splitr;
+
+	/* Find a suitable range of pages covering 'size'. */
+	new = search_size_ge(&heap->size_tree, size);
+	if (new == NULL)
+		return -1;
+
+	rb_erase(&new->size_node, &heap->size_tree);
+	if (new->size == size) {
+		rb_erase(&new->addr_node, &heap->addr_tree);
+		return addr_to_pagenr(heap, new);
+	}
+
+	/*
+	 * The free range fetched is larger than what we need: split
+	 * it in two: the upper part is returned to the caller, while
+	 * the lower part is sent back to the free list, which makes
+	 * reindexing by address pointless.
+	 */
+	splitr = new;
+	splitr->size -= size;
+	new = (struct xnheap_range *)((void *)new + splitr->size);
+	insert_range_bysize(heap, splitr);
+
+	return addr_to_pagenr(heap, new);
+}
+
+static void release_page_range(struct xnheap *heap,
+			       void *page, size_t size)
+{
+	struct xnheap_range *freed = page, *left, *right;
+	bool addr_linked = false;
+
+	freed->size = size;
+
+	left = search_left_mergeable(heap, freed);
+	if (left) {
+		rb_erase(&left->size_node, &heap->size_tree);
+		left->size += freed->size;
+		freed = left;
+		addr_linked = true;
+	}
+
+	right = search_right_mergeable(heap, freed);
+	if (right) {
+		rb_erase(&right->size_node, &heap->size_tree);
+		freed->size += right->size;
+		if (addr_linked)
+			rb_erase(&right->addr_node, &heap->addr_tree);
+		else
+			rb_replace_node(&right->addr_node, &freed->addr_node,
+					&heap->addr_tree);
+	} else if (!addr_linked)
+		insert_range_byaddr(heap, freed);
+
+	insert_range_bysize(heap, freed);
+	mark_pages(heap, addr_to_pagenr(heap, page),
+		   size >> XNHEAP_PAGE_SHIFT, page_free);
+}
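+
+/*
+ * Coalescing example: freeing pages [8..9] while ranges [6..7] and
+ * [10..12] already sit in the free trees yields a single free range
+ * [6..12]. Only the size tree needs reindexing; the merged range
+ * keeps the address-tree node of its leftmost chunk.
+ */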
+
+static void add_page_front(struct xnheap *heap,
+			   int pg, int log2size)
+{
+	struct xnheap_pgentry *new, *head, *next;
+	int ilog;
+
+	/* Insert the page at the front of the per-bucket page list. */
+	
+	ilog = log2size - XNHEAP_MIN_LOG2;
+	new = &heap->pagemap[pg];
+	if (heap->buckets[ilog] == -1U) {
+		heap->buckets[ilog] = pg;
+		new->prev = new->next = pg;
+	} else {
+		head = &heap->pagemap[heap->buckets[ilog]];
+		new->prev = heap->buckets[ilog];
+		new->next = head->next;
+		next = &heap->pagemap[new->next];
+		next->prev = pg;
+		head->next = pg;
+		heap->buckets[ilog] = pg;
+	}
+}
+
+static void remove_page(struct xnheap *heap,
+			int pg, int log2size)
+{
+	struct xnheap_pgentry *old, *prev, *next;
+	int ilog = log2size - XNHEAP_MIN_LOG2;
+
+	/* Remove page from the per-bucket page list. */
+
+	old = &heap->pagemap[pg];
+	if (pg == old->next)
+		heap->buckets[ilog] = -1U;
+	else {
+		if (pg == heap->buckets[ilog])
+			heap->buckets[ilog] = old->next;
+		prev = &heap->pagemap[old->prev];
+		prev->next = old->next;
+		next = &heap->pagemap[old->next];
+		next->prev = old->prev;
+	}
+}
+
+static void move_page_front(struct xnheap *heap,
+			    int pg, int log2size)
+{
+	int ilog = log2size - XNHEAP_MIN_LOG2;
+
+	/* Move the page to the front of the per-bucket page list. */
+	
+	if (heap->buckets[ilog] == pg)
+		return;	 /* Already at front, no move. */
+		
+	remove_page(heap, pg, log2size);
+	add_page_front(heap, pg, log2size);
+}
+
+static void move_page_back(struct xnheap *heap,
+			   int pg, int log2size)
+{
+	struct xnheap_pgentry *old, *last, *head, *next;
+	int ilog;
+
+	/* Move the page to the end of the per-bucket page list. */
+	
+	old = &heap->pagemap[pg];
+	if (pg == old->next) /* Singleton, no move. */
+		return;
+		
+	remove_page(heap, pg, log2size);
+
+	ilog = log2size - XNHEAP_MIN_LOG2;
+	head = &heap->pagemap[heap->buckets[ilog]];
+	last = &heap->pagemap[head->prev];
+	old->prev = head->prev;
+	old->next = last->next;
+	next = &heap->pagemap[old->next];
+	next->prev = pg;
+	last->next = pg;
+}
+
+static void *add_free_range(struct xnheap *heap,
+			    size_t bsize, int log2size)
+{
+	int pg;
+
+	pg = reserve_page_range(heap, ALIGN(bsize, XNHEAP_PAGE_SIZE));
+	if (pg < 0)
+		return NULL;
+	
+	/*
+	 * Update the page entry.  If @log2size is non-zero
+	 * (i.e. bsize < XNHEAP_PAGE_SIZE), bsize is (1 << log2size)
+	 * between 2^XNHEAP_MIN_LOG2 and 2^(XNHEAP_PAGE_SHIFT - 1).
+	 * Save the log2 power into entry.type, then update the
+	 * per-page allocation bitmap to reserve the first block.
+	 *
+	 * Otherwise, we have a larger block which may span multiple
+	 * pages: set entry.type to page_list, indicating the start of
+	 * the page range, and entry.bsize to the overall block size.
+	 */
+	if (log2size) {
+		heap->pagemap[pg].type = log2size;
+		/*
+		 * Mark the first object slot (#0) as busy, along with
+		 * the leftmost bits we won't use for this log2 size.
+		 */
+		heap->pagemap[pg].map = ~gen_block_mask(log2size) | 1;
+		/*
+		 * Insert the new page at the front of the per-bucket page
+		 * list, enforcing the assumption that pages with free
+		 * space live close to the head of this list.
+		 */
+		add_page_front(heap, pg, log2size);
+	} else {
+		heap->pagemap[pg].type = page_list;
+		heap->pagemap[pg].bsize = (u32)bsize;
+		mark_pages(heap, pg + 1,
+			   (bsize >> XNHEAP_PAGE_SHIFT) - 1, page_cont);
+	}
+
+	heap->used_size += bsize;
+
+	return pagenr_to_addr(heap, pg);
+}
+
+/**
+ * @fn void *xnheap_alloc(struct xnheap *heap, size_t size)
+ * @brief Allocate a memory block from a memory heap.
+ *
+ * Allocates a contiguous region of memory from an active memory heap.
+ * Such allocation is guaranteed to be time-bounded.
+ *
+ * @param heap The descriptor address of the heap to get memory from.
+ *
+ * @param size The size in bytes of the requested block.
+ *
+ * @return The address of the allocated region upon success, or NULL
+ * if no memory is available from the specified heap.
+ *
+ * @coretags{unrestricted}
+ */
+void *xnheap_alloc(struct xnheap *heap, size_t size)
+{
+	int log2size, ilog, pg, b = -1;
+	size_t bsize;
+	void *block;
+	spl_t s;
+
+	if (size == 0)
+		return NULL;
+
+	if (size < XNHEAP_MIN_ALIGN) {
+		bsize = size = XNHEAP_MIN_ALIGN;
+		log2size = XNHEAP_MIN_LOG2;
+	} else {
+		log2size = ilog2(size);
+		if (log2size < XNHEAP_PAGE_SHIFT) {
+			if (size & (size - 1))
+				log2size++;
+			bsize = 1 << log2size;
+		} else
+			bsize = ALIGN(size, XNHEAP_PAGE_SIZE);
+	}
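+	/*
+	 * Worked example (illustrative, assuming XNHEAP_PAGE_SHIFT > 7):
+	 * for size = 100, ilog2(100) = 6 and 100 is not a power of two,
+	 * so log2size = 7 and bsize = 128. Requests of XNHEAP_PAGE_SIZE
+	 * bytes or more are instead rounded up to a whole number of
+	 * pages.
+	 */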
+
+	/*
+	 * Allocate entire pages directly from the pool whenever the
+	 * block is larger or equal to XNHEAP_PAGE_SIZE.  Otherwise,
+	 * use bucketed memory.
+	 *
+	 * NOTE: Fully busy pages from bucketed memory are moved back
+	 * at the end of the per-bucket page list, so that we may
+	 * always assume that either the heading page has some room
+	 * available, or no room is available from any page linked to
+	 * this list, in which case we should immediately add a fresh
+	 * page.
+	 */
+	xnlock_get_irqsave(&heap->lock, s);
+
+	if (bsize >= XNHEAP_PAGE_SIZE)
+		/* Add a range of contiguous free pages. */
+		block = add_free_range(heap, bsize, 0);
+	else {
+		ilog = log2size - XNHEAP_MIN_LOG2;
+		XENO_WARN_ON(MEMORY, ilog < 0 || ilog >= XNHEAP_MAX_BUCKETS);
+		pg = heap->buckets[ilog];
+		/*
+		 * Find a block in the heading page if any. If there
+		 * is none, there won't be any down the list: add a
+		 * new page right away.
+		 */
+		if (pg < 0 || heap->pagemap[pg].map == -1U)
+			block = add_free_range(heap, bsize, log2size);
+		else {
+			b = ffs(~heap->pagemap[pg].map) - 1;
+			/*
+			 * Got one block from the heading per-bucket
+			 * page, tag it as busy in the per-page
+			 * allocation map.
+			 */
+			heap->pagemap[pg].map |= (1U << b);
+			heap->used_size += bsize;
+			block = heap->membase +
+				(pg << XNHEAP_PAGE_SHIFT) +
+				(b << log2size);
+			if (heap->pagemap[pg].map == -1U)
+				move_page_back(heap, pg, log2size);
+		}
+	}
+
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return block;
+}
+EXPORT_SYMBOL_GPL(xnheap_alloc);
+
+/**
+ * @fn void xnheap_free(struct xnheap *heap, void *block)
+ * @brief Release a block to a memory heap.
+ *
+ * Releases a memory block to a heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @param block The block to be returned to the heap.
+ *
+ * @coretags{unrestricted}
+ */
+void xnheap_free(struct xnheap *heap, void *block)
+{
+	unsigned long pgoff, boff;
+	int log2size, pg, n;
+	size_t bsize;
+	u32 oldmap;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Compute the heading page number in the page map. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+
+	if (!page_is_valid(heap, pg))
+		goto bad;
+
+	switch (heap->pagemap[pg].type) {
+	case page_list:
+		bsize = heap->pagemap[pg].bsize;
+		XENO_WARN_ON(MEMORY, (bsize & (XNHEAP_PAGE_SIZE - 1)) != 0);
+		release_page_range(heap, pagenr_to_addr(heap, pg), bsize);
+		break;
+
+	default:
+		log2size = heap->pagemap[pg].type;
+		bsize = (1 << log2size);
+		XENO_WARN_ON(MEMORY, bsize >= XNHEAP_PAGE_SIZE);
+		boff = pgoff & ~XNHEAP_PAGE_MASK;
+		if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+			goto bad;
+
+		n = boff >> log2size; /* Block position in page. */
+		oldmap = heap->pagemap[pg].map;
+		heap->pagemap[pg].map &= ~(1U << n);
+
+		/*
+		 * If the page the block was sitting on is fully idle,
+		 * return it to the pool. Otherwise, check whether
+		 * that page is transitioning from fully busy to
+		 * partially busy state, in which case it should move
+		 * toward the front of the per-bucket page list.
+		 */
+		if (heap->pagemap[pg].map == ~gen_block_mask(log2size)) {
+			remove_page(heap, pg, log2size);
+			release_page_range(heap, pagenr_to_addr(heap, pg),
+					   XNHEAP_PAGE_SIZE);
+		} else if (oldmap == -1U)
+			move_page_front(heap, pg, log2size);
+	}
+
+	heap->used_size -= bsize;
+
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return;
+bad:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	XENO_WARN(MEMORY, 1, "invalid block %p in heap %s",
+		  block, heap->name);
+}
+EXPORT_SYMBOL_GPL(xnheap_free);
+
+ssize_t xnheap_check_block(struct xnheap *heap, void *block)
+{
+	unsigned long pg, pgoff, boff;
+	ssize_t ret = -EINVAL;
+	size_t bsize;
+	spl_t s;
+
+	xnlock_get_irqsave(&heap->lock, s);
+
+	/* Calculate the page number from the block address. */
+	pgoff = block - heap->membase;
+	pg = pgoff >> XNHEAP_PAGE_SHIFT;
+	if (page_is_valid(heap, pg)) {
+		if (heap->pagemap[pg].type == page_list)
+			bsize = heap->pagemap[pg].bsize;
+		else {
+			bsize = (1 << heap->pagemap[pg].type);
+			boff = pgoff & ~XNHEAP_PAGE_MASK;
+			if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+				goto out;
+		}
+		ret = (ssize_t)bsize;
+	}
+out:
+	xnlock_put_irqrestore(&heap->lock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnheap_check_block);
+
+/**
+ * @fn xnheap_init(struct xnheap *heap, void *membase, size_t size)
+ * @brief Initialize a memory heap.
+ *
+ * Initializes a memory heap suitable for time-bounded allocation
+ * requests of dynamic memory.
+ *
+ * @param heap The address of a heap descriptor to initialize.
+ *
+ * @param membase The address of the storage area.
+ *
+ * @param size The size in bytes of the storage area.  @a size must be
+ * a multiple of PAGE_SIZE and smaller than (4GB - PAGE_SIZE) in the
+ * current implementation.
+ *
+ * @return 0 is returned upon success, or:
+ *
+ * - -EINVAL is returned if @a size is either greater than
+ *   XNHEAP_MAX_HEAPSZ, or not aligned on PAGE_SIZE.
+ *
+ * - -ENOMEM is returned upon failure of allocating the meta-data area
+ * used internally to maintain the heap.
+ *
+ * @coretags{secondary-only}
+ */
+int xnheap_init(struct xnheap *heap, void *membase, size_t size)
+{
+	int n, nrpages;
+	spl_t s;
+
+	secondary_mode_only();
+
+	if (size > XNHEAP_MAX_HEAPSZ || !PAGE_ALIGNED(size))
+		return -EINVAL;
+
+	/* Reset bucket page lists, all empty. */
+	for (n = 0; n < XNHEAP_MAX_BUCKETS; n++)
+		heap->buckets[n] = -1U;
+
+	xnlock_init(&heap->lock);
+
+	nrpages = size >> XNHEAP_PAGE_SHIFT;
+	heap->pagemap = vzalloc(sizeof(struct xnheap_pgentry) * nrpages);
+	if (heap->pagemap == NULL)
+		return -ENOMEM;
+
+	heap->membase = membase;
+	heap->usable_size = size;
+	heap->used_size = 0;
+
+	/*
+	 * The free page pool is maintained as a set of ranges of
+	 * contiguous pages indexed by address and size in rbtrees.
+	 * Initially, we have a single range in those trees covering
+	 * the whole memory we have been given for the heap. Over
+	 * time, that range will be split then possibly re-merged back
+	 * as allocations and deallocations take place.
+	 */
+	heap->size_tree = RB_ROOT;
+	heap->addr_tree = RB_ROOT;
+	release_page_range(heap, membase, size);
+
+	/* Default name, override with xnheap_set_name() */
+	ksformat(heap->name, sizeof(heap->name), "(%p)", heap);
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&heap->next, &heapq);
+	nrheaps++;
+	xnvfile_touch_tag(&vfile_tag);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnheap_init);
+
+/**
+ * @fn void xnheap_destroy(struct xnheap *heap)
+ * @brief Destroys a memory heap.
+ *
+ * Destroys a memory heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void xnheap_destroy(struct xnheap *heap)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xnlock_get_irqsave(&nklock, s);
+	list_del(&heap->next);
+	nrheaps--;
+	xnvfile_touch_tag(&vfile_tag);
+	xnlock_put_irqrestore(&nklock, s);
+	vfree(heap->pagemap);
+}
+EXPORT_SYMBOL_GPL(xnheap_destroy);
+
+/**
+ * @fn xnheap_set_name(struct xnheap *heap, const char *name, ...)
+ * @brief Set the heap's name string.
+ *
+ * Set the heap name that will be used in statistic outputs.
+ *
+ * @param heap The address of a heap descriptor.
+ *
+ * @param name Name displayed in statistic outputs. This parameter can
+ * be a printk()-like format argument list.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnheap_set_name(struct xnheap *heap, const char *name, ...)
+{
+	va_list args;
+
+	va_start(args, name);
+	kvsformat(heap->name, sizeof(heap->name), name, args);
+	va_end(args);
+}
+EXPORT_SYMBOL_GPL(xnheap_set_name);
+
+void *xnheap_vmalloc(size_t size)
+{
+	/*
+	 * We want memory used in real-time context to be pulled from
+	 * ZONE_NORMAL, however we don't need it to be physically
+	 * contiguous.
+	 *
+	 * 32bit systems which would need HIGHMEM for running a Cobalt
+	 * configuration would also be required to support PTE
+	 * pinning, which not all architectures provide.  Moreover,
+	 * pinning PTEs eagerly for a potentially (very) large amount
+	 * of memory may quickly degrade performance.
+	 *
+	 * If using a different kernel/user memory split cannot be the
+	 * answer for those configs, it's likely that basing such
+	 * software on a 32bit system had to be wrong in the first
+	 * place anyway.
+	 */
+	return vmalloc_kernel(size, 0);
+}
+EXPORT_SYMBOL_GPL(xnheap_vmalloc);
+
+void xnheap_vfree(void *p)
+{
+	vfree(p);
+}
+EXPORT_SYMBOL_GPL(xnheap_vfree);
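+
+/*
+ * Typical usage (illustrative sketch mirroring sys_init() below;
+ * error handling elided, HEAP_KB is a hypothetical size in kbytes
+ * such that HEAP_KB * 1024 is a multiple of PAGE_SIZE):
+ *
+ *	struct xnheap heap;
+ *	void *mem = xnheap_vmalloc(HEAP_KB * 1024);
+ *	void *p;
+ *
+ *	xnheap_init(&heap, mem, HEAP_KB * 1024);
+ *	xnheap_set_name(&heap, "example heap");
+ *	p = xnheap_alloc(&heap, 64);
+ *	if (p)
+ *		xnheap_free(&heap, p);
+ *	xnheap_destroy(&heap);
+ *	xnheap_vfree(mem);
+ */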
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h
new file mode 100644
index 0000000..8939e45
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/dovetail/thread.h
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ */
+#ifndef _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H
+#define _COBALT_ASM_GENERIC_DOVETAIL_THREAD_H
+
+#include <linux/dovetail.h>
+
+struct xnarchtcb {
+	struct dovetail_altsched_context altsched;
+};
+
+static inline
+struct task_struct *xnarch_host_task(struct xnarchtcb *tcb)
+{
+	return tcb->altsched.task;
+}
+
+#endif /* !_COBALT_ASM_GENERIC_DOVETAIL_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h
new file mode 100644
index 0000000..fcd7275
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/ipipe/thread.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_IPIPE_THREAD_H
+#define _COBALT_ASM_GENERIC_IPIPE_THREAD_H
+
+#include <asm/ptrace.h>
+#include <asm/processor.h>
+
+struct task_struct;
+
+struct xntcb {
+	struct task_struct *host_task;
+	struct thread_struct *tsp;
+	struct mm_struct *mm;
+	struct mm_struct *active_mm;
+	struct thread_struct ts;
+	struct thread_info *tip;
+#ifdef CONFIG_XENO_ARCH_FPU
+	struct task_struct *user_fpu_owner;
+#endif
+};
+
+#endif /* !_COBALT_ASM_GENERIC_IPIPE_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h
new file mode 100644
index 0000000..f45e523
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/machine.h
@@ -0,0 +1,28 @@
+/**
+ *   Copyright &copy; 2012 Philippe Gerum.
+ *
+ *   Xenomai is free software; you can redistribute it and/or modify
+ *   it under the terms of the GNU General Public License as published by
+ *   the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ *   USA; either version 2 of the License, or (at your option) any later
+ *   version.
+ *
+ *   This program is distributed in the hope that it will be useful,
+ *   but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *   GNU General Public License for more details.
+ *
+ *   You should have received a copy of the GNU General Public License
+ *   along with this program; if not, write to the Free Software
+ *   Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_MACHINE_H
+#define _COBALT_ASM_GENERIC_MACHINE_H
+
+#include <pipeline/machine.h>
+
+#ifndef xnarch_cache_aliasing
+#define xnarch_cache_aliasing()  0
+#endif
+
+#endif /* !_COBALT_ASM_GENERIC_MACHINE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h
new file mode 100644
index 0000000..1a6c308
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/pci_ids.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_PCI_IDS_H
+#define _COBALT_ASM_GENERIC_PCI_IDS_H
+
+#include <linux/pci_ids.h>
+
+/* SMI */
+#ifndef PCI_DEVICE_ID_INTEL_ESB2_0
+#define PCI_DEVICE_ID_INTEL_ESB2_0 0x2670
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH7_0
+#define PCI_DEVICE_ID_INTEL_ICH7_0 0x27b8
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH7_1
+#define PCI_DEVICE_ID_INTEL_ICH7_1 0x27b9
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH8_4
+#define PCI_DEVICE_ID_INTEL_ICH8_4 0x2815
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH9_1
+#define PCI_DEVICE_ID_INTEL_ICH9_1 0x2917
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH9_5
+#define PCI_DEVICE_ID_INTEL_ICH9_5 0x2919
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_ICH10_1
+#define PCI_DEVICE_ID_INTEL_ICH10_1 0x3a16
+#endif
+#ifndef PCI_DEVICE_ID_INTEL_PCH_LPC_MIN
+#define PCI_DEVICE_ID_INTEL_PCH_LPC_MIN 0x3b00
+#endif
+
+/* RTCAN */
+#ifndef PCI_VENDOR_ID_ESDGMBH
+#define PCI_VENDOR_ID_ESDGMBH 0x12fe
+#endif
+#ifndef PCI_DEVICE_ID_PLX_9030
+#define PCI_DEVICE_ID_PLX_9030 0x9030
+#endif
+#ifndef PCI_DEVICE_ID_PLX_9056
+#define PCI_DEVICE_ID_PLX_9056 0x9056
+#endif
+
+#endif /* _COBALT_ASM_GENERIC_PCI_IDS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h
new file mode 100644
index 0000000..117bb3f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall.h
@@ -0,0 +1,154 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004,2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL_H
+#define _COBALT_ASM_GENERIC_SYSCALL_H
+
+#include <linux/types.h>
+#include <linux/version.h>
+#include <linux/uaccess.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/wrappers.h>
+#include <asm/xenomai/machine.h>
+#include <cobalt/uapi/asm-generic/syscall.h>
+#include <cobalt/uapi/kernel/types.h>
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 0, 0)
+#define access_rok(addr, size)	access_ok((addr), (size))
+#define access_wok(addr, size)	access_ok((addr), (size))
+#else
+#define access_rok(addr, size)	access_ok(VERIFY_READ, (addr), (size))
+#define access_wok(addr, size)	access_ok(VERIFY_WRITE, (addr), (size))
+#endif
+
+#define __xn_copy_from_user(dstP, srcP, n)	raw_copy_from_user(dstP, srcP, n)
+#define __xn_copy_to_user(dstP, srcP, n)	raw_copy_to_user(dstP, srcP, n)
+#define __xn_put_user(src, dstP)		__put_user(src, dstP)
+#define __xn_get_user(dst, srcP)		__get_user(dst, srcP)
+#define __xn_strncpy_from_user(dstP, srcP, n)	strncpy_from_user(dstP, srcP, n)
+
+static inline int cobalt_copy_from_user(void *dst, const void __user *src,
+					size_t size)
+{
+	size_t remaining = size;
+
+	if (likely(access_rok(src, size)))
+		remaining = __xn_copy_from_user(dst, src, size);
+
+	if (unlikely(remaining > 0)) {
+		memset(dst + (size - remaining), 0, remaining);
+		return -EFAULT;
+	}
+	return 0;
+}
+
+static inline int cobalt_copy_to_user(void __user *dst, const void *src,
+				      size_t size)
+{
+	if (unlikely(!access_wok(dst, size) ||
+	    __xn_copy_to_user(dst, src, size)))
+		return -EFAULT;
+	return 0;
+}
+
+static inline int cobalt_strncpy_from_user(char *dst, const char __user *src,
+					   size_t count)
+{
+	if (unlikely(!access_rok(src, 1)))
+		return -EFAULT;
+
+	return __xn_strncpy_from_user(dst, src, count);
+}
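+
+/*
+ * Example (illustrative): fetching a fixed-size argument block from a
+ * user pointer in a syscall handler; u_arg and struct foo_arg are
+ * hypothetical.
+ *
+ *	struct foo_arg arg;
+ *
+ *	if (cobalt_copy_from_user(&arg, u_arg, sizeof(arg)))
+ *		return -EFAULT;
+ */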
+
+/*
+ * NOTE: these copy helpers won't work in compat mode: use
+ * sys32_get_*(), sys32_put_*() instead.
+ */
+
+static inline int cobalt_get_u_timespec(struct timespec64 *dst,
+			const struct __user_old_timespec __user *src)
+{
+	struct __user_old_timespec u_ts;
+	int ret;
+
+	ret = cobalt_copy_from_user(&u_ts, src, sizeof(u_ts));
+	if (ret)
+		return ret;
+
+	dst->tv_sec = u_ts.tv_sec;
+	dst->tv_nsec = u_ts.tv_nsec;
+
+	return 0;
+}
+
+static inline int cobalt_put_u_timespec(
+	struct __user_old_timespec __user *dst,
+	const struct timespec64 *src)
+{
+	struct __user_old_timespec u_ts;
+	int ret;
+
+	u_ts.tv_sec = src->tv_sec;
+	u_ts.tv_nsec = src->tv_nsec;
+
+	ret = cobalt_copy_to_user(dst, &u_ts, sizeof(*dst));
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static inline int cobalt_get_u_itimerspec(struct itimerspec64 *dst,
+			const struct __user_old_itimerspec __user *src)
+{
+	struct __user_old_itimerspec u_its;
+	int ret;
+
+	ret = cobalt_copy_from_user(&u_its, src, sizeof(u_its));
+	if (ret)
+		return ret;
+
+	dst->it_interval.tv_sec = u_its.it_interval.tv_sec;
+	dst->it_interval.tv_nsec = u_its.it_interval.tv_nsec;
+	dst->it_value.tv_sec = u_its.it_value.tv_sec;
+	dst->it_value.tv_nsec = u_its.it_value.tv_nsec;
+
+	return 0;
+}
+
+static inline int cobalt_put_u_itimerspec(
+	struct __user_old_itimerspec __user *dst,
+	const struct itimerspec64 *src)
+{
+	struct __user_old_itimerspec u_its;
+
+	u_its.it_interval.tv_sec = src->it_interval.tv_sec;
+	u_its.it_interval.tv_nsec = src->it_interval.tv_nsec;
+	u_its.it_value.tv_sec = src->it_value.tv_sec;
+	u_its.it_value.tv_nsec = src->it_value.tv_nsec;
+
+	return cobalt_copy_to_user(dst, &u_its, sizeof(*dst));
+}
+
+/* 32bit syscall emulation */
+#define __COBALT_COMPAT_BIT	0x1
+/* 32bit syscall emulation - extended form */
+#define __COBALT_COMPATX_BIT	0x2
+
+#endif /* !_COBALT_ASM_GENERIC_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h
new file mode 100644
index 0000000..b0c6f4a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/syscall32.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_SYSCALL32_H
+#define _COBALT_ASM_GENERIC_SYSCALL32_H
+
+#define __COBALT_CALL32_INITHAND(__handler)
+
+#define __COBALT_CALL32_INITMODE(__mode)
+
+#define __COBALT_CALL32_ENTRY(__name, __handler)
+
+#define __COBALT_CALL_COMPAT(__reg)	0
+
+#endif /* !_COBALT_ASM_GENERIC_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h
new file mode 100644
index 0000000..7654047
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/asm-generic/xenomai/wrappers.h
@@ -0,0 +1,270 @@
+/*
+ * Copyright (C) 2005-2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_ASM_GENERIC_WRAPPERS_H
+
+#include <linux/xenomai/wrappers.h>
+
+#define COBALT_BACKPORT(__sym) __cobalt_backport_ ##__sym
+
+/*
+ * To keep the #ifdefery as readable as possible, please:
+ *
+ * - keep the conditional structure flat, no nesting (e.g. do not fold
+ *   the pre-3.11 conditions into the pre-3.14 ones).
+ * - group all wrappers for a single kernel revision.
+ * - list conditional blocks in order of kernel release, latest first
+ * - identify the first kernel release for which the wrapper should
+ *   be defined, instead of testing the existence of a preprocessor
+ *   symbol, so that obsolete wrappers can be spotted.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#define raw_copy_to_user(__to, __from, __n)	__copy_to_user_inatomic(__to, __from, __n)
+#define raw_copy_from_user(__to, __from, __n)	__copy_from_user_inatomic(__to, __from, __n)
+#define raw_put_user(__from, __to)		__put_user_inatomic(__from, __to)
+#define raw_get_user(__to, __from)		__get_user_inatomic(__to, __from)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,6,0)
+#define in_ia32_syscall() (current_thread_info()->status & TS_COMPAT)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,5,0)
+#define cobalt_gpiochip_dev(__gc)	((__gc)->dev)
+#else
+#define cobalt_gpiochip_dev(__gc)	((__gc)->parent)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,0,0)
+#define cobalt_get_restart_block(p)	(&task_thread_info(p)->restart_block)
+#else
+#define cobalt_get_restart_block(p)	(&(p)->restart_block)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,19,0)
+#define user_msghdr msghdr
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,17,0)
+#include <linux/netdevice.h>
+
+#undef alloc_netdev
+#define alloc_netdev(sizeof_priv, name, name_assign_type, setup) \
+	alloc_netdev_mqs(sizeof_priv, name, setup, 1, 1)
+
+#include <linux/trace_seq.h>
+
+static inline unsigned char *
+trace_seq_buffer_ptr(struct trace_seq *s)
+{
+	return s->buffer + s->len;
+}
+
+#endif /* < 3.17 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,16,0)
+#define smp_mb__before_atomic()  smp_mb()
+#define smp_mb__after_atomic()   smp_mb()
+#endif /* < 3.16 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,15,0)
+#define raw_cpu_ptr(v)	__this_cpu_ptr(v)
+#endif /* < 3.15 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#include <linux/pci.h>
+
+#ifdef CONFIG_PCI
+#define pci_enable_msix_range COBALT_BACKPORT(pci_enable_msix_range)
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec);
+#else /* !CONFIG_PCI_MSI */
+static inline
+int pci_enable_msix_range(struct pci_dev *dev,
+			  struct msix_entry *entries,
+			  int minvec, int maxvec)
+{
+	return -ENOSYS;
+}
+#endif /* !CONFIG_PCI_MSI */
+#endif /* CONFIG_PCI */
+#endif /* < 3.14 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
+#include <linux/dma-mapping.h>
+#include <linux/hwmon.h>
+
+#define dma_set_mask_and_coherent COBALT_BACKPORT(dma_set_mask_and_coherent)
+static inline
+int dma_set_mask_and_coherent(struct device *dev, u64 mask)
+{
+	int rc = dma_set_mask(dev, mask);
+	if (rc == 0)
+		dma_set_coherent_mask(dev, mask);
+	return rc;
+}
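+
+/*
+ * Example (illustrative, pdev being a hypothetical struct pci_dev *):
+ *
+ *	if (dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32)))
+ *		return -EIO;
+ */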
+
+#ifdef CONFIG_HWMON
+#define hwmon_device_register_with_groups \
+	COBALT_BACKPORT(hwmon_device_register_with_groups)
+struct device *
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+
+#define devm_hwmon_device_register_with_groups \
+	COBALT_BACKPORT(devm_hwmon_device_register_with_groups)
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups);
+#endif /* !CONFIG_HWMON */
+
+#define reinit_completion(__x)	INIT_COMPLETION(*(__x))
+
+#endif /* < 3.13 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,11,0)
+#define DEVICE_ATTR_RW(_name)	__ATTR_RW(_name)
+#define DEVICE_ATTR_RO(_name)	__ATTR_RO(_name)
+#define DEVICE_ATTR_WO(_name)	__ATTR_WO(_name)
+#endif /* < 3.11 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,10,0)
+#error "Xenomai/cobalt requires Linux kernel 3.10 or above"
+#endif /* < 3.10 */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,0,0)
+#define __kernel_timex		timex
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,1,0)
+#define old_timex32		compat_timex
+#define SO_RCVTIMEO_OLD		SO_RCVTIMEO
+#define SO_SNDTIMEO_OLD		SO_SNDTIMEO
+#endif
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,2,0)
+#define mmiowb()		do { } while (0)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#define __kernel_old_timeval	timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,208) || \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) && \
+     LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0))
+#define mmap_read_lock(__mm)	down_read(&(__mm)->mmap_sem)
+#define mmap_read_unlock(__mm)	up_read(&(__mm)->mmap_sem)
+#define mmap_write_lock(__mm)	down_write(&(__mm)->mmap_sem)
+#define mmap_write_unlock(__mm)	up_write(&(__mm)->mmap_sem)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,6,0)
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write) \
+	struct file_operations __name = {			    \
+		.open = (__open),				    \
+		.release = (__release),				    \
+		.read = (__read),				    \
+		.write = (__write),				    \
+		.llseek = seq_lseek,				    \
+}
+#else
+#define DEFINE_PROC_OPS(__name, __open, __release, __read, __write)	\
+	struct proc_ops __name = {					\
+		.proc_open = (__open),					\
+		.proc_release = (__release),				\
+		.proc_read = (__read),					\
+		.proc_write = (__write),				\
+		.proc_lseek = seq_lseek,				\
+}
+#endif
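+
+/*
+ * Example (illustrative): a single declaration builds on struct
+ * file_operations before kernel 5.6 and on struct proc_ops from 5.6
+ * onwards; foo_ops and foo_open are hypothetical.
+ *
+ *	static DEFINE_PROC_OPS(foo_ops, foo_open, single_release,
+ *			       seq_read, NULL);
+ */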
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,8,0)
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags, PAGE_KERNEL)
+#else
+#define vmalloc_kernel(__size, __flags)	__vmalloc(__size, GFP_KERNEL|__flags)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,7,0)
+#define pci_aer_clear_nonfatal_status	pci_cleanup_aer_uncorrect_error_status
+#define old_timespec32    compat_timespec
+#define old_itimerspec32  compat_itimerspec
+#define old_timeval32     compat_timeval
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,9,0)
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	({								\
+		loff_t ___file_size;					\
+		int __ret;						\
+		__ret = kernel_read_file(__file, __buf, &___file_size,	\
+				__buf_size, __id);			\
+		(*__file_size) = ___file_size;				\
+		__ret;							\
+	})
+#else
+#define read_file_from_kernel(__file, __buf, __buf_size, __file_size, __id) \
+	kernel_read_file(__file, 0, __buf, __buf_size, __file_size, __id)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#if __has_attribute(__fallthrough__)
+# define fallthrough			__attribute__((__fallthrough__))
+#else
+# define fallthrough			do {} while (0)  /* fallthrough */
+#endif
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,11,0)
+#define IRQ_WORK_INIT(_func) (struct irq_work) {	\
+	.flags = ATOMIC_INIT(0),			\
+	.func = (_func),				\
+}
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,14,0)
+#define close_fd(__ufd)	__close_fd(current->files, __ufd)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,15,0) && \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5,11,0) || \
+     LINUX_VERSION_CODE < KERNEL_VERSION(5,10,188)) && \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(5,5,0) || \
+     LINUX_VERSION_CODE < KERNEL_VERSION(5,4,251)) && \
+    (LINUX_VERSION_CODE >= KERNEL_VERSION(4,20,0) || \
+     LINUX_VERSION_CODE < KERNEL_VERSION(4,19,291))
+#define dev_addr_set(dev, addr)		memcpy((dev)->dev_addr, addr, MAX_ADDR_LEN)
+#define eth_hw_addr_set(dev, addr)	memcpy((dev)->dev_addr, addr, ETH_ALEN)
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,17,0)
+#define pde_data(i)	PDE_DATA(i)
+#endif
+
+#endif /* _COBALT_ASM_GENERIC_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h
new file mode 100644
index 0000000..66d020f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/irq.h
@@ -0,0 +1,52 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2017 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#ifndef _COBALT_DOVETAIL_IRQ_H
+#define _COBALT_DOVETAIL_IRQ_H
+
+#ifdef CONFIG_XENOMAI
+
+#include <cobalt/kernel/sched.h>
+
+/* hard irqs off. */
+static inline void irq_enter_pipeline(void)
+{
+	struct xnsched *sched = xnsched_current();
+
+	sched->lflags |= XNINIRQ;
+}
+
+/* hard irqs off. */
+static inline void irq_exit_pipeline(void)
+{
+	struct xnsched *sched = xnsched_current();
+
+	sched->lflags &= ~XNINIRQ;
+
+	/*
+	 * CAUTION: Switching stages as a result of rescheduling may
+	 * re-enable irqs, shut them off before returning if so.
+	 */
+	if ((sched->status|sched->lflags) & XNRESCHED) {
+		xnsched_run();
+		if (!hard_irqs_disabled())
+			hard_local_irq_disable();
+	}
+}
+
+#else  /* !CONFIG_XENOMAI */
+
+static inline void irq_enter_pipeline(void)
+{
+}
+
+static inline void irq_exit_pipeline(void)
+{
+}
+
+#endif	/* !CONFIG_XENOMAI */
+
+#endif /* !_COBALT_DOVETAIL_IRQ_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h
new file mode 100644
index 0000000..69b89de
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/dovetail/thread_info.h
@@ -0,0 +1,33 @@
+/**
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (c) Siemens AG, 2020
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_DOVETAIL_THREAD_INFO_H
+#define _COBALT_DOVETAIL_THREAD_INFO_H
+
+struct xnthread;
+struct cobalt_process;
+
+struct oob_thread_state {
+	/* Core thread backlink. */
+	struct xnthread *thread;
+	/* User process backlink. NULL for core threads. */
+	struct cobalt_process *process;
+};
+
+#endif /* !_COBALT_DOVETAIL_THREAD_INFO_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h
new file mode 100644
index 0000000..3fc467a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/ipipe/thread_info.h
@@ -0,0 +1,38 @@
+/**
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA 02139,
+ * USA; either version 2 of the License, or (at your option) any later
+ * version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_IPIPE_THREAD_INFO_H
+#define _COBALT_IPIPE_THREAD_INFO_H
+
+struct xnthread;
+struct cobalt_process;
+
+struct ipipe_threadinfo {
+	/* Core thread backlink. */
+	struct xnthread *thread;
+	/* User process backlink. NULL for core threads. */
+	struct cobalt_process *process;
+};
+
+static inline void __ipipe_init_threadinfo(struct ipipe_threadinfo *p)
+{
+	p->thread = NULL;
+	p->process = NULL;
+}
+
+#endif /* !_COBALT_IPIPE_THREAD_INFO_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h
new file mode 100644
index 0000000..38ade6d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/stdarg.h
@@ -0,0 +1 @@
+#include <stdarg.h>
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h
new file mode 100644
index 0000000..349123c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/include/linux/xenomai/wrappers.h
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _COBALT_LINUX_WRAPPERS_H
+#define _COBALT_LINUX_WRAPPERS_H
+
+#include <linux/version.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 20, 0)
+#include <linux/signal.h>
+typedef siginfo_t kernel_siginfo_t;
+#endif
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4,11,0)
+#include <linux/sched.h>
+#include <linux/sched/rt.h>
+#else
+#include <linux/sched.h>
+#include <linux/sched/signal.h>
+#include <linux/sched/rt.h>
+#include <linux/sched/mm.h>
+#include <linux/sched/debug.h>
+#include <linux/sched/task_stack.h>
+#include <uapi/linux/sched/types.h>
+#endif
+
+#include <pipeline/wrappers.h>
+
+#endif /* !_COBALT_LINUX_WRAPPERS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/init.c b/kernel/xenomai-v3.2.4/kernel/cobalt/init.c
new file mode 100644
index 0000000..5168b56
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/init.c
@@ -0,0 +1,325 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/init.h>
+#include <linux/module.h>
+#include <xenomai/version.h>
+#include <pipeline/machine.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/pipe.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/vdso.h>
+#include <rtdm/fd.h>
+#include "rtdm/internal.h"
+#include "posix/internal.h"
+#include "procfs.h"
+
+/**
+ * @defgroup cobalt Cobalt
+ *
+ * Cobalt supplements the native Linux kernel in dual kernel
+ * configurations. It deals with all time-critical activities, such as
+ * handling interrupts and scheduling real-time threads. The Cobalt
+ * kernel takes priority over all native kernel activities.
+ *
+ * Cobalt provides an implementation of the POSIX and RTDM interfaces
+ * based on a set of generic RTOS building blocks.
+ */
+
+#ifdef CONFIG_SMP
+static unsigned long supported_cpus_arg = -1;
+module_param_named(supported_cpus, supported_cpus_arg, ulong, 0444);
+#endif /* CONFIG_SMP */
+
+static unsigned long sysheap_size_arg;
+module_param_named(sysheap_size, sysheap_size_arg, ulong, 0444);
+
+static char init_state_arg[16] = "enabled";
+module_param_string(state, init_state_arg, sizeof(init_state_arg), 0444);
+
+static BLOCKING_NOTIFIER_HEAD(state_notifier_list);
+
+struct cobalt_pipeline cobalt_pipeline;
+EXPORT_SYMBOL_GPL(cobalt_pipeline);
+
+DEFINE_PER_CPU(struct cobalt_machine_cpudata, cobalt_machine_cpudata);
+EXPORT_PER_CPU_SYMBOL_GPL(cobalt_machine_cpudata);
+
+atomic_t cobalt_runstate = ATOMIC_INIT(COBALT_STATE_WARMUP);
+EXPORT_SYMBOL_GPL(cobalt_runstate);
+
+struct cobalt_ppd cobalt_kernel_ppd = {
+	.exe_path = "vmlinux",
+};
+EXPORT_SYMBOL_GPL(cobalt_kernel_ppd);
+
+#ifdef CONFIG_XENO_OPT_DEBUG
+#define boot_debug_notice "[DEBUG]"
+#else
+#define boot_debug_notice ""
+#endif
+
+#ifdef CONFIG_IPIPE_TRACE
+#define boot_lat_trace_notice "[LTRACE]"
+#else
+#define boot_lat_trace_notice ""
+#endif
+
+#ifdef CONFIG_ENABLE_DEFAULT_TRACERS
+#define boot_evt_trace_notice "[ETRACE]"
+#else
+#define boot_evt_trace_notice ""
+#endif
+
+#define boot_state_notice						\
+	({								\
+		realtime_core_state() == COBALT_STATE_STOPPED ?		\
+			"[STOPPED]" : "";				\
+	})
+
+void cobalt_add_state_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&state_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_add_state_chain);
+
+void cobalt_remove_state_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&state_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_remove_state_chain);
+
+void cobalt_call_state_chain(enum cobalt_run_states newstate)
+{
+	blocking_notifier_call_chain(&state_notifier_list, newstate, NULL);
+}
+EXPORT_SYMBOL_GPL(cobalt_call_state_chain);
+
+static void sys_shutdown(void)
+{
+	void *membase;
+
+	pipeline_uninstall_tick_proxy();
+	xnsched_destroy_all();
+	xnregistry_cleanup();
+	membase = xnheap_get_membase(&cobalt_heap);
+	xnheap_destroy(&cobalt_heap);
+	xnheap_vfree(membase);
+}
+
+static struct {
+	const char *label;
+	enum cobalt_run_states state;
+} init_states[] __initdata = {
+	{ "disabled", COBALT_STATE_DISABLED },
+	{ "stopped", COBALT_STATE_STOPPED },
+	{ "enabled", COBALT_STATE_WARMUP },
+};
+
+static void __init setup_init_state(void)
+{
+	static char warn_bad_state[] __initdata =
+		XENO_WARNING "invalid init state '%s'\n";
+	int n;
+
+	for (n = 0; n < ARRAY_SIZE(init_states); n++)
+		if (strcmp(init_states[n].label, init_state_arg) == 0) {
+			set_realtime_core_state(init_states[n].state);
+			return;
+		}
+
+	printk(warn_bad_state, init_state_arg);
+}
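+
+/*
+ * e.g. booting with "xenomai.state=stopped" on the kernel command
+ * line makes setup_init_state() select COBALT_STATE_STOPPED.
+ */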
+
+static __init int sys_init(void)
+{
+	void *heapaddr;
+	int ret;
+
+	if (sysheap_size_arg == 0)
+		sysheap_size_arg = CONFIG_XENO_OPT_SYS_HEAPSZ;
+
+	heapaddr = xnheap_vmalloc(sysheap_size_arg * 1024);
+	if (heapaddr == NULL ||
+	    xnheap_init(&cobalt_heap, heapaddr, sysheap_size_arg * 1024)) {
+		return -ENOMEM;
+	}
+	xnheap_set_name(&cobalt_heap, "system heap");
+
+	xnsched_init_all();
+
+	xnregistry_init();
+
+	/*
+	 * If starting in stopped mode, do all initializations, but do
+	 * not enable the core timer.
+	 */
+	if (realtime_core_state() == COBALT_STATE_WARMUP) {
+		ret = pipeline_install_tick_proxy();
+		if (ret) {
+			sys_shutdown();
+			return ret;
+		}
+		set_realtime_core_state(COBALT_STATE_RUNNING);
+	}
+
+	return 0;
+}
+
+static int __init xenomai_init(void)
+{
+	int ret, __maybe_unused cpu;
+
+	setup_init_state();
+
+	if (!realtime_core_enabled()) {
+		printk(XENO_WARNING "disabled on kernel command line\n");
+		return 0;
+	}
+
+#ifdef CONFIG_SMP
+	cpumask_clear(&xnsched_realtime_cpus);
+	for_each_online_cpu(cpu) {
+		if (supported_cpus_arg & (1UL << cpu))
+			cpumask_set_cpu(cpu, &xnsched_realtime_cpus);
+	}
+	if (cpumask_empty(&xnsched_realtime_cpus)) {
+		printk(XENO_WARNING "disabled via empty real-time CPU mask\n");
+		set_realtime_core_state(COBALT_STATE_DISABLED);
+		return 0;
+	}
+	if (!cpumask_test_cpu(0, &xnsched_realtime_cpus)) {
+		printk(XENO_ERR "CPU 0 is missing in real-time CPU mask\n");
+		set_realtime_core_state(COBALT_STATE_DISABLED);
+		return -EINVAL;
+	}
+	cobalt_cpu_affinity = xnsched_realtime_cpus;
+#endif /* CONFIG_SMP */
+
+	xnsched_register_classes();
+
+	ret = xnprocfs_init_tree();
+	if (ret)
+		goto fail;
+
+	ret = pipeline_init();
+	if (ret)
+		goto cleanup_proc;
+
+	xnintr_mount();
+
+	ret = xnpipe_mount();
+	if (ret)
+		goto cleanup_mach;
+
+	ret = xnselect_mount();
+	if (ret)
+		goto cleanup_pipe;
+
+	ret = sys_init();
+	if (ret)
+		goto cleanup_select;
+
+	ret = pipeline_late_init();
+	if (ret)
+		goto cleanup_sys;
+
+	ret = rtdm_init();
+	if (ret)
+		goto cleanup_sys;
+
+	ret = cobalt_init();
+	if (ret)
+		goto cleanup_rtdm;
+
+	rtdm_fd_init();
+
+	printk(XENO_INFO "Cobalt v%s %s%s%s%s\n",
+	       XENO_VERSION_STRING,
+	       boot_debug_notice,
+	       boot_lat_trace_notice,
+	       boot_evt_trace_notice,
+	       boot_state_notice);
+
+	return 0;
+
+cleanup_rtdm:
+	rtdm_cleanup();
+cleanup_sys:
+	sys_shutdown();
+cleanup_select:
+	xnselect_umount();
+cleanup_pipe:
+	xnpipe_umount();
+cleanup_mach:
+	pipeline_cleanup();
+cleanup_proc:
+	xnprocfs_cleanup_tree();
+fail:
+	set_realtime_core_state(COBALT_STATE_DISABLED);
+	printk(XENO_ERR "init failed, code %d\n", ret);
+
+	return ret;
+}
+device_initcall(xenomai_init);
+
+/**
+ * @ingroup cobalt
+ * @defgroup cobalt_core Cobalt kernel
+ *
+ * The Cobalt core is a co-kernel which supplements the Linux kernel
+ * for delivering real-time services with very low latency. It
+ * implements a set of generic RTOS building blocks, which the
+ * Cobalt/POSIX and Cobalt/RTDM APIs are based on.  Cobalt takes
+ * priority over Linux kernel activities.
+ *
+ * @{
+ *
+ * @page cobalt-core-tags Dual kernel service tags
+ *
+ * The Cobalt kernel services may be restricted to particular calling
+ * contexts, or entail specific side-effects. To describe this
+ * information, each service documented by this section bears a set of
+ * tags when applicable.
+ *
+ * The table below matches the tags used throughout the documentation
+ * with the description of their meaning for the caller.
+ *
+ * @par
+ * <b>Context tags</b>
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Context on entry</TH></TR>
+ * <TR><TD>primary-only</TD>	<TD>Must be called from a Cobalt task in primary mode</TD></TR>
+ * <TR><TD>primary-timed</TD>	<TD>Requires a Cobalt task in primary mode if timed</TD></TR>
+ * <TR><TD>coreirq-only</TD>	<TD>Must be called from a Cobalt IRQ handler</TD></TR>
+ * <TR><TD>secondary-only</TD>	<TD>Must be called from a Cobalt task in secondary mode or regular Linux task</TD></TR>
+ * <TR><TD>rtdm-task</TD>	<TD>Must be called from a RTDM driver task</TD></TR>
+ * <TR><TD>mode-unrestricted</TD>	<TD>May be called from a Cobalt task in either primary or secondary mode</TD></TR>
+ * <TR><TD>task-unrestricted</TD>	<TD>May be called from a Cobalt or regular Linux task indifferently</TD></TR>
+ * <TR><TD>unrestricted</TD>	<TD>May be called from any context previously described</TD></TR>
+ * <TR><TD>atomic-entry</TD>	<TD>Caller must currently hold the big Cobalt kernel lock (nklock)</TD></TR>
+ * </TABLE>
+ *
+ * @par
+ * <b>Possible side-effects</b>
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Description</TH></TR>
+ * <TR><TD>might-switch</TD>	<TD>The Cobalt kernel may switch context</TD></TR>
+ * </TABLE>
+ *
+ * @}
+ */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile
new file mode 100644
index 0000000..f2b877d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/kernel
+
+obj-y +=	pipeline.o
+
+pipeline-y :=	init.o intr.o kevents.o tick.o syscall.o sched.o clock.o
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c
new file mode 100644
index 0000000..1c04eed
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/clock.c
@@ -0,0 +1,158 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ */
+
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/clock.h>
+#include <pipeline/machine.h>
+
+static unsigned long long clockfreq;
+
+#ifdef XNARCH_HAVE_LLMULSHFT
+
+static unsigned int tsc_scale, tsc_shift;
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+static struct xnarch_u32frac tsc_frac;
+
+long long xnclock_core_ns_to_ticks(long long ns)
+{
+	return xnarch_nodiv_llimd(ns, tsc_frac.frac, tsc_frac.integ);
+}
+
+#else /* !XNARCH_HAVE_NODIV_LLIMD */
+
+long long xnclock_core_ns_to_ticks(long long ns)
+{
+	return xnarch_llimd(ns, 1 << tsc_shift, tsc_scale);
+}
+
+#endif /* !XNARCH_HAVE_NODIV_LLIMD */
+
+xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks)
+{
+	return xnarch_llmulshft(ticks, tsc_scale, tsc_shift);
+}
+
+xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks)
+{
+	unsigned int shift = tsc_shift - 1;
+	return (xnarch_llmulshft(ticks, tsc_scale, shift) + 1) / 2;
+}
+
+#else  /* !XNARCH_HAVE_LLMULSHFT */
+
+xnsticks_t xnclock_core_ticks_to_ns(xnsticks_t ticks)
+{
+	return xnarch_llimd(ticks, 1000000000, clockfreq);
+}
+
+xnsticks_t xnclock_core_ticks_to_ns_rounded(xnsticks_t ticks)
+{
+	return (xnarch_llimd(ticks, 1000000000, clockfreq/2) + 1) / 2;
+}
+
+xnsticks_t xnclock_core_ns_to_ticks(xnsticks_t ns)
+{
+	return xnarch_llimd(ns, clockfreq, 1000000000);
+}
+
+#endif /* !XNARCH_HAVE_LLMULSHFT */
+
+EXPORT_SYMBOL_GPL(xnclock_core_ticks_to_ns);
+EXPORT_SYMBOL_GPL(xnclock_core_ticks_to_ns_rounded);
+EXPORT_SYMBOL_GPL(xnclock_core_ns_to_ticks);
+
+int pipeline_get_host_time(struct timespec64 *tp)
+{
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+	struct xnvdso_hostrt_data *hostrt_data;
+	u64 now, base, mask, cycle_delta;
+	__u32 mult, shift;
+	unsigned long rem;
+	urwstate_t tmp;
+	__u64 nsec;
+
+	hostrt_data = get_hostrt_data();
+	BUG_ON(!hostrt_data);
+
+	if (unlikely(!hostrt_data->live))
+		return -1;
+
+	/*
+	 * Note: Disabling HW interrupts around writes to hostrt_data
+	 * ensures that a reader (on the Xenomai side) cannot
+	 * interrupt a writer (on the Linux kernel side) on the same
+	 * CPU.  The urw block is required when a reader is
+	 * interleaved by a writer on a different CPU. This follows
+	 * the approach from userland, where taking the spinlock is
+	 * not possible.
+	 */
+	unsynced_read_block(&tmp, &hostrt_data->lock) {
+		now = xnclock_read_raw(&nkclock);
+		base = hostrt_data->cycle_last;
+		mask = hostrt_data->mask;
+		mult = hostrt_data->mult;
+		shift = hostrt_data->shift;
+		tp->tv_sec = hostrt_data->wall_sec;
+		nsec = hostrt_data->wall_nsec;
+	}
+
+	/*
+	 * At this point, we have a consistent copy of the fundamental
+	 * data structure - calculate the interval between the current
+	 * and base time stamp cycles, and convert the difference
+	 * to nanoseconds.
+	 */
+	cycle_delta = (now - base) & mask;
+	nsec += (cycle_delta * mult) >> shift;
+
+	/* Convert to the desired sec, usec representation */
+	tp->tv_sec += xnclock_divrem_billion(nsec, &rem);
+	tp->tv_nsec = rem;
+
+	return 0;
+#else
+	return -EINVAL;
+#endif
+}
+
+xnticks_t pipeline_read_wallclock(void)
+{
+	return xnclock_read_monotonic(&nkclock) + xnclock_get_offset(&nkclock);
+}
+EXPORT_SYMBOL_GPL(pipeline_read_wallclock);
+
+int pipeline_set_wallclock(xnticks_t epoch_ns)
+{
+	xnclock_set_wallclock(epoch_ns);
+
+	return 0;
+}
+
+void pipeline_update_clock_freq(unsigned long long freq)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	clockfreq = freq;
+#ifdef XNARCH_HAVE_LLMULSHFT
+	xnarch_init_llmulshft(1000000000, freq, &tsc_scale, &tsc_shift);
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	xnarch_init_u32frac(&tsc_frac, 1 << tsc_shift, tsc_scale);
+#endif
+#endif
+	cobalt_pipeline.clock_freq = freq;
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+void pipeline_init_clock(void)
+{
+	pipeline_update_clock_freq(cobalt_pipeline.clock_freq);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c
new file mode 100644
index 0000000..c199f00
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/init.c
@@ -0,0 +1,81 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2019 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+#include <pipeline/machine.h>
+#include <linux/ipipe_tickdev.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/clock.h>
+
+int __init pipeline_init(void)
+{
+	struct ipipe_sysinfo sysinfo;
+	int ret, virq;
+
+	ret = ipipe_select_timers(&xnsched_realtime_cpus);
+	if (ret < 0)
+		return ret;
+
+	ipipe_get_sysinfo(&sysinfo);
+
+	cobalt_pipeline.clock_freq = sysinfo.sys_hrclock_freq;
+
+	if (cobalt_machine.init) {
+		ret = cobalt_machine.init();
+		if (ret)
+			return ret;
+	}
+
+	ipipe_register_head(&xnsched_primary_domain, "Xenomai");
+
+	virq = ipipe_alloc_virq();
+	if (virq == 0)
+		goto fail_escalate;
+
+	cobalt_pipeline.escalate_virq = virq;
+
+	ipipe_request_irq(&xnsched_primary_domain,
+			  cobalt_pipeline.escalate_virq,
+			  (ipipe_irq_handler_t)__xnsched_run_handler,
+			  NULL, NULL);
+
+	ret = xnclock_init();
+	if (ret)
+		goto fail_clock;
+
+	return 0;
+
+fail_clock:
+	ipipe_free_irq(&xnsched_primary_domain,
+		       cobalt_pipeline.escalate_virq);
+	ipipe_free_virq(cobalt_pipeline.escalate_virq);
+fail_escalate:
+	ipipe_unregister_head(&xnsched_primary_domain);
+
+	if (cobalt_machine.cleanup)
+		cobalt_machine.cleanup();
+
+	return ret;
+}
+
+int __init pipeline_late_init(void)
+{
+	if (cobalt_machine.late_init)
+		return cobalt_machine.late_init();
+
+	return 0;
+}
+
+__init void pipeline_cleanup(void)
+{
+	ipipe_unregister_head(&xnsched_primary_domain);
+	ipipe_free_irq(&xnsched_primary_domain,
+		       cobalt_pipeline.escalate_virq);
+	ipipe_free_virq(cobalt_pipeline.escalate_virq);
+	ipipe_timers_release();
+	xnclock_cleanup();
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c
new file mode 100644
index 0000000..cb15597
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/intr.c
@@ -0,0 +1,1230 @@
+/*
+ * Copyright (C) 2001,2002,2003 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2005,2006 Dmitry Adamushko <dmitry.adamushko@gmail.com>.
+ * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+*/
+#include <linux/mutex.h>
+#include <linux/ipipe.h>
+#include <linux/ipipe_tickdev.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/assert.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_irq Interrupt management
+ * @{
+ */
+#define XNINTR_MAX_UNHANDLED	1000
+
+static DEFINE_MUTEX(intrlock);
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+struct xnintr nktimer;	     /* Only for statistics */
+static int xnintr_count = 1; /* Number of attached xnintr objects + nktimer */
+static int xnintr_list_rev;  /* Modification counter of xnintr list */
+
+/*
+ * Both functions update xnintr_list_rev at the very end. This
+ * guarantees that module.c::stat_seq_open() cannot observe an
+ * up-to-date xnintr_list_rev paired with a stale xnintr_count.
+ */
+
+static inline void stat_counter_inc(void)
+{
+	xnintr_count++;
+	smp_mb();
+	xnintr_list_rev++;
+}
+
+static inline void stat_counter_dec(void)
+{
+	xnintr_count--;
+	smp_mb();
+	xnintr_list_rev++;
+}
+
+static inline void sync_stat_references(struct xnintr *intr)
+{
+	struct xnirqstat *statp;
+	struct xnsched *sched;
+	int cpu;
+
+	for_each_realtime_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		statp = per_cpu_ptr(intr->stats, cpu);
+		/* Wait for all dangling references to go away. */
+		while (sched->current_account == &statp->account)
+			cpu_relax();
+	}
+}
+
+static void clear_irqstats(struct xnintr *intr)
+{
+	struct xnirqstat *p;
+	int cpu;
+
+	for_each_realtime_cpu(cpu) {
+		p = per_cpu_ptr(intr->stats, cpu);
+		memset(p, 0, sizeof(*p));
+	}
+}
+
+static inline void alloc_irqstats(struct xnintr *intr)
+{
+	intr->stats = alloc_percpu(struct xnirqstat);
+	clear_irqstats(intr);
+}
+
+static inline void free_irqstats(struct xnintr *intr)
+{
+	free_percpu(intr->stats);
+}
+
+static inline void query_irqstats(struct xnintr *intr, int cpu,
+				  struct xnintr_iterator *iterator)
+{
+	struct xnirqstat *statp;
+	xnticks_t last_switch;
+
+	statp = per_cpu_ptr(intr->stats, cpu);
+	iterator->hits = xnstat_counter_get(&statp->hits);
+	last_switch = xnsched_struct(cpu)->last_account_switch;
+	iterator->exectime_period = statp->account.total;
+	iterator->account_period = last_switch - statp->account.start;
+	statp->sum.total += iterator->exectime_period;
+	iterator->exectime_total = statp->sum.total;
+	statp->account.total = 0;
+	statp->account.start = last_switch;
+}
+
+static void inc_irqstats(struct xnintr *intr, struct xnsched *sched, xnticks_t start)
+{
+	struct xnirqstat *statp;
+
+	statp = raw_cpu_ptr(intr->stats);
+	xnstat_counter_inc(&statp->hits);
+	xnstat_exectime_lazy_switch(sched, &statp->account, start);
+}
+
+static inline void switch_to_irqstats(struct xnintr *intr,
+				      struct xnsched *sched)
+{
+	struct xnirqstat *statp;
+
+	statp = raw_cpu_ptr(intr->stats);
+	xnstat_exectime_switch(sched, &statp->account);
+}
+
+static inline void switch_from_irqstats(struct xnsched *sched,
+					xnstat_exectime_t *prev)
+{
+	xnstat_exectime_switch(sched, prev);
+}
+
+static inline xnstat_exectime_t *switch_core_irqstats(struct xnsched *sched)
+{
+	struct xnirqstat *statp;
+	xnstat_exectime_t *prev;
+
+	statp = raw_cpu_ptr(nktimer.stats);
+	prev = xnstat_exectime_switch(sched, &statp->account);
+	xnstat_counter_inc(&statp->hits);
+
+	return prev;
+}
+
+#else  /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+static inline void stat_counter_inc(void) {}
+
+static inline void stat_counter_dec(void) {}
+
+static inline void sync_stat_references(struct xnintr *intr) {}
+
+static inline void alloc_irqstats(struct xnintr *intr) {}
+
+static inline void free_irqstats(struct xnintr *intr) {}
+
+static inline void clear_irqstats(struct xnintr *intr) {}
+
+static inline void query_irqstats(struct xnintr *intr, int cpu,
+				  struct xnintr_iterator *iterator) {}
+
+static inline void inc_irqstats(struct xnintr *intr, struct xnsched *sched, xnticks_t start) {}
+
+static inline void switch_to_irqstats(struct xnintr *intr,
+				      struct xnsched *sched) {}
+
+static inline void switch_from_irqstats(struct xnsched *sched,
+					xnstat_exectime_t *prev) {}
+
+static inline xnstat_exectime_t *switch_core_irqstats(struct xnsched *sched)
+{
+	return NULL;
+}
+
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+
+static void xnintr_irq_handler(unsigned int irq, void *cookie);
+
+void xnintr_host_tick(struct xnsched *sched) /* Interrupts off. */
+{
+	sched->lflags &= ~XNHTICK;
+#ifdef XNARCH_HOST_TICK_IRQ
+	ipipe_post_irq_root(XNARCH_HOST_TICK_IRQ);
+#endif
+}
+
+/*
+ * Low-level core clock irq handler. This one forwards ticks from the
+ * Xenomai platform timer to nkclock exclusively.
+ */
+void xnintr_core_clock_handler(void)
+{
+	struct xnsched *sched = xnsched_current();
+	int cpu  __maybe_unused = xnsched_cpu(sched);
+	xnstat_exectime_t *prev;
+
+	if (!xnsched_supported_cpu(cpu)) {
+#ifdef XNARCH_HOST_TICK_IRQ
+		ipipe_post_irq_root(XNARCH_HOST_TICK_IRQ);
+#endif
+		return;
+	}
+
+	prev = switch_core_irqstats(sched);
+
+	trace_cobalt_clock_entry(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
+
+	++sched->inesting;
+	sched->lflags |= XNINIRQ;
+
+	xnlock_get(&nklock);
+	xnclock_tick(&nkclock);
+	xnlock_put(&nklock);
+
+	trace_cobalt_clock_exit(per_cpu(ipipe_percpu.hrtimer_irq, cpu));
+	switch_from_irqstats(sched, prev);
+
+	if (--sched->inesting == 0) {
+		sched->lflags &= ~XNINIRQ;
+		xnsched_run();
+		sched = xnsched_current();
+	}
+	/*
+	 * If the core clock interrupt preempted a real-time thread,
+	 * any transition to the root thread has already triggered a
+	 * host tick propagation from xnsched_run(), so at this point,
+	 * we only need to propagate the host tick in case the
+	 * interrupt preempted the root thread.
+	 */
+	if ((sched->lflags & XNHTICK) &&
+	    xnthread_test_state(sched->curr, XNROOT))
+		xnintr_host_tick(sched);
+}
+
+struct irqdisable_work {
+	struct ipipe_work_header work; /* Must be first. */
+	int irq;
+};
+
+static void lostage_irqdisable_line(struct ipipe_work_header *work)
+{
+	struct irqdisable_work *rq;
+
+	rq = container_of(work, struct irqdisable_work, work);
+	ipipe_disable_irq(rq->irq);
+}
+
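+/*
+ * Note: disabling the line is deferred to a work item running over
+ * the root domain, since ipipe_disable_irq() may not be safely
+ * callable from the head domain for any given irqchip.
+ */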
+static void disable_irq_line(int irq)
+{
+	struct irqdisable_work diswork = {
+		.work = {
+			.size = sizeof(diswork),
+			.handler = lostage_irqdisable_line,
+		},
+		.irq = irq,
+	};
+
+	ipipe_post_work_root(&diswork, work);
+}
+
+/* Optional support for shared interrupts. */
+
+#ifdef CONFIG_XENO_OPT_SHIRQ
+
+struct xnintr_vector {
+	DECLARE_XNLOCK(lock);
+	struct xnintr *handlers;
+	int unhandled;
+} ____cacheline_aligned_in_smp;
+
+static struct xnintr_vector vectors[IPIPE_NR_IRQS];
+
+static inline struct xnintr *xnintr_vec_first(unsigned int irq)
+{
+	return vectors[irq].handlers;
+}
+
+static inline struct xnintr *xnintr_vec_next(struct xnintr *prev)
+{
+	return prev->next;
+}
+
+static void disable_shared_irq_line(struct xnintr_vector *vec)
+{
+	int irq = vec - vectors;
+	struct xnintr *intr;
+
+	xnlock_get(&vec->lock);
+	intr = vec->handlers;
+	while (intr) {
+		set_bit(XN_IRQSTAT_DISABLED, &intr->status);
+		intr = intr->next;
+	}
+	xnlock_put(&vec->lock);
+	disable_irq_line(irq);
+}
+
+/*
+ * Low-level interrupt handler dispatching the user-defined ISRs for
+ * shared interrupts -- Called with interrupts off.
+ */
+static void xnintr_vec_handler(unsigned int irq, void *cookie)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnintr_vector *vec = vectors + irq;
+	xnstat_exectime_t *prev;
+	struct xnintr *intr;
+	xnticks_t start;
+	int s = 0, ret;
+
+	prev  = xnstat_exectime_get_current(sched);
+	start = xnstat_exectime_now();
+	trace_cobalt_irq_entry(irq);
+
+	++sched->inesting;
+	sched->lflags |= XNINIRQ;
+
+	xnlock_get(&vec->lock);
+	intr = vec->handlers;
+	if (unlikely(test_bit(XN_IRQSTAT_DISABLED, &intr->status))) {
+		/* irqdisable_work is on its way, ignore. */
+		xnlock_put(&vec->lock);
+		goto out;
+	}
+
+	while (intr) {
+		/*
+		 * NOTE: We assume that no CPU migration can occur
+		 * while running the interrupt service routine.
+		 */
+		ret = intr->isr(intr);
+		XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0);
+		s |= ret;
+		if (ret & XN_IRQ_HANDLED) {
+			inc_irqstats(intr, sched, start);
+			start = xnstat_exectime_now();
+		}
+		intr = intr->next;
+	}
+
+	xnlock_put(&vec->lock);
+
+	if (unlikely(!(s & XN_IRQ_HANDLED))) {
+		if (++vec->unhandled == XNINTR_MAX_UNHANDLED) {
+			printk(XENO_ERR "%s: IRQ%d not handled. Disabling IRQ line\n",
+			       __FUNCTION__, irq);
+			s |= XN_IRQ_DISABLE;
+		}
+	} else
+		vec->unhandled = 0;
+
+	if (s & XN_IRQ_PROPAGATE)
+		ipipe_post_irq_root(irq);
+	else if (s & XN_IRQ_DISABLE)
+		disable_shared_irq_line(vec);
+	else
+		ipipe_end_irq(irq);
+out:
+	switch_from_irqstats(sched, prev);
+
+	trace_cobalt_irq_exit(irq);
+
+	if (--sched->inesting == 0) {
+		sched->lflags &= ~XNINIRQ;
+		xnsched_run();
+	}
+}
+
+/*
+ * Low-level interrupt handler dispatching the user-defined ISRs for
+ * shared edge-triggered interrupts -- Called with interrupts off.
+ */
+static void xnintr_edge_vec_handler(unsigned int irq, void *cookie)
+{
+	const int MAX_EDGEIRQ_COUNTER = 128;
+	struct xnsched *sched = xnsched_current();
+	struct xnintr_vector *vec = vectors + irq;
+	struct xnintr *intr, *end = NULL;
+	int s = 0, counter = 0, ret;
+	xnstat_exectime_t *prev;
+	xnticks_t start;
+
+	prev  = xnstat_exectime_get_current(sched);
+	start = xnstat_exectime_now();
+	trace_cobalt_irq_entry(irq);
+
+	++sched->inesting;
+	sched->lflags |= XNINIRQ;
+
+	xnlock_get(&vec->lock);
+	intr = vec->handlers;
+	if (unlikely(test_bit(XN_IRQSTAT_DISABLED, &intr->status))) {
+		/* irqdisable_work is on its way, ignore. */
+		xnlock_put(&vec->lock);
+		goto out;
+	}
+
+	while (intr != end) {
+		switch_to_irqstats(intr, sched);
+		/*
+		 * NOTE: We assume that no CPU migration will occur
+		 * while running the interrupt service routine.
+		 */
+		ret = intr->isr(intr);
+		XENO_WARN_ON_ONCE(USER, (ret & XN_IRQ_STATMASK) == 0);
+		s |= ret;
+
+		if (ret & XN_IRQ_HANDLED) {
+			end = NULL;
+			inc_irqstats(intr, sched, start);
+			start = xnstat_exectime_now();
+		} else if (end == NULL)
+			end = intr;
+
+		if (counter++ > MAX_EDGEIRQ_COUNTER)
+			break;
+
+		intr = intr->next;
+		if (intr  == NULL)
+			intr = vec->handlers;
+	}
+
+	xnlock_put(&vec->lock);
+
+	if (counter > MAX_EDGEIRQ_COUNTER)
+		printk(XENO_ERR "%s: failed to get the IRQ%d line free\n",
+		       __FUNCTION__, irq);
+
+	if (unlikely(!(s & XN_IRQ_HANDLED))) {
+		if (++vec->unhandled == XNINTR_MAX_UNHANDLED) {
+			printk(XENO_ERR "%s: IRQ%d not handled. Disabling IRQ line\n",
+			       __FUNCTION__, irq);
+			s |= XN_IRQ_DISABLE;
+		}
+	} else
+		vec->unhandled = 0;
+
+	if (s & XN_IRQ_PROPAGATE)
+		ipipe_post_irq_root(irq);
+	else if (s & XN_IRQ_DISABLE)
+		disable_shared_irq_line(vec);
+	else
+		ipipe_end_irq(irq);
+out:
+	switch_from_irqstats(sched, prev);
+
+	trace_cobalt_irq_exit(irq);
+
+	if (--sched->inesting == 0) {
+		sched->lflags &= ~XNINIRQ;
+		xnsched_run();
+	}
+}
+
+static inline bool cobalt_owns_irq(int irq)
+{
+	ipipe_irq_handler_t h;
+
+	h = __ipipe_irq_handler(&xnsched_primary_domain, irq);
+
+	return h == xnintr_vec_handler ||
+		h == xnintr_edge_vec_handler ||
+		h == xnintr_irq_handler;
+}
+
+static inline int xnintr_irq_attach(struct xnintr *intr)
+{
+	struct xnintr_vector *vec = vectors + intr->irq;
+	struct xnintr *prev, **p = &vec->handlers;
+	int ret;
+
+	prev = *p;
+	if (prev) {
+		/* Check whether shared mode is allowed. */
+		if ((prev->flags & intr->flags & XN_IRQTYPE_SHARED) == 0 ||
+		    (prev->iack != intr->iack)
+		    || ((prev->flags & XN_IRQTYPE_EDGE) !=
+			(intr->flags & XN_IRQTYPE_EDGE)))
+			return -EBUSY;
+
+		/*
+		 * Get a position at the end of the list to insert the
+		 * new element.
+		 */
+		while (prev) {
+			p = &prev->next;
+			prev = *p;
+		}
+	} else {
+		/* Initialize the corresponding interrupt channel */
+		void (*handler) (unsigned, void *) = xnintr_irq_handler;
+
+		if (intr->flags & XN_IRQTYPE_SHARED) {
+			if (intr->flags & XN_IRQTYPE_EDGE)
+				handler = xnintr_edge_vec_handler;
+			else
+				handler = xnintr_vec_handler;
+
+		}
+		vec->unhandled = 0;
+
+		ret = ipipe_request_irq(&xnsched_primary_domain,
+					intr->irq, handler, intr,
+					(ipipe_irq_ackfn_t)intr->iack);
+		if (ret)
+			return ret;
+	}
+
+	intr->next = NULL;
+	/*
+	 * Add the given interrupt object. No need to synchronise with
+	 * the IRQ handler, we are only extending the chain.
+	 */
+	*p = intr;
+
+	return 0;
+}
+
+static inline void xnintr_irq_detach(struct xnintr *intr)
+{
+	struct xnintr_vector *vec = vectors + intr->irq;
+	struct xnintr *e, **p = &vec->handlers;
+
+	while ((e = *p) != NULL) {
+		if (e == intr) {
+			/* Remove the given interrupt object from the list. */
+			xnlock_get(&vec->lock);
+			*p = e->next;
+			xnlock_put(&vec->lock);
+
+			sync_stat_references(intr);
+
+			/* Release the IRQ line if this was the last user */
+			if (vec->handlers == NULL)
+				ipipe_free_irq(&xnsched_primary_domain, intr->irq);
+
+			return;
+		}
+		p = &e->next;
+	}
+
+	printk(XENO_ERR "attempted to detach an unregistered interrupt descriptor\n");
+}
+
+#else /* !CONFIG_XENO_OPT_SHIRQ */
+
+struct xnintr_vector {
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+	DECLARE_XNLOCK(lock);
+#endif /* CONFIG_SMP || XENO_DEBUG(LOCKING) */
+} ____cacheline_aligned_in_smp;
+
+static struct xnintr_vector vectors[IPIPE_NR_IRQS];
+
+static inline bool cobalt_owns_irq(int irq)
+{
+	ipipe_irq_handler_t h;
+
+	h = __ipipe_irq_handler(&xnsched_primary_domain, irq);
+
+	return h == xnintr_irq_handler;
+}
+
+static inline struct xnintr *xnintr_vec_first(unsigned int irq)
+{
+	return cobalt_owns_irq(irq) ?
+		__ipipe_irq_cookie(&xnsched_primary_domain, irq) : NULL;
+}
+
+static inline struct xnintr *xnintr_vec_next(struct xnintr *prev)
+{
+	return NULL;
+}
+
+static inline int xnintr_irq_attach(struct xnintr *intr)
+{
+	return ipipe_request_irq(&xnsched_primary_domain,
+				 intr->irq, xnintr_irq_handler, intr,
+				 (ipipe_irq_ackfn_t)intr->iack);
+}
+
+static inline void xnintr_irq_detach(struct xnintr *intr)
+{
+	int irq = intr->irq;
+
+	xnlock_get(&vectors[irq].lock);
+	ipipe_free_irq(&xnsched_primary_domain, irq);
+	xnlock_put(&vectors[irq].lock);
+
+	sync_stat_references(intr);
+}
+
+#endif /* !CONFIG_XENO_OPT_SHIRQ */
+
+/*
+ * Low-level interrupt handler dispatching non-shared ISRs -- Called
+ * with interrupts off.
+ */
+static void xnintr_irq_handler(unsigned int irq, void *cookie)
+{
+	struct xnintr_vector __maybe_unused *vec = vectors + irq;
+	struct xnsched *sched = xnsched_current();
+	xnstat_exectime_t *prev;
+	struct xnintr *intr;
+	xnticks_t start;
+	int s = 0;
+
+	prev  = xnstat_exectime_get_current(sched);
+	start = xnstat_exectime_now();
+	trace_cobalt_irq_entry(irq);
+
+	++sched->inesting;
+	sched->lflags |= XNINIRQ;
+
+	xnlock_get(&vec->lock);
+
+#ifdef CONFIG_SMP
+	/*
+	 * In SMP case, we have to reload the cookie under the per-IRQ
+	 * lock to avoid racing with xnintr_detach.  However, we
+	 * assume that no CPU migration will occur while running the
+	 * interrupt service routine, so the scheduler pointer will
+	 * remain valid throughout this function.
+	 */
+	intr = __ipipe_irq_cookie(&xnsched_primary_domain, irq);
+	if (unlikely(intr == NULL))
+		goto done;
+#else
+	intr = cookie;
+#endif
+	if (unlikely(test_bit(XN_IRQSTAT_DISABLED, &intr->status))) {
+		/* irqdisable_work is on its way, ignore. */
+		xnlock_put(&vec->lock);
+		goto out;
+	}
+
+	s = intr->isr(intr);
+	XENO_WARN_ON_ONCE(USER, (s & XN_IRQ_STATMASK) == 0);
+	if (unlikely(!(s & XN_IRQ_HANDLED))) {
+		if (++intr->unhandled == XNINTR_MAX_UNHANDLED) {
+			printk(XENO_ERR "%s: IRQ%d not handled. Disabling IRQ line\n",
+			       __FUNCTION__, irq);
+			s |= XN_IRQ_DISABLE;
+		}
+	} else {
+		inc_irqstats(intr, sched, start);
+		intr->unhandled = 0;
+	}
+
+	if (s & XN_IRQ_DISABLE)
+		set_bit(XN_IRQSTAT_DISABLED, &intr->status);
+#ifdef CONFIG_SMP
+done:
+#endif
+	xnlock_put(&vec->lock);
+
+	if (s & XN_IRQ_DISABLE)
+		disable_irq_line(irq);
+	else if (s & XN_IRQ_PROPAGATE)
+		ipipe_post_irq_root(irq);
+	else
+		ipipe_end_irq(irq);
+out:
+	switch_from_irqstats(sched, prev);
+
+	trace_cobalt_irq_exit(irq);
+
+	if (--sched->inesting == 0) {
+		sched->lflags &= ~XNINIRQ;
+		xnsched_run();
+	}
+}
+
+int __init xnintr_mount(void)
+{
+	int i;
+
+	for (i = 0; i < IPIPE_NR_IRQS; ++i)
+		xnlock_init(&vectors[i].lock);
+	return 0;
+}
+
+/**
+ * @fn int xnintr_init(struct xnintr *intr,const char *name,unsigned int irq,xnisr_t isr,xniack_t iack,int flags)
+ * @brief Initialize an interrupt descriptor.
+ *
+ * When an interrupt occurs on the given @a irq line, the interrupt
+ * service routine @a isr is fired in order to deal with the hardware
+ * event. The interrupt handler may call any non-blocking service from
+ * the Cobalt core.
+ *
+ * Upon receipt of an IRQ, the interrupt handler @a isr is immediately
+ * called on behalf of the interrupted stack context, the rescheduling
+ * procedure is locked, and the interrupt line is masked in the system
+ * interrupt controller chip.  Upon return, the status of the
+ * interrupt handler is checked for the following bits:
+ *
+ * - XN_IRQ_HANDLED indicates that the interrupt request was
+ * successfully handled.
+ *
+ * - XN_IRQ_NONE indicates the opposite of XN_IRQ_HANDLED, meaning
+ * that the handler could not identify any interrupt source for the
+ * ongoing request.
+ *
+ * In addition, one of the following bits may be present in the
+ * status:
+ *
+ * - XN_IRQ_DISABLE tells the Cobalt core to disable the interrupt
+ * line before returning from the interrupt context.
+ *
+ * - XN_IRQ_PROPAGATE propagates the IRQ event down the interrupt
+ * pipeline to Linux. Using this flag is strongly discouraged, unless
+ * you fully understand the implications of such propagation.
+ *
+ * @warning The handler should not use these bits if it shares the
+ * interrupt line with other handlers in the real-time domain. When
+ * any of these bits is detected, the interrupt line is left masked.
+ *
+ * A count of interrupt receipts is tracked in the interrupt
+ * descriptor, and reset to zero each time the descriptor is
+ * attached. Since this count could wrap around, it should be used as
+ * an indication of interrupt activity only.
+ *
+ * @param intr The address of a descriptor the Cobalt core will use to
+ * store the interrupt-specific data.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * interrupt or NULL.
+ *
+ * @param irq The IRQ line number associated with the interrupt
+ * descriptor. This value is architecture-dependent. An interrupt
+ * descriptor must be attached to the system by a call to
+ * xnintr_attach() before @a irq events can be received.
+ *
+ * @param isr The address of an interrupt handler, which is passed the
+ * address of the interrupt descriptor receiving the IRQ.
+ *
+ * @param iack The address of an optional interrupt acknowledge
+ * routine, aimed at replacing the default one. Only very specific
+ * situations actually require overriding the default setting for
+ * this parameter, such as acknowledging non-standard PIC
+ * hardware. @a iack should return a non-zero value to indicate that
+ * the interrupt has been properly acknowledged. If @a iack is NULL,
+ * the default routine will be used instead.
+ *
+ * @param flags A set of creation flags affecting the operation. The
+ * valid flags are:
+ *
+ * - XN_IRQTYPE_SHARED enables IRQ-sharing with other interrupt
+ * objects.
+ *
+ * - XN_IRQTYPE_EDGE is an additional flag that must be set together
+ * with XN_IRQTYPE_SHARED to enable IRQ-sharing of edge-triggered
+ * interrupts.
+ *
+ * @return 0 is returned on success. Otherwise, -EINVAL is returned if
+ * @a irq is not a valid interrupt number.
+ *
+ * @coretags{secondary-only}
+ */
+int xnintr_init(struct xnintr *intr, const char *name,
+		unsigned int irq, xnisr_t isr, xniack_t iack,
+		int flags)
+{
+	secondary_mode_only();
+
+	if (irq >= IPIPE_NR_IRQS)
+		return -EINVAL;
+
+	intr->irq = irq;
+	intr->isr = isr;
+	intr->iack = iack;
+	intr->cookie = NULL;
+	intr->name = name ? : "<unknown>";
+	intr->flags = flags;
+	intr->status = _XN_IRQSTAT_DISABLED;
+	intr->unhandled = 0;
+	raw_spin_lock_init(&intr->lock);
+#ifdef CONFIG_XENO_OPT_SHIRQ
+	intr->next = NULL;
+#endif
+	alloc_irqstats(intr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnintr_init);
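+
+/*
+ * Minimal ISR sketch following the status protocol documented above;
+ * the mydev type and its register helpers are hypothetical:
+ *
+ *	static int mydev_isr(struct xnintr *intr)
+ *	{
+ *		struct mydev *dev = intr->cookie;
+ *
+ *		if (!mydev_irq_pending(dev))
+ *			return XN_IRQ_NONE;
+ *
+ *		mydev_ack_irq(dev);
+ *		return XN_IRQ_HANDLED;
+ *	}
+ */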
+
+/**
+ * @fn void xnintr_destroy(struct xnintr *intr)
+ * @brief Destroy an interrupt descriptor.
+ *
+ * Destroys an interrupt descriptor previously initialized by
+ * xnintr_init(). The descriptor is automatically detached by a call
+ * to xnintr_detach(). No more IRQs will be received through this
+ * descriptor after this service has returned.
+ *
+ * @param intr The address of the interrupt descriptor to destroy.
+ *
+ * @coretags{secondary-only}
+ */
+void xnintr_destroy(struct xnintr *intr)
+{
+	secondary_mode_only();
+	xnintr_detach(intr);
+	free_irqstats(intr);
+}
+EXPORT_SYMBOL_GPL(xnintr_destroy);
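+
+/*
+ * Teardown sketch for the hypothetical descriptor used in the
+ * examples nearby: xnintr_destroy() implies xnintr_detach(), so a
+ * single call suffices once the line is no longer needed.
+ *
+ *	xnintr_disable(&dev->intr);
+ *	xnintr_destroy(&dev->intr);
+ */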
+
+/**
+ * @fn int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask)
+ * @brief Attach an interrupt descriptor.
+ *
+ * Attach an interrupt descriptor previously initialized by
+ * xnintr_init(). This operation registers the descriptor at the
+ * interrupt pipeline, but does not enable the interrupt line yet. A
+ * call to xnintr_enable() is required to start receiving IRQs from
+ * the interrupt line associated to the descriptor.
+ *
+ * @param intr The address of the interrupt descriptor to attach.
+ *
+ * @param cookie A user-defined opaque value which is stored into the
+ * descriptor for further retrieval by the interrupt handler.
+ *
+ * @param cpumask Initial CPU affinity of the interrupt. If NULL, affinity is
+ * set to all real-time CPUs.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -EINVAL is returned if an error occurred while attaching the
+ * descriptor.
+ *
+ * - -EBUSY is returned if the descriptor was already attached.
+ *
+ * @note The caller <b>must not</b> hold nklock when invoking this
+ * service; this would cause deadlocks.
+ *
+ * @coretags{secondary-only}
+ *
+ * @note Attaching an interrupt descriptor resets the tracked number
+ * of IRQ receipts to zero.
+ */
+int xnintr_attach(struct xnintr *intr, void *cookie, const cpumask_t *cpumask)
+{
+#ifdef CONFIG_SMP
+	cpumask_t tmp_mask, *effective_mask;
+#endif
+	int ret;
+
+	secondary_mode_only();
+	trace_cobalt_irq_attach(intr->irq);
+
+	intr->cookie = cookie;
+	clear_irqstats(intr);
+
+#ifdef CONFIG_SMP
+	if (!cpumask) {
+		effective_mask = &xnsched_realtime_cpus;
+	} else {
+		effective_mask = &tmp_mask;
+		cpumask_and(effective_mask, &xnsched_realtime_cpus, cpumask);
+		if (cpumask_empty(effective_mask))
+			return -EINVAL;
+	}
+	ret = ipipe_set_irq_affinity(intr->irq, *effective_mask);
+	if (ret < 0)
+		return ret;
+#endif /* CONFIG_SMP */
+
+	raw_spin_lock(&intr->lock);
+
+	if (test_and_set_bit(XN_IRQSTAT_ATTACHED, &intr->status)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	ret = xnintr_irq_attach(intr);
+	if (ret) {
+		clear_bit(XN_IRQSTAT_ATTACHED, &intr->status);
+		goto out;
+	}
+
+	stat_counter_inc();
+out:
+	raw_spin_unlock(&intr->lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnintr_attach);
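+
+/*
+ * Typical attachment sequence, sketched with the hypothetical
+ * mydev_isr() shown earlier. The line is left masked by
+ * xnintr_init(), so xnintr_enable() is still required before any IRQ
+ * can be received; a NULL cpumask selects all real-time CPUs:
+ *
+ *	ret = xnintr_init(&dev->intr, "mydev", dev->irq,
+ *			  mydev_isr, NULL, 0);
+ *	if (ret)
+ *		return ret;
+ *
+ *	ret = xnintr_attach(&dev->intr, dev, NULL);
+ *	if (ret)
+ *		return ret;
+ *
+ *	xnintr_enable(&dev->intr);
+ */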
+
+/**
+ * @fn int xnintr_detach(struct xnintr *intr)
+ * @brief Detach an interrupt descriptor.
+ *
+ * This call unregisters an interrupt descriptor previously attached
+ * by xnintr_attach() from the interrupt pipeline. Once detached, the
+ * associated interrupt line is disabled, but the descriptor remains
+ * valid. The descriptor can be attached anew by a call to
+ * xnintr_attach().
+ *
+ * @param intr The address of the interrupt descriptor to detach.
+ *
+ * @note The caller <b>must not</b> hold nklock when invoking this
+ * service; this would cause deadlocks.
+ *
+ * @coretags{secondary-only}
+ */
+void xnintr_detach(struct xnintr *intr)
+{
+	secondary_mode_only();
+	trace_cobalt_irq_detach(intr->irq);
+
+	raw_spin_lock(&intr->lock);
+
+	if (test_and_clear_bit(XN_IRQSTAT_ATTACHED, &intr->status)) {
+		xnintr_irq_detach(intr);
+		stat_counter_dec();
+	}
+
+	raw_spin_unlock(&intr->lock);
+}
+EXPORT_SYMBOL_GPL(xnintr_detach);
+
+/**
+ * @fn void xnintr_enable(struct xnintr *intr)
+ * @brief Enable an interrupt line.
+ *
+ * Enables the interrupt line associated with an interrupt descriptor.
+ *
+ * @param intr The address of the interrupt descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void xnintr_enable(struct xnintr *intr)
+{
+	unsigned long flags;
+
+	secondary_mode_only();
+	trace_cobalt_irq_enable(intr->irq);
+
+	raw_spin_lock_irqsave(&intr->lock, flags);
+
+	/*
+	 * If disabled on entry, there is no way we could race with
+	 * disable_irq_line().
+	 */
+	if (test_and_clear_bit(XN_IRQSTAT_DISABLED, &intr->status))
+		ipipe_enable_irq(intr->irq);
+
+	raw_spin_unlock_irqrestore(&intr->lock, flags);
+}
+EXPORT_SYMBOL_GPL(xnintr_enable);
+
+/**
+ * @fn void xnintr_disable(struct xnintr *intr)
+ * @brief Disable an interrupt line.
+ *
+ * Disables the interrupt line associated with an interrupt
+ * descriptor.
+ *
+ * @param intr The address of the interrupt descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void xnintr_disable(struct xnintr *intr)
+{
+	unsigned long flags;
+
+	secondary_mode_only();
+	trace_cobalt_irq_disable(intr->irq);
+
+	/* We only need a virtual masking. */
+	raw_spin_lock_irqsave(&intr->lock, flags);
+
+	/*
+	 * Racing with disable_irq_line() is innocuous, the pipeline
+	 * would serialize calls to ipipe_disable_irq() across CPUs,
+	 * and the descriptor status would still properly match the
+	 * line status in the end.
+	 */
+	if (!test_and_set_bit(XN_IRQSTAT_DISABLED, &intr->status))
+		ipipe_disable_irq(intr->irq);
+
+	raw_spin_unlock_irqrestore(&intr->lock, flags);
+}
+EXPORT_SYMBOL_GPL(xnintr_disable);
+
+/**
+ * @fn int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask)
+ * @brief Set processor affinity of interrupt.
+ *
+ * Restricts the IRQ line associated with the interrupt descriptor @a
+ * intr to be received only on processors whose bits are set in @a
+ * cpumask.
+ *
+ * @param intr The address of the interrupt descriptor.
+ *
+ * @param cpumask The new processor affinity.
+ *
+ * @return 0 is returned on success. Otherwise, -EINVAL is returned if
+ * @a cpumask does not intersect the set of real-time CPUs.
+ *
+ * @note Depending on the architecture, setting more than one bit in
+ * @a cpumask could be meaningless.
+ *
+ * @coretags{secondary-only}
+ */
+int xnintr_affinity(struct xnintr *intr, const cpumask_t *cpumask)
+{
+#ifdef CONFIG_SMP
+	cpumask_t effective_mask;
+
+	secondary_mode_only();
+
+	cpumask_and(&effective_mask, &xnsched_realtime_cpus, cpumask);
+	if (cpumask_empty(&effective_mask))
+		return -EINVAL;
+
+	return ipipe_set_irq_affinity(intr->irq, effective_mask);
+#else
+	secondary_mode_only();
+	return 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(xnintr_affinity);
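+
+/*
+ * Affinity sketch: pinning the hypothetical descriptor to CPU 1,
+ * assuming this CPU belongs to the real-time set:
+ *
+ *	cpumask_t mask;
+ *
+ *	cpumask_clear(&mask);
+ *	cpumask_set_cpu(1, &mask);
+ *	ret = xnintr_affinity(&dev->intr, &mask);
+ */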
+
+static inline int xnintr_is_timer_irq(int irq)
+{
+	int cpu;
+
+	for_each_realtime_cpu(cpu)
+		if (irq == per_cpu(ipipe_percpu.hrtimer_irq, cpu))
+			return 1;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+
+int xnintr_get_query_lock(void)
+{
+	return mutex_lock_interruptible(&intrlock) ? -ERESTARTSYS : 0;
+}
+
+void xnintr_put_query_lock(void)
+{
+	mutex_unlock(&intrlock);
+}
+
+int xnintr_query_init(struct xnintr_iterator *iterator)
+{
+	iterator->prev = NULL;
+
+	/* The order is important here: first xnintr_list_rev then
+	 * xnintr_count.  On the other hand, xnintr_attach/detach()
+	 * update xnintr_count first and then xnintr_list_rev.  This
+	 * should guarantee that we can't get an up-to-date
+	 * xnintr_list_rev and old xnintr_count here. The other way
+	 * around is not a problem as xnintr_query() will notice this
+	 * fact later.  Should xnintr_list_rev change later,
+	 * xnintr_query() will trigger an appropriate error below.
+	 */
+	iterator->list_rev = xnintr_list_rev;
+	smp_mb();
+
+	return xnintr_count;
+}
+
+int xnintr_query_next(int irq, struct xnintr_iterator *iterator,
+		      char *name_buf)
+{
+	int cpu, nr_cpus = num_present_cpus();
+	struct xnintr *intr;
+
+	if (iterator->list_rev != xnintr_list_rev)
+		return -EAGAIN;
+
+	intr = iterator->prev;
+	if (intr == NULL) {
+		if (xnintr_is_timer_irq(irq))
+			intr = &nktimer;
+		else
+			intr = xnintr_vec_first(irq);
+		if (intr == NULL)
+			return -ENODEV;
+		iterator->prev = intr;
+		iterator->cpu = -1;
+	}
+
+	for (;;) {
+		for (cpu = iterator->cpu + 1; cpu < nr_cpus; ++cpu) {
+			if (cpu_online(cpu)) {
+				ksformat(name_buf, XNOBJECT_NAME_LEN, "IRQ%d: %s",
+					irq, intr->name);
+				query_irqstats(intr, cpu, iterator);
+				iterator->cpu = cpu;
+				return 0;
+			}
+		}
+
+		iterator->prev = xnintr_vec_next(intr);
+		if (iterator->prev == NULL)
+			return -ENODEV;
+
+		iterator->cpu = -1;
+	}
+}
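+
+/*
+ * Sketch of a statistics walk over a single IRQ line under the query
+ * lock; consumers would typically snapshot iterator.hits and
+ * iterator.exectime_total for each CPU returned:
+ *
+ *	struct xnintr_iterator it;
+ *	char name[XNOBJECT_NAME_LEN];
+ *
+ *	if (xnintr_get_query_lock() < 0)
+ *		return;
+ *	xnintr_query_init(&it);
+ *	while (xnintr_query_next(irq, &it, name) == 0)
+ *		;
+ *	xnintr_put_query_lock();
+ */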
+
+#endif /* CONFIG_XENO_OPT_STATS_IRQS */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#include <cobalt/kernel/vfile.h>
+
+static inline int format_irq_proc(unsigned int irq,
+				  struct xnvfile_regular_iterator *it)
+{
+	struct xnintr *intr;
+	struct irq_desc *d;
+	int cpu;
+
+	for_each_realtime_cpu(cpu)
+		if (irq == per_cpu(ipipe_percpu.hrtimer_irq, cpu)) {
+			xnvfile_printf(it, "         [timer/%d]", cpu);
+			return 0;
+		}
+
+#ifdef CONFIG_SMP
+	/*
+	 * IPI numbers on ARM are not compile-time constants, so do
+	 * not use switch/case here.
+	 */
+	if (irq == IPIPE_HRTIMER_IPI) {
+		xnvfile_puts(it, "         [timer-ipi]");
+		return 0;
+	}
+	if (irq == IPIPE_RESCHEDULE_IPI) {
+		xnvfile_puts(it, "         [reschedule]");
+		return 0;
+	}
+	if (irq == IPIPE_CRITICAL_IPI) {
+		xnvfile_puts(it, "         [sync]");
+		return 0;
+	}
+#endif /* CONFIG_SMP */
+	if (ipipe_virtual_irq_p(irq)) {
+		xnvfile_puts(it, "         [virtual]");
+		return 0;
+	}
+
+	mutex_lock(&intrlock);
+
+	if (!cobalt_owns_irq(irq)) {
+		xnvfile_puts(it, "         ");
+		d = irq_to_desc(irq);
+		xnvfile_puts(it, d && d->name ? d->name : "-");
+	} else {
+		intr = xnintr_vec_first(irq);
+		if (intr) {
+			xnvfile_puts(it, "        ");
+
+			do {
+				xnvfile_putc(it, ' ');
+				xnvfile_puts(it, intr->name);
+				intr = xnintr_vec_next(intr);
+			} while (intr);
+		}
+	}
+
+	mutex_unlock(&intrlock);
+
+	return 0;
+}
+
+static int irq_vfile_show(struct xnvfile_regular_iterator *it,
+			  void *data)
+{
+	int cpu, irq;
+
+	/* FIXME: We assume the entire output fits in a single page. */
+
+	xnvfile_puts(it, "  IRQ ");
+
+	for_each_realtime_cpu(cpu)
+		xnvfile_printf(it, "        CPU%d", cpu);
+
+	for (irq = 0; irq < IPIPE_NR_IRQS; irq++) {
+		if (__ipipe_irq_handler(&xnsched_primary_domain, irq) == NULL)
+			continue;
+
+		xnvfile_printf(it, "\n%5d:", irq);
+
+		for_each_realtime_cpu(cpu) {
+			xnvfile_printf(it, "%12lu",
+				       __ipipe_cpudata_irq_hits(&xnsched_primary_domain, cpu,
+								irq));
+		}
+
+		format_irq_proc(irq, it);
+	}
+
+	xnvfile_putc(it, '\n');
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops irq_vfile_ops = {
+	.show = irq_vfile_show,
+};
+
+static struct xnvfile_regular irq_vfile = {
+	.ops = &irq_vfile_ops,
+};
+
+void xnintr_init_proc(void)
+{
+	xnvfile_init_regular("irq", &irq_vfile, &cobalt_vfroot);
+}
+
+void xnintr_cleanup_proc(void)
+{
+	xnvfile_destroy_regular(&irq_vfile);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c
new file mode 100644
index 0000000..43cc192
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/kevents.c
@@ -0,0 +1,541 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org>
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org>
+ */
+
+#include <linux/ipipe.h>
+#include <linux/ipipe_tickdev.h>
+#include <linux/ptrace.h>
+#include <linux/kallsyms.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vdso.h>
+#include <rtdm/driver.h>
+#include <trace/events/cobalt-core.h>
+#include "../posix/process.h"
+#include "../posix/thread.h"
+#include "../posix/memory.h"
+
+static inline int handle_exception(struct ipipe_trap_data *d)
+{
+	struct xnthread *thread;
+	struct xnsched *sched;
+
+	sched = xnsched_current();
+	thread = sched->curr;
+
+	trace_cobalt_thread_fault(xnarch_fault_pc(d),
+				  xnarch_fault_trap(d));
+
+	if (xnthread_test_state(thread, XNROOT))
+		return 0;
+
+	if (xnarch_fault_fpu_p(d)) {
+#ifdef CONFIG_XENO_ARCH_FPU
+		spl_t s;
+
+		/* FPU exception received in primary mode. */
+		splhigh(s);
+		if (xnarch_handle_fpu_fault(sched->fpuholder, thread, d)) {
+			sched->fpuholder = thread;
+			splexit(s);
+			return 1;
+		}
+		splexit(s);
+#endif /* CONFIG_XENO_ARCH_FPU */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4, 16, 0)
+		printk("invalid use of FPU in Xenomai context at %pS\n",
+		       (void *)xnarch_fault_pc(d));
+#else
+		print_symbol("invalid use of FPU in Xenomai context at %s\n",
+			     xnarch_fault_pc(d));
+#endif
+	}
+
+	if (xnarch_fault_bp_p(d) && user_mode(d->regs)) {
+		spl_t s;
+
+		XENO_WARN_ON(CORE, xnthread_test_state(thread, XNRELAX));
+		xnlock_get_irqsave(&nklock, s);
+		xnthread_set_info(thread, XNCONTHI);
+		ipipe_enable_user_intret_notifier();
+		cobalt_stop_debugged_process(thread);
+		xnlock_put_irqrestore(&nklock, s);
+		xnsched_run();
+	}
+
+	/*
+	 * If we experienced a trap on behalf of a shadow thread
+	 * running in primary mode, move it to the Linux domain,
+	 * letting the kernel process the exception.
+	 */
+#if defined(CONFIG_XENO_OPT_DEBUG_COBALT) || defined(CONFIG_XENO_OPT_DEBUG_USER)
+	if (!user_mode(d->regs)) {
+		xntrace_panic_freeze();
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u in "
+		       "kernel-space at 0x%lx (pid %d)\n", thread->name,
+		       xnarch_fault_trap(d),
+		       xnarch_fault_pc(d),
+		       xnthread_host_pid(thread));
+		xntrace_panic_dump();
+	} else if (xnarch_fault_notify(d)) /* Don't report debug traps */
+		printk(XENO_WARNING
+		       "switching %s to secondary mode after exception #%u from "
+		       "user-space at 0x%lx (pid %d)\n", thread->name,
+		       xnarch_fault_trap(d),
+		       xnarch_fault_pc(d),
+		       xnthread_host_pid(thread));
+#endif
+
+	if (xnarch_fault_pf_p(d))
+		/*
+		 * The page fault counter is not SMP-safe, but it's a
+		 * simple indicator that something went wrong wrt
+		 * memory locking anyway.
+		 */
+		xnstat_counter_inc(&thread->stat.pf);
+
+	xnthread_relax(xnarch_fault_notify(d), SIGDEBUG_MIGRATE_FAULT);
+
+	return 0;
+}
+
+static int handle_mayday_event(struct pt_regs *regs)
+{
+	XENO_BUG_ON(COBALT, !xnthread_test_state(xnthread_current(), XNUSER));
+
+	xnthread_relax(0, 0);
+
+	return KEVENT_PROPAGATE;
+}
+
+int ipipe_trap_hook(struct ipipe_trap_data *data)
+{
+	if (data->exception == IPIPE_TRAP_MAYDAY)
+		return handle_mayday_event(data->regs);
+
+	/*
+	 * No migration is possible on behalf of the head domain, so
+	 * the following access is safe.
+	 */
+	raw_cpu_ptr(&cobalt_machine_cpudata)->faults[data->exception]++;
+
+	if (handle_exception(data))
+		return KEVENT_STOP;
+
+	/*
+	 * CAUTION: access faults must be propagated downstream
+	 * whichever domain caused them, so that we don't spuriously
+	 * raise a fatal error when some Linux fixup code is available
+	 * to recover from the fault.
+	 */
+	return KEVENT_PROPAGATE;
+}
+
+/*
+ * Legacy idle hook, unconditionally allow entering the idle state.
+ */
+bool ipipe_enter_idle_hook(void)
+{
+	return true;
+}
+
+static inline int handle_setaffinity_event(struct ipipe_cpu_migration_data *d)
+{
+	return cobalt_handle_setaffinity_event(d->task);
+}
+
+static inline int handle_taskexit_event(struct task_struct *p)
+{
+	return cobalt_handle_taskexit_event(p);
+}
+
+void ipipe_migration_hook(struct task_struct *p) /* hw IRQs off */
+{
+	struct xnthread *thread = xnthread_from_task(p);
+
+	xnlock_get(&nklock);
+
+	/*
+	 * We fire the handler before the thread is migrated, so that
+	 * thread->sched does not change between paired invocations of
+	 * relax_thread/harden_thread handlers.
+	 */
+	xnthread_run_handler_stack(thread, harden_thread);
+
+	cobalt_adjust_affinity(p);
+
+	xnthread_resume(thread, XNRELAX);
+
+	/*
+	 * In case we migrated independently of the user return notifier, clear
+	 * XNCONTHI here and also disable the notifier - we are already done.
+	 */
+	if (unlikely(xnthread_test_info(thread, XNCONTHI))) {
+		xnthread_clear_info(thread, XNCONTHI);
+		ipipe_disable_user_intret_notifier();
+	}
+
+	/* Unregister as debugged thread in case we postponed this. */
+	if (unlikely(xnthread_test_state(thread, XNSSTEP)))
+		cobalt_unregister_debugged_thread(thread);
+
+	xnlock_put(&nklock);
+
+	xnsched_run();
+}
+
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+
+static IPIPE_DEFINE_SPINLOCK(__hostrtlock);
+
+static int handle_hostrt_event(struct ipipe_hostrt_data *hostrt)
+{
+	unsigned long flags;
+	urwstate_t tmp;
+
+	/*
+	 * The locking strategy is twofold:
+	 * - The spinlock protects against concurrent updates from within the
+	 *   Linux kernel and against preemption by Xenomai
+	 * - The unsynced R/W block is for lockless read-only access.
+	 */
+	raw_spin_lock_irqsave(&__hostrtlock, flags);
+
+	unsynced_write_block(&tmp, &nkvdso->hostrt_data.lock) {
+		nkvdso->hostrt_data.live = 1;
+		nkvdso->hostrt_data.cycle_last = hostrt->cycle_last;
+		nkvdso->hostrt_data.mask = hostrt->mask;
+		nkvdso->hostrt_data.mult = hostrt->mult;
+		nkvdso->hostrt_data.shift = hostrt->shift;
+		nkvdso->hostrt_data.wall_sec = hostrt->wall_time_sec;
+		nkvdso->hostrt_data.wall_nsec = hostrt->wall_time_nsec;
+		nkvdso->hostrt_data.wtom_sec = hostrt->wall_to_monotonic.tv_sec;
+		nkvdso->hostrt_data.wtom_nsec = hostrt->wall_to_monotonic.tv_nsec;
+	}
+
+	raw_spin_unlock_irqrestore(&__hostrtlock, flags);
+
+	return KEVENT_PROPAGATE;
+}
+
+static inline void init_hostrt(void)
+{
+	unsynced_rw_init(&nkvdso->hostrt_data.lock);
+	nkvdso->hostrt_data.live = 0;
+}
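+
+/*
+ * For reference, the lockless reader side pairs with the writer block
+ * above roughly as follows (a sketch; unsynced_read_block() retries
+ * the section if it raced with a writer):
+ *
+ *	urwstate_t tmp;
+ *
+ *	unsynced_read_block(&tmp, &nkvdso->hostrt_data.lock) {
+ *		live = nkvdso->hostrt_data.live;
+ *		wall_sec = nkvdso->hostrt_data.wall_sec;
+ *		wall_nsec = nkvdso->hostrt_data.wall_nsec;
+ *	}
+ */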
+
+#else /* !CONFIG_IPIPE_HAVE_HOSTRT */
+
+struct ipipe_hostrt_data;
+
+static inline int handle_hostrt_event(struct ipipe_hostrt_data *hostrt)
+{
+	return KEVENT_PROPAGATE;
+}
+
+static inline void init_hostrt(void) { }
+
+#endif /* !CONFIG_IPIPE_HAVE_HOSTRT */
+
+static int handle_schedule_event(struct task_struct *next_task)
+{
+	struct task_struct *prev_task;
+	struct xnthread *next;
+	sigset_t pending;
+	spl_t s;
+
+	cobalt_signal_yield();
+
+	prev_task = current;
+	next = xnthread_from_task(next_task);
+	if (next == NULL)
+		goto out;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * Track tasks leaving the ptraced state.  Check both SIGSTOP
+	 * (NPTL) and SIGINT (LinuxThreads) to detect ptrace
+	 * continuation.
+	 */
+	if (xnthread_test_state(next, XNSSTEP)) {
+		if (signal_pending(next_task)) {
+			/*
+			 * Do not grab the sighand lock here: it's
+			 * useless, and we already own the runqueue
+			 * lock, so this would expose us to deadlock
+			 * situations on SMP.
+			 */
+			sigorsets(&pending,
+				  &next_task->pending.signal,
+				  &next_task->signal->shared_pending.signal);
+			if (sigismember(&pending, SIGSTOP) ||
+			    sigismember(&pending, SIGINT))
+				goto no_ptrace;
+		}
+
+		/*
+		 * Do not unregister before the thread migrated.
+		 * cobalt_unregister_debugged_thread will then be called by our
+		 * ipipe_migration_hook.
+		 */
+		if (!xnthread_test_info(next, XNCONTHI))
+			cobalt_unregister_debugged_thread(next);
+
+		xnthread_set_localinfo(next, XNHICCUP);
+	}
+
+no_ptrace:
+	xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Do basic sanity checks on the incoming thread state.
+	 * NOTE: we allow ptraced threads to run shortly in order to
+	 * properly recover from a stopped state.
+	 */
+	if (!XENO_WARN(COBALT, !xnthread_test_state(next, XNRELAX),
+		       "hardened thread %s[%d] running in Linux domain?! "
+		       "(status=0x%x, sig=%d, prev=%s[%d])",
+		       next->name, task_pid_nr(next_task),
+		       xnthread_get_state(next),
+		       signal_pending(next_task),
+		       prev_task->comm, task_pid_nr(prev_task)))
+		XENO_WARN(COBALT,
+			  !(next_task->ptrace & PT_PTRACED) &&
+			   !xnthread_test_state(next, XNDORMANT)
+			  && xnthread_test_state(next, XNPEND),
+			  "blocked thread %s[%d] rescheduled?! "
+			  "(status=0x%x, sig=%d, prev=%s[%d])",
+			  next->name, task_pid_nr(next_task),
+			  xnthread_get_state(next),
+			  signal_pending(next_task), prev_task->comm,
+			  task_pid_nr(prev_task));
+out:
+	return KEVENT_PROPAGATE;
+}
+
+static int handle_sigwake_event(struct task_struct *p)
+{
+	struct xnthread *thread;
+	sigset_t pending;
+	spl_t s;
+
+	thread = xnthread_from_task(p);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * CAUTION: __TASK_TRACED is not set in p->state yet. This
+	 * state bit will be set right after we return, when the task
+	 * is woken up.
+	 */
+	if ((p->ptrace & PT_PTRACED) && !xnthread_test_state(thread, XNSSTEP)) {
+		/* We already own the siglock. */
+		sigorsets(&pending,
+			  &p->pending.signal,
+			  &p->signal->shared_pending.signal);
+
+		if (sigismember(&pending, SIGTRAP) ||
+		    sigismember(&pending, SIGSTOP)
+		    || sigismember(&pending, SIGINT))
+			cobalt_register_debugged_thread(thread);
+	}
+
+	if (xnthread_test_state(thread, XNRELAX))
+		goto out;
+
+	/*
+	 * If kicking a shadow thread in primary mode, make sure Linux
+	 * won't schedule in its mate under our feet as a result of
+	 * running signal_wake_up(). The Xenomai scheduler must remain
+	 * in control for now, until we explicitly relax the shadow
+	 * thread to allow for processing the pending signals. Make
+	 * sure we keep the additional state flags unmodified so that
+	 * we don't break any undergoing ptrace.
+	 */
+	if (p->state & (TASK_INTERRUPTIBLE|TASK_UNINTERRUPTIBLE))
+		cobalt_set_task_state(p, p->state | TASK_NOWAKEUP);
+
+	/*
+	 * Allow a thread stopped for debugging to resume briefly in order to
+	 * migrate to secondary mode. xnthread_relax will reapply XNDBGSTOP.
+	 */
+	if (xnthread_test_state(thread, XNDBGSTOP))
+		xnthread_resume(thread, XNDBGSTOP);
+
+	__xnthread_kick(thread);
+out:
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return KEVENT_PROPAGATE;
+}
+
+static inline int handle_cleanup_event(struct mm_struct *mm)
+{
+	return cobalt_handle_cleanup_event(mm);
+}
+
+void pipeline_cleanup_process(void)
+{
+	ipipe_disable_notifier(current);
+}
+
+static inline int handle_clockfreq_event(unsigned int *p)
+{
+	unsigned int newfreq = *p;
+
+	pipeline_update_clock_freq(newfreq);
+
+	return KEVENT_PROPAGATE;
+}
+
+static inline int handle_user_return(struct task_struct *task)
+{
+	ipipe_disable_user_intret_notifier();
+	return cobalt_handle_user_return(task);
+}
+
+int handle_ptrace_resume(struct ipipe_ptrace_resume_data *resume)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	thread = xnthread_from_task(resume->task);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	if (resume->request == PTRACE_SINGLESTEP &&
+	    xnthread_test_state(thread, XNSSTEP)) {
+		xnlock_get_irqsave(&nklock, s);
+
+		xnthread_resume(thread, XNDBGSTOP);
+		cobalt_unregister_debugged_thread(thread);
+
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return KEVENT_PROPAGATE;
+}
+
+int ipipe_kevent_hook(int kevent, void *data)
+{
+	int ret;
+
+	switch (kevent) {
+	case IPIPE_KEVT_SCHEDULE:
+		ret = handle_schedule_event(data);
+		break;
+	case IPIPE_KEVT_SIGWAKE:
+		ret = handle_sigwake_event(data);
+		break;
+	case IPIPE_KEVT_EXIT:
+		ret = handle_taskexit_event(data);
+		break;
+	case IPIPE_KEVT_CLEANUP:
+		ret = handle_cleanup_event(data);
+		break;
+	case IPIPE_KEVT_SETAFFINITY:
+		ret = handle_setaffinity_event(data);
+		break;
+#ifdef CONFIG_IPIPE_HAVE_HOSTRT
+	case IPIPE_KEVT_HOSTRT:
+		ret = handle_hostrt_event(data);
+		break;
+#endif
+	case IPIPE_KEVT_CLOCKFREQ:
+		ret = handle_clockfreq_event(data);
+		break;
+	case IPIPE_KEVT_USERINTRET:
+		ret = handle_user_return(data);
+		break;
+	case IPIPE_KEVT_PTRESUME:
+		ret = handle_ptrace_resume(data);
+		break;
+	default:
+		ret = KEVENT_PROPAGATE;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_MMU
+
+int pipeline_prepare_current(void)
+{
+	struct task_struct *p = current;
+	kernel_siginfo_t si;
+
+	if ((p->mm->def_flags & VM_LOCKED) == 0) {
+		memset(&si, 0, sizeof(si));
+		si.si_signo = SIGDEBUG;
+		si.si_code = SI_QUEUE;
+		si.si_int = SIGDEBUG_NOMLOCK | sigdebug_marker;
+		send_sig_info(SIGDEBUG, &si, p);
+		return 0;
+	}
+
+	return __ipipe_disable_ondemand_mappings(p);
+}
+
+static inline int get_mayday_prot(void)
+{
+	return PROT_READ|PROT_EXEC;
+}
+
+#else /* !CONFIG_MMU */
+
+int pipeline_prepare_current(void)
+{
+	return 0;
+}
+
+static inline int get_mayday_prot(void)
+{
+	/*
+	 * Until we stop backing /dev/mem with the mayday page, we
+	 * can't ask for PROT_EXEC since the former does not define
+	 * mmap capabilities, and default ones won't allow an
+	 * executable mapping with MAP_SHARED. In the NOMMU case, this
+	 * is (currently) not an issue.
+	 */
+	return PROT_READ;
+}
+
+#endif /* !CONFIG_MMU */
+
+void pipeline_attach_current(struct xnthread *thread)
+{
+	struct cobalt_threadinfo *p;
+
+	p = pipeline_current();
+	p->thread = thread;
+	p->process = cobalt_search_process(current->mm);
+}
+
+int pipeline_trap_kevents(void)
+{
+	init_hostrt();
+	ipipe_set_hooks(ipipe_root_domain, IPIPE_SYSCALL|IPIPE_KEVENT);
+	ipipe_set_hooks(&xnsched_primary_domain, IPIPE_SYSCALL|IPIPE_TRAP);
+
+	return 0;
+}
+
+void pipeline_enable_kevents(void)
+{
+	ipipe_enable_notifier(current);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c
new file mode 100644
index 0000000..3104e50
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/sched.c
@@ -0,0 +1,198 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001-2020 Philippe Gerum <rpm@xenomai.org>.
+ */
+
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+
+int pipeline_schedule(struct xnsched *sched)
+{
+	int ret = 0;
+
+	XENO_WARN_ON_ONCE(COBALT,
+		!hard_irqs_disabled() && is_secondary_domain());
+
+	if (!xnarch_escalate())
+		ret = ___xnsched_run(sched);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(pipeline_schedule);
+
+void pipeline_prep_switch_oob(struct xnthread *root)
+{
+	struct xnarchtcb *rootcb = xnthread_archtcb(root);
+	struct task_struct *p = current;
+
+	ipipe_notify_root_preemption();
+	/* Remember the preempted Linux task pointer. */
+	rootcb->core.host_task = p;
+	rootcb->core.tsp = &p->thread;
+	rootcb->core.mm = rootcb->core.active_mm = ipipe_get_active_mm();
+	rootcb->core.tip = task_thread_info(p);
+	xnarch_leave_root(root);
+}
+
+#ifdef CONFIG_XENO_ARCH_FPU
+
+static void switch_fpu(void)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnthread *curr = sched->curr;
+
+	if (!xnthread_test_state(curr, XNFPU))
+		return;
+
+	xnarch_switch_fpu(sched->fpuholder, curr);
+	sched->fpuholder = curr;
+}
+
+static void giveup_fpu(struct xnthread *thread)
+{
+	struct xnsched *sched = thread->sched;
+
+	if (thread == sched->fpuholder)
+		sched->fpuholder = NULL;
+}
+
+#else
+
+static inline void giveup_fpu(struct xnthread *thread)
+{ }
+
+#endif /* !CONFIG_XENO_ARCH_FPU */
+
+bool pipeline_switch_to(struct xnthread *prev, struct xnthread *next,
+			bool leaving_inband)
+{
+	xnarch_switch_to(prev, next);
+
+	/*
+	 * Test whether we transitioned from primary mode to secondary
+	 * over a shadow thread, caused by a call to xnthread_relax().
+	 * In such a case, we are running over the regular schedule()
+	 * tail code, so we have to tell the caller to skip the Cobalt
+	 * tail code.
+	 */
+	if (!leaving_inband && is_secondary_domain()) {
+		__ipipe_complete_domain_migration();
+		XENO_BUG_ON(COBALT, xnthread_current() == NULL);
+		/*
+		 * Interrupts must be disabled here (this has to be
+		 * done on entry of the Linux [__]switch_to function),
+		 * and this is what callers expect, specifically the
+		 * rescheduling of an IRQ handler that hit before we
+		 * called xnsched_run() in xnthread_suspend() when
+		 * relaxing a thread.
+		 */
+		XENO_BUG_ON(COBALT, !hard_irqs_disabled());
+		return true;
+	}
+
+	switch_fpu();
+
+	return false;
+}
+
+void pipeline_init_shadow_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+	struct task_struct *p = current;
+
+	/*
+	 * If the current task is a kthread, the pipeline will take
+	 * the necessary steps to make the FPU usable in such
+	 * context. The kernel already took care of this issue for
+	 * userland tasks (e.g. setting up a clean backup area).
+	 */
+	__ipipe_share_current(0);
+
+	tcb->core.host_task = p;
+	tcb->core.tsp = &p->thread;
+	tcb->core.mm = p->mm;
+	tcb->core.active_mm = p->mm;
+	tcb->core.tip = task_thread_info(p);
+#ifdef CONFIG_XENO_ARCH_FPU
+	tcb->core.user_fpu_owner = p;
+#endif /* CONFIG_XENO_ARCH_FPU */
+	xnarch_init_shadow_tcb(thread);
+
+	trace_cobalt_shadow_map(thread);
+}
+
+void pipeline_init_root_tcb(struct xnthread *thread)
+{
+	struct xnarchtcb *tcb = xnthread_archtcb(thread);
+	struct task_struct *p = current;
+
+	tcb->core.host_task = p;
+	tcb->core.tsp = &tcb->core.ts;
+	tcb->core.mm = p->mm;
+	tcb->core.tip = NULL;
+	xnarch_init_root_tcb(thread);
+}
+
+int pipeline_leave_inband(void)
+{
+	int ret;
+
+	ret = __ipipe_migrate_head();
+	if (ret)
+		return ret;
+
+	switch_fpu();
+
+	return 0;
+}
+
+int pipeline_leave_oob_prepare(void)
+{
+	struct xnthread *curr = xnthread_current();
+	struct task_struct *p = current;
+	int suspmask = XNRELAX;
+
+	set_current_state(p->state & ~TASK_NOWAKEUP);
+
+	/*
+	 * If current is being debugged, record that it should migrate
+	 * back in case it resumes in userspace. If it resumes in
+	 * kernel space, i.e.  over a restarting syscall, the
+	 * associated hardening will both clear XNCONTHI and disable
+	 * the user return notifier again.
+	 */
+	if (xnthread_test_state(curr, XNSSTEP)) {
+		xnthread_set_info(curr, XNCONTHI);
+		ipipe_enable_user_intret_notifier();
+		suspmask |= XNDBGSTOP;
+	}
+	/*
+	 * Return the suspension bits the caller should pass to
+	 * xnthread_suspend().
+	 */
+	return suspmask;
+}
+
+void pipeline_leave_oob_finish(void)
+{
+	__ipipe_reenter_root();
+}
+
+void pipeline_finalize_thread(struct xnthread *thread)
+{
+	giveup_fpu(thread);
+}
+
+void pipeline_raise_mayday(struct task_struct *tsk)
+{
+	ipipe_raise_mayday(tsk);
+}
+
+void pipeline_clear_mayday(void) /* May solely affect current. */
+{
+	ipipe_clear_thread_flag(TIP_MAYDAY);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c
new file mode 100644
index 0000000..867a81e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/syscall.c
@@ -0,0 +1,29 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+
+#include <pipeline/pipeline.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/assert.h>
+#include <xenomai/posix/syscall.h>
+
+int ipipe_syscall_hook(struct ipipe_domain *ipd, struct pt_regs *regs)
+{
+	if (unlikely(is_secondary_domain()))
+		return handle_root_syscall(regs);
+
+	return handle_head_syscall(ipd != &xnsched_primary_domain, regs);
+}
+
+int ipipe_fastcall_hook(struct pt_regs *regs)
+{
+	int ret;
+
+	ret = handle_head_syscall(false, regs);
+	XENO_BUG_ON(COBALT, ret == KEVENT_PROPAGATE);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c
new file mode 100644
index 0000000..db6e37c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/ipipe/tick.c
@@ -0,0 +1,286 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ */
+#include <linux/ipipe.h>
+#include <linux/ipipe_tickdev.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/arith.h>
+
+extern struct xnintr nktimer;
+
+/**
+ * @internal
+ * @fn static int program_htick_shot(unsigned long delay, struct clock_event_device *cdev)
+ *
+ * @brief Program next host tick as a Xenomai timer event.
+ *
+ * Program the next shot for the host tick on the current CPU.
+ * Emulation is done using a nucleus timer attached to the master
+ * timebase.
+ *
+ * @param delay The time delta from the current date to the next tick,
+ * expressed as a count of nanoseconds.
+ *
+ * @param cdev A pointer to the clock device which notifies us.
+ *
+ * @coretags{unrestricted}
+ */
+static int program_htick_shot(unsigned long delay,
+			      struct clock_event_device *cdev)
+{
+	struct xnsched *sched;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_current();
+	ret = xntimer_start(&sched->htimer, delay, XN_INFINITE, XN_RELATIVE);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret ? -ETIME : 0;
+}
+
+/**
+ * @internal
+ * @fn void switch_htick_mode(enum clock_event_mode mode, struct clock_event_device *cdev)
+ *
+ * @brief Tick mode switch emulation callback.
+ *
+ * Changes the host tick mode for the tick device of the current CPU.
+ *
+ * @param mode The new mode to switch to. The possible values are:
+ *
+ * - CLOCK_EVT_MODE_ONESHOT, for a switch to oneshot mode.
+ *
+ * - CLOCK_EVT_MODE_PERIODIC, for a switch to periodic mode. The
+ * current implementation of the generic clockevent layer in Linux
+ * should never downgrade from oneshot to periodic tick mode, so
+ * this mode should not be encountered. That said, the associated
+ * code is provided, basically for illustration purposes.
+ *
+ * - CLOCK_EVT_MODE_SHUTDOWN, indicates the removal of the current
+ * tick device. Normally, the nucleus only interposes on tick devices
+ * which should never be shut down, so this mode should not be
+ * encountered.
+ *
+ * @param cdev An opaque pointer to the clock device which notifies us.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note GENERIC_CLOCKEVENTS is required from the host kernel.
+ */
+static void switch_htick_mode(enum clock_event_mode mode,
+			      struct clock_event_device *cdev)
+{
+	struct xnsched *sched;
+	xnticks_t tickval;
+	spl_t s;
+
+	if (mode == CLOCK_EVT_MODE_ONESHOT)
+		return;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sched = xnsched_current();
+
+	switch (mode) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		tickval = 1000000000UL / HZ;
+		xntimer_start(&sched->htimer, tickval, tickval, XN_RELATIVE);
+		break;
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		xntimer_stop(&sched->htimer);
+		break;
+	default:
+		XENO_BUG(COBALT);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+static int grab_timer_on_cpu(int cpu)
+{
+	int tickval, ret;
+
+	ret = ipipe_timer_start(xnintr_core_clock_handler,
+				switch_htick_mode, program_htick_shot, cpu);
+	switch (ret) {
+	case CLOCK_EVT_MODE_PERIODIC:
+		/*
+		 * Oneshot tick emulation callback won't be used, ask
+		 * the caller to start an internal timer for emulating
+		 * a periodic tick.
+		 */
+		tickval = 1000000000UL / HZ;
+		break;
+
+	case CLOCK_EVT_MODE_ONESHOT:
+		/* oneshot tick emulation */
+		tickval = 1;
+		break;
+
+	case CLOCK_EVT_MODE_UNUSED:
+		/* we don't need to emulate the tick at all. */
+		tickval = 0;
+		break;
+
+	case CLOCK_EVT_MODE_SHUTDOWN:
+		return -ENODEV;
+
+	default:
+		return ret;
+	}
+
+	return tickval;
+}
+
+/**
+ * @fn int pipeline_install_tick_proxy(void)
+ * @brief Grab the hardware timer on all real-time CPUs.
+ *
+ * pipeline_install_tick_proxy() grabs and tunes the hardware timer for all
+ * real-time CPUs.
+ *
+ * Host tick emulation is performed for sharing the clock chip between
+ * Linux and Xenomai.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -EBUSY is returned if the hardware timer has already been
+ * grabbed.  xntimer_release_hardware() must be issued before
+ * pipeline_install_tick_proxy() is called again.
+ *
+ * - -ENODEV is returned if the hardware timer cannot be used.  This
+ * situation may occur after the kernel disabled the timer due to
+ * invalid calibration results; in such a case, such hardware is
+ * unusable for any timing duties.
+ *
+ * @coretags{secondary-only}
+ */
+
+int pipeline_install_tick_proxy(void)
+{
+	struct xnsched *sched;
+	int ret, cpu, _cpu;
+	spl_t s;
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	/*
+	 * For statistical purposes only: the timer interrupt
+	 * descriptor is attached by pipeline_install_tick_proxy().
+	 */
+	xnintr_init(&nktimer, "[timer]",
+		    per_cpu(ipipe_percpu.hrtimer_irq, 0), NULL, NULL, 0);
+#endif /* CONFIG_XENO_OPT_STATS_IRQS */
+
+#ifdef CONFIG_SMP
+	ret = ipipe_request_irq(&cobalt_pipeline.domain,
+				IPIPE_HRTIMER_IPI,
+				(ipipe_irq_handler_t)xnintr_core_clock_handler,
+				NULL, NULL);
+	if (ret)
+		return ret;
+#endif
+
+	for_each_realtime_cpu(cpu) {
+		ret = grab_timer_on_cpu(cpu);
+		if (ret < 0)
+			goto fail;
+
+		xnlock_get_irqsave(&nklock, s);
+
+		/*
+		 * If the current tick device for the target CPU is
+		 * periodic, we won't be called back for host tick
+		 * emulation. Therefore, we need to start a periodic
+		 * nucleus timer which will emulate the ticking for
+		 * that CPU, since we are going to hijack the hw clock
+		 * chip for managing our own system timer.
+		 *
+		 * CAUTION:
+		 *
+		 * - nucleus timers may be started only _after_ the hw
+		 * timer has been set up for the target CPU through a
+		 * call to pipeline_install_tick_proxy().
+		 *
+		 * - we don't compensate for the elapsed portion of
+		 * the current host tick, since we cannot get this
+		 * information easily for all CPUs except the current
+		 * one, and also because of the declining relevance of
+		 * the jiffies clocksource anyway.
+		 *
+		 * - we must not hold the nklock across calls to
+		 * pipeline_install_tick_proxy().
+		 */
+
+		sched = xnsched_struct(cpu);
+		/* Set up timer with host tick period if valid. */
+		if (ret > 1)
+			xntimer_start(&sched->htimer, ret, ret, XN_RELATIVE);
+		else if (ret == 1)
+			xntimer_start(&sched->htimer, 0, 0, XN_RELATIVE);
+
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return 0;
+fail:
+	for_each_realtime_cpu(_cpu) {
+		if (_cpu == cpu)
+			break;
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(_cpu);
+		xntimer_stop(&sched->htimer);
+		xnlock_put_irqrestore(&nklock, s);
+		ipipe_timer_stop(_cpu);
+	}
+
+#ifdef CONFIG_SMP
+	ipipe_free_irq(&cobalt_pipeline.domain,
+		       IPIPE_HRTIMER_IPI);
+#endif
+
+	return ret;
+}
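+
+/*
+ * A minimal sketch of how a caller may interpret the return value
+ * (illustrative only; error handling trimmed):
+ *
+ *	int ret = pipeline_install_tick_proxy();
+ *
+ *	if (ret < 0)
+ *		return ret;
+ *	if (ret == 0)
+ *		pr_info("oneshot mode, no periodic tick to emulate\n");
+ *	else
+ *		pr_info("host tick period: %d ns\n", ret);
+ */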
+
+/**
+ * @fn void pipeline_uninstall_tick_proxy(void)
+ * @brief Release hardware timers.
+ *
+ * Releases hardware timers previously grabbed by a call to
+ * pipeline_install_tick_proxy().
+ *
+ * @coretags{secondary-only}
+ */
+void pipeline_uninstall_tick_proxy(void)
+{
+	int cpu;
+
+	/*
+	 * We must not hold the nklock while stopping the hardware
+	 * timer, since this could cause deadlock situations to arise
+	 * on SMP systems.
+	 */
+	for_each_realtime_cpu(cpu)
+		ipipe_timer_stop(cpu);
+
+#ifdef CONFIG_SMP
+	ipipe_free_irq(&cobalt_pipeline.domain,
+		       IPIPE_HRTIMER_IPI);
+#endif
+
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	xnintr_destroy(&nktimer);
+#endif /* CONFIG_XENO_OPT_STATS_IRQS */
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c
new file mode 100644
index 0000000..e48072d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/lock.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) 2001-2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004,2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <cobalt/kernel/lock.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_lock Locking services
+ *
+ * The Xenomai core deals with concurrent activities from two distinct
+ * kernels running side-by-side. When interrupts are involved, the
+ * services from this section control the @b hard interrupt state
+ * exclusively, for protecting against processor-local or SMP
+ * concurrency.
+ *
+ * @note In a dual kernel configuration, <i>hard interrupts</i> are
+ * gated by the CPU. When enabled, hard interrupts are immediately
+ * delivered to the Xenomai core if they belong to a real-time source,
+ * or deferred until enabled by a second-stage virtual interrupt mask,
+ * if they belong to regular Linux devices/sources.
+ *
+ * @{
+ */
+DEFINE_XNLOCK(nklock);
+#if defined(CONFIG_SMP) || defined(CONFIG_XENO_OPT_DEBUG_LOCKING)
+EXPORT_SYMBOL_GPL(nklock);
+
+#ifdef CONFIG_XENO_ARCH_OUTOFLINE_XNLOCK
+int ___xnlock_get(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	return ____xnlock_get(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+}
+EXPORT_SYMBOL_GPL(___xnlock_get);
+
+void ___xnlock_put(struct xnlock *lock /*, */ XNLOCK_DBG_CONTEXT_ARGS)
+{
+	____xnlock_put(lock /* , */ XNLOCK_DBG_PASS_CONTEXT);
+}
+EXPORT_SYMBOL_GPL(___xnlock_put);
+#endif /* out of line xnlock */
+#endif /* CONFIG_SMP || XENO_DEBUG(LOCKING) */
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+DEFINE_PER_CPU(struct xnlockinfo, xnlock_stats);
+EXPORT_PER_CPU_SYMBOL_GPL(xnlock_stats);
+#endif
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/map.c b/kernel/xenomai-v3.2.4/kernel/cobalt/map.c
new file mode 100644
index 0000000..161d24c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/map.c
@@ -0,0 +1,265 @@
+/*
+ * Copyright (C) 2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/map.h>
+#include <asm/xenomai/machine.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_map Lightweight key-to-object mapping service
+ *
+ * A map is a simple indexing structure which associates unique
+ * integer keys with pointers to objects.  The current implementation
+ * supports reservation, for naming/indexing objects, either on a
+ * fixed, user-provided integer (i.e. a reserved key value), or by
+ * drawing the next available key internally if the caller did not
+ * specify any fixed key. For instance, in some given map, the key
+ * space ranging from 0 to 255 could be reserved for fixed keys,
+ * whilst the range from 256 to 511 could be available for drawing
+ * free keys dynamically.
+ *
+ * A maximum of 1024 unique keys per map is supported on 32-bit
+ * machines.
+ *
+ * (This implementation should not be confused with C++ STL maps,
+ * which are dynamically expandable and allow arbitrary key types;
+ * Xenomai maps don't).
+ *
+ * @{
+ */
+
+/**
+ * @fn struct xnmap *xnmap_create(int nkeys, int reserve, int offset)
+ * @brief Create a map.
+ *
+ * Allocates a new map with the specified addressing capabilities. The
+ * memory is obtained from the Xenomai system heap.
+ *
+ * @param nkeys The maximum number of unique keys the map will be able
+ * to hold. This value cannot exceed the static limit represented by
+ * XNMAP_MAX_KEYS, and must be a power of two.
+ *
+ * @param reserve The number of keys which should be kept for
+ * reservation within the index space. Reserving a key means to
+ * specify a valid key to the xnmap_enter() service, which will then
+ * attempt to register this exact key, instead of drawing the next
+ * available key from the unreserved index space. When reservation is
+ * in effect, the unreserved index space will hold key values greater
+ * than @a reserve, keeping the low key values for the reserved space.
+ * For instance, passing @a reserve = 32 would cause the index range [
+ * 0 .. 31 ] to be kept for reserved keys.  When non-zero, @a reserve
+ * is rounded to the next multiple of BITS_PER_LONG. If @a reserve is
+ * zero no reservation will be available from the map.
+ *
+ * @param offset The lowest key value xnmap_enter() will return to the
+ * caller. Key values will be in the range [ 0 + offset .. @a nkeys +
+ * offset - 1 ]. Negative offsets are valid.
+ *
+ * @return the address of the new map is returned on success;
+ * otherwise, NULL is returned if @a nkeys is invalid or the system
+ * heap is exhausted.
+ *
+ * @coretags{task-unrestricted}
+ */
+struct xnmap *xnmap_create(int nkeys, int reserve, int offset)
+{
+	struct xnmap *map;
+	int mapsize;
+
+	if (nkeys <= 0 || (nkeys & (nkeys - 1)) != 0)
+		return NULL;
+
+	mapsize = sizeof(*map) + (nkeys - 1) * sizeof(map->objarray[0]);
+	map = xnmalloc(mapsize);
+
+	if (!map)
+		return NULL;
+
+	map->ukeys = 0;
+	map->nkeys = nkeys;
+	map->offset = offset;
+	map->himask = (1 << ((reserve + BITS_PER_LONG - 1) / BITS_PER_LONG)) - 1;
+	map->himap = ~0;
+	memset(map->lomap, ~0, sizeof(map->lomap));
+	memset(map->objarray, 0, sizeof(map->objarray[0]) * nkeys);
+
+	return map;
+}
+EXPORT_SYMBOL_GPL(xnmap_create);
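+
+/*
+ * A minimal usage sketch (illustrative only): allocate a map of 256
+ * keys with the range [ 0 .. 31 ] kept for fixed reservations and a
+ * null offset, then dispose of it.
+ *
+ *	struct xnmap *m;
+ *
+ *	m = xnmap_create(256, 32, 0);
+ *	if (m == NULL)
+ *		return -ENOMEM;
+ *
+ *	xnmap_delete(m);
+ */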
+
+/**
+ * @fn void xnmap_delete(struct xnmap *map)
+ * @brief Delete a map.
+ *
+ * Deletes a map, freeing any associated memory back to the Xenomai
+ * system heap.
+ *
+ * @param map The address of the map to delete.
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnmap_delete(struct xnmap *map)
+{
+	xnfree(map);
+}
+EXPORT_SYMBOL_GPL(xnmap_delete);
+
+/**
+ * @fn int xnmap_enter(struct xnmap *map, int key, void *objaddr)
+ * @brief Index an object into a map.
+ *
+ * Insert a new object into the given map.
+ *
+ * @param map The address of the map to insert into.
+ *
+ * @param key The key to index the object on. If this key is within
+ * the valid range [ 0 + offset .. nkeys + offset - 1 ], then an
+ * attempt to reserve this exact key is made. If @a key is out of
+ * that range (e.g. lower than the map offset), then an attempt is
+ * made to draw a free key from the unreserved index space.
+ *
+ * @param objaddr The address of the object to index on the key. This
+ * value will be returned by a successful call to xnmap_fetch() with
+ * the same key.
+ *
+ * @return a valid key is returned on success, either @a key if
+ * reserved, or the next free key. Otherwise:
+ *
+ * - -EEXIST is returned upon attempt to reserve a busy key.
+ *
+ * - -ENOSPC is returned when no free key is available.
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_enter(struct xnmap *map, int key, void *objaddr)
+{
+	int hi, lo, ofkey = key - map->offset;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (ofkey >= 0 && ofkey < map->nkeys) {
+		if (map->objarray[ofkey] != NULL) {
+			key = -EEXIST;
+			goto unlock_and_exit;
+		}
+	} else if (map->ukeys >= map->nkeys) {
+		key = -ENOSPC;
+		goto unlock_and_exit;
+	} else {
+		/*
+		 * The himask flags the low chunks of the bitmap
+		 * space which are reserved for fixed keys, and thus
+		 * may not be used for drawing free keys.
+		 */
+
+		hi = ffnz(map->himap & ~map->himask);
+		lo = ffnz(map->lomap[hi]);
+		ofkey = hi * BITS_PER_LONG + lo;
+		++map->ukeys;
+
+		map->lomap[hi] &= ~(1UL << lo);
+		if (map->lomap[hi] == 0)
+			map->himap &= ~(1UL << hi);
+	}
+
+	map->objarray[ofkey] = objaddr;
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ofkey + map->offset;
+}
+EXPORT_SYMBOL_GPL(xnmap_enter);
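+
+/*
+ * A minimal sketch contrasting both indexing modes (illustrative
+ * only; @a m is a map created as above, @a obj stands for any object
+ * pointer). Reserving a fixed key returns that key, or -EEXIST if it
+ * is busy; any out-of-range key value draws a free key instead.
+ *
+ *	int key;
+ *
+ *	key = xnmap_enter(m, 7, obj);
+ *	key = xnmap_enter(m, -1, obj);
+ */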
+
+/**
+ * @fn int xnmap_remove(struct xnmap *map, int key)
+ * @brief Remove an object reference from a map.
+ *
+ * Removes an object reference from the given map, releasing the
+ * associated key.
+ *
+ * @param map The address of the map to remove from.
+ *
+ * @param key The key the object reference to be removed is indexed
+ * on.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ESRCH is returned if @a key is invalid.
+ *
+ * @coretags{unrestricted}
+ */
+int xnmap_remove(struct xnmap *map, int key)
+{
+	int ofkey = key - map->offset, hi, lo;
+	spl_t s;
+
+	if (ofkey < 0 || ofkey >= map->nkeys)
+		return -ESRCH;
+
+	hi = ofkey / BITS_PER_LONG;
+	lo = ofkey % BITS_PER_LONG;
+	xnlock_get_irqsave(&nklock, s);
+	map->objarray[ofkey] = NULL;
+	map->himap |= (1UL << hi);
+	map->lomap[hi] |= (1UL << lo);
+	--map->ukeys;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnmap_remove);
+
+/**
+ * @fn void *xnmap_fetch(struct xnmap *map, int key)
+ * @brief Search a map for an object.
+ *
+ * Retrieves an object reference from the given map by its index key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when @a key is invalid or no object is
+ * currently indexed on it.
+ *
+ * @coretags{unrestricted}
+ */
+
+/**
+ * @fn void *xnmap_fetch_nocheck(struct xnmap *map, int key)
+ * @brief Search a map for an object - unchecked form.
+ *
+ * Retrieves an object reference from the given map by its index key,
+ * without performing any sanity check on the provided key.
+ *
+ * @param map The address of the map to retrieve from.
+ *
+ * @param key The key to be searched for in the map index.
+ *
+ * @return The indexed object address is returned on success,
+ * otherwise NULL is returned when no object is currently indexed on
+ * @a key.
+ *
+ * @coretags{unrestricted}
+ */
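+
+/*
+ * A minimal lookup sketch (illustrative only; @a m and @a key are
+ * taken from the sketches above): a key obtained from xnmap_enter()
+ * translates back to the object pointer, with NULL denoting an
+ * invalid or vacant key.
+ *
+ *	void *obj;
+ *
+ *	obj = xnmap_fetch(m, key);
+ *	if (obj == NULL)
+ *		return -ESRCH;
+ */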
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c b/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c
new file mode 100644
index 0000000..e846303
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/pipe.c
@@ -0,0 +1,1201 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2005 Dmitry Adamushko <dmitry.adamushko@gmail.com>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation, Inc., 675 Mass Ave, Cambridge MA
+ * 02139, USA; either version 2 of the License, or (at your option)
+ * any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/fcntl.h>
+#include <linux/poll.h>
+#include <linux/termios.h>
+#include <linux/spinlock.h>
+#include <linux/device.h>
+#include <linux/uaccess.h>
+#include <linux/compat.h>
+#include <asm/io.h>
+#include <asm/xenomai/syscall.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/pipe.h>
+#include <pipeline/sirq.h>
+
+static int xnpipe_asyncsig = SIGIO;
+
+struct xnpipe_state xnpipe_states[XNPIPE_NDEVS];
+EXPORT_SYMBOL_GPL(xnpipe_states);
+
+#define XNPIPE_BITMAP_SIZE	((XNPIPE_NDEVS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+
+static unsigned long xnpipe_bitmap[XNPIPE_BITMAP_SIZE];
+
+static LIST_HEAD(xnpipe_sleepq);
+
+static LIST_HEAD(xnpipe_asyncq);
+
+static int xnpipe_wakeup_virq;
+
+static struct class *xnpipe_class;
+
+/* Allocation of minor values */
+
+static inline int xnpipe_minor_alloc(int minor)
+{
+	spl_t s;
+
+	if ((minor < 0 && minor != XNPIPE_MINOR_AUTO) || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (minor == XNPIPE_MINOR_AUTO)
+		minor = find_first_zero_bit(xnpipe_bitmap, XNPIPE_NDEVS);
+
+	if (minor == XNPIPE_NDEVS ||
+	    (xnpipe_bitmap[minor / BITS_PER_LONG] &
+	     (1UL << (minor % BITS_PER_LONG))))
+		minor = -EBUSY;
+	else
+		xnpipe_bitmap[minor / BITS_PER_LONG] |=
+			(1UL << (minor % BITS_PER_LONG));
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return minor;
+}
+
+static inline void xnpipe_minor_free(int minor)
+{
+	xnpipe_bitmap[minor / BITS_PER_LONG] &=
+		~(1UL << (minor % BITS_PER_LONG));
+}
+
+static inline void xnpipe_enqueue_wait(struct xnpipe_state *state, int mask)
+{
+	if (state->wcount != 0x7fffffff && state->wcount++ == 0)
+		list_add_tail(&state->slink, &xnpipe_sleepq);
+
+	state->status |= mask;
+}
+
+static inline void xnpipe_dequeue_wait(struct xnpipe_state *state, int mask)
+{
+	if (state->status & mask)
+		if (--state->wcount == 0) {
+			list_del(&state->slink);
+			state->status &= ~mask;
+		}
+}
+
+static inline void xnpipe_dequeue_all(struct xnpipe_state *state, int mask)
+{
+	if (state->status & mask) {
+		if (state->wcount) {
+			state->wcount = 0;
+			list_del(&state->slink);
+			state->status &= ~mask;
+		}
+	}
+}
+
+/* Must be entered with nklock held, interrupts off. */
+#define xnpipe_wait(__state, __mask, __s, __cond)			\
+({									\
+	wait_queue_head_t *__waitq;					\
+	DEFINE_WAIT(__wait);						\
+	int __sigpending;						\
+									\
+	if ((__mask) & XNPIPE_USER_WREAD)				\
+		__waitq = &(__state)->readq;				\
+	else								\
+		__waitq = &(__state)->syncq;				\
+									\
+	xnpipe_enqueue_wait(__state, __mask);				\
+	xnlock_put_irqrestore(&nklock, __s);				\
+									\
+	for (;;) {							\
+		__sigpending = signal_pending(current);			\
+		if (__sigpending)					\
+			break;						\
+		prepare_to_wait_exclusive(__waitq, &__wait, TASK_INTERRUPTIBLE); \
+		if (__cond || (__state)->status & XNPIPE_KERN_LCLOSE)	\
+			break;						\
+		schedule();						\
+	}								\
+									\
+	finish_wait(__waitq, &__wait);					\
+									\
+	/* Restore the interrupt state initially set by the caller. */	\
+	xnlock_get_irqsave(&nklock, __s);				\
+	xnpipe_dequeue_wait(__state, __mask);				\
+									\
+	__sigpending;							\
+})
+
+static irqreturn_t xnpipe_wakeup_proc(int sirq, void *dev_id)
+{
+	struct xnpipe_state *state;
+	unsigned long rbits;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * NOTE: sleepers might enter/leave the queue while we don't
+	 * hold the nklock in these wakeup loops. So we keep rescanning
+	 * each sleeper list from its head until a full pass finds no
+	 * more candidates for wakeup.
+	 */
+	for (;;) {
+		if (list_empty(&xnpipe_sleepq))
+			goto check_async;
+
+		state = list_first_entry(&xnpipe_sleepq, struct xnpipe_state, slink);
+
+		for (;;) {
+			rbits = state->status & XNPIPE_USER_ALL_READY;
+			if (rbits)
+				break;
+			if (list_is_last(&state->slink, &xnpipe_sleepq))
+				goto check_async;
+			state = list_next_entry(state, slink);
+		}
+
+		state->status &= ~rbits;
+
+		if ((rbits & XNPIPE_USER_WREAD_READY) != 0) {
+			if (waitqueue_active(&state->readq)) {
+				xnlock_put_irqrestore(&nklock, s);
+				wake_up_interruptible(&state->readq);
+				xnlock_get_irqsave(&nklock, s);
+			}
+		}
+		if ((rbits & XNPIPE_USER_WSYNC_READY) != 0) {
+			if (waitqueue_active(&state->syncq)) {
+				xnlock_put_irqrestore(&nklock, s);
+				wake_up_interruptible(&state->syncq);
+				xnlock_get_irqsave(&nklock, s);
+			}
+		}
+	}
+
+check_async:
+	/*
+	 * Scan the async queue, sending the proper signal to
+	 * subscribers.
+	 */
+	for (;;) {
+		if (list_empty(&xnpipe_asyncq))
+			goto out;
+
+		state = list_first_entry(&xnpipe_asyncq, struct xnpipe_state, alink);
+
+		for (;;) {
+			if (state->status & XNPIPE_USER_SIGIO)
+				break;
+			if (list_is_last(&state->alink, &xnpipe_asyncq))
+				goto out;
+			state = list_next_entry(state, alink);
+		}
+
+		state->status &= ~XNPIPE_USER_SIGIO;
+		xnlock_put_irqrestore(&nklock, s);
+		kill_fasync(&state->asyncq, xnpipe_asyncsig, POLL_IN);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return IRQ_HANDLED;
+}
+
+static inline void xnpipe_schedule_request(void) /* hw IRQs off */
+{
+	pipeline_post_sirq(xnpipe_wakeup_virq);
+}
+
+static inline ssize_t xnpipe_flush_bufq(void (*fn)(void *buf, void *xstate),
+					struct list_head *q,
+					void *xstate)
+{
+	struct xnpipe_mh *mh, *tmp;
+	ssize_t n = 0;
+
+	if (list_empty(q))
+		return 0;
+
+	/* Queue is private, no locking is required. */
+	list_for_each_entry_safe(mh, tmp, q, link) {
+		list_del(&mh->link);
+		n += xnpipe_m_size(mh);
+		fn(mh, xstate);
+	}
+
+	/* Return the overall count of bytes flushed. */
+	return n;
+}
+
+/*
+ * Move the specified queue contents to a private queue, then call the
+ * flush handler to purge it. The latter runs without locking.
+ * Returns the number of bytes flushed. Must be entered with nklock
+ * held, interrupts off.
+ */
+#define xnpipe_flushq(__state, __q, __f, __s)				\
+({									\
+	LIST_HEAD(__privq);						\
+	ssize_t __n;							\
+									\
+	list_splice_init(&(__state)->__q, &__privq);			\
+	(__state)->nr ## __q = 0;					\
+	xnlock_put_irqrestore(&nklock, (__s));				\
+	__n = xnpipe_flush_bufq((__state)->ops.__f, &__privq, (__state)->xstate);	\
+	xnlock_get_irqsave(&nklock, (__s));				\
+									\
+	__n;								\
+})
+
+static void *xnpipe_default_alloc_ibuf(size_t size, void *xstate)
+{
+	void *buf;
+
+	buf = xnmalloc(size);
+	if (likely(buf != NULL))
+		return buf;
+
+	if (size > xnheap_get_size(&cobalt_heap))
+		/* Request will never succeed. */
+		return (struct xnpipe_mh *)-1;
+
+	return NULL;
+}
+
+static void xnpipe_default_free_ibuf(void *buf, void *xstate)
+{
+	xnfree(buf);
+}
+
+static void xnpipe_default_release(void *xstate)
+{
+}
+
+static inline int xnpipe_set_ops(struct xnpipe_state *state,
+				 struct xnpipe_operations *ops)
+{
+	state->ops = *ops;
+
+	if (ops->free_obuf == NULL)
+		/*
+		 * Caller must provide a way to free unread outgoing
+		 * buffers.
+		 */
+		return -EINVAL;
+
+	/* Set some default handlers for common usage. */
+	if (ops->alloc_ibuf == NULL)
+		state->ops.alloc_ibuf = xnpipe_default_alloc_ibuf;
+	if (ops->free_ibuf == NULL)
+		state->ops.free_ibuf = xnpipe_default_free_ibuf;
+	if (ops->release == NULL)
+		state->ops.release = xnpipe_default_release;
+
+	return 0;
+}
+
+int xnpipe_connect(int minor, struct xnpipe_operations *ops, void *xstate)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0, ret;
+	spl_t s;
+
+	minor = xnpipe_minor_alloc(minor);
+	if (minor < 0)
+		return minor;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	ret = xnpipe_set_ops(state, ops);
+	if (ret) {
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	state->status |= XNPIPE_KERN_CONN;
+	xnsynch_init(&state->synchbase, XNSYNCH_FIFO, NULL);
+	state->xstate = xstate;
+	state->ionrd = 0;
+
+	if (state->status & XNPIPE_USER_CONN) {
+		if (state->status & XNPIPE_USER_WREAD) {
+			/*
+			 * Wake up the regular Linux task waiting for
+			 * the kernel side to connect (xnpipe_open).
+			 */
+			state->status |= XNPIPE_USER_WREAD_READY;
+			need_sched = 1;
+		}
+
+		if (state->asyncq) {	/* Schedule asynch sig. */
+			state->status |= XNPIPE_USER_SIGIO;
+			need_sched = 1;
+		}
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return minor;
+}
+EXPORT_SYMBOL_GPL(xnpipe_connect);
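+
+/*
+ * A minimal connection sketch (illustrative only; handler names are
+ * arbitrary). Only .free_obuf is mandatory; the other handlers
+ * default to the cobalt_heap-based helpers above.
+ *
+ *	static void my_free_obuf(void *buf, void *xstate)
+ *	{
+ *		xnfree(buf);
+ *	}
+ *
+ *	static struct xnpipe_operations my_ops = {
+ *		.free_obuf = my_free_obuf,
+ *	};
+ *
+ *	minor = xnpipe_connect(XNPIPE_MINOR_AUTO, &my_ops, NULL);
+ *	if (minor < 0)
+ *		return minor;
+ */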
+
+int xnpipe_disconnect(int minor)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	state->status &= ~XNPIPE_KERN_CONN;
+
+	state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);
+
+	if ((state->status & XNPIPE_USER_CONN) == 0)
+		goto cleanup;
+
+	xnpipe_flushq(state, inq, free_ibuf, s);
+
+	if (xnsynch_destroy(&state->synchbase) == XNSYNCH_RESCHED)
+		xnsched_run();
+
+	if (state->status & XNPIPE_USER_WREAD) {
+		/*
+		 * Wake up the regular Linux task waiting for some
+		 * operation from the Xenomai side (read/write or
+		 * poll).
+		 */
+		state->status |= XNPIPE_USER_WREAD_READY;
+		need_sched = 1;
+	}
+
+	if (state->asyncq) {	/* Schedule asynch sig. */
+		state->status |= XNPIPE_USER_SIGIO;
+		need_sched = 1;
+	}
+
+cleanup:
+	/*
+	 * If xnpipe_release() has not fully run, enter lingering
+	 * close. This will prevent the extra state from being wiped
+	 * out until then.
+	 */
+	if (state->status & XNPIPE_USER_CONN)
+		state->status |= XNPIPE_KERN_LCLOSE;
+	else {
+		xnlock_put_irqrestore(&nklock, s);
+		state->ops.release(state->xstate);
+		xnlock_get_irqsave(&nklock, s);
+		xnpipe_minor_free(minor);
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnpipe_disconnect);
+
+ssize_t xnpipe_send(int minor, struct xnpipe_mh *mh, size_t size, int flags)
+{
+	struct xnpipe_state *state;
+	int need_sched = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (size <= sizeof(*mh))
+		return -EINVAL;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	xnpipe_m_size(mh) = size - sizeof(*mh);
+	xnpipe_m_rdoff(mh) = 0;
+	state->ionrd += xnpipe_m_size(mh);
+
+	if (flags & XNPIPE_URGENT)
+		list_add(&mh->link, &state->outq);
+	else
+		list_add_tail(&mh->link, &state->outq);
+
+	state->nroutq++;
+
+	if ((state->status & XNPIPE_USER_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return (ssize_t) size;
+	}
+
+	if (state->status & XNPIPE_USER_WREAD) {
+		/*
+		 * Wake up the regular Linux task waiting for input
+		 * from the Xenomai side.
+		 */
+		state->status |= XNPIPE_USER_WREAD_READY;
+		need_sched = 1;
+	}
+
+	if (state->asyncq) {	/* Schedule asynch sig. */
+		state->status |= XNPIPE_USER_SIGIO;
+		need_sched = 1;
+	}
+
+	if (need_sched)
+		xnpipe_schedule_request();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t) size;
+}
+EXPORT_SYMBOL_GPL(xnpipe_send);
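+
+/*
+ * A minimal send sketch (illustrative only; @a len and @a payload are
+ * the caller's): a message is a struct xnpipe_mh header immediately
+ * followed by the payload, and @a size covers both. Passing
+ * XNPIPE_URGENT instead of 0 queues the message at the head of the
+ * output queue.
+ *
+ *	struct xnpipe_mh *mh;
+ *	size_t size = sizeof(*mh) + len;
+ *
+ *	mh = xnmalloc(size);
+ *	if (mh == NULL)
+ *		return -ENOMEM;
+ *	memcpy(xnpipe_m_data(mh), payload, len);
+ *	ret = xnpipe_send(minor, mh, size, 0);
+ */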
+
+ssize_t xnpipe_mfixup(int minor, struct xnpipe_mh *mh, ssize_t size)
+{
+	struct xnpipe_state *state;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (size < 0)
+		return -EINVAL;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	xnpipe_m_size(mh) += size;
+	state->ionrd += size;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t) size;
+}
+EXPORT_SYMBOL_GPL(xnpipe_mfixup);
+
+ssize_t xnpipe_recv(int minor, struct xnpipe_mh **pmh, xnticks_t timeout)
+{
+	struct xnpipe_state *state;
+	struct xnpipe_mh *mh;
+	xntmode_t mode;
+	ssize_t ret;
+	int info;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	if (xnsched_interrupt_p())
+		return -EPERM;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		ret = -EBADF;
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * If we received a relative timespec, rescale it to an
+	 * absolute time value based on the monotonic clock.
+	 */
+	mode = XN_RELATIVE;
+	if (timeout != XN_NONBLOCK && timeout != XN_INFINITE) {
+		mode = XN_ABSOLUTE;
+		timeout += xnclock_read_monotonic(&nkclock);
+	}
+
+	for (;;) {
+		if (!list_empty(&state->inq))
+			break;
+
+		if (timeout == XN_NONBLOCK) {
+			ret = -EWOULDBLOCK;
+			goto unlock_and_exit;
+		}
+
+		info = xnsynch_sleep_on(&state->synchbase, timeout, mode);
+		if (info & XNTIMEO) {
+			ret = -ETIMEDOUT;
+			goto unlock_and_exit;
+		}
+		if (info & XNBREAK) {
+			ret = -EINTR;
+			goto unlock_and_exit;
+		}
+		if (info & XNRMID) {
+			ret = -EIDRM;
+			goto unlock_and_exit;
+		}
+	}
+
+	mh = list_get_entry(&state->inq, struct xnpipe_mh, link);
+	*pmh = mh;
+	state->nrinq--;
+	ret = (ssize_t)xnpipe_m_size(mh);
+
+	if (state->status & XNPIPE_USER_WSYNC) {
+		state->status |= XNPIPE_USER_WSYNC_READY;
+		xnpipe_schedule_request();
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnpipe_recv);
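+
+/*
+ * A minimal receive sketch (illustrative only; assumes the nucleus
+ * clock ticks in nanoseconds), blocking for at most one second. On
+ * success, @a n bytes of payload are readable from xnpipe_m_data(mh);
+ * the caller remains responsible for releasing the buffer to its
+ * allocator.
+ *
+ *	struct xnpipe_mh *mh;
+ *	ssize_t n;
+ *
+ *	n = xnpipe_recv(minor, &mh, 1000000000ULL);
+ *	if (n < 0)
+ *		return n;
+ */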
+
+int xnpipe_flush(int minor, int mode)
+{
+	struct xnpipe_state *state;
+	int msgcount;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBADF;
+	}
+
+	msgcount = state->nroutq + state->nrinq;
+
+	if (mode & XNPIPE_OFLUSH)
+		state->ionrd -= xnpipe_flushq(state, outq, free_obuf, s);
+
+	if (mode & XNPIPE_IFLUSH)
+		xnpipe_flushq(state, inq, free_ibuf, s);
+
+	if ((state->status & XNPIPE_USER_WSYNC) &&
+	    msgcount > state->nroutq + state->nrinq) {
+		state->status |= XNPIPE_USER_WSYNC_READY;
+		xnpipe_schedule_request();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnpipe_flush);
+
+int xnpipe_pollstate(int minor, unsigned int *mask_r)
+{
+	struct xnpipe_state *state;
+	int ret = 0;
+	spl_t s;
+
+	if (minor < 0 || minor >= XNPIPE_NDEVS)
+		return -ENODEV;
+
+	state = xnpipe_states + minor;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (state->status & XNPIPE_KERN_CONN) {
+		*mask_r = POLLOUT;
+		if (!list_empty(&state->inq))
+			*mask_r |= POLLIN;
+	} else
+		ret = -EIO;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnpipe_pollstate);
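+
+/*
+ * A minimal polling sketch (illustrative only): check whether input
+ * is pending before calling xnpipe_recv() in non-blocking mode.
+ *
+ *	unsigned int mask;
+ *	int ret;
+ *
+ *	ret = xnpipe_pollstate(minor, &mask);
+ *	if (ret == 0 && (mask & POLLIN))
+ *		ret = xnpipe_recv(minor, &mh, XN_NONBLOCK);
+ */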
+
+/* Must be entered with nklock held, interrupts off. */
+#define xnpipe_cleanup_user_conn(__state, __s)				\
+	do {								\
+		xnpipe_flushq((__state), outq, free_obuf, (__s));	\
+		xnpipe_flushq((__state), inq, free_ibuf, (__s));	\
+		(__state)->status &= ~XNPIPE_USER_CONN;			\
+		if ((__state)->status & XNPIPE_KERN_LCLOSE) {		\
+			(__state)->status &= ~XNPIPE_KERN_LCLOSE;	\
+			xnlock_put_irqrestore(&nklock, (__s));		\
+			(__state)->ops.release((__state)->xstate);	\
+			xnlock_get_irqsave(&nklock, (__s));		\
+			xnpipe_minor_free(xnminor_from_state(__state));	\
+		}							\
+	} while (0)
+
+/*
+ * Open the pipe from user-space.
+ */
+
+static int xnpipe_open(struct inode *inode, struct file *file)
+{
+	int minor, err = 0, sigpending;
+	struct xnpipe_state *state;
+	spl_t s;
+
+	minor = MINOR(inode->i_rdev);
+
+	if (minor >= XNPIPE_NDEVS)
+		return -ENXIO;	/* TssTss... stop playing with mknod() ;o) */
+
+	state = &xnpipe_states[minor];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* Enforce exclusive open for the message queues. */
+	if (state->status & (XNPIPE_USER_CONN | XNPIPE_USER_LCONN)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	state->status |= XNPIPE_USER_LCONN;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	file->private_data = state;
+	init_waitqueue_head(&state->readq);
+	init_waitqueue_head(&state->syncq);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	state->status |= XNPIPE_USER_CONN;
+	state->status &= ~XNPIPE_USER_LCONN;
+	state->wcount = 0;
+
+	state->status &=
+		~(XNPIPE_USER_ALL_WAIT | XNPIPE_USER_ALL_READY |
+		  XNPIPE_USER_SIGIO);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		if (file->f_flags & O_NONBLOCK) {
+			xnpipe_cleanup_user_conn(state, s);
+			xnlock_put_irqrestore(&nklock, s);
+			return -EWOULDBLOCK;
+		}
+
+		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
+					 state->status & XNPIPE_KERN_CONN);
+		if (sigpending) {
+			xnpipe_cleanup_user_conn(state, s);
+			xnlock_put_irqrestore(&nklock, s);
+			return -ERESTARTSYS;
+		}
+	}
+
+	if (err)
+		xnpipe_cleanup_user_conn(state, s);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static int xnpipe_release(struct inode *inode, struct file *file)
+{
+	struct xnpipe_state *state = file->private_data;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnpipe_dequeue_all(state, XNPIPE_USER_WREAD);
+	xnpipe_dequeue_all(state, XNPIPE_USER_WSYNC);
+
+	if (state->status & XNPIPE_KERN_CONN) {
+		/* Unblock waiters. */
+		if (xnsynch_pended_p(&state->synchbase)) {
+			xnsynch_flush(&state->synchbase, XNRMID);
+			xnsched_run();
+		}
+	}
+
+	if (state->ops.input)
+		state->ops.input(NULL, -EPIPE, state->xstate);
+
+	if (state->asyncq) {	/* Clear the async queue */
+		list_del(&state->alink);
+		state->status &= ~XNPIPE_USER_SIGIO;
+		xnlock_put_irqrestore(&nklock, s);
+		fasync_helper(-1, file, 0, &state->asyncq);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnpipe_cleanup_user_conn(state, s);
+	/*
+	 * The extra state may not be available from now on, if
+	 * xnpipe_disconnect() entered lingering close before we got
+	 * there; so calling xnpipe_cleanup_user_conn() should be the
+	 * last thing we do.
+	 */
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static ssize_t xnpipe_read(struct file *file,
+			   char *buf, size_t count, loff_t *ppos)
+{
+	struct xnpipe_state *state = file->private_data;
+	int sigpending, err = 0;
+	size_t nbytes, inbytes;
+	struct xnpipe_mh *mh;
+	ssize_t ret;
+	spl_t s;
+
+	if (!access_wok(buf, count))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPIPE;
+	}
+	/*
+	 * Queue probe and proc enqueuing must be seen atomically,
+	 * including from the Xenomai side.
+	 */
+	if (list_empty(&state->outq)) {
+		if (file->f_flags & O_NONBLOCK) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EWOULDBLOCK;
+		}
+
+		sigpending = xnpipe_wait(state, XNPIPE_USER_WREAD, s,
+					 !list_empty(&state->outq));
+
+		if (list_empty(&state->outq)) {
+			xnlock_put_irqrestore(&nklock, s);
+			return sigpending ? -ERESTARTSYS : 0;
+		}
+	}
+
+	mh = list_get_entry(&state->outq, struct xnpipe_mh, link);
+	state->nroutq--;
+
+	/*
+	 * We allow more data to be appended to the current message
+	 * bucket while its contents are being copied to the user
+	 * buffer; therefore, we need to loop until either 1) all the
+	 * data has been copied, or 2) the user buffer space has been
+	 * consumed entirely.
+	 */
+
+	inbytes = 0;
+
+	for (;;) {
+		nbytes = xnpipe_m_size(mh) - xnpipe_m_rdoff(mh);
+
+		if (nbytes + inbytes > count)
+			nbytes = count - inbytes;
+
+		if (nbytes == 0)
+			break;
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		/* More data could be appended while doing this: */
+		err = __copy_to_user(buf + inbytes,
+				     xnpipe_m_data(mh) + xnpipe_m_rdoff(mh),
+				     nbytes);
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if (err) {
+			err = -EFAULT;
+			break;
+		}
+
+		inbytes += nbytes;
+		xnpipe_m_rdoff(mh) += nbytes;
+	}
+
+	state->ionrd -= inbytes;
+	ret = inbytes;
+
+	if (xnpipe_m_size(mh) > xnpipe_m_rdoff(mh)) {
+		list_add(&mh->link, &state->outq);
+		state->nroutq++;
+	} else {
+		/*
+		 * We always want to fire the output handler because
+		 * whatever the error state is for userland (e.g.
+		 * -EFAULT), we did pull a message from our output
+		 * queue.
+		 */
+		if (state->ops.output)
+			state->ops.output(mh, state->xstate);
+		xnlock_put_irqrestore(&nklock, s);
+		state->ops.free_obuf(mh, state->xstate);
+		xnlock_get_irqsave(&nklock, s);
+		if (state->status & XNPIPE_USER_WSYNC) {
+			state->status |= XNPIPE_USER_WSYNC_READY;
+			xnpipe_schedule_request();
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err ? : ret;
+}
+
+static ssize_t xnpipe_write(struct file *file,
+			    const char *buf, size_t count, loff_t *ppos)
+{
+	struct xnpipe_state *state = file->private_data;
+	struct xnpipe_mh *mh;
+	int pollnum, ret;
+	spl_t s;
+
+	if (count == 0)
+		return 0;
+
+	if (!access_rok(buf, count))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+retry:
+	if ((state->status & XNPIPE_KERN_CONN) == 0) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPIPE;
+	}
+
+	pollnum = state->nrinq + state->nroutq;
+	xnlock_put_irqrestore(&nklock, s);
+
+	mh = state->ops.alloc_ibuf(count + sizeof(*mh), state->xstate);
+	if (mh == (struct xnpipe_mh *)-1)
+		return -ENOMEM;
+
+	if (mh == NULL) {
+		if (file->f_flags & O_NONBLOCK)
+			return -EWOULDBLOCK;
+
+		xnlock_get_irqsave(&nklock, s);
+		if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
+				pollnum > state->nrinq + state->nroutq)) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -ERESTARTSYS;
+		}
+		goto retry;
+	}
+
+	xnpipe_m_size(mh) = count;
+	xnpipe_m_rdoff(mh) = 0;
+
+	if (copy_from_user(xnpipe_m_data(mh), buf, count)) {
+		state->ops.free_ibuf(mh, state->xstate);
+		return -EFAULT;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	list_add_tail(&mh->link, &state->inq);
+	state->nrinq++;
+
+	/* Wake up a Xenomai sleeper if any. */
+	if (xnsynch_wakeup_one_sleeper(&state->synchbase))
+		xnsched_run();
+
+	if (state->ops.input) {
+		ret = state->ops.input(mh, 0, state->xstate);
+		if (ret)
+			count = (size_t)ret;
+	}
+
+	if (file->f_flags & O_SYNC) {
+		if (!list_empty(&state->inq)) {
+			if (xnpipe_wait(state, XNPIPE_USER_WSYNC, s,
+					list_empty(&state->inq)))
+				count = -ERESTARTSYS;
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return (ssize_t)count;
+}
+
+static long xnpipe_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
+{
+	struct xnpipe_state *state = file->private_data;
+	int ret = 0;
+	ssize_t n;
+	spl_t s;
+
+	switch (cmd) {
+	case XNPIPEIOC_GET_NRDEV:
+
+		if (put_user(XNPIPE_NDEVS, (int *)arg))
+			return -EFAULT;
+
+		break;
+
+	case XNPIPEIOC_OFLUSH:
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if ((state->status & XNPIPE_KERN_CONN) == 0) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EPIPE;
+		}
+
+		n = xnpipe_flushq(state, outq, free_obuf, s);
+		state->ionrd -= n;
+		goto kick_wsync;
+
+	case XNPIPEIOC_IFLUSH:
+
+		xnlock_get_irqsave(&nklock, s);
+
+		if ((state->status & XNPIPE_KERN_CONN) == 0) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EPIPE;
+		}
+
+		n = xnpipe_flushq(state, inq, free_ibuf, s);
+
+	kick_wsync:
+
+		if (n > 0 && (state->status & XNPIPE_USER_WSYNC)) {
+			state->status |= XNPIPE_USER_WSYNC_READY;
+			xnpipe_schedule_request();
+		}
+
+		xnlock_put_irqrestore(&nklock, s);
+		ret = n;
+		break;
+
+	case XNPIPEIOC_SETSIG:
+
+		if (arg < 1 || arg >= _NSIG)
+			return -EINVAL;
+
+		xnpipe_asyncsig = arg;
+		break;
+
+	case FIONREAD:
+
+		n = (state->status & XNPIPE_KERN_CONN) ? state->ionrd : 0;
+
+		if (put_user(n, (int *)arg))
+			return -EFAULT;
+
+		break;
+
+	case TCGETS:
+		/* For isatty() probing. */
+		return -ENOTTY;
+
+	default:
+
+		return -EINVAL;
+	}
+
+	return ret;
+}
+
+#ifdef CONFIG_COMPAT
+/*
+ * Could be replaced with compat_ptr_ioctl if support for kernels < 5.4 is
+ * dropped.
+ */
+static long xnpipe_compat_ioctl(struct file *file, unsigned int cmd,
+				unsigned long arg)
+{
+	return xnpipe_ioctl(file, cmd, (unsigned long)compat_ptr(arg));
+}
+#else
+#define xnpipe_compat_ioctl	NULL
+#endif
+
+static int xnpipe_fasync(int fd, struct file *file, int on)
+{
+	struct xnpipe_state *state = file->private_data;
+	int ret, queued;
+	spl_t s;
+
+	queued = (state->asyncq != NULL);
+	ret = fasync_helper(fd, file, on, &state->asyncq);
+
+	if (state->asyncq) {
+		if (!queued) {
+			xnlock_get_irqsave(&nklock, s);
+			list_add_tail(&state->alink, &xnpipe_asyncq);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	} else if (queued) {
+		xnlock_get_irqsave(&nklock, s);
+		list_del(&state->alink);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return ret;
+}
+
+static unsigned xnpipe_poll(struct file *file, poll_table *pt)
+{
+	struct xnpipe_state *state = file->private_data;
+	unsigned r_mask = 0, w_mask = 0;
+	spl_t s;
+
+	poll_wait(file, &state->readq, pt);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (state->status & XNPIPE_KERN_CONN)
+		w_mask |= (POLLOUT | POLLWRNORM);
+	else
+		r_mask |= POLLHUP;
+
+	if (!list_empty(&state->outq))
+		r_mask |= (POLLIN | POLLRDNORM);
+	else
+		/*
+		 * Processes which have issued a timed-out poll
+		 * request will remain linked to the sleepers queue,
+		 * and will be silently unlinked the next time the
+		 * Xenomai side kicks xnpipe_wakeup_proc().
+		 */
+		xnpipe_enqueue_wait(state, XNPIPE_USER_WREAD);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return r_mask | w_mask;
+}
+
+static struct file_operations xnpipe_fops = {
+	.read = xnpipe_read,
+	.write = xnpipe_write,
+	.poll = xnpipe_poll,
+	.unlocked_ioctl = xnpipe_ioctl,
+	.compat_ioctl = xnpipe_compat_ioctl,
+	.open = xnpipe_open,
+	.release = xnpipe_release,
+	.fasync = xnpipe_fasync
+};
+
+int xnpipe_mount(void)
+{
+	struct xnpipe_state *state;
+	struct device *cldev;
+	int i;
+
+	for (state = &xnpipe_states[0];
+	     state < &xnpipe_states[XNPIPE_NDEVS]; state++) {
+		state->status = 0;
+		state->asyncq = NULL;
+		INIT_LIST_HEAD(&state->inq);
+		state->nrinq = 0;
+		INIT_LIST_HEAD(&state->outq);
+		state->nroutq = 0;
+	}
+
+	xnpipe_class = class_create(THIS_MODULE, "rtpipe");
+	if (IS_ERR(xnpipe_class)) {
+		printk(XENO_ERR "error creating rtpipe class, err=%ld\n",
+		       PTR_ERR(xnpipe_class));
+		return -EBUSY;
+	}
+
+	for (i = 0; i < XNPIPE_NDEVS; i++) {
+		cldev = device_create(xnpipe_class, NULL,
+				      MKDEV(XNPIPE_DEV_MAJOR, i),
+				      NULL, "rtp%d", i);
+		if (IS_ERR(cldev)) {
+			printk(XENO_ERR
+			       "can't add device class, major=%d, minor=%d, err=%ld\n",
+			       XNPIPE_DEV_MAJOR, i, PTR_ERR(cldev));
+			class_destroy(xnpipe_class);
+			return -EBUSY;
+		}
+	}
+
+	if (register_chrdev(XNPIPE_DEV_MAJOR, "rtpipe", &xnpipe_fops)) {
+		printk(XENO_ERR
+		       "unable to reserve major #%d for message pipes\n",
+		       XNPIPE_DEV_MAJOR);
+		return -EBUSY;
+	}
+
+	xnpipe_wakeup_virq = pipeline_create_inband_sirq(xnpipe_wakeup_proc);
+	if (xnpipe_wakeup_virq < 0) {
+		printk(XENO_ERR
+		       "unable to reserve synthetic IRQ for message pipes\n");
+		return xnpipe_wakeup_virq;
+	}
+
+	return 0;
+}
+
+void xnpipe_umount(void)
+{
+	int i;
+
+	pipeline_delete_inband_sirq(xnpipe_wakeup_virq);
+
+	unregister_chrdev(XNPIPE_DEV_MAJOR, "rtpipe");
+
+	for (i = 0; i < XNPIPE_NDEVS; i++)
+		device_destroy(xnpipe_class, MKDEV(XNPIPE_DEV_MAJOR, i));
+
+	class_destroy(xnpipe_class);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING
new file mode 100644
index 0000000..0d72637
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/COPYING
@@ -0,0 +1,281 @@
+
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                       59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile
new file mode 100644
index 0000000..5b4f321
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/Makefile
@@ -0,0 +1,38 @@
+
+ccflags-y += -I$(srctree)/kernel
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y :=		\
+	clock.o		\
+	cond.o		\
+	corectl.o	\
+	event.o		\
+	io.o		\
+	memory.o	\
+	monitor.o	\
+	mqueue.o	\
+	mutex.o		\
+	nsem.o		\
+	process.o	\
+	sched.o		\
+	sem.o		\
+	signal.o	\
+	syscall.o	\
+	thread.o	\
+	timer.o		\
+	timerfd.o
+
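+# syscall_entries.h is generated from the sources of this directory:
+# the helper script collects the syscall entry points they define, so
+# the header must be rebuilt whenever any of these files changes.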
+syscall_entries := $(srctree)/$(src)/gen-syscall-entries.sh
+
+quiet_cmd_syscall_entries = GEN     $@
+      cmd_syscall_entries = $(CONFIG_SHELL) '$(syscall_entries)' $(filter-out FORCE,$^) > $@
+
+$(obj)/syscall_entries.h: $(syscall_entries) $(wildcard $(srctree)/$(src)/*.c) FORCE
+	$(call if_changed,syscall_entries)
+
+targets += syscall_entries.h
+
+$(obj)/syscall.o: $(obj)/syscall_entries.h
+
+xenomai-$(CONFIG_XENO_ARCH_SYS3264) += compat.o syscall32.o
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c
new file mode 100644
index 0000000..71d14db
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.c
@@ -0,0 +1,497 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/clocksource.h>
+#include <linux/bitmap.h>
+#include <cobalt/kernel/clock.h>
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+static struct xnclock *external_clocks[COBALT_MAX_EXTCLOCKS];
+
+DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+
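+/*
+ * Run the xnclock_<handler> operation on a registered external
+ * clock. Evaluates to -EINVAL if @__clock_id does not refer to a
+ * valid, currently registered external clock, 0 otherwise; the
+ * handler's result is passed back through @__ret.
+ */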
+#define do_ext_clock(__clock_id, __handler, __ret, __args...)	\
+({								\
+	struct xnclock *__clock;				\
+	int __val = 0, __nr;					\
+	spl_t __s;						\
+								\
+	if (!__COBALT_CLOCK_EXT_P(__clock_id))			\
+		__val = -EINVAL;				\
+	else {							\
+		__nr = __COBALT_CLOCK_EXT_INDEX(__clock_id);	\
+		xnlock_get_irqsave(&nklock, __s);		\
+		if (!test_bit(__nr, cobalt_clock_extids)) {	\
+			xnlock_put_irqrestore(&nklock, __s);	\
+			__val = -EINVAL;			\
+		} else {					\
+			__clock = external_clocks[__nr];	\
+			(__ret) = xnclock_ ## __handler(__clock, ##__args); \
+			xnlock_put_irqrestore(&nklock, __s);	\
+		}						\
+	}							\
+	__val;							\
+})
+
+int __cobalt_clock_getres(clockid_t clock_id, struct timespec64 *ts)
+{
+	xnticks_t ns;
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ns2ts(ts, 1);
+		break;
+	default:
+		ret = do_ext_clock(clock_id, get_resolution, ns);
+		if (ret)
+			return ret;
+		ns2ts(ts, ns);
+	}
+
+	trace_cobalt_clock_getres(clock_id, ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_getres, current,
+	       (clockid_t clock_id, struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (u_ts && cobalt_put_u_timespec(u_ts, &ts))
+		return -EFAULT;
+
+	trace_cobalt_clock_getres(clock_id, &ts);
+
+	return 0;
+}
+
+int __cobalt_clock_getres64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_timespec64(&ts, u_ts))
+		return -EFAULT;
+
+	trace_cobalt_clock_getres(clock_id, &ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_getres64, current,
+	       (clockid_t clock_id, struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_getres64(clock_id, u_ts);
+}
+
+int __cobalt_clock_gettime(clockid_t clock_id, struct timespec64 *ts)
+{
+	xnticks_t ns;
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+		ns2ts(ts, xnclock_read_realtime(&nkclock));
+		break;
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ns2ts(ts, xnclock_read_monotonic(&nkclock));
+		break;
+	case CLOCK_HOST_REALTIME:
+		if (pipeline_get_host_time(ts) != 0)
+			return -EINVAL;
+		break;
+	default:
+		ret = do_ext_clock(clock_id, read_monotonic, ns);
+		if (ret)
+			return ret;
+		ns2ts(ts, ns);
+	}
+
+	trace_cobalt_clock_gettime(clock_id, ts);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_gettime, current,
+	       (clockid_t clock_id, struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_u_timespec(u_ts, &ts))
+		return -EFAULT;
+
+	return 0;
+}
+
+int __cobalt_clock_gettime64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	if (cobalt_put_timespec64(&ts, u_ts))
+		return -EFAULT;
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_gettime64, current,
+	       (clockid_t clock_id, struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_gettime64(clock_id, u_ts);
+}
+
+int __cobalt_clock_settime(clockid_t clock_id, const struct timespec64 *ts)
+{
+	int _ret, ret = 0;
+
+	if ((unsigned long)ts->tv_nsec >= ONE_BILLION)
+		return -EINVAL;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+		ret = pipeline_set_wallclock(ts2ns(ts));
+		break;
+	default:
+		_ret = do_ext_clock(clock_id, set_time, ret, ts);
+		if (_ret || ret)
+			return _ret ?: ret;
+	}
+
+	trace_cobalt_clock_settime(clock_id, ts);
+
+	return ret;
+}
+
+int __cobalt_clock_adjtime(clockid_t clock_id, struct __kernel_timex *tx)
+{
+	int _ret, ret = 0;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+	case CLOCK_HOST_REALTIME:
+		return -EOPNOTSUPP;
+	default:
+		_ret = do_ext_clock(clock_id, adjust_time, ret, tx);
+		if (_ret || ret)
+			return _ret ?: ret;
+	}
+
+	trace_cobalt_clock_adjtime(clock_id, tx);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_settime, current,
+	       (clockid_t clock_id, const struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts;
+
+	if (cobalt_get_u_timespec(&ts, u_ts))
+		return -EFAULT;
+
+	return __cobalt_clock_settime(clock_id, &ts);
+}
+
+int __cobalt_clock_settime64(clockid_t clock_id,
+			const struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts64;
+
+	if (cobalt_get_timespec64(&ts64, u_ts))
+		return -EFAULT;
+
+	return __cobalt_clock_settime(clock_id, &ts64);
+}
+
+COBALT_SYSCALL(clock_settime64, current,
+	       (clockid_t clock_id, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_settime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL(clock_adjtime, current,
+	       (clockid_t clock_id, struct __user_old_timex __user *u_tx))
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx)))
+		return -EFAULT;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_tx, &tx, sizeof(tx));
+}
+
+int __cobalt_clock_adjtime64(clockid_t clock_id,
+			struct __kernel_timex __user *u_tx)
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	if (cobalt_copy_from_user(&tx, u_tx, sizeof(tx)))
+		return -EFAULT;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_tx, &tx, sizeof(tx));
+}
+
+COBALT_SYSCALL(clock_adjtime64, current,
+	       (clockid_t clock_id, struct __kernel_timex __user *u_tx))
+{
+	return __cobalt_clock_adjtime64(clock_id, u_tx);
+}
+
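+/*
+ * Suspend the caller according to the POSIX clock_nanosleep()
+ * semantics. A sleep broken by a linux signal is set up for syscall
+ * restart (XNSYSRST + restart block), so that the remaining time is
+ * honored when the call is restarted; other early wakeups return
+ * -EINTR, storing the remaining time into @rmt for relative sleeps.
+ */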
+int __cobalt_clock_nanosleep(clockid_t clock_id, int flags,
+			     const struct timespec64 *rqt,
+			     struct timespec64 *rmt)
+{
+	struct restart_block *restart;
+	struct xnthread *cur;
+	xnsticks_t timeout, rem;
+	spl_t s;
+
+	trace_cobalt_clock_nanosleep(clock_id, flags, rqt);
+
+	if (clock_id != CLOCK_MONOTONIC &&
+	    clock_id != CLOCK_MONOTONIC_RAW &&
+	    clock_id != CLOCK_REALTIME)
+		return -EOPNOTSUPP;
+
+	if (rqt->tv_sec < 0)
+		return -EINVAL;
+
+	if ((unsigned long)rqt->tv_nsec >= ONE_BILLION)
+		return -EINVAL;
+
+	if (flags & ~TIMER_ABSTIME)
+		return -EINVAL;
+
+	cur = xnthread_current();
+
+	if (xnthread_test_localinfo(cur, XNSYSRST)) {
+		xnthread_clear_localinfo(cur, XNSYSRST);
+
+		restart = cobalt_get_restart_block(current);
+
+		if (restart->fn != cobalt_restart_syscall_placeholder) {
+			if (rmt) {
+				xnlock_get_irqsave(&nklock, s);
+				rem = xntimer_get_timeout_stopped(&cur->rtimer);
+				xnlock_put_irqrestore(&nklock, s);
+				ns2ts(rmt, rem > 1 ? rem : 0);
+			}
+			return -EINTR;
+		}
+
+		timeout = restart->nanosleep.expires;
+	} else
+		timeout = ts2ns(rqt);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnthread_suspend(cur, XNDELAY, timeout + 1,
+			 clock_flag(flags, clock_id), NULL);
+
+	if (xnthread_test_info(cur, XNBREAK)) {
+		if (signal_pending(current)) {
+			restart = cobalt_get_restart_block(current);
+			restart->nanosleep.expires =
+				(flags & TIMER_ABSTIME) ? timeout :
+				    xntimer_get_timeout_stopped(&cur->rtimer);
+			xnlock_put_irqrestore(&nklock, s);
+			restart->fn = cobalt_restart_syscall_placeholder;
+
+			xnthread_set_localinfo(cur, XNSYSRST);
+
+			return -ERESTARTSYS;
+		}
+
+		if (flags == 0 && rmt) {
+			rem = xntimer_get_timeout_stopped(&cur->rtimer);
+			xnlock_put_irqrestore(&nklock, s);
+			ns2ts(rmt, rem > 1 ? rem : 0);
+		} else
+			xnlock_put_irqrestore(&nklock, s);
+
+		return -EINTR;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+COBALT_SYSCALL(clock_nanosleep, primary,
+	       (clockid_t clock_id, int flags,
+		const struct __user_old_timespec __user *u_rqt,
+		struct __user_old_timespec __user *u_rmt))
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	if (cobalt_get_u_timespec(&rqt, u_rqt))
+		return -EFAULT;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp) {
+		if (cobalt_put_u_timespec(u_rmt, rmtp))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt)
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	if (cobalt_get_timespec64(&rqt, u_rqt))
+		return -EFAULT;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp) {
+		if (cobalt_put_timespec64(rmtp, u_rmt))
+			return -EFAULT;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(clock_nanosleep64, primary,
+	       (clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt))
+{
+	return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt);
+}
+
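+/*
+ * Register an external clock with the Cobalt core, reserving a free
+ * slot in external_clocks[] and returning the user-visible clock id
+ * through @clk_id.
+ */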
+int cobalt_clock_register(struct xnclock *clock, const cpumask_t *affinity,
+			  clockid_t *clk_id)
+{
+	int ret, nr;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	nr = find_first_zero_bit(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+	if (nr >= COBALT_MAX_EXTCLOCKS) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EAGAIN;
+	}
+
+	/*
+	 * CAUTION: a bit raised in cobalt_clock_extids means that the
+	 * corresponding entry in external_clocks[] is valid. The
+	 * converse assumption is NOT true.
+	 */
+	__set_bit(nr, cobalt_clock_extids);
+	external_clocks[nr] = clock;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = xnclock_register(clock, affinity);
+	if (ret)
+		return ret;
+
+	clock->id = nr;
+	*clk_id = __COBALT_CLOCK_EXT(clock->id);
+
+	trace_cobalt_clock_register(clock->name, *clk_id);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_register);
+
+void cobalt_clock_deregister(struct xnclock *clock)
+{
+	trace_cobalt_clock_deregister(clock->name, clock->id);
+	clear_bit(clock->id, cobalt_clock_extids);
+	smp_mb__after_atomic();
+	external_clocks[clock->id] = NULL;
+	xnclock_deregister(clock);
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_deregister);
+
+struct xnclock *cobalt_clock_find(clockid_t clock_id)
+{
+	struct xnclock *clock = ERR_PTR(-EINVAL);
+	spl_t s;
+	int nr;
+
+	if (clock_id == CLOCK_MONOTONIC ||
+	    clock_id == CLOCK_MONOTONIC_RAW ||
+	    clock_id == CLOCK_REALTIME)
+		return &nkclock;
+
+	if (__COBALT_CLOCK_EXT_P(clock_id)) {
+		nr = __COBALT_CLOCK_EXT_INDEX(clock_id);
+		xnlock_get_irqsave(&nklock, s);
+		if (test_bit(nr, cobalt_clock_extids))
+			clock = external_clocks[nr];
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return clock;
+}
+EXPORT_SYMBOL_GPL(cobalt_clock_find);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h
new file mode 100644
index 0000000..e183739
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/clock.h
@@ -0,0 +1,174 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_CLOCK_H
+#define _COBALT_POSIX_CLOCK_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/cpumask.h>
+#include <cobalt/uapi/time.h>
+#include <xenomai/posix/syscall.h>
+
+#define ONE_BILLION             1000000000
+
+struct xnclock;
+
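+/*
+ * Conversion helpers between Xenomai tick values (nanoseconds) and
+ * the various timespec/timeval layouts.
+ */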
+static inline void ns2ts(struct timespec64 *ts, xnticks_t nsecs)
+{
+	ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec);
+}
+
+static inline void u_ns2ts(struct __user_old_timespec *ts, xnticks_t nsecs)
+{
+	ts->tv_sec = xnclock_divrem_billion(nsecs, &ts->tv_nsec);
+}
+
+static inline xnticks_t ts2ns(const struct timespec64 *ts)
+{
+	xnticks_t nsecs = ts->tv_nsec;
+
+	if (ts->tv_sec)
+		nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline xnticks_t u_ts2ns(const struct __user_old_timespec *ts)
+{
+	xnticks_t nsecs = ts->tv_nsec;
+
+	if (ts->tv_sec)
+		nsecs += (xnticks_t)ts->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline xnticks_t tv2ns(const struct __kernel_old_timeval *tv)
+{
+	xnticks_t nsecs = tv->tv_usec * 1000;
+
+	if (tv->tv_sec)
+		nsecs += (xnticks_t)tv->tv_sec * ONE_BILLION;
+
+	return nsecs;
+}
+
+static inline void ticks2tv(struct __kernel_old_timeval *tv, xnticks_t ticks)
+{
+	unsigned long nsecs;
+
+	tv->tv_sec = xnclock_divrem_billion(ticks, &nsecs);
+	tv->tv_usec = nsecs / 1000;
+}
+
+static inline xnticks_t clock_get_ticks(clockid_t clock_id)
+{
+	return clock_id == CLOCK_REALTIME ?
+		xnclock_read_realtime(&nkclock) :
+		xnclock_read_monotonic(&nkclock);
+}
+
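+/*
+ * Map a POSIX timer flag and clock id to the matching Xenomai timer
+ * mode: relative timeouts always translate to XN_RELATIVE; absolute
+ * timeouts are based on the wallclock for CLOCK_REALTIME
+ * (XN_REALTIME), on the monotonic clock otherwise (XN_ABSOLUTE).
+ */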
+static inline int clock_flag(int flag, clockid_t clock_id)
+{
+	if ((flag & TIMER_ABSTIME) == 0)
+		return XN_RELATIVE;
+
+	if (clock_id == CLOCK_REALTIME)
+		return XN_REALTIME;
+
+	return XN_ABSOLUTE;
+}
+
+int __cobalt_clock_getres(clockid_t clock_id,
+			  struct timespec64 *ts);
+
+int __cobalt_clock_getres64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_gettime(clockid_t clock_id,
+			   struct timespec64 *ts);
+
+int __cobalt_clock_gettime64(clockid_t clock_id,
+			struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_settime(clockid_t clock_id,
+			   const struct timespec64 *ts);
+
+int __cobalt_clock_settime64(clockid_t clock_id,
+			const struct __kernel_timespec __user *u_ts);
+
+int __cobalt_clock_adjtime(clockid_t clock_id,
+			   struct __kernel_timex *tx);
+
+int __cobalt_clock_adjtime64(clockid_t clock_id,
+			struct __kernel_timex __user *u_tx);
+
+int __cobalt_clock_nanosleep(clockid_t clock_id, int flags,
+			     const struct timespec64 *rqt,
+			     struct timespec64 *rmt);
+
+int __cobalt_clock_nanosleep64(clockid_t clock_id, int flags,
+		const struct __kernel_timespec __user *u_rqt,
+		struct __kernel_timespec __user *u_rmt);
+
+COBALT_SYSCALL_DECL(clock_getres,
+		    (clockid_t clock_id, struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_getres64,
+		    (clockid_t clock_id, struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_gettime,
+		    (clockid_t clock_id, struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_gettime64,
+		    (clockid_t clock_id, struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_settime,
+		    (clockid_t clock_id, const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_settime64,
+		    (clockid_t clock_id,
+			 const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(clock_adjtime,
+		    (clockid_t clock_id, struct __user_old_timex __user *u_tx));
+
+COBALT_SYSCALL_DECL(clock_adjtime64,
+		    (clockid_t clock_id, struct __kernel_timex __user *u_tx));
+
+COBALT_SYSCALL_DECL(clock_nanosleep,
+		    (clockid_t clock_id, int flags,
+		     const struct __user_old_timespec __user *u_rqt,
+		     struct __user_old_timespec __user *u_rmt));
+
+COBALT_SYSCALL_DECL(clock_nanosleep64,
+		    (clockid_t clock_id, int flags,
+		     const struct __kernel_timespec __user *u_rqt,
+		     struct __kernel_timespec __user *u_rmt));
+
+int cobalt_clock_register(struct xnclock *clock,
+			  const cpumask_t *affinity,
+			  clockid_t *clk_id);
+
+void cobalt_clock_deregister(struct xnclock *clock);
+
+struct xnclock *cobalt_clock_find(clockid_t clock_id);
+
+extern DECLARE_BITMAP(cobalt_clock_extids, COBALT_MAX_EXTCLOCKS);
+
+#endif /* !_COBALT_POSIX_CLOCK_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c
new file mode 100644
index 0000000..2ec4608
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/compat.c
@@ -0,0 +1,544 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/err.h>
+#include <linux/memory.h>
+#include <linux/module.h>
+#include <cobalt/kernel/compat.h>
+#include <asm/xenomai/syscall.h>
+#include <xenomai/posix/mqueue.h>
+
+int sys32_get_timespec(struct timespec64 *ts,
+		       const struct old_timespec32 __user *u_cts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_rok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	if (__xn_get_user(cts.tv_sec, &u_cts->tv_sec) ||
+		__xn_get_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	ts->tv_sec = cts.tv_sec;
+	ts->tv_nsec = cts.tv_nsec;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timespec);
+
+int sys32_put_timespec(struct old_timespec32 __user *u_cts,
+		       const struct timespec64 *ts)
+{
+	struct old_timespec32 cts;
+
+	if (u_cts == NULL || !access_wok(u_cts, sizeof(*u_cts)))
+		return -EFAULT;
+
+	cts.tv_sec = ts->tv_sec;
+	cts.tv_nsec = ts->tv_nsec;
+
+	if (__xn_put_user(cts.tv_sec, &u_cts->tv_sec) ||
+	    __xn_put_user(cts.tv_nsec, &u_cts->tv_nsec))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timespec);
+
+int sys32_get_itimerspec(struct itimerspec64 *its,
+			 const struct old_itimerspec32 __user *cits)
+{
+	int ret = sys32_get_timespec(&its->it_value, &cits->it_value);
+
+	return ret ?: sys32_get_timespec(&its->it_interval, &cits->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_get_itimerspec);
+
+int sys32_put_itimerspec(struct old_itimerspec32 __user *cits,
+			 const struct itimerspec64 *its)
+{
+	int ret = sys32_put_timespec(&cits->it_value, &its->it_value);
+
+	return ret ?: sys32_put_timespec(&cits->it_interval, &its->it_interval);
+}
+EXPORT_SYMBOL_GPL(sys32_put_itimerspec);
+
+int sys32_get_timeval(struct __kernel_old_timeval *tv,
+		      const struct old_timeval32 __user *ctv)
+{
+	return (ctv == NULL ||
+		!access_rok(ctv, sizeof(*ctv)) ||
+		__xn_get_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_get_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timeval);
+
+int sys32_put_timeval(struct old_timeval32 __user *ctv,
+		      const struct __kernel_old_timeval *tv)
+{
+	return (ctv == NULL ||
+		!access_wok(ctv, sizeof(*ctv)) ||
+		__xn_put_user(tv->tv_sec, &ctv->tv_sec) ||
+		__xn_put_user(tv->tv_usec, &ctv->tv_usec)) ? -EFAULT : 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timeval);
+
+int sys32_get_timex(struct __kernel_timex *tx,
+		    const struct old_timex32 __user *ctx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	memset(tx, 0, sizeof(*tx));
+
+	ret = sys32_get_timeval(&time, &ctx->time);
+	if (ret)
+		return ret;
+
+	tx->time.tv_sec = time.tv_sec;
+	tx->time.tv_usec = time.tv_usec;
+
+	if (!access_rok(ctx, sizeof(*ctx)) ||
+	    __xn_get_user(tx->modes, &ctx->modes) ||
+	    __xn_get_user(tx->offset, &ctx->offset) ||
+	    __xn_get_user(tx->freq, &ctx->freq) ||
+	    __xn_get_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_get_user(tx->esterror, &ctx->esterror) ||
+	    __xn_get_user(tx->status, &ctx->status) ||
+	    __xn_get_user(tx->constant, &ctx->constant) ||
+	    __xn_get_user(tx->precision, &ctx->precision) ||
+	    __xn_get_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_get_user(tx->tick, &ctx->tick) ||
+	    __xn_get_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_get_user(tx->jitter, &ctx->jitter) ||
+	    __xn_get_user(tx->shift, &ctx->shift) ||
+	    __xn_get_user(tx->stabil, &ctx->stabil) ||
+	    __xn_get_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_get_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_get_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_get_user(tx->stbcnt, &ctx->stbcnt))
+	  return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_timex);
+
+int sys32_put_timex(struct old_timex32 __user *ctx,
+		    const struct __kernel_timex *tx)
+{
+	struct __kernel_old_timeval time;
+	int ret;
+
+	time.tv_sec = tx->time.tv_sec;
+	time.tv_usec = tx->time.tv_usec;
+
+	ret = sys32_put_timeval(&ctx->time, &time);
+	if (ret)
+		return ret;
+
+	if (!access_wok(ctx, sizeof(*ctx)) ||
+	    __xn_put_user(tx->modes, &ctx->modes) ||
+	    __xn_put_user(tx->offset, &ctx->offset) ||
+	    __xn_put_user(tx->freq, &ctx->freq) ||
+	    __xn_put_user(tx->maxerror, &ctx->maxerror) ||
+	    __xn_put_user(tx->esterror, &ctx->esterror) ||
+	    __xn_put_user(tx->status, &ctx->status) ||
+	    __xn_put_user(tx->constant, &ctx->constant) ||
+	    __xn_put_user(tx->precision, &ctx->precision) ||
+	    __xn_put_user(tx->tolerance, &ctx->tolerance) ||
+	    __xn_put_user(tx->tick, &ctx->tick) ||
+	    __xn_put_user(tx->ppsfreq, &ctx->ppsfreq) ||
+	    __xn_put_user(tx->jitter, &ctx->jitter) ||
+	    __xn_put_user(tx->shift, &ctx->shift) ||
+	    __xn_put_user(tx->stabil, &ctx->stabil) ||
+	    __xn_put_user(tx->jitcnt, &ctx->jitcnt) ||
+	    __xn_put_user(tx->calcnt, &ctx->calcnt) ||
+	    __xn_put_user(tx->errcnt, &ctx->errcnt) ||
+	    __xn_put_user(tx->stbcnt, &ctx->stbcnt))
+	  return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_timex);
+
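+/*
+ * Copy an fd_set between the compat and native layouts, one word at
+ * a time.
+ */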
+int sys32_get_fdset(fd_set *fds, const compat_fd_set __user *cfds,
+		    size_t cfdsize)
+{
+	int rdpos, wrpos, rdlim = cfdsize / sizeof(compat_ulong_t);
+
+	if (cfds == NULL || !access_rok(cfds, cfdsize))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; rdpos < rdlim; rdpos++, wrpos++)
+		if (__xn_get_user(fds->fds_bits[wrpos], cfds->fds_bits + rdpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_put_fdset(compat_fd_set __user *cfds, const fd_set *fds,
+		    size_t fdsize)
+{
+	int rdpos, wrpos, wrlim = fdsize / sizeof(long);
+
+	if (cfds == NULL || !access_wok(cfds, wrlim * sizeof(compat_ulong_t)))
+		return -EFAULT;
+
+	for (rdpos = 0, wrpos = 0; wrpos < wrlim; rdpos++, wrpos++)
+		if (__xn_put_user(fds->fds_bits[rdpos], cfds->fds_bits + wrpos))
+			return -EFAULT;
+
+	return 0;
+}
+
+int sys32_get_param_ex(int policy,
+		       struct sched_param_ex *p,
+		       const struct compat_sched_param_ex __user *u_cp)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL || cobalt_copy_from_user(&cpex, u_cp, sizeof(cpex)))
+		return -EFAULT;
+
+	p->sched_priority = cpex.sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		p->sched_ss_low_priority = cpex.sched_ss_low_priority;
+		p->sched_ss_max_repl = cpex.sched_ss_max_repl;
+		p->sched_ss_repl_period.tv_sec = cpex.sched_ss_repl_period.tv_sec;
+		p->sched_ss_repl_period.tv_nsec = cpex.sched_ss_repl_period.tv_nsec;
+		p->sched_ss_init_budget.tv_sec = cpex.sched_ss_init_budget.tv_sec;
+		p->sched_ss_init_budget.tv_nsec = cpex.sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		p->sched_rr_quantum.tv_sec = cpex.sched_rr_quantum.tv_sec;
+		p->sched_rr_quantum.tv_nsec = cpex.sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		p->sched_tp_partition = cpex.sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		p->sched_quota_group = cpex.sched_quota_group;
+		break;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_param_ex);
+
+int sys32_put_param_ex(int policy,
+		       struct compat_sched_param_ex __user *u_cp,
+		       const struct sched_param_ex *p)
+{
+	struct compat_sched_param_ex cpex;
+
+	if (u_cp == NULL)
+		return -EFAULT;
+
+	cpex.sched_priority = p->sched_priority;
+
+	switch (policy) {
+	case SCHED_SPORADIC:
+		cpex.sched_ss_low_priority = p->sched_ss_low_priority;
+		cpex.sched_ss_max_repl = p->sched_ss_max_repl;
+		cpex.sched_ss_repl_period.tv_sec = p->sched_ss_repl_period.tv_sec;
+		cpex.sched_ss_repl_period.tv_nsec = p->sched_ss_repl_period.tv_nsec;
+		cpex.sched_ss_init_budget.tv_sec = p->sched_ss_init_budget.tv_sec;
+		cpex.sched_ss_init_budget.tv_nsec = p->sched_ss_init_budget.tv_nsec;
+		break;
+	case SCHED_RR:
+		cpex.sched_rr_quantum.tv_sec = p->sched_rr_quantum.tv_sec;
+		cpex.sched_rr_quantum.tv_nsec = p->sched_rr_quantum.tv_nsec;
+		break;
+	case SCHED_TP:
+		cpex.sched_tp_partition = p->sched_tp_partition;
+		break;
+	case SCHED_QUOTA:
+		cpex.sched_quota_group = p->sched_quota_group;
+		break;
+	}
+
+	return cobalt_copy_to_user(u_cp, &cpex, sizeof(cpex));
+}
+EXPORT_SYMBOL_GPL(sys32_put_param_ex);
+
+int sys32_get_mqattr(struct mq_attr *ap,
+		     const struct compat_mq_attr __user *u_cap)
+{
+	struct compat_mq_attr cattr;
+
+	if (u_cap == NULL ||
+	    cobalt_copy_from_user(&cattr, u_cap, sizeof(cattr)))
+		return -EFAULT;
+
+	ap->mq_flags = cattr.mq_flags;
+	ap->mq_maxmsg = cattr.mq_maxmsg;
+	ap->mq_msgsize = cattr.mq_msgsize;
+	ap->mq_curmsgs = cattr.mq_curmsgs;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_mqattr);
+
+int sys32_put_mqattr(struct compat_mq_attr __user *u_cap,
+		     const struct mq_attr *ap)
+{
+	struct compat_mq_attr cattr;
+
+	cattr.mq_flags = ap->mq_flags;
+	cattr.mq_maxmsg = ap->mq_maxmsg;
+	cattr.mq_msgsize = ap->mq_msgsize;
+	cattr.mq_curmsgs = ap->mq_curmsgs;
+
+	return u_cap == NULL ? -EFAULT :
+		cobalt_copy_to_user(u_cap, &cattr, sizeof(cattr));
+}
+EXPORT_SYMBOL_GPL(sys32_put_mqattr);
+
+int sys32_get_sigevent(struct sigevent *ev,
+		       const struct compat_sigevent *__user u_cev)
+{
+	struct compat_sigevent cev;
+	compat_int_t *cp;
+	int ret, *p;
+
+	if (u_cev == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cev, u_cev, sizeof(cev));
+	if (ret)
+		return ret;
+
+	memset(ev, 0, sizeof(*ev));
+	ev->sigev_value.sival_ptr = compat_ptr(cev.sigev_value.sival_ptr);
+	ev->sigev_signo = cev.sigev_signo;
+	ev->sigev_notify = cev.sigev_notify;
+	/*
+	 * Extensions may define extra fields we don't know about in
+	 * the padding area, so we have to load it entirely.
+	 */
+	p = ev->_sigev_un._pad;
+	cp = cev._sigev_un._pad;
+	while (p < &ev->_sigev_un._pad[ARRAY_SIZE(ev->_sigev_un._pad)] &&
+	       cp < &cev._sigev_un._pad[ARRAY_SIZE(cev._sigev_un._pad)])
+		*p++ = *cp++;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigevent);
+
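+/*
+ * On big-endian platforms, the 32-bit compat sigset words have to be
+ * paired back into native 64-bit words; little-endian layouts match
+ * bit for bit, so a plain copy is enough there.
+ */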
+int sys32_get_sigset(sigset_t *set, const compat_sigset_t *u_cset)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+
+	if (cobalt_copy_from_user(&v, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
+	switch (_NSIG_WORDS) {
+	case 4: set->sig[3] = v.sig[6] | (((long)v.sig[7]) << 32 );
+	case 3: set->sig[2] = v.sig[4] | (((long)v.sig[5]) << 32 );
+	case 2: set->sig[1] = v.sig[2] | (((long)v.sig[3]) << 32 );
+	case 1: set->sig[0] = v.sig[0] | (((long)v.sig[1]) << 32 );
+	}
+#else
+	if (cobalt_copy_from_user(set, u_cset, sizeof(compat_sigset_t)))
+		return -EFAULT;
+#endif
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigset);
+
+int sys32_put_sigset(compat_sigset_t *u_cset, const sigset_t *set)
+{
+#ifdef __BIG_ENDIAN
+	compat_sigset_t v;
+	switch (_NSIG_WORDS) {
+	case 4: v.sig[7] = (set->sig[3] >> 32); v.sig[6] = set->sig[3];
+	case 3: v.sig[5] = (set->sig[2] >> 32); v.sig[4] = set->sig[2];
+	case 2: v.sig[3] = (set->sig[1] >> 32); v.sig[2] = set->sig[1];
+	case 1: v.sig[1] = (set->sig[0] >> 32); v.sig[0] = set->sig[0];
+	}
+	return cobalt_copy_to_user(u_cset, &v, sizeof(*u_cset)) ? -EFAULT : 0;
+#else
+	return cobalt_copy_to_user(u_cset, set, sizeof(*u_cset)) ? -EFAULT : 0;
+#endif
+}
+EXPORT_SYMBOL_GPL(sys32_put_sigset);
+
+int sys32_get_sigval(union sigval *val, const union compat_sigval *u_cval)
+{
+	union compat_sigval cval;
+	int ret;
+
+	if (u_cval == NULL)
+		return -EFAULT;
+
+	ret = cobalt_copy_from_user(&cval, u_cval, sizeof(cval));
+	if (ret)
+		return ret;
+
+	val->sival_ptr = compat_ptr(cval.sival_ptr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_sigval);
+
+int sys32_put_siginfo(void __user *u_si, const struct siginfo *si,
+		      int overrun)
+{
+	struct compat_siginfo __user *u_p = u_si;
+	int ret;
+
+	if (u_p == NULL)
+		return -EFAULT;
+
+	ret = __xn_put_user(si->si_signo, &u_p->si_signo);
+	ret |= __xn_put_user(si->si_errno, &u_p->si_errno);
+	ret |= __xn_put_user(si->si_code, &u_p->si_code);
+
+	/*
+	 * Copy the generic/standard siginfo bits to userland.
+	 */
+	switch (si->si_code) {
+	case SI_TIMER:
+		ret |= __xn_put_user(si->si_tid, &u_p->si_tid);
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		ret |= __xn_put_user(overrun, &u_p->si_overrun);
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		ret |= __xn_put_user(ptr_to_compat(si->si_ptr), &u_p->si_ptr);
+		fallthrough;
+	case SI_USER:
+		ret |= __xn_put_user(si->si_pid, &u_p->si_pid);
+		ret |= __xn_put_user(si->si_uid, &u_p->si_uid);
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(sys32_put_siginfo);
+
+int sys32_get_msghdr(struct user_msghdr *msg,
+		     const struct compat_msghdr __user *u_cmsg)
+{
+	compat_uptr_t tmp1, tmp2, tmp3;
+
+	if (u_cmsg == NULL ||
+	    !access_rok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_get_user(tmp1, &u_cmsg->msg_name) ||
+	    __xn_get_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_get_user(tmp2, &u_cmsg->msg_iov) ||
+	    __xn_get_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_get_user(tmp3, &u_cmsg->msg_control) ||
+	    __xn_get_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_get_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	if (msg->msg_namelen > sizeof(struct sockaddr_storage))
+		msg->msg_namelen = sizeof(struct sockaddr_storage);
+
+	msg->msg_name = compat_ptr(tmp1);
+	msg->msg_iov = compat_ptr(tmp2);
+	msg->msg_control = compat_ptr(tmp3);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_msghdr);
+
+int sys32_get_mmsghdr(struct mmsghdr *mmsg,
+		      const struct compat_mmsghdr __user *u_cmmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_rok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_get_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_get_msghdr(&mmsg->msg_hdr, &u_cmmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_get_mmsghdr);
+
+int sys32_put_msghdr(struct compat_msghdr __user *u_cmsg,
+		     const struct user_msghdr *msg)
+{
+	if (u_cmsg == NULL ||
+	    !access_wok(u_cmsg, sizeof(*u_cmsg)) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_name), &u_cmsg->msg_name) ||
+	    __xn_put_user(msg->msg_namelen, &u_cmsg->msg_namelen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_iov), &u_cmsg->msg_iov) ||
+	    __xn_put_user(msg->msg_iovlen, &u_cmsg->msg_iovlen) ||
+	    __xn_put_user(ptr_to_compat(msg->msg_control), &u_cmsg->msg_control) ||
+	    __xn_put_user(msg->msg_controllen, &u_cmsg->msg_controllen) ||
+	    __xn_put_user(msg->msg_flags, &u_cmsg->msg_flags))
+		return -EFAULT;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_msghdr);
+
+int sys32_put_mmsghdr(struct compat_mmsghdr __user *u_cmmsg,
+		     const struct mmsghdr *mmsg)
+{
+	if (u_cmmsg == NULL ||
+	    !access_wok(u_cmmsg, sizeof(*u_cmmsg)) ||
+	    __xn_put_user(mmsg->msg_len, &u_cmmsg->msg_len))
+		return -EFAULT;
+
+	return sys32_put_msghdr(&u_cmmsg->msg_hdr, &mmsg->msg_hdr);
+}
+EXPORT_SYMBOL_GPL(sys32_put_mmsghdr);
+
+int sys32_get_iovec(struct iovec *iov,
+		    const struct compat_iovec __user *u_ciov,
+		    int ciovlen)
+{
+	const struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < ciovlen; n++, p++) {
+		ret = cobalt_copy_from_user(&ciov, p, sizeof(ciov));
+		if (ret)
+			return ret;
+		iov[n].iov_base = compat_ptr(ciov.iov_base);
+		iov[n].iov_len = ciov.iov_len;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_get_iovec);
+
+int sys32_put_iovec(struct compat_iovec __user *u_ciov,
+		    const struct iovec *iov,
+		    int iovlen)
+{
+	struct compat_iovec __user *p;
+	struct compat_iovec ciov;
+	int ret, n;
+
+	for (n = 0, p = u_ciov; n < iovlen; n++, p++) {
+		ciov.iov_base = ptr_to_compat(iov[n].iov_base);
+		ciov.iov_len = iov[n].iov_len;
+		ret = cobalt_copy_to_user(p, &ciov, sizeof(*p));
+		if (ret)
+			return ret;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(sys32_put_iovec);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c
new file mode 100644
index 0000000..bb18fe3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.c
@@ -0,0 +1,424 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "internal.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+
+static inline int
+pthread_cond_init(struct cobalt_cond_shadow *cnd, const struct cobalt_condattr *attr)
+{
+	int synch_flags = XNSYNCH_PRIO, ret;
+	struct cobalt_cond *cond, *old_cond;
+	struct cobalt_cond_state *state;
+	struct cobalt_ppd *sys_ppd;
+	struct list_head *condq;
+	spl_t s;
+
+	cond = xnmalloc(sizeof(*cond));
+	if (cond == NULL)
+		return -ENOMEM;
+
+	sys_ppd = cobalt_ppd_get(attr->pshared);
+	state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state));
+	if (state == NULL) {
+		ret = -EAGAIN;
+		goto fail_umm;
+	}
+	cond->state = state;
+	state->pending_signals = 0;
+	state->mutex_state_offset = ~0U;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	condq = &cobalt_current_resources(attr->pshared)->condq;
+	if (cnd->magic == COBALT_COND_MAGIC && !list_empty(condq)) {
+		old_cond = xnregistry_lookup(cnd->handle, NULL);
+		if (cobalt_obj_active(old_cond, COBALT_COND_MAGIC,
+				      typeof(*old_cond))) {
+			ret = -EBUSY;
+			goto fail_register;
+		}
+	}
+
+	ret = xnregistry_enter_anon(cond, &cond->resnode.handle);
+	if (ret < 0)
+		goto fail_register;
+	if (attr->pshared)
+		cond->resnode.handle |= XNSYNCH_PSHARED;
+	cond->magic = COBALT_COND_MAGIC;
+	xnsynch_init(&cond->synchbase, synch_flags, NULL);
+	cond->attr = *attr;
+	cond->mutex = NULL;
+	cobalt_add_resource(&cond->resnode, cond, attr->pshared);
+
+	cnd->handle = cond->resnode.handle;
+	cnd->state_offset = cobalt_umm_offset(&sys_ppd->umm, state);
+	cnd->magic = COBALT_COND_MAGIC;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+fail_register:
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_umm_free(&sys_ppd->umm, state);
+fail_umm:
+	xnfree(cond);
+
+	return ret;
+}
+
+static inline int pthread_cond_destroy(struct cobalt_cond_shadow *cnd)
+{
+	struct cobalt_cond *cond;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	cond = xnregistry_lookup(cnd->handle, NULL);
+	if (cond == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	if (!cobalt_obj_active(cnd, COBALT_COND_MAGIC, struct cobalt_cond_shadow)
+	    || !cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	if (cond->resnode.scope !=
+	    cobalt_current_resources(cond->attr.pshared)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EPERM;
+	}
+
+	if (xnsynch_pended_p(&cond->synchbase) || cond->mutex) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	cobalt_cond_reclaim(&cond->resnode, s); /* drops lock */
+
+	cobalt_mark_deleted(cnd);
+
+	return 0;
+}
+
+static inline int cobalt_cond_timedwait_prologue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex,
+						 xnticks_t abs_to)
+{
+	int err, ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* Fail if the cond is stale, or if other waiters bound it to
+	   a different mutex. */
+	if (!cobalt_obj_active(cond, COBALT_COND_MAGIC, struct cobalt_cond)
+	    || (cond->mutex && cond->mutex != mutex)) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	if (cond->resnode.scope !=
+	    cobalt_current_resources(cond->attr.pshared)) {
+		err = -EPERM;
+		goto unlock_and_return;
+	}
+
+	if (mutex->attr.pshared != cond->attr.pshared) {
+		err = -EINVAL;
+		goto unlock_and_return;
+	}
+
+	/* Unlock mutex. */
+	err = cobalt_mutex_release(cur, mutex);
+	if (err < 0)
+		goto unlock_and_return;
+
+	/* err == 1 means a reschedule is needed, but do not
+	   reschedule here: releasing the mutex and suspending must be
+	   done atomically in pthread_cond_*wait. */
+
+	/* Bind mutex to cond. */
+	if (cond->mutex == NULL) {
+		cond->mutex = mutex;
+		list_add_tail(&cond->mutex_link, &mutex->conds);
+	}
+
+	/* Wait for another thread to signal the condition. */
+	if (abs_to != XN_INFINITE)
+		ret = xnsynch_sleep_on(&cond->synchbase, abs_to,
+				       clock_flag(TIMER_ABSTIME, cond->attr.clock));
+	else
+		ret = xnsynch_sleep_on(&cond->synchbase, XN_INFINITE, XN_RELATIVE);
+
+	/* There are three possible wakeup conditions:
+	   - cond_signal / cond_broadcast: no status bit is set, and
+	     the function should return 0;
+	   - timeout: the status bit XNTIMEO is set, and the function
+	     should return ETIMEDOUT;
+	   - pthread_kill: the status bit XNBREAK is set but ignored;
+	     the function simply returns EINTR (used only by the
+	     user-space interface, replaced by 0 anywhere else). The
+	     wakeup may be spurious or not, depending on whether
+	     pthread_cond_signal was called between pthread_kill and
+	     the moment xnsynch_sleep_on returned.
+	 */
+
+	err = 0;
+
+	if (ret & XNBREAK)
+		err = -EINTR;
+	else if (ret & XNTIMEO)
+		err = -ETIMEDOUT;
+
+unlock_and_return:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static inline int cobalt_cond_timedwait_epilogue(struct xnthread *cur,
+						 struct cobalt_cond *cond,
+						 struct cobalt_mutex *mutex)
+{
+	int err;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	err = __cobalt_mutex_acquire_unchecked(cur, mutex, NULL);
+	if (err == -EINTR)
+		goto unlock_and_return;
+
+	/*
+	 * Unbind the mutex from the cond if no other thread is
+	 * waiting, unless this was already done.
+	 */
+	if (!xnsynch_pended_p(&cond->synchbase) && cond->mutex == mutex) {
+		cond->mutex = NULL;
+		list_del(&cond->mutex_link);
+	}
+
+unlock_and_return:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+COBALT_SYSCALL(cond_init, current,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		const struct cobalt_condattr __user *u_attr))
+{
+	struct cobalt_cond_shadow cnd;
+	struct cobalt_condattr attr;
+	int err;
+
+	if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr)))
+		return -EFAULT;
+
+	trace_cobalt_cond_init(u_cnd, &attr);
+
+	err = pthread_cond_init(&cnd, &attr);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
+}
+
+COBALT_SYSCALL(cond_destroy, current,
+	       (struct cobalt_cond_shadow __user *u_cnd))
+{
+	struct cobalt_cond_shadow cnd;
+	int err;
+
+	if (cobalt_copy_from_user(&cnd, u_cnd, sizeof(cnd)))
+		return -EFAULT;
+
+	trace_cobalt_cond_destroy(u_cnd);
+
+	err = pthread_cond_destroy(&cnd);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_cnd, &cnd, sizeof(*u_cnd));
+}
+
+struct us_cond_data {
+	int err;
+};
+
+static inline int cond_fetch_timeout(struct timespec64 *ts,
+				     const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
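+/*
+ * Prologue of the cond wait sequence: publish the offset of the
+ * state of the mutex bound to the cond, release that mutex, then
+ * sleep on the condition variable. Re-acquiring the mutex is left to
+ * the epilogue, which may run again on its own if the wait is
+ * interrupted.
+ */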
+int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
+				struct cobalt_mutex_shadow __user *u_mx,
+				int *u_err,
+				void __user *u_ts,
+				int (*fetch_timeout)(struct timespec64 *ts,
+						     const void __user *u_ts))
+{
+	struct xnthread *cur = xnthread_current();
+	struct cobalt_cond *cond;
+	struct cobalt_mutex *mx;
+	struct us_cond_data d;
+	struct timespec64 ts;
+	xnhandle_t handle;
+	int err, perr = 0;
+	__u32 offset;
+
+	handle = cobalt_get_handle_from_user(&u_cnd->handle);
+	cond = xnregistry_lookup(handle, NULL);
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	mx = xnregistry_lookup(handle, NULL);
+
+	if (cond->mutex == NULL) {
+		__xn_get_user(offset, &u_mx->state_offset);
+		cond->state->mutex_state_offset = offset;
+	}
+
+	if (fetch_timeout) {
+		err = fetch_timeout(&ts, u_ts);
+		if (err == 0) {
+			trace_cobalt_cond_timedwait(u_cnd, u_mx, &ts);
+			err = cobalt_cond_timedwait_prologue(cur, cond, mx,
+							     ts2ns(&ts) + 1);
+		}
+	} else {
+		trace_cobalt_cond_wait(u_cnd, u_mx);
+		err = cobalt_cond_timedwait_prologue(cur, cond, mx, XN_INFINITE);
+	}
+
+	switch (err) {
+	case 0:
+	case -ETIMEDOUT:
+		perr = d.err = err;
+		err = cobalt_cond_timedwait_epilogue(cur, cond, mx);
+		break;
+
+	case -EINTR:
+		perr = err;
+		d.err = 0;	/* epilogue should return 0. */
+		break;
+
+	default:
+		/* Placate gcc by handling a case which can never
+		   happen. */
+		d.err = EINVAL;
+	}
+
+	if (cond->mutex == NULL)
+		cond->state->mutex_state_offset = ~0U;
+
+	if (err == -EINTR)
+		__xn_put_user(d.err, u_err);
+
+	return err == 0 ? perr : err;
+}
+
+/* pthread_cond_wait_prologue(cond, mutex, count_ptr, timed, timeout) */
+COBALT_SYSCALL(cond_wait_prologue, nonrestartable,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		struct cobalt_mutex_shadow __user *u_mx,
+		int *u_err,
+		unsigned int timed,
+		struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts,
+					   timed ? cond_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL(cond_wait_epilogue, primary,
+	       (struct cobalt_cond_shadow __user *u_cnd,
+		struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct xnthread *cur = xnthread_current();
+	struct cobalt_cond *cond;
+	struct cobalt_mutex *mx;
+	xnhandle_t handle;
+	int err;
+
+	handle = cobalt_get_handle_from_user(&u_cnd->handle);
+	cond = xnregistry_lookup(handle, NULL);
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	mx = xnregistry_lookup(handle, NULL);
+	err = cobalt_cond_timedwait_epilogue(cur, cond, mx);
+
+	if (cond->mutex == NULL)
+		cond->state->mutex_state_offset = ~0U;
+
+	return err;
+}
+
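+/*
+ * Wake up sleepers according to the signal count recorded in the
+ * shared cond state: ~0U stands for a broadcast and flushes every
+ * sleeper, any other non-zero count wakes up as many sleepers.
+ * Returns non-zero whenever a reschedule is required.
+ */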
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond)
+{
+	struct cobalt_cond_state *state;
+	__u32 pending_signals;
+	int need_resched;
+
+	state = cond->state;
+	pending_signals = state->pending_signals;
+
+	switch (pending_signals) {
+	default:
+		state->pending_signals = 0;
+		need_resched = xnsynch_wakeup_many_sleepers(&cond->synchbase,
+							    pending_signals);
+		break;
+
+	case ~0U:
+		need_resched =
+			xnsynch_flush(&cond->synchbase, 0) == XNSYNCH_RESCHED;
+		state->pending_signals = 0;
+		break;
+
+	case 0:
+		need_resched = 0;
+		break;
+	}
+
+	return need_resched;
+}
+
+void cobalt_cond_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_cond *cond;
+
+	cond = container_of(node, struct cobalt_cond, resnode);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&cond->synchbase);
+	cobalt_mark_deleted(cond);
+	xnlock_put_irqrestore(&nklock, s);
+
+	cobalt_umm_free(&cobalt_ppd_get(cond->attr.pshared)->umm,
+			cond->state);
+	xnfree(cond);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h
new file mode 100644
index 0000000..7bec2a6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/cond.h
@@ -0,0 +1,71 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_COND_H
+#define _COBALT_POSIX_COND_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/thread.h>
+#include <cobalt/uapi/cond.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_mutex;
+
+struct cobalt_cond {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	struct list_head mutex_link;
+	struct cobalt_cond_state *state;
+	struct cobalt_condattr attr;
+	struct cobalt_mutex *mutex;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_cond_wait_prologue(struct cobalt_cond_shadow __user *u_cnd,
+				struct cobalt_mutex_shadow __user *u_mx,
+				int *u_err,
+				void __user *u_ts,
+				int (*fetch_timeout)(struct timespec64 *ts,
+						     const void __user *u_ts));
+COBALT_SYSCALL_DECL(cond_init,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     const struct cobalt_condattr __user *u_attr));
+
+COBALT_SYSCALL_DECL(cond_destroy,
+		    (struct cobalt_cond_shadow __user *u_cnd));
+
+COBALT_SYSCALL_DECL(cond_wait_prologue,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx,
+		     int *u_err,
+		     unsigned int timed,
+		     struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(cond_wait_epilogue,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx));
+
+int cobalt_cond_deferred_signals(struct cobalt_cond *cond);
+
+void cobalt_cond_reclaim(struct cobalt_resnode *node,
+			 spl_t s);
+
+#endif /* !_COBALT_POSIX_COND_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c
new file mode 100644
index 0000000..fd012d0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/kconfig.h>
+#include <linux/atomic.h>
+#include <linux/printk.h>
+#include <cobalt/kernel/init.h>
+#include <cobalt/kernel/thread.h>
+#include <xenomai/version.h>
+#include <pipeline/tick.h>
+#include <asm/xenomai/syscall.h>
+#include "corectl.h"
+
+static BLOCKING_NOTIFIER_HEAD(config_notifier_list);
+
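+/*
+ * Answer a core configuration query. Options unknown to the core are
+ * relayed to the registered config notifiers from secondary mode.
+ */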
+static int do_conf_option(int option, void __user *u_buf, size_t u_bufsz)
+{
+	struct cobalt_config_vector vec;
+	int ret, val = 0;
+
+	if (option <= _CC_COBALT_GET_CORE_STATUS && u_bufsz < sizeof(val))
+		return -EINVAL;
+
+	switch (option) {
+	case _CC_COBALT_GET_VERSION:
+		val = XENO_VERSION_CODE;
+		break;
+	case _CC_COBALT_GET_NR_PIPES:
+#ifdef CONFIG_XENO_OPT_PIPE
+		val = CONFIG_XENO_OPT_PIPE_NRDEV;
+#endif
+		break;
+	case _CC_COBALT_GET_NR_TIMERS:
+		val = CONFIG_XENO_OPT_NRTIMERS;
+		break;
+	case _CC_COBALT_GET_POLICIES:
+		val = _CC_COBALT_SCHED_FIFO|_CC_COBALT_SCHED_RR;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_WEAK))
+			val |= _CC_COBALT_SCHED_WEAK;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_SPORADIC))
+			val |= _CC_COBALT_SCHED_SPORADIC;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_QUOTA))
+			val |= _CC_COBALT_SCHED_QUOTA;
+		if (IS_ENABLED(CONFIG_XENO_OPT_SCHED_TP))
+			val |= _CC_COBALT_SCHED_TP;
+		break;
+	case _CC_COBALT_GET_DEBUG:
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_COBALT))
+			val |= _CC_COBALT_DEBUG_ASSERT;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_CONTEXT))
+			val |= _CC_COBALT_DEBUG_CONTEXT;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LOCKING))
+			val |= _CC_COBALT_DEBUG_LOCKING;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_USER))
+			val |= _CC_COBALT_DEBUG_USER;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED))
+			val |= _CC_COBALT_DEBUG_MUTEX_RELAXED;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP))
+			val |= _CC_COBALT_DEBUG_MUTEX_SLEEP;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY))
+			val |= _CC_COBALT_DEBUG_LEGACY;
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_TRACE_RELAX))
+			val |= _CC_COBALT_DEBUG_TRACE_RELAX;
+		if (IS_ENABLED(CONFIG_XENO_DRIVERS_RTNET_CHECKED))
+			val |= _CC_COBALT_DEBUG_NET;
+		break;
+	case _CC_COBALT_GET_WATCHDOG:
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+		val = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT;
+#endif
+		break;
+	case _CC_COBALT_GET_CORE_STATUS:
+		val = realtime_core_state();
+		break;
+	default:
+		if (is_primary_domain())
+			/* Switch to secondary mode first. */
+			return -ENOSYS;
+		vec.u_buf = u_buf;
+		vec.u_bufsz = u_bufsz;
+		ret = blocking_notifier_call_chain(&config_notifier_list,
+						   option, &vec);
+		if (ret == NOTIFY_DONE)
+			return -EINVAL; /* Nobody cared. */
+		return notifier_to_errno(ret);
+	}
+
+	ret = cobalt_copy_to_user(u_buf, &val, sizeof(val));
+
+	return ret ? -EFAULT : 0;
+}
+
+static int stop_services(const void __user *u_buf, size_t u_bufsz)
+{
+	const u32 final_grace_period = 3; /* seconds */
+	enum cobalt_run_states state;
+	__u32 grace_period;
+	int ret;
+
+	/*
+	 * XXX: we don't have any syscall for unbinding a thread from
+	 * the Cobalt core, so we prevent real-time threads from
+	 * stopping the Cobalt services, i.e. _CC_COBALT_STOP_CORE
+	 * must be issued from a plain regular linux thread.
+	 */
+	if (xnthread_current())
+		return -EPERM;
+
+	if (u_bufsz != sizeof(__u32))
+		return -EINVAL;
+
+	ret = cobalt_copy_from_user(&grace_period,
+				    u_buf, sizeof(grace_period));
+	if (ret)
+		return ret;
+
+	state = atomic_cmpxchg(&cobalt_runstate,
+			       COBALT_STATE_RUNNING,
+			       COBALT_STATE_TEARDOWN);
+	switch (state) {
+	case COBALT_STATE_STOPPED:
+		break;
+	case COBALT_STATE_RUNNING:
+		/* Kill user threads. */
+		ret = xnthread_killall(grace_period, XNUSER);
+		if (ret) {
+			set_realtime_core_state(state);
+			return ret;
+		}
+		cobalt_call_state_chain(COBALT_STATE_TEARDOWN);
+		/* Kill lingering RTDM tasks. */
+		ret = xnthread_killall(final_grace_period, 0);
+		if (ret == -EAGAIN)
+			printk(XENO_WARNING "some RTDM tasks won't stop\n");
+		pipeline_uninstall_tick_proxy();
+		set_realtime_core_state(COBALT_STATE_STOPPED);
+		printk(XENO_INFO "services stopped\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+static int start_services(void)
+{
+	enum cobalt_run_states state;
+	int ret = 0;
+
+	state = atomic_cmpxchg(&cobalt_runstate,
+			       COBALT_STATE_STOPPED,
+			       COBALT_STATE_WARMUP);
+	switch (state) {
+	case COBALT_STATE_RUNNING:
+		break;
+	case COBALT_STATE_STOPPED:
+		pipeline_install_tick_proxy();
+		cobalt_call_state_chain(COBALT_STATE_WARMUP);
+		set_realtime_core_state(COBALT_STATE_RUNNING);
+		printk(XENO_INFO "services started\n");
+		break;
+	default:
+		ret = -EINPROGRESS;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(corectl, probing,
+	       (int request, void __user *u_buf, size_t u_bufsz))
+{
+	int ret;
+
+	switch (request) {
+	case _CC_COBALT_STOP_CORE:
+		ret = stop_services(u_buf, u_bufsz);
+		break;
+	case _CC_COBALT_START_CORE:
+		ret = start_services();
+		break;
+	default:
+		ret = do_conf_option(request, u_buf, u_bufsz);
+	}
+
+	return ret;
+}
+
+void cobalt_add_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_register(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_add_config_chain);
+
+void cobalt_remove_config_chain(struct notifier_block *nb)
+{
+	blocking_notifier_chain_unregister(&config_notifier_list, nb);
+}
+EXPORT_SYMBOL_GPL(cobalt_remove_config_chain);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h
new file mode 100644
index 0000000..b9bcf3b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/corectl.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_CORECTL_H
+#define _COBALT_POSIX_CORECTL_H
+
+#include <linux/types.h>
+#include <linux/notifier.h>
+#include <xenomai/posix/syscall.h>
+#include <cobalt/uapi/corectl.h>
+
+struct cobalt_config_vector {
+	void __user *u_buf;
+	size_t u_bufsz;
+};
+
+COBALT_SYSCALL_DECL(corectl,
+		    (int request, void __user *u_buf, size_t u_bufsz));
+
+void cobalt_add_config_chain(struct notifier_block *nb);
+
+void cobalt_remove_config_chain(struct notifier_block *nb);
+
+#endif /* !_COBALT_POSIX_CORECTL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c
new file mode 100644
index 0000000..052c686
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.c
@@ -0,0 +1,415 @@
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "event.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+/*
+ * Cobalt event notification services
+ *
+ * An event flag group is a synchronization object represented by a
+ * regular native integer word; every available bit in that word can
+ * be used to map a user-defined event flag.  When a flag is set, the
+ * associated event is said to have occurred.
+ *
+ * Xenomai threads and interrupt handlers can use event flags to
+ * signal the occurrence of events to other threads; those threads can
+ * either wait for the events to occur in a conjunctive manner (all
+ * awaited events must have occurred to wake up), or in a disjunctive
+ * way (at least one of the awaited events must have occurred to wake
+ * up).
+ *
+ * We expose this non-POSIX feature through the internal API, as a
+ * fast IPC mechanism available to the Copperplate interface.
+ */
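+
+/*
+ * Wake-up predicate, as implemented by __cobalt_event_wait() below:
+ * given pending = value & bits,
+ *
+ *	conjunctive wait: satisfied iff pending == bits;
+ *	disjunctive wait (COBALT_EVENT_ANY): satisfied iff pending != 0.
+ */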
+
+struct event_wait_context {
+	struct xnthread_wait_context wc;
+	unsigned int value;
+	int mode;
+};
+
+COBALT_SYSCALL(event_init, current,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int value, int flags))
+{
+	struct cobalt_event_shadow shadow;
+	struct cobalt_event_state *state;
+	int pshared, synflags, ret;
+	struct cobalt_event *event;
+	struct cobalt_umm *umm;
+	unsigned long stateoff;
+	spl_t s;
+
+	trace_cobalt_event_init(u_event, value, flags);
+
+	event = xnmalloc(sizeof(*event));
+	if (event == NULL)
+		return -ENOMEM;
+
+	pshared = (flags & COBALT_EVENT_SHARED) != 0;
+	umm = &cobalt_ppd_get(pshared)->umm;
+	state = cobalt_umm_alloc(umm, sizeof(*state));
+	if (state == NULL) {
+		xnfree(event);
+		return -EAGAIN;
+	}
+
+	ret = xnregistry_enter_anon(event, &event->resnode.handle);
+	if (ret) {
+		cobalt_umm_free(umm, state);
+		xnfree(event);
+		return ret;
+	}
+
+	event->state = state;
+	event->flags = flags;
+	synflags = (flags & COBALT_EVENT_PRIO) ? XNSYNCH_PRIO : XNSYNCH_FIFO;
+	xnsynch_init(&event->synch, synflags, NULL);
+	state->value = value;
+	state->flags = 0;
+	state->nwaiters = 0;
+	stateoff = cobalt_umm_offset(umm, state);
+	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&event->resnode, event, pshared);
+	event->magic = COBALT_EVENT_MAGIC;
+	xnlock_put_irqrestore(&nklock, s);
+
+	shadow.flags = flags;
+	shadow.handle = event->resnode.handle;
+	shadow.state_offset = (__u32)stateoff;
+
+	return cobalt_copy_to_user(u_event, &shadow, sizeof(*u_event));
+}
+
+int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
+			unsigned int bits,
+			unsigned int __user *u_bits_r,
+			int mode, const struct timespec64 *ts)
+{
+	unsigned int rbits = 0, testval;
+	xnticks_t timeout = XN_INFINITE;
+	struct cobalt_event_state *state;
+	xntmode_t tmode = XN_RELATIVE;
+	struct event_wait_context ewc;
+	struct cobalt_event *event;
+	xnhandle_t handle;
+	int ret = 0, info;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+
+		timeout = ts2ns(ts);
+		if (timeout) {
+			timeout++;
+			tmode = XN_ABSOLUTE;
+		} else
+			timeout = XN_NONBLOCK;
+		trace_cobalt_event_timedwait(u_event, bits, mode, ts);
+	} else
+		trace_cobalt_event_wait(u_event, bits, mode);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	state = event->state;
+
+	if (bits == 0) {
+		/*
+		 * Special case: we don't wait for any event, we only
+		 * return the current flag group value.
+		 */
+		rbits = state->value;
+		goto out;
+	}
+
+	state->flags |= COBALT_EVENT_PENDED;
+	rbits = state->value & bits;
+	testval = mode & COBALT_EVENT_ANY ? rbits : bits;
+	if (rbits && rbits == testval)
+		goto done;
+
+	if (timeout == XN_NONBLOCK) {
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	ewc.value = bits;
+	ewc.mode = mode;
+	xnthread_prepare_wait(&ewc.wc);
+	state->nwaiters++;
+	info = xnsynch_sleep_on(&event->synch, timeout, tmode);
+	if (info & XNRMID) {
+		ret = -EIDRM;
+		goto out;
+	}
+	if (info & (XNBREAK|XNTIMEO)) {
+		state->nwaiters--;
+		ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+	} else
+		rbits = ewc.value;
+done:
+	if (!xnsynch_pended_p(&event->synch))
+		state->flags &= ~COBALT_EVENT_PENDED;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (ret == 0 &&
+	    cobalt_copy_to_user(u_bits_r, &rbits, sizeof(rbits)))
+		return -EFAULT;
+
+	return ret;
+}
+
+int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode, const struct __kernel_timespec __user *u_ts)
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_timespec64(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL(event_wait, primary,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int bits,
+		unsigned int __user *u_bits_r,
+		int mode, const struct __user_old_timespec __user *u_ts))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_u_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL(event_wait64, primary,
+	       (struct cobalt_event_shadow __user *u_event,
+		unsigned int bits,
+		unsigned int __user *u_bits_r,
+		int mode, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts);
+}
+
+COBALT_SYSCALL(event_sync, current,
+	       (struct cobalt_event_shadow __user *u_event))
+{
+	unsigned int bits, waitval, testval;
+	struct xnthread_wait_context *wc;
+	struct cobalt_event_state *state;
+	struct event_wait_context *ewc;
+	struct cobalt_event *event;
+	struct xnthread *p, *tmp;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Userland has already updated the bitmask, our job is to
+	 * wake up any thread which could be satisfied by its current
+	 * value.
+	 */
+	state = event->state;
+	bits = state->value;
+
+	xnsynch_for_each_sleeper_safe(p, tmp, &event->synch) {
+		wc = xnthread_get_wait_context(p);
+		ewc = container_of(wc, struct event_wait_context, wc);
+		waitval = ewc->value & bits;
+		testval = ewc->mode & COBALT_EVENT_ANY ? waitval : ewc->value;
+		if (waitval && waitval == testval) {
+			state->nwaiters--;
+			ewc->value = waitval;
+			xnsynch_wakeup_this_sleeper(&event->synch, p);
+		}
+	}
+
+	xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(event_destroy, current,
+	       (struct cobalt_event_shadow __user *u_event))
+{
+	struct cobalt_event *event;
+	xnhandle_t handle;
+	spl_t s;
+
+	trace_cobalt_event_destroy(u_event);
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	event = xnregistry_lookup(handle, NULL);
+	if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+
+	cobalt_event_reclaim(&event->resnode, s); /* drops lock */
+
+	return 0;
+}
+
+COBALT_SYSCALL(event_inquire, current,
+	       (struct cobalt_event_shadow __user *u_event,
+		struct cobalt_event_info __user *u_info,
+		pid_t __user *u_waitlist,
+		size_t waitsz))
+{
+	int nrpend = 0, nrwait = 0, nrpids, ret = 0;
+	unsigned long pstamp, nstamp = 0;
+	struct cobalt_event_info info;
+	struct cobalt_event *event;
+	pid_t *t = NULL, fbuf[16];
+	struct xnthread *thread;
+	xnhandle_t handle;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_event->handle);
+
+	nrpids = waitsz / sizeof(pid_t);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		pstamp = nstamp;
+		event = xnregistry_lookup(handle, &nstamp);
+		if (event == NULL || event->magic != COBALT_EVENT_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		/*
+		 * Allocate memory to return the wait list without
+		 * holding any lock, then revalidate the handle.
+		 */
+		if (t == NULL) {
+			nrpend = 0;
+			if (!xnsynch_pended_p(&event->synch))
+				break;
+			xnsynch_for_each_sleeper(thread, &event->synch)
+				nrpend++;
+			if (u_waitlist == NULL)
+				break;
+			xnlock_put_irqrestore(&nklock, s);
+			if (nrpids > nrpend)
+				nrpids = nrpend;
+			if (nrpend <= ARRAY_SIZE(fbuf))
+				t = fbuf; /* Use fast buffer. */
+			else {
+				t = xnmalloc(nrpend * sizeof(pid_t));
+				if (t == NULL)
+					return -ENOMEM;
+			}
+			xnlock_get_irqsave(&nklock, s);
+		} else if (pstamp == nstamp)
+			break;
+		else {
+			xnlock_put_irqrestore(&nklock, s);
+			if (t != fbuf)
+				xnfree(t);
+			t = NULL;
+			xnlock_get_irqsave(&nklock, s);
+		}
+	}
+
+	info.flags = event->flags;
+	info.value = event->state->value;
+	info.nrwait = nrpend;
+
+	if (xnsynch_pended_p(&event->synch) && u_waitlist != NULL) {
+		xnsynch_for_each_sleeper(thread, &event->synch) {
+			if (nrwait >= nrpids)
+				break;
+			t[nrwait++] = xnthread_host_pid(thread);
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = cobalt_copy_to_user(u_info, &info, sizeof(info));
+	if (ret == 0 && nrwait > 0)
+		ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t));
+
+	if (t && t != fbuf)
+		xnfree(t);
+
+	return ret ?: nrwait;
+}
+
+void cobalt_event_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_event *event;
+	struct cobalt_umm *umm;
+	int pshared;
+
+	event = container_of(node, struct cobalt_event, resnode);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&event->synch);
+	pshared = (event->flags & COBALT_EVENT_SHARED) != 0;
+	xnlock_put_irqrestore(&nklock, s);
+
+	umm = &cobalt_ppd_get(pshared)->umm;
+	cobalt_umm_free(umm, event->state);
+	xnfree(event);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h
new file mode 100644
index 0000000..919774c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/event.h
@@ -0,0 +1,83 @@
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_EVENT_H
+#define _COBALT_POSIX_EVENT_H
+
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/event.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_event {
+	unsigned int magic;
+	unsigned int value;
+	int flags;
+	struct xnsynch synch;
+	struct cobalt_event_state *state;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_event_wait(struct cobalt_event_shadow __user *u_event,
+			unsigned int bits,
+			unsigned int __user *u_bits_r,
+			int mode, const struct timespec64 *ts);
+
+int __cobalt_event_wait64(struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits, unsigned int __user *u_bits_r,
+			  int mode,
+			  const struct __kernel_timespec __user *u_ts);
+
+COBALT_SYSCALL_DECL(event_init,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int value,
+		     int flags));
+
+COBALT_SYSCALL_DECL(event_wait,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(event_wait64,
+		    (struct cobalt_event_shadow __user *u_evtsh,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(event_sync,
+		    (struct cobalt_event_shadow __user *u_evtsh));
+
+COBALT_SYSCALL_DECL(event_destroy,
+		    (struct cobalt_event_shadow __user *u_evtsh));
+
+COBALT_SYSCALL_DECL(event_inquire,
+		    (struct cobalt_event_shadow __user *u_event,
+		     struct cobalt_event_info __user *u_info,
+		     pid_t __user *u_waitlist,
+		     size_t waitsz));
+
+void cobalt_event_reclaim(struct cobalt_resnode *node,
+			  spl_t s);
+
+#endif /* !_COBALT_POSIX_EVENT_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h
new file mode 100644
index 0000000..e23c26c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/extension.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_EXTENSION_H
+#define _COBALT_POSIX_EXTENSION_H
+
+#include <linux/time.h>
+#include <linux/list.h>
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+#include <cobalt/kernel/thread.h>
+
+struct cobalt_timer;
+struct cobalt_sigpending;
+struct cobalt_extref;
+struct siginfo;
+struct xnsched_class;
+union xnsched_policy_param;
+
+struct cobalt_extension {
+	struct xnthread_personality core;
+	struct {
+		struct cobalt_thread *
+		(*timer_init)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+			      const struct sigevent *__restrict__ evp);
+		int (*timer_settime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+				     const struct itimerspec64 *__restrict__ value,
+				     int flags);
+		int (*timer_gettime)(struct cobalt_extref *reftimer, /* nklocked, IRQs off. */
+				     struct itimerspec64 *__restrict__ value);
+		int (*timer_delete)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */
+		int (*timer_cleanup)(struct cobalt_extref *reftimer); /* nklocked, IRQs off. */
+		int (*signal_deliver)(struct cobalt_extref *refthread,
+				      struct siginfo *si,
+				      struct cobalt_sigpending *sigp);
+		int (*signal_queue)(struct cobalt_extref *refthread,
+				    struct cobalt_sigpending *sigp);
+		int (*signal_copyinfo)(struct cobalt_extref *refthread,
+				       void __user *u_si,
+				       const struct siginfo *si,
+				       int overrun);
+		int (*signal_copyinfo_compat)(struct cobalt_extref *refthread,
+					      void __user *u_si,
+					      const struct siginfo *si,
+					      int overrun);
+		int (*sched_yield)(struct cobalt_extref *curref);
+		int (*thread_setsched)(struct cobalt_extref *refthread, /* nklocked, IRQs off. */
+				       struct xnsched_class *sched_class,
+				       union xnsched_policy_param *param);
+	} ops;
+};
+
+struct cobalt_extref {
+	struct cobalt_extension *extension;
+	struct list_head next;
+	void *private;
+};
+
+static inline void cobalt_set_extref(struct cobalt_extref *ref,
+				     struct cobalt_extension *ext,
+				     void *priv)
+{
+	ref->extension = ext;
+	ref->private = priv;
+}
+
+/*
+ * All of these macros return non-zero if some thread-level extension
+ * code was called, leaving the output value in __ret. Otherwise,
+ * __ret is left undefined.
+ */
+#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...) \
+	({									\
+		int __val = 0;							\
+		if ((__owner) && (__owner)->extref.extension) {			\
+			(__extref)->extension = (__owner)->extref.extension;	\
+			if ((__extref)->extension->ops.__extfn) {		\
+				(__ret) = (__extref)->extension->ops.		\
+					__extfn(__extref, ##__args );		\
+				__val = 1;					\
+			}							\
+		} else								\
+			(__extref)->extension = NULL;				\
+		__val;								\
+	})
+
+#define cobalt_call_extension(__extfn, __extref, __ret, __args...)	\
+	({								\
+		int __val = 0;						\
+		if ((__extref)->extension &&				\
+		    (__extref)->extension->ops.__extfn) {		\
+			(__ret) = (__extref)->extension->ops.		\
+				__extfn(__extref, ##__args );		\
+			__val = 1;					\
+		}							\
+		__val;							\
+	})
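+
+/*
+ * Usage sketch (illustrative): invoking an optional extension hook,
+ * falling back to the core behavior when no extension handled it:
+ *
+ *	int ret = 0;
+ *	if (cobalt_call_extension(sched_yield, &curr->extref, ret))
+ *		return ret;
+ *
+ * otherwise fall through to the core implementation.
+ */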
+
+#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+struct cobalt_extension;
+
+struct cobalt_extref {
+};
+
+static inline void cobalt_set_extref(struct cobalt_extref *ref,
+				     struct cobalt_extension *ext,
+				     void *priv)
+{
+}
+
+#define cobalt_initcall_extension(__extfn, __extref, __owner, __ret, __args...)	\
+	({ (void)(__owner); (void)(__ret); 0; })
+
+#define cobalt_call_extension(__extfn, __extref, __ret, __args...)	\
+	({ (void)(__ret); 0; })
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+#endif /* !_COBALT_POSIX_EXTENSION_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh
new file mode 100755
index 0000000..0f99fff
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/gen-syscall-entries.sh
@@ -0,0 +1,32 @@
+#! /bin/sh
+
+set -e
+
+shift
+
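+# For instance, a declaration such as
+#	COBALT_SYSCALL(event_wait, primary, (...))
+# contributes
+#	__COBALT_CALL_ENTRY(event_wait)
+#	__COBALT_MODE(event_wait, primary)
+# to the generated entry and mode lists.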
+awk '
+match($0, /COBALT_SYSCALL\([^,]*,[ \t]*[^,]*/)  {
+	str=substr($0, RSTART + 15, RLENGTH - 15)
+	match(str, /[^, \t]*/)
+	syscall=substr(str, RSTART, RLENGTH)
+
+	if (syscall == "") {
+		print "Failed to find syscall name in line " $0 > "/dev/stderr"
+		exit 1
+	}
+
+	calls = calls "	__COBALT_CALL_ENTRY(" syscall ") \\\n"
+	modes = modes "	__COBALT_MODE(" str ") \\\n"
+	next
+}
+
+/COBALT_SYSCALL\(/  {
+	print "Failed to parse line " $0 > "/dev/stderr"
+	exit 1
+}
+
+END {
+	print "#define __COBALT_CALL_ENTRIES \\\n" calls "	/* end */"
+	print "#define __COBALT_CALL_MODES \\\n" modes "	/* end */"
+}
+' "$@"
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h
new file mode 100644
index 0000000..8b134d0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/internal.h
@@ -0,0 +1,62 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_INTERNAL_H
+#define _COBALT_POSIX_INTERNAL_H
+
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/kernel/arith.h>
+#include <asm/xenomai/syscall.h>
+#include "process.h"
+#include "extension.h"
+#include "syscall.h"
+#include "memory.h"
+
+#define COBALT_MAXNAME		64
+#define COBALT_PERMS_MASK	(O_RDONLY | O_WRONLY | O_RDWR)
+
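+/* Per-object magic tags; e.g. COBALT_MAGIC(01) expands to 0x86860101. */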
+#define COBALT_MAGIC(n)		(0x8686##n##n)
+#define COBALT_ANY_MAGIC	COBALT_MAGIC(00)
+#define COBALT_THREAD_MAGIC	COBALT_MAGIC(01)
+#define COBALT_MQ_MAGIC		COBALT_MAGIC(0A)
+#define COBALT_MQD_MAGIC	COBALT_MAGIC(0B)
+#define COBALT_EVENT_MAGIC	COBALT_MAGIC(0F)
+#define COBALT_MONITOR_MAGIC	COBALT_MAGIC(10)
+#define COBALT_TIMERFD_MAGIC	COBALT_MAGIC(11)
+
+#define cobalt_obj_active(h,m,t)	\
+	((h) && ((t *)(h))->magic == (m))
+
+#define cobalt_mark_deleted(t) ((t)->magic = ~(t)->magic)
+
+extern struct xnptree posix_ptree;
+
+static inline xnhandle_t cobalt_get_handle_from_user(xnhandle_t *u_h)
+{
+	xnhandle_t handle;
+	return __xn_get_user(handle, u_h) ? 0 : handle;
+}
+
+int cobalt_init(void);
+
+long cobalt_restart_syscall_placeholder(struct restart_block *param);
+
+#endif /* !_COBALT_POSIX_INTERNAL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c
new file mode 100644
index 0000000..b95dfbc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.c
@@ -0,0 +1,394 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/err.h>
+#include <linux/fs.h>
+#include <cobalt/kernel/compat.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/time.h>
+#include <xenomai/rtdm/internal.h>
+#include "process.h"
+#include "internal.h"
+#include "clock.h"
+#include "io.h"
+
+COBALT_SYSCALL(open, lostage,
+	       (const char __user *u_path, int oflag))
+{
+	struct filename *filename;
+	int ufd;
+
+	filename = getname(u_path);
+	if (IS_ERR(filename))
+		return PTR_ERR(filename);
+
+	ufd = __rtdm_dev_open(filename->name, oflag);
+	putname(filename);
+
+	return ufd;
+}
+
+COBALT_SYSCALL(socket, lostage,
+	       (int protocol_family, int socket_type, int protocol))
+{
+	return __rtdm_dev_socket(protocol_family, socket_type, protocol);
+}
+
+COBALT_SYSCALL(close, lostage, (int fd))
+{
+	return rtdm_fd_close(fd, 0);
+}
+
+COBALT_SYSCALL(fcntl, current, (int fd, int cmd, long arg))
+{
+	return rtdm_fd_fcntl(fd, cmd, arg);
+}
+
+COBALT_SYSCALL(ioctl, handover,
+	       (int fd, unsigned int request, void __user *arg))
+{
+	return rtdm_fd_ioctl(fd, request, arg);
+}
+
+COBALT_SYSCALL(read, handover,
+	       (int fd, void __user *buf, size_t size))
+{
+	return rtdm_fd_read(fd, buf, size);
+}
+
+COBALT_SYSCALL(write, handover,
+	       (int fd, const void __user *buf, size_t size))
+{
+	return rtdm_fd_write(fd, buf, size);
+}
+
+COBALT_SYSCALL(recvmsg, handover,
+	       (int fd, struct user_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	ssize_t ret;
+
+	ret = cobalt_copy_from_user(&m, umsg, sizeof(m));
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_recvmsg(fd, &m, flags);
+	if (ret < 0)
+		return ret;
+
+	return cobalt_copy_to_user(umsg, &m, sizeof(*umsg)) ?: ret;
+}
+
+static int get_timespec(struct timespec64 *ts,
+			const void __user *u_ts)
+{
+	return cobalt_get_u_timespec(ts, u_ts);
+}
+
+static int get_mmsg(struct mmsghdr *mmsg, void __user *u_mmsg)
+{
+	return cobalt_copy_from_user(mmsg, u_mmsg, sizeof(*mmsg));
+}
+
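+/* Copy one mmsghdr back to userland, advancing the output cursor. */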
+static int put_mmsg(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return cobalt_copy_to_user(q, mmsg, sizeof(*q));
+}
+
+COBALT_SYSCALL(recvmmsg, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct __user_old_timespec __user *u_timeout))
+{
+	return __rtdm_fd_recvmmsg(fd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg, put_mmsg, get_timespec);
+}
+
+COBALT_SYSCALL(recvmmsg64, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct __kernel_timespec __user *u_timeout))
+{
+	return __rtdm_fd_recvmmsg64(fd, u_msgvec, vlen, flags, u_timeout,
+				    get_mmsg, put_mmsg);
+}
+
+COBALT_SYSCALL(sendmsg, handover,
+	       (int fd, struct user_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	int ret;
+
+	ret = cobalt_copy_from_user(&m, umsg, sizeof(m));
+
+	return ret ?: rtdm_fd_sendmsg(fd, &m, flags);
+}
+
+static int put_mmsglen(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct mmsghdr __user **p = (struct mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return __xn_put_user(mmsg->msg_len, &q->msg_len);
+}
+
+COBALT_SYSCALL(sendmmsg, primary,
+	       (int fd, struct mmsghdr __user *u_msgvec,
+		unsigned int vlen, unsigned int flags))
+{
+	return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags,
+				  get_mmsg, put_mmsglen);
+}
+
+COBALT_SYSCALL(mmap, lostage,
+	       (int fd, struct _rtdm_mmap_request __user *u_rma,
+	        void __user **u_addrp))
+{
+	struct _rtdm_mmap_request rma;
+	void *u_addr = NULL;
+	int ret;
+
+	ret = cobalt_copy_from_user(&rma, u_rma, sizeof(rma));
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_mmap(fd, &rma, &u_addr);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_addrp, &u_addr, sizeof(u_addr));
+}
+
+static int __cobalt_first_fd_valid_p(fd_set *fds[XNSELECT_MAX_TYPES], int nfds)
+{
+	int i, fd;
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (fds[i]
+		    && (fd = find_first_bit(fds[i]->fds_bits, nfds)) < nfds)
+			return rtdm_fd_valid_p(fd);
+
+	/* All sets empty is valid; some applications use this as a
+	   plain "sleep" mechanism. */
+	return 1;
+}
+
+static int __cobalt_select_bind_all(struct xnselector *selector,
+				    fd_set *fds[XNSELECT_MAX_TYPES], int nfds)
+{
+	bool first_fd = true;
+	unsigned fd, type;
+	int err;
+
+	for (type = 0; type < XNSELECT_MAX_TYPES; type++) {
+		fd_set *set = fds[type];
+		if (set)
+			for (fd = find_first_bit(set->fds_bits, nfds);
+			     fd < nfds;
+			     fd = find_next_bit(set->fds_bits, nfds, fd + 1)) {
+				err = rtdm_fd_select(fd, selector, type);
+				if (err) {
+					/*
+					 * Do not needlessly signal "retry
+					 * under Linux" for mixed fd sets.
+					 */
+					if (err == -EADV && !first_fd)
+						return -EBADF;
+					return err;
+				}
+				first_fd = false;
+			}
+	}
+
+	return 0;
+}
+
+int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds,
+		    void __user *u_xfds, void __user *u_tv, bool compat)
+{
+	void __user *ufd_sets[XNSELECT_MAX_TYPES] = {
+		[XNSELECT_READ] = u_rfds,
+		[XNSELECT_WRITE] = u_wfds,
+		[XNSELECT_EXCEPT] = u_xfds
+	};
+	fd_set *in_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL};
+	fd_set *out_fds[XNSELECT_MAX_TYPES] = {NULL, NULL, NULL};
+	fd_set in_fds_storage[XNSELECT_MAX_TYPES],
+		out_fds_storage[XNSELECT_MAX_TYPES];
+	xnticks_t timeout = XN_INFINITE;
+	struct restart_block *restart;
+	xntmode_t mode = XN_RELATIVE;
+	struct xnselector *selector;
+	struct xnthread *curr;
+	struct __kernel_old_timeval tv;
+	size_t fds_size;
+	int i, err;
+
+	curr = xnthread_current();
+
+	if (u_tv) {
+		if (xnthread_test_localinfo(curr, XNSYSRST)) {
+			xnthread_clear_localinfo(curr, XNSYSRST);
+
+			restart = cobalt_get_restart_block(current);
+			timeout = restart->nanosleep.expires;
+
+			if (restart->fn != cobalt_restart_syscall_placeholder) {
+				err = -EINTR;
+				goto out;
+			}
+		} else {
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_get_timeval(&tv, u_tv))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (!access_wok(u_tv, sizeof(tv))
+				    || cobalt_copy_from_user(&tv, u_tv,
+							     sizeof(tv)))
+					return -EFAULT;
+			}
+
+			if (tv.tv_usec >= 1000000)
+				return -EINVAL;
+
+			timeout = clock_get_ticks(CLOCK_MONOTONIC) + tv2ns(&tv);
+		}
+
+		mode = XN_ABSOLUTE;
+	}
+
+	fds_size = __FDELT__(nfds + __NFDBITS__ - 1) * sizeof(long);
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (ufd_sets[i]) {
+			in_fds[i] = &in_fds_storage[i];
+			out_fds[i] = &out_fds_storage[i];
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_get_fdset(in_fds[i], ufd_sets[i],
+						    fds_size))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (!access_wok((void __user *) ufd_sets[i],
+						sizeof(fd_set))
+				    || cobalt_copy_from_user(in_fds[i],
+							     (void __user *)ufd_sets[i],
+							     fds_size))
+					return -EFAULT;
+			}
+		}
+
+	selector = curr->selector;
+	if (!selector) {
+		/* This function may be called with pure Linux fd_sets; we
+		   want to avoid allocating an xnselector in that case, so we
+		   run a simple test first: check whether the first file
+		   descriptor found in the fd_sets is an RTDM descriptor or a
+		   message queue descriptor. */
+		if (!__cobalt_first_fd_valid_p(in_fds, nfds))
+			return -EADV;
+
+		selector = xnmalloc(sizeof(*curr->selector));
+		if (selector == NULL)
+			return -ENOMEM;
+		xnselector_init(selector);
+		curr->selector = selector;
+
+		/* Bind the file descriptors directly; this way we do not
+		   need to go through xnselect returning -ECHRNG. */
+		err = __cobalt_select_bind_all(selector, in_fds, nfds);
+		if (err)
+			return err;
+	}
+
+	do {
+		err = xnselect(selector, out_fds, in_fds, nfds, timeout, mode);
+		if (err == -ECHRNG) {
+			int bind_err = __cobalt_select_bind_all(selector,
+								out_fds, nfds);
+			if (bind_err)
+				return bind_err;
+		}
+	} while (err == -ECHRNG);
+
+	if (err == -EINTR && signal_pending(current)) {
+		xnthread_set_localinfo(curr, XNSYSRST);
+
+		restart = cobalt_get_restart_block(current);
+		restart->fn = cobalt_restart_syscall_placeholder;
+		restart->nanosleep.expires = timeout;
+
+		return -ERESTARTSYS;
+	}
+
+out:
+	if (u_tv && (err > 0 || err == -EINTR)) {
+		xnsticks_t diff = timeout - clock_get_ticks(CLOCK_MONOTONIC);
+		if (diff > 0)
+			ticks2tv(&tv, diff);
+		else
+			tv.tv_sec = tv.tv_usec = 0;
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (compat) {
+			if (sys32_put_timeval(u_tv, &tv))
+				return -EFAULT;
+		} else
+#endif
+		{
+			if (cobalt_copy_to_user(u_tv, &tv, sizeof(tv)))
+				return -EFAULT;
+		}
+	}
+
+	if (err >= 0)
+		for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
+			if (!ufd_sets[i])
+				continue;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+			if (compat) {
+				if (sys32_put_fdset(ufd_sets[i], out_fds[i],
+						    sizeof(fd_set)))
+					return -EFAULT;
+			} else
+#endif
+			{
+				if (cobalt_copy_to_user((void __user *)ufd_sets[i],
+							out_fds[i], sizeof(fd_set)))
+					return -EFAULT;
+			}
+		}
+	return err;
+}
+
+/* int select(int, fd_set *, fd_set *, fd_set *, struct __kernel_old_timeval *) */
+COBALT_SYSCALL(select, primary,
+	       (int nfds,
+		fd_set __user *u_rfds,
+		fd_set __user *u_wfds,
+		fd_set __user *u_xfds,
+		struct __kernel_old_timeval __user *u_tv))
+{
+	return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, false);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h
new file mode 100644
index 0000000..1d9ee09
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/io.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_IO_H
+#define _COBALT_POSIX_IO_H
+
+#include <rtdm/rtdm.h>
+#include <xenomai/posix/syscall.h>
+#include <cobalt/kernel/select.h>
+
+int __cobalt_select(int nfds, void __user *u_rfds, void __user *u_wfds,
+		    void __user *u_xfds, void __user *u_tv, bool compat);
+
+COBALT_SYSCALL_DECL(open,
+		    (const char __user *u_path, int oflag));
+
+COBALT_SYSCALL_DECL(socket,
+		    (int protocol_family,
+		     int socket_type, int protocol));
+
+COBALT_SYSCALL_DECL(close, (int fd));
+
+COBALT_SYSCALL_DECL(fcntl, (int fd, int cmd, long arg));
+
+COBALT_SYSCALL_DECL(ioctl,
+		    (int fd, unsigned int request, void __user *arg));
+
+COBALT_SYSCALL_DECL(read,
+		    (int fd, void __user *buf, size_t size));
+
+COBALT_SYSCALL_DECL(write,
+		    (int fd, const void __user *buf, size_t size));
+
+COBALT_SYSCALL_DECL(recvmsg,
+		    (int fd, struct user_msghdr __user *umsg, int flags));
+
+COBALT_SYSCALL_DECL(recvmmsg,
+		    (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags, struct __user_old_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(recvmmsg64,
+		    (int fd, struct mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags,
+		     struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sendmsg,
+		    (int fd, struct user_msghdr __user *umsg, int flags));
+
+COBALT_SYSCALL_DECL(sendmmsg,
+		    (int fd, struct mmsghdr __user *u_msgvec,
+		     unsigned int vlen, unsigned int flags));
+
+COBALT_SYSCALL_DECL(mmap,
+		    (int fd, struct _rtdm_mmap_request __user *u_rma,
+		     void __user * __user *u_addrp));
+
+COBALT_SYSCALL_DECL(select,
+		    (int nfds,
+		     fd_set __user *u_rfds,
+		     fd_set __user *u_wfds,
+		     fd_set __user *u_xfds,
+		     struct __kernel_old_timeval __user *u_tv));
+
+#endif /* !_COBALT_POSIX_IO_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c
new file mode 100644
index 0000000..fc88e26
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.c
@@ -0,0 +1,354 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/gfp.h>
+#include <linux/vmalloc.h>
+#include <rtdm/driver.h>
+#include <cobalt/kernel/vdso.h>
+#include "process.h"
+#include "memory.h"
+
+#define UMM_PRIVATE  0	/* Per-process user-mapped memory heap */
+#define UMM_SHARED   1	/* Shared user-mapped memory heap */
+#define SYS_GLOBAL   2	/* System heap (not mmapped) */
+
+struct xnvdso *nkvdso;
+EXPORT_SYMBOL_GPL(nkvdso);
+
+static void umm_vmopen(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	atomic_inc(&umm->refcount);
+}
+
+static void umm_vmclose(struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm = vma->vm_private_data;
+
+	cobalt_umm_destroy(umm);
+}
+
+static struct vm_operations_struct umm_vmops = {
+	.open = umm_vmopen,
+	.close = umm_vmclose,
+};
+
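+/*
+ * The device minor selects the backing heap: UMM_PRIVATE maps the
+ * per-process private heap, any other minor the global shared heap.
+ */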
+static struct cobalt_umm *umm_from_fd(struct rtdm_fd *fd)
+{
+	struct cobalt_process *process;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return NULL;
+
+	if (rtdm_fd_minor(fd) == UMM_PRIVATE)
+		return &process->sys_ppd.umm;
+
+	return &cobalt_kernel_ppd.umm;
+}
+
+static int umm_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct cobalt_umm *umm;
+	size_t len;
+	int ret;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	len = vma->vm_end - vma->vm_start;
+	if (len != xnheap_get_size(&umm->heap))
+		return -EINVAL;
+
+	vma->vm_private_data = umm;
+	vma->vm_ops = &umm_vmops;
+	if (xnarch_cache_aliasing())
+		vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	ret = rtdm_mmap_vmem(vma, xnheap_get_membase(&umm->heap));
+	if (ret)
+		return ret;
+
+	atomic_inc(&umm->refcount);
+
+	return 0;
+}
+
+#ifndef CONFIG_MMU
+static unsigned long umm_get_unmapped_area(struct rtdm_fd *fd,
+					   unsigned long len,
+					   unsigned long pgoff,
+					   unsigned long flags)
+{
+	struct cobalt_umm *umm;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	if (pgoff == 0)
+		return (unsigned long)xnheap_get_membase(&umm->heap);
+
+	return pgoff << PAGE_SHIFT;
+}
+#else
+#define umm_get_unmapped_area	NULL
+#endif
+
+static int stat_umm(struct rtdm_fd *fd,
+		    struct cobalt_umm __user *u_stat)
+{
+	struct cobalt_memdev_stat stat;
+	struct cobalt_umm *umm;
+	spl_t s;
+
+	umm = umm_from_fd(fd);
+	if (umm == NULL)
+		return -ENODEV;
+
+	xnlock_get_irqsave(&umm->heap.lock, s);
+	stat.size = xnheap_get_size(&umm->heap);
+	stat.free = xnheap_get_free(&umm->heap);
+	xnlock_put_irqrestore(&umm->heap.lock, s);
+
+	return rtdm_safe_copy_to_user(fd, u_stat, &stat, sizeof(stat));
+}
+
+static int do_umm_ioctls(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		ret = stat_umm(fd, arg);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
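+
+/*
+ * Userland sketch (illustrative; the device path is an assumption):
+ * querying heap usage through the memdev ioctl interface:
+ *
+ *	struct cobalt_memdev_stat st;
+ *	int fd = open("/dev/rtdm/memdev-shared", O_RDWR);
+ *	ioctl(fd, MEMDEV_RTIOC_STAT, &st);  (st.size, st.free)
+ */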
+
+static int umm_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int umm_ioctl_nrt(struct rtdm_fd *fd,
+			 unsigned int request, void __user *arg)
+{
+	return do_umm_ioctls(fd, request, arg);
+}
+
+static int sysmem_open(struct rtdm_fd *fd, int oflags)
+{
+	if ((oflags & O_ACCMODE) != O_RDONLY)
+		return -EACCES;
+
+	return 0;
+}
+
+static int do_sysmem_ioctls(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct cobalt_memdev_stat stat;
+	spl_t s;
+	int ret;
+
+	switch (request) {
+	case MEMDEV_RTIOC_STAT:
+		xnlock_get_irqsave(&cobalt_heap.lock, s);
+		stat.size = xnheap_get_size(&cobalt_heap);
+		stat.free = xnheap_get_free(&cobalt_heap);
+		xnlock_put_irqrestore(&cobalt_heap.lock, s);
+		ret = rtdm_safe_copy_to_user(fd, arg, &stat, sizeof(stat));
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int sysmem_ioctl_rt(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+	return do_sysmem_ioctls(fd, request, arg);
+}
+
+static int sysmem_ioctl_nrt(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+	return do_sysmem_ioctls(fd, request, arg);
+}
+
+static struct rtdm_driver umm_driver = {
+	.profile_info	=	RTDM_PROFILE_INFO(umm,
+						  RTDM_CLASS_MEMORY,
+						  RTDM_SUBCLASS_GENERIC,
+						  0),
+	.device_flags	=	RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR,
+	.device_count	=	2,
+	.ops = {
+		.ioctl_rt		=	umm_ioctl_rt,
+		.ioctl_nrt		=	umm_ioctl_nrt,
+		.mmap			=	umm_mmap,
+		.get_unmapped_area	=	umm_get_unmapped_area,
+	},
+};
+
+static struct rtdm_device umm_devices[] = {
+	[ UMM_PRIVATE ] = {
+		.driver = &umm_driver,
+		.label = COBALT_MEMDEV_PRIVATE,
+		.minor = UMM_PRIVATE,
+	},
+	[ UMM_SHARED ] = {
+		.driver = &umm_driver,
+		.label = COBALT_MEMDEV_SHARED,
+		.minor = UMM_SHARED,
+	},
+};
+
+static struct rtdm_driver sysmem_driver = {
+	.profile_info	=	RTDM_PROFILE_INFO(sysmem,
+						  RTDM_CLASS_MEMORY,
+						  SYS_GLOBAL,
+						  0),
+	.device_flags	=	RTDM_NAMED_DEVICE,
+	.device_count	=	1,
+	.ops = {
+		.open		=	sysmem_open,
+		.ioctl_rt	=	sysmem_ioctl_rt,
+		.ioctl_nrt	=	sysmem_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device sysmem_device = {
+	.driver = &sysmem_driver,
+	.label = COBALT_MEMDEV_SYS,
+};
+
+static inline void init_vdso(void)
+{
+	nkvdso->features = XNVDSO_FEATURES;
+	nkvdso->wallclock_offset = nkclock.wallclock_offset;
+}
+
+int cobalt_memdev_init(void)
+{
+	int ret;
+
+	ret = cobalt_umm_init(&cobalt_kernel_ppd.umm,
+			      CONFIG_XENO_OPT_SHARED_HEAPSZ * 1024, NULL);
+	if (ret)
+		return ret;
+
+	cobalt_umm_set_name(&cobalt_kernel_ppd.umm, "shared heap");
+
+	nkvdso = cobalt_umm_alloc(&cobalt_kernel_ppd.umm, sizeof(*nkvdso));
+	if (nkvdso == NULL) {
+		ret = -ENOMEM;
+		goto fail_vdso;
+	}
+
+	init_vdso();
+
+	ret = rtdm_dev_register(umm_devices + UMM_PRIVATE);
+	if (ret)
+		goto fail_private;
+
+	ret = rtdm_dev_register(umm_devices + UMM_SHARED);
+	if (ret)
+		goto fail_shared;
+
+	ret = rtdm_dev_register(&sysmem_device);
+	if (ret)
+		goto fail_sysmem;
+
+	return 0;
+
+fail_sysmem:
+	rtdm_dev_unregister(umm_devices + UMM_SHARED);
+fail_shared:
+	rtdm_dev_unregister(umm_devices + UMM_PRIVATE);
+fail_private:
+	cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso);
+fail_vdso:
+	cobalt_umm_destroy(&cobalt_kernel_ppd.umm);
+
+	return ret;
+}
+
+void cobalt_memdev_cleanup(void)
+{
+	rtdm_dev_unregister(&sysmem_device);
+	rtdm_dev_unregister(umm_devices + UMM_SHARED);
+	rtdm_dev_unregister(umm_devices + UMM_PRIVATE);
+	cobalt_umm_free(&cobalt_kernel_ppd.umm, nkvdso);
+	cobalt_umm_destroy(&cobalt_kernel_ppd.umm);
+}
+
+int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
+		    void (*release)(struct cobalt_umm *umm))
+{
+	void *basemem;
+	int ret;
+
+	secondary_mode_only();
+
+	/* We don't support CPUs with VIVT caches and the like. */
+	BUG_ON(xnarch_cache_aliasing());
+
+	size = PAGE_ALIGN(size);
+	basemem = vmalloc_kernel(size, __GFP_ZERO);
+	if (basemem == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&umm->heap, basemem, size);
+	if (ret) {
+		vfree(basemem);
+		return ret;
+	}
+
+	umm->release = release;
+	atomic_set(&umm->refcount, 1);
+	smp_mb();
+
+	return 0;
+}
+
+void cobalt_umm_destroy(struct cobalt_umm *umm)
+{
+	secondary_mode_only();
+
+	if (atomic_dec_and_test(&umm->refcount)) {
+		xnheap_destroy(&umm->heap);
+		vfree(xnheap_get_membase(&umm->heap));
+		if (umm->release)
+			umm->release(umm);
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h
new file mode 100644
index 0000000..c22417b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/memory.h
@@ -0,0 +1,61 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_MEMORY_H
+#define _COBALT_POSIX_MEMORY_H
+
+#include <cobalt/kernel/ppd.h>
+
+#define cobalt_umm_set_name(__umm, __fmt, __args...)	\
+	xnheap_set_name(&(__umm)->heap, (__fmt), ## __args)
+
+static inline
+void *cobalt_umm_alloc(struct cobalt_umm *umm, __u32 size)
+{
+	return xnheap_alloc(&umm->heap, size);
+}
+
+static inline
+void *cobalt_umm_zalloc(struct cobalt_umm *umm, __u32 size)
+{
+	return xnheap_zalloc(&umm->heap, size);
+}
+
+static inline
+void cobalt_umm_free(struct cobalt_umm *umm, void *p)
+{
+	xnheap_free(&umm->heap, p);
+}
+
+static inline
+__u32 cobalt_umm_offset(struct cobalt_umm *umm, void *p)
+{
+	return p - xnheap_get_membase(&umm->heap);
+}
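+
+/*
+ * This offset is what shadow objects export to userland (e.g. the
+ * state_offset field filled in by event_init/monitor_init), letting
+ * the library locate a state block inside the mmapped heap.
+ */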
+
+int cobalt_memdev_init(void);
+
+void cobalt_memdev_cleanup(void);
+
+int cobalt_umm_init(struct cobalt_umm *umm, u32 size,
+		    void (*release)(struct cobalt_umm *umm));
+
+void cobalt_umm_destroy(struct cobalt_umm *umm);
+
+#endif /* !_COBALT_POSIX_MEMORY_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c
new file mode 100644
index 0000000..1e71283
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "monitor.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+/*
+ * The Cobalt monitor is a double-wait condition object, serializing
+ * accesses through a gate. It behaves like a mutex + two condition
+ * variables combo with extended signaling logic. Folding several
+ * conditions and the serialization support into a single object
+ * performs better on low-end hardware caches and allows for specific
+ * optimizations, compared to using separate general-purpose mutex and
+ * condvars. This object is used by the Copperplate interface
+ * internally when it runs over the Cobalt core.
+ *
+ * Threads can wait for some resource(s) to be granted (consumer
+ * side), or wait for the available resource(s) to drain (producer
+ * side).  Therefore, signals are thread-directed for the grant side,
+ * and monitor-directed for the drain side.
+ *
+ * Typically, a consumer would wait for the GRANT condition to be
+ * satisfied, signaling the DRAINED condition when more resources
+ * could be made available if the protocol implements output
+ * contention (e.g. the write side of a message queue waiting for the
+ * consumer to release message slots). Conversely, a producer would
+ * wait for the DRAINED condition to be satisfied, issuing GRANT
+ * signals once more resources have been made available to the
+ * consumer.
+ *
+ * Implementation-wise, the monitor logic is shared with the Cobalt
+ * thread object.
+ */
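+
+/*
+ * Illustrative flow (signal flags are raised from userland; the
+ * actual wakeup happens when the signaler exits or syncs the
+ * monitor):
+ *
+ *	consumer: enter -> wait(GRANT) .. consume -> exit
+ *	producer: enter -> produce, raise GRANT -> exit (wakes consumer)
+ */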
+COBALT_SYSCALL(monitor_init, current,
+	       (struct cobalt_monitor_shadow __user *u_mon,
+		clockid_t clk_id, int flags))
+{
+	struct cobalt_monitor_shadow shadow;
+	struct cobalt_monitor_state *state;
+	struct cobalt_monitor *mon;
+	int pshared, tmode, ret;
+	struct cobalt_umm *umm;
+	unsigned long stateoff;
+	spl_t s;
+
+	tmode = clock_flag(TIMER_ABSTIME, clk_id);
+	if (tmode < 0)
+		return -EINVAL;
+
+	mon = xnmalloc(sizeof(*mon));
+	if (mon == NULL)
+		return -ENOMEM;
+
+	pshared = (flags & COBALT_MONITOR_SHARED) != 0;
+	umm = &cobalt_ppd_get(pshared)->umm;
+	state = cobalt_umm_alloc(umm, sizeof(*state));
+	if (state == NULL) {
+		xnfree(mon);
+		return -EAGAIN;
+	}
+
+	ret = xnregistry_enter_anon(mon, &mon->resnode.handle);
+	if (ret) {
+		cobalt_umm_free(umm, state);
+		xnfree(mon);
+		return ret;
+	}
+
+	mon->state = state;
+	xnsynch_init(&mon->gate, XNSYNCH_PI, &state->owner);
+	xnsynch_init(&mon->drain, XNSYNCH_PRIO, NULL);
+	mon->flags = flags;
+	mon->tmode = tmode;
+	INIT_LIST_HEAD(&mon->waiters);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&mon->resnode, monitor, pshared);
+	mon->magic = COBALT_MONITOR_MAGIC;
+	xnlock_put_irqrestore(&nklock, s);
+
+	state->flags = 0;
+	stateoff = cobalt_umm_offset(umm, state);
+	XENO_BUG_ON(COBALT, stateoff != (__u32)stateoff);
+	shadow.flags = flags;
+	shadow.handle = mon->resnode.handle;
+	shadow.state_offset = (__u32)stateoff;
+
+	return cobalt_copy_to_user(u_mon, &shadow, sizeof(*u_mon));
+}
+
+/* nklock held, irqs off */
+static int monitor_enter(xnhandle_t handle, struct xnthread *curr)
+{
+	struct cobalt_monitor *mon;
+	int info;
+
+	mon = xnregistry_lookup(handle, NULL); /* (Re)validate. */
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		return -EINVAL;
+
+	info = xnsynch_acquire(&mon->gate, XN_INFINITE, XN_RELATIVE);
+	if (info)
+		/* Break or error, no timeout possible. */
+		return info & XNBREAK ? -EINTR : -EINVAL;
+
+	mon->state->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
+
+	return 0;
+}
+
+COBALT_SYSCALL(monitor_enter, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct xnthread *curr = xnthread_current();
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	ret = monitor_enter(handle, curr);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+/* nklock held, irqs off */
+static void monitor_wakeup(struct cobalt_monitor *mon)
+{
+	struct cobalt_monitor_state *state = mon->state;
+	struct cobalt_thread *thread, *tmp;
+	struct xnthread *p;
+	int bcast;
+
+	/*
+	 * Having the GRANT signal pending does not necessarily mean
+	 * that somebody is actually waiting for it, so we have to
+	 * check both conditions below.
+	 */
+	bcast = (state->flags & COBALT_MONITOR_BROADCAST) != 0;
+	if ((state->flags & COBALT_MONITOR_GRANTED) == 0 ||
+	    list_empty(&mon->waiters))
+		goto drain;
+
+	/*
+	 * Unblock waiters requesting a grant, either those who
+	 * received it only or all of them, depending on the broadcast
+	 * bit.
+	 *
+	 * We update the PENDED flag to inform userland about the
+	 * presence of waiters, so that it may decide not to issue any
+	 * syscall for exiting the monitor if nobody else is waiting
+	 * at the gate.
+	 */
+	list_for_each_entry_safe(thread, tmp, &mon->waiters, monitor_link) {
+		p = &thread->threadbase;
+		/*
+		 * A thread might receive a grant signal even though it
+		 * is not waiting on the monitor, or it might have timed
+		 * out before we got here, so we really have to check
+		 * that ->wchan does match our sleep queue.
+		 */
+		if (bcast ||
+		    (p->u_window->grant_value && p->wchan == &thread->monitor_synch)) {
+			xnsynch_wakeup_this_sleeper(&thread->monitor_synch, p);
+			list_del_init(&thread->monitor_link);
+		}
+	}
+drain:
+	/*
+	 * Unblock threads waiting for a drain event if that signal is
+	 * pending, either one or all, depending on the broadcast
+	 * flag.
+	 */
+	if ((state->flags & COBALT_MONITOR_DRAINED) != 0 &&
+	    xnsynch_pended_p(&mon->drain)) {
+		if (bcast)
+			xnsynch_flush(&mon->drain, 0);
+		else
+			xnsynch_wakeup_one_sleeper(&mon->drain);
+	}
+
+	if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
+		state->flags &= ~COBALT_MONITOR_PENDED;
+}
+
+int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct timespec64 *ts,
+			  int __user *u_ret)
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+	struct cobalt_monitor_state *state;
+	xnticks_t timeout = XN_INFINITE;
+	int ret = 0, opret = 0, info;
+	struct cobalt_monitor *mon;
+	struct xnsynch *synch;
+	xnhandle_t handle;
+	xntmode_t tmode;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+
+		timeout = ts2ns(ts) + 1;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * The current thread might have sent signals to the monitor
+	 * it wants to sleep on: wake up satisfied waiters before
+	 * going to sleep.
+	 */
+	state = mon->state;
+	if (state->flags & COBALT_MONITOR_SIGNALED)
+		monitor_wakeup(mon);
+
+	synch = &curr->monitor_synch;
+	if (event & COBALT_MONITOR_WAITDRAIN)
+		synch = &mon->drain;
+	else {
+		curr->threadbase.u_window->grant_value = 0;
+		list_add_tail(&curr->monitor_link, &mon->waiters);
+	}
+
+	/*
+	 * Tell userland that somebody is now waiting for a signal, so
+	 * that later exiting the monitor on the producer side will
+	 * trigger a wakeup syscall.
+	 *
+	 * CAUTION: we must raise the PENDED flag while holding the
+	 * gate mutex, to prevent a signal from sneaking in from a
+	 * remote CPU without the producer issuing the corresponding
+	 * wakeup call when dropping the gate lock.
+	 */
+	state->flags |= COBALT_MONITOR_PENDED;
+
+	tmode = ts ? mon->tmode : XN_RELATIVE;
+
+	/* Release the gate prior to waiting, all atomically. */
+	xnsynch_release(&mon->gate, &curr->threadbase);
+
+	info = xnsynch_sleep_on(synch, timeout, tmode);
+	if (info) {
+		if ((event & COBALT_MONITOR_WAITDRAIN) == 0 &&
+		    !list_empty(&curr->monitor_link))
+			list_del_init(&curr->monitor_link);
+
+		if (list_empty(&mon->waiters) && !xnsynch_pended_p(&mon->drain))
+			state->flags &= ~COBALT_MONITOR_PENDED;
+
+		if (info & XNBREAK) {
+			opret = -EINTR;
+			goto out;
+		}
+		if (info & XNTIMEO)
+			opret = -ETIMEDOUT;
+	}
+
+	ret = monitor_enter(handle, &curr->threadbase);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	__xn_put_user(opret, u_ret);
+
+	return ret;
+}
+
+int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon,
+			    int event,
+			    const struct __kernel_timespec __user *u_ts,
+			    int __user *u_ret)
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_timespec64(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL(monitor_wait, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon,
+	       int event, const struct __user_old_timespec __user *u_ts,
+	       int __user *u_ret))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = cobalt_get_u_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL(monitor_wait64, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon, int event,
+		const struct __kernel_timespec __user *u_ts, int __user *u_ret))
+{
+	return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret);
+}
+
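+/*
+ * Flush pending signals to the monitor: wake up satisfied waiters,
+ * then cycle the gate lock so they get a chance to run before the
+ * caller re-enters the monitor.
+ */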
+COBALT_SYSCALL(monitor_sync, nonrestartable,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		ret = -EINVAL;
+	else if (mon->state->flags & COBALT_MONITOR_SIGNALED) {
+		monitor_wakeup(mon);
+		xnsynch_release(&mon->gate, curr);
+		xnsched_run();
+		ret = monitor_enter(handle, curr);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(monitor_exit, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC)
+		ret = -EINVAL;
+	else {
+		if (mon->state->flags & COBALT_MONITOR_SIGNALED)
+			monitor_wakeup(mon);
+
+		xnsynch_release(&mon->gate, curr);
+		xnsched_run();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(monitor_destroy, primary,
+	       (struct cobalt_monitor_shadow __user *u_mon))
+{
+	struct cobalt_monitor_state *state;
+	struct cobalt_monitor *mon;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret = 0;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mon->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mon = xnregistry_lookup(handle, NULL);
+	if (mon == NULL || mon->magic != COBALT_MONITOR_MAGIC) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	state = mon->state;
+	if ((state->flags & COBALT_MONITOR_PENDED) != 0 ||
+	    xnsynch_pended_p(&mon->drain) || !list_empty(&mon->waiters)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	/*
+	 * A monitor must be destroyed by the thread currently holding
+	 * its gate lock.
+	 */
+	if (xnsynch_owner_check(&mon->gate, curr)) {
+		ret = -EPERM;
+		goto fail;
+	}
+
+	xnsynch_release(&mon->gate, curr);
+	cobalt_monitor_reclaim(&mon->resnode, s); /* drops lock */
+
+	xnsched_run();
+
+	return 0;
+ fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void cobalt_monitor_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_monitor *mon;
+	struct cobalt_umm *umm;
+	int pshared;
+
+	mon = container_of(node, struct cobalt_monitor, resnode);
+	pshared = (mon->flags & COBALT_MONITOR_SHARED) != 0;
+	xnsynch_destroy(&mon->gate);
+	xnsynch_destroy(&mon->drain);
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	cobalt_mark_deleted(mon);
+	xnlock_put_irqrestore(&nklock, s);
+
+	umm = &cobalt_ppd_get(pshared)->umm;
+	cobalt_umm_free(umm, mon->state);
+	xnfree(mon);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h
new file mode 100644
index 0000000..bf8794e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/monitor.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MONITOR_H
+#define _COBALT_POSIX_MONITOR_H
+
+#include <cobalt/kernel/synch.h>
+#include <cobalt/uapi/monitor.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_monitor {
+	unsigned int magic;
+	struct xnsynch gate;
+	struct xnsynch drain;
+	struct cobalt_monitor_state *state;
+	struct list_head waiters;
+	int flags;
+	xntmode_t tmode;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_monitor_wait(struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct timespec64 *ts,
+			  int __user *u_ret);
+
+int __cobalt_monitor_wait64(struct cobalt_monitor_shadow __user *u_mon,
+			    int event,
+			    const struct __kernel_timespec __user *u_ts,
+			    int __user *u_ret);
+
+COBALT_SYSCALL_DECL(monitor_init,
+		    (struct cobalt_monitor_shadow __user *u_monsh,
+		     clockid_t clk_id,
+		     int flags));
+
+COBALT_SYSCALL_DECL(monitor_enter,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_sync,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_exit,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+COBALT_SYSCALL_DECL(monitor_wait,
+		    (struct cobalt_monitor_shadow __user *u_monsh,
+		     int event, const struct __user_old_timespec __user *u_ts,
+		     int __user *u_ret));
+
+COBALT_SYSCALL_DECL(monitor_wait64,
+		    (struct cobalt_monitor_shadow __user *u_monsh, int event,
+		     const struct __kernel_timespec __user *u_ts,
+		     int __user *u_ret));
+
+COBALT_SYSCALL_DECL(monitor_destroy,
+		    (struct cobalt_monitor_shadow __user *u_monsh));
+
+void cobalt_monitor_reclaim(struct cobalt_resnode *node,
+			    spl_t s);
+
+#endif /* !_COBALT_POSIX_MONITOR_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c
new file mode 100644
index 0000000..a156af5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.c
@@ -0,0 +1,1093 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/stdarg.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "thread.h"
+#include "signal.h"
+#include "timer.h"
+#include "mqueue.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+#include <cobalt/kernel/time.h>
+
+#define COBALT_MSGMAX		65536
+#define COBALT_MSGSIZEMAX	(16*1024*1024)
+#define COBALT_MSGPRIOMAX	32768
+
+struct cobalt_mq {
+	unsigned magic;
+
+	struct list_head link;
+
+	struct xnsynch receivers;
+	struct xnsynch senders;
+	size_t memsize;
+	char *mem;
+	struct list_head queued;
+	struct list_head avail;
+	int nrqueued;
+
+	/* mq_notify */
+	struct siginfo si;
+	mqd_t target_qd;
+	struct cobalt_thread *target;
+
+	struct mq_attr attr;
+
+	unsigned refs;
+	char name[COBALT_MAXNAME];
+	xnhandle_t handle;
+
+	DECLARE_XNSELECT(read_select);
+	DECLARE_XNSELECT(write_select);
+};
+
+struct cobalt_mqd {
+	struct cobalt_mq *mq;
+	struct rtdm_fd fd;
+};
+
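+/*
+ * A message slot: a fixed header followed by up to mq_msgsize bytes
+ * of payload. Slots are carved out of a single vmalloc'ed arena in
+ * mq_init() and recycled through the mq->avail free list.
+ */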
+struct cobalt_msg {
+	struct list_head link;
+	unsigned int prio;
+	size_t len;
+	char data[];
+};
+
+struct cobalt_mqwait_context {
+	struct xnthread_wait_context wc;
+	struct cobalt_msg *msg;
+};
+
+static struct mq_attr default_attr = {
+      .mq_maxmsg = 10,
+      .mq_msgsize = 8192,
+};
+
+static LIST_HEAD(cobalt_mqq);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static int mq_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	return 0;
+}
+
+static struct xnvfile_regular_ops mq_vfile_ops = {
+	.show = mq_vfile_show,
+};
+
+static struct xnpnode_regular __mq_pnode = {
+	.node = {
+		.dirname = "mqueue",
+		.root = &posix_ptree,
+		.ops = &xnregistry_vfreg_ops,
+	},
+	.vfile = {
+		.ops = &mq_vfile_ops,
+	},
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __mq_pnode = {
+	.node = {
+		.dirname = "mqueue",
+	}
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static inline struct cobalt_msg *mq_msg_alloc(struct cobalt_mq *mq)
+{
+	if (list_empty(&mq->avail))
+		return NULL;
+
+	return list_get_entry(&mq->avail, struct cobalt_msg, link);
+}
+
+static inline void mq_msg_free(struct cobalt_mq *mq, struct cobalt_msg *msg)
+{
+	list_add(&msg->link, &mq->avail); /* For earliest re-use of the block. */
+}
+
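+/*
+ * Initialize a message queue: validate the attributes, compute the
+ * word-aligned slot size, allocate the message arena in one shot,
+ * then seed the free list with every slot.
+ */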
+static inline int mq_init(struct cobalt_mq *mq, const struct mq_attr *attr)
+{
+	unsigned i, msgsize, memsize;
+	char *mem;
+
+	if (attr == NULL)
+		attr = &default_attr;
+	else {
+		if (attr->mq_maxmsg <= 0 || attr->mq_msgsize <= 0)
+			return -EINVAL;
+		if (attr->mq_maxmsg > COBALT_MSGMAX)
+			return -EINVAL;
+		if (attr->mq_msgsize > COBALT_MSGSIZEMAX)
+			return -EINVAL;
+	}
+
+	msgsize = attr->mq_msgsize + sizeof(struct cobalt_msg);
+
+	/* Round the slot size up to the natural word boundary. */
+	msgsize = ALIGN(msgsize, sizeof(unsigned long));
+
+	memsize = msgsize * attr->mq_maxmsg;
+	memsize = PAGE_ALIGN(memsize);
+	if (get_order(memsize) > MAX_ORDER)
+		return -ENOSPC;
+
+	mem = xnheap_vmalloc(memsize);
+	if (mem == NULL)
+		return -ENOSPC;
+
+	mq->memsize = memsize;
+	INIT_LIST_HEAD(&mq->queued);
+	mq->nrqueued = 0;
+	xnsynch_init(&mq->receivers, XNSYNCH_PRIO, NULL);
+	xnsynch_init(&mq->senders, XNSYNCH_PRIO, NULL);
+	mq->mem = mem;
+
+	/* Fill the pool. */
+	INIT_LIST_HEAD(&mq->avail);
+	for (i = 0; i < attr->mq_maxmsg; i++) {
+		struct cobalt_msg *msg = (struct cobalt_msg *) (mem + i * msgsize);
+		mq_msg_free(mq, msg);
+	}
+
+	mq->attr = *attr;
+	mq->target = NULL;
+	xnselect_init(&mq->read_select);
+	xnselect_init(&mq->write_select);
+	mq->magic = COBALT_MQ_MAGIC;
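+	/* One ref for the creating descriptor, one for the registry. */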
+	mq->refs = 2;
+	INIT_LIST_HEAD(&mq->link);
+
+	return 0;
+}
+
+static inline void mq_destroy(struct cobalt_mq *mq)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xnsynch_destroy(&mq->receivers);
+	xnsynch_destroy(&mq->senders);
+	list_del(&mq->link);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+	xnselect_destroy(&mq->read_select); /* Reschedules. */
+	xnselect_destroy(&mq->write_select); /* Ditto. */
+	xnregistry_remove(mq->handle);
+	xnheap_vfree(mq->mem);
+	kfree(mq);
+}
+
+static int mq_unref_inner(struct cobalt_mq *mq, spl_t s)
+{
+	int destroy;
+
+	destroy = --mq->refs == 0;
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (destroy)
+		mq_destroy(mq);
+
+	return destroy;
+}
+
+static int mq_unref(struct cobalt_mq *mq)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	return mq_unref_inner(mq, s);
+}
+
+static void mqd_close(struct rtdm_fd *fd)
+{
+	struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd);
+	struct cobalt_mq *mq = mqd->mq;
+
+	kfree(mqd);
+	mq_unref(mq);
+}
+
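+/*
+ * Bind a selector to the queue: readability maps to a non-empty
+ * message list, writability to a non-empty free-slot list. Binding
+ * fails with -EBADF when the descriptor was not opened in a
+ * compatible access mode.
+ */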
+static int
+mqd_select(struct rtdm_fd *fd, struct xnselector *selector,
+	   unsigned int type, unsigned int index)
+{
+	struct cobalt_mqd *mqd = container_of(fd, struct cobalt_mqd, fd);
+	struct xnselect_binding *binding;
+	struct cobalt_mq *mq;
+	int err;
+	spl_t s;
+
+	if (type == XNSELECT_READ || type == XNSELECT_WRITE) {
+		binding = xnmalloc(sizeof(*binding));
+		if (!binding)
+			return -ENOMEM;
+	} else
+		return -EBADF;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = mqd->mq;
+
+	switch (type) {
+	case XNSELECT_READ:
+		err = -EBADF;
+		if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_WRONLY)
+			goto unlock_and_error;
+
+		err = xnselect_bind(&mq->read_select, binding,
+				selector, type, index,
+				!list_empty(&mq->queued));
+		if (err)
+			goto unlock_and_error;
+		break;
+
+	case XNSELECT_WRITE:
+		err = -EBADF;
+		if ((rtdm_fd_flags(fd) & COBALT_PERMS_MASK) == O_RDONLY)
+			goto unlock_and_error;
+
+		err = xnselect_bind(&mq->write_select, binding,
+				selector, type, index,
+				!list_empty(&mq->avail));
+		if (err)
+			goto unlock_and_error;
+		break;
+	}
+	xnlock_put_irqrestore(&nklock, s);
+	return 0;
+
+      unlock_and_error:
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(binding);
+	return err;
+}
+
+static struct rtdm_fd_ops mqd_ops = {
+	.close = mqd_close,
+	.select = mqd_select,
+};
+
+static inline int mqd_create(struct cobalt_mq *mq, unsigned long flags, int ufd)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	if (cobalt_ppd_get(0) == &cobalt_kernel_ppd)
+		return -EPERM;
+
+	mqd = kmalloc(sizeof(*mqd), GFP_KERNEL);
+	if (mqd == NULL)
+		return -ENOSPC;
+
+	mqd->fd.oflags = flags;
+	mqd->mq = mq;
+
+	ret = rtdm_fd_enter(&mqd->fd, ufd, COBALT_MQD_MAGIC, &mqd_ops);
+	if (ret < 0)
+		return ret;
+
+	return rtdm_fd_register(&mqd->fd, ufd);
+}
+
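+/*
+ * Look up the queue by name, creating it if O_CREAT is given. The
+ * retry_bind loop copes with races against concurrent unlink/create:
+ * a vanished registry entry or an -EEXIST on insertion simply
+ * restarts the lookup.
+ */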
+static int mq_open(int uqd, const char *name, int oflags,
+		   int mode, struct mq_attr *attr)
+{
+	struct cobalt_mq *mq;
+	xnhandle_t handle;
+	spl_t s;
+	int err;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return -EINVAL;
+
+  retry_bind:
+	err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	switch (err) {
+	case 0:
+		/* Found */
+		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+			return -EEXIST;
+
+		xnlock_get_irqsave(&nklock, s);
+		mq = xnregistry_lookup(handle, NULL);
+		if (mq && mq->magic != COBALT_MQ_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+
+		if (mq) {
+			++mq->refs;
+			xnlock_put_irqrestore(&nklock, s);
+		} else {
+			xnlock_put_irqrestore(&nklock, s);
+			goto retry_bind;
+		}
+
+		err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK),
+				uqd);
+		if (err < 0) {
+			mq_unref(mq);
+			return err;
+		}
+		break;
+
+	case -EWOULDBLOCK:
+		/* Not found */
+		if ((oflags & O_CREAT) == 0)
+			return -ENOENT;
+
+		mq = kmalloc(sizeof(*mq), GFP_KERNEL);
+		if (mq == NULL)
+			return -ENOSPC;
+
+		err = mq_init(mq, attr);
+		if (err) {
+			kfree(mq);
+			return err;
+		}
+
+		snprintf(mq->name, sizeof(mq->name), "%s", &name[1]);
+
+		err = mqd_create(mq, oflags & (O_NONBLOCK | COBALT_PERMS_MASK),
+				uqd);
+		if (err < 0) {
+			mq_destroy(mq);
+			return err;
+		}
+
+		xnlock_get_irqsave(&nklock, s);
+		err = xnregistry_enter(mq->name, mq, &mq->handle,
+				       &__mq_pnode.node);
+		if (err < 0)
+			--mq->refs;
+		else
+			list_add_tail(&mq->link, &cobalt_mqq);
+		xnlock_put_irqrestore(&nklock, s);
+		if (err < 0) {
+			rtdm_fd_close(uqd, COBALT_MQD_MAGIC);
+			if (err == -EEXIST)
+				goto retry_bind;
+			return err;
+		}
+		break;
+
+	default:
+		return err;
+	}
+
+	return 0;
+}
+
+static inline int mq_close(mqd_t fd)
+{
+	int err;
+
+	err = rtdm_fd_close(fd, COBALT_MQD_MAGIC);
+	return err == -EADV ? -EBADF : err;
+}
+
+static inline int mq_unlink(const char *name)
+{
+	struct cobalt_mq *mq;
+	xnhandle_t handle;
+	spl_t s;
+	int err;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return -EINVAL;
+
+	err = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (err == -EWOULDBLOCK)
+		return -ENOENT;
+	if (err)
+		return err;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = xnregistry_lookup(handle, NULL);
+	if (!mq) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+	if (mq->magic != COBALT_MQ_MAGIC) {
+		err = -EINVAL;
+	  err_unlock:
+		xnlock_put_irqrestore(&nklock, s);
+
+		return err;
+	}
+	if (mq_unref_inner(mq, s) == 0)
+		xnregistry_unlink(&name[1]);
+	return 0;
+}
+
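+/*
+ * The try* helpers below run under nklock and return either a message
+ * slot or an ERR_PTR() code; -EAGAIN is the cue for the caller to block.
+ */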
+static inline struct cobalt_msg *
+mq_trysend(struct cobalt_mqd *mqd, size_t len)
+{
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	unsigned int flags;
+
+	mq = mqd->mq;
+	flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK;
+
+	if (flags != O_WRONLY && flags != O_RDWR)
+		return ERR_PTR(-EBADF);
+
+	if (len > mq->attr.mq_msgsize)
+		return ERR_PTR(-EMSGSIZE);
+
+	msg = mq_msg_alloc(mq);
+	if (msg == NULL)
+		return ERR_PTR(-EAGAIN);
+
+	if (list_empty(&mq->avail))
+		xnselect_signal(&mq->write_select, 0);
+
+	return msg;
+}
+
+static inline struct cobalt_msg *
+mq_tryrcv(struct cobalt_mqd *mqd, size_t len)
+{
+	struct cobalt_msg *msg;
+	unsigned int flags;
+	struct cobalt_mq *mq;
+
+	mq = mqd->mq;
+	flags = rtdm_fd_flags(&mqd->fd) & COBALT_PERMS_MASK;
+
+	if (flags != O_RDONLY && flags != O_RDWR)
+		return ERR_PTR(-EBADF);
+
+	if (len < mq->attr.mq_msgsize)
+		return ERR_PTR(-EMSGSIZE);
+
+	if (list_empty(&mq->queued))
+		return ERR_PTR(-EAGAIN);
+
+	msg = list_get_entry(&mq->queued, struct cobalt_msg, link);
+	mq->nrqueued--;
+
+	if (list_empty(&mq->queued))
+		xnselect_signal(&mq->read_select, 0);
+
+	return msg;
+}
+
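+/*
+ * Send with optional timeout. The timeout is fetched from userland
+ * only once blocking is unavoidable, with nklock dropped across the
+ * copy; the "redo" pass then revalidates the queue state.
+ */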
+static struct cobalt_msg *
+mq_timedsend_inner(struct cobalt_mqd *mqd,
+		   size_t len, const void __user *u_ts,
+		   int (*fetch_timeout)(struct timespec64 *ts,
+					const void __user *u_ts))
+{
+	struct cobalt_mqwait_context mwc;
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	struct timespec64 ts;
+	xntmode_t tmode;
+	xnticks_t to;
+	spl_t s;
+	int ret;
+
+	to = XN_INFINITE;
+	tmode = XN_RELATIVE;
+redo:
+	xnlock_get_irqsave(&nklock, s);
+	msg = mq_trysend(mqd, len);
+	if (msg != ERR_PTR(-EAGAIN))
+		goto out;
+
+	if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK)
+		goto out;
+
+	if (fetch_timeout) {
+		xnlock_put_irqrestore(&nklock, s);
+		ret = fetch_timeout(&ts, u_ts);
+		if (ret)
+			return ERR_PTR(ret);
+		if (!timespec64_valid(&ts))
+			return ERR_PTR(-EINVAL);
+		to = ts2ns(&ts) + 1;
+		tmode = XN_REALTIME;
+		fetch_timeout = NULL;
+		goto redo;
+	}
+
+	mq = mqd->mq;
+	xnthread_prepare_wait(&mwc.wc);
+	ret = xnsynch_sleep_on(&mq->senders, to, tmode);
+	if (ret) {
+		if (ret & XNBREAK)
+			msg = ERR_PTR(-EINTR);
+		else if (ret & XNTIMEO)
+			msg = ERR_PTR(-ETIMEDOUT);
+		else if (ret & XNRMID)
+			msg = ERR_PTR(-EBADF);
+	} else
+		msg = mwc.msg;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return msg;
+}
+
+static void mq_release_msg(struct cobalt_mq *mq, struct cobalt_msg *msg)
+{
+	struct cobalt_mqwait_context *mwc;
+	struct xnthread_wait_context *wc;
+	struct xnthread *thread;
+
+	/*
+	 * Try handing the freed message slot over to a waiting sender;
+	 * otherwise, link it back to the free queue.
+	 */
+	if (xnsynch_pended_p(&mq->senders)) {
+		thread = xnsynch_wakeup_one_sleeper(&mq->senders);
+		wc = xnthread_get_wait_context(thread);
+		mwc = container_of(wc, struct cobalt_mqwait_context, wc);
+		mwc->msg = msg;
+		xnthread_complete_wait(wc);
+	} else {
+		mq_msg_free(mq, msg);
+		if (list_is_singular(&mq->avail))
+			xnselect_signal(&mq->write_select, 1);
+	}
+}
+
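+/*
+ * Commit a filled message: hand it directly to a sleeping receiver
+ * when possible (pipelined send), otherwise queue it by priority and
+ * fire the mq_notify() notification on the empty->non-empty
+ * transition.
+ */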
+static int
+mq_finish_send(struct cobalt_mqd *mqd, struct cobalt_msg *msg)
+{
+	struct cobalt_mqwait_context *mwc;
+	struct xnthread_wait_context *wc;
+	struct cobalt_sigpending *sigp;
+	struct xnthread *thread;
+	struct cobalt_mq *mq;
+	spl_t s;
+
+	mq = mqd->mq;
+
+	xnlock_get_irqsave(&nklock, s);
+	/* Can we do pipelined sending? */
+	if (xnsynch_pended_p(&mq->receivers)) {
+		thread = xnsynch_wakeup_one_sleeper(&mq->receivers);
+		wc = xnthread_get_wait_context(thread);
+		mwc = container_of(wc, struct cobalt_mqwait_context, wc);
+		mwc->msg = msg;
+		xnthread_complete_wait(wc);
+	} else {
+		/* Nope, have to go through the queue. */
+		list_add_priff(msg, &mq->queued, prio, link);
+		mq->nrqueued++;
+
+		/*
+		 * If this is the first message queued and no reader is
+		 * pending, send the signal registered via mq_notify(), if any.
+		 */
+		if (list_is_singular(&mq->queued)) {
+			xnselect_signal(&mq->read_select, 1);
+			if (mq->target) {
+				sigp = cobalt_signal_alloc();
+				if (sigp) {
+					cobalt_copy_siginfo(SI_MESGQ, &sigp->si, &mq->si);
+					if (cobalt_signal_send(mq->target, sigp, 0) <= 0)
+						cobalt_signal_free(sigp);
+				}
+				mq->target = NULL;
+			}
+		}
+	}
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static struct cobalt_msg *
+mq_timedrcv_inner(struct cobalt_mqd *mqd,
+		  size_t len,
+		  const void __user *u_ts,
+		  int (*fetch_timeout)(struct timespec64 *ts,
+				       const void __user *u_ts))
+{
+	struct cobalt_mqwait_context mwc;
+	struct cobalt_msg *msg;
+	struct cobalt_mq *mq;
+	struct timespec64 ts;
+	xntmode_t tmode;
+	xnticks_t to;
+	spl_t s;
+	int ret;
+
+	to = XN_INFINITE;
+	tmode = XN_RELATIVE;
+redo:
+	xnlock_get_irqsave(&nklock, s);
+	msg = mq_tryrcv(mqd, len);
+	if (msg != ERR_PTR(-EAGAIN))
+		goto out;
+
+	if (rtdm_fd_flags(&mqd->fd) & O_NONBLOCK)
+		goto out;
+
+	if (fetch_timeout) {
+		xnlock_put_irqrestore(&nklock, s);
+		ret = fetch_timeout(&ts, u_ts);
+		if (ret)
+			return ERR_PTR(ret);
+		if (!timespec64_valid(&ts))
+			return ERR_PTR(-EINVAL);
+		to = ts2ns(&ts) + 1;
+		tmode = XN_REALTIME;
+		fetch_timeout = NULL;
+		goto redo;
+	}
+
+	mq = mqd->mq;
+	xnthread_prepare_wait(&mwc.wc);
+	ret = xnsynch_sleep_on(&mq->receivers, to, tmode);
+	if (ret == 0)
+		msg = mwc.msg;
+	else if (ret & XNRMID)
+		msg = ERR_PTR(-EBADF);
+	else if (ret & XNTIMEO)
+		msg = ERR_PTR(-ETIMEDOUT);
+	else
+		msg = ERR_PTR(-EINTR);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return msg;
+}
+
+static int
+mq_finish_rcv(struct cobalt_mqd *mqd, struct cobalt_msg *msg)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq_release_msg(mqd->mq, msg);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static inline int mq_getattr(struct cobalt_mqd *mqd, struct mq_attr *attr)
+{
+	struct cobalt_mq *mq;
+	spl_t s;
+
+	mq = mqd->mq;
+	*attr = mq->attr;
+	xnlock_get_irqsave(&nklock, s);
+	attr->mq_flags = rtdm_fd_flags(&mqd->fd);
+	attr->mq_curmsgs = mq->nrqueued;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+static inline int
+mq_notify(struct cobalt_mqd *mqd, unsigned index, const struct sigevent *evp)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+	struct cobalt_mq *mq;
+	int err;
+	spl_t s;
+
+	if (evp && ((evp->sigev_notify != SIGEV_SIGNAL &&
+		     evp->sigev_notify != SIGEV_NONE) ||
+		    (unsigned int)(evp->sigev_signo - 1) > SIGRTMAX - 1))
+		return -EINVAL;
+
+	if (xnsched_interrupt_p() || thread == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+	mq = mqd->mq;
+	if (mq->target && mq->target != thread) {
+		err = -EBUSY;
+		goto unlock_and_error;
+	}
+
+	if (evp == NULL || evp->sigev_notify == SIGEV_NONE)
+		/* Here, mq->target == cobalt_current_thread() or NULL. */
+		mq->target = NULL;
+	else {
+		mq->target = thread;
+		mq->target_qd = index;
+		mq->si.si_signo = evp->sigev_signo;
+		mq->si.si_errno = 0;
+		mq->si.si_code = SI_MESGQ;
+		mq->si.si_value = evp->sigev_value;
+		/*
+		 * XXX: we differ from the regular kernel here, which
+		 * passes the sender's pid/uid data into the
+		 * receiver's namespaces. We pass the receiver's creds
+		 * into the init namespace instead.
+		 */
+		mq->si.si_pid = task_pid_nr(current);
+		mq->si.si_uid = get_current_uuid();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+	return 0;
+
+      unlock_and_error:
+	xnlock_put_irqrestore(&nklock, s);
+	return err;
+}
+
+static inline struct cobalt_mqd *cobalt_mqd_get(mqd_t ufd)
+{
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, COBALT_MQD_MAGIC);
+	if (IS_ERR(fd)) {
+		int err = PTR_ERR(fd);
+		if (err == -EADV)
+			err = cobalt_current_process() ? -EBADF : -EPERM;
+		return ERR_PTR(err);
+	}
+
+	return container_of(fd, struct cobalt_mqd, fd);
+}
+
+static inline void cobalt_mqd_put(struct cobalt_mqd *mqd)
+{
+	rtdm_fd_put(&mqd->fd);
+}
+
+int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(fd);
+	if (IS_ERR(mqd))
+		ret = PTR_ERR(mqd);
+	else {
+		trace_cobalt_mq_notify(fd, evp);
+		ret = mq_notify(mqd, fd, evp);
+		cobalt_mqd_put(mqd);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(mq_notify, primary,
+	       (mqd_t fd, const struct sigevent __user *evp))
+{
+	struct sigevent sev;
+
+	if (evp && cobalt_copy_from_user(&sev, evp, sizeof(sev)))
+		return -EFAULT;
+
+	return __cobalt_mq_notify(fd, evp ? &sev : NULL);
+}
+
+int __cobalt_mq_open(const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr *attr)
+{
+	char name[COBALT_MAXNAME];
+	ssize_t len;
+	mqd_t uqd;
+	int ret;
+
+	len = cobalt_strncpy_from_user(name, u_name, sizeof(name));
+	if (len < 0)
+		return -EFAULT;
+
+	if (len >= sizeof(name))
+		return -ENAMETOOLONG;
+
+	if (len == 0)
+		return -EINVAL;
+
+	trace_cobalt_mq_open(name, oflags, mode);
+
+	uqd = __rtdm_anon_getfd("[cobalt-mq]", oflags);
+	if (uqd < 0)
+		return uqd;
+
+	ret = mq_open(uqd, name, oflags, mode, attr);
+	if (ret < 0) {
+		__rtdm_anon_putfd(uqd);
+		return ret;
+	}
+
+	return uqd;
+}
+
+COBALT_SYSCALL(mq_open, lostage,
+	       (const char __user *u_name, int oflags,
+		mode_t mode, struct mq_attr __user *u_attr))
+{
+	struct mq_attr _attr, *attr = &_attr;
+
+	if ((oflags & O_CREAT) && u_attr) {
+		if (cobalt_copy_from_user(&_attr, u_attr, sizeof(_attr)))
+			return -EFAULT;
+	} else
+		attr = NULL;
+
+	return __cobalt_mq_open(u_name, oflags, mode, attr);
+}
+
+COBALT_SYSCALL(mq_close, lostage, (mqd_t uqd))
+{
+	trace_cobalt_mq_close(uqd);
+
+	return mq_close(uqd);
+}
+
+COBALT_SYSCALL(mq_unlink, lostage, (const char __user *u_name))
+{
+	char name[COBALT_MAXNAME];
+	ssize_t len;
+
+	len = cobalt_strncpy_from_user(name, u_name, sizeof(name));
+	if (len < 0)
+		return -EFAULT;
+	if (len >= sizeof(name))
+		return -ENAMETOOLONG;
+
+	trace_cobalt_mq_unlink(name);
+
+	return mq_unlink(name);
+}
+
+int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr)
+{
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	ret = mq_getattr(mqd, attr);
+	cobalt_mqd_put(mqd);
+	if (ret)
+		return ret;
+
+	trace_cobalt_mq_getattr(uqd, attr);
+
+	return 0;
+}
+
+COBALT_SYSCALL(mq_getattr, current,
+	       (mqd_t uqd, struct mq_attr __user *u_attr))
+{
+	struct mq_attr attr;
+	int ret;
+
+	ret = __cobalt_mq_getattr(uqd, &attr);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_attr, &attr, sizeof(attr));
+}
+
+static inline int mq_fetch_timeout(struct timespec64 *ts,
+				   const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+static inline int mq_fetch_timeout64(struct timespec64 *ts,
+				     const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
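+/*
+ * The fetch_timeout callback abstracts the userland timespec ABI, so
+ * the same inner send/receive paths serve both the legacy and the
+ * y2038-safe 64bit syscall entry points.
+ */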
+int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio, const void __user *u_ts,
+			  int (*fetch_timeout)(struct timespec64 *ts,
+					       const void __user *u_ts))
+{
+	struct cobalt_msg *msg;
+	struct cobalt_mqd *mqd;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	if (prio >= COBALT_MSGPRIOMAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (len > 0 && !access_rok(u_buf, len)) {
+		ret = -EFAULT;
+		goto out;
+	}
+
+	trace_cobalt_mq_send(uqd, u_buf, len, prio);
+	msg = mq_timedsend_inner(mqd, len, u_ts, fetch_timeout);
+	if (IS_ERR(msg)) {
+		ret = PTR_ERR(msg);
+		goto out;
+	}
+
+	ret = cobalt_copy_from_user(msg->data, u_buf, len);
+	if (ret) {
+		mq_finish_rcv(mqd, msg);
+		goto out;
+	}
+	msg->len = len;
+	msg->prio = prio;
+	ret = mq_finish_send(mqd, msg);
+out:
+	cobalt_mqd_put(mqd);
+
+	return ret;
+}
+
+int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len,
+			    unsigned int prio, const void __user *u_ts)
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio, u_ts,
+				     u_ts ? mq_fetch_timeout64 : NULL);
+}
+
+COBALT_SYSCALL(mq_timedsend, primary,
+	       (mqd_t uqd, const void __user *u_buf, size_t len,
+		unsigned int prio, const struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio,
+				     u_ts, u_ts ? mq_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL(mq_timedsend64, primary,
+	       (mqd_t uqd, const void __user *u_buf, size_t len,
+		unsigned int prio, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts);
+}
+
+int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf,
+			     ssize_t *lenp,
+			     unsigned int __user *u_prio,
+			     const void __user *u_ts,
+			     int (*fetch_timeout)(struct timespec64 *ts,
+						  const void __user *u_ts))
+{
+	struct cobalt_mqd *mqd;
+	struct cobalt_msg *msg;
+	unsigned int prio;
+	int ret;
+
+	mqd = cobalt_mqd_get(uqd);
+	if (IS_ERR(mqd))
+		return PTR_ERR(mqd);
+
+	if (*lenp > 0 && !access_wok(u_buf, *lenp)) {
+		ret = -EFAULT;
+		goto fail;
+	}
+
+	msg = mq_timedrcv_inner(mqd, *lenp, u_ts, fetch_timeout);
+	if (IS_ERR(msg)) {
+		ret = PTR_ERR(msg);
+		goto fail;
+	}
+
+	ret = cobalt_copy_to_user(u_buf, msg->data, msg->len);
+	if (ret) {
+		mq_finish_rcv(mqd, msg);
+		goto fail;
+	}
+
+	*lenp = msg->len;
+	prio = msg->prio;
+	ret = mq_finish_rcv(mqd, msg);
+	if (ret)
+		goto fail;
+
+	cobalt_mqd_put(mqd);
+
+	if (u_prio && __xn_put_user(prio, u_prio))
+		return -EFAULT;
+
+	return 0;
+fail:
+	cobalt_mqd_put(mqd);
+
+	return ret;
+}
+
+int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf,
+			       ssize_t *len,
+			       unsigned int __user *u_prio,
+			       const void __user *u_ts)
+{
+	return __cobalt_mq_timedreceive(uqd, u_buf, len, u_prio, u_ts,
+					u_ts ? mq_fetch_timeout64 : NULL);
+}
+
+COBALT_SYSCALL(mq_timedreceive, primary,
+	       (mqd_t uqd, void __user *u_buf,
+		ssize_t __user *u_len,
+		unsigned int __user *u_prio,
+		const struct __user_old_timespec __user *u_ts))
+{
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&len, u_len, sizeof(len));
+	if (ret)
+		return ret;
+
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio,
+				       u_ts, u_ts ? mq_fetch_timeout : NULL);
+
+	return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len));
+}
+
+COBALT_SYSCALL(mq_timedreceive64, primary,
+	       (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		unsigned int __user *u_prio,
+		const struct __kernel_timespec __user *u_ts))
+{
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&len, u_len, sizeof(len));
+	if (ret)
+		return ret;
+
+	ret = __cobalt_mq_timedreceive64(uqd, u_buf, &len, u_prio, u_ts);
+
+	return ret ?: cobalt_copy_to_user(u_len, &len, sizeof(*u_len));
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h
new file mode 100644
index 0000000..d922386
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mqueue.h
@@ -0,0 +1,92 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MQUEUE_H
+#define _COBALT_POSIX_MQUEUE_H
+
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <xenomai/posix/syscall.h>
+
+struct mq_attr {
+	long mq_flags;
+	long mq_maxmsg;
+	long mq_msgsize;
+	long mq_curmsgs;
+};
+
+int __cobalt_mq_open(const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr *attr);
+
+int __cobalt_mq_getattr(mqd_t uqd, struct mq_attr *attr);
+
+int __cobalt_mq_timedsend(mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio, const void __user *u_ts,
+			  int (*fetch_timeout)(struct timespec64 *ts,
+					       const void __user *u_ts));
+
+int __cobalt_mq_timedsend64(mqd_t uqd, const void __user *u_buf, size_t len,
+			    unsigned int prio, const void __user *u_ts);
+
+int __cobalt_mq_timedreceive(mqd_t uqd, void __user *u_buf,
+			     ssize_t *lenp,
+			     unsigned int __user *u_prio,
+			     const void __user *u_ts,
+			     int (*fetch_timeout)(struct timespec64 *ts,
+						  const void __user *u_ts));
+
+int __cobalt_mq_timedreceive64(mqd_t uqd, void __user *u_buf,
+			       ssize_t *len,
+			       unsigned int __user *u_prio,
+			       const void __user *u_ts);
+
+int __cobalt_mq_notify(mqd_t fd, const struct sigevent *evp);
+
+COBALT_SYSCALL_DECL(mq_open,
+		    (const char __user *u_name, int oflags,
+		     mode_t mode, struct mq_attr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mq_close, (mqd_t uqd));
+
+COBALT_SYSCALL_DECL(mq_unlink, (const char __user *u_name));
+
+COBALT_SYSCALL_DECL(mq_getattr, (mqd_t uqd, struct mq_attr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mq_timedsend,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio, const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedsend64,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedreceive,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_timedreceive64,
+		    (mqd_t uqd, void __user *u_buf, ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mq_notify,
+		    (mqd_t fd, const struct sigevent __user *evp));
+
+#endif /* !_COBALT_POSIX_MQUEUE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c
new file mode 100644
index 0000000..0f1c018
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.c
@@ -0,0 +1,446 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "internal.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "clock.h"
+#include <cobalt/kernel/time.h>
+
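+/*
+ * Set up the kernel mutex and its user-visible shadow: register an
+ * anonymous handle, expose the fastlock state through the shared
+ * heap, and pick the synchronization flags matching the requested
+ * protocol (PP ceiling vs. PI vs. plain priority queuing).
+ */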
+static int cobalt_mutex_init_inner(struct cobalt_mutex_shadow *shadow,
+				   struct cobalt_mutex *mutex,
+				   struct cobalt_mutex_state *state,
+				   const struct cobalt_mutexattr *attr)
+{
+	int synch_flags = XNSYNCH_PRIO | XNSYNCH_OWNER;
+	struct cobalt_umm *umm;
+	spl_t s;
+	int ret;
+
+	ret = xnregistry_enter_anon(mutex, &mutex->resnode.handle);
+	if (ret < 0)
+		return ret;
+
+	umm = &cobalt_ppd_get(attr->pshared)->umm;
+	shadow->handle = mutex->resnode.handle;
+	shadow->magic = COBALT_MUTEX_MAGIC;
+	shadow->lockcnt = 0;
+	shadow->attr = *attr;
+	shadow->state_offset = cobalt_umm_offset(umm, state);
+
+	mutex->magic = COBALT_MUTEX_MAGIC;
+
+	if (attr->protocol == PTHREAD_PRIO_PROTECT) {
+		state->ceiling = attr->ceiling + 1;
+		xnsynch_init_protect(&mutex->synchbase, synch_flags,
+				     &state->owner, &state->ceiling);
+	} else {
+		state->ceiling = 0;
+		if (attr->protocol == PTHREAD_PRIO_INHERIT)
+			synch_flags |= XNSYNCH_PI;
+		xnsynch_init(&mutex->synchbase, synch_flags, &state->owner);
+	}
+
+	state->flags = (attr->type == PTHREAD_MUTEX_ERRORCHECK
+			? COBALT_MUTEX_ERRORCHECK : 0);
+	mutex->attr = *attr;
+	INIT_LIST_HEAD(&mutex->conds);
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_add_resource(&mutex->resnode, mutex, attr->pshared);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+/* Must be called with nklock held, interrupts off. */
+int __cobalt_mutex_acquire_unchecked(struct xnthread *cur,
+				     struct cobalt_mutex *mutex,
+				     const struct timespec64 *ts)
+{
+	int ret;
+
+	if (ts) {
+		if (!timespec64_valid(ts))
+			return -EINVAL;
+		ret = xnsynch_acquire(&mutex->synchbase, ts2ns(ts) + 1, XN_REALTIME);
+	} else
+		ret = xnsynch_acquire(&mutex->synchbase, XN_INFINITE, XN_RELATIVE);
+
+	if (ret) {
+		if (ret & XNBREAK)
+			return -EINTR;
+		if (ret & XNTIMEO)
+			return -ETIMEDOUT;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int cobalt_mutex_release(struct xnthread *curr,
+			 struct cobalt_mutex *mutex)
+{	/* nklock held, irqs off */
+	struct cobalt_mutex_state *state;
+	struct cobalt_cond *cond;
+	unsigned long flags;
+	int need_resched;
+
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex))
+		 return -EINVAL;
+
+	if (mutex->resnode.scope !=
+	    cobalt_current_resources(mutex->attr.pshared))
+		return -EPERM;
+
+	/*
+	 * We are about to release a mutex which is still pending PP
+	 * (i.e. we never got scheduled out while holding it). Clear
+	 * the lazy handle.
+	 */
+	if (mutex->resnode.handle == curr->u_window->pp_pending)
+		curr->u_window->pp_pending = XN_NO_HANDLE;
+
+	state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner);
+	flags = state->flags;
+	need_resched = 0;
+	if ((flags & COBALT_MUTEX_COND_SIGNAL)) {
+		state->flags = flags & ~COBALT_MUTEX_COND_SIGNAL;
+		if (!list_empty(&mutex->conds)) {
+			list_for_each_entry(cond, &mutex->conds, mutex_link)
+				need_resched |=
+				cobalt_cond_deferred_signals(cond);
+		}
+	}
+	need_resched |= xnsynch_release(&mutex->synchbase, curr);
+
+	return need_resched;
+}
+
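+/*
+ * Slow path of mutex acquisition. Tries a fast acquire first; if a
+ * timeout must be fetched from userland, nklock is dropped for the
+ * copy and the handle revalidated. A relock attempt is resolved
+ * according to the mutex type, deliberately hanging the caller for
+ * PTHREAD_MUTEX_NORMAL to emulate the documented deadlock.
+ */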
+int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts,
+				   int (*fetch_timeout)(struct timespec64 *ts,
+							const void __user *u_ts))
+{
+	struct xnthread *curr = xnthread_current();
+	struct timespec64 ts, *tsp = NULL;
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	/* We need a valid thread handle for the fast lock. */
+	if (curr->handle == XN_NO_HANDLE)
+		return -EPERM;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+redo:
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (mutex->resnode.scope !=
+	    cobalt_current_resources(mutex->attr.pshared)) {
+		ret = -EPERM;
+		goto out;
+	}
+
+	xnthread_commit_ceiling(curr);
+
+	if (xnsynch_owner_check(&mutex->synchbase, curr)) {
+		/* Check if we can take the mutex immediately */
+		ret = xnsynch_try_acquire(&mutex->synchbase);
+		if (ret != -EBUSY)
+			goto out;
+
+		if (fetch_timeout) {
+			xnlock_put_irqrestore(&nklock, s);
+			ret = fetch_timeout(&ts, u_ts);
+			if (ret)
+				return ret;
+
+			fetch_timeout = NULL;
+			tsp = &ts;
+			goto redo; /* Revalidate handle. */
+		}
+		ret = __cobalt_mutex_acquire_unchecked(curr, mutex, tsp);
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	/* We already own the mutex; something is wrong. */
+
+	ret = -EBUSY;
+	switch (mutex->attr.type) {
+	case PTHREAD_MUTEX_NORMAL:
+		/* Attempting to relock a normal mutex, deadlock. */
+		if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_USER))
+			printk(XENO_WARNING
+			       "thread %s deadlocks on non-recursive mutex\n",
+			       curr->name);
+		/* Make the caller hang. */
+		__cobalt_mutex_acquire_unchecked(curr, mutex, NULL);
+		break;
+
+	case PTHREAD_MUTEX_ERRORCHECK:
+	case PTHREAD_MUTEX_RECURSIVE:
+		/*
+		 * Recursive mutexes are handled in user-space, so
+		 * these cases should never happen.
+		 */
+		ret = -EINVAL;
+		break;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_check_init, current,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	int err;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	mutex = xnregistry_lookup(handle, NULL);
+	if (cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex)))
+		/* mutex is already in a queue. */
+		err = -EBUSY;
+	else
+		err = 0;
+
+	xnlock_put_irqrestore(&nklock, s);
+	return err;
+}
+
+COBALT_SYSCALL(mutex_init, current,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct cobalt_mutexattr __user *u_attr))
+{
+	struct cobalt_mutex_state *state;
+	struct cobalt_mutex_shadow mx;
+	struct cobalt_mutexattr attr;
+	struct cobalt_mutex *mutex;
+	int ret;
+
+	if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&attr, u_attr, sizeof(attr)))
+		return -EFAULT;
+
+	mutex = xnmalloc(sizeof(*mutex));
+	if (mutex == NULL)
+		return -ENOMEM;
+
+	state = cobalt_umm_alloc(&cobalt_ppd_get(attr.pshared)->umm,
+				 sizeof(*state));
+	if (state == NULL) {
+		xnfree(mutex);
+		return -EAGAIN;
+	}
+
+	ret = cobalt_mutex_init_inner(&mx, mutex, state, &attr);
+	if (ret) {
+		xnfree(mutex);
+		cobalt_umm_free(&cobalt_ppd_get(attr.pshared)->umm, state);
+		return ret;
+	}
+
+	return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx));
+}
+
+COBALT_SYSCALL(mutex_destroy, current,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex_shadow mx;
+	struct cobalt_mutex *mutex;
+	spl_t s;
+	int ret;
+
+	if (cobalt_copy_from_user(&mx, u_mx, sizeof(mx)))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(mx.handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) {
+		ret = -EINVAL;
+		goto fail;
+	}
+	if (cobalt_current_resources(mutex->attr.pshared) !=
+	    mutex->resnode.scope) {
+		ret = -EPERM;
+		goto fail;
+	}
+	if (xnsynch_fast_owner_check(mutex->synchbase.fastlock,
+				     XN_NO_HANDLE) != 0 ||
+	    !list_empty(&mutex->conds)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	cobalt_mutex_reclaim(&mutex->resnode, s); /* drops lock */
+
+	cobalt_mark_deleted(&mx);
+
+	return cobalt_copy_to_user(u_mx, &mx, sizeof(*u_mx));
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_trylock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct xnthread *curr = xnthread_current();
+	struct cobalt_mutex *mutex;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, typeof(*mutex))) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	xnthread_commit_ceiling(curr);
+
+	ret = xnsynch_try_acquire(&mutex->synchbase);
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(mutex_lock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, NULL, NULL);
+}
+
+static inline int mutex_fetch_timeout(struct timespec64 *ts,
+				      const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+static inline int mutex_fetch_timeout64(struct timespec64 *ts,
+					const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
+int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx,
+			       const void __user *u_ts)
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts,
+					      mutex_fetch_timeout64);
+}
+
+COBALT_SYSCALL(mutex_timedlock, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct __user_old_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts, mutex_fetch_timeout);
+}
+
+COBALT_SYSCALL(mutex_timedlock64, primary,
+	       (struct cobalt_mutex_shadow __user *u_mx,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock64(u_mx, u_ts);
+}
+
+COBALT_SYSCALL(mutex_unlock, nonrestartable,
+	       (struct cobalt_mutex_shadow __user *u_mx))
+{
+	struct cobalt_mutex *mutex;
+	struct xnthread *curr;
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_mx->handle);
+	curr = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	mutex = xnregistry_lookup(handle, NULL);
+	ret = cobalt_mutex_release(curr, mutex);
+	if (ret > 0) {
+		xnsched_run();
+		ret = 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void cobalt_mutex_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_mutex_state *state;
+	struct cobalt_mutex *mutex;
+	int pshared;
+
+	mutex = container_of(node, struct cobalt_mutex, resnode);
+	state = container_of(mutex->synchbase.fastlock, struct cobalt_mutex_state, owner);
+	pshared = mutex->attr.pshared;
+	xnregistry_remove(node->handle);
+	cobalt_del_resource(node);
+	xnsynch_destroy(&mutex->synchbase);
+	cobalt_mark_deleted(mutex);
+	xnlock_put_irqrestore(&nklock, s);
+
+	cobalt_umm_free(&cobalt_ppd_get(pshared)->umm, state);
+	xnfree(mutex);
+}
+
+struct xnsynch *lookup_lazy_pp(xnhandle_t handle)
+{				/* nklock held, irqs off */
+	struct cobalt_mutex *mutex;
+
+	/* Only mutexes may be PP-enabled. */
+
+	mutex = xnregistry_lookup(handle, NULL);
+	if (mutex == NULL ||
+	    !cobalt_obj_active(mutex, COBALT_MUTEX_MAGIC, struct cobalt_mutex) ||
+	    mutex->attr.protocol != PTHREAD_PRIO_PROTECT)
+		return NULL;
+
+	return &mutex->synchbase;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h
new file mode 100644
index 0000000..d7fede2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/mutex.h
@@ -0,0 +1,83 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _COBALT_POSIX_MUTEX_H
+#define _COBALT_POSIX_MUTEX_H
+
+#include "thread.h"
+#include <cobalt/uapi/mutex.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_process;
+
+struct cobalt_mutex {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	/** cobalt_mutexq */
+	struct list_head conds;
+	struct cobalt_mutexattr attr;
+	struct cobalt_resnode resnode;
+};
+
+int __cobalt_mutex_timedlock_break(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts,
+				   int (*fetch_timeout)(struct timespec64 *ts,
+							const void __user *u_ts));
+
+int __cobalt_mutex_timedlock64(struct cobalt_mutex_shadow __user *u_mx,
+				   const void __user *u_ts);
+
+int __cobalt_mutex_acquire_unchecked(struct xnthread *cur,
+				     struct cobalt_mutex *mutex,
+				     const struct timespec64 *ts);
+
+COBALT_SYSCALL_DECL(mutex_check_init,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_init,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct cobalt_mutexattr __user *u_attr));
+
+COBALT_SYSCALL_DECL(mutex_destroy,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_trylock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_lock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+COBALT_SYSCALL_DECL(mutex_timedlock,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mutex_timedlock64,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(mutex_unlock,
+		    (struct cobalt_mutex_shadow __user *u_mx));
+
+int cobalt_mutex_release(struct xnthread *cur,
+			 struct cobalt_mutex *mutex);
+
+void cobalt_mutex_reclaim(struct cobalt_resnode *node,
+			  spl_t s);
+
+#endif /* !_COBALT_POSIX_MUTEX_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c
new file mode 100644
index 0000000..89cf62b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/nsem.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/list.h>
+#include <linux/err.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/tree.h>
+#include "internal.h"
+#include "sem.h"
+#include "thread.h"
+#include <trace/events/cobalt-posix.h>
+
+DEFINE_PRIVATE_XNLOCK(named_sem_lock);
+
+struct cobalt_named_sem {
+	struct cobalt_sem *sem;
+	struct cobalt_sem_shadow __user *usem;
+	unsigned int refs;
+	struct xnid id;
+};
+
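+/*
+ * Per-process cache of named semaphores, indexed by registry handle,
+ * so that reopening an already opened semaphore only bumps a local
+ * reference count.
+ */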
+static struct cobalt_named_sem *
+sem_search(struct cobalt_process *process, xnhandle_t handle)
+{
+	struct xnid *i;
+
+	i = xnid_fetch(&process->usems, handle);
+	if (i == NULL)
+		return NULL;
+
+	return container_of(i, struct cobalt_named_sem, id);
+}
+
+static struct cobalt_sem_shadow __user *
+sem_open(struct cobalt_process *process,
+	 struct cobalt_sem_shadow __user *ushadow,
+	 struct filename *filename, int oflags, mode_t mode,
+	 unsigned int value)
+{
+	const char *name = filename->name;
+	struct cobalt_sem_shadow shadow;
+	struct cobalt_named_sem *u, *v;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+	int rc;
+
+	if (name[0] != '/' || name[1] == '\0')
+		return ERR_PTR(-EINVAL);
+
+  retry_bind:
+	rc = xnregistry_bind(&name[1], XN_NONBLOCK, XN_RELATIVE, &handle);
+	switch (rc) {
+	case 0:
+		/* Found */
+		if ((oflags & (O_CREAT | O_EXCL)) == (O_CREAT | O_EXCL))
+			return ERR_PTR(-EEXIST);
+
+		xnlock_get_irqsave(&named_sem_lock, s);
+		u = sem_search(process, handle);
+		if (u) {
+			++u->refs;
+			xnlock_put_irqrestore(&named_sem_lock, s);
+			return u->usem;
+		}
+		xnlock_put_irqrestore(&named_sem_lock, s);
+
+		xnlock_get_irqsave(&nklock, s);
+		sem = xnregistry_lookup(handle, NULL);
+		if (sem && sem->magic != COBALT_SEM_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return ERR_PTR(-EINVAL);
+		}
+
+		if (sem) {
+			++sem->refs;
+			xnlock_put_irqrestore(&nklock, s);
+		} else {
+			xnlock_put_irqrestore(&nklock, s);
+			goto retry_bind;
+		}
+
+		__cobalt_sem_shadow_init(sem, COBALT_NAMED_SEM_MAGIC, &shadow);
+		break;
+
+	case -EWOULDBLOCK:
+		/* Not found */
+		if ((oflags & O_CREAT) == 0)
+			return ERR_PTR(-ENOENT);
+
+		shadow.magic = 0;
+		sem = __cobalt_sem_init(&name[1], &shadow,
+					SEM_PSHARED | SEM_NAMED, value);
+		if (IS_ERR(sem)) {
+			rc = PTR_ERR(sem);
+			if (rc == -EEXIST)
+				goto retry_bind;
+			return ERR_PTR(rc);
+		}
+
+		sem->pathname = filename;
+		handle = shadow.handle;
+		break;
+
+	default:
+		return ERR_PTR(rc);
+	}
+
+	if (cobalt_copy_to_user(ushadow, &shadow, sizeof(shadow))) {
+		__cobalt_sem_destroy(handle);
+		return ERR_PTR(-EFAULT);
+	}
+
+	u = xnmalloc(sizeof(*u));
+	if (u == NULL) {
+		__cobalt_sem_destroy(handle);
+		return ERR_PTR(-ENOMEM);
+	}
+
+	u->sem = sem;
+	u->usem = ushadow;
+	u->refs = 1;
+
+	xnlock_get_irqsave(&named_sem_lock, s);
+	v = sem_search(process, handle);
+	if (v) {
+		++v->refs;
+		xnlock_put_irqrestore(&named_sem_lock, s);
+		xnlock_get_irqsave(&nklock, s);
+		--sem->refs;
+		xnlock_put_irqrestore(&nklock, s);
+		putname(filename);
+		xnfree(u);
+		u = v;
+	} else {
+		xnid_enter(&process->usems, &u->id, handle);
+		xnlock_put_irqrestore(&named_sem_lock, s);
+	}
+
+	trace_cobalt_psem_open(name, handle, oflags, mode, value);
+
+	return u->usem;
+}
+
+static int sem_close(struct cobalt_process *process, xnhandle_t handle)
+{
+	struct cobalt_named_sem *u;
+	spl_t s;
+	int err;
+
+	xnlock_get_irqsave(&named_sem_lock, s);
+	u = sem_search(process, handle);
+	if (u == NULL) {
+		err = -ENOENT;
+		goto err_unlock;
+	}
+
+	if (--u->refs) {
+		err = 0;
+		goto err_unlock;
+	}
+
+	xnid_remove(&process->usems, &u->id);
+	xnlock_put_irqrestore(&named_sem_lock, s);
+
+	__cobalt_sem_destroy(handle);
+
+	xnfree(u);
+	return 1;
+
+  err_unlock:
+	xnlock_put_irqrestore(&named_sem_lock, s);
+	return err;
+}
+
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+		  const char __user *u_name,
+		  int oflags, mode_t mode, unsigned int value)
+{
+	struct cobalt_process *process;
+	struct filename *filename;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return ERR_PTR(-EPERM);
+
+	filename = getname(u_name);
+	if (IS_ERR(filename))
+		return ERR_CAST(filename);
+
+	usm = sem_open(process, usm, filename, oflags, mode, value);
+	if (IS_ERR(usm)) {
+		trace_cobalt_psem_open_failed(filename->name, oflags, mode,
+					      value, PTR_ERR(usm));
+		putname(filename);
+	}
+
+	return usm;
+}
+
+COBALT_SYSCALL(sem_open, lostage,
+	       (struct cobalt_sem_shadow __user *__user *u_addrp,
+		const char __user *u_name,
+		int oflags, mode_t mode, unsigned int value))
+{
+	struct cobalt_sem_shadow __user *usm;
+
+	if (__xn_get_user(usm, u_addrp))
+		return -EFAULT;
+
+	usm = __cobalt_sem_open(usm, u_name, oflags, mode, value);
+	if (IS_ERR(usm))
+		return PTR_ERR(usm);
+
+	return __xn_put_user(usm, u_addrp) ? -EFAULT : 0;
+}
+
+COBALT_SYSCALL(sem_close, lostage,
+	       (struct cobalt_sem_shadow __user *usm))
+{
+	struct cobalt_process *process;
+	xnhandle_t handle;
+
+	process = cobalt_current_process();
+	if (process == NULL)
+		return -EPERM;
+
+	handle = cobalt_get_handle_from_user(&usm->handle);
+	trace_cobalt_psem_close(handle);
+
+	return sem_close(process, handle);
+}
+
+static inline int sem_unlink(const char *name)
+{
+	xnhandle_t handle;
+	int ret;
+
+	if (name[0] != '/')
+		return -EINVAL;
+
+	ret = xnregistry_bind(name + 1, XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (ret == -EWOULDBLOCK)
+		return -ENOENT;
+	if (ret)
+		return ret;
+
+	if (__cobalt_sem_destroy(handle) == -EBUSY)
+		xnregistry_unlink(xnregistry_key(handle));
+
+	return 0;
+}
+
+COBALT_SYSCALL(sem_unlink, lostage,
+	       (const char __user *u_name))
+{
+	struct filename *filename;
+	int ret;
+
+	filename = getname(u_name);
+	if (IS_ERR(filename))
+		return PTR_ERR(filename);
+
+	trace_cobalt_psem_unlink(filename->name);
+	ret = sem_unlink(filename->name);
+	putname(filename);
+
+	return ret;
+}
+
+static void reclaim_named_sem(void *arg, struct xnid *i)
+{
+	struct cobalt_process *process = arg;
+	struct cobalt_named_sem *u;
+
+	u = container_of(i, struct cobalt_named_sem, id);
+	u->refs = 1;
+	sem_close(process, xnid_key(i));
+}
+
+void cobalt_nsem_reclaim(struct cobalt_process *process)
+{
+	xntree_cleanup(&process->usems, process, reclaim_named_sem);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c
new file mode 100644
index 0000000..935007f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.c
@@ -0,0 +1,1203 @@
+/*
+ * Copyright (C) 2001-2014 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2001-2014 The Xenomai project <http://www.xenomai.org>
+ * Copyright (C) 2006 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ * RTAI/fusion Copyright (C) 2004 The RTAI project <http://www.rtai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/unistd.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/anon_inodes.h>
+#include <linux/mman.h>
+#include <linux/mm.h>
+#include <linux/slab.h>
+#include <linux/cred.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/signal.h>
+#include <pipeline/kevents.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/signal.h>
+#include <cobalt/uapi/syscall.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+#include <rtdm/driver.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include "../debug.h"
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "mutex.h"
+#include "cond.h"
+#include "mqueue.h"
+#include "sem.h"
+#include "signal.h"
+#include "timer.h"
+#include "monitor.h"
+#include "clock.h"
+#include "event.h"
+#include "timerfd.h"
+#include "io.h"
+
+static int gid_arg = -1;
+module_param_named(allowed_group, gid_arg, int, 0644);
+
+static DEFINE_MUTEX(personality_lock);
+
+static struct hlist_head *process_hash;
+DEFINE_PRIVATE_XNLOCK(process_hash_lock);
+#define PROCESS_HASH_SIZE 13
+
+struct xnthread_personality *cobalt_personalities[NR_PERSONALITIES];
+
+static struct xnsynch yield_sync;
+
+LIST_HEAD(cobalt_global_thread_list);
+
+DEFINE_XNPTREE(posix_ptree, "posix");
+
+struct cobalt_resources cobalt_global_resources = {
+	.condq = LIST_HEAD_INIT(cobalt_global_resources.condq),
+	.mutexq = LIST_HEAD_INIT(cobalt_global_resources.mutexq),
+	.semq = LIST_HEAD_INIT(cobalt_global_resources.semq),
+	.monitorq = LIST_HEAD_INIT(cobalt_global_resources.monitorq),
+	.eventq = LIST_HEAD_INIT(cobalt_global_resources.eventq),
+	.schedq = LIST_HEAD_INIT(cobalt_global_resources.schedq),
+};
+
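+/*
+ * Derive a hash bucket from the mm pointer: mm_struct objects come
+ * from the slab allocator, so scaling the (offset) address down by
+ * the object size spreads consecutive allocations over distinct
+ * values before the modulo folds them into the table.
+ */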
+static unsigned __attribute__((pure)) process_hash_crunch(struct mm_struct *mm)
+{
+	unsigned long hash = ((unsigned long)mm - PAGE_OFFSET) / sizeof(*mm);
+	return hash % PROCESS_HASH_SIZE;
+}
+
+static struct cobalt_process *__process_hash_search(struct mm_struct *mm)
+{
+	unsigned int bucket = process_hash_crunch(mm);
+	struct cobalt_process *p;
+
+	hlist_for_each_entry(p, &process_hash[bucket], hlink)
+		if (p->mm == mm)
+			return p;
+
+	return NULL;
+}
+
+static int process_hash_enter(struct cobalt_process *p)
+{
+	struct mm_struct *mm = current->mm;
+	unsigned int bucket = process_hash_crunch(mm);
+	int err;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	if (__process_hash_search(mm)) {
+		err = -EBUSY;
+		goto out;
+	}
+
+	p->mm = mm;
+	hlist_add_head(&p->hlink, &process_hash[bucket]);
+	err = 0;
+  out:
+	xnlock_put_irqrestore(&process_hash_lock, s);
+	return err;
+}
+
+static void process_hash_remove(struct cobalt_process *p)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	if (p->mm)
+		hlist_del(&p->hlink);
+	xnlock_put_irqrestore(&process_hash_lock, s);
+}
+
+struct cobalt_process *cobalt_search_process(struct mm_struct *mm)
+{
+	struct cobalt_process *process;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	process = __process_hash_search(mm);
+	xnlock_put_irqrestore(&process_hash_lock, s);
+
+	return process;
+}
+
+static void *lookup_context(int xid)
+{
+	struct cobalt_process *process = cobalt_current_process();
+	void *priv = NULL;
+	spl_t s;
+
+	xnlock_get_irqsave(&process_hash_lock, s);
+	/*
+	 * First try matching the process context attached to the
+	 * (usually main) thread which issued sc_cobalt_bind. If not
+	 * found, try matching by mm context, which should point us
+	 * back to the latter. If none match, then the current process
+	 * is unbound.
+	 */
+	if (process == NULL && current->mm)
+		process = __process_hash_search(current->mm);
+	if (process)
+		priv = process->priv[xid];
+
+	xnlock_put_irqrestore(&process_hash_lock, s);
+
+	return priv;
+}
+
+void cobalt_remove_process(struct cobalt_process *process)
+{
+	struct xnthread_personality *personality;
+	void *priv;
+	int xid;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = NR_PERSONALITIES - 1; xid >= 0; xid--) {
+		if (!__test_and_clear_bit(xid, &process->permap))
+			continue;
+		personality = cobalt_personalities[xid];
+		priv = process->priv[xid];
+		if (priv == NULL)
+			continue;
+		/*
+		 * CAUTION: process potentially refers to stale memory
+		 * upon return from detach_process() for the Cobalt
+		 * personality, so don't dereference it afterwards.
+		 */
+		if (xid)
+			process->priv[xid] = NULL;
+		__clear_bit(personality->xid, &process->permap);
+		personality->ops.detach_process(priv);
+		atomic_dec(&personality->refcnt);
+		XENO_WARN_ON(COBALT, atomic_read(&personality->refcnt) < 0);
+		if (personality->module)
+			module_put(personality->module);
+	}
+
+	cobalt_set_process(NULL);
+
+	mutex_unlock(&personality_lock);
+}
+
+static void post_ppd_release(struct cobalt_umm *umm)
+{
+	struct cobalt_process *process;
+
+	process = container_of(umm, struct cobalt_process, sys_ppd.umm);
+	kfree(process);
+}
+
+static inline char *get_exe_path(struct task_struct *p)
+{
+	struct file *exe_file;
+	char *pathname, *buf;
+	struct mm_struct *mm;
+	struct path path;
+
+	/*
+	 * PATH_MAX is fairly large, and in any case won't fit
+	 * comfortably on the caller's stack; since we are mapping a
+	 * shadow, which is a heavyweight operation anyway, let's pick the
+	 * memory from the page allocator.
+	 */
+	buf = (char *)__get_free_page(GFP_KERNEL);
+	if (buf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	mm = get_task_mm(p);
+	if (mm == NULL) {
+		pathname = "vmlinux";
+		goto copy;	/* kernel thread */
+	}
+
+	exe_file = get_mm_exe_file(mm);
+	mmput(mm);
+	if (exe_file == NULL) {
+		pathname = ERR_PTR(-ENOENT);
+		goto out;	/* no luck. */
+	}
+
+	path = exe_file->f_path;
+	path_get(&exe_file->f_path);
+	fput(exe_file);
+	pathname = d_path(&path, buf, PATH_MAX);
+	path_put(&path);
+	if (IS_ERR(pathname))
+		goto out;	/* mmmh... */
+copy:
+	/* caution: d_path() may start writing anywhere in the buffer. */
+	pathname = kstrdup(pathname, GFP_KERNEL);
+out:
+	free_page((unsigned long)buf);
+
+	return pathname;
+}
+
+static inline int raise_cap(int cap)
+{
+	struct cred *new;
+
+	new = prepare_creds();
+	if (new == NULL)
+		return -ENOMEM;
+
+	cap_raise(new->cap_effective, cap);
+
+	return commit_creds(new);
+}
+
+static int bind_personality(struct xnthread_personality *personality)
+{
+	struct cobalt_process *process;
+	void *priv;
+
+	/*
+	 * We also check capabilities for stacking a Cobalt extension,
+	 * in case the process dropped the supervisor privileges after
+	 * a successful initial binding to the Cobalt interface.
+	 */
+	if (!capable(CAP_SYS_NICE) &&
+	    (gid_arg == -1 || !in_group_p(KGIDT_INIT(gid_arg))))
+		return -EPERM;
+	/*
+	 * Protect from the same process binding to the same interface
+	 * several times.
+	 */
+	priv = lookup_context(personality->xid);
+	if (priv)
+		return 0;
+
+	priv = personality->ops.attach_process();
+	if (IS_ERR(priv))
+		return PTR_ERR(priv);
+
+	process = cobalt_current_process();
+	/*
+	 * We are still covered by the personality_lock, so we may
+	 * safely bump the module refcount after the attach handler
+	 * has returned.
+	 */
+	if (personality->module && !try_module_get(personality->module)) {
+		personality->ops.detach_process(priv);
+		return -EAGAIN;
+	}
+
+	__set_bit(personality->xid, &process->permap);
+	atomic_inc(&personality->refcnt);
+	process->priv[personality->xid] = priv;
+
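+	/*
+	 * Grant the capabilities a real-time application typically
+	 * requires: scheduling control, memory locking and raw I/O.
+	 */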
+	raise_cap(CAP_SYS_NICE);
+	raise_cap(CAP_IPC_LOCK);
+	raise_cap(CAP_SYS_RAWIO);
+
+	return 0;
+}
+
+int cobalt_bind_personality(unsigned int magic)
+{
+	struct xnthread_personality *personality;
+	int xid, ret = -ESRCH;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = 1; xid < NR_PERSONALITIES; xid++) {
+		personality = cobalt_personalities[xid];
+		if (personality && personality->magic == magic) {
+			ret = bind_personality(personality);
+			break;
+		}
+	}
+
+	mutex_unlock(&personality_lock);
+
+	return ret ?: xid;
+}
+
+int cobalt_bind_core(int ufeatures)
+{
+	struct cobalt_process *process;
+	int ret;
+
+	mutex_lock(&personality_lock);
+	ret = bind_personality(&cobalt_personality);
+	mutex_unlock(&personality_lock);
+	if (ret)
+		return ret;
+
+	process = cobalt_current_process();
+	/* Feature set userland knows about. */
+	process->ufeatures = ufeatures;
+
+	return 0;
+}
+
+/**
+ * @fn int cobalt_register_personality(struct xnthread_personality *personality)
+ * @internal
+ * @brief Register a new interface personality.
+ *
+ * - personality->ops.attach_process() is called when a user-space
+ *   process binds to the personality, on behalf of one of its
+ *   threads. The attach_process() handler may return:
+ *
+ *   . an opaque pointer, representing the context of the calling
+ *   process for this personality;
+ *
+ *   . a NULL pointer, meaning that no per-process structure should be
+ *   attached to this process for this personality;
+ *
+ *   . ERR_PTR(negative value) indicating an error; the binding
+ *   process will then abort.
+ *
+ * - personality->ops.detach_process() is called on behalf of an
+ *   exiting user-space process which has previously attached to the
+ *   personality. This handler is passed a pointer to the per-process
+ *   data received earlier from the ops->attach_process() handler.
+ *
+ * @return the personality (extension) identifier.
+ *
+ * @note cobalt_get_context() returns NULL when ops.detach_process() is
+ * invoked for the personality the caller detaches from.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_register_personality(struct xnthread_personality *personality)
+{
+	int xid;
+
+	mutex_lock(&personality_lock);
+
+	for (xid = 0; xid < NR_PERSONALITIES; xid++) {
+		if (cobalt_personalities[xid] == NULL) {
+			personality->xid = xid;
+			atomic_set(&personality->refcnt, 0);
+			cobalt_personalities[xid] = personality;
+			goto out;
+		}
+	}
+
+	xid = -EAGAIN;
+out:
+	mutex_unlock(&personality_lock);
+
+	return xid;
+}
+EXPORT_SYMBOL_GPL(cobalt_register_personality);
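+/*
+ * Usage sketch (illustrative only, not part of the upstream code): a
+ * hypothetical extension module would register its personality from
+ * module init and keep the returned xid for unregistration:
+ *
+ *	static struct xnthread_personality foo_personality = {
+ *		.name = "foo",
+ *		.magic = FOO_MAGIC,
+ *		.module = THIS_MODULE,
+ *		.ops = {
+ *			.attach_process = foo_attach_process,
+ *			.detach_process = foo_detach_process,
+ *		},
+ *	};
+ *
+ *	static int foo_xid;
+ *
+ *	static int __init foo_init(void)
+ *	{
+ *		foo_xid = cobalt_register_personality(&foo_personality);
+ *		return foo_xid < 0 ? foo_xid : 0;
+ *	}
+ *
+ * FOO_MAGIC, foo_attach_process() and foo_detach_process() are
+ * placeholders for extension-provided items.
+ */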
+
+/*
+ * @brief Unregister an interface personality.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_unregister_personality(int xid)
+{
+	struct xnthread_personality *personality;
+	int ret = 0;
+
+	if (xid < 0 || xid >= NR_PERSONALITIES)
+		return -EINVAL;
+
+	mutex_lock(&personality_lock);
+
+	personality = cobalt_personalities[xid];
+	if (atomic_read(&personality->refcnt) > 0)
+		ret = -EBUSY;
+	else
+		cobalt_personalities[xid] = NULL;
+
+	mutex_unlock(&personality_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(cobalt_unregister_personality);
+
+/**
+ * Stack a new personality over Cobalt for the current thread.
+ *
+ * This service registers the current thread as a member of the
+ * additional personality identified by @a xid. If the current thread
+ * is already assigned this personality, the call returns successfully
+ * with no effect.
+ *
+ * @param xid the identifier of the additional personality.
+ *
+ * @return A handle to the previous personality. The caller should
+ * save this handle for unstacking @a xid when applicable via a call
+ * to cobalt_pop_personality().
+ *
+ * @coretags{secondary-only}
+ */
+struct xnthread_personality *
+cobalt_push_personality(int xid)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct xnthread_personality *prev, *next;
+	struct xnthread *thread = p->thread;
+
+	secondary_mode_only();
+
+	mutex_lock(&personality_lock);
+
+	if (xid < 0 || xid >= NR_PERSONALITIES ||
+	    p->process == NULL || !test_bit(xid, &p->process->permap)) {
+		mutex_unlock(&personality_lock);
+		return NULL;
+	}
+
+	next = cobalt_personalities[xid];
+	prev = thread->personality;
+	if (next == prev) {
+		mutex_unlock(&personality_lock);
+		return prev;
+	}
+
+	thread->personality = next;
+	mutex_unlock(&personality_lock);
+	xnthread_run_handler(thread, map_thread);
+
+	return prev;
+}
+EXPORT_SYMBOL_GPL(cobalt_push_personality);
+
+/**
+ * Pop the topmost personality from the current thread.
+ *
+ * This service restores the personality which was active on the
+ * current thread before the matching call to
+ * cobalt_push_personality().
+ *
+ * @param prev the previous personality which was returned by the
+ * latest call to cobalt_push_personality() for the current thread.
+ *
+ * @coretags{secondary-only}
+ */
+void cobalt_pop_personality(struct xnthread_personality *prev)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct xnthread *thread = p->thread;
+
+	secondary_mode_only();
+	thread->personality = prev;
+}
+EXPORT_SYMBOL_GPL(cobalt_pop_personality);
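+/*
+ * Illustrative pairing of the push/pop services above (hypothetical
+ * extension code; foo_xid would come from a prior call to
+ * cobalt_register_personality()):
+ *
+ *	struct xnthread_personality *prev;
+ *
+ *	prev = cobalt_push_personality(foo_xid);
+ *	if (prev == NULL)
+ *		return -EPERM;	(process not bound to this personality)
+ *
+ *	... run on behalf of the "foo" personality ...
+ *
+ *	cobalt_pop_personality(prev);
+ */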
+
+/**
+ * Return the per-process data attached to the calling user process.
+ *
+ * This service returns the per-process data attached to the calling
+ * user process for the personality whose xid is @a xid.
+ *
+ * The per-process data was obtained from the ->attach_process()
+ * handler defined for the personality @a xid refers to.
+ *
+ * See cobalt_register_personality() documentation for information on
+ * the way to attach a per-process data to a process.
+ *
+ * @param xid the personality identifier.
+ *
+ * @return the per-process data if the current context is a user-space
+ * process; @return NULL otherwise. As a special case,
+ * cobalt_get_context(0) returns the current Cobalt process
+ * descriptor, which is strictly identical to calling
+ * cobalt_current_process().
+ *
+ * @coretags{task-unrestricted}
+ */
+void *cobalt_get_context(int xid)
+{
+	return lookup_context(xid);
+}
+EXPORT_SYMBOL_GPL(cobalt_get_context);
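+/*
+ * Illustrative use of cobalt_get_context() (hypothetical extension
+ * code): fetch the per-process state returned earlier by the
+ * extension's ->attach_process() handler:
+ *
+ *	struct foo_process_state *fps;
+ *
+ *	fps = cobalt_get_context(foo_xid);
+ *	if (fps == NULL)
+ *		return -EPERM;	(caller does not belong to a bound process)
+ */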
+
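+/*
+ * Wait on the yield synch object for at least @min and at most @max
+ * monotonic clock units, unless some other activity flushes the
+ * waiters earlier via cobalt_signal_yield(). Returns -EINTR if the
+ * sleep was forcibly interrupted (XNBREAK).
+ */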
+int cobalt_yield(xnticks_t min, xnticks_t max)
+{
+	xnticks_t start;
+	int ret;
+
+	start = xnclock_read_monotonic(&nkclock);
+	max += start;
+	min += start;
+
+	do {
+		ret = xnsynch_sleep_on(&yield_sync, max, XN_ABSOLUTE);
+		if (ret & XNBREAK)
+			return -EINTR;
+	} while (ret == 0 && xnclock_read_monotonic(&nkclock) < min);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_yield);
+
+/**
+ * @fn int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
+ * @internal
+ * @brief Create a shadow thread context over a user task.
+ *
+ * This call maps a Xenomai thread to the current regular Linux task
+ * running in userland.  The priority and scheduling class of the
+ * underlying Linux task are not affected; it is assumed that the
+ * interface library did set them appropriately before issuing the
+ * shadow mapping request.
+ *
+ * @param thread The descriptor address of the new shadow thread to be
+ * mapped to current. This descriptor must have been previously
+ * initialized by a call to xnthread_init().
+ *
+ * @param u_winoff will receive the offset of the per-thread
+ * "u_window" structure in the global heap associated to @a
+ * thread. This structure reflects thread state information visible
+ * from userland through a shared memory window.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -EINVAL is returned if the thread control block does not bear the
+ * XNUSER bit.
+ *
+ * - -EBUSY is returned if either the current Linux task or the
+ * associated shadow thread is already involved in a shadow mapping.
+ *
+ * @coretags{secondary-only}
+ */
+int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff)
+{
+	struct xnthread_user_window *u_window;
+	struct xnthread_start_attr attr;
+	struct cobalt_ppd *sys_ppd;
+	struct cobalt_umm *umm;
+	int ret;
+
+	if (!xnthread_test_state(thread, XNUSER))
+		return -EINVAL;
+
+	if (xnthread_current() || xnthread_test_state(thread, XNMAPPED))
+		return -EBUSY;
+
+	if (!access_wok(u_winoff, sizeof(*u_winoff)))
+		return -EFAULT;
+
+	ret = pipeline_prepare_current();
+	if (ret)
+		return ret;
+
+	umm = &cobalt_kernel_ppd.umm;
+	u_window = cobalt_umm_zalloc(umm, sizeof(*u_window));
+	if (u_window == NULL)
+		return -ENOMEM;
+
+	thread->u_window = u_window;
+	__xn_put_user(cobalt_umm_offset(umm, u_window), u_winoff);
+	xnthread_pin_initial(thread);
+
+	/*
+	 * CAUTION: we enable the pipeline notifier only when our
+	 * shadow TCB is consistent, so that we won't trigger false
+	 * positive in debug code from handle_schedule_event() and
+	 * friends.
+	 */
+	pipeline_init_shadow_tcb(thread);
+	xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+	pipeline_attach_current(thread);
+	xnthread_set_state(thread, XNMAPPED);
+	xndebug_shadow_init(thread);
+	sys_ppd = cobalt_ppd_get(0);
+	atomic_inc(&sys_ppd->refcnt);
+	/*
+	 * ->map_thread() handler is invoked after the TCB is fully
+	 * built, and when we know for sure that current will go
+	 * through our task-exit handler, because it has a shadow
+	 * extension and I-pipe notifications will soon be enabled for
+	 * it.
+	 */
+	xnthread_run_handler(thread, map_thread);
+	pipeline_enable_kevents();
+
+	attr.mode = 0;
+	attr.entry = NULL;
+	attr.cookie = NULL;
+	ret = xnthread_start(thread, &attr);
+	if (ret)
+		return ret;
+
+	xnthread_sync_window(thread);
+
+	xntrace_pid(xnthread_host_pid(thread),
+		    xnthread_current_priority(thread));
+
+	return 0;
+}
+
+void cobalt_signal_yield(void)
+{
+	spl_t s;
+
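+	/*
+	 * Lock-free peek first: missing a concurrent sleeper here is
+	 * benign since it would eventually time out; re-check under
+	 * nklock before flushing the wait queue for real.
+	 */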
+	if (!xnsynch_pended_p(&yield_sync))
+		return;
+
+	xnlock_get_irqsave(&nklock, s);
+	if (xnsynch_pended_p(&yield_sync)) {
+		xnsynch_flush(&yield_sync, 0);
+		xnsched_run();
+	}
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+static inline struct cobalt_process *
+process_from_thread(struct xnthread *thread)
+{
+	return container_of(thread, struct cobalt_thread, threadbase)->process;
+}
+
+void cobalt_stop_debugged_process(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+	struct cobalt_thread *cth;
+
+	if (process->debugged_threads > 0)
+		return;
+
+	list_for_each_entry(cth, &process->thread_list, next) {
+		if (&cth->threadbase == thread)
+			continue;
+
+		xnthread_suspend(&cth->threadbase, XNDBGSTOP, XN_INFINITE,
+				 XN_RELATIVE, NULL);
+	}
+}
+
+static void cobalt_resume_debugged_process(struct cobalt_process *process)
+{
+	struct cobalt_thread *cth;
+
+	xnsched_lock();
+
+	list_for_each_entry(cth, &process->thread_list, next)
+		if (xnthread_test_state(&cth->threadbase, XNDBGSTOP))
+			xnthread_resume(&cth->threadbase, XNDBGSTOP);
+
+	xnsched_unlock();
+}
+
+/* called with nklock held */
+void cobalt_register_debugged_thread(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+
+	xnthread_set_state(thread, XNSSTEP);
+
+	cobalt_stop_debugged_process(thread);
+	process->debugged_threads++;
+
+	if (xnthread_test_state(thread, XNRELAX))
+		xnthread_suspend(thread, XNDBGSTOP, XN_INFINITE, XN_RELATIVE,
+				 NULL);
+}
+
+/* called with nklock held */
+void cobalt_unregister_debugged_thread(struct xnthread *thread)
+{
+	struct cobalt_process *process = process_from_thread(thread);
+
+	process->debugged_threads--;
+	xnthread_clear_state(thread, XNSSTEP);
+
+	if (process->debugged_threads == 0)
+		cobalt_resume_debugged_process(process);
+}
+
+int cobalt_handle_setaffinity_event(struct task_struct *task)
+{
+#ifdef CONFIG_SMP
+	struct xnthread *thread;
+	spl_t s;
+
+	thread = xnthread_from_task(task);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	/*
+	 * Detect a Cobalt thread sleeping in primary mode which is
+	 * required to migrate to another CPU by the host kernel.
+	 *
+	 * We may NOT fix up thread->sched immediately using the
+	 * passive migration call, because the latter always has to
+	 * take place on behalf of the target thread itself while
+	 * running in secondary mode. Therefore, that thread needs to
+	 * go through secondary mode first, then move back to primary
+	 * mode, so that affinity_ok() does the fixup work.
+	 *
+	 * We force this by sending a SIGSHADOW signal to the migrated
+	 * thread, asking it to switch back to primary mode from the
+	 * handler, at which point the interrupted syscall may be
+	 * restarted.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS & ~XNRELAX))
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN);
+
+	xnlock_put_irqrestore(&nklock, s);
+#endif /* CONFIG_SMP */
+
+	return KEVENT_PROPAGATE;
+}
+
+#ifdef CONFIG_SMP
+void cobalt_adjust_affinity(struct task_struct *task) /* nklocked, IRQs off */
+{
+	struct xnthread *thread = xnthread_from_task(task);
+	struct xnsched *sched;
+	int cpu = task_cpu(task);
+
+	/*
+	 * To maintain consistency between both Cobalt and host
+	 * schedulers, reflecting a thread migration to another CPU
+	 * into the Cobalt scheduler state must happen from secondary
+	 * mode only, on behalf of the migrated thread itself once it
+	 * runs on the target CPU.
+	 *
+	 * This means that the Cobalt scheduler state regarding the
+	 * CPU information lags behind the host scheduler state until
+	 * the migrated thread switches back to primary mode
+	 * (i.e. task_cpu(p) != xnsched_cpu(xnthread_from_task(p)->sched)).
+	 * This is ok since Cobalt does not schedule such thread until then.
+	 *
+	 * check_affinity() detects when a Cobalt thread switching
+	 * back to primary mode did move to another CPU earlier while
+	 * in secondary mode. If so, do the fixups to reflect the
+	 * change.
+	 */
+	if (!xnsched_threading_cpu(cpu)) {
+		/*
+		 * The thread is about to switch to primary mode on a
+		 * non-rt CPU, which is damn wrong and hopeless.
+		 * Whine and cancel that thread.
+		 */
+		printk(XENO_WARNING "thread %s[%d] switched to non-rt CPU%d, aborted.\n",
+		       thread->name, xnthread_host_pid(thread), cpu);
+		/*
+		 * Can't call xnthread_cancel() from a migration
+		 * point, that would break. Since we are on the wakeup
+		 * path to hardening, just raise XNCANCELD to catch it
+		 * in xnthread_harden().
+		 */
+		xnthread_set_info(thread, XNCANCELD);
+		return;
+	}
+
+	sched = xnsched_struct(cpu);
+	if (sched == thread->sched)
+		return;
+
+	/*
+	 * The current thread moved to a supported real-time CPU,
+	 * which is not part of its original affinity mask
+	 * though. Assume user wants to extend this mask.
+	 */
+	if (!cpumask_test_cpu(cpu, &thread->affinity))
+		cpumask_set_cpu(cpu, &thread->affinity);
+
+	xnthread_run_handler_stack(thread, move_thread, cpu);
+	xnthread_migrate_passive(thread, sched);
+}
+#endif /* CONFIG_SMP */
+
+static void __handle_taskexit_event(struct task_struct *p)
+{
+	struct cobalt_ppd *sys_ppd;
+	struct xnthread *thread;
+	spl_t s;
+
+	/*
+	 * We are called for both kernel and user shadows over the
+	 * root thread.
+	 */
+	secondary_mode_only();
+
+	thread = xnthread_current();
+	XENO_BUG_ON(COBALT, thread == NULL);
+	trace_cobalt_shadow_unmap(thread);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNSSTEP))
+		cobalt_unregister_debugged_thread(thread);
+
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_run_handler_stack(thread, exit_thread);
+
+	if (xnthread_test_state(thread, XNUSER)) {
+		cobalt_umm_free(&cobalt_kernel_ppd.umm, thread->u_window);
+		thread->u_window = NULL;
+		sys_ppd = cobalt_ppd_get(0);
+		if (atomic_dec_and_test(&sys_ppd->refcnt))
+			cobalt_remove_process(cobalt_current_process());
+	}
+}
+
+int cobalt_handle_user_return(struct task_struct *task)
+{
+	struct xnthread *thread;
+	spl_t s;
+	int err;
+
+	thread = xnthread_from_task(task);
+	if (thread == NULL)
+		return KEVENT_PROPAGATE;
+
+	if (xnthread_test_info(thread, XNCONTHI)) {
+		xnlock_get_irqsave(&nklock, s);
+		xnthread_clear_info(thread, XNCONTHI);
+		xnlock_put_irqrestore(&nklock, s);
+
+		err = xnthread_harden();
+
+		/*
+		 * XNCONTHI may or may not have been re-applied if
+		 * harden bailed out due to pending signals. Make sure
+		 * it is set in that case.
+		 */
+		if (err == -ERESTARTSYS) {
+			xnlock_get_irqsave(&nklock, s);
+			xnthread_set_info(thread, XNCONTHI);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	}
+
+	return KEVENT_PROPAGATE;
+}
+
+static void detach_current(void)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+
+	p->thread = NULL;
+	p->process = NULL;
+}
+
+int cobalt_handle_taskexit_event(struct task_struct *task) /* task == current */
+{
+	__handle_taskexit_event(task);
+
+	/*
+	 * __xnthread_cleanup() -> ... -> finalize_thread
+	 * handler. From that point, the TCB is dropped. Be careful
+	 * not to tread on stale memory within @thread.
+	 */
+	__xnthread_cleanup(xnthread_current());
+
+	detach_current();
+
+	return KEVENT_PROPAGATE;
+}
+
+int cobalt_handle_cleanup_event(struct mm_struct *mm)
+{
+	struct cobalt_process *old, *process;
+	struct cobalt_ppd *sys_ppd;
+	struct xnthread *curr;
+
+	/*
+	 * We are NOT called for exiting kernel shadows.
+	 * cobalt_current_process() is cleared if we get there after
+	 * handle_task_exit(), so we need to restore this context
+	 * pointer temporarily.
+	 */
+	process = cobalt_search_process(mm);
+	old = cobalt_set_process(process);
+	sys_ppd = cobalt_ppd_get(0);
+	if (sys_ppd != &cobalt_kernel_ppd) {
+		bool running_exec;
+
+		/*
+		 * Detect a userland shadow running exec(), i.e. still
+		 * attached to the current linux task (no prior
+		 * detach_current). In this case, we emulate a task
+		 * exit, since the Xenomai binding shall not survive
+		 * the exec() syscall. Since the process will keep on
+		 * running though, we have to disable the event
+		 * notifier manually for it.
+		 */
+		curr = xnthread_current();
+		running_exec = curr && (current->flags & PF_EXITING) == 0;
+		if (running_exec) {
+			__handle_taskexit_event(current);
+			pipeline_cleanup_process();
+		}
+		if (atomic_dec_and_test(&sys_ppd->refcnt))
+			cobalt_remove_process(process);
+		if (running_exec) {
+			__xnthread_cleanup(curr);
+			detach_current();
+		}
+	}
+
+	/*
+	 * CAUTION: Do not override a state change caused by
+	 * cobalt_remove_process().
+	 */
+	if (cobalt_current_process() == process)
+		cobalt_set_process(old);
+
+	return KEVENT_PROPAGATE;
+}
+
+static int attach_process(struct cobalt_process *process)
+{
+	struct cobalt_ppd *p = &process->sys_ppd;
+	char *exe_path;
+	int ret;
+
+	ret = cobalt_umm_init(&p->umm, CONFIG_XENO_OPT_PRIVATE_HEAPSZ * 1024,
+			      post_ppd_release);
+	if (ret)
+		return ret;
+
+	cobalt_umm_set_name(&p->umm, "private heap[%d]", task_pid_nr(current));
+
+	ret = pipeline_attach_process(process);
+	if (ret)
+		goto fail_pipeline;
+
+	exe_path = get_exe_path(current);
+	if (IS_ERR(exe_path)) {
+		printk(XENO_WARNING
+		       "%s[%d] can't find exe path\n",
+		       current->comm, task_pid_nr(current));
+		exe_path = NULL; /* Not lethal, but weird. */
+	}
+	p->exe_path = exe_path;
+	xntree_init(&p->fds);
+	atomic_set(&p->refcnt, 1);
+
+	ret = process_hash_enter(process);
+	if (ret)
+		goto fail_hash;
+
+	return 0;
+fail_hash:
+	pipeline_detach_process(process);
+	if (p->exe_path)
+		kfree(p->exe_path);
+fail_pipeline:
+	cobalt_umm_destroy(&p->umm);
+
+	return ret;
+}
+
+static void *cobalt_process_attach(void)
+{
+	struct cobalt_process *process;
+	int ret;
+
+	process = kzalloc(sizeof(*process), GFP_KERNEL);
+	if (process == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = attach_process(process);
+	if (ret) {
+		kfree(process);
+		return ERR_PTR(ret);
+	}
+
+	INIT_LIST_HEAD(&process->resources.condq);
+	INIT_LIST_HEAD(&process->resources.mutexq);
+	INIT_LIST_HEAD(&process->resources.semq);
+	INIT_LIST_HEAD(&process->resources.monitorq);
+	INIT_LIST_HEAD(&process->resources.eventq);
+	INIT_LIST_HEAD(&process->resources.schedq);
+	INIT_LIST_HEAD(&process->sigwaiters);
+	INIT_LIST_HEAD(&process->thread_list);
+	xntree_init(&process->usems);
+	bitmap_fill(process->timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	cobalt_set_process(process);
+
+	return process;
+}
+
+static void detach_process(struct cobalt_process *process)
+{
+	struct cobalt_ppd *p = &process->sys_ppd;
+
+	if (p->exe_path)
+		kfree(p->exe_path);
+
+	rtdm_fd_cleanup(p);
+	process_hash_remove(process);
+	/*
+	 * CAUTION: the process descriptor might be immediately
+	 * released as a result of calling cobalt_umm_destroy(), so we
+	 * must do this last so as not to tread on stale memory.
+	 */
+	cobalt_umm_destroy(&p->umm);
+}
+
+static void __reclaim_resource(struct cobalt_process *process,
+			       void (*reclaim)(struct cobalt_resnode *node, spl_t s),
+			       struct list_head *local,
+			       struct list_head *global)
+{
+	struct cobalt_resnode *node, *tmp;
+	LIST_HEAD(stash);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(global))
+		goto flush_local;
+
+	list_for_each_entry_safe(node, tmp, global, next) {
+		if (node->owner == process) {
+			list_del(&node->next);
+			list_add(&node->next, &stash);
+		}
+	}
+
+	list_for_each_entry_safe(node, tmp, &stash, next) {
+		reclaim(node, s);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	XENO_BUG_ON(COBALT, !list_empty(&stash));
+
+flush_local:
+	if (list_empty(local))
+		goto out;
+
+	list_for_each_entry_safe(node, tmp, local, next) {
+		reclaim(node, s);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+}
+
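+/*
+ * Reclaim all resources of a given type still owned by @__process,
+ * scanning both its local queue and the global (process-shared)
+ * queue. The __type token is pasted to form the queue member name,
+ * e.g. mutex -> resources.mutexq / cobalt_global_resources.mutexq.
+ */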
+#define cobalt_reclaim_resource(__process, __reclaim, __type)		\
+	__reclaim_resource(__process, __reclaim,			\
+			   &(__process)->resources.__type ## q,		\
+			   &cobalt_global_resources.__type ## q)
+
+static void cobalt_process_detach(void *arg)
+{
+	struct cobalt_process *process = arg;
+
+	cobalt_nsem_reclaim(process);
+	cobalt_timer_reclaim(process);
+	cobalt_sched_reclaim(process);
+	cobalt_reclaim_resource(process, cobalt_cond_reclaim, cond);
+	cobalt_reclaim_resource(process, cobalt_mutex_reclaim, mutex);
+	cobalt_reclaim_resource(process, cobalt_event_reclaim, event);
+	cobalt_reclaim_resource(process, cobalt_monitor_reclaim, monitor);
+	cobalt_reclaim_resource(process, cobalt_sem_reclaim, sem);
+	detach_process(process);
+	/*
+	 * The cobalt_process descriptor release may be deferred until
+	 * the last mapping on the private heap is gone. However, this
+	 * is potentially stale memory already.
+	 */
+}
+
+struct xnthread_personality cobalt_personality = {
+	.name = "cobalt",
+	.magic = 0,
+	.ops = {
+		.attach_process = cobalt_process_attach,
+		.detach_process = cobalt_process_detach,
+		.map_thread = cobalt_thread_map,
+		.exit_thread = cobalt_thread_exit,
+		.finalize_thread = cobalt_thread_finalize,
+	},
+};
+EXPORT_SYMBOL_GPL(cobalt_personality);
+
+__init int cobalt_init(void)
+{
+	unsigned int i, size;
+	int ret;
+
+	size = sizeof(*process_hash) * PROCESS_HASH_SIZE;
+	process_hash = kmalloc(size, GFP_KERNEL);
+	if (process_hash == NULL) {
+		printk(XENO_ERR "cannot allocate processes hash table\n");
+		return -ENOMEM;
+	}
+
+	ret = xndebug_init();
+	if (ret)
+		goto fail_debug;
+
+	for (i = 0; i < PROCESS_HASH_SIZE; i++)
+		INIT_HLIST_HEAD(&process_hash[i]);
+
+	xnsynch_init(&yield_sync, XNSYNCH_FIFO, NULL);
+
+	ret = cobalt_memdev_init();
+	if (ret)
+		goto fail_memdev;
+
+	ret = cobalt_register_personality(&cobalt_personality);
+	if (ret)
+		goto fail_register;
+
+	ret = cobalt_signal_init();
+	if (ret)
+		goto fail_siginit;
+
+	ret = pipeline_trap_kevents();
+	if (ret)
+		goto fail_kevents;
+
+	if (gid_arg != -1)
+		printk(XENO_INFO "allowing access to group %d\n", gid_arg);
+
+	return 0;
+fail_kevents:
+	cobalt_signal_cleanup();
+fail_siginit:
+	cobalt_unregister_personality(0);
+fail_register:
+	cobalt_memdev_cleanup();
+fail_memdev:
+	xnsynch_destroy(&yield_sync);
+	xndebug_cleanup();
+fail_debug:
+	kfree(process_hash);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h
new file mode 100644
index 0000000..279707a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/process.h
@@ -0,0 +1,178 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_PROCESS_H
+#define _COBALT_POSIX_PROCESS_H
+
+#include <linux/list.h>
+#include <linux/bitmap.h>
+#include <pipeline/thread.h>
+#include <cobalt/kernel/ppd.h>
+
+#define NR_PERSONALITIES  4
+#if BITS_PER_LONG < NR_PERSONALITIES
+#error "NR_PERSONALITIES overflows internal bitmap"
+#endif
+
+struct mm_struct;
+struct xnthread_personality;
+struct cobalt_timer;
+
+struct cobalt_resources {
+	struct list_head condq;
+	struct list_head mutexq;
+	struct list_head semq;
+	struct list_head monitorq;
+	struct list_head eventq;
+	struct list_head schedq;
+};
+
+struct cobalt_process {
+	struct mm_struct *mm;
+	struct hlist_node hlink;
+	struct cobalt_ppd sys_ppd;
+	unsigned long permap;
+	struct rb_root usems;
+	struct list_head sigwaiters;
+	struct cobalt_resources resources;
+	struct list_head thread_list;
+	DECLARE_BITMAP(timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	struct cobalt_timer *timers[CONFIG_XENO_OPT_NRTIMERS];
+	void *priv[NR_PERSONALITIES];
+	int ufeatures;
+	unsigned int debugged_threads;
+};
+
+struct cobalt_resnode {
+	struct cobalt_resources *scope;
+	struct cobalt_process *owner;
+	struct list_head next;
+	xnhandle_t handle;
+};
+
+int cobalt_register_personality(struct xnthread_personality *personality);
+
+int cobalt_unregister_personality(int xid);
+
+struct xnthread_personality *cobalt_push_personality(int xid);
+
+void cobalt_pop_personality(struct xnthread_personality *prev);
+
+int cobalt_bind_core(int ufeatures);
+
+int cobalt_bind_personality(unsigned int magic);
+
+struct cobalt_process *cobalt_search_process(struct mm_struct *mm);
+
+int cobalt_map_user(struct xnthread *thread, __u32 __user *u_winoff);
+
+void *cobalt_get_context(int xid);
+
+int cobalt_yield(xnticks_t min, xnticks_t max);
+
+int cobalt_process_init(void);
+
+extern struct list_head cobalt_global_thread_list;
+
+extern struct cobalt_resources cobalt_global_resources;
+
+static inline struct cobalt_process *cobalt_current_process(void)
+{
+	return pipeline_current()->process;
+}
+
+static inline struct cobalt_process *
+cobalt_set_process(struct cobalt_process *process)
+{
+	struct cobalt_threadinfo *p = pipeline_current();
+	struct cobalt_process *old;
+
+	old = p->process;
+	p->process = process;
+
+	return old;
+}
+
+static inline struct cobalt_ppd *cobalt_ppd_get(int global)
+{
+	struct cobalt_process *process;
+
+	if (global || (process = cobalt_current_process()) == NULL)
+		return &cobalt_kernel_ppd;
+
+	return &process->sys_ppd;
+}
+
+static inline struct cobalt_resources *cobalt_current_resources(int pshared)
+{
+	struct cobalt_process *process;
+
+	if (pshared || (process = cobalt_current_process()) == NULL)
+		return &cobalt_global_resources;
+
+	return &process->resources;
+}
+
+static inline
+void __cobalt_add_resource(struct cobalt_resnode *node, int pshared)
+{
+	node->owner = cobalt_current_process();
+	node->scope = cobalt_current_resources(pshared);
+}
+
+#define cobalt_add_resource(__node, __type, __pshared)			\
+	do {								\
+		__cobalt_add_resource(__node, __pshared);		\
+		list_add_tail(&(__node)->next,				\
+			      &((__node)->scope)->__type ## q);		\
+	} while (0)
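+/*
+ * Illustrative use (hypothetical code creating a semaphore resource
+ * which embeds a cobalt_resnode member named resnode):
+ *
+ *	cobalt_add_resource(&sem->resnode, sem, pshared);
+ *	...
+ *	cobalt_del_resource(&sem->resnode);
+ */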
+
+static inline
+void cobalt_del_resource(struct cobalt_resnode *node)
+{
+	list_del(&node->next);
+}
+
+void cobalt_remove_process(struct cobalt_process *process);
+
+void cobalt_signal_yield(void);
+
+void cobalt_stop_debugged_process(struct xnthread *thread);
+
+void cobalt_register_debugged_thread(struct xnthread *thread);
+
+void cobalt_unregister_debugged_thread(struct xnthread *thread);
+
+extern struct xnthread_personality *cobalt_personalities[];
+
+extern struct xnthread_personality cobalt_personality;
+
+int cobalt_handle_setaffinity_event(struct task_struct *task);
+
+#ifdef CONFIG_SMP
+void cobalt_adjust_affinity(struct task_struct *task);
+#else
+static inline void cobalt_adjust_affinity(struct task_struct *task) { }
+#endif
+
+int cobalt_handle_taskexit_event(struct task_struct *task);
+
+int cobalt_handle_cleanup_event(struct mm_struct *mm);
+
+int cobalt_handle_user_return(struct task_struct *task);
+
+#endif /* !_COBALT_POSIX_PROCESS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c
new file mode 100644
index 0000000..994ee88
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.c
@@ -0,0 +1,853 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "clock.h"
+#include <trace/events/cobalt-posix.h>
+
+struct xnsched_class *
+cobalt_sched_policy_param(union xnsched_policy_param *param,
+			  int u_policy, const struct sched_param_ex *param_ex,
+			  xnticks_t *tslice_r)
+{
+	struct xnsched_class *sched_class;
+	int prio, policy;
+	xnticks_t tslice;
+
+	prio = param_ex->sched_priority;
+	tslice = XN_INFINITE;
+	policy = u_policy;
+
+	/*
+	 * NOTE: The user-defined policy may be different from ours,
+	 * e.g. SCHED_FIFO,prio=-7 from userland would be interpreted
+	 * as SCHED_WEAK,prio=7 in kernel space.
+	 */
+	if (prio < 0) {
+		prio = -prio;
+		policy = SCHED_WEAK;
+	}
+	sched_class = &xnsched_class_rt;
+	param->rt.prio = prio;
+
+	switch (policy) {
+	case SCHED_NORMAL:
+		if (prio)
+			return NULL;
+		/*
+		 * When the weak scheduling class is compiled in,
+		 * SCHED_WEAK and SCHED_NORMAL threads are scheduled
+		 * by xnsched_class_weak, at their respective priority
+		 * levels. Otherwise, SCHED_NORMAL is scheduled by
+		 * xnsched_class_rt at priority level #0.
+		 */
+		fallthrough;
+	case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+		if (prio < XNSCHED_WEAK_MIN_PRIO ||
+		    prio > XNSCHED_WEAK_MAX_PRIO)
+			return NULL;
+		param->weak.prio = prio;
+		sched_class = &xnsched_class_weak;
+#else
+		if (prio)
+			return NULL;
+#endif
+		break;
+	case SCHED_RR:
+		/* if unspecified, use current one. */
+		tslice = u_ts2ns(&param_ex->sched_rr_quantum);
+		if (tslice == XN_INFINITE && tslice_r)
+			tslice = *tslice_r;
+		fallthrough;
+	case SCHED_FIFO:
+		if (prio < XNSCHED_FIFO_MIN_PRIO ||
+		    prio > XNSCHED_FIFO_MAX_PRIO)
+			return NULL;
+		break;
+	case SCHED_COBALT:
+		if (prio < XNSCHED_CORE_MIN_PRIO ||
+		    prio > XNSCHED_CORE_MAX_PRIO)
+			return NULL;
+		break;
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	case SCHED_SPORADIC:
+		param->pss.normal_prio = param_ex->sched_priority;
+		param->pss.low_prio = param_ex->sched_ss_low_priority;
+		param->pss.current_prio = param->pss.normal_prio;
+		param->pss.init_budget = u_ts2ns(&param_ex->sched_ss_init_budget);
+		param->pss.repl_period = u_ts2ns(&param_ex->sched_ss_repl_period);
+		param->pss.max_repl = param_ex->sched_ss_max_repl;
+		sched_class = &xnsched_class_sporadic;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	case SCHED_TP:
+		param->tp.prio = param_ex->sched_priority;
+		param->tp.ptid = param_ex->sched_tp_partition;
+		sched_class = &xnsched_class_tp;
+		break;
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	case SCHED_QUOTA:
+		param->quota.prio = param_ex->sched_priority;
+		param->quota.tgid = param_ex->sched_quota_group;
+		sched_class = &xnsched_class_quota;
+		break;
+#endif
+	default:
+		return NULL;
+	}
+
+	if (tslice_r)
+		*tslice_r = tslice;
+
+	return sched_class;
+}
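+/*
+ * Illustrative outcome of the mapping above: a userland request for
+ * SCHED_FIFO with sched_priority == -7 is served as SCHED_WEAK at
+ * priority 7 (by xnsched_class_weak when CONFIG_XENO_OPT_SCHED_WEAK
+ * is enabled), while SCHED_FIFO with sched_priority == 7 is served
+ * by xnsched_class_rt.
+ */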
+
+COBALT_SYSCALL(sched_minprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MIN_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MIN_PRIO;
+		break;
+	case SCHED_NORMAL:
+	case SCHED_WEAK:
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_min_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_maxprio, current, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+	case SCHED_RR:
+	case SCHED_SPORADIC:
+	case SCHED_TP:
+	case SCHED_QUOTA:
+		ret = XNSCHED_FIFO_MAX_PRIO;
+		break;
+	case SCHED_COBALT:
+		ret = XNSCHED_CORE_MAX_PRIO;
+		break;
+	case SCHED_NORMAL:
+		ret = 0;
+		break;
+	case SCHED_WEAK:
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+		ret = XNSCHED_FIFO_MAX_PRIO;
+#else
+		ret = 0;
+#endif
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_max_prio(policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_yield, primary, (void))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+	int ret = 0;
+
+	trace_cobalt_pthread_yield(0);
+
+	/* Maybe some extension wants to handle this. */
+  	if (cobalt_call_extension(sched_yield, &curr->extref, ret) && ret)
+		return ret > 0 ? 0 : ret;
+
+	xnthread_resume(&curr->threadbase, 0);
+	if (xnsched_run())
+		return 0;
+
+	/*
+	 * If the round-robin move did not beget any context switch to
+	 * a thread running in primary mode, then wait for the next
+	 * linux context switch to happen.
+	 *
+	 * Rationale: it is most probably unexpected that
+	 * sched_yield() does not cause any context switch, since this
+	 * service is commonly used for implementing a poor man's
+	 * cooperative scheduling. By waiting for a context switch to
+	 * happen in the regular kernel, we guarantee that the CPU has
+	 * been relinquished for a while.
+	 *
+	 * Typically, this behavior allows a thread running in primary
+	 * mode to effectively yield the CPU to a thread of
+	 * same/higher priority stuck in secondary mode.
+	 *
+	 * NOTE: calling cobalt_yield() with no timeout
+	 * (i.e. XN_INFINITE) is probably never a good idea. This
+	 * means that a SCHED_FIFO non-rt thread stuck in a tight loop
+	 * would prevent the caller from waking up, since no
+	 * linux-originated schedule event would happen for unblocking
+	 * it on the current CPU. For this reason, we pass the
+	 * arbitrary TICK_NSEC value to limit the wait time to a
+	 * reasonable amount.
+	 */
+	return cobalt_yield(TICK_NSEC, TICK_NSEC);
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+
+static inline
+int set_tp_config(int cpu, union sched_config *config, size_t len)
+{
+	xnticks_t offset, duration, next_offset;
+	struct xnsched_tp_schedule *gps, *ogps;
+	struct xnsched_tp_window *w;
+	struct sched_tp_window *p;
+	struct xnsched *sched;
+	spl_t s;
+	int n;
+
+	if (len < sizeof(config->tp))
+		return -EINVAL;
+
+	sched = xnsched_struct(cpu);
+
+	switch (config->tp.op) {
+	case sched_tp_install:
+		if (config->tp.nr_windows > 0)
+			break;
+		fallthrough;
+	case sched_tp_uninstall:
+		gps = NULL;
+		goto set_schedule;
+	case sched_tp_start:
+		xnlock_get_irqsave(&nklock, s);
+		xnsched_tp_start_schedule(sched);
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	case sched_tp_stop:
+		xnlock_get_irqsave(&nklock, s);
+		xnsched_tp_stop_schedule(sched);
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	/* Install a new TP schedule on CPU. */
+
+	gps = xnmalloc(sizeof(*gps) + config->tp.nr_windows * sizeof(*w));
+	if (gps == NULL)
+		return -ENOMEM;
+
+	for (n = 0, p = config->tp.windows, w = gps->pwins, next_offset = 0;
+	     n < config->tp.nr_windows; n++, p++, w++) {
+		/*
+		 * Time windows must be strictly contiguous. Holes may
+		 * be defined using windows assigned to the pseudo
+		 * partition #-1.
+		 */
+		offset = u_ts2ns(&p->offset);
+		if (offset != next_offset)
+			goto cleanup_and_fail;
+
+		duration = u_ts2ns(&p->duration);
+		if (duration <= 0)
+			goto cleanup_and_fail;
+
+		if (p->ptid < -1 ||
+		    p->ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART)
+			goto cleanup_and_fail;
+
+		w->w_offset = next_offset;
+		w->w_part = p->ptid;
+		next_offset += duration;
+	}
+
+	atomic_set(&gps->refcount, 1);
+	gps->pwin_nr = n;
+	gps->tf_duration = next_offset;
+set_schedule:
+	xnlock_get_irqsave(&nklock, s);
+	ogps = xnsched_tp_set_schedule(sched, gps);
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (ogps)
+		xnsched_tp_put_schedule(ogps);
+
+	return 0;
+
+cleanup_and_fail:
+	xnfree(gps);
+
+	return -EINVAL;
+}
+
+static inline
+ssize_t get_tp_config(int cpu, void __user *u_config, size_t len,
+		      union sched_config *(*fetch_config)
+		      (int policy, const void __user *u_config,
+		       size_t *len),
+		      ssize_t (*put_config)(int policy, void __user *u_config,
+					    size_t u_len,
+					    const union sched_config *config,
+					    size_t len))
+{
+	struct xnsched_tp_window *pw, *w;
+	struct xnsched_tp_schedule *gps;
+	struct sched_tp_window *pp, *p;
+	union sched_config *config;
+	struct xnsched *sched;
+	ssize_t ret, elen;
+	spl_t s;
+	int n;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sched = xnsched_struct(cpu);
+	gps = xnsched_tp_get_schedule(sched);
+	if (gps == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	elen = sched_tp_confsz(gps->pwin_nr);
+	config = xnmalloc(elen);
+	if (config == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	config->tp.op = sched_tp_install;
+	config->tp.nr_windows = gps->pwin_nr;
+	for (n = 0, pp = p = config->tp.windows, pw = w = gps->pwins;
+	     n < gps->pwin_nr; pp = p, p++, pw = w, w++, n++) {
+		u_ns2ts(&p->offset, w->w_offset);
+		u_ns2ts(&pp->duration, w->w_offset - pw->w_offset);
+		p->ptid = w->w_part;
+	}
+	u_ns2ts(&pp->duration, gps->tf_duration - pw->w_offset);
+	ret = put_config(SCHED_TP, u_config, len, config, elen);
+	xnfree(config);
+out:
+	xnsched_tp_put_schedule(gps);
+
+	return ret;
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_TP */
+
+static inline int
+set_tp_config(int cpu, union sched_config *config, size_t len)
+{
+	return -EINVAL;
+}
+
+static inline ssize_t
+get_tp_config(int cpu, union sched_config __user *u_config, size_t len,
+	      union sched_config *(*fetch_config)
+	      (int policy, const void __user *u_config,
+	       size_t *len),
+	      ssize_t (*put_config)(int policy, void __user *u_config,
+				    size_t u_len,
+				    const union sched_config *config,
+				    size_t len))
+{
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_TP */
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+static inline
+int set_quota_config(int cpu, union sched_config *config, size_t len)
+{
+	struct __sched_config_quota *p = &config->quota;
+	struct __sched_quota_info *iq = &p->info;
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	struct xnsched *sched;
+	int ret, quota_sum;
+	spl_t s;
+
+	if (len < sizeof(*p))
+		return -EINVAL;
+
+	switch (p->op) {
+	case sched_quota_add:
+		group = xnmalloc(sizeof(*group));
+		if (group == NULL)
+			return -ENOMEM;
+		tg = &group->quota;
+		group->pshared = p->add.pshared != 0;
+		group->scope = cobalt_current_resources(group->pshared);
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		ret = xnsched_quota_create_group(tg, sched, &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			xnfree(group);
+			return ret;
+		}
+		list_add(&group->next, &group->scope->schedq);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	case sched_quota_remove:
+	case sched_quota_force_remove:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->remove.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		ret = xnsched_quota_destroy_group(tg,
+						  p->op == sched_quota_force_remove,
+						  &quota_sum);
+		if (ret) {
+			xnlock_put_irqrestore(&nklock, s);
+			return ret;
+		}
+		list_del(&group->next);
+		xnlock_put_irqrestore(&nklock, s);
+		iq->tgid = tg->tgid;
+		iq->quota = tg->quota_percent;
+		iq->quota_peak = tg->quota_peak_percent;
+		iq->quota_sum = quota_sum;
+		xnfree(group);
+		return 0;
+	case sched_quota_set:
+		xnlock_get_irqsave(&nklock, s);
+		sched = xnsched_struct(cpu);
+		tg = xnsched_quota_find_group(sched, p->set.tgid);
+		if (tg == NULL)
+			goto bad_tgid;
+		group = container_of(tg, struct cobalt_sched_group, quota);
+		if (group->scope != cobalt_current_resources(group->pshared))
+			goto bad_tgid;
+		xnsched_quota_set_limit(tg, p->set.quota, p->set.quota_peak,
+					&quota_sum);
+		xnlock_put_irqrestore(&nklock, s);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	iq->tgid = tg->tgid;
+	iq->quota = tg->quota_percent;
+	iq->quota_peak = tg->quota_peak_percent;
+	iq->quota_sum = quota_sum;
+
+	return 0;
+bad_tgid:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -ESRCH;
+}
+
+static inline
+ssize_t get_quota_config(int cpu, void __user *u_config, size_t len,
+			 union sched_config *(*fetch_config)
+			 (int policy, const void __user *u_config,
+			  size_t *len),
+			 ssize_t (*put_config)(int policy, void __user *u_config,
+					       size_t u_len,
+					       const union sched_config *config,
+					       size_t len))
+{
+	struct cobalt_sched_group *group;
+	struct xnsched_quota_group *tg;
+	union sched_config *config;
+	struct xnsched *sched;
+	ssize_t ret;
+	spl_t s;
+
+	config = fetch_config(SCHED_QUOTA, u_config, &len);
+	if (IS_ERR(config))
+		return PTR_ERR(config);
+
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_struct(cpu);
+	tg = xnsched_quota_find_group(sched, config->quota.get.tgid);
+	if (tg == NULL)
+		goto bad_tgid;
+
+	group = container_of(tg, struct cobalt_sched_group, quota);
+	if (group->scope != cobalt_current_resources(group->pshared))
+		goto bad_tgid;
+
+	config->quota.info.tgid = tg->tgid;
+	config->quota.info.quota = tg->quota_percent;
+	config->quota.info.quota_peak = tg->quota_peak_percent;
+	config->quota.info.quota_sum = xnsched_quota_sum_all(sched);
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = put_config(SCHED_QUOTA, u_config, len, config, sizeof(*config));
+	xnfree(config);
+
+	return ret;
+bad_tgid:
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(config);
+
+	return -ESRCH;
+}
+
+#else /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+static inline
+int set_quota_config(int cpu, union sched_config *config, size_t len)
+{
+	return -EINVAL;
+}
+
+static inline
+ssize_t get_quota_config(int cpu, void __user *u_config,
+			 size_t len,
+			 union sched_config *(*fetch_config)
+			 (int policy, const void __user *u_config,
+			  size_t *len),
+			 ssize_t (*put_config)(int policy, void __user *u_config,
+					       size_t u_len,
+					       const union sched_config *config,
+					       size_t len))
+{
+	return -EINVAL;
+}
+
+#endif /* !CONFIG_XENO_OPT_SCHED_QUOTA */
+
+static union sched_config *
+sched_fetch_config(int policy, const void __user *u_config, size_t *len)
+{
+	union sched_config *buf;
+	int ret;
+
+	if (u_config == NULL)
+		return ERR_PTR(-EFAULT);
+
+	if (policy == SCHED_QUOTA && *len < sizeof(buf->quota))
+		return ERR_PTR(-EINVAL);
+
+	buf = xnmalloc(*len);
+	if (buf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cobalt_copy_from_user(buf, u_config, *len);
+	if (ret) {
+		xnfree(buf);
+		return ERR_PTR(ret);
+	}
+
+	return buf;
+}
+
+static int sched_ack_config(int policy, const union sched_config *config,
+			    void __user *u_config)
+{
+	union sched_config __user *u_p = u_config;
+
+	if (policy != SCHED_QUOTA)
+		return 0;
+
+	return u_p == NULL ? -EFAULT :
+		cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+				       sizeof(u_p->quota.info));
+}
+
+static ssize_t sched_put_config(int policy,
+				void __user *u_config, size_t u_len,
+				const union sched_config *config, size_t len)
+{
+	union sched_config *u_p = u_config;
+
+	if (u_config == NULL)
+		return -EFAULT;
+
+	if (policy == SCHED_QUOTA) {
+		if (u_len < sizeof(config->quota))
+			return -EINVAL;
+		return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+					      sizeof(u_p->quota.info)) ?:
+			sizeof(u_p->quota.info);
+	}
+
+	return cobalt_copy_to_user(u_config, config, len) ?: len;
+}
+
+int __cobalt_sched_setconfig_np(int cpu, int policy,
+				void __user *u_config,
+				size_t len,
+				union sched_config *(*fetch_config)
+				(int policy, const void __user *u_config,
+				 size_t *len),
+				int (*ack_config)(int policy,
+						  const union sched_config *config,
+						  void __user *u_config))
+{
+	union sched_config *buf;
+	int ret;
+
+	trace_cobalt_sched_setconfig(cpu, policy, len);
+
+	if (cpu < 0 || cpu >= NR_CPUS || !xnsched_threading_cpu(cpu))
+		return -EINVAL;
+
+	if (len == 0)
+		return -EINVAL;
+
+	buf = fetch_config(policy, u_config, &len);
+	if (IS_ERR(buf))
+		return PTR_ERR(buf);
+
+	switch (policy)	{
+	case SCHED_TP:
+		ret = set_tp_config(cpu, buf, len);
+		break;
+	case SCHED_QUOTA:
+		ret = set_quota_config(cpu, buf, len);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	if (ret == 0)
+		ret = ack_config(policy, buf, u_config);
+
+	xnfree(buf);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_setconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_setconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_ack_config);
+}
+
+ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
+				    void __user *u_config,
+				    size_t len,
+				    union sched_config *(*fetch_config)
+				    (int policy, const void __user *u_config,
+				     size_t *len),
+				    ssize_t (*put_config)(int policy,
+							  void __user *u_config,
+							  size_t u_len,
+							  const union sched_config *config,
+							  size_t len))
+{
+	ssize_t ret;
+
+	switch (policy)	{
+	case SCHED_TP:
+		ret = get_tp_config(cpu, u_config, len,
+				    fetch_config, put_config);
+		break;
+	case SCHED_QUOTA:
+		ret = get_quota_config(cpu, u_config, len,
+				       fetch_config, put_config);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	trace_cobalt_sched_get_config(cpu, policy, ret);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sched_getconfig_np, conforming,
+	       (int cpu, int policy,
+		union sched_config __user *u_config,
+		size_t len))
+{
+	return __cobalt_sched_getconfig_np(cpu, policy, u_config, len,
+					   sched_fetch_config, sched_put_config);
+}
+
+int __cobalt_sched_weightprio(int policy,
+			      const struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	int prio;
+
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, NULL);
+	if (sched_class == NULL)
+		return -EINVAL;
+
+	prio = param_ex->sched_priority;
+	if (prio < 0)
+		prio = -prio;
+
+	return prio + sched_class->weight;
+}
+
+COBALT_SYSCALL(sched_weightprio, current,
+	       (int policy, const struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return __cobalt_sched_weightprio(policy, &param_ex);
+}
+
+int cobalt_sched_setscheduler_ex(pid_t pid,
+				 int policy,
+				 const struct sched_param_ex *param_ex,
+				 __u32 __user *u_winoff,
+				 int __user *u_promoted)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret, promoted = 0;
+	spl_t s;
+
+	trace_cobalt_sched_setscheduler(pid, policy, param_ex);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL) {
+		if (u_winoff == NULL || pid != task_pid_vnr(current))
+			return -ESRCH;
+
+		thread = cobalt_thread_shadow(&hkey, u_winoff);
+		if (IS_ERR(thread))
+			return PTR_ERR(thread);
+
+		promoted = 1;
+	}
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(sched_setscheduler_ex, conforming,
+	       (pid_t pid,
+		int policy,
+		const struct sched_param_ex __user *u_param,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return cobalt_sched_setscheduler_ex(pid, policy, &param_ex,
+					    u_winoff, u_promoted);
+}
+
+int cobalt_sched_getscheduler_ex(pid_t pid,
+				 int *policy_r,
+				 struct sched_param_ex *param_ex)
+{
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	trace_cobalt_sched_getscheduler(pid);
+
+	if (pid) {
+		xnlock_get_irqsave(&nklock, s);
+		thread = cobalt_thread_find(pid);
+		xnlock_put_irqrestore(&nklock, s);
+	} else
+		thread = cobalt_current_thread();
+
+	if (thread == NULL)
+		return -ESRCH;
+
+	return __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex);
+}
+
+COBALT_SYSCALL(sched_getscheduler_ex, current,
+	       (pid_t pid,
+		int __user *u_policy,
+		struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	if (cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex)) ||
+	    cobalt_copy_to_user(u_policy, &policy, sizeof(policy)))
+		return -EFAULT;
+
+	return 0;
+}
+
+void cobalt_sched_reclaim(struct cobalt_process *process)
+{
+	struct cobalt_resources *p = &process->resources;
+	struct cobalt_sched_group *group;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (!list_empty(&p->schedq)) {
+		group = list_get_entry(&p->schedq, struct cobalt_sched_group, next);
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+		xnsched_quota_destroy_group(&group->quota, 1, NULL);
+#endif
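+		/*
+		 * Drop the super lock across xnfree(), then re-grab
+		 * it before walking to the next group.
+		 */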
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(group);
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h
new file mode 100644
index 0000000..2b23be0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sched.h
@@ -0,0 +1,109 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SCHED_H
+#define _COBALT_POSIX_SCHED_H
+
+#include <linux/list.h>
+#include <cobalt/kernel/sched.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_resources;
+struct cobalt_process;
+
+struct cobalt_sched_group {
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	struct xnsched_quota_group quota;
+#endif
+	struct cobalt_resources *scope;
+	int pshared;
+	struct list_head next;
+};
+
+int __cobalt_sched_weightprio(int policy,
+			      const struct sched_param_ex *param_ex);
+
+int __cobalt_sched_setconfig_np(int cpu, int policy,
+				void __user *u_config,
+				size_t len,
+				union sched_config *(*fetch_config)
+				(int policy, const void __user *u_config,
+				 size_t *len),
+				int (*ack_config)(int policy,
+						  const union sched_config *config,
+						  void __user *u_config));
+
+ssize_t __cobalt_sched_getconfig_np(int cpu, int policy,
+				    void __user *u_config,
+				    size_t len,
+				    union sched_config *(*fetch_config)
+				    (int policy, const void __user *u_config,
+				     size_t *len),
+				    ssize_t (*put_config)(int policy,
+							  void __user *u_config, size_t u_len,
+							  const union sched_config *config,
+							  size_t len));
+int cobalt_sched_setscheduler_ex(pid_t pid,
+				 int policy,
+				 const struct sched_param_ex *param_ex,
+				 __u32 __user *u_winoff,
+				 int __user *u_promoted);
+
+int cobalt_sched_getscheduler_ex(pid_t pid,
+				 int *policy_r,
+				 struct sched_param_ex *param_ex);
+
+struct xnsched_class *
+cobalt_sched_policy_param(union xnsched_policy_param *param,
+			  int u_policy, const struct sched_param_ex *param_ex,
+			  xnticks_t *tslice_r);
+
+COBALT_SYSCALL_DECL(sched_yield, (void));
+
+COBALT_SYSCALL_DECL(sched_weightprio,
+		    (int policy, const struct sched_param_ex __user *u_param));
+
+COBALT_SYSCALL_DECL(sched_minprio, (int policy));
+
+COBALT_SYSCALL_DECL(sched_maxprio, (int policy));
+
+COBALT_SYSCALL_DECL(sched_setconfig_np,
+		    (int cpu,
+		     int policy,
+		     union sched_config __user *u_config,
+		     size_t len));
+
+COBALT_SYSCALL_DECL(sched_getconfig_np,
+		    (int cpu, int policy,
+		     union sched_config __user *u_config,
+		     size_t len));
+
+COBALT_SYSCALL_DECL(sched_setscheduler_ex,
+		    (pid_t pid,
+		     int policy,
+		     const struct sched_param_ex __user *u_param,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+COBALT_SYSCALL_DECL(sched_getscheduler_ex,
+		    (pid_t pid,
+		     int __user *u_policy,
+		     struct sched_param_ex __user *u_param));
+
+void cobalt_sched_reclaim(struct cobalt_process *process);
+
+#endif /* !_COBALT_POSIX_SCHED_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c
new file mode 100644
index 0000000..71b8c52
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.c
@@ -0,0 +1,667 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ * Copyright (C) 2014,2015 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/stddef.h>
+#include <linux/err.h>
+#include <cobalt/kernel/time.h>
+#include "internal.h"
+#include "thread.h"
+#include "clock.h"
+#include "sem.h"
+#include <trace/events/cobalt-posix.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static int sem_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
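+	/* Nothing to report yet; this vfile is a stub. */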
+	return 0;
+}
+
+static struct xnvfile_regular_ops sem_vfile_ops = {
+	.show = sem_vfile_show,
+};
+
+static struct xnpnode_regular __sem_pnode = {
+	.node = {
+		.dirname = "sem",
+		.root = &posix_ptree,
+		.ops = &xnregistry_vfreg_ops,
+	},
+	.vfile = {
+		.ops = &sem_vfile_ops,
+	},
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __sem_pnode = {
+	.node = {
+		.dirname = "sem",
+	}
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static inline struct cobalt_resources *sem_kqueue(struct cobalt_sem *sem)
+{
+	int pshared = !!(sem->flags & SEM_PSHARED);
+	return cobalt_current_resources(pshared);
+}
+
+static inline int sem_check(struct cobalt_sem *sem)
+{
+	if (sem == NULL || sem->magic != COBALT_SEM_MAGIC)
+		return -EINVAL;
+
+	if (sem->resnode.scope && sem->resnode.scope != sem_kqueue(sem))
+		return -EPERM;
+
+	return 0;
+}
+
+int __cobalt_sem_destroy(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	sem = xnregistry_lookup(handle, NULL);
+	if (!cobalt_obj_active(sem, COBALT_SEM_MAGIC, typeof(*sem))) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	if (--sem->refs) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	cobalt_mark_deleted(sem);
+	if (!sem->pathname)
+		cobalt_del_resource(&sem->resnode);
+	if (xnsynch_destroy(&sem->synchbase) == XNSYNCH_RESCHED) {
+		xnsched_run();
+		ret = 1;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnregistry_remove(sem->resnode.handle);
+	if (sem->pathname)
+		putname(sem->pathname);
+
+	cobalt_umm_free(&cobalt_ppd_get(!!(sem->flags & SEM_PSHARED))->umm,
+			sem->state);
+
+	xnfree(sem);
+
+	return ret;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+struct cobalt_sem *
+__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sm,
+		  int flags, unsigned int value)
+{
+	struct cobalt_sem_state *state;
+	struct cobalt_sem *sem, *osem;
+	struct cobalt_ppd *sys_ppd;
+	int ret, sflags, pshared;
+	struct list_head *semq;
+	spl_t s;
+
+	if ((flags & SEM_PULSE) != 0 && value > 0) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	sem = xnmalloc(sizeof(*sem));
+	if (sem == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	pshared = !!(flags & SEM_PSHARED);
+	sys_ppd = cobalt_ppd_get(pshared);
+	state = cobalt_umm_alloc(&sys_ppd->umm, sizeof(*state));
+	if (state == NULL) {
+		ret = -EAGAIN;
+		goto err_free_sem;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	semq = &cobalt_current_resources(pshared)->semq;
+	if ((sm->magic == COBALT_SEM_MAGIC && !list_empty(semq)) ||
+	    sm->magic == COBALT_NAMED_SEM_MAGIC) {
+		osem = xnregistry_lookup(sm->handle, NULL);
+		if (cobalt_obj_active(osem, COBALT_SEM_MAGIC, typeof(*osem))) {
+			ret = -EBUSY;
+			goto err_lock_put;
+		}
+	}
+
+	if (value > (unsigned)SEM_VALUE_MAX) {
+		ret = -EINVAL;
+		goto err_lock_put;
+	}
+
+	ret = xnregistry_enter(name ?: "", sem, &sem->resnode.handle,
+			       name ? &__sem_pnode.node : NULL);
+	if (ret < 0)
+		goto err_lock_put;
+
+	sem->magic = COBALT_SEM_MAGIC;
+	if (!name)
+		cobalt_add_resource(&sem->resnode, sem, pshared);
+	else
+		sem->resnode.scope = NULL;
+	sflags = flags & SEM_FIFO ? 0 : XNSYNCH_PRIO;
+	xnsynch_init(&sem->synchbase, sflags, NULL);
+
+	sem->state = state;
+	atomic_set(&state->value, value);
+	state->flags = flags;
+	sem->flags = flags;
+	sem->refs = name ? 2 : 1;
+	sem->pathname = NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	__cobalt_sem_shadow_init(sem,
+			name ? COBALT_NAMED_SEM_MAGIC : COBALT_SEM_MAGIC, sm);
+
+	trace_cobalt_psem_init(name ?: "anon",
+			       sem->resnode.handle, flags, value);
+
+	return sem;
+
+err_lock_put:
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_umm_free(&sys_ppd->umm, state);
+err_free_sem:
+	xnfree(sem);
+out:
+	trace_cobalt_psem_init_failed(name ?: "anon", flags, value, ret);
+
+	return ERR_PTR(ret);
+}
+
+void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic,
+			      struct cobalt_sem_shadow *sm)
+{
+	__u32 flags = sem->state->flags;
+	struct cobalt_ppd *sys_ppd;
+
+	sys_ppd = cobalt_ppd_get(!!(flags & SEM_PSHARED));
+
+	sm->magic = magic;
+	sm->handle = sem->resnode.handle;
+	sm->state_offset = cobalt_umm_offset(&sys_ppd->umm, sem->state);
+	if (sem->state->flags & SEM_PSHARED)
+		sm->state_offset = -sm->state_offset;
+}
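+
+/*
+ * A sketch of how user-space is expected to decode the shadow,
+ * assuming the usual libcobalt heap bases (names are illustrative):
+ * a negative state_offset denotes the shared heap, a positive one
+ * the process-private heap.
+ *
+ *	if (sm->state_offset < 0)
+ *		state = shared_heap_base + (-sm->state_offset);
+ *	else
+ *		state = private_heap_base + sm->state_offset;
+ */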
+
+static int sem_destroy(struct cobalt_sem_shadow *sm)
+{
+	struct cobalt_sem *sem;
+	int warn, ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (sm->magic != COBALT_SEM_MAGIC) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	sem = xnregistry_lookup(sm->handle, NULL);
+	ret = sem_check(sem);
+	if (ret)
+		goto fail;
+
+	if ((sem->flags & SEM_NOBUSYDEL) != 0 &&
+	    xnsynch_pended_p(&sem->synchbase)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	warn = sem->flags & SEM_WARNDEL;
+	cobalt_mark_deleted(sm);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = __cobalt_sem_destroy(sem->resnode.handle);
+
+	return warn ? ret : 0;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+static inline int do_trywait(struct cobalt_sem *sem)
+{
+	int ret;
+
+	ret = sem_check(sem);
+	if (ret)
+		return ret;
+
+	if (atomic_sub_return(1, &sem->state->value) < 0)
+		return -EAGAIN;
+
+	return 0;
+}
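+
+/*
+ * The counter protocol: value > 0 counts free tokens, value == 0
+ * means taken with no waiter, value == -N means N sleepers. E.g. two
+ * threads blocking on a drained semaphore leave state->value at -2;
+ * each sem_post() then increments the count and wakes a single
+ * sleeper until it is back to zero.
+ */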
+
+static int sem_wait(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret, info;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = do_trywait(sem);
+	if (ret != -EAGAIN)
+		goto out;
+
+	ret = 0;
+	info = xnsynch_sleep_on(&sem->synchbase, XN_INFINITE, XN_RELATIVE);
+	if (info & XNRMID) {
+		ret = -EINVAL;
+	} else if (info & XNBREAK) {
+		atomic_inc(&sem->state->value); /* undo do_trywait() */
+		ret = -EINTR;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
+			   const struct timespec64 *ts)
+{
+	int ret, info;
+	bool validate_ts = true;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	xntmode_t tmode;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_timedwait(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		sem = xnregistry_lookup(handle, NULL);
+		ret = do_trywait(sem);
+		if (ret != -EAGAIN)
+			break;
+
+		/*
+		 * POSIX states that the validity of the timeout spec
+		 * _need_ not be checked if the semaphore can be
+		 * locked immediately. We follow this behavior, even
+		 * though it is actually more complex to implement,
+		 * to keep applications ported from Linux happy.
+		 */
+		if (validate_ts) {
+			atomic_inc(&sem->state->value);
+			if (!ts) {
+				ret = -EFAULT;
+				break;
+			}
+			if (!timespec64_valid(ts)) {
+				ret = -EINVAL;
+				break;
+			}
+			validate_ts = false;
+			continue;
+		}
+
+		ret = 0;
+		tmode = sem->flags & SEM_RAWCLOCK ? XN_ABSOLUTE : XN_REALTIME;
+		info = xnsynch_sleep_on(&sem->synchbase, ts2ns(ts) + 1, tmode);
+		if (info & XNRMID)
+			ret = -EINVAL;
+		else if (info & (XNBREAK|XNTIMEO)) {
+			ret = (info & XNBREAK) ? -EINTR : -ETIMEDOUT;
+			atomic_inc(&sem->state->value);
+		}
+		break;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem,
+			     const struct __kernel_timespec __user *u_ts)
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
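+	/*
+	 * A nonzero ret (u_ts missing or unreadable) propagates a
+	 * NULL timeout, which the wait path rejects only if it would
+	 * actually have to block.
+	 */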
+	if (u_ts)
+		ret = cobalt_get_timespec64(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+static int sem_post(xnhandle_t handle)
+{
+	struct cobalt_sem *sem;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret)
+		goto out;
+
+	if (atomic_read(&sem->state->value) == SEM_VALUE_MAX) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (atomic_inc_return(&sem->state->value) <= 0) {
+		if (xnsynch_wakeup_one_sleeper(&sem->synchbase))
+			xnsched_run();
+	} else if (sem->flags & SEM_PULSE)
+		atomic_set(&sem->state->value, 0);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+static int sem_getvalue(xnhandle_t handle, int *value)
+{
+	struct cobalt_sem *sem;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret) {
+		xnlock_put_irqrestore(&nklock, s);
+		return ret;
+	}
+
+	*value = atomic_read(&sem->state->value);
+	if ((sem->flags & SEM_REPORT) == 0 && *value < 0)
+		*value = 0;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
+COBALT_SYSCALL(sem_init, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		int flags, unsigned int value))
+{
+	struct cobalt_sem_shadow sm;
+	struct cobalt_sem *sem;
+
+	if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm)))
+		return -EFAULT;
+
+	if (flags & ~(SEM_FIFO|SEM_PULSE|SEM_PSHARED|SEM_REPORT|
+		      SEM_WARNDEL|SEM_RAWCLOCK|SEM_NOBUSYDEL))
+		return -EINVAL;
+
+	sem = __cobalt_sem_init(NULL, &sm, flags, value);
+	if (IS_ERR(sem))
+		return PTR_ERR(sem);
+
+	return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem));
+}
+
+COBALT_SYSCALL(sem_post, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_post(handle);
+
+	return sem_post(handle);
+}
+
+COBALT_SYSCALL(sem_wait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_wait(handle);
+
+	return sem_wait(handle);
+}
+
+COBALT_SYSCALL(sem_timedwait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		const struct __user_old_timespec __user *u_ts))
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = cobalt_get_u_timespec(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+COBALT_SYSCALL(sem_timedwait64, primary,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_sem_timedwait64(u_sem, u_ts);
+}
+
+COBALT_SYSCALL(sem_trywait, primary,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	int ret;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_trywait(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+	sem = xnregistry_lookup(handle, NULL);
+	ret = do_trywait(sem);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sem_getvalue, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		int __user *u_sval))
+{
+	int ret, sval = -1;
+	xnhandle_t handle;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+
+	ret = sem_getvalue(handle, &sval);
+	trace_cobalt_psem_getvalue(handle, sval);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_sval, &sval, sizeof(sval));
+}
+
+COBALT_SYSCALL(sem_destroy, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem_shadow sm;
+	int err;
+
+	if (cobalt_copy_from_user(&sm, u_sem, sizeof(sm)))
+		return -EFAULT;
+
+	trace_cobalt_psem_destroy(sm.handle);
+
+	err = sem_destroy(&sm);
+	if (err < 0)
+		return err;
+
+	return cobalt_copy_to_user(u_sem, &sm, sizeof(*u_sem)) ?: err;
+}
+
+COBALT_SYSCALL(sem_broadcast_np, current,
+	       (struct cobalt_sem_shadow __user *u_sem))
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+	int ret;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_broadcast(handle);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sem = xnregistry_lookup(handle, NULL);
+	ret = sem_check(sem);
+	if (ret == 0 && atomic_read(&sem->state->value) < 0) {
+		atomic_set(&sem->state->value, 0);
+		xnsynch_flush(&sem->synchbase, 0);
+		xnsched_run();
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(sem_inquire, current,
+	       (struct cobalt_sem_shadow __user *u_sem,
+		struct cobalt_sem_info __user *u_info,
+		pid_t __user *u_waitlist,
+		size_t waitsz))
+{
+	int val = 0, nrwait = 0, nrpids, ret = 0;
+	unsigned long pstamp, nstamp = 0;
+	struct cobalt_sem_info info;
+	pid_t *t = NULL, fbuf[16];
+	struct xnthread *thread;
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	spl_t s;
+
+	handle = cobalt_get_handle_from_user(&u_sem->handle);
+	trace_cobalt_psem_inquire(handle);
+
+	nrpids = waitsz / sizeof(pid_t);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	for (;;) {
+		pstamp = nstamp;
+		sem = xnregistry_lookup(handle, &nstamp);
+		if (sem == NULL || sem->magic != COBALT_SEM_MAGIC) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		/*
+		 * Allocate memory to return the wait list without
+		 * holding any lock, then revalidate the handle.
+		 */
+		if (t == NULL) {
+			val = atomic_read(&sem->state->value);
+			if (val >= 0 || u_waitlist == NULL)
+				break;
+			xnlock_put_irqrestore(&nklock, s);
+			if (nrpids > -val)
+				nrpids = -val;
+			if (-val <= ARRAY_SIZE(fbuf))
+				t = fbuf; /* Use fast buffer. */
+			else {
+				t = xnmalloc(-val * sizeof(pid_t));
+				if (t == NULL)
+					return -ENOMEM;
+			}
+			xnlock_get_irqsave(&nklock, s);
+		} else if (pstamp == nstamp)
+			break;
+		else if (val != atomic_read(&sem->state->value)) {
+			xnlock_put_irqrestore(&nklock, s);
+			if (t != fbuf)
+				xnfree(t);
+			t = NULL;
+			xnlock_get_irqsave(&nklock, s);
+		}
+	}
+
+	info.flags = sem->flags;
+	info.value = (sem->flags & SEM_REPORT) || val >= 0 ? val : 0;
+	info.nrwait = val < 0 ? -val : 0;
+
+	if (xnsynch_pended_p(&sem->synchbase) && u_waitlist != NULL) {
+		xnsynch_for_each_sleeper(thread, &sem->synchbase) {
+			if (nrwait >= nrpids)
+				break;
+			t[nrwait++] = xnthread_host_pid(thread);
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	ret = cobalt_copy_to_user(u_info, &info, sizeof(info));
+	if (ret == 0 && nrwait > 0)
+		ret = cobalt_copy_to_user(u_waitlist, t, nrwait * sizeof(pid_t));
+
+	if (t && t != fbuf)
+		xnfree(t);
+
+	return ret ?: nrwait;
+}
+
+void cobalt_sem_reclaim(struct cobalt_resnode *node, spl_t s)
+{
+	struct cobalt_sem *sem;
+	xnhandle_t handle;
+	int named, ret;
+
+	sem = container_of(node, struct cobalt_sem, resnode);
+	named = (sem->flags & SEM_NAMED) != 0;
+	handle = node->handle;
+	xnlock_put_irqrestore(&nklock, s);
+	ret = __cobalt_sem_destroy(handle);
+	if (named && ret == -EBUSY)
+		xnregistry_unlink(xnregistry_key(handle));
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h
new file mode 100644
index 0000000..d7dbb90
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/sem.h
@@ -0,0 +1,133 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SEM_H
+#define _COBALT_POSIX_SEM_H
+
+#include <linux/kernel.h>
+#include <linux/fcntl.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/registry.h>
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/process.h>
+
+struct cobalt_process;
+struct filename;
+
+struct cobalt_sem {
+	unsigned int magic;
+	struct xnsynch synchbase;
+	struct cobalt_sem_state *state;
+	int flags;
+	unsigned int refs;
+	struct filename *pathname;
+	struct cobalt_resnode resnode;
+};
+
+/* Copied from Linuxthreads semaphore.h. */
+struct _sem_fastlock
+{
+  long int __status;
+  int __spinlock;
+};
+
+typedef struct
+{
+  struct _sem_fastlock __sem_lock;
+  int __sem_value;
+  long __sem_waiting;
+} sem_t;
+
+#include <cobalt/uapi/sem.h>
+
+#define SEM_VALUE_MAX	(INT_MAX)
+#define SEM_FAILED	NULL
+#define SEM_NAMED	0x80000000
+
+struct cobalt_sem_shadow __user *
+__cobalt_sem_open(struct cobalt_sem_shadow __user *usm,
+		  const char __user *u_name,
+		  int oflags, mode_t mode, unsigned int value);
+
+int __cobalt_sem_timedwait(struct cobalt_sem_shadow __user *u_sem,
+			   const struct timespec64 *ts);
+
+int __cobalt_sem_timedwait64(struct cobalt_sem_shadow __user *u_sem,
+			     const struct __kernel_timespec __user *u_ts);
+
+int __cobalt_sem_destroy(xnhandle_t handle);
+
+void cobalt_nsem_reclaim(struct cobalt_process *process);
+
+struct cobalt_sem *
+__cobalt_sem_init(const char *name, struct cobalt_sem_shadow *sem,
+		  int flags, unsigned value);
+
+void __cobalt_sem_shadow_init(struct cobalt_sem *sem, __u32 magic,
+			      struct cobalt_sem_shadow *sm);
+
+COBALT_SYSCALL_DECL(sem_init,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     int flags, unsigned value));
+
+COBALT_SYSCALL_DECL(sem_post,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_wait,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_timedwait,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __user_old_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(sem_timedwait64,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL_DECL(sem_trywait,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_getvalue,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     int __user *u_sval));
+
+COBALT_SYSCALL_DECL(sem_destroy,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_open,
+		    (struct cobalt_sem_shadow __user *__user *u_addrp,
+		     const char __user *u_name,
+		     int oflags, mode_t mode, unsigned int value));
+
+COBALT_SYSCALL_DECL(sem_close,
+		    (struct cobalt_sem_shadow __user *usm));
+
+COBALT_SYSCALL_DECL(sem_unlink, (const char __user *u_name));
+
+COBALT_SYSCALL_DECL(sem_broadcast_np,
+		    (struct cobalt_sem_shadow __user *u_sem));
+
+COBALT_SYSCALL_DECL(sem_inquire,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     struct cobalt_sem_info __user *u_info,
+		     pid_t __user *u_waitlist,
+		     size_t waitsz));
+
+void cobalt_sem_reclaim(struct cobalt_resnode *node,
+			spl_t s);
+
+#endif /* !_COBALT_POSIX_SEM_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c
new file mode 100644
index 0000000..5f5cb85
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.c
@@ -0,0 +1,638 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/compat.h>
+#include <cobalt/kernel/time.h>
+#include "internal.h"
+#include "signal.h"
+#include "thread.h"
+#include "timer.h"
+#include "clock.h"
+
+static void *sigpending_mem;
+
+static LIST_HEAD(sigpending_pool);
+
+/*
+ * The maximum number of signal notifications that may be pending at
+ * any given time, excluding timers. Cobalt signals are always thread
+ * directed, and we assume that in practice, each signal number is
+ * processed by a dedicated thread. We provide for up to three
+ * real-time signal events to pile up, and a single notification
+ * pending for other signals. Timers use a fast queuing logic
+ * maintaining a count of overruns, and therefore do not consume any
+ * memory from this pool.
+ */
+#define __SIGPOOL_SIZE  (sizeof(struct cobalt_sigpending) *	\
+			 (_NSIG + (SIGRTMAX - SIGRTMIN) * 2))
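+
+/*
+ * E.g. with _NSIG == 64 and 32 real-time signals on a typical Linux
+ * configuration, this provisions 64 + 32 * 2 == 128 pending blocks.
+ */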
+
+static int cobalt_signal_deliver(struct cobalt_thread *thread,
+				 struct cobalt_sigpending *sigp,
+				 int group)
+{				/* nklocked, IRQs off */
+	struct cobalt_sigwait_context *swc;
+	struct xnthread_wait_context *wc;
+	struct list_head *sigwaiters;
+	int sig, ret;
+
+	sig = sigp->si.si_signo;
+	XENO_BUG_ON(COBALT, sig < 1 || sig > _NSIG);
+
+	/*
+	 * Attempt to deliver the signal immediately to the initial
+	 * target that waits for it.
+	 */
+	if (xnsynch_pended_p(&thread->sigwait)) {
+		wc = xnthread_get_wait_context(&thread->threadbase);
+		swc = container_of(wc, struct cobalt_sigwait_context, wc);
+		if (sigismember(swc->set, sig))
+			goto deliver;
+	}
+
+	/*
+	 * If that does not work out and we are sending to a thread
+	 * group, try to deliver to any thread from the same process
+	 * waiting for that signal.
+	 */
+	sigwaiters = &thread->process->sigwaiters;
+	if (!group || list_empty(sigwaiters))
+		return 0;
+
+	list_for_each_entry(thread, sigwaiters, signext) {
+		wc = xnthread_get_wait_context(&thread->threadbase);
+		swc = container_of(wc, struct cobalt_sigwait_context, wc);
+		if (sigismember(swc->set, sig))
+			goto deliver;
+	}
+
+	return 0;
+deliver:
+	cobalt_copy_siginfo(sigp->si.si_code, swc->si, &sigp->si);
+	cobalt_call_extension(signal_deliver, &thread->extref,
+			      ret, swc->si, sigp);
+	xnthread_complete_wait(&swc->wc);
+	xnsynch_wakeup_one_sleeper(&thread->sigwait);
+	list_del(&thread->signext);
+
+	/*
+	 * This is an immediate delivery bypassing any queuing, so we
+	 * have to release the sigpending data right away before
+	 * leaving.
+	 */
+	cobalt_signal_free(sigp);
+
+	return 1;
+}
+
+int cobalt_signal_send(struct cobalt_thread *thread,
+		       struct cobalt_sigpending *sigp,
+		       int group)
+{				/* nklocked, IRQs off */
+	struct list_head *sigq;
+	int sig, ret;
+
+	/* Can we deliver this signal immediately? */
+	ret = cobalt_signal_deliver(thread, sigp, group);
+	if (ret)
+		return ret;	/* Yep, done. */
+
+	/*
+	 * Nope, attempt to queue it. We start by calling any Cobalt
+	 * extension for queuing the signal first.
+	 */
+	if (cobalt_call_extension(signal_queue, &thread->extref, ret, sigp)) {
+		if (ret)
+			/* Queuing done remotely or error. */
+			return ret;
+	}
+
+	sig = sigp->si.si_signo;
+	sigq = thread->sigqueues + sig - 1;
+	if (!list_empty(sigq)) {
+		/* Queue non-rt signals only once. */
+		if (sig < SIGRTMIN)
+			return 0;
+		/* Queue rt signal source only once (SI_TIMER). */
+		if (!list_empty(&sigp->next))
+			return 0;
+	}
+
+	sigaddset(&thread->sigpending, sig);
+	list_add_tail(&sigp->next, sigq);
+
+	return 1;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_send);
+
+int cobalt_signal_send_pid(pid_t pid, struct cobalt_sigpending *sigp)
+{				/* nklocked, IRQs off */
+	struct cobalt_thread *thread;
+
+	thread = cobalt_thread_find(pid);
+	if (thread)
+		return cobalt_signal_send(thread, sigp, 0);
+
+	return -ESRCH;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_send_pid);
+
+struct cobalt_sigpending *cobalt_signal_alloc(void)
+{				/* nklocked, IRQs off */
+	struct cobalt_sigpending *sigp;
+
+	if (list_empty(&sigpending_pool)) {
+		if (xnclock_ratelimit())
+			printk(XENO_WARNING "signal bucket pool underflows\n");
+		return NULL;
+	}
+
+	sigp = list_get_entry(&sigpending_pool, struct cobalt_sigpending, next);
+	INIT_LIST_HEAD(&sigp->next);
+
+	return sigp;
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_alloc);
+
+void cobalt_signal_free(struct cobalt_sigpending *sigp)
+{				/* nklocked, IRQs off */
+	if ((void *)sigp >= sigpending_mem &&
+	    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE)
+		list_add_tail(&sigp->next, &sigpending_pool);
+}
+EXPORT_SYMBOL_GPL(cobalt_signal_free);
+
+void cobalt_signal_flush(struct cobalt_thread *thread)
+{
+	struct cobalt_sigpending *sigp, *tmp;
+	struct list_head *sigq;
+	spl_t s;
+	int n;
+
+	/*
+	 * TCB is not accessible from userland anymore, no locking
+	 * required.
+	 */
+	if (sigisemptyset(&thread->sigpending))
+		return;
+
+	for (n = 0; n < _NSIG; n++) {
+		sigq = thread->sigqueues + n;
+		if (list_empty(sigq))
+			continue;
+		/*
+		 * sigpending blocks must be unlinked so that we
+		 * detect this fact when deleting their respective
+		 * owners.
+		 */
+		list_for_each_entry_safe(sigp, tmp, sigq, next) {
+			list_del_init(&sigp->next);
+			if ((void *)sigp >= sigpending_mem &&
+			    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) {
+				xnlock_get_irqsave(&nklock, s);
+				list_add_tail(&sigp->next, &sigpending_pool);
+				xnlock_put_irqrestore(&nklock, s);
+			}
+		}
+	}
+
+	sigemptyset(&thread->sigpending);
+}
+
+static int signal_put_siginfo(void __user *u_si, const struct siginfo *si,
+			      int overrun)
+{
+	struct siginfo __user *u_p = u_si;
+	int ret;
+
+	ret = __xn_put_user(si->si_signo, &u_p->si_signo);
+	ret |= __xn_put_user(si->si_errno, &u_p->si_errno);
+	ret |= __xn_put_user(si->si_code, &u_p->si_code);
+
+	/*
+	 * Copy the generic/standard siginfo bits to userland.
+	 */
+	switch (si->si_code) {
+	case SI_TIMER:
+		ret |= __xn_put_user(si->si_tid, &u_p->si_tid);
+		ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr);
+		ret |= __xn_put_user(overrun, &u_p->si_overrun);
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		ret |= __xn_put_user(si->si_ptr, &u_p->si_ptr);
+		fallthrough;
+	case SI_USER:
+		ret |= __xn_put_user(si->si_pid, &u_p->si_pid);
+		ret |= __xn_put_user(si->si_uid, &u_p->si_uid);
+	}
+
+	return ret;
+}
+
+static int signal_wait(sigset_t *set, xnticks_t timeout,
+		       void __user *u_si, bool compat)
+{
+	struct cobalt_sigpending *sigp = NULL;
+	struct cobalt_sigwait_context swc;
+	struct cobalt_thread *curr;
+	int ret, sig, n, overrun;
+	unsigned long *p, *t, m;
+	struct siginfo si, *sip;
+	struct list_head *sigq;
+	spl_t s;
+
+	curr = cobalt_current_thread();
+	XENO_BUG_ON(COBALT, curr == NULL);
+
+	if (u_si && !access_wok(u_si, sizeof(*u_si)))
+		return -EFAULT;
+
+	xnlock_get_irqsave(&nklock, s);
+
+check:
+	if (sigisemptyset(&curr->sigpending))
+		/* Most common/fast path. */
+		goto wait;
+
+	p = curr->sigpending.sig; /* pending */
+	t = set->sig;		  /* tested */
+
+	for (n = 0, sig = 0; n < _NSIG_WORDS; ++n) {
+		m = *p++ & *t++;
+		if (m == 0)
+			continue;
+		sig = ffz(~m) + n * _NSIG_BPW + 1;
+		break;
+	}
+
+	if (sig) {
+		sigq = curr->sigqueues + sig - 1;
+		if (list_empty(sigq)) {
+			sigdelset(&curr->sigpending, sig);
+			goto check;
+		}
+		sigp = list_get_entry(sigq, struct cobalt_sigpending, next);
+		INIT_LIST_HEAD(&sigp->next); /* Mark sigp as unlinked. */
+		if (list_empty(sigq))
+			sigdelset(&curr->sigpending, sig);
+		sip = &sigp->si;
+		ret = 0;
+		goto done;
+	}
+
+wait:
+	if (timeout == XN_NONBLOCK) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+	swc.set = set;
+	swc.si = &si;
+	xnthread_prepare_wait(&swc.wc);
+	list_add_tail(&curr->signext, &curr->process->sigwaiters);
+	ret = xnsynch_sleep_on(&curr->sigwait, timeout, XN_RELATIVE);
+	if (ret) {
+		list_del(&curr->signext);
+		ret = ret & XNBREAK ? -EINTR : -EAGAIN;
+		goto fail;
+	}
+	sig = si.si_signo;
+	sip = &si;
+done:
+	 /*
+	  * si_overrun raises a nasty issue, since we have to collect
+	  * and clear it atomically before we drop the lock, although
+	  * we don't know in advance whether any extension would use
+	  * it along with the additional si_codes it may provide; yet
+	  * we must drop the lock before running the signal_copyinfo
+	  * handler.
+	  *
+	  * Observing that si_overrun is likely the only "unstable"
+	  * datum in the signal information which might change under
+	  * our feet while we copy the bits to userland, we collect it
+	  * here from the atomic section for all unknown si_codes,
+	  * then pass its value to the signal_copyinfo handler.
+	  */
+	switch (sip->si_code) {
+	case SI_TIMER:
+		overrun = cobalt_timer_deliver(curr, sip->si_tid);
+		break;
+	case SI_USER:
+	case SI_MESGQ:
+	case SI_QUEUE:
+		overrun = 0;
+		break;
+	default:
+		overrun = sip->si_overrun;
+		if (overrun)
+			sip->si_overrun = 0;
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (u_si == NULL)
+		goto out;	/* Return signo only. */
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (compat) {
+		ret = sys32_put_siginfo(u_si, sip, overrun);
+		if (!ret)
+			/* Allow an extended target to receive more data. */
+			cobalt_call_extension(signal_copyinfo_compat,
+					      &curr->extref, ret, u_si, sip,
+					      overrun);
+	} else
+#endif
+	{
+		ret = signal_put_siginfo(u_si, sip, overrun);
+		if (!ret)
+			/* Allow an extended target to receive more data. */
+			cobalt_call_extension(signal_copyinfo, &curr->extref,
+					      ret, u_si, sip, overrun);
+	}
+
+out:
+	/*
+	 * If we pulled the signal information from a sigpending
+	 * block, release it to the free pool if applicable.
+	 */
+	if (sigp &&
+	    (void *)sigp >= sigpending_mem &&
+	    (void *)sigp < sigpending_mem + __SIGPOOL_SIZE) {
+		xnlock_get_irqsave(&nklock, s);
+		list_add_tail(&sigp->next, &sigpending_pool);
+		xnlock_put_irqrestore(&nklock, s);
+		/* no more ref. to sigp beyond this point. */
+	}
+
+	return ret ? -EFAULT : sig;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sigwait(sigset_t *set)
+{
+	return signal_wait(set, XN_INFINITE, NULL, false);
+}
+
+COBALT_SYSCALL(sigwait, primary,
+	       (const sigset_t __user *u_set, int __user *u_sig))
+{
+	sigset_t set;
+	int sig;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	sig = signal_wait(&set, XN_INFINITE, NULL, false);
+	if (sig < 0)
+		return sig;
+
+	return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig));
+}
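+
+/*
+ * Typical user-space usage reaching this entry through the libcobalt
+ * wrapper (plain POSIX, shown for context only):
+ *
+ *	sigset_t set;
+ *	int sig;
+ *
+ *	sigemptyset(&set);
+ *	sigaddset(&set, SIGRTMIN + 1);
+ *	pthread_sigmask(SIG_BLOCK, &set, NULL);
+ *	sigwait(&set, &sig);
+ */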
+
+int __cobalt_sigtimedwait(sigset_t *set,
+			  const struct timespec64 *timeout,
+			  void __user *u_si,
+			  bool compat)
+{
+	xnticks_t ticks;
+
+	if (!timespec64_valid(timeout))
+		return -EINVAL;
+	ticks = ts2ns(timeout);
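+	/* A zero delay means a non-blocking probe; otherwise add one tick. */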
+	if (ticks++ == 0)
+		ticks = XN_NONBLOCK;
+
+	return signal_wait(set, ticks, u_si, compat);
+}
+
+COBALT_SYSCALL(sigtimedwait, nonrestartable,
+	       (const sigset_t __user *u_set,
+		struct siginfo __user *u_si,
+		const struct __user_old_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	if (cobalt_copy_from_user(&timeout, u_timeout, sizeof(timeout)))
+		return -EFAULT;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, false);
+}
+
+COBALT_SYSCALL(sigtimedwait64, nonrestartable,
+	       (const sigset_t __user *u_set,
+		struct siginfo __user *u_si,
+		const struct __kernel_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	if (cobalt_get_timespec64(&timeout, u_timeout))
+		return -EFAULT;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, false);
+}
+
+int __cobalt_sigwaitinfo(sigset_t *set,
+			 void __user *u_si,
+			 bool compat)
+{
+	return signal_wait(set, XN_INFINITE, u_si, compat);
+}
+
+COBALT_SYSCALL(sigwaitinfo, nonrestartable,
+	       (const sigset_t __user *u_set, struct siginfo __user *u_si))
+{
+	sigset_t set;
+
+	if (cobalt_copy_from_user(&set, u_set, sizeof(set)))
+		return -EFAULT;
+
+	return __cobalt_sigwaitinfo(&set, u_si, false);
+}
+
+COBALT_SYSCALL(sigpending, primary, (old_sigset_t __user *u_set))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+
+	return cobalt_copy_to_user(u_set, &curr->sigpending, sizeof(*u_set));
+}
+
+int __cobalt_kill(struct cobalt_thread *thread, int sig, int group) /* nklocked, IRQs off */
+{
+	struct cobalt_sigpending *sigp;
+	int ret = 0;
+
+	/*
+	 * We have undocumented pseudo-signals to suspend/resume/unblock
+	 * threads, force them out of primary mode or even demote them
+	 * to the weak scheduling class/priority. Process them early,
+	 * before anyone can notice...
+	 */
+	switch(sig) {
+	case 0:
+		/* Check for existence only. */
+		break;
+	case SIGSUSP:
+		/*
+		 * All callers shall be tagged as conforming calls, so
+		 * self-directed suspension can only happen from
+		 * primary mode. Yummie.
+		 */
+		xnthread_suspend(&thread->threadbase, XNSUSP,
+				 XN_INFINITE, XN_RELATIVE, NULL);
+		if (&thread->threadbase == xnthread_current() &&
+		    xnthread_test_info(&thread->threadbase, XNBREAK))
+			ret = -EINTR;
+		break;
+	case SIGRESM:
+		xnthread_resume(&thread->threadbase, XNSUSP);
+		goto resched;
+	case SIGRELS:
+		xnthread_unblock(&thread->threadbase);
+		goto resched;
+	case SIGKICK:
+		xnthread_kick(&thread->threadbase);
+		goto resched;
+	case SIGDEMT:
+		xnthread_demote(&thread->threadbase);
+		goto resched;
+	case 1 ... _NSIG:
+		sigp = cobalt_signal_alloc();
+		if (sigp) {
+			sigp->si.si_signo = sig;
+			sigp->si.si_errno = 0;
+			sigp->si.si_code = SI_USER;
+			sigp->si.si_pid = task_pid_nr(current);
+			sigp->si.si_uid = get_current_uuid();
+			if (cobalt_signal_send(thread, sigp, group) <= 0)
+				cobalt_signal_free(sigp);
+		}
+	resched:
+		xnsched_run();
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL(kill, conforming, (pid_t pid, int sig))
+{
+	struct cobalt_thread *thread;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL)
+		ret = -ESRCH;
+	else
+		ret = __cobalt_kill(thread, sig, 1);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value)
+{
+	struct cobalt_sigpending *sigp;
+	struct cobalt_thread *thread;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL) {
+		ret = -ESRCH;
+		goto out;
+	}
+
+	switch(sig) {
+	case 0:
+		/* Check for existence only. */
+		break;
+	case 1 ... _NSIG:
+		sigp = cobalt_signal_alloc();
+		if (sigp) {
+			sigp->si.si_signo = sig;
+			sigp->si.si_errno = 0;
+			sigp->si.si_code = SI_QUEUE;
+			sigp->si.si_pid = task_pid_nr(current);
+			sigp->si.si_uid = get_current_uuid();
+			sigp->si.si_value = *value;
+			if (cobalt_signal_send(thread, sigp, 1) <= 0)
+				cobalt_signal_free(sigp);
+			else
+				xnsched_run();
+		}
+		break;
+	default:
+		/* Cobalt pseudo-signals are never process-directed. */
+		ret = __cobalt_kill(thread, sig, 0);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__cobalt_sigqueue);
+
+COBALT_SYSCALL(sigqueue, conforming,
+	       (pid_t pid, int sig, const union sigval __user *u_value))
+{
+	union sigval val;
+	int ret;
+
+	ret = cobalt_copy_from_user(&val, u_value, sizeof(val));
+
+	return ret ?: __cobalt_sigqueue(pid, sig, &val);
+}
+
+__init int cobalt_signal_init(void)
+{
+	struct cobalt_sigpending *sigp;
+
+	sigpending_mem = xnheap_vmalloc(__SIGPOOL_SIZE);
+	if (sigpending_mem == NULL)
+		return -ENOMEM;
+
+	for (sigp = sigpending_mem;
+	     (void *)sigp < sigpending_mem + __SIGPOOL_SIZE; sigp++)
+		list_add_tail(&sigp->next, &sigpending_pool);
+
+	return 0;
+}
+
+__init void cobalt_signal_cleanup(void)
+{
+	xnheap_vfree(sigpending_mem);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h
new file mode 100644
index 0000000..0b5d11e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/signal.h
@@ -0,0 +1,120 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SIGNAL_H
+#define _COBALT_POSIX_SIGNAL_H
+
+#include <linux/signal.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/list.h>
+#include <cobalt/uapi/signal.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_thread;
+
+struct cobalt_sigpending {
+	struct siginfo si;
+	struct list_head next;
+};
+
+static inline
+void cobalt_copy_siginfo(int code,
+			 struct siginfo *__restrict__ dst,
+			 const struct siginfo *__restrict__ src)
+{
+	dst->si_signo = src->si_signo;
+	dst->si_errno = src->si_errno;
+	dst->si_code = code;
+
+	switch (code) {
+	case SI_TIMER:
+		dst->si_tid = src->si_tid;
+		dst->si_overrun = src->si_overrun;
+		dst->si_value = src->si_value;
+		break;
+	case SI_QUEUE:
+	case SI_MESGQ:
+		dst->si_value = src->si_value;
+		fallthrough;
+	case SI_USER:
+		dst->si_pid = src->si_pid;
+		dst->si_uid = src->si_uid;
+	}
+}
+
+int __cobalt_sigwait(sigset_t *set);
+
+int __cobalt_sigtimedwait(sigset_t *set,
+			  const struct timespec64 *timeout,
+			  void __user *u_si,
+			  bool compat);
+
+int __cobalt_sigwaitinfo(sigset_t *set,
+			 void __user *u_si,
+			 bool compat);
+
+int __cobalt_sigqueue(pid_t pid, int sig, const union sigval *value);
+
+int cobalt_signal_send(struct cobalt_thread *thread,
+		       struct cobalt_sigpending *sigp,
+		       int group);
+
+int cobalt_signal_send_pid(pid_t pid,
+			   struct cobalt_sigpending *sigp);
+
+struct cobalt_sigpending *cobalt_signal_alloc(void);
+
+void cobalt_signal_free(struct cobalt_sigpending *sigp);
+
+void cobalt_signal_flush(struct cobalt_thread *thread);
+
+int cobalt_signal_wait(sigset_t *set, struct siginfo *si,
+		       xnticks_t timeout, xntmode_t tmode);
+
+int __cobalt_kill(struct cobalt_thread *thread,
+		  int sig, int group);
+
+COBALT_SYSCALL_DECL(sigwait,
+		    (const sigset_t __user *u_set, int __user *u_sig));
+
+COBALT_SYSCALL_DECL(sigtimedwait,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si,
+		     const struct __user_old_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sigtimedwait64,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si,
+		     const struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL_DECL(sigwaitinfo,
+		    (const sigset_t __user *u_set,
+		     struct siginfo __user *u_si));
+
+COBALT_SYSCALL_DECL(sigpending,
+		    (old_sigset_t __user *u_set));
+
+COBALT_SYSCALL_DECL(kill, (pid_t pid, int sig));
+
+COBALT_SYSCALL_DECL(sigqueue,
+		    (pid_t pid, int sig, const union sigval __user *u_value));
+
+int cobalt_signal_init(void);
+
+void cobalt_signal_cleanup(void);
+
+#endif /* !_COBALT_POSIX_SIGNAL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c
new file mode 100644
index 0000000..46c4998
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2005 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <linux/sched.h>
+#include <linux/kconfig.h>
+#include <linux/unistd.h>
+#include <cobalt/uapi/corectl.h>
+#include <cobalt/kernel/tree.h>
+#include <cobalt/kernel/vdso.h>
+#include <cobalt/kernel/init.h>
+#include <pipeline/kevents.h>
+#include <pipeline/vdso_fallback.h>
+#include <asm/syscall.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "mutex.h"
+#include "cond.h"
+#include "mqueue.h"
+#include "sem.h"
+#include "signal.h"
+#include "timer.h"
+#include "monitor.h"
+#include "clock.h"
+#include "event.h"
+#include "timerfd.h"
+#include "io.h"
+#include "corectl.h"
+#include "../debug.h"
+#include <trace/events/cobalt-posix.h>
+
+/* Syscall must run into the Linux domain. */
+#define __xn_exec_lostage    0x1
+/* Syscall must run into the Xenomai domain. */
+#define __xn_exec_histage    0x2
+/* Shadow syscall: caller must be mapped. */
+#define __xn_exec_shadow     0x4
+/* Switch back toggle; caller must return to its original mode. */
+#define __xn_exec_switchback 0x8
+/* Exec in current domain. */
+#define __xn_exec_current    0x10
+/* Exec in conforming domain, Xenomai or Linux. */
+#define __xn_exec_conforming 0x20
+/* Attempt syscall restart in the opposite domain upon -ENOSYS. */
+#define __xn_exec_adaptive   0x40
+/* Do not restart syscall upon signal receipt. */
+#define __xn_exec_norestart  0x80
+/* Shorthand for shadow init syscall. */
+#define __xn_exec_init       __xn_exec_lostage
+/* Shorthand for shadow syscall in Xenomai space. */
+#define __xn_exec_primary   (__xn_exec_shadow|__xn_exec_histage)
+/* Shorthand for shadow syscall in Linux space. */
+#define __xn_exec_secondary (__xn_exec_shadow|__xn_exec_lostage)
+/* Shorthand for syscall in Linux space with switchback if shadow. */
+#define __xn_exec_downup    (__xn_exec_lostage|__xn_exec_switchback)
+/* Shorthand for non-restartable primary syscall. */
+#define __xn_exec_nonrestartable (__xn_exec_primary|__xn_exec_norestart)
+/* Domain probing syscall starting in conforming mode. */
+#define __xn_exec_probing   (__xn_exec_conforming|__xn_exec_adaptive)
+/* Hand over mode selection to syscall.  */
+#define __xn_exec_handover  (__xn_exec_current|__xn_exec_adaptive)
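+
+/*
+ * Combining the bits above: a "probing" syscall starts on the stage
+ * conforming to its caller and, should the handler return -ENOSYS
+ * there, the adaptive bit allows a single restart on the opposite
+ * stage.
+ */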
+
+typedef long (*cobalt_syshand)(unsigned long arg1, unsigned long arg2,
+			       unsigned long arg3, unsigned long arg4,
+			       unsigned long arg5);
+
+static void prepare_for_signal(struct task_struct *p,
+			       struct xnthread *thread,
+			       struct pt_regs *regs,
+			       int sysflags)
+{
+	int notify = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_info(thread, XNKICKED)) {
+		if (signal_pending(p)) {
+			__xn_error_return(regs,
+					  (sysflags & __xn_exec_norestart) ?
+					  -EINTR : -ERESTARTSYS);
+			notify = !xnthread_test_state(thread, XNSSTEP);
+			xnthread_clear_info(thread, XNBREAK);
+		}
+		xnthread_clear_info(thread, XNKICKED);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_test_cancel();
+
+	xnthread_relax(notify, SIGDEBUG_MIGRATE_SIGNAL);
+}
+
+static COBALT_SYSCALL(migrate, current, (int domain))
+{
+	struct xnthread *thread = xnthread_current();
+
+	if (is_secondary_domain()) {
+		if (domain == COBALT_PRIMARY) {
+			if (thread == NULL)
+				return -EPERM;
+			/*
+			 * Paranoid: a corner case where userland
+			 * fiddles with SIGSHADOW while the target
+			 * thread is still waiting to be started.
+			 */
+			if (xnthread_test_state(thread, XNDORMANT))
+				return 0;
+
+			return xnthread_harden() ? : 1;
+		}
+		return 0;
+	}
+
+	/* We are running on the head stage, apply relax request. */
+	if (domain == COBALT_SECONDARY) {
+		xnthread_relax(0, 0);
+		return 1;
+	}
+
+	return 0;
+}
+
+static COBALT_SYSCALL(trace, current,
+		      (int op, unsigned long a1,
+		       unsigned long a2, unsigned long a3))
+{
+	int ret = -EINVAL;
+
+	switch (op) {
+	case __xntrace_op_max_begin:
+		ret = xntrace_max_begin(a1);
+		break;
+
+	case __xntrace_op_max_end:
+		ret = xntrace_max_end(a1);
+		break;
+
+	case __xntrace_op_max_reset:
+		ret = xntrace_max_reset();
+		break;
+
+	case __xntrace_op_user_start:
+		ret = xntrace_user_start();
+		break;
+
+	case __xntrace_op_user_stop:
+		ret = xntrace_user_stop(a1);
+		break;
+
+	case __xntrace_op_user_freeze:
+		ret = xntrace_user_freeze(a1, a2);
+		break;
+
+	case __xntrace_op_special:
+		ret = xntrace_special(a1 & 0xFF, a2);
+		break;
+
+	case __xntrace_op_special_u64:
+		ret = xntrace_special_u64(a1 & 0xFF,
+					  (((u64) a2) << 32) | a3);
+		break;
+
+	case __xntrace_op_latpeak_freeze:
+		xntrace_latpeak_freeze(a1);
+		ret = 0;
+		break;
+
+	}
+	return ret;
+}
+
+static COBALT_SYSCALL(ftrace_puts, current,
+		      (const char __user *str))
+{
+	char buf[256];
+	ssize_t len;
+
+	len = cobalt_strncpy_from_user(buf, str, sizeof(buf));
+	if (len < 0)
+		return -EFAULT;
+
+#ifdef CONFIG_TRACING
+	__trace_puts(_THIS_IP_, buf, len);
+#endif
+
+	return 0;
+}
+
+static COBALT_SYSCALL(archcall, current,
+		      (unsigned long a1, unsigned long a2,
+		       unsigned long a3, unsigned long a4,
+		       unsigned long a5))
+{
+	return xnarch_local_syscall(a1, a2, a3, a4, a5);
+}
+
+static COBALT_SYSCALL(get_current, current,
+		      (xnhandle_t __user *u_handle))
+{
+	struct xnthread *cur = xnthread_current();
+
+	if (cur == NULL)
+		return -EPERM;
+
+	return cobalt_copy_to_user(u_handle, &cur->handle,
+				      sizeof(*u_handle));
+}
+
+static COBALT_SYSCALL(backtrace, lostage,
+		      (int nr, unsigned long __user *u_backtrace, int reason))
+{
+	unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+	int ret;
+
+	/*
+	 * Bail out if backtrace() in userland is broken or failed;
+	 * we may still want to know about this in kernel space for
+	 * future use.
+	 */
+	if (nr <= 0)
+		return 0;
+	/*
+	 * We may omit the older frames if we can't store the full
+	 * backtrace.
+	 */
+	if (nr > SIGSHADOW_BACKTRACE_DEPTH)
+		nr = SIGSHADOW_BACKTRACE_DEPTH;
+	/*
+	 * Fetch the backtrace array, filled with PC values as seen
+	 * from the relaxing thread in user-space; bail out on copy
+	 * failure.
+	 */
+	ret = cobalt_copy_from_user(backtrace, u_backtrace, nr * sizeof(long));
+	if (ret)
+		return ret;
+
+	xndebug_trace_relax(nr, backtrace, reason);
+
+	return 0;
+}
+
+static COBALT_SYSCALL(serialdbg, current,
+		      (const char __user *u_msg, int len))
+{
+	char buf[128];
+	int n;
+
+	while (len > 0) {
+		n = len;
+		if (n > sizeof(buf))
+			n = sizeof(buf);
+		if (cobalt_copy_from_user(buf, u_msg, n))
+			return -EFAULT;
+		raw_printk("%.*s", n, buf);
+		u_msg += n;
+		len -= n;
+	}
+
+	return 0;
+}
+
+static void stringify_feature_set(unsigned long fset, char *buf, int size)
+{
+	unsigned long feature;
+	int nc, nfeat;
+
+	*buf = '\0';
+
+	for (feature = 1, nc = nfeat = 0; fset != 0 && size > 0; feature <<= 1) {
+		if (fset & feature) {
+			nc = ksformat(buf, size, "%s%s",
+				      nfeat > 0 ? " " : "",
+				      get_feature_label(feature));
+			nfeat++;
+			size -= nc;
+			buf += nc;
+			fset &= ~feature;
+		}
+	}
+}
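+
+/*
+ * E.g. a set with two features present comes out as the
+ * space-separated string "<label1> <label2>", using the labels
+ * get_feature_label() returns.
+ */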
+
+static COBALT_SYSCALL(bind, lostage,
+		      (struct cobalt_bindreq __user *u_breq))
+{
+	unsigned long featreq, featmis;
+	struct cobalt_bindreq breq;
+	struct cobalt_featinfo *f;
+	int abirev;
+
+	if (cobalt_copy_from_user(&breq, u_breq, sizeof(breq)))
+		return -EFAULT;
+
+	f = &breq.feat_ret;
+	featreq = breq.feat_req;
+	if (!realtime_core_running() && (featreq & __xn_feat_control) == 0)
+		return -EAGAIN;
+
+	/*
+	 * Calculate the missing feature set:
+	 * kernel_unavailable_set & user_mandatory_set.
+	 */
+	featmis = (~XENOMAI_FEAT_DEP & (featreq & XENOMAI_FEAT_MAN));
+	abirev = breq.abi_rev;
+
+	/*
+	 * Pass back the supported feature set and the ABI revision
+	 * level to user-space.
+	 */
+	f->feat_all = XENOMAI_FEAT_DEP;
+	stringify_feature_set(XENOMAI_FEAT_DEP, f->feat_all_s,
+			      sizeof(f->feat_all_s));
+	f->feat_man = featreq & XENOMAI_FEAT_MAN;
+	stringify_feature_set(f->feat_man, f->feat_man_s,
+			      sizeof(f->feat_man_s));
+	f->feat_mis = featmis;
+	stringify_feature_set(featmis, f->feat_mis_s,
+			      sizeof(f->feat_mis_s));
+	f->feat_req = featreq;
+	stringify_feature_set(featreq, f->feat_req_s,
+			      sizeof(f->feat_req_s));
+	f->feat_abirev = XENOMAI_ABI_REV;
+	collect_arch_features(f);
+
+	pipeline_collect_features(f);
+	f->vdso_offset = cobalt_umm_offset(&cobalt_ppd_get(1)->umm, nkvdso);
+
+	if (cobalt_copy_to_user(u_breq, &breq, sizeof(breq)))
+		return -EFAULT;
+
+	/*
+	 * If some mandatory features the user-space code relies on
+	 * are missing at kernel level, we cannot go further.
+	 */
+	if (featmis)
+		return -EINVAL;
+
+	if (!check_abi_revision(abirev))
+		return -ENOEXEC;
+
+	return cobalt_bind_core(featreq);
+}
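+
+/*
+ * A worked example of the negotiation above, using illustrative
+ * masks: with XENOMAI_FEAT_DEP == 0b0111 supported by the kernel, a
+ * request of featreq == 0b1101 whose mandatory part is
+ * (featreq & XENOMAI_FEAT_MAN) == 0b1001 gives
+ * featmis == ~0b0111 & 0b1001 == 0b1000, so the binding is refused:
+ * one mandatory feature is unavailable.
+ */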
+
+static COBALT_SYSCALL(extend, lostage, (unsigned int magic))
+{
+	return cobalt_bind_personality(magic);
+}
+
+static int CoBaLt_ni(void)
+{
+	return -ENOSYS;
+}
+
+/*
+ * We have a single syscall table for all ABI models, i.e. 64bit
+ * native + 32bit emulation, or plain 32bit.
+ *
+ * The syscall table is set up in a single step, based on three
+ * subsequent sources of initializers:
+ *
+ * - first, all syscall entries are defaulted to a placeholder
+ * returning -ENOSYS (__COBALT_CALL_NI), as the table may be sparse.
+ *
+ * - then __COBALT_CALL_ENTRY() produces a native call entry
+ * (e.g. pure 64bit call handler for a 64bit architecture, 32bit
+ * handler for a 32bit architecture), optionally followed by a set of
+ * 32bit syscall entries offset by an arch-specific base index, which
+ * default to the native calls. These nitty-gritty details are defined
+ * by <asm/xenomai/syscall32.h>. 32bit architectures - or 64bit ones
+ * for which we don't support any 32bit ABI model - will simply define
+ * __COBALT_CALL32_ENTRY() as an empty macro.
+ *
+ * - finally, 32bit thunk entries are generated by including
+ * <asm/xenomai/syscall32-table.h>, overriding the default handlers
+ * installed during the previous step.
+ *
+ * For instance, with CONFIG_IA32_EMULATION support enabled in an
+ * x86_64 kernel, sc_cobalt_mq_timedreceive would appear twice in the
+ * table, as:
+ *
+ * [sc_cobalt_mq_timedreceive] = CoBaLt_mq_timedreceive,
+ * ...
+ * [sc_cobalt_mq_timedreceive + __COBALT_IA32_BASE] = CoBaLt32emu_mq_timedreceive,
+ *
+ * CoBaLt32emu_mq_timedreceive() would do the required thunking for
+ * dealing with the 32<->64bit conversion of arguments. On the other
+ * hand, sc_cobalt_sched_yield - which does not require any thunk -
+ * would also appear twice, but both entries would point at the native
+ * syscall implementation:
+ *
+ * [sc_cobalt_sched_yield] = CoBaLt_sched_yield,
+ * ...
+ * [sc_cobalt_sched_yield + __COBALT_IA32_BASE] = CoBaLt_sched_yield,
+ *
+ * Accordingly, applications targeting the ia32 model issue syscalls
+ * in the range [__COBALT_IA32_BASE..__COBALT_IA32_BASE +
+ * __NR_COBALT_SYSCALLS-1], whilst native (32/64bit) ones issue
+ * syscalls in the range [0..__NR_COBALT_SYSCALLS-1].
+ *
+ * In short, this is an incremental process where the arch-specific
+ * code can override the 32bit syscall entries, pointing at the thunk
+ * routines it may need for handing 32bit calls over to their
+ * respective 64bit implementations.
+ *
+ * By convention, there is NO pure 32bit syscall, which means that
+ * each 32bit syscall defined by a compat ABI interface MUST match a
+ * native (64bit) syscall. This is important as we share the call
+ * modes (i.e. __xn_exec_ bits) between all ABI models.
+ *
+ * --rpm
+ */
+#define __syshand__(__name)	\
+	((cobalt_syshand)(void (*)(void))(CoBaLt_ ## __name))
+
+#define __COBALT_NI	__syshand__(ni)
+
+#define __COBALT_CALL_NI				\
+	[0 ... __NR_COBALT_SYSCALLS-1] = __COBALT_NI,	\
+	__COBALT_CALL32_INITHAND(__COBALT_NI)
+
+#define __COBALT_CALL_NFLAGS				\
+	[0 ... __NR_COBALT_SYSCALLS-1] = 0,		\
+	__COBALT_CALL32_INITMODE(0)
+
+#define __COBALT_CALL_ENTRY(__name)				\
+	[sc_cobalt_ ## __name] = __syshand__(__name),		\
+	__COBALT_CALL32_ENTRY(__name, __syshand__(__name))
+
+#define __COBALT_MODE(__name, __mode)	\
+	[sc_cobalt_ ## __name] = __xn_exec_##__mode,
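+
+/*
+ * A minimal sketch of how these initializers expand (added for
+ * illustration): on a 64bit kernel with 32bit emulation support,
+ *
+ *	__COBALT_CALL_ENTRY(sched_yield)
+ *
+ * produces
+ *
+ *	[sc_cobalt_sched_yield] = __syshand__(sched_yield),
+ *	[sc_cobalt_sched_yield + __COBALT_IA32_BASE] =
+ *					__syshand__(sched_yield),
+ *
+ * while __COBALT_MODE(__name, __mode) tags the masked entry with
+ * the matching __xn_exec_ bit in cobalt_sysmodes[].
+ */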
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#include "syscall32.h"
+#endif
+
+#include "syscall_entries.h"
+
+static const cobalt_syshand cobalt_syscalls[] = {
+	__COBALT_CALL_NI
+	__COBALT_CALL_ENTRIES
+#ifdef CONFIG_XENO_ARCH_SYS3264
+#include <asm/xenomai/syscall32-table.h>
+#endif
+};
+
+static const int cobalt_sysmodes[] = {
+	__COBALT_CALL_NFLAGS
+	__COBALT_CALL_MODES
+};
+
+static inline int allowed_syscall(struct cobalt_process *process,
+				  struct xnthread *thread,
+				  int sysflags, int nr)
+{
+	if (nr == sc_cobalt_bind)
+		return 1;
+
+	if (process == NULL)
+		return 0;
+
+	if (thread == NULL && (sysflags & __xn_exec_shadow))
+		return 0;
+
+	return cap_raised(current_cap(), CAP_SYS_NICE);
+}
+
+int handle_head_syscall(bool caller_is_relaxed, struct pt_regs *regs)
+{
+	struct cobalt_process *process;
+	int switched, sigs, sysflags;
+	struct xnthread *thread;
+	cobalt_syshand handler;
+	struct task_struct *p;
+	unsigned long args[6];
+	unsigned int nr, code;
+	long ret;
+
+	if (!__xn_syscall_p(regs))
+		goto linux_syscall;
+
+	thread = xnthread_current();
+	code = __xn_syscall(regs);
+	if (code >= ARRAY_SIZE(cobalt_syscalls))
+		goto bad_syscall;
+
+	nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+	trace_cobalt_head_sysentry(code);
+
+	process = cobalt_current_process();
+	if (process == NULL) {
+		process = cobalt_search_process(current->mm);
+		cobalt_set_process(process);
+	}
+
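+	/*
+	 * The handler is indexed by the full code, which may carry a
+	 * compat ABI offset; the mode bits are shared between ABI
+	 * models, hence indexed by the masked call number.
+	 */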
+	handler = cobalt_syscalls[code];
+	sysflags = cobalt_sysmodes[nr];
+
+	/*
+	 * Executing Cobalt services requires CAP_SYS_NICE, except for
+	 * sc_cobalt_bind which does its own checks.
+	 */
+	if (unlikely(!allowed_syscall(process, thread, sysflags, nr))) {
+		/*
+		 * Exclude get_current from reporting; it is used to
+		 * probe the execution context.
+		 */
+		if (XENO_DEBUG(COBALT) && nr != sc_cobalt_get_current)
+			printk(XENO_WARNING
+			       "syscall <%d> denied to %s[%d]\n",
+			       nr, current->comm, task_pid_nr(current));
+		__xn_error_return(regs, -EPERM);
+		goto ret_handled;
+	}
+
+	if (sysflags & __xn_exec_conforming)
+		/*
+		 * If the conforming exec bit is set, turn the exec
+		 * bitmask for the syscall into the most appropriate
+		 * setup for the caller, i.e. Xenomai domain for
+		 * shadow threads, Linux otherwise.
+		 */
+		sysflags |= (thread ? __xn_exec_histage : __xn_exec_lostage);
+
+	/*
+	 * Here we have to dispatch the syscall execution properly,
+	 * depending on:
+	 *
+	 * o Whether the syscall must be run into the Linux or Xenomai
+	 * domain, or indifferently in the current Xenomai domain.
+	 *
+	 * o Whether the caller currently runs in the Linux or Xenomai
+	 * domain.
+	 */
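+	/*
+	 * Summing this up (table added for clarity), with "relaxed"
+	 * meaning the caller runs in the Linux domain:
+	 *
+	 *	mode bit		relaxed		primary
+	 *	__xn_exec_lostage	propagate	relax, run
+	 *	__xn_exec_histage	propagate	run here
+	 *	__xn_exec_current	propagate	run here
+	 */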
+restart:
+	/*
+	 * Process adaptive syscalls by restarting them in the
+	 * opposite domain upon receiving -ENOSYS from the syscall
+	 * handler.
+	 */
+	switched = 0;
+	if (sysflags & __xn_exec_lostage) {
+		/*
+		 * The syscall must run from the Linux domain.
+		 */
+		if (!caller_is_relaxed) {
+			/*
+			 * Request originates from the Xenomai domain:
+			 * relax the caller then invoke the syscall
+			 * handler right after.
+			 */
+			xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+			switched = 1;
+		} else
+			/*
+			 * Request originates from the Linux domain:
+			 * propagate the event to our Linux-based
+			 * handler, so that the syscall is executed
+			 * from there.
+			 */
+			return KEVENT_PROPAGATE;
+	} else if (sysflags & (__xn_exec_histage | __xn_exec_current)) {
+		/*
+		 * Syscall must run either from the Xenomai domain, or
+		 * from the calling domain.
+		 *
+		 * If the request originates from the Linux domain,
+		 * hand it over to our secondary-mode dispatcher.
+		 * Otherwise, invoke the syscall handler immediately.
+		 */
+		if (caller_is_relaxed)
+			return KEVENT_PROPAGATE;
+	}
+
+	/*
+	 * 'thread' has to be valid from this point on: every syscall
+	 * a regular thread may issue has either been pipelined to the
+	 * root handler (the lostage ones) or rejected by
+	 * allowed_syscall().
+	 */
+
+	p = current;
+	pipeline_get_syscall_args(p, regs, args);
+
+	ret = handler(args[0], args[1], args[2], args[3], args[4]);
+	if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) {
+		if (switched) {
+			ret = xnthread_harden();
+			if (ret) {
+				switched = 0;
+				goto done;
+			}
+		} else /* Mark the primary -> secondary transition. */
+			xnthread_set_localinfo(thread, XNDESCENT);
+		sysflags ^=
+		    (__xn_exec_lostage | __xn_exec_histage |
+		     __xn_exec_adaptive);
+		goto restart;
+	}
+done:
+	__xn_status_return(regs, ret);
+	sigs = 0;
+	if (!xnsched_root_p()) {
+		if (signal_pending(p) ||
+		    xnthread_test_info(thread, XNKICKED)) {
+			sigs = 1;
+			prepare_for_signal(p, thread, regs, sysflags);
+		} else if (xnthread_test_state(thread, XNWEAK) &&
+			   thread->res_count == 0) {
+			if (switched)
+				switched = 0;
+			else
+				xnthread_relax(0, 0);
+		}
+	}
+	if (!sigs && (sysflags & __xn_exec_switchback) && switched)
+		/* -EPERM will be trapped later if needed. */
+		xnthread_harden();
+
+ret_handled:
+	/* Update the stats and userland-visible state. */
+	if (thread) {
+		xnthread_clear_localinfo(thread, XNDESCENT);
+		xnstat_counter_inc(&thread->stat.xsc);
+		xnthread_sync_window(thread);
+	}
+
+	trace_cobalt_head_sysexit(__xn_reg_rval(regs));
+
+	return KEVENT_STOP;
+
+linux_syscall:
+	if (xnsched_root_p())
+		/*
+		 * The call originates from the Linux domain, either
+		 * from a relaxed shadow or from a regular Linux task;
+		 * just propagate the event so that we will fall back
+		 * to handle_root_syscall().
+		 */
+		return KEVENT_PROPAGATE;
+
+	if (!__xn_rootcall_p(regs, &code))
+		goto bad_syscall;
+
+	if (pipeline_handle_vdso_fallback(code, regs))
+		return KEVENT_STOP;
+
+	/*
+	 * We know this is a Cobalt thread since it runs over the head
+	 * domain; however, the current syscall should be handled by
+	 * the host kernel instead.  Before this happens, we have to
+	 * re-enter the root domain.
+	 */
+	xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+
+	return KEVENT_PROPAGATE;
+
+bad_syscall:
+	printk(XENO_WARNING "bad syscall <%#x>\n", code);
+
+	__xn_error_return(regs, -ENOSYS);
+
+	return KEVENT_STOP;
+}
+
+int handle_root_syscall(struct pt_regs *regs)
+{
+	int sysflags, switched, sigs;
+	struct xnthread *thread;
+	cobalt_syshand handler;
+	struct task_struct *p;
+	unsigned long args[6];
+	unsigned int nr, code;
+	long ret;
+
+	/*
+	 * Catch cancellation requests pending for user shadows
+	 * running mostly in secondary mode, i.e. XNWEAK. In that
+	 * case, we won't run prepare_for_signal() that frequently, so
+	 * check for cancellation here.
+	 */
+	xnthread_test_cancel();
+
+	if (!__xn_syscall_p(regs))
+		/* Fall back to Linux syscall handling. */
+		return KEVENT_PROPAGATE;
+
+	thread = xnthread_current();
+	/* code has already been checked in the head domain handler. */
+	code = __xn_syscall(regs);
+	nr = code & (__NR_COBALT_SYSCALLS - 1);
+
+	trace_cobalt_root_sysentry(code);
+
+	/* Processing a Xenomai syscall. */
+
+	handler = cobalt_syscalls[code];
+	sysflags = cobalt_sysmodes[nr];
+
+	if (thread && (sysflags & __xn_exec_conforming))
+		sysflags |= __xn_exec_histage;
+restart:
+	/*
+	 * Process adaptive syscalls by restarting them in the
+	 * opposite domain upon receiving -ENOSYS from the syscall
+	 * handler.
+	 */
+	switched = 0;
+	if (sysflags & __xn_exec_histage) {
+		/*
+		 * This request originates from the Linux domain but
+		 * should run into the Xenomai domain: harden the
+		 * caller before invoking the syscall handler.
+		 */
+		ret = xnthread_harden();
+		if (ret) {
+			__xn_error_return(regs, ret);
+			goto ret_handled;
+		}
+		switched = 1;
+	} else {
+		/*
+		 * We want to run the syscall in the current Linux
+		 * domain. This is a slow path, so proceed with any
+		 * pending schedparam update on the fly.
+		 */
+		if (thread)
+			xnthread_propagate_schedparam(thread);
+	}
+
+	p = current;
+	pipeline_get_syscall_args(p, regs, args);
+
+	ret = handler(args[0], args[1], args[2], args[3], args[4]);
+	if (ret == -ENOSYS && (sysflags & __xn_exec_adaptive)) {
+		sysflags ^= __xn_exec_histage;
+		if (switched) {
+			xnthread_relax(1, SIGDEBUG_MIGRATE_SYSCALL);
+			sysflags &= ~__xn_exec_adaptive;
+			 /* Mark the primary -> secondary transition. */
+			xnthread_set_localinfo(thread, XNDESCENT);
+		}
+		goto restart;
+	}
+
+	__xn_status_return(regs, ret);
+
+	sigs = 0;
+	if (!xnsched_root_p()) {
+		/*
+		 * We may have gained a shadow TCB from the syscall we
+		 * just invoked, so make sure to fetch it.
+		 */
+		thread = xnthread_current();
+		if (signal_pending(p)) {
+			sigs = 1;
+			prepare_for_signal(p, thread, regs, sysflags);
+		} else if (xnthread_test_state(thread, XNWEAK) &&
+			   thread->res_count == 0)
+			sysflags |= __xn_exec_switchback;
+	}
+	if (!sigs && (sysflags & __xn_exec_switchback)
+	    && (switched || xnsched_primary_p()))
+		xnthread_relax(0, 0);
+
+ret_handled:
+	/* Update the stats and userland-visible state. */
+	if (thread) {
+		xnthread_clear_localinfo(thread, XNDESCENT|XNHICCUP);
+		xnstat_counter_inc(&thread->stat.xsc);
+		xnthread_sync_window(thread);
+	}
+
+	trace_cobalt_root_sysexit(__xn_reg_rval(regs));
+
+	return KEVENT_STOP;
+}
+
+long cobalt_restart_syscall_placeholder(struct restart_block *param)
+{
+	return -EINVAL;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h
new file mode 100644
index 0000000..3a4c98d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SYSCALL_H
+#define _COBALT_POSIX_SYSCALL_H
+
+#include <cobalt/uapi/syscall.h>
+
+struct pt_regs;
+
+/* Regular (native) syscall handler implementation. */
+#define COBALT_SYSCALL(__name, __mode, __args)	\
+	long CoBaLt_ ## __name __args
+
+/* Regular (native) syscall handler declaration. */
+#define COBALT_SYSCALL_DECL(__name, __args)	\
+	long CoBaLt_ ## __name __args
+
+#include <asm/xenomai/syscall32.h>
+
+int handle_head_syscall(bool caller_is_relaxed,
+			struct pt_regs *regs);
+
+int handle_root_syscall(struct pt_regs *regs);
+
+#endif /* !_COBALT_POSIX_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c
new file mode 100644
index 0000000..9be0971
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.c
@@ -0,0 +1,963 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/err.h>
+#include <cobalt/uapi/syscall.h>
+#include <cobalt/kernel/time.h>
+#include <xenomai/rtdm/internal.h>
+#include "internal.h"
+#include "syscall32.h"
+#include "thread.h"
+#include "mutex.h"
+#include "cond.h"
+#include "sem.h"
+#include "sched.h"
+#include "clock.h"
+#include "timer.h"
+#include "timerfd.h"
+#include "signal.h"
+#include "monitor.h"
+#include "event.h"
+#include "mqueue.h"
+#include "io.h"
+#include "../debug.h"
+
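+/*
+ * Each handler below is a 32bit thunk: it fetches the compat_* or
+ * old_* layout from user-space, converts it to the native layout,
+ * then hands the request over to the regular implementation. A
+ * minimal sketch of the recurring pattern, using a hypothetical
+ * "foo" service:
+ *
+ *	COBALT_SYSCALL32emu(foo, current,
+ *			    (const struct old_timespec32 __user *u_ts))
+ *	{
+ *		struct timespec64 ts;
+ *		int ret = sys32_get_timespec(&ts, u_ts);
+ *		return ret ?: __cobalt_foo(&ts);
+ *	}
+ */
+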
+COBALT_SYSCALL32emu(thread_create, init,
+		    (compat_ulong_t pth,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     int xid,
+		     __u32 __user *u_winoff))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
+}
+
+COBALT_SYSCALL32emu(thread_setschedparam_ex, conforming,
+		    (compat_ulong_t pth,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_thread_setschedparam_ex(pth, policy, &param_ex,
+					      u_winoff, u_promoted);
+}
+
+COBALT_SYSCALL32emu(thread_getschedparam_ex, current,
+		    (compat_ulong_t pth,
+		     int __user *u_policy,
+		     struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+
+	return ret ?: sys32_put_param_ex(policy, u_param, &param_ex);
+}
+
+COBALT_SYSCALL32emu(thread_setschedprio, conforming,
+		    (compat_ulong_t pth,
+		     int prio,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted);
+}
+
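+/*
+ * Helper for timed waits: a timed service invoked with a NULL user
+ * timeout pointer is a usage error, reported as -EFAULT rather than
+ * blocking indefinitely.
+ */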
+static inline int sys32_fetch_timeout(struct timespec64 *ts,
+				      const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT :
+		sys32_get_timespec(ts, u_ts);
+}
+
+COBALT_SYSCALL32emu(sem_open, lostage,
+		    (compat_uptr_t __user *u_addrp,
+		     const char __user *u_name,
+		     int oflags, mode_t mode, unsigned int value))
+{
+	struct cobalt_sem_shadow __user *usm;
+	compat_uptr_t cusm;
+
+	if (__xn_get_user(cusm, u_addrp))
+		return -EFAULT;
+
+	usm = __cobalt_sem_open(compat_ptr(cusm), u_name, oflags, mode, value);
+	if (IS_ERR(usm))
+		return PTR_ERR(usm);
+
+	return __xn_put_user(ptr_to_compat(usm), u_addrp) ? -EFAULT : 0;
+}
+
+COBALT_SYSCALL32emu(sem_timedwait, primary,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct old_timespec32 __user *u_ts))
+{
+	int ret = 1;
+	struct timespec64 ts64;
+
+	if (u_ts)
+		ret = sys32_fetch_timeout(&ts64, u_ts);
+
+	return __cobalt_sem_timedwait(u_sem, ret ? NULL : &ts64);
+}
+
+COBALT_SYSCALL32emu(sem_timedwait64, primary,
+		    (struct cobalt_sem_shadow __user *u_sem,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_sem_timedwait64(u_sem, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_getres, current,
+		    (clockid_t clock_id,
+		     struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_getres(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	return u_ts ? sys32_put_timespec(u_ts, &ts) : 0;
+}
+
+COBALT_SYSCALL32emu(clock_getres64, current,
+		    (clockid_t clock_id,
+		     struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_getres64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_gettime, current,
+		    (clockid_t clock_id,
+		     struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = __cobalt_clock_gettime(clock_id, &ts);
+	if (ret)
+		return ret;
+
+	return sys32_put_timespec(u_ts, &ts);
+}
+
+COBALT_SYSCALL32emu(clock_gettime64, current,
+		    (clockid_t clock_id,
+		     struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_gettime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_settime, current,
+		    (clockid_t clock_id,
+		     const struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts;
+	int ret;
+
+	ret = sys32_get_timespec(&ts, u_ts);
+	if (ret)
+		return ret;
+
+	return __cobalt_clock_settime(clock_id, &ts);
+}
+
+COBALT_SYSCALL32emu(clock_settime64, current,
+		    (clockid_t clock_id,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_clock_settime64(clock_id, u_ts);
+}
+
+COBALT_SYSCALL32emu(clock_adjtime, current,
+		    (clockid_t clock_id, struct old_timex32 __user *u_tx))
+{
+	struct __kernel_timex tx;
+	int ret;
+
+	ret = sys32_get_timex(&tx, u_tx);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_clock_adjtime(clock_id, &tx);
+	if (ret)
+		return ret;
+
+	return sys32_put_timex(u_tx, &tx);
+}
+
+COBALT_SYSCALL32emu(clock_adjtime64, current,
+		    (clockid_t clock_id, struct __kernel_timex __user *u_tx))
+{
+	return __cobalt_clock_adjtime64(clock_id, u_tx);
+}
+
+COBALT_SYSCALL32emu(clock_nanosleep, primary,
+		    (clockid_t clock_id, int flags,
+		     const struct old_timespec32 __user *u_rqt,
+		     struct old_timespec32 __user *u_rmt))
+{
+	struct timespec64 rqt, rmt, *rmtp = NULL;
+	int ret;
+
+	if (u_rmt)
+		rmtp = &rmt;
+
+	ret = sys32_get_timespec(&rqt, u_rqt);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_clock_nanosleep(clock_id, flags, &rqt, rmtp);
+	if (ret == -EINTR && flags == 0 && rmtp)
+		ret = sys32_put_timespec(u_rmt, rmtp);
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(clock_nanosleep64, nonrestartable,
+		    (clockid_t clock_id, int flags,
+		     const struct __kernel_timespec __user *u_rqt,
+		     struct __kernel_timespec __user *u_rmt))
+{
+	return __cobalt_clock_nanosleep64(clock_id, flags, u_rqt, u_rmt);
+}
+
+COBALT_SYSCALL32emu(mutex_timedlock, primary,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_mutex_timedlock_break(u_mx, u_ts, sys32_fetch_timeout);
+}
+
+COBALT_SYSCALL32emu(mutex_timedlock64, primary,
+		    (struct cobalt_mutex_shadow __user *u_mx,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mutex_timedlock64(u_mx, u_ts);
+}
+
+COBALT_SYSCALL32emu(cond_wait_prologue, nonrestartable,
+		    (struct cobalt_cond_shadow __user *u_cnd,
+		     struct cobalt_mutex_shadow __user *u_mx,
+		     int *u_err,
+		     unsigned int timed,
+		     struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_cond_wait_prologue(u_cnd, u_mx, u_err, u_ts,
+					   timed ? sys32_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL32emu(mq_open, lostage,
+		    (const char __user *u_name, int oflags,
+		     mode_t mode, struct compat_mq_attr __user *u_attr))
+{
+	struct mq_attr _attr, *attr = &_attr;
+	int ret;
+
+	if ((oflags & O_CREAT) && u_attr) {
+		ret = sys32_get_mqattr(&_attr, u_attr);
+		if (ret)
+			return ret;
+	} else
+		attr = NULL;
+
+	return __cobalt_mq_open(u_name, oflags, mode, attr);
+}
+
+COBALT_SYSCALL32emu(mq_getattr, current,
+		    (mqd_t uqd, struct compat_mq_attr __user *u_attr))
+{
+	struct mq_attr attr;
+	int ret;
+
+	ret = __cobalt_mq_getattr(uqd, &attr);
+	if (ret)
+		return ret;
+
+	return sys32_put_mqattr(u_attr, &attr);
+}
+
+COBALT_SYSCALL32emu(mq_timedsend, primary,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct old_timespec32 __user *u_ts))
+{
+	return __cobalt_mq_timedsend(uqd, u_buf, len, prio,
+				     u_ts, u_ts ? sys32_fetch_timeout : NULL);
+}
+
+COBALT_SYSCALL32emu(mq_timedsend64, primary,
+		    (mqd_t uqd, const void __user *u_buf, size_t len,
+		     unsigned int prio,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_mq_timedsend64(uqd, u_buf, len, prio, u_ts);
+}
+
+COBALT_SYSCALL32emu(mq_timedreceive, primary,
+		    (mqd_t uqd, void __user *u_buf,
+		     compat_ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct old_timespec32 __user *u_ts))
+{
+	compat_ssize_t clen;
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&clen, u_len, sizeof(*u_len));
+	if (ret)
+		return ret;
+
+	len = clen;
+	ret = __cobalt_mq_timedreceive(uqd, u_buf, &len, u_prio,
+				       u_ts, u_ts ? sys32_fetch_timeout : NULL);
+	clen = len;
+
+	return ret ?: cobalt_copy_to_user(u_len, &clen, sizeof(*u_len));
+}
+
+COBALT_SYSCALL32emu(mq_timedreceive64, primary,
+		    (mqd_t uqd, void __user *u_buf,
+		     compat_ssize_t __user *u_len,
+		     unsigned int __user *u_prio,
+		     const struct __kernel_timespec __user *u_ts))
+{
+	compat_ssize_t clen;
+	ssize_t len;
+	int ret;
+
+	ret = cobalt_copy_from_user(&clen, u_len, sizeof(*u_len));
+	if (ret)
+		return ret;
+
+	len = clen;
+	ret = __cobalt_mq_timedreceive64(uqd, u_buf, &len, u_prio, u_ts);
+	clen = len;
+
+	return ret ?: cobalt_copy_to_user(u_len, &clen, sizeof(*u_len));
+}
+
+static inline int mq_fetch_timeout(struct timespec64 *ts,
+				   const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_u_timespec(ts, u_ts);
+}
+
+COBALT_SYSCALL32emu(mq_notify, primary,
+		    (mqd_t fd, const struct compat_sigevent *__user u_cev))
+{
+	struct sigevent sev;
+	int ret;
+
+	if (u_cev) {
+		ret = sys32_get_sigevent(&sev, u_cev);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_mq_notify(fd, u_cev ? &sev : NULL);
+}
+
+COBALT_SYSCALL32emu(sched_weightprio, current,
+		    (int policy,
+		     const struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param);
+	if (ret)
+		return ret;
+
+	return __cobalt_sched_weightprio(policy, &param_ex);
+}
+
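+/*
+ * Fetch a scheduling configuration from a 32bit caller: the compat
+ * layout is copied in first, then converted field-wise into a native
+ * buffer whose final size depends on the policy (e.g. the number of
+ * SCHED_TP windows).
+ */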
+static union sched_config *
+sys32_fetch_config(int policy, const void __user *u_config, size_t *len)
+{
+	union compat_sched_config *cbuf;
+	union sched_config *buf;
+	int ret, n;
+
+	if (u_config == NULL)
+		return ERR_PTR(-EFAULT);
+
+	if (policy == SCHED_QUOTA && *len < sizeof(cbuf->quota))
+		return ERR_PTR(-EINVAL);
+
+	cbuf = xnmalloc(*len);
+	if (cbuf == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = cobalt_copy_from_user(cbuf, u_config, *len);
+	if (ret) {
+		buf = ERR_PTR(ret);
+		goto out;
+	}
+
+	switch (policy) {
+	case SCHED_TP:
+		*len = sched_tp_confsz(cbuf->tp.nr_windows);
+		break;
+	case SCHED_QUOTA:
+		break;
+	default:
+		buf = ERR_PTR(-EINVAL);
+		goto out;
+	}
+
+	buf = xnmalloc(*len);
+	if (buf == NULL) {
+		buf = ERR_PTR(-ENOMEM);
+		goto out;
+	}
+
+	if (policy == SCHED_QUOTA)
+		memcpy(&buf->quota, &cbuf->quota, sizeof(cbuf->quota));
+	else {
+		buf->tp.op = cbuf->tp.op;
+		buf->tp.nr_windows = cbuf->tp.nr_windows;
+		for (n = 0; n < buf->tp.nr_windows; n++) {
+			buf->tp.windows[n].ptid = cbuf->tp.windows[n].ptid;
+			buf->tp.windows[n].offset.tv_sec = cbuf->tp.windows[n].offset.tv_sec;
+			buf->tp.windows[n].offset.tv_nsec = cbuf->tp.windows[n].offset.tv_nsec;
+			buf->tp.windows[n].duration.tv_sec = cbuf->tp.windows[n].duration.tv_sec;
+			buf->tp.windows[n].duration.tv_nsec = cbuf->tp.windows[n].duration.tv_nsec;
+		}
+	}
+out:
+	xnfree(cbuf);
+
+	return buf;
+}
+
+static int sys32_ack_config(int policy, const union sched_config *config,
+			    void __user *u_config)
+{
+	union compat_sched_config __user *u_p = u_config;
+
+	if (policy != SCHED_QUOTA)
+		return 0;
+
+	return u_config == NULL ? -EFAULT :
+		cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+				       sizeof(u_p->quota.info));
+}
+
+static ssize_t sys32_put_config(int policy,
+				void __user *u_config, size_t u_len,
+				const union sched_config *config, size_t len)
+{
+	union compat_sched_config __user *u_p = u_config;
+	int n, ret;
+
+	if (u_config == NULL)
+		return -EFAULT;
+
+	if (policy == SCHED_QUOTA) {
+		if (u_len < sizeof(u_p->quota))
+			return -EINVAL;
+		return cobalt_copy_to_user(&u_p->quota.info, &config->quota.info,
+					      sizeof(u_p->quota.info)) ?:
+			sizeof(u_p->quota.info);
+	}
+
+	/* SCHED_TP */
+
+	if (u_len < compat_sched_tp_confsz(config->tp.nr_windows))
+		return -ENOSPC;
+
+	__xn_put_user(config->tp.op, &u_p->tp.op);
+	__xn_put_user(config->tp.nr_windows, &u_p->tp.nr_windows);
+
+	for (n = 0, ret = 0; n < config->tp.nr_windows; n++) {
+		ret |= __xn_put_user(config->tp.windows[n].ptid,
+				     &u_p->tp.windows[n].ptid);
+		ret |= __xn_put_user(config->tp.windows[n].offset.tv_sec,
+				     &u_p->tp.windows[n].offset.tv_sec);
+		ret |= __xn_put_user(config->tp.windows[n].offset.tv_nsec,
+				     &u_p->tp.windows[n].offset.tv_nsec);
+		ret |= __xn_put_user(config->tp.windows[n].duration.tv_sec,
+				     &u_p->tp.windows[n].duration.tv_sec);
+		ret |= __xn_put_user(config->tp.windows[n].duration.tv_nsec,
+				     &u_p->tp.windows[n].duration.tv_nsec);
+	}
+
+	return ret ?: u_len;
+}
+
+COBALT_SYSCALL32emu(sched_setconfig_np, conforming,
+		    (int cpu, int policy,
+		     union compat_sched_config __user *u_config,
+		     size_t len))
+{
+	return __cobalt_sched_setconfig_np(cpu, policy, u_config, len,
+					   sys32_fetch_config, sys32_ack_config);
+}
+
+COBALT_SYSCALL32emu(sched_getconfig_np, conforming,
+		    (int cpu, int policy,
+		     union compat_sched_config __user *u_config,
+		     size_t len))
+{
+	return __cobalt_sched_getconfig_np(cpu, policy, u_config, len,
+					   sys32_fetch_config, sys32_put_config);
+}
+
+COBALT_SYSCALL32emu(sched_setscheduler_ex, conforming,
+		    (compat_pid_t pid,
+		     int policy,
+		     const struct compat_sched_param_ex __user *u_param_ex,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = sys32_get_param_ex(policy, &param_ex, u_param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_sched_setscheduler_ex(pid, policy, &param_ex,
+					    u_winoff, u_promoted);
+}
+
+COBALT_SYSCALL32emu(sched_getscheduler_ex, current,
+		    (compat_pid_t pid,
+		     int __user *u_policy,
+		     struct compat_sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_sched_getscheduler_ex(pid, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+
+	return ret ?: sys32_put_param_ex(policy, u_param, &param_ex);
+}
+
+COBALT_SYSCALL32emu(timer_create, current,
+		    (clockid_t clock,
+		     const struct compat_sigevent __user *u_sev,
+		     timer_t __user *u_tm))
+{
+	struct sigevent sev, *evp = NULL;
+	int ret;
+
+	if (u_sev) {
+		evp = &sev;
+		ret = sys32_get_sigevent(&sev, u_sev);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_timer_create(clock, evp, u_tm);
+}
+
+COBALT_SYSCALL32emu(timer_settime, primary,
+		    (timer_t tm, int flags,
+		     const struct old_itimerspec32 __user *u_newval,
+		     struct old_itimerspec32 __user *u_oldval))
+{
+	struct itimerspec64 newv, oldv, *oldvp = &oldv;
+	int ret;
+
+	if (u_oldval == NULL)
+		oldvp = NULL;
+
+	ret = sys32_get_itimerspec(&newv, u_newval);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timer_settime(tm, flags, &newv, oldvp);
+	if (ret)
+		return ret;
+
+	if (oldvp) {
+		ret = sys32_put_itimerspec(u_oldval, oldvp);
+		if (ret)
+			__cobalt_timer_settime(tm, flags, oldvp, NULL);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(timer_gettime, current,
+		    (timer_t tm, struct old_itimerspec32 __user *u_val))
+{
+	struct itimerspec64 val;
+	int ret;
+
+	ret = __cobalt_timer_gettime(tm, &val);
+
+	return ret ?: sys32_put_itimerspec(u_val, &val);
+}
+
+COBALT_SYSCALL32emu(timerfd_settime, primary,
+		    (int fd, int flags,
+		     const struct old_itimerspec32 __user *new_value,
+		     struct old_itimerspec32 __user *old_value))
+{
+	struct itimerspec64 ovalue, value;
+	int ret;
+
+	ret = sys32_get_itimerspec(&value, new_value);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue);
+	if (ret)
+		return ret;
+
+	if (old_value) {
+		ret = sys32_put_itimerspec(old_value, &ovalue);
+		value.it_value.tv_sec = 0;
+		value.it_value.tv_nsec = 0;
+		__cobalt_timerfd_settime(fd, flags, &value, NULL);
+	}
+
+	return ret;
+}
+
+COBALT_SYSCALL32emu(timerfd_gettime, current,
+		    (int fd, struct old_itimerspec32 __user *curr_value))
+{
+	struct itimerspec64 value;
+	int ret;
+
+	ret = __cobalt_timerfd_gettime(fd, &value);
+
+	return ret ?: sys32_put_itimerspec(curr_value, &value);
+}
+
+COBALT_SYSCALL32emu(sigwait, primary,
+		    (const compat_sigset_t __user *u_set,
+		     int __user *u_sig))
+{
+	sigset_t set;
+	int ret, sig;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	sig = __cobalt_sigwait(&set);
+	if (sig < 0)
+		return sig;
+
+	return cobalt_copy_to_user(u_sig, &sig, sizeof(*u_sig));
+}
+
+COBALT_SYSCALL32emu(sigtimedwait, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si,
+		     const struct old_timespec32 __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	ret = sys32_get_timespec(&timeout, u_timeout);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigtimedwait64, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si,
+		     const struct __kernel_timespec __user *u_timeout))
+{
+	struct timespec64 timeout;
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	ret = cobalt_get_timespec64(&timeout, u_timeout);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigtimedwait(&set, &timeout, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigwaitinfo, nonrestartable,
+		    (const compat_sigset_t __user *u_set,
+		     struct compat_siginfo __user *u_si))
+{
+	sigset_t set;
+	int ret;
+
+	ret = sys32_get_sigset(&set, u_set);
+	if (ret)
+		return ret;
+
+	return __cobalt_sigwaitinfo(&set, u_si, true);
+}
+
+COBALT_SYSCALL32emu(sigpending, primary, (compat_old_sigset_t __user *u_set))
+{
+	struct cobalt_thread *curr = cobalt_current_thread();
+
+	return sys32_put_sigset((compat_sigset_t *)u_set, &curr->sigpending);
+}
+
+COBALT_SYSCALL32emu(sigqueue, conforming,
+		    (pid_t pid, int sig,
+		     const union compat_sigval __user *u_value))
+{
+	union sigval val;
+	int ret;
+
+	ret = sys32_get_sigval(&val, u_value);
+
+	return ret ?: __cobalt_sigqueue(pid, sig, &val);
+}
+
+COBALT_SYSCALL32emu(monitor_wait, nonrestartable,
+		    (struct cobalt_monitor_shadow __user *u_mon,
+		     int event, const struct old_timespec32 __user *u_ts,
+		     int __user *u_ret))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = sys32_get_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_monitor_wait(u_mon, event, tsp, u_ret);
+}
+
+COBALT_SYSCALL32emu(monitor_wait64, nonrestartable,
+		    (struct cobalt_monitor_shadow __user *u_mon, int event,
+		     const struct __kernel_timespec __user *u_ts,
+		     int __user *u_ret))
+{
+	return __cobalt_monitor_wait64(u_mon, event, u_ts, u_ret);
+}
+
+COBALT_SYSCALL32emu(event_wait, primary,
+		    (struct cobalt_event_shadow __user *u_event,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode, const struct old_timespec32 __user *u_ts))
+{
+	struct timespec64 ts, *tsp = NULL;
+	int ret;
+
+	if (u_ts) {
+		tsp = &ts;
+		ret = sys32_get_timespec(&ts, u_ts);
+		if (ret)
+			return ret;
+	}
+
+	return __cobalt_event_wait(u_event, bits, u_bits_r, mode, tsp);
+}
+
+COBALT_SYSCALL32emu(event_wait64, primary,
+		    (struct cobalt_event_shadow __user *u_event,
+		     unsigned int bits,
+		     unsigned int __user *u_bits_r,
+		     int mode, const struct __kernel_timespec __user *u_ts))
+{
+	return __cobalt_event_wait64(u_event, bits, u_bits_r, mode, u_ts);
+}
+
+COBALT_SYSCALL32emu(select, primary,
+		    (int nfds,
+		     compat_fd_set __user *u_rfds,
+		     compat_fd_set __user *u_wfds,
+		     compat_fd_set __user *u_xfds,
+		     struct old_timeval32 __user *u_tv))
+{
+	return __cobalt_select(nfds, u_rfds, u_wfds, u_xfds, u_tv, true);
+}
+
+COBALT_SYSCALL32emu(recvmsg, handover,
+		    (int fd, struct compat_msghdr __user *umsg,
+		     int flags))
+{
+	struct user_msghdr m;
+	ssize_t ret;
+
+	ret = sys32_get_msghdr(&m, umsg);
+	if (ret)
+		return ret;
+
+	ret = rtdm_fd_recvmsg(fd, &m, flags);
+	if (ret < 0)
+		return ret;
+
+	return sys32_put_msghdr(umsg, &m) ?: ret;
+}
+
+static int get_timespec32(struct timespec64 *ts,
+			  const void __user *u_ts)
+{
+	return sys32_get_timespec(ts, u_ts);
+}
+
+static int get_mmsg32(struct mmsghdr *mmsg, void __user *u_mmsg)
+{
+	return sys32_get_mmsghdr(mmsg, u_mmsg);
+}
+
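+/*
+ * Stores one converted entry, then advances the caller-maintained
+ * cursor (*u_mmsg_p) by one compat-sized record, so that the vector
+ * walker needs no knowledge of the compat layout.
+ */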
+static int put_mmsg32(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return sys32_put_mmsghdr(q, mmsg);
+}
+
+COBALT_SYSCALL32emu(recvmmsg, primary,
+	       (int ufd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+		unsigned int flags, struct old_timespec32 *u_timeout))
+{
+	return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg32, put_mmsg32,
+				  get_timespec32);
+}
+
+COBALT_SYSCALL32emu(recvmmsg64, primary,
+		    (int ufd, struct compat_mmsghdr __user *u_msgvec,
+		     unsigned int vlen, unsigned int flags,
+		     struct __kernel_timespec *u_timeout))
+{
+	return __rtdm_fd_recvmmsg64(ufd, u_msgvec, vlen, flags, u_timeout,
+				    get_mmsg32, put_mmsg32);
+}
+
+COBALT_SYSCALL32emu(sendmsg, handover,
+		    (int fd, struct compat_msghdr __user *umsg, int flags))
+{
+	struct user_msghdr m;
+	int ret;
+
+	ret = sys32_get_msghdr(&m, umsg);
+
+	return ret ?: rtdm_fd_sendmsg(fd, &m, flags);
+}
+
+static int put_mmsglen32(void __user **u_mmsg_p, const struct mmsghdr *mmsg)
+{
+	struct compat_mmsghdr __user **p = (struct compat_mmsghdr **)u_mmsg_p,
+		*q __user = (*p)++;
+
+	return __xn_put_user(mmsg->msg_len, &q->msg_len);
+}
+
+COBALT_SYSCALL32emu(sendmmsg, primary,
+		    (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+		     unsigned int flags))
+{
+	return __rtdm_fd_sendmmsg(fd, u_msgvec, vlen, flags,
+				  get_mmsg32, put_mmsglen32);
+}
+
+COBALT_SYSCALL32emu(mmap, lostage,
+		    (int fd, struct compat_rtdm_mmap_request __user *u_crma,
+		     compat_uptr_t __user *u_caddrp))
+{
+	struct _rtdm_mmap_request rma;
+	compat_uptr_t u_caddr;
+	void *u_addr = NULL;
+	int ret;
+
+	if (u_crma == NULL ||
+	    !access_rok(u_crma, sizeof(*u_crma)) ||
+	    __xn_get_user(rma.length, &u_crma->length) ||
+	    __xn_get_user(rma.offset, &u_crma->offset) ||
+	    __xn_get_user(rma.prot, &u_crma->prot) ||
+	    __xn_get_user(rma.flags, &u_crma->flags))
+		return -EFAULT;
+
+	ret = rtdm_fd_mmap(fd, &rma, &u_addr);
+	if (ret)
+		return ret;
+
+	u_caddr = ptr_to_compat(u_addr);
+
+	return cobalt_copy_to_user(u_caddrp, &u_caddr, sizeof(u_caddr));
+}
+
+COBALT_SYSCALL32emu(backtrace, current,
+		    (int nr, compat_ulong_t __user *u_backtrace,
+		     int reason))
+{
+	compat_ulong_t cbacktrace[SIGSHADOW_BACKTRACE_DEPTH];
+	unsigned long backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+	int ret, n;
+
+	if (nr <= 0)
+		return 0;
+
+	if (nr > SIGSHADOW_BACKTRACE_DEPTH)
+		nr = SIGSHADOW_BACKTRACE_DEPTH;
+
+	ret = cobalt_copy_from_user(cbacktrace, u_backtrace,
+				       nr * sizeof(compat_ulong_t));
+	if (ret)
+		return ret;
+
+	for (n = 0; n < nr; n++)
+		backtrace[n] = cbacktrace[n];
+
+	xndebug_trace_relax(nr, backtrace, reason);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h
new file mode 100644
index 0000000..37f58ef
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/syscall32.h
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_SYSCALL32_H
+#define _COBALT_POSIX_SYSCALL32_H
+
+#include <cobalt/kernel/compat.h>
+
+struct cobalt_mutex_shadow;
+struct cobalt_event_shadow;
+struct cobalt_cond_shadow;
+struct cobalt_sem_shadow;
+struct cobalt_monitor_shadow;
+
+COBALT_SYSCALL32emu_DECL(thread_create,
+			 (compat_ulong_t pth,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param_ex,
+			  int xid,
+			  __u32 __user *u_winoff));
+
+COBALT_SYSCALL32emu_DECL(thread_setschedparam_ex,
+			 (compat_ulong_t pth,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(thread_getschedparam_ex,
+			 (compat_ulong_t pth,
+			  int __user *u_policy,
+			  struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(thread_setschedprio,
+			 (compat_ulong_t pth,
+			  int prio,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(clock_getres,
+			 (clockid_t clock_id,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_getres64,
+			 (clockid_t clock_id,
+			  struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_gettime,
+			 (clockid_t clock_id,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_gettime64,
+			 (clockid_t clock_id,
+			  struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_settime,
+			 (clockid_t clock_id,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_settime64,
+			 (clockid_t clock_id,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(clock_adjtime,
+			 (clockid_t clock_id,
+			  struct old_timex32 __user *u_tx));
+
+COBALT_SYSCALL32emu_DECL(clock_adjtime64,
+			 (clockid_t clock_id,
+			  struct __kernel_timex __user *u_tx));
+
+COBALT_SYSCALL32emu_DECL(clock_nanosleep,
+			 (clockid_t clock_id, int flags,
+			  const struct old_timespec32 __user *u_rqt,
+			  struct old_timespec32 __user *u_rmt));
+
+COBALT_SYSCALL32emu_DECL(clock_nanosleep64,
+			 (clockid_t clock_id, int flags,
+			  const struct __kernel_timespec __user *u_rqt,
+			  struct __kernel_timespec __user *u_rmt));
+
+COBALT_SYSCALL32emu_DECL(mutex_timedlock,
+			 (struct cobalt_mutex_shadow __user *u_mx,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mutex_timedlock64,
+			 (struct cobalt_mutex_shadow __user *u_mx,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(cond_wait_prologue,
+			 (struct cobalt_cond_shadow __user *u_cnd,
+			  struct cobalt_mutex_shadow __user *u_mx,
+			  int *u_err,
+			  unsigned int timed,
+			  struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_open,
+			 (const char __user *u_name, int oflags,
+			  mode_t mode, struct compat_mq_attr __user *u_attr));
+
+COBALT_SYSCALL32emu_DECL(mq_getattr,
+			 (mqd_t uqd, struct compat_mq_attr __user *u_attr));
+
+COBALT_SYSCALL32emu_DECL(mq_timedsend,
+			 (mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedsend64,
+			 (mqd_t uqd, const void __user *u_buf, size_t len,
+			  unsigned int prio,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedreceive,
+			 (mqd_t uqd, void __user *u_buf,
+			  compat_ssize_t __user *u_len,
+			  unsigned int __user *u_prio,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_timedreceive64,
+			 (mqd_t uqd, void __user *u_buf,
+			  compat_ssize_t __user *u_len,
+			  unsigned int __user *u_prio,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(mq_notify,
+			 (mqd_t fd, const struct compat_sigevent *__user u_cev));
+
+COBALT_SYSCALL32emu_DECL(sched_weightprio,
+			 (int policy,
+			  const struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(sched_setconfig_np,
+			 (int cpu, int policy,
+			  union compat_sched_config __user *u_config,
+			  size_t len));
+
+COBALT_SYSCALL32emu_DECL(sched_getconfig_np,
+			 (int cpu, int policy,
+			  union compat_sched_config __user *u_config,
+			  size_t len));
+
+COBALT_SYSCALL32emu_DECL(sched_setscheduler_ex,
+			 (compat_pid_t pid,
+			  int policy,
+			  const struct compat_sched_param_ex __user *u_param,
+			  __u32 __user *u_winoff,
+			  int __user *u_promoted));
+
+COBALT_SYSCALL32emu_DECL(sched_getscheduler_ex,
+			 (compat_pid_t pid,
+			  int __user *u_policy,
+			  struct compat_sched_param_ex __user *u_param));
+
+COBALT_SYSCALL32emu_DECL(timer_create,
+			 (clockid_t clock,
+			  const struct compat_sigevent __user *u_sev,
+			  timer_t __user *u_tm));
+
+COBALT_SYSCALL32emu_DECL(timer_settime,
+			 (timer_t tm, int flags,
+			  const struct old_itimerspec32 __user *u_newval,
+			  struct old_itimerspec32 __user *u_oldval));
+
+COBALT_SYSCALL32emu_DECL(timer_gettime,
+			 (timer_t tm,
+			  struct old_itimerspec32 __user *u_val));
+
+COBALT_SYSCALL32emu_DECL(timerfd_settime,
+			 (int fd, int flags,
+			  const struct old_itimerspec32 __user *new_value,
+			  struct old_itimerspec32 __user *old_value));
+
+COBALT_SYSCALL32emu_DECL(timerfd_gettime,
+			 (int fd, struct old_itimerspec32 __user *value));
+
+COBALT_SYSCALL32emu_DECL(sigwait,
+			 (const compat_sigset_t __user *u_set,
+			  int __user *u_sig));
+
+COBALT_SYSCALL32emu_DECL(sigtimedwait,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si,
+			  const struct old_timespec32 __user *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sigtimedwait64,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si,
+			  const struct __kernel_timespec __user *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sigwaitinfo,
+			 (const compat_sigset_t __user *u_set,
+			  struct compat_siginfo __user *u_si));
+
+COBALT_SYSCALL32emu_DECL(sigpending,
+			 (compat_old_sigset_t __user *u_set));
+
+COBALT_SYSCALL32emu_DECL(sigqueue,
+			 (pid_t pid, int sig,
+			  const union compat_sigval __user *u_value));
+
+COBALT_SYSCALL32emu_DECL(monitor_wait,
+			 (struct cobalt_monitor_shadow __user *u_mon,
+			  int event, const struct old_timespec32 __user *u_ts,
+			  int __user *u_ret));
+
+COBALT_SYSCALL32emu_DECL(monitor_wait64,
+			 (struct cobalt_monitor_shadow __user *u_mon,
+			  int event,
+			  const struct __kernel_timespec __user *u_ts,
+			  int __user *u_ret));
+
+COBALT_SYSCALL32emu_DECL(event_wait,
+			 (struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode, const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(event_wait64,
+			 (struct cobalt_event_shadow __user *u_event,
+			  unsigned int bits,
+			  unsigned int __user *u_bits_r,
+			  int mode,
+			  const struct __kernel_timespec __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(select,
+			 (int nfds,
+			  compat_fd_set __user *u_rfds,
+			  compat_fd_set __user *u_wfds,
+			  compat_fd_set __user *u_xfds,
+			  struct old_timeval32 __user *u_tv));
+
+COBALT_SYSCALL32emu_DECL(recvmsg,
+			 (int fd, struct compat_msghdr __user *umsg,
+			  int flags));
+
+COBALT_SYSCALL32emu_DECL(recvmmsg,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec,
+			  unsigned int vlen,
+			  unsigned int flags, struct old_timespec32 *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(recvmmsg64,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec,
+			  unsigned int vlen,
+			  unsigned int flags,
+			  struct __kernel_timespec *u_timeout));
+
+COBALT_SYSCALL32emu_DECL(sendmsg,
+			 (int fd, struct compat_msghdr __user *umsg,
+			  int flags));
+
+COBALT_SYSCALL32emu_DECL(sendmmsg,
+			 (int fd, struct compat_mmsghdr __user *u_msgvec, unsigned int vlen,
+			  unsigned int flags));
+
+COBALT_SYSCALL32emu_DECL(mmap,
+			 (int fd,
+			  struct compat_rtdm_mmap_request __user *u_rma,
+			  compat_uptr_t __user *u_addrp));
+
+COBALT_SYSCALL32emu_DECL(backtrace,
+			 (int nr, compat_ulong_t __user *u_backtrace,
+			  int reason));
+
+COBALT_SYSCALL32emu_DECL(sem_open,
+			 (compat_uptr_t __user *u_addrp,
+			  const char __user *u_name,
+			  int oflags, mode_t mode, unsigned int value));
+
+COBALT_SYSCALL32emu_DECL(sem_timedwait,
+			 (struct cobalt_sem_shadow __user *u_sem,
+			  const struct old_timespec32 __user *u_ts));
+
+COBALT_SYSCALL32emu_DECL(sem_timedwait64,
+			 (struct cobalt_sem_shadow __user *u_sem,
+			  const struct __kernel_timespec __user *u_ts));
+
+#endif /* !_COBALT_POSIX_SYSCALL32_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c
new file mode 100644
index 0000000..94a6e39
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.c
@@ -0,0 +1,954 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/types.h>
+#include <linux/cred.h>
+#include <linux/jhash.h>
+#include <linux/signal.h>
+#include <linux/jiffies.h>
+#include <linux/err.h>
+#include "internal.h"
+#include "thread.h"
+#include "sched.h"
+#include "signal.h"
+#include "timer.h"
+#include "clock.h"
+#include "sem.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-posix.h>
+
+xnticks_t cobalt_time_slice = CONFIG_XENO_OPT_RR_QUANTUM * 1000;
+
+#define PTHREAD_HSLOTS (1 << 8)	/* Must be a power of 2 */
+
+/* Process-local index, pthread_t x mm_struct (cobalt_local_hkey). */
+struct local_thread_hash {
+	pid_t pid;
+	struct cobalt_thread *thread;
+	struct cobalt_local_hkey hkey;
+	struct local_thread_hash *next;
+};
+
+/* System-wide index on task_pid_nr(). */
+struct global_thread_hash {
+	pid_t pid;
+	struct cobalt_thread *thread;
+	struct global_thread_hash *next;
+};
+
+static struct local_thread_hash *local_index[PTHREAD_HSLOTS];
+
+static struct global_thread_hash *global_index[PTHREAD_HSLOTS];
+
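+/*
+ * Insert a thread into both indexes. A single allocation covers the
+ * local and global slots, so thread_unhash() releases both by
+ * freeing the local one.
+ */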
+static inline struct local_thread_hash *
+thread_hash(const struct cobalt_local_hkey *hkey,
+	    struct cobalt_thread *thread, pid_t pid)
+{
+	struct global_thread_hash **ghead, *gslot;
+	struct local_thread_hash **lhead, *lslot;
+	u32 hash;
+	void *p;
+	spl_t s;
+
+	p = xnmalloc(sizeof(*lslot) + sizeof(*gslot));
+	if (p == NULL)
+		return NULL;
+
+	lslot = p;
+	lslot->hkey = *hkey;
+	lslot->thread = thread;
+	lslot->pid = pid;
+	hash = jhash2((u32 *)&lslot->hkey,
+		      sizeof(lslot->hkey) / sizeof(u32), 0);
+	lhead = &local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	gslot = p + sizeof(*lslot);
+	gslot->pid = pid;
+	gslot->thread = thread;
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+	ghead = &global_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+	lslot->next = *lhead;
+	*lhead = lslot;
+	gslot->next = *ghead;
+	*ghead = gslot;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return lslot;
+}
+
+static inline void thread_unhash(const struct cobalt_local_hkey *hkey)
+{
+	struct global_thread_hash **gtail, *gslot;
+	struct local_thread_hash **ltail, *lslot;
+	pid_t pid;
+	u32 hash;
+	spl_t s;
+
+	hash = jhash2((u32 *) hkey, sizeof(*hkey) / sizeof(u32), 0);
+	ltail = &local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	lslot = *ltail;
+	while (lslot &&
+	       (lslot->hkey.u_pth != hkey->u_pth ||
+		lslot->hkey.mm != hkey->mm)) {
+		ltail = &lslot->next;
+		lslot = *ltail;
+	}
+
+	if (lslot == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return;
+	}
+
+	*ltail = lslot->next;
+	pid = lslot->pid;
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+	gtail = &global_index[hash & (PTHREAD_HSLOTS - 1)];
+	gslot = *gtail;
+	while (gslot && gslot->pid != pid) {
+		gtail = &gslot->next;
+		gslot = *gtail;
+	}
+	/* gslot must be found here. */
+	XENO_BUG_ON(COBALT, !(gslot && gtail));
+	*gtail = gslot->next;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnfree(lslot);
+}
+
+static struct cobalt_thread *
+thread_lookup(const struct cobalt_local_hkey *hkey)
+{
+	struct local_thread_hash *lslot;
+	struct cobalt_thread *thread;
+	u32 hash;
+	spl_t s;
+
+	hash = jhash2((u32 *)hkey, sizeof(*hkey) / sizeof(u32), 0);
+	lslot = local_index[hash & (PTHREAD_HSLOTS - 1)];
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (lslot != NULL &&
+	       (lslot->hkey.u_pth != hkey->u_pth || lslot->hkey.mm != hkey->mm))
+		lslot = lslot->next;
+
+	thread = lslot ? lslot->thread : NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+
+struct cobalt_thread *cobalt_thread_find(pid_t pid) /* nklocked, IRQs off */
+{
+	struct global_thread_hash *gslot;
+	u32 hash;
+
+	hash = jhash2((u32 *)&pid, sizeof(pid) / sizeof(u32), 0);
+
+	gslot = global_index[hash & (PTHREAD_HSLOTS - 1)];
+	while (gslot && gslot->pid != pid)
+		gslot = gslot->next;
+
+	return gslot ? gslot->thread : NULL;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_find);
+
+struct cobalt_thread *cobalt_thread_find_local(pid_t pid) /* nklocked, IRQs off */
+{
+	struct cobalt_thread *thread;
+
+	thread = cobalt_thread_find(pid);
+	if (thread == NULL || thread->hkey.mm != current->mm)
+		return NULL;
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_find_local);
+
+struct cobalt_thread *cobalt_thread_lookup(unsigned long pth) /* nklocked, IRQs off */
+{
+	struct cobalt_local_hkey hkey;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	return thread_lookup(&hkey);
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_lookup);
+
+void cobalt_thread_map(struct xnthread *curr)
+{
+	struct cobalt_thread *thread;
+
+	thread = container_of(curr, struct cobalt_thread, threadbase);
+	thread->process = cobalt_current_process();
+	XENO_BUG_ON(COBALT, thread->process == NULL);
+}
+
+struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr)
+{
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	thread = container_of(curr, struct cobalt_thread, threadbase);
+	/*
+	 * Unhash first, to prevent further access to the TCB from
+	 * userland.
+	 */
+	thread_unhash(&thread->hkey);
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_mark_deleted(thread);
+	list_del(&thread->next);
+	xnlock_put_irqrestore(&nklock, s);
+	cobalt_signal_flush(thread);
+	xnsynch_destroy(&thread->monitor_synch);
+	xnsynch_destroy(&thread->sigwait);
+
+	return NULL;
+}
+
+struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie)
+{
+	struct cobalt_thread *thread;
+
+	thread = container_of(zombie, struct cobalt_thread, threadbase);
+	xnfree(thread);
+
+	return NULL;
+}
+
+int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy,
+				     const struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	xnticks_t tslice;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC,
+			       struct cobalt_thread)) {
+		ret = -ESRCH;
+		goto out;
+	}
+
+	tslice = thread->threadbase.rrperiod;
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, &tslice);
+	if (sched_class == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+	xnthread_set_slice(&thread->threadbase, tslice);
+	if (cobalt_call_extension(thread_setsched, &thread->extref, ret,
+				  sched_class, &param) && ret)
+		goto out;
+	ret = xnthread_set_schedparam(&thread->threadbase,
+				      sched_class, &param);
+	xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread,
+				     int *policy_r,
+				     struct sched_param_ex *param_ex)
+{
+	struct xnsched_class *base_class;
+	struct xnthread *base_thread;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!cobalt_obj_active(thread, COBALT_THREAD_MAGIC,
+			       struct cobalt_thread)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -ESRCH;
+	}
+
+	base_thread = &thread->threadbase;
+	base_class = base_thread->base_class;
+	*policy_r = base_class->policy;
+
+	param_ex->sched_priority = xnthread_base_priority(base_thread);
+	if (param_ex->sched_priority == 0) /* SCHED_FIFO/SCHED_WEAK */
+		*policy_r = SCHED_NORMAL;
+
+	if (base_class == &xnsched_class_rt) {
+		if (xnthread_test_state(base_thread, XNRRB)) {
+			u_ns2ts(&param_ex->sched_rr_quantum, base_thread->rrperiod);
+			*policy_r = SCHED_RR;
+		}
+		goto out;
+	}
+
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	if (base_class == &xnsched_class_weak) {
+		if (*policy_r != SCHED_WEAK)
+			param_ex->sched_priority = -param_ex->sched_priority;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	if (base_class == &xnsched_class_sporadic) {
+		param_ex->sched_ss_low_priority = base_thread->pss->param.low_prio;
+		u_ns2ts(&param_ex->sched_ss_repl_period, base_thread->pss->param.repl_period);
+		u_ns2ts(&param_ex->sched_ss_init_budget, base_thread->pss->param.init_budget);
+		param_ex->sched_ss_max_repl = base_thread->pss->param.max_repl;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	if (base_class == &xnsched_class_tp) {
+		param_ex->sched_tp_partition =
+			base_thread->tps - base_thread->sched->tp.partitions;
+		goto out;
+	}
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	if (base_class == &xnsched_class_quota) {
+		param_ex->sched_quota_group = base_thread->quota->tgid;
+		goto out;
+	}
+#endif
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+
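+/*
+ * First stage of thread creation: allocate and initialize the
+ * kernel-side TCB only. The caller completes the job by mapping the
+ * shadow (cobalt_map_user()) and hashing the user-space handle; see
+ * __cobalt_thread_create() and cobalt_thread_shadow().
+ */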
+static int pthread_create(struct cobalt_thread **thread_p,
+			  int policy,
+			  const struct sched_param_ex *param_ex,
+			  struct task_struct *task)
+{
+	struct cobalt_process *process = cobalt_current_process();
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+	struct xnthread_init_attr iattr;
+	struct cobalt_thread *thread;
+	xnticks_t tslice;
+	int ret, n;
+	spl_t s;
+
+	thread = xnmalloc(sizeof(*thread));
+	if (thread == NULL)
+		return -EAGAIN;
+
+	tslice = cobalt_time_slice;
+	sched_class = cobalt_sched_policy_param(&param, policy,
+						param_ex, &tslice);
+	if (sched_class == NULL) {
+		xnfree(thread);
+		return -EINVAL;
+	}
+
+	iattr.name = task->comm;
+	iattr.flags = XNUSER|XNFPU;
+	iattr.personality = &cobalt_personality;
+	iattr.affinity = CPU_MASK_ALL;
+	ret = xnthread_init(&thread->threadbase, &iattr, sched_class, &param);
+	if (ret) {
+		xnfree(thread);
+		return ret;
+	}
+
+	thread->magic = COBALT_THREAD_MAGIC;
+	xnsynch_init(&thread->monitor_synch, XNSYNCH_FIFO, NULL);
+
+	xnsynch_init(&thread->sigwait, XNSYNCH_FIFO, NULL);
+	sigemptyset(&thread->sigpending);
+	for (n = 0; n < _NSIG; n++)
+		INIT_LIST_HEAD(thread->sigqueues + n);
+
+	xnthread_set_slice(&thread->threadbase, tslice);
+	cobalt_set_extref(&thread->extref, NULL, NULL);
+
+	/*
+	 * We need an anonymous registry entry to obtain a handle for
+	 * fast mutex locking.
+	 */
+	ret = xnthread_register(&thread->threadbase, "");
+	if (ret) {
+		xnsynch_destroy(&thread->monitor_synch);
+		xnsynch_destroy(&thread->sigwait);
+		__xnthread_discard(&thread->threadbase);
+		xnfree(thread);
+		return ret;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&thread->next, process ? &process->thread_list
+					     : &cobalt_global_thread_list);
+	xnlock_put_irqrestore(&nklock, s);
+
+	thread->hkey.u_pth = 0;
+	thread->hkey.mm = NULL;
+
+	*thread_p = thread;
+
+	return 0;
+}
+
+static void pthread_discard(struct cobalt_thread *thread)
+{
+	spl_t s;
+
+	xnsynch_destroy(&thread->monitor_synch);
+	xnsynch_destroy(&thread->sigwait);
+
+	xnlock_get_irqsave(&nklock, s);
+	list_del(&thread->next);
+	xnlock_put_irqrestore(&nklock, s);
+	__xnthread_discard(&thread->threadbase);
+	xnfree(thread);
+}
+
+static inline int pthread_setmode_np(int clrmask, int setmask, int *mode_r)
+{
+	const int valid_flags = XNLOCK|XNWARN|XNTRAPLB;
+	int old;
+
+	/*
+	 * The conforming mode bit is actually zero, since jumping to
+	 * this code entailed switching to primary mode already.
+	 */
+	if ((clrmask & ~valid_flags) != 0 || (setmask & ~valid_flags) != 0)
+		return -EINVAL;
+
+	old = xnthread_set_mode(clrmask, setmask);
+	if (mode_r)
+		*mode_r = old;
+
+	if ((clrmask & ~setmask) & XNLOCK)
+		/* Reschedule if the scheduler has been unlocked. */
+		xnsched_run();
+
+	return 0;
+}
+
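+/*
+ * Resolve a user-space pthread handle into its Cobalt TCB. If the
+ * handle is not hashed yet and a window offset pointer was supplied,
+ * the calling task is shadowed on the fly under this handle (lazy
+ * promotion), which is reported via *promoted_r.
+ */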
+static struct cobalt_thread *thread_lookup_or_shadow(unsigned long pth,
+						     __u32 __user *u_winoff,
+						     int *promoted_r)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+
+	*promoted_r = 0;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+
+	thread = thread_lookup(&hkey);
+	if (thread == NULL) {
+		if (u_winoff == NULL)
+			return ERR_PTR(-ESRCH);
+
+		thread = cobalt_thread_shadow(&hkey, u_winoff);
+		if (!IS_ERR(thread))
+			*promoted_r = 1;
+	}
+
+	return thread;
+}
+
+int cobalt_thread_setschedparam_ex(unsigned long pth,
+				   int policy,
+				   const struct sched_param_ex *param_ex,
+				   __u32 __user *u_winoff,
+				   int __user *u_promoted)
+{
+	struct cobalt_thread *thread;
+	int ret, promoted;
+
+	trace_cobalt_pthread_setschedparam(pth, policy, param_ex);
+
+	thread = thread_lookup_or_shadow(pth, u_winoff, &promoted);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(thread_setschedparam_ex, conforming,
+	       (unsigned long pth,
+		int policy,
+		const struct sched_param_ex __user *u_param,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	struct sched_param_ex param_ex;
+
+	if (cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex)))
+		return -EFAULT;
+
+	return cobalt_thread_setschedparam_ex(pth, policy, &param_ex,
+					      u_winoff, u_promoted);
+}
+
+int cobalt_thread_getschedparam_ex(unsigned long pth,
+				   int *policy_r,
+				   struct sched_param_ex *param_ex)
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret;
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		return -ESRCH;
+
+	ret = __cobalt_thread_getschedparam_ex(thread, policy_r, param_ex);
+	if (ret)
+		return ret;
+
+	trace_cobalt_pthread_getschedparam(pth, *policy_r, param_ex);
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_getschedparam_ex, current,
+	       (unsigned long pth,
+		int __user *u_policy,
+		struct sched_param_ex __user *u_param))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	ret = cobalt_thread_getschedparam_ex(pth, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	ret = cobalt_copy_to_user(u_policy, &policy, sizeof(policy));
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_param, &param_ex, sizeof(param_ex));
+}
+
+int cobalt_thread_setschedprio(unsigned long pth,
+			       int prio,
+			       __u32 __user *u_winoff,
+			       int __user *u_promoted)
+{
+	struct sched_param_ex param_ex;
+	struct cobalt_thread *thread;
+	int ret, policy, promoted;
+
+	trace_cobalt_pthread_setschedprio(pth, prio);
+
+	thread = thread_lookup_or_shadow(pth, u_winoff, &promoted);
+	if (IS_ERR(thread))
+		return PTR_ERR(thread);
+
+	ret = __cobalt_thread_getschedparam_ex(thread, &policy, &param_ex);
+	if (ret)
+		return ret;
+
+	param_ex.sched_priority = prio;
+
+	ret = __cobalt_thread_setschedparam_ex(thread, policy, &param_ex);
+	if (ret)
+		return ret;
+
+	return cobalt_copy_to_user(u_promoted, &promoted, sizeof(promoted));
+}
+
+COBALT_SYSCALL(thread_setschedprio, conforming,
+	       (unsigned long pth,
+		int prio,
+		__u32 __user *u_winoff,
+		int __user *u_promoted))
+{
+	return cobalt_thread_setschedprio(pth, prio, u_winoff, u_promoted);
+}
+
+int __cobalt_thread_create(unsigned long pth, int policy,
+			   struct sched_param_ex *param_ex,
+			   int xid, __u32 __user *u_winoff)
+{
+	struct cobalt_thread *thread = NULL;
+	struct task_struct *p = current;
+	struct cobalt_local_hkey hkey;
+	int ret;
+
+	trace_cobalt_pthread_create(pth, policy, param_ex);
+
+	/*
+	 * We have been passed the pthread_t identifier the user-space
+	 * Cobalt library has assigned to our caller; we'll index our
+	 * internal pthread_t descriptor in kernel space on it.
+	 */
+	hkey.u_pth = pth;
+	hkey.mm = p->mm;
+
+	ret = pthread_create(&thread, policy, param_ex, p);
+	if (ret)
+		return ret;
+
+	ret = cobalt_map_user(&thread->threadbase, u_winoff);
+	if (ret) {
+		pthread_discard(thread);
+		return ret;
+	}
+
+	if (!thread_hash(&hkey, thread, task_pid_vnr(p))) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	thread->hkey = hkey;
+
+	if (xid > 0 && cobalt_push_personality(xid) == NULL) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	return xnthread_harden();
+fail:
+	xnthread_cancel(&thread->threadbase);
+
+	return ret;
+}
+
+COBALT_SYSCALL(thread_create, init,
+	       (unsigned long pth, int policy,
+		struct sched_param_ex __user *u_param,
+		int xid,
+		__u32 __user *u_winoff))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = cobalt_copy_from_user(&param_ex, u_param, sizeof(param_ex));
+	if (ret)
+		return ret;
+
+	return __cobalt_thread_create(pth, policy, &param_ex, xid, u_winoff);
+}
+
+struct cobalt_thread *
+cobalt_thread_shadow(struct cobalt_local_hkey *hkey,
+		     __u32 __user *u_winoff)
+{
+	struct cobalt_thread *thread = NULL;
+	struct sched_param_ex param_ex;
+	int ret;
+
+	if (xnthread_current())
+		return ERR_PTR(-EBUSY);
+
+	param_ex.sched_priority = 0;
+	trace_cobalt_pthread_create(hkey->u_pth, SCHED_NORMAL, &param_ex);
+	ret = pthread_create(&thread, SCHED_NORMAL, &param_ex, current);
+	if (ret)
+		return ERR_PTR(ret);
+
+	ret = cobalt_map_user(&thread->threadbase, u_winoff);
+	if (ret) {
+		pthread_discard(thread);
+		return ERR_PTR(ret);
+	}
+
+	if (!thread_hash(hkey, thread, task_pid_vnr(current))) {
+		ret = -EAGAIN;
+		goto fail;
+	}
+
+	thread->hkey = *hkey;
+
+	xnthread_harden();
+
+	return thread;
+fail:
+	xnthread_cancel(&thread->threadbase);
+
+	return ERR_PTR(ret);
+}
+
+COBALT_SYSCALL(thread_setmode, primary,
+	       (int clrmask, int setmask, int __user *u_mode_r))
+{
+	int ret, old;
+
+	trace_cobalt_pthread_setmode(clrmask, setmask);
+
+	ret = pthread_setmode_np(clrmask, setmask, &old);
+	if (ret)
+		return ret;
+
+	if (u_mode_r && cobalt_copy_to_user(u_mode_r, &old, sizeof(old)))
+		return -EFAULT;
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_setname, current,
+	       (unsigned long pth, const char __user *u_name))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	char name[XNOBJECT_NAME_LEN];
+	struct task_struct *p;
+	spl_t s;
+
+	if (cobalt_strncpy_from_user(name, u_name,
+				     sizeof(name) - 1) < 0)
+		return -EFAULT;
+
+	name[sizeof(name) - 1] = '\0';
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+
+	trace_cobalt_pthread_setname(pth, name);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	thread = thread_lookup(&hkey);
+	if (thread == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -ESRCH;
+	}
+
+	ksformat(thread->threadbase.name,
+		 XNOBJECT_NAME_LEN - 1, "%s", name);
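+	/*
+	 * Pin the host task, so that we may keep writing to its
+	 * comm[] field after the nklock is dropped.
+	 */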
+	p = xnthread_host_task(&thread->threadbase);
+	get_task_struct(p);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	knamecpy(p->comm, name);
+	put_task_struct(p);
+
+	return 0;
+}
+
+COBALT_SYSCALL(thread_kill, conforming,
+	       (unsigned long pth, int sig))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	int ret;
+	spl_t s;
+
+	trace_cobalt_pthread_kill(pth, sig);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		ret = -ESRCH;
+	else
+		ret = __cobalt_kill(thread, sig, 0);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+COBALT_SYSCALL(thread_join, primary, (unsigned long pth))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	spl_t s;
+
+	trace_cobalt_pthread_join(pth);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (thread == NULL)
+		return -ESRCH;
+
+	return xnthread_join(&thread->threadbase, false);
+}
+
+COBALT_SYSCALL(thread_getpid, current, (unsigned long pth))
+{
+	struct cobalt_local_hkey hkey;
+	struct cobalt_thread *thread;
+	pid_t pid;
+	spl_t s;
+
+	trace_cobalt_pthread_pid(pth);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	hkey.u_pth = pth;
+	hkey.mm = current->mm;
+	thread = thread_lookup(&hkey);
+	if (thread == NULL)
+		pid = -ESRCH;
+	else
+		pid = xnthread_host_pid(&thread->threadbase);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return pid;
+}
+
+COBALT_SYSCALL(thread_getstat, current,
+	       (pid_t pid, struct cobalt_threadstat __user *u_stat))
+{
+	struct cobalt_threadstat stat;
+	struct cobalt_thread *p;
+	struct xnthread *thread;
+	xnticks_t xtime;
+	spl_t s;
+
+	trace_cobalt_pthread_stat(pid);
+
+	if (pid == 0) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+		xnlock_get_irqsave(&nklock, s);
+	} else {
+		xnlock_get_irqsave(&nklock, s);
+		p = cobalt_thread_find(pid);
+		if (p == NULL) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -ESRCH;
+		}
+		thread = &p->threadbase;
+	}
+
+	/* We have to hold the nklock to keep most values consistent. */
+	stat.cpu = xnsched_cpu(thread->sched);
+	stat.cprio = xnthread_current_priority(thread);
+	xtime = xnstat_exectime_get_total(&thread->stat.account);
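+	/*
+	 * The exec time counter of a running thread was last updated
+	 * at the previous context switch; add the time elapsed since
+	 * then to get an up-to-date figure.
+	 */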
+	if (thread->sched->curr == thread)
+		xtime += xnstat_exectime_now() -
+			xnstat_exectime_get_last_switch(thread->sched);
+	stat.xtime = xnclock_ticks_to_ns(&nkclock, xtime);
+	stat.msw = xnstat_counter_get(&thread->stat.ssw);
+	stat.csw = xnstat_counter_get(&thread->stat.csw);
+	stat.xsc = xnstat_counter_get(&thread->stat.xsc);
+	stat.pf = xnstat_counter_get(&thread->stat.pf);
+	stat.status = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		stat.status |= XNLOCK;
+	stat.timeout = xnthread_get_timeout(thread,
+					    xnclock_read_monotonic(&nkclock));
+	strcpy(stat.name, thread->name);
+	strcpy(stat.personality, thread->personality->name);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return cobalt_copy_to_user(u_stat, &stat, sizeof(stat));
+}
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+int cobalt_thread_extend(struct cobalt_extension *ext,
+			 void *priv)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+	struct xnthread_personality *prev;
+
+	trace_cobalt_pthread_extend(thread->hkey.u_pth, ext->core.name);
+
+	prev = cobalt_push_personality(ext->core.xid);
+	if (prev == NULL)
+		return -EINVAL;
+
+	cobalt_set_extref(&thread->extref, ext, priv);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_extend);
+
+void cobalt_thread_restrict(void)
+{
+	struct cobalt_thread *thread = cobalt_current_thread();
+
+	trace_cobalt_pthread_restrict(thread->hkey.u_pth,
+		      thread->threadbase.personality->name);
+	cobalt_pop_personality(&cobalt_personality);
+	cobalt_set_extref(&thread->extref, NULL, NULL);
+}
+EXPORT_SYMBOL_GPL(cobalt_thread_restrict);
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+const char *cobalt_trace_parse_sched_params(struct trace_seq *p, int policy,
+					    struct sched_param_ex *params)
+{
+	const char *ret = trace_seq_buffer_ptr(p);
+
+	switch (policy) {
+	case SCHED_QUOTA:
+		trace_seq_printf(p, "priority=%d, group=%d",
+				 params->sched_priority,
+				 params->sched_quota_group);
+		break;
+	case SCHED_TP:
+		trace_seq_printf(p, "priority=%d, partition=%d",
+				 params->sched_priority,
+				 params->sched_tp_partition);
+		break;
+	case SCHED_NORMAL:
+		break;
+	case SCHED_SPORADIC:
+		trace_seq_printf(p, "priority=%d, low_priority=%d, "
+				 "budget=(%ld.%09ld), period=(%ld.%09ld), "
+				 "maxrepl=%d",
+				 params->sched_priority,
+				 params->sched_ss_low_priority,
+				 params->sched_ss_init_budget.tv_sec,
+				 params->sched_ss_init_budget.tv_nsec,
+				 params->sched_ss_repl_period.tv_sec,
+				 params->sched_ss_repl_period.tv_nsec,
+				 params->sched_ss_max_repl);
+		break;
+	case SCHED_RR:
+	case SCHED_FIFO:
+	case SCHED_COBALT:
+	case SCHED_WEAK:
+	default:
+		trace_seq_printf(p, "priority=%d", params->sched_priority);
+		break;
+	}
+	trace_seq_putc(p, '\0');
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h
new file mode 100644
index 0000000..0959ff6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/thread.h
@@ -0,0 +1,228 @@
+/*
+ * Written by Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_THREAD_H
+#define _COBALT_POSIX_THREAD_H
+
+#include <linux/stdarg.h>
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/signal.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/uapi/thread.h>
+#include <cobalt/uapi/sched.h>
+/* CAUTION: rtdm/cobalt.h reads this header. */
+#include <xenomai/posix/syscall.h>
+#include <xenomai/posix/extension.h>
+
+#define PTHREAD_PROCESS_PRIVATE 0
+#define PTHREAD_PROCESS_SHARED  1
+
+#define PTHREAD_CREATE_JOINABLE 0
+#define PTHREAD_CREATE_DETACHED 1
+
+#define PTHREAD_INHERIT_SCHED  0
+#define PTHREAD_EXPLICIT_SCHED 1
+
+#define PTHREAD_MUTEX_NORMAL     0
+#define PTHREAD_MUTEX_RECURSIVE  1
+#define PTHREAD_MUTEX_ERRORCHECK 2
+#define PTHREAD_MUTEX_DEFAULT    0
+
+struct cobalt_thread;
+struct cobalt_threadstat;
+
+/*
+ * pthread_mutexattr_t and pthread_condattr_t fit on 32 bits, for
+ * compatibility with libc.
+ */
+
+/* The following definitions are copied from the LinuxThreads pthreadtypes.h. */
+struct _pthread_fastlock {
+	long int __status;
+	int __spinlock;
+};
+
+typedef struct {
+	struct _pthread_fastlock __c_lock;
+	long __c_waiting;
+	char __padding[48 - sizeof (struct _pthread_fastlock)
+		       - sizeof (long) - sizeof (long long)];
+	long long __align;
+} pthread_cond_t;
+
+enum {
+	PTHREAD_PRIO_NONE,
+	PTHREAD_PRIO_INHERIT,
+	PTHREAD_PRIO_PROTECT
+};
+
+typedef struct {
+	int __m_reserved;
+	int __m_count;
+	long __m_owner;
+	int __m_kind;
+	struct _pthread_fastlock __m_lock;
+} pthread_mutex_t;
+
+struct cobalt_local_hkey {
+	/** pthread_t from userland. */
+	unsigned long u_pth;
+	/** kernel mm context. */
+	struct mm_struct *mm;
+};
+
+struct cobalt_thread {
+	unsigned int magic;
+	struct xnthread threadbase;
+	struct cobalt_extref extref;
+	struct cobalt_process *process;
+	struct list_head next;	/* in global/process thread_list */
+
+	/** Signal management. */
+	sigset_t sigpending;
+	struct list_head sigqueues[_NSIG]; /* in cobalt_sigpending */
+	struct xnsynch sigwait;
+	struct list_head signext;
+
+	/** Monitor wait object and link holder. */
+	struct xnsynch monitor_synch;
+	struct list_head monitor_link;
+
+	struct cobalt_local_hkey hkey;
+};
+
+struct cobalt_sigwait_context {
+	struct xnthread_wait_context wc;
+	sigset_t *set;
+	struct siginfo *si;
+};
+
+static inline struct cobalt_thread *cobalt_current_thread(void)
+{
+	struct xnthread *curr = xnthread_current();
+	return curr ? container_of(curr, struct cobalt_thread, threadbase) : NULL;
+}
+
+int __cobalt_thread_create(unsigned long pth, int policy,
+			   struct sched_param_ex *param_ex,
+			   int xid, __u32 __user *u_winoff);
+
+int __cobalt_thread_setschedparam_ex(struct cobalt_thread *thread, int policy,
+				     const struct sched_param_ex *param_ex);
+
+int cobalt_thread_setschedparam_ex(unsigned long pth,
+				   int policy,
+				   const struct sched_param_ex *param_ex,
+				   __u32 __user *u_winoff,
+				   int __user *u_promoted);
+
+int cobalt_thread_getschedparam_ex(unsigned long pth,
+				   int *policy_r,
+				   struct sched_param_ex *param_ex);
+
+int __cobalt_thread_getschedparam_ex(struct cobalt_thread *thread,
+				     int *policy_r,
+				     struct sched_param_ex *param_ex);
+
+int cobalt_thread_setschedprio(unsigned long pth,
+			       int prio,
+			       __u32 __user *u_winoff,
+			       int __user *u_promoted);
+
+struct cobalt_thread *cobalt_thread_find(pid_t pid);
+
+struct cobalt_thread *cobalt_thread_find_local(pid_t pid);
+
+struct cobalt_thread *cobalt_thread_lookup(unsigned long pth);
+
+COBALT_SYSCALL_DECL(thread_create,
+		    (unsigned long pth, int policy,
+		     struct sched_param_ex __user *u_param,
+		     int xid, __u32 __user *u_winoff));
+
+struct cobalt_thread *
+cobalt_thread_shadow(struct cobalt_local_hkey *lhkey,
+		     __u32 __user *u_winoff);
+
+COBALT_SYSCALL_DECL(thread_setmode,
+		    (int clrmask, int setmask, int __user *u_mode_r));
+
+COBALT_SYSCALL_DECL(thread_setname,
+		    (unsigned long pth, const char __user *u_name));
+
+COBALT_SYSCALL_DECL(thread_kill, (unsigned long pth, int sig));
+
+COBALT_SYSCALL_DECL(thread_join, (unsigned long pth));
+
+COBALT_SYSCALL_DECL(thread_getpid, (unsigned long pth));
+
+COBALT_SYSCALL_DECL(thread_getstat,
+		    (pid_t pid, struct cobalt_threadstat __user *u_stat));
+
+COBALT_SYSCALL_DECL(thread_setschedparam_ex,
+		    (unsigned long pth,
+		     int policy,
+		     const struct sched_param_ex __user *u_param,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+COBALT_SYSCALL_DECL(thread_getschedparam_ex,
+		    (unsigned long pth,
+		     int __user *u_policy,
+		     struct sched_param_ex __user *u_param));
+
+COBALT_SYSCALL_DECL(thread_setschedprio,
+		    (unsigned long pth,
+		     int prio,
+		     __u32 __user *u_winoff,
+		     int __user *u_promoted));
+
+void cobalt_thread_map(struct xnthread *curr);
+
+struct xnthread_personality *cobalt_thread_exit(struct xnthread *curr);
+
+struct xnthread_personality *cobalt_thread_finalize(struct xnthread *zombie);
+
+#ifdef CONFIG_XENO_OPT_COBALT_EXTENSION
+
+int cobalt_thread_extend(struct cobalt_extension *ext,
+			 void *priv);
+
+void cobalt_thread_restrict(void);
+
+static inline
+int cobalt_thread_extended_p(const struct cobalt_thread *thread,
+			     const struct cobalt_extension *ext)
+{
+	return thread->extref.extension == ext;
+}
+
+#else /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+static inline
+int cobalt_thread_extended_p(const struct cobalt_thread *thread,
+			     const struct cobalt_extension *ext)
+{
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_OPT_COBALT_EXTENSION */
+
+extern xnticks_t cobalt_time_slice;
+
+#endif /* !_COBALT_POSIX_THREAD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c
new file mode 100644
index 0000000..a58ea99
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.c
@@ -0,0 +1,588 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/cred.h>
+#include <linux/err.h>
+#include "internal.h"
+#include "thread.h"
+#include "timer.h"
+#include "clock.h"
+#include "signal.h"
+
+void cobalt_timer_handler(struct xntimer *xntimer)
+{
+	struct cobalt_timer *timer;
+	/*
+	 * Deliver the timer notification via a signal (unless
+	 * SIGEV_NONE was given). If we can't do this because the
+	 * target thread disappeared, then stop the timer. It will go
+	 * away when timer_delete() is called, or the owner's process
+	 * exits, whichever comes first.
+	 */
+	timer = container_of(xntimer, struct cobalt_timer, timerbase);
+	if (timer->sigp.si.si_signo &&
+	    cobalt_signal_send_pid(timer->target, &timer->sigp) == -ESRCH)
+		xntimer_stop(&timer->timerbase);
+}
+EXPORT_SYMBOL_GPL(cobalt_timer_handler);
+
+static inline struct cobalt_thread *
+timer_init(struct cobalt_timer *timer,
+	   const struct sigevent *__restrict__ evp) /* nklocked, IRQs off. */
+{
+	struct cobalt_thread *owner = cobalt_current_thread(), *target = NULL;
+	struct xnclock *clock;
+
+	/*
+	 * First, try to offload this operation to the extended
+	 * personality the current thread might originate from.
+	 */
+	if (cobalt_initcall_extension(timer_init, &timer->extref,
+				      owner, target, evp) && target)
+		return target;
+
+	/*
+	 * Ok, either we have no extension available, or we do but it
+	 * does not want to override the standard behavior: handle
+	 * this timer the pure Cobalt way then.
+	 */
+	if (evp == NULL || evp->sigev_notify == SIGEV_NONE) {
+		target = owner;	/* Assume SIGEV_THREAD_ID. */
+		goto init;
+	}
+
+	if (evp->sigev_notify != SIGEV_THREAD_ID)
+		return ERR_PTR(-EINVAL);
+
+	/*
+	 * Recipient thread must be a Xenomai shadow in user-space,
+	 * living in the same process as our caller.
+	 */
+	target = cobalt_thread_find_local(evp->sigev_notify_thread_id);
+	if (target == NULL)
+		return ERR_PTR(-EINVAL);
+init:
+	clock = cobalt_clock_find(timer->clockid);
+	if (IS_ERR(clock))
+		return ERR_PTR(PTR_ERR(clock));
+
+	xntimer_init(&timer->timerbase, clock, cobalt_timer_handler,
+		     target->threadbase.sched, XNTIMER_UGRAVITY);
+
+	return target;
+}
+
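+/*
+ * CAUTION: timers_map uses inverted logic, i.e. a *set* bit denotes
+ * a free slot: allocation picks the first set bit and clears it,
+ * releasing a slot sets it back.
+ */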
+static inline int timer_alloc_id(struct cobalt_process *cc)
+{
+	int id;
+
+	id = find_first_bit(cc->timers_map, CONFIG_XENO_OPT_NRTIMERS);
+	if (id == CONFIG_XENO_OPT_NRTIMERS)
+		return -EAGAIN;
+
+	__clear_bit(id, cc->timers_map);
+
+	return id;
+}
+
+static inline void timer_free_id(struct cobalt_process *cc, int id)
+{
+	__set_bit(id, cc->timers_map);
+}
+
+struct cobalt_timer *
+cobalt_timer_by_id(struct cobalt_process *cc, timer_t timer_id)
+{
+	if (timer_id < 0 || timer_id >= CONFIG_XENO_OPT_NRTIMERS)
+		return NULL;
+
+	if (test_bit(timer_id, cc->timers_map))
+		return NULL;
+
+	return cc->timers[timer_id];
+}
+
+static inline int timer_create(clockid_t clockid,
+			       const struct sigevent *__restrict__ evp,
+			       timer_t * __restrict__ timerid)
+{
+	struct cobalt_process *cc;
+	struct cobalt_thread *target;
+	struct cobalt_timer *timer;
+	int signo, ret = -EINVAL;
+	timer_t timer_id;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	timer = xnmalloc(sizeof(*timer));
+	if (timer == NULL)
+		return -ENOMEM;
+
+	timer->sigp.si.si_errno = 0;
+	timer->sigp.si.si_code = SI_TIMER;
+	timer->sigp.si.si_overrun = 0;
+	INIT_LIST_HEAD(&timer->sigp.next);
+	timer->clockid = clockid;
+	timer->overruns = 0;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	ret = timer_alloc_id(cc);
+	if (ret < 0)
+		goto out;
+
+	timer_id = ret;
+
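+	/*
+	 * As per POSIX, a NULL sigevent defaults to sending SIGALRM
+	 * with the timer id as payload, while SIGEV_NONE disables
+	 * notification entirely.
+	 */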
+	if (evp == NULL) {
+		timer->sigp.si.si_int = timer_id;
+		signo = SIGALRM;
+	} else {
+		if (evp->sigev_notify == SIGEV_NONE)
+			signo = 0; /* Don't notify. */
+		else {
+			signo = evp->sigev_signo;
+			if (signo < 1 || signo > _NSIG) {
+				ret = -EINVAL;
+				goto fail;
+			}
+			timer->sigp.si.si_value = evp->sigev_value;
+		}
+	}
+
+	timer->sigp.si.si_signo = signo;
+	timer->sigp.si.si_tid = timer_id;
+	timer->id = timer_id;
+
+	target = timer_init(timer, evp);
+	if (target == NULL) {
+		ret = -EPERM;
+		goto fail;
+	}
+
+	if (IS_ERR(target)) {
+		ret = PTR_ERR(target);
+		goto fail;
+	}
+
+	timer->target = xnthread_host_pid(&target->threadbase);
+	cc->timers[timer_id] = timer;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	*timerid = timer_id;
+
+	return 0;
+fail:
+	timer_free_id(cc, timer_id);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnfree(timer);
+
+	return ret;
+}
+
+static void timer_cleanup(struct cobalt_process *p, struct cobalt_timer *timer)
+{
+	xntimer_destroy(&timer->timerbase);
+
+	if (!list_empty(&timer->sigp.next))
+		list_del(&timer->sigp.next);
+
+	timer_free_id(p, cobalt_timer_id(timer));
+	p->timers[cobalt_timer_id(timer)] = NULL;
+}
+
+static inline int
+timer_delete(timer_t timerid)
+{
+	struct cobalt_process *cc;
+	struct cobalt_timer *timer;
+	int ret = 0;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EINVAL;
+	}
+	/*
+	 * If an extension runs and actually handles the deletion, we
+	 * should not call the timer_cleanup extension handler for
+	 * this timer, but we shall destroy the core timer. If the
+	 * handler returns an error, the whole deletion process is
+	 * aborted, leaving the timer untouched. In all other cases,
+	 * we do the core timer cleanup work, firing the timer_cleanup
+	 * extension handler if defined.
+	 */
+	if (cobalt_call_extension(timer_delete, &timer->extref, ret) && ret < 0)
+		goto out;
+
+	if (ret == 0)
+		cobalt_call_extension(timer_cleanup, &timer->extref, ret);
+	else
+		ret = 0;
+
+	timer_cleanup(cc, timer);
+	xnlock_put_irqrestore(&nklock, s);
+	xnfree(timer);
+
+	return ret;
+
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+void __cobalt_timer_getval(struct xntimer *__restrict__ timer,
+			   struct itimerspec64 *__restrict__ value)
+{
+	ns2ts(&value->it_interval, xntimer_interval(timer));
+
+	if (!xntimer_running_p(timer)) {
+		value->it_value.tv_sec = 0;
+		value->it_value.tv_nsec = 0;
+	} else {
+		ns2ts(&value->it_value, xntimer_get_timeout(timer));
+	}
+}
+
+static inline void
+timer_gettimeout(struct cobalt_timer *__restrict__ timer,
+		 struct itimerspec64 *__restrict__ value)
+{
+	int ret = 0;
+
+	if (cobalt_call_extension(timer_gettime, &timer->extref,
+				  ret, value) && ret != 0)
+		return;
+
+	__cobalt_timer_getval(&timer->timerbase, value);
+}
+
+int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag,
+			  const struct itimerspec64 *__restrict__ value)
+{
+	xnticks_t start, period;
+
+	if (value->it_value.tv_nsec == 0 && value->it_value.tv_sec == 0) {
+		xntimer_stop(timer);
+		return 0;
+	}
+
+	if ((unsigned long)value->it_value.tv_nsec >= ONE_BILLION ||
+	    ((unsigned long)value->it_interval.tv_nsec >= ONE_BILLION &&
+	     (value->it_value.tv_sec != 0 || value->it_value.tv_nsec != 0)))
+		return -EINVAL;
+
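+	/*
+	 * Presumably, the +1 below biases the initial shot by one
+	 * nanosecond, so that a nonzero it_value can never degrade
+	 * into a null or already-elapsed expiry date after the
+	 * conversion.
+	 */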
+	start = ts2ns(&value->it_value) + 1;
+	period = ts2ns(&value->it_interval);
+
+	/*
+	 * Now start the timer. If the timeout date has already
+	 * passed, the caller will handle the case.
+	 */
+	return xntimer_start(timer, start, period, clock_flag);
+}
+
+static inline int timer_set(struct cobalt_timer *timer, int flags,
+			    const struct itimerspec64 *__restrict__ value)
+{				/* nklocked, IRQs off. */
+	struct cobalt_thread *thread;
+	int ret = 0;
+
+	/* First, try offloading the work to an extension. */
+
+	if (cobalt_call_extension(timer_settime, &timer->extref,
+				  ret, value, flags) && ret != 0)
+		return ret < 0 ? ret : 0;
+
+	/*
+	 * No extension, or operation not handled. Default to plain
+	 * POSIX behavior.
+	 *
+	 * If the target thread vanished, just don't start the timer.
+	 */
+	thread = cobalt_thread_find(timer->target);
+	if (thread == NULL)
+		return 0;
+
+	/*
+	 * Make the timer affine to the CPU running the thread to be
+	 * signaled if possible.
+	 */
+	xntimer_set_affinity(&timer->timerbase, thread->threadbase.sched);
+
+	return __cobalt_timer_setval(&timer->timerbase,
+				     clock_flag(flags, timer->clockid), value);
+}
+
+static inline void
+timer_deliver_late(struct cobalt_process *cc, timer_t timerid)
+{
+	struct cobalt_timer *timer;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	/*
+	 * We dropped the lock shortly, revalidate the timer handle in
+	 * case a deletion slipped in.
+	 */
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer)
+		cobalt_timer_handler(&timer->timerbase);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+int __cobalt_timer_settime(timer_t timerid, int flags,
+			   const struct itimerspec64 *__restrict__ value,
+			   struct itimerspec64 *__restrict__ ovalue)
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	int ret;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	XENO_BUG_ON(COBALT, cc == NULL);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	if (ovalue)
+		timer_gettimeout(timer, ovalue);
+
+	ret = timer_set(timer, flags, value);
+	if (ret == -ETIMEDOUT) {
+		/*
+		 * Time has already passed, deliver a notification
+		 * immediately. Since we are about to dive into the
+		 * signal machinery for this, let's drop the nklock to
+		 * break the atomic section temporarily.
+		 */
+		xnlock_put_irqrestore(&nklock, s);
+		timer_deliver_late(cc, timerid);
+		return 0;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value)
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL)
+		goto fail;
+
+	timer_gettimeout(timer, value);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -EINVAL;
+}
+
+COBALT_SYSCALL(timer_delete, current, (timer_t timerid))
+{
+	return timer_delete(timerid);
+}
+
+int __cobalt_timer_create(clockid_t clock,
+			  const struct sigevent *sev,
+			  timer_t __user *u_tm)
+{
+	timer_t timerid = 0;
+	int ret;
+
+	ret = timer_create(clock, sev, &timerid);
+	if (ret)
+		return ret;
+
+	if (cobalt_copy_to_user(u_tm, &timerid, sizeof(timerid))) {
+		timer_delete(timerid);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+COBALT_SYSCALL(timer_create, current,
+	       (clockid_t clock,
+		const struct sigevent __user *u_sev,
+		timer_t __user *u_tm))
+{
+	struct sigevent sev, *evp = NULL;
+
+	if (u_sev) {
+		evp = &sev;
+		if (cobalt_copy_from_user(&sev, u_sev, sizeof(sev)))
+			return -EFAULT;
+	}
+
+	return __cobalt_timer_create(clock, evp, u_tm);
+}
+
+COBALT_SYSCALL(timer_settime, primary,
+	       (timer_t tm, int flags,
+		const struct __user_old_itimerspec __user *u_newval,
+		struct __user_old_itimerspec __user *u_oldval))
+{
+	struct itimerspec64 newv, oldv, *oldvp = &oldv;
+	int ret;
+
+	if (u_oldval == NULL)
+		oldvp = NULL;
+
+	if (cobalt_get_u_itimerspec(&newv, u_newval))
+		return -EFAULT;
+
+	ret = __cobalt_timer_settime(tm, flags, &newv, oldvp);
+	if (ret)
+		return ret;
+
+	if (oldvp && cobalt_put_u_itimerspec(u_oldval, oldvp)) {
+		__cobalt_timer_settime(tm, flags, oldvp, NULL);
+		return -EFAULT;
+	}
+
+	return 0;
+}
+
+COBALT_SYSCALL(timer_gettime, current,
+	       (timer_t tm, struct __user_old_itimerspec __user *u_val))
+{
+	struct itimerspec64 val;
+	int ret;
+
+	ret = __cobalt_timer_gettime(tm, &val);
+	if (ret)
+		return ret;
+
+	return cobalt_put_u_itimerspec(u_val, &val);
+}
+
+COBALT_SYSCALL(timer_getoverrun, current, (timer_t timerid))
+{
+	struct cobalt_timer *timer;
+	struct cobalt_process *cc;
+	int overruns;
+	spl_t s;
+
+	cc = cobalt_current_process();
+	if (cc == NULL)
+		return -EPERM;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	timer = cobalt_timer_by_id(cc, timerid);
+	if (timer == NULL)
+		goto fail;
+
+	overruns = timer->overruns;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return overruns;
+fail:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return -EINVAL;
+}
+
+int cobalt_timer_deliver(struct cobalt_thread *waiter, timer_t timerid) /* nklocked, IRQs off. */
+{
+	struct cobalt_timer *timer;
+	xnticks_t now;
+
+	timer = cobalt_timer_by_id(cobalt_current_process(), timerid);
+	if (timer == NULL)
+		/* Killed before ultimate delivery, who cares then? */
+		return 0;
+
+	if (!xntimer_periodic_p(&timer->timerbase))
+		timer->overruns = 0;
+	else {
+		now = xnclock_read_raw(xntimer_clock(&timer->timerbase));
+		timer->overruns = xntimer_get_overruns(&timer->timerbase,
+					       &waiter->threadbase, now);
+		if ((unsigned int)timer->overruns > COBALT_DELAYMAX)
+			timer->overruns = COBALT_DELAYMAX;
+	}
+
+	return timer->overruns;
+}
+
+void cobalt_timer_reclaim(struct cobalt_process *p)
+{
+	struct cobalt_timer *timer;
+	unsigned id;
+	spl_t s;
+	int ret;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (find_first_zero_bit(p->timers_map, CONFIG_XENO_OPT_NRTIMERS) ==
+		CONFIG_XENO_OPT_NRTIMERS)
+		goto out;
+
+	for (id = 0; id < ARRAY_SIZE(p->timers); id++) {
+		timer = cobalt_timer_by_id(p, id);
+		if (timer == NULL)
+			continue;
+
+		cobalt_call_extension(timer_cleanup, &timer->extref, ret);
+		timer_cleanup(p, timer);
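+		/*
+		 * Drop the nklock around xnfree(), presumably to keep
+		 * the heap operation out of the atomic section, then
+		 * re-acquire it before looking at the next slot.
+		 */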
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(timer);
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h
new file mode 100644
index 0000000..3b580d4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timer.h
@@ -0,0 +1,86 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _COBALT_POSIX_TIMER_H
+#define _COBALT_POSIX_TIMER_H
+
+#include <linux/types.h>
+#include <linux/time.h>
+#include <linux/list.h>
+#include <cobalt/kernel/timer.h>
+#include <xenomai/posix/signal.h>
+#include <xenomai/posix/syscall.h>
+
+struct cobalt_timer {
+	struct xntimer timerbase;
+	timer_t id;
+	int overruns;
+	clockid_t clockid;
+	pid_t target;
+	struct cobalt_sigpending sigp;
+	struct cobalt_extref extref;
+};
+
+int cobalt_timer_deliver(struct cobalt_thread *waiter,
+			 timer_t timerid);
+
+void cobalt_timer_reclaim(struct cobalt_process *p);
+
+static inline timer_t cobalt_timer_id(const struct cobalt_timer *timer)
+{
+	return timer->id;
+}
+
+struct cobalt_timer *
+cobalt_timer_by_id(struct cobalt_process *p, timer_t timer_id);
+
+void cobalt_timer_handler(struct xntimer *xntimer);
+
+void __cobalt_timer_getval(struct xntimer *__restrict__ timer,
+			   struct itimerspec64 *__restrict__ value);
+
+int __cobalt_timer_setval(struct xntimer *__restrict__ timer, int clock_flag,
+			  const struct itimerspec64 *__restrict__ value);
+
+int __cobalt_timer_create(clockid_t clock,
+			  const struct sigevent *sev,
+			  timer_t __user *u_tm);
+
+int __cobalt_timer_settime(timer_t timerid, int flags,
+			   const struct itimerspec64 *__restrict__ value,
+			   struct itimerspec64 *__restrict__ ovalue);
+
+int __cobalt_timer_gettime(timer_t timerid, struct itimerspec64 *value);
+
+COBALT_SYSCALL_DECL(timer_create,
+		    (clockid_t clock,
+		     const struct sigevent __user *u_sev,
+		     timer_t __user *u_tm));
+
+COBALT_SYSCALL_DECL(timer_delete, (timer_t tm));
+
+COBALT_SYSCALL_DECL(timer_settime,
+		    (timer_t tm, int flags,
+		     const struct __user_old_itimerspec __user *u_newval,
+		     struct __user_old_itimerspec __user *u_oldval));
+
+COBALT_SYSCALL_DECL(timer_gettime,
+		    (timer_t tm, struct __user_old_itimerspec __user *u_val));
+
+COBALT_SYSCALL_DECL(timer_getoverrun, (timer_t tm));
+
+#endif /* !_COBALT_POSIX_TIMER_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c
new file mode 100644
index 0000000..472c4cb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.c
@@ -0,0 +1,334 @@
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/timerfd.h>
+#include <linux/err.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "clock.h"
+#include "timer.h"
+#include "timerfd.h"
+
+struct cobalt_tfd {
+	int flags;
+	clockid_t clockid;
+	struct rtdm_fd fd;
+	struct xntimer timer;
+	DECLARE_XNSELECT(read_select);
+	struct itimerspec64 value;
+	struct xnsynch readers;
+	struct xnthread *target;
+};
+
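+/*
+ * Internal "a tick is pending" state bit, stored in tfd->flags next
+ * to the user-visible TFD_* flags; presumably (1 << 2) was picked to
+ * stay clear of the bits those flags occupy.
+ */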
+#define COBALT_TFD_TICKED	(1 << 2)
+
+#define COBALT_TFD_SETTIME_FLAGS (TFD_TIMER_ABSTIME | TFD_WAKEUP)
+
+static ssize_t timerfd_read(struct rtdm_fd *fd, void __user *buf, size_t size)
+{
+	struct cobalt_tfd *tfd;
+	__u64 __user *u_ticks;
+	__u64 ticks = 0;
+	bool aligned;
+	spl_t s;
+	int err;
+
+	if (size < sizeof(ticks))
+		return -EINVAL;
+
+	u_ticks = buf;
+	if (!access_wok(u_ticks, sizeof(*u_ticks)))
+		return -EFAULT;
+
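+	/*
+	 * Record whether the destination buffer is naturally aligned
+	 * for a 64bit store; the fast __xn_put_user() path is used on
+	 * wakeup in that case, a byte-wise copy otherwise.
+	 */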
+	aligned = (((unsigned long)buf) & (sizeof(ticks) - 1)) == 0;
+
+	tfd = container_of(fd, struct cobalt_tfd, fd);
+
+	xnlock_get_irqsave(&nklock, s);
+	if (tfd->flags & COBALT_TFD_TICKED) {
+		err = 0;
+		goto out;
+	}
+	if (rtdm_fd_flags(fd) & O_NONBLOCK) {
+		err = -EAGAIN;
+		goto out;
+	}
+
+	do {
+		err = xnsynch_sleep_on(&tfd->readers, XN_INFINITE, XN_RELATIVE);
+	} while (err == 0 && (tfd->flags & COBALT_TFD_TICKED) == 0);
+
+	if (err & XNBREAK)
+		err = -EINTR;
+  out:
+	if (err == 0) {
+		xnticks_t now;
+
+		if (xntimer_periodic_p(&tfd->timer)) {
+			now = xnclock_read_raw(xntimer_clock(&tfd->timer));
+			ticks = 1 + xntimer_get_overruns(&tfd->timer,
+					 xnthread_current(), now);
+		} else
+			ticks = 1;
+
+		tfd->flags &= ~COBALT_TFD_TICKED;
+		xnselect_signal(&tfd->read_select, 0);
+	}
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err == 0) {
+		err = aligned ? __xn_put_user(ticks, u_ticks) :
+			__xn_copy_to_user(buf, &ticks, sizeof(ticks));
+		if (err)
+			err = -EFAULT;
+	}
+
+	return err ?: sizeof(ticks);
+}
+
+static int
+timerfd_select(struct rtdm_fd *fd, struct xnselector *selector,
+	       unsigned type, unsigned index)
+{
+	struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd);
+	struct xnselect_binding *binding;
+	spl_t s;
+	int err;
+
+	if (type != XNSELECT_READ)
+		return -EBADF;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (binding == NULL)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_set_affinity(&tfd->timer, xnthread_current()->sched);
+	err = xnselect_bind(&tfd->read_select, binding, selector, type,
+			index, tfd->flags & COBALT_TFD_TICKED);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+static void timerfd_close(struct rtdm_fd *fd)
+{
+	struct cobalt_tfd *tfd = container_of(fd, struct cobalt_tfd, fd);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_destroy(&tfd->timer);
+	xnsynch_destroy(&tfd->readers);
+	xnsched_run();
+	xnlock_put_irqrestore(&nklock, s);
+	xnselect_destroy(&tfd->read_select); /* Reschedules. */
+	xnfree(tfd);
+}
+
+static struct rtdm_fd_ops timerfd_ops = {
+	.read_rt = timerfd_read,
+	.select = timerfd_select,
+	.close = timerfd_close,
+};
+
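+/*
+ * Expiry handler: flag the fd as ticked, signal pollers and wake up
+ * one blocked reader; with TFD_WAKEUP in effect, forcibly unblock
+ * the registered target thread as well.
+ */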
+static void timerfd_handler(struct xntimer *xntimer)
+{
+	struct cobalt_tfd *tfd;
+
+	tfd = container_of(xntimer, struct cobalt_tfd, timer);
+	tfd->flags |= COBALT_TFD_TICKED;
+	xnselect_signal(&tfd->read_select, 1);
+	xnsynch_wakeup_one_sleeper(&tfd->readers);
+	if (tfd->target)
+		xnthread_unblock(tfd->target);
+}
+
+COBALT_SYSCALL(timerfd_create, lostage, (int clockid, int flags))
+{
+	struct cobalt_tfd *tfd;
+	struct xnthread *curr;
+	struct xnclock *clock;
+	int ret, ufd;
+
+	if (flags & ~TFD_CREATE_FLAGS)
+		return -EINVAL;
+
+	clock = cobalt_clock_find(clockid);
+	if (IS_ERR(clock))
+		return PTR_ERR(clock);
+
+	tfd = xnmalloc(sizeof(*tfd));
+	if (tfd == NULL)
+		return -ENOMEM;
+
+	ufd = __rtdm_anon_getfd("[cobalt-timerfd]",
+				O_RDWR | (flags & TFD_SHARED_FCNTL_FLAGS));
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_getfd;
+	}
+
+	tfd->flags = flags & ~TFD_NONBLOCK;
+	tfd->fd.oflags = (flags & TFD_NONBLOCK) ? O_NONBLOCK : 0;
+	tfd->clockid = clockid;
+	curr = xnthread_current();
+	xntimer_init(&tfd->timer, clock, timerfd_handler,
+		     curr ? curr->sched : NULL, XNTIMER_UGRAVITY);
+	xnsynch_init(&tfd->readers, XNSYNCH_PRIO, NULL);
+	xnselect_init(&tfd->read_select);
+	tfd->target = NULL;
+
+	ret = rtdm_fd_enter(&tfd->fd, ufd, COBALT_TIMERFD_MAGIC, &timerfd_ops);
+	if (ret < 0)
+		goto fail;
+
+	ret = rtdm_fd_register(&tfd->fd, ufd);
+	if (ret < 0)
+		goto fail;
+
+	return ufd;
+fail:
+	xnselect_destroy(&tfd->read_select);
+	xnsynch_destroy(&tfd->readers);
+	xntimer_destroy(&tfd->timer);
+	__rtdm_anon_putfd(ufd);
+fail_getfd:
+	xnfree(tfd);
+
+	return ret;
+}
+
+static inline struct cobalt_tfd *tfd_get(int ufd)
+{
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, COBALT_TIMERFD_MAGIC);
+	if (IS_ERR(fd)) {
+		int err = PTR_ERR(fd);
+		if (err == -EBADF && cobalt_current_process() == NULL)
+			err = -EPERM;
+		return ERR_PTR(err);
+	}
+
+	return container_of(fd, struct cobalt_tfd, fd);
+}
+
+static inline void tfd_put(struct cobalt_tfd *tfd)
+{
+	rtdm_fd_put(&tfd->fd);
+}
+
+int __cobalt_timerfd_settime(int fd, int flags,
+			     const struct itimerspec64 *value,
+			     struct itimerspec64 *ovalue)
+{
+	struct cobalt_tfd *tfd;
+	int cflag, ret;
+	spl_t s;
+
+	if (flags & ~COBALT_TFD_SETTIME_FLAGS)
+		return -EINVAL;
+
+	tfd = tfd_get(fd);
+	if (IS_ERR(tfd))
+		return PTR_ERR(tfd);
+
+	cflag = (flags & TFD_TIMER_ABSTIME) ? TIMER_ABSTIME : 0;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	tfd->target = NULL;
+	if (flags & TFD_WAKEUP) {
+		tfd->target = xnthread_current();
+		if (tfd->target == NULL) {
+			ret = -EPERM;
+			goto out;
+		}
+	}
+
+	if (ovalue)
+		__cobalt_timer_getval(&tfd->timer, ovalue);
+
+	xntimer_set_affinity(&tfd->timer, xnthread_current()->sched);
+
+	ret = __cobalt_timer_setval(&tfd->timer,
+				    clock_flag(cflag, tfd->clockid), value);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	tfd_put(tfd);
+
+	return ret;
+}
+
+COBALT_SYSCALL(timerfd_settime, primary,
+	       (int fd, int flags,
+		const struct __user_old_itimerspec __user *new_value,
+		struct __user_old_itimerspec __user *old_value))
+{
+	struct itimerspec64 ovalue, value;
+	int ret;
+
+	ret = cobalt_get_u_itimerspec(&value, new_value);
+	if (ret)
+		return ret;
+
+	ret = __cobalt_timerfd_settime(fd, flags, &value, &ovalue);
+	if (ret)
+		return ret;
+
+	if (old_value) {
+		ret = cobalt_copy_to_user(old_value, &ovalue, sizeof(ovalue));
+		if (ret) {
+			/*
+			 * We could not report the previous state:
+			 * back out by disarming the timer we just
+			 * set.
+			 */
+			value.it_value.tv_sec = 0;
+			value.it_value.tv_nsec = 0;
+			__cobalt_timerfd_settime(fd, flags, &value, NULL);
+		}
+	}
+
+	return ret;
+}
+
+int __cobalt_timerfd_gettime(int fd, struct itimerspec64 *value)
+{
+	struct cobalt_tfd *tfd;
+	spl_t s;
+
+	tfd = tfd_get(fd);
+	if (IS_ERR(tfd))
+		return PTR_ERR(tfd);
+
+	xnlock_get_irqsave(&nklock, s);
+	__cobalt_timer_getval(&tfd->timer, value);
+	xnlock_put_irqrestore(&nklock, s);
+
+	tfd_put(tfd);
+
+	return 0;
+}
+
+COBALT_SYSCALL(timerfd_gettime, current,
+	       (int fd, struct __user_old_itimerspec __user *curr_value))
+{
+	struct itimerspec64 value;
+	int ret;
+
+	ret = __cobalt_timerfd_gettime(fd, &value);
+
+	return ret ?: cobalt_put_u_itimerspec(curr_value, &value);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h
new file mode 100644
index 0000000..245b869
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/posix/timerfd.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef TIMERFD_H
+#define TIMERFD_H
+
+#include <linux/time.h>
+#include <xenomai/posix/syscall.h>
+
+int __cobalt_timerfd_settime(int fd, int flags,
+			     const struct itimerspec64 *new_value,
+			     struct itimerspec64 *old_value);
+
+int __cobalt_timerfd_gettime(int fd,
+			     struct itimerspec64 *value);
+
+COBALT_SYSCALL_DECL(timerfd_create,
+		    (int clockid, int flags));
+
+COBALT_SYSCALL_DECL(timerfd_settime,
+		    (int fd, int flags,
+		     const struct __user_old_itimerspec __user *new_value,
+		     struct __user_old_itimerspec __user *old_value));
+
+COBALT_SYSCALL_DECL(timerfd_gettime,
+		    (int fd, struct __user_old_itimerspec __user *curr_value));
+
+#endif /* TIMERFD_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c
new file mode 100644
index 0000000..0aaf691
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.c
@@ -0,0 +1,220 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/vfile.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/sched.h>
+#include <xenomai/version.h>
+#include "debug.h"
+
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+
+static int lock_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct xnlockinfo lockinfo;
+	spl_t s;
+	int cpu;
+
+	for_each_realtime_cpu(cpu) {
+		xnlock_get_irqsave(&nklock, s);
+		lockinfo = per_cpu(xnlock_stats, cpu);
+		xnlock_put_irqrestore(&nklock, s);
+
+		if (cpu > 0)
+			xnvfile_printf(it, "\n");
+
+		xnvfile_printf(it, "CPU%d:\n", cpu);
+
+		xnvfile_printf(it,
+			     "  longest locked section: %llu ns\n"
+			     "  spinning time: %llu ns\n"
+			     "  section entry: %s:%d (%s)\n",
+			       xnclock_ticks_to_ns(&nkclock, lockinfo.lock_time),
+			       xnclock_ticks_to_ns(&nkclock, lockinfo.spin_time),
+			       lockinfo.file, lockinfo.line, lockinfo.function);
+	}
+
+	return 0;
+}
+
+static ssize_t lock_vfile_store(struct xnvfile_input *input)
+{
+	ssize_t ret;
+	spl_t s;
+	int cpu;
+
+	long val;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val != 0)
+		return -EINVAL;
+
+	for_each_realtime_cpu(cpu) {
+		xnlock_get_irqsave(&nklock, s);
+		memset(&per_cpu(xnlock_stats, cpu), '\0', sizeof(struct xnlockinfo));
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops lock_vfile_ops = {
+	.show = lock_vfile_show,
+	.store = lock_vfile_store,
+};
+
+static struct xnvfile_regular lock_vfile = {
+	.ops = &lock_vfile_ops,
+};
+
+#endif /* CONFIG_XENO_OPT_DEBUG_LOCKING */
+
+static int latency_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%Lu\n",
+		       xnclock_ticks_to_ns(&nkclock, nkclock.gravity.user));
+
+	return 0;
+}
+
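+/*
+ * Writing a nanosecond value to this vfile tunes the user gravity of
+ * the core clock, e.g. (sketch, assuming the usual /proc/xenomai
+ * root for Cobalt vfiles):
+ *
+ *   echo 2000 > /proc/xenomai/latency
+ */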
+static ssize_t latency_vfile_store(struct xnvfile_input *input)
+{
+	ssize_t ret;
+	long val;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	nkclock.gravity.user = xnclock_ns_to_ticks(&nkclock, val);
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops latency_vfile_ops = {
+	.show = latency_vfile_show,
+	.store = latency_vfile_store,
+};
+
+static struct xnvfile_regular latency_vfile = {
+	.ops = &latency_vfile_ops,
+};
+
+static int version_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%s\n", XENO_VERSION_STRING);
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops version_vfile_ops = {
+	.show = version_vfile_show,
+};
+
+static struct xnvfile_regular version_vfile = {
+	.ops = &version_vfile_ops,
+};
+
+static int faults_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	int cpu, trap;
+
+	xnvfile_puts(it, "TRAP ");
+
+	for_each_realtime_cpu(cpu)
+		xnvfile_printf(it, "        CPU%d", cpu);
+
+	for (trap = 0; cobalt_machine.fault_labels[trap]; trap++) {
+		if (*cobalt_machine.fault_labels[trap] == '\0')
+			continue;
+
+		xnvfile_printf(it, "\n%3d: ", trap);
+
+		for_each_realtime_cpu(cpu)
+			xnvfile_printf(it, "%12u",
+				       per_cpu(cobalt_machine_cpudata, cpu).faults[trap]);
+
+		xnvfile_printf(it, "    (%s)",
+			       cobalt_machine.fault_labels[trap]);
+	}
+
+	xnvfile_putc(it, '\n');
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops faults_vfile_ops = {
+	.show = faults_vfile_show,
+};
+
+static struct xnvfile_regular faults_vfile = {
+	.ops = &faults_vfile_ops,
+};
+
+void xnprocfs_cleanup_tree(void)
+{
+#ifdef CONFIG_XENO_OPT_DEBUG
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+	xnvfile_destroy_regular(&lock_vfile);
+#endif
+	xnvfile_destroy_dir(&cobalt_debug_vfroot);
+#endif /* CONFIG_XENO_OPT_DEBUG */
+	xnvfile_destroy_regular(&faults_vfile);
+	xnvfile_destroy_regular(&version_vfile);
+	xnvfile_destroy_regular(&latency_vfile);
+	xnintr_cleanup_proc();
+	xnheap_cleanup_proc();
+	xnclock_cleanup_proc();
+	xnsched_cleanup_proc();
+	xnvfile_destroy_root();
+}
+
+int __init xnprocfs_init_tree(void)
+{
+	int ret;
+
+	ret = xnvfile_init_root();
+	if (ret)
+		return ret;
+
+	ret = xnsched_init_proc();
+	if (ret)
+		return ret;
+
+	xnclock_init_proc();
+	xnheap_init_proc();
+	xnintr_init_proc();
+	xnvfile_init_regular("latency", &latency_vfile, &cobalt_vfroot);
+	xnvfile_init_regular("version", &version_vfile, &cobalt_vfroot);
+	xnvfile_init_regular("faults", &faults_vfile, &cobalt_vfroot);
+#ifdef CONFIG_XENO_OPT_DEBUG
+	xnvfile_init_dir("debug", &cobalt_debug_vfroot, &cobalt_vfroot);
+#ifdef CONFIG_XENO_OPT_DEBUG_LOCKING
+	xnvfile_init_regular("lock", &lock_vfile, &cobalt_debug_vfroot);
+#endif
+#endif
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h
new file mode 100644
index 0000000..75304fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/procfs.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#ifndef _KERNEL_COBALT_PROCFS_H
+#define _KERNEL_COBALT_PROCFS_H
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int xnprocfs_init_tree(void);
+void xnprocfs_cleanup_tree(void);
+#else
+static inline int xnprocfs_init_tree(void) { return 0; }
+static inline void xnprocfs_cleanup_tree(void) { }
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+#endif /* !_KERNEL_COBALT_PROCFS_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c b/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c
new file mode 100644
index 0000000..211e0f7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/registry.c
@@ -0,0 +1,954 @@
+/*
+ * Copyright (C) 2004 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/assert.h>
+#include <pipeline/sirq.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_registry Registry services
+ *
+ * The registry provides a means to index object descriptors on unique
+ * alphanumeric keys. When labeled this way, an object is globally
+ * exported; it can be searched for, and its descriptor returned to
+ * the caller for further use; the latter operation is called a
+ * "binding". When no object has been registered under the given name
+ * yet, the registry can be asked to set up a rendez-vous, blocking
+ * the caller until the object is eventually registered.
+ *
+ *@{
+ */
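+
+/*
+ * A minimal sketch of the rendez-vous semantics described above
+ * (hypothetical caller-side code; error handling elided). Note that
+ * the key is kept by reference, so it must outlive the registered
+ * object:
+ *
+ *	xnhandle_t handle;
+ *
+ *	producer:
+ *		xnregistry_enter("my-object", &my_object, &handle, NULL);
+ *
+ *	consumer, blocking until "my-object" gets registered:
+ *		xnregistry_bind("my-object", XN_INFINITE, XN_RELATIVE,
+ *				&handle);
+ */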
+
+struct xnobject *registry_obj_slots;
+EXPORT_SYMBOL_GPL(registry_obj_slots);
+
+static LIST_HEAD(free_object_list); /* Free objects. */
+
+static LIST_HEAD(busy_object_list); /* Active and exported objects. */
+
+static unsigned int nr_active_objects;
+
+static unsigned long next_object_stamp;
+
+static struct hlist_head *object_index;
+
+static int nr_object_entries;
+
+static struct xnsynch register_synch;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+#include <linux/workqueue.h>
+
+static void proc_callback(struct work_struct *work);
+
+static irqreturn_t registry_proc_schedule(int virq, void *dev_id);
+
+static LIST_HEAD(proc_object_list);	/* Objects waiting for /proc handling. */
+
+static DECLARE_WORK(registry_proc_work, proc_callback);
+
+static int proc_virq;
+
+static struct xnvfile_directory registry_vfroot;
+
+static int usage_vfile_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	xnvfile_printf(it, "%u/%u\n",
+		       nr_active_objects,
+		       CONFIG_XENO_OPT_REGISTRY_NRSLOTS);
+	return 0;
+}
+
+static struct xnvfile_regular_ops usage_vfile_ops = {
+	.show = usage_vfile_show,
+};
+
+static struct xnvfile_regular usage_vfile = {
+	.ops = &usage_vfile_ops,
+};
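+
+/*
+ * Once attached to the "registry" vfile directory by
+ * xnregistry_init(), this handler backs
+ * /proc/xenomai/registry/usage, reporting active versus total slots;
+ * e.g. "1/512" right after init, when only the reserved slot #0 is
+ * in use and CONFIG_XENO_OPT_REGISTRY_NRSLOTS is 512.
+ */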
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+unsigned xnregistry_hash_size(void)
+{
+	static const int primes[] = {
+		101, 211, 307, 401, 503, 601,
+		701, 809, 907, 1009, 1103
+	};
+
+#define obj_hash_max(n)			 \
+((n) < sizeof(primes) / sizeof(int) ? \
+ (n) : sizeof(primes) / sizeof(int) - 1)
+
+	return primes[obj_hash_max(CONFIG_XENO_OPT_REGISTRY_NRSLOTS / 100)];
+}
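+
+/*
+ * For instance, with CONFIG_XENO_OPT_REGISTRY_NRSLOTS set to 512,
+ * 512 / 100 yields index 5, so primes[5] == 601 hash buckets are
+ * allocated; oversized configurations are clamped to the last entry
+ * (1103) by obj_hash_max().
+ */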
+
+int xnregistry_init(void)
+{
+	int n, ret __maybe_unused;
+
+	registry_obj_slots = kmalloc(CONFIG_XENO_OPT_REGISTRY_NRSLOTS *
+				     sizeof(struct xnobject), GFP_KERNEL);
+	if (registry_obj_slots == NULL)
+		return -ENOMEM;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = xnvfile_init_dir("registry", &registry_vfroot, &cobalt_vfroot);
+	if (ret) {
+		kfree(registry_obj_slots);
+		return ret;
+	}
+
+	ret = xnvfile_init_regular("usage", &usage_vfile, &registry_vfroot);
+	if (ret) {
+		xnvfile_destroy_dir(&registry_vfroot);
+		kfree(registry_obj_slots);
+		return ret;
+	}
+
+	proc_virq = pipeline_create_inband_sirq(registry_proc_schedule);
+	if (proc_virq < 0) {
+		xnvfile_destroy_regular(&usage_vfile);
+		xnvfile_destroy_dir(&registry_vfroot);
+		kfree(registry_obj_slots);
+		return proc_virq;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	next_object_stamp = 0;
+
+	for (n = 0; n < CONFIG_XENO_OPT_REGISTRY_NRSLOTS; n++) {
+		registry_obj_slots[n].objaddr = NULL;
+		list_add_tail(&registry_obj_slots[n].link, &free_object_list);
+	}
+
+	/* Slot #0 is reserved/invalid. */
+	list_get_entry(&free_object_list, struct xnobject, link);
+	nr_active_objects = 1;
+
+	nr_object_entries = xnregistry_hash_size();
+	object_index = kmalloc(sizeof(*object_index) *
+				      nr_object_entries, GFP_KERNEL);
+
+	if (object_index == NULL) {
+#ifdef CONFIG_XENO_OPT_VFILE
+		pipeline_delete_inband_sirq(proc_virq);
+		xnvfile_destroy_regular(&usage_vfile);
+		xnvfile_destroy_dir(&registry_vfroot);
+#endif /* CONFIG_XENO_OPT_VFILE */
+		kfree(registry_obj_slots);
+		return -ENOMEM;
+	}
+
+	for (n = 0; n < nr_object_entries; n++)
+		INIT_HLIST_HEAD(&object_index[n]);
+
+	xnsynch_init(&register_synch, XNSYNCH_FIFO, NULL);
+
+	return 0;
+}
+
+void xnregistry_cleanup(void)
+{
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct hlist_node *enext;
+	struct xnobject *ecurr;
+	struct xnpnode *pnode;
+	int n;
+
+	flush_scheduled_work();
+
+	for (n = 0; n < nr_object_entries; n++)
+		hlist_for_each_entry_safe(ecurr, enext,
+					  &object_index[n], hlink) {
+			pnode = ecurr->pnode;
+			if (pnode == NULL)
+				continue;
+
+			pnode->ops->unexport(ecurr, pnode);
+
+			if (--pnode->entries > 0)
+				continue;
+
+			xnvfile_destroy_dir(&pnode->vdir);
+
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(&pnode->root->vdir);
+		}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	kfree(object_index);
+	xnsynch_destroy(&register_synch);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	pipeline_delete_inband_sirq(proc_virq);
+	flush_scheduled_work();
+	xnvfile_destroy_regular(&usage_vfile);
+	xnvfile_destroy_dir(&registry_vfroot);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	kfree(registry_obj_slots);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static DEFINE_SEMAPHORE(export_mutex);
+
+/*
+ * The following code implements the mechanism for delegating
+ * export/unexport requests to/from the /proc interface from the
+ * Xenomai domain to the Linux kernel (i.e. the "lower stage"). This
+ * ends up being a bit complex because such requests may lag long
+ * enough before the Linux kernel processes them that subsequent
+ * requests contradict former ones before the latter were even
+ * applied (e.g. export -> unexport in the Xenomai domain for
+ * short-lived objects). Such situations are handled by carefully
+ * synchronizing operations across domains.
+ */
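+
+/*
+ * A sketch of the per-object export state, as encoded in ->vfilp by
+ * the logic below: registry_export_pnode() marks an object
+ * XNOBJECT_EXPORT_SCHEDULED and queues it for proc_callback(), which
+ * switches it to XNOBJECT_EXPORT_INPROGRESS while the vfile is being
+ * created; an early xnregistry_remove() turns INPROGRESS into
+ * XNOBJECT_EXPORT_ABORTED, so that the callback drops the stale slot
+ * instead of exporting it.
+ */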
+static void proc_callback(struct work_struct *work)
+{
+	struct xnvfile_directory *rdir, *dir;
+	const char *rname, *type;
+	struct xnobject *object;
+	struct xnpnode *pnode;
+	int ret;
+	spl_t s;
+
+	down(&export_mutex);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	while (!list_empty(&proc_object_list)) {
+		object = list_get_entry(&proc_object_list,
+					struct xnobject, link);
+		pnode = object->pnode;
+		type = pnode->dirname;
+		dir = &pnode->vdir;
+		rdir = &pnode->root->vdir;
+		rname = pnode->root->dirname;
+
+		if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED)
+			goto unexport;
+
+		object->vfilp = XNOBJECT_EXPORT_INPROGRESS;
+		list_add_tail(&object->link, &busy_object_list);
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		if (pnode->entries++ == 0) {
+			if (pnode->root->entries++ == 0) {
+				/* Create the root directory on the fly. */
+				ret = xnvfile_init_dir(rname, rdir, &registry_vfroot);
+				if (ret) {
+					xnlock_get_irqsave(&nklock, s);
+					object->pnode = NULL;
+					pnode->root->entries = 0;
+					pnode->entries = 0;
+					continue;
+				}
+			}
+			/* Create the class directory on the fly. */
+			ret = xnvfile_init_dir(type, dir, rdir);
+			if (ret) {
+				if (pnode->root->entries == 1) {
+					pnode->root->entries = 0;
+					xnvfile_destroy_dir(rdir);
+				}
+				xnlock_get_irqsave(&nklock, s);
+				object->pnode = NULL;
+				pnode->entries = 0;
+				continue;
+			}
+		}
+
+		ret = pnode->ops->export(object, pnode);
+		if (ret && --pnode->entries == 0) {
+			xnvfile_destroy_dir(dir);
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(rdir);
+			xnlock_get_irqsave(&nklock, s);
+			object->pnode = NULL;
+		} else
+			xnlock_get_irqsave(&nklock, s);
+
+		continue;
+
+	unexport:
+		if (object->vfilp == XNOBJECT_EXPORT_ABORTED)
+			object->objaddr = NULL;
+
+		object->vfilp = NULL;
+		object->pnode = NULL;
+
+		if (object->objaddr)
+			list_add_tail(&object->link, &busy_object_list);
+		else {
+			/*
+			 * Trap the case where we are unexporting an
+			 * already unregistered object.
+			 */
+			list_add_tail(&object->link, &free_object_list);
+			nr_active_objects--;
+		}
+
+		xnlock_put_irqrestore(&nklock, s);
+
+		pnode->ops->unexport(object, pnode);
+
+		if (--pnode->entries == 0) {
+			xnvfile_destroy_dir(dir);
+			if (--pnode->root->entries == 0)
+				xnvfile_destroy_dir(rdir);
+		}
+
+		xnlock_get_irqsave(&nklock, s);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	up(&export_mutex);
+}
+
+static irqreturn_t registry_proc_schedule(int virq, void *dev_id)
+{
+	/*
+	 * schedule_work() will check for us if the work has already
+	 * been scheduled, so just be lazy and submit blindly.
+	 */
+	schedule_work(&registry_proc_work);
+
+	return IRQ_HANDLED;
+}
+
+static int registry_export_vfsnap(struct xnobject *object,
+				  struct xnpnode *pnode)
+{
+	struct xnpnode_snapshot *p;
+	int ret;
+
+	/*
+	 * Make sure to initialize _all_ mandatory vfile fields; most
+	 * of the time we are using sane NULL defaults based on static
+	 * storage for the vfile struct, but here we are building up a
+	 * vfile object explicitly.
+	 */
+	p = container_of(pnode, struct xnpnode_snapshot, node);
+	object->vfile_u.vfsnap.file.datasz = p->vfile.datasz;
+	object->vfile_u.vfsnap.file.privsz = p->vfile.privsz;
+	/*
+	 * Make the vfile refer to the provided tag struct if any,
+	 * otherwise use our default tag space. In the latter case,
+	 * each object family has its own private revision tag.
+	 */
+	object->vfile_u.vfsnap.file.tag = p->vfile.tag ?:
+		&object->vfile_u.vfsnap.tag;
+	object->vfile_u.vfsnap.file.ops = p->vfile.ops;
+	object->vfile_u.vfsnap.file.entry.lockops = p->vfile.lockops;
+
+	ret = xnvfile_init_snapshot(object->key, &object->vfile_u.vfsnap.file,
+				    &pnode->vdir);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.vfsnap.file.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vfsnap(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_snapshot(&object->vfile_u.vfsnap.file);
+}
+
+static void registry_touch_vfsnap(struct xnobject *object)
+{
+	xnvfile_touch(&object->vfile_u.vfsnap.file);
+}
+
+struct xnpnode_ops xnregistry_vfsnap_ops = {
+	.export = registry_export_vfsnap,
+	.unexport = registry_unexport_vfsnap,
+	.touch = registry_touch_vfsnap,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vfsnap_ops);
+
+static int registry_export_vfreg(struct xnobject *object,
+				 struct xnpnode *pnode)
+{
+	struct xnpnode_regular *p;
+	int ret;
+
+	/* See registry_export_vfsnap() for hints. */
+	p = container_of(pnode, struct xnpnode_regular, node);
+	object->vfile_u.vfreg.privsz = p->vfile.privsz;
+	object->vfile_u.vfreg.ops = p->vfile.ops;
+	object->vfile_u.vfreg.entry.lockops = p->vfile.lockops;
+	object->vfile_u.vfreg.entry.refcnt = 0;
+
+	ret = xnvfile_init_regular(object->key, &object->vfile_u.vfreg,
+				   &pnode->vdir);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.vfreg.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vfreg(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_regular(&object->vfile_u.vfreg);
+}
+
+struct xnpnode_ops xnregistry_vfreg_ops = {
+	.export = registry_export_vfreg,
+	.unexport = registry_unexport_vfreg,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vfreg_ops);
+
+static int registry_export_vlink(struct xnobject *object,
+				 struct xnpnode *pnode)
+{
+	struct xnpnode_link *link_desc;
+	char *link_target;
+	int ret;
+
+	link_desc = container_of(pnode, struct xnpnode_link, node);
+	link_target = link_desc->target(object->objaddr);
+	if (link_target == NULL)
+		return -ENOMEM;
+
+	ret = xnvfile_init_link(object->key, link_target,
+				&object->vfile_u.link, &pnode->vdir);
+	kfree(link_target);
+	if (ret)
+		return ret;
+
+	object->vfilp = &object->vfile_u.link.entry;
+	object->vfilp->private = object->objaddr;
+
+	return 0;
+}
+
+static void registry_unexport_vlink(struct xnobject *object,
+				    struct xnpnode *pnode)
+{
+	xnvfile_destroy_link(&object->vfile_u.link);
+}
+
+struct xnpnode_ops xnregistry_vlink_ops = {
+	.export = registry_export_vlink,
+	.unexport = registry_unexport_vlink,
+};
+EXPORT_SYMBOL_GPL(xnregistry_vlink_ops);
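+
+/*
+ * A minimal sketch of hooking these ops from a hypothetical object
+ * class (names invented for illustration; field layout per struct
+ * xnpnode_link and struct xnpnode_root in registry.h). The target
+ * handler returns a dynamically allocated path which
+ * registry_export_vlink() above frees once the link is created:
+ *
+ *	static char *demo_link_target(void *obj)
+ *	{
+ *		struct demo_object *dobj = obj;
+ *
+ *		return kasprintf(GFP_KERNEL, "../demo.%d", dobj->id);
+ *	}
+ *
+ *	static struct xnpnode_root demo_root = {
+ *		.dirname = "demo-skin",
+ *	};
+ *
+ *	static struct xnpnode_link demo_pnode = {
+ *		.node = {
+ *			.dirname = "demo",
+ *			.root = &demo_root,
+ *			.ops = &xnregistry_vlink_ops,
+ *		},
+ *		.target = demo_link_target,
+ *	};
+ */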
+
+static inline void registry_export_pnode(struct xnobject *object,
+					 struct xnpnode *pnode)
+{
+	object->vfilp = XNOBJECT_EXPORT_SCHEDULED;
+	object->pnode = pnode;
+	list_del(&object->link);
+	list_add_tail(&object->link, &proc_object_list);
+	pipeline_post_sirq(proc_virq);
+}
+
+static inline void registry_unexport_pnode(struct xnobject *object)
+{
+	if (object->vfilp != XNOBJECT_EXPORT_SCHEDULED) {
+		/*
+		 * We might have preempted a v-file read op, so bump
+		 * the object's revtag to make sure the data
+		 * collection is aborted next, if we end up deleting
+		 * the object being read.
+		 */
+		if (object->pnode->ops->touch)
+			object->pnode->ops->touch(object);
+		list_del(&object->link);
+		list_add_tail(&object->link, &proc_object_list);
+		pipeline_post_sirq(proc_virq);
+	} else {
+		/*
+		 * Unexporting before the lower stage has had a chance
+		 * to export. Move back the object to the busyq just
+		 * like if no export had been requested.
+		 */
+		list_del(&object->link);
+		list_add_tail(&object->link, &busy_object_list);
+		object->pnode = NULL;
+		object->vfilp = NULL;
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
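+/*
+ * Variant of the classic PJW/ELF string hash, folded into the bucket
+ * range.
+ */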
+static unsigned registry_hash_crunch(const char *key)
+{
+	unsigned int h = 0, g;
+
+#define HQON    24		/* Higher byte position */
+#define HBYTE   0xf0000000	/* Higher nibble on */
+
+	while (*key) {
+		h = (h << 4) + *key++;
+		if ((g = (h & HBYTE)) != 0)
+			h = (h ^ (g >> HQON)) ^ g;
+	}
+
+	return h % nr_object_entries;
+}
+
+static inline int registry_hash_enter(const char *key, struct xnobject *object)
+{
+	struct xnobject *ecurr;
+	unsigned s;
+
+	object->key = key;
+	s = registry_hash_crunch(key);
+
+	hlist_for_each_entry(ecurr, &object_index[s], hlink)
+		if (ecurr == object || strcmp(key, ecurr->key) == 0)
+			return -EEXIST;
+
+	hlist_add_head(&object->hlink, &object_index[s]);
+
+	return 0;
+}
+
+static inline int registry_hash_remove(struct xnobject *object)
+{
+	unsigned int s = registry_hash_crunch(object->key);
+	struct xnobject *ecurr;
+
+	hlist_for_each_entry(ecurr, &object_index[s], hlink)
+		if (ecurr == object) {
+			hlist_del(&ecurr->hlink);
+			return 0;
+		}
+
+	return -ESRCH;
+}
+
+static struct xnobject *registry_hash_find(const char *key)
+{
+	struct xnobject *ecurr;
+
+	hlist_for_each_entry(ecurr,
+			&object_index[registry_hash_crunch(key)], hlink)
+		if (strcmp(key, ecurr->key) == 0)
+			return ecurr;
+
+	return NULL;
+}
+
+struct registry_wait_context {
+	struct xnthread_wait_context wc;
+	const char *key;
+};
+
+static inline int registry_wakeup_sleepers(const char *key)
+{
+	struct registry_wait_context *rwc;
+	struct xnthread_wait_context *wc;
+	struct xnthread *sleeper, *tmp;
+	int cnt = 0;
+
+	xnsynch_for_each_sleeper_safe(sleeper, tmp, &register_synch) {
+		wc = xnthread_get_wait_context(sleeper);
+		rwc = container_of(wc, struct registry_wait_context, wc);
+		if (*key == *rwc->key && strcmp(key, rwc->key) == 0) {
+			xnsynch_wakeup_this_sleeper(&register_synch, sleeper);
+			++cnt;
+		}
+	}
+
+	return cnt;
+}
+
+/**
+ * @fn int xnregistry_enter(const char *key,void *objaddr,xnhandle_t *phandle,struct xnpnode *pnode)
+ * @brief Register a real-time object.
+ *
+ * This service allocates a new registry slot for an associated
+ * object, and indexes it by an alphanumeric key for later retrieval.
+ *
+ * @param key A valid NULL-terminated string by which the object will
+ * be indexed and later retrieved in the registry. Since the key is
+ * assumed to be stored within the registered object, it will *not*
+ * be copied but only kept by reference in the registry. Pass an empty
+ * or NULL string if the object shall only occupy a registry slot for
+ * handle-based lookups. The slash character is not accepted in @a key
+ * if @a pnode is non-NULL.
+ *
+ * @param objaddr An opaque pointer to the object to index by @a
+ * key.
+ *
+ * @param phandle A pointer to a generic handle defined by the
+ * registry which will uniquely identify the indexed object, until the
+ * latter is unregistered using the xnregistry_remove() service.
+ *
+ * @param pnode A pointer to an optional /proc node class
+ * descriptor. This structure provides the information needed to
+ * export all objects from the given class through the /proc
+ * filesystem, under the /proc/xenomai/registry entry. Passing NULL
+ * indicates that no /proc support is available for the newly
+ * registered object.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a objaddr is NULL.
+ *
+ * - -EINVAL if @a pnode is non-NULL, and @a key points to a valid
+ * string containing a '/' character.
+ *
+ * - -EAGAIN is returned if no free slot is available in the
+ * registry, i.e. the maximum number of registered objects
+ * (CONFIG_XENO_OPT_REGISTRY_NRSLOTS) has been reached.
+ *
+ * - -EEXIST is returned if the @a key is already in use.
+ *
+ * @coretags{unrestricted, might-switch, atomic-entry}
+ */
+int xnregistry_enter(const char *key, void *objaddr,
+		     xnhandle_t *phandle, struct xnpnode *pnode)
+{
+	struct xnobject *object;
+	spl_t s;
+	int ret;
+
+	if (objaddr == NULL ||
+	    (pnode != NULL && key != NULL && strchr(key, '/')))
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&free_object_list)) {
+		ret = -EAGAIN;
+		goto unlock_and_exit;
+	}
+
+	object = list_get_entry(&free_object_list, struct xnobject, link);
+	nr_active_objects++;
+	object->objaddr = objaddr;
+	object->cstamp = ++next_object_stamp;
+	trace_cobalt_registry_enter(key, objaddr);
+#ifdef CONFIG_XENO_OPT_VFILE
+	object->pnode = NULL;
+#endif
+	if (key == NULL || *key == '\0') {
+		object->key = NULL;
+		*phandle = object - registry_obj_slots;
+		ret = 0;
+		goto unlock_and_exit;
+	}
+
+	ret = registry_hash_enter(key, object);
+	if (ret) {
+		nr_active_objects--;
+		list_add_tail(&object->link, &free_object_list);
+		goto unlock_and_exit;
+	}
+
+	list_add_tail(&object->link, &busy_object_list);
+
+	/*
+	 * <!> Make sure the handle is written back before the
+	 * rescheduling takes place.
+	 */
+	*phandle = object - registry_obj_slots;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if (pnode)
+		registry_export_pnode(object, pnode);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	if (registry_wakeup_sleepers(key))
+		xnsched_run();
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_enter);
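+
+/*
+ * As stated above, a NULL or empty key is valid and yields an
+ * anonymous, handle-only slot (hypothetical sketch; error handling
+ * elided):
+ *
+ *	xnhandle_t handle;
+ *
+ *	xnregistry_enter(NULL, &obj, &handle, NULL);
+ *
+ * No key is hashed in this case, so the object remains reachable
+ * through its handle only, e.g. via xnregistry_lookup().
+ */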
+
+/**
+ * @fn int xnregistry_bind(const char *key,xnticks_t timeout,int timeout_mode,xnhandle_t *phandle)
+ * @brief Bind to a real-time object.
+ *
+ * This service retrieves the registry handle of a given object
+ * identified by its key. Unless otherwise specified, this service
+ * will block the caller if the object is not registered yet, waiting
+ * for such registration to occur.
+ *
+ * @param key A valid NULL-terminated string which identifies the
+ * object to bind to.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread waits for the object to be registered. This value is a wait
+ * time given as a count of nanoseconds. It can either be relative,
+ * absolute monotonic (XN_ABSOLUTE), or absolute adjustable
+ * (XN_REALTIME) depending on @a timeout_mode. Passing XN_INFINITE @b
+ * and setting @a timeout_mode to XN_RELATIVE specifies an unbounded
+ * wait. Passing XN_NONBLOCK causes the service to return immediately
+ * without waiting if the object is not registered on entry. All other
+ * values are used as a wait limit.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @param phandle A pointer to a memory location which will be written
+ * upon success with the generic handle defined by the registry for
+ * the retrieved object. The contents of this memory area are
+ * undefined upon failure.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a key is NULL.
+ *
+ * - -EINTR is returned if xnthread_unblock() has been called for the
+ * waiting thread before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to XN_NONBLOCK
+ * and the searched object is not registered on entry. As a special
+ * exception, this error is also returned if this service should
+ * block, but was called from a context which cannot sleep
+ * (e.g. interrupt, non-realtime or scheduler locked).
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note xnregistry_bind() only returns the index portion of a handle,
+ * which might include other fixed bits to be complete
+ * (e.g. XNSYNCH_PSHARED). The caller is responsible for completing
+ * the handle returned with those bits if applicable, depending on the
+ * context.
+ */
+int xnregistry_bind(const char *key, xnticks_t timeout, int timeout_mode,
+		    xnhandle_t *phandle)
+{
+	struct registry_wait_context rwc;
+	struct xnobject *object;
+	int ret = 0, info;
+	spl_t s;
+
+	if (key == NULL)
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (timeout_mode == XN_RELATIVE &&
+	    timeout != XN_INFINITE && timeout != XN_NONBLOCK) {
+		timeout_mode = XN_ABSOLUTE;
+		timeout += xnclock_read_monotonic(&nkclock);
+	}
+
+	for (;;) {
+		object = registry_hash_find(key);
+		if (object) {
+			*phandle = object - registry_obj_slots;
+			goto unlock_and_exit;
+		}
+
+		if ((timeout_mode == XN_RELATIVE && timeout == XN_NONBLOCK) ||
+		    xnsched_unblockable_p()) {
+			ret = -EWOULDBLOCK;
+			goto unlock_and_exit;
+		}
+
+		rwc.key = key;
+		xnthread_prepare_wait(&rwc.wc);
+		info = xnsynch_sleep_on(&register_synch, timeout, timeout_mode);
+		if (info & XNTIMEO) {
+			ret = -ETIMEDOUT;
+			goto unlock_and_exit;
+		}
+		if (info & XNBREAK) {
+			ret = -EINTR;
+			goto unlock_and_exit;
+		}
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_bind);
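+
+/*
+ * A non-blocking probe, per the XN_NONBLOCK rule documented above
+ * (hypothetical caller-side sketch):
+ *
+ *	ret = xnregistry_bind("my-object", XN_NONBLOCK, XN_RELATIVE,
+ *			      &handle);
+ *
+ * where -EWOULDBLOCK means the object is not registered yet and the
+ * caller chose not to wait for it.
+ */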
+
+/**
+ * @fn int xnregistry_remove(xnhandle_t handle)
+ * @brief Forcibly unregister a real-time object.
+ *
+ * This service forcibly removes an object from the registry. The
+ * removal is performed regardless of the current object's locking
+ * status.
+ *
+ * @param handle The generic handle of the object to remove.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -ESRCH is returned if @a handle does not reference a registered
+ * object.
+ *
+ * @coretags{unrestricted}
+ */
+int xnregistry_remove(xnhandle_t handle)
+{
+	struct xnobject *object;
+	void *objaddr;
+	int ret = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	object = xnregistry_validate(handle);
+	if (object == NULL) {
+		ret = -ESRCH;
+		goto unlock_and_exit;
+	}
+
+	trace_cobalt_registry_remove(object->key, object->objaddr);
+
+	objaddr = object->objaddr;
+	object->objaddr = NULL;
+	object->cstamp = 0;
+
+	if (object->key) {
+		registry_hash_remove(object);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+		if (object->pnode) {
+			if (object->vfilp == XNOBJECT_EXPORT_INPROGRESS) {
+				object->vfilp = XNOBJECT_EXPORT_ABORTED;
+				object->objaddr = objaddr;
+			}
+
+			registry_unexport_pnode(object);
+			/*
+			 * Leave the update of the object queues to
+			 * the work callback if it has been kicked.
+			 */
+			if (object->pnode) {
+				xnlock_put_irqrestore(&nklock, s);
+				if (is_secondary_domain())
+					flush_work(&registry_proc_work);
+				return 0;
+			}
+		}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+		list_del(&object->link);
+	}
+
+	if (!IS_ENABLED(CONFIG_XENO_OPT_VFILE) || !object->objaddr) {
+		list_add_tail(&object->link, &free_object_list);
+		nr_active_objects--;
+	}
+
+unlock_and_exit:
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnregistry_remove);
+
+/**
+ * Turn a named object into an anonymous object
+ *
+ * @coretags{unrestricted}
+ */
+int xnregistry_unlink(const char *key)
+{
+	struct xnobject *object;
+	int ret = 0;
+	spl_t s;
+
+	if (key == NULL)
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	object = registry_hash_find(key);
+	if (object == NULL) {
+		ret = -ESRCH;
+		goto unlock_and_exit;
+	}
+
+	trace_cobalt_registry_unlink(object->key, object->objaddr);
+
+	ret = registry_hash_remove(object);
+	if (ret < 0)
+		goto unlock_and_exit;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if (object->pnode) {
+		registry_unexport_pnode(object);
+		/*
+		 * Leave the update of the object queues to
+		 * the work callback if it has been kicked.
+		 */
+		if (object->pnode)
+			goto unlock_and_exit;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	list_del(&object->link);
+
+	object->key = NULL;
+
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+/**
+ * @fn void *xnregistry_lookup(xnhandle_t handle, unsigned long *cstamp_r)
+ * @brief Find a real-time object in the registry.
+ *
+ * This service retrieves an object from the registry by its handle
+ * and returns the memory address of its descriptor. Optionally, it
+ * also copies back the object's creation stamp which is unique across
+ * object registration calls.
+ *
+ * @param handle The generic handle of the object to fetch.
+ *
+ * @param cstamp_r If non-NULL, the object's creation stamp will be
+ * copied to this memory area.
+ *
+ * @return The memory address of the object's descriptor is returned
+ * on success. Otherwise, NULL is returned if @a handle does not
+ * reference a registered object.
+ *
+ * @coretags{unrestricted}
+ */
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES
new file mode 100644
index 0000000..978799f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/API.CHANGES
@@ -0,0 +1,53 @@
+Scheduled modifications (unsorted):
+ o Packet pool management (generalised variant of RTnet's and RT-FireWire's
+   buffer pools).
+ o Threaded IRQ handlers.
+ o Support for deferring IRQ line re-enabling from handler to thread context.
+
+Revision 9:
+
+See http://xenomai.org/migrating-from-xenomai-2-x-to-3-x/#RTDM_interface_changes.
+
+Revision 8:
+ o Added rtdm_rt_capable.
+ o Added rtdm_context_put as the logical counterpart to rtdm_context_get.
+
+Revision 7:
+ o Added callbacks and services to enable select support.
+
+Revision 6:
+ o Added profile_version field to rtdm_device.
+ o Requested IRQ lines are now enabled on return of rtdm_irq_request.
+ o Converted request argument in IOCTL handler to unsigned int to fix issues
+   on 64-bit architectures.
+ o Added custom argument to rtdm_nrtsig handler.
+ o Introduced Timer API.
+ o Introduced monotonic time base:
+    - obtainable via rtdm_clock_read_monotonic
+    - usable via new rtdm_task_sleep_abs or the timer API
+ o Deprecated rtdm_task_sleep_until, users shall migrate to
+   rtdm_task_sleep_abs(..., RTDM_TIMERMODE_REALTIME).
+
+Revision 5:
+ o Introduced generic time types nanosecs_abs_t and nanosecs_rel_t.
+ o Switched the following functions parameters from unsigned to signed
+   (uint64_t -> nanosecs_rel_t) and adopted their semantics:
+    - period in rtdm_task_init, period 0 means non-periodic
+    - period in rtdm_task_set_period, period 0 means non-periodic
+    - delay in rtdm_task_sleep, now clearly specified: delay = 0 means
+      infinite delay, delay < 0 means no delay at all
+    - delay in rtdm_task_busy_sleep, same semantics as before (delay <= 0
+      means no delay)
+ o Added rtdm_safe_copy_to/from_user.
+ o Added rtdm_iomap_to_user.
+
+Revision 4:
+ o Dropped RTDM_IRQ_PROPAGATE ISR return flag. Generic deterministic RTDM
+   drivers should not interact with standard Linux in this way.
+ o Merged RTDM_IRQ_ENABLE into RTDM_IRQ_HANDLED return code. An explicit
+   request to leave the IRQ line disabled upon return from ISR will be
+   provided in later versions via rtdm_irq_disable.
+ o Added RTDM_IRQTYPE_SHARED and RTDM_IRQTYPE_EDGE flags which indicate
+   the specific handling of shareable level- and edge-triggered IRQs.
+ o Added rtdm_mmap_to_user and rtdm_munmap. Intended usage is the mapping of
+   driver memory like DMA buffers into the address range of a user task.
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING
new file mode 100644
index 0000000..66b9f24
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/COPYING
@@ -0,0 +1,281 @@
+		    GNU GENERAL PUBLIC LICENSE
+		       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.
+                          59 Temple Place - Suite 330, Boston, MA
+                          02111-1307, USA.
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Library General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+
+		    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+			    NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile
new file mode 100644
index 0000000..4f5a6ca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/Makefile
@@ -0,0 +1,10 @@
+
+obj-$(CONFIG_XENOMAI) += xenomai.o
+
+xenomai-y :=	core.o		\
+		device.o	\
+		drvlib.o	\
+		fd.o		\
+		wrappers.o
+
+ccflags-y += -I$(srctree)/$(src)/.. -I$(srctree)/kernel
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c
new file mode 100644
index 0000000..dcced04
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/core.c
@@ -0,0 +1,1373 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/workqueue.h>
+#include <linux/slab.h>
+#include <linux/file.h>
+#include <linux/sched.h>
+#include <linux/fs.h>
+#include <linux/fdtable.h>
+#include <linux/anon_inodes.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/heap.h>
+#include "rtdm/internal.h"
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-rtdm.h>
+#include "posix/process.h"
+
+/**
+ * @ingroup rtdm
+ * @defgroup rtdm_driver_interface Driver programming interface
+ * RTDM driver programming interface
+ * @{
+ */
+
+static void cleanup_instance(struct rtdm_device *dev,
+			     struct rtdm_dev_context *context)
+{
+	if (context)
+		kfree(context);
+
+	__rtdm_put_device(dev);
+}
+
+void __rtdm_dev_close(struct rtdm_fd *fd)
+{
+	struct rtdm_dev_context *context = rtdm_fd_to_context(fd);
+	struct rtdm_device *dev = context->device;
+	struct rtdm_driver *drv = dev->driver;
+
+	if (!fd->stale && drv->ops.close)
+		drv->ops.close(fd);
+
+	cleanup_instance(dev, context);
+}
+
+int __rtdm_anon_getfd(const char *name, int flags)
+{
+	return anon_inode_getfd(name, &rtdm_dumb_fops, NULL, flags);
+}
+
+void __rtdm_anon_putfd(int ufd)
+{
+	close_fd(ufd);
+}
+
+static int create_instance(int ufd, struct rtdm_device *dev,
+			   struct rtdm_dev_context **context_ptr)
+{
+	struct rtdm_driver *drv = dev->driver;
+	struct rtdm_dev_context *context;
+
+	/*
+	 * Reset to NULL so that we can always use cleanup_files/instance
+	 * to revert even partially successful allocations.
+	 */
+	*context_ptr = NULL;
+
+	if ((drv->device_flags & RTDM_EXCLUSIVE) != 0 &&
+	    atomic_read(&dev->refcount) > 1)
+		return -EBUSY;
+
+	context = kzalloc(sizeof(struct rtdm_dev_context) +
+			  drv->context_size, GFP_KERNEL);
+	if (unlikely(context == NULL))
+		return -ENOMEM;
+
+	context->device = dev;
+	*context_ptr = context;
+
+	return rtdm_fd_enter(&context->fd, ufd, RTDM_FD_MAGIC, &dev->ops);
+}
+
+#ifdef CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE
+
+static inline struct file *
+open_devnode(struct rtdm_device *dev, const char *path, int oflag)
+{
+	struct file *filp;
+	char *filename;
+
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_LEGACY) &&
+	    strncmp(path, "/dev/rtdm/", 10))
+		printk(XENO_WARNING
+		       "%s[%d] opens obsolete device path: %s\n",
+		       current->comm, task_pid_nr(current), path);
+
+	filename = kasprintf(GFP_KERNEL, "/dev/rtdm/%s", dev->name);
+	if (filename == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	filp = filp_open(filename, oflag, 0);
+	kfree(filename);
+
+	return filp;
+}
+
+#else /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */
+
+static inline struct file *
+open_devnode(struct rtdm_device *dev, const char *path, int oflag)
+{
+	return filp_open(path, oflag, 0);
+}
+
+#endif /* !CONFIG_XENO_OPT_RTDM_COMPAT_DEVNODE */
+
+int __rtdm_dev_open(const char *path, int oflag)
+{
+	struct rtdm_dev_context *context;
+	struct rtdm_device *dev;
+	struct file *filp;
+	int ufd, ret;
+
+	secondary_mode_only();
+
+	/*
+	 * CAUTION: we do want a lookup into the registry to happen
+	 * before any attempt is made to open the devnode, so that we
+	 * don't inadvertently open a regular (i.e. non-RTDM) device.
+	 * Reason is that opening, then closing a device - because we
+	 * don't manage it - may incur side-effects we don't want,
+	 * e.g. opening then closing one end of a pipe would cause the
+	 * other side to read the EOF condition.  This is basically
+	 * why we keep a RTDM registry for named devices, so that we
+	 * can figure out whether an open() request is going to be
+	 * valid, without having to open the devnode yet.
+	 */
+	dev = __rtdm_get_namedev(path);
+	if (dev == NULL)
+		return -EADV;
+
+	ufd = get_unused_fd_flags(oflag);
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_fd;
+	}
+
+	filp = open_devnode(dev, path, oflag);
+	if (IS_ERR(filp)) {
+		ret = PTR_ERR(filp);
+		goto fail_fopen;
+	}
+
+	ret = create_instance(ufd, dev, &context);
+	if (ret < 0)
+		goto fail_create;
+
+	context->fd.minor = dev->minor;
+	context->fd.oflags = oflag;
+
+	trace_cobalt_fd_open(current, &context->fd, ufd, oflag);
+
+	if (dev->ops.open) {
+		ret = dev->ops.open(&context->fd, oflag);
+		if (!XENO_ASSERT(COBALT, !spltest()))
+			splnone();
+		if (ret < 0)
+			goto fail_open;
+	}
+
+	ret = rtdm_device_new_fd(&context->fd, ufd, context->device);
+	if (ret < 0)
+		goto fail_open;
+
+	fd_install(ufd, filp);
+
+	return ufd;
+
+fail_open:
+	cleanup_instance(dev, context);
+fail_create:
+	filp_close(filp, current->files);
+fail_fopen:
+	put_unused_fd(ufd);
+fail_fd:
+	__rtdm_put_device(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__rtdm_dev_open);
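+
+/*
+ * Application-side view (illustrative sketch; the device name is
+ * hypothetical): a Cobalt application reaches this service through
+ * the POSIX open() call routed to RTDM, e.g.
+ *
+ *	fd = open("/dev/rtdm/mydevice", O_RDWR);
+ *
+ * The name must match a device previously registered with RTDM,
+ * otherwise -EADV is returned before the devnode is ever opened (see
+ * the lookup comment above).
+ */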
+
+int __rtdm_dev_socket(int protocol_family, int socket_type,
+		      int protocol)
+{
+	struct rtdm_dev_context *context;
+	struct rtdm_device *dev;
+	int ufd, ret;
+
+	secondary_mode_only();
+
+	dev = __rtdm_get_protodev(protocol_family, socket_type);
+	if (dev == NULL)
+		return -EAFNOSUPPORT;
+
+	ufd = __rtdm_anon_getfd("[rtdm-socket]", O_RDWR);
+	if (ufd < 0) {
+		ret = ufd;
+		goto fail_getfd;
+	}
+
+	ret = create_instance(ufd, dev, &context);
+	if (ret < 0)
+		goto fail_create;
+
+	trace_cobalt_fd_socket(current, &context->fd, ufd, protocol_family);
+
+	if (dev->ops.socket) {
+		ret = dev->ops.socket(&context->fd, protocol);
+		if (!XENO_ASSERT(COBALT, !spltest()))
+			splnone();
+		if (ret < 0)
+			goto fail_socket;
+	}
+
+	ret = rtdm_device_new_fd(&context->fd, ufd, context->device);
+	if (ret < 0)
+		goto fail_socket;
+
+	return ufd;
+
+fail_socket:
+	cleanup_instance(dev, context);
+fail_create:
+	close_fd(ufd);
+fail_getfd:
+	__rtdm_put_device(dev);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(__rtdm_dev_socket);
+
+int __rtdm_dev_ioctl_core(struct rtdm_fd *fd, unsigned int request,
+			  void __user *arg)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_driver *drv;
+	struct rtdm_device_info dev_info;
+
+	if (fd->magic != RTDM_FD_MAGIC || request != RTIOC_DEVICE_INFO)
+		return -EADV;
+
+	drv = dev->driver;
+	dev_info.device_flags = drv->device_flags;
+	dev_info.device_class = drv->profile_info.class_id;
+	dev_info.device_sub_class = drv->profile_info.subclass_id;
+	dev_info.profile_version = drv->profile_info.version;
+
+	return rtdm_safe_copy_to_user(fd, arg, &dev_info,  sizeof(dev_info));
+}
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @addtogroup rtdm_sync
+ *@{
+ */
+
+/**
+ * @fn void rtdm_waitqueue_init(struct rtdm_waitqueue *wq)
+ * @brief Initialize an RTDM wait queue
+ *
+ * Sets up a wait queue structure for further use.
+ *
+ * @param wq waitqueue to initialize.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_waitqueue_init(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq)
+ * @brief Delete an RTDM wait queue
+ *
+ * Dismantles a wait queue structure, releasing all resources attached
+ * to it.
+ *
+ * @param wq waitqueue to delete.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_waitqueue_destroy(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn rtdm_timedwait_condition_locked(struct rtdm_waitqueue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a locked waitqueue until a condition becomes true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true or a timeout occurs. The condition is checked each time the
+ * waitqueue @a wq is signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * such service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait_condition_locked(struct rtdm_waitqueue *wq, C_expr condition,
+				nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
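+
+/*
+ * A minimal driver-side sketch (hypothetical "state" fields; assumes
+ * the rtdm_waitqueue_lock()/rtdm_waitqueue_unlock() pair takes an
+ * rtdm_lockctx_t context, as in the RTDM driver API):
+ *
+ *	rtdm_lockctx_t ctx;
+ *	int ret;
+ *
+ *	rtdm_waitqueue_lock(&state->wq, ctx);
+ *	ret = rtdm_timedwait_condition_locked(&state->wq,
+ *					      state->data_ready,
+ *					      timeout, NULL);
+ *	rtdm_waitqueue_unlock(&state->wq, ctx);
+ *
+ * The producer updates state->data_ready, then calls
+ * rtdm_waitqueue_signal(&state->wq), as the note below requires.
+ */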
+
+/**
+ * @fn rtdm_wait_condition_locked(struct rtdm_waitqueue *wq, C_expr condition)
+ * @brief Sleep on a locked waitqueue until a condition becomes true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true. The condition is checked each time the waitqueue @a wq is
+ * signaled.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if calling task has received a Linux signal or
+ * has been forcibly unblocked by a call to rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait_condition_locked(struct rtdm_waitqueue *wq, C_expr condition);
+
+/**
+ * @fn rtdm_timedwait_condition(struct rtdm_waitqueue *wq, C_expr condition, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a waitqueue until a condition becomes true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true or a timeout occurs. The condition is checked each time the
+ * waitqueue @a wq is signaled.
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has received a Linux
+ * signal or has been forcibly unblocked by a call to
+ * rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * this service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait_condition(struct rtdm_waitqueue *wq, C_expr condition,
+			 nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
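+
+/*
+ * The matching producer-side sketch for the condition waits above,
+ * again based on the illustrative my_dev type: the condition variable
+ * is updated under the waitqueue lock before signaling, as the notes
+ * above require.
+ *
+ * @code
+ * static void data_arrived(struct my_dev *dev)
+ * {
+ *    rtdm_lockctx_t ctx;
+ *
+ *    rtdm_waitqueue_lock(&dev->wq, ctx);
+ *    dev->pending = 1;
+ *    rtdm_waitqueue_signal(&dev->wq);
+ *    rtdm_waitqueue_unlock(&dev->wq, ctx);
+ * }
+ * @endcode
+ */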
+
+/**
+ * @fn rtdm_timedwait(struct rtdm_waitqueue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * this service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait(struct rtdm_waitqueue *wq,
+	       nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn rtdm_timedwait_locked(struct rtdm_waitqueue *wq, nanosecs_rel_t timeout, rtdm_toseq_t *toseq)
+ * @brief Timed sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush(), or a timeout occurs.
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @param timeout relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values.
+ * 
+ * @param[in,out] toseq handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * @note Passing RTDM_TIMEOUT_NONE to @a timeout makes no sense for
+ * this service, and might cause unexpected behavior.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_timedwait_locked(struct rtdm_waitqueue *wq,
+		      nanosecs_rel_t timeout, rtdm_toseq_t *toseq);
+
+/**
+ * @fn rtdm_wait_condition(struct rtdm_waitqueue *wq, C_expr condition)
+ * @brief Sleep on a waitqueue until a condition gets true
+ *
+ * The calling task is put to sleep until @a condition evaluates to
+ * true. The condition is checked each time the waitqueue @a wq is
+ * signaled.
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @param condition C expression for the event to wait for.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has received a Linux
+ * signal or has been forcibly unblocked by a call to
+ * rtdm_task_unblock().
+ *
+ * @note rtdm_waitqueue_signal() has to be called after changing any
+ * variable that could change the result of the wait condition.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait_condition(struct rtdm_waitqueue *wq, C_expr condition);
+
+/**
+ * @fn rtdm_wait(struct rtdm_waitqueue *wq)
+ * @brief Sleep on a waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * @param wq waitqueue to wait on.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn rtdm_wait_locked(struct rtdm_waitqueue *wq)
+ * @brief Sleep on a locked waitqueue unconditionally
+ *
+ * The calling task is put to sleep until the waitqueue is signaled by
+ * either rtdm_waitqueue_signal() or rtdm_waitqueue_broadcast(), or
+ * flushed by a call to rtdm_waitqueue_flush().
+ *
+ * The waitqueue must have been locked by a call to
+ * rtdm_waitqueue_lock() prior to calling this service.
+ *
+ * @param wq locked waitqueue to wait on. The waitqueue lock is
+ * dropped when sleeping, then reacquired before this service returns
+ * to the caller.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the waitqueue has been flushed, or the
+ * calling task has received a Linux signal or has been forcibly
+ * unblocked by a call to rtdm_task_unblock().
+ *
+ * @coretags{primary-only, might-switch}
+ */
+rtdm_wait_locked(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_lock(struct rtdm_waitqueue *wq, rtdm_lockctx_t context)
+ * @brief Lock a waitqueue
+ *
+ * Acquires the lock on the waitqueue @a wq.
+ *
+ * @param wq waitqueue to lock.
+ *
+ * @param context name of local variable to store the context in.
+ *
+ * @note Recursive locking might lead to unexpected behavior,
+ * including a lockup.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_waitqueue_lock(struct rtdm_waitqueue *wq, rtdm_lockctx_t context);
+
+/**
+ * @fn void rtdm_waitqueue_unlock(struct rtdm_waitqueue *wq, rtdm_lockctx_t context)
+ * @brief Unlock a waitqueue
+ *
+ * Releases the lock on the waitqueue @a wq.
+ *
+ * @param wq waitqueue to unlock.
+ *
+ * @param context name of local variable to retrieve the context from.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_waitqueue_unlock(struct rtdm_waitqueue *wq, rtdm_lockctx_t context);
+
+/**
+ * @fn rtdm_waitqueue_signal(struct rtdm_waitqueue *wq)
+ * @brief Signal a waitqueue
+ *
+ * Signals the waitqueue @a wq, waking up a single waiter (if
+ * any).
+ *
+ * @param wq waitqueue to signal.
+ *
+ * @return non-zero if a task has been readied as a result of this
+ * call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+rtdm_waitqueue_signal(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn rtdm_waitqueue_broadcast(struct rtdm_waitqueue *wq)
+ * @brief Broadcast a waitqueue
+ *
+ * Broadcasts the waitqueue @a wq, waking up all waiters. Each
+ * readied task may assume it has received the wake-up event.
+ *
+ * @param wq waitqueue to broadcast.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+rtdm_waitqueue_broadcast(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn rtdm_waitqueue_flush(struct rtdm_waitqueue *wq)
+ * @brief Flush a waitqueue
+ *
+ * Flushes the waitqueue @a wq, unblocking all waiters with an error
+ * status (-EINTR).
+ *
+ * @param wq waitqueue to flush.
+ *
+ * @return non-zero if at least one task has been readied as a result
+ * of this call, zero otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+rtdm_waitqueue_flush(struct rtdm_waitqueue *wq);
+
+/**
+ * @fn void rtdm_waitqueue_wakeup(struct rtdm_waitqueue *wq, rtdm_task_t waiter)
+ * @brief Signal a particular waiter on a waitqueue
+ *
+ * Signals the waitqueue @a wq, waking up waiter @a waiter only,
+ * which must be currently sleeping on the waitqueue.
+ *
+ * @param wq waitqueue to signal.
+ *
+ * @param waiter RTDM task to wake up.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_waitqueue_wakeup(struct rtdm_waitqueue *wq, rtdm_task_t waiter);
+
+/**
+ * @fn rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_waitqueue *wq)
+ * @brief Simple iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a wq, assigning each RTDM task pointer to the cursor variable
+ * @a pos, which must be of type rtdm_task_t.
+ *
+ * @a wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param wq waitqueue to scan.
+ *
+ * @note The waitqueue should not be signaled, broadcast or flushed
+ * during the traversal, unless the loop is aborted immediately
+ * after. Should multiple waiters be readied while iterating, the safe
+ * form rtdm_for_each_waiter_safe() must be used for traversal
+ * instead.
+ *
+ * @coretags{unrestricted}
+ */
+rtdm_for_each_waiter(rtdm_task_t pos, struct rtdm_waitqueue *wq);
+
+/**
+ * @fn rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_waitqueue *wq)
+ * @brief Safe iterator for waitqueues
+ *
+ * This construct traverses the wait list of a given waitqueue
+ * @a wq, assigning each RTDM task pointer to the cursor variable
+ * @a pos, which must be of type rtdm_task_t.
+ *
+ * Unlike with rtdm_for_each_waiter(), the waitqueue may be signaled,
+ * broadcast or flushed during the traversal.
+ *
+ * @a wq must have been locked by a call to rtdm_waitqueue_lock()
+ * prior to traversing its wait list.
+ *
+ * @param pos cursor variable holding a pointer to the RTDM task
+ * being fetched.
+ *
+ * @param tmp temporary cursor variable.
+ *
+ * @param wq waitqueue to scan.
+ *
+ * @coretags{unrestricted}
+ */
+rtdm_for_each_waiter_safe(rtdm_task_t pos, rtdm_task_t tmp, struct rtdm_waitqueue *wq);
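+
+/*
+ * A sketch of waking a chosen subset of waiters with the safe
+ * iterator; the is_urgent() predicate is an illustrative assumption
+ * standing for any driver-defined selection criterion.
+ *
+ * @code
+ * struct rtdm_waitqueue wq;
+ * rtdm_task_t *task, *tmp;
+ * rtdm_lockctx_t ctx;
+ *
+ * rtdm_waitqueue_lock(&wq, ctx);
+ * rtdm_for_each_waiter_safe(task, tmp, &wq) {
+ *    if (is_urgent(task))
+ *        rtdm_waitqueue_wakeup(&wq, task);
+ * }
+ * rtdm_waitqueue_unlock(&wq, ctx);
+ * @endcode
+ */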
+
+/** @} rtdm_sync */
+
+/**
+ * @defgroup rtdm_interdriver_api Driver to driver services
+ * Inter-driver interface
+ *@{
+ */
+
+/**
+ * @brief Open a device
+ *
+ * Refer to rtdm_open() for parameters and return values.
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_open(const char *path, int oflag, ...);
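+
+/*
+ * A sketch of chaining to another RTDM driver from module init
+ * context (these services are secondary-only); the otherdev device
+ * name and the payload are illustrative assumptions.
+ *
+ * @code
+ * char buf[16] = "hello";
+ * ssize_t ret;
+ * int fd;
+ *
+ * fd = rtdm_open("/dev/rtdm/otherdev", O_RDWR);
+ * if (fd < 0)
+ *    return fd;
+ *
+ * ret = rtdm_write(fd, buf, sizeof(buf));
+ * rtdm_close(fd);
+ * return ret < 0 ? ret : 0;
+ * @endcode
+ */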
+
+/**
+ * @brief Create a socket
+ *
+ * Refer to rtdm_socket() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_socket(int protocol_family, int socket_type, int protocol);
+
+/**
+ * @brief Close a device or socket
+ *
+ * Refer to rtdm_close() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_close(int fd);
+
+/**
+ * @brief Issue an IOCTL
+ *
+ * Refer to rtdm_ioctl() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_ioctl(int fd, int request, ...);
+
+/**
+ * @brief Read from device
+ *
+ * Refer to rtdm_read() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_read(int fd, void *buf, size_t nbyte);
+
+/**
+ * @brief Write to device
+ *
+ * Refer to rtdm_write() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_write(int fd, const void *buf, size_t nbyte);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recvmsg() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recvfrom() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags,
+		      struct sockaddr *from, socklen_t *fromlen);
+
+/**
+ * @brief Receive message from socket
+ *
+ * Refer to rtdm_recv() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_sendmsg() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_sendto() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags,
+		    const struct sockaddr *to, socklen_t tolen);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * Refer to rtdm_send() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags);
+
+/**
+ * @brief Bind to local address
+ *
+ * Refer to rtdm_bind() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen);
+
+/**
+ * @brief Connect to remote address
+ *
+ * Refer to rtdm_connect() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_connect(int fd, const struct sockaddr *serv_addr, socklen_t addrlen);
+
+/**
+ * @brief Listen to incoming connection requests
+ *
+ * Refer to rtdm_listen() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_listen(int fd, int backlog);
+
+/**
+ * @brief Accept a connection request
+ *
+ * Refer to rtdm_accept() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen);
+
+/**
+ * @brief Shut down parts of a connection
+ *
+ * Refer to rtdm_shutdown() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_shutdown(int fd, int how);
+
+/**
+ * @brief Get socket option
+ *
+ * Refer to rtdm_getsockopt() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockopt(int fd, int level, int optname, void *optval,
+		    socklen_t *optlen);
+
+/**
+ * @brief Set socket option
+ *
+ * Refer to rtdm_setsockopt() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_setsockopt(int fd, int level, int optname, const void *optval,
+		    socklen_t optlen);
+
+/**
+ * @brief Get local socket address
+ *
+ * Refer to rtdm_getsockname() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/**
+ * @brief Get socket destination address
+ *
+ * Refer to rtdm_getpeername() for parameters and return values. Action
+ * depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/** @} Inter-driver calls */
+
+/** @} */
+
+/*!
+ * @addtogroup rtdm_user_api
+ * @{
+ */
+
+/**
+ * @brief Open a device
+ *
+ * @param[in] path Device name
+ * @param[in] oflag Open flags
+ * @param ... Further parameters will be ignored.
+ *
+ * @return Positive file descriptor value on success, otherwise a negative
+ * error code.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c open() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_open(const char *path, int oflag, ...);
+
+/**
+ * @brief Create a socket
+ *
+ * @param[in] protocol_family Protocol family (@c PF_xxx)
+ * @param[in] socket_type Socket type (@c SOCK_xxx)
+ * @param[in] protocol Protocol ID, 0 for default
+ *
+ * @return Positive file descriptor value on success, otherwise a negative
+ * error code.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c socket() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_socket(int protocol_family, int socket_type, int protocol);
+
+/**
+ * @brief Close a device or socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket()
+ *
+ * @return 0 on success, otherwise a negative error code.
+ *
+ * @note If the matching rtdm_open() or rtdm_socket() call took place in
+ * non-real-time context, rtdm_close() must be issued from non-real-time
+ * context as well. Otherwise, the call will fail.
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c close() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_close(int fd);
+
+/**
+ * @brief Issue an IOCTL
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open() or rtdm_socket()
+ * @param[in] request IOCTL code
+ * @param ... Optional third argument, depending on IOCTL function
+ * (@c void @c * or @c unsigned @c long)
+ *
+ * @return Positive value on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c ioctl() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_ioctl(int fd, int request, ...);
+
+/**
+ * @brief Read from device
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open()
+ * @param[out] buf Input buffer
+ * @param[in] nbyte Number of bytes to read
+ *
+ * @return Number of bytes read, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c read() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_read(int fd, void *buf, size_t nbyte);
+
+/**
+ * @brief Write to device
+ *
+ * @param[in] fd File descriptor as returned by rtdm_open()
+ * @param[in] buf Output buffer
+ * @param[in] nbyte Number of bytes to write
+ *
+ * @return Number of bytes written, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c write() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_write(int fd, const void *buf, size_t nbyte);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in,out] msg Message descriptor
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recvmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvmsg(int fd, struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ * @param[out] from Buffer for message sender address
+ * @param[in,out] fromlen Address buffer size
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recvfrom() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recvfrom(int fd, void *buf, size_t len, int flags,
+		      struct sockaddr *from, socklen_t *fromlen);
+
+/**
+ * @brief Receive message from socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes received, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c recv() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_recv(int fd, void *buf, size_t len, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] msg Message descriptor
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c sendmsg() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendmsg(int fd, const struct user_msghdr *msg, int flags);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ * @param[in] to Buffer for message destination address
+ * @param[in] tolen Address buffer size
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c sendto() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_sendto(int fd, const void *buf, size_t len, int flags,
+		    const struct sockaddr *to, socklen_t tolen);
+
+/**
+ * @brief Transmit message to socket
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] buf Message buffer
+ * @param[in] len Message buffer size
+ * @param[in] flags Message flags
+ *
+ * @return Number of bytes sent, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c send() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+ssize_t rtdm_send(int fd, const void *buf, size_t len, int flags);
+
+/**
+ * @brief Bind to local address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] my_addr Address buffer
+ * @param[in] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c bind() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen);
+
+/**
+ * @brief Connect to remote address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] serv_addr Address buffer
+ * @param[in] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c connect() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_connect(int fd, const struct sockaddr *serv_addr,
+		 socklen_t addrlen);
+
+/**
+ * @brief Listen for incoming connection requests
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] backlog Maximum queue length
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c listen() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_listen(int fd, int backlog);
+
+/**
+ * @brief Accept connection requests
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] addr Buffer for remote address
+ * @param[in,out] addrlen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c accept() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{mode-unrestricted, might-switch}
+ */
+int rtdm_accept(int fd, struct sockaddr *addr, socklen_t *addrlen);
+
+/**
+ * @brief Shut down parts of a connection
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] how Specifies the part to be shut down (@c SHUT_xxx)
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c shutdown() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_shutdown(int fd, int how);
+
+/**
+ * @brief Get socket option
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] level Addressed stack level
+ * @param[in] optname Option name ID
+ * @param[out] optval Value buffer
+ * @param[in,out] optlen Value buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockopt(int fd, int level, int optname, void *optval,
+		      socklen_t *optlen);
+
+/**
+ * @brief Set socket option
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[in] level Addressed stack level
+ * @param[in] optname Option name ID
+ * @param[in] optval Value buffer
+ * @param[in] optlen Value buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c setsockopt() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_setsockopt(int fd, int level, int optname, const void *optval,
+		    socklen_t optlen);
+
+/**
+ * @brief Get local socket address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] name Address buffer
+ * @param[in,out] namelen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getsockname() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getsockname(int fd, struct sockaddr *name, socklen_t *namelen);
+
+/**
+ * @brief Get socket destination address
+ *
+ * @param[in] fd File descriptor as returned by rtdm_socket()
+ * @param[out] name Address buffer
+ * @param[in,out] namelen Address buffer size
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * Action depends on driver implementation, see @ref rtdm_profiles
+ * "Device Profiles".
+ *
+ * @see @c getpeername() in IEEE Std 1003.1,
+ * http://www.opengroup.org/onlinepubs/009695399
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int rtdm_getpeername(int fd, struct sockaddr *name, socklen_t *namelen);
+
+#endif /* DOXYGEN_CPP */
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c
new file mode 100644
index 0000000..1215515
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/device.c
@@ -0,0 +1,651 @@
+/*
+ * Real-Time Driver Model for Xenomai, device management
+ *
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/mutex.h>
+#include <linux/slab.h>
+#include <linux/device.h>
+#include <linux/notifier.h>
+#include "rtdm/internal.h"
+#include <cobalt/kernel/init.h>
+#include <trace/events/cobalt-rtdm.h>
+
+/**
+ * @ingroup rtdm
+ * @defgroup rtdm_profiles Device Profiles
+ *
+ * Pre-defined classes of real-time devices
+ *
+ * Device profiles define which operation handlers a driver of a
+ * certain class of devices has to implement, which name or protocol
+ * it has to register, which IOCTLs it has to provide, and further
+ * details. Sub-classes can be defined in order to extend a device
+ * profile with more hardware-specific functions.
+ */
+
+/**
+ * @addtogroup rtdm_driver_interface
+ * @{
+ */
+
+#define RTDM_DEVICE_MAGIC	0x82846877
+
+static struct rb_root protocol_devices;
+
+static DEFINE_MUTEX(register_lock);
+static DECLARE_BITMAP(protocol_devices_minor_map, RTDM_MAX_MINOR);
+
+static struct class *rtdm_class;
+
+static int enosys(void)
+{
+	return -ENOSYS;
+}
+
+void __rtdm_put_device(struct rtdm_device *dev)
+{
+	secondary_mode_only();
+
+	if (atomic_dec_and_test(&dev->refcount))
+		wake_up(&dev->putwq);
+}
+
+static inline xnkey_t get_proto_id(int pf, int type)
+{
+	xnkey_t llpf = (unsigned int)pf;
+	return (llpf << 32) | (unsigned int)type;
+}
+
+struct rtdm_device *__rtdm_get_namedev(const char *path)
+{
+	struct rtdm_device *dev;
+	xnhandle_t handle;
+	int ret;
+
+	secondary_mode_only();
+
+	/* skip common /dev prefix */
+	if (strncmp(path, "/dev/", 5) == 0)
+		path += 5;
+
+	/* skip RTDM devnode root */
+	if (strncmp(path, "rtdm/", 5) == 0)
+		path += 5;
+
+	ret = xnregistry_bind(path, XN_NONBLOCK, XN_RELATIVE, &handle);
+	if (ret)
+		return NULL;
+
+	mutex_lock(&register_lock);
+
+	dev = xnregistry_lookup(handle, NULL);
+	if (dev && dev->magic == RTDM_DEVICE_MAGIC)
+		__rtdm_get_device(dev);
+	else
+		dev = NULL;
+
+	mutex_unlock(&register_lock);
+
+	return dev;
+}
+
+struct rtdm_device *__rtdm_get_protodev(int protocol_family, int socket_type)
+{
+	struct rtdm_device *dev = NULL;
+	struct xnid *xnid;
+	xnkey_t id;
+
+	secondary_mode_only();
+
+	id = get_proto_id(protocol_family, socket_type);
+
+	mutex_lock(&register_lock);
+
+	xnid = xnid_fetch(&protocol_devices, id);
+	if (xnid) {
+		dev = container_of(xnid, struct rtdm_device, proto.id);
+		__rtdm_get_device(dev);
+	}
+
+	mutex_unlock(&register_lock);
+
+	return dev;
+}
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_device_register Device Registration Services
+ * @{
+ */
+
+static char *rtdm_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s", dev_name(dev));
+}
+
+static ssize_t profile_show(struct device *kdev,
+			    struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+
+	return sprintf(buf, "%d,%d\n",
+		       dev->driver->profile_info.class_id,
+		       dev->driver->profile_info.subclass_id);
+}
+
+static ssize_t refcount_show(struct device *kdev,
+			     struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+
+	return sprintf(buf, "%d\n", atomic_read(&dev->refcount));
+}
+
+#define cat_count(__buf, __str)			\
+	({					\
+		int __ret = sizeof(__str) - 1;	\
+		strcat(__buf, __str);		\
+		__ret;				\
+	})
+
+static ssize_t flags_show(struct device *kdev,
+			  struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+	struct rtdm_driver *drv = dev->driver;
+
+	return sprintf(buf, "%#x\n", drv->device_flags);
+}
+
+static ssize_t type_show(struct device *kdev,
+			 struct device_attribute *attr, char *buf)
+{
+	struct rtdm_device *dev = dev_get_drvdata(kdev);
+	struct rtdm_driver *drv = dev->driver;
+	int ret;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE)
+		ret = cat_count(buf, "named\n");
+	else
+		ret = cat_count(buf, "protocol\n");
+
+	return ret;
+}
+
+#ifdef ATTRIBUTE_GROUPS
+
+static DEVICE_ATTR_RO(profile);
+static DEVICE_ATTR_RO(refcount);
+static DEVICE_ATTR_RO(flags);
+static DEVICE_ATTR_RO(type);
+
+static struct attribute *rtdm_attrs[] = {
+	&dev_attr_profile.attr,
+	&dev_attr_refcount.attr,
+	&dev_attr_flags.attr,
+	&dev_attr_type.attr,
+	NULL,
+};
+ATTRIBUTE_GROUPS(rtdm);
+
+#else /* !ATTRIBUTE_GROUPS */
+
+/*
+ * Cope with legacy sysfs attributes. Scheduled for removal when 3.10
+ * is at EOL for us.
+ */
+static struct device_attribute rtdm_attrs[] = {
+	DEVICE_ATTR_RO(profile),
+	DEVICE_ATTR_RO(refcount),
+	DEVICE_ATTR_RO(flags),
+	DEVICE_ATTR_RO(type),
+	__ATTR_NULL 
+};
+
+#define dev_groups   dev_attrs
+#define rtdm_groups  rtdm_attrs
+
+#endif /* !ATTRIBUTE_GROUPS */
+
+static int state_change_notifier(struct notifier_block *nb,
+				 unsigned long action, void *data)
+{
+	struct rtdm_driver *drv;
+	int ret;
+
+	drv = container_of(nb, struct rtdm_driver, nb_statechange);
+
+	switch (action) {
+	case COBALT_STATE_WARMUP:
+		if (drv->smops.start == NULL)
+			return NOTIFY_DONE;
+		ret = drv->smops.start(drv);
+		if (ret)
+			printk(XENO_WARNING
+			       "failed starting driver %s (%d)\n",
+			       drv->profile_info.name, ret);
+		break;
+	case COBALT_STATE_TEARDOWN:
+		if (drv->smops.stop == NULL)
+			return NOTIFY_DONE;
+		ret = drv->smops.stop(drv);
+		if (ret)
+			printk(XENO_WARNING
+			       "failed stopping driver %s (%d)\n",
+			       drv->profile_info.name, ret);
+		break;
+	default:
+		return NOTIFY_DONE;
+	}
+
+	return NOTIFY_OK;
+}
+
+static int register_driver(struct rtdm_driver *drv)
+{
+	dev_t rdev;
+	int ret;
+
+	if (drv->profile_info.magic == RTDM_CLASS_MAGIC) {
+		atomic_inc(&drv->refcount);
+		return 0;
+	}
+
+	if (drv->profile_info.magic != ~RTDM_CLASS_MAGIC) {
+		XENO_WARN_ON_ONCE(COBALT, 1);
+		return -EINVAL;
+	}
+
+	switch (drv->device_flags & RTDM_DEVICE_TYPE_MASK) {
+	case RTDM_NAMED_DEVICE:
+	case RTDM_PROTOCOL_DEVICE:
+		break;
+	default:
+		printk(XENO_WARNING "%s has invalid device type (%#x)\n",
+		       drv->profile_info.name,
+		       drv->device_flags & RTDM_DEVICE_TYPE_MASK);
+		return -EINVAL;
+	}
+
+	if (drv->device_count <= 0 ||
+	    drv->device_count > RTDM_MAX_MINOR) {
+		printk(XENO_WARNING "%s has invalid device count (%d)\n",
+		       drv->profile_info.name, drv->device_count);
+		return -EINVAL;
+	}
+
+	if ((drv->device_flags & RTDM_NAMED_DEVICE) == 0)
+		goto done;
+
+	if (drv->base_minor < 0 ||
+	    drv->base_minor >= RTDM_MAX_MINOR) {
+		printk(XENO_WARNING "%s has invalid base minor (%d)\n",
+		       drv->profile_info.name, drv->base_minor);
+		return -EINVAL;
+	}
+
+	ret = alloc_chrdev_region(&rdev, drv->base_minor, drv->device_count,
+				  drv->profile_info.name);
+	if (ret) {
+		printk(XENO_WARNING "cannot allocate chrdev region %s[%d..%d]\n",
+		       drv->profile_info.name, drv->base_minor,
+		       drv->base_minor + drv->device_count - 1);
+		return ret;
+	}
+
+	cdev_init(&drv->named.cdev, &rtdm_dumb_fops);
+	ret = cdev_add(&drv->named.cdev, rdev, drv->device_count);
+	if (ret) {
+		printk(XENO_WARNING "cannot create cdev series for %s\n",
+		       drv->profile_info.name);
+		goto fail_cdev;
+	}
+
+	drv->named.major = MAJOR(rdev);
+	bitmap_zero(drv->minor_map, RTDM_MAX_MINOR);
+
+done:
+	atomic_set(&drv->refcount, 1);
+	drv->nb_statechange.notifier_call = state_change_notifier;
+	drv->nb_statechange.priority = 0;
+	cobalt_add_state_chain(&drv->nb_statechange);
+	drv->profile_info.magic = RTDM_CLASS_MAGIC;
+
+	return 0;
+
+fail_cdev:
+	unregister_chrdev_region(rdev, drv->device_count);
+
+	return ret;
+}
+
+static void unregister_driver(struct rtdm_driver *drv)
+{
+	XENO_BUG_ON(COBALT, drv->profile_info.magic != RTDM_CLASS_MAGIC);
+
+	if (!atomic_dec_and_test(&drv->refcount))
+		return;
+
+	cobalt_remove_state_chain(&drv->nb_statechange);
+
+	drv->profile_info.magic = ~RTDM_CLASS_MAGIC;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		cdev_del(&drv->named.cdev);
+		unregister_chrdev_region(MKDEV(drv->named.major, drv->base_minor),
+					 drv->device_count);
+	}
+}
+
+/**
+ * @brief Register a RTDM device
+ *
+ * Registers a device in the RTDM namespace.
+ *
+ * @param[in] dev Device descriptor.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if the descriptor contains invalid
+ * entries. RTDM_PROFILE_INFO() must appear in the list of
+ * initializers for the driver properties.
+ *
+ * - -EEXIST is returned if the specified device name or protocol ID is
+ * already in use.
+ *
+ * - -ENOMEM is returned if a memory allocation failed in the process
+ * of registering the device.
+ *
+ * - -EAGAIN is returned if no registry slot is available (check/raise
+ * CONFIG_XENO_OPT_REGISTRY_NRSLOTS).
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * - -ENXIO is returned if no valid minor could be assigned.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_dev_register(struct rtdm_device *dev)
+{
+	struct class *kdev_class = rtdm_class;
+	struct device *kdev = NULL;
+	struct rtdm_driver *drv;
+	int ret, major, minor;
+	xnkey_t id;
+	dev_t rdev;
+	const char *dev_name;
+
+	secondary_mode_only();
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	mutex_lock(&register_lock);
+
+	dev->name = NULL;
+	drv = dev->driver;
+	ret = register_driver(drv);
+	if (ret) {
+		mutex_unlock(&register_lock);
+		return ret;
+	}
+
+	dev->ops = drv->ops;
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		dev->ops.socket =
+			(typeof(dev->ops.socket))(void (*)(void))enosys;
+	} else {
+		dev->ops.open = (typeof(dev->ops.open))(void (*)(void))enosys;
+	}
+
+	INIT_LIST_HEAD(&dev->openfd_list);
+	init_waitqueue_head(&dev->putwq);
+	dev->ops.close = __rtdm_dev_close; /* Interpose on driver's handler. */
+	atomic_set(&dev->refcount, 0);
+
+	if (drv->profile_info.kdev_class)
+		kdev_class = drv->profile_info.kdev_class;
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		if (drv->device_flags & RTDM_FIXED_MINOR) {
+			minor = dev->minor;
+			if (minor < 0 ||
+			    minor >= drv->base_minor + drv->device_count) {
+				ret = -ENXIO;
+				goto fail;
+			}
+		} else {
+			minor = find_first_zero_bit(drv->minor_map, RTDM_MAX_MINOR);
+			if (minor >= RTDM_MAX_MINOR) {
+				ret = -ENXIO;
+				goto fail;
+			}
+			dev->minor = minor;
+		}
+
+		major = drv->named.major;
+		dev->name = kasformat(dev->label, minor);
+		if (dev->name == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+		if (dev->name[0] == '/') {
+			dev_name = dev->name+1;
+		} else {
+			dev_name = dev->name;
+		}
+		ret = xnregistry_enter(dev_name, dev,
+				       &dev->named.handle, NULL);
+		if (ret)
+			goto fail;
+
+		rdev = MKDEV(major, minor);
+		kdev = device_create(kdev_class, NULL, rdev,
+				     dev, kbasename(dev->label), minor);
+		if (IS_ERR(kdev)) {
+			xnregistry_remove(dev->named.handle);
+			ret = PTR_ERR(kdev);
+			goto fail2;
+		}
+		__set_bit(minor, drv->minor_map);
+	} else {
+		minor = find_first_zero_bit(protocol_devices_minor_map,
+					RTDM_MAX_MINOR);
+		if (minor >= RTDM_MAX_MINOR) {
+			ret = -ENXIO;
+			goto fail;
+		}
+		dev->minor = minor;
+
+		dev->name = kstrdup(dev->label, GFP_KERNEL);
+		if (dev->name == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		rdev = MKDEV(0, minor);
+		kdev = device_create(kdev_class, NULL, rdev,
+				     dev, dev->name);
+		if (IS_ERR(kdev)) {
+			ret = PTR_ERR(kdev);
+			goto fail2;
+		}
+
+		id = get_proto_id(drv->protocol_family, drv->socket_type);
+		ret = xnid_enter(&protocol_devices, &dev->proto.id, id);
+		if (ret < 0)
+			goto fail;
+		__set_bit(minor, protocol_devices_minor_map);
+	}
+
+	dev->rdev = rdev;
+	dev->kdev = kdev;
+	dev->magic = RTDM_DEVICE_MAGIC;
+	dev->kdev_class = kdev_class;
+
+	mutex_unlock(&register_lock);
+
+	trace_cobalt_device_register(dev);
+
+	return 0;
+fail:
+	if (kdev)
+		device_destroy(kdev_class, rdev);
+fail2:
+	unregister_driver(drv);
+
+	mutex_unlock(&register_lock);
+
+	if (dev->name)
+		kfree(dev->name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_dev_register);
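+
+/*
+ * A minimal registration sketch for a single named device; the
+ * profile name, class/subclass IDs, device label and the
+ * my_open()/my_close() handlers are illustrative assumptions, not
+ * prescribed by this service.
+ *
+ * @code
+ * static struct rtdm_driver my_driver = {
+ *    .profile_info = RTDM_PROFILE_INFO(my_profile,
+ *                                      RTDM_CLASS_EXPERIMENTAL, 0, 1),
+ *    .device_flags = RTDM_NAMED_DEVICE,
+ *    .device_count = 1,
+ *    .ops = {
+ *        .open = my_open,
+ *        .close = my_close,
+ *    },
+ * };
+ *
+ * static struct rtdm_device my_device = {
+ *    .driver = &my_driver,
+ *    .label = "mydev%d",
+ * };
+ *
+ * ret = rtdm_dev_register(&my_device);
+ * @endcode
+ */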
+
+/**
+ * @brief Unregister a RTDM device
+ *
+ * Removes the device from the RTDM namespace. This routine first
+ * attempts to tear down all active connections to @a dev prior
+ * to unregistering it.
+ *
+ * @param[in] dev Device descriptor.
+ *
+ * @coretags{secondary-only}
+ */
+void rtdm_dev_unregister(struct rtdm_device *dev)
+{
+	struct rtdm_driver *drv = dev->driver;
+
+	secondary_mode_only();
+
+	trace_cobalt_device_unregister(dev);
+
+	/* Lock out any further connection. */
+	dev->magic = ~RTDM_DEVICE_MAGIC;
+
+	/* Flush all fds from this device. */
+	rtdm_device_flush_fds(dev);
+
+	/* Then wait for the ongoing connections to finish. */
+	wait_event(dev->putwq,
+		   atomic_read(&dev->refcount) == 0);
+
+	mutex_lock(&register_lock);
+
+	if (drv->device_flags & RTDM_NAMED_DEVICE) {
+		xnregistry_remove(dev->named.handle);
+		__clear_bit(dev->minor, drv->minor_map);
+	} else {
+		xnid_remove(&protocol_devices, &dev->proto.id);
+		__clear_bit(dev->minor, protocol_devices_minor_map);
+	}
+
+	device_destroy(dev->kdev_class, dev->rdev);
+
+	unregister_driver(drv);
+
+	mutex_unlock(&register_lock);
+
+	kfree(dev->name);
+}
+EXPORT_SYMBOL_GPL(rtdm_dev_unregister);
+
+/**
+ * @brief Set the kernel device class of a RTDM driver.
+ *
+ * Set the kernel device class assigned to the RTDM driver. By
+ * default, RTDM drivers belong to Linux's "rtdm" device class,
+ * creating a device node hierarchy rooted at /dev/rtdm, and sysfs
+ * nodes under /sys/class/rtdm.
+ *
+ * This call assigns a user-defined kernel device class to the RTDM
+ * driver, so that its devices are created into a different system
+ * hierarchy.
+ *
+ * rtdm_drv_set_sysclass() is meaningful only before the first device
+ * attached to @a drv is registered by a call to
+ * rtdm_dev_register().
+ *
+ * @param[in] drv Address of the RTDM driver descriptor.
+ *
+ * @param[in] cls Pointer to the kernel device class. NULL is allowed
+ * to clear a previous setting, switching back to the default "rtdm"
+ * device class.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EBUSY is returned if the kernel device class has already been
+ * set for @a drv, or some device(s) attached to @a drv are currently
+ * registered.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @attention The kernel device class set by this call is not related to
+ * the RTDM class identification as defined by the @ref rtdm_profiles
+ * "RTDM profiles" in any way. This is strictly related to the Linux
+ * kernel device hierarchy.
+ */
+int rtdm_drv_set_sysclass(struct rtdm_driver *drv, struct class *cls)
+{
+	if ((cls && drv->profile_info.kdev_class) ||
+	    atomic_read(&drv->refcount))
+		return -EBUSY;
+
+	drv->profile_info.kdev_class = cls;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_drv_set_sysclass);
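+
+/*
+ * A sketch of moving a driver's devices under a custom device class
+ * before any of them is registered; the "mycard" class name and
+ * my_driver are illustrative assumptions.
+ *
+ * @code
+ * static struct class *my_class;
+ *
+ * my_class = class_create(THIS_MODULE, "mycard");
+ * if (IS_ERR(my_class))
+ *    return PTR_ERR(my_class);
+ *
+ * ret = rtdm_drv_set_sysclass(&my_driver, my_class);
+ * @endcode
+ */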
+
+/** @} */
+
+int __init rtdm_init(void)
+{
+	xntree_init(&protocol_devices);
+
+	rtdm_class = class_create(THIS_MODULE, "rtdm");
+	if (IS_ERR(rtdm_class)) {
+		printk(XENO_ERR "cannot create RTDM sysfs class\n");
+		return PTR_ERR(rtdm_class);
+	}
+	rtdm_class->dev_groups = rtdm_groups;
+	rtdm_class->devnode = rtdm_devnode;
+
+	bitmap_zero(protocol_devices_minor_map, RTDM_MAX_MINOR);
+
+	return 0;
+}
+
+void rtdm_cleanup(void)
+{
+	class_destroy(rtdm_class);
+	/*
+	 * NOTE: no need to flush the cleanup_queue as no device is
+	 * allowed to unregister as long as there are references.
+	 */
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c
new file mode 100644
index 0000000..99d54f5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/drvlib.c
@@ -0,0 +1,2493 @@
+/*
+ * Real-Time Driver Model for Xenomai, driver library
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2008 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/mman.h>
+#include <asm/page.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <linux/highmem.h>
+#include <linux/err.h>
+#include <linux/anon_inodes.h>
+#include <rtdm/driver.h>
+#include <rtdm/compat.h>
+#include <pipeline/inband_work.h>
+#include "internal.h"
+#include <trace/events/cobalt-rtdm.h>
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_clock Clock Services
+ * @{
+ */
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Get system time
+ *
+ * @return The system time in nanoseconds is returned
+ *
+ * @note The resolution of this service depends on the system timer. In
+ * particular, if the system timer is running in periodic mode, the return
+ * value will be limited to multiples of the timer tick period.
+ *
+ * @note The system timer may have to be started to obtain valid results.
+ * Whether this happens automatically (as on Xenomai) or is controlled by the
+ * application depends on the RTDM host environment.
+ *
+ * @coretags{unrestricted}
+ */
+nanosecs_abs_t rtdm_clock_read(void);
+
+/**
+ * @brief Get monotonic time
+ *
+ * @return The monotonic time in nanoseconds is returned
+ *
+ * @note The resolution of this service depends on the system timer. In
+ * particular, if the system timer is running in periodic mode, the return
+ * value will be limited to multiples of the timer tick period.
+ *
+ * @note The system timer may have to be started to obtain valid results.
+ * Whether this happens automatically (as on Xenomai) or is controlled by the
+ * application depends on the RTDM host environment.
+ *
+ * @coretags{unrestricted}
+ */
+nanosecs_abs_t rtdm_clock_read_monotonic(void);
+#endif /* DOXYGEN_CPP */
+/** @} */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_task Task Services
+ * @{
+ */
+
+/**
+ * @brief Initialise and start a real-time task
+ *
+ * After initialising a task, the task handle remains valid and can be
+ * passed to RTDM services until either rtdm_task_destroy() or
+ * rtdm_task_join() has been invoked.
+ *
+ * @param[in,out] task Task handle
+ * @param[in] name Optional task name
+ * @param[in] task_proc Procedure to be executed by the task
+ * @param[in] arg Custom argument passed to @c task_proc() on entry
+ * @param[in] priority Priority of the task, see also
+ * @ref rtdmtaskprio "Task Priority Range"
+ * @param[in] period Period in nanoseconds of a cyclic task, 0 for non-cyclic
+ * mode. Waiting for the first and subsequent periodic events is
+ * done using rtdm_task_wait_period().
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int rtdm_task_init(rtdm_task_t *task, const char *name,
+		   rtdm_task_proc_t task_proc, void *arg,
+		   int priority, nanosecs_rel_t period)
+{
+	union xnsched_policy_param param;
+	struct xnthread_start_attr sattr;
+	struct xnthread_init_attr iattr;
+	int err;
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	iattr.name = name;
+	iattr.flags = 0;
+	iattr.personality = &xenomai_personality;
+	iattr.affinity = CPU_MASK_ALL;
+	param.rt.prio = priority;
+
+	err = xnthread_init(task, &iattr, &xnsched_class_rt, &param);
+	if (err)
+		return err;
+
+	/* We need an anonymous registry entry to obtain a handle for fast
+	   mutex locking. */
+	err = xnthread_register(task, "");
+	if (err)
+		goto cleanup_out;
+
+	if (period > 0) {
+		err = xnthread_set_periodic(task, XN_INFINITE,
+					    XN_RELATIVE, period);
+		if (err)
+			goto cleanup_out;
+	}
+
+	sattr.mode = 0;
+	sattr.entry = task_proc;
+	sattr.cookie = arg;
+	err = xnthread_start(task, &sattr);
+	if (err)
+		goto cleanup_out;
+
+	return 0;
+
+cleanup_out:
+	xnthread_cancel(task);
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_init);
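+
+/*
+ * A sketch of a 1 ms periodic server task built on this service; the
+ * task name, the priority choice and the do_one_cycle() work routine
+ * are illustrative assumptions.
+ *
+ * @code
+ * static rtdm_task_t my_task;
+ *
+ * static void my_task_proc(void *arg)
+ * {
+ *    while (!rtdm_task_should_stop()) {
+ *        if (rtdm_task_wait_period(NULL))
+ *            break;
+ *        do_one_cycle(arg);
+ *    }
+ * }
+ *
+ * ret = rtdm_task_init(&my_task, "mytask", my_task_proc, NULL,
+ *                      RTDM_TASK_LOWEST_PRIORITY, 1000000);
+ * @endcode
+ */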
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Destroy a real-time task
+ *
+ * This call sends a termination request to @a task, then waits for it
+ * to exit. All RTDM tasks should check for pending termination
+ * requests by calling rtdm_task_should_stop() from their work loop.
+ *
+ * If @a task is current, rtdm_task_destroy() terminates the current
+ * context, and does not return to the caller.
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ *
+ * @note Passing the same task handle to RTDM services after the completion of
+ * this function is not allowed.
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+void rtdm_task_destroy(rtdm_task_t *task);
+
+/**
+ * @brief Check for pending termination request
+ *
+ * Check whether a termination request was received by the current
+ * RTDM task. Termination requests are sent by calling
+ * rtdm_task_destroy().
+ *
+ * @return Non-zero indicates that a termination request is pending,
+ * in which case the caller should wrap up and exit.
+ *
+ * @coretags{rtdm-task, might-switch}
+ */
+int rtdm_task_should_stop(void);
+
+/**
+ * @brief Adjust real-time task priority
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ * @param[in] priority New priority of the task, see also
+ * @ref rtdmtaskprio "Task Priority Range"
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_task_set_priority(rtdm_task_t *task, int priority);
+
+/**
+ * @brief Adjust real-time task period
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init(), or
+ * NULL for referring to the current RTDM task or Cobalt thread.
+ *
+ * @param[in] start_date The initial (absolute) date of the first
+ * release point, expressed in nanoseconds.  @a task will be delayed
+ * by the first call to rtdm_task_wait_period() until this point is
+ * reached. If @a start_date is zero, the first release point is set
+ * to @a period nanoseconds after the current date.
+ *
+ * @param[in] period New period in nanoseconds of a cyclic task, zero
+ * to disable cyclic mode for @a task.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_task_set_period(rtdm_task_t *task, nanosecs_abs_t start_date,
+			 nanosecs_rel_t period);
+
+/**
+ * @brief Wait on next real-time task period
+ *
+ * @param[in] overruns_r Address of a long word receiving the count of
+ * overruns if -ETIMEDOUT is returned, or NULL if the caller doesn't
+ * need that information.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if calling task is not in periodic mode.
+ *
+ * - -ETIMEDOUT is returned if a timer overrun occurred, which indicates
+ * that a previous release point has been missed by the calling task.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_wait_period(unsigned long *overruns_r);
+
+/**
+ * @brief Activate a blocked real-time task
+ *
+ * @return Non-zero is returned if the task was actually unblocked from a
+ * pending wait state, 0 otherwise.
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+int rtdm_task_unblock(rtdm_task_t *task);
+
+/**
+ * @brief Get current real-time task
+ *
+ * @return Pointer to task handle
+ *
+ * @coretags{mode-unrestricted}
+ */
+rtdm_task_t *rtdm_task_current(void);
+
+/**
+ * @brief Sleep a specified amount of time
+ *
+ * @param[in] delay Delay in nanoseconds, see @ref RTDM_TIMEOUT_xxx for
+ * special values.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep(nanosecs_rel_t delay);
+
+/**
+ * @brief Sleep until a specified absolute time
+ *
+ * @deprecated Use rtdm_task_sleep_abs instead!
+ *
+ * @param[in] wakeup_time Absolute timeout in nanoseconds
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep_until(nanosecs_abs_t wakeup_time);
+
+/**
+ * @brief Sleep until a specified absolute time
+ *
+ * @param[in] wakeup_time Absolute timeout in nanoseconds
+ * @param[in] mode Selects the timer mode, see RTDM_TIMERMODE_xxx for details
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_sleep_abs(nanosecs_abs_t wakeup_time, enum rtdm_timer_mode mode);
+
+/**
+ * @brief Safe busy waiting
+ *
+ * This service alternates active spinning and sleeping within a wait
+ * loop, until a condition is satisfied. While sleeping, a task is
+ * scheduled out and does not consume any CPU time.
+ *
+ * rtdm_task_busy_wait() is particularly useful for waiting for a
+ * state change observed by reading an I/O register. Such a change
+ * usually happens shortly after the wait starts, and this service
+ * avoids the adverse effects of long busy waiting if it doesn't.
+ *
+ * @param[in] condition The C expression to be tested for detecting
+ * completion.
+ * @param[in] spin_ns The time to spin on @a condition before
+ * sleeping, expressed as a count of nanoseconds.
+ * @param[in] sleep_ns The time to sleep for before spinning again,
+ * expressed as a count of nanoseconds.
+ *
+ * @return 0 if @a condition is satisfied, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * Linux signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EPERM may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_task_busy_wait(bool condition, nanosecs_rel_t spin_ns,
+			nanosecs_rel_t sleep_ns);
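+
+/*
+ * A typical rtdm_task_busy_wait() call: spin for up to 2 us on a
+ * device ready bit, then back off in 100 us sleeps until the bit
+ * rises. The register accessor, REG_STAT offset and STAT_READY flag
+ * are made-up names for the sake of the sketch.
+ *
+ * @code
+ * ret = rtdm_task_busy_wait(readl(dev->regs + REG_STAT) & STAT_READY,
+ *			     2000, 100000);
+ * @endcode
+ */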
+
+/**
+ * @brief Register wait context
+ *
+ * rtdm_wait_prepare() registers a wait context structure for the
+ * caller, which can be later retrieved by a call to
+ * rtdm_wait_get_context(). This call is normally issued before the
+ * current task blocks on a wait object, waiting for some (producer)
+ * code to wake it up. Arbitrary data can be exchanged between both
+ * sites via the wait context structure, which is allocated by the
+ * waiter (consumer) side.
+ *
+ * @a wc is the address of an anchor object which is commonly embedded
+ * into a larger structure with arbitrary contents, which needs to be
+ * shared between the consumer (waiter) and the producer for
+ * implementing the wait code.
+ *
+ * A typical implementation pattern for the wait side is:
+ *
+ * @code
+ * struct rtdm_waitqueue wq;
+ * struct some_wait_context {
+ *    int input_value;
+ *    int output_value;
+ *    struct rtdm_wait_context wc;
+ * } wait_context;
+ *
+ * wait_context.input_value = 42;
+ * rtdm_wait_prepare(&wait_context.wc);
+ * ret = rtdm_wait_condition(&wq, rtdm_wait_is_completed(&wait_context.wc));
+ * if (ret)
+ *     goto wait_failed;
+ * handle_event(wait_context.output_value);
+ * @endcode
+ *
+ * On the producer side, the implementation would look like:
+ *
+ * @code
+ * struct rtdm_waitqueue wq;
+ * struct some_wait_context {
+ *    int input_value;
+ *    int output_value;
+ *    struct rtdm_wait_context wc;
+ * } *wait_context_ptr;
+ * struct rtdm_wait_context *wc;
+ * rtdm_task_t *task;
+ *
+ * rtdm_for_each_waiter(task, &wq) {
+ *    wc = rtdm_wait_get_context(task);
+ *    wait_context_ptr = container_of(wc, struct some_wait_context, wc);
+ *    wait_context_ptr->output_value = 12;
+ * }
+ * rtdm_waitqueue_broadcast(&wq);
+ * @endcode
+ *
+ * @param wc Wait context to register.
+ */
+void rtdm_wait_prepare(struct rtdm_wait_context *wc);
+
+/**
+ * @brief Mark completion for a wait context
+ *
+ * rtdm_wait_complete() marks a wait context as completed, so that
+ * rtdm_wait_is_completed() returns true for such a context.
+ *
+ * @param wc Wait context to complete.
+ */
+void rtdm_wait_complete(struct rtdm_wait_context *wc);
+
+/**
+ * @brief Test completion of a wait context
+ *
+ * rtdm_wait_is_completed() returns true if rtdm_wait_complete() was
+ * called for @a wc. The completion mark is reset each time
+ * rtdm_wait_prepare() is called for a wait context.
+ *
+ * @param wc Wait context to check for completion.
+ *
+ * @return non-zero/true if rtdm_wait_complete() was called for @a wc,
+ * zero otherwise.
+ */
+int rtdm_wait_is_completed(struct rtdm_wait_context *wc);
+
+#endif /* DOXYGEN_CPP */
+
+int __rtdm_task_sleep(xnticks_t timeout, xntmode_t mode)
+{
+	struct xnthread *thread;
+
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+
+	thread = xnthread_current();
+	xnthread_suspend(thread, XNDELAY, timeout, mode, NULL);
+
+	return xnthread_test_info(thread, XNBREAK) ? -EINTR : 0;
+}
+
+EXPORT_SYMBOL_GPL(__rtdm_task_sleep);
+
+/**
+ * @brief Wait on a real-time task to terminate
+ *
+ * @param[in,out] task Task handle as returned by rtdm_task_init()
+ *
+ * @note Passing the same task handle to RTDM services after the
+ * completion of this function is not allowed.
+ *
+ * @note This service does not trigger the termination of the targeted
+ * task.  The caller has to take care of this, otherwise rtdm_task_join()
+ * will never return.
+ *
+ * @coretags{mode-unrestricted}
+ */
+void rtdm_task_join(rtdm_task_t *task)
+{
+	trace_cobalt_driver_task_join(task);
+
+	xnthread_join(task, true);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_join);
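+
+/*
+ * A common shutdown pattern built on rtdm_task_unblock() and
+ * rtdm_task_join(): raise a driver-private stop flag, kick the task
+ * out of any blocking service, then wait for it to exit. The stop
+ * flag and task field are assumed members of the driver context.
+ *
+ * @code
+ * dev->stop = true;
+ * rtdm_task_unblock(&dev->task);
+ * rtdm_task_join(&dev->task);
+ * @endcode
+ */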
+
+/**
+ * @brief Busy-wait a specified amount of time
+ *
+ * This service does not schedule out the caller, but rather spins in
+ * a tight loop, burning CPU cycles until the timeout elapses.
+ *
+ * @param[in] delay Delay in nanoseconds. Note that a zero delay does @b not
+ * have the meaning of @c RTDM_TIMEOUT_INFINITE here.
+ *
+ * @note The caller must not be migratable to different CPUs while executing
+ * this service. Otherwise, the actual delay will be undefined.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_task_busy_sleep(nanosecs_rel_t delay)
+{
+	xnticks_t wakeup;
+
+	wakeup = xnclock_read_raw(&nkclock) +
+		xnclock_ns_to_ticks(&nkclock, delay);
+
+	while ((xnsticks_t)(xnclock_read_raw(&nkclock) - wakeup) < 0)
+		cpu_relax();
+}
+
+EXPORT_SYMBOL_GPL(rtdm_task_busy_sleep);
+/** @} */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_timer Timer Services
+ * @{
+ */
+
+/**
+ * @brief Initialise a timer
+ *
+ * @param[in,out] timer Timer handle
+ * @param[in] handler Handler to be called on timer expiry
+ * @param[in] name Optional timer name
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_timer_init(rtdm_timer_t *timer, rtdm_timer_handler_t handler,
+		    const char *name)
+{
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	xntimer_init(timer, &nkclock, handler, NULL, XNTIMER_IGRAVITY);
+	xntimer_set_name(timer, name);
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_init);
+
+/**
+ * @brief Destroy a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_timer_destroy(rtdm_timer_t *timer)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_destroy(timer);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_destroy);
+
+/**
+ * @brief Start a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ * @param[in] expiry Firing time of the timer, @c mode defines if relative or
+ * absolute
+ * @param[in] interval Relative reload value, > 0 if the timer shall work in
+ * periodic mode with the specified interval, 0 for one-shot timers
+ * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for
+ * possible values
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if @c expiry describes an absolute date in
+ * the past. In such an event, the timer is nevertheless armed for the
+ * next shot in the timeline if @a interval is non-zero.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_timer_start(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+		     nanosecs_rel_t interval, enum rtdm_timer_mode mode)
+{
+	spl_t s;
+	int err;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xntimer_start(timer, expiry, interval, (xntmode_t)mode);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_start);
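+
+/*
+ * Putting the timer services together: initialise a timer, then arm
+ * it so that it fires 1 ms from now and every 1 ms afterwards. The
+ * handler body and the dev->timer field are illustrative only.
+ *
+ * @code
+ * static void mytimer_handler(rtdm_timer_t *timer)
+ * {
+ *	// runs on each expiry, in core IRQ context
+ * }
+ *
+ * ret = rtdm_timer_init(&dev->timer, mytimer_handler, "mydrv timer");
+ * if (ret == 0)
+ *	ret = rtdm_timer_start(&dev->timer, 1000000, 1000000,
+ *			       RTDM_TIMERMODE_RELATIVE);
+ * @endcode
+ */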
+
+/**
+ * @brief Stop a timer
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_timer_stop(rtdm_timer_t *timer)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_stop(timer);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_timer_stop);
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Start a timer from inside a timer handler
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ * @param[in] expiry Firing time of the timer, @c mode defines if relative or
+ * absolute
+ * @param[in] interval Relative reload value, > 0 if the timer shall work in
+ * periodic mode with the specific interval, 0 for one-shot timers
+ * @param[in] mode Defines the operation mode, see @ref RTDM_TIMERMODE_xxx for
+ * possible values
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if @c expiry describes an absolute date in the
+ * past.
+ *
+ * @coretags{coreirq-only}
+ */
+int rtdm_timer_start_in_handler(rtdm_timer_t *timer, nanosecs_abs_t expiry,
+				nanosecs_rel_t interval,
+				enum rtdm_timer_mode mode);
+
+/**
+ * @brief Stop a timer from inside a timer handler
+ *
+ * @param[in,out] timer Timer handle as returned by rtdm_timer_init()
+ *
+ * @coretags{coreirq-only}
+ */
+void rtdm_timer_stop_in_handler(rtdm_timer_t *timer);
+#endif /* DOXYGEN_CPP */
+/** @} */
+
+/* --- IPC cleanup helper --- */
+
+#define RTDM_SYNCH_DELETED          XNSYNCH_SPARE0
+
+void __rtdm_synch_flush(struct xnsynch *synch, unsigned long reason)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (reason == XNRMID)
+		xnsynch_set_status(synch, RTDM_SYNCH_DELETED);
+
+	if (likely(xnsynch_flush(synch, reason) == XNSYNCH_RESCHED))
+		xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(__rtdm_synch_flush);
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_sync Synchronisation Services
+ * @{
+ */
+
+/*!
+ * @name Timeout Sequence Management
+ * @{
+ */
+
+/**
+ * @brief Initialise a timeout sequence
+ *
+ * This service initialises a timeout sequence handle according to the given
+ * timeout value. Timeout sequences make it possible to maintain a continuous
+ * @a timeout across multiple calls of blocking synchronisation services. A typical
+ * application scenario is given below.
+ *
+ * @param[in,out] timeout_seq Timeout sequence handle
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ *
+ * Application Scenario:
+ * @code
+int device_service_routine(...)
+{
+	rtdm_toseq_t timeout_seq;
+	...
+
+	rtdm_toseq_init(&timeout_seq, timeout);
+	...
+	while (received < requested) {
+		ret = rtdm_event_timedwait(&data_available, timeout, &timeout_seq);
+		if (ret < 0) // including -ETIMEDOUT
+			break;
+
+		// receive some data
+		...
+	}
+	...
+}
+ * @endcode
+ * Using a timeout sequence in such a scenario prevents the user-provided
+ * relative @c timeout from being restarted on every call to
+ * rtdm_event_timedwait(), which could otherwise cause an overall delay
+ * larger than specified by @c timeout. Moreover, all functions supporting
+ * timeout sequences also
+ * interpret special timeout values (infinite and non-blocking),
+ * disburdening the driver developer from handling them separately.
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_toseq_init(rtdm_toseq_t *timeout_seq, nanosecs_rel_t timeout)
+{
+	XENO_WARN_ON(COBALT, xnsched_unblockable_p()); /* only warn here */
+
+	*timeout_seq = xnclock_read_monotonic(&nkclock) + timeout;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_toseq_init);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_event Event Services
+ * @{
+ */
+
+/**
+ * @brief Initialise an event
+ *
+ * @param[in,out] event Event handle
+ * @param[in] pending Non-zero if event shall be initialised as set, 0 otherwise
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_event_init(rtdm_event_t *event, unsigned long pending)
+{
+	spl_t s;
+
+	trace_cobalt_driver_event_init(event, pending);
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_init(&event->synch_base, XNSYNCH_PRIO, NULL);
+	if (pending)
+		xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING);
+	xnselect_init(&event->select_block);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_init);
+
+/**
+ * @brief Destroy an event
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_event_destroy(rtdm_event_t *event)
+{
+	trace_cobalt_driver_event_destroy(event);
+	if (realtime_core_enabled()) {
+		__rtdm_synch_flush(&event->synch_base, XNRMID);
+		xnselect_destroy(&event->select_block);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdm_event_destroy);
+
+/**
+ * @brief Signal an event occurrence to currently listening waiters
+ *
+ * This function wakes up all current waiters of the given event, but it does
+ * not change the event state. Subsequent callers of rtdm_event_wait() or
+ * rtdm_event_timedwait() will therefore block first.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_event_pulse(rtdm_event_t *event)
+{
+	trace_cobalt_driver_event_pulse(event);
+	__rtdm_synch_flush(&event->synch_base, 0);
+}
+EXPORT_SYMBOL_GPL(rtdm_event_pulse);
+
+/**
+ * @brief Signal an event occurrence
+ *
+ * This function sets the given event and wakes up all current waiters. If no
+ * waiter is presently registered, the next call to rtdm_event_wait() or
+ * rtdm_event_timedwait() will return immediately.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_event_signal(rtdm_event_t *event)
+{
+	int resched = 0;
+	spl_t s;
+
+	trace_cobalt_driver_event_signal(event);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_set_status(&event->synch_base, RTDM_EVENT_PENDING);
+	if (xnsynch_flush(&event->synch_base, 0))
+		resched = 1;
+	if (xnselect_signal(&event->select_block, 1))
+		resched = 1;
+	if (resched)
+		xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_signal);
+
+/**
+ * @brief Wait on event occurrence
+ *
+ * This is the light-weight version of rtdm_event_timedwait(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_event_wait(rtdm_event_t *event)
+{
+	return rtdm_event_timedwait(event, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_wait);
+
+/**
+ * @brief Wait on event occurrence with timeout
+ *
+ * This function waits or tests for the occurrence of the given event, taking
+ * the provided timeout into account. On successful return, the event is
+ * reset.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a event has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * - -EWOULDBLOCK is returned if a negative @a timeout (i.e., non-blocking
+ * operation) has been specified.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_event_timedwait(rtdm_event_t *event, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_event_wait(event, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(event->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (likely(event->synch_base.status & RTDM_EVENT_PENDING)) {
+		xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+		xnselect_signal(&event->select_block, 0);
+	} else {
+		/* non-blocking mode */
+		if (timeout < 0) {
+			err = -EWOULDBLOCK;
+			goto unlock_out;
+		}
+
+		thread = xnthread_current();
+
+		if (timeout_seq && (timeout > 0))
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&event->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&event->synch_base, timeout, XN_RELATIVE);
+
+		if (likely(ret == 0)) {
+			xnsynch_clear_status(&event->synch_base,
+					    RTDM_EVENT_PENDING);
+			xnselect_signal(&event->select_block, 0);
+		} else if (ret & XNTIMEO)
+			err = -ETIMEDOUT;
+		else if (ret & XNRMID)
+			err = -EIDRM;
+		else /* XNBREAK */
+			err = -EINTR;
+	}
+
+unlock_out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_timedwait);
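+
+/*
+ * Typical producer/consumer split for the event services: an
+ * interrupt handler signals data availability while a read handler
+ * blocks with a timeout. The dev->data_avail event and fetch_data()
+ * helper are assumed driver context, and timeout/timeout_seq are set
+ * up as in the rtdm_toseq_init() example above.
+ *
+ * @code
+ * // producer side, e.g. from the IRQ handler:
+ * rtdm_event_signal(&dev->data_avail);
+ *
+ * // consumer side, e.g. from the read handler:
+ * ret = rtdm_event_timedwait(&dev->data_avail, timeout, &timeout_seq);
+ * if (ret == 0)
+ *	fetch_data(dev);
+ * @endcode
+ */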
+
+/**
+ * @brief Clear event state
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_event_clear(rtdm_event_t *event)
+{
+	spl_t s;
+
+	trace_cobalt_driver_event_clear(event);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnsynch_clear_status(&event->synch_base, RTDM_EVENT_PENDING);
+	xnselect_signal(&event->select_block, 0);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_event_clear);
+
+/**
+ * @brief Bind a selector to an event
+ *
+ * This function binds the given selector to an event so that the former is
+ * notified when the event state changes. Typically the select binding handler
+ * will invoke this service.
+ *
+ * @param[in,out] event Event handle as returned by rtdm_event_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_event_select(rtdm_event_t *event, rtdm_selector_t *selector,
+		      enum rtdm_selecttype type, unsigned int fd_index)
+{
+	struct xnselect_binding *binding;
+	int err;
+	spl_t s;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (!binding)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xnselect_bind(&event->select_block,
+			    binding, selector, type, fd_index,
+			    event->synch_base.status & (RTDM_SYNCH_DELETED |
+						       RTDM_EVENT_PENDING));
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err)
+		xnfree(binding);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_event_select);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_sem Semaphore Services
+ * @{
+ */
+
+/**
+ * @brief Initialise a semaphore
+ *
+ * @param[in,out] sem Semaphore handle
+ * @param[in] value Initial value of the semaphore
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_sem_init(rtdm_sem_t *sem, unsigned long value)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_init(sem, value);
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+
+	sem->value = value;
+	xnsynch_init(&sem->synch_base, XNSYNCH_PRIO, NULL);
+	xnselect_init(&sem->select_block);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_init);
+
+/**
+ * @brief Destroy a semaphore
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_sem_destroy(rtdm_sem_t *sem)
+{
+	trace_cobalt_driver_sem_destroy(sem);
+	if (realtime_core_enabled()) {
+		__rtdm_synch_flush(&sem->synch_base, XNRMID);
+		xnselect_destroy(&sem->select_block);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdm_sem_destroy);
+
+/**
+ * @brief Decrement a semaphore
+ *
+ * This is the light-weight version of rtdm_sem_timeddown(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_sem_down(rtdm_sem_t *sem)
+{
+	return rtdm_sem_timeddown(sem, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_down);
+
+/**
+ * @brief Decrement a semaphore with timeout
+ *
+ * This function tries to decrement the given semaphore's value if it is
+ * positive on entry. If not, the caller is blocked unless non-blocking
+ * operation was selected.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is negative and the semaphore
+ * value is currently not positive.
+ *
+ * - -EINTR is returned if the calling task has been unblocked by a
+ * signal or explicitly via rtdm_task_unblock().
+ *
+ * - -EIDRM is returned if @a sem has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-timed, might-switch}
+ */
+int rtdm_sem_timeddown(rtdm_sem_t *sem, nanosecs_rel_t timeout,
+		       rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *thread;
+	int err = 0, ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, timeout < 0 || !xnsched_unblockable_p()))
+		return -EPERM;
+
+	trace_cobalt_driver_sem_wait(sem, xnthread_current());
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(sem->synch_base.status & RTDM_SYNCH_DELETED))
+		err = -EIDRM;
+	else if (sem->value > 0) {
+		if (!--sem->value)
+			xnselect_signal(&sem->select_block, 0);
+	} else if (timeout < 0) /* non-blocking mode */
+		err = -EWOULDBLOCK;
+	else {
+		thread = xnthread_current();
+
+		if (timeout_seq && timeout > 0)
+			/* timeout sequence */
+			ret = xnsynch_sleep_on(&sem->synch_base, *timeout_seq,
+					       XN_ABSOLUTE);
+		else
+			/* infinite or relative timeout */
+			ret = xnsynch_sleep_on(&sem->synch_base, timeout, XN_RELATIVE);
+
+		if (ret) {
+			if (ret & XNTIMEO)
+				err = -ETIMEDOUT;
+			else if (ret & XNRMID)
+				err = -EIDRM;
+			else /* XNBREAK */
+				err = -EINTR;
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return err;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_timeddown);
+
+/**
+ * @brief Increment a semaphore
+ *
+ * This function increments the given semaphore's value, waking up a potential
+ * waiter which was blocked upon rtdm_sem_down().
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void rtdm_sem_up(rtdm_sem_t *sem)
+{
+	spl_t s;
+
+	trace_cobalt_driver_sem_up(sem);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnsynch_wakeup_one_sleeper(&sem->synch_base))
+		xnsched_run();
+	else
+		if (sem->value++ == 0
+		    && xnselect_signal(&sem->select_block, 1))
+			xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_sem_up);
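+
+/*
+ * Counting-semaphore sketch: each rtdm_sem_up() accounts for one
+ * queued element, each successful rtdm_sem_down() consumes one. The
+ * queue helpers and the dev->item_count field are made-up names.
+ *
+ * @code
+ * rtdm_sem_init(&dev->item_count, 0);
+ *
+ * // producer side
+ * enqueue_item(dev, item);
+ * rtdm_sem_up(&dev->item_count);
+ *
+ * // consumer side
+ * ret = rtdm_sem_down(&dev->item_count);
+ * if (ret == 0)
+ *	item = dequeue_item(dev);
+ * @endcode
+ */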
+
+/**
+ * @brief Bind a selector to a semaphore
+ *
+ * This function binds the given selector to the semaphore so that the former
+ * is notified when the semaphore state changes. Typically the select binding
+ * handler will invoke this service.
+ *
+ * @param[in,out] sem Semaphore handle as returned by rtdm_sem_init()
+ * @param[in,out] selector Selector as passed to the select binding handler
+ * @param[in] type Type of the bound event as passed to the select binding handler
+ * @param[in] fd_index File descriptor index as passed to the select binding
+ * handler
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ENOMEM is returned if there is insufficient memory to establish the
+ * dynamic binding.
+ *
+ * - -EINVAL is returned if @a type or @a fd_index are invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_sem_select(rtdm_sem_t *sem, rtdm_selector_t *selector,
+		    enum rtdm_selecttype type, unsigned int fd_index)
+{
+	struct xnselect_binding *binding;
+	int err;
+	spl_t s;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (!binding)
+		return -ENOMEM;
+
+	xnlock_get_irqsave(&nklock, s);
+	err = xnselect_bind(&sem->select_block, binding, selector,
+			    type, fd_index,
+			    (sem->value > 0) ||
+			    sem->synch_base.status & RTDM_SYNCH_DELETED);
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (err)
+		xnfree(binding);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_sem_select);
+
+/** @} */
+
+/**
+ * @ingroup rtdm_sync
+ * @defgroup rtdm_sync_mutex Mutex services
+ * @{
+ */
+
+/**
+ * @brief Initialise a mutex
+ *
+ * This function initialises a basic mutex with priority inversion protection.
+ * It is "basic" in that it does not allow a mutex owner to recursively lock
+ * the same mutex again.
+ *
+ * @param[in,out] mutex Mutex handle
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_mutex_init(rtdm_mutex_t *mutex)
+{
+	spl_t s;
+
+	/* Make atomic for re-initialisation support */
+	xnlock_get_irqsave(&nklock, s);
+	xnsynch_init(&mutex->synch_base, XNSYNCH_PI, &mutex->fastlock);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_init);
+
+/**
+ * @brief Destroy a mutex
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void rtdm_mutex_destroy(rtdm_mutex_t *mutex)
+{
+	trace_cobalt_driver_mutex_destroy(mutex);
+
+	if (realtime_core_enabled())
+		__rtdm_synch_flush(&mutex->synch_base, XNRMID);
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_destroy);
+
+/**
+ * @brief Release a mutex
+ *
+ * This function releases the given mutex, waking up a potential waiter which
+ * was blocked upon rtdm_mutex_lock() or rtdm_mutex_timedlock().
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @coretags{primary-only, might-switch}
+ */
+void rtdm_mutex_unlock(rtdm_mutex_t *mutex)
+{
+	if (!XENO_ASSERT(COBALT, !xnsched_interrupt_p()))
+		return;
+
+	trace_cobalt_driver_mutex_release(mutex);
+
+	if (unlikely(xnsynch_release(&mutex->synch_base,
+				     xnsched_current_thread())))
+		xnsched_run();
+}
+EXPORT_SYMBOL_GPL(rtdm_mutex_unlock);
+
+/**
+ * @brief Request a mutex
+ *
+ * This is the light-weight version of rtdm_mutex_timedlock(), implying an
+ * infinite timeout.
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EIDRM is returned if @a mutex has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_mutex_lock(rtdm_mutex_t *mutex)
+{
+	return rtdm_mutex_timedlock(mutex, 0, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_mutex_lock);
+
+/**
+ * @brief Request a mutex with timeout
+ *
+ * This function tries to acquire the given mutex. If it is not available, the
+ * caller is blocked unless non-blocking operation was selected.
+ *
+ * @param[in,out] mutex Mutex handle as returned by rtdm_mutex_init()
+ * @param[in] timeout Relative timeout in nanoseconds, see
+ * @ref RTDM_TIMEOUT_xxx for special values
+ * @param[in,out] timeout_seq Handle of a timeout sequence as returned by
+ * rtdm_toseq_init() or NULL
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -ETIMEDOUT is returned if the request has not been satisfied
+ * within the specified amount of time.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is negative and the mutex
+ * is not currently available.
+ *
+ * - -EIDRM is returned if @a mutex has been destroyed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+int rtdm_mutex_timedlock(rtdm_mutex_t *mutex, nanosecs_rel_t timeout,
+			 rtdm_toseq_t *timeout_seq)
+{
+	struct xnthread *curr;
+	int ret;
+	spl_t s;
+
+	if (!XENO_ASSERT(COBALT, !xnsched_unblockable_p()))
+		return -EPERM;
+
+	curr = xnthread_current();
+	trace_cobalt_driver_mutex_wait(mutex, curr);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(mutex->synch_base.status & RTDM_SYNCH_DELETED)) {
+		ret = -EIDRM;
+		goto out;
+	}
+
+	ret = xnsynch_try_acquire(&mutex->synch_base);
+	if (ret != -EBUSY)
+		goto out;
+
+	if (timeout < 0) {
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+
+	for (;;) {
+		if (timeout_seq && timeout > 0) /* timeout sequence */
+			ret = xnsynch_acquire(&mutex->synch_base, *timeout_seq,
+					      XN_ABSOLUTE);
+		else		/* infinite or relative timeout */
+			ret = xnsynch_acquire(&mutex->synch_base, timeout,
+					      XN_RELATIVE);
+		if (ret == 0)
+			break;
+		if (ret & XNBREAK)
+			continue;
+		ret = ret & XNTIMEO ? -ETIMEDOUT : -EIDRM;
+		break;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_mutex_timedlock);
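+
+/*
+ * Minimal critical-section sketch for the mutex services; the shared
+ * state update is a placeholder for whatever the driver protects.
+ *
+ * @code
+ * rtdm_mutex_init(&dev->lock);
+ *
+ * ret = rtdm_mutex_lock(&dev->lock);
+ * if (ret == 0) {
+ *	update_shared_state(dev);
+ *	rtdm_mutex_unlock(&dev->lock);
+ * }
+ * @endcode
+ */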
+/** @} */
+
+/** @} Synchronisation services */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_irq Interrupt Management Services
+ * @{
+ */
+
+/**
+ * @brief Register an interrupt handler
+ *
+ * This function registers the provided handler with an IRQ line and enables
+ * the line.
+ *
+ * @param[in,out] irq_handle IRQ handle
+ * @param[in] irq_no Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details
+ * @param[in] device_name Device name to show up in real-time IRQ lists
+ * @param[in] arg Pointer to be passed to the interrupt handler on invocation
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * - -EBUSY is returned if the specified IRQ line is already in use.
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_request(rtdm_irq_t *irq_handle, unsigned int irq_no,
+		     rtdm_irq_handler_t handler, unsigned long flags,
+		     const char *device_name, void *arg)
+{
+	return rtdm_irq_request_affine(irq_handle, irq_no, handler, flags,
+				       device_name, arg, NULL);
+}
+
+EXPORT_SYMBOL_GPL(rtdm_irq_request);
+
+/**
+ * @brief Register an interrupt handler
+ *
+ * This function registers the provided handler with an IRQ line and enables
+ * the line.
+ *
+ * @param[in,out] irq_handle IRQ handle
+ * @param[in] irq_no Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags, see @ref RTDM_IRQTYPE_xxx for details
+ * @param[in] device_name Device name to show up in real-time IRQ lists
+ * @param[in] arg Pointer to be passed to the interrupt handler on invocation
+ * @param[in] cpumask CPU affinity of the interrupt
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid parameter was passed.
+ *
+ * - -EBUSY is returned if the specified IRQ line is already in use.
+ *
+ * - -ENOSYS is returned if the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_request_affine(rtdm_irq_t *irq_handle, unsigned int irq_no,
+			    rtdm_irq_handler_t handler, unsigned long flags,
+			    const char *device_name, void *arg,
+			    const cpumask_t *cpumask)
+{
+	int err;
+
+	if (!realtime_core_enabled())
+		return -ENOSYS;
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	err = xnintr_init(irq_handle, device_name, irq_no, handler, NULL, flags);
+	if (err)
+		return err;
+
+	err = xnintr_attach(irq_handle, arg, cpumask);
+	if (err) {
+		xnintr_destroy(irq_handle);
+		return err;
+	}
+
+	xnintr_enable(irq_handle);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtdm_irq_request_affine);
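+
+/*
+ * Registration sketch for the services above. The handler prototype
+ * matches rtdm_irq_handler_t; the device structure and the
+ * acknowledge helper are assumed names. rtdm_irq_get_arg() hands back
+ * the @a arg pointer passed at registration time.
+ *
+ * @code
+ * static int mydev_interrupt(rtdm_irq_t *irq_handle)
+ * {
+ *	struct mydev *dev = rtdm_irq_get_arg(irq_handle, struct mydev);
+ *
+ *	ack_device_irq(dev);
+ *	rtdm_event_signal(&dev->data_avail);
+ *
+ *	return RTDM_IRQ_HANDLED;
+ * }
+ *
+ * ret = rtdm_irq_request(&dev->irq_handle, dev->irq, mydev_interrupt,
+ *			  0, "mydev", dev);
+ * @endcode
+ */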
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+/**
+ * @brief Release an interrupt handler
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note The caller is responsible for shutting down the IRQ source at device
+ * level before invoking this service. In turn, rtdm_irq_free ensures that any
+ * pending event on the given IRQ line is fully processed on return from this
+ * service.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_free(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Enable interrupt line
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note This service is for exceptional use only. Drivers should
+ * always prefer interrupt masking at device level (via corresponding
+ * control registers etc.)  over masking at line level. Keep in mind
+ * that the latter is incompatible with IRQ line sharing and can also
+ * be more costly as interrupt controller access requires broader
+ * synchronization. Also, this service is only available from
+ * secondary mode. The caller is responsible for excluding such
+ * conflicts.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_enable(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Disable interrupt line
+ *
+ * @param[in,out] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @note This service is for exceptional use only. Drivers should
+ * always prefer interrupt masking at device level (via corresponding
+ * control registers etc.)  over masking at line level. Keep in mind
+ * that the latter is incompatible with IRQ line sharing and can also
+ * be more costly as interrupt controller access requires broader
+ * synchronization. Also, this service is only available from
+ * secondary mode. The caller is responsible for excluding such
+ * conflicts.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_disable(rtdm_irq_t *irq_handle);
+
+/**
+ * @brief Set interrupt affinity
+ *
+ * @param[in] irq_handle IRQ handle as returned by rtdm_irq_request()
+ *
+ * @param[in] cpumask The new CPU affinity of the interrupt
+ *
+ * @return 0 on success, otherwise negative error code
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_irq_set_affinity(rtdm_irq_t *irq_handle, const cpumask_t *cpumask);
+#endif /* DOXYGEN_CPP */
+
+/** @} Interrupt Management Services */
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_nrtsignal Non-Real-Time Signalling Services
+ *
+ * These services provide a mechanism to request the execution of a specified
+ * handler in non-real-time context. The triggering can safely be performed in
+ * real-time context without suffering from unknown delays. The handler
+ * execution will be deferred until the next time the real-time subsystem
+ * releases the CPU to the non-real-time part.
+ * @{
+ */
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * @brief Register a non-real-time signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ * @param[in] handler Non-real-time signal handler
+ * @param[in] arg Custom argument passed to @c handler() on each invocation
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EAGAIN is returned if no free signal slot is available.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_nrtsig_init(rtdm_nrtsig_t *nrt_sig, rtdm_nrtsig_handler_t handler,
+		     void *arg);
+
+/**
+ * @brief Release a non-realtime signal handler
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{task-unrestricted}
+ */
+void rtdm_nrtsig_destroy(rtdm_nrtsig_t *nrt_sig);
+#endif /* DOXYGEN_CPP */
+
+void __rtdm_nrtsig_execute(struct pipeline_inband_work *inband_work)
+{
+	struct rtdm_nrtsig *nrt_sig;
+
+	nrt_sig = container_of(inband_work, typeof(*nrt_sig), inband_work);
+	nrt_sig->handler(nrt_sig, nrt_sig->arg);
+}
+EXPORT_SYMBOL_GPL(__rtdm_nrtsig_execute);
+
+/**
+ * Trigger non-real-time signal
+ *
+ * @param[in,out] nrt_sig Signal handle
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_nrtsig_pend(rtdm_nrtsig_t *nrt_sig)
+{
+	pipeline_post_inband_work(nrt_sig);
+}
+EXPORT_SYMBOL_GPL(rtdm_nrtsig_pend);
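+
+/*
+ * Deferral sketch: the handler below runs in non-real-time context
+ * once the real-time core releases the CPU, after rtdm_nrtsig_pend()
+ * was called from real-time context. Names other than the
+ * rtdm_nrtsig_* and rtdm_printk() calls are illustrative.
+ *
+ * @code
+ * static void mydev_nrt_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+ * {
+ *	struct mydev *dev = arg;
+ *
+ *	rtdm_printk("mydev: deferred handler ran for %p\n", dev);
+ * }
+ *
+ * rtdm_nrtsig_init(&dev->nrt_sig, mydev_nrt_handler, dev);
+ * // later, from real-time context:
+ * rtdm_nrtsig_pend(&dev->nrt_sig);
+ * @endcode
+ */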
+
+static LIST_HEAD(nrt_work_list);
+DEFINE_PRIVATE_XNLOCK(nrt_work_lock);
+
+static void lostage_schedule_work(struct pipeline_inband_work *inband_work)
+{
+	struct work_struct *lostage_work;
+	spl_t s;
+
+	xnlock_get_irqsave(&nrt_work_lock, s);
+
+	while (!list_empty(&nrt_work_list)) {
+		lostage_work = list_first_entry(&nrt_work_list,
+						struct work_struct, entry);
+		list_del_init(&lostage_work->entry);
+
+		xnlock_put_irqrestore(&nrt_work_lock, s);
+
+		schedule_work(lostage_work);
+
+		xnlock_get_irqsave(&nrt_work_lock, s);
+	}
+
+	xnlock_put_irqrestore(&nrt_work_lock, s);
+}
+
+static struct lostage_trigger_work {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+} nrt_work =  {
+	.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(nrt_work,
+							lostage_schedule_work),
+};
+
+/**
+ * Schedule a work item on the Linux non-real-time global workqueue
+ * from primary mode.
+ *
+ * @param lostage_work Work item to be queued for execution in the
+ * regular Linux workqueue context.
+ */
+void rtdm_schedule_nrt_work(struct work_struct *lostage_work)
+{
+	spl_t s;
+
+	if (is_secondary_domain()) {
+		schedule_work(lostage_work);
+		return;
+	}
+
+	xnlock_get_irqsave(&nrt_work_lock, s);
+
+	list_add_tail(&lostage_work->entry, &nrt_work_list);
+	pipeline_post_inband_work(&nrt_work);
+
+	xnlock_put_irqrestore(&nrt_work_lock, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_schedule_nrt_work);
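+
+/*
+ * Usage sketch for rtdm_schedule_nrt_work(): a regular work_struct,
+ * set up with INIT_WORK(), may be queued even from primary mode; the
+ * handler then runs in the Linux global workqueue. The dev->nrt_work
+ * field is an assumed part of the driver context.
+ *
+ * @code
+ * static void mydev_work_handler(struct work_struct *work)
+ * {
+ *	// regular Linux context
+ * }
+ *
+ * INIT_WORK(&dev->nrt_work, mydev_work_handler);
+ * // from primary (or secondary) mode:
+ * rtdm_schedule_nrt_work(&dev->nrt_work);
+ * @endcode
+ */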
+
+/** @} Non-Real-Time Signalling Services */
+
+
+/**
+ * @ingroup rtdm_driver_interface
+ * @defgroup rtdm_util Utility Services
+ * @{
+ */
+
+struct mmap_tramp_data {
+	struct rtdm_fd *fd;
+	struct file_operations *fops;
+	int (*mmap_handler)(struct rtdm_fd *fd,
+			    struct vm_area_struct *vma);
+};
+
+struct mmap_helper_data {
+	void *src_vaddr;
+	phys_addr_t src_paddr;
+	struct vm_operations_struct *vm_ops;
+	void *vm_private_data;
+	struct mmap_tramp_data tramp_data;
+};
+
+static int mmap_kmem_helper(struct vm_area_struct *vma, void *va)
+{
+	unsigned long addr, len, pfn, to;
+	int ret = 0;
+
+	to = (unsigned long)va;
+	addr = vma->vm_start;
+	len = vma->vm_end - vma->vm_start;
+
+	if (to != PAGE_ALIGN(to) || (len & ~PAGE_MASK) != 0)
+		return -EINVAL;
+
+#ifndef CONFIG_MMU
+	pfn = __pa(to) >> PAGE_SHIFT;
+	ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED);
+#else
+	if (to < VMALLOC_START || to >= VMALLOC_END) {
+		/* logical address. */
+		pfn = __pa(to) >> PAGE_SHIFT;
+		ret = remap_pfn_range(vma, addr, pfn, len, PAGE_SHARED);
+		if (ret)
+			return ret;
+	} else {
+		/* vmalloc memory. */
+		while (len > 0) {
+			struct page *page = vmalloc_to_page((void *)to);
+			if (vm_insert_page(vma, addr, page))
+				return -EAGAIN;
+			addr += PAGE_SIZE;
+			to += PAGE_SIZE;
+			len -= PAGE_SIZE;
+		}
+	}
+
+	if (cobalt_machine.prefault)
+		cobalt_machine.prefault(vma);
+#endif
+
+	return ret;
+}
+
+static int mmap_iomem_helper(struct vm_area_struct *vma, phys_addr_t pa)
+{
+	pgprot_t prot = PAGE_SHARED;
+	unsigned long len;
+
+	len = vma->vm_end - vma->vm_start;
+#ifndef CONFIG_MMU
+	vma->vm_pgoff = pa >> PAGE_SHIFT;
+#endif /* CONFIG_MMU */
+
+#ifdef __HAVE_PHYS_MEM_ACCESS_PROT
+	if (vma->vm_file)
+		prot = phys_mem_access_prot(vma->vm_file, pa >> PAGE_SHIFT,
+					    len, prot);
+#endif
+	vma->vm_page_prot = pgprot_noncached(prot);
+
+	return remap_pfn_range(vma, vma->vm_start, pa >> PAGE_SHIFT,
+			       len, vma->vm_page_prot);
+}
+
+static int mmap_buffer_helper(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct mmap_tramp_data *tramp_data = vma->vm_private_data;
+	struct mmap_helper_data *helper_data;
+	int ret;
+
+	helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
+	vma->vm_ops = helper_data->vm_ops;
+	vma->vm_private_data = helper_data->vm_private_data;
+
+	if (helper_data->src_paddr)
+		ret = mmap_iomem_helper(vma, helper_data->src_paddr);
+	else
+		ret = mmap_kmem_helper(vma, helper_data->src_vaddr);
+
+	return ret;
+}
+
+static int mmap_trampoline(struct file *filp, struct vm_area_struct *vma)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	int ret;
+
+	vma->vm_private_data = tramp_data;
+
+	ret = tramp_data->mmap_handler(tramp_data->fd, vma);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+#ifndef CONFIG_MMU
+
+static unsigned long
+internal_get_unmapped_area(struct file *filp,
+			   unsigned long addr, unsigned long len,
+			   unsigned long pgoff, unsigned long flags)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	struct mmap_helper_data *helper_data;
+	unsigned long pa;
+
+	helper_data = container_of(tramp_data, struct mmap_helper_data, tramp_data);
+	pa = helper_data->src_paddr;
+	if (pa)
+		return (unsigned long)__va(pa);
+
+	return (unsigned long)helper_data->src_vaddr;
+}
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	const struct file_operations *old_fops;
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = filp_open("/dev/mem", O_RDWR, 0);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	old_fops = filp->f_op;
+	filp->f_op = tramp_data->fops;
+	filp->private_data = tramp_data;
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	/* Restore the original fops before the final fput on close. */
+	filp->f_op = old_fops;
+	filp_close(filp, current->files);
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#else /* CONFIG_MMU */
+
+static int do_rtdm_mmap(struct mmap_tramp_data *tramp_data,
+			size_t len, off_t offset, int prot, int flags,
+			void **pptr)
+{
+	unsigned long u_addr;
+	struct file *filp;
+
+	filp = anon_inode_getfile("[rtdm]", tramp_data->fops, tramp_data, O_RDWR);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	u_addr = vm_mmap(filp, (unsigned long)*pptr, len, prot, flags, offset);
+	filp_close(filp, current->files);
+
+	if (IS_ERR_VALUE(u_addr))
+		return (int)u_addr;
+
+	*pptr = (void *)u_addr;
+
+	return 0;
+}
+
+#define internal_get_unmapped_area  NULL
+
+#endif /* CONFIG_MMU */
+
+static struct file_operations internal_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = internal_get_unmapped_area
+};
+
+static unsigned long
+driver_get_unmapped_area(struct file *filp,
+			 unsigned long addr, unsigned long len,
+			 unsigned long pgoff, unsigned long flags)
+{
+	struct mmap_tramp_data *tramp_data = filp->private_data;
+	struct rtdm_fd *fd = tramp_data->fd;
+
+	if (fd->ops->get_unmapped_area)
+		return fd->ops->get_unmapped_area(fd, len, pgoff, flags);
+
+#ifdef CONFIG_MMU
+	/* Run default handler. */
+	return current->mm->get_unmapped_area(filp, addr, len, pgoff, flags);
+#else
+	return -ENODEV;
+#endif
+}
+
+static struct file_operations driver_mmap_fops = {
+	.mmap = mmap_trampoline,
+	.get_unmapped_area = driver_get_unmapped_area
+};
+
+int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
+			  int prot, int flags, void **pptr)
+{
+	struct mmap_tramp_data tramp_data = {
+		.fd = fd,
+		.fops = &driver_mmap_fops,
+		.mmap_handler = fd->ops->mmap,
+	};
+
+#ifndef CONFIG_MMU
+	/*
+	 * XXX: A .get_unmapped_area handler must be provided in the
+	 * nommu case. We use this to force the memory management code
+	 * not to share VM regions for distinct areas to map to, as it
+	 * would otherwise do since all requests currently apply to
+	 * the same file (i.e. from /dev/mem, see do_mmap_pgoff() in
+	 * the nommu case).
+	 */
+	if (fd->ops->get_unmapped_area)
+		offset = fd->ops->get_unmapped_area(fd, len, 0, flags);
+#endif
+
+	return do_rtdm_mmap(&tramp_data, len, offset, prot, flags, pptr);
+}
+
+/**
+ * Map a kernel memory range into the address space of the user.
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] src_addr Kernel virtual address to be mapped
+ * @param[in] len Length of the memory range
+ * @param[in] prot Protection flags for the user's memory range, typically
+ * either PROT_READ or PROT_READ|PROT_WRITE
+ * @param[in,out] pptr Address of a pointer containing the desired user
+ * address or NULL on entry and the finally assigned address on return
+ * @param[in] vm_ops vm_operations to be executed on the vm_area of the
+ * user memory range or NULL
+ * @param[in] vm_private_data Private data to be stored in the vm_area,
+ * primarily useful for vm_operation handlers
+ *
+ * @return 0 on success, otherwise (most common values):
+ *
+ * - -EINVAL is returned if an invalid start address, size, or destination
+ * address was passed.
+ *
+ * - -ENOMEM is returned if there is insufficient free memory or the limit of
+ * memory mapping for the user process was reached.
+ *
+ * - -EAGAIN is returned if too much memory has been already locked by the
+ * user process.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @note This service only works on memory regions allocated via kmalloc() or
+ * vmalloc(). To map physical I/O memory to user-space use
+ * rtdm_iomap_to_user() instead.
+ *
+ * @note This service is provided only for use in .ioctl operation handlers.
+ * Otherwise RTDM drivers implementing a .mmap operation should use
+ * rtdm_mmap_kmem(), rtdm_mmap_vmem(), or rtdm_mmap_iomem().
+ *
+ * @note RTDM supports two models for unmapping the memory area:
+ * - manual unmapping via rtdm_munmap(), which may be issued from a
+ * driver in response to an IOCTL call, or by a call to the regular
+ * munmap() call from the application.
+ * - automatic unmapping, triggered by the termination of the process
+ *   which owns the mapping.
+ * To track the number of references pending on the resource mapped,
+ * the driver can pass the address of a close handler for the vm_area
+ * considered, in the @a vm_ops descriptor. See the relevant Linux
+ * kernel programming documentation (e.g. Linux Device Drivers book)
+ * on virtual memory management for details.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_to_user(struct rtdm_fd *fd,
+		      void *src_addr, size_t len,
+		      int prot, void **pptr,
+		      struct vm_operations_struct *vm_ops,
+		      void *vm_private_data)
+{
+	struct mmap_helper_data helper_data = {
+		.tramp_data = {
+			.fd = fd,
+			.fops = &internal_mmap_fops,
+			.mmap_handler = mmap_buffer_helper,
+		},
+		.src_vaddr = src_addr,
+		.src_paddr = 0,
+		.vm_ops = vm_ops,
+		.vm_private_data = vm_private_data
+	};
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_to_user);
+
+/**
+ * Map an I/O memory range into the address space of the user.
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] src_addr physical I/O address to be mapped
+ * @param[in] len Length of the memory range
+ * @param[in] prot Protection flags for the user's memory range, typically
+ * either PROT_READ or PROT_READ|PROT_WRITE
+ * @param[in,out] pptr Address of a pointer containing the desired user
+ * address or NULL on entry and the finally assigned address on return
+ * @param[in] vm_ops vm_operations to be executed on the vm_area of the
+ * user memory range or NULL
+ * @param[in] vm_private_data Private data to be stored in the vm_area,
+ * primarily useful for vm_operation handlers
+ *
+ * @return 0 on success, otherwise (most common values):
+ *
+ * - -EINVAL is returned if an invalid start address, size, or destination
+ * address was passed.
+ *
+ * - -ENOMEM is returned if there is insufficient free memory or the limit of
+ * memory mapping for the user process was reached.
+ *
+ * - -EAGAIN is returned if too much memory has been already locked by the
+ * user process.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @note RTDM supports two models for unmapping the memory area:
+ * - manual unmapping via rtdm_munmap(), which may be issued from a
+ * driver in response to an IOCTL call, or by a call to the regular
+ * munmap() call from the application.
+ * - automatic unmapping, triggered by the termination of the process
+ *   which owns the mapping.
+ * To track the number of references pending on the resource mapped,
+ * the driver can pass the address of a close handler for the vm_area
+ * considered, in the @a vm_ops descriptor. See the relevant Linux
+ * kernel programming documentation (e.g. Linux Device Drivers book)
+ * on virtual memory management for details.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_iomap_to_user(struct rtdm_fd *fd,
+		       phys_addr_t src_addr, size_t len,
+		       int prot, void **pptr,
+		       struct vm_operations_struct *vm_ops,
+		       void *vm_private_data)
+{
+	struct mmap_helper_data helper_data = {
+		.tramp_data = {
+			.fd = fd,
+			.fops = &internal_mmap_fops,
+			.mmap_handler = mmap_buffer_helper,
+		},
+		.src_vaddr = NULL,
+		.src_paddr = src_addr,
+		.vm_ops = vm_ops,
+		.vm_private_data = vm_private_data
+	};
+
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return do_rtdm_mmap(&helper_data.tramp_data, len, 0, prot, MAP_SHARED, pptr);
+}
+EXPORT_SYMBOL_GPL(rtdm_iomap_to_user);
+
+/**
+ * Map a kernel logical memory range to a virtual user area.
+ *
+ * This routine is commonly used from a .mmap operation handler of a RTDM
+ * driver, for mapping a virtual memory area with a direct physical
+ * mapping over the user address space referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] va The kernel logical address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note This service works on memory regions allocated via
+ * kmalloc(). To map a chunk of virtual space with no direct physical
+ * mapping, or a physical I/O memory to a VMA, call rtdm_mmap_vmem()
+ * or rtdm_mmap_iomem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_kmem(struct vm_area_struct *vma, void *va)
+{
+	return mmap_kmem_helper(vma, va);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_kmem);
+
+/**
+ * Map a kernel virtual memory range to a virtual user area.
+ *
+ * This routine is commonly used from a .mmap operation handler of a RTDM
+ * driver, for mapping a kernel virtual memory area over the user
+ * address space referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] va The virtual address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note This service works on memory regions allocated via
+ * vmalloc(). To map a chunk of logical space obtained from kmalloc(),
+ * or a physical I/O memory to a VMA, call rtdm_mmap_kmem() or
+ * rtdm_mmap_iomem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_vmem(struct vm_area_struct *vma, void *va)
+{
+	/*
+	 * Our helper handles both directly physically-mapped and
+	 * purely virtual memory ranges.
+	 */
+	return mmap_kmem_helper(vma, va);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_vmem);
+
+/**
+ * Map an I/O memory range to a virtual user area.
+ *
+ * This routine is commonly used from a .mmap operation handler of a RTDM
+ * driver, for mapping an I/O memory area over the user address space
+ * referred to by @a vma.
+ *
+ * @param[in] vma The VMA descriptor to receive the mapping.
+ * @param[in] pa The physical I/O address to be mapped.
+ *
+ * @return 0 on success, otherwise a negated error code is returned.
+ *
+ * @note To map a chunk of logical space obtained from kmalloc(), or a
+ * purely virtual area with no direct physical mapping to a VMA, call
+ * rtdm_mmap_kmem() or rtdm_mmap_vmem() respectively instead.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_mmap_iomem(struct vm_area_struct *vma, phys_addr_t pa)
+{
+	return mmap_iomem_helper(vma, pa);
+}
+EXPORT_SYMBOL_GPL(rtdm_mmap_iomem);
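+
+/*
+ * A .mmap operation handler typically reduces to one call into the
+ * helper matching how the backing memory was obtained. The device
+ * accessor and the io_base field are made-up names for the sketch.
+ *
+ * @code
+ * static int mydev_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+ * {
+ *	struct mydev *dev = get_mydev(fd); // hypothetical accessor
+ *
+ *	return rtdm_mmap_iomem(vma, dev->io_base);
+ * }
+ * @endcode
+ */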
+
+/**
+ * Unmap a user memory range.
+ *
+ * @param[in] ptr User address of the memory range
+ * @param[in] len Length of the memory range
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EINVAL is returned if an invalid address or size was passed.
+ *
+ * - -EPERM @e may be returned if an illegal invocation environment is
+ * detected.
+ *
+ * @coretags{secondary-only}
+ */
+int rtdm_munmap(void *ptr, size_t len)
+{
+	if (!XENO_ASSERT(COBALT, xnsched_root_p()))
+		return -EPERM;
+
+	return vm_munmap((unsigned long)ptr, len);
+}
+EXPORT_SYMBOL_GPL(rtdm_munmap);
+
+int rtdm_get_iovec(struct rtdm_fd *fd, struct iovec **iovp,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(struct iovec) * msg->msg_iovlen;
+	struct iovec *iov = iov_fast;
+
+	/*
+	 * If the I/O vector doesn't fit in the fast memory, allocate
+	 * a chunk from the system heap which is large enough to hold
+	 * it.
+	 */
+	if (msg->msg_iovlen > RTDM_IOV_FASTMAX) {
+		iov = xnmalloc(len);
+		if (iov == NULL)
+			return -ENOMEM;
+	}
+
+	*iovp = iov;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(iov, msg->msg_iov, len);
+		return 0;
+	}
+
+	return rtdm_fd_get_iovec(fd, iov, msg, false);
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iovec);
+
+int rtdm_put_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		   const struct user_msghdr *msg,
+		   struct iovec *iov_fast)
+{
+	size_t len = sizeof(iov[0]) * msg->msg_iovlen;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(msg->msg_iov, iov, len);
+		ret = 0;
+	} else
+		ret = rtdm_fd_put_iovec(fd, iov, msg);
+
+	if (iov != iov_fast)
+		xnfree(iov);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_put_iovec);
+
+ssize_t rtdm_get_iov_flatlen(struct iovec *iov, int iovlen)
+{
+	ssize_t len;
+	int nvec;
+
+	/* Return the flattened vector length. */
+	for (len = 0, nvec = 0; nvec < iovlen; nvec++) {
+		ssize_t l = iov[nvec].iov_len;
+		if (l < 0 || len + l < len) /* SuS wants this. */
+			return -EINVAL;
+		len += l;
+	}
+
+	return len;
+}
+EXPORT_SYMBOL_GPL(rtdm_get_iov_flatlen);
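+
+/*
+ * Canonical use of the iovec helpers in a sendmsg-type handler: fetch
+ * the vector (copying from user space if needed), flatten its length
+ * for validation, transmit, then release the vector. send_buffers()
+ * is a made-up driver helper.
+ *
+ * @code
+ * struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+ * ssize_t len;
+ * int ret;
+ *
+ * ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+ * if (ret)
+ *	return ret;
+ *
+ * len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+ * if (len > 0)
+ *	len = send_buffers(dev, iov, msg->msg_iovlen);
+ *
+ * rtdm_put_iovec(fd, iov, msg, iov_fast);
+ *
+ * return len;
+ * @endcode
+ */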
+
+#ifdef DOXYGEN_CPP /* Only used for doxygen doc generation */
+
+/**
+ * Real-time safe rate-limited message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk_ratelimited(const char *format, ...);
+
+/**
+ * Real-time safe message printing on kernel console
+ *
+ * @param[in] format Format string (conforming standard @c printf())
+ * @param ... Arguments referred by @a format
+ *
+ * @return On success, this service returns the number of characters printed.
+ * Otherwise, a negative error code is returned.
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_printk(const char *format, ...);
+
+/**
+ * Allocate memory block
+ *
+ * @param[in] size Requested size of the memory block
+ *
+ * @return The pointer to the allocated block is returned on success, NULL
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+void *rtdm_malloc(size_t size);
+
+/**
+ * Release real-time memory block
+ *
+ * @param[in] ptr Pointer to memory block as returned by rtdm_malloc()
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_free(void *ptr);
+
+/**
+ * Check if read access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from the specified
+ * memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_read_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		      size_t size);
+
+/**
+ * Check if read/write access to user-space memory block is safe
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] ptr Address of the user-provided memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return Non-zero is returned when it is safe to read from or write to the
+ * specified memory block, 0 otherwise.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_rw_user_ok(struct rtdm_fd *fd, const void __user *ptr,
+		    size_t size);
+
+/**
+ * Copy user-space memory block to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_read_user_ok() that the
+ * provided user-space address can securely be accessed.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_from_user(struct rtdm_fd *fd, void *dst,
+			const void __user *src, size_t size);
+
+/**
+ * Check if read access to user-space memory block is safe and copy it to
+ * specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space memory block
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_read_user_ok() and
+ * rtdm_copy_from_user().
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_from_user(struct rtdm_fd *fd, void *dst,
+			     const void __user *src, size_t size);
+
+/**
+ * Copy specified buffer to user-space memory block
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note Before invoking this service, verify via rtdm_rw_user_ok() that the
+ * provided user-space address can securely be accessed.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+		      const void *src, size_t size);
+
+/**
+ * Check if read/write access to user-space memory block is safe and copy
+ * specified buffer to it
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Address of the user-space memory block
+ * @param[in] src Source buffer address
+ * @param[in] size Size of the memory block
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service is a combination of rtdm_rw_user_ok() and
+ * rtdm_copy_to_user().
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_safe_copy_to_user(struct rtdm_fd *fd, void __user *dst,
+			   const void *src, size_t size);
+
+/**
+ * Copy user-space string to specified buffer
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ * @param[in] dst Destination buffer address
+ * @param[in] src Address of the user-space string
+ * @param[in] count Maximum number of bytes to copy, including the trailing
+ * '\0'
+ *
+ * @return Length of the string on success (not including the trailing '\0'),
+ * otherwise:
+ *
+ * - -EFAULT is returned if an invalid memory area was accessed.
+ *
+ * @note This service already includes a check of the source address;
+ * calling rtdm_read_user_ok() for @a src explicitly is not required.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_strncpy_from_user(struct rtdm_fd *fd, char *dst,
+			   const char __user *src, size_t count);
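+
+/*
+ * Hedged usage sketch for the checked copy services above: fetching
+ * an ioctl argument from user space in one call. struct my_config,
+ * MY_IOC_SETCFG and my_apply_config() are made up for illustration.
+ *
+ *	static int my_ioctl_nrt(struct rtdm_fd *fd, unsigned int request,
+ *				void __user *arg)
+ *	{
+ *		struct my_config cfg;
+ *		int ret;
+ *
+ *		switch (request) {
+ *		case MY_IOC_SETCFG:
+ *			ret = rtdm_safe_copy_from_user(fd, &cfg, arg,
+ *						       sizeof(cfg));
+ *			if (ret)
+ *				return ret;
+ *			return my_apply_config(fd, &cfg);
+ *		default:
+ *			return -ENOTTY;
+ *		}
+ *	}
+ */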
+
+/**
+ * Test if running in a real-time task
+ *
+ * @return Non-zero is returned if the caller resides in real-time context, 0
+ * otherwise.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_in_rt_context(void);
+
+/**
+ * Test if the caller is capable of running in real-time context
+ *
+ * @param[in] fd RTDM file descriptor as passed to the invoked
+ * device operation handler
+ *
+ * @return Non-zero is returned if the caller is able to execute in real-time
+ * context (independent of its current execution mode), 0 otherwise.
+ *
+ * @note This function can be used by drivers that provide different
+ * implementations for the same service depending on the execution mode of
+ * the caller. If a caller requests such a service in non-real-time context
+ * but is capable of running in real-time as well, it might be appropriate
+ * for the driver to reject the request via -ENOSYS so that RTDM can switch
+ * the caller and restart the request in real-time context.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_rt_capable(struct rtdm_fd *fd);
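+
+/*
+ * Illustrative sketch of the demotion idiom described in the note
+ * above: a non-real-time handler hands rt-capable callers back to
+ * RTDM so that the request restarts in primary mode. MY_IOC_FAST and
+ * my_slow_path() are hypothetical.
+ *
+ *	static int my_ioctl_nrt(struct rtdm_fd *fd, unsigned int request,
+ *				void __user *arg)
+ *	{
+ *		if (request == MY_IOC_FAST && rtdm_rt_capable(fd))
+ *			return -ENOSYS;	// restart in real-time context
+ *
+ *		return my_slow_path(fd, request, arg);
+ *	}
+ */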
+
+/**
+ * Test if the real-time core is available
+ *
+ * @return True if the real-time core is available, false if it is disabled
+ * or in an error state.
+ *
+ * @note Drivers should query the core state during initialization if they
+ * perform hardware setup operations or interact with RTDM services such as
+ * locks prior to calling an RTDM service that has a built-in state check of
+ * the real-time core (e.g. rtdm_dev_register() or rtdm_task_init()).
+ *
+ * @coretags{unrestricted}
+ */
+bool rtdm_available(void);
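+
+/*
+ * Typical (sketched) use during driver initialization; my_device is
+ * a hypothetical rtdm_device instance.
+ *
+ *	static int __init my_driver_init(void)
+ *	{
+ *		if (!rtdm_available())
+ *			return -ENOSYS;
+ *
+ *		return rtdm_dev_register(&my_device);
+ *	}
+ */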
+
+#endif /* DOXYGEN_CPP */
+
+/** @} Utility Services */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c
new file mode 100644
index 0000000..c7ceb76
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/fd.c
@@ -0,0 +1,1192 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>
+ * Copyright (C) 2013,2014 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/list.h>
+#include <linux/err.h>
+#include <linux/slab.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/poll.h>
+#include <linux/kthread.h>
+#include <linux/fdtable.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/ppd.h>
+#include <cobalt/kernel/time.h>
+#include <pipeline/inband_work.h>
+#include <trace/events/cobalt-rtdm.h>
+#include <rtdm/compat.h>
+#include <rtdm/fd.h>
+#include "internal.h"
+#include "posix/process.h"
+#include "posix/syscall.h"
+#include "posix/clock.h"
+
+#define RTDM_SETFL_MASK (O_NONBLOCK)
+
+DEFINE_PRIVATE_XNLOCK(fdtree_lock);
+static LIST_HEAD(rtdm_fd_cleanup_queue);
+static struct semaphore rtdm_fd_cleanup_sem;
+
+struct rtdm_fd_index {
+	struct xnid id;
+	struct rtdm_fd *fd;
+};
+
+static int enosys(void)
+{
+	return -ENOSYS;
+}
+
+static int enotty(void)
+{
+	return -ENOTTY;
+}
+
+static int ebadf(void)
+{
+	return -EBADF;
+}
+
+static int enodev(void)
+{
+	return -ENODEV;
+}
+
+static inline struct rtdm_fd_index *
+fetch_fd_index(struct cobalt_ppd *p, int ufd)
+{
+	struct xnid *id = xnid_fetch(&p->fds, ufd);
+	if (id == NULL)
+		return NULL;
+
+	return container_of(id, struct rtdm_fd_index, id);
+}
+
+static struct rtdm_fd *fetch_fd(struct cobalt_ppd *p, int ufd)
+{
+	struct rtdm_fd_index *idx = fetch_fd_index(p, ufd);
+	if (idx == NULL)
+		return NULL;
+
+	return idx->fd;
+}
+
+#define assign_invalid_handler(__handler, __invalid)			\
+	do								\
+		(__handler) = (typeof(__handler))(void (*)(void))__invalid; \
+	while (0)
+
+/* Calling this handler should beget ENOSYS if not implemented. */
+#define assign_switch_handler(__handler)				\
+	do								\
+		if ((__handler) == NULL)				\
+			assign_invalid_handler(__handler, enosys);	\
+	while (0)
+
+#define assign_default_handler(__handler, __invalid)			\
+	do								\
+		if ((__handler) == NULL)				\
+			assign_invalid_handler(__handler, __invalid);	\
+	while (0)
+
+#define __rt(__handler)		__handler ## _rt
+#define __nrt(__handler)	__handler ## _nrt
+
+/*
+ * Install the given invalid-call placeholder if neither of the dual
+ * handlers is implemented; otherwise, assign ENOSYS to any NULL
+ * handler so that it triggers the adaptive switch.
+ */
+#define assign_default_dual_handlers(__handler, __invalid_handler)	\
+	do								\
+		if (__rt(__handler) || __nrt(__handler)) {		\
+			assign_switch_handler(__rt(__handler));		\
+			assign_switch_handler(__nrt(__handler));	\
+		} else {						\
+			assign_invalid_handler(__rt(__handler),		\
+					       __invalid_handler);	\
+			assign_invalid_handler(__nrt(__handler),	\
+					       __invalid_handler);	\
+		}							\
+	while (0)
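+
+/*
+ * For instance, assign_default_dual_handlers(ops->ioctl, enotty)
+ * roughly boils down to (conceptual expansion, casts elided):
+ *
+ *	if (ops->ioctl_rt || ops->ioctl_nrt) {
+ *		if (ops->ioctl_rt == NULL)
+ *			ops->ioctl_rt = enosys;		// adaptive switch
+ *		if (ops->ioctl_nrt == NULL)
+ *			ops->ioctl_nrt = enosys;	// adaptive switch
+ *	} else {
+ *		ops->ioctl_rt = enotty;
+ *		ops->ioctl_nrt = enotty;
+ *	}
+ */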
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+	struct pt_regs *regs;
+
+	if (cobalt_ppd_get(0) == &cobalt_kernel_ppd)
+		fd->compat = 0;
+	else {
+		regs = task_pt_regs(current);
+		XENO_BUG_ON(COBALT, !__xn_syscall_p(regs));
+		fd->compat = __COBALT_CALL_COMPAT(__xn_reg_sys(regs));
+	}
+}
+
+#else	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+static inline void set_compat_bit(struct rtdm_fd *fd)
+{
+}
+
+#endif	/* !CONFIG_XENO_ARCH_SYS3264 */
+
+int rtdm_fd_enter(struct rtdm_fd *fd, int ufd, unsigned int magic,
+		  struct rtdm_fd_ops *ops)
+{
+	struct cobalt_ppd *ppd;
+
+	secondary_mode_only();
+
+	if (magic == 0)
+		return -EINVAL;
+
+	assign_default_dual_handlers(ops->ioctl, enotty);
+	assign_default_dual_handlers(ops->read, ebadf);
+	assign_default_dual_handlers(ops->write, ebadf);
+	assign_default_dual_handlers(ops->recvmsg, ebadf);
+	assign_default_dual_handlers(ops->sendmsg, ebadf);
+	assign_default_handler(ops->select, ebadf);
+	assign_default_handler(ops->mmap, enodev);
+
+	ppd = cobalt_ppd_get(0);
+	fd->magic = magic;
+	fd->ops = ops;
+	fd->owner = ppd;
+	fd->ufd = ufd;
+	fd->refs = 1;
+	fd->stale = false;
+	set_compat_bit(fd);
+	INIT_LIST_HEAD(&fd->next);
+
+	return 0;
+}
+
+int rtdm_fd_register(struct rtdm_fd *fd, int ufd)
+{
+	struct rtdm_fd_index *idx;
+	struct cobalt_ppd *ppd;
+	spl_t s;
+	int ret = 0;
+
+	ppd = cobalt_ppd_get(0);
+	idx = kmalloc(sizeof(*idx), GFP_KERNEL);
+	if (idx == NULL)
+		return -ENOMEM;
+
+	idx->fd = fd;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	ret = xnid_enter(&ppd->fds, &idx->id, ufd);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+	if (ret < 0) {
+		kfree(idx);
+		ret = -EBUSY;
+	}
+
+	return ret;
+}
+
+int rtdm_device_new_fd(struct rtdm_fd *fd, int ufd,
+			struct rtdm_device *device)
+{
+	spl_t s;
+	int ret;
+
+	ret = rtdm_fd_register(fd, ufd);
+	if (ret < 0)
+		return ret;
+
+	trace_cobalt_fd_created(fd, ufd);
+	xnlock_get_irqsave(&fdtree_lock, s);
+	list_add(&fd->next, &device->openfd_list);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return 0;
+}
+
+/**
+ * @brief Retrieve and lock an RTDM file descriptor
+ *
+ * @param[in] ufd User-side file descriptor
+ * @param[in] magic Magic word for lookup validation
+ *
+ * @return Pointer to the RTDM file descriptor matching @a
+ * ufd. Otherwise:
+ *
+ * - ERR_PTR(-EADV) if the user-space handle is either invalid or not
+ * managed by RTDM.
+ *
+ * - ERR_PTR(-EBADF) if the underlying device is being torn down at
+ * the time of the call.
+ *
+ * @note The file descriptor returned must be later released by a call
+ * to rtdm_fd_put().
+ *
+ * @coretags{unrestricted}
+ */
+struct rtdm_fd *rtdm_fd_get(int ufd, unsigned int magic)
+{
+	struct cobalt_ppd *p = cobalt_ppd_get(0);
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	fd = fetch_fd(p, ufd);
+	if (fd == NULL || (magic != 0 && fd->magic != magic)) {
+		fd = ERR_PTR(-EADV);
+		goto out;
+	}
+
+	if (fd->stale) {
+		fd = ERR_PTR(-EBADF);
+		goto out;
+	}
+
+	++fd->refs;
+out:
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return fd;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_get);
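+
+/*
+ * Usage sketch for the get/put pairing documented above; MY_MAGIC is
+ * a made-up driver magic word.
+ *
+ *	struct rtdm_fd *fd = rtdm_fd_get(ufd, MY_MAGIC);
+ *
+ *	if (IS_ERR(fd))
+ *		return PTR_ERR(fd);
+ *
+ *	// ... operate on the descriptor ...
+ *
+ *	rtdm_fd_put(fd);
+ */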
+
+static int fd_cleanup_thread(void *data)
+{
+	struct rtdm_fd *fd;
+	int err;
+	spl_t s;
+
+	for (;;) {
+		set_cpus_allowed_ptr(current, cpu_online_mask);
+
+		do {
+			err = down_interruptible(&rtdm_fd_cleanup_sem);
+			if (kthread_should_stop())
+				return 0;
+		} while (err);
+
+		xnlock_get_irqsave(&fdtree_lock, s);
+		fd = list_first_entry(&rtdm_fd_cleanup_queue,
+				struct rtdm_fd, cleanup);
+		list_del(&fd->cleanup);
+		xnlock_put_irqrestore(&fdtree_lock, s);
+
+		fd->ops->close(fd);
+	}
+
+	return 0;
+}
+
+static void lostage_trigger_close(struct pipeline_inband_work *inband_work)
+{
+	up(&rtdm_fd_cleanup_sem);
+}
+
+static struct lostage_trigger_close {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+} fd_closework = {
+	.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(fd_closework,
+						lostage_trigger_close),
+};
+
+static void __put_fd(struct rtdm_fd *fd, spl_t s)
+{
+	bool destroy, trigger;
+
+	XENO_WARN_ON(COBALT, fd->refs <= 0);
+	destroy = --fd->refs == 0;
+	if (destroy && !list_empty(&fd->next))
+		list_del_init(&fd->next);
+
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	if (!destroy)
+		return;
+
+	if (is_secondary_domain())
+		fd->ops->close(fd);
+	else {
+		xnlock_get_irqsave(&fdtree_lock, s);
+		trigger = list_empty(&rtdm_fd_cleanup_queue);
+		list_add_tail(&fd->cleanup, &rtdm_fd_cleanup_queue);
+		xnlock_put_irqrestore(&fdtree_lock, s);
+
+		if (trigger)
+			pipeline_post_inband_work(&fd_closework);
+	}
+}
+
+void rtdm_device_flush_fds(struct rtdm_device *dev)
+{
+	struct rtdm_driver *drv = dev->driver;
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+
+	while (!list_empty(&dev->openfd_list)) {
+		fd = list_get_entry_init(&dev->openfd_list, struct rtdm_fd, next);
+		fd->stale = true;
+		if (drv->ops.close) {
+			rtdm_fd_get_light(fd);
+			xnlock_put_irqrestore(&fdtree_lock, s);
+			drv->ops.close(fd);
+			rtdm_fd_put(fd);
+			xnlock_get_irqsave(&fdtree_lock, s);
+		}
+	}
+
+	xnlock_put_irqrestore(&fdtree_lock, s);
+}
+
+/**
+ * @brief Release an RTDM file descriptor obtained via rtdm_fd_get()
+ *
+ * @param[in] fd RTDM file descriptor to release
+ *
+ * @note Every call to rtdm_fd_get() must be matched by a call to
+ * rtdm_fd_put().
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_fd_put(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__put_fd(fd, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_put);
+
+/**
+ * @brief Hold a reference on an RTDM file descriptor
+ *
+ * @param[in] fd Target file descriptor
+ *
+ * @note rtdm_fd_lock() increments the reference counter of @a fd. You
+ * only need to call this function in special scenarios, e.g. when
+ * keeping additional references to the file descriptor that have
+ * different lifetimes. Only use rtdm_fd_lock() on descriptors that
+ * are currently locked via an earlier rtdm_fd_get()/rtdm_fd_lock() or
+ * while running a device operation handler.
+ *
+ * @coretags{unrestricted}
+ */
+int rtdm_fd_lock(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	if (fd->refs == 0) {
+		xnlock_put_irqrestore(&fdtree_lock, s);
+		return -EIDRM;
+	}
+	++fd->refs;
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_lock);
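+
+/*
+ * Sketch of the "additional reference" scenario mentioned above:
+ * stashing the descriptor for a deferred completion path (my_ctx is
+ * hypothetical); that path eventually calls rtdm_fd_unlock().
+ *
+ *	ret = rtdm_fd_lock(fd);
+ *	if (ret)
+ *		return ret;	// -EIDRM: descriptor is going away
+ *
+ *	my_ctx->fd = fd;	// released later via rtdm_fd_unlock()
+ */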
+
+/**
+ * @brief Drop a reference on an RTDM file descriptor
+ *
+ * @param[in] fd Target file descriptor
+ *
+ * @note Every call to rtdm_fd_lock() must be matched by a call to
+ * rtdm_fd_unlock().
+ *
+ * @coretags{unrestricted}
+ */
+void rtdm_fd_unlock(struct rtdm_fd *fd)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__put_fd(fd, s);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_unlock);
+
+int rtdm_fd_fcntl(int ufd, int cmd, ...)
+{
+	struct rtdm_fd *fd;
+	va_list ap;
+	long arg;
+	int ret;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return PTR_ERR(fd);
+
+	va_start(ap, cmd);
+	arg = va_arg(ap, long);
+	va_end(ap);
+
+	switch (cmd) {
+	case F_GETFL:
+		ret = fd->oflags;
+		break;
+	case F_SETFL:
+		fd->oflags = (fd->oflags & ~RTDM_SETFL_MASK) |
+			(arg & RTDM_SETFL_MASK);
+		ret = 0;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	rtdm_fd_put(fd);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_fcntl);
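+
+/*
+ * Sketch mirroring the POSIX fcntl() idiom; note that only
+ * O_NONBLOCK may be altered through F_SETFL (see RTDM_SETFL_MASK).
+ *
+ *	int oflags = rtdm_fd_fcntl(ufd, F_GETFL, 0);
+ *
+ *	if (oflags >= 0)
+ *		rtdm_fd_fcntl(ufd, F_SETFL, oflags | O_NONBLOCK);
+ */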
+
+static struct rtdm_fd *get_fd_fixup_mode(int ufd)
+{
+	struct xnthread *thread;
+	struct rtdm_fd *fd;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return fd;
+
+	/*
+	 * Mode is selected according to the following convention:
+	 *
+	 * - Cobalt threads must try running the syscall from primary
+	 * mode as a first attempt, regardless of their scheduling
+	 * class. The driver handler may ask for demoting the caller
+	 * to secondary mode by returning -ENOSYS.
+	 *
+	 * - Regular threads (i.e. not bound to Cobalt) may only run
+	 * the syscall from secondary mode.
+	 */
+	thread = xnthread_current();
+	if (unlikely(is_secondary_domain())) {
+		if (thread == NULL ||
+		    xnthread_test_localinfo(thread, XNDESCENT))
+			return fd;
+	} else if (likely(thread))
+		return fd;
+
+	/*
+	 * We need to switch to the converse mode. Since all callers
+	 * bear the "adaptive" tag, we just pass -ENOSYS back to the
+	 * syscall dispatcher to get switched to the next mode.
+	 */
+	rtdm_fd_put(fd);
+
+	return ERR_PTR(-ENOSYS);
+}
+
+int rtdm_fd_ioctl(int ufd, unsigned int request, ...)
+{
+	struct rtdm_fd *fd;
+	void __user *arg;
+	va_list args;
+	int err, ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		err = PTR_ERR(fd);
+		goto out;
+	}
+
+	va_start(args, request);
+	arg = va_arg(args, void __user *);
+	va_end(args);
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_ioctl(current, fd, ufd, request);
+
+	if (is_secondary_domain())
+		err = fd->ops->ioctl_nrt(fd, request, arg);
+	else
+		err = fd->ops->ioctl_rt(fd, request, arg);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	if (err < 0) {
+		ret = __rtdm_dev_ioctl_core(fd, request, arg);
+		if (ret != -EADV)
+			err = ret;
+	}
+
+	rtdm_fd_put(fd);
+  out:
+	if (err < 0)
+		trace_cobalt_fd_ioctl_status(current, fd, ufd, err);
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_ioctl);
+
+ssize_t
+rtdm_fd_read(int ufd, void __user *buf, size_t size)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_read(current, fd, ufd, size);
+
+	if (is_secondary_domain())
+		ret = fd->ops->read_nrt(fd, buf, size);
+	else
+		ret = fd->ops->read_rt(fd, buf, size);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+  out:
+	if (ret < 0)
+		trace_cobalt_fd_read_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_read);
+
+ssize_t rtdm_fd_write(int ufd, const void __user *buf, size_t size)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_write(current, fd, ufd, size);
+
+	if (is_secondary_domain())
+		ret = fd->ops->write_nrt(fd, buf, size);
+	else
+		ret = fd->ops->write_rt(fd, buf, size);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+  out:
+	if (ret < 0)
+		trace_cobalt_fd_write_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_write);
+
+ssize_t rtdm_fd_recvmsg(int ufd, struct user_msghdr *msg, int flags)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_recvmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	if (is_secondary_domain())
+		ret = fd->ops->recvmsg_nrt(fd, msg, flags);
+	else
+		ret = fd->ops->recvmsg_rt(fd, msg, flags);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+out:
+	if (ret < 0)
+		trace_cobalt_fd_recvmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_recvmsg);
+
+struct cobalt_recvmmsg_timer {
+	struct xntimer timer;
+	struct xnthread *waiter;
+};
+
+static void recvmmsg_timeout_handler(struct xntimer *timer)
+{
+	struct cobalt_recvmmsg_timer *rq;
+
+	rq = container_of(timer, struct cobalt_recvmmsg_timer, timer);
+	xnthread_set_info(rq->waiter, XNTIMEO);
+	xnthread_resume(rq->waiter, XNDELAY);
+}
+
+int __rtdm_fd_recvmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags, void __user *u_timeout,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg),
+		       int (*get_timespec)(struct timespec64 *ts, const void __user *u_ts))
+{
+	struct cobalt_recvmmsg_timer rq;
+	xntmode_t tmode = XN_RELATIVE;
+	struct timespec64 ts = { 0 };
+	int ret = 0, datagrams = 0;
+	xnticks_t timeout = 0;
+	struct mmsghdr mmsg;
+	struct rtdm_fd *fd;
+	void __user *u_p;
+	ssize_t len;
+	spl_t s;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_recvmmsg(current, fd, ufd, flags);
+
+	if (u_timeout) {
+		ret = get_timespec(&ts, u_timeout);
+		if (ret)
+			goto fail;
+
+		if (!timespec64_valid(&ts)) {
+			ret = -EINVAL;
+			goto fail;
+		}
+
+		tmode = XN_ABSOLUTE;
+		timeout = ts2ns(&ts);
+		if (timeout == 0)
+			flags |= MSG_DONTWAIT;
+		else {
+			timeout += xnclock_read_monotonic(&nkclock);
+			rq.waiter = xnthread_current();
+			xntimer_init(&rq.timer, &nkclock,
+				     recvmmsg_timeout_handler,
+				     NULL, XNTIMER_IGRAVITY);
+			xnlock_get_irqsave(&nklock, s);
+			ret = xntimer_start(&rq.timer, timeout,
+					    XN_INFINITE, tmode);
+			xnlock_put_irqrestore(&nklock, s);
+		}
+	}
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	for (u_p = u_msgvec; vlen > 0; vlen--) {
+		ret = get_mmsg(&mmsg, u_p);
+		if (ret)
+			break;
+		len = fd->ops->recvmsg_rt(fd, &mmsg.msg_hdr, flags);
+		if (len < 0) {
+			ret = len;
+			break;
+		}
+		mmsg.msg_len = (unsigned int)len;
+		ret = put_mmsg(&u_p, &mmsg);
+		if (ret)
+			break;
+		datagrams++;
+		/* OOB data requires immediate handling. */
+		if (mmsg.msg_hdr.msg_flags & MSG_OOB)
+			break;
+		if (flags & MSG_WAITFORONE)
+			flags |= MSG_DONTWAIT;
+	}
+
+	if (timeout) {
+		xnlock_get_irqsave(&nklock, s);
+		xntimer_destroy(&rq.timer);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+
+fail:
+	rtdm_fd_put(fd);
+
+	if (datagrams > 0)
+		ret = datagrams;
+
+out:
+	trace_cobalt_fd_recvmmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+static inline int __rtdm_fetch_timeout64(struct timespec64 *ts,
+					 const void __user *u_ts)
+{
+	return u_ts == NULL ? -EFAULT : cobalt_get_timespec64(ts, u_ts);
+}
+
+int __rtdm_fd_recvmmsg64(int ufd, void __user *u_msgvec, unsigned int vlen,
+			 unsigned int flags, void __user *u_timeout,
+			 int (*get_mmsg)(struct mmsghdr *mmsg,
+					 void __user *u_mmsg),
+			 int (*put_mmsg)(void __user **u_mmsg_p,
+					 const struct mmsghdr *mmsg))
+{
+	return __rtdm_fd_recvmmsg(ufd, u_msgvec, vlen, flags, u_timeout,
+				  get_mmsg, put_mmsg, __rtdm_fetch_timeout64);
+}
+
+
+ssize_t rtdm_fd_sendmsg(int ufd, const struct user_msghdr *msg, int flags)
+{
+	struct rtdm_fd *fd;
+	ssize_t ret;
+
+	fd = get_fd_fixup_mode(ufd);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_sendmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	if (is_secondary_domain())
+		ret = fd->ops->sendmsg_nrt(fd, msg, flags);
+	else
+		ret = fd->ops->sendmsg_rt(fd, msg, flags);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+out:
+	if (ret < 0)
+		trace_cobalt_fd_sendmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_sendmsg);
+
+int __rtdm_fd_sendmmsg(int ufd, void __user *u_msgvec, unsigned int vlen,
+		       unsigned int flags,
+		       int (*get_mmsg)(struct mmsghdr *mmsg, void __user *u_mmsg),
+		       int (*put_mmsg)(void __user **u_mmsg_p, const struct mmsghdr *mmsg))
+{
+	int ret = 0, datagrams = 0;
+	struct mmsghdr mmsg;
+	struct rtdm_fd *fd;
+	void __user *u_p;
+	ssize_t len;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_sendmmsg(current, fd, ufd, flags);
+
+	if (fd->oflags & O_NONBLOCK)
+		flags |= MSG_DONTWAIT;
+
+	for (u_p = u_msgvec; vlen > 0; vlen--) {
+		ret = get_mmsg(&mmsg, u_p);
+		if (ret)
+			break;
+		len = fd->ops->sendmsg_rt(fd, &mmsg.msg_hdr, flags);
+		if (len < 0) {
+			ret = len;
+			break;
+		}
+		mmsg.msg_len = (unsigned int)len;
+		ret = put_mmsg(&u_p, &mmsg);
+		if (ret)
+			break;
+		datagrams++;
+	}
+
+	rtdm_fd_put(fd);
+
+	if (datagrams > 0)
+		ret = datagrams;
+
+out:
+	trace_cobalt_fd_sendmmsg_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+static void
+__fd_close(struct cobalt_ppd *p, struct rtdm_fd_index *idx, spl_t s)
+{
+	xnid_remove(&p->fds, &idx->id);
+	__put_fd(idx->fd, s);
+
+	kfree(idx);
+}
+
+int rtdm_fd_close(int ufd, unsigned int magic)
+{
+	struct rtdm_fd_index *idx;
+	struct cobalt_ppd *ppd;
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	secondary_mode_only();
+
+	ppd = cobalt_ppd_get(0);
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	idx = fetch_fd_index(ppd, ufd);
+	if (idx == NULL)
+		goto eadv;
+
+	fd = idx->fd;
+	if (magic != 0 && fd->magic != magic) {
+eadv:
+		xnlock_put_irqrestore(&fdtree_lock, s);
+		return -EADV;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_close(current, fd, ufd, fd->refs);
+
+	/*
+	 * In dual kernel mode, the linux-side fdtable and the RTDM
	 * ->close() handler are asynchronously managed, i.e. the
	 * handler execution may be deferred until after the regular file
	 * descriptor has been removed from the fdtable if some refs on
+	 * rtdm_fd are still pending.
+	 */
+	__fd_close(ppd, idx, s);
+	close_fd(ufd);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_close);
+
+int rtdm_fd_mmap(int ufd, struct _rtdm_mmap_request *rma,
+		 void **u_addrp)
+{
+	struct rtdm_fd *fd;
+	int ret;
+
+	secondary_mode_only();
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd)) {
+		ret = PTR_ERR(fd);
+		goto out;
+	}
+
+	set_compat_bit(fd);
+
+	trace_cobalt_fd_mmap(current, fd, ufd, rma);
+
+	if (rma->flags & (MAP_FIXED|MAP_ANONYMOUS)) {
+		ret = -EADV;
+		goto unlock;
+	}
+
+	ret = __rtdm_mmap_from_fdop(fd, rma->length, rma->offset,
+				    rma->prot, rma->flags, u_addrp);
+unlock:
+	rtdm_fd_put(fd);
+out:
+	if (ret)
+		trace_cobalt_fd_mmap_status(current, fd, ufd, ret);
+
+	return ret;
+}
+
+int rtdm_fd_valid_p(int ufd)
+{
+	struct rtdm_fd *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&fdtree_lock, s);
+	fd = fetch_fd(cobalt_ppd_get(0), ufd);
+	xnlock_put_irqrestore(&fdtree_lock, s);
+
+	return fd != NULL;
+}
+
+/**
+ * @brief Bind a selector to specified event types of a given file descriptor
+ * @internal
+ *
+ * This function is invoked by higher RTOS layers implementing select-like
+ * services. It shall not be called directly by RTDM drivers.
+ *
+ * @param[in] ufd User-side file descriptor to bind to
+ * @param[in,out] selector Selector object that shall be bound to the given
+ * event
+ * @param[in] type Event type the caller is interested in
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - -EADV is returned if the file descriptor @a ufd cannot be resolved.
+ *
+ * - -EBADF is returned if the underlying device is being torn down at the time
+ *   of the call.
+ *
+ * - -EINVAL is returned if @a type is invalid.
+ *
+ * @coretags{task-unrestricted}
+ */
+int rtdm_fd_select(int ufd, struct xnselector *selector,
+		   unsigned int type)
+{
+	struct rtdm_fd *fd;
+	int ret;
+
+	fd = rtdm_fd_get(ufd, 0);
+	if (IS_ERR(fd))
+		return PTR_ERR(fd);
+
+	set_compat_bit(fd);
+
+	ret = fd->ops->select(fd, selector, type, ufd);
+
+	if (!XENO_ASSERT(COBALT, !spltest()))
+		splnone();
+
+	rtdm_fd_put(fd);
+
+	return ret;
+}
+
+int rtdm_fd_get_setsockaddr_args(struct rtdm_fd *fd,
+				 struct _rtdm_setsockaddr_args *dst,
+				 const void *src)
+{
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_setsockaddr_args cargs;
+		int ret;
+
+		if (!rtdm_read_user_ok(fd, src, sizeof(cargs)))
+			return -EFAULT;
+
+		ret = rtdm_copy_from_user(fd, &cargs, src, sizeof(cargs));
+		if (ret)
+			return ret;
+
+		dst->addr = compat_ptr(cargs.addr);
+		dst->addrlen = cargs.addrlen;
+
+		return 0;
+	}
+#endif
+
+	if (!rtdm_read_user_ok(fd, src, sizeof(*dst)))
+		return -EFAULT;
+
+	return rtdm_copy_from_user(fd, dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_get_setsockaddr_args);
+
+int rtdm_fd_get_setsockopt_args(struct rtdm_fd *fd,
+				struct _rtdm_setsockopt_args *dst,
+				const void *src)
+{
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_setsockopt_args cargs;
+		int ret;
+
+		if (!rtdm_read_user_ok(fd, src, sizeof(cargs)))
+			return -EFAULT;
+
+		ret = rtdm_copy_from_user(fd, &cargs, src, sizeof(cargs));
+		if (ret)
+			return ret;
+
+		dst->optlen = cargs.optlen;
+		dst->optval = compat_ptr(cargs.optval);
+		dst->optname = cargs.optname;
+		dst->level = cargs.level;
+
+		return 0;
+	}
+#endif
+
+	if (!rtdm_read_user_ok(fd, src, sizeof(*dst)))
+		return -EFAULT;
+
+	return rtdm_copy_from_user(fd, dst, src, sizeof(*dst));
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_get_setsockopt_args);
+
+int rtdm_fd_get_iovec(struct rtdm_fd *fd, struct iovec *iov,
+		      const struct user_msghdr *msg, bool rw)
+{
+	size_t sz;
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	sz = rtdm_fd_is_compat(fd) ? sizeof(struct compat_iovec) : sizeof(*iov);
+#else
+	sz = sizeof(*iov);
+#endif
+
+	sz *= msg->msg_iovlen;
+
+	if (!rw && !rtdm_read_user_ok(fd, msg->msg_iov, sz))
+		return -EFAULT;
+
+	if (rw && !rtdm_rw_user_ok(fd, msg->msg_iov, sz))
+		return -EFAULT;
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd))
+		return sys32_get_iovec(
+			iov, (struct compat_iovec __user *)msg->msg_iov,
+			(int)msg->msg_iovlen);
+#endif
+
+	return rtdm_copy_from_user(fd, iov, msg->msg_iov, sz);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_get_iovec);
+
+int rtdm_fd_put_iovec(struct rtdm_fd *fd, const struct iovec *iov,
+		      const struct user_msghdr *msg)
+{
+	size_t sz;
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	sz = rtdm_fd_is_compat(fd) ? sizeof(struct compat_iovec) : sizeof(*iov);
+#else
+	sz = sizeof(*iov);
+#endif
+
+	sz *= msg->msg_iovlen;
+
+	if (!rtdm_rw_user_ok(fd, msg->msg_iov, sz))
+		return -EFAULT;
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd))
+		return sys32_put_iovec(
+			(struct compat_iovec __user *)msg->msg_iov, iov,
+			(int)msg->msg_iovlen);
+#endif
+
+	return rtdm_copy_to_user(fd, msg->msg_iov, iov, sz);
+}
+EXPORT_SYMBOL_GPL(rtdm_fd_put_iovec);
+
+static void destroy_fd(void *cookie, struct xnid *id)
+{
+	struct cobalt_ppd *p = cookie;
+	struct rtdm_fd_index *idx;
+	spl_t s;
+
+	idx = container_of(id, struct rtdm_fd_index, id);
+	xnlock_get_irqsave(&fdtree_lock, s);
+	__fd_close(p, idx, 0);
+}
+
+void rtdm_fd_cleanup(struct cobalt_ppd *p)
+{
+	/*
+	 * This is called on behalf of a (userland) task exit handler,
+	 * so we don't have to deal with the regular file descriptors;
+	 * we only have to empty our own index.
+	 */
+	xntree_cleanup(&p->fds, p, destroy_fd);
+}
+
+void rtdm_fd_init(void)
+{
+	sema_init(&rtdm_fd_cleanup_sem, 0);
+	kthread_run(fd_cleanup_thread, NULL, "rtdm_fd");
+}
+
+static inline void warn_user(struct file *file, const char *call)
+{
+	struct dentry *dentry = file->f_path.dentry;
+	
+	printk(XENO_WARNING
+	       "%s[%d] called regular %s() on /dev/rtdm/%s\n",
+	       current->comm, task_pid_nr(current), call + 5, dentry->d_name.name);
+}
+
+static ssize_t dumb_read(struct file *file, char  __user *buf,
+			 size_t count, loff_t __user *ppos)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static ssize_t dumb_write(struct file *file,  const char __user *buf,
+			  size_t count, loff_t __user *ppos)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static unsigned int dumb_poll(struct file *file, poll_table *pt)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+static long dumb_ioctl(struct file *file, unsigned int cmd,
+		       unsigned long arg)
+{
+	warn_user(file, __func__);
+	return -EINVAL;
+}
+
+const struct file_operations rtdm_dumb_fops = {
+	.read		= dumb_read,
+	.write		= dumb_write,
+	.poll		= dumb_poll,
+	.unlocked_ioctl	= dumb_ioctl,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h
new file mode 100644
index 0000000..99488e0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/internal.h
@@ -0,0 +1,64 @@
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2005 Joerg Langenberg <joerg.langenberg@gmx.net>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef _RTDM_INTERNAL_H
+#define _RTDM_INTERNAL_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/sem.h>
+#include <linux/file.h>
+#include <linux/atomic.h>
+#include <cobalt/kernel/tree.h>
+#include <cobalt/kernel/lock.h>
+#include <rtdm/driver.h>
+
+static inline void __rtdm_get_device(struct rtdm_device *device)
+{
+	atomic_inc(&device->refcount);
+}
+
+void __rtdm_put_device(struct rtdm_device *device);
+
+struct rtdm_device *__rtdm_get_namedev(const char *path);
+
+struct rtdm_device *__rtdm_get_protodev(int protocol_family,
+					int socket_type);
+
+void __rtdm_dev_close(struct rtdm_fd *fd);
+
+int __rtdm_dev_ioctl_core(struct rtdm_fd *fd,
+			  unsigned int request, void __user *arg);
+
+int __rtdm_mmap_from_fdop(struct rtdm_fd *fd, size_t len, off_t offset,
+			  int prot, int flags, void **pptr);
+
+/* nklock held, irqs off. */
+static inline void rtdm_fd_get_light(struct rtdm_fd *fd)
+{
+	++fd->refs;
+}
+
+int rtdm_init(void);
+
+void rtdm_cleanup(void);
+
+extern const struct file_operations rtdm_dumb_fops;
+
+#endif /* _RTDM_INTERNAL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c
new file mode 100644
index 0000000..d35bb3b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/rtdm/wrappers.c
@@ -0,0 +1,106 @@
+/*
+ * Copyright (c) 2013  Hauke Mehrtens <hauke@hauke-m.de>
+ * Copyright (c) 2013  Hannes Frederic Sowa <hannes@stressinduktion.org>
+ * Copyright (c) 2014  Luis R. Rodriguez <mcgrof@do-not-panic.com>
+ *
+ * Backport functionality introduced in Linux 3.13.
+ *
+ * Copyright (c) 2014  Hauke Mehrtens <hauke@hauke-m.de>
+ *
+ * Backport functionality introduced in Linux 3.14.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <linux/version.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/hwmon.h>
+#include <asm/xenomai/wrappers.h>
+
+/*
+ * Same rules as kernel/cobalt/include/asm-generic/xenomai/wrappers.h
+ * apply to reduce #ifdefery.
+ */
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,14,0)
+#ifdef CONFIG_PCI_MSI
+int pci_enable_msix_range(struct pci_dev *dev,
+			struct msix_entry *entries,
+			int minvec, int maxvec)
+{
+	int nvec = maxvec;
+	int rc;
+
+	if (maxvec < minvec)
+		return -ERANGE;
+
+	do {
+		rc = pci_enable_msix(dev, entries, nvec);
+		if (rc < 0) {
+			return rc;
+		} else if (rc > 0) {
+			if (rc < minvec)
+				return -ENOSPC;
+			nvec = rc;
+		}
+	} while (rc);
+
+	return nvec;
+}
+EXPORT_SYMBOL(pci_enable_msix_range);
+#endif
+#endif /* < 3.14 */
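+
+/*
+ * Usage sketch (standard kernel semantics of this API, with pdev and
+ * entries set up elsewhere): request between 1 and 8 MSI-X vectors;
+ * the return value is the number of vectors actually enabled, or a
+ * negative error code.
+ *
+ *	nvec = pci_enable_msix_range(pdev, entries, 1, 8);
+ *	if (nvec < 0)
+ *		return nvec;
+ */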
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(3,13,0)
+#ifdef CONFIG_HWMON
+struct device*
+hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups)
+{
+	struct device *hwdev;
+
+	hwdev = hwmon_device_register(dev);
+	hwdev->groups = groups;
+	dev_set_drvdata(hwdev, drvdata);
+	return hwdev;
+}
+
+static void devm_hwmon_release(struct device *dev, void *res)
+{
+	struct device *hwdev = *(struct device **)res;
+
+	hwmon_device_unregister(hwdev);
+}
+
+struct device *
+devm_hwmon_device_register_with_groups(struct device *dev, const char *name,
+				void *drvdata,
+				const struct attribute_group **groups)
+{
+	struct device **ptr, *hwdev;
+
+	if (!dev)
+		return ERR_PTR(-EINVAL);
+
+	ptr = devres_alloc(devm_hwmon_release, sizeof(*ptr), GFP_KERNEL);
+	if (!ptr)
+		return ERR_PTR(-ENOMEM);
+
+	hwdev = hwmon_device_register_with_groups(dev, name, drvdata, groups);
+	if (IS_ERR(hwdev))
+		goto error;
+
+	*ptr = hwdev;
+	devres_add(dev, ptr);
+	return hwdev;
+
+error:
+	devres_free(ptr);
+	return hwdev;
+}
+EXPORT_SYMBOL_GPL(devm_hwmon_device_register_with_groups);
+#endif
+#endif /* < 3.13 */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c
new file mode 100644
index 0000000..e679982
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-idle.c
@@ -0,0 +1,67 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+
+static struct xnthread *xnsched_idle_pick(struct xnsched *sched)
+{
+	return &sched->rootcb;
+}
+
+static bool xnsched_idle_setparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	return __xnsched_idle_setparam(thread, p);
+}
+
+static void xnsched_idle_getparam(struct xnthread *thread,
+				  union xnsched_policy_param *p)
+{
+	__xnsched_idle_getparam(thread, p);
+}
+
+static void xnsched_idle_trackprio(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	__xnsched_idle_trackprio(thread, p);
+}
+
+static void xnsched_idle_protectprio(struct xnthread *thread, int prio)
+{
+	__xnsched_idle_protectprio(thread, prio);
+}
+
+struct xnsched_class xnsched_class_idle = {
+	.sched_init		=	NULL,
+	.sched_enqueue		=	NULL,
+	.sched_dequeue		=	NULL,
+	.sched_requeue		=	NULL,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_declare		=	NULL,
+	.sched_pick		=	xnsched_idle_pick,
+	.sched_setparam		=	xnsched_idle_setparam,
+	.sched_getparam		=	xnsched_idle_getparam,
+	.sched_trackprio	=	xnsched_idle_trackprio,
+	.sched_protectprio	=	xnsched_idle_protectprio,
+	.weight			=	XNSCHED_CLASS_WEIGHT(0),
+	.policy			=	SCHED_IDLE,
+	.name			=	"idle"
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c
new file mode 100644
index 0000000..4c3383b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-quota.c
@@ -0,0 +1,835 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/bitmap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/uapi/sched.h>
+#include <trace/events/cobalt-core.h>
+
+/*
+ * With this policy, each per-CPU scheduler slot maintains a list of
+ * active thread groups, picking from the sched_rt runqueue.
+ *
+ * Each time a thread is picked from the runqueue, we check whether we
+ * still have budget for running it, looking at the group it belongs
+ * to. If so, a timer is armed to fire when that group runs out of
+ * budget, assuming the incoming thread runs unpreempted until then
+ * (i.e. xnsched_quota->limit_timer).
+ *
+ * Otherwise, if no budget remains in the group for running the
+ * candidate thread, we move the latter to a local expiry queue
+ * maintained by the group. This process is done on the fly as we pull
+ * from the runqueue.
+ *
+ * Updating the remaining budget is done each time the Cobalt core
+ * asks for replacing the current thread with the next runnable one,
+ * i.e. xnsched_quota_pick(). There we charge the elapsed run time of
+ * the outgoing thread to the relevant group, and conversely, we check
+ * whether the incoming thread has budget.
+ *
+ * Finally, a per-CPU timer (xnsched_quota->refill_timer) periodically
+ * ticks in the background, in accordance with the defined quota
+ * interval. Its handler replenishes each thread group's budget in
+ * accordance with the group's share, pushing all expired threads
+ * back to the runqueue in the same move.
+ *
+ * NOTE: since the core logic enforcing the budget entirely happens in
+ * xnsched_quota_pick(), applying a budget change can be done as
+ * simply as forcing the rescheduling procedure to be invoked asap. As
+ * a result of this, the Cobalt core will ask for the next thread to
+ * run, which means calling xnsched_quota_pick() eventually.
+ *
+ * CAUTION: xnsched_quota_group->nr_active does count both the threads
+ * from that group linked to the sched_rt runqueue, _and_ the threads
+ * moved to the local expiry queue. As a matter of fact, the expired
+ * threads - those for which we consumed all the per-group budget -
+ * are still seen as runnable (i.e. not blocked/suspended) by the
+ * Cobalt core. This only means that the SCHED_QUOTA policy won't pick
+ * them until the corresponding budget is replenished.
+ */
+static DECLARE_BITMAP(group_map, CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS);
+
+static inline int group_is_active(struct xnsched_quota_group *tg)
+{
+	struct xnthread *curr = tg->sched->curr;
+
+	if (tg->nr_active)
+		return 1;
+
+	/*
+	 * Check whether the current thread belongs to the group, and
+	 * is still in running state (XNREADY denotes a thread linked
+	 * to the runqueue, in which case tg->nr_active already
+	 * accounts for it).
+	 */
+	if (curr->quota == tg &&
+	    xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0)
+		return 1;
+
+	return 0;
+}
+
+static inline void replenish_budget(struct xnsched_quota *qs,
+				    struct xnsched_quota_group *tg)
+{
+	xnticks_t budget_ns, credit_ns;
+
+	if (tg->quota_ns == tg->quota_peak_ns) {
+		/*
+		 * Fast path: we don't accumulate runtime credit.
+		 * This includes groups with no runtime limit
+		 * (i.e. quota off: quota >= period && quota == peak).
+		 */
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	/*
+	 * We have to deal with runtime credit accumulation, as the
+	 * group may consume more than its base quota during a single
+	 * interval, up to a peak duration though (not to monopolize
+	 * the CPU).
+	 *
+	 * - In the simplest case, a group is allotted a new full
+	 * budget plus the unconsumed portion of the previous budget,
+	 * provided the sum does not exceed the peak quota.
+	 *
+	 * - When there is too much budget for a single interval
+	 * (i.e. above peak quota), we spread the extra time over
+	 * multiple intervals through a credit accumulation mechanism.
+	 *
+	 * - The accumulated credit is dropped whenever a group has no
+	 * runnable threads.
+	 */
+	if (!group_is_active(tg)) {
+		/* Drop accumulated credit. */
+		tg->run_credit_ns = 0;
+		tg->run_budget_ns = tg->quota_ns;
+		return;
+	}
+
+	budget_ns = tg->run_budget_ns + tg->quota_ns;
+	if (budget_ns > tg->quota_peak_ns) {
+		/* Too much budget, spread it over intervals. */
+		tg->run_credit_ns += budget_ns - tg->quota_peak_ns;
+		tg->run_budget_ns = tg->quota_peak_ns;
+	} else if (tg->run_credit_ns) {
+		credit_ns = tg->quota_peak_ns - budget_ns;
+		/* Consume the accumulated credit. */
+		if (tg->run_credit_ns >= credit_ns)
+			tg->run_credit_ns -= credit_ns;
+		else {
+			credit_ns = tg->run_credit_ns;
+			tg->run_credit_ns = 0;
+		}
+		/* Allot extended budget, limited to peak quota. */
+		tg->run_budget_ns = budget_ns + credit_ns;
+	} else
+		/* No credit, budget was below peak quota. */
+		tg->run_budget_ns = budget_ns;
+}
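+
+/*
+ * Worked example with illustrative figures: given a 10 ms period, a
+ * 30% quota (3 ms) and a 50% peak (5 ms), a group which consumed only
+ * 1 ms of its budget during the last interval carries 2 ms over;
+ * 2 + 3 = 5 ms <= peak, so the next budget is 5 ms. Had 4 ms been
+ * left over instead, 4 + 3 = 7 ms > 5 ms peak, so the budget would be
+ * clamped to 5 ms and the extra 2 ms kept as credit for later
+ * intervals.
+ */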
+
+static void quota_refill_handler(struct xntimer *timer)
+{
+	struct xnsched_quota_group *tg;
+	struct xnthread *thread, *tmp;
+	struct xnsched_quota *qs;
+	struct xnsched *sched;
+
+	qs = container_of(timer, struct xnsched_quota, refill_timer);
+	XENO_BUG_ON(COBALT, list_empty(&qs->groups));
+	sched = container_of(qs, struct xnsched, quota);
+
+	trace_cobalt_schedquota_refill(0);
+
+	list_for_each_entry(tg, &qs->groups, next) {
+		/* Allot a new runtime budget for the group. */
+		replenish_budget(qs, tg);
+
+		if (tg->run_budget_ns == 0 || list_empty(&tg->expired))
+			continue;
+		/*
+		 * For each group living on this CPU, move all expired
+		 * threads back to the runqueue. Since those threads
+		 * were moved out of the runqueue as we were
+		 * considering them for execution, we push them back
+		 * in LIFO order to their respective priority group.
+		 * The expiry queue is FIFO to keep ordering right
+		 * among expired threads.
+		 */
+		list_for_each_entry_safe_reverse(thread, tmp, &tg->expired, quota_expired) {
+			list_del_init(&thread->quota_expired);
+			xnsched_addq(&sched->rt.runnable, thread);
+		}
+	}
+
+	xnsched_set_self_resched(timer->sched);
+}
+
+static void quota_limit_handler(struct xntimer *timer)
+{
+	struct xnsched *sched;
+
+	sched = container_of(timer, struct xnsched, quota.limit_timer);
+	/*
+	 * Force a rescheduling on the return path of the current
+	 * interrupt, so that the budget is re-evaluated for the
+	 * current group in xnsched_quota_pick().
+	 */
+	xnsched_set_self_resched(sched);
+}
+
+static int quota_sum_all(struct xnsched_quota *qs)
+{
+	struct xnsched_quota_group *tg;
+	int sum;
+
+	if (list_empty(&qs->groups))
+		return 0;
+
+	sum = 0;
+	list_for_each_entry(tg, &qs->groups, next)
+		sum += tg->quota_percent;
+
+	return sum;
+}
+
+static void xnsched_quota_init(struct xnsched *sched)
+{
+	char limiter_name[XNOBJECT_NAME_LEN], refiller_name[XNOBJECT_NAME_LEN];
+	struct xnsched_quota *qs = &sched->quota;
+
+	qs->period_ns = CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD * 1000ULL;
+	INIT_LIST_HEAD(&qs->groups);
+
+#ifdef CONFIG_SMP
+	ksformat(refiller_name, sizeof(refiller_name),
+		 "[quota-refill/%u]", sched->cpu);
+	ksformat(limiter_name, sizeof(limiter_name),
+		 "[quota-limit/%u]", sched->cpu);
+#else
+	strcpy(refiller_name, "[quota-refill]");
+	strcpy(limiter_name, "[quota-limit]");
+#endif
+	xntimer_init(&qs->refill_timer,
+		     &nkclock, quota_refill_handler, sched,
+		     XNTIMER_IGRAVITY);
+	xntimer_set_name(&qs->refill_timer, refiller_name);
+
+	xntimer_init(&qs->limit_timer,
+		     &nkclock, quota_limit_handler, sched,
+		     XNTIMER_IGRAVITY);
+	xntimer_set_name(&qs->limit_timer, limiter_name);
+}
+
+static bool xnsched_quota_setparam(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	struct xnsched_quota_group *tg;
+	struct xnsched_quota *qs;
+	bool effective;
+
+	xnthread_clear_state(thread, XNWEAK);
+	effective = xnsched_set_effective_priority(thread, p->quota.prio);
+
+	qs = &thread->sched->quota;
+	list_for_each_entry(tg, &qs->groups, next) {
+		if (tg->tgid != p->quota.tgid)
+			continue;
+		if (thread->quota) {
+			/* Dequeued earlier by our caller. */
+			list_del(&thread->quota_next);
+			thread->quota->nr_threads--;
+		}
+
+		trace_cobalt_schedquota_add_thread(tg, thread);
+
+		thread->quota = tg;
+		list_add(&thread->quota_next, &tg->members);
+		tg->nr_threads++;
+		return effective;
+	}
+
+	XENO_BUG(COBALT);
+
+	return false;
+}
+
+static void xnsched_quota_getparam(struct xnthread *thread,
+				   union xnsched_policy_param *p)
+{
+	p->quota.prio = thread->cprio;
+	p->quota.tgid = thread->quota->tgid;
+}
+
+static void xnsched_quota_trackprio(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	if (p) {
+		/* We should not cross groups during PI boost. */
+		XENO_WARN_ON(COBALT,
+			     thread->base_class == &xnsched_class_quota &&
+			     thread->quota->tgid != p->quota.tgid);
+		thread->cprio = p->quota.prio;
+	} else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_quota_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_QUOTA_MAX_PRIO)
+		prio = XNSCHED_QUOTA_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_quota_chkparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	struct xnsched_quota_group *tg;
+	struct xnsched_quota *qs;
+	int tgid;
+
+	if (p->quota.prio < XNSCHED_QUOTA_MIN_PRIO ||
+	    p->quota.prio > XNSCHED_QUOTA_MAX_PRIO)
+		return -EINVAL;
+
+	tgid = p->quota.tgid;
+	if (tgid < 0 || tgid >= CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS)
+		return -EINVAL;
+
+	/*
+	 * The group must be managed on the same CPU the thread
+	 * currently runs on.
+	 */
+	qs = &thread->sched->quota;
+	list_for_each_entry(tg, &qs->groups, next) {
+		if (tg->tgid == tgid)
+			return 0;
+	}
+
+	/*
+	 * If that group exists nevertheless, we give userland a
+	 * specific error code.
+	 */
+	if (test_bit(tgid, group_map))
+		return -EPERM;
+
+	return -EINVAL;
+}
+
+static void xnsched_quota_forget(struct xnthread *thread)
+{
+	trace_cobalt_schedquota_remove_thread(thread->quota, thread);
+
+	thread->quota->nr_threads--;
+	XENO_BUG_ON(COBALT, thread->quota->nr_threads < 0);
+	list_del(&thread->quota_next);
+	thread->quota = NULL;
+}
+
+static void xnsched_quota_kick(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	/*
+	 * Allow a kicked thread to be elected for running until it
+	 * relaxes, even if the group it belongs to lacks runtime
+	 * budget.
+	 */
+	if (tg->run_budget_ns == 0 && !list_empty(&thread->quota_expired)) {
+		list_del_init(&thread->quota_expired);
+		xnsched_addq_tail(&sched->rt.runnable, thread);
+	}
+}
+
+static inline int thread_is_runnable(struct xnthread *thread)
+{
+	return thread->quota->run_budget_ns > 0 ||
+		xnthread_test_info(thread, XNKICKED);
+}
+
+static void xnsched_quota_enqueue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!thread_is_runnable(thread))
+		list_add_tail(&thread->quota_expired, &tg->expired);
+	else
+		xnsched_addq_tail(&sched->rt.runnable, thread);
+
+	tg->nr_active++;
+}
+
+static void xnsched_quota_dequeue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!list_empty(&thread->quota_expired))
+		list_del_init(&thread->quota_expired);
+	else
+		xnsched_delq(&sched->rt.runnable, thread);
+
+	tg->nr_active--;
+}
+
+static void xnsched_quota_requeue(struct xnthread *thread)
+{
+	struct xnsched_quota_group *tg = thread->quota;
+	struct xnsched *sched = thread->sched;
+
+	if (!thread_is_runnable(thread))
+		list_add(&thread->quota_expired, &tg->expired);
+	else
+		xnsched_addq(&sched->rt.runnable, thread);
+
+	tg->nr_active++;
+}
+
+static struct xnthread *xnsched_quota_pick(struct xnsched *sched)
+{
+	struct xnthread *next, *curr = sched->curr;
+	struct xnsched_quota *qs = &sched->quota;
+	struct xnsched_quota_group *otg, *tg;
+	xnticks_t now, elapsed;
+	int ret;
+
+	now = xnclock_read_monotonic(&nkclock);
+	otg = curr->quota;
+	if (otg == NULL)
+		goto pick;
+	/*
+	 * Charge the time consumed by the outgoing thread to the
+	 * group it belongs to.
+	 */
+	elapsed = now - otg->run_start_ns;
+	if (elapsed < otg->run_budget_ns)
+		otg->run_budget_ns -= elapsed;
+	else
+		otg->run_budget_ns = 0;
+pick:
+	next = xnsched_getq(&sched->rt.runnable);
+	if (next == NULL) {
+		xntimer_stop(&qs->limit_timer);
+		return NULL;
+	}
+
+	/*
+	 * As we basically piggyback on the SCHED_FIFO runqueue, make
+	 * sure to detect non-quota threads.
+	 */
+	tg = next->quota;
+	if (tg == NULL)
+		return next;
+
+	tg->run_start_ns = now;
+
+	/*
+	 * Don't consider budget if kicked, we have to allow this
+	 * thread to run until it eventually relaxes.
+	 */
+	if (xnthread_test_info(next, XNKICKED)) {
+		xntimer_stop(&qs->limit_timer);
+		goto out;
+	}
+
+	if (tg->run_budget_ns == 0) {
+		/* Flush expired group members as we go. */
+		list_add_tail(&next->quota_expired, &tg->expired);
+		goto pick;
+	}
+
+	if (otg == tg && xntimer_running_p(&qs->limit_timer))
+		/* Same group, leave the running timer untouched. */
+		goto out;
+
+	/* Arm limit timer for the new running group. */
+	ret = xntimer_start(&qs->limit_timer, now + tg->run_budget_ns,
+			    XN_INFINITE, XN_ABSOLUTE);
+	if (ret) {
+		/* Budget exhausted: deactivate this group. */
+		tg->run_budget_ns = 0;
+		list_add_tail(&next->quota_expired, &tg->expired);
+		goto pick;
+	}
+out:
+	tg->nr_active--;
+
+	return next;
+}
+
+static void xnsched_quota_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	union xnsched_policy_param param;
+	/*
+	 * Runtime quota groups are defined per-CPU, so leaving the
+	 * current CPU means exiting the group. We do this by moving
+	 * the target thread to the plain RT class.
+	 */
+	param.rt.prio = thread->cprio;
+	__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+}
+
+/**
+ * @ingroup cobalt_core_sched
+ * @defgroup sched_quota SCHED_QUOTA scheduling policy
+ *
+ * The SCHED_QUOTA policy enforces a limitation on the CPU consumption
+ * of threads over a globally defined period, known as the quota
+ * interval. This is done by pooling threads with common requirements
+ * in groups, and giving each group a share of the global period
+ * (CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).
+ *
+ * When threads have entirely consumed the quota allotted to the group
+ * they belong to, the latter is suspended as a whole, until the next
+ * quota interval starts. At this point, a new runtime budget is
+ * given to each group, in accordance with its share.
+ *
+ *@{
+ */
+int xnsched_quota_create_group(struct xnsched_quota_group *tg,
+			       struct xnsched *sched,
+			       int *quota_sum_r)
+{
+	int tgid, nr_groups = CONFIG_XENO_OPT_SCHED_QUOTA_NR_GROUPS;
+	struct xnsched_quota *qs = &sched->quota;
+
+	atomic_only();
+
+	tgid = find_first_zero_bit(group_map, nr_groups);
+	if (tgid >= nr_groups)
+		return -ENOSPC;
+
+	__set_bit(tgid, group_map);
+	tg->tgid = tgid;
+	tg->sched = sched;
+	tg->run_budget_ns = qs->period_ns;
+	tg->run_credit_ns = 0;
+	tg->quota_percent = 100;
+	tg->quota_peak_percent = 100;
+	tg->quota_ns = qs->period_ns;
+	tg->quota_peak_ns = qs->period_ns;
+	tg->nr_active = 0;
+	tg->nr_threads = 0;
+	INIT_LIST_HEAD(&tg->members);
+	INIT_LIST_HEAD(&tg->expired);
+
+	trace_cobalt_schedquota_create_group(tg);
+
+	if (list_empty(&qs->groups))
+		xntimer_start(&qs->refill_timer,
+			      qs->period_ns, qs->period_ns, XN_RELATIVE);
+
+	list_add(&tg->next, &qs->groups);
+	*quota_sum_r = quota_sum_all(qs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_create_group);
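+
+/*
+ * Sketch of a typical setup sequence; both services must be called
+ * from atomic context (see atomic_only()), the group descriptor must
+ * outlive the group, and error handling is elided here.
+ *
+ *	struct xnsched_quota_group tg;
+ *	int quota_sum;
+ *
+ *	xnsched_quota_create_group(&tg, sched, &quota_sum);
+ *	xnsched_quota_set_limit(&tg, 30, 50, &quota_sum); // 30% quota, 50% peak
+ */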
+
+int xnsched_quota_destroy_group(struct xnsched_quota_group *tg,
+				int force, int *quota_sum_r)
+{
+	struct xnsched_quota *qs = &tg->sched->quota;
+	union xnsched_policy_param param;
+	struct xnthread *thread, *tmp;
+
+	atomic_only();
+
+	if (!list_empty(&tg->members)) {
+		if (!force)
+			return -EBUSY;
+		/* Move group members to the rt class. */
+		list_for_each_entry_safe(thread, tmp, &tg->members, quota_next) {
+			param.rt.prio = thread->cprio;
+			__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+		}
+	}
+
+	trace_cobalt_schedquota_destroy_group(tg);
+
+	list_del(&tg->next);
+	__clear_bit(tg->tgid, group_map);
+
+	if (list_empty(&qs->groups))
+		xntimer_stop(&qs->refill_timer);
+
+	if (quota_sum_r)
+		*quota_sum_r = quota_sum_all(qs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_destroy_group);
+
+void xnsched_quota_set_limit(struct xnsched_quota_group *tg,
+			     int quota_percent, int quota_peak_percent,
+			     int *quota_sum_r)
+{
+	struct xnsched *sched = tg->sched;
+	struct xnsched_quota *qs = &sched->quota;
+	xnticks_t old_quota_ns = tg->quota_ns;
+	struct xnthread *thread, *tmp, *curr;
+	xnticks_t now, elapsed, consumed;
+
+	atomic_only();
+
+	trace_cobalt_schedquota_set_limit(tg, quota_percent,
+					  quota_peak_percent);
+
+	if (quota_percent < 0 || quota_percent > 100) { /* Quota off. */
+		quota_percent = 100;
+		tg->quota_ns = qs->period_ns;
+	} else
+		tg->quota_ns = xnarch_div64(qs->period_ns * quota_percent, 100);
+
+	if (quota_peak_percent < quota_percent)
+		quota_peak_percent = quota_percent;
+
+	if (quota_peak_percent < 0 || quota_peak_percent > 100) {
+		quota_peak_percent = 100;
+		tg->quota_peak_ns = qs->period_ns;
+	} else
+		tg->quota_peak_ns = xnarch_div64(qs->period_ns * quota_peak_percent, 100);
+
+	tg->quota_percent = quota_percent;
+	tg->quota_peak_percent = quota_peak_percent;
+
+	curr = sched->curr;
+	if (curr->quota == tg &&
+	    xnthread_test_state(curr, XNREADY|XNTHREAD_BLOCK_BITS) == 0) {
+		now = xnclock_read_monotonic(&nkclock);
+
+		elapsed = now - tg->run_start_ns;
+		if (elapsed < tg->run_budget_ns)
+			tg->run_budget_ns -= elapsed;
+		else
+			tg->run_budget_ns = 0;
+
+		tg->run_start_ns = now;
+
+		xntimer_stop(&qs->limit_timer);
+	}
+
+	if (tg->run_budget_ns <= old_quota_ns)
+		consumed = old_quota_ns - tg->run_budget_ns;
+	else
+		consumed = 0;
+	if (tg->quota_ns >= consumed)
+		tg->run_budget_ns = tg->quota_ns - consumed;
+	else
+		tg->run_budget_ns = 0;
+
+	tg->run_credit_ns = 0;	/* Drop accumulated credit. */
+
+	*quota_sum_r = quota_sum_all(qs);
+
+	if (tg->run_budget_ns > 0) {
+		list_for_each_entry_safe_reverse(thread, tmp, &tg->expired,
+						 quota_expired) {
+			list_del_init(&thread->quota_expired);
+			xnsched_addq(&sched->rt.runnable, thread);
+		}
+	}
+
+	/*
+	 * Apply the new budget immediately, in case a member of this
+	 * group is currently running.
+	 */
+	xnsched_set_resched(sched);
+	xnsched_run();
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_set_limit);
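+
+/*
+ * Worked example for the budget update above (illustrative figures):
+ * if the old quota was 5 ms and 2 ms of budget remained, 3 ms were
+ * consumed in the current interval; lowering the quota to 4 ms
+ * leaves 4 - 3 = 1 ms of budget until the next refill.
+ */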
+
+struct xnsched_quota_group *
+xnsched_quota_find_group(struct xnsched *sched, int tgid)
+{
+	struct xnsched_quota_group *tg;
+
+	atomic_only();
+
+	if (list_empty(&sched->quota.groups))
+		return NULL;
+
+	list_for_each_entry(tg, &sched->quota.groups, next) {
+		if (tg->tgid == tgid)
+			return tg;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_find_group);
+
+int xnsched_quota_sum_all(struct xnsched *sched)
+{
+	struct xnsched_quota *qs = &sched->quota;
+
+	atomic_only();
+
+	return quota_sum_all(qs);
+}
+EXPORT_SYMBOL_GPL(xnsched_quota_sum_all);
+
+/** @} */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_quota_vfroot;
+
+struct vfile_sched_quota_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_quota_data {
+	int cpu;
+	pid_t pid;
+	int prio;
+	int tgid;
+	xnticks_t budget;
+	char name[XNOBJECT_NAME_LEN];
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_quota_ops;
+
+static struct xnvfile_snapshot vfile_sched_quota = {
+	.privsz = sizeof(struct vfile_sched_quota_priv),
+	.datasz = sizeof(struct vfile_sched_quota_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_quota_ops,
+};
+
+static int vfile_sched_quota_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_quota.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_quota_next(struct xnvfile_snapshot_iterator *it,
+				  void *data)
+{
+	struct vfile_sched_quota_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_quota_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_quota)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->tgid = thread->quota->tgid;
+	p->prio = thread->cprio;
+	p->budget = thread->quota->run_budget_ns;
+
+	return 1;
+}
+
+static int vfile_sched_quota_show(struct xnvfile_snapshot_iterator *it,
+				  void *data)
+{
+	struct vfile_sched_quota_data *p = data;
+	char buf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %-4s %-10s %s\n",
+			       "CPU", "PID", "TGID", "PRI", "BUDGET", "NAME");
+	else {
+		xntimer_format_time(p->budget, buf, sizeof(buf));
+		xnvfile_printf(it, "%3u  %-6d %-4d %-4d %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       p->tgid,
+			       p->prio,
+			       buf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_quota_ops = {
+	.rewind = vfile_sched_quota_rewind,
+	.next = vfile_sched_quota_next,
+	.show = vfile_sched_quota_show,
+};
+
+static int xnsched_quota_init_vfile(struct xnsched_class *schedclass,
+				    struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_quota_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_quota,
+				     &sched_quota_vfroot);
+}
+
+static void xnsched_quota_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_quota);
+	xnvfile_destroy_dir(&sched_quota_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_quota = {
+	.sched_init		=	xnsched_quota_init,
+	.sched_enqueue		=	xnsched_quota_enqueue,
+	.sched_dequeue		=	xnsched_quota_dequeue,
+	.sched_requeue		=	xnsched_quota_requeue,
+	.sched_pick		=	xnsched_quota_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	xnsched_quota_migrate,
+	.sched_chkparam		=	xnsched_quota_chkparam,
+	.sched_setparam		=	xnsched_quota_setparam,
+	.sched_getparam		=	xnsched_quota_getparam,
+	.sched_trackprio	=	xnsched_quota_trackprio,
+	.sched_protectprio	=	xnsched_quota_protectprio,
+	.sched_forget		=	xnsched_quota_forget,
+	.sched_kick		=	xnsched_quota_kick,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_quota_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_quota_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(3),
+	.policy			=	SCHED_QUOTA,
+	.name			=	"quota"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_quota);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c
new file mode 100644
index 0000000..2457032
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-rt.c
@@ -0,0 +1,257 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+
+static void xnsched_rt_init(struct xnsched *sched)
+{
+	xnsched_initq(&sched->rt.runnable);
+}
+
+static void xnsched_rt_requeue(struct xnthread *thread)
+{
+	/*
+	 * Put back in the same place: i.e. requeue to the head of the
+	 * current priority group (LIFO order, used for preemption
+	 * handling).
+	 */
+	__xnsched_rt_requeue(thread);
+}
+
+static void xnsched_rt_enqueue(struct xnthread *thread)
+{
+	/*
+	 * Enqueue for the next pick: i.e. move to the end of the
+	 * current priority group (FIFO order).
+	 */
+	__xnsched_rt_enqueue(thread);
+}
+
+static void xnsched_rt_dequeue(struct xnthread *thread)
+{
+	/*
+	 * Pull from the runnable thread queue.
+	 */
+	__xnsched_rt_dequeue(thread);
+}
+
+static void xnsched_rt_rotate(struct xnsched *sched,
+			      const union xnsched_policy_param *p)
+{
+	struct xnthread *thread, *curr;
+
+	if (xnsched_emptyq_p(&sched->rt.runnable))
+		return;	/* No runnable thread in this class. */
+
+	curr = sched->curr;
+
+	if (p->rt.prio == XNSCHED_RUNPRIO)
+		thread = curr;
+	else {
+		thread = xnsched_findq(&sched->rt.runnable, p->rt.prio);
+		if (thread == NULL)
+			return;
+	}
+
+	/*
+	 * In case we picked the current thread, we have to make sure
+	 * not to move it back to the run queue if it was blocked
+	 * before we were called. The same goes if the current thread
+	 * holds the scheduler lock.
+	 */
+	if (thread != curr ||
+	    (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS) &&
+	     curr->lock_count == 0))
+		xnsched_putback(thread);
+}
+
+void xnsched_rt_tick(struct xnsched *sched)
+{
+	/*
+	 * The round-robin time credit is only consumed by a running
+	 * thread that neither holds the scheduler lock nor was
+	 * blocked before entering this callback. As the time slice is
+	 * exhausted for the running thread, move it back to the
+	 * run queue at the end of its priority group.
+	 */
+	xnsched_putback(sched->curr);
+}
+
+static bool xnsched_rt_setparam(struct xnthread *thread,
+				const union xnsched_policy_param *p)
+{
+	return __xnsched_rt_setparam(thread, p);
+}
+
+static void xnsched_rt_getparam(struct xnthread *thread,
+				union xnsched_policy_param *p)
+{
+	__xnsched_rt_getparam(thread, p);
+}
+
+static void xnsched_rt_trackprio(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	__xnsched_rt_trackprio(thread, p);
+}
+
+static void xnsched_rt_protectprio(struct xnthread *thread, int prio)
+{
+	__xnsched_rt_protectprio(thread, prio);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_rt_vfroot;
+
+struct vfile_sched_rt_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_rt_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	xnticks_t period;
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_rt_ops;
+
+static struct xnvfile_snapshot vfile_sched_rt = {
+	.privsz = sizeof(struct vfile_sched_rt_priv),
+	.datasz = sizeof(struct vfile_sched_rt_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_rt_ops,
+};
+
+static int vfile_sched_rt_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_rt.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_rt_next(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_rt_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_rt_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_rt ||
+	    xnthread_test_state(thread, XNWEAK))
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+	p->period = xnthread_get_period(thread);
+
+	return 1;
+}
+
+static int vfile_sched_rt_show(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_rt_data *p = data;
+	char pribuf[16], ptbuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-8s %-10s %s\n",
+			       "CPU", "PID", "PRI", "PERIOD", "NAME");
+	else {
+		ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio);
+		xntimer_format_time(p->period, ptbuf, sizeof(ptbuf));
+		xnvfile_printf(it, "%3u  %-6d %-8s %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       pribuf,
+			       ptbuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_rt_ops = {
+	.rewind = vfile_sched_rt_rewind,
+	.next = vfile_sched_rt_next,
+	.show = vfile_sched_rt_show,
+};
+
+static int xnsched_rt_init_vfile(struct xnsched_class *schedclass,
+				 struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_rt_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_rt,
+				     &sched_rt_vfroot);
+}
+
+static void xnsched_rt_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_rt);
+	xnvfile_destroy_dir(&sched_rt_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_rt = {
+	.sched_init		=	xnsched_rt_init,
+	.sched_enqueue		=	xnsched_rt_enqueue,
+	.sched_dequeue		=	xnsched_rt_dequeue,
+	.sched_requeue		=	xnsched_rt_requeue,
+	.sched_pick		=	xnsched_rt_pick,
+	.sched_tick		=	xnsched_rt_tick,
+	.sched_rotate		=	xnsched_rt_rotate,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_declare		=	NULL,
+	.sched_setparam		=	xnsched_rt_setparam,
+	.sched_trackprio	=	xnsched_rt_trackprio,
+	.sched_protectprio	=	xnsched_rt_protectprio,
+	.sched_getparam		=	xnsched_rt_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_rt_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_rt_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(4),
+	.policy			=	SCHED_FIFO,
+	.name			=	"rt"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_rt);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c
new file mode 100644
index 0000000..77f1d00
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-sporadic.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/uapi/sched.h>
+
+#define MAX_REPLENISH CONFIG_XENO_OPT_SCHED_SPORADIC_MAXREPL
+
+static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+	/*
+	 * This code should pull the brake when a misconfigured
+	 * sporadic thread is late on its drop date more than a
+	 * hundred times in a row. This normally reveals a time budget
+	 * which is too tight.
+	 */
+	XENO_BUG_ON(COBALT, ++sched->pss.drop_retries > 100);
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+	sched->pss.drop_retries = 0;
+}
+
+#else /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline void sporadic_note_late_drop(struct xnsched *sched)
+{
+}
+
+static inline void sporadic_note_valid_drop(struct xnsched *sched)
+{
+}
+
+#endif /* !CONFIG_XENO_OPT_DEBUG_COBALT */
+
+static inline xnticks_t sporadic_diff_time(xnticks_t start, xnticks_t end)
+{
+	xnsticks_t d = (xnsticks_t)(end - start);
+	return unlikely(d < 0) ? -d : d;
+}
+
+static void sporadic_drop_handler(struct xntimer *timer)
+{
+	struct xnsched_sporadic_data *pss;
+	union xnsched_policy_param p;
+	struct xnthread *thread;
+
+	/*
+	 * XXX: this code will work properly regardless of
+	 * primary/secondary mode issues.
+	 */
+	pss = container_of(timer, struct xnsched_sporadic_data, drop_timer);
+	thread = pss->thread;
+
+	sporadic_post_recharge(thread, pss->budget);
+
+	if (pss->budget == 0 && thread->cprio > pss->param.low_prio) {
+		if (pss->param.low_prio < 0)
+			/*
+			 * Special case: low_prio == -1, we want the
+			 * thread to suspend until a replenishment
+			 * happens.
+			 */
+			xnthread_suspend(thread, XNHELD,
+					 XN_INFINITE, XN_RELATIVE, NULL);
+		else {
+			p.pss.init_budget = 0;
+			p.pss.current_prio = pss->param.low_prio;
+			/* Move sporadic thread to the background. */
+			__xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+		}
+	}
+}
+
+static void sporadic_schedule_drop(struct xnthread *thread)
+{
+	xnticks_t now = xnclock_read_monotonic(&nkclock);
+	struct xnsched_sporadic_data *pss = thread->pss;
+	int ret;
+
+	pss->resume_date = now;
+	/*
+	 * Assuming this timer should not fire that often unless the
+	 * monitored thread behaves badly, we don't pin it on the CPU
+	 * the thread is running on, trading cycles at firing time
+	 * against cycles when arming the timer.
+	 */
+	xntimer_set_affinity(&pss->drop_timer, thread->sched);
+	ret = xntimer_start(&pss->drop_timer, now + pss->budget,
+			    XN_INFINITE, XN_ABSOLUTE);
+	if (ret == -ETIMEDOUT) {
+		sporadic_note_late_drop(thread->sched);
+		sporadic_drop_handler(&pss->drop_timer);
+	} else
+		sporadic_note_valid_drop(thread->sched);
+}
+
+static void sporadic_replenish_handler(struct xntimer *timer)
+{
+	struct xnsched_sporadic_data *pss;
+	union xnsched_policy_param p;
+	struct xnthread *thread;
+	xnticks_t now;
+	int r, ret;
+
+	pss = container_of(timer, struct xnsched_sporadic_data, repl_timer);
+	thread = pss->thread;
+	XENO_BUG_ON(COBALT, pss->repl_pending <= 0);
+
+retry:
+	now = xnclock_read_monotonic(&nkclock);
+
+	do {
+		r = pss->repl_out;
+		if ((xnsticks_t)(now - pss->repl_data[r].date) <= 0)
+			break;
+		pss->budget += pss->repl_data[r].amount;
+		if (pss->budget > pss->param.init_budget)
+			pss->budget = pss->param.init_budget;
+		pss->repl_out = (r + 1) % MAX_REPLENISH;
+	} while (--pss->repl_pending > 0);
+
+	if (pss->repl_pending > 0) {
+		xntimer_set_affinity(&pss->repl_timer, thread->sched);
+		ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date,
+				    XN_INFINITE, XN_ABSOLUTE);
+		if (ret == -ETIMEDOUT)
+			goto retry; /* This plugs a tiny race. */
+	}
+
+	if (pss->budget == 0)
+		return;
+
+	if (xnthread_test_state(thread, XNHELD))
+		xnthread_resume(thread, XNHELD);
+	else if (thread->cprio < pss->param.normal_prio) {
+		p.pss.init_budget = 0;
+		p.pss.current_prio = pss->param.normal_prio;
+		/* Move sporadic thread to the foreground. */
+		__xnthread_set_schedparam(thread, &xnsched_class_sporadic, &p);
+	}
+
+	/*
+	 * XXX: we have to reset the drop timer in case we preempted
+	 * the thread which just got a budget increase.
+	 */
+	if (thread->sched->curr == thread)
+		sporadic_schedule_drop(thread);
+}
+
+static void sporadic_post_recharge(struct xnthread *thread, xnticks_t budget)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	int r, ret;
+
+	if (pss->repl_pending >= pss->param.max_repl)
+		return;
+
+	if (budget > pss->budget) {
+		budget = pss->budget;
+		pss->budget = 0;
+	} else
+		pss->budget -= budget;
+
+	r = pss->repl_in;
+	pss->repl_data[r].date = pss->resume_date + pss->param.repl_period;
+	pss->repl_data[r].amount = budget;
+	pss->repl_in = (r + 1) % MAX_REPLENISH;
+
+	if (pss->repl_pending++ == 0) {
+		xntimer_set_affinity(&pss->repl_timer, thread->sched);
+		ret = xntimer_start(&pss->repl_timer, pss->repl_data[r].date,
+				    XN_INFINITE, XN_ABSOLUTE);
+		/*
+		 * The following case should not happen unless the
+		 * initial budget value is inappropriate, but let's
+		 * handle it anyway.
+		 */
+		if (ret == -ETIMEDOUT)
+			sporadic_replenish_handler(&pss->repl_timer);
+	}
+}
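+
+/*
+ * Illustrative note (numbers are hypothetical): recharges queue up
+ * in the repl_data[] ring between repl_in and repl_out. With
+ * repl_period = 100 ms, a thread resumed at time t which consumed
+ * 2 ms of its budget before suspending posts a 2 ms recharge due at
+ * t + 100 ms; sporadic_replenish_handler() later adds that amount
+ * back, capping the budget at init_budget.
+ */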
+
+static void sporadic_suspend_activity(struct xnthread *thread)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	xnticks_t budget, now;
+
+	if (pss->budget > 0) {
+		xntimer_stop(&pss->drop_timer);
+		now = xnclock_read_monotonic(&nkclock);
+		budget = sporadic_diff_time(now, pss->resume_date);
+		sporadic_post_recharge(thread, budget);
+	}
+}
+
+static inline void sporadic_resume_activity(struct xnthread *thread)
+{
+	if (thread->pss->budget > 0)
+		sporadic_schedule_drop(thread);
+}
+
+static void xnsched_sporadic_init(struct xnsched *sched)
+{
+	/*
+	 * We literally stack the sporadic scheduler on top of the RT
+	 * one, reusing its run queue directly. This way, RT and
+	 * sporadic threads are merged into the same runqueue and thus
+	 * share the same priority scale, with the addition of budget
+	 * management for the sporadic ones.
+	 */
+#ifdef CONFIG_XENO_OPT_DEBUG_COBALT
+	sched->pss.drop_retries = 0;
+#endif
+}
+
+static bool xnsched_sporadic_setparam(struct xnthread *thread,
+				      const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+	bool effective;
+
+	xnthread_clear_state(thread, XNWEAK);
+	effective = xnsched_set_effective_priority(thread, p->pss.current_prio);
+
+	/*
+	 * We use the budget information to determine whether we got
+	 * here from one of our internal calls to
+	 * xnthread_set_schedparam(), in which case we don't want to
+	 * update the scheduling parameters, but only set the
+	 * effective priority.
+	 */
+	if (p->pss.init_budget > 0) {
+		pss->param = p->pss;
+		pss->budget = p->pss.init_budget;
+		pss->repl_in = 0;
+		pss->repl_out = 0;
+		pss->repl_pending = 0;
+		if (effective && thread == thread->sched->curr) {
+			xntimer_stop(&pss->drop_timer);
+			sporadic_schedule_drop(thread);
+		}
+	}
+
+	return effective;
+}
+
+static void xnsched_sporadic_getparam(struct xnthread *thread,
+				      union xnsched_policy_param *p)
+{
+	p->pss = thread->pss->param;
+	p->pss.current_prio = thread->cprio;
+}
+
+static void xnsched_sporadic_trackprio(struct xnthread *thread,
+				       const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->pss.current_prio;
+	else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_sporadic_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_SPORADIC_MAX_PRIO)
+		prio = XNSCHED_SPORADIC_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_sporadic_chkparam(struct xnthread *thread,
+				     const union xnsched_policy_param *p)
+{
+	if (p->pss.low_prio != -1 &&
+	    (p->pss.low_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	     p->pss.low_prio > XNSCHED_SPORADIC_MAX_PRIO))
+		return -EINVAL;
+
+	if (p->pss.normal_prio < XNSCHED_SPORADIC_MIN_PRIO ||
+	    p->pss.normal_prio > XNSCHED_SPORADIC_MAX_PRIO)
+		return -EINVAL;
+
+	if (p->pss.init_budget == 0)
+		return -EINVAL;
+
+	if (p->pss.current_prio != p->pss.normal_prio)
+		return -EINVAL;
+
+	if (p->pss.repl_period < p->pss.init_budget)
+		return -EINVAL;
+
+	if (p->pss.normal_prio <= p->pss.low_prio)
+		return -EINVAL;
+
+	if (p->pss.max_repl < 1 || p->pss.max_repl > MAX_REPLENISH)
+		return -EINVAL;
+
+	return 0;
+}
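+
+/*
+ * Example of a parameter set passing the above checks (illustrative
+ * values only): normal_prio = current_prio = 50, low_prio = 20 (or
+ * -1 to have the thread suspend on depletion), init_budget = 5 ms,
+ * repl_period = 20 ms, max_repl = 4 (assuming MAX_REPLENISH >= 4).
+ */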
+
+static int xnsched_sporadic_declare(struct xnthread *thread,
+				    const union xnsched_policy_param *p)
+{
+	struct xnsched_sporadic_data *pss;
+
+	pss = xnmalloc(sizeof(*pss));
+	if (pss == NULL)
+		return -ENOMEM;
+
+	xntimer_init(&pss->repl_timer, &nkclock, sporadic_replenish_handler,
+		     thread->sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&pss->repl_timer, "pss-replenish");
+	xntimer_init(&pss->drop_timer, &nkclock, sporadic_drop_handler,
+		     thread->sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&pss->drop_timer, "pss-drop");
+
+	thread->pss = pss;
+	pss->thread = thread;
+
+	return 0;
+}
+
+static void xnsched_sporadic_forget(struct xnthread *thread)
+{
+	struct xnsched_sporadic_data *pss = thread->pss;
+
+	xntimer_destroy(&pss->repl_timer);
+	xntimer_destroy(&pss->drop_timer);
+	xnfree(pss);
+	thread->pss = NULL;
+}
+
+static void xnsched_sporadic_enqueue(struct xnthread *thread)
+{
+	__xnsched_rt_enqueue(thread);
+}
+
+static void xnsched_sporadic_dequeue(struct xnthread *thread)
+{
+	__xnsched_rt_dequeue(thread);
+}
+
+static void xnsched_sporadic_requeue(struct xnthread *thread)
+{
+	__xnsched_rt_requeue(thread);
+}
+
+static struct xnthread *xnsched_sporadic_pick(struct xnsched *sched)
+{
+	struct xnthread *curr = sched->curr, *next;
+
+	next = xnsched_getq(&sched->rt.runnable);
+	if (next == NULL)
+		goto swap;
+
+	if (curr == next)
+		return next;
+
+	/* Arm the drop timer for an incoming sporadic thread. */
+	if (next->pss)
+		sporadic_resume_activity(next);
+swap:
+	/*
+	 * A non-sporadic outgoing thread undergoing a priority
+	 * inheritance boost gets an infinite time budget, since we
+	 * want it to release the claimed resource as soon as
+	 * possible. Otherwise,
+	 * clear the drop timer, then schedule a replenishment
+	 * operation.
+	 */
+	if (curr->pss)
+		sporadic_suspend_activity(curr);
+
+	return next;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_sporadic_vfroot;
+
+struct vfile_sched_sporadic_priv {
+	int nrthreads;
+	struct xnthread *curr;
+};
+
+struct vfile_sched_sporadic_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int current_prio;
+	int low_prio;
+	int normal_prio;
+	xnticks_t period;
+	xnticks_t timeout;
+	xnticks_t budget;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops;
+
+static struct xnvfile_snapshot vfile_sched_sporadic = {
+	.privsz = sizeof(struct vfile_sched_sporadic_priv),
+	.datasz = sizeof(struct vfile_sched_sporadic_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_sporadic_ops,
+};
+
+static int vfile_sched_sporadic_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_sporadic.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_sporadic_next(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	struct vfile_sched_sporadic_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_sporadic_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_sporadic)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->current_prio = thread->cprio;
+	p->low_prio = thread->pss->param.low_prio;
+	p->normal_prio = thread->pss->param.normal_prio;
+	p->period = xnthread_get_period(thread);
+	p->budget = thread->pss->param.init_budget;
+
+	return 1;
+}
+
+static int vfile_sched_sporadic_show(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	char lpbuf[16], npbuf[16], ptbuf[16], btbuf[16];
+	struct vfile_sched_sporadic_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-4s %-4s  %-10s %-10s %s\n",
+			       "CPU", "PID", "LPRI", "NPRI", "BUDGET",
+			       "PERIOD", "NAME");
+	else {
+		ksformat(lpbuf, sizeof(lpbuf), "%3d%c",
+			 p->low_prio, p->current_prio == p->low_prio ? '*' : ' ');
+
+		ksformat(npbuf, sizeof(npbuf), "%3d%c",
+			 p->normal_prio, p->current_prio == p->normal_prio ? '*' : ' ');
+
+		xntimer_format_time(p->period, ptbuf, sizeof(ptbuf));
+		xntimer_format_time(p->budget, btbuf, sizeof(btbuf));
+
+		xnvfile_printf(it,
+			       "%3u  %-6d %-4s %-4s  %-10s %-10s %s\n",
+			       p->cpu,
+			       p->pid,
+			       lpbuf,
+			       npbuf,
+			       btbuf,
+			       ptbuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_sporadic_ops = {
+	.rewind = vfile_sched_sporadic_rewind,
+	.next = vfile_sched_sporadic_next,
+	.show = vfile_sched_sporadic_show,
+};
+
+static int xnsched_sporadic_init_vfile(struct xnsched_class *schedclass,
+				       struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name,
+			       &sched_sporadic_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_sporadic,
+				     &sched_sporadic_vfroot);
+}
+
+static void xnsched_sporadic_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_sporadic);
+	xnvfile_destroy_dir(&sched_sporadic_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_sporadic = {
+	.sched_init		=	xnsched_sporadic_init,
+	.sched_enqueue		=	xnsched_sporadic_enqueue,
+	.sched_dequeue		=	xnsched_sporadic_dequeue,
+	.sched_requeue		=	xnsched_sporadic_requeue,
+	.sched_pick		=	xnsched_sporadic_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	NULL,
+	.sched_chkparam		=	xnsched_sporadic_chkparam,
+	.sched_setparam		=	xnsched_sporadic_setparam,
+	.sched_getparam		=	xnsched_sporadic_getparam,
+	.sched_trackprio	=	xnsched_sporadic_trackprio,
+	.sched_protectprio	=	xnsched_sporadic_protectprio,
+	.sched_declare		=	xnsched_sporadic_declare,
+	.sched_forget		=	xnsched_sporadic_forget,
+	.sched_kick		=	NULL,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_sporadic_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_sporadic_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(3),
+	.policy			=	SCHED_SPORADIC,
+	.name			=	"pss"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_sporadic);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c
new file mode 100644
index 0000000..ccff374
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-tp.c
@@ -0,0 +1,464 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/uapi/sched.h>
+
+static void tp_schedule_next(struct xnsched_tp *tp)
+{
+	struct xnsched_tp_window *w;
+	struct xnsched *sched;
+	int p_next, ret;
+	xnticks_t t;
+
+	for (;;) {
+		/*
+		 * Switch to the next partition. Time holes in a
+		 * global time frame are defined as partition windows
+		 * assigned to part# -1, in which case the (always
+		 * empty) idle queue will be polled for runnable
+		 * threads.  Therefore, we may assume that a window
+		 * begins immediately after the previous one ends,
+		 * which simplifies the implementation a lot.
+		 */
+		w = &tp->gps->pwins[tp->wnext];
+		p_next = w->w_part;
+		tp->tps = p_next < 0 ? &tp->idle : &tp->partitions[p_next];
+
+		/* Schedule tick to advance to the next window. */
+		tp->wnext = (tp->wnext + 1) % tp->gps->pwin_nr;
+		w = &tp->gps->pwins[tp->wnext];
+		t = tp->tf_start + w->w_offset;
+
+		ret = xntimer_start(&tp->tf_timer, t, XN_INFINITE, XN_ABSOLUTE);
+		if (ret != -ETIMEDOUT)
+			break;
+		/*
+		 * We are late, make sure to remain within the bounds
+		 * of a valid time frame before advancing to the next
+		 * window. Otherwise, fix up by advancing to the next
+		 * time frame immediately.
+		 */
+		for (;;) {
+			t = tp->tf_start + tp->gps->tf_duration;
+			if (xnclock_read_monotonic(&nkclock) > t) {
+				tp->tf_start = t;
+				tp->wnext = 0;
+			} else
+				break;
+		}
+	}
+
+	sched = container_of(tp, struct xnsched, tp);
+	xnsched_set_resched(sched);
+}
+
+static void tp_tick_handler(struct xntimer *timer)
+{
+	struct xnsched_tp *tp = container_of(timer, struct xnsched_tp, tf_timer);
+	/*
+	 * Advance beginning date of time frame by a full period if we
+	 * are processing the last window.
+	 */
+	if (tp->wnext + 1 == tp->gps->pwin_nr)
+		tp->tf_start += tp->gps->tf_duration;
+
+	tp_schedule_next(tp);
+}
+
+static void xnsched_tp_init(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+	char timer_name[XNOBJECT_NAME_LEN];
+	int n;
+
+	for (n = 0; n < CONFIG_XENO_OPT_SCHED_TP_NRPART; n++)
+		xnsched_initq(&tp->partitions[n].runnable);
+
+	xnsched_initq(&tp->idle.runnable);
+
+#ifdef CONFIG_SMP
+	ksformat(timer_name, sizeof(timer_name), "[tp-tick/%u]", sched->cpu);
+#else
+	strcpy(timer_name, "[tp-tick]");
+#endif
+	tp->tps = NULL;
+	tp->gps = NULL;
+	INIT_LIST_HEAD(&tp->threads);
+	xntimer_init(&tp->tf_timer, &nkclock, tp_tick_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&tp->tf_timer, timer_name);
+}
+
+static bool xnsched_tp_setparam(struct xnthread *thread,
+				const union xnsched_policy_param *p)
+{
+	struct xnsched *sched = thread->sched;
+
+	thread->tps = &sched->tp.partitions[p->tp.ptid];
+	xnthread_clear_state(thread, XNWEAK);
+
+	return xnsched_set_effective_priority(thread, p->tp.prio);
+}
+
+static void xnsched_tp_getparam(struct xnthread *thread,
+				union xnsched_policy_param *p)
+{
+	p->tp.prio = thread->cprio;
+	p->tp.ptid = thread->tps - thread->sched->tp.partitions;
+}
+
+static void xnsched_tp_trackprio(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	/*
+	 * The assigned partition never changes internally due to PI
+	 * (see xnsched_track_policy), since this would be pretty
+	 * wrong with respect to TP scheduling: i.e. we may not allow
+	 * a thread from another partition to consume CPU time from
+	 * the current one, even though this would help enforce PI (see
+	 * note). In any case, introducing resource contention between
+	 * threads that belong to different partitions is utterly
+	 * wrong in the first place.  Only an explicit call to
+	 * xnsched_set_policy() may change the partition assigned to a
+	 * thread. For that reason, a policy reset action only boils
+	 * down to reinstating the base priority.
+	 *
+	 * NOTE: we do allow threads from lower scheduling classes to
+	 * consume CPU time from the current window as a result of a
+	 * PI boost, since this is aimed at speeding up the release of
+	 * a synchronization object a TP thread needs.
+	 */
+	if (p) {
+		/* We should never cross partition boundaries. */
+		XENO_WARN_ON(COBALT,
+			   thread->base_class == &xnsched_class_tp &&
+			   thread->tps - thread->sched->tp.partitions != p->tp.ptid);
+		thread->cprio = p->tp.prio;
+	} else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_tp_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_TP_MAX_PRIO)
+		prio = XNSCHED_TP_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_tp_chkparam(struct xnthread *thread,
+			       const union xnsched_policy_param *p)
+{
+	struct xnsched_tp *tp = &thread->sched->tp;
+
+	if (p->tp.ptid < 0 ||
+	    p->tp.ptid >= CONFIG_XENO_OPT_SCHED_TP_NRPART)
+		return -EINVAL;
+
+	if (tp->gps == NULL ||
+	    p->tp.prio < XNSCHED_TP_MIN_PRIO ||
+	    p->tp.prio > XNSCHED_TP_MAX_PRIO)
+		return -EINVAL;
+
+	return 0;
+}
+
+static int xnsched_tp_declare(struct xnthread *thread,
+			      const union xnsched_policy_param *p)
+{
+	struct xnsched *sched = thread->sched;
+
+	list_add_tail(&thread->tp_link, &sched->tp.threads);
+
+	return 0;
+}
+
+static void xnsched_tp_forget(struct xnthread *thread)
+{
+	list_del(&thread->tp_link);
+	thread->tps = NULL;
+}
+
+static void xnsched_tp_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->tps->runnable, thread);
+}
+
+static void xnsched_tp_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->tps->runnable, thread);
+}
+
+static void xnsched_tp_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->tps->runnable, thread);
+}
+
+static struct xnthread *xnsched_tp_pick(struct xnsched *sched)
+{
+	/* Never pick a thread if we don't schedule partitions. */
+	if (!xntimer_running_p(&sched->tp.tf_timer))
+		return NULL;
+
+	return xnsched_getq(&sched->tp.tps->runnable);
+}
+
+static void xnsched_tp_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	union xnsched_policy_param param;
+	/*
+	 * Since our partition schedule is a per-scheduler property,
+	 * it can no longer apply to a thread that moves to another
+	 * CPU. So we upgrade that thread to the RT class when a
+	 * CPU migration occurs. A subsequent call to
+	 * __xnthread_set_schedparam() may move it back to TP
+	 * scheduling, with a partition assignment that fits the
+	 * remote CPU's partition schedule.
+	 */
+	param.rt.prio = thread->cprio;
+	__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+}
+
+void xnsched_tp_start_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->gps == NULL)
+		return;
+
+	tp->wnext = 0;
+	tp->tf_start = xnclock_read_monotonic(&nkclock);
+	tp_schedule_next(tp);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_start_schedule);
+
+void xnsched_tp_stop_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->gps)
+		xntimer_stop(&tp->tf_timer);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_stop_schedule);
+
+struct xnsched_tp_schedule *
+xnsched_tp_set_schedule(struct xnsched *sched,
+			struct xnsched_tp_schedule *gps)
+{
+	struct xnsched_tp_schedule *old_gps;
+	struct xnsched_tp *tp = &sched->tp;
+	union xnsched_policy_param param;
+	struct xnthread *thread, *tmp;
+
+	XENO_BUG_ON(COBALT, gps != NULL &&
+		   (gps->pwin_nr <= 0 || gps->pwins[0].w_offset != 0));
+
+	xnsched_tp_stop_schedule(sched);
+
+	/*
+	 * Move all TP threads on this scheduler to the RT class,
+	 * until we call __xnthread_set_schedparam() for them again.
+	 */
+	if (list_empty(&tp->threads))
+		goto done;
+
+	list_for_each_entry_safe(thread, tmp, &tp->threads, tp_link) {
+		param.rt.prio = thread->cprio;
+		__xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+	}
+done:
+	old_gps = tp->gps;
+	tp->gps = gps;
+
+	return old_gps;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_set_schedule);
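+
+/*
+ * Usage sketch (hypothetical; assumes the schedule is allocated from
+ * the Cobalt heap with a trailing window array, and the nklock is
+ * held): build a 10 ms frame split into a 6 ms window for partition
+ * #0 followed by a 4 ms window for partition #1, then install and
+ * start it:
+ *
+ *	gps = xnmalloc(sizeof(*gps) + 2 * sizeof(gps->pwins[0]));
+ *	gps->pwin_nr = 2;
+ *	gps->tf_duration = 10000000ULL;
+ *	gps->pwins[0].w_offset = 0;	(first window must start at 0)
+ *	gps->pwins[0].w_part = 0;
+ *	gps->pwins[1].w_offset = 6000000ULL;
+ *	gps->pwins[1].w_part = 1;
+ *	atomic_set(&gps->refcount, 1);
+ *	old_gps = xnsched_tp_set_schedule(sched, gps);
+ *	xnsched_tp_start_schedule(sched);
+ */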
+
+struct xnsched_tp_schedule *
+xnsched_tp_get_schedule(struct xnsched *sched)
+{
+	struct xnsched_tp_schedule *gps;
+
+	gps = sched->tp.gps;
+	if (gps == NULL)
+		return NULL;
+
+	atomic_inc(&gps->refcount);
+
+	return gps;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_get_schedule);
+
+void xnsched_tp_put_schedule(struct xnsched_tp_schedule *gps)
+{
+	if (atomic_dec_and_test(&gps->refcount))
+		xnfree(gps);
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_put_schedule);
+
+int xnsched_tp_get_partition(struct xnsched *sched)
+{
+	struct xnsched_tp *tp = &sched->tp;
+
+	if (tp->tps == NULL || tp->tps == &tp->idle)
+		return -1;
+
+	return tp->tps - tp->partitions;
+}
+EXPORT_SYMBOL_GPL(xnsched_tp_get_partition);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_tp_vfroot;
+
+struct vfile_sched_tp_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_tp_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int prio;
+	int ptid;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_tp_ops;
+
+static struct xnvfile_snapshot vfile_sched_tp = {
+	.privsz = sizeof(struct vfile_sched_tp_priv),
+	.datasz = sizeof(struct vfile_sched_tp_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_tp_ops,
+};
+
+static int vfile_sched_tp_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_tp.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_tp_next(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_tp_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_tp_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_tp)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->ptid = thread->tps - thread->sched->tp.partitions;
+	p->prio = thread->cprio;
+
+	return 1;
+}
+
+static int vfile_sched_tp_show(struct xnvfile_snapshot_iterator *it,
+			       void *data)
+{
+	struct vfile_sched_tp_data *p = data;
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %-4s  %s\n",
+			       "CPU", "PID", "PTID", "PRI", "NAME");
+	else
+		xnvfile_printf(it, "%3u  %-6d %-4d %-4d  %s\n",
+			       p->cpu,
+			       p->pid,
+			       p->ptid,
+			       p->prio,
+			       p->name);
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_tp_ops = {
+	.rewind = vfile_sched_tp_rewind,
+	.next = vfile_sched_tp_next,
+	.show = vfile_sched_tp_show,
+};
+
+static int xnsched_tp_init_vfile(struct xnsched_class *schedclass,
+				 struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_tp_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_tp,
+				     &sched_tp_vfroot);
+}
+
+static void xnsched_tp_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_tp);
+	xnvfile_destroy_dir(&sched_tp_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_tp = {
+	.sched_init		=	xnsched_tp_init,
+	.sched_enqueue		=	xnsched_tp_enqueue,
+	.sched_dequeue		=	xnsched_tp_dequeue,
+	.sched_requeue		=	xnsched_tp_requeue,
+	.sched_pick		=	xnsched_tp_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_migrate		=	xnsched_tp_migrate,
+	.sched_chkparam		=	xnsched_tp_chkparam,
+	.sched_setparam		=	xnsched_tp_setparam,
+	.sched_getparam		=	xnsched_tp_getparam,
+	.sched_trackprio	=	xnsched_tp_trackprio,
+	.sched_protectprio	=	xnsched_tp_protectprio,
+	.sched_declare		=	xnsched_tp_declare,
+	.sched_forget		=	xnsched_tp_forget,
+	.sched_kick		=	NULL,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_tp_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_tp_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(2),
+	.policy			=	SCHED_TP,
+	.name			=	"tp"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_tp);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c
new file mode 100644
index 0000000..dd6a78e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched-weak.c
@@ -0,0 +1,224 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <cobalt/kernel/sched.h>
+#include <cobalt/uapi/sched.h>
+
+static void xnsched_weak_init(struct xnsched *sched)
+{
+	xnsched_initq(&sched->weak.runnable);
+}
+
+static void xnsched_weak_requeue(struct xnthread *thread)
+{
+	xnsched_addq(&thread->sched->weak.runnable, thread);
+}
+
+static void xnsched_weak_enqueue(struct xnthread *thread)
+{
+	xnsched_addq_tail(&thread->sched->weak.runnable, thread);
+}
+
+static void xnsched_weak_dequeue(struct xnthread *thread)
+{
+	xnsched_delq(&thread->sched->weak.runnable, thread);
+}
+
+static struct xnthread *xnsched_weak_pick(struct xnsched *sched)
+{
+	return xnsched_getq(&sched->weak.runnable);
+}
+
+static bool xnsched_weak_setparam(struct xnthread *thread,
+				  const union xnsched_policy_param *p)
+{
+	if (!xnthread_test_state(thread, XNBOOST))
+		xnthread_set_state(thread, XNWEAK);
+
+	return xnsched_set_effective_priority(thread, p->weak.prio);
+}
+
+static void xnsched_weak_getparam(struct xnthread *thread,
+				  union xnsched_policy_param *p)
+{
+	p->weak.prio = thread->cprio;
+}
+
+static void xnsched_weak_trackprio(struct xnthread *thread,
+				   const union xnsched_policy_param *p)
+{
+	if (p)
+		thread->cprio = p->weak.prio;
+	else
+		thread->cprio = thread->bprio;
+}
+
+static void xnsched_weak_protectprio(struct xnthread *thread, int prio)
+{
+	if (prio > XNSCHED_WEAK_MAX_PRIO)
+		prio = XNSCHED_WEAK_MAX_PRIO;
+
+	thread->cprio = prio;
+}
+
+static int xnsched_weak_chkparam(struct xnthread *thread,
+				 const union xnsched_policy_param *p)
+{
+	if (p->weak.prio < XNSCHED_WEAK_MIN_PRIO ||
+	    p->weak.prio > XNSCHED_WEAK_MAX_PRIO)
+		return -EINVAL;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+struct xnvfile_directory sched_weak_vfroot;
+
+struct vfile_sched_weak_priv {
+	struct xnthread *curr;
+};
+
+struct vfile_sched_weak_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops;
+
+static struct xnvfile_snapshot vfile_sched_weak = {
+	.privsz = sizeof(struct vfile_sched_weak_priv),
+	.datasz = sizeof(struct vfile_sched_weak_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_sched_weak_ops,
+};
+
+static int vfile_sched_weak_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+	int nrthreads = xnsched_class_weak.nthreads;
+
+	if (nrthreads == 0)
+		return -ESRCH;
+
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+
+	return nrthreads;
+}
+
+static int vfile_sched_weak_next(struct xnvfile_snapshot_iterator *it,
+				 void *data)
+{
+	struct vfile_sched_weak_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_sched_weak_data *p = data;
+	struct xnthread *thread;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	if (thread->base_class != &xnsched_class_weak)
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+
+	return 1;
+}
+
+static int vfile_sched_weak_show(struct xnvfile_snapshot_iterator *it,
+				 void *data)
+{
+	struct vfile_sched_weak_data *p = data;
+	char pribuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it, "%-3s  %-6s %-4s %s\n",
+			       "CPU", "PID", "PRI", "NAME");
+	else {
+		ksformat(pribuf, sizeof(pribuf), "%3d", p->cprio);
+		xnvfile_printf(it, "%3u  %-6d %-4s %s\n",
+			       p->cpu,
+			       p->pid,
+			       pribuf,
+			       p->name);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_sched_weak_ops = {
+	.rewind = vfile_sched_weak_rewind,
+	.next = vfile_sched_weak_next,
+	.show = vfile_sched_weak_show,
+};
+
+static int xnsched_weak_init_vfile(struct xnsched_class *schedclass,
+				   struct xnvfile_directory *vfroot)
+{
+	int ret;
+
+	ret = xnvfile_init_dir(schedclass->name, &sched_weak_vfroot, vfroot);
+	if (ret)
+		return ret;
+
+	return xnvfile_init_snapshot("threads", &vfile_sched_weak,
+				     &sched_weak_vfroot);
+}
+
+static void xnsched_weak_cleanup_vfile(struct xnsched_class *schedclass)
+{
+	xnvfile_destroy_snapshot(&vfile_sched_weak);
+	xnvfile_destroy_dir(&sched_weak_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct xnsched_class xnsched_class_weak = {
+	.sched_init		=	xnsched_weak_init,
+	.sched_enqueue		=	xnsched_weak_enqueue,
+	.sched_dequeue		=	xnsched_weak_dequeue,
+	.sched_requeue		=	xnsched_weak_requeue,
+	.sched_pick		=	xnsched_weak_pick,
+	.sched_tick		=	NULL,
+	.sched_rotate		=	NULL,
+	.sched_forget		=	NULL,
+	.sched_kick		=	NULL,
+	.sched_chkparam		=	xnsched_weak_chkparam,
+	.sched_setparam		=	xnsched_weak_setparam,
+	.sched_trackprio	=	xnsched_weak_trackprio,
+	.sched_protectprio	=	xnsched_weak_protectprio,
+	.sched_getparam		=	xnsched_weak_getparam,
+#ifdef CONFIG_XENO_OPT_VFILE
+	.sched_init_vfile	=	xnsched_weak_init_vfile,
+	.sched_cleanup_vfile	=	xnsched_weak_cleanup_vfile,
+#endif
+	.weight			=	XNSCHED_CLASS_WEIGHT(1),
+	.policy			=	SCHED_WEAK,
+	.name			=	"weak"
+};
+EXPORT_SYMBOL_GPL(xnsched_class_weak);
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c b/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c
new file mode 100644
index 0000000..aa65fd7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/sched.c
@@ -0,0 +1,1493 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/signal.h>
+#include <linux/wait.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/arith.h>
+#include <cobalt/uapi/signal.h>
+#include <pipeline/sched.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_sched Thread scheduling control
+ * @{
+ */
+
+DEFINE_PER_CPU(struct xnsched, nksched);
+EXPORT_PER_CPU_SYMBOL_GPL(nksched);
+
+cpumask_t cobalt_cpu_affinity = CPU_MASK_ALL;
+EXPORT_SYMBOL_GPL(cobalt_cpu_affinity);
+
+LIST_HEAD(nkthreadq);
+
+int cobalt_nrthreads;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct xnvfile_rev_tag nkthreadlist_tag;
+#endif
+
+static struct xnsched_class *xnsched_class_highest;
+
+#define for_each_xnsched_class(p) \
+   for (p = xnsched_class_highest; p; p = p->next)
+
+static void xnsched_register_class(struct xnsched_class *sched_class)
+{
+	sched_class->next = xnsched_class_highest;
+	xnsched_class_highest = sched_class;
+
+	/*
+	 * Classes shall be registered by increasing priority order,
+	 * idle first and up.
+	 */
+	XENO_BUG_ON(COBALT, sched_class->next &&
+		   sched_class->next->weight > sched_class->weight);
+
+	printk(XENO_INFO "scheduling class %s registered.\n", sched_class->name);
+}
+
+void xnsched_register_classes(void)
+{
+	xnsched_register_class(&xnsched_class_idle);
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	xnsched_register_class(&xnsched_class_weak);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_TP
+	xnsched_register_class(&xnsched_class_tp);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_SPORADIC
+	xnsched_register_class(&xnsched_class_sporadic);
+#endif
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+	xnsched_register_class(&xnsched_class_quota);
+#endif
+	xnsched_register_class(&xnsched_class_rt);
+}
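+
+/*
+ * Note (derived from the class definitions above): registration runs
+ * from the lowest weight upward, so scans starting at
+ * xnsched_class_highest proceed by decreasing priority: rt (4), then
+ * quota/sporadic (3), tp (2), weak (1) and idle last.
+ */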
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+
+static unsigned long wd_timeout_arg = CONFIG_XENO_OPT_WATCHDOG_TIMEOUT;
+module_param_named(watchdog_timeout, wd_timeout_arg, ulong, 0644);
+
+static inline xnticks_t get_watchdog_timeout(void)
+{
+	return wd_timeout_arg * 1000000000ULL;
+}
+
+/**
+ * @internal
+ * @fn void watchdog_handler(struct xntimer *timer)
+ * @brief Process watchdog ticks.
+ *
+ * This internal routine handles incoming watchdog triggers to detect
+ * software lockups. It forces the offending thread to stop
+ * monopolizing the CPU, either by kicking it out of primary mode if
+ * running in user space, or cancelling it if kernel-based.
+ *
+ * @coretags{coreirq-only, atomic-entry}
+ */
+static void watchdog_handler(struct xntimer *timer)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnthread *curr = sched->curr;
+
+	/*
+	 * CAUTION: The watchdog tick might have been delayed while we
+	 * were busy switching the CPU to secondary mode, so it may
+	 * fire after the trigger date has passed. Make sure that we
+	 * are not about to
+	 * kick the incoming root thread.
+	 */
+	if (xnthread_test_state(curr, XNROOT))
+		return;
+
+	trace_cobalt_watchdog_signal(curr);
+
+	if (xnthread_test_state(curr, XNUSER)) {
+		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
+		       "'%s' signaled\n", xnsched_cpu(sched), curr->name);
+		xnthread_call_mayday(curr, SIGDEBUG_WATCHDOG);
+	} else {
+		printk(XENO_WARNING "watchdog triggered on CPU #%d -- runaway thread "
+		       "'%s' canceled\n", xnsched_cpu(sched), curr->name);
+		/*
+		 * On behalf of an IRQ handler, xnthread_cancel()
+		 * would only go halfway through cancelling the preempted
+		 * thread. Therefore we manually raise XNKICKED to
+		 * cause the next call to xnthread_suspend() to return
+		 * early in XNBREAK condition, and XNCANCELD so that
+		 * @thread exits next time it invokes
+		 * xnthread_test_cancel().
+		 */
+		xnthread_set_info(curr, XNKICKED|XNCANCELD);
+	}
+}
+
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+
+static void roundrobin_handler(struct xntimer *timer)
+{
+	struct xnsched *sched = container_of(timer, struct xnsched, rrbtimer);
+	xnsched_tick(sched);
+}
+
+static void xnsched_init(struct xnsched *sched, int cpu)
+{
+	char rrbtimer_name[XNOBJECT_NAME_LEN];
+	char htimer_name[XNOBJECT_NAME_LEN];
+	char root_name[XNOBJECT_NAME_LEN];
+	union xnsched_policy_param param;
+	struct xnthread_init_attr attr;
+	struct xnsched_class *p;
+
+#ifdef CONFIG_SMP
+	sched->cpu = cpu;
+	ksformat(htimer_name, sizeof(htimer_name), "[host-timer/%u]", cpu);
+	ksformat(rrbtimer_name, sizeof(rrbtimer_name), "[rrb-timer/%u]", cpu);
+	ksformat(root_name, sizeof(root_name), "ROOT/%u", cpu);
+	cpumask_clear(&sched->resched);
+#else
+	strcpy(htimer_name, "[host-timer]");
+	strcpy(rrbtimer_name, "[rrb-timer]");
+	strcpy(root_name, "ROOT");
+#endif
+	for_each_xnsched_class(p) {
+		if (p->sched_init)
+			p->sched_init(sched);
+	}
+
+	sched->status = 0;
+	sched->lflags = XNIDLE;
+	sched->inesting = 0;
+	sched->curr = &sched->rootcb;
+
+	attr.flags = XNROOT | XNFPU;
+	attr.name = root_name;
+	attr.personality = &xenomai_personality;
+	attr.affinity = *cpumask_of(cpu);
+	param.idle.prio = XNSCHED_IDLE_PRIO;
+
+	__xnthread_init(&sched->rootcb, &attr,
+			sched, &xnsched_class_idle, &param);
+
+	/*
+	 * No direct handler here since the host timer processing is
+	 * postponed to xnintr_irq_handler(), as part of the interrupt
+	 * exit code.
+	 */
+	xntimer_init(&sched->htimer, &nkclock, NULL,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_priority(&sched->htimer, XNTIMER_LOPRIO);
+	xntimer_set_name(&sched->htimer, htimer_name);
+	xntimer_init(&sched->rrbtimer, &nkclock, roundrobin_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&sched->rrbtimer, rrbtimer_name);
+	xntimer_set_priority(&sched->rrbtimer, XNTIMER_LOPRIO);
+
+	xnstat_exectime_set_current(sched, &sched->rootcb.stat.account);
+#ifdef CONFIG_XENO_ARCH_FPU
+	sched->fpuholder = &sched->rootcb;
+#endif /* CONFIG_XENO_ARCH_FPU */
+
+	pipeline_init_root_tcb(&sched->rootcb);
+	list_add_tail(&sched->rootcb.glink, &nkthreadq);
+	cobalt_nrthreads++;
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_init(&sched->wdtimer, &nkclock, watchdog_handler,
+		     sched, XNTIMER_IGRAVITY);
+	xntimer_set_name(&sched->wdtimer, "[watchdog]");
+	xntimer_set_priority(&sched->wdtimer, XNTIMER_LOPRIO);
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+}
+
+void xnsched_init_all(void)
+{
+	struct xnsched *sched;
+	int cpu;
+
+	for_each_online_cpu(cpu) {
+		sched = &per_cpu(nksched, cpu);
+		xnsched_init(sched, cpu);
+	}
+
+	pipeline_request_resched_ipi(__xnsched_run_handler);
+}
+
+static void xnsched_destroy(struct xnsched *sched)
+{
+	xntimer_destroy(&sched->htimer);
+	xntimer_destroy(&sched->rrbtimer);
+	xntimer_destroy(&sched->rootcb.ptimer);
+	xntimer_destroy(&sched->rootcb.rtimer);
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_destroy(&sched->wdtimer);
+#endif /* CONFIG_XENO_OPT_WATCHDOG */
+}
+
+void xnsched_destroy_all(void)
+{
+	struct xnthread *thread, *tmp;
+	struct xnsched *sched;
+	int cpu;
+	spl_t s;
+
+	pipeline_free_resched_ipi();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/* NOTE: &nkthreadq can't be empty (root thread(s)). */
+	list_for_each_entry_safe(thread, tmp, &nkthreadq, glink) {
+		if (!xnthread_test_state(thread, XNROOT))
+			xnthread_cancel(thread);
+	}
+
+	xnsched_run();
+
+	for_each_online_cpu(cpu) {
+		sched = xnsched_struct(cpu);
+		xnsched_destroy(sched);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+static inline void set_thread_running(struct xnsched *sched,
+				      struct xnthread *thread)
+{
+	xnthread_clear_state(thread, XNREADY);
+	if (xnthread_test_state(thread, XNRRB))
+		xntimer_start(&sched->rrbtimer,
+			      thread->rrperiod, XN_INFINITE, XN_RELATIVE);
+	else
+		xntimer_stop(&sched->rrbtimer);
+}
+
+/* Must be called with nklock locked, interrupts off. */
+struct xnthread *xnsched_pick_next(struct xnsched *sched)
+{
+	struct xnsched_class *p __maybe_unused;
+	struct xnthread *curr = sched->curr;
+	struct xnthread *thread;
+
+	if (!xnthread_test_state(curr, XNTHREAD_BLOCK_BITS | XNZOMBIE)) {
+		/*
+		 * Do not preempt the current thread if it holds the
+		 * scheduler lock.
+		 */
+		if (curr->lock_count > 0) {
+			xnsched_set_self_resched(sched);
+			return curr;
+		}
+		/*
+		 * Push the current thread back to the run queue of
+		 * the scheduling class it belongs to, if not yet
+		 * linked to it (XNREADY tells us if it is).
+		 */
+		if (!xnthread_test_state(curr, XNREADY)) {
+			xnsched_requeue(curr);
+			xnthread_set_state(curr, XNREADY);
+		}
+	}
+
+	/*
+	 * Find the runnable thread having the highest priority among
+	 * all scheduling classes, scanned by decreasing priority.
+	 */
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+	for_each_xnsched_class(p) {
+		thread = p->sched_pick(sched);
+		if (thread) {
+			set_thread_running(sched, thread);
+			return thread;
+		}
+	}
+
+	return NULL; /* Never executed because of the idle class. */
+#else /* !CONFIG_XENO_OPT_SCHED_CLASSES */
+	thread = xnsched_rt_pick(sched);
+	if (unlikely(thread == NULL))
+		thread = &sched->rootcb;
+
+	set_thread_running(sched, thread);
+
+	return thread;
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+}
+
+void xnsched_lock(void)
+{
+	struct xnsched *sched = xnsched_current();
+	/* See comments in xnsched_run(), ___xnsched_run(). */
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	if (sched->lflags & XNINIRQ)
+		return;
+
+	/*
+	 * CAUTION: The fast xnthread_current() accessor carries the
+	 * relevant lock nesting count only if current runs in primary
+	 * mode. Otherwise, if the caller is unknown or relaxed
+	 * Xenomai-wise, then we fall back to the root thread on the
+	 * current scheduler, which must be done with IRQs off.
+	 * Either way, we don't need to grab the super lock.
+	 */
+	XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) &&
+			  !hard_irqs_disabled());
+
+	curr->lock_count++;
+}
+EXPORT_SYMBOL_GPL(xnsched_lock);
+
+void xnsched_unlock(void)
+{
+	struct xnsched *sched = xnsched_current();
+	struct xnthread *curr = READ_ONCE(sched->curr);
+
+	XENO_WARN_ON_ONCE(COBALT, (curr->state & XNROOT) &&
+			  !hard_irqs_disabled());
+
+	if (sched->lflags & XNINIRQ)
+		return;
+
+	if (!XENO_ASSERT(COBALT, curr->lock_count > 0))
+		return;
+
+	if (--curr->lock_count == 0) {
+		xnthread_clear_localinfo(curr, XNLBALERT);
+		xnsched_run();
+	}
+}
+EXPORT_SYMBOL_GPL(xnsched_unlock);
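+
+/*
+ * A minimal usage sketch (illustration only): xnsched_lock() and
+ * xnsched_unlock() bracket a section which the current thread runs
+ * free from preemption by other Cobalt threads. Calls may nest; the
+ * outermost unlock triggers a rescheduling pass.
+ *
+ *	xnsched_lock();
+ *	... preemption-free section ...
+ *	xnsched_unlock();
+ */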
+
+/* nklock locked, interrupts off. */
+void xnsched_putback(struct xnthread *thread)
+{
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+	else
+		xnthread_set_state(thread, XNREADY);
+
+	xnsched_enqueue(thread);
+	xnsched_set_resched(thread->sched);
+}
+
+/* nklock locked, interrupts off. */
+int xnsched_set_policy(struct xnthread *thread,
+		       struct xnsched_class *sched_class,
+		       const union xnsched_policy_param *p)
+{
+	struct xnsched_class *orig_effective_class __maybe_unused;
+	bool effective;
+	int ret;
+
+	ret = xnsched_chkparam(sched_class, thread, p);
+	if (ret)
+		return ret;
+
+	/*
+	 * Declaring a thread to a new scheduling class may fail, so
+	 * we do that early, while the thread is still a member of the
+	 * previous class. However, this also means that the
+	 * declaration callback shall not do anything that might
+	 * affect the previous class (such as touching thread->rlink
+	 * for instance).
+	 */
+	if (sched_class != thread->base_class) {
+		ret = xnsched_declare(sched_class, thread, p);
+		if (ret)
+			return ret;
+	}
+
+	/*
+	 * As a special case, we may be called from __xnthread_init()
+	 * with no previous scheduling class at all.
+	 */
+	if (likely(thread->base_class != NULL)) {
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_dequeue(thread);
+
+		if (sched_class != thread->base_class)
+			xnsched_forget(thread);
+	}
+
+	/*
+	 * Set the base and effective scheduling parameters. However,
+	 * xnsched_setparam() will deny lowering the effective
+	 * priority if a boost is undergoing, only recording the
+	 * change into the base priority field in such situation.
+	 */
+	thread->base_class = sched_class;
+	/*
+	 * Referring to the effective class from a setparam() handler
+	 * is wrong: make sure to break if so.
+	 */
+	if (XENO_DEBUG(COBALT)) {
+		orig_effective_class = thread->sched_class;
+		thread->sched_class = NULL;
+	}
+
+	/*
+	 * This is the ONLY place where calling xnsched_setparam() is
+	 * legit, sane and safe.
+	 */
+	effective = xnsched_setparam(thread, p);
+	if (effective) {
+		thread->sched_class = sched_class;
+		thread->wprio = xnsched_calc_wprio(sched_class, thread->cprio);
+	} else if (XENO_DEBUG(COBALT))
+		thread->sched_class = orig_effective_class;
+
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_enqueue(thread);
+
+	/*
+	 * Make sure not to raise XNSCHED when setting up the root
+	 * thread, so that we can't start rescheduling on interrupt
+	 * exit before all CPUs have their runqueue fully
+	 * built. Filtering on XNROOT here is correct because the root
+	 * thread enters the idle class once as part of the runqueue
+	 * setup process and never leaves it afterwards.
+	 */
+	if (!xnthread_test_state(thread, XNDORMANT|XNROOT))
+		xnsched_set_resched(thread->sched);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsched_set_policy);
+
+/* nklock locked, interrupts off. */
+bool xnsched_set_effective_priority(struct xnthread *thread, int prio)
+{
+	int wprio = xnsched_calc_wprio(thread->base_class, prio);
+
+	thread->bprio = prio;
+	if (wprio == thread->wprio)
+		return true;
+
+	/*
+	 * We may not lower the effective/current priority of a
+	 * boosted thread when changing the base scheduling
+	 * parameters. Only xnsched_track_policy() and
+	 * xnsched_protect_priority() may do so when dealing with PI
+	 * and PP synchs resp.
+	 */
+	if (wprio < thread->wprio && xnthread_test_state(thread, XNBOOST))
+		return false;
+
+	thread->cprio = prio;
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	return true;
+}
+
+/* nklock locked, interrupts off. */
+void xnsched_track_policy(struct xnthread *thread,
+			  struct xnthread *target)
+{
+	union xnsched_policy_param param;
+
+	/*
+	 * Inherit (or reset) the effective scheduling class and
+	 * priority of a thread. Unlike xnsched_set_policy(), this
+	 * routine is allowed to lower the weighted priority with no
+	 * restriction, even if a boost is undergoing.
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+	/*
+	 * Self-targeting means to reset the scheduling policy and
+	 * parameters to the base settings. Otherwise, make thread
+	 * inherit the scheduling parameters from target.
+	 */
+	if (target == thread) {
+		thread->sched_class = thread->base_class;
+		xnsched_trackprio(thread, NULL);
+		/*
+		 * Per SuSv2, resetting the base scheduling parameters
+		 * should not move the thread to the tail of its
+		 * priority group.
+		 */
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_requeue(thread);
+
+	} else {
+		xnsched_getparam(target, &param);
+		thread->sched_class = target->sched_class;
+		xnsched_trackprio(thread, &param);
+		if (xnthread_test_state(thread, XNREADY))
+			xnsched_enqueue(thread);
+	}
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+/* nklock locked, interrupts off. */
+void xnsched_protect_priority(struct xnthread *thread, int prio)
+{
+	/*
+	 * Apply a PP boost by changing the effective priority of a
+	 * thread, forcing it to the RT class. Like
+	 * xnsched_track_policy(), this routine is allowed to lower
+	 * the weighted priority with no restriction, even if a boost
+	 * is undergoing.
+	 *
+	 * This routine only deals with active boosts, resetting the
+	 * base priority when leaving a PP boost is obtained by a call
+	 * to xnsched_track_policy().
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_dequeue(thread);
+
+	thread->sched_class = &xnsched_class_rt;
+	xnsched_protectprio(thread, prio);
+
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_enqueue(thread);
+
+	trace_cobalt_thread_set_current_prio(thread);
+
+	xnsched_set_resched(thread->sched);
+}
+
+static void migrate_thread(struct xnthread *thread, struct xnsched *sched)
+{
+	struct xnsched_class *sched_class = thread->sched_class;
+
+	if (xnthread_test_state(thread, XNREADY)) {
+		xnsched_dequeue(thread);
+		xnthread_clear_state(thread, XNREADY);
+	}
+
+	if (sched_class->sched_migrate)
+		sched_class->sched_migrate(thread, sched);
+	/*
+	 * WARNING: the scheduling class may have just changed as a
+	 * result of calling the per-class migration hook.
+	 */
+	thread->sched = sched;
+}
+
+/*
+ * nklock locked, interrupts off. thread must be runnable.
+ */
+void xnsched_migrate(struct xnthread *thread, struct xnsched *sched)
+{
+	xnsched_set_resched(thread->sched);
+	migrate_thread(thread, sched);
+	/* Move thread to the remote run queue. */
+	xnsched_putback(thread);
+}
+
+/*
+ * nklock locked, interrupts off. Thread may be blocked.
+ */
+void xnsched_migrate_passive(struct xnthread *thread, struct xnsched *sched)
+{
+	struct xnsched *last_sched = thread->sched;
+
+	migrate_thread(thread, sched);
+
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS)) {
+		xnsched_requeue(thread);
+		xnthread_set_state(thread, XNREADY);
+		xnsched_set_resched(last_sched);
+	}
+}
+
+#ifdef CONFIG_XENO_OPT_SCALABLE_SCHED
+
+void xnsched_initq(struct xnsched_mlq *q)
+{
+	int prio;
+
+	q->elems = 0;
+	bitmap_zero(q->prio_map, XNSCHED_MLQ_LEVELS);
+
+	for (prio = 0; prio < XNSCHED_MLQ_LEVELS; prio++)
+		INIT_LIST_HEAD(q->heads + prio);
+}
+
+static inline int get_qindex(struct xnsched_mlq *q, int prio)
+{
+	XENO_BUG_ON(COBALT, prio < 0 || prio >= XNSCHED_MLQ_LEVELS);
+	/*
+	 * BIG FAT WARNING: We need to rescale the priority level to a
+	 * 0-based range. We use find_first_bit() to scan the bitmap,
+	 * which is a forward bit scan operation. Therefore, the lower
+	 * the index value, the higher the priority (since the least
+	 * significant bits will be found first when scanning the
+	 * bitmap).
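+	 *
+	 * For instance (illustration only): the topmost priority
+	 * level (XNSCHED_MLQ_LEVELS - 1) maps to index 0, which a
+	 * forward scan finds first, while priority 0 maps to the
+	 * last index.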
+	 */
+	return XNSCHED_MLQ_LEVELS - prio - 1;
+}
+
+static struct list_head *add_q(struct xnsched_mlq *q, int prio)
+{
+	struct list_head *head;
+	int idx;
+
+	idx = get_qindex(q, prio);
+	head = q->heads + idx;
+	q->elems++;
+
+	/* New item is not linked yet. */
+	if (list_empty(head))
+		__set_bit(idx, q->prio_map);
+
+	return head;
+}
+
+void xnsched_addq(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	struct list_head *head = add_q(q, thread->cprio);
+	list_add(&thread->rlink, head);
+}
+
+void xnsched_addq_tail(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	struct list_head *head = add_q(q, thread->cprio);
+	list_add_tail(&thread->rlink, head);
+}
+
+static void del_q(struct xnsched_mlq *q,
+		  struct list_head *entry, int idx)
+{
+	struct list_head *head = q->heads + idx;
+
+	list_del(entry);
+	q->elems--;
+
+	if (list_empty(head))
+		__clear_bit(idx, q->prio_map);
+}
+
+void xnsched_delq(struct xnsched_mlq *q, struct xnthread *thread)
+{
+	del_q(q, &thread->rlink, get_qindex(q, thread->cprio));
+}
+
+struct xnthread *xnsched_getq(struct xnsched_mlq *q)
+{
+	struct xnthread *thread;
+	struct list_head *head;
+	int idx;
+
+	if (q->elems == 0)
+		return NULL;
+
+	idx = xnsched_weightq(q);
+	head = q->heads + idx;
+	XENO_BUG_ON(COBALT, list_empty(head));
+	thread = list_first_entry(head, struct xnthread, rlink);
+	del_q(q, &thread->rlink, idx);
+
+	return thread;
+}
+
+struct xnthread *xnsched_findq(struct xnsched_mlq *q, int prio)
+{
+	struct list_head *head;
+	int idx;
+
+	idx = get_qindex(q, prio);
+	head = q->heads + idx;
+	if (list_empty(head))
+		return NULL;
+
+	return list_first_entry(head, struct xnthread, rlink);
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	struct xnsched_mlq *q = &sched->rt.runnable;
+	struct xnthread *thread;
+	struct list_head *head;
+	int idx;
+
+	if (q->elems == 0)
+		return NULL;
+
+	/*
+	 * Some scheduling policies may be implemented as variants of
+	 * the core SCHED_FIFO class, sharing its runqueue
+	 * (e.g. SCHED_SPORADIC, SCHED_QUOTA). This means that we have
+	 * to do some cascading to call the right pick handler
+	 * eventually.
+	 */
+	idx = xnsched_weightq(q);
+	head = q->heads + idx;
+	XENO_BUG_ON(COBALT, list_empty(head));
+
+	/*
+	 * The active class (i.e. ->sched_class) is the one currently
+	 * queuing the thread, reflecting any priority boost due to
+	 * PI.
+	 */
+	thread = list_first_entry(head, struct xnthread, rlink);
+	if (unlikely(thread->sched_class != &xnsched_class_rt))
+		return thread->sched_class->sched_pick(sched);
+
+	del_q(q, &thread->rlink, idx);
+
+	return thread;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+
+#else /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+struct xnthread *xnsched_findq(struct list_head *q, int prio)
+{
+	struct xnthread *thread;
+
+	if (list_empty(q))
+		return NULL;
+
+	/* Find thread leading a priority group. */
+	list_for_each_entry(thread, q, rlink) {
+		if (prio == thread->cprio)
+			return thread;
+	}
+
+	return NULL;
+}
+
+#ifdef CONFIG_XENO_OPT_SCHED_CLASSES
+
+struct xnthread *xnsched_rt_pick(struct xnsched *sched)
+{
+	struct list_head *q = &sched->rt.runnable;
+	struct xnthread *thread;
+
+	if (list_empty(q))
+		return NULL;
+
+	thread = list_first_entry(q, struct xnthread, rlink);
+	if (unlikely(thread->sched_class != &xnsched_class_rt))
+		return thread->sched_class->sched_pick(sched);
+
+	list_del(&thread->rlink);
+
+	return thread;
+}
+
+#endif /* CONFIG_XENO_OPT_SCHED_CLASSES */
+
+#endif /* !CONFIG_XENO_OPT_SCALABLE_SCHED */
+
+/**
+ * @fn int xnsched_run(void)
+ * @brief The rescheduling procedure.
+ *
+ * This is the central rescheduling routine, which should be called to
+ * validate and apply changes previously made to the Cobalt scheduling
+ * state, such as suspending, resuming or changing the priority of
+ * threads. This call performs context switches as needed.
+ * xnsched_run() schedules out the current thread if:
+ *
+ * - the current thread is about to block.
+ * - a runnable thread from a higher priority scheduling class is
+ * waiting for the CPU.
+ * - the current thread does not lead the runnable threads from its
+ * own scheduling class (i.e. round-robin).
+ *
+ * The Cobalt core implements a lazy rescheduling scheme, so most of
+ * the services affecting the thread state MUST be followed by a call
+ * to the rescheduling procedure for the new scheduling state to be
+ * applied.
+ *
+ * In other words, multiple changes on the scheduler state can be done
+ * in a row, waking threads up, blocking others, without being
+ * immediately translated into the corresponding context switches.
+ * When all changes have been applied, xnsched_run() should be called
+ * for considering those changes, and possibly switching context.
+ *
+ * As a notable exception to the previous principle however, every
+ * action which ends up suspending the current thread begets an
+ * implicit call to the rescheduling procedure on behalf of the
+ * blocking service.
+ *
+ * Typically, self-suspension or sleeping on a synchronization object
+ * automatically leads to a call to the rescheduling procedure,
+ * therefore the caller does not need to explicitly issue
+ * xnsched_run() after such operations.
+ *
+ * The rescheduling procedure is always a no-op when called on behalf
+ * of an interrupt service routine. Any outstanding scheduler lock
+ * held by the outgoing thread will be restored when the thread is
+ * scheduled back in.
+ *
+ * Calling this procedure with no applicable context switch pending is
+ * harmless and simply leads to a null effect.
+ *
+ * @return Non-zero is returned if a context switch actually happened,
+ * otherwise zero if the current thread was left running.
+ *
+ * @coretags{unrestricted}
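+ *
+ * A minimal usage sketch (@a t1 and @a t2 are hypothetical thread
+ * pointers, for illustration only): several scheduler state changes
+ * are batched under nklock, then committed by a single rescheduling
+ * call.
+ *
+ * @code
+ * spl_t s;
+ *
+ * xnlock_get_irqsave(&nklock, s);
+ * xnthread_resume(t1, XNPEND);		// wake up a first waiter
+ * xnthread_resume(t2, XNDELAY);	// unblock a sleeping thread
+ * xnlock_put_irqrestore(&nklock, s);
+ *
+ * xnsched_run();	// apply the new scheduling state
+ * @endcode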
+ */
+static inline int test_resched(struct xnsched *sched)
+{
+	int resched = xnsched_resched_p(sched);
+#ifdef CONFIG_SMP
+	/* Send resched IPI to remote CPU(s). */
+	if (unlikely(!cpumask_empty(&sched->resched))) {
+		smp_mb();
+		pipeline_send_resched_ipi(&sched->resched);
+		cpumask_clear(&sched->resched);
+	}
+#endif
+	sched->status &= ~XNRESCHED;
+
+	return resched;
+}
+
+static inline void enter_root(struct xnthread *root)
+{
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_stop(&root->sched->wdtimer);
+#endif
+}
+
+static inline void leave_root(struct xnthread *root)
+{
+	pipeline_prep_switch_oob(root);
+
+#ifdef CONFIG_XENO_OPT_WATCHDOG
+	xntimer_start(&root->sched->wdtimer, get_watchdog_timeout(),
+		      XN_INFINITE, XN_RELATIVE);
+#endif
+}
+
+void __xnsched_run_handler(void) /* hw interrupts off. */
+{
+	trace_cobalt_schedule_remote(xnsched_current());
+	xnsched_run();
+}
+
+static inline void do_lazy_user_work(struct xnthread *curr)
+{
+	xnthread_commit_ceiling(curr);
+}
+
+int ___xnsched_run(struct xnsched *sched)
+{
+	bool switched = false, leaving_inband;
+	struct xnthread *prev, *next, *curr;
+	spl_t s;
+
+	XENO_WARN_ON_ONCE(COBALT, is_secondary_domain());
+
+	trace_cobalt_schedule(sched);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	curr = sched->curr;
+	/*
+	 * CAUTION: xnthread_host_task(curr) may be unsynced and even
+	 * stale if curr == &rootcb, since the task logged by
+	 * leave_root() may no longer be the current one. Use
+	 * "current" for disambiguation.
+	 */
+	xntrace_pid(task_pid_nr(current), xnthread_current_priority(curr));
+
+	if (xnthread_test_state(curr, XNUSER))
+		do_lazy_user_work(curr);
+
+	if (!test_resched(sched))
+		goto out;
+
+	next = xnsched_pick_next(sched);
+	if (next == curr) {
+		if (unlikely(xnthread_test_state(next, XNROOT))) {
+			if (sched->lflags & XNHTICK)
+				xnintr_host_tick(sched);
+			if (sched->lflags & XNHDEFER)
+				xnclock_program_shot(&nkclock, sched);
+		}
+		goto out;
+	}
+
+	prev = curr;
+
+	trace_cobalt_switch_context(prev, next);
+
+	/*
+	 * sched->curr is shared locklessly with xnsched_run() and
+	 * xnsched_lock(). WRITE_ONCE() makes sure sched->curr is
+	 * written atomically so that these routines always observe
+	 * consistent values by preventing the compiler from using
+	 * store tearing.
+	 */
+	WRITE_ONCE(sched->curr, next);
+	leaving_inband = false;
+
+	if (xnthread_test_state(prev, XNROOT)) {
+		leave_root(prev);
+		leaving_inband = true;
+	} else if (xnthread_test_state(next, XNROOT)) {
+		if (sched->lflags & XNHTICK)
+			xnintr_host_tick(sched);
+		if (sched->lflags & XNHDEFER)
+			xnclock_program_shot(&nkclock, sched);
+		enter_root(next);
+	}
+
+	xnstat_exectime_switch(sched, &next->stat.account);
+	xnstat_counter_inc(&next->stat.csw);
+
+	if (pipeline_switch_to(prev, next, leaving_inband))
+		/* oob -> in-band transition detected. */
+		return true;
+
+	/*
+	 * Re-read sched->curr for tracing: the current thread may
+	 * have switched from in-band to oob context.
+	 */
+	xntrace_pid(task_pid_nr(current),
+		xnthread_current_priority(xnsched_current()->curr));
+
+	switched = true;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return !!switched;
+}
+EXPORT_SYMBOL_GPL(___xnsched_run);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static struct xnvfile_directory sched_vfroot;
+
+struct vfile_schedlist_priv {
+	struct xnthread *curr;
+	xnticks_t start_time;
+};
+
+struct vfile_schedlist_data {
+	int cpu;
+	pid_t pid;
+	char name[XNOBJECT_NAME_LEN];
+	char sched_class[XNOBJECT_NAME_LEN];
+	char personality[XNOBJECT_NAME_LEN];
+	int cprio;
+	xnticks_t timeout;
+	int state;
+};
+
+static struct xnvfile_snapshot_ops vfile_schedlist_ops;
+
+static struct xnvfile_snapshot schedlist_vfile = {
+	.privsz = sizeof(struct vfile_schedlist_priv),
+	.datasz = sizeof(struct vfile_schedlist_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedlist_ops,
+};
+
+static int vfile_schedlist_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it);
+
+	/* &nkthreadq cannot be empty (root thread(s)). */
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+	priv->start_time = xnclock_read_monotonic(&nkclock);
+
+	return cobalt_nrthreads;
+}
+
+static int vfile_schedlist_next(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedlist_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_schedlist_data *p = data;
+	xnticks_t timeout, period;
+	struct xnthread *thread;
+	xnticks_t base_time;
+
+	if (priv->curr == NULL)
+		return 0;	/* All done. */
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	p->cpu = xnsched_cpu(thread->sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->cprio = thread->cprio;
+	p->state = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		p->state |= XNLOCK;
+	knamecpy(p->sched_class, thread->sched_class->name);
+	knamecpy(p->personality, thread->personality->name);
+	period = xnthread_get_period(thread);
+	base_time = priv->start_time;
+	if (xntimer_clock(&thread->ptimer) != &nkclock)
+		base_time = xnclock_read_monotonic(xntimer_clock(&thread->ptimer));
+	timeout = xnthread_get_timeout(thread, base_time);
+	/*
+	 * Here we cheat: the thread is periodic and the sampling rate may
+	 * be high, so it is indeed possible that the next tick date
+	 * from the ptimer progresses fast enough while we are busy
+	 * collecting output data in this loop, so that next_date -
+	 * start_time > period. In such a case, we simply ceil the
+	 * value to period to keep the result meaningful, even if not
+	 * necessarily accurate. But what does accuracy mean when the
+	 * sampling frequency is high, and the way to read it has to
+	 * go through the vfile interface anyway?
+	 */
+	if (period > 0 && period < timeout &&
+	    !xntimer_running_p(&thread->rtimer))
+		timeout = period;
+
+	p->timeout = timeout;
+
+	return 1;
+}
+
+static int vfile_schedlist_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedlist_data *p = data;
+	char sbuf[64], pbuf[16], tbuf[16];
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-5s  %-8s  %-5s %-12s  %-10s %s\n",
+			       "CPU", "PID", "CLASS", "TYPE", "PRI", "TIMEOUT",
+			       "STAT", "NAME");
+	else {
+		ksformat(pbuf, sizeof(pbuf), "%3d", p->cprio);
+		xntimer_format_time(p->timeout, tbuf, sizeof(tbuf));
+		xnthread_format_status(p->state, sbuf, sizeof(sbuf));
+
+		xnvfile_printf(it,
+			       "%3u  %-6d %-5s  %-8s  %-5s %-12s  %-10s %s%s%s\n",
+			       p->cpu,
+			       p->pid,
+			       p->sched_class,
+			       p->personality,
+			       pbuf,
+			       tbuf,
+			       sbuf,
+			       (p->state & XNUSER) ? "" : "[",
+			       p->name,
+			       (p->state & XNUSER) ? "" : "]");
+	}
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_schedlist_ops = {
+	.rewind = vfile_schedlist_rewind,
+	.next = vfile_schedlist_next,
+	.show = vfile_schedlist_show,
+};
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static spl_t vfile_schedstat_lock_s;
+
+static int vfile_schedstat_get_lock(struct xnvfile *vfile)
+{
+	int ret;
+
+	ret = xnintr_get_query_lock();
+	if (ret < 0)
+		return ret;
+	xnlock_get_irqsave(&nklock, vfile_schedstat_lock_s);
+	return 0;
+}
+
+static void vfile_schedstat_put_lock(struct xnvfile *vfile)
+{
+	xnlock_put_irqrestore(&nklock, vfile_schedstat_lock_s);
+	xnintr_put_query_lock();
+}
+
+static struct xnvfile_lock_ops vfile_schedstat_lockops = {
+	.get = vfile_schedstat_get_lock,
+	.put = vfile_schedstat_put_lock,
+};
+
+struct vfile_schedstat_priv {
+	int irq;
+	struct xnthread *curr;
+	struct xnintr_iterator intr_it;
+};
+
+struct vfile_schedstat_data {
+	int cpu;
+	pid_t pid;
+	int state;
+	char name[XNOBJECT_NAME_LEN];
+	unsigned long ssw;
+	unsigned long csw;
+	unsigned long xsc;
+	unsigned long pf;
+	xnticks_t exectime_period;
+	xnticks_t account_period;
+	xnticks_t exectime_total;
+	struct xnsched_class *sched_class;
+	xnticks_t period;
+	int cprio;
+};
+
+static struct xnvfile_snapshot_ops vfile_schedstat_ops;
+
+static struct xnvfile_snapshot schedstat_vfile = {
+	.privsz = sizeof(struct vfile_schedstat_priv),
+	.datasz = sizeof(struct vfile_schedstat_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedstat_ops,
+	.entry = { .lockops = &vfile_schedstat_lockops },
+};
+
+static int vfile_schedstat_rewind(struct xnvfile_snapshot_iterator *it)
+{
+	struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it);
+	int irqnr;
+
+	/*
+	 * The activity numbers on each valid interrupt descriptor are
+	 * grouped under a pseudo-thread.
+	 */
+	priv->curr = list_first_entry(&nkthreadq, struct xnthread, glink);
+	priv->irq = 0;
+	irqnr = xnintr_query_init(&priv->intr_it) * num_online_cpus();
+
+	return irqnr + cobalt_nrthreads;
+}
+
+static int vfile_schedstat_next(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_priv *priv = xnvfile_iterator_priv(it);
+	struct vfile_schedstat_data *p = data;
+	struct xnthread *thread;
+	struct xnsched *sched;
+	xnticks_t period;
+	int __maybe_unused ret;
+
+	if (priv->curr == NULL)
+		/*
+		 * We are done with actual threads, scan interrupt
+		 * descriptors.
+		 */
+		goto scan_irqs;
+
+	thread = priv->curr;
+	if (list_is_last(&thread->glink, &nkthreadq))
+		priv->curr = NULL;
+	else
+		priv->curr = list_next_entry(thread, glink);
+
+	sched = thread->sched;
+	p->cpu = xnsched_cpu(sched);
+	p->pid = xnthread_host_pid(thread);
+	memcpy(p->name, thread->name, sizeof(p->name));
+	p->state = xnthread_get_state(thread);
+	if (thread->lock_count > 0)
+		p->state |= XNLOCK;
+	p->ssw = xnstat_counter_get(&thread->stat.ssw);
+	p->csw = xnstat_counter_get(&thread->stat.csw);
+	p->xsc = xnstat_counter_get(&thread->stat.xsc);
+	p->pf = xnstat_counter_get(&thread->stat.pf);
+	p->sched_class = thread->sched_class;
+	p->cprio = thread->cprio;
+	p->period = xnthread_get_period(thread);
+
+	period = sched->last_account_switch - thread->stat.lastperiod.start;
+	if (period == 0 && thread == sched->curr) {
+		p->exectime_period = 1;
+		p->account_period = 1;
+	} else {
+		p->exectime_period = thread->stat.account.total -
+			thread->stat.lastperiod.total;
+		p->account_period = period;
+	}
+	p->exectime_total = thread->stat.account.total;
+	thread->stat.lastperiod.total = thread->stat.account.total;
+	thread->stat.lastperiod.start = sched->last_account_switch;
+
+	return 1;
+
+scan_irqs:
+#ifdef CONFIG_XENO_OPT_STATS_IRQS
+	if (priv->irq >= PIPELINE_NR_IRQS)
+		return 0;	/* All done. */
+
+	ret = xnintr_query_next(priv->irq, &priv->intr_it, p->name);
+	if (ret) {
+		if (ret == -EAGAIN)
+			xnvfile_touch(it->vfile); /* force rewind. */
+		priv->irq++;
+		return VFILE_SEQ_SKIP;
+	}
+
+	if (!xnsched_supported_cpu(priv->intr_it.cpu))
+		return VFILE_SEQ_SKIP;
+
+	p->cpu = priv->intr_it.cpu;
+	p->csw = priv->intr_it.hits;
+	p->exectime_period = priv->intr_it.exectime_period;
+	p->account_period = priv->intr_it.account_period;
+	p->exectime_total = priv->intr_it.exectime_total;
+	p->pid = 0;
+	p->state = 0;
+	p->ssw = 0;
+	p->xsc = 0;
+	p->pf = 0;
+	p->sched_class = &xnsched_class_idle;
+	p->cprio = 0;
+	p->period = 0;
+
+	return 1;
+#else /* !CONFIG_XENO_OPT_STATS_IRQS */
+	return 0;
+#endif /* !CONFIG_XENO_OPT_STATS_IRQS */
+}
+
+static int vfile_schedstat_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_data *p = data;
+	int usage = 0;
+
+	if (p == NULL)
+		xnvfile_printf(it,
+			       "%-3s  %-6s %-10s %-10s %-10s %-4s  %-8s  %5s"
+			       "  %s\n",
+			       "CPU", "PID", "MSW", "CSW", "XSC", "PF", "STAT", "%CPU",
+			       "NAME");
+	else {
+		if (p->account_period) {
+			/*
+			 * Scale both terms down until the divisor fits
+			 * in 32 bits, then compute the CPU share in
+			 * tenths of a percent, rounding to nearest.
+			 */
+			while (p->account_period > 0xffffffffUL) {
+				p->exectime_period >>= 16;
+				p->account_period >>= 16;
+			}
+			usage = xnarch_ulldiv(p->exectime_period * 1000LL +
+					      (p->account_period >> 1),
+					      p->account_period, NULL);
+		}
+		xnvfile_printf(it,
+			       "%3u  %-6d %-10lu %-10lu %-10lu %-4lu  %.8x  %3u.%u"
+			       "  %s%s%s\n",
+			       p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
+			       usage / 10, usage % 10,
+			       (p->state & XNUSER) ? "" : "[",
+			       p->name,
+			       (p->state & XNUSER) ? "" : "]");
+	}
+
+	return 0;
+}
+
+static int vfile_schedacct_show(struct xnvfile_snapshot_iterator *it,
+				void *data)
+{
+	struct vfile_schedstat_data *p = data;
+
+	if (p == NULL)
+		return 0;
+
+	xnvfile_printf(it, "%u %d %lu %lu %lu %lu %.8x %Lu %Lu %Lu %s %s %d %Lu\n",
+		       p->cpu, p->pid, p->ssw, p->csw, p->xsc, p->pf, p->state,
+		       xnclock_ticks_to_ns(&nkclock, p->account_period),
+		       xnclock_ticks_to_ns(&nkclock, p->exectime_period),
+		       xnclock_ticks_to_ns(&nkclock, p->exectime_total),
+		       p->name,
+		       p->sched_class->name,
+		       p->cprio,
+		       p->period);
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops vfile_schedstat_ops = {
+	.rewind = vfile_schedstat_rewind,
+	.next = vfile_schedstat_next,
+	.show = vfile_schedstat_show,
+};
+
+/*
+ * An accounting vfile is a thread statistics vfile in disguise with a
+ * different output format, which is parser-friendly.
+ */
+static struct xnvfile_snapshot_ops vfile_schedacct_ops;
+
+static struct xnvfile_snapshot schedacct_vfile = {
+	.privsz = sizeof(struct vfile_schedstat_priv),
+	.datasz = sizeof(struct vfile_schedstat_data),
+	.tag = &nkthreadlist_tag,
+	.ops = &vfile_schedacct_ops,
+};
+
+static struct xnvfile_snapshot_ops vfile_schedacct_ops = {
+	.rewind = vfile_schedstat_rewind,
+	.next = vfile_schedstat_next,
+	.show = vfile_schedacct_show,
+};
+
+#endif /* CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_SMP
+
+static int affinity_vfile_show(struct xnvfile_regular_iterator *it,
+			       void *data)
+{
+	unsigned long val = 0;
+	int cpu;
+
+	for (cpu = 0; cpu < nr_cpumask_bits; cpu++)
+		if (cpumask_test_cpu(cpu, &cobalt_cpu_affinity))
+			val |= (1UL << cpu);
+
+	xnvfile_printf(it, "%08lx\n", val);
+
+	return 0;
+}
+
+static ssize_t affinity_vfile_store(struct xnvfile_input *input)
+{
+	cpumask_t affinity;
+	ssize_t ret;
+	long val;
+	int cpu;
+	spl_t s;
+
+	ret = xnvfile_get_integer(input, &val);
+	if (ret < 0)
+		return ret;
+
+	if (val == 0)
+		affinity = xnsched_realtime_cpus; /* Reset to default. */
+	else {
+		cpumask_clear(&affinity);
+		for (cpu = 0; cpu < nr_cpumask_bits; cpu++, val >>= 1) {
+			if (val & 1) {
+				/*
+				 * The new dynamic affinity must be
+				 * a subset of the static set of
+				 * supported CPUs.
+				 */
+				if (!cpumask_test_cpu(cpu,
+						      &xnsched_realtime_cpus))
+					return -EINVAL;
+				cpumask_set_cpu(cpu, &affinity);
+			}
+		}
+	}
+
+	cpumask_and(&affinity, &affinity, cpu_online_mask);
+	if (cpumask_empty(&affinity))
+		return -EINVAL;
+
+	xnlock_get_irqsave(&nklock, s);
+	cobalt_cpu_affinity = affinity;
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+
+static struct xnvfile_regular_ops affinity_vfile_ops = {
+	.show = affinity_vfile_show,
+	.store = affinity_vfile_store,
+};
+
+static struct xnvfile_regular affinity_vfile = {
+	.ops = &affinity_vfile_ops,
+};
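+
+/*
+ * Usage sketch (illustration only; the vfile is created under the
+ * Cobalt vfile root, typically /proc/xenomai): writing a CPU bitmask
+ * restricts the default affinity of Xenomai threads to a subset of
+ * the real-time capable CPUs, while writing 0 restores the full set.
+ *
+ *	# echo 3 > /proc/xenomai/affinity	(allow CPU0 and CPU1)
+ *	# cat /proc/xenomai/affinity
+ *	00000003
+ */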
+
+#endif /* CONFIG_SMP */
+
+int xnsched_init_proc(void)
+{
+	struct xnsched_class *p;
+	int ret;
+
+	ret = xnvfile_init_dir("sched", &sched_vfroot, &cobalt_vfroot);
+	if (ret)
+		return ret;
+
+	ret = xnvfile_init_snapshot("threads", &schedlist_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+
+	for_each_xnsched_class(p) {
+		if (p->sched_init_vfile) {
+			ret = p->sched_init_vfile(p, &sched_vfroot);
+			if (ret)
+				return ret;
+		}
+	}
+
+#ifdef CONFIG_XENO_OPT_STATS
+	ret = xnvfile_init_snapshot("stat", &schedstat_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+	ret = xnvfile_init_snapshot("acct", &schedacct_vfile, &sched_vfroot);
+	if (ret)
+		return ret;
+#endif /* CONFIG_XENO_OPT_STATS */
+
+#ifdef CONFIG_SMP
+	xnvfile_init_regular("affinity", &affinity_vfile, &cobalt_vfroot);
+#endif /* CONFIG_SMP */
+
+	return 0;
+}
+
+void xnsched_cleanup_proc(void)
+{
+	struct xnsched_class *p;
+
+	for_each_xnsched_class(p) {
+		if (p->sched_cleanup_vfile)
+			p->sched_cleanup_vfile(p);
+	}
+
+#ifdef CONFIG_SMP
+	xnvfile_destroy_regular(&affinity_vfile);
+#endif /* CONFIG_SMP */
+#ifdef CONFIG_XENO_OPT_STATS
+	xnvfile_destroy_snapshot(&schedacct_vfile);
+	xnvfile_destroy_snapshot(&schedstat_vfile);
+#endif /* CONFIG_XENO_OPT_STATS */
+	xnvfile_destroy_snapshot(&schedlist_vfile);
+	xnvfile_destroy_dir(&sched_vfroot);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/select.c b/kernel/xenomai-v3.2.4/kernel/cobalt/select.c
new file mode 100644
index 0000000..bd790af
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/select.c
@@ -0,0 +1,461 @@
+/*
+ * Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2008 Efixo
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/types.h>
+#include <linux/bitops.h>	/* For hweight_long */
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/select.h>
+#include <pipeline/sirq.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_select Synchronous I/O multiplexing
+ *
+ * This module provides the services needed to implement the POSIX
+ * select() service, or any other event multiplexing service.
+ *
+ * Following the implementation of the POSIX select service, this module defines
+ * three types of events:
+ * - \a XNSELECT_READ meaning that a file descriptor is ready for reading;
+ * - \a XNSELECT_WRITE meaning that a file descriptor is ready for writing;
+ * - \a XNSELECT_EXCEPT meaning that a file descriptor received an exceptional
+ *   event.
+ *
+ * It works by defining two structures:
+ * - a @a struct @a xnselect structure, which should be added to every file
+ * descriptor for every event type (read, write, or except);
+ * - a @a struct @a xnselector structure, the selection structure, passed by
+ * the thread calling the xnselect service, where this service does all its
+ * housekeeping.
+ * @{
+ */
+
+static LIST_HEAD(selector_list);
+static int deletion_virq;
+
+/**
+ * Initialize a @a struct @a xnselect structure.
+ *
+ * This service must be called to initialize a @a struct @a xnselect structure
+ * before it is bound to a selector by the means of xnselect_bind().
+ *
+ * @param select_block pointer to the xnselect structure to be initialized
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnselect_init(struct xnselect *select_block)
+{
+	INIT_LIST_HEAD(&select_block->bindings);
+}
+EXPORT_SYMBOL_GPL(xnselect_init);
+
+static inline int xnselect_wakeup(struct xnselector *selector)
+{
+	return xnsynch_flush(&selector->synchbase, 0) == XNSYNCH_RESCHED;
+}
+
+/**
+ * Bind a file descriptor (represented by its @a xnselect structure) to a
+ * selector block.
+ *
+ * @param select_block pointer to the @a struct @a xnselect to be bound;
+ *
+ * @param binding pointer to a newly allocated (using xnmalloc) @a struct
+ * @a xnselect_binding;
+ *
+ * @param selector pointer to the selector structure;
+ *
+ * @param type type of events (@a XNSELECT_READ, @a XNSELECT_WRITE, or @a
+ * XNSELECT_EXCEPT);
+ *
+ * @param index index of the file descriptor (represented by @a
+ * select_block) in the bit fields used by the @a selector structure;
+ *
+ * @param state current state of the file descriptor.
+ *
+ * @a select_block must have been initialized with xnselect_init(),
+ * the @a xnselector structure must have been initialized with
+ * xnselector_init(), @a binding may be uninitialized.
+ *
+ * This service must be called with nklock locked, irqs off. For this reason,
+ * the @a binding parameter must have been allocated by the caller outside the
+ * locking section.
+ *
+ * @retval -EINVAL if @a type or @a index is invalid;
+ * @retval 0 otherwise.
+ *
+ * @coretags{task-unrestricted, might-switch, atomic-entry}
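+ *
+ * A minimal binding sketch (hypothetical names, for illustration
+ * only): @a rfd_block is a driver-side xnselect block previously set
+ * up with xnselect_init(), @a fd_index its position in the selector
+ * bit fields, and @a data_available the current readability state.
+ *
+ * @code
+ * struct xnselect_binding *binding;
+ * spl_t s;
+ * int ret;
+ *
+ * binding = xnmalloc(sizeof(*binding));	// outside the locked section
+ * if (binding == NULL)
+ *	return -ENOMEM;
+ *
+ * xnlock_get_irqsave(&nklock, s);
+ * ret = xnselect_bind(&rfd_block, binding, selector,
+ *		       XNSELECT_READ, fd_index, data_available);
+ * xnlock_put_irqrestore(&nklock, s);
+ * @endcode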
+ */
+int xnselect_bind(struct xnselect *select_block,
+		  struct xnselect_binding *binding,
+		  struct xnselector *selector,
+		  unsigned type,
+		  unsigned index,
+		  unsigned state)
+{
+	atomic_only();
+
+	if (type >= XNSELECT_MAX_TYPES || index > __FD_SETSIZE)
+		return -EINVAL;
+
+	binding->selector = selector;
+	binding->fd = select_block;
+	binding->type = type;
+	binding->bit_index = index;
+
+	list_add_tail(&binding->slink, &selector->bindings);
+	list_add_tail(&binding->link, &select_block->bindings);
+	__FD_SET__(index, &selector->fds[type].expected);
+	if (state) {
+		__FD_SET__(index, &selector->fds[type].pending);
+		if (xnselect_wakeup(selector))
+			xnsched_run();
+	} else
+		__FD_CLR__(index, &selector->fds[type].pending);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnselect_bind);
+
+/* Must be called with nklock locked, irqs off. */
+int __xnselect_signal(struct xnselect *select_block, unsigned state)
+{
+	struct xnselect_binding *binding;
+	struct xnselector *selector;
+	int resched = 0;
+
+	list_for_each_entry(binding, &select_block->bindings, link) {
+		selector = binding->selector;
+		if (state) {
+			if (!__FD_ISSET__(binding->bit_index,
+					&selector->fds[binding->type].pending)) {
+				__FD_SET__(binding->bit_index,
+					 &selector->fds[binding->type].pending);
+				if (xnselect_wakeup(selector))
+					resched = 1;
+			}
+		} else
+			__FD_CLR__(binding->bit_index,
+				 &selector->fds[binding->type].pending);
+	}
+
+	return resched;
+}
+EXPORT_SYMBOL_GPL(__xnselect_signal);
+
+/**
+ * Destroy the @a xnselect structure associated with a file descriptor.
+ *
+ * Any binding with a @a xnselector block is destroyed.
+ *
+ * @param select_block pointer to the @a xnselect structure associated
+ * with a file descriptor
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+void xnselect_destroy(struct xnselect *select_block)
+{
+	struct xnselect_binding *binding, *tmp;
+	struct xnselector *selector;
+	int resched = 0;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&select_block->bindings))
+		goto out;
+
+	list_for_each_entry_safe(binding, tmp, &select_block->bindings, link) {
+		list_del(&binding->link);
+		selector = binding->selector;
+		__FD_CLR__(binding->bit_index,
+			 &selector->fds[binding->type].expected);
+		if (!__FD_ISSET__(binding->bit_index,
+				&selector->fds[binding->type].pending)) {
+			__FD_SET__(binding->bit_index,
+				 &selector->fds[binding->type].pending);
+			if (xnselect_wakeup(selector))
+				resched = 1;
+		}
+		list_del(&binding->slink);
+		xnlock_put_irqrestore(&nklock, s);
+		xnfree(binding);
+		xnlock_get_irqsave(&nklock, s);
+	}
+	if (resched)
+		xnsched_run();
+out:
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnselect_destroy);
+
+static unsigned
+fd_set_andnot(fd_set *result, fd_set *first, fd_set *second, unsigned n)
+{
+	unsigned i, not_empty = 0;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if ((result->fds_bits[i] =
+		    first->fds_bits[i] & ~(second->fds_bits[i])))
+			not_empty = 1;
+
+	if (i < __FDSET_LONGS__
+	    && (result->fds_bits[i] =
+		first->fds_bits[i] & ~(second->fds_bits[i]) & (__FDMASK__(n) - 1)))
+		not_empty = 1;
+
+	return not_empty;
+}
+
+static unsigned
+fd_set_and(fd_set *result, fd_set *first, fd_set *second, unsigned n)
+{
+	unsigned i, not_empty = 0;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if ((result->fds_bits[i] =
+		    first->fds_bits[i] & second->fds_bits[i]))
+			not_empty = 1;
+
+	if (i < __FDSET_LONGS__
+	    && (result->fds_bits[i] =
+		first->fds_bits[i] & second->fds_bits[i] & (__FDMASK__(n) - 1)))
+		not_empty = 1;
+
+	return not_empty;
+}
+
+static void fd_set_zeropad(fd_set *set, unsigned n)
+{
+	unsigned i;
+
+	i = __FDELT__(n);
+
+	if (i < __FDSET_LONGS__)
+		set->fds_bits[i] &= (__FDMASK__(n) - 1);
+
+	for (i++; i < __FDSET_LONGS__; i++)
+		set->fds_bits[i] = 0;
+}
+
+static unsigned fd_set_popcount(fd_set *set, unsigned n)
+{
+	unsigned count = 0, i;
+
+	for (i = 0; i < __FDELT__(n); i++)
+		if (set->fds_bits[i])
+			count += hweight_long(set->fds_bits[i]);
+
+	if (i < __FDSET_LONGS__ && (set->fds_bits[i] & (__FDMASK__(n) - 1)))
+		count += hweight_long(set->fds_bits[i] & (__FDMASK__(n) - 1));
+
+	return count;
+}
+
+/**
+ * Initialize a selector structure.
+ *
+ * @param selector The selector structure to be initialized.
+ *
+ * @retval 0
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnselector_init(struct xnselector *selector)
+{
+	unsigned int i;
+
+	xnsynch_init(&selector->synchbase, XNSYNCH_FIFO, NULL);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++) {
+		__FD_ZERO__(&selector->fds[i].expected);
+		__FD_ZERO__(&selector->fds[i].pending);
+	}
+	INIT_LIST_HEAD(&selector->bindings);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnselector_init);
+
+/**
+ * Check the state of a number of file descriptors, wait for a state change if
+ * no descriptor is ready.
+ *
+ * @param selector structure to check for pending events
+ * @param out_fds The set of descriptors with pending events if a strictly positive number is returned, or the set of descriptors not yet bound if -ECHRNG is returned;
+ * @param in_fds the set of descriptors which events should be checked
+ * @param nfds the highest-numbered descriptor in any of the @a in_fds sets, plus 1;
+ * @param timeout the timeout, whose meaning depends on @a timeout_mode; note
+ * that xnselect() passes @a timeout and @a timeout_mode unchanged to
+ * xnsynch_sleep_on(), so passing a relative value different from XN_INFINITE
+ * as a timeout with @a timeout_mode set to XN_RELATIVE will cause a longer
+ * sleep than expected if the sleep is interrupted.
+ * @param timeout_mode the mode of @a timeout.
+ *
+ * @retval -EINVAL if @a nfds is negative;
+ * @retval -ECHRNG if some of the descriptors passed in @a in_fds have not yet
+ * been registered with xnselect_bind(), @a out_fds contains the set of such
+ * descriptors;
+ * @retval -EINTR if @a xnselect was interrupted while waiting;
+ * @retval 0 in case of timeout.
+ * @retval the number of file descriptors having received an event.
+ *
+ * @coretags{primary-only, might-switch}
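+ *
+ * A minimal usage sketch (hypothetical names, for illustration
+ * only): wait up to 1 ms for read events on a descriptor previously
+ * bound to @a selector via xnselect_bind().
+ *
+ * @code
+ * fd_set rfds_in, rfds_out;
+ * fd_set *in_fds[XNSELECT_MAX_TYPES] = { [XNSELECT_READ] = &rfds_in };
+ * fd_set *out_fds[XNSELECT_MAX_TYPES] = { [XNSELECT_READ] = &rfds_out };
+ * int ret;
+ *
+ * __FD_ZERO__(&rfds_in);
+ * __FD_SET__(fd_index, &rfds_in);
+ * ret = xnselect(selector, out_fds, in_fds, fd_index + 1,
+ *		  1000000, XN_RELATIVE);	// 1 ms, in nanoseconds
+ * @endcode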
+ */
+int xnselect(struct xnselector *selector,
+	     fd_set *out_fds[XNSELECT_MAX_TYPES],
+	     fd_set *in_fds[XNSELECT_MAX_TYPES],
+	     int nfds,
+	     xnticks_t timeout, xntmode_t timeout_mode)
+{
+	unsigned int i, not_empty = 0, count;
+	int info = 0;
+	spl_t s;
+
+	if ((unsigned) nfds > __FD_SETSIZE)
+		return -EINVAL;
+
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i])
+			fd_set_zeropad(out_fds[i], nfds);
+
+	xnlock_get_irqsave(&nklock, s);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i]
+		    && fd_set_andnot(out_fds[i], in_fds[i],
+				     &selector->fds[i].expected, nfds))
+			not_empty = 1;
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (not_empty)
+		return -ECHRNG;
+
+	xnlock_get_irqsave(&nklock, s);
+	for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+		if (out_fds[i]
+		    && fd_set_and(out_fds[i], in_fds[i],
+				  &selector->fds[i].pending, nfds))
+			not_empty = 1;
+
+	while (!not_empty) {
+		info = xnsynch_sleep_on(&selector->synchbase,
+					timeout, timeout_mode);
+
+		for (i = 0; i < XNSELECT_MAX_TYPES; i++)
+			if (out_fds[i]
+			    && fd_set_and(out_fds[i], in_fds[i],
+					  &selector->fds[i].pending, nfds))
+				not_empty = 1;
+
+		if (info & (XNBREAK | XNTIMEO))
+			break;
+	}
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (not_empty) {
+		for (count = 0, i = 0; i < XNSELECT_MAX_TYPES; i++)
+			if (out_fds[i])
+				count += fd_set_popcount(out_fds[i], nfds);
+
+		return count;
+	}
+
+	if (info & XNBREAK)
+		return -EINTR;
+
+	return 0; /* Timeout */
+}
+EXPORT_SYMBOL_GPL(xnselect);
+
+/**
+ * Destroy a selector block.
+ *
+ * All bindings with file descriptor are destroyed.
+ *
+ * @param selector the selector block to be destroyed
+ *
+ * @coretags{task-unrestricted}
+ */
+void xnselector_destroy(struct xnselector *selector)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&selector->destroy_link, &selector_list);
+	pipeline_post_sirq(deletion_virq);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnselector_destroy);
+
+static irqreturn_t xnselector_destroy_loop(int virq, void *dev_id)
+{
+	struct xnselect_binding *binding, *tmpb;
+	struct xnselector *selector, *tmps;
+	struct xnselect *fd;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&selector_list))
+		goto out;
+
+	list_for_each_entry_safe(selector, tmps, &selector_list, destroy_link) {
+		list_del(&selector->destroy_link);
+		if (list_empty(&selector->bindings))
+			goto release;
+		list_for_each_entry_safe(binding, tmpb, &selector->bindings, slink) {
+			list_del(&binding->slink);
+			fd = binding->fd;
+			list_del(&binding->link);
+			xnlock_put_irqrestore(&nklock, s);
+			xnfree(binding);
+			xnlock_get_irqsave(&nklock, s);
+		}
+	release:
+		xnsynch_destroy(&selector->synchbase);
+		xnsched_run();
+		xnlock_put_irqrestore(&nklock, s);
+
+		xnfree(selector);
+
+		xnlock_get_irqsave(&nklock, s);
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return IRQ_HANDLED;
+}
+
+int xnselect_mount(void)
+{
+	deletion_virq = pipeline_create_inband_sirq(xnselector_destroy_loop);
+	if (deletion_virq < 0)
+		return deletion_virq;
+
+	return 0;
+}
+
+int xnselect_umount(void)
+{
+	pipeline_delete_inband_sirq(deletion_virq);
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c b/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c
new file mode 100644
index 0000000..6e50e53
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/synch.c
@@ -0,0 +1,1185 @@
+/*
+ * Copyright (C) 2001-2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/signal.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/uapi/signal.h>
+#include <trace/events/cobalt-core.h>
+
+#define PP_CEILING_MASK 0xff
+
+static inline int get_ceiling_value(struct xnsynch *synch)
+{
+	/*
+	 * The ceiling priority value is stored in user-writable
+	 * memory, make sure to constrain it within valid bounds for
+	 * xnsched_class_rt before using it.
+	 */
+	return *synch->ceiling_ref & PP_CEILING_MASK ?: 1;
+}
+
+struct xnsynch *lookup_lazy_pp(xnhandle_t handle);
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_synch Thread synchronization services
+ * @{
+ */
+
+/**
+ * @brief Initialize a synchronization object.
+ *
+ * Initializes a synchronization object. Xenomai threads can wait on
+ * and signal such objects for serializing access to resources.
+ * This object has built-in support for priority inheritance.
+ *
+ * @param synch The address of a synchronization object descriptor
+ * Cobalt will use to store the object-specific data.  This descriptor
+ * must always be valid while the object is active, therefore it must
+ * be allocated in permanent memory.
+ *
+ * @param flags A set of creation flags affecting the operation. The
+ * valid flags are:
+ *
+ * - XNSYNCH_PRIO causes the threads waiting for the resource to pend
+ * in priority order. Otherwise, FIFO ordering is used (XNSYNCH_FIFO).
+ *
+ * - XNSYNCH_OWNER indicates that the synchronization object shall
+ * track the resource ownership, allowing a single owner at most at
+ * any point in time. Note that setting this flag implies the use of
+ * xnsynch_acquire() and xnsynch_release() instead of
+ * xnsynch_sleep_on() and xnsynch_wakeup_*().
+ *
+ * - XNSYNCH_PI enables priority inheritance when a priority inversion
+ * is detected among threads using this object.  XNSYNCH_PI implies
+ * XNSYNCH_OWNER and XNSYNCH_PRIO.
+ *
+ * - XNSYNCH_PP enables priority protection to prevent priority inversion.
+ * XNSYNCH_PP implies XNSYNCH_OWNER and XNSYNCH_PRIO.
+ *
+ * - XNSYNCH_DREORD (Disable REORDering) tells Cobalt not to reorder
+ * the wait list upon priority change of a waiter. Reordering is the
+ * default. Only applies when XNSYNCH_PRIO is present.
+ *
+ * @param fastlock Address of the fast lock word to be associated with
+ * a synchronization object with ownership tracking. Therefore, a
+ * valid fast-lock address is required if XNSYNCH_OWNER is set in @a
+ * flags.
+ *
+ * @coretags{task-unrestricted}
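+ *
+ * A minimal setup sketch (illustration only): a PI-enabled,
+ * ownership-tracking object paired with its fast lock word.
+ *
+ * @code
+ * static struct xnsynch mutex_synch;
+ * static atomic_t fastlock_word;
+ *
+ * xnsynch_init(&mutex_synch, XNSYNCH_PI, &fastlock_word);
+ * // ... then acquire with xnsynch_acquire(), drop with xnsynch_release()
+ * @endcode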
+ */
+void xnsynch_init(struct xnsynch *synch, int flags, atomic_t *fastlock)
+{
+	if (flags & (XNSYNCH_PI|XNSYNCH_PP))
+		flags |= XNSYNCH_PRIO | XNSYNCH_OWNER;	/* Obviously... */
+
+	synch->status = flags & ~XNSYNCH_CLAIMED;
+	synch->owner = NULL;
+	synch->cleanup = NULL;	/* for PI/PP only. */
+	synch->wprio = -1;
+	synch->ceiling_ref = NULL;
+	INIT_LIST_HEAD(&synch->pendq);
+
+	if (flags & XNSYNCH_OWNER) {
+		BUG_ON(fastlock == NULL);
+		synch->fastlock = fastlock;
+		atomic_set(fastlock, XN_NO_HANDLE);
+	} else
+		synch->fastlock = NULL;
+}
+EXPORT_SYMBOL_GPL(xnsynch_init);
+
+/**
+ * @brief Initialize a synchronization object enforcing PP.
+ *
+ * This call is a variant of xnsynch_init() for initializing
+ * synchronization objects enabling the priority protect protocol.
+ *
+ * @param synch The address of a synchronization object descriptor
+ * Cobalt will use to store the object-specific data.  See
+ * xnsynch_init().
+ *
+ * @param flags A set of creation flags affecting the operation. See
+ * xnsynch_init(). XNSYNCH_PI is mutually exclusive with XNSYNCH_PP,
+ * and won't be considered.
+ *
+ * @param fastlock Address of the fast lock word to be associated with
+ * a synchronization object with ownership tracking. See xnsynch_init().
+ *
+ * @param ceiling_ref The address of the variable holding the current
+ * priority ceiling value for this object.
+ *
+ * @coretags{task-unrestricted}
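+ *
+ * A minimal setup sketch (illustration only): the ceiling word is
+ * assumed to live in memory which may be shared with user space, as
+ * hinted by the masking applied when reading it back.
+ *
+ * @code
+ * static struct xnsynch pp_synch;
+ * static atomic_t pp_fastlock;
+ * static u32 pp_ceiling = 90;	// boost owners to priority 90
+ *
+ * xnsynch_init_protect(&pp_synch, XNSYNCH_PRIO, &pp_fastlock, &pp_ceiling);
+ * @endcode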
+ */
+void xnsynch_init_protect(struct xnsynch *synch, int flags,
+			  atomic_t *fastlock, u32 *ceiling_ref)
+{
+	xnsynch_init(synch, (flags & ~XNSYNCH_PI) | XNSYNCH_PP, fastlock);
+	synch->ceiling_ref = ceiling_ref;
+}
+
+/**
+ * @fn void xnsynch_destroy(struct xnsynch *synch)
+ * @brief Destroy a synchronization object.
+ *
+ * Destroys the synchronization object @a synch, unblocking all
+ * waiters with the XNRMID status.
+ *
+ * @return XNSYNCH_RESCHED is returned if at least one thread is
+ * unblocked, which means the caller should invoke xnsched_run() for
+ * applying the new scheduling state. Otherwise, XNSYNCH_DONE is
+ * returned.
+ *
+ * @sideeffect Same as xnsynch_flush().
+ *
+ * @coretags{task-unrestricted}
+ */
+int xnsynch_destroy(struct xnsynch *synch)
+{
+	int ret;
+
+	ret = xnsynch_flush(synch, XNRMID);
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnsynch_destroy);
+
+/**
+ * @fn int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode);
+ * @brief Sleep on an ownerless synchronization object.
+ *
+ * Makes the calling thread sleep on the specified synchronization
+ * object, waiting for it to be signaled.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to pend on the given resource. It must not be used
+ * with synchronization objects that are supposed to track ownership
+ * (XNSYNCH_OWNER).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to sleep on.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on the resource. This value is a wait time given as a
+ * count of nanoseconds. It can either be relative, absolute
+ * monotonic, or absolute adjustable depending on @a
+ * timeout_mode. Passing XN_INFINITE @b and setting @a timeout_mode to
+ * XN_RELATIVE specifies an unbounded wait. All other values are used
+ * to initialize a watchdog timer.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
+ *
+ * @coretags{primary-only, might-switch}
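+ *
+ * A minimal usage sketch (@a event_synch is a hypothetical ownerless
+ * object, for illustration only): pend for at most 1 ms, then decode
+ * the wakeup condition from the returned bitmask.
+ *
+ * @code
+ * int info = xnsynch_sleep_on(&event_synch, 1000000, XN_RELATIVE);
+ * if (info & XNRMID)
+ *	return -EIDRM;	// object deleted while pending
+ * if (info & XNTIMEO)
+ *	return -ETIMEDOUT;
+ * if (info & XNBREAK)
+ *	return -EINTR;	// forcibly unblocked
+ * @endcode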
+ */
+int xnsynch_sleep_on(struct xnsynch *synch, xnticks_t timeout,
+		     xntmode_t timeout_mode)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	thread = xnthread_current();
+
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP) &&
+	    thread->res_count > 0 &&
+	    xnthread_test_state(thread, XNWARN))
+		xnthread_signal(thread, SIGDEBUG, SIGDEBUG_MUTEX_SLEEP);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_sleepon(synch);
+
+	if ((synch->status & XNSYNCH_PRIO) == 0) /* i.e. FIFO */
+		list_add_tail(&thread->plink, &synch->pendq);
+	else /* i.e. priority-sorted */
+		list_add_priff(thread, &synch->pendq, wprio, plink);
+
+	xnthread_suspend(thread, XNPEND, timeout, timeout_mode, synch);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return xnthread_test_info(thread, XNRMID|XNTIMEO|XNBREAK);
+}
+EXPORT_SYMBOL_GPL(xnsynch_sleep_on);
+
+/**
+ * @fn struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch);
+ * @brief Unblock the heading thread from wait.
+ *
+ * This service wakes up the thread which is currently leading the
+ * synchronization object's pending list. The sleeping thread is
+ * unblocked from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a single waiter is resumed. It must not
+ * be used with synchronization objects that are supposed to track
+ * ownership (i.e. created with XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to signal.
+ *
+ * @return The descriptor address of the unblocked thread, or NULL if
+ * no thread was waiting.
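+ *
+ * A hedged signal-side sketch, matching the wait example shown for
+ * xnsynch_sleep_on() (the @a ev object is hypothetical):
+ *
+ * @code
+ * if (xnsynch_wakeup_one_sleeper(&ev->synch))
+ *	xnsched_run();	// Commit the wakeup to the scheduler.
+ * @endcode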
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_wakeup_one_sleeper(struct xnsynch *synch)
+{
+	struct xnthread *thread;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq)) {
+		thread = NULL;
+		goto out;
+	}
+
+	trace_cobalt_synch_wakeup(synch);
+	thread = list_first_entry(&synch->pendq, struct xnthread, plink);
+	list_del(&thread->plink);
+	thread->wchan = NULL;
+	xnthread_resume(thread, XNPEND);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_one_sleeper);
+
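+/*
+ * Unblock up to @nr threads heading the wait queue of @synch, without
+ * rescheduling, and return the number of threads actually woken up.
+ * Like its siblings, this service must not be used with
+ * synchronization objects that track ownership (XNSYNCH_OWNER set).
+ */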
+int xnsynch_wakeup_many_sleepers(struct xnsynch *synch, int nr)
+{
+	struct xnthread *thread, *tmp;
+	int nwakeups = 0;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (list_empty(&synch->pendq))
+		goto out;
+
+	trace_cobalt_synch_wakeup_many(synch);
+
+	list_for_each_entry_safe(thread, tmp, &synch->pendq, plink) {
+		if (nwakeups >= nr)
+			break;
+		list_del(&thread->plink);
+		thread->wchan = NULL;
+		xnthread_resume(thread, XNPEND);
+		nwakeups++;
+	}
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return nwakeups;
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_many_sleepers);
+
+/**
+ * @fn void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper);
+ * @brief Unblock a particular thread from wait.
+ *
+ * This service wakes up a specific thread which is currently pending on
+ * the given synchronization object. The sleeping thread is unblocked
+ * from its pending state, but no reschedule is performed.
+ *
+ * This service should be called by upper interfaces wanting to signal
+ * the given resource so that a specific waiter is resumed. It must not
+ * be used with synchronization objects that are supposed to track
+ * ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to signal.
+ *
+ * @param sleeper The thread to unblock which MUST be currently linked
+ * to the synchronization object's pending queue (i.e. synch->pendq).
+ *
+ * @coretags{unrestricted}
+ */
+void xnsynch_wakeup_this_sleeper(struct xnsynch *synch, struct xnthread *sleeper)
+{
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, synch->status & XNSYNCH_OWNER);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_wakeup(synch);
+	list_del(&sleeper->plink);
+	sleeper->wchan = NULL;
+	xnthread_resume(sleeper, XNPEND);
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnsynch_wakeup_this_sleeper);
+
+static inline void raise_boost_flag(struct xnthread *owner)
+{
+	/* Backup the base priority at first boost only. */
+	if (!xnthread_test_state(owner, XNBOOST)) {
+		owner->bprio = owner->cprio;
+		xnthread_set_state(owner, XNBOOST);
+	}
+}
+
+static void inherit_thread_priority(struct xnthread *owner,
+				    struct xnthread *target)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+
+	/* Apply the scheduling policy of "target" to "owner". */
+	xnsched_track_policy(owner, target);
+
+	/*
+	 * Owner may be sleeping, propagate priority update through
+	 * the PI chain if needed.
+	 */
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void __ceil_owner_priority(struct xnthread *owner, int prio)
+{
+	if (xnthread_test_state(owner, XNZOMBIE))
+		return;
+	/*
+	 * Raise owner priority to the ceiling value, this implicitly
+	 * selects SCHED_FIFO for the owner.
+	 */
+	xnsched_protect_priority(owner, prio);
+
+	if (owner->wchan)
+		xnsynch_requeue_sleeper(owner);
+}
+
+static void adjust_boost(struct xnthread *owner, struct xnthread *target)
+{
+	struct xnsynch *synch;
+
+	/*
+	 * CAUTION: we may have both PI and PP-enabled objects among
+	 * the boosters, so considering only the leader of
+	 * synch->pendq is NOT enough for determining the next boost
+	 * priority, since PP is tracked on acquisition, not on
+	 * contention. Check the head of the booster list instead.
+	 */
+	synch = list_first_entry(&owner->boosters, struct xnsynch, next);
+	if (synch->wprio == owner->wprio)
+		return;
+
+	if (synch->status & XNSYNCH_PP)
+		__ceil_owner_priority(owner, get_ceiling_value(synch));
+	else {
+		XENO_BUG_ON(COBALT, list_empty(&synch->pendq));
+		if (target == NULL)
+			target = list_first_entry(&synch->pendq,
+						  struct xnthread, plink);
+		inherit_thread_priority(owner, target);
+	}
+}
+
+static void ceil_owner_priority(struct xnsynch *synch)
+{
+	struct xnthread *owner = synch->owner;
+	int wprio;
+
+	/* PP ceiling values are implicitly based on the RT class. */
+	wprio = xnsched_calc_wprio(&xnsched_class_rt,
+				   get_ceiling_value(synch));
+	synch->wprio = wprio;
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	raise_boost_flag(owner);
+	synch->status |= XNSYNCH_CEILING;
+
+	/*
+	 * If the ceiling value is lower than the current effective
+	 * priority, we must not adjust the latter.  BEWARE: not only
+	 * is this restriction required to keep the PP logic right,
+	 * but it is also a basic assumption made by all
+	 * xnthread_commit_ceiling() callers, which won't check for
+	 * any rescheduling opportunity upon return.
+	 *
+	 * However we do want the object to be linked to the booster
+	 * list, and XNBOOST must appear in the current thread status.
+	 *
+	 * This way, setparam() won't be allowed to decrease the
+	 * current weighted priority below the ceiling value, until we
+	 * eventually release this object.
+	 */
+	if (wprio > owner->wprio)
+		adjust_boost(owner, NULL);
+}
+
+static inline
+void track_owner(struct xnsynch *synch, struct xnthread *owner)
+{
+	synch->owner = owner;
+}
+
+static inline  /* nklock held, irqs off */
+void set_current_owner_locked(struct xnsynch *synch, struct xnthread *owner)
+{
+	/*
+	 * Update the owner information, and apply priority protection
+	 * for PP objects. We may only get there if owner is current,
+	 * or blocked.
+	 */
+	track_owner(synch, owner);
+	if (synch->status & XNSYNCH_PP)
+		ceil_owner_priority(synch);
+}
+
+static inline
+void set_current_owner(struct xnsynch *synch, struct xnthread *owner)
+{
+	spl_t s;
+
+	track_owner(synch, owner);
+	if (synch->status & XNSYNCH_PP) {
+		xnlock_get_irqsave(&nklock, s);
+		ceil_owner_priority(synch);
+		xnlock_put_irqrestore(&nklock, s);
+	}
+}
+
+static inline
+xnhandle_t get_owner_handle(xnhandle_t ownerh, struct xnsynch *synch)
+{
+	/*
+	 * On acquisition from kernel space, the fast lock handle
+	 * should bear the FLCEIL bit for PP objects, so that userland
+	 * takes the slow path on release, jumping to the kernel for
+	 * dropping the ceiling priority boost.
+	 */
+	if (synch->status & XNSYNCH_PP)
+		ownerh = xnsynch_fast_ceiling(ownerh);
+
+	return ownerh;
+}
+
+static void commit_ceiling(struct xnsynch *synch, struct xnthread *curr)
+{
+	xnhandle_t oldh, h;
+	atomic_t *lockp;
+
+	track_owner(synch, curr);
+	ceil_owner_priority(synch);
+	/*
+	 * Raise FLCEIL, which indicates a kernel entry will be
+	 * required for releasing this resource.
+	 */
+	lockp = xnsynch_fastlock(synch);
+	do {
+		h = atomic_read(lockp);
+		oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_ceiling(h));
+	} while (oldh != h);
+}
+
+void xnsynch_commit_ceiling(struct xnthread *curr)  /* nklock held, irqs off */
+{
+	struct xnsynch *synch;
+	atomic_t *lockp;
+
+	/* curr->u_window has to be valid, curr bears XNUSER. */
+	synch = lookup_lazy_pp(curr->u_window->pp_pending);
+	if (synch == NULL) {
+		/*
+		 * If pp_pending is a bad handle, don't panic but
+		 * rather ignore: we don't want a misbehaving userland
+		 * to crash the kernel.
+		 */
+		XENO_WARN_ON_ONCE(USER, 1);
+		goto out;
+	}
+
+	/*
+	 * For PP locks, userland does, in that order:
+	 *
+	 * -- LOCK
+	 * 1. curr->u_window->pp_pending = lock_handle
+	 *    barrier();
+	 * 2. atomic_cmpxchg(lockp, XN_NO_HANDLE, curr->handle);
+	 *
+	 * -- UNLOCK
+	 * 1. atomic_cmpxchg(lockp, curr->handle, XN_NO_HANDLE); [unclaimed]
+	 *    barrier();
+	 * 2. curr->u_window->pp_pending = XN_NO_HANDLE
+	 *
+	 * Make sure we have not been caught in a rescheduling in
+	 * between those steps. If we did, then we won't be holding
+	 * the lock as we schedule away, therefore no priority update
+	 * must take place.
+	 */
+	lockp = xnsynch_fastlock(synch);
+	if (xnsynch_fast_owner_check(lockp, curr->handle))
+		return;
+
+	/*
+	 * In rare cases, we could be called multiple times for
+	 * committing a lazy ceiling for the same object, e.g. if
+	 * userland is preempted in the middle of a recursive locking
+	 * sequence.
+	 *
+	 * This stems from the fact that userland has to update
+	 * ->pp_pending prior to trying to grab the lock atomically,
+	 * at which point it can figure out whether a recursive
+	 * locking happened. We get out of this trap by testing the
+	 * XNSYNCH_CEILING flag.
+	 */
+	if ((synch->status & XNSYNCH_CEILING) == 0)
+		commit_ceiling(synch, curr);
+out:
+	curr->u_window->pp_pending = XN_NO_HANDLE;
+}
+
+/**
+ * @fn int xnsynch_try_acquire(struct xnsynch *synch);
+ * @brief Try acquiring the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the call
+ * returns with an error code.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @return Zero is returned if @a synch has been successfully
+ * acquired. Otherwise:
+ *
+ * - -EDEADLK is returned if @a synch is currently held by the calling
+ * thread.
+ *
+ * - -EBUSY is returned if @a synch is currently held by another
+ * thread.
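+ *
+ * A hedged sketch (the @a mutex container is hypothetical):
+ *
+ * @code
+ * int ret = xnsynch_try_acquire(&mutex->synch);
+ * if (ret == -EBUSY) {
+ *	// Contended: fall back to the blocking form.
+ *	ret = xnsynch_acquire(&mutex->synch, XN_INFINITE, XN_RELATIVE);
+ * }
+ * @endcode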
+ *
+ * @coretags{primary-only}
+ */
+int xnsynch_try_acquire(struct xnsynch *synch)
+{
+	struct xnthread *curr;
+	atomic_t *lockp;
+	xnhandle_t h;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	curr = xnthread_current();
+	lockp = xnsynch_fastlock(synch);
+	trace_cobalt_synch_try_acquire(synch);
+
+	h = atomic_cmpxchg(lockp, XN_NO_HANDLE,
+			   get_owner_handle(curr->handle, synch));
+	if (h != XN_NO_HANDLE)
+		return xnhandle_get_id(h) == curr->handle ?
+			-EDEADLK : -EBUSY;
+
+	set_current_owner(synch, curr);
+	xnthread_get_resource(curr);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnsynch_try_acquire);
+
+/**
+ * @fn int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout, xntmode_t timeout_mode);
+ * @brief Acquire the ownership of a synchronization object.
+ *
+ * This service should be called by upper interfaces wanting the
+ * current thread to acquire the ownership of the given resource. If
+ * the resource is already assigned to another thread, the caller is
+ * suspended.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to acquire.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on the resource. This value is a wait time given as a
+ * count of nanoseconds. It can either be relative, absolute
+ * monotonic, or absolute adjustable depending on @a
+ * timeout_mode. Passing XN_INFINITE @b and setting @a timeout_mode to
+ * XN_RELATIVE specifies an unbounded wait. All other values are used
+ * to initialize a watchdog timer.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @return A bitmask which may include zero or one information bit
+ * among XNRMID, XNTIMEO and XNBREAK, which should be tested by the
+ * caller, for detecting respectively: object deletion, timeout or
+ * signal/unblock conditions which might have happened while waiting.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note Unlike xnsynch_try_acquire(), this call does NOT check for
+ * invalid recursive locking request, which means that such request
+ * will always cause a deadlock for the caller.
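+ *
+ * A minimal acquisition/release sketch (illustrative only; the @a
+ * mutex container is hypothetical):
+ *
+ * @code
+ * int info = xnsynch_acquire(&mutex->synch, XN_INFINITE, XN_RELATIVE);
+ * if (info == 0) {
+ *	// ... critical section ...
+ *	if (xnsynch_release(&mutex->synch, xnthread_current()))
+ *		xnsched_run();
+ * }
+ * @endcode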
+ */
+int xnsynch_acquire(struct xnsynch *synch, xnticks_t timeout,
+		    xntmode_t timeout_mode)
+{
+	struct xnthread *curr, *owner;
+	xnhandle_t currh, h, oldh;
+	atomic_t *lockp;
+	spl_t s;
+
+	primary_mode_only();
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	curr = xnthread_current();
+	currh = curr->handle;
+	lockp = xnsynch_fastlock(synch);
+	trace_cobalt_synch_acquire(synch);
+redo:
+	/* Basic form of xnsynch_try_acquire(). */
+	h = atomic_cmpxchg(lockp, XN_NO_HANDLE,
+			   get_owner_handle(currh, synch));
+	if (likely(h == XN_NO_HANDLE)) {
+		set_current_owner(synch, curr);
+		xnthread_get_resource(curr);
+		return 0;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	/*
+	 * Set claimed bit.  In case it appears to be set already,
+	 * re-read its state under nklock so that we don't miss any
+	 * change between the lock-less read and here. But also try to
+	 * avoid cmpxchg where possible. Only if it appears not to be
+	 * set, start with cmpxchg directly.
+	 */
+	if (xnsynch_fast_is_claimed(h)) {
+		oldh = atomic_read(lockp);
+		goto test_no_owner;
+	}
+
+	do {
+		oldh = atomic_cmpxchg(lockp, h, xnsynch_fast_claimed(h));
+		if (likely(oldh == h))
+			break;
+	test_no_owner:
+		if (oldh == XN_NO_HANDLE) {
+			/* Mutex released from another cpu. */
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		h = oldh;
+	} while (!xnsynch_fast_is_claimed(h));
+
+	owner = xnthread_lookup(h);
+	if (owner == NULL) {
+		/*
+		 * The handle is broken, therefore pretend that the
+		 * synch object was deleted to signal an error.
+		 */
+		xnthread_set_info(curr, XNRMID);
+		goto out;
+	}
+
+	/*
+	 * This is the contended path. We just detected an earlier
+	 * syscall-less fast locking from userland, fix up the
+	 * in-kernel state information accordingly.
+	 *
+	 * The consistency of the state information is guaranteed,
+	 * because we just raised the claim bit atomically for this
+	 * contended lock, therefore userland will have to jump to the
+	 * kernel when releasing it, instead of doing a fast
+	 * unlock. Since we currently own the superlock, consistency
+	 * wrt transfer_ownership() is guaranteed through
+	 * serialization.
+	 *
+	 * CAUTION: in this particular case, the only assumptions we
+	 * can safely make is that *owner is valid but not current on
+	 * this CPU.
+	 */
+	track_owner(synch, owner);
+	xnsynch_detect_relaxed_owner(synch, curr);
+
+	if ((synch->status & XNSYNCH_PRIO) == 0) { /* i.e. FIFO */
+		list_add_tail(&curr->plink, &synch->pendq);
+		goto block;
+	}
+
+	if (curr->wprio > owner->wprio) {
+		if (xnthread_test_info(owner, XNWAKEN) && owner->wwake == synch) {
+			/* Ownership is still pending, steal the resource. */
+			set_current_owner_locked(synch, curr);
+			xnthread_clear_info(curr, XNRMID | XNTIMEO | XNBREAK);
+			xnthread_set_info(owner, XNROBBED);
+			goto grab;
+		}
+
+		list_add_priff(curr, &synch->pendq, wprio, plink);
+
+		if (synch->status & XNSYNCH_PI) {
+			raise_boost_flag(owner);
+
+			if (synch->status & XNSYNCH_CLAIMED)
+				list_del(&synch->next); /* owner->boosters */
+			else
+				synch->status |= XNSYNCH_CLAIMED;
+
+			synch->wprio = curr->wprio;
+			list_add_priff(synch, &owner->boosters, wprio, next);
+			/*
+			 * curr->wprio > owner->wprio implies that
+			 * synch must be leading the booster list
+			 * after insertion, so we may call
+			 * inherit_thread_priority() for tracking
+			 * current's priority directly without going
+			 * through adjust_boost().
+			 */
+			inherit_thread_priority(owner, curr);
+		}
+	} else
+		list_add_priff(curr, &synch->pendq, wprio, plink);
+block:
+	xnthread_suspend(curr, XNPEND, timeout, timeout_mode, synch);
+	curr->wwake = NULL;
+	xnthread_clear_info(curr, XNWAKEN);
+
+	if (xnthread_test_info(curr, XNRMID | XNTIMEO | XNBREAK))
+		goto out;
+
+	if (xnthread_test_info(curr, XNROBBED)) {
+		/*
+		 * Somebody stole the ownership from us while we were
+		 * ready to run, waiting for the CPU: we need to wait
+		 * for the resource again.
+		 */
+		if (timeout_mode != XN_RELATIVE || timeout == XN_INFINITE) {
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		timeout = xntimer_get_timeout_stopped(&curr->rtimer);
+		if (timeout > 1) { /* Otherwise, it's too late. */
+			xnlock_put_irqrestore(&nklock, s);
+			goto redo;
+		}
+		xnthread_set_info(curr, XNTIMEO);
+		goto out;
+	}
+grab:
+	xnthread_get_resource(curr);
+
+	if (xnsynch_pended_p(synch))
+		currh = xnsynch_fast_claimed(currh);
+
+	/* Set new ownership for this object. */
+	atomic_set(lockp, get_owner_handle(currh, synch));
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return xnthread_test_info(curr, XNRMID|XNTIMEO|XNBREAK);
+}
+EXPORT_SYMBOL_GPL(xnsynch_acquire);
+
+static void drop_booster(struct xnsynch *synch, struct xnthread *owner)
+{
+	list_del(&synch->next);	/* owner->boosters */
+
+	if (list_empty(&owner->boosters)) {
+		xnthread_clear_state(owner, XNBOOST);
+		inherit_thread_priority(owner, owner);
+	} else
+		adjust_boost(owner, NULL);
+}
+
+static inline void clear_pi_boost(struct xnsynch *synch,
+				  struct xnthread *owner)
+{	/* nklock held, irqs off */
+	synch->status &= ~XNSYNCH_CLAIMED;
+	drop_booster(synch, owner);
+}
+
+static inline void clear_pp_boost(struct xnsynch *synch,
+				  struct xnthread *owner)
+{	/* nklock held, irqs off */
+	synch->status &= ~XNSYNCH_CEILING;
+	drop_booster(synch, owner);
+}
+
+static bool transfer_ownership(struct xnsynch *synch,
+			       struct xnthread *lastowner)
+{				/* nklock held, irqs off */
+	struct xnthread *nextowner;
+	xnhandle_t nextownerh;
+	atomic_t *lockp;
+
+	lockp = xnsynch_fastlock(synch);
+
+	/*
+	 * Our caller checked for contention locklessly, so we do have
+	 * to check again under lock in a different way.
+	 */
+	if (list_empty(&synch->pendq)) {
+		synch->owner = NULL;
+		atomic_set(lockp, XN_NO_HANDLE);
+		return false;
+	}
+
+	nextowner = list_first_entry(&synch->pendq, struct xnthread, plink);
+	list_del(&nextowner->plink);
+	nextowner->wchan = NULL;
+	nextowner->wwake = synch;
+	set_current_owner_locked(synch, nextowner);
+	xnthread_set_info(nextowner, XNWAKEN);
+	xnthread_resume(nextowner, XNPEND);
+
+	if (synch->status & XNSYNCH_CLAIMED)
+		clear_pi_boost(synch, lastowner);
+
+	nextownerh = get_owner_handle(nextowner->handle, synch);
+	if (xnsynch_pended_p(synch))
+		nextownerh = xnsynch_fast_claimed(nextownerh);
+
+	atomic_set(lockp, nextownerh);
+
+	return true;
+}
+
+/**
+ * @fn bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr)
+ * @brief Release a resource and pass it to the next waiting thread.
+ *
+ * This service releases the ownership of the given synchronization
+ * object. The thread which is currently leading the object's pending
+ * list, if any, is unblocked from its pending state. However, no
+ * reschedule is performed.
+ *
+ * This service must be used only with synchronization objects that
+ * track ownership (XNSYNCH_OWNER set).
+ *
+ * @param synch The descriptor address of the synchronization object
+ * whose ownership is changed.
+ *
+ * @param curr The descriptor address of the current thread, which
+ * must own the object at the time of calling.
+ *
+ * @return True if a reschedule is required.
+ *
+ * @sideeffect
+ *
+ * - The effective priority of the previous resource owner might be
+ * lowered to its base priority value as a consequence of the priority
+ * boost being cleared.
+ *
+ * - The synchronization object ownership is transferred to the
+ * unblocked thread.
+ *
+ * @coretags{primary-only, might-switch}
+ */
+bool xnsynch_release(struct xnsynch *synch, struct xnthread *curr)
+{
+	bool need_resched = false;
+	xnhandle_t currh, h;
+	atomic_t *lockp;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, (synch->status & XNSYNCH_OWNER) == 0);
+
+	trace_cobalt_synch_release(synch);
+
+	if (xnthread_put_resource(curr))
+		return false;
+
+	lockp = xnsynch_fastlock(synch);
+	currh = curr->handle;
+	/*
+	 * FLCEIL may only be raised by the owner, or when the owner
+	 * is blocked waiting for the synch (ownership transfer). In
+	 * addition, only the current owner of a synch may release it,
+	 * therefore we can't race while testing FLCEIL locklessly.
+	 * All updates to FLCLAIM are covered by the superlock.
+	 *
+	 * Therefore, clearing the fastlock racelessly in this routine
+	 * without leaking FLCEIL/FLCLAIM updates can be achieved by
+	 * holding the superlock.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	if (synch->status & XNSYNCH_CEILING) {
+		clear_pp_boost(synch, curr);
+		need_resched = true;
+	}
+
+	h = atomic_cmpxchg(lockp, currh, XN_NO_HANDLE);
+	if ((h & ~XNSYNCH_FLCEIL) != currh)
+		/* FLCLAIM set, synch is contended. */
+		need_resched = transfer_ownership(synch, curr);
+	else if (h != currh)	/* FLCEIL set, FLCLAIM clear. */
+		atomic_set(lockp, XN_NO_HANDLE);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return need_resched;
+}
+EXPORT_SYMBOL_GPL(xnsynch_release);
+
+void xnsynch_requeue_sleeper(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	struct xnsynch *synch = thread->wchan;
+	struct xnthread *owner;
+
+	XENO_BUG_ON(COBALT, !(synch->status & XNSYNCH_PRIO));
+
+	/*
+	 * Update the position in the pend queue of a thread waiting
+	 * for a lock. This routine propagates the change throughout
+	 * the PI chain if required.
+	 */
+	list_del(&thread->plink);
+	list_add_priff(thread, &synch->pendq, wprio, plink);
+	owner = synch->owner;
+
+	/* Only PI-enabled objects are of interest here. */
+	if ((synch->status & XNSYNCH_PI) == 0)
+		return;
+
+	synch->wprio = thread->wprio;
+	if (synch->status & XNSYNCH_CLAIMED)
+		list_del(&synch->next);
+	else {
+		synch->status |= XNSYNCH_CLAIMED;
+		raise_boost_flag(owner);
+	}
+
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, thread);
+}
+EXPORT_SYMBOL_GPL(xnsynch_requeue_sleeper);
+
+/**
+ * @fn struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch);
+ * @brief Access the thread leading a synch object wait queue.
+ *
+ * This service returns the descriptor address of the thread leading a
+ * synchronization object wait queue.
+ *
+ * @param synch The descriptor address of the target synchronization object.
+ *
+ * @return The descriptor address of the leading thread, or NULL if
+ * the pend queue is empty.
+ *
+ * @coretags{unrestricted}
+ */
+struct xnthread *xnsynch_peek_pendq(struct xnsynch *synch)
+{
+	struct xnthread *thread = NULL;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!list_empty(&synch->pendq))
+		thread = list_first_entry(&synch->pendq,
+					  struct xnthread, plink);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return thread;
+}
+EXPORT_SYMBOL_GPL(xnsynch_peek_pendq);
+
+/**
+ * @fn int xnsynch_flush(struct xnsynch *synch, int reason);
+ * @brief Unblock all waiters pending on a resource.
+ *
+ * This service atomically releases all threads which currently sleep
+ * on a given resource. This service should be called by upper
+ * interfaces under circumstances requiring that the pending queue of
+ * a given resource is cleared, such as before the resource is
+ * deleted.
+ *
+ * @param synch The descriptor address of the synchronization object
+ * to be flushed.
+ *
+ * @param reason Some flags to set in the information mask of every
+ * unblocked thread. Zero is an acceptable value. The following bits
+ * are pre-defined by Cobalt:
+ *
+ * - XNRMID should be set to indicate that the synchronization object
+ * is about to be destroyed (see xnthread_resume()).
+ *
+ * - XNBREAK should be set to indicate that the wait has been forcibly
+ * interrupted (see xnthread_unblock()).
+ *
+ * @return XNSYNCH_RESCHED is returned if at least one thread is
+ * unblocked, which means the caller should invoke xnsched_run() for
+ * applying the new scheduling state. Otherwise, XNSYNCH_DONE is
+ * returned.
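+ *
+ * A hedged deletion sketch (the @a obj container is hypothetical):
+ *
+ * @code
+ * if (xnsynch_flush(&obj->synch, XNRMID) == XNSYNCH_RESCHED)
+ *	xnsched_run();	// At least one waiter was unblocked.
+ * @endcode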
+ *
+ * @sideeffect
+ *
+ * - The effective priority of the current resource owner might be
+ * lowered to its base priority value as a consequence of the priority
+ * inheritance boost being cleared.
+ *
+ * @coretags{unrestricted}
+ */
+int xnsynch_flush(struct xnsynch *synch, int reason)
+{
+	struct xnthread *sleeper, *tmp;
+	int ret;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_synch_flush(synch);
+
+	if (list_empty(&synch->pendq)) {
+		XENO_BUG_ON(COBALT, synch->status & XNSYNCH_CLAIMED);
+		ret = XNSYNCH_DONE;
+	} else {
+		ret = XNSYNCH_RESCHED;
+		list_for_each_entry_safe(sleeper, tmp, &synch->pendq, plink) {
+			list_del(&sleeper->plink);
+			xnthread_set_info(sleeper, reason);
+			sleeper->wchan = NULL;
+			xnthread_resume(sleeper, XNPEND);
+		}
+		if (synch->status & XNSYNCH_CLAIMED)
+			clear_pi_boost(synch, synch->owner);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnsynch_flush);
+
+void xnsynch_forget_sleeper(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	struct xnsynch *synch = thread->wchan;
+	struct xnthread *owner, *target;
+
+	/*
+	 * Do all the necessary housekeeping chores to stop a thread
+	 * from waiting on a given synchronization object. Doing so
+	 * may require to update a PI chain.
+	 */
+	trace_cobalt_synch_forget(synch);
+
+	xnthread_clear_state(thread, XNPEND);
+	thread->wchan = NULL;
+	list_del(&thread->plink); /* synch->pendq */
+
+	/*
+	 * Only a sleeper leaving a PI chain triggers an update.
+	 * NOTE: PP objects never bear the CLAIMED bit.
+	 */
+	if ((synch->status & XNSYNCH_CLAIMED) == 0)
+		return;
+
+	owner = synch->owner;
+
+	if (list_empty(&synch->pendq)) {
+		/* No more sleepers: clear the PI boost. */
+		clear_pi_boost(synch, owner);
+		return;
+	}
+
+	/*
+	 * Reorder the booster queue of the current owner after we
+	 * left the wait list, then set its priority to the new
+	 * minimum required to prevent priority inversion.
+	 */
+	target = list_first_entry(&synch->pendq, struct xnthread, plink);
+	synch->wprio = target->wprio;
+	list_del(&synch->next);	/* owner->boosters */
+	list_add_priff(synch, &owner->boosters, wprio, next);
+	adjust_boost(owner, target);
+}
+EXPORT_SYMBOL_GPL(xnsynch_forget_sleeper);
+
+#ifdef CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED
+
+/*
+ * Detect when a thread is about to sleep on a synchronization
+ * object currently owned by someone running in secondary mode.
+ */
+void xnsynch_detect_relaxed_owner(struct xnsynch *synch,
+				  struct xnthread *sleeper)
+{
+	if (xnthread_test_state(sleeper, XNWARN) &&
+	    !xnthread_test_info(sleeper, XNPIALERT) &&
+	    xnthread_test_state(synch->owner, XNRELAX)) {
+		xnthread_set_info(sleeper, XNPIALERT);
+		__xnthread_signal(sleeper, SIGDEBUG,
+				  SIGDEBUG_MIGRATE_PRIOINV);
+	} else
+		xnthread_clear_info(sleeper, XNPIALERT);
+}
+
+/*
+ * Detect when a thread is about to relax while holding booster(s)
+ * (claimed PI or active PP object), which denotes a potential for
+ * priority inversion. In such an event, any sleeper bearing the
+ * XNWARN bit will receive a SIGDEBUG notification.
+ */
+void xnsynch_detect_boosted_relax(struct xnthread *owner)
+{
+	struct xnthread *sleeper;
+	struct xnsynch *synch;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	xnthread_for_each_booster(synch, owner) {
+		xnsynch_for_each_sleeper(sleeper, synch) {
+			if (xnthread_test_state(sleeper, XNWARN)) {
+				xnthread_set_info(sleeper, XNPIALERT);
+				__xnthread_signal(sleeper, SIGDEBUG,
+						  SIGDEBUG_MIGRATE_PRIOINV);
+			}
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+#endif /* CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED */
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c b/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c
new file mode 100644
index 0000000..ff12f28
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/thread.c
@@ -0,0 +1,2531 @@
+/*
+ * Copyright (C) 2001-2013 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2006-2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ * Copyright (C) 2001-2013 The Xenomai project <http://www.xenomai.org>
+ *
+ * SMP support Copyright (C) 2004 The HYADES project <http://www.hyades-itea.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/kthread.h>
+#include <linux/wait.h>
+#include <linux/signal.h>
+#include <linux/pid.h>
+#include <linux/sched.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/stat.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/select.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/thread.h>
+#include <pipeline/kevents.h>
+#include <pipeline/inband_work.h>
+#include <pipeline/sched.h>
+#include <trace/events/cobalt-core.h>
+#include "debug.h"
+
+static DECLARE_WAIT_QUEUE_HEAD(join_all);
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_thread Thread services
+ * @{
+ */
+
+static void timeout_handler(struct xntimer *timer)
+{
+	struct xnthread *thread = container_of(timer, struct xnthread, rtimer);
+
+	xnthread_set_info(thread, XNTIMEO);	/* Interrupts are off. */
+	xnthread_resume(thread, XNDELAY);
+}
+
+static void periodic_handler(struct xntimer *timer)
+{
+	struct xnthread *thread = container_of(timer, struct xnthread, ptimer);
+	/*
+	 * Prevent unwanted round-robin, and do not wake up threads
+	 * blocked on a resource.
+	 */
+	if (xnthread_test_state(thread, XNDELAY|XNPEND) == XNDELAY)
+		xnthread_resume(thread, XNDELAY);
+
+	/*
+	 * The periodic thread might have migrated to another CPU
+	 * while passive, fix the timer affinity if need be.
+	 */
+	xntimer_set_affinity(&thread->ptimer, thread->sched);
+}
+
+static inline void enlist_new_thread(struct xnthread *thread)
+{				/* nklock held, irqs off */
+	list_add_tail(&thread->glink, &nkthreadq);
+	cobalt_nrthreads++;
+	xnvfile_touch_tag(&nkthreadlist_tag);
+}
+
+struct kthread_arg {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct xnthread *thread;
+	struct completion *done;
+};
+
+static void do_parent_wakeup(struct pipeline_inband_work *inband_work)
+{
+	struct kthread_arg *ka;
+
+	ka = container_of(inband_work, struct kthread_arg, inband_work);
+	complete(ka->done);
+}
+
+static inline void init_kthread_info(struct xnthread *thread)
+{
+	struct cobalt_threadinfo *p;
+
+	p = pipeline_current();
+	p->thread = thread;
+	p->process = NULL;
+}
+
+static int map_kthread(struct xnthread *thread, struct kthread_arg *ka)
+{
+	int ret;
+	spl_t s;
+
+	if (xnthread_test_state(thread, XNUSER))
+		return -EINVAL;
+
+	if (xnthread_current() || xnthread_test_state(thread, XNMAPPED))
+		return -EBUSY;
+
+	thread->u_window = NULL;
+	xnthread_pin_initial(thread);
+
+	pipeline_init_shadow_tcb(thread);
+	xnthread_suspend(thread, XNRELAX, XN_INFINITE, XN_RELATIVE, NULL);
+	init_kthread_info(thread);
+	xnthread_set_state(thread, XNMAPPED);
+	xndebug_shadow_init(thread);
+	xnthread_run_handler(thread, map_thread);
+	pipeline_enable_kevents();
+
+	/*
+	 * CAUTION: Soon after xnthread_init() has returned,
+	 * xnthread_start() is commonly invoked from the root domain,
+	 * therefore the call site may expect the started kernel
+	 * shadow to preempt immediately. As a result of such
+	 * assumption, start attributes (struct xnthread_start_attr)
+	 * are often laid on the caller's stack.
+	 *
+	 * For this reason, we raise the completion signal to wake up
+	 * the xnthread_init() caller only once the emerging thread is
+	 * hardened, and __never__ before that point. Since we run
+	 * over the Xenomai domain upon return from xnthread_harden(),
+	 * we schedule a virtual interrupt handler in the root domain
+	 * to signal the completion object.
+	 */
+	xnthread_resume(thread, XNDORMANT);
+	ret = xnthread_harden();
+
+	trace_cobalt_lostage_request("wakeup", current);
+
+	ka->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*ka, do_parent_wakeup);
+	pipeline_post_inband_work(ka);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	enlist_new_thread(thread);
+	/*
+	 * Make sure xnthread_start() did not slip in from another CPU
+	 * while we were back from wakeup_parent().
+	 */
+	if (thread->entry == NULL)
+		xnthread_suspend(thread, XNDORMANT,
+				 XN_INFINITE, XN_RELATIVE, NULL);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	xnthread_test_cancel();
+
+	xntrace_pid(xnthread_host_pid(thread),
+		    xnthread_current_priority(thread));
+
+	return ret;
+}
+
+static int kthread_trampoline(void *arg)
+{
+	struct kthread_arg *ka = arg;
+	struct xnthread *thread = ka->thread;
+	struct sched_param param;
+	int ret, policy, prio;
+
+	/*
+	 * It only makes sense to create Xenomai kthreads with the
+	 * SCHED_FIFO, SCHED_NORMAL or SCHED_WEAK policies. So
+	 * anything that is not from Xenomai's RT class is assumed to
+	 * belong to SCHED_NORMAL linux-wise.
+	 */
+	if (thread->sched_class != &xnsched_class_rt) {
+		policy = SCHED_NORMAL;
+		prio = 0;
+	} else {
+		policy = SCHED_FIFO;
+		prio = normalize_priority(thread->cprio);
+	}
+
+	param.sched_priority = prio;
+	sched_setscheduler(current, policy, &param);
+
+	ret = map_kthread(thread, ka);
+	if (ret) {
+		printk(XENO_WARNING "failed to create kernel shadow %s\n",
+		       thread->name);
+		return ret;
+	}
+
+	trace_cobalt_shadow_entry(thread);
+
+	thread->entry(thread->cookie);
+
+	xnthread_cancel(thread);
+
+	return 0;
+}
+
+static inline int spawn_kthread(struct xnthread *thread)
+{
+	DECLARE_COMPLETION_ONSTACK(done);
+	struct kthread_arg ka = {
+		.thread = thread,
+		.done = &done
+	};
+	struct task_struct *p;
+
+	p = kthread_run(kthread_trampoline, &ka, "%s", thread->name);
+	if (IS_ERR(p))
+		return PTR_ERR(p);
+
+	wait_for_completion(&done);
+
+	return 0;
+}
+
+int __xnthread_init(struct xnthread *thread,
+		    const struct xnthread_init_attr *attr,
+		    struct xnsched *sched,
+		    struct xnsched_class *sched_class,
+		    const union xnsched_policy_param *sched_param)
+{
+	int flags = attr->flags, ret, gravity;
+
+	flags &= ~(XNSUSP|XNBOOST);
+#ifndef CONFIG_XENO_ARCH_FPU
+	flags &= ~XNFPU;
+#endif
+	if ((flags & XNROOT) == 0)
+		flags |= XNDORMANT;
+
+	if (attr->name)
+		ksformat(thread->name,
+			 sizeof(thread->name), "%s", attr->name);
+	else
+		ksformat(thread->name,
+			 sizeof(thread->name), "@%p", thread);
+
+	/*
+	 * We mirror the global user debug state into the per-thread
+	 * state, to speed up branch taking in lib/cobalt wherever
+	 * this needs to be tested.
+	 */
+	if (IS_ENABLED(CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP))
+		flags |= XNDEBUG;
+
+	thread->personality = attr->personality;
+	cpumask_and(&thread->affinity, &attr->affinity, &cobalt_cpu_affinity);
+	thread->sched = sched;
+	thread->state = flags;
+	thread->info = 0;
+	thread->local_info = 0;
+	thread->wprio = XNSCHED_IDLE_PRIO;
+	thread->cprio = XNSCHED_IDLE_PRIO;
+	thread->bprio = XNSCHED_IDLE_PRIO;
+	thread->lock_count = 0;
+	thread->rrperiod = XN_INFINITE;
+	thread->wchan = NULL;
+	thread->wwake = NULL;
+	thread->wcontext = NULL;
+	thread->res_count = 0;
+	thread->handle = XN_NO_HANDLE;
+	memset(&thread->stat, 0, sizeof(thread->stat));
+	thread->selector = NULL;
+	INIT_LIST_HEAD(&thread->glink);
+	INIT_LIST_HEAD(&thread->boosters);
+	/* These will be filled by xnthread_start() */
+	thread->entry = NULL;
+	thread->cookie = NULL;
+	init_completion(&thread->exited);
+	memset(xnthread_archtcb(thread), 0, sizeof(struct xnarchtcb));
+	memset(thread->sigarray, 0, sizeof(thread->sigarray));
+
+	gravity = flags & XNUSER ? XNTIMER_UGRAVITY : XNTIMER_KGRAVITY;
+	xntimer_init(&thread->rtimer, &nkclock, timeout_handler,
+		     sched, gravity);
+	xntimer_set_name(&thread->rtimer, thread->name);
+	xntimer_set_priority(&thread->rtimer, XNTIMER_HIPRIO);
+	xntimer_init(&thread->ptimer, &nkclock, periodic_handler,
+		     sched, gravity);
+	xntimer_set_name(&thread->ptimer, thread->name);
+	xntimer_set_priority(&thread->ptimer, XNTIMER_HIPRIO);
+
+	thread->base_class = NULL; /* xnsched_set_policy() will set it. */
+	ret = xnsched_init_thread(thread);
+	if (ret)
+		goto err_out;
+
+	ret = xnsched_set_policy(thread, sched_class, sched_param);
+	if (ret)
+		goto err_out;
+
+	if ((flags & (XNUSER|XNROOT)) == 0) {
+		ret = spawn_kthread(thread);
+		if (ret)
+			goto err_out;
+	}
+
+	return 0;
+
+err_out:
+	xntimer_destroy(&thread->rtimer);
+	xntimer_destroy(&thread->ptimer);
+
+	return ret;
+}
+
+void xnthread_deregister(struct xnthread *thread)
+{
+	if (thread->handle != XN_NO_HANDLE)
+		xnregistry_remove(thread->handle);
+
+	thread->handle = XN_NO_HANDLE;
+}
+
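+/*
+ * Render the state word @status as a string of 1-letter labels taken
+ * from XNTHREAD_STATE_LABELS into @buf, which is at least @size bytes
+ * long, and return @buf.
+ */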
+char *xnthread_format_status(unsigned long status, char *buf, int size)
+{
+	static const char labels[] = XNTHREAD_STATE_LABELS;
+	int pos, c, mask;
+	char *wp;
+
+	for (mask = (int)status, pos = 0, wp = buf;
+	     mask != 0 && wp - buf < size - 2;	/* 1-letter label + \0 */
+	     mask >>= 1, pos++) {
+		if ((mask & 1) == 0)
+			continue;
+
+		c = labels[pos];
+
+		switch (1 << pos) {
+		case XNROOT:
+			c = 'R'; /* Always mark root as runnable. */
+			break;
+		case XNREADY:
+			if (status & XNROOT)
+				continue; /* Already reported on XNROOT. */
+			break;
+		case XNDELAY:
+			/*
+			 * Only report genuine delays here, not timed
+			 * waits for resources.
+			 */
+			if (status & XNPEND)
+				continue;
+			break;
+		case XNPEND:
+			/* Report timed waits with lowercase symbol. */
+			if (status & XNDELAY)
+				c |= 0x20;
+			break;
+		default:
+			if (c == '.')
+				continue;
+		}
+		*wp++ = c;
+	}
+
+	*wp = '\0';
+
+	return buf;
+}
+
+pid_t xnthread_host_pid(struct xnthread *thread)
+{
+	if (xnthread_test_state(thread, XNROOT))
+		return 0;
+	if (!xnthread_host_task(thread))
+		return -1;
+
+	return task_pid_nr(xnthread_host_task(thread));
+}
+
+int xnthread_set_clock(struct xnthread *thread, struct xnclock *newclock)
+{
+	spl_t s;
+
+	if (thread == NULL) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+	}
+
+	/* Change the clock the thread's periodic timer is paced by. */
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_set_clock(&thread->ptimer, newclock);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_clock);
+
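+/*
+ * Return the time remaining until @thread's current delay expires,
+ * relative to the base date @ns, or 0 if no timed wait is in effect.
+ * A timeout which already elapsed is reported as 1 ns.
+ */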
+xnticks_t xnthread_get_timeout(struct xnthread *thread, xnticks_t ns)
+{
+	struct xntimer *timer;
+	xnticks_t timeout;
+
+	if (!xnthread_test_state(thread,XNDELAY))
+		return 0LL;
+
+	if (xntimer_running_p(&thread->rtimer))
+		timer = &thread->rtimer;
+	else if (xntimer_running_p(&thread->ptimer))
+		timer = &thread->ptimer;
+	else
+		return 0LL;
+
+	timeout = xntimer_get_date(timer);
+	if (timeout <= ns)
+		return 1;
+
+	return timeout - ns;
+}
+EXPORT_SYMBOL_GPL(xnthread_get_timeout);
+
+xnticks_t xnthread_get_period(struct xnthread *thread)
+{
+	xnticks_t period = 0;
+	/*
+	 * The current thread period might be:
+	 * - the value of the timer interval for periodic threads (ns/ticks)
+	 * - or, the value of the allotted round-robin quantum (ticks)
+	 * - or zero, meaning "no periodic activity".
+	 */
+	if (xntimer_running_p(&thread->ptimer))
+		period = xntimer_interval(&thread->ptimer);
+	else if (xnthread_test_state(thread,XNRRB))
+		period = thread->rrperiod;
+
+	return period;
+}
+EXPORT_SYMBOL_GPL(xnthread_get_period);
+
+void xnthread_prepare_wait(struct xnthread_wait_context *wc)
+{
+	struct xnthread *curr = xnthread_current();
+
+	wc->posted = 0;
+	curr->wcontext = wc;
+}
+EXPORT_SYMBOL_GPL(xnthread_prepare_wait);
+
+static inline void release_all_ownerships(struct xnthread *curr)
+{
+	struct xnsynch *synch, *tmp;
+
+	/*
+	 * Release all the ownerships obtained by a thread on
+	 * synchronization objects. This routine must be entered
+	 * interrupts off.
+	 */
+	xnthread_for_each_booster_safe(synch, tmp, curr) {
+		xnsynch_release(synch, curr);
+		if (synch->cleanup)
+			synch->cleanup(synch);
+	}
+}
+
+static inline void cleanup_tcb(struct xnthread *curr) /* nklock held, irqs off */
+{
+	list_del(&curr->glink);
+	cobalt_nrthreads--;
+	xnvfile_touch_tag(&nkthreadlist_tag);
+
+	if (xnthread_test_state(curr, XNREADY)) {
+		XENO_BUG_ON(COBALT, xnthread_test_state(curr, XNTHREAD_BLOCK_BITS));
+		xnsched_dequeue(curr);
+		xnthread_clear_state(curr, XNREADY);
+	}
+
+	if (xnthread_test_state(curr, XNPEND))
+		xnsynch_forget_sleeper(curr);
+
+	xnthread_set_state(curr, XNZOMBIE);
+	/*
+	 * NOTE: we must be running over the root thread, or @curr
+	 * is dormant, which means that we don't risk sched->curr to
+	 * disappear due to voluntary rescheduling while holding the
+	 * nklock, despite @curr bears the zombie bit.
+	 */
+	release_all_ownerships(curr);
+
+	pipeline_finalize_thread(curr);
+	xnsched_forget(curr);
+	xnthread_deregister(curr);
+}
+
+void __xnthread_cleanup(struct xnthread *curr)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xntimer_destroy(&curr->rtimer);
+	xntimer_destroy(&curr->ptimer);
+
+	if (curr->selector) {
+		xnselector_destroy(curr->selector);
+		curr->selector = NULL;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+	cleanup_tcb(curr);
+	xnlock_put_irqrestore(&nklock, s);
+
+	/* Wake up the joiner if any (we can't have more than one). */
+	complete(&curr->exited);
+
+	/* Notify our exit to xnthread_killall() if need be. */
+	if (waitqueue_active(&join_all))
+		wake_up(&join_all);
+
+	/* Finalize last since this incurs releasing the TCB. */
+	xnthread_run_handler_stack(curr, finalize_thread);
+}
+
+/*
+ * Unwinds xnthread_init() ops for an unmapped thread.  Since the
+ * latter must be dormant, it can't be part of any runqueue.
+ */
+void __xnthread_discard(struct xnthread *thread)
+{
+	spl_t s;
+
+	secondary_mode_only();
+
+	xntimer_destroy(&thread->rtimer);
+	xntimer_destroy(&thread->ptimer);
+
+	xnlock_get_irqsave(&nklock, s);
+	if (!list_empty(&thread->glink)) {
+		list_del(&thread->glink);
+		cobalt_nrthreads--;
+		xnvfile_touch_tag(&nkthreadlist_tag);
+	}
+	xnthread_deregister(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(__xnthread_discard);
+
+/**
+ * @fn void xnthread_init(struct xnthread *thread,const struct xnthread_init_attr *attr,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param)
+ * @brief Initialize a new thread.
+ *
+ * Initializes a new thread. The thread is left dormant until it is
+ * actually started by xnthread_start().
+ *
+ * @param thread The address of a thread descriptor Cobalt will use to
+ * store the thread-specific data.  This descriptor must always be
+ * valid while the thread is active therefore it must be allocated in
+ * permanent memory. @warning Some architectures may require the
+ * descriptor to be properly aligned in memory; this is an additional
+ * reason for descriptors not to be laid in the program stack where
+ * alignment constraints might not always be satisfied.
+ *
+ * @param attr A pointer to an attribute block describing the initial
+ * properties of the new thread. Members of this structure are defined
+ * as follows:
+ *
+ * - name: An ASCII string standing for the symbolic name of the
+ * thread. This name is copied to a safe place into the thread
+ * descriptor. This name might be used in various situations by Cobalt
+ * for issuing human-readable diagnostic messages, so it is usually a
+ * good idea to provide a sensible value here.  NULL is fine though
+ * and means "anonymous".
+ *
+ * - flags: A set of creation flags affecting the operation. The
+ * following flags can be part of this bitmask:
+ *
+ *   - XNSUSP creates the thread in a suspended state. In such a case,
+ * the thread shall be explicitly resumed using the xnthread_resume()
+ * service for its execution to actually begin, additionally to
+ * issuing xnthread_start() for it. This flag can also be specified
+ * when invoking xnthread_start() as a starting mode.
+ *
+ * - XNUSER shall be set if @a thread will be mapped over an existing
+ * user-space task. Otherwise, a new kernel host task is created, then
+ * paired with the new Xenomai thread.
+ *
+ * - XNFPU (enable FPU) tells Cobalt that the new thread may use the
+ * floating-point unit. XNFPU is implicitly assumed for user-space
+ * threads even if not set in @a flags.
+ *
+ * - affinity: The processor affinity of this thread. Passing
+ * CPU_MASK_ALL means "any cpu" from the allowed core affinity mask
+ * (cobalt_cpu_affinity). Passing an empty set is invalid.
+ *
+ * @param sched_class The initial scheduling class the new thread
+ * should be assigned to.
+ *
+ * @param sched_param The initial scheduling parameters to set for the
+ * new thread; @a sched_param must be valid within the context of @a
+ * sched_class.
+ *
+ * @return 0 is returned on success. Otherwise, the following error
+ * code indicates the cause of the failure:
+ *
+ * - -EINVAL is returned if @a attr->flags has invalid bits set, or @a
+ *   attr->affinity is invalid (e.g. empty).
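+ *
+ * A hedged initialization sketch; the personality descriptor and
+ * priority value below are illustrative assumptions:
+ *
+ * @code
+ * static struct xnthread worker;	// Must live in permanent memory.
+ * struct xnthread_init_attr iattr = {
+ *	.name = "worker",
+ *	.flags = 0,
+ *	.personality = &my_personality,	// Hypothetical descriptor.
+ *	.affinity = CPU_MASK_ALL,
+ * };
+ * union xnsched_policy_param param = { .rt = { .prio = 50 } };
+ * int ret;
+ *
+ * ret = xnthread_init(&worker, &iattr, &xnsched_class_rt, &param);
+ * @endcode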
+ *
+ * @coretags{secondary-only}
+ */
+int xnthread_init(struct xnthread *thread,
+		  const struct xnthread_init_attr *attr,
+		  struct xnsched_class *sched_class,
+		  const union xnsched_policy_param *sched_param)
+{
+	struct xnsched *sched;
+	cpumask_t affinity;
+	int ret;
+
+	if (attr->flags & ~(XNFPU | XNUSER | XNSUSP))
+		return -EINVAL;
+
+	/*
+	 * Pick an initial CPU for the new thread which is part of its
+	 * affinity mask, and therefore also part of the supported
+	 * CPUs. This CPU may change in pin_to_initial_cpu().
+	 */
+	cpumask_and(&affinity, &attr->affinity, &cobalt_cpu_affinity);
+	if (cpumask_empty(&affinity))
+		return -EINVAL;
+
+	sched = xnsched_struct(cpumask_first(&affinity));
+
+	ret = __xnthread_init(thread, attr, sched, sched_class, sched_param);
+	if (ret)
+		return ret;
+
+	trace_cobalt_thread_init(thread, attr, sched_class);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_init);
+
+/**
+ * @fn int xnthread_start(struct xnthread *thread,const struct xnthread_start_attr *attr)
+ * @brief Start a newly created thread.
+ *
+ * Starts a (newly) created thread, scheduling it for the first
+ * time. This call releases the target thread from the XNDORMANT
+ * state. This service also sets the initial mode for the new thread.
+ *
+ * @param thread The descriptor address of the started thread which
+ * must have been previously initialized by a call to xnthread_init().
+ *
+ * @param attr A pointer to an attribute block describing the
+ * execution properties of the new thread. Members of this structure
+ * are defined as follows:
+ *
+ * - mode: The initial thread mode. The following flags can be part of
+ * this bitmask:
+ *
+ *   - XNLOCK causes the thread to lock the scheduler when it starts.
+ * The target thread will have to call the xnsched_unlock()
+ * service to unlock the scheduler. A non-preemptible thread may still
+ * block, in which case, the lock is reasserted when the thread is
+ * scheduled back in.
+ *
+ *   - XNSUSP makes the thread start in a suspended state. In such a
+ * case, the thread will have to be explicitly resumed using the
+ * xnthread_resume() service for its execution to actually begin.
+ *
+ * - entry: The address of the thread's body routine. In other words,
+ * it is the thread entry point.
+ *
+ * - cookie: A user-defined opaque cookie Cobalt will pass to the
+ * emerging thread as the sole argument of its entry point.
+ *
+ * @retval 0 if @a thread could be started ;
+ *
+ * @retval -EBUSY if @a thread was not dormant or stopped ;
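+ *
+ * A hedged start-up sketch following the xnthread_init() example,
+ * where worker_entry is a hypothetical void (*)(void *) routine:
+ *
+ * @code
+ * struct xnthread_start_attr sattr = {
+ *	.mode = 0,
+ *	.entry = worker_entry,
+ *	.cookie = NULL,
+ * };
+ * int ret = xnthread_start(&worker, &sattr);
+ * @endcode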
+ *
+ * @coretags{task-unrestricted, might-switch}
+ */
+int xnthread_start(struct xnthread *thread,
+		   const struct xnthread_start_attr *attr)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (!xnthread_test_state(thread, XNDORMANT)) {
+		xnlock_put_irqrestore(&nklock, s);
+		return -EBUSY;
+	}
+
+	xnthread_set_state(thread, attr->mode & (XNTHREAD_MODE_BITS | XNSUSP));
+	thread->entry = attr->entry;
+	thread->cookie = attr->cookie;
+	if (attr->mode & XNLOCK)
+		thread->lock_count = 1;
+
+	/*
+	 * A user-space thread starts immediately Cobalt-wise since we
+	 * already have an underlying Linux context for it, so we can
+	 * enlist it now to make it visible from the /proc interface.
+	 */
+	if (xnthread_test_state(thread, XNUSER))
+		enlist_new_thread(thread);
+
+	trace_cobalt_thread_start(thread);
+
+	xnthread_resume(thread, XNDORMANT);
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_start);
+
+/**
+ * @fn void xnthread_set_mode(int clrmask,int setmask)
+ * @brief Change control mode of the current thread.
+ *
+ * Change the control mode of the current thread. The control mode
+ * affects several behaviours of the Cobalt core regarding this
+ * thread.
+ *
+ * @param clrmask Clears the corresponding bits from the control mode
+ * before setmask is applied. The scheduler lock held by the current
+ * thread can be forcibly released by passing the XNLOCK bit in this
+ * mask. In this case, the lock nesting count is also reset to zero.
+ *
+ * @param setmask The new thread mode. The following flags may be set
+ * in this bitmask:
+ *
+ * - XNLOCK makes the current thread non-preemptible by other threads.
+ * Unless XNTRAPLB is also set for the thread, the latter may still
+ * block, dropping the lock temporarily, in which case, the lock will
+ * be reacquired automatically when the thread resumes execution.
+ *
+ * - XNWARN enables debugging notifications for the current thread.  A
+ * SIGDEBUG (Linux-originated) signal is sent when the following
+ * atypical or abnormal behavior is detected:
+ *
+ *    - the current thread switches to secondary mode. Such notification
+ *      comes in handy for detecting spurious relaxes.
+ *
+ *    - CONFIG_XENO_OPT_DEBUG_MUTEX_RELAXED is enabled in the kernel
+ *      configuration, and the current thread is sleeping on a Cobalt
+ *      mutex currently owned by a thread running in secondary mode,
+ *      which reveals a priority inversion.
+ *
+ *    - the current thread is about to sleep while holding a Cobalt
+ *      mutex, and CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP is enabled in the
+ *      kernel configuration. Blocking for acquiring a mutex does not
+ *      trigger such a signal though.
+ *
+ *    - the current thread has both XNTRAPLB and XNLOCK set, and
+ *      attempts to block on a Cobalt service, which would cause a
+ *      lock break.
+ *
+ * - XNTRAPLB disallows breaking the scheduler lock. In the default
+ * case, a thread which holds the scheduler lock is allowed to drop it
+ * temporarily for sleeping. If this mode bit is set, such thread
+ * would return immediately with XNBREAK set from
+ * xnthread_suspend(). If XNWARN is set for the current thread,
+ * SIGDEBUG is sent in addition to raising the break condition.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note Setting @a clrmask and @a setmask to zero leads to a nop,
+ * in which case xnthread_set_mode() returns the current mode.
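+ *
+ * A minimal sketch enabling debug notifications around a watched
+ * section:
+ *
+ * @code
+ * int oldmode = xnthread_set_mode(0, XNWARN);
+ * // ... code watched for spurious relaxes ...
+ * if (!(oldmode & XNWARN))
+ *	xnthread_set_mode(XNWARN, 0);	// Restore the previous mode.
+ * @endcode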
+ */
+int xnthread_set_mode(int clrmask, int setmask)
+{
+	int oldmode, lock_count;
+	struct xnthread *curr;
+	spl_t s;
+
+	primary_mode_only();
+
+	xnlock_get_irqsave(&nklock, s);
+	curr = xnsched_current_thread();
+	oldmode = xnthread_get_state(curr) & XNTHREAD_MODE_BITS;
+	lock_count = curr->lock_count;
+	xnthread_clear_state(curr, clrmask & XNTHREAD_MODE_BITS);
+	xnthread_set_state(curr, setmask & XNTHREAD_MODE_BITS);
+	trace_cobalt_thread_set_mode(curr);
+
+	if (setmask & XNLOCK) {
+		if (lock_count == 0)
+			xnsched_lock();
+	} else if (clrmask & XNLOCK) {
+		if (lock_count > 0) {
+			curr->lock_count = 0;
+			xnthread_clear_localinfo(curr, XNLBALERT);
+			xnsched_run();
+		}
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	if (lock_count > 0)
+		oldmode |= XNLOCK;
+
+	return oldmode;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_mode);
+
+/**
+ * @fn void xnthread_suspend(struct xnthread *thread, int mask,xnticks_t timeout, xntmode_t timeout_mode,struct xnsynch *wchan)
+ * @brief Suspend a thread.
+ *
+ * Suspends the execution of a thread according to a given suspensive
+ * condition. This thread will not be eligible for scheduling until
+ * all the pending suspensive conditions set by this service are
+ * removed by one or more calls to xnthread_resume().
+ *
+ * @param thread The descriptor address of the suspended thread.
+ *
+ * @param mask The suspension mask specifying the suspensive condition
+ * to add to the thread's wait mask. Possible values usable by the
+ * caller are:
+ *
+ * - XNSUSP. This flag forcibly suspends a thread, regardless of any
+ * resource to wait for. A reverse call to xnthread_resume()
+ * specifying the XNSUSP bit must be issued to remove this condition,
+ * which is cumulative with other suspension bits. @a wchan should be
+ * NULL when using this suspending mode.
+ *
+ * - XNDELAY. This flag denotes a counted delay wait whose duration is
+ * defined by the value of the @a timeout parameter.
+ *
+ * - XNPEND. This flag denotes a wait for a synchronization object to
+ * be signaled. The @a wchan argument must point to this object. A
+ * timeout value can be passed to bound the wait. This suspending mode
+ * should not be used directly by the client interface, but rather
+ * through the xnsynch_sleep_on() call.
+ *
+ * @param timeout The timeout which may be used to limit the time the
+ * thread pends on a resource. This value is a wait time given in
+ * nanoseconds. It can either be relative, absolute monotonic, or
+ * absolute adjustable depending on @a timeout_mode.
+ *
+ * Passing XN_INFINITE @b and setting @a timeout_mode to XN_RELATIVE
+ * specifies an unbounded wait. All other values are used to
+ * initialize a watchdog timer. If the current operation mode of the
+ * system timer is oneshot and @a timeout elapses before
+ * xnthread_suspend() has completed, then the target thread will not
+ * be suspended, and this routine leads to a null effect.
+ *
+ * @param timeout_mode The mode of the @a timeout parameter. It can
+ * either be set to XN_RELATIVE, XN_ABSOLUTE, or XN_REALTIME (see also
+ * xntimer_start()).
+ *
+ * @param wchan The address of a pended resource. This parameter is
+ * used internally by the synchronization object implementation code
+ * to specify on which object the suspended thread pends. NULL is a
+ * legitimate value when this parameter does not apply to the current
+ * suspending mode (e.g. XNSUSP).
+ *
+ * @note If the target thread has received a Linux-originated signal,
+ * then this service immediately exits without suspending the thread,
+ * but raises the XNBREAK condition in its information mask.
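+ *
+ * A hedged sketch forcibly holding a thread (@a t is hypothetical):
+ *
+ * @code
+ * xnthread_suspend(t, XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
+ * // ... later on, from another context ...
+ * xnthread_resume(t, XNSUSP);
+ * @endcode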
+ *
+ * @coretags{unrestricted, might-switch}
+ */
+void xnthread_suspend(struct xnthread *thread, int mask,
+		      xnticks_t timeout, xntmode_t timeout_mode,
+		      struct xnsynch *wchan)
+{
+	unsigned long oldstate;
+	struct xnsched *sched;
+	spl_t s;
+
+	/* No, you certainly do not want to suspend the root thread. */
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+	/* No built-in support for conjunctive wait. */
+	XENO_BUG_ON(COBALT, wchan && thread->wchan);
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_suspend(thread, mask, timeout, timeout_mode, wchan);
+
+	sched = thread->sched;
+	oldstate = thread->state;
+
+	/*
+	 * If attempting to suspend a runnable thread which is pending
+	 * a forced switch to secondary mode (XNKICKED), just raise
+	 * the XNBREAK status and return immediately, except if we
+	 * are precisely doing such switch by applying XNRELAX.
+	 *
+	 * In the latter case, we also make sure to clear XNKICKED,
+	 * since we won't go through prepare_for_signal() once
+	 * relaxed.
+	 */
+	if (likely((oldstate & XNTHREAD_BLOCK_BITS) == 0)) {
+		if (likely((mask & XNRELAX) == 0)) {
+			if (xnthread_test_info(thread, XNKICKED))
+				goto abort;
+			if (thread == sched->curr &&
+			    thread->lock_count > 0 &&
+			    (oldstate & XNTRAPLB) != 0)
+				goto lock_break;
+		}
+		/*
+		 * Do not destroy the info left behind by yet unprocessed
+		 * wakeups when suspending a remote thread.
+		 */
+		if (thread == sched->curr)
+			xnthread_clear_info(thread, XNRMID|XNTIMEO|XNBREAK|
+						    XNWAKEN|XNROBBED|XNKICKED);
+	}
+
+	/*
+	 * Don't start the timer for a thread delayed indefinitely.
+	 */
+	if (timeout != XN_INFINITE || timeout_mode != XN_RELATIVE) {
+		xntimer_set_affinity(&thread->rtimer, thread->sched);
+		if (xntimer_start(&thread->rtimer, timeout, XN_INFINITE,
+				  timeout_mode)) {
+			/* (absolute) timeout value in the past, bail out. */
+			if (wchan) {
+				thread->wchan = wchan;
+				xnsynch_forget_sleeper(thread);
+			}
+			xnthread_set_info(thread, XNTIMEO);
+			goto out;
+		}
+		xnthread_set_state(thread, XNDELAY);
+	}
+
+	if (oldstate & XNREADY) {
+		xnsched_dequeue(thread);
+		xnthread_clear_state(thread, XNREADY);
+	}
+
+	xnthread_set_state(thread, mask);
+
+	/*
+	 * We must make sure that we don't clear the wait channel if a
+	 * thread is first blocked (wchan != NULL) then forcibly
+	 * suspended (wchan == NULL), since these are conjunctive
+	 * conditions.
+	 */
+	if (wchan)
+		thread->wchan = wchan;
+
+	if (likely(thread == sched->curr)) {
+		xnsched_set_resched(sched);
+		/*
+		 * Transition to secondary mode (XNRELAX) is a
+		 * separate path which is only available to
+		 * xnthread_relax(). Using __xnsched_run() there for
+		 * rescheduling allows us to break the scheduler lock
+		 * temporarily.
+		 */
+		if (unlikely(mask & XNRELAX)) {
+			pipeline_leave_oob_unlock();
+			__xnsched_run(sched);
+			return;
+		}
+		/*
		 * If the thread is running on a remote CPU,
+		 * xnsched_run() will trigger the IPI as required.  In
+		 * this case, sched refers to a remote runqueue, so
+		 * make sure to always kick the rescheduling procedure
+		 * for the local one.
+		 */
+		__xnsched_run(xnsched_current());
+		goto out;
+	}
+
+	/*
+	 * Ok, this one is an interesting corner case, which requires
+	 * a bit of background first. Here, we handle the case of
+	 * suspending a _relaxed_ user shadow which is _not_ the
+	 * current thread.
+	 *
+	 * The net effect is that we are attempting to stop the
+	 * shadow thread for Cobalt, whilst this thread is actually
+	 * running some code under the control of the Linux scheduler
+	 * (i.e. it's relaxed).
+	 *
+	 * To make this possible, we force the target Linux task to
+	 * migrate back to the Xenomai domain by sending it a
+	 * SIGSHADOW signal, which the interface libraries trap for
+	 * this specific internal purpose; its handler is expected to
+	 * call back Cobalt's migration service.
+	 *
+	 * By forcing this migration, we make sure that Cobalt
+	 * controls, hence properly stops, the target thread according
+	 * to the requested suspension condition. Otherwise, the
+	 * shadow thread in secondary mode would just keep running
+	 * into the Linux domain, thus breaking the most common
+	 * assumptions regarding suspended threads.
+	 *
+	 * We only care for threads that are not current, and for
+	 * XNSUSP, XNDELAY, XNDORMANT and XNHELD conditions, because:
+	 *
+	 * - There is no point in dealing with a relaxed thread which
+	 * is current, since personalities have to ask for primary
+	 * mode switch when processing any syscall which may block the
+	 * caller (i.e. __xn_exec_primary).
+	 *
+	 * - among all blocking bits (XNTHREAD_BLOCK_BITS), only
+	 * XNSUSP, XNDELAY, XNHELD and XNDBGSTOP may be applied by the
+	 * current thread to a non-current thread. XNPEND is always
+	 * added by the caller to its own state, XNMIGRATE, XNRELAX
+	 * and XNDBGSTOP have special semantics escaping this issue.
+	 *
+	 * We don't signal threads which are already in a dormant
+	 * state, since they are suspended by definition.
+	 */
+	if (((oldstate & (XNTHREAD_BLOCK_BITS|XNUSER)) == (XNRELAX|XNUSER)) &&
+	    (mask & (XNDELAY | XNSUSP | XNHELD)) != 0)
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HARDEN);
+out:
+	xnlock_put_irqrestore(&nklock, s);
+	return;
+
+lock_break:
+	/* NOTE: thread is current */
+	if (xnthread_test_state(thread, XNWARN) &&
+	    !xnthread_test_localinfo(thread, XNLBALERT)) {
+		xnthread_set_info(thread, XNKICKED);
+		xnthread_set_localinfo(thread, XNLBALERT);
+		__xnthread_signal(thread, SIGDEBUG, SIGDEBUG_LOCK_BREAK);
+	}
+abort:
+	if (wchan) {
+		thread->wchan = wchan;
+		xnsynch_forget_sleeper(thread);
+	}
+	xnthread_clear_info(thread, XNRMID | XNTIMEO);
+	xnthread_set_info(thread, XNBREAK);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_suspend);
+
+/**
+ * @fn void xnthread_resume(struct xnthread *thread,int mask)
+ * @brief Resume a thread.
+ *
+ * Resumes the execution of a thread previously suspended by one or
+ * more calls to xnthread_suspend(). This call removes a suspensive
+ * condition affecting the target thread. When all suspensive
+ * conditions are gone, the thread is left in a READY state at which
+ * point it becomes eligible anew for scheduling.
+ *
+ * @param thread The descriptor address of the resumed thread.
+ *
+ * @param mask The suspension mask specifying the suspensive condition
+ * to remove from the thread's wait mask. Possible values usable by
+ * the caller are:
+ *
+ * - XNSUSP. This flag removes the explicit suspension condition. This
+ * condition might be additive to the XNPEND condition.
+ *
+ * - XNDELAY. This flag removes the counted delay wait condition.
+ *
+ * - XNPEND. This flag removes the resource wait condition. If a
+ * watchdog is armed, it is automatically disarmed by this
+ * call. Unlike the two previous conditions, only the current thread
+ * can set this condition for itself, i.e. no thread can force another
+ * one to pend on a resource.
+ *
+ * When the thread is eventually resumed by one or more calls to
+ * xnthread_resume(), the caller of xnthread_suspend() in the awakened
+ * thread that suspended itself should check for the following bits in
+ * its own information mask to determine what caused its wake up:
+ *
+ * - XNRMID means that the caller must assume that the pended
+ * synchronization object has been destroyed (see xnsynch_flush()).
+ *
+ * - XNTIMEO means that the delay elapsed, or the watchdog went off
+ * before the corresponding synchronization object was signaled.
+ *
+ * - XNBREAK means that the wait has been forcibly broken by a call to
+ * xnthread_unblock().
+ *
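+ * A sketch of the checks an awakened sleeper may perform (assuming
+ * @c curr is the current thread descriptor; the error mapping shown
+ * is illustrative only):
+ *
+ * @code
+ * if (xnthread_test_info(curr, XNRMID))
+ *         return -EIDRM;      // wait object deleted
+ * if (xnthread_test_info(curr, XNTIMEO))
+ *         return -ETIMEDOUT;  // watchdog went off
+ * if (xnthread_test_info(curr, XNBREAK))
+ *         return -EINTR;      // forcibly unblocked
+ * @endcode
+ *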
+ * @coretags{unrestricted, might-switch}
+ */
+void xnthread_resume(struct xnthread *thread, int mask)
+{
+	unsigned long oldstate;
+	struct xnsched *sched;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_resume(thread, mask);
+
+	xntrace_pid(xnthread_host_pid(thread), xnthread_current_priority(thread));
+
+	sched = thread->sched;
+	oldstate = thread->state;
+
+	if ((oldstate & XNTHREAD_BLOCK_BITS) == 0) {
+		if (oldstate & XNREADY)
+			xnsched_dequeue(thread);
+		goto enqueue;
+	}
+
+	/* Clear the specified block bit(s) */
+	xnthread_clear_state(thread, mask);
+
+	/*
+	 * If XNDELAY was set in the clear mask, xnthread_unblock()
+	 * was called for the thread, or a timeout has elapsed. In the
+	 * latter case, stopping the timer is a no-op.
+	 */
+	if (mask & XNDELAY)
+		xntimer_stop(&thread->rtimer);
+
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS))
+		goto clear_wchan;
+
+	if (mask & XNDELAY) {
+		mask = xnthread_test_state(thread, XNPEND);
+		if (mask == 0)
+			goto unlock_and_exit;
+		if (thread->wchan)
+			xnsynch_forget_sleeper(thread);
+		goto recheck_state;
+	}
+
+	if (xnthread_test_state(thread, XNDELAY)) {
+		if (mask & XNPEND) {
+			/*
+			 * A resource became available to the thread.
+			 * Cancel the watchdog timer.
+			 */
+			xntimer_stop(&thread->rtimer);
+			xnthread_clear_state(thread, XNDELAY);
+		}
+		goto recheck_state;
+	}
+
+	/*
+	 * The thread is still suspended, but is no more pending on a
+	 * resource.
+	 */
+	if ((mask & XNPEND) != 0 && thread->wchan)
+		xnsynch_forget_sleeper(thread);
+
+	goto unlock_and_exit;
+
+recheck_state:
+	if (xnthread_test_state(thread, XNTHREAD_BLOCK_BITS))
+		goto unlock_and_exit;
+
+clear_wchan:
+	if ((mask & ~XNDELAY) != 0 && thread->wchan != NULL)
+		/*
+		 * If the thread was actually suspended, clear the
+		 * wait channel. This lets requests like
+		 * xnthread_suspend(thread, XNDELAY, ...) skip this
+		 * code when the suspended thread is woken up while
+		 * undergoing a simple delay.
+		 */
+		xnsynch_forget_sleeper(thread);
+
+	if (unlikely((oldstate & mask) & XNHELD)) {
+		xnsched_requeue(thread);
+		goto ready;
+	}
+enqueue:
+	xnsched_enqueue(thread);
+ready:
+	xnthread_set_state(thread, XNREADY);
+	xnsched_set_resched(sched);
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_resume);
+
+/**
+ * @fn int xnthread_unblock(struct xnthread *thread)
+ * @brief Unblock a thread.
+ *
+ * Breaks the thread out of any wait it is currently in.  This call
+ * removes the XNDELAY and XNPEND suspensive conditions previously put
+ * by xnthread_suspend() on the target thread. If all suspensive
+ * conditions are gone, the thread is left in a READY state at which
+ * point it becomes eligible anew for scheduling.
+ *
+ * @param thread The descriptor address of the unblocked thread.
+ *
+ * This call does not release the thread from the XNSUSP, XNRELAX,
+ * XNDORMANT or XNHELD suspensive conditions.
+ *
+ * When the thread resumes execution, the XNBREAK bit is set in the
+ * unblocked thread's information mask. Unblocking a non-blocked
+ * thread is perfectly harmless.
+ *
+ * @return non-zero is returned if the thread was actually unblocked
+ * from a pending wait state, 0 otherwise.
+ *
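+ * A minimal sketch (assuming a valid @a thread descriptor):
+ *
+ * @code
+ * if (xnthread_unblock(thread))
+ *         xnsched_run(); // let the unblocked thread resume
+ * @endcode
+ *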
+ * @coretags{unrestricted, might-switch}
+ */
+int xnthread_unblock(struct xnthread *thread)
+{
+	int ret = 1;
+	spl_t s;
+
+	/*
+	 * Attempt to abort an undergoing wait for the given thread.
+	 * If this state is due to an alarm that has been armed to
+	 * limit the sleeping thread's waiting time while it pends for
+	 * a resource, the corresponding XNPEND state will be cleared
+	 * by xnthread_resume() in the same move. Otherwise, this call
+	 * may abort an undergoing infinite wait for a resource (if
+	 * any).
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	trace_cobalt_thread_unblock(thread);
+
+	if (xnthread_test_state(thread, XNDELAY))
+		xnthread_resume(thread, XNDELAY);
+	else if (xnthread_test_state(thread, XNPEND))
+		xnthread_resume(thread, XNPEND);
+	else
+		ret = 0;
+
+	/*
+	 * We should not clear a previous break state if this service
+	 * is called more than once before the target thread actually
+	 * resumes, so we only set the bit here and never clear
+	 * it. However, we must not raise the XNBREAK bit if the
+	 * target thread was already awake at the time of this call,
+	 * so that downstream code does not get confused by some
+	 * "successful but interrupted syscall" condition. IOW, a
+	 * break state raised here must always trigger an error code
+	 * downstream, and an already successful syscall cannot be
+	 * marked as interrupted.
+	 */
+	if (ret)
+		xnthread_set_info(thread, XNBREAK);
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_unblock);
+
+/**
+ * @fn int xnthread_set_periodic(struct xnthread *thread,xnticks_t idate, xntmode_t timeout_mode, xnticks_t period)
+ * @brief Make a thread periodic.
+ *
+ * Make a thread periodic by programming its first release point and
+ * its period in the processor time line.  Subsequent calls to
+ * xnthread_wait_period() will delay the thread until the next
+ * periodic release point in the processor timeline is reached.
+ *
+ * @param thread The core thread to make periodic. If NULL, the
+ * current thread is assumed.
+ *
+ * @param idate The initial (absolute) date of the first release
+ * point, expressed in nanoseconds. The affected thread will be
+ * delayed by the first call to xnthread_wait_period() until this
+ * point is reached. If @a idate is equal to XN_INFINITE, the first
+ * release point is set to @a period nanoseconds after the current
+ * date. In the latter case, @a timeout_mode is not considered and can
+ * have any valid value.
+ *
+ * @param timeout_mode The mode of the @a idate parameter. It can
+ * either be set to XN_ABSOLUTE or XN_REALTIME with @a idate different
+ * from XN_INFINITE (see also xntimer_start()).
+ *
+ * @param period The period of the thread, expressed in nanoseconds.
+ * As a side-effect, passing XN_INFINITE attempts to stop the thread's
+ * periodic timer; in that case, the routine always exits
+ * successfully, regardless of the previous state of this timer.
+ *
+ * @return 0 is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a idate is different from XN_INFINITE
+ * and represents a date in the past.
+ *
+ * - -EINVAL is returned if @a period is different from XN_INFINITE
+ * but shorter than the scheduling latency value for the target
+ * system, as available from /proc/xenomai/latency. -EINVAL is also
+ * returned if @a timeout_mode is not compatible with @a idate, such
+ * as XN_RELATIVE with @a idate different from XN_INFINITE.
+ *
+ * - -EPERM is returned if @a thread is NULL, but the caller is not a
+ * Xenomai thread.
+ *
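+ * A minimal sketch, making the current thread periodic with a 1 ms
+ * period starting one full period from now (the period value is
+ * illustrative):
+ *
+ * @code
+ * ret = xnthread_set_periodic(NULL, XN_INFINITE, XN_RELATIVE, 1000000);
+ * @endcode
+ *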
+ * @coretags{task-unrestricted}
+ */
+int xnthread_set_periodic(struct xnthread *thread, xnticks_t idate,
+			  xntmode_t timeout_mode, xnticks_t period)
+{
+	int ret = 0;
+	spl_t s;
+
+	if (thread == NULL) {
+		thread = xnthread_current();
+		if (thread == NULL)
+			return -EPERM;
+	}
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (period == XN_INFINITE) {
+		if (xntimer_running_p(&thread->ptimer))
+			xntimer_stop(&thread->ptimer);
+
+		goto unlock_and_exit;
+	}
+
+	/*
+	 * LART: detect periods which are shorter than the core clock
+	 * gravity for kernel thread timers. This can't work, caller
+	 * must have messed up arguments.
+	 */
+	if (period < xnclock_ticks_to_ns(&nkclock,
+			 xnclock_get_gravity(&nkclock, kernel))) {
+		ret = -EINVAL;
+		goto unlock_and_exit;
+	}
+
+	xntimer_set_affinity(&thread->ptimer, thread->sched);
+
+	if (idate == XN_INFINITE)
+		xntimer_start(&thread->ptimer, period, period, XN_RELATIVE);
+	else {
+		if (timeout_mode == XN_REALTIME)
+			idate -= xnclock_get_offset(xntimer_clock(&thread->ptimer));
+		else if (timeout_mode != XN_ABSOLUTE) {
+			ret = -EINVAL;
+			goto unlock_and_exit;
+		}
+		ret = xntimer_start(&thread->ptimer, idate, period,
+				    XN_ABSOLUTE);
+	}
+
+unlock_and_exit:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_periodic);
+
+/**
+ * @fn int xnthread_wait_period(unsigned long *overruns_r)
+ * @brief Wait for the next periodic release point.
+ *
+ * Make the current thread wait for the next periodic release point in
+ * the processor time line.
+ *
+ * @param overruns_r If non-NULL, @a overruns_r must be a pointer to a
+ * memory location which will be written with the count of pending
+ * overruns. This value is copied only when xnthread_wait_period()
+ * returns -ETIMEDOUT or success; the memory location remains
+ * unmodified otherwise. If NULL, this count will never be copied
+ * back.
+ *
+ * @return 0 is returned upon success; if @a overruns_r is valid, zero
+ * is copied to the pointed memory location. Otherwise:
+ *
+ * - -EWOULDBLOCK is returned if xnthread_set_periodic() has not
+ * previously been called for the calling thread.
+ *
+ * - -EINTR is returned if xnthread_unblock() has been called for the
+ * waiting thread before the next periodic release point has been
+ * reached. In this case, the overrun counter is reset too.
+ *
+ * - -ETIMEDOUT is returned if the timer has overrun, which indicates
+ * that one or more previous release points have been missed by the
+ * calling thread. If @a overruns_r is valid, the count of pending
+ * overruns is copied to the pointed memory location.
+ *
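+ * A typical periodic loop is sketched below (do_periodic_work() is a
+ * hypothetical workload; overruns reported by -ETIMEDOUT are simply
+ * ignored here):
+ *
+ * @code
+ * unsigned long overruns;
+ * int ret;
+ *
+ * for (;;) {
+ *         ret = xnthread_wait_period(&overruns);
+ *         if (ret && ret != -ETIMEDOUT)
+ *                 break; // -EINTR or -EWOULDBLOCK
+ *         do_periodic_work();
+ * }
+ * @endcode
+ *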
+ * @coretags{primary-only, might-switch}
+ */
+int xnthread_wait_period(unsigned long *overruns_r)
+{
+	unsigned long overruns = 0;
+	struct xnthread *thread;
+	struct xnclock *clock;
+	xnticks_t now;
+	int ret = 0;
+	spl_t s;
+
+	thread = xnthread_current();
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (unlikely(!xntimer_running_p(&thread->ptimer))) {
+		ret = -EWOULDBLOCK;
+		goto out;
+	}
+
+	trace_cobalt_thread_wait_period(thread);
+
+	clock = xntimer_clock(&thread->ptimer);
+	now = xnclock_read_raw(clock);
+	if (likely((xnsticks_t)(now - xntimer_pexpect(&thread->ptimer)) < 0)) {
+		xnthread_suspend(thread, XNDELAY, XN_INFINITE, XN_RELATIVE, NULL);
+		if (unlikely(xnthread_test_info(thread, XNBREAK))) {
+			ret = -EINTR;
+			goto out;
+		}
+
+		now = xnclock_read_raw(clock);
+	}
+
+	overruns = xntimer_get_overruns(&thread->ptimer, thread, now);
+	if (overruns) {
+		ret = -ETIMEDOUT;
+		trace_cobalt_thread_missed_period(thread);
+	}
+
+	if (likely(overruns_r != NULL))
+		*overruns_r = overruns;
+ out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_wait_period);
+
+/**
+ * @fn int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
+ * @brief Set thread time-slicing information.
+ *
+ * Update the time-slicing information for a given thread. This
+ * service enables or disables round-robin scheduling for the thread,
+ * depending on the value of @a quantum. By default, time-slicing is
+ * disabled for a new thread initialized by a call to xnthread_init().
+ *
+ * @param thread The descriptor address of the affected thread.
+ *
+ * @param quantum The time quantum assigned to the thread expressed in
+ * nanoseconds. If @a quantum is different from XN_INFINITE, the
+ * time-slice for the thread is set to that value and its current time
+ * credit is refilled (i.e. the thread is given a full time-slice to
+ * run next). Otherwise, if @a quantum equals XN_INFINITE,
+ * time-slicing is stopped for that thread.
+ *
+ * @return 0 is returned upon success. Otherwise, -EINVAL is returned
+ * if @a quantum is not XN_INFINITE and:
+ *
+ *   - the base scheduling class of the target thread does not support
+ *   time-slicing,
+ *
+ *   - @a quantum is smaller than the master clock gravity for a user
+ *   thread, which denotes a spurious value.
+ *
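+ * A minimal sketch (the 10 ms quantum is illustrative):
+ *
+ * @code
+ * ret = xnthread_set_slice(thread, 10000000);    // enable round-robin
+ * ...
+ * ret = xnthread_set_slice(thread, XN_INFINITE); // disable it again
+ * @endcode
+ *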
+ * @coretags{task-unrestricted}
+ */
+int xnthread_set_slice(struct xnthread *thread, xnticks_t quantum)
+{
+	struct xnsched *sched;
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	sched = thread->sched;
+	thread->rrperiod = quantum;
+
+	if (quantum != XN_INFINITE) {
+		if (quantum <= xnclock_get_gravity(&nkclock, user) ||
+		    thread->base_class->sched_tick == NULL) {
+			xnlock_put_irqrestore(&nklock, s);
+			return -EINVAL;
+		}
+		xnthread_set_state(thread, XNRRB);
+		if (sched->curr == thread)
+			xntimer_start(&sched->rrbtimer,
+				      quantum, XN_INFINITE, XN_RELATIVE);
+	} else {
+		xnthread_clear_state(thread, XNRRB);
+		if (sched->curr == thread)
+			xntimer_stop(&sched->rrbtimer);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_slice);
+
+/**
+ * @fn void xnthread_cancel(struct xnthread *thread)
+ * @brief Cancel a thread.
+ *
+ * Request cancellation of a thread. This service forces @a thread to
+ * exit from any blocking call, then to switch to secondary mode.
+ * @a thread will terminate as soon as it reaches a cancellation
+ * point. Cancellation points are defined for the following
+ * situations:
+ *
+ * - @a thread self-cancels by a call to xnthread_cancel().
+ * - @a thread invokes a Linux syscall (user-space shadow only).
+ * - @a thread receives a Linux signal (user-space shadow only).
+ * - @a thread unblocks from a Xenomai syscall (user-space shadow only).
+ * - @a thread attempts to block on a Xenomai syscall (user-space shadow only).
+ * - @a thread explicitly calls xnthread_test_cancel().
+ *
+ * @param thread The descriptor address of the thread to terminate.
+ *
+ * @coretags{task-unrestricted, might-switch}
+ *
+ * @note In addition to the common actions taken upon cancellation, a
+ * thread which belongs to the SCHED_WEAK class is sent a regular
+ * SIGTERM signal.
+ */
+void xnthread_cancel(struct xnthread *thread)
+{
+	spl_t s;
+
+	/* Right, so you want to kill the kernel?! */
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_info(thread, XNCANCELD))
+		goto check_self_cancel;
+
+	trace_cobalt_thread_cancel(thread);
+
+	xnthread_set_info(thread, XNCANCELD);
+
+	/*
+	 * If @thread is not started yet, fake a start request,
+	 * raising the kicked condition bit to make sure it will reach
+	 * xnthread_test_cancel() on its wakeup path.
+	 */
+	if (xnthread_test_state(thread, XNDORMANT)) {
+		xnthread_set_info(thread, XNKICKED);
+		xnthread_resume(thread, XNDORMANT);
+		goto out;
+	}
+
+check_self_cancel:
+	if (xnthread_current() == thread) {
+		xnlock_put_irqrestore(&nklock, s);
+		xnthread_test_cancel();
+		/*
+		 * May return if on behalf of an IRQ handler which has
+		 * preempted @thread.
+		 */
+		return;
+	}
+
+	/*
+	 * Force the non-current thread to exit:
+	 *
+	 * - unblock a user thread, switch it to weak scheduling,
+	 * then send it SIGTERM.
+	 *
+	 * - just unblock a kernel thread, it is expected to reach a
+	 * cancellation point soon after
+	 * (i.e. xnthread_test_cancel()).
+	 */
+	if (xnthread_test_state(thread, XNUSER)) {
+		__xnthread_demote(thread);
+		__xnthread_signal(thread, SIGTERM, 0);
+	} else
+		__xnthread_kick(thread);
+out:
+	xnsched_run();
+
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_cancel);
+
+struct wait_grace_struct {
+	struct completion done;
+	struct rcu_head rcu;
+};
+
+static void grace_elapsed(struct rcu_head *head)
+{
+	struct wait_grace_struct *wgs;
+
+	wgs = container_of(head, struct wait_grace_struct, rcu);
+	complete(&wgs->done);
+}
+
+static void wait_for_rcu_grace_period(struct pid *pid)
+{
+	struct wait_grace_struct wait = {
+		.done = COMPLETION_INITIALIZER_ONSTACK(wait.done),
+	};
+	struct task_struct *p;
+
+	init_rcu_head_on_stack(&wait.rcu);
+
+	for (;;) {
+		call_rcu(&wait.rcu, grace_elapsed);
+		wait_for_completion(&wait.done);
+		if (pid == NULL)
+			break;
+		rcu_read_lock();
+		p = pid_task(pid, PIDTYPE_PID);
+		rcu_read_unlock();
+		if (p == NULL)
+			break;
+		reinit_completion(&wait.done);
+	}
+}
+
+/**
+ * @fn void xnthread_join(struct xnthread *thread, bool uninterruptible)
+ * @brief Join with a terminated thread.
+ *
+ * This service waits for @a thread to terminate after a call to
+ * xnthread_cancel().  If that thread has already terminated or is
+ * dormant at the time of the call, then xnthread_join() returns
+ * immediately.
+ *
+ * xnthread_join() adapts to the calling context (primary or
+ * secondary), switching to secondary mode if needed for the duration
+ * of the wait. Upon return, the original runtime mode is restored,
+ * unless a Linux signal is pending.
+ *
+ * @param thread The descriptor address of the thread to join with.
+ *
+ * @param uninterruptible Boolean telling whether the service should
+ * wait for completion uninterruptibly.
+ *
+ * @return 0 is returned on success. Otherwise, the following error
+ * codes indicate the cause of the failure:
+ *
+ * - -EDEADLK is returned if the current thread attempts to join
+ * itself.
+ *
+ * - -EINTR is returned if the current thread was unblocked while
+ *   waiting for @a thread to terminate.
+ *
+ * - -EBUSY indicates that another thread is already waiting for @a
+ *   thread to terminate.
+ *
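+ * The typical termination sequence for a kernel thread is sketched
+ * below (uninterruptible wait, assuming @c kthread was started
+ * earlier):
+ *
+ * @code
+ * xnthread_cancel(kthread);
+ * ret = xnthread_join(kthread, true);
+ * @endcode
+ *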
+ * @coretags{task-unrestricted, might-switch}
+ */
+int xnthread_join(struct xnthread *thread, bool uninterruptible)
+{
+	struct xnthread *curr = xnthread_current();
+	int ret = 0, switched = 0;
+	struct pid *pid;
+	pid_t tpid;
+	spl_t s;
+
+	XENO_BUG_ON(COBALT, xnthread_test_state(thread, XNROOT));
+
+	if (thread == curr)
+		return -EDEADLK;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	if (xnthread_test_state(thread, XNJOINED)) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (xnthread_test_info(thread, XNDORMANT))
+		goto out;
+
+	trace_cobalt_thread_join(thread);
+
+	xnthread_set_state(thread, XNJOINED);
+	tpid = xnthread_host_pid(thread);
+
+	if (curr && !xnthread_test_state(curr, XNRELAX)) {
+		xnlock_put_irqrestore(&nklock, s);
+		xnthread_relax(0, 0);
+		switched = 1;
+	} else
+		xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Since in theory, we might be sleeping there for a long
+	 * time, we get a reference on the pid struct holding our
+	 * target, then we check for its existence upon wake up.
+	 */
+	pid = find_get_pid(tpid);
+	if (pid == NULL)
+		goto done;
+
+	/*
+	 * We have a tricky issue to deal with, which involves code
+	 * relying on the assumption that a destroyed thread will have
+	 * scheduled away from do_exit() before xnthread_join()
+	 * returns. A typical example is illustrated by the following
+	 * sequence, with a RTDM kernel task implemented in a
+	 * dynamically loaded module:
+	 *
+	 * CPU0:  rtdm_task_destroy(ktask)
+	 *           xnthread_cancel(ktask)
+	 *           xnthread_join(ktask)
+	 *        ...<back to user>..
+	 *        rmmod(module)
+	 *
+	 * CPU1:  in ktask()
+	 *        ...
+	 *        ...
+	 *          __xnthread_test_cancel()
+	 *             do_exit()
+	 *                schedule()
+	 *
+	 * In such a sequence, the code on CPU0 would expect the RTDM
+	 * task to have scheduled away upon return from
+	 * rtdm_task_destroy(), so that unmapping the destroyed task
+	 * code and data memory when unloading the module is always
+	 * safe.
+	 *
+	 * To address this, the joiner first waits for the joinee to
+	 * signal completion from the Cobalt thread cleanup handler
+	 * (__xnthread_cleanup), then waits for a full RCU grace
+	 * period to have elapsed. Since the completion signal is sent
+	 * on behalf of do_exit(), we may assume that the joinee has
+	 * scheduled away before the RCU grace period ends.
+	 */
+	if (uninterruptible)
+		wait_for_completion(&thread->exited);
+	else {
+		ret = wait_for_completion_interruptible(&thread->exited);
+		if (ret < 0) {
+			put_pid(pid);
+			return -EINTR;
+		}
+	}
+
+	/* Make sure the joinee has scheduled away ultimately. */
+	wait_for_rcu_grace_period(pid);
+
+	put_pid(pid);
+done:
+	ret = 0;
+	if (switched)
+		ret = xnthread_harden();
+
+	return ret;
+out:
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_join);
+
+#ifdef CONFIG_SMP
+
+void xnthread_migrate_passive(struct xnthread *thread, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	if (thread->sched == sched)
+		return;
+
+	trace_cobalt_thread_migrate_passive(thread, xnsched_cpu(sched));
+	/*
+	 * Timer migration is postponed until the next timeout happens
+	 * for the periodic and rrb timers. The resource timer will be
+	 * moved to the right CPU next time it is armed in
+	 * xnthread_suspend().
+	 */
+	xnsched_migrate_passive(thread, sched);
+
+	xnstat_exectime_reset_stats(&thread->stat.lastperiod);
+}
+
+#endif	/* CONFIG_SMP */
+
+/**
+ * @fn int xnthread_set_schedparam(struct xnthread *thread,struct xnsched_class *sched_class,const union xnsched_policy_param *sched_param)
+ * @brief Change the base scheduling parameters of a thread.
+ *
+ * Changes the base scheduling policy and parameters of a thread. If
+ * the thread is currently blocked, waiting in priority-pending mode
+ * (XNSYNCH_PRIO) for a synchronization object to be signaled, Cobalt
+ * will attempt to reorder the object's wait queue so that it reflects
+ * the new sleeper's priority, unless the XNSYNCH_DREORD flag has been
+ * set for the pended object.
+ *
+ * @param thread The descriptor address of the affected thread. See
+ * note.
+ *
+ * @param sched_class The new scheduling class the thread should be
+ * assigned to.
+ *
+ * @param sched_param The scheduling parameters to set for the thread;
+ * @a sched_param must be valid within the context of @a sched_class.
+ *
+ * It is absolutely required to use this service to change a thread's
+ * priority, in order to have all the needed housekeeping chores
+ * correctly performed, i.e. do *not* call xnsched_set_policy()
+ * directly or, worse, change the thread.cprio field by hand in any
+ * case.
+ *
+ * @return 0 is returned on success. Otherwise, a negative error code
+ * indicates the cause of a failure that happened in the scheduling
+ * class implementation for @a sched_class. Invalid parameters passed
+ * into @a sched_param are common causes of error.
+ *
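+ * A minimal sketch, moving a thread to the RT class at an arbitrary
+ * priority (the priority value is illustrative):
+ *
+ * @code
+ * union xnsched_policy_param param = { .rt = { .prio = 50 } };
+ *
+ * ret = xnthread_set_schedparam(thread, &xnsched_class_rt, &param);
+ * @endcode
+ *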
+ * @sideeffect
+ *
+ * - This service does not call the rescheduling procedure but may
+ * affect the state of the run queue for the previous and new
+ * scheduling classes.
+ *
+ * - Assigning the same scheduling class and parameters to a running
+ * or ready thread moves it to the end of the run queue, thus causing
+ * a manual round-robin, except if a priority boost is undergoing.
+ *
+ * @coretags{task-unrestricted}
+ *
+ * @note The changes only apply to the Xenomai scheduling parameters
+ * for @a thread. There is no propagation/translation of such changes
+ * to the Linux scheduler for the task mated to the Xenomai target
+ * thread.
+ */
+int xnthread_set_schedparam(struct xnthread *thread,
+			    struct xnsched_class *sched_class,
+			    const union xnsched_policy_param *sched_param)
+{
+	spl_t s;
+	int ret;
+
+	xnlock_get_irqsave(&nklock, s);
+	ret = __xnthread_set_schedparam(thread, sched_class, sched_param);
+	xnlock_put_irqrestore(&nklock, s);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xnthread_set_schedparam);
+
+int __xnthread_set_schedparam(struct xnthread *thread,
+			      struct xnsched_class *sched_class,
+			      const union xnsched_policy_param *sched_param)
+{
+	int old_wprio, new_wprio, ret;
+
+	old_wprio = thread->wprio;
+
+	ret = xnsched_set_policy(thread, sched_class, sched_param);
+	if (ret)
+		return ret;
+
+	new_wprio = thread->wprio;
+
+	/*
+	 * If the thread is waiting on a synchronization object,
+	 * update its position in the corresponding wait queue, unless
+	 * 1) reordering is explicitly disabled, or 2) the (weighted)
+	 * priority has not changed (to prevent spurious round-robin
+	 * effects).
+	 */
+	if (old_wprio != new_wprio && thread->wchan &&
+	    (thread->wchan->status & (XNSYNCH_DREORD|XNSYNCH_PRIO))
+	    == XNSYNCH_PRIO)
+		xnsynch_requeue_sleeper(thread);
+	/*
+	 * We should not move the thread to the end of its priority
+	 * group if any of these conditions is true:
+	 *
+	 * - thread is not runnable;
+	 * - thread bears the ready bit which means that xnsched_set_policy()
+	 * already reordered the run queue;
+	 * - thread currently holds the scheduler lock, so we don't want
+	 * any round-robin effect to take place;
+	 * - a priority boost is undergoing for this thread.
+	 */
+	if (!xnthread_test_state(thread, XNTHREAD_BLOCK_BITS|XNREADY|XNBOOST) &&
+	    thread->lock_count == 0)
+		xnsched_putback(thread);
+
+	xnthread_set_info(thread, XNSCHEDP);
+	/* Ask the target thread to call back if relaxed. */
+	if (xnthread_test_state(thread, XNRELAX))
+		__xnthread_signal(thread, SIGSHADOW, SIGSHADOW_ACTION_HOME);
+
+	return ret;
+}
+
+void __xnthread_test_cancel(struct xnthread *curr)
+{
+	/*
+	 * Just in case xnthread_test_cancel() is called from an IRQ
+	 * handler, in which case we may not take the exit path.
+	 *
+	 * NOTE: curr->sched is stable from our POV and can't change
+	 * under our feet.
+	 */
+	if (curr->sched->lflags & XNINIRQ)
+		return;
+
+	if (!xnthread_test_state(curr, XNRELAX))
+		xnthread_relax(0, 0);
+
+	do_exit(0);
+	/* ... won't return ... */
+	XENO_BUG(COBALT);
+}
+EXPORT_SYMBOL_GPL(__xnthread_test_cancel);
+
+/**
+ * @internal
+ * @fn int xnthread_harden(void);
+ * @brief Migrate a Linux task to the Xenomai domain.
+ *
+ * This service causes the transition of "current" from the Linux
+ * domain to Xenomai. The shadow will resume in the Xenomai domain as
+ * returning from schedule().
+ *
+ * @coretags{secondary-only, might-switch}
+ */
+int xnthread_harden(void)
+{
+	struct task_struct *p = current;
+	struct xnthread *thread;
+	int ret;
+
+	secondary_mode_only();
+
+	thread = xnthread_current();
+	if (thread == NULL)
+		return -EPERM;
+
+	if (signal_pending(p))
+		return -ERESTARTSYS;
+
+	trace_cobalt_shadow_gohard(thread);
+
+	xnthread_clear_sync_window(thread, XNRELAX);
+
+	ret = pipeline_leave_inband();
+	if (ret) {
+		xnthread_test_cancel();
+		xnthread_set_sync_window(thread, XNRELAX);
+		return ret;
+	}
+
+	/* "current" is now running on the out-of-band stage. */
+
+	xnlock_clear_irqon(&nklock);
+	xnthread_test_cancel();
+
+	trace_cobalt_shadow_hardened(thread);
+
+	/*
+	 * Recheck pending signals once again. As we block task
+	 * wakeups during the migration and handle_sigwake_event()
+	 * ignores signals until XNRELAX is cleared, any signal
+	 * between entering TASK_HARDENING and starting the migration
+	 * is just silently queued up to here.
+	 */
+	if (signal_pending(p)) {
+		xnthread_relax(!xnthread_test_state(thread, XNSSTEP),
+			       SIGDEBUG_MIGRATE_SIGNAL);
+		return -ERESTARTSYS;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_harden);
+
+struct lostage_wakeup {
+	struct pipeline_inband_work inband_work; /* Must be first. */
+	struct task_struct *task;
+};
+
+static void lostage_task_wakeup(struct pipeline_inband_work *inband_work)
+{
+	struct lostage_wakeup *rq;
+	struct task_struct *p;
+
+	rq = container_of(inband_work, struct lostage_wakeup, inband_work);
+	p = rq->task;
+
+	trace_cobalt_lostage_wakeup(p);
+
+	wake_up_process(p);
+}
+
+void __xnthread_propagate_schedparam(struct xnthread *curr)
+{
+	int kpolicy = SCHED_FIFO, kprio = curr->bprio, ret;
+	struct task_struct *p = current;
+	struct sched_param param;
+	spl_t s;
+
+	/*
+	 * Test-set race for XNSCHEDP is ok, the propagation is meant
+	 * to be done asap but not guaranteed to be carried out
+	 * immediately, and the request will remain pending until it
+	 * is eventually handled. We just have to protect against a
+	 * set-clear race.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	xnthread_clear_info(curr, XNSCHEDP);
+	xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Map our policies/priorities to the regular kernel's
+	 * (approximated).
+	 */
+	if (xnthread_test_state(curr, XNWEAK) && kprio == 0)
+		kpolicy = SCHED_NORMAL;
+	else if (kprio >= MAX_RT_PRIO)
+		kprio = MAX_RT_PRIO - 1;
+
+	if (p->policy != kpolicy || (kprio > 0 && p->rt_priority != kprio)) {
+		param.sched_priority = kprio;
+		ret = sched_setscheduler_nocheck(p, kpolicy, &param);
+		XENO_WARN_ON(COBALT, ret != 0);
+	}
+}
+
+/**
+ * @internal
+ * @fn void xnthread_relax(int notify, int reason);
+ * @brief Switch a shadow thread back to the Linux domain.
+ *
+ * This service yields the control of the running shadow back to
+ * Linux. This is obtained by suspending the shadow and scheduling a
+ * wake up call for the mated user task inside the Linux domain. The
+ * Linux task will resume on return from xnthread_suspend() on behalf
+ * of the root thread.
+ *
+ * @param notify A boolean flag indicating whether threads monitored
+ * from secondary mode switches should be sent a SIGDEBUG signal. For
+ * instance, some internal operations like task exit should not
+ * trigger such signal.
+ *
+ * @param reason The reason to report along with the SIGDEBUG signal.
+ *
+ * @coretags{primary-only, might-switch}
+ *
+ * @note "current" is valid here since the shadow runs with the
+ * properties of the Linux task.
+ */
+void xnthread_relax(int notify, int reason)
+{
+	struct task_struct *p = current;
+	struct lostage_wakeup wakework = {
+		.inband_work = PIPELINE_INBAND_WORK_INITIALIZER(wakework,
+					lostage_task_wakeup),
+		.task = p,
+	};
+	struct xnthread *thread = xnthread_current();
+	int cpu __maybe_unused, suspension;
+	kernel_siginfo_t si;
+
+	primary_mode_only();
+
+	/*
+	 * Enqueue the request to move the running shadow from the Xenomai
+	 * domain to the Linux domain.  This will cause the Linux task
+	 * to resume using the register state of the shadow thread.
+	 */
+	trace_cobalt_shadow_gorelax(reason);
+
+	/*
+	 * If you intend to change the following interrupt-free
+	 * sequence, /first/ make sure to check the special handling
+	 * of XNRELAX in xnthread_suspend() when switching out the
+	 * current thread, not to break basic assumptions we make
+	 * there.
+	 *
+	 * We disable interrupts during the migration sequence, but
+	 * xnthread_suspend() has an interrupts-on section built in.
+	 */
+	splmax();
+	trace_cobalt_lostage_request("wakeup", p);
+	pipeline_post_inband_work(&wakework);
+	/*
+	 * Grab the nklock to synchronize the Linux task state
+	 * manipulation with handle_sigwake_event. This lock will be
+	 * dropped by xnthread_suspend().
+	 */
+	xnlock_get(&nklock);
+	xnthread_run_handler_stack(thread, relax_thread);
+	suspension = pipeline_leave_oob_prepare();
+	xnthread_suspend(thread, suspension, XN_INFINITE, XN_RELATIVE, NULL);
+	splnone();
+
+	/*
+	 * Basic sanity check after an expected transition to secondary
+	 * mode.
+	 */
+	XENO_WARN(COBALT, is_primary_domain(),
+		  "xnthread_relax() failed for thread %s[%d]",
+		  thread->name, xnthread_host_pid(thread));
+
+	pipeline_leave_oob_finish();
+
+	/* Account for secondary mode switch. */
+	xnstat_counter_inc(&thread->stat.ssw);
+
+	/*
+	 * When relaxing, we check for propagating to the regular
+	 * kernel new Cobalt schedparams that might have been set for
+	 * us while we were running in primary mode.
+	 *
+	 * CAUTION: This obviously won't update the schedparams cached
+	 * by the glibc for the caller in user-space, but this is the
+	 * deal: we don't relax threads which issue
+	 * pthread_setschedparam[_ex]() from primary mode, but then
+	 * only the kernel side (Cobalt and the host kernel) will be
+	 * aware of the change, and glibc might cache obsolete
+	 * information.
+	 */
+	xnthread_propagate_schedparam(thread);
+
+	if (xnthread_test_state(thread, XNUSER) && notify) {
+		if (xnthread_test_state(thread, XNWARN)) {
+			/* Help debugging spurious relaxes. */
+			xndebug_notify_relax(thread, reason);
+			memset(&si, 0, sizeof(si));
+			si.si_signo = SIGDEBUG;
+			si.si_code = SI_QUEUE;
+			si.si_int = reason | sigdebug_marker;
+			send_sig_info(SIGDEBUG, &si, p);
+		}
+		xnsynch_detect_boosted_relax(thread);
+	}
+
+	/*
+	 * "current" is now running into the Linux domain on behalf of
+	 * the root thread.
+	 */
+	xnthread_sync_window(thread);
+
+#ifdef CONFIG_SMP
+	if (xnthread_test_localinfo(thread, XNMOVED)) {
+		xnthread_clear_localinfo(thread, XNMOVED);
+		cpu = xnsched_cpu(thread->sched);
+		set_cpus_allowed_ptr(p, cpumask_of(cpu));
+	}
+#endif
+	/*
+	 * After migration there will be no syscall restart (rather a signal
+	 * delivery).
+	 */
+	xnthread_clear_localinfo(thread, XNSYSRST);
+
+	pipeline_clear_mayday();
+
+	trace_cobalt_shadow_relaxed(thread);
+}
+EXPORT_SYMBOL_GPL(xnthread_relax);
+
+static void lostage_task_signal(struct pipeline_inband_work *inband_work)
+{
+	struct lostage_signal *rq;
+	struct task_struct *p;
+	kernel_siginfo_t si;
+	int signo, sigval;
+	spl_t s;
+
+	rq = container_of(inband_work, struct lostage_signal, inband_work);
+	/*
+	 * Revisit: I-pipe requirement. It passes a copy of the original work
+	 * struct, so retrieve the original one first in order to update it.
+	 */
+	rq = rq->self;
+
+	xnlock_get_irqsave(&nklock, s);
+
+	p = rq->task;
+	signo = rq->signo;
+	sigval = rq->sigval;
+	rq->task = NULL;
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	trace_cobalt_lostage_signal(p, signo);
+
+	if (signo == SIGSHADOW || signo == SIGDEBUG) {
+		memset(&si, '\0', sizeof(si));
+		si.si_signo = signo;
+		si.si_code = SI_QUEUE;
+		si.si_int = sigval;
+		send_sig_info(signo, &si, p);
+	} else {
+		send_sig(signo, p, 1);
+	}
+}
+
+static int force_wakeup(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	int ret = 0;
+
+	if (xnthread_test_info(thread, XNKICKED))
+		return 1;
+
+	if (xnthread_unblock(thread)) {
+		xnthread_set_info(thread, XNKICKED);
+		ret = 1;
+	}
+
+	/*
+	 * CAUTION: we must NOT raise XNBREAK when clearing a forcible
+	 * block state, such as XNSUSP, XNHELD. The caller of
+	 * xnthread_suspend() we unblock shall proceed as for a normal
+	 * return, until it traverses a cancellation point if
+	 * XNCANCELD was raised earlier, or calls xnthread_suspend()
+	 * which will detect XNKICKED and act accordingly.
+	 *
+	 * Rationale: callers of xnthread_suspend() may assume that
+	 * receiving XNBREAK means that the process that motivated the
+	 * blocking did not go to completion. E.g. the wait context
+	 * (see. xnthread_prepare_wait()) was NOT posted before
+	 * xnsynch_sleep_on() returned, leaving no useful data there.
+	 * Therefore, in case only XNSUSP remains set for the thread
+	 * on entry to force_wakeup(), after XNPEND was lifted earlier
+	 * when the wait went to successful completion (i.e. no
+	 * timeout), then we want the kicked thread to know that it
+	 * did receive the requested resource, not finding XNBREAK in
+	 * its state word.
+	 *
+	 * Callers of xnthread_suspend() may inquire for XNKICKED to
+	 * detect forcible unblocks from XNSUSP, XNHELD, if they
+	 * should act upon this case specifically.
+	 */
+	if (xnthread_test_state(thread, XNSUSP|XNHELD)) {
+		xnthread_resume(thread, XNSUSP|XNHELD);
+		xnthread_set_info(thread, XNKICKED);
+	}
+
+	/*
+	 * Tricky cases:
+	 *
+	 * - a thread which was ready on entry wasn't actually
+	 * running, but nevertheless waits for the CPU in primary
+	 * mode, so we have to make sure that it will be notified of
+	 * the pending break condition as soon as it enters
+	 * xnthread_suspend() from a blocking Xenomai syscall.
+	 *
+	 * - a ready/readied thread on exit may be prevented from
+	 * running by the scheduling policy module it belongs
+	 * to. Typically, policies enforcing a runtime budget do not
+	 * block threads with no budget, but rather keep them out of
+	 * their run queue, so that ->sched_pick() won't elect
+	 * them. We tell the policy handler about the fact that we do
+	 * want such thread to run until it relaxes, whatever this
+	 * means internally for the implementation.
+	 */
+	if (xnthread_test_state(thread, XNREADY))
+		xnsched_kick(thread);
+
+	return ret;
+}
+
+void __xnthread_kick(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	struct task_struct *p = xnthread_host_task(thread);
+
+	/* Thread is already relaxed -- nop. */
+	if (xnthread_test_state(thread, XNRELAX))
+		return;
+
+	/*
+	 * First, try to kick the thread out of any blocking syscall
+	 * Xenomai-wise. If that succeeds, then the thread will relax
+	 * on its return path to user-space.
+	 */
+	if (force_wakeup(thread))
+		return;
+
+	/*
+	 * If that did not work out because the thread was not blocked
+	 * (i.e. XNPEND/XNDELAY) in a syscall, then force a mayday
+	 * trap. Note that we don't want to send that thread any linux
+	 * signal, we only want to force it to switch to secondary
+	 * mode asap.
+	 *
+	 * It could happen that a thread is relaxed on a syscall
+	 * return path after it was resumed from self-suspension
+	 * (e.g. XNSUSP) then also forced to run a mayday trap right
+	 * after: this is still correct, at worst we would get a
+	 * useless mayday syscall leading to a no-op, no big deal.
+	 */
+	xnthread_set_info(thread, XNKICKED);
+
+	/*
+	 * We may send mayday signals to userland threads only.
+	 * However, no need to run a mayday trap if the current thread
+	 * kicks itself out of primary mode: it will relax on its way
+	 * back to userland via the current syscall
+	 * epilogue. Otherwise, we want that thread to enter the
+	 * mayday trap asap, to call us back for relaxing.
+	 */
+	if (thread != xnsched_current_thread() &&
+	    xnthread_test_state(thread, XNUSER))
+		pipeline_raise_mayday(p);
+}
+
+void xnthread_kick(struct xnthread *thread)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_kick(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_kick);
+
+void __xnthread_demote(struct xnthread *thread) /* nklock locked, irqs off */
+{
+	struct xnsched_class *sched_class;
+	union xnsched_policy_param param;
+
+	/*
+	 * First we kick the thread out of primary mode, and have it
+	 * resume execution immediately over the regular linux
+	 * context.
+	 */
+	__xnthread_kick(thread);
+
+	/*
+	 * Then we demote it, turning that thread into a non real-time
+	 * Xenomai shadow, which still has access to Xenomai
+	 * resources, but won't compete for real-time scheduling
+	 * anymore. In effect, moving the thread to a weak scheduling
+	 * class/priority will prevent it from sticking back to
+	 * primary mode.
+	 */
+#ifdef CONFIG_XENO_OPT_SCHED_WEAK
+	param.weak.prio = 0;
+	sched_class = &xnsched_class_weak;
+#else
+	param.rt.prio = 0;
+	sched_class = &xnsched_class_rt;
+#endif
+	__xnthread_set_schedparam(thread, sched_class, &param);
+}
+
+void xnthread_demote(struct xnthread *thread)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_demote(thread);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_demote);
+
+static int get_slot_index_from_sig(int sig, int arg)
+{
+	int action;
+
+	switch (sig) {
+	case SIGDEBUG:
+		return XNTHREAD_SIGDEBUG;
+	case SIGSHADOW:
+		action = sigshadow_action(arg);
+		switch (action) {
+		case SIGSHADOW_ACTION_HARDEN:
+			return XNTHREAD_SIGSHADOW_HARDEN;
+		case SIGSHADOW_ACTION_BACKTRACE:
+			return XNTHREAD_SIGSHADOW_BACKTRACE;
+		case SIGSHADOW_ACTION_HOME:
+			return XNTHREAD_SIGSHADOW_HOME;
+		}
+		break;
+	case SIGTERM:
+		return XNTHREAD_SIGTERM;
+	}
+
+	return -1;
+}
+
+/* nklock locked, irqs off */
+void __xnthread_signal(struct xnthread *thread, int sig, int arg)
+{
+	struct lostage_signal *sigwork;
+	int slot;
+
+	if (XENO_WARN_ON(COBALT, !xnthread_test_state(thread, XNUSER)))
+		return;
+
+	slot = get_slot_index_from_sig(sig, arg);
+	if (WARN_ON_ONCE(slot < 0))
+		return;
+
+	sigwork = &thread->sigarray[slot];
+	if (sigwork->task)
+		return;
+
+	sigwork->inband_work = (struct pipeline_inband_work)
+			PIPELINE_INBAND_WORK_INITIALIZER(*sigwork,
+							 lostage_task_signal);
+	sigwork->task = xnthread_host_task(thread);
+	sigwork->signo = sig;
+	sigwork->sigval = sig == SIGDEBUG ? arg | sigdebug_marker : arg;
+	sigwork->self = sigwork; /* Revisit: I-pipe requirement */
+
+	trace_cobalt_lostage_request("signal", sigwork->task);
+
+	pipeline_post_inband_work(sigwork);
+}
+
+void xnthread_signal(struct xnthread *thread, int sig, int arg)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xnthread_signal(thread, sig, arg);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xnthread_signal);
+
+void xnthread_pin_initial(struct xnthread *thread)
+{
+	struct task_struct *p = current;
+	struct xnsched *sched;
+	int cpu;
+	spl_t s;
+
+	/*
+	 * @thread is the Xenomai extension of the current kernel
+	 * task. If the current CPU is part of the affinity mask of
+	 * this thread, pin the latter on this CPU. Otherwise pin it
+	 * to the first CPU of that mask.
+	 */
+	cpu = task_cpu(p);
+	if (!cpumask_test_cpu(cpu, &thread->affinity))
+		cpu = cpumask_first(&thread->affinity);
+
+	set_cpus_allowed_ptr(p, cpumask_of(cpu));
+	/*
+	 * @thread is still unstarted Xenomai-wise, we are precisely
+	 * in the process of mapping the current kernel task to
+	 * it. Therefore xnthread_migrate_passive() is the right way
+	 * to pin it on a real-time CPU.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+	sched = xnsched_struct(cpu);
+	xnthread_migrate_passive(thread, sched);
+	xnlock_put_irqrestore(&nklock, s);
+}
+
+/* nklock locked, irqs off */
+void xnthread_call_mayday(struct xnthread *thread, int reason)
+{
+	struct task_struct *p = xnthread_host_task(thread);
+
+	/* Mayday traps are available to userland threads only. */
+	XENO_BUG_ON(COBALT, !xnthread_test_state(thread, XNUSER));
+	xnthread_set_info(thread, XNKICKED);
+	__xnthread_signal(thread, SIGDEBUG, reason);
+	pipeline_raise_mayday(p);
+}
+EXPORT_SYMBOL_GPL(xnthread_call_mayday);
+
+int xnthread_killall(int grace, int mask)
+{
+	struct xnthread *t, *curr = xnthread_current();
+	int nrkilled = 0, nrthreads, count;
+	long ret;
+	spl_t s;
+
+	secondary_mode_only();
+
+	/*
+	 * We may hold the core lock across calls to xnthread_cancel()
+	 * provided that we won't self-cancel.
+	 */
+	xnlock_get_irqsave(&nklock, s);
+
+	nrthreads = cobalt_nrthreads;
+
+	xnsched_for_each_thread(t) {
+		if (xnthread_test_state(t, XNROOT) ||
+		    xnthread_test_state(t, mask) != mask ||
+		    t == curr)
+			continue;
+
+		if (XENO_DEBUG(COBALT))
+			printk(XENO_INFO "terminating %s[%d]\n",
+			       t->name, xnthread_host_pid(t));
+		nrkilled++;
+		xnthread_cancel(t);
+	}
+
+	xnlock_put_irqrestore(&nklock, s);
+
+	/*
+	 * Cancel then join all existing threads during the grace
+	 * period. It is the caller's responsibility to prevent more
+	 * threads to bind to the system if required, we won't make
+	 * any provision for this here.
+	 */
+	count = nrthreads - nrkilled;
+	if (XENO_DEBUG(COBALT))
+		printk(XENO_INFO "waiting for %d threads to exit\n",
+		       nrkilled);
+
+	if (grace > 0) {
+		ret = wait_event_interruptible_timeout(join_all,
+						       cobalt_nrthreads == count,
+						       grace * HZ);
+		if (ret == 0)
+			return -EAGAIN;
+	} else
+		ret = wait_event_interruptible(join_all,
+					       cobalt_nrthreads == count);
+
+	/* Wait for a full RCU grace period to expire. */
+	wait_for_rcu_grace_period(NULL);
+
+	if (XENO_DEBUG(COBALT))
+		printk(XENO_INFO "joined %d threads\n",
+		       count + nrkilled - cobalt_nrthreads);
+
+	return ret < 0 ? -EINTR : 0;
+}
+EXPORT_SYMBOL_GPL(xnthread_killall);
+
+/* Xenomai's generic personality. */
+struct xnthread_personality xenomai_personality = {
+	.name = "core",
+	.magic = -1
+};
+EXPORT_SYMBOL_GPL(xenomai_personality);
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/time.c b/kernel/xenomai-v3.2.4/kernel/cobalt/time.c
new file mode 100644
index 0000000..cb152fc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/time.c
@@ -0,0 +1,38 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm-generic/xenomai/syscall.h>
+#include <cobalt/kernel/time.h>
+#include <linux/compat.h>
+
+int cobalt_get_timespec64(struct timespec64 *ts,
+			  const struct __kernel_timespec __user *uts)
+{
+	struct __kernel_timespec kts;
+	int ret;
+
+	ret = cobalt_copy_from_user(&kts, uts, sizeof(kts));
+	if (ret)
+		return -EFAULT;
+
+	ts->tv_sec = kts.tv_sec;
+
+	/* Zero out the padding in compat mode */
+	if (in_compat_syscall())
+		kts.tv_nsec &= 0xFFFFFFFFUL;
+
+	/* In 32-bit mode, this drops the padding */
+	ts->tv_nsec = kts.tv_nsec;
+
+	return 0;
+}
+
+int cobalt_put_timespec64(const struct timespec64 *ts,
+		   struct __kernel_timespec __user *uts)
+{
+	struct __kernel_timespec kts = {
+		.tv_sec = ts->tv_sec,
+		.tv_nsec = ts->tv_nsec
+	};
+
+	return cobalt_copy_to_user(uts, &kts, sizeof(kts)) ? -EFAULT : 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c b/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c
new file mode 100644
index 0000000..4b9cea4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/timer.c
@@ -0,0 +1,719 @@
+/*
+ * Copyright (C) 2001,2002,2003,2007,2012 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2004 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/sched.h>
+#include <pipeline/tick.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/intr.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/arith.h>
+#include <trace/events/cobalt-core.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_timer Timer services
+ *
+ * The Xenomai timer facility depends on a clock source (xnclock) for
+ * scheduling the next activation times.
+ *
+ * The core provides and depends on a monotonic clock source (nkclock)
+ * with nanosecond resolution, driving the platform timer hardware
+ * exposed by the interrupt pipeline.
+ *
+ * @{
+ */
+
+int xntimer_heading_p(struct xntimer *timer)
+{
+	struct xnsched *sched = timer->sched;
+	xntimerq_t *q;
+	xntimerh_t *h;
+
+	q = xntimer_percpu_queue(timer);
+	h = xntimerq_head(q);
+	if (h == &timer->aplink)
+		return 1;
+
+	if (sched->lflags & XNHDEFER) {
+		h = xntimerq_second(q, h);
+		if (h == &timer->aplink)
+			return 1;
+	}
+
+	return 0;
+}
+
+void xntimer_enqueue_and_program(struct xntimer *timer, xntimerq_t *q)
+{
+	struct xnsched *sched = xntimer_sched(timer);
+
+	xntimer_enqueue(timer, q);
+	if (pipeline_must_force_program_tick(sched) || xntimer_heading_p(timer)) {
+		struct xnclock *clock = xntimer_clock(timer);
+
+		if (sched != xnsched_current())
+			xnclock_remote_shot(clock, sched);
+		else
+			xnclock_program_shot(clock, sched);
+	}
+}
+
+/**
+ * Arm a timer.
+ *
+ * Activates a timer so that the associated timeout handler will be
+ * fired after each expiration time. A timer can be either periodic or
+ * one-shot, depending on the reload value passed to this routine. The
+ * given timer must have been previously initialized.
+ *
+ * A timer is attached to the clock specified in xntimer_init().
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param value The date of the initial timer shot, expressed in
+ * nanoseconds.
+ *
+ * @param interval The reload value of the timer. It is a periodic
+ * interval value to be used for reprogramming the next timer shot,
+ * expressed in nanoseconds. If @a interval is equal to XN_INFINITE,
+ * the timer will not be reloaded after it has expired.
+ *
+ * @param mode The timer mode. It can be XN_RELATIVE if @a value shall
+ * be interpreted as a relative date, XN_ABSOLUTE for an absolute date
+ * based on the monotonic clock of the related time base (as returned
+ * by xnclock_read_monotonic()), or XN_REALTIME if the absolute date
+ * is based on the adjustable real-time date for the relevant clock
+ * (obtained from xnclock_read_realtime()).
+ *
+ * @return 0 is returned upon success, or -ETIMEDOUT if an absolute
+ * date in the past has been given. In such an event, the timer is
+ * nevertheless armed for the next shot in the timeline if @a interval
+ * is different from XN_INFINITE.
+ *
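+ * A minimal sketch (assuming @a timer was initialized earlier with
+ * xntimer_init()): arm a periodic timer firing every 1 ms, starting
+ * 1 ms from now.
+ *
+ * @code
+ * ret = xntimer_start(timer, 1000000, 1000000, XN_RELATIVE);
+ * @endcode
+ *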
+ * @coretags{unrestricted, atomic-entry}
+ */
+int xntimer_start(struct xntimer *timer,
+		  xnticks_t value, xnticks_t interval,
+		  xntmode_t mode)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xntimerq_t *q = xntimer_percpu_queue(timer);
+	xnticks_t date, now, delay, period;
+	unsigned long gravity;
+	int ret = 0;
+
+	atomic_only();
+
+	trace_cobalt_timer_start(timer, value, interval, mode);
+
+	if ((timer->status & XNTIMER_DEQUEUED) == 0)
+		xntimer_dequeue(timer, q);
+
+	now = xnclock_read_raw(clock);
+
+	timer->status &= ~(XNTIMER_REALTIME | XNTIMER_FIRED | XNTIMER_PERIODIC);
+	switch (mode) {
+	case XN_RELATIVE:
+		if ((xnsticks_t)value < 0)
+			return -ETIMEDOUT;
+		date = xnclock_ns_to_ticks(clock, value) + now;
+		break;
+	case XN_REALTIME:
+		timer->status |= XNTIMER_REALTIME;
+		value -= xnclock_get_offset(clock);
+		fallthrough;
+	default: /* XN_ABSOLUTE || XN_REALTIME */
+		date = xnclock_ns_to_ticks(clock, value);
+		if ((xnsticks_t)(date - now) <= 0) {
+			if (interval == XN_INFINITE)
+				return -ETIMEDOUT;
+			/*
+			 * We are late on arrival for the first
+			 * delivery, wait for the next shot on the
+			 * periodic time line.
+			 */
+			delay = now - date;
+			period = xnclock_ns_to_ticks(clock, interval);
+			date += period * (xnarch_div64(delay, period) + 1);
+		}
+		break;
+	}
+
+	/*
+	 * To cope with the basic system latency, we apply a clock
+	 * gravity value, which is the amount of time expressed in
+	 * clock ticks by which we should anticipate the shot for any
+	 * outstanding timer. The gravity value varies with the type
+	 * of context the timer wakes up, i.e. irq handler, kernel or
+	 * user thread.
+	 */
+	gravity = xntimer_gravity(timer);
+	xntimerh_date(&timer->aplink) = date - gravity;
+	if (now >= xntimerh_date(&timer->aplink))
+		xntimerh_date(&timer->aplink) += gravity / 2;
+
+	timer->interval_ns = XN_INFINITE;
+	timer->interval = XN_INFINITE;
+	if (interval != XN_INFINITE) {
+		timer->interval_ns = interval;
+		timer->interval = xnclock_ns_to_ticks(clock, interval);
+		timer->periodic_ticks = 0;
+		timer->start_date = date;
+		timer->pexpect_ticks = 0;
+		timer->status |= XNTIMER_PERIODIC;
+	}
+
+	timer->status |= XNTIMER_RUNNING;
+	xntimer_enqueue_and_program(timer, q);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(xntimer_start);
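+
+/*
+ * Usage sketch (illustrative only; @a mytimer stands for a
+ * hypothetical timer previously set up with xntimer_init()): arm a
+ * periodic timer firing every millisecond, first shot one
+ * millisecond from now.
+ *
+ *	int ret = xntimer_start(&mytimer, 1000000, 1000000, XN_RELATIVE);
+ *
+ * Passing XN_INFINITE as @a interval instead would arm a one-shot
+ * timer.
+ */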
+
+/**
+ * @fn void xntimer_stop(struct xntimer *timer)
+ *
+ * @brief Disarm a timer.
+ *
+ * This service deactivates a timer previously armed using
+ * xntimer_start(). Once disarmed, the timer can be re-armed later
+ * by calling xntimer_start() again.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void __xntimer_stop(struct xntimer *timer)
+{
+	struct xnclock *clock = xntimer_clock(timer);
+	xntimerq_t *q = xntimer_percpu_queue(timer);
+	struct xnsched *sched;
+	int heading = 1;
+
+	atomic_only();
+
+	trace_cobalt_timer_stop(timer);
+
+	if ((timer->status & XNTIMER_DEQUEUED) == 0) {
+		heading = xntimer_heading_p(timer);
+		xntimer_dequeue(timer, q);
+	}
+	timer->status &= ~(XNTIMER_FIRED|XNTIMER_RUNNING);
+	sched = xntimer_sched(timer);
+
+	/*
+	 * If we removed the heading timer, reprogram the next shot if
+	 * any. If the timer was running on another CPU, let it tick.
+	 */
+	if (heading && sched == xnsched_current())
+		xnclock_program_shot(clock, sched);
+}
+EXPORT_SYMBOL_GPL(__xntimer_stop);
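+
+/*
+ * Usage sketch (illustrative; @a mytimer and the start parameters
+ * are hypothetical): a disarmed timer keeps its descriptor valid,
+ * so it may be re-armed at any point later on.
+ *
+ *	xntimer_stop(&mytimer);
+ *	...
+ *	xntimer_start(&mytimer, 1000000, XN_INFINITE, XN_RELATIVE);
+ */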
+
+/**
+ * @fn xnticks_t xntimer_get_date(struct xntimer *timer)
+ *
+ * @brief Return the absolute expiration date.
+ *
+ * Return the next expiration date of a timer as an absolute count of
+ * nanoseconds.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The expiration date in nanoseconds. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t xntimer_get_date(struct xntimer *timer)
+{
+	atomic_only();
+
+	if (!xntimer_running_p(timer))
+		return XN_INFINITE;
+
+	return xnclock_ticks_to_ns(xntimer_clock(timer), xntimer_expiry(timer));
+}
+EXPORT_SYMBOL_GPL(xntimer_get_date);
+
+/**
+ * @fn xnticks_t xntimer_get_timeout(struct xntimer *timer)
+ *
+ * @brief Return the relative expiration date.
+ *
+ * This call returns the count of nanoseconds remaining until the
+ * timer expires.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @return The count of nanoseconds until expiry. The special value
+ * XN_INFINITE is returned if @a timer is currently disabled. The
+ * timer may expire while this service runs (even if the associated
+ * handler has not fired yet); in such a case, 1 is returned.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+xnticks_t __xntimer_get_timeout(struct xntimer *timer)
+{
+	struct xnclock *clock;
+	xnticks_t expiry, now;
+
+	atomic_only();
+
+	clock = xntimer_clock(timer);
+	now = xnclock_read_raw(clock);
+	expiry = xntimer_expiry(timer);
+	if (expiry < now)
+		return 1;  /* Will elapse shortly. */
+
+	return xnclock_ticks_to_ns(clock, expiry - now);
+}
+EXPORT_SYMBOL_GPL(__xntimer_get_timeout);
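+
+/*
+ * Usage sketch (illustrative; @a mytimer hypothetical): poll the
+ * time remaining until the next shot.
+ *
+ *	xnticks_t left = xntimer_get_timeout(&mytimer);
+ *
+ * which yields XN_INFINITE whenever the timer is not armed.
+ */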
+
+/**
+ * @fn void xntimer_init(struct xntimer *timer,struct xnclock *clock,void (*handler)(struct xntimer *timer), struct xnsched *sched, int flags)
+ * @brief Initialize a timer object.
+ *
+ * Creates a timer. When created, a timer is left disarmed; it must be
+ * started using xntimer_start() in order to be activated.
+ *
+ * @param timer The address of a timer descriptor the nucleus will use
+ * to store the object-specific data. This descriptor must remain
+ * valid for as long as the object is active; therefore, it must be
+ * allocated in permanent memory.
+ *
+ * @param clock The clock the timer relates to. Xenomai defines a
+ * monotonic system clock, with nanosecond resolution, named
+ * nkclock. In addition, external clocks driven by other tick sources
+ * may be created dynamically if CONFIG_XENO_OPT_EXTCLOCK is defined.
+ *
+ * @param handler The routine to call upon expiration of the timer.
+ *
+ * @param sched An optional pointer to the per-CPU scheduler slot the
+ * new timer is affine to. If non-NULL, the timer will fire on the CPU
+ * @a sched is bound to, otherwise it will fire either on the current
+ * CPU if real-time, or on the first real-time CPU.
+ *
+ * @param flags A set of flags describing the timer. A set of clock
+ * gravity hints can be passed via the @a flags argument, used for
+ * optimizing the built-in heuristics aimed at latency reduction:
+ *
+ * - XNTIMER_IGRAVITY, the timer activates a leaf timer handler.
+ * - XNTIMER_KGRAVITY, the timer activates a kernel thread.
+ * - XNTIMER_UGRAVITY, the timer activates a user-space thread.
+ *
+ * There is no limitation on the number of timers which can be
+ * created/active concurrently.
+ *
+ * @coretags{unrestricted}
+ */
+#ifdef DOXYGEN_CPP
+void xntimer_init(struct xntimer *timer, struct xnclock *clock,
+		  void (*handler)(struct xntimer *timer),
+		  struct xnsched *sched,
+		  int flags);
+#endif
+
+void __xntimer_init(struct xntimer *timer,
+		    struct xnclock *clock,
+		    void (*handler)(struct xntimer *timer),
+		    struct xnsched *sched,
+		    int flags)
+{
+	spl_t s __maybe_unused;
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	timer->clock = clock;
+#endif
+	xntimerh_init(&timer->aplink);
+	xntimerh_date(&timer->aplink) = XN_INFINITE;
+	xntimer_set_priority(timer, XNTIMER_STDPRIO);
+	timer->status = (XNTIMER_DEQUEUED|(flags & XNTIMER_INIT_MASK));
+	timer->handler = handler;
+	timer->interval_ns = 0;
+	timer->sched = NULL;
+
+	/*
+	 * Set the timer affinity, preferably to xnsched_cpu(sched) if
+	 * sched was given, CPU0 otherwise.
+	 */
+	if (sched == NULL)
+		sched = xnsched_struct(0);
+
+	xntimer_set_affinity(timer, sched);
+
+#ifdef CONFIG_XENO_OPT_STATS
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+	timer->tracker = clock;
+#endif
+	ksformat(timer->name, XNOBJECT_NAME_LEN, "%d/%s",
+		 task_pid_nr(current), current->comm);
+	xntimer_reset_stats(timer);
+	xnlock_get_irqsave(&nklock, s);
+	list_add_tail(&timer->next_stat, &clock->timerq);
+	clock->nrtimers++;
+	xnvfile_touch(&clock->timer_vfile);
+	xnlock_put_irqrestore(&nklock, s);
+#endif /* CONFIG_XENO_OPT_STATS */
+}
+EXPORT_SYMBOL_GPL(__xntimer_init);
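+
+/*
+ * Usage sketch (illustrative; the handler name is hypothetical):
+ * create a timer paced by the core clock, waking up a kernel
+ * thread, with no specific CPU affinity requested.
+ *
+ *	static struct xntimer mytimer;
+ *
+ *	xntimer_init(&mytimer, &nkclock, mytimer_handler, NULL,
+ *		     XNTIMER_KGRAVITY);
+ */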
+
+void xntimer_set_gravity(struct xntimer *timer, int gravity)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	timer->status &= ~XNTIMER_GRAVITY_MASK;
+	timer->status |= gravity;
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_set_gravity);
+
+#ifdef CONFIG_XENO_OPT_EXTCLOCK
+
+#ifdef CONFIG_XENO_OPT_STATS
+
+static void __xntimer_switch_tracking(struct xntimer *timer,
+				      struct xnclock *newclock)
+{
+	struct xnclock *oldclock = timer->tracker;
+
+	list_del(&timer->next_stat);
+	oldclock->nrtimers--;
+	xnvfile_touch(&oldclock->timer_vfile);
+	list_add_tail(&timer->next_stat, &newclock->timerq);
+	newclock->nrtimers++;
+	xnvfile_touch(&newclock->timer_vfile);
+	timer->tracker = newclock;
+}
+
+void xntimer_switch_tracking(struct xntimer *timer,
+			     struct xnclock *newclock)
+{
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	__xntimer_switch_tracking(timer, newclock);
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_switch_tracking);
+
+#else
+
+static inline
+void __xntimer_switch_tracking(struct xntimer *timer,
+			       struct xnclock *newclock)
+{ }
+
+#endif /* CONFIG_XENO_OPT_STATS */
+
+/**
+ * @brief Set the reference clock of a timer.
+ *
+ * This service changes the reference clock pacing a timer. If the
+ * clock timers are tracked, the tracking information is updated too.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param newclock The address of a valid clock descriptor.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void xntimer_set_clock(struct xntimer *timer,
+		       struct xnclock *newclock)
+{
+	atomic_only();
+
+	if (timer->clock != newclock) {
+		xntimer_stop(timer);
+		timer->clock = newclock;
+		/*
+		 * Since the timer was stopped, fixing its CPU
+		 * affinity can be deferred until it is restarted.
+		 */
+		__xntimer_switch_tracking(timer, newclock);
+	}
+}
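+
+/*
+ * Usage sketch (illustrative; clock and timer names and the restart
+ * parameters are hypothetical): rebind a timer to an external
+ * clock. The timer is stopped as a side effect, so the caller
+ * restarts it afterwards.
+ *
+ *	xntimer_set_clock(&mytimer, &myextclock);
+ *	xntimer_start(&mytimer, 1000000, XN_INFINITE, XN_RELATIVE);
+ */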
+
+#endif /* CONFIG_XENO_OPT_EXTCLOCK */
+
+/**
+ * @fn void xntimer_destroy(struct xntimer *timer)
+ *
+ * @brief Release a timer object.
+ *
+ * Destroys a timer. After it has been destroyed, all resources
+ * associated with the timer have been released. The timer is
+ * automatically deactivated before deletion if active on entry.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @coretags{unrestricted}
+ */
+void xntimer_destroy(struct xntimer *timer)
+{
+	struct xnclock *clock __maybe_unused = xntimer_clock(timer);
+	spl_t s;
+
+	xnlock_get_irqsave(&nklock, s);
+	xntimer_stop(timer);
+	timer->status |= XNTIMER_KILLED;
+	timer->sched = NULL;
+#ifdef CONFIG_XENO_OPT_STATS
+	list_del(&timer->next_stat);
+	clock->nrtimers--;
+	xnvfile_touch(&clock->timer_vfile);
+#endif /* CONFIG_XENO_OPT_STATS */
+	xnlock_put_irqrestore(&nklock, s);
+}
+EXPORT_SYMBOL_GPL(xntimer_destroy);
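+
+/*
+ * Usage sketch (illustrative; names and figures hypothetical): a
+ * typical timer life cycle. The final call implies xntimer_stop()
+ * if the timer is still armed.
+ *
+ *	xntimer_init(&mytimer, &nkclock, mytimer_handler, NULL,
+ *		     XNTIMER_IGRAVITY);
+ *	xntimer_start(&mytimer, 1000000, XN_INFINITE, XN_RELATIVE);
+ *	...
+ *	xntimer_destroy(&mytimer);
+ */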
+
+#ifdef CONFIG_SMP
+
+/**
+ * Migrate a timer.
+ *
+ * This call migrates a timer to another cpu. In order to avoid
+ * pathological cases, it must be called from the CPU to which @a
+ * timer is currently attached.
+ *
+ * @param timer The address of the timer object to be migrated.
+ *
+ * @param sched The address of the destination per-CPU scheduler
+ * slot.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+void __xntimer_migrate(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off, sched != timer->sched */
+	struct xnclock *clock;
+	xntimerq_t *q;
+
+	trace_cobalt_timer_migrate(timer, xnsched_cpu(sched));
+
+	/*
+	 * This assertion triggers when the timer is migrated to a CPU
+	 * for which we do not expect any clock events/IRQs from the
+	 * associated clock device. If so, the timer would never fire
+	 * since clock ticks would never happen on that CPU.
+	 */
+	XENO_WARN_ON_SMP(COBALT,
+			 !cpumask_empty(&xntimer_clock(timer)->affinity) &&
+			 !cpumask_test_cpu(xnsched_cpu(sched),
+					   &xntimer_clock(timer)->affinity));
+
+	if (timer->status & XNTIMER_RUNNING) {
+		xntimer_stop(timer);
+		timer->sched = sched;
+		clock = xntimer_clock(timer);
+		q = xntimer_percpu_queue(timer);
+		xntimer_enqueue(timer, q);
+		if (xntimer_heading_p(timer))
+			xnclock_remote_shot(clock, sched);
+	} else
+		timer->sched = sched;
+}
+EXPORT_SYMBOL_GPL(__xntimer_migrate);
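+
+/*
+ * Usage sketch (illustrative; @a mytimer hypothetical): move a
+ * timer to the scheduler slot of CPU1. As documented above, the
+ * call must be issued from the CPU the timer is currently attached
+ * to.
+ *
+ *	xntimer_migrate(&mytimer, xnsched_struct(1));
+ */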
+
+static inline int get_clock_cpu(struct xnclock *clock, int cpu)
+{
+	/*
+	 * Check a CPU number against the possible set of CPUs
+	 * receiving events from the underlying clock device. If the
+	 * suggested CPU does not receive events from this device,
+	 * return the first one which does instead.
+	 *
+	 * A global clock device with no particular IRQ affinity may
+	 * tick on any CPU, but timers should always be queued on
+	 * CPU0.
+	 *
+	 * NOTE: we have scheduler slots initialized for all online
+	 * CPUs, we can program and receive clock ticks on any of
+	 * them. So there is no point in restricting the valid CPU set
+	 * to cobalt_cpu_affinity, which specifically refers to the
+	 * set of CPUs which may run real-time threads. Although
+	 * receiving a clock tick for waking up a thread living on a
+	 * remote CPU is not optimal since this involves IPI-signaled
+	 * rescheds, this is still a valid case.
+	 */
+	if (cpumask_empty(&clock->affinity))
+		return 0;
+
+	if (cpumask_test_cpu(cpu, &clock->affinity))
+		return cpu;
+
+	return cpumask_first(&clock->affinity);
+}
+
+void __xntimer_set_affinity(struct xntimer *timer, struct xnsched *sched)
+{				/* nklocked, IRQs off */
+	struct xnclock *clock = xntimer_clock(timer);
+	int cpu;
+
+	/*
+	 * Figure out which CPU is best suited for managing this
+	 * timer, preferably picking xnsched_cpu(sched) if the tick
+	 * device backing the timer clock beats on that CPU.
+	 * Otherwise, pick the first CPU from the clock affinity mask
+	 * if set. If not, the timer is backed by a global device
+	 * with no particular IRQ affinity, so it should always be
+	 * queued to CPU0.
+	 */
+	cpu = 0;
+	if (!cpumask_empty(&clock->affinity))
+		cpu = get_clock_cpu(clock, xnsched_cpu(sched));
+
+	xntimer_migrate(timer, xnsched_struct(cpu));
+}
+EXPORT_SYMBOL_GPL(__xntimer_set_affinity);
+
+#endif /* CONFIG_SMP */
+
+/**
+ * Get the count of overruns for the last tick.
+ *
+ * This service returns the count of pending overruns for the last
+ * tick of a given timer, as measured by the difference between the
+ * expected expiry date of the timer and the date @a now passed as
+ * argument.
+ *
+ * @param timer The address of a valid timer descriptor.
+ *
+ * @param waiter The thread for which the overrun count is being
+ * collected.
+ *
+ * @param now The current date, as returned by
+ * xnclock_read_raw(xntimer_clock(timer)).
+ *
+ * @return The number of overruns of @a timer at date @a now.
+ *
+ * @coretags{unrestricted, atomic-entry}
+ */
+unsigned long long xntimer_get_overruns(struct xntimer *timer,
+					struct xnthread *waiter,
+					xnticks_t now)
+{
+	xnticks_t period = timer->interval;
+	unsigned long long overruns = 0;
+	xnsticks_t delta;
+	xntimerq_t *q;
+
+	atomic_only();
+
+	delta = now - xntimer_pexpect(timer);
+	if (unlikely(delta >= (xnsticks_t) period)) {
+		period = timer->interval_ns;
+		delta = xnclock_ticks_to_ns(xntimer_clock(timer), delta);
+		overruns = xnarch_div64(delta, period);
+		timer->pexpect_ticks += overruns;
+		if (xntimer_running_p(timer)) {
+			XENO_BUG_ON(COBALT, (timer->status &
+				    (XNTIMER_DEQUEUED|XNTIMER_PERIODIC))
+				    != XNTIMER_PERIODIC);
+			q = xntimer_percpu_queue(timer);
+			xntimer_dequeue(timer, q);
+			while (xntimerh_date(&timer->aplink) < now) {
+				timer->periodic_ticks++;
+				xntimer_update_date(timer);
+			}
+			xntimer_enqueue_and_program(timer, q);
+		}
+	}
+
+	timer->pexpect_ticks++;
+
+	/* Hide overruns due to the most recent ptracing session. */
+	if (xnthread_test_localinfo(waiter, XNHICCUP))
+		return 0;
+
+	return overruns;
+}
+EXPORT_SYMBOL_GPL(xntimer_get_overruns);
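+
+/*
+ * Worked example (figures illustrative): with a 1 ms period and
+ * @a now landing 3.5 ms past the expected expiry date, delta/period
+ * yields 3 pending overruns; pexpect_ticks is pushed forward by
+ * these 3 periods plus the regular one, so the next call measures
+ * from the updated expected date.
+ */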
+
+char *xntimer_format_time(xnticks_t ns, char *buf, size_t bufsz)
+{
+	unsigned long ms, us, rem;
+	int len = (int)bufsz;
+	char *p = buf;
+	xnticks_t sec;
+
+	if (ns == 0 && bufsz > 1) {
+		strcpy(buf, "-");
+		return buf;
+	}
+
+	sec = xnclock_divrem_billion(ns, &rem);
+	us = rem / 1000;
+	ms = us / 1000;
+	us %= 1000;
+
+	if (sec) {
+		p += ksformat(p, bufsz, "%Lus", sec);
+		len = bufsz - (p - buf);
+	}
+
+	if (len > 0 && (ms || (sec && us))) {
+		p += ksformat(p, bufsz - (p - buf), "%lums", ms);
+		len = bufsz - (p - buf);
+	}
+
+	if (len > 0 && us)
+		p += ksformat(p, bufsz - (p - buf), "%luus", us);
+
+	return buf;
+}
+EXPORT_SYMBOL_GPL(xntimer_format_time);
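+
+/*
+ * Worked examples (illustrative): given the formatting logic above,
+ *
+ *	xntimer_format_time(0, buf, sz)          yields "-"
+ *	xntimer_format_time(1500000, buf, sz)    yields "1ms500us"
+ *	xntimer_format_time(1002000000, buf, sz) yields "1s2ms"
+ *
+ * assuming sz is large enough for the full string.
+ */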
+
+#if defined(CONFIG_XENO_OPT_TIMER_RBTREE)
+static inline bool xntimerh_is_lt(xntimerh_t *left, xntimerh_t *right)
+{
+	return left->date < right->date
+		|| (left->date == right->date && left->prio > right->prio);
+}
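+
+/*
+ * Ordering sketch (illustrative): holders sort by earliest date
+ * first, ties broken by the higher priority value. E.g.
+ * {date=100, prio=2} precedes {date=100, prio=1}, which precedes
+ * {date=200, prio=9}.
+ */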
+
+void xntimerq_insert(xntimerq_t *q, xntimerh_t *holder)
+{
+	struct rb_node **new = &q->root.rb_node, *parent = NULL;
+
+	if (!q->head)
+		q->head = holder;
+	else if (xntimerh_is_lt(holder, q->head)) {
+		parent = &q->head->link;
+		new = &parent->rb_left;
+		q->head = holder;
+	} else while (*new) {
+		xntimerh_t *i = container_of(*new, xntimerh_t, link);
+
+		parent = *new;
+		if (xntimerh_is_lt(holder, i))
+			new = &((*new)->rb_left);
+		else
+			new = &((*new)->rb_right);
+	}
+
+	rb_link_node(&holder->link, parent, new);
+	rb_insert_color(&holder->link, &q->root);
+}
+#endif
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h
new file mode 100644
index 0000000..d98787c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-core.h
@@ -0,0 +1,908 @@
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_core
+
+#if !defined(_TRACE_COBALT_CORE_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_CORE_H
+
+#include <linux/tracepoint.h>
+#include <linux/math64.h>
+#include <cobalt/kernel/timer.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/uapi/kernel/types.h>
+
+struct xnsched;
+struct xnthread;
+struct xnsynch;
+struct xnsched_class;
+struct xnsched_quota_group;
+struct xnthread_init_attr;
+
+DECLARE_EVENT_CLASS(thread_event,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(unsigned long, state)
+		__field(unsigned long, info)
+	),
+
+	TP_fast_assign(
+		__entry->state = thread->state;
+		__entry->info = thread->info;
+		__entry->pid = xnthread_host_pid(thread);
+	),
+
+	TP_printk("pid=%d state=0x%lx info=0x%lx",
+		  __entry->pid, __entry->state, __entry->info)
+);
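+
+/*
+ * Illustrative note: the thread events below reuse this class via
+ * DEFINE_EVENT(), so e.g. cobalt_thread_start renders as
+ * "pid=1042 state=0x8 info=0x0" in the trace buffer (figures
+ * hypothetical).
+ */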
+
+DECLARE_EVENT_CLASS(curr_thread_event,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(unsigned long, state)
+		__field(unsigned long, info)
+	),
+
+	TP_fast_assign(
+		__entry->state = thread->state;
+		__entry->info = thread->info;
+	),
+
+	TP_printk("state=0x%lx info=0x%lx",
+		  __entry->state, __entry->info)
+);
+
+DECLARE_EVENT_CLASS(synch_wait_event,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch),
+
+	TP_STRUCT__entry(
+		__field(struct xnsynch *, synch)
+	),
+
+	TP_fast_assign(
+		__entry->synch = synch;
+	),
+
+	TP_printk("synch=%p", __entry->synch)
+);
+
+DECLARE_EVENT_CLASS(synch_post_event,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch),
+
+	TP_STRUCT__entry(
+		__field(struct xnsynch *, synch)
+	),
+
+	TP_fast_assign(
+		__entry->synch = synch;
+	),
+
+	TP_printk("synch=%p", __entry->synch)
+);
+
+DECLARE_EVENT_CLASS(irq_event,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, irq)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(clock_event,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, irq)
+	),
+
+	TP_fast_assign(
+		__entry->irq = irq;
+	),
+
+	TP_printk("clock_irq=%u", __entry->irq)
+);
+
+DECLARE_EVENT_CLASS(timer_event,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+	),
+
+	TP_printk("timer=%p", __entry->timer)
+);
+
+DECLARE_EVENT_CLASS(registry_event,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr),
+
+	TP_STRUCT__entry(
+		__string(key, key ?: "(anon)")
+		__field(void *, addr)
+	),
+
+	TP_fast_assign(
+		__assign_str(key, key ?: "(anon)");
+		__entry->addr = addr;
+	),
+
+	TP_printk("key=%s, addr=%p", __get_str(key), __entry->addr)
+);
+
+TRACE_EVENT(cobalt_schedule,
+	TP_PROTO(struct xnsched *sched),
+	TP_ARGS(sched),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = sched->status;
+	),
+
+	TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_schedule_remote,
+	TP_PROTO(struct xnsched *sched),
+	TP_ARGS(sched),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, status)
+	),
+
+	TP_fast_assign(
+		__entry->status = sched->status;
+	),
+
+	TP_printk("status=0x%lx", __entry->status)
+);
+
+TRACE_EVENT(cobalt_switch_context,
+	TP_PROTO(struct xnthread *prev, struct xnthread *next),
+	TP_ARGS(prev, next),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, prev)
+		__string(prev_name, prev->name)
+		__field(pid_t, prev_pid)
+		__field(int, prev_prio)
+		__field(unsigned long, prev_state)
+		__field(struct xnthread *, next)
+		__string(next_name, next->name)
+		__field(pid_t, next_pid)
+		__field(int, next_prio)
+	),
+
+	TP_fast_assign(
+		__entry->prev = prev;
+		__assign_str(prev_name, prev->name);
+		__entry->prev_pid = xnthread_host_pid(prev);
+		__entry->prev_prio = xnthread_current_priority(prev);
+		__entry->prev_state = prev->state;
+		__entry->next = next;
+		__assign_str(next_name, next->name);
+		__entry->next_pid = xnthread_host_pid(next);
+		__entry->next_prio = xnthread_current_priority(next);
+	),
+
+	TP_printk("prev_name=%s prev_pid=%d prev_prio=%d prev_state=0x%lx ==> next_name=%s next_pid=%d next_prio=%d",
+		  __get_str(prev_name), __entry->prev_pid,
+		  __entry->prev_prio, __entry->prev_state,
+		  __get_str(next_name), __entry->next_pid, __entry->next_prio)
+);
+
+#ifdef CONFIG_XENO_OPT_SCHED_QUOTA
+
+TRACE_EVENT(cobalt_schedquota_refill,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy),
+
+	TP_STRUCT__entry(
+		__field(int, dummy)
+	),
+
+	TP_fast_assign(
+		(void)dummy;
+	),
+
+	TP_printk("%s", "")
+);
+
+DECLARE_EVENT_CLASS(schedquota_group_event,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+	),
+
+	TP_printk("tgid=%d",
+		  __entry->tgid)
+);
+
+DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_create_group,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg)
+);
+
+DEFINE_EVENT(schedquota_group_event, cobalt_schedquota_destroy_group,
+	TP_PROTO(struct xnsched_quota_group *tg),
+	TP_ARGS(tg)
+);
+
+TRACE_EVENT(cobalt_schedquota_set_limit,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 int percent,
+		 int peak_percent),
+	TP_ARGS(tg, percent, peak_percent),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+		__field(int, percent)
+		__field(int, peak_percent)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+		__entry->percent = percent;
+		__entry->peak_percent = peak_percent;
+	),
+
+	TP_printk("tgid=%d percent=%d peak_percent=%d",
+		  __entry->tgid, __entry->percent, __entry->peak_percent)
+);
+
+DECLARE_EVENT_CLASS(schedquota_thread_event,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread),
+
+	TP_STRUCT__entry(
+		__field(int, tgid)
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+	),
+
+	TP_fast_assign(
+		__entry->tgid = tg->tgid;
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+	),
+
+	TP_printk("tgid=%d thread=%p pid=%d",
+		  __entry->tgid, __entry->thread, __entry->pid)
+);
+
+DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_add_thread,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread)
+);
+
+DEFINE_EVENT(schedquota_thread_event, cobalt_schedquota_remove_thread,
+	TP_PROTO(struct xnsched_quota_group *tg,
+		 struct xnthread *thread),
+	TP_ARGS(tg, thread)
+);
+
+#endif /* CONFIG_XENO_OPT_SCHED_QUOTA */
+
+TRACE_EVENT(cobalt_thread_init,
+	TP_PROTO(struct xnthread *thread,
+		 const struct xnthread_init_attr *attr,
+		 struct xnsched_class *sched_class),
+	TP_ARGS(thread, attr, sched_class),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__string(thread_name, thread->name)
+		__string(class_name, sched_class->name)
+		__field(unsigned long, flags)
+		__field(int, cprio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__assign_str(thread_name, thread->name);
+		__entry->flags = attr->flags;
+		__assign_str(class_name, sched_class->name);
+		__entry->cprio = thread->cprio;
+	),
+
+	TP_printk("thread=%p name=%s flags=0x%lx class=%s prio=%d",
+		   __entry->thread, __get_str(thread_name), __entry->flags,
+		   __get_str(class_name), __entry->cprio)
+);
+
+TRACE_EVENT(cobalt_thread_suspend,
+	TP_PROTO(struct xnthread *thread, unsigned long mask, xnticks_t timeout,
+		 xntmode_t timeout_mode, struct xnsynch *wchan),
+	TP_ARGS(thread, mask, timeout, timeout_mode, wchan),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(unsigned long, mask)
+		__field(xnticks_t, timeout)
+		__field(xntmode_t, timeout_mode)
+		__field(struct xnsynch *, wchan)
+	),
+
+	TP_fast_assign(
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->mask = mask;
+		__entry->timeout = timeout;
+		__entry->timeout_mode = timeout_mode;
+		__entry->wchan = wchan;
+	),
+
+	TP_printk("pid=%d mask=0x%lx timeout=%Lu timeout_mode=%d wchan=%p",
+		  __entry->pid, __entry->mask,
+		  __entry->timeout, __entry->timeout_mode, __entry->wchan)
+);
+
+TRACE_EVENT(cobalt_thread_resume,
+	TP_PROTO(struct xnthread *thread, unsigned long mask),
+	TP_ARGS(thread, mask),
+
+	TP_STRUCT__entry(
+		__string(name, thread->name)
+		__field(pid_t, pid)
+		__field(unsigned long, mask)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, thread->name);
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->mask = mask;
+	),
+
+	TP_printk("name=%s pid=%d mask=0x%lx",
+		  __get_str(name), __entry->pid, __entry->mask)
+);
+
+TRACE_EVENT(cobalt_thread_fault,
+	TP_PROTO(unsigned long ip, unsigned int type),
+	TP_ARGS(ip, type),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, ip)
+		__field(unsigned int, type)
+	),
+
+	TP_fast_assign(
+		__entry->ip = ip;
+		__entry->type = type;
+	),
+
+	TP_printk("ip=%#lx type=%#x",
+		  __entry->ip, __entry->type)
+);
+
+TRACE_EVENT(cobalt_thread_set_current_prio,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(int, cprio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->cprio = xnthread_current_priority(thread);
+	),
+
+	TP_printk("thread=%p pid=%d prio=%d",
+		  __entry->thread, __entry->pid, __entry->cprio)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_start,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_cancel,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_join,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(thread_event, cobalt_thread_unblock,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_wait_period,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_missed_period,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_thread_set_mode,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_thread_migrate,
+	TP_PROTO(unsigned int cpu),
+	TP_ARGS(cpu),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("cpu=%u", __entry->cpu)
+);
+
+TRACE_EVENT(cobalt_thread_migrate_passive,
+	TP_PROTO(struct xnthread *thread, unsigned int cpu),
+	TP_ARGS(thread, cpu),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("thread=%p pid=%d cpu=%u",
+		  __entry->thread, __entry->pid, __entry->cpu)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_gohard,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_watchdog_signal,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_hardened,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+#define cobalt_print_relax_reason(reason)				\
+	__print_symbolic(reason,					\
+			 { SIGDEBUG_UNDEFINED,		"undefined" },	\
+			 { SIGDEBUG_MIGRATE_SIGNAL,	"signal" },	\
+			 { SIGDEBUG_MIGRATE_SYSCALL,	"syscall" },	\
+			 { SIGDEBUG_MIGRATE_FAULT,	"fault" })
+
+TRACE_EVENT(cobalt_shadow_gorelax,
+	TP_PROTO(int reason),
+	TP_ARGS(reason),
+
+	TP_STRUCT__entry(
+		__field(int, reason)
+	),
+
+	TP_fast_assign(
+		__entry->reason = reason;
+	),
+
+	TP_printk("reason=%s", cobalt_print_relax_reason(__entry->reason))
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_relaxed,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_entry,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_shadow_map,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, thread)
+		__field(pid_t, pid)
+		__field(int, prio)
+	),
+
+	TP_fast_assign(
+		__entry->thread = thread;
+		__entry->pid = xnthread_host_pid(thread);
+		__entry->prio = xnthread_base_priority(thread);
+	),
+
+	TP_printk("thread=%p pid=%d prio=%d",
+		  __entry->thread, __entry->pid, __entry->prio)
+);
+
+DEFINE_EVENT(curr_thread_event, cobalt_shadow_unmap,
+	TP_PROTO(struct xnthread *thread),
+	TP_ARGS(thread)
+);
+
+TRACE_EVENT(cobalt_lostage_request,
+	TP_PROTO(const char *type, struct task_struct *task),
+	TP_ARGS(type, task),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+		__field(const char *, type)
+	),
+
+	TP_fast_assign(
+		__entry->type = type;
+		__entry->pid = task_pid_nr(task);
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("request=%s pid=%d comm=%s",
+		  __entry->type, __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_wakeup,
+	TP_PROTO(struct task_struct *task),
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task_pid_nr(task);
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("pid=%d comm=%s",
+		  __entry->pid, __entry->comm)
+);
+
+TRACE_EVENT(cobalt_lostage_signal,
+	TP_PROTO(struct task_struct *task, int sig),
+	TP_ARGS(task, sig),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__array(char, comm, TASK_COMM_LEN)
+		__field(int, sig)
+	),
+
+	TP_fast_assign(
+		__entry->pid = task_pid_nr(task);
+		__entry->sig = sig;
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+	),
+
+	TP_printk("pid=%d comm=%s sig=%d",
+		  __entry->pid, __entry->comm, __entry->sig)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_entry,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_exit,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_attach,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_detach,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_enable,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(irq_event, cobalt_irq_disable,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_entry,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(clock_event, cobalt_clock_exit,
+	TP_PROTO(unsigned int irq),
+	TP_ARGS(irq)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_stop,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer)
+);
+
+DEFINE_EVENT(timer_event, cobalt_timer_expire,
+	TP_PROTO(struct xntimer *timer),
+	TP_ARGS(timer)
+);
+
+#define cobalt_print_timer_mode(mode)			\
+	__print_symbolic(mode,				\
+			 { XN_RELATIVE, "rel" },	\
+			 { XN_ABSOLUTE, "abs" },	\
+			 { XN_REALTIME, "rt" })
+
+TRACE_EVENT(cobalt_timer_start,
+	TP_PROTO(struct xntimer *timer, xnticks_t value, xnticks_t interval,
+		 xntmode_t mode),
+	TP_ARGS(timer, value, interval, mode),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+#ifdef CONFIG_XENO_OPT_STATS
+		__string(name, timer->name)
+#endif
+		__field(xnticks_t, value)
+		__field(xnticks_t, interval)
+		__field(xntmode_t, mode)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+#ifdef CONFIG_XENO_OPT_STATS
+		__assign_str(name, timer->name);
+#endif
+		__entry->value = value;
+		__entry->interval = interval;
+		__entry->mode = mode;
+	),
+
+	TP_printk("timer=%p(%s) value=%Lu interval=%Lu mode=%s",
+		  __entry->timer,
+#ifdef CONFIG_XENO_OPT_STATS
+		  __get_str(name),
+#else
+		  "(anon)",
+#endif
+		  __entry->value, __entry->interval,
+		  cobalt_print_timer_mode(__entry->mode))
+);
+
+#ifdef CONFIG_SMP
+
+TRACE_EVENT(cobalt_timer_migrate,
+	TP_PROTO(struct xntimer *timer, unsigned int cpu),
+	TP_ARGS(timer, cpu),
+
+	TP_STRUCT__entry(
+		__field(struct xntimer *, timer)
+		__field(unsigned int, cpu)
+	),
+
+	TP_fast_assign(
+		__entry->timer = timer;
+		__entry->cpu = cpu;
+	),
+
+	TP_printk("timer=%p cpu=%u",
+		  __entry->timer, __entry->cpu)
+);
+
+#endif /* CONFIG_SMP */
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_sleepon,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_try_acquire,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_wait_event, cobalt_synch_acquire,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_release,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_wakeup_many,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_flush,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(synch_post_event, cobalt_synch_forget,
+	TP_PROTO(struct xnsynch *synch),
+	TP_ARGS(synch)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_enter,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_remove,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+DEFINE_EVENT(registry_event, cobalt_registry_unlink,
+	TP_PROTO(const char *key, void *addr),
+	TP_ARGS(key, addr)
+);
+
+TRACE_EVENT(cobalt_tick_shot,
+	TP_PROTO(s64 delta),
+	TP_ARGS(delta),
+
+	TP_STRUCT__entry(
+		__field(u64, secs)
+		__field(u32, nsecs)
+		__field(s64, delta)
+	),
+
+	TP_fast_assign(
+		__entry->delta = delta;
+		__entry->secs = div_u64_rem(trace_clock_local() + delta,
+					    NSEC_PER_SEC, &__entry->nsecs);
+	),
+
+	TP_printk("next tick at %Lu.%06u (delay: %Ld us)",
+		  (unsigned long long)__entry->secs,
+		  __entry->nsecs / 1000, div_s64(__entry->delta, 1000))
+);
+
+TRACE_EVENT(cobalt_trace,
+	TP_PROTO(const char *msg),
+	TP_ARGS(msg),
+	TP_STRUCT__entry(
+		__string(msg, msg)
+	),
+	TP_fast_assign(
+		__assign_str(msg, msg);
+	),
+	TP_printk("%s", __get_str(msg))
+);
+
+TRACE_EVENT(cobalt_trace_longval,
+	TP_PROTO(int id, u64 val),
+	TP_ARGS(id, val),
+	TP_STRUCT__entry(
+		__field(int, id)
+		__field(u64, val)
+	),
+	TP_fast_assign(
+		__entry->id = id;
+		__entry->val = val;
+	),
+	TP_printk("id=%#x, v=%llu", __entry->id, __entry->val)
+);
+
+TRACE_EVENT(cobalt_trace_pid,
+	TP_PROTO(pid_t pid, int prio),
+	TP_ARGS(pid, prio),
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+		__entry->prio = prio;
+	),
+	TP_printk("pid=%d, prio=%d", __entry->pid, __entry->prio)
+);
+
+TRACE_EVENT(cobalt_latpeak,
+	TP_PROTO(int latmax_ns),
+	TP_ARGS(latmax_ns),
+	TP_STRUCT__entry(
+		 __field(int, latmax_ns)
+	),
+	TP_fast_assign(
+		__entry->latmax_ns = latmax_ns;
+	),
+	TP_printk("** latency peak: %d.%.3d us **",
+		  __entry->latmax_ns / 1000,
+		  __entry->latmax_ns % 1000)
+);
+
+/* Basically cobalt_trace() + trigger point */
+TRACE_EVENT(cobalt_trigger,
+	TP_PROTO(const char *issuer),
+	TP_ARGS(issuer),
+	TP_STRUCT__entry(
+		__string(issuer, issuer)
+	),
+	TP_fast_assign(
+		__assign_str(issuer, issuer);
+	),
+	TP_printk("%s", __get_str(issuer))
+);
+
+#endif /* _TRACE_COBALT_CORE_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-core
+#include <trace/define_trace.h>
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h
new file mode 100644
index 0000000..2bc004d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-posix.h
@@ -0,0 +1,1186 @@
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_posix
+
+#if !defined(_TRACE_COBALT_POSIX_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_POSIX_H
+
+#include <linux/tracepoint.h>
+#include <linux/trace_seq.h>
+#include <xenomai/posix/cond.h>
+#include <xenomai/posix/mqueue.h>
+#include <xenomai/posix/event.h>
+
+#define __timespec_fields(__name)				\
+	__field(time64_t, tv_sec_##__name)			\
+	__field(long, tv_nsec_##__name)
+
+#define __assign_timespec(__to, __from)				\
+	do {							\
+		__entry->tv_sec_##__to = (__from)->tv_sec;	\
+		__entry->tv_nsec_##__to = (__from)->tv_nsec;	\
+	} while (0)
+
+#define __timespec_args(__name)					\
+	(long long)__entry->tv_sec_##__name, __entry->tv_nsec_##__name
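+
+/*
+ * Illustrative note: the three helpers above combine as
+ *
+ *	TP_STRUCT__entry(__timespec_fields(time) ...)
+ *	TP_fast_assign(__assign_timespec(time, time); ...)
+ *	TP_printk("(%lld.%09ld)", __timespec_args(time))
+ *
+ * letting the clock events below record timespec64 values by copy
+ * instead of storing user pointers.
+ */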
+
+#ifdef CONFIG_IA32_EMULATION
+#define __sc_compat(__name)	, { sc_cobalt_##__name + __COBALT_IA32_BASE, "compat-" #__name }
+#else
+#define __sc_compat(__name)
+#endif
+
+#define __cobalt_symbolic_syscall(__name)				\
+	{ sc_cobalt_##__name, #__name }					\
+	__sc_compat(__name)						\
+
+#define __cobalt_syscall_name(__nr)					\
+	__print_symbolic((__nr),					\
+		__cobalt_symbolic_syscall(bind),			\
+		__cobalt_symbolic_syscall(thread_create),		\
+		__cobalt_symbolic_syscall(thread_getpid),		\
+		__cobalt_symbolic_syscall(thread_setmode),		\
+		__cobalt_symbolic_syscall(thread_setname),		\
+		__cobalt_symbolic_syscall(thread_join),			\
+		__cobalt_symbolic_syscall(thread_kill),			\
+		__cobalt_symbolic_syscall(thread_setschedparam_ex),	\
+		__cobalt_symbolic_syscall(thread_getschedparam_ex),	\
+		__cobalt_symbolic_syscall(thread_setschedprio),		\
+		__cobalt_symbolic_syscall(thread_getstat),		\
+		__cobalt_symbolic_syscall(sem_init),			\
+		__cobalt_symbolic_syscall(sem_destroy),			\
+		__cobalt_symbolic_syscall(sem_post),			\
+		__cobalt_symbolic_syscall(sem_wait),			\
+		__cobalt_symbolic_syscall(sem_trywait),			\
+		__cobalt_symbolic_syscall(sem_getvalue),		\
+		__cobalt_symbolic_syscall(sem_open),			\
+		__cobalt_symbolic_syscall(sem_close),			\
+		__cobalt_symbolic_syscall(sem_unlink),			\
+		__cobalt_symbolic_syscall(sem_timedwait),		\
+		__cobalt_symbolic_syscall(sem_inquire),			\
+		__cobalt_symbolic_syscall(sem_broadcast_np),		\
+		__cobalt_symbolic_syscall(clock_getres),		\
+		__cobalt_symbolic_syscall(clock_gettime),		\
+		__cobalt_symbolic_syscall(clock_settime),		\
+		__cobalt_symbolic_syscall(clock_nanosleep),		\
+		__cobalt_symbolic_syscall(mutex_init),			\
+		__cobalt_symbolic_syscall(mutex_check_init),		\
+		__cobalt_symbolic_syscall(mutex_destroy),		\
+		__cobalt_symbolic_syscall(mutex_lock),			\
+		__cobalt_symbolic_syscall(mutex_timedlock),		\
+		__cobalt_symbolic_syscall(mutex_trylock),		\
+		__cobalt_symbolic_syscall(mutex_unlock),		\
+		__cobalt_symbolic_syscall(cond_init),			\
+		__cobalt_symbolic_syscall(cond_destroy),		\
+		__cobalt_symbolic_syscall(cond_wait_prologue),		\
+		__cobalt_symbolic_syscall(cond_wait_epilogue),		\
+		__cobalt_symbolic_syscall(mq_open),			\
+		__cobalt_symbolic_syscall(mq_close),			\
+		__cobalt_symbolic_syscall(mq_unlink),			\
+		__cobalt_symbolic_syscall(mq_getattr),			\
+		__cobalt_symbolic_syscall(mq_timedsend),		\
+		__cobalt_symbolic_syscall(mq_timedreceive),		\
+		__cobalt_symbolic_syscall(mq_notify),			\
+		__cobalt_symbolic_syscall(sched_minprio),		\
+		__cobalt_symbolic_syscall(sched_maxprio),		\
+		__cobalt_symbolic_syscall(sched_weightprio),		\
+		__cobalt_symbolic_syscall(sched_yield),			\
+		__cobalt_symbolic_syscall(sched_setscheduler_ex),	\
+		__cobalt_symbolic_syscall(sched_getscheduler_ex),	\
+		__cobalt_symbolic_syscall(sched_setconfig_np),		\
+		__cobalt_symbolic_syscall(sched_getconfig_np),		\
+		__cobalt_symbolic_syscall(timer_create),		\
+		__cobalt_symbolic_syscall(timer_delete),		\
+		__cobalt_symbolic_syscall(timer_settime),		\
+		__cobalt_symbolic_syscall(timer_gettime),		\
+		__cobalt_symbolic_syscall(timer_getoverrun),		\
+		__cobalt_symbolic_syscall(timerfd_create),		\
+		__cobalt_symbolic_syscall(timerfd_settime),		\
+		__cobalt_symbolic_syscall(timerfd_gettime),		\
+		__cobalt_symbolic_syscall(sigwait),			\
+		__cobalt_symbolic_syscall(sigwaitinfo),			\
+		__cobalt_symbolic_syscall(sigtimedwait),		\
+		__cobalt_symbolic_syscall(sigpending),			\
+		__cobalt_symbolic_syscall(kill),			\
+		__cobalt_symbolic_syscall(sigqueue),			\
+		__cobalt_symbolic_syscall(monitor_init),		\
+		__cobalt_symbolic_syscall(monitor_destroy),		\
+		__cobalt_symbolic_syscall(monitor_enter),		\
+		__cobalt_symbolic_syscall(monitor_wait),		\
+		__cobalt_symbolic_syscall(monitor_sync),		\
+		__cobalt_symbolic_syscall(monitor_exit),		\
+		__cobalt_symbolic_syscall(event_init),			\
+		__cobalt_symbolic_syscall(event_wait),			\
+		__cobalt_symbolic_syscall(event_sync),			\
+		__cobalt_symbolic_syscall(event_destroy),		\
+		__cobalt_symbolic_syscall(event_inquire),		\
+		__cobalt_symbolic_syscall(open),			\
+		__cobalt_symbolic_syscall(socket),			\
+		__cobalt_symbolic_syscall(close),			\
+		__cobalt_symbolic_syscall(ioctl),			\
+		__cobalt_symbolic_syscall(read),			\
+		__cobalt_symbolic_syscall(write),			\
+		__cobalt_symbolic_syscall(recvmsg),			\
+		__cobalt_symbolic_syscall(sendmsg),			\
+		__cobalt_symbolic_syscall(mmap),			\
+		__cobalt_symbolic_syscall(select),			\
+		__cobalt_symbolic_syscall(fcntl),			\
+		__cobalt_symbolic_syscall(migrate),			\
+		__cobalt_symbolic_syscall(archcall),			\
+		__cobalt_symbolic_syscall(trace),			\
+		__cobalt_symbolic_syscall(corectl),			\
+		__cobalt_symbolic_syscall(get_current),			\
+		__cobalt_symbolic_syscall(backtrace),			\
+		__cobalt_symbolic_syscall(serialdbg),			\
+		__cobalt_symbolic_syscall(extend),			\
+		__cobalt_symbolic_syscall(ftrace_puts),			\
+		__cobalt_symbolic_syscall(recvmmsg),			\
+		__cobalt_symbolic_syscall(sendmmsg),			\
+		__cobalt_symbolic_syscall(clock_adjtime),		\
+		__cobalt_symbolic_syscall(sem_timedwait64),		\
+		__cobalt_symbolic_syscall(clock_gettime64),		\
+		__cobalt_symbolic_syscall(clock_settime64),		\
+		__cobalt_symbolic_syscall(clock_nanosleep64),		\
+		__cobalt_symbolic_syscall(clock_getres64),		\
+		__cobalt_symbolic_syscall(clock_adjtime64),		\
+		__cobalt_symbolic_syscall(mutex_timedlock64),		\
+		__cobalt_symbolic_syscall(mq_timedsend64),  		\
+		__cobalt_symbolic_syscall(mq_timedreceive64),		\
+		__cobalt_symbolic_syscall(sigtimedwait64),		\
+		__cobalt_symbolic_syscall(monitor_wait64),		\
+		__cobalt_symbolic_syscall(event_wait64),		\
+		__cobalt_symbolic_syscall(recvmmsg64))
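+
+/*
+ * Illustrative note: __cobalt_syscall_name() resolves a raw syscall
+ * number to its symbolic name at render time, so a sysentry event
+ * for sc_cobalt_sem_wait prints "syscall=sem_wait" rather than a
+ * bare number (example name taken from the table above).
+ */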
+
+DECLARE_EVENT_CLASS(cobalt_syscall_entry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr),
+
+	TP_STRUCT__entry(
+		__field(unsigned int, nr)
+	),
+
+	TP_fast_assign(
+		__entry->nr = nr;
+	),
+
+	TP_printk("syscall=%s", __cobalt_syscall_name(__entry->nr))
+);
+
+DECLARE_EVENT_CLASS(cobalt_syscall_exit,
+	TP_PROTO(long result),
+	TP_ARGS(result),
+
+	TP_STRUCT__entry(
+		__field(long, result)
+	),
+
+	TP_fast_assign(
+		__entry->result = result;
+	),
+
+	TP_printk("result=%ld", __entry->result)
+);
+
+#define cobalt_print_sched_policy(__policy)			\
+	__print_symbolic(__policy,				\
+			 {SCHED_NORMAL, "normal"},		\
+			 {SCHED_FIFO, "fifo"},			\
+			 {SCHED_RR, "rr"},			\
+			 {SCHED_TP, "tp"},			\
+			 {SCHED_QUOTA, "quota"},		\
+			 {SCHED_SPORADIC, "sporadic"},		\
+			 {SCHED_COBALT, "cobalt"},		\
+			 {SCHED_WEAK, "weak"})
+
+const char *cobalt_trace_parse_sched_params(struct trace_seq *, int,
+					    struct sched_param_ex *);
+
+#define __parse_sched_params(policy, params)			\
+	cobalt_trace_parse_sched_params(p, policy,		\
+					(struct sched_param_ex *)(params))
+
+DECLARE_EVENT_CLASS(cobalt_posix_schedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex),
+
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, policy)
+		__dynamic_array(char, param_ex, sizeof(struct sched_param_ex))
+	),
+
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->policy = policy;
+		memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex));
+	),
+
+	TP_printk("pth=%p policy=%s param={ %s }",
+		  (void *)__entry->pth,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __parse_sched_params(__entry->policy,
+				       __get_dynamic_array(param_ex))
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_scheduler,
+	TP_PROTO(pid_t pid, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pid, policy, param_ex),
+
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+		__field(int, policy)
+		__dynamic_array(char, param_ex, sizeof(struct sched_param_ex))
+	),
+
+	TP_fast_assign(
+		__entry->pid = pid;
+		__entry->policy = policy;
+		memcpy(__get_dynamic_array(param_ex), param_ex, sizeof(*param_ex));
+	),
+
+	TP_printk("pid=%d policy=%s param={ %s }",
+		  __entry->pid,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __parse_sched_params(__entry->policy,
+				       __get_dynamic_array(param_ex))
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_void,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy),
+	TP_STRUCT__entry(
+		__field(int, dummy)
+	),
+	TP_fast_assign(
+		(void)dummy;
+	),
+	TP_printk("%s", "")
+);
+
+DEFINE_EVENT(cobalt_syscall_entry, cobalt_head_sysentry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr)
+);
+
+DEFINE_EVENT(cobalt_syscall_exit, cobalt_head_sysexit,
+	TP_PROTO(long result),
+	TP_ARGS(result)
+);
+
+DEFINE_EVENT(cobalt_syscall_entry, cobalt_root_sysentry,
+	TP_PROTO(unsigned int nr),
+	TP_ARGS(nr)
+);
+
+DEFINE_EVENT(cobalt_syscall_exit, cobalt_root_sysexit,
+	TP_PROTO(long result),
+	TP_ARGS(result)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_create,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_setschedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_schedparam, cobalt_pthread_getschedparam,
+	TP_PROTO(unsigned long pth, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pth, policy, param_ex)
+);
+
+TRACE_EVENT(cobalt_pthread_setschedprio,
+	TP_PROTO(unsigned long pth, int prio),
+	TP_ARGS(pth, prio),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->prio = prio;
+	),
+	TP_printk("pth=%p prio=%d", (void *)__entry->pth, __entry->prio)
+);
+
+#define cobalt_print_thread_mode(__mode)			\
+	__print_flags(__mode, "|",				\
+		      {PTHREAD_WARNSW, "warnsw"},		\
+		      {PTHREAD_LOCK_SCHED, "lock"},		\
+		      {PTHREAD_DISABLE_LOCKBREAK, "nolockbreak"})
+
+TRACE_EVENT(cobalt_pthread_setmode,
+	TP_PROTO(int clrmask, int setmask),
+	TP_ARGS(clrmask, setmask),
+	TP_STRUCT__entry(
+		__field(int, clrmask)
+		__field(int, setmask)
+	),
+	TP_fast_assign(
+		__entry->clrmask = clrmask;
+		__entry->setmask = setmask;
+	),
+	TP_printk("clrmask=%#x(%s) setmask=%#x(%s)",
+		  __entry->clrmask, cobalt_print_thread_mode(__entry->clrmask),
+		  __entry->setmask, cobalt_print_thread_mode(__entry->setmask))
+);
+
+TRACE_EVENT(cobalt_pthread_setname,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p name=%s", (void *)__entry->pth, __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_pid,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid),
+	TP_STRUCT__entry(
+		__field(pid_t, pid)
+	),
+	TP_fast_assign(
+		__entry->pid = pid;
+	),
+	TP_printk("pid=%d", __entry->pid)
+);
+
+DEFINE_EVENT(cobalt_posix_pid, cobalt_pthread_stat,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid)
+);
+
+TRACE_EVENT(cobalt_pthread_kill,
+	TP_PROTO(unsigned long pth, int sig),
+	TP_ARGS(pth, sig),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__field(int, sig)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__entry->sig = sig;
+	),
+	TP_printk("pth=%p sig=%d", (void *)__entry->pth, __entry->sig)
+);
+
+TRACE_EVENT(cobalt_pthread_join,
+	TP_PROTO(unsigned long pth),
+	TP_ARGS(pth),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+	),
+	TP_printk("pth=%p", (void *)__entry->pth)
+);
+
+TRACE_EVENT(cobalt_pthread_pid,
+	TP_PROTO(unsigned long pth),
+	TP_ARGS(pth),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+	),
+	TP_printk("pth=%p", (void *)__entry->pth)
+);
+
+TRACE_EVENT(cobalt_pthread_extend,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p +personality=%s", (void *)__entry->pth, __get_str(name))
+);
+
+TRACE_EVENT(cobalt_pthread_restrict,
+	TP_PROTO(unsigned long pth, const char *name),
+	TP_ARGS(pth, name),
+	TP_STRUCT__entry(
+		__field(unsigned long, pth)
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__entry->pth = pth;
+		__assign_str(name, name);
+	),
+	TP_printk("pth=%p -personality=%s", (void *)__entry->pth, __get_str(name))
+);
+
+DEFINE_EVENT(cobalt_void, cobalt_pthread_yield,
+	TP_PROTO(int dummy),
+	TP_ARGS(dummy)
+);
+
+TRACE_EVENT(cobalt_sched_setconfig,
+	TP_PROTO(int cpu, int policy, size_t len),
+	TP_ARGS(cpu, policy, len),
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(int, policy)
+		__field(size_t, len)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->policy = policy;
+		__entry->len = len;
+	),
+	TP_printk("cpu=%d policy=%d(%s) len=%zu",
+		  __entry->cpu, __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->len)
+);
+
+TRACE_EVENT(cobalt_sched_get_config,
+	TP_PROTO(int cpu, int policy, size_t rlen),
+	TP_ARGS(cpu, policy, rlen),
+	TP_STRUCT__entry(
+		__field(int, cpu)
+		__field(int, policy)
+		__field(ssize_t, rlen)
+	),
+	TP_fast_assign(
+		__entry->cpu = cpu;
+		__entry->policy = policy;
+		__entry->rlen = rlen;
+	),
+	TP_printk("cpu=%d policy=%d(%s) rlen=%zd",
+		  __entry->cpu, __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->rlen)
+);
+
+DEFINE_EVENT(cobalt_posix_scheduler, cobalt_sched_setscheduler,
+	TP_PROTO(pid_t pid, int policy,
+		 const struct sched_param_ex *param_ex),
+	TP_ARGS(pid, policy, param_ex)
+);
+
+DEFINE_EVENT(cobalt_posix_pid, cobalt_sched_getscheduler,
+	TP_PROTO(pid_t pid),
+	TP_ARGS(pid)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_prio_bound,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio),
+	TP_STRUCT__entry(
+		__field(int, policy)
+		__field(int, prio)
+	),
+	TP_fast_assign(
+		__entry->policy = policy;
+		__entry->prio = prio;
+	),
+	TP_printk("policy=%d(%s) prio=%d",
+		  __entry->policy,
+		  cobalt_print_sched_policy(__entry->policy),
+		  __entry->prio)
+);
+
+DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_min_prio,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio)
+);
+
+DEFINE_EVENT(cobalt_posix_prio_bound, cobalt_sched_max_prio,
+	TP_PROTO(int policy, int prio),
+	TP_ARGS(policy, prio)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_sem,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle),
+	TP_STRUCT__entry(
+		__field(xnhandle_t, handle)
+	),
+	TP_fast_assign(
+		__entry->handle = handle;
+	),
+	TP_printk("sem=%#x", __entry->handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_wait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_trywait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_timedwait,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_post,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_destroy,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_broadcast,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_inquire,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+TRACE_EVENT(cobalt_psem_getvalue,
+	TP_PROTO(xnhandle_t handle, int value),
+	TP_ARGS(handle, value),
+	TP_STRUCT__entry(
+		__field(xnhandle_t, handle)
+		__field(int, value)
+	),
+	TP_fast_assign(
+		__entry->handle = handle;
+		__entry->value = value;
+	),
+	TP_printk("sem=%#x value=%d", __entry->handle, __entry->value)
+);
+
+#define cobalt_print_sem_flags(__flags)				\
+	__print_flags(__flags, "|",				\
+			 {SEM_FIFO, "fifo"},			\
+			 {SEM_PULSE, "pulse"},			\
+			 {SEM_PSHARED, "pshared"},		\
+			 {SEM_REPORT, "report"},		\
+			 {SEM_WARNDEL, "warndel"},		\
+			 {SEM_RAWCLOCK, "rawclock"},		\
+			 {SEM_NOBUSYDEL, "nobusydel"})
+
+TRACE_EVENT(cobalt_psem_init,
+	TP_PROTO(const char *name, xnhandle_t handle,
+		 int flags, unsigned int value),
+	TP_ARGS(name, handle, flags, value),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(xnhandle_t, handle)
+		__field(int, flags)
+		__field(unsigned int, value)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->handle = handle;
+		__entry->flags = flags;
+		__entry->value = value;
+	),
+	TP_printk("sem=%#x(%s) flags=%#x(%s) value=%u",
+		  __entry->handle,
+		  __get_str(name),
+		  __entry->flags,
+		  cobalt_print_sem_flags(__entry->flags),
+		  __entry->value)
+);
+
+TRACE_EVENT(cobalt_psem_init_failed,
+	TP_PROTO(const char *name, int flags, unsigned int value, int status),
+	TP_ARGS(name, flags, value, status),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, flags)
+		__field(unsigned int, value)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->flags = flags;
+		__entry->value = value;
+		__entry->status = status;
+	),
+	TP_printk("name=%s flags=%#x(%s) value=%u error=%d",
+		  __get_str(name),
+		  __entry->flags,
+		  cobalt_print_sem_flags(__entry->flags),
+		  __entry->value, __entry->status)
+);
+
+#define cobalt_print_oflags(__flags)		\
+	__print_flags(__flags,  "|", 		\
+		      {O_RDONLY, "rdonly"},	\
+		      {O_WRONLY, "wronly"},	\
+		      {O_RDWR, "rdwr"},		\
+		      {O_CREAT, "creat"},	\
+		      {O_EXCL, "excl"},		\
+		      {O_DIRECT, "direct"},	\
+		      {O_NONBLOCK, "nonblock"},	\
+		      {O_TRUNC, "trunc"})
+
+TRACE_EVENT(cobalt_psem_open,
+	TP_PROTO(const char *name, xnhandle_t handle,
+		 int oflags, mode_t mode, unsigned int value),
+	TP_ARGS(name, handle, oflags, mode, value),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(xnhandle_t, handle)
+		__field(int, oflags)
+		__field(mode_t, mode)
+		__field(unsigned int, value)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->handle = handle;
+		__entry->oflags = oflags;
+		if (oflags & O_CREAT) {
+			__entry->mode = mode;
+			__entry->value = value;
+		} else {
+			__entry->mode = 0;
+			__entry->value = 0;
+		}
+	),
+	TP_printk("named_sem=%#x=(%s) oflags=%#x(%s) mode=%o value=%u",
+		  __entry->handle, __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode, __entry->value)
+);
+
+TRACE_EVENT(cobalt_psem_open_failed,
+	TP_PROTO(const char *name, int oflags, mode_t mode,
+		 unsigned int value, int status),
+	TP_ARGS(name, oflags, mode, value, status),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, oflags)
+		__field(mode_t, mode)
+		__field(unsigned int, value)
+		__field(int, status)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->oflags = oflags;
+		__entry->status = status;
+		if (oflags & O_CREAT) {
+			__entry->mode = mode;
+			__entry->value = value;
+		} else {
+			__entry->mode = 0;
+			__entry->value = 0;
+		}
+	),
+	TP_printk("named_sem=%s oflags=%#x(%s) mode=%o value=%u error=%d",
+		  __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode, __entry->value, __entry->status)
+);
+
+DEFINE_EVENT(cobalt_posix_sem, cobalt_psem_close,
+	TP_PROTO(xnhandle_t handle),
+	TP_ARGS(handle)
+);
+
+TRACE_EVENT(cobalt_psem_unlink,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+	TP_printk("name=%s", __get_str(name))
+);
+
+DECLARE_EVENT_CLASS(cobalt_clock_timespec,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *val),
+	TP_ARGS(clk_id, val),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__timespec_fields(val)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__assign_timespec(val, val);
+	),
+
+	TP_printk("clock_id=%d timeval=(%lld.%09ld)",
+		  __entry->clk_id,
+		  __timespec_args(val)
+	)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_getres,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *res),
+	TP_ARGS(clk_id, res)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_gettime,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *time),
+	TP_ARGS(clk_id, time)
+);
+
+DEFINE_EVENT(cobalt_clock_timespec, cobalt_clock_settime,
+	TP_PROTO(clockid_t clk_id, const struct timespec64 *time),
+	TP_ARGS(clk_id, time)
+);
+
+TRACE_EVENT(cobalt_clock_adjtime,
+	TP_PROTO(clockid_t clk_id, struct __kernel_timex *tx),
+	TP_ARGS(clk_id, tx),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__field(struct __kernel_timex *, tx)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__entry->tx = tx;
+	),
+
+	TP_printk("clock_id=%d timex=%p",
+		  __entry->clk_id,
+		  __entry->tx
+	)
+);
+
+#define cobalt_print_timer_flags(__flags)			\
+	__print_flags(__flags, "|",				\
+		      {TIMER_ABSTIME, "TIMER_ABSTIME"})
+
+TRACE_EVENT(cobalt_clock_nanosleep,
+	TP_PROTO(clockid_t clk_id, int flags, const struct timespec64 *time),
+	TP_ARGS(clk_id, flags, time),
+
+	TP_STRUCT__entry(
+		__field(clockid_t, clk_id)
+		__field(int, flags)
+		__timespec_fields(time)
+	),
+
+	TP_fast_assign(
+		__entry->clk_id = clk_id;
+		__entry->flags = flags;
+		__assign_timespec(time, time);
+	),
+
+	TP_printk("clock_id=%d flags=%#x(%s) rqt=(%lld.%09ld)",
+		  __entry->clk_id,
+		  __entry->flags, cobalt_print_timer_flags(__entry->flags),
+		  __timespec_args(time)
+	)
+);
+
+DECLARE_EVENT_CLASS(cobalt_clock_ident,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id),
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(clockid_t, clk_id)
+	),
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->clk_id = clk_id;
+	),
+	TP_printk("name=%s, id=%#x", __get_str(name), __entry->clk_id)
+);
+
+DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_register,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id)
+);
+
+DEFINE_EVENT(cobalt_clock_ident, cobalt_clock_deregister,
+	TP_PROTO(const char *name, clockid_t clk_id),
+	TP_ARGS(name, clk_id)
+);
+
+#define cobalt_print_clock(__clk_id)					\
+	__print_symbolic(__clk_id,					\
+			 {CLOCK_MONOTONIC, "CLOCK_MONOTONIC"},		\
+			 {CLOCK_MONOTONIC_RAW, "CLOCK_MONOTONIC_RAW"},	\
+			 {CLOCK_REALTIME, "CLOCK_REALTIME"})
+
+TRACE_EVENT(cobalt_cond_init,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_condattr *attr),
+	TP_ARGS(u_cnd, attr),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(clockid_t, clk_id)
+		__field(int, pshared)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->clk_id = attr->clock;
+		__entry->pshared = attr->pshared;
+	),
+	TP_printk("cond=%p attr={ .clock=%s, .pshared=%d }",
+		  __entry->u_cnd,
+		  cobalt_print_clock(__entry->clk_id),
+		  __entry->pshared)
+);
+
+TRACE_EVENT(cobalt_cond_destroy,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd),
+	TP_ARGS(u_cnd),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+	),
+	TP_printk("cond=%p", __entry->u_cnd)
+);
+
+TRACE_EVENT(cobalt_cond_timedwait,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_mutex_shadow __user *u_mx,
+		 const struct timespec64 *timeout),
+	TP_ARGS(u_cnd, u_mx, timeout),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(const struct cobalt_mutex_shadow __user *, u_mx)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->u_mx = u_mx;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("cond=%p, mutex=%p, timeout=(%lld.%09ld)",
+		  __entry->u_cnd, __entry->u_mx, __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_cond_wait,
+	TP_PROTO(const struct cobalt_cond_shadow __user *u_cnd,
+		 const struct cobalt_mutex_shadow __user *u_mx),
+	TP_ARGS(u_cnd, u_mx),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_cond_shadow __user *, u_cnd)
+		__field(const struct cobalt_mutex_shadow __user *, u_mx)
+	),
+	TP_fast_assign(
+		__entry->u_cnd = u_cnd;
+		__entry->u_mx = u_mx;
+	),
+	TP_printk("cond=%p, mutex=%p",
+		  __entry->u_cnd, __entry->u_mx)
+);
+
+TRACE_EVENT(cobalt_mq_open,
+	TP_PROTO(const char *name, int oflags, mode_t mode),
+	TP_ARGS(name, oflags, mode),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+		__field(int, oflags)
+		__field(mode_t, mode)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+		__entry->oflags = oflags;
+		__entry->mode = (oflags & O_CREAT) ? mode : 0;
+	),
+
+	TP_printk("name=%s oflags=%#x(%s) mode=%o",
+		  __get_str(name),
+		  __entry->oflags, cobalt_print_oflags(__entry->oflags),
+		  __entry->mode)
+);
+
+TRACE_EVENT(cobalt_mq_notify,
+	TP_PROTO(mqd_t mqd, const struct sigevent *sev),
+	TP_ARGS(mqd, sev),
+
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(int, signo)
+	),
+
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->signo = sev && sev->sigev_notify != SIGEV_NONE ?
+			sev->sigev_signo : 0;
+	),
+
+	TP_printk("mqd=%d signo=%d",
+		  __entry->mqd, __entry->signo)
+);
+
+TRACE_EVENT(cobalt_mq_close,
+	TP_PROTO(mqd_t mqd),
+	TP_ARGS(mqd),
+
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+	),
+
+	TP_fast_assign(
+		__entry->mqd = mqd;
+	),
+
+	TP_printk("mqd=%d", __entry->mqd)
+);
+
+TRACE_EVENT(cobalt_mq_unlink,
+	TP_PROTO(const char *name),
+	TP_ARGS(name),
+
+	TP_STRUCT__entry(
+		__string(name, name)
+	),
+
+	TP_fast_assign(
+		__assign_str(name, name);
+	),
+
+	TP_printk("name=%s", __get_str(name))
+);
+
+TRACE_EVENT(cobalt_mq_send,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len,
+		 unsigned int prio),
+	TP_ARGS(mqd, u_buf, len, prio),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+		__field(unsigned int, prio)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+		__entry->prio = prio;
+	),
+	TP_printk("mqd=%d buf=%p len=%zu prio=%u",
+		  __entry->mqd, __entry->u_buf, __entry->len,
+		  __entry->prio)
+);
+
+TRACE_EVENT(cobalt_mq_timedreceive,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len,
+		 const struct timespec64 *timeout),
+	TP_ARGS(mqd, u_buf, len, timeout),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("mqd=%d buf=%p len=%zu timeout=(%lld.%09ld)",
+		  __entry->mqd, __entry->u_buf, __entry->len,
+		  __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_mq_receive,
+	TP_PROTO(mqd_t mqd, const void __user *u_buf, size_t len),
+	TP_ARGS(mqd, u_buf, len),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(const void __user *, u_buf)
+		__field(size_t, len)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->u_buf = u_buf;
+		__entry->len = len;
+	),
+	TP_printk("mqd=%d buf=%p len=%zu",
+		  __entry->mqd, __entry->u_buf, __entry->len)
+);
+
+DECLARE_EVENT_CLASS(cobalt_posix_mqattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr),
+	TP_STRUCT__entry(
+		__field(mqd_t, mqd)
+		__field(long, flags)
+		__field(long, curmsgs)
+		__field(long, msgsize)
+		__field(long, maxmsg)
+	),
+	TP_fast_assign(
+		__entry->mqd = mqd;
+		__entry->flags = attr->mq_flags;
+		__entry->curmsgs = attr->mq_curmsgs;
+		__entry->msgsize = attr->mq_msgsize;
+		__entry->maxmsg = attr->mq_maxmsg;
+	),
+	TP_printk("mqd=%d flags=%#lx(%s) curmsgs=%ld msgsize=%ld maxmsg=%ld",
+		  __entry->mqd,
+		  __entry->flags, cobalt_print_oflags(__entry->flags),
+		  __entry->curmsgs,
+		  __entry->msgsize,
+		  __entry->maxmsg
+	)
+);
+
+DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_getattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr)
+);
+
+DEFINE_EVENT(cobalt_posix_mqattr, cobalt_mq_setattr,
+	TP_PROTO(mqd_t mqd, const struct mq_attr *attr),
+	TP_ARGS(mqd, attr)
+);
+
+#define cobalt_print_evflags(__flags)			\
+	__print_flags(__flags,  "|",			\
+		      {COBALT_EVENT_SHARED, "shared"},	\
+		      {COBALT_EVENT_PRIO, "prio"})
+
+TRACE_EVENT(cobalt_event_init,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long value, int flags),
+	TP_ARGS(u_event, value, flags),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, value)
+		__field(int, flags)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->value = value;
+		__entry->flags = flags;
+	),
+	TP_printk("event=%p value=%lu flags=%#x(%s)",
+		  __entry->u_event, __entry->value,
+		  __entry->flags, cobalt_print_evflags(__entry->flags))
+);
+
+#define cobalt_print_evmode(__mode)			\
+	__print_symbolic(__mode,			\
+			 {COBALT_EVENT_ANY, "any"},	\
+			 {COBALT_EVENT_ALL, "all"})
+
+TRACE_EVENT(cobalt_event_timedwait,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long bits, int mode,
+		 const struct timespec64 *timeout),
+	TP_ARGS(u_event, bits, mode, timeout),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, bits)
+		__field(int, mode)
+		__timespec_fields(timeout)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->bits = bits;
+		__entry->mode = mode;
+		__assign_timespec(timeout, timeout);
+	),
+	TP_printk("event=%p bits=%#lx mode=%#x(%s) timeout=(%lld.%09ld)",
+		  __entry->u_event, __entry->bits, __entry->mode,
+		  cobalt_print_evmode(__entry->mode),
+		  __timespec_args(timeout))
+);
+
+TRACE_EVENT(cobalt_event_wait,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event,
+		 unsigned long bits, int mode),
+	TP_ARGS(u_event, bits, mode),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+		__field(unsigned long, bits)
+		__field(int, mode)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+		__entry->bits = bits;
+		__entry->mode = mode;
+	),
+	TP_printk("event=%p bits=%#lx mode=%#x(%s)",
+		  __entry->u_event, __entry->bits, __entry->mode,
+		  cobalt_print_evmode(__entry->mode))
+);
+
+DECLARE_EVENT_CLASS(cobalt_event_ident,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event),
+	TP_STRUCT__entry(
+		__field(const struct cobalt_event_shadow __user *, u_event)
+	),
+	TP_fast_assign(
+		__entry->u_event = u_event;
+	),
+	TP_printk("event=%p", __entry->u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_destroy,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_sync,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+DEFINE_EVENT(cobalt_event_ident, cobalt_event_inquire,
+	TP_PROTO(const struct cobalt_event_shadow __user *u_event),
+	TP_ARGS(u_event)
+);
+
+#endif /* _TRACE_COBALT_POSIX_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-posix
+#include <trace/define_trace.h>
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h
new file mode 100644
index 0000000..91b6390
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/trace/cobalt-rtdm.h
@@ -0,0 +1,554 @@
+/*
+ * Copyright (C) 2014 Jan Kiszka <jan.kiszka@siemens.com>.
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM cobalt_rtdm
+
+#if !defined(_TRACE_COBALT_RTDM_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_COBALT_RTDM_H
+
+#include <linux/tracepoint.h>
+#include <linux/mman.h>
+#include <linux/sched.h>
+
+struct rtdm_fd;
+struct rtdm_event;
+struct rtdm_sem;
+struct rtdm_mutex;
+struct xnthread;
+struct rtdm_device;
+struct rtdm_dev_context;
+struct _rtdm_mmap_request;
+
+DECLARE_EVENT_CLASS(fd_event,
+	TP_PROTO(struct rtdm_fd *fd, int ufd),
+	TP_ARGS(fd, ufd),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("device=%p fd=%d",
+		  __entry->dev, __entry->ufd)
+);
+
+DECLARE_EVENT_CLASS(fd_request,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, unsigned long arg),
+	TP_ARGS(task, fd, ufd, arg),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+		__field(unsigned long, arg)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+		__entry->arg = arg;
+	),
+
+	TP_printk("device=%p fd=%d arg=%#lx pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->arg,
+		  __entry->pid, __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(fd_request_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, int status),
+	TP_ARGS(task, fd, ufd, status),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev =
+			!IS_ERR(fd) ? rtdm_fd_to_context(fd)->device : NULL;
+		__entry->ufd = ufd;
+	),
+
+	TP_printk("device=%p fd=%d pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->pid, __entry->comm)
+);
+
+DECLARE_EVENT_CLASS(task_op,
+	TP_PROTO(struct xnthread *task),
+	TP_ARGS(task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+	),
+
+	TP_printk("task %p(%s)", __entry->task, __get_str(task_name))
+);
+
+DECLARE_EVENT_CLASS(event_op,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_event *, ev)
+	),
+
+	TP_fast_assign(
+		__entry->ev = ev;
+	),
+
+	TP_printk("event=%p", __entry->ev)
+);
+
+DECLARE_EVENT_CLASS(sem_op,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_sem *, sem)
+	),
+
+	TP_fast_assign(
+		__entry->sem = sem;
+	),
+
+	TP_printk("sem=%p", __entry->sem)
+);
+
+DECLARE_EVENT_CLASS(mutex_op,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_mutex *, mutex)
+	),
+
+	TP_fast_assign(
+		__entry->mutex = mutex;
+	),
+
+	TP_printk("mutex=%p", __entry->mutex)
+);
+
+TRACE_EVENT(cobalt_device_register,
+	TP_PROTO(struct rtdm_device *dev),
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__string(device_name, dev->name)
+		__field(int, flags)
+		__field(int, class_id)
+		__field(int, subclass_id)
+		__field(int, profile_version)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= dev;
+		__assign_str(device_name, dev->name);
+		__entry->flags = dev->driver->device_flags;
+		__entry->class_id = dev->driver->profile_info.class_id;
+		__entry->subclass_id = dev->driver->profile_info.subclass_id;
+		__entry->profile_version = dev->driver->profile_info.version;
+	),
+
+	TP_printk("%s device %s=%p flags=0x%x, class=%d.%d profile=%d",
+		  (__entry->flags & RTDM_DEVICE_TYPE_MASK)
+		  == RTDM_NAMED_DEVICE ? "named" : "protocol",
+		  __get_str(device_name), __entry->dev,
+		  __entry->flags, __entry->class_id, __entry->subclass_id,
+		  __entry->profile_version)
+);
+
+TRACE_EVENT(cobalt_device_unregister,
+	TP_PROTO(struct rtdm_device *dev),
+	TP_ARGS(dev),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_device *, dev)
+		__string(device_name, dev->name)
+	),
+
+	TP_fast_assign(
+		__entry->dev	= dev;
+		__assign_str(device_name, dev->name);
+	),
+
+	TP_printk("device %s=%p",
+		  __get_str(device_name), __entry->dev)
+);
+
+DEFINE_EVENT(fd_event, cobalt_fd_created,
+	TP_PROTO(struct rtdm_fd *fd, int ufd),
+	TP_ARGS(fd, ufd)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_open,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long oflags),
+	TP_ARGS(task, fd, ufd, oflags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_close,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long lock_count),
+	TP_ARGS(task, fd, ufd, lock_count)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_socket,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long protocol_family),
+	TP_ARGS(task, fd, ufd, protocol_family)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_read,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long len),
+	TP_ARGS(task, fd, ufd, len)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_write,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long len),
+	TP_ARGS(task, fd, ufd, len)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_ioctl,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long request),
+	TP_ARGS(task, fd, ufd, request)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_sendmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_sendmmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_recvmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+DEFINE_EVENT(fd_request, cobalt_fd_recvmmsg,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 unsigned long flags),
+	TP_ARGS(task, fd, ufd, flags)
+);
+
+#define cobalt_print_protbits(__prot)		\
+	__print_flags(__prot,  "|", 		\
+		      {PROT_EXEC, "exec"},	\
+		      {PROT_READ, "read"},	\
+		      {PROT_WRITE, "write"})
+
+#define cobalt_print_mapbits(__flags)		\
+	__print_flags(__flags,  "|", 		\
+		      {MAP_SHARED, "shared"},	\
+		      {MAP_PRIVATE, "private"},	\
+		      {MAP_ANONYMOUS, "anon"},	\
+		      {MAP_FIXED, "fixed"},	\
+		      {MAP_HUGETLB, "huge"},	\
+		      {MAP_NONBLOCK, "nonblock"},	\
+		      {MAP_NORESERVE, "noreserve"},	\
+		      {MAP_POPULATE, "populate"},	\
+		      {MAP_UNINITIALIZED, "uninit"})
+
+TRACE_EVENT(cobalt_fd_mmap,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd, struct _rtdm_mmap_request *rma),
+	TP_ARGS(task, fd, ufd, rma),
+
+	TP_STRUCT__entry(
+		__array(char, comm, TASK_COMM_LEN)
+		__field(pid_t, pid)
+		__field(struct rtdm_device *, dev)
+		__field(int, ufd)
+		__field(size_t, length)
+		__field(off_t, offset)
+		__field(int, prot)
+		__field(int, flags)
+	),
+
+	TP_fast_assign(
+		memcpy(__entry->comm, task->comm, TASK_COMM_LEN);
+		__entry->pid = task_pid_nr(task);
+		__entry->dev = rtdm_fd_to_context(fd)->device;
+		__entry->ufd = ufd;
+		__entry->length = rma->length;
+		__entry->offset = rma->offset;
+		__entry->prot = rma->prot;
+		__entry->flags = rma->flags;
+	),
+
+	TP_printk("device=%p fd=%d area={ len:%zu, off:%Lu }"
+		  " prot=%#x(%s) flags=%#x(%s) pid=%d comm=%s",
+		  __entry->dev, __entry->ufd, __entry->length,
+		  (unsigned long long)__entry->offset,
+		  __entry->prot, cobalt_print_protbits(__entry->prot),
+		  __entry->flags, cobalt_print_mapbits(__entry->flags),
+		  __entry->pid, __entry->comm)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_ioctl_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_read_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_write_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_recvmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_recvmmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_sendmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_sendmmsg_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(fd_request_status, cobalt_fd_mmap_status,
+	TP_PROTO(struct task_struct *task,
+		 struct rtdm_fd *fd, int ufd,
+		 int status),
+	TP_ARGS(task, fd, ufd, status)
+);
+
+DEFINE_EVENT(task_op, cobalt_driver_task_join,
+	TP_PROTO(struct xnthread *task),
+	TP_ARGS(task)
+);
+
+TRACE_EVENT(cobalt_driver_event_init,
+	TP_PROTO(struct rtdm_event *ev, unsigned long pending),
+	TP_ARGS(ev, pending),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_event *, ev)
+		__field(unsigned long,	pending)
+	),
+
+	TP_fast_assign(
+		__entry->ev = ev;
+		__entry->pending = pending;
+	),
+
+	TP_printk("event=%p pending=%#lx",
+		  __entry->ev, __entry->pending)
+);
+
+TRACE_EVENT(cobalt_driver_event_wait,
+	TP_PROTO(struct rtdm_event *ev, struct xnthread *task),
+	TP_ARGS(ev, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_event *, ev)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->ev = ev;
+	),
+
+	TP_printk("event=%p task=%p(%s)",
+		  __entry->ev, __entry->task, __get_str(task_name))
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_signal,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_clear,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_pulse,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+DEFINE_EVENT(event_op, cobalt_driver_event_destroy,
+	TP_PROTO(struct rtdm_event *ev),
+	TP_ARGS(ev)
+);
+
+TRACE_EVENT(cobalt_driver_sem_init,
+	TP_PROTO(struct rtdm_sem *sem, unsigned long value),
+	TP_ARGS(sem, value),
+
+	TP_STRUCT__entry(
+		__field(struct rtdm_sem *, sem)
+		__field(unsigned long, value)
+	),
+
+	TP_fast_assign(
+		__entry->sem = sem;
+		__entry->value = value;
+	),
+
+	TP_printk("sem=%p value=%lu",
+		  __entry->sem, __entry->value)
+);
+
+TRACE_EVENT(cobalt_driver_sem_wait,
+	TP_PROTO(struct rtdm_sem *sem, struct xnthread *task),
+	TP_ARGS(sem, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_sem *, sem)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->sem = sem;
+	),
+
+	TP_printk("sem=%p task=%p(%s)",
+		  __entry->sem, __entry->task, __get_str(task_name))
+);
+
+DEFINE_EVENT(sem_op, cobalt_driver_sem_up,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem)
+);
+
+DEFINE_EVENT(sem_op, cobalt_driver_sem_destroy,
+	TP_PROTO(struct rtdm_sem *sem),
+	TP_ARGS(sem)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_init,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_release,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+DEFINE_EVENT(mutex_op, cobalt_driver_mutex_destroy,
+	TP_PROTO(struct rtdm_mutex *mutex),
+	TP_ARGS(mutex)
+);
+
+TRACE_EVENT(cobalt_driver_mutex_wait,
+	TP_PROTO(struct rtdm_mutex *mutex, struct xnthread *task),
+	TP_ARGS(mutex, task),
+
+	TP_STRUCT__entry(
+		__field(struct xnthread *, task)
+		__string(task_name, task->name)
+		__field(struct rtdm_mutex *, mutex)
+	),
+
+	TP_fast_assign(
+		__entry->task = task;
+		__assign_str(task_name, task->name);
+		__entry->mutex = mutex;
+	),
+
+	TP_printk("mutex=%p task=%p(%s)",
+		  __entry->mutex, __entry->task, __get_str(task_name))
+);
+
+#endif /* _TRACE_COBALT_RTDM_H */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE cobalt-rtdm
+#include <trace/define_trace.h>
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c b/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c
new file mode 100644
index 0000000..8e2c9bb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/tree.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <cobalt/kernel/tree.h>
+
+void xntree_cleanup(struct rb_root *t, void *cookie,
+		void (*destroy)(void *cookie, struct xnid *id))
+{
+	struct rb_node *node, *next;
+
+	node = rb_first(t);
+	while (node) {
+		next = rb_next(node);
+
+		/* destroy is expected to remove the node from the rbtree */
+		destroy(cookie, container_of(node, struct xnid, link));
+
+		node = next;
+	}
+}
+
+int xnid_enter(struct rb_root *t, struct xnid *xnid, xnkey_t key)
+{
+	struct rb_node **new = &t->rb_node, *parent = NULL;
+
+	while (*new) {
+		struct xnid *i = container_of(*new, struct xnid, link);
+
+		parent = *new;
+		if (key < i->key)
+			new = &((*new)->rb_left);
+		else if (key > i->key)
+			new = &((*new)->rb_right);
+		else
+			return -EEXIST;
+	}
+
+	xnid->key = key;
+	rb_link_node(&xnid->link, parent, new);
+	rb_insert_color(&xnid->link, t);
+
+	return 0;
+}
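+
+/*
+ * A minimal usage sketch (hypothetical caller code, not part of this
+ * file): embedding an xnid into an object so that it can be indexed
+ * by key. "struct foo" and foo_index() are illustrative names only.
+ *
+ *	struct foo {
+ *		struct xnid id;
+ *		int payload;
+ *	};
+ *
+ *	static struct rb_root foo_tree = RB_ROOT;
+ *
+ *	static int foo_index(struct foo *f, xnkey_t key)
+ *	{
+ *		return xnid_enter(&foo_tree, &f->id, key);
+ *	}
+ *
+ * xnid_enter() returns -EEXIST when the key is already indexed, so
+ * callers may rely on it to enforce key uniqueness. A matching
+ * cleanup would pass xntree_cleanup() a destroy handler which
+ * unlinks the node, e.g. via rb_erase(&xnid->link, &foo_tree),
+ * before releasing the enclosing object.
+ */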
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules
new file mode 100644
index 0000000..39df24e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/00-rtnet.rules
@@ -0,0 +1,2 @@
+# Don't let udev mess with our special network names
+KERNEL=="vnic*|rteth*|rtlo", NAME="$env{INTERFACE_NAME}"
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules
new file mode 100644
index 0000000..d549eda
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/udev/rtdm.rules
@@ -0,0 +1,2 @@
+# Xenomai real-time devices
+SUBSYSTEM=="rtdm", MODE="0660", GROUP="xenomai"
diff --git a/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c b/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c
new file mode 100644
index 0000000..05fa48a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/cobalt/vfile.c
@@ -0,0 +1,976 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/stdarg.h>
+#include <linux/ctype.h>
+#include <linux/uaccess.h>
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <cobalt/kernel/lock.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/vfile.h>
+#include <asm/xenomai/wrappers.h>
+
+/**
+ * @ingroup cobalt_core
+ * @defgroup cobalt_core_vfile Virtual file services
+ *
+ * Virtual files provide a means to export Xenomai object states to
+ * user-space, based on common kernel interfaces.  This encapsulation
+ * is aimed at:
+ *
+ * - supporting consistent collection of very large record-based
+ * output, without incurring latency peaks for ongoing real-time
+ * activities.
+ *
+ * - in the future, hiding discrepancies between Linux kernel
+ * releases, regarding the proper way to export kernel object states
+ * to userland, either via the /proc interface or by any other means.
+ *
+ * This virtual file implementation offers record-based read support
+ * based on seq_files, single-buffer write support, directory and link
+ * handling, all visible from the /proc namespace.
+ *
+ * The vfile support exposes four filesystem object types:
+ *
+ * - snapshot-driven file (struct xnvfile_snapshot). This is commonly
+ * used to export real-time object states via the /proc filesystem. To
+ * minimize the latency involved in protecting the vfile routines from
+ * changes applied by real-time code on such objects, a snapshot of
+ * the data to output is first taken under proper locking, before the
+ * collected data is formatted and sent out in a lockless manner.
+ *
+ * Because a large number of records may have to be output, the data
+ * collection phase is not strictly atomic as a whole, but only
+ * protected at record level. The vfile implementation can be notified
+ * of updates to the underlying data set, and restart the collection
+ * from scratch until the snapshot is fully consistent.
+ *
+ * - regular sequential file (struct xnvfile_regular). This is
+ * basically an encapsulated sequential file object as available from
+ * the host kernel (i.e. seq_file), with a few additional features
+ * making it handier in a Xenomai environment, such as implicit
+ * locking support and shortened declarations for the simplest,
+ * single-record output.
+ *
+ * - virtual link (struct xnvfile_link). This is a symbolic link
+ * feature integrated with the vfile semantics. The link target is
+ * computed dynamically at creation time from a user-given helper
+ * routine.
+ *
+ * - virtual directory (struct xnvfile_directory). A directory object,
+ * which can be used to create a hierarchy for ordering a set of vfile
+ * objects.
+ *
+ *@{*/
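+
+/*
+ * A minimal usage sketch (hypothetical code, not part of this file):
+ * exporting a single-record regular vfile. The "foo_*" names are
+ * illustrative only; xnvfile_printf() is the formatting helper
+ * provided by the vfile API for ->show() handlers.
+ *
+ *	static int foo_show(struct xnvfile_regular_iterator *it,
+ *			    void *data)
+ *	{
+ *		xnvfile_printf(it, "hello=%d\n", 42);
+ *		return 0;
+ *	}
+ *
+ *	static struct xnvfile_regular_ops foo_ops = {
+ *		.show = foo_show,
+ *	};
+ *
+ *	static struct xnvfile_regular foo_vfile = {
+ *		.ops = &foo_ops,
+ *	};
+ *
+ *	ret = xnvfile_init_regular("foo", &foo_vfile, &cobalt_vfroot);
+ *
+ * With no ->begin() handler, ->show() is called once with a NULL data
+ * pointer, which fits the single-record case described above.
+ */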
+
+/**
+ * @var struct xnvfile_directory cobalt_vfroot
+ * @brief Xenomai vfile root directory
+ *
+ * This vdir maps the /proc/xenomai directory. It can be used to
+ * create a hierarchy of Xenomai-related vfiles under this root.
+ */
+struct xnvfile_directory cobalt_vfroot;
+EXPORT_SYMBOL_GPL(cobalt_vfroot);
+
+static struct xnvfile_directory sysroot;
+
+static void *vfile_snapshot_start(struct seq_file *seq, loff_t *offp)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	loff_t pos = *offp;
+
+	if (pos > it->nrdata)
+		return NULL;
+
+	if (pos == 0)
+		return SEQ_START_TOKEN;
+
+	return it->databuf + (pos - 1) * it->vfile->datasz;
+}
+
+static void *vfile_snapshot_next(struct seq_file *seq, void *v, loff_t *offp)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	loff_t pos = *offp;
+
+	++*offp;
+
+	if (pos >= it->nrdata)
+		return NULL;
+
+	return it->databuf + pos * it->vfile->datasz;
+}
+
+static void vfile_snapshot_stop(struct seq_file *seq, void *v)
+{
+}
+
+static int vfile_snapshot_show(struct seq_file *seq, void *v)
+{
+	struct xnvfile_snapshot_iterator *it = seq->private;
+	void *data = v == SEQ_START_TOKEN ? NULL : v;
+	int ret;
+
+	ret = it->vfile->ops->show(it, data);
+
+	return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
+}
+
+static struct seq_operations vfile_snapshot_ops = {
+	.start = vfile_snapshot_start,
+	.next = vfile_snapshot_next,
+	.stop = vfile_snapshot_stop,
+	.show = vfile_snapshot_show
+};
+
+static void vfile_snapshot_free(struct xnvfile_snapshot_iterator *it, void *buf)
+{
+	kfree(buf);
+}
+
+static int vfile_snapshot_open(struct inode *inode, struct file *file)
+{
+	struct xnvfile_snapshot *vfile = pde_data(inode);
+	struct xnvfile_snapshot_ops *ops = vfile->ops;
+	struct xnvfile_snapshot_iterator *it;
+	int revtag, ret, nrdata;
+	struct seq_file *seq;
+	caddr_t data;
+
+	WARN_ON_ONCE(file->private_data != NULL);
+
+	if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
+		return -EACCES;
+
+	/*
+	 * Make sure to create the seq_file backend only when reading
+	 * from the v-file is possible.
+	 */
+	if ((file->f_mode & FMODE_READ) == 0) {
+		file->private_data = NULL;
+		return 0;
+	}
+
+	if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
+		return -EBUSY;
+
+	it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
+	if (it == NULL)
+		return -ENOMEM;
+
+	it->vfile = vfile;
+	xnvfile_file(vfile) = file;
+
+	ret = vfile->entry.lockops->get(&vfile->entry);
+	if (ret)
+		goto fail;
+redo:
+	/*
+	 * The ->rewind() method is optional; there may be cases where
+	 * we don't have to take an atomic snapshot of the v-file
+	 * contents before proceeding. In case ->rewind() detects a
+	 * stale backend object, it can force us to bail out.
+	 *
+	 * If present, ->rewind() may return a strictly positive
+	 * value, indicating how many records at most may be returned
+	 * by ->next(). We use this hint to allocate the snapshot
+	 * buffer, in case ->begin() is not provided. The size of this
+	 * buffer would then be vfile->datasz * hint value.
+	 *
+	 * If ->begin() is given, we always expect the latter to do the
+	 * allocation for us regardless of the hint value. Otherwise,
+	 * a NULL return from ->rewind() tells us that the vfile won't
+	 * output any snapshot data via ->show().
+	 */
+	nrdata = 0;
+	if (ops->rewind) {
+		nrdata = ops->rewind(it);
+		if (nrdata < 0) {
+			ret = nrdata;
+			vfile->entry.lockops->put(&vfile->entry);
+			goto fail;
+		}
+	}
+	revtag = vfile->tag->rev;
+
+	vfile->entry.lockops->put(&vfile->entry);
+
+	/* Release the data buffer, in case we had to restart. */
+	if (it->databuf) {
+		it->endfn(it, it->databuf);
+		it->databuf = NULL;
+	}
+
+	/*
+	 * Having no record to output is fine, in which case ->begin()
+	 * shall return VFILE_SEQ_EMPTY if present. ->begin() may be
+	 * absent, meaning that no allocation is even required to
+	 * collect the records to output. NULL is kept for allocation
+	 * errors in all other cases.
+	 */
+	if (ops->begin) {
+		XENO_BUG_ON(COBALT, ops->end == NULL);
+		data = ops->begin(it);
+		if (data == NULL) {
+			kfree(it);
+			return -ENOMEM;
+		}
+		if (data != VFILE_SEQ_EMPTY) {
+			it->databuf = data;
+			it->endfn = ops->end;
+		}
+	} else if (nrdata > 0 && vfile->datasz > 0) {
+		/* We have a hint for auto-allocation. */
+		data = kmalloc(vfile->datasz * nrdata, GFP_KERNEL);
+		if (data == NULL) {
+			kfree(it);
+			return -ENOMEM;
+		}
+		it->databuf = data;
+		it->endfn = vfile_snapshot_free;
+	}
+
+	it->nrdata = 0;
+	data = it->databuf;
+	if (data == NULL)
+		goto done;
+
+	/*
+	 * Take a snapshot of the vfile contents, redo if the revision
+	 * tag of the scanned data set changed concurrently.
+	 */
+	for (;;) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			goto fail;
+		if (vfile->tag->rev != revtag)
+			goto redo;
+		ret = ops->next(it, data);
+		vfile->entry.lockops->put(&vfile->entry);
+		if (ret < 0)
+			goto fail;
+		if (ret == 0)
+			break;
+		if (ret != VFILE_SEQ_SKIP) {
+			data += vfile->datasz;
+			it->nrdata++;
+		}
+	}
+
+done:
+	ret = seq_open(file, &vfile_snapshot_ops);
+	if (ret)
+		goto fail;
+
+	seq = file->private_data;
+	it->seq = seq;
+	seq->private = it;
+	xnvfile_nref(vfile)++;
+
+	return 0;
+
+fail:
+	if (it->databuf)
+		it->endfn(it, it->databuf);
+	kfree(it);
+
+	return ret;
+}
+
+static int vfile_snapshot_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct xnvfile_snapshot_iterator *it;
+
+	if (seq) {
+		it = seq->private;
+		if (it) {
+			--xnvfile_nref(it->vfile);
+			XENO_BUG_ON(COBALT, it->vfile->entry.refcnt < 0);
+			if (it->databuf)
+				it->endfn(it, it->databuf);
+			kfree(it);
+		}
+
+		return seq_release(inode, file);
+	}
+
+	return 0;
+}
+
+static ssize_t vfile_snapshot_write(struct file *file,
+				    const char __user *buf,
+				    size_t size, loff_t *ppos)
+{
+	struct xnvfile_snapshot *vfile =
+		pde_data(file->f_path.dentry->d_inode);
+	struct xnvfile_input input;
+	ssize_t ret;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ret;
+	}
+
+	input.u_buf = buf;
+	input.size = size;
+	input.vfile = &vfile->entry;
+
+	ret = vfile->ops->store(&input);
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	return ret;
+}
+
+static const DEFINE_PROC_OPS(vfile_snapshot_fops,
+			vfile_snapshot_open,
+			vfile_snapshot_release,
+			seq_read,
+			vfile_snapshot_write);
+
+/**
+ * @fn int xnvfile_init_snapshot(const char *name, struct xnvfile_snapshot *vfile, struct xnvfile_directory *parent)
+ * @brief Initialize a snapshot-driven vfile.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vfile entry.
+ *
+ * @param vfile A pointer to a vfile descriptor to initialize
+ * from. The following fields in this structure should be filled in
+ * prior to calling this routine:
+ *
+ * - .privsz is the size (in bytes) of the private data area to be
+ * reserved in the @ref snapshot_iterator "vfile iterator". A zero
+ * value indicates that no private area should be reserved.
+ *
+ * - .datasz is the size (in bytes) of a single record to be collected
+ * by the @ref snapshot_next "next() handler" from the @ref
+ * snapshot_ops "operation descriptor".
+ *
+ * - .tag is a pointer to a mandatory vfile revision tag structure
+ * (struct xnvfile_rev_tag). This tag will be monitored for changes by
+ * the vfile core while collecting data to output, so that any update
+ * detected will cause the current snapshot data to be dropped, and
+ * the collection to restart from the beginning. To this end, any
+ * change to the data which may be part of the collected records,
+ * should also invoke xnvfile_touch() on the associated tag.
+ *
+ * - entry.lockops is a pointer to a @ref vfile_lockops "lock descriptor",
+ * defining the lock and unlock operations for the vfile. This pointer
+ * may be left NULL, in which case the operations on the nucleus
+ * lock (i.e. nklock) will be used internally around calls to data
+ * collection handlers (see @ref snapshot_ops "operation descriptor").
+ *
+ * - .ops is a pointer to an @ref snapshot_ops "operation descriptor".
+ *
+ * @param parent A pointer to a virtual directory descriptor; the
+ * vfile entry will be created into this directory. If NULL, the /proc
+ * root directory will be used. /proc/xenomai is mapped on the
+ * globally available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual file entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_snapshot(const char *name,
+			  struct xnvfile_snapshot *vfile,
+			  struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+	int mode;
+
+	XENO_BUG_ON(COBALT, vfile->tag == NULL);
+
+	if (vfile->entry.lockops == NULL)
+		/* Defaults to nucleus lock */
+		vfile->entry.lockops = &xnvfile_nucleus_lock.ops;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	mode = vfile->ops->store ? 0644 : 0444;
+	ppde = parent->entry.pde;
+	pde = proc_create_data(name, mode, ppde, &vfile_snapshot_fops, vfile);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vfile->entry.pde = pde;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_snapshot);
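+
+/*
+ * A minimal setup sketch (hypothetical code, not part of this file):
+ * a snapshot vfile exporting fixed-size records. The "foo_*" names
+ * are illustrative; foo_rewind(), foo_next() and foo_show() stand
+ * for handlers implementing the operations described above.
+ *
+ *	struct foo_rec {
+ *		int value;
+ *	};
+ *
+ *	static struct xnvfile_rev_tag foo_tag;
+ *
+ *	static struct xnvfile_snapshot_ops foo_ops = {
+ *		.rewind = foo_rewind,	// hint: max record count
+ *		.next = foo_next,	// collect one record per call
+ *		.show = foo_show,	// format one collected record
+ *	};
+ *
+ *	static struct xnvfile_snapshot foo_vfile = {
+ *		.datasz = sizeof(struct foo_rec),
+ *		.tag = &foo_tag,
+ *		.ops = &foo_ops,
+ *	};
+ *
+ *	ret = xnvfile_init_snapshot("foo", &foo_vfile, &cobalt_vfroot);
+ *
+ * Code updating the underlying record set should also invoke
+ * xnvfile_touch() so that an in-progress snapshot is restarted, as
+ * described for the .tag field above.
+ */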
+
+static void *vfile_regular_start(struct seq_file *seq, loff_t *offp)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	int ret;
+
+	it->pos = *offp;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ERR_PTR(ret);
+	}
+
+	/*
+	 * If we have no begin() op, then we allow a single call only
+	 * to ->show(), by returning the start token once. Otherwise,
+	 * we are done.
+	 */
+	if (vfile->ops->begin == NULL)
+		return it->pos > 0 ? NULL : SEQ_START_TOKEN;
+
+	return vfile->ops->begin(it);
+}
+
+static void *vfile_regular_next(struct seq_file *seq, void *v, loff_t *offp)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	void *data;
+
+	it->pos = ++(*offp);
+
+	if (vfile->ops->next == NULL)
+		return NULL;
+
+	data = vfile->ops->next(it);
+	if (data == NULL)
+		return NULL;
+
+	return data;
+}
+
+static void vfile_regular_stop(struct seq_file *seq, void *v)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	if (vfile->ops->end)
+		vfile->ops->end(it);
+}
+
+static int vfile_regular_show(struct seq_file *seq, void *v)
+{
+	struct xnvfile_regular_iterator *it = seq->private;
+	struct xnvfile_regular *vfile = it->vfile;
+	void *data = v == SEQ_START_TOKEN ? NULL : v;
+	int ret;
+
+	ret = vfile->ops->show(it, data);
+
+	return ret == VFILE_SEQ_SKIP ? SEQ_SKIP : ret;
+}
+
+static struct seq_operations vfile_regular_ops = {
+	.start = vfile_regular_start,
+	.next = vfile_regular_next,
+	.stop = vfile_regular_stop,
+	.show = vfile_regular_show
+};
+
+static int vfile_regular_open(struct inode *inode, struct file *file)
+{
+	struct xnvfile_regular *vfile = pde_data(inode);
+	struct xnvfile_regular_ops *ops = vfile->ops;
+	struct xnvfile_regular_iterator *it;
+	struct seq_file *seq;
+	int ret;
+
+	if ((file->f_flags & O_EXCL) != 0 && xnvfile_nref(vfile) > 0)
+		return -EBUSY;
+
+	if ((file->f_mode & FMODE_WRITE) != 0 && ops->store == NULL)
+		return -EACCES;
+
+	if ((file->f_mode & FMODE_READ) == 0) {
+		file->private_data = NULL;
+		return 0;
+	}
+
+	it = kzalloc(sizeof(*it) + vfile->privsz, GFP_KERNEL);
+	if (it == NULL)
+		return -ENOMEM;
+
+	it->vfile = vfile;
+	it->pos = -1;
+	xnvfile_file(vfile) = file;
+
+	if (ops->rewind) {
+		ret = ops->rewind(it);
+		if (ret) {
+		fail:
+			kfree(it);
+			return ret;
+		}
+	}
+
+	ret = seq_open(file, &vfile_regular_ops);
+	if (ret)
+		goto fail;
+
+	seq = file->private_data;
+	it->seq = seq;
+	seq->private = it;
+	xnvfile_nref(vfile)++;
+
+	return 0;
+}
+
+static int vfile_regular_release(struct inode *inode, struct file *file)
+{
+	struct seq_file *seq = file->private_data;
+	struct xnvfile_regular_iterator *it;
+
+	if (seq) {
+		it = seq->private;
+		if (it) {
+			--xnvfile_nref(it->vfile);
+			XENO_BUG_ON(COBALT, xnvfile_nref(it->vfile) < 0);
+			kfree(it);
+		}
+
+		return seq_release(inode, file);
+	}
+
+	return 0;
+}
+
+static ssize_t vfile_regular_write(struct file *file,
+				   const char __user *buf,
+				   size_t size, loff_t *ppos)
+{
+	struct xnvfile_regular *vfile =
+		pde_data(file->f_path.dentry->d_inode);
+	struct xnvfile_input input;
+	ssize_t ret;
+
+	if (vfile->entry.lockops) {
+		ret = vfile->entry.lockops->get(&vfile->entry);
+		if (ret)
+			return ret;
+	}
+
+	input.u_buf = buf;
+	input.size = size;
+	input.vfile = &vfile->entry;
+
+	ret = vfile->ops->store(&input);
+
+	if (vfile->entry.lockops)
+		vfile->entry.lockops->put(&vfile->entry);
+
+	return ret;
+}
+
+static const DEFINE_PROC_OPS(vfile_regular_fops,
+			vfile_regular_open,
+			vfile_regular_release,
+			seq_read,
+			vfile_regular_write);
+
+/**
+ * @fn int xnvfile_init_regular(const char *name, struct xnvfile_regular *vfile, struct xnvfile_directory *parent)
+ * @brief Initialize a regular vfile.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vfile entry.
+ *
+ * @param vfile A pointer to a vfile descriptor to initialize
+ * from. The following fields in this structure should be filled in
+ * prior to calling this routine:
+ *
+ * - .privsz is the size (in bytes) of the private data area to be
+ * reserved in the @ref regular_iterator "vfile iterator". A zero
+ * value indicates that no private area should be reserved.
+ *
+ * - entry.lockops is a pointer to a @ref vfile_lockops "locking
+ * descriptor", defining the lock and unlock operations for the
+ * vfile. This pointer may be left NULL, in which case no
+ * locking will be applied.
+ *
+ * - .ops is a pointer to an @ref regular_ops "operation descriptor".
+ *
+ * @param parent A pointer to a virtual directory descriptor; the
+ * vfile entry will be created into this directory. If NULL, the /proc
+ * root directory will be used. /proc/xenomai is mapped on the
+ * globally available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual file entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_regular(const char *name,
+			 struct xnvfile_regular *vfile,
+			 struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+	int mode;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	mode = vfile->ops->store ? 0644 : 0444;
+	ppde = parent->entry.pde;
+	pde = proc_create_data(name, mode, ppde, &vfile_regular_fops, vfile);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vfile->entry.pde = pde;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_regular);
+
+/**
+ * @fn int xnvfile_init_dir(const char *name, struct xnvfile_directory *vdir, struct xnvfile_directory *parent)
+ * @brief Initialize a virtual directory entry.
+ *
+ * @param name The name which should appear in the pseudo-filesystem,
+ * identifying the vdir entry.
+ *
+ * @param vdir A pointer to the virtual directory descriptor to
+ * initialize.
+ *
+ * @param parent A pointer to a virtual directory descriptor standing
+ * for the parent directory of the new vdir.  If NULL, the /proc root
+ * directory will be used. /proc/xenomai is mapped on the globally
+ * available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual directory entry cannot be
+ * created in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_dir(const char *name,
+		     struct xnvfile_directory *vdir,
+		     struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	ppde = parent->entry.pde;
+	pde = proc_mkdir(name, ppde);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vdir->entry.pde = pde;
+	vdir->entry.lockops = NULL;
+	vdir->entry.private = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_dir);
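+
+/*
+ * A short sketch (hypothetical): grouping related vfiles under a
+ * dedicated subdirectory of /proc/xenomai. "foo_dir" and
+ * "foo_state_vfile" are illustrative names.
+ *
+ *	static struct xnvfile_directory foo_dir;
+ *
+ *	ret = xnvfile_init_dir("foo", &foo_dir, &cobalt_vfroot);
+ *	if (ret == 0)
+ *		ret = xnvfile_init_regular("state", &foo_state_vfile,
+ *					   &foo_dir);
+ */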
+
+/**
+ * @fn int xnvfile_init_link(const char *from, const char *to, struct xnvfile_link *vlink, struct xnvfile_directory *parent)
+ * @brief Initialize a virtual link entry.
+ *
+ * @param from The name which should appear in the pseudo-filesystem,
+ * identifying the vlink entry.
+ *
+ * @param to The target file name which should be referred to
+ * symbolically by @a name.
+ *
+ * @param vlink A pointer to the virtual link descriptor to
+ * initialize.
+ *
+ * @param parent A pointer to a virtual directory descriptor standing
+ * for the parent directory of the new vlink. If NULL, the /proc root
+ * directory will be used. /proc/xenomai is mapped on the globally
+ * available @a cobalt_vfroot vdir.
+ *
+ * @return 0 is returned on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the virtual link entry cannot be created
+ * in the /proc hierarchy.
+ *
+ * @coretags{secondary-only}
+ */
+int xnvfile_init_link(const char *from,
+		      const char *to,
+		      struct xnvfile_link *vlink,
+		      struct xnvfile_directory *parent)
+{
+	struct proc_dir_entry *ppde, *pde;
+
+	if (parent == NULL)
+		parent = &sysroot;
+
+	ppde = parent->entry.pde;
+	pde = proc_symlink(from, ppde, to);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vlink->entry.pde = pde;
+	vlink->entry.lockops = NULL;
+	vlink->entry.private = NULL;
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(xnvfile_init_link);
+
+/**
+ * @fn void xnvfile_destroy(struct xnvfile *vfile)
+ * @brief Removes a virtual file entry.
+ *
+ * @param vfile A pointer to the virtual file descriptor to
+ * remove.
+ *
+ * @coretags{secondary-only}
+ */
+void xnvfile_destroy(struct xnvfile *vfile)
+{
+	proc_remove(vfile->pde);
+}
+EXPORT_SYMBOL_GPL(xnvfile_destroy);
+
+/**
+ * @fn ssize_t xnvfile_get_blob(struct xnvfile_input *input, void *data, size_t size)
+ * @brief Read in a chunk of data written to the vfile.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_blob() retrieves this data as an untyped
+ * binary blob, and copies it back to the caller's buffer.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param data The address of the destination buffer to copy the input
+ * data to.
+ *
+ * @param size The maximum number of bytes to copy to the destination
+ * buffer. If @a size is smaller than the actual data size, the input
+ * is truncated to @a size.
+ *
+ * @return The number of bytes read and copied to the destination
+ * buffer upon success. Otherwise, a negative error code is returned:
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_blob(struct xnvfile_input *input,
+			 void *data, size_t size)
+{
+	ssize_t nbytes = input->size;
+
+	if (nbytes > size)
+		nbytes = size;
+
+	if (nbytes > 0 && copy_from_user(data, input->u_buf, nbytes))
+		return -EFAULT;
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_blob);
+
+/**
+ * @fn ssize_t xnvfile_get_string(struct xnvfile_input *input, char *s, size_t maxlen)
+ * @brief Read in a C-string written to the vfile.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_string() retrieves this data as a
+ * null-terminated character string, and copies it back to the
+ * caller's buffer.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param s The address of the destination string buffer to copy the
+ * input data to.
+ *
+ * @param maxlen The maximum number of bytes to copy to the
+ * destination buffer, including the ending null character. If @a
+ * maxlen is smaller than the actual string length, the input is
+ * truncated to @a maxlen.
+ *
+ * @return The number of characters read upon success. Otherwise, a
+ * negative error code is returned:
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_string(struct xnvfile_input *input,
+			   char *s, size_t maxlen)
+{
+	ssize_t nbytes, eol;
+
+	if (maxlen < 1)
+		return -EINVAL;
+
+	nbytes = xnvfile_get_blob(input, s, maxlen - 1);
+	if (nbytes < 0)
+		return nbytes;
+
+	eol = nbytes;
+	if (eol > 0 && s[eol - 1] == '\n')
+		eol--;
+
+	s[eol] = '\0';
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_string);
+
+/**
+ * @fn ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
+ * @brief Evaluate the string written to the vfile as a long integer.
+ *
+ * When writing to a vfile, the associated store() handler from the
+ * @ref snapshot_store "snapshot-driven vfile" or @ref regular_store
+ * "regular vfile" is called, with a single argument describing the
+ * input data. xnvfile_get_integer() retrieves and interprets this
+ * data as a long integer, and copies the resulting value back to @a
+ * valp.
+ *
+ * The long integer can be expressed in decimal, octal or hexadecimal
+ * bases depending on the prefix found.
+ *
+ * @param input A pointer to the input descriptor passed to the
+ * store() handler.
+ *
+ * @param valp The address of a long integer variable to receive the
+ * value.
+ *
+ * @return The number of characters read while evaluating the input as
+ * a long integer upon success. Otherwise, a negative error code is
+ * returned:
+ *
+ * - -EINVAL indicates a parse error on the input stream; the written
+ * text cannot be evaluated as a long integer.
+ *
+ * - -EFAULT indicates an invalid source buffer address.
+ *
+ * @coretags{secondary-only}
+ */
+ssize_t xnvfile_get_integer(struct xnvfile_input *input, long *valp)
+{
+	char *end, buf[32];
+	ssize_t nbytes;
+	long val;
+
+	nbytes = xnvfile_get_blob(input, buf, sizeof(buf) - 1);
+	if (nbytes < 0)
+		return nbytes;
+
+	if (nbytes == 0)
+		return -EINVAL;
+
+	buf[nbytes] = '\0';
+	val = simple_strtol(buf, &end, 0);
+
+	if (*end != '\0' && !isspace(*end))
+		return -EINVAL;
+
+	*valp = val;
+
+	return nbytes;
+}
+EXPORT_SYMBOL_GPL(xnvfile_get_integer);
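+
+/*
+ * A sketch of a store() handler (hypothetical code): parsing a long
+ * integer written to the vfile with xnvfile_get_integer(). The
+ * "foo_*" names are illustrative only.
+ *
+ *	static ssize_t foo_store(struct xnvfile_input *input)
+ *	{
+ *		long val;
+ *		ssize_t nbytes;
+ *
+ *		nbytes = xnvfile_get_integer(input, &val);
+ *		if (nbytes < 0)
+ *			return nbytes;
+ *
+ *		foo_value = val;
+ *
+ *		return nbytes;	// count of bytes consumed
+ *	}
+ */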
+
+int __vfile_hostlock_get(struct xnvfile *vfile)
+{
+	struct xnvfile_hostlock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
+	mutex_lock(&lc->mutex);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__vfile_hostlock_get);
+
+void __vfile_hostlock_put(struct xnvfile *vfile)
+{
+	struct xnvfile_hostlock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_hostlock_class, ops);
+	mutex_unlock(&lc->mutex);
+}
+EXPORT_SYMBOL_GPL(__vfile_hostlock_put);
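+
+/*
+ * A hedged sketch (hypothetical): binding a vfile to a mutex-based
+ * host lock class, so accesses serialize on a regular kernel mutex
+ * instead of the nucleus lock. "foo_lock" is an illustrative name;
+ * the .mutex member is assumed to be initialized before first use.
+ *
+ *	static struct xnvfile_hostlock_class foo_lock = {
+ *		.ops = {
+ *			.get = __vfile_hostlock_get,
+ *			.put = __vfile_hostlock_put,
+ *		},
+ *	};
+ *
+ *	mutex_init(&foo_lock.mutex);
+ *	foo_vfile.entry.lockops = &foo_lock.ops;
+ */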
+
+static int __vfile_nklock_get(struct xnvfile *vfile)
+{
+	struct xnvfile_nklock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
+	xnlock_get_irqsave(&nklock, lc->s);
+
+	return 0;
+}
+
+static void __vfile_nklock_put(struct xnvfile *vfile)
+{
+	struct xnvfile_nklock_class *lc;
+
+	lc = container_of(vfile->lockops, struct xnvfile_nklock_class, ops);
+	xnlock_put_irqrestore(&nklock, lc->s);
+}
+
+struct xnvfile_nklock_class xnvfile_nucleus_lock = {
+	.ops = {
+		.get = __vfile_nklock_get,
+		.put = __vfile_nklock_put,
+	},
+};
+
+int __init xnvfile_init_root(void)
+{
+	struct xnvfile_directory *vdir = &cobalt_vfroot;
+	struct proc_dir_entry *pde;
+
+	pde = proc_mkdir("xenomai", NULL);
+	if (pde == NULL)
+		return -ENOMEM;
+
+	vdir->entry.pde = pde;
+	vdir->entry.lockops = NULL;
+	vdir->entry.private = NULL;
+
+	return 0;
+}
+
+void xnvfile_destroy_root(void)
+{
+	cobalt_vfroot.entry.pde = NULL;
+	remove_proc_entry("xenomai", NULL);
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig
new file mode 100644
index 0000000..197a48e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/Kconfig
@@ -0,0 +1,35 @@
+menu "Drivers"
+
+config XENO_OPT_RTDM_COMPAT_DEVNODE
+	bool "Enable legacy pathnames for named RTDM devices"
+	default y
+	help
+	This compatibility option allows applications to open named
+	RTDM devices using the legacy naming scheme, i.e.
+
+	fd = open("devname", ...);
+	   or
+	fd = open("/dev/devname", ...);
+
+	When such a request is received by RTDM, a warning message is
+	issued to the kernel log whenever XENO_OPT_DEBUG_LEGACY is
+	also enabled in the kernel configuration.
+
+	Applications should open named devices via their actual device
+	nodes instead, i.e.
+
+	fd = open("/dev/rtdm/devname", ...);
+
+source "drivers/xenomai/autotune/Kconfig"
+source "drivers/xenomai/serial/Kconfig"
+source "drivers/xenomai/testing/Kconfig"
+source "drivers/xenomai/can/Kconfig"
+source "drivers/xenomai/net/Kconfig"
+source "drivers/xenomai/analogy/Kconfig"
+source "drivers/xenomai/ipc/Kconfig"
+source "drivers/xenomai/udd/Kconfig"
+source "drivers/xenomai/gpio/Kconfig"
+source "drivers/xenomai/gpiopwm/Kconfig"
+source "drivers/xenomai/spi/Kconfig"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/Makefile
new file mode 100644
index 0000000..b8fe1b3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XENOMAI) += autotune/ serial/ testing/ can/ net/ analogy/ ipc/ udd/ gpio/ gpiopwm/ spi/
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig
new file mode 100644
index 0000000..858762b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Kconfig
@@ -0,0 +1,56 @@
+menu "ANALOGY drivers"
+
+config XENO_DRIVERS_ANALOGY
+	tristate "ANALOGY interface"
+	help
+
+	ANALOGY is a framework aimed at supporting data acquisition
+	devices.
+
+config XENO_DRIVERS_ANALOGY_DEBUG
+       depends on XENO_DRIVERS_ANALOGY
+       bool "Analogy debug trace"
+       default n
+       help
+
+       Enable debugging traces in Analogy so as to monitor the
+       behaviour of the Analogy core and its drivers.
+
+config XENO_DRIVERS_ANALOGY_DEBUG_FTRACE
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       bool "Analogy debug ftrace"
+       default n
+       help
+
+       Route the Analogy a4l_dbg and a4l_info statements to /sys/kernel/debug/
+
+config XENO_DRIVERS_ANALOGY_DEBUG_LEVEL
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       int "Analogy core debug level threshold"
+       default 0
+       help
+
+       Define the level above which the debugging traces will not be
+       displayed.
+
+       WARNING: this threshold only applies to the Analogy core;
+       it does not affect the drivers.
+
+config XENO_DRIVERS_ANALOGY_DRIVER_DEBUG_LEVEL
+       depends on XENO_DRIVERS_ANALOGY_DEBUG
+       int "Analogy driver debug level threshold"
+       default 0
+       help
+
+       Define the level above which the debugging traces will not be
+       displayed.
+
+       WARNING: this threshold only applies to the Analogy drivers;
+       it does not affect the core.
+
+source "drivers/xenomai/analogy/testing/Kconfig"
+source "drivers/xenomai/analogy/intel/Kconfig"
+source "drivers/xenomai/analogy/national_instruments/Kconfig"
+source "drivers/xenomai/analogy/sensoray/Kconfig"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile
new file mode 100644
index 0000000..8dcb7e7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/Makefile
@@ -0,0 +1,16 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY) += xeno_analogy.o testing/ intel/ national_instruments/ sensoray/
+
+xeno_analogy-y := \
+	buffer.o \
+	command.o \
+	device.o \
+	driver.o \
+	driver_facilities.o \
+	instruction.o \
+	rtdm_helpers.o \
+	subdevice.o \
+	transfer.o \
+	rtdm_interface.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c
new file mode 100644
index 0000000..df22894
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/buffer.c
@@ -0,0 +1,1145 @@
+/*
+ * Analogy for Linux, buffer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/mman.h>
+#include <linux/vmalloc.h>
+#include <asm/errno.h>
+#include <asm/io.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Initialization functions (init, alloc, free) --- */
+
+/* The buffer characteristics are very close to the Comedi ones: the
+   buffer is allocated with vmalloc() and the physical addresses of the
+   pages which compose the virtual buffer are held in a table */
+
+void a4l_free_buffer(struct a4l_buffer * buf_desc)
+{
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+	if (buf_desc->pg_list != NULL) {
+		rtdm_free(buf_desc->pg_list);
+		buf_desc->pg_list = NULL;
+	}
+
+	if (buf_desc->buf != NULL) {
+		char *vaddr, *vabase = buf_desc->buf;
+		for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+		     vaddr += PAGE_SIZE)
+			ClearPageReserved(vmalloc_to_page(vaddr));
+		vfree(buf_desc->buf);
+		buf_desc->buf = NULL;
+	}
+}
+
+int a4l_alloc_buffer(struct a4l_buffer *buf_desc, int buf_size)
+{
+	int ret = 0;
+	char *vaddr, *vabase;
+
+	buf_desc->size = buf_size;
+	buf_desc->size = PAGE_ALIGN(buf_desc->size);
+
+	buf_desc->buf = vmalloc_32(buf_desc->size);
+	if (buf_desc->buf == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	vabase = buf_desc->buf;
+
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		SetPageReserved(vmalloc_to_page(vaddr));
+
+	buf_desc->pg_list = rtdm_malloc(((buf_desc->size) >> PAGE_SHIFT) *
+					sizeof(unsigned long));
+	if (buf_desc->pg_list == NULL) {
+		ret = -ENOMEM;
+		goto out_virt_contig_alloc;
+	}
+
+	for (vaddr = vabase; vaddr < vabase + buf_desc->size;
+	     vaddr += PAGE_SIZE)
+		buf_desc->pg_list[(vaddr - vabase) >> PAGE_SHIFT] =
+			(unsigned long) page_to_phys(vmalloc_to_page(vaddr));
+
+	__a4l_dbg(1, core_dbg, "buf=%p buf->buf=%p\n", buf_desc, buf_desc->buf);
+
+out_virt_contig_alloc:
+	if (ret != 0)
+		a4l_free_buffer(buf_desc);
+
+	return ret;
+}
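+
+/*
+ * Illustration (an assumption about intended use, not part of the
+ * driver code): with the page table built above, the physical address
+ * backing a byte offset into the buffer can be resolved as
+ *
+ *	paddr = buf_desc->pg_list[offset >> PAGE_SHIFT]
+ *		+ (offset & ~PAGE_MASK);
+ *
+ * which is the kind of lookup a DMA-capable driver needs when
+ * programming transfers from this virtually contiguous area.
+ */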
+
+static void a4l_reinit_buffer(struct a4l_buffer *buf_desc)
+{
+	/* No command to process yet */
+	buf_desc->cur_cmd = NULL;
+
+	/* No more (or not yet) linked with a subdevice */
+	buf_desc->subd = NULL;
+
+	/* Initializes counts and flags */
+	buf_desc->end_count = 0;
+	buf_desc->prd_count = 0;
+	buf_desc->cns_count = 0;
+	buf_desc->tmp_count = 0;
+	buf_desc->mng_count = 0;
+
+	/* Flush pending events */
+	buf_desc->flags = 0;
+	a4l_flush_sync(&buf_desc->sync);
+}
+
+void a4l_init_buffer(struct a4l_buffer *buf_desc)
+{
+	memset(buf_desc, 0, sizeof(struct a4l_buffer));
+	a4l_init_sync(&buf_desc->sync);
+	a4l_reinit_buffer(buf_desc);
+}
+
+void a4l_cleanup_buffer(struct a4l_buffer *buf_desc)
+{
+	a4l_cleanup_sync(&buf_desc->sync);
+}
+
+int a4l_setup_buffer(struct a4l_device_context *cxt, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_buffer *buf_desc = cxt->buffer;
+	int i;
+
+	/* Retrieve the related subdevice */
+	buf_desc->subd = a4l_get_subd(cxt->dev, cmd->idx_subd);
+	if (buf_desc->subd == NULL) {
+		__a4l_err("a4l_setup_buffer: subdevice index "
+			  "out of range (%d)\n", cmd->idx_subd);
+		return -EINVAL;
+	}
+
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &buf_desc->subd->status)) {
+		__a4l_err("a4l_setup_buffer: subdevice %d already busy\n",
+			  cmd->idx_subd);
+		return -EBUSY;
+	}
+
+	/* Checks if the transfer system has to work in bulk mode */
+	if (cmd->flags & A4L_CMD_BULK)
+		set_bit(A4L_BUF_BULK_NR, &buf_desc->flags);
+
+	/* Sets the working command */
+	buf_desc->cur_cmd = cmd;
+
+	/* Link the subdevice with the context's buffer */
+	buf_desc->subd->buf = buf_desc;
+
+	/* Computes the count to reach, if need be */
+	if (cmd->stop_src == TRIG_COUNT) {
+		for (i = 0; i < cmd->nb_chan; i++) {
+			struct a4l_channel *chft;
+			chft = a4l_get_chfeat(buf_desc->subd,
+					      CR_CHAN(cmd->chan_descs[i]));
+			buf_desc->end_count += chft->nb_bits / 8;
+		}
+		buf_desc->end_count *= cmd->stop_arg;
+	}
+
+	__a4l_dbg(1, core_dbg, "end_count=%lu\n", buf_desc->end_count);
+
+	return 0;
+}
+
+void a4l_cancel_buffer(struct a4l_device_context *cxt)
+{
+	struct a4l_buffer *buf_desc = cxt->buffer;
+	struct a4l_subdevice *subd = buf_desc->subd;
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return;
+
+	/* If a "cancel" function is registered, call it
+	   (Note: this function is called before having checked
+	   if a command is under progress; we consider that
+	   the "cancel" function can be used as as to (re)initialize
+	   some component) */
+	if (subd->cancel != NULL)
+		subd->cancel(subd);
+
+	if (buf_desc->cur_cmd != NULL) {
+		a4l_free_cmddesc(buf_desc->cur_cmd);
+		rtdm_free(buf_desc->cur_cmd);
+		buf_desc->cur_cmd = NULL;
+	}
+
+	a4l_reinit_buffer(buf_desc);
+
+	clear_bit(A4L_SUBD_BUSY_NR, &subd->status);
+	subd->buf = NULL;
+}
+
+/* --- Munge related function --- */
+
+int a4l_get_chan(struct a4l_subdevice *subd)
+{
+	int i, j, tmp_count, tmp_size = 0;
+	struct a4l_cmd_desc *cmd;
+
+	cmd = a4l_get_cmd(subd);
+	if (!cmd)
+		return -EINVAL;
+
+	/* There is no need to check the channel index,
+	   it has already been checked in command_test */
+
+	/* We assume channels can have different sizes;
+	   so, we have to compute the global size of the channels
+	   in this command... */
+	for (i = 0; i < cmd->nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ?
+			CR_CHAN(cmd->chan_descs[i]) : 0;
+		tmp_size += subd->chan_desc->chans[j].nb_bits;
+	}
+
+	/* Translation bits -> bytes */
+	tmp_size /= 8;
+
+	tmp_count = subd->buf->mng_count % tmp_size;
+
+	/* Translation bytes -> bits */
+	tmp_count *= 8;
+
+	/* ...and find the channel the last munged sample
+	   was related with */
+	for (i = 0; tmp_count > 0 && i < cmd->nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ?
+			CR_CHAN(cmd->chan_descs[i]) : 0;
+		tmp_count -= subd->chan_desc->chans[j].nb_bits;
+	}
+
+	if (tmp_count == 0)
+		return i;
+	else
+		return -EINVAL;
+}
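+
+/*
+ * Worked example (hypothetical figures): with a command scanning
+ * three 16-bit channels, a scan is 6 bytes long. If mng_count is 32,
+ * 32 % 6 = 2 bytes, i.e. exactly one sample into the current scan, so
+ * a4l_get_chan() returns 1: the next sample to munge belongs to the
+ * second channel of the scan.
+ */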
+
+/* --- Transfer / copy functions --- */
+
+/* The following functions are explained in the Doxygen section
+   "Buffer management services" in driver_facilities.c */
+
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __pre_abs_put(buf, count);
+}
+
+
+int a4l_buf_commit_absput(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __abs_put(buf, count);
+}
+
+int a4l_buf_prepare_put(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __pre_put(buf, count);
+}
+
+int a4l_buf_commit_put(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	return __put(buf, count);
+}
+
+int a4l_buf_put(struct a4l_subdevice *subd, void *bufdata, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+	int err;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_input(subd))
+		return -EINVAL;
+
+	if (__count_to_put(buf) < count)
+		return -EAGAIN;
+
+	err = __produce(NULL, buf, bufdata, count);
+	if (err < 0)
+		return err;
+
+	err = __put(buf, count);
+
+	return err;
+}
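+
+/*
+ * Typical driver-side usage sketch (an assumption for illustration:
+ * code running in an acquisition interrupt routine; foo_read_fifo()
+ * is hypothetical):
+ *
+ *	u16 sample = foo_read_fifo(dev);
+ *	int err = a4l_buf_put(subd, &sample, sizeof(sample));
+ *
+ *	if (err == 0)
+ *		a4l_buf_evt(subd, 0);
+ *
+ * a4l_buf_evt(subd, 0) then wakes up any reader blocked on the
+ * buffer, as implemented below.
+ */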
+
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __pre_abs_get(buf, count);
+}
+
+int a4l_buf_commit_absget(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __abs_get(buf, count);
+}
+
+int a4l_buf_prepare_get(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __pre_get(buf, count);
+}
+
+int a4l_buf_commit_get(struct a4l_subdevice *subd, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+
+	/* Basic checks */
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	return __get(buf, count);
+}
+
+int a4l_buf_get(struct a4l_subdevice *subd, void *bufdata, unsigned long count)
+{
+	struct a4l_buffer *buf = subd->buf;
+	int err;
+
+	/* Basic checks */
+
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (!a4l_subd_is_output(subd))
+		return -EINVAL;
+
+	if (__count_to_get(buf) < count)
+		return -EAGAIN;
+
+	/* Perform the copy */
+	err = __consume(NULL, buf, bufdata, count);
+	if (err < 0)
+		return err;
+
+	/* Update the consume counter */
+	err = __get(buf, count);
+
+	return err;
+}
+
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts)
+{
+	struct a4l_buffer *buf = subd->buf;
+	int tmp;
+	unsigned long wake = 0, count = ULONG_MAX;
+
+	/* Warning: there may be a race condition here: the cancel
+	   function is called from the user side while a4l_buf_evt()
+	   and all the a4l_buf_... functions are called from the
+	   kernel side. Nonetheless, the driver, not the framework,
+	   should be in charge of such race conditions */
+
+	/* Basic checking */
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	/* Here we save the data count available for the user side */
+	if (evts == 0) {
+		count = a4l_subd_is_input(subd) ?
+			__count_to_get(buf) : __count_to_put(buf);
+		wake = __count_to_end(buf) < buf->wake_count ?
+			__count_to_end(buf) : buf->wake_count;
+	} else {
+		/* Even if it is a little more complex, atomic
+		   operations are used so as to prevent any kind of
+		   corner case */
+		while ((tmp = ffs(evts) - 1) != -1) {
+			set_bit(tmp, &buf->flags);
+			clear_bit(tmp, &evts);
+		}
+	}
+
+	if (count >= wake)
+		/* Notify the user-space side */
+		a4l_signal_sync(&buf->sync);
+
+	return 0;
+}
+
+unsigned long a4l_buf_count(struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned long ret = 0;
+
+	/* Basic checking */
+	if (!buf || !test_bit(A4L_SUBD_BUSY_NR, &subd->status))
+		return -ENOENT;
+
+	if (a4l_subd_is_input(subd))
+		ret = __count_to_put(buf);
+	else if (a4l_subd_is_output(subd))
+		ret = __count_to_get(buf);
+
+	return ret;
+}
+
+/* --- Mmap functions --- */
+
+void a4l_map(struct vm_area_struct *area)
+{
+	unsigned long *status = (unsigned long *)area->vm_private_data;
+	set_bit(A4L_BUF_MAP_NR, status);
+}
+
+void a4l_unmap(struct vm_area_struct *area)
+{
+	unsigned long *status = (unsigned long *)area->vm_private_data;
+	clear_bit(A4L_BUF_MAP_NR, status);
+}
+
+static struct vm_operations_struct a4l_vm_ops = {
+	.open = a4l_map,
+	.close = a4l_unmap,
+};
+
+int a4l_ioctl_mmap(struct a4l_device_context *cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	a4l_mmap_t map_cfg;
+	struct a4l_device *dev;
+	struct a4l_buffer *buf;
+	int ret;
+
+	/* The mmap operation cannot be performed in a
+	   real-time context */
+	if (rtdm_in_rt_context()) {
+		return -ENOSYS;
+	}
+
+	dev = a4l_get_dev(cxt);
+	buf = cxt->buffer;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_mmap: cannot mmap on "
+			  "an unattached device\n");
+		return -EINVAL;
+	}
+
+	if (test_bit(A4L_BUF_MAP_NR, &buf->flags)) {
+		__a4l_err("a4l_ioctl_mmap: buffer already mapped\n");
+		return -EBUSY;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &map_cfg, arg, sizeof(a4l_mmap_t)) != 0)
+		return -EFAULT;
+
+	/* Check the size to be mapped */
+	if ((map_cfg.size & ~(PAGE_MASK)) != 0 || map_cfg.size > buf->size)
+		return -EFAULT;
+
+	/* All the magic is here */
+	ret = rtdm_mmap_to_user(fd,
+				buf->buf,
+				map_cfg.size,
+				PROT_READ | PROT_WRITE,
+				&map_cfg.ptr, &a4l_vm_ops, &buf->flags);
+
+	if (ret < 0) {
+		__a4l_err("a4l_ioctl_mmap: internal error, "
+			  "rtdm_mmap_to_user failed (err=%d)\n", ret);
+		return ret;
+	}
+
+	return rtdm_safe_copy_to_user(fd,
+				      arg, &map_cfg, sizeof(a4l_mmap_t));
+}
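+
+/*
+ * User-space counterpart (illustrative sketch; fd is a hypothetical
+ * descriptor opened on the Analogy device, and A4L_MMAP is the
+ * request code from the Analogy UAPI):
+ *
+ *	a4l_mmap_t cfg = { .size = bufsize, .ptr = NULL };
+ *
+ *	if (ioctl(fd, A4L_MMAP, &cfg) == 0)
+ *		;	// cfg.ptr now points at the mapped buffer
+ */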
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_cancel(struct a4l_device_context * cxt, void *arg)
+{
+	unsigned int idx_subd = (unsigned long)arg;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd;
+
+	/* Basic check on the device */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_cancel: operation not supported on "
+			  "an unattached device\n");
+		return -EINVAL;
+	}
+
+	if (cxt->buffer->subd == NULL) {
+		__a4l_err("a4l_ioctl_cancel: "
+			  "no acquisition to cancel on this context\n");
+		return -EINVAL;
+	}
+
+	if (idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_cancel: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[idx_subd];
+
+	if (subd != cxt->buffer->subd) {
+		__a4l_err("a4l_ioctl_cancel: "
+			  "current context works on another subdevice "
+			  "(%d!=%d)\n", cxt->buffer->subd->idx, subd->idx);
+		return -EINVAL;
+	}
+
+	a4l_cancel_buffer(cxt);
+	return 0;
+}
+
+/* The ioctl BUFCFG is only useful for changing the size of the
+   asynchronous buffer.
+   (BUFCFG = freeing the current buffer + allocating a new one) */
+
+int a4l_ioctl_bufcfg(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	a4l_bufcfg_t buf_cfg;
+
+	/* As the Linux API is used to allocate the virtual buffer,
+	   the calling process must not be in primary mode */
+	if (rtdm_in_rt_context()) {
+		return -ENOSYS;
+	}
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &buf_cfg,
+				     arg, sizeof(a4l_bufcfg_t)) != 0)
+		return -EFAULT;
+
+	if (buf_cfg.buf_size > A4L_BUF_MAXSIZE) {
+		__a4l_err("a4l_ioctl_bufcfg: buffer size too big (<=16MB)\n");
+		return -EINVAL;
+	}
+
+	if (buf_cfg.idx_subd == A4L_BUF_DEFMAGIC) {
+		cxt->dev->transfer.default_bufsize = buf_cfg.buf_size;
+		return 0;
+	}
+
+	if (subd && test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_ioctl_bufcfg: acquisition in progress\n");
+		return -EBUSY;
+	}
+
+	if (test_bit(A4L_BUF_MAP_NR, &buf->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg: please unmap before "
+			  "configuring buffer\n");
+		return -EPERM;
+	}
+
+	/* Free the buffer... */
+	a4l_free_buffer(buf);
+
+	/* ...to reallocate it */
+	return a4l_alloc_buffer(buf, buf_cfg.buf_size);
+}
+
+/* The ioctl BUFCFG2 allows the user space process to define the
+   minimal amount of data which should trigger a wake-up. If the ABI
+   could be broken, this facility would be handled by the original
+   BUFCFG ioctl. At the next major release, this ioctl will vanish. */
+
+int a4l_ioctl_bufcfg2(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	a4l_bufcfg2_t buf_cfg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg2: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &buf_cfg,
+				     arg, sizeof(a4l_bufcfg2_t)) != 0)
+		return -EFAULT;
+
+	if (buf_cfg.wake_count > buf->size) {
+		__a4l_err("a4l_ioctl_bufcfg2: "
+			  "wake-up threshold too big (> buffer size: %lu)\n",
+			  buf->size);
+		return -EINVAL;
+	}
+
+	buf->wake_count = buf_cfg.wake_count;
+
+	return 0;
+}
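+
+/*
+ * Example (hypothetical values): with a 64 KB buffer, setting
+ * wake_count to 4096 means a consumer blocked on the buffer is only
+ * woken up once at least 4 KB are available (or the acquisition end
+ * is closer than that), which lowers the wake-up rate for slow
+ * readers.
+ */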
+
+/* The BUFINFO ioctl serves two basic purposes:
+   - tell the user app the size of the asynchronous buffer
+   - report the read/write counters (how many bytes to read/write) */
+
+int a4l_ioctl_bufinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	a4l_bufinfo_t info;
+
+	unsigned long tmp_cnt;
+	int ret;
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufinfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &info, arg, sizeof(a4l_bufinfo_t)) != 0)
+		return -EFAULT;
+
+
+	/* If no transfer is occurring, simply return the buffer
+	   information; otherwise make the transfer progress */
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		info.rw_count = 0;
+		goto a4l_ioctl_bufinfo_out;
+	}
+
+	ret = __handle_event(buf);
+
+	if (a4l_subd_is_input(subd)) {
+
+		/* Updates the consume count if rw_count is nonzero */
+		if (info.rw_count != 0)
+			buf->cns_count += info.rw_count;
+
+		/* Retrieves the data amount to read */
+		tmp_cnt = info.rw_count = __count_to_get(buf);
+
+		__a4l_dbg(1, core_dbg, "count to read=%lu\n", tmp_cnt);
+
+		if ((ret < 0 && ret != -ENOENT) ||
+		    (ret == -ENOENT && tmp_cnt == 0)) {
+			a4l_cancel_buffer(cxt);
+			return ret;
+		}
+	} else if (a4l_subd_is_output(subd)) {
+
+		if (ret < 0) {
+			a4l_cancel_buffer(cxt);
+			if (info.rw_count != 0)
+				return ret;
+		}
+
+		/* If rw_count is nonzero,
+		   there is something to write / munge */
+		if (info.rw_count != 0 && info.rw_count <= __count_to_put(buf)) {
+
+			/* Updates the production pointer */
+			buf->prd_count += info.rw_count;
+
+			/* Sets the munge count */
+			tmp_cnt = info.rw_count;
+		} else
+			tmp_cnt = 0;
+
+		/* Retrieves the data amount which is writable */
+		info.rw_count = __count_to_put(buf);
+
+		__a4l_dbg(1, core_dbg, " count to write=%lu\n", info.rw_count);
+
+	} else {
+		__a4l_err("a4l_ioctl_bufinfo: inappropriate subdevice\n");
+		return -EINVAL;
+	}
+
+	/* Performs the munge if need be */
+	if (subd->munge != NULL) {
+
+		/* Call the munge callback */
+		__munge(subd, subd->munge, buf, tmp_cnt);
+
+		/* Updates munge count */
+		buf->mng_count += tmp_cnt;
+	}
+
+a4l_ioctl_bufinfo_out:
+
+	/* Sets the buffer size */
+	info.buf_size = buf->size;
+
+	/* Sends the structure back to user space */
+	if (rtdm_safe_copy_to_user(fd,
+				   arg, &info, sizeof(a4l_bufinfo_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/* The ioctl BUFINFO2 tells the user application the minimal amount of
+data which should trigger a wake-up. If the ABI could be broken, this
+facility would be handled by the original BUFINFO ioctl. At the next
+major release, this ioctl will vanish. */
+
+int a4l_ioctl_bufinfo2(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	a4l_bufcfg2_t buf_cfg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_bufcfg2: unattached device\n");
+		return -EINVAL;
+	}
+
+	buf_cfg.wake_count = buf->wake_count;
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg, &buf_cfg, sizeof(a4l_bufcfg2_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+/* The function a4l_read_buffer can be considered as the kernel entry
+   point of the RTDM syscall read. This syscall is supposed to be used
+   only during asynchronous acquisitions */
+ssize_t a4l_read_buffer(struct a4l_device_context * cxt, void *bufdata, size_t nbytes)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	ssize_t count = 0;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_read: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_read: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	if (!a4l_subd_is_input(subd)) {
+		__a4l_err("a4l_read: operation requires an input subdevice \n");
+		return -EINVAL;
+	}
+
+	while (count < nbytes) {
+
+		unsigned long tmp_cnt;
+
+		/* Check the events */
+		int ret = __handle_event(buf);
+
+		__dump_buffer_counters(buf);
+
+		/* Compute the data amount to copy */
+		tmp_cnt = __count_to_get(buf);
+
+		/* Check that tmp_cnt is not higher than
+		   the global count to read */
+		if (tmp_cnt > nbytes - count)
+			tmp_cnt = nbytes - count;
+
+		/* We check whether there is an error */
+		if (ret < 0 && ret != -ENOENT) {
+			__a4l_err("a4l_read: failed to handle event %d \n", ret);
+			a4l_cancel_buffer(cxt);
+			count = ret;
+			goto out_a4l_read;
+		}
+
+		/* We check whether the acquisition is over */
+		if (ret == -ENOENT && tmp_cnt == 0) {
+			__a4l_info("a4l_read: acquisition done - all data "
+				   "requested by the client was delivered \n");
+			a4l_cancel_buffer(cxt);
+			count = 0;
+			goto out_a4l_read;
+		}
+
+		if (tmp_cnt > 0) {
+
+			/* Performs the munge if need be */
+			if (subd->munge != NULL) {
+				__munge(subd, subd->munge, buf, tmp_cnt);
+
+				/* Updates munge count */
+				buf->mng_count += tmp_cnt;
+			}
+
+			/* Performs the copy */
+			ret = __consume(cxt, buf, bufdata + count, tmp_cnt);
+
+			if (ret < 0) {
+				count = ret;
+				goto out_a4l_read;
+			}
+
+			/* Updates consume count */
+			buf->cns_count += tmp_cnt;
+			a4l_dbg(1, core_dbg, dev, "buf->cns_cnt=%ld \n", buf->cns_count);
+
+			/* Updates the return value */
+			count += tmp_cnt;
+
+			/* If the driver does not work in bulk mode,
+			   we must leave this function */
+			if (!test_bit(A4L_BUF_BULK_NR, &buf->flags))
+				goto out_a4l_read;
+		}
+		else {
+			/* If the acquisition is not over, we must not
+			   leave the function without having read at least one byte */
+			ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context());
+			if (ret < 0) {
+				if (ret == -ERESTARTSYS)
+					ret = -EINTR;
+				count = ret;
+				goto out_a4l_read;
+			}
+		}
+	}
+
+out_a4l_read:
+
+	return count;
+}
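+
+/*
+ * User-side view (illustrative; fd and consume() are hypothetical):
+ * once a command runs on an input subdevice, the acquisition is
+ * drained with plain reads on the RTDM descriptor:
+ *
+ *	while ((n = read(fd, buf, sizeof(buf))) > 0)
+ *		consume(buf, n);
+ *
+ * read() returning 0 signals the end of the acquisition, as
+ * implemented above.
+ */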
+
+/* The function a4l_write_buffer can be considered as the kernel entry
+   point of the RTDM syscall write. This syscall is supposed to be
+   used only during asynchronous acquisitions */
+ssize_t a4l_write_buffer(struct a4l_device_context *cxt, const void *bufdata, size_t nbytes)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	ssize_t count = 0;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_write: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_write: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	if (!a4l_subd_is_output(subd)) {
+		__a4l_err("a4l_write: operation requires an output subdevice \n");
+		return -EINVAL;
+	}
+
+	while (count < nbytes) {
+
+		unsigned long tmp_cnt;
+
+		/* Check the events */
+		int ret = __handle_event(buf);
+
+		__dump_buffer_counters(buf);
+
+		/* Compute the data amount to copy */
+		tmp_cnt = __count_to_put(buf);
+
+		/* Check that tmp_cnt is not higher than
+		   the global count to write */
+		if (tmp_cnt > nbytes - count)
+			tmp_cnt = nbytes - count;
+
+		if (ret < 0) {
+			count = (ret == -ENOENT) ? -EINVAL : ret;
+			__a4l_err("a4l_write: failed to handle event %d \n", ret);
+			a4l_cancel_buffer(cxt);
+			goto out_a4l_write;
+		}
+
+		if (tmp_cnt > 0) {
+
+
+			/* Performs the copy */
+			ret = __produce(cxt,
+					buf, (void *)bufdata + count, tmp_cnt);
+			if (ret < 0) {
+				count = ret;
+				goto out_a4l_write;
+			}
+
+			/* Performs the munge if need be */
+			if (subd->munge != NULL) {
+				__munge(subd, subd->munge, buf, tmp_cnt);
+
+				/* Updates munge count */
+				buf->mng_count += tmp_cnt;
+			}
+
+			/* Updates produce count */
+			buf->prd_count += tmp_cnt;
+			a4l_dbg(1, core_dbg, dev , "buf->prd_cnt=%ld \n", buf->prd_count);
+
+			/* Updates the return value */
+			count += tmp_cnt;
+
+			/* If the driver does not work in bulk mode,
+			   we must leave this function */
+			if (!test_bit(A4L_BUF_BULK_NR, &buf->flags))
+				goto out_a4l_write;
+		} else {
+			/* The buffer is full, we have to wait for a slot to free */
+			ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context());
+			if (ret < 0) {
+				__a4l_err("a4l_write: failed to wait for free slot (%d)\n", ret);
+				if (ret == -ERESTARTSYS)
+					ret = -EINTR;
+				count = ret;
+				goto out_a4l_write;
+			}
+		}
+	}
+
+out_a4l_write:
+
+	return count;
+}
+
+int a4l_select(struct a4l_device_context *cxt,
+	       rtdm_selector_t *selector,
+	       enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+
+	/* Basic checks */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_select: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_select: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	/* Check the RTDM select type
+	   (RTDM_SELECTTYPE_EXCEPT is not supported) */
+
+	if(type != RTDM_SELECTTYPE_READ &&
+	   type != RTDM_SELECTTYPE_WRITE) {
+		__a4l_err("a4l_select: wrong select argument\n");
+		return -EINVAL;
+	}
+
+	if (type == RTDM_SELECTTYPE_READ && !a4l_subd_is_input(subd)) {
+		__a4l_err("a4l_select: current context "
+			  "does not work with an input subdevice\n");
+		return -EINVAL;
+	}
+
+	if (type == RTDM_SELECTTYPE_WRITE && !a4l_subd_is_output(subd)) {
+		__a4l_err("a4l_select: current context "
+			  "does not work with an input subdevice\n");
+		return -EINVAL;
+	}
+
+	/* Performs a bind on the Analogy synchronization element */
+	return a4l_select_sync(&(buf->sync), selector, type, fd_index);
+}
+
+int a4l_ioctl_poll(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	unsigned long tmp_cnt = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_buffer *buf = cxt->buffer;
+	struct a4l_subdevice *subd = buf->subd;
+	a4l_poll_t poll;
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_poll: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (!subd || !test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_poll: idle subdevice on this context\n");
+		return -ENOENT;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &poll, arg, sizeof(a4l_poll_t)) != 0)
+		return -EFAULT;
+
+	/* Checks the buffer events */
+	a4l_flush_sync(&buf->sync);
+	ret = __handle_event(buf);
+
+	/* Retrieves the amount of data available,
+	   according to the subdevice type */
+	if (a4l_subd_is_input(subd)) {
+
+		tmp_cnt = __count_to_get(buf);
+
+		/* Check if some error occurred */
+		if (ret < 0 && ret != -ENOENT) {
+			a4l_cancel_buffer(cxt);
+			return ret;
+		}
+
+		/* Check whether the acquisition is over */
+		if (ret == -ENOENT && tmp_cnt == 0) {
+			a4l_cancel_buffer(cxt);
+			return 0;
+		}
+	} else {
+
+		/* If some error was detected, cancel the transfer */
+		if (ret < 0) {
+			a4l_cancel_buffer(cxt);
+			return ret;
+		}
+
+		tmp_cnt = __count_to_put(buf);
+	}
+
+	if (poll.arg == A4L_NONBLOCK || tmp_cnt != 0)
+		goto out_poll;
+
+	if (poll.arg == A4L_INFINITE)
+		ret = a4l_wait_sync(&(buf->sync), rtdm_in_rt_context());
+	else {
+		unsigned long long ns = ((unsigned long long)poll.arg) *
+			((unsigned long long)NSEC_PER_MSEC);
+		ret = a4l_timedwait_sync(&(buf->sync), rtdm_in_rt_context(), ns);
+	}
+
+	if (ret == 0) {
+		/* Retrieves the count once more */
+		if (a4l_subd_is_input(dev->transfer.subds[poll.idx_subd]))
+			tmp_cnt = __count_to_get(buf);
+		else
+			tmp_cnt = __count_to_put(buf);
+	}
+	else
+		return ret;
+
+out_poll:
+
+	poll.arg = tmp_cnt;
+
+	ret = rtdm_safe_copy_to_user(fd,
+				     arg, &poll, sizeof(a4l_poll_t));
+
+	return ret;
+}
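+
+/*
+ * Illustrative poll usage (fd is a hypothetical descriptor; A4L_POLL
+ * is the request code from the Analogy UAPI): to wait at most 100 ms
+ * for data on subdevice 0,
+ *
+ *	a4l_poll_t p = { .idx_subd = 0, .arg = 100 };
+ *
+ *	if (ioctl(fd, A4L_POLL, &p) == 0)
+ *		;	// p.arg now holds the readable byte count
+ */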
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c
new file mode 100644
index 0000000..7420bc5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/command.c
@@ -0,0 +1,392 @@
+/*
+ * Analogy for Linux, command related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Command descriptor management functions --- */
+int a4l_fill_cmddesc(struct a4l_device_context *cxt, struct a4l_cmd_desc *desc,
+		     unsigned int **chan_descs, void *arg)
+{
+	unsigned int *tmpchans = NULL;
+	int ret = 0;
+
+	ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt),
+				       desc, arg, sizeof(struct a4l_cmd_desc));
+	if (ret != 0)
+		goto out_cmddesc;
+
+
+	if (desc->nb_chan == 0) {
+		ret = -EINVAL;
+		goto out_cmddesc;
+	}
+
+	tmpchans = rtdm_malloc(desc->nb_chan * sizeof(unsigned int));
+	if (tmpchans == NULL) {
+		ret = -ENOMEM;
+		goto out_cmddesc;
+	}
+
+	ret = rtdm_safe_copy_from_user(rtdm_private_to_fd(cxt),
+				       tmpchans,
+				       desc->chan_descs,
+				       desc->nb_chan * sizeof(unsigned int));
+	if (ret != 0) {
+		__a4l_err("%s invalid arguments \n", __FUNCTION__);
+		goto out_cmddesc;
+	}
+
+	*chan_descs = desc->chan_descs;
+	desc->chan_descs = tmpchans;
+
+	__a4l_dbg(1, core_dbg, "desc dump: \n");
+	__a4l_dbg(1, core_dbg, "\t->idx_subd=%u\n", desc->idx_subd);
+	__a4l_dbg(1, core_dbg, "\t->flags=%lu\n", desc->flags);
+	__a4l_dbg(1, core_dbg, "\t->nb_chan=%u\n", desc->nb_chan);
+	__a4l_dbg(1, core_dbg, "\t->chan_descs=0x%x\n", *desc->chan_descs);
+	__a4l_dbg(1, core_dbg, "\t->data_len=%u\n", desc->data_len);
+	__a4l_dbg(1, core_dbg, "\t->pdata=0x%p\n", desc->data);
+
+	out_cmddesc:
+
+	if (ret != 0) {
+		__a4l_err("a4l_fill_cmddesc: %d \n", ret);
+		if (tmpchans != NULL)
+			rtdm_free(tmpchans);
+		desc->chan_descs = NULL;
+	}
+
+	return ret;
+}
+
+void a4l_free_cmddesc(struct a4l_cmd_desc * desc)
+{
+	if (desc->chan_descs != NULL)
+		rtdm_free(desc->chan_descs);
+}
+
+int a4l_check_cmddesc(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd;
+
+	if (desc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_check_cmddesc: "
+			  "subdevice index out of range (idx=%u)\n",
+			  desc->idx_subd);
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[desc->idx_subd];
+
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) {
+		__a4l_err("a4l_check_cmddesc: "
+			  "subdevice type incoherent\n");
+		return -EIO;
+	}
+
+	if (!(subd->flags & A4L_SUBD_CMD)) {
+		__a4l_err("a4l_check_cmddesc: operation not supported, "
+			  "synchronous only subdevice\n");
+		return -EIO;
+	}
+
+	if (test_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		__a4l_err("a4l_check_cmddesc: subdevice busy\n");
+		return -EBUSY;
+	}
+
+	return a4l_check_chanlist(dev->transfer.subds[desc->idx_subd],
+				  desc->nb_chan, desc->chan_descs);
+}
+
+/* --- Command checking functions --- */
+
+int a4l_check_generic_cmdcnt(struct a4l_cmd_desc * desc)
+{
+	unsigned int tmp1, tmp2;
+
+	/* Makes sure trigger sources are trivially valid */
+	tmp1 =
+	desc->start_src & ~(TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW);
+	tmp2 = desc->start_src & (TRIG_NOW | TRIG_INT | TRIG_EXT | TRIG_FOLLOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: start_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_begin_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
+	tmp2 = desc->scan_begin_src & (TRIG_TIMER | TRIG_EXT | TRIG_FOLLOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: scan_begin_src, , weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->convert_src & ~(TRIG_TIMER | TRIG_EXT | TRIG_NOW);
+	tmp2 = desc->convert_src & (TRIG_TIMER | TRIG_EXT | TRIG_NOW);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: convert_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_end_src & ~(TRIG_COUNT);
+	if (tmp1 != 0) {
+		__a4l_err("a4l_check_cmddesc: scan_end_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->stop_src & ~(TRIG_COUNT | TRIG_NONE);
+	tmp2 = desc->stop_src & (TRIG_COUNT | TRIG_NONE);
+	if (tmp1 != 0 || tmp2 == 0) {
+		__a4l_err("a4l_check_cmddesc: stop_src, weird trigger\n");
+		return -EINVAL;
+	}
+
+	/* Makes sure trigger sources are unique */
+	if (desc->start_src != TRIG_NOW &&
+	    desc->start_src != TRIG_INT &&
+	    desc->start_src != TRIG_EXT && desc->start_src != TRIG_FOLLOW) {
+		__a4l_err("a4l_check_cmddesc: start_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->scan_begin_src != TRIG_TIMER &&
+	    desc->scan_begin_src != TRIG_EXT &&
+	    desc->scan_begin_src != TRIG_FOLLOW) {
+		__a4l_err("a4l_check_cmddesc: scan_begin_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->convert_src != TRIG_TIMER &&
+	    desc->convert_src != TRIG_EXT && desc->convert_src != TRIG_NOW) {
+		__a4l_err("a4l_check_cmddesc: convert_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	if (desc->stop_src != TRIG_COUNT && desc->stop_src != TRIG_NONE) {
+		__a4l_err("a4l_check_cmddesc: stop_src, "
+			  "only one trigger should be set\n");
+		return -EINVAL;
+	}
+
+	/* Makes sure arguments are trivially compatible */
+	tmp1 = desc->start_src & (TRIG_NOW | TRIG_FOLLOW | TRIG_INT);
+	tmp2 = desc->start_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no start_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->scan_begin_src & TRIG_FOLLOW;
+	tmp2 = desc->scan_begin_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no scan_begin_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->convert_src & TRIG_NOW;
+	tmp2 = desc->convert_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no convert_arg expected\n");
+		return -EINVAL;
+	}
+
+	tmp1 = desc->stop_src & TRIG_NONE;
+	tmp2 = desc->stop_arg;
+	if (tmp1 != 0 && tmp2 != 0) {
+		__a4l_err("a4l_check_cmddesc: no stop_arg expected\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int a4l_check_specific_cmdcnt(struct a4l_device_context * cxt, struct a4l_cmd_desc * desc)
+{
+	unsigned int tmp1, tmp2;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_cmd_desc *cmd_mask = dev->transfer.subds[desc->idx_subd]->cmd_mask;
+
+	if (cmd_mask == NULL)
+		return 0;
+
+	if (cmd_mask->start_src != 0) {
+		tmp1 = desc->start_src & ~(cmd_mask->start_src);
+		tmp2 = desc->start_src & (cmd_mask->start_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: start_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->scan_begin_src != 0) {
+		tmp1 = desc->scan_begin_src & ~(cmd_mask->scan_begin_src);
+		tmp2 = desc->scan_begin_src & (cmd_mask->scan_begin_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: scan_begin_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->convert_src != 0) {
+		tmp1 = desc->convert_src & ~(cmd_mask->convert_src);
+		tmp2 = desc->convert_src & (cmd_mask->convert_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: convert_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->scan_end_src != 0) {
+		tmp1 = desc->scan_end_src & ~(cmd_mask->scan_end_src);
+		if (tmp1 != 0) {
+			__a4l_err("a4l_check_cmddesc: scan_end_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	if (cmd_mask->stop_src != 0) {
+		tmp1 = desc->stop_src & ~(cmd_mask->stop_src);
+		tmp2 = desc->stop_src & (cmd_mask->stop_src);
+		if (tmp1 != 0 || tmp2 == 0) {
+			__a4l_err("a4l_check_cmddesc: stop_src, "
+				  "trigger unsupported\n");
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
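+
+/*
+ * Illustrative cmd_mask (an assumption about a typical driver-side
+ * declaration; the values are hypothetical):
+ *
+ *	static struct a4l_cmd_desc foo_cmd_mask = {
+ *		.idx_subd = 0,
+ *		.start_src = TRIG_NOW | TRIG_INT,
+ *		.scan_begin_src = TRIG_TIMER,
+ *		.convert_src = TRIG_NOW | TRIG_TIMER,
+ *		.scan_end_src = TRIG_COUNT,
+ *		.stop_src = TRIG_COUNT | TRIG_NONE,
+ *	};
+ *
+ * a4l_check_specific_cmdcnt() then rejects any user command whose
+ * trigger sources fall outside these per-subdevice masks.
+ */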
+
+/* --- IOCTL / FOPS function --- */
+
+int a4l_ioctl_cmd(struct a4l_device_context * ctx, void *arg)
+{
+	int ret = 0, simul_flag = 0;
+	struct a4l_cmd_desc *cmd_desc = NULL;
+	struct a4l_device *dev = a4l_get_dev(ctx);
+	unsigned int *chan_descs, *tmp;
+	struct a4l_subdevice *subd;
+
+	/* The command launching cannot be done in real-time because
+	   of some possible buffer allocations in the drivers */
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	/* Basic check on the device */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_cmd: cannot command "
+			  "an unattached device\n");
+		return -EINVAL;
+	}
+
+	/* Allocates the command */
+	cmd_desc = (struct a4l_cmd_desc *) rtdm_malloc(sizeof(struct a4l_cmd_desc));
+	if (cmd_desc == NULL)
+		return -ENOMEM;
+	memset(cmd_desc, 0, sizeof(struct a4l_cmd_desc));
+
+	/* Gets the command */
+	ret = a4l_fill_cmddesc(ctx, cmd_desc, &chan_descs, arg);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	/* Checks the command */
+	ret = a4l_check_cmddesc(ctx, cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	ret = a4l_check_generic_cmdcnt(cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	ret = a4l_check_specific_cmdcnt(ctx, cmd_desc);
+	if (ret != 0)
+		goto out_ioctl_cmd;
+
+	__a4l_dbg(1, core_dbg,"1st cmd checks passed\n");
+	subd = dev->transfer.subds[cmd_desc->idx_subd];
+
+	/* Tests the command with the cmdtest function */
+	if (cmd_desc->flags & A4L_CMD_SIMUL) {
+		simul_flag = 1;
+
+		if (!subd->do_cmdtest) {
+			__a4l_err("a4l_ioctl_cmd: driver's cmd_test NULL\n");
+			ret = -EINVAL;
+			goto out_ioctl_cmd;
+		}
+
+		ret = subd->do_cmdtest(subd, cmd_desc);
+		if (ret != 0) {
+			__a4l_err("a4l_ioctl_cmd: driver's cmd_test failed\n");
+			goto out_ioctl_cmd;
+		}
+		__a4l_dbg(1, core_dbg, "driver's cmd checks passed\n");
+		goto out_ioctl_cmd;
+	}
+
+
+	/* Gets the transfer system ready */
+	ret = a4l_setup_buffer(ctx, cmd_desc);
+	if (ret < 0)
+		goto out_ioctl_cmd;
+
+	/* Finally launches the command */
+	ret = subd->do_cmd(subd, cmd_desc);
+
+	if (ret != 0) {
+		a4l_cancel_buffer(ctx);
+		goto out_ioctl_cmd;
+	}
+
+	out_ioctl_cmd:
+
+	if (simul_flag) {
+		/* copy the kernel based descriptor */
+		tmp = cmd_desc->chan_descs;
+		/* return the user based descriptor */
+		cmd_desc->chan_descs = chan_descs;
+		rtdm_safe_copy_to_user(rtdm_private_to_fd(ctx), arg, cmd_desc,
+				       sizeof(struct a4l_cmd_desc));
+		/* make sure we release the memory associated to the kernel */
+		cmd_desc->chan_descs = tmp;
+
+	}
+
+	if (ret != 0 || simul_flag == 1) {
+		a4l_free_cmddesc(cmd_desc);
+		rtdm_free(cmd_desc);
+	}
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c
new file mode 100644
index 0000000..69492b9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/device.c
@@ -0,0 +1,458 @@
+/*
+ * Analogy for Linux, device related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/string.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+static struct a4l_device a4l_devs[A4L_NB_DEVICES];
+
+/* --- Device tab management functions --- */
+
+void a4l_init_devs(void)
+{
+	int i;
+	memset(a4l_devs, 0, A4L_NB_DEVICES * sizeof(struct a4l_device));
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		rtdm_lock_init(&a4l_devs[i].lock);
+		a4l_devs[i].transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+	}
+}
+
+int a4l_check_cleanup_devs(void)
+{
+	int i, ret = 0;
+
+	for (i = 0; i < A4L_NB_DEVICES && ret == 0; i++)
+		if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags))
+			ret = -EBUSY;
+
+	return ret;
+}
+
+void a4l_set_dev(struct a4l_device_context *cxt)
+{
+	/* Retrieve the minor index */
+	const int minor = a4l_get_minor(cxt);
+	/* Fill the dev fields accordingly */
+	cxt->dev = &(a4l_devs[minor]);
+}
+
+/* --- Device tab proc section --- */
+
+#ifdef CONFIG_PROC_FS
+
+int a4l_rdproc_devs(struct seq_file *p, void *data)
+{
+	int i;
+
+	seq_printf(p, "--  Analogy devices --\n\n");
+	seq_printf(p, "| idx | status | driver\n");
+
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		char *status, *name;
+
+		/* Gets the device's state */
+		if (a4l_devs[i].flags == 0) {
+			status = "Unused";
+			name = "No driver";
+		} else if (test_bit(A4L_DEV_ATTACHED_NR, &a4l_devs[i].flags)) {
+			status = "Linked";
+			name = a4l_devs[i].driver->driver_name;
+		} else {
+			status = "Broken";
+			name = "Unknown";
+		}
+
+		seq_printf(p, "|  %02d | %s | %s\n", i, status, name);
+	}
+	return 0;
+}
+
+static int a4l_proc_transfer_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, a4l_rdproc_transfer, pde_data(inode));
+}
+
+static const DEFINE_PROC_OPS(a4l_proc_transfer_ops,
+			a4l_proc_transfer_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int a4l_proc_attach(struct a4l_device_context * cxt)
+{
+	int ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct proc_dir_entry *entry;
+	char *entry_name;
+
+	/* Allocate the buffer for the file name */
+	entry_name = rtdm_malloc(A4L_NAMELEN + 4);
+	if (entry_name == NULL) {
+		__a4l_err("a4l_proc_attach: failed to allocate buffer\n");
+		return -ENOMEM;
+	}
+
+	/* Create the proc file name */
+	ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s",
+		 a4l_get_minor(cxt), dev->driver->board_name);
+
+	/* Create the proc entry */
+	entry = proc_create_data(entry_name, 0444, a4l_proc_root,
+				 &a4l_proc_transfer_ops, &dev->transfer);
+	if (entry == NULL) {
+		__a4l_err("a4l_proc_attach: "
+			  "failed to create /proc/analogy/%s\n",
+			  entry_name);
+		ret = -ENOMEM;
+	}
+
+	rtdm_free(entry_name);
+
+	return ret;
+}
+
+void a4l_proc_detach(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	char *entry_name;
+
+	entry_name = rtdm_malloc(A4L_NAMELEN + 4);
+	if (entry_name == NULL) {
+		__a4l_err("a4l_proc_detach: "
+			  "failed to allocate filename buffer\n");
+		return;
+	}
+
+	ksformat(entry_name, A4L_NAMELEN + 4, "%02d-%s",
+		 a4l_get_minor(cxt), dev->driver->board_name);
+
+	remove_proc_entry(entry_name, a4l_proc_root);
+
+	rtdm_free(entry_name);
+}
+
+#else /* !CONFIG_PROC_FS */
+
+int a4l_proc_attach(struct a4l_device_context * cxt)
+{
+	return 0;
+}
+
+void a4l_proc_detach(struct a4l_device_context * cxt)
+{
+}
+
+#endif /* CONFIG_PROC_FS */
+
+/* --- Attach / detach section --- */
+
+int a4l_fill_lnkdesc(struct a4l_device_context * cxt,
+		     a4l_lnkdesc_t * link_arg, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret;
+	char *tmpname = NULL;
+	void *tmpopts = NULL;
+
+	ret = rtdm_safe_copy_from_user(fd,
+				       link_arg, arg, sizeof(a4l_lnkdesc_t));
+	if (ret != 0) {
+		__a4l_err("a4l_fill_lnkdesc: "
+			  "call1(copy_from_user) failed\n");
+		goto out_get_lnkdesc;
+	}
+
+	if (link_arg->bname_size != 0 && link_arg->bname != NULL) {
+		tmpname = rtdm_malloc(link_arg->bname_size + 1);
+		if (tmpname == NULL) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call1(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_get_lnkdesc;
+		}
+		tmpname[link_arg->bname_size] = 0;
+
+		ret = rtdm_safe_copy_from_user(fd,
+					       tmpname,
+					       link_arg->bname,
+					       link_arg->bname_size);
+		if (ret != 0) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call2(copy_from_user) failed\n");
+			goto out_get_lnkdesc;
+		}
+	} else {
+		__a4l_err("a4l_fill_lnkdesc: board name missing\n");
+		ret = -EINVAL;
+		goto out_get_lnkdesc;
+	}
+
+	if (link_arg->opts_size != 0 && link_arg->opts != NULL) {
+		tmpopts = rtdm_malloc(link_arg->opts_size);
+
+		if (tmpopts == NULL) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call2(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_get_lnkdesc;
+		}
+
+		ret = rtdm_safe_copy_from_user(fd,
+					       tmpopts,
+					       link_arg->opts,
+					       link_arg->opts_size);
+		if (ret != 0) {
+			__a4l_err("a4l_fill_lnkdesc: "
+				  "call3(copy_from_user) failed\n");
+			goto out_get_lnkdesc;
+		}
+	}
+
+	link_arg->bname = tmpname;
+	link_arg->opts = tmpopts;
+
+      out_get_lnkdesc:
+
+	if (tmpname == NULL) {
+		link_arg->bname = NULL;
+		link_arg->bname_size = 0;
+	}
+
+	if (tmpopts == NULL) {
+		link_arg->opts = NULL;
+		link_arg->opts_size = 0;
+	}
+
+	return ret;
+}
+
+void a4l_free_lnkdesc(struct a4l_device_context * cxt, a4l_lnkdesc_t * link_arg)
+{
+	if (link_arg->bname != NULL)
+		rtdm_free(link_arg->bname);
+
+	if (link_arg->opts != NULL)
+		rtdm_free(link_arg->opts);
+}
+
+int a4l_assign_driver(struct a4l_device_context * cxt,
+			 struct a4l_driver * drv, a4l_lnkdesc_t * link_arg)
+{
+	int ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	dev->driver = drv;
+	INIT_LIST_HEAD(&dev->subdvsq);
+
+	if (drv->privdata_size == 0)
+		__a4l_dbg(1, core_dbg, " warning! "
+				       "the field priv will not be usable\n");
+	else {
+		dev->priv = rtdm_malloc(drv->privdata_size);
+		if (dev->priv == NULL) {
+			__a4l_err("a4l_assign_driver: "
+				  "call(alloc) failed\n");
+			ret = -ENOMEM;
+			goto out_assign_driver;
+		}
+
+		/* Initialize the private data even if it is not our
+		   role (the driver should do it); this may prevent
+		   hard-to-find bugs */
+		memset(dev->priv, 0, drv->privdata_size);
+	}
+
+	if ((ret = drv->attach(dev, link_arg)) != 0)
+		__a4l_err("a4l_assign_driver: "
+			  "call(drv->attach) failed (ret=%d)\n",
+		     ret);
+
+out_assign_driver:
+
+	/* Increments module's count */
+	if (ret == 0 && (!try_module_get(drv->owner))) {
+		__a4l_err("a4l_assign_driver: "
+			  "driver's owner field wrongly set\n");
+		ret = -ENODEV;
+	}
+
+	if (ret != 0 && dev->priv != NULL) {
+		rtdm_free(dev->priv);
+		dev->driver = NULL;
+	}
+
+	return ret;
+}
+
+int a4l_release_driver(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_subdevice *subd, *tmp;
+	int ret = 0;
+
+	if ((ret = dev->driver->detach(dev)) != 0)
+		goto out_release_driver;
+
+	module_put(dev->driver->owner);
+
+	/* In case the driver developer did not free the subdevices */
+	if (!list_empty(&dev->subdvsq))
+		list_for_each_entry_safe(subd, tmp, &dev->subdvsq, list) {
+			list_del(&subd->list);
+			rtdm_free(subd);
+		}
+
+	/* Free the private field */
+	if (dev->priv)
+		rtdm_free(dev->priv);
+
+	dev->driver = NULL;
+
+out_release_driver:
+	return ret;
+}
+
+int a4l_device_attach(struct a4l_device_context * cxt, void *arg)
+{
+	int ret = 0;
+	a4l_lnkdesc_t link_arg;
+	struct a4l_driver *drv = NULL;
+
+	if ((ret = a4l_fill_lnkdesc(cxt, &link_arg, arg)) != 0)
+		goto out_attach;
+
+	if ((ret = a4l_lct_drv(link_arg.bname, &drv)) != 0) {
+		__a4l_err("a4l_device_attach: "
+			  "cannot find board name %s\n", link_arg.bname);
+		goto out_attach;
+	}
+
+	if ((ret = a4l_assign_driver(cxt, drv, &link_arg)) != 0)
+		goto out_attach;
+
+      out_attach:
+	a4l_free_lnkdesc(cxt, &link_arg);
+	return ret;
+}
+
+int a4l_device_detach(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (dev->driver == NULL) {
+		__a4l_err("a4l_device_detach: "
+			  "incoherent state, driver not reachable\n");
+		return -ENXIO;
+	}
+
+	return a4l_release_driver(cxt);
+}
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_devcfg(struct a4l_device_context * cxt, void *arg)
+{
+	int ret = 0;
+
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	if (arg == NULL) {
+		/* Basic checking */
+		if (!test_bit(A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) {
+			__a4l_err("a4l_ioctl_devcfg: "
+				  "free device, no driver to detach\n");
+			return -EINVAL;
+		}
+		/* Pre-cleanup of the transfer structure, we ensure
+		   that nothing is busy */
+		if ((ret = a4l_precleanup_transfer(cxt)) != 0)
+			return ret;
+		/* Remove the related proc file */
+		a4l_proc_detach(cxt);
+		/* Free the device and the driver from each other */
+		if ((ret = a4l_device_detach(cxt)) == 0)
+			clear_bit(A4L_DEV_ATTACHED_NR,
+				  &(a4l_get_dev(cxt)->flags));
+		/* Free the transfer structure and its related data */
+		if ((ret = a4l_cleanup_transfer(cxt)) != 0)
+			return ret;
+	} else {
+		/* Basic checking */
+		if (test_bit
+		    (A4L_DEV_ATTACHED_NR, &(a4l_get_dev(cxt)->flags))) {
+			__a4l_err("a4l_ioctl_devcfg: "
+				  "linked device, cannot attach more driver\n");
+			return -EINVAL;
+		}
+		/* Pre-initialization of the transfer structure */
+		a4l_presetup_transfer(cxt);
+		/* Link the device with the driver */
+		if ((ret = a4l_device_attach(cxt, arg)) != 0)
+			return ret;
+		/* Create the transfer structure and
+		   the related proc file */
+		if ((ret = a4l_setup_transfer(cxt)) != 0 ||
+		    (ret = a4l_proc_attach(cxt)) != 0)
+			a4l_device_detach(cxt);
+		else
+			set_bit(A4L_DEV_ATTACHED_NR,
+				&(a4l_get_dev(cxt)->flags));
+	}
+
+	return ret;
+}
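+
+/*
+ * Attach/detach round trip (illustrative; fd and the board name are
+ * hypothetical, and user space normally goes through the
+ * analogy_config utility rather than raw ioctls):
+ *
+ *	a4l_lnkdesc_t lnk = {
+ *		.bname = "analogy_foo", .bname_size = 11,
+ *	};
+ *
+ *	ioctl(fd, A4L_DEVCFG, &lnk);	// attach a driver
+ *	ioctl(fd, A4L_DEVCFG, NULL);	// detach it again
+ */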
+
+int a4l_ioctl_devinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	a4l_dvinfo_t info;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	memset(&info, 0, sizeof(a4l_dvinfo_t));
+
+	if (test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		int len = (strlen(dev->driver->board_name) > A4L_NAMELEN) ?
+		    A4L_NAMELEN : strlen(dev->driver->board_name);
+
+		memcpy(info.board_name, dev->driver->board_name, len);
+
+		len = (strlen(dev->driver->driver_name) > A4L_NAMELEN) ?
+		    A4L_NAMELEN : strlen(dev->driver->driver_name);
+
+		memcpy(info.driver_name, dev->driver->driver_name, len);
+
+		info.nb_subd = dev->transfer.nb_subd;
+		/* TODO: for API compatibility reasons, find the first
+		   read subdevice and the first write subdevice */
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg, &info, sizeof(a4l_dvinfo_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c
new file mode 100644
index 0000000..a857dea
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver.c
@@ -0,0 +1,104 @@
+/*
+ * Analogy for Linux, driver related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+static LIST_HEAD(a4l_drvs);
+
+/* --- Driver list management functions --- */
+
+int a4l_lct_drv(char *pin, struct a4l_driver ** pio)
+{
+	struct list_head *this;
+	int ret = -EINVAL;
+
+	__a4l_dbg(1, core_dbg, "name=%s\n", pin);
+
+	/* Goes through the linked list so as to find
+	   a driver instance with the same name */
+	list_for_each(this, &a4l_drvs) {
+		struct a4l_driver *drv = list_entry(this, struct a4l_driver, list);
+
+		if (strcmp(drv->board_name, pin) == 0) {
+			/* The argument pio can be NULL
+			   if there is no need to retrieve the pointer */
+			if (pio != NULL)
+				*pio = drv;
+			ret = 0;
+			break;
+		}
+	}
+
+	return ret;
+}
+
+int a4l_register_drv(struct a4l_driver * drv)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	__a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name);
+
+	if (a4l_lct_drv(drv->board_name, NULL) != 0) {
+		list_add(&drv->list, &a4l_drvs);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+int a4l_unregister_drv(struct a4l_driver * drv)
+{
+	__a4l_dbg(1, core_dbg, "board name=%s\n", drv->board_name);
+
+	if (a4l_lct_drv(drv->board_name, NULL) == 0) {
+		/* Here, we consider the argument is pointing
+		   to a real driver struct (not a blank structure
+		   with only the name field properly set) */
+		list_del(&drv->list);
+		return 0;
+	} else
+		return -EINVAL;
+}
+
+#ifdef CONFIG_PROC_FS
+
+/* --- Driver list proc section --- */
+
+int a4l_rdproc_drvs(struct seq_file *p, void *data)
+{
+	int i = 0;
+	struct list_head *this;
+
+	seq_printf(p, "--  Analogy drivers --\n\n");
+
+	seq_printf(p, "| idx | board name \n");
+
+	list_for_each(this, &a4l_drvs) {
+		struct a4l_driver *drv = list_entry(this, struct a4l_driver, list);
+		seq_printf(p, "|  %02d | %s \n", i++, drv->board_name);
+	}
+	return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c
new file mode 100644
index 0000000..7d2d883
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/driver_facilities.c
@@ -0,0 +1,608 @@
+/*
+ * Analogy for Linux, driver facilities
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <rtdm/analogy/device.h>
+
+/**
+ * @ingroup cobalt
+ * @defgroup analogy Analogy framework
+ * An RTDM-based interface for implementing DAQ card drivers
+ */
+
+/**
+ * @ingroup analogy
+ * @defgroup analogy_driver_facilities Driver API
+ * Programming interface provided to DAQ card drivers
+ */
+
+/* --- Driver section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_driver Driver management services
+ *
+ * Analogy driver registration / unregistration
+ *
+ * In a common Linux char driver, the developer has to register a fops
+ * structure filled with callbacks for read / write / mmap / ioctl
+ * operations.
+ *
+ * Analogy drivers do not have to implement read / write / mmap /
+ * ioctl functions; these procedures are implemented in the Analogy
+ * generic layer, which already manages the transfers between
+ * user-space and kernel-space. Analogy drivers work with commands and
+ * instructions, which are more specialized kinds of read / write
+ * operations. Instead of registering a fops structure, an Analogy
+ * driver must register an a4l_driver structure.
+ *
+ * @{
+ */
+
+/**
+ * @brief Register an Analogy driver
+ *
+ * After initializing a driver structure, the driver must be made
+ * available so that it can be attached.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_register_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_register_drv);
+
+/**
+ * @brief Unregister an Analogy driver
+ *
+ * This function removes the driver descriptor from the Analogy driver
+ * list. The driver cannot be attached anymore.
+ *
+ * @param[in] drv Driver descriptor structure
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_unregister_drv(struct a4l_driver * drv);
+EXPORT_SYMBOL_GPL(a4l_unregister_drv);
+
+/** @} */
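+
+/*
+ * A minimal registration sketch (hypothetical names, mirroring the
+ * standalone drivers further below):
+ *
+ *	static struct a4l_driver my_drv = {
+ *		.owner = THIS_MODULE,
+ *		.board_name = "my_board",
+ *		.driver_name = "my_driver",
+ *		.attach = my_attach,
+ *		.detach = my_detach,
+ *		.privdata_size = sizeof(struct my_priv),
+ *	};
+ *
+ *	static int __init my_init(void)
+ *	{
+ *		return a4l_register_drv(&my_drv);
+ *	}
+ *
+ *	static void __exit my_cleanup(void)
+ *	{
+ *		a4l_unregister_drv(&my_drv);
+ *	}
+ */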
+
+/* --- Subdevice section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_subdevice Subdevice management services
+ *
+ * Subdevice declaration in a driver
+ *
+ * The subdevice structure is the most complex one in the Analogy
+ * driver layer. It contains some description fields to fill and some
+ * callbacks to declare.
+ *
+ * The description fields are:
+ * - flags: to define the subdevice type and its capabilities;
+ * - chan_desc: to describe the channels which compose the subdevice;
+ * - rng_desc: to declare the usable ranges;
+ *
+ * The function callbacks are:
+ * - do_cmd() and do_cmdtest(): to perform asynchronous acquisitions
+ *   by means of commands;
+ * - cancel(): to abort a running asynchronous acquisition;
+ * - munge(): to apply modifications to the data freshly acquired
+ *   during an asynchronous transfer. Warning: using this feature can
+ *   significantly reduce performance; if the munge operation is
+ *   complex, it will cause a high CPU load, and if the acquisition
+ *   device is DMA capable, many cache misses and cache replacements
+ *   will occur, so the benefits of the DMA controller will vanish;
+ * - trigger(): optionally, to launch an asynchronous acquisition;
+ * - insn_read(), insn_write(), insn_bits(), insn_config(): to perform
+ *   synchronous acquisition operations.
+ *
+ * Once the subdevice is filled, it must be inserted into the driver
+ * structure thanks to a4l_add_subd().
+ *
+ * @{
+ */
+
+EXPORT_SYMBOL_GPL(a4l_range_bipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_bipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar10);
+EXPORT_SYMBOL_GPL(a4l_range_unipolar5);
+EXPORT_SYMBOL_GPL(a4l_range_unknown);
+EXPORT_SYMBOL_GPL(a4l_range_fake);
+
+/**
+ * @brief Allocate a subdevice descriptor
+ *
+ * This is a helper function for obtaining a suitable subdevice
+ * descriptor.
+ *
+ * @param[in] sizeof_priv Size of the subdevice's private data
+ * @param[in] setup Setup function to be called after the allocation
+ *
+ * @return the allocated subdevice descriptor; NULL if the allocation
+ * failed.
+ *
+ */
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+				  void (*setup)(struct a4l_subdevice *));
+EXPORT_SYMBOL_GPL(a4l_alloc_subd);
+
+/**
+ * @brief Add a subdevice to the driver descriptor
+ *
+ * Once the driver descriptor structure is initialized, the function
+ * a4l_add_subd() must be used to add subdevices to the driver.
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the index with which the subdevice has been registered; in
+ * case of error, a negative error code is returned.
+ *
+ */
+int a4l_add_subd(struct a4l_device *dev, struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_add_subd);
+
+/**
+ * @brief Get a pointer to the subdevice descriptor referenced by its
+ * registration index
+ *
+ * This function is rarely needed, as all the driver callbacks get
+ * the related subdevice descriptor as their first argument.
+ * This function is not optimized: it walks a linked list to find the
+ * proper pointer. So it must not be used in real-time context, only
+ * at initialization / cleanup time (attach / detach).
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] idx Subdevice index
+ *
+ * @return the subdevice descriptor; NULL if the index is out of
+ * range.
+ *
+ */
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx);
+EXPORT_SYMBOL_GPL(a4l_get_subd);
+
+/** @} */
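+
+/*
+ * A minimal allocation sketch (hypothetical setup callback and names,
+ * following the pattern used by the attach routines further below):
+ *
+ *	static void my_setup(struct a4l_subdevice *subd)
+ *	{
+ *		subd->flags = A4L_SUBD_DIO;
+ *		subd->chan_desc = &my_chan_desc;
+ *		subd->insn_bits = my_insn_bits;
+ *	}
+ *
+ *	struct a4l_subdevice *subd =
+ *		a4l_alloc_subd(sizeof(struct my_priv), my_setup);
+ *	if (subd == NULL)
+ *		return -ENOMEM;
+ *	err = a4l_add_subd(dev, subd);
+ *	if (err < 0)
+ *		return err;
+ */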
+
+/* --- Buffer section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_buffer Buffer management services
+ *
+ * Buffer management services
+ *
+ * The buffer is the key component of the Analogy infrastructure. It
+ * manages transfers between the user-space and the Analogy drivers
+ * thanks to generic functions which are described hereafter. Thanks
+ * to the buffer subsystem, the driver developer does not have to care
+ * about the way the user program retrieves or sends data.
+ *
+ * To write a classical char driver, the developer has to fill a fops
+ * structure so as to provide transfer operations to the user program
+ * (read, write, ioctl and mmap if need be).
+ *
+ * The Analogy infrastructure manages the whole interface with the
+ * userspace; the common read, write, mmap, etc. callbacks are generic
+ * Analogy functions. These functions manage (and perform, if need be)
+ * transfers between the user-space and an asynchronous buffer thanks
+ * to lockless mechanisms.
+ *
+ * Consequently, the developer has to use the proper buffer functions
+ * in order to write / read acquired data into / from the asynchronous
+ * buffer.
+ *
+ * Here are listed the functions:
+ * - a4l_buf_prepare_(abs)put() and a4l_buf_commit_(abs)put()
+ * - a4l_buf_prepare_(abs)get() and a4l_buf_commit_(abs)get()
+ * - a4l_buf_put()
+ * - a4l_buf_get()
+ * - a4l_buf_evt().
+ *
+ * The function count might seem high; however, the developer only
+ * needs a few of them to write a driver. Having so many functions
+ * makes it possible to handle any transfer case:
+ * - If some DMA controller is available, there is no need to make the
+ *   driver copy the acquired data into the asynchronous buffer, the
+ *   DMA controller must directly trigger DMA shots into / from the
+ *   buffer. In that case, a function a4l_buf_prepare_*() must be used
+ *   so as to set up the DMA transfer and a function
+ *   a4l_buf_commit_*() has to be called to complete the transfer.
+ * - For DMA controllers which need to work with a global counter (the
+ *   transferred data count since the beginning of the acquisition),
+ *   the a4l_buf_*abs*() functions have been made available.
+ * - If no DMA controller is available, the driver has to perform the
+ *   copy between the hardware component and the asynchronous
+ *   buffer. In such cases, the functions a4l_buf_get() and
+ *   a4l_buf_put() are useful.
+ *
+ * @{
+ */
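+
+/*
+ * A minimal non-DMA sketch (my_read_sample() is hypothetical): from an
+ * interrupt handler, copy one sample into the asynchronous buffer and
+ * notify the core, as the 8255 driver below does:
+ *
+ *	sampl_t s = my_read_sample(dev);
+ *	a4l_buf_put(subd, &s, sizeof(s));
+ *	a4l_buf_evt(subd, 0);
+ */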
+
+/**
+ * @brief Update the absolute count of data sent from the device to
+ * the buffer since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However, some
+ * pointers still have to be updated so as to monitor the transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which has been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absput);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * device to the buffer since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the buffer during
+ * the last DMA shot plus the data count which has been sent /
+ * retrieved since the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absput(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absput);
+
+/**
+ * @brief Set the count of data which is to be sent to the buffer at
+ * the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_put);
+
+/**
+ * @brief Set the count of data sent to the buffer during the last
+ * completed DMA shots
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_put(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_put);
+
+/**
+ * @brief Copy some data from the device driver to the buffer
+ *
+ * The function a4l_buf_put() must copy data coming from some
+ * acquisition device to the Analogy buffer. This ring-buffer is an
+ * intermediate area between the device driver and the user-space
+ * program, which is supposed to recover the acquired data.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] bufdata The data buffer to copy into the Analogy buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_put(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_put);
+
+/**
+ * @brief Update the absolute count of data sent from the buffer to
+ * the device since the start of the acquisition and after the next
+ * DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred during the next
+ * DMA shot plus the data count which has been copied since the start
+ * of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_absget);
+
+/**
+ * @brief Set the absolute count of data which was sent from the
+ * buffer to the device since the start of the acquisition and until
+ * the last DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count transferred to the device during
+ * the last DMA shot plus the data count which has been sent since
+ * the beginning of the acquisition
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_absget(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_absget);
+
+/**
+ * @brief Set the count of data which is to be sent from the buffer to
+ * the device at the next DMA shot
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The data count to be transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_prepare_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_prepare_get);
+
+/**
+ * @brief Set the count of data sent from the buffer to the device
+ * during the last completed DMA shots
+ *
+ * The functions a4l_buf_prepare_(abs)put(),
+ * a4l_buf_commit_(abs)put(), a4l_buf_prepare_(abs)get() and
+ * a4l_buf_commit_(abs)get() have been made available for DMA
+ * transfers. In such situations, no data copy is needed between the
+ * Analogy buffer and the device as some DMA controller is in charge
+ * of performing data shots from / to the Analogy buffer. However,
+ * some pointers still have to be updated so as to monitor the
+ * transfers.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] count The amount of data transferred
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_commit_get(struct a4l_subdevice *subd, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_commit_get);
+
+/**
+ * @brief Copy some data from the buffer to the device driver
+ *
+ * The function a4l_buf_get() must copy data coming from the Analogy
+ * buffer to some acquisition device. This ring-buffer is an
+ * intermediate area between the device driver and the user-space
+ * program, which is supposed to provide the data to send to the
+ * device.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] bufdata The data buffer to copy into the Analogy buffer
+ * @param[in] count The amount of data to copy
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_get(struct a4l_subdevice *subd, void *bufdata, unsigned long count);
+EXPORT_SYMBOL_GPL(a4l_buf_get);
+
+/**
+ * @brief Signal some event(s) to a user-space program involved in
+ * some read / write operation
+ *
+ * The function a4l_buf_evt() is useful in many cases:
+ * - To wake up a process waiting for some data to read.
+ * - To wake up a process waiting for some data to write.
+ * - To notify the user-space process that an error has occurred
+ *   during the acquisition.
+ *
+ * @param[in] subd Subdevice descriptor structure
+ * @param[in] evts Some specific event to notify:
+ * - A4L_BUF_ERROR to indicate some error has occurred during the
+ *   transfer
+ * - A4L_BUF_EOA to indicate the acquisition is complete (this
+ *   event is automatically set, it should not be used).
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_buf_evt(struct a4l_subdevice *subd, unsigned long evts);
+EXPORT_SYMBOL_GPL(a4l_buf_evt);
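+
+/*
+ * For instance (a sketch, my_fifo_overflowed() is hypothetical):
+ * reporting an acquisition error detected in an interrupt handler:
+ *
+ *	if (my_fifo_overflowed(dev))
+ *		a4l_buf_evt(subd, A4L_BUF_ERROR);
+ */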
+
+/**
+ * @brief Get the data amount available in the Analogy buffer
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the amount of data available in the Analogy buffer.
+ *
+ */
+unsigned long a4l_buf_count(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_buf_count);
+
+#ifdef DOXYGEN_CPP		/* Only used for doxygen doc generation */
+
+/**
+ * @brief Get the current Analogy command descriptor
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the command descriptor.
+ *
+ */
+struct a4l_cmd_desc *a4l_get_cmd(struct a4l_subdevice * subd);
+
+#endif /* DOXYGEN_CPP */
+
+/**
+ * @brief Get the channel index according to its type
+ *
+ * @param[in] subd Subdevice descriptor structure
+ *
+ * @return the channel index.
+ *
+ */
+int a4l_get_chan(struct a4l_subdevice *subd);
+EXPORT_SYMBOL_GPL(a4l_get_chan);
+
+/** @} */
+
+/* --- IRQ handling section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_irq Interrupt management services
+ * @{
+ */
+
+/**
+ * @brief Get the interrupt number in use for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ *
+ * @return the line number used or A4L_IRQ_UNUSED if no interrupt
+ * is registered.
+ *
+ */
+unsigned int a4l_get_irq(struct a4l_device * dev);
+EXPORT_SYMBOL_GPL(a4l_get_irq);
+
+/**
+ * @brief Register an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ * @param[in] handler Interrupt handler
+ * @param[in] flags Registration flags:
+ * - RTDM_IRQTYPE_SHARED: enable IRQ-sharing with other drivers
+ *   (Warning: real-time drivers and non-real-time drivers cannot
+ *   share an interrupt line).
+ * - RTDM_IRQTYPE_EDGE: mark IRQ as edge-triggered (Warning: this flag
+ *   is meaningless in RTDM-less context).
+ * - A4L_IRQ_DISABLED: keep IRQ disabled when calling the action
+ *   handler (Warning: this flag is ignored in RTDM-enabled
+ *   configuration).
+ * @param[in] cookie Pointer to be passed to the interrupt handler on
+ * invocation
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_request_irq(struct a4l_device * dev,
+		       unsigned int irq,
+		       a4l_irq_hdlr_t handler,
+		       unsigned long flags, void *cookie);
+EXPORT_SYMBOL_GPL(a4l_request_irq);
+
+/**
+ * @brief Release an interrupt handler for a specific device
+ *
+ * @param[in] dev Device descriptor structure
+ * @param[in] irq Line number of the addressed IRQ
+ *
+ * @return 0 on success, otherwise negative error code.
+ *
+ */
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq);
+EXPORT_SYMBOL_GPL(a4l_free_irq);
+
+/** @} */
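+
+/*
+ * A minimal sketch (my_handler is hypothetical): request the line at
+ * attach time, release it at detach time, as the parport driver below
+ * does:
+ *
+ *	err = a4l_request_irq(dev, irq, my_handler, 0, dev);
+ *	if (err < 0)
+ *		return err;
+ *
+ *	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED)
+ *		a4l_free_irq(dev, a4l_get_irq(dev));
+ */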
+
+/* --- Misc section --- */
+
+/**
+ * @ingroup analogy_driver_facilities
+ * @defgroup analogy_misc Misc services
+ * @{
+ */
+
+/**
+ * @brief Get the absolute time in nanoseconds
+ *
+ * @return the absolute time expressed in nanoseconds
+ *
+ */
+unsigned long long a4l_get_time(void);
+EXPORT_SYMBOL_GPL(a4l_get_time);
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c
new file mode 100644
index 0000000..1cbdb14
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/instruction.c
@@ -0,0 +1,427 @@
+/*
+ * Analogy for Linux, instruction related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/div64.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+int a4l_do_insn_gettime(struct a4l_kernel_instruction * dsc)
+{
+	nanosecs_abs_t ns;
+	uint32_t ns2;
+
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != 2 * sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_gettime: data size should be 2\n");
+		return -EINVAL;
+	}
+
+	/* Get a timestamp */
+	ns = a4l_get_time();
+
+	/* Perform the conversion (e.g. ns = 1234567890123 gives
+	   data[0] = 1234 s and data[1] = 567890 us) */
+	ns2 = do_div(ns, 1000000000);
+	data[0] = (unsigned int) ns;
+	data[1] = (unsigned int) ns2 / 1000;
+
+	return 0;
+}
+
+int a4l_do_insn_wait(struct a4l_kernel_instruction * dsc)
+{
+	unsigned int us;
+	unsigned int *data = (unsigned int *)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size != sizeof(unsigned int)) {
+		__a4l_err("a4l_do_insn_wait: data size should be 1\n");
+		return -EINVAL;
+	}
+
+	if (data[0] > A4L_INSN_WAIT_MAX) {
+		__a4l_err("a4l_do_insn_wait: wait duration is out of range\n");
+		return -EINVAL;
+	}
+
+	/* As we use (a4l_)udelay, we have to convert the delay into
+	   microseconds */
+	us = data[0] / 1000;
+
+	/* The delay is rounded up to at least 1 microsecond */
+	if (us == 0)
+		us = 1;
+
+	/* Performs the busy wait */
+	a4l_udelay(us);
+
+	return 0;
+}
+
+int a4l_do_insn_trig(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	unsigned int trignum;
+	unsigned int *data = (unsigned int*)dsc->data;
+
+	/* Basic checks */
+	if (dsc->data_size > 1) {
+		__a4l_err("a4l_do_insn_trig: data size should not be > 1\n");
+		return -EINVAL;
+	}
+
+	trignum = (dsc->data_size == sizeof(unsigned int)) ? data[0] : 0;
+
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn_trig: "
+			  "subdevice index is out of range\n");
+		return -EINVAL;
+	}
+
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks that the concerned subdevice is trigger-compliant */
+	if ((subd->flags & A4L_SUBD_CMD) == 0 || subd->trigger == NULL) {
+		__a4l_err("a4l_do_insn_trig: subdevice does not support "
+			  "triggering or asynchronous acquisition\n");
+		return -EINVAL;
+	}
+
+	/* Performs the trigger */
+	return subd->trigger(subd, trignum);
+}
+
+int a4l_fill_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	void *tmp_data = NULL;
+
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insn_t));
+	if (ret != 0)
+		goto out_insndsc;
+
+	if (dsc->data_size != 0 && dsc->data == NULL) {
+		__a4l_err("a4l_fill_insndsc: no data pointer specified\n");
+		ret = -EINVAL;
+		goto out_insndsc;
+	}
+
+	if (dsc->data_size != 0 && dsc->data != NULL) {
+		tmp_data = rtdm_malloc(dsc->data_size);
+		if (tmp_data == NULL) {
+			ret = -ENOMEM;
+			goto out_insndsc;
+		}
+
+		if ((dsc->type & A4L_INSN_MASK_WRITE) != 0) {
+			ret = rtdm_safe_copy_from_user(fd,
+						       tmp_data, dsc->data,
+						       dsc->data_size);
+			if (ret < 0)
+				goto out_insndsc;
+		}
+	}
+
+	dsc->__udata = dsc->data;
+	dsc->data = tmp_data;
+
+out_insndsc:
+
+	if (ret != 0 && tmp_data != NULL)
+		rtdm_free(tmp_data);
+
+	return ret;
+}
+
+int a4l_free_insndsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+
+	if ((dsc->type & A4L_INSN_MASK_READ) != 0)
+		ret = rtdm_safe_copy_to_user(fd,
+					     dsc->__udata,
+					     dsc->data, dsc->data_size);
+
+	if (dsc->data != NULL)
+		rtdm_free(dsc->data);
+
+	return ret;
+}
+
+int a4l_do_special_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+
+	switch (dsc->type) {
+	case A4L_INSN_GTOD:
+		ret = a4l_do_insn_gettime(dsc);
+		break;
+	case A4L_INSN_WAIT:
+		ret = a4l_do_insn_wait(dsc);
+		break;
+	case A4L_INSN_INTTRIG:
+		ret = a4l_do_insn_trig(cxt, dsc);
+		break;
+	default:
+		__a4l_err("a4l_do_special_insn: "
+			  "incoherent instruction code\n");
+		return -EINVAL;
+	}
+
+	if (ret < 0)
+		__a4l_err("a4l_do_special_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+	return ret;
+}
+
+int a4l_do_insn(struct a4l_device_context * cxt, struct a4l_kernel_instruction * dsc)
+{
+	int ret = 0;
+	struct a4l_subdevice *subd;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int (*hdlr) (struct a4l_subdevice *, struct a4l_kernel_instruction *) = NULL;
+
+	/* Checks the subdevice index */
+	if (dsc->idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_do_insn: "
+			  "subdevice index out of range (idx=%d)\n",
+			  dsc->idx_subd);
+		return -EINVAL;
+	}
+
+	/* Recovers a pointer to the proper subdevice */
+	subd = dev->transfer.subds[dsc->idx_subd];
+
+	/* Checks the subdevice's characteristics */
+	if ((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED) {
+		__a4l_err("a4l_do_insn: wrong subdevice selected\n");
+		return -EINVAL;
+	}
+
+	/* Checks the channel descriptor */
+	if ((subd->flags & A4L_SUBD_TYPES) != A4L_SUBD_CALIB) {
+		ret = a4l_check_chanlist(dev->transfer.subds[dsc->idx_subd],
+					 1, &dsc->chan_desc);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* Choose the proper handler, we can check the pointer because
+	   the subdevice was memset to 0 at allocation time */
+	switch (dsc->type) {
+	case A4L_INSN_READ:
+		hdlr = subd->insn_read;
+		break;
+	case A4L_INSN_WRITE:
+		hdlr = subd->insn_write;
+		break;
+	case A4L_INSN_BITS:
+		hdlr = subd->insn_bits;
+		break;
+	case A4L_INSN_CONFIG:
+		hdlr = subd->insn_config;
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	/* We check the instruction type */
+	if (ret < 0)
+		return ret;
+
+	/* We check whether a handler is available */
+	if (hdlr == NULL)
+		return -ENOSYS;
+
+	/* Prevents the subdevice from being used during
+	   the following operations */
+	if (test_and_set_bit(A4L_SUBD_BUSY_NR, &subd->status)) {
+		ret = -EBUSY;
+		goto out_do_insn;
+	}
+
+	/* Lets the driver-specific code perform the instruction */
+	ret = hdlr(subd, dsc);
+
+	if (ret < 0)
+		__a4l_err("a4l_do_insn: "
+			  "execution of the instruction failed (err=%d)\n",
+			  ret);
+
+out_do_insn:
+
+	/* Releases the subdevice from its reserved state */
+	clear_bit(A4L_SUBD_BUSY_NR, &subd->status);
+
+	return ret;
+}
+
+int a4l_ioctl_insn(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int ret = 0;
+	struct a4l_kernel_instruction insn;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insn: unattached device\n");
+		return -EINVAL;
+	}
+
+	/* Recovers the instruction descriptor */
+	ret = a4l_fill_insndsc(cxt, &insn, arg);
+	if (ret != 0)
+		goto err_ioctl_insn;
+
+	/* Performs the instruction */
+	if ((insn.type & A4L_INSN_MASK_SPECIAL) != 0)
+		ret = a4l_do_special_insn(cxt, &insn);
+	else
+		ret = a4l_do_insn(cxt, &insn);
+
+	if (ret < 0)
+		goto err_ioctl_insn;
+
+	/* Frees the used memory and sends back some
+	   data, if need be */
+	ret = a4l_free_insndsc(cxt, &insn);
+
+	return ret;
+
+err_ioctl_insn:
+	a4l_free_insndsc(cxt, &insn);
+	return ret;
+}
+
+int a4l_fill_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+
+	dsc->insns = NULL;
+
+	/* Recovers the structure from user space */
+	ret = rtdm_safe_copy_from_user(fd,
+				       dsc, arg, sizeof(a4l_insnlst_t));
+	if (ret < 0)
+		return ret;
+
+	/* Some basic checking */
+	if (dsc->count == 0) {
+		__a4l_err("a4l_fill_ilstdsc: instruction list's count is 0\n");
+		return -EINVAL;
+	}
+
+	/* Keeps the user pointer in an opaque field */
+	dsc->__uinsns = (a4l_insn_t *)dsc->insns;
+
+	dsc->insns = rtdm_malloc(dsc->count * sizeof(struct a4l_kernel_instruction));
+	if (dsc->insns == NULL)
+		return -ENOMEM;
+
+	/* Recovers the instructions, one by one. This part is not
+	   optimized */
+	for (i = 0; i < dsc->count && ret == 0; i++)
+		ret = a4l_fill_insndsc(cxt,
+				       &(dsc->insns[i]),
+				       &(dsc->__uinsns[i]));
+
+	/* In case of error, frees the allocated memory */
+	if (ret < 0 && dsc->insns != NULL)
+		rtdm_free(dsc->insns);
+
+	return ret;
+}
+
+int a4l_free_ilstdsc(struct a4l_device_context * cxt, struct a4l_kernel_instruction_list * dsc)
+{
+	int i, ret = 0;
+
+	if (dsc->insns != NULL) {
+
+		for (i = 0; i < dsc->count && ret == 0; i++)
+			ret = a4l_free_insndsc(cxt, &(dsc->insns[i]));
+
+		while (i < dsc->count) {
+			a4l_free_insndsc(cxt, &(dsc->insns[i]));
+			i++;
+		}
+
+		rtdm_free(dsc->insns);
+	}
+
+	return ret;
+}
+
+/* This function is not optimized in terms of memory footprint and
+   CPU load; however, the whole Analogy instruction system was not
+   designed with performance in mind */
+int a4l_ioctl_insnlist(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_kernel_instruction_list ilst;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_insnlist: unattached device\n");
+		return -EINVAL;
+	}
+
+	if ((ret = a4l_fill_ilstdsc(cxt, &ilst, arg)) < 0)
+		return ret;
+
+	/* Performs the instructions */
+	for (i = 0; i < ilst.count && ret == 0; i++) {
+		if ((ilst.insns[i].type & A4L_INSN_MASK_SPECIAL) != 0)
+			ret = a4l_do_special_insn(cxt, &ilst.insns[i]);
+		else
+			ret = a4l_do_insn(cxt, &ilst.insns[i]);
+	}
+
+	if (ret < 0)
+		goto err_ioctl_ilst;
+
+	return a4l_free_ilstdsc(cxt, &ilst);
+
+err_ioctl_ilst:
+	a4l_free_ilstdsc(cxt, &ilst);
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c
new file mode 100644
index 0000000..1abe250
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.c
@@ -0,0 +1,331 @@
+/*
+ * Analogy subdevice driver for 8255 chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <rtdm/analogy/device.h>
+
+#include "8255.h"
+
+#define CALLBACK_ARG		(((subd_8255_t *)subd->priv)->cb_arg)
+#define CALLBACK_FUNC		(((subd_8255_t *)subd->priv)->cb_func)
+
+/* Channels descriptor */
+static struct a4l_channels_desc chandesc_8255 = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 24,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, sizeof(sampl_t)},
+	},
+};
+
+/* Command options mask */
+static struct a4l_cmd_desc cmd_mask_8255 = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_FOLLOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd)
+{
+	sampl_t d;
+
+	/* Retrieve the sample... */
+	d = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG);
+	d |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8);
+
+	/* ...and send it */
+	a4l_buf_put(subd, &d, sizeof(sampl_t));
+
+	a4l_buf_evt(subd, 0);
+}
+EXPORT_SYMBOL_GPL(a4l_subdev_8255_interrupt);
+
+static int subdev_8255_cb(int dir, int port, int data, unsigned long arg)
+{
+	unsigned long iobase = arg;
+
+	if (dir) {
+		outb(data, iobase + port);
+		return 0;
+	} else {
+		return inb(iobase + port);
+	}
+}
+
+static void do_config(struct a4l_subdevice *subd)
+{
+	int config;
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+
+	config = CR_CW;
+	/* 1 in io_bits indicates output, 1 in config indicates input */
+	if (!(subd_8255->io_bits & 0x0000ff))
+		config |= CR_A_IO;
+	if (!(subd_8255->io_bits & 0x00ff00))
+		config |= CR_B_IO;
+	if (!(subd_8255->io_bits & 0x0f0000))
+		config |= CR_C_LO_IO;
+	if (!(subd_8255->io_bits & 0xf00000))
+		config |= CR_C_HI_IO;
+	CALLBACK_FUNC(1, _8255_CR, config, CALLBACK_ARG);
+}
+
+int subd_8255_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	/* FIXME */
+	return 0;
+}
+
+int subd_8255_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_begin_arg != 0) {
+		cmd->scan_begin_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != 1) {
+		cmd->scan_end_arg = 1;
+		return -EINVAL;
+	}
+	if (cmd->stop_arg != 0) {
+		cmd->stop_arg = 0;
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+void subd_8255_cancel(struct a4l_subdevice *subd)
+{
+	/* FIXME */
+}
+
+int subd_8255_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (data[0]) {
+
+		subd_8255->status &= ~data[0];
+		subd_8255->status |= (data[0] & data[1]);
+
+		if (data[0] & 0xff)
+			CALLBACK_FUNC(1, _8255_DATA,
+				      subd_8255->status & 0xff, CALLBACK_ARG);
+		if (data[0] & 0xff00)
+			CALLBACK_FUNC(1, _8255_DATA + 1,
+				      (subd_8255->status >> 8) & 0xff,
+				      CALLBACK_ARG);
+		if (data[0] & 0xff0000)
+			CALLBACK_FUNC(1, _8255_DATA + 2,
+				      (subd_8255->status >> 16) & 0xff,
+				      CALLBACK_ARG);
+	}
+
+	data[1] = CALLBACK_FUNC(0, _8255_DATA, 0, CALLBACK_ARG);
+	data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 1, 0, CALLBACK_ARG) << 8);
+	data[1] |= (CALLBACK_FUNC(0, _8255_DATA + 2, 0, CALLBACK_ARG) << 16);
+
+	return 0;
+}
+
+int subd_8255_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	unsigned int mask;
+	unsigned int bits;
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	mask = 1 << CR_CHAN(insn->chan_desc);
+
+	if (mask & 0x0000ff) {
+		bits = 0x0000ff;
+	} else if (mask & 0x00ff00) {
+		bits = 0x00ff00;
+	} else if (mask & 0x0f0000) {
+		bits = 0x0f0000;
+	} else {
+		bits = 0xf00000;
+	}
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subd_8255->io_bits &= ~bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subd_8255->io_bits |= bits;
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subd_8255->io_bits & bits) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	do_config(subd);
+
+	return 0;
+}
+
+void a4l_subdev_8255_init(struct a4l_subdevice *subd)
+{
+	subd_8255_t *subd_8255 = (subd_8255_t *)subd->priv;
+	/* Initializes the subdevice structure */
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	/* Subdevice filling part */
+
+	subd->flags = A4L_SUBD_DIO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->chan_desc = &chandesc_8255;
+	subd->insn_bits = subd_8255_insn_bits;
+	subd->insn_config = subd_8255_insn_config;
+
+	if(subd_8255->have_irq) {
+		subd->cmd_mask = &cmd_mask_8255;
+		subd->do_cmdtest = subd_8255_cmdtest;
+		subd->do_cmd = subd_8255_cmd;
+		subd->cancel = subd_8255_cancel;
+	}
+
+	/* 8255 setting part */
+
+	if(CALLBACK_FUNC == NULL)
+		CALLBACK_FUNC = subdev_8255_cb;
+
+	do_config(subd);
+}
+EXPORT_SYMBOL_GPL(a4l_subdev_8255_init);
+
+/*
+
+  Start of the 8255 standalone device
+
+*/
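+
+/*
+ * Attach expects the 8255 chip base addresses as options. A sketch,
+ * assuming the user-space analogy_config utility (the exact option
+ * syntax may differ across versions):
+ *
+ *   analogy_config analogy0 analogy_8255 0x200,0x210
+ */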
+
+static int dev_8255_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	unsigned long *addrs;
+	int i, err = 0;
+
+	if(arg->opts == NULL || arg->opts_size == 0) {
+		a4l_err(dev,
+			"dev_8255_attach: unable to detect any 8255 chip, "
+			"chips addresses must be passed as attach arguments\n");
+		return -EINVAL;
+	}
+
+	addrs = (unsigned long*) arg->opts;
+
+	for(i = 0; i < (arg->opts_size / sizeof(unsigned long)); i++) {
+		struct a4l_subdevice * subd;
+		subd_8255_t *subd_8255;
+
+		subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+		if(subd == NULL) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"unable to allocate subdevice\n");
+			/* There is no need to free previously
+			   allocated structure(s), the analogy layer will
+			   do it for us */
+			err = -ENOMEM;
+			goto out_attach;
+		}
+
+		memset(subd, 0, sizeof(struct a4l_subdevice));
+		memset(subd->priv, 0, sizeof(subd_8255_t));
+
+		subd_8255 = (subd_8255_t *)subd->priv;
+
+		if(request_region(addrs[i], _8255_SIZE, "Analogy 8255") == 0) {
+			subd->flags = A4L_SUBD_UNUSED;
+			a4l_warn(dev,
+				 "dev_8255_attach: "
+				 "I/O port conflict at 0x%lx\n", addrs[i]);
+		}
+		else {
+			subd_8255->cb_arg = addrs[i];
+			a4l_subdev_8255_init(subd);
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if(err < 0) {
+			a4l_err(dev,
+				"dev_8255_attach: "
+				"a4l_add_subd() failed (err=%d)\n", err);
+			goto out_attach;
+		}
+	}
+
+out_attach:
+	return err;
+}
+
+static int dev_8255_detach(struct a4l_device *dev)
+{
+	struct a4l_subdevice *subd;
+	int i = 0;
+
+	while((subd = a4l_get_subd(dev, i++)) != NULL) {
+		subd_8255_t *subd_8255 = (subd_8255_t *) subd->priv;
+		if(subd_8255 != NULL && subd_8255->cb_arg != 0)
+			release_region(subd_8255->cb_arg, _8255_SIZE);
+	}
+
+	return 0;
+}
+
+static struct a4l_driver drv_8255 = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_8255",
+	.driver_name = "8255",
+	.attach = dev_8255_attach,
+	.detach = dev_8255_detach,
+	.privdata_size = 0,
+};
+
+static int __init drv_8255_init(void)
+{
+	return a4l_register_drv(&drv_8255);
+}
+
+static void __exit drv_8255_cleanup(void)
+{
+	a4l_unregister_drv(&drv_8255);
+}
+MODULE_DESCRIPTION("Analogy driver for 8255 chip");
+MODULE_LICENSE("GPL");
+
+module_init(drv_8255_init);
+module_exit(drv_8255_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h
new file mode 100644
index 0000000..31b1ed8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/8255.h
@@ -0,0 +1,60 @@
+/*
+ * Hardware driver for 8255 chip
+ * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef __ANALOGY_8255_H__
+#define __ANALOGY_8255_H__
+
+#include <rtdm/analogy/device.h>
+
+typedef int (*a4l_8255_cb_t)(int, int, int, unsigned long);
+
+typedef struct subd_8255_struct {
+	unsigned long cb_arg;
+	a4l_8255_cb_t cb_func;
+	unsigned int status;
+	int have_irq;
+	int io_bits;
+} subd_8255_t;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_8255) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_8255_MODULE))
+
+#define _8255_SIZE 4
+
+#define _8255_DATA 0
+#define _8255_CR 3
+
+#define CR_C_LO_IO	0x01
+#define CR_B_IO		0x02
+#define CR_B_MODE	0x04
+#define CR_C_HI_IO	0x08
+#define CR_A_IO		0x10
+#define CR_A_MODE(a)	((a)<<5)
+#define CR_CW		0x80
+
+void a4l_subdev_8255_init(struct a4l_subdevice *subd);
+void a4l_subdev_8255_interrupt(struct a4l_subdevice *subd);
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_8255 */
+
+#define a4l_subdev_8255_init(x)		do { } while(0)
+#define a4l_subdev_8255_interrupt(x)	do { } while(0)
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_8255 */
+
+#endif /* !__ANALOGY_8255_H__ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig
new file mode 100644
index 0000000..6907c83
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Kconfig
@@ -0,0 +1,10 @@
+
+config XENO_DRIVERS_ANALOGY_8255
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "8255 driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_PARPORT
+	depends on XENO_DRIVERS_ANALOGY && X86
+	tristate "Standard parallel port driver"
+	default n
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile
new file mode 100644
index 0000000..94beedc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/Makefile
@@ -0,0 +1,10 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_8255) += analogy_8255.o
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_PARPORT) += analogy_parport.o
+
+analogy_8255-y := 8255.o
+
+analogy_parport-y := parport.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c
new file mode 100644
index 0000000..eb07434
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/intel/parport.c
@@ -0,0 +1,457 @@
+/*
+ * Analogy driver for standard parallel port
+ * Copyright (C) 1998,2001 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+   A cheap and easy way to get a few more digital I/O lines.  Steal
+   additional parallel ports from old computers or your neighbors'
+   computers.
+
+   Attach options list:
+   0: I/O port base for the parallel port.
+   1: IRQ
+
+   Parallel Port Lines:
+
+   pin     subdev  chan    aka
+   ---     ------  ----    ---
+   1       2       0       strobe
+   2       0       0       data 0
+   3       0       1       data 1
+   4       0       2       data 2
+   5       0       3       data 3
+   6       0       4       data 4
+   7       0       5       data 5
+   8       0       6       data 6
+   9       0       7       data 7
+   10      1       3       acknowledge
+   11      1       4       busy
+   12      1       2       output
+   13      1       1       printer selected
+   14      2       1       auto LF
+   15      1       0       error
+   16      2       2       init
+   17      2       3       select printer
+   18-25   ground
+
+   Notes:
+
+   Subdevice 0 is digital I/O, subdevice 1 is digital input, and
+   subdevice 2 is digital output.  Unlike other Analogy devices,
+   subdevice 0 defaults to output.
+
+   Pins 13 and 14 are inverted once by Analogy and once by the
+   hardware, thus cancelling the effect.
+
+   Pin 1 is a strobe, thus acts like one.  There's no way in software
+   to change this, at least on a standard parallel port.
+
+   Subdevice 3 pretends to be a digital input subdevice, but it always
+   returns 0 when read.  However, if you run a command with
+   scan_begin_src=TRIG_EXT, it uses pin 10 as an external triggering
+   pin, which can be used to wake up tasks.
+
+   See http://www.beyondlogic.org/ or
+   http://www.linux-magazin.de/ausgabe/1999/10/IO/io.html for more
+   information.
+*/
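+
+/*
+ * Attaching this driver: a sketch, assuming the user-space
+ * analogy_config utility (the exact option syntax may differ across
+ * versions):
+ *
+ *   analogy_config analogy0 analogy_parport 0x378,7
+ *
+ * which would select I/O base 0x378 and IRQ 7, matching the attach
+ * options listed above.
+ */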
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>		/* For inb/outb */
+#include <rtdm/analogy/device.h>
+
+#define PARPORT_SIZE 3
+
+#define PARPORT_A 0
+#define PARPORT_B 1
+#define PARPORT_C 2
+
+#define DEFAULT_ADDRESS 0x378
+#define DEFAULT_IRQ 7
+
+typedef struct parport_subd_priv {
+	unsigned long io_bits;
+} parport_spriv_t;
+
+typedef struct parport_priv {
+	unsigned long io_base;
+	unsigned int a_data;
+	unsigned int c_data;
+	int enable_irq;
+} parport_priv_t;
+
+#define devpriv ((parport_priv_t *)(dev->priv))
+
+static int parport_insn_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (data[0]) {
+		devpriv->a_data &= ~data[0];
+		devpriv->a_data |= (data[0] & data[1]);
+
+		outb(devpriv->a_data, devpriv->io_base + PARPORT_A);
+	}
+
+	data[1] = inb(devpriv->io_base + PARPORT_A);
+
+	return 0;
+}
+
+static int parport_insn_config_a(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	parport_spriv_t *spriv = (parport_spriv_t *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	/* No need to check the channel descriptor; the input / output
+	   setting is global for all channels */
+
+	switch (data[0]) {
+
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		spriv->io_bits = 0xff;
+		devpriv->c_data &= ~(1 << 5);
+		break;
+
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		spriv->io_bits = 0;
+		devpriv->c_data |= (1 << 5);
+		break;
+
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (spriv->io_bits == 0xff) ?
+			A4L_OUTPUT: A4L_INPUT;
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	return 0;
+}
+
+static int parport_insn_b(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (data[0]) {
+		/* should writes be ignored? */
+	}
+
+	data[1] = (inb(devpriv->io_base + PARPORT_B) >> 3);
+
+	return 0;
+}
+
+static int parport_insn_c(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] &= 0x0f;
+	if (data[0]) {
+		devpriv->c_data &= ~data[0];
+		devpriv->c_data |= (data[0] & data[1]);
+
+		outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+	}
+
+	data[1] = devpriv->c_data & 0xf;
+
+	return 2;
+}
+
+static int parport_intr_insn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (insn->data_size < sizeof(uint8_t))
+		return -EINVAL;
+
+	data[1] = 0;
+	return 0;
+}
+
+static struct a4l_cmd_desc parport_intr_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_FOLLOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+static int parport_intr_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc * cmd)
+{
+
+	if (cmd->start_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_begin_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->convert_arg != 0) {
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != 1) {
+		return -EINVAL;
+	}
+	if (cmd->stop_arg != 0) {
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int parport_intr_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	devpriv->c_data |= 0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 1;
+
+	return 0;
+}
+
+static void parport_intr_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	a4l_info(dev, "cancel in progress\n");
+
+	devpriv->c_data &= ~0x10;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	devpriv->enable_irq = 0;
+}
+
+static int parport_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	struct a4l_subdevice *subd = a4l_get_subd(dev, 3);
+
+	if (!devpriv->enable_irq) {
+		a4l_err(dev, "parport_interrupt: bogus irq, ignored\n");
+		return IRQ_NONE;
+	}
+
+	a4l_buf_put(subd, 0, sizeof(unsigned int));
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+/* --- Channels descriptor --- */
+
+static struct a4l_channels_desc parport_chan_desc_a = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_b = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 5,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_c = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 4,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc parport_chan_desc_intr = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 1,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* --- Subdevice initialization functions --- */
+
+static void setup_subd_a(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DIO;
+	subd->chan_desc = &parport_chan_desc_a;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_a;
+	subd->insn_config = parport_insn_config_a;
+}
+
+static void setup_subd_b(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DI;
+	subd->chan_desc = &parport_chan_desc_b;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_b;
+}
+
+static void setup_subd_c(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DO;
+	subd->chan_desc = &parport_chan_desc_c;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_insn_c;
+}
+
+static void setup_subd_intr(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DI;
+	subd->chan_desc = &parport_chan_desc_intr;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = parport_intr_insn;
+	subd->cmd_mask = &parport_intr_cmd_mask;
+	subd->do_cmdtest = parport_intr_cmdtest;
+	subd->do_cmd = parport_intr_cmd;
+	subd->cancel = parport_intr_cancel;
+}
+
+static void (*setup_subds[3])(struct a4l_subdevice *) = {
+	setup_subd_a,
+	setup_subd_b,
+	setup_subd_c
+};
+
+static int dev_parport_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int i, err = 0, irq = A4L_IRQ_UNUSED;
+	unsigned long io_base;
+
+	if(arg->opts == NULL || arg->opts_size < sizeof(unsigned long)) {
+
+		a4l_warn(dev,
+			 "dev_parport_attach: no attach options specified, "
+			 "taking default options (addr=0x%x, irq=%d)\n",
+			 DEFAULT_ADDRESS, DEFAULT_IRQ);
+
+		io_base = DEFAULT_ADDRESS;
+		irq = DEFAULT_IRQ;
+	} else {
+
+		io_base = ((unsigned long *)arg->opts)[0];
+
+		if (arg->opts_size >= 2 * sizeof(unsigned long))
+			irq = (int) ((unsigned long *)arg->opts)[1];
+	}
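+	/*
+	 * A minimal sketch of the options layout (the values are only
+	 * an example): opts = { 0x378, 7 } selects the legacy LPT1
+	 * region with IRQ 7, while passing a single unsigned long skips
+	 * the interrupt subdevice, since irq keeps its A4L_IRQ_UNUSED
+	 * default.
+	 */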
+
+	if (!request_region(io_base, PARPORT_SIZE, "analogy_parport")) {
+		a4l_err(dev, "dev_parport_attach: I/O port conflict");
+		return -EIO;
+	}
+
+	a4l_info(dev, "address = 0x%lx\n", io_base);
+
+	for (i = 0; i < 3; i++) {
+
+		struct a4l_subdevice *subd = a4l_alloc_subd(sizeof(parport_spriv_t),
+						  setup_subds[i]);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i)
+			return err;
+	}
+
+	if (irq != A4L_IRQ_UNUSED) {
+
+		struct a4l_subdevice *subd;
+
+		a4l_info(dev, "irq = %d\n", irq);
+
+		err = a4l_request_irq(dev, irq, parport_interrupt, 0, dev);
+		if (err < 0) {
+			a4l_err(dev, "dev_parport_attach: irq not available\n");
+			return err;
+		}
+
+		subd = a4l_alloc_subd(0, setup_subd_intr);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		err = a4l_add_subd(dev, subd);
+		if (err < 0)
+			return err;
+	}
+
+	devpriv->io_base = io_base;
+
+	devpriv->a_data = 0;
+	outb(devpriv->a_data, devpriv->io_base + PARPORT_A);
+
+	devpriv->c_data = 0;
+	outb(devpriv->c_data, devpriv->io_base + PARPORT_C);
+
+	return 0;
+}
+
+static int dev_parport_detach(struct a4l_device *dev)
+{
+	int err = 0;
+
+	if (devpriv->io_base != 0)
+		release_region(devpriv->io_base, PARPORT_SIZE);
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+		a4l_free_irq(dev, a4l_get_irq(dev));
+	}
+
+	return err;
+}
+
+static struct a4l_driver drv_parport = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_parport",
+	.driver_name = "parport",
+	.attach = dev_parport_attach,
+	.detach = dev_parport_detach,
+	.privdata_size = sizeof(parport_priv_t),
+};
+
+static int __init drv_parport_init(void)
+{
+	return a4l_register_drv(&drv_parport);
+}
+
+static void __exit drv_parport_cleanup(void)
+{
+	a4l_unregister_drv(&drv_parport);
+}
+
+MODULE_DESCRIPTION("Analogy driver for standard parallel port");
+MODULE_LICENSE("GPL");
+
+module_init(drv_parport_init);
+module_exit(drv_parport_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig
new file mode 100644
index 0000000..bd1687a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Kconfig
@@ -0,0 +1,42 @@
+
+config XENO_DRIVERS_ANALOGY_NI_MITE
+	depends on XENO_DRIVERS_ANALOGY && PCI
+	tristate "NI MITE driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_TIO
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "NI TIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_MIO
+	depends on XENO_DRIVERS_ANALOGY && XENO_DRIVERS_ANALOGY_NI_TIO && PCI
+	tristate "NI MIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_PCIMIO
+	depends on XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI PCIMIO driver"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_670x
+	depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI 670X driver (EXPERIMENTAL)"
+	default n
+
+config XENO_DRIVERS_ANALOGY_NI_660x
+	depends on EXPERIMENTAL && XENO_DRIVERS_ANALOGY && PCI
+	select XENO_DRIVERS_ANALOGY_NI_MITE
+	select XENO_DRIVERS_ANALOGY_NI_TIO
+	select XENO_DRIVERS_ANALOGY_NI_MIO
+	select XENO_DRIVERS_ANALOGY_8255
+	tristate "NI 660X driver (EXPERIMENTAL)"
+	default n
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile
new file mode 100644
index 0000000..b4c93d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/Makefile
@@ -0,0 +1,16 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) += analogy_ni_mite.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_TIO) += analogy_ni_tio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_MIO) += analogy_ni_mio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_PCIMIO) += analogy_ni_pcimio.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_670x) += analogy_ni_670x.o
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_NI_660x) += analogy_ni_660x.o
+
+analogy_ni_mite-y := mite.o
+analogy_ni_tio-y := tio_common.o
+analogy_ni_mio-y := mio_common.o
+analogy_ni_pcimio-y := pcimio.o
+analogy_ni_670x-y := ni_670x.o
+analogy_ni_660x-y := ni_660x.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c
new file mode 100644
index 0000000..b071adc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mio_common.c
@@ -0,0 +1,5590 @@
+/*
+ * Hardware driver for DAQ-STC based boards
+ *
+ * Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2002-2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: DAQ-STC systems
+ *
+ * References:
+ * 340747b.pdf  AT-MIO E series Register-Level Programmer Manual
+ * 341079b.pdf  PCI E Series Register-Level Programmer Manual
+ * 340934b.pdf  DAQ-STC reference manual
+ * 322080b.pdf  6711/6713/6715 User Manual
+ * 320945c.pdf  PCI E Series User Manual
+ * 322138a.pdf  PCI-6052E and DAQPad-6052E User Manual
+ * 320517c.pdf  AT E Series User manual (obsolete)
+ * 320517f.pdf  AT E Series User manual
+ * 320906c.pdf  Maximum signal ratings
+ * 321066a.pdf  About 16x
+ * 321791a.pdf  Discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf  About at-mio-16e-10 rev P
+ * 321837a.pdf  Discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf  About at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ * - The interrupt routine needs to be cleaned up
+ * - S-Series PCI-6143 support has been added but is not fully tested
+ *   as yet. Terry Barnaby, BEAM Ltd.
+ *
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include "../intel/8255.h"
+#include "mite.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+
+#define NI_TIMEOUT 1000
+
+/* Note: this table must match the ai_gain_* definitions */
+static const short ni_gainlkup[][16] = {
+	/* ai_gain_16 */
+	{0, 1, 2, 3, 4, 5, 6, 7, 0x100, 0x101, 0x102, 0x103, 0x104, 0x105,
+	 0x106, 0x107},
+	/* ai_gain_8 */
+	{1, 2, 4, 7, 0x101, 0x102, 0x104, 0x107},
+	/* ai_gain_14 */
+	{1, 2, 3, 4, 5, 6, 7, 0x101, 0x102, 0x103, 0x104, 0x105, 0x106,
+	 0x107},
+	/* ai_gain_4 */
+	{0, 1, 4, 7},
+	/* ai_gain_611x */
+	{0x00a, 0x00b, 0x001, 0x002, 0x003, 0x004, 0x005, 0x006},
+	/* ai_gain_622x */
+	{0, 1, 4, 5},
+	/* ai_gain_628x */
+	{1, 2, 3, 4, 5, 6, 7},
+	/* ai_gain_6143 */
+	{0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00},
+};
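+
+/*
+ * Presumably the low bits of each entry encode the PGIA gain code
+ * while the 0x100 bit selects the unipolar variant of a range (bit 8
+ * of the configuration memory being the unipolar/bipolar selector);
+ * this reading is inferred from the range tables below.
+ */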
+
+struct a4l_rngtab rng_ni_E_ai = {16, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2.5, 2.5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.25, 0.25),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(-0.05, 0.05),
+	RANGE_V(0, 20),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 2),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.5),
+	RANGE_V(0, 0.2),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai =
+	RNG_GLOBAL(rng_ni_E_ai);
+
+struct a4l_rngtab rng_ni_E_ai_limited = {8, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_limited =
+	RNG_GLOBAL(rng_ni_E_ai_limited);
+
+struct a4l_rngtab rng_ni_E_ai_limited14 = {14, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+	RANGE_V(-0.1, 0.1),
+	RANGE_V(0, 10),
+	RANGE_V(0, 5),
+	RANGE_V(0, 2),
+	RANGE_V(0, 1),
+	RANGE_V(0, 0.5),
+	RANGE_V(0, 0.2),
+	RANGE_V(0, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_limited14 =
+	RNG_GLOBAL(rng_ni_E_ai_limited14);
+
+struct a4l_rngtab rng_ni_E_ai_bipolar4 = {4, {
+	RANGE_V(-10,10),
+	RANGE_V(-5, 5),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.05, 0.05),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4 =
+	RNG_GLOBAL(rng_ni_E_ai_bipolar4);
+
+struct a4l_rngtab rng_ni_E_ai_611x = {8, {
+	RANGE_V(-50, 50),
+	RANGE_V(-20, 20),
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ai_611x =
+	RNG_GLOBAL(rng_ni_E_ai_611x);
+
+struct a4l_rngtab rng_ni_M_ai_622x = {4, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.2, 0.2),
+}};
+struct a4l_rngdesc a4l_range_ni_M_ai_622x =
+	RNG_GLOBAL(rng_ni_M_ai_622x);
+
+struct a4l_rngtab rng_ni_M_ai_628x = {7, {
+	RANGE_V(-10, 10),
+	RANGE_V(-5, 5),
+	RANGE_V(-2, 2),
+	RANGE_V(-1, 1),
+	RANGE_V(-0.5, 0.5),
+	RANGE_V(-0.2, 0.2),
+	RANGE_V(-0.1, 0.1),
+}};
+struct a4l_rngdesc a4l_range_ni_M_ai_628x =
+	RNG_GLOBAL(rng_ni_M_ai_628x);
+
+struct a4l_rngtab rng_ni_S_ai_6143 = {1, {
+	RANGE_V(-5, 5),
+}};
+struct a4l_rngdesc a4l_range_ni_S_ai_6143 =
+	RNG_GLOBAL(rng_ni_S_ai_6143);
+
+
+struct a4l_rngtab rng_ni_E_ao_ext = {4, {
+	RANGE_V(-10, 10),
+	RANGE_V(0, 10),
+	RANGE_ext(-1, 1),
+	RANGE_ext(0, 1),
+}};
+struct a4l_rngdesc a4l_range_ni_E_ao_ext =
+	RNG_GLOBAL(rng_ni_E_ao_ext);
+
+struct a4l_rngdesc *ni_range_lkup[] = {
+	&a4l_range_ni_E_ai,
+	&a4l_range_ni_E_ai_limited,
+	&a4l_range_ni_E_ai_limited14,
+	&a4l_range_ni_E_ai_bipolar4,
+	&a4l_range_ni_E_ai_611x,
+	&a4l_range_ni_M_ai_622x,
+	&a4l_range_ni_M_ai_628x,
+	&a4l_range_ni_S_ai_6143
+};
+
+static const int num_adc_stages_611x = 3;
+
+static void ni_handle_fifo_dregs(struct a4l_subdevice *subd);
+static void get_last_sample_611x(struct a4l_subdevice *subd);
+static void get_last_sample_6143(struct a4l_subdevice *subd);
+static void handle_cdio_interrupt(struct a4l_device *dev);
+static void ni_load_channelgain_list(struct a4l_device *dev,
+				     unsigned int n_chan, unsigned int *list);
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+static void ni_handle_fifo_half_full(struct a4l_subdevice *subd);
+static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd);
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static inline void ni_set_bitfield(struct a4l_device *dev,
+				   int reg,
+				   unsigned int bit_mask,
+				   unsigned int bit_values)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags);
+	switch (reg) {
+	case Interrupt_A_Enable_Register:
+		devpriv->int_a_enable_reg &= ~bit_mask;
+		devpriv->int_a_enable_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->int_a_enable_reg,
+				    Interrupt_A_Enable_Register);
+		break;
+	case Interrupt_B_Enable_Register:
+		devpriv->int_b_enable_reg &= ~bit_mask;
+		devpriv->int_b_enable_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->int_b_enable_reg,
+				    Interrupt_B_Enable_Register);
+		break;
+	case IO_Bidirection_Pin_Register:
+		devpriv->io_bidirection_pin_reg &= ~bit_mask;
+		devpriv->io_bidirection_pin_reg |= bit_values & bit_mask;
+		devpriv->stc_writew(dev, devpriv->io_bidirection_pin_reg,
+				    IO_Bidirection_Pin_Register);
+		break;
+	case AI_AO_Select:
+		devpriv->ai_ao_select_reg &= ~bit_mask;
+		devpriv->ai_ao_select_reg |= bit_values & bit_mask;
+		ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
+		break;
+	case G0_G1_Select:
+		devpriv->g0_g1_select_reg &= ~bit_mask;
+		devpriv->g0_g1_select_reg |= bit_values & bit_mask;
+		ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
+		break;
+	default:
+		a4l_err(dev,
+			"%s: called with invalid register %d\n",
+			__func__, reg);
+		break;
+	}
+
+	mmiowb();
+	rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags);
+}
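+
+/*
+ * The registers handled above are essentially write-only on the
+ * DAQ-STC, so the driver keeps a soft copy of each one in devpriv and
+ * rewrites the whole value under soft_reg_copy_lock; ni_set_bitfield()
+ * is thus the only safe way to flip individual bits in them.
+ */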
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_drain_dma(struct a4l_subdevice *subd);
+
+static inline void ni_set_ai_dma_channel(struct a4l_device * dev, int channel)
+{
+	unsigned bitfield;
+
+	if (channel >= 0) {
+		bitfield =
+			(ni_stc_dma_channel_select_bitfield(channel) <<
+			 AI_DMA_Select_Shift) & AI_DMA_Select_Mask;
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, AI_AO_Select, AI_DMA_Select_Mask, bitfield);
+}
+
+static inline void ni_set_ao_dma_channel(struct a4l_device * dev, int channel)
+{
+	unsigned bitfield;
+
+	if (channel >= 0) {
+		bitfield =
+			(ni_stc_dma_channel_select_bitfield(channel) <<
+			 AO_DMA_Select_Shift) & AO_DMA_Select_Mask;
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, AI_AO_Select, AO_DMA_Select_Mask, bitfield);
+}
+
+static inline void ni_set_gpct_dma_channel(struct a4l_device * dev,
+					   unsigned gpct_index, int mite_channel)
+{
+	unsigned bitfield;
+
+	if (mite_channel >= 0) {
+		bitfield = GPCT_DMA_Select_Bits(gpct_index, mite_channel);
+	} else {
+		bitfield = 0;
+	}
+	ni_set_bitfield(dev, G0_G1_Select, GPCT_DMA_Select_Mask(gpct_index),
+			bitfield);
+}
+
+static inline void ni_set_cdo_dma_channel(struct a4l_device * dev, int mite_channel)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->soft_reg_copy_lock, flags);
+	devpriv->cdio_dma_select_reg &= ~CDO_DMA_Select_Mask;
+	if (mite_channel >= 0) {
+		/* XXX: just guessing that
+		   ni_stc_dma_channel_select_bitfield() returns the right
+		   bits, under the assumption that the cdio dma selection
+		   works just like ai/ao/gpct. Definitely works for dma
+		   channels 0 and 1. */
+		devpriv->cdio_dma_select_reg |=
+			(ni_stc_dma_channel_select_bitfield(mite_channel) <<
+			 CDO_DMA_Select_Shift) & CDO_DMA_Select_Mask;
+	}
+	ni_writeb(devpriv->cdio_dma_select_reg, M_Offset_CDIO_DMA_Select);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&devpriv->soft_reg_copy_lock, flags);
+}
+
+static int ni_request_ai_mite_channel(struct a4l_device * dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->ai_mite_chan);
+	devpriv->ai_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->ai_mite_ring);
+	if (devpriv->ai_mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_ai_mite_channel: "
+			"failed to reserve mite dma channel for analog input.");
+		return -EBUSY;
+	}
+	devpriv->ai_mite_chan->dir = A4L_INPUT;
+	ni_set_ai_dma_channel(dev, devpriv->ai_mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_ao_mite_channel(struct a4l_device * dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->ao_mite_chan);
+	devpriv->ao_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->ao_mite_ring);
+	if (devpriv->ao_mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_ao_mite_channel: "
+			"failed to reserve mite dma channel for analog outut.");
+		return -EBUSY;
+	}
+	devpriv->ao_mite_chan->dir = A4L_OUTPUT;
+	ni_set_ao_dma_channel(dev, devpriv->ao_mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_gpct_mite_channel(struct a4l_device * dev,
+					unsigned gpct_index, int direction)
+{
+	unsigned long flags;
+	struct mite_channel *mite_chan;
+
+	BUG_ON(gpct_index >= NUM_GPCT);
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	BUG_ON(devpriv->counter_dev->counters[gpct_index]->mite_chan);
+	mite_chan = mite_request_channel(devpriv->mite,
+					 devpriv->gpct_mite_ring[gpct_index]);
+	if (mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock,
+				      flags);
+		a4l_err(dev,
+			"ni_request_gpct_mite_channel: "
+			"failed to reserve mite dma channel for counter.");
+		return -EBUSY;
+	}
+	mite_chan->dir = direction;
+	a4l_ni_tio_set_mite_channel(devpriv->counter_dev->counters[gpct_index],
+				mite_chan);
+	ni_set_gpct_dma_channel(dev, gpct_index, mite_chan->channel);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+	return 0;
+}
+
+static int ni_request_cdo_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+	int err = 0;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	/* No channel should be allocated... */
+	BUG_ON(devpriv->cdo_mite_chan);
+	/* ...until now */
+	devpriv->cdo_mite_chan =
+		mite_request_channel(devpriv->mite, devpriv->cdo_mite_ring);
+
+	if (devpriv->cdo_mite_chan) {
+		devpriv->cdo_mite_chan->dir = A4L_OUTPUT;
+		ni_set_cdo_dma_channel(dev, devpriv->cdo_mite_chan->channel);
+	} else {
+		err = -EBUSY;
+		a4l_err(dev,
+			"ni_request_cdo_mite_channel: "
+			"failed to reserve mite dma channel "
+			"for correlated digital outut.");
+	}
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+void ni_release_ai_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan) {
+		ni_set_ai_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->ai_mite_chan);
+		devpriv->ai_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+void ni_release_ao_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ao_mite_chan) {
+		ni_set_ao_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->ao_mite_chan);
+		devpriv->ao_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+void ni_release_gpct_mite_channel(struct a4l_device *dev, unsigned gpct_index)
+{
+	unsigned long flags;
+
+	BUG_ON(gpct_index >= NUM_GPCT);
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->counter_dev->counters[gpct_index]->mite_chan) {
+		struct mite_channel *mite_chan =
+			devpriv->counter_dev->counters[gpct_index]->mite_chan;
+
+		ni_set_gpct_dma_channel(dev, gpct_index, -1);
+		a4l_ni_tio_set_mite_channel(devpriv->counter_dev->
+					counters[gpct_index], NULL);
+		a4l_mite_release_channel(mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+void ni_release_cdo_mite_channel(struct a4l_device *dev)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->cdo_mite_chan) {
+		ni_set_cdo_dma_channel(dev, -1);
+		a4l_mite_release_channel(devpriv->cdo_mite_chan);
+		devpriv->cdo_mite_chan = NULL;
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+void ni_sync_ai_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan)
+		a4l_mite_sync_input_dma(devpriv->ai_mite_chan, subd);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+void mite_handle_b_linkc(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ao_mite_chan)
+		a4l_mite_sync_output_dma(devpriv->ao_mite_chan, subd);
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+}
+
+static int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd)
+{
+	static const int timeout = 10000;
+
+	struct a4l_device *dev = subd->dev;
+	struct a4l_buffer *buf = subd->buf;
+
+	int i;
+
+	for (i = 0; i < timeout; i++) {
+
+		int buffer_filled;
+		unsigned short b_status;
+
+		b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
+
+		buffer_filled = test_bit(A4L_BUF_EOA_NR, &buf->flags);
+		buffer_filled |= (b_status & AO_FIFO_Half_Full_St);
+
+		if (buffer_filled)
+			break;
+
+		/* If we poll too often, the pci bus activity seems
+		   to slow the dma transfer down */
+		a4l_udelay(10);
+	}
+
+	if (i == timeout) {
+		a4l_err(dev,
+			"ni_ao_wait_for_dma_load: "
+			"timed out waiting for dma load");
+		return -EPIPE;
+	}
+
+	return 0;
+}
+
+
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static inline int ni_ai_drain_dma(struct a4l_subdevice *subd)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_ai_mite_channel(struct a4l_device * dev)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_ao_mite_channel(struct a4l_device * dev)
+{
+	return -ENOTSUPP;
+}
+
+static inline
+int ni_request_gpct_mite_channel(struct a4l_device * dev,
+				 unsigned gpct_index, int direction)
+{
+	return -ENOTSUPP;
+}
+
+static inline int ni_request_cdo_mite_channel(struct a4l_device *dev)
+{
+	return -ENOTSUPP;
+}
+
+#define ni_release_ai_mite_channel(x) do { } while (0)
+#define ni_release_ao_mite_channel(x) do { } while (0)
+#define ni_release_gpct_mite_channel(x) do { } while (0)
+#define ni_release_cdo_mite_channel(x) do { } while (0)
+#define ni_sync_ai_dma(x) do { } while (0)
+#define mite_handle_b_linkc(x) do { } while (0)
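+
+/*
+ * With MITE support compiled out, the request helpers above fail with
+ * -ENOTSUPP and the release/sync helpers expand to empty statements,
+ * so the call sites need no conditional compilation of their own.
+ */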
+
+static inline int ni_ao_wait_for_dma_load(struct a4l_subdevice *subd)
+{
+	return -ENOTSUPP;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+/* E-series boards use the second irq signals to generate dma requests
+   for their counters */
+void ni_e_series_enable_second_irq(struct a4l_device *dev,
+				   unsigned gpct_index, short enable)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return;
+	switch (gpct_index) {
+	case 0:
+		if (enable) {
+			devpriv->stc_writew(dev, G0_Gate_Second_Irq_Enable,
+					    Second_IRQ_A_Enable_Register);
+		} else {
+			devpriv->stc_writew(dev, 0,
+					    Second_IRQ_A_Enable_Register);
+		}
+		break;
+	case 1:
+		if (enable) {
+			devpriv->stc_writew(dev, G1_Gate_Second_Irq_Enable,
+					    Second_IRQ_B_Enable_Register);
+		} else {
+			devpriv->stc_writew(dev, 0,
+					    Second_IRQ_B_Enable_Register);
+		}
+		break;
+	default:
+		BUG();
+		break;
+	}
+}
+
+void ni_clear_ai_fifo(struct a4l_device *dev)
+{
+	if (boardtype.reg_type == ni_reg_6143) {
+		/* Flush the 6143 data FIFO */
+		ni_writel(0x10, AIFIFO_Control_6143); /* Flush fifo */
+		ni_writel(0x00, AIFIFO_Control_6143); /* Flush fifo */
+		while (ni_readl(AIFIFO_Status_6143) & 0x10); /* Wait for complete */
+	} else {
+		devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
+		if (boardtype.reg_type == ni_reg_625x) {
+			ni_writeb(0, M_Offset_Static_AI_Control(0));
+			ni_writeb(1, M_Offset_Static_AI_Control(0));
+		}
+	}
+}
+
+#define ao_win_out(data, addr) ni_ao_win_outw(dev, data, addr)
+static inline void ni_ao_win_outw(struct a4l_device *dev, uint16_t data, int addr)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	ni_writew(data, AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static inline void ni_ao_win_outl(struct a4l_device *dev, uint32_t data, int addr)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	ni_writel(data, AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static inline unsigned short ni_ao_win_inw(struct a4l_device *dev, int addr)
+{
+	unsigned long flags;
+	unsigned short data;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(addr, AO_Window_Address_611x);
+	data = ni_readw(AO_Window_Data_611x);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+	return data;
+}
+
+/*
+ * ni_set_bits() allows different parts of the ni_mio_common driver
+ * to share registers (such as Interrupt_A_Register) without interfering
+ * with each other.
+ *
+ * NOTE: the switch/case statements are optimized out for a constant
+ * argument, so this is actually quite fast. If you must wrap another
+ * function around this, make it inline to avoid a large speed penalty.
+ *
+ * value should only be 1 or 0.
+ */
+
+static inline void ni_set_bits(struct a4l_device *dev,
+			       int reg, unsigned bits, unsigned value)
+{
+	unsigned bit_values;
+
+	if (value)
+		bit_values = bits;
+	else
+		bit_values = 0;
+
+	ni_set_bitfield(dev, reg, bits, bit_values);
+}
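+
+/*
+ * For instance, masking the AI FIFO interrupt while leaving the other
+ * enable bits untouched comes down to:
+ *
+ *	ni_set_bits(dev, Interrupt_A_Enable_Register,
+ *		    AI_FIFO_Interrupt_Enable, 0);
+ */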
+
+static void shutdown_ai_command(struct a4l_subdevice *subd)
+{
+	ni_ai_drain_dma(subd);
+	ni_handle_fifo_dregs(subd);
+	get_last_sample_611x(subd);
+	get_last_sample_6143(subd);
+
+	/* TODO: stop the acquisition */
+}
+
+static void ni_handle_eos(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (devpriv->aimode == AIMODE_SCAN) {
+		static const int timeout = 10;
+		int i;
+
+		for (i = 0; i < timeout; i++) {
+			ni_sync_ai_dma(subd);
+			/* TODO: stop when the transfer is really over */
+			a4l_udelay(1);
+		}
+	}
+
+	/* Handle special case of single scan using AI_End_On_End_Of_Scan */
+	if ((devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+		shutdown_ai_command(subd);
+	}
+}
+
+static void ni_event(struct a4l_subdevice * subd)
+{
+	/* Temporary hack */
+	struct a4l_buffer *buf = subd->buf;
+
+	if(test_bit(A4L_BUF_ERROR_NR, &buf->flags)) {
+		if (subd->cancel != NULL)
+			subd->cancel(subd);
+	}
+
+	a4l_buf_evt(subd, 0);
+}
+
+static void handle_gpct_interrupt(struct a4l_device *dev, unsigned short counter_index)
+{
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	struct ni_gpct *counter = devpriv->counter_dev->counters[counter_index];
+	a4l_ni_tio_handle_interrupt(counter, dev);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+}
+
+#ifdef CONFIG_DEBUG_MIO_COMMON
+static const char *const status_a_strings[] = {
+	"passthru0", "fifo", "G0_gate", "G0_TC",
+	"stop", "start", "sc_tc", "start1",
+	"start2", "sc_tc_error", "overflow", "overrun",
+	"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_a"
+};
+
+static void ni_mio_print_status_a(int status)
+{
+	int i;
+
+	__a4l_info("A status:");
+	for (i = 15; i >= 0; i--) {
+		if (status & (1 << i)) {
+			__a4l_info(" %s", status_a_strings[i]);
+		}
+	}
+	__a4l_info("\n");
+}
+
+static const char *const status_b_strings[] = {
+	"passthru1", "fifo", "G1_gate", "G1_TC",
+	"UI2_TC", "UPDATE", "UC_TC", "BC_TC",
+	"start1", "overrun", "start", "bc_tc_error",
+	"fifo_empty", "fifo_half_full", "fifo_full", "interrupt_b"
+};
+
+static void ni_mio_print_status_b(int status)
+{
+	int i;
+
+	__a4l_info("B status:");
+	for (i = 15; i >= 0; i--) {
+		if (status & (1 << i)) {
+			__a4l_info(" %s", status_b_strings[i]);
+		}
+	}
+	__a4l_info("\n");
+}
+
+#else /* !CONFIG_DEBUG_MIO_COMMON */
+
+#define ni_mio_print_status_a(x)
+#define ni_mio_print_status_b(x)
+
+#endif /* CONFIG_DEBUG_MIO_COMMON */
+
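+/*
+ * Reading a status register does not clear its event bits: each one
+ * must be acknowledged explicitly, so the two helpers below collect
+ * an ack bit for every asserted event and issue a single write to the
+ * corresponding Interrupt_{A,B}_Ack_Register.
+ */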
+static void ack_a_interrupt(struct a4l_device *dev, unsigned short a_status)
+{
+	unsigned short ack = 0;
+
+	if (a_status & AI_SC_TC_St) {
+		ack |= AI_SC_TC_Interrupt_Ack;
+	}
+	if (a_status & AI_START1_St) {
+		ack |= AI_START1_Interrupt_Ack;
+	}
+	if (a_status & AI_START_St) {
+		ack |= AI_START_Interrupt_Ack;
+	}
+	if (a_status & AI_STOP_St) {
+		/* not sure why we used to ack the START here also,
+		   instead of doing it independently. Frank Hess
+		   2007-07-06 */
+		ack |= AI_STOP_Interrupt_Ack;
+	}
+	if (ack)
+		devpriv->stc_writew(dev, ack, Interrupt_A_Ack_Register);
+}
+
+static void handle_a_interrupt(struct a4l_device *dev,
+			       unsigned short status,unsigned int ai_mite_status)
+{
+
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AI_SUBDEV);
+
+	/* 67xx boards don't have an AI subdevice, but their gpct0
+	   might generate an A interrupt. */
+
+	if((subd->flags & A4L_SUBD_TYPES) == A4L_SUBD_UNUSED)
+		return;
+
+	a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+		"a_status=%04x ai_mite_status=%08x\n",status, ai_mite_status);
+	ni_mio_print_status_a(status);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	if (ai_mite_status & CHSR_LINKC)
+		ni_sync_ai_dma(subd);
+
+	if (ai_mite_status & ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
+			       CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
+			       CHSR_SABORT | CHSR_XFERR | CHSR_LxERR_mask)) {
+		a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+			"unknown mite interrupt, ack! (ai_mite_status=%08x)\n",
+			ai_mite_status);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	/* Test for all uncommon interrupt events at the same time */
+	if (status & (AI_Overrun_St | AI_Overflow_St | AI_SC_TC_Error_St |
+		      AI_SC_TC_St | AI_START1_St)) {
+		if (status == 0xffff) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+				"a_status=0xffff.  Card removed?\n");
+			/* TODO: we probably aren't even running a command now,
+			   so it's a good idea to be careful.
+			   we should check the transfer status */
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			ni_event(subd);
+			return;
+		}
+		if (status & (AI_Overrun_St | AI_Overflow_St |
+			      AI_SC_TC_Error_St)) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+				"ai error a_status=%04x\n", status);
+			ni_mio_print_status_a(status);
+
+			shutdown_ai_command(subd);
+
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			ni_event(subd);
+
+			return;
+		}
+		if (status & AI_SC_TC_St) {
+			a4l_dbg(1, drv_dbg, dev, "ni_mio_common: SC_TC interrupt\n");
+			if (!devpriv->ai_continuous) {
+				shutdown_ai_command(subd);
+			}
+		}
+	}
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (status & AI_FIFO_Half_Full_St) {
+		int i;
+		static const int timeout = 10;
+		/* PCMCIA cards (at least 6036) seem to stop producing
+		   interrupts if we fail to get the fifo less than half
+		   full, so loop to be sure. */
+		for (i = 0; i < timeout; ++i) {
+			ni_handle_fifo_half_full(subd);
+			if ((devpriv->stc_readw(dev, AI_Status_1_Register) &
+			     AI_FIFO_Half_Full_St) == 0)
+				break;
+		}
+	}
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	if ((status & AI_STOP_St)) {
+		ni_handle_eos(subd);
+	}
+
+	ni_event(subd);
+
+	status = devpriv->stc_readw(dev, AI_Status_1_Register);
+	if (status & Interrupt_A_St)
+		a4l_dbg(1, drv_dbg, dev, "ni_mio_common: interrupt: "
+			" didn't clear interrupt? status=0x%x\n", status);
+}
+
+static void ack_b_interrupt(struct a4l_device *dev, unsigned short b_status)
+{
+	unsigned short ack = 0;
+	if (b_status & AO_BC_TC_St) {
+		ack |= AO_BC_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_Overrun_St) {
+		ack |= AO_Error_Interrupt_Ack;
+	}
+	if (b_status & AO_START_St) {
+		ack |= AO_START_Interrupt_Ack;
+	}
+	if (b_status & AO_START1_St) {
+		ack |= AO_START1_Interrupt_Ack;
+	}
+	if (b_status & AO_UC_TC_St) {
+		ack |= AO_UC_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_UI2_TC_St) {
+		ack |= AO_UI2_TC_Interrupt_Ack;
+	}
+	if (b_status & AO_UPDATE_St) {
+		ack |= AO_UPDATE_Interrupt_Ack;
+	}
+	if (ack)
+		devpriv->stc_writew(dev, ack, Interrupt_B_Ack_Register);
+}
+
+static void handle_b_interrupt(struct a4l_device * dev,
+			       unsigned short b_status, unsigned int ao_mite_status)
+{
+
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV);
+
+	a4l_dbg(1, drv_dbg, dev,
+		"ni_mio_common: interrupt: b_status=%04x m1_status=%08x\n",
+		b_status, ao_mite_status);
+
+	ni_mio_print_status_b(b_status);
+
+	if (b_status == 0xffff)
+		return;
+
+	if (b_status & AO_Overrun_St) {
+		a4l_err(dev,
+			"ni_mio_common: interrupt: "
+			"AO FIFO underrun status=0x%04x status2=0x%04x\n",
+			b_status,
+			devpriv->stc_readw(dev, AO_Status_2_Register));
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+
+	if (b_status & AO_BC_TC_St) {
+		a4l_dbg(1, drv_dbg, dev,
+			"ni_mio_common: interrupt: "
+			"AO BC_TC status=0x%04x status2=0x%04x\n",
+			b_status, devpriv->stc_readw(dev, AO_Status_2_Register));
+		a4l_buf_evt(subd, A4L_BUF_EOA);
+	}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (ao_mite_status & CHSR_STOPS) {
+		a4l_dbg(1, drv_dbg, dev,
+			"ni_mio_common: interrupt: MITE transfer stopped\n");
+	} else if (ao_mite_status & CHSR_LINKC) {
+		/* Currently, mite.c requires us to handle LINKC */
+		mite_handle_b_linkc(subd);
+	}
+
+	if (ao_mite_status &
+	    ~(CHSR_INT | CHSR_LINKC | CHSR_DONE | CHSR_MRDY |
+	      CHSR_DRDY | CHSR_DRQ1 | CHSR_DRQ0 | CHSR_ERROR |
+	      CHSR_SABORT | CHSR_STOPS | CHSR_XFERR | CHSR_LxERR_mask)) {
+		a4l_err(dev,
+			"unknown mite interrupt, ack! (ao_mite_status=%08x)\n",
+			 ao_mite_status);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+	if (b_status & AO_FIFO_Request_St) {
+		int ret;
+
+		ret = ni_ao_fifo_half_empty(subd);
+		if (!ret) {
+			a4l_err(dev,
+				"ni_mio_common: "
+				"interrupt: AO buffer underrun\n");
+			ni_set_bits(dev, Interrupt_B_Enable_Register,
+				    AO_FIFO_Interrupt_Enable |
+				    AO_Error_Interrupt_Enable, 0);
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+		}
+	}
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	ni_event(subd);
+}
+
+int a4l_ni_E_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	unsigned short a_status;
+	unsigned short b_status;
+	unsigned int ai_mite_status = 0;
+	unsigned int ao_mite_status = 0;
+	unsigned long flags;
+	struct mite_struct *mite = devpriv->mite;
+
+	/* Make sure dev->attached is checked before handler does
+	   anything else. */
+	smp_mb();
+
+	/* lock to avoid race with a4l_poll */
+	rtdm_lock_get_irqsave(&dev->lock, flags);
+	a_status = devpriv->stc_readw(dev, AI_Status_1_Register);
+	b_status = devpriv->stc_readw(dev, AO_Status_1_Register);
+	if (mite) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		rtdm_lock_get(&devpriv->mite_channel_lock);
+		if (devpriv->ai_mite_chan) {
+			ai_mite_status = a4l_mite_get_status(devpriv->ai_mite_chan);
+			if (ai_mite_status & CHSR_LINKC)
+				writel(CHOR_CLRLC,
+				       devpriv->mite->mite_io_addr +
+				       MITE_CHOR(devpriv->ai_mite_chan->channel));
+		}
+		if (devpriv->ao_mite_chan) {
+			ao_mite_status = a4l_mite_get_status(devpriv->ao_mite_chan);
+			if (ao_mite_status & CHSR_LINKC)
+				writel(CHOR_CLRLC,
+				       mite->mite_io_addr +
+				       MITE_CHOR(devpriv->ao_mite_chan->channel));
+		}
+		rtdm_lock_put(&devpriv->mite_channel_lock);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	}
+	ack_a_interrupt(dev, a_status);
+	ack_b_interrupt(dev, b_status);
+	if ((a_status & Interrupt_A_St) || (ai_mite_status & CHSR_INT))
+		handle_a_interrupt(dev, a_status, ai_mite_status);
+	if ((b_status & Interrupt_B_St) || (ao_mite_status & CHSR_INT))
+		handle_b_interrupt(dev, b_status, ao_mite_status);
+	handle_gpct_interrupt(dev, 0);
+	handle_gpct_interrupt(dev, 1);
+	handle_cdio_interrupt(dev);
+
+	rtdm_lock_put_irqrestore(&dev->lock, flags);
+	return 0;
+}
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static void ni_ao_fifo_load(struct a4l_subdevice *subd, int n)
+{
+	struct a4l_device *dev = subd->dev;
+	sampl_t d;
+	u32 packed_data;
+	int i, err = 1;
+
+	for (i = 0; i < n; i++) {
+		err = a4l_buf_get(subd, &d, sizeof(sampl_t));
+		if (err != 0)
+			break;
+
+		if (boardtype.reg_type & ni_reg_6xxx_mask) {
+			packed_data = d & 0xffff;
+			/* 6711 only has 16 bit wide ao fifo */
+			if (boardtype.reg_type != ni_reg_6711) {
+				err = a4l_buf_get(subd, &d, sizeof(sampl_t));
+				if (err != 0)
+					break;
+				i++;
+				packed_data |= (d << 16) & 0xffff0000;
+			}
+			ni_writel(packed_data, DAC_FIFO_Data_611x);
+		} else {
+			ni_writew(d, DAC_FIFO_Data);
+		}
+	}
+	if (err != 0) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+}
+
+/*
+ *  There's a small problem if the FIFO gets really low and we
+ *  don't have the data to fill it.  Basically, if after we fill
+ *  the FIFO with all the data available, the FIFO is _still_
+ *  less than half full, we never clear the interrupt.  If the
+ *  IRQ is in edge mode, we never get another interrupt, because
+ *  this one wasn't cleared.  If in level mode, we get flooded
+ *  with interrupts that we can't fulfill, because nothing ever
+ *  gets put into the buffer.
+ *
+ *  This kind of situation is recoverable, but it is easier to
+ *  just pretend we had a FIFO underrun, since there is a good
+ *  chance it will happen anyway.  This is _not_ the case for
+ *  RT code, as RT code might purposely be running close to the
+ *  metal.  Needs to be fixed eventually.
+ */
+static int ni_ao_fifo_half_empty(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	int n;
+
+	n = a4l_buf_count(subd);
+	if (n == 0) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+		return 0;
+	}
+
+	n /= sizeof(sampl_t);
+	if (n > boardtype.ao_fifo_depth / 2)
+		n = boardtype.ao_fifo_depth / 2;
+
+	ni_ao_fifo_load(subd, n);
+
+	return 1;
+}
+
+static int ni_ao_prep_fifo(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	int n;
+
+	/* Reset fifo */
+	devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
+	if (boardtype.reg_type & ni_reg_6xxx_mask)
+		ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
+
+	/* Load some data */
+	n = a4l_buf_count(subd);
+	if (n == 0)
+		return 0;
+
+	n /= sizeof(sampl_t);
+	if (n > boardtype.ao_fifo_depth)
+		n = boardtype.ao_fifo_depth;
+
+	ni_ao_fifo_load(subd, n);
+
+	return n;
+}
+
+static void ni_ai_fifo_read(struct a4l_subdevice *subd, int n)
+{
+	struct a4l_device *dev = subd->dev;
+	int i;
+
+	if (boardtype.reg_type == ni_reg_611x) {
+		sampl_t data[2];
+		u32 dl;
+
+		for (i = 0; i < n / 2; i++) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16) & 0xffff;
+			data[1] = dl & 0xffff;
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+		/* Check if there's a single sample stuck in the FIFO */
+		if (n % 2) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+			data[0] = dl & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		sampl_t data[2];
+		u32 dl;
+
+		/* This just reads the FIFO assuming the data is
+		   present, no checks on the FIFO status are performed */
+		for (i = 0; i < n / 2; i++) {
+			dl = ni_readl(AIFIFO_Data_6143);
+
+			data[0] = (dl >> 16) & 0xffff;
+			data[1] = dl & 0xffff;
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+		if (n % 2) {
+			/* Assume there is a single sample stuck in the FIFO.
+			   Get stranded sample into FIFO */
+			ni_writel(0x01, AIFIFO_Control_6143);
+			dl = ni_readl(AIFIFO_Data_6143);
+			data[0] = (dl >> 16) & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+	} else {
+		if (n > sizeof(devpriv->ai_fifo_buffer) /
+		    sizeof(devpriv->ai_fifo_buffer[0])) {
+			a4l_err(dev,
+				"ni_ai_fifo_read: "
+				"bug! ai_fifo_buffer too small");
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+			return;
+		}
+		for (i = 0; i < n; i++) {
+			devpriv->ai_fifo_buffer[i] =
+				ni_readw(ADC_FIFO_Data_Register);
+		}
+		a4l_buf_put(subd,
+			    devpriv->ai_fifo_buffer,
+			    n * sizeof(devpriv->ai_fifo_buffer[0]));
+	}
+}
+
+static void ni_handle_fifo_half_full(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	ni_ai_fifo_read(subd, boardtype.ai_fifo_depth / 2);
+}
+
+#endif /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_drain_dma(struct a4l_subdevice *subd)
+{
+	int i;
+	static const int timeout = 10000;
+	unsigned long flags;
+	int retval = 0;
+	struct a4l_device *dev = subd->dev;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->ai_mite_chan) {
+		for (i = 0; i < timeout; i++) {
+			if ((devpriv->stc_readw(dev,
+						AI_Status_1_Register) &
+			     AI_FIFO_Empty_St)
+			    && a4l_mite_bytes_in_transit(devpriv->
+						     ai_mite_chan) == 0)
+				break;
+			a4l_udelay(5);
+		}
+		if (i == timeout) {
+			a4l_info(dev, "wait for dma drain timed out\n");
+
+			a4l_info(dev, "a4l_mite_bytes_in_transit=%i, "
+				 "AI_Status1_Register=0x%x\n",
+				 a4l_mite_bytes_in_transit(devpriv->ai_mite_chan),
+				 devpriv->stc_readw(dev, AI_Status_1_Register));
+			retval = -1;
+		}
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	ni_sync_ai_dma(subd);
+
+	return retval;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+/* Empties the AI fifo */
+static void ni_handle_fifo_dregs(struct a4l_subdevice *subd)
+{
+	sampl_t data[2];
+	u32 dl;
+	short fifo_empty;
+	int i;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type == ni_reg_611x) {
+		while ((devpriv->stc_readw(dev,
+					   AI_Status_1_Register) &
+			AI_FIFO_Empty_St) == 0) {
+			dl = ni_readl(ADC_FIFO_Data_611x);
+
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16);
+			data[1] = (dl & 0xffff);
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		i = 0;
+		while (ni_readl(AIFIFO_Status_6143) & 0x04) {
+			dl = ni_readl(AIFIFO_Data_6143);
+
+			/* This may get the hi/lo data in the wrong order */
+			data[0] = (dl >> 16);
+			data[1] = (dl & 0xffff);
+			a4l_buf_put(subd, data, sizeof(sampl_t) * 2);
+			i += 2;
+		}
+		/* Check if a stranded sample is present */
+		if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+			/* Get the stranded sample into the FIFO */
+			ni_writel(0x01, AIFIFO_Control_6143);
+			dl = ni_readl(AIFIFO_Data_6143);
+			data[0] = (dl >> 16) & 0xffff;
+			a4l_buf_put(subd, &data[0], sizeof(sampl_t));
+		}
+
+	} else {
+		fifo_empty =
+			devpriv->stc_readw(dev,
+					   AI_Status_1_Register) & AI_FIFO_Empty_St;
+		while (fifo_empty == 0) {
+			for (i = 0;
+			     i <
+				     sizeof(devpriv->ai_fifo_buffer) /
+				     sizeof(devpriv->ai_fifo_buffer[0]); i++) {
+				fifo_empty =
+					devpriv->stc_readw(dev,
+							   AI_Status_1_Register) &
+					AI_FIFO_Empty_St;
+				if (fifo_empty)
+					break;
+				devpriv->ai_fifo_buffer[i] =
+					ni_readw(ADC_FIFO_Data_Register);
+			}
+			a4l_buf_put(subd,
+				    devpriv->ai_fifo_buffer,
+				    i * sizeof(devpriv->ai_fifo_buffer[0]));
+		}
+	}
+}
+
+static void get_last_sample_611x(struct a4l_subdevice *subd)
+{
+	sampl_t data;
+	u32 dl;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type != ni_reg_611x)
+		return;
+
+	/* Check if there's a single sample stuck in the FIFO */
+	if (ni_readb(XXX_Status) & 0x80) {
+		dl = ni_readl(ADC_FIFO_Data_611x);
+		data = (dl & 0xffff);
+		a4l_buf_put(subd, &data, sizeof(sampl_t));
+	}
+}
+
+static void get_last_sample_6143(struct a4l_subdevice *subd)
+{
+	sampl_t data;
+	u32 dl;
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type != ni_reg_6143)
+		return;
+
+	/* Check if there's a single sample stuck in the FIFO */
+	if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+		/* Get stranded sample into FIFO */
+		ni_writel(0x01, AIFIFO_Control_6143);
+		dl = ni_readl(AIFIFO_Data_6143);
+
+		/* This may get the hi/lo data in the wrong order */
+		data = (dl >> 16) & 0xffff;
+		a4l_buf_put(subd, &data, sizeof(sampl_t));
+	}
+}
+
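+/*
+ * Both munge helpers post-process raw acquisition data in place: when
+ * the samples travel over the MITE they arrive little-endian, and in
+ * all cases the per-channel offset computed when the channel/gain list
+ * was loaded is added to shift the raw value into the unsigned sample
+ * convention, walking the channel list cyclically to follow the scan
+ * order.
+ */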
+static void ni_ai_munge16(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	unsigned int i;
+	sampl_t *array = buf;
+
+	for (i = 0; i < size / sizeof(sampl_t); i++) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		array[i] = le16_to_cpu(array[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+		array[i] += devpriv->ai_offset[chan_idx];
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+static void ni_ai_munge32(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	unsigned int i;
+	lsampl_t *larray = buf;
+
+	for (i = 0; i < size / sizeof(lsampl_t); i++) {
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		larray[i] = le32_to_cpu(larray[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+		larray[i] += devpriv->ai_offset[chan_idx];
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_ai_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_ai_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	err = a4l_mite_buf_change(devpriv->ai_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	switch (boardtype.reg_type) {
+	case ni_reg_611x:
+	case ni_reg_6143:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 16);
+		break;
+	case ni_reg_628x:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 32, 32);
+		break;
+	default:
+		a4l_mite_prep_dma(devpriv->ai_mite_chan, 16, 16);
+		break;
+	}
+
+	/* start the MITE */
+	a4l_mite_dma_arm(devpriv->ai_mite_chan);
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return 0;
+}
+
+static int ni_ao_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_ao_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	err = a4l_mite_buf_change(devpriv->ao_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	if (devpriv->ao_mite_chan) {
+
+		if (boardtype.reg_type & (ni_reg_611x | ni_reg_6713)) {
+			a4l_mite_prep_dma(devpriv->ao_mite_chan, 32, 32);
+		} else {
+			/* Doing 32 instead of 16 bit wide transfers
+			   from memory makes the mite do 32 bit pci
+			   transfers, doubling pci bandwidth. */
+			a4l_mite_prep_dma(devpriv->ao_mite_chan, 16, 32);
+		}
+		a4l_mite_dma_arm(devpriv->ao_mite_chan);
+	} else
+		err = -EIO;
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+static int ni_cdo_setup_MITE_dma(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned long flags;
+	int err;
+
+	err = ni_request_cdo_mite_channel(dev);
+	if (err < 0)
+		return err;
+
+	/* No need to get a lock to setup the ring buffer */
+	err = a4l_mite_buf_change(devpriv->cdo_mite_chan->ring, subd);
+	if (err < 0)
+		return err;
+
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+
+	/* This test should be useless but one never knows */
+	if (devpriv->cdo_mite_chan) {
+		/* Configure the DMA transfer */
+		a4l_mite_prep_dma(devpriv->cdo_mite_chan, 32, 32);
+		a4l_mite_dma_arm(devpriv->cdo_mite_chan);
+	} else
+		err = -EIO;
+
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	return err;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static void ni_ai_reset(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	ni_release_ai_mite_channel(dev);
+
+	/* ai configuration */
+	devpriv->stc_writew(dev, AI_Configuration_Start | AI_Reset,
+			    Joint_Reset_Register);
+
+	ni_set_bits(dev, Interrupt_A_Enable_Register,
+		    AI_SC_TC_Interrupt_Enable | AI_START1_Interrupt_Enable |
+		    AI_START2_Interrupt_Enable | AI_START_Interrupt_Enable |
+		    AI_STOP_Interrupt_Enable | AI_Error_Interrupt_Enable |
+		    AI_FIFO_Interrupt_Enable, 0);
+
+	ni_clear_ai_fifo(dev);
+
+	if (boardtype.reg_type != ni_reg_6143)
+		ni_writeb(0, Misc_Command);
+
+	devpriv->stc_writew(dev, AI_Disarm, AI_Command_1_Register);	/* reset pulses */
+	devpriv->stc_writew(dev,
+			    AI_Start_Stop | AI_Mode_1_Reserved /*| AI_Trigger_Once */ ,
+			    AI_Mode_1_Register);
+	devpriv->stc_writew(dev, 0x0000, AI_Mode_2_Register);
+	/* generate FIFO interrupts on non-empty */
+	devpriv->stc_writew(dev, (0 << 6) | 0x0000, AI_Mode_3_Register);
+	if (boardtype.reg_type == ni_reg_611x) {
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) |
+				    AI_EXTMUX_CLK_Output_Select(0) |
+				    AI_LOCALMUX_CLK_Output_Select(2) |
+				    AI_SC_TC_Output_Select(3) |
+				    AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_High),
+				    AI_Output_Control_Register);
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		devpriv->stc_writew(dev, AI_SCAN_IN_PROG_Output_Select(3) |
+				    AI_EXTMUX_CLK_Output_Select(0) |
+				    AI_LOCALMUX_CLK_Output_Select(2) |
+				    AI_SC_TC_Output_Select(3) |
+				    AI_CONVERT_Output_Select(AI_CONVERT_Output_Enable_Low),
+				    AI_Output_Control_Register);
+	} else {
+		unsigned int ai_output_control_bits;
+		devpriv->stc_writew(dev, AI_SHIFTIN_Pulse_Width |
+				    AI_SOC_Polarity |
+				    AI_CONVERT_Pulse_Width |
+				    AI_LOCALMUX_CLK_Pulse_Width, AI_Personal_Register);
+		ai_output_control_bits = AI_SCAN_IN_PROG_Output_Select(3) |
+			AI_EXTMUX_CLK_Output_Select(0) |
+			AI_LOCALMUX_CLK_Output_Select(2) |
+			AI_SC_TC_Output_Select(3);
+		if (boardtype.reg_type == ni_reg_622x)
+			ai_output_control_bits |=
+				AI_CONVERT_Output_Select
+				(AI_CONVERT_Output_Enable_High);
+		else
+			ai_output_control_bits |=
+				AI_CONVERT_Output_Select
+				(AI_CONVERT_Output_Enable_Low);
+		devpriv->stc_writew(dev, ai_output_control_bits,
+				    AI_Output_Control_Register);
+	}
+
+	/* the following registers should not be changed, because there
+	 * are no backup registers in devpriv.  If you want to change
+	 * any of these, add a backup register and other appropriate code:
+	 *      AI_Mode_1_Register
+	 *      AI_Mode_3_Register
+	 *      AI_Personal_Register
+	 *      AI_Output_Control_Register
+	 */
+
+	/* clear interrupts */
+	devpriv->stc_writew(dev, AI_SC_TC_Error_Confirm | AI_START_Interrupt_Ack |
+			    AI_START2_Interrupt_Ack | AI_START1_Interrupt_Ack |
+			    AI_SC_TC_Interrupt_Ack | AI_Error_Interrupt_Ack |
+			    AI_STOP_Interrupt_Ack, Interrupt_A_Ack_Register);
+
+	devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
+}
+
+static int ni_ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	const unsigned int mask = (1 << boardtype.adbits) - 1;
+	int i, n;
+	unsigned int signbits;
+	unsigned short d;
+	unsigned long dl;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	ni_load_channelgain_list(dev, 1, &insn->chan_desc);
+
+	ni_clear_ai_fifo(dev);
+
+	signbits = devpriv->ai_offset[0];
+	if (boardtype.reg_type == ni_reg_611x) {
+		for (n = 0; n < num_adc_stages_611x; n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			a4l_udelay(1);
+		}
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			/* The 611x has screwy 32-bit FIFOs. */
+			d = 0;
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (ni_readb(XXX_Status) & 0x80) {
+					d = (ni_readl(ADC_FIFO_Data_611x) >> 16)
+						& 0xffff;
+					break;
+				}
+				if (!(devpriv->stc_readw(dev,
+							 AI_Status_1_Register) &
+				      AI_FIFO_Empty_St)) {
+					d = ni_readl(ADC_FIFO_Data_611x) &
+						0xffff;
+					break;
+				}
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in 611x ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			d += signbits;
+			data[n] = d;
+		}
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+
+			/* The 6143 has 32-bit FIFOs.
+			   You need to strobe a bit to move a single
+			   16-bit stranded sample into the FIFO */
+			dl = 0;
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (ni_readl(AIFIFO_Status_6143) & 0x01) {
+					/* Get stranded sample into FIFO */
+					ni_writel(0x01, AIFIFO_Control_6143);
+					dl = ni_readl(AIFIFO_Data_6143);
+					break;
+				}
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in 6143 ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			data[n] = (((dl >> 16) & 0xFFFF) + signbits) & 0xFFFF;
+		}
+	} else {
+		for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+			devpriv->stc_writew(dev, AI_CONVERT_Pulse,
+					    AI_Command_1_Register);
+			for (i = 0; i < NI_TIMEOUT; i++) {
+				if (!(devpriv->stc_readw(dev,
+							 AI_Status_1_Register) &
+				      AI_FIFO_Empty_St))
+					break;
+			}
+			if (i == NI_TIMEOUT) {
+				a4l_warn(dev,
+					 "ni_mio_common: "
+					 "timeout in ni_ai_insn_read\n");
+				return -ETIME;
+			}
+			if (boardtype.reg_type & ni_reg_m_series_mask) {
+				data[n] = ni_readl(M_Offset_AI_FIFO_Data) & mask;
+			} else {
+				d = ni_readw(ADC_FIFO_Data_Register);
+				/* subtle: needs to be short addition */
+				d += signbits;
+				data[n] = d;
+			}
+		}
+	}
+	return 0;
+}
+
+void ni_prime_channelgain_list(struct a4l_device *dev)
+{
+	int i;
+	devpriv->stc_writew(dev, AI_CONVERT_Pulse, AI_Command_1_Register);
+	for (i = 0; i < NI_TIMEOUT; ++i) {
+		if (!(devpriv->stc_readw(dev,
+					 AI_Status_1_Register) &
+		      AI_FIFO_Empty_St)) {
+			devpriv->stc_writew(dev, 1, ADC_FIFO_Clear);
+			return;
+		}
+		a4l_udelay(1);
+	}
+	a4l_warn(dev, "ni_mio_common: timeout loading channel/gain list\n");
+}
+
+static void ni_m_series_load_channelgain_list(struct a4l_device *dev,
+					      unsigned int n_chan,
+					      unsigned int *list)
+{
+	unsigned int chan, range, aref;
+	unsigned int i;
+	unsigned offset;
+	unsigned int dither;
+	unsigned range_code;
+
+	devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
+
+	if ((list[0] & CR_ALT_SOURCE)) {
+		unsigned bypass_bits;
+		chan = CR_CHAN(list[0]);
+		range = CR_RNG(list[0]);
+		range_code = ni_gainlkup[boardtype.gainlkup][range];
+		dither = ((list[0] & CR_ALT_FILTER) != 0);
+		bypass_bits = MSeries_AI_Bypass_Config_FIFO_Bit;
+		bypass_bits |= chan;
+		bypass_bits |=
+			(devpriv->
+			 ai_calib_source) & (MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
+					     MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
+					     MSeries_AI_Bypass_Mode_Mux_Mask |
+					     MSeries_AO_Bypass_AO_Cal_Sel_Mask);
+		bypass_bits |= MSeries_AI_Bypass_Gain_Bits(range_code);
+		if (dither)
+			bypass_bits |= MSeries_AI_Bypass_Dither_Bit;
+		/* don't use 2's complement encoding */
+		bypass_bits |= MSeries_AI_Bypass_Polarity_Bit;
+		ni_writel(bypass_bits, M_Offset_AI_Config_FIFO_Bypass);
+	} else {
+		ni_writel(0, M_Offset_AI_Config_FIFO_Bypass);
+	}
+	offset = 0;
+	for (i = 0; i < n_chan; i++) {
+		unsigned config_bits = 0;
+		chan = CR_CHAN(list[i]);
+		aref = CR_AREF(list[i]);
+		range = CR_RNG(list[i]);
+		dither = ((list[i] & CR_ALT_FILTER) != 0);
+
+		range_code = ni_gainlkup[boardtype.gainlkup][range];
+		devpriv->ai_offset[i] = offset;
+		switch (aref) {
+		case AREF_DIFF:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Differential_Bits;
+			break;
+		case AREF_COMMON:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Common_Ref_Bits;
+			break;
+		case AREF_GROUND:
+			config_bits |=
+				MSeries_AI_Config_Channel_Type_Ground_Ref_Bits;
+			break;
+		case AREF_OTHER:
+			break;
+		}
+		config_bits |= MSeries_AI_Config_Channel_Bits(chan);
+		config_bits |=
+			MSeries_AI_Config_Bank_Bits(boardtype.reg_type, chan);
+		config_bits |= MSeries_AI_Config_Gain_Bits(range_code);
+		if (i == n_chan - 1)
+			config_bits |= MSeries_AI_Config_Last_Channel_Bit;
+		if (dither)
+			config_bits |= MSeries_AI_Config_Dither_Bit;
+		/* don't use 2's complement encoding */
+		config_bits |= MSeries_AI_Config_Polarity_Bit;
+		ni_writew(config_bits, M_Offset_AI_Config_FIFO_Data);
+	}
+	ni_prime_channelgain_list(dev);
+}
+
+/*
+ * Notes on the 6110 and 6111:
+ * These boards are slightly different from the rest of the series, since
+ * they have multiple A/D converters.
+ * From the driver side, the configuration memory is a
+ * little different.
+ * Configuration Memory Low:
+ *   bits 15-9: same
+ *   bit 8: unipolar/bipolar (should be 0 for bipolar)
+ *   bits 0-3: gain.  This is 4 bits instead of 3 for the other boards
+ *       1001 gain=0.1 (+/- 50)
+ *       1010 0.2
+ *       1011 0.5
+ *       0001 1
+ *       0010 2
+ *       0011 5
+ *       0100 10
+ *       0101 20
+ *       0110 50
+ * Configuration Memory High:
+ *   bits 12-14: Channel Type
+ *       001 for differential
+ *       000 for calibration
+ *   bit 11: coupling  (this is not currently handled)
+ *       1 AC coupling
+ *       0 DC coupling
+ *   bits 0-2: channel
+ *       valid channels are 0-3
+ */
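+/*
+ * Illustrative example only, composed from the table above (the driver
+ * itself builds these words through macros such as AI_CONFIG_CHANNEL()
+ * in ni_load_channelgain_list() below): reading channel 2 differentially
+ * at gain 5 on a 6110/6111 would correspond to
+ *
+ *	lo = 0x0003;		// bit 8 = 0 (bipolar), gain code 0011 = 5
+ *	hi = (1 << 12) | 2;	// channel type 001 (differential), channel 2
+ *	ni_writew(hi, Configuration_Memory_High);
+ *	ni_writew(lo, Configuration_Memory_Low);
+ */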
+static void ni_load_channelgain_list(struct a4l_device *dev,
+				     unsigned int n_chan, unsigned int *list)
+{
+	unsigned int chan, range, aref;
+	unsigned int i;
+	unsigned int hi, lo;
+	unsigned offset;
+	unsigned int dither;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		ni_m_series_load_channelgain_list(dev, n_chan, list);
+		return;
+	}
+	if (n_chan == 1 && (boardtype.reg_type != ni_reg_611x)
+	    && (boardtype.reg_type != ni_reg_6143)) {
+		if (devpriv->changain_state
+		    && devpriv->changain_spec == list[0]) {
+			/* ready to go. */
+			return;
+		}
+		devpriv->changain_state = 1;
+		devpriv->changain_spec = list[0];
+	} else {
+		devpriv->changain_state = 0;
+	}
+
+	devpriv->stc_writew(dev, 1, Configuration_Memory_Clear);
+
+	/* Set up Calibration mode if required */
+	if (boardtype.reg_type == ni_reg_6143) {
+		if ((list[0] & CR_ALT_SOURCE)
+		    && !devpriv->ai_calib_source_enabled) {
+			/* Strobe Relay enable bit */
+			ni_writew(devpriv->ai_calib_source |
+				  Calibration_Channel_6143_RelayOn,
+				  Calibration_Channel_6143);
+			ni_writew(devpriv->ai_calib_source,
+				  Calibration_Channel_6143);
+			devpriv->ai_calib_source_enabled = 1;
+			/* Allow relays to change */
+			if (rtdm_in_rt_context())
+				rtdm_task_sleep(100*1000000);
+			else
+				msleep_interruptible(100);
+		} else if (!(list[0] & CR_ALT_SOURCE)
+			   && devpriv->ai_calib_source_enabled) {
+			/* Strobe Relay disable bit */
+			ni_writew(devpriv->ai_calib_source |
+				  Calibration_Channel_6143_RelayOff,
+				  Calibration_Channel_6143);
+			ni_writew(devpriv->ai_calib_source,
+				  Calibration_Channel_6143);
+			devpriv->ai_calib_source_enabled = 0;
+			/* Allow relays to change */
+			if (rtdm_in_rt_context())
+				rtdm_task_sleep(100*1000000);
+			else
+				msleep_interruptible(100);
+		}
+	}
+
+	offset = 1 << (boardtype.adbits - 1);
+	for (i = 0; i < n_chan; i++) {
+		if ((boardtype.reg_type != ni_reg_6143)
+		    && (list[i] & CR_ALT_SOURCE)) {
+			chan = devpriv->ai_calib_source;
+		} else {
+			chan = CR_CHAN(list[i]);
+		}
+		aref = CR_AREF(list[i]);
+		range = CR_RNG(list[i]);
+		dither = ((list[i] & CR_ALT_FILTER) != 0);
+
+		/* fix the external/internal range differences */
+		range = ni_gainlkup[boardtype.gainlkup][range];
+		if (boardtype.reg_type == ni_reg_611x)
+			devpriv->ai_offset[i] = offset;
+		else
+			devpriv->ai_offset[i] = (range & 0x100) ? 0 : offset;
+
+		hi = 0;
+		if ((list[i] & CR_ALT_SOURCE)) {
+			if (boardtype.reg_type == ni_reg_611x)
+				ni_writew(CR_CHAN(list[i]) & 0x0003,
+					  Calibration_Channel_Select_611x);
+		} else {
+			if (boardtype.reg_type == ni_reg_611x)
+				aref = AREF_DIFF;
+			else if (boardtype.reg_type == ni_reg_6143)
+				aref = AREF_OTHER;
+			switch (aref) {
+			case AREF_DIFF:
+				hi |= AI_DIFFERENTIAL;
+				break;
+			case AREF_COMMON:
+				hi |= AI_COMMON;
+				break;
+			case AREF_GROUND:
+				hi |= AI_GROUND;
+				break;
+			case AREF_OTHER:
+				break;
+			}
+		}
+		hi |= AI_CONFIG_CHANNEL(chan);
+
+		ni_writew(hi, Configuration_Memory_High);
+
+		if (boardtype.reg_type != ni_reg_6143) {
+			lo = range;
+			if (i == n_chan - 1)
+				lo |= AI_LAST_CHANNEL;
+			if (dither)
+				lo |= AI_DITHER;
+
+			ni_writew(lo, Configuration_Memory_Low);
+		}
+	}
+
+	/* prime the channel/gain list */
+	if ((boardtype.reg_type != ni_reg_611x)
+	    && (boardtype.reg_type != ni_reg_6143)) {
+		ni_prime_channelgain_list(dev);
+	}
+}
+
+static int ni_ns_to_timer(const struct a4l_device *dev,
+			  unsigned int nanosec, int round_mode)
+{
+	int divider;
+	switch (round_mode) {
+	case TRIG_ROUND_NEAREST:
+	default:
+		divider = (nanosec + devpriv->clock_ns / 2) / devpriv->clock_ns;
+		break;
+	case TRIG_ROUND_DOWN:
+		divider = nanosec / devpriv->clock_ns;
+		break;
+	case TRIG_ROUND_UP:
+		divider = (nanosec + devpriv->clock_ns - 1) / devpriv->clock_ns;
+		break;
+	}
+	return divider - 1;
+}
+
+static unsigned int ni_timer_to_ns(const struct a4l_device *dev, int timer)
+{
+	return devpriv->clock_ns * (timer + 1);
+}
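+
+/*
+ * Worked example (assuming the usual 50 ns timebase, i.e. clock_ns == 50
+ * for a 20 MHz clock): a requested period of 100 us gives, with
+ * TRIG_ROUND_NEAREST,
+ *
+ *	divider = (100000 + 25) / 50 - 1 = 1999
+ *	ni_timer_to_ns(dev, 1999) = 50 * 2000 = 100000 ns (exact)
+ *
+ * An inexact request such as 99990 ns yields the same divider, which is
+ * why the cmdtest paths feed the result back through ni_timer_to_ns() to
+ * report the period actually programmed.
+ */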
+
+static unsigned int ni_min_ai_scan_period_ns(struct a4l_device *dev,
+					     unsigned int num_channels)
+{
+	switch (boardtype.reg_type) {
+	case ni_reg_611x:
+	case ni_reg_6143:
+		/* simultaneously-sampled inputs */
+		return boardtype.ai_speed;
+	default:
+		/* multiplexed inputs */
+		break;
+	}
+	return boardtype.ai_speed * num_channels;
+}
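+
+/*
+ * Example (hypothetical board numbers): a multiplexed board with
+ * ai_speed = 1000 ns per conversion needs at least 4 * 1000 = 4000 ns
+ * for a 4-channel scan, whereas the simultaneously-sampling 611x/6143
+ * boards convert all channels at once, so their minimum stays at
+ * ai_speed regardless of num_channels.
+ */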
+
+static struct a4l_cmd_desc mio_ai_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT | TRIG_EXT,
+	.scan_begin_src = TRIG_TIMER | TRIG_EXT,
+	.convert_src = TRIG_TIMER | TRIG_EXT | TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+int ni_ai_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (trignum != 0)
+		return -EINVAL;
+
+	devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
+			    AI_Command_2_Register);
+
+	return 1;
+}
+
+#define cfc_check_trigger_arg_is(a,b) __cfc_check_trigger_arg_is(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_is(unsigned int *arg,
+	                                     unsigned int val,
+					     struct a4l_device *dev,
+	                                     unsigned int line)
+{
+	if (*arg != val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) != val (%d)\n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define cfc_check_trigger_is_unique(a) __cfc_check_trigger_is_unique(a, dev, __LINE__)
+static inline int __cfc_check_trigger_is_unique(unsigned int src,
+					        struct a4l_device *dev,
+	                                        unsigned int line)
+{
+	/* this test is true if more than one _src bit is set */
+	if ((src & (src - 1)) != 0) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: src (%d)\n", line, src);
+		return -EINVAL;
+	}
+	return 0;
+}
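+
+/*
+ * The (src & (src - 1)) expression clears the lowest set bit, so it is
+ * nonzero exactly when more than one bit is set.  With illustrative bit
+ * patterns:
+ *
+ *	src = 0x06:	0x06 & 0x05 == 0x04	-> rejected (two sources)
+ *	src = 0x02:	0x02 & 0x01 == 0x00	-> accepted (one source)
+ */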
+
+#define cfc_check_trigger_src(a,b) __cfc_check_trigger_src(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_src(unsigned int *src,
+	                                  unsigned int flags,
+					  struct a4l_device *dev,
+	                                  unsigned int line)
+{
+	unsigned int orig_src = *src;
+
+	*src = orig_src & flags;
+	if (*src == 0 || *src != orig_src) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *src (%d) orig_src (%d) flags (%d)\n",
+			line, *src, orig_src, flags);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+#define cfc_check_trigger_arg_min(a,b) __cfc_check_trigger_arg_min(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_min(unsigned int *arg,
+					      unsigned int val,
+					      struct a4l_device *dev,
+	                                      unsigned int line)
+{
+	if (*arg < val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) < val (%d)\n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
+
+#define cfc_check_trigger_arg_max(a,b) __cfc_check_trigger_arg_max(a,b, dev, __LINE__)
+static inline int __cfc_check_trigger_arg_max(unsigned int *arg,
+					      unsigned int val,
+					      struct a4l_device *dev,
+	                                      unsigned int line)
+{
+	if (*arg > val) {
+		a4l_dbg(1, drv_dbg, dev, "line %d: *arg (%d) > val (%d)\n",
+			line, *arg, val);
+		*arg = val;
+		return -EINVAL;
+	}
+	return 0;
+}
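+
+/*
+ * These helpers follow the Comedi "check and fix up" convention: each one
+ * rewrites the offending argument in place and returns -EINVAL, so a
+ * cmdtest can OR the results per step and hand a corrected command back
+ * to user space in one pass.  Sketch of the pattern (as used by
+ * ni_ai_cmdtest() below):
+ *
+ *	err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+ *	err |= cfc_check_trigger_arg_max(&cmd->stop_arg, max_count);
+ *	if (err)
+ *		return -EINVAL;
+ */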
+
+static int ni_ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int sources;
+	int tmp, err = 0;
+
+	/* Step 1 : check if triggers are trivially valid */
+	err |= cfc_check_trigger_src(&cmd->start_src, TRIG_NOW | TRIG_INT | TRIG_EXT);
+	err |= cfc_check_trigger_src(&cmd->scan_begin_src, TRIG_TIMER | TRIG_EXT);
+
+	sources = TRIG_TIMER | TRIG_EXT;
+	if (boardtype.reg_type == ni_reg_611x || boardtype.reg_type == ni_reg_6143)
+		sources |= TRIG_NOW;
+
+	err |= cfc_check_trigger_src(&cmd->convert_src, sources);
+	err |= cfc_check_trigger_src(&cmd->scan_end_src, TRIG_COUNT);
+	err |= cfc_check_trigger_src(&cmd->stop_src, TRIG_COUNT | TRIG_NONE);
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(1))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 1\n");
+		return -EINVAL;
+	}
+
+	/* Step 2a : make sure trigger sources are unique */
+	err |= cfc_check_trigger_is_unique(cmd->start_src);
+	err |= cfc_check_trigger_is_unique(cmd->scan_begin_src);
+	err |= cfc_check_trigger_is_unique(cmd->convert_src);
+	err |= cfc_check_trigger_is_unique(cmd->stop_src);
+
+	/* Step 2b : and mutually compatible */
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(2))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 2\n");
+		return -EINVAL;
+	}
+
+	/* Step 3: check if arguments are trivially valid */
+
+	if (cmd->start_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->start_arg);
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->start_arg & (CR_INVERT | CR_EDGE));
+		err |= cfc_check_trigger_arg_is(&cmd->start_arg, tmp);
+
+	} else {
+		/* true for both TRIG_NOW and TRIG_INT */
+		err |= cfc_check_trigger_arg_is(&cmd->start_arg, 0);
+	}
+
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		err |= cfc_check_trigger_arg_min(&cmd->scan_begin_arg,
+			ni_min_ai_scan_period_ns(dev, cmd->nb_chan));
+
+		err |= cfc_check_trigger_arg_max(&cmd->scan_begin_arg,
+						 devpriv->clock_ns * 0xffffff);
+	} else if (cmd->scan_begin_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->scan_begin_arg);
+
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->scan_begin_arg & (CR_INVERT | CR_EDGE));
+		err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, tmp);
+
+	} else {		/* TRIG_OTHER */
+		err |= cfc_check_trigger_arg_is(&cmd->scan_begin_arg, 0);
+
+	}
+
+	if (cmd->convert_src == TRIG_TIMER) {
+		if ((boardtype.reg_type == ni_reg_611x)
+		    || (boardtype.reg_type == ni_reg_6143)) {
+			err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+
+		} else {
+			err |= cfc_check_trigger_arg_min(&cmd->convert_arg,
+							 boardtype.ai_speed);
+			err |= cfc_check_trigger_arg_max(&cmd->convert_arg,
+						devpriv->clock_ns * 0xffff);
+		}
+	} else if (cmd->convert_src == TRIG_EXT) {
+		/* external trigger */
+		unsigned int tmp = CR_CHAN(cmd->convert_arg);
+
+		if (tmp > 16)
+			tmp = 16;
+		tmp |= (cmd->convert_arg & (CR_ALT_FILTER | CR_INVERT));
+		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, tmp);
+	} else if (cmd->convert_src == TRIG_NOW) {
+		err |= cfc_check_trigger_arg_is(&cmd->convert_arg, 0);
+	}
+
+	err |= cfc_check_trigger_arg_is(&cmd->scan_end_arg, cmd->nb_chan);
+
+	if (cmd->stop_src == TRIG_COUNT) {
+		unsigned int max_count = 0x01000000;
+
+		if (boardtype.reg_type == ni_reg_611x)
+			max_count -= num_adc_stages_611x;
+		err |= cfc_check_trigger_arg_max(&cmd->stop_arg, max_count);
+		err |= cfc_check_trigger_arg_min(&cmd->stop_arg, 1);
+
+	} else {
+		/* TRIG_NONE */
+		err |= cfc_check_trigger_arg_is(&cmd->stop_arg, 0);
+	}
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(3))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 3\n");
+		return -EINVAL;
+	}
+
+	/* step 4: fix up any arguments */
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		tmp = cmd->scan_begin_arg;
+		cmd->scan_begin_arg =
+		    ni_timer_to_ns(dev, ni_ns_to_timer(dev,
+						       cmd->scan_begin_arg,
+						       cmd->flags &
+						       TRIG_ROUND_MASK));
+		if (tmp != cmd->scan_begin_arg)
+			err++;
+	}
+	if (cmd->convert_src == TRIG_TIMER) {
+		if ((boardtype.reg_type != ni_reg_611x)
+		    && (boardtype.reg_type != ni_reg_6143)) {
+			tmp = cmd->convert_arg;
+			cmd->convert_arg =
+			    ni_timer_to_ns(dev, ni_ns_to_timer(dev,
+							       cmd->convert_arg,
+							       cmd->flags &
+							       TRIG_ROUND_MASK));
+			if (tmp != cmd->convert_arg)
+				err++;
+			if (cmd->scan_begin_src == TRIG_TIMER &&
+			    cmd->scan_begin_arg <
+			    cmd->convert_arg * cmd->scan_end_arg) {
+				cmd->scan_begin_arg =
+				    cmd->convert_arg * cmd->scan_end_arg;
+				err++;
+			}
+		}
+	}
+
+	if (err) {
+		if (cmd->valid_simul_stages & BIT(4))
+			return 0;
+
+		a4l_dbg(1, drv_dbg, dev, "ai_cmdtest ERR 4\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int ni_ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	int timer;
+	int mode1 = 0;		/* mode1 is needed for both stop and convert */
+	int mode2 = 0;
+	int start_stop_select = 0;
+	unsigned int stop_count;
+	int interrupt_a_enable = 0;
+
+	a4l_info(dev, "start\n");
+
+	if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) {
+		a4l_err(dev, "ni_ai_cmd: cannot run command without an irq");
+		return -EIO;
+	}
+	ni_clear_ai_fifo(dev);
+
+	ni_load_channelgain_list(dev, cmd->nb_chan, cmd->chan_descs);
+
+	/* start configuration */
+	devpriv->stc_writew(dev, AI_Configuration_Start, Joint_Reset_Register);
+
+	/* disable analog triggering for now, since it
+	 * interferes with the use of pfi0 */
+	devpriv->an_trig_etc_reg &= ~Analog_Trigger_Enable;
+	devpriv->stc_writew(dev, devpriv->an_trig_etc_reg,
+			    Analog_Trigger_Etc_Register);
+
+	switch (cmd->start_src) {
+	case TRIG_INT:
+	case TRIG_NOW:
+		devpriv->stc_writew(dev, AI_START2_Select(0) |
+				    AI_START1_Sync | AI_START1_Edge | AI_START1_Select(0),
+				    AI_Trigger_Select_Register);
+		break;
+	case TRIG_EXT:
+	{
+		int chan = CR_CHAN(cmd->start_arg);
+		unsigned int bits = AI_START2_Select(0) |
+			AI_START1_Sync | AI_START1_Select(chan + 1);
+
+		if (cmd->start_arg & CR_INVERT)
+			bits |= AI_START1_Polarity;
+		if (cmd->start_arg & CR_EDGE)
+			bits |= AI_START1_Edge;
+		devpriv->stc_writew(dev, bits,
+				    AI_Trigger_Select_Register);
+		break;
+	}
+	}
+
+	mode2 &= ~AI_Pre_Trigger;
+	mode2 &= ~AI_SC_Initial_Load_Source;
+	mode2 &= ~AI_SC_Reload_Mode;
+	devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+	if (cmd->nb_chan == 1 || (boardtype.reg_type == ni_reg_611x)
+	    || (boardtype.reg_type == ni_reg_6143)) {
+		start_stop_select |= AI_STOP_Polarity;
+		start_stop_select |= AI_STOP_Select(31);/* logic low */
+		start_stop_select |= AI_STOP_Sync;
+	} else {
+		start_stop_select |= AI_STOP_Select(19);/* ai configuration memory */
+	}
+	devpriv->stc_writew(dev, start_stop_select,
+			    AI_START_STOP_Select_Register);
+
+	devpriv->ai_cmd2 = 0;
+	switch (cmd->stop_src) {
+	case TRIG_COUNT:
+		stop_count = cmd->stop_arg - 1;
+
+		if (boardtype.reg_type == ni_reg_611x) {
+			/* have to take 3 stage adc pipeline into account */
+			stop_count += num_adc_stages_611x;
+		}
+		/* stage number of scans */
+		devpriv->stc_writel(dev, stop_count, AI_SC_Load_A_Registers);
+
+		mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Trigger_Once;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+		/* load SC (Scan Count) */
+		devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
+
+		devpriv->ai_continuous = 0;
+		if (stop_count == 0) {
+			devpriv->ai_cmd2 |= AI_End_On_End_Of_Scan;
+			interrupt_a_enable |= AI_STOP_Interrupt_Enable;
+			/* this is required to get the last sample
+			   for nb_chan > 1, not sure why */
+			if (cmd->nb_chan > 1)
+				start_stop_select |=
+					AI_STOP_Polarity | AI_STOP_Edge;
+		}
+		break;
+	case TRIG_NONE:
+		/* stage number of scans */
+		devpriv->stc_writel(dev, 0, AI_SC_Load_A_Registers);
+
+		mode1 |= AI_Start_Stop | AI_Mode_1_Reserved | AI_Continuous;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+
+		/* load SC (Scan Count) */
+		devpriv->stc_writew(dev, AI_SC_Load, AI_Command_1_Register);
+
+		devpriv->ai_continuous = 1;
+
+		break;
+	}
+
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		/*
+		  stop bits for non 611x boards
+		  AI_SI_Special_Trigger_Delay=0
+		  AI_Pre_Trigger=0
+		  AI_START_STOP_Select_Register:
+		  AI_START_Polarity=0 (?)      rising edge
+		  AI_START_Edge=1              edge triggered
+		  AI_START_Sync=1 (?)
+		  AI_START_Select=0            SI_TC
+		  AI_STOP_Polarity=0           rising edge
+		  AI_STOP_Edge=0               level
+		  AI_STOP_Sync=1
+		  AI_STOP_Select=19            external pin (configuration mem)
+		*/
+		start_stop_select |= AI_START_Edge | AI_START_Sync;
+		devpriv->stc_writew(dev, start_stop_select,
+				    AI_START_STOP_Select_Register);
+
+		mode2 |= AI_SI_Reload_Mode(0);
+		/* AI_SI_Initial_Load_Source=A */
+		mode2 &= ~AI_SI_Initial_Load_Source;
+
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		/* load SI */
+		timer = ni_ns_to_timer(dev, cmd->scan_begin_arg,
+				       TRIG_ROUND_NEAREST);
+		devpriv->stc_writel(dev, timer, AI_SI_Load_A_Registers);
+		devpriv->stc_writew(dev, AI_SI_Load, AI_Command_1_Register);
+		break;
+	case TRIG_EXT:
+		if (cmd->scan_begin_arg & CR_EDGE)
+			start_stop_select |= AI_START_Edge;
+		/* AI_START_Polarity==1 is falling edge */
+		if (cmd->scan_begin_arg & CR_INVERT)
+			start_stop_select |= AI_START_Polarity;
+		if (cmd->scan_begin_src != cmd->convert_src ||
+		    (cmd->scan_begin_arg & ~CR_EDGE) !=
+		    (cmd->convert_arg & ~CR_EDGE))
+			start_stop_select |= AI_START_Sync;
+		start_stop_select |=
+			AI_START_Select(1 + CR_CHAN(cmd->scan_begin_arg));
+		devpriv->stc_writew(dev, start_stop_select,
+				    AI_START_STOP_Select_Register);
+		break;
+	}
+
+	switch (cmd->convert_src) {
+	case TRIG_TIMER:
+	case TRIG_NOW:
+		if (cmd->convert_arg == 0 || cmd->convert_src == TRIG_NOW)
+			timer = 1;
+		else
+			timer = ni_ns_to_timer(dev, cmd->convert_arg,
+					       TRIG_ROUND_NEAREST);
+		devpriv->stc_writew(dev, 1, AI_SI2_Load_A_Register);	/* 0,0 does not work. */
+		devpriv->stc_writew(dev, timer, AI_SI2_Load_B_Register);
+
+		/* AI_SI2_Reload_Mode = alternate */
+		/* AI_SI2_Initial_Load_Source = A */
+		mode2 &= ~AI_SI2_Initial_Load_Source;
+		mode2 |= AI_SI2_Reload_Mode;
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		/* AI_SI2_Load */
+		devpriv->stc_writew(dev, AI_SI2_Load, AI_Command_1_Register);
+
+		mode2 |= AI_SI2_Reload_Mode; /* alternate */
+		mode2 |= AI_SI2_Initial_Load_Source; /* B */
+
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+		break;
+	case TRIG_EXT:
+		mode1 |= AI_CONVERT_Source_Select(1 + cmd->convert_arg);
+		if ((cmd->convert_arg & CR_INVERT) == 0)
+			mode1 |= AI_CONVERT_Source_Polarity;
+		devpriv->stc_writew(dev, mode1, AI_Mode_1_Register);
+
+		mode2 |= AI_Start_Stop_Gate_Enable | AI_SC_Gate_Enable;
+		devpriv->stc_writew(dev, mode2, AI_Mode_2_Register);
+
+		break;
+	}
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+
+		/* interrupt on FIFO, errors, SC_TC */
+		interrupt_a_enable |= AI_Error_Interrupt_Enable |
+			AI_SC_TC_Interrupt_Enable;
+
+#if (!defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) && \
+     !defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		interrupt_a_enable |= AI_FIFO_Interrupt_Enable;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		if (cmd->flags & TRIG_WAKE_EOS
+		    || (devpriv->ai_cmd2 & AI_End_On_End_Of_Scan)) {
+			/* wake on end-of-scan */
+			devpriv->aimode = AIMODE_SCAN;
+		} else {
+			devpriv->aimode = AIMODE_HALF_FULL;
+		}
+
+		switch (devpriv->aimode) {
+		case AIMODE_HALF_FULL:
+			/* generate FIFO interrupts and DMA requests on half-full */
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF_to_E,
+					    AI_Mode_3_Register);
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
+					    AI_Mode_3_Register);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			break;
+		case AIMODE_SAMPLE:
+			/* generate FIFO interrupts on non-empty */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
+					    AI_Mode_3_Register);
+			break;
+		case AIMODE_SCAN:
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+			devpriv->stc_writew(dev, AI_FIFO_Mode_NE,
+					    AI_Mode_3_Register);
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			devpriv->stc_writew(dev, AI_FIFO_Mode_HF,
+					    AI_Mode_3_Register);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+			interrupt_a_enable |= AI_STOP_Interrupt_Enable;
+			break;
+		default:
+			break;
+		}
+
+		/* Clear interrupts */
+		devpriv->stc_writew(dev,
+				    AI_Error_Interrupt_Ack | AI_STOP_Interrupt_Ack |
+				    AI_START_Interrupt_Ack | AI_START2_Interrupt_Ack |
+				    AI_START1_Interrupt_Ack | AI_SC_TC_Interrupt_Ack |
+				    AI_SC_TC_Error_Confirm, Interrupt_A_Ack_Register);
+
+		ni_set_bits(dev, Interrupt_A_Enable_Register,
+			    interrupt_a_enable, 1);
+
+		a4l_info(dev, "Interrupt_A_Enable_Register = 0x%04x\n",
+			 devpriv->int_a_enable_reg);
+	} else {
+		/* interrupt on nothing */
+		ni_set_bits(dev, Interrupt_A_Enable_Register, ~0, 0);
+
+		/* XXX start polling if necessary */
+		a4l_warn(dev, "ni_ai_cmd: interrupting on nothing\n");
+	}
+
+	/* end configuration */
+	devpriv->stc_writew(dev, AI_Configuration_End, Joint_Reset_Register);
+
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		devpriv->stc_writew(dev,
+				    AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm,
+				    AI_Command_1_Register);
+		break;
+	case TRIG_EXT:
+		/* XXX AI_SI_Arm? */
+		devpriv->stc_writew(dev,
+				    AI_SI2_Arm | AI_SI_Arm | AI_DIV_Arm | AI_SC_Arm,
+				    AI_Command_1_Register);
+		break;
+	}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	{
+		int retval = ni_ai_setup_MITE_dma(subd);
+		if (retval)
+			return retval;
+	}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	switch (cmd->start_src) {
+	case TRIG_NOW:
+		/* AI_START1_Pulse */
+		devpriv->stc_writew(dev, AI_START1_Pulse | devpriv->ai_cmd2,
+				    AI_Command_2_Register);
+		break;
+	case TRIG_EXT:
+		/* TODO: set trigger callback field to NULL */
+		break;
+	case TRIG_INT:
+		/* TODO: set trigger callback field to ni_ai_inttrig */
+		break;
+	}
+
+	a4l_info(dev, "exit\n");
+
+	return 0;
+}
+
+int ni_ai_config_analog_trig(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int a, b, modebits;
+	int err = 0;
+	uint32_t *data = (uint32_t *)insn->data;
+
+	/* data[1] is flags
+	 * data[2] is analog line
+	 * data[3] is set level
+	 * data[4] is reset level */
+	if (!boardtype.has_analog_trig)
+		return -EINVAL;
+
+	if ((data[1] & 0xffff0000) != A4L_EV_SCAN_BEGIN) {
+		data[1] &= (A4L_EV_SCAN_BEGIN | 0xffff);
+		err++;
+	}
+	if (data[2] >= boardtype.n_adchan) {
+		data[2] = boardtype.n_adchan - 1;
+		err++;
+	}
+	if (data[3] > 255) {	/* a */
+		data[3] = 255;
+		err++;
+	}
+	if (data[4] > 255) {	/* b */
+		data[4] = 255;
+		err++;
+	}
+	/*
+	 * 00 ignore
+	 * 01 set
+	 * 10 reset
+	 *
+	 * modes:
+	 *   1 level:                    +b-   +a-
+	 *     high mode                00 00 01 10
+	 *     low mode                 00 00 10 01
+	 *   2 level: (a<b)
+	 *     hysteresis low mode      10 00 00 01
+	 *     hysteresis high mode     01 00 00 10
+	 *     middle mode              10 01 01 10
+	 */
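+	/*
+	 * Decoding example (illustrative): modebits = 0x96 is 10 01 01 10
+	 * in the 2-bit fields above (+b- +a-): reset when crossing b going
+	 * up, set when re-entering the window from either side, reset when
+	 * crossing a going down -- the "middle window" matched by the 0x96
+	 * case below.  The nibble swap when b < a keeps the same table
+	 * valid for either argument order.
+	 */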
+
+	a = data[3];
+	b = data[4];
+	modebits = data[1] & 0xff;
+	if (modebits & 0xf0) {
+		/* two level mode */
+		if (b < a) {
+			/* swap order */
+			a = data[4];
+			b = data[3];
+			modebits = ((data[1] & 0xf) << 4) |
+				((data[1] & 0xf0) >> 4);
+		}
+		devpriv->atrig_low = a;
+		devpriv->atrig_high = b;
+		switch (modebits) {
+		case 0x81:	/* low hysteresis mode */
+			devpriv->atrig_mode = 6;
+			break;
+		case 0x42:	/* high hysteresis mode */
+			devpriv->atrig_mode = 3;
+			break;
+		case 0x96:	/* middle window mode */
+			devpriv->atrig_mode = 2;
+			break;
+		default:
+			data[1] &= ~0xff;
+			err++;
+		}
+	} else {
+		/* one level mode */
+		if (b != 0) {
+			data[4] = 0;
+			err++;
+		}
+		switch (modebits) {
+		case 0x06:	/* high window mode */
+			devpriv->atrig_high = a;
+			devpriv->atrig_mode = 0;
+			break;
+		case 0x09:	/* low window mode */
+			devpriv->atrig_low = a;
+			devpriv->atrig_mode = 1;
+			break;
+		default:
+			data[1] &= ~0xff;
+			err++;
+		}
+	}
+
+	if (err)
+		return -EAGAIN;
+
+	return 0;
+}
+
+int ni_ai_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_ANALOG_TRIG:
+		return ni_ai_config_analog_trig(subd, insn);
+	case A4L_INSN_CONFIG_ALT_SOURCE:
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			if (data[1] & ~(MSeries_AI_Bypass_Cal_Sel_Pos_Mask |
+					MSeries_AI_Bypass_Cal_Sel_Neg_Mask |
+					MSeries_AI_Bypass_Mode_Mux_Mask |
+					MSeries_AO_Bypass_AO_Cal_Sel_Mask)) {
+				return -EINVAL;
+			}
+			devpriv->ai_calib_source = data[1];
+		} else if (boardtype.reg_type == ni_reg_6143) {
+			unsigned int calib_source;
+
+			/* the 0xf mask already bounds the value */
+			calib_source = data[1] & 0xf;
+
+			devpriv->ai_calib_source = calib_source;
+			ni_writew(calib_source, Calibration_Channel_6143);
+		} else {
+			unsigned int calib_source;
+			unsigned int calib_source_adjust;
+
+			calib_source = data[1] & 0xf;
+			calib_source_adjust = (data[1] >> 4) & 0xff;
+
+			if (calib_source >= 8)
+				return -EINVAL;
+			devpriv->ai_calib_source = calib_source;
+			if (boardtype.reg_type == ni_reg_611x) {
+				ni_writeb(calib_source_adjust,
+					  Cal_Gain_Select_611x);
+			}
+		}
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
+
+/* munge data from unsigned to 2's complement for analog output bipolar modes */
+static void ni_ao_munge(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	struct a4l_device *dev = subd->dev;
+	struct a4l_cmd_desc *cmd = a4l_get_cmd(subd);
+	int chan_idx = a4l_get_chan(subd);
+	uint16_t *array = buf;
+	unsigned int i, range, offset;
+
+	offset = 1 << (boardtype.aobits - 1);
+	for (i = 0; i < size / sizeof(uint16_t); i++) {
+
+		range = CR_RNG(cmd->chan_descs[chan_idx]);
+		if (boardtype.ao_unipolar == 0 || (range & 1) == 0)
+			array[i] -= offset;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+		array[i] = cpu_to_le16(array[i]);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		chan_idx++;
+		chan_idx %= cmd->nb_chan;
+	}
+}
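+
+/*
+ * Worked example (illustrative, assuming a 16-bit DAC, aobits = 16, so
+ * offset = 0x8000): bipolar samples arrive from user space in
+ * offset-binary form and the 16-bit wrap-around subtraction turns them
+ * into the two's complement values the board expects:
+ *
+ *	0x8000 (mid-scale)	-> 0x0000 ( 0)
+ *	0xFFFF (full scale)	-> 0x7FFF (+32767)
+ *	0x0000 (minimum)	-> 0x8000 (-32768)
+ *
+ * Unipolar ranges (odd range index on boards with ao_unipolar) are
+ * passed through unchanged.
+ */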
+
+static int ni_m_series_ao_config_chan_descs(struct a4l_subdevice *subd,
+					    unsigned int chanspec[],
+					    unsigned int n_chans, int timed)
+{
+	unsigned int range;
+	unsigned int chan;
+	unsigned int conf;
+	int i, invert = 0;
+	struct a4l_device *dev = subd->dev;
+
+	for (i = 0; i < boardtype.n_aochan; ++i) {
+		ni_writeb(0xf, M_Offset_AO_Waveform_Order(i));
+	}
+	for (i = 0; i < n_chans; i++) {
+		struct a4l_range *rng;
+		int idx;
+		chan = CR_CHAN(chanspec[i]);
+		range = CR_RNG(chanspec[i]);
+
+		/* TODO: this is a huge hack!
+		   Something is missing in the kernel API. We must
+		   allow access to the proper range descriptor */
+		idx =  (subd->rng_desc->mode !=
+			A4L_RNG_GLOBAL_RNGDESC) ? chan : 0;
+		rng = &(subd->rng_desc->rngtabs[idx]->rngs[range]);
+
+		invert = 0;
+		conf = 0;
+		switch (rng->max - rng->min) {
+		case 20000000:
+			conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
+			ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 10000000:
+			conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
+			ni_writeb(0, M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 4000000:
+			conf |= MSeries_AO_DAC_Reference_10V_Internal_Bits;
+			ni_writeb(MSeries_Attenuate_x5_Bit,
+				  M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		case 2000000:
+			conf |= MSeries_AO_DAC_Reference_5V_Internal_Bits;
+			ni_writeb(MSeries_Attenuate_x5_Bit,
+				  M_Offset_AO_Reference_Attenuation(chan));
+			break;
+		default:
+			a4l_err(subd->dev,
+				"%s: bug! unhandled ao reference voltage\n",
+				__FUNCTION__);
+			break;
+		}
+		switch (rng->max + rng->min) {
+		case 0:
+			conf |= MSeries_AO_DAC_Offset_0V_Bits;
+			break;
+		case 10000000:
+			conf |= MSeries_AO_DAC_Offset_5V_Bits;
+			break;
+		default:
+			a4l_err(subd->dev,
+				"%s: bug! unhandled ao offset voltage\n",
+				__FUNCTION__);
+			break;
+		}
+		if (timed)
+			conf |= MSeries_AO_Update_Timed_Bit;
+		ni_writeb(conf, M_Offset_AO_Config_Bank(chan));
+		devpriv->ao_conf[chan] = conf;
+		ni_writeb(i, M_Offset_AO_Waveform_Order(chan));
+	}
+	return invert;
+}
+
+static int ni_old_ao_config_chan_descs(struct a4l_subdevice *subd,
+				       unsigned int chanspec[],
+				       unsigned int n_chans)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int range;
+	unsigned int chan;
+	unsigned int conf;
+	int i, invert = 0;
+
+	for (i = 0; i < n_chans; i++) {
+		chan = CR_CHAN(chanspec[i]);
+		range = CR_RNG(chanspec[i]);
+		conf = AO_Channel(chan);
+
+		if (boardtype.ao_unipolar) {
+			if ((range & 1) == 0) {
+				conf |= AO_Bipolar;
+				invert = (1 << (boardtype.aobits - 1));
+			} else {
+				invert = 0;
+			}
+			if (range & 2)
+				conf |= AO_Ext_Ref;
+		} else {
+			conf |= AO_Bipolar;
+			invert = (1 << (boardtype.aobits - 1));
+		}
+
+		/* not all boards can deglitch, but this shouldn't hurt */
+		if (chanspec[i] & CR_DEGLITCH)
+			conf |= AO_Deglitch;
+
+		/* analog reference */
+		/* AREF_OTHER connects AO ground to AI ground, I think */
+		conf |= (CR_AREF(chanspec[i]) ==
+			 AREF_OTHER) ? AO_Ground_Ref : 0;
+
+		ni_writew(conf, AO_Configuration);
+		devpriv->ao_conf[chan] = conf;
+	}
+	return invert;
+}
+
+static int ni_ao_config_chan_descs(struct a4l_subdevice *subd,
+				   unsigned int chanspec[],
+				   unsigned int n_chans, int timed)
+{
+	struct a4l_device *dev = subd->dev;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_ao_config_chan_descs(subd,
+							chanspec,
+							n_chans, timed);
+	else
+		return ni_old_ao_config_chan_descs(subd, chanspec, n_chans);
+}
+
+int ni_ao_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	data[0] = devpriv->ao[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+int ni_ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan = CR_CHAN(insn->chan_desc);
+	uint16_t *data = (uint16_t *)insn->data;
+	unsigned int invert;
+
+	invert = ni_ao_config_chan_descs(subd,
+					 &insn->chan_desc, 1, 0);
+
+	devpriv->ao[chan] = data[0];
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		ni_writew(data[0], M_Offset_DAC_Direct_Data(chan));
+	} else
+		ni_writew(data[0] ^ invert,
+			  (chan) ? DAC1_Direct_Data : DAC0_Direct_Data);
+
+	return 0;
+}
+
+int ni_ao_insn_write_671x(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan = CR_CHAN(insn->chan_desc);
+	uint16_t *data = (uint16_t *)insn->data;
+	unsigned int invert;
+
+	ao_win_out(1 << chan, AO_Immediate_671x);
+	invert = 1 << (boardtype.aobits - 1);
+
+	ni_ao_config_chan_descs(subd, &insn->chan_desc, 1, 0);
+
+	devpriv->ao[chan] = data[0];
+	ao_win_out(data[0] ^ invert, DACx_Direct_Data_671x(chan));
+
+	return 0;
+}
+
+int ni_ao_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+	int ret, interrupt_b_bits, i;
+	static const int timeout = 1000;
+
+	if (trignum != 0)
+		return -EINVAL;
+
+	/* TODO: disable trigger until a command is recorded.
+	   A null trig at the beginning prevents the AO start trigger from
+	   executing more than once per command (and doing things like
+	   trying to allocate the AO DMA channel multiple times) */
+
+	ni_set_bits(dev, Interrupt_B_Enable_Register,
+		    AO_FIFO_Interrupt_Enable | AO_Error_Interrupt_Enable, 0);
+	interrupt_b_bits = AO_Error_Interrupt_Enable;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	devpriv->stc_writew(dev, 1, DAC_FIFO_Clear);
+	if (boardtype.reg_type & ni_reg_6xxx_mask)
+		ni_ao_win_outl(dev, 0x6, AO_FIFO_Offset_Load_611x);
+	ret = ni_ao_setup_MITE_dma(subd);
+	if (ret)
+		return ret;
+	ret = ni_ao_wait_for_dma_load(subd);
+	if (ret < 0)
+		return ret;
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	ret = ni_ao_prep_fifo(subd);
+	if (ret == 0)
+		return -EPIPE;
+
+	interrupt_b_bits |= AO_FIFO_Interrupt_Enable;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+	devpriv->stc_writew(dev, devpriv->ao_mode3 | AO_Not_An_UPDATE,
+			    AO_Mode_3_Register);
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+	/* wait for DACs to be loaded */
+	for (i = 0; i < timeout; i++) {
+		a4l_udelay(1);
+		if ((devpriv->stc_readw(dev,Joint_Status_2_Register) &
+		     AO_TMRDACWRs_In_Progress_St) == 0)
+			break;
+	}
+	if (i == timeout) {
+		a4l_err(dev,
+			"ni_ao_inttrig: timed out "
+			"waiting for AO_TMRDACWRs_In_Progress_St to clear");
+		return -EIO;
+	}
+	/* the STC manual says we need to clear the error interrupt after
+	   AO_TMRDACWRs_In_Progress_St clears */
+	devpriv->stc_writew(dev, AO_Error_Interrupt_Ack,
+			    Interrupt_B_Ack_Register);
+
+	ni_set_bits(dev, Interrupt_B_Enable_Register, interrupt_b_bits, 1);
+
+	devpriv->stc_writew(dev,
+			    devpriv->ao_cmd1 |
+			    AO_UI_Arm | AO_UC_Arm |
+			    AO_BC_Arm | AO_DAC1_Update_Mode |
+			    AO_DAC0_Update_Mode,
+			    AO_Command_1_Register);
+
+	devpriv->stc_writew(dev,
+			    devpriv->ao_cmd2 | AO_START1_Pulse,
+			    AO_Command_2_Register);
+
+	return 0;
+}
+
+int ni_ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	int bits;
+	int i;
+	unsigned trigvar;
+
+	if (a4l_get_irq(dev) == A4L_IRQ_UNUSED) {
+		a4l_err(dev, "ni_ao_cmd: cannot run command without an irq");
+		return -EIO;
+	}
+
+	devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
+
+	devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
+
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ao_win_out(CLEAR_WG, AO_Misc_611x);
+
+		bits = 0;
+		for (i = 0; i < cmd->nb_chan; i++) {
+			int chan;
+
+			chan = CR_CHAN(cmd->chan_descs[i]);
+			bits |= 1 << chan;
+			ao_win_out(chan, AO_Waveform_Generation_611x);
+		}
+		ao_win_out(bits, AO_Timed_611x);
+	}
+
+	ni_ao_config_chan_descs(subd, cmd->chan_descs, cmd->nb_chan, 1);
+
+	if (cmd->stop_src == TRIG_NONE) {
+		devpriv->ao_mode1 |= AO_Continuous;
+		devpriv->ao_mode1 &= ~AO_Trigger_Once;
+	} else {
+		devpriv->ao_mode1 &= ~AO_Continuous;
+		devpriv->ao_mode1 |= AO_Trigger_Once;
+	}
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_trigger_select &=
+		~(AO_START1_Polarity | AO_START1_Select(-1));
+	devpriv->ao_trigger_select |= AO_START1_Edge | AO_START1_Sync;
+	devpriv->stc_writew(dev, devpriv->ao_trigger_select,
+			    AO_Trigger_Select_Register);
+	devpriv->ao_mode3 &= ~AO_Trigger_Length;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 &= ~AO_BC_Initial_Load_Source;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	if (cmd->stop_src == TRIG_NONE) {
+		devpriv->stc_writel(dev, 0xffffff, AO_BC_Load_A_Register);
+	} else {
+		devpriv->stc_writel(dev, 0, AO_BC_Load_A_Register);
+	}
+	devpriv->stc_writew(dev, AO_BC_Load, AO_Command_1_Register);
+	devpriv->ao_mode2 &= ~AO_UC_Initial_Load_Source;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	switch (cmd->stop_src) {
+	case TRIG_COUNT:
+		devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, cmd->stop_arg - 1,
+				    AO_UC_Load_A_Register);
+		break;
+	case TRIG_NONE:
+		devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, 0xffffff, AO_UC_Load_A_Register);
+		break;
+	default:
+		devpriv->stc_writel(dev, 0, AO_UC_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UC_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, cmd->stop_arg, AO_UC_Load_A_Register);
+	}
+
+	devpriv->ao_mode1 &=
+		~(AO_UI_Source_Select(0x1f) | AO_UI_Source_Polarity |
+		  AO_UPDATE_Source_Select(0x1f) | AO_UPDATE_Source_Polarity);
+	switch (cmd->scan_begin_src) {
+	case TRIG_TIMER:
+		devpriv->ao_cmd2 &= ~AO_BC_Gate_Enable;
+		trigvar =
+			ni_ns_to_timer(dev, cmd->scan_begin_arg,
+				       TRIG_ROUND_NEAREST);
+		devpriv->stc_writel(dev, 1, AO_UI_Load_A_Register);
+		devpriv->stc_writew(dev, AO_UI_Load, AO_Command_1_Register);
+		devpriv->stc_writel(dev, trigvar, AO_UI_Load_A_Register);
+		break;
+	case TRIG_EXT:
+		devpriv->ao_mode1 |=
+			AO_UPDATE_Source_Select(cmd->scan_begin_arg);
+		if (cmd->scan_begin_arg & CR_INVERT)
+			devpriv->ao_mode1 |= AO_UPDATE_Source_Polarity;
+		devpriv->ao_cmd2 |= AO_BC_Gate_Enable;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 &=
+		~(AO_UI_Reload_Mode(3) | AO_UI_Initial_Load_Source);
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+
+	if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) {
+		if (cmd->scan_end_arg > 1) {
+			devpriv->ao_mode1 |= AO_Multiple_Channels;
+			devpriv->stc_writew(dev,
+					    AO_Number_Of_Channels(cmd->scan_end_arg - 1) |
+					    AO_UPDATE_Output_Select
+					    (AO_Update_Output_High_Z),
+					    AO_Output_Control_Register);
+		} else {
+			unsigned int bits;
+			devpriv->ao_mode1 &= ~AO_Multiple_Channels;
+			bits = AO_UPDATE_Output_Select(AO_Update_Output_High_Z);
+			if (boardtype.reg_type & ni_reg_m_series_mask) {
+				bits |= AO_Number_Of_Channels(0);
+			} else {
+				bits |= AO_Number_Of_Channels(
+						CR_CHAN(cmd->chan_descs[0]));
+			}
+			devpriv->stc_writew(dev, bits,
+					    AO_Output_Control_Register);
+		}
+		devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	}
+
+	devpriv->stc_writew(dev, AO_DAC0_Update_Mode | AO_DAC1_Update_Mode,
+			    AO_Command_1_Register);
+
+	devpriv->ao_mode3 |= AO_Stop_On_Overrun_Error;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+
+	devpriv->ao_mode2 &= ~AO_FIFO_Mode_Mask;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	devpriv->ao_mode2 |= AO_FIFO_Mode_HF_to_F;
+#else /* !CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	devpriv->ao_mode2 |= AO_FIFO_Mode_HF;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+	devpriv->ao_mode2 &= ~AO_FIFO_Retransmit_Enable;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+
+	bits = AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
+		AO_TMRDACWR_Pulse_Width;
+	if (boardtype.ao_fifo_depth)
+		bits |= AO_FIFO_Enable;
+	else
+		bits |= AO_DMA_PIO_Control;
+#if 0
+	/* F Hess: windows driver does not set AO_Number_Of_DAC_Packages bit for 6281,
+	   verified with bus analyzer. */
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		bits |= AO_Number_Of_DAC_Packages;
+#endif
+	devpriv->stc_writew(dev, bits, AO_Personal_Register);
+	/* enable sending of ao dma requests */
+	devpriv->stc_writew(dev, AO_AOFREQ_Enable, AO_Start_Select_Register);
+
+	devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
+
+	if (cmd->stop_src == TRIG_COUNT) {
+		devpriv->stc_writew(dev, AO_BC_TC_Interrupt_Ack,
+				    Interrupt_B_Ack_Register);
+		ni_set_bits(dev, Interrupt_B_Enable_Register,
+			    AO_BC_TC_Interrupt_Enable, 1);
+	}
+
+	return 0;
+}
+
+struct a4l_cmd_desc mio_ao_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_INT,
+	.scan_begin_src = TRIG_TIMER | TRIG_EXT,
+	.convert_src = TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+int ni_ao_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	/* Make sure trigger sources are unique and mutually compatible */
+
+	if (cmd->stop_src != TRIG_COUNT && cmd->stop_src != TRIG_NONE)
+		return -EINVAL;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+		if (cmd->scan_begin_arg < boardtype.ao_speed) {
+			cmd->scan_begin_arg = boardtype.ao_speed;
+			return -EINVAL;
+		}
+		if (cmd->scan_begin_arg > devpriv->clock_ns * 0xffffff) {
+			/* XXX check */
+			cmd->scan_begin_arg = devpriv->clock_ns * 0xffffff;
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		cmd->scan_end_arg = cmd->nb_chan;
+		return -EINVAL;
+	}
+	if (cmd->stop_src == TRIG_COUNT) {
+		/* XXX check */
+		if (cmd->stop_arg > 0x00ffffff) {
+			cmd->stop_arg = 0x00ffffff;
+			return -EINVAL;
+		}
+	} else {
+		/* TRIG_NONE */
+		if (cmd->stop_arg != 0) {
+			cmd->stop_arg = 0;
+			return -EINVAL;
+		}
+	}
+
+	/* step 4: fix up any arguments */
+	if (cmd->scan_begin_src == TRIG_TIMER) {
+
+		if (cmd->scan_begin_arg !=
+		   ni_timer_to_ns(dev,
+				  ni_ns_to_timer(dev,
+						 cmd->scan_begin_arg,
+						 cmd->flags & TRIG_ROUND_MASK)))
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+void ni_ao_reset(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+
+	ni_release_ao_mite_channel(dev);
+
+	devpriv->stc_writew(dev, AO_Configuration_Start, Joint_Reset_Register);
+	devpriv->stc_writew(dev, AO_Disarm, AO_Command_1_Register);
+	ni_set_bits(dev, Interrupt_B_Enable_Register, ~0, 0);
+	devpriv->stc_writew(dev, AO_BC_Source_Select, AO_Personal_Register);
+	devpriv->stc_writew(dev, 0x3f98, Interrupt_B_Ack_Register);
+	devpriv->stc_writew(dev, AO_BC_Source_Select | AO_UPDATE_Pulse_Width |
+			    AO_TMRDACWR_Pulse_Width, AO_Personal_Register);
+	devpriv->stc_writew(dev, 0, AO_Output_Control_Register);
+	devpriv->stc_writew(dev, 0, AO_Start_Select_Register);
+	devpriv->ao_cmd1 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_cmd1, AO_Command_1_Register);
+	devpriv->ao_cmd2 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_cmd2, AO_Command_2_Register);
+	devpriv->ao_mode1 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode1, AO_Mode_1_Register);
+	devpriv->ao_mode2 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode2, AO_Mode_2_Register);
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		devpriv->ao_mode3 = AO_Last_Gate_Disable;
+	else
+		devpriv->ao_mode3 = 0;
+	devpriv->stc_writew(dev, devpriv->ao_mode3, AO_Mode_3_Register);
+	devpriv->ao_trigger_select = 0;
+	devpriv->stc_writew(dev, devpriv->ao_trigger_select,
+			    AO_Trigger_Select_Register);
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ao_win_out(0x3, AO_Immediate_671x);
+		ao_win_out(CLEAR_WG, AO_Misc_611x);
+	}
+	devpriv->stc_writew(dev, AO_Configuration_End, Joint_Reset_Register);
+}
+
+/* digital io */
+
+int ni_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]);
+#endif /* CONFIG_DEBUG_DIO */
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc));
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bits &
+			   (1 << CR_CHAN(insn->chan_desc))) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	devpriv->dio_control &= ~DIO_Pins_Dir_Mask;
+	devpriv->dio_control |= DIO_Pins_Dir(devpriv->io_bits);
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+
+	return 1;
+}
+
+int ni_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		/* Perform check to make sure we're not using the
+		   serial part of the dio */
+		if ((data[0] & (DIO_SDIN | DIO_SDOUT))
+		    && devpriv->serial_interval_ns)
+			return -EBUSY;
+
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		devpriv->dio_output &= ~DIO_Parallel_Data_Mask;
+		devpriv->dio_output |=
+			DIO_Parallel_Data_Out(devpriv->dio_state);
+		devpriv->stc_writew(dev, devpriv->dio_output,
+				    DIO_Output_Register);
+	}
+
+	data[1] = (uint8_t)
+		devpriv->stc_readw(dev, DIO_Parallel_Input_Register);
+
+	return 0;
+}
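+
+/*
+ * Usage sketch (hypothetical values): data[0] is the write mask and
+ * data[1] carries the new bit values; only masked lines change state,
+ * and data[1] returns the parallel input on the way out:
+ *
+ *	data[0] = 0x0f;		// touch lines 0-3 only
+ *	data[1] = 0x05;		// drive 0101 on those lines
+ *	// after the call, data[1] holds DIO_Parallel_Input_Register
+ */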
+
+int ni_m_series_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "chan=%d io=%d\n", CR_CHAN(insn->chan_desc), data[0]);
+#endif
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		devpriv->io_bits |= 1 << CR_CHAN(insn->chan_desc);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		devpriv->io_bits &= ~(1 << CR_CHAN(insn->chan_desc));
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bits &
+			   (1 << CR_CHAN(insn->chan_desc))) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	ni_writel(devpriv->io_bits, M_Offset_DIO_Direction);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_bits_8(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output);
+	}
+
+	data[1] = (uint8_t) ni_readl(M_Offset_Static_Digital_Input);
+
+	return 0;
+}
+
+int ni_m_series_dio_insn_bits_32(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint32_t *data = (uint32_t *)insn->data;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "mask=0x%x bits=0x%x\n", data[0], data[1]);
+#endif
+
+	if (insn->data_size != 2 * sizeof(uint32_t))
+		return -EINVAL;
+
+	if (data[0]) {
+		devpriv->dio_state &= ~data[0];
+		devpriv->dio_state |= (data[0] & data[1]);
+		ni_writel(devpriv->dio_state, M_Offset_Static_Digital_Output);
+	}
+
+	data[1] = ni_readl(M_Offset_Static_Digital_Input);
+
+	return 0;
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+struct a4l_cmd_desc mio_dio_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_INT,
+	.scan_begin_src = TRIG_EXT,
+	.convert_src = TRIG_NOW,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
+
+int ni_cdio_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	unsigned int i;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_arg != 0) {
+		cmd->start_arg = 0;
+		return -EINVAL;
+	}
+
+	if ((cmd->scan_begin_arg &
+	     PACK_FLAGS(CDO_Sample_Source_Select_Mask, 0, 0, CR_INVERT)) !=
+	    cmd->scan_begin_arg)
+		return -EINVAL;
+
+	if (cmd->convert_arg != 0) {
+		cmd->convert_arg = 0;
+		return -EINVAL;
+	}
+
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		cmd->scan_end_arg = cmd->nb_chan;
+		return -EINVAL;
+	}
+
+	if (cmd->stop_arg != 0) {
+		cmd->stop_arg = 0;
+		return -EINVAL;
+	}
+
+	/* Check chan_descs */
+
+	for (i = 0; i < cmd->nb_chan; ++i) {
+		if (cmd->chan_descs[i] != i)
+			return -EINVAL;
+	}
+
+	return 0;
+}
+
+int ni_cdio_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned cdo_mode_bits = CDO_FIFO_Mode_Bit | CDO_Halt_On_Error_Bit;
+
+	ni_writel(CDO_Reset_Bit, M_Offset_CDIO_Command);
+	switch (cmd->scan_begin_src) {
+	case TRIG_EXT:
+		cdo_mode_bits |=
+			CR_CHAN(cmd->scan_begin_arg) &
+			CDO_Sample_Source_Select_Mask;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	if (cmd->scan_begin_arg & CR_INVERT)
+		cdo_mode_bits |= CDO_Polarity_Bit;
+	ni_writel(cdo_mode_bits, M_Offset_CDO_Mode);
+
+	if (devpriv->io_bits) {
+		ni_writel(devpriv->dio_state, M_Offset_CDO_FIFO_Data);
+		ni_writel(CDO_SW_Update_Bit, M_Offset_CDIO_Command);
+		ni_writel(devpriv->io_bits, M_Offset_CDO_Mask_Enable);
+	} else {
+		a4l_err(dev,
+			"ni_cdio_cmd: attempted to run digital "
+			"output command with no lines configured as outputs");
+		return -EIO;
+	}
+
+	return 0;
+}
+
+void ni_cdio_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	ni_writel(CDO_Disarm_Bit | CDO_Error_Interrupt_Enable_Clear_Bit |
+		  CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit |
+		  CDO_FIFO_Request_Interrupt_Enable_Clear_Bit,
+		  M_Offset_CDIO_Command);
+
+	ni_writel(0, M_Offset_CDO_Mask_Enable);
+	ni_release_cdo_mite_channel(dev);
+}
+
+int ni_cdo_inttrig(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct a4l_device *dev = subd->dev;
+	int err;
+	unsigned i;
+	const unsigned timeout = 1000;
+
+	/* TODO: disable trigger until a command is recorded.
+	   A null trig at the beginning prevents the start trigger from
+	   executing more than once per command (and doing things like
+	   trying to allocate the DMA channel multiple times) */
+
+	err = ni_cdo_setup_MITE_dma(subd);
+	if (err < 0)
+		return err;
+
+	/* wait for dma to fill output fifo */
+	for (i = 0; i < timeout; ++i) {
+		if (ni_readl(M_Offset_CDIO_Status) & CDO_FIFO_Full_Bit)
+			break;
+		a4l_udelay(10);
+	}
+
+	if (i == timeout) {
+		a4l_err(dev, "ni_cdo_inttrig: dma failed to fill cdo fifo!");
+		ni_cdio_cancel(subd);
+		return -EIO;
+	}
+
+	ni_writel(CDO_Arm_Bit |
+		  CDO_Error_Interrupt_Enable_Set_Bit |
+		  CDO_Empty_FIFO_Interrupt_Enable_Set_Bit,
+		  M_Offset_CDIO_Command);
+
+	return 0;
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static void handle_cdio_interrupt(struct a4l_device *dev)
+{
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+	unsigned cdio_status;
+	unsigned long flags;
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_DIO_SUBDEV);
+
+	if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
+		return;
+	}
+	rtdm_lock_get_irqsave(&devpriv->mite_channel_lock, flags);
+	if (devpriv->cdo_mite_chan) {
+		unsigned cdo_mite_status =
+			a4l_mite_get_status(devpriv->cdo_mite_chan);
+		if (cdo_mite_status & CHSR_LINKC) {
+			writel(CHOR_CLRLC,
+			       devpriv->mite->mite_io_addr +
+			       MITE_CHOR(devpriv->cdo_mite_chan->channel));
+		}
+		a4l_mite_sync_output_dma(devpriv->cdo_mite_chan, subd);
+	}
+	rtdm_lock_put_irqrestore(&devpriv->mite_channel_lock, flags);
+
+	cdio_status = ni_readl(M_Offset_CDIO_Status);
+	if (cdio_status & (CDO_Overrun_Bit | CDO_Underflow_Bit)) {
+		/* XXX just guessing this is needed and does something useful */
+		ni_writel(CDO_Error_Interrupt_Confirm_Bit, M_Offset_CDIO_Command);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	if (cdio_status & CDO_FIFO_Empty_Bit) {
+		ni_writel(CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit,
+			  M_Offset_CDIO_Command);
+	}
+	a4l_buf_evt(subd, 0);
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+}
+
+static int ni_serial_hw_readwrite8(struct a4l_device * dev,
+				   unsigned char data_out, unsigned char *data_in)
+{
+	unsigned int status1;
+	int err = 0, count = 20;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "outputting 0x%x\n", data_out);
+#endif
+
+	devpriv->dio_output &= ~DIO_Serial_Data_Mask;
+	devpriv->dio_output |= DIO_Serial_Data_Out(data_out);
+	devpriv->stc_writew(dev, devpriv->dio_output, DIO_Output_Register);
+
+	status1 = devpriv->stc_readw(dev, Joint_Status_1_Register);
+	if (status1 & DIO_Serial_IO_In_Progress_St) {
+		err = -EBUSY;
+		goto Error;
+	}
+
+	devpriv->dio_control |= DIO_HW_Serial_Start;
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+	devpriv->dio_control &= ~DIO_HW_Serial_Start;
+
+	/* Wait until STC says we're done, but don't loop infinitely. */
+	while ((status1 =
+		devpriv->stc_readw(dev,
+				   Joint_Status_1_Register)) &
+	       DIO_Serial_IO_In_Progress_St) {
+		/* Delay one bit per loop */
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+		if (--count < 0) {
+			a4l_err(dev,
+				"ni_serial_hw_readwrite8: "
+				"SPI serial I/O didn't finish in time!\n");
+			err = -ETIME;
+			goto Error;
+		}
+	}
+
+	/* Delay for last bit. This delay is absolutely necessary, because
+	   DIO_Serial_IO_In_Progress_St goes high one bit too early. */
+	a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+
+	if (data_in != NULL) {
+		*data_in = devpriv->stc_readw(dev, DIO_Serial_Input_Register);
+#ifdef CONFIG_DEBUG_DIO
+		a4l_info(dev, "inputted 0x%x\n", *data_in);
+#endif
+	}
+
+Error:
+	devpriv->stc_writew(dev, devpriv->dio_control, DIO_Control_Register);
+
+	return err;
+}
+
+static int ni_serial_sw_readwrite8(struct a4l_device * dev,
+				   unsigned char data_out, unsigned char *data_in)
+{
+	unsigned char mask, input = 0;
+
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "outputting 0x%x\n", data_out);
+#endif
+
+	/* Wait for one bit before transfer */
+	a4l_udelay((devpriv->serial_interval_ns + 999) / 1000);
+
+	for (mask = 0x80; mask; mask >>= 1) {
+		/* Output current bit; note that we cannot touch devpriv->dio_state
+		   because it is a per-subdevice field, and serial is
+		   a separate subdevice from DIO. */
+		devpriv->dio_output &= ~DIO_SDOUT;
+		if (data_out & mask) {
+			devpriv->dio_output |= DIO_SDOUT;
+		}
+		devpriv->stc_writew(dev, devpriv->dio_output,
+				    DIO_Output_Register);
+
+		/* Assert SDCLK (active low, inverted), wait for half of
+		   the delay, deassert SDCLK, and wait for the other half. */
+		devpriv->dio_control |= DIO_Software_Serial_Control;
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 2000);
+
+		devpriv->dio_control &= ~DIO_Software_Serial_Control;
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+
+		a4l_udelay((devpriv->serial_interval_ns + 999) / 2000);
+
+		/* Input current bit */
+		if (devpriv->stc_readw(dev,
+				       DIO_Parallel_Input_Register) & DIO_SDIN) {
+			input |= mask;
+		}
+	}
+#ifdef CONFIG_DEBUG_DIO
+	a4l_info(dev, "inputted 0x%x\n", input);
+#endif
+	if (data_in)
+		*data_in = input;
+
+	return 0;
+}
+
+int ni_serial_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	int err = 0;
+	unsigned char byte_out, byte_in = 0;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size != 2 * sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SERIAL_CLOCK:
+
+#ifdef CONFIG_DEBUG_DIO
+		a4l_info(dev, "SPI serial clock Config %d\n", data[1]);
+#endif
+
+		devpriv->serial_hw_mode = 1;
+		devpriv->dio_control |= DIO_HW_Serial_Enable;
+
+		if (data[1] == SERIAL_DISABLED) {
+			devpriv->serial_hw_mode = 0;
+			devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
+						  DIO_Software_Serial_Control);
+			data[1] = SERIAL_DISABLED;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_600NS) {
+			/* Warning: this clock speed is too fast to reliably
+			   control SCXI. */
+			devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase;
+			devpriv->clock_and_fout &= ~DIO_Serial_Out_Divide_By_2;
+			data[1] = SERIAL_600NS;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_1_2US) {
+			devpriv->dio_control &= ~DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase |
+				DIO_Serial_Out_Divide_By_2;
+			data[1] = SERIAL_1_2US;
+			devpriv->serial_interval_ns = data[1];
+		} else if (data[1] <= SERIAL_10US) {
+			devpriv->dio_control |= DIO_HW_Serial_Timebase;
+			devpriv->clock_and_fout |= Slow_Internal_Timebase |
+				DIO_Serial_Out_Divide_By_2;
+			/* Note: DIO_Serial_Out_Divide_By_2 only affects
+			   600ns/1.2us. If you turn divide_by_2 off with the
+			   slow clock, you will still get 10us, except then
+			   all your delays are wrong. */
+			data[1] = SERIAL_10US;
+			devpriv->serial_interval_ns = data[1];
+		} else {
+			devpriv->dio_control &= ~(DIO_HW_Serial_Enable |
+						  DIO_Software_Serial_Control);
+			devpriv->serial_hw_mode = 0;
+			data[1] = (data[1] / 1000) * 1000;
+			devpriv->serial_interval_ns = data[1];
+		}
+
+		devpriv->stc_writew(dev, devpriv->dio_control,
+				    DIO_Control_Register);
+		devpriv->stc_writew(dev, devpriv->clock_and_fout,
+				    Clock_and_FOUT_Register);
+		return 0;
+
+	case A4L_INSN_CONFIG_BIDIRECTIONAL_DATA:
+
+		if (devpriv->serial_interval_ns == 0) {
+			return -EINVAL;
+		}
+
+		byte_out = data[1] & 0xFF;
+
+		if (devpriv->serial_hw_mode) {
+			err = ni_serial_hw_readwrite8(dev, byte_out, &byte_in);
+		} else if (devpriv->serial_interval_ns > 0) {
+			err = ni_serial_sw_readwrite8(dev, byte_out, &byte_in);
+		} else {
+			a4l_err(dev,
+				"ni_serial_insn_config: serial disabled!\n");
+			return -EINVAL;
+		}
+		if (err < 0)
+			return err;
+		data[1] = byte_in & 0xFF;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	return -EINVAL;
+}
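+
+/*
+ * For reference, the data[] layout this handler expects from a caller
+ * (two unsigned ints, as checked above). The submit_insn() helper below
+ * is hypothetical; only the buffer contents mirror the code above.
+ */
+#if 0 /* illustrative sketch only */
+static void serial_config_example(void)
+{
+	unsigned int cfg[2];
+
+	cfg[0] = A4L_INSN_CONFIG_SERIAL_CLOCK;
+	cfg[1] = SERIAL_1_2US;	/* requested bit period */
+	submit_insn(cfg);	/* cfg[1] now holds the period actually set */
+
+	cfg[0] = A4L_INSN_CONFIG_BIDIRECTIONAL_DATA;
+	cfg[1] = 0xA5;		/* byte to shift out */
+	submit_insn(cfg);	/* cfg[1] now holds the byte shifted in */
+}
+#endif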
+
+void mio_common_detach(struct a4l_device * dev)
+{
+	if (dev->priv) {
+		if (devpriv->counter_dev) {
+			a4l_ni_gpct_device_destroy(devpriv->counter_dev);
+		}
+	}
+}
+
+static void init_ao_67xx(struct a4l_device * dev)
+{
+	struct a4l_subdevice *subd = a4l_get_subd(dev, NI_AO_SUBDEV);
+	int i;
+
+	if (subd == NULL) {
+		a4l_err(dev, "%s: unable to find AO subdevice\n", __FUNCTION__);
+		return;
+	}
+
+	for (i = 0; i < subd->chan_desc->length; i++)
+		ni_ao_win_outw(dev, AO_Channel(i) | 0x0,
+			       AO_Configuration_2_67xx);
+}
+
+static unsigned int ni_gpct_to_stc_register(enum ni_gpct_register reg)
+{
+	unsigned stc_register;
+	switch (reg) {
+	case NITIO_G0_Autoincrement_Reg:
+		stc_register = G_Autoincrement_Register(0);
+		break;
+	case NITIO_G1_Autoincrement_Reg:
+		stc_register = G_Autoincrement_Register(1);
+		break;
+	case NITIO_G0_Command_Reg:
+		stc_register = G_Command_Register(0);
+		break;
+	case NITIO_G1_Command_Reg:
+		stc_register = G_Command_Register(1);
+		break;
+	case NITIO_G0_HW_Save_Reg:
+		stc_register = G_HW_Save_Register(0);
+		break;
+	case NITIO_G1_HW_Save_Reg:
+		stc_register = G_HW_Save_Register(1);
+		break;
+	case NITIO_G0_SW_Save_Reg:
+		stc_register = G_Save_Register(0);
+		break;
+	case NITIO_G1_SW_Save_Reg:
+		stc_register = G_Save_Register(1);
+		break;
+	case NITIO_G0_Mode_Reg:
+		stc_register = G_Mode_Register(0);
+		break;
+	case NITIO_G1_Mode_Reg:
+		stc_register = G_Mode_Register(1);
+		break;
+	case NITIO_G0_LoadA_Reg:
+		stc_register = G_Load_A_Register(0);
+		break;
+	case NITIO_G1_LoadA_Reg:
+		stc_register = G_Load_A_Register(1);
+		break;
+	case NITIO_G0_LoadB_Reg:
+		stc_register = G_Load_B_Register(0);
+		break;
+	case NITIO_G1_LoadB_Reg:
+		stc_register = G_Load_B_Register(1);
+		break;
+	case NITIO_G0_Input_Select_Reg:
+		stc_register = G_Input_Select_Register(0);
+		break;
+	case NITIO_G1_Input_Select_Reg:
+		stc_register = G_Input_Select_Register(1);
+		break;
+	case NITIO_G01_Status_Reg:
+		stc_register = G_Status_Register;
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		stc_register = Joint_Reset_Register;
+		break;
+	case NITIO_G01_Joint_Status1_Reg:
+		stc_register = Joint_Status_1_Register;
+		break;
+	case NITIO_G01_Joint_Status2_Reg:
+		stc_register = Joint_Status_2_Register;
+		break;
+	case NITIO_G0_Interrupt_Acknowledge_Reg:
+		stc_register = Interrupt_A_Ack_Register;
+		break;
+	case NITIO_G1_Interrupt_Acknowledge_Reg:
+		stc_register = Interrupt_B_Ack_Register;
+		break;
+	case NITIO_G0_Status_Reg:
+		stc_register = AI_Status_1_Register;
+		break;
+	case NITIO_G1_Status_Reg:
+		stc_register = AO_Status_1_Register;
+		break;
+	case NITIO_G0_Interrupt_Enable_Reg:
+		stc_register = Interrupt_A_Enable_Register;
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		stc_register = Interrupt_B_Enable_Register;
+		break;
+	default:
+		__a4l_err("%s: unhandled register 0x%x in switch.\n",
+			  __FUNCTION__, reg);
+		BUG();
+		return 0;
+	}
+	return stc_register;
+}
+
+static void ni_gpct_write_register(struct ni_gpct *counter,
+				   unsigned int bits, enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	unsigned stc_register;
+	/* bits in the joint reset register which are relevant to counters */
+	static const unsigned gpct_joint_reset_mask = G0_Reset | G1_Reset;
+	static const unsigned gpct_interrupt_a_enable_mask =
+		G0_Gate_Interrupt_Enable | G0_TC_Interrupt_Enable;
+	static const unsigned gpct_interrupt_b_enable_mask =
+		G1_Gate_Interrupt_Enable | G1_TC_Interrupt_Enable;
+
+	switch (reg) {
+		/* m-series-only registers */
+	case NITIO_G0_Counting_Mode_Reg:
+		ni_writew(bits, M_Offset_G0_Counting_Mode);
+		break;
+	case NITIO_G1_Counting_Mode_Reg:
+		ni_writew(bits, M_Offset_G1_Counting_Mode);
+		break;
+	case NITIO_G0_Second_Gate_Reg:
+		ni_writew(bits, M_Offset_G0_Second_Gate);
+		break;
+	case NITIO_G1_Second_Gate_Reg:
+		ni_writew(bits, M_Offset_G1_Second_Gate);
+		break;
+	case NITIO_G0_DMA_Config_Reg:
+		ni_writew(bits, M_Offset_G0_DMA_Config);
+		break;
+	case NITIO_G1_DMA_Config_Reg:
+		ni_writew(bits, M_Offset_G1_DMA_Config);
+		break;
+	case NITIO_G0_ABZ_Reg:
+		ni_writew(bits, M_Offset_G0_MSeries_ABZ);
+		break;
+	case NITIO_G1_ABZ_Reg:
+		ni_writew(bits, M_Offset_G1_MSeries_ABZ);
+		break;
+
+		/* 32 bit registers */
+	case NITIO_G0_LoadA_Reg:
+	case NITIO_G1_LoadA_Reg:
+	case NITIO_G0_LoadB_Reg:
+	case NITIO_G1_LoadB_Reg:
+		stc_register = ni_gpct_to_stc_register(reg);
+		devpriv->stc_writel(dev, bits, stc_register);
+		break;
+
+		/* 16 bit registers */
+	case NITIO_G0_Interrupt_Enable_Reg:
+		BUG_ON(bits & ~gpct_interrupt_a_enable_mask);
+		ni_set_bitfield(dev, Interrupt_A_Enable_Register,
+				gpct_interrupt_a_enable_mask, bits);
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		BUG_ON(bits & ~gpct_interrupt_b_enable_mask);
+		ni_set_bitfield(dev, Interrupt_B_Enable_Register,
+				gpct_interrupt_b_enable_mask, bits);
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		BUG_ON(bits & ~gpct_joint_reset_mask);
+		fallthrough;
+	default:
+		stc_register = ni_gpct_to_stc_register(reg);
+		devpriv->stc_writew(dev, bits, stc_register);
+	}
+}
+
+static unsigned int ni_gpct_read_register(struct ni_gpct *counter,
+					  enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	unsigned int stc_register;
+	switch (reg) {
+		/* m-series only registers */
+	case NITIO_G0_DMA_Status_Reg:
+		return ni_readw(M_Offset_G0_DMA_Status);
+	case NITIO_G1_DMA_Status_Reg:
+		return ni_readw(M_Offset_G1_DMA_Status);
+
+		/* 32 bit registers */
+	case NITIO_G0_HW_Save_Reg:
+	case NITIO_G1_HW_Save_Reg:
+	case NITIO_G0_SW_Save_Reg:
+	case NITIO_G1_SW_Save_Reg:
+		stc_register = ni_gpct_to_stc_register(reg);
+		return devpriv->stc_readl(dev, stc_register);
+
+		/* 16 bit registers */
+	default:
+		stc_register = ni_gpct_to_stc_register(reg);
+		return devpriv->stc_readw(dev, stc_register);
+	}
+}
+
+int ni_freq_out_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = FOUT_Divider(devpriv->clock_and_fout);
+
+	return 0;
+}
+
+int ni_freq_out_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	devpriv->clock_and_fout &= ~FOUT_Enable;
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+	devpriv->clock_and_fout &= ~FOUT_Divider_mask;
+	devpriv->clock_and_fout |= FOUT_Divider(data[0]);
+	devpriv->clock_and_fout |= FOUT_Enable;
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	return 0;
+}
+
+static int ni_set_freq_out_clock(struct a4l_device * dev, lsampl_t clock_source)
+{
+	switch (clock_source) {
+	case NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC:
+		devpriv->clock_and_fout &= ~FOUT_Timebase_Select;
+		break;
+	case NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC:
+		devpriv->clock_and_fout |= FOUT_Timebase_Select;
+		break;
+	default:
+		return -EINVAL;
+	}
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	return 0;
+}
+
+static void ni_get_freq_out_clock(struct a4l_device * dev,
+				  unsigned int * clock_source,
+				  unsigned int * clock_period_ns)
+{
+	if (devpriv->clock_and_fout & FOUT_Timebase_Select) {
+		*clock_source = NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC;
+		*clock_period_ns = TIMEBASE_2_NS;
+	} else {
+		*clock_source = NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC;
+		*clock_period_ns = TIMEBASE_1_NS * 2;
+	}
+}
+
+int ni_freq_out_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SET_CLOCK_SRC:
+		return ni_set_freq_out_clock(dev, data[1]);
+	case A4L_INSN_CONFIG_GET_CLOCK_SRC:
+		ni_get_freq_out_clock(dev, &data[1], &data[2]);
+		return 0;
+	default:
+		break;
+	}
+
+	return -EINVAL;
+}
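+
+/*
+ * Relation between the cached clock period and the 4-bit FOUT divider,
+ * assuming the usual DAQ-STC semantics where a divider field of 0
+ * selects divide-by-16; check the STC manual before relying on this.
+ */
+#if 0 /* illustrative sketch only */
+static unsigned int freq_out_period_ns(unsigned int clock_period_ns,
+				       unsigned int divider)
+{
+	if (divider == 0)
+		divider = 16;	/* 4-bit field, 0 wraps to 16 */
+
+	return clock_period_ns * divider;
+}
+#endif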
+
+static int ni_8255_callback(int dir, int port, int data, unsigned long arg)
+{
+	struct a4l_device *dev = (struct a4l_device *) arg;
+
+	if (dir) {
+		ni_writeb(data, Port_A + 2 * port);
+		return 0;
+	} else {
+		return ni_readb(Port_A + 2 * port);
+	}
+}
+
+/*
+  reads bytes out of eeprom
+*/
+
+static int ni_read_eeprom(struct a4l_device *dev, int addr)
+{
+	int bit;
+	int bitstring;
+
+	bitstring = 0x0300 | ((addr & 0x100) << 3) | (addr & 0xff);
+	ni_writeb(0x04, Serial_Command);
+	for (bit = 0x8000; bit; bit >>= 1) {
+		ni_writeb(0x04 | ((bit & bitstring) ? 0x02 : 0),
+			  Serial_Command);
+		ni_writeb(0x05 | ((bit & bitstring) ? 0x02 : 0),
+			  Serial_Command);
+	}
+	bitstring = 0;
+	for (bit = 0x80; bit; bit >>= 1) {
+		ni_writeb(0x04, Serial_Command);
+		ni_writeb(0x05, Serial_Command);
+		bitstring |= ((ni_readb(XXX_Status) & PROMOUT) ? bit : 0);
+	}
+	ni_writeb(0x00, Serial_Command);
+
+	return bitstring;
+}
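+
+/*
+ * The 16-bit command word built above packs the read opcode into bits
+ * 9-8 (0x0300), address bit 8 into bit 11, and the low address byte
+ * into bits 7-0; spelled out:
+ */
+#if 0 /* illustrative sketch only */
+static int eeprom_read_cmd(int addr)
+{
+	return 0x0300			/* read opcode / start bits */
+		| ((addr & 0x100) << 3)	/* A8 lands in bit 11 */
+		| (addr & 0xff);	/* A7..A0 stay in place */
+}
+#endif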
+
+/*
+  presents the EEPROM as a subdevice
+*/
+
+static int ni_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = ni_read_eeprom(dev, CR_CHAN(insn->chan_desc));
+
+	return 0;
+}
+
+
+static int ni_m_series_eeprom_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	data[0] = devpriv->eeprom_buffer[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+static int ni_get_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	data[1] = devpriv->pwm_up_count * devpriv->clock_ns;
+	data[2] = devpriv->pwm_down_count * devpriv->clock_ns;
+
+	return 0;
+}
+
+static int ni_m_series_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int up_count, down_count;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_PWM_OUTPUT:
+		switch (data[1]) {
+		case TRIG_ROUND_NEAREST:
+			up_count =
+				(data[2] +
+				 devpriv->clock_ns / 2) / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			up_count = data[2] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			up_count =(data[2] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (data[3]) {
+		case TRIG_ROUND_NEAREST:
+			down_count = (data[4] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			down_count = data[4] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			down_count =
+				(data[4] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (up_count * devpriv->clock_ns != data[2] ||
+		    down_count * devpriv->clock_ns != data[4]) {
+			data[2] = up_count * devpriv->clock_ns;
+			data[4] = down_count * devpriv->clock_ns;
+			return -EAGAIN;
+		}
+		ni_writel(MSeries_Cal_PWM_High_Time_Bits(up_count) |
+			  MSeries_Cal_PWM_Low_Time_Bits(down_count),
+			  M_Offset_Cal_PWM);
+		devpriv->pwm_up_count = up_count;
+		devpriv->pwm_down_count = down_count;
+		return 0;
+	case A4L_INSN_CONFIG_GET_PWM_OUTPUT:
+		return ni_get_pwm_config(subd, insn);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int ni_6143_pwm_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int*)insn->data;
+
+	unsigned up_count, down_count;
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_PWM_OUTPUT:
+		switch (data[1]) {
+		case TRIG_ROUND_NEAREST:
+			up_count =
+				(data[2] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			up_count = data[2] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			up_count = (data[2] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+		}
+		switch (data[3]) {
+		case TRIG_ROUND_NEAREST:
+			down_count = (data[4] + devpriv->clock_ns / 2) /
+				devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_DOWN:
+			down_count = data[4] / devpriv->clock_ns;
+			break;
+		case TRIG_ROUND_UP:
+			down_count = (data[4] + devpriv->clock_ns - 1) /
+				devpriv->clock_ns;
+			break;
+		default:
+			return -EINVAL;
+		}
+		if (up_count * devpriv->clock_ns != data[2] ||
+		    down_count * devpriv->clock_ns != data[4]) {
+			data[2] = up_count * devpriv->clock_ns;
+			data[4] = down_count * devpriv->clock_ns;
+			return -EAGAIN;
+		}
+		ni_writel(up_count, Calibration_HighTime_6143);
+		devpriv->pwm_up_count = up_count;
+		ni_writel(down_count, Calibration_LowTime_6143);
+		devpriv->pwm_down_count = down_count;
+		return 0;
+	case A4L_INSN_CONFIG_GET_PWM_OUTPUT:
+		return ni_get_pwm_config(subd, insn);
+	default:
+		return -EINVAL;
+	}
+}
+
+static int pack_mb88341(int addr, int val, int *bitstring)
+{
+	/*
+	  Fujitsu MB 88341
+	  Note that address bits are reversed.  Thanks to
+	  Ingo Keen for noticing this.
+
+	  Note also that the 88341 expects address values from
+	  1-12, whereas we use channel numbers 0-11.  The NI
+	  docs use 1-12, also, so be careful here.
+	*/
+	addr++;
+	*bitstring = ((addr & 0x1) << 11) |
+		((addr & 0x2) << 9) |
+		((addr & 0x4) << 7) | ((addr & 0x8) << 5) | (val & 0xff);
+	return 12;
+}
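+
+/*
+ * Equivalent form of the shifts above: the 4-bit address occupies bits
+ * 11-8 with its bit order reversed (a0 lands in bit 11, a3 in bit 8),
+ * followed by the 8-bit value.
+ */
+#if 0 /* illustrative sketch only */
+static int mb88341_word(int addr, int val)
+{
+	int rev = 0, i;
+
+	for (i = 0; i < 4; i++)	/* reverse the four address bits */
+		if (addr & (1 << i))
+			rev |= 1 << (3 - i);
+
+	return (rev << 8) | (val & 0xff);
+}
+#endif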
+
+static int pack_dac8800(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr & 0x7) << 8) | (val & 0xff);
+	return 11;
+}
+
+static int pack_dac8043(int addr, int val, int *bitstring)
+{
+	*bitstring = val & 0xfff;
+	return 12;
+}
+
+static int pack_ad8522(int addr, int val, int *bitstring)
+{
+	*bitstring = (val & 0xfff) | (addr ? 0xc000 : 0xa000);
+	return 16;
+}
+
+static int pack_ad8804(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr & 0xf) << 8) | (val & 0xff);
+	return 12;
+}
+
+static int pack_ad8842(int addr, int val, int *bitstring)
+{
+	*bitstring = ((addr + 1) << 8) | (val & 0xff);
+	return 12;
+}
+
+struct caldac_struct {
+	int n_chans;
+	int n_bits;
+	int (*packbits) (int, int, int *);
+};
+
+static struct caldac_struct caldacs[] = {
+	[mb88341] = {12, 8, pack_mb88341},
+	[dac8800] = {8, 8, pack_dac8800},
+	[dac8043] = {1, 12, pack_dac8043},
+	[ad8522] = {2, 12, pack_ad8522},
+	[ad8804] = {12, 8, pack_ad8804},
+	[ad8842] = {8, 8, pack_ad8842},
+	[ad8804_debug] = {16, 8, pack_ad8804},
+};
+
+static void ni_write_caldac(struct a4l_device * dev, int addr, int val)
+{
+	unsigned int loadbit = 0, bits = 0, bit, bitstring = 0;
+	int i;
+	int type;
+
+	if (devpriv->caldacs[addr] == val)
+		return;
+	devpriv->caldacs[addr] = val;
+
+	for (i = 0; i < 3; i++) {
+		type = boardtype.caldac[i];
+		if (type == caldac_none)
+			break;
+		if (addr < caldacs[type].n_chans) {
+			bits = caldacs[type].packbits(addr, val, &bitstring);
+			loadbit = SerDacLd(i);
+			break;
+		}
+		addr -= caldacs[type].n_chans;
+	}
+
+	for (bit = 1 << (bits - 1); bit; bit >>= 1) {
+		ni_writeb(((bit & bitstring) ? 0x02 : 0), Serial_Command);
+		a4l_udelay(1);
+		ni_writeb(1 | ((bit & bitstring) ? 0x02 : 0), Serial_Command);
+		a4l_udelay(1);
+	}
+	ni_writeb(loadbit, Serial_Command);
+	a4l_udelay(1);
+	ni_writeb(0, Serial_Command);
+}
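+
+/*
+ * Caldac addressing note: the global channel number indexes the chips'
+ * channels back to back, which is what the loop above unwinds. A sketch
+ * of the same lookup:
+ */
+#if 0 /* illustrative sketch only */
+static int caldac_chip_for_addr(const struct caldac_struct *chips,
+				int n_chips, int addr)
+{
+	int i;
+
+	for (i = 0; i < n_chips; i++) {
+		if (addr < chips[i].n_chans)
+			return i;	/* addr is now chip-local */
+		addr -= chips[i].n_chans;
+	}
+
+	return -1;			/* address out of range */
+}
+#endif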
+
+static void caldac_setup(struct a4l_device *dev, struct a4l_subdevice *subd)
+{
+	int i, j;
+	int n_dacs;
+	int n_chans = 0;
+	int n_bits;
+	int diffbits = 0;
+	int type;
+	int chan;
+
+	type = boardtype.caldac[0];
+	if (type == caldac_none)
+		return;
+	n_bits = caldacs[type].n_bits;
+	for (i = 0; i < 3; i++) {
+		type = boardtype.caldac[i];
+		if (type == caldac_none)
+			break;
+		if (caldacs[type].n_bits != n_bits)
+			diffbits = 1;
+		n_chans += caldacs[type].n_chans;
+	}
+	n_dacs = i;
+
+	if (diffbits) {
+
+		if (n_chans > MAX_N_CALDACS) {
+			a4l_err(dev, "BUG! MAX_N_CALDACS too small\n");
+		}
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  n_chans * sizeof(struct a4l_channel), GFP_KERNEL);
+		if (subd->chan_desc == NULL)
+			return;
+
+		memset(subd->chan_desc,
+		       0,
+		       sizeof(struct a4l_channels_desc) + n_chans * sizeof(struct a4l_channel));
+
+		subd->chan_desc->length = n_chans;
+		subd->chan_desc->mode = A4L_CHAN_PERCHAN_CHANDESC;
+
+		chan = 0;
+		for (i = 0; i < n_dacs; i++) {
+			type = boardtype.caldac[i];
+			for (j = 0; j < caldacs[type].n_chans; j++) {
+
+				subd->chan_desc->chans[chan].nb_bits =
+					caldacs[type].n_bits;
+
+				chan++;
+			}
+		}
+
+		for (chan = 0; chan < n_chans; chan++) {
+			unsigned long tmp =
+				(1 << subd->chan_desc->chans[chan].nb_bits) / 2;
+			ni_write_caldac(dev, chan, tmp);
+		}
+	} else {
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		if (subd->chan_desc == NULL)
+			return;
+
+		memset(subd->chan_desc,
+		       0, sizeof(struct a4l_channels_desc) + sizeof(struct a4l_channel));
+
+		subd->chan_desc->length = n_chans;
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+
+		type = boardtype.caldac[0];
+
+		subd->chan_desc->chans[0].nb_bits = caldacs[type].n_bits;
+
+		for (chan = 0; chan < n_chans; chan++)
+			ni_write_caldac(dev,
+					chan,
+					(1 << subd->chan_desc->chans[0].nb_bits) / 2);
+	}
+}
+
+static int ni_calib_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	ni_write_caldac(dev, CR_CHAN(insn->chan_desc), data[0]);
+	return 0;
+}
+
+static int ni_calib_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	data[0] = devpriv->caldacs[CR_CHAN(insn->chan_desc)];
+
+	return 0;
+}
+
+static int ni_gpct_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_insn_config(counter, insn);
+}
+
+static int ni_gpct_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_rinsn(counter, insn);
+}
+
+static int ni_gpct_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_winsn(counter, insn);
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static int ni_gpct_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	int retval;
+	struct a4l_device *dev = subd->dev;
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	struct mite_dma_descriptor_ring *ring;
+
+	retval = ni_request_gpct_mite_channel(dev,
+					      counter->counter_index,
+					      A4L_INPUT);
+	if (retval) {
+		a4l_err(dev,
+			"ni_gpct_cmd: "
+			"no dma channel available for use by counter\n");
+		return retval;
+	}
+
+	ring = devpriv->gpct_mite_ring[counter->counter_index];
+	retval = a4l_mite_buf_change(ring, subd);
+	if (retval) {
+		a4l_err(dev,
+			"ni_gpct_cmd: "
+			"dma ring configuration failed\n");
+		return retval;
+
+	}
+
+	a4l_ni_tio_acknowledge_and_confirm(counter, NULL, NULL, NULL, NULL);
+	ni_e_series_enable_second_irq(dev, counter->counter_index, 1);
+	retval = a4l_ni_tio_cmd(counter, cmd);
+
+	return retval;
+}
+
+static int ni_gpct_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+	return a4l_ni_tio_cmdtest(counter, cmd);
+}
+
+static void ni_gpct_cancel(struct a4l_subdevice *subd)
+{
+	struct a4l_device *dev = subd->dev;
+	struct ni_gpct *counter = (struct ni_gpct *)subd->priv;
+
+	a4l_ni_tio_cancel(counter);
+	ni_e_series_enable_second_irq(dev, counter->counter_index, 0);
+	ni_release_gpct_mite_channel(dev, counter->counter_index);
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+
+/*
+ *
+ *  Programmable Function Inputs
+ *
+ */
+
+static int ni_m_series_set_pfi_routing(struct a4l_device *dev,
+				       unsigned int chan, unsigned int source)
+{
+	unsigned int pfi_reg_index;
+	unsigned int array_offset;
+
+	if ((source & 0x1f) != source)
+		return -EINVAL;
+	pfi_reg_index = 1 + chan / 3;
+	array_offset = pfi_reg_index - 1;
+	devpriv->pfi_output_select_reg[array_offset] &=
+		~MSeries_PFI_Output_Select_Mask(chan);
+	devpriv->pfi_output_select_reg[array_offset] |=
+		MSeries_PFI_Output_Select_Bits(chan, source);
+	ni_writew(devpriv->pfi_output_select_reg[array_offset],
+		  M_Offset_PFI_Output_Select(pfi_reg_index));
+	return 2;
+}
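+
+/*
+ * Each M-series PFI output-select register carries three channels, and
+ * the registers are numbered from 1, hence the 1 + chan / 3 mapping
+ * used above:
+ */
+#if 0 /* illustrative sketch only */
+static unsigned int pfi_select_reg_index(unsigned int chan)
+{
+	return 1 + chan / 3;	/* chan 0-2 -> reg 1, chan 3-5 -> reg 2, ... */
+}
+#endif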
+
+static unsigned int ni_old_get_pfi_routing(struct a4l_device *dev,
+					   unsigned int chan)
+{
+	/* pre-m-series boards have fixed signals on pfi pins */
+
+	switch (chan) {
+	case 0:
+		return NI_PFI_OUTPUT_AI_START1;
+	case 1:
+		return NI_PFI_OUTPUT_AI_START2;
+	case 2:
+		return NI_PFI_OUTPUT_AI_CONVERT;
+	case 3:
+		return NI_PFI_OUTPUT_G_SRC1;
+	case 4:
+		return NI_PFI_OUTPUT_G_GATE1;
+	case 5:
+		return NI_PFI_OUTPUT_AO_UPDATE_N;
+	case 6:
+		return NI_PFI_OUTPUT_AO_START1;
+	case 7:
+		return NI_PFI_OUTPUT_AI_START_PULSE;
+	case 8:
+		return NI_PFI_OUTPUT_G_SRC0;
+	case 9:
+		return NI_PFI_OUTPUT_G_GATE0;
+	default:
+		__a4l_err("%s: bug, unhandled case in switch.\n",
+			  __FUNCTION__);
+		break;
+	}
+	return 0;
+}
+
+static int ni_old_set_pfi_routing(struct a4l_device *dev,
+				  unsigned int chan, unsigned int source)
+{
+	/* pre-m-series boards have fixed signals on pfi pins */
+	if (source != ni_old_get_pfi_routing(dev, chan))
+		return -EINVAL;
+
+	return 2;
+}
+
+static int ni_set_pfi_routing(struct a4l_device *dev,
+			      unsigned int chan, unsigned int source)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_set_pfi_routing(dev, chan, source);
+	else
+		return ni_old_set_pfi_routing(dev, chan, source);
+}
+
+static unsigned int ni_m_series_get_pfi_routing(struct a4l_device *dev,
+						unsigned int chan)
+{
+	const unsigned int array_offset = chan / 3;
+	return MSeries_PFI_Output_Select_Source(chan,
+						devpriv->pfi_output_select_reg[array_offset]);
+}
+
+static unsigned int ni_get_pfi_routing(struct a4l_device *dev, unsigned int chan)
+{
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		return ni_m_series_get_pfi_routing(dev, chan);
+	else
+		return ni_old_get_pfi_routing(dev, chan);
+}
+
+static int ni_config_filter(struct a4l_device *dev,
+			    unsigned int pfi_channel, int filter)
+{
+	unsigned int bits;
+	if ((boardtype.reg_type & ni_reg_m_series_mask) == 0) {
+		return -ENOTSUPP;
+	}
+	bits = ni_readl(M_Offset_PFI_Filter);
+	bits &= ~MSeries_PFI_Filter_Select_Mask(pfi_channel);
+	bits |= MSeries_PFI_Filter_Select_Bits(pfi_channel, filter);
+	ni_writel(bits, M_Offset_PFI_Filter);
+	return 0;
+}
+
+static int ni_pfi_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	if (data[0]) {
+		devpriv->pfi_state &= ~data[0];
+		devpriv->pfi_state |= (data[0] & data[1]);
+		ni_writew(devpriv->pfi_state, M_Offset_PFI_DO);
+	}
+
+	data[1] = ni_readw(M_Offset_PFI_DI);
+
+	return 0;
+}
+
+static int ni_pfi_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int chan, *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	chan = CR_CHAN(insn->chan_desc);
+
+	switch (data[0]) {
+	case A4L_OUTPUT:
+		ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 1);
+		break;
+	case A4L_INPUT:
+		ni_set_bits(dev, IO_Bidirection_Pin_Register, 1 << chan, 0);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (devpriv->io_bidirection_pin_reg & (1 << chan)) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	case A4L_INSN_CONFIG_SET_ROUTING:
+		return ni_set_pfi_routing(dev, chan, data[1]);
+	case A4L_INSN_CONFIG_GET_ROUTING:
+		data[1] = ni_get_pfi_routing(dev, chan);
+		break;
+	case A4L_INSN_CONFIG_FILTER:
+		return ni_config_filter(dev, chan, data[1]);
+	default:
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*
+ *
+ *  RTSI Bus Functions
+ *
+ */
+
+/* Find the best multiplier/divider pair to get the PLL running at 80 MHz,
+ * given an arbitrary input clock frequency */
+static int ni_mseries_get_pll_parameters(unsigned int reference_period_ns,
+					 unsigned int *freq_divider,
+					 unsigned int *freq_multiplier,
+					 unsigned int *actual_period_ns)
+{
+	unsigned div;
+	unsigned best_div = 1;
+	static const unsigned max_div = 0x10;
+	unsigned mult;
+	unsigned best_mult = 1;
+	static const unsigned max_mult = 0x100;
+	static const unsigned pico_per_nano = 1000;
+
+	const unsigned reference_picosec = reference_period_ns * pico_per_nano;
+	/* m-series wants the phase-locked loop to output 80 MHz, which is
+	 * divided by 4 to 20 MHz for most timing clocks */
+	static const unsigned target_picosec = 12500;
+	static const unsigned fudge_factor_80_to_20Mhz = 4;
+	int best_period_picosec = 0;
+	for (div = 1; div <= max_div; ++div) {
+		for (mult = 1; mult <= max_mult; ++mult) {
+			unsigned new_period_ps =
+				(reference_picosec * div) / mult;
+			if (abs(new_period_ps - target_picosec) <
+			    abs(best_period_picosec - target_picosec)) {
+				best_period_picosec = new_period_ps;
+				best_div = div;
+				best_mult = mult;
+			}
+		}
+	}
+	if (best_period_picosec == 0) {
+		__a4l_err("%s: bug, failed to find pll parameters\n",
+			  __FUNCTION__);
+		return -EIO;
+	}
+	*freq_divider = best_div;
+	*freq_multiplier = best_mult;
+	*actual_period_ns =
+		(best_period_picosec * fudge_factor_80_to_20Mhz +
+		 (pico_per_nano / 2)) / pico_per_nano;
+	return 0;
+}
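+
+/*
+ * Worked example for the search above, assuming the PXI 10 MHz
+ * backplane clock (reference period 100000 ps): the best fit is
+ * divider 1, multiplier 8, since 100000 * 1 / 8 = 12500 ps, i.e.
+ * exactly the 80 MHz target. The figure of merit reduces to:
+ */
+#if 0 /* illustrative sketch only */
+static unsigned int pll_period_ps(unsigned int reference_ps,
+				  unsigned int divider,
+				  unsigned int multiplier)
+{
+	return (reference_ps * divider) / multiplier;
+}
+#endif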
+
+static int ni_mseries_set_pll_master_clock(struct a4l_device * dev,
+					   unsigned int source,
+					   unsigned int period_ns)
+{
+	static const unsigned min_period_ns = 50;
+	static const unsigned max_period_ns = 1000;
+	static const unsigned timeout = 1000;
+	unsigned pll_control_bits;
+	unsigned freq_divider;
+	unsigned freq_multiplier;
+	unsigned i;
+	int retval;
+	if (source == NI_MIO_PLL_PXI10_CLOCK)
+		period_ns = 100;
+	/* These limits are somewhat arbitrary, but NI advertises a
+	   1 to 20 MHz range, so we use that */
+	if (period_ns < min_period_ns || period_ns > max_period_ns) {
+		a4l_err(dev,
+			"%s: you must specify an input clock frequency "
+			"between %i and %i nanosec "
+			"for the phased-lock loop.\n",
+			__FUNCTION__, min_period_ns, max_period_ns);
+		return -EINVAL;
+	}
+	devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
+			    RTSI_Trig_Direction_Register);
+	pll_control_bits =
+		MSeries_PLL_Enable_Bit | MSeries_PLL_VCO_Mode_75_150MHz_Bits;
+	devpriv->clock_and_fout2 |=
+		MSeries_Timebase1_Select_Bit | MSeries_Timebase3_Select_Bit;
+	devpriv->clock_and_fout2 &= ~MSeries_PLL_In_Source_Select_Mask;
+	switch (source) {
+	case NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK:
+		devpriv->clock_and_fout2 |=
+			MSeries_PLL_In_Source_Select_Star_Trigger_Bits;
+		retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
+						       &freq_multiplier, &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+		break;
+	case NI_MIO_PLL_PXI10_CLOCK:
+		/* pxi clock is 10MHz */
+		devpriv->clock_and_fout2 |=
+			MSeries_PLL_In_Source_Select_PXI_Clock10;
+		retval = ni_mseries_get_pll_parameters(period_ns, &freq_divider,
+						       &freq_multiplier, &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+		break;
+	default:
+	{
+		unsigned rtsi_channel;
+		static const unsigned max_rtsi_channel = 7;
+		for (rtsi_channel = 0; rtsi_channel <= max_rtsi_channel;
+		     ++rtsi_channel) {
+			if (source ==
+			    NI_MIO_PLL_RTSI_CLOCK(rtsi_channel)) {
+				devpriv->clock_and_fout2 |=
+					MSeries_PLL_In_Source_Select_RTSI_Bits
+					(rtsi_channel);
+				break;
+			}
+		}
+		if (rtsi_channel > max_rtsi_channel)
+			return -EINVAL;
+		retval = ni_mseries_get_pll_parameters(period_ns,
+						       &freq_divider, &freq_multiplier,
+						       &devpriv->clock_ns);
+		if (retval < 0)
+			return retval;
+	}
+	break;
+	}
+	ni_writew(devpriv->clock_and_fout2, M_Offset_Clock_and_Fout2);
+	pll_control_bits |=
+		MSeries_PLL_Divisor_Bits(freq_divider) |
+		MSeries_PLL_Multiplier_Bits(freq_multiplier);
+	ni_writew(pll_control_bits, M_Offset_PLL_Control);
+	devpriv->clock_source = source;
+	/* It seems to typically take a few hundred microseconds for the PLL to lock */
+	for (i = 0; i < timeout; ++i) {
+		if (ni_readw(M_Offset_PLL_Status) & MSeries_PLL_Locked_Bit) {
+			break;
+		}
+		udelay(1);
+	}
+	if (i == timeout) {
+		a4l_err(dev,
+			"%s: timed out waiting for PLL to lock "
+			"to reference clock source %i with period %i ns.\n",
+			__FUNCTION__, source, period_ns);
+		return -ETIMEDOUT;
+	}
+	return 3;
+}
+
+static int ni_set_master_clock(struct a4l_device *dev,
+			       unsigned int source, unsigned int period_ns)
+{
+	if (source == NI_MIO_INTERNAL_CLOCK) {
+		devpriv->rtsi_trig_direction_reg &= ~Use_RTSI_Clock_Bit;
+		devpriv->stc_writew(dev, devpriv->rtsi_trig_direction_reg,
+				    RTSI_Trig_Direction_Register);
+		devpriv->clock_ns = TIMEBASE_1_NS;
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			devpriv->clock_and_fout2 &=
+				~(MSeries_Timebase1_Select_Bit |
+				  MSeries_Timebase3_Select_Bit);
+			ni_writew(devpriv->clock_and_fout2,
+				  M_Offset_Clock_and_Fout2);
+			ni_writew(0, M_Offset_PLL_Control);
+		}
+		devpriv->clock_source = source;
+	} else {
+		if (boardtype.reg_type & ni_reg_m_series_mask) {
+			return ni_mseries_set_pll_master_clock(dev, source,
+							       period_ns);
+		} else {
+			if (source == NI_MIO_RTSI_CLOCK) {
+				devpriv->rtsi_trig_direction_reg |=
+					Use_RTSI_Clock_Bit;
+				devpriv->stc_writew(dev,
+						    devpriv->rtsi_trig_direction_reg,
+						    RTSI_Trig_Direction_Register);
+				if (devpriv->clock_ns == 0) {
+					a4l_err(dev,
+						"%s: we don't handle an "
+						"unspecified clock period "
+						"correctly yet, returning error.\n",
+						__FUNCTION__);
+					return -EINVAL;
+				} else {
+					devpriv->clock_ns = period_ns;
+				}
+				devpriv->clock_source = source;
+			} else
+				return -EINVAL;
+		}
+	}
+	return 3;
+}
+
+static void ni_rtsi_init(struct a4l_device * dev)
+{
+	/* Initialise the RTSI bus signal switch to a default state */
+
+	/* Set clock mode to internal */
+	devpriv->clock_and_fout2 = MSeries_RTSI_10MHz_Bit;
+	if (ni_set_master_clock(dev, NI_MIO_INTERNAL_CLOCK, 0) < 0) {
+		a4l_err(dev, "ni_set_master_clock failed, bug?");
+	}
+
+	/* Default internal lines routing to RTSI bus lines */
+	devpriv->rtsi_trig_a_output_reg =
+		RTSI_Trig_Output_Bits(0, NI_RTSI_OUTPUT_ADR_START1) |
+		RTSI_Trig_Output_Bits(1, NI_RTSI_OUTPUT_ADR_START2) |
+		RTSI_Trig_Output_Bits(2, NI_RTSI_OUTPUT_SCLKG) |
+		RTSI_Trig_Output_Bits(3, NI_RTSI_OUTPUT_DACUPDN);
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_a_output_reg,
+			    RTSI_Trig_A_Output_Register);
+	devpriv->rtsi_trig_b_output_reg =
+		RTSI_Trig_Output_Bits(4, NI_RTSI_OUTPUT_DA_START1) |
+		RTSI_Trig_Output_Bits(5, NI_RTSI_OUTPUT_G_SRC0) |
+		RTSI_Trig_Output_Bits(6, NI_RTSI_OUTPUT_G_GATE0);
+
+	if (boardtype.reg_type & ni_reg_m_series_mask)
+		devpriv->rtsi_trig_b_output_reg |=
+			RTSI_Trig_Output_Bits(7, NI_RTSI_OUTPUT_RTSI_OSC);
+	devpriv->stc_writew(dev, devpriv->rtsi_trig_b_output_reg,
+			    RTSI_Trig_B_Output_Register);
+}
+
+int a4l_ni_E_init(struct a4l_device *dev)
+{
+	int ret;
+	unsigned int j, counter_variant;
+	struct a4l_subdevice *subd;
+
+	if (boardtype.n_aochan > MAX_N_AO_CHAN) {
+		a4l_err(dev, "bug! boardtype.n_aochan > MAX_N_AO_CHAN\n");
+		return -EINVAL;
+	}
+
+	/* analog input subdevice */
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: starting attach procedure...\n");
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering AI subdevice...\n");
+
+	if (boardtype.n_adchan) {
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AI: %d channels\n", boardtype.n_adchan);
+
+		subd->flags = A4L_SUBD_AI | A4L_SUBD_CMD | A4L_SUBD_MMAP;
+		subd->rng_desc = ni_range_lkup[boardtype.gainlkup];
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		if (subd->chan_desc == NULL)
+			return -ENOMEM;
+
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_adchan;
+		subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_DIFF;
+		if (boardtype.reg_type != ni_reg_611x)
+			subd->chan_desc->chans[0].flags |= A4L_CHAN_AREF_GROUND |
+				A4L_CHAN_AREF_COMMON | A4L_CHAN_AREF_OTHER;
+		subd->chan_desc->chans[0].nb_bits = boardtype.adbits;
+
+		subd->insn_read = ni_ai_insn_read;
+		subd->insn_config = ni_ai_insn_config;
+		subd->do_cmdtest = ni_ai_cmdtest;
+		subd->do_cmd = ni_ai_cmd;
+		subd->cancel = ni_ai_reset;
+		subd->trigger = ni_ai_inttrig;
+
+		subd->munge = (boardtype.adbits > 16) ?
+			ni_ai_munge32 : ni_ai_munge16;
+
+		subd->cmd_mask = &mio_ai_cmd_mask;
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AI subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_AI_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AI subdevice registered\n");
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering AO subdevice...\n");
+
+	/* analog output subdevice */
+	if (boardtype.n_aochan) {
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AO: %d channels\n", boardtype.n_aochan);
+
+		subd->flags = A4L_SUBD_AO;
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		if (subd->chan_desc == NULL)
+			return -ENOMEM;
+
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_aochan;
+		subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND;
+		subd->chan_desc->chans[0].nb_bits = boardtype.aobits;
+
+		subd->rng_desc = boardtype.ao_range_table;
+
+		subd->insn_read = ni_ao_insn_read;
+		if (boardtype.reg_type & ni_reg_6xxx_mask)
+			subd->insn_write = &ni_ao_insn_write_671x;
+		else
+			subd->insn_write = &ni_ao_insn_write;
+
+
+		if (boardtype.ao_fifo_depth) {
+			subd->flags |= A4L_SUBD_CMD | A4L_SUBD_MMAP;
+			subd->do_cmd = &ni_ao_cmd;
+			subd->cmd_mask = &mio_ao_cmd_mask;
+			subd->do_cmdtest = &ni_ao_cmdtest;
+			subd->trigger = ni_ao_inttrig;
+			if ((boardtype.reg_type & ni_reg_m_series_mask) == 0)
+				subd->munge = &ni_ao_munge;
+		}
+
+		subd->cancel = &ni_ao_reset;
+
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: AO subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_AO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AO subdevice registered\n");
+
+	if ((boardtype.reg_type & ni_reg_67xx_mask))
+		init_ao_67xx(dev);
+
+	/* digital i/o subdevice */
+
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering DIO subdevice...\n");
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: DIO: %d channels\n",
+		boardtype.num_p0_dio_channels);
+
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	if (subd->chan_desc == NULL)
+		return -ENOMEM;
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = boardtype.num_p0_dio_channels;
+	subd->chan_desc->chans[0].flags = A4L_CHAN_AREF_GROUND;
+	subd->chan_desc->chans[0].nb_bits = 1;
+	devpriv->io_bits = 0; /* all bits input */
+
+	subd->rng_desc = &range_digital;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+
+		if (subd->chan_desc->length == 8)
+			subd->insn_bits = ni_m_series_dio_insn_bits_8;
+		else
+			subd->insn_bits = ni_m_series_dio_insn_bits_32;
+
+		subd->insn_config = ni_m_series_dio_insn_config;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: DIO: command feature available\n");
+
+		subd->flags |= A4L_SUBD_CMD;
+		subd->do_cmd = ni_cdio_cmd;
+		subd->do_cmdtest = ni_cdio_cmdtest;
+		subd->cmd_mask = &mio_dio_cmd_mask;
+		subd->cancel = ni_cdio_cancel;
+		subd->trigger = ni_cdo_inttrig;
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		ni_writel(CDO_Reset_Bit | CDI_Reset_Bit, M_Offset_CDIO_Command);
+		ni_writel(devpriv->io_bits, M_Offset_DIO_Direction);
+	} else {
+
+		subd->insn_bits = ni_dio_insn_bits;
+		subd->insn_config = ni_dio_insn_config;
+		devpriv->dio_control = DIO_Pins_Dir(devpriv->io_bits);
+		ni_writew(devpriv->dio_control, DIO_Control_Register);
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: DIO subdevice registered\n");
+
+	/* 8255 device */
+	subd = a4l_alloc_subd(sizeof(subd_8255_t), NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering 8255 subdevice...\n");
+
+	if (boardtype.has_8255) {
+		devpriv->subd_8255.cb_arg = (unsigned long)dev;
+		devpriv->subd_8255.cb_func = ni_8255_callback;
+		a4l_subdev_8255_init(subd);
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: 8255 subdevice not present\n");
+		subd->flags = A4L_SUBD_UNUSED;
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_8255_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: 8255 subdevice registered\n");
+
+	/* formerly general purpose counter/timer device, but no longer used */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	subd->flags = A4L_SUBD_UNUSED;
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_UNUSED_SUBDEV)
+		return ret;
+
+	/* calibration subdevice -- ai and ao */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: registering calib subdevice...\n");
+
+	subd->flags = A4L_SUBD_CALIB;
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		/* internal PWM analog output
+		   used for AI nonlinearity calibration */
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: M series calibration");
+		subd->insn_config = ni_m_series_pwm_config;
+		ni_writel(0x0, M_Offset_Cal_PWM);
+	} else if (boardtype.reg_type == ni_reg_6143) {
+		/* internal PWM analog output
+		   used for AI nonlinearity calibration */
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: 6143 calibration");
+		subd->insn_config = ni_6143_pwm_config;
+	} else {
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: calib: common calibration");
+		subd->insn_read = ni_calib_insn_read;
+		subd->insn_write = ni_calib_insn_write;
+		caldac_setup(dev, subd);
+	}
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_CALIBRATION_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: calib subdevice registered\n");
+
+	/* EEPROM */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering EEPROM subdevice...\n");
+
+	subd->flags = A4L_SUBD_MEMORY;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	if (subd->chan_desc == NULL)
+		return -ENOMEM;
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 8;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		subd->chan_desc->length = M_SERIES_EEPROM_SIZE;
+		subd->insn_read = ni_m_series_eeprom_insn_read;
+	} else {
+		subd->chan_desc->length = 512;
+		subd->insn_read = ni_eeprom_insn_read;
+	}
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: EEPROM: size = %lu\n", subd->chan_desc->length);
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_EEPROM_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: EEPROM subdevice registered\n");
+
+	/* PFI */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering PFI(DIO) subdevice...\n");
+
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	if (subd->chan_desc == NULL)
+		return -ENOMEM;
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 1;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		unsigned int i;
+		subd->chan_desc->length = 16;
+		ni_writew(devpriv->dio_state, M_Offset_PFI_DO);
+		for (i = 0; i < NUM_PFI_OUTPUT_SELECT_REGS; ++i) {
+			ni_writew(devpriv->pfi_output_select_reg[i],
+				  M_Offset_PFI_Output_Select(i + 1));
+		}
+	} else
+		subd->chan_desc->length = 10;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: PFI: %lu bits...\n", subd->chan_desc->length);
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		subd->insn_bits = ni_pfi_insn_bits;
+	}
+
+	subd->insn_config = ni_pfi_insn_config;
+	ni_set_bits(dev, IO_Bidirection_Pin_Register, ~0, 0);
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_PFI_DIO_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: PFI subdevice registered\n");
+
+	/* cs5529 calibration adc */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+#if 0 /* TODO: add subdevices callbacks */
+	subd->flags = A4L_SUBD_AI;
+
+	if (boardtype.reg_type & ni_reg_67xx_mask) {
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = boardtype.n_aochan;
+		subd->chan_desc->chans[0].flags = 0;
+		subd->chan_desc->chans[0].nb_bits = 16;
+
+		/* one channel for each analog output channel */
+		subd->rng_desc = &a4l_range_unknown;	/* XXX */
+		s->insn_read = cs5529_ai_insn_read;
+		init_cs5529(dev);
+	} else
+#endif /* TODO: add subdevices callbacks */
+		subd->flags = A4L_SUBD_UNUSED;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_CS5529_CALIBRATION_SUBDEV)
+		return ret;
+
+	/* Serial */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering serial subdevice...\n");
+
+	subd->flags = A4L_SUBD_SERIAL;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	if (subd->chan_desc == NULL)
+		return -ENOMEM;
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 1;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 8;
+
+	subd->insn_config = ni_serial_insn_config;
+
+	devpriv->serial_interval_ns = 0;
+	devpriv->serial_hw_mode = 0;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_SERIAL_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: serial subdevice registered\n");
+
+	/* RTSI */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+#if 1 /* TODO: add RTSI subdevice */
+	subd->flags = A4L_SUBD_UNUSED;
+	ni_rtsi_init(dev);
+
+#else /* TODO: add RTSI subdevice */
+	subd->flags = A4L_SUBD_DIO;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 8;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 1;
+
+	subd->insn_bits = ni_rtsi_insn_bits;
+	subd->insn_config = ni_rtsi_insn_config;
+	ni_rtsi_init(dev);
+
+#endif /* TODO: add RTSI subdevice */
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_RTSI_SUBDEV)
+		return ret;
+
+	if (boardtype.reg_type & ni_reg_m_series_mask) {
+		counter_variant = ni_gpct_variant_m_series;
+	} else {
+		counter_variant = ni_gpct_variant_e_series;
+	}
+	devpriv->counter_dev =
+		a4l_ni_gpct_device_construct(dev,
+					     &ni_gpct_write_register,
+					     &ni_gpct_read_register,
+					     counter_variant, NUM_GPCT);
+
+	/* General purpose counters */
+	for (j = 0; j < NUM_GPCT; ++j) {
+		struct ni_gpct *counter;
+
+		subd = a4l_alloc_subd(sizeof(struct ni_gpct), NULL);
+		if(subd == NULL)
+			return -ENOMEM;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: registering GPCT[%d] subdevice...\n", j);
+
+		subd->flags = A4L_SUBD_COUNTER;
+
+		subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+					  sizeof(struct a4l_channel), GFP_KERNEL);
+		if (subd->chan_desc == NULL)
+			return -ENOMEM;
+		subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+		subd->chan_desc->length = 3;
+		subd->chan_desc->chans[0].flags = 0;
+
+		if (boardtype.reg_type & ni_reg_m_series_mask)
+			subd->chan_desc->chans[0].nb_bits = 32;
+		else
+			subd->chan_desc->chans[0].nb_bits = 24;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d]: %lu bits\n",
+			j, subd->chan_desc->chans[0].nb_bits);
+
+		subd->insn_read = ni_gpct_insn_read;
+		subd->insn_write = ni_gpct_insn_write;
+		subd->insn_config = ni_gpct_insn_config;
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GPCT[%d]: command feature available\n", j);
+		subd->flags |= A4L_SUBD_CMD;
+		subd->cmd_mask = &a4l_ni_tio_cmd_mask;
+		subd->do_cmd = ni_gpct_cmd;
+		subd->do_cmdtest = ni_gpct_cmdtest;
+		subd->cancel = ni_gpct_cancel;
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+		counter = (struct ni_gpct *)subd->priv;
+		rtdm_lock_init(&counter->lock);
+		counter->chip_index = 0;
+		counter->counter_index = j;
+		counter->counter_dev = devpriv->counter_dev;
+		devpriv->counter_dev->counters[j] = counter;
+
+		a4l_ni_tio_init_counter(counter);
+
+		ret = a4l_add_subd(dev, subd);
+		if(ret != NI_GPCT_SUBDEV(j))
+			return ret;
+
+		a4l_dbg(1, drv_dbg, dev,
+			"mio_common: GCPT[%d] subdevice registered\n", j);
+	}
+
+	/* Frequency output */
+	subd = a4l_alloc_subd(0, NULL);
+	if(subd == NULL)
+		return -ENOMEM;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: registering counter subdevice...\n");
+
+	subd->flags = A4L_SUBD_COUNTER;
+
+	subd->chan_desc = kmalloc(sizeof(struct a4l_channels_desc) +
+				  sizeof(struct a4l_channel), GFP_KERNEL);
+	if (subd->chan_desc == NULL)
+		return -ENOMEM;
+	subd->chan_desc->mode = A4L_CHAN_GLOBAL_CHANDESC;
+	subd->chan_desc->length = 1;
+	subd->chan_desc->chans[0].flags = 0;
+	subd->chan_desc->chans[0].nb_bits = 4;
+
+	subd->insn_read = ni_freq_out_insn_read;
+	subd->insn_write = ni_freq_out_insn_write;
+	subd->insn_config = ni_freq_out_insn_config;
+
+	ret = a4l_add_subd(dev, subd);
+	if(ret != NI_FREQ_OUT_SUBDEV)
+		return ret;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"mio_common: counter subdevice registered\n");
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: initializing AI...\n");
+
+	/* ai configuration */
+	ni_ai_reset(a4l_get_subd(dev, NI_AI_SUBDEV));
+	if ((boardtype.reg_type & ni_reg_6xxx_mask) == 0) {
+		/* BEAM: is this needed for the PCI-6143? */
+		devpriv->clock_and_fout =
+			Slow_Internal_Time_Divide_By_2 |
+			Slow_Internal_Timebase |
+			Clock_To_Board_Divide_By_2 |
+			Clock_To_Board |
+			AI_Output_Divide_By_2 | AO_Output_Divide_By_2;
+	} else {
+		devpriv->clock_and_fout =
+			Slow_Internal_Time_Divide_By_2 |
+			Slow_Internal_Timebase |
+			Clock_To_Board_Divide_By_2 | Clock_To_Board;
+	}
+	devpriv->stc_writew(dev, devpriv->clock_and_fout,
+			    Clock_and_FOUT_Register);
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: AI initialization OK\n");
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: initializing A0...\n");
+
+	/* analog output configuration */
+	ni_ao_reset(a4l_get_subd(dev, NI_AO_SUBDEV));
+
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED) {
+		devpriv->stc_writew(dev,
+				    (devpriv->irq_polarity ? Interrupt_Output_Polarity : 0) |
+				    (Interrupt_Output_On_3_Pins & 0) | Interrupt_A_Enable |
+				    Interrupt_B_Enable |
+				    Interrupt_A_Output_Select(devpriv->irq_pin) |
+				    Interrupt_B_Output_Select(devpriv->irq_pin),
+				    Interrupt_Control_Register);
+	}
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: A0 initialization OK\n");
+
+	/* DMA setup */
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: DMA setup\n");
+
+	ni_writeb(devpriv->ai_ao_select_reg, AI_AO_Select);
+	ni_writeb(devpriv->g0_g1_select_reg, G0_G1_Select);
+
+	if (boardtype.reg_type & ni_reg_6xxx_mask) {
+		ni_writeb(0, Magic_611x);
+	} else if (boardtype.reg_type & ni_reg_m_series_mask) {
+		int channel;
+		for (channel = 0; channel < boardtype.n_aochan; ++channel) {
+			ni_writeb(0xf, M_Offset_AO_Waveform_Order(channel));
+			ni_writeb(0x0,
+				  M_Offset_AO_Reference_Attenuation(channel));
+		}
+		ni_writeb(0x0, M_Offset_AO_Calibration);
+	}
+
+	a4l_dbg(1, drv_dbg, dev, "mio_common: attach procedure complete\n");
+
+	return 0;
+}
+
+MODULE_DESCRIPTION("Analogy support for NI DAQ-STC based boards");
+MODULE_LICENSE("GPL");
+
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_limited14);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_bipolar4);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ai_611x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_622x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_M_ai_628x);
+EXPORT_SYMBOL_GPL(a4l_range_ni_S_ai_6143);
+EXPORT_SYMBOL_GPL(a4l_range_ni_E_ao_ext);
+EXPORT_SYMBOL_GPL(a4l_ni_E_interrupt);
+EXPORT_SYMBOL_GPL(a4l_ni_E_init);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c
new file mode 100644
index 0000000..4eb9eee
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.c
@@ -0,0 +1,839 @@
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ *
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * The NI Mite driver was originally written by Tomasz Motylewski
+ * <...>, and ported to comedi by ds.
+ *
+ * References for specifications:
+ *
+ * 321747b.pdf  Register Level Programmer Manual (obsolete)
+ * 321747c.pdf  Register Level Programmer Manual (new)
+ * DAQ-STC reference manual
+ *
+ * Other possibly relevant info:
+ *
+ * 320517c.pdf  User manual (obsolete)
+ * 320517f.pdf  User manual (new)
+ * 320889a.pdf  delete
+ * 320906c.pdf  maximum signal ratings
+ * 321066a.pdf  about 16x
+ * 321791a.pdf  discontinuation of at-mio-16e-10 rev. c
+ * 321808a.pdf  about at-mio-16e-10 rev P
+ * 321837a.pdf  discontinuation of at-mio-16de-10 rev d
+ * 321838a.pdf  about at-mio-16de-10 rev N
+ *
+ * ISSUES:
+ */
+
+#include <linux/module.h>
+#include "mite.h"
+
+#ifdef CONFIG_DEBUG_MITE
+#define MDPRINTK(fmt, args...) rtdm_printk(fmt, ##args)
+#else /* !CONFIG_DEBUG_MITE */
+#define MDPRINTK(fmt, args...)
+#endif /* CONFIG_DEBUG_MITE */
+
+static LIST_HEAD(mite_devices);
+
+static struct pci_device_id mite_id[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_NATINST, PCI_ANY_ID), },
+	{0, }
+};
+
+static int mite_probe(struct pci_dev *dev, const struct pci_device_id *id)
+{
+	int i, err = 0;
+	struct mite_struct *mite;
+
+	mite = kmalloc(sizeof(struct mite_struct), GFP_KERNEL);
+	if(mite == NULL)
+		return -ENOMEM;
+
+	memset(mite, 0, sizeof(struct mite_struct));
+
+	rtdm_lock_init(&mite->lock);
+
+	mite->pcidev = dev;
+	if (pci_enable_device(dev) < 0) {
+		__a4l_err("error enabling mite\n");
+		err = -EIO;
+		goto out;
+	}
+
+	for(i = 0; i < MAX_MITE_DMA_CHANNELS; i++) {
+		mite->channels[i].mite = mite;
+		mite->channels[i].channel = i;
+		mite->channels[i].done = 1;
+	}
+
+	list_add(&mite->list, &mite_devices);
+
+out:
+	if (err < 0)
+		kfree(mite);
+
+	return err;
+}
+
+static void mite_remove(struct pci_dev *dev)
+{
+	struct list_head *this;
+
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		if(mite->pcidev == dev) {
+			list_del(this);
+			kfree(mite);
+			break;
+		}
+	}
+}
+
+static struct pci_driver mite_driver = {
+	.name = "analogy_mite",
+	.id_table = mite_id,
+	.probe = mite_probe,
+	.remove = mite_remove,
+};
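+
+/*
+ * How this pci_driver would typically be registered; the actual module
+ * init/exit code is not shown in this hunk, so the names below are
+ * hypothetical.
+ */
+#if 0 /* illustrative sketch only */
+static int __init mite_init(void)
+{
+	return pci_register_driver(&mite_driver);
+}
+
+static void __exit mite_cleanup(void)
+{
+	pci_unregister_driver(&mite_driver);
+}
+#endif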
+
+int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1)
+{
+	unsigned long length;
+	resource_size_t addr;
+	int i;
+	u32 csigr_bits;
+	unsigned unknown_dma_burst_bits;
+
+	__a4l_dbg(1, drv_dbg, "starting setup...\n");
+
+	pci_set_master(mite->pcidev);
+
+	if (pci_request_regions(mite->pcidev, "mite")) {
+		__a4l_err("failed to request mite io regions\n");
+		return -EIO;
+	}
+
+	/* The PCI BAR0 is the Mite */
+	addr = pci_resource_start(mite->pcidev, 0);
+	length = pci_resource_len(mite->pcidev, 0);
+	mite->mite_phys_addr = addr;
+	mite->mite_io_addr = ioremap(addr, length);
+	if (!mite->mite_io_addr) {
+		__a4l_err("failed to remap mite io memory address\n");
+		pci_release_regions(mite->pcidev);
+		return -ENOMEM;
+	}
+
+	__a4l_dbg(1, drv_dbg, "bar0(mite) 0x%08llx mapped to %p\n",
+		  (unsigned long long)mite->mite_phys_addr,
+		  mite->mite_io_addr);
+
+
+	/* The PCI BAR1 is the DAQ */
+	addr = pci_resource_start(mite->pcidev, 1);
+	length = pci_resource_len(mite->pcidev, 1);
+	mite->daq_phys_addr = addr;
+	mite->daq_io_addr = ioremap(mite->daq_phys_addr, length);
+	if (!mite->daq_io_addr) {
+		__a4l_err("failed to remap daq io memory address\n");
+		pci_release_regions(mite->pcidev);
+		return -ENOMEM;
+	}
+
+	__a4l_dbg(1, drv_dbg, "bar0(daq) 0x%08llx mapped to %p\n",
+		  (unsigned long long)mite->daq_phys_addr,
+		  mite->daq_io_addr);
+
+	if (use_iodwbsr_1) {
+		__a4l_dbg(1, drv_dbg, "using I/O Window Base Size register 1\n");
+		writel(0, mite->mite_io_addr + MITE_IODWBSR);
+		writel(mite->daq_phys_addr | WENAB |
+		       MITE_IODWBSR_1_WSIZE_bits(length),
+		       mite->mite_io_addr + MITE_IODWBSR_1);
+		writel(0, mite->mite_io_addr + MITE_IODWCR_1);
+	} else {
+		writel(mite->daq_phys_addr | WENAB,
+		       mite->mite_io_addr + MITE_IODWBSR);
+	}
+
+	/* Make sure dma bursts work.  I got this from running a bus analyzer
+	   on a pxi-6281 and a pxi-6713.  6713 powered up with register value
+	   of 0x61f and bursts worked.  6281 powered up with register value of
+	   0x1f and bursts didn't work.  The NI windows driver reads the register,
+	   then does a bitwise-or of 0x600 with it and writes it back.
+	*/
+	unknown_dma_burst_bits =
+		readl(mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+	unknown_dma_burst_bits |= UNKNOWN_DMA_BURST_ENABLE_BITS;
+	writel(unknown_dma_burst_bits,
+	       mite->mite_io_addr + MITE_UNKNOWN_DMA_BURST_REG);
+
+	csigr_bits = readl(mite->mite_io_addr + MITE_CSIGR);
+	mite->num_channels = mite_csigr_dmac(csigr_bits);
+	if (mite->num_channels > MAX_MITE_DMA_CHANNELS) {
+		__a4l_err("MITE: bug? chip claims to have %i dma channels. "
+			  "Setting to %i.\n",
+			  mite->num_channels, MAX_MITE_DMA_CHANNELS);
+		mite->num_channels = MAX_MITE_DMA_CHANNELS;
+	}
+
+	__a4l_dbg(1, drv_dbg, " version = %i, type = %i, mite mode = %i, "
+		  "interface mode = %i\n",
+		  mite_csigr_version(csigr_bits),
+		  mite_csigr_type(csigr_bits),
+		  mite_csigr_mmode(csigr_bits),
+		  mite_csigr_imode(csigr_bits));
+	__a4l_dbg(1, drv_dbg, " num channels = %i, write post fifo depth = %i, "
+		  "wins = %i, iowins = %i\n",
+		  mite_csigr_dmac(csigr_bits),
+		  mite_csigr_wpdep(csigr_bits),
+		  mite_csigr_wins(csigr_bits),
+		  mite_csigr_iowins(csigr_bits));
+
+	for (i = 0; i < mite->num_channels; i++) {
+		/* Registers the channel as a free one */
+		mite->channel_allocated[i] = 0;
+		/* Reset the channel */
+		writel(CHOR_DMARESET, mite->mite_io_addr + MITE_CHOR(i));
+		/* Disable interrupts */
+		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+		       CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+		       mite->mite_io_addr + MITE_CHCR(i));
+
+		__a4l_dbg(1, drv_dbg, "channel[%d] initialized\n", i);
+	}
+
+	mite->used = 1;
+
+	return 0;
+}
+
+void a4l_mite_unsetup(struct mite_struct *mite)
+{
+	if (!mite)
+		return;
+
+	if (mite->mite_io_addr) {
+		iounmap(mite->mite_io_addr);
+		mite->mite_io_addr = NULL;
+	}
+
+	if (mite->daq_io_addr) {
+		iounmap(mite->daq_io_addr);
+		mite->daq_io_addr = NULL;
+	}
+
+	if (mite->used)
+		pci_release_regions(mite->pcidev);
+
+	mite->used = 0;
+}
+
+void a4l_mite_list_devices(void)
+{
+	struct list_head *this;
+
+	printk("Analogy: MITE: Available NI device IDs:");
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		printk(" 0x%04x", mite_device_id(mite));
+		if (mite->used)
+			printk("(used)");
+	}
+
+	printk("\n");
+}
+
+struct mite_struct *a4l_mite_find_device(int bus, int slot,
+					  unsigned short device_id)
+{
+	struct list_head *this;
+
+	list_for_each(this, &mite_devices) {
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		if (mite->pcidev->device != device_id)
+			continue;
+
+		if ((bus <= 0 && slot <= 0) ||
+		   (bus == mite->pcidev->bus->number &&
+		    slot == PCI_SLOT(mite->pcidev->devfn)))
+			return mite;
+	}
+
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(a4l_mite_find_device);
+
+struct mite_channel *
+a4l_mite_request_channel_in_range(struct mite_struct *mite,
+				  struct mite_dma_descriptor_ring *ring,
+				  unsigned min_channel, unsigned max_channel)
+{
+	int i;
+	unsigned long flags;
+	struct mite_channel *channel = NULL;
+
+	__a4l_dbg(1, drv_dbg, " min_channel = %u, max_channel = %u\n",
+		  min_channel, max_channel);
+
+	/* spin lock so a4l_mite_release_channel can be called safely
+	   from interrupts */
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	for (i = min_channel; i <= max_channel; ++i) {
+		__a4l_dbg(1, drv_dbg, " channel[%d] allocated = %d\n",
+			  i, mite->channel_allocated[i]);
+
+		if (mite->channel_allocated[i] == 0) {
+			mite->channel_allocated[i] = 1;
+			channel = &mite->channels[i];
+			channel->ring = ring;
+			break;
+		}
+	}
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return channel;
+}
+
+void a4l_mite_release_channel(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned long flags;
+
+	/* Spin lock to prevent races with mite_request_channel */
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	if (mite->channel_allocated[mite_chan->channel]) {
+		/* disable all channel's interrupts */
+		writel(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE |
+		       CHCR_CLR_SAR_IE | CHCR_CLR_DONE_IE |
+		       CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		       CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE,
+		       mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+		a4l_mite_dma_disarm(mite_chan);
+		mite_dma_reset(mite_chan);
+		mite->channel_allocated[mite_chan->channel] = 0;
+		mite_chan->ring = NULL;
+		mmiowb();
+	}
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+}
+
+void a4l_mite_dma_arm(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	int chor;
+	unsigned long flags;
+
+	MDPRINTK("a4l_mite_dma_arm ch%i\n", mite_chan->channel);
+	/* Memory barrier is intended to ensure any twiddling with the buffer
+	   is done before writing to the mite to arm the dma transfer */
+	smp_mb();
+	/* arm */
+	chor = CHOR_START;
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	mite_chan->done = 0;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+	mmiowb();
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+}
+
+void a4l_mite_dma_disarm(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned chor;
+
+	/* disarm */
+	chor = CHOR_ABORT;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+}
+
+int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned int n_links;
+	int i;
+
+	if (ring->descriptors) {
+		dma_free_coherent(&ring->pcidev->dev,
+				  ring->n_links * sizeof(struct mite_dma_descriptor),
+				  ring->descriptors, ring->descriptors_dma_addr);
+	}
+	ring->descriptors = NULL;
+	ring->descriptors_dma_addr = 0;
+	ring->n_links = 0;
+
+	if (buf->size == 0) {
+		return 0;
+	}
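+
+	/* One link-chaining descriptor per page of the buffer */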
+	n_links = buf->size >> PAGE_SHIFT;
+
+	MDPRINTK("ring->pcidev=%p, n_links=0x%04x\n", ring->pcidev, n_links);
+
+	ring->descriptors =
+		dma_alloc_coherent(&ring->pcidev->dev,
+				   n_links * sizeof(struct mite_dma_descriptor),
+				   &ring->descriptors_dma_addr, GFP_ATOMIC);
+	if (!ring->descriptors) {
+		printk("MITE: ring buffer allocation failed\n");
+		return -ENOMEM;
+	}
+	ring->n_links = n_links;
+
+	for (i = 0; i < n_links; i++) {
+		ring->descriptors[i].count = cpu_to_le32(PAGE_SIZE);
+		ring->descriptors[i].addr = cpu_to_le32(buf->pg_list[i]);
+		ring->descriptors[i].next =
+			cpu_to_le32(ring->descriptors_dma_addr +
+				    (i + 1) * sizeof(struct mite_dma_descriptor));
+	}
+
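+	/* Close the ring: the last descriptor links back to the first */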
+	ring->descriptors[n_links - 1].next =
+		cpu_to_le32(ring->descriptors_dma_addr);
+
+	/* Barrier is meant to ensure that all the writes to the dma descriptors
+	   have completed before the dma controller is commanded to read them */
+	smp_wmb();
+
+	return 0;
+}
+
+void a4l_mite_prep_dma(struct mite_channel *mite_chan,
+		   unsigned int num_device_bits, unsigned int num_memory_bits)
+{
+	unsigned int chor, chcr, mcr, dcr, lkcr;
+	struct mite_struct *mite = mite_chan->mite;
+
+	MDPRINTK("a4l_mite_prep_dma ch%i\n", mite_chan->channel);
+
+	/* reset DMA and FIFO */
+	chor = CHOR_DMARESET | CHOR_FRESET;
+	writel(chor, mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+
+	/* short link chaining mode */
+	chcr = CHCR_SET_DMA_IE | CHCR_LINKSHORT | CHCR_SET_DONE_IE |
+		CHCR_BURSTEN;
+	/*
+	 * Link Complete Interrupt: interrupt every time a link
+	 * in MITE_RING is completed. This can generate a lot of
+	 * extra interrupts, but right now we update the values
+	 * of buf_int_ptr and buf_int_count at each interrupt.  A
+	 * better method is to poll the MITE before each user
+	 * "read()" to calculate the number of bytes available.
+	 */
+	chcr |= CHCR_SET_LC_IE;
+	if (num_memory_bits == 32 && num_device_bits == 16) {
+		/* Doing a combined 32 and 16 bit byteswap gets the 16
+		   bit samples into the fifo in the right order.
+		   Tested doing 32 bit memory to 16 bit device
+		   transfers to the analog out of a pxi-6281, which
+		   has mite version = 1, type = 4.  This also works
+		   for dma reads from the counters on e-series boards.
+		*/
+		chcr |= CHCR_BYTE_SWAP_DEVICE | CHCR_BYTE_SWAP_MEMORY;
+	}
+
+	if (mite_chan->dir == A4L_INPUT) {
+		chcr |= CHCR_DEV_TO_MEM;
+	}
+	writel(chcr, mite->mite_io_addr + MITE_CHCR(mite_chan->channel));
+
+	/* to/from memory */
+	mcr = CR_RL(64) | CR_ASEQUP;
+	switch (num_memory_bits) {
+	case 8:
+		mcr |= CR_PSIZE8;
+		break;
+	case 16:
+		mcr |= CR_PSIZE16;
+		break;
+	case 32:
+		mcr |= CR_PSIZE32;
+		break;
+	default:
+		__a4l_err("MITE: bug! "
+			  "invalid mem bit width for dma transfer\n");
+		break;
+	}
+	writel(mcr, mite->mite_io_addr + MITE_MCR(mite_chan->channel));
+
+	/* from/to device */
+	dcr = CR_RL(64) | CR_ASEQUP;
+	dcr |= CR_PORTIO | CR_AMDEVICE | CR_REQSDRQ(mite_chan->channel);
+	switch (num_device_bits) {
+	case 8:
+		dcr |= CR_PSIZE8;
+		break;
+	case 16:
+		dcr |= CR_PSIZE16;
+		break;
+	case 32:
+		dcr |= CR_PSIZE32;
+		break;
+	default:
+		__a4l_info("MITE: bug! "
+			   "invalid dev bit width for dma transfer\n");
+		break;
+	}
+	writel(dcr, mite->mite_io_addr + MITE_DCR(mite_chan->channel));
+
+	/* reset the DAR */
+	writel(0, mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+
+	/* the link is 32bits */
+	lkcr = CR_RL(64) | CR_ASEQUP | CR_PSIZE32;
+	writel(lkcr, mite->mite_io_addr + MITE_LKCR(mite_chan->channel));
+
+	/* starting address for link chaining */
+	writel(mite_chan->ring->descriptors_dma_addr,
+	       mite->mite_io_addr + MITE_LKAR(mite_chan->channel));
+
+	MDPRINTK("exit a4l_mite_prep_dma\n");
+}
+
+u32 mite_device_bytes_transferred(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	return readl(mite->mite_io_addr + MITE_DAR(mite_chan->channel));
+}
+
+u32 a4l_mite_bytes_in_transit(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	return readl(mite->mite_io_addr +
+		     MITE_FCR(mite_chan->channel)) & 0x000000FF;
+}
+
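+/* The _lb/_ub helpers below differ only in the order in which the DAR
+   (device byte count) and FCR (fifo count) registers are read; DMA can
+   progress between the two reads, so one ordering yields a safe lower
+   bound and the other an upper bound. */
+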
+/* Returns lower bound for number of bytes transferred from device to memory */
+u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan)
+{
+	u32 device_byte_count;
+
+	device_byte_count = mite_device_bytes_transferred(mite_chan);
+	return device_byte_count - a4l_mite_bytes_in_transit(mite_chan);
+}
+
+/* Returns upper bound for number of bytes transferred from device to memory */
+u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan)
+{
+	u32 in_transit_count;
+
+	in_transit_count = a4l_mite_bytes_in_transit(mite_chan);
+	return mite_device_bytes_transferred(mite_chan) - in_transit_count;
+}
+
+/* Returns lower bound for number of bytes read from memory for transfer to device */
+u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan)
+{
+	u32 device_byte_count;
+
+	device_byte_count = mite_device_bytes_transferred(mite_chan);
+	return device_byte_count + a4l_mite_bytes_in_transit(mite_chan);
+}
+
+/* Returns upper bound for number of bytes read from memory for transfer to device */
+u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan)
+{
+	u32 in_transit_count;
+
+	in_transit_count = a4l_mite_bytes_in_transit(mite_chan);
+	return mite_device_bytes_transferred(mite_chan) + in_transit_count;
+}
+
+int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd)
+{
+	unsigned int nbytes_lb, nbytes_ub;
+
+	nbytes_lb = a4l_mite_bytes_written_to_memory_lb(mite_chan);
+	nbytes_ub = a4l_mite_bytes_written_to_memory_ub(mite_chan);
+
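+	/* Reserve room for the most data the DMA may already have written
+	   (upper bound), but only commit what has certainly landed in
+	   memory (lower bound) */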
+	if (a4l_buf_prepare_absput(subd, nbytes_ub) != 0) {
+		__a4l_err("MITE: DMA overwrite of free area\n");
+		return -EPIPE;
+	}
+
+	return a4l_buf_commit_absput(subd, nbytes_lb);
+}
+
+int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd)
+{
+	struct a4l_buffer *buf = subd->buf;
+	unsigned int nbytes_ub, nbytes_lb;
+	int err;
+
+	nbytes_lb = a4l_mite_bytes_read_from_memory_lb(mite_chan);
+	nbytes_ub = a4l_mite_bytes_read_from_memory_ub(mite_chan);
+
+	err = a4l_buf_prepare_absget(subd, nbytes_ub);
+	if (err < 0) {
+		__a4l_info("MITE: DMA underrun\n");
+		return -EPIPE;
+	}
+
+	err = a4l_buf_commit_absget(subd, nbytes_lb);
+
+	/* If the MITE has already transferred more than required, we
+	   can disable it */
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags))
+		writel(CHOR_STOP,
+		       mite_chan->mite->mite_io_addr +
+		       MITE_CHOR(mite_chan->channel));
+
+	return err;
+}
+
+u32 a4l_mite_get_status(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	u32 status;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	status = readl(mite->mite_io_addr + MITE_CHSR(mite_chan->channel));
+	if (status & CHSR_DONE) {
+		mite_chan->done = 1;
+		writel(CHOR_CLRDONE,
+		       mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+	}
+	mmiowb();
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return status;
+}
+
+int a4l_mite_done(struct mite_channel *mite_chan)
+{
+	struct mite_struct *mite = mite_chan->mite;
+	unsigned long flags;
+	int done;
+
+	a4l_mite_get_status(mite_chan);
+	rtdm_lock_get_irqsave(&mite->lock, flags);
+	done = mite_chan->done;
+	rtdm_lock_put_irqrestore(&mite->lock, flags);
+	return done;
+}
+
+#ifdef CONFIG_DEBUG_MITE
+
+void a4l_mite_decode(const char *const bit_str[], unsigned int bits);
+
+/* names of bits in mite registers */
+
+static const char *const mite_CHOR_strings[] = {
+	"start", "cont", "stop", "abort",
+	"freset", "clrlc", "clrrb", "clrdone",
+	"clr_lpause", "set_lpause", "clr_send_tc",
+	"set_send_tc", "12", "13", "14",
+	"15", "16", "17", "18",
+	"19", "20", "21", "22",
+	"23", "24", "25", "26",
+	"27", "28", "29", "30",
+	"dmareset",
+};
+
+static const char *const mite_CHCR_strings[] = {
+	"continue", "ringbuff", "2", "3",
+	"4", "5", "6", "7",
+	"8", "9", "10", "11",
+	"12", "13", "bursten", "fifodis",
+	"clr_cont_rb_ie", "set_cont_rb_ie", "clr_lc_ie", "set_lc_ie",
+	"clr_drdy_ie", "set_drdy_ie", "clr_mrdy_ie", "set_mrdy_ie",
+	"clr_done_ie", "set_done_ie", "clr_sar_ie", "set_sar_ie",
+	"clr_linkp_ie", "set_linkp_ie", "clr_dma_ie", "set_dma_ie",
+};
+
+static const char *const mite_MCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "11",
+	"12", "13", "blocken", "berhand",
+	"reqsintlim/reqs0", "reqs1", "reqs2", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"24", "25", "26", "27",
+	"28", "29", "30", "stopen",
+};
+
+static const char *const mite_DCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "aseqxp1", "aseqxp2",
+	"aseqxp8", "13", "blocken", "berhand",
+	"reqsintlim", "reqs1", "reqs2", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"23", "24", "25", "27",
+	"28", "wsdevc", "wsdevs", "rwdevpack",
+};
+
+static const char *const mite_LKCR_strings[] = {
+	"amdevice", "1", "2", "3",
+	"4", "5", "portio", "portvxi",
+	"psizebyte", "psizehalf (byte & half = word)", "asequp", "aseqdown",
+	"12", "13", "14", "berhand",
+	"16", "17", "18", "rd32",
+	"rd512", "rl1", "rl2", "rl8",
+	"24", "25", "26", "27",
+	"28", "29", "30", "chngend",
+};
+
+static const char *const mite_CHSR_strings[] = {
+	"d.err0", "d.err1", "m.err0", "m.err1",
+	"l.err0", "l.err1", "drq0", "drq1",
+	"end", "xferr", "operr0", "operr1",
+	"stops", "habort", "sabort", "error",
+	"16", "conts_rb", "18", "linkc",
+	"20", "drdy", "22", "mrdy",
+	"24", "done", "26", "sars",
+	"28", "lpauses", "30", "int",
+};
+
+void a4l_mite_dump_regs(struct mite_channel *mite_chan)
+{
+	unsigned long mite_io_addr =
+		(unsigned long)mite_chan->mite->mite_io_addr;
+	unsigned long addr = 0;
+	unsigned long temp = 0;
+
+	printk("a4l_mite_dump_regs ch%i\n", mite_chan->channel);
+	printk("mite address is  =0x%08lx\n", mite_io_addr);
+
+	addr = mite_io_addr + MITE_CHOR(mite_chan->channel);
+	printk("mite status[CHOR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHOR_strings, temp);
+	addr = mite_io_addr + MITE_CHCR(mite_chan->channel);
+	printk("mite status[CHCR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHCR_strings, temp);
+	addr = mite_io_addr + MITE_TCR(mite_chan->channel);
+	printk("mite status[TCR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_MCR(mite_chan->channel);
+	printk("mite status[MCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_MCR_strings, temp);
+
+	addr = mite_io_addr + MITE_MAR(mite_chan->channel);
+	printk("mite status[MAR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_DCR(mite_chan->channel);
+	printk("mite status[DCR] at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_DCR_strings, temp);
+	addr = mite_io_addr + MITE_DAR(mite_chan->channel);
+	printk("mite status[DAR] at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+	addr = mite_io_addr + MITE_LKCR(mite_chan->channel);
+	printk("mite status[LKCR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_LKCR_strings, temp);
+	addr = mite_io_addr + MITE_LKAR(mite_chan->channel);
+	printk("mite status[LKAR]at 0x%08lx =0x%08x\n", addr,
+	       readl((void *)addr));
+
+	addr = mite_io_addr + MITE_CHSR(mite_chan->channel);
+	printk("mite status[CHSR]at 0x%08lx =0x%08lx\n", addr, temp =
+	       readl((void *)addr));
+	a4l_mite_decode(mite_CHSR_strings, temp);
+	addr = mite_io_addr + MITE_FCR(mite_chan->channel);
+	printk("mite status[FCR] at 0x%08lx =0x%08x\n\n", addr,
+	       readl((void *)addr));
+}
+
+void a4l_mite_decode(const char *const bit_str[], unsigned int bits)
+{
+	int i;
+
+	for (i = 31; i >= 0; i--) {
+		if (bits & (1 << i)) {
+			printk(" %s", bit_str[i]);
+		}
+	}
+	printk("\n");
+}
+
+#endif /* CONFIG_DEBUG_MITE */
+
+static int __init mite_init(void)
+{
+	int err;
+
+	/* Register the mite's PCI driver */
+	err = pci_register_driver(&mite_driver);
+
+	if (err == 0)
+		a4l_mite_list_devices();
+
+	return err;
+}
+
+static void __exit mite_cleanup(void)
+{
+	/* Unregister the PCI driver */
+	pci_unregister_driver(&mite_driver);
+
+	/* Just paranoia... */
+	while (!list_empty(&mite_devices)) {
+		struct list_head *this = mite_devices.next;
+		struct mite_struct *mite =
+			list_entry(this, struct mite_struct, list);
+
+		list_del(this);
+		kfree(mite);
+	}
+}
+
+MODULE_LICENSE("GPL");
+module_init(mite_init);
+module_exit(mite_cleanup);
+
+EXPORT_SYMBOL_GPL(a4l_mite_dma_arm);
+EXPORT_SYMBOL_GPL(a4l_mite_dma_disarm);
+EXPORT_SYMBOL_GPL(a4l_mite_sync_input_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_sync_output_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_setup);
+EXPORT_SYMBOL_GPL(a4l_mite_unsetup);
+EXPORT_SYMBOL_GPL(a4l_mite_list_devices);
+EXPORT_SYMBOL_GPL(a4l_mite_request_channel_in_range);
+EXPORT_SYMBOL_GPL(a4l_mite_release_channel);
+EXPORT_SYMBOL_GPL(a4l_mite_prep_dma);
+EXPORT_SYMBOL_GPL(a4l_mite_buf_change);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_lb);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_written_to_memory_ub);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_lb);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_read_from_memory_ub);
+EXPORT_SYMBOL_GPL(a4l_mite_bytes_in_transit);
+EXPORT_SYMBOL_GPL(a4l_mite_get_status);
+EXPORT_SYMBOL_GPL(a4l_mite_done);
+#ifdef CONFIG_DEBUG_MITE
+EXPORT_SYMBOL_GPL(a4l_mite_decode);
+EXPORT_SYMBOL_GPL(a4l_mite_dump_regs);
+#endif /* CONFIG_DEBUG_MITE */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h
new file mode 100644
index 0000000..3b9aaba
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/mite.h
@@ -0,0 +1,435 @@
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ * @note Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef __ANALOGY_NI_MITE_H__
+#define __ANALOGY_NI_MITE_H__
+
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <rtdm/analogy/device.h>
+
+#define PCI_VENDOR_ID_NATINST 0x1093
+#define PCI_MITE_SIZE 4096
+#define PCI_DAQ_SIZE 4096
+#define PCI_DAQ_SIZE_660X 8192
+#define PCIMIO_COMPAT
+#define MAX_MITE_DMA_CHANNELS 8
+
+#define TOP_OF_PAGE(x) ((x)|(~(PAGE_MASK)))
+
+struct mite_dma_descriptor {
+	u32 count;
+	u32 addr;
+	u32 next;
+	u32 dar;
+};
+
+struct mite_dma_descriptor_ring {
+	struct pci_dev *pcidev;
+	u32 n_links;
+	struct mite_dma_descriptor *descriptors;
+	dma_addr_t descriptors_dma_addr;
+};
+
+struct mite_channel {
+	struct mite_struct *mite;
+	u32 channel;
+	u32 dir;
+	u32 done;
+	struct mite_dma_descriptor_ring *ring;
+};
+
+struct mite_struct {
+	struct list_head list;
+	rtdm_lock_t lock;
+	u32 used;
+	u32 num_channels;
+
+	struct mite_channel channels[MAX_MITE_DMA_CHANNELS];
+	u32 channel_allocated[MAX_MITE_DMA_CHANNELS];
+
+	struct pci_dev *pcidev;
+	resource_size_t mite_phys_addr;
+	void *mite_io_addr;
+	resource_size_t daq_phys_addr;
+	void *daq_io_addr;
+};
+
+static inline
+struct mite_dma_descriptor_ring *mite_alloc_ring(struct mite_struct *mite)
+{
+	struct mite_dma_descriptor_ring *ring =
+		kzalloc(sizeof(struct mite_dma_descriptor_ring), GFP_DMA);
+
+	if (ring == NULL)
+		return NULL;
+
+	ring->pcidev = mite->pcidev;
+	if (ring->pcidev == NULL) {
+		kfree(ring);
+		return NULL;
+	}
+
+	return ring;
+};
+
+static inline void mite_free_ring(struct mite_dma_descriptor_ring *ring)
+{
+	if (ring) {
+		if (ring->descriptors) {
+			dma_free_coherent(
+				&ring->pcidev->dev,
+				ring->n_links *
+				sizeof(struct mite_dma_descriptor),
+				ring->descriptors, ring->descriptors_dma_addr);
+		}
+		kfree(ring);
+	}
+};
+
+static inline unsigned int mite_irq(struct mite_struct *mite)
+{
+	return mite->pcidev->irq;
+};
+static inline unsigned int mite_device_id(struct mite_struct *mite)
+{
+	return mite->pcidev->device;
+};
+
+int a4l_mite_setup(struct mite_struct *mite, int use_iodwbsr_1);
+void a4l_mite_unsetup(struct mite_struct *mite);
+void a4l_mite_list_devices(void);
+struct mite_struct *a4l_mite_find_device(int bus, int slot,
+					  unsigned short device_id);
+struct mite_channel *
+a4l_mite_request_channel_in_range(struct mite_struct *mite,
+				  struct mite_dma_descriptor_ring *ring,
+				  unsigned min_channel, unsigned max_channel);
+static inline struct mite_channel *
+mite_request_channel(struct mite_struct *mite,
+		     struct mite_dma_descriptor_ring *ring)
+{
+	return a4l_mite_request_channel_in_range(mite, ring, 0,
+						 mite->num_channels - 1);
+}
+void a4l_mite_release_channel(struct mite_channel *mite_chan);
+
+void a4l_mite_dma_arm(struct mite_channel *mite_chan);
+void a4l_mite_dma_disarm(struct mite_channel *mite_chan);
+int a4l_mite_sync_input_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd);
+int a4l_mite_sync_output_dma(struct mite_channel *mite_chan, struct a4l_subdevice *subd);
+u32 a4l_mite_bytes_written_to_memory_lb(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_written_to_memory_ub(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_read_from_memory_lb(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_read_from_memory_ub(struct mite_channel *mite_chan);
+u32 a4l_mite_bytes_in_transit(struct mite_channel *mite_chan);
+u32 a4l_mite_get_status(struct mite_channel *mite_chan);
+int a4l_mite_done(struct mite_channel *mite_chan);
+void a4l_mite_prep_dma(struct mite_channel *mite_chan,
+		   unsigned int num_device_bits, unsigned int num_memory_bits);
+int a4l_mite_buf_change(struct mite_dma_descriptor_ring *ring, struct a4l_subdevice *subd);
+
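+/*
+ * Typical call sequence for a board driver using one DMA channel
+ * (sketch only; "mite" and "subd" stand for the board's mite_struct
+ * and its buffered subdevice, error handling omitted):
+ *
+ *	struct mite_dma_descriptor_ring *ring = mite_alloc_ring(mite);
+ *	struct mite_channel *chan = mite_request_channel(mite, ring);
+ *
+ *	a4l_mite_buf_change(ring, subd);  // (re)build the descriptor ring
+ *	a4l_mite_prep_dma(chan, 16, 32);  // 16-bit device, 32-bit memory
+ *	a4l_mite_dma_arm(chan);
+ *	...
+ *	a4l_mite_dma_disarm(chan);
+ *	a4l_mite_release_channel(chan);
+ *	mite_free_ring(ring);
+ */
+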
+#ifdef CONFIG_DEBUG_MITE
+void mite_print_chsr(unsigned int chsr);
+void a4l_mite_dump_regs(struct mite_channel *mite_chan);
+#endif
+
+static inline int CHAN_OFFSET(int channel)
+{
+	return 0x500 + 0x100 * channel;
+};
+
+enum mite_registers {
+	/* The bits 0x90180700 in MITE_UNKNOWN_DMA_BURST_REG can be
+	   written and read back.  The bits 0x1f always read as 1.
+	   The rest always read as zero. */
+	MITE_UNKNOWN_DMA_BURST_REG = 0x28,
+	MITE_IODWBSR = 0xc0,	// IO Device Window Base Size Register
+	MITE_IODWBSR_1 = 0xc4,	// IO Device Window Base Size Register 1
+	MITE_IODWCR_1 = 0xf4,
+	MITE_PCI_CONFIG_OFFSET = 0x300,
+	MITE_CSIGR = 0x460	// chip signature
+};
+static inline int MITE_CHOR(int channel)	// channel operation
+{
+	return CHAN_OFFSET(channel) + 0x0;
+};
+static inline int MITE_CHCR(int channel)	// channel control
+{
+	return CHAN_OFFSET(channel) + 0x4;
+};
+static inline int MITE_TCR(int channel)	// transfer count
+{
+	return CHAN_OFFSET(channel) + 0x8;
+};
+static inline int MITE_MCR(int channel)	// memory configuration
+{
+	return CHAN_OFFSET(channel) + 0xc;
+};
+static inline int MITE_MAR(int channel)	// memory address
+{
+	return CHAN_OFFSET(channel) + 0x10;
+};
+static inline int MITE_DCR(int channel)	// device configuration
+{
+	return CHAN_OFFSET(channel) + 0x14;
+};
+static inline int MITE_DAR(int channel)	// device address
+{
+	return CHAN_OFFSET(channel) + 0x18;
+};
+static inline int MITE_LKCR(int channel)	// link configuration
+{
+	return CHAN_OFFSET(channel) + 0x1c;
+};
+static inline int MITE_LKAR(int channel)	// link address
+{
+	return CHAN_OFFSET(channel) + 0x20;
+};
+static inline int MITE_LLKAR(int channel)	// see mite section of tnt5002 manual
+{
+	return CHAN_OFFSET(channel) + 0x24;
+};
+static inline int MITE_BAR(int channel)	// base address
+{
+	return CHAN_OFFSET(channel) + 0x28;
+};
+static inline int MITE_BCR(int channel)	// base count
+{
+	return CHAN_OFFSET(channel) + 0x2c;
+};
+static inline int MITE_SAR(int channel)	// ? address
+{
+	return CHAN_OFFSET(channel) + 0x30;
+};
+static inline int MITE_WSCR(int channel)	// ?
+{
+	return CHAN_OFFSET(channel) + 0x34;
+};
+static inline int MITE_WSER(int channel)	// ?
+{
+	return CHAN_OFFSET(channel) + 0x38;
+};
+static inline int MITE_CHSR(int channel)	// channel status
+{
+	return CHAN_OFFSET(channel) + 0x3c;
+};
+static inline int MITE_FCR(int channel)	// fifo count
+{
+	return CHAN_OFFSET(channel) + 0x40;
+};
+
+enum MITE_IODWBSR_bits {
+	WENAB = 0x80,		// window enable
+};
+
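+/* Encodes a power-of-two window size (in bytes) as the 5-bit WSIZE
+   field: e.g. a 4 KB window (size 0x1000, order 12) encodes as 11 */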
+static inline unsigned MITE_IODWBSR_1_WSIZE_bits(unsigned size)
+{
+	unsigned order = 0;
+	while (size >>= 1)
+		++order;
+	BUG_ON(order < 1);
+	return (order - 1) & 0x1f;
+}
+
+enum MITE_UNKNOWN_DMA_BURST_bits {
+	UNKNOWN_DMA_BURST_ENABLE_BITS = 0x600
+};
+
+static inline int mite_csigr_version(u32 csigr_bits)
+{
+	return csigr_bits & 0xf;
+};
+static inline int mite_csigr_type(u32 csigr_bits)
+{				// original mite = 0, minimite = 1
+	return (csigr_bits >> 4) & 0xf;
+};
+static inline int mite_csigr_mmode(u32 csigr_bits)
+{				// mite mode, minimite = 1
+	return (csigr_bits >> 8) & 0x3;
+};
+static inline int mite_csigr_imode(u32 csigr_bits)
+{				// cpu port interface mode, pci = 0x3
+	return (csigr_bits >> 12) & 0x3;
+};
+static inline int mite_csigr_dmac(u32 csigr_bits)
+{				// number of dma channels
+	return (csigr_bits >> 16) & 0xf;
+};
+static inline int mite_csigr_wpdep(u32 csigr_bits)
+{				// write post fifo depth
+	unsigned int wpdep_bits = (csigr_bits >> 20) & 0x7;
+	if (wpdep_bits == 0)
+		return 0;
+	else
+		return 1 << (wpdep_bits - 1);
+};
+static inline int mite_csigr_wins(u32 csigr_bits)
+{
+	return (csigr_bits >> 24) & 0x1f;
+};
+static inline int mite_csigr_iowins(u32 csigr_bits)
+{				// number of io windows
+	return (csigr_bits >> 29) & 0x7;
+};
+
+enum MITE_MCR_bits {
+	MCRPON = 0,
+};
+
+enum MITE_DCR_bits {
+	DCR_NORMAL = (1 << 29),
+	DCRPON = 0,
+};
+
+enum MITE_CHOR_bits {
+	CHOR_DMARESET = (1 << 31),
+	CHOR_SET_SEND_TC = (1 << 11),
+	CHOR_CLR_SEND_TC = (1 << 10),
+	CHOR_SET_LPAUSE = (1 << 9),
+	CHOR_CLR_LPAUSE = (1 << 8),
+	CHOR_CLRDONE = (1 << 7),
+	CHOR_CLRRB = (1 << 6),
+	CHOR_CLRLC = (1 << 5),
+	CHOR_FRESET = (1 << 4),
+	CHOR_ABORT = (1 << 3),	/* stop without emptying fifo */
+	CHOR_STOP = (1 << 2),	/* stop after emptying fifo */
+	CHOR_CONT = (1 << 1),
+	CHOR_START = (1 << 0),
+	CHOR_PON = (CHOR_CLR_SEND_TC | CHOR_CLR_LPAUSE),
+};
+
+enum MITE_CHCR_bits {
+	CHCR_SET_DMA_IE = (1 << 31),
+	CHCR_CLR_DMA_IE = (1 << 30),
+	CHCR_SET_LINKP_IE = (1 << 29),
+	CHCR_CLR_LINKP_IE = (1 << 28),
+	CHCR_SET_SAR_IE = (1 << 27),
+	CHCR_CLR_SAR_IE = (1 << 26),
+	CHCR_SET_DONE_IE = (1 << 25),
+	CHCR_CLR_DONE_IE = (1 << 24),
+	CHCR_SET_MRDY_IE = (1 << 23),
+	CHCR_CLR_MRDY_IE = (1 << 22),
+	CHCR_SET_DRDY_IE = (1 << 21),
+	CHCR_CLR_DRDY_IE = (1 << 20),
+	CHCR_SET_LC_IE = (1 << 19),
+	CHCR_CLR_LC_IE = (1 << 18),
+	CHCR_SET_CONT_RB_IE = (1 << 17),
+	CHCR_CLR_CONT_RB_IE = (1 << 16),
+	CHCR_FIFODIS = (1 << 15),
+	CHCR_FIFO_ON = 0,
+	CHCR_BURSTEN = (1 << 14),
+	CHCR_NO_BURSTEN = 0,
+	CHCR_BYTE_SWAP_DEVICE = (1 << 6),
+	CHCR_BYTE_SWAP_MEMORY = (1 << 4),
+	CHCR_DIR = (1 << 3),
+	CHCR_DEV_TO_MEM = CHCR_DIR,
+	CHCR_MEM_TO_DEV = 0,
+	CHCR_NORMAL = (0 << 0),
+	CHCR_CONTINUE = (1 << 0),
+	CHCR_RINGBUFF = (2 << 0),
+	CHCR_LINKSHORT = (4 << 0),
+	CHCR_LINKLONG = (5 << 0),
+	CHCRPON =
+		(CHCR_CLR_DMA_IE | CHCR_CLR_LINKP_IE | CHCR_CLR_SAR_IE |
+		CHCR_CLR_DONE_IE | CHCR_CLR_MRDY_IE | CHCR_CLR_DRDY_IE |
+		CHCR_CLR_LC_IE | CHCR_CLR_CONT_RB_IE),
+};
+
+enum ConfigRegister_bits {
+	CR_REQS_MASK = 0x7 << 16,
+	CR_ASEQDONT = 0x0 << 10,
+	CR_ASEQUP = 0x1 << 10,
+	CR_ASEQDOWN = 0x2 << 10,
+	CR_ASEQ_MASK = 0x3 << 10,
+	CR_PSIZE8 = (1 << 8),
+	CR_PSIZE16 = (2 << 8),
+	CR_PSIZE32 = (3 << 8),
+	CR_PORTCPU = (0 << 6),
+	CR_PORTIO = (1 << 6),
+	CR_PORTVXI = (2 << 6),
+	CR_PORTMXI = (3 << 6),
+	CR_AMDEVICE = (1 << 0),
+};
+static inline int CR_REQS(int source)
+{
+	return (source & 0x7) << 16;
+};
+static inline int CR_REQSDRQ(unsigned drq_line)
+{
+	/* This also works on m-series when
+	   using channels (drq_line) 4 or 5. */
+	return CR_REQS((drq_line & 0x3) | 0x4);
+}
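+
+/* Encodes the retry limit as a 3-bit field at bit 21; the encoded value
+   is the number of significant bits in retry_limit, so the CR_RL(64)
+   used when preparing DMA transfers encodes as 7 */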
+static inline int CR_RL(unsigned int retry_limit)
+{
+	int value = 0;
+
+	while (retry_limit) {
+		retry_limit >>= 1;
+		value++;
+	}
+	if (value > 0x7)
+		__a4l_err("bug! retry_limit too large\n");
+
+	return (value & 0x7) << 21;
+}
+
+enum CHSR_bits {
+	CHSR_INT = (1 << 31),
+	CHSR_LPAUSES = (1 << 29),
+	CHSR_SARS = (1 << 27),
+	CHSR_DONE = (1 << 25),
+	CHSR_MRDY = (1 << 23),
+	CHSR_DRDY = (1 << 21),
+	CHSR_LINKC = (1 << 19),
+	CHSR_CONTS_RB = (1 << 17),
+	CHSR_ERROR = (1 << 15),
+	CHSR_SABORT = (1 << 14),
+	CHSR_HABORT = (1 << 13),
+	CHSR_STOPS = (1 << 12),
+	CHSR_OPERR_mask = (3 << 10),
+	CHSR_OPERR_NOERROR = (0 << 10),
+	CHSR_OPERR_FIFOERROR = (1 << 10),
+	CHSR_OPERR_LINKERROR = (1 << 10),	/* ??? */
+	CHSR_XFERR = (1 << 9),
+	CHSR_END = (1 << 8),
+	CHSR_DRQ1 = (1 << 7),
+	CHSR_DRQ0 = (1 << 6),
+	CHSR_LxERR_mask = (3 << 4),
+	CHSR_LBERR = (1 << 4),
+	CHSR_LRERR = (2 << 4),
+	CHSR_LOERR = (3 << 4),
+	CHSR_MxERR_mask = (3 << 2),
+	CHSR_MBERR = (1 << 2),
+	CHSR_MRERR = (2 << 2),
+	CHSR_MOERR = (3 << 2),
+	CHSR_DxERR_mask = (3 << 0),
+	CHSR_DBERR = (1 << 0),
+	CHSR_DRERR = (2 << 0),
+	CHSR_DOERR = (3 << 0),
+};
+
+static inline void mite_dma_reset(struct mite_channel *mite_chan)
+{
+	writel(CHOR_DMARESET | CHOR_FRESET,
+		mite_chan->mite->mite_io_addr + MITE_CHOR(mite_chan->channel));
+};
+
+#endif /* !__ANALOGY_NI_MITE_H__ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c
new file mode 100644
index 0000000..40a4b26
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_660x.c
@@ -0,0 +1,1481 @@
+/*
+ * comedi/drivers/ni_660x.c
+ * Hardware driver for NI 660x devices
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Driver: ni_660x
+ * Description: National Instruments 660x counter/timer boards
+ * Devices:
+ * [National Instruments] PCI-6601 (ni_660x), PCI-6602, PXI-6602,
+ * PXI-6608
+ * Author: J.P. Mellor <jpmellor@rose-hulman.edu>,
+ * Herman.Bruyninckx@mech.kuleuven.ac.be,
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ * Updated: Thu Oct 18 12:56:06 EDT 2007
+ * Status: experimental
+
+ * Encoders work.  PulseGeneration (both single pulse and pulse train)
+ * works. Buffered commands work for input but not output.
+
+ * References:
+ * DAQ 660x Register-Level Programmer Manual  (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ */
+
+/*
+ * Integration with Xenomai/Analogy layer based on the
+ * comedi driver. Adaptation made by
+ *   Julien Delange <julien.delange@esa.int>
+ */
+
+#include <linux/interrupt.h>
+
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+#include "ni_tio.h"
+#include "mite.h"
+
+enum io_direction {
+	DIRECTION_INPUT = 0,
+	DIRECTION_OUTPUT = 1,
+	DIRECTION_OPENDRAIN = 2
+};
+
+enum ni_660x_constants {
+	min_counter_pfi_chan = 8,
+	max_dio_pfi_chan = 31,
+	counters_per_chip = 4
+};
+
+struct ni_660x_subd_priv {
+	int			io_bits;
+	unsigned int		state;
+	uint16_t		readback[2];
+	uint16_t		config;
+	struct ni_gpct		*counter;
+};
+
+#define NUM_PFI_CHANNELS 40
+/* Really there are only up to 3 dma channels, but the register layout
+   allows for 4 */
+#define MAX_DMA_CHANNEL 4
+
+static struct a4l_channels_desc chandesc_ni660x = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = NUM_PFI_CHANNELS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, sizeof(sampl_t)},
+	},
+};
+
+#define subdev_priv ((struct ni_660x_subd_priv*)s->priv)
+
+/* See Register-Level Programmer Manual page 3.1 */
+enum NI_660x_Register {
+	G0InterruptAcknowledge,
+	G0StatusRegister,
+	G1InterruptAcknowledge,
+	G1StatusRegister,
+	G01StatusRegister,
+	G0CommandRegister,
+	STCDIOParallelInput,
+	G1CommandRegister,
+	G0HWSaveRegister,
+	G1HWSaveRegister,
+	STCDIOOutput,
+	STCDIOControl,
+	G0SWSaveRegister,
+	G1SWSaveRegister,
+	G0ModeRegister,
+	G01JointStatus1Register,
+	G1ModeRegister,
+	STCDIOSerialInput,
+	G0LoadARegister,
+	G01JointStatus2Register,
+	G0LoadBRegister,
+	G1LoadARegister,
+	G1LoadBRegister,
+	G0InputSelectRegister,
+	G1InputSelectRegister,
+	G0AutoincrementRegister,
+	G1AutoincrementRegister,
+	G01JointResetRegister,
+	G0InterruptEnable,
+	G1InterruptEnable,
+	G0CountingModeRegister,
+	G1CountingModeRegister,
+	G0SecondGateRegister,
+	G1SecondGateRegister,
+	G0DMAConfigRegister,
+	G0DMAStatusRegister,
+	G1DMAConfigRegister,
+	G1DMAStatusRegister,
+	G2InterruptAcknowledge,
+	G2StatusRegister,
+	G3InterruptAcknowledge,
+	G3StatusRegister,
+	G23StatusRegister,
+	G2CommandRegister,
+	G3CommandRegister,
+	G2HWSaveRegister,
+	G3HWSaveRegister,
+	G2SWSaveRegister,
+	G3SWSaveRegister,
+	G2ModeRegister,
+	G23JointStatus1Register,
+	G3ModeRegister,
+	G2LoadARegister,
+	G23JointStatus2Register,
+	G2LoadBRegister,
+	G3LoadARegister,
+	G3LoadBRegister,
+	G2InputSelectRegister,
+	G3InputSelectRegister,
+	G2AutoincrementRegister,
+	G3AutoincrementRegister,
+	G23JointResetRegister,
+	G2InterruptEnable,
+	G3InterruptEnable,
+	G2CountingModeRegister,
+	G3CountingModeRegister,
+	G3SecondGateRegister,
+	G2SecondGateRegister,
+	G2DMAConfigRegister,
+	G2DMAStatusRegister,
+	G3DMAConfigRegister,
+	G3DMAStatusRegister,
+	DIO32Input,
+	DIO32Output,
+	ClockConfigRegister,
+	GlobalInterruptStatusRegister,
+	DMAConfigRegister,
+	GlobalInterruptConfigRegister,
+	IOConfigReg0_1,
+	IOConfigReg2_3,
+	IOConfigReg4_5,
+	IOConfigReg6_7,
+	IOConfigReg8_9,
+	IOConfigReg10_11,
+	IOConfigReg12_13,
+	IOConfigReg14_15,
+	IOConfigReg16_17,
+	IOConfigReg18_19,
+	IOConfigReg20_21,
+	IOConfigReg22_23,
+	IOConfigReg24_25,
+	IOConfigReg26_27,
+	IOConfigReg28_29,
+	IOConfigReg30_31,
+	IOConfigReg32_33,
+	IOConfigReg34_35,
+	IOConfigReg36_37,
+	IOConfigReg38_39,
+	NumRegisters,
+};
+
+static inline unsigned IOConfigReg(unsigned pfi_channel)
+{
+	unsigned reg = IOConfigReg0_1 + pfi_channel / 2;
+	BUG_ON(reg > IOConfigReg38_39);
+	return reg;
+}
+
+enum ni_660x_register_width {
+	DATA_1B,
+	DATA_2B,
+	DATA_4B
+};
+
+enum ni_660x_register_direction {
+	NI_660x_READ,
+	NI_660x_WRITE,
+	NI_660x_READ_WRITE
+};
+
+enum ni_660x_pfi_output_select {
+	pfi_output_select_high_Z = 0,
+	pfi_output_select_counter = 1,
+	pfi_output_select_do = 2,
+	num_pfi_output_selects
+};
+
+enum ni_660x_subdevices {
+	NI_660X_DIO_SUBDEV = 1,
+	NI_660X_GPCT_SUBDEV_0 = 2
+};
+
+static inline unsigned NI_660X_GPCT_SUBDEV(unsigned index)
+{
+	return NI_660X_GPCT_SUBDEV_0 + index;
+}
+
+struct NI_660xRegisterData {
+
+	const char *name; /*  Register Name */
+	int offset; /*  Offset from base address from GPCT chip */
+	enum ni_660x_register_direction direction;
+	enum ni_660x_register_width size; /*  1 byte, 2 bytes, or 4 bytes */
+};
+
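+/* Several offsets are shared by a write-only and a read-only register:
+   e.g. offset 0x004 is "G0 Interrupt Acknowledge" on write and
+   "G0 Status Register" on read */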
+static const struct NI_660xRegisterData registerData[NumRegisters] = {
+	{"G0 Interrupt Acknowledge", 0x004, NI_660x_WRITE, DATA_2B},
+	{"G0 Status Register", 0x004, NI_660x_READ, DATA_2B},
+	{"G1 Interrupt Acknowledge", 0x006, NI_660x_WRITE, DATA_2B},
+	{"G1 Status Register", 0x006, NI_660x_READ, DATA_2B},
+	{"G01 Status Register ", 0x008, NI_660x_READ, DATA_2B},
+	{"G0 Command Register", 0x00C, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Parallel Input", 0x00E, NI_660x_READ, DATA_2B},
+	{"G1 Command Register", 0x00E, NI_660x_WRITE, DATA_2B},
+	{"G0 HW Save Register", 0x010, NI_660x_READ, DATA_4B},
+	{"G1 HW Save Register", 0x014, NI_660x_READ, DATA_4B},
+	{"STC DIO Output", 0x014, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Control", 0x016, NI_660x_WRITE, DATA_2B},
+	{"G0 SW Save Register", 0x018, NI_660x_READ, DATA_4B},
+	{"G1 SW Save Register", 0x01C, NI_660x_READ, DATA_4B},
+	{"G0 Mode Register", 0x034, NI_660x_WRITE, DATA_2B},
+	{"G01 Joint Status 1 Register", 0x036, NI_660x_READ, DATA_2B},
+	{"G1 Mode Register", 0x036, NI_660x_WRITE, DATA_2B},
+	{"STC DIO Serial Input", 0x038, NI_660x_READ, DATA_2B},
+	{"G0 Load A Register", 0x038, NI_660x_WRITE, DATA_4B},
+	{"G01 Joint Status 2 Register", 0x03A, NI_660x_READ, DATA_2B},
+	{"G0 Load B Register", 0x03C, NI_660x_WRITE, DATA_4B},
+	{"G1 Load A Register", 0x040, NI_660x_WRITE, DATA_4B},
+	{"G1 Load B Register", 0x044, NI_660x_WRITE, DATA_4B},
+	{"G0 Input Select Register", 0x048, NI_660x_WRITE, DATA_2B},
+	{"G1 Input Select Register", 0x04A, NI_660x_WRITE, DATA_2B},
+	{"G0 Autoincrement Register", 0x088, NI_660x_WRITE, DATA_2B},
+	{"G1 Autoincrement Register", 0x08A, NI_660x_WRITE, DATA_2B},
+	{"G01 Joint Reset Register", 0x090, NI_660x_WRITE, DATA_2B},
+	{"G0 Interrupt Enable", 0x092, NI_660x_WRITE, DATA_2B},
+	{"G1 Interrupt Enable", 0x096, NI_660x_WRITE, DATA_2B},
+	{"G0 Counting Mode Register", 0x0B0, NI_660x_WRITE, DATA_2B},
+	{"G1 Counting Mode Register", 0x0B2, NI_660x_WRITE, DATA_2B},
+	{"G0 Second Gate Register", 0x0B4, NI_660x_WRITE, DATA_2B},
+	{"G1 Second Gate Register", 0x0B6, NI_660x_WRITE, DATA_2B},
+	{"G0 DMA Config Register", 0x0B8, NI_660x_WRITE, DATA_2B},
+	{"G0 DMA Status Register", 0x0B8, NI_660x_READ, DATA_2B},
+	{"G1 DMA Config Register", 0x0BA, NI_660x_WRITE, DATA_2B},
+	{"G1 DMA Status Register", 0x0BA, NI_660x_READ, DATA_2B},
+	{"G2 Interrupt Acknowledge", 0x104, NI_660x_WRITE, DATA_2B},
+	{"G2 Status Register", 0x104, NI_660x_READ, DATA_2B},
+	{"G3 Interrupt Acknowledge", 0x106, NI_660x_WRITE, DATA_2B},
+	{"G3 Status Register", 0x106, NI_660x_READ, DATA_2B},
+	{"G23 Status Register", 0x108, NI_660x_READ, DATA_2B},
+	{"G2 Command Register", 0x10C, NI_660x_WRITE, DATA_2B},
+	{"G3 Command Register", 0x10E, NI_660x_WRITE, DATA_2B},
+	{"G2 HW Save Register", 0x110, NI_660x_READ, DATA_4B},
+	{"G3 HW Save Register", 0x114, NI_660x_READ, DATA_4B},
+	{"G2 SW Save Register", 0x118, NI_660x_READ, DATA_4B},
+	{"G3 SW Save Register", 0x11C, NI_660x_READ, DATA_4B},
+	{"G2 Mode Register", 0x134, NI_660x_WRITE, DATA_2B},
+	{"G23 Joint Status 1 Register", 0x136, NI_660x_READ, DATA_2B},
+	{"G3 Mode Register", 0x136, NI_660x_WRITE, DATA_2B},
+	{"G2 Load A Register", 0x138, NI_660x_WRITE, DATA_4B},
+	{"G23 Joint Status 2 Register", 0x13A, NI_660x_READ, DATA_2B},
+	{"G2 Load B Register", 0x13C, NI_660x_WRITE, DATA_4B},
+	{"G3 Load A Register", 0x140, NI_660x_WRITE, DATA_4B},
+	{"G3 Load B Register", 0x144, NI_660x_WRITE, DATA_4B},
+	{"G2 Input Select Register", 0x148, NI_660x_WRITE, DATA_2B},
+	{"G3 Input Select Register", 0x14A, NI_660x_WRITE, DATA_2B},
+	{"G2 Autoincrement Register", 0x188, NI_660x_WRITE, DATA_2B},
+	{"G3 Autoincrement Register", 0x18A, NI_660x_WRITE, DATA_2B},
+	{"G23 Joint Reset Register", 0x190, NI_660x_WRITE, DATA_2B},
+	{"G2 Interrupt Enable", 0x192, NI_660x_WRITE, DATA_2B},
+	{"G3 Interrupt Enable", 0x196, NI_660x_WRITE, DATA_2B},
+	{"G2 Counting Mode Register", 0x1B0, NI_660x_WRITE, DATA_2B},
+	{"G3 Counting Mode Register", 0x1B2, NI_660x_WRITE, DATA_2B},
+	{"G3 Second Gate Register", 0x1B6, NI_660x_WRITE, DATA_2B},
+	{"G2 Second Gate Register", 0x1B4, NI_660x_WRITE, DATA_2B},
+	{"G2 DMA Config Register", 0x1B8, NI_660x_WRITE, DATA_2B},
+	{"G2 DMA Status Register", 0x1B8, NI_660x_READ, DATA_2B},
+	{"G3 DMA Config Register", 0x1BA, NI_660x_WRITE, DATA_2B},
+	{"G3 DMA Status Register", 0x1BA, NI_660x_READ, DATA_2B},
+	{"32 bit Digital Input", 0x414, NI_660x_READ, DATA_4B},
+	{"32 bit Digital Output", 0x510, NI_660x_WRITE, DATA_4B},
+	{"Clock Config Register", 0x73C, NI_660x_WRITE, DATA_4B},
+	{"Global Interrupt Status Register", 0x754, NI_660x_READ, DATA_4B},
+	{"DMA Configuration Register", 0x76C, NI_660x_WRITE, DATA_4B},
+	{"Global Interrupt Config Register", 0x770, NI_660x_WRITE, DATA_4B},
+	{"IO Config Register 0-1", 0x77C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 2-3", 0x77E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 4-5", 0x780, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 6-7", 0x782, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 8-9", 0x784, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 10-11", 0x786, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 12-13", 0x788, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 14-15", 0x78A, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 16-17", 0x78C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 18-19", 0x78E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 20-21", 0x790, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 22-23", 0x792, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 24-25", 0x794, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 26-27", 0x796, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 28-29", 0x798, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 30-31", 0x79A, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 32-33", 0x79C, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 34-35", 0x79E, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 36-37", 0x7A0, NI_660x_READ_WRITE, DATA_2B},
+	{"IO Config Register 38-39", 0x7A2, NI_660x_READ_WRITE, DATA_2B}
+};
+
+/* Acts as a kind of enable for the second counter chip */
+enum clock_config_register_bits {
+	CounterSwap = 0x1 << 21
+};
+
+/* Each 16-bit IO config register covers two PFI channels, the even
+   channel in the upper byte and the odd channel in the lower byte */
+static inline unsigned ioconfig_bitshift(unsigned pfi_channel)
+{
+	if (pfi_channel % 2)
+		return 0;
+	else
+		return 8;
+}
+
+static inline unsigned pfi_output_select_mask(unsigned pfi_channel)
+{
+	return 0x3 << ioconfig_bitshift(pfi_channel);
+}
+
+static inline unsigned pfi_output_select_bits(unsigned pfi_channel,
+					      unsigned output_select)
+{
+	return (output_select & 0x3) << ioconfig_bitshift(pfi_channel);
+}
+
+static inline unsigned pfi_input_select_mask(unsigned pfi_channel)
+{
+	return 0x7 << (4 + ioconfig_bitshift(pfi_channel));
+}
+
+static inline unsigned pfi_input_select_bits(unsigned pfi_channel,
+					     unsigned input_select)
+{
+	return (input_select & 0x7) << (4 + ioconfig_bitshift(pfi_channel));
+}
+
+/* Dma configuration register bits */
+static inline unsigned dma_select_mask(unsigned dma_channel)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return 0x1f << (8 * dma_channel);
+}
+
+enum dma_selection {
+	dma_selection_none = 0x1f,
+};
+
+static inline unsigned dma_selection_counter(unsigned counter_index)
+{
+	BUG_ON(counter_index >= counters_per_chip);
+	return counter_index;
+}
+
+static inline unsigned dma_select_bits(unsigned dma_channel, unsigned selection)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return (selection << (8 * dma_channel)) & dma_select_mask(dma_channel);
+}
+
+static inline unsigned dma_reset_bit(unsigned dma_channel)
+{
+	BUG_ON(dma_channel >= MAX_DMA_CHANNEL);
+	return 0x80 << (8 * dma_channel);
+}
+
+enum global_interrupt_status_register_bits {
+	Counter_0_Int_Bit = 0x100,
+	Counter_1_Int_Bit = 0x200,
+	Counter_2_Int_Bit = 0x400,
+	Counter_3_Int_Bit = 0x800,
+	Cascade_Int_Bit = 0x20000000,
+	Global_Int_Bit = 0x80000000
+};
+
+enum global_interrupt_config_register_bits {
+	Cascade_Int_Enable_Bit = 0x20000000,
+	Global_Int_Polarity_Bit = 0x40000000,
+	Global_Int_Enable_Bit = 0x80000000
+};
+
+/* Offset of the GPCT chips from the base-address of the card:
+   First chip is at base-address +0x00, etc. */
+static const unsigned GPCT_OFFSET[2] = { 0x0, 0x800 };
+
+/* Board description */
+struct ni_660x_board {
+	unsigned short dev_id;	/* `lspci` will show you this */
+	const char *name;
+	unsigned n_chips;	/* total number of TIO chips */
+};
+
+static const struct ni_660x_board ni_660x_boards[] = {
+	{
+	 .dev_id = 0x2c60,
+	 .name = "PCI-6601",
+	 .n_chips = 1,
+	 },
+	{
+	 .dev_id = 0x1310,
+	 .name = "PCI-6602",
+	 .n_chips = 2,
+	 },
+	{
+	 .dev_id = 0x1360,
+	 .name = "PXI-6602",
+	 .n_chips = 2,
+	 },
+	{
+	 .dev_id = 0x2cc0,
+	 .name = "PXI-6608",
+	 .n_chips = 2,
+	 },
+};
+
+#define NI_660X_MAX_NUM_CHIPS 2
+#define NI_660X_MAX_NUM_COUNTERS (NI_660X_MAX_NUM_CHIPS * counters_per_chip)
+
+static const struct pci_device_id ni_660x_pci_table[] = {
+	{PCI_VENDOR_ID_NATINST, 0x2c60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_NATINST, 0x1310, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_NATINST, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{PCI_VENDOR_ID_NATINST, 0x2cc0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, ni_660x_pci_table);
+
+struct ni_660x_private {
+	struct mite_struct *mite;
+	struct ni_gpct_device *counter_dev;
+	uint64_t pfi_direction_bits;
+
+	struct mite_dma_descriptor_ring
+	  *mite_rings[NI_660X_MAX_NUM_CHIPS][counters_per_chip];
+
+	rtdm_lock_t mite_channel_lock;
+	/* Interrupt_lock prevents races between interrupt and
+	   comedi_poll */
+	rtdm_lock_t interrupt_lock;
+	unsigned int dma_configuration_soft_copies[NI_660X_MAX_NUM_CHIPS];
+	rtdm_lock_t soft_reg_copy_lock;
+	unsigned short pfi_output_selects[NUM_PFI_CHANNELS];
+
+	struct ni_660x_board *board_ptr;
+};
+
+#undef devpriv
+#define devpriv ((struct ni_660x_private *)dev->priv)
+
+static inline struct ni_660x_private *private(struct a4l_device *dev)
+{
+	return (struct ni_660x_private*) dev->priv;
+}
+
+/* Initialized in ni_660x_find_device() */
+static inline const struct ni_660x_board *board(struct a4l_device *dev)
+{
+	return ((struct ni_660x_private*)dev->priv)->board_ptr;
+}
+
+#define n_ni_660x_boards ARRAY_SIZE(ni_660x_boards)
+
+static int ni_660x_attach(struct a4l_device *dev,
+					 a4l_lnkdesc_t *arg);
+static int ni_660x_detach(struct a4l_device *dev);
+static void init_tio_chip(struct a4l_device *dev, int chipset);
+static void ni_660x_select_pfi_output(struct a4l_device *dev,
+				      unsigned pfi_channel,
+				      unsigned output_select);
+
+static struct a4l_driver ni_660x_drv = {
+	.board_name = "analogy_ni_660x",
+	.driver_name = "ni_660x",
+	.owner = THIS_MODULE,
+	.attach = ni_660x_attach,
+	.detach = ni_660x_detach,
+	.privdata_size = sizeof(struct ni_660x_private),
+};
+
+static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan,
+				   unsigned source);
+
+/* Possible instructions for a GPCT */
+static int ni_660x_GPCT_rinsn(
+			      struct a4l_subdevice *s,
+			      struct a4l_kernel_instruction *insn);
+static int ni_660x_GPCT_insn_config(
+				    struct a4l_subdevice *s,
+				    struct a4l_kernel_instruction *insn);
+static int ni_660x_GPCT_winsn(
+			      struct a4l_subdevice *s,
+			      struct a4l_kernel_instruction *insn);
+
+/* Possible instructions for Digital IO */
+static int ni_660x_dio_insn_config(
+	       struct a4l_subdevice *s,
+	       struct a4l_kernel_instruction *insn);
+static int ni_660x_dio_insn_bits(
+	     struct a4l_subdevice *s,
+	     struct a4l_kernel_instruction *insn);
+
+static inline unsigned ni_660x_num_counters(struct a4l_device *dev)
+{
+	return board(dev)->n_chips * counters_per_chip;
+}
+
+static enum NI_660x_Register ni_gpct_to_660x_register(enum ni_gpct_register reg)
+{
+	enum NI_660x_Register ni_660x_register;
+
+	switch (reg) {
+	case NITIO_G0_Autoincrement_Reg:
+		ni_660x_register = G0AutoincrementRegister;
+		break;
+	case NITIO_G1_Autoincrement_Reg:
+		ni_660x_register = G1AutoincrementRegister;
+		break;
+	case NITIO_G2_Autoincrement_Reg:
+		ni_660x_register = G2AutoincrementRegister;
+		break;
+	case NITIO_G3_Autoincrement_Reg:
+		ni_660x_register = G3AutoincrementRegister;
+		break;
+	case NITIO_G0_Command_Reg:
+		ni_660x_register = G0CommandRegister;
+		break;
+	case NITIO_G1_Command_Reg:
+		ni_660x_register = G1CommandRegister;
+		break;
+	case NITIO_G2_Command_Reg:
+		ni_660x_register = G2CommandRegister;
+		break;
+	case NITIO_G3_Command_Reg:
+		ni_660x_register = G3CommandRegister;
+		break;
+	case NITIO_G0_HW_Save_Reg:
+		ni_660x_register = G0HWSaveRegister;
+		break;
+	case NITIO_G1_HW_Save_Reg:
+		ni_660x_register = G1HWSaveRegister;
+		break;
+	case NITIO_G2_HW_Save_Reg:
+		ni_660x_register = G2HWSaveRegister;
+		break;
+	case NITIO_G3_HW_Save_Reg:
+		ni_660x_register = G3HWSaveRegister;
+		break;
+	case NITIO_G0_SW_Save_Reg:
+		ni_660x_register = G0SWSaveRegister;
+		break;
+	case NITIO_G1_SW_Save_Reg:
+		ni_660x_register = G1SWSaveRegister;
+		break;
+	case NITIO_G2_SW_Save_Reg:
+		ni_660x_register = G2SWSaveRegister;
+		break;
+	case NITIO_G3_SW_Save_Reg:
+		ni_660x_register = G3SWSaveRegister;
+		break;
+	case NITIO_G0_Mode_Reg:
+		ni_660x_register = G0ModeRegister;
+		break;
+	case NITIO_G1_Mode_Reg:
+		ni_660x_register = G1ModeRegister;
+		break;
+	case NITIO_G2_Mode_Reg:
+		ni_660x_register = G2ModeRegister;
+		break;
+	case NITIO_G3_Mode_Reg:
+		ni_660x_register = G3ModeRegister;
+		break;
+	case NITIO_G0_LoadA_Reg:
+		ni_660x_register = G0LoadARegister;
+		break;
+	case NITIO_G1_LoadA_Reg:
+		ni_660x_register = G1LoadARegister;
+		break;
+	case NITIO_G2_LoadA_Reg:
+		ni_660x_register = G2LoadARegister;
+		break;
+	case NITIO_G3_LoadA_Reg:
+		ni_660x_register = G3LoadARegister;
+		break;
+	case NITIO_G0_LoadB_Reg:
+		ni_660x_register = G0LoadBRegister;
+		break;
+	case NITIO_G1_LoadB_Reg:
+		ni_660x_register = G1LoadBRegister;
+		break;
+	case NITIO_G2_LoadB_Reg:
+		ni_660x_register = G2LoadBRegister;
+		break;
+	case NITIO_G3_LoadB_Reg:
+		ni_660x_register = G3LoadBRegister;
+		break;
+	case NITIO_G0_Input_Select_Reg:
+		ni_660x_register = G0InputSelectRegister;
+		break;
+	case NITIO_G1_Input_Select_Reg:
+		ni_660x_register = G1InputSelectRegister;
+		break;
+	case NITIO_G2_Input_Select_Reg:
+		ni_660x_register = G2InputSelectRegister;
+		break;
+	case NITIO_G3_Input_Select_Reg:
+		ni_660x_register = G3InputSelectRegister;
+		break;
+	case NITIO_G01_Status_Reg:
+		ni_660x_register = G01StatusRegister;
+		break;
+	case NITIO_G23_Status_Reg:
+		ni_660x_register = G23StatusRegister;
+		break;
+	case NITIO_G01_Joint_Reset_Reg:
+		ni_660x_register = G01JointResetRegister;
+		break;
+	case NITIO_G23_Joint_Reset_Reg:
+		ni_660x_register = G23JointResetRegister;
+		break;
+	case NITIO_G01_Joint_Status1_Reg:
+		ni_660x_register = G01JointStatus1Register;
+		break;
+	case NITIO_G23_Joint_Status1_Reg:
+		ni_660x_register = G23JointStatus1Register;
+		break;
+	case NITIO_G01_Joint_Status2_Reg:
+		ni_660x_register = G01JointStatus2Register;
+		break;
+	case NITIO_G23_Joint_Status2_Reg:
+		ni_660x_register = G23JointStatus2Register;
+		break;
+	case NITIO_G0_Counting_Mode_Reg:
+		ni_660x_register = G0CountingModeRegister;
+		break;
+	case NITIO_G1_Counting_Mode_Reg:
+		ni_660x_register = G1CountingModeRegister;
+		break;
+	case NITIO_G2_Counting_Mode_Reg:
+		ni_660x_register = G2CountingModeRegister;
+		break;
+	case NITIO_G3_Counting_Mode_Reg:
+		ni_660x_register = G3CountingModeRegister;
+		break;
+	case NITIO_G0_Second_Gate_Reg:
+		ni_660x_register = G0SecondGateRegister;
+		break;
+	case NITIO_G1_Second_Gate_Reg:
+		ni_660x_register = G1SecondGateRegister;
+		break;
+	case NITIO_G2_Second_Gate_Reg:
+		ni_660x_register = G2SecondGateRegister;
+		break;
+	case NITIO_G3_Second_Gate_Reg:
+		ni_660x_register = G3SecondGateRegister;
+		break;
+	case NITIO_G0_DMA_Config_Reg:
+		ni_660x_register = G0DMAConfigRegister;
+		break;
+	case NITIO_G0_DMA_Status_Reg:
+		ni_660x_register = G0DMAStatusRegister;
+		break;
+	case NITIO_G1_DMA_Config_Reg:
+		ni_660x_register = G1DMAConfigRegister;
+		break;
+	case NITIO_G1_DMA_Status_Reg:
+		ni_660x_register = G1DMAStatusRegister;
+		break;
+	case NITIO_G2_DMA_Config_Reg:
+		ni_660x_register = G2DMAConfigRegister;
+		break;
+	case NITIO_G2_DMA_Status_Reg:
+		ni_660x_register = G2DMAStatusRegister;
+		break;
+	case NITIO_G3_DMA_Config_Reg:
+		ni_660x_register = G3DMAConfigRegister;
+		break;
+	case NITIO_G3_DMA_Status_Reg:
+		ni_660x_register = G3DMAStatusRegister;
+		break;
+	case NITIO_G0_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G0InterruptAcknowledge;
+		break;
+	case NITIO_G1_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G1InterruptAcknowledge;
+		break;
+	case NITIO_G2_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G2InterruptAcknowledge;
+		break;
+	case NITIO_G3_Interrupt_Acknowledge_Reg:
+		ni_660x_register = G3InterruptAcknowledge;
+		break;
+	case NITIO_G0_Status_Reg:
+		ni_660x_register = G0StatusRegister;
+		break;
+	case NITIO_G1_Status_Reg:
+		ni_660x_register = G1StatusRegister;
+		break;
+	case NITIO_G2_Status_Reg:
+		ni_660x_register = G2StatusRegister;
+		break;
+	case NITIO_G3_Status_Reg:
+		ni_660x_register = G3StatusRegister;
+		break;
+	case NITIO_G0_Interrupt_Enable_Reg:
+		ni_660x_register = G0InterruptEnable;
+		break;
+	case NITIO_G1_Interrupt_Enable_Reg:
+		ni_660x_register = G1InterruptEnable;
+		break;
+	case NITIO_G2_Interrupt_Enable_Reg:
+		ni_660x_register = G2InterruptEnable;
+		break;
+	case NITIO_G3_Interrupt_Enable_Reg:
+		ni_660x_register = G3InterruptEnable;
+		break;
+	default:
+		__a4l_err("%s: unhandled register 0x%x in switch.\n",
+			  __FUNCTION__, reg);
+		BUG();
+		return 0;
+	}
+	return ni_660x_register;
+}
+
+static inline void ni_660x_write_register(struct a4l_device *dev,
+					  unsigned chip_index, unsigned bits,
+					  enum NI_660x_Register reg)
+{
+	void *const write_address =
+	    private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+	    registerData[reg].offset;
+
+	switch (registerData[reg].size) {
+	case DATA_2B:
+		writew(bits, write_address);
+		break;
+	case DATA_4B:
+		writel(bits, write_address);
+		break;
+	default:
+		__a4l_err("%s: %s: bug! unhandled case (reg=0x%x) in switch.\n",
+			  __FILE__, __FUNCTION__, reg);
+		BUG();
+		break;
+	}
+}
+
+static inline unsigned ni_660x_read_register(struct a4l_device *dev,
+					     unsigned chip_index,
+					     enum NI_660x_Register reg)
+{
+	void *const read_address =
+	    private(dev)->mite->daq_io_addr + GPCT_OFFSET[chip_index] +
+	    registerData[reg].offset;
+
+	switch (registerData[reg].size) {
+	case DATA_2B:
+		return readw(read_address);
+	case DATA_4B:
+		return readl(read_address);
+	default:
+		__a4l_err("%s: %s: bug! unhandled case (reg=0x%x) in switch.\n",
+			  __FILE__, __FUNCTION__, reg);
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static void ni_gpct_write_register(struct ni_gpct *counter,
+				   unsigned int bits, enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
+
+	ni_660x_write_register(dev, counter->chip_index, bits,
+			       ni_660x_register);
+}
+
+static unsigned ni_gpct_read_register(struct ni_gpct *counter,
+				      enum ni_gpct_register reg)
+{
+	struct a4l_device *dev = counter->counter_dev->dev;
+	enum NI_660x_Register ni_660x_register = ni_gpct_to_660x_register(reg);
+
+	return ni_660x_read_register(dev, counter->chip_index,
+				     ni_660x_register);
+}
+
+static inline
+struct mite_dma_descriptor_ring *mite_ring(struct ni_660x_private *priv,
+					   struct ni_gpct *counter)
+{
+	return priv->mite_rings[counter->chip_index][counter->counter_index];
+}
+
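+/* The per-chip DMA configuration register is mirrored in
+ * dma_configuration_soft_copies[], updated under soft_reg_copy_lock, so
+ * that the select field of one MITE channel can be changed without
+ * clobbering the others. */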
+static inline
+void ni_660x_set_dma_channel(struct a4l_device *dev,
+			     unsigned int mite_channel, struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] &=
+	    ~dma_select_mask(mite_channel);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] |=
+	    dma_select_bits(mite_channel,
+			    dma_selection_counter(counter->counter_index));
+	ni_660x_write_register(dev, counter->chip_index,
+			       private(dev)->
+			       dma_configuration_soft_copies
+			       [counter->chip_index] |
+			       dma_reset_bit(mite_channel), DMAConfigRegister);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags);
+}
+
+static inline
+void ni_660x_unset_dma_channel(struct a4l_device *dev,
+			       unsigned int mite_channel,
+			       struct ni_gpct *counter)
+{
+	unsigned long flags;
+	rtdm_lock_get_irqsave(&private(dev)->soft_reg_copy_lock, flags);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] &=
+	    ~dma_select_mask(mite_channel);
+	private(dev)->dma_configuration_soft_copies[counter->chip_index] |=
+	    dma_select_bits(mite_channel, dma_selection_none);
+	ni_660x_write_register(dev, counter->chip_index,
+			       private(dev)->
+			       dma_configuration_soft_copies
+			       [counter->chip_index], DMAConfigRegister);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&private(dev)->soft_reg_copy_lock, flags);
+}
+
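+/* Reserve a free MITE DMA channel for the given counter, bind it to the
+ * counter's descriptor ring and route it in the DMA config register;
+ * returns -EBUSY when all channels are in use. */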
+static int ni_660x_request_mite_channel(struct a4l_device *dev,
+					struct ni_gpct *counter,
+					enum io_direction direction)
+{
+	unsigned long flags;
+	struct mite_channel *mite_chan;
+
+	rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags);
+	BUG_ON(counter->mite_chan);
+	mite_chan = mite_request_channel(private(dev)->mite,
+					 mite_ring(private(dev), counter));
+	if (mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+		a4l_err(dev,
+			"%s: failed to reserve mite dma channel for counter.\n",
+			__FUNCTION__);
+		return -EBUSY;
+	}
+	mite_chan->dir = direction;
+	a4l_ni_tio_set_mite_channel(counter, mite_chan);
+	ni_660x_set_dma_channel(dev, mite_chan->channel, counter);
+	rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+	return 0;
+}
+
+void ni_660x_release_mite_channel(struct a4l_device *dev,
+				  struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&private(dev)->mite_channel_lock, flags);
+	if (counter->mite_chan) {
+		struct mite_channel *mite_chan = counter->mite_chan;
+
+		ni_660x_unset_dma_channel(dev, mite_chan->channel, counter);
+		a4l_ni_tio_set_mite_channel(counter, NULL);
+		a4l_mite_release_channel(mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&private(dev)->mite_channel_lock, flags);
+}
+
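+/* Command path: reserve a DMA channel for input, clear any pending
+ * interrupt state, then let the generic NI TIO layer run the command. */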
+static int ni_660x_cmd(struct a4l_subdevice *s, struct a4l_cmd_desc* cmd)
+{
+	int retval;
+
+	struct ni_gpct *counter = subdev_priv->counter;
+
+	retval = ni_660x_request_mite_channel(s->dev, counter, A4L_INPUT);
+	if (retval) {
+		a4l_err(s->dev,
+			"%s: no dma channel available for use by counter\n",
+			__FUNCTION__);
+		return retval;
+	}
+
+	a4l_ni_tio_acknowledge_and_confirm (counter, NULL, NULL, NULL, NULL);
+	retval = a4l_ni_tio_cmd(counter, cmd);
+
+	return retval;
+}
+
+static int ni_660x_cmdtest(struct a4l_subdevice *s, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct *counter = subdev_priv->counter;
+	return a4l_ni_tio_cmdtest(counter, cmd);
+}
+
+static int ni_660x_cancel(struct a4l_subdevice *s)
+{
+	struct ni_gpct *counter = subdev_priv->counter;
+	int retval;
+
+	retval = a4l_ni_tio_cancel(counter);
+	ni_660x_release_mite_channel(s->dev, counter);
+	return retval;
+}
+
+static void set_tio_counterswap(struct a4l_device *dev, int chipset)
+{
+	/* See P. 3.5 of the Register-Level Programming manual.  The
+	   CounterSwap bit has to be set on the second chip, otherwise
+	   it will try to use the same pins as the first chip.
+	 */
+
+	if (chipset)
+		ni_660x_write_register(dev,
+				       chipset,
+				       CounterSwap, ClockConfigRegister);
+	else
+		ni_660x_write_register(dev,
+				       chipset, 0, ClockConfigRegister);
+}
+
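+/* Per-subdevice interrupt service: the TIO layer acknowledges the event
+ * and fills the buffer; on end-of-acquisition or error the command is
+ * cancelled, otherwise buffer waiters are simply notified. */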
+static void ni_660x_handle_gpct_interrupt(struct a4l_device *dev,
+					  struct a4l_subdevice *s)
+{
+	struct a4l_buffer *buf = s->buf;
+
+	a4l_ni_tio_handle_interrupt(subdev_priv->counter, dev);
+	if (test_bit(A4L_BUF_EOA_NR, &buf->flags) ||
+	    test_bit(A4L_BUF_ERROR_NR, &buf->flags))
+		ni_660x_cancel(s);
+	else
+		a4l_buf_evt(s, 0);
+}
+
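+/* Top-level (shared) IRQ handler: bail out if the device is not fully
+ * attached, then walk the subdevice queue under interrupt_lock. */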
+static int ni_660x_interrupt(unsigned int irq, void *d)
+{
+	struct a4l_device *dev = d;
+	struct a4l_subdevice *subd;
+	unsigned long flags;
+
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags))
+		return -ENOENT;
+
+	/* Lock to avoid race with comedi_poll */
+	rtdm_lock_get_irqsave(&private(dev)->interrupt_lock, flags);
+	smp_mb();
+
+	list_for_each_entry(subd, &dev->subdvsq, list)
+		ni_660x_handle_gpct_interrupt(dev, subd);
+
+	rtdm_lock_put_irqrestore(&private(dev)->interrupt_lock, flags);
+	return 0;
+}
+
+static int ni_660x_alloc_mite_rings(struct a4l_device *dev)
+{
+	unsigned int i;
+	unsigned int j;
+
+	for (i = 0; i < board(dev)->n_chips; ++i) {
+		for (j = 0; j < counters_per_chip; ++j) {
+			private(dev)->mite_rings[i][j] =
+				mite_alloc_ring(private(dev)->mite);
+			if (private(dev)->mite_rings[i][j] == NULL)
+				return -ENOMEM;
+		}
+	}
+
+	return 0;
+}
+
+static void ni_660x_free_mite_rings(struct a4l_device *dev)
+{
+	unsigned int i;
+	unsigned int j;
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		for (j = 0; j < counters_per_chip; ++j)
+			mite_free_ring(private(dev)->mite_rings[i][j]);
+}
+
+
+static int __init driver_ni_660x_init_module(void)
+{
+	return a4l_register_drv (&ni_660x_drv);
+}
+
+static void __exit driver_ni_660x_cleanup_module(void)
+{
+	a4l_unregister_drv (&ni_660x_drv);
+}
+
+module_init(driver_ni_660x_init_module);
+module_exit(driver_ni_660x_cleanup_module);
+
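+/* Attach: the optional link descriptor arguments are up to two unsigned
+ * longs, the PCI bus and slot numbers; missing values default to 0. */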
+static int ni_660x_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	struct a4l_subdevice *s;
+	int ret;
+	int err;
+	int bus, slot;
+	unsigned i;
+	int nsubdev = 0;
+	unsigned global_interrupt_config_bits;
+	struct mite_struct *mitedev;
+	struct ni_660x_board* boardptr = NULL;
+
+	ret = 0;
+	bus = slot = 0;
+	mitedev = NULL;
+	nsubdev = 0;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	for (i = 0; ( i < n_ni_660x_boards ) && ( mitedev == NULL ); i++) {
+		mitedev  = a4l_mite_find_device(bus, slot,
+						ni_660x_boards[i].dev_id);
+		boardptr = (struct ni_660x_board*) &ni_660x_boards[i];
+	}
+
+
+	if(mitedev == NULL) {
+		a4l_info(dev, "mite device not found\n");
+		return -ENOENT;
+	}
+
+	a4l_info(dev, "Board found (name=%s), continue initialization ...",
+		 boardptr->name);
+
+	private(dev)->mite      = mitedev;
+	private(dev)->board_ptr = boardptr;
+
+	rtdm_lock_init(&private(dev)->mite_channel_lock);
+	rtdm_lock_init(&private(dev)->interrupt_lock);
+	rtdm_lock_init(&private(dev)->soft_reg_copy_lock);
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
+		private(dev)->pfi_output_selects[i] = pfi_output_select_counter;
+	}
+
+	ret = a4l_mite_setup(private(dev)->mite, 1);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__);
+		return ret;
+	}
+
+	ret = ni_660x_alloc_mite_rings(dev);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite rings\n", __FUNCTION__);
+		return ret;
+	}
+
+	/* Setup first subdevice */
+	s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+	if (s == NULL)
+		return -ENOMEM;
+
+	s->flags = A4L_SUBD_UNUSED;
+
+	err = a4l_add_subd(dev, s);
+	if (err != nsubdev) {
+		a4l_info(dev, "cannot add first subdevice, returns %d, expect %d\n", err, i);
+		return err;
+	}
+
+	nsubdev++;
+
+	/* Setup second subdevice */
+	s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+	if (s == NULL) {
+		a4l_info(dev, "cannot allocate second subdevice\n");
+		return -ENOMEM;
+	}
+
+	s->flags          = A4L_SUBD_DIO;
+	s->flags         |= A4L_SUBD_CMD;
+	s->chan_desc      = &chandesc_ni660x;
+	s->rng_desc       = &range_digital;
+	s->insn_bits      = ni_660x_dio_insn_bits;
+	s->insn_config    = ni_660x_dio_insn_config;
+	s->dev            = dev;
+	subdev_priv->io_bits = 0;
+	ni_660x_write_register(dev, 0, 0, STCDIOControl);
+
+	err = a4l_add_subd(dev, s);
+	if (err != nsubdev)
+		return err;
+
+	nsubdev++;
+
+	private(dev)->counter_dev =
+		a4l_ni_gpct_device_construct(dev,
+					     &ni_gpct_write_register,
+					     &ni_gpct_read_register,
+					     ni_gpct_variant_660x,
+					     ni_660x_num_counters (dev));
+	if (private(dev)->counter_dev == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < ni_660x_num_counters(dev); ++i) {
+		/* TODO: check why these are kmalloc'ed here... same in pcimio */
+		private(dev)->counter_dev->counters[i] =
+			kmalloc(sizeof(struct ni_gpct), GFP_KERNEL);
+		if (private(dev)->counter_dev->counters[i] == NULL)
+			return -ENOMEM;
+		private(dev)->counter_dev->counters[i]->counter_dev =
+			private(dev)->counter_dev;
+		rtdm_lock_init(&(private(dev)->counter_dev->counters[i]->lock));
+	}
+
+	for (i = 0; i < NI_660X_MAX_NUM_COUNTERS; ++i) {
+		if (i < ni_660x_num_counters(dev)) {
+			/* Setup other subdevice */
+			s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+
+			if (s == NULL)
+				return -ENOMEM;
+
+			s->flags             = A4L_SUBD_COUNTER;
+			s->chan_desc         = rtdm_malloc(sizeof(struct a4l_channels_desc));
+			if (s->chan_desc == NULL)
+				return -ENOMEM;
+			s->chan_desc->length = 3;
+			s->insn_read         = ni_660x_GPCT_rinsn;
+			s->insn_write        = ni_660x_GPCT_winsn;
+			s->insn_config       = ni_660x_GPCT_insn_config;
+			s->do_cmd            = &ni_660x_cmd;
+			s->do_cmdtest        = &ni_660x_cmdtest;
+			s->cancel            = &ni_660x_cancel;
+
+			subdev_priv->counter = private(dev)->counter_dev->counters[i];
+
+			private(dev)->counter_dev->counters[i]->chip_index =
+				i / counters_per_chip;
+			private(dev)->counter_dev->counters[i]->counter_index =
+				i % counters_per_chip;
+		} else {
+			s = a4l_alloc_subd(sizeof(struct ni_660x_subd_priv), NULL);
+			if (s == NULL)
+				return -ENOMEM;
+			s->flags = A4L_SUBD_UNUSED;
+		}
+
+		err = a4l_add_subd(dev, s);
+
+		if (err != nsubdev)
+			return err;
+
+		nsubdev++;
+	}
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		init_tio_chip(dev, i);
+
+	for (i = 0; i < ni_660x_num_counters(dev); ++i)
+		a4l_ni_tio_init_counter(private(dev)->counter_dev->counters[i]);
+
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i) {
+		if (i < min_counter_pfi_chan)
+			ni_660x_set_pfi_routing(dev, i, pfi_output_select_do);
+		else
+			ni_660x_set_pfi_routing(dev, i,
+						pfi_output_select_counter);
+		ni_660x_select_pfi_output(dev, i, pfi_output_select_high_Z);
+	}
+
+
+	/* To be safe, set counterswap bits on tio chips after all the
+	   counter outputs have been set to high impedance mode */
+
+	for (i = 0; i < board(dev)->n_chips; ++i)
+		set_tio_counterswap(dev, i);
+
+	ret = a4l_request_irq(dev,
+			      mite_irq(private(dev)->mite),
+			      ni_660x_interrupt, RTDM_IRQTYPE_SHARED, dev);
+
+	if (ret < 0) {
+		a4l_err(dev, "%s: IRQ not available\n", __FUNCTION__);
+		return ret;
+	}
+
+	global_interrupt_config_bits = Global_Int_Enable_Bit;
+	if (board(dev)->n_chips > 1)
+		global_interrupt_config_bits |= Cascade_Int_Enable_Bit;
+
+	ni_660x_write_register(dev, 0, global_interrupt_config_bits,
+			       GlobalInterruptConfigRegister);
+
+	a4l_info(dev, "attach succeed, ready to be used\n");
+
+	return 0;
+}
+
+static int ni_660x_detach(struct a4l_device *dev)
+{
+	int i;
+
+	a4l_info(dev, "begin to detach the driver ...");
+
+	/* Free irq */
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED)
+		a4l_free_irq(dev, a4l_get_irq(dev));
+
+	if (dev->priv) {
+
+		if (private(dev)->counter_dev) {
+
+			for (i = 0; i < ni_660x_num_counters(dev); ++i)
+				if ((private(dev)->counter_dev->counters[i]) != NULL)
+					kfree (private(dev)->counter_dev->counters[i]);
+
+			a4l_ni_gpct_device_destroy(private(dev)->counter_dev);
+		}
+
+		if (private(dev)->mite) {
+			ni_660x_free_mite_rings(dev);
+			a4l_mite_unsetup(private(dev)->mite);
+		}
+	}
+
+	a4l_info(dev, "driver detached !\n");
+
+	return 0;
+}
+
+static int ni_660x_GPCT_rinsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_rinsn(subdev_priv->counter, insn);
+}
+
+static void init_tio_chip(struct a4l_device *dev, int chipset)
+{
+	unsigned int i;
+
+	/*  Init dma configuration register */
+	private(dev)->dma_configuration_soft_copies[chipset] = 0;
+	for (i = 0; i < MAX_DMA_CHANNEL; ++i) {
+		private(dev)->dma_configuration_soft_copies[chipset] |=
+		    dma_select_bits(i, dma_selection_none) & dma_select_mask(i);
+	}
+
+	ni_660x_write_register(dev, chipset,
+			       private(dev)->
+			       dma_configuration_soft_copies[chipset],
+			       DMAConfigRegister);
+
+	for (i = 0; i < NUM_PFI_CHANNELS; ++i)
+		ni_660x_write_register(dev, chipset, 0, IOConfigReg(i));
+}
+
+static int ni_660x_GPCT_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_insn_config (subdev_priv->counter, insn);
+}
+
+static int ni_660x_GPCT_winsn(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	return a4l_ni_tio_winsn(subdev_priv->counter, insn);
+}
+
+static int ni_660x_dio_insn_bits(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	unsigned int* data = (unsigned int*) insn->data;
+	unsigned int base_bitfield_channel = CR_CHAN(insn->chan_desc);
+
+	/*  Check if we have to write some bits */
+	if (data[0]) {
+		subdev_priv->state &= ~(data[0] << base_bitfield_channel);
+		subdev_priv->state |= (data[0] & data[1]) << base_bitfield_channel;
+		/* Write out the new digital output lines */
+		ni_660x_write_register(s->dev, 0, subdev_priv->state, DIO32Output);
+	}
+
+	/* On return, data[1] contains the value of the digital input
+	   and output lines. */
+	data[1] = ni_660x_read_register(s->dev, 0, DIO32Input) >>
+		base_bitfield_channel;
+
+	return 0;
+}
+
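+/* On two-chip boards the PFI lines of counters 4-7 (PFI 8..23) are
+ * driven by the second TIO chip; the chip left idle for a given pin is
+ * parked in high impedance so that both never drive the line at once. */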
+static void ni_660x_select_pfi_output(struct a4l_device *dev,
+				      unsigned pfi_channel,
+				      unsigned output_select)
+{
+	static const unsigned counter_4_7_first_pfi = 8;
+	static const unsigned counter_4_7_last_pfi = 23;
+	unsigned active_chipset = 0;
+	unsigned idle_chipset = 0;
+	unsigned active_bits;
+	unsigned idle_bits;
+
+	if (board(dev)->n_chips > 1) {
+		if (output_select == pfi_output_select_counter &&
+		    pfi_channel >= counter_4_7_first_pfi &&
+		    pfi_channel <= counter_4_7_last_pfi) {
+			active_chipset = 1;
+			idle_chipset = 0;
+		} else {
+			active_chipset = 0;
+			idle_chipset = 1;
+		}
+	}
+
+	if (idle_chipset != active_chipset) {
+		idle_bits = ni_660x_read_register(dev, idle_chipset,
+						  IOConfigReg(pfi_channel));
+		idle_bits &= ~pfi_output_select_mask(pfi_channel);
+		idle_bits |= pfi_output_select_bits(pfi_channel,
+						    pfi_output_select_high_Z);
+		ni_660x_write_register(dev, idle_chipset, idle_bits,
+				       IOConfigReg(pfi_channel));
+	}
+
+	active_bits =
+	    ni_660x_read_register(dev, active_chipset,
+				  IOConfigReg(pfi_channel));
+	active_bits &= ~pfi_output_select_mask(pfi_channel);
+	active_bits |= pfi_output_select_bits(pfi_channel, output_select);
+	ni_660x_write_register(dev, active_chipset, active_bits,
+			       IOConfigReg(pfi_channel));
+}
+
+static int ni_660x_set_pfi_routing(struct a4l_device *dev, unsigned chan,
+				   unsigned source)
+{
+	BUG_ON(chan >= NUM_PFI_CHANNELS);
+
+	if (source >= num_pfi_output_selects)
+		return -EINVAL;
+	if (source == pfi_output_select_high_Z)
+		return -EINVAL;
+	if (chan < min_counter_pfi_chan) {
+		if (source == pfi_output_select_counter)
+			return -EINVAL;
+	} else if (chan > max_dio_pfi_chan) {
+		if (source == pfi_output_select_do)
+			return -EINVAL;
+	}
+
+	private(dev)->pfi_output_selects[chan] = source;
+	if (private(dev)->pfi_direction_bits & (((uint64_t) 1) << chan))
+		ni_660x_select_pfi_output(dev, chan,
+					  private(dev)->
+					  pfi_output_selects[chan]);
+	return 0;
+}
+
+static unsigned ni_660x_get_pfi_routing(struct a4l_device *dev,
+					unsigned chan)
+{
+	BUG_ON(chan >= NUM_PFI_CHANNELS);
+	return private(dev)->pfi_output_selects[chan];
+}
+
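+/* Program the digital input filter of a PFI line (read-modify-write of
+ * the corresponding IOConfigReg on the first chip). */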
+static void ni660x_config_filter(struct a4l_device *dev,
+				 unsigned pfi_channel,
+				 int filter)
+{
+	unsigned int bits;
+
+	bits = ni_660x_read_register(dev, 0, IOConfigReg(pfi_channel));
+	bits &= ~pfi_input_select_mask(pfi_channel);
+	bits |= pfi_input_select_bits(pfi_channel, filter);
+	ni_660x_write_register(dev, 0, bits, IOConfigReg(pfi_channel));
+}
+
+static int ni_660x_dio_insn_config(struct a4l_subdevice *s, struct a4l_kernel_instruction *insn)
+{
+	unsigned int* data = insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	struct a4l_device* dev = s->dev;
+
+	if (data == NULL)
+		return -EINVAL;
+
+	/* The input or output configuration of each digital line is
+	 * configured by a special insn_config instruction.  chanspec
+	 * contains the channel to be changed, and data[0] contains the
+	 * value COMEDI_INPUT or COMEDI_OUTPUT. */
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		private(dev)->pfi_direction_bits |= ((uint64_t) 1) << chan;
+		ni_660x_select_pfi_output(dev, chan,
+					  private(dev)->
+					  pfi_output_selects[chan]);
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		private(dev)->pfi_direction_bits &= ~(((uint64_t) 1) << chan);
+		ni_660x_select_pfi_output(dev, chan, pfi_output_select_high_Z);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] =
+		    (private(dev)->pfi_direction_bits &
+		     (((uint64_t) 1) << chan)) ? A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	case A4L_INSN_CONFIG_SET_ROUTING:
+		return ni_660x_set_pfi_routing(dev, chan, data[1]);
+	case A4L_INSN_CONFIG_GET_ROUTING:
+		data[1] = ni_660x_get_pfi_routing(dev, chan);
+		break;
+	case A4L_INSN_CONFIG_FILTER:
+		ni660x_config_filter(dev, chan, data[1]);
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+
+MODULE_DESCRIPTION("Analogy driver for NI660x series cards");
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c
new file mode 100644
index 0000000..35749be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_670x.c
@@ -0,0 +1,443 @@
+/*
+    comedi/drivers/ni_670x.c
+    Hardware driver for NI 670x devices
+
+    COMEDI - Linux Control and Measurement Device Interface
+    Copyright (C) 1997-2001 David A. Schleef <ds@schleef.org>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License
+    along with this program; if not, write to the Free Software
+    Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+
+*/
+/*
+Driver: ni_670x
+Description: National Instruments 670x
+Author: Bart Joris <bjoris@advalvas.be>
+Updated: Wed, 11 Dec 2002 18:25:35 -0800
+Devices: [National Instruments] PCI-6703 (ni_670x), PCI-6704
+Status: unknown
+
+Commands are not supported.
+*/
+
+/*
+	Bart Joris <bjoris@advalvas.be> Last updated on 20/08/2001
+
+	Manuals:
+
+	322110a.pdf	PCI/PXI-6704 User Manual
+	322110b.pdf	PCI/PXI-6703/6704 User Manual
+*/
+
+/*
+ * Integration with Xenomai/Analogy layer based on the
+ * comedi driver. Adaptation made by
+ *   Julien Delange <julien.delange@esa.int>
+ */
+
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_mio.h"
+#include "mite.h"
+
+#define PCIMIO_IRQ_POLARITY 1
+
+#define  AO_VALUE_OFFSET         0x00
+#define  AO_CHAN_OFFSET          0x0c
+#define  AO_STATUS_OFFSET        0x10
+#define  AO_CONTROL_OFFSET       0x10
+#define  DIO_PORT0_DIR_OFFSET    0x20
+#define  DIO_PORT0_DATA_OFFSET   0x24
+#define  DIO_PORT1_DIR_OFFSET    0x28
+#define  DIO_PORT1_DATA_OFFSET   0x2c
+#define  MISC_STATUS_OFFSET      0x14
+#define  MISC_CONTROL_OFFSET     0x14
+
+/* Board description*/
+
+struct ni_670x_board {
+	unsigned short device_id;
+	const char *name;
+	unsigned short ao_chans;
+	unsigned short ao_bits;
+};
+
+#define thisboard ((struct ni_670x_board *)dev->board_ptr)
+
+struct ni_670x_private {
+	struct mite_struct *mite;
+	int boardtype;
+	int dio;
+	unsigned int ao_readback[32];
+
+	/*
+	 * Added when porting to xenomai
+	 */
+	int irq_polarity;
+	int irq_pin;
+	int irq;
+	struct ni_670x_board *board_ptr;
+	/*
+	 * END OF ADDED when porting to xenomai
+	 */
+};
+
+struct ni_670x_subd_priv {
+	int io_bits;
+	unsigned int state;
+	uint16_t readback[2];
+	uint16_t config;
+	void* counter;
+};
+
+static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn);
+
+static struct a4l_channels_desc ni_670x_desc_dio = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+static struct a4l_channels_desc ni_670x_desc_ao = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 0, /* initialized later according to the board found */
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};
+
+
+static struct a4l_rngtab range_0_20mA = { 1, {RANGE_mA(0, 20)} };
+static struct a4l_rngtab rng_bipolar10 = { 1, {RANGE_V(-10, 10) }};
+
+struct a4l_rngtab *range_table_list[32] = {
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&rng_bipolar10, &rng_bipolar10, &rng_bipolar10, &rng_bipolar10,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA,
+	&range_0_20mA, &range_0_20mA, &range_0_20mA, &range_0_20mA};
+
+static A4L_RNGDESC(32) ni670x_ao_desc;
+
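+/* AO subdevice setup: on 32-channel boards (PCI/PXI-6704) a per-channel
+ * range table is used, channels 0-15 being +/-10V voltage outputs and
+ * channels 16-31 being 0-20mA current outputs; 16-channel boards get the
+ * global bipolar 10V range. */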
+static void setup_subd_ao(struct a4l_subdevice *subd)
+{
+	int i;
+	int nchans;
+
+	nchans = ((struct ni_670x_private*)(subd->dev->priv))->board_ptr->ao_chans;
+	subd->flags                = A4L_SUBD_AO;
+	subd->chan_desc            = &ni_670x_desc_ao;
+	subd->chan_desc->length    = nchans;
+	if (nchans == 32) {
+		subd->rng_desc = (struct a4l_rngdesc *) &ni670x_ao_desc;
+		subd->rng_desc->mode = A4L_RNG_PERCHAN_RNGDESC;
+		for (i = 0; i < 16; i++) {
+			subd->rng_desc->rngtabs[i] = &rng_bipolar10;
+			subd->rng_desc->rngtabs[16 + i] = &range_0_20mA;
+		}
+	} else
+		subd->rng_desc = &a4l_range_bipolar10;
+
+	subd->insn_write = &ni_670x_ao_winsn;
+	subd->insn_read = &ni_670x_ao_rinsn;
+}
+
+static void setup_subd_dio(struct a4l_subdevice *s)
+{
+	/* Digital i/o subdevice */
+	s->flags = A4L_SUBD_DIO;
+	s->chan_desc = &ni_670x_desc_dio;
+	s->rng_desc = &range_digital;
+	s->insn_bits = ni_670x_dio_insn_bits;
+	s->insn_config = ni_670x_dio_insn_config;
+}
+
+struct setup_subd {
+	void (*setup_func) (struct a4l_subdevice *);
+	int sizeof_priv;
+};
+
+static struct setup_subd setup_subds[2] = {
+	{
+		.setup_func = setup_subd_ao,
+		.sizeof_priv = sizeof(struct ni_670x_subd_priv),
+	},
+	{
+		.setup_func = setup_subd_dio,
+		.sizeof_priv = sizeof(struct ni_670x_subd_priv),
+	},
+};
+
+static const struct ni_670x_board ni_670x_boards[] = {
+	{
+		.device_id = 0x2c90,
+		.name = "PCI-6703",
+		.ao_chans = 16,
+		.ao_bits = 16,
+	},
+	{
+		.device_id = 0x1920,
+		.name = "PXI-6704",
+		.ao_chans = 32,
+		.ao_bits = 16,
+	},
+	{
+		.device_id = 0x1290,
+		.name = "PCI-6704",
+		.ao_chans = 32,
+		.ao_bits = 16,
+	 },
+};
+
+#define n_ni_670x_boards ARRAY_SIZE(ni_670x_boards)
+
+static const struct pci_device_id ni_670x_pci_table[] = {
+	{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x2c90)},
+	{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1920)},
+	{PCI_DEVICE(PCI_VENDOR_ID_NI, 0x1290)},
+	{0}
+};
+
+MODULE_DEVICE_TABLE(pci, ni_670x_pci_table);
+
+#define devpriv ((struct ni_670x_private *)dev->priv)
+
+static inline struct ni_670x_private *private(struct a4l_device *dev)
+{
+	return (struct ni_670x_private*) dev->priv;
+}
+
+
+static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg);
+static int ni_670x_detach(struct a4l_device *dev);
+
+static struct a4l_driver ni_670x_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_ni_670x",
+	.driver_name = "ni_670x",
+	.attach = ni_670x_attach,
+	.detach = ni_670x_detach,
+	.privdata_size = sizeof(struct ni_670x_private),
+};
+
+static int __init driver_ni_670x_init_module(void)
+{
+	return a4l_register_drv (&ni_670x_drv);
+}
+
+static void __exit driver_ni_670x_cleanup_module(void)
+{
+	a4l_unregister_drv (&ni_670x_drv);
+}
+
+module_init(driver_ni_670x_init_module);
+module_exit(driver_ni_670x_cleanup_module);
+
+static int ni_670x_attach (struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int ret, bus, slot, i, irq;
+	struct mite_struct *mite;
+	struct ni_670x_board* board = NULL;
+	int err;
+
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	a4l_info(dev, "ni670x attach procedure started(bus=%d/slot=%d)...\n",
+		 bus, slot);
+
+	mite = NULL;
+
+	for (i = 0; i < n_ni_670x_boards && mite == NULL; i++) {
+		mite = a4l_mite_find_device(bus,
+					    slot, ni_670x_boards[i].device_id);
+		board = (struct ni_670x_board*) &ni_670x_boards[i];
+	}
+
+	if(mite == NULL) {
+		a4l_err(dev, "%s: cannot find the MITE device\n", __FUNCTION__);
+		return -ENOENT;
+	}
+
+	a4l_info(dev, "Found device %d %s\n", i, ni_670x_boards[i].name);
+
+	devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+	devpriv->irq_pin = 0;
+
+	devpriv->mite = mite;
+	devpriv->board_ptr = board;
+
+	ret = a4l_mite_setup(devpriv->mite, 0);
+	if (ret < 0) {
+		a4l_err(dev, "%s: error setting up mite\n", __FUNCTION__);
+		return ret;
+	}
+
+	irq = mite_irq(devpriv->mite);
+	devpriv->irq = irq;
+
+	a4l_info(dev, "found %s board\n", board->name);
+
+	for (i = 0; i < 2; i++) {
+		struct a4l_subdevice *subd =
+			a4l_alloc_subd(setup_subds[i].sizeof_priv, NULL);
+
+		if (subd == NULL) {
+			a4l_err(dev,
+				"%s: cannot allocate subdevice\n",
+				__FUNCTION__);
+			return -ENOMEM;
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i) {
+			a4l_err(dev,
+				"%s: cannot add subdevice\n",
+				__FUNCTION__);
+			return err;
+		}
+
+		setup_subds[i].setup_func (subd);
+	}
+
+	/* Config of misc registers */
+	writel(0x10, devpriv->mite->daq_io_addr + MISC_CONTROL_OFFSET);
+	/* Config of ao registers */
+	writel(0x00, devpriv->mite->daq_io_addr + AO_CONTROL_OFFSET);
+
+	a4l_info(dev, "ni670x attached\n");
+
+	return 0;
+}
+
+static int ni_670x_detach(struct a4l_device *dev)
+{
+	a4l_info(dev, "ni670x detach procedure started...\n");
+
+	if (dev->priv != NULL && devpriv->mite != NULL)
+		a4l_mite_unsetup(devpriv->mite);
+
+	a4l_info(dev, "ni670x detach procedure succeeded...\n");
+
+	return 0;
+}
+
+
+static int ni_670x_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	unsigned int *data = (unsigned int *)insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	struct ni_670x_subd_priv *subdpriv =
+		(struct ni_670x_subd_priv *)subd->priv;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subdpriv->io_bits |= 1 << chan;
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subdpriv->io_bits &= ~(1 << chan);
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] = (subdpriv->io_bits & (1 << chan)) ?
+			A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	writel(subdpriv->io_bits,
+	       devpriv->mite->daq_io_addr + DIO_PORT0_DIR_OFFSET);
+
+	return 0;
+}
+
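+/* AO write: every sample is output as a two-step MMIO sequence, first
+ * selecting the channel, then writing the value; the last value written
+ * per channel is cached in ao_readback for the read instruction. */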
+static int ni_670x_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int tmp;
+	unsigned int* dtmp;
+	int chan;
+	dtmp = (unsigned int*)insn->data;
+	chan = CR_CHAN(insn->chan_desc);
+
+	/* Channel number mapping:
+	 *
+	 *   NI 6703 / NI 6704   |  NI 6704 only
+	 *   --------------------+-------------------
+	 *   vch(0)  ->  0       |  ich(16) ->  1
+	 *   vch(1)  ->  2       |  ich(17) ->  3
+	 *     ...               |    ...
+	 *   vch(15) -> 30       |  ich(31) -> 31
+	 */
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++) {
+
+		tmp = dtmp[i];
+
+		/* First write in channel register which channel to use */
+		writel(((chan & 15) << 1) | ((chan & 16) >> 4),
+		       private (subd->dev)->mite->daq_io_addr + AO_CHAN_OFFSET);
+
+		/* write channel value */
+		writel(dtmp[i],
+		       private(subd->dev)->mite->daq_io_addr + AO_VALUE_OFFSET);
+		private(subd->dev)->ao_readback[chan] = tmp;
+	}
+
+	return 0;
+}
+
+static int ni_670x_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	int i;
+	unsigned int* dtmp;
+	int chan = CR_CHAN(insn->chan_desc);
+
+	dtmp = (unsigned int*)insn->data;
+
+	for (i = 0; i < insn->data_size / sizeof(unsigned int); i++)
+		dtmp[i] = private(subd->dev)->ao_readback[chan];
+
+	return 0;
+}
+
+
+static int ni_670x_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	return -ENOSYS;
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI670x series cards");
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h
new file mode 100644
index 0000000..7fee167
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_mio.h
@@ -0,0 +1,122 @@
+/*
+ * Hardware driver for NI Mite PCI interface chip
+ * Copyright (C) 1999 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __ANALOGY_NI_MIO_H__
+#define __ANALOGY_NI_MIO_H__
+
+/* Debug stuff */
+
+#ifdef CONFIG_DEBUG_MIO
+#define MDPRINTK(fmt, args...) rtdm_printk(fmt, ##args)
+#else /* !CONFIG_DEBUG_MIO */
+#define MDPRINTK(fmt, args...)
+#endif /* CONFIG_DEBUG_MIO */
+
+/* Subdevice related defines */
+
+#define AIMODE_NONE		0
+#define AIMODE_HALF_FULL	1
+#define AIMODE_SCAN		2
+#define AIMODE_SAMPLE		3
+
+#define NI_AI_SUBDEV		0
+#define NI_AO_SUBDEV		1
+#define NI_DIO_SUBDEV		2
+#define NI_8255_DIO_SUBDEV	3
+#define NI_UNUSED_SUBDEV	4
+#define NI_CALIBRATION_SUBDEV	5
+#define NI_EEPROM_SUBDEV	6
+#define NI_PFI_DIO_SUBDEV	7
+#define NI_CS5529_CALIBRATION_SUBDEV 8
+#define NI_SERIAL_SUBDEV	9
+#define NI_RTSI_SUBDEV		10
+#define NI_GPCT0_SUBDEV		11
+#define NI_GPCT1_SUBDEV		12
+#define NI_FREQ_OUT_SUBDEV	13
+#define NI_NUM_SUBDEVICES	14
+
+#define NI_GPCT_SUBDEV(x)	((x == 1) ? NI_GPCT1_SUBDEV : NI_GPCT0_SUBDEV)
+
+#define TIMEBASE_1_NS		50
+#define TIMEBASE_2_NS		10000
+
+#define SERIAL_DISABLED		0
+#define SERIAL_600NS		600
+#define SERIAL_1_2US		1200
+#define SERIAL_10US		10000
+
+/* PFI digital filtering options for ni m-series for use with
+   INSN_CONFIG_FILTER. */
+#define NI_PFI_FILTER_OFF	0x0
+#define NI_PFI_FILTER_125ns	0x1
+#define NI_PFI_FILTER_6425ns	0x2
+#define NI_PFI_FILTER_2550us	0x3
+
+/* Signals which can be routed to an NI PFI pin on an m-series board
+   with INSN_CONFIG_SET_ROUTING. These numbers are also returned by
+   INSN_CONFIG_GET_ROUTING on pre-m-series boards, even though their
+   routing cannot be changed. The numbers assigned are not arbitrary,
+   they correspond to the bits required to program the board. */
+#define NI_PFI_OUTPUT_PFI_DEFAULT	0
+#define NI_PFI_OUTPUT_AI_START1		1
+#define NI_PFI_OUTPUT_AI_START2		2
+#define NI_PFI_OUTPUT_AI_CONVERT	3
+#define NI_PFI_OUTPUT_G_SRC1		4
+#define NI_PFI_OUTPUT_G_GATE1		5
+#define NI_PFI_OUTPUT_AO_UPDATE_N	6
+#define NI_PFI_OUTPUT_AO_START1		7
+#define NI_PFI_OUTPUT_AI_START_PULSE	8
+#define NI_PFI_OUTPUT_G_SRC0		9
+#define NI_PFI_OUTPUT_G_GATE0		10
+#define NI_PFI_OUTPUT_EXT_STROBE	11
+#define NI_PFI_OUTPUT_AI_EXT_MUX_CLK	12
+#define NI_PFI_OUTPUT_GOUT0		13
+#define NI_PFI_OUTPUT_GOUT1		14
+#define NI_PFI_OUTPUT_FREQ_OUT		15
+#define NI_PFI_OUTPUT_PFI_DO		16
+#define NI_PFI_OUTPUT_I_ATRIG		17
+#define NI_PFI_OUTPUT_RTSI0		18
+#define NI_PFI_OUTPUT_PXI_STAR_TRIGGER_IN 26
+#define NI_PFI_OUTPUT_SCXI_TRIG1	27
+#define NI_PFI_OUTPUT_DIO_CHANGE_DETECT_RTSI 28
+#define NI_PFI_OUTPUT_CDI_SAMPLE	29
+#define NI_PFI_OUTPUT_CDO_UPDATE	30
+
+static inline unsigned int NI_PFI_OUTPUT_RTSI(unsigned rtsi_channel) {
+	return NI_PFI_OUTPUT_RTSI0 + rtsi_channel;
+}
+
+/* Ranges declarations */
+
+extern struct a4l_rngdesc a4l_range_ni_E_ai;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_limited;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_limited14;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_bipolar4;
+extern struct a4l_rngdesc a4l_range_ni_E_ai_611x;
+extern struct a4l_rngdesc range_ni_E_ai_622x;
+extern struct a4l_rngdesc range_ni_E_ai_628x;
+extern struct a4l_rngdesc a4l_range_ni_S_ai_6143;
+extern struct a4l_rngdesc a4l_range_ni_E_ao_ext;
+
+/* Misc functions declarations */
+
+int a4l_ni_E_interrupt(unsigned int irq, void *d);
+int a4l_ni_E_init(struct a4l_device *dev);
+
+
+#endif /* !__ANALOGY_NI_MIO_H__ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h
new file mode 100644
index 0000000..d600a32
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_stc.h
@@ -0,0 +1,1417 @@
+/*
+ * Register descriptions for NI DAQ-STC chip
+ *
+ * Copyright (C) 1998-9 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this code; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * References:
+ * 340934b.pdf  DAQ-STC reference manual
+ *
+ */
+#ifndef __ANALOGY_NI_STC_H__
+#define __ANALOGY_NI_STC_H__
+
+#include "ni_tio.h"
+
+#define _bit15		0x8000
+#define _bit14		0x4000
+#define _bit13		0x2000
+#define _bit12		0x1000
+#define _bit11		0x0800
+#define _bit10		0x0400
+#define _bit9		0x0200
+#define _bit8		0x0100
+#define _bit7		0x0080
+#define _bit6		0x0040
+#define _bit5		0x0020
+#define _bit4		0x0010
+#define _bit3		0x0008
+#define _bit2		0x0004
+#define _bit1		0x0002
+#define _bit0		0x0001
+
+#define NUM_PFI_OUTPUT_SELECT_REGS 6
+
+/* Registers in the National Instruments DAQ-STC chip */
+
+#define Interrupt_A_Ack_Register	2
+#define G0_Gate_Interrupt_Ack			_bit15
+#define G0_TC_Interrupt_Ack			_bit14
+#define AI_Error_Interrupt_Ack			_bit13
+#define AI_STOP_Interrupt_Ack			_bit12
+#define AI_START_Interrupt_Ack			_bit11
+#define AI_START2_Interrupt_Ack			_bit10
+#define AI_START1_Interrupt_Ack			_bit9
+#define AI_SC_TC_Interrupt_Ack			_bit8
+#define AI_SC_TC_Error_Confirm			_bit7
+#define G0_TC_Error_Confirm			_bit6
+#define G0_Gate_Error_Confirm			_bit5
+
+#define AI_Status_1_Register		2
+#define Interrupt_A_St				_bit15
+#define AI_FIFO_Full_St				_bit14
+#define AI_FIFO_Half_Full_St			_bit13
+#define AI_FIFO_Empty_St			_bit12
+#define AI_Overrun_St				_bit11
+#define AI_Overflow_St				_bit10
+#define AI_SC_TC_Error_St			_bit9
+#define AI_START2_St				_bit8
+#define AI_START1_St				_bit7
+#define AI_SC_TC_St				_bit6
+#define AI_START_St				_bit5
+#define AI_STOP_St				_bit4
+#define G0_TC_St				_bit3
+#define G0_Gate_Interrupt_St			_bit2
+#define AI_FIFO_Request_St			_bit1
+#define Pass_Thru_0_Interrupt_St		_bit0
+
+#define AI_Status_2_Register		5
+
+#define Interrupt_B_Ack_Register	3
+#define G1_Gate_Error_Confirm			_bit1
+#define G1_TC_Error_Confirm			_bit2
+#define AO_BC_TC_Trigger_Error_Confirm		_bit3
+#define AO_BC_TC_Error_Confirm			_bit4
+#define AO_UI2_TC_Error_Confrim			_bit5
+#define AO_UI2_TC_Interrupt_Ack			_bit6
+#define AO_UC_TC_Interrupt_Ack			_bit7
+#define AO_BC_TC_Interrupt_Ack			_bit8
+#define AO_START1_Interrupt_Ack			_bit9
+#define AO_UPDATE_Interrupt_Ack			_bit10
+#define AO_START_Interrupt_Ack			_bit11
+#define AO_STOP_Interrupt_Ack			_bit12
+#define AO_Error_Interrupt_Ack			_bit13
+#define G1_TC_Interrupt_Ack			_bit14
+#define G1_Gate_Interrupt_Ack			_bit15
+
+#define AO_Status_1_Register		3
+#define Interrupt_B_St				_bit15
+#define AO_FIFO_Full_St				_bit14
+#define AO_FIFO_Half_Full_St			_bit13
+#define AO_FIFO_Empty_St			_bit12
+#define AO_BC_TC_Error_St			_bit11
+#define AO_START_St				_bit10
+#define AO_Overrun_St				_bit9
+#define AO_START1_St				_bit8
+#define AO_BC_TC_St				_bit7
+#define AO_UC_TC_St				_bit6
+#define AO_UPDATE_St				_bit5
+#define AO_UI2_TC_St				_bit4
+#define G1_TC_St				_bit3
+#define G1_Gate_Interrupt_St			_bit2
+#define AO_FIFO_Request_St			_bit1
+#define Pass_Thru_1_Interrupt_St		_bit0
+
+
+#define AI_Command_2_Register		4
+#define AI_End_On_SC_TC				_bit15
+#define AI_End_On_End_Of_Scan			_bit14
+#define AI_START1_Disable			_bit11
+#define AI_SC_Save_Trace			_bit10
+#define AI_SI_Switch_Load_On_SC_TC		_bit9
+#define AI_SI_Switch_Load_On_STOP		_bit8
+#define AI_SI_Switch_Load_On_TC			_bit7
+#define AI_SC_Switch_Load_On_TC			_bit4
+#define AI_STOP_Pulse				_bit3
+#define AI_START_Pulse				_bit2
+#define AI_START2_Pulse				_bit1
+#define AI_START1_Pulse				_bit0
+
+#define AO_Command_2_Register		5
+#define AO_End_On_BC_TC(x)			(((x) & 0x3) << 14)
+#define AO_Start_Stop_Gate_Enable		_bit13
+#define AO_UC_Save_Trace			_bit12
+#define AO_BC_Gate_Enable			_bit11
+#define AO_BC_Save_Trace			_bit10
+#define AO_UI_Switch_Load_On_BC_TC		_bit9
+#define AO_UI_Switch_Load_On_Stop		_bit8
+#define AO_UI_Switch_Load_On_TC			_bit7
+#define AO_UC_Switch_Load_On_BC_TC		_bit6
+#define AO_UC_Switch_Load_On_TC			_bit5
+#define AO_BC_Switch_Load_On_TC			_bit4
+#define AO_Mute_B				_bit3
+#define AO_Mute_A				_bit2
+#define AO_UPDATE2_Pulse			_bit1
+#define AO_START1_Pulse				_bit0
+
+#define AO_Status_2_Register		6
+
+#define DIO_Parallel_Input_Register	7
+
+#define AI_Command_1_Register		8
+#define AI_Analog_Trigger_Reset			_bit14
+#define AI_Disarm				_bit13
+#define AI_SI2_Arm				_bit12
+#define AI_SI2_Load				_bit11
+#define AI_SI_Arm				_bit10
+#define AI_SI_Load				_bit9
+#define AI_DIV_Arm				_bit8
+#define AI_DIV_Load				_bit7
+#define AI_SC_Arm				_bit6
+#define AI_SC_Load				_bit5
+#define AI_SCAN_IN_PROG_Pulse			_bit4
+#define AI_EXTMUX_CLK_Pulse			_bit3
+#define AI_LOCALMUX_CLK_Pulse			_bit2
+#define AI_SC_TC_Pulse				_bit1
+#define AI_CONVERT_Pulse			_bit0
+
+#define AO_Command_1_Register		9
+#define AO_Analog_Trigger_Reset			_bit15
+#define AO_START_Pulse				_bit14
+#define AO_Disarm				_bit13
+#define AO_UI2_Arm_Disarm			_bit12
+#define AO_UI2_Load				_bit11
+#define AO_UI_Arm				_bit10
+#define AO_UI_Load				_bit9
+#define AO_UC_Arm				_bit8
+#define AO_UC_Load				_bit7
+#define AO_BC_Arm				_bit6
+#define AO_BC_Load				_bit5
+#define AO_DAC1_Update_Mode			_bit4
+#define AO_LDAC1_Source_Select			_bit3
+#define AO_DAC0_Update_Mode			_bit2
+#define AO_LDAC0_Source_Select			_bit1
+#define AO_UPDATE_Pulse				_bit0
+
+
+#define DIO_Output_Register		10
+#define DIO_Parallel_Data_Out(a)                ((a)&0xff)
+#define DIO_Parallel_Data_Mask                  0xff
+#define DIO_SDOUT                               _bit0
+#define DIO_SDIN                                _bit4
+#define DIO_Serial_Data_Out(a)                  (((a)&0xff)<<8)
+#define DIO_Serial_Data_Mask                    0xff00
+
+#define DIO_Control_Register		11
+#define DIO_Software_Serial_Control             _bit11
+#define DIO_HW_Serial_Timebase                  _bit10
+#define DIO_HW_Serial_Enable                    _bit9
+#define DIO_HW_Serial_Start                     _bit8
+#define DIO_Pins_Dir(a)                         ((a)&0xff)
+#define DIO_Pins_Dir_Mask                       0xff
+
+#define AI_Mode_1_Register		12
+#define AI_CONVERT_Source_Select(a)		(((a) & 0x1f) << 11)
+#define AI_SI_Source_select(a)			(((a) & 0x1f) << 6)
+#define AI_CONVERT_Source_Polarity		_bit5
+#define AI_SI_Source_Polarity		_bit4
+#define AI_Start_Stop				_bit3
+#define AI_Mode_1_Reserved			_bit2
+#define AI_Continuous				_bit1
+#define AI_Trigger_Once				_bit0
+
+#define AI_Mode_2_Register		13
+#define AI_SC_Gate_Enable			_bit15
+#define AI_Start_Stop_Gate_Enable		_bit14
+#define AI_Pre_Trigger				_bit13
+#define AI_External_MUX_Present			_bit12
+#define AI_SI2_Initial_Load_Source		_bit9
+#define AI_SI2_Reload_Mode			_bit8
+#define AI_SI_Initial_Load_Source		_bit7
+#define AI_SI_Reload_Mode(a)			(((a) & 0x7)<<4)
+#define AI_SI_Write_Switch			_bit3
+#define AI_SC_Initial_Load_Source		_bit2
+#define AI_SC_Reload_Mode			_bit1
+#define AI_SC_Write_Switch			_bit0
+
+#define AI_SI_Load_A_Registers		14
+#define AI_SI_Load_B_Registers		16
+#define AI_SC_Load_A_Registers		18
+#define AI_SC_Load_B_Registers		20
+#define AI_SI_Save_Registers		64
+#define AI_SC_Save_Registers		66
+
+#define AI_SI2_Load_A_Register		23
+#define AI_SI2_Load_B_Register		25
+
+#define Joint_Status_1_Register         27
+#define DIO_Serial_IO_In_Progress_St            _bit12
+
+#define DIO_Serial_Input_Register       28
+#define Joint_Status_2_Register         29
+#define AO_TMRDACWRs_In_Progress_St		_bit5
+
+#define AO_Mode_1_Register		38
+#define AO_UPDATE_Source_Select(x)		(((x)&0x1f)<<11)
+#define AO_UI_Source_Select(x)			(((x)&0x1f)<<6)
+#define AO_Multiple_Channels			_bit5
+#define AO_UPDATE_Source_Polarity		_bit4
+#define AO_UI_Source_Polarity			_bit3
+#define AO_UC_Switch_Load_Every_TC		_bit2
+#define AO_Continuous				_bit1
+#define AO_Trigger_Once				_bit0
+
+#define AO_Mode_2_Register		39
+#define AO_FIFO_Mode_Mask			( 0x3 << 14 )
+#define AO_FIFO_Mode_HF_to_F			(3<<14)
+#define AO_FIFO_Mode_F				(2<<14)
+#define AO_FIFO_Mode_HF				(1<<14)
+#define AO_FIFO_Mode_E				(0<<14)
+#define AO_FIFO_Retransmit_Enable		_bit13
+#define AO_START1_Disable			_bit12
+#define AO_UC_Initial_Load_Source		_bit11
+#define AO_UC_Write_Switch			_bit10
+#define AO_UI2_Initial_Load_Source		_bit9
+#define AO_UI2_Reload_Mode			_bit8
+#define AO_UI_Initial_Load_Source		_bit7
+#define AO_UI_Reload_Mode(x)			(((x) & 0x7) << 4)
+#define AO_UI_Write_Switch			_bit3
+#define AO_BC_Initial_Load_Source		_bit2
+#define AO_BC_Reload_Mode			_bit1
+#define AO_BC_Write_Switch			_bit0
+
+#define AO_UI_Load_A_Register		40
+#define AO_UI_Load_A_Register_High	40
+#define AO_UI_Load_A_Register_Low	41
+#define AO_UI_Load_B_Register		42
+#define AO_UI_Save_Registers		16
+#define AO_BC_Load_A_Register		44
+#define AO_BC_Load_A_Register_High	44
+#define AO_BC_Load_A_Register_Low	45
+#define AO_BC_Load_B_Register		46
+#define AO_BC_Load_B_Register_High	46
+#define AO_BC_Load_B_Register_Low	47
+#define AO_BC_Save_Registers		18
+#define AO_UC_Load_A_Register		48
+#define AO_UC_Load_A_Register_High	48
+#define AO_UC_Load_A_Register_Low	49
+#define AO_UC_Load_B_Register		50
+#define AO_UC_Save_Registers		20
+
+#define Clock_and_FOUT_Register		56
+#define FOUT_Enable				_bit15
+#define FOUT_Timebase_Select			_bit14
+#define DIO_Serial_Out_Divide_By_2		_bit13
+#define Slow_Internal_Time_Divide_By_2		_bit12
+#define Slow_Internal_Timebase			_bit11
+#define G_Source_Divide_By_2			_bit10
+#define Clock_To_Board_Divide_By_2		_bit9
+#define Clock_To_Board				_bit8
+#define AI_Output_Divide_By_2			_bit7
+#define AI_Source_Divide_By_2			_bit6
+#define AO_Output_Divide_By_2			_bit5
+#define AO_Source_Divide_By_2			_bit4
+#define FOUT_Divider_mask			0xf
+#define FOUT_Divider(x)				(((x) & 0xf) << 0)
+
+#define IO_Bidirection_Pin_Register	57
+#define	RTSI_Trig_Direction_Register	58
+#define	Drive_RTSI_Clock_Bit			0x1
+#define	Use_RTSI_Clock_Bit			0x2
+
+static inline unsigned int RTSI_Output_Bit(unsigned channel, int is_mseries)
+{
+	unsigned max_channel;
+	unsigned base_bit_shift;
+	if (is_mseries) {
+		base_bit_shift = 8;
+		max_channel = 7;
+	} else {
+		base_bit_shift = 9;
+		max_channel = 6;
+	}
+	if (channel > max_channel) {
+		rtdm_printk("%s: bug, invalid RTSI_channel=%i\n",
+			    __FUNCTION__, channel);
+		return 0;
+	}
+	return 1 << (base_bit_shift + channel);
+}
+
+#define Interrupt_Control_Register	59
+#define Interrupt_B_Enable			_bit15
+#define Interrupt_B_Output_Select(x)		((x)<<12)
+#define Interrupt_A_Enable			_bit11
+#define Interrupt_A_Output_Select(x)		((x)<<8)
+#define Pass_Thru_0_Interrupt_Polarity		_bit3
+#define Pass_Thru_1_Interrupt_Polarity		_bit2
+#define Interrupt_Output_On_3_Pins		_bit1
+#define Interrupt_Output_Polarity		_bit0
+
+#define AI_Output_Control_Register	60
+#define AI_START_Output_Select			_bit10
+#define AI_SCAN_IN_PROG_Output_Select(x)	(((x) & 0x3) << 8)
+#define AI_EXTMUX_CLK_Output_Select(x)		(((x) & 0x3) << 6)
+#define AI_LOCALMUX_CLK_Output_Select(x)	((x)<<4)
+#define AI_SC_TC_Output_Select(x)		((x)<<2)
+#define AI_CONVERT_Output_High_Z		0
+#define AI_CONVERT_Output_Ground		1
+#define AI_CONVERT_Output_Enable_Low		2
+#define AI_CONVERT_Output_Enable_High		3
+#define AI_CONVERT_Output_Select(x)		((x) & 0x3)
+
+#define AI_START_STOP_Select_Register	62
+#define AI_START_Polarity			_bit15
+#define AI_STOP_Polarity			_bit14
+#define AI_STOP_Sync				_bit13
+#define AI_STOP_Edge				_bit12
+#define AI_STOP_Select(a)			(((a) & 0x1f)<<7)
+#define AI_START_Sync				_bit6
+#define AI_START_Edge				_bit5
+#define AI_START_Select(a)			((a) & 0x1f)
+
+#define AI_Trigger_Select_Register	63
+#define AI_START1_Polarity			_bit15
+#define AI_START2_Polarity			_bit14
+#define AI_START2_Sync				_bit13
+#define AI_START2_Edge				_bit12
+#define AI_START2_Select(a)			(((a) & 0x1f) << 7)
+#define AI_START1_Sync				_bit6
+#define AI_START1_Edge				_bit5
+#define AI_START1_Select(a)			((a) & 0x1f)
+
+#define AI_DIV_Load_A_Register	64
+
+#define AO_Start_Select_Register	66
+#define AO_UI2_Software_Gate			_bit15
+#define AO_UI2_External_Gate_Polarity		_bit14
+#define AO_START_Polarity			_bit13
+#define AO_AOFREQ_Enable			_bit12
+#define AO_UI2_External_Gate_Select(a)		(((a) & 0x1f) << 7)
+#define AO_START_Sync				_bit6
+#define AO_START_Edge				_bit5
+#define AO_START_Select(a)			((a) & 0x1f)
+
+#define AO_Trigger_Select_Register	67
+#define AO_UI2_External_Gate_Enable		_bit15
+#define AO_Delayed_START1			_bit14
+#define AO_START1_Polarity			_bit13
+#define AO_UI2_Source_Polarity			_bit12
+#define AO_UI2_Source_Select(x)			(((x)&0x1f)<<7)
+#define AO_START1_Sync				_bit6
+#define AO_START1_Edge				_bit5
+#define AO_START1_Select(x)			(((x)&0x1f)<<0)
+
+#define AO_Mode_3_Register		70
+#define AO_UI2_Switch_Load_Next_TC		_bit13
+#define AO_UC_Switch_Load_Every_BC_TC		_bit12
+#define AO_Trigger_Length			_bit11
+#define AO_Stop_On_Overrun_Error		_bit5
+#define AO_Stop_On_BC_TC_Trigger_Error		_bit4
+#define AO_Stop_On_BC_TC_Error			_bit3
+#define AO_Not_An_UPDATE			_bit2
+#define AO_Software_Gate			_bit1
+#define AO_Last_Gate_Disable			_bit0	/* M Series only */
+
+#define Joint_Reset_Register		72
+#define Software_Reset				_bit11
+#define AO_Configuration_End			_bit9
+#define AI_Configuration_End			_bit8
+#define AO_Configuration_Start			_bit5
+#define AI_Configuration_Start			_bit4
+#define G1_Reset				_bit3
+#define G0_Reset				_bit2
+#define AO_Reset				_bit1
+#define AI_Reset				_bit0
+
+#define Interrupt_A_Enable_Register	73
+#define Pass_Thru_0_Interrupt_Enable		_bit9
+#define G0_Gate_Interrupt_Enable		_bit8
+#define AI_FIFO_Interrupt_Enable		_bit7
+#define G0_TC_Interrupt_Enable			_bit6
+#define AI_Error_Interrupt_Enable		_bit5
+#define AI_STOP_Interrupt_Enable		_bit4
+#define AI_START_Interrupt_Enable		_bit3
+#define AI_START2_Interrupt_Enable		_bit2
+#define AI_START1_Interrupt_Enable		_bit1
+#define AI_SC_TC_Interrupt_Enable		_bit0
+
+#define Interrupt_B_Enable_Register	75
+#define Pass_Thru_1_Interrupt_Enable		_bit11
+#define G1_Gate_Interrupt_Enable		_bit10
+#define G1_TC_Interrupt_Enable			_bit9
+#define AO_FIFO_Interrupt_Enable		_bit8
+#define AO_UI2_TC_Interrupt_Enable		_bit7
+#define AO_UC_TC_Interrupt_Enable		_bit6
+#define AO_Error_Interrupt_Enable		_bit5
+#define AO_STOP_Interrupt_Enable		_bit4
+#define AO_START_Interrupt_Enable		_bit3
+#define AO_UPDATE_Interrupt_Enable		_bit2
+#define AO_START1_Interrupt_Enable		_bit1
+#define AO_BC_TC_Interrupt_Enable		_bit0
+
+#define Second_IRQ_A_Enable_Register	74
+#define AI_SC_TC_Second_Irq_Enable		_bit0
+#define AI_START1_Second_Irq_Enable		_bit1
+#define AI_START2_Second_Irq_Enable		_bit2
+#define AI_START_Second_Irq_Enable		_bit3
+#define AI_STOP_Second_Irq_Enable		_bit4
+#define AI_Error_Second_Irq_Enable		_bit5
+#define G0_TC_Second_Irq_Enable			_bit6
+#define AI_FIFO_Second_Irq_Enable		_bit7
+#define G0_Gate_Second_Irq_Enable		_bit8
+#define Pass_Thru_0_Second_Irq_Enable		_bit9
+
+#define Second_IRQ_B_Enable_Register	76
+#define AO_BC_TC_Second_Irq_Enable		_bit0
+#define AO_START1_Second_Irq_Enable		_bit1
+#define AO_UPDATE_Second_Irq_Enable		_bit2
+#define AO_START_Second_Irq_Enable		_bit3
+#define AO_STOP_Second_Irq_Enable		_bit4
+#define AO_Error_Second_Irq_Enable		_bit5
+#define AO_UC_TC_Second_Irq_Enable		_bit6
+#define AO_UI2_TC_Second_Irq_Enable		_bit7
+#define AO_FIFO_Second_Irq_Enable		_bit8
+#define G1_TC_Second_Irq_Enable			_bit9
+#define G1_Gate_Second_Irq_Enable		_bit10
+#define Pass_Thru_1_Second_Irq_Enable		_bit11
+
+#define AI_Personal_Register		77
+#define AI_SHIFTIN_Pulse_Width			_bit15
+#define AI_EOC_Polarity				_bit14
+#define AI_SOC_Polarity				_bit13
+#define AI_SHIFTIN_Polarity			_bit12
+#define AI_CONVERT_Pulse_Timebase		_bit11
+#define AI_CONVERT_Pulse_Width			_bit10
+#define AI_CONVERT_Original_Pulse		_bit9
+#define AI_FIFO_Flags_Polarity			_bit8
+#define AI_Overrun_Mode				_bit7
+#define AI_EXTMUX_CLK_Pulse_Width		_bit6
+#define AI_LOCALMUX_CLK_Pulse_Width		_bit5
+#define AI_AIFREQ_Polarity			_bit4
+
+#define AO_Personal_Register		78
+#define AO_Interval_Buffer_Mode			_bit3
+#define AO_BC_Source_Select			_bit4
+#define AO_UPDATE_Pulse_Width			_bit5
+#define AO_UPDATE_Pulse_Timebase		_bit6
+#define AO_UPDATE_Original_Pulse		_bit7
+#define AO_DMA_PIO_Control			_bit8 /* M Series: reserved */
+#define AO_AOFREQ_Polarity			_bit9 /* M Series: reserved */
+#define AO_FIFO_Enable				_bit10
+#define AO_FIFO_Flags_Polarity			_bit11 /* M Series: reserved */
+#define AO_TMRDACWR_Pulse_Width			_bit12
+#define AO_Fast_CPU				_bit13 /* M Series: reserved */
+#define AO_Number_Of_DAC_Packages		_bit14 /* 1 for "single" mode,
+							  0 for "dual" */
+#define AO_Multiple_DACS_Per_Package		_bit15 /* M Series only */
+
+#define	RTSI_Trig_A_Output_Register	79
+
+#define	RTSI_Trig_B_Output_Register	80
+#define RTSI_Sub_Selection_1_Bit		_bit15 /* not for M Series */
+#define RTSI_Trig_Output_Bits(x, y)		((y & 0xf) << ((x % 4) * 4))
+#define RTSI_Trig_Output_Mask(x)		(0xf << ((x % 4) * 4))
+#define RTSI_Trig_Output_Source(x, y)		((y >> ((x % 4) * 4)) & 0xf)
+
+#define	RTSI_Board_Register		81
+#define Write_Strobe_0_Register		82
+#define Write_Strobe_1_Register		83
+#define Write_Strobe_2_Register		84
+#define Write_Strobe_3_Register		85
+
+#define AO_Output_Control_Register	86
+#define AO_External_Gate_Enable			_bit15
+#define AO_External_Gate_Select(x)		(((x)&0x1f)<<10)
+#define AO_Number_Of_Channels(x)		(((x)&0xf)<<6)
+#define AO_UPDATE2_Output_Select(x)		(((x)&0x3)<<4)
+#define AO_External_Gate_Polarity		_bit3
+#define AO_UPDATE2_Output_Toggle		_bit2
+#define AO_Update_Output_High_Z			0
+#define AO_Update_Output_Ground			1
+#define AO_Update_Output_Enable_Low		2
+#define AO_Update_Output_Enable_High		3
+#define AO_UPDATE_Output_Select(x)		(x&0x3)
+
+#define AI_Mode_3_Register		87
+#define AI_Trigger_Length			_bit15
+#define AI_Delay_START				_bit14
+#define AI_Software_Gate			_bit13
+#define AI_SI_Special_Trigger_Delay		_bit12
+#define AI_SI2_Source_Select			_bit11
+#define AI_Delayed_START2			_bit10
+#define AI_Delayed_START1			_bit9
+#define AI_External_Gate_Mode			_bit8
+#define AI_FIFO_Mode_HF_to_E			(3<<6)
+#define AI_FIFO_Mode_F				(2<<6)
+#define AI_FIFO_Mode_HF				(1<<6)
+#define AI_FIFO_Mode_NE				(0<<6)
+#define AI_External_Gate_Polarity		_bit5
+#define AI_External_Gate_Select(a)		((a) & 0x1f)
+
+#define G_Autoincrement_Register(a)	(68+(a))
+#define G_Command_Register(a)		(6+(a))
+#define G_HW_Save_Register(a)		(8+(a)*2)
+#define G_HW_Save_Register_High(a)	(8+(a)*2)
+#define G_HW_Save_Register_Low(a)	(9+(a)*2)
+#define G_Input_Select_Register(a)	(36+(a))
+#define G_Load_A_Register(a)		(28+(a)*4)
+#define G_Load_A_Register_High(a)	(28+(a)*4)
+#define G_Load_A_Register_Low(a)	(29+(a)*4)
+#define G_Load_B_Register(a)		(30+(a)*4)
+#define G_Load_B_Register_High(a)	(30+(a)*4)
+#define G_Load_B_Register_Low(a)	(31+(a)*4)
+#define G_Mode_Register(a)		(26+(a))
+#define G_Save_Register(a)		(12+(a)*2)
+#define G_Save_Register_High(a)		(12+(a)*2)
+#define G_Save_Register_Low(a)		(13+(a)*2)
+#define G_Status_Register		4
+#define Analog_Trigger_Etc_Register	61
+
+/* command register */
+#define G_Disarm_Copy			_bit15		/* strobe */
+#define G_Save_Trace_Copy		_bit14
+#define G_Arm_Copy			_bit13		/* strobe */
+#define G_Bank_Switch_Start		_bit10		/* strobe */
+#define G_Little_Big_Endian		_bit9
+#define G_Synchronized_Gate		_bit8
+#define G_Write_Switch			_bit7
+#define G_Up_Down(a)			(((a)&0x03)<<5)
+#define G_Disarm			_bit4		/* strobe */
+#define G_Analog_Trigger_Reset		_bit3		/* strobe */
+#define G_Save_Trace			_bit1
+#define G_Arm				_bit0		/* strobe */
+
+/* channel agnostic names for the command register #defines */
+#define G_Bank_Switch_Enable		_bit12
+#define G_Bank_Switch_Mode		_bit11
+#define G_Load				_bit2		/* strobe */
+
+/* input select register */
+#define G_Gate_Select(a)		(((a)&0x1f)<<7)
+#define G_Source_Select(a)		(((a)&0x1f)<<2)
+#define G_Write_Acknowledges_Irq	_bit1
+#define G_Read_Acknowledges_Irq		_bit0
+
+/* same input select register, but with channel agnostic names */
+#define G_Source_Polarity		_bit15
+#define G_Output_Polarity		_bit14
+#define G_OR_Gate			_bit13
+#define G_Gate_Select_Load_Source	_bit12
+
+/* mode register */
+#define G_Loading_On_TC			_bit12
+#define G_Output_Mode(a)		(((a)&0x03)<<8)
+#define G_Trigger_Mode_For_Edge_Gate(a)	(((a)&0x03)<<3)
+#define G_Gating_Mode(a)		(((a)&0x03)<<0)
+
+/* same input mode register, but with channel agnostic names */
+#define G_Load_Source_Select		_bit7
+#define G_Reload_Source_Switching	_bit15
+#define G_Loading_On_Gate		_bit14
+#define G_Gate_Polarity		_bit13
+
+#define G_Counting_Once(a)		(((a)&0x03)<<10)
+#define G_Stop_Mode(a)			(((a)&0x03)<<5)
+#define G_Gate_On_Both_Edges		_bit2
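+
+/*
+ * Example (illustrative): following these encodings, a counter mode word
+ * for level gating with the output toggling on terminal count could be
+ * composed as G_Output_Mode(2) | G_Gating_Mode(1); the field values
+ * mirror the Gi_Output_TC_Toggle_Bits / Gi_Level_Gating_Bits encodings
+ * in ni_tio.h and should be checked against the DAQ-STC manual.
+ */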
+
+/* G_Status_Register */
+#define G1_Gate_Error_St		_bit15
+#define G0_Gate_Error_St		_bit14
+#define G1_TC_Error_St			_bit13
+#define G0_TC_Error_St			_bit12
+#define G1_No_Load_Between_Gates_St	_bit11
+#define G0_No_Load_Between_Gates_St	_bit10
+#define G1_Armed_St			_bit9
+#define G0_Armed_St			_bit8
+#define G1_Stale_Data_St		_bit7
+#define G0_Stale_Data_St		_bit6
+#define G1_Next_Load_Source_St		_bit5
+#define G0_Next_Load_Source_St		_bit4
+#define G1_Counting_St			_bit3
+#define G0_Counting_St			_bit2
+#define G1_Save_St			_bit1
+#define G0_Save_St			_bit0
+
+/* general purpose counter timer */
+#define G_Autoincrement(a)              ((a)<<0)
+
+/*Analog_Trigger_Etc_Register*/
+#define Analog_Trigger_Mode(x) ((x) & 0x7)
+#define Analog_Trigger_Enable _bit3
+#define Analog_Trigger_Drive _bit4
+#define GPFO_1_Output_Select		_bit7
+#define GPFO_0_Output_Select(a)		((a)<<11)
+#define GPFO_0_Output_Enable		_bit14
+#define GPFO_1_Output_Enable		_bit15
+
+/* Additional windowed registers unique to E series */
+
+/* 16 bit registers shadowed from DAQ-STC */
+#define Window_Address			0x00
+#define Window_Data			0x02
+
+#define Configuration_Memory_Clear	82
+#define ADC_FIFO_Clear			83
+#define DAC_FIFO_Clear			84
+
+/* i/o port offsets */
+
+/* 8 bit registers */
+#define XXX_Status			0x01
+#define PROMOUT					_bit0
+#define AI_FIFO_LOWER_NOT_EMPTY			_bit3
+
+#define Serial_Command			0x0d
+#define Misc_Command			0x0f
+#define Port_A				0x19
+#define Port_B				0x1b
+#define Port_C				0x1d
+#define Configuration			0x1f
+#define Strobes				0x01
+#define Channel_A_Mode			0x03
+#define Channel_B_Mode			0x05
+#define Channel_C_Mode			0x07
+#define AI_AO_Select			0x09
+#define AI_DMA_Select_Shift		0
+#define AI_DMA_Select_Mask		0xf
+#define AO_DMA_Select_Shift		4
+#define AO_DMA_Select_Mask		(0xf << AO_DMA_Select_Shift)
+
+#define G0_G1_Select			0x0b
+
+static inline unsigned ni_stc_dma_channel_select_bitfield(unsigned channel)
+{
+	if(channel < 4) return 1 << channel;
+	if(channel == 4) return 0x3;
+	if(channel == 5) return 0x5;
+	BUG();
+	return 0;
+}
+static inline unsigned GPCT_DMA_Select_Bits(unsigned gpct_index, unsigned mite_channel)
+{
+	BUG_ON(gpct_index > 1);
+	return ni_stc_dma_channel_select_bitfield(mite_channel) << (4 * gpct_index);
+}
+static inline unsigned GPCT_DMA_Select_Mask(unsigned gpct_index)
+{
+	BUG_ON(gpct_index > 1);
+	return 0xf << (4 * gpct_index);
+}
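+
+/*
+ * Example (illustrative): routing mite DMA channel 2 to counter G1 would
+ * clear GPCT_DMA_Select_Mask(1) in the soft copy of the G0_G1_Select
+ * register and set GPCT_DMA_Select_Bits(1, 2), i.e. (1 << 2) << 4 = 0x40.
+ */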
+
+/* 16 bit registers */
+
+#define Configuration_Memory_Low	0x10
+#define AI_DITHER				_bit9
+#define AI_LAST_CHANNEL				_bit15
+
+#define Configuration_Memory_High	0x12
+#define AI_AC_COUPLE				_bit11
+#define AI_DIFFERENTIAL				_bit12
+#define AI_COMMON				_bit13
+#define AI_GROUND				(_bit12|_bit13)
+#define AI_CONFIG_CHANNEL(x)			((x) & 0x3f)
+
+#define ADC_FIFO_Data_Register		0x1c
+
+#define AO_Configuration		0x16
+#define AO_Bipolar		_bit0
+#define AO_Deglitch		_bit1
+#define AO_Ext_Ref		_bit2
+#define AO_Ground_Ref		_bit3
+#define AO_Channel(x)		((x) << 8)
+
+#define DAC_FIFO_Data			0x1e
+#define DAC0_Direct_Data		0x18
+#define DAC1_Direct_Data		0x1a
+
+/* 611x registers (these boards differ from the e-series) */
+
+#define Magic_611x			0x19 /* w8 (new) */
+#define Calibration_Channel_Select_611x	0x1a /* w16 (new) */
+#define ADC_FIFO_Data_611x		0x1c /* r32 (incompatible) */
+#define AI_FIFO_Offset_Load_611x	0x05 /* r8 (new) */
+#define DAC_FIFO_Data_611x		0x14 /* w32 (incompatible) */
+#define Cal_Gain_Select_611x		0x05 /* w8 (new) */
+
+#define AO_Window_Address_611x		0x18
+#define AO_Window_Data_611x		0x1e
+
+/* 6143 registers */
+#define Magic_6143			0x19 /* w8 */
+#define G0G1_DMA_Select_6143		0x0B /* w8 */
+#define PipelineDelay_6143		0x1f /* w8 */
+#define EOC_Set_6143			0x1D /* w8 */
+#define AIDMA_Select_6143		0x09 /* w8 */
+#define AIFIFO_Data_6143		0x8C /* w32 */
+#define AIFIFO_Flag_6143		0x84 /* w32 */
+#define AIFIFO_Control_6143		0x88 /* w32 */
+#define AIFIFO_Status_6143		0x88 /* w32 */
+#define AIFIFO_DMAThreshold_6143	0x90 /* w32 */
+#define AIFIFO_Words_Available_6143	0x94 /* w32 */
+
+#define Calibration_Channel_6143	0x42 /* w16 */
+#define Calibration_LowTime_6143	0x20 /* w16 */
+#define Calibration_HighTime_6143	0x22 /* w16 */
+#define Relay_Counter_Load_Val__6143	0x4C /* w32 */
+#define Signature_6143			0x50 /* w32 */
+#define Release_Date_6143		0x54 /* w32 */
+#define Release_Oldest_Date_6143	0x58 /* w32 */
+
+#define Calibration_Channel_6143_RelayOn	0x8000	/* Calibration relay switch On */
+#define Calibration_Channel_6143_RelayOff	0x4000	/* Calibration relay switch Off */
+#define Calibration_Channel_Gnd_Gnd	0x00	/* Offset Calibration */
+#define Calibration_Channel_2v5_Gnd	0x02	/* 2.5V Reference */
+#define Calibration_Channel_Pwm_Gnd	0x05	/* +/- 5V Self Cal */
+#define Calibration_Channel_2v5_Pwm	0x0a	/* PWM Calibration */
+#define Calibration_Channel_Pwm_Pwm	0x0d	/* CMRR */
+#define Calibration_Channel_Gnd_Pwm	0x0e	/* PWM Calibration */
+
+/* 671x, 611x registers */
+
+/* 671xi 611x windowed ao registers */
+#define AO_Immediate_671x			0x11 /* W 16 */
+#define AO_Timed_611x				0x10 /* W 16 */
+#define AO_FIFO_Offset_Load_611x		0x13 /* W32 */
+#define AO_Later_Single_Point_Updates		0x14 /* W 16 */
+#define AO_Waveform_Generation_611x		0x15 /* W 16 */
+#define AO_Misc_611x				0x16 /* W 16 */
+#define AO_Calibration_Channel_Select_67xx	0x17 /* W 16 */
+#define AO_Configuration_2_67xx			0x18 /* W 16 */
+#define CAL_ADC_Command_67xx			0x19 /* W 8 */
+#define CAL_ADC_Status_67xx			0x1a /* R 8 */
+#define CAL_ADC_Data_67xx			0x1b /* R 16 */
+#define CAL_ADC_Config_Data_High_Word_67xx	0x1c /* RW 16 */
+#define CAL_ADC_Config_Data_Low_Word_67xx	0x1d /* RW 16 */
+
+static inline unsigned int DACx_Direct_Data_671x(int channel)
+{
+	return channel;
+}
+
+#define CLEAR_WG				_bit0
+
+#define CSCFG_CAL_CONTROL_MASK			0x7
+#define CSCFG_SELF_CAL_OFFSET			0x1
+#define CSCFG_SELF_CAL_GAIN			0x2
+#define CSCFG_SELF_CAL_OFFSET_GAIN		0x3
+#define CSCFG_SYSTEM_CAL_OFFSET			0x5
+#define CSCFG_SYSTEM_CAL_GAIN			0x6
+#define CSCFG_DONE				(1 << 3)
+#define CSCFG_POWER_SAVE_SELECT			(1 << 4)
+#define CSCFG_PORT_MODE				(1 << 5)
+#define CSCFG_RESET_VALID			(1 << 6)
+#define CSCFG_RESET				(1 << 7)
+#define CSCFG_UNIPOLAR				(1 << 12)
+#define CSCFG_WORD_RATE_2180_CYCLES		(0x0 << 13)
+#define CSCFG_WORD_RATE_1092_CYCLES		(0x1 << 13)
+#define CSCFG_WORD_RATE_532_CYCLES		(0x2 << 13)
+#define CSCFG_WORD_RATE_388_CYCLES		(0x3 << 13)
+#define CSCFG_WORD_RATE_324_CYCLES		(0x4 << 13)
+#define CSCFG_WORD_RATE_17444_CYCLES		(0x5 << 13)
+#define CSCFG_WORD_RATE_8724_CYCLES		(0x6 << 13)
+#define CSCFG_WORD_RATE_4364_CYCLES		(0x7 << 13)
+#define CSCFG_WORD_RATE_MASK			(0x7 << 13)
+#define CSCFG_LOW_POWER				(1 << 16)
+
+#define CS5529_CONFIG_DOUT(x)			(1 << (18 + (x)))
+#define CS5529_CONFIG_AOUT(x)			(1 << (22 + (x)))
+
+/* cs5529 command bits */
+#define CSCMD_POWER_SAVE			_bit0
+#define CSCMD_REGISTER_SELECT_MASK		0xe
+#define CSCMD_OFFSET_REGISTER			0x0
+#define CSCMD_GAIN_REGISTER			_bit1
+#define CSCMD_CONFIG_REGISTER			_bit2
+#define CSCMD_READ				_bit4
+#define CSCMD_CONTINUOUS_CONVERSIONS		_bit5
+#define CSCMD_SINGLE_CONVERSION			_bit6
+#define CSCMD_COMMAND				_bit7
+
+/* cs5529 status bits */
+#define CSS_ADC_BUSY				_bit0
+#define CSS_OSC_DETECT				_bit1 /* indicates adc error */
+#define CSS_OVERRANGE				_bit3
+
+#define SerDacLd(x)			(0x08<<(x))
+
+/*
+	This is stuff unique to the NI E series drivers,
+	but I thought I'd put it here anyway.
+*/
+
+enum
+{
+	ai_gain_16 = 0,
+	ai_gain_8,
+	ai_gain_14,
+	ai_gain_4,
+	ai_gain_611x,
+	ai_gain_622x,
+	ai_gain_628x,
+	ai_gain_6143
+};
+enum caldac_enum
+{
+	caldac_none=0,
+	mb88341,
+	dac8800,
+	dac8043,
+	ad8522,
+	ad8804,
+	ad8842,
+	ad8804_debug
+};
+enum ni_reg_type
+{
+	ni_reg_normal = 0x0,
+	ni_reg_611x = 0x1,
+	ni_reg_6711 = 0x2,
+	ni_reg_6713 = 0x4,
+	ni_reg_67xx_mask = 0x6,
+	ni_reg_6xxx_mask = 0x7,
+	ni_reg_622x = 0x8,
+	ni_reg_625x = 0x10,
+	ni_reg_628x = 0x18,
+	ni_reg_m_series_mask = 0x18,
+	ni_reg_6143 = 0x20
+};
+
+/* M Series registers offsets */
+#define M_Offset_CDIO_DMA_Select		0x7 /* write */
+#define M_Offset_SCXI_Status			0x7 /* read */
+#define M_Offset_AI_AO_Select			0x9 /* write, same offset as e-series */
+#define M_Offset_SCXI_Serial_Data_In		0x9 /* read */
+#define M_Offset_G0_G1_Select			0xb /* write, same offset as e-series */
+#define M_Offset_Misc_Command			0xf
+#define M_Offset_SCXI_Serial_Data_Out		0x11
+#define M_Offset_SCXI_Control			0x13
+#define M_Offset_SCXI_Output_Enable		0x15
+#define M_Offset_AI_FIFO_Data			0x1c
+#define M_Offset_Static_Digital_Output		0x24 /* write */
+#define M_Offset_Static_Digital_Input		0x24 /* read */
+#define M_Offset_DIO_Direction			0x28
+#define M_Offset_Cal_PWM			0x40
+#define M_Offset_AI_Config_FIFO_Data		0x5e
+#define M_Offset_Interrupt_C_Enable		0x88 /* write */
+#define M_Offset_Interrupt_C_Status		0x88 /* read */
+#define M_Offset_Analog_Trigger_Control		0x8c
+#define M_Offset_AO_Serial_Interrupt_Enable	0xa0
+#define M_Offset_AO_Serial_Interrupt_Ack	0xa1 /* write */
+#define M_Offset_AO_Serial_Interrupt_Status	0xa1 /* read */
+#define M_Offset_AO_Calibration			0xa3
+#define M_Offset_AO_FIFO_Data			0xa4
+#define M_Offset_PFI_Filter			0xb0
+#define M_Offset_RTSI_Filter			0xb4
+#define M_Offset_SCXI_Legacy_Compatibility	0xbc
+#define M_Offset_Interrupt_A_Ack		0x104 /* write */
+#define M_Offset_AI_Status_1			0x104 /* read */
+#define M_Offset_Interrupt_B_Ack		0x106 /* write */
+#define M_Offset_AO_Status_1			0x106 /* read */
+#define M_Offset_AI_Command_2			0x108 /* write */
+#define M_Offset_G01_Status			0x108 /* read */
+#define M_Offset_AO_Command_2			0x10a
+#define M_Offset_AO_Status_2			0x10c /* read */
+#define M_Offset_G0_Command			0x10c /* write */
+#define M_Offset_G1_Command			0x10e /* write */
+#define M_Offset_G0_HW_Save			0x110
+#define M_Offset_G0_HW_Save_High		0x110
+#define M_Offset_AI_Command_1			0x110
+#define M_Offset_G0_HW_Save_Low			0x112
+#define M_Offset_AO_Command_1			0x112
+#define M_Offset_G1_HW_Save			0x114
+#define M_Offset_G1_HW_Save_High		0x114
+#define M_Offset_G1_HW_Save_Low			0x116
+#define M_Offset_AI_Mode_1			0x118
+#define M_Offset_G0_Save			0x118
+#define M_Offset_G0_Save_High			0x118
+#define M_Offset_AI_Mode_2			0x11a
+#define M_Offset_G0_Save_Low			0x11a
+#define M_Offset_AI_SI_Load_A			0x11c
+#define M_Offset_G1_Save			0x11c
+#define M_Offset_G1_Save_High			0x11c
+#define M_Offset_G1_Save_Low			0x11e
+#define M_Offset_AI_SI_Load_B			0x120 /* write */
+#define M_Offset_AO_UI_Save			0x120 /* read */
+#define M_Offset_AI_SC_Load_A			0x124 /* write */
+#define M_Offset_AO_BC_Save			0x124 /* read */
+#define M_Offset_AI_SC_Load_B			0x128 /* write */
+#define M_Offset_AO_UC_Save			0x128 /* read */
+#define M_Offset_AI_SI2_Load_A			0x12c
+#define M_Offset_AI_SI2_Load_B			0x130
+#define M_Offset_G0_Mode			0x134
+#define M_Offset_G1_Mode			0x136 /* write */
+#define M_Offset_Joint_Status_1			0x136 /* read */
+#define M_Offset_G0_Load_A			0x138
+#define M_Offset_Joint_Status_2			0x13a
+#define M_Offset_G0_Load_B			0x13c
+#define M_Offset_G1_Load_A			0x140
+#define M_Offset_G1_Load_B			0x144
+#define M_Offset_G0_Input_Select		0x148
+#define M_Offset_G1_Input_Select		0x14a
+#define M_Offset_AO_Mode_1			0x14c
+#define M_Offset_AO_Mode_2			0x14e
+#define M_Offset_AO_UI_Load_A			0x150
+#define M_Offset_AO_UI_Load_B			0x154
+#define M_Offset_AO_BC_Load_A			0x158
+#define M_Offset_AO_BC_Load_B			0x15c
+#define M_Offset_AO_UC_Load_A			0x160
+#define M_Offset_AO_UC_Load_B			0x164
+#define M_Offset_Clock_and_FOUT			0x170
+#define M_Offset_IO_Bidirection_Pin		0x172
+#define M_Offset_RTSI_Trig_Direction		0x174
+#define M_Offset_Interrupt_Control		0x176
+#define M_Offset_AI_Output_Control		0x178
+#define M_Offset_Analog_Trigger_Etc		0x17a
+#define M_Offset_AI_START_STOP_Select		0x17c
+#define M_Offset_AI_Trigger_Select		0x17e
+#define M_Offset_AI_SI_Save			0x180 /* read */
+#define M_Offset_AI_DIV_Load_A			0x180 /* write */
+#define M_Offset_AI_SC_Save			0x184 /* read */
+#define M_Offset_AO_Start_Select		0x184 /* write */
+#define M_Offset_AO_Trigger_Select		0x186
+#define M_Offset_AO_Mode_3			0x18c
+#define M_Offset_G0_Autoincrement		0x188
+#define M_Offset_G1_Autoincrement		0x18a
+#define M_Offset_Joint_Reset			0x190
+#define M_Offset_Interrupt_A_Enable		0x192
+#define M_Offset_Interrupt_B_Enable		0x196
+#define M_Offset_AI_Personal			0x19a
+#define M_Offset_AO_Personal			0x19c
+#define M_Offset_RTSI_Trig_A_Output		0x19e
+#define M_Offset_RTSI_Trig_B_Output		0x1a0
+#define M_Offset_RTSI_Shared_MUX		0x1a2
+#define M_Offset_AO_Output_Control		0x1ac
+#define M_Offset_AI_Mode_3			0x1ae
+#define M_Offset_Configuration_Memory_Clear	0x1a4
+#define M_Offset_AI_FIFO_Clear			0x1a6
+#define M_Offset_AO_FIFO_Clear			0x1a8
+#define M_Offset_G0_Counting_Mode		0x1b0
+#define M_Offset_G1_Counting_Mode		0x1b2
+#define M_Offset_G0_Second_Gate			0x1b4
+#define M_Offset_G1_Second_Gate			0x1b6
+#define M_Offset_G0_DMA_Config			0x1b8 /* write */
+#define M_Offset_G0_DMA_Status			0x1b8 /* read */
+#define M_Offset_G1_DMA_Config			0x1ba /* write */
+#define M_Offset_G1_DMA_Status			0x1ba /* read */
+#define M_Offset_G0_MSeries_ABZ			0x1c0
+#define M_Offset_G1_MSeries_ABZ			0x1c2
+#define M_Offset_Clock_and_Fout2		0x1c4
+#define M_Offset_PLL_Control			0x1c6
+#define M_Offset_PLL_Status			0x1c8
+#define M_Offset_PFI_Output_Select_1		0x1d0
+#define M_Offset_PFI_Output_Select_2		0x1d2
+#define M_Offset_PFI_Output_Select_3		0x1d4
+#define M_Offset_PFI_Output_Select_4		0x1d6
+#define M_Offset_PFI_Output_Select_5		0x1d8
+#define M_Offset_PFI_Output_Select_6		0x1da
+#define M_Offset_PFI_DI				0x1dc
+#define M_Offset_PFI_DO				0x1de
+#define M_Offset_AI_Config_FIFO_Bypass		0x218
+#define M_Offset_SCXI_DIO_Enable		0x21c
+#define M_Offset_CDI_FIFO_Data			0x220 /* read */
+#define M_Offset_CDO_FIFO_Data			0x220 /* write */
+#define M_Offset_CDIO_Status			0x224 /* read */
+#define M_Offset_CDIO_Command			0x224 /* write */
+#define M_Offset_CDI_Mode			0x228
+#define M_Offset_CDO_Mode			0x22c
+#define M_Offset_CDI_Mask_Enable		0x230
+#define M_Offset_CDO_Mask_Enable		0x234
+#define M_Offset_AO_Waveform_Order(x)		(0xc2 + 0x4 * (x))
+#define M_Offset_AO_Config_Bank(x)		(0xc3 + 0x4 * (x))
+#define M_Offset_DAC_Direct_Data(x)		(0xc0 + 0x4 * (x))
+#define M_Offset_Gen_PWM(x)			(0x44 + 0x2 * (x))
+
+static inline int M_Offset_Static_AI_Control(int i)
+{
+	int offset[] =
+	{
+		0x64,
+		0x261,
+		0x262,
+		0x263,
+	};
+	if(((unsigned)i) >= sizeof(offset) / sizeof(offset[0]))
+	{
+		rtdm_printk("%s: invalid channel=%i\n", __func__, i);
+		return offset[0];
+	}
+	return offset[i];
+}
+static inline int M_Offset_AO_Reference_Attenuation(int channel)
+{
+	int offset[] =
+	{
+		0x264,
+		0x265,
+		0x266,
+		0x267
+	};
+	if(((unsigned)channel) >= sizeof(offset) / sizeof(offset[0]))
+	{
+		rtdm_printk("%s: invalid channel=%i\n", __func__, channel);
+		return offset[0];
+	}
+	return offset[channel];
+}
+static inline unsigned M_Offset_PFI_Output_Select(unsigned n)
+{
+	if(n < 1 || n > NUM_PFI_OUTPUT_SELECT_REGS)
+	{
+		rtdm_printk("%s: invalid pfi output select register=%i\n", __FUNCTION__, n);
+		return M_Offset_PFI_Output_Select_1;
+	}
+	return M_Offset_PFI_Output_Select_1 + (n - 1) * 2;
+}
+
+#define MSeries_AI_Config_Channel_Type_Mask			(0x7 << 6)
+#define MSeries_AI_Config_Channel_Type_Calibration_Bits		0x0
+#define MSeries_AI_Config_Channel_Type_Differential_Bits	(0x1 << 6)
+#define MSeries_AI_Config_Channel_Type_Common_Ref_Bits		(0x2 << 6)
+#define MSeries_AI_Config_Channel_Type_Ground_Ref_Bits		(0x3 << 6)
+#define MSeries_AI_Config_Channel_Type_Aux_Bits			(0x5 << 6)
+#define MSeries_AI_Config_Channel_Type_Ghost_Bits		(0x7 << 6)
+#define MSeries_AI_Config_Polarity_Bit				0x1000 /* 0 for 2's complement encoding */
+#define MSeries_AI_Config_Dither_Bit				0x2000
+#define MSeries_AI_Config_Last_Channel_Bit			0x4000
+#define MSeries_AI_Config_Channel_Bits(x)			((x) & 0xf)
+#define MSeries_AI_Config_Gain_Bits(x)				(((x) & 0x7) << 9)
+
+static inline
+unsigned int MSeries_AI_Config_Bank_Bits(unsigned int reg_type,
+					 unsigned int channel)
+{
+	unsigned int bits = channel & 0x30;
+	if (reg_type == ni_reg_622x) {
+		if (channel & 0x40)
+			bits |= 0x400;
+	}
+	return bits;
+}
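+
+/*
+ * Composition sketch (illustrative; channel/gain values are placeholders,
+ * not calibrated settings): an AI configuration FIFO entry for the last
+ * channel of a differential scan on a 622x-type board might be built as:
+ */
+#if 0
+static inline unsigned short ai_config_entry_sketch(unsigned int chan,
+						    unsigned int gain)
+{
+	return MSeries_AI_Config_Channel_Bits(chan) |
+	       MSeries_AI_Config_Bank_Bits(ni_reg_622x, chan) |
+	       MSeries_AI_Config_Channel_Type_Differential_Bits |
+	       MSeries_AI_Config_Gain_Bits(gain) |
+	       MSeries_AI_Config_Last_Channel_Bit;
+}
+#endif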
+
+#define MSeries_PLL_In_Source_Select_RTSI0_Bits			0xb
+#define MSeries_PLL_In_Source_Select_Star_Trigger_Bits		0x14
+#define MSeries_PLL_In_Source_Select_RTSI7_Bits			0x1b
+#define MSeries_PLL_In_Source_Select_PXI_Clock10		0x1d
+#define MSeries_PLL_In_Source_Select_Mask			0x1f
+#define MSeries_Timebase1_Select_Bit				0x20 /* use PLL for timebase 1 */
+#define MSeries_Timebase3_Select_Bit				0x40 /* use PLL for timebase 3 */
+/* Use 10MHz instead of 20MHz for RTSI clock frequency.  Appears
+   to have no effect, at least on pxi-6281, which always uses
+   20MHz rtsi clock frequency */
+#define MSeries_RTSI_10MHz_Bit					0x80
+
+static inline
+unsigned int MSeries_PLL_In_Source_Select_RTSI_Bits(unsigned int RTSI_channel)
+{
+	if(RTSI_channel > 7)
+	{
+		rtdm_printk("%s: bug, invalid RTSI_channel=%i\n", __FUNCTION__, RTSI_channel);
+		return 0;
+	}
+	if(RTSI_channel == 7) return MSeries_PLL_In_Source_Select_RTSI7_Bits;
+	else return MSeries_PLL_In_Source_Select_RTSI0_Bits + RTSI_channel;
+}
+
+#define MSeries_PLL_Enable_Bit					0x1000
+#define MSeries_PLL_VCO_Mode_200_325MHz_Bits			0x0
+#define MSeries_PLL_VCO_Mode_175_225MHz_Bits			0x2000
+#define MSeries_PLL_VCO_Mode_100_225MHz_Bits			0x4000
+#define MSeries_PLL_VCO_Mode_75_150MHz_Bits			0x6000
+
+static inline
+unsigned int MSeries_PLL_Divisor_Bits(unsigned int divisor)
+{
+	static const unsigned int max_divisor = 0x10;
+	if(divisor < 1 || divisor > max_divisor)
+	{
+		rtdm_printk("%s: bug, invalid divisor=%i\n", __FUNCTION__, divisor);
+		return 0;
+	}
+	return (divisor & 0xf) << 8;
+}
+static inline
+unsigned int MSeries_PLL_Multiplier_Bits(unsigned int multiplier)
+{
+	static const unsigned int max_multiplier = 0x100;
+	if(multiplier < 1 || multiplier > max_multiplier)
+	{
+		rtdm_printk("%s: bug, invalid multiplier=%i\n", __FUNCTION__, multiplier);
+		return 0;
+	}
+	return multiplier & 0xff;
+}
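+
+/*
+ * Composition sketch (hedged: assumes the PLL output frequency is
+ * fin * multiplier / divisor and that the selected VCO band must bracket
+ * that frequency; check against NI's register-level documentation):
+ */
+#if 0
+static inline unsigned int pll_control_sketch(unsigned int divisor,
+					      unsigned int multiplier)
+{
+	return MSeries_PLL_Enable_Bit |
+	       MSeries_PLL_VCO_Mode_75_150MHz_Bits |
+	       MSeries_PLL_Divisor_Bits(divisor) |
+	       MSeries_PLL_Multiplier_Bits(multiplier);
+}
+#endif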
+
+#define MSeries_PLL_Locked_Bit				0x1
+
+#define MSeries_AI_Bypass_Channel_Mask			0x7
+#define MSeries_AI_Bypass_Bank_Mask			0x78
+#define MSeries_AI_Bypass_Cal_Sel_Pos_Mask		0x380
+#define MSeries_AI_Bypass_Cal_Sel_Neg_Mask		0x1c00
+#define MSeries_AI_Bypass_Mode_Mux_Mask			0x6000
+#define MSeries_AO_Bypass_AO_Cal_Sel_Mask		0x38000
+#define MSeries_AI_Bypass_Gain_Mask			0x1c0000
+#define MSeries_AI_Bypass_Dither_Bit			0x200000
+#define MSeries_AI_Bypass_Polarity_Bit			0x400000 /* 0 for 2's complement encoding */
+#define MSeries_AI_Bypass_Config_FIFO_Bit		0x80000000
+#define MSeries_AI_Bypass_Cal_Sel_Pos_Bits(x)		(((x) << 7) & \
+							 MSeries_AI_Bypass_Cal_Sel_Pos_Mask)
+#define MSeries_AI_Bypass_Cal_Sel_Neg_Bits(x)		(((x) << 10) & \
+							 MSeries_AI_Bypass_Cal_Sel_Neg_Mask)
+#define MSeries_AI_Bypass_Gain_Bits(x)			(((x) << 18) & \
+							 MSeries_AI_Bypass_Gain_Mask)
+
+#define MSeries_AO_DAC_Offset_Select_Mask		0x7
+#define MSeries_AO_DAC_Offset_0V_Bits			0x0
+#define MSeries_AO_DAC_Offset_5V_Bits			0x1
+#define MSeries_AO_DAC_Reference_Mask			0x38
+#define MSeries_AO_DAC_Reference_10V_Internal_Bits	0x0
+#define MSeries_AO_DAC_Reference_5V_Internal_Bits	0x8
+#define MSeries_AO_Update_Timed_Bit			0x40
+#define MSeries_AO_Bipolar_Bit				0x80 /* turns on 2's complement encoding */
+
+#define MSeries_Attenuate_x5_Bit			0x1
+
+#define MSeries_Cal_PWM_High_Time_Bits(x)		(((x) << 16) & 0xffff0000)
+#define MSeries_Cal_PWM_Low_Time_Bits(x)		((x) & 0xffff)
+
+#define MSeries_PFI_Output_Select_Mask(x)		(0x1f << (((x) % 3) * 5))
+#define MSeries_PFI_Output_Select_Bits(x, y)		(((y) & 0x1f) << (((x) % 3) * 5))
+/* inverse to MSeries_PFI_Output_Select_Bits */
+#define MSeries_PFI_Output_Select_Source(x, y)		(((y) >> (((x) % 3) * 5)) & 0x1f)
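+
+/*
+ * Routing sketch (illustrative): PFI terminals are grouped three to a
+ * register with five select bits each, so terminal n lives in
+ * M_Offset_PFI_Output_Select(n / 3 + 1):
+ */
+#if 0
+static inline unsigned short pfi_route_sketch(unsigned short reg_copy,
+					      unsigned n, unsigned source)
+{
+	reg_copy &= ~MSeries_PFI_Output_Select_Mask(n);
+	reg_copy |= MSeries_PFI_Output_Select_Bits(n, source);
+	return reg_copy; /* write back to M_Offset_PFI_Output_Select(n / 3 + 1) */
+}
+#endif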
+
+#define Gi_DMA_BankSW_Error_Bit				0x10
+#define Gi_DMA_Reset_Bit				0x8
+#define Gi_DMA_Int_Enable_Bit				0x4
+#define Gi_DMA_Write_Bit				0x2
+#define Gi_DMA_Enable_Bit				0x1
+
+#define MSeries_PFI_Filter_Select_Mask(x)		(0x3 << ((x) * 2))
+#define MSeries_PFI_Filter_Select_Bits(x, y)		(((y) << ((x) * 2)) & \
+							 MSeries_PFI_Filter_Select_Mask(x))
+
+/* CDIO DMA select bits */
+#define CDI_DMA_Select_Shift	0
+#define CDI_DMA_Select_Mask	0xf
+#define CDO_DMA_Select_Shift	4
+#define CDO_DMA_Select_Mask	(0xf << CDO_DMA_Select_Shift)
+
+/* CDIO status bits */
+#define CDO_FIFO_Empty_Bit	0x1
+#define CDO_FIFO_Full_Bit	0x2
+#define CDO_FIFO_Request_Bit	0x4
+#define CDO_Overrun_Bit		0x8
+#define CDO_Underflow_Bit	0x10
+#define CDI_FIFO_Empty_Bit	0x10000
+#define CDI_FIFO_Full_Bit	0x20000
+#define CDI_FIFO_Request_Bit	0x40000
+#define CDI_Overrun_Bit		0x80000
+#define CDI_Overflow_Bit	0x100000
+
+/* CDIO command bits */
+#define CDO_Disarm_Bit					0x1
+#define CDO_Arm_Bit					0x2
+#define CDI_Disarm_Bit					0x4
+#define CDI_Arm_Bit					0x8
+#define CDO_Reset_Bit					0x10
+#define CDI_Reset_Bit					0x20
+#define CDO_Error_Interrupt_Enable_Set_Bit		0x40
+#define CDO_Error_Interrupt_Enable_Clear_Bit		0x80
+#define CDI_Error_Interrupt_Enable_Set_Bit		0x100
+#define CDI_Error_Interrupt_Enable_Clear_Bit		0x200
+#define CDO_FIFO_Request_Interrupt_Enable_Set_Bit	0x400
+#define CDO_FIFO_Request_Interrupt_Enable_Clear_Bit	0x800
+#define CDI_FIFO_Request_Interrupt_Enable_Set_Bit	0x1000
+#define CDI_FIFO_Request_Interrupt_Enable_Clear_Bit	0x2000
+#define CDO_Error_Interrupt_Confirm_Bit			0x4000
+#define CDI_Error_Interrupt_Confirm_Bit			0x8000
+#define CDO_Empty_FIFO_Interrupt_Enable_Set_Bit		0x10000
+#define CDO_Empty_FIFO_Interrupt_Enable_Clear_Bit	0x20000
+#define CDO_SW_Update_Bit				0x80000
+#define CDI_SW_Update_Bit				0x100000
+
+/* CDIO mode bits */
+#define CDI_Sample_Source_Select_Mask	0x3f
+#define CDI_Halt_On_Error_Bit		0x200
+/* sample clock on falling edge */
+#define CDI_Polarity_Bit		0x400
+/* set for half full mode, clear for not empty mode */
+#define CDI_FIFO_Mode_Bit		0x800
+/* data lanes specify which dio channels map to byte or word accesses
+   to the dio fifos */
+#define CDI_Data_Lane_Mask		0x3000
+#define CDI_Data_Lane_0_15_Bits		0x0
+#define CDI_Data_Lane_16_31_Bits	0x1000
+#define CDI_Data_Lane_0_7_Bits		0x0
+#define CDI_Data_Lane_8_15_Bits		0x1000
+#define CDI_Data_Lane_16_23_Bits	0x2000
+#define CDI_Data_Lane_24_31_Bits	0x3000
+
+/* CDO mode bits */
+#define CDO_Sample_Source_Select_Mask	0x3f
+#define CDO_Retransmit_Bit		0x100
+#define CDO_Halt_On_Error_Bit		0x200
+/* sample clock on falling edge */
+#define CDO_Polarity_Bit		0x400
+/* set for half full mode, clear for not full mode */
+#define CDO_FIFO_Mode_Bit		0x800
+/* data lanes specify which dio channels map to byte or word accesses
+   to the dio fifos */
+#define CDO_Data_Lane_Mask		0x3000
+#define CDO_Data_Lane_0_15_Bits		0x0
+#define CDO_Data_Lane_16_31_Bits	0x1000
+#define CDO_Data_Lane_0_7_Bits		0x0
+#define CDO_Data_Lane_8_15_Bits		0x1000
+#define CDO_Data_Lane_16_23_Bits	0x2000
+#define CDO_Data_Lane_24_31_Bits	0x3000
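+
+/*
+ * Example (illustrative; the sample-source encodings are board-specific
+ * and not spelled out in this header): a CDO mode word for half-full
+ * FIFO mode on the low 16 DIO lines might combine
+ *	(src & CDO_Sample_Source_Select_Mask) | CDO_FIFO_Mode_Bit |
+ *	CDO_Data_Lane_0_15_Bits
+ */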
+
+/* Interrupt C bits */
+#define Interrupt_Group_C_Enable_Bit	0x1
+#define Interrupt_Group_C_Status_Bit	0x1
+
+#define M_SERIES_EEPROM_SIZE 1024
+
+typedef struct ni_board_struct{
+	unsigned short device_id;
+	int isapnp_id;
+	char *name;
+
+	int n_adchan;
+	int adbits;
+
+	int ai_fifo_depth;
+	unsigned int alwaysdither : 1;
+	int gainlkup;
+	int ai_speed;
+
+	int n_aochan;
+	int aobits;
+	struct a4l_rngdesc *ao_range_table;
+	int ao_fifo_depth;
+
+	unsigned ao_speed;
+
+	unsigned num_p0_dio_channels;
+
+	int reg_type;
+	unsigned int ao_unipolar : 1;
+	unsigned int has_8255 : 1;
+	unsigned int has_analog_trig : 1;
+
+	enum caldac_enum caldac[3];
+} ni_board;
+
+#define n_ni_boards  (sizeof(ni_boards)/sizeof(ni_board))
+
+#define MAX_N_CALDACS 34
+#define MAX_N_AO_CHAN 8
+#define NUM_GPCT 2
+
+#define NI_PRIVATE_COMMON					\
+	uint16_t (*stc_readw)(struct a4l_device *dev, int reg);	\
+	uint32_t (*stc_readl)(struct a4l_device *dev, int reg);	\
+	void (*stc_writew)(struct a4l_device *dev, uint16_t value, int reg);	\
+	void (*stc_writel)(struct a4l_device *dev, uint32_t value, int reg);	\
+	\
+	int dio_state;						\
+	int pfi_state;						\
+	int io_bits;						\
+	unsigned short dio_output;				\
+	unsigned short dio_control;				\
+	int ao0p,ao1p;						\
+	int lastchan;						\
+	int last_do;						\
+	int rt_irq;						\
+	int irq_polarity;					\
+	int irq_pin;						\
+	int aimode;						\
+	int ai_continuous;					\
+	int blocksize;						\
+	int n_left;						\
+	unsigned int ai_calib_source;				\
+	unsigned int ai_calib_source_enabled;			\
+	rtdm_lock_t window_lock; \
+	rtdm_lock_t soft_reg_copy_lock; \
+	rtdm_lock_t mite_channel_lock; \
+								\
+	int changain_state;					\
+	unsigned int changain_spec;				\
+								\
+	unsigned int caldac_maxdata_list[MAX_N_CALDACS];	\
+	unsigned short ao[MAX_N_AO_CHAN];					\
+	unsigned short caldacs[MAX_N_CALDACS];				\
+								\
+	unsigned short ai_cmd2;	\
+								\
+	unsigned short ao_conf[MAX_N_AO_CHAN];				\
+	unsigned short ao_mode1;				\
+	unsigned short ao_mode2;				\
+	unsigned short ao_mode3;				\
+	unsigned short ao_cmd1;					\
+	unsigned short ao_cmd2;					\
+	unsigned short ao_cmd3;					\
+	unsigned short ao_trigger_select;			\
+								\
+	struct ni_gpct_device *counter_dev;	\
+	unsigned short an_trig_etc_reg;				\
+								\
+	unsigned ai_offset[512];				\
+								\
+	unsigned long serial_interval_ns;                       \
+	unsigned char serial_hw_mode;                           \
+	unsigned short clock_and_fout;				\
+	unsigned short clock_and_fout2;				\
+								\
+	unsigned short int_a_enable_reg;			\
+	unsigned short int_b_enable_reg;			\
+	unsigned short io_bidirection_pin_reg;			\
+	unsigned short rtsi_trig_direction_reg;			\
+	unsigned short rtsi_trig_a_output_reg; \
+	unsigned short rtsi_trig_b_output_reg; \
+	unsigned short pfi_output_select_reg[NUM_PFI_OUTPUT_SELECT_REGS]; \
+	unsigned short ai_ao_select_reg; \
+	unsigned short g0_g1_select_reg; \
+	unsigned short cdio_dma_select_reg; \
+	\
+	unsigned clock_ns; \
+	unsigned clock_source; \
+	\
+	unsigned short atrig_mode;				\
+	unsigned short atrig_high;				\
+	unsigned short atrig_low;				\
+	\
+	unsigned short pwm_up_count;	\
+	unsigned short pwm_down_count;	\
+	\
+	sampl_t ai_fifo_buffer[0x2000];				\
+	uint8_t eeprom_buffer[M_SERIES_EEPROM_SIZE]; \
+	\
+	struct mite_struct *mite; \
+	struct mite_channel *ai_mite_chan; \
+	struct mite_channel *ao_mite_chan;\
+	struct mite_channel *cdo_mite_chan;\
+	struct mite_dma_descriptor_ring *ai_mite_ring; \
+	struct mite_dma_descriptor_ring *ao_mite_ring; \
+	struct mite_dma_descriptor_ring *cdo_mite_ring; \
+	struct mite_dma_descriptor_ring *gpct_mite_ring[NUM_GPCT]; \
+	subd_8255_t subd_8255
+
+
+typedef struct {
+	ni_board *board_ptr;
+	NI_PRIVATE_COMMON;
+} ni_private;
+
+#define devpriv ((ni_private *)dev->priv)
+#define boardtype (*(ni_board *)devpriv->board_ptr)
+
+/* How we access registers */
+
+#define ni_writel(a,b)	(writel((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readl(a)	(readl(devpriv->mite->daq_io_addr + (a)))
+#define ni_writew(a,b)	(writew((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readw(a)	(readw(devpriv->mite->daq_io_addr + (a)))
+#define ni_writeb(a,b)	(writeb((a), devpriv->mite->daq_io_addr + (b)))
+#define ni_readb(a)	(readb(devpriv->mite->daq_io_addr + (a)))
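+
+/*
+ * Access sketch (hedged): on E series boards the DAQ-STC registers sit
+ * behind the Window_Address/Window_Data pair, so a windowed write holds
+ * window_lock across both I/O cycles. A minimal sketch, assuming the
+ * devpriv layout above:
+ */
+#if 0
+static void win_out_sketch(struct a4l_device *dev, uint16_t data, int reg)
+{
+	rtdm_lockctx_t ctx;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, ctx);
+	ni_writew(reg, Window_Address);	/* select the STC register */
+	ni_writew(data, Window_Data);	/* then write its value */
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, ctx);
+}
+#endif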
+
+/* INSN_CONFIG_SET_CLOCK_SRC argument for NI cards */
+#define NI_FREQ_OUT_TIMEBASE_1_DIV_2_CLOCK_SRC	0 /* 10 MHz */
+#define NI_FREQ_OUT_TIMEBASE_2_CLOCK_SRC	1 /* 100 KHz */
+
+#endif /* _ANALOGY_NI_STC_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h
new file mode 100644
index 0000000..deef652
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/ni_tio.h
@@ -0,0 +1,1192 @@
+/*
+ * Hardware driver for NI general purpose counter
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __ANALOGY_NI_TIO_H__
+#define __ANALOGY_NI_TIO_H__
+
+#include <rtdm/analogy/device.h>
+
+#ifdef CONFIG_PCI
+#include "mite.h"
+#endif
+
+enum ni_gpct_register {
+	NITIO_G0_Autoincrement_Reg,
+	NITIO_G1_Autoincrement_Reg,
+	NITIO_G2_Autoincrement_Reg,
+	NITIO_G3_Autoincrement_Reg,
+	NITIO_G0_Command_Reg,
+	NITIO_G1_Command_Reg,
+	NITIO_G2_Command_Reg,
+	NITIO_G3_Command_Reg,
+	NITIO_G0_HW_Save_Reg,
+	NITIO_G1_HW_Save_Reg,
+	NITIO_G2_HW_Save_Reg,
+	NITIO_G3_HW_Save_Reg,
+	NITIO_G0_SW_Save_Reg,
+	NITIO_G1_SW_Save_Reg,
+	NITIO_G2_SW_Save_Reg,
+	NITIO_G3_SW_Save_Reg,
+	NITIO_G0_Mode_Reg,
+	NITIO_G1_Mode_Reg,
+	NITIO_G2_Mode_Reg,
+	NITIO_G3_Mode_Reg,
+	NITIO_G0_LoadA_Reg,
+	NITIO_G1_LoadA_Reg,
+	NITIO_G2_LoadA_Reg,
+	NITIO_G3_LoadA_Reg,
+	NITIO_G0_LoadB_Reg,
+	NITIO_G1_LoadB_Reg,
+	NITIO_G2_LoadB_Reg,
+	NITIO_G3_LoadB_Reg,
+	NITIO_G0_Input_Select_Reg,
+	NITIO_G1_Input_Select_Reg,
+	NITIO_G2_Input_Select_Reg,
+	NITIO_G3_Input_Select_Reg,
+	NITIO_G0_Counting_Mode_Reg,
+	NITIO_G1_Counting_Mode_Reg,
+	NITIO_G2_Counting_Mode_Reg,
+	NITIO_G3_Counting_Mode_Reg,
+	NITIO_G0_Second_Gate_Reg,
+	NITIO_G1_Second_Gate_Reg,
+	NITIO_G2_Second_Gate_Reg,
+	NITIO_G3_Second_Gate_Reg,
+	NITIO_G01_Status_Reg,
+	NITIO_G23_Status_Reg,
+	NITIO_G01_Joint_Reset_Reg,
+	NITIO_G23_Joint_Reset_Reg,
+	NITIO_G01_Joint_Status1_Reg,
+	NITIO_G23_Joint_Status1_Reg,
+	NITIO_G01_Joint_Status2_Reg,
+	NITIO_G23_Joint_Status2_Reg,
+	NITIO_G0_DMA_Config_Reg,
+	NITIO_G1_DMA_Config_Reg,
+	NITIO_G2_DMA_Config_Reg,
+	NITIO_G3_DMA_Config_Reg,
+	NITIO_G0_DMA_Status_Reg,
+	NITIO_G1_DMA_Status_Reg,
+	NITIO_G2_DMA_Status_Reg,
+	NITIO_G3_DMA_Status_Reg,
+	NITIO_G0_ABZ_Reg,
+	NITIO_G1_ABZ_Reg,
+	NITIO_G0_Interrupt_Acknowledge_Reg,
+	NITIO_G1_Interrupt_Acknowledge_Reg,
+	NITIO_G2_Interrupt_Acknowledge_Reg,
+	NITIO_G3_Interrupt_Acknowledge_Reg,
+	NITIO_G0_Status_Reg,
+	NITIO_G1_Status_Reg,
+	NITIO_G2_Status_Reg,
+	NITIO_G3_Status_Reg,
+	NITIO_G0_Interrupt_Enable_Reg,
+	NITIO_G1_Interrupt_Enable_Reg,
+	NITIO_G2_Interrupt_Enable_Reg,
+	NITIO_G3_Interrupt_Enable_Reg,
+	NITIO_Num_Registers,
+};
+
+static inline enum ni_gpct_register NITIO_Gi_Autoincrement_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Autoincrement_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Autoincrement_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Autoincrement_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Autoincrement_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Command_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Command_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Command_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Command_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Command_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Counting_Mode_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Counting_Mode_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Counting_Mode_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Counting_Mode_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Counting_Mode_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Input_Select_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Input_Select_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Input_Select_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Input_Select_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Input_Select_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Reset_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Reset_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Reset_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Status1_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Status1_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Status1_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Joint_Status2_Reg(unsigned
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Joint_Status2_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Joint_Status2_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gxx_Status_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+	case 1:
+		return NITIO_G01_Status_Reg;
+		break;
+	case 2:
+	case 3:
+		return NITIO_G23_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_LoadA_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_LoadA_Reg;
+		break;
+	case 1:
+		return NITIO_G1_LoadA_Reg;
+		break;
+	case 2:
+		return NITIO_G2_LoadA_Reg;
+		break;
+	case 3:
+		return NITIO_G3_LoadA_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_LoadB_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_LoadB_Reg;
+		break;
+	case 1:
+		return NITIO_G1_LoadB_Reg;
+		break;
+	case 2:
+		return NITIO_G2_LoadB_Reg;
+		break;
+	case 3:
+		return NITIO_G3_LoadB_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Mode_Reg(unsigned counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Mode_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Mode_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Mode_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Mode_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_SW_Save_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_SW_Save_Reg;
+		break;
+	case 1:
+		return NITIO_G1_SW_Save_Reg;
+		break;
+	case 2:
+		return NITIO_G2_SW_Save_Reg;
+		break;
+	case 3:
+		return NITIO_G3_SW_Save_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Second_Gate_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Second_Gate_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Second_Gate_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Second_Gate_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Second_Gate_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_DMA_Config_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_DMA_Config_Reg;
+		break;
+	case 1:
+		return NITIO_G1_DMA_Config_Reg;
+		break;
+	case 2:
+		return NITIO_G2_DMA_Config_Reg;
+		break;
+	case 3:
+		return NITIO_G3_DMA_Config_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_DMA_Status_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_DMA_Status_Reg;
+		break;
+	case 1:
+		return NITIO_G1_DMA_Status_Reg;
+		break;
+	case 2:
+		return NITIO_G2_DMA_Status_Reg;
+		break;
+	case 3:
+		return NITIO_G3_DMA_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_ABZ_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_ABZ_Reg;
+		break;
+	case 1:
+		return NITIO_G1_ABZ_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Interrupt_Acknowledge_Reg(int
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Interrupt_Acknowledge_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Interrupt_Acknowledge_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Interrupt_Acknowledge_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Interrupt_Acknowledge_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Status_Reg(int counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Status_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Status_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Status_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Status_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline enum ni_gpct_register NITIO_Gi_Interrupt_Enable_Reg(int
+	counter_index)
+{
+	switch (counter_index) {
+	case 0:
+		return NITIO_G0_Interrupt_Enable_Reg;
+		break;
+	case 1:
+		return NITIO_G1_Interrupt_Enable_Reg;
+		break;
+	case 2:
+		return NITIO_G2_Interrupt_Enable_Reg;
+		break;
+	case 3:
+		return NITIO_G3_Interrupt_Enable_Reg;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+enum ni_gpct_variant {
+	ni_gpct_variant_e_series,
+	ni_gpct_variant_m_series,
+	ni_gpct_variant_660x
+};
+
+struct ni_gpct {
+	struct ni_gpct_device *counter_dev;
+	unsigned counter_index;
+	unsigned chip_index;
+	uint64_t clock_period_ps; /* clock period in picoseconds */
+	struct mite_channel *mite_chan;
+	rtdm_lock_t lock;
+};
+
+struct ni_gpct_device {
+	struct a4l_device *dev;
+	void (*write_register)(struct ni_gpct * counter,
+				unsigned int bits, enum ni_gpct_register reg);
+	unsigned (*read_register)(struct ni_gpct * counter,
+				   enum ni_gpct_register reg);
+	enum ni_gpct_variant variant;
+	struct ni_gpct **counters;
+	unsigned num_counters;
+	unsigned regs[NITIO_Num_Registers];
+	rtdm_lock_t regs_lock;
+};
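+
+/*
+ * Update sketch (hedged): embedding drivers supply the
+ * write_register/read_register callbacks, and regs[] keeps soft copies
+ * of write-only registers under regs_lock. A masked read-modify-write
+ * would then look roughly like:
+ */
+#if 0
+static inline void ni_tio_set_bits_sketch(struct ni_gpct *counter,
+					  enum ni_gpct_register reg,
+					  unsigned mask, unsigned bits)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	rtdm_lockctx_t ctx;
+
+	rtdm_lock_get_irqsave(&counter_dev->regs_lock, ctx);
+	counter_dev->regs[reg] &= ~mask;
+	counter_dev->regs[reg] |= (bits & mask);
+	counter_dev->write_register(counter, counter_dev->regs[reg], reg);
+	rtdm_lock_put_irqrestore(&counter_dev->regs_lock, ctx);
+}
+#endif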
+
+#define Gi_Auto_Increment_Mask		0xff
+#define Gi_Up_Down_Shift		5
+
+#define Gi_Arm_Bit			0x1
+#define Gi_Save_Trace_Bit		0x2
+#define Gi_Load_Bit			0x4
+#define Gi_Disarm_Bit			0x10
+#define Gi_Up_Down_Mask			(0x3 << Gi_Up_Down_Shift)
+#define Gi_Always_Down_Bits		(0x0 << Gi_Up_Down_Shift)
+#define Gi_Always_Up_Bits		(0x1 << Gi_Up_Down_Shift)
+#define Gi_Up_Down_Hardware_IO_Bits	(0x2 << Gi_Up_Down_Shift)
+#define Gi_Up_Down_Hardware_Gate_Bits	(0x3 << Gi_Up_Down_Shift)
+#define Gi_Write_Switch_Bit		0x80
+#define Gi_Synchronize_Gate_Bit		0x100
+#define Gi_Little_Big_Endian_Bit	0x200
+#define Gi_Bank_Switch_Start_Bit	0x400
+#define Gi_Bank_Switch_Mode_Bit		0x800
+#define Gi_Bank_Switch_Enable_Bit	0x1000
+#define Gi_Arm_Copy_Bit			0x2000
+#define Gi_Save_Trace_Copy_Bit		0x4000
+#define Gi_Disarm_Copy_Bit		0x8000
+
+#define Gi_Index_Phase_Bitshift	5
+#define Gi_HW_Arm_Select_Shift		8
+
+#define Gi_Counting_Mode_Mask		0x7
+#define Gi_Counting_Mode_Normal_Bits	0x0
+#define Gi_Counting_Mode_QuadratureX1_Bits 0x1
+#define Gi_Counting_Mode_QuadratureX2_Bits 0x2
+#define Gi_Counting_Mode_QuadratureX4_Bits 0x3
+#define Gi_Counting_Mode_Two_Pulse_Bits	0x4
+#define Gi_Counting_Mode_Sync_Source_Bits 0x6
+#define Gi_Index_Mode_Bit		0x10
+#define Gi_Index_Phase_Mask		(0x3 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_LowA_LowB	(0x0 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_LowA_HighB	(0x1 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_HighA_LowB	(0x2 << Gi_Index_Phase_Bitshift)
+#define Gi_Index_Phase_HighA_HighB	(0x3 << Gi_Index_Phase_Bitshift)
+
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_HW_Arm_Enable_Bit		0x80
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_660x_HW_Arm_Select_Mask	(0x7 << Gi_HW_Arm_Select_Shift)
+#define Gi_660x_Prescale_X8_Bit		0x1000
+#define Gi_M_Series_Prescale_X8_Bit	0x2000
+#define Gi_M_Series_HW_Arm_Select_Mask	(0x1f << Gi_HW_Arm_Select_Shift)
+/* Must be set for clocks over 40MHz,
+   which includes synchronous counting and quadrature modes */
+#define Gi_660x_Alternate_Sync_Bit	0x2000
+#define Gi_M_Series_Alternate_Sync_Bit	0x4000
+/* From m-series example code,
+   not documented in 660x register level manual */
+#define Gi_660x_Prescale_X2_Bit		0x4000
+#define Gi_M_Series_Prescale_X2_Bit	0x8000
+
+static inline unsigned int Gi_Alternate_Sync_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Alternate_Sync_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Alternate_Sync_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_Prescale_X2_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Prescale_X2_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Prescale_X2_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_Prescale_X8_Bit(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_Prescale_X8_Bit;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_Prescale_X8_Bit;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static inline unsigned int Gi_HW_Arm_Select_Mask(enum ni_gpct_variant variant)
+{
+	switch (variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+		break;
+	case ni_gpct_variant_m_series:
+		return Gi_M_Series_HW_Arm_Select_Mask;
+		break;
+	case ni_gpct_variant_660x:
+		return Gi_660x_HW_Arm_Select_Mask;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+#define NI_660x_Timebase_1_Clock	0x0 /* 20MHz */
+#define NI_660x_Source_Pin_i_Clock	0x1
+#define NI_660x_Next_Gate_Clock		0xa
+#define NI_660x_Timebase_2_Clock	0x12 /* 100KHz */
+#define NI_660x_Next_TC_Clock		0x13
+#define NI_660x_Timebase_3_Clock	0x1e /* 80MHz */
+#define NI_660x_Logic_Low_Clock		0x1f
+
+#define ni_660x_max_rtsi_channel	6
+#define ni_660x_max_source_pin		7
+
+static inline unsigned int NI_660x_RTSI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return (0xb + n);
+}
+
+static inline unsigned int NI_660x_Source_Pin_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_source_pin);
+	return (0x2 + n);
+}
+
+/* Clock sources for ni e and m series boards,
+   get bits with Gi_Source_Select_Bits() */
+#define NI_M_Series_Timebase_1_Clock	0x0 /* 20MHz */
+#define NI_M_Series_Timebase_2_Clock	0x12 /* 100KHz */
+#define NI_M_Series_Next_TC_Clock	0x13
+#define NI_M_Series_Next_Gate_Clock	0x14 /* when Gi_Src_SubSelect = 0 */
+#define NI_M_Series_PXI_Star_Trigger_Clock 0x14 /* when Gi_Src_SubSelect = 1 */
+#define NI_M_Series_PXI10_Clock		0x1d
+#define NI_M_Series_Timebase_3_Clock	0x1e /* 80MHz, when Gi_Src_SubSelect = 0 */
+#define NI_M_Series_Analog_Trigger_Out_Clock 0x1e /* when Gi_Src_SubSelect = 1 */
+#define NI_M_Series_Logic_Low_Clock	0x1f
+
+#define ni_m_series_max_pfi_channel	15
+#define ni_m_series_max_rtsi_channel	7
+
+static inline unsigned int NI_M_Series_PFI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_pfi_channel);
+	if (n < 10)
+		return 1 + n;
+	else
+		return 0xb + n;
+}
+
+static inline unsigned int NI_M_Series_RTSI_Clock(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_rtsi_channel);
+	if (n == 7)
+		return 0x1b;
+	else
+		return 0xb + n;
+}
+
+#define NI_660x_Source_Pin_i_Gate_Select 0x0
+#define NI_660x_Gate_Pin_i_Gate_Select	0x1
+#define NI_660x_Next_SRC_Gate_Select	0xa
+#define NI_660x_Next_Out_Gate_Select	0x14
+#define NI_660x_Logic_Low_Gate_Select	0x1f
+#define ni_660x_max_gate_pin 7
+
+static inline unsigned int NI_660x_Gate_Pin_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_gate_pin);
+	return 0x2 + n;
+}
+
+static inline unsigned int NI_660x_RTSI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return 0xb + n;
+}
+
+
+#define NI_M_Series_Timestamp_Mux_Gate_Select	0x0
+#define NI_M_Series_AI_START2_Gate_Select	0x12
+#define NI_M_Series_PXI_Star_Trigger_Gate_Select 0x13
+#define NI_M_Series_Next_Out_Gate_Select	0x14
+#define NI_M_Series_AI_START1_Gate_Select	0x1c
+#define NI_M_Series_Next_SRC_Gate_Select	0x1d
+#define NI_M_Series_Analog_Trigger_Out_Gate_Select 0x1e
+#define NI_M_Series_Logic_Low_Gate_Select	0x1f
+
+static inline unsigned int NI_M_Series_RTSI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_rtsi_channel);
+	if (n == 7)
+		return 0x1b;
+	return 0xb + n;
+}
+
+static inline unsigned int NI_M_Series_PFI_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_m_series_max_pfi_channel);
+	if (n < 10)
+		return 1 + n;
+	return 0xb + n;
+}
+
+
+#define Gi_Source_Select_Shift 2
+#define Gi_Gate_Select_Shift 7
+
+#define Gi_Read_Acknowledges_Irq	0x1 /* not present on 660x */
+#define Gi_Write_Acknowledges_Irq	0x2 /* not present on 660x */
+#define Gi_Source_Select_Mask		0x7c
+#define Gi_Gate_Select_Mask		(0x1f << Gi_Gate_Select_Shift)
+#define Gi_Gate_Select_Load_Source_Bit	0x1000
+#define Gi_Or_Gate_Bit			0x2000
+#define Gi_Output_Polarity_Bit		0x4000 /* set to invert */
+#define Gi_Source_Polarity_Bit		0x8000 /* set to invert */
+
+#define Gi_Source_Select_Bits(x) (((x) << Gi_Source_Select_Shift) & \
+				  Gi_Source_Select_Mask)
+#define Gi_Gate_Select_Bits(x) (((x) << Gi_Gate_Select_Shift) & \
+				Gi_Gate_Select_Mask)
+
+#define Gi_Gating_Mode_Mask		0x3
+#define Gi_Gating_Disabled_Bits		0x0
+#define Gi_Level_Gating_Bits		0x1
+#define Gi_Rising_Edge_Gating_Bits	0x2
+#define Gi_Falling_Edge_Gating_Bits	0x3
+#define Gi_Gate_On_Both_Edges_Bit	0x4 /* used in conjunction with
+					       rising edge gating mode */
+#define Gi_Trigger_Mode_for_Edge_Gate_Mask 0x18
+#define Gi_Edge_Gate_Starts_Stops_Bits	0x0
+#define Gi_Edge_Gate_Stops_Starts_Bits	0x8
+#define Gi_Edge_Gate_Starts_Bits	0x10
+#define Gi_Edge_Gate_No_Starts_or_Stops_Bits 0x18
+#define Gi_Stop_Mode_Mask		0x60
+#define Gi_Stop_on_Gate_Bits		0x00
+#define Gi_Stop_on_Gate_or_TC_Bits	0x20
+#define Gi_Stop_on_Gate_or_Second_TC_Bits 0x40
+#define Gi_Load_Source_Select_Bit	0x80
+#define Gi_Output_Mode_Mask		0x300
+#define Gi_Output_TC_Pulse_Bits		0x100
+#define Gi_Output_TC_Toggle_Bits	0x200
+#define Gi_Output_TC_or_Gate_Toggle_Bits 0x300
+#define Gi_Counting_Once_Mask		0xc00
+#define Gi_No_Hardware_Disarm_Bits	0x000
+#define Gi_Disarm_at_TC_Bits		0x400
+#define Gi_Disarm_at_Gate_Bits		0x800
+#define Gi_Disarm_at_TC_or_Gate_Bits	0xc00
+#define Gi_Loading_On_TC_Bit		0x1000
+#define Gi_Gate_Polarity_Bit		0x2000
+#define Gi_Loading_On_Gate_Bit		0x4000
+#define Gi_Reload_Source_Switching_Bit	0x8000
+
+#define NI_660x_Source_Pin_i_Second_Gate_Select		0x0
+#define NI_660x_Up_Down_Pin_i_Second_Gate_Select	0x1
+#define NI_660x_Next_SRC_Second_Gate_Select		0xa
+#define NI_660x_Next_Out_Second_Gate_Select		0x14
+#define NI_660x_Selected_Gate_Second_Gate_Select	0x1e
+#define NI_660x_Logic_Low_Second_Gate_Select		0x1f
+
+#define ni_660x_max_up_down_pin		7
+
+static inline
+unsigned int NI_660x_Up_Down_Pin_Second_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_up_down_pin);
+	return 0x2 + n;
+}
+static inline
+unsigned int NI_660x_RTSI_Second_Gate_Select(unsigned int n)
+{
+	BUG_ON(n > ni_660x_max_rtsi_channel);
+	return 0xb + n;
+}
+
+#define Gi_Second_Gate_Select_Shift	7
+
+/*FIXME: m-series has a second gate subselect bit */
+/*FIXME: m-series second gate sources are undocumented (by NI)*/
+#define Gi_Second_Gate_Mode_Bit		0x1
+#define Gi_Second_Gate_Select_Mask	(0x1f << Gi_Second_Gate_Select_Shift)
+#define Gi_Second_Gate_Polarity_Bit	0x2000
+#define Gi_Second_Gate_Subselect_Bit	0x4000 /* m-series only */
+#define Gi_Source_Subselect_Bit		0x8000 /* m-series only */
+
+static inline
+unsigned int Gi_Second_Gate_Select_Bits(unsigned int second_gate_select)
+{
+	return (second_gate_select << Gi_Second_Gate_Select_Shift) &
+		Gi_Second_Gate_Select_Mask;
+}
+
+#define G0_Save_Bit		0x1
+#define G1_Save_Bit		0x2
+#define G0_Counting_Bit		0x4
+#define G1_Counting_Bit		0x8
+#define G0_Next_Load_Source_Bit	0x10
+#define G1_Next_Load_Source_Bit	0x20
+#define G0_Stale_Data_Bit	0x40
+#define G1_Stale_Data_Bit	0x80
+#define G0_Armed_Bit		0x100
+#define G1_Armed_Bit		0x200
+#define G0_No_Load_Between_Gates_Bit 0x400
+#define G1_No_Load_Between_Gates_Bit 0x800
+#define G0_TC_Error_Bit		0x1000
+#define G1_TC_Error_Bit		0x2000
+#define G0_Gate_Error_Bit	0x4000
+#define G1_Gate_Error_Bit	0x8000
+
+static inline unsigned int Gi_Counting_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Counting_Bit;
+	return G0_Counting_Bit;
+}
+
+static inline unsigned int Gi_Armed_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Armed_Bit;
+	return G0_Armed_Bit;
+}
+
+static inline unsigned int Gi_Next_Load_Source_Bit(unsigned counter_index)
+{
+	if (counter_index % 2)
+		return G1_Next_Load_Source_Bit;
+	return G0_Next_Load_Source_Bit;
+}
+
+static inline unsigned int Gi_Stale_Data_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Stale_Data_Bit;
+	return G0_Stale_Data_Bit;
+}
+
+static inline unsigned int Gi_TC_Error_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_TC_Error_Bit;
+	return G0_TC_Error_Bit;
+}
+
+static inline unsigned int Gi_Gate_Error_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Gate_Error_Bit;
+	return G0_Gate_Error_Bit;
+}
+
+/* Joint reset register bits */
+static inline unsigned Gi_Reset_Bit(unsigned int counter_index)
+{
+	return 0x1 << (2 + (counter_index % 2));
+}
+
+#define G0_Output_Bit		0x1
+#define G1_Output_Bit		0x2
+#define G0_HW_Save_Bit		0x1000
+#define G1_HW_Save_Bit		0x2000
+#define G0_Permanent_Stale_Bit	0x4000
+#define G1_Permanent_Stale_Bit	0x8000
+
+static inline unsigned int Gi_Permanent_Stale_Bit(unsigned
+	counter_index)
+{
+	if (counter_index % 2)
+		return G1_Permanent_Stale_Bit;
+	return G0_Permanent_Stale_Bit;
+}
+
+#define Gi_DMA_Enable_Bit	0x1
+#define Gi_DMA_Write_Bit	0x2
+#define Gi_DMA_Int_Bit		0x4
+
+#define Gi_DMA_Readbank_Bit	0x2000
+#define Gi_DRQ_Error_Bit	0x4000
+#define Gi_DRQ_Status_Bit	0x8000
+
+#define G0_Gate_Error_Confirm_Bit	0x20
+#define G0_TC_Error_Confirm_Bit		0x40
+
+#define G1_Gate_Error_Confirm_Bit	0x2
+#define G1_TC_Error_Confirm_Bit		0x4
+
+static inline unsigned int Gi_Gate_Error_Confirm_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_Gate_Error_Confirm_Bit;
+	return G0_Gate_Error_Confirm_Bit;
+}
+
+static inline unsigned int Gi_TC_Error_Confirm_Bit(unsigned int counter_index)
+{
+	if (counter_index % 2)
+		return G1_TC_Error_Confirm_Bit;
+	return G0_TC_Error_Confirm_Bit;
+}
+
+/* Bits that are the same in G0/G2 and G1/G3 interrupt acknowledge registers */
+#define Gi_TC_Interrupt_Ack_Bit		0x4000
+#define Gi_Gate_Interrupt_Ack_Bit	0x8000
+
+#define Gi_Gate_Interrupt_Bit	0x4
+#define Gi_TC_Bit		0x8
+#define Gi_Interrupt_Bit	0x8000
+
+#define G0_TC_Interrupt_Enable_Bit	0x40
+#define G0_Gate_Interrupt_Enable_Bit	0x100
+
+#define G1_TC_Interrupt_Enable_Bit	0x200
+#define G1_Gate_Interrupt_Enable_Bit	0x400
+
+static inline unsigned int Gi_Gate_Interrupt_Enable_Bit(unsigned int counter_index)
+{
+	unsigned int bit;
+
+	if (counter_index % 2) {
+		bit = G1_Gate_Interrupt_Enable_Bit;
+	} else {
+		bit = G0_Gate_Interrupt_Enable_Bit;
+	}
+	return bit;
+}
+
+#define counter_status_mask (A4L_COUNTER_ARMED | A4L_COUNTER_COUNTING)
+
+#define NI_USUAL_PFI_SELECT(x)	(((x) < 10) ? (0x1 + (x)) : (0xb + (x)))
+#define NI_USUAL_RTSI_SELECT(x)	(((x) < 7) ? (0xb + (x)) : 0x1b)
+
+/* Mode bits for NI general-purpose counters, set with
+   INSN_CONFIG_SET_COUNTER_MODE */
+#define NI_GPCT_COUNTING_MODE_SHIFT		16
+#define NI_GPCT_INDEX_PHASE_BITSHIFT		20
+#define NI_GPCT_COUNTING_DIRECTION_SHIFT	24
+
+#define NI_GPCT_GATE_ON_BOTH_EDGES_BIT		0x4
+#define NI_GPCT_EDGE_GATE_MODE_MASK		0x18
+#define NI_GPCT_EDGE_GATE_STARTS_STOPS_BITS	0x0
+#define NI_GPCT_EDGE_GATE_STOPS_STARTS_BITS	0x8
+#define NI_GPCT_EDGE_GATE_STARTS_BITS		0x10
+#define NI_GPCT_EDGE_GATE_NO_STARTS_NO_STOPS_BITS 0x18
+#define NI_GPCT_STOP_MODE_MASK			0x60
+#define NI_GPCT_STOP_ON_GATE_BITS		0x00
+#define NI_GPCT_STOP_ON_GATE_OR_TC_BITS		0x20
+#define NI_GPCT_STOP_ON_GATE_OR_SECOND_TC_BITS	0x40
+#define NI_GPCT_LOAD_B_SELECT_BIT		0x80
+#define NI_GPCT_OUTPUT_MODE_MASK		0x300
+#define NI_GPCT_OUTPUT_TC_PULSE_BITS		0x100
+#define NI_GPCT_OUTPUT_TC_TOGGLE_BITS		0x200
+#define NI_GPCT_OUTPUT_TC_OR_GATE_TOGGLE_BITS	0x300
+#define NI_GPCT_HARDWARE_DISARM_MASK		0xc00
+#define NI_GPCT_NO_HARDWARE_DISARM_BITS		0x000
+#define NI_GPCT_DISARM_AT_TC_BITS		0x400
+#define NI_GPCT_DISARM_AT_GATE_BITS		0x800
+#define NI_GPCT_DISARM_AT_TC_OR_GATE_BITS	0xc00
+#define NI_GPCT_LOADING_ON_TC_BIT		0x1000
+#define NI_GPCT_LOADING_ON_GATE_BIT		0x4000
+#define NI_GPCT_COUNTING_MODE_MASK		(0x7 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_NORMAL_BITS	(0x0 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X1_BITS (0x1 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X2_BITS (0x2 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_QUADRATURE_X4_BITS (0x3 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_TWO_PULSE_BITS	(0x4 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_COUNTING_MODE_SYNC_SOURCE_BITS	(0x6 << NI_GPCT_COUNTING_MODE_SHIFT)
+#define NI_GPCT_INDEX_PHASE_MASK		(0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_LOW_A_LOW_B_BITS	(0x0 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_LOW_A_HIGH_B_BITS	(0x1 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_HIGH_A_LOW_B_BITS	(0x2 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_PHASE_HIGH_A_HIGH_B_BITS	(0x3 << NI_GPCT_INDEX_PHASE_BITSHIFT)
+#define NI_GPCT_INDEX_ENABLE_BIT		0x400000
+#define NI_GPCT_COUNTING_DIRECTION_MASK		(0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_DOWN_BITS	(0x00 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_UP_BITS	(0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_HW_UP_DOWN_BITS (0x2 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_COUNTING_DIRECTION_HW_GATE_BITS (0x3 << NI_GPCT_COUNTING_DIRECTION_SHIFT)
+#define NI_GPCT_RELOAD_SOURCE_MASK		0xc000000
+#define NI_GPCT_RELOAD_SOURCE_FIXED_BITS	0x0
+#define NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS	0x4000000
+#define NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS	0x8000000
+#define NI_GPCT_OR_GATE_BIT			0x10000000
+#define NI_GPCT_INVERT_OUTPUT_BIT		0x20000000
+
+/* Bits for setting a clock source with INSN_CONFIG_SET_CLOCK_SRC when
+   using NI general-purpose counters. */
+#define NI_GPCT_CLOCK_SRC_SELECT_MASK		0x3f
+#define NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS	0x0
+#define NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS	0x1
+#define NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS	0x2
+#define NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS	0x3
+#define NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS	0x4
+#define NI_GPCT_NEXT_TC_CLOCK_SRC_BITS		0x5
+#define NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS	0x6 /* NI 660x-specific */
+#define NI_GPCT_PXI10_CLOCK_SRC_BITS		0x7
+#define NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS	0x8
+#define NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS 0x9
+#define NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK	0x30000000
+#define NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS	0x0
+#define NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS	0x10000000 /* divide source by 2 */
+#define NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS	0x20000000 /* divide source by 8 */
+#define NI_GPCT_INVERT_CLOCK_SRC_BIT		0x80000000
+#define NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(x)	(0x10 + (x))
+#define NI_GPCT_RTSI_CLOCK_SRC_BITS(x)		(0x18 + (x))
+#define NI_GPCT_PFI_CLOCK_SRC_BITS(x)		(0x20 + (x))
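+
+/* Illustrative only: NI_GPCT_PFI_CLOCK_SRC_BITS(2) selects PFI2 as the
+   count source; OR-ing in NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS would
+   divide that source by 8 first.  The combined word is the data of an
+   INSN_CONFIG_SET_CLOCK_SRC instruction. */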
+
+/* Possibilities for setting a gate source with
+   INSN_CONFIG_SET_GATE_SRC when using NI general-purpose counters.
+   May be bitwise-or'd with CR_EDGE or CR_INVERT. */
+/* M-series gates */
+#define NI_GPCT_TIMESTAMP_MUX_GATE_SELECT	0x0
+#define NI_GPCT_AI_START2_GATE_SELECT		0x12
+#define NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT	0x13
+#define NI_GPCT_NEXT_OUT_GATE_SELECT		0x14
+#define NI_GPCT_AI_START1_GATE_SELECT		0x1c
+#define NI_GPCT_NEXT_SOURCE_GATE_SELECT		0x1d
+#define NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT	0x1e
+#define NI_GPCT_LOGIC_LOW_GATE_SELECT		0x1f
+/* More gates for 660x */
+#define NI_GPCT_SOURCE_PIN_i_GATE_SELECT	0x100
+#define NI_GPCT_GATE_PIN_i_GATE_SELECT		0x101
+/* More gates for 660x "second gate" */
+#define NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT	0x201
+#define NI_GPCT_SELECTED_GATE_GATE_SELECT	0x21e
+/* M-series "second gate" sources are unknown, we should add them here
+   with an offset of 0x300 when known. */
+#define NI_GPCT_DISABLED_GATE_SELECT		0x8000
+#define NI_GPCT_GATE_PIN_GATE_SELECT(x)		(0x102 + (x))
+#define NI_GPCT_RTSI_GATE_SELECT(x)		NI_USUAL_RTSI_SELECT(x)
+#define NI_GPCT_PFI_GATE_SELECT(x)		NI_USUAL_PFI_SELECT(x)
+#define NI_GPCT_UP_DOWN_PIN_GATE_SELECT(x)	(0x202 + (x))
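+
+/* Illustrative only: NI_GPCT_PFI_GATE_SELECT(1) | CR_EDGE selects PFI1
+   as an edge-sensitive gate; the value is the data of an
+   INSN_CONFIG_SET_GATE_SRC instruction. */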
+
+/* Possibilities for setting a source with INSN_CONFIG_SET_OTHER_SRC
+   when using NI general-purpose counters. */
+#define NI_GPCT_SOURCE_ENCODER_A 0
+#define NI_GPCT_SOURCE_ENCODER_B 1
+#define NI_GPCT_SOURCE_ENCODER_Z 2
+/* M-series gates: still unknown; probably only NI_GPCT_PFI_OTHER_SELECT
+   is needed. */
+#define NI_GPCT_DISABLED_OTHER_SELECT	0x8000
+#define NI_GPCT_PFI_OTHER_SELECT(x) NI_USUAL_PFI_SELECT(x)
+
+/* Start sources for NI general-purpose counters, for use with
+   INSN_CONFIG_ARM. */
+#define NI_GPCT_ARM_IMMEDIATE		0x0
+/* Start both the counter and the adjacent paired counter
+   simultaneously */
+#define NI_GPCT_ARM_PAIRED_IMMEDIATE	0x1
+/* NI doesn't document bits for selecting hardware arm triggers.  If
+   the NI_GPCT_ARM_UNKNOWN bit is set, we will pass the least significant
+   bits (3 bits for 660x or 5 bits for m-series) through to the
+   hardware. This will at least allow someone to figure out what the bits
+   do later. */
+#define NI_GPCT_ARM_UNKNOWN		0x1000
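+
+/* Illustrative only: (NI_GPCT_ARM_UNKNOWN | 0x2) would pass 0x2
+   straight through to the hardware as an undocumented arm-trigger
+   select, which is the intended way to experiment with these bits. */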
+
+/* Digital filtering options for the NI 660x, for use with
+   INSN_CONFIG_FILTER. */
+#define NI_GPCT_FILTER_OFF		0x0
+#define NI_GPCT_FILTER_TIMEBASE_3_SYNC	0x1
+#define NI_GPCT_FILTER_100x_TIMEBASE_1	0x2
+#define NI_GPCT_FILTER_20x_TIMEBASE_1	0x3
+#define NI_GPCT_FILTER_10x_TIMEBASE_1	0x4
+#define NI_GPCT_FILTER_2x_TIMEBASE_1	0x5
+#define NI_GPCT_FILTER_2x_TIMEBASE_3	0x6
+
+/* Master clock sources for NI MIO boards, set with
+   INSN_CONFIG_SET_CLOCK_SRC. */
+#define NI_MIO_INTERNAL_CLOCK		0
+#define NI_MIO_RTSI_CLOCK		1
+/* NI_MIO_RTSI_CLOCK does not work on m-series boards; use
+   NI_MIO_PLL_RTSI_CLOCK() instead.  The NI_MIO_PLL_* sources are
+   m-series only. */
+#define NI_MIO_PLL_PXI_STAR_TRIGGER_CLOCK 2
+#define NI_MIO_PLL_PXI10_CLOCK		3
+#define NI_MIO_PLL_RTSI0_CLOCK		4
+
+#define NI_MIO_PLL_RTSI_CLOCK(x) (NI_MIO_PLL_RTSI0_CLOCK + (x))
+
+/* Signals which can be routed to an NI RTSI pin with
+   INSN_CONFIG_SET_ROUTING. The numbers assigned are not arbitrary, they
+   correspond to the bits required to program the board. */
+#define NI_RTSI_OUTPUT_ADR_START1	0
+#define NI_RTSI_OUTPUT_ADR_START2	1
+#define NI_RTSI_OUTPUT_SCLKG		2
+#define NI_RTSI_OUTPUT_DACUPDN		3
+#define NI_RTSI_OUTPUT_DA_START1	4
+#define NI_RTSI_OUTPUT_G_SRC0		5
+#define NI_RTSI_OUTPUT_G_GATE0		6
+#define NI_RTSI_OUTPUT_RGOUT0		7
+#define NI_RTSI_OUTPUT_RTSI_BRD_0	8
+/* Pre-m-series always have RTSI clock on line 7 */
+#define NI_RTSI_OUTPUT_RTSI_OSC		12
+
+#define NI_RTSI_OUTPUT_RTSI_BRD(x) (NI_RTSI_OUTPUT_RTSI_BRD_0 + (x))
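+
+/* Illustrative only: passing NI_RTSI_OUTPUT_G_SRC0 as the routing
+   value for an RTSI channel in an INSN_CONFIG_SET_ROUTING instruction
+   mirrors counter 0's source onto that RTSI line. */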
+
+
+int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn);
+void a4l_ni_tio_init_counter(struct ni_gpct *counter);
+
+struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev,
+	void (*write_register) (struct ni_gpct * counter, unsigned int bits,
+		enum ni_gpct_register reg),
+	unsigned int (*read_register) (struct ni_gpct * counter,
+		enum ni_gpct_register reg), enum ni_gpct_variant variant,
+	unsigned int num_counters);
+void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+extern struct a4l_cmd_desc a4l_ni_tio_cmd_mask;
+
+int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum);
+int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd);
+int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd);
+int a4l_ni_tio_cancel(struct ni_gpct *counter);
+
+void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev);
+void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter,
+			     struct mite_channel *mite_chan);
+void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
+				    int *gate_error,
+				    int *tc_error,
+				    int *perm_stale_data, int *stale_data);
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+#endif /* !__ANALOGY_NI_TIO_H__ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c
new file mode 100644
index 0000000..8a3cccc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/pcimio.c
@@ -0,0 +1,1603 @@
+/*
+ * Hardware driver for NI PCI-MIO E series cards
+ *
+ * Copyright (C) 1997-8 David A. Schleef <ds@schleef.org>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: National Instruments PCI-MIO-E series and M series
+ * (all boards)
+ *
+ * Author: ds, John Hallen, Frank Mori Hess, Rolf Mueller, Herbert Peremans,
+ * Herman Bruyninckx, Terry Barnaby
+ * Status: works
+ * Devices: [National Instruments] PCI-MIO-16XE-50 (ni_pcimio),
+ * PCI-MIO-16XE-10, PXI-6030E, PCI-MIO-16E-1, PCI-MIO-16E-4, PCI-6014,
+ * PCI-6040E,PXI-6040E, PCI-6030E, PCI-6031E, PCI-6032E, PCI-6033E,
+ * PCI-6071E, PCI-6023E, PCI-6024E, PCI-6025E, PXI-6025E, PCI-6034E,
+ * PCI-6035E, PCI-6052E, PCI-6110, PCI-6111, PCI-6220, PCI-6221,
+ * PCI-6224, PCI-6225, PCI-6229, PCI-6250, PCI-6251, PCIe-6251,
+ * PCI-6254, PCI-6259, PCIe-6259, PCI-6280, PCI-6281, PXI-6281,
+ * PCI-6284, PCI-6289, PCI-6711, PXI-6711, PCI-6713, PXI-6713,
+ * PXI-6071E, PCI-6070E, PXI-6070E, PXI-6052E, PCI-6036E, PCI-6731,
+ * PCI-6733, PXI-6733, PCI-6143, PXI-6143
+ *
+ * These boards are almost identical to the AT-MIO E series, except that
+ * they use the PCI bus instead of ISA (i.e., AT).  See the notes for
+ * the ni_atmio.o driver for additional information about these boards.
+ *
+ * By default, the driver uses DMA to transfer analog input data to
+ * memory.  When DMA is enabled, not all triggering features are
+ * supported.
+ *
+ * Note that the PCI-6143 is a simultaneous sampling device with 8
+ * converters. With this board, all of the converters take one
+ * simultaneous sample during a scan interval. The scan period is used
+ * as the convert time in an Analogy cmd, and the convert trigger
+ * source is set to TRIG_NOW by default.
+ *
+ * The RTSI trigger bus is supported on these cards on subdevice
+ * 10. See the Analogy library documentation for details.
+ *
+ * References:
+ * 341079b.pdf  PCI E Series Register-Level Programmer Manual
+ * 340934b.pdf  DAQ-STC reference manual
+ * 322080b.pdf  6711/6713/6715 User Manual
+ * 320945c.pdf  PCI E Series User Manual
+ * 322138a.pdf  PCI-6052E and DAQPad-6052E User Manual
+ *
+ * ISSUES:
+ * - When DMA is enabled, XXX_EV_CONVERT does not work correctly.
+ * - Calibration is not fully implemented
+ * - SCXI is probably broken for m-series boards
+ * - Digital I/O may not work on 673x.
+ * - Information (number of channels, bits, etc.) for some devices may
+ *   be incorrect.  Please check this and submit a bug if there are
+ *   problems for your device.
+ * - Need to deal with external reference for DAC, and other DAC
+ *   properties in board properties
+ * - Deal with at-mio-16de-10 revision D to N changes, etc.
+ * - Need to add other CALDAC type
+ * - Need to slow down DAC loading.  I don't trust NI's claim that two
+ *   writes to the PCI bus slows IO enough.  I would prefer to use
+ *   a4l_udelay().  Timing specs: (clock)
+ *     AD8522   30ns
+ *     DAC8043  120ns
+ *     DAC8800  60ns
+ *     MB88341   ?
+ *
+ */
+
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#include "../intel/8255.h"
+#include "ni_stc.h"
+#include "ni_mio.h"
+#include "mite.h"
+
+#define PCIMIO_IRQ_POLARITY 1
+
+/* The following two tables must be in the same order */
+static struct pci_device_id ni_pci_table[] __maybe_unused = {
+	{ PCI_VENDOR_ID_NATINST, 0x0162, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1170, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1180, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1190, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x11d0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1270, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1330, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1350, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x14e0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x14f0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1580, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x15b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1880, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x1870, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x18b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x18c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2410, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2420, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2430, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2890, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x28c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a60, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a70, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2a80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2ab0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2b80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2b90, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2c80, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x2ca0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70aa, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70ab, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70ac, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70af, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b4, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b6, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b7, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70b8, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bd, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70bf, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70c0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x70f2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x710d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x716c, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x717f, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x71bc, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_NATINST, 0x717d, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 },
+	{ 0 }
+};
+MODULE_DEVICE_TABLE(pci, ni_pci_table);
+
+/* These are not all the possible AO ranges for 628x boards.  They can
+ * do OFFSET +- REFERENCE, where OFFSET can be 0V, 5V, APFI<0,1>, or
+ * AO<0...3>, and REFERENCE can be 10V, 5V, 2V, 1V, APFI<0,1>, or
+ * AO<0...3>.  That makes 63 different possibilities.  An AO channel
+ * cannot act as its own OFFSET or REFERENCE.
+ */
+
+#if 0
+static struct a4l_rngtab rng_ni_M_628x_ao = { 9, { /* length matches the 9 initializers below */
+	RANGE(-10, 10),
+	RANGE(-5, 5),
+	RANGE(-2, 2),
+	RANGE(-1, 1),
+	RANGE(-5, 15),
+	RANGE(0, 10),
+	RANGE(3, 7),
+	RANGE(4, 6),
+	RANGE_ext(-1, 1)
+}};
+static struct a4l_rngdesc range_ni_M_628x_ao =
+	RNG_GLOBAL(rng_ni_M_628x_ao);
+#endif
+
+static struct a4l_rngtab rng_ni_M_625x_ao = { 3, {
+	RANGE(-10, 10),
+	RANGE(-5, 5),
+	RANGE_ext(-1, 1)
+}};
+static struct a4l_rngdesc range_ni_M_625x_ao =
+	RNG_GLOBAL(rng_ni_M_625x_ao);
+
+static struct a4l_rngtab rng_ni_M_622x_ao = { 1, {
+	RANGE(-10, 10),
+}};
+static struct a4l_rngdesc range_ni_M_622x_ao =
+	RNG_GLOBAL(rng_ni_M_622x_ao);
+
+static ni_board ni_boards[]={
+	{       device_id:      0x0162, // NI also says 0x1620.  typo?
+		name:           "pci-mio-16xe-50",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  2048,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_8,
+		ai_speed:	50000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	50000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043},
+		has_8255:       0,
+	},
+	{       device_id:      0x1170,
+		name:           "pci-mio-16xe-10", // aka pci-6030E
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{	device_id:      0x28c0,
+		name:           "pci-6014",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:       5000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{       device_id:      0x11d0,
+		name:           "pxi-6030e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+
+	{       device_id:      0x1180,
+		name:           "pci-mio-16e-1",	/* aka pci-6070e */
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341},
+		has_8255:       0,
+	},
+	{       device_id:      0x1190,
+		name:           "pci-mio-16e-4", /* aka pci-6040e */
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		/* Note: there have been reported problems with full speed
+		 * on this board */
+		ai_speed:	2000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  512,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, // doc says mb88341
+		has_8255:       0,
+	},
+	{       device_id:      0x11c0,
+		name:           "pxi-6040e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_16,
+		ai_speed:	2000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  512,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341},
+		has_8255:       0,
+	},
+
+	{       device_id:      0x1330,
+		name:           "pci-6031e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1270,
+		name:           "pci-6032e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1340,
+		name:           "pci-6033e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+		has_8255:       0,
+	},
+	{       device_id:      0x1350,
+		name:           "pci-6071e",
+		n_adchan:       64,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{       device_id:      0x2a60,
+		name:           "pci-6023e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	0,
+	},
+	{       device_id:      0x2a70,
+		name:           "pci-6024e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	0,
+	},
+	{       device_id:      0x2a80,
+		name:           "pci-6025e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	1,
+	},
+	{       device_id:      0x2ab0,
+		name:           "pxi-6025e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug}, /* manual is wrong */
+		has_8255:	1,
+	},
+
+	{       device_id:      0x2ca0,
+		name:           "pci-6034e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{       device_id:      0x2c80,
+		name:           "pci-6035e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{       device_id:      0x18b0,
+		name:           "pci-6052e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	3000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_unipolar:    1,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_speed:	3000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug,ad8804_debug,ad8522}, /* manual is wrong */
+	},
+	{       device_id:      0x14e0,
+		name:           "pci-6110",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	200,
+		n_aochan:       2,
+		aobits:         16,
+		reg_type:	ni_reg_611x,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804,ad8804},
+	},
+	{       device_id:      0x14f0,
+		name:           "pci-6111",
+		n_adchan:       2,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	200,
+		n_aochan:       2,
+		aobits:         16,
+		reg_type:	ni_reg_611x,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804,ad8804},
+	},
+#if 0 /* Need device IDs */
+	/* The 6115 boards probably need their own driver */
+	{       device_id:      0x2ed0,
+		name:           "pci-6115",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	100,
+		n_aochan:       2,
+		aobits:         16,
+		ao_671x:	1,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		.num_p0_dio_channels = 8,
+		reg_611x:	1,
+		caldac:         {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */
+	},
+#endif
+#if 0 /* Need device IDs */
+	{       device_id:      0x0000,
+		name:           "pxi-6115",
+		n_adchan:       4,
+		adbits:         12,
+		ai_fifo_depth:  8192,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_611x,
+		ai_speed:	100,
+		n_aochan:       2,
+		aobits:         16,
+		ao_671x:	1,
+		ao_unipolar:    0,
+		ao_fifo_depth:  2048,
+		ao_speed:	250,
+		reg_611x:	1,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug,ad8804_debug,ad8804_debug},/* XXX */
+	},
+#endif
+	{       device_id:      0x1880,
+		name:           "pci-6711",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384, /* data sheet says 8192, but fifo really holds 16384 samples */
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+	{       device_id:      0x2b90,
+		name:           "pxi-6711",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+	{       device_id:      0x1870,
+		name:           "pci-6713",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{       device_id:      0x2b80,
+		name:           "pxi-6713",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         12,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:	0x2430,
+		name:           "pci-6731",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  8192,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+#if 0	/* Need device IDs */
+	{       device_id:      0x0,
+		name:           "pxi-6731",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	4,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  8192,
+		.ao_range_table = &a4l_range_bipolar10,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6711,
+		caldac:         {ad8804_debug},
+	},
+#endif
+	{       device_id:      0x2410,
+		name:           "pci-6733",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{       device_id:      0x2420,
+		name:           "pxi-6733",
+		n_adchan:       0, /* no analog input */
+		n_aochan:	8,
+		aobits:         16,
+		ao_unipolar:    0,
+		ao_fifo_depth:  16384,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_6713,
+		caldac:         {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:      0x15b0,
+		name:           "pxi-6071e",
+		n_adchan:       64,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:       800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{	device_id:      0x11b0,
+		name:           "pxi-6070e",
+		n_adchan:       16,
+		adbits:         12,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:       800,
+		n_aochan:       2,
+		aobits:         12,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	1000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:       0,
+	},
+	{	device_id:      0x18c0,
+		name:           "pxi-6052e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_16,
+		ai_speed:	3000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_unipolar:    1,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_speed:	3000,
+		.num_p0_dio_channels = 8,
+		caldac:         {mb88341,mb88341,ad8522},
+	},
+	{	device_id:      0x1580,
+		name:           "pxi-6031e",
+		n_adchan:       64,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_14,
+		ai_speed:	10000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  2048,
+		.ao_range_table = &a4l_range_ni_E_ao_ext,
+		ao_unipolar:    1,
+		ao_speed:	10000,
+		.num_p0_dio_channels = 8,
+		caldac:         {dac8800,dac8043,ad8522},
+	},
+	{	device_id:      0x2890,
+		name:           "pci-6036e",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,
+		alwaysdither:   1,
+		gainlkup:       ai_gain_4,
+		ai_speed:	5000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  0,
+		.ao_range_table = &a4l_range_bipolar10,
+		ao_unipolar:    0,
+		ao_speed:	100000,
+		.num_p0_dio_channels = 8,
+		caldac:         {ad8804_debug},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b0,
+		name:           "pci-6220",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  512,	//FIXME: guess
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70af,
+		name:           "pci-6221",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &a4l_range_bipolar10,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x71bc,
+		name:           "pci-6221_37pin",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &a4l_range_bipolar10,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70f2,
+		name:           "pci-6224",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x716c,
+		name:           "pci-6225",
+		n_adchan:       80,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_622x_ao,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70aa,
+		name:           "pci-6229",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		gainlkup:       ai_gain_622x,
+		ai_speed:	4000,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_622x_ao,
+		reg_type:	ni_reg_622x,
+		ao_unipolar:    0,
+		ao_speed:	1200,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b4,
+		name:           "pci-6250",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b8,
+		name:           "pci-6251",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x717d,
+		name:           "pcie-6251",
+		n_adchan:       16,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70b7,
+		name:           "pci-6254",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70ab,
+		name:           "pci-6259",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x717f,
+		name:           "pcie-6259",
+		n_adchan:       32,
+		adbits:         16,
+		ai_fifo_depth:  4095,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	800,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_625x_ao,
+		reg_type:	ni_reg_625x,
+		ao_unipolar:    0,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+#if 0 /* TODO: fix data size */
+	{	device_id:      0x70b6,
+		name:           "pci-6280",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  8191,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bd,
+		name:           "pci-6281",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bf,
+		name:           "pxi-6281",
+		n_adchan:       16,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       2,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 8,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70bc,
+		name:           "pci-6284",
+		n_adchan:       32,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       0,
+		aobits:         0,
+		ao_fifo_depth:  0,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    0,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+	{	device_id:      0x70ac,
+		name:           "pci-6289",
+		n_adchan:       32,
+		adbits:         18,
+		ai_fifo_depth:  2047,
+		.gainlkup = ai_gain_628x,
+		ai_speed:	1600,
+		n_aochan:       4,
+		aobits:         16,
+		ao_fifo_depth:  8191,
+		.ao_range_table = &range_ni_M_628x_ao,
+		reg_type:	ni_reg_628x,
+		ao_unipolar:    1,
+		ao_speed:	357,
+		.num_p0_dio_channels = 32,
+		.caldac = {caldac_none},
+		has_8255:	0,
+	},
+#endif /* TODO: fix data size */
+	{	device_id:      0x70C0,
+		name:           "pci-6143",
+		n_adchan:       8,
+		adbits:         16,
+		ai_fifo_depth:  1024,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_6143,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		reg_type:	ni_reg_6143,
+		ao_unipolar:    0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		.caldac = {ad8804_debug,ad8804_debug},
+	},
+	{	device_id:      0x710D,
+		name:           "pxi-6143",
+		n_adchan:       8,
+		adbits:         16,
+		ai_fifo_depth:  1024,
+		alwaysdither:   0,
+		gainlkup:       ai_gain_6143,
+		ai_speed:	4000,
+		n_aochan:       0,
+		aobits:         0,
+		reg_type:	ni_reg_6143,
+		ao_unipolar:    0,
+		ao_fifo_depth:  0,
+		.num_p0_dio_channels = 8,
+		.caldac = {ad8804_debug,ad8804_debug},
+	},
+};
+#define n_pcimio_boards ARRAY_SIZE(ni_boards)
+
+/* How we access STC registers */
+
+/* We automatically take advantage of STC registers that can be
+ * read/written directly in the I/O space of the board.  Most
+ * PCIMIO devices map the low 8 STC registers to iobase+addr*2.
+ * The 611x devices map the write registers to iobase+addr*2, and
+ * the read registers to iobase+(addr-1)*2. */
+/* However, the 611x boards still aren't working, so I'm disabling
+ * non-windowed STC access temporarily */
+
+static void e_series_win_out(struct a4l_device *dev, uint16_t data, int reg)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(reg, Window_Address);
+	ni_writew(data, Window_Data);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock, flags);
+}
+
+static uint16_t e_series_win_in(struct a4l_device *dev, int reg)
+{
+	unsigned long flags;
+	uint16_t ret;
+
+	rtdm_lock_get_irqsave(&devpriv->window_lock, flags);
+	ni_writew(reg, Window_Address);
+	ret = ni_readw(Window_Data);
+	rtdm_lock_put_irqrestore(&devpriv->window_lock,flags);
+
+	return ret;
+}
+
+static void m_series_stc_writew(struct a4l_device *dev, uint16_t data, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case ADC_FIFO_Clear:
+		offset = M_Offset_AI_FIFO_Clear;
+		break;
+	case AI_Command_1_Register:
+		offset = M_Offset_AI_Command_1;
+		break;
+	case AI_Command_2_Register:
+		offset = M_Offset_AI_Command_2;
+		break;
+	case AI_Mode_1_Register:
+		offset = M_Offset_AI_Mode_1;
+		break;
+	case AI_Mode_2_Register:
+		offset = M_Offset_AI_Mode_2;
+		break;
+	case AI_Mode_3_Register:
+		offset = M_Offset_AI_Mode_3;
+		break;
+	case AI_Output_Control_Register:
+		offset = M_Offset_AI_Output_Control;
+		break;
+	case AI_Personal_Register:
+		offset = M_Offset_AI_Personal;
+		break;
+	case AI_SI2_Load_A_Register:
+		/* This is actually a 32 bit register on m series boards */
+		ni_writel(data, M_Offset_AI_SI2_Load_A);
+		return;
+	case AI_SI2_Load_B_Register:
+		/* This is actually a 32 bit register on m series boards */
+		ni_writel(data, M_Offset_AI_SI2_Load_B);
+		return;
+	case AI_START_STOP_Select_Register:
+		offset = M_Offset_AI_START_STOP_Select;
+		break;
+	case AI_Trigger_Select_Register:
+		offset = M_Offset_AI_Trigger_Select;
+		break;
+	case Analog_Trigger_Etc_Register:
+		offset = M_Offset_Analog_Trigger_Etc;
+		break;
+	case AO_Command_1_Register:
+		offset = M_Offset_AO_Command_1;
+		break;
+	case AO_Command_2_Register:
+		offset = M_Offset_AO_Command_2;
+		break;
+	case AO_Mode_1_Register:
+		offset = M_Offset_AO_Mode_1;
+		break;
+	case AO_Mode_2_Register:
+		offset = M_Offset_AO_Mode_2;
+		break;
+	case AO_Mode_3_Register:
+		offset = M_Offset_AO_Mode_3;
+		break;
+	case AO_Output_Control_Register:
+		offset = M_Offset_AO_Output_Control;
+		break;
+	case AO_Personal_Register:
+		offset = M_Offset_AO_Personal;
+		break;
+	case AO_Start_Select_Register:
+		offset = M_Offset_AO_Start_Select;
+		break;
+	case AO_Trigger_Select_Register:
+		offset = M_Offset_AO_Trigger_Select;
+		break;
+	case Clock_and_FOUT_Register:
+		offset = M_Offset_Clock_and_FOUT;
+		break;
+	case Configuration_Memory_Clear:
+		offset = M_Offset_Configuration_Memory_Clear;
+		break;
+	case DAC_FIFO_Clear:
+		offset = M_Offset_AO_FIFO_Clear;
+		break;
+	case DIO_Control_Register:
+		rtdm_printk("%s: FIXME: register 0x%x does not map cleanly on to m-series boards.\n", __FUNCTION__, reg);
+		return;
+		break;
+	case G_Autoincrement_Register(0):
+		offset = M_Offset_G0_Autoincrement;
+		break;
+	case G_Autoincrement_Register(1):
+		offset = M_Offset_G1_Autoincrement;
+		break;
+	case G_Command_Register(0):
+		offset = M_Offset_G0_Command;
+		break;
+	case G_Command_Register(1):
+		offset = M_Offset_G1_Command;
+		break;
+	case G_Input_Select_Register(0):
+		offset = M_Offset_G0_Input_Select;
+		break;
+	case G_Input_Select_Register(1):
+		offset = M_Offset_G1_Input_Select;
+		break;
+	case G_Mode_Register(0):
+		offset = M_Offset_G0_Mode;
+		break;
+	case G_Mode_Register(1):
+		offset = M_Offset_G1_Mode;
+		break;
+	case Interrupt_A_Ack_Register:
+		offset = M_Offset_Interrupt_A_Ack;
+		break;
+	case Interrupt_A_Enable_Register:
+		offset = M_Offset_Interrupt_A_Enable;
+		break;
+	case Interrupt_B_Ack_Register:
+		offset = M_Offset_Interrupt_B_Ack;
+		break;
+	case Interrupt_B_Enable_Register:
+		offset = M_Offset_Interrupt_B_Enable;
+		break;
+	case Interrupt_Control_Register:
+		offset = M_Offset_Interrupt_Control;
+		break;
+	case IO_Bidirection_Pin_Register:
+		offset = M_Offset_IO_Bidirection_Pin;
+		break;
+	case Joint_Reset_Register:
+		offset = M_Offset_Joint_Reset;
+		break;
+	case RTSI_Trig_A_Output_Register:
+		offset = M_Offset_RTSI_Trig_A_Output;
+		break;
+	case RTSI_Trig_B_Output_Register:
+		offset = M_Offset_RTSI_Trig_B_Output;
+		break;
+	case RTSI_Trig_Direction_Register:
+		offset = M_Offset_RTSI_Trig_Direction;
+		break;
+		/* FIXME: DIO_Output_Register (16 bit reg) is replaced
+		by M_Offset_Static_Digital_Output (32 bit) and
+		M_Offset_SCXI_Serial_Data_Out (8 bit) */
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return;
+	}
+	ni_writew(data, offset);
+}
+
+static uint16_t m_series_stc_readw(struct a4l_device *dev, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case AI_Status_1_Register:
+		offset = M_Offset_AI_Status_1;
+		break;
+	case AO_Status_1_Register:
+		offset = M_Offset_AO_Status_1;
+		break;
+	case AO_Status_2_Register:
+		offset = M_Offset_AO_Status_2;
+		break;
+	case DIO_Serial_Input_Register:
+		return ni_readb(M_Offset_SCXI_Serial_Data_In);
+	case Joint_Status_1_Register:
+		offset = M_Offset_Joint_Status_1;
+		break;
+	case Joint_Status_2_Register:
+		offset = M_Offset_Joint_Status_2;
+		break;
+	case G_Status_Register:
+		offset = M_Offset_G01_Status;
+		break;
+	default:
+		rtdm_printk("%s: bug! "
+			    "unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return 0;
+		break;
+	}
+	return ni_readw(offset);
+}
+
+static void m_series_stc_writel(struct a4l_device *dev, uint32_t data, int reg)
+{
+	unsigned offset;
+
+	switch(reg)
+	{
+	case AI_SC_Load_A_Registers:
+		offset = M_Offset_AI_SC_Load_A;
+		break;
+	case AI_SI_Load_A_Registers:
+		offset = M_Offset_AI_SI_Load_A;
+		break;
+	case AO_BC_Load_A_Register:
+		offset = M_Offset_AO_BC_Load_A;
+		break;
+	case AO_UC_Load_A_Register:
+		offset = M_Offset_AO_UC_Load_A;
+		break;
+	case AO_UI_Load_A_Register:
+		offset = M_Offset_AO_UI_Load_A;
+		break;
+	case G_Load_A_Register(0):
+		offset = M_Offset_G0_Load_A;
+		break;
+	case G_Load_A_Register(1):
+		offset = M_Offset_G1_Load_A;
+		break;
+	case G_Load_B_Register(0):
+		offset = M_Offset_G0_Load_B;
+		break;
+	case G_Load_B_Register(1):
+		offset = M_Offset_G1_Load_B;
+		break;
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return;
+	}
+	ni_writel(data, offset);
+}
+
+static uint32_t m_series_stc_readl(struct a4l_device *dev, int reg)
+{
+	unsigned offset;
+	switch(reg)
+	{
+	case G_HW_Save_Register(0):
+		offset = M_Offset_G0_HW_Save;
+		break;
+	case G_HW_Save_Register(1):
+		offset = M_Offset_G1_HW_Save;
+		break;
+	case G_Save_Register(0):
+		offset = M_Offset_G0_Save;
+		break;
+	case G_Save_Register(1):
+		offset = M_Offset_G1_Save;
+		break;
+	default:
+		rtdm_printk("%s: bug! unhandled register=0x%x in switch.\n",
+			    __FUNCTION__, reg);
+		BUG();
+		return 0;
+	}
+	return ni_readl(offset);
+}
+
+static void win_out2(struct a4l_device *dev, uint32_t data, int reg)
+{
+	devpriv->stc_writew(dev, data >> 16, reg);
+	devpriv->stc_writew(dev, data & 0xffff, reg + 1);
+}
+
+static uint32_t win_in2(struct a4l_device *dev, int reg)
+{
+	uint32_t bits;
+	bits = devpriv->stc_readw(dev, reg) << 16;
+	bits |= devpriv->stc_readw(dev, reg + 1);
+	return bits;
+}
+
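+/* Temporarily points a MITE I/O window at the board so the calibration
+   EEPROM (starting at offset 0x400) can be copied into
+   devpriv->eeprom_buffer, then restores the previous window setup. */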
+static void m_series_init_eeprom_buffer(struct a4l_device *dev)
+{
+	static const int Start_Cal_EEPROM = 0x400;
+	static const unsigned window_size = 10;
+	unsigned old_iodwbsr_bits;
+	unsigned old_iodwbsr1_bits;
+	unsigned old_iodwcr1_bits;
+	int i;
+
+	old_iodwbsr_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	old_iodwbsr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	old_iodwcr1_bits = readl(devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	writel(((0x80 | window_size) | devpriv->mite->daq_phys_addr),
+	       devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0xf, devpriv->mite->mite_io_addr + 0x30);
+
+	for(i = 0; i < M_SERIES_EEPROM_SIZE; ++i)
+	{
+		devpriv->eeprom_buffer[i] = ni_readb(Start_Cal_EEPROM + i);
+	}
+
+	writel(old_iodwbsr1_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR_1);
+	writel(old_iodwbsr_bits, devpriv->mite->mite_io_addr + MITE_IODWBSR);
+	writel(old_iodwcr1_bits, devpriv->mite->mite_io_addr + MITE_IODWCR_1);
+	writel(0x0, devpriv->mite->mite_io_addr + 0x30);
+}
+
+static void init_6143(struct a4l_device *dev)
+{
+	/* Disable interrupts */
+	devpriv->stc_writew(dev, 0, Interrupt_Control_Register);
+
+	/* Initialise 6143 AI specific bits */
+
+	/* Set G0,G1 DMA mode to E series version */
+	ni_writeb(0x00, Magic_6143);
+	/* Set EOCMode, ADCMode and pipelinedelay */
+	ni_writeb(0x80, PipelineDelay_6143);
+	/* Set EOC Delay */
+	ni_writeb(0x00, EOC_Set_6143);
+
+	/* Set the FIFO half full level */
+	ni_writel(boardtype.ai_fifo_depth / 2, AIFIFO_Flag_6143);
+
+	/* Strobe Relay disable bit */
+	devpriv->ai_calib_source_enabled = 0;
+	ni_writew(devpriv->ai_calib_source | Calibration_Channel_6143_RelayOff,
+		  Calibration_Channel_6143);
+	ni_writew(devpriv->ai_calib_source, Calibration_Channel_6143);
+}
+
+static int pcimio_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int ret, bus, slot, i, irq;
+	struct mite_struct *mite = NULL;
+	struct ni_board_struct *board = NULL;
+
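+	/* Optional attach options give a PCI bus/slot pair as unsigned
+	   longs; with no options, bus = slot = 0 and the first matching
+	   board found is used. */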
+	if(arg->opts == NULL || arg->opts_size == 0)
+		bus = slot = 0;
+	else {
+		bus = arg->opts_size >= sizeof(unsigned long) ?
+			((unsigned long *)arg->opts)[0] : 0;
+		slot = arg->opts_size >= sizeof(unsigned long) * 2 ?
+			((unsigned long *)arg->opts)[1] : 0;
+	}
+
+	for(i = 0; i < n_pcimio_boards && mite == NULL; i++) {
+		mite = a4l_mite_find_device(bus, slot, ni_boards[i].device_id);
+		board = &ni_boards[i];
+	}
+
+	if (mite == NULL)
+		return -ENOENT;
+
+	devpriv->irq_polarity = PCIMIO_IRQ_POLARITY;
+	devpriv->irq_pin = 0;
+
+	devpriv->mite = mite;
+	devpriv->board_ptr = board;
+
+	devpriv->ai_mite_ring = mite_alloc_ring(mite);
+	devpriv->ao_mite_ring = mite_alloc_ring(mite);
+	devpriv->cdo_mite_ring = mite_alloc_ring(mite);
+	devpriv->gpct_mite_ring[0] = mite_alloc_ring(mite);
+	devpriv->gpct_mite_ring[1] = mite_alloc_ring(mite);
+
+	if(devpriv->ai_mite_ring == NULL ||
+	   devpriv->ao_mite_ring == NULL ||
+	   devpriv->cdo_mite_ring == NULL ||
+	   devpriv->gpct_mite_ring[0] == NULL ||
+	   devpriv->gpct_mite_ring[1] == NULL)
+		return -ENOMEM;
+
+	a4l_info(dev, "found %s board\n", boardtype.name);
+
+	if(boardtype.reg_type & ni_reg_m_series_mask)
+	{
+		devpriv->stc_writew = &m_series_stc_writew;
+		devpriv->stc_readw = &m_series_stc_readw;
+		devpriv->stc_writel = &m_series_stc_writel;
+		devpriv->stc_readl = &m_series_stc_readl;
+	}else
+	{
+		devpriv->stc_writew = &e_series_win_out;
+		devpriv->stc_readw = &e_series_win_in;
+		devpriv->stc_writel = &win_out2;
+		devpriv->stc_readl = &win_in2;
+	}
+
+	ret = a4l_mite_setup(devpriv->mite, 0);
+	if(ret < 0)
+	{
+		a4l_err(dev, "pcmio_attach: error setting up mite\n");
+		return ret;
+	}
+
+	if(boardtype.reg_type & ni_reg_m_series_mask)
+		m_series_init_eeprom_buffer(dev);
+	if(boardtype.reg_type == ni_reg_6143)
+		init_6143(dev);
+
+	irq = mite_irq(devpriv->mite);
+
+	if (irq == 0) {
+		a4l_warn(dev, "pcimio_attach: unknown irq (bad)\n");
+	} else {
+		a4l_info(dev, "found irq %u\n", irq);
+		ret = a4l_request_irq(dev, irq, a4l_ni_E_interrupt,
+				      RTDM_IRQTYPE_SHARED, dev);
+		if (ret < 0)
+			a4l_err(dev, "pcimio_attach: irq not available\n");
+	}
+
+	ret = a4l_ni_E_init(dev);
+	if(ret < 0)
+		return ret;
+
+	dev->driver->driver_name = devpriv->board_ptr->name;
+
+	return ret;
+}
+
+static int pcimio_detach(struct a4l_device *dev)
+{
+	if (a4l_get_irq(dev) != A4L_IRQ_UNUSED)
+		a4l_free_irq(dev, a4l_get_irq(dev));
+
+	if (dev->priv != NULL && devpriv->mite != NULL)
+	{
+		mite_free_ring(devpriv->ai_mite_ring);
+		mite_free_ring(devpriv->ao_mite_ring);
+		mite_free_ring(devpriv->cdo_mite_ring);
+		mite_free_ring(devpriv->gpct_mite_ring[0]);
+		mite_free_ring(devpriv->gpct_mite_ring[1]);
+		a4l_mite_unsetup(devpriv->mite);
+	}
+
+	dev->driver->driver_name = NULL;
+
+	return 0;
+}
+
+static struct a4l_driver pcimio_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_ni_pcimio",
+	.driver_name = NULL,
+	.attach = pcimio_attach,
+	.detach = pcimio_detach,
+	.privdata_size = sizeof(ni_private),
+};
+
+static int __init pcimio_init(void)
+{
+	return a4l_register_drv(&pcimio_drv);
+}
+
+static void __exit pcimio_cleanup(void)
+{
+	a4l_unregister_drv(&pcimio_drv);
+}
+
+MODULE_DESCRIPTION("Analogy driver for NI PCI-MIO series cards");
+MODULE_LICENSE("GPL");
+
+module_init(pcimio_init);
+module_exit(pcimio_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c
new file mode 100644
index 0000000..bcce728
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/national_instruments/tio_common.c
@@ -0,0 +1,1999 @@
+/*
+ * Hardware driver for NI general purpose counter
+ * Copyright (C) 2006 Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ *
+ * Description: National Instruments general purpose counters
+ * This module is not used directly by end-users.  Rather, it is used
+ * by other drivers (for example ni_660x and ni_pcimio) to provide
+ * support for NI's general purpose counters.  It was originally based
+ * on the counter code from ni_660x.c and ni_mio_common.c.
+ *
+ * Author:
+ * J.P. Mellor <jpmellor@rose-hulman.edu>
+ * Herman.Bruyninckx@mech.kuleuven.ac.be
+ * Wim.Meeussen@mech.kuleuven.ac.be,
+ * Klaas.Gadeyne@mech.kuleuven.ac.be,
+ * Frank Mori Hess <fmhess@users.sourceforge.net>
+ *
+ * References:
+ * DAQ 660x Register-Level Programmer Manual  (NI 370505A-01)
+ * DAQ 6601/6602 User Manual (NI 322137B-01)
+ * 340934b.pdf  DAQ-STC reference manual
+ *
+ * TODO:
+ * - Support use of both banks X and Y
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/io.h>
+#include <rtdm/analogy/device.h>
+
+#include "ni_tio.h"
+#include "ni_mio.h"
+
+static inline void write_register(struct ni_gpct *counter,
+				  unsigned int bits, enum ni_gpct_register reg)
+{
+	BUG_ON(reg >= NITIO_Num_Registers);
+	counter->counter_dev->write_register(counter, bits, reg);
+}
+
+static inline unsigned int read_register(struct ni_gpct *counter,
+				     enum ni_gpct_register reg)
+{
+	BUG_ON(reg >= NITIO_Num_Registers);
+	return counter->counter_dev->read_register(counter, reg);
+}
+
+struct ni_gpct_device *a4l_ni_gpct_device_construct(struct a4l_device * dev,
+	void (*write_register) (struct ni_gpct * counter, unsigned int bits,
+		enum ni_gpct_register reg),
+	unsigned int (*read_register) (struct ni_gpct * counter,
+		enum ni_gpct_register reg), enum ni_gpct_variant variant,
+	unsigned int num_counters)
+{
+	struct ni_gpct_device *counter_dev =
+		kzalloc(sizeof(struct ni_gpct_device), GFP_KERNEL);
+	if (counter_dev == NULL)
+		return NULL;
+
+	counter_dev->dev = dev;
+	counter_dev->write_register = write_register;
+	counter_dev->read_register = read_register;
+	counter_dev->variant = variant;
+	rtdm_lock_init(&counter_dev->regs_lock);
+	BUG_ON(num_counters == 0);
+
+	counter_dev->counters =
+		kmalloc(sizeof(struct ni_gpct *) * num_counters, GFP_KERNEL);
+
+	if (counter_dev->counters == NULL) {
+		 kfree(counter_dev);
+		return NULL;
+	}
+
+	memset(counter_dev->counters, 0, sizeof(struct ni_gpct *) * num_counters);
+
+	counter_dev->num_counters = num_counters;
+	return counter_dev;
+}
+
+void a4l_ni_gpct_device_destroy(struct ni_gpct_device *counter_dev)
+{
+	if (counter_dev->counters == NULL)
+		return;
+	kfree(counter_dev->counters);
+	kfree(counter_dev);
+}
+
+static
+int ni_tio_counting_mode_registers_present(const struct ni_gpct_device *counter_dev)
+{
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		return 1;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static
+int ni_tio_second_gate_registers_present(const struct ni_gpct_device *counter_dev)
+{
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		return 0;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		return 1;
+	default:
+		BUG();
+	}
+	return 0;
+}
+
+static inline
+void ni_tio_set_bits_transient(struct ni_gpct *counter,
+			       enum ni_gpct_register register_index,
+			       unsigned int bit_mask,
+			       unsigned int bit_values,
+			       unsigned transient_bit_values)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned long flags;
+
+	BUG_ON(register_index >= NITIO_Num_Registers);
+	rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags);
+	counter_dev->regs[register_index] &= ~bit_mask;
+	counter_dev->regs[register_index] |= (bit_values & bit_mask);
+	write_register(counter,
+		       counter_dev->regs[register_index] | transient_bit_values,
+		       register_index);
+	mmiowb();
+	rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags);
+}
+
+/* ni_tio_set_bits() safely writes to registers whose bits may be
+   twiddled in interrupt context, or whose software copy may be read
+   in interrupt context. */
+static inline void ni_tio_set_bits(struct ni_gpct *counter,
+				   enum ni_gpct_register register_index,
+				   unsigned int bit_mask,
+				   unsigned int bit_values)
+{
+	ni_tio_set_bits_transient(counter,
+				  register_index,
+				  bit_mask, bit_values, 0x0);
+}
+
+/* ni_tio_get_soft_copy() is for safely reading the software copy of
+   a register whose bits might be modified in interrupt context, or whose
+   software copy might need to be read in interrupt context. */
+static inline
+unsigned int ni_tio_get_soft_copy(const struct ni_gpct *counter,
+				  enum ni_gpct_register register_index)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned long flags;
+	unsigned value;
+
+	BUG_ON(register_index >= NITIO_Num_Registers);
+	rtdm_lock_get_irqsave(&counter_dev->regs_lock, flags);
+	value = counter_dev->regs[register_index];
+	rtdm_lock_put_irqrestore(&counter_dev->regs_lock, flags);
+	return value;
+}
+
+static void ni_tio_reset_count_and_disarm(struct ni_gpct *counter)
+{
+	write_register(counter, Gi_Reset_Bit(counter->counter_index),
+		       NITIO_Gxx_Joint_Reset_Reg(counter->counter_index));
+}
+
+void a4l_ni_tio_init_counter(struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	ni_tio_reset_count_and_disarm(counter);
+	/* Initialize counter registers */
+	counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->counter_index)] =
+		0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_Autoincrement_Reg(counter->
+				counter_index)],
+		NITIO_Gi_Autoincrement_Reg(counter->counter_index));
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		~0, Gi_Synchronize_Gate_Bit);
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index), ~0,
+		0);
+	counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] = 0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)],
+		NITIO_Gi_LoadA_Reg(counter->counter_index));
+	counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] = 0x0;
+	write_register(counter,
+		counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)],
+		NITIO_Gi_LoadB_Reg(counter->counter_index));
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index), ~0, 0);
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index), ~0,
+			0);
+	}
+	if (ni_tio_second_gate_registers_present(counter_dev)) {
+		counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter->
+				counter_index)] = 0x0;
+		write_register(counter,
+			counter_dev->regs[NITIO_Gi_Second_Gate_Reg(counter->
+					counter_index)],
+			NITIO_Gi_Second_Gate_Reg(counter->counter_index));
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_DMA_Config_Reg(counter->counter_index), ~0, 0x0);
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index), ~0, 0x0);
+}
+
+static lsampl_t ni_tio_counter_status(struct ni_gpct *counter)
+{
+	lsampl_t status = 0;
+	unsigned int bits;
+
+	bits = read_register(counter,
+		NITIO_Gxx_Status_Reg(counter->counter_index));
+	if (bits & Gi_Armed_Bit(counter->counter_index)) {
+		status |= A4L_COUNTER_ARMED;
+		if (bits & Gi_Counting_Bit(counter->counter_index))
+			status |= A4L_COUNTER_COUNTING;
+	}
+	return status;
+}
+
+static
+uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
+				unsigned int generic_clock_source);
+static
+unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter);
+
+static void ni_tio_set_sync_mode(struct ni_gpct *counter, int force_alt_sync)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned counting_mode_reg =
+		NITIO_Gi_Counting_Mode_Reg(counter->counter_index);
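+	/* 25000 ps = 25 ns; sources with a shorter period (such as
+	   the 12.5 ns timebase 3) get the alternate sync bit below. */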
+	static const uint64_t min_normal_sync_period_ps = 25000;
+	const uint64_t clock_period_ps = ni_tio_clock_period_ps(counter,
+		ni_tio_generic_clock_src_select(counter));
+
+	if (ni_tio_counting_mode_registers_present(counter_dev) == 0)
+		return;
+
+	switch (ni_tio_get_soft_copy(counter,
+			counting_mode_reg) & Gi_Counting_Mode_Mask) {
+	case Gi_Counting_Mode_QuadratureX1_Bits:
+	case Gi_Counting_Mode_QuadratureX2_Bits:
+	case Gi_Counting_Mode_QuadratureX4_Bits:
+	case Gi_Counting_Mode_Sync_Source_Bits:
+		force_alt_sync = 1;
+		break;
+	default:
+		break;
+	}
+
+	/* It's not clear what we should do if clock_period is
+	   unknown, so we do not set the alternate sync bit in that
+	   case, but let the caller decide via the force_alt_sync
+	   parameter. */
+	if (force_alt_sync ||
+		(clock_period_ps
+			&& clock_period_ps < min_normal_sync_period_ps)) {
+		ni_tio_set_bits(counter, counting_mode_reg,
+			Gi_Alternate_Sync_Bit(counter_dev->variant),
+			Gi_Alternate_Sync_Bit(counter_dev->variant));
+	} else {
+		ni_tio_set_bits(counter, counting_mode_reg,
+			Gi_Alternate_Sync_Bit(counter_dev->variant), 0x0);
+	}
+}
+
+static int ni_tio_set_counter_mode(struct ni_gpct *counter, unsigned int mode)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned mode_reg_mask;
+	unsigned mode_reg_values;
+	unsigned input_select_bits = 0;
+
+	/* these bits map directly onto the mode register */
+	static const unsigned mode_reg_direct_mask =
+		NI_GPCT_GATE_ON_BOTH_EDGES_BIT | NI_GPCT_EDGE_GATE_MODE_MASK |
+		NI_GPCT_STOP_MODE_MASK | NI_GPCT_OUTPUT_MODE_MASK |
+		NI_GPCT_HARDWARE_DISARM_MASK | NI_GPCT_LOADING_ON_TC_BIT |
+		NI_GPCT_LOADING_ON_GATE_BIT | NI_GPCT_LOAD_B_SELECT_BIT;
+
+	mode_reg_mask = mode_reg_direct_mask | Gi_Reload_Source_Switching_Bit;
+	mode_reg_values = mode & mode_reg_direct_mask;
+	switch (mode & NI_GPCT_RELOAD_SOURCE_MASK) {
+	case NI_GPCT_RELOAD_SOURCE_FIXED_BITS:
+		break;
+	case NI_GPCT_RELOAD_SOURCE_SWITCHING_BITS:
+		mode_reg_values |= Gi_Reload_Source_Switching_Bit;
+		break;
+	case NI_GPCT_RELOAD_SOURCE_GATE_SELECT_BITS:
+		input_select_bits |= Gi_Gate_Select_Load_Source_Bit;
+		mode_reg_mask |= Gi_Gating_Mode_Mask;
+		mode_reg_values |= Gi_Level_Gating_Bits;
+		break;
+	default:
+		break;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+		mode_reg_mask, mode_reg_values);
+
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		unsigned counting_mode_bits = 0;
+		counting_mode_bits |=
+			(mode >> NI_GPCT_COUNTING_MODE_SHIFT) &
+			Gi_Counting_Mode_Mask;
+		counting_mode_bits |=
+			((mode >> NI_GPCT_INDEX_PHASE_BITSHIFT) <<
+			Gi_Index_Phase_Bitshift) & Gi_Index_Phase_Mask;
+		if (mode & NI_GPCT_INDEX_ENABLE_BIT) {
+			counting_mode_bits |= Gi_Index_Mode_Bit;
+		}
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index),
+			Gi_Counting_Mode_Mask | Gi_Index_Phase_Mask |
+			Gi_Index_Mode_Bit, counting_mode_bits);
+		ni_tio_set_sync_mode(counter, 0);
+	}
+
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		Gi_Up_Down_Mask,
+		(mode >> NI_GPCT_COUNTING_DIRECTION_SHIFT) << Gi_Up_Down_Shift);
+
+	if (mode & NI_GPCT_OR_GATE_BIT) {
+		input_select_bits |= Gi_Or_Gate_Bit;
+	}
+	if (mode & NI_GPCT_INVERT_OUTPUT_BIT) {
+		input_select_bits |= Gi_Output_Polarity_Bit;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Load_Source_Bit | Gi_Or_Gate_Bit |
+		Gi_Output_Polarity_Bit, input_select_bits);
+
+	return 0;
+}
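+
+/* Example (hypothetical) mode word, assuming the usual NI_GPCT_*
+ * encodings from ni_tio.h (0x1 in the direction field meaning
+ * "count up"): reload from the load register on each terminal count
+ * while counting up:
+ *
+ *	ni_tio_set_counter_mode(counter, NI_GPCT_LOADING_ON_TC_BIT |
+ *		(0x1 << NI_GPCT_COUNTING_DIRECTION_SHIFT));
+ */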
+
+static int ni_tio_arm(struct ni_gpct *counter, int arm, unsigned int start_trigger)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	unsigned int command_transient_bits = 0;
+
+	if (arm) {
+		switch (start_trigger) {
+		case NI_GPCT_ARM_IMMEDIATE:
+			command_transient_bits |= Gi_Arm_Bit;
+			break;
+		case NI_GPCT_ARM_PAIRED_IMMEDIATE:
+			command_transient_bits |= Gi_Arm_Bit | Gi_Arm_Copy_Bit;
+			break;
+		default:
+			break;
+		}
+		if (ni_tio_counting_mode_registers_present(counter_dev)) {
+			unsigned counting_mode_bits = 0;
+
+			switch (start_trigger) {
+			case NI_GPCT_ARM_IMMEDIATE:
+			case NI_GPCT_ARM_PAIRED_IMMEDIATE:
+				break;
+			default:
+				if (start_trigger & NI_GPCT_ARM_UNKNOWN) {
+					/* Pass the least significant
+					   bits through so the hardware
+					   arm select can be decoded
+					   later. */
+					unsigned hw_arm_select_bits =
+						(start_trigger <<
+						Gi_HW_Arm_Select_Shift) &
+						Gi_HW_Arm_Select_Mask
+						(counter_dev->variant);
+
+					counting_mode_bits |=
+						Gi_HW_Arm_Enable_Bit |
+						hw_arm_select_bits;
+				} else {
+					return -EINVAL;
+				}
+				break;
+			}
+			ni_tio_set_bits(counter,
+				NITIO_Gi_Counting_Mode_Reg(counter->
+					counter_index),
+				Gi_HW_Arm_Select_Mask(counter_dev->
+					variant) | Gi_HW_Arm_Enable_Bit,
+				counting_mode_bits);
+		}
+	} else {
+		command_transient_bits |= Gi_Disarm_Bit;
+	}
+	ni_tio_set_bits_transient(counter,
+		NITIO_Gi_Command_Reg(counter->counter_index), 0, 0,
+		command_transient_bits);
+	return 0;
+}
+
+static unsigned int ni_660x_source_select_bits(lsampl_t clock_source)
+{
+	unsigned int ni_660x_clock;
+	unsigned int i;
+	const unsigned int clock_select_bits =
+		clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+
+	switch (clock_select_bits) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_1_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_2_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Timebase_3_Clock;
+		break;
+	case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Logic_Low_Clock;
+		break;
+	case NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Source_Pin_i_Clock;
+		break;
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Next_Gate_Clock;
+		break;
+	case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
+		ni_660x_clock = NI_660x_Next_TC_Clock;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
+				ni_660x_clock = NI_660x_RTSI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_source_pin; ++i) {
+			if (clock_select_bits ==
+				NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i)) {
+				ni_660x_clock = NI_660x_Source_Pin_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_source_pin)
+			break;
+		ni_660x_clock = 0;
+		BUG();
+		break;
+	}
+	return Gi_Source_Select_Bits(ni_660x_clock);
+}
+
+static unsigned int ni_m_series_source_select_bits(lsampl_t clock_source)
+{
+	unsigned int ni_m_series_clock;
+	unsigned int i;
+	const unsigned int clock_select_bits =
+		clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK;
+	switch (clock_select_bits) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_1_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_2_Clock;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Timebase_3_Clock;
+		break;
+	case NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Logic_Low_Clock;
+		break;
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Next_Gate_Clock;
+		break;
+	case NI_GPCT_NEXT_TC_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Next_TC_Clock;
+		break;
+	case NI_GPCT_PXI10_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_PXI10_Clock;
+		break;
+	case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_PXI_Star_Trigger_Clock;
+		break;
+	case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
+		ni_m_series_clock = NI_M_Series_Analog_Trigger_Out_Clock;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_RTSI_CLOCK_SRC_BITS(i)) {
+				ni_m_series_clock = NI_M_Series_RTSI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (clock_select_bits == NI_GPCT_PFI_CLOCK_SRC_BITS(i)) {
+				ni_m_series_clock = NI_M_Series_PFI_Clock(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		__a4l_err("invalid clock source 0x%lx\n",
+			  (unsigned long)clock_source);
+		ni_m_series_clock = 0;
+		BUG();
+		break;
+	}
+	return Gi_Source_Select_Bits(ni_m_series_clock);
+}
+
+static void ni_tio_set_source_subselect(struct ni_gpct *counter,
+					lsampl_t clock_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+
+	if (counter_dev->variant != ni_gpct_variant_m_series)
+		return;
+	switch (clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
+		/* Gi_Source_Subselect is zero */
+	case NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS:
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		counter_dev->regs[second_gate_reg] &= ~Gi_Source_Subselect_Bit;
+		break;
+		/* Gi_Source_Subselect is one */
+	case NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS:
+	case NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS:
+		counter_dev->regs[second_gate_reg] |= Gi_Source_Subselect_Bit;
+		break;
+		/* Gi_Source_Subselect doesn't matter */
+	default:
+		return;
+		break;
+	}
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+}
+
+static int ni_tio_set_clock_src(struct ni_gpct *counter,
+				lsampl_t clock_source, lsampl_t period_ns)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned input_select_bits = 0;
+	static const uint64_t pico_per_nano = 1000;
+
+	/* FIXME: validate clock source */
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_660x:
+		input_select_bits |= ni_660x_source_select_bits(clock_source);
+		break;
+	case ni_gpct_variant_e_series:
+	case ni_gpct_variant_m_series:
+		input_select_bits |=
+			ni_m_series_source_select_bits(clock_source);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	if (clock_source & NI_GPCT_INVERT_CLOCK_SRC_BIT)
+		input_select_bits |= Gi_Source_Polarity_Bit;
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Source_Select_Mask | Gi_Source_Polarity_Bit,
+		input_select_bits);
+	ni_tio_set_source_subselect(counter, clock_source);
+	if (ni_tio_counting_mode_registers_present(counter_dev)) {
+		const unsigned prescaling_mode =
+			clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK;
+		unsigned counting_mode_bits = 0;
+
+		switch (prescaling_mode) {
+		case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
+			break;
+		case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
+			counting_mode_bits |=
+				Gi_Prescale_X2_Bit(counter_dev->variant);
+			break;
+		case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
+			counting_mode_bits |=
+				Gi_Prescale_X8_Bit(counter_dev->variant);
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Counting_Mode_Reg(counter->counter_index),
+			Gi_Prescale_X2_Bit(counter_dev->
+				variant) | Gi_Prescale_X8_Bit(counter_dev->
+				variant), counting_mode_bits);
+	}
+	counter->clock_period_ps = pico_per_nano * period_ns;
+	ni_tio_set_sync_mode(counter, 0);
+	return 0;
+}
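+
+/* Example (hypothetical), as reached through the
+ * A4L_INSN_CONFIG_SET_CLOCK_SRC instruction: select the 20 MHz
+ * timebase 1 (50 ns period) with no prescaling; the period argument
+ * only matters for sources whose rate the driver cannot infer:
+ *
+ *	ni_tio_set_clock_src(counter,
+ *		NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS, 50);
+ */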
+
+static unsigned int ni_tio_clock_src_modifiers(const struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned counting_mode_bits = ni_tio_get_soft_copy(counter,
+		NITIO_Gi_Counting_Mode_Reg(counter->counter_index));
+	unsigned int bits = 0;
+
+	if (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Polarity_Bit)
+		bits |= NI_GPCT_INVERT_CLOCK_SRC_BIT;
+	if (counting_mode_bits & Gi_Prescale_X2_Bit(counter_dev->variant))
+		bits |= NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS;
+	if (counting_mode_bits & Gi_Prescale_X8_Bit(counter_dev->variant))
+		bits |= NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS;
+	return bits;
+}
+
+static unsigned int ni_m_series_clock_src_select(const struct ni_gpct *counter)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	unsigned int i, clock_source = 0;
+
+	const unsigned int input_select = (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Select_Mask) >>
+		Gi_Source_Select_Shift;
+
+	switch (input_select) {
+	case NI_M_Series_Timebase_1_Clock:
+		clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Timebase_2_Clock:
+		clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Timebase_3_Clock:
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Source_Subselect_Bit)
+			clock_source =
+				NI_GPCT_ANALOG_TRIGGER_OUT_CLOCK_SRC_BITS;
+		else
+			clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Logic_Low_Clock:
+		clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Next_Gate_Clock:
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Source_Subselect_Bit)
+			clock_source = NI_GPCT_PXI_STAR_TRIGGER_CLOCK_SRC_BITS;
+		else
+			clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_PXI10_Clock:
+		clock_source = NI_GPCT_PXI10_CLOCK_SRC_BITS;
+		break;
+	case NI_M_Series_Next_TC_Clock:
+		clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (input_select == NI_M_Series_RTSI_Clock(i)) {
+				clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (input_select == NI_M_Series_PFI_Clock(i)) {
+				clock_source = NI_GPCT_PFI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		BUG();
+		break;
+	}
+	clock_source |= ni_tio_clock_src_modifiers(counter);
+	return clock_source;
+}
+
+static unsigned int ni_660x_clock_src_select(const struct ni_gpct *counter)
+{
+	unsigned int i, clock_source = 0;
+	const unsigned input_select = (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Input_Select_Reg(counter->
+				counter_index)) & Gi_Source_Select_Mask) >>
+		Gi_Source_Select_Shift;
+
+	switch (input_select) {
+	case NI_660x_Timebase_1_Clock:
+		clock_source = NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Timebase_2_Clock:
+		clock_source = NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Timebase_3_Clock:
+		clock_source = NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Logic_Low_Clock:
+		clock_source = NI_GPCT_LOGIC_LOW_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Source_Pin_i_Clock:
+		clock_source = NI_GPCT_SOURCE_PIN_i_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Next_Gate_Clock:
+		clock_source = NI_GPCT_NEXT_GATE_CLOCK_SRC_BITS;
+		break;
+	case NI_660x_Next_TC_Clock:
+		clock_source = NI_GPCT_NEXT_TC_CLOCK_SRC_BITS;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (input_select == NI_660x_RTSI_Clock(i)) {
+				clock_source = NI_GPCT_RTSI_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_source_pin; ++i) {
+			if (input_select == NI_660x_Source_Pin_Clock(i)) {
+				clock_source =
+					NI_GPCT_SOURCE_PIN_CLOCK_SRC_BITS(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_source_pin)
+			break;
+		BUG();
+		break;
+	}
+	clock_source |= ni_tio_clock_src_modifiers(counter);
+	return clock_source;
+}
+
+static unsigned int ni_tio_generic_clock_src_select(const struct ni_gpct *counter)
+{
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+	case ni_gpct_variant_m_series:
+		return ni_m_series_clock_src_select(counter);
+		break;
+	case ni_gpct_variant_660x:
+		return ni_660x_clock_src_select(counter);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return 0;
+}
+
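+/* Fixed timebase periods as encoded below: timebase 1 is 50000 ps
+   (20 MHz), timebase 2 is 10000000 ps (100 kHz), timebase 3 is
+   12500 ps (80 MHz), and PXI10 is 100000 ps (10 MHz); prescaling
+   multiplies the period by 2 or 8. */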
+static uint64_t ni_tio_clock_period_ps(const struct ni_gpct *counter,
+				       unsigned int generic_clock_source)
+{
+	uint64_t clock_period_ps;
+
+	switch (generic_clock_source & NI_GPCT_CLOCK_SRC_SELECT_MASK) {
+	case NI_GPCT_TIMEBASE_1_CLOCK_SRC_BITS:
+		clock_period_ps = 50000;
+		break;
+	case NI_GPCT_TIMEBASE_2_CLOCK_SRC_BITS:
+		clock_period_ps = 10000000;
+		break;
+	case NI_GPCT_TIMEBASE_3_CLOCK_SRC_BITS:
+		clock_period_ps = 12500;
+		break;
+	case NI_GPCT_PXI10_CLOCK_SRC_BITS:
+		clock_period_ps = 100000;
+		break;
+	default:
+		/* Clock period is specified by user with prescaling
+		   already taken into account. */
+		return counter->clock_period_ps;
+		break;
+	}
+
+	switch (generic_clock_source & NI_GPCT_PRESCALE_MODE_CLOCK_SRC_MASK) {
+	case NI_GPCT_NO_PRESCALE_CLOCK_SRC_BITS:
+		break;
+	case NI_GPCT_PRESCALE_X2_CLOCK_SRC_BITS:
+		clock_period_ps *= 2;
+		break;
+	case NI_GPCT_PRESCALE_X8_CLOCK_SRC_BITS:
+		clock_period_ps *= 8;
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return clock_period_ps;
+}
+
+static void ni_tio_get_clock_src(struct ni_gpct *counter,
+				 unsigned int * clock_source,
+				 unsigned int * period_ns)
+{
+	static const unsigned int pico_per_nano = 1000;
+	uint64_t temp64;
+
+	*clock_source = ni_tio_generic_clock_src_select(counter);
+	temp64 = ni_tio_clock_period_ps(counter, *clock_source);
+	do_div(temp64, pico_per_nano);
+	*period_ns = temp64;
+}
+
+static void ni_tio_set_first_gate_modifiers(struct ni_gpct *counter,
+					    lsampl_t gate_source)
+{
+	const unsigned int mode_mask = Gi_Gate_Polarity_Bit | Gi_Gating_Mode_Mask;
+	unsigned int mode_values = 0;
+
+	if (gate_source & CR_INVERT) {
+		mode_values |= Gi_Gate_Polarity_Bit;
+	}
+	if (gate_source & CR_EDGE) {
+		mode_values |= Gi_Rising_Edge_Gating_Bits;
+	} else {
+		mode_values |= Gi_Level_Gating_Bits;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Mode_Reg(counter->counter_index),
+		mode_mask, mode_values);
+}
+
+static int ni_660x_set_first_gate(struct ni_gpct *counter, lsampl_t gate_source)
+{
+	const unsigned int selected_gate = CR_CHAN(gate_source);
+	/* Bits of selected_gate that may be meaningful to
+	   input select register */
+	const unsigned int selected_gate_mask = 0x1f;
+	unsigned ni_660x_gate_select;
+	unsigned i;
+
+	switch (selected_gate) {
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+		ni_660x_gate_select = NI_660x_Next_SRC_Gate_Select;
+		break;
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+	case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
+	case NI_GPCT_GATE_PIN_i_GATE_SELECT:
+		ni_660x_gate_select = selected_gate & selected_gate_mask;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_660x_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_gate_pin; ++i) {
+			if (selected_gate == NI_GPCT_GATE_PIN_GATE_SELECT(i)) {
+				ni_660x_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_gate_pin)
+			break;
+		return -EINVAL;
+		break;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Mask, Gi_Gate_Select_Bits(ni_660x_gate_select));
+	return 0;
+}
+
+static int ni_m_series_set_first_gate(struct ni_gpct *counter,
+				      lsampl_t gate_source)
+{
+	const unsigned int selected_gate = CR_CHAN(gate_source);
+	/* bits of selected_gate that may be meaningful to input select register */
+	const unsigned int selected_gate_mask = 0x1f;
+	unsigned int i, ni_m_series_gate_select;
+
+	switch (selected_gate) {
+	case NI_GPCT_TIMESTAMP_MUX_GATE_SELECT:
+	case NI_GPCT_AI_START2_GATE_SELECT:
+	case NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT:
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_AI_START1_GATE_SELECT:
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+	case NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+		ni_m_series_gate_select = selected_gate & selected_gate_mask;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (selected_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_m_series_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (selected_gate == NI_GPCT_PFI_GATE_SELECT(i)) {
+				ni_m_series_gate_select =
+					selected_gate & selected_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		return -EINVAL;
+		break;
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Gate_Select_Mask,
+		Gi_Gate_Select_Bits(ni_m_series_gate_select));
+	return 0;
+}
+
+static int ni_660x_set_second_gate(struct ni_gpct *counter,
+				   lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	const unsigned int selected_second_gate = CR_CHAN(gate_source);
+	/* bits of second_gate that may be meaningful to second gate register */
+	static const unsigned int selected_second_gate_mask = 0x1f;
+	unsigned int i, ni_660x_second_gate_select;
+
+	switch (selected_second_gate) {
+	case NI_GPCT_SOURCE_PIN_i_GATE_SELECT:
+	case NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT:
+	case NI_GPCT_SELECTED_GATE_GATE_SELECT:
+	case NI_GPCT_NEXT_OUT_GATE_SELECT:
+	case NI_GPCT_LOGIC_LOW_GATE_SELECT:
+		ni_660x_second_gate_select =
+			selected_second_gate & selected_second_gate_mask;
+		break;
+	case NI_GPCT_NEXT_SOURCE_GATE_SELECT:
+		ni_660x_second_gate_select =
+			NI_660x_Next_SRC_Second_Gate_Select;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (selected_second_gate == NI_GPCT_RTSI_GATE_SELECT(i)) {
+				ni_660x_second_gate_select =
+					selected_second_gate &
+					selected_second_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_up_down_pin; ++i) {
+			if (selected_second_gate ==
+				NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i)) {
+				ni_660x_second_gate_select =
+					selected_second_gate &
+					selected_second_gate_mask;
+				break;
+			}
+		}
+		if (i <= ni_660x_max_up_down_pin)
+			break;
+		return -EINVAL;
+		break;
+	}
+	counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit;
+	counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask;
+	counter_dev->regs[second_gate_reg] |=
+		Gi_Second_Gate_Select_Bits(ni_660x_second_gate_select);
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+	return 0;
+}
+
+static int ni_m_series_set_second_gate(struct ni_gpct *counter,
+				       lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	const unsigned int selected_second_gate = CR_CHAN(gate_source);
+	/* Bits of second_gate that may be meaningful to second gate register */
+	static const unsigned int selected_second_gate_mask = 0x1f;
+	unsigned int ni_m_series_second_gate_select;
+
+	/* FIXME: We don't know what the m-series second gate codes
+	   are, so we'll just pass the bits through for now. */
+	switch (selected_second_gate) {
+	default:
+		ni_m_series_second_gate_select =
+			selected_second_gate & selected_second_gate_mask;
+		break;
+	}
+	counter_dev->regs[second_gate_reg] |= Gi_Second_Gate_Mode_Bit;
+	counter_dev->regs[second_gate_reg] &= ~Gi_Second_Gate_Select_Mask;
+	counter_dev->regs[second_gate_reg] |=
+		Gi_Second_Gate_Select_Bits(ni_m_series_second_gate_select);
+	write_register(counter, counter_dev->regs[second_gate_reg],
+		second_gate_reg);
+	return 0;
+}
+
+static int ni_tio_set_gate_src(struct ni_gpct *counter,
+			       unsigned int gate_index, lsampl_t gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+
+	switch (gate_index) {
+	case 0:
+		if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
+			ni_tio_set_bits(counter,
+				NITIO_Gi_Mode_Reg(counter->counter_index),
+				Gi_Gating_Mode_Mask, Gi_Gating_Disabled_Bits);
+			return 0;
+		}
+		ni_tio_set_first_gate_modifiers(counter, gate_source);
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			return ni_m_series_set_first_gate(counter, gate_source);
+			break;
+		case ni_gpct_variant_660x:
+			return ni_660x_set_first_gate(counter, gate_source);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	case 1:
+		if (ni_tio_second_gate_registers_present(counter_dev) == 0)
+			return -EINVAL;
+		if (CR_CHAN(gate_source) == NI_GPCT_DISABLED_GATE_SELECT) {
+			counter_dev->regs[second_gate_reg] &=
+				~Gi_Second_Gate_Mode_Bit;
+			write_register(counter,
+				counter_dev->regs[second_gate_reg],
+				second_gate_reg);
+			return 0;
+		}
+		if (gate_source & CR_INVERT) {
+			counter_dev->regs[second_gate_reg] |=
+				Gi_Second_Gate_Polarity_Bit;
+		} else {
+			counter_dev->regs[second_gate_reg] &=
+				~Gi_Second_Gate_Polarity_Bit;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_m_series:
+			return ni_m_series_set_second_gate(counter,
+				gate_source);
+			break;
+		case ni_gpct_variant_660x:
+			return ni_660x_set_second_gate(counter, gate_source);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
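+
+/* Example (hypothetical), as reached through the
+ * A4L_INSN_CONFIG_SET_GATE_SRC instruction: gate the counter on the
+ * rising edge of PFI0 on an m-series board, the CR_EDGE flag being
+ * handled by ni_tio_set_first_gate_modifiers():
+ *
+ *	ni_tio_set_gate_src(counter, 0,
+ *		NI_GPCT_PFI_GATE_SELECT(0) | CR_EDGE);
+ */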
+
+static int ni_tio_set_other_src(struct ni_gpct *counter,
+				unsigned int index, unsigned int source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+
+	if (counter_dev->variant == ni_gpct_variant_m_series) {
+		unsigned int abz_reg, shift, mask;
+
+		abz_reg = NITIO_Gi_ABZ_Reg(counter->counter_index);
+		switch (index) {
+		case NI_GPCT_SOURCE_ENCODER_A:
+			shift = 10;
+			break;
+		case NI_GPCT_SOURCE_ENCODER_B:
+			shift = 5;
+			break;
+		case NI_GPCT_SOURCE_ENCODER_Z:
+			shift = 0;
+			break;
+		default:
+			return -EINVAL;
+			break;
+		}
+		mask = 0x1f << shift;
+		if (source > 0x1f) {
+			/* Disable gate */
+			source = 0x1f;
+		}
+		counter_dev->regs[abz_reg] &= ~mask;
+		counter_dev->regs[abz_reg] |= (source << shift) & mask;
+		write_register(counter, counter_dev->regs[abz_reg], abz_reg);
+		return 0;
+	}
+	return -EINVAL;
+}
+
+static unsigned int ni_660x_first_gate_to_generic_gate_source(unsigned int ni_660x_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_660x_gate_select) {
+	case NI_660x_Source_Pin_i_Gate_Select:
+		return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Gate_Pin_i_Gate_Select:
+		return NI_GPCT_GATE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Next_SRC_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_660x_Next_Out_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_660x_Logic_Low_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (ni_660x_gate_select == NI_660x_RTSI_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_gate_pin; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_Gate_Pin_Gate_Select(i)) {
+				return NI_GPCT_GATE_PIN_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_gate_pin)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_m_series_first_gate_to_generic_gate_source(unsigned int
+	ni_m_series_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_m_series_gate_select) {
+	case NI_M_Series_Timestamp_Mux_Gate_Select:
+		return NI_GPCT_TIMESTAMP_MUX_GATE_SELECT;
+		break;
+	case NI_M_Series_AI_START2_Gate_Select:
+		return NI_GPCT_AI_START2_GATE_SELECT;
+		break;
+	case NI_M_Series_PXI_Star_Trigger_Gate_Select:
+		return NI_GPCT_PXI_STAR_TRIGGER_GATE_SELECT;
+		break;
+	case NI_M_Series_Next_Out_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_M_Series_AI_START1_Gate_Select:
+		return NI_GPCT_AI_START1_GATE_SELECT;
+		break;
+	case NI_M_Series_Next_SRC_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_M_Series_Analog_Trigger_Out_Gate_Select:
+		return NI_GPCT_ANALOG_TRIGGER_OUT_GATE_SELECT;
+		break;
+	case NI_M_Series_Logic_Low_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_m_series_max_rtsi_channel; ++i) {
+			if (ni_m_series_gate_select ==
+				NI_M_Series_RTSI_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_m_series_max_pfi_channel; ++i) {
+			if (ni_m_series_gate_select ==
+				NI_M_Series_PFI_Gate_Select(i)) {
+				return NI_GPCT_PFI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_m_series_max_pfi_channel)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_660x_second_gate_to_generic_gate_source(unsigned int
+	ni_660x_gate_select)
+{
+	unsigned int i;
+
+	switch (ni_660x_gate_select) {
+	case NI_660x_Source_Pin_i_Second_Gate_Select:
+		return NI_GPCT_SOURCE_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Up_Down_Pin_i_Second_Gate_Select:
+		return NI_GPCT_UP_DOWN_PIN_i_GATE_SELECT;
+		break;
+	case NI_660x_Next_SRC_Second_Gate_Select:
+		return NI_GPCT_NEXT_SOURCE_GATE_SELECT;
+		break;
+	case NI_660x_Next_Out_Second_Gate_Select:
+		return NI_GPCT_NEXT_OUT_GATE_SELECT;
+		break;
+	case NI_660x_Selected_Gate_Second_Gate_Select:
+		return NI_GPCT_SELECTED_GATE_GATE_SELECT;
+		break;
+	case NI_660x_Logic_Low_Second_Gate_Select:
+		return NI_GPCT_LOGIC_LOW_GATE_SELECT;
+		break;
+	default:
+		for (i = 0; i <= ni_660x_max_rtsi_channel; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_RTSI_Second_Gate_Select(i)) {
+				return NI_GPCT_RTSI_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_rtsi_channel)
+			break;
+		for (i = 0; i <= ni_660x_max_up_down_pin; ++i) {
+			if (ni_660x_gate_select ==
+				NI_660x_Up_Down_Pin_Second_Gate_Select(i)) {
+				return NI_GPCT_UP_DOWN_PIN_GATE_SELECT(i);
+				break;
+			}
+		}
+		if (i <= ni_660x_max_up_down_pin)
+			break;
+		BUG();
+		break;
+	}
+	return 0;
+}
+
+static unsigned int ni_m_series_second_gate_to_generic_gate_source(unsigned int
+	ni_m_series_gate_select)
+{
+	/* FIXME: the second gate sources for the m series are
+	   undocumented, so we just return the raw bits for now. */
+	switch (ni_m_series_gate_select) {
+	default:
+		return ni_m_series_gate_select;
+		break;
+	}
+	return 0;
+}
+
+static int ni_tio_get_gate_src(struct ni_gpct *counter,
+			       unsigned int gate_index,
+			       unsigned int * gate_source)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int mode_bits = ni_tio_get_soft_copy(counter,
+		NITIO_Gi_Mode_Reg(counter->counter_index));
+	const unsigned int second_gate_reg =
+		NITIO_Gi_Second_Gate_Reg(counter->counter_index);
+	unsigned int gate_select_bits;
+
+	switch (gate_index) {
+	case 0:
+		if ((mode_bits & Gi_Gating_Mode_Mask) ==
+			Gi_Gating_Disabled_Bits) {
+			*gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+			return 0;
+		} else {
+			gate_select_bits =
+				(ni_tio_get_soft_copy(counter,
+					NITIO_Gi_Input_Select_Reg(counter->
+						counter_index)) &
+				Gi_Gate_Select_Mask) >> Gi_Gate_Select_Shift;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			*gate_source =
+				ni_m_series_first_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		case ni_gpct_variant_660x:
+			*gate_source =
+				ni_660x_first_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		if (mode_bits & Gi_Gate_Polarity_Bit) {
+			*gate_source |= CR_INVERT;
+		}
+		if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+			*gate_source |= CR_EDGE;
+		}
+		break;
+	case 1:
+		if ((mode_bits & Gi_Gating_Mode_Mask) == Gi_Gating_Disabled_Bits
+			|| (counter_dev->
+				regs[second_gate_reg] & Gi_Second_Gate_Mode_Bit)
+			== 0) {
+			*gate_source = NI_GPCT_DISABLED_GATE_SELECT;
+			return 0;
+		} else {
+			gate_select_bits =
+				(counter_dev->
+				regs[second_gate_reg] &
+				Gi_Second_Gate_Select_Mask) >>
+				Gi_Second_Gate_Select_Shift;
+		}
+		switch (counter_dev->variant) {
+		case ni_gpct_variant_e_series:
+		case ni_gpct_variant_m_series:
+			*gate_source =
+				ni_m_series_second_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		case ni_gpct_variant_660x:
+			*gate_source =
+				ni_660x_second_gate_to_generic_gate_source
+				(gate_select_bits);
+			break;
+		default:
+			BUG();
+			break;
+		}
+		if (counter_dev->
+			regs[second_gate_reg] & Gi_Second_Gate_Polarity_Bit) {
+			*gate_source |= CR_INVERT;
+		}
+		/* Second gate can't have edge/level mode set independently */
+		if ((mode_bits & Gi_Gating_Mode_Mask) != Gi_Level_Gating_Bits) {
+			*gate_source |= CR_EDGE;
+		}
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+	return 0;
+}
+
+int a4l_ni_tio_insn_config(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	unsigned int *data = (unsigned int *)insn->data;
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_SET_COUNTER_MODE:
+		return ni_tio_set_counter_mode(counter, data[1]);
+		break;
+	case A4L_INSN_CONFIG_ARM:
+		return ni_tio_arm(counter, 1, data[1]);
+		break;
+	case A4L_INSN_CONFIG_DISARM:
+		ni_tio_arm(counter, 0, 0);
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_GET_COUNTER_STATUS:
+		data[1] = ni_tio_counter_status(counter);
+		data[2] = counter_status_mask;
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_SET_CLOCK_SRC:
+		return ni_tio_set_clock_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_GET_CLOCK_SRC:
+		ni_tio_get_clock_src(counter, &data[1], &data[2]);
+		return 0;
+		break;
+	case A4L_INSN_CONFIG_SET_GATE_SRC:
+		return ni_tio_set_gate_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_GET_GATE_SRC:
+		return ni_tio_get_gate_src(counter, data[1], &data[2]);
+		break;
+	case A4L_INSN_CONFIG_SET_OTHER_SRC:
+		return ni_tio_set_other_src(counter, data[1], data[2]);
+		break;
+	case A4L_INSN_CONFIG_RESET:
+		ni_tio_reset_count_and_disarm(counter);
+		return 0;
+		break;
+	default:
+		break;
+	}
+	return -EINVAL;
+}
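+
+/* Example (hypothetical) instruction payload for arming the counter
+   immediately: data[0] = A4L_INSN_CONFIG_ARM and
+   data[1] = NI_GPCT_ARM_IMMEDIATE. */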
+
+int a4l_ni_tio_rinsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int channel = CR_CHAN(insn->chan_desc);
+	unsigned int first_read;
+	unsigned int second_read;
+	unsigned int correct_read;
+
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (insn->data_size != sizeof(uint32_t))
+		return -EINVAL;
+
+	switch (channel) {
+	case 0:
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index),
+			Gi_Save_Trace_Bit, 0);
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index),
+			Gi_Save_Trace_Bit, Gi_Save_Trace_Bit);
+		/* The count doesn't get latched until the next clock
+		   edge, so it is possible the count may change (once)
+		   while we are reading.  Since the read of the
+		   SW_Save_Reg isn't atomic (apparently even when it's a
+		   32 bit register according to 660x docs), we need to
+		   read twice and make sure the reading hasn't changed.
+		   If it has, a third read will be correct since the
+		   count value will definitely have latched by then. */
+		first_read =
+			read_register(counter,
+			NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		second_read =
+			read_register(counter,
+			NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		if (first_read != second_read)
+			correct_read =
+				read_register(counter,
+				NITIO_Gi_SW_Save_Reg(counter->counter_index));
+		else
+			correct_read = first_read;
+		data[0] = correct_read;
+		return 0;
+		break;
+	case 1:
+		data[0] = counter_dev->regs
+			[NITIO_Gi_LoadA_Reg(counter->counter_index)];
+		break;
+	case 2:
+		data[0] = counter_dev->regs
+			[NITIO_Gi_LoadB_Reg(counter->counter_index)];
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static unsigned int ni_tio_next_load_register(struct ni_gpct *counter)
+{
+	const unsigned int bits = read_register(counter,
+		NITIO_Gxx_Status_Reg(counter->counter_index));
+
+	if (bits & Gi_Next_Load_Source_Bit(counter->counter_index)) {
+		return NITIO_Gi_LoadB_Reg(counter->counter_index);
+	} else {
+		return NITIO_Gi_LoadA_Reg(counter->counter_index);
+	}
+}
+
+int a4l_ni_tio_winsn(struct ni_gpct *counter, struct a4l_kernel_instruction *insn)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	const unsigned int channel = CR_CHAN(insn->chan_desc);
+	unsigned int load_reg;
+
+	uint32_t *data = (uint32_t *)insn->data;
+
+	if (insn->data_size != sizeof(uint32_t))
+		return -EINVAL;
+
+	switch (channel) {
+	case 0:
+		/* Unsafe if counter is armed.  Should probably check
+		   status and return -EBUSY if armed. */
+		/* Don't disturb load source select, just use
+		   whichever load register is already selected. */
+		load_reg = ni_tio_next_load_register(counter);
+		write_register(counter, data[0], load_reg);
+		ni_tio_set_bits_transient(counter,
+			NITIO_Gi_Command_Reg(counter->counter_index), 0, 0,
+			Gi_Load_Bit);
+		/* Restore the load register to whatever the user
+		   last set it to */
+		write_register(counter, counter_dev->regs[load_reg], load_reg);
+		break;
+	case 1:
+		counter_dev->regs[NITIO_Gi_LoadA_Reg(counter->counter_index)] =
+			data[0];
+		write_register(counter, data[0],
+			NITIO_Gi_LoadA_Reg(counter->counter_index));
+		break;
+	case 2:
+		counter_dev->regs[NITIO_Gi_LoadB_Reg(counter->counter_index)] =
+			data[0];
+		write_register(counter, data[0],
+			NITIO_Gi_LoadB_Reg(counter->counter_index));
+		break;
+	default:
+		return -EINVAL;
+		break;
+	}
+
+	return 0;
+}
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+static void ni_tio_configure_dma(struct ni_gpct *counter,
+				 short enable, short read_not_write)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	unsigned int input_select_bits = 0;
+
+	if (enable) {
+		if (read_not_write) {
+			input_select_bits |= Gi_Read_Acknowledges_Irq;
+		} else {
+			input_select_bits |= Gi_Write_Acknowledges_Irq;
+		}
+	}
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Input_Select_Reg(counter->counter_index),
+		Gi_Read_Acknowledges_Irq | Gi_Write_Acknowledges_Irq,
+		input_select_bits);
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_e_series:
+		break;
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		{
+			unsigned gi_dma_config_bits = 0;
+
+			if (enable) {
+				gi_dma_config_bits |= Gi_DMA_Enable_Bit;
+				gi_dma_config_bits |= Gi_DMA_Int_Bit;
+			}
+			if (read_not_write == 0) {
+				gi_dma_config_bits |= Gi_DMA_Write_Bit;
+			}
+			ni_tio_set_bits(counter,
+				NITIO_Gi_DMA_Config_Reg(counter->counter_index),
+				Gi_DMA_Enable_Bit | Gi_DMA_Int_Bit |
+				Gi_DMA_Write_Bit, gi_dma_config_bits);
+		}
+		break;
+	}
+}
+
+/* TODO: a4l_ni_tio_input_inttrig is left unused because the trigger
+   callback cannot be changed at run time */
+int a4l_ni_tio_input_inttrig(struct ni_gpct *counter, lsampl_t trignum)
+{
+	unsigned long flags;
+	int retval = 0;
+
+	BUG_ON(counter == NULL);
+	if (trignum != 0)
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan)
+		a4l_mite_dma_arm(counter->mite_chan);
+	else
+		retval = -EIO;
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	if (retval < 0)
+		return retval;
+	retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+
+	/* TODO: disable the trigger until a command is recorded.
+	   A null trigger at the beginning prevents the AO start trigger
+	   from executing more than once per command (and doing things
+	   like trying to allocate the AO DMA channel multiple times). */
+
+	return retval;
+}
+
+static int ni_tio_input_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	struct ni_gpct_device *counter_dev = counter->counter_dev;
+	int retval = 0;
+
+	counter->mite_chan->dir = A4L_INPUT;
+	switch (counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		a4l_mite_prep_dma(counter->mite_chan, 32, 32);
+		break;
+	case ni_gpct_variant_e_series:
+		a4l_mite_prep_dma(counter->mite_chan, 16, 32);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	ni_tio_set_bits(counter, NITIO_Gi_Command_Reg(counter->counter_index),
+		Gi_Save_Trace_Bit, 0);
+	ni_tio_configure_dma(counter, 1, 1);
+	switch (cmd->start_src) {
+	case TRIG_NOW:
+		a4l_mite_dma_arm(counter->mite_chan);
+		retval = ni_tio_arm(counter, 1, NI_GPCT_ARM_IMMEDIATE);
+		break;
+	case TRIG_INT:
+		break;
+	case TRIG_EXT:
+		a4l_mite_dma_arm(counter->mite_chan);
+		retval = ni_tio_arm(counter, 1, cmd->start_arg);
+		break;
+	case TRIG_OTHER:
+		a4l_mite_dma_arm(counter->mite_chan);
+		break;
+	default:
+		BUG();
+		break;
+	}
+	return retval;
+}
+
+static int ni_tio_output_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	__a4l_err("ni_tio: output commands not yet implemented.\n");
+	return -ENOTSUPP;
+}
+
+static int ni_tio_cmd_setup(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	int retval = 0, set_gate_source = 0;
+	unsigned int gate_source;
+
+	if (cmd->scan_begin_src == TRIG_EXT) {
+		set_gate_source = 1;
+		gate_source = cmd->scan_begin_arg;
+	} else if (cmd->convert_src == TRIG_EXT) {
+		set_gate_source = 1;
+		gate_source = cmd->convert_arg;
+	}
+	if (set_gate_source) {
+		retval = ni_tio_set_gate_src(counter, 0, gate_source);
+	}
+	if (cmd->flags & TRIG_WAKE_EOS) {
+		ni_tio_set_bits(counter,
+			NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
+			Gi_Gate_Interrupt_Enable_Bit(counter->counter_index),
+			Gi_Gate_Interrupt_Enable_Bit(counter->counter_index));
+	}
+	return retval;
+}
+
+int a4l_ni_tio_cmd(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	int retval = 0;
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan == NULL) {
+		__a4l_err("a4l_ni_tio_cmd: commands only supported with DMA."
+			     " Interrupt-driven commands not yet implemented.\n");
+		retval = -EIO;
+	} else {
+		retval = ni_tio_cmd_setup(counter, cmd);
+		if (retval == 0) {
+			if (cmd->flags & A4L_CMD_WRITE) {
+				retval = ni_tio_output_cmd(counter, cmd);
+			} else {
+				retval = ni_tio_input_cmd(counter, cmd);
+			}
+		}
+	}
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	return retval;
+}
+
+struct a4l_cmd_desc a4l_ni_tio_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT | TRIG_OTHER | TRIG_EXT,
+	.scan_begin_src = TRIG_FOLLOW | TRIG_EXT | TRIG_OTHER,
+	.convert_src = TRIG_NOW | TRIG_EXT | TRIG_OTHER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_NONE,
+};
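+
+/* a4l_ni_tio_cmd_mask advertises every trigger source the counter
+   subdevice may accept; a4l_ni_tio_cmdtest() below checks a concrete
+   command against the combinations that actually work together. */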
+
+int a4l_ni_tio_cmdtest(struct ni_gpct *counter, struct a4l_cmd_desc *cmd)
+{
+	/* Make sure trigger sources are trivially valid */
+
+	if ((cmd->start_src & TRIG_EXT) != 0 &&
+	    ni_tio_counting_mode_registers_present(counter->counter_dev) == 0)
+		return -EINVAL;
+
+	/* Make sure trigger sources are mutually compatible */
+
+	if (cmd->convert_src != TRIG_NOW && cmd->scan_begin_src != TRIG_FOLLOW)
+		return -EINVAL;
+
+	/* Make sure arguments are trivially compatible */
+
+	if (cmd->start_src != TRIG_EXT) {
+		if (cmd->start_arg != 0) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->scan_begin_src != TRIG_EXT) {
+		if (cmd->scan_begin_arg) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->convert_src != TRIG_EXT) {
+		if (cmd->convert_arg) {
+			return -EINVAL;
+		}
+	}
+
+	if (cmd->scan_end_arg != cmd->nb_chan) {
+		return -EINVAL;
+	}
+
+	if (cmd->stop_src == TRIG_NONE) {
+		if (cmd->stop_arg != 0) {
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+int a4l_ni_tio_cancel(struct ni_gpct *counter)
+{
+	unsigned long flags;
+
+	ni_tio_arm(counter, 0, 0);
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan) {
+		a4l_mite_dma_disarm(counter->mite_chan);
+	}
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+	ni_tio_configure_dma(counter, 0, 0);
+
+	ni_tio_set_bits(counter,
+		NITIO_Gi_Interrupt_Enable_Reg(counter->counter_index),
+		Gi_Gate_Interrupt_Enable_Bit(counter->counter_index), 0x0);
+	return 0;
+}
+
+/* During buffered input counter operation on e-series boards, the
+   gate interrupt is acked automatically by the DMA controller, due
+   to the Gi_Read/Write_Acknowledges_IRQ bits in the input select
+   register. */
+static int should_ack_gate(struct ni_gpct *counter)
+{
+	unsigned long flags;
+	int retval = 0;
+
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		/* Not sure if the 660x really supports gate interrupts
+		   (the bits are not listed in the register-level manual) */
+		return 1;
+		break;
+	case ni_gpct_variant_e_series:
+		rtdm_lock_get_irqsave(&counter->lock, flags);
+		{
+			if (counter->mite_chan == NULL ||
+				counter->mite_chan->dir != A4L_INPUT ||
+				(a4l_mite_done(counter->mite_chan))) {
+				retval = 1;
+			}
+		}
+		rtdm_lock_put_irqrestore(&counter->lock, flags);
+		break;
+	}
+	return retval;
+}
+
+void a4l_ni_tio_acknowledge_and_confirm(struct ni_gpct *counter,
+				    int *gate_error,
+				    int *tc_error,
+				    int *perm_stale_data, int *stale_data)
+{
+	const unsigned short gxx_status = read_register(counter,
+		NITIO_Gxx_Status_Reg(counter->counter_index));
+	const unsigned short gi_status = read_register(counter,
+		NITIO_Gi_Status_Reg(counter->counter_index));
+	unsigned ack = 0;
+
+	if (gate_error)
+		*gate_error = 0;
+	if (tc_error)
+		*tc_error = 0;
+	if (perm_stale_data)
+		*perm_stale_data = 0;
+	if (stale_data)
+		*stale_data = 0;
+
+	if (gxx_status & Gi_Gate_Error_Bit(counter->counter_index)) {
+		ack |= Gi_Gate_Error_Confirm_Bit(counter->counter_index);
+		if (gate_error) {
+			/* 660x boards don't support automatic
+			   acknowledgement of the gate interrupt via
+			   DMA read/write, and they report bogus gate
+			   errors */
+			if (counter->counter_dev->variant !=
+				ni_gpct_variant_660x) {
+				*gate_error = 1;
+			}
+		}
+	}
+	if (gxx_status & Gi_TC_Error_Bit(counter->counter_index)) {
+		ack |= Gi_TC_Error_Confirm_Bit(counter->counter_index);
+		if (tc_error)
+			*tc_error = 1;
+	}
+	if (gi_status & Gi_TC_Bit) {
+		ack |= Gi_TC_Interrupt_Ack_Bit;
+	}
+	if (gi_status & Gi_Gate_Interrupt_Bit) {
+		if (should_ack_gate(counter))
+			ack |= Gi_Gate_Interrupt_Ack_Bit;
+	}
+	if (ack)
+		write_register(counter, ack,
+			NITIO_Gi_Interrupt_Acknowledge_Reg(counter->
+				counter_index));
+	if (ni_tio_get_soft_copy(counter,
+			NITIO_Gi_Mode_Reg(counter->
+				counter_index)) & Gi_Loading_On_Gate_Bit) {
+		if (gxx_status & Gi_Stale_Data_Bit(counter->counter_index)) {
+			if (stale_data)
+				*stale_data = 1;
+		}
+		if (read_register(counter,
+				NITIO_Gxx_Joint_Status2_Reg(counter->
+					counter_index)) &
+			Gi_Permanent_Stale_Bit(counter->counter_index)) {
+			__a4l_err("%s: Gi_Permanent_Stale_Data detected.\n",
+				    __FUNCTION__);
+			if (perm_stale_data)
+				*perm_stale_data = 1;
+		}
+	}
+}
+
+/* TODO: to be adapted after a4l_buf_evt review */
+void a4l_ni_tio_handle_interrupt(struct ni_gpct *counter, struct a4l_device *dev)
+{
+	unsigned gpct_mite_status;
+	unsigned long flags;
+	int gate_error;
+	int tc_error;
+	int perm_stale_data;
+	struct a4l_subdevice *subd =
+		a4l_get_subd(dev, NI_GPCT_SUBDEV(counter->counter_index));
+
+	a4l_ni_tio_acknowledge_and_confirm(counter, &gate_error, &tc_error,
+		&perm_stale_data, NULL);
+	if (gate_error) {
+		__a4l_err("%s: Gi_Gate_Error detected.\n", __FUNCTION__);
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	if (perm_stale_data) {
+		a4l_buf_evt(subd, A4L_BUF_ERROR);
+	}
+	switch (counter->counter_dev->variant) {
+	case ni_gpct_variant_m_series:
+	case ni_gpct_variant_660x:
+		if (read_register(counter,
+				  NITIO_Gi_DMA_Status_Reg(counter->counter_index))
+		    & Gi_DRQ_Error_Bit) {
+			__a4l_err("%s: Gi_DRQ_Error detected.\n", __FUNCTION__);
+			a4l_buf_evt(subd, A4L_BUF_ERROR);
+		}
+		break;
+	case ni_gpct_variant_e_series:
+		break;
+	}
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	if (counter->mite_chan == NULL) {
+		rtdm_lock_put_irqrestore(&counter->lock, flags);
+		return;
+	}
+	gpct_mite_status = a4l_mite_get_status(counter->mite_chan);
+	if (gpct_mite_status & CHSR_LINKC) {
+		writel(CHOR_CLRLC,
+			counter->mite_chan->mite->mite_io_addr +
+			MITE_CHOR(counter->mite_chan->channel));
+	}
+	a4l_mite_sync_input_dma(counter->mite_chan, subd);
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+}
+
+void a4l_ni_tio_set_mite_channel(struct ni_gpct *counter,
+			     struct mite_channel *mite_chan)
+{
+	unsigned long flags;
+
+	rtdm_lock_get_irqsave(&counter->lock, flags);
+	counter->mite_chan = mite_chan;
+	rtdm_lock_put_irqrestore(&counter->lock, flags);
+}
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
+
+static int __init ni_tio_init_module(void)
+{
+	return 0;
+}
+
+static void __exit ni_tio_cleanup_module(void)
+{
+}
+
+MODULE_DESCRIPTION("Analogy support for NI general-purpose counters");
+MODULE_LICENSE("GPL");
+
+module_init(ni_tio_init_module);
+module_exit(ni_tio_cleanup_module);
+
+EXPORT_SYMBOL_GPL(a4l_ni_tio_rinsn);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_winsn);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_insn_config);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_init_counter);
+EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_construct);
+EXPORT_SYMBOL_GPL(a4l_ni_gpct_device_destroy);
+
+#if (defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE) || \
+     defined(CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE_MODULE))
+
+EXPORT_SYMBOL_GPL(a4l_ni_tio_input_inttrig);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmd_mask);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cmdtest);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_cancel);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_handle_interrupt);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_set_mite_channel);
+EXPORT_SYMBOL_GPL(a4l_ni_tio_acknowledge_and_confirm);
+
+#endif /* CONFIG_XENO_DRIVERS_ANALOGY_NI_MITE */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h
new file mode 100644
index 0000000..a10e07a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/proc.h
@@ -0,0 +1,33 @@
+/*
+ * Analogy for Linux, procfs related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ANALOGY_PROC_H__
+#define __ANALOGY_PROC_H__
+
+#ifdef __KERNEL__
+
+#ifdef CONFIG_PROC_FS
+extern struct proc_dir_entry *a4l_proc_root;
+#endif /* CONFIG_PROC_FS */
+
+#endif /* __KERNEL__ */
+
+#endif /* __ANALOGY_PROC_H__ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c
new file mode 100644
index 0000000..6755941
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_helpers.c
@@ -0,0 +1,214 @@
+/*
+ * Analogy for Linux, RTDM helpers
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/version.h>
+#include <linux/fs.h>
+#include <asm/atomic.h>
+
+#include <rtdm/analogy/rtdm_helpers.h>
+
+/* --- Time section --- */
+
+static nanosecs_abs_t a4l_clkofs;
+
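+/* Record the offset between the RTDM clock and the wall clock once,
+   so that a4l_get_time() can deliver wall-clock timestamps from
+   real-time context without querying Linux time services again. */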
+void a4l_init_time(void)
+{
+	nanosecs_abs_t t1, t2;
+	t1 = rtdm_clock_read();
+	t2 = ktime_to_ns(ktime_get_real());
+	a4l_clkofs = t2 - t1;
+}
+
+nanosecs_abs_t a4l_get_time(void)
+{
+	return a4l_clkofs + rtdm_clock_read();
+}
+
+/* --- IRQ section --- */
+
+static int a4l_handle_irq(rtdm_irq_t *irq_handle)
+{
+	struct a4l_irq_descriptor *dsc =
+		rtdm_irq_get_arg(irq_handle, struct a4l_irq_descriptor);
+
+	if (dsc->handler((unsigned int)irq_handle->irq, dsc->cookie) == 0)
+		return RTDM_IRQ_HANDLED;
+	else
+		return RTDM_IRQ_NONE;
+}
+
+int __a4l_request_irq(struct a4l_irq_descriptor *dsc,
+		      unsigned int irq,
+		      a4l_irq_hdlr_t handler,
+		      unsigned long flags, void *cookie)
+{
+	/* Fills the IRQ descriptor */
+	dsc->handler = handler;
+	dsc->cookie = cookie;
+	dsc->irq = irq;
+
+	/* Registers the RT IRQ handler */
+	return rtdm_irq_request(&dsc->rtdm_desc,
+				(int)irq,
+				a4l_handle_irq, flags, "Analogy device", dsc);
+}
+
+int __a4l_free_irq(struct a4l_irq_descriptor * dsc)
+{
+	return rtdm_irq_free(&dsc->rtdm_desc);
+}
+
+/* --- Synchronization section --- */
+
+static void a4l_nrt_sync_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	struct a4l_sync *snc = (struct a4l_sync *) arg;
+	wake_up_interruptible(&snc->wq);
+}
+
+int a4l_init_sync(struct a4l_sync *snc)
+{
+	int ret = 0;
+
+	/* Initializes the flags field */
+	snc->status = 0;
+
+	/* If the process is NRT, we need a wait queue structure */
+	init_waitqueue_head(&snc->wq);
+
+	/* Initializes the RTDM event */
+	rtdm_event_init(&snc->rtdm_evt, 0);
+
+	/* Initializes the gateway to NRT context */
+	rtdm_nrtsig_init(&snc->nrt_sig, a4l_nrt_sync_handler, snc);
+
+	return ret;
+}
+
+void a4l_cleanup_sync(struct a4l_sync *snc)
+{
+	rtdm_nrtsig_destroy(&snc->nrt_sig);
+	rtdm_event_destroy(&snc->rtdm_evt);
+}
+
+int a4l_wait_sync(struct a4l_sync *snc, int rt)
+{
+	int ret = 0;
+
+	if (test_bit(__EVT_PDING, &snc->status))
+		goto out_wait;
+
+	if (rt != 0) {
+		/* If the calling process is in primary mode,
+		   we can use RTDM API ... */
+		set_bit(__RT_WAITER, &snc->status);
+		ret = rtdm_event_wait(&snc->rtdm_evt);
+	} else {
+		/* ... else if the process is NRT,
+		   the Linux wait queue system is used */
+		set_bit(__NRT_WAITER, &snc->status);
+		ret = wait_event_interruptible(snc->wq,
+					       test_bit(__EVT_PDING,
+							&snc->status));
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+int a4l_timedwait_sync(struct a4l_sync * snc,
+		       int rt, unsigned long long ns_timeout)
+{
+	int ret = 0;
+	unsigned long timeout;
+
+	if (test_bit(__EVT_PDING, &snc->status))
+		goto out_wait;
+
+	if (rt != 0) {
+		/* If the calling process is in primary mode,
+		   we can use RTDM API ... */
+		set_bit(__RT_WAITER, &snc->status);
+		ret = rtdm_event_timedwait(&snc->rtdm_evt, ns_timeout, NULL);
+	} else {
+		/* ... else if the process is NRT,
+		   the Linux wait queue system is used */
+
+		/* do_div() leaves the quotient in ns_timeout and
+		   returns the remainder; only the quotient (the
+		   timeout converted to microseconds) matters here */
+		do_div(ns_timeout, 1000);
+
+		/* The Linux kernel cannot tick at a frequency higher
+		   than 1 MHz; if the timeout value is lower than
+		   1 us, we round it up to 1 us */
+		timeout = (ns_timeout == 0) ?
+			1 : usecs_to_jiffies(ns_timeout);
+
+		set_bit(__NRT_WAITER, &snc->status);
+
+		ret = wait_event_interruptible_timeout(snc->wq,
+						       test_bit(__EVT_PDING,
+								&snc->status),
+						       timeout);
+	}
+
+out_wait:
+
+	clear_bit(__EVT_PDING, &snc->status);
+
+	return ret;
+}
+
+void a4l_flush_sync(struct a4l_sync * snc)
+{
+	/* Clear the status bitfield */
+	snc->status = 0;
+
+	/* Flush the RTDM event */
+	rtdm_event_clear(&snc->rtdm_evt);
+}
+
+void a4l_signal_sync(struct a4l_sync * snc)
+{
+	int hit = 0;
+
+	set_bit(__EVT_PDING, &snc->status);
+
+	/* a4l_signal_sync() may not be called from the context of
+	   the waiting process; the status flags record the waiter's
+	   mode so that the proper event signaling function can be
+	   called */
+	if (test_and_clear_bit(__RT_WAITER, &snc->status)) {
+		rtdm_event_signal(&snc->rtdm_evt);
+		hit++;
+	}
+
+	if (test_and_clear_bit(__NRT_WAITER, &snc->status)) {
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+		hit++;
+	}
+
+	if (hit == 0) {
+		/* At first signaling, we may not know the proper way
+		   to send the event */
+		rtdm_event_signal(&snc->rtdm_evt);
+		rtdm_nrtsig_pend(&snc->nrt_sig);
+	}
+}
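+
+/* A minimal usage sketch of the synchronization helpers above
+ * (hypothetical driver code, error handling omitted):
+ *
+ *	struct a4l_sync snc;
+ *	int ret;
+ *
+ *	a4l_init_sync(&snc);
+ *
+ *	// waiter side: the rt argument tells which domain we run in
+ *	ret = a4l_wait_sync(&snc, rtdm_in_rt_context());
+ *
+ *	// signaling side, e.g. from an IRQ handler
+ *	a4l_signal_sync(&snc);
+ *
+ *	a4l_cleanup_sync(&snc);
+ */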
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c
new file mode 100644
index 0000000..765c176
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/rtdm_interface.c
@@ -0,0 +1,308 @@
+/*
+ * Analogy for Linux, user interface (open, read, write, ioctl, proc)
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/fs.h>
+#include <linux/proc_fs.h>
+#include <linux/uaccess.h>
+#include <rtdm/driver.h>
+#include <rtdm/analogy/device.h>
+
+MODULE_AUTHOR("Alexis Berlemont");
+MODULE_DESCRIPTION("Analogy core driver");
+MODULE_LICENSE("GPL");
+
+int (* const a4l_ioctl_functions[]) (struct a4l_device_context *, void *) = {
+	[_IOC_NR(A4L_DEVCFG)] = a4l_ioctl_devcfg,
+	[_IOC_NR(A4L_DEVINFO)] = a4l_ioctl_devinfo,
+	[_IOC_NR(A4L_SUBDINFO)] = a4l_ioctl_subdinfo,
+	[_IOC_NR(A4L_CHANINFO)] = a4l_ioctl_chaninfo,
+	[_IOC_NR(A4L_RNGINFO)] = a4l_ioctl_rnginfo,
+	[_IOC_NR(A4L_CMD)] = a4l_ioctl_cmd,
+	[_IOC_NR(A4L_CANCEL)] = a4l_ioctl_cancel,
+	[_IOC_NR(A4L_INSNLIST)] = a4l_ioctl_insnlist,
+	[_IOC_NR(A4L_INSN)] = a4l_ioctl_insn,
+	[_IOC_NR(A4L_BUFCFG)] = a4l_ioctl_bufcfg,
+	[_IOC_NR(A4L_BUFINFO)] = a4l_ioctl_bufinfo,
+	[_IOC_NR(A4L_POLL)] = a4l_ioctl_poll,
+	[_IOC_NR(A4L_MMAP)] = a4l_ioctl_mmap,
+	[_IOC_NR(A4L_NBCHANINFO)] = a4l_ioctl_nbchaninfo,
+	[_IOC_NR(A4L_NBRNGINFO)] = a4l_ioctl_nbrnginfo,
+	[_IOC_NR(A4L_BUFCFG2)] = a4l_ioctl_bufcfg2,
+	[_IOC_NR(A4L_BUFINFO2)] = a4l_ioctl_bufinfo2
+};
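+
+/* The table above is indexed with _IOC_NR(request): assuming, for
+ * instance, that A4L_CMD carries the ioctl number 5 in the Analogy
+ * UAPI headers (illustrative value only), a4l_ioctl() would route it
+ * to a4l_ioctl_cmd through a4l_ioctl_functions[5].  Out-of-range
+ * requests are rejected in a4l_ioctl() below.
+ */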
+
+#ifdef CONFIG_PROC_FS
+struct proc_dir_entry *a4l_proc_root;
+
+static int a4l_proc_devs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, a4l_rdproc_devs, NULL);
+}
+
+static const DEFINE_PROC_OPS(a4l_proc_devs_ops,
+			a4l_proc_devs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int a4l_proc_drvs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, a4l_rdproc_drvs, NULL);
+}
+
+static const DEFINE_PROC_OPS(a4l_proc_drvs_ops,
+			a4l_proc_drvs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int a4l_init_proc(void)
+{
+	int ret = 0;
+	struct proc_dir_entry *entry;
+
+	/* Creates the global directory */
+	a4l_proc_root = proc_mkdir("analogy", NULL);
+	if (a4l_proc_root == NULL) {
+		__a4l_err("a4l_proc_init: "
+			  "failed to create /proc/analogy\n");
+		return -ENOMEM;
+	}
+
+	/* Creates the devices related file */
+	entry = proc_create("devices", 0444, a4l_proc_root,
+			    &a4l_proc_devs_ops);
+	if (entry == NULL) {
+		__a4l_err("a4l_proc_init: "
+			  "failed to create /proc/analogy/devices\n");
+		ret = -ENOMEM;
+		goto err_proc_init;
+	}
+
+	/* Creates the drivers related file */
+	entry = proc_create("drivers", 0444, a4l_proc_root,
+			    &a4l_proc_drvs_ops);
+	if (entry == NULL) {
+		__a4l_err("a4l_proc_init: "
+			  "failed to create /proc/analogy/drivers\n");
+		ret = -ENOMEM;
+		goto err_proc_init;
+	}
+
+	return 0;
+
+err_proc_init:
+	remove_proc_entry("devices", a4l_proc_root);
+	remove_proc_entry("analogy", NULL);
+	return ret;
+}
+
+void a4l_cleanup_proc(void)
+{
+	remove_proc_entry("drivers", a4l_proc_root);
+	remove_proc_entry("devices", a4l_proc_root);
+	remove_proc_entry("analogy", NULL);
+}
+
+#else /* !CONFIG_PROC_FS */
+
+#define a4l_init_proc() 0
+#define a4l_cleanup_proc()
+
+#endif /* CONFIG_PROC_FS */
+
+int a4l_open(struct rtdm_fd *fd, int flags)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Get a pointer to the selected device (based on the minor index) */
+	a4l_set_dev(cxt);
+
+	/* Initialize the buffer structure */
+	cxt->buffer = rtdm_malloc(sizeof(struct a4l_buffer));
+	if (cxt->buffer == NULL)
+		return -ENOMEM;
+
+	a4l_init_buffer(cxt->buffer);
+	/* Allocate the asynchronous buffer
+	   NOTE: it might be worthwhile to allocate the buffer only on
+	   demand, especially if the system is short of memory */
+	if (cxt->dev->transfer.default_bufsize)
+		a4l_alloc_buffer(cxt->buffer,
+				 cxt->dev->transfer.default_bufsize);
+
+	__a4l_dbg(1, core_dbg, "cxt=%p cxt->buf=%p, cxt->buf->buf=%p\n",
+		cxt, cxt->buffer, cxt->buffer->buf);
+
+	return 0;
+}
+
+void a4l_close(struct rtdm_fd *fd)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Cancel any asynchronous transfer which may be in progress */
+	a4l_cancel_buffer(cxt);
+
+	/* Free the buffer which was linked with this context and... */
+	a4l_free_buffer(cxt->buffer);
+
+	/* ...free the other buffer resources (sync) and... */
+	a4l_cleanup_buffer(cxt->buffer);
+
+	/* ...free the structure */
+	rtdm_free(cxt->buffer);
+}
+
+ssize_t a4l_read(struct rtdm_fd *fd, void *buf, size_t nbytes)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Jump into the RT domain if possible */
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	if (nbytes == 0)
+		return 0;
+
+	return a4l_read_buffer(cxt, buf, nbytes);
+}
+
+ssize_t a4l_write(struct rtdm_fd *fd, const void *buf, size_t nbytes)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Jump into the RT domain if possible */
+	if (!rtdm_in_rt_context() && rtdm_rt_capable(fd))
+		return -ENOSYS;
+
+	if (nbytes == 0)
+		return 0;
+
+	return a4l_write_buffer(cxt, buf, nbytes);
+}
+
+int a4l_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	/* Reject requests which do not map onto the dispatch table */
+	if (_IOC_NR(request) >= ARRAY_SIZE(a4l_ioctl_functions))
+		return -ENOTTY;
+
+	return a4l_ioctl_functions[_IOC_NR(request)] (cxt, arg);
+}
+
+int a4l_rt_select(struct rtdm_fd *fd,
+		  rtdm_selector_t *selector,
+		  enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct a4l_device_context *cxt = (struct a4l_device_context *)rtdm_fd_to_private(fd);
+
+	return a4l_select(cxt, selector, type, fd_index);
+}
+
+static struct rtdm_driver analogy_driver = {
+	.profile_info =		RTDM_PROFILE_INFO(analogy,
+						  RTDM_CLASS_EXPERIMENTAL,
+						  RTDM_SUBCLASS_ANALOGY,
+						  0),
+	.device_flags =		RTDM_NAMED_DEVICE,
+	.device_count =		A4L_NB_DEVICES,
+	.context_size =		sizeof(struct a4l_device_context),
+	.ops = {
+		.open =		a4l_open,
+		.close =	a4l_close,
+		.ioctl_rt =	a4l_ioctl,
+		.read_rt =	a4l_read,
+		.write_rt =	a4l_write,
+		.ioctl_nrt =	a4l_ioctl,
+		.read_nrt =	a4l_read,
+		.write_nrt =	a4l_write,
+		.select =	a4l_rt_select,
+	},
+};
+
+static struct rtdm_device rtdm_devs[A4L_NB_DEVICES] = {
+	[0 ... A4L_NB_DEVICES - 1] = {
+		.driver = &analogy_driver,
+		.label = "analogy%d",
+	}
+};
+
+int a4l_register(void)
+{
+	int i, ret;
+
+	for (i = 0; i < A4L_NB_DEVICES; i++) {
+		ret = rtdm_dev_register(rtdm_devs + i);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	while (i-- > 0)
+		rtdm_dev_unregister(rtdm_devs + i);
+
+	return ret;
+}
+
+void a4l_unregister(void)
+{
+	int i;
+	for (i = 0; i < A4L_NB_DEVICES; i++)
+		rtdm_dev_unregister(&(rtdm_devs[i]));
+}
+
+static int __init a4l_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	/* Initializes the devices */
+	a4l_init_devs();
+
+	/* Initializes Analogy time management */
+	a4l_init_time();
+
+	/* Registers RTDM / fops interface */
+	ret = a4l_register();
+	if (ret != 0) {
+		a4l_unregister();
+		goto out_a4l_init;
+	}
+
+	/* Initializes Analogy proc layer */
+	ret = a4l_init_proc();
+
+out_a4l_init:
+	return ret;
+}
+
+static void __exit a4l_cleanup(void)
+{
+	/* Removes Analogy proc files */
+	a4l_cleanup_proc();
+
+	/* Unregisters RTDM / fops interface */
+	a4l_unregister();
+}
+
+module_init(a4l_init);
+module_exit(a4l_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig
new file mode 100644
index 0000000..ce5aa51
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Kconfig
@@ -0,0 +1,5 @@
+
+config XENO_DRIVERS_ANALOGY_S526
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "Sensoray Model 526 driver"
+	default n
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile
new file mode 100644
index 0000000..51bad4d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/Makefile
@@ -0,0 +1,6 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_S526) += analogy_s526.o
+
+analogy_s526-y := s526.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c
new file mode 100644
index 0000000..8ecda7e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/sensoray/s526.c
@@ -0,0 +1,756 @@
+/*
+ * Analogy driver for Sensoray Model 526 board
+ *
+ * Copyright (C) 2009 Simon Boulay <simon.boulay@gmail.com>
+ *
+ * Derived from comedi:
+ * Copyright (C) 2000 David A. Schleef <ds@schleef.org>
+ *               2006 Everett Wang <everett.wang@everteq.com>
+ *               2009 Ian Abbott <abbotti@mev.co.uk>
+ *
+ * This code is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * This code is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+/*
+ * Original code comes from comedi linux-next staging driver (2009.12.20)
+ * Board documentation: http://www.sensoray.com/products/526data.htm
+ * Everything should work as in comedi:
+ *   - Encoder works
+ *   - Analog input works
+ *   - Analog output works
+ *   - PWM output works
+ *   - Commands are not supported yet.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/io.h>
+#include <asm/byteorder.h>
+#include <rtdm/analogy/device.h>
+
+/* Board description */
+#define S526_GPCT_CHANS	4
+#define S526_GPCT_BITS	24
+#define S526_AI_CHANS	10	/* 8 regular differential inputs
+				 * channel 8 is "reference 0" (+10V)
+				 * channel 9 is "reference 1" (0V) */
+#define S526_AI_BITS	16
+#define S526_AI_TIMEOUT 100
+#define S526_AO_CHANS	4
+#define S526_AO_BITS	16
+#define S526_DIO_CHANS	8
+#define S526_DIO_BITS	1
+
+/* Ports */
+#define S526_IOSIZE		0x40  /* 64 bytes */
+#define S526_DEFAULT_ADDRESS	0x2C0 /* Manufacturing default */
+
+/* Registers */
+#define REG_TCR 0x00
+#define REG_WDC 0x02
+#define REG_DAC 0x04
+#define REG_ADC 0x06
+#define REG_ADD 0x08
+#define REG_DIO 0x0A
+#define REG_IER 0x0C
+#define REG_ISR 0x0E
+#define REG_MSC 0x10
+#define REG_C0L 0x12
+#define REG_C0H 0x14
+#define REG_C0M 0x16
+#define REG_C0C 0x18
+#define REG_C1L 0x1A
+#define REG_C1H 0x1C
+#define REG_C1M 0x1E
+#define REG_C1C 0x20
+#define REG_C2L 0x22
+#define REG_C2H 0x24
+#define REG_C2M 0x26
+#define REG_C2C 0x28
+#define REG_C3L 0x2A
+#define REG_C3H 0x2C
+#define REG_C3M 0x2E
+#define REG_C3C 0x30
+#define REG_EED 0x32
+#define REG_EEC 0x34
+
+#define ISR_ADC_DONE 0x4
+
+struct counter_mode_register_t {
+#if defined (__LITTLE_ENDIAN_BITFIELD)
+	unsigned short coutSource:1;
+	unsigned short coutPolarity:1;
+	unsigned short autoLoadResetRcap:3;
+	unsigned short hwCtEnableSource:2;
+	unsigned short ctEnableCtrl:2;
+	unsigned short clockSource:2;
+	unsigned short countDir:1;
+	unsigned short countDirCtrl:1;
+	unsigned short outputRegLatchCtrl:1;
+	unsigned short preloadRegSel:1;
+	unsigned short reserved:1;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	unsigned short reserved:1;
+	unsigned short preloadRegSel:1;
+	unsigned short outputRegLatchCtrl:1;
+	unsigned short countDirCtrl:1;
+	unsigned short countDir:1;
+	unsigned short clockSource:2;
+	unsigned short ctEnableCtrl:2;
+	unsigned short hwCtEnableSource:2;
+	unsigned short autoLoadResetRcap:3;
+	unsigned short coutPolarity:1;
+	unsigned short coutSource:1;
+#else
+#error Unknown bit field order
+#endif
+};
+
+union cmReg {
+	struct counter_mode_register_t reg;
+	unsigned short value;
+};
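+
+/* A small usage sketch (hypothetical values): the bitfield view and
+ * the raw 16bit value alias each other, so individual fields can be
+ * adjusted before the whole word is written back to the board:
+ *
+ *	union cmReg cm;
+ *	cm.value = inw(ADDR_CHAN_REG(REG_C0M, chan));
+ *	cm.reg.preloadRegSel = 1;	// select pre-load register PR1
+ *	outw(cm.value, ADDR_CHAN_REG(REG_C0M, chan));
+ */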
+
+/* Application Classes for GPCT Subdevices */
+enum S526_GPCT_APP_CLASS {
+	CountingAndTimeMeasurement,
+	SinglePulseGeneration,
+	PulseTrainGeneration,
+	PositionMeasurement,
+	Miscellaneous
+};
+
+/* GPCT subdevices configuration */
+#define MAX_GPCT_CONFIG_DATA 6
+struct s526GPCTConfig {
+	enum S526_GPCT_APP_CLASS app;
+	int data[MAX_GPCT_CONFIG_DATA];
+};
+
+typedef struct s526_priv {
+	unsigned long io_base;
+} s526_priv_t;
+
+struct s526_subd_gpct_priv {
+	struct s526GPCTConfig config[4];
+};
+
+struct s526_subd_ai_priv {
+	uint16_t config;
+};
+
+struct s526_subd_ao_priv {
+	uint16_t readback[2];
+};
+
+struct s526_subd_dio_priv {
+	int io_bits;
+	unsigned int state;
+};
+
+#define devpriv ((s526_priv_t*)(dev->priv))
+
+#define ADDR_REG(reg) (devpriv->io_base + (reg))
+#define ADDR_CHAN_REG(reg, chan) (devpriv->io_base + (reg) + (chan) * 8)
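+
+/* Each counter channel owns four 16bit registers (count low/high,
+ * mode, control), so consecutive channels are spaced 8 bytes apart;
+ * e.g. ADDR_CHAN_REG(REG_C0L, 2) yields io_base + 0x12 + 16 =
+ * io_base + 0x22, which is REG_C2L in the register map above.
+ */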
+
+
+static int s526_gpct_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_gpct_priv *subdpriv =
+	    (struct s526_subd_gpct_priv *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+	int subdev_channel = CR_CHAN(insn->chan_desc);
+	int i;
+	short value;
+	union cmReg cmReg;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_gpct_insn_config: Configuring Channel %d\n",
+		subdev_channel);
+
+	for (i = 0; i < MAX_GPCT_CONFIG_DATA; i++) {
+		subdpriv->config[subdev_channel].data[i] = data[i];
+		a4l_dbg(1, drv_dbg, dev, "data[%d]=%x\n", i, data[i]);
+	}
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_GPCT_QUADRATURE_ENCODER:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register Value
+		 * data[3]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring Encoder\n");
+		subdpriv->config[subdev_channel].app = PositionMeasurement;
+
+		/* Set Counter Mode Register */
+		cmReg.value = data[1] & 0xFFFF;
+
+		a4l_dbg(1, drv_dbg, dev, "Counter Mode register=%x\n", cmReg.value);
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Reset the counter if it is software preload */
+		if (cmReg.reg.autoLoadResetRcap == 0) {
+			outw(0x8000, ADDR_CHAN_REG(REG_C0C, subdev_channel)); /* Reset the counter */
+			/* outw(0x4000, ADDR_CHAN_REG(REG_C0C, subdev_channel));	/\* Load the counter from PR0 *\/ */
+		}
+		break;
+
+	case A4L_INSN_CONFIG_GPCT_SINGLE_PULSE_GENERATOR:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register 0 Value
+		 * data[3]: Pre-load Register 1 Value
+		 * data[4]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring SPG\n");
+		subdpriv->config[subdev_channel].app = SinglePulseGeneration;
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 0; /* PR0 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 0 high word */
+		value = (short)((data[2] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 0 low word */
+		value = (short)(data[2] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 1; /* PR1 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 1 high word */
+		value = (short)((data[3] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 1 low word */
+		value = (short)(data[3] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Write the Counter Control Register */
+		if (data[4] != 0) {
+			value = (short)(data[4] & 0xFFFF);
+			outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+		}
+		break;
+
+	case A4L_INSN_CONFIG_GPCT_PULSE_TRAIN_GENERATOR:
+		/*
+		 * data[0]: Application Type
+		 * data[1]: Counter Mode Register Value
+		 * data[2]: Pre-load Register 0 Value
+		 * data[3]: Pre-load Register 1 Value
+		 * data[4]: Counter Control Register
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_insn_config: Configuring PTG\n");
+		subdpriv->config[subdev_channel].app = PulseTrainGeneration;
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 0; /* PR0 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 0 high word */
+		value = (short)((data[2] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 0 low word */
+		value = (short)(data[2] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Set Counter Mode Register */
+		cmReg.value = (short)(data[1] & 0xFFFF);
+		cmReg.reg.preloadRegSel = 1; /* PR1 */
+		outw(cmReg.value, ADDR_CHAN_REG(REG_C0M, subdev_channel));
+
+		/* Load the pre-load register 1 high word */
+		value = (short)((data[3] >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+
+		/* Load the pre-load register 1 low word */
+		value = (short)(data[3] & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+
+		/* Write the Counter Control Register */
+		if (data[4] != 0) {
+			value = (short)(data[4] & 0xFFFF);
+			outw(value, ADDR_CHAN_REG(REG_C0C, subdev_channel));
+		}
+		break;
+
+	default:
+		a4l_err(dev, "s526_gpct_insn_config: unsupported GPCT_insn_config\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s526_gpct_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	uint32_t *data = (uint32_t *)insn->data;
+	int counter_channel = CR_CHAN(insn->chan_desc);
+	unsigned short datalow;
+	unsigned short datahigh;
+	int i;
+
+	if (insn->data_size <= 0) {
+		a4l_err(dev, "s526_gpct_rinsn: data size should be > 0\n");
+		return -EINVAL;
+	}
+
+	for (i = 0; i < insn->data_size / sizeof(uint32_t); i++) {
+		datalow = inw(ADDR_CHAN_REG(REG_C0L, counter_channel));
+		datahigh = inw(ADDR_CHAN_REG(REG_C0H, counter_channel));
+		data[i] = (int)(datahigh & 0x00FF);
+		data[i] = (data[i] << 16) | (datalow & 0xFFFF);
+		a4l_dbg(1, drv_dbg, dev,
+			"s526_gpct_rinsn GPCT[%d]: %x(0x%04x, 0x%04x)\n",
+			counter_channel, data[i], datahigh, datalow);
+	}
+
+	return 0;
+}
+
+static int s526_gpct_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_gpct_priv *subdpriv =
+	    (struct s526_subd_gpct_priv *)subd->priv;
+	uint32_t *data = (uint32_t *)insn->data;
+	int subdev_channel = CR_CHAN(insn->chan_desc);
+	short value;
+	union cmReg cmReg;
+
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_gpct_winsn: GPCT_INSN_WRITE on channel %d\n",
+		subdev_channel);
+
+	cmReg.value = inw(ADDR_CHAN_REG(REG_C0M, subdev_channel));
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_gpct_winsn: Counter Mode Register: %x\n", cmReg.value);
+
+	/* Check what Application of Counter this channel is configured for */
+	switch (subdpriv->config[subdev_channel].app) {
+	case PositionMeasurement:
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: PM\n");
+		outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
+							     subdev_channel));
+		outw(0xFFFF & (*data),
+		     ADDR_CHAN_REG(REG_C0L, subdev_channel));
+		break;
+
+	case SinglePulseGeneration:
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: SPG\n");
+		outw(0xFFFF & ((*data) >> 16), ADDR_CHAN_REG(REG_C0H,
+							     subdev_channel));
+		outw(0xFFFF & (*data),
+		     ADDR_CHAN_REG(REG_C0L, subdev_channel));
+		break;
+
+	case PulseTrainGeneration:
+		/*
+		 * data[0] contains the PULSE_WIDTH
+		 * data[1] contains the PULSE_PERIOD
+		 * @pre PULSE_PERIOD > PULSE_WIDTH > 0
+		 * The above periods must be expressed as a multiple of the
+		 * pulse frequency on the selected source
+		 */
+		a4l_dbg(1, drv_dbg, dev, "s526_gpct_winsn: INSN_WRITE: PTG\n");
+		if ((data[1] > data[0]) && (data[0] > 0)) {
+			(subdpriv->config[subdev_channel]).data[0] = data[0];
+			(subdpriv->config[subdev_channel]).data[1] = data[1];
+		} else {
+			a4l_err(dev,
+				"s526_gpct_winsn: INSN_WRITE: PTG: Problem with Pulse params -> %du %du\n",
+				data[0], data[1]);
+			return -EINVAL;
+		}
+
+		value = (short)((*data >> 16) & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0H, subdev_channel));
+		value = (short)(*data & 0xFFFF);
+		outw(value, ADDR_CHAN_REG(REG_C0L, subdev_channel));
+		break;
+	default:		/* Impossible */
+		a4l_err(dev,
+			"s526_gpct_winsn: INSN_WRITE: Functionality %d not implemented yet\n",
+			 subdpriv->config[subdev_channel].app);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int s526_ai_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_ai_priv *subdpriv =
+	    (struct s526_subd_ai_priv *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+
+	if (insn->data_size < sizeof(unsigned int))
+		return -EINVAL;
+
+	/* data[0]: bitmask of the channels to enable.
+	 * data[1]: non-zero enables the conversion delay.
+	 */
+	/* COMMENT: abbotti 2008-07-24: I don't know why you'd want to
+	 * enable channels here.  The channel should be enabled in the
+	 * INSN_READ handler. */
+
+	/* Enable ADC interrupt */
+	outw(ISR_ADC_DONE, ADDR_REG(REG_IER));
+	a4l_dbg(1, drv_dbg, dev,
+		"s526_ai_insn_config: ADC current value: 0x%04x\n",
+		inw(ADDR_REG(REG_ADC)));
+
+	subdpriv->config = (data[0] & 0x3FF) << 5;
+	if (data[1] > 0)
+		subdpriv->config |= 0x8000; /* set the delay */
+
+	subdpriv->config |= 0x0001; /* ADC start bit. */
+
+	return 0;
+}
+
+static int s526_ai_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_ai_priv *subdpriv =
+	    (struct s526_subd_ai_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int n, i;
+	int chan = CR_CHAN(insn->chan_desc);
+	uint16_t value;
+	uint16_t d;
+	uint16_t status;
+
+	/* Set the configured delay, enable only this channel, select
+	 * it as the "ADC read" channel and set the "ADC start" bit. */
+	value = (subdpriv->config & 0x8000) |
+	    ((1 << 5) << chan) | (chan << 1) | 0x0001;
+
+	/* convert n samples */
+	for (n = 0; n < insn->data_size / sizeof(uint16_t); n++) {
+		/* trigger conversion */
+		outw(value, ADDR_REG(REG_ADC));
+		a4l_dbg(1, drv_dbg, dev, "s526_ai_rinsn: Wrote 0x%04x to ADC\n",
+			value);
+
+		/* wait for conversion to end */
+		for (i = 0; i < S526_AI_TIMEOUT; i++) {
+			status = inw(ADDR_REG(REG_ISR));
+			if (status & ISR_ADC_DONE) {
+				outw(ISR_ADC_DONE, ADDR_REG(REG_ISR));
+				break;
+			}
+		}
+		if (i == S526_AI_TIMEOUT) {
+			a4l_warn(dev, "s526_ai_rinsn: ADC(0x%04x) timeout\n",
+				 inw(ADDR_REG(REG_ISR)));
+			return -ETIMEDOUT;
+		}
+
+		/* read data */
+		d = inw(ADDR_REG(REG_ADD));
+		a4l_dbg(1, drv_dbg, dev, "s526_ai_rinsn: AI[%d]=0x%04x\n",
+			n, (uint16_t)(d & 0xFFFF));
+
+		/* munge data */
+		data[n] = d ^ 0x8000;
+	}
+
+	return 0;
+}
+
+static int s526_ao_winsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_ao_priv *subdpriv =
+	    (struct s526_subd_ao_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int i;
+	int chan = CR_CHAN(insn->chan_desc);
+	uint16_t val;
+
+	val = chan << 1;
+	outw(val, ADDR_REG(REG_DAC));
+
+	for (i = 0; i < insn->data_size / sizeof(uint16_t); i++) {
+		outw(data[i], ADDR_REG(REG_ADD)); /* write the data to preload register */
+		subdpriv->readback[chan] = data[i];
+		outw(val + 1, ADDR_REG(REG_DAC)); /* starts the D/A conversion. */
+	}
+
+	return 0;
+}
+
+static int s526_ao_rinsn(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct s526_subd_ao_priv *subdpriv =
+		(struct s526_subd_ao_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int i;
+	int chan = CR_CHAN(insn->chan_desc);
+
+	for (i = 0; i < insn->data_size / sizeof(uint16_t); i++)
+		data[i] = subdpriv->readback[chan];
+
+	return 0;
+}
+
+static int s526_dio_insn_config(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_dio_priv *subdpriv =
+	    (struct s526_subd_dio_priv *)subd->priv;
+	unsigned int *data = (unsigned int *)insn->data;
+	int chan = CR_CHAN(insn->chan_desc);
+	int group, mask;
+
+	group = chan >> 2;
+	mask = 0xF << (group << 2);
+
+	switch (data[0]) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+		subdpriv->state |= 1 << (group + 10); /* bit 10/11 set the
+						       * group 1/2's mode */
+		subdpriv->io_bits |= mask;
+		break;
+	case A4L_INSN_CONFIG_DIO_INPUT:
+		subdpriv->state &= ~(1 << (group + 10)); /* 1 is output, 0 is
+							  * input. */
+		subdpriv->io_bits &= ~mask;
+		break;
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		data[1] =
+		    (subdpriv->io_bits & mask) ? A4L_OUTPUT : A4L_INPUT;
+		return 0;
+	default:
+		return -EINVAL;
+	}
+
+	outw(subdpriv->state, ADDR_REG(REG_DIO));
+
+	return 0;
+}
+
+static int s526_dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct a4l_device *dev = subd->dev;
+	struct s526_subd_dio_priv *subdpriv =
+		(struct s526_subd_dio_priv *)subd->priv;
+	uint8_t *data = (uint8_t *)insn->data;
+
+	if (insn->data_size != 2 * sizeof(uint8_t))
+		return -EINVAL;
+
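+	/* Following the usual Comedi/Analogy insn_bits convention,
+	   data[0] is the mask of bits to update and data[1] carries
+	   the new values for the masked bits; the current port state
+	   is returned in data[1] */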
+	if (data[0]) {
+		subdpriv->state &= ~(data[0]);
+		subdpriv->state |= data[0] & data[1];
+
+		outw(subdpriv->state, ADDR_REG(REG_DIO));
+	}
+
+	data[1] = inw(ADDR_REG(REG_DIO)) & 0xFF; /* low 8 bits are the data */
+
+	return 0;
+}
+
+/* --- Channels descriptor --- */
+
+static struct a4l_channels_desc s526_chan_desc_gpct = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_GPCT_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_GPCT_BITS},
+	},
+};
+
+static struct a4l_channels_desc s526_chan_desc_ai = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_AI_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_AI_BITS},
+	},
+};
+
+static struct a4l_channels_desc s526_chan_desc_ao = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_AO_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_AO_BITS},
+	},
+};
+
+static struct a4l_channels_desc s526_chan_desc_dio = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = S526_DIO_CHANS,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, S526_DIO_BITS},
+	},
+};
+
+/* --- Subdevice initialization functions --- */
+
+/* General purpose counter/timer (gpct) */
+static void setup_subd_gpct(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_COUNTER;
+	subd->chan_desc = &s526_chan_desc_gpct;
+	subd->insn_read = s526_gpct_rinsn;
+	subd->insn_config = s526_gpct_insn_config;
+	subd->insn_write = s526_gpct_winsn;
+}
+
+/* Analog input subdevice */
+static void setup_subd_ai(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_AI;
+	subd->chan_desc = &s526_chan_desc_ai;
+	subd->rng_desc = &a4l_range_bipolar10;
+	subd->insn_read = s526_ai_rinsn;
+	subd->insn_config = s526_ai_insn_config;
+}
+
+/* Analog output subdevice */
+static void setup_subd_ao(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_AO;
+	subd->chan_desc = &s526_chan_desc_ao;
+	subd->rng_desc = &a4l_range_bipolar10;
+	subd->insn_write = s526_ao_winsn;
+	subd->insn_read = s526_ao_rinsn;
+}
+
+/* Digital i/o subdevice */
+static void setup_subd_dio(struct a4l_subdevice *subd)
+{
+	subd->flags = A4L_SUBD_DIO;
+	subd->chan_desc = &s526_chan_desc_dio;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = s526_dio_insn_bits;
+	subd->insn_config = s526_dio_insn_config;
+}
+
+struct setup_subd {
+	void (*setup_func) (struct a4l_subdevice *);
+	int sizeof_priv;
+};
+
+static struct setup_subd setup_subds[4] = {
+	{
+		.setup_func = setup_subd_gpct,
+		.sizeof_priv = sizeof(struct s526_subd_gpct_priv),
+	},
+	{
+		.setup_func = setup_subd_ai,
+		.sizeof_priv = sizeof(struct s526_subd_ai_priv),
+	},
+	{
+		.setup_func = setup_subd_ao,
+		.sizeof_priv = sizeof(struct s526_subd_ao_priv),
+	},
+	{
+		.setup_func = setup_subd_dio,
+		.sizeof_priv = sizeof(struct s526_subd_dio_priv),
+	},
+};
+
+static int dev_s526_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	int io_base;
+	int i;
+	int err = 0;
+
+	if (arg->opts == NULL || arg->opts_size < sizeof(unsigned long)) {
+		a4l_warn(dev,
+			 "dev_s526_attach: no attach options specified; "
+			 "using defaults: addr=0x%x\n",
+			 S526_DEFAULT_ADDRESS);
+		io_base = S526_DEFAULT_ADDRESS;
+	} else {
+		io_base = ((unsigned long *)arg->opts)[0];
+	}
+
+	if (!request_region(io_base, S526_IOSIZE, "s526")) {
+		a4l_err(dev, "dev_s526_attach: I/O port conflict\n");
+		return -EIO;
+	}
+
+	/* Allocate the subdevice structures. */
+	for (i = 0; i < 4; i++) {
+		struct a4l_subdevice *subd = a4l_alloc_subd(setup_subds[i].sizeof_priv,
+						  setup_subds[i].setup_func);
+
+		if (subd == NULL) {
+			release_region(io_base, S526_IOSIZE);
+			return -ENOMEM;
+		}
+
+		err = a4l_add_subd(dev, subd);
+		if (err != i) {
+			release_region(io_base, S526_IOSIZE);
+			return err;
+		}
+	}
+
+	devpriv->io_base = io_base;
+
+	a4l_info(dev, " attached (address = 0x%x)\n", io_base);
+
+	return 0;
+}
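+
+/* A sketch of a typical user-space attachment, assuming the stock
+ * analogy_config utility (the device file name and base address are
+ * examples only, and the expected driver identifier may be the
+ * board name rather than the driver name):
+ *
+ *	analogy_config analogy0 analogy_s526 0x2c0
+ *
+ * The single option is the board base address, matching the
+ * arg->opts parsing above; without it, S526_DEFAULT_ADDRESS is used.
+ */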
+
+static int dev_s526_detach(struct a4l_device *dev)
+{
+	int err = 0;
+
+	if (devpriv->io_base != 0)
+		release_region(devpriv->io_base, S526_IOSIZE);
+
+	return err;
+}
+
+static struct a4l_driver drv_s526 = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_s526",
+	.driver_name = "s526",
+	.attach = dev_s526_attach,
+	.detach = dev_s526_detach,
+	.privdata_size = sizeof(s526_priv_t),
+};
+
+static int __init drv_s526_init(void)
+{
+	return a4l_register_drv(&drv_s526);
+}
+
+static void __exit drv_s526_cleanup(void)
+{
+	a4l_unregister_drv(&drv_s526);
+}
+
+MODULE_DESCRIPTION("Analogy driver for Sensoray Model 526 board.");
+MODULE_LICENSE("GPL");
+
+module_init(drv_s526_init);
+module_exit(drv_s526_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c
new file mode 100644
index 0000000..a6c9780
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/subdevice.c
@@ -0,0 +1,449 @@
+/*
+ * Analogy for Linux, subdevice, channel and range related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/mman.h>
+#include <asm/io.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+/* --- Common ranges declarations --- */
+
+struct a4l_rngtab rng_bipolar10 = { 1, {
+		RANGE_V(-10, 10),
+	}};
+struct a4l_rngdesc a4l_range_bipolar10 = RNG_GLOBAL(rng_bipolar10);
+
+struct a4l_rngtab rng_bipolar5 = { 1, {
+		RANGE_V(-5, 5),
+	}};
+struct a4l_rngdesc a4l_range_bipolar5 = RNG_GLOBAL(rng_bipolar5);
+
+struct a4l_rngtab rng_unipolar10 = { 1, {
+		RANGE_V(0, 10),
+	}};
+struct a4l_rngdesc a4l_range_unipolar10 = RNG_GLOBAL(rng_unipolar10);
+
+struct a4l_rngtab rng_unipolar5 = { 1, {
+		RANGE_V(0, 5),
+	}};
+struct a4l_rngdesc a4l_range_unipolar5 = RNG_GLOBAL(rng_unipolar5);
+
+struct a4l_rngtab rng_unknown = { 1, {
+		RANGE(0, 1),
+	}};
+struct a4l_rngdesc a4l_range_unknown = RNG_GLOBAL(rng_unknown);
+
+struct a4l_rngtab rng_fake = { 0, {
+		RANGE(0, 0),
+	}};
+struct a4l_rngdesc a4l_range_fake = RNG_GLOBAL(rng_fake);
+
+/* --- Basic channel / range management functions --- */
+
+struct a4l_channel *a4l_get_chfeat(struct a4l_subdevice *sb, int idx)
+{
+	int i = (sb->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? idx : 0;
+	return &(sb->chan_desc->chans[i]);
+}
+
+struct a4l_range *a4l_get_rngfeat(struct a4l_subdevice *sb, int chidx, int rngidx)
+{
+	int i = (sb->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? chidx : 0;
+	return &(sb->rng_desc->rngtabs[i]->rngs[rngidx]);
+}
+
+int a4l_check_chanlist(struct a4l_subdevice *subd,
+		       unsigned char nb_chan, unsigned int *chans)
+{
+	int i, j;
+
+	if (nb_chan > subd->chan_desc->length)
+		return -EINVAL;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+
+		if (CR_CHAN(chans[i]) >= subd->chan_desc->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "chan idx out_of range (%u>=%lu)\n",
+				  CR_CHAN(chans[i]), subd->chan_desc->length);
+			return -EINVAL;
+		}
+		if (CR_AREF(chans[i]) != 0 &&
+		    (CR_AREF(chans[i]) & subd->chan_desc->chans[j].flags) == 0)
+		{
+			__a4l_err("a4l_check_chanlist: "
+				  "bad channel type\n");
+			return -EINVAL;
+		}
+	}
+
+	if (subd->rng_desc == NULL)
+		return 0;
+
+	for (i = 0; i < nb_chan; i++) {
+		j = (subd->rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? i : 0;
+
+		if (CR_RNG(chans[i]) >= subd->rng_desc->rngtabs[j]->length) {
+			__a4l_err("a4l_check_chanlist: "
+				  "rng idx out of range (%u>=%u)\n",
+				  CR_RNG(chans[i]),
+				  subd->rng_desc->rngtabs[j]->length);
+			return -EINVAL;
+		}
+	}
+
+	return 0;
+}
+
+/* --- Upper layer functions --- */
+
+struct a4l_subdevice * a4l_alloc_subd(int sizeof_priv,
+			    void (*setup)(struct a4l_subdevice *))
+{
+	struct a4l_subdevice *subd;
+
+	subd = rtdm_malloc(sizeof(struct a4l_subdevice) + sizeof_priv);
+
+	if(subd != NULL) {
+		memset(subd, 0 , sizeof(struct a4l_subdevice) + sizeof_priv);
+		if(setup != NULL)
+			setup(subd);
+	}
+
+	return subd;
+}
+
+int a4l_add_subd(struct a4l_device * dev, struct a4l_subdevice * subd)
+{
+	struct list_head *this;
+	int i = 0;
+
+	/* Basic checking */
+	if (dev == NULL || subd == NULL)
+		return -EINVAL;
+
+	list_add_tail(&subd->list, &dev->subdvsq);
+
+	subd->dev = dev;
+
+	list_for_each(this, &dev->subdvsq) {
+		i++;
+	}
+
+	subd->idx = --i;
+
+	return i;
+}
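+
+/* A minimal allocation sketch, as seen from a driver attach routine
+ * (hypothetical code; my_priv and my_setup are placeholder names):
+ *
+ *	struct a4l_subdevice *subd;
+ *	int err;
+ *
+ *	subd = a4l_alloc_subd(sizeof(struct my_priv), my_setup);
+ *	if (subd == NULL)
+ *		return -ENOMEM;
+ *
+ *	err = a4l_add_subd(dev, subd);
+ *	if (err < 0)
+ *		return err;
+ *	// on success, a4l_add_subd() returns the subdevice index
+ */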
+
+struct a4l_subdevice *a4l_get_subd(struct a4l_device *dev, int idx)
+{
+	int i = 0;
+	struct a4l_subdevice *subd = NULL;
+	struct list_head *this;
+
+	/* This function is not optimized as we do not go through the
+	   transfer structure */
+
+	list_for_each(this, &dev->subdvsq) {
+		if(idx == i++)
+			subd = list_entry(this, struct a4l_subdevice, list);
+	}
+
+	return subd;
+}
+
+/* --- IOCTL / FOPS functions --- */
+
+int a4l_ioctl_subdinfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	int i, ret = 0;
+	a4l_sbinfo_t *subd_info;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_subdinfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	subd_info = rtdm_malloc(dev->transfer.nb_subd *
+				sizeof(a4l_sbinfo_t));
+	if (subd_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < dev->transfer.nb_subd; i++) {
+		subd_info[i].flags = dev->transfer.subds[i]->flags;
+		subd_info[i].status = dev->transfer.subds[i]->status;
+		subd_info[i].nb_chan =
+			(dev->transfer.subds[i]->chan_desc != NULL) ?
+			dev->transfer.subds[i]->chan_desc->length : 0;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   subd_info, dev->transfer.nb_subd *
+				   sizeof(a4l_sbinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(subd_info);
+
+	return ret;
+}
+
+int a4l_ioctl_nbchaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbchaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbchaninfo: subdevice index "
+			  "out of range\n");
+		return -EINVAL;
+	}
+
+	if(dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL)
+		inarg.info = (void *)0;
+	else
+		inarg.info = (void *)(unsigned long)
+			dev->transfer.subds[inarg.idx_subd]->chan_desc->length;
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_chaninfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_chinfo_t *chan_info;
+	a4l_chinfo_arg_t inarg;
+	struct a4l_channels_desc *chan_desc;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_chaninfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg, arg,
+				     sizeof(a4l_chinfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_chaninfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	chan_desc = dev->transfer.subds[inarg.idx_subd]->chan_desc;
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+
+	if (chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_chaninfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if(rng_desc == NULL)
+		rng_desc = &a4l_range_fake;
+
+	chan_info = rtdm_malloc(chan_desc->length * sizeof(a4l_chinfo_t));
+	if (chan_info == NULL)
+		return -ENOMEM;
+
+	/* If the channel descriptor is global, the fields are filled
+	   with the same instance of channel descriptor */
+	for (i = 0; i < chan_desc->length; i++) {
+		int j =
+			(chan_desc->mode != A4L_CHAN_GLOBAL_CHANDESC) ? i : 0;
+		int k = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ? i : 0;
+
+		chan_info[i].chan_flags = chan_desc->chans[j].flags;
+		chan_info[i].nb_bits = chan_desc->chans[j].nb_bits;
+		chan_info[i].nb_rng = rng_desc->rngtabs[k]->length;
+
+		if (chan_desc->mode == A4L_CHAN_GLOBAL_CHANDESC)
+			chan_info[i].chan_flags |= A4L_CHAN_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   chan_info,
+				   chan_desc->length *
+				   sizeof(a4l_chinfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(chan_info);
+
+	return ret;
+}
+
+int a4l_ioctl_nbrnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	int i;
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	a4l_rnginfo_arg_t inarg;
+	struct a4l_rngdesc *rng_desc;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_nbrnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_nbrnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_nbrnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc != NULL) {
+		i = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+			inarg.idx_chan : 0;
+		inarg.info = (void *)(unsigned long)
+			rng_desc->rngtabs[i]->length;
+	} else
+		inarg.info = (void *)0;
+
+	if (rtdm_safe_copy_to_user(fd,
+				   arg,
+				   &inarg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+int a4l_ioctl_rnginfo(struct a4l_device_context * cxt, void *arg)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(cxt);
+	int i, ret = 0;
+	unsigned int tmp;
+	struct a4l_device *dev = a4l_get_dev(cxt);
+	struct a4l_rngdesc *rng_desc;
+	a4l_rnginfo_t *rng_info;
+	a4l_rnginfo_arg_t inarg;
+
+	/* Basic checking */
+	if (!test_bit(A4L_DEV_ATTACHED_NR, &dev->flags)) {
+		__a4l_err("a4l_ioctl_rnginfo: unattached device\n");
+		return -EINVAL;
+	}
+
+	if (rtdm_safe_copy_from_user(fd,
+				     &inarg,
+				     arg, sizeof(a4l_rnginfo_arg_t)) != 0)
+		return -EFAULT;
+
+	if (inarg.idx_subd >= dev->transfer.nb_subd) {
+		__a4l_err("a4l_ioctl_rnginfo: bad subdevice index\n");
+		return -EINVAL;
+	}
+
+	if (dev->transfer.subds[inarg.idx_subd]->chan_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no channel descriptor "
+			  "for subdevice %d\n", inarg.idx_subd);
+		return -EINVAL;
+	}
+
+	if (inarg.idx_chan >=
+	    dev->transfer.subds[inarg.idx_subd]->chan_desc->length) {
+		__a4l_err("a4l_ioctl_rnginfo: bad channel index\n");
+		return -EINVAL;
+	}
+
+	rng_desc = dev->transfer.subds[inarg.idx_subd]->rng_desc;
+	if (rng_desc == NULL) {
+		__a4l_err("a4l_ioctl_rnginfo: no range descriptor "
+			  "for channel %d\n", inarg.idx_chan);
+		return -EINVAL;
+	}
+
+	/* If the range descriptor is global,
+	   we take the first instance */
+	tmp = (rng_desc->mode != A4L_RNG_GLOBAL_RNGDESC) ?
+		inarg.idx_chan : 0;
+
+	rng_info = rtdm_malloc(rng_desc->rngtabs[tmp]->length *
+			       sizeof(a4l_rnginfo_t));
+	if (rng_info == NULL)
+		return -ENOMEM;
+
+	for (i = 0; i < rng_desc->rngtabs[tmp]->length; i++) {
+		rng_info[i].min = rng_desc->rngtabs[tmp]->rngs[i].min;
+		rng_info[i].max = rng_desc->rngtabs[tmp]->rngs[i].max;
+		rng_info[i].flags = rng_desc->rngtabs[tmp]->rngs[i].flags;
+
+		if (rng_desc->mode == A4L_RNG_GLOBAL_RNGDESC)
+			rng_info[i].flags |= A4L_RNG_GLOBAL;
+	}
+
+	if (rtdm_safe_copy_to_user(fd,
+				   inarg.info,
+				   rng_info,
+				   rng_desc->rngtabs[tmp]->length *
+				   sizeof(a4l_rnginfo_t)) != 0)
+		ret = -EFAULT;
+
+	rtdm_free(rng_info);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig
new file mode 100644
index 0000000..15db782
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Kconfig
@@ -0,0 +1,13 @@
+
+config XENO_DRIVERS_ANALOGY_FAKE
+	depends on XENO_DRIVERS_ANALOGY
+	tristate "Fake driver"
+	default n
+	help
+
+	The fake driver exposes several subdevices:
+	- 0: analog input;
+	- 1: digital input / output;
+	- 2: analog output;
+	- 3: analog input; data written into subdevice 2 can be
+	  read back here.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile
new file mode 100644
index 0000000..e92e5bc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/Makefile
@@ -0,0 +1,8 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/analogy
+
+obj-$(CONFIG_XENO_DRIVERS_ANALOGY_FAKE) += analogy_fake.o
+
+analogy_fake-y := fake.o
+
+analogy_loop-y := loop.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c
new file mode 100644
index 0000000..c80c1cc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/fake.c
@@ -0,0 +1,686 @@
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define TASK_PERIOD 1000000
+
+#define AI_SUBD 0
+#define DIO_SUBD 1
+#define AO_SUBD 2
+#define AI2_SUBD 3
+
+#define TRANSFER_SIZE 0x1000
+
+/* --- Driver related structures --- */
+struct fake_priv {
+	/* Attach configuration parameters
+	   (they should be relocated in ai_priv) */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+
+	/* Task descriptor */
+	rtdm_task_t task;
+
+	/* Statuses of the asynchronous subdevices */
+	int ai_running;
+	int ao_running;
+	int ai2_running;
+};
+
+struct ai_priv {
+
+	/* Specific timing fields */
+	unsigned long scan_period_ns;
+	unsigned long convert_period_ns;
+	unsigned long current_ns;
+	unsigned long reminder_ns;
+	unsigned long long last_ns;
+
+	/* Misc fields */
+	unsigned long amplitude_div;
+	unsigned long quanta_cnt;
+};
+
+struct ao_ai2_priv {
+	/* Asynchronous loop stuff */
+	uint8_t buffer[TRANSFER_SIZE];
+	int count;
+	/* Synchronous loop stuff */
+	uint16_t insn_value;
+};
+
+struct dio_priv {
+	/* Bits status */
+	uint16_t bits_values;
+};
+
+/* --- Channels / ranges part --- */
+
+/* Channels descriptors */
+
+static struct a4l_channels_desc analog_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 16},
+	},
+};
+
+static struct a4l_channels_desc dio_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 16,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, 1},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab analog_rngtab = {
+	.length = 2,
+	.rngs = {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+/* Ranges descriptor */
+static struct a4l_rngdesc analog_rngdesc = RNG_GLOBAL(analog_rngtab);
+
+/* Command options masks */
+
+static struct a4l_cmd_desc ai_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
+
+static struct a4l_cmd_desc ao_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT | TRIG_NONE,
+};
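+
+/* These masks are matched against user-submitted commands by the
+ * Analogy core: each trigger source of a command must be one of the
+ * bits set in the corresponding *_src field.  For instance, an AI
+ * command using scan_begin_src = TRIG_TIMER and stop_src = TRIG_NONE
+ * passes the mask above, whereas scan_begin_src = TRIG_EXT would be
+ * rejected.
+ */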
+
+/* --- Analog input simulation --- */
+
+/* --- Values generation for 1st AI --- */
+
+static inline uint16_t ai_value_output(struct ai_priv *priv)
+{
+	static uint16_t output_tab[8] = {
+		0x0001, 0x2000, 0x4000, 0x6000,
+		0x8000, 0xa000, 0xc000, 0xffff
+	};
+	static unsigned int output_idx;
+	static DEFINE_RTDM_LOCK(output_lock);
+
+	unsigned long flags;
+	unsigned int idx;
+
+	rtdm_lock_get_irqsave(&output_lock, flags);
+
+	output_idx += priv->quanta_cnt;
+	if (output_idx >= 8)
+		output_idx %= 8;
+	idx = output_idx;
+
+	rtdm_lock_put_irqrestore(&output_lock, flags);
+
+	return output_tab[idx] / priv->amplitude_div;
+}
+
+int ai_push_values(struct a4l_subdevice *subd)
+{
+	uint64_t now_ns, elapsed_ns = 0;
+	struct a4l_cmd_desc *cmd;
+	struct ai_priv *priv;
+	int i = 0;
+
+	if (!subd)
+		return -EINVAL;
+
+	priv = (struct ai_priv *)subd->priv;
+
+	cmd = a4l_get_cmd(subd);
+	if (!cmd)
+		return -EPIPE;
+
+	now_ns = a4l_get_time();
+	elapsed_ns += now_ns - priv->last_ns + priv->reminder_ns;
+	priv->last_ns = now_ns;
+
+	while(elapsed_ns >= priv->scan_period_ns) {
+		int j;
+
+		for(j = 0; j < cmd->nb_chan; j++) {
+			uint16_t value = ai_value_output(priv);
+			a4l_buf_put(subd, &value, sizeof(uint16_t));
+		}
+
+		elapsed_ns -= priv->scan_period_ns;
+		i++;
+	}
+
+	priv->current_ns += i * priv->scan_period_ns;
+	priv->reminder_ns = elapsed_ns;
+
+	if (i != 0)
+		a4l_buf_evt(subd, 0);
+
+	return 0;
+}
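+
+/* The arithmetic above is a plain accumulator: the time elapsed
+ * since the previous call (plus the leftover carried in reminder_ns)
+ * is converted into a whole number of scan periods, one sample per
+ * channel being pushed for each elapsed scan; the sub-period
+ * leftover is saved back into reminder_ns for the next iteration.
+ */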
+
+/* --- Data retrieval for AO --- */
+
+int ao_pull_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv;
+	int err;
+
+	/* Let's have a look at how many samples are available */
+	priv->count = a4l_buf_count(subd) < TRANSFER_SIZE ?
+		      a4l_buf_count(subd) : TRANSFER_SIZE;
+
+	if (!priv->count)
+		return 0;
+
+	err = a4l_buf_get(subd, priv->buffer, priv->count);
+	if (err < 0) {
+		a4l_err(subd->dev, "ao_get_values: a4l_buf_get failed (err=%d)\n", err);
+		priv->count = 0;
+		return err;
+
+	}
+
+	a4l_info(subd->dev, " %d bytes added to private buffer from async p=%p\n",
+		priv->count, subd->buf->buf);
+
+	a4l_buf_evt(subd, 0);
+
+	return 0;
+}
+
+/* --- Data redirection for 2nd AI (from AO) --- */
+
+int ai2_push_values(struct a4l_subdevice *subd)
+{
+	struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv);
+	int err = 0;
+
+	if (priv->count) {
+		err = a4l_buf_put(subd, priv->buffer, priv->count);
+
+		/* If there is no more room in the asynchronous
+		   buffer, data is likely to be dropped; this is just
+		   a test driver, so there is no need for a trickier
+		   scheme */
+		err = (err == -EAGAIN) ? 0 : err;
+
+		a4l_info(subd->dev, "%d bytes added to async buffer p=%p\n",
+			priv->count, subd->buf->buf);
+
+		priv->count = 0;
+		if (err < 0)
+			a4l_err(subd->dev,
+				"ai2_push_values: "
+				"a4l_buf_put failed (err=%d)\n", err);
+		else
+			a4l_buf_evt(subd, 0);
+	}
+
+	return err;
+}
+
+/* --- Asynchronous AI functions --- */
+
+static int ai_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ai_priv *ai_priv = (struct ai_priv *)subd->priv;
+
+	ai_priv->scan_period_ns = cmd->scan_begin_arg;
+	ai_priv->convert_period_ns = (cmd->convert_src == TRIG_TIMER) ?
+		cmd->convert_arg : 0;
+
+	a4l_dbg(1, drv_dbg, subd->dev, "scan_period=%luns convert_period=%luns\n",
+		ai_priv->scan_period_ns, ai_priv->convert_period_ns);
+
+	ai_priv->last_ns = a4l_get_time();
+
+	ai_priv->current_ns = ((unsigned long)ai_priv->last_ns);
+	ai_priv->reminder_ns = 0;
+
+	priv->ai_running = 1;
+
+	return 0;
+}
+
+static int ai_cmdtest(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	if(cmd->scan_begin_src == TRIG_TIMER)
+	{
+		if (cmd->scan_begin_arg < 1000)
+			return -EINVAL;
+
+		if (cmd->convert_src == TRIG_TIMER &&
+		    cmd->scan_begin_arg < (cmd->convert_arg * cmd->nb_chan))
+			return -EINVAL;
+	}
+
+	return 0;
+}
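+
+/* For instance, with the checks above, a scan of 8 channels
+ * converted with TRIG_TIMER every 10000 ns requires
+ * scan_begin_arg >= 8 * 10000 = 80000 ns; a shorter scan period
+ * would make the conversions overrun the scan and is rejected.
+ */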
+
+static void ai_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	priv->ai_running = 0;
+}
+
+static void ai_munge(struct a4l_subdevice *subd, void *buf, unsigned long size)
+{
+	int i;
+
+	for(i = 0; i < size / sizeof(uint16_t); i++)
+		((uint16_t *)buf)[i] += 1;
+}
+
+/* --- Asynchronous AO functions --- */
+
+int ao_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	return 0;
+}
+
+int ao_trigger(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 1;
+	return 0;
+}
+
+void ao_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ao_priv = (struct ao_ai2_priv *)subd->priv;
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ao_running = 0;
+
+	running = priv->ai2_running;
+	if (running) {
+		struct a4l_subdevice *ai2_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AI2_SUBD);
+		/* Here, we have not kept track of how much data was
+		   transferred, so we cannot tell whether this is the
+		   end of the acquisition; that is why we force it */
+		priv->ai2_running = 0;
+		ao_priv->count = 0;
+
+		a4l_info(subd->dev, "subd %d cancelling subd %d too \n",
+			subd->idx, AI2_SUBD);
+
+		a4l_buf_evt(ai2_subd, A4L_BUF_EOA);
+	}
+}
+
+/* --- Asynchronous 2nd AI functions --- */
+
+int ai2_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 1;
+	return 0;
+}
+
+void ai2_cancel(struct a4l_subdevice *subd)
+{
+	struct fake_priv *priv = (struct fake_priv *)subd->dev->priv;
+	struct ao_ai2_priv *ai2_priv = *((struct ao_ai2_priv **)subd->priv);
+
+	int running;
+
+	a4l_info(subd->dev, "(subd=%d)\n", subd->idx);
+	priv->ai2_running = 0;
+
+	running = priv->ao_running;
+	if (running) {
+		struct a4l_subdevice *ao_subd =
+			(struct a4l_subdevice *)a4l_get_subd(subd->dev, AO_SUBD);
+		/* We have not tracked how much data was saved, so we
+		   cannot tell whether the acquisition has completed;
+		   force the end-of-acquisition event instead. */
+		priv->ao_running = 0;
+		ai2_priv->count = 0;
+
+		a4l_info(subd->dev, "subd %d cancelling subd %d too\n",
+			 subd->idx, AO_SUBD);
+
+		a4l_buf_evt(ao_subd, A4L_BUF_EOA);
+	}
+}
+
+
+/* --- Synchronous AI functions --- */
+
+static int ai_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ai_priv *priv = (struct ai_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+	int i;
+
+	for (i = 0; i < insn->data_size / sizeof(uint16_t); i++)
+		data[i] = ai_value_output(priv);
+
+	return 0;
+}
+
+/* --- Synchronous DIO function --- */
+
+static int dio_insn_bits(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct dio_priv *priv = (struct dio_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
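+	/* Bits instruction: data[0] carries the write mask, data[1] the
+	   values to set; the current state is handed back in data[1]. */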
+	if (insn->data_size != 2 * sizeof(uint16_t))
+		return -EINVAL;
+
+	if (data[0] != 0) {
+		priv->bits_values &= ~(data[0]);
+		priv->bits_values |= (data[0] & data[1]);
+	}
+
+	data[1] = priv->bits_values;
+
+	return 0;
+}
+
+/* --- Synchronous AO + AI2 functions --- */
+
+int ao_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ao_ai2_priv *priv = (struct ao_ai2_priv *)subd->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Retrieves the value to memorize */
+	priv->insn_value = data[0];
+
+	return 0;
+}
+
+int ai2_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	struct ao_ai2_priv *priv = *((struct ao_ai2_priv **)subd->priv);
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Sets the memorized value */
+	data[0] = priv->insn_value;
+
+	return 0;
+}
+
+/* --- Global task part --- */
+
+/* One task is enough for all the asynchronous subdevices; it is just a fake
+ * driver after all.
+ */
+static void task_proc(void *arg)
+{
+	struct a4l_subdevice *ai_subd, *ao_subd, *ai2_subd;
+	struct a4l_device *dev;
+	struct fake_priv *priv;
+	int running;
+
+	dev = arg;
+	ai_subd = a4l_get_subd(dev, AI_SUBD);
+	ao_subd = a4l_get_subd(dev, AO_SUBD);
+	ai2_subd = a4l_get_subd(dev, AI2_SUBD);
+
+	priv = dev->priv;
+
+	while(!rtdm_task_should_stop()) {
+
+		/* copy sample static data from the subd private buffer to the
+		 * asynchronous buffer
+		 */
+		running = priv->ai_running;
+		if (running && ai_push_values(ai_subd) < 0) {
+			/* on error, wait for detach to destroy the task */
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		/*
+		 * pull the data from the output subdevice (asynchronous buffer)
+		 * into its private buffer
+		 */
+		running = priv->ao_running;
+		if (running && ao_pull_values(ao_subd) < 0) {
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		running = priv->ai2_running;
+		/*
+		 * then loop it to the ai2 subd since their private data is shared: so
+		 * pull the data from the private buffer back into the device's
+		 * asynchronous buffer
+		 */
+		if (running && ai2_push_values(ai2_subd) < 0) {
+			rtdm_task_sleep(RTDM_TIMEOUT_INFINITE);
+			continue;
+		}
+
+		rtdm_task_sleep(TASK_PERIOD);
+	}
+}
+
+/* --- Initialization functions --- */
+
+void setup_ai_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ai_cmd;
+	subd->do_cmdtest = ai_cmdtest;
+	subd->cancel = ai_cancel;
+	subd->munge = ai_munge;
+	subd->cmd_mask = &ai_cmd_mask;
+	subd->insn_read = ai_insn_read;
+}
+
+void setup_dio_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_DIO;
+	subd->chan_desc = &dio_chandesc;
+	subd->rng_desc = &range_digital;
+	subd->insn_bits = dio_insn_bits;
+}
+
+void setup_ao_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ao_cmd;
+	subd->cancel = ao_cancel;
+	subd->trigger = ao_trigger;
+	subd->cmd_mask = &ao_cmd_mask;
+	subd->insn_write = ao_insn_write;
+}
+
+void setup_ai2_subd(struct a4l_subdevice *subd)
+{
+	/* Fill the subdevice structure */
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &analog_rngdesc;
+	subd->chan_desc = &analog_chandesc;
+	subd->do_cmd = ai2_cmd;
+	subd->cancel = ai2_cancel;
+	subd->cmd_mask = &ai_cmd_mask;
+	subd->insn_read = ai2_insn_read;
+}
+
+/* --- Attach / detach functions ---  */
+
+int test_attach(struct a4l_device *dev, a4l_lnkdesc_t *arg)
+{
+	typedef void (*setup_subd_function) (struct a4l_subdevice *subd);
+	struct fake_priv *priv = (struct fake_priv *) dev->priv;
+	struct a4l_subdevice *subd;
+	unsigned long tmp;
+	struct ai_priv *r;
+	int i, ret = 0;
+
+	struct initializers {
+		struct a4l_subdevice *subd;
+		setup_subd_function init;
+		int private_len;
+		char *name;
+		int index;
+	} sds[] = {
+		[AI_SUBD] = {
+			.name = "AI",
+			.private_len = sizeof(struct ai_priv),
+			.init = setup_ai_subd,
+			.index = AI_SUBD,
+			.subd = NULL,
+		},
+		[DIO_SUBD] = {
+			.name = "DIO",
+			.private_len = sizeof(struct dio_priv),
+			.init = setup_dio_subd,
+			.index = DIO_SUBD,
+			.subd = NULL,
+		},
+		[AO_SUBD] = {
+			.name = "AO",
+			.private_len = sizeof(struct ao_ai2_priv),
+			.init = setup_ao_subd,
+			.index = AO_SUBD,
+			.subd = NULL,
+		},
+		[AI2_SUBD] = {
+			.name = "AI2",
+			.private_len = sizeof(struct ao_ai2_priv *),
+			.init = setup_ai2_subd,
+			.index = AI2_SUBD,
+			.subd = NULL,
+		},
+	};
+
+	a4l_dbg(1, drv_dbg, dev, "starting attach procedure...\n");
+
+	/* Set default values for attach parameters */
+	priv->amplitude_div = 1;
+	priv->quanta_cnt = 1;
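+	/* Optional attach arguments: args[0] overrides the amplitude
+	   divisor; args[1], if present, sets the quanta count and falls
+	   back to 1 when outside the 1..7 range. */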
+	if (arg->opts_size) {
+		unsigned long *args = (unsigned long *)arg->opts;
+		priv->amplitude_div = args[0];
+		if (arg->opts_size == 2 * sizeof(unsigned long))
+			priv->quanta_cnt = (args[1] > 7 || args[1] == 0) ?
+				1 : args[1];
+	}
+
+	/* create and register the subdevices */
+	for (i = 0; i < ARRAY_SIZE(sds); i++) {
+
+		subd = a4l_alloc_subd(sds[i].private_len, sds[i].init);
+		if (subd == NULL)
+			return -ENOMEM;
+
+		ret = a4l_add_subd(dev, subd);
+		if (ret != sds[i].index)
+			return (ret < 0) ? ret : -EINVAL;
+
+		sds[i].subd = subd;
+
+		a4l_dbg(1, drv_dbg, dev, " %s subdev registered\n", sds[i].name);
+	}
+
+	/* initialize specifics */
+	r = (void *) sds[AI_SUBD].subd->priv;
+	r->amplitude_div = priv->amplitude_div;
+	r->quanta_cnt = priv->quanta_cnt;
+
+	/* AO and AI2 share their private data: store a pointer to the
+	   AO private area into the AI2 private area */
+	tmp = (unsigned long) sds[AO_SUBD].subd->priv;
+	memcpy(sds[AI2_SUBD].subd->priv, &tmp, sds[AI2_SUBD].private_len);
+
+	/* create the task */
+	ret = rtdm_task_init(&priv->task, "Fake AI task", task_proc, dev,
+		             RTDM_TASK_HIGHEST_PRIORITY, 0);
+	if (ret)
+		a4l_dbg(1, drv_dbg, dev, "Error creating A4L task\n");
+
+	a4l_dbg(1, drv_dbg, dev, "attach procedure completed: "
+				 "adiv = %lu, qcount = %lu\n",
+		priv->amplitude_div, priv->quanta_cnt);
+
+	return ret;
+}
+
+int test_detach(struct a4l_device *dev)
+{
+	struct fake_priv *priv = (struct fake_priv *)dev->priv;
+
+	rtdm_task_destroy(&priv->task);
+	a4l_dbg(1, drv_dbg, dev, "detach procedure complete\n");
+
+	return 0;
+}
+
+/* --- Module stuff --- */
+
+static struct a4l_driver test_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_fake",
+	.driver_name = "fake",
+	.attach = test_attach,
+	.detach = test_detach,
+	.privdata_size = sizeof(struct fake_priv),
+};
+
+static int __init a4l_fake_init(void)
+{
+	return a4l_register_drv(&test_drv);
+}
+
+static void __exit a4l_fake_cleanup(void)
+{
+	a4l_unregister_drv(&test_drv);
+}
+
+MODULE_DESCRIPTION("Analogy fake driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_fake_init);
+module_exit(a4l_fake_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c
new file mode 100644
index 0000000..aaef81d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/testing/loop.c
@@ -0,0 +1,285 @@
+#include <linux/module.h>
+#include <rtdm/analogy/device.h>
+
+#define LOOP_TASK_PERIOD 1000000
+#define LOOP_NB_BITS 16
+
+#define LOOP_INPUT_SUBD 0
+#define LOOP_OUTPUT_SUBD 1
+
+/* Channels descriptor */
+static struct a4l_channels_desc loop_chandesc = {
+	.mode = A4L_CHAN_GLOBAL_CHANDESC,
+	.length = 8,
+	.chans = {
+		{A4L_CHAN_AREF_GROUND, LOOP_NB_BITS},
+	},
+};
+
+/* Ranges tab */
+static struct a4l_rngtab loop_rngtab = {
+	.length =  2,
+	.rngs = {
+		RANGE_V(-5,5),
+		RANGE_V(-10,10),
+	},
+};
+/* Ranges descriptor */
+struct a4l_rngdesc loop_rngdesc = RNG_GLOBAL(loop_rngtab);
+
+/* Command options mask */
+static struct a4l_cmd_desc loop_cmd_mask = {
+	.idx_subd = 0,
+	.start_src = TRIG_NOW | TRIG_INT,
+	.scan_begin_src = TRIG_TIMER,
+	.convert_src = TRIG_NOW | TRIG_TIMER,
+	.scan_end_src = TRIG_COUNT,
+	.stop_src = TRIG_COUNT| TRIG_NONE,
+};
+
+/* Private data organization */
+struct loop_priv {
+
+	/* Task descriptor */
+	rtdm_task_t loop_task;
+
+	/* Misc fields */
+	int loop_running;
+	uint16_t loop_insn_value;
+};
+typedef struct loop_priv lpprv_t;
+
+/* Attach arguments contents */
+struct loop_attach_arg {
+	unsigned long period;
+};
+typedef struct loop_attach_arg lpattr_t;
+
+static void loop_task_proc(void *arg);
+
+/* --- Task part --- */
+
+/* Timer task routine */
+static void loop_task_proc(void *arg)
+{
+	struct a4l_device *dev = (struct a4l_device *)arg;
+	struct a4l_subdevice *input_subd, *output_subd;
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	input_subd = a4l_get_subd(dev, LOOP_INPUT_SUBD);
+	output_subd = a4l_get_subd(dev, LOOP_OUTPUT_SUBD);
+
+	if (input_subd == NULL || output_subd == NULL) {
+		a4l_err(dev, "loop_task_proc: subdevices unavailable\n");
+		return;
+	}
+
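+	/* Forward every sample written into the output subdevice's ring
+	   buffer to the input subdevice's ring buffer, as long as the
+	   loopback has been enabled by a trigger. */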
+	while (1) {
+
+		int running;
+
+		running = priv->loop_running;
+
+		if (running) {
+			uint16_t value;
+			int ret = 0;
+
+			while (ret == 0) {
+
+				ret = a4l_buf_get(output_subd,
+						  &value, sizeof(uint16_t));
+				if (ret == 0) {
+
+					a4l_info(dev,
+						 "loop_task_proc: "
+						 "data available\n");
+
+					a4l_buf_evt(output_subd, 0);
+
+					ret = a4l_buf_put(input_subd,
+							  &value,
+							  sizeof(uint16_t));
+
+					if (ret == 0)
+						a4l_buf_evt(input_subd, 0);
+				}
+			}
+		}
+
+		rtdm_task_sleep(LOOP_TASK_PERIOD);
+	}
+}
+
+/* --- Analogy Callbacks --- */
+
+/* Command callback */
+int loop_cmd(struct a4l_subdevice *subd, struct a4l_cmd_desc *cmd)
+{
+	a4l_info(subd->dev, "loop_cmd: (subd=%d)\n", subd->idx);
+
+	return 0;
+}
+
+/* Trigger callback */
+int loop_trigger(struct a4l_subdevice *subd, lsampl_t trignum)
+{
+	lpprv_t *priv = (lpprv_t *)subd->dev->priv;
+
+	a4l_info(subd->dev, "loop_trigger: (subd=%d)\n", subd->idx);
+
+	priv->loop_running = 1;
+
+	return 0;
+}
+
+/* Cancel callback */
+void loop_cancel(struct a4l_subdevice *subd)
+{
+	lpprv_t *priv = (lpprv_t *)subd->dev->priv;
+
+	a4l_info(subd->dev, "loop_cancel: (subd=%d)\n", subd->idx);
+
+	priv->loop_running = 0;
+}
+
+/* Read instruction callback */
+int loop_insn_read(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	lpprv_t *priv = (lpprv_t*)subd->dev->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Sets the memorized value */
+	data[0] = priv->loop_insn_value;
+
+	return 0;
+}
+
+/* Write instruction callback */
+int loop_insn_write(struct a4l_subdevice *subd, struct a4l_kernel_instruction *insn)
+{
+	lpprv_t *priv = (lpprv_t*)subd->dev->priv;
+	uint16_t *data = (uint16_t *)insn->data;
+
+	/* Checks the buffer size */
+	if (insn->data_size != sizeof(uint16_t))
+		return -EINVAL;
+
+	/* Retrieves the value to memorize */
+	priv->loop_insn_value = data[0];
+
+	return 0;
+}
+
+void setup_input_subd(struct a4l_subdevice *subd)
+{
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	subd->flags |= A4L_SUBD_AI;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &loop_rngdesc;
+	subd->chan_desc = &loop_chandesc;
+	subd->do_cmd = loop_cmd;
+	subd->cancel = loop_cancel;
+	subd->cmd_mask = &loop_cmd_mask;
+	subd->insn_read = loop_insn_read;
+	subd->insn_write = loop_insn_write;
+}
+
+void setup_output_subd(struct a4l_subdevice *subd)
+{
+	memset(subd, 0, sizeof(struct a4l_subdevice));
+
+	subd->flags = A4L_SUBD_AO;
+	subd->flags |= A4L_SUBD_CMD;
+	subd->flags |= A4L_SUBD_MMAP;
+	subd->rng_desc = &loop_rngdesc;
+	subd->chan_desc = &loop_chandesc;
+	subd->do_cmd = loop_cmd;
+	subd->cancel = loop_cancel;
+	subd->trigger = loop_trigger;
+	subd->cmd_mask = &loop_cmd_mask;
+	subd->insn_read = loop_insn_read;
+	subd->insn_write = loop_insn_write;
+}
+
+/* Attach callback */
+int loop_attach(struct a4l_device *dev,
+		a4l_lnkdesc_t *arg)
+{
+	int ret = 0;
+	struct a4l_subdevice *subd;
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	/* Add the fake input subdevice */
+	subd = a4l_alloc_subd(0, setup_input_subd);
+	if (subd == NULL)
+		return -ENOMEM;
+
+	ret = a4l_add_subd(dev, subd);
+	if (ret != LOOP_INPUT_SUBD)
+		/* Let Analogy free the previously allocated subdevice */
+		return (ret < 0) ? ret : -EINVAL;
+
+	/* Add the fake output subdevice */
+	subd = a4l_alloc_subd(0, setup_output_subd);
+	if (subd == NULL)
+		/* Let Analogy free the previously allocated subdevice */
+		return -ENOMEM;
+
+	ret = a4l_add_subd(dev, subd);
+	if (ret != LOOP_OUTPUT_SUBD)
+		/* Let Analogy free the previously allocated subdevices */
+		return (ret < 0) ? ret : -EINVAL;
+
+	priv->loop_running = 0;
+	priv->loop_insn_value = 0;
+
+	ret = rtdm_task_init(&priv->loop_task,
+			    "a4l_loop task",
+			    loop_task_proc,
+			    dev, RTDM_TASK_HIGHEST_PRIORITY, 0);
+
+	return ret;
+}
+
+/* Detach callback */
+int loop_detach(struct a4l_device *dev)
+{
+	lpprv_t *priv = (lpprv_t *)dev->priv;
+
+	rtdm_task_destroy(&priv->loop_task);
+
+	return 0;
+}
+
+/* --- Module part --- */
+
+static struct a4l_driver loop_drv = {
+	.owner = THIS_MODULE,
+	.board_name = "analogy_loop",
+	.attach = loop_attach,
+	.detach = loop_detach,
+	.privdata_size = sizeof(lpprv_t),
+};
+
+static int __init a4l_loop_init(void)
+{
+	return a4l_register_drv(&loop_drv);
+}
+
+static void __exit a4l_loop_cleanup(void)
+{
+	a4l_unregister_drv(&loop_drv);
+}
+
+MODULE_DESCRIPTION("Analogy loop driver");
+MODULE_LICENSE("GPL");
+
+module_init(a4l_loop_init);
+module_exit(a4l_loop_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c
new file mode 100644
index 0000000..bf19c8c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/analogy/transfer.c
@@ -0,0 +1,259 @@
+/*
+ * Analogy for Linux, transfer related features
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/fs.h>
+#include <asm/errno.h>
+#include <rtdm/analogy/device.h>
+
+#include "proc.h"
+
+/* --- Initialization / cleanup / cancel functions --- */
+
+int a4l_precleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+	int i, err = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	if (tsf == NULL) {
+		__a4l_err("a4l_precleanup_transfer: "
+			  "incoherent status, transfer block not reachable\n");
+		return -ENODEV;
+	}
+
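+	/* Mark each idle subdevice both BUSY and CLEAN; if any subdevice
+	   is already busy, roll the markings back and bail out. */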
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		__a4l_dbg(1, core_dbg, "subd[%d]->status=0x%08lx\n", i, *status);
+
+		if (test_and_set_bit(A4L_SUBD_BUSY, status)) {
+			__a4l_err("a4l_precleanup_transfer: "
+				  "device busy, acquisition occurring\n");
+			err = -EBUSY;
+			goto out_error;
+		} else
+			set_bit(A4L_SUBD_CLEAN, status);
+	}
+
+	return 0;
+
+out_error:
+	for (i = 0; i < tsf->nb_subd; i++) {
+		unsigned long *status = &tsf->subds[i]->status;
+
+		if (test_bit(A4L_SUBD_CLEAN, status)){
+			clear_bit(A4L_SUBD_BUSY, status);
+			clear_bit(A4L_SUBD_CLEAN, status);
+		}
+	}
+
+	return err;
+}
+
+int a4l_cleanup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Release the pointer array, if need be */
+	if (tsf->subds != NULL) {
+		rtdm_free(tsf->subds);
+	}
+
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	return 0;
+}
+
+void a4l_presetup_transfer(struct a4l_device_context *cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Clear the structure */
+	memset(tsf, 0, sizeof(struct a4l_transfer));
+
+	tsf->default_bufsize = A4L_BUF_DEFSIZE;
+
+	/* 0 is also considered a valid IRQ number, so the IRQ descriptor
+	   must be initialized with another value */
+	tsf->irq_desc.irq = A4L_IRQ_UNUSED;
+}
+
+int a4l_setup_transfer(struct a4l_device_context * cxt)
+{
+	struct a4l_device *dev = NULL;
+	struct a4l_transfer *tsf;
+	struct list_head *this;
+	int i = 0, ret = 0;
+
+	dev = a4l_get_dev(cxt);
+	tsf = &dev->transfer;
+
+	/* Recover the subdevice count
+	   (as the subdevices are registered in a linked list) */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->nb_subd++;
+	}
+
+	__a4l_dbg(1, core_dbg, "nb_subd=%d\n", tsf->nb_subd);
+
+	/* Allocate a suitable array for the subdevice pointers */
+	tsf->subds = rtdm_malloc(tsf->nb_subd * sizeof(struct a4l_subdevice *));
+	if (tsf->subds == NULL) {
+		__a4l_err("a4l_setup_transfer: call1(alloc) failed\n");
+		ret = -ENOMEM;
+		goto out_setup_tsf;
+	}
+
+	/* Recover the subdevice pointers */
+	list_for_each(this, &dev->subdvsq) {
+		tsf->subds[i++] = list_entry(this, struct a4l_subdevice, list);
+	}
+
+out_setup_tsf:
+
+	if (ret != 0)
+		a4l_cleanup_transfer(cxt);
+
+	return ret;
+}
+
+/* --- IRQ handling section --- */
+
+int a4l_request_irq(struct a4l_device * dev,
+		    unsigned int irq,
+		    a4l_irq_hdlr_t handler,
+		    unsigned long flags, void *cookie)
+{
+	int ret;
+
+	if (dev->transfer.irq_desc.irq != A4L_IRQ_UNUSED)
+		return -EBUSY;
+
+	ret = __a4l_request_irq(&dev->transfer.irq_desc, irq, handler, flags,
+		cookie);
+	if (ret != 0) {
+		__a4l_err("a4l_request_irq: IRQ registration failed\n");
+		dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+	}
+
+	return ret;
+}
+
+int a4l_free_irq(struct a4l_device * dev, unsigned int irq)
+{
+	int ret = 0;
+
+	if (dev->transfer.irq_desc.irq != irq)
+		return -EINVAL;
+
+	/* There is less need to use a spinlock
+	   than for a4l_request_irq() */
+	ret = __a4l_free_irq(&dev->transfer.irq_desc);
+
+	if (ret == 0)
+		dev->transfer.irq_desc.irq = A4L_IRQ_UNUSED;
+
+	return ret;
+}
+
+unsigned int a4l_get_irq(struct a4l_device * dev)
+{
+	return dev->transfer.irq_desc.irq;
+}
+
+/* --- Proc section --- */
+
+#ifdef CONFIG_PROC_FS
+
+int a4l_rdproc_transfer(struct seq_file *seq, void *v)
+{
+	struct a4l_transfer *transfer = (struct a4l_transfer *) seq->private;
+	int i;
+
+	if (v != SEQ_START_TOKEN)
+		return -EINVAL;
+
+	seq_printf(seq, "-- Subdevices --\n\n");
+	seq_printf(seq, "| idx | type\n");
+
+	/* Gives the subdevice type's name */
+	for (i = 0; i < transfer->nb_subd; i++) {
+		char *type;
+		switch (transfer->subds[i]->flags & A4L_SUBD_TYPES) {
+		case A4L_SUBD_UNUSED:
+			type = "Unused subdevice";
+			break;
+		case A4L_SUBD_AI:
+			type = "Analog input subdevice";
+			break;
+		case A4L_SUBD_AO:
+			type = "Analog output subdevice";
+			break;
+		case A4L_SUBD_DI:
+			type = "Digital input subdevice";
+			break;
+		case A4L_SUBD_DO:
+			type = "Digital output subdevice";
+			break;
+		case A4L_SUBD_DIO:
+			type = "Digital input/output subdevice";
+			break;
+		case A4L_SUBD_COUNTER:
+			type = "Counter subdevice";
+			break;
+		case A4L_SUBD_TIMER:
+			type = "Timer subdevice";
+			break;
+		case A4L_SUBD_MEMORY:
+			type = "Memory subdevice";
+			break;
+		case A4L_SUBD_CALIB:
+			type = "Calibration subdevice";
+			break;
+		case A4L_SUBD_PROC:
+			type = "Processor subdevice";
+			break;
+		case A4L_SUBD_SERIAL:
+			type = "Serial subdevice";
+			break;
+		default:
+			type = "Unknown subdevice";
+		}
+
+		seq_printf(seq, "|  %02d | %s\n", i, type);
+	}
+
+	return 0;
+}
+
+#endif /* CONFIG_PROC_FS */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig
new file mode 100644
index 0000000..3241597
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Kconfig
@@ -0,0 +1,3 @@
+
+config XENO_DRIVERS_AUTOTUNE
+	tristate
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile
new file mode 100644
index 0000000..12ba6cf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/Makefile
@@ -0,0 +1,4 @@
+
+obj-$(CONFIG_XENO_DRIVERS_AUTOTUNE) += xeno_autotune.o
+
+xeno_autotune-y := autotune.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c
new file mode 100644
index 0000000..d9208f2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/autotune/autotune.c
@@ -0,0 +1,820 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/atomic.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/sort.h>
+#include <cobalt/kernel/arith.h>
+#include <rtdm/driver.h>
+#include <rtdm/autotune.h>
+
+MODULE_DESCRIPTION("Xenomai/cobalt core clock autotuner");
+MODULE_AUTHOR("Philippe Gerum <rpm@xenomai.org>");
+MODULE_LICENSE("GPL");
+
+/* Auto-tuning services for the Cobalt core clock. */
+
+#define SAMPLING_TIME	500000000UL
+#define ADJUSTMENT_STEP 500
+#define WARMUP_STEPS	10
+#define AUTOTUNE_STEPS  40
+
+#define progress(__tuner, __fmt, __args...)				\
+	do {								\
+		if (!(__tuner)->quiet)					\
+			printk(XENO_INFO "autotune(%s) " __fmt "\n",	\
+			       (__tuner)->name, ##__args);		\
+	} while (0)
+
+struct tuning_score {
+	int pmean;
+	int stddev;
+	int minlat;
+	unsigned int step;
+	unsigned int gravity;
+};
+
+struct tuner_state {
+	xnticks_t ideal;
+	xnticks_t step;
+	int min_lat;
+	int max_lat;
+	int prev_mean;
+	long long prev_sqs;
+	long long cur_sqs;
+	unsigned int sum;
+	unsigned int cur_samples;
+	unsigned int max_samples;
+};
+
+struct gravity_tuner {
+	const char *name;
+	unsigned int (*get_gravity)(struct gravity_tuner *tuner);
+	void (*set_gravity)(struct gravity_tuner *tuner, unsigned int gravity);
+	unsigned int (*adjust_gravity)(struct gravity_tuner *tuner, int adjust);
+	int (*init_tuner)(struct gravity_tuner *tuner);
+	int (*start_tuner)(struct gravity_tuner *tuner, xnticks_t start_time,
+			   xnticks_t interval);
+	void (*destroy_tuner)(struct gravity_tuner *tuner);
+	struct tuner_state state;
+	rtdm_event_t done;
+	int status;
+	int quiet;
+	struct tuning_score scores[AUTOTUNE_STEPS];
+	int nscores;
+	atomic_t refcount;
+};
+
+struct irq_gravity_tuner {
+	rtdm_timer_t timer;
+	struct gravity_tuner tuner;
+};
+
+struct kthread_gravity_tuner {
+	rtdm_task_t task;
+	rtdm_event_t barrier;
+	xnticks_t start_time;
+	xnticks_t interval;
+	struct gravity_tuner tuner;
+};
+
+struct uthread_gravity_tuner {
+	rtdm_timer_t timer;
+	rtdm_event_t pulse;
+	struct gravity_tuner tuner;
+};
+
+struct autotune_context {
+	struct gravity_tuner *tuner;
+	struct autotune_setup setup;
+	rtdm_lock_t tuner_lock;
+};
+
+static inline void init_tuner(struct gravity_tuner *tuner)
+{
+	rtdm_event_init(&tuner->done, 0);
+	tuner->status = 0;
+	atomic_set(&tuner->refcount, 0);
+}
+
+static inline void destroy_tuner(struct gravity_tuner *tuner)
+{
+	rtdm_event_destroy(&tuner->done);
+}
+
+static inline void done_sampling(struct gravity_tuner *tuner,
+				 int status)
+{
+	tuner->status = status;
+	rtdm_event_signal(&tuner->done);
+}
+
+static int add_sample(struct gravity_tuner *tuner, xnticks_t timestamp)
+{
+	struct tuner_state *state;
+	int n, delta, cur_mean;
+
+	state = &tuner->state;
+
+	delta = (int)(timestamp - state->ideal);
+	if (delta < state->min_lat)
+		state->min_lat = delta;
+	if (delta > state->max_lat)
+		state->max_lat = delta;
+	if (delta < 0)
+		delta = 0;
+
+	state->sum += delta;
+	state->ideal += state->step;
+	n = ++state->cur_samples;
+
+	/*
+	 * Knuth citing Welford in TAOCP (Vol 2), single-pass
+	 * computation of variance using a recurrence relation.
+	 */
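+	/*
+	 * Concretely: cur_mean = prev_mean + (delta - prev_mean) / n and
+	 * cur_sqs = prev_sqs + (delta - prev_mean) * (delta - cur_mean),
+	 * so the sample variance is cur_sqs / (n - 1) once sampling
+	 * completes (see build_score()).
+	 */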
+	if (n == 1)
+		state->prev_mean = delta;
+	else {
+		cur_mean = state->prev_mean + (delta - state->prev_mean) / n;
+		state->cur_sqs = state->prev_sqs + (delta - state->prev_mean)
+			* (delta - cur_mean);
+		state->prev_mean = cur_mean;
+		state->prev_sqs = state->cur_sqs;
+	}
+
+	if (n >= state->max_samples) {
+		done_sampling(tuner, 0);
+		return 1;	/* Finished. */
+	}
+
+	return 0;	/* Keep going. */
+}
+
+static void timer_handler(rtdm_timer_t *timer)
+{
+	struct irq_gravity_tuner *irq_tuner;
+	xnticks_t now;
+
+	irq_tuner = container_of(timer, struct irq_gravity_tuner, timer);
+	now = xnclock_read_raw(&nkclock);
+
+	if (add_sample(&irq_tuner->tuner, now))
+		rtdm_timer_stop_in_handler(timer);
+}
+
+static int init_irq_tuner(struct gravity_tuner *tuner)
+{
+	struct irq_gravity_tuner *irq_tuner;
+	int ret;
+
+	irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner);
+	ret = rtdm_timer_init(&irq_tuner->timer, timer_handler, "autotune");
+	if (ret)
+		return ret;
+
+	init_tuner(tuner);
+
+	return 0;
+}
+
+static void destroy_irq_tuner(struct gravity_tuner *tuner)
+{
+	struct irq_gravity_tuner *irq_tuner;
+
+	irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner);
+	rtdm_timer_destroy(&irq_tuner->timer);
+	destroy_tuner(tuner);
+}
+
+static unsigned int get_irq_gravity(struct gravity_tuner *tuner)
+{
+	return nkclock.gravity.irq;
+}
+
+static void set_irq_gravity(struct gravity_tuner *tuner, unsigned int gravity)
+{
+	nkclock.gravity.irq = gravity;
+}
+
+static unsigned int adjust_irq_gravity(struct gravity_tuner *tuner, int adjust)
+{
+	return nkclock.gravity.irq += adjust;
+}
+
+static int start_irq_tuner(struct gravity_tuner *tuner,
+			   xnticks_t start_time, xnticks_t interval)
+{
+	struct irq_gravity_tuner *irq_tuner;
+
+	irq_tuner = container_of(tuner, struct irq_gravity_tuner, tuner);
+
+	return rtdm_timer_start(&irq_tuner->timer, start_time,
+				interval, RTDM_TIMERMODE_ABSOLUTE);
+}
+
+struct irq_gravity_tuner irq_tuner = {
+	.tuner = {
+		.name = "irqhand",
+		.init_tuner = init_irq_tuner,
+		.destroy_tuner = destroy_irq_tuner,
+		.get_gravity = get_irq_gravity,
+		.set_gravity = set_irq_gravity,
+		.adjust_gravity = adjust_irq_gravity,
+		.start_tuner = start_irq_tuner,
+	},
+};
+
+void task_handler(void *arg)
+{
+	struct kthread_gravity_tuner *k_tuner = arg;
+	xnticks_t now;
+	int ret = 0;
+
+	for (;;) {
+		if (rtdm_task_should_stop())
+			break;
+
+		ret = rtdm_event_wait(&k_tuner->barrier);
+		if (ret)
+			break;
+
+		ret = rtdm_task_set_period(&k_tuner->task, k_tuner->start_time,
+					   k_tuner->interval);
+		if (ret)
+			break;
+
+		for (;;) {
+			ret = rtdm_task_wait_period(NULL);
+			if (ret && ret != -ETIMEDOUT)
+				goto out;
+
+			now = xnclock_read_raw(&nkclock);
+			if (add_sample(&k_tuner->tuner, now)) {
+				rtdm_task_set_period(&k_tuner->task, 0, 0);
+				break;
+			}
+		}
+	}
+out:
+	done_sampling(&k_tuner->tuner, ret);
+	rtdm_task_destroy(&k_tuner->task);
+}
+
+static int init_kthread_tuner(struct gravity_tuner *tuner)
+{
+	struct kthread_gravity_tuner *k_tuner;
+
+	init_tuner(tuner);
+	k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner);
+	rtdm_event_init(&k_tuner->barrier, 0);
+
+	return rtdm_task_init(&k_tuner->task, "autotune",
+			      task_handler, k_tuner,
+			      RTDM_TASK_HIGHEST_PRIORITY, 0);
+}
+
+static void destroy_kthread_tuner(struct gravity_tuner *tuner)
+{
+	struct kthread_gravity_tuner *k_tuner;
+
+	k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner);
+	rtdm_task_destroy(&k_tuner->task);
+	rtdm_event_destroy(&k_tuner->barrier);
+}
+
+static unsigned int get_kthread_gravity(struct gravity_tuner *tuner)
+{
+	return nkclock.gravity.kernel;
+}
+
+static void set_kthread_gravity(struct gravity_tuner *tuner, unsigned int gravity)
+{
+	nkclock.gravity.kernel = gravity;
+}
+
+static unsigned int adjust_kthread_gravity(struct gravity_tuner *tuner, int adjust)
+{
+	return nkclock.gravity.kernel += adjust;
+}
+
+static int start_kthread_tuner(struct gravity_tuner *tuner,
+			       xnticks_t start_time, xnticks_t interval)
+{
+	struct kthread_gravity_tuner *k_tuner;
+
+	k_tuner = container_of(tuner, struct kthread_gravity_tuner, tuner);
+
+	k_tuner->start_time = start_time;
+	k_tuner->interval = interval;
+	rtdm_event_signal(&k_tuner->barrier);
+
+	return 0;
+}
+
+struct kthread_gravity_tuner kthread_tuner = {
+	.tuner = {
+		.name = "kthread",
+		.init_tuner = init_kthread_tuner,
+		.destroy_tuner = destroy_kthread_tuner,
+		.get_gravity = get_kthread_gravity,
+		.set_gravity = set_kthread_gravity,
+		.adjust_gravity = adjust_kthread_gravity,
+		.start_tuner = start_kthread_tuner,
+	},
+};
+
+static void pulse_handler(rtdm_timer_t *timer)
+{
+	struct uthread_gravity_tuner *u_tuner;
+
+	u_tuner = container_of(timer, struct uthread_gravity_tuner, timer);
+	rtdm_event_signal(&u_tuner->pulse);
+}
+
+static int init_uthread_tuner(struct gravity_tuner *tuner)
+{
+	struct uthread_gravity_tuner *u_tuner;
+	int ret;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+	ret = rtdm_timer_init(&u_tuner->timer, pulse_handler, "autotune");
+	if (ret)
+		return ret;
+
+	xntimer_set_gravity(&u_tuner->timer, XNTIMER_UGRAVITY); /* gasp... */
+	rtdm_event_init(&u_tuner->pulse, 0);
+	init_tuner(tuner);
+
+	return 0;
+}
+
+static void destroy_uthread_tuner(struct gravity_tuner *tuner)
+{
+	struct uthread_gravity_tuner *u_tuner;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+	rtdm_timer_destroy(&u_tuner->timer);
+	rtdm_event_destroy(&u_tuner->pulse);
+}
+
+static unsigned int get_uthread_gravity(struct gravity_tuner *tuner)
+{
+	return nkclock.gravity.user;
+}
+
+static void set_uthread_gravity(struct gravity_tuner *tuner, unsigned int gravity)
+{
+	nkclock.gravity.user = gravity;
+}
+
+static unsigned int adjust_uthread_gravity(struct gravity_tuner *tuner, int adjust)
+{
+	return nkclock.gravity.user += adjust;
+}
+
+static int start_uthread_tuner(struct gravity_tuner *tuner,
+			       xnticks_t start_time, xnticks_t interval)
+{
+	struct uthread_gravity_tuner *u_tuner;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+
+	return rtdm_timer_start(&u_tuner->timer, start_time,
+				interval, RTDM_TIMERMODE_ABSOLUTE);
+}
+
+static int add_uthread_sample(struct gravity_tuner *tuner,
+			      nanosecs_abs_t user_timestamp)
+{
+	struct uthread_gravity_tuner *u_tuner;
+	int ret;
+
+	u_tuner = container_of(tuner, struct uthread_gravity_tuner, tuner);
+
+	if (user_timestamp &&
+	    add_sample(tuner, xnclock_ns_to_ticks(&nkclock, user_timestamp))) {
+		rtdm_timer_stop(&u_tuner->timer);
+		/* Tell the caller to park until next round. */
+		ret = -EPIPE;
+	} else
+		ret = rtdm_event_wait(&u_tuner->pulse);
+
+	return ret;
+}
+
+struct uthread_gravity_tuner uthread_tuner = {
+	.tuner = {
+		.name = "uthread",
+		.init_tuner = init_uthread_tuner,
+		.destroy_tuner = destroy_uthread_tuner,
+		.get_gravity = get_uthread_gravity,
+		.set_gravity = set_uthread_gravity,
+		.adjust_gravity = adjust_uthread_gravity,
+		.start_tuner = start_uthread_tuner,
+	},
+};
+
+static inline void build_score(struct gravity_tuner *tuner, int step)
+{
+	struct tuner_state *state = &tuner->state;
+	unsigned int variance, n;
+
+	n = state->cur_samples;
+	tuner->scores[step].pmean = state->sum / n;
+	variance = n > 1 ? xnarch_llimd(state->cur_sqs, 1, n - 1) : 0;
+	tuner->scores[step].stddev = int_sqrt(variance);
+	tuner->scores[step].minlat = state->min_lat;
+	tuner->scores[step].gravity = tuner->get_gravity(tuner);
+	tuner->scores[step].step = step;
+	tuner->nscores++;
+}
+
+static int cmp_score_mean(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->pmean - sr->pmean;
+}
+
+static int cmp_score_stddev(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->stddev - sr->stddev;
+}
+
+static int cmp_score_minlat(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->minlat - sr->minlat;
+}
+
+static int cmp_score_gravity(const void *c, const void *r)
+{
+	const struct tuning_score *sc = c, *sr = r;
+	return sc->gravity - sr->gravity;
+}
+
+static int filter_mean(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_mean, NULL);
+
+	/* Top half of the best pondered means. */
+
+	return (tuner->nscores + 1) / 2;
+}
+
+static int filter_stddev(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_stddev, NULL);
+
+	/* Top half of the best standard deviations. */
+
+	return (tuner->nscores + 1) / 2;
+}
+
+static int filter_minlat(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_minlat, NULL);
+
+	/* Top half of the minimum latencies. */
+
+	return (tuner->nscores + 1) / 2;
+}
+
+static int filter_gravity(struct gravity_tuner *tuner)
+{
+	sort(tuner->scores, tuner->nscores, sizeof(struct tuning_score),
+	     cmp_score_gravity, NULL);
+
+	/* Smallest gravity required among the shortest latencies. */
+
+	return tuner->nscores;
+}
+
+static void dump_scores(struct gravity_tuner *tuner)
+{
+	int n;
+
+	if (tuner->quiet)
+		return;
+
+	for (n = 0; n < tuner->nscores; n++)
+		printk(KERN_INFO
+		       ".. S%.2d pmean=%Ld stddev=%Lu minlat=%Lu gravity=%Lu\n",
+		       tuner->scores[n].step,
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].pmean),
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].stddev),
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].minlat),
+		       xnclock_ticks_to_ns(&nkclock, tuner->scores[n].gravity));
+}
+
+static inline void filter_score(struct gravity_tuner *tuner,
+				int (*filter)(struct gravity_tuner *tuner))
+{
+	tuner->nscores = filter(tuner);
+	dump_scores(tuner);
+}
+
+static int tune_gravity(struct gravity_tuner *tuner, int period)
+{
+	struct tuner_state *state = &tuner->state;
+	int ret, step, gravity_limit, adjust;
+	unsigned int orig_gravity;
+
+	state->step = xnclock_ns_to_ticks(&nkclock, period);
+	state->max_samples = SAMPLING_TIME / (period ?: 1);
+	orig_gravity = tuner->get_gravity(tuner);
+	tuner->set_gravity(tuner, 0);
+	tuner->nscores = 0;
+	/* Gravity adjustment step */
+	adjust = xnclock_ns_to_ticks(&nkclock, ADJUSTMENT_STEP) ?: 1;
+	gravity_limit = 0;
+	progress(tuner, "warming up...");
+
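+	/*
+	 * Run WARMUP_STEPS sampling periods to estimate the smallest
+	 * latency the machine can deliver (the gravity cap), then
+	 * AUTOTUNE_STEPS more, raising the gravity by one adjustment
+	 * step each round and scoring the latency distribution, before
+	 * filtering the scores down to the best gravity value.
+	 */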
+	for (step = 0; step < WARMUP_STEPS + AUTOTUNE_STEPS; step++) {
+		state->ideal = xnclock_read_raw(&nkclock) + state->step * WARMUP_STEPS;
+		state->min_lat = xnclock_ns_to_ticks(&nkclock, SAMPLING_TIME);
+		state->max_lat = 0;
+		state->prev_mean = 0;
+		state->prev_sqs = 0;
+		state->cur_sqs = 0;
+		state->sum = 0;
+		state->cur_samples = 0;
+
+		ret = tuner->start_tuner(tuner,
+					 xnclock_ticks_to_ns(&nkclock, state->ideal),
+					 period);
+		if (ret)
+			goto fail;
+
+		/* Tuner stops when posting. */
+		ret = rtdm_event_wait(&tuner->done);
+		if (ret)
+			goto fail;
+
+		ret = tuner->status;
+		if (ret)
+			goto fail;
+
+		if (step < WARMUP_STEPS) {
+			if (state->min_lat > gravity_limit) {
+				gravity_limit = state->min_lat;
+				progress(tuner, "gravity limit set to %Lu ns (%d)",
+					 xnclock_ticks_to_ns(&nkclock, gravity_limit), state->min_lat);
+			}
+			continue;
+		}
+
+		/*
+		 * We should not be early by more than the gravity
+		 * value minus one tick, to account for the rounding
+		 * error involved when the timer frequency is lower
+		 * than 1e9 / ADJUSTMENT_STEP.
+		 */
+		if (state->min_lat < 0) {
+			if (tuner->get_gravity(tuner) < -state->min_lat - 1) {
+				printk(XENO_WARNING
+				       "autotune(%s) failed with early shot (%Ld ns)\n",
+				       tuner->name,
+				       xnclock_ticks_to_ns(&nkclock,
+						   -(tuner->get_gravity(tuner) +
+						     state->min_lat)));
+				ret = -EAGAIN;
+				goto fail;
+			}
+			break;
+		}
+
+		if (((step - WARMUP_STEPS) % 5) == 0)
+			progress(tuner, "calibrating... (slice %d)",
+				 (step - WARMUP_STEPS) / 5 + 1);
+
+		build_score(tuner, step - WARMUP_STEPS);
+
+		/*
+		 * Anticipating by more than the minimum latency
+		 * detected at warmup would make no sense: cap the
+		 * gravity we may try.
+		 */
+		if (tuner->adjust_gravity(tuner, adjust) > gravity_limit) {
+			progress(tuner, "beyond gravity limit at %Lu ns",
+				 xnclock_ticks_to_ns(&nkclock,
+						     tuner->get_gravity(tuner)));
+			break;
+		}
+	}
+
+	progress(tuner, "calibration scores");
+	dump_scores(tuner);
+	progress(tuner, "pondered mean filter");
+	filter_score(tuner, filter_mean);
+	progress(tuner, "standard deviation filter");
+	filter_score(tuner, filter_stddev);
+	progress(tuner, "minimum latency filter");
+	filter_score(tuner, filter_minlat);
+	progress(tuner, "gravity filter");
+	filter_score(tuner, filter_gravity);
+	tuner->set_gravity(tuner, tuner->scores[0].gravity);
+
+	return 0;
+fail:
+	tuner->set_gravity(tuner, orig_gravity);
+
+	return ret;
+}
+
+static int autotune_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct autotune_context *context;
+	struct autotune_setup setup;
+	struct gravity_tuner *tuner, *old_tuner;
+	rtdm_lockctx_t lock_ctx;
+	int ret;
+
+	switch (request) {
+	case AUTOTUNE_RTIOC_RESET:
+		xnclock_reset_gravity(&nkclock);
+		return 0;
+	case AUTOTUNE_RTIOC_IRQ:
+		tuner = &irq_tuner.tuner;
+		break;
+	case AUTOTUNE_RTIOC_KERN:
+		tuner = &kthread_tuner.tuner;
+		break;
+	case AUTOTUNE_RTIOC_USER:
+		tuner = &uthread_tuner.tuner;
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	ret = rtdm_copy_from_user(fd, &setup, arg, sizeof(setup));
+	if (ret)
+		return ret;
+
+	ret = tuner->init_tuner(tuner);
+	if (ret)
+		return ret;
+
+	context = rtdm_fd_to_private(fd);
+
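+	/* Swap in the new tuner, unless the previous one is still busy
+	   sampling (its refcount is held across RTIOC_RUN/PULSE). */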
+	rtdm_lock_get_irqsave(&context->tuner_lock, lock_ctx);
+
+	old_tuner = context->tuner;
+	if (old_tuner && atomic_read(&old_tuner->refcount) > 0) {
+		rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx);
+		tuner->destroy_tuner(tuner);
+		return -EBUSY;
+	}
+
+	context->tuner = tuner;
+	context->setup = setup;
+
+	rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx);
+
+	if (old_tuner)
+		old_tuner->destroy_tuner(old_tuner);
+
+	if (setup.quiet <= 1)
+		printk(XENO_INFO "autotune(%s) started\n", tuner->name);
+
+	return ret;
+}
+
+static int autotune_ioctl_rt(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct autotune_context *context;
+	struct gravity_tuner *tuner;
+	rtdm_lockctx_t lock_ctx;
+	__u64 timestamp;
+	__u32 gravity;
+	int ret;
+
+	context = rtdm_fd_to_private(fd);
+
+	rtdm_lock_get_irqsave(&context->tuner_lock, lock_ctx);
+
+	tuner = context->tuner;
+	if (tuner)
+		atomic_inc(&tuner->refcount);
+
+	rtdm_lock_put_irqrestore(&context->tuner_lock, lock_ctx);
+
+	if (tuner == NULL)
+		return -ENOSYS;
+
+	switch (request) {
+	case AUTOTUNE_RTIOC_RUN:
+		tuner->quiet = context->setup.quiet;
+		ret = tune_gravity(tuner, context->setup.period);
+		if (ret)
+			break;
+		gravity = xnclock_ticks_to_ns(&nkclock,
+					      tuner->get_gravity(tuner));
+		ret = rtdm_safe_copy_to_user(fd, arg, &gravity,
+					     sizeof(gravity));
+		break;
+	case AUTOTUNE_RTIOC_PULSE:
+		if (tuner != &uthread_tuner.tuner) {
+			ret = -EINVAL;
+			break;
+		}
+		ret = rtdm_safe_copy_from_user(fd, &timestamp, arg,
+					       sizeof(timestamp));
+		if (ret)
+			break;
+		ret = add_uthread_sample(tuner, timestamp);
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	atomic_dec(&tuner->refcount);
+
+	return ret;
+}
+
+static int autotune_open(struct rtdm_fd *fd, int oflags)
+{
+	struct autotune_context *context;
+
+	context = rtdm_fd_to_private(fd);
+	context->tuner = NULL;
+	rtdm_lock_init(&context->tuner_lock);
+
+	return 0;
+}
+
+static void autotune_close(struct rtdm_fd *fd)
+{
+	struct autotune_context *context;
+	struct gravity_tuner *tuner;
+
+	context = rtdm_fd_to_private(fd);
+	tuner = context->tuner;
+	if (tuner) {
+		if (context->setup.quiet <= 1)
+			printk(XENO_INFO "autotune finished [%Lui/%Luk/%Luu]\n",
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, irq)),
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, kernel)),
+			       xnclock_ticks_to_ns(&nkclock,
+						   xnclock_get_gravity(&nkclock, user)));
+		tuner->destroy_tuner(tuner);
+	}
+}
+
+static struct rtdm_driver autotune_driver = {
+	.profile_info		=	RTDM_PROFILE_INFO(autotune,
+							  RTDM_CLASS_AUTOTUNE,
+							  RTDM_SUBCLASS_AUTOTUNE,
+							  0),
+	.device_flags		=	RTDM_NAMED_DEVICE|RTDM_EXCLUSIVE,
+	.device_count		=	1,
+	.context_size		=	sizeof(struct autotune_context),
+	.ops = {
+		.open		=	autotune_open,
+		.ioctl_rt	=	autotune_ioctl_rt,
+		.ioctl_nrt	=	autotune_ioctl_nrt,
+		.close		=	autotune_close,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &autotune_driver,
+	.label = "autotune",
+};
+
+static int __init autotune_init(void)
+{
+	return rtdm_dev_register(&device);
+}
+
+static void __exit autotune_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(autotune_init);
+module_exit(autotune_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS b/kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS
new file mode 100644
index 0000000..88c60ce
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/CREDITS
@@ -0,0 +1,37 @@
+The RT-Socket-CAN project is based on the SJA1000 socket-based CAN 
+driver for RTDM by Sebastian Smolorz [1]. Other parts are from the RTnet 
+project [2], especially the device interface, the RTDM serial device 
+driver and profile of Xenomai [3] and from other Open Source CAN driver 
+projects like PCAN [4], the linux-can.patch [5] and Socket-CAN [6].
+
+
+RT-Socket-CAN development team:
+
+Wolfgang Grandegger <wg@grandegger.com>
+Jan Kiszka          <kiszka@rts.uni-hannover.de>
+Sebastian Smolorz   <sebastian.smolorz@stud.uni-hannover.de>
+
+
+[1] http://www.rts.uni-hannover.de/rtaddon/RTDM_CAN_Device_Profile_Doc/index.html
+[2] http://www.rtnet.org
+[3] http://www.xenomai.org
+[4] http://www.peak-system.com/linux/
+[5] http://marc.theaimsgroup.com/?t=111088094000003&r=1&w=2
+[6] http://developer.berlios.de/projects/socketcan/
+
+This file is an attempt to give proper credit to the people who have
+contributed to this project so far. List entries are sorted by name
+and provide the usual tags for automated processing.
+
+N: Wolfgang Grandegger
+E: wg@grandegger.com
+D: Core development.
+
+N: Jan Kiszka
+E: kiszka@rts.uni-hannover.de
+D: Core development.
+
+N: Sebastian Smolorz
+E: sebastian.smolorz@stud.uni-hannover.de
+D: Core development.
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig
new file mode 100644
index 0000000..1c05549
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/Kconfig
@@ -0,0 +1,92 @@
+menu "CAN drivers"
+
+config XENO_DRIVERS_CAN
+	tristate "RT-Socket-CAN, CAN raw socket interface"
+	help
+	RT-Socket-CAN is a real-time socket interface for CAN controllers.
+
+config XENO_DRIVERS_CAN_DEBUG
+	depends on XENO_DRIVERS_CAN && PROC_FS
+	bool "Enable debug output"
+	default y
+	help
+
+	This option activates debugging checks and enhanced output for the
+	RT-Socket-CAN driver. It also allows listing the hardware registers
+	of the registered CAN controllers. It is a recommended option for
+	getting started and analysing potential problems. For production
+	purposes, it should be switched off (for the sake of latency).
+
+config XENO_DRIVERS_CAN_LOOPBACK
+	depends on XENO_DRIVERS_CAN
+	bool "Enable TX loopback to local sockets"
+	default n
+	help
+
+	This option adds support for TX loopback to local sockets. Normally,
+	messages sent to the CAN bus are not visible to sockets listening to
+	the same local device. When this option is enabled, TX messages are,
+	by default, looped back locally once the transmission has completed.
+	This behaviour can be deactivated or reactivated with "setsockopt".
+	Enable this option if you want a "net-alike" behaviour.
+
+config XENO_DRIVERS_CAN_RXBUF_SIZE
+	depends on XENO_DRIVERS_CAN
+	int "Size of receive ring buffers (must be 2^N)"
+	default 1024
+
+config XENO_DRIVERS_CAN_MAX_DEVICES
+	depends on XENO_DRIVERS_CAN
+	int "Maximum number of devices"
+	default 4
+
+config XENO_DRIVERS_CAN_MAX_RECEIVERS
+	depends on XENO_DRIVERS_CAN
+	int "Maximum number of receive filters per device"
+	default 16
+	help
+
+	The driver maintains a receive filter list per device for fast access.
+
+config XENO_DRIVERS_CAN_BUS_ERR
+	depends on XENO_DRIVERS_CAN
+	bool
+	default n
+	help
+
+	To avoid unnecessary bus error interrupt flooding, this option enables
+	bus error interrupts when an application is calling a receive function
+	on a socket listening on bus errors. After one bus error has occurred,
+	the interrupt will be disabled to allow the application time for error
+	processing. This option is automatically selected for CAN controllers
+	supporting bus error interrupts like the SJA1000.
+
+config XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	depends on XENO_DRIVERS_CAN
+	bool "Old bit-time calculation algorithm (deprecated)"
+	default n
+	help
+
+	This option enables the old algorithm for calculating the CAN
+	bit-timing parameters, kept for backward compatibility.
+
+config XENO_DRIVERS_CAN_VIRT
+	depends on XENO_DRIVERS_CAN
+	tristate "Virtual CAN bus driver"
+	help
+
+	This driver provides two CAN ports that are virtually interconnected.
+	More ports can be enabled with the module parameter "devices".
+
+config XENO_DRIVERS_CAN_FLEXCAN
+	depends on XENO_DRIVERS_CAN && OF && !XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	tristate "Freescale FLEXCAN based chips"
+	help
+
+	Say Y here if you want support for Freescale FlexCAN.
+
+source "drivers/xenomai/can/mscan/Kconfig"
+source "drivers/xenomai/can/peak_canfd/Kconfig"
+source "drivers/xenomai/can/sja1000/Kconfig"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile
new file mode 100644
index 0000000..f78f6af
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/Makefile
@@ -0,0 +1,10 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/can
+
+obj-$(CONFIG_XENO_DRIVERS_CAN) += xeno_can.o mscan/ sja1000/ peak_canfd/
+obj-$(CONFIG_XENO_DRIVERS_CAN_FLEXCAN) += xeno_can_flexcan.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_VIRT) += xeno_can_virt.o
+
+xeno_can-y := rtcan_dev.o rtcan_socket.o rtcan_module.o rtcan_raw.o rtcan_raw_dev.o rtcan_raw_filter.o
+xeno_can_virt-y := rtcan_virt.o
+xeno_can_flexcan-y := rtcan_flexcan.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/README b/kernel/xenomai-v3.2.4/kernel/drivers/can/README
new file mode 100644
index 0000000..cb0ef37
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/README
@@ -0,0 +1,143 @@
+RT-Socket-CAN - RTDM driver for CAN devices
+===========================================
+
+RT-Socket-CAN is an Open Source hard real-time protocol stack for CAN 
+devices based on BSD sockets. This implementation is for RTDM, the 
+Real-Time-Driver-Model. Note that there is a similar variant being 
+developed for standard Linux using the Linux networking stack.
+
+
+Status:
+------
+
+Currently drivers are available for the following CAN controllers and 
+devices:
+
+   SJA1000 ISA devices
+   SJA1000 Memory-mapped devices
+   SJA1000 esd EPPC405 embedded controller and CPCI405 boards
+   SJA1000 PEAK PCI card
+   SJA1000 PEAK parallel port Dongle
+   SJA1000 IXXAT PCI card
+   MSCAN for MPC5200 boards
+
+Utilities for RT-Socket-CAN are available in "src/utils/can".
+
+
+Installation:
+------------
+
+This example installation is for the DENX "linuxppc_2_4_devel" tree
+(Linux 2.4.25) using the ELDK (see http://www.denx.de). It works in a
+similar way for other kernels and distributions including Linux 2.6.
+
+
+o Kernel space part:
+
+  - Please install the Xenomai kernel space part as described in the
+    README.INSTALL.
+
+  - Configure RT-Socket-CAN as kernel modules as required by your 
+    hardware (and make sure that loadable module support is enabled):
+
+    $ cd <linux-kernel-root>
+    $ export CROSS_COMPILE=ppc_82xx-
+    $ make menuconfig
+    ... Select "Loadable module support  --->" 
+    [*] Enable loadable module support 
+    ... Exit
+    ... Select "Real-time sub-system --->"
+               "Real-time drivers --->" 
+                 "CAN bus controller --->"
+    [M] RT-Socket-CAN, CAN raw socket interface (NEW)
+    (1024) Size of receive ring buffers (must be 2^N) (NEW)
+    (4) Maximum number of devices (NEW)
+    (16) Maximum number of receive filters per device (NEW)
+    [M] MSCAN driver for MPC5200 (NEW)
+    [*] Enable CAN 1 (NEW)
+    [*] Enable CAN 2 (NEW)
+    (66000000) Clock Frequency in Hz (NEW)
+    (I2C1/TMR01) Pin Configuration
+    <M> Philips SJA1000 CAN controller (NEW)
+    <M>   Standard ISA devices
+    (4)   Maximum number of ISA devices (NEW)
+    <M>   PEAK PCI cards
+    ... Exit and save
+
+    Note: you can also statically link the MSCAN drivers into 
+    the kernel.
+
+
+  - Make the Linux kernel and RT-Socket-CAN modules and copy them to 
+    the root filesystem:
+
+    $ make dep
+    $ make uImage
+    $ cp -p arch/ppc/boot/images/uImage /tftpboot/icecube/uImage-rtcan
+    $ make modules
+    $ export DESTDIR=/opt/eldk/ppc_82xx
+    $ make modules_install INSTALL_MOD_PATH=$DESTDIR
+    $ find $DESTDIR/lib/modules/2.4.25/kernel/drivers/xenomai/rtcan
+    .../rtcan
+    .../rtcan/xeno_can.o
+    .../rtcan/mscan
+    .../rtcan/mscan/xeno_can_mscan.o
+    .../rtcan/sja1000/xeno_can_sja1000.o
+    .../rtcan/sja1000/xeno_can_peak_pci.o
+    .../rtcan/sja1000/xeno_can_isa.o
+
+  - Loading the RT-Socket-CAN modules
+
+    Now boot the Xenomai enabled kernel on your target system.
+
+    In case RT-Socket-CAN is built as kernel modules, you need to load 
+    them using modprobe or insmod, e.g. for this example build:
+
+    # export MODDIR=/lib/modules/2.4.25/kernel/drivers/xenomai/rtcan
+    # insmod $MODDIR/xeno_can.o
+    # insmod $MODDIR/mscan/xeno_can_mscan.o
+    # insmod $MODDIR/sja1000/xeno_can_sja1000.o
+    # insmod $MODDIR/sja1000/xeno_can_peak_pci.o
+
+    Note that various kernel module parameters can be passed with 
+    insmod. Please use "modinfo" to list them or check the 
+    corresponding source code files for further information.
+
+
+o User space part:
+
+  - User space CAN utilities for RT-Socket-CAN are available in
+    "src/utils/can". Please check the README in there for further 
+    information.
+
+
+Documentation:
+-------------
+
+The RTDM CAN profile is documented at
+http://www.xenomai.org/documentation/xenomai-3/html/xeno3prm/group__rtdm__can.html
+
+Feedback:
+--------
+
+Please report Xenomai related bugs and comments to the Xenomai mailing 
+list (xenomai@xenomai.org).
+
+Please report CAN related bugs and comments to the "Socketcan" mailing 
+list (Socketcan-users@lists.berlios.de) or directly to the main authors 
+Wolfgang Grandegger (wg@grandegger.com) or Sebastian Smolorz 
+(Sebastian.Smolorz@stud.uni-hannover.de). 
+
+
+Credits:
+-------
+
+See CREDITS file in this directory.
+
+
+License:
+-------
+
+RT-Socket-CAN is free software, and you are welcome to redistribute it 
+under the terms of the GNU General Public License. This program comes 
+with ABSOLUTELY NO WARRANTY. See "COPYING" for details.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig
new file mode 100644
index 0000000..dfbf5af
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Kconfig
@@ -0,0 +1,8 @@
+config XENO_DRIVERS_CAN_MSCAN
+	depends on XENO_DRIVERS_CAN && (PPC_MPC52xx || PPC_MPC512x)
+	tristate "MSCAN driver for MPC52xx and MPC512x"
+	default n
+	help
+
+	This driver is for the MSCAN on the MPC5200 and MPC512x processor
+	from Freescale.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile
new file mode 100644
index 0000000..0f157e9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/Makefile
@@ -0,0 +1,6 @@
+
+ccflags-y += -I$(srctree)/drivers/xenomai/can -I$(srctree)/drivers/xenomai/can/mscan
+
+obj-$(CONFIG_XENO_DRIVERS_CAN_MSCAN) += xeno_can_mscan.o
+
+xeno_can_mscan-y := rtcan_mscan.o rtcan_mscan_proc.o rtcan_mscan_mpc5xxx.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c
new file mode 100644
index 0000000..da573ab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.c
@@ -0,0 +1,798 @@
+/*
+ * Copyright (C) 2006-2010 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Derived from the PCAN project file driver/src/pcan_mpc5200.c:
+ *
+ * Copyright (c) 2003 Wolfgang Denk, DENX Software Engineering, wd@denx.de.
+ *
+ * Copyright (c) 2005 Felix Daners, Plugit AG, felix.daners@plugit.ch
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+#include "rtcan_mscan_regs.h"
+#include "rtcan_mscan.h"
+
+#define MSCAN_SET_MODE_RETRIES	255
+
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+static struct can_bittiming_const mscan_bittiming_const = {
+	.name = "mscan",
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+#endif
+
+/**
+ *  Reception Interrupt handler
+ *
+ *  Inline function called from @ref rtcan_mscan_interrupt when an RX
+ *  interrupt is detected. The hardware registers are read out and composed
+ *  into a struct rtcan_skb.
+ *
+ *  @param[out] skb  Pointer to an instance of struct rtcan_skb which will be
+ *                   filled with the received CAN message
+ *  @param[in]  dev  Device structure
+ */
+static inline void rtcan_mscan_rx_interrupt(struct rtcan_device *dev,
+					    struct rtcan_skb *skb)
+{
+	int i;
+	unsigned char size;
+	struct rtcan_rb_frame *frame = &skb->rb_frame;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	frame->can_dlc = in_8(&regs->canrxfg.dlr) & 0x0F;
+
+	/* If DLC exceeds 8 bytes adjust it to 8 (for the payload size) */
+	size = (frame->can_dlc > 8) ? 8 : frame->can_dlc;
+
+	if (in_8(&regs->canrxfg.idr[1]) & MSCAN_BUF_EXTENDED) {
+		frame->can_id = ((in_8(&regs->canrxfg.idr[0]) << 21) |
+				 ((in_8(&regs->canrxfg.idr[1]) & 0xE0) << 13) |
+				 ((in_8(&regs->canrxfg.idr[1]) & 0x07) << 15) |
+				 (in_8(&regs->canrxfg.idr[4]) << 7) |
+				 (in_8(&regs->canrxfg.idr[5]) >> 1));
+
+		frame->can_id |= CAN_EFF_FLAG;
+
+		if ((in_8(&regs->canrxfg.idr[5]) & MSCAN_BUF_EXT_RTR)) {
+			frame->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < size; i++)
+				frame->data[i] =
+					in_8(&regs->canrxfg.dsr[i +
+								(i / 2) * 2]);
+			skb->rb_frame_size += size;
+		}
+
+	} else {
+		frame->can_id = ((in_8(&regs->canrxfg.idr[0]) << 3) |
+				 (in_8(&regs->canrxfg.idr[1]) >> 5));
+
+		if ((in_8(&regs->canrxfg.idr[1]) & MSCAN_BUF_STD_RTR)) {
+			frame->can_id |= CAN_RTR_FLAG;
+		} else {
+			for (i = 0; i < size; i++)
+				frame->data[i] =
+					in_8(&regs->canrxfg.dsr[i +
+								(i / 2) * 2]);
+			skb->rb_frame_size += size;
+		}
+	}
+
+	/* Store the interface index */
+	frame->can_ifindex = dev->ifindex;
+}
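
The dsr[i + (i / 2) * 2] indexing above reflects the message buffer layout
defined in rtcan_mscan_regs.h: the 16-byte data segment register array is
only half used, with payload bytes stored in pairs, each pair apparently
followed by two reserved bytes. A standalone sketch of the mapping
(illustrative only, not part of the patch):

#include <stdio.h>

/* Print the payload-byte-to-DSR-offset mapping used by
 * rtcan_mscan_rx_interrupt(): two payload bytes are followed by two
 * reserved bytes, hence the i + (i / 2) * 2 term. */
int main(void)
{
	int i;

	for (i = 0; i < 8; i++)
		printf("data[%d] -> dsr[%d]\n", i, i + (i / 2) * 2);
	return 0;	/* prints offsets 0, 1, 4, 5, 8, 9, 12, 13 */
}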
+
+static can_state_t mscan_stat_map[4] = {
+	CAN_STATE_ACTIVE,
+	CAN_STATE_BUS_WARNING,
+	CAN_STATE_BUS_PASSIVE,
+	CAN_STATE_BUS_OFF
+};
+
+static inline void rtcan_mscan_err_interrupt(struct rtcan_device *dev,
+					     struct rtcan_skb *skb,
+					     int r_status)
+{
+	u8 rstat, tstat;
+	struct rtcan_rb_frame *frame = &skb->rb_frame;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC;
+
+	frame->can_id = CAN_ERR_FLAG;
+	frame->can_dlc = CAN_ERR_DLC;
+
+	memset(&frame->data[0], 0, frame->can_dlc);
+
+	if ((r_status & MSCAN_OVRIF)) {
+		frame->can_id |= CAN_ERR_CRTL;
+		frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+	} else if ((r_status & (MSCAN_CSCIF))) {
+
+		rstat = (r_status & (MSCAN_RSTAT0 |
+				     MSCAN_RSTAT1)) >> 4 & 0x3;
+		tstat = (r_status & (MSCAN_TSTAT0 |
+				     MSCAN_TSTAT1)) >> 2 & 0x3;
+		dev->state = mscan_stat_map[max(rstat, tstat)];
+
+		switch (dev->state) {
+		case CAN_STATE_BUS_OFF:
+			/* Bus-off condition */
+			frame->can_id |= CAN_ERR_BUSOFF;
+			dev->state = CAN_STATE_BUS_OFF;
+			/* Disable receiver interrupts */
+			out_8(&regs->canrier, 0);
+			/* Wake up waiting senders */
+			rtdm_sem_destroy(&dev->tx_sem);
+			break;
+
+		case CAN_STATE_BUS_PASSIVE:
+			frame->can_id |= CAN_ERR_CRTL;
+			if (tstat > rstat)
+				frame->data[1] = CAN_ERR_CRTL_TX_PASSIVE;
+			else
+				frame->data[1] = CAN_ERR_CRTL_RX_PASSIVE;
+			break;
+
+		case CAN_STATE_BUS_WARNING:
+			frame->can_id |= CAN_ERR_CRTL;
+			if (tstat > rstat)
+				frame->data[1] = CAN_ERR_CRTL_TX_WARNING;
+			else
+				frame->data[1] = CAN_ERR_CRTL_RX_WARNING;
+			break;
+
+		default:
+			break;
+
+		}
+	}
+	/* Store the interface index */
+	frame->can_ifindex = dev->ifindex;
+}
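
For reference, CANRFLG packs the receiver status into bits 5:4 (RSTAT) and
the transmitter status into bits 3:2 (TSTAT), per the register definitions
in rtcan_mscan_regs.h later in this patch. A self-contained worked example
of the extraction and state mapping (illustrative values only):

#include <stdio.h>

#define MSCAN_RSTAT1	0x20	/* redefined here just for the sketch */
#define MSCAN_RSTAT0	0x10
#define MSCAN_TSTAT1	0x08
#define MSCAN_TSTAT0	0x04

int main(void)
{
	unsigned char canrflg = 0x58;	/* CSCIF set, RSTAT = 01, TSTAT = 10 */
	unsigned char rstat = (canrflg & (MSCAN_RSTAT0 | MSCAN_RSTAT1)) >> 4 & 0x3;
	unsigned char tstat = (canrflg & (MSCAN_TSTAT0 | MSCAN_TSTAT1)) >> 2 & 0x3;

	/* max(rstat, tstat) = 2 -> mscan_stat_map[2] = CAN_STATE_BUS_PASSIVE;
	 * tstat > rstat -> CAN_ERR_CRTL_TX_PASSIVE in the error frame. */
	printf("rstat=%u tstat=%u\n", rstat, tstat);	/* rstat=1 tstat=2 */
	return 0;
}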
+
+/** Interrupt handler */
+static int rtcan_mscan_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtcan_skb skb;
+	struct rtcan_device *dev;
+	struct mscan_regs *regs;
+	u8 canrflg;
+	int recv_lock_free = 1;
+	int ret = RTDM_IRQ_NONE;
+
+	dev = (struct rtcan_device *)rtdm_irq_get_arg(irq_handle, void);
+	regs = (struct mscan_regs *)dev->base_addr;
+
+	rtdm_lock_get(&dev->device_lock);
+
+	canrflg = in_8(&regs->canrflg);
+
+	ret = RTDM_IRQ_HANDLED;
+
+	/* Transmit Interrupt? */
+	if ((in_8(&regs->cantier) & MSCAN_TXIE0) &&
+	    (in_8(&regs->cantflg) & MSCAN_TXE0)) {
+		out_8(&regs->cantier, 0);
+		/* Wake up a sender */
+		rtdm_sem_up(&dev->tx_sem);
+		dev->tx_count++;
+
+		if (rtcan_loopback_pending(dev)) {
+
+			if (recv_lock_free) {
+				recv_lock_free = 0;
+				rtdm_lock_get(&rtcan_recv_list_lock);
+				rtdm_lock_get(&rtcan_socket_lock);
+			}
+
+			rtcan_loopback(dev);
+		}
+	}
+
+	/* Wakeup interrupt?  */
+	if ((canrflg & MSCAN_WUPIF)) {
+		rtdm_printk("WUPIF interrupt\n");
+	}
+
+	/* Receive Interrupt? */
+	if ((canrflg & MSCAN_RXF)) {
+
+		/* Read out HW registers */
+		rtcan_mscan_rx_interrupt(dev, &skb);
+
+		/* Take more locks. Ensure that they are taken and
+		 * released only once in the IRQ handler. */
+		/* WARNING: Nested locks are dangerous! But they are
+		 * nested only in this routine so a deadlock should
+		 * not be possible. */
+		if (recv_lock_free) {
+			recv_lock_free = 0;
+			rtdm_lock_get(&rtcan_recv_list_lock);
+			rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		/* Pass received frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	}
+
+	/* Error Interrupt? */
+	if ((canrflg & (MSCAN_CSCIF | MSCAN_OVRIF))) {
+		/* Check error condition and fill error frame */
+		rtcan_mscan_err_interrupt(dev, &skb, canrflg);
+
+		if (recv_lock_free) {
+			recv_lock_free = 0;
+			rtdm_lock_get(&rtcan_recv_list_lock);
+			rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		/* Pass error frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	}
+
+	/* Acknowledge the handled interrupt within the controller.
+	 * Only do so for the receiver interrupts.
+	 */
+	if (canrflg)
+		out_8(&regs->canrflg, canrflg);
+
+	if (!recv_lock_free) {
+		rtdm_lock_put(&rtcan_socket_lock);
+		rtdm_lock_put(&rtcan_recv_list_lock);
+	}
+	rtdm_lock_put(&dev->device_lock);
+
+	return ret;
+}
+
+/**
+ *   Set controller into reset mode. Called from @ref rtcan_mscan_set_mode
+ *   (main usage), @ref rtcan_mscan_register and @ref rtcan_mscan_unregister.
+ *
+ *   @param dev      Device structure
+ *   @param lock_ctx Pointer to the saved IRQ context, or NULL if the caller
+ *                   does not hold the device lock
+ *
+ *   @return 0 on success, otherwise:
+ *   - -ENODEV: The INITAK acknowledge bit could not be verified after
+ *              setting INITRQ. See also note.
+ *
+ *   @note Requesting sleep (SLPRQ) or init (INITRQ) mode is a handshake:
+ *   the corresponding acknowledge bit (SLPAK/INITAK) has to be polled
+ *   afterwards, which can take a while, so the device lock, if held, is
+ *   dropped around each busy-sleep iteration.
+ */
+static int rtcan_mscan_mode_stop(struct rtcan_device *dev,
+				 rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0;
+	int rinit = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+	u8 reg;
+
+	state = dev->state;
+	/* If the controller is not operating anyway, bail out */
+	if (!CAN_STATE_OPERATING(state))
+		goto out;
+
+	/* Switch to sleep mode */
+	setbits8(&regs->canctl0, MSCAN_SLPRQ);
+	reg = in_8(&regs->canctl1);
+	while (!(reg & MSCAN_SLPAK) &&
+	        (rinit < MSCAN_SET_MODE_RETRIES)) {
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+		/* Busy sleep 1 microsecond */
+		rtdm_task_busy_sleep(1000);
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+		rinit++;
+		reg = in_8(&regs->canctl1);
+	}
+	/*
+	 * The MSCAN controller will fail to enter sleep mode
+	 * while there is irregular activity on the bus, e.g. if
+	 * some node keeps retransmitting. This behavior is
+	 * undocumented and seems to differ between the MSCAN in
+	 * the MPC5200B and the MPC5200. We proceed in that case,
+	 * since otherwise SLPRQ would be kept set and the
+	 * controller would get stuck. NOTE: INITRQ or CSWAI
+	 * will abort any transmissions still in progress at
+	 * once.
+	 */
+	if (rinit >= MSCAN_SET_MODE_RETRIES)
+		rtdm_printk("rtcan_mscan: device failed to enter sleep mode. "
+				"We proceed anyhow.\n");
+	else
+		dev->state = CAN_STATE_SLEEPING;
+
+	rinit = 0;
+	setbits8(&regs->canctl0, MSCAN_INITRQ);
+
+	reg = in_8(&regs->canctl1);
+	while (!(reg & MSCAN_INITAK) &&
+	        (rinit < MSCAN_SET_MODE_RETRIES)) {
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+		/* Busy sleep 1 microsecond */
+		rtdm_task_busy_sleep(1000);
+		if (likely(lock_ctx != NULL))
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+		rinit++;
+		reg = in_8(&regs->canctl1);
+	}
+	if (rinit >= MSCAN_SET_MODE_RETRIES)
+		ret = -ENODEV;
+
+	/* The volatile state could have changed while we busy-slept. */
+	dev->state = CAN_STATE_STOPPED;
+	/* Wake up waiting senders */
+	rtdm_sem_destroy(&dev->tx_sem);
+
+out:
+	return ret;
+}
+
+/**
+ *   Set controller into operating mode.
+ *
+ *   Called from @ref rtcan_mscan_set_mode in a spin-locked and
+ *   IRQ-disabled context.
+ *
+ *   @param dev      Device structure
+ *   @param lock_ctx Pointer to the saved IRQ context (only used when coming
+ *                   from @ref CAN_STATE_SLEEPING, see also note)
+ *
+ *   @return 0 on success, otherwise:
+ *   - -EINVAL: no baud rate was set before the request to start
+ *
+ *   @note If coming from @c CAN_STATE_SLEEPING, the controller must wait
+ *         some time to avoid bus errors. Measured on a PHYTEC eNET card,
+ *         this time was 110 microseconds.
+ */
+static int rtcan_mscan_mode_start(struct rtcan_device *dev,
+				  rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0, retries = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	/* The state field in the device structure is volatile, so access
+	 * to it is not optimized by the compiler; work on a local copy. */
+	state = dev->state;
+
+	switch (state) {
+	case CAN_STATE_ACTIVE:
+	case CAN_STATE_BUS_WARNING:
+	case CAN_STATE_BUS_PASSIVE:
+		break;
+
+	case CAN_STATE_SLEEPING:
+	case CAN_STATE_STOPPED:
+		/* Set error active state */
+		state = CAN_STATE_ACTIVE;
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+
+		if ((dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)) {
+			setbits8(&regs->canctl1, MSCAN_LISTEN);
+		} else {
+			clrbits8(&regs->canctl1, MSCAN_LISTEN);
+		}
+		if ((dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)) {
+			setbits8(&regs->canctl1, MSCAN_LOOPB);
+		} else {
+			clrbits8(&regs->canctl1, MSCAN_LOOPB);
+		}
+
+		/* Switch to normal mode */
+		clrbits8(&regs->canctl0, MSCAN_INITRQ);
+		clrbits8(&regs->canctl0, MSCAN_SLPRQ);
+		while ((in_8(&regs->canctl1) & MSCAN_INITAK) ||
+		       (in_8(&regs->canctl1) & MSCAN_SLPAK)) {
+			if (likely(lock_ctx != NULL))
+				rtdm_lock_put_irqrestore(&dev->device_lock,
+							 *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			if (likely(lock_ctx != NULL))
+				rtdm_lock_get_irqsave(&dev->device_lock,
+						      *lock_ctx);
+			retries++;
+		}
+		/* Enable interrupts */
+		setbits8(&regs->canrier, MSCAN_RIER);
+
+		break;
+
+	case CAN_STATE_BUS_OFF:
+		/* Trigger bus-off recovery */
+		out_8(&regs->canrier, MSCAN_RIER);
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+		/* Set error active state */
+		state = CAN_STATE_ACTIVE;
+
+		break;
+
+	default:
+		/* Never reached, but we don't want nasty compiler warnings */
+		break;
+	}
+	/* Store new state in device structure (or old state) */
+	dev->state = state;
+
+	return ret;
+}
+
+static int rtcan_mscan_set_bit_time(struct rtcan_device *dev,
+				    struct can_bittime *bit_time,
+				    rtdm_lockctx_t *lock_ctx)
+{
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+	u8 btr0, btr1;
+
+	switch (bit_time->type) {
+	case CAN_BITTIME_BTR:
+		btr0 = bit_time->btr.btr0;
+		btr1 = bit_time->btr.btr1;
+		break;
+
+	case CAN_BITTIME_STD:
+		btr0 = (BTR0_SET_BRP(bit_time->std.brp) |
+			BTR0_SET_SJW(bit_time->std.sjw));
+		btr1 = (BTR1_SET_TSEG1(bit_time->std.prop_seg +
+				       bit_time->std.phase_seg1) |
+			BTR1_SET_TSEG2(bit_time->std.phase_seg2) |
+			BTR1_SET_SAM(bit_time->std.sam));
+		break;
+
+	default:
+		return -EINVAL;
+	}
+
+	out_8(&regs->canbtr0, btr0);
+	out_8(&regs->canbtr1, btr1);
+
+	rtdm_printk("%s: btr0=0x%02x btr1=0x%02x\n", dev->name, btr0, btr1);
+
+	return 0;
+}
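
For the CAN_BITTIME_STD case, a worked example using the BTR macros from
rtcan_mscan_regs.h later in this patch (illustrative numbers, not taken
from the patch): with a 16 MHz CAN clock, brp = 4, sjw = 1,
prop_seg + phase_seg1 = 13 and phase_seg2 = 2 give 1 + 13 + 2 = 16 time
quanta per bit, i.e. 16 MHz / (4 * 16) = 250 kbit/s.

u8 btr0 = BTR0_SET_BRP(4) | BTR0_SET_SJW(1);		/* 0x03 */
u8 btr1 = BTR1_SET_TSEG1(13) | BTR1_SET_TSEG2(2) |
	  BTR1_SET_SAM(0);				/* 0x1c */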
+
+static int rtcan_mscan_set_mode(struct rtcan_device *dev,
+				can_mode_t mode,
+				rtdm_lockctx_t *lock_ctx)
+{
+	int ret = 0, retries = 0;
+	can_state_t state;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	switch (mode) {
+
+	case CAN_MODE_STOP:
+		ret = rtcan_mscan_mode_stop(dev, lock_ctx);
+		break;
+
+	case CAN_MODE_START:
+		ret = rtcan_mscan_mode_start(dev, lock_ctx);
+		break;
+
+	case CAN_MODE_SLEEP:
+
+		state = dev->state;
+
+		/* The controller must be operating, otherwise bail out */
+		if (!CAN_STATE_OPERATING(state)) {
+			ret = -ENETDOWN;
+			goto mode_sleep_out;
+		}
+
+		/* If the controller is already sleeping, bail out */
+		if (state == CAN_STATE_SLEEPING)
+			goto mode_sleep_out;
+
+		/* Remember into which state to return when we
+		 * wake up */
+		dev->state_before_sleep = state;
+		state = CAN_STATE_SLEEPING;
+
+		/* Let's take a nap. (Now I REALLY understand
+		 * the meaning of interrupts ...) */
+		out_8(&regs->canrier, 0);
+		out_8(&regs->cantier, 0);
+		setbits8(&regs->canctl0,
+			 MSCAN_SLPRQ /*| MSCAN_INITRQ*/ | MSCAN_WUPE);
+		while (!(in_8(&regs->canctl1) & MSCAN_SLPAK)) {
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+			if (retries++ >= 1000)
+				break;
+		}
+		rtdm_printk("Fallen asleep after %d tries.\n", retries);
+		clrbits8(&regs->canctl0, MSCAN_INITRQ);
+		while ((in_8(&regs->canctl1) & MSCAN_INITAK)) {
+			rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+			/* Busy sleep 1 microsecond */
+			rtdm_task_busy_sleep(1000);
+			rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+			if (retries++ >= 1000)
+				break;
+		}
+		rtdm_printk("Back to normal after %d tries.\n", retries);
+		out_8(&regs->canrier, MSCAN_WUPIE);
+
+	mode_sleep_out:
+		dev->state = state;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+	}
+
+	return ret;
+}
+
+/**
+ *  Start a transmission on an MSCAN
+ *
+ *  Called through the @c hard_start_xmit hook; this is the completion of
+ *  a send call once hardware access is granted. The spinlock is taken
+ *  before this function is called.
+ *
+ *  @param[in] dev    Device structure
+ *  @param[in] frame  Pointer to the CAN frame which is about to be sent
+ */
+static int rtcan_mscan_start_xmit(struct rtcan_device *dev, can_frame_t *frame)
+{
+	int             i, id;
+	/* "Real" size of the payload */
+	unsigned char   size;
+	/* Content of frame information register */
+	unsigned char   dlc;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	/* Is TX buffer empty? */
+	if (!(in_8(&regs->cantflg) & MSCAN_TXE0)) {
+		rtdm_printk("rtcan_mscan_start_xmit: TX buffer not empty");
+		return -EIO;
+	}
+	/* Select transmit buffer 0, the only one this driver uses. */
+	out_8(&regs->cantbsel, MSCAN_TXE0);
+
+	/* Get DLC and ID */
+	dlc = frame->can_dlc;
+
+	/* If DLC exceeds 8 bytes adjust it to 8 (for the payload) */
+	size = (dlc > 8) ? 8 : dlc;
+
+	id = frame->can_id;
+	if (frame->can_id & CAN_EFF_FLAG) {
+		out_8(&regs->cantxfg.idr[0], (id & 0x1fe00000) >> 21);
+		out_8(&regs->cantxfg.idr[1], ((id & 0x001c0000) >> 13) |
+		      ((id & 0x00038000) >> 15) |
+		      0x18); /* set SRR and IDE bits */
+
+		out_8(&regs->cantxfg.idr[4], (id & 0x00007f80) >> 7);
+		out_8(&regs->cantxfg.idr[5], (id & 0x0000007f) << 1);
+
+		/* RTR? */
+		if (frame->can_id & CAN_RTR_FLAG)
+			setbits8(&regs->cantxfg.idr[5], 0x1);
+		else {
+			clrbits8(&regs->cantxfg.idr[5], 0x1);
+			/* No RTR, write data bytes */
+			for (i = 0; i < size; i++)
+				out_8(&regs->cantxfg.dsr[i + (i / 2) * 2],
+				      frame->data[i]);
+		}
+
+	} else {
+		/* Send standard frame */
+
+		out_8(&regs->cantxfg.idr[0], (id & 0x000007f8) >> 3);
+		out_8(&regs->cantxfg.idr[1], (id & 0x00000007) << 5);
+
+		/* RTR? */
+		if (frame->can_id & CAN_RTR_FLAG)
+			setbits8(&regs->cantxfg.idr[1], 0x10);
+		else {
+			clrbits8(&regs->cantxfg.idr[1], 0x10);
+			/* No RTR, write data bytes */
+			for (i = 0; i < size; i++)
+				out_8(&regs->cantxfg.dsr[i + (i / 2) * 2],
+				      frame->data[i]);
+		}
+	}
+
+	out_8(&regs->cantxfg.dlr, frame->can_dlc);
+	out_8(&regs->cantxfg.tbpr, 0);	/* all messages have the same prio */
+
+	/* Trigger transmission. */
+	out_8(&regs->cantflg, MSCAN_TXE0);
+
+	/* Enable interrupt. */
+	setbits8(&regs->cantier, MSCAN_TXIE0);
+
+	return 0;
+}
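
The extended-frame branch above scatters the 29-bit identifier across four
identifier registers, around the SRR and IDE bits in IDR1. A standalone
sketch of the packing (illustrative only, not part of the patch):

#include <stdio.h>
#include <stdint.h>

/* Pack a 29-bit identifier the way rtcan_mscan_start_xmit() does:
 * ID28..21 into IDR0, ID20..18 and ID17..15 around the SRR/IDE bits
 * (0x18) in IDR1, ID14..7 into IDR4, ID6..0 into IDR5 bits 7..1. */
int main(void)
{
	uint32_t id = 0x1234567;	/* sample 29-bit identifier */
	uint8_t idr0 = (id & 0x1fe00000) >> 21;
	uint8_t idr1 = ((id & 0x001c0000) >> 13) |
		       ((id & 0x00038000) >> 15) | 0x18;
	uint8_t idr4 = (id & 0x00007f80) >> 7;
	uint8_t idr5 = (id & 0x0000007f) << 1;

	printf("idr0=0x%02x idr1=0x%02x idr4=0x%02x idr5=0x%02x\n",
	       idr0, idr1, idr4, idr5);
	return 0;
}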
+
+/**
+ *  MSCAN Chip configuration
+ *
+ *  Called during @ref init_module. The configuration registers which must
+ *  be set only once are written with the right values here. The controller
+ *  is left in reset mode and does not enter operating mode until the IOCTL
+ *  for starting it is issued.
+ *
+ *  @param[in] regs         Register block of the controller to be configured
+ *  @param[in] mscan_clksrc Clock source selection; nonzero sets the CLKSRC
+ *                          bit in CANCTL1
+ */
+static inline void __init mscan_chip_config(struct mscan_regs *regs,
+					    int mscan_clksrc)
+{
+	/* Choose IP bus as clock source.
+	 */
+	if (mscan_clksrc)
+		setbits8(&regs->canctl1, MSCAN_CLKSRC);
+	clrbits8(&regs->canctl1, MSCAN_LISTEN);
+
+	/* Configure MSCAN to accept all incoming messages.
+	 */
+	out_8(&regs->canidar0, 0x00);
+	out_8(&regs->canidar1, 0x00);
+	out_8(&regs->canidar2, 0x00);
+	out_8(&regs->canidar3, 0x00);
+	out_8(&regs->canidmr0, 0xFF);
+	out_8(&regs->canidmr1, 0xFF);
+	out_8(&regs->canidmr2, 0xFF);
+	out_8(&regs->canidmr3, 0xFF);
+	out_8(&regs->canidar4, 0x00);
+	out_8(&regs->canidar5, 0x00);
+	out_8(&regs->canidar6, 0x00);
+	out_8(&regs->canidar7, 0x00);
+	out_8(&regs->canidmr4, 0xFF);
+	out_8(&regs->canidmr5, 0xFF);
+	out_8(&regs->canidmr6, 0xFF);
+	out_8(&regs->canidmr7, 0xFF);
+	clrbits8(&regs->canidac, MSCAN_IDAM0 | MSCAN_IDAM1);
+}
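
mscan_chip_config() programs an accept-everything filter: all IDMR mask
bits are 1, i.e. "don't care". To actually filter, the acceptance and mask
registers would be narrowed instead. A hypothetical sketch (not part of
the patch) for matching only the standard ID 0x123 with the first 32-bit
filter, assuming the standard-frame IDR layout from the MPC5xxx reference
manuals (ID10..3 in IDR0; ID2..0, RTR and IDE in IDR1 bits 7..5, 4 and 3):

out_8(&regs->canidar0, (0x123 >> 3) & 0xff);	/* ID10..3 */
out_8(&regs->canidar1, (0x123 & 0x07) << 5);	/* ID2..0, IDE = 0 */
out_8(&regs->canidmr0, 0x00);	/* compare all of ID10..3 */
out_8(&regs->canidmr1, 0x17);	/* ignore RTR and the unused low bits */
out_8(&regs->canidmr2, 0xff);	/* don't care */
out_8(&regs->canidmr3, 0xff);	/* don't care */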
+
+/**
+ *  MSCAN Chip registration
+ *
+ *  Called during @ref init_module.
+ *
+ *  @param[in] dev          Device structure of the controller to be registered
+ *  @param[in] irq          Interrupt line of the controller
+ *  @param[in] mscan_clksrc Clock source to be used
+ */
+int rtcan_mscan_register(struct rtcan_device *dev, int irq, int mscan_clksrc)
+{
+	int ret;
+	struct mscan_regs *regs;
+
+	regs = (struct mscan_regs *)dev->base_addr;
+
+	/* Enable MSCAN module. */
+	setbits8(&regs->canctl1, MSCAN_CANE);
+	udelay(100);
+
+	/* Set dummy state for following call */
+	dev->state = CAN_STATE_ACTIVE;
+
+	/* Enter reset mode */
+	rtcan_mscan_mode_stop(dev, NULL);
+
+	/* Give device an interface name (so that programs using this driver
+	   don't need to know the device ID) */
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	dev->hard_start_xmit = rtcan_mscan_start_xmit;
+	dev->do_set_mode = rtcan_mscan_set_mode;
+	dev->do_set_bit_time = rtcan_mscan_set_bit_time;
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	dev->bittiming_const = &mscan_bittiming_const;
+#endif
+
+	/* Register IRQ handler and pass device structure as arg */
+	ret = rtdm_irq_request(&dev->irq_handle, irq, rtcan_mscan_interrupt,
+			       0, RTCAN_DRV_NAME, (void *)dev);
+	if (ret) {
+		printk("ERROR! rtdm_irq_request for IRQ %d failed\n", irq);
+		goto out_can_disable;
+	}
+
+	mscan_chip_config(regs, mscan_clksrc);
+
+	/* Register RTDM device */
+	ret = rtcan_dev_register(dev);
+	if (ret) {
+		printk(KERN_ERR
+		       "ERROR while trying to register RTCAN device!\n");
+		goto out_irq_free;
+	}
+
+	rtcan_mscan_create_proc(dev);
+
+	return 0;
+
+out_irq_free:
+	rtdm_irq_free(&dev->irq_handle);
+
+out_can_disable:
+	/* Disable MSCAN module. */
+	clrbits8(&regs->canctl1, MSCAN_CANE);
+
+	return ret;
+}
+
+/**
+ *  MSCAN Chip deregistration
+ *
+ *  Called during @ref cleanup_module
+ *
+ *  @param[in] dev Device ID of the controller to be unregistered
+ */
+int rtcan_mscan_unregister(struct rtcan_device *dev)
+{
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+
+	printk("Unregistering %s device %s\n", RTCAN_DRV_NAME, dev->name);
+
+	rtcan_mscan_mode_stop(dev, NULL);
+	rtdm_irq_free(&dev->irq_handle);
+	rtcan_mscan_remove_proc(dev);
+	rtcan_dev_unregister(dev);
+
+	/* Disable MSCAN module. */
+	clrbits8(&regs->canctl1, MSCAN_CANE);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h
new file mode 100644
index 0000000..654a0f5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan.h
@@ -0,0 +1,38 @@
+/*
+ * Copyright (C) 2009 Wolfgang Grandegger <wg@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_MSCAN_H_
+#define __RTCAN_MSCAN_H_
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "rtcan_mscan"
+
+/* MSCAN type variants */
+enum {
+	MSCAN_TYPE_MPC5200,
+	MSCAN_TYPE_MPC5121
+};
+
+extern int rtcan_mscan_register(struct rtcan_device *dev, int irq,
+				       int mscan_clksrc);
+extern int rtcan_mscan_unregister(struct rtcan_device *dev);
+
+extern int rtcan_mscan_create_proc(struct rtcan_device* dev);
+extern void rtcan_mscan_remove_proc(struct rtcan_device* dev);
+
+#endif /* __RTCAN_MSCAN_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c
new file mode 100644
index 0000000..de08d94
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_mpc5xxx.c
@@ -0,0 +1,392 @@
+/*
+ * CAN bus driver for the Freescale MPC5xxx embedded CPU.
+ *
+ * Copyright (C) 2004-2005 Andrey Volkov <avolkov@varma-el.com>,
+ *                         Varma Electronics Oy
+ * Copyright (C) 2008-2010 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2009 Wolfram Sang, Pengutronix <w.sang@pengutronix.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/netdevice.h>
+#include <linux/of_platform.h>
+#include <sysdev/fsl_soc.h>
+#include <linux/clk.h>
+#include <linux/io.h>
+#include <asm/mpc52xx.h>
+
+#include "rtcan_dev.h"
+#include "rtcan_mscan_regs.h"
+#include "rtcan_mscan.h"
+
+#define of_device platform_device
+#define of_platform_driver platform_driver
+#define of_register_platform_driver platform_driver_register
+#define of_unregister_platform_driver platform_driver_unregister
+
+static char mscan_ctrl_name_mpc5200[] = "MSCAN-MPC5200";
+static char mscan_ctrl_name_mpc512x[] = "MSCAN-MPC512x";
+static char mscan_board_name[] = "unknown";
+
+struct mpc5xxx_can_data {
+	unsigned int type;
+	u32 (*get_clock)(struct of_device *ofdev, const char *clock_name,
+			 int *mscan_clksrc);
+};
+
+#ifdef CONFIG_PPC_MPC52xx
+static struct of_device_id mpc52xx_cdm_ids[] = {
+	{ .compatible = "fsl,mpc5200-cdm", },
+	{}
+};
+
+static u32 mpc52xx_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	unsigned int pvr;
+	struct mpc52xx_cdm  __iomem *cdm;
+	struct device_node *np_cdm;
+	unsigned int freq;
+	u32 val;
+
+	pvr = mfspr(SPRN_PVR);
+
+	/*
+	 * Either the oscillator clock (SYS_XTAL_IN) or the IP bus clock
+	 * (IP_CLK) can be selected as MSCAN clock source. According to
+	 * the MPC5200 user's manual, the oscillator clock is the better
+	 * choice as it has less jitter. For this reason, it is selected
+	 * by default. Unfortunately, it cannot be selected for the old
+	 * MPC5200 Rev. A chips due to a hardware bug (check errata).
+	 */
+	if (clock_name && strcmp(clock_name, "ip") == 0)
+		*mscan_clksrc = MSCAN_CLKSRC_BUS;
+	else
+		*mscan_clksrc = MSCAN_CLKSRC_XTAL;
+
+	freq = mpc5xxx_get_bus_frequency(mpc5xxx_get_of_node(ofdev));
+	if (!freq)
+		return 0;
+
+	if (*mscan_clksrc == MSCAN_CLKSRC_BUS || pvr == 0x80822011)
+		return freq;
+
+	/* Determine SYS_XTAL_IN frequency from the clock domain settings */
+	np_cdm = of_find_matching_node(NULL, mpc52xx_cdm_ids);
+	if (!np_cdm) {
+		dev_err(&ofdev->dev, "can't get clock node!\n");
+		return 0;
+	}
+	cdm = of_iomap(np_cdm, 0);
+
+	if (in_8(&cdm->ipb_clk_sel) & 0x1)
+		freq *= 2;
+	val = in_be32(&cdm->rstcfg);
+
+	freq *= (val & (1 << 5)) ? 8 : 4;
+	freq /= (val & (1 << 6)) ? 12 : 16;
+
+	of_node_put(np_cdm);
+	iounmap(cdm);
+
+	return freq;
+}
+#else /* !CONFIG_PPC_MPC52xx */
+static u32 mpc52xx_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_MPC52xx */
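
The SYS_XTAL_IN reconstruction in mpc52xx_can_get_clock() reverses the
clock distribution module settings. With illustrative numbers (not
measured values): ipb_clk_sel set, rstcfg bit 5 clear and bit 6 clear,
starting from a 33 MHz IPB bus frequency:

u32 freq = 33000000;	/* bus frequency reported by the platform */
freq *= 2;		/* ipb_clk_sel bit 0 set */
freq *= 4;		/* rstcfg bit 5 clear    */
freq /= 16;		/* rstcfg bit 6 clear    */
/* freq == 16500000, i.e. a 16.5 MHz SYS_XTAL_IN estimate */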
+
+#ifdef CONFIG_PPC_MPC512x
+struct mpc512x_clockctl {
+	u32 spmr;		/* System PLL Mode Reg */
+	u32 sccr[2];		/* System Clk Ctrl Reg 1 & 2 */
+	u32 scfr1;		/* System Clk Freq Reg 1 */
+	u32 scfr2;		/* System Clk Freq Reg 2 */
+	u32 reserved;
+	u32 bcr;		/* Bread Crumb Reg */
+	u32 pccr[12];		/* PSC Clk Ctrl Reg 0-11 */
+	u32 spccr;		/* SPDIF Clk Ctrl Reg */
+	u32 cccr;		/* CFM Clk Ctrl Reg */
+	u32 dccr;		/* DIU Clk Cnfg Reg */
+	u32 mccr[4];		/* MSCAN Clk Ctrl Reg 0-3 */
+};
+
+static struct of_device_id mpc512x_clock_ids[] = {
+	{ .compatible = "fsl,mpc5121-clock", },
+	{}
+};
+
+static u32 mpc512x_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	struct mpc512x_clockctl __iomem *clockctl;
+	struct device_node *np_clock;
+	struct clk *sys_clk, *ref_clk;
+	int plen, clockidx, clocksrc = -1;
+	u32 sys_freq, val, clockdiv = 1, freq = 0;
+	const u32 *pval;
+
+	np_clock = of_find_matching_node(NULL, mpc512x_clock_ids);
+	if (!np_clock) {
+		dev_err(&ofdev->dev, "couldn't find clock node\n");
+		return 0;
+	}
+	clockctl = of_iomap(np_clock, 0);
+	if (!clockctl) {
+		dev_err(&ofdev->dev, "couldn't map clock registers\n");
+		return 0;
+	}
+
+	/* Determine the MSCAN device index from the physical address */
+	pval = of_get_property(mpc5xxx_get_of_node(ofdev), "reg", &plen);
+	BUG_ON(!pval || plen < sizeof(*pval));
+	clockidx = (*pval & 0x80) ? 1 : 0;
+	if (*pval & 0x2000)
+		clockidx += 2;
+
+	/*
+	 * Clock source and divider selection: 3 different clock sources
+	 * can be selected: "ip", "ref" or "sys". For the latter two, a
+	 * clock divider can be defined as well. If the clock source is
+	 * not specified by the device tree, we first try to find an
+	 * optimal CAN source clock based on the system clock. If that
+	 * is not possible, the reference clock will be used.
+	 */
+	if (clock_name && !strcmp(clock_name, "ip")) {
+		*mscan_clksrc = MSCAN_CLKSRC_IPS;
+		freq = mpc5xxx_get_bus_frequency(mpc5xxx_get_of_node(ofdev));
+	} else {
+		*mscan_clksrc = MSCAN_CLKSRC_BUS;
+
+		pval = of_get_property(mpc5xxx_get_of_node(ofdev),
+				       "fsl,mscan-clock-divider", &plen);
+		if (pval && plen == sizeof(*pval))
+			clockdiv = *pval;
+		if (!clockdiv)
+			clockdiv = 1;
+
+		if (!clock_name || !strcmp(clock_name, "sys")) {
+			sys_clk = clk_get(&ofdev->dev, "sys_clk");
+			if (!sys_clk) {
+				dev_err(&ofdev->dev, "couldn't get sys_clk\n");
+				goto exit_unmap;
+			}
+			/* Get and round up/down sys clock rate */
+			sys_freq = 1000000 *
+				((clk_get_rate(sys_clk) + 499999) / 1000000);
+
+			if (!clock_name) {
+				/* A multiple of 16 MHz would be optimal */
+				if ((sys_freq % 16000000) == 0) {
+					clocksrc = 0;
+					clockdiv = sys_freq / 16000000;
+					freq = sys_freq / clockdiv;
+				}
+			} else {
+				clocksrc = 0;
+				freq = sys_freq / clockdiv;
+			}
+		}
+
+		if (clocksrc < 0) {
+			ref_clk = clk_get(&ofdev->dev, "ref_clk");
+			if (!ref_clk) {
+				dev_err(&ofdev->dev, "couldn't get ref_clk\n");
+				goto exit_unmap;
+			}
+			clocksrc = 1;
+			freq = clk_get_rate(ref_clk) / clockdiv;
+		}
+	}
+
+	/* Disable clock */
+	out_be32(&clockctl->mccr[clockidx], 0x0);
+	if (clocksrc >= 0) {
+		/* Set source and divider */
+		val = (clocksrc << 14) | ((clockdiv - 1) << 17);
+		out_be32(&clockctl->mccr[clockidx], val);
+		/* Enable clock */
+		out_be32(&clockctl->mccr[clockidx], val | 0x10000);
+	}
+
+	/* Enable MSCAN clock domain */
+	val = in_be32(&clockctl->sccr[1]);
+	if (!(val & (1 << 25)))
+		out_be32(&clockctl->sccr[1], val | (1 << 25));
+
+	dev_dbg(&ofdev->dev, "using '%s' with frequency divider %d\n",
+		*mscan_clksrc == MSCAN_CLKSRC_IPS ? "ips_clk" :
+		clocksrc == 1 ? "ref_clk" : "sys_clk", clockdiv);
+
+exit_unmap:
+	of_node_put(np_clock);
+	iounmap(clockctl);
+
+	return freq;
+}
+#else /* !CONFIG_PPC_MPC512x */
+static u32 mpc512x_can_get_clock(struct of_device *ofdev,
+				 const char *clock_name,
+				 int *mscan_clksrc)
+{
+	return 0;
+}
+#endif /* CONFIG_PPC_MPC512x */
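
To make the MCCR programming in mpc512x_can_get_clock() concrete, with
illustrative values (not from the patch): a 160 MHz sys clock is a
multiple of 16 MHz, so the code picks clocksrc = 0 and clockdiv = 10 for
a 16 MHz MSCAN clock and performs, in effect:

u32 val = (0 << 14) | ((10 - 1) << 17);		/* 0x00120000 */
out_be32(&clockctl->mccr[clockidx], 0);		/* disable clock  */
out_be32(&clockctl->mccr[clockidx], val);	/* source/divider */
out_be32(&clockctl->mccr[clockidx], val | 0x10000); /* enable */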
+
+static struct of_device_id mpc5xxx_can_table[];
+static int mpc5xxx_can_probe(struct of_device *ofdev)
+{
+	struct device_node *np = mpc5xxx_get_of_node(ofdev);
+	struct mpc5xxx_can_data *data;
+	struct rtcan_device *dev;
+	void __iomem *base;
+	const char *clock_name = NULL;
+	int irq, mscan_clksrc = 0;
+	int err = -ENOMEM;
+
+	const struct of_device_id *id;
+
+	id = of_match_device(mpc5xxx_can_table, &ofdev->dev);
+	if (!id)
+		return -EINVAL;
+
+	data = (struct mpc5xxx_can_data *)id->data;
+
+	base = of_iomap(np, 0);
+	if (!base) {
+		dev_err(&ofdev->dev, "couldn't ioremap\n");
+		return err;
+	}
+
+	irq = irq_of_parse_and_map(np, 0);
+	if (!irq) {
+		dev_err(&ofdev->dev, "no irq found\n");
+		err = -ENODEV;
+		goto exit_unmap_mem;
+	}
+
+	dev = rtcan_dev_alloc(0, 0);
+	if (!dev)
+		goto exit_dispose_irq;
+
+	clock_name = of_get_property(np, "fsl,mscan-clock-source", NULL);
+
+	BUG_ON(!data);
+	dev->can_sys_clock = data->get_clock(ofdev, clock_name,
+					     &mscan_clksrc);
+	if (!dev->can_sys_clock) {
+		dev_err(&ofdev->dev, "couldn't get MSCAN clock properties\n");
+		goto exit_free_mscan;
+	}
+
+	if (data->type == MSCAN_TYPE_MPC5121)
+		dev->ctrl_name = mscan_ctrl_name_mpc512x;
+	else
+		dev->ctrl_name = mscan_ctrl_name_mpc5200;
+	dev->board_name = mscan_board_name;
+	dev->base_addr = (unsigned long)base;
+
+	err = rtcan_mscan_register(dev, irq, mscan_clksrc);
+	if (err) {
+		dev_err(&ofdev->dev, "registering %s failed (err=%d)\n",
+			RTCAN_DRV_NAME, err);
+		goto exit_free_mscan;
+	}
+
+	dev_set_drvdata(&ofdev->dev, dev);
+
+	dev_info(&ofdev->dev, "MSCAN at 0x%p, irq %d, clock %d Hz\n",
+		 base, irq, dev->can_sys_clock);
+
+	return 0;
+
+exit_free_mscan:
+	rtcan_dev_free(dev);
+exit_dispose_irq:
+	irq_dispose_mapping(irq);
+exit_unmap_mem:
+	iounmap(base);
+
+	return err;
+}
+
+static int mpc5xxx_can_remove(struct of_device *ofdev)
+{
+	struct rtcan_device *dev = dev_get_drvdata(&ofdev->dev);
+
+	dev_set_drvdata(&ofdev->dev, NULL);
+
+	rtcan_mscan_unregister(dev);
+	iounmap((void *)dev->base_addr);
+	rtcan_dev_free(dev);
+
+	return 0;
+}
+
+static struct mpc5xxx_can_data mpc5200_can_data = {
+	.type = MSCAN_TYPE_MPC5200,
+	.get_clock = mpc52xx_can_get_clock,
+};
+
+static struct mpc5xxx_can_data mpc5121_can_data = {
+	.type = MSCAN_TYPE_MPC5121,
+	.get_clock = mpc512x_can_get_clock,
+};
+
+static struct of_device_id mpc5xxx_can_table[] = {
+	{ .compatible = "fsl,mpc5200-mscan", .data = &mpc5200_can_data, },
+	/* Note that only MPC5121 Rev. 2 (and later) is supported */
+	{ .compatible = "fsl,mpc5121-mscan", .data = &mpc5121_can_data, },
+	{},
+};
+
+static struct of_platform_driver mpc5xxx_can_driver = {
+	.driver = {
+		.owner = THIS_MODULE,
+		.name = RTCAN_DRV_NAME,
+		.of_match_table = mpc5xxx_can_table,
+	},
+	.probe = mpc5xxx_can_probe,
+	.remove = mpc5xxx_can_remove,
+};
+
+static int __init mpc5xxx_can_init(void)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	return of_register_platform_driver(&mpc5xxx_can_driver);
+}
+module_init(mpc5xxx_can_init);
+
+static void __exit mpc5xxx_can_exit(void)
+{
+	of_unregister_platform_driver(&mpc5xxx_can_driver);
+}
+module_exit(mpc5xxx_can_exit);
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RT-Socket-CAN driver for MPC5200 and MPC521x");
+MODULE_LICENSE("GPL v2");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c
new file mode 100644
index 0000000..91f8f84
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_proc.c
@@ -0,0 +1,151 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include "rtcan_dev.h"
+#include "rtcan_internal.h"
+#include "rtcan_mscan_regs.h"
+
+#define MSCAN_REG_ARGS(reg) \
+	"%-8s 0x%02x\n", #reg, (int)(in_8(&regs->reg)) & 0xff
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+
+static int rtcan_mscan_proc_regs(struct seq_file *p, void *data)
+{
+	struct rtcan_device *dev = (struct rtcan_device *)data;
+	struct mscan_regs *regs = (struct mscan_regs *)dev->base_addr;
+#ifdef MPC5xxx_GPIO
+	struct mpc5xxx_gpio *gpio = (struct mpc5xxx_gpio *)MPC5xxx_GPIO;
+	u32 port_config;
+#endif
+	u8 canctl0, canctl1;
+
+	seq_printf(p, "MSCAN registers at %p\n", regs);
+
+	canctl0 = in_8(&regs->canctl0);
+	seq_printf(p, "canctl0  0x%02x%s%s%s%s%s%s%s%s\n",
+		   canctl0,
+		   (canctl0 & MSCAN_RXFRM) ? " rxfrm" :"",
+		   (canctl0 & MSCAN_RXACT) ? " rxact" :"",
+		   (canctl0 & MSCAN_CSWAI) ? " cswai" :"",
+		   (canctl0 & MSCAN_SYNCH) ? " synch" :"",
+		   (canctl0 & MSCAN_TIME)  ? " time"  :"",
+		   (canctl0 & MSCAN_WUPE)  ? " wupe"  :"",
+		   (canctl0 & MSCAN_SLPRQ) ? " slprq" :"",
+		   (canctl0 & MSCAN_INITRQ)? " initrq":"" );
+	canctl1 = in_8(&regs->canctl1);
+	seq_printf(p, "canctl1  0x%02x%s%s%s%s%s%s%s\n",
+		   canctl1,
+		   (canctl1 & MSCAN_CANE)  ? " cane"  :"",
+		   (canctl1 & MSCAN_CLKSRC)? " clksrc":"",
+		   (canctl1 & MSCAN_LOOPB) ? " loopb" :"",
+		   (canctl1 & MSCAN_LISTEN)? " listen":"",
+		   (canctl1 & MSCAN_WUPM)  ? " wupm"  :"",
+		   (canctl1 & MSCAN_SLPAK) ? " slpak" :"",
+		   (canctl1 & MSCAN_INITAK)? " initak":"");
+	seq_printf(p, MSCAN_REG_ARGS(canbtr0 ));
+	seq_printf(p, MSCAN_REG_ARGS(canbtr1 ));
+	seq_printf(p, MSCAN_REG_ARGS(canrflg ));
+	seq_printf(p, MSCAN_REG_ARGS(canrier ));
+	seq_printf(p, MSCAN_REG_ARGS(cantflg ));
+	seq_printf(p, MSCAN_REG_ARGS(cantier ));
+	seq_printf(p, MSCAN_REG_ARGS(cantarq ));
+	seq_printf(p, MSCAN_REG_ARGS(cantaak ));
+	seq_printf(p, MSCAN_REG_ARGS(cantbsel));
+	seq_printf(p, MSCAN_REG_ARGS(canidac ));
+	seq_printf(p, MSCAN_REG_ARGS(canrxerr));
+	seq_printf(p, MSCAN_REG_ARGS(cantxerr));
+	seq_printf(p, MSCAN_REG_ARGS(canidar0));
+	seq_printf(p, MSCAN_REG_ARGS(canidar1));
+	seq_printf(p, MSCAN_REG_ARGS(canidar2));
+	seq_printf(p, MSCAN_REG_ARGS(canidar3));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr0));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr1));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr2));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr3));
+	seq_printf(p, MSCAN_REG_ARGS(canidar4));
+	seq_printf(p, MSCAN_REG_ARGS(canidar5));
+	seq_printf(p, MSCAN_REG_ARGS(canidar6));
+	seq_printf(p, MSCAN_REG_ARGS(canidar7));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr4));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr5));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr6));
+	seq_printf(p, MSCAN_REG_ARGS(canidmr7));
+
+#ifdef MPC5xxx_GPIO
+	seq_printf(p, "GPIO registers\n");
+	port_config = in_be32(&gpio->port_config);
+	seq_printf(p, "port_config 0x%08x %s\n", port_config,
+		   (port_config & 0x10000000 ?
+			"CAN1 on I2C1, CAN2 on TMR0/1 pins" :
+			(port_config & 0x70) == 0x10 ?
+				"CAN1/2 on PSC2 pins" :
+				"MSCAN1/2 not routed"));
+#endif
+
+	return 0;
+}
+
+static int rtcan_mscan_proc_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_mscan_proc_regs, PDE_DATA(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_mscan_proc_regs_ops,
+			rtcan_mscan_proc_regs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int rtcan_mscan_create_proc(struct rtcan_device* dev)
+{
+	if (!dev->proc_root)
+		return -EINVAL;
+
+	proc_create_data("registers", S_IFREG | S_IRUGO | S_IWUSR,
+			 dev->proc_root, &rtcan_mscan_proc_regs_ops, dev);
+	return 0;
+}
+
+void rtcan_mscan_remove_proc(struct rtcan_device* dev)
+{
+	if (!dev->proc_root)
+		return;
+
+	remove_proc_entry("registers", dev->proc_root);
+}
+
+#else /* !CONFIG_XENO_DRIVERS_CAN_DEBUG */
+
+void rtcan_mscan_remove_proc(struct rtcan_device* dev)
+{
+}
+
+int rtcan_mscan_create_proc(struct rtcan_device* dev)
+{
+	return 0;
+}
+#endif	/* CONFIG_XENO_DRIVERS_CAN_DEBUG */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h
new file mode 100644
index 0000000..11b85a9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/mscan/rtcan_mscan_regs.h
@@ -0,0 +1,226 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Based on linux-2.4.25/include/asm-ppc/mpc5xxx.h
+ * Prototypes, etc. for the Motorola MPC5xxx embedded cpu chips
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_MSCAN_REGS_H_
+#define __RTCAN_MSCAN_REGS_H_
+
+#include <linux/version.h>
+#include <linux/of_platform.h>
+#include <asm/mpc52xx.h>
+
+static inline void __iomem *mpc5xxx_gpio_find_and_map(void)
+{
+	struct device_node *ofn;
+	ofn = of_find_compatible_node(NULL, NULL, "mpc5200-gpio");
+	if (!ofn)
+		ofn = of_find_compatible_node(NULL, NULL, "fsl,mpc5200-gpio");
+	return ofn ? of_iomap(ofn, 0) : NULL;
+}
+
+#define MPC5xxx_GPIO	mpc5xxx_gpio_find_and_map()
+#define mpc5xxx_gpio	mpc52xx_gpio
+
+#define mpc5xxx_get_of_node(ofdev) (ofdev)->dev.of_node
+
+#define MSCAN_CAN1_ADDR	(MSCAN_MBAR + 0x0900) /* MSCAN Module 1 */
+#define MSCAN_CAN2_ADDR	(MSCAN_MBAR + 0x0980) /* MSCAN Module 2 */
+#define MSCAN_SIZE	0x80
+
+/* MSCAN control register 0 (CANCTL0) bits */
+#define MSCAN_RXFRM	0x80
+#define MSCAN_RXACT	0x40
+#define MSCAN_CSWAI	0x20
+#define MSCAN_SYNCH	0x10
+#define MSCAN_TIME	0x08
+#define MSCAN_WUPE	0x04
+#define MSCAN_SLPRQ	0x02
+#define MSCAN_INITRQ	0x01
+
+/* MSCAN control register 1 (CANCTL1) bits */
+#define MSCAN_CANE	0x80
+#define MSCAN_CLKSRC	0x40
+#define MSCAN_LOOPB	0x20
+#define MSCAN_LISTEN	0x10
+#define MSCAN_WUPM	0x04
+#define MSCAN_SLPAK	0x02
+#define MSCAN_INITAK	0x01
+
+/* MSCAN receiver flag register (CANRFLG) bits */
+#define MSCAN_WUPIF	0x80
+#define MSCAN_CSCIF	0x40
+#define MSCAN_RSTAT1	0x20
+#define MSCAN_RSTAT0	0x10
+#define MSCAN_TSTAT1	0x08
+#define MSCAN_TSTAT0	0x04
+#define MSCAN_OVRIF	0x02
+#define MSCAN_RXF	0x01
+
+/* MSCAN receiver interrupt enable register (CANRIER) bits */
+#define MSCAN_WUPIE	0x80
+#define MSCAN_CSCIE	0x40
+#define MSCAN_RSTATE1	0x20
+#define MSCAN_RSTATE0	0x10
+#define MSCAN_TSTATE1	0x08
+#define MSCAN_TSTATE0	0x04
+#define MSCAN_OVRIE	0x02
+#define MSCAN_RXFIE	0x01
+
+/* MSCAN transmitter flag register (CANTFLG) bits */
+#define MSCAN_TXE2	0x04
+#define MSCAN_TXE1	0x02
+#define MSCAN_TXE0	0x01
+#define MSCAN_TXE	(MSCAN_TXE2 | MSCAN_TXE1 | MSCAN_TXE0)
+
+/* MSCAN transmitter interrupt enable register (CANTIER) bits */
+#define MSCAN_TXIE2	0x04
+#define MSCAN_TXIE1	0x02
+#define MSCAN_TXIE0	0x01
+#define MSCAN_TXIE	(MSCAN_TXIE2 | MSCAN_TXIE1 | MSCAN_TXIE0)
+
+/* MSCAN transmitter message abort request (CANTARQ) bits */
+#define MSCAN_ABTRQ2	0x04
+#define MSCAN_ABTRQ1	0x02
+#define MSCAN_ABTRQ0	0x01
+
+/* MSCAN transmitter message abort ack (CANTAAK) bits */
+#define MSCAN_ABTAK2	0x04
+#define MSCAN_ABTAK1	0x02
+#define MSCAN_ABTAK0	0x01
+
+/* MSCAN transmit buffer selection (CANTBSEL) bits */
+#define MSCAN_TX2	0x04
+#define MSCAN_TX1	0x02
+#define MSCAN_TX0	0x01
+
+/* MSCAN ID acceptance control register (CANIDAC) bits */
+#define MSCAN_IDAM1	0x20
+#define MSCAN_IDAM0	0x10
+#define MSCAN_IDHIT2	0x04
+#define MSCAN_IDHIT1	0x02
+#define MSCAN_IDHIT0	0x01
+
+struct mscan_msgbuf {
+	volatile u8  idr[0x8];		/* 0x00 */
+	volatile u8  dsr[0x10];		/* 0x08 */
+	volatile u8  dlr;		/* 0x18 */
+	volatile u8  tbpr;		/* 0x19 */	/* This register is not applicable for receive buffers */
+	volatile u16 rsrv1;		/* 0x1A */
+	volatile u8  tsrh;		/* 0x1C */
+	volatile u8  tsrl;		/* 0x1D */
+	volatile u16 rsrv2;		/* 0x1E */
+};
+
+struct mscan_regs {
+	volatile u8  canctl0;		/* MSCAN + 0x00 */
+	volatile u8  canctl1;		/* MSCAN + 0x01 */
+	volatile u16 rsrv1;		/* MSCAN + 0x02 */
+	volatile u8  canbtr0;		/* MSCAN + 0x04 */
+	volatile u8  canbtr1;		/* MSCAN + 0x05 */
+	volatile u16 rsrv2;		/* MSCAN + 0x06 */
+	volatile u8  canrflg;		/* MSCAN + 0x08 */
+	volatile u8  canrier;		/* MSCAN + 0x09 */
+	volatile u16 rsrv3;		/* MSCAN + 0x0A */
+	volatile u8  cantflg;		/* MSCAN + 0x0C */
+	volatile u8  cantier;		/* MSCAN + 0x0D */
+	volatile u16 rsrv4;		/* MSCAN + 0x0E */
+	volatile u8  cantarq;		/* MSCAN + 0x10 */
+	volatile u8  cantaak;		/* MSCAN + 0x11 */
+	volatile u16 rsrv5;		/* MSCAN + 0x12 */
+	volatile u8  cantbsel;		/* MSCAN + 0x14 */
+	volatile u8  canidac;		/* MSCAN + 0x15 */
+	volatile u16 rsrv6[3];		/* MSCAN + 0x16 */
+	volatile u8  canrxerr;		/* MSCAN + 0x1C */
+	volatile u8  cantxerr;		/* MSCAN + 0x1D */
+	volatile u16 rsrv7;		/* MSCAN + 0x1E */
+	volatile u8  canidar0;		/* MSCAN + 0x20 */
+	volatile u8  canidar1;		/* MSCAN + 0x21 */
+	volatile u16 rsrv8;		/* MSCAN + 0x22 */
+	volatile u8  canidar2;		/* MSCAN + 0x24 */
+	volatile u8  canidar3;		/* MSCAN + 0x25 */
+	volatile u16 rsrv9;		/* MSCAN + 0x26 */
+	volatile u8  canidmr0;		/* MSCAN + 0x28 */
+	volatile u8  canidmr1;		/* MSCAN + 0x29 */
+	volatile u16 rsrv10;		/* MSCAN + 0x2A */
+	volatile u8  canidmr2;		/* MSCAN + 0x2C */
+	volatile u8  canidmr3;		/* MSCAN + 0x2D */
+	volatile u16 rsrv11;		/* MSCAN + 0x2E */
+	volatile u8  canidar4;		/* MSCAN + 0x30 */
+	volatile u8  canidar5;		/* MSCAN + 0x31 */
+	volatile u16 rsrv12;		/* MSCAN + 0x32 */
+	volatile u8  canidar6;		/* MSCAN + 0x34 */
+	volatile u8  canidar7;		/* MSCAN + 0x35 */
+	volatile u16 rsrv13;		/* MSCAN + 0x36 */
+	volatile u8  canidmr4;		/* MSCAN + 0x38 */
+	volatile u8  canidmr5;		/* MSCAN + 0x39 */
+	volatile u16 rsrv14;		/* MSCAN + 0x3A */
+	volatile u8  canidmr6;		/* MSCAN + 0x3C */
+	volatile u8  canidmr7;		/* MSCAN + 0x3D */
+	volatile u16 rsrv15;		/* MSCAN + 0x3E */
+
+	struct mscan_msgbuf canrxfg;	/* MSCAN + 0x40 */    /* Foreground receive buffer */
+	struct mscan_msgbuf cantxfg;	/* MSCAN + 0x60 */    /* Foreground transmit buffer */
+};
+
+/* Clock source selection
+ */
+#define MSCAN_CLKSRC_BUS	0
+#define MSCAN_CLKSRC_XTAL	MSCAN_CLKSRC
+#define MSCAN_CLKSRC_IPS	MSCAN_CLKSRC
+
+/* Message type access macros.
+ */
+#define MSCAN_BUF_STD_RTR	0x10
+#define MSCAN_BUF_EXT_RTR	0x01
+#define MSCAN_BUF_EXTENDED	0x08
+
+/* Value for the interrupt enable register */
+#define MSCAN_RIER		(MSCAN_OVRIE |		\
+				 MSCAN_RXFIE |		\
+				 MSCAN_WUPIE |		\
+				 MSCAN_CSCIE |		\
+				 MSCAN_RSTATE0 |	\
+				 MSCAN_RSTATE1 |	\
+				 MSCAN_TSTATE0 |	\
+				 MSCAN_TSTATE1)
+
+#define BTR0_BRP_MASK		0x3f
+#define BTR0_SJW_SHIFT		6
+#define BTR0_SJW_MASK		(0x3 << BTR0_SJW_SHIFT)
+
+#define BTR1_TSEG1_MASK		0xf
+#define BTR1_TSEG2_SHIFT	4
+#define BTR1_TSEG2_MASK		(0x7 << BTR1_TSEG2_SHIFT)
+#define BTR1_SAM_SHIFT		7
+
+#define BTR0_SET_BRP(brp)	(((brp) - 1) & BTR0_BRP_MASK)
+#define BTR0_SET_SJW(sjw)	((((sjw) - 1) << BTR0_SJW_SHIFT) & \
+				 BTR0_SJW_MASK)
+
+#define BTR1_SET_TSEG1(tseg1)	(((tseg1) - 1) & BTR1_TSEG1_MASK)
+#define BTR1_SET_TSEG2(tseg2)	((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & \
+				 BTR1_TSEG2_MASK)
+#define BTR1_SET_SAM(sam)	(((sam) & 1) << BTR1_SAM_SHIFT)
+
+#endif /* __RTCAN_MSCAN_REGS_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig
new file mode 100644
index 0000000..0fccce9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Kconfig
@@ -0,0 +1,6 @@
+config XENO_DRIVERS_CAN_PEAK_CANFD
+	depends on XENO_DRIVERS_CAN && PCI && !XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+	tristate "PEAK driver for PCAN-PCIe FD family"
+	help
+
+	This driver supports the PCAN-PCIe FD board family from PEAK-System.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile
new file mode 100644
index 0000000..f56f451
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/Makefile
@@ -0,0 +1,8 @@
+#
+# Makefile for the PEAK-System CAN-FD IP module drivers
+#
+ccflags-y += -I$(srctree)/drivers/xenomai/can
+
+obj-$(CONFIG_XENO_DRIVERS_CAN_PEAK_CANFD) += xeno_can_peak_pciefd.o
+
+xeno_can_peak_pciefd-y := rtcan_peak_pciefd.o rtcan_peak_canfd.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c
new file mode 100644
index 0000000..4ecc1e9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd.c
@@ -0,0 +1,628 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CANFD firmware interface.
+ *
+ * Copyright (C) 2001-2021 PEAK System-Technik GmbH
+ * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com>
+ */
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_peak_canfd_user.h"
+
+#define DRV_NAME		"xeno_peak_canfd"
+
+#define RTCAN_DEV_NAME		"rtcan%d"
+#define RTCAN_CTRLR_NAME	"peak_canfd"
+
+/* bittiming ranges of the PEAK-System PC CAN-FD interfaces */
+static const struct can_bittiming_const peak_canfd_nominal_const = {
+	.name = RTCAN_CTRLR_NAME,
+	.tseg1_min = 1,
+	.tseg1_max = (1 << PUCAN_TSLOW_TSGEG1_BITS),
+	.tseg2_min = 1,
+	.tseg2_max = (1 << PUCAN_TSLOW_TSGEG2_BITS),
+	.sjw_max = (1 << PUCAN_TSLOW_SJW_BITS),
+	.brp_min = 1,
+	.brp_max = (1 << PUCAN_TSLOW_BRP_BITS),
+	.brp_inc = 1,
+};
+
+/* initialize the command area */
+static struct peak_canfd_priv *pucan_init_cmd(struct peak_canfd_priv *priv)
+{
+	priv->cmd_len = 0;
+	return priv;
+}
+
+/* add command 'cmd_op' to the command area */
+static void *pucan_add_cmd(struct peak_canfd_priv *priv, int cmd_op)
+{
+	struct pucan_command *cmd;
+
+	if (priv->cmd_len + sizeof(*cmd) > priv->cmd_maxlen)
+		return NULL;
+
+	cmd = priv->cmd_buffer + priv->cmd_len;
+
+	/* reset all unused bits to their defaults */
+	memset(cmd, 0, sizeof(*cmd));
+
+	cmd->opcode_channel = pucan_cmd_opcode_channel(priv->index, cmd_op);
+	priv->cmd_len += sizeof(*cmd);
+
+	return cmd;
+}
+
+/* send the command(s) to the IP core through the host-device interface */
+static int pucan_write_cmd(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	/* prepare environment before writing the command */
+	if (priv->pre_cmd) {
+		err = priv->pre_cmd(priv);
+		if (err)
+			return err;
+	}
+
+	err = priv->write_cmd(priv);
+	if (err)
+		return err;
+
+	/* update environment after writing the command */
+	if (priv->post_cmd)
+		err = priv->post_cmd(priv);
+
+	return err;
+}
+
+/* set the device in RESET mode */
+static int pucan_set_reset_mode(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RESET_MODE);
+	err = pucan_write_cmd(priv);
+	if (!err)
+		priv->rdev->state = CAN_STATE_STOPPED;
+
+	return err;
+}
+
+/* set the device in NORMAL mode */
+static int pucan_set_normal_mode(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_NORMAL_MODE);
+	err = pucan_write_cmd(priv);
+	if (!err)
+		priv->rdev->state = CAN_STATE_ERROR_ACTIVE;
+
+	return err;
+}
+
+/* set the device in LISTEN_ONLY mode */
+static int pucan_set_listen_only_mode(struct peak_canfd_priv *priv)
+{
+	int err;
+
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_LISTEN_ONLY_MODE);
+	err = pucan_write_cmd(priv);
+	if (!err)
+		priv->rdev->state = CAN_STATE_ERROR_ACTIVE;
+
+	return err;
+}
+
+/* set acceptance filters */
+static int pucan_set_std_filter(struct peak_canfd_priv *priv, u8 row, u32 mask)
+{
+	struct pucan_std_filter *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_STD_FILTER);
+
+	/* Each 11-bit CAN ID value is represented by one bit in an array
+	 * of 64 rows by 32 columns: the upper 6 bits of the CAN ID select
+	 * the row while the lowest 5 bits select the column in that row.
+	 *
+	 * bit  filter
+	 * 1    passed
+	 * 0    discarded
+	 */
+
+	/* select the row */
+	cmd->idx = row;
+
+	/* set/unset bits in the row */
+	cmd->mask = cpu_to_le32(mask);
+
+	return pucan_write_cmd(priv);
+}
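
Given that encoding, passing a single 11-bit ID through the filter amounts
to picking row id >> 5 and column id & 0x1f. A hypothetical helper, not
part of the patch (the other 63 rows would have to be cleared to 0 first):

static int pucan_pass_one_std_id(struct peak_canfd_priv *priv, u16 can_id)
{
	u8 row = (can_id >> 5) & 0x3f;		/* upper 6 bits -> row    */
	u32 mask = 1U << (can_id & 0x1f);	/* lower 5 bits -> column */

	return pucan_set_std_filter(priv, row, mask);
}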
+
+/* request the device to stop transmission */
+static int pucan_tx_abort(struct peak_canfd_priv *priv, u16 flags)
+{
+	struct pucan_tx_abort *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TX_ABORT);
+
+	cmd->flags = cpu_to_le16(flags);
+
+	return pucan_write_cmd(priv);
+}
+
+/* request the device to clear rx/tx error counters */
+static int pucan_clr_err_counters(struct peak_canfd_priv *priv)
+{
+	struct pucan_wr_err_cnt *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_WR_ERR_CNT);
+
+	cmd->sel_mask = cpu_to_le16(PUCAN_WRERRCNT_TE | PUCAN_WRERRCNT_RE);
+
+	/* write the counters new value */
+	cmd->tx_counter = 0;
+	cmd->rx_counter = 0;
+
+	return pucan_write_cmd(priv);
+}
+
+/* set options to the device */
+static int pucan_set_options(struct peak_canfd_priv *priv, u16 opt_mask)
+{
+	struct pucan_options *cmd;
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_SET_EN_OPTION);
+
+	cmd->options = cpu_to_le16(opt_mask);
+
+	return pucan_write_cmd(priv);
+}
+
+/* request the device to notify the driver when Tx path is ready */
+static int pucan_setup_rx_barrier(struct peak_canfd_priv *priv)
+{
+	pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_RX_BARRIER);
+
+	return pucan_write_cmd(priv);
+}
+
+/* handle the reception of one CAN frame */
+static int pucan_handle_can_rx(struct peak_canfd_priv *priv,
+			       struct pucan_rx_msg *msg)
+{
+	struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, };
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	struct rtcan_device *rdev = priv->rdev;
+	const u16 rx_msg_flags = le16_to_cpu(msg->flags);
+
+	if (rx_msg_flags & PUCAN_MSG_EXT_DATA_LEN) {
+		/* CAN-FD frames are silently discarded */
+		return 0;
+	}
+
+	cf->can_id = le32_to_cpu(msg->can_id);
+	cf->can_dlc = get_can_dlc(pucan_msg_get_dlc(msg));
+
+	if (rx_msg_flags & PUCAN_MSG_EXT_ID)
+		cf->can_id |= CAN_EFF_FLAG;
+
+	if (rx_msg_flags & PUCAN_MSG_RTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	else {
+		memcpy(cf->data, msg->d, cf->can_dlc);
+		skb.rb_frame_size += cf->can_dlc;
+	}
+
+	cf->can_ifindex = rdev->ifindex;
+
+	/* Pass received frame out to the sockets */
+	rtcan_rcv(rdev, &skb);
+
+	return 0;
+}
+
+/* handle rx/tx error counters notification */
+static int pucan_handle_error(struct peak_canfd_priv *priv,
+			      struct pucan_error_msg *msg)
+{
+	priv->bec.txerr = msg->tx_err_cnt;
+	priv->bec.rxerr = msg->rx_err_cnt;
+
+	return 0;
+}
+
+/* handle status notification */
+static int pucan_handle_status(struct peak_canfd_priv *priv,
+			       struct pucan_status_msg *msg)
+{
+	struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, };
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	struct rtcan_device *rdev = priv->rdev;
+
+	/* this STATUS is the CNF of the RX_BARRIER: Tx path can be setup */
+	if (pucan_status_is_rx_barrier(msg)) {
+		if (priv->enable_tx_path) {
+			int err = priv->enable_tx_path(priv);
+
+			if (err)
+				return err;
+		}
+
+		/* unlock senders */
+		rtdm_sem_up(&rdev->tx_sem);
+		return 0;
+	}
+
+	/* otherwise, it's a BUS status */
+	cf->can_id = CAN_ERR_FLAG;
+	cf->can_dlc = CAN_ERR_DLC;
+
+	/* test state error bits according to their priority */
+	if (pucan_status_is_busoff(msg)) {
+		rtdm_printk(DRV_NAME " CAN%u: Bus-off entry status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_BUS_OFF;
+		cf->can_id |= CAN_ERR_BUSOFF;
+
+		/* wakeup waiting senders */
+		rtdm_sem_destroy(&rdev->tx_sem);
+
+	} else if (pucan_status_is_passive(msg)) {
+		rtdm_printk(DRV_NAME " CAN%u: Error passive status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_ERROR_PASSIVE;
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
+					CAN_ERR_CRTL_TX_PASSIVE :
+					CAN_ERR_CRTL_RX_PASSIVE;
+		cf->data[6] = priv->bec.txerr;
+		cf->data[7] = priv->bec.rxerr;
+
+	} else if (pucan_status_is_warning(msg)) {
+		rtdm_printk(DRV_NAME " CAN%u: Error warning status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_ERROR_WARNING;
+
+		cf->can_id |= CAN_ERR_CRTL;
+		cf->data[1] = (priv->bec.txerr > priv->bec.rxerr) ?
+					CAN_ERR_CRTL_TX_WARNING :
+					CAN_ERR_CRTL_RX_WARNING;
+		cf->data[6] = priv->bec.txerr;
+		cf->data[7] = priv->bec.rxerr;
+
+	} else if (rdev->state != CAN_STATE_ERROR_ACTIVE) {
+		/* back to ERROR_ACTIVE */
+		rtdm_printk(DRV_NAME " CAN%u: Error active status\n",
+			    priv->index+1);
+		rdev->state = CAN_STATE_ERROR_ACTIVE;
+	}
+
+	skb.rb_frame_size += cf->can_dlc;
+	cf->can_ifindex = rdev->ifindex;
+
+	/* Pass received frame out to the sockets */
+	rtcan_rcv(rdev, &skb);
+
+	return 0;
+}
+
+/* handle IP core Rx overflow notification */
+static int pucan_handle_cache_critical(struct peak_canfd_priv *priv)
+{
+	struct rtcan_skb skb = { .rb_frame_size = EMPTY_RB_FRAME_SIZE, };
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	struct rtcan_device *rdev = priv->rdev;
+
+	cf->can_id = CAN_ERR_FLAG | CAN_ERR_CRTL;
+	cf->can_dlc = CAN_ERR_DLC;
+
+	cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+
+	cf->data[6] = priv->bec.txerr;
+	cf->data[7] = priv->bec.rxerr;
+
+	skb.rb_frame_size += cf->can_dlc;
+	cf->can_ifindex = rdev->ifindex;
+
+	/* Pass received frame out to the sockets */
+	rtcan_rcv(rdev, &skb);
+
+	return 0;
+}
+
+/* handle a single uCAN message */
+int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
+			  struct pucan_rx_msg *msg)
+{
+	u16 msg_type = le16_to_cpu(msg->type);
+	int msg_size = le16_to_cpu(msg->size);
+	int err;
+
+	if (!msg_size || !msg_type) {
+		/* null packet found: end of list */
+		goto exit;
+	}
+
+	switch (msg_type) {
+	case PUCAN_MSG_CAN_RX:
+		err = pucan_handle_can_rx(priv, (struct pucan_rx_msg *)msg);
+		break;
+	case PUCAN_MSG_ERROR:
+		err = pucan_handle_error(priv, (struct pucan_error_msg *)msg);
+		break;
+	case PUCAN_MSG_STATUS:
+		err = pucan_handle_status(priv,
+					  (struct pucan_status_msg *)msg);
+		break;
+	case PUCAN_MSG_CACHE_CRITICAL:
+		err = pucan_handle_cache_critical(priv);
+		break;
+	default:
+		err = 0;
+	}
+
+	if (err < 0)
+		return err;
+
+exit:
+	return msg_size;
+}
+
+/* handle a list of rx_count messages from rx_msg memory address */
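+/* Messages are packed one after the other on 32-bit boundaries; returns
+ * the number of messages handled, or a negative error code.
+ */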
+int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
+				struct pucan_rx_msg *msg_list, int msg_count)
+{
+	void *msg_ptr = msg_list;
+	int i, msg_size = 0;
+
+	for (i = 0; i < msg_count; i++) {
+		msg_size = peak_canfd_handle_msg(priv, msg_ptr);
+
+		/* a null packet can be found at the end of a list */
+		if (msg_size <= 0)
+			break;
+
+		msg_ptr += ALIGN(msg_size, 4);
+	}
+
+	if (msg_size < 0)
+		return msg_size;
+
+	return i;
+}
+
+/* start the device (set the IP core in NORMAL or LISTEN-ONLY mode) */
+static int peak_canfd_start(struct rtcan_device *rdev,
+			    rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	int i, err = 0;
+
+	switch (rdev->state) {
+	case CAN_STATE_BUS_OFF:
+	case CAN_STATE_STOPPED:
+		err = pucan_set_reset_mode(priv);
+		if (err)
+			break;
+
+		/* set the needed option: get rx/tx error counters */
+		err = pucan_set_options(priv, PUCAN_OPTION_ERROR);
+		if (err)
+			break;
+
+		/* accept all standard CAN IDs */
+		for (i = 0; i <= PUCAN_FLTSTD_ROW_IDX_MAX; i++)
+			pucan_set_std_filter(priv, i, 0xffffffff);
+
+		/* clear device rx/tx error counters */
+		err = pucan_clr_err_counters(priv);
+		if (err)
+			break;
+
+		/* set requested mode */
+		if (priv->rdev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)
+			err = pucan_set_listen_only_mode(priv);
+		else
+			err = pucan_set_normal_mode(priv);
+
+		rtdm_sem_init(&rdev->tx_sem, 1);
+
+		/* the RX_BARRIER status confirmation says the Tx path is ready */
+		err = pucan_setup_rx_barrier(priv);
+		break;
+
+	default:
+		break;
+	}
+
+	return err;
+}
+
+/* stop the device (set the IP core in RESET mode) */
+static int peak_canfd_stop(struct rtcan_device *rdev,
+			   rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	int err = 0;
+
+	switch (rdev->state) {
+	case CAN_STATE_BUS_OFF:
+	case CAN_STATE_STOPPED:
+		break;
+
+	default:
+		/* go back to RESET mode */
+		err = pucan_set_reset_mode(priv);
+		if (err) {
+			rtdm_printk(DRV_NAME " CAN%u: reset failed\n",
+				    priv->index+1);
+			break;
+		}
+
+		/* abort last Tx (MUST be done in RESET mode only!) */
+		pucan_tx_abort(priv, PUCAN_TX_ABORT_FLUSH);
+
+		rtdm_sem_destroy(&rdev->tx_sem);
+		break;
+	}
+
+	return err;
+}
+
+/* RT-Socket-CAN driver interface */
+static int peak_canfd_set_mode(struct rtcan_device *rdev, can_mode_t mode,
+			       rtdm_lockctx_t *lock_ctx)
+{
+	int err = 0;
+
+	switch (mode) {
+	case CAN_MODE_STOP:
+		err = peak_canfd_stop(rdev, lock_ctx);
+		break;
+	case CAN_MODE_START:
+		err = peak_canfd_start(rdev, lock_ctx);
+		break;
+	case CAN_MODE_SLEEP:
+		/* Controller must operate, otherwise go out */
+		if (!CAN_STATE_OPERATING(rdev->state)) {
+			err = -ENETDOWN;
+			break;
+		}
+		if (rdev->state == CAN_STATE_SLEEPING)
+			break;
+
+		fallthrough;
+	default:
+		err = -EOPNOTSUPP;
+		break;
+	}
+
+	return err;
+}
+
+static int peak_canfd_set_bittiming(struct rtcan_device *rdev,
+				    struct can_bittime *pbt,
+				    rtdm_lockctx_t *lock_ctx)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	struct pucan_timing_slow *cmd;
+
+	/* can't support BTR0BTR1 mode with clock greater than 8 MHz */
+	if (pbt->type != CAN_BITTIME_STD) {
+		rtdm_printk(DRV_NAME
+			    " CAN%u: unsupported bittiming mode %u\n",
+			    priv->index+1, pbt->type);
+		return -EINVAL;
+	}
+
+	cmd = pucan_add_cmd(pucan_init_cmd(priv), PUCAN_CMD_TIMING_SLOW);
+
+	cmd->sjw_t = PUCAN_TSLOW_SJW_T(pbt->std.sjw - 1,
+				       priv->rdev->ctrl_mode &
+						CAN_CTRLMODE_3_SAMPLES);
+
+	cmd->tseg1 = PUCAN_TSLOW_TSEG1(pbt->std.prop_seg +
+				       pbt->std.phase_seg1 - 1);
+	cmd->tseg2 = PUCAN_TSLOW_TSEG2(pbt->std.phase_seg2 - 1);
+	cmd->brp = cpu_to_le16(PUCAN_TSLOW_BRP(pbt->std.brp - 1));
+
+	cmd->ewl = 96;	/* default */
+
+	rtdm_printk(DRV_NAME ": nominal: brp=%u tseg1=%u tseg2=%u sjw=%u\n",
+		   le16_to_cpu(cmd->brp), cmd->tseg1, cmd->tseg2, cmd->sjw_t);
+
+	return pucan_write_cmd(priv);
+}
+
+/* hard transmit callback: write the CAN frame to the device */
+static int peak_canfd_start_xmit(struct rtcan_device *rdev,
+				 can_frame_t *cf)
+{
+	struct peak_canfd_priv *priv = rdev->priv;
+	struct pucan_tx_msg *msg;
+	u16 msg_size, msg_flags;
+	int room_left;
+	const u8 dlc = (cf->can_dlc > CAN_MAX_DLC) ? CAN_MAX_DLC : cf->can_dlc;
+
+	msg_size = ALIGN(sizeof(*msg) + dlc, 4);
+	msg = priv->alloc_tx_msg(priv, msg_size, &room_left);
+
+	/* should never happen except under bus-off condition and
+	 * (auto-)restart mechanism
+	 */
+	if (!msg) {
+		rtdm_printk(DRV_NAME
+			    " CAN%u: skb lost (No room left in tx buffer)\n",
+			    priv->index+1);
+		return 0;
+	}
+
+	msg->size = cpu_to_le16(msg_size);
+	msg->type = cpu_to_le16(PUCAN_MSG_CAN_TX);
+	msg_flags = 0;
+	if (cf->can_id & CAN_EFF_FLAG) {
+		msg_flags |= PUCAN_MSG_EXT_ID;
+		msg->can_id = cpu_to_le32(cf->can_id & CAN_EFF_MASK);
+	} else {
+		msg->can_id = cpu_to_le32(cf->can_id & CAN_SFF_MASK);
+	}
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		msg_flags |= PUCAN_MSG_RTR;
+
+	/* set driver specific bit to differentiate with application
+	 * loopback
+	 */
+	if (rdev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+		msg_flags |= PUCAN_MSG_LOOPED_BACK;
+
+	msg->flags = cpu_to_le16(msg_flags);
+	msg->channel_dlc = PUCAN_MSG_CHANNEL_DLC(priv->index, dlc);
+	memcpy(msg->d, cf->data, dlc);
+
+	/* write the skb on the interface */
+	priv->write_tx_msg(priv, msg);
+
+	/* control senders flow */
+	if (room_left > (sizeof(*msg) + CAN_MAX_DLC))
+		rtdm_sem_up(&rdev->tx_sem);
+
+	return 0;
+}
+
+/* allocate an rtcan device for channel #index, with enough space to store
+ * private information.
+ */
+struct rtcan_device *alloc_peak_canfd_dev(int sizeof_priv, int index)
+{
+	struct rtcan_device *rdev;
+	struct peak_canfd_priv *priv;
+
+	/* allocate the candev object */
+	rdev = rtcan_dev_alloc(sizeof_priv, 0);
+	if (!rdev)
+		return NULL;
+
+	/* RTCAN part initialization */
+	strncpy(rdev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+	rdev->ctrl_name = RTCAN_CTRLR_NAME;
+	rdev->can_sys_clock = 80*1000*1000;	/* default */
+	rdev->state = CAN_STATE_STOPPED;
+	rdev->hard_start_xmit = peak_canfd_start_xmit;
+	rdev->do_set_mode = peak_canfd_set_mode;
+	rdev->do_set_bit_time = peak_canfd_set_bittiming;
+	rdev->bittiming_const = &peak_canfd_nominal_const;
+
+	priv = rdev->priv;
+
+	/* private part initialization */
+	priv->rdev = rdev;
+	priv->index = index;
+	priv->cmd_len = 0;
+	priv->bec.txerr = 0;
+	priv->bec.rxerr = 0;
+
+	return rdev;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h
new file mode 100644
index 0000000..f2b911e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_canfd_user.h
@@ -0,0 +1,51 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * CAN driver for PEAK System micro-CAN based adapters.
+ *
+ * Copyright (C) 2001-2021 PEAK System-Technik GmbH
+ * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com>
+ */
+#ifndef PEAK_CANFD_USER_H
+#define PEAK_CANFD_USER_H
+
+#include <linux/can/dev/peak_canfd.h>
+
+#define CAN_MAX_DLC		8
+#define get_can_dlc(i)		(min_t(__u8, (i), CAN_MAX_DLC))
+
+struct peak_berr_counter {
+	__u16 txerr;
+	__u16 rxerr;
+};
+
+/* data structure private to each uCAN interface */
+struct peak_canfd_priv {
+	struct rtcan_device *rdev;	/* RTCAN device */
+	int index;			/* channel index */
+
+	struct peak_berr_counter bec;
+
+	int cmd_len;
+	void *cmd_buffer;
+	int cmd_maxlen;
+
+	int (*pre_cmd)(struct peak_canfd_priv *priv);
+	int (*write_cmd)(struct peak_canfd_priv *priv);
+	int (*post_cmd)(struct peak_canfd_priv *priv);
+
+	int (*enable_tx_path)(struct peak_canfd_priv *priv);
+	void *(*alloc_tx_msg)(struct peak_canfd_priv *priv, u16 msg_size,
+			      int *room_left);
+	int (*write_tx_msg)(struct peak_canfd_priv *priv,
+			    struct pucan_tx_msg *msg);
+};
+
+struct rtcan_device *alloc_peak_canfd_dev(int sizeof_priv, int index);
+void rtcan_peak_pciefd_remove_proc(struct rtcan_device *rdev);
+int rtcan_peak_pciefd_create_proc(struct rtcan_device *rdev);
+
+int peak_canfd_handle_msg(struct peak_canfd_priv *priv,
+			  struct pucan_rx_msg *msg);
+int peak_canfd_handle_msgs_list(struct peak_canfd_priv *priv,
+				struct pucan_rx_msg *rx_msg, int rx_count);
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c
new file mode 100644
index 0000000..921182f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/peak_canfd/rtcan_peak_pciefd.c
@@ -0,0 +1,1001 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * CAN driver PCI interface.
+ *
+ * Copyright (C) 2001-2021 PEAK System-Technik GmbH
+ * Copyright (C) 2019-2021 Stephane Grosjean <s.grosjean@peak-system.com>
+ */
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_peak_canfd_user.h"
+
+#ifdef CONFIG_PCI_MSI
+#define PCIEFD_USES_MSI
+#endif
+
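+/* open-coded fallback for kernels that predate struct_size(); unlike the
+ * kernel helper, it does no overflow checking
+ */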
+#ifndef struct_size
+#define struct_size(p, member, n)	((n)*sizeof(*(p)->member) + \
+					 sizeof(*(p)))
+#endif
+
+#define DRV_NAME			"xeno_peak_pciefd"
+
+static char *pciefd_board_name = "PEAK-PCIe FD";
+
+MODULE_AUTHOR("Stephane Grosjean <s.grosjean@peak-system.com>");
+MODULE_DESCRIPTION("RTCAN driver for PEAK PCAN PCIe/M.2 FD family cards");
+MODULE_LICENSE("GPL v2");
+
+#define PEAK_PCI_VENDOR_ID	0x001c	/* The PCI device and vendor IDs */
+#define PEAK_PCIEFD_ID		0x0013	/* for PCIe slot cards */
+#define PCAN_CPCIEFD_ID		0x0014	/* for Compact-PCI Serial slot cards */
+#define PCAN_PCIE104FD_ID	0x0017	/* for PCIe-104 Express slot cards */
+#define PCAN_MINIPCIEFD_ID	0x0018	/* for mini-PCIe slot cards */
+#define PCAN_PCIEFD_OEM_ID	0x0019	/* for PCIe slot OEM cards */
+#define PCAN_M2_ID		0x001a	/* for M2 slot cards */
+
+/* supported device ids. */
+static const struct pci_device_id peak_pciefd_tbl[] = {
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_CPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIE104FD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_MINIPCIEFD_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_PCIEFD_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PCAN_M2_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, peak_pciefd_tbl);
+
+/* PEAK PCIe board access description */
+#define PCIEFD_BAR0_SIZE		(64 * 1024)
+#define PCIEFD_RX_DMA_SIZE		(4 * 1024)
+#define PCIEFD_TX_DMA_SIZE		(4 * 1024)
+
+#define PCIEFD_TX_PAGE_SIZE		(2 * 1024)
+
+/* System Control Registers */
+#define PCIEFD_REG_SYS_CTL_SET		0x0000	/* set bits */
+#define PCIEFD_REG_SYS_CTL_CLR		0x0004	/* clear bits */
+
+/* Version info registers */
+#define PCIEFD_REG_SYS_VER1		0x0040	/* version reg #1 */
+#define PCIEFD_REG_SYS_VER2		0x0044	/* version reg #2 */
+
+#define PCIEFD_FW_VERSION(x, y, z)	(((u32)(x) << 24) | \
+					 ((u32)(y) << 16) | \
+					 ((u32)(z) << 8))
+
+/* System Control Registers Bits */
+#define PCIEFD_SYS_CTL_TS_RST		0x00000001	/* timestamp clock */
+#define PCIEFD_SYS_CTL_CLK_EN		0x00000002	/* system clock */
+
+/* CAN-FD channel addresses */
+#define PCIEFD_CANX_OFF(c)		(((c) + 1) * 0x1000)
+
+#define PCIEFD_ECHO_SKB_MAX		PCANFD_ECHO_SKB_DEF
+
+/* CAN-FD channel registers */
+#define PCIEFD_REG_CAN_MISC		0x0000	/* Misc. control */
+#define PCIEFD_REG_CAN_CLK_SEL		0x0008	/* Clock selector */
+#define PCIEFD_REG_CAN_CMD_PORT_L	0x0010	/* 64-bits command port */
+#define PCIEFD_REG_CAN_CMD_PORT_H	0x0014
+#define PCIEFD_REG_CAN_TX_REQ_ACC	0x0020	/* Tx request accumulator */
+#define PCIEFD_REG_CAN_TX_CTL_SET	0x0030	/* Tx control set register */
+#define PCIEFD_REG_CAN_TX_CTL_CLR	0x0038	/* Tx control clear register */
+#define PCIEFD_REG_CAN_TX_DMA_ADDR_L	0x0040	/* 64-bits addr for Tx DMA */
+#define PCIEFD_REG_CAN_TX_DMA_ADDR_H	0x0044
+#define PCIEFD_REG_CAN_RX_CTL_SET	0x0050	/* Rx control set register */
+#define PCIEFD_REG_CAN_RX_CTL_CLR	0x0058	/* Rx control clear register */
+#define PCIEFD_REG_CAN_RX_CTL_WRT	0x0060	/* Rx control write register */
+#define PCIEFD_REG_CAN_RX_CTL_ACK	0x0068	/* Rx control ACK register */
+#define PCIEFD_REG_CAN_RX_DMA_ADDR_L	0x0070	/* 64-bits addr for Rx DMA */
+#define PCIEFD_REG_CAN_RX_DMA_ADDR_H	0x0074
+
+/* CAN-FD channel misc register bits */
+#define CANFD_MISC_TS_RST		0x00000001	/* timestamp cnt rst */
+
+/* CAN-FD channel Clock SELector Source & DIVider */
+#define CANFD_CLK_SEL_DIV_MASK		0x00000007
+#define CANFD_CLK_SEL_DIV_60MHZ		0x00000000	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_40MHZ		0x00000001	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_30MHZ		0x00000002	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_24MHZ		0x00000003	/* SRC=240MHz only */
+#define CANFD_CLK_SEL_DIV_20MHZ		0x00000004	/* SRC=240MHz only */
+
+#define CANFD_CLK_SEL_SRC_MASK		0x00000008	/* 0=80MHz, 1=240MHz */
+#define CANFD_CLK_SEL_SRC_240MHZ	0x00000008
+#define CANFD_CLK_SEL_SRC_80MHZ		(~CANFD_CLK_SEL_SRC_240MHZ & \
+					 CANFD_CLK_SEL_SRC_MASK)
+
+#define CANFD_CLK_SEL_20MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_20MHZ)
+#define CANFD_CLK_SEL_24MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_24MHZ)
+#define CANFD_CLK_SEL_30MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_30MHZ)
+#define CANFD_CLK_SEL_40MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_40MHZ)
+#define CANFD_CLK_SEL_60MHZ		(CANFD_CLK_SEL_SRC_240MHZ |\
+					 CANFD_CLK_SEL_DIV_60MHZ)
+#define CANFD_CLK_SEL_80MHZ		(CANFD_CLK_SEL_SRC_80MHZ)
+
+/* CAN-FD channel Rx/Tx control register bits */
+#define CANFD_CTL_UNC_BIT		0x00010000	/* Uncached DMA mem */
+#define CANFD_CTL_RST_BIT		0x00020000	/* reset DMA action */
+#define CANFD_CTL_IEN_BIT		0x00040000	/* IRQ enable */
+
+/* Rx IRQ Count and Time Limits */
+#define CANFD_CTL_IRQ_CL_DEF	8	/* Rx msg max nb per IRQ in Rx DMA */
+#define CANFD_CTL_IRQ_TL_DEF	5	/* Time before IRQ if < CL (x100 us) */
+
+#define CANFD_OPTIONS_SET	(CANFD_OPTION_ERROR | CANFD_OPTION_BUSLOAD)
+
+/* Tx anticipation window (link logical address should be aligned on 2K
+ * boundary)
+ */
+#define PCIEFD_TX_PAGE_COUNT	(PCIEFD_TX_DMA_SIZE / PCIEFD_TX_PAGE_SIZE)
+
+#define CANFD_MSG_LNK_TX	0x1001	/* Tx msgs link */
+
+/* 32-bit IRQ status fields, heading Rx DMA area */
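+/* bits 0..3: IRQ tag, bits 4..10: Rx messages count, bit 16: Tx link */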
+static inline int pciefd_irq_tag(u32 irq_status)
+{
+	return irq_status & 0x0000000f;
+}
+
+static inline int pciefd_irq_rx_cnt(u32 irq_status)
+{
+	return (irq_status & 0x000007f0) >> 4;
+}
+
+static inline int pciefd_irq_is_lnk(u32 irq_status)
+{
+	return irq_status & 0x00010000;
+}
+
+/* Rx record */
+struct pciefd_rx_dma {
+	__le32 irq_status;
+	__le32 sys_time_low;
+	__le32 sys_time_high;
+	struct pucan_rx_msg msg[0];
+} __packed __aligned(4);
+
+/* Tx Link record */
+struct pciefd_tx_link {
+	__le16 size;
+	__le16 type;
+	__le32 laddr_lo;
+	__le32 laddr_hi;
+} __packed __aligned(4);
+
+/* Tx page descriptor */
+struct pciefd_page {
+	void *vbase;			/* page virtual address */
+	dma_addr_t lbase;		/* page logical address */
+	u32 offset;
+	u32 size;
+};
+
+/* CAN channel object */
+struct pciefd_board;
+struct pciefd_can {
+	struct peak_canfd_priv ucan;	/* must be the first member */
+	void __iomem *reg_base;		/* channel config base addr */
+	struct pciefd_board *board;	/* reverse link */
+
+	struct pucan_command pucan_cmd;	/* command buffer */
+
+	dma_addr_t rx_dma_laddr;	/* DMA virtual and logical addr */
+	void *rx_dma_vaddr;		/* for Rx and Tx areas */
+	dma_addr_t tx_dma_laddr;
+	void *tx_dma_vaddr;
+
+	struct pciefd_page tx_pages[PCIEFD_TX_PAGE_COUNT];
+	u16 tx_pages_free;		/* free Tx pages counter */
+	u16 tx_page_index;		/* current page used for Tx */
+	rtdm_lock_t tx_lock;
+	u32 irq_status;
+	u32 irq_tag;			/* next irq tag */
+	int irq;
+
+	u32 flags;
+};
+
+/* PEAK-PCIe FD board object */
+struct pciefd_board {
+	void __iomem *reg_base;
+	struct pci_dev *pci_dev;
+	int can_count;
+	int irq_flags;			/* RTDM_IRQTYPE_SHARED or 0 */
+	rtdm_lock_t cmd_lock;		/* 64-bits cmds must be atomic */
+	struct pciefd_can *can[0];	/* array of network devices */
+};
+
+#define CANFD_CTL_IRQ_CL_MIN	1
+#define CANFD_CTL_IRQ_CL_MAX	127	/* 7-bit field */
+
+#define CANFD_CTL_IRQ_TL_MIN	1
+#define CANFD_CTL_IRQ_TL_MAX	15	/* 4-bit field */
+
+static uint irqcl = CANFD_CTL_IRQ_CL_DEF;
+module_param(irqcl, uint, 0644);
+MODULE_PARM_DESC(irqcl,
+" PCIe FD IRQ Count Limit (default=" __stringify(CANFD_CTL_IRQ_CL_DEF) ")");
+
+static uint irqtl = CANFD_CTL_IRQ_TL_DEF;
+module_param(irqtl, uint, 0644);
+MODULE_PARM_DESC(irqtl,
+" PCIe FD IRQ Time Limit (default=" __stringify(CANFD_CTL_IRQ_TL_DEF) ")");
+
+#ifdef PCIEFD_USES_MSI
+
+#ifdef CONFIG_XENO_OPT_SHIRQ
+/* default behaviour: run as mainline driver in INTx mode */
+#define PCIEFD_USEMSI_DEFAULT	0
+#else
+/* default behaviour: run in MSI mode (one IRQ per channel) */
+#define PCIEFD_USEMSI_DEFAULT	1
+#endif
+
+static uint usemsi = PCIEFD_USEMSI_DEFAULT;
+module_param(usemsi, uint, 0644);
+MODULE_PARM_DESC(usemsi,
+" 0=INTA; 1=MSI (def=" __stringify(PCIEFD_USEMSI_DEFAULT) ")");
+#endif
+
+/* read a 32-bit value from a SYS block register */
+static inline u32 pciefd_sys_readreg(const struct pciefd_board *priv, u16 reg)
+{
+	return readl(priv->reg_base + reg);
+}
+
+/* write a 32-bit value into a SYS block register */
+static inline void pciefd_sys_writereg(const struct pciefd_board *priv,
+				       u32 val, u16 reg)
+{
+	writel(val, priv->reg_base + reg);
+}
+
+/* read a 32-bit value from a CAN-FD block register */
+static inline u32 pciefd_can_readreg(const struct pciefd_can *priv, u16 reg)
+{
+	return readl(priv->reg_base + reg);
+}
+
+/* write a 32-bit value into a CAN-FD block register */
+static inline void pciefd_can_writereg(const struct pciefd_can *priv,
+				       u32 val, u16 reg)
+{
+	writel(val, priv->reg_base + reg);
+}
+
+/* give a channel logical Rx DMA address to the board */
+static void pciefd_can_setup_rx_dma(struct pciefd_can *priv)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	const u32 dma_addr_h = (u32)(priv->rx_dma_laddr >> 32);
+#else
+	const u32 dma_addr_h = 0;
+#endif
+
+	/* (DMA must be reset for Rx) */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_RX_CTL_SET);
+
+	/* write the logical address of the Rx DMA area for this channel */
+	pciefd_can_writereg(priv, (u32)priv->rx_dma_laddr,
+			    PCIEFD_REG_CAN_RX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_RX_DMA_ADDR_H);
+
+	/* also indicates that Rx DMA is cacheable */
+	pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT,
+			    PCIEFD_REG_CAN_RX_CTL_CLR);
+}
+
+/* clear channel logical Rx DMA address from the board */
+static void pciefd_can_clear_rx_dma(struct pciefd_can *priv)
+{
+	/* DMA must be reset for Rx */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_RX_CTL_SET);
+
+	/* clear the logical address of the Rx DMA area for this channel */
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_RX_DMA_ADDR_H);
+}
+
+/* give a channel logical Tx DMA address to the board */
+static void pciefd_can_setup_tx_dma(struct pciefd_can *priv)
+{
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	const u32 dma_addr_h = (u32)(priv->tx_dma_laddr >> 32);
+#else
+	const u32 dma_addr_h = 0;
+#endif
+
+	/* (DMA must be reset for Tx) */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_SET);
+
+	/* write the logical address of the Tx DMA area for this channel */
+	pciefd_can_writereg(priv, (u32)priv->tx_dma_laddr,
+			    PCIEFD_REG_CAN_TX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, dma_addr_h, PCIEFD_REG_CAN_TX_DMA_ADDR_H);
+
+	/* also indicates that Tx DMA is cacheable */
+	pciefd_can_writereg(priv, CANFD_CTL_UNC_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_CLR);
+}
+
+/* clear channel logical Tx DMA address from the board */
+static void pciefd_can_clear_tx_dma(struct pciefd_can *priv)
+{
+	/* DMA must be reset for Tx */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_SET);
+
+	/* clear the logical address of the Tx DMA area for this channel */
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_L);
+	pciefd_can_writereg(priv, 0, PCIEFD_REG_CAN_TX_DMA_ADDR_H);
+}
+
+/* acknowledge interrupt to the device */
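+/* The device heads the Rx DMA area with a 4-bit tag: the IRQ handler
+ * compares it with the tag it expects, then writes the incremented value
+ * (mod 16) into the RX_CTL_ACK register, which both acknowledges the IRQ
+ * and re-arms the Rx DMA transfer.
+ */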
+static void pciefd_can_ack_rx_dma(struct pciefd_can *priv)
+{
+	/* read value of current IRQ tag and inc it for next one */
+	priv->irq_tag = le32_to_cpu(*(__le32 *)priv->rx_dma_vaddr);
+	priv->irq_tag++;
+	priv->irq_tag &= 0xf;
+
+	/* write the next IRQ tag for this CAN */
+	pciefd_can_writereg(priv, priv->irq_tag, PCIEFD_REG_CAN_RX_CTL_ACK);
+}
+
+/* IRQ handler */
+static int pciefd_irq_handler(rtdm_irq_t *irq_handle)
+{
+	struct pciefd_can *priv = rtdm_irq_get_arg(irq_handle, void);
+	struct pciefd_rx_dma *rx_dma = priv->rx_dma_vaddr;
+
+	/* INTA mode only, dummy read to sync with PCIe transaction */
+	if (!pci_dev_msi_enabled(priv->board->pci_dev))
+		(void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
+
+	/* read IRQ status from the first 32-bit of the Rx DMA area */
+	priv->irq_status = le32_to_cpu(rx_dma->irq_status);
+
+	/* check if this (shared) IRQ is for this CAN */
+	if (pciefd_irq_tag(priv->irq_status) != priv->irq_tag)
+		return RTDM_IRQ_NONE;
+
+	/* handle rx messages (if any) */
+	peak_canfd_handle_msgs_list(&priv->ucan,
+				    rx_dma->msg,
+				    pciefd_irq_rx_cnt(priv->irq_status));
+
+	/* handle tx link interrupt (if any) */
+	if (pciefd_irq_is_lnk(priv->irq_status)) {
+		rtdm_lock_get(&priv->tx_lock);
+		priv->tx_pages_free++;
+		rtdm_lock_put(&priv->tx_lock);
+
+		/* Wake up a sender */
+		rtdm_sem_up(&priv->ucan.rdev->tx_sem);
+	}
+
+	/* re-enable Rx DMA transfer for this CAN */
+	pciefd_can_ack_rx_dma(priv);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/* initialize structures used for sending CAN frames */
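+/* The Tx DMA area is split into 2 KiB pages chained by link records (see
+ * pciefd_alloc_tx_msg()); the page currently being filled is not counted
+ * as free, hence the PCIEFD_TX_PAGE_COUNT - 1 below.
+ */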
+static int pciefd_enable_tx_path(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	int i;
+
+	/* initialize the Tx pages descriptors */
+	priv->tx_pages_free = PCIEFD_TX_PAGE_COUNT - 1;
+	priv->tx_page_index = 0;
+
+	priv->tx_pages[0].vbase = priv->tx_dma_vaddr;
+	priv->tx_pages[0].lbase = priv->tx_dma_laddr;
+
+	for (i = 0; i < PCIEFD_TX_PAGE_COUNT; i++) {
+		priv->tx_pages[i].offset = 0;
+		priv->tx_pages[i].size = PCIEFD_TX_PAGE_SIZE -
+					 sizeof(struct pciefd_tx_link);
+		if (i) {
+			priv->tx_pages[i].vbase =
+					  priv->tx_pages[i - 1].vbase +
+					  PCIEFD_TX_PAGE_SIZE;
+			priv->tx_pages[i].lbase =
+					  priv->tx_pages[i - 1].lbase +
+					  PCIEFD_TX_PAGE_SIZE;
+		}
+	}
+
+	/* setup Tx DMA addresses into IP core */
+	pciefd_can_setup_tx_dma(priv);
+
+	/* start (TX_RST=0) Tx Path */
+	pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+			    PCIEFD_REG_CAN_TX_CTL_CLR);
+
+	return 0;
+}
+
+/* board specific command pre-processing */
+static int pciefd_pre_cmd(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+
+	/* pre-process command */
+	switch (cmd) {
+	case PUCAN_CMD_NORMAL_MODE:
+	case PUCAN_CMD_LISTEN_ONLY_MODE:
+
+		if (ucan->rdev->state == CAN_STATE_BUS_OFF)
+			break;
+
+		/* setup Rx DMA address */
+		pciefd_can_setup_rx_dma(priv);
+
+		/* setup max count of msgs per IRQ */
+		pciefd_can_writereg(priv, (irqtl << 8) | irqcl,
+				    PCIEFD_REG_CAN_RX_CTL_WRT);
+
+		/* clear DMA RST for Rx (Rx start) */
+		pciefd_can_writereg(priv, CANFD_CTL_RST_BIT,
+				    PCIEFD_REG_CAN_RX_CTL_CLR);
+
+		/* reset timestamps */
+		pciefd_can_writereg(priv, !CANFD_MISC_TS_RST,
+				    PCIEFD_REG_CAN_MISC);
+
+		/* do an initial ACK */
+		pciefd_can_ack_rx_dma(priv);
+
+		/* enable IRQ for this CAN after having set next irq_tag */
+		pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+				    PCIEFD_REG_CAN_RX_CTL_SET);
+
+		/* Tx path will be setup as soon as RX_BARRIER is received */
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/* write a command */
+static int pciefd_write_cmd(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	unsigned long flags;
+
+	/* 64-bit command must be atomic */
+	rtdm_lock_get_irqsave(&priv->board->cmd_lock, flags);
+
+	pciefd_can_writereg(priv, *(u32 *)ucan->cmd_buffer,
+			    PCIEFD_REG_CAN_CMD_PORT_L);
+	pciefd_can_writereg(priv, *(u32 *)(ucan->cmd_buffer + 4),
+			    PCIEFD_REG_CAN_CMD_PORT_H);
+
+	rtdm_lock_put_irqrestore(&priv->board->cmd_lock, flags);
+
+	return 0;
+}
+
+/* board specific command post-processing */
+static int pciefd_post_cmd(struct peak_canfd_priv *ucan)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	u16 cmd = pucan_cmd_get_opcode(&priv->pucan_cmd);
+
+	switch (cmd) {
+	case PUCAN_CMD_RESET_MODE:
+
+		if (ucan->rdev->state == CAN_STATE_STOPPED)
+			break;
+
+		/* controller now in reset mode: disable IRQ for this CAN */
+		pciefd_can_writereg(priv, CANFD_CTL_IEN_BIT,
+				    PCIEFD_REG_CAN_RX_CTL_CLR);
+
+		/* stop and reset DMA addresses in Tx/Rx engines */
+		pciefd_can_clear_tx_dma(priv);
+		pciefd_can_clear_rx_dma(priv);
+
+		/* wait for above commands to complete (read cycle) */
+		(void)pciefd_sys_readreg(priv->board, PCIEFD_REG_SYS_VER1);
+
+		ucan->rdev->state = CAN_STATE_STOPPED;
+
+		break;
+	}
+
+	return 0;
+}
+
+/* allocate enough room in the Tx DMA area to store a CAN message */
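+/* When the current page cannot hold msg_size more bytes, a
+ * CANFD_MSG_LNK_TX record is written into its last free slot, pointing
+ * the IP core at the DMA address of the next page of the ring.
+ */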
+static void *pciefd_alloc_tx_msg(struct peak_canfd_priv *ucan, u16 msg_size,
+				 int *room_left)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+	unsigned long flags;
+	void *msg;
+
+	rtdm_lock_get_irqsave(&priv->tx_lock, flags);
+
+	if (page->offset + msg_size > page->size) {
+		struct pciefd_tx_link *lk;
+
+		/* not enough space in this page: try another one */
+		if (!priv->tx_pages_free) {
+			rtdm_lock_put_irqrestore(&priv->tx_lock, flags);
+
+			/* Tx overflow */
+			return NULL;
+		}
+
+		priv->tx_pages_free--;
+
+		/* keep address of the very last free slot of current page */
+		lk = page->vbase + page->offset;
+
+		/* next, move on a new free page */
+		priv->tx_page_index = (priv->tx_page_index + 1) %
+				      PCIEFD_TX_PAGE_COUNT;
+		page = priv->tx_pages + priv->tx_page_index;
+
+		/* put link record to this new page at the end of prev one */
+		lk->size = cpu_to_le16(sizeof(*lk));
+		lk->type = cpu_to_le16(CANFD_MSG_LNK_TX);
+		lk->laddr_lo = cpu_to_le32(page->lbase);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+		lk->laddr_hi = cpu_to_le32(page->lbase >> 32);
+#else
+		lk->laddr_hi = 0;
+#endif
+		/* next msgs will be put from the beginning of this new page */
+		page->offset = 0;
+	}
+
+	*room_left = priv->tx_pages_free * page->size;
+
+	rtdm_lock_put_irqrestore(&priv->tx_lock, flags);
+
+	msg = page->vbase + page->offset;
+
+	/* give back room left in the tx ring */
+	*room_left += page->size - (page->offset + msg_size);
+
+	return msg;
+}
+
+/* tell the IP core that a frame has been written into the Tx DMA area */
+static int pciefd_write_tx_msg(struct peak_canfd_priv *ucan,
+			       struct pucan_tx_msg *msg)
+{
+	struct pciefd_can *priv = (struct pciefd_can *)ucan;
+	struct pciefd_page *page = priv->tx_pages + priv->tx_page_index;
+
+	/* this slot is now reserved for writing the frame */
+	page->offset += le16_to_cpu(msg->size);
+
+	/* tell the board a frame has been written in Tx DMA area */
+	pciefd_can_writereg(priv, 1, PCIEFD_REG_CAN_TX_REQ_ACC);
+
+	return 0;
+}
+
+/* probe for CAN channel number #pciefd_board->can_count */
+static int pciefd_can_probe(struct pciefd_board *pciefd)
+{
+	struct rtcan_device *rdev;
+	struct pciefd_can *priv;
+	u32 clk;
+	int err;
+
+	/* allocate the RTCAN object */
+	rdev = alloc_peak_canfd_dev(sizeof(*priv), pciefd->can_count);
+	if (!rdev) {
+		dev_err(&pciefd->pci_dev->dev,
+			"failed to alloc RTCAN device object\n");
+		goto failure;
+	}
+
+	/* fill-in board specific parts */
+	rdev->board_name = pciefd_board_name;
+
+	/* fill-in rtcan private object */
+	priv = rdev->priv;
+
+	/* setup PCIe-FD own callbacks */
+	priv->ucan.pre_cmd = pciefd_pre_cmd;
+	priv->ucan.write_cmd = pciefd_write_cmd;
+	priv->ucan.post_cmd = pciefd_post_cmd;
+	priv->ucan.enable_tx_path = pciefd_enable_tx_path;
+	priv->ucan.alloc_tx_msg = pciefd_alloc_tx_msg;
+	priv->ucan.write_tx_msg = pciefd_write_tx_msg;
+
+	/* setup PCIe-FD own command buffer */
+	priv->ucan.cmd_buffer = &priv->pucan_cmd;
+	priv->ucan.cmd_maxlen = sizeof(priv->pucan_cmd);
+
+	priv->board = pciefd;
+
+	/* CAN config regs block address */
+	priv->reg_base = pciefd->reg_base + PCIEFD_CANX_OFF(priv->ucan.index);
+	rdev->base_addr = (unsigned long)priv->reg_base;
+
+	/* allocate non-cacheable DMA'able 4KB memory area for Rx */
+	priv->rx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+						 PCIEFD_RX_DMA_SIZE,
+						 &priv->rx_dma_laddr,
+						 GFP_KERNEL);
+	if (!priv->rx_dma_vaddr) {
+		dev_err(&pciefd->pci_dev->dev,
+			"Rx dmam_alloc_coherent(%u) failure\n",
+			PCIEFD_RX_DMA_SIZE);
+		goto err_free_rtdev;
+	}
+
+	/* allocate non-cacheable DMA'able 4KB memory area for Tx */
+	priv->tx_dma_vaddr = dmam_alloc_coherent(&pciefd->pci_dev->dev,
+						 PCIEFD_TX_DMA_SIZE,
+						 &priv->tx_dma_laddr,
+						 GFP_KERNEL);
+	if (!priv->tx_dma_vaddr) {
+		dev_err(&pciefd->pci_dev->dev,
+			"Tx dmam_alloc_coherent(%u) failure\n",
+			PCIEFD_TX_DMA_SIZE);
+		goto err_free_rtdev;
+	}
+
+	/* CAN clock in RST mode */
+	pciefd_can_writereg(priv, CANFD_MISC_TS_RST, PCIEFD_REG_CAN_MISC);
+
+	/* read current clock value */
+	clk = pciefd_can_readreg(priv, PCIEFD_REG_CAN_CLK_SEL);
+	switch (clk) {
+	case CANFD_CLK_SEL_20MHZ:
+		priv->ucan.rdev->can_sys_clock = 20 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_24MHZ:
+		priv->ucan.rdev->can_sys_clock = 24 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_30MHZ:
+		priv->ucan.rdev->can_sys_clock = 30 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_40MHZ:
+		priv->ucan.rdev->can_sys_clock = 40 * 1000 * 1000;
+		break;
+	case CANFD_CLK_SEL_60MHZ:
+		priv->ucan.rdev->can_sys_clock = 60 * 1000 * 1000;
+		break;
+	default:
+		pciefd_can_writereg(priv, CANFD_CLK_SEL_80MHZ,
+				    PCIEFD_REG_CAN_CLK_SEL);
+
+		fallthrough;
+	case CANFD_CLK_SEL_80MHZ:
+		priv->ucan.rdev->can_sys_clock = 80 * 1000 * 1000;
+		break;
+	}
+
+#ifdef PCIEFD_USES_MSI
+	priv->irq = (pciefd->irq_flags & RTDM_IRQTYPE_SHARED) ?
+		    pciefd->pci_dev->irq :
+		    pci_irq_vector(pciefd->pci_dev, priv->ucan.index);
+#else
+	priv->irq = pciefd->pci_dev->irq;
+#endif
+
+	/* setup irq handler */
+	err = rtdm_irq_request(&rdev->irq_handle,
+			       priv->irq,
+			       pciefd_irq_handler,
+			       pciefd->irq_flags,
+			       DRV_NAME,
+			       priv);
+	if (err) {
+		dev_err(&pciefd->pci_dev->dev,
+			"rtdm_irq_request(IRQ%u) failure err %d\n",
+			priv->irq, err);
+		goto err_free_rtdev;
+	}
+
+	err = rtcan_dev_register(rdev);
+	if (err) {
+		dev_err(&pciefd->pci_dev->dev,
+			"couldn't register RTCAN device: %d\n", err);
+		goto err_free_irq;
+	}
+
+	rtdm_lock_init(&priv->tx_lock);
+
+	/* save the object address in the board structure */
+	pciefd->can[pciefd->can_count] = priv;
+
+	dev_info(&pciefd->pci_dev->dev, "%s at reg_base=0x%p irq=%d\n",
+		 rdev->name, priv->reg_base, priv->irq);
+
+	return 0;
+
+err_free_irq:
+	rtdm_irq_free(&rdev->irq_handle);
+
+err_free_rtdev:
+	rtcan_dev_free(rdev);
+
+failure:
+	return -ENOMEM;
+}
+
+/* wakeup all RT tasks that are blocked on read */
+static void pciefd_can_unlock_recv_tasks(struct rtcan_device *rdev)
+{
+	struct rtcan_recv *recv_listener = rdev->recv_list;
+
+	while (recv_listener) {
+		struct rtcan_socket *sock = recv_listener->sock;
+
+		/* wakeup any rx task */
+		rtdm_sem_destroy(&sock->recv_sem);
+
+		recv_listener = recv_listener->next;
+	}
+}
+
+/* remove a CAN-FD channel by releasing all of its resources */
+static void pciefd_can_remove(struct pciefd_can *priv)
+{
+	struct rtcan_device *rdev = priv->ucan.rdev;
+
+	/* unlock any tasks that wait for read on a socket bound to this CAN */
+	pciefd_can_unlock_recv_tasks(rdev);
+
+	/* in case the driver is removed when the interface is UP
+	 * (device MUST be closed before being unregistered)
+	 */
+	rdev->do_set_mode(rdev, CAN_MODE_STOP, NULL);
+
+	rtcan_dev_unregister(rdev);
+	rtdm_irq_disable(&rdev->irq_handle);
+	rtdm_irq_free(&rdev->irq_handle);
+	rtcan_dev_free(rdev);
+}
+
+/* remove all CAN-FD channels by releasing their own resources */
+static void pciefd_can_remove_all(struct pciefd_board *pciefd)
+{
+	while (pciefd->can_count > 0)
+		pciefd_can_remove(pciefd->can[--pciefd->can_count]);
+}
+
+/* probe for the entire device */
+static int peak_pciefd_probe(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	struct pciefd_board *pciefd;
+	int err, can_count;
+	u16 sub_sys_id;
+	u8 hw_ver_major;
+	u8 hw_ver_minor;
+	u8 hw_ver_sub;
+	u32 v2;
+
+	err = pci_enable_device(pdev);
+	if (err)
+		return err;
+
+	err = pci_request_regions(pdev, DRV_NAME);
+	if (err)
+		goto err_disable_pci;
+
+	/* the number of channels depends on sub-system id */
+	err = pci_read_config_word(pdev, PCI_SUBSYSTEM_ID, &sub_sys_id);
+	if (err)
+		goto err_release_regions;
+
+	dev_dbg(&pdev->dev, "probing device %04x:%04x:%04x\n",
+		pdev->vendor, pdev->device, sub_sys_id);
+
+	if (sub_sys_id >= 0x0012)
+		can_count = 4;
+	else if (sub_sys_id >= 0x0010)
+		can_count = 3;
+	else if (sub_sys_id >= 0x0004)
+		can_count = 2;
+	else
+		can_count = 1;
+
+	/* allocate board structure object */
+	pciefd = devm_kzalloc(&pdev->dev, struct_size(pciefd, can, can_count),
+			      GFP_KERNEL);
+	if (!pciefd) {
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	/* initialize the board structure */
+	pciefd->pci_dev = pdev;
+	rtdm_lock_init(&pciefd->cmd_lock);
+
+	/* save the PCI BAR0 virtual address for further system regs access */
+	pciefd->reg_base = pci_iomap(pdev, 0, PCIEFD_BAR0_SIZE);
+	if (!pciefd->reg_base) {
+		dev_err(&pdev->dev, "failed to map PCI resource #0\n");
+		err = -ENOMEM;
+		goto err_release_regions;
+	}
+
+	/* read the firmware version number */
+	v2 = pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER2);
+
+	hw_ver_major = (v2 & 0x0000f000) >> 12;
+	hw_ver_minor = (v2 & 0x00000f00) >> 8;
+	hw_ver_sub = (v2 & 0x000000f0) >> 4;
+
+	dev_info(&pdev->dev,
+		 "%ux CAN-FD PCAN-PCIe FPGA v%u.%u.%u:\n", can_count,
+		 hw_ver_major, hw_ver_minor, hw_ver_sub);
+
+#ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT
+	/* DMA logic doesn't handle mix of 32-bit and 64-bit logical addresses
+	 * in fw <= 3.2.x
+	 */
+	if (PCIEFD_FW_VERSION(hw_ver_major, hw_ver_minor, hw_ver_sub) <
+		PCIEFD_FW_VERSION(3, 3, 0)) {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err)
+			dev_warn(&pdev->dev,
+				"warning: can't set DMA mask %llxh (err %d)\n",
+				DMA_BIT_MASK(32), err);
+	}
+#endif
+
+	/* default interrupt mode is: shared INTx */
+	pciefd->irq_flags = RTDM_IRQTYPE_SHARED;
+
+#ifdef PCIEFD_USES_MSI
+	if (usemsi) {
+		err = pci_msi_vec_count(pdev);
+		if (err > 0) {
+			int msi_maxvec = err;
+
+			err = pci_alloc_irq_vectors_affinity(pdev, can_count,
+							     msi_maxvec,
+							     PCI_IRQ_MSI,
+							     NULL);
+			dev_info(&pdev->dev,
+				 "MSI[%u..%u] enabling status: %d\n",
+				 can_count, msi_maxvec, err);
+
+			/* with at least one vector per channel, run in
+			 * per-channel MSI mode; otherwise release the
+			 * vectors and fall back to shared INTx
+			 */
+			if (err >= can_count)
+				pciefd->irq_flags &= ~RTDM_IRQTYPE_SHARED;
+			else if (err >= 0)
+				pci_free_irq_vectors(pdev);
+		}
+	}
+#endif
+
+	/* stop system clock */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+			    PCIEFD_REG_SYS_CTL_CLR);
+
+	pci_set_master(pdev);
+
+	/* create now the corresponding channels objects */
+	while (pciefd->can_count < can_count) {
+		err = pciefd_can_probe(pciefd);
+		if (err)
+			goto err_free_canfd;
+
+		pciefd->can_count++;
+	}
+
+	/* set system timestamps counter in RST mode */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST,
+			    PCIEFD_REG_SYS_CTL_SET);
+
+	/* wait a bit (read cycle) */
+	(void)pciefd_sys_readreg(pciefd, PCIEFD_REG_SYS_VER1);
+
+	/* free all clocks */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_TS_RST,
+			    PCIEFD_REG_SYS_CTL_CLR);
+
+	/* start system clock */
+	pciefd_sys_writereg(pciefd, PCIEFD_SYS_CTL_CLK_EN,
+			    PCIEFD_REG_SYS_CTL_SET);
+
+	/* remember the board structure address in the device user data */
+	pci_set_drvdata(pdev, pciefd);
+
+	return 0;
+
+err_free_canfd:
+	pciefd_can_remove_all(pciefd);
+
+#ifdef PCIEFD_USES_MSI
+	pci_free_irq_vectors(pdev);
+#endif
+	pci_iounmap(pdev, pciefd->reg_base);
+
+err_release_regions:
+	pci_release_regions(pdev);
+
+err_disable_pci:
+	pci_disable_device(pdev);
+
+	/* pci_read_config_word() returns positive PCIBIOS_xxx error codes,
+	 * while the probe() function must return a negative errno in case of
+	 * failure (pcibios_err_to_errno() leaves negative values unchanged)
+	 */
+	return pcibios_err_to_errno(err);
+}
+
+/* free the board structure object, as well as its resources */
+static void peak_pciefd_remove(struct pci_dev *pdev)
+{
+	struct pciefd_board *pciefd = pci_get_drvdata(pdev);
+
+	/* release CAN-FD channels resources */
+	pciefd_can_remove_all(pciefd);
+
+#ifdef PCIEFD_USES_MSI
+	pci_free_irq_vectors(pdev);
+#endif
+	pci_iounmap(pdev, pciefd->reg_base);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver rtcan_peak_pciefd_driver = {
+	.name = DRV_NAME,
+	.id_table = peak_pciefd_tbl,
+	.probe = peak_pciefd_probe,
+	.remove = peak_pciefd_remove,
+};
+
+static int __init rtcan_peak_pciefd_init(void)
+{
+	if (!realtime_core_enabled())
+		return 0;
+
+	return pci_register_driver(&rtcan_peak_pciefd_driver);
+}
+
+static void __exit rtcan_peak_pciefd_exit(void)
+{
+	if (realtime_core_enabled())
+		pci_unregister_driver(&rtcan_peak_pciefd_driver);
+}
+
+module_init(rtcan_peak_pciefd_init);
+module_exit(rtcan_peak_pciefd_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c
new file mode 100644
index 0000000..da64be7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.c
@@ -0,0 +1,305 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/rtdev.c:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/spinlock.h>
+#include <linux/if.h>
+#include <linux/if_arp.h>
+#include <linux/netdevice.h>
+#include <linux/module.h>
+
+#include "rtcan_internal.h"
+#include "rtcan_dev.h"
+
+
+static struct rtcan_device *rtcan_devices[RTCAN_MAX_DEVICES];
+static DEFINE_RTDM_LOCK(rtcan_devices_rt_lock);
+
+DEFINE_SEMAPHORE(rtcan_devices_nrt_lock);
+
+/* Spinlock for all reception lists and also for some members in
+ * struct rtcan_socket */
+DEFINE_RTDM_LOCK(rtcan_socket_lock);
+
+/* Spinlock for all reception lists and also for some members in
+ * struct rtcan_socket */
+DEFINE_RTDM_LOCK(rtcan_recv_list_lock);
+
+static inline struct rtcan_device *__rtcan_dev_get_by_name(const char *name)
+{
+    int i;
+    struct rtcan_device *dev;
+
+
+    for (i = 0; i < RTCAN_MAX_DEVICES; i++) {
+	dev = rtcan_devices[i];
+	if ((dev != NULL) && (strncmp(dev->name, name, IFNAMSIZ) == 0))
+	    return dev;
+    }
+    return NULL;
+}
+
+
+struct rtcan_device *rtcan_dev_get_by_name(const char *name)
+{
+    struct rtcan_device *dev;
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lockctx_t context;
+#endif
+
+
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+#endif
+
+    dev = __rtcan_dev_get_by_name(name);
+
+#ifdef RTCAN_USE_REFCOUNT
+    if (dev != NULL)
+	atomic_inc(&dev->refcount);
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+#endif
+
+    return dev;
+}
+
+
+static inline struct rtcan_device *__rtcan_dev_get_by_index(int ifindex)
+{
+    return rtcan_devices[ifindex - 1];
+}
+
+
+struct rtcan_device *rtcan_dev_get_by_index(int ifindex)
+{
+    struct rtcan_device *dev;
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lockctx_t context;
+#endif
+
+
+    if ((ifindex <= 0) || (ifindex > RTCAN_MAX_DEVICES))
+	return NULL;
+
+#ifdef RTCAN_USE_REFCOUNT
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+#endif
+
+    dev = __rtcan_dev_get_by_index(ifindex);
+
+#ifdef RTCAN_USE_REFCOUNT
+    if (dev != NULL)
+	atomic_inc(&dev->refcount);
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+#endif
+
+    return dev;
+}
+
+
+void rtcan_dev_alloc_name(struct rtcan_device *dev, const char *mask)
+{
+    char buf[IFNAMSIZ];
+    struct rtcan_device *tmp;
+    int i;
+
+
+    for (i = 0; i < RTCAN_MAX_DEVICES; i++) {
+	ksformat(buf, IFNAMSIZ, mask, i);
+	if ((tmp = rtcan_dev_get_by_name(buf)) == NULL) {
+	    strncpy(dev->name, buf, IFNAMSIZ);
+	    break;
+	}
+#ifdef RTCAN_USE_REFCOUNT
+	else
+	    rtcan_dev_dereference(tmp);
+#endif
+    }
+}
+
+
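+/* The controller- and board-private areas are carved out of the same
+ * allocation, right behind struct rtcan_device (see dev->priv and
+ * dev->board_priv below).
+ */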
+struct rtcan_device *rtcan_dev_alloc(int sizeof_priv, int sizeof_board_priv)
+{
+    struct rtcan_device *dev;
+    struct rtcan_recv *recv_list_elem;
+    int alloc_size;
+    int j;
+
+
+    alloc_size = sizeof(*dev) + sizeof_priv + sizeof_board_priv;
+
+    dev = (struct rtcan_device *)kmalloc(alloc_size, GFP_KERNEL);
+    if (dev == NULL) {
+	printk(KERN_ERR "rtcan: cannot allocate rtcan device\n");
+	return NULL;
+    }
+
+    memset(dev, 0, alloc_size);
+
+    sema_init(&dev->nrt_lock, 1);
+
+    rtdm_lock_init(&dev->device_lock);
+
+    /* Init TX Semaphore, will be destroyed forthwith
+     * when setting stop mode */
+    rtdm_sem_init(&dev->tx_sem, 0);
+#ifdef RTCAN_USE_REFCOUNT
+    atomic_set(&dev->refcount, 0);
+#endif
+
+    /* Initialize receive list */
+    dev->empty_list = recv_list_elem = dev->receivers;
+    for (j = 0; j < RTCAN_MAX_RECEIVERS - 1; j++, recv_list_elem++)
+	recv_list_elem->next = recv_list_elem + 1;
+    recv_list_elem->next = NULL;
+    dev->free_entries = RTCAN_MAX_RECEIVERS;
+
+    if (sizeof_priv)
+	dev->priv = (void *)((unsigned long)dev + sizeof(*dev));
+    if (sizeof_board_priv)
+	dev->board_priv = (void *)((unsigned long)dev + sizeof(*dev) + sizeof_priv);
+
+    return dev;
+}
+
+void rtcan_dev_free (struct rtcan_device *dev)
+{
+    if (dev != NULL) {
+	rtdm_sem_destroy(&dev->tx_sem);
+	kfree(dev);
+    }
+}
+
+
+static inline int __rtcan_dev_new_index(void)
+{
+    int i;
+
+
+    for (i = 0; i < RTCAN_MAX_DEVICES; i++)
+	if (rtcan_devices[i] == NULL)
+	     return i+1;
+
+    return -ENOMEM;
+}
+
+
+int rtcan_dev_register(struct rtcan_device *dev)
+{
+    rtdm_lockctx_t context;
+    int ret;
+
+    down(&rtcan_devices_nrt_lock);
+
+    if ((ret = __rtcan_dev_new_index()) < 0) {
+	up(&rtcan_devices_nrt_lock);
+	return ret;
+    }
+    dev->ifindex = ret;
+
+    if (strchr(dev->name,'%') != NULL)
+	rtcan_dev_alloc_name(dev, dev->name);
+
+    if (__rtcan_dev_get_by_name(dev->name) != NULL) {
+	up(&rtcan_devices_nrt_lock);
+	return -EEXIST;
+    }
+
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+
+    rtcan_devices[dev->ifindex - 1] = dev;
+
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+    rtcan_dev_create_proc(dev);
+
+    up(&rtcan_devices_nrt_lock);
+
+    printk("rtcan: registered %s\n", dev->name);
+
+    return 0;
+}
+
+
+int rtcan_dev_unregister(struct rtcan_device *dev)
+{
+    rtdm_lockctx_t context;
+
+
+    RTCAN_ASSERT(dev->ifindex != 0,
+		 printk("RTCAN: device %s/%p was not registered\n",
+			dev->name, dev); return -ENODEV;);
+
+    /* If device is running, close it first. */
+    if (CAN_STATE_OPERATING(dev->state))
+	return -EBUSY;
+
+    down(&rtcan_devices_nrt_lock);
+
+    rtcan_dev_remove_proc(dev);
+
+    rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+
+#ifdef RTCAN_USE_REFCOUNT
+    while (atomic_read(&dev->refcount) > 0) {
+	rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+	up(&rtcan_devices_nrt_lock);
+
+	RTCAN_DBG("RTCAN: unregistering %s deferred (refcount = %d)\n",
+		  dev->name, atomic_read(&dev->refcount));
+	set_current_state(TASK_UNINTERRUPTIBLE);
+	schedule_timeout(1*HZ); /* wait a second */
+
+	down(&rtcan_devices_nrt_lock);
+	rtdm_lock_get_irqsave(&rtcan_devices_rt_lock, context);
+    }
+#endif
+    rtcan_devices[dev->ifindex - 1] = NULL;
+
+    rtdm_lock_put_irqrestore(&rtcan_devices_rt_lock, context);
+    up(&rtcan_devices_nrt_lock);
+
+#ifdef RTCAN_USE_REFCOUNT
+    RTCAN_ASSERT(atomic_read(&dev->refcount) == 0,
+		 printk("RTCAN: dev reference counter < 0!\n"););
+#endif
+
+    printk("RTCAN: unregistered %s\n", dev->name);
+
+    return 0;
+}
+
+
+EXPORT_SYMBOL_GPL(rtcan_socket_lock);
+EXPORT_SYMBOL_GPL(rtcan_recv_list_lock);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_free);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_alloc);
+EXPORT_SYMBOL_GPL(rtcan_dev_alloc_name);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_register);
+EXPORT_SYMBOL_GPL(rtcan_dev_unregister);
+
+EXPORT_SYMBOL_GPL(rtcan_dev_get_by_name);
+EXPORT_SYMBOL_GPL(rtcan_dev_get_by_index);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h
new file mode 100644
index 0000000..3642e92
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_dev.h
@@ -0,0 +1,205 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/include/rtdev.h:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __RTCAN_DEV_H_
+#define __RTCAN_DEV_H_
+
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <linux/netdevice.h>
+#include <linux/semaphore.h>
+
+#include "rtcan_list.h"
+
+
+/* Maximum number of CAN devices the driver can handle */
+#define RTCAN_MAX_DEVICES    CONFIG_XENO_DRIVERS_CAN_MAX_DEVICES
+
+/* Maximum number of single filters per controller which can be registered
+ * for reception at the same time using Bind */
+#define RTCAN_MAX_RECEIVERS  CONFIG_XENO_DRIVERS_CAN_MAX_RECEIVERS
+
+/* Suppress handling of refcount if module support is not enabled
+ * or modules cannot be unloaded */
+
+#if defined(CONFIG_MODULES) && defined(CONFIG_MODULE_UNLOAD)
+#define RTCAN_USE_REFCOUNT
+#endif
+
+/*
+ * CAN hardware-dependent bit-timing constant
+ *
+ * Used for calculating and checking bit-timing parameters
+ */
+struct can_bittiming_const {
+	char name[16];		/* Name of the CAN controller hardware */
+	__u32 tseg1_min;	/* Time segment 1 = prop_seg + phase_seg1 */
+	__u32 tseg1_max;
+	__u32 tseg2_min;	/* Time segment 2 = phase_seg2 */
+	__u32 tseg2_max;
+	__u32 sjw_max;		/* Synchronisation jump width */
+	__u32 brp_min;		/* Bit-rate prescaler */
+	__u32 brp_max;
+	__u32 brp_inc;
+};
+
+struct rtcan_device {
+    unsigned int        version;
+
+    char                name[IFNAMSIZ];
+
+    char                *ctrl_name; /* Name of CAN controller */
+    char                *board_name;/* Name of CAN board */
+
+    unsigned long       base_addr;  /* device I/O address   */
+    rtdm_irq_t          irq_handle; /* RTDM IRQ handle */
+
+    int                 ifindex;
+#ifdef RTCAN_USE_REFCOUNT
+    atomic_t            refcount;
+#endif
+
+    void                *priv;      /* pointer to chip private data */
+
+    void                *board_priv;/* pointer to board private data*/
+
+    struct semaphore    nrt_lock;   /* non-real-time locking        */
+
+    /* Spinlock for all devices (but not for all attributes) and also for HW
+     * access to all CAN controllers
+     */
+    rtdm_lock_t         device_lock;
+
+    /* Acts as a mutex allowing only one sender to write to the controller
+     * at a time. Created when the controller goes into operating mode,
+     * destroyed if it goes into reset mode. */
+    rtdm_sem_t          tx_sem;
+
+    /* CAN system clock frequency (Hz) of this device. Protected by
+     * device_lock in all device structures. */
+    unsigned int        can_sys_clock;
+
+
+    /* Baudrate of this device. Protected by device_lock in all device
+     * structures. */
+    can_baudrate_t      baudrate;
+
+    struct can_bittime  bit_time;
+    const struct can_bittiming_const *bittiming_const;
+
+    /* State which the controller is in. Protected by device_lock in all
+     * device structures. */
+    can_state_t state;
+
+    /* State which the controller was before sleeping. Protected by
+     * device_lock in all device structures. */
+    can_state_t          state_before_sleep;
+
+    /* Controller specific settings. Protected by device_lock in all
+     * device structures. */
+    can_ctrlmode_t       ctrl_mode;
+
+    /* Device operations */
+    int                 (*hard_start_xmit)(struct rtcan_device *dev,
+					   struct can_frame *frame);
+    int                 (*do_set_mode)(struct rtcan_device *dev,
+				       can_mode_t mode,
+				       rtdm_lockctx_t *lock_ctx);
+    can_state_t         (*do_get_state)(struct rtcan_device *dev);
+    int                 (*do_set_bit_time)(struct rtcan_device *dev,
+					   struct can_bittime *bit_time,
+					   rtdm_lockctx_t *lock_ctx);
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+    void                (*do_enable_bus_err)(struct rtcan_device *dev);
+#endif
+
+    /* Reception list head. This list contains all filters which have been
+     * registered via a bind call. */
+    struct rtcan_recv               *recv_list;
+
+    /* Empty list head. This list contains all empty entries not needed
+     * by the reception list and therefore is disjunctive with it. */
+    struct rtcan_recv               *empty_list;
+
+    /* Preallocated array for the list entries. To increase cache
+     * locality all list elements are kept in this array. */
+    struct rtcan_recv               receivers[RTCAN_MAX_RECEIVERS];
+
+    /* Indicates the length of the empty list */
+    int                             free_entries;
+
+    /* A few statistics counters */
+    unsigned int tx_count;
+    unsigned int rx_count;
+    unsigned int err_count;
+
+#ifdef CONFIG_PROC_FS
+    struct proc_dir_entry *proc_root;
+#endif
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+    struct rtcan_skb tx_skb;
+    struct rtcan_socket *tx_socket;
+#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+};
+
+
+extern struct semaphore rtcan_devices_nrt_lock;
+
+
+void rtcan_dev_free(struct rtcan_device *dev);
+
+int rtcan_dev_register(struct rtcan_device *dev);
+int rtcan_dev_unregister(struct rtcan_device *dev);
+
+struct rtcan_device *rtcan_dev_alloc(int sizeof_priv, int sizeof_board_priv);
+void rtcan_dev_alloc_name (struct rtcan_device *dev, const char *name_mask);
+
+struct rtcan_device *rtcan_dev_get_by_name(const char *if_name);
+struct rtcan_device *rtcan_dev_get_by_index(int ifindex);
+
+#ifdef RTCAN_USE_REFCOUNT
+#define rtcan_dev_reference(dev)      atomic_inc(&(dev)->refcount)
+#define rtcan_dev_dereference(dev)    atomic_dec(&(dev)->refcount)
+#else
+#define rtcan_dev_reference(dev)      do {} while(0)
+#define rtcan_dev_dereference(dev)    do {} while(0)
+#endif
+
+#ifdef CONFIG_PROC_FS
+int rtcan_dev_create_proc(struct rtcan_device* dev);
+void rtcan_dev_remove_proc(struct rtcan_device* dev);
+#else /* !CONFIG_PROC_FS */
+static inline int rtcan_dev_create_proc(struct rtcan_device* dev)
+{
+	return 0;
+}
+static inline void rtcan_dev_remove_proc(struct rtcan_device* dev) { }
+#endif /* !CONFIG_PROC_FS */
+
+#endif  /* __KERNEL__ */
+
+#endif  /* __RTCAN_DEV_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c
new file mode 100644
index 0000000..3348e8c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_flexcan.c
@@ -0,0 +1,1538 @@
+/*
+ * RTDM-based FLEXCAN CAN controller driver
+ *
+ * Rebased on linux 4.14.58 flexcan driver:
+ * Copyright (c) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Original port to RTDM:
+ * Copyright (c) 2012 Wolfgang Grandegger <wg@denx.de>
+ *
+ * Copyright (c) 2005-2006 Varma Electronics Oy
+ * Copyright (c) 2009 Sascha Hauer, Pengutronix
+ * Copyright (c) 2010-2017 Pengutronix, Marc Kleine-Budde <kernel@pengutronix.de>
+ * Copyright (c) 2014 David Jander, Protonic Holland
+ *
+ * Based on code originally by Andrey Volkov <avolkov@varma-el.com>
+ *
+ * LICENCE:
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation version 2.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/platform_device.h>
+#include <linux/regulator/consumer.h>
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+#include <asm/unaligned.h>
+
+#define DRV_NAME	"flexcan"
+#define DEV_NAME	"rtcan%d"
+
+#define CAN_MAX_DLC 8
+#define get_can_dlc(i)		(min_t(__u8, (i), CAN_MAX_DLC))
+
+/* 8 for RX fifo and 2 error handling */
+#define FLEXCAN_NAPI_WEIGHT		(8 + 2)
+
+/* FLEXCAN module configuration register (CANMCR) bits */
+#define FLEXCAN_MCR_MDIS		BIT(31)
+#define FLEXCAN_MCR_FRZ			BIT(30)
+#define FLEXCAN_MCR_FEN			BIT(29)
+#define FLEXCAN_MCR_HALT		BIT(28)
+#define FLEXCAN_MCR_NOT_RDY		BIT(27)
+#define FLEXCAN_MCR_WAK_MSK		BIT(26)
+#define FLEXCAN_MCR_SOFTRST		BIT(25)
+#define FLEXCAN_MCR_FRZ_ACK		BIT(24)
+#define FLEXCAN_MCR_SUPV		BIT(23)
+#define FLEXCAN_MCR_SLF_WAK		BIT(22)
+#define FLEXCAN_MCR_WRN_EN		BIT(21)
+#define FLEXCAN_MCR_LPM_ACK		BIT(20)
+#define FLEXCAN_MCR_WAK_SRC		BIT(19)
+#define FLEXCAN_MCR_DOZE		BIT(18)
+#define FLEXCAN_MCR_SRX_DIS		BIT(17)
+#define FLEXCAN_MCR_IRMQ		BIT(16)
+#define FLEXCAN_MCR_LPRIO_EN		BIT(13)
+#define FLEXCAN_MCR_AEN			BIT(12)
+/* MCR_MAXMB: maximum used MBs is MAXMB + 1 */
+#define FLEXCAN_MCR_MAXMB(x)		((x) & 0x7f)
+#define FLEXCAN_MCR_IDAM_A		(0x0 << 8)
+#define FLEXCAN_MCR_IDAM_B		(0x1 << 8)
+#define FLEXCAN_MCR_IDAM_C		(0x2 << 8)
+#define FLEXCAN_MCR_IDAM_D		(0x3 << 8)
+
+/* FLEXCAN control register (CANCTRL) bits */
+#define FLEXCAN_CTRL_PRESDIV(x)		(((x) & 0xff) << 24)
+#define FLEXCAN_CTRL_RJW(x)		(((x) & 0x03) << 22)
+#define FLEXCAN_CTRL_PSEG1(x)		(((x) & 0x07) << 19)
+#define FLEXCAN_CTRL_PSEG2(x)		(((x) & 0x07) << 16)
+#define FLEXCAN_CTRL_BOFF_MSK		BIT(15)
+#define FLEXCAN_CTRL_ERR_MSK		BIT(14)
+#define FLEXCAN_CTRL_CLK_SRC		BIT(13)
+#define FLEXCAN_CTRL_LPB		BIT(12)
+#define FLEXCAN_CTRL_TWRN_MSK		BIT(11)
+#define FLEXCAN_CTRL_RWRN_MSK		BIT(10)
+#define FLEXCAN_CTRL_SMP		BIT(7)
+#define FLEXCAN_CTRL_BOFF_REC		BIT(6)
+#define FLEXCAN_CTRL_TSYN		BIT(5)
+#define FLEXCAN_CTRL_LBUF		BIT(4)
+#define FLEXCAN_CTRL_LOM		BIT(3)
+#define FLEXCAN_CTRL_PROPSEG(x)		((x) & 0x07)
+#define FLEXCAN_CTRL_ERR_BUS		(FLEXCAN_CTRL_ERR_MSK)
+#define FLEXCAN_CTRL_ERR_STATE \
+	(FLEXCAN_CTRL_TWRN_MSK | FLEXCAN_CTRL_RWRN_MSK | \
+	 FLEXCAN_CTRL_BOFF_MSK)
+#define FLEXCAN_CTRL_ERR_ALL \
+	(FLEXCAN_CTRL_ERR_BUS | FLEXCAN_CTRL_ERR_STATE)
+
+/* FLEXCAN control register 2 (CTRL2) bits */
+#define FLEXCAN_CTRL2_ECRWRE		BIT(29)
+#define FLEXCAN_CTRL2_WRMFRZ		BIT(28)
+#define FLEXCAN_CTRL2_RFFN(x)		(((x) & 0x0f) << 24)
+#define FLEXCAN_CTRL2_TASD(x)		(((x) & 0x1f) << 19)
+#define FLEXCAN_CTRL2_MRP		BIT(18)
+#define FLEXCAN_CTRL2_RRS		BIT(17)
+#define FLEXCAN_CTRL2_EACEN		BIT(16)
+
+/* FLEXCAN memory error control register (MECR) bits */
+#define FLEXCAN_MECR_ECRWRDIS		BIT(31)
+#define FLEXCAN_MECR_HANCEI_MSK		BIT(19)
+#define FLEXCAN_MECR_FANCEI_MSK		BIT(18)
+#define FLEXCAN_MECR_CEI_MSK		BIT(16)
+#define FLEXCAN_MECR_HAERRIE		BIT(15)
+#define FLEXCAN_MECR_FAERRIE		BIT(14)
+#define FLEXCAN_MECR_EXTERRIE		BIT(13)
+#define FLEXCAN_MECR_RERRDIS		BIT(9)
+#define FLEXCAN_MECR_ECCDIS		BIT(8)
+#define FLEXCAN_MECR_NCEFAFRZ		BIT(7)
+
+/* FLEXCAN error and status register (ESR) bits */
+#define FLEXCAN_ESR_TWRN_INT		BIT(17)
+#define FLEXCAN_ESR_RWRN_INT		BIT(16)
+#define FLEXCAN_ESR_BIT1_ERR		BIT(15)
+#define FLEXCAN_ESR_BIT0_ERR		BIT(14)
+#define FLEXCAN_ESR_ACK_ERR		BIT(13)
+#define FLEXCAN_ESR_CRC_ERR		BIT(12)
+#define FLEXCAN_ESR_FRM_ERR		BIT(11)
+#define FLEXCAN_ESR_STF_ERR		BIT(10)
+#define FLEXCAN_ESR_TX_WRN		BIT(9)
+#define FLEXCAN_ESR_RX_WRN		BIT(8)
+#define FLEXCAN_ESR_IDLE		BIT(7)
+#define FLEXCAN_ESR_TXRX		BIT(6)
+#define FLEXCAN_ESR_FLT_CONF_SHIFT	(4)
+#define FLEXCAN_ESR_FLT_CONF_MASK	(0x3 << FLEXCAN_ESR_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_ACTIVE	(0x0 << FLEXCAN_ESR_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_FLT_CONF_PASSIVE	(0x1 << FLEXCAN_ESR_FLT_CONF_SHIFT)
+#define FLEXCAN_ESR_BOFF_INT		BIT(2)
+#define FLEXCAN_ESR_ERR_INT		BIT(1)
+#define FLEXCAN_ESR_WAK_INT		BIT(0)
+#define FLEXCAN_ESR_ERR_BUS \
+	(FLEXCAN_ESR_BIT1_ERR | FLEXCAN_ESR_BIT0_ERR | \
+	 FLEXCAN_ESR_ACK_ERR | FLEXCAN_ESR_CRC_ERR | \
+	 FLEXCAN_ESR_FRM_ERR | FLEXCAN_ESR_STF_ERR)
+#define FLEXCAN_ESR_ERR_STATE \
+	(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | FLEXCAN_ESR_BOFF_INT)
+#define FLEXCAN_ESR_ERR_ALL \
+	(FLEXCAN_ESR_ERR_BUS | FLEXCAN_ESR_ERR_STATE)
+#define FLEXCAN_ESR_ALL_INT \
+	(FLEXCAN_ESR_TWRN_INT | FLEXCAN_ESR_RWRN_INT | \
+	 FLEXCAN_ESR_BOFF_INT | FLEXCAN_ESR_ERR_INT)
+
+/* FLEXCAN interrupt flag register (IFLAG) bits */
+/* Errata ERR005829 step7: Reserve first valid MB */
+#define FLEXCAN_TX_MB_RESERVED_OFF_FIFO	8
+#define FLEXCAN_TX_MB_OFF_FIFO		9
+#define FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP	0
+#define FLEXCAN_TX_MB_OFF_TIMESTAMP		1
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST	(FLEXCAN_TX_MB_OFF_TIMESTAMP + 1)
+#define FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST	63
+#define FLEXCAN_RX_MB_TIMESTAMP_COUNT	(FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST -	\
+					 FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST + 1)
+#define FLEXCAN_IFLAG_MB(x)		BIT(x)
+#define FLEXCAN_IFLAG_RX_FIFO_OVERFLOW	BIT(7)
+#define FLEXCAN_IFLAG_RX_FIFO_WARN	BIT(6)
+#define FLEXCAN_IFLAG_RX_FIFO_AVAILABLE	BIT(5)
+
+/* FLEXCAN message buffers */
+#define FLEXCAN_MB_CODE_MASK		(0xf << 24)
+#define FLEXCAN_MB_CODE_RX_BUSY_BIT	(0x1 << 24)
+#define FLEXCAN_MB_CODE_RX_INACTIVE	(0x0 << 24)
+#define FLEXCAN_MB_CODE_RX_EMPTY	(0x4 << 24)
+#define FLEXCAN_MB_CODE_RX_FULL		(0x2 << 24)
+#define FLEXCAN_MB_CODE_RX_OVERRUN	(0x6 << 24)
+#define FLEXCAN_MB_CODE_RX_RANSWER	(0xa << 24)
+
+#define FLEXCAN_MB_CODE_TX_INACTIVE	(0x8 << 24)
+#define FLEXCAN_MB_CODE_TX_ABORT	(0x9 << 24)
+#define FLEXCAN_MB_CODE_TX_DATA		(0xc << 24)
+#define FLEXCAN_MB_CODE_TX_TANSWER	(0xe << 24)
+
+#define FLEXCAN_MB_CNT_SRR		BIT(22)
+#define FLEXCAN_MB_CNT_IDE		BIT(21)
+#define FLEXCAN_MB_CNT_RTR		BIT(20)
+#define FLEXCAN_MB_CNT_LENGTH(x)	(((x) & 0xf) << 16)
+#define FLEXCAN_MB_CNT_TIMESTAMP(x)	((x) & 0xffff)
+
+#define FLEXCAN_TIMEOUT_US		(50)
+
+/* FLEXCAN hardware feature flags
+ *
+ * Below is some version info we got:
+ *    SOC   Version   IP-Version  Glitch- [TR]WRN_INT IRQ Err Memory err RTR re-
+ *                                Filter? connected?  Passive detection  ception in MB
+ *   MX25  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX28  FlexCAN2  03.00.04.00    yes       yes        no       no        no
+ *   MX35  FlexCAN2  03.00.00.00     no        no         ?       no        no
+ *   MX53  FlexCAN2  03.00.00.00    yes        no        no       no        no
+ *   MX6s  FlexCAN3  10.00.12.00    yes       yes        no       no       yes
+ *   VF610 FlexCAN3  ?               no       yes        no      yes       yes?
+ *
+ * Some SOCs do not have the RX_WARN & TX_WARN interrupt line connected.
+ */
+#define FLEXCAN_QUIRK_BROKEN_WERR_STATE	BIT(1) /* [TR]WRN_INT not connected */
+#define FLEXCAN_QUIRK_DISABLE_RXFG	BIT(2) /* Disable RX FIFO Global mask */
+#define FLEXCAN_QUIRK_ENABLE_EACEN_RRS	BIT(3) /* Enable EACEN and RRS bit in ctrl2 */
+#define FLEXCAN_QUIRK_DISABLE_MECR	BIT(4) /* Disable Memory error detection */
+#define FLEXCAN_QUIRK_USE_OFF_TIMESTAMP	BIT(5) /* Use timestamp based offloading */
+#define FLEXCAN_QUIRK_BROKEN_PERR_STATE	BIT(6) /* No interrupt for error passive */
+
+/* Structure of the message buffer */
+struct flexcan_mb {
+	u32 can_ctrl;
+	u32 can_id;
+	u32 data[2];
+};
+
+/* Structure of the hardware registers */
+struct flexcan_regs {
+	u32 mcr;		/* 0x00 */
+	u32 ctrl;		/* 0x04 */
+	u32 timer;		/* 0x08 */
+	u32 _reserved1;		/* 0x0c */
+	u32 rxgmask;		/* 0x10 */
+	u32 rx14mask;		/* 0x14 */
+	u32 rx15mask;		/* 0x18 */
+	u32 ecr;		/* 0x1c */
+	u32 esr;		/* 0x20 */
+	u32 imask2;		/* 0x24 */
+	u32 imask1;		/* 0x28 */
+	u32 iflag2;		/* 0x2c */
+	u32 iflag1;		/* 0x30 */
+	union {			/* 0x34 */
+		u32 gfwr_mx28;	/* MX28, MX53 */
+		u32 ctrl2;	/* MX6, VF610 */
+	};
+	u32 esr2;		/* 0x38 */
+	u32 imeur;		/* 0x3c */
+	u32 lrfr;		/* 0x40 */
+	u32 crcr;		/* 0x44 */
+	u32 rxfgmask;		/* 0x48 */
+	u32 rxfir;		/* 0x4c */
+	u32 _reserved3[12];	/* 0x50 */
+	struct flexcan_mb mb[64];	/* 0x80 */
+	/* FIFO-mode:
+	 *			MB
+	 * 0x080...0x08f	0	RX message buffer
+	 * 0x090...0x0df	1-5	reserved
+	 * 0x0e0...0x0ff	6-7	8 entry ID table
+	 *				(mx25, mx28, mx35, mx53)
+	 * 0x0e0...0x2df	6-7..37	8..128 entry ID table
+	 *				size conf'ed via ctrl2::RFFN
+	 *				(mx6, vf610)
+	 */
+	u32 _reserved4[256];	/* 0x480 */
+	u32 rximr[64];		/* 0x880 */
+	u32 _reserved5[24];	/* 0x980 */
+	u32 gfwr_mx6;		/* 0x9e0 - MX6 */
+	u32 _reserved6[63];	/* 0x9e4 */
+	u32 mecr;		/* 0xae0 */
+	u32 erriar;		/* 0xae4 */
+	u32 erridpr;		/* 0xae8 */
+	u32 errippr;		/* 0xaec */
+	u32 rerrar;		/* 0xaf0 */
+	u32 rerrdr;		/* 0xaf4 */
+	u32 rerrsynr;		/* 0xaf8 */
+	u32 errsr;		/* 0xafc */
+};
+
+struct flexcan_devtype_data {
+	u32 quirks;		/* quirks needed for different IP cores */
+};
+
+struct flexcan_timestamped_frame {
+	struct rtcan_skb skb;
+	u32 timestamp;
+	struct list_head next;
+};
+
+struct flexcan_priv {
+	unsigned int irq;
+	unsigned int mb_first;
+	unsigned int mb_last;
+	struct can_bittime bittiming;
+	struct flexcan_timestamped_frame *ts_frames;
+
+	struct flexcan_regs __iomem *regs;
+	struct flexcan_mb __iomem *tx_mb;
+	struct flexcan_mb __iomem *tx_mb_reserved;
+	u8 tx_mb_idx;
+	u32 reg_ctrl_default;
+	u32 reg_imask1_default;
+	u32 reg_imask2_default;
+
+	struct clk *clk_ipg;
+	struct clk *clk_per;
+	const struct flexcan_devtype_data *devtype_data;
+	struct regulator *reg_xceiver;
+
+	unsigned long bus_errors;
+};
+
+static const struct flexcan_devtype_data fsl_p1010_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct flexcan_devtype_data fsl_imx28_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct flexcan_devtype_data fsl_imx6q_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+	FLEXCAN_QUIRK_USE_OFF_TIMESTAMP | FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct flexcan_devtype_data fsl_vf610_devtype_data = {
+	.quirks = FLEXCAN_QUIRK_DISABLE_RXFG | FLEXCAN_QUIRK_ENABLE_EACEN_RRS |
+		FLEXCAN_QUIRK_DISABLE_MECR | FLEXCAN_QUIRK_USE_OFF_TIMESTAMP |
+		FLEXCAN_QUIRK_BROKEN_PERR_STATE,
+};
+
+static const struct can_bittiming_const flexcan_bittiming_const = {
+	.name = DRV_NAME,
+	.tseg1_min = 4,
+	.tseg1_max = 16,
+	.tseg2_min = 2,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 256,
+	.brp_inc = 1,
+};
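+
+/* For reference, per the usual CAN bit-timing definitions a bit is
+ * 1 + prop_seg + phase_seg1 + phase_seg2 time quanta of brp clock
+ * ticks each, so e.g. a 24 MHz can_sys_clock with brp=3, prop_seg=2,
+ * phase_seg1=3, phase_seg2=2 gives 24000000 / (3 * 8) = 1 Mbit/s.
+ */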
+
+/* Abstract off the read/write for arm versus ppc. This
+ * assumes that PPC uses big-endian registers and everything
+ * else uses little-endian registers, independent of CPU
+ * endianness.
+ */
+#if defined(CONFIG_PPC)
+static inline u32 flexcan_read(void __iomem *addr)
+{
+	return in_be32(addr);
+}
+
+static inline void flexcan_write(u32 val, void __iomem *addr)
+{
+	out_be32(addr, val);
+}
+#else
+static inline u32 flexcan_read(void __iomem *addr)
+{
+	return readl(addr);
+}
+
+static inline void flexcan_write(u32 val, void __iomem *addr)
+{
+	writel(val, addr);
+}
+#endif
+
+static inline void flexcan_error_irq_enable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default | FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline void flexcan_error_irq_disable(const struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_ctrl = (priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_MSK);
+
+	flexcan_write(reg_ctrl, &regs->ctrl);
+}
+
+static inline int flexcan_transceiver_enable(const struct flexcan_priv *priv)
+{
+	if (!priv->reg_xceiver)
+		return 0;
+
+	return regulator_enable(priv->reg_xceiver);
+}
+
+static inline int flexcan_transceiver_disable(const struct flexcan_priv *priv)
+{
+	if (!priv->reg_xceiver)
+		return 0;
+
+	return regulator_disable(priv->reg_xceiver);
+}
+
+static int flexcan_chip_enable(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg &= ~FLEXCAN_MCR_MDIS;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		udelay(10);
+
+	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_disable(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg |= FLEXCAN_MCR_MDIS;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		udelay(10);
+
+	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_LPM_ACK))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_freeze(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = 1000 * 1000 * 10 / dev->baudrate;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg |= FLEXCAN_MCR_HALT;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && !(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+		udelay(100);
+
+	if (!(flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_unfreeze(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+	u32 reg;
+
+	reg = flexcan_read(&regs->mcr);
+	reg &= ~FLEXCAN_MCR_HALT;
+	flexcan_write(reg, &regs->mcr);
+
+	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK))
+		udelay(10);
+
+	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_FRZ_ACK)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_chip_softreset(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	unsigned int timeout = FLEXCAN_TIMEOUT_US / 10;
+
+	flexcan_write(FLEXCAN_MCR_SOFTRST, &regs->mcr);
+	while (timeout-- && (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST))
+		udelay(10);
+
+	if (flexcan_read(&regs->mcr) & FLEXCAN_MCR_SOFTRST)
+		return -ETIMEDOUT;
+
+	return 0;
+}
+
+static int flexcan_start_xmit(struct rtcan_device *dev, struct can_frame *cf)
+{
+	const struct flexcan_priv *priv = rtcan_priv(dev);
+	u32 can_id, data, ctrl;
+
+	/* clamp the DLC before it is encoded into the MB control word */
+	if (cf->can_dlc > CAN_MAX_DLC)
+		cf->can_dlc = CAN_MAX_DLC;
+
+	ctrl = FLEXCAN_MB_CODE_TX_DATA | (cf->can_dlc << 16);
+	if (cf->can_id & CAN_EFF_FLAG) {
+		can_id = cf->can_id & CAN_EFF_MASK;
+		ctrl |= FLEXCAN_MB_CNT_IDE | FLEXCAN_MB_CNT_SRR;
+	} else {
+		can_id = (cf->can_id & CAN_SFF_MASK) << 18;
+	}
+
+	if (cf->can_id & CAN_RTR_FLAG)
+		ctrl |= FLEXCAN_MB_CNT_RTR;
+
+	if (cf->can_dlc > 0) {
+		data = be32_to_cpup((__be32 *)&cf->data[0]);
+		flexcan_write(data, &priv->tx_mb->data[0]);
+	}
+	if (cf->can_dlc > 4) {
+		data = be32_to_cpup((__be32 *)&cf->data[4]);
+		flexcan_write(data, &priv->tx_mb->data[1]);
+	}
+
+	flexcan_write(can_id, &priv->tx_mb->can_id);
+	flexcan_write(ctrl, &priv->tx_mb->can_ctrl);
+
+	/* Errata ERR005829 step 8:
+	 * write the INACTIVE (0x8) code twice to the first MB.
+	 */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb_reserved->can_ctrl);
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb_reserved->can_ctrl);
+
+	return 0;
+}
+
+static void init_err_skb(struct rtcan_skb *skb)
+{
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC;
+	cf->can_id = CAN_ERR_FLAG;
+	cf->can_dlc = CAN_ERR_DLC;
+	memset(&cf->data[0], 0, cf->can_dlc);
+}
+
+static void flexcan_irq_bus_err(struct rtcan_device *dev,
+				u32 reg_esr, struct rtcan_skb *skb)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+
+	init_err_skb(skb);
+
+	cf->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+	if (reg_esr & FLEXCAN_ESR_BIT1_ERR) {
+		rtcandev_dbg(dev, "BIT1_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT1;
+	}
+	if (reg_esr & FLEXCAN_ESR_BIT0_ERR) {
+		rtcandev_dbg(dev, "BIT0_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT0;
+	}
+	if (reg_esr & FLEXCAN_ESR_ACK_ERR) {
+		rtcandev_dbg(dev, "ACK_ERR irq\n");
+		cf->can_id |= CAN_ERR_ACK;
+		cf->data[3] = CAN_ERR_PROT_LOC_ACK;
+	}
+	if (reg_esr & FLEXCAN_ESR_CRC_ERR) {
+		rtcandev_dbg(dev, "CRC_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_BIT;
+		cf->data[3] = CAN_ERR_PROT_LOC_CRC_SEQ;
+	}
+	if (reg_esr & FLEXCAN_ESR_FRM_ERR) {
+		rtcandev_dbg(dev, "FRM_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_FORM;
+	}
+	if (reg_esr & FLEXCAN_ESR_STF_ERR) {
+		rtcandev_dbg(dev, "STF_ERR irq\n");
+		cf->data[2] |= CAN_ERR_PROT_STUFF;
+	}
+
+	priv->bus_errors++;
+}
+
+struct berr_counter {
+	u16 txerr;
+	u16 rxerr;
+};
+
+static void flexcan_change_state(struct rtcan_device *dev,
+				 struct rtcan_rb_frame *cf,
+				 struct berr_counter *bec,
+				 can_state_t new_state)
+{
+	switch (dev->state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		/*
+		 * from: ERROR_ACTIVE
+		 * to  : ERROR_WARNING, ERROR_PASSIVE, BUS_OFF
+		 * =>  : there was a warning int
+		 */
+		if (new_state >= CAN_STATE_ERROR_WARNING &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			rtcandev_dbg(dev, "Error Warning IRQ\n");
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (bec->txerr > bec->rxerr) ?
+				CAN_ERR_CRTL_TX_WARNING :
+				CAN_ERR_CRTL_RX_WARNING;
+		}
+		fallthrough;
+	case CAN_STATE_ERROR_WARNING:
+		/*
+		 * from: ERROR_ACTIVE, ERROR_WARNING
+		 * to  : ERROR_PASSIVE, BUS_OFF
+		 * =>  : error passive int
+		 */
+		if (new_state >= CAN_STATE_ERROR_PASSIVE &&
+		    new_state <= CAN_STATE_BUS_OFF) {
+			rtcandev_dbg(dev, "Error Passive IRQ\n");
+
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = (bec->txerr > bec->rxerr) ?
+				CAN_ERR_CRTL_TX_PASSIVE :
+				CAN_ERR_CRTL_RX_PASSIVE;
+		}
+		break;
+	case CAN_STATE_BUS_OFF:
+		rtcandev_err(dev, "BUG! "
+			     "hardware recovered automatically from BUS_OFF\n");
+		break;
+	default:
+		break;
+	}
+
+	/* process state changes depending on the new state */
+	switch (new_state) {
+	case CAN_STATE_ERROR_ACTIVE:
+		rtcandev_dbg(dev, "Error Active\n");
+		cf->can_id |= CAN_ERR_PROT;
+		cf->data[2] = CAN_ERR_PROT_ACTIVE;
+		break;
+	case CAN_STATE_BUS_OFF:
+		cf->can_id |= CAN_ERR_BUSOFF;
+		/* Wake up waiting senders */
+		rtdm_sem_destroy(&dev->tx_sem);
+		break;
+	default:
+		break;
+	}
+
+	dev->state = new_state;
+}
+
+static bool flexcan_irq_state(struct rtcan_device *dev, u32 reg_esr,
+			      struct rtcan_skb *skb)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	enum CAN_STATE new_state, rx_state, tx_state;
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+	struct berr_counter bec;
+	u32 reg;
+	int flt;
+
+	reg = flexcan_read(&regs->ecr);
+	bec.txerr = (reg >> 0) & 0xff;
+	bec.rxerr = (reg >> 8) & 0xff;
+
+	flt = reg_esr & FLEXCAN_ESR_FLT_CONF_MASK;
+	if (likely(flt == FLEXCAN_ESR_FLT_CONF_ACTIVE)) {
+		tx_state = unlikely(reg_esr & FLEXCAN_ESR_TX_WRN) ?
+			CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+		rx_state = unlikely(reg_esr & FLEXCAN_ESR_RX_WRN) ?
+			CAN_STATE_ERROR_WARNING : CAN_STATE_ERROR_ACTIVE;
+		new_state = max(tx_state, rx_state);
+	} else
+		new_state = flt == FLEXCAN_ESR_FLT_CONF_PASSIVE ?
+			CAN_STATE_ERROR_PASSIVE : CAN_STATE_BUS_OFF;
+
+	/* state hasn't changed */
+	if (likely(new_state == dev->state))
+		return false;
+
+	init_err_skb(skb);
+
+	flexcan_change_state(dev, cf, &bec, new_state);
+
+	return true;
+}
+
+static unsigned int flexcan_mailbox_read(struct rtcan_device *dev,
+					 struct rtcan_skb *skb,
+					 u32 *timestamp, unsigned int n)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	struct flexcan_mb __iomem *mb = &regs->mb[n];
+	u32 reg_ctrl, reg_id, reg_iflag1, code;
+	struct rtcan_rb_frame *cf = &skb->rb_frame;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		do {
+			reg_ctrl = flexcan_read(&mb->can_ctrl);
+		} while (reg_ctrl & FLEXCAN_MB_CODE_RX_BUSY_BIT);
+
+		/* is this MB empty? */
+		code = reg_ctrl & FLEXCAN_MB_CODE_MASK;
+		if ((code != FLEXCAN_MB_CODE_RX_FULL) &&
+		    (code != FLEXCAN_MB_CODE_RX_OVERRUN))
+			return 0;
+	} else {
+		reg_iflag1 = flexcan_read(&regs->iflag1);
+		if (!(reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE))
+			return 0;
+
+		reg_ctrl = flexcan_read(&mb->can_ctrl);
+	}
+
+	/* increase the timestamp to full 32 bit */
+	*timestamp = reg_ctrl << 16;
+
+	cf->can_dlc = get_can_dlc((reg_ctrl >> 16) & 0xf);
+	reg_id = flexcan_read(&mb->can_id);
+	if (reg_ctrl & FLEXCAN_MB_CNT_IDE)
+		cf->can_id = ((reg_id >> 0) & CAN_EFF_MASK) | CAN_EFF_FLAG;
+	else
+		cf->can_id = (reg_id >> 18) & CAN_SFF_MASK;
+
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	if (reg_ctrl & FLEXCAN_MB_CNT_RTR)
+		cf->can_id |= CAN_RTR_FLAG;
+	else
+		skb->rb_frame_size += cf->can_dlc;
+
+	put_unaligned_be32(flexcan_read(&mb->data[0]), cf->data + 0);
+	put_unaligned_be32(flexcan_read(&mb->data[1]), cf->data + 4);
+
+	cf->can_ifindex = dev->ifindex;
+
+	/* mark as read */
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		/* Clear IRQ */
+		if (n < 32)
+			flexcan_write(BIT(n), &regs->iflag1);
+		else
+			flexcan_write(BIT(n - 32), &regs->iflag2);
+	} else {
+		flexcan_write(FLEXCAN_IFLAG_RX_FIFO_AVAILABLE, &regs->iflag1);
+		flexcan_read(&regs->timer);
+	}
+
+	return 1;
+}
+
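+/* The RX mailbox range runs from mb_first towards mb_last and may count
+ * either up or down depending on the layout; these two helpers hide the
+ * scan direction from the caller.
+ */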
+static inline bool flexcan_rx_le(struct flexcan_priv *priv, unsigned int a, unsigned int b)
+{
+	if (priv->mb_first < priv->mb_last)
+		return a <= b;
+
+	return a >= b;
+}
+
+static inline unsigned int flexcan_rx_inc(struct flexcan_priv *priv, unsigned int *val)
+{
+	if (priv->mb_first < priv->mb_last)
+		return (*val)++;
+
+	return (*val)--;
+}
+
+static int flexcan_mailbox_read_timestamp(struct rtcan_device *dev, u64 pending)
+{
+	struct flexcan_timestamped_frame *new, *pos, *tmp;
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct list_head q, *head;
+	int i, count = 0;
+
+	INIT_LIST_HEAD(&q);
+
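+	/* Drain every pending mailbox, insertion-sorting the frames by
+	 * hardware timestamp so they reach the stack in reception order.
+	 */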
+	for (i = priv->mb_first;
+	     flexcan_rx_le(priv, i, priv->mb_last);
+	     flexcan_rx_inc(priv, &i)) {
+		if (!(pending & BIT_ULL(i)))
+			continue;
+
+		new = priv->ts_frames + (i - priv->mb_first);
+		if (!flexcan_mailbox_read(dev, &new->skb, &new->timestamp, i))
+			break;
+
+		head = &q;
+		if (list_empty(&q))
+			goto add;
+
+		list_for_each_entry_reverse(pos, &q, next) {
+			/*
+			 * Subtract two u32 values and return the result
+			 * as int, so the comparison stays correct across
+			 * u32 wraparound.
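+			 * E.g. (int)(0x00000002 - 0xfffffffe) == 4, so a
+			 * timestamp taken just after the timer wraps still
+			 * sorts as newer.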
+			 */
+			if (((int)(new->timestamp - pos->timestamp)) >= 0) {
+				head = &pos->next;
+				break;
+			}
+		}
+	add:
+		list_add(&new->next, head);
+		count++;
+	}
+
+	if (list_empty(&q))
+		return 0;
+
+	list_for_each_entry_safe(pos, tmp, &q, next)
+		rtcan_rcv(dev, &pos->skb);
+
+	return count;
+}
+
+static void flexcan_mailbox_read_fifo(struct rtcan_device *dev)
+{
+	struct rtcan_skb skb;
+	u32 timestamp;
+
+	for (;;) {
+		if (!flexcan_mailbox_read(dev, &skb, &timestamp, 0))
+			break;
+		rtcan_rcv(dev, &skb);
+	}
+}
+
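+/* Gather the pending RX mailbox flags from IFLAG1/IFLAG2 into one u64,
+ * masking out the TX mailbox bit so a TX completion is not mistaken for
+ * a received frame.
+ */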
+static inline u64 flexcan_read_reg_iflag_rx(struct flexcan_priv *priv)
+{
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 iflag1, iflag2;
+
+	iflag2 = flexcan_read(&regs->iflag2) & priv->reg_imask2_default;
+	iflag1 = flexcan_read(&regs->iflag1) & priv->reg_imask1_default &
+		~FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+
+	return (u64)iflag2 << 32 | iflag1;
+}
+
+static int flexcan_do_rx(struct rtcan_device *dev, u32 reg_iflag1)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	struct rtcan_skb skb;
+	struct rtcan_rb_frame *cf = &skb.rb_frame;
+	bool input = false;
+	u64 reg;
+	int ret;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		while ((reg = flexcan_read_reg_iflag_rx(priv))) {
+			input = true;
+			ret = flexcan_mailbox_read_timestamp(dev, reg);
+			if (!ret)
+				break;
+		}
+	} else {
+		if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_OVERFLOW) {
+			flexcan_write(FLEXCAN_IFLAG_RX_FIFO_OVERFLOW, &regs->iflag1);
+			init_err_skb(&skb);
+			cf->can_id |= CAN_ERR_CRTL;
+			cf->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+			input = true;
+		} else  if (reg_iflag1 & FLEXCAN_IFLAG_RX_FIFO_AVAILABLE) {
+			flexcan_mailbox_read_fifo(dev);
+			input = true;
+		}
+	}
+
+	return input;
+}
+
+static int flexcan_irq(rtdm_irq_t *irq_handle)
+{
+	struct rtcan_device *dev = rtdm_irq_get_arg(irq_handle, void);
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_iflag1, reg_esr;
+	struct rtcan_skb skb;
+	int handled = RTDM_IRQ_NONE;
+
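+	/* Nested lock order: device_lock -> recv_list_lock -> socket_lock,
+	 * released in reverse order at the end of the handler.
+	 */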
+	rtdm_lock_get(&dev->device_lock);
+	rtdm_lock_get(&rtcan_recv_list_lock);
+	rtdm_lock_get(&rtcan_socket_lock);
+
+	reg_iflag1 = flexcan_read(&regs->iflag1);
+
+	/* reception interrupt */
+	if (flexcan_do_rx(dev, reg_iflag1))
+		handled = RTDM_IRQ_HANDLED;
+
+	/* transmission complete interrupt */
+	if (reg_iflag1 & FLEXCAN_IFLAG_MB(priv->tx_mb_idx)) {
+		/* after sending an RTR frame, the MB is in RX mode */
+		flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+			      &priv->tx_mb->can_ctrl);
+		flexcan_write(FLEXCAN_IFLAG_MB(priv->tx_mb_idx), &regs->iflag1);
+		rtdm_sem_up(&dev->tx_sem);
+		dev->tx_count++;
+		if (rtcan_loopback_pending(dev))
+			rtcan_loopback(dev);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	reg_esr = flexcan_read(&regs->esr);
+
+	/* ACK all bus error and state change IRQ sources */
+	if (reg_esr & FLEXCAN_ESR_ALL_INT) {
+		flexcan_write(reg_esr & FLEXCAN_ESR_ALL_INT, &regs->esr);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	/* state change interrupt, or a broken error state quirk forces a check */
+	if (reg_esr & FLEXCAN_ESR_ERR_STATE)
+		handled = RTDM_IRQ_HANDLED;
+	else if (priv->devtype_data->quirks & (FLEXCAN_QUIRK_BROKEN_WERR_STATE |
+					       FLEXCAN_QUIRK_BROKEN_PERR_STATE))
+		goto esr_err;
+
+	if (reg_esr & FLEXCAN_ESR_ERR_STATE) {
+	esr_err:
+		if (flexcan_irq_state(dev, reg_esr, &skb)) {
+			rtcan_rcv(dev, &skb);
+		}
+	}
+
+	/* bus error IRQ - report unconditionally */
+	if (reg_esr & FLEXCAN_ESR_ERR_BUS) {
+		flexcan_irq_bus_err(dev, reg_esr, &skb);
+		rtcan_rcv(dev, &skb);
+		handled = RTDM_IRQ_HANDLED;
+	}
+
+	rtdm_lock_put(&rtcan_socket_lock);
+	rtdm_lock_put(&rtcan_recv_list_lock);
+	rtdm_lock_put(&dev->device_lock);
+
+	return handled;
+}
+
+static void flexcan_set_bittiming(struct rtcan_device *dev)
+{
+	const struct flexcan_priv *priv = rtcan_priv(dev);
+	const struct can_bittime *bt = &priv->bittiming;
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg;
+
+	reg = flexcan_read(&regs->ctrl);
+	reg &= ~(FLEXCAN_CTRL_PRESDIV(0xff) |
+		 FLEXCAN_CTRL_RJW(0x3) |
+		 FLEXCAN_CTRL_PSEG1(0x7) |
+		 FLEXCAN_CTRL_PSEG2(0x7) |
+		 FLEXCAN_CTRL_PROPSEG(0x7) |
+		 FLEXCAN_CTRL_LPB |
+		 FLEXCAN_CTRL_SMP |
+		 FLEXCAN_CTRL_LOM);
+
+	reg |= FLEXCAN_CTRL_PRESDIV(bt->std.brp - 1) |
+		FLEXCAN_CTRL_PSEG1(bt->std.phase_seg1 - 1) |
+		FLEXCAN_CTRL_PSEG2(bt->std.phase_seg2 - 1) |
+		FLEXCAN_CTRL_RJW(bt->std.sjw - 1) |
+		FLEXCAN_CTRL_PROPSEG(bt->std.prop_seg - 1);
+
+	if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+		reg |= FLEXCAN_CTRL_LPB;
+	if (dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)
+		reg |= FLEXCAN_CTRL_LOM;
+	if (dev->ctrl_mode & CAN_CTRLMODE_3_SAMPLES)
+		reg |= FLEXCAN_CTRL_SMP;
+
+	rtcandev_dbg(dev, "writing ctrl=0x%08x\n", reg);
+	flexcan_write(reg, &regs->ctrl);
+
+	/* print chip status */
+	rtcandev_dbg(dev, "%s: mcr=0x%08x ctrl=0x%08x\n", __func__,
+		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+}
+
+/* flexcan_chip_start
+ *
+ * this function is entered with clocks enabled
+ */
+static int flexcan_chip_start(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg_mcr, reg_ctrl, reg_ctrl2, reg_mecr;
+	int err, i;
+
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
+
+	/* enable module */
+	err = flexcan_chip_enable(priv);
+	if (err)
+		goto out_disable_per;
+
+	/* soft reset */
+	err = flexcan_chip_softreset(priv);
+	if (err)
+		goto out_chip_disable;
+
+	flexcan_set_bittiming(dev);
+
+	/* MCR
+	 *
+	 * enable freeze
+	 * enable fifo
+	 * halt now
+	 * only supervisor access
+	 * enable warning int
+	 * disable local echo
+	 * enable individual RX masking
+	 * choose format C
+	 * set max mailbox number
+	 */
+	reg_mcr = flexcan_read(&regs->mcr);
+	reg_mcr &= ~FLEXCAN_MCR_MAXMB(0xff);
+	reg_mcr |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT | FLEXCAN_MCR_SUPV |
+		FLEXCAN_MCR_WRN_EN | FLEXCAN_MCR_SRX_DIS | FLEXCAN_MCR_IRMQ |
+		FLEXCAN_MCR_IDAM_C;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		reg_mcr &= ~FLEXCAN_MCR_FEN;
+		reg_mcr |= FLEXCAN_MCR_MAXMB(priv->mb_last);
+	} else {
+		reg_mcr |= FLEXCAN_MCR_FEN |
+			FLEXCAN_MCR_MAXMB(priv->tx_mb_idx);
+	}
+	rtcandev_dbg(dev, "%s: writing mcr=0x%08x", __func__, reg_mcr);
+	flexcan_write(reg_mcr, &regs->mcr);
+
+	/* CTRL
+	 *
+	 * disable timer sync feature
+	 *
+	 * disable auto busoff recovery
+	 * transmit lowest buffer first
+	 *
+	 * enable tx and rx warning interrupt
+	 * enable bus off interrupt
+	 * (== FLEXCAN_CTRL_ERR_STATE)
+	 */
+	reg_ctrl = flexcan_read(&regs->ctrl);
+	reg_ctrl &= ~FLEXCAN_CTRL_TSYN;
+	reg_ctrl |= FLEXCAN_CTRL_BOFF_REC | FLEXCAN_CTRL_LBUF |
+		FLEXCAN_CTRL_ERR_STATE;
+
+	/* enable the "error interrupt" (FLEXCAN_CTRL_ERR_MSK),
+	 * on most Flexcan cores, too. Otherwise we don't get
+	 * any error warning or passive interrupts.
+	 */
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_BROKEN_WERR_STATE)
+		reg_ctrl |= FLEXCAN_CTRL_ERR_MSK;
+	else
+		reg_ctrl &= ~FLEXCAN_CTRL_ERR_MSK;
+
+	/* save for later use */
+	priv->reg_ctrl_default = reg_ctrl;
+	/* leave interrupts disabled for now */
+	reg_ctrl &= ~FLEXCAN_CTRL_ERR_ALL;
+	rtcandev_dbg(dev, "%s: writing ctrl=0x%08x", __func__, reg_ctrl);
+	flexcan_write(reg_ctrl, &regs->ctrl);
+
+	if ((priv->devtype_data->quirks & FLEXCAN_QUIRK_ENABLE_EACEN_RRS)) {
+		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 |= FLEXCAN_CTRL2_EACEN | FLEXCAN_CTRL2_RRS;
+		flexcan_write(reg_ctrl2, &regs->ctrl2);
+	}
+
+	/* clear and invalidate all mailboxes first */
+	for (i = priv->tx_mb_idx; i < ARRAY_SIZE(regs->mb); i++) {
+		flexcan_write(FLEXCAN_MB_CODE_RX_INACTIVE,
+			      &regs->mb[i].can_ctrl);
+	}
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		for (i = priv->mb_first; i <= priv->mb_last; i++)
+			flexcan_write(FLEXCAN_MB_CODE_RX_EMPTY,
+				      &regs->mb[i].can_ctrl);
+	}
+
+	/* Errata ERR005829: mark first TX mailbox as INACTIVE */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb_reserved->can_ctrl);
+
+	/* mark TX mailbox as INACTIVE */
+	flexcan_write(FLEXCAN_MB_CODE_TX_INACTIVE,
+		      &priv->tx_mb->can_ctrl);
+
+	/* acceptance mask/acceptance code (accept everything) */
+	flexcan_write(0x0, &regs->rxgmask);
+	flexcan_write(0x0, &regs->rx14mask);
+	flexcan_write(0x0, &regs->rx15mask);
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_RXFG)
+		flexcan_write(0x0, &regs->rxfgmask);
+
+	/* clear acceptance filters */
+	for (i = 0; i < ARRAY_SIZE(regs->mb); i++)
+		flexcan_write(0, &regs->rximr[i]);
+
+	/* On Vybrid, disable memory error detection interrupts
+	 * and freeze mode.
+	 * This also works around errata e5295 which generates
+	 * false positive memory errors and put the device in
+	 * freeze mode.
+	 */
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_DISABLE_MECR) {
+		/* Follow the protocol as described in "Detection
+		 * and Correction of Memory Errors" to write to
+		 * MECR register
+		 */
+		reg_ctrl2 = flexcan_read(&regs->ctrl2);
+		reg_ctrl2 |= FLEXCAN_CTRL2_ECRWRE;
+		flexcan_write(reg_ctrl2, &regs->ctrl2);
+
+		reg_mecr = flexcan_read(&regs->mecr);
+		reg_mecr &= ~FLEXCAN_MECR_ECRWRDIS;
+		flexcan_write(reg_mecr, &regs->mecr);
+		reg_mecr &= ~(FLEXCAN_MECR_NCEFAFRZ | FLEXCAN_MECR_HANCEI_MSK |
+			      FLEXCAN_MECR_FANCEI_MSK);
+		flexcan_write(reg_mecr, &regs->mecr);
+	}
+
+	err = flexcan_transceiver_enable(priv);
+	if (err)
+		goto out_chip_disable;
+
+	/* synchronize with the can bus */
+	err = flexcan_chip_unfreeze(priv);
+	if (err)
+		goto out_transceiver_disable;
+
+	dev->state = CAN_STATE_ERROR_ACTIVE;
+
+	/* enable interrupts atomically */
+	rtdm_irq_disable(&dev->irq_handle);
+	flexcan_write(priv->reg_ctrl_default, &regs->ctrl);
+	flexcan_write(priv->reg_imask1_default, &regs->imask1);
+	flexcan_write(priv->reg_imask2_default, &regs->imask2);
+	rtdm_irq_enable(&dev->irq_handle);
+
+	/* print chip status */
+	rtcandev_dbg(dev, "%s: reading mcr=0x%08x ctrl=0x%08x\n", __func__,
+		   flexcan_read(&regs->mcr), flexcan_read(&regs->ctrl));
+
+	return 0;
+
+ out_transceiver_disable:
+	flexcan_transceiver_disable(priv);
+ out_chip_disable:
+	flexcan_chip_disable(priv);
+ out_disable_per:
+	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+	clk_disable_unprepare(priv->clk_ipg);
+
+	return err;
+}
+
+/* flexcan_chip_stop
+ *
+ * this function is entered with clocks enabled
+ */
+static void flexcan_chip_stop(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+
+	/* freeze + disable module */
+	flexcan_chip_freeze(dev);
+	flexcan_chip_disable(priv);
+
+	/* Disable all interrupts */
+	flexcan_write(0, &regs->imask2);
+	flexcan_write(0, &regs->imask1);
+	flexcan_write(priv->reg_ctrl_default & ~FLEXCAN_CTRL_ERR_ALL,
+		      &regs->ctrl);
+
+	flexcan_transceiver_disable(priv);
+
+	clk_disable_unprepare(priv->clk_per);
+	clk_disable_unprepare(priv->clk_ipg);
+}
+
+static int flexcan_mode_start(struct rtcan_device *dev,
+			      rtdm_lockctx_t *lock_ctx)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	int err = 0;
+
+	rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+
+	switch (dev->state) {
+
+	case CAN_STATE_ACTIVE:
+	case CAN_STATE_BUS_WARNING:
+	case CAN_STATE_BUS_PASSIVE:
+		break;
+
+	case CAN_STATE_STOPPED:
+		/* Register IRQ handler and pass device structure as arg */
+		err = rtdm_irq_request(&dev->irq_handle, priv->irq,
+				       flexcan_irq, 0, DRV_NAME,
+				       dev);
+		if (err) {
+			rtcandev_err(dev, "couldn't request irq %d\n",
+				     priv->irq);
+			goto out;
+		}
+
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+
+		/* start chip and queuing */
+		err = flexcan_chip_start(dev);
+		if (err) {
+			rtdm_irq_free(&dev->irq_handle);
+			rtdm_sem_destroy(&dev->tx_sem);
+			goto out;
+		}
+		break;
+
+	case CAN_STATE_BUS_OFF:
+		/* Set up sender "mutex" */
+		rtdm_sem_init(&dev->tx_sem, 1);
+		/* start chip and queuing */
+		err = flexcan_chip_start(dev);
+		if (err) {
+			rtdm_sem_destroy(&dev->tx_sem);
+			goto out;
+		}
+		break;
+
+	case CAN_STATE_SLEEPING:
+	default:
+		err = 0;
+		break;
+	}
+
+out:
+	rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+
+	return err;
+}
+
+static int flexcan_mode_stop(struct rtcan_device *dev,
+			     rtdm_lockctx_t *lock_ctx)
+{
+	if (!CAN_STATE_OPERATING(dev->state))
+		return 0;
+
+	dev->state = CAN_STATE_STOPPED;
+
+	rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+
+	flexcan_chip_stop(dev);
+	rtdm_irq_free(&dev->irq_handle);
+	rtdm_sem_destroy(&dev->tx_sem);
+
+	rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+
+	return 0;
+}
+
+static int flexcan_set_mode(struct rtcan_device *dev, can_mode_t mode,
+			    rtdm_lockctx_t *lock_ctx)
+{
+	if (mode == CAN_MODE_START)
+		return flexcan_mode_start(dev, lock_ctx);
+
+	if (mode == CAN_MODE_STOP)
+		return flexcan_mode_stop(dev, lock_ctx);
+
+	return -EOPNOTSUPP;
+}
+
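+/* do_set_bit_time handler: the new timing is only cached here; it is
+ * programmed into the CTRL register by flexcan_set_bittiming() on the
+ * next chip start.
+ */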
+static int flexcan_copy_bittiming(struct rtcan_device *dev,
+				  struct can_bittime *bt,
+				  rtdm_lockctx_t *lock_ctx)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+
+	memcpy(&priv->bittiming, bt, sizeof(*bt));
+
+	return 0;
+}
+
+static int register_flexcandev(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+	struct flexcan_regs __iomem *regs = priv->regs;
+	u32 reg;
+	int err;
+
+	err = clk_prepare_enable(priv->clk_ipg);
+	if (err)
+		return err;
+
+	err = clk_prepare_enable(priv->clk_per);
+	if (err)
+		goto out_disable_ipg;
+
+	/* select "bus clock", chip must be disabled */
+	err = flexcan_chip_disable(priv);
+	if (err)
+		goto out_disable_per;
+	reg = flexcan_read(&regs->ctrl);
+	reg |= FLEXCAN_CTRL_CLK_SRC;
+	flexcan_write(reg, &regs->ctrl);
+
+	err = flexcan_chip_enable(priv);
+	if (err)
+		goto out_chip_disable;
+
+	/* set freeze, halt and activate FIFO, restrict register access */
+	reg = flexcan_read(&regs->mcr);
+	reg |= FLEXCAN_MCR_FRZ | FLEXCAN_MCR_HALT |
+		FLEXCAN_MCR_FEN | FLEXCAN_MCR_SUPV;
+	flexcan_write(reg, &regs->mcr);
+
+	/* Currently we only support newer versions of this core
+	 * featuring an RX hardware FIFO (although this driver doesn't
+	 * make use of it on some cores). Older cores, found on some
+	 * ColdFire derivatives, are not tested.
+	 */
+	reg = flexcan_read(&regs->mcr);
+	if (!(reg & FLEXCAN_MCR_FEN)) {
+		rtcandev_err(dev, "Could not enable RX FIFO, unsupported core\n");
+		err = -ENODEV;
+		goto out_chip_disable;
+	}
+
+	err = rtcan_dev_register(dev);
+
+	/* disable core and turn off clocks */
+ out_chip_disable:
+	flexcan_chip_disable(priv);
+ out_disable_per:
+	clk_disable_unprepare(priv->clk_per);
+ out_disable_ipg:
+	clk_disable_unprepare(priv->clk_ipg);
+
+	return err;
+}
+
+static void unregister_flexcandev(struct rtcan_device *dev)
+{
+	struct flexcan_priv *priv = rtcan_priv(dev);
+
+	rtcan_dev_unregister(dev);
+	if (priv->ts_frames)
+		kfree(priv->ts_frames);
+}
+
+static const struct of_device_id flexcan_of_match[] = {
+	{ .compatible = "fsl,imx6q-flexcan", .data = &fsl_imx6q_devtype_data, },
+	{ .compatible = "fsl,imx28-flexcan", .data = &fsl_imx28_devtype_data, },
+	{ .compatible = "fsl,p1010-flexcan", .data = &fsl_p1010_devtype_data, },
+	{ .compatible = "fsl,vf610-flexcan", .data = &fsl_vf610_devtype_data, },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, flexcan_of_match);
+
+static const struct platform_device_id flexcan_id_table[] = {
+	{ .name = "flexcan", .driver_data = (kernel_ulong_t)&fsl_p1010_devtype_data, },
+	{ /* sentinel */ },
+};
+MODULE_DEVICE_TABLE(platform, flexcan_id_table);
+
+static int flexcan_probe(struct platform_device *pdev)
+{
+	const struct of_device_id *of_id;
+	const struct flexcan_devtype_data *devtype_data;
+	struct rtcan_device *dev;
+	struct flexcan_priv *priv;
+	struct regulator *reg_xceiver;
+	struct resource *mem;
+	struct clk *clk_ipg = NULL, *clk_per = NULL;
+	struct flexcan_regs __iomem *regs;
+	int err, irq;
+	u32 clock_freq = 0;
+
+	reg_xceiver = devm_regulator_get(&pdev->dev, "xceiver");
+	if (PTR_ERR(reg_xceiver) == -EPROBE_DEFER)
+		return -EPROBE_DEFER;
+	else if (IS_ERR(reg_xceiver))
+		reg_xceiver = NULL;
+
+	if (pdev->dev.of_node)
+		of_property_read_u32(pdev->dev.of_node,
+				     "clock-frequency", &clock_freq);
+
+	if (!clock_freq) {
+		clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+		if (IS_ERR(clk_ipg)) {
+			dev_err(&pdev->dev, "no ipg clock defined\n");
+			return PTR_ERR(clk_ipg);
+		}
+
+		clk_per = devm_clk_get(&pdev->dev, "per");
+		if (IS_ERR(clk_per)) {
+			dev_err(&pdev->dev, "no per clock defined\n");
+			return PTR_ERR(clk_per);
+		}
+		clock_freq = clk_get_rate(clk_per);
+	}
+
+	mem = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	irq = platform_get_irq(pdev, 0);
+	if (irq <= 0)
+		return -ENODEV;
+
+	regs = devm_ioremap_resource(&pdev->dev, mem);
+	if (IS_ERR(regs))
+		return PTR_ERR(regs);
+
+	of_id = of_match_device(flexcan_of_match, &pdev->dev);
+	if (of_id) {
+		devtype_data = of_id->data;
+	} else if (platform_get_device_id(pdev)->driver_data) {
+		devtype_data = (struct flexcan_devtype_data *)
+			platform_get_device_id(pdev)->driver_data;
+	} else {
+		return -ENODEV;
+	}
+
+	dev = rtcan_dev_alloc(sizeof(struct flexcan_priv), 0);
+	if (!dev)
+		return -ENOMEM;
+
+	platform_set_drvdata(pdev, dev);
+
+	priv = rtcan_priv(dev);
+	priv->regs = regs;
+	priv->irq = irq;
+	priv->clk_ipg = clk_ipg;
+	priv->clk_per = clk_per;
+	priv->devtype_data = devtype_data;
+	priv->reg_xceiver = reg_xceiver;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_TIMESTAMP;
+		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_TIMESTAMP];
+	} else {
+		priv->tx_mb_idx = FLEXCAN_TX_MB_OFF_FIFO;
+		priv->tx_mb_reserved = &regs->mb[FLEXCAN_TX_MB_RESERVED_OFF_FIFO];
+	}
+	priv->tx_mb = &regs->mb[priv->tx_mb_idx];
+
+	priv->reg_imask1_default = FLEXCAN_IFLAG_MB(priv->tx_mb_idx);
+	priv->reg_imask2_default = 0;
+
+	if (priv->devtype_data->quirks & FLEXCAN_QUIRK_USE_OFF_TIMESTAMP) {
+		u64 imask;
+
+		priv->mb_first = FLEXCAN_RX_MB_OFF_TIMESTAMP_FIRST;
+		priv->mb_last = FLEXCAN_RX_MB_OFF_TIMESTAMP_LAST;
+		priv->ts_frames = kzalloc(sizeof(*priv->ts_frames) *
+					  FLEXCAN_RX_MB_TIMESTAMP_COUNT, GFP_KERNEL);
+		if (priv->ts_frames == NULL) {
+			err = -ENOMEM;
+			goto failed_fralloc;
+		}
+
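+		/* One IRQ bit per RX mailbox: GENMASK_ULL(63, 2) covers
+		 * MBs 2..63, with the low word unmasking IMASK1 and the
+		 * high word IMASK2.
+		 */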
+		imask = GENMASK_ULL(priv->mb_last, priv->mb_first);
+		priv->reg_imask1_default |= imask;
+		priv->reg_imask2_default |= imask >> 32;
+	} else {
+		priv->reg_imask1_default |= FLEXCAN_IFLAG_RX_FIFO_OVERFLOW |
+			FLEXCAN_IFLAG_RX_FIFO_AVAILABLE;
+		priv->ts_frames = NULL;
+	}
+
+	dev->ctrl_name = "FLEXCAN";
+	dev->board_name = "FLEXCAN";
+	dev->base_addr = (unsigned long)regs;
+	dev->can_sys_clock = clock_freq;
+	dev->hard_start_xmit = flexcan_start_xmit;
+	dev->do_set_mode = flexcan_set_mode;
+	dev->do_set_bit_time = flexcan_copy_bittiming;
+	dev->bittiming_const = &flexcan_bittiming_const;
+	dev->state = CAN_STATE_STOPPED;
+	strncpy(dev->name, DEV_NAME, IFNAMSIZ);
+
+	err = register_flexcandev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "registering netdev failed\n");
+		goto failed_register;
+	}
+
+	dev_info(&pdev->dev, "device registered (reg_base=%p, irq=%d)\n",
+		 priv->regs, priv->irq);
+
+	return 0;
+
+ failed_register:
+	if (priv->ts_frames)
+		kfree(priv->ts_frames);
+ failed_fralloc:
+	rtcan_dev_free(dev);
+	return err;
+}
+
+static int flexcan_remove(struct platform_device *pdev)
+{
+	struct rtcan_device *dev = platform_get_drvdata(pdev);
+
+	unregister_flexcandev(dev);
+	rtcan_dev_free(dev);
+
+	return 0;
+}
+
+static struct platform_driver flexcan_driver = {
+	.driver = {
+		.name = DRV_NAME,
+		.of_match_table = flexcan_of_match,
+	},
+	.probe = flexcan_probe,
+	.remove = flexcan_remove,
+	.id_table = flexcan_id_table,
+};
+
+module_platform_driver(flexcan_driver);
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>, "
+	      "Sascha Hauer <kernel@pengutronix.de>, "
+	      "Marc Kleine-Budde <kernel@pengutronix.de>");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("RT-CAN driver for FLEXCAN-based chips");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h
new file mode 100644
index 0000000..b290005
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_internal.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/include/rtnet_internal.h:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __RTCAN_INTERNAL_H_
+#define __RTCAN_INTERNAL_H_
+
+#include <linux/module.h>
+#include <rtdm/driver.h>
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+#define RTCAN_ASSERT(expr, func) \
+    if (!(expr)) { \
+	rtdm_printk("Assertion failed! %s:%s:%d %s\n", \
+	__FILE__, __FUNCTION__, __LINE__, (#expr)); \
+	func \
+    }
+#else
+#define RTCAN_ASSERT(expr, func)
+#endif /* CONFIG_XENO_DRIVERS_CAN_DEBUG */
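+
+/* Usage sketch (hypothetical caller): abort the calling function when
+ * an unexpected condition is seen, e.g.
+ *
+ *	RTCAN_ASSERT(dev != NULL, return -EINVAL;);
+ */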
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+# define RTCAN_DBG(fmt,args...) do { printk(fmt ,##args); } while (0)
+# define RTCAN_RTDM_DBG(fmt,args...) do { rtdm_printk(fmt ,##args); } while (0)
+#else
+# define RTCAN_DBG(fmt,args...) do {} while (0)
+# define RTCAN_RTDM_DBG(fmt,args...) do {} while (0)
+#endif
+
+#define rtcan_priv(dev)			(dev)->priv
+#define rtcandev_dbg(dev, fmt, args...)				\
+	printk(KERN_DEBUG "%s: " fmt, (dev)->name, ##args)
+#define rtcandev_info(dev, fmt, args...)			\
+	printk(KERN_INFO "%s: " fmt, (dev)->name, ##args)
+#define rtcandev_warn(dev, fmt, args...)			\
+	printk(KERN_WARNING "%s: " fmt, (dev)->name, ##args)
+#define rtcandev_err(dev, fmt, args...)				\
+	printk(KERN_ERR "%s: " fmt, (dev)->name, ##args)
+
+#endif /* __RTCAN_INTERNAL_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h
new file mode 100644
index 0000000..17a4fbd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_list.h
@@ -0,0 +1,68 @@
+/*
+ * List management for the RTDM RTCAN device driver
+ *
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_LIST_H_
+#define __RTCAN_LIST_H_
+
+#include "rtcan_socket.h"
+
+
+/*
+ * Element of a singly linked list used for registering reception sockets.
+ * Every struct can_filter bound to a socket gets such a list entry. There
+ * is no member for the CAN interface because each CAN controller keeps its
+ * own reception list: when a message arrives, the receiving interface is
+ * already known, which minimizes search time.
+ */
+struct rtcan_recv {
+    can_filter_t            can_filter;     /* filter used for deciding if
+					     *   a socket wants to get a CAN
+					     *   message */
+    unsigned int            match_count;    /* count accepted messages */
+    struct rtcan_socket     *sock;          /* pointer to registered socket
+					     */
+    struct rtcan_recv       *next;          /* pointer to next list element
+					     */
+};
+
+
+/*
+ *  Element in a TX wait queue.
+ *
+ *  Every socket holds a TX wait queue where all RT tasks are queued when they
+ *  are blocked while waiting to be able to transmit a message via this socket.
+ *
+ *  Every sender holds its own element.
+ */
+struct tx_wait_queue {
+    struct list_head        tx_wait_list;   /* List pointers */
+    rtdm_task_t             *rt_task;       /* Pointer to task handle */
+};
+
+
+/* Spinlock for all reception lists and also for some members in
+ * struct rtcan_socket */
+extern rtdm_lock_t rtcan_recv_list_lock;
+
+
+#endif  /* __RTCAN_LIST_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c
new file mode 100644
index 0000000..c071e12
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_module.c
@@ -0,0 +1,439 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from RTnet project file stack/rtcan_module.c:
+ *
+ * Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+#include <rtcan_version.h>
+#include <rtcan_internal.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+
+MODULE_LICENSE("GPL");
+
+
+const char rtcan_rtdm_provider_name[] =
+    "(C) 2006 RT-Socket-CAN Development Team";
+
+
+#ifdef CONFIG_PROC_FS
+
+struct proc_dir_entry *rtcan_proc_root;
+
+static void rtcan_dev_get_ctrlmode_name(can_ctrlmode_t ctrlmode,
+					char* name, int max_len)
+{
+    snprintf(name, max_len, "%s%s",
+	     ctrlmode & CAN_CTRLMODE_LISTENONLY ? "listen-only " : "",
+	     ctrlmode & CAN_CTRLMODE_LOOPBACK ? "loopback " : "");
+}
+
+static char *rtcan_state_names[] = {
+    "active", "warning", "passive" , "bus-off",
+    "scanning", "stopped", "sleeping"
+};
+
+static void rtcan_dev_get_state_name(can_state_t state,
+				     char* name, int max_len)
+{
+    if (state >= CAN_STATE_ACTIVE &&
+	state <= CAN_STATE_SLEEPING)
+	strncpy(name, rtcan_state_names[state], max_len);
+    else
+	strncpy(name, "unknown", max_len);
+}
+
+static void rtcan_dev_get_baudrate_name(can_baudrate_t baudrate,
+					char* name, int max_len)
+{
+    switch (baudrate) {
+    case CAN_BAUDRATE_UNCONFIGURED:
+	strncpy(name, "undefined", max_len);
+	break;
+    case CAN_BAUDRATE_UNKNOWN:
+	strncpy(name, "unknown", max_len);
+	break;
+    default:
+	ksformat(name, max_len, "%d", baudrate);
+	break;
+    }
+}
+
+static void rtcan_dev_get_bittime_name(struct can_bittime *bit_time,
+				       char* name, int max_len)
+{
+    switch (bit_time->type) {
+    case CAN_BITTIME_STD:
+	ksformat(name, max_len,
+		 "brp=%d prop_seg=%d phase_seg1=%d "
+		 "phase_seg2=%d sjw=%d sam=%d",
+		 bit_time->std.brp,
+		 bit_time->std.prop_seg,
+		 bit_time->std.phase_seg1,
+		 bit_time->std.phase_seg2,
+		 bit_time->std.sjw,
+		 bit_time->std.sam);
+	break;
+    case CAN_BITTIME_BTR:
+	ksformat(name, max_len, "btr0=0x%02x btr1=0x%02x",
+		 bit_time->btr.btr0, bit_time->btr.btr1);
+	break;
+    default:
+	strncpy(name, "unknown", max_len);
+	break;
+    }
+}
+
+static void rtcan_get_timeout_name(nanosecs_rel_t timeout,
+				   char* name, int max_len)
+{
+    if (timeout == RTDM_TIMEOUT_INFINITE)
+	strncpy(name, "infinite", max_len);
+    else
+	ksformat(name, max_len, "%lld", (long long)timeout);
+}
+
+static int rtcan_read_proc_devices(struct seq_file *p, void *data)
+{
+    int i;
+    struct rtcan_device *dev;
+    char state_name[20], baudrate_name[20];
+
+    if (down_interruptible(&rtcan_devices_nrt_lock))
+	return -ERESTARTSYS;
+
+    /* Name___________ _Baudrate State___ TX_Counter RX_Counter ____Errors
+     * rtcan0             125000 stopped  1234567890 1234567890 1234567890
+     * rtcan1          undefined warning  1234567890 1234567890 1234567890
+     * rtcan2          undefined scanning 1234567890 1234567890 1234567890
+     */
+    seq_printf(p, "Name___________ _Baudrate State___ TX_Counter RX_Counter "
+		  "____Errors\n");
+
+    for (i = 1; i <= RTCAN_MAX_DEVICES; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) != NULL) {
+	    rtcan_dev_get_state_name(dev->state,
+				     state_name, sizeof(state_name));
+	    rtcan_dev_get_baudrate_name(dev->baudrate,
+					baudrate_name, sizeof(baudrate_name));
+	    seq_printf(p, "%-15s %9s %-8s %10d %10d %10d\n",
+		       dev->name, baudrate_name, state_name, dev->tx_count,
+		       dev->rx_count, dev->err_count);
+	    rtcan_dev_dereference(dev);
+	}
+    }
+
+    up(&rtcan_devices_nrt_lock);
+
+    return 0;
+}
+
+static int rtcan_proc_devices_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_devices, NULL);
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_devices_ops,
+			rtcan_proc_devices_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_sockets(struct seq_file *p, void *data)
+{
+    struct rtcan_socket *sock;
+    struct rtdm_fd *fd;
+    struct rtcan_device *dev;
+    char name[IFNAMSIZ] = "not-bound";
+    char rx_timeout[20], tx_timeout[20];
+    rtdm_lockctx_t lock_ctx;
+    int ifindex;
+
+    if (down_interruptible(&rtcan_devices_nrt_lock))
+	return -ERESTARTSYS;
+
+    /* Name___________ Filter ErrMask RX_Timeout_ns TX_Timeout_ns RX_BufFull TX_Lo
+     * rtcan0               1 0x00010    1234567890    1234567890 1234567890 12345
+     */
+    seq_printf(p, "Name___________ Filter ErrMask RX_Timeout_ns "
+		  "TX_Timeout_ns RX_BufFull TX_Lo\n");
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    list_for_each_entry(sock, &rtcan_socket_list, socket_list) {
+	fd = rtcan_socket_to_fd(sock);
+	if (rtcan_sock_is_bound(sock)) {
+	    ifindex = atomic_read(&sock->ifindex);
+	    if (ifindex) {
+		dev = rtcan_dev_get_by_index(ifindex);
+		if (dev) {
+		    strncpy(name, dev->name, IFNAMSIZ);
+		    rtcan_dev_dereference(dev);
+		}
+	    } else
+		ksformat(name, sizeof(name), "%d", ifindex);
+	}
+	rtcan_get_timeout_name(sock->tx_timeout,
+			       tx_timeout, sizeof(tx_timeout));
+	rtcan_get_timeout_name(sock->rx_timeout,
+			       rx_timeout, sizeof(rx_timeout));
+	seq_printf(p, "%-15s %6d 0x%05x %13s %13s %10d %5d\n",
+		   name, sock->flistlen, sock->err_mask,
+		   rx_timeout, tx_timeout, sock->rx_buf_full,
+		   rtcan_loopback_enabled(sock));
+    }
+
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+    up(&rtcan_devices_nrt_lock);
+
+    return 0;
+}
+
+static int rtcan_proc_sockets_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_sockets, NULL);
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_sockets_ops,
+			rtcan_proc_sockets_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_info(struct seq_file *p, void *data)
+{
+    struct rtcan_device *dev = p->private;
+    char state_name[20], baudrate_name[20];
+    char ctrlmode_name[80], bittime_name[80];
+
+    if (down_interruptible(&rtcan_devices_nrt_lock))
+	return -ERESTARTSYS;
+
+    rtcan_dev_get_state_name(dev->state,
+			     state_name, sizeof(state_name));
+    rtcan_dev_get_ctrlmode_name(dev->ctrl_mode,
+				ctrlmode_name, sizeof(ctrlmode_name));
+    rtcan_dev_get_baudrate_name(dev->baudrate,
+				baudrate_name, sizeof(baudrate_name));
+    rtcan_dev_get_bittime_name(&dev->bit_time,
+			       bittime_name, sizeof(bittime_name));
+
+    seq_printf(p, "Device     %s\n", dev->name);
+    seq_printf(p, "Controller %s\n", dev->ctrl_name);
+    seq_printf(p, "Board      %s\n", dev->board_name);
+    seq_printf(p, "Clock-Hz   %d\n", dev->can_sys_clock);
+    seq_printf(p, "Baudrate   %s\n", baudrate_name);
+    seq_printf(p, "Bit-time   %s\n", bittime_name);
+    seq_printf(p, "Ctrl-Mode  %s\n", ctrlmode_name);
+    seq_printf(p, "State      %s\n", state_name);
+    seq_printf(p, "TX-Counter %d\n", dev->tx_count);
+    seq_printf(p, "RX-Counter %d\n", dev->rx_count);
+    seq_printf(p, "Errors     %d\n", dev->err_count);
+#ifdef RTCAN_USE_REFCOUNT
+    seq_printf(p, "Refcount   %d\n", atomic_read(&dev->refcount));
+#endif
+
+    up(&rtcan_devices_nrt_lock);
+
+    return 0;
+}
+
+static int rtcan_proc_info_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_info, pde_data(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_info_ops,
+			rtcan_proc_info_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_filter(struct seq_file *p, void *data)
+{
+    struct rtcan_device *dev = p->private;
+    struct rtcan_recv *recv_listener = dev->recv_list;
+    struct rtdm_fd *fd;
+    rtdm_lockctx_t lock_ctx;
+
+    /*  __CAN_ID__ _CAN_Mask_ Inv MatchCount
+     *  0x12345678 0x12345678  no 1234567890
+     */
+
+    seq_printf(p, "__CAN_ID__ _CAN_Mask_ Inv MatchCount\n");
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    /* Loop over the reception list of the device */
+    while (recv_listener != NULL) {
+	fd = rtcan_socket_to_fd(recv_listener->sock);
+
+	seq_printf(p, "0x%08x 0x%08x %s %10d\n",
+		   recv_listener->can_filter.can_id,
+		   recv_listener->can_filter.can_mask & ~CAN_INV_FILTER,
+		   (recv_listener->can_filter.can_mask & CAN_INV_FILTER) ?
+			"yes" : " no",
+		   recv_listener->match_count);
+
+	recv_listener = recv_listener->next;
+    }
+
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+    return 0;
+}
+
+static int rtcan_proc_filter_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_filter, pde_data(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_filter_ops,
+			rtcan_proc_filter_open,
+			single_release,
+			seq_read,
+			NULL);
+
+static int rtcan_read_proc_version(struct seq_file *p, void *data)
+{
+	seq_printf(p, "RT-Socket-CAN %d.%d.%d\n",
+		   RTCAN_MAJOR_VER, RTCAN_MINOR_VER, RTCAN_BUGFIX_VER);
+
+	return 0;
+}
+
+static int rtcan_proc_version_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_read_proc_version, NULL);
+}
+
+static const DEFINE_PROC_OPS(rtcan_proc_version_ops,
+			rtcan_proc_version_open,
+			single_release,
+			seq_read,
+			NULL);
+
+void rtcan_dev_remove_proc(struct rtcan_device* dev)
+{
+    if (!dev->proc_root)
+	return;
+
+    remove_proc_entry("info", dev->proc_root);
+    remove_proc_entry("filters", dev->proc_root);
+    remove_proc_entry(dev->name, rtcan_proc_root);
+
+    dev->proc_root = NULL;
+}
+
+int rtcan_dev_create_proc(struct rtcan_device* dev)
+{
+    if (!rtcan_proc_root)
+	return -EINVAL;
+
+    dev->proc_root = proc_mkdir(dev->name, rtcan_proc_root);
+    if (!dev->proc_root) {
+	printk("%s: unable to create /proc device entries\n", dev->name);
+	return -1;
+    }
+
+    proc_create_data("info", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root,
+		     &rtcan_proc_info_ops, dev);
+    proc_create_data("filters", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root,
+		     &rtcan_proc_filter_ops, dev);
+    return 0;
+
+}
+
+
+static int rtcan_proc_register(void)
+{
+    rtcan_proc_root = proc_mkdir("rtcan", NULL);
+    if (!rtcan_proc_root) {
+	printk("rtcan: unable to initialize /proc entries\n");
+	return -1;
+    }
+
+    proc_create("devices", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root,
+		&rtcan_proc_devices_ops);
+    proc_create("version", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root,
+		&rtcan_proc_version_ops);
+    proc_create("sockets", S_IFREG | S_IRUGO | S_IWUSR, rtcan_proc_root,
+		&rtcan_proc_sockets_ops);
+    return 0;
+}
+
+
+
+static void rtcan_proc_unregister(void)
+{
+    remove_proc_entry("devices", rtcan_proc_root);
+    remove_proc_entry("version", rtcan_proc_root);
+    remove_proc_entry("sockets", rtcan_proc_root);
+    remove_proc_entry("rtcan", 0);
+}
+#endif  /* CONFIG_PROC_FS */
+
+
+
+int __init rtcan_init(void)
+{
+    int err = 0;
+
+    if (!rtdm_available())
+	return -ENOSYS;
+
+    printk("RT-Socket-CAN %d.%d.%d - %s\n",
+	   RTCAN_MAJOR_VER, RTCAN_MINOR_VER, RTCAN_BUGFIX_VER,
+	   rtcan_rtdm_provider_name);
+
+    if ((err = rtcan_raw_proto_register()) != 0)
+	goto out;
+
+#ifdef CONFIG_PROC_FS
+    if ((err = rtcan_proc_register()) != 0)
+	goto out;
+#endif
+
+ out:
+    return err;
+}
+
+
+void __exit rtcan_exit(void)
+{
+    rtcan_raw_proto_unregister();
+#ifdef CONFIG_PROC_FS
+    rtcan_proc_unregister();
+#endif
+
+    printk("rtcan: unloaded\n");
+}
+
+
+module_init(rtcan_init);
+module_exit(rtcan_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c
new file mode 100644
index 0000000..441bfbc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.c
@@ -0,0 +1,994 @@
+/*
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Parts of this software are based on the following:
+ *
+ * - RTAI CAN device driver for SJA1000 controllers by Jan Kiszka
+ *
+ * - linux-can.patch, a CAN socket framework for Linux,
+ *   Copyright (C) 2004, 2005, Robert Schwebel, Benedikt Spranger,
+ *   Marc Kleine-Budde, Sascha Hauer, Pengutronix
+ *
+ * - RTnet (www.rtnet.org)
+ *
+ * - serial device driver and profile included in Xenomai (RTDM),
+ *   Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/stringify.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include <rtdm/compat.h>
+#include "rtcan_version.h"
+#include "rtcan_socket.h"
+#include "rtcan_list.h"
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+
+
+/*
+ * Set if socket wants to receive a high precision timestamp together with
+ * CAN frames
+ */
+#define RTCAN_GET_TIMESTAMP         0
+
+
+MODULE_AUTHOR("RT-Socket-CAN Development Team");
+MODULE_DESCRIPTION("RTDM CAN raw socket device driver");
+MODULE_VERSION(__stringify(RTCAN_MAJOR_VER) "."
+	       __stringify(RTCAN_MINOR_VER) "."
+	       __stringify(RTCAN_BUGFIX_VER));
+MODULE_LICENSE("GPL");
+
+void rtcan_tx_push(struct rtcan_device *dev, struct rtcan_socket *sock,
+		   can_frame_t *frame);
+
+static inline int rtcan_accept_msg(uint32_t can_id, can_filter_t *filter)
+{
+    if ((filter->can_mask & CAN_INV_FILTER))
+	return ((can_id & filter->can_mask) != filter->can_id);
+    else
+	return ((can_id & filter->can_mask) == filter->can_id);
+}
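+
+/*
+ * Illustrative example (values chosen for illustration only): with a
+ * mounted filter of can_id = 0x120 and can_mask = 0x7F0, the IDs
+ * 0x120..0x12F are accepted. If CAN_INV_FILTER is set in can_mask, the
+ * test is inverted and exactly those IDs are rejected instead.
+ */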
+
+
+static void rtcan_rcv_deliver(struct rtcan_recv *recv_listener,
+			      struct rtcan_skb *skb)
+{
+    int size_free;
+    size_t cpy_size, first_part_size;
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+    struct rtdm_fd *fd = rtdm_private_to_fd(recv_listener->sock);
+    struct rtcan_socket *sock;
+
+    if (rtdm_fd_lock(fd) < 0)
+	return;
+
+    sock = recv_listener->sock;
+
+    cpy_size = skb->rb_frame_size;
+    /* Check if socket wants to receive a timestamp */
+    if (test_bit(RTCAN_GET_TIMESTAMP, &sock->flags)) {
+	cpy_size += RTCAN_TIMESTAMP_SIZE;
+	frame->can_dlc |= RTCAN_HAS_TIMESTAMP;
+    } else
+	frame->can_dlc &= RTCAN_HAS_NO_TIMESTAMP;
+
+    /* Calculate free size in the ring buffer */
+    size_free = sock->recv_head - sock->recv_tail;
+    if (size_free <= 0)
+	size_free += RTCAN_RXBUF_SIZE;
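+
+    /*
+     * Illustrative note: with RTCAN_RXBUF_SIZE = 1024, head = 100 and
+     * tail = 700 give size_free = -600 + 1024 = 424 bytes. The strict
+     * '>' test below always leaves one byte unused, so head == tail
+     * unambiguously means "buffer empty".
+     */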
+
+    /* Test if ring buffer has enough space. */
+    if (size_free > cpy_size) {
+	/* Check if we must wrap around the end of buffer */
+	if ((sock->recv_tail + cpy_size) > RTCAN_RXBUF_SIZE) {
+	    /* Wrap around: Two memcpy operations */
+
+	    first_part_size = RTCAN_RXBUF_SIZE - sock->recv_tail;
+
+	    memcpy(&sock->recv_buf[sock->recv_tail], (void *)frame,
+		   first_part_size);
+	    memcpy(&sock->recv_buf[0], (void *)frame +
+		   first_part_size, cpy_size - first_part_size);
+	} else
+	    memcpy(&sock->recv_buf[sock->recv_tail], (void *)frame,
+		   cpy_size);
+
+	/* Adjust tail */
+	sock->recv_tail = (sock->recv_tail + cpy_size) &
+	    (RTCAN_RXBUF_SIZE - 1);
+
+	/* Notify the delivery of the message */
+	rtdm_sem_up(&sock->recv_sem);
+
+    } else {
+	/* Overflow of socket's ring buffer! */
+	sock->rx_buf_full++;
+	RTCAN_RTDM_DBG("rtcan: socket buffer overflow, message discarded\n");
+    }
+
+    rtdm_fd_unlock(fd);
+}
+
+
+void rtcan_rcv(struct rtcan_device *dev, struct rtcan_skb *skb)
+{
+    nanosecs_abs_t timestamp = rtdm_clock_read();
+    /* Entry in reception list, begin with head */
+    struct rtcan_recv *recv_listener = dev->recv_list;
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+
+    /* Copy timestamp to skb */
+    memcpy((void *)&skb->rb_frame + skb->rb_frame_size,
+	   &timestamp, RTCAN_TIMESTAMP_SIZE);
+
+    if ((frame->can_id & CAN_ERR_FLAG)) {
+	dev->err_count++;
+	while (recv_listener != NULL) {
+	    if ((frame->can_id & recv_listener->sock->err_mask)) {
+		recv_listener->match_count++;
+		rtcan_rcv_deliver(recv_listener, skb);
+	    }
+	    recv_listener = recv_listener->next;
+	}
+    } else {
+	dev->rx_count++;
+	while (recv_listener != NULL) {
+	    if (rtcan_accept_msg(frame->can_id, &recv_listener->can_filter)) {
+		recv_listener->match_count++;
+		rtcan_rcv_deliver(recv_listener, skb);
+	    }
+	    recv_listener = recv_listener->next;
+	}
+    }
+}
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+
+void rtcan_tx_push(struct rtcan_device *dev, struct rtcan_socket *sock,
+		   can_frame_t *frame)
+{
+    struct rtcan_rb_frame *rb_frame = &dev->tx_skb.rb_frame;
+
+    RTCAN_ASSERT(dev->tx_socket == NULL,
+		 rtdm_printk("(%d) TX skb still in use\n", dev->ifindex););
+
+    rb_frame->can_id = frame->can_id;
+    rb_frame->can_dlc = frame->can_dlc;
+    dev->tx_skb.rb_frame_size = EMPTY_RB_FRAME_SIZE;
+    if (frame->can_dlc && !(frame->can_id & CAN_RTR_FLAG)) {
+	memcpy(rb_frame->data, frame->data, frame->can_dlc);
+	dev->tx_skb.rb_frame_size += frame->can_dlc;
+    }
+    rb_frame->can_ifindex = dev->ifindex;
+    dev->tx_socket = sock;
+}
+
+void rtcan_loopback(struct rtcan_device *dev)
+{
+    nanosecs_abs_t timestamp = rtdm_clock_read();
+    /* Entry in reception list, begin with head */
+    struct rtcan_recv *recv_listener = dev->recv_list;
+    struct rtcan_rb_frame *frame = &dev->tx_skb.rb_frame;
+
+    memcpy((void *)&dev->tx_skb.rb_frame + dev->tx_skb.rb_frame_size,
+	   &timestamp, RTCAN_TIMESTAMP_SIZE);
+
+    while (recv_listener != NULL) {
+	dev->rx_count++;
+	if ((dev->tx_socket != recv_listener->sock) &&
+	    rtcan_accept_msg(frame->can_id, &recv_listener->can_filter)) {
+	    recv_listener->match_count++;
+	    rtcan_rcv_deliver(recv_listener, &dev->tx_skb);
+	}
+	recv_listener = recv_listener->next;
+    }
+    dev->tx_socket = NULL;
+}
+
+EXPORT_SYMBOL_GPL(rtcan_loopback);
+
+#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+
+
+int rtcan_raw_socket(struct rtdm_fd *fd, int protocol)
+{
+    /* Only protocol CAN_RAW is supported */
+    if (protocol != CAN_RAW && protocol != 0)
+	return -EPROTONOSUPPORT;
+
+    rtcan_socket_init(fd);
+
+    return 0;
+}
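+
+/*
+ * Usage sketch (assuming libcobalt's POSIX wrappers): a raw CAN socket
+ * is obtained the BSD way, e.g.
+ *
+ *   int s = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+ *
+ * which ends up here with protocol == CAN_RAW.
+ */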
+
+
+static inline void rtcan_raw_unbind(struct rtcan_socket *sock)
+{
+    rtcan_raw_remove_filter(sock);
+    if (!rtcan_flist_no_filter(sock->flist) && sock->flist)
+	rtdm_free(sock->flist);
+    sock->flist = NULL;
+    sock->flistlen = RTCAN_SOCK_UNBOUND;
+    atomic_set(&sock->ifindex, 0);
+}
+
+
+static void rtcan_raw_close(struct rtdm_fd *fd)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    rtdm_lockctx_t lock_ctx;
+
+    /* Get lock for reception lists */
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    /* Check if socket is bound */
+    if (rtcan_sock_is_bound(sock))
+	rtcan_raw_unbind(sock);
+
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+
+    rtcan_socket_cleanup(fd);
+}
+
+
+int rtcan_raw_bind(struct rtdm_fd *fd,
+		   struct sockaddr_can *scan)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    rtdm_lockctx_t lock_ctx;
+    int ret = 0;
+
+    /* Check address family */
+    if (scan->can_family != AF_CAN)
+	return -EINVAL;
+    /* Check range of ifindex, must be between 0 and RTCAN_MAX_DEVICES */
+    if (scan->can_ifindex < 0 || scan->can_ifindex > RTCAN_MAX_DEVICES)
+	return -ENODEV;
+
+    /* Get lock for reception lists */
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+    if ((ret = rtcan_raw_check_filter(sock, scan->can_ifindex,
+				      sock->flist)))
+	goto out;
+    rtcan_raw_remove_filter(sock);
+    /* Add filter and mark socket as bound */
+    sock->flistlen = rtcan_raw_add_filter(sock, scan->can_ifindex);
+
+    /* Set new interface index the socket is now bound to */
+    atomic_set(&sock->ifindex, scan->can_ifindex);
+
+ out:
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+    return ret;
+}
+
+
+static int rtcan_raw_setsockopt(struct rtdm_fd *fd,
+				struct _rtdm_setsockopt_args *so)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct rtcan_filter_list *flist;
+    int ifindex = atomic_read(&sock->ifindex);
+    rtdm_lockctx_t lock_ctx;
+    can_err_mask_t err_mask;
+    int val, ret = 0;
+
+    if (so->level != SOL_CAN_RAW)
+	return -ENOPROTOOPT;
+
+    switch (so->optname) {
+
+    case CAN_RAW_FILTER:
+	if (so->optlen == 0) {
+	    flist = RTCAN_FLIST_NO_FILTER;
+	} else {
+	    int flistlen;
+	    flistlen = so->optlen / sizeof(struct can_filter);
+	    if (flistlen < 1 || flistlen > RTCAN_MAX_RECEIVERS ||
+		so->optlen % sizeof(struct can_filter) != 0)
+		return -EINVAL;
+
+	    flist = (struct rtcan_filter_list *)rtdm_malloc(so->optlen + sizeof(int));
+	    if (flist == NULL)
+		return -ENOMEM;
+	    if (rtdm_fd_is_user(fd)) {
+		if (!rtdm_read_user_ok(fd, so->optval, so->optlen) ||
+		    rtdm_copy_from_user(fd, flist->flist,
+					so->optval, so->optlen)) {
+		    rtdm_free(flist);
+		    return -EFAULT;
+		}
+	    } else
+		memcpy(flist->flist, so->optval, so->optlen);
+	    flist->flistlen = flistlen;
+	}
+
+	/* Get lock for reception lists */
+	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+
+	/* Check if there is space for the filter list if already bound */
+	if (rtcan_sock_is_bound(sock)) {
+	    if (!rtcan_flist_no_filter(flist) &&
+		(ret = rtcan_raw_check_filter(sock, ifindex, flist))) {
+		rtdm_free(flist);
+		goto out_filter;
+	    }
+	    rtcan_raw_remove_filter(sock);
+	}
+
+	/* Remove previous list and attach the new one */
+	if (!rtcan_flist_no_filter(flist) && sock->flist)
+	    rtdm_free(sock->flist);
+	sock->flist = flist;
+
+	if (rtcan_sock_is_bound(sock))
+	    sock->flistlen = rtcan_raw_add_filter(sock, ifindex);
+
+    out_filter:
+	/* Release lock for reception lists */
+	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+	break;
+
+    case CAN_RAW_ERR_FILTER:
+
+	if (so->optlen != sizeof(can_err_mask_t))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_read_user_ok(fd, so->optval, so->optlen) ||
+		rtdm_copy_from_user(fd, &err_mask, so->optval, so->optlen))
+		return -EFAULT;
+	} else
+	    memcpy(&err_mask, so->optval, so->optlen);
+
+	/* Get lock for reception lists */
+	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+	sock->err_mask = err_mask;
+	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+	break;
+
+    case CAN_RAW_LOOPBACK:
+
+	if (so->optlen != sizeof(int))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_read_user_ok(fd, so->optval, so->optlen) ||
+		rtdm_copy_from_user(fd, &val, so->optval, so->optlen))
+		return -EFAULT;
+	} else
+	    memcpy(&val, so->optval, so->optlen);
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+	sock->loopback = val;
+#else
+	if (val)
+	    return -EOPNOTSUPP;
+#endif
+	break;
+
+    default:
+	ret = -ENOPROTOOPT;
+    }
+
+    return ret;
+}
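+
+/*
+ * Usage sketch (values chosen for illustration only): attaching a
+ * two-entry filter list from user space:
+ *
+ *   struct can_filter flist[2] = {
+ *       { .can_id = 0x100, .can_mask = 0x700 },
+ *       { .can_id = 0x42 | CAN_INV_FILTER, .can_mask = CAN_SFF_MASK },
+ *   };
+ *   setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, flist, sizeof(flist));
+ *
+ * Passing optlen == 0 installs RTCAN_FLIST_NO_FILTER, i.e. the socket
+ * will not receive anything at all.
+ */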
+
+
+int rtcan_raw_ioctl(struct rtdm_fd *fd,
+		    unsigned int request, void *arg)
+{
+    int ret = 0;
+
+    switch (request) {
+    COMPAT_CASE(_RTIOC_BIND): {
+
+	struct _rtdm_setsockaddr_args *setaddr, setaddr_buf;
+	struct sockaddr_can *sockaddr, sockaddr_buf;
+
+	if (rtdm_fd_is_user(fd)) {
+	    ret = rtdm_fd_get_setsockaddr_args(fd, &setaddr_buf, arg);
+	    if (ret)
+		return ret;
+
+	    setaddr = &setaddr_buf;
+
+	    /* Check size */
+	    if (setaddr->addrlen != sizeof(struct sockaddr_can))
+		return -EINVAL;
+
+	    /* Copy argument structure from userspace */
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(struct sockaddr_can)) ||
+		rtdm_copy_from_user(fd, &sockaddr_buf, setaddr->addr,
+				    sizeof(struct sockaddr_can)))
+		return -EFAULT;
+	    sockaddr = &sockaddr_buf;
+	} else {
+	    setaddr = (struct _rtdm_setsockaddr_args *)arg;
+	    sockaddr = (struct sockaddr_can *)setaddr->addr;
+	}
+
+	/* Now, all required data are in kernel space */
+	ret = rtcan_raw_bind(fd, sockaddr);
+
+	break;
+    }
+
+    COMPAT_CASE(_RTIOC_SETSOCKOPT): {
+	struct _rtdm_setsockopt_args *setopt;
+	struct _rtdm_setsockopt_args setopt_buf;
+
+	if (rtdm_fd_is_user(fd)) {
+	    ret = rtdm_fd_get_setsockopt_args(fd, &setopt_buf, arg);
+	    if (ret)
+		    return ret;
+
+	    setopt = &setopt_buf;
+	} else
+	    setopt = (struct _rtdm_setsockopt_args *)arg;
+
+	return rtcan_raw_setsockopt(fd, setopt);
+    }
+
+    case RTCAN_RTIOC_TAKE_TIMESTAMP: {
+	struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+	long timestamp_switch = (long)arg;
+
+	if (timestamp_switch == RTCAN_TAKE_TIMESTAMPS)
+	    set_bit(RTCAN_GET_TIMESTAMP, &sock->flags);
+	else
+	    clear_bit(RTCAN_GET_TIMESTAMP, &sock->flags);
+	break;
+    }
+
+    case RTCAN_RTIOC_RCV_TIMEOUT:
+    case RTCAN_RTIOC_SND_TIMEOUT: {
+	/* First handle the work these requests have in common. */
+	struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+
+	nanosecs_rel_t *timeout = (nanosecs_rel_t *)arg;
+	nanosecs_rel_t timeo_buf;
+
+	if (rtdm_fd_is_user(fd)) {
+	    /* Copy 64 bit timeout value from userspace */
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(nanosecs_rel_t)) ||
+		rtdm_copy_from_user(fd, &timeo_buf,
+				    arg, sizeof(nanosecs_rel_t)))
+		return -EFAULT;
+
+	    timeout = &timeo_buf;
+	}
+
+	/* Now the differences begin between the requests. */
+	if (request == RTCAN_RTIOC_RCV_TIMEOUT)
+	    sock->rx_timeout = *timeout;
+	else
+	    sock->tx_timeout = *timeout;
+
+	break;
+    }
+
+    default:
+	ret = rtcan_raw_ioctl_dev(fd, request, arg);
+	break;
+    }
+
+    return ret;
+}
+
+
+#define MEMCPY_FROM_RING_BUF(to, len)					\
+do {									\
+	if (unlikely((recv_buf_index + len) > RTCAN_RXBUF_SIZE)) { 	\
+		/* Wrap around end of buffer */				\
+		first_part_size = RTCAN_RXBUF_SIZE - recv_buf_index; 	\
+		memcpy(to, &recv_buf[recv_buf_index], first_part_size);	\
+		memcpy((void *)to + first_part_size, recv_buf,		\
+		       len - first_part_size);				\
+	} else								\
+		memcpy(to, &recv_buf[recv_buf_index], len);		\
+	recv_buf_index = (recv_buf_index + len) & (RTCAN_RXBUF_SIZE - 1); \
+} while (0)
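+
+/*
+ * Worked example: the macro relies on recv_buf, recv_buf_index and
+ * first_part_size from the caller's scope. With RTCAN_RXBUF_SIZE = 1024
+ * and recv_buf_index = 1020, copying 8 bytes splits into 4 bytes at the
+ * end of the buffer plus 4 bytes from its start; the index then wraps
+ * to (1020 + 8) & 1023 = 4. The mask trick requires RTCAN_RXBUF_SIZE to
+ * be a power of two.
+ */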
+
+ssize_t rtcan_raw_recvmsg(struct rtdm_fd *fd,
+			  struct user_msghdr *msg, int flags)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct sockaddr_can scan;
+    nanosecs_rel_t timeout;
+    struct iovec *iov = (struct iovec *)msg->msg_iov;
+    struct iovec iov_buf;
+    can_frame_t frame;
+    nanosecs_abs_t timestamp = 0;
+    unsigned char ifindex;
+    unsigned char can_dlc;
+    unsigned char *recv_buf;
+    int recv_buf_index;
+    size_t first_part_size;
+    size_t payload_size;
+    rtdm_lockctx_t lock_ctx;
+    int ret;
+
+    /* Clear frame memory location */
+    memset(&frame, 0, sizeof(can_frame_t));
+
+    /* Check flags */
+    if (flags & ~(MSG_DONTWAIT | MSG_PEEK))
+	return -EINVAL;
+
+
+    /* Check if msghdr entries are sane */
+
+    if (msg->msg_name != NULL) {
+	if (msg->msg_namelen < sizeof(struct sockaddr_can))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_rw_user_ok(fd, msg->msg_name, msg->msg_namelen))
+		return -EFAULT;
+	}
+
+    } else {
+	if (msg->msg_namelen != 0)
+	    return -EINVAL;
+    }
+
+    /* Check msg_iovlen, only one buffer allowed */
+    if (msg->msg_iovlen != 1)
+	return -EMSGSIZE;
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy IO vector from userspace */
+	ret = rtdm_fd_get_iovec(fd, &iov_buf, msg, true);
+	if (ret)
+		return -EFAULT;
+
+	iov = &iov_buf;
+    }
+
+    /* Check size of buffer */
+    if (iov->iov_len < sizeof(can_frame_t))
+	return -EMSGSIZE;
+
+    /* Check buffer if in user space */
+    if (rtdm_fd_is_user(fd)) {
+	if (!rtdm_rw_user_ok(fd, iov->iov_base, iov->iov_len))
+	    return -EFAULT;
+    }
+
+    if (msg->msg_control != NULL) {
+	if (msg->msg_controllen < sizeof(nanosecs_abs_t))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    if (!rtdm_rw_user_ok(fd, msg->msg_control,
+				 msg->msg_controllen))
+		return -EFAULT;
+	}
+
+    } else {
+	if (msg->msg_controllen != 0)
+	    return -EINVAL;
+    }
+
+    rtcan_raw_enable_bus_err(sock);
+
+    /* Set RX timeout */
+    timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sock->rx_timeout;
+
+    /* Fetch message (ok, try it ...) */
+    ret = rtdm_sem_timeddown(&sock->recv_sem, timeout, NULL);
+
+    /* Error code returned? */
+    if (unlikely(ret)) {
+	/* Which error code? */
+
+	if (ret == -EIDRM)
+	    /* Socket was closed */
+	    return -EBADF;
+
+	else if (ret == -EWOULDBLOCK)
+	    /* We would block but don't want to */
+	    return -EAGAIN;
+
+	else
+	    /* Return all other error codes unmodified. */
+	    return ret;
+    }
+
+
+    /* OK, we've got mail. */
+
+    rtdm_lock_get_irqsave(&rtcan_socket_lock, lock_ctx);
+
+
+    /* Construct a struct can_frame with data from socket's ring buffer */
+    recv_buf_index = sock->recv_head;
+    recv_buf = sock->recv_buf;
+
+
+    /* Begin with CAN ID */
+    MEMCPY_FROM_RING_BUF(&frame.can_id, sizeof(uint32_t));
+
+
+    /* Fetch interface index */
+    ifindex = recv_buf[recv_buf_index];
+    recv_buf_index = (recv_buf_index + 1) & (RTCAN_RXBUF_SIZE - 1);
+
+
+    /* Fetch DLC (with indicator if a timestamp exists) */
+    can_dlc = recv_buf[recv_buf_index];
+    recv_buf_index = (recv_buf_index + 1) & (RTCAN_RXBUF_SIZE - 1);
+
+    frame.can_dlc = can_dlc & RTCAN_HAS_NO_TIMESTAMP;
+    payload_size = (frame.can_dlc > 8) ? 8 : frame.can_dlc;
+
+
+    /* If frame is an RTR or one with no payload it's not necessary
+     * to copy the data bytes. */
+    if (!(frame.can_id & CAN_RTR_FLAG) && payload_size)
+	/* Copy data bytes */
+	MEMCPY_FROM_RING_BUF(frame.data, payload_size);
+
+    /* Is a timestamp available and is the caller actually interested? */
+    if (msg->msg_controllen && (can_dlc & RTCAN_HAS_TIMESTAMP))
+	/* Copy timestamp */
+	MEMCPY_FROM_RING_BUF(&timestamp, RTCAN_TIMESTAMP_SIZE);
+
+    /* Message completely read from the socket's ring buffer. Now check if
+     * caller is just peeking. */
+    if (flags & MSG_PEEK)
+	/* Next one, please! */
+	rtdm_sem_up(&sock->recv_sem);
+    else
+	/* Adjust the start of the first message in the ring buffer. */
+	sock->recv_head = recv_buf_index;
+
+
+    /* Release lock */
+    rtdm_lock_put_irqrestore(&rtcan_socket_lock, lock_ctx);
+
+
+    /* Create CAN socket address to give back */
+    if (msg->msg_namelen) {
+	scan.can_family = AF_CAN;
+	scan.can_ifindex = ifindex;
+    }
+
+
+    /* Last duty: Copy all back to the caller's buffers. */
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy to user space */
+
+	/* Copy socket address */
+	if (msg->msg_namelen) {
+	    if (rtdm_copy_to_user(fd, msg->msg_name, &scan,
+				  sizeof(struct sockaddr_can)))
+		return -EFAULT;
+
+	    msg->msg_namelen = sizeof(struct sockaddr_can);
+	}
+
+	/* Copy CAN frame */
+	if (rtdm_copy_to_user(fd, iov->iov_base, &frame,
+			      sizeof(can_frame_t)))
+	    return -EFAULT;
+	/* Adjust iovec in the common way */
+	iov->iov_base += sizeof(can_frame_t);
+	iov->iov_len -= sizeof(can_frame_t);
+	/* ... and copy it, too. */
+	ret = rtdm_fd_put_iovec(fd, iov, msg);
+	if (ret)
+		return -EFAULT;
+
+	/* Copy timestamp if existent and wanted */
+	if (msg->msg_controllen) {
+	    if (can_dlc & RTCAN_HAS_TIMESTAMP) {
+		if (rtdm_copy_to_user(fd, msg->msg_control,
+				      &timestamp, RTCAN_TIMESTAMP_SIZE))
+		    return -EFAULT;
+
+		msg->msg_controllen = RTCAN_TIMESTAMP_SIZE;
+	    } else
+		msg->msg_controllen = 0;
+	}
+
+    } else {
+	/* Kernel space */
+
+	/* Copy socket address */
+	if (msg->msg_namelen) {
+	    memcpy(msg->msg_name, &scan, sizeof(struct sockaddr_can));
+	    msg->msg_namelen = sizeof(struct sockaddr_can);
+	}
+
+	/* Copy CAN frame */
+	memcpy(iov->iov_base, &frame, sizeof(can_frame_t));
+	/* Adjust iovec in the common way */
+	iov->iov_base += sizeof(can_frame_t);
+	iov->iov_len -= sizeof(can_frame_t);
+
+	/* Copy timestamp if existent and wanted */
+	if (msg->msg_controllen) {
+	    if (can_dlc & RTCAN_HAS_TIMESTAMP) {
+		memcpy(msg->msg_control, &timestamp, RTCAN_TIMESTAMP_SIZE);
+		msg->msg_controllen = RTCAN_TIMESTAMP_SIZE;
+	    } else
+		msg->msg_controllen = 0;
+	}
+    }
+
+
+    return sizeof(can_frame_t);
+}
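+
+/*
+ * Usage sketch (assuming libcobalt's POSIX wrappers and a socket with
+ * RTCAN_RTIOC_TAKE_TIMESTAMP enabled):
+ *
+ *   struct can_frame frame;
+ *   nanosecs_abs_t ts;
+ *   struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
+ *   struct msghdr msg = {
+ *       .msg_iov = &iov, .msg_iovlen = 1,
+ *       .msg_control = &ts, .msg_controllen = sizeof(ts),
+ *   };
+ *   ssize_t n = recvmsg(s, &msg, 0);
+ *
+ * On return, msg.msg_controllen is 0 if no timestamp was recorded.
+ */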
+
+
+ssize_t rtcan_raw_sendmsg(struct rtdm_fd *fd,
+			  const struct user_msghdr *msg, int flags)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct sockaddr_can *scan = (struct sockaddr_can *)msg->msg_name;
+    struct sockaddr_can scan_buf;
+    struct iovec *iov = (struct iovec *)msg->msg_iov;
+    struct iovec iov_buf;
+    can_frame_t *frame;
+    can_frame_t frame_buf;
+    rtdm_lockctx_t lock_ctx;
+    nanosecs_rel_t timeout = 0;
+    struct tx_wait_queue tx_wait;
+    struct rtcan_device *dev;
+    int ifindex = 0;
+    int ret  = 0;
+    spl_t s;
+
+
+    if (flags & MSG_OOB)   /* Mirror BSD error message compatibility */
+	return -EOPNOTSUPP;
+
+    /* Only MSG_DONTWAIT is a valid flag. */
+    if (flags & ~MSG_DONTWAIT)
+	return -EINVAL;
+
+    /* Check msg_iovlen, only one buffer allowed */
+    if (msg->msg_iovlen != 1)
+	return -EMSGSIZE;
+
+    if (scan == NULL) {
+	/* No socket address. Will use bound interface for sending */
+
+	if (msg->msg_namelen != 0)
+	    return -EINVAL;
+
+
+	/* We only need a consistent value here; a spinlock would be
+	 * overkill. The binding may still change before we get the
+	 * chance to send, but that is the user's problem. */
+	ifindex = atomic_read(&sock->ifindex);
+
+	if (!ifindex)
+	    /* Socket isn't bound or bound to all interfaces. Go out. */
+	    return -ENXIO;
+    } else {
+	/* Socket address given */
+	if (msg->msg_namelen < sizeof(struct sockaddr_can))
+	    return -EINVAL;
+
+	if (rtdm_fd_is_user(fd)) {
+	    /* Copy socket address from userspace */
+	    if (!rtdm_read_user_ok(fd, msg->msg_name,
+				   sizeof(struct sockaddr_can)) ||
+		rtdm_copy_from_user(fd, &scan_buf, msg->msg_name,
+				    sizeof(struct sockaddr_can)))
+		return -EFAULT;
+
+	    scan = &scan_buf;
+	}
+
+	/* Check address family */
+	if (scan->can_family != AF_CAN)
+	    return -EINVAL;
+
+	ifindex = scan->can_ifindex;
+    }
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy IO vector from userspace */
+	if (rtdm_fd_get_iovec(fd, &iov_buf, msg, false))
+	    return -EFAULT;
+
+	iov = &iov_buf;
+    }
+
+    /* Check size of buffer */
+    if (iov->iov_len != sizeof(can_frame_t))
+	return -EMSGSIZE;
+
+    frame = (can_frame_t *)iov->iov_base;
+
+    if (rtdm_fd_is_user(fd)) {
+	/* Copy CAN frame from userspace */
+	if (!rtdm_read_user_ok(fd, iov->iov_base,
+			       sizeof(can_frame_t)) ||
+	    rtdm_copy_from_user(fd, &frame_buf, iov->iov_base,
+				sizeof(can_frame_t)))
+	    return -EFAULT;
+
+	frame = &frame_buf;
+    }
+
+    /* Adjust iovec in the common way */
+    iov->iov_base += sizeof(can_frame_t);
+    iov->iov_len -= sizeof(can_frame_t);
+    /* ... and copy it back to userspace if necessary */
+    if (rtdm_fd_is_user(fd)) {
+	if (rtdm_copy_to_user(fd, msg->msg_iov, iov,
+			      sizeof(struct iovec)))
+	    return -EFAULT;
+    }
+
+    /* At last, we've got the frame ... */
+
+    /* Check that the DLC is between 0 and 15 */
+    if (frame->can_dlc > 15)
+	return -EINVAL;
+
+    /* Check that it is a standard frame and the ID is between 0 and 2031 */
+    if (!(frame->can_id & CAN_EFF_FLAG)) {
+	u32 id = frame->can_id & CAN_EFF_MASK;
+	if (id > (CAN_SFF_MASK - 16))
+	    return -EINVAL;
+    }
+
+    if ((dev = rtcan_dev_get_by_index(ifindex)) == NULL)
+	return -ENXIO;
+
+    timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sock->tx_timeout;
+
+    tx_wait.rt_task = rtdm_task_current();
+
+    /* Register the task at the socket's TX wait queue and decrement
+     * the TX semaphore. This must be atomic. Finally, the task must
+     * be deregistered again (also atomic). */
+    cobalt_atomic_enter(s);
+
+    list_add(&tx_wait.tx_wait_list, &sock->tx_wait_head);
+
+    /* Try to pass the guard in order to access the controller */
+    ret = rtdm_sem_timeddown(&dev->tx_sem, timeout, NULL);
+
+    /* Only dequeue the task again if the socket isn't being closed,
+     * i.e. if this task was not unblocked within the close() function. */
+    if (likely(!list_empty(&tx_wait.tx_wait_list)))
+	/* Dequeue this task from the TX wait queue */
+	list_del_init(&tx_wait.tx_wait_list);
+    else
+	/* The socket was closed. */
+	ret = -EBADF;
+
+    cobalt_atomic_leave(s);
+
+    /* Error code returned? */
+    if (ret != 0) {
+	/* Which error code? */
+	switch (ret) {
+	case -EIDRM:
+	    /* Controller is stopped or bus-off */
+	    ret = -ENETDOWN;
+	    goto send_out1;
+
+	case -EWOULDBLOCK:
+	    /* We would block but don't want to */
+	    ret = -EAGAIN;
+	    goto send_out1;
+
+	default:
+	    /* Return all other error codes unmodified. */
+	    goto send_out1;
+	}
+    }
+
+    /* We got access */
+
+
+    /* Push message onto stack for loopback when TX done */
+    if (rtcan_loopback_enabled(sock))
+	rtcan_tx_push(dev, sock, frame);
+
+    rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+
+    /* Controller should be operating */
+    if (!CAN_STATE_OPERATING(dev->state)) {
+	if (dev->state == CAN_STATE_SLEEPING) {
+	    ret = -ECOMM;
+	    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+	    rtdm_sem_up(&dev->tx_sem);
+	    goto send_out1;
+	}
+	ret = -ENETDOWN;
+	goto send_out2;
+    }
+
+    ret = dev->hard_start_xmit(dev, frame);
+
+    /* Return number of bytes sent upon successful completion */
+    if (ret == 0)
+	ret = sizeof(can_frame_t);
+
+ send_out2:
+    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+ send_out1:
+    rtcan_dev_dereference(dev);
+    return ret;
+}
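+
+/*
+ * Usage sketch (values chosen for illustration only): sending one frame
+ * on the bound interface without blocking:
+ *
+ *   struct can_frame frame = { .can_id = 0x123, .can_dlc = 2,
+ *                              .data = { 0xde, 0xad } };
+ *   struct iovec iov = { .iov_base = &frame, .iov_len = sizeof(frame) };
+ *   struct msghdr msg = { .msg_iov = &iov, .msg_iovlen = 1 };
+ *   ssize_t n = sendmsg(s, &msg, MSG_DONTWAIT);
+ */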
+
+
+static struct rtdm_driver rtcan_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtcan,
+						    RTDM_CLASS_CAN,
+						    RTDM_SUBCLASS_GENERIC,
+						    RTCAN_PROFILE_VER),
+	.device_flags		= RTDM_PROTOCOL_DEVICE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rtcan_socket),
+	.protocol_family	= PF_CAN,
+	.socket_type		= SOCK_RAW,
+	.ops = {
+		.socket		= rtcan_raw_socket,
+		.close		= rtcan_raw_close,
+		.ioctl_nrt	= rtcan_raw_ioctl,
+		.recvmsg_rt	= rtcan_raw_recvmsg,
+		.sendmsg_rt	= rtcan_raw_sendmsg,
+	},
+};
+
+static struct rtdm_device rtcan_device = {
+	.driver = &rtcan_driver,
+	.label = "rtcan",
+};
+
+int __init rtcan_raw_proto_register(void)
+{
+    return rtdm_dev_register(&rtcan_device);
+}
+
+void __exit rtcan_raw_proto_unregister(void)
+{
+    rtdm_dev_unregister(&rtcan_device);
+}
+
+
+EXPORT_SYMBOL_GPL(rtcan_rcv);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h
new file mode 100644
index 0000000..cd1523e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_RAW_H_
+#define __RTCAN_RAW_H_
+
+#ifdef __KERNEL__
+
+int rtcan_raw_ioctl_dev(struct rtdm_fd *fd, int request, void *arg);
+
+int rtcan_raw_check_filter(struct rtcan_socket *sock,
+			   int ifindex, struct rtcan_filter_list *flist);
+int rtcan_raw_add_filter(struct rtcan_socket *sock, int ifindex);
+void rtcan_raw_remove_filter(struct rtcan_socket *sock);
+
+void rtcan_rcv(struct rtcan_device *rtcandev, struct rtcan_skb *skb);
+
+void rtcan_loopback(struct rtcan_device *rtcandev);
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+#define rtcan_loopback_enabled(sock) (sock->loopback)
+#define rtcan_loopback_pending(dev) (dev->tx_socket)
+#else /* !CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+#define rtcan_loopback_enabled(sock) (0)
+#define rtcan_loopback_pending(dev) (0)
+#endif /* CONFIG_XENO_DRIVERS_CAN_LOOPBACK */
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+void __rtcan_raw_enable_bus_err(struct rtcan_socket *sock);
+static inline void rtcan_raw_enable_bus_err(struct rtcan_socket *sock)
+{
+    if ((sock->err_mask & CAN_ERR_BUSERROR))
+	__rtcan_raw_enable_bus_err(sock);
+}
+#else
+#define rtcan_raw_enable_bus_err(sock)
+#endif
+
+int __init rtcan_raw_proto_register(void);
+void __exit rtcan_raw_proto_unregister(void);
+
+#endif  /* __KERNEL__ */
+
+#endif  /* __RTCAN_RAW_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c
new file mode 100644
index 0000000..d1ff640
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_dev.c
@@ -0,0 +1,455 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger, <wg@grandegger.com>
+ * Copyright (C) 2005 Marc Kleine-Budde, Pengutronix
+ * Copyright (C) 2006 Andrey Volkov, Varma Electronics
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+#include "rtcan_internal.h"
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+
+#define RTCAN_MAX_TSEG1  15
+#define RTCAN_MAX_TSEG2  7
+
+/*
+ * Calculate standard bit-time values for non-standard bitrates.
+ * Most of this code is from Arnaud Westenberg <arnaud@wanadoo.nl>.
+ */
+static int rtcan_calc_bit_time(struct rtcan_device *dev,
+			       can_baudrate_t rate,
+			       struct can_bittime_std *bit_time)
+{
+    int best_error = 1000000000;
+    int error;
+    int best_tseg=0, best_brp=0, best_rate=0, brp=0;
+    int tseg=0, tseg1=0, tseg2=0;
+    int clock = dev->can_sys_clock;
+    int sjw = 0;
+    int sampl_pt = 90;
+
+    /* some heuristic specials */
+    if (rate > ((1000000 + 500000) / 2))
+	sampl_pt = 75;
+
+    if (rate < ((12500 + 10000) / 2))
+	sampl_pt = 75;
+
+    if (rate < ((100000 + 125000) / 2))
+	sjw = 1;
+
+    /* tseg even = round down, odd = round up */
+    for (tseg = (0 + 0 + 2) * 2;
+	 tseg <= (RTCAN_MAX_TSEG2 + RTCAN_MAX_TSEG1 + 2) * 2 + 1;
+	 tseg++) {
+	brp = clock / ((1 + tseg / 2) * rate) + tseg % 2;
+	if ((brp == 0) || (brp > 64))
+	    continue;
+
+	error = rate - clock / (brp * (1 + tseg / 2));
+	if (error < 0)
+	    error = -error;
+
+	if (error <= best_error) {
+	    best_error = error;
+	    best_tseg = tseg/2;
+	    best_brp = brp - 1;
+	    best_rate = clock / (brp * (1 + tseg / 2));
+	}
+    }
+
+    if (best_error && (rate / best_error < 10)) {
+	RTCAN_RTDM_DBG("%s: bitrate %d is not possible with %d Hz clock\n",
+		       dev->name, rate, clock);
+	return -EDOM;
+    }
+
+    tseg2 = best_tseg - (sampl_pt * (best_tseg + 1)) / 100;
+
+    if (tseg2 < 0)
+	tseg2 = 0;
+
+    if (tseg2 > RTCAN_MAX_TSEG2)
+	tseg2 = RTCAN_MAX_TSEG2;
+
+    tseg1 = best_tseg - tseg2 - 2;
+
+    if (tseg1 > RTCAN_MAX_TSEG1)  {
+	tseg1 = RTCAN_MAX_TSEG1;
+	tseg2 = best_tseg-tseg1-2;
+    }
+
+    bit_time->brp = best_brp + 1;
+    bit_time->prop_seg = 0;
+    bit_time->phase_seg1 = tseg1 + 1;
+    bit_time->phase_seg2 = tseg2 + 1;
+    bit_time->sjw = sjw + 1;
+    bit_time->sam = 0;
+
+    return 0;
+}
+
+#else /* !CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD */
+
+/* This is the bit-time calculation method from the Linux kernel */
+
+#define CAN_CALC_MAX_ERROR 50 /* in one-tenth of a percent */
+
+static int can_update_spt(const struct can_bittiming_const *btc,
+			  unsigned int sampl_pt, unsigned int tseg,
+			  unsigned int *tseg1, unsigned int *tseg2)
+{
+    *tseg2 = tseg + 1 - (sampl_pt * (tseg + 1)) / 1000;
+    *tseg2 = clamp(*tseg2, btc->tseg2_min, btc->tseg2_max);
+    *tseg1 = tseg - *tseg2;
+    if (*tseg1 > btc->tseg1_max) {
+	*tseg1 = btc->tseg1_max;
+	*tseg2 = tseg - *tseg1;
+    }
+
+    return 1000 * (tseg + 1 - *tseg2) / (tseg + 1);
+}
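+
+/*
+ * Worked example (assuming the tsegX limits do not clamp): for
+ * sampl_pt = 875 and tseg = 15, *tseg2 becomes
+ * 16 - (875 * 16) / 1000 = 2 and *tseg1 = 13; the function returns
+ * 1000 * 14 / 16 = 875, i.e. an exact 87.5% sample point.
+ */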
+
+static int rtcan_calc_bit_time(struct rtcan_device *dev,
+			       can_baudrate_t bitrate,
+			       struct can_bittime_std *bt)
+{
+    const struct can_bittiming_const *btc = dev->bittiming_const;
+    long rate;	/* current bitrate */
+    long rate_error;/* difference between current and target value */
+    long best_rate_error = 1000000000;
+    int spt;	/* current sample point in thousandth */
+    int spt_error;	/* difference between current and target value */
+    int best_spt_error = 1000;
+    int sampl_pt;	/* target sample point */
+    int best_tseg = 0, best_brp = 0;	/* current best values for tseg and brp */
+    unsigned int brp, tsegall, tseg, tseg1, tseg2;
+    u64 v64;
+
+    if (!dev->bittiming_const)
+	return -ENOTSUPP;
+
+    /* Use CiA recommended sample points */
+    if (bitrate > 800000)
+	sampl_pt = 750;
+    else if (bitrate > 500000)
+	sampl_pt = 800;
+    else
+	sampl_pt = 875;
+
+    /* tseg even = round down, odd = round up */
+    for (tseg = (btc->tseg1_max + btc->tseg2_max) * 2 + 1;
+	 tseg >= (btc->tseg1_min + btc->tseg2_min) * 2; tseg--) {
+	tsegall = 1 + tseg / 2;
+
+	/* Compute all possible tseg choices (tseg=tseg1+tseg2) */
+	brp = dev->can_sys_clock / (tsegall * bitrate) + tseg % 2;
+
+	/* choose a brp step which is possible in the system */
+	brp = (brp / btc->brp_inc) * btc->brp_inc;
+	if ((brp < btc->brp_min) || (brp > btc->brp_max))
+	    continue;
+
+	rate = dev->can_sys_clock / (brp * tsegall);
+	rate_error = abs((long)(bitrate - rate));
+
+	/* Skip this tseg/brp pair if its bitrate error is worse */
+	if (rate_error > best_rate_error)
+	    continue;
+
+	/* reset sample point error if we have a better bitrate */
+	if (rate_error < best_rate_error)
+	    best_spt_error = 1000;
+
+	spt = can_update_spt(btc, sampl_pt, tseg / 2, &tseg1, &tseg2);
+	spt_error = abs((long)(sampl_pt - spt));
+	if (spt_error > best_spt_error)
+	    continue;
+
+	best_spt_error = spt_error;
+	best_rate_error = rate_error;
+	best_tseg = tseg / 2;
+	best_brp = brp;
+
+	if (rate_error == 0 && spt_error == 0)
+	    break;
+    }
+
+    if (best_rate_error) {
+	/* Error in one-tenth of a percent */
+	rate_error = (best_rate_error * 1000) / bitrate;
+	if (rate_error > CAN_CALC_MAX_ERROR) {
+	    rtcandev_err(dev,
+			 "bitrate error %ld.%ld%% too high\n",
+			 rate_error / 10, rate_error % 10);
+	    return -EDOM;
+	} else {
+	    rtcandev_warn(dev, "bitrate error %ld.%ld%%\n",
+			  rate_error / 10, rate_error % 10);
+	}
+    }
+
+    /* real sample point */
+    sampl_pt = can_update_spt(btc, sampl_pt, best_tseg, &tseg1, &tseg2);
+
+    v64 = (u64)best_brp * 1000000000UL;
+    do_div(v64, dev->can_sys_clock);
+    bt->prop_seg = tseg1 / 2;
+    bt->phase_seg1 = tseg1 - bt->prop_seg;
+    bt->phase_seg2 = tseg2;
+    bt->sjw = 1;
+    bt->sam = 0;
+    bt->brp = best_brp;
+
+    /* real bit-rate */
+    rate = dev->can_sys_clock / (bt->brp * (tseg1 + tseg2 + 1));
+
+    rtcandev_dbg(dev, "real bitrate %ld, sampling point %d.%d%%\n",
+		 rate, sampl_pt/10, sampl_pt%10);
+
+    return 0;
+}
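+
+/*
+ * Worked example (assuming an 8 MHz CAN clock and SJA1000-like
+ * bittiming limits): for bitrate = 500000 the loop settles on brp = 1
+ * with 16 time quanta per bit (best_tseg = 15); the result is
+ * prop_seg = 6, phase_seg1 = 7, phase_seg2 = 2, sjw = 1, giving an
+ * exact 500 kbit/s at an 87.5% sample point.
+ */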
+
+#endif /* CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD */
+
+static inline int rtcan_raw_ioctl_dev_get(struct rtcan_device *dev,
+					  int request, struct can_ifreq *ifr)
+{
+    rtdm_lockctx_t lock_ctx;
+
+    switch (request) {
+
+    case SIOCGIFINDEX:
+	ifr->ifr_ifindex = dev->ifindex;
+	break;
+
+    case SIOCGCANSTATE:
+	rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+	if (dev->do_get_state)
+	    dev->state = dev->do_get_state(dev);
+	ifr->ifr_ifru.state = dev->state;
+	rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+	break;
+
+    case SIOCGCANCTRLMODE:
+	ifr->ifr_ifru.ctrlmode = dev->ctrl_mode;
+	break;
+
+    case SIOCGCANBAUDRATE:
+	ifr->ifr_ifru.baudrate = dev->baudrate;
+	break;
+
+    case SIOCGCANCUSTOMBITTIME:
+	ifr->ifr_ifru.bittime = dev->bit_time;
+	break;
+    }
+
+    return 0;
+}
+
+static inline int rtcan_raw_ioctl_dev_set(struct rtcan_device *dev,
+					  int request, struct can_ifreq *ifr)
+{
+    rtdm_lockctx_t lock_ctx;
+    int ret = 0, started = 0;
+    struct can_bittime bit_time, *bt;
+
+    switch (request) {
+    case SIOCSCANBAUDRATE:
+	if (!dev->do_set_bit_time)
+	    return 0;
+	ret = rtcan_calc_bit_time(dev, ifr->ifr_ifru.baudrate, &bit_time.std);
+	if (ret)
+	    break;
+	bit_time.type = CAN_BITTIME_STD;
+	break;
+    }
+
+    rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+
+    if (dev->do_get_state)
+	dev->state = dev->do_get_state(dev);
+
+    switch (request) {
+    case SIOCSCANCTRLMODE:
+    case SIOCSCANBAUDRATE:
+    case SIOCSCANCUSTOMBITTIME:
+	if ((started = CAN_STATE_OPERATING(dev->state))) {
+	    if ((ret = dev->do_set_mode(dev, CAN_MODE_STOP, &lock_ctx)))
+		goto out;
+	}
+	break;
+    }
+
+    switch (request) {
+    case SIOCSCANMODE:
+	if (dev->do_set_mode &&
+	    !(ifr->ifr_ifru.mode == CAN_MODE_START &&
+	      CAN_STATE_OPERATING(dev->state)))
+	    ret = dev->do_set_mode(dev, ifr->ifr_ifru.mode, &lock_ctx);
+	break;
+
+    case SIOCSCANCTRLMODE:
+	dev->ctrl_mode = ifr->ifr_ifru.ctrlmode;
+	break;
+
+    case SIOCSCANBAUDRATE:
+	ret = dev->do_set_bit_time(dev, &bit_time, &lock_ctx);
+	if (!ret) {
+	    dev->baudrate = ifr->ifr_ifru.baudrate;
+	    dev->bit_time = bit_time;
+	}
+	break;
+
+    case SIOCSCANCUSTOMBITTIME:
+	bt = &ifr->ifr_ifru.bittime;
+	ret = dev->do_set_bit_time(dev, bt, &lock_ctx);
+	if (!ret) {
+	    dev->bit_time = *bt;
+	    if (bt->type == CAN_BITTIME_STD && bt->std.brp)
+		dev->baudrate = (dev->can_sys_clock /
+				 (bt->std.brp * (1 + bt->std.prop_seg +
+						 bt->std.phase_seg1 +
+						 bt->std.phase_seg2)));
+	    else
+		dev->baudrate = CAN_BAUDRATE_UNKNOWN;
+	}
+	break;
+
+    default:
+	ret = -EOPNOTSUPP;
+	break;
+    }
+
+ out:
+    if (started)
+	dev->do_set_mode(dev, CAN_MODE_START, &lock_ctx);
+
+    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+
+    return ret;
+}
+
+int rtcan_raw_ioctl_dev(struct rtdm_fd *fd, int request, void *arg)
+{
+    struct can_ifreq *ifr;
+    int ret = 0, get = 0;
+    union {
+	    /*
+	     * We need to deal with callers still passing struct ifreq
+	     * instead of can_ifreq, which may have a larger memory
+	     * footprint (but never a smaller one). Field offsets
+	     * will be the same regardless.
+	     */
+	    struct ifreq ifr_legacy;
+	    struct can_ifreq ifr_can;
+    } ifr_buf;
+    struct rtcan_device *dev;
+
+    switch (request) {
+
+    case SIOCGIFINDEX:
+    case SIOCGCANSTATE:
+    case SIOCGCANBAUDRATE:
+    case SIOCGCANCUSTOMBITTIME:
+	    get = 1;
+	    fallthrough;
+    case SIOCSCANMODE:
+    case SIOCSCANCTRLMODE:
+    case SIOCSCANBAUDRATE:
+    case SIOCSCANCUSTOMBITTIME:
+
+	if (rtdm_fd_is_user(fd)) {
+	    /* Copy struct can_ifreq from userspace */
+	    if (!rtdm_read_user_ok(fd, arg,
+				   sizeof(struct can_ifreq)) ||
+		rtdm_copy_from_user(fd, &ifr_buf, arg,
+				    sizeof(struct can_ifreq)))
+		return -EFAULT;
+
+	    ifr = &ifr_buf.ifr_can;
+	} else
+	    ifr = (struct can_ifreq *)arg;
+
+	/* Get interface index and data */
+	dev = rtcan_dev_get_by_name(ifr->ifr_name);
+	if (dev == NULL)
+	    return -ENODEV;
+
+	if (get) {
+		ret = rtcan_raw_ioctl_dev_get(dev, request, ifr);
+		rtcan_dev_dereference(dev);
+		if (ret == 0 && rtdm_fd_is_user(fd)) {
+		    /*
+		     * Since we already verified that the user memory
+		     * is r/w safe, we can copy to user space directly.
+		     */
+		    if (rtdm_copy_to_user(fd, arg, ifr,
+					  sizeof(struct can_ifreq)))
+			    return -EFAULT;
+		}
+	} else {
+		ret = rtcan_raw_ioctl_dev_set(dev, request, ifr);
+		rtcan_dev_dereference(dev);
+	}
+	break;
+
+    default:
+	ret = -EOPNOTSUPP;
+	break;
+
+    }
+
+    return ret;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_BUS_ERR
+void __rtcan_raw_enable_bus_err(struct rtcan_socket *sock)
+{
+    int i, begin, end;
+    struct rtcan_device *dev;
+    rtdm_lockctx_t lock_ctx;
+    int ifindex = atomic_read(&sock->ifindex);
+
+    if (ifindex) {
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    for (i = begin; i <= end; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+
+	if (dev->do_enable_bus_err) {
+	    rtdm_lock_get_irqsave(&dev->device_lock, lock_ctx);
+	    dev->do_enable_bus_err(dev);
+	    rtdm_lock_put_irqrestore(&dev->device_lock, lock_ctx);
+	}
+	rtcan_dev_dereference(dev);
+    }
+}
+#endif /* CONFIG_XENO_DRIVERS_CAN_BUS_ERR*/
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c
new file mode 100644
index 0000000..e121061
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_raw_filter.c
@@ -0,0 +1,256 @@
+/*
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include "rtcan_internal.h"
+#include "rtcan_socket.h"
+#include "rtcan_list.h"
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+
+
+#if 0
+void rtcan_raw_print_filter(struct rtcan_device *dev)
+{
+    int i;
+    struct rtcan_recv *r = dev->receivers;
+
+    rtdm_printk("%s: recv_list=%p empty_list=%p free_entries=%d\n",
+		dev->name, dev->recv_list, dev->empty_list, dev->free_entries);
+    for (i = 0; i < RTCAN_MAX_RECEIVERS; i++, r++) {
+	rtdm_printk("%2d %p sock=%p next=%p id=%x mask=%x\n",
+		    i, r, r->sock, r->next,
+		    r->can_filter.can_id, r->can_filter.can_mask);
+    }
+}
+#else
+#define rtcan_raw_print_filter(dev)
+#endif
+
+
+static inline void rtcan_raw_mount_filter(can_filter_t *recv_filter,
+					  can_filter_t *filter)
+{
+    if (filter->can_id & CAN_INV_FILTER) {
+	recv_filter->can_id = filter->can_id & ~CAN_INV_FILTER;
+	recv_filter->can_mask = filter->can_mask | CAN_INV_FILTER;
+    } else {
+	recv_filter->can_id = filter->can_id;
+	recv_filter->can_mask = filter->can_mask & ~CAN_INV_FILTER;
+    }
+
+    /* Apply mask for fast filter check */
+    recv_filter->can_id &= recv_filter->can_mask;
+}
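+
+/*
+ * Illustrative example: a user filter with
+ * can_id = 0x123 | CAN_INV_FILTER and can_mask = 0x7F0 is mounted as
+ * can_id = 0x120, can_mask = 0x7F0 | CAN_INV_FILTER. Pre-masking the
+ * ID here keeps the hot-path check in rtcan_accept_msg() to a single
+ * AND and compare.
+ */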
+
+
+int rtcan_raw_check_filter(struct rtcan_socket *sock, int ifindex,
+			   struct rtcan_filter_list *flist)
+{
+    int old_ifindex = 0, old_flistlen_all = 0;
+    int free_entries, i, begin, end;
+    struct rtcan_device *dev;
+    int flistlen;
+
+    if (rtcan_flist_no_filter(flist))
+	return 0;
+
+    /* Check if filter list has been defined by user */
+    flistlen = (flist) ? flist->flistlen : 1;
+
+    /* Now we check if a reception list would overflow. This takes some
+     * preparation, so let's go ... */
+
+    /* Check current bind status */
+    if (rtcan_sock_has_filter(sock)) {
+	/* Socket is bound */
+	i = atomic_read(&sock->ifindex);
+
+	if (i == 0)
+	    /* Socket was bound to ALL interfaces */
+	    old_flistlen_all = sock->flistlen;
+	else    /* Socket was bound to only one interface */
+	    old_ifindex = i;
+    }
+
+    if (ifindex) {
+	/* We bind the socket to only one interface. */
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	/* Socket must be bound to all interfaces. */
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    /* Check if there is space for the new binding */
+    for (i = begin; i <= end; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+	free_entries = dev->free_entries + old_flistlen_all;
+	rtcan_dev_dereference(dev);
+	if (i == old_ifindex)
+	    free_entries += sock->flistlen;
+	/* Compare free list space to new filter list length */
+	if (free_entries < flistlen)
+	    return -ENOSPC;
+    }
+
+    return 0;
+}
+
+
+int rtcan_raw_add_filter(struct rtcan_socket *sock, int ifindex)
+{
+    int i, j, begin, end;
+    struct rtcan_recv *first, *last;
+    struct rtcan_device *dev;
+    /* Check if filter list has been defined by user */
+    int flistlen;
+
+    if (rtcan_flist_no_filter(sock->flist)) {
+	return 0;
+    }
+
+    flistlen = (sock->flist) ? sock->flist->flistlen : 0;
+
+    if (ifindex) {
+	/* We bind the socket to only one interface. */
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	/* Socket must be bound to all interfaces. */
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    for (i = begin; i <= end; i++) {
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+
+	/* Take first entry of empty list */
+	first = last = dev->empty_list;
+	/* Check if filter list is empty */
+	if (flistlen) {
+	    /* Filter list is not empty */
+	    /* Register first filter */
+	    rtcan_raw_mount_filter(&last->can_filter,
+				   &sock->flist->flist[0]);
+	    last->match_count = 0;
+	    last->sock = sock;
+	    for (j = 1; j < flistlen; j++) {
+		/* Register remaining filters */
+		last = last->next;
+		rtcan_raw_mount_filter(&last->can_filter,
+				       &sock->flist->flist[j]);
+		last->sock = sock;
+		last->match_count = 0;
+	    }
+	    /* Decrease free entries counter by length of filter list */
+	    dev->free_entries -= flistlen;
+
+	} else {
+	    /* Filter list is empty. Socket must be bound to all CAN IDs. */
+	    /* Fill list entry members */
+	    last->can_filter.can_id = last->can_filter.can_mask = 0;
+	    last->sock = sock;
+	    last->match_count = 0;
+	    /* Decrease free entries counter by 1
+	     * (one filter for all CAN frames) */
+	    dev->free_entries--;
+	}
+
+	/* Set new empty list header */
+	dev->empty_list = last->next;
+	/* Add new partial recv list to the head of reception list */
+	last->next = dev->recv_list;
+	/* Adjust reception list pointer */
+	dev->recv_list = first;
+
+	rtcan_raw_print_filter(dev);
+	rtcan_dev_dereference(dev);
+    }
+
+    return (flistlen) ? flistlen : 1;
+}
+
+
+void rtcan_raw_remove_filter(struct rtcan_socket *sock)
+{
+    int i, j, begin, end;
+    struct rtcan_recv *first, *next, *last;
+    int ifindex = atomic_read(&sock->ifindex);
+    struct rtcan_device *dev;
+
+    if (!rtcan_sock_has_filter(sock)) /* nothing to do */
+	return;
+
+    if (ifindex) {
+	/* Socket was bound to one interface only. */
+	begin = ifindex;
+	end   = ifindex;
+    } else {
+	/* Socket was bound to all interfaces */
+	begin = 1;
+	end = RTCAN_MAX_DEVICES;
+    }
+
+    for (i = begin; i <= end; i++) {
+
+	if ((dev = rtcan_dev_get_by_index(i)) == NULL)
+	    continue;
+
+	/* Search for first list entry pointing to this socket */
+	first = NULL;
+	next = dev->recv_list;
+	while (next->sock != sock) {
+	    first = next;
+	    next = first->next;
+	}
+
+	/* Now go to the end of the old filter list */
+	last = next;
+	for (j = 1; j < sock->flistlen; j++)
+	    last = last->next;
+
+	/* Detach found first list entry from reception list */
+	if (first)
+	    first->next = last->next;
+	else
+	    dev->recv_list = last->next;
+	/* Add partial list to the head of empty list */
+	last->next = dev->empty_list;
+	/* Adjust empty list pointer */
+	dev->empty_list = next;
+
+	/* Increase free entries counter by length of old filter list */
+	dev->free_entries += sock->flistlen;
+
+	rtcan_raw_print_filter(dev);
+	rtcan_dev_dereference(dev);
+    }
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c
new file mode 100644
index 0000000..edd4619
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Based on stack/socket.c - sockets implementation for RTnet
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include "rtcan_socket.h"
+#include "rtcan_list.h"
+
+
+LIST_HEAD(rtcan_socket_list);
+
+void rtcan_socket_init(struct rtdm_fd *fd)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    rtdm_lockctx_t lock_ctx;
+
+
+    rtdm_sem_init(&sock->recv_sem, 0);
+
+    sock->recv_head = 0;
+    sock->recv_tail = 0;
+    atomic_set(&sock->ifindex, 0);
+    sock->flistlen = RTCAN_SOCK_UNBOUND;
+    sock->flist = NULL;
+    sock->err_mask = 0;
+    sock->rx_buf_full = 0;
+    sock->flags = 0;
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+    sock->loopback = 1;
+#endif
+
+    sock->tx_timeout = RTDM_TIMEOUT_INFINITE;
+    sock->rx_timeout = RTDM_TIMEOUT_INFINITE;
+
+    INIT_LIST_HEAD(&sock->tx_wait_head);
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+    list_add(&sock->socket_list, &rtcan_socket_list);
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+}
+
+
+void rtcan_socket_cleanup(struct rtdm_fd *fd)
+{
+    struct rtcan_socket *sock = rtdm_fd_to_private(fd);
+    struct tx_wait_queue *tx_waiting;
+    rtdm_lockctx_t lock_ctx;
+    int tx_list_empty;
+
+    /* Wake up sleeping senders. This is re-entrant-safe. */
+    do {
+	cobalt_atomic_enter(lock_ctx);
+	/* Is someone there? */
+	if (list_empty(&sock->tx_wait_head))
+		tx_list_empty = 1;
+	else {
+		tx_list_empty = 0;
+
+		/* Get next entry pointing to a waiting task */
+		tx_waiting = list_entry(sock->tx_wait_head.next,
+					struct tx_wait_queue, tx_wait_list);
+
+		/* Remove it from list */
+		list_del_init(&tx_waiting->tx_wait_list);
+
+		/* Wake task up (atomic section is left implicitly) */
+		rtdm_task_unblock(tx_waiting->rt_task);
+	}
+	cobalt_atomic_leave(lock_ctx);
+    } while (!tx_list_empty);
+
+    rtdm_sem_destroy(&sock->recv_sem);
+
+    rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+    if (sock->socket_list.next) {
+	list_del(&sock->socket_list);
+	sock->socket_list.next = NULL;
+    }
+    rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h
new file mode 100644
index 0000000..cf4422a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_socket.h
@@ -0,0 +1,207 @@
+/*
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                         <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Derived from RTnet project file include/stack/socket.h:
+ *
+ * Copyright (C) 1999       Lineo, Inc
+ *               1999, 2002 David A. Schleef <ds@schleef.org>
+ *               2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_SOCKET_H_
+#define __RTCAN_SOCKET_H_
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+
+
+
+/* This MUST BE 2^N */
+#define RTCAN_RXBUF_SIZE          CONFIG_XENO_DRIVERS_CAN_RXBUF_SIZE
+
+/* Size of timestamp */
+#define RTCAN_TIMESTAMP_SIZE      sizeof(nanosecs_abs_t)
+
+/* Bit in the can_dlc member of struct rtcan_rb_frame used to indicate
+ * whether a frame carries a timestamp */
+#define RTCAN_HAS_TIMESTAMP       0x80
+
+/* Mask for clearing bit RTCAN_HAS_TIMESTAMP */
+#define RTCAN_HAS_NO_TIMESTAMP    0x7F
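+
+/*
+ * Usage sketch (illustrative, not part of the driver): a consumer of a
+ * stored frame would separate the flag from the DLC like this:
+ *
+ *   int has_ts = frame->can_dlc & RTCAN_HAS_TIMESTAMP;
+ *   u8  dlc    = frame->can_dlc & RTCAN_HAS_NO_TIMESTAMP;
+ */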
+
+#define RTCAN_SOCK_UNBOUND        -1
+#define RTCAN_FLIST_NO_FILTER     ((struct rtcan_filter_list *)-1)
+#define rtcan_flist_no_filter(f)  ((f) == RTCAN_FLIST_NO_FILTER)
+#define rtcan_sock_has_filter(s)  ((s)->flistlen > 0)
+#define rtcan_sock_is_bound(s)    ((s)->flistlen >= 0)
+
+/*
+ *  Internal frame representation within the ring buffer of a
+ *  struct rtcan_socket.
+ *
+ *  The data array is of arbitrary size when the frame is actually
+ *  stored in a socket's ring buffer. The timestamp member exists if the
+ *  socket was set to take timestamps (then it follows directly after the
+ *  arbitrary-sized data array), otherwise it does not exist.
+ */
+struct rtcan_rb_frame {
+
+    /* CAN ID representation equal to struct can_frame */
+    uint32_t            can_id;
+
+    /* Interface index from which the frame originates */
+    unsigned char       can_ifindex;
+
+    /* DLC (between 0 and 15) plus a flag marking whether the frame
+     * carries a timestamp. The presence of a timestamp is indicated
+     * by the RTCAN_HAS_TIMESTAMP bit. */
+    unsigned char       can_dlc;
+
+    /* Data bytes */
+    uint8_t             data[8];
+
+    /* High precision timestamp indicating when the frame was received.
+     * Exists when RTCAN_HAS_TIMESTAMP bit in can_dlc is set. */
+    nanosecs_abs_t      timestamp;
+
+} __attribute__ ((packed));
+
+
+/* Size of struct rtcan_rb_frame without any data bytes and timestamp */
+#define EMPTY_RB_FRAME_SIZE \
+    (sizeof(struct rtcan_rb_frame) - 8 - RTCAN_TIMESTAMP_SIZE)
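+
+/*
+ * Illustration (not from the original source): the size at which a
+ * frame is stored, excluding the optional trailing timestamp, is
+ *
+ *   size = EMPTY_RB_FRAME_SIZE + dlc;
+ *
+ * which is how rtcan_virt_start_xmit() in rtcan_virt.c fills in
+ * rtcan_skb.rb_frame_size.
+ */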
+
+
+/*
+ *  Wrapper structure around a struct rtcan_rb_frame with actual size
+ *  of the frame.
+ *
+ *  This isn't really a socket buffer, only something resembling one. It is constructed
+ *  within the interrupt routine when a CAN frame is read from
+ *  the controller. Then it's passed to the reception handler where only
+ *  rb_frame finds its way to the sockets' ring buffers.
+ */
+struct rtcan_skb {
+    /* Actual size of following rb_frame (without timestamp) */
+    size_t                rb_frame_size;
+    /* Frame to be stored in the sockets' ring buffers (as is) */
+    struct rtcan_rb_frame rb_frame;
+};
+
+struct rtcan_filter_list {
+    int flistlen;
+    struct can_filter flist[1];
+};
+
+/*
+ * Internal CAN socket structure.
+ *
+ * Every socket has an internal ring buffer for incoming messages. A message
+ * is not stored as a struct can_frame (in order to save buffer space)
+ * but as struct rtcan_rb_frame of arbitrary length depending on the
+ * actual payload.
+ */
+struct rtcan_socket {
+
+    struct list_head    socket_list;
+
+    unsigned long	flags;
+
+    /* Transmission timeout in ns. Protected by rtcan_socket_lock
+     * in all socket structures. */
+    nanosecs_rel_t      tx_timeout;
+
+    /* Reception timeout in ns. Protected by rtcan_socket_lock
+     * in all socket structures. */
+    nanosecs_rel_t      rx_timeout;
+
+
+    /* Begin of first frame data in the ring buffer. Protected by
+     * rtcan_socket_lock in all socket structures. */
+    int                 recv_head;
+
+    /* End of last frame data in the ring buffer. I.e. position of first
+     * free byte in the ring buffer. Protected by
+     * rtcan_socket_lock in all socket structures. */
+    int                 recv_tail;
+
+    /* Ring buffer for incoming CAN frames. Protected by
+     * rtcan_socket_lock in all socket structures. */
+    unsigned char       recv_buf[RTCAN_RXBUF_SIZE];
+
+    /* Semaphore for receivers and incoming messages */
+    rtdm_sem_t          recv_sem;
+
+
+    /* All senders waiting to be able to send
+     * via this socket are queued here */
+    struct list_head    tx_wait_head;
+
+
+    /* Interface index the socket is bound to. Protected by
+     * rtcan_recv_list_lock in all socket structures. */
+    atomic_t            ifindex;
+
+    /* Length of filter list. I.e. how many entries does this socket occupy in
+     * the reception list. 0 if unbound. Protected by
+     * rtcan_recv_list_lock in all socket structures. */
+    int                 flistlen;
+
+    uint32_t            err_mask;
+
+    uint32_t            rx_buf_full;
+
+    struct rtcan_filter_list *flist;
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_LOOPBACK
+    int loopback;
+#endif
+};
+
+
+
+/*
+ *  Get the RTDM context from a struct rtcan_socket
+ *
+ *  @param[in] sock Pointer to socket structure
+ *
+ *  @return Pointer to a file descriptor of type struct rtdm_fd this socket
+ *          belongs to
+ */
+/* FIXME: to be replaced with container_of */
+static inline struct rtdm_fd *rtcan_socket_to_fd(struct rtcan_socket *sock)
+{
+    return rtdm_private_to_fd(sock);
+}
+
+/* Spinlock protecting the ring buffers and the timeouts of all
+ * rtcan_sockets */
+extern rtdm_lock_t rtcan_socket_lock;
+extern struct list_head rtcan_socket_list;
+
+extern void rtcan_socket_init(struct rtdm_fd *fd);
+extern void rtcan_socket_cleanup(struct rtdm_fd *fd);
+
+
+#endif  /* __RTCAN_SOCKET_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h
new file mode 100644
index 0000000..c2ced56
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_version.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __RTCAN_VERSION_H_
+#define __RTCAN_VERSION_H_
+
+#define RTCAN_MAJOR_VER    0
+#define RTCAN_MINOR_VER   90
+#define RTCAN_BUGFIX_VER   2
+
+#endif /* __RTCAN_VERSION_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c
new file mode 100644
index 0000000..28e06a9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/rtcan_virt.c
@@ -0,0 +1,199 @@
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#include <linux/module.h>
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+#include "rtcan_dev.h"
+#include "rtcan_raw.h"
+
+#define RTCAN_DEV_NAME          "rtcan%d"
+#define RTCAN_DRV_NAME          "VIRT"
+#define RTCAN_MAX_VIRT_DEVS     8
+
+#define VIRT_TX_BUFS            1
+
+static char *virt_ctlr_name  = "<virtual>";
+static char *virt_board_name = "<virtual>";
+
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_DESCRIPTION("Virtual RT-Socket-CAN driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int devices = 2;
+
+module_param(devices, uint, 0400);
+MODULE_PARM_DESC(devices, "Number of devices on the virtual bus");
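+
+/* Example (a sketch, not from the original source): loading the module
+ * with devices=4 puts four CAN interfaces on one shared virtual bus. */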
+
+static struct rtcan_device *rtcan_virt_devs[RTCAN_MAX_VIRT_DEVS];
+
+
+static int rtcan_virt_start_xmit(struct rtcan_device *tx_dev,
+				 can_frame_t *tx_frame)
+{
+	int i;
+	struct rtcan_device *rx_dev;
+	struct rtcan_skb skb;
+	struct rtcan_rb_frame *rx_frame = &skb.rb_frame;
+	rtdm_lockctx_t lock_ctx;
+
+	/* we can transmit immediately again */
+	rtdm_sem_up(&tx_dev->tx_sem);
+	tx_dev->tx_count++;
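+	/* There is no transmit latency on the virtual bus, which is why the
+	 * single TX credit (VIRT_TX_BUFS) can be handed back right away. */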
+
+	skb.rb_frame_size = EMPTY_RB_FRAME_SIZE;
+
+	rx_frame->can_dlc = tx_frame->can_dlc;
+	rx_frame->can_id  = tx_frame->can_id;
+
+	if (!(tx_frame->can_id & CAN_RTR_FLAG)) {
+		memcpy(rx_frame->data, tx_frame->data, tx_frame->can_dlc);
+		skb.rb_frame_size += tx_frame->can_dlc;
+	}
+
+	rtdm_lock_get_irqsave(&rtcan_recv_list_lock, lock_ctx);
+	rtdm_lock_get(&rtcan_socket_lock);
+
+
+	/* Deliver to all other devices on the virtual bus */
+	for (i = 0; i < devices; i++) {
+		rx_dev = rtcan_virt_devs[i];
+		if (rx_dev->state == CAN_STATE_ACTIVE) {
+			if (tx_dev != rx_dev) {
+				rx_frame->can_ifindex = rx_dev->ifindex;
+				rtcan_rcv(rx_dev, &skb);
+			} else if (rtcan_loopback_pending(tx_dev))
+				rtcan_loopback(tx_dev);
+		}
+	}
+	rtdm_lock_put(&rtcan_socket_lock);
+	rtdm_lock_put_irqrestore(&rtcan_recv_list_lock, lock_ctx);
+
+	return 0;
+}
+
+
+static int rtcan_virt_set_mode(struct rtcan_device *dev, can_mode_t mode,
+			       rtdm_lockctx_t *lock_ctx)
+{
+	int err = 0;
+
+	switch (mode) {
+	case CAN_MODE_STOP:
+		dev->state = CAN_STATE_STOPPED;
+		/* Wake up waiting senders */
+		rtdm_sem_destroy(&dev->tx_sem);
+		break;
+
+	case CAN_MODE_START:
+		rtdm_sem_init(&dev->tx_sem, VIRT_TX_BUFS);
+		dev->state = CAN_STATE_ACTIVE;
+		break;
+
+	default:
+		err = -EOPNOTSUPP;
+	}
+
+	return err;
+}
+
+
+static int __init rtcan_virt_init_one(int idx)
+{
+	struct rtcan_device *dev;
+	int err;
+
+	if ((dev = rtcan_dev_alloc(0, 0)) == NULL)
+		return -ENOMEM;
+
+	dev->ctrl_name = virt_ctlr_name;
+	dev->board_name = virt_board_name;
+
+	rtcan_virt_set_mode(dev, CAN_MODE_STOP, NULL);
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	dev->hard_start_xmit = rtcan_virt_start_xmit;
+	dev->do_set_mode = rtcan_virt_set_mode;
+
+	/* Register RTDM device */
+	err = rtcan_dev_register(dev);
+	if (err) {
+		printk(KERN_ERR "ERROR %d while trying to register RTCAN device!\n", err);
+		goto error_out;
+	}
+
+	/* Remember initialized devices */
+	rtcan_virt_devs[idx] = dev;
+
+	printk("%s: %s driver loaded\n", dev->name, RTCAN_DRV_NAME);
+
+	return 0;
+
+ error_out:
+	rtcan_dev_free(dev);
+	return err;
+}
+
+
+/** Init module */
+static int __init rtcan_virt_init(void)
+{
+	int i, err = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	/* Clamp the parameter so the rtcan_virt_devs[] array (and the exit
+	 * loop) can never be overrun by an oversized "devices" value. */
+	if (devices > RTCAN_MAX_VIRT_DEVS)
+		devices = RTCAN_MAX_VIRT_DEVS;
+
+	for (i = 0; i < devices; i++) {
+		err = rtcan_virt_init_one(i);
+		if (err) {
+			while (--i >= 0) {
+				struct rtcan_device *dev = rtcan_virt_devs[i];
+
+				rtcan_dev_unregister(dev);
+				rtcan_dev_free(dev);
+			}
+			break;
+		}
+	}
+
+	return err;
+}
+
+
+/** Cleanup module */
+static void __exit rtcan_virt_exit(void)
+{
+	int i;
+	struct rtcan_device *dev;
+
+	for (i = 0; i < devices; i++) {
+		dev = rtcan_virt_devs[i];
+
+		printk("Unloading %s device %s\n", RTCAN_DRV_NAME, dev->name);
+
+		rtcan_virt_set_mode(dev, CAN_MODE_STOP, NULL);
+		rtcan_dev_unregister(dev);
+		rtcan_dev_free(dev);
+	}
+}
+
+module_init(rtcan_virt_init);
+module_exit(rtcan_virt_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig
new file mode 100644
index 0000000..9fab4a4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Kconfig
@@ -0,0 +1,100 @@
+config XENO_DRIVERS_CAN_SJA1000
+	depends on XENO_DRIVERS_CAN
+	tristate "Philips SJA1000 CAN controller"
+	select XENO_DRIVERS_CAN_BUS_ERR
+
+config XENO_DRIVERS_CAN_SJA1000_ISA
+	depends on XENO_DRIVERS_CAN_SJA1000
+	tristate "Standard ISA controllers"
+	help
+
+	This driver is for CAN devices connected to the ISA bus of a PC
+	or a PC/104 system. The I/O port, interrupt number and a few other
+	hardware specific parameters can be defined via module parameters.
+
+config XENO_DRIVERS_CAN_SJA1000_MEM
+	depends on XENO_DRIVERS_CAN_SJA1000
+	tristate "Memory mapped controllers"
+	help
+
+	This driver is for memory mapped CAN devices. The memory address,
+	interrupt number and a few other hardware specific parameters can
+	be defined via module parameters.
+
+config XENO_DRIVERS_CAN_SJA1000_PEAK_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "PEAK PCI Card"
+	help
+
+	This driver is for the PCAN PCI, the PC-PCI CAN plug-in card (1 or
+	2 channel) from PEAK Systems (http://www.peak-system.com). To get
+	the second channel working, Xenomai's shared interrupt support
+	must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_IXXAT_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "IXXAT PCI Card"
+	help
+
+	This driver is for the IXXAT PC-I 04/PCI card (1 or 2 channel)
+	from the IXXAT Automation GmbH (http://www.ixxat.de). To get
+	the second channel working, Xenomai's shared interrupt support
+	must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_ADV_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "ADVANTECH PCI Cards"
+	help
+
+	This driver is for the ADVANTECH PCI cards (1 or more channels).
+	It supports the 1680U and some other models.
+
+config XENO_DRIVERS_CAN_SJA1000_PLX_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "PLX90xx PCI-bridge based Cards"
+	help
+
+	This driver is for CAN interface cards based on
+	the PLX90xx PCI bridge.
+	The driver currently supports:
+	 - Adlink PCI-7841/cPCI-7841 card (http://www.adlinktech.com/)
+	 - Adlink PCI-7841/cPCI-7841 SE card
+	 - esd CAN-PCI/CPCI/PCI104/200 (http://www.esd.eu/)
+	 - esd CAN-PCI/PMC/266
+	 - esd CAN-PCIe/2000
+	 - Marathon CAN-bus-PCI card (http://www.marathon.ru/)
+	 - TEWS TECHNOLOGIES TPMC810 card (http://www.tews.com/)
+
+config XENO_DRIVERS_CAN_SJA1000_EMS_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "EMS CPC PCI Card"
+	help
+
+	This driver is for the 2 channel CPC PCI card from EMS Dr. Thomas
+	Wünsche (http://www.ems-wuensche.de). To get the second channel
+	working, Xenomai's shared interrupt support must be enabled.
+
+config XENO_DRIVERS_CAN_SJA1000_ESD_PCI
+	depends on XENO_DRIVERS_CAN_SJA1000 && PCI
+	tristate "ESD PCI Cards (DEPRECATED)"
+	help
+
+	This driver supports the esd PCI CAN cards CAN-PCI/200,
+	CAN-PCI/266, CAN-PMC/266 (PMC), CAN-CPCI/200 (CompactPCI),
+	CAN-PCIe2000 (PCI Express) and CAN-PCI104/200 (PCI104)
+	from the esd electronic system design gmbh (http://www.esd.eu).
+
+	This driver is deprecated. Its functionality is now provided by
+	the "PLX90xx PCI-bridge based Cards" driver.
+
+config XENO_DRIVERS_CAN_SJA1000_PEAK_DNG
+	depends on XENO_DRIVERS_CAN_SJA1000 && !PARPORT
+	tristate "PEAK Parallel Port Dongle"
+	help
+
+	This driver is for the PCAN Dongle, the PC parallel port to CAN
+	converter from PEAK Systems (http://www.peak-system.com). You need
+	to disable parallel port support in the kernel (CONFIG_PARPORT) for
+	proper operation. The interface type (sp or epp), I/O port and
+	interrupt number should be defined via module parameters.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile
new file mode 100644
index 0000000..ff67155
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/Makefile
@@ -0,0 +1,24 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/can -I$(srctree)/drivers/xenomai/can/sja1000
+
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000) += xeno_can_sja1000.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PEAK_PCI) += xeno_can_peak_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PEAK_DNG) += xeno_can_peak_dng.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_PLX_PCI) += xeno_can_plx_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_IXXAT_PCI) += xeno_can_ixxat_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ADV_PCI) += xeno_can_adv_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_EMS_PCI) += xeno_can_ems_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ESD_PCI) += xeno_can_esd_pci.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_ISA) += xeno_can_isa.o
+obj-$(CONFIG_XENO_DRIVERS_CAN_SJA1000_MEM) += xeno_can_mem.o
+
+xeno_can_sja1000-y := rtcan_sja1000.o
+xeno_can_sja1000-$(CONFIG_FS_PROCFS) += rtcan_sja1000_proc.o
+xeno_can_peak_pci-y := rtcan_peak_pci.o
+xeno_can_peak_dng-y := rtcan_peak_dng.o
+xeno_can_plx_pci-y := rtcan_plx_pci.o
+xeno_can_ixxat_pci-y := rtcan_ixxat_pci.o
+xeno_can_adv_pci-y := rtcan_adv_pci.o
+xeno_can_ems_pci-y := rtcan_ems_pci.o
+xeno_can_esd_pci-y := rtcan_esd_pci.o
+xeno_can_isa-y := rtcan_isa.o
+xeno_can_mem-y := rtcan_mem.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c
new file mode 100644
index 0000000..f09be05
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_adv_pci.c
@@ -0,0 +1,360 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2012 Thierry Bultel <thierry.bultel@basystemes.fr>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+#define ADV_PCI_BASE_SIZE	0x80
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "ADV-PCI-CAN"
+
+static char *adv_pci_board_name = "ADV-PCI";
+
+MODULE_AUTHOR("Thierry Bultel <thierry.bultel@basystemes.fr>");
+MODULE_DESCRIPTION("RTCAN board driver for Advantech PCI cards");
+MODULE_LICENSE("GPL");
+
+struct rtcan_adv_pci {
+	struct pci_dev *pci_dev;
+	struct rtcan_device *slave_dev;
+	void __iomem *conf_addr;
+	void __iomem *base_addr;
+};
+
+/*
+ * According to the datasheet,
+ * internal clock is 1/2 of the external oscillator frequency
+ * which is 16 MHz
+ */
+#define ADV_PCI_CAN_CLOCK (16000000 / 2)
+
+/*
+ * Output control register.
+ * Depends on the board configuration.
+ */
+
+#define ADV_PCI_OCR (SJA_OCR_MODE_NORMAL	|\
+		     SJA_OCR_TX0_PUSHPULL	|\
+		     SJA_OCR_TX1_PUSHPULL	|\
+		     SJA_OCR_TX1_INVERT)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ */
+#define ADV_PCI_CDR (SJA_CDR_CBP | SJA_CDR_CAN_MODE)
+
+#define ADV_PCI_VENDOR_ID 0x13fe
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define ADV_PCI_DEVICE(device_id)\
+	{ ADV_PCI_VENDOR_ID, device_id, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0 }
+
+static const struct pci_device_id adv_pci_tbl[] = {
+	ADV_PCI_DEVICE(0x1680),
+	ADV_PCI_DEVICE(0x3680),
+	ADV_PCI_DEVICE(0x2052),
+	ADV_PCI_DEVICE(0x1681),
+	ADV_PCI_DEVICE(0xc001),
+	ADV_PCI_DEVICE(0xc002),
+	ADV_PCI_DEVICE(0xc004),
+	ADV_PCI_DEVICE(0xc101),
+	ADV_PCI_DEVICE(0xc102),
+	ADV_PCI_DEVICE(0xc104),
+	/* required last entry */
+	{ }
+};
+
+MODULE_DEVICE_TABLE(pci, adv_pci_tbl);
+
+static u8 rtcan_adv_pci_read_reg(struct rtcan_device *dev, int port)
+{
+	struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	return ioread8(board->base_addr + port);
+}
+
+static void rtcan_adv_pci_write_reg(struct rtcan_device *dev, int port, u8 data)
+{
+	struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	iowrite8(data, board->base_addr + port);
+}
+
+static void rtcan_adv_pci_del_chan(struct pci_dev *pdev,
+				   struct rtcan_device *dev)
+{
+	struct rtcan_adv_pci *board;
+
+	if (!dev)
+		return;
+
+	board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	rtcan_sja1000_unregister(dev);
+
+	pci_iounmap(pdev, board->base_addr);
+
+	rtcan_dev_free(dev);
+}
+
+
+static int rtcan_adv_pci_add_chan(struct pci_dev *pdev,
+				  int channel,
+				  unsigned int bar,
+				  unsigned int offset,
+				  struct rtcan_device **master_dev)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_adv_pci *board;
+	void __iomem *base_addr;
+	int ret;
+
+	dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			      sizeof(struct rtcan_adv_pci));
+	if (dev == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	if (channel == CHANNEL_SLAVE) {
+		struct rtcan_adv_pci *master_board =
+			(struct rtcan_adv_pci *)(*master_dev)->board_priv;
+		master_board->slave_dev = dev;
+
+		if (offset) {
+			base_addr = master_board->base_addr+offset;
+		} else {
+			base_addr = pci_iomap(pdev, bar, ADV_PCI_BASE_SIZE);
+			if (!base_addr) {
+				ret = -EIO;
+				goto failure;
+			}
+		}
+	} else {
+		base_addr = pci_iomap(pdev, bar, ADV_PCI_BASE_SIZE);
+		if (!base_addr) {
+			ret = -EIO;
+			goto failure;
+		}
+		/* Apply the channel offset only after the NULL check;
+		 * otherwise a failed pci_iomap() would escape detection. */
+		base_addr += offset;
+	}
+
+	board->pci_dev = pdev;
+	board->conf_addr = NULL;
+	board->base_addr = base_addr;
+
+	dev->board_name = adv_pci_board_name;
+
+	chip->read_reg = rtcan_adv_pci_read_reg;
+	chip->write_reg = rtcan_adv_pci_write_reg;
+
+	/* Clock frequency in Hz */
+	dev->can_sys_clock = ADV_PCI_CAN_CLOCK;
+
+	/* Output control register */
+	chip->ocr = ADV_PCI_OCR;
+
+	/* Clock divider register */
+	chip->cdr = ADV_PCI_CDR;
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	/* Make sure SJA1000 is in reset mode */
+	chip->write_reg(dev, SJA_MOD, SJA_MOD_RM);
+	/* Set PeliCAN mode */
+	chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE);
+
+	/* check if mode is set */
+	ret = chip->read_reg(dev, SJA_CDR);
+	if (ret != SJA_CDR_CAN_MODE) {
+		ret = -EIO;
+		goto failure_iounmap;
+	}
+
+	/* Register and setup interrupt handling */
+	chip->irq_flags = RTDM_IRQTYPE_SHARED;
+	chip->irq_num = pdev->irq;
+
+	RTCAN_DBG("%s: base_addr=%p conf_addr=%p irq=%d ocr=%#x cdr=%#x\n",
+		   RTCAN_DRV_NAME, board->base_addr, board->conf_addr,
+		   chip->irq_num, chip->ocr, chip->cdr);
+
+	/* Register SJA1000 device */
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 device!\n",
+		       ret);
+		goto failure_iounmap;
+	}
+
+	if (channel != CHANNEL_SLAVE)
+		*master_dev = dev;
+
+	return 0;
+
+failure_iounmap:
+	if (channel != CHANNEL_SLAVE || !offset)
+		pci_iounmap(pdev, base_addr);
+failure:
+	rtcan_dev_free(dev);
+
+	return ret;
+}
+
+static int adv_pci_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret, channel;
+	unsigned int nb_ports = 0;
+	unsigned int bar = 0;
+	unsigned int bar_flag = 0;
+	unsigned int offset = 0;
+	unsigned int ix;
+
+	struct rtcan_device *master_dev = NULL;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	dev_info(&pdev->dev, "RTCAN registering card\n");
+
+	ret = pci_enable_device(pdev);
+	if (ret)
+		goto failure;
+
+	dev_info(&pdev->dev, "RTCAN detected Advantech PCI card at slot #%i\n",
+		 PCI_SLOT(pdev->devfn));
+
+	ret = pci_request_regions(pdev, RTCAN_DRV_NAME);
+	if (ret)
+		goto failure_device;
+
+	switch (pdev->device) {
+	case 0xc001:
+	case 0xc002:
+	case 0xc004:
+	case 0xc101:
+	case 0xc102:
+	case 0xc104:
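+		/* For this family the low three bits of the device ID encode
+		 * the channel count, e.g. 0xc002 -> 2 and 0xc104 -> 4
+		 * (inferred from the ID table above). */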
+		nb_ports = pdev->device & 0x7;
+		offset = 0x100;
+		bar = 0;
+		break;
+	case 0x1680:
+	case 0x2052:
+		nb_ports = 2;
+		bar = 2;
+		bar_flag = 1;
+		break;
+	case 0x1681:
+		nb_ports = 1;
+		bar = 2;
+		bar_flag = 1;
+		break;
+	default:
+		goto failure_regions;
+	}
+
+	if (nb_ports > 1)
+		channel = CHANNEL_MASTER;
+	else
+		channel = CHANNEL_SINGLE;
+
+	RTCAN_DBG("%s: Initializing device %04x:%04x:%04x\n",
+		   RTCAN_DRV_NAME,
+		   pdev->vendor,
+		   pdev->device,
+		   pdev->subsystem_device);
+
+	ret = rtcan_adv_pci_add_chan(pdev, channel, bar, offset, &master_dev);
+	if (ret)
+		goto failure_iounmap;
+
+	/* register slave channel, if any */
+
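+	/* Depending on the board family, the additional channels live either
+	 * in successive BARs (bar_flag set) or at multiples of "offset"
+	 * within the same BAR. */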
+	for (ix = 1; ix < nb_ports; ix++) {
+		ret = rtcan_adv_pci_add_chan(pdev,
+					     CHANNEL_SLAVE,
+					     bar + (bar_flag ? ix : 0),
+					     offset * ix,
+					     &master_dev);
+		if (ret)
+			goto failure_iounmap;
+	}
+
+	pci_set_drvdata(pdev, master_dev);
+
+	return 0;
+
+failure_iounmap:
+	if (master_dev)
+		rtcan_adv_pci_del_chan(pdev, master_dev);
+
+failure_regions:
+	pci_release_regions(pdev);
+
+failure_device:
+	pci_disable_device(pdev);
+
+failure:
+	return ret;
+}
+
+static void adv_pci_remove_one(struct pci_dev *pdev)
+{
+	struct rtcan_device *dev = pci_get_drvdata(pdev);
+	struct rtcan_adv_pci *board = (struct rtcan_adv_pci *)dev->board_priv;
+
+	if (board->slave_dev)
+		rtcan_adv_pci_del_chan(pdev, board->slave_dev);
+
+	rtcan_adv_pci_del_chan(pdev, dev);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_adv_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = adv_pci_tbl,
+	.probe = adv_pci_init_one,
+	.remove = adv_pci_remove_one,
+};
+
+module_pci_driver(rtcan_adv_pci_driver);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c
new file mode 100644
index 0000000..2e3001b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ems_pci.c
@@ -0,0 +1,393 @@
+/*
+ * Copyright (C) 2007, 2016 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ * Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * Derived from Linux CAN SJA1000 PCI driver "ems_pci".
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, see <http://www.gnu.org/licenses/>.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "EMS-CPC-PCI-CAN"
+
+static char *ems_pci_board_name = "EMS-CPC-PCI";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for EMS CPC-PCI/PCIe/104P CAN cards");
+MODULE_LICENSE("GPL v2");
+
+#define EMS_PCI_V1_MAX_CHAN 2
+#define EMS_PCI_V2_MAX_CHAN 4
+#define EMS_PCI_MAX_CHAN    EMS_PCI_V2_MAX_CHAN
+
+struct ems_pci_card {
+	int version;
+	int channels;
+
+	struct pci_dev *pci_dev;
+	struct rtcan_device *rtcan_dev[EMS_PCI_MAX_CHAN];
+
+	void __iomem *conf_addr;
+	void __iomem *base_addr;
+};
+
+#define EMS_PCI_CAN_CLOCK (16000000 / 2)
+
+/*
+ * Register definitions and descriptions are from LinCAN 0.3.3.
+ *
+ * PSB4610 PITA-2 bridge control registers
+ */
+#define PITA2_ICR           0x00	/* Interrupt Control Register */
+#define PITA2_ICR_INT0      0x00000002	/* [RC] INT0 Active/Clear */
+#define PITA2_ICR_INT0_EN   0x00020000	/* [RW] Enable INT0 */
+
+#define PITA2_MISC          0x1c	/* Miscellaneous Register */
+#define PITA2_MISC_CONFIG   0x04000000	/* Multiplexed parallel interface */
+
+/*
+ * Register definitions for the PLX 9030
+ */
+#define PLX_ICSR            0x4c   /* Interrupt Control/Status register */
+#define PLX_ICSR_LINTI1_ENA 0x0001 /* LINTi1 Enable */
+#define PLX_ICSR_PCIINT_ENA 0x0040 /* PCI Interrupt Enable */
+#define PLX_ICSR_LINTI1_CLR 0x0400 /* Local Edge Triggerable Interrupt Clear */
+#define PLX_ICSR_ENA_CLR    (PLX_ICSR_LINTI1_ENA | PLX_ICSR_PCIINT_ENA | \
+			     PLX_ICSR_LINTI1_CLR)
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define EMS_PCI_OCR         (SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the CLKOUT output of the first one.
+ */
+#define EMS_PCI_CDR             (SJA_CDR_CBP | SJA_CDR_CLKOUT_MASK)
+
+#define EMS_PCI_V1_BASE_BAR     1
+#define EMS_PCI_V1_CONF_SIZE    4096 /* size of PITA control area */
+#define EMS_PCI_V2_BASE_BAR     2
+#define EMS_PCI_V2_CONF_SIZE    128 /* size of PLX control area */
+#define EMS_PCI_CAN_BASE_OFFSET 0x400 /* offset where the controllers starts */
+#define EMS_PCI_CAN_CTRL_SIZE   0x200 /* memory size for each controller */
+
+#define EMS_PCI_BASE_SIZE  4096 /* size of controller area */
+
+static const struct pci_device_id ems_pci_tbl[] = {
+	/* CPC-PCI v1 */
+	{PCI_VENDOR_ID_SIEMENS, 0x2104, PCI_ANY_ID, PCI_ANY_ID,},
+	/* CPC-PCI v2 */
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4000},
+	/* CPC-104P v2 */
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030, PCI_VENDOR_ID_PLX, 0x4002},
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, ems_pci_tbl);
+
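+/*
+ * Note on register access: v1 (PITA-2) boards expose each SJA1000
+ * register on a 4-byte stride behind the multiplexed parallel
+ * interface, hence the "port * 4" in the v1 accessors below; v2 (PLX)
+ * boards map the registers contiguously.
+ */
+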
+/*
+ * Helper to read internal registers from card logic (not CAN)
+ */
+static u8 ems_pci_v1_readb(struct ems_pci_card *card, unsigned int port)
+{
+	return readb((void __iomem *)card->base_addr + (port * 4));
+}
+
+static u8 ems_pci_v1_read_reg(struct rtcan_device *dev, int port)
+{
+	return readb((void __iomem *)dev->base_addr + (port * 4));
+}
+
+static void ems_pci_v1_write_reg(struct rtcan_device *dev,
+				 int port, u8 val)
+{
+	writeb(val, (void __iomem *)dev->base_addr + (port * 4));
+}
+
+static void ems_pci_v1_post_irq(struct rtcan_device *dev)
+{
+	struct ems_pci_card *card = (struct ems_pci_card *)dev->board_priv;
+
+	/* reset int flag of pita */
+	writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
+	       card->conf_addr + PITA2_ICR);
+}
+
+static u8 ems_pci_v2_read_reg(struct rtcan_device *dev, int port)
+{
+	return readb((void __iomem *)dev->base_addr + port);
+}
+
+static void ems_pci_v2_write_reg(struct rtcan_device *dev,
+				 int port, u8 val)
+{
+	writeb(val, (void __iomem *)dev->base_addr + port);
+}
+
+static void ems_pci_v2_post_irq(struct rtcan_device *dev)
+{
+	struct ems_pci_card *card = (struct ems_pci_card *)dev->board_priv;
+
+	writel(PLX_ICSR_ENA_CLR, card->conf_addr + PLX_ICSR);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch it into PeliCAN mode
+ */
+static inline int ems_pci_check_chan(struct rtcan_device *dev)
+{
+	struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+	unsigned char res;
+
+	/* Make sure SJA1000 is in reset mode */
+	chip->write_reg(dev, SJA_MOD, 1);
+
+	chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE);
+
+	/* read reset-values */
+	res = chip->read_reg(dev, SJA_CDR);
+
+	if (res == SJA_CDR_CAN_MODE)
+		return 1;
+
+	return 0;
+}
+
+static void ems_pci_del_card(struct pci_dev *pdev)
+{
+	struct ems_pci_card *card = pci_get_drvdata(pdev);
+	struct rtcan_device *dev;
+	int i = 0;
+
+	for (i = 0; i < card->channels; i++) {
+		dev = card->rtcan_dev[i];
+
+		if (!dev)
+			continue;
+
+		dev_info(&pdev->dev, "Removing %s.\n", dev->name);
+		rtcan_sja1000_unregister(dev);
+		rtcan_dev_free(dev);
+	}
+
+	if (card->base_addr != NULL)
+		pci_iounmap(card->pci_dev, card->base_addr);
+
+	if (card->conf_addr != NULL)
+		pci_iounmap(card->pci_dev, card->conf_addr);
+
+	kfree(card);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static void ems_pci_card_reset(struct ems_pci_card *card)
+{
+	/* Request board reset */
+	writeb(0, card->base_addr);
+}
+
+/*
+ * Probe PCI device for EMS CAN signature and register each available
+ * CAN channel to RTCAN subsystem.
+ */
+static int ems_pci_add_card(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct rtcan_sja1000 *chip;
+	struct rtcan_device *dev;
+	struct ems_pci_card *card;
+	int max_chan, conf_size, base_bar;
+	int err, i;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	/* Enabling PCI device */
+	if (pci_enable_device(pdev) < 0) {
+		dev_err(&pdev->dev, "Enabling PCI device failed\n");
+		return -ENODEV;
+	}
+
+	/* Allocating card structures to hold addresses, ... */
+	card = kzalloc(sizeof(*card), GFP_KERNEL);
+	if (card == NULL) {
+		pci_disable_device(pdev);
+		return -ENOMEM;
+	}
+
+	pci_set_drvdata(pdev, card);
+
+	card->pci_dev = pdev;
+
+	card->channels = 0;
+
+	if (pdev->vendor == PCI_VENDOR_ID_PLX) {
+		card->version = 2; /* CPC-PCI v2 */
+		max_chan = EMS_PCI_V2_MAX_CHAN;
+		base_bar = EMS_PCI_V2_BASE_BAR;
+		conf_size = EMS_PCI_V2_CONF_SIZE;
+	} else {
+		card->version = 1; /* CPC-PCI v1 */
+		max_chan = EMS_PCI_V1_MAX_CHAN;
+		base_bar = EMS_PCI_V1_BASE_BAR;
+		conf_size = EMS_PCI_V1_CONF_SIZE;
+	}
+
+	/* Remap configuration space and controller memory area */
+	card->conf_addr = pci_iomap(pdev, 0, conf_size);
+	if (card->conf_addr == NULL) {
+		err = -ENOMEM;
+		goto failure_cleanup;
+	}
+
+	card->base_addr = pci_iomap(pdev, base_bar, EMS_PCI_BASE_SIZE);
+	if (card->base_addr == NULL) {
+		err = -ENOMEM;
+		goto failure_cleanup;
+	}
+
+	if (card->version == 1) {
+		/* Configure PITA-2 parallel interface (enable MUX) */
+		writel(PITA2_MISC_CONFIG, card->conf_addr + PITA2_MISC);
+
+		/* Check for unique EMS CAN signature */
+		if (ems_pci_v1_readb(card, 0) != 0x55 ||
+		    ems_pci_v1_readb(card, 1) != 0xAA ||
+		    ems_pci_v1_readb(card, 2) != 0x01 ||
+		    ems_pci_v1_readb(card, 3) != 0xCB ||
+		    ems_pci_v1_readb(card, 4) != 0x11) {
+			dev_err(&pdev->dev,
+				"Not EMS Dr. Thomas Wuensche interface\n");
+			err = -ENODEV;
+			goto failure_cleanup;
+		}
+	}
+
+	ems_pci_card_reset(card);
+
+	for (i = 0; i < max_chan; i++) {
+		dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000), 0);
+		if (!dev) {
+			err = -ENOMEM;
+			goto failure_cleanup;
+		}
+
+		strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+		dev->board_name = ems_pci_board_name;
+		dev->board_priv = card;
+
+		card->rtcan_dev[i] = dev;
+		chip = card->rtcan_dev[i]->priv;
+		chip->irq_flags = RTDM_IRQTYPE_SHARED;
+		chip->irq_num = pdev->irq;
+
+		dev->base_addr = (unsigned long)card->base_addr +
+			EMS_PCI_CAN_BASE_OFFSET + (i * EMS_PCI_CAN_CTRL_SIZE);
+		if (card->version == 1) {
+			chip->read_reg  = ems_pci_v1_read_reg;
+			chip->write_reg = ems_pci_v1_write_reg;
+			chip->irq_ack = ems_pci_v1_post_irq;
+		} else {
+			chip->read_reg  = ems_pci_v2_read_reg;
+			chip->write_reg = ems_pci_v2_write_reg;
+			chip->irq_ack = ems_pci_v2_post_irq;
+		}
+
+		/* Check if channel is present */
+		if (ems_pci_check_chan(dev)) {
+			dev->can_sys_clock = EMS_PCI_CAN_CLOCK;
+			chip->ocr = EMS_PCI_OCR | SJA_OCR_MODE_NORMAL;
+			chip->cdr = EMS_PCI_CDR | SJA_CDR_CAN_MODE;
+
+			if (card->version == 1)
+				/* reset int flag of pita */
+				writel(PITA2_ICR_INT0_EN | PITA2_ICR_INT0,
+				       card->conf_addr + PITA2_ICR);
+			else
+				/* enable IRQ in PLX 9030 */
+				writel(PLX_ICSR_ENA_CLR,
+				       card->conf_addr + PLX_ICSR);
+
+			/* Register SJA1000 device */
+			err = rtcan_sja1000_register(dev);
+			if (err) {
+				dev_err(&pdev->dev, "Registering device failed "
+					"(err=%d)\n", err);
+				rtcan_dev_free(dev);
+				goto failure_cleanup;
+			}
+
+			card->channels++;
+
+			dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+				 "registered as %s\n", i + 1,
+				 (void __iomem *)dev->base_addr, chip->irq_num,
+				 dev->name);
+		} else {
+			dev_err(&pdev->dev, "Channel #%d not detected\n",
+				i + 1);
+			rtcan_dev_free(dev);
+		}
+	}
+
+	if (!card->channels) {
+		err = -ENODEV;
+		goto failure_cleanup;
+	}
+
+	return 0;
+
+failure_cleanup:
+	dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+	ems_pci_del_card(pdev);
+
+	return err;
+}
+
+static struct pci_driver ems_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = ems_pci_tbl,
+	.probe = ems_pci_add_card,
+	.remove = ems_pci_del_card,
+};
+
+module_pci_driver(ems_pci_driver);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c
new file mode 100644
index 0000000..2b5a19c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_esd_pci.c
@@ -0,0 +1,344 @@
+/*
+ * Copyright (C) 2009 Sebastian Smolorz <sesmo@gmx.net>
+ *
+ * This driver is based on the Socket-CAN driver esd_pci.c,
+ * Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2008 Sascha Hauer <s.hauer@pengutronix.de>, Pengutronix
+ * Copyright (C) 2009 Matthias Fuchs <matthias.fuchs@esd.eu>, esd gmbh
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "ESD-PCI-CAN"
+
+static char *esd_pci_board_name = "ESD-PCI";
+
+MODULE_AUTHOR("Sebastian Smolorz <sesmo@gmx.net");
+MODULE_DESCRIPTION("RTCAN board driver for esd PCI/PMC/CPCI/PCIe/PCI104 " \
+		   "CAN cards");
+MODULE_LICENSE("GPL v2");
+
+struct rtcan_esd_pci {
+	struct pci_dev *pci_dev;
+	struct rtcan_device *slave_dev;
+	void __iomem *conf_addr;
+	void __iomem *base_addr;
+};
+
+#define ESD_PCI_CAN_CLOCK	(16000000 / 2)
+
+#define ESD_PCI_OCR		(SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL | \
+				 SJA_OCR_TX1_INVERT | SJA_OCR_MODE_CLOCK)
+#define ESD_PCI_CDR		(SJA_CDR_CLK_OFF | SJA_CDR_CBP | \
+				 SJA_CDR_CAN_MODE)
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define CHANNEL_OFFSET		0x100
+
+#define INTCSR_OFFSET		0x4c /* Offset in PLX9050 conf registers */
+#define INTCSR_LINTI1		(1 << 0)
+#define INTCSR_PCI		(1 << 6)
+
+#define INTCSR9056_OFFSET	0x68 /* Offset in PLX9056 conf registers */
+#define INTCSR9056_LINTI	(1 << 11)
+#define INTCSR9056_PCI		(1 << 8)
+
+#ifndef PCI_DEVICE_ID_PLX_9056
+# define PCI_DEVICE_ID_PLX_9056 0x9056
+#endif
+
+/* PCI subsystem IDs of esd's SJA1000 based CAN cards */
+
+/* CAN-PCI/200: PCI, 33MHz only, bridge: PLX9050 */
+#define ESD_PCI_SUB_SYS_ID_PCI200	0x0004
+
+/* CAN-PCI/266: PCI, 33/66MHz, bridge: PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PCI266	0x0009
+
+/* CAN-PMC/266: PMC module, 33/66MHz, bridge: PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PMC266	0x000e
+
+/* CAN-CPCI/200: Compact PCI, 33MHz only, bridge: PLX9030 */
+#define ESD_PCI_SUB_SYS_ID_CPCI200	0x010b
+
+/* CAN-PCIE/2000: PCI Express 1x, bridge: PEX8311 = PEX8111 + PLX9056 */
+#define ESD_PCI_SUB_SYS_ID_PCIE2000	0x0200
+
+/* CAN-PCI/104: PCI104 module, 33MHz only, bridge: PLX9030 */
+#define ESD_PCI_SUB_SYS_ID_PCI104200	0x0501
+
+static struct pci_device_id esd_pci_tbl[] = {
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000},
+	{PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+	 PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200},
+	{0,}
+};
+
+#define ESD_PCI_BASE_SIZE  0x200
+
+MODULE_DEVICE_TABLE(pci, esd_pci_tbl);
+
+
+static u8 rtcan_esd_pci_read_reg(struct rtcan_device *dev, int port)
+{
+	struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+	return readb(board->base_addr + port);
+}
+
+static void rtcan_esd_pci_write_reg(struct rtcan_device *dev, int port, u8 val)
+{
+	struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+	writeb(val, board->base_addr + port);
+}
+
+static void rtcan_esd_pci_del_chan(struct rtcan_device *dev)
+{
+	struct rtcan_esd_pci *board;
+
+	if (!dev)
+		return;
+
+	board = (struct rtcan_esd_pci *)dev->board_priv;
+
+	printk("Removing %s %s device %s\n",
+		esd_pci_board_name, dev->ctrl_name, dev->name);
+
+	rtcan_sja1000_unregister(dev);
+
+	rtcan_dev_free(dev);
+}
+
+static int rtcan_esd_pci_add_chan(struct pci_dev *pdev, int channel,
+				  struct rtcan_device **master_dev,
+				  void __iomem *conf_addr,
+				  void __iomem *base_addr)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_esd_pci *board;
+	int ret;
+
+	dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			      sizeof(struct rtcan_esd_pci));
+	if (dev == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_esd_pci *)dev->board_priv;
+
+	board->pci_dev = pdev;
+	board->conf_addr = conf_addr;
+	board->base_addr = base_addr;
+
+	if (channel == CHANNEL_SLAVE) {
+		struct rtcan_esd_pci *master_board =
+			(struct rtcan_esd_pci *)(*master_dev)->board_priv;
+		master_board->slave_dev = dev;
+	}
+
+	dev->board_name = esd_pci_board_name;
+
+	chip->read_reg = rtcan_esd_pci_read_reg;
+	chip->write_reg = rtcan_esd_pci_write_reg;
+
+	dev->can_sys_clock = ESD_PCI_CAN_CLOCK;
+
+	chip->ocr = ESD_PCI_OCR;
+	chip->cdr = ESD_PCI_CDR;
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	chip->irq_flags = RTDM_IRQTYPE_SHARED;
+	chip->irq_num = pdev->irq;
+
+	RTCAN_DBG("%s: base_addr=0x%p conf_addr=0x%p irq=%d ocr=%#x cdr=%#x\n",
+		  RTCAN_DRV_NAME, board->base_addr, board->conf_addr,
+		  chip->irq_num, chip->ocr, chip->cdr);
+
+	/* Register SJA1000 device */
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 "
+				"device!\n", ret);
+		goto failure;
+	}
+
+	if (channel != CHANNEL_SLAVE)
+		*master_dev = dev;
+
+	return 0;
+
+
+failure:
+	rtcan_dev_free(dev);
+	return ret;
+}
+
+static int esd_pci_init_one(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	int ret, channel;
+	void __iomem *base_addr;
+	void __iomem *conf_addr;
+	struct rtcan_device *master_dev = NULL;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	if ((ret = pci_enable_device (pdev)))
+		goto failure;
+
+	if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME)))
+		goto failure;
+
+	RTCAN_DBG("%s: Initializing device %04x:%04x %04x:%04x\n",
+		 RTCAN_DRV_NAME, pdev->vendor, pdev->device,
+		 pdev->subsystem_vendor, pdev->subsystem_device);
+
+	conf_addr = pci_iomap(pdev, 0, ESD_PCI_BASE_SIZE);
+	if (conf_addr == NULL) {
+		ret = -ENODEV;
+		goto failure_release_pci;
+	}
+
+	base_addr = pci_iomap(pdev, 2, ESD_PCI_BASE_SIZE);
+	if (base_addr == NULL) {
+		ret = -ENODEV;
+		goto failure_iounmap_conf;
+	}
+
+	/* Check if second channel is available */
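+	/* The check below writes known bit patterns into the mode register
+	 * of the would-be second controller and compares what reads back
+	 * (0x21, then 0x3f); only a really present SJA1000 latches the bits
+	 * this way (the 0x20 bit is a reserved mode bit that apparently
+	 * reads back as 1 on these boards). */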
+	writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+	writeb(SJA_CDR_CBP, base_addr + CHANNEL_OFFSET + SJA_CDR);
+	writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+	if (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) == 0x21) {
+		writeb(SJA_MOD_SM | SJA_MOD_AFM | SJA_MOD_STM | SJA_MOD_LOM |
+		       SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+		if (readb(base_addr + CHANNEL_OFFSET + SJA_MOD) == 0x3f)
+			channel = CHANNEL_MASTER;
+		else {
+			writeb(SJA_MOD_RM,
+				base_addr + CHANNEL_OFFSET + SJA_MOD);
+			channel = CHANNEL_SINGLE;
+		}
+	} else {
+		writeb(SJA_MOD_RM, base_addr + CHANNEL_OFFSET + SJA_MOD);
+		channel = CHANNEL_SINGLE;
+	}
+
+	if ((ret = rtcan_esd_pci_add_chan(pdev, channel, &master_dev,
+						conf_addr, base_addr)))
+		goto failure_iounmap_base;
+
+	if (channel != CHANNEL_SINGLE) {
+		channel = CHANNEL_SLAVE;
+		if ((ret = rtcan_esd_pci_add_chan(pdev, channel, &master_dev,
+				      conf_addr, base_addr + CHANNEL_OFFSET)))
+			goto failure_iounmap_base;
+	}
+
+	if ((pdev->device == PCI_DEVICE_ID_PLX_9050) ||
+	    (pdev->device == PCI_DEVICE_ID_PLX_9030)) {
+		/* Enable interrupts in PLX9050 */
+		writel(INTCSR_LINTI1 | INTCSR_PCI, conf_addr + INTCSR_OFFSET);
+	} else {
+		/* Enable interrupts in PLX9056 */
+		writel(INTCSR9056_LINTI | INTCSR9056_PCI,
+					conf_addr + INTCSR9056_OFFSET);
+	}
+
+	pci_set_drvdata(pdev, master_dev);
+
+	return 0;
+
+
+failure_iounmap_base:
+	if (master_dev)
+		rtcan_esd_pci_del_chan(master_dev);
+	pci_iounmap(pdev, base_addr);
+
+failure_iounmap_conf:
+	pci_iounmap(pdev, conf_addr);
+
+failure_release_pci:
+	pci_release_regions(pdev);
+
+failure:
+	return ret;
+}
+
+static void esd_pci_remove_one(struct pci_dev *pdev)
+{
+	struct rtcan_device *dev = pci_get_drvdata(pdev);
+	struct rtcan_esd_pci *board = (struct rtcan_esd_pci *)dev->board_priv;
+
+	if ((pdev->device == PCI_DEVICE_ID_PLX_9050) ||
+	    (pdev->device == PCI_DEVICE_ID_PLX_9030)) {
+		/* Disable interrupts in PLX9050 */
+		writel(0, board->conf_addr + INTCSR_OFFSET);
+	} else {
+		/* Disable interrupts in PLX9056 */
+		writel(0, board->conf_addr + INTCSR9056_OFFSET);
+	}
+
+	if (board->slave_dev)
+		rtcan_esd_pci_del_chan(board->slave_dev);
+	rtcan_esd_pci_del_chan(dev);
+
+
+	pci_iounmap(pdev, board->base_addr);
+	pci_iounmap(pdev, board->conf_addr);
+
+	pci_release_regions(pdev);
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_esd_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = esd_pci_tbl,
+	.probe = esd_pci_init_one,
+	.remove = esd_pci_remove_one,
+};
+
+module_pci_driver(rtcan_esd_pci_driver);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c
new file mode 100644
index 0000000..a0e49fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_isa.c
@@ -0,0 +1,200 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006, 2009 Sebastian Smolorz
+ *                               <smolorz@rts.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "sja1000-isa"
+
+#define RTCAN_ISA_MAX_DEV 4
+
+static char *isa_board_name = "ISA-Board";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for standard ISA boards");
+MODULE_LICENSE("GPL");
+
+static u16 io[RTCAN_ISA_MAX_DEV];
+static int irq[RTCAN_ISA_MAX_DEV];
+static u32 can_clock[RTCAN_ISA_MAX_DEV];
+static u8 ocr[RTCAN_ISA_MAX_DEV];
+static u8 cdr[RTCAN_ISA_MAX_DEV];
+
+module_param_array(io, ushort, NULL, 0444);
+module_param_array(irq, int, NULL, 0444);
+module_param_array(can_clock, uint, NULL, 0444);
+module_param_array(ocr, byte, NULL, 0444);
+module_param_array(cdr, byte, NULL, 0444);
+
+MODULE_PARM_DESC(io, "The io-port address");
+MODULE_PARM_DESC(irq, "The interrupt number");
+MODULE_PARM_DESC(can_clock, "External clock frequency (default 16 MHz)");
+MODULE_PARM_DESC(ocr, "Value of output control register (default 0x1a)");
+MODULE_PARM_DESC(cdr, "Value of clock divider register (default 0xc8)");
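+
+/*
+ * Typical use (a sketch; the module name comes from the Makefile):
+ *
+ *   insmod xeno_can_isa.ko io=0x200,0x240 irq=5,7
+ *
+ * Unset can_clock/ocr/cdr entries fall back to the defaults above.
+ */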
+
+#define RTCAN_ISA_PORT_SIZE 32
+
+struct rtcan_isa
+{
+	u16 io;
+};
+
+static struct rtcan_device *rtcan_isa_devs[RTCAN_ISA_MAX_DEV];
+
+static u8 rtcan_isa_readreg(struct rtcan_device *dev, int port)
+{
+	struct rtcan_isa *board = (struct rtcan_isa *)dev->board_priv;
+	return inb(board->io + port);
+}
+
+static void rtcan_isa_writereg(struct rtcan_device *dev, int port, u8 val)
+{
+	struct rtcan_isa *board = (struct rtcan_isa *)dev->board_priv;
+	outb(val, board->io + port);
+}
+
+
+int __init rtcan_isa_init_one(int idx)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_isa *board;
+	int ret;
+
+	if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+				   sizeof(struct rtcan_isa))) == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_isa *)dev->board_priv;
+
+	dev->board_name = isa_board_name;
+
+	board->io = io[idx];
+
+	chip->irq_num = irq[idx];
+	chip->irq_flags = RTDM_IRQTYPE_SHARED | RTDM_IRQTYPE_EDGE;
+
+	chip->read_reg = rtcan_isa_readreg;
+	chip->write_reg = rtcan_isa_writereg;
+
+	/* Check and request I/O ports */
+	if (!request_region(board->io, RTCAN_ISA_PORT_SIZE, RTCAN_DRV_NAME)) {
+		ret = -EBUSY;
+		goto out_dev_free;
+	}
+
+	/* Clock frequency in Hz */
+	if (can_clock[idx])
+		dev->can_sys_clock = can_clock[idx] / 2;
+	else
+		dev->can_sys_clock = 8000000; /* 16/2 MHz */
+
+	/* Output control register */
+	if (ocr[idx])
+		chip->ocr = ocr[idx];
+	else
+		chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+	if (cdr[idx])
+		chip->cdr = cdr[idx];
+	else
+		chip->cdr = SJA_CDR_CAN_MODE | SJA_CDR_CLK_OFF | SJA_CDR_CBP;
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 "
+		       "device!\n", ret);
+		goto out_free_region;
+	}
+
+	rtcan_isa_devs[idx] = dev;
+	return 0;
+
+ out_free_region:
+	release_region(board->io, RTCAN_ISA_PORT_SIZE);
+
+ out_dev_free:
+	rtcan_dev_free(dev);
+
+	return ret;
+}
+
+static void rtcan_isa_exit(void);
+
+/** Init module */
+static int __init rtcan_isa_init(void)
+{
+	int i, err;
+	int devices = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (i = 0; i < RTCAN_ISA_MAX_DEV && io[i] != 0; i++) {
+		err = rtcan_isa_init_one(i);
+		if (err) {
+			rtcan_isa_exit();
+			return err;
+		}
+		devices++;
+	}
+	if (devices)
+		return 0;
+
+	printk(KERN_ERR "ERROR! No devices specified! "
+	       "Use io=<port1>[,...] irq=<irq1>[,...]\n");
+	return -EINVAL;
+}
+
+
+/** Cleanup module */
+static void rtcan_isa_exit(void)
+{
+	int i;
+	struct rtcan_device *dev;
+
+	for (i = 0; i < RTCAN_ISA_MAX_DEV; i++) {
+		dev = rtcan_isa_devs[i];
+		if (!dev)
+			continue;
+		rtcan_sja1000_unregister(dev);
+		release_region(io[i], RTCAN_ISA_PORT_SIZE);
+		rtcan_dev_free(dev);
+	}
+}
+
+module_init(rtcan_isa_init);
+module_exit(rtcan_isa_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c
new file mode 100644
index 0000000..9c5197f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_ixxat_pci.c
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME "rtcan%d"
+#define RTCAN_DRV_NAME "IXXAT-PCI-CAN"
+
+static char *ixxat_pci_board_name = "IXXAT-PCI";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for IXXAT-PCI cards");
+MODULE_LICENSE("GPL");
+
+struct rtcan_ixxat_pci
+{
+    struct pci_dev *pci_dev;
+    struct rtcan_device *slave_dev;
+    int conf_addr;
+    void __iomem *base_addr;
+};
+
+#define IXXAT_PCI_CAN_SYS_CLOCK (16000000 / 2)
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+#define CHANNEL_OFFSET       0x200
+#define CHANNEL_MASTER_RESET 0x110
+#define CHANNEL_SLAVE_RESET  (CHANNEL_MASTER_RESET + CHANNEL_OFFSET)
+
+#define IXXAT_INTCSR_OFFSET  0x4c /* Offset in PLX9050 conf registers */
+#define IXXAT_INTCSR_SLAVE   0x41 /* LINT1 and PCI interrupt enabled */
+#define IXXAT_INTCSR_MASTER  0x08 /* LINT2 enabled */
+#define IXXAT_SJA_MOD_MASK   0xa1 /* Mask for reading dual/single channel */
+
+/* PCI vendor, device and sub-device ID */
+#define IXXAT_PCI_VENDOR_ID  0x10b5
+#define IXXAT_PCI_DEVICE_ID  0x9050
+#define IXXAT_PCI_SUB_SYS_ID 0x2540
+
+#define IXXAT_CONF_PORT_SIZE 0x0080
+#define IXXAT_BASE_PORT_SIZE 0x0400
+
+static struct pci_device_id ixxat_pci_tbl[] = {
+	{IXXAT_PCI_VENDOR_ID, IXXAT_PCI_DEVICE_ID,
+	 IXXAT_PCI_VENDOR_ID, IXXAT_PCI_SUB_SYS_ID, 0, 0, 0},
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, ixxat_pci_tbl);
+
+
+static u8 rtcan_ixxat_pci_read_reg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv;
+    return readb(board->base_addr + port);
+}
+
+static void rtcan_ixxat_pci_write_reg(struct rtcan_device *dev, int port, u8 data)
+{
+    struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv;
+    writeb(data, board->base_addr + port);
+}
+
+static void rtcan_ixxat_pci_del_chan(struct rtcan_device *dev)
+{
+    struct rtcan_ixxat_pci *board;
+    u8 intcsr;
+
+    if (!dev)
+	return;
+
+    board = (struct rtcan_ixxat_pci *)dev->board_priv;
+
+    printk("Removing %s %s device %s\n",
+	   ixxat_pci_board_name, dev->ctrl_name, dev->name);
+
+    rtcan_sja1000_unregister(dev);
+
+    /* Disable PCI interrupts */
+    intcsr = inb(board->conf_addr + IXXAT_INTCSR_OFFSET);
+    if (board->slave_dev) {
+	intcsr &= ~IXXAT_INTCSR_MASTER;
+	outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET);
+	writeb(0x1, board->base_addr + CHANNEL_MASTER_RESET);
+	iounmap(board->base_addr);
+    } else {
+	intcsr &= ~IXXAT_INTCSR_SLAVE;
+	outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET);
+	writeb(0x1, board->base_addr + CHANNEL_SLAVE_RESET);
+    }
+    rtcan_dev_free(dev);
+}
+
+static int rtcan_ixxat_pci_add_chan(struct pci_dev *pdev,
+				    int channel,
+				    struct rtcan_device **master_dev,
+				    int conf_addr,
+				    void __iomem *base_addr)
+{
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *chip;
+    struct rtcan_ixxat_pci *board;
+    u8 intcsr;
+    int ret;
+
+    dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			  sizeof(struct rtcan_ixxat_pci));
+    if (dev == NULL)
+	return -ENOMEM;
+
+    chip = (struct rtcan_sja1000 *)dev->priv;
+    board = (struct rtcan_ixxat_pci *)dev->board_priv;
+
+    board->pci_dev = pdev;
+    board->conf_addr = conf_addr;
+    board->base_addr = base_addr;
+
+    if (channel == CHANNEL_SLAVE) {
+	struct rtcan_ixxat_pci *master_board =
+	    (struct rtcan_ixxat_pci *)(*master_dev)->board_priv;
+	master_board->slave_dev = dev;
+    }
+
+    dev->board_name = ixxat_pci_board_name;
+
+    chip->read_reg = rtcan_ixxat_pci_read_reg;
+    chip->write_reg = rtcan_ixxat_pci_write_reg;
+
+    /* Clock frequency in Hz */
+    dev->can_sys_clock = IXXAT_PCI_CAN_SYS_CLOCK;
+
+    /* Output control register */
+    chip->ocr = (SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_INVERT |
+		 SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL);
+
+    /* Clock divider register */
+    chip->cdr = SJA_CDR_CAN_MODE;
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    /* Enable PCI interrupts */
+    intcsr = inb(board->conf_addr + IXXAT_INTCSR_OFFSET);
+    if (channel == CHANNEL_SLAVE)
+	intcsr |= IXXAT_INTCSR_SLAVE;
+    else
+	intcsr |= IXXAT_INTCSR_MASTER;
+    outb(intcsr, board->conf_addr + IXXAT_INTCSR_OFFSET);
+
+    /* Register and setup interrupt handling */
+    chip->irq_flags = RTDM_IRQTYPE_SHARED;
+    chip->irq_num = pdev->irq;
+
+    RTCAN_DBG("%s: base_addr=0x%p conf_addr=%#x irq=%d ocr=%#x cdr=%#x\n",
+	      RTCAN_DRV_NAME, board->base_addr, board->conf_addr,
+	      chip->irq_num, chip->ocr, chip->cdr);
+
+    /* Register SJA1000 device */
+    ret = rtcan_sja1000_register(dev);
+    if (ret) {
+	printk(KERN_ERR "ERROR %d while trying to register SJA1000 device!\n",
+	       ret);
+	goto failure;
+    }
+
+    if (channel != CHANNEL_SLAVE)
+	*master_dev = dev;
+
+    return 0;
+
+ failure:
+    rtcan_dev_free(dev);
+    return ret;
+}
+
+static int ixxat_pci_init_one(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+    int ret, channel, conf_addr;
+    unsigned long addr;
+    void __iomem *base_addr;
+    struct rtcan_device *master_dev = NULL;
+
+    if (!rtdm_available())
+	return -ENODEV;
+
+    if ((ret = pci_enable_device (pdev)))
+	goto failure;
+
+    if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME)))
+	goto failure;
+
+    RTCAN_DBG("%s: Initializing device %04x:%04x:%04x\n",
+	      RTCAN_DRV_NAME, pdev->vendor, pdev->device,
+	      pdev->subsystem_device);
+
+    /* Enable memory and I/O space */
+    if ((ret = pci_write_config_word(pdev, 0x04, 0x3)))
+	goto failure_release_pci;
+
+    conf_addr = pci_resource_start(pdev, 1);
+
+    addr = pci_resource_start(pdev, 2);
+    base_addr = ioremap(addr, IXXAT_BASE_PORT_SIZE);
+    if (!base_addr) {
+	ret = -ENODEV;
+	goto failure_release_pci;
+    }
+
+    /* Check if second channel is available after reset */
+    writeb(0x1, base_addr + CHANNEL_MASTER_RESET);
+    writeb(0x1, base_addr + CHANNEL_SLAVE_RESET);
+    udelay(100);
+    if ((readb(base_addr + CHANNEL_OFFSET + SJA_MOD) & IXXAT_SJA_MOD_MASK) != 0x21 ||
+	readb(base_addr + CHANNEL_OFFSET + SJA_SR) != 0x0c ||
+	readb(base_addr + CHANNEL_OFFSET + SJA_IR) != 0xe0)
+	channel = CHANNEL_SINGLE;
+    else
+	channel = CHANNEL_MASTER;
+
+    if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel, &master_dev,
+					conf_addr, base_addr)))
+	goto failure_iounmap;
+
+    if (channel != CHANNEL_SINGLE) {
+	channel = CHANNEL_SLAVE;
+	if ((ret = rtcan_ixxat_pci_add_chan(pdev, channel,
+					    &master_dev, conf_addr,
+					    base_addr + CHANNEL_OFFSET)))
+	    goto failure_iounmap;
+    }
+
+    pci_set_drvdata(pdev, master_dev);
+    return 0;
+
+failure_iounmap:
+    if (master_dev)
+	rtcan_ixxat_pci_del_chan(master_dev);
+    iounmap(base_addr);
+
+failure_release_pci:
+    pci_release_regions(pdev);
+
+failure:
+    return ret;
+}
+
+static void ixxat_pci_remove_one(struct pci_dev *pdev)
+{
+    struct rtcan_device *dev = pci_get_drvdata(pdev);
+    struct rtcan_ixxat_pci *board = (struct rtcan_ixxat_pci *)dev->board_priv;
+
+    if (board->slave_dev)
+	rtcan_ixxat_pci_del_chan(board->slave_dev);
+    rtcan_ixxat_pci_del_chan(dev);
+
+    pci_release_regions(pdev);
+    pci_disable_device(pdev);
+    pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_ixxat_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = ixxat_pci_tbl,
+	.probe = ixxat_pci_init_one,
+	.remove = ixxat_pci_remove_one,
+};
+
+module_pci_driver(rtcan_ixxat_pci_driver);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c
new file mode 100644
index 0000000..965735e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_mem.c
@@ -0,0 +1,215 @@
+/*
+ * Copyright (C) 2006 Matthias Fuchs <matthias.fuchs@esd-electronics.com>,
+ *                    Jan Kiszka <jan.kiszka@web.de>
+ *
+ * RTCAN driver for memory mapped SJA1000 CAN controller
+ * This code has been tested on esd's CPCI405/EPPC405 PPC405 systems.
+ *
+ * This driver is derived from the rtcan-isa driver by
+ * Wolfgang Grandegger and Sebastian Smolorz.
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "sja1000-mem"
+
+#define RTCAN_MEM_MAX_DEV 4
+
+static char *mem_board_name = "mem mapped";
+
+MODULE_AUTHOR("Matthias Fuchs <matthias.fuchs@esd-electronics.com>");
+MODULE_DESCRIPTION("RTCAN driver for memory mapped SJA1000 controller");
+MODULE_LICENSE("GPL");
+
+static u32 mem[RTCAN_MEM_MAX_DEV];
+static int irq[RTCAN_MEM_MAX_DEV];
+static u32 can_clock[RTCAN_MEM_MAX_DEV];
+static u8 ocr[RTCAN_MEM_MAX_DEV];
+static u8 cdr[RTCAN_MEM_MAX_DEV];
+
+module_param_array(mem, uint, NULL, 0444);
+module_param_array(irq, int, NULL, 0444);
+module_param_array(can_clock, uint, NULL, 0444);
+module_param_array(ocr, byte, NULL, 0444);
+module_param_array(cdr, byte, NULL, 0444);
+
+MODULE_PARM_DESC(mem, "The io-memory address");
+MODULE_PARM_DESC(irq, "The interrupt number");
+MODULE_PARM_DESC(can_clock, "External clock frequency (default 16 MHz)");
+MODULE_PARM_DESC(ocr, "Value of output control register (default 0x1a)");
+MODULE_PARM_DESC(cdr, "Value of clock divider register (default 0xc8)");
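+/* Parameters are given per device, e.g. mem=0xd0000,0xd4000 irq=10,11
+ * (example values; the actual address and IRQ are board-specific). */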
+
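+/* I/O memory window per controller (the SJA1000 itself has only 32 registers) */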
+#define RTCAN_MEM_RANGE 0x80
+
+struct rtcan_mem
+{
+	volatile void __iomem *vmem;
+};
+
+static struct rtcan_device *rtcan_mem_devs[RTCAN_MEM_MAX_DEV];
+
+static u8 rtcan_mem_readreg(struct rtcan_device *dev, int reg)
+{
+	struct rtcan_mem *board = (struct rtcan_mem *)dev->board_priv;
+	return readb(board->vmem + reg);
+}
+
+static void rtcan_mem_writereg(struct rtcan_device *dev, int reg, u8 val)
+{
+	struct rtcan_mem *board = (struct rtcan_mem *)dev->board_priv;
+	writeb(val, board->vmem + reg);
+}
+
+int __init rtcan_mem_init_one(int idx)
+{
+	struct rtcan_device *dev;
+	struct rtcan_sja1000 *chip;
+	struct rtcan_mem *board;
+	int ret;
+
+	if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+				   sizeof(struct rtcan_mem))) == NULL)
+		return -ENOMEM;
+
+	chip = (struct rtcan_sja1000 *)dev->priv;
+	board = (struct rtcan_mem *)dev->board_priv;
+
+	dev->board_name = mem_board_name;
+
+	chip->irq_num = irq[idx];
+	chip->irq_flags = RTDM_IRQTYPE_SHARED;
+	chip->read_reg = rtcan_mem_readreg;
+	chip->write_reg = rtcan_mem_writereg;
+
+	if (!request_mem_region(mem[idx], RTCAN_MEM_RANGE, RTCAN_DRV_NAME)) {
+		ret = -EBUSY;
+		goto out_dev_free;
+	}
+
+	/* ioremap io memory */
+	if (!(board->vmem = ioremap(mem[idx], RTCAN_MEM_RANGE))) {
+		ret = -EBUSY;
+		goto out_release_mem;
+	}
+
+	/* Clock frequency in Hz */
+	if (can_clock[idx])
+		dev->can_sys_clock = can_clock[idx] / 2;
+	else
+		dev->can_sys_clock = 8000000; /* 16/2 MHz */
+
+	/* Output control register */
+	if (ocr[idx])
+		chip->ocr = ocr[idx];
+	else
+		chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+	if (cdr[idx])
+		chip->cdr = cdr[idx];
+	else
+		chip->cdr = SJA_CDR_CAN_MODE | SJA_CDR_CLK_OFF | SJA_CDR_CBP;
+
+	strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+	ret = rtcan_sja1000_register(dev);
+	if (ret) {
+		printk(KERN_ERR "ERROR %d while trying to register SJA1000 "
+		       "device!\n", ret);
+		goto out_iounmap;
+	}
+
+	rtcan_mem_devs[idx] = dev;
+	return 0;
+
+ out_iounmap:
+	iounmap((void *)board->vmem);
+
+ out_release_mem:
+	release_mem_region(mem[idx], RTCAN_MEM_RANGE);
+
+ out_dev_free:
+	rtcan_dev_free(dev);
+
+	return ret;
+}
+
+static void rtcan_mem_exit(void);
+
+/** Init module */
+static int __init rtcan_mem_init(void)
+{
+	int i, err;
+	int devices = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (i = 0; i < RTCAN_MEM_MAX_DEV && mem[i] != 0; i++) {
+		err = rtcan_mem_init_one(i);
+		if (err) {
+			rtcan_mem_exit();
+			return err;
+		}
+		devices++;
+	}
+	if (devices)
+		return 0;
+
+	printk(KERN_ERR "ERROR! No devices specified! "
+	       "Use mem=<port1>[,...] irq=<irq1>[,...]\n");
+	return -EINVAL;
+}
+
+
+/** Cleanup module */
+static void rtcan_mem_exit(void)
+{
+	int i;
+	struct rtcan_device *dev;
+	volatile void __iomem *vmem;
+
+	for (i = 0; i < RTCAN_MEM_MAX_DEV; i++) {
+		dev = rtcan_mem_devs[i];
+		if (!dev)
+			continue;
+		vmem = ((struct rtcan_mem *)dev->board_priv)->vmem;
+		rtcan_sja1000_unregister(dev);
+		iounmap((void *)vmem);
+		release_mem_region(mem[i], RTCAN_MEM_RANGE);
+		rtcan_dev_free(dev);
+	}
+}
+
+module_init(rtcan_mem_init);
+module_exit(rtcan_mem_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c
new file mode 100644
index 0000000..d1e3f44
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_dng.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from the PCAN project file driver/src/pcan_dongle.c:
+ *
+ * Copyright (C) 2001-2006  PEAK System-Technik GmbH
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/pnp.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "PEAK-Dongle"
+
+#define RTCAN_PEAK_DNG_MAX_DEV 1
+
+static char *dongle_board_name = "PEAK-Dongle";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for PEAK-Dongle");
+MODULE_LICENSE("GPL");
+
+static char   *type[RTCAN_PEAK_DNG_MAX_DEV];
+static ushort io[RTCAN_PEAK_DNG_MAX_DEV];
+static char   irq[RTCAN_PEAK_DNG_MAX_DEV];
+
+module_param_array(type, charp,  NULL, 0444);
+module_param_array(io,   ushort, NULL, 0444);
+module_param_array(irq,  byte,   NULL, 0444);
+
+MODULE_PARM_DESC(type, "The type of interface (sp, epp)");
+MODULE_PARM_DESC(io,   "The io-port address");
+MODULE_PARM_DESC(irq,  "The interrupt number");
+
+#define DONGLE_TYPE_SP  0
+#define DONGLE_TYPE_EPP 1
+
+#define DNG_PORT_SIZE            4  /* the address range of the dongle-port */
+#define ECR_PORT_SIZE            1  /* size of the associated ECR register */
+
+struct rtcan_peak_dng
+{
+    u16  ioport;
+    u16  ecr;      /* ECR register in case of EPP */
+    u8   old_data; /* the overwritten contents of the port registers */
+    u8   old_ctrl;
+    u8   old_ecr;
+    u8   type;
+};
+
+static struct rtcan_device *rtcan_peak_dng_devs[RTCAN_PEAK_DNG_MAX_DEV];
+
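+/* Default base addresses and IRQs of the standard PC parallel ports */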
+static u16 dng_ports[] = {0x378, 0x278, 0x3bc, 0x2bc};
+static u8  dng_irqs[]  = {7, 5, 7, 5};
+
+static unsigned char nibble_decode[32] =
+{
+    0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
+    0x8, 0x9, 0xa, 0xb, 0xc, 0xd, 0xe, 0xf,
+    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7,
+    0x0, 0x1, 0x2, 0x3, 0x4, 0x5, 0x6, 0x7
+};
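+/*
+ * The table above maps the five status bits (inb(pb) >> 3) to a data
+ * nibble: S3..S5 supply bits 0..2, the hardware-inverted BUSY line
+ * supplies bit 3 (hence the mirrored halves), and S6 is ignored.
+ */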
+
+/* Enable and disable irqs */
+static inline void rtcan_parport_disable_irq(u32 port)
+{
+    u32 pc = port + 2;
+    outb(inb(pc) & ~0x10, pc);
+}
+
+static inline void rtcan_parport_enable_irq(u32 port)
+{
+    u32 pc = port + 2;
+    outb(inb(pc) | 0x10, pc);
+}
+
+/* Functions for SP port */
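+/*
+ * In SP mode the register address is written via the data lines and the
+ * value is read back nibble-wise through the status lines; the XORed
+ * constants below toggle the control lines clocking each phase.
+ */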
+static u8 rtcan_peak_dng_sp_readreg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 pa = dng->ioport;
+    u32 pb = pa + 1;
+    u32 pc = pb + 1;
+    u8  b0, b1 ;
+    u8  irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */
+
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+    outb((port & 0x1F) | 0x80, pa);
+    outb((0x0B ^ 0x0C) | irq_enable, pc);
+    b1=nibble_decode[inb(pb)>>3];
+    outb(0x40, pa);
+    b0=nibble_decode[inb(pb)>>3];
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+
+    return  (b1 << 4) | b0 ;
+}
+
+static void rtcan_peak_dng_writereg(struct rtcan_device *dev, int port, u8 data)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 pa = dng->ioport;
+    u32 pc = pa + 2;
+    u8  irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */
+
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+    outb(port & 0x1F, pa);
+    outb((0x0B ^ 0x0C) | irq_enable, pc);
+    outb(data, pa);
+    outb((0x0B ^ 0x0D) | irq_enable, pc);
+}
+
+/* Functions for EPP port */
+static u8 rtcan_peak_dng_epp_readreg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 pa = dng->ioport;
+    u32 pc = pa + 2;
+    u8  val;
+    u8  irq_enable = inb(pc) & 0x10; /* don't influence irq_enable */
+
+    outb((0x0B ^ 0x0F) | irq_enable, pc);
+    outb((port & 0x1F) | 0x80, pa);
+    outb((0x0B ^ 0x2E) | irq_enable, pc);
+    val = inb(pa);
+    outb((0x0B ^ 0x0F) | irq_enable, pc);
+
+    return val;
+}
+
+
+/* to switch epp on or restore register */
+static void dongle_set_ecr(u16 port, struct rtcan_peak_dng *dng)
+{
+    u32 ecr = dng->ecr;
+
+    dng->old_ecr = inb(ecr);
+    outb((dng->old_ecr & 0x1F) | 0x20, ecr);
+
+    if (dng->old_ecr == 0xff)
+	printk(KERN_DEBUG "%s: is ECP mode really configured?\n", RTCAN_DRV_NAME);
+}
+
+static void dongle_restore_ecr(u16 port, struct rtcan_peak_dng *dng)
+{
+    u32 ecr = dng->ecr;
+
+    outb(dng->old_ecr, ecr);
+
+    printk(KERN_DEBUG "%s: restore ECR\n", RTCAN_DRV_NAME);
+}
+
+static inline void rtcan_peak_dng_enable(struct rtcan_device *dev)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 port = dng->ioport;
+
+    /* save old port contents */
+    dng->old_data = inb(port);
+    dng->old_ctrl = inb(port + 2);
+
+    /* switch to epp mode if possible */
+    if (dng->type == DONGLE_TYPE_EPP)
+	dongle_set_ecr(port, dng);
+
+    rtcan_parport_enable_irq(port);
+}
+
+static inline void rtcan_peak_dng_disable(struct rtcan_device *dev)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+    u32 port = dng->ioport;
+
+    rtcan_parport_disable_irq(port);
+
+    if (dng->type == DONGLE_TYPE_EPP)
+	dongle_restore_ecr(port, dng);
+
+    /* restore port state */
+    outb(dng->old_data, port);
+    outb(dng->old_ctrl, port + 2);
+}
+
+/** Init module */
+int __init rtcan_peak_dng_init_one(int idx)
+{
+    int ret, dtype;
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *sja;
+    struct rtcan_peak_dng *dng;
+
+    if (strncmp(type[idx], "sp", 2) == 0)
+	dtype = DONGLE_TYPE_SP;
+    else if (strncmp(type[idx], "epp", 3) == 0)
+	dtype = DONGLE_TYPE_EPP;
+    else {
+	printk("%s: type %s is invalid, use \"sp\" or \"epp\".",
+	       RTCAN_DRV_NAME, type[idx]);
+	return -EINVAL;
+    }
+
+    if ((dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			       sizeof(struct rtcan_peak_dng))) == NULL)
+	return -ENOMEM;
+
+    sja = (struct rtcan_sja1000 *)dev->priv;
+    dng = (struct rtcan_peak_dng *)dev->board_priv;
+
+    dev->board_name = dongle_board_name;
+
+    if (io[idx])
+	dng->ioport = io[idx];
+    else
+	dng->ioport = dng_ports[idx];
+
+    if (irq[idx])
+	sja->irq_num = irq[idx];
+    else
+	sja->irq_num = dng_irqs[idx];
+    sja->irq_flags = 0;
+
+    if (dtype == DONGLE_TYPE_SP) {
+	sja->read_reg = rtcan_peak_dng_sp_readreg;
+	sja->write_reg = rtcan_peak_dng_writereg;
+	dng->ecr = 0; /* set to anything */
+    } else {
+	sja->read_reg = rtcan_peak_dng_epp_readreg;
+	sja->write_reg = rtcan_peak_dng_writereg;
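+	/* The ECR of an ECP-capable parallel port lives at base + 0x402 */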
+	dng->ecr = dng->ioport + 0x402;
+    }
+    dng->type = dtype;
+
+    /* Check and request I/O ports */
+    if (!request_region(dng->ioport, DNG_PORT_SIZE, RTCAN_DRV_NAME)) {
+	ret = -EBUSY;
+	goto out_dev_free;
+    }
+
+    if (dng->type == DONGLE_TYPE_EPP) {
+	if (!request_region(dng->ecr, ECR_PORT_SIZE, RTCAN_DRV_NAME)) {
+	    ret = -EBUSY;
+	    goto out_free_region;
+	}
+    }
+
+    /* Clock frequency in Hz */
+    dev->can_sys_clock = 8000000;	/* 16/2 MHz */
+
+    /* Output control register */
+    sja->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+    sja->cdr = SJA_CDR_CAN_MODE;
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    rtcan_peak_dng_enable(dev);
+
+    /* Register RTDM device */
+    ret = rtcan_sja1000_register(dev);
+    if (ret) {
+	printk(KERN_ERR "ERROR while trying to register SJA1000 device %d!\n",
+	       ret);
+	goto out_free_region2;
+    }
+
+    rtcan_peak_dng_devs[idx] = dev;
+    return 0;
+
+ out_free_region2:
+    if (dng->type == DONGLE_TYPE_EPP)
+	release_region(dng->ecr, ECR_PORT_SIZE);
+
+ out_free_region:
+    release_region(dng->ioport, DNG_PORT_SIZE);
+
+ out_dev_free:
+    rtcan_dev_free(dev);
+
+    return ret;
+}
+
+void rtcan_peak_dng_exit_one(struct rtcan_device *dev)
+{
+    struct rtcan_peak_dng *dng = (struct rtcan_peak_dng *)dev->board_priv;
+
+    rtcan_sja1000_unregister(dev);
+    rtcan_peak_dng_disable(dev);
+    if (dng->type == DONGLE_TYPE_EPP)
+	release_region(dng->ecr, ECR_PORT_SIZE);
+    release_region(dng->ioport, DNG_PORT_SIZE);
+    rtcan_dev_free(dev);
+}
+
+static const struct pnp_device_id rtcan_peak_dng_pnp_tbl[] = {
+    /* Standard LPT Printer Port */
+    {.id = "PNP0400", .driver_data = 0},
+    /* ECP Printer Port */
+    {.id = "PNP0401", .driver_data = 0},
+    { }
+};
+
+static int rtcan_peak_dng_pnp_probe(struct pnp_dev *dev,
+				    const struct pnp_device_id *id)
+{
+    return 0;
+}
+
+static struct pnp_driver rtcan_peak_dng_pnp_driver = {
+    .name     = RTCAN_DRV_NAME,
+    .id_table = rtcan_peak_dng_pnp_tbl,
+    .probe    = rtcan_peak_dng_pnp_probe,
+};
+
+static int pnp_registered;
+
+/** Cleanup module */
+static void rtcan_peak_dng_exit(void)
+{
+    int i;
+    struct rtcan_device *dev;
+
+    for (i = 0; i < RTCAN_PEAK_DNG_MAX_DEV; i++) {
+	dev = rtcan_peak_dng_devs[i];
+	if (!dev)
+	    break;
+	rtcan_peak_dng_exit_one(dev);
+    }
+
+    if (pnp_registered)
+	pnp_unregister_driver(&rtcan_peak_dng_pnp_driver);
+}
+
+/** Init module */
+static int __init rtcan_peak_dng_init(void)
+{
+    int i, ret = -EINVAL, done = 0;
+
+    if (!rtdm_available())
+	return -ENOSYS;
+
+    if (pnp_register_driver(&rtcan_peak_dng_pnp_driver) == 0)
+	pnp_registered = 1;
+
+    for (i = 0;
+	 i < RTCAN_PEAK_DNG_MAX_DEV && type[i] != NULL;
+	 i++) {
+
+	if ((ret = rtcan_peak_dng_init_one(i)) != 0) {
+	    printk(KERN_ERR "%s: Init failed with %d\n", RTCAN_DRV_NAME, ret);
+	    goto cleanup;
+	}
+	done++;
+    }
+    if (done)
+	return 0;
+
+    printk(KERN_ERR "%s: Please specify type=epp or type=sp\n",
+	   RTCAN_DRV_NAME);
+
+cleanup:
+    rtcan_peak_dng_exit();
+    return ret;
+}
+
+module_init(rtcan_peak_dng_init);
+module_exit(rtcan_peak_dng_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c
new file mode 100644
index 0000000..19f728b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_peak_pci.c
@@ -0,0 +1,361 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Derived from the PCAN project file driver/src/pcan_pci.c:
+ *
+ * Copyright (C) 2001-2006  PEAK System-Technik GmbH
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <asm/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DEV_NAME    "rtcan%d"
+#define RTCAN_DRV_NAME    "PEAK-PCI-CAN"
+
+static char *peak_pci_board_name = "PEAK-PCI";
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@grandegger.com>");
+MODULE_DESCRIPTION("RTCAN board driver for PEAK-PCI cards");
+MODULE_LICENSE("GPL");
+
+struct rtcan_peak_pci
+{
+    struct pci_dev *pci_dev;
+    struct rtcan_device *slave_dev;
+    int channel;
+    volatile void __iomem *base_addr;
+    volatile void __iomem *conf_addr;
+};
+
+#define PEAK_PCI_CAN_SYS_CLOCK (16000000 / 2)
+
+#define PELICAN_SINGLE  (SJA_CDR_CAN_MODE | SJA_CDR_CBP | 0x07 | SJA_CDR_CLK_OFF)
+#define PELICAN_MASTER  (SJA_CDR_CAN_MODE | SJA_CDR_CBP | 0x07            )
+#define PELICAN_DEFAULT (SJA_CDR_CAN_MODE                                 )
+
+#define CHANNEL_SINGLE 0 /* this is a single channel device */
+#define CHANNEL_MASTER 1 /* multi channel device, this device is master */
+#define CHANNEL_SLAVE  2 /* multi channel device, this is slave */
+
+// important PITA registers
+#define PITA_ICR         0x00        // interrupt control register
+#define PITA_GPIOICR     0x18        // general purpose IO interface control register
+#define PITA_MISC        0x1C        // miscellaneous register
+
+#define PEAK_PCI_VENDOR_ID      0x001C  // the PCI device and vendor IDs
+#define PEAK_PCI_DEVICE_ID      0x0001  // Device ID for PCI and older PCIe cards
+#define PEAK_PCIE_DEVICE_ID     0x0003  // Device ID for newer PCIe cards (IPEH-003027)
+#define PEAK_CPCI_DEVICE_ID     0x0004  // for nextgen cPCI slot cards
+#define PEAK_MPCI_DEVICE_ID     0x0005  // for nextgen miniPCI slot cards
+#define PEAK_PC_104P_DEVICE_ID  0x0006  // PCAN-PC/104+ cards
+#define PEAK_PCI_104E_DEVICE_ID 0x0007  // PCAN-PCI/104 Express cards
+#define PEAK_MPCIE_DEVICE_ID    0x0008  // The miniPCIe slot cards
+#define PEAK_PCIE_OEM_ID        0x0009  // PCAN-PCI Express OEM
+
+#define PCI_CONFIG_PORT_SIZE 0x1000  // size of the config io-memory
+#define PCI_PORT_SIZE        0x0400  // size of a channel io-memory
+
+static struct pci_device_id peak_pci_tbl[] = {
+	{PEAK_PCI_VENDOR_ID, PEAK_PCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_MPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_MPCIE_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PC_104P_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCI_104E_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_CPCI_DEVICE_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{PEAK_PCI_VENDOR_ID, PEAK_PCIE_OEM_ID, PCI_ANY_ID, PCI_ANY_ID,},
+	{ }
+};
+MODULE_DEVICE_TABLE(pci, peak_pci_tbl);
+
+
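+/* The PITA bridge maps each SJA1000 register on a 32-bit boundary,
+ * hence the shift by two in the accessors below. */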
+static u8 rtcan_peak_pci_read_reg(struct rtcan_device *dev, int port)
+{
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
+    return readb(board->base_addr + ((unsigned long)port << 2));
+}
+
+static void rtcan_peak_pci_write_reg(struct rtcan_device *dev, int port, u8 data)
+{
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
+    writeb(data, board->base_addr + ((unsigned long)port << 2));
+}
+
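+/*
+ * Pending channel interrupts are latched in the low word of the PITA
+ * ICR (bit 0 = slave channel, bit 1 = master/single channel); writing
+ * the bit back clears it.  The high word holds the matching enables.
+ */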
+static void rtcan_peak_pci_irq_ack(struct rtcan_device *dev)
+{
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
+    u16 pita_icr_low;
+
+    /* Select and clear the interrupt latched in the PITA */
+    pita_icr_low = readw(board->conf_addr + PITA_ICR);
+    if (board->channel == CHANNEL_SLAVE) {
+	if (pita_icr_low & 0x0001)
+	    writew(0x0001, board->conf_addr + PITA_ICR);
+    }
+    else {
+	if (pita_icr_low & 0x0002)
+	    writew(0x0002, board->conf_addr + PITA_ICR);
+    }
+}
+
+static void rtcan_peak_pci_del_chan(struct rtcan_device *dev,
+				    int init_step)
+{
+    struct rtcan_peak_pci *board;
+    u16 pita_icr_high;
+
+    if (!dev)
+	return;
+
+    board = (struct rtcan_peak_pci *)dev->board_priv;
+
+    switch (init_step) {
+    case 0:			/* Full cleanup */
+	printk("Removing %s %s device %s\n",
+	       peak_pci_board_name, dev->ctrl_name, dev->name);
+	rtcan_sja1000_unregister(dev);
+	fallthrough;
+    case 5:
+	pita_icr_high = readw(board->conf_addr + PITA_ICR + 2);
+	if (board->channel == CHANNEL_SLAVE) {
+	    pita_icr_high &= ~0x0001;
+	} else {
+	    pita_icr_high &= ~0x0002;
+	}
+	writew(pita_icr_high, board->conf_addr + PITA_ICR + 2);
+	fallthrough;
+    case 4:
+	iounmap((void *)board->base_addr);
+	fallthrough;
+    case 3:
+	if (board->channel != CHANNEL_SLAVE)
+	    iounmap((void *)board->conf_addr);
+	fallthrough;
+    case 2:
+	rtcan_dev_free(dev);
+	fallthrough;
+    case 1:
+	break;
+    }
+
+}
+
+static int rtcan_peak_pci_add_chan(struct pci_dev *pdev, int channel,
+				   struct rtcan_device **master_dev)
+{
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *chip;
+    struct rtcan_peak_pci *board;
+    u16 pita_icr_high;
+    unsigned long addr;
+    int ret, init_step = 1;
+
+    dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+			  sizeof(struct rtcan_peak_pci));
+    if (dev == NULL)
+	return -ENOMEM;
+    init_step = 2;
+
+    chip = (struct rtcan_sja1000 *)dev->priv;
+    board = (struct rtcan_peak_pci *)dev->board_priv;
+
+    board->pci_dev = pdev;
+    board->channel = channel;
+
+    if (channel != CHANNEL_SLAVE) {
+
+	addr = pci_resource_start(pdev, 0);
+	board->conf_addr = ioremap(addr, PCI_CONFIG_PORT_SIZE);
+	if (board->conf_addr == NULL) {
+	    ret = -ENODEV;
+	    goto failure;
+	}
+	init_step = 3;
+
+	/* Set GPIO control register */
+	writew(0x0005, board->conf_addr + PITA_GPIOICR + 2);
+
+	if (channel == CHANNEL_MASTER)
+	    writeb(0x00, board->conf_addr + PITA_GPIOICR); /* enable both */
+	else
+	    writeb(0x04, board->conf_addr + PITA_GPIOICR); /* enable single */
+
+	writeb(0x05, board->conf_addr + PITA_MISC + 3);  /* toggle reset */
+	mdelay(5);
+	writeb(0x04, board->conf_addr + PITA_MISC + 3);  /* leave parport mux mode */
+    } else {
+	struct rtcan_peak_pci *master_board =
+	    (struct rtcan_peak_pci *)(*master_dev)->board_priv;
+	master_board->slave_dev = dev;
+	board->conf_addr = master_board->conf_addr;
+    }
+
+    addr = pci_resource_start(pdev, 1);
+    if (channel == CHANNEL_SLAVE)
+	addr += 0x400;
+
+    board->base_addr = ioremap(addr, PCI_PORT_SIZE);
+    if (board->base_addr == NULL) {
+	ret = -ENODEV;
+	goto failure;
+    }
+    init_step = 4;
+
+    dev->board_name = peak_pci_board_name;
+
+    chip->read_reg = rtcan_peak_pci_read_reg;
+    chip->write_reg = rtcan_peak_pci_write_reg;
+    chip->irq_ack = rtcan_peak_pci_irq_ack;
+
+    /* Clock frequency in Hz */
+    dev->can_sys_clock = PEAK_PCI_CAN_SYS_CLOCK;
+
+    /* Output control register */
+    chip->ocr = SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL;
+
+    /* Clock divider register */
+    if (channel == CHANNEL_MASTER)
+	chip->cdr = PELICAN_MASTER;
+    else
+	chip->cdr = PELICAN_SINGLE;
+
+    strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+
+    /* Register and setup interrupt handling */
+    chip->irq_flags = RTDM_IRQTYPE_SHARED;
+    chip->irq_num = pdev->irq;
+    pita_icr_high = readw(board->conf_addr + PITA_ICR + 2);
+    if (channel == CHANNEL_SLAVE) {
+	pita_icr_high |= 0x0001;
+    } else {
+	pita_icr_high |= 0x0002;
+    }
+    writew(pita_icr_high, board->conf_addr + PITA_ICR + 2);
+    init_step = 5;
+
+    printk("%s: base_addr=%p conf_addr=%p irq=%d\n", RTCAN_DRV_NAME,
+	   board->base_addr, board->conf_addr, chip->irq_num);
+
+    /* Register SJA1000 device */
+    ret = rtcan_sja1000_register(dev);
+    if (ret) {
+	printk(KERN_ERR
+	       "ERROR %d while trying to register SJA1000 device!\n", ret);
+	goto failure;
+    }
+
+    if (channel != CHANNEL_SLAVE)
+	*master_dev = dev;
+
+    return 0;
+
+ failure:
+    rtcan_peak_pci_del_chan(dev, init_step);
+    return ret;
+}
+
+static int peak_pci_init_one(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+    int ret;
+    u16 sub_sys_id;
+    struct rtcan_device *master_dev = NULL;
+
+    if (!rtdm_available())
+	return -ENODEV;
+
+    printk("%s: initializing device %04x:%04x\n",
+	   RTCAN_DRV_NAME,  pdev->vendor, pdev->device);
+
+    if ((ret = pci_enable_device (pdev)))
+	goto failure;
+
+    if ((ret = pci_request_regions(pdev, RTCAN_DRV_NAME)))
+	goto failure;
+
+    if ((ret = pci_read_config_word(pdev, 0x2e, &sub_sys_id)))
+	goto failure_cleanup;
+
+    /* Enable memory space */
+    if ((ret = pci_write_config_word(pdev, 0x04, 2)))
+	goto failure_cleanup;
+
+    if ((ret = pci_write_config_word(pdev, 0x44, 0)))
+	goto failure_cleanup;
+
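+    /* Sub-system IDs above 3 denote dual-channel cards */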
+    if (sub_sys_id > 3) {
+	if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_MASTER,
+					   &master_dev)))
+	    goto failure_cleanup;
+	if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_SLAVE,
+					   &master_dev)))
+	    goto failure_cleanup;
+    } else {
+	if ((ret = rtcan_peak_pci_add_chan(pdev, CHANNEL_SINGLE,
+					   &master_dev)))
+	    goto failure_cleanup;
+    }
+
+    pci_set_drvdata(pdev, master_dev);
+    return 0;
+
+ failure_cleanup:
+    if (master_dev)
+	rtcan_peak_pci_del_chan(master_dev, 0);
+
+    pci_release_regions(pdev);
+
+ failure:
+    return ret;
+
+}
+
+static void peak_pci_remove_one(struct pci_dev *pdev)
+{
+    struct rtcan_device *dev = pci_get_drvdata(pdev);
+    struct rtcan_peak_pci *board = (struct rtcan_peak_pci *)dev->board_priv;
+
+    if (board->slave_dev)
+	rtcan_peak_pci_del_chan(board->slave_dev, 0);
+    rtcan_peak_pci_del_chan(dev, 0);
+
+    pci_release_regions(pdev);
+    pci_disable_device(pdev);
+    pci_set_drvdata(pdev, NULL);
+}
+
+static struct pci_driver rtcan_peak_pci_driver = {
+	.name		= RTCAN_DRV_NAME,
+	.id_table	= peak_pci_tbl,
+	.probe		= peak_pci_init_one,
+	.remove		= peak_pci_remove_one,
+};
+
+module_pci_driver(rtcan_peak_pci_driver);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c
new file mode 100644
index 0000000..4da14f2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_plx_pci.c
@@ -0,0 +1,593 @@
+/*
+ * Copyright (C) 2008-2010 Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>
+ *
+ * Derived from the ems_pci.c driver:
+ *	Copyright (C) 2007 Wolfgang Grandegger <wg@grandegger.com>
+ *	Copyright (C) 2008 Markus Plessing <plessing@ems-wuensche.com>
+ *	Copyright (C) 2008 Sebastian Haas <haas@ems-wuensche.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the version 2 of the GNU General Public License
+ * as published by the Free Software Foundation
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/io.h>
+
+#include <rtdm/driver.h>
+
+/* CAN device profile */
+#include <rtdm/can.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+#define RTCAN_DRV_NAME "rt_sja1000_plx_pci"
+#define RTCAN_DEV_NAME "rtcan%d"
+
+MODULE_AUTHOR("Pavel Cheblakov <P.B.Cheblakov@inp.nsk.su>");
+MODULE_DESCRIPTION("RTCAN driver for PLX90xx PCI-bridge cards with "
+		   "the SJA1000 chips");
+MODULE_LICENSE("GPL v2");
+
+#define PLX_PCI_MAX_CHAN 2
+
+struct plx_pci_card {
+	int channels;			/* detected channels count */
+	struct rtcan_device *rtcan_dev[PLX_PCI_MAX_CHAN];
+	void __iomem *conf_addr;
+
+	/* Pointer to device-dependent reset function */
+	void (*reset_func)(struct pci_dev *pdev);
+};
+
+#define PLX_PCI_CAN_CLOCK (16000000 / 2)
+
+/* PLX9030/9050/9052 registers */
+#define PLX_INTCSR	0x4c		/* Interrupt Control/Status */
+#define PLX_CNTRL	0x50		/* User I/O, Direct Slave Response,
+					 * Serial EEPROM, and Initialization
+					 * Control register
+					 */
+
+#define PLX_LINT1_EN	0x1		/* Local interrupt 1 enable */
+#define PLX_LINT2_EN	(1 << 3)	/* Local interrupt 2 enable */
+#define PLX_PCI_INT_EN	(1 << 6)	/* PCI Interrupt Enable */
+#define PLX_PCI_RESET	(1 << 30)	/* PCI Adapter Software Reset */
+
+/* PLX9056 registers */
+#define PLX9056_INTCSR	0x68		/* Interrupt Control/Status */
+#define PLX9056_CNTRL	0x6c		/* Control / Software Reset */
+
+#define PLX9056_LINTI	(1 << 11)
+#define PLX9056_PCI_INT_EN (1 << 8)
+#define PLX9056_PCI_RCR	(1 << 29)	/* Read Configuration Registers */
+
+/*
+ * The board configuration is probably as follows:
+ * RX1 is connected to ground.
+ * TX1 is not connected.
+ * CLKO is not connected.
+ * Setting the OCR register to 0xDA is a good idea.
+ * This means normal output mode, push-pull and the correct polarity.
+ */
+#define PLX_PCI_OCR	(SJA_OCR_MODE_NORMAL | SJA_OCR_TX0_PUSHPULL | SJA_OCR_TX1_PUSHPULL)
+
+/*
+ * In the CDR register, you should set CBP to 1.
+ * You will probably also want to set the clock divider value to 7
+ * (meaning direct oscillator output) because the second SJA1000 chip
+ * is driven by the first one's CLKOUT output.
+ */
+#define PLX_PCI_CDR			(SJA_CDR_CBP | SJA_CDR_CAN_MODE)
+
+/* SJA1000 Control Register in the BasicCAN Mode */
+#define SJA_CR				0x00
+
+/* States of some SJA1000 registers after hardware reset in the BasicCAN mode */
+#define REG_CR_BASICCAN_INITIAL		0x21
+#define REG_CR_BASICCAN_INITIAL_MASK	0xa1
+#define REG_SR_BASICCAN_INITIAL		0x0c
+#define REG_IR_BASICCAN_INITIAL		0xe0
+
+/* States of some SJA1000 registers after hardware reset in the PeliCAN mode */
+#define REG_MOD_PELICAN_INITIAL		0x01
+#define REG_SR_PELICAN_INITIAL		0x3c
+#define REG_IR_PELICAN_INITIAL		0x00
+
+#define ADLINK_PCI_VENDOR_ID		0x144A
+#define ADLINK_PCI_DEVICE_ID		0x7841
+
+#define ESD_PCI_SUB_SYS_ID_PCI200	0x0004
+#define ESD_PCI_SUB_SYS_ID_PCI266	0x0009
+#define ESD_PCI_SUB_SYS_ID_PMC266	0x000e
+#define ESD_PCI_SUB_SYS_ID_CPCI200	0x010b
+#define ESD_PCI_SUB_SYS_ID_PCIE2000	0x0200
+#define ESD_PCI_SUB_SYS_ID_PCI104200	0x0501
+
+#define MARATHON_PCI_DEVICE_ID		0x2715
+
+#define TEWS_PCI_VENDOR_ID		0x1498
+#define TEWS_PCI_DEVICE_ID_TMPC810	0x032A
+
+static void plx_pci_reset_common(struct pci_dev *pdev);
+static void plx_pci_reset_marathon(struct pci_dev *pdev);
+static void plx9056_pci_reset_common(struct pci_dev *pdev);
+
+struct plx_pci_channel_map {
+	u32 bar;
+	u32 offset;
+	u32 size;		/* 0x00 - auto, i.e. the length of the entire BAR */
+};
+
+struct plx_pci_card_info {
+	const char *name;
+	int channel_count;
+	u32 can_clock;
+	u8 ocr;			/* output control register */
+	u8 cdr;			/* clock divider register */
+
+	/* Parameters for mapping local configuration space */
+	struct plx_pci_channel_map conf_map;
+
+	/* Parameters for mapping the SJA1000 chips */
+	struct plx_pci_channel_map chan_map_tbl[PLX_PCI_MAX_CHAN];
+
+	/* Pointer to device-dependent reset function */
+	void (*reset_func)(struct pci_dev *pdev);
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink = {
+	"Adlink PCI-7841/cPCI-7841", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{1, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_adlink_se = {
+	"Adlink PCI-7841/cPCI-7841 SE", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x80, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd200 = {
+	"esd CAN-PCI/CPCI/PCI104/200", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9030/9050 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd266 = {
+	"esd CAN-PCI/PMC/266", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+	&plx9056_pci_reset_common
+	/* based on PLX9056 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_esd2000 = {
+	"esd CAN-PCIe/2000", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x80}, {2, 0x100, 0x80} },
+	&plx9056_pci_reset_common
+	/* based on PEX8311 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_marathon = {
+	"Marathon CAN-bus-PCI", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x00, 0x00}, {4, 0x00, 0x00} },
+	&plx_pci_reset_marathon
+	/* based on PLX9052 */
+};
+
+static struct plx_pci_card_info plx_pci_card_info_tews = {
+	"TEWS TECHNOLOGIES TPMC810", 2,
+	PLX_PCI_CAN_CLOCK, PLX_PCI_OCR, PLX_PCI_CDR,
+	{0, 0x00, 0x00}, { {2, 0x000, 0x80}, {2, 0x100, 0x80} },
+	&plx_pci_reset_common
+	/* based on PLX9030 */
+};
+
+static const struct pci_device_id plx_pci_tbl[] = {
+	{
+		/* Adlink PCI-7841/cPCI-7841 */
+		ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_NETWORK_OTHER << 8, ~0,
+		(kernel_ulong_t)&plx_pci_card_info_adlink
+	},
+	{
+		/* Adlink PCI-7841/cPCI-7841 SE */
+		ADLINK_PCI_VENDOR_ID, ADLINK_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		PCI_CLASS_COMMUNICATION_OTHER << 8, ~0,
+		(kernel_ulong_t)&plx_pci_card_info_adlink_se
+	},
+	{
+		/* esd CAN-PCI/200 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9050,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI200,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd200
+	},
+	{
+		/* esd CAN-CPCI/200 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_CPCI200,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd200
+	},
+	{
+		/* esd CAN-PCI104/200 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9030,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI104200,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd200
+	},
+	{
+		/* esd CAN-PCI/266 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCI266,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd266
+	},
+	{
+		/* esd CAN-PMC/266 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PMC266,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd266
+	},
+	{
+		/* esd CAN-PCIE/2000 */
+		PCI_VENDOR_ID_PLX, PCI_DEVICE_ID_PLX_9056,
+		PCI_VENDOR_ID_ESDGMBH, ESD_PCI_SUB_SYS_ID_PCIE2000,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_esd2000
+	},
+	{
+		/* Marathon CAN-bus-PCI card */
+		PCI_VENDOR_ID_PLX, MARATHON_PCI_DEVICE_ID,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_marathon
+	},
+	{
+		/* TEWS TECHNOLOGIES TPMC810 card */
+		TEWS_PCI_VENDOR_ID, TEWS_PCI_DEVICE_ID_TMPC810,
+		PCI_ANY_ID, PCI_ANY_ID,
+		0, 0,
+		(kernel_ulong_t)&plx_pci_card_info_tews
+	},
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, plx_pci_tbl);
+
+static u8 plx_pci_read_reg(struct rtcan_device *dev, int port)
+{
+	return ioread8((void __iomem *)dev->base_addr + port);
+}
+
+static void plx_pci_write_reg(struct rtcan_device *dev, int port, u8 val)
+{
+	iowrite8(val, (void __iomem *)dev->base_addr + port);
+}
+
+/*
+ * Check if a CAN controller is present at the specified location
+ * by trying to switch them from BasicCAN mode into PeliCAN mode.
+ * Also check states of some registers in reset mode.
+ */
+static inline int plx_pci_check_sja1000(struct rtcan_device *dev)
+{
+	int flag = 0;
+
+	struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+	/*
+	 * Check registers after hardware reset (the Basic mode)
+	 * See states on p. 10 of the Datasheet.
+	 */
+	if ((chip->read_reg(dev, SJA_CR) & REG_CR_BASICCAN_INITIAL_MASK) ==
+	    REG_CR_BASICCAN_INITIAL &&
+	    (chip->read_reg(dev, SJA_SR) == REG_SR_BASICCAN_INITIAL) &&
+	    (chip->read_reg(dev, SJA_IR) == REG_IR_BASICCAN_INITIAL))
+		flag = 1;
+
+	/* Bring the SJA1000 into the PeliCAN mode*/
+	chip->write_reg(dev, SJA_CDR, SJA_CDR_CAN_MODE);
+
+	/*
+	 * Check registers after reset in the PeliCAN mode.
+	 * See states on p. 23 of the Datasheet.
+	 */
+	if (chip->read_reg(dev, SJA_MOD) == REG_MOD_PELICAN_INITIAL &&
+	    chip->read_reg(dev, SJA_SR) == REG_SR_PELICAN_INITIAL &&
+	    chip->read_reg(dev, SJA_IR) == REG_IR_PELICAN_INITIAL)
+		return flag;
+
+	return 0;
+}
+
+/*
+ * PLX9030/50/52 software reset
+ * This also asserts LRESET# and resets the device on the Local Bus (if wired).
+ * For most cards this is enough to reset the SJA1000 chips.
+ */
+static void plx_pci_reset_common(struct pci_dev *pdev)
+{
+	struct plx_pci_card *card = pci_get_drvdata(pdev);
+	u32 cntrl;
+
+	cntrl = ioread32(card->conf_addr + PLX_CNTRL);
+	cntrl |= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+	udelay(100);
+	cntrl ^= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX_CNTRL);
+}
+
+/*
+ * PLX9056 software reset
+ * Assert LRESET# and reset device(s) on the Local Bus (if wired).
+ */
+static void plx9056_pci_reset_common(struct pci_dev *pdev)
+{
+	struct plx_pci_card *card = pci_get_drvdata(pdev);
+	u32 cntrl;
+
+	/* issue a local bus reset */
+	cntrl = ioread32(card->conf_addr + PLX9056_CNTRL);
+	cntrl |= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+	udelay(100);
+	cntrl ^= PLX_PCI_RESET;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+	/* reload local configuration from EEPROM */
+	cntrl |= PLX9056_PCI_RCR;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+
+	/*
+	 * There is no safe way to poll for the end
+	 * of reconfiguration process. Waiting for 10ms
+	 * is safe.
+	 */
+	mdelay(10);
+
+	cntrl ^= PLX9056_PCI_RCR;
+	iowrite32(cntrl, card->conf_addr + PLX9056_CNTRL);
+}
+
+/* Special reset function for Marathon card */
+static void plx_pci_reset_marathon(struct pci_dev *pdev)
+{
+	void __iomem *reset_addr;
+	int i;
+	int reset_bar[2] = {3, 5};
+
+	plx_pci_reset_common(pdev);
+
+	for (i = 0; i < 2; i++) {
+		reset_addr = pci_iomap(pdev, reset_bar[i], 0);
+		if (!reset_addr) {
+			dev_err(&pdev->dev, "Failed to remap reset "
+				"space %d (BAR%d)\n", i, reset_bar[i]);
+		} else {
+			/* reset the SJA1000 chip */
+			iowrite8(0x1, reset_addr);
+			udelay(100);
+			pci_iounmap(pdev, reset_addr);
+		}
+	}
+}
+
+static void plx_pci_del_card(struct pci_dev *pdev)
+{
+	struct plx_pci_card *card = pci_get_drvdata(pdev);
+	struct rtcan_device *dev;
+	int i = 0;
+
+	for (i = 0; i < card->channels; i++) {
+		dev = card->rtcan_dev[i];
+		if (!dev)
+			continue;
+
+		dev_info(&pdev->dev, "Removing %s\n", dev->name);
+		rtcan_sja1000_unregister(dev);
+		if (dev->base_addr)
+			pci_iounmap(pdev, (void __iomem *)dev->base_addr);
+		rtcan_dev_free(dev);
+	}
+
+	card->reset_func(pdev);
+
+	/*
+	 * Disable interrupts from PCI-card and disable local
+	 * interrupts
+	 */
+	if (pdev->device != PCI_DEVICE_ID_PLX_9056)
+		iowrite32(0x0, card->conf_addr + PLX_INTCSR);
+	else
+		iowrite32(0x0, card->conf_addr + PLX9056_INTCSR);
+
+	if (card->conf_addr)
+		pci_iounmap(pdev, card->conf_addr);
+
+	kfree(card);
+
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+/*
+ * Probe PLX90xx based device for the SJA1000 chips and register each
+ * available CAN channel to SJA1000 Socket-CAN subsystem.
+ */
+static int plx_pci_add_card(struct pci_dev *pdev,
+			    const struct pci_device_id *ent)
+{
+	struct rtcan_sja1000 *chip;
+	struct rtcan_device *dev;
+	struct plx_pci_card *card;
+	struct plx_pci_card_info *ci;
+	int err, i;
+	u32 val;
+	void __iomem *addr;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	ci = (struct plx_pci_card_info *)ent->driver_data;
+
+	if (pci_enable_device(pdev) < 0) {
+		dev_err(&pdev->dev, "Failed to enable PCI device\n");
+		return -ENODEV;
+	}
+
+	dev_info(&pdev->dev, "Detected \"%s\" card at slot #%i\n",
+		 ci->name, PCI_SLOT(pdev->devfn));
+
+	/* Allocate card structures to hold addresses, ... */
+	card = kzalloc(sizeof(*card), GFP_KERNEL);
+	if (!card) {
+		dev_err(&pdev->dev, "Unable to allocate memory\n");
+		pci_disable_device(pdev);
+		return -ENOMEM;
+	}
+
+	pci_set_drvdata(pdev, card);
+
+	card->channels = 0;
+
+	/* Remap PLX90xx configuration space */
+	addr = pci_iomap(pdev, ci->conf_map.bar, ci->conf_map.size);
+	if (!addr) {
+		err = -ENOMEM;
+		dev_err(&pdev->dev, "Failed to remap configuration space "
+			"(BAR%d)\n", ci->conf_map.bar);
+		goto failure_cleanup;
+	}
+	card->conf_addr = addr + ci->conf_map.offset;
+
+	ci->reset_func(pdev);
+	card->reset_func = ci->reset_func;
+
+	/* Detect available channels */
+	for (i = 0; i < ci->channel_count; i++) {
+		struct plx_pci_channel_map *cm = &ci->chan_map_tbl[i];
+
+		dev = rtcan_dev_alloc(sizeof(struct rtcan_sja1000),
+				      sizeof(struct plx_pci_card));
+		if (!dev) {
+			err = -ENOMEM;
+			goto failure_cleanup;
+		}
+
+		strncpy(dev->name, RTCAN_DEV_NAME, IFNAMSIZ);
+		dev->board_name = (char *)ci->name;
+
+		card->rtcan_dev[i] = dev;
+		chip = card->rtcan_dev[i]->priv;
+		chip->irq_flags = RTDM_IRQTYPE_SHARED;
+		chip->irq_num = pdev->irq;
+
+		/*
+		 * Remap IO space of the SJA1000 chips
+		 * This is device-dependent mapping
+		 */
+		addr = pci_iomap(pdev, cm->bar, cm->size);
+		if (!addr) {
+			err = -ENOMEM;
+			dev_err(&pdev->dev, "Failed to remap BAR%d\n", cm->bar);
+			goto failure_cleanup;
+		}
+
+		dev->base_addr = (unsigned long)(addr + cm->offset);
+		chip->read_reg = plx_pci_read_reg;
+		chip->write_reg = plx_pci_write_reg;
+
+		/* Check if channel is present */
+		if (plx_pci_check_sja1000(dev)) {
+			dev->can_sys_clock = ci->can_clock;
+			chip->ocr = ci->ocr;
+			chip->cdr = ci->cdr;
+
+			/* Register SJA1000 device */
+			err = rtcan_sja1000_register(dev);
+			if (err) {
+				dev_err(&pdev->dev, "Registering device failed "
+					"(err=%d)\n", err);
+				rtcan_dev_free(dev);
+				goto failure_cleanup;
+			}
+
+			card->channels++;
+
+			dev_info(&pdev->dev, "Channel #%d at 0x%p, irq %d "
+				 "registered as %s\n", i + 1,
+				 (void __iomem *)dev->base_addr, chip->irq_num,
+				 dev->name);
+		} else {
+			dev_err(&pdev->dev, "Channel #%d not detected\n",
+				i + 1);
+			rtcan_dev_free(dev);
+		}
+	}
+
+	if (!card->channels) {
+		err = -ENODEV;
+		goto failure_cleanup;
+	}
+
+	/*
+	 * Enable interrupts from PCI-card (PLX90xx) and enable Local_1,
+	 * Local_2 interrupts from the SJA1000 chips
+	 */
+	if (pdev->device != PCI_DEVICE_ID_PLX_9056) {
+		val = ioread32(card->conf_addr + PLX_INTCSR);
+		if (pdev->subsystem_vendor == PCI_VENDOR_ID_ESDGMBH)
+			val |= PLX_LINT1_EN | PLX_PCI_INT_EN;
+		else
+			val |= PLX_LINT1_EN | PLX_LINT2_EN | PLX_PCI_INT_EN;
+		iowrite32(val, card->conf_addr + PLX_INTCSR);
+	} else {
+		iowrite32(PLX9056_LINTI | PLX9056_PCI_INT_EN,
+			  card->conf_addr + PLX9056_INTCSR);
+	}
+	return 0;
+
+failure_cleanup:
+	dev_err(&pdev->dev, "Error: %d. Cleaning Up.\n", err);
+
+	plx_pci_del_card(pdev);
+
+	return err;
+}
+
+static struct pci_driver plx_pci_driver = {
+	.name = RTCAN_DRV_NAME,
+	.id_table = plx_pci_tbl,
+	.probe = plx_pci_add_card,
+	.remove = plx_pci_del_card,
+};
+
+module_pci_driver(plx_pci_driver);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c
new file mode 100644
index 0000000..0f49551
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.c
@@ -0,0 +1,842 @@
+/*
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * Parts of this software are based on the following:
+ *
+ * - RTAI CAN device driver for SJA1000 controllers by Jan Kiszka
+ *
+ * - linux-can.patch, a CAN socket framework for Linux,
+ *   Copyright (C) 2004, 2005, Robert Schwebel, Benedikt Spranger,
+ *   Marc Kleine-Budde, Sascha Hauer, Pengutronix
+ *
+ * - RTnet (www.rtnet.org)
+ *
+ * - serial device driver and profile included in Xenomai (RTDM),
+ *   Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+
+#include <rtdm/driver.h>
+#include <rtdm/can.h>
+
+#include <rtcan_socket.h>
+#include <rtcan_dev.h>
+#include <rtcan_raw.h>
+#include <rtcan_list.h>
+#include <rtcan_sja1000.h>
+#include <rtcan_sja1000_regs.h>
+
+
+#define BTR0_BRP_MASK	0x3f
+#define BTR0_SJW_SHIFT	6
+#define BTR0_SJW_MASK	(0x3 << BTR0_SJW_SHIFT)
+
+#define BTR1_TSEG1_MASK  0xf
+#define BTR1_TSEG2_SHIFT 4
+#define BTR1_TSEG2_MASK  (0x7 << BTR1_TSEG2_SHIFT)
+#define BTR1_SAM_SHIFT   7
+
+#define BTR0_SET_BRP(brp)     (((brp) - 1) & BTR0_BRP_MASK)
+#define BTR0_SET_SJW(sjw)     ((((sjw) - 1) << BTR0_SJW_SHIFT) & BTR0_SJW_MASK)
+
+#define BTR1_SET_TSEG1(tseg1) (((tseg1) - 1) & BTR1_TSEG1_MASK)
+#define BTR1_SET_TSEG2(tseg2) ((((tseg2) - 1) << BTR1_TSEG2_SHIFT) & BTR1_TSEG2_MASK)
+#define BTR1_SET_SAM(sam)     (((sam) & 1) << BTR1_SAM_SHIFT)
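+
+/*
+ * The resulting bit rate is can_sys_clock / (brp * (1 + tseg1 + tseg2)).
+ * With the usual 8 MHz system clock, brp = 1, tseg1 = 12 and tseg2 = 3
+ * yield 16 time quanta per bit, i.e. 500 kbit/s.
+ */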
+
+/* Value for the interrupt enable register */
+#define SJA1000_IER                 (SJA_IER_RIE | SJA_IER_TIE | \
+				     SJA_IER_EIE | SJA_IER_WUIE | \
+				     SJA_IER_EPIE | SJA_IER_BEIE | \
+				     SJA_IER_ALIE | SJA_IER_DOIE)
+
+static char *sja_ctrl_name = "SJA1000";
+
+#define STATE_OPERATING(state) \
+    ((state) != CAN_STATE_STOPPED && (state) != CAN_STATE_BUS_OFF)
+
+#define STATE_RESET(state) \
+    ((state) == CAN_STATE_STOPPED || (state) == CAN_STATE_BUS_OFF)
+
+
+MODULE_AUTHOR("Sebastian.Smolorz@stud.uni-hannover.de");
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RT-Socket-CAN driver for SJA1000");
+
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+static struct can_bittiming_const sja1000_bittiming_const = {
+	.name = "sja1000",
+	.tseg1_min = 1,
+	.tseg1_max = 16,
+	.tseg2_min = 1,
+	.tseg2_max = 8,
+	.sjw_max = 4,
+	.brp_min = 1,
+	.brp_max = 64,
+	.brp_inc = 1,
+};
+#endif
+
+static inline void rtcan_sja_rx_interrupt(struct rtcan_device *dev,
+					  struct rtcan_skb *skb)
+{
+    int i;
+    /* "Real" size of the payload */
+    u8 size;
+    /* Content of frame information register */
+    u8 fir;
+    /* Ring buffer frame within skb */
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+    struct rtcan_sja1000 *chip = dev->priv;
+
+    /* Read out frame information register */
+    fir = chip->read_reg(dev, SJA_FIR);
+
+    /* Extract data length code */
+    frame->can_dlc = fir & SJA_FIR_DLC_MASK;
+
+    /* If DLC exceeds 8 bytes adjust it to 8 (for the payload size) */
+    size = (frame->can_dlc > 8) ? 8 : frame->can_dlc;
+
+
+    if (fir & SJA_FIR_EFF) {
+	/* Extended frame */
+	frame->can_id = CAN_EFF_FLAG;
+
+	/* Read ID */
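+	/* The 29-bit identifier is spread over ID1..ID4; only the
+	   upper five bits of ID4 belong to it */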
+	frame->can_id |= chip->read_reg(dev, SJA_ID1) << 21;
+	frame->can_id |= chip->read_reg(dev, SJA_ID2) << 13;
+	frame->can_id |= chip->read_reg(dev, SJA_ID3) << 5;
+	frame->can_id |= chip->read_reg(dev, SJA_ID4) >> 3;
+
+	if (!(fir & SJA_FIR_RTR)) {
+	    /* No RTR, read data bytes */
+	    for (i = 0; i < size; i++)
+		frame->data[i] = chip->read_reg(dev,
+						SJA_DATA_EFF(i));
+	}
+
+    } else {
+	/* Standard frame */
+
+	/* Read ID */
+	frame->can_id  = chip->read_reg(dev, SJA_ID1) << 3;
+	frame->can_id |= chip->read_reg(dev, SJA_ID2) >> 5;
+
+	if (!(fir & SJA_FIR_RTR)) {
+	    /* No RTR, read data bytes */
+	    for (i = 0; i < size; i++)
+		frame->data[i] = chip->read_reg(dev, SJA_DATA_SFF(i));
+	}
+    }
+
+    /* Release Receive Buffer */
+    chip->write_reg(dev, SJA_CMR, SJA_CMR_RRB);
+
+
+    /* RTR? */
+    if (fir & SJA_FIR_RTR) {
+	frame->can_id |= CAN_RTR_FLAG;
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE;
+    } else
+	skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + size;
+
+    /* Store the interface index */
+    frame->can_ifindex = dev->ifindex;
+}
+
+
+static inline void rtcan_sja_err_interrupt(struct rtcan_device *dev,
+					   struct rtcan_sja1000 *chip,
+					   struct rtcan_skb *skb,
+					   u8 irq_source)
+{
+    struct rtcan_rb_frame *frame = &skb->rb_frame;
+    can_state_t state = dev->state;
+    u8 status, txerr, rxerr;
+
+    status = chip->read_reg(dev, SJA_SR);
+    txerr = chip->read_reg(dev, SJA_TXERR);
+    rxerr = chip->read_reg(dev, SJA_RXERR);
+
+    skb->rb_frame_size = EMPTY_RB_FRAME_SIZE + CAN_ERR_DLC;
+
+    frame->can_id = CAN_ERR_FLAG;
+    frame->can_dlc = CAN_ERR_DLC;
+
+    memset(&frame->data[0], 0, frame->can_dlc);
+
+    /* Data overrun interrupt? */
+    if (irq_source & SJA_IR_DOI) {
+	frame->can_id |= CAN_ERR_CRTL;
+	frame->data[1] = CAN_ERR_CRTL_RX_OVERFLOW;
+    }
+
+    /* Arbitration lost interrupt? */
+    if (irq_source & SJA_IR_ALI) {
+	frame->can_id |= CAN_ERR_LOSTARB;
+	frame->data[0] = chip->read_reg(dev, SJA_ALC)  & 0x1f;
+    }
+
+    /* Bus error interrupt? */
+    if (irq_source & SJA_IR_BEI) {
+	u8 ecc = chip->read_reg(dev, SJA_ECC);
+
+	frame->can_id |= CAN_ERR_PROT | CAN_ERR_BUSERROR;
+
+	switch (ecc & SJA_ECC_ERR_MASK) {
+	case SJA_ECC_ERR_BIT:
+	    frame->data[2] |= CAN_ERR_PROT_BIT;
+	    break;
+	case SJA_ECC_ERR_FORM:
+	    frame->data[2] |= CAN_ERR_PROT_FORM;
+	    break;
+	case SJA_ECC_ERR_STUFF:
+	    frame->data[2] |= CAN_ERR_PROT_STUFF;
+	    break;
+	default:
+	    frame->data[2] |= CAN_ERR_PROT_UNSPEC;
+	    frame->data[3] = ecc & SJA_ECC_SEG_MASK;
+	    break;
+	}
+	/* Error occurred during transmission? */
+	if ((ecc & SJA_ECC_DIR) == 0)
+	    frame->data[2] |= CAN_ERR_PROT_TX;
+    }
+
+    /* Error passive interrupt? */
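+    /* The EPI is triggered both when entering the error passive state
+     * and when returning towards error active, so toggle between the
+     * bus-warning and bus-passive states here. */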
+    if (unlikely(irq_source & SJA_IR_EPI)) {
+	if (state == CAN_STATE_BUS_WARNING) {
+	    state = CAN_STATE_BUS_PASSIVE;
+	} else {
+	    state = CAN_STATE_BUS_WARNING;
+	}
+    }
+
+    /* Error warning interrupt? */
+    if (irq_source & SJA_IR_EI) {
+
+	/* Test bus status (bus-off condition) */
+	if (status & SJA_SR_BS) {
+	    /* Bus-off */
+	    state = CAN_STATE_BUS_OFF;
+	    frame->can_id |= CAN_ERR_BUSOFF;
+	    /* Only allow error warning interrupts
+	       (otherwise an EPI would arise during bus-off
+	       recovery) */
+	    chip->write_reg(dev, SJA_IER, SJA_IER_EIE);
+	    /* Wake up waiting senders */
+	    rtdm_sem_destroy(&dev->tx_sem);
+	}
+
+	/* Test error status (error warning limit) */
+	else if (status & SJA_SR_ES)
+	    /* error warning limit reached */
+	    state = CAN_STATE_BUS_WARNING;
+
+	/* Re-entrance into error active state from bus-warn? */
+	else if (state == CAN_STATE_BUS_WARNING)
+	    state = CAN_STATE_ACTIVE;
+
+	else
+	    /* Bus-off recovery complete, enable all interrupts again */
+	    chip->write_reg(dev, SJA_IER, SJA1000_IER);
+    }
+
+    if (state != dev->state &&
+	(state == CAN_STATE_BUS_WARNING || state == CAN_STATE_BUS_PASSIVE)) {
+	frame->can_id |= CAN_ERR_PROT;
+	if (txerr > rxerr)
+	    frame->data[1] = CAN_ERR_CRTL_TX_WARNING;
+	else
+	    frame->data[1] = CAN_ERR_CRTL_RX_WARNING;
+    }
+
+    dev->state = state;
+    frame->can_ifindex = dev->ifindex;
+}
+
+static int rtcan_sja_interrupt(rtdm_irq_t *irq_handle)
+{
+    struct rtcan_device *dev;
+    struct rtcan_sja1000 *chip;
+    struct rtcan_skb skb;
+    int recv_lock_free = 1;
+    int irq_count = 0;
+    int ret = RTDM_IRQ_NONE;
+    u8 irq_source;
+
+
+    /* Get the device which registered this IRQ. */
+    dev = (struct rtcan_device *)rtdm_irq_get_arg(irq_handle, void);
+    chip = (struct rtcan_sja1000 *)dev->priv;
+
+    /* Take spinlock protecting HW register access and device structures. */
+    rtdm_lock_get(&dev->device_lock);
+
+    /* Loop as long as the device reports an event */
+    while ((irq_source = chip->read_reg(dev, SJA_IR))) {
+	ret = RTDM_IRQ_HANDLED;
+	irq_count++;
+
+	/* Now look up which interrupts appeared */
+
+	/* Wake-up interrupt? */
+	if (irq_source & SJA_IR_WUI)
+	    dev->state = dev->state_before_sleep;
+
+	/* Error Interrupt? */
+	if (irq_source & (SJA_IR_EI | SJA_IR_DOI | SJA_IR_EPI |
+			  SJA_IR_ALI | SJA_IR_BEI)) {
+
+	    /* Check error condition and fill error frame */
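+	    /* Bus error reporting is throttled: bus_err_on counts down
+	     * on each bus error interrupt and frames pass only while it
+	     * is >= 2, i.e. once per rtcan_sja_enable_bus_err(). */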
+	    if (!((irq_source & SJA_IR_BEI) && (chip->bus_err_on-- < 2))) {
+		rtcan_sja_err_interrupt(dev, chip, &skb, irq_source);
+
+		if (recv_lock_free) {
+		    recv_lock_free = 0;
+		    rtdm_lock_get(&rtcan_recv_list_lock);
+		    rtdm_lock_get(&rtcan_socket_lock);
+		}
+		/* Pass error frame out to the sockets */
+		rtcan_rcv(dev, &skb);
+	    }
+	}
+
+	/* Transmit Interrupt? */
+	if (irq_source & SJA_IR_TI) {
+	    /* Wake up a sender */
+	    rtdm_sem_up(&dev->tx_sem);
+	    dev->tx_count++;
+
+	    if (rtcan_loopback_pending(dev)) {
+
+		if (recv_lock_free) {
+		    recv_lock_free = 0;
+		    rtdm_lock_get(&rtcan_recv_list_lock);
+		    rtdm_lock_get(&rtcan_socket_lock);
+		}
+
+		rtcan_loopback(dev);
+	    }
+	}
+
+	/* Receive Interrupt? */
+	if (irq_source & SJA_IR_RI) {
+
+	    /* Read out HW registers */
+	    rtcan_sja_rx_interrupt(dev, &skb);
+
+	    /* Take more locks. Ensure that they are taken and
+	     * released only once in the IRQ handler. */
+	    /* WARNING: Nested locks are dangerous! But they are
+	     * nested only in this routine so a deadlock should
+	     * not be possible. */
+	    if (recv_lock_free) {
+		recv_lock_free = 0;
+		rtdm_lock_get(&rtcan_recv_list_lock);
+		rtdm_lock_get(&rtcan_socket_lock);
+	    }
+
+	    /* Pass received frame out to the sockets */
+	    rtcan_rcv(dev, &skb);
+	}
+    }
+
+    if (chip->irq_ack)
+	chip->irq_ack(dev);
+
+    /* Release spinlocks */
+    if (!recv_lock_free) {
+	rtdm_lock_put(&rtcan_socket_lock);
+	rtdm_lock_put(&rtcan_recv_list_lock);
+    }
+    rtdm_lock_put(&dev->device_lock);
+
+    return ret;
+}
+
+
+
+/*
+ * Inline function to decide whether the controller is operating.
+ *
+ * Catches the very unlikely case that a previous attempt to enter stop
+ * mode returned without success, but the controller meanwhile went
+ * into reset mode.
+ */
+static inline int rtcan_sja_is_operating(struct rtcan_device *dev,
+					 can_state_t *state)
+{
+    int is_operating = STATE_OPERATING(*state);
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    if (unlikely(is_operating && chip->read_reg(dev, SJA_MOD) & SJA_MOD_RM)) {
+	*state = CAN_STATE_STOPPED;
+	is_operating = 0;
+	/* Disable the controller's interrupts */
+	chip->write_reg(dev, SJA_IER, 0x00);
+	/* Wake up waiting senders */
+	rtdm_sem_destroy(&dev->tx_sem);
+    }
+
+    return is_operating;
+}
+
+
+/*
+ * Set controller into reset mode.
+ *
+ * According to the SJA1000 specification, it is necessary to check the
+ * reset mode bit in PeliCAN mode after having set it. So we do. But if
+ * using an ISA card like the PHYTEC eNET card this should not be necessary
+ * because the CAN controller clock of this card (16 MHz) is twice as high
+ * as the ISA bus clock.
+ */
+static int rtcan_sja_mode_stop(struct rtcan_device *dev,
+			       rtdm_lockctx_t *lock_ctx)
+{
+    int ret = 0;
+    /* Max. 50 loops busy sleep. If the controller is stopped while in
+     * sleep mode 20-40 loops are needed (tested on PHYTEC eNET). */
+    int wait_loop = 50;
+    can_state_t state;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    state = dev->state;
+    /* If controller is not operating anyway, go out */
+    if (STATE_RESET(state))
+	goto out;
+
+    /* Disable the controller's interrupts */
+    chip->write_reg(dev, SJA_IER, 0x00);
+
+    /* Set reset mode bit */
+    chip->write_reg(dev, SJA_MOD, SJA_MOD_RM);
+
+    /* Read reset mode bit, multiple tests */
+    do {
+	if (chip->read_reg(dev, SJA_MOD) & SJA_MOD_RM)
+	    break;
+
+	if (lock_ctx)
+	    rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+	/* Busy sleep 1 microsecond */
+	rtdm_task_busy_sleep(1000);
+	if (lock_ctx)
+	    rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+    } while(--wait_loop);
+
+
+    if (wait_loop) {
+	/* The volatile state could have changed while we were busy-sleeping. */
+	dev->state = CAN_STATE_STOPPED;
+	/* Wake up waiting senders */
+	rtdm_sem_destroy(&dev->tx_sem);
+    } else {
+	ret = -EAGAIN;
+	/* Enable interrupts again as we did not succeed */
+	chip->write_reg(dev, SJA_IER, SJA1000_IER);
+    }
+
+ out:
+    return ret;
+}
+
+
+
+/*
+ * Set controller into operating mode.
+ *
+ * If coming from CAN_STATE_SLEEPING, the controller must wait
+ * some time to avoid bus errors. Measured on a PHYTEC eNET card,
+ * this time was 110 microseconds.
+ */
+static int rtcan_sja_mode_start(struct rtcan_device *dev,
+				rtdm_lockctx_t *lock_ctx)
+{
+    int ret = 0;
+    u8 mod_reg;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    /* Remember that the state field in the device structure is volatile,
+     * so accesses to it will not be optimized away by the compiler. */
+
+    mod_reg = 0;
+    if (dev->ctrl_mode & CAN_CTRLMODE_LISTENONLY)
+	mod_reg |= SJA_MOD_LOM;
+    if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+	mod_reg |= SJA_MOD_STM;
+
+    switch (dev->state) {
+
+    case CAN_STATE_ACTIVE:
+    case CAN_STATE_BUS_WARNING:
+    case CAN_STATE_BUS_PASSIVE:
+	break;
+
+    case CAN_STATE_STOPPED:
+	/* Clear error counters */
+	chip->write_reg(dev, SJA_RXERR , 0);
+	chip->write_reg(dev, SJA_TXERR , 0);
+	/* Clear error code capture (i.e. read it) */
+	chip->read_reg(dev, SJA_ECC);
+	/* Set error active state */
+	dev->state = CAN_STATE_ACTIVE;
+	/* Set up sender "mutex" */
+	rtdm_sem_init(&dev->tx_sem, 1);
+	/* Enable interrupts */
+	chip->write_reg(dev, SJA_IER, SJA1000_IER);
+
+	/* Clear reset mode bit in SJA1000 */
+	chip->write_reg(dev, SJA_MOD, mod_reg);
+
+	break;
+
+    case CAN_STATE_SLEEPING:
+	/* Trigger Wake-up interrupt */
+	chip->write_reg(dev, SJA_MOD, mod_reg);
+
+	/* Ok, coming from sleep mode is problematic. We have to wait
+	 * for the SJA1000 to get back on its feet again. */
+	rtdm_lock_put_irqrestore(&dev->device_lock, *lock_ctx);
+	rtdm_task_busy_sleep(110000);
+	rtdm_lock_get_irqsave(&dev->device_lock, *lock_ctx);
+
+	/* Meanwhile, the Wake-up interrupt was serviced and has set the
+	 * right state. As we don't want to overwrite it, jump out. */
+	goto out;
+
+	break;
+
+    case CAN_STATE_BUS_OFF:
+	/* Trigger bus-off recovery */
+	chip->write_reg(dev, SJA_MOD, mod_reg);
+	/* Set up sender "mutex" */
+	rtdm_sem_init(&dev->tx_sem, 1);
+	/* Set error active state */
+	dev->state = CAN_STATE_ACTIVE;
+
+	break;
+
+    default:
+	/* Never reached, but we don't want nasty compiler warnings ... */
+	break;
+    }
+
+ out:
+    return ret;
+}
+
+can_state_t rtcan_sja_get_state(struct rtcan_device *dev)
+{
+    can_state_t state = dev->state;
+    rtcan_sja_is_operating(dev, &state);
+    return state;
+}
+
+int rtcan_sja_set_mode(struct rtcan_device *dev,
+		       can_mode_t mode,
+		       rtdm_lockctx_t *lock_ctx)
+{
+    int ret = 0;
+    can_state_t state;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000*)dev->priv;
+
+    switch (mode) {
+
+    case CAN_MODE_STOP:
+	ret = rtcan_sja_mode_stop(dev, lock_ctx);
+	break;
+
+    case CAN_MODE_START:
+	ret = rtcan_sja_mode_start(dev, lock_ctx);
+	break;
+
+    case CAN_MODE_SLEEP:
+
+	state = dev->state;
+
+	/* The controller must be operating, otherwise bail out */
+	if (!rtcan_sja_is_operating(dev, &state)) {
+	    ret = -ENETDOWN;
+	    goto mode_sleep_out;
+	}
+
+	/* Is the controller already sleeping? If so, bail out */
+	if (state == CAN_STATE_SLEEPING)
+	    goto mode_sleep_out;
+
+	/* Remember into which state to return when we
+	 * wake up */
+	dev->state_before_sleep = state;
+
+	/* Let's take a nap. (Now I REALLY understand
+	 * the meaning of interrupts ...) */
+	state = CAN_STATE_SLEEPING;
+	chip->write_reg(dev, SJA_MOD,
+			chip->read_reg(dev, SJA_MOD) | SJA_MOD_SM);
+
+    mode_sleep_out:
+	dev->state = state;
+	break;
+
+    default:
+	ret = -EOPNOTSUPP;
+	break;
+    }
+
+    return ret;
+}
+
+int rtcan_sja_set_bit_time(struct rtcan_device *dev,
+			   struct can_bittime *bit_time,
+			   rtdm_lockctx_t *lock_ctx)
+{
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+    u8 btr0, btr1;
+
+    switch (bit_time->type) {
+    case CAN_BITTIME_BTR:
+	btr0 = bit_time->btr.btr0;
+	btr1 = bit_time->btr.btr1;
+	break;
+
+    case CAN_BITTIME_STD:
+	btr0 = (BTR0_SET_BRP(bit_time->std.brp) |
+		BTR0_SET_SJW(bit_time->std.sjw));
+	btr1 = (BTR1_SET_TSEG1(bit_time->std.prop_seg +
+			       bit_time->std.phase_seg1) |
+		BTR1_SET_TSEG2(bit_time->std.phase_seg2) |
+		BTR1_SET_SAM(bit_time->std.sam));
+
+	break;
+
+    default:
+	return -EINVAL;
+    }
+
+    printk("%s: btr0=%#x btr1=%#x\n", __func__, btr0, btr1);
+    chip->write_reg(dev, SJA_BTR0, btr0);
+    chip->write_reg(dev, SJA_BTR1, btr1);
+
+    return 0;
+}
+
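+/*
+ * Re-arm bus error reporting.
+ *
+ * bus_err_on is a down-counter decremented on each bus error
+ * interrupt: a bus error frame is delivered to the sockets only while
+ * the counter is still at 2, so a single error frame gets through per
+ * re-arming. If errors were swallowed meanwhile (counter below 1),
+ * the error code capture register is read once to release it.
+ */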
+void rtcan_sja_enable_bus_err(struct rtcan_device *dev)
+{
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    if (chip->bus_err_on < 2) {
+	if (chip->bus_err_on < 1)
+	    chip->read_reg(dev, SJA_ECC);
+	chip->bus_err_on = 2;
+    }
+}
+
+/*
+ *  Start a transmission to a SJA1000 device
+ */
+static int rtcan_sja_start_xmit(struct rtcan_device *dev,
+				can_frame_t *frame)
+{
+    int i;
+    /* "Real" size of the payload */
+    u8 size;
+    /* Content of frame information register */
+    u8 fir;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+
+    /* Get DLC */
+    fir  = frame->can_dlc;
+
+    /* If DLC exceeds 8 bytes adjust it to 8 (for the payload) */
+    size = (fir > 8) ? 8 : fir;
+
+
+    if (frame->can_id & CAN_EFF_FLAG) {
+	/* Send extended frame */
+	fir |= SJA_FIR_EFF;
+
+	/* Write ID */
+	chip->write_reg(dev, SJA_ID1, frame->can_id >> 21);
+	chip->write_reg(dev, SJA_ID2, frame->can_id >> 13);
+	chip->write_reg(dev, SJA_ID3, frame->can_id >> 5);
+	chip->write_reg(dev, SJA_ID4, frame->can_id << 3);
+
+	/* RTR? */
+	if (frame->can_id & CAN_RTR_FLAG)
+	    fir |= SJA_FIR_RTR;
+
+	else {
+	    /* No RTR, write data bytes */
+	    for (i = 0; i < size; i++)
+		chip->write_reg(dev, SJA_DATA_EFF(i),
+				frame->data[i]);
+	}
+
+    } else {
+	/* Send standard frame */
+
+	/* Write ID */
+	chip->write_reg(dev, SJA_ID1, frame->can_id >> 3);
+	chip->write_reg(dev, SJA_ID2, frame->can_id << 5);
+
+	/* RTR? */
+	if (frame->can_id & CAN_RTR_FLAG)
+	    fir |= SJA_FIR_RTR;
+
+	else {
+	    /* No RTR, write data bytes */
+	    for (i = 0; i < size; i++)
+		chip->write_reg(dev, SJA_DATA_SFF(i),
+				frame->data[i]);
+	}
+    }
+
+
+    /* Write frame information register */
+    chip->write_reg(dev, SJA_FIR, fir);
+
+    /* Push the 'send' button */
+    if (dev->ctrl_mode & CAN_CTRLMODE_LOOPBACK)
+	chip->write_reg(dev, SJA_CMR, SJA_CMR_SRR);
+    else
+	chip->write_reg(dev, SJA_CMR, SJA_CMR_TR);
+
+    return 0;
+}
+
+
+
+/*
+ *  SJA1000 chip configuration
+ */
+static void sja1000_chip_config(struct rtcan_device *dev)
+{
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000* )dev->priv;
+
+    chip->write_reg(dev, SJA_CDR, chip->cdr);
+    chip->write_reg(dev, SJA_OCR, chip->ocr);
+
+    chip->write_reg(dev, SJA_AMR0, 0xFF);
+    chip->write_reg(dev, SJA_AMR1, 0xFF);
+    chip->write_reg(dev, SJA_AMR2, 0xFF);
+    chip->write_reg(dev, SJA_AMR3, 0xFF);
+}
+
+
+int rtcan_sja1000_register(struct rtcan_device *dev)
+{
+    int ret;
+    struct rtcan_sja1000 *chip = dev->priv;
+
+    if (chip == NULL)
+	return -EINVAL;
+
+    /* Set dummy state for following call */
+    dev->state = CAN_STATE_ACTIVE;
+    /* Enter reset mode */
+    rtcan_sja_mode_stop(dev, NULL);
+
+    if ((chip->read_reg(dev, SJA_SR) &
+	 (SJA_SR_RBS | SJA_SR_DOS | SJA_SR_TBS)) != SJA_SR_TBS) {
+	printk("ERROR! No SJA1000 device found!\n");
+	return -ENODEV;
+    }
+
+    dev->ctrl_name = sja_ctrl_name;
+
+    dev->hard_start_xmit = rtcan_sja_start_xmit;
+    dev->do_set_mode = rtcan_sja_set_mode;
+    dev->do_get_state = rtcan_sja_get_state;
+    dev->do_set_bit_time = rtcan_sja_set_bit_time;
+    dev->do_enable_bus_err = rtcan_sja_enable_bus_err;
+#ifndef CONFIG_XENO_DRIVERS_CAN_CALC_BITTIME_OLD
+    dev->bittiming_const = &sja1000_bittiming_const;
+#endif
+
+    chip->bus_err_on = 1;
+
+    ret = rtdm_irq_request(&dev->irq_handle,
+			   chip->irq_num, rtcan_sja_interrupt,
+			   chip->irq_flags, sja_ctrl_name, dev);
+    if (ret) {
+	printk(KERN_ERR "ERROR %d: IRQ %d is %s!\n",
+	       ret, chip->irq_num, ret == -EBUSY ?
+	       "busy, check shared interrupt support" : "invalid");
+	return ret;
+    }
+
+    sja1000_chip_config(dev);
+
+    /* Register RTDM device */
+    ret = rtcan_dev_register(dev);
+    if (ret) {
+	printk(KERN_ERR
+	       "ERROR %d while trying to register RTCAN device!\n", ret);
+	goto out_irq_free;
+    }
+
+    rtcan_sja_create_proc(dev);
+
+    return 0;
+
+ out_irq_free:
+    rtdm_irq_free(&dev->irq_handle);
+
+    return ret;
+}
+
+
+/* Cleanup module */
+void rtcan_sja1000_unregister(struct rtcan_device *dev)
+{
+    printk("Unregistering SJA1000 device %s\n", dev->name);
+
+    rtdm_irq_disable(&dev->irq_handle);
+    rtcan_sja_mode_stop(dev, NULL);
+    rtdm_irq_free(&dev->irq_handle);
+    rtcan_sja_remove_proc(dev);
+    rtcan_dev_unregister(dev);
+}
+
+int __init rtcan_sja_init(void)
+{
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	printk("RTCAN SJA1000 driver initialized\n");
+	return 0;
+}
+
+
+void __exit rtcan_sja_exit(void)
+{
+	printk("%s removed\n", sja_ctrl_name);
+}
+
+module_init(rtcan_sja_init);
+module_exit(rtcan_sja_exit);
+
+EXPORT_SYMBOL_GPL(rtcan_sja1000_register);
+EXPORT_SYMBOL_GPL(rtcan_sja1000_unregister);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h
new file mode 100644
index 0000000..84eb41b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2006, Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SJA1000_H_
+#define __SJA1000_H_
+
+#include <rtcan_dev.h>
+
+struct rtcan_sja1000 {
+    unsigned char (*read_reg)(struct rtcan_device *dev, int off);
+    void (*write_reg)(struct rtcan_device *dev, int off, unsigned char val);
+    void (*irq_ack)(struct rtcan_device *dev);
+    unsigned short irq_num;
+    unsigned short irq_flags;
+    unsigned char ocr;
+    unsigned char cdr;
+    char bus_err_on;
+};
+
+#ifdef CONFIG_FS_PROCFS
+int rtcan_sja_create_proc(struct rtcan_device* dev);
+void rtcan_sja_remove_proc(struct rtcan_device* dev);
+#else
+static inline int rtcan_sja_create_proc(struct rtcan_device* dev)
+{ return 0; }
+static inline void rtcan_sja_remove_proc(struct rtcan_device* dev) { }
+#endif
+int rtcan_sja1000_register(struct rtcan_device *dev);
+void rtcan_sja1000_unregister(struct rtcan_device *dev);
+
+
+#endif  /* __SJA1000_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c
new file mode 100644
index 0000000..57fd807
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_proc.c
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/delay.h>
+
+#include <rtdm/driver.h>
+
+#include <rtcan_dev.h>
+#include <rtcan_internal.h>
+#include <rtcan_sja1000.h>
+
+#ifdef CONFIG_XENO_DRIVERS_CAN_DEBUG
+
+static int rtcan_sja_proc_regs(struct seq_file *p, void *data)
+{
+    struct rtcan_device *dev = (struct rtcan_device *)data;
+    struct rtcan_sja1000 *chip = (struct rtcan_sja1000 *)dev->priv;
+    int i;
+
+    seq_printf(p, "SJA1000 registers");
+    for (i = 0; i < 0x20; i++) {
+	if ((i % 0x10) == 0)
+	    seq_printf(p, "\n%02x:", i);
+	seq_printf(p, " %02x", chip->read_reg(dev, i));
+    }
+    seq_printf(p, "\n");
+    return 0;
+}
+
+static int rtcan_sja_proc_regs_open(struct inode *inode, struct file *file)
+{
+	return single_open(file, rtcan_sja_proc_regs, pde_data(inode));
+}
+
+static const DEFINE_PROC_OPS(rtcan_sja_proc_regs_ops,
+			rtcan_sja_proc_regs_open,
+			single_release,
+			seq_read,
+			NULL);
+
+int rtcan_sja_create_proc(struct rtcan_device* dev)
+{
+    if (!dev->proc_root)
+	return -EINVAL;
+
+    proc_create_data("registers", S_IFREG | S_IRUGO | S_IWUSR, dev->proc_root,
+		     &rtcan_sja_proc_regs_ops, dev);
+    return 0;
+}
+
+void rtcan_sja_remove_proc(struct rtcan_device* dev)
+{
+    if (!dev->proc_root)
+	return;
+
+    remove_proc_entry("registers", dev->proc_root);
+}
+
+#else /* !CONFIG_XENO_DRIVERS_CAN_DEBUG */
+
+void rtcan_sja_remove_proc(struct rtcan_device* dev)
+{
+}
+
+int rtcan_sja_create_proc(struct rtcan_device* dev)
+{
+    return 0;
+}
+#endif	/* CONFIG_XENO_DRIVERS_CAN_DEBUG */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h
new file mode 100644
index 0000000..9f2f871
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/can/sja1000/rtcan_sja1000_regs.h
@@ -0,0 +1,206 @@
+/*
+ * Copyright (C) 2005,2006 Sebastian Smolorz
+ *                        <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ * Based on drivers/can/sja1000.h in linux-can.patch, a CAN socket
+ * framework for Linux:
+ *
+ * Copyright (C) 2005, Sascha Hauer, Pengutronix
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __SJA1000_REGS_H_
+#define __SJA1000_REGS_H_
+
+
+/* PeliCAN mode address map */
+
+/* reset and operating mode */
+#define SJA_MOD          0       /* Mode register                   */
+#define SJA_CMR          1       /* Command register                */
+#define SJA_SR           2       /* Status register                 */
+#define SJA_IR           3       /* Interrupt register              */
+#define SJA_IER          4       /* Interrupt enable register       */
+#define SJA_BTR0         6       /* Bus timing register 0           */
+#define SJA_BTR1         7       /* Bus timing register 1           */
+#define SJA_OCR          8       /* Output control register         */
+#define SJA_ALC         11       /* Arbitration lost capture        */
+#define SJA_ECC         12       /* Error code capture register     */
+#define SJA_RXERR       14       /* Receive error counter           */
+#define SJA_TXERR       15       /* Transmit error counter          */
+#define SJA_CDR         31       /* Clock divider register          */
+
+/* reset mode */
+#define SJA_ACR0        16       /* Acceptance code register 0      */
+#define SJA_ACR1        17       /* Acceptance code register 1      */
+#define SJA_ACR2        18       /* Acceptance code register 2      */
+#define SJA_ACR3        19       /* Acceptance code register 3      */
+#define SJA_AMR0        20       /* Acceptance mask register 0      */
+#define SJA_AMR1        21       /* Acceptance mask register 1      */
+#define SJA_AMR2        22       /* Acceptance mask register 2      */
+#define SJA_AMR3        23       /* Acceptance mask register 3      */
+
+/* operating mode */
+#define SJA_FIR         16       /* Frame information register      */
+#define SJA_ID1         17       /* Identifier 1                    */
+#define SJA_ID2         18       /* Identifier 2                    */
+#define SJA_ID3         19       /* Identifier 3 (EFF only)         */
+#define SJA_ID4         20       /* Identifier 4 (EFF only)         */
+
+#define SJA_DATA_SFF(x) (19 + (x)) /* Data registers in case of standard
+				    * frame format; 0 <= x <= 7 */
+#define SJA_DATA_EFF(x) (21 + (x)) /* Data registers in case of extended
+				    * frame format; 0 <= x <= 7 */
+
+/* Mode register */
+enum SJA1000_PELI_MOD {
+    SJA_MOD_RM           = 1,    /* Reset Mode                          */
+    SJA_MOD_LOM          = 1<<1, /* Listen Only Mode                    */
+    SJA_MOD_STM          = 1<<2, /* Self Test Mode                      */
+    SJA_MOD_AFM          = 1<<3, /* Acceptance Filter Mode              */
+    SJA_MOD_SM           = 1<<4  /* Sleep Mode                          */
+};
+
+/* Command register */
+enum SJA1000_PELI_CMR {
+    SJA_CMR_TR  = 1,             /* Transmission request                */
+    SJA_CMR_AT  = 1<<1,          /* Abort Transmission                  */
+    SJA_CMR_RRB = 1<<2,          /* Release Receive Buffer              */
+    SJA_CMR_CDO = 1<<3,          /* Clear Data Overrun                  */
+    SJA_CMR_SRR = 1<<4           /* Self reception request              */
+};
+
+/* Status register */
+enum SJA1000_PELI_SR {
+    SJA_SR_RBS           = 1,    /* Receive Buffer Status               */
+    SJA_SR_DOS           = 1<<1, /* Data Overrun Status                 */
+    SJA_SR_TBS           = 1<<2, /* Transmit Buffer Status              */
+    SJA_SR_ES            = 1<<6, /* Error Status                        */
+    SJA_SR_BS            = 1<<7  /* Bus Status                          */
+};
+
+/* Interrupt register */
+enum SJA1000_PELI_IR {
+    SJA_IR_RI           = 1,     /* Receive Interrupt                   */
+    SJA_IR_TI           = 1<<1,  /* Transmit Interrupt                  */
+    SJA_IR_EI           = 1<<2,  /* Error Warning Interrupt             */
+    SJA_IR_DOI          = 1<<3,  /* Data Overrun Interrupt              */
+    SJA_IR_WUI          = 1<<4,  /* Wake-Up Interrupt                   */
+    SJA_IR_EPI          = 1<<5,  /* Error Passive Interrupt             */
+    SJA_IR_ALI          = 1<<6,  /* Arbitration Lost Interrupt          */
+    SJA_IR_BEI          = 1<<7,  /* Bus Error Interrupt                 */
+};
+
+/* Interrupt enable register */
+enum SJA1000_PELI_IER {
+    SJA_IER_RIE         = 1,     /* Receive Interrupt Enable            */
+    SJA_IER_TIE         = 1<<1,  /* Transmit Interrupt Enable           */
+    SJA_IER_EIE         = 1<<2,  /* Error Warning Interrupt Enable      */
+    SJA_IER_DOIE        = 1<<3,  /* Data Overrun Interrupt Enable       */
+    SJA_IER_WUIE        = 1<<4,  /* Wake-Up Interrupt Enable            */
+    SJA_IER_EPIE        = 1<<5,  /* Error Passive Interrupt Enable      */
+    SJA_IER_ALIE        = 1<<6,  /* Arbitration Lost Interrupt Enable   */
+    SJA_IER_BEIE        = 1<<7,  /* Bus Error Interrupt Enable          */
+};
+
+/* Bus timing register 0 */
+enum SJA1000_PELI_BTR0 {
+    /* Period of the CAN system clock t_SCl
+     * (t_CLK = time period of XTAL frequency) */
+    SJA_BTR0_T_SCL_2_T_CLK  = 0,    /* t_SCl = 2 x t_CLK                 */
+    SJA_BTR0_T_SCL_4_T_CLK  = 1,    /* t_SCl = 4 x t_CLK                 */
+    SJA_BTR0_T_SCL_6_T_CLK  = 2,    /* t_SCl = 6 x t_CLK                 */
+    SJA_BTR0_T_SCL_8_T_CLK  = 3,    /* t_SCl = 8 x t_CLK                 */
+    SJA_BTR0_T_SCL_10_T_CLK = 4,    /* t_SCl = 10 x t_CLK                */
+    SJA_BTR0_T_SCL_12_T_CLK = 5,    /* t_SCl = 12 x t_CLK                */
+    SJA_BTR0_T_SCL_14_T_CLK = 6,    /* t_SCl = 14 x t_CLK                */
+    SJA_BTR0_T_SCL_16_T_CLK = 7,    /* t_SCl = 16 x t_CLK                */
+    SJA_BTR0_T_SCL_20_T_CLK = 9,    /* t_SCl = 20 x t_CLK                */
+    SJA_BTR0_T_SCL_40_T_CLK = 19,   /* t_SCl = 40 x t_CLK                */
+    SJA_BTR0_T_SCL_100_T_CLK = 49,  /* t_SCl = 100 x t_CLK               */
+};
+
+/* Bus timing register 1 */
+enum SJA1000_PELI_BTR1 {
+    /* Time segment 1 */
+    SJA_BTR1_T_SEG1_1_T_SCL = 0,    /* t_SEG1 = 1 x t_SCl               */
+    SJA_BTR1_T_SEG1_2_T_SCL = 1,    /* t_SEG1 = 2 x t_SCl               */
+    SJA_BTR1_T_SEG1_3_T_SCL = 2,    /* t_SEG1 = 3 x t_SCl               */
+    SJA_BTR1_T_SEG1_4_T_SCL = 3,    /* t_SEG1 = 4 x t_SCl               */
+    SJA_BTR1_T_SEG1_5_T_SCL = 4,    /* t_SEG1 = 5 x t_SCl               */
+    SJA_BTR1_T_SEG1_6_T_SCL = 5,    /* t_SEG1 = 6 x t_SCl               */
+    SJA_BTR1_T_SEG1_7_T_SCL = 6,    /* t_SEG1 = 7 x t_SCl               */
+    SJA_BTR1_T_SEG1_8_T_SCL = 7,    /* t_SEG1 = 8 x t_SCl               */
+    /* Time segment 2 */
+    SJA_BTR1_T_SEG2_1_T_SCL = 0<<4, /* t_SEG2 = 1 x t_SCl               */
+    SJA_BTR1_T_SEG2_2_T_SCL = 1<<4, /* t_SEG2 = 2 x t_SCl               */
+    SJA_BTR1_T_SEG2_3_T_SCL = 2<<4, /* t_SEG2 = 3 x t_SCl               */
+    SJA_BTR1_T_SEG2_4_T_SCL = 3<<4, /* t_SEG2 = 4 x t_SCl               */
+    SJA_BTR1_T_SEG2_5_T_SCL = 4<<4, /* t_SEG2 = 5 x t_SCl               */
+    SJA_BTR1_T_SEG2_6_T_SCL = 5<<4, /* t_SEG2 = 6 x t_SCl               */
+    SJA_BTR1_T_SEG2_7_T_SCL = 6<<4, /* t_SEG2 = 7 x t_SCl               */
+    SJA_BTR1_T_SEG2_8_T_SCL = 7<<4, /* t_SEG2 = 8 x t_SCl               */
+};
+
+/* One bit time = t_SCl + t_SEG1 + t_SEG2 */
+
+
+/* Output control register */
+enum SJA1000_PELI_OCR {
+    SJA_OCR_MODE_BIPHASE = 0,
+    SJA_OCR_MODE_TEST    = 1,
+    SJA_OCR_MODE_NORMAL  = 2,
+    SJA_OCR_MODE_CLOCK   = 3,
+    SJA_OCR_TX0_INVERT   = 1<<2,
+    SJA_OCR_TX0_PULLDOWN = 1<<3,
+    SJA_OCR_TX0_PULLUP   = 2<<3,
+    SJA_OCR_TX0_PUSHPULL = 3<<3,
+    SJA_OCR_TX1_INVERT   = 1<<5,
+    SJA_OCR_TX1_PULLDOWN = 1<<6,
+    SJA_OCR_TX1_PULLUP   = 2<<6,
+    SJA_OCR_TX1_PUSHPULL = 3<<6
+};
+
+/* Error code capture register */
+enum SJA1000_PELI_ECC {
+    /* The segmentation field gives information about the location of
+     * errors on the bus */
+    SJA_ECC_SEG_MASK     = 31,   /* Segmentation field mask             */
+    SJA_ECC_DIR          = 1<<5, /* Transfer direction                  */
+    SJA_ECC_ERR_BIT      = 0<<6,
+    SJA_ECC_ERR_FORM     = 1<<6,
+    SJA_ECC_ERR_STUFF    = 2<<6,
+    SJA_ECC_ERR_MASK     = 3<<6  /* Error code mask                     */
+};
+
+/* Frame information register */
+enum SJA1000_PELI_FIR {
+    SJA_FIR_DLC_MASK     = 15,   /* Data length code mask               */
+    SJA_FIR_RTR          = 1<<6, /* Remote transmission request         */
+    SJA_FIR_EFF          = 1<<7  /* Extended frame format               */
+};
+
+/* Clock divider register */
+enum SJA1000_PELI_CDR {
+    SJA_CDR_CLKOUT_MASK  = 0x07,
+    SJA_CDR_CLK_OFF      = 1<<3, /* Clock off (CLKOUT pin)              */
+    SJA_CDR_CBP          = 1<<6, /* CAN input comparator bypass         */
+    SJA_CDR_CAN_MODE     = 1<<7  /* CAN mode: 1 = PeliCAN               */
+};
+
+#endif  /* __SJA1000_REGS_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig
new file mode 100644
index 0000000..c257444
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Kconfig
@@ -0,0 +1,72 @@
+menu "Real-time GPIO drivers"
+
+config XENO_DRIVERS_GPIO
+       bool "GPIO controller"
+       depends on GPIOLIB
+       help
+
+       Real-time capable GPIO module.
+
+if XENO_DRIVERS_GPIO
+
+config XENO_DRIVERS_GPIO_BCM2835
+	depends on MACH_BCM2708 || ARCH_BCM2835
+	tristate "Support for BCM2835 GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Broadcom's BCM2835 SoC.
+
+config XENO_DRIVERS_GPIO_MXC
+	depends on GPIO_MXC
+	tristate "Support for MXC GPIOs"
+	help
+
+	Suitable for the GPIO controller available from
+	Freescale/NXP's MXC architecture.
+
+config XENO_DRIVERS_GPIO_SUN8I_H3
+	depends on MACH_SUN8I && PINCTRL_SUN8I_H3
+	tristate "Support for SUN8I H3 GPIOs"
+	help
+
+	Suitable for the GPIO controller available from Allwinner's H3
+	SoC, as found on the NanoPI boards.
+
+config XENO_DRIVERS_GPIO_ZYNQ7000
+	depends on ARCH_ZYNQ || ARCH_ZYNQMP
+	tristate "Support for Zynq7000 GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Xilinx's Zynq7000 SoC.
+
+config XENO_DRIVERS_GPIO_XILINX
+	depends on ARCH_ZYNQ || ARCH_ZYNQMP
+	tristate "Support for Xilinx GPIOs"
+	help
+
+	Enables support for the GPIO controller available from
+	Xilinx's softcore IP.
+
+config XENO_DRIVERS_GPIO_OMAP
+	depends on ARCH_OMAP2PLUS || ARCH_OMAP
+	tristate "Support for OMAP GPIOs"
+	help
+
+	Enables support for the GPIO controller available from the
+	OMAP family of SoCs.
+
+config XENO_DRIVERS_GPIO_CHERRYVIEW
+	depends on PINCTRL_CHERRYVIEW
+	tristate "Support for Cherryview GPIOs"
+	help
+
+	Enables support for the Intel Cherryview GPIO controller.
+
+config XENO_DRIVERS_GPIO_DEBUG
+       bool "Enable GPIO core debugging features"
+
+endif
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile
new file mode 100644
index 0000000..e534eab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/Makefile
@@ -0,0 +1,18 @@
+ccflags-$(CONFIG_XENO_DRIVERS_GPIO_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_XENO_DRIVERS_GPIO_BCM2835) += xeno-gpio-bcm2835.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_MXC) += xeno-gpio-mxc.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_SUN8I_H3) += xeno-gpio-sun8i-h3.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_ZYNQ7000) += xeno-gpio-zynq7000.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_XILINX) += xeno-gpio-xilinx.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_OMAP) += xeno-gpio-omap.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO_CHERRYVIEW) += xeno-gpio-cherryview.o
+obj-$(CONFIG_XENO_DRIVERS_GPIO) += gpio-core.o
+
+xeno-gpio-bcm2835-y := gpio-bcm2835.o
+xeno-gpio-mxc-y := gpio-mxc.o
+xeno-gpio-sun8i-h3-y := gpio-sun8i-h3.o
+xeno-gpio-zynq7000-y := gpio-zynq7000.o
+xeno-gpio-xilinx-y := gpio-xilinx.o
+xeno-gpio-omap-y := gpio-omap.o
+xeno-gpio-cherryview-y := gpio-cherryview.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c
new file mode 100644
index 0000000..c379e6c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-bcm2835.c
@@ -0,0 +1,37 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_BCM2835  1
+
+static int __init bcm2835_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "brcm,bcm2835-gpio",
+				     RTDM_SUBCLASS_BCM2835);
+}
+module_init(bcm2835_gpio_init);
+
+static void __exit bcm2835_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_BCM2835);
+}
+module_exit(bcm2835_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c
new file mode 100644
index 0000000..1234a3e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-cherryview.c
@@ -0,0 +1,42 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * @note Copyright (C) 2021 Hongzhan Chen <hongzhan.chen@intel.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_CHERRYVIEW  7
+
+static const char *label_array[] = {
+	"INT33FF:00",
+	"INT33FF:01",
+	"INT33FF:02",
+	"INT33FF:03",
+};
+
+static int __init cherryview_gpio_init(void)
+{
+	return rtdm_gpiochip_array_find(NULL, label_array,
+					ARRAY_SIZE(label_array),
+					RTDM_SUBCLASS_CHERRYVIEW);
+}
+module_init(cherryview_gpio_init);
+
+static void __exit cherryview_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_CHERRYVIEW);
+}
+module_exit(cherryview_gpio_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c
new file mode 100644
index 0000000..f67a5bf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-core.c
@@ -0,0 +1,691 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/gpio.h>
+#include <linux/irq.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <rtdm/gpio.h>
+
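+/*
+ * Per-descriptor channel state: tracks what each open file did to its
+ * pin (request, direction, IRQ binding, timestamping) so it can be
+ * undone on close.
+ */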
+struct rtdm_gpio_chan {
+	int requested : 1,
+		has_direction : 1,
+		is_output : 1,
+	        is_interrupt : 1,
+		want_timestamp : 1;
+};
+
+static LIST_HEAD(rtdm_gpio_chips);
+
+static DEFINE_MUTEX(chip_lock);
+
+static int gpio_pin_interrupt(rtdm_irq_t *irqh)
+{
+	struct rtdm_gpio_pin *pin;
+
+	pin = rtdm_irq_get_arg(irqh, struct rtdm_gpio_pin);
+
+	if (pin->monotonic_timestamp)
+		pin->timestamp = rtdm_clock_read_monotonic();
+	else
+		pin->timestamp = rtdm_clock_read();
+	rtdm_event_signal(&pin->event);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int request_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin,
+			    struct rtdm_gpio_chan *chan,
+			    int trigger)
+{
+	int ret, irq_trigger, irq;
+
+	if (trigger & ~GPIO_TRIGGER_MASK)
+		return -EINVAL;
+
+	if (!chan->requested) {
+		ret = gpio_request(gpio, pin->name);
+		if (ret) {
+			if (ret != -EPROBE_DEFER)
+				printk(XENO_ERR
+				       "cannot request GPIO%d\n", gpio);
+			return ret;
+		}
+		chan->requested = true;
+	}
+
+	ret = gpio_direction_input(gpio);
+	if (ret) {
+		printk(XENO_ERR "cannot set GPIO%d as input\n", gpio);
+		goto fail;
+	}
+
+	chan->has_direction = true;
+	gpio_export(gpio, true);
+
+	rtdm_event_clear(&pin->event);
+
+	/*
+	 * Attempt to hook the interrupt associated to that pin. We
+	 * might fail getting a valid IRQ number, in case the GPIO
+	 * chip did not define any mapping handler (->to_irq). If so,
+	 * just assume that either we have no IRQ indeed, or interrupt
+	 * handling may be open coded elsewhere.
+	 */
+	irq = gpio_to_irq(gpio);
+	if (irq < 0)
+		goto done;
+
+	irq_trigger = 0;
+	if (trigger & GPIO_TRIGGER_EDGE_RISING)
+		irq_trigger |= IRQ_TYPE_EDGE_RISING;
+	if (trigger & GPIO_TRIGGER_EDGE_FALLING)
+		irq_trigger |= IRQ_TYPE_EDGE_FALLING;
+	if (trigger & GPIO_TRIGGER_LEVEL_HIGH)
+		irq_trigger |= IRQ_TYPE_LEVEL_HIGH;
+	if (trigger & GPIO_TRIGGER_LEVEL_LOW)
+		irq_trigger |= IRQ_TYPE_LEVEL_LOW;
+
+	if (irq_trigger)
+		irq_set_irq_type(irq, irq_trigger);
+	
+	ret = rtdm_irq_request(&pin->irqh, irq, gpio_pin_interrupt,
+			       0, pin->name, pin);
+	if (ret) {
+		printk(XENO_ERR "cannot request GPIO%d interrupt\n", gpio);
+		goto fail;
+	}
+
+
+	rtdm_irq_enable(&pin->irqh);
+done:
+	chan->is_interrupt = true;
+
+	return 0;
+fail:
+	gpio_free(gpio);
+	chan->requested = false;
+
+	return ret;
+}
+
+static void release_gpio_irq(unsigned int gpio, struct rtdm_gpio_pin *pin,
+			     struct rtdm_gpio_chan *chan)
+{
+	if (chan->is_interrupt) {
+		rtdm_irq_free(&pin->irqh);
+		chan->is_interrupt = false;
+	}
+	gpio_free(gpio);
+	chan->requested = false;
+}
+
+static int gpio_pin_ioctl_nrt(struct rtdm_fd *fd,
+			      unsigned int request, void *arg)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	int ret = 0, val, trigger;
+	struct rtdm_gpio_pin *pin;
+	
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	switch (request) {
+	case GPIO_RTIOC_DIR_OUT:
+		ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val));
+		if (ret)
+			return ret;
+		ret = gpio_direction_output(gpio, val);
+		if (ret == 0) {
+			chan->has_direction = true;
+			chan->is_output = true;
+		}
+		break;
+	case GPIO_RTIOC_DIR_IN:
+		ret = gpio_direction_input(gpio);
+		if (ret == 0)
+			chan->has_direction = true;
+		break;
+	case GPIO_RTIOC_IRQEN:
+		if (chan->is_interrupt) {
+			return -EBUSY;
+		}
+		ret = rtdm_safe_copy_from_user(fd, &trigger,
+					       arg, sizeof(trigger));
+		if (ret)
+			return ret;
+		ret = request_gpio_irq(gpio, pin, chan, trigger);
+		break;
+	case GPIO_RTIOC_IRQDIS:
+		if (chan->is_interrupt) {
+			release_gpio_irq(gpio, pin, chan);
+			chan->requested = false;
+			chan->is_interrupt = false;
+		}
+		break;
+	case GPIO_RTIOC_REQS:
+		ret = gpio_request(gpio, pin->name);
+		if (ret)
+			return ret;
+		chan->requested = true;
+		break;
+	case GPIO_RTIOC_RELS:
+		gpio_free(gpio);
+		chan->requested = false;
+		break;
+	case GPIO_RTIOC_TS_MONO:
+	case GPIO_RTIOC_TS_REAL:
+		ret = rtdm_safe_copy_from_user(fd, &val, arg, sizeof(val));
+		if (ret)
+			return ret;
+		chan->want_timestamp = !!val;
+		pin->monotonic_timestamp = request == GPIO_RTIOC_TS_MONO;
+		break;
+	default:
+		return -EINVAL;
+	}
+	
+	return ret;
+}
+
+static ssize_t gpio_pin_read_rt(struct rtdm_fd *fd,
+				void __user *buf, size_t len)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_readout rdo;
+	struct rtdm_gpio_pin *pin;
+	int ret;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (chan->is_output)
+		return -EINVAL;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	if (chan->want_timestamp) {
+		if (len < sizeof(rdo))
+			return -EINVAL;
+
+		if (!(fd->oflags & O_NONBLOCK)) {
+			ret = rtdm_event_wait(&pin->event);
+			if (ret)
+				return ret;
+			rdo.timestamp = pin->timestamp;
+		} else if (pin->monotonic_timestamp) {
+			rdo.timestamp = rtdm_clock_read_monotonic();
+		} else {
+			rdo.timestamp = rtdm_clock_read();
+		}
+
+		len = sizeof(rdo);
+		rdo.value = gpiod_get_raw_value(pin->desc);
+		ret = rtdm_safe_copy_to_user(fd, buf, &rdo, len);
+	} else {
+		if (len < sizeof(rdo.value))
+			return -EINVAL;
+
+		if (!(fd->oflags & O_NONBLOCK)) {
+			ret = rtdm_event_wait(&pin->event);
+			if (ret)
+				return ret;
+		}
+
+		len = sizeof(rdo.value);
+		rdo.value = gpiod_get_raw_value(pin->desc);
+		ret = rtdm_safe_copy_to_user(fd, buf, &rdo.value, len);
+	}
+	
+	return ret ?: len;
+}
+
+static ssize_t gpio_pin_write_rt(struct rtdm_fd *fd,
+				 const void __user *buf, size_t len)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_pin *pin;
+	int value, ret;
+
+	if (len < sizeof(value))
+		return -EINVAL;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (!chan->is_output)
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_from_user(fd, &value, buf, sizeof(value));
+	if (ret)
+		return ret;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+	gpiod_set_raw_value(pin->desc, value);
+
+	return sizeof(value);
+}
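+
+/*
+ * A minimal userland sketch of the read/write path (hypothetical
+ * device path, assuming the libcobalt-wrapped POSIX API):
+ *
+ *	int fd = open("/dev/rtdm/pinctrl-bcm2835/gpio17", O_RDWR);
+ *	int val = 1;
+ *	ioctl(fd, GPIO_RTIOC_DIR_OUT, &val);	// configure as output, driven high
+ *	write(fd, &val, sizeof(val));		// update the pin value
+ */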
+
+static int gpio_pin_select(struct rtdm_fd *fd, struct xnselector *selector,
+			   unsigned int type, unsigned int index)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	struct rtdm_gpio_pin *pin;
+
+	if (!chan->has_direction)
+		return -EAGAIN;
+
+	if (chan->is_output)
+		return -EINVAL;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+
+	return rtdm_event_select(&pin->event, selector, type, index);
+}
+
+int gpio_pin_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	int ret = 0;
+	struct rtdm_gpio_pin *pin;
+
+	pin = container_of(dev, struct rtdm_gpio_pin, dev);
+	ret = gpio_request(gpio, pin->name);
+	if (ret) {
+		printk(XENO_ERR "failed to request pin %d: %d\n", gpio, ret);
+		return ret;
+	}
+	chan->requested = true;
+
+	return 0;
+}
+
+static void gpio_pin_close(struct rtdm_fd *fd)
+{
+	struct rtdm_gpio_chan *chan = rtdm_fd_to_private(fd);
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+	unsigned int gpio = rtdm_fd_minor(fd);
+	struct rtdm_gpio_pin *pin;
+
+	if (chan->requested) {
+		pin = container_of(dev, struct rtdm_gpio_pin, dev);
+		release_gpio_irq(gpio, pin, chan);
+	}
+}
+
+static void delete_pin_devices(struct rtdm_gpio_chip *rgc)
+{
+	struct rtdm_gpio_pin *pin;
+	struct rtdm_device *dev;
+	int offset;
+
+	for (offset = 0; offset < rgc->gc->ngpio; offset++) {
+		pin = rgc->pins + offset;
+		dev = &pin->dev;
+		rtdm_dev_unregister(dev);
+		rtdm_event_destroy(&pin->event);
+		kfree(dev->label);
+		kfree(pin->name);
+	}
+}
+
+static int create_pin_devices(struct rtdm_gpio_chip *rgc)
+{
+	struct gpio_chip *gc = rgc->gc;
+	struct rtdm_gpio_pin *pin;
+	struct rtdm_device *dev;
+	int offset, ret, gpio;
+
+	for (offset = 0; offset < gc->ngpio; offset++) {
+		ret = -ENOMEM;
+		gpio = gc->base + offset;
+		pin = rgc->pins + offset;
+		pin->name = kasprintf(GFP_KERNEL, "gpio%d", gpio);
+		if (pin->name == NULL)
+			goto fail_name;
+		pin->desc = gpio_to_desc(gpio);
+		if (pin->desc == NULL) {
+			ret = -ENODEV;
+			goto fail_desc;
+		}
+		dev = &pin->dev;
+		dev->driver = &rgc->driver;
+		dev->label = kasprintf(GFP_KERNEL, "%s/gpio%%d", gc->label);
+		if (dev->label == NULL)
+			goto fail_label;
+		dev->minor = gpio;
+		dev->device_data = rgc;
+		ret = rtdm_dev_register(dev);
+		if (ret)
+			goto fail_register;
+		rtdm_event_init(&pin->event, 0);
+	}
+
+	return 0;
+
+fail_register:
+	kfree(dev->label);
+fail_desc:
+fail_label:
+	kfree(pin->name);
+fail_name:
+	delete_pin_devices(rgc);
+
+	return ret;
+}
+
+static char *gpio_pin_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s/%s",
+			 dev->class->name,
+			 dev_name(dev));
+}
+
+int rtdm_gpiochip_add(struct rtdm_gpio_chip *rgc,
+		      struct gpio_chip *gc, int gpio_subclass)
+{
+	int ret;
+
+	rgc->devclass = class_create(gc->owner, gc->label);
+	if (IS_ERR(rgc->devclass)) {
+		printk(XENO_ERR "cannot create sysfs class\n");
+		return PTR_ERR(rgc->devclass);
+	}
+	rgc->devclass->devnode = gpio_pin_devnode;
+
+	rgc->driver.profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(rtdm_gpio_chip,
+				  RTDM_CLASS_GPIO,
+				  gpio_subclass,
+				  0);
+	rgc->driver.device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR;
+	rgc->driver.base_minor = gc->base;
+	rgc->driver.device_count = gc->ngpio;
+	rgc->driver.context_size = sizeof(struct rtdm_gpio_chan);
+	rgc->driver.ops = (struct rtdm_fd_ops){
+		.open		=	gpio_pin_open,
+		.close		=	gpio_pin_close,
+		.ioctl_nrt	=	gpio_pin_ioctl_nrt,
+		.read_rt	=	gpio_pin_read_rt,
+		.write_rt	=	gpio_pin_write_rt,
+		.select		=	gpio_pin_select,
+	};
+	
+	rtdm_drv_set_sysclass(&rgc->driver, rgc->devclass);
+
+	rgc->gc = gc;
+	rtdm_lock_init(&rgc->lock);
+
+	ret = create_pin_devices(rgc);
+	if (ret)
+		class_destroy(rgc->devclass);
+	
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_add);
+
+struct rtdm_gpio_chip *
+rtdm_gpiochip_alloc(struct gpio_chip *gc, int gpio_subclass)
+{
+	struct rtdm_gpio_chip *rgc;
+	size_t asize;
+	int ret;
+
+	if (gc->ngpio == 0)
+		return ERR_PTR(-EINVAL);
+
+	asize = sizeof(*rgc) + gc->ngpio * sizeof(struct rtdm_gpio_pin);
+	rgc = kzalloc(asize, GFP_KERNEL);
+	if (rgc == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_gpiochip_add(rgc, gc, gpio_subclass);
+	if (ret) {
+		kfree(rgc);
+		return ERR_PTR(ret);
+	}
+
+	mutex_lock(&chip_lock);
+	list_add(&rgc->next, &rtdm_gpio_chips);
+	mutex_unlock(&chip_lock);
+
+	return rgc;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_alloc);
+
+void rtdm_gpiochip_remove(struct rtdm_gpio_chip *rgc)
+{
+	mutex_lock(&chip_lock);
+	list_del(&rgc->next);
+	mutex_unlock(&chip_lock);
+	delete_pin_devices(rgc);
+	class_destroy(rgc->devclass);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove);
+
+int rtdm_gpiochip_post_event(struct rtdm_gpio_chip *rgc,
+			     unsigned int offset)
+{
+	struct rtdm_gpio_pin *pin;
+
+	if (offset >= rgc->gc->ngpio)
+		return -EINVAL;
+
+	pin = rgc->pins + offset;
+	if (pin->monotonic_timestamp)
+		pin->timestamp = rtdm_clock_read_monotonic();
+	else
+		pin->timestamp = rtdm_clock_read();
+	rtdm_event_signal(&pin->event);
+	
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_post_event);
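+
+/*
+ * A sketch of the intended use of rtdm_gpiochip_post_event()
+ * (hypothetical driver code; my_pinctrl and read_pending_offset are
+ * illustrative only): a controller demuxing its own out-of-band GPIO
+ * interrupt can signal waiting readers directly:
+ *
+ *	static int pinctrl_irq_handler(rtdm_irq_t *irqh)
+ *	{
+ *		struct my_pinctrl *pc =
+ *			rtdm_irq_get_arg(irqh, struct my_pinctrl);
+ *
+ *		rtdm_gpiochip_post_event(pc->rgc, read_pending_offset(pc));
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ */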
+
+static int gpiochip_match_name(struct gpio_chip *chip, void *data)
+{
+	const char *name = data;
+
+	return !strcmp(chip->label, name);
+}
+
+static struct gpio_chip *find_chip_by_name(const char *name)
+{
+	return gpiochip_find((void *)name, gpiochip_match_name);
+}
+
+int rtdm_gpiochip_add_by_name(struct rtdm_gpio_chip *rgc,
+			      const char *label, int gpio_subclass)
+{
+	struct gpio_chip *gc = find_chip_by_name(label);
+
+	if (gc == NULL)
+		return -EPROBE_DEFER;
+
+	return rtdm_gpiochip_add(rgc, gc, gpio_subclass);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_add_by_name);
+
+int rtdm_gpiochip_find(struct device_node *from, const char *label, int type)
+{
+	struct rtdm_gpio_chip *rgc;
+	struct gpio_chip *chip;
+	int ret = -ENODEV;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	chip = find_chip_by_name(label);
+	if (chip == NULL)
+		return ret;
+
+	ret = 0;
+	rgc = rtdm_gpiochip_alloc(chip, type);
+	if (IS_ERR(rgc))
+		ret = PTR_ERR(rgc);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_find);
+
+int rtdm_gpiochip_array_find(struct device_node *from, const char *label[],
+			     int nentries, int type)
+{
+	int ret = -ENODEV, _ret, n;
+
+	for (n = 0; n < nentries; n++) {
+		_ret = rtdm_gpiochip_find(from, label[n], type);
+		if (_ret) {
+			if (_ret != -ENODEV)
+				return _ret;
+		} else
+			ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_array_find);
+
+#ifdef CONFIG_OF
+
+#include <linux/of_platform.h>
+
+struct gpiochip_holder {
+	struct gpio_chip *chip;
+	struct list_head next;
+};
+	
+struct gpiochip_match_data {
+	struct device *parent;
+	struct list_head list;
+};
+
+static int match_gpio_chip(struct gpio_chip *gc, void *data)
+{
+	struct gpiochip_match_data *d = data;
+	struct gpiochip_holder *h;
+
+	if (cobalt_gpiochip_dev(gc) == d->parent) {
+		h = kmalloc(sizeof(*h), GFP_KERNEL);
+		if (h) {
+			h->chip = gc;
+			list_add(&h->next, &d->list);
+		}
+	}
+
+	/*
+	 * Iterate over all existing GPIO chips, we may have several
+	 * hosted by the same pin controller mapping different ranges.
+	 */
+	return 0;
+}
+
+int rtdm_gpiochip_scan_of(struct device_node *from, const char *compat,
+			  int type)
+{
+	struct gpiochip_match_data match;
+	struct gpiochip_holder *h, *n;
+	struct device_node *np = from;
+	struct platform_device *pdev;
+	struct rtdm_gpio_chip *rgc;
+	int ret = -ENODEV, _ret;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (;;) {
+		np = of_find_compatible_node(np, NULL, compat);
+		if (np == NULL)
+			break;
+		pdev = of_find_device_by_node(np);
+		of_node_put(np);
+		if (pdev == NULL)
+			break;
+		match.parent = &pdev->dev;
+		INIT_LIST_HEAD(&match.list);
+		gpiochip_find(&match, match_gpio_chip);
+		if (!list_empty(&match.list)) {
+			ret = 0;
+			list_for_each_entry_safe(h, n, &match.list, next) {
+				list_del(&h->next);
+				_ret = 0;
+				rgc = rtdm_gpiochip_alloc(h->chip, type);
+				if (IS_ERR(rgc))
+					_ret = PTR_ERR(rgc);
+				kfree(h);
+				if (_ret && !ret)
+					ret = _ret;
+			}
+			if (ret)
+				break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_of);
+
+int rtdm_gpiochip_scan_array_of(struct device_node *from,
+				const char *compat[],
+				int nentries, int type)
+{
+	int ret = -ENODEV, _ret, n;
+
+	for (n = 0; n < nentries; n++) {
+		_ret = rtdm_gpiochip_scan_of(from, compat[n], type);
+		if (_ret) {
+			if (_ret != -ENODEV)
+				return _ret;
+		} else
+			ret = 0;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_scan_array_of);
+
+#endif /* CONFIG_OF */
+
+void rtdm_gpiochip_remove_by_type(int type)
+{
+	struct rtdm_gpio_chip *rgc, *n;
+
+	mutex_lock(&chip_lock);
+
+	list_for_each_entry_safe(rgc, n, &rtdm_gpio_chips, next) {
+		if (rgc->driver.profile_info.subclass_id == type) {
+			mutex_unlock(&chip_lock);
+			rtdm_gpiochip_remove(rgc);
+			kfree(rgc);
+			mutex_lock(&chip_lock);
+		}
+	}
+
+	mutex_unlock(&chip_lock);
+}
+EXPORT_SYMBOL_GPL(rtdm_gpiochip_remove_by_type);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c
new file mode 100644
index 0000000..ccc41da
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-mxc.c
@@ -0,0 +1,42 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_MXC  2
+
+static const char *compat_array[] = {
+	"fsl,imx6q-gpio",
+	"fsl,imx7d-gpio",
+};
+
+static int __init mxc_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_array_of(NULL, compat_array,
+					   ARRAY_SIZE(compat_array),
+					   RTDM_SUBCLASS_MXC);
+}
+module_init(mxc_gpio_init);
+
+static void __exit mxc_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_MXC);
+}
+module_exit(mxc_gpio_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c
new file mode 100644
index 0000000..ea213a3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-omap.c
@@ -0,0 +1,43 @@
+/**
+ * @note Copyright (C) 2020 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_OMAP  6
+
+static const char *compat_array[] = {
+	"ti,omap4-gpio",
+	"ti,omap3-gpio",
+	"ti,omap2-gpio",
+};
+
+static int __init omap_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_array_of(NULL, compat_array,
+					   ARRAY_SIZE(compat_array),
+					   RTDM_SUBCLASS_OMAP);
+}
+module_init(omap_gpio_init);
+
+static void __exit omap_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_OMAP);
+}
+module_exit(omap_gpio_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c
new file mode 100644
index 0000000..55059ef
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-sun8i-h3.c
@@ -0,0 +1,43 @@
+/**
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_H3  3
+
+static int __init h3_gpio_init(void)
+{
+	int ret;
+
+	ret = rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-pinctrl",
+				    RTDM_SUBCLASS_H3);
+	if (ret)
+		return ret;
+
+	return rtdm_gpiochip_scan_of(NULL, "allwinner,sun8i-h3-r-pinctrl",
+				     RTDM_SUBCLASS_H3);
+}
+module_init(h3_gpio_init);
+
+static void __exit h3_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_H3);
+}
+module_exit(h3_gpio_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c
new file mode 100644
index 0000000..e9ae3b1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-xilinx.c
@@ -0,0 +1,40 @@
+/**
+ * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This driver controls the GPIOs that can be instantiated in the PL
+ * (programmable logic) of the Zynq SoC.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_XILINX  5
+
+static int __init xilinx_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "xlnx,xps-gpio-1.00.a",
+				     RTDM_SUBCLASS_XILINX);
+}
+module_init(xilinx_gpio_init);
+
+static void __exit xilinx_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_XILINX);
+}
+module_exit(xilinx_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c
new file mode 100644
index 0000000..9997a74
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpio/gpio-zynq7000.c
@@ -0,0 +1,40 @@
+/**
+ * @note Copyright (C) 2017 Greg Gallagher <greg@embeddedgreg.com>
+ *
+ * This driver is inspired by gpio-bcm2835.c; please see the original
+ * file for copyright information.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <rtdm/gpio.h>
+
+#define RTDM_SUBCLASS_ZYNQ7000  4
+
+static int __init zynq7000_gpio_init(void)
+{
+	return rtdm_gpiochip_scan_of(NULL, "xlnx,zynq-gpio-1.0",
+				     RTDM_SUBCLASS_ZYNQ7000);
+}
+module_init(zynq7000_gpio_init);
+
+static void __exit zynq7000_gpio_exit(void)
+{
+	rtdm_gpiochip_remove_by_type(RTDM_SUBCLASS_ZYNQ7000);
+}
+module_exit(zynq7000_gpio_exit);
+
+MODULE_LICENSE("GPL");
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig
new file mode 100644
index 0000000..532742a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Kconfig
@@ -0,0 +1,9 @@
+menu "GPIOPWM support"
+
+config XENO_DRIVERS_GPIOPWM
+	tristate "GPIOPWM driver"
+	help
+
+	An RTDM-based GPIO PWM generator driver
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile
new file mode 100644
index 0000000..8c9d5be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/kernel -I$(srctree)/include/xenomai/
+
+obj-$(CONFIG_XENO_DRIVERS_GPIOPWM) += xeno_gpiopwm.o
+
+xeno_gpiopwm-y := gpiopwm.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c
new file mode 100644
index 0000000..ed42e08
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/gpiopwm/gpiopwm.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2015 Jorge Ramirez <jro@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/slab.h>
+#include <linux/gpio.h>
+#include <linux/module.h>
+#include <rtdm/driver.h>
+#include <rtdm/gpiopwm.h>
+
+MODULE_AUTHOR("Jorge Ramirez <jro@xenomai.org>");
+MODULE_DESCRIPTION("RTDM-based GPIO PWM generator driver");
+MODULE_VERSION("0.0.1");
+MODULE_LICENSE("GPL");
+
+#define MAX_DUTY_CYCLE		100
+#define MAX_SAMPLES		(MAX_DUTY_CYCLE + 1)
+
+struct gpiopwm_base_signal {
+	unsigned long period;
+};
+
+struct gpiopwm_duty_signal {
+	unsigned int range_min;
+	unsigned int range_max;
+	unsigned long period;
+	unsigned int cycle;
+};
+
+struct gpiopwm_control {
+	struct gpiopwm_duty_signal duty;
+	unsigned int configured;
+	unsigned int update;
+};
+
+struct gpiopwm_priv {
+	struct gpiopwm_base_signal base;
+	struct gpiopwm_duty_signal duty;
+	struct gpiopwm_control ctrl;
+
+	rtdm_timer_t base_timer;
+	rtdm_timer_t duty_timer;
+
+	int gpio;
+};
+
+static inline int div100(long long dividend)
+{
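+	/*
+	 * 0x28f5c29 ~= 2^32 / 100: multiplying by it and shifting
+	 * right by 32 approximates x / 100 without a division.
+	 */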
+	const long long divisor = 0x28f5c29;
+	return ((divisor * dividend) >> 32) & 0xffffffff;
+}
+
+static inline unsigned long duty_period(struct gpiopwm_duty_signal *p)
+{
+	unsigned long period;
+
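+	/*
+	 * Interpolate linearly between range_min and range_max by the
+	 * duty cycle percentage; the final x1000 scaling suggests the
+	 * range bounds are given in usec while the timers run on nsec.
+	 */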
+	period = p->range_min + div100((p->range_max - p->range_min) * p->cycle);
+	return period * 1000;
+}
+
+static void gpiopwm_handle_base_timer(rtdm_timer_t *timer)
+{
+	struct gpiopwm_priv *ctx = container_of(timer, struct gpiopwm_priv,
+						base_timer);
+	gpio_set_value(ctx->gpio, 1);
+
+	/* One-shot timer, to avoid carrying over timing errors. */
+	rtdm_timer_start_in_handler(&ctx->duty_timer, ctx->duty.period, 0,
+		RTDM_TIMERMODE_RELATIVE);
+
+	if (ctx->ctrl.update) {
+		ctx->duty.period = ctx->ctrl.duty.period;
+		ctx->duty.cycle = ctx->ctrl.duty.cycle;
+		ctx->ctrl.update = 0;
+	}
+}
+
+static void gpiopwm_handle_duty_timer(rtdm_timer_t *timer)
+{
+	struct gpiopwm_priv *ctx = container_of(timer, struct gpiopwm_priv,
+						duty_timer);
+	gpio_set_value(ctx->gpio, 0);
+}
+
+static inline int gpiopwm_config(struct rtdm_fd *fd, struct gpiopwm *conf)
+{
+	struct rtdm_dev_context *dev_ctx = rtdm_fd_to_context(fd);
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+	int ret;
+
+	if (ctx->ctrl.configured)
+		return -EINVAL;
+
+	if (conf->duty_cycle > MAX_DUTY_CYCLE)
+		return -EINVAL;
+
+	ret = gpio_request(conf->gpio, dev_ctx->device->name);
+	if (ret < 0) {
+		ctx->gpio = -1;
+		return ret;
+	}
+
+	ret = gpio_direction_output(conf->gpio, 0);
+	if (ret < 0) {
+		/* Don't leak the GPIO we just requested. */
+		gpio_free(conf->gpio);
+		ctx->gpio = -1;
+		return ret;
+	}
+
+	gpio_set_value(conf->gpio, 0);
+
+	ctx->duty.range_min = ctx->ctrl.duty.range_min = conf->range_min;
+	ctx->duty.range_max = ctx->ctrl.duty.range_max = conf->range_max;
+	ctx->duty.cycle = conf->duty_cycle;
+	ctx->base.period = conf->period;
+	ctx->gpio = conf->gpio;
+	ctx->duty.period = duty_period(&ctx->duty);
+
+	rtdm_timer_init(&ctx->base_timer, gpiopwm_handle_base_timer, "base_timer");
+	rtdm_timer_init(&ctx->duty_timer, gpiopwm_handle_duty_timer, "duty_timer");
+
+	ctx->ctrl.configured = 1;
+
+	return 0;
+}
+
+static inline int gpiopwm_change_duty_cycle(struct gpiopwm_priv *ctx, unsigned int cycle)
+{
+	if (cycle > MAX_DUTY_CYCLE)
+		return -EINVAL;
+
+	/* prepare the new data on the calling thread */
+	ctx->ctrl.duty.cycle = cycle;
+	ctx->ctrl.duty.period = duty_period(&ctx->ctrl.duty);
+
+	/* update data on the next base signal timeout */
+	ctx->ctrl.update = 1;
+
+	return 0;
+}
+
+static inline int gpiopwm_stop(struct rtdm_fd *fd)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	if (!ctx->ctrl.configured)
+		return -EINVAL;
+
+	gpio_set_value(ctx->gpio, 0);
+
+	rtdm_timer_stop(&ctx->base_timer);
+	rtdm_timer_stop(&ctx->duty_timer);
+
+	return 0;
+}
+
+static inline int gpiopwm_start(struct rtdm_fd *fd)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	if (!ctx->ctrl.configured)
+		return -EINVAL;
+
+	/* update duty cycle on next timeout */
+	ctx->ctrl.update = 1;
+
+	/* start the base signal tick */
+	rtdm_timer_start(&ctx->base_timer, ctx->base.period, ctx->base.period,
+			 RTDM_TIMERMODE_RELATIVE);
+
+	return 0;
+}
+
+static int gpiopwm_ioctl_rt(struct rtdm_fd *fd, unsigned int request, void __user *arg)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case GPIOPWM_RTIOC_SET_CONFIG:
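+		/* Configuration needs Linux context: punt to ioctl_nrt(). */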
+		return -ENOSYS;
+	case GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE:
+		return gpiopwm_change_duty_cycle(ctx, (unsigned long) arg);
+	case GPIOPWM_RTIOC_START:
+		return gpiopwm_start(fd);
+	case GPIOPWM_RTIOC_STOP:
+		return gpiopwm_stop(fd);
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gpiopwm_ioctl_nrt(struct rtdm_fd *fd, unsigned int request, void __user *arg)
+{
+	struct gpiopwm conf;
+
+	switch (request) {
+	case GPIOPWM_RTIOC_SET_CONFIG:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(conf)))
+			return -EFAULT;
+
+		if (rtdm_copy_from_user(fd, &conf, arg, sizeof(conf)))
+			return -EFAULT;
+		return gpiopwm_config(fd, &conf);
+	case GPIOPWM_RTIOC_GET_CONFIG:
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int gpiopwm_open(struct rtdm_fd *fd, int oflags)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	ctx->ctrl.configured = 0;
+	ctx->gpio = -1;
+
+	return 0;
+}
+
+static void gpiopwm_close(struct rtdm_fd *fd)
+{
+	struct gpiopwm_priv *ctx = rtdm_fd_to_private(fd);
+
+	if (ctx->gpio >= 0)
+		gpio_free(ctx->gpio);
+
+	if (!ctx->ctrl.configured)
+		return;
+
+	rtdm_timer_destroy(&ctx->base_timer);
+	rtdm_timer_destroy(&ctx->duty_timer);
+}
+
+static struct rtdm_driver gpiopwm_driver = {
+	.profile_info           = RTDM_PROFILE_INFO(gpiopwm,
+						    RTDM_CLASS_PWM,
+						    RTDM_SUBCLASS_GENERIC,
+						    RTPWM_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 8,
+	.context_size		= sizeof(struct gpiopwm_priv),
+	.ops = {
+		.open		= gpiopwm_open,
+		.close		= gpiopwm_close,
+		.ioctl_rt	= gpiopwm_ioctl_rt,
+		.ioctl_nrt	= gpiopwm_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device device[8] = {
+	[0 ... 7] = {
+		.driver = &gpiopwm_driver,
+		.label = "gpiopwm%d",
+	}
+};
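+
+/*
+ * A minimal user-space sketch of the ioctl sequence this driver
+ * expects (hedged: the device path and the numeric values are
+ * illustrative; the GPIOPWM_RTIOC_* requests and struct gpiopwm
+ * fields match the ones handled above):
+ *
+ *	struct gpiopwm conf = {
+ *		.gpio = 1,
+ *		.period = 20000000,	(base period, ns)
+ *		.range_min = 1000,
+ *		.range_max = 2000,
+ *		.duty_cycle = 50,	(percent)
+ *	};
+ *	int fd = open("/dev/rtdm/gpiopwm0", O_RDWR);
+ *	ioctl(fd, GPIOPWM_RTIOC_SET_CONFIG, &conf);	(NRT context)
+ *	ioctl(fd, GPIOPWM_RTIOC_START);
+ *	ioctl(fd, GPIOPWM_RTIOC_CHANGE_DUTY_CYCLE, 75);
+ *	ioctl(fd, GPIOPWM_RTIOC_STOP);
+ *	close(fd);
+ */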
+
+static int __init __gpiopwm_init(void)
+{
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++) {
+		ret = rtdm_dev_register(device + i);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	while (i-- > 0)
+		rtdm_dev_unregister(device + i);
+
+	return ret;
+}
+
+static void __exit __gpiopwm_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++)
+		rtdm_dev_unregister(device + i);
+}
+
+module_init(__gpiopwm_init);
+module_exit(__gpiopwm_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig
new file mode 100644
index 0000000..104413a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Kconfig
@@ -0,0 +1,81 @@
+menu "Real-time IPC drivers"
+
+config XENO_DRIVERS_RTIPC
+	tristate "RTIPC protocol family"
+	help
+
+	This driver provides the real-time IPC protocol family
+	(PF_RTIPC) over RTDM.
+
+config XENO_DRIVERS_RTIPC_XDDP
+	depends on XENO_DRIVERS_RTIPC
+	select XENO_OPT_PIPE
+	default y
+	bool "XDDP cross-domain datagram protocol"
+	help
+
+	Xenomai's XDDP protocol enables threads to exchange datagrams
+	across the Xenomai/Linux domain boundary, using "message
+	pipes".
+
+	Message pipes are bi-directional FIFO communication channels
+	allowing data exchange between real-time Xenomai threads and
+	regular (i.e. non-real-time) user-space processes. Message
+	pipes are datagram-based and thus natively preserve message
+	boundaries, but they can also be used in byte stream mode when
+	sending from the real-time to the non-real-time domain.
+
+	The maximum number of communication ports available in the
+	system can be configured using the XENO_OPT_PIPE_NRDEV option
+	from the Nucleus menu.
+
+config XENO_DRIVERS_RTIPC_IDDP
+	depends on XENO_DRIVERS_RTIPC
+	select XENO_OPT_MAP
+	default y
+	bool "IDDP intra-domain datagram protocol"
+	help
+
+	Xenomai's IDDP protocol enables real-time threads to exchange
+	datagrams within the Xenomai domain.
+
+config XENO_OPT_IDDP_NRPORT
+	depends on XENO_DRIVERS_RTIPC_IDDP
+	int "Number of IDDP communication ports"
+	default 32
+	help
+
+	This parameter defines the number of IDDP ports available in
+	the system for creating receiver endpoints. Port numbers range
+	from 0 to CONFIG_XENO_OPT_IDDP_NRPORT - 1.
+
+config XENO_DRIVERS_RTIPC_BUFP
+	depends on XENO_DRIVERS_RTIPC
+	select XENO_OPT_MAP
+	default y
+	bool "Buffer protocol"
+	help
+
+	The buffer protocol implements a byte-oriented, one-way
+	Producer-Consumer data path, which makes it a bit faster than
+	datagram-oriented protocols. All messages written are buffered
+	into a single memory area in strict FIFO order, until read by
+	the consumer.
+
+	This protocol prevents short writes, and only allows short
+	reads when a potential deadlock situation arises (i.e. readers
+	and writers would otherwise wait for each other indefinitely),
+	which usually means that the buffer size does not fit the way
+	the peer threads use the protocol.
+
+config XENO_OPT_BUFP_NRPORT
+	depends on XENO_DRIVERS_RTIPC_BUFP
+	int "Number of BUFP communication ports"
+	default 32
+	help
+
+	This parameter defines the number of BUFP ports available in
+	the system for creating receiver endpoints. Port numbers range
+	from 0 to CONFIG_XENO_OPT_BUFP_NRPORT - 1.
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile
new file mode 100644
index 0000000..75fb27c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/Makefile
@@ -0,0 +1,8 @@
+
+obj-$(CONFIG_XENO_DRIVERS_RTIPC) += xeno_rtipc.o
+
+xeno_rtipc-y := rtipc.o
+
+xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_XDDP) += xddp.o
+xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_IDDP) += iddp.o
+xeno_rtipc-$(CONFIG_XENO_DRIVERS_RTIPC_BUFP) += bufp.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c
new file mode 100644
index 0000000..fd533db
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/bufp.c
@@ -0,0 +1,1104 @@
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/map.h>
+#include <cobalt/kernel/bufd.h>
+#include <rtdm/ipc.h>
+#include "internal.h"
+
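+/*
+ * A minimal user-space sketch of the BUFP flow implemented below
+ * (hedged: IPCPROTO_BUFP and the other constants come from the
+ * rtdm/ipc.h uapi; the port and buffer size are arbitrary):
+ *
+ *	int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+ *	size_t bufsz = 16384;
+ *	setsockopt(s, SOL_BUFP, BUFP_BUFSZ, &bufsz, sizeof(bufsz));
+ *	struct sockaddr_ipc name = {
+ *		.sipc_family = AF_RTIPC,
+ *		.sipc_port = 17,
+ *	};
+ *	bind(s, (struct sockaddr *)&name, sizeof(name));
+ *
+ * after which bytes are streamed through the ring buffer in strict
+ * FIFO order via read()/write() or the recvmsg()/sendmsg() variants.
+ */
+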
+#define BUFP_SOCKET_MAGIC 0xa61a61a6
+
+struct bufp_socket {
+	int magic;
+	struct sockaddr_ipc name;
+	struct sockaddr_ipc peer;
+
+	void *bufmem;
+	size_t bufsz;
+	u_long status;
+	xnhandle_t handle;
+	char label[XNOBJECT_NAME_LEN];
+
+	off_t rdoff;
+	off_t rdrsvd;
+	int rdsem;
+	off_t wroff;
+	off_t wrrsvd;
+	int wrsem;
+	size_t fillsz;
+	rtdm_event_t i_event;
+	rtdm_event_t o_event;
+
+	nanosecs_rel_t rx_timeout;
+	nanosecs_rel_t tx_timeout;
+
+	struct rtipc_private *priv;
+};
+
+struct bufp_wait_context {
+	struct rtipc_wait_context wc;
+	size_t len;
+	struct bufp_socket *sk;
+};
+
+static struct sockaddr_ipc nullsa = {
+	.sipc_family = AF_RTIPC,
+	.sipc_port = -1
+};
+
+static struct xnmap *portmap;
+
+#define _BUFP_BINDING   0
+#define _BUFP_BOUND     1
+#define _BUFP_CONNECTED 2
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static char *__bufp_link_target(void *obj)
+{
+	struct bufp_socket *sk = obj;
+
+	return kasformat("%d", sk->name.sipc_port);
+}
+
+extern struct xnptree rtipc_ptree;
+
+static struct xnpnode_link __bufp_pnode = {
+	.node = {
+		.dirname = "bufp",
+		.root = &rtipc_ptree,
+		.ops = &xnregistry_vlink_ops,
+	},
+	.target = __bufp_link_target,
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __bufp_pnode = {
+	.node = {
+		.dirname = "bufp",
+	},
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static int bufp_socket(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state;
+
+	sk->magic = BUFP_SOCKET_MAGIC;
+	sk->name = nullsa;	/* Unbound */
+	sk->peer = nullsa;
+	sk->bufmem = NULL;
+	sk->bufsz = 0;
+	sk->rdoff = 0;
+	sk->wroff = 0;
+	sk->fillsz = 0;
+	sk->rdrsvd = 0;
+	sk->wrrsvd = 0;
+	sk->rdsem = 0;
+	sk->wrsem = 0;
+	sk->status = 0;
+	sk->handle = 0;
+	sk->rx_timeout = RTDM_TIMEOUT_INFINITE;
+	sk->tx_timeout = RTDM_TIMEOUT_INFINITE;
+	*sk->label = 0;
+	rtdm_event_init(&sk->i_event, 0);
+	rtdm_event_init(&sk->o_event, 0);
+	sk->priv = priv;
+
+	return 0;
+}
+
+static void bufp_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state;
+	rtdm_lockctx_t s;
+
+	rtdm_event_destroy(&sk->i_event);
+	rtdm_event_destroy(&sk->o_event);
+
+	if (test_bit(_BUFP_BOUND, &sk->status)) {
+		if (sk->name.sipc_port > -1) {
+			cobalt_atomic_enter(s);
+			xnmap_remove(portmap, sk->name.sipc_port);
+			cobalt_atomic_leave(s);
+		}
+
+		if (sk->handle)
+			xnregistry_remove(sk->handle);
+
+		if (sk->bufmem)
+			xnheap_vfree(sk->bufmem);
+	}
+
+	kfree(sk);
+}
+
+static ssize_t __bufp_readbuf(struct bufp_socket *sk,
+			      struct xnbufd *bufd,
+			      int flags)
+{
+	struct bufp_wait_context wait, *bufwc;
+	struct rtipc_wait_context *wc;
+	struct xnthread *waiter;
+	size_t rbytes, n, avail;
+	ssize_t len, ret, xret;
+	rtdm_toseq_t toseq;
+	rtdm_lockctx_t s;
+	off_t rdoff;
+	int resched;
+
+	len = bufd->b_len;
+
+	rtdm_toseq_init(&toseq, sk->rx_timeout);
+
+	cobalt_atomic_enter(s);
+redo:
+	for (;;) {
+		/*
+		 * We should be able to read a complete message of the
+		 * requested length, or block.
+		 */
+		avail = sk->fillsz - sk->rdrsvd;
+		if (avail < len)
+			goto wait;
+
+		/* Reserve a read slot into the circular buffer. */
+		rdoff = sk->rdoff;
+		sk->rdoff = (rdoff + len) % sk->bufsz;
+		sk->rdrsvd += len;
+		sk->rdsem++;
+		rbytes = ret = len;
+
+		do {
+			if (rdoff + rbytes > sk->bufsz)
+				n = sk->bufsz - rdoff;
+			else
+				n = rbytes;
+			/*
+			 * Drop the lock before copying data to
+			 * user. The read slot is consumed in any
+			 * case: the non-copied portion of the message
+			 * is lost on bad write.
+			 */
+			cobalt_atomic_leave(s);
+			xret = xnbufd_copy_from_kmem(bufd, sk->bufmem + rdoff, n);
+			cobalt_atomic_enter(s);
+			if (xret < 0) {
+				ret = -EFAULT;
+				break;
+			}
+
+			rbytes -= n;
+			rdoff = (rdoff + n) % sk->bufsz;
+		} while (rbytes > 0);
+
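+		/*
+		 * rdsem counts readers still holding a reservation;
+		 * only the last one to complete folds the reserved
+		 * area back into the buffer accounting below.
+		 */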
+		if (--sk->rdsem > 0)
+			goto out;
+
+		resched = 0;
+		if (sk->fillsz == sk->bufsz) /* -> becomes writable */
+			resched |= xnselect_signal(&sk->priv->send_block, POLLOUT);
+
+		sk->fillsz -= sk->rdrsvd;
+		sk->rdrsvd = 0;
+
+		if (sk->fillsz == 0) /* -> becomes non-readable */
+			resched |= xnselect_signal(&sk->priv->recv_block, 0);
+
+		/*
+		 * Wake up all threads pending on the output wait
+		 * queue, if we freed enough room for the leading one
+		 * to post its message.
+		 */
+		waiter = rtipc_peek_wait_head(&sk->o_event);
+		if (waiter == NULL)
+			goto out;
+
+		wc = rtipc_get_wait_context(waiter);
+		XENO_BUG_ON(COBALT, wc == NULL);
+		bufwc = container_of(wc, struct bufp_wait_context, wc);
+		if (bufwc->len + sk->fillsz <= sk->bufsz)
+			/* This call rescheds internally. */
+			rtdm_event_pulse(&sk->o_event);
+		else if (resched)
+			xnsched_run();
+		/*
+		 * We cannot fail anymore once some data has been
+		 * copied via the buffer descriptor, so no need to
+		 * check for any reason to invalidate the latter.
+		 */
+		goto out;
+
+	wait:
+		if (flags & MSG_DONTWAIT) {
+			ret = -EWOULDBLOCK;
+			break;
+		}
+
+		/*
+		 * Check whether writers are already waiting for
+		 * sending data, while we are about to wait for
+		 * receiving some. In such a case, we have a
+		 * pathological use of the buffer. We must allow for a
+		 * short read to prevent a deadlock.
+		 */
+		if (sk->fillsz > 0 && rtipc_peek_wait_head(&sk->o_event)) {
+			len = sk->fillsz;
+			goto redo;
+		}
+
+		wait.len = len;
+		wait.sk = sk;
+		rtipc_prepare_wait(&wait.wc);
+		/*
+		 * Keep the nucleus lock across the wait call, so that
+		 * we don't miss a pulse.
+		 */
+		ret = rtdm_event_timedwait(&sk->i_event,
+					   sk->rx_timeout, &toseq);
+		if (unlikely(ret))
+			break;
+	}
+out:
+	cobalt_atomic_leave(s);
+
+	return ret;
+}
+
+static ssize_t __bufp_recvmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      struct sockaddr_ipc *saddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state;
+	ssize_t len, wrlen, vlen, ret;
+	struct xnbufd bufd;
+	int nvec;
+
+	if (!test_bit(_BUFP_BOUND, &sk->status))
+		return -EAGAIN;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+	/*
+	 * We may only return complete messages to readers, so there
+	 * is no point in waiting for messages which are larger than
+	 * what the buffer can hold.
+	 */
+	if (len > sk->bufsz)
+		return -EINVAL;
+
+	/*
+	 * Write "len" bytes from the buffer to the vector cells. Each
+	 * cell is handled as a separate message.
+	 */
+	for (nvec = 0, wrlen = len; nvec < iovlen && wrlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_readbuf(sk, &bufd, flags);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_readbuf(sk, &bufd, flags);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			return ret;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		wrlen -= vlen;
+		if (ret < vlen)
+			/* Short reads may happen in rare cases. */
+			break;
+	}
+
+	/*
+	 * There is no way to determine who the sender was since we
+	 * process data in byte-oriented mode, so we just copy our own
+	 * sockaddr to send back a valid address.
+	 */
+	if (saddr)
+		*saddr = sk->name;
+
+	return len - wrlen;
+}
+
+static ssize_t bufp_recvmsg(struct rtdm_fd *fd,
+			    struct user_msghdr *msg, int flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct sockaddr_ipc saddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen < sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+	} else if (msg->msg_namelen != 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __bufp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy the updated I/O vector back */
+	if (rtdm_put_iovec(fd, iov, msg, iov_fast))
+		return -EFAULT;
+
+	/* Copy the source address if required. */
+	if (msg->msg_name) {
+		if (rtipc_put_arg(fd, msg->msg_name,
+				  &saddr, sizeof(saddr)))
+			return -EFAULT;
+		msg->msg_namelen = sizeof(struct sockaddr_ipc);
+	}
+
+	return ret;
+}
+
+static ssize_t bufp_read(struct rtdm_fd *fd, void *buf, size_t len)
+{
+	struct iovec iov = { .iov_base = buf, .iov_len = len };
+
+	return __bufp_recvmsg(fd, &iov, 1, 0, NULL);
+}
+
+static ssize_t __bufp_writebuf(struct bufp_socket *rsk,
+			       struct bufp_socket *sk,
+			       struct xnbufd *bufd,
+			       int flags)
+{
+	struct bufp_wait_context wait, *bufwc;
+	struct rtipc_wait_context *wc;
+	struct xnthread *waiter;
+	size_t wbytes, n, avail;
+	ssize_t len, ret, xret;
+	rtdm_toseq_t toseq;
+	rtdm_lockctx_t s;
+	off_t wroff;
+	int resched;
+
+	len = bufd->b_len;
+
+	rtdm_toseq_init(&toseq, sk->tx_timeout);
+
+	cobalt_atomic_enter(s);
+
+	for (;;) {
+		/*
+		 * No short or scattered writes: we should write the
+		 * entire message atomically or block.
+		 */
+		avail = rsk->fillsz + rsk->wrrsvd;
+		if (avail + len > rsk->bufsz)
+			goto wait;
+
+		/* Reserve a write slot into the circular buffer. */
+		wroff = rsk->wroff;
+		rsk->wroff = (wroff + len) % rsk->bufsz;
+		rsk->wrrsvd += len;
+		rsk->wrsem++;
+		wbytes = ret = len;
+
+		do {
+			if (wroff + wbytes > rsk->bufsz)
+				n = rsk->bufsz - wroff;
+			else
+				n = wbytes;
+			/*
+			 * We have to drop the lock while reading in
+			 * data, but we can't rollback on bad read
+			 * from user because some other thread might
+			 * have populated the memory ahead of our
+			 * write slot already: bluntly clear the
+			 * unavailable bytes on copy error.
+			 */
+			cobalt_atomic_leave(s);
+			xret = xnbufd_copy_to_kmem(rsk->bufmem + wroff, bufd, n);
+			cobalt_atomic_enter(s);
+			if (xret < 0) {
+				memset(rsk->bufmem + wroff, 0, n);
+				ret = -EFAULT;
+				break;
+			}
+
+			wbytes -= n;
+			wroff = (wroff + n) % rsk->bufsz;
+		} while (wbytes > 0);
+
+		if (--rsk->wrsem > 0)
+			goto out;
+
+		resched = 0;
+		if (rsk->fillsz == 0) /* -> becomes readable */
+			resched |= xnselect_signal(&rsk->priv->recv_block, POLLIN);
+
+		rsk->fillsz += rsk->wrrsvd;
+		rsk->wrrsvd = 0;
+
+		if (rsk->fillsz == rsk->bufsz) /* becomes non-writable */
+			resched |= xnselect_signal(&rsk->priv->send_block, 0);
+		/*
+		 * Wake up all threads pending on the input wait
+		 * queue, if we accumulated enough data to feed the
+		 * leading one.
+		 */
+		waiter = rtipc_peek_wait_head(&rsk->i_event);
+		if (waiter == NULL)
+			goto out;
+
+		wc = rtipc_get_wait_context(waiter);
+		XENO_BUG_ON(COBALT, wc == NULL);
+		bufwc = container_of(wc, struct bufp_wait_context, wc);
+		if (bufwc->len <= rsk->fillsz)
+			rtdm_event_pulse(&rsk->i_event);
+		else if (resched)
+			xnsched_run();
+		/*
+		 * We cannot fail anymore once some data has been
+		 * copied via the buffer descriptor, so no need to
+		 * check for any reason to invalidate the latter.
+		 */
+		goto out;
+	wait:
+		if (flags & MSG_DONTWAIT) {
+			ret = -EWOULDBLOCK;
+			break;
+		}
+
+		wait.len = len;
+		wait.sk = rsk;
+		rtipc_prepare_wait(&wait.wc);
+		/*
+		 * Keep the nucleus lock across the wait call, so that
+		 * we don't miss a pulse.
+		 */
+		ret = rtdm_event_timedwait(&rsk->o_event,
+					   sk->tx_timeout, &toseq);
+		if (unlikely(ret))
+			break;
+	}
+out:
+	cobalt_atomic_leave(s);
+
+	return ret;
+}
+
+static ssize_t __bufp_sendmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      const struct sockaddr_ipc *daddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state, *rsk;
+	ssize_t len, rdlen, vlen, ret = 0;
+	struct rtdm_fd *rfd;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+	int nvec;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+
+	cobalt_atomic_enter(s);
+	rfd = xnmap_fetch_nocheck(portmap, daddr->sipc_port);
+	if (rfd && rtdm_fd_lock(rfd) < 0)
+		rfd = NULL;
+	cobalt_atomic_leave(s);
+	if (rfd == NULL)
+		return -ECONNRESET;
+
+	rsk = rtipc_fd_to_state(rfd);
+	if (!test_bit(_BUFP_BOUND, &rsk->status)) {
+		rtdm_fd_unlock(rfd);
+		return -ECONNREFUSED;
+	}
+
+	/*
+	 * We may only send complete messages, so there is no point in
+	 * accepting messages which are larger than what the buffer
+	 * can hold.
+	 */
+	if (len > rsk->bufsz) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	/*
+	 * Read "len" bytes to the buffer from the vector cells. Each
+	 * cell is handled as a separate message.
+	 */
+	for (nvec = 0, rdlen = len; nvec < iovlen && rdlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_writebuf(rsk, sk, &bufd, flags);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = __bufp_writebuf(rsk, sk, &bufd, flags);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto fail;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		rdlen -= vlen;
+	}
+
+	rtdm_fd_unlock(rfd);
+
+	return len - rdlen;
+fail:
+	rtdm_fd_unlock(rfd);
+
+	return ret;
+}
+
+static ssize_t bufp_sendmsg(struct rtdm_fd *fd,
+			    const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct bufp_socket *sk = priv->state;
+	struct sockaddr_ipc daddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen != sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+
+		/* Fetch the destination address to send to. */
+		if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr)))
+			return -EFAULT;
+
+		if (daddr.sipc_port < 0 ||
+		    daddr.sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT)
+			return -EINVAL;
+	} else {
+		if (msg->msg_namelen != 0)
+			return -EINVAL;
+		daddr = sk->peer;
+		if (daddr.sipc_port < 0)
+			return -EDESTADDRREQ;
+	}
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __bufp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy updated I/O vector back */
+	return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret;
+}
+
+static ssize_t bufp_write(struct rtdm_fd *fd,
+			  const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
+	struct bufp_socket *sk = priv->state;
+
+	if (sk->peer.sipc_port < 0)
+		return -EDESTADDRREQ;
+
+	return __bufp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+}
+
+static int __bufp_bind_socket(struct rtipc_private *priv,
+			      struct sockaddr_ipc *sa)
+{
+	struct bufp_socket *sk = priv->state;
+	int ret = 0, port;
+	struct rtdm_fd *fd;
+	rtdm_lockctx_t s;
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT)
+		return -EINVAL;
+
+	cobalt_atomic_enter(s);
+	if (test_bit(_BUFP_BOUND, &sk->status) ||
+	    __test_and_set_bit(_BUFP_BINDING, &sk->status))
+		ret = -EADDRINUSE;
+	cobalt_atomic_leave(s);
+
+	if (ret)
+		return ret;
+
+	/* Will auto-select a free port number if unspec (-1). */
+	port = sa->sipc_port;
+	fd = rtdm_private_to_fd(priv);
+	cobalt_atomic_enter(s);
+	port = xnmap_enter(portmap, port, fd);
+	cobalt_atomic_leave(s);
+	if (port < 0)
+		return port == -EEXIST ? -EADDRINUSE : -ENOMEM;
+
+	sa->sipc_port = port;
+
+	/*
+	 * The caller must have told us how much memory is needed for
+	 * buffer space via setsockopt() before we get here.
+	 */
+	if (sk->bufsz == 0)
+		return -ENOBUFS;
+
+	sk->bufmem = xnheap_vmalloc(sk->bufsz);
+	if (sk->bufmem == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	sk->name = *sa;
+	/* Set default destination if unset at binding time. */
+	if (sk->peer.sipc_port < 0)
+		sk->peer = *sa;
+
+	if (*sk->label) {
+		ret = xnregistry_enter(sk->label, sk,
+				       &sk->handle, &__bufp_pnode.node);
+		if (ret) {
+			xnheap_vfree(sk->bufmem);
+			goto fail;
+		}
+	}
+
+	cobalt_atomic_enter(s);
+	__clear_bit(_BUFP_BINDING, &sk->status);
+	__set_bit(_BUFP_BOUND, &sk->status);
+	if (xnselect_signal(&priv->send_block, POLLOUT))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+fail:
+	xnmap_remove(portmap, port);
+	clear_bit(_BUFP_BINDING, &sk->status);
+
+	return ret;
+}
+
+static int __bufp_connect_socket(struct bufp_socket *sk,
+				 struct sockaddr_ipc *sa)
+{
+	struct sockaddr_ipc _sa;
+	struct bufp_socket *rsk;
+	int ret, resched = 0;
+	rtdm_lockctx_t s;
+	xnhandle_t h;
+
+	if (sa == NULL) {
+		_sa = nullsa;
+		sa = &_sa;
+		goto set_assoc;
+	}
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_BUFP_NRPORT)
+		return -EINVAL;
+	/*
+	 * - If a valid sipc_port is passed in the [0..NRPORT-1] range,
+	 * it is used verbatim and the connection succeeds
+	 * immediately, regardless of whether the destination is
+	 * bound at the time of the call.
+	 *
+	 * - If sipc_port is -1 and a label was set via BUFP_LABEL,
+	 * connect() blocks for the requested amount of time (see
+	 * SO_RCVTIMEO) until a socket is bound to the same label.
+	 *
+	 * - If sipc_port is -1 and no label is given, the default
+	 * destination address is cleared, meaning that any subsequent
+	 * write() to the socket will return -EDESTADDRREQ, until a
+	 * valid destination address is set via connect() or bind().
+	 *
+	 * - In all other cases, -EINVAL is returned.
+	 */
+	if (sa->sipc_port < 0 && *sk->label) {
+		ret = xnregistry_bind(sk->label,
+				      sk->rx_timeout, XN_RELATIVE, &h);
+		if (ret)
+			return ret;
+
+		cobalt_atomic_enter(s);
+		rsk = xnregistry_lookup(h, NULL);
+		if (rsk == NULL || rsk->magic != BUFP_SOCKET_MAGIC)
+			ret = -EINVAL;
+		else {
+			/* Fetch labeled port number. */
+			sa->sipc_port = rsk->name.sipc_port;
+			resched = xnselect_signal(&sk->priv->send_block, POLLOUT);
+		}
+		cobalt_atomic_leave(s);
+		if (ret)
+			return ret;
+	} else if (sa->sipc_port < 0)
+		sa = &nullsa;
+set_assoc:
+	cobalt_atomic_enter(s);
+	if (!test_bit(_BUFP_BOUND, &sk->status))
+		/* Set default name. */
+		sk->name = *sa;
+	/* Set default destination. */
+	sk->peer = *sa;
+	if (sa->sipc_port < 0)
+		__clear_bit(_BUFP_CONNECTED, &sk->status);
+	else
+		__set_bit(_BUFP_CONNECTED, &sk->status);
+	if (resched)
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
+
+static int __bufp_setsockopt(struct bufp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_setsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	size_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptin(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->rx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->tx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_BUFP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case BUFP_BUFSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len == 0)
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		/*
+		 * We may not do this more than once, and we have to
+		 * do this before the first binding.
+		 */
+		if (test_bit(_BUFP_BOUND, &sk->status) ||
+		    test_bit(_BUFP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else
+			sk->bufsz = len;
+		cobalt_atomic_leave(s);
+		break;
+
+	case BUFP_LABEL:
+		if (sopt.optlen < sizeof(plabel))
+			return -EINVAL;
+		if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel)))
+			return -EFAULT;
+		cobalt_atomic_enter(s);
+		/*
+		 * We may attach a label to a client socket which was
+		 * previously bound in BUFP.
+		 */
+		if (test_bit(_BUFP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else {
+			/* NUL-terminate before copying. */
+			plabel.label[XNOBJECT_NAME_LEN-1] = 0;
+			strcpy(sk->label, plabel.label);
+		}
+		cobalt_atomic_leave(s);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __bufp_getsockopt(struct bufp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_getsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	socklen_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptout(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len)))
+		return -EFAULT;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->rx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->tx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_BUFP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case BUFP_LABEL:
+		if (len < sizeof(plabel))
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		strcpy(plabel.label, sk->label);
+		cobalt_atomic_leave(s);
+		if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel)))
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __bufp_ioctl(struct rtdm_fd *fd,
+			unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct sockaddr_ipc saddr, *saddrp = &saddr;
+	struct bufp_socket *sk = priv->state;
+	int ret = 0;
+
+	switch (request) {
+
+	COMPAT_CASE(_RTIOC_CONNECT):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+		  return ret;
+		ret = __bufp_connect_socket(sk, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_BIND):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		if (saddrp == NULL)
+			return -EFAULT;
+		ret = __bufp_bind_socket(priv, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->name);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETPEERNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->peer);
+		break;
+
+	COMPAT_CASE(_RTIOC_SETSOCKOPT):
+		ret = __bufp_setsockopt(sk, fd, arg);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKOPT):
+		ret = __bufp_getsockopt(sk, fd, arg);
+		break;
+
+	case _RTIOC_LISTEN:
+	COMPAT_CASE(_RTIOC_ACCEPT):
+		ret = -EOPNOTSUPP;
+		break;
+
+	case _RTIOC_SHUTDOWN:
+		ret = -ENOTCONN;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int bufp_ioctl(struct rtdm_fd *fd,
+		      unsigned int request, void *arg)
+{
+	int ret;
+
+	switch (request) {
+	COMPAT_CASE(_RTIOC_BIND):
+		if (rtdm_in_rt_context())
+			return -ENOSYS;	/* Try downgrading to NRT */
+		fallthrough;
+	default:
+		ret = __bufp_ioctl(fd, request, arg);
+	}
+
+	return ret;
+}
+
+static unsigned int bufp_pollstate(struct rtdm_fd *fd) /* atomic */
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct bufp_socket *sk = priv->state, *rsk;
+	unsigned int mask = 0;
+	struct rtdm_fd *rfd;
+
+	if (test_bit(_BUFP_BOUND, &sk->status) && sk->fillsz > 0)
+		mask |= POLLIN;
+
+	/*
+	 * If the socket is connected, POLLOUT means that the peer
+	 * exists, is bound and can receive data. Otherwise POLLOUT is
+	 * always set, assuming the client is likely to use explicit
+	 * addressing in send operations.
+	 */
+	if (test_bit(_BUFP_CONNECTED, &sk->status)) {
+		rfd = xnmap_fetch_nocheck(portmap, sk->peer.sipc_port);
+		if (rfd) {
+			rsk = rtipc_fd_to_state(rfd);
+			if (rsk->fillsz < rsk->bufsz)
+				mask |= POLLOUT;
+		}
+	} else
+		mask |= POLLOUT;
+
+	return mask;
+}
+
+static int bufp_init(void)
+{
+	portmap = xnmap_create(CONFIG_XENO_OPT_BUFP_NRPORT, 0, 0);
+	if (portmap == NULL)
+		return -ENOMEM;
+
+	return 0;
+}
+
+static void bufp_exit(void)
+{
+	xnmap_delete(portmap);
+}
+
+struct rtipc_protocol bufp_proto_driver = {
+	.proto_name = "bufp",
+	.proto_statesz = sizeof(struct bufp_socket),
+	.proto_init = bufp_init,
+	.proto_exit = bufp_exit,
+	.proto_ops = {
+		.socket = bufp_socket,
+		.close = bufp_close,
+		.recvmsg = bufp_recvmsg,
+		.sendmsg = bufp_sendmsg,
+		.read = bufp_read,
+		.write = bufp_write,
+		.ioctl = bufp_ioctl,
+		.pollstate = bufp_pollstate,
+	}
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c
new file mode 100644
index 0000000..a553902
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/iddp.c
@@ -0,0 +1,990 @@
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/list.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/bufd.h>
+#include <cobalt/kernel/map.h>
+#include <rtdm/ipc.h>
+#include "internal.h"
+
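+/*
+ * A minimal user-space sketch of the IDDP flow implemented below
+ * (hedged: IPCPROTO_IDDP and sockaddr_ipc come from the rtdm/ipc.h
+ * uapi; the port number is arbitrary):
+ *
+ *	int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+ *	struct sockaddr_ipc name = {
+ *		.sipc_family = AF_RTIPC,
+ *		.sipc_port = 12,
+ *	};
+ *	bind(s, (struct sockaddr *)&name, sizeof(name));
+ *
+ * after which fixed-boundary datagrams are exchanged with
+ * sendto()/recvfrom().
+ */
+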
+#define IDDP_SOCKET_MAGIC 0xa37a37a8
+
+struct iddp_message {
+	struct list_head next;
+	int from;
+	size_t rdoff;
+	size_t len;
+	char data[];
+};
+
+struct iddp_socket {
+	int magic;
+	struct sockaddr_ipc name;
+	struct sockaddr_ipc peer;
+	struct xnheap *bufpool;
+	struct xnheap privpool;
+	rtdm_waitqueue_t *poolwaitq;
+	rtdm_waitqueue_t privwaitq;
+	size_t poolsz;
+	rtdm_sem_t insem;
+	struct list_head inq;
+	u_long status;
+	xnhandle_t handle;
+	char label[XNOBJECT_NAME_LEN];
+	nanosecs_rel_t rx_timeout;
+	nanosecs_rel_t tx_timeout;
+	unsigned long stalls;	/* Buffer stall counter. */
+	struct rtipc_private *priv;
+};
+
+static struct sockaddr_ipc nullsa = {
+	.sipc_family = AF_RTIPC,
+	.sipc_port = -1
+};
+
+static struct xnmap *portmap;
+
+static rtdm_waitqueue_t poolwaitq;
+
+#define _IDDP_BINDING   0
+#define _IDDP_BOUND     1
+#define _IDDP_CONNECTED 2
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static char *__iddp_link_target(void *obj)
+{
+	struct iddp_socket *sk = obj;
+
+	return kasformat("%d", sk->name.sipc_port);
+}
+
+extern struct xnptree rtipc_ptree;
+
+static struct xnpnode_link __iddp_pnode = {
+	.node = {
+		.dirname = "iddp",
+		.root = &rtipc_ptree,
+		.ops = &xnregistry_vlink_ops,
+	},
+	.target = __iddp_link_target,
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __iddp_pnode = {
+	.node = {
+		.dirname = "iddp",
+	},
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static inline void __iddp_init_mbuf(struct iddp_message *mbuf, size_t len)
+{
+	mbuf->rdoff = 0;
+	mbuf->len = len;
+	INIT_LIST_HEAD(&mbuf->next);
+}
+
+static struct iddp_message *
+__iddp_alloc_mbuf(struct iddp_socket *sk, size_t len,
+		  nanosecs_rel_t timeout, int flags, int *pret)
+{
+	struct iddp_message *mbuf = NULL;
+	rtdm_toseq_t timeout_seq;
+	rtdm_lockctx_t s;
+	int ret = 0;
+
+	rtdm_toseq_init(&timeout_seq, timeout);
+
+	for (;;) {
+		mbuf = xnheap_alloc(sk->bufpool, len + sizeof(*mbuf));
+		if (mbuf) {
+			__iddp_init_mbuf(mbuf, len);
+			break;
+		}
+		if (flags & MSG_DONTWAIT) {
+			ret = -EAGAIN;
+			break;
+		}
+		/*
+		 * No luck, no buffer free. Wait for a buffer to be
+		 * released and retry. Admittedly, we might create a
+		 * thundering herd effect if many waiters put a lot of
+		 * memory pressure on the pool, but in this case, the
+		 * pool size should be adjusted.
+		 */
+		rtdm_waitqueue_lock(sk->poolwaitq, s);
+		++sk->stalls;
+		ret = rtdm_timedwait_locked(sk->poolwaitq, timeout, &timeout_seq);
+		rtdm_waitqueue_unlock(sk->poolwaitq, s);
+		if (unlikely(ret == -EIDRM))
+			ret = -ECONNRESET;
+		if (ret)
+			break;
+	}
+
+	*pret = ret;
+
+	return mbuf;
+}
+
+static void __iddp_free_mbuf(struct iddp_socket *sk,
+			     struct iddp_message *mbuf)
+{
+	xnheap_free(sk->bufpool, mbuf);
+	rtdm_waitqueue_broadcast(sk->poolwaitq);
+}
+
+static int iddp_socket(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+
+	sk->magic = IDDP_SOCKET_MAGIC;
+	sk->name = nullsa;	/* Unbound */
+	sk->peer = nullsa;
+	sk->bufpool = &cobalt_heap;
+	sk->poolwaitq = &poolwaitq;
+	sk->poolsz = 0;
+	sk->status = 0;
+	sk->handle = 0;
+	sk->rx_timeout = RTDM_TIMEOUT_INFINITE;
+	sk->tx_timeout = RTDM_TIMEOUT_INFINITE;
+	sk->stalls = 0;
+	*sk->label = 0;
+	INIT_LIST_HEAD(&sk->inq);
+	rtdm_sem_init(&sk->insem, 0);
+	rtdm_waitqueue_init(&sk->privwaitq);
+	sk->priv = priv;
+
+	return 0;
+}
+
+static void iddp_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	struct iddp_message *mbuf;
+	rtdm_lockctx_t s;
+	void *poolmem;
+	u32 poolsz;
+
+	rtdm_sem_destroy(&sk->insem);
+	rtdm_waitqueue_destroy(&sk->privwaitq);
+
+	if (test_bit(_IDDP_BOUND, &sk->status)) {
+		if (sk->handle)
+			xnregistry_remove(sk->handle);
+		if (sk->name.sipc_port > -1) {
+			cobalt_atomic_enter(s);
+			xnmap_remove(portmap, sk->name.sipc_port);
+			cobalt_atomic_leave(s);
+		}
+		if (sk->bufpool != &cobalt_heap) {
+			poolmem = xnheap_get_membase(&sk->privpool);
+			poolsz = xnheap_get_size(&sk->privpool);
+			xnheap_destroy(&sk->privpool);
+			xnheap_vfree(poolmem);
+			/* Unread datagrams went down with the pool. */
+			kfree(sk);
+			return;
+		}
+	}
+
+	/* Send unread datagrams back to the system heap. */
+	while (!list_empty(&sk->inq)) {
+		mbuf = list_entry(sk->inq.next, struct iddp_message, next);
+		list_del(&mbuf->next);
+		xnheap_free(&cobalt_heap, mbuf);
+	}
+
+	kfree(sk);
+
+	return;
+}
+
+static ssize_t __iddp_recvmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      struct sockaddr_ipc *saddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	ssize_t maxlen, len, wrlen, vlen;
+	rtdm_toseq_t timeout_seq, *toseq;
+	int nvec, rdoff, ret, dofree;
+	struct iddp_message *mbuf;
+	nanosecs_rel_t timeout;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+
+	if (!test_bit(_IDDP_BOUND, &sk->status))
+		return -EAGAIN;
+
+	maxlen = rtdm_get_iov_flatlen(iov, iovlen);
+	if (maxlen == 0)
+		return 0;
+
+	if (flags & MSG_DONTWAIT) {
+		timeout = RTDM_TIMEOUT_NONE;
+		toseq = NULL;
+	} else {
+		timeout = sk->rx_timeout;
+		toseq = &timeout_seq;
+	}
+
+	/* We want to pick one buffer from the queue. */
+
+	for (;;) {
+		ret = rtdm_sem_timeddown(&sk->insem, timeout, toseq);
+		if (unlikely(ret)) {
+			if (ret == -EIDRM)
+				return -ECONNRESET;
+			return ret;
+		}
+		/* We may have spurious wakeups. */
+		cobalt_atomic_enter(s);
+		if (!list_empty(&sk->inq))
+			break;
+		cobalt_atomic_leave(s);
+	}
+
+	/* Pull heading message from input queue. */
+	mbuf = list_entry(sk->inq.next, struct iddp_message, next);
+	rdoff = mbuf->rdoff;
+	len = mbuf->len - rdoff;
+	if (saddr) {
+		saddr->sipc_family = AF_RTIPC;
+		saddr->sipc_port = mbuf->from;
+	}
+	if (maxlen >= len) {
+		list_del(&mbuf->next);
+		dofree = 1;
+		if (list_empty(&sk->inq)) /* -> non-readable */
+			xnselect_signal(&priv->recv_block, 0);
+
+	} else {
+		/* Buffer is only partially read: repost. */
+		mbuf->rdoff += maxlen;
+		len = maxlen;
+		dofree = 0;
+	}
+
+	if (!dofree)
+		rtdm_sem_up(&sk->insem);
+
+	cobalt_atomic_leave(s);
+
+	/* Now, write "len" bytes from mbuf->data to the vector cells */
+	for (nvec = 0, wrlen = len; nvec < iovlen && wrlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			break;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		wrlen -= vlen;
+		rdoff += vlen;
+	}
+
+	if (dofree)
+		__iddp_free_mbuf(sk, mbuf);
+
+	return ret ?: len;
+}
+
+static ssize_t iddp_recvmsg(struct rtdm_fd *fd,
+			    struct user_msghdr *msg, int flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct sockaddr_ipc saddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen < sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+	} else if (msg->msg_namelen != 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __iddp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy the updated I/O vector back */
+	if (rtdm_put_iovec(fd, iov, msg, iov_fast))
+		return -EFAULT;
+
+	/* Copy the source address if required. */
+	if (msg->msg_name) {
+		if (rtipc_put_arg(fd, msg->msg_name, &saddr, sizeof(saddr)))
+			return -EFAULT;
+		msg->msg_namelen = sizeof(struct sockaddr_ipc);
+	}
+
+	return ret;
+}
+
+static ssize_t iddp_read(struct rtdm_fd *fd, void *buf, size_t len)
+{
+	struct iovec iov = { .iov_base = buf, .iov_len = len };
+
+	return __iddp_recvmsg(fd, &iov, 1, 0, NULL);
+}
+
+static ssize_t __iddp_sendmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      const struct sockaddr_ipc *daddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state, *rsk;
+	struct iddp_message *mbuf;
+	ssize_t len, rdlen, vlen;
+	int nvec, wroff, ret;
+	struct rtdm_fd *rfd;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+
+	cobalt_atomic_enter(s);
+	rfd = xnmap_fetch_nocheck(portmap, daddr->sipc_port);
+	if (rfd && rtdm_fd_lock(rfd) < 0)
+		rfd = NULL;
+	cobalt_atomic_leave(s);
+	if (rfd == NULL)
+		return -ECONNRESET;
+
+	rsk = rtipc_fd_to_state(rfd);
+	if (!test_bit(_IDDP_BOUND, &rsk->status)) {
+		rtdm_fd_unlock(rfd);
+		return -ECONNREFUSED;
+	}
+
+	mbuf = __iddp_alloc_mbuf(rsk, len, sk->tx_timeout, flags, &ret);
+	if (unlikely(ret)) {
+		rtdm_fd_unlock(rfd);
+		return ret;
+	}
+
+	/* Now, move "len" bytes to mbuf->data from the vector cells */
+	for (nvec = 0, rdlen = len, wroff = 0;
+	     nvec < iovlen && rdlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wroff, &bufd, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wroff, &bufd, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto fail;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		rdlen -= vlen;
+		wroff += vlen;
+	}
+
+	cobalt_atomic_enter(s);
+
+	/*
+	 * CAUTION: we must remain atomic from the moment we signal
+	 * POLLIN, until sem_up has happened.
+	 */
+	if (list_empty(&rsk->inq)) /* -> readable */
+		xnselect_signal(&rsk->priv->recv_block, POLLIN);
+
+	mbuf->from = sk->name.sipc_port;
+
+	if (flags & MSG_OOB)
+		list_add(&mbuf->next, &rsk->inq);
+	else
+		list_add_tail(&mbuf->next, &rsk->inq);
+
+	rtdm_sem_up(&rsk->insem); /* Will resched. */
+
+	cobalt_atomic_leave(s);
+
+	rtdm_fd_unlock(rfd);
+
+	return len;
+
+fail:
+	__iddp_free_mbuf(rsk, mbuf);
+
+	rtdm_fd_unlock(rfd);
+
+	return ret;
+}
+
+static ssize_t iddp_sendmsg(struct rtdm_fd *fd,
+			    const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct iddp_socket *sk = priv->state;
+	struct sockaddr_ipc daddr;
+	ssize_t ret;
+
+	if (flags & ~(MSG_OOB | MSG_DONTWAIT))
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen != sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+
+		/* Fetch the destination address to send to. */
+		if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr)))
+			return -EFAULT;
+
+		if (daddr.sipc_port < 0 ||
+		    daddr.sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT)
+			return -EINVAL;
+	} else {
+		if (msg->msg_namelen != 0)
+			return -EINVAL;
+		daddr = sk->peer;
+		if (daddr.sipc_port < 0)
+			return -EDESTADDRREQ;
+	}
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __iddp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy updated I/O vector back */
+	return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret;
+}
+
+static ssize_t iddp_write(struct rtdm_fd *fd,
+			  const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
+	struct iddp_socket *sk = priv->state;
+
+	if (sk->peer.sipc_port < 0)
+		return -EDESTADDRREQ;
+
+	return __iddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+}
+
+static int __iddp_bind_socket(struct rtdm_fd *fd,
+			      struct sockaddr_ipc *sa)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	int ret = 0, port;
+	rtdm_lockctx_t s;
+	void *poolmem;
+	size_t poolsz;
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT)
+		return -EINVAL;
+
+	cobalt_atomic_enter(s);
+	if (test_bit(_IDDP_BOUND, &sk->status) ||
+	    __test_and_set_bit(_IDDP_BINDING, &sk->status))
+		ret = -EADDRINUSE;
+	cobalt_atomic_leave(s);
+	if (ret)
+		return ret;
+
+	/* Will auto-select a free port number if unspec (-1). */
+	port = sa->sipc_port;
+	cobalt_atomic_enter(s);
+	port = xnmap_enter(portmap, port, fd);
+	cobalt_atomic_leave(s);
+	if (port < 0)
+		return port == -EEXIST ? -EADDRINUSE : -ENOMEM;
+
+	sa->sipc_port = port;
+
+	/*
+	 * Allocate a local buffer pool if we were told to do so via
+	 * setsockopt() before we got here.
+	 */
+	poolsz = sk->poolsz;
+	if (poolsz > 0) {
+		poolsz = PAGE_ALIGN(poolsz);
+		poolmem = xnheap_vmalloc(poolsz);
+		if (poolmem == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		ret = xnheap_init(&sk->privpool, poolmem, poolsz);
+		if (ret) {
+			xnheap_vfree(poolmem);
+			goto fail;
+		}
+		xnheap_set_name(&sk->privpool, "iddp-pool@%d", port);
+		sk->poolwaitq = &sk->privwaitq;
+		sk->bufpool = &sk->privpool;
+	}
+
+	sk->name = *sa;
+	/* Set default destination if unset at binding time. */
+	if (sk->peer.sipc_port < 0)
+		sk->peer = *sa;
+
+	if (*sk->label) {
+		ret = xnregistry_enter(sk->label, sk,
+				       &sk->handle, &__iddp_pnode.node);
+		if (ret) {
+			if (poolsz > 0) {
+				xnheap_destroy(&sk->privpool);
+				xnheap_vfree(poolmem);
+			}
+			goto fail;
+		}
+	}
+
+	cobalt_atomic_enter(s);
+	__clear_bit(_IDDP_BINDING, &sk->status);
+	__set_bit(_IDDP_BOUND, &sk->status);
+	if (xnselect_signal(&priv->send_block, POLLOUT))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+fail:
+	xnmap_remove(portmap, port);
+	clear_bit(_IDDP_BINDING, &sk->status);
+
+	return ret;
+}
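+
+/*
+ * Binding sketch (illustrative): IDDP_POOLSZ must be set before
+ * bind(), as enforced above; sipc_port -1 requests auto-selection,
+ * and getsockname() returns the port actually picked. Values are
+ * hypothetical.
+ *
+ *	size_t poolsz = 32768;
+ *	struct sockaddr_ipc sa = {
+ *		.sipc_family = AF_RTIPC,
+ *		.sipc_port = -1,
+ *	};
+ *	socklen_t addrlen = sizeof(sa);
+ *
+ *	setsockopt(s, SOL_IDDP, IDDP_POOLSZ, &poolsz, sizeof(poolsz));
+ *	bind(s, (struct sockaddr *)&sa, sizeof(sa));
+ *	getsockname(s, (struct sockaddr *)&sa, &addrlen);
+ */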
+
+static int __iddp_connect_socket(struct iddp_socket *sk,
+				 struct sockaddr_ipc *sa)
+{
+	struct sockaddr_ipc _sa;
+	struct iddp_socket *rsk;
+	int ret, resched = 0;
+	rtdm_lockctx_t s;
+	xnhandle_t h;
+
+	if (sa == NULL) {
+		_sa = nullsa;
+		sa = &_sa;
+		goto set_assoc;
+	}
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_IDDP_NRPORT)
+		return -EINVAL;
+	/*
+	 * - If a valid sipc_port is passed in the [0..NRPORT-1] range,
+	 * it is used verbatim and the connection succeeds
+	 * immediately, regardless of whether the destination is
+	 * bound at the time of the call.
+	 *
+	 * - If sipc_port is -1 and a label was set via IDDP_LABEL,
+	 * connect() blocks for the requested amount of time (see
+	 * SO_RCVTIMEO) until a socket is bound to the same label.
+	 *
+	 * - If sipc_port is -1 and no label is given, the default
+	 * destination address is cleared, meaning that any subsequent
+	 * write() to the socket will return -EDESTADDRREQ, until a
+	 * valid destination address is set via connect() or bind().
+	 *
+	 * - In all other cases, -EINVAL is returned.
+	 */
+	if (sa->sipc_port < 0 && *sk->label) {
+		ret = xnregistry_bind(sk->label,
+				      sk->rx_timeout, XN_RELATIVE, &h);
+		if (ret)
+			return ret;
+
+		cobalt_atomic_enter(s);
+		rsk = xnregistry_lookup(h, NULL);
+		if (rsk == NULL || rsk->magic != IDDP_SOCKET_MAGIC)
+			ret = -EINVAL;
+		else {
+			/* Fetch labeled port number. */
+			sa->sipc_port = rsk->name.sipc_port;
+			resched = xnselect_signal(&sk->priv->send_block, POLLOUT);
+		}
+		cobalt_atomic_leave(s);
+		if (ret)
+			return ret;
+	} else if (sa->sipc_port < 0)
+		sa = &nullsa;
+set_assoc:
+	cobalt_atomic_enter(s);
+	if (!test_bit(_IDDP_BOUND, &sk->status))
+		/* Set default name. */
+		sk->name = *sa;
+	/* Set default destination. */
+	sk->peer = *sa;
+	if (sa->sipc_port < 0)
+		__clear_bit(_IDDP_CONNECTED, &sk->status);
+	else
+		__set_bit(_IDDP_CONNECTED, &sk->status);
+	if (resched)
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
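+
+/*
+ * Connect-by-label sketch (illustrative): with sipc_port set to -1
+ * and a label attached via IDDP_LABEL, connect() waits - bounded by
+ * SO_RCVTIMEO - until some socket binds to the same label, as
+ * described above. The label string is hypothetical.
+ *
+ *	struct rtipc_port_label plabel = { .label = "iddp-demo" };
+ *	struct sockaddr_ipc sa = {
+ *		.sipc_family = AF_RTIPC,
+ *		.sipc_port = -1,
+ *	};
+ *
+ *	setsockopt(s, SOL_IDDP, IDDP_LABEL, &plabel, sizeof(plabel));
+ *	connect(s, (struct sockaddr *)&sa, sizeof(sa));
+ */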
+
+static int __iddp_setsockopt(struct iddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_setsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	size_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptin(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->rx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->tx_timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_IDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case IDDP_POOLSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len == 0)
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		/*
+		 * We may not do this more than once, and we have to
+		 * do this before the first binding.
+		 */
+		if (test_bit(_IDDP_BOUND, &sk->status) ||
+		    test_bit(_IDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else
+			sk->poolsz = len;
+		cobalt_atomic_leave(s);
+		break;
+
+	case IDDP_LABEL:
+		if (sopt.optlen < sizeof(plabel))
+			return -EINVAL;
+		if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel)))
+			return -EFAULT;
+		cobalt_atomic_enter(s);
+		/*
+		 * Unlike XDDP, a label may be attached even after the
+		 * socket was bound; only a binding in progress
+		 * prevents this.
+		 */
+		if (test_bit(_IDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else {
+			strcpy(sk->label, plabel.label);
+			sk->label[XNOBJECT_NAME_LEN-1] = 0;
+		}
+		cobalt_atomic_leave(s);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __iddp_getsockopt(struct iddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_getsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	socklen_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptout(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	ret = rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len));
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->rx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		case SO_SNDTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->tx_timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_IDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case IDDP_LABEL:
+		if (len < sizeof(plabel))
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		strcpy(plabel.label, sk->label);
+		cobalt_atomic_leave(s);
+		if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel)))
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __iddp_ioctl(struct rtdm_fd *fd,
+			unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct sockaddr_ipc saddr, *saddrp = &saddr;
+	struct iddp_socket *sk = priv->state;
+	int ret = 0;
+
+	switch (request) {
+
+	COMPAT_CASE(_RTIOC_CONNECT):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		ret = __iddp_connect_socket(sk, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_BIND):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		if (saddrp == NULL)
+			return -EFAULT;
+		ret = __iddp_bind_socket(fd, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->name);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETPEERNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->peer);
+		break;
+
+	COMPAT_CASE(_RTIOC_SETSOCKOPT):
+		ret = __iddp_setsockopt(sk, fd, arg);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKOPT):
+		ret = __iddp_getsockopt(sk, fd, arg);
+		break;
+
+	case _RTIOC_LISTEN:
+	COMPAT_CASE(_RTIOC_ACCEPT):
+		ret = -EOPNOTSUPP;
+		break;
+
+	case _RTIOC_SHUTDOWN:
+		ret = -ENOTCONN;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int iddp_ioctl(struct rtdm_fd *fd,
+		      unsigned int request, void *arg)
+{
+	int ret;
+
+	switch (request) {
+	COMPAT_CASE(_RTIOC_BIND):
+		if (rtdm_in_rt_context())
+			return -ENOSYS;	/* Try downgrading to NRT */
+		fallthrough;
+	default:
+		ret = __iddp_ioctl(fd, request, arg);
+	}
+
+	return ret;
+}
+
+static int iddp_init(void)
+{
+	portmap = xnmap_create(CONFIG_XENO_OPT_IDDP_NRPORT, 0, 0);
+	if (portmap == NULL)
+		return -ENOMEM;
+
+	rtdm_waitqueue_init(&poolwaitq);
+
+	return 0;
+}
+
+static void iddp_exit(void)
+{
+	rtdm_waitqueue_destroy(&poolwaitq);
+	xnmap_delete(portmap);
+}
+
+static unsigned int iddp_pollstate(struct rtdm_fd *fd) /* atomic */
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iddp_socket *sk = priv->state;
+	unsigned int mask = 0;
+	struct rtdm_fd *rfd;
+
+	if (test_bit(_IDDP_BOUND, &sk->status) && !list_empty(&sk->inq))
+		mask |= POLLIN;
+
+	/*
+	 * If the socket is connected, POLLOUT means that the peer
+	 * exists. Otherwise POLLOUT is always set, assuming the
+	 * client is likely to use explicit addressing in send
+	 * operations.
+	 *
+	 * If the peer exists, we still can't really know whether
+	 * writing to the socket would block as it depends on the
+	 * message size and other highly dynamic factors, so pretend
+	 * it would not.
+	 */
+	if (test_bit(_IDDP_CONNECTED, &sk->status)) {
+		rfd = xnmap_fetch_nocheck(portmap, sk->peer.sipc_port);
+		if (rfd)
+			mask |= POLLOUT;
+	} else
+		mask |= POLLOUT;
+
+	return mask;
+}
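+
+/*
+ * Poll sketch (illustrative): the state computed above surfaces
+ * through the regular readiness API on the socket descriptor.
+ *
+ *	struct pollfd pfd = { .fd = s, .events = POLLIN | POLLOUT };
+ *
+ *	poll(&pfd, 1, -1);
+ */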
+
+struct rtipc_protocol iddp_proto_driver = {
+	.proto_name = "iddp",
+	.proto_statesz = sizeof(struct iddp_socket),
+	.proto_init = iddp_init,
+	.proto_exit = iddp_exit,
+	.proto_ops = {
+		.socket = iddp_socket,
+		.close = iddp_close,
+		.recvmsg = iddp_recvmsg,
+		.sendmsg = iddp_sendmsg,
+		.read = iddp_read,
+		.write = iddp_write,
+		.ioctl = iddp_ioctl,
+		.pollstate = iddp_pollstate,
+	}
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h
new file mode 100644
index 0000000..919a5d9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/internal.h
@@ -0,0 +1,135 @@
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTIPC_INTERNAL_H
+#define _RTIPC_INTERNAL_H
+
+#include <linux/uio.h>
+#include <linux/time.h>
+#include <cobalt/kernel/registry.h>
+#include <cobalt/kernel/clock.h>
+#include <cobalt/kernel/select.h>
+#include <rtdm/rtdm.h>
+#include <rtdm/compat.h>
+#include <rtdm/driver.h>
+
+struct rtipc_protocol;
+
+struct rtipc_private {
+	struct rtipc_protocol *proto;
+	DECLARE_XNSELECT(send_block);
+	DECLARE_XNSELECT(recv_block);
+	void *state;
+};
+
+struct rtipc_protocol {
+	const char *proto_name;
+	int proto_statesz;
+	int (*proto_init)(void);
+	void (*proto_exit)(void);
+	struct {
+		int (*socket)(struct rtdm_fd *fd);
+		void (*close)(struct rtdm_fd *fd);
+		ssize_t (*recvmsg)(struct rtdm_fd *fd,
+				   struct user_msghdr *msg, int flags);
+		ssize_t (*sendmsg)(struct rtdm_fd *fd,
+				   const struct user_msghdr *msg, int flags);
+		ssize_t (*read)(struct rtdm_fd *fd,
+				void *buf, size_t len);
+		ssize_t (*write)(struct rtdm_fd *fd,
+				 const void *buf, size_t len);
+		int (*ioctl)(struct rtdm_fd *fd,
+			     unsigned int request, void *arg);
+		unsigned int (*pollstate)(struct rtdm_fd *fd);
+	} proto_ops;
+};
+
+static inline void *rtipc_fd_to_state(struct rtdm_fd *fd)
+{
+	struct rtipc_private *p = rtdm_fd_to_private(fd);
+	return p->state;
+}
+
+static inline nanosecs_rel_t rtipc_timeval_to_ns(const struct __kernel_old_timeval *tv)
+{
+	nanosecs_rel_t ns = tv->tv_usec * 1000;
+
+	if (tv->tv_sec)
+		ns += (nanosecs_rel_t)tv->tv_sec * 1000000000UL;
+
+	return ns;
+}
+
+static inline void rtipc_ns_to_timeval(struct __kernel_old_timeval *tv, nanosecs_rel_t ns)
+{
+	unsigned long nsecs;
+
+	tv->tv_sec = xnclock_divrem_billion(ns, &nsecs);
+	tv->tv_usec = nsecs / 1000;
+}
+
+int rtipc_get_sockaddr(struct rtdm_fd *fd,
+		       struct sockaddr_ipc **saddrp,
+		       const void *arg);
+
+int rtipc_put_sockaddr(struct rtdm_fd *fd, void *arg,
+		       const struct sockaddr_ipc *saddr);
+
+int rtipc_get_sockoptout(struct rtdm_fd *fd,
+			 struct _rtdm_getsockopt_args *sopt,
+			 const void *arg);
+
+int rtipc_put_sockoptout(struct rtdm_fd *fd, void *arg,
+			 const struct _rtdm_getsockopt_args *sopt);
+
+int rtipc_get_sockoptin(struct rtdm_fd *fd,
+			struct _rtdm_setsockopt_args *sopt,
+			const void *arg);
+
+int rtipc_get_timeval(struct rtdm_fd *fd, struct __kernel_old_timeval *tv,
+		      const void *arg, size_t arglen);
+
+int rtipc_put_timeval(struct rtdm_fd *fd, void *arg,
+		      const struct __kernel_old_timeval *tv, size_t arglen);
+
+int rtipc_get_length(struct rtdm_fd *fd, size_t *lenp,
+		     const void *arg, size_t arglen);
+
+int rtipc_get_arg(struct rtdm_fd *fd, void *dst, const void *src,
+		  size_t len);
+
+int rtipc_put_arg(struct rtdm_fd *fd, void *dst, const void *src,
+		  size_t len);
+
+extern struct rtipc_protocol xddp_proto_driver;
+
+extern struct rtipc_protocol iddp_proto_driver;
+
+extern struct rtipc_protocol bufp_proto_driver;
+
+extern struct xnptree rtipc_ptree;
+
+#define rtipc_wait_context		xnthread_wait_context
+#define rtipc_prepare_wait		xnthread_prepare_wait
+#define rtipc_get_wait_context		xnthread_get_wait_context
+#define rtipc_peek_wait_head(obj)	xnsynch_peek_pendq(&(obj)->synch_base)
+
+#define COMPAT_CASE(__op)	case __op __COMPAT_CASE(__op  ## _COMPAT)
+
+#endif /* !_RTIPC_INTERNAL_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c
new file mode 100644
index 0000000..abb7681
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/rtipc.c
@@ -0,0 +1,524 @@
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/poll.h>
+#include <linux/time.h>
+#include <rtdm/ipc.h>
+#include <rtdm/compat.h>
+#include "internal.h"
+
+MODULE_DESCRIPTION("Real-time IPC interface");
+MODULE_AUTHOR("Philippe Gerum <rpm@xenomai.org>");
+MODULE_LICENSE("GPL");
+
+static struct rtipc_protocol *protocols[IPCPROTO_MAX] = {
+#ifdef CONFIG_XENO_DRIVERS_RTIPC_XDDP
+	[IPCPROTO_XDDP - 1] = &xddp_proto_driver,
+#endif
+#ifdef CONFIG_XENO_DRIVERS_RTIPC_IDDP
+	[IPCPROTO_IDDP - 1] = &iddp_proto_driver,
+#endif
+#ifdef CONFIG_XENO_DRIVERS_RTIPC_BUFP
+	[IPCPROTO_BUFP - 1] = &bufp_proto_driver,
+#endif
+};
+
+DEFINE_XNPTREE(rtipc_ptree, "rtipc");
+
+int rtipc_get_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(dst, src, len);
+		return 0;
+	}
+
+	return rtdm_copy_from_user(fd, dst, src, len);
+}
+
+int rtipc_put_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		memcpy(dst, src, len);
+		return 0;
+	}
+
+	return rtdm_copy_to_user(fd, dst, src, len);
+}
+
+int rtipc_get_sockaddr(struct rtdm_fd *fd, struct sockaddr_ipc **saddrp,
+		       const void *arg)
+{
+	const struct _rtdm_setsockaddr_args *p;
+	struct _rtdm_setsockaddr_args sreq;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		p = arg;
+		if (p->addrlen > 0) {
+			if (p->addrlen != sizeof(**saddrp))
+				return -EINVAL;
+			memcpy(*saddrp, p->addr, sizeof(**saddrp));
+		} else {
+			if (p->addr)
+				return -EINVAL;
+			*saddrp = NULL;
+		}
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_setsockaddr_args csreq;
+		ret = rtdm_safe_copy_from_user(fd, &csreq, arg, sizeof(csreq));
+		if (ret)
+			return ret;
+		if (csreq.addrlen > 0) {
+			if (csreq.addrlen != sizeof(**saddrp))
+				return -EINVAL;
+			return rtdm_safe_copy_from_user(fd, *saddrp,
+							compat_ptr(csreq.addr),
+							sizeof(**saddrp));
+		}
+		if (csreq.addr)
+			return -EINVAL;
+
+		*saddrp = NULL;
+
+		return 0;
+	}
+#endif
+
+	ret = rtdm_safe_copy_from_user(fd, &sreq, arg, sizeof(sreq));
+	if (ret)
+		return ret;
+	if (sreq.addrlen > 0) {
+		if (sreq.addrlen != sizeof(**saddrp))
+			return -EINVAL;
+		return rtdm_safe_copy_from_user(fd, *saddrp,
+						sreq.addr, sizeof(**saddrp));
+	}
+	if (sreq.addr)
+		return -EINVAL;
+
+	*saddrp = NULL;
+
+	return 0;
+}
+
+int rtipc_put_sockaddr(struct rtdm_fd *fd, void *arg,
+		       const struct sockaddr_ipc *saddr)
+{
+	const struct _rtdm_getsockaddr_args *p;
+	struct _rtdm_getsockaddr_args sreq;
+	socklen_t len;
+	int ret;
+
+	if (!rtdm_fd_is_user(fd)) {
+		p = arg;
+		if (*p->addrlen < sizeof(*saddr))
+			return -EINVAL;
+		memcpy(p->addr, saddr, sizeof(*saddr));
+		*p->addrlen = sizeof(*saddr);
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_getsockaddr_args csreq;
+		ret = rtdm_safe_copy_from_user(fd, &csreq, arg, sizeof(csreq));
+		if (ret)
+			return ret;
+
+		ret = rtdm_safe_copy_from_user(fd, &len,
+					       compat_ptr(csreq.addrlen),
+					       sizeof(len));
+		if (ret)
+			return ret;
+
+		if (len < sizeof(*saddr))
+			return -EINVAL;
+
+		ret = rtdm_safe_copy_to_user(fd, compat_ptr(csreq.addr),
+					     saddr, sizeof(*saddr));
+		if (ret)
+			return ret;
+
+		len = sizeof(*saddr);
+		return rtdm_safe_copy_to_user(fd, compat_ptr(csreq.addrlen),
+					      &len, sizeof(len));
+	}
+#endif
+
+	sreq.addr = NULL;
+	sreq.addrlen = NULL;
+	ret = rtdm_safe_copy_from_user(fd, &sreq, arg, sizeof(sreq));
+	if (ret)
+		return ret;
+
+	ret = rtdm_safe_copy_from_user(fd, &len, sreq.addrlen, sizeof(len));
+	if (ret)
+		return ret;
+
+	if (len < sizeof(*saddr))
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_to_user(fd, sreq.addr, saddr, sizeof(*saddr));
+	if (ret)
+		return ret;
+
+	len = sizeof(*saddr);
+
+	return rtdm_safe_copy_to_user(fd, sreq.addrlen, &len, sizeof(len));
+}
+
+int rtipc_get_sockoptout(struct rtdm_fd *fd, struct _rtdm_getsockopt_args *sopt,
+			 const void *arg)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		*sopt = *(struct _rtdm_getsockopt_args *)arg;
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_getsockopt_args csopt;
+		int ret;
+		ret = rtdm_safe_copy_from_user(fd, &csopt, arg, sizeof(csopt));
+		if (ret)
+			return ret;
+		sopt->level = csopt.level;
+		sopt->optname = csopt.optname;
+		sopt->optval = compat_ptr(csopt.optval);
+		sopt->optlen = compat_ptr(csopt.optlen);
+		return 0;
+	}
+#endif
+
+	return rtdm_safe_copy_from_user(fd, sopt, arg, sizeof(*sopt));
+}
+
+int rtipc_put_sockoptout(struct rtdm_fd *fd, void *arg,
+			 const struct _rtdm_getsockopt_args *sopt)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		*(struct _rtdm_getsockopt_args *)arg = *sopt;
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_getsockopt_args csopt;
+		int ret;
+		csopt.level = sopt->level;
+		csopt.optname = sopt->optname;
+		csopt.optval = ptr_to_compat(sopt->optval);
+		csopt.optlen = ptr_to_compat(sopt->optlen);
+		ret = rtdm_safe_copy_to_user(fd, arg, &csopt, sizeof(csopt));
+		if (ret)
+			return ret;
+		return 0;
+	}
+#endif
+
+	return rtdm_safe_copy_to_user(fd, arg, sopt, sizeof(*sopt));
+}
+
+int rtipc_get_sockoptin(struct rtdm_fd *fd, struct _rtdm_setsockopt_args *sopt,
+			const void *arg)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		*sopt = *(struct _rtdm_setsockopt_args *)arg;
+		return 0;
+	}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		struct compat_rtdm_setsockopt_args csopt;
+		int ret;
+		ret = rtdm_safe_copy_from_user(fd, &csopt, arg, sizeof(csopt));
+		if (ret)
+			return ret;
+		sopt->level = csopt.level;
+		sopt->optname = csopt.optname;
+		sopt->optval = compat_ptr(csopt.optval);
+		sopt->optlen = csopt.optlen;
+		return 0;
+	}
+#endif
+
+	return rtdm_safe_copy_from_user(fd, sopt, arg, sizeof(*sopt));
+}
+
+int rtipc_get_timeval(struct rtdm_fd *fd, struct __kernel_old_timeval *tv,
+		      const void *arg, size_t arglen)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		if (arglen != sizeof(struct old_timeval32))
+			return -EINVAL;
+		return sys32_get_timeval(tv, arg);
+	}
+#endif
+
+	if (arglen != sizeof(*tv))
+		return -EINVAL;
+
+	if (!rtdm_fd_is_user(fd)) {
+		*tv = *(struct __kernel_old_timeval *)arg;
+		return 0;
+	}
+
+	return rtdm_safe_copy_from_user(fd, tv, arg, sizeof(*tv));
+}
+
+int rtipc_put_timeval(struct rtdm_fd *fd, void *arg,
+		      const struct __kernel_old_timeval *tv, size_t arglen)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		if (arglen != sizeof(struct old_timeval32))
+			return -EINVAL;
+		return sys32_put_timeval(arg, tv);
+	}
+#endif
+
+	if (arglen != sizeof(*tv))
+		return -EINVAL;
+
+	if (!rtdm_fd_is_user(fd)) {
+		*(struct __kernel_old_timeval *)arg = *tv;
+		return 0;
+	}
+
+	return rtdm_safe_copy_to_user(fd, arg, tv, sizeof(*tv));
+}
+
+int rtipc_get_length(struct rtdm_fd *fd, size_t *lenp,
+		     const void *arg, size_t arglen)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	if (rtdm_fd_is_compat(fd)) {
+		const compat_size_t *csz;
+		if (arglen != sizeof(*csz))
+			return -EINVAL;
+		csz = arg;
+		return csz == NULL ||
+			!access_rok(csz, sizeof(*csz)) ||
+			__xn_get_user(*lenp, csz) ? -EFAULT : 0;
+	}
+#endif
+
+	if (arglen != sizeof(size_t))
+		return -EINVAL;
+
+	if (!rtdm_fd_is_user(fd)) {
+		*lenp = *(size_t *)arg;
+		return 0;
+	}
+
+	return rtdm_safe_copy_from_user(fd, lenp, arg, sizeof(*lenp));
+}
+
+static int rtipc_socket(struct rtdm_fd *fd, int protocol)
+{
+	struct rtipc_protocol *proto;
+	struct rtipc_private *priv;
+	int ret;
+
+	if (protocol < 0 || protocol >= IPCPROTO_MAX)
+		return -EPROTONOSUPPORT;
+
+	if (protocol == IPCPROTO_IPC)
+		/* Default protocol is IDDP */
+		protocol = IPCPROTO_IDDP;
+
+	proto = protocols[protocol - 1];
+	if (proto == NULL)	/* Not compiled in? */
+		return -ENOPROTOOPT;
+
+	priv = rtdm_fd_to_private(fd);
+	priv->proto = proto;
+	priv->state = kmalloc(proto->proto_statesz, GFP_KERNEL);
+	if (priv->state == NULL)
+		return -ENOMEM;
+
+	xnselect_init(&priv->send_block);
+	xnselect_init(&priv->recv_block);
+
+	ret = proto->proto_ops.socket(fd);
+	if (ret)
+		kfree(priv->state);
+
+	return ret;
+}
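+
+/*
+ * Creation sketch (illustrative): the protocol is chosen at socket()
+ * time; IPCPROTO_IPC falls back to IDDP as implemented above.
+ *
+ *	int s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+ */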
+
+static void rtipc_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	/*
+	 * CAUTION: priv->state shall be released by the
+	 * proto_ops.close() handler when appropriate (which may be
+	 * done asynchronously later, see XDDP).
+	 */
+	priv->proto->proto_ops.close(fd);
+	xnselect_destroy(&priv->recv_block);
+	xnselect_destroy(&priv->send_block);
+}
+
+static ssize_t rtipc_recvmsg(struct rtdm_fd *fd,
+			     struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.recvmsg(fd, msg, flags);
+}
+
+static ssize_t rtipc_sendmsg(struct rtdm_fd *fd,
+			     const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.sendmsg(fd, msg, flags);
+}
+
+static ssize_t rtipc_read(struct rtdm_fd *fd,
+			  void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.read(fd, buf, len);
+}
+
+static ssize_t rtipc_write(struct rtdm_fd *fd,
+			   const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.write(fd, buf, len);
+}
+
+static int rtipc_ioctl(struct rtdm_fd *fd,
+		       unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	return priv->proto->proto_ops.ioctl(fd, request, arg);
+}
+
+static int rtipc_select(struct rtdm_fd *fd, struct xnselector *selector,
+			unsigned int type, unsigned int index)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xnselect_binding *binding;
+	unsigned int pollstate, mask;
+	struct xnselect *block;
+	spl_t s;
+	int ret;
+
+	if (type != XNSELECT_READ && type != XNSELECT_WRITE)
+		return -EINVAL;
+
+	binding = xnmalloc(sizeof(*binding));
+	if (binding == NULL)
+		return -ENOMEM;
+
+	cobalt_atomic_enter(s);
+
+	pollstate = priv->proto->proto_ops.pollstate(fd);
+
+	if (type == XNSELECT_READ) {
+		mask = pollstate & POLLIN;
+		block = &priv->recv_block;
+	} else {
+		mask = pollstate & POLLOUT;
+		block = &priv->send_block;
+	}
+
+	ret = xnselect_bind(block, binding, selector, type, index, mask);
+
+	cobalt_atomic_leave(s);
+
+	if (ret)
+		xnfree(binding);
+
+	return ret;
+}
+
+static struct rtdm_driver rtipc_driver = {
+	.profile_info		=	RTDM_PROFILE_INFO(rtipc,
+							  RTDM_CLASS_RTIPC,
+							  RTDM_SUBCLASS_GENERIC,
+							  1),
+	.device_flags		=	RTDM_PROTOCOL_DEVICE,
+	.device_count		=	1,
+	.context_size		=	sizeof(struct rtipc_private),
+	.protocol_family	=	PF_RTIPC,
+	.socket_type		=	SOCK_DGRAM,
+	.ops = {
+		.socket		=	rtipc_socket,
+		.close		=	rtipc_close,
+		.recvmsg_rt	=	rtipc_recvmsg,
+		.recvmsg_nrt	=	NULL,
+		.sendmsg_rt	=	rtipc_sendmsg,
+		.sendmsg_nrt	=	NULL,
+		.ioctl_rt	=	rtipc_ioctl,
+		.ioctl_nrt	=	rtipc_ioctl,
+		.read_rt	=	rtipc_read,
+		.read_nrt	=	NULL,
+		.write_rt	=	rtipc_write,
+		.write_nrt	=	NULL,
+		.select		=	rtipc_select,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &rtipc_driver,
+	.label = "rtipc",
+};
+
+int __init __rtipc_init(void)
+{
+	int ret, n;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	for (n = 0; n < IPCPROTO_MAX; n++) {
+		if (protocols[n] && protocols[n]->proto_init) {
+			ret = protocols[n]->proto_init();
+			if (ret)
+				return ret;
+		}
+	}
+
+	return rtdm_dev_register(&device);
+}
+
+void __exit __rtipc_exit(void)
+{
+	int n;
+
+	rtdm_dev_unregister(&device);
+
+	for (n = 0; n < IPCPROTO_MAX; n++) {
+		if (protocols[n] && protocols[n]->proto_exit)
+			protocols[n]->proto_exit();
+	}
+}
+
+module_init(__rtipc_init);
+module_exit(__rtipc_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c
new file mode 100644
index 0000000..ae5b720
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/ipc/xddp.c
@@ -0,0 +1,1132 @@
+/**
+ * This file is part of the Xenomai project.
+ *
+ * @note Copyright (C) 2009 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/poll.h>
+#include <linux/slab.h>
+#include <linux/time.h>
+#include <cobalt/kernel/heap.h>
+#include <cobalt/kernel/bufd.h>
+#include <cobalt/kernel/pipe.h>
+#include <rtdm/ipc.h>
+#include "internal.h"
+
+#define XDDP_SOCKET_MAGIC 0xa21a21a2
+
+struct xddp_message {
+	struct xnpipe_mh mh;
+	char data[];
+};
+
+struct xddp_socket {
+	int magic;
+	struct sockaddr_ipc name;
+	struct sockaddr_ipc peer;
+
+	int minor;
+	size_t poolsz;
+	xnhandle_t handle;
+	char label[XNOBJECT_NAME_LEN];
+	struct rtdm_fd *fd;			/* i.e. RTDM socket fd */
+
+	struct xddp_message *buffer;
+	int buffer_port;
+	struct xnheap *bufpool;
+	struct xnheap privpool;
+	size_t fillsz;
+	size_t curbufsz;	/* Current streaming buffer size */
+	u_long status;
+	rtdm_lock_t lock;
+
+	nanosecs_rel_t timeout;	/* connect()/recvmsg() timeout */
+	size_t reqbufsz;	/* Requested streaming buffer size */
+
+	int (*monitor)(struct rtdm_fd *fd, int event, long arg);
+	struct rtipc_private *priv;
+};
+
+static struct sockaddr_ipc nullsa = {
+	.sipc_family = AF_RTIPC,
+	.sipc_port = -1
+};
+
+static struct rtdm_fd *portmap[CONFIG_XENO_OPT_PIPE_NRDEV]; /* indexes RTDM fildes */
+
+#define _XDDP_SYNCWAIT  0
+#define _XDDP_ATOMIC    1
+#define _XDDP_BINDING   2
+#define _XDDP_BOUND     3
+#define _XDDP_CONNECTED 4
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+static char *__xddp_link_target(void *obj)
+{
+	struct xddp_socket *sk = obj;
+
+	return kasformat("/dev/rtp%d", sk->minor);
+}
+
+extern struct xnptree rtipc_ptree;
+
+static struct xnpnode_link __xddp_pnode = {
+	.node = {
+		.dirname = "xddp",
+		.root = &rtipc_ptree,
+		.ops = &xnregistry_vlink_ops,
+	},
+	.target = __xddp_link_target,
+};
+
+#else /* !CONFIG_XENO_OPT_VFILE */
+
+static struct xnpnode_link __xddp_pnode = {
+	.node = {
+		.dirname = "xddp",
+	},
+};
+
+#endif /* !CONFIG_XENO_OPT_VFILE */
+
+static void *__xddp_alloc_handler(size_t size, void *skarg) /* nklock free */
+{
+	struct xddp_socket *sk = skarg;
+	void *buf;
+
+	/* Try to allocate memory for the incoming message. */
+	buf = xnheap_alloc(sk->bufpool, size);
+	if (unlikely(buf == NULL)) {
+		if (sk->monitor)
+			sk->monitor(sk->fd, XDDP_EVTNOBUF, size);
+		if (size > xnheap_get_size(sk->bufpool))
+			buf = (void *)-1; /* Will never succeed. */
+	}
+
+	return buf;
+}
+
+static int __xddp_resize_streambuf(struct xddp_socket *sk) /* sk->lock held */
+{
+	if (sk->buffer)
+		xnheap_free(sk->bufpool, sk->buffer);
+
+	if (sk->reqbufsz == 0) {
+		sk->buffer = NULL;
+		sk->curbufsz = 0;
+		return 0;
+	}
+
+	sk->buffer = xnheap_alloc(sk->bufpool, sk->reqbufsz);
+	if (sk->buffer == NULL) {
+		sk->curbufsz = 0;
+		return -ENOMEM;
+	}
+
+	sk->curbufsz = sk->reqbufsz;
+
+	return 0;
+}
+
+static void __xddp_free_handler(void *buf, void *skarg) /* nklock free */
+{
+	struct xddp_socket *sk = skarg;
+	rtdm_lockctx_t s;
+
+	if (buf != sk->buffer) {
+		xnheap_free(sk->bufpool, buf);
+		return;
+	}
+
+	/* Reset the streaming buffer. */
+
+	rtdm_lock_get_irqsave(&sk->lock, s);
+
+	sk->fillsz = 0;
+	sk->buffer_port = -1;
+	__clear_bit(_XDDP_SYNCWAIT, &sk->status);
+	__clear_bit(_XDDP_ATOMIC, &sk->status);
+
+	/*
+	 * If an XDDP_BUFSZ request is pending, resize the streaming
+	 * buffer on-the-fly.
+	 */
+	if (unlikely(sk->curbufsz != sk->reqbufsz))
+		__xddp_resize_streambuf(sk);
+
+	rtdm_lock_put_irqrestore(&sk->lock, s);
+}
+
+static void __xddp_output_handler(struct xnpipe_mh *mh, void *skarg) /* nklock held */
+{
+	struct xddp_socket *sk = skarg;
+
+	if (sk->monitor)
+		sk->monitor(sk->fd, XDDP_EVTOUT, xnpipe_m_size(mh));
+}
+
+static int __xddp_input_handler(struct xnpipe_mh *mh, int retval, void *skarg) /* nklock held */
+{
+	struct xddp_socket *sk = skarg;
+
+	if (sk->monitor) {
+		if (retval == 0)
+			/* Callee may alter the return value passed to userland. */
+			retval = sk->monitor(sk->fd, XDDP_EVTIN, xnpipe_m_size(mh));
+		else if (retval == -EPIPE && mh == NULL)
+			sk->monitor(sk->fd, XDDP_EVTDOWN, 0);
+	}
+
+	if (retval == 0 &&
+	    (__xnpipe_pollstate(sk->minor) & POLLIN) != 0 &&
+	    xnselect_signal(&sk->priv->recv_block, POLLIN))
+		xnsched_run();
+
+	return retval;
+}
+
+static void __xddp_release_handler(void *skarg) /* nklock free */
+{
+	struct xddp_socket *sk = skarg;
+	void *poolmem;
+	u32 poolsz;
+
+	if (sk->bufpool == &sk->privpool) {
+		poolmem = xnheap_get_membase(&sk->privpool);
+		poolsz = xnheap_get_size(&sk->privpool);
+		xnheap_destroy(&sk->privpool);
+		xnheap_vfree(poolmem);
+	} else if (sk->buffer)
+		xnfree(sk->buffer);
+
+	kfree(sk);
+}
+
+static int xddp_socket(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_socket *sk = priv->state;
+
+	sk->magic = XDDP_SOCKET_MAGIC;
+	sk->name = nullsa;	/* Unbound */
+	sk->peer = nullsa;
+	sk->minor = -1;
+	sk->handle = 0;
+	*sk->label = 0;
+	sk->poolsz = 0;
+	sk->buffer = NULL;
+	sk->buffer_port = -1;
+	sk->bufpool = NULL;
+	sk->fillsz = 0;
+	sk->status = 0;
+	sk->timeout = RTDM_TIMEOUT_INFINITE;
+	sk->curbufsz = 0;
+	sk->reqbufsz = 0;
+	sk->monitor = NULL;
+	rtdm_lock_init(&sk->lock);
+	sk->priv = priv;
+
+	return 0;
+}
+
+static void xddp_close(struct rtdm_fd *fd)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_socket *sk = priv->state;
+	rtdm_lockctx_t s;
+
+	sk->monitor = NULL;
+
+	if (!test_bit(_XDDP_BOUND, &sk->status))
+		return;
+
+	cobalt_atomic_enter(s);
+	portmap[sk->name.sipc_port] = NULL;
+	cobalt_atomic_leave(s);
+
+	if (sk->handle)
+		xnregistry_remove(sk->handle);
+
+	xnpipe_disconnect(sk->minor);
+}
+
+static ssize_t __xddp_recvmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      struct sockaddr_ipc *saddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_message *mbuf = NULL; /* Keep GCC quiet */
+	struct xddp_socket *sk = priv->state;
+	ssize_t maxlen, len, wrlen, vlen;
+	nanosecs_rel_t timeout;
+	struct xnpipe_mh *mh;
+	int nvec, rdoff, ret;
+	struct xnbufd bufd;
+	spl_t s;
+
+	if (!test_bit(_XDDP_BOUND, &sk->status))
+		return -EAGAIN;
+
+	maxlen = rtdm_get_iov_flatlen(iov, iovlen);
+	if (maxlen == 0)
+		return 0;
+
+	timeout = (flags & MSG_DONTWAIT) ? RTDM_TIMEOUT_NONE : sk->timeout;
+	/* Pull heading message from the input queue. */
+	len = xnpipe_recv(sk->minor, &mh, timeout);
+	if (len < 0)
+		return len == -EIDRM ? 0 : len;
+	if (len > maxlen) {
+		ret = -ENOBUFS;
+		goto out;
+	}
+
+	mbuf = container_of(mh, struct xddp_message, mh);
+
+	if (saddr)
+		*saddr = sk->name;
+
+	/* Write "len" bytes from mbuf->data to the vector cells */
+	for (ret = 0, nvec = 0, rdoff = 0, wrlen = len;
+	     nvec < iovlen && wrlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = wrlen >= iov[nvec].iov_len ? iov[nvec].iov_len : wrlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_from_kmem(&bufd, mbuf->data + rdoff, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto out;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		wrlen -= vlen;
+		rdoff += vlen;
+	}
+out:
+	xnheap_free(sk->bufpool, mbuf);
+	cobalt_atomic_enter(s);
+	if ((__xnpipe_pollstate(sk->minor) & POLLIN) == 0 &&
+	    xnselect_signal(&priv->recv_block, 0))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return ret ?: len;
+}
+
+static ssize_t xddp_recvmsg(struct rtdm_fd *fd,
+			    struct user_msghdr *msg, int flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct sockaddr_ipc saddr;
+	ssize_t ret;
+
+	if (flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen < sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+	} else if (msg->msg_namelen != 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __xddp_recvmsg(fd, iov, msg->msg_iovlen, flags, &saddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy the updated I/O vector back */
+	if (rtdm_put_iovec(fd, iov, msg, iov_fast))
+		return -EFAULT;
+
+	/* Copy the source address if required. */
+	if (msg->msg_name) {
+		if (rtipc_put_arg(fd, msg->msg_name, &saddr, sizeof(saddr)))
+			return -EFAULT;
+		msg->msg_namelen = sizeof(struct sockaddr_ipc);
+	}
+
+	return ret;
+}
+
+static ssize_t xddp_read(struct rtdm_fd *fd, void *buf, size_t len)
+{
+	struct iovec iov = { .iov_base = buf, .iov_len = len };
+
+	return __xddp_recvmsg(fd, &iov, 1, 0, NULL);
+}
+
+static ssize_t __xddp_stream(struct xddp_socket *sk,
+			     int from, struct xnbufd *bufd)
+{
+	struct xddp_message *mbuf;
+	size_t fillptr, rembytes;
+	rtdm_lockctx_t s;
+	ssize_t outbytes;
+	int ret;
+
+	/*
+	 * xnpipe_msend() and xnpipe_mfixup() routines will only grab
+	 * the nklock directly or indirectly, so holding our socket
+	 * lock across those calls is fine.
+	 */
+	rtdm_lock_get_irqsave(&sk->lock, s);
+
+	/*
+	 * There are two cases in which we must remove the cork
+	 * unconditionally and send the incoming data as a standalone
+	 * datagram: the destination port does not support streaming,
+	 * or its streaming buffer is already filled with data issued
+	 * from another port.
+	 */
+	if (sk->curbufsz == 0 ||
+	    (sk->buffer_port >= 0 && sk->buffer_port != from)) {
+		/* This will end up into a standalone datagram. */
+		outbytes = 0;
+		goto out;
+	}
+
+	mbuf = sk->buffer;
+	rembytes = sk->curbufsz - sizeof(*mbuf) - sk->fillsz;
+	outbytes = bufd->b_len > rembytes ? rembytes : bufd->b_len;
+	if (likely(outbytes > 0)) {
+	repeat:
+		/* Mark the beginning of a should-be-atomic section. */
+		__set_bit(_XDDP_ATOMIC, &sk->status);
+		fillptr = sk->fillsz;
+		sk->fillsz += outbytes;
+
+		rtdm_lock_put_irqrestore(&sk->lock, s);
+		ret = xnbufd_copy_to_kmem(mbuf->data + fillptr,
+					  bufd, outbytes);
+		rtdm_lock_get_irqsave(&sk->lock, s);
+
+		if (ret < 0) {
+			outbytes = ret;
+			__clear_bit(_XDDP_ATOMIC, &sk->status);
+			goto out;
+		}
+
+		/* We haven't been atomic, let's try again. */
+		if (!__test_and_clear_bit(_XDDP_ATOMIC, &sk->status))
+			goto repeat;
+
+		if (__test_and_set_bit(_XDDP_SYNCWAIT, &sk->status))
+			outbytes = xnpipe_mfixup(sk->minor,
+						 &mbuf->mh, outbytes);
+		else {
+			sk->buffer_port = from;
+			outbytes = xnpipe_send(sk->minor, &mbuf->mh,
+					       outbytes + sizeof(*mbuf),
+					       XNPIPE_NORMAL);
+			if (outbytes > 0)
+				outbytes -= sizeof(*mbuf);
+		}
+	}
+
+out:
+	rtdm_lock_put_irqrestore(&sk->lock, s);
+
+	return outbytes;
+}
+
+static ssize_t __xddp_sendmsg(struct rtdm_fd *fd,
+			      struct iovec *iov, int iovlen, int flags,
+			      const struct sockaddr_ipc *daddr)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	ssize_t len, rdlen, wrlen, vlen, ret, sublen;
+	struct xddp_socket *sk = priv->state;
+	struct xddp_message *mbuf;
+	struct xddp_socket *rsk;
+	struct rtdm_fd *rfd;
+	int nvec, to, from;
+	struct xnbufd bufd;
+	rtdm_lockctx_t s;
+
+	len = rtdm_get_iov_flatlen(iov, iovlen);
+	if (len == 0)
+		return 0;
+
+	from = sk->name.sipc_port;
+	to = daddr->sipc_port;
+
+	cobalt_atomic_enter(s);
+	rfd = portmap[to];
+	if (rfd && rtdm_fd_lock(rfd) < 0)
+		rfd = NULL;
+	cobalt_atomic_leave(s);
+
+	if (rfd == NULL)
+		return -ECONNRESET;
+
+	rsk = rtipc_fd_to_state(rfd);
+	if (!test_bit(_XDDP_BOUND, &rsk->status)) {
+		rtdm_fd_unlock(rfd);
+		return -ECONNREFUSED;
+	}
+
+	sublen = len;
+	nvec = 0;
+
+	/*
+	 * If active, the streaming buffer is already pending on the
+	 * output queue, so we basically have nothing to do during a
+	 * MSG_MORE -> MSG_NONE transition. Therefore, we only have to
+	 * take care of filling that buffer when MSG_MORE is
+	 * given. Yummie.
+	 */
+	if (flags & MSG_MORE) {
+		for (rdlen = sublen, wrlen = 0;
+		     nvec < iovlen && rdlen > 0; nvec++) {
+			if (iov[nvec].iov_len == 0)
+				continue;
+			vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+			if (rtdm_fd_is_user(fd)) {
+				xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+				ret = __xddp_stream(rsk, from, &bufd);
+				xnbufd_unmap_uread(&bufd);
+			} else {
+				xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+				ret = __xddp_stream(rsk, from, &bufd);
+				xnbufd_unmap_kread(&bufd);
+			}
+			if (ret < 0)
+				goto fail_unlock;
+			wrlen += ret;
+			rdlen -= ret;
+			iov[nvec].iov_base += ret;
+			iov[nvec].iov_len -= ret;
+			/*
+			 * In case of a short write to the streaming
+			 * buffer, send the unsent part as a
+			 * standalone datagram.
+			 */
+			if (ret < vlen) {
+				sublen = rdlen;
+				goto nostream;
+			}
+		}
+		len = wrlen;
+		goto done;
+	}
+
+nostream:
+	mbuf = xnheap_alloc(rsk->bufpool, sublen + sizeof(*mbuf));
+	if (unlikely(mbuf == NULL)) {
+		ret = -ENOMEM;
+		goto fail_unlock;
+	}
+
+	/*
+	 * Move "sublen" bytes to mbuf->data from the vector cells
+	 */
+	for (rdlen = sublen, wrlen = 0; nvec < iovlen && rdlen > 0; nvec++) {
+		if (iov[nvec].iov_len == 0)
+			continue;
+		vlen = rdlen >= iov[nvec].iov_len ? iov[nvec].iov_len : rdlen;
+		if (rtdm_fd_is_user(fd)) {
+			xnbufd_map_uread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wrlen, &bufd, vlen);
+			xnbufd_unmap_uread(&bufd);
+		} else {
+			xnbufd_map_kread(&bufd, iov[nvec].iov_base, vlen);
+			ret = xnbufd_copy_to_kmem(mbuf->data + wrlen, &bufd, vlen);
+			xnbufd_unmap_kread(&bufd);
+		}
+		if (ret < 0)
+			goto fail_freebuf;
+		iov[nvec].iov_base += vlen;
+		iov[nvec].iov_len -= vlen;
+		rdlen -= vlen;
+		wrlen += vlen;
+	}
+
+	ret = xnpipe_send(rsk->minor, &mbuf->mh,
+			  sublen + sizeof(*mbuf),
+			  (flags & MSG_OOB) ?
+			  XNPIPE_URGENT : XNPIPE_NORMAL);
+
+	if (unlikely(ret < 0)) {
+	fail_freebuf:
+		xnheap_free(rsk->bufpool, mbuf);
+	fail_unlock:
+		rtdm_fd_unlock(rfd);
+		return ret;
+	}
+done:
+	rtdm_fd_unlock(rfd);
+
+	return len;
+}
+
+static ssize_t xddp_sendmsg(struct rtdm_fd *fd,
+			    const struct user_msghdr *msg, int flags)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	struct xddp_socket *sk = priv->state;
+	struct sockaddr_ipc daddr;
+	ssize_t ret;
+
+	/*
+	 * We accept MSG_DONTWAIT, but do not care about it, since
+	 * writing to the real-time endpoint of a message pipe must be
+	 * a non-blocking operation.
+	 */
+	if (flags & ~(MSG_MORE | MSG_OOB | MSG_DONTWAIT))
+		return -EINVAL;
+
+	/*
+	 * MSG_MORE and MSG_OOB are mutually exclusive in our
+	 * implementation.
+	 */
+	if ((flags & (MSG_MORE | MSG_OOB)) == (MSG_MORE | MSG_OOB))
+		return -EINVAL;
+
+	if (msg->msg_name) {
+		if (msg->msg_namelen != sizeof(struct sockaddr_ipc))
+			return -EINVAL;
+
+		/* Fetch the destination address to send to. */
+		if (rtipc_get_arg(fd, &daddr, msg->msg_name, sizeof(daddr)))
+			return -EFAULT;
+
+		if (daddr.sipc_port < 0 ||
+		    daddr.sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV)
+			return -EINVAL;
+	} else {
+		if (msg->msg_namelen != 0)
+			return -EINVAL;
+		daddr = sk->peer;
+		if (daddr.sipc_port < 0)
+			return -EDESTADDRREQ;
+	}
+
+	if (msg->msg_iovlen >= UIO_MAXIOV)
+		return -EINVAL;
+
+	/* Copy I/O vector in */
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	ret = __xddp_sendmsg(fd, iov, msg->msg_iovlen, flags, &daddr);
+	if (ret <= 0) {
+		rtdm_drop_iovec(iov, iov_fast);
+		return ret;
+	}
+
+	/* Copy updated I/O vector back */
+	return rtdm_put_iovec(fd, iov, msg, iov_fast) ?: ret;
+}
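+
+/*
+ * Streaming sketch (illustrative): payloads sent with MSG_MORE gather
+ * in the peer's streaming buffer and reach the non real-time side as
+ * a contiguous byte stream, while a plain send() travels as a
+ * standalone datagram. Buffer names and sizes are hypothetical.
+ *
+ *	send(s, hdr, hdrlen, MSG_MORE);
+ *	send(s, body, bodylen, MSG_MORE);
+ *	send(s, record, reclen, 0);
+ *
+ * The last call above goes out as a standalone datagram.
+ */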
+
+static ssize_t xddp_write(struct rtdm_fd *fd,
+			  const void *buf, size_t len)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct iovec iov = { .iov_base = (void *)buf, .iov_len = len };
+	struct xddp_socket *sk = priv->state;
+
+	if (sk->peer.sipc_port < 0)
+		return -EDESTADDRREQ;
+
+	return __xddp_sendmsg(fd, &iov, 1, 0, &sk->peer);
+}
+
+static int __xddp_bind_socket(struct rtipc_private *priv,
+			      struct sockaddr_ipc *sa)
+{
+	struct xddp_socket *sk = priv->state;
+	struct xnpipe_operations ops;
+	rtdm_lockctx_t s;
+	size_t poolsz;
+	void *poolmem;
+	int ret = 0;
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	/* Allow special port -1 for auto-selection. */
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV)
+		return -EINVAL;
+
+	cobalt_atomic_enter(s);
+	if (test_bit(_XDDP_BOUND, &sk->status) ||
+	    __test_and_set_bit(_XDDP_BINDING, &sk->status))
+		ret = -EADDRINUSE;
+	cobalt_atomic_leave(s);
+	if (ret)
+		return ret;
+
+	poolsz = sk->poolsz;
+	if (poolsz > 0) {
+		poolsz = PAGE_ALIGN(poolsz);
+		poolsz += PAGE_ALIGN(sk->reqbufsz);
+		poolmem = xnheap_vmalloc(poolsz);
+		if (poolmem == NULL) {
+			ret = -ENOMEM;
+			goto fail;
+		}
+
+		ret = xnheap_init(&sk->privpool, poolmem, poolsz);
+		if (ret) {
+			xnheap_vfree(poolmem);
+			goto fail;
+		}
+
+		sk->bufpool = &sk->privpool;
+	} else
+		sk->bufpool = &cobalt_heap;
+
+	if (sk->reqbufsz > 0) {
+		sk->buffer = xnheap_alloc(sk->bufpool, sk->reqbufsz);
+		if (sk->buffer == NULL) {
+			ret = -ENOMEM;
+			goto fail_freeheap;
+		}
+		sk->curbufsz = sk->reqbufsz;
+	}
+
+	sk->fd = rtdm_private_to_fd(priv);
+
+	ops.output = &__xddp_output_handler;
+	ops.input = &__xddp_input_handler;
+	ops.alloc_ibuf = &__xddp_alloc_handler;
+	ops.free_ibuf = &__xddp_free_handler;
+	ops.free_obuf = &__xddp_free_handler;
+	ops.release = &__xddp_release_handler;
+
+	ret = xnpipe_connect(sa->sipc_port, &ops, sk);
+	if (ret < 0) {
+		if (ret == -EBUSY)
+			ret = -EADDRINUSE;
+	fail_freeheap:
+		if (poolsz > 0) {
+			xnheap_destroy(&sk->privpool);
+			xnheap_vfree(poolmem);
+		}
+	fail:
+		clear_bit(_XDDP_BINDING, &sk->status);
+		return ret;
+	}
+
+	sk->minor = ret;
+	sa->sipc_port = ret;
+	sk->name = *sa;
+	/* Set default destination if unset at binding time. */
+	if (sk->peer.sipc_port < 0)
+		sk->peer = *sa;
+
+	if (poolsz > 0)
+		xnheap_set_name(sk->bufpool, "xddp-pool@%d", sa->sipc_port);
+
+	if (*sk->label) {
+		ret = xnregistry_enter(sk->label, sk, &sk->handle,
+				       &__xddp_pnode.node);
+		if (ret) {
+			/* The release handler will cleanup the pool for us. */
+			xnpipe_disconnect(sk->minor);
+			return ret;
+		}
+	}
+
+	cobalt_atomic_enter(s);
+	portmap[sk->minor] = rtdm_private_to_fd(priv);
+	__clear_bit(_XDDP_BINDING, &sk->status);
+	__set_bit(_XDDP_BOUND, &sk->status);
+	if (xnselect_signal(&priv->send_block, POLLOUT))
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
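+
+/*
+ * Binding sketch (illustrative): binding to port N pairs the socket
+ * with message pipe minor N, i.e. /dev/rtpN on the regular Linux
+ * side; -1 auto-selects a free minor, retrievable via getsockname().
+ * The port value is hypothetical.
+ *
+ *	struct sockaddr_ipc sa = {
+ *		.sipc_family = AF_RTIPC,
+ *		.sipc_port = 7,
+ *	};
+ *
+ *	bind(s, (struct sockaddr *)&sa, sizeof(sa));
+ */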
+
+static int __xddp_connect_socket(struct xddp_socket *sk,
+				 struct sockaddr_ipc *sa)
+{
+	struct sockaddr_ipc _sa;
+	struct xddp_socket *rsk;
+	int ret, resched = 0;
+	rtdm_lockctx_t s;
+	xnhandle_t h;
+
+	if (sa == NULL) {
+		_sa = nullsa;
+		sa = &_sa;
+		goto set_assoc;
+	}
+
+	if (sa->sipc_family != AF_RTIPC)
+		return -EINVAL;
+
+	if (sa->sipc_port < -1 ||
+	    sa->sipc_port >= CONFIG_XENO_OPT_PIPE_NRDEV)
+		return -EINVAL;
+	/*
+	 * - If a valid sipc_port is passed in the [0..NRDEV-1] range,
+	 * it is used verbatim and the connection succeeds
+	 * immediately, regardless of whether the destination is
+	 * bound at the time of the call.
+	 *
+	 * - If sipc_port is -1 and a label was set via XDDP_LABEL,
+	 * connect() blocks for the requested amount of time (see
+	 * SO_RCVTIMEO) until a socket is bound to the same label.
+	 *
+	 * - If sipc_port is -1 and no label is given, the default
+	 * destination address is cleared, meaning that any subsequent
+	 * write() to the socket will return -EDESTADDRREQ, until a
+	 * valid destination address is set via connect() or bind().
+	 *
+	 * - In all other cases, -EINVAL is returned.
+	 */
+	if (sa->sipc_port < 0 && *sk->label) {
+		ret = xnregistry_bind(sk->label,
+				      sk->timeout, XN_RELATIVE, &h);
+		if (ret)
+			return ret;
+
+		cobalt_atomic_enter(s);
+		rsk = xnregistry_lookup(h, NULL);
+		if (rsk == NULL || rsk->magic != XDDP_SOCKET_MAGIC)
+			ret = -EINVAL;
+		else {
+			/* Fetch labeled port number. */
+			sa->sipc_port = rsk->minor;
+			resched = xnselect_signal(&sk->priv->send_block, POLLOUT);
+		}
+		cobalt_atomic_leave(s);
+		if (ret)
+			return ret;
+	} else if (sa->sipc_port < 0)
+		sa = &nullsa;
+set_assoc:
+	cobalt_atomic_enter(s);
+	if (!test_bit(_XDDP_BOUND, &sk->status))
+		/* Set default name. */
+		sk->name = *sa;
+	/* Set default destination. */
+	sk->peer = *sa;
+	if (sa->sipc_port < 0)
+		__clear_bit(_XDDP_CONNECTED, &sk->status);
+	else
+		__set_bit(_XDDP_CONNECTED, &sk->status);
+	if (resched)
+		xnsched_run();
+	cobalt_atomic_leave(s);
+
+	return 0;
+}
+
+static int __xddp_setsockopt(struct xddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	int (*monitor)(struct rtdm_fd *fd, int event, long arg);
+	struct _rtdm_setsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	size_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptin(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			ret = rtipc_get_timeval(fd, &tv, sopt.optval, sopt.optlen);
+			if (ret)
+				return ret;
+			sk->timeout = rtipc_timeval_to_ns(&tv);
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_XDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case XDDP_BUFSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len > 0) {
+			len += sizeof(struct xddp_message);
+			if (sk->bufpool &&
+			    len > xnheap_get_size(sk->bufpool)) {
+				return -EINVAL;
+			}
+		}
+		rtdm_lock_get_irqsave(&sk->lock, s);
+		sk->reqbufsz = len;
+		if (len != sk->curbufsz &&
+		    !test_bit(_XDDP_SYNCWAIT, &sk->status) &&
+		    test_bit(_XDDP_BOUND, &sk->status))
+			ret = __xddp_resize_streambuf(sk);
+		rtdm_lock_put_irqrestore(&sk->lock, s);
+		break;
+
+	case XDDP_POOLSZ:
+		ret = rtipc_get_length(fd, &len, sopt.optval, sopt.optlen);
+		if (ret)
+			return ret;
+		if (len == 0)
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		if (test_bit(_XDDP_BOUND, &sk->status) ||
+		    test_bit(_XDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else
+			sk->poolsz = len;
+		cobalt_atomic_leave(s);
+		break;
+
+	case XDDP_MONITOR:
+		/* Monitoring is available from kernel-space only. */
+		if (rtdm_fd_is_user(fd))
+			return -EPERM;
+		if (sopt.optlen != sizeof(monitor))
+			return -EINVAL;
+		if (rtipc_get_arg(NULL, &monitor, sopt.optval, sizeof(monitor)))
+			return -EFAULT;
+		sk->monitor = monitor;
+		break;
+
+	case XDDP_LABEL:
+		if (sopt.optlen < sizeof(plabel))
+			return -EINVAL;
+		if (rtipc_get_arg(fd, &plabel, sopt.optval, sizeof(plabel)))
+			return -EFAULT;
+		cobalt_atomic_enter(s);
+		if (test_bit(_XDDP_BOUND, &sk->status) ||
+		    test_bit(_XDDP_BINDING, &sk->status))
+			ret = -EALREADY;
+		else {
+			strcpy(sk->label, plabel.label);
+			sk->label[XNOBJECT_NAME_LEN-1] = 0;
+		}
+		cobalt_atomic_leave(s);
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
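+
+/*
+ * Option-ordering sketch (illustrative): XDDP_POOLSZ and XDDP_LABEL
+ * must precede bind(), whereas XDDP_BUFSZ may also be changed later,
+ * resizing the streaming buffer on the fly as implemented above.
+ * Sizes are hypothetical.
+ *
+ *	size_t poolsz = 16384, bufsz = 1024;
+ *
+ *	setsockopt(s, SOL_XDDP, XDDP_POOLSZ, &poolsz, sizeof(poolsz));
+ *	setsockopt(s, SOL_XDDP, XDDP_BUFSZ, &bufsz, sizeof(bufsz));
+ */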
+
+static int __xddp_getsockopt(struct xddp_socket *sk,
+			     struct rtdm_fd *fd,
+			     void *arg)
+{
+	struct _rtdm_getsockopt_args sopt;
+	struct rtipc_port_label plabel;
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t s;
+	socklen_t len;
+	int ret;
+
+	ret = rtipc_get_sockoptout(fd, &sopt, arg);
+	if (ret)
+		return ret;
+
+	if (rtipc_get_arg(fd, &len, sopt.optlen, sizeof(len)))
+		return -EFAULT;
+
+	if (sopt.level == SOL_SOCKET) {
+		switch (sopt.optname) {
+
+		case SO_RCVTIMEO_OLD:
+			rtipc_ns_to_timeval(&tv, sk->timeout);
+			ret = rtipc_put_timeval(fd, sopt.optval, &tv, len);
+			if (ret)
+				return ret;
+			break;
+
+		default:
+			ret = -EINVAL;
+		}
+
+		return ret;
+	}
+
+	if (sopt.level != SOL_XDDP)
+		return -ENOPROTOOPT;
+
+	switch (sopt.optname) {
+
+	case XDDP_LABEL:
+		if (len < sizeof(plabel))
+			return -EINVAL;
+		cobalt_atomic_enter(s);
+		strcpy(plabel.label, sk->label);
+		cobalt_atomic_leave(s);
+		if (rtipc_put_arg(fd, sopt.optval, &plabel, sizeof(plabel)))
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int __xddp_ioctl(struct rtdm_fd *fd,
+			unsigned int request, void *arg)
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct sockaddr_ipc saddr, *saddrp = &saddr;
+	struct xddp_socket *sk = priv->state;
+	int ret = 0;
+
+	switch (request) {
+
+	COMPAT_CASE(_RTIOC_CONNECT):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret == 0)
+			ret = __xddp_connect_socket(sk, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_BIND):
+		ret = rtipc_get_sockaddr(fd, &saddrp, arg);
+		if (ret)
+			return ret;
+		if (saddrp == NULL)
+			return -EFAULT;
+		ret = __xddp_bind_socket(priv, saddrp);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->name);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETPEERNAME):
+		ret = rtipc_put_sockaddr(fd, arg, &sk->peer);
+		break;
+
+	COMPAT_CASE(_RTIOC_SETSOCKOPT):
+		ret = __xddp_setsockopt(sk, fd, arg);
+		break;
+
+	COMPAT_CASE(_RTIOC_GETSOCKOPT):
+		ret = __xddp_getsockopt(sk, fd, arg);
+		break;
+
+	case _RTIOC_LISTEN:
+	COMPAT_CASE(_RTIOC_ACCEPT):
+		ret = -EOPNOTSUPP;
+		break;
+
+	case _RTIOC_SHUTDOWN:
+		ret = -ENOTCONN;
+		break;
+
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static int xddp_ioctl(struct rtdm_fd *fd,
+		      unsigned int request, void *arg)
+{
+	int ret;
+
+	switch (request) {
+	COMPAT_CASE(_RTIOC_BIND):
+		if (rtdm_in_rt_context())
+			return -ENOSYS;	/* Try downgrading to NRT */
+		fallthrough;
+	default:
+		ret = __xddp_ioctl(fd, request, arg);
+	}
+
+	return ret;
+}
+
+static unsigned int xddp_pollstate(struct rtdm_fd *fd) /* atomic */
+{
+	struct rtipc_private *priv = rtdm_fd_to_private(fd);
+	struct xddp_socket *sk = priv->state, *rsk;
+	unsigned int mask = 0, pollstate;
+	struct rtdm_fd *rfd;
+
+	pollstate = __xnpipe_pollstate(sk->minor);
+	if (test_bit(_XDDP_BOUND, &sk->status))
+		mask |= (pollstate & POLLIN);
+
+	/*
+	 * If the socket is connected, POLLOUT means that the peer
+	 * exists, is bound and can receive data. Otherwise POLLOUT is
+	 * always set, assuming the client is likely to use explicit
+	 * addressing in send operations.
+	 */
+	if (test_bit(_XDDP_CONNECTED, &sk->status)) {
+		rfd = portmap[sk->peer.sipc_port];
+		if (rfd) {
+			rsk = rtipc_fd_to_state(rfd);
+			mask |= (pollstate & POLLOUT);
+		}
+	} else
+		mask |= POLLOUT;
+
+	return mask;
+}
+
+struct rtipc_protocol xddp_proto_driver = {
+	.proto_name = "xddp",
+	.proto_statesz = sizeof(struct xddp_socket),
+	.proto_ops = {
+		.socket = xddp_socket,
+		.close = xddp_close,
+		.recvmsg = xddp_recvmsg,
+		.sendmsg = xddp_sendmsg,
+		.read = xddp_read,
+		.write = xddp_write,
+		.ioctl = xddp_ioctl,
+		.pollstate = xddp_pollstate,
+	}
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig
new file mode 100644
index 0000000..2e80324
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/Kconfig
@@ -0,0 +1,25 @@
+menu "RTnet"
+
+config XENO_DRIVERS_NET
+    depends on m
+    select NET
+    tristate "RTnet, TCP/IP socket interface"
+
+if XENO_DRIVERS_NET
+
+config XENO_DRIVERS_RTNET_CHECKED
+    bool "Internal Bug Checks"
+    default n
+    help
+    Switch on if you face crashes when RTnet is running or if you suspect
+    any other RTnet-related issues. This feature will add a few sanity
+    checks at critical points that will produce warnings on the kernel
+    console in case certain internal bugs are detected.
+
+source "drivers/xenomai/net/stack/Kconfig"
+source "drivers/xenomai/net/drivers/Kconfig"
+source "drivers/xenomai/net/addons/Kconfig"
+
+endif
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile
new file mode 100644
index 0000000..94525b4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_XENO_DRIVERS_NET) += stack/ drivers/ addons/
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig
new file mode 100644
index 0000000..e92f6d8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Kconfig
@@ -0,0 +1,44 @@
+menu "Add-Ons"
+    depends on XENO_DRIVERS_NET
+
+config XENO_DRIVERS_NET_ADDON_RTCAP
+    depends on XENO_DRIVERS_NET && m
+    select ETHERNET
+    tristate "Real-Time Capturing Support"
+    default n
+    help
+    This feature allows capturing real-time packets traversing the RTnet
+    stack. It can be used both to sniff passively on a network (in this
+    case you may want to enable the promiscuous mode of your real-time NIC
+    via rtifconfig) and to log the traffic the node receives and transmits
+    during normal operation. RTcap consists of additional hooks in the
+    RTnet stack and a separate module acting as an interface to standard
+    network analysis tools like Ethereal.
+
+    For further information see Documentation/README.rtcap.
+
+config XENO_DRIVERS_NET_ADDON_PROXY
+    depends on XENO_DRIVERS_NET_RTIPV4 && m
+    select ETHERNET
+    tristate "IP protocol proxy for Linux"
+    default n
+    help
+    Enables a forward-to-Linux module for all IP protocols that are not
+    handled by the IPv4 implementation of RTnet (TCP, UDP, etc.). Only use
+    it when you know what you are doing - it can easily break your real-time
+    requirements!
+
+    See Documentation/README.rtnetproxy for further information.
+
+config XENO_DRIVERS_NET_ADDON_PROXY_ARP
+    depends on XENO_DRIVERS_NET_ADDON_PROXY
+    bool "Enable ARP handling via protocol proxy"
+    default n
+    help
+    Enables ARP support for the IP protocol proxy. Incoming ARP replies
+    are then delivered to both the RTnet and the Linux network stack,
+    but only answered by Linux. The IP protocol proxy gets attached to
+    the RTnet device specified by the module parameter "rtdev_attach",
+    rteth0 by default.
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile
new file mode 100644
index 0000000..1f3939b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP) += rtcap.o
+
+rtcap-y := cap.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY) += rtnetproxy.o
+
+rtnetproxy-y := proxy.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c
new file mode 100644
index 0000000..3784b65
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/cap.c
@@ -0,0 +1,502 @@
+/***
+ *
+ *  rtcap/rtcap.c
+ *
+ *  Real-Time Capturing Interface
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/sched.h>
+
+#include <rtdev.h>
+#include <rtnet_chrdev.h>
+#include <rtnet_port.h> /* for netdev_priv() */
+
+MODULE_LICENSE("GPL");
+
+static unsigned int rtcap_rtskbs = 128;
+module_param(rtcap_rtskbs, uint, 0444);
+MODULE_PARM_DESC(rtcap_rtskbs, "Number of real-time socket buffers per "
+			       "real-time device");
+
+#define TAP_DEV 1
+#define RTMAC_TAP_DEV 2
+#define XMIT_HOOK 4
+
+static rtdm_nrtsig_t cap_signal;
+static struct rtskb_queue cap_queue;
+static struct rtskb_pool cap_pool;
+
+static struct tap_device_t {
+	struct net_device *tap_dev;
+	struct net_device *rtmac_tap_dev;
+	struct net_device_stats tap_dev_stats;
+	int present;
+	int (*orig_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+} tap_device[MAX_RT_DEVICES];
+
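+/*
+ * Note: the stack invokes this hook (via rtcap_handler) with rtcap_lock
+ * already held, which is why - unlike in the xmit hook below - the capture
+ * queue is modified here without taking the lock again.
+ */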
+void rtcap_rx_hook(struct rtskb *rtskb)
+{
+	bool trigger = false;
+
+	if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) {
+		tap_device[rtskb->rtdev->ifindex].tap_dev_stats.rx_dropped++;
+		return;
+	}
+
+	if (cap_queue.first == NULL) {
+		cap_queue.first = rtskb;
+		trigger = true;
+	} else
+		cap_queue.last->cap_next = rtskb;
+	cap_queue.last = rtskb;
+	rtskb->cap_next = NULL;
+
+	rtskb->cap_flags |= RTSKB_CAP_SHARED;
+
+	if (trigger)
+		rtdm_nrtsig_pend(&cap_signal);
+}
+
+int rtcap_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	struct tap_device_t *tap_dev = &tap_device[rtskb->rtdev->ifindex];
+	rtdm_lockctx_t context;
+	bool trigger = false;
+
+	if ((rtskb->cap_comp_skb = rtskb_pool_dequeue(&cap_pool)) == 0) {
+		tap_dev->tap_dev_stats.rx_dropped++;
+		return tap_dev->orig_xmit(rtskb, rtdev);
+	}
+
+	rtskb->cap_next = NULL;
+	rtskb->cap_start = rtskb->data;
+	rtskb->cap_len = rtskb->len;
+	rtskb->cap_flags |= RTSKB_CAP_SHARED;
+
+	rtskb->time_stamp = rtdm_clock_read();
+
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+	if (cap_queue.first == NULL) {
+		cap_queue.first = rtskb;
+		trigger = true;
+	} else
+		cap_queue.last->cap_next = rtskb;
+	cap_queue.last = rtskb;
+
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+	if (trigger)
+		rtdm_nrtsig_pend(&cap_signal);
+
+	return tap_dev->orig_xmit(rtskb, rtdev);
+}
+
+int rtcap_loopback_xmit_hook(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	struct tap_device_t *tap_dev = &tap_device[rtskb->rtdev->ifindex];
+
+	rtskb->time_stamp = rtdm_clock_read();
+
+	return tap_dev->orig_xmit(rtskb, rtdev);
+}
+
+void rtcap_kfree_rtskb(struct rtskb *rtskb)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *comp_skb;
+
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+	if (rtskb->cap_flags & RTSKB_CAP_SHARED) {
+		rtskb->cap_flags &= ~RTSKB_CAP_SHARED;
+
+		comp_skb = rtskb->cap_comp_skb;
+
+		rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+		rtskb_pool_queue_tail(comp_skb->pool, comp_skb);
+
+		return;
+	}
+
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+	rtskb->chain_end = rtskb;
+	rtskb_pool_queue_tail(rtskb->pool, rtskb);
+}
+
+static void convert_timestamp(nanosecs_abs_t timestamp, struct sk_buff *skb)
+{
+#ifdef CONFIG_KTIME_SCALAR
+	skb->tstamp.tv64 = timestamp;
+#else /* !CONFIG_KTIME_SCALAR */
+	unsigned long rem;
+
+	rem = do_div(timestamp, NSEC_PER_SEC);
+	skb->tstamp = ktime_set((long)timestamp, rem);
+#endif /* !CONFIG_KTIME_SCALAR */
+}
+
+static void rtcap_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	struct rtskb *rtskb;
+	struct sk_buff *skb;
+	struct sk_buff *rtmac_skb;
+	struct net_device_stats *stats;
+	int ifindex;
+	int active;
+	rtdm_lockctx_t context;
+
+	while (1) {
+		rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+		if ((rtskb = cap_queue.first) == NULL) {
+			rtdm_lock_put_irqrestore(&rtcap_lock, context);
+			break;
+		}
+
+		cap_queue.first = rtskb->cap_next;
+
+		rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+		ifindex = rtskb->rtdev->ifindex;
+		active = tap_device[ifindex].present;
+
+		if (active) {
+			if ((tap_device[ifindex].tap_dev->flags & IFF_UP) == 0)
+				active &= ~TAP_DEV;
+			if (active & RTMAC_TAP_DEV &&
+			    !(tap_device[ifindex].rtmac_tap_dev->flags &
+			      IFF_UP))
+				active &= ~RTMAC_TAP_DEV;
+		}
+
+		if (active == 0) {
+			tap_device[ifindex].tap_dev_stats.rx_dropped++;
+			rtcap_kfree_rtskb(rtskb);
+			continue;
+		}
+
+		skb = dev_alloc_skb(rtskb->cap_len);
+		if (skb) {
+			memcpy(skb_put(skb, rtskb->cap_len), rtskb->cap_start,
+			       rtskb->cap_len);
+
+			if (active & TAP_DEV) {
+				skb->dev = tap_device[ifindex].tap_dev;
+				skb->protocol = eth_type_trans(skb, skb->dev);
+				convert_timestamp(rtskb->time_stamp, skb);
+
+				rtmac_skb = NULL;
+				if ((rtskb->cap_flags &
+				     RTSKB_CAP_RTMAC_STAMP) &&
+				    (active & RTMAC_TAP_DEV)) {
+					rtmac_skb = skb_clone(skb, GFP_ATOMIC);
+					if (rtmac_skb != NULL)
+						convert_timestamp(
+							rtskb->cap_rtmac_stamp,
+							rtmac_skb);
+				}
+
+				rtcap_kfree_rtskb(rtskb);
+
+				stats = &tap_device[ifindex].tap_dev_stats;
+				stats->rx_packets++;
+				stats->rx_bytes += skb->len;
+
+				if (rtmac_skb != NULL) {
+					rtmac_skb->dev = tap_device[ifindex]
+								 .rtmac_tap_dev;
+					netif_rx(rtmac_skb);
+				}
+				netif_rx(skb);
+			} else if (rtskb->cap_flags & RTSKB_CAP_RTMAC_STAMP) {
+				skb->dev = tap_device[ifindex].rtmac_tap_dev;
+				skb->protocol = eth_type_trans(skb, skb->dev);
+				convert_timestamp(rtskb->cap_rtmac_stamp, skb);
+
+				rtcap_kfree_rtskb(rtskb);
+
+				stats = &tap_device[ifindex].tap_dev_stats;
+				stats->rx_packets++;
+				stats->rx_bytes += skb->len;
+
+				netif_rx(skb);
+			} else {
+				dev_kfree_skb(skb);
+				rtcap_kfree_rtskb(rtskb);
+			}
+		} else {
+			printk("RTcap: unable to allocate linux skb\n");
+			rtcap_kfree_rtskb(rtskb);
+		}
+	}
+}
+
+static int tap_dev_open(struct net_device *dev)
+{
+	int err;
+
+	err = try_module_get(THIS_MODULE);
+	if (err == 0)
+		return -EIDRM;
+
+	dev_addr_set(dev,
+		     (*(struct rtnet_device **)netdev_priv(dev))->dev_addr);
+
+	return 0;
+}
+
+static int tap_dev_stop(struct net_device *dev)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
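+/* The shadow devices are capture-only; refuse any transmission attempt. */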
+static int tap_dev_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	netif_stop_queue(dev);
+	return 1;
+}
+
+static struct net_device_stats *tap_dev_get_stats(struct net_device *dev)
+{
+	struct rtnet_device *rtdev = *(struct rtnet_device **)netdev_priv(dev);
+
+	return &tap_device[rtdev->ifindex].tap_dev_stats;
+}
+
+static int tap_dev_change_mtu(struct net_device *dev, int new_mtu)
+{
+	return -EINVAL;
+}
+
+static const struct net_device_ops tap_netdev_ops = {
+	.ndo_open = tap_dev_open,
+	.ndo_stop = tap_dev_stop,
+	.ndo_start_xmit = tap_dev_xmit,
+	.ndo_get_stats = tap_dev_get_stats,
+	.ndo_change_mtu = tap_dev_change_mtu,
+};
+
+static void tap_dev_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &tap_netdev_ops;
+	dev->mtu = 1500;
+	dev->flags &= ~IFF_MULTICAST;
+}
+
+void cleanup_tap_devices(void)
+{
+	int i;
+	struct rtnet_device *rtdev;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++)
+		if ((tap_device[i].present & TAP_DEV) != 0) {
+			if ((tap_device[i].present & XMIT_HOOK) != 0) {
+				rtdev = *(struct rtnet_device **)netdev_priv(
+					tap_device[i].tap_dev);
+
+				mutex_lock(&rtdev->nrt_lock);
+				rtdev->hard_start_xmit =
+					tap_device[i].orig_xmit;
+				if (rtdev->features & NETIF_F_LLTX)
+					rtdev->start_xmit =
+						tap_device[i].orig_xmit;
+				mutex_unlock(&rtdev->nrt_lock);
+
+				rtdev_dereference(rtdev);
+			}
+
+			if ((tap_device[i].present & RTMAC_TAP_DEV) != 0) {
+				unregister_netdev(tap_device[i].rtmac_tap_dev);
+				free_netdev(tap_device[i].rtmac_tap_dev);
+			}
+
+			unregister_netdev(tap_device[i].tap_dev);
+			free_netdev(tap_device[i].tap_dev);
+		}
+}
+
+int __init rtcap_init(void)
+{
+	struct rtnet_device *rtdev;
+	struct net_device *dev;
+	int ret;
+	int devices = 0;
+	int i;
+
+	printk("RTcap: real-time capturing interface\n");
+
+	rtskb_queue_init(&cap_queue);
+
+	rtdm_nrtsig_init(&cap_signal, rtcap_signal_handler, NULL);
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		tap_device[i].present = 0;
+
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev != NULL) {
+			mutex_lock(&rtdev->nrt_lock);
+
+			if (test_bit(PRIV_FLAG_UP, &rtdev->priv_flags)) {
+				mutex_unlock(&rtdev->nrt_lock);
+				printk("RTcap: %s busy, skipping device!\n",
+				       rtdev->name);
+				rtdev_dereference(rtdev);
+				continue;
+			}
+
+			if (rtdev->mac_priv != NULL) {
+				mutex_unlock(&rtdev->nrt_lock);
+
+				printk("RTcap: RTmac discipline already active on device %s. "
+				       "Load RTcap before RTmac!\n",
+				       rtdev->name);
+
+				rtdev_dereference(rtdev);
+				continue;
+			}
+
+			memset(&tap_device[i].tap_dev_stats, 0,
+			       sizeof(struct net_device_stats));
+
+			dev = alloc_netdev(sizeof(struct rtnet_device *),
+					   rtdev->name, NET_NAME_UNKNOWN,
+					   tap_dev_setup);
+			if (!dev) {
+				ret = -ENOMEM;
+				goto error3;
+			}
+
+			tap_device[i].tap_dev = dev;
+			*(struct rtnet_device **)netdev_priv(dev) = rtdev;
+
+			ret = register_netdev(dev);
+			if (ret < 0)
+				goto error3;
+
+			tap_device[i].present = TAP_DEV;
+
+			tap_device[i].orig_xmit = rtdev->hard_start_xmit;
+
+			if ((rtdev->flags & IFF_LOOPBACK) == 0) {
+				dev = alloc_netdev(
+					sizeof(struct rtnet_device *),
+					rtdev->name, NET_NAME_UNKNOWN,
+					tap_dev_setup);
+				if (!dev) {
+					ret = -ENOMEM;
+					goto error3;
+				}
+
+				tap_device[i].rtmac_tap_dev = dev;
+				*(struct rtnet_device **)netdev_priv(dev) =
+					rtdev;
+				strncat(dev->name, "-mac",
+					IFNAMSIZ - strlen(dev->name));
+
+				ret = register_netdev(dev);
+				if (ret < 0)
+					goto error3;
+
+				tap_device[i].present |= RTMAC_TAP_DEV;
+
+				rtdev->hard_start_xmit = rtcap_xmit_hook;
+			} else
+				rtdev->hard_start_xmit =
+					rtcap_loopback_xmit_hook;
+
+			/*
+			 * If the device requires no xmit_lock, start_xmit
+			 * equals hard_start_xmit => we have to update this
+			 * one as well.
+			 */
+			if (rtdev->features & NETIF_F_LLTX)
+				rtdev->start_xmit = rtdev->hard_start_xmit;
+
+			tap_device[i].present |= XMIT_HOOK;
+
+			mutex_unlock(&rtdev->nrt_lock);
+
+			devices++;
+		}
+	}
+
+	if (devices == 0) {
+		printk("RTcap: no real-time devices found!\n");
+		ret = -ENODEV;
+		goto error2;
+	}
+
+	if (rtskb_module_pool_init(&cap_pool, rtcap_rtskbs * devices) <
+	    rtcap_rtskbs * devices) {
+		rtskb_pool_release(&cap_pool);
+		ret = -ENOMEM;
+		goto error2;
+	}
+
+	/* register the capturing handler with the RTnet core
+	 * (adding the handler needs no locking) */
+	rtcap_handler = rtcap_rx_hook;
+
+	return 0;
+
+error3:
+	mutex_unlock(&rtdev->nrt_lock);
+	rtdev_dereference(rtdev);
+	printk("RTcap: unable to register %s!\n", dev->name);
+
+error2:
+	cleanup_tap_devices();
+	rtdm_nrtsig_destroy(&cap_signal);
+
+	return ret;
+}
+
+void rtcap_cleanup(void)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_nrtsig_destroy(&cap_signal);
+
+	/* unregister the capturing handler
+	 * (take the lock to make sure the handler is left before unloading) */
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+	rtcap_handler = NULL;
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+	/* empty queue (should be already empty) */
+	rtcap_signal_handler(NULL, NULL); /* the arguments are ignored anyway */
+
+	cleanup_tap_devices();
+
+	rtskb_pool_release(&cap_pool);
+
+	printk("RTcap: unloaded\n");
+}
+
+module_init(rtcap_init);
+module_exit(rtcap_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c
new file mode 100644
index 0000000..f61794a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/addons/proxy.c
@@ -0,0 +1,442 @@
+/* rtnetproxy.c: a Linux network driver that uses the RTnet driver to
+ * transport IP data from/to Linux kernel mode.
+ * This allows the use of TCP/IP from Linux space via the RTnet
+ * network adapter.
+ *
+ *
+ * Usage:
+ *
+ * insmod rtnetproxy.o    (only after having rtnet up and running)
+ *
+ * ifconfig rtproxy up IP_ADDRESS netmask NETMASK
+ *
+ * Use it like any other network device from linux.
+ *
+ * Restrictions:
+ * Only IPv4-based protocols are supported; UDP and ICMP can be sent out
+ * but not received - as these are handled directly by RTnet!
+ *
+ *
+ *
+ * Based on the linux net driver dummy.c by Nick Holloway
+ *
+ *
+ * Changelog:
+ *
+ * 08-Nov-2002  Mathias Koehrer - Clear separation between rtai context and
+ *                                standard linux driver context.
+ *                                Data exchange via ringbuffers.
+ *                                A RTAI thread is used for rtnet transmission.
+ *
+ * 05-Nov-2002  Mathias Koehrer - Initial version!
+ *                                Development based on rtnet 0.2.6,
+ *                                rtai-24.1.10, kernel 2.4.19
+ *
+ *
+ * Mathias Koehrer - mathias_koehrer@yahoo.de
+*/
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/init.h>
+#include <linux/inet.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <net/sock.h>
+#include <net/ip.h>
+
+#include <linux/if_ether.h> /* For the statistics structure. */
+#include <linux/if_arp.h> /* For ARPHRD_ETHER */
+
+#include <rtdev.h>
+#include <rtskb.h>
+#include <rtdm/driver.h>
+#include <ipv4/ip_input.h>
+#include <ipv4/route.h>
+#include <rtnet_port.h>
+
+static struct net_device *dev_rtnetproxy;
+
+/* **************************************************************************
+ *  SKB pool management (JK):
+ * ************************************************************************ */
+#define DEFAULT_PROXY_RTSKBS 32
+
+static unsigned int proxy_rtskbs = DEFAULT_PROXY_RTSKBS;
+module_param(proxy_rtskbs, uint, 0444);
+MODULE_PARM_DESC(proxy_rtskbs,
+		 "Number of realtime socket buffers in proxy pool");
+
+static struct rtskb_pool rtskb_pool;
+
+static struct rtskb_queue tx_queue;
+static struct rtskb_queue rx_queue;
+
+/* handle for non-real-time signal */
+static rtdm_nrtsig_t rtnetproxy_rx_signal;
+
+/* Thread for transmission */
+static rtdm_task_t rtnetproxy_tx_task;
+
+static rtdm_event_t rtnetproxy_tx_event;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+static char *rtdev_attach = "rteth0";
+module_param(rtdev_attach, charp, 0444);
+MODULE_PARM_DESC(rtdev_attach, "Attach to the specified RTnet device");
+
+struct rtnet_device *rtnetproxy_rtdev;
+#endif
+
+/* ************************************************************************
+ * ************************************************************************
+ *   T R A N S M I T
+ * ************************************************************************
+ * ************************************************************************ */
+
+static void rtnetproxy_tx_loop(void *arg)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&rtnetproxy_tx_event) < 0)
+			break;
+
+		while ((rtskb = rtskb_dequeue(&tx_queue)) != NULL) {
+			rtdev = rtskb->rtdev;
+			rtdev_xmit_proxy(rtskb);
+			rtdev_dereference(rtdev);
+		}
+	}
+}
+
+/* ************************************************************************
+ *  hard_xmit
+ *
+ *  This function runs in linux kernel context and is executed whenever
+ *  there is a frame to be sent out.
+ * ************************************************************************ */
+static int rtnetproxy_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct ethhdr *eth = (struct ethhdr *)skb->data;
+	struct rtskb *rtskb;
+	int len = skb->len;
+#ifndef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	struct dest_route rt;
+	struct iphdr *iph;
+	u32 saddr, daddr;
+#endif
+
+	switch (ntohs(eth->h_proto)) {
+	case ETH_P_IP:
+		if (len < sizeof(struct ethhdr) + sizeof(struct iphdr))
+			goto drop1;
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	case ETH_P_ARP:
+#endif
+		break;
+	default:
+	drop1:
+		dev->stats.tx_dropped++;
+		dev_kfree_skb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	rtskb = alloc_rtskb(len, &rtskb_pool);
+	if (!rtskb)
+		return NETDEV_TX_BUSY;
+
+	memcpy(rtskb_put(rtskb, len), skb->data, len);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	dev_kfree_skb(skb);
+
+	rtskb->rtdev = rtnetproxy_rtdev;
+	if (rtdev_reference(rtnetproxy_rtdev) == 0) {
+		dev->stats.tx_dropped++;
+		kfree_rtskb(rtskb);
+		return NETDEV_TX_BUSY;
+	}
+
+#else /* !CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+	iph = (struct iphdr *)(skb->data + sizeof(struct ethhdr));
+	saddr = iph->saddr;
+	daddr = iph->daddr;
+
+	dev_kfree_skb(skb);
+
+	if (rt_ip_route_output(&rt, daddr, INADDR_ANY) < 0) {
+	drop2:
+		dev->stats.tx_dropped++;
+		kfree_rtskb(rtskb);
+		return NETDEV_TX_OK;
+	}
+	if (rt.rtdev->local_ip != saddr) {
+		rtdev_dereference(rt.rtdev);
+		goto drop2;
+	}
+
+	eth = (struct ethhdr *)rtskb->data;
+	memcpy(eth->h_source, rt.rtdev->dev_addr, rt.rtdev->addr_len);
+	memcpy(eth->h_dest, rt.dev_addr, rt.rtdev->addr_len);
+
+	rtskb->rtdev = rt.rtdev;
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+
+	dev->stats.tx_packets++;
+	dev->stats.tx_bytes += len;
+
+	rtskb_queue_tail(&tx_queue, rtskb);
+	rtdm_event_signal(&rtnetproxy_tx_event);
+
+	return NETDEV_TX_OK;
+}
+
+/* ************************************************************************
+ * ************************************************************************
+ *   R E C E I V E
+ * ************************************************************************
+ * ************************************************************************ */
+
+/* ************************************************************************
+ * This function runs in real-time context.
+ *
+ * It is called from inside rtnet whenever a packet has been received that
+ * has to be processed by rtnetproxy.
+ * ************************************************************************ */
+static void rtnetproxy_recv(struct rtskb *rtskb)
+{
+	/* Acquire rtskb (JK) */
+	if (rtskb_acquire(rtskb, &rtskb_pool) != 0) {
+		dev_rtnetproxy->stats.rx_dropped++;
+		rtdm_printk("rtnetproxy_recv: No free rtskb in pool\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	if (rtskb_queue_tail_check(&rx_queue, rtskb))
+		rtdm_nrtsig_pend(&rtnetproxy_rx_signal);
+}
+
+/* ************************************************************************
+ * This function runs in kernel mode.
+ * It is activated from rtnetproxy_signal_handler whenever rtnet received a
+ * frame to be processed by rtnetproxy.
+ * ************************************************************************ */
+static inline void rtnetproxy_kernel_recv(struct rtskb *rtskb)
+{
+	struct sk_buff *skb;
+	struct net_device *dev = dev_rtnetproxy;
+
+	int header_len = rtskb->rtdev->hard_header_len;
+	int len = rtskb->len + header_len;
+
+	/* Copy the realtime skb (rtskb) to the standard skb: */
+	skb = dev_alloc_skb(len + 2);
+	if (!skb) {
+		dev->stats.rx_dropped++;
+		return; /* the rtskb is freed by the caller */
+	}
+	skb_reserve(skb, 2);
+
+	memcpy(skb_put(skb, len), rtskb->data - header_len, len);
+
+	/* Set some relevant entries in the skb: */
+	skb->protocol = eth_type_trans(skb, dev);
+	skb->dev = dev;
+	skb->ip_summed = CHECKSUM_UNNECESSARY;
+	/* mark as addressed to this host, otherwise the IP layer drops it */
+	skb->pkt_type = PACKET_HOST;
+
+	/* the rtskb stamp is useless (different clock), get new one */
+	__net_timestamp(skb);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 11, 0)
+	dev->last_rx = jiffies;
+#endif
+	dev->stats.rx_bytes += skb->len;
+	dev->stats.rx_packets++;
+
+	netif_rx(skb); /* hand the frame over to the Linux network stack */
+}
+
+/* ************************************************************************
+ * This function runs in kernel mode.
+ * It is activated from rtnetproxy_recv whenever rtnet received a frame to
+ * be processed by rtnetproxy.
+ * ************************************************************************ */
+static void rtnetproxy_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	struct rtskb *rtskb;
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		rtnetproxy_kernel_recv(rtskb);
+		kfree_rtskb(rtskb);
+	}
+}
+
+/* ************************************************************************
+ * ************************************************************************
+ *   G E N E R A L
+ * ************************************************************************
+ * ************************************************************************ */
+
+static void fake_multicast_support(struct net_device *dev)
+{
+}
+
+#ifdef CONFIG_NET_FASTROUTE
+static int rtnetproxy_accept_fastpath(struct net_device *dev,
+				      struct dst_entry *dst)
+{
+	return -1;
+}
+#endif
+
+static int rtnetproxy_open(struct net_device *dev)
+{
+	int err = try_module_get(THIS_MODULE);
+	if (err == 0)
+		return -EIDRM;
+
+	return 0;
+}
+
+static int rtnetproxy_stop(struct net_device *dev)
+{
+	module_put(THIS_MODULE);
+	return 0;
+}
+
+static const struct net_device_ops rtnetproxy_netdev_ops = {
+	.ndo_open = rtnetproxy_open,
+	.ndo_stop = rtnetproxy_stop,
+	.ndo_start_xmit = rtnetproxy_xmit,
+	.ndo_set_rx_mode = fake_multicast_support,
+};
+
+/* ************************************************************************
+ *  device init
+ * ************************************************************************ */
+static void __init rtnetproxy_init(struct net_device *dev)
+{
+	/* Fill in device structure with ethernet-generic values. */
+	ether_setup(dev);
+
+	dev->tx_queue_len = 0;
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	dev_addr_set(dev, rtnetproxy_rtdev->dev_addr);
+#else
+	dev->flags |= IFF_NOARP;
+#endif
+	dev->flags &= ~IFF_MULTICAST;
+
+	dev->netdev_ops = &rtnetproxy_netdev_ops;
+}
+
+/* ************************************************************************
+ * ************************************************************************
+ *   I N I T
+ * ************************************************************************
+ * ************************************************************************ */
+static int __init rtnetproxy_init_module(void)
+{
+	int err;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	if ((rtnetproxy_rtdev = rtdev_get_by_name(rtdev_attach)) == NULL) {
+		printk("Couldn't attach to %s\n", rtdev_attach);
+		return -EINVAL;
+	}
+	printk("RTproxy attached to %s\n", rtdev_attach);
+#endif
+
+	/* Initialize the proxy's rtskb pool (JK) */
+	if (rtskb_module_pool_init(&rtskb_pool, proxy_rtskbs) < proxy_rtskbs) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	dev_rtnetproxy =
+		alloc_netdev(0, "rtproxy", NET_NAME_UNKNOWN, rtnetproxy_init);
+	if (!dev_rtnetproxy) {
+		err = -ENOMEM;
+		goto err1;
+	}
+
+	rtdm_nrtsig_init(&rtnetproxy_rx_signal, rtnetproxy_signal_handler,
+			 NULL);
+
+	rtskb_queue_init(&tx_queue);
+	rtskb_queue_init(&rx_queue);
+
+	err = register_netdev(dev_rtnetproxy);
+	if (err < 0)
+		goto err3;
+
+	/* Init the task for transmission */
+	rtdm_event_init(&rtnetproxy_tx_event, 0);
+	err = rtdm_task_init(&rtnetproxy_tx_task, "rtnetproxy",
+			     rtnetproxy_tx_loop, 0, RTDM_TASK_LOWEST_PRIORITY,
+			     0);
+	if (err)
+		goto err4;
+
+	/* Register with RTnet */
+	rt_ip_fallback_handler = rtnetproxy_recv;
+
+	printk("rtnetproxy installed as \"%s\"\n", dev_rtnetproxy->name);
+
+	return 0;
+
+err4:
+	unregister_netdev(dev_rtnetproxy);
+
+err3:
+	rtdm_nrtsig_destroy(&rtnetproxy_rx_signal);
+
+	free_netdev(dev_rtnetproxy);
+
+err1:
+	rtskb_pool_release(&rtskb_pool);
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	rtdev_dereference(rtnetproxy_rtdev);
+#endif
+	return err;
+}
+
+static void __exit rtnetproxy_cleanup_module(void)
+{
+	struct rtskb *rtskb;
+
+	/* Unregister the fallback at rtnet */
+	rt_ip_fallback_handler = NULL;
+
+	/* Unregister the net device: */
+	unregister_netdev(dev_rtnetproxy);
+	free_netdev(dev_rtnetproxy);
+
+	rtdm_event_destroy(&rtnetproxy_tx_event);
+	rtdm_task_destroy(&rtnetproxy_tx_task);
+
+	/* free the non-real-time signal */
+	rtdm_nrtsig_destroy(&rtnetproxy_rx_signal);
+
+	while ((rtskb = rtskb_dequeue(&tx_queue)) != NULL) {
+		rtdev_dereference(rtskb->rtdev);
+		kfree_rtskb(rtskb);
+	}
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		kfree_rtskb(rtskb);
+	}
+
+	rtskb_pool_release(&rtskb_pool);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	rtdev_dereference(rtnetproxy_rtdev);
+#endif
+}
+
+module_init(rtnetproxy_init_module);
+module_exit(rtnetproxy_cleanup_module);
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting
new file mode 100644
index 0000000..0380971
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.drvporting
@@ -0,0 +1,251 @@
+This list was created when porting the pcnet32 driver to RTnet and was
+extended and revised afterwards. It is absolutely unsorted. Some points may
+not apply to every driver, some may have to be added for others. It is
+recommended to take a look at pcnet32-rt.c or other existing drivers if some
+steps remain unclear.
+
+IMPORTANT: Check if the critical paths of the driver (xmit function, interrupt
+handler) are free of any unbounded or unacceptably long delays, e.g. caused by
+waiting on hardware events.
+
+
+1. Add to beginning of file (also add a #define for MAX_UNITS if it is missing
+   so far):
+
+    #include <rtnet_port.h>
+
+    static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+    compat_module_int_param_array(cards, MAX_UNITS);
+    MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+
+
+2. disable any copybreak mechanism (rtskbs are all equally sized)
+
+
+3. add the following fields to private data:
+
+    struct rtskb_queue skb_pool;
+    rtdm_irq_t irq_handle;
+
+
+4. initialize skb pool in probe or init function:
+
+    if (rtskb_pool_init(&<priv>->skb_pool, RX_RING_SIZE*2) < RX_RING_SIZE*2) {
+        rtskb_pool_release(&<priv>->skb_pool);
+        <cleanup>...
+        return -ENOMEM;
+    }
+
+
+5. free skb pool in cleanup function
+
+
+6. replace unregister_netdev with rt_unregister_rtnetdev
+
+
+7. call rt_rtdev_disconnect in cleanup function (and on error cleanups!)
+
+
+8. cleanup device structure with rtdev_free
+
+
+9. replace netif_stop_queue with rtnetif_stop_queue
+
+
+10. add to the close function replacing the free_irq call:
+
+    if ((i = rtdm_irq_free(&<priv>->irq_handle)) < 0)
+        return i;
+
+    rt_stack_disconnect(dev);
+
+
+11. replace struct sk_buff with struct rtskb
+
+
+12. replace skb_XXX calls with rtskb_XXX
+
+
+13. replace eth_type_trans with rt_eth_type_trans
+
+
+14. replace netif_rx with rtnetif_rx
+
+
+15. replace struct net_device with struct rtnet_device
+
+
+16. replace netif_start_queue with rtnetif_start_queue
+
+
+17. revise the xmit routine
+
+17.1. add new locking scheme replacing any standard spin lock calls:
+
+      rtdm_lockctx_t context;
+      ...
+      rtdm_lock_get_irqsave(&<priv>->lock, context);
+      ...
+      rtdm_lock_put_irqrestore(&<priv>->lock, context);
+
+      /* ONLY IN EXCEPTIONAL CASES, e.g. if the operation can take more than a
+       * few tens of microseconds: */
+
+      rtdm_irq_disable(&<priv>->irq_handle);
+      rtdm_lock_get(&<priv>->lock);
+      ...
+      rtdm_lock_put(&<priv>->lock);
+      rtdm_irq_enable(&<priv>->irq_handle);
+
+      /* Note that the latter scheme does not work if the IRQ line is shared
+       * with other devices. Also, rtdm_irq_disable/enable can be costly
+       * themselves on certain architectures. */
+
+17.2. add the following code right before the code which triggers the physical
+      transmission (take care if data has to be transferred manually, i.e.
+      without DMA):
+
+      /* get and patch time stamp just before the transmission */
+      if (skb->xmit_stamp)
+          *skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+17.3. make the code above and the transmission triggering atomic by switching
+      off all interrupts:
+
+      rtdm_lockctx_t context;
+      ...
+      rtdm_lock_irqsave(context);
+      <patch time stamp>
+      <trigger transmission>
+      rtdm_lock_irqrestore(context);
+
+      /* or combined with the spinlock: */
+
+      rtdm_lock_get_irqsave(&<priv>->lock, context);
+      <prepare transmission>
+      <patch time stamp>
+      <trigger transmission>
+      rtdm_lock_put_irqrestore(&<priv>->lock, context);
+
+      NOTE: Some hardware may require the driver to calculate the frame
+      checksum, thus making a patching of the frame effectively impossible. In
+      this case use the following strategy: switch off the interrupts only if
+      there is actually a time stamp to patch. Normally, frames using this
+      feature are rather short and will not cause long irq locks. Take a look
+      at 8139too-rt or via-rhine-rt to find some examples.
+
+
+18. modify interrupt handler:
+
+    static int XXX_interrupt(rtdm_irq_t *irq_handle)
+    {
+        struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+        ...
+
+    Also adapt the prototype of the interrupt handler accordingly if provided.
+
+
+19. replace spin_lock/spin_unlock with rtdm_lock_get/rtdm_lock_put within the
+    interrupt handler
+
+
+20. replace printk in xmit function, interrupt handler, and any function called
+    within this context with rtdm_printk. Where avoidable, disable output in
+    critical functions (i.e. when interrupts are off) completely.
+
+
+21. replace dev_kfree_skb[_XXX] with dev_kfree_rtskb
+
+
+22. replace alloc_etherdev with the following lines:
+
+    dev = rt_alloc_etherdev(sizeof(struct XXX_private) /* or 0 */);
+    if (dev == NULL)
+        return -ENOMEM;
+    rtdev_alloc_name(dev, "rteth%d");
+    rt_rtdev_connect(dev, &RTDEV_manager);
+    RTNET_SET_MODULE_OWNER(dev);
+    dev->vers = RTDEV_VERS_2_0;
+
+
+23. replace request_irq in open function with the following lines:
+
+    rt_stack_connect(dev, &STACK_manager);
+    retval = rtdm_irq_request(&<priv>->irq_handle, dev->irq, XXX_interrupt,
+                              RTDM_IRQTYPE_SHARED, NULL /* or driver name */, dev);
+    if (retval)
+        return retval;
+
+
+24. replace netif_queue_stopped with rtnetif_queue_stopped
+
+
+25. replace netif_wake_queue with rtnetif_wake_queue
+
+
+26. add to the beginning of the probe or card-init function:
+
+    static int cards_found = -1;
+
+    cards_found++;
+    if (cards[cards_found] == 0)
+        return -ENODEV;
+
+
+27. call rtdm_clock_read within the receive interrupt and set the time_stamp
+    field of the rtskb accordingly
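+
+    For example (a sketch; the exact spot depends on the driver's RX path):
+
+    skb->time_stamp = rtdm_clock_read();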
+
+
+28. initialize new unsigned int old_packet_cnt with <priv>->stats.rx_packets at
+    the beginning of the interrupt handler
+
+
+29. add to the end of the interrupt handler:
+
+    rtdm_lock_put(&<priv>->lock); /* if locking is not done in interrupt main function */
+    if (old_packet_cnt != <priv>->stats.rx_packets)
+        rt_mark_stack_mgr(dev);
+
+
+30. disable any timer setup and delete calls
+
+
+31. comment out not required(!) MII related assignments and functions
+
+
+32. comment out any other unused functions
+
+
+33. replace register_netdev with rt_register_rtnetdev
+
+
+34. replace netif_carrier_{on|off} with rtnetif_carrier_{on|off}
+
+
+35. replace dev_alloc_skb(size) with dev_alloc_rtskb(size, &<priv>->skb_pool)
+
+
+36. reduce RX_RING_SIZE to 8
+
+
+37. replace MOD_INC_USE_COUNT/MOD_DEC_USE_COUNT with RTNET_MOD_INC_USE_COUNT/RTNET_MOD_DEC_USE_COUNT
+    and check if they are used appropriately
+
+
+38. rename type of lock field in private data from spinlock_t to rtdm_lock_t
+
+
+39. replace spin_lock_init(&<priv>->lock) with rtdm_lock_init(&<priv>->lock)
+
+
+40. rtskb structure does not contain a data_len field => set any occurrence to zero
+
+
+41. return from interrupt handler only by providing RTDM_IRQ_HANDLED or RTDM_IRQ_NONE as
+    return values, depending on whether the IRQ was handled or not
+
+42. fill rtdev field in every received rtskb object properly
+
+    skb->rtdev = rtdev;
+
+XX. check the critical paths in xmit function and interrupt handler for delays
+    or hardware wait loops, disable or avoid them
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394 b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394
new file mode 100644
index 0000000..5ae5f49
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.eth1394
@@ -0,0 +1,65 @@
+HOWTO for using RTnet over FireWire (ETH1394)
+=============================================
+To use RTnet over FireWire, one needs another package, i.e. RT-FireWire, which
+can be checked out via "svn checkout svn://svn.berlios.de/rtfirewire/trunk".
+RT-FireWire package is developed by RT-FireWire project team, see the project
+homepage for more interesting information (http://rtfirewire.berlios.de).
+
+It is recommended to compile and test the RT-FireWire package first.
+RT-FireWire only compiles with fusion. At the time of writing, it is the CVS
+version of fusion which will become release 0.9. Use --with-rtai=XXX to
+specify the installation location of fusion in your system.
+
+To compile RTnet's Eth1394 driver with RT-FireWire, one needs to do 2 things
+in configuration:
+1. add --with-rtfw=XXX to specify the source location of RT-FireWire
+2. add --enable-eth1394 to enable the compiling of eth1394
+Of course, don't forget --with-rtai=XXX for RTnet.
+
+RT-FireWire comes with some basic testing tools, one of which is similar to
+"rtping" on Ethernet. See the Readme of RT-FireWire for how to play around
+with basic FireWire testing.
+
+Currently, Eth1394 appears exactly the same as a normal Ethernet device. So
+from the application point of view, no difference in the medium can be seen,
+which means applications written for Ethernet can be moved directly to
+FireWire without any porting effort.
+
+So, play around with your new medium, i.e. FireWire, with exactly the same
+tools as on Ethernet :-).
+
+
+Modification to RFC2734
+=======================
+Each IP-capable node must have its own unique hardware address in the network.
+The original IPover1394 spec (RFC2734) employs the 64-bit GUID of each
+FireWire adapter chip as the hardware address. That way, the hardware address
+can be guaranteed to be unique even on a world scale, but the address
+resolution process is not efficient, see below:
+
+               ARP                    Eth1394 internal
+            resolution                   resolution
+  48-bit                    MAC                            16-bit
+IP address -----------> (64-bit GUID) ---------------> FireWire nodeid
+
+The modified ARP on IPover1394 directly uses the FireWire node id as the
+hardware address of each Eth1394 node. That way, the mapping between IP
+address and hardware address (FireWire node id) only needs a single resolution
+step, which is more efficient than the original scheme. Note that here we
+assume a static allocation of 1394 address space to IPover1394, i.e. on each
+node, the address space for Eth1394 is exactly the same, see "eth1394.h". So
+16 bits are enough to represent the hardware address. Now the address
+resolution process is more efficient, as below:
+
+                   ARP resolution
+48-bit IP address ---------------> MAC (FireWire nodeid)
+
+To give exactly the same look as normal Ethernet devices, the MAC address of
+Eth1394 is extended to 6 bytes by appending zeros to the 2-byte FireWire node
+id. This way, all the high-level stuff which already works on Ethernet, like
+RTnet's TDMA and RTcfg, can be moved directly to Eth1394.
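+
+In code, the extension could look like this (a hypothetical helper, not part
+of the actual driver; the byte order of the node id is an assumption):
+
+    /* build the 6-byte pseudo-MAC from the 16-bit FireWire node id */
+    void eth1394_make_mac(uint16_t nodeid, uint8_t mac[6])
+    {
+        mac[0] = nodeid >> 8;
+        mac[1] = nodeid & 0xff;
+        memset(&mac[2], 0, 4);  /* pad with zeros as described above */
+    }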
+
+
+Good Luck!
+
+2005-08-02 Zhang Yuchen <yuchen623-at-gmail.com>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation
new file mode 100644
index 0000000..f552bbc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.ipfragmentation
@@ -0,0 +1,49 @@
+README.ipfragmentation
+=======================
+
+19-May-2003 - Mathias Koehrer (mathias_koehrer@yahoo.de) (original version)
+21-Oct-2003 - Jan Kiszka (jan.kiszka@web.de)
+
+
+This file documents the restrictions and pitfalls when using fragmented IP
+packets with RTnet.
+
+
+Introduction:
+-------------
+Ethernet provides 1500 bytes of payload within each packet. Subtracting the IP
+header (20 bytes without options) and the UDP header (8 bytes), this leaves
+1472 bytes of data for the (UDP) user. When sending larger packets, the RTnet
+implementation of IP fragments the packet and sends it in multiple chunks over
+the network. When an RTnet station receives a sequence of fragmented IP
+packets, it reassembles them and passes the whole packet to the next layer
+(UDP) afterwards.
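+
+For example, a UDP datagram carrying 4000 bytes of user data occupies 4008
+bytes of IP payload (data plus UDP header) and is thus sent as three fragments
+with 1480 + 1480 + 1048 bytes of IP payload, since a 1500-byte Ethernet frame
+carries at most 1480 bytes of IP payload and every fragment but the last must
+be a multiple of 8 bytes.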
+
+
+Restrictions:
+-------------
+Incoming IP fragments are collected by the IP layer. The collector mechanism is
+a global resource; when all collector slots are in use, unassignable fragmented
+packets are dropped! In order to guarantee bounded execution time of the
+collector lookup mechanism, it is not possible to provide an unlimited number
+of collectors (currently 10 are supported, see ipv4/ip_fragment.c). Therefore,
+be careful about how many fragmented packets all of your stations produce and
+whether one receiver might be overwhelmed with fragments!
+
+Fragmented IP packets are generated AND received at the expense of the socket
+rtskb pool. Adjust the pool size appropriately to provide sufficient rtskbs
+(see also examples/frag_ip).
+
+To identify the destination socket and to simplify the defragmentation, all IP
+fragments must arrive in strictly ascending order. Unordered packets are
+dropped; if they can be assigned to an existing collector, the already
+collected fragments are cleaned up as well. However, for typically isolated
+real-time networks, this requirement can be easily fulfilled.
+
+
+Known Issues:
+-------------
+When sending fragmented IP packets over a NIC without RTmac being installed,
+the NIC's transmission queue may easily overflow (take a look at the driver
+source for the exact limit - typically TX_RING_SIZE). This is due to the
+still-missing flow control for packet transmission. It will hopefully be fixed
+soon...
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools
new file mode 100644
index 0000000..b932a1d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.pools
@@ -0,0 +1,117 @@
+                              Buffer Pool Management
+                              ======================
+
+RTnet holds packets or packet fragments internally in so-called real-time socket
+buffers (rtskbs, comparable to Linux skbs). These buffers are used to store
+incoming data while it is processed by the stack and before it is copied to the
+user buffer. They are also used for setting up outgoing packets and passing
+them to the NIC driver.
+
+Unlike buffers in a normal network stack, rtskbs have to be allocatable in a
+strictly deterministic way. For this reason, rtskbs are kept preallocated in
+multiple pools, one for each producer or consumer of packets. When a filled
+buffer is passed from a producer to a consumer, the consumer has to hand an
+empty rtskb back. This avoids that a failing component exhausts global
+resources like the buffers and locks up the whole RTnet system.
+
+This is an overview of the rtskb pools in RTnet, how large they are by
+default, and how they can be extended or shrunk.
+
+
+1. Socket Pools
+---------------
+
+Default Size:   16
+Resizable:      module parameter "socket_rtskbs"
+Runtime Resize: [rt_dev_]setsockopt()
+Initialization: real-time / non real-time (see text)
+
+Every socket gets its own rtskb pool upon creation. This pool is used for
+compensation when an incoming packet needs to be stored until the user fetches
+it and when a packet is prepared for transmission. The initial pool size can be
+set with "socket_rtskbs".
+
+During runtime the pool can be extended (RT_SO_EXTPOOL) or shrunk
+(RT_SO_SHRPOOL) using the [rt_dev_]setsockopt() function. When a socket is to
+be created within a real-time context (e.g. a kernel RT-task), the buffers are
+allocated from the real-time rtskb cache (see below) instead of using a Linux
+system call. When a real-time-created socket is closed again, the buffers
+return to that cache. Note that a [rt_dev_]close() call can fail if not all
+buffers have yet returned to the socket pool. In this case, be patient and retry
+later. :)
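+
+A minimal sketch of a runtime resize (assuming the RT_SO_EXTPOOL/RT_SO_SHRPOOL
+options mentioned above are available via RTnet's user API header; error
+handling omitted):
+
+    unsigned int delta = 8;
+
+    /* extend the pool of socket "sock" by 8 rtskbs */
+    rt_dev_setsockopt(sock, SOL_SOCKET, RT_SO_EXTPOOL, &delta, sizeof(delta));
+
+    /* ... */
+
+    /* hand the extra rtskbs back before closing the socket */
+    rt_dev_setsockopt(sock, SOL_SOCKET, RT_SO_SHRPOOL, &delta, sizeof(delta));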
+
+
+2. Global Pool
+--------------
+
+Default Size:   0 + 16 * number of registered NICs
+Resizable:      module parameter "global_rtskbs" (base value)
+                module parameter "device_rtskbs" (increment per NIC)
+Runtime Resize: by adding or removing NIC drivers
+Initialization: non real-time
+
+The global pool is used by the ARP protocol (transmission only) and by the
+real-time protocol part of RTmac.
+
+
+3. ICMP Pool
+------------
+
+Default Size:   8
+Resizable:      -
+Runtime Resize: -
+Initialization: non real-time
+
+For technical reasons, the ICMP pool which is used for replying to incoming
+requests is separated from the global pool.
+
+
+4. NIC Receiver Pool
+--------------------
+
+Default Size:   16 (typically RX_RING_SIZE*2)
+Resizable:      module parameter "rx_pool_size" (8139too-rt.o only)
+Runtime Resize: -
+Initialization: non real-time
+
+The receiver pools are used by the NICs to store incoming packets. Their size
+is typically fixed and can only be changed by recompiling the driver.
+
+
+5. VNIC Pool
+------------
+
+Default Size:   32
+Resizable:      module parameter "vnic_rtskbs" (rtmac.o)
+Runtime Resize: -
+Initialization: non real-time
+
+The VNIC pool is used for compensating incoming non-real-time packets when
+they are queued for being processed by Linux. The pool is also used for
+creating outgoing VNIC packets.
+
+
+6. rtnetproxy Pool
+------------------
+
+Default Size:   32
+Resizable:      module parameter "proxy_rtskbs" (rtnetproxy.o)
+Runtime Resize: -
+Initialization: non real-time
+
+This pool is used the same way as the VNIC pool.
+
+
+All module parameters at a glance:
+
+  Module     | Parameter        | Default Value
+ -----------------------------------------------
+  rtnet      | socket_rtskbs    | 16
+  rtnet      | global_rtskbs    | 0
+  rtnet      | device_rtskbs    | 16
+  rtmac      | vnic_rtskbs      | 32
+  rtnetproxy | proxy_rtskbs     | 32
+  rt_8139too | rx_pool_size     | 16
+
+Statistics on the currently allocated pools are available through the /proc
+interface of RTnet (/proc/rtnet/rtskb).
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing
new file mode 100644
index 0000000..f59c3f3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.routing
@@ -0,0 +1,117 @@
+                              IP Routing Subsystem
+                              ====================
+
+The IPv4 implementation of RTnet comes with a real-time routing subsystem which
+has some differences compared to normal IP stacks. Basically, all dynamic
+elements of the routing and device address resolution (ARP) process have been
+converted into statically configurable mechanisms. This allows an easy analysis
+of the routing and address resolution complexity for known real-time networks.
+
+
+1. Concept
+----------
+
+The routing system is based on two tables. The so-called host routing table
+contains all destination IPs which can be reached directly over local network
+segments. These IPs include local loopback addresses and network broadcasts.
+
+The optional network routing table provides the addresses of gateways
+to distant real-time networks, thus allowing more complex network structures.
+In order to use the network routing feature, RTnet has to be compiled with
+--enable-net-routing (see configure script).
+
+When preparing the transmission of an IP packet, RTnet first tries to find the
+destination address in the host routing table. If this fails and network
+routing is available, the network routing table is queried. On success, the
+host routing table is consulted again, this time using the gateway IP.
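+
+In pseudo code (hypothetical helper names that return true on success, only
+sketching the lookup order):
+
+    if (!host_route_lookup(daddr, &route)) {
+        if (!net_route_lookup(daddr, &gateway_ip))
+            return -EHOSTUNREACH;   /* no route at all */
+        if (!host_route_lookup(gateway_ip, &route))
+            return -EHOSTUNREACH;   /* gateway not resolvable */
+    }
+    /* route now holds the output device and the next hop's device address */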
+
+Incoming IP packets are no longer checked against any routing table on standard
+RTnet nodes. Only if RTnet was compiled as a router by passing --enable-router
+to the configure script is the destination IP checked for describing a
+non-local address. In case the destination address does not equal the unicast
+or broadcast IP of the receiving device and if the input channel is not a
+loopback device, the RTnet router will try to find the next hop by performing
+the output routing procedure described above and, on success, will forward the
+packet. Note that, just like with non-real-time networks, any RTnet router can
+become a bottleneck for real-time messages if the traffic is not planned
+thoroughly (packets of the RTmac VNICs do not interfere with the real-time
+routing).
+
+
+2. Host Routing Table
+---------------------
+
+The content of the host routing table is comparable to ARP tables of standard
+IP stacks: destination IP address, the respective device address, and a
+reference to the output device. While normal ARP table lookups are only
+performed after the routing decision is made, RTnet uses this table already
+for the first and mostly sole routing step, regardless of the device type,
+thus also for loopback IPs.
+
+All entries of the host routing table are stored according to a hash mechanism.
+The hash key is calculated using the least significant bits of the destination
+IP. The size of the hash table, i.e. the number of relevant destination bits is
+statically configured (default: 64, see ipv4/route.c). Also the number of
+available host routing entries is statically limited (default: 32) and can be
+set by recompiling RTnet with modified values.
+
+
+Example (hash table size 64):
+
+192.168.2.35 & 0.0.0.63 = 35, the host hash key
+
+
+Host routes are either added or updated manually via the rtroute tool or
+automatically when an ARP request or reply arrives. Note that ARP messages are
+only triggered by explicit user commands (rtroute solicit). Moreover, the
+entries in the host routing table will not expire until they are manually
+removed, e.g. by shutting down the respective output device.
+
+The easiest way to create and maintain the host routing table is to use RTcfg,
+see README.rtcfg for further information.
+
+
+3. Network Routing Table
+------------------------
+
+The entries of the network routing table contain the destination IP address, a
+mask defining the relevant bits of the destination IP, and the IP of the
+gateway to reach the destination network (or host). To simplify updates of host
+routes, i.e. foremost changes of the destination device address, gateway IPs
+have to be resolved through the host routing table.
+
+Network routes are either stored using a hash key derived from the destination
+IP or without any hashing mechanism. The size of the hash table and thus the
+number of considered IP bits for generating the key is defined in the source
+code (default: 32). The start of the bit range is specified by a module
+parameter of rtnet.o called net_hash_key_shift (default: 8).
+
+
+Example (hash table size 32, net_hash_key_shift 8):
+
+(192.168.2.35 >> 8) & 0.0.0.31 =
+= 0.192.168.2 & 0.0.0.31 = 2, the network hash key
+
+
+A new network route is only assigned to a hash key if the network mask of the
+route completely covers the hash mask.
+
+
+Examples (hash table size is 32, net_hash_key_shift is 8):
+
+rtroute add 192.168.2.0 netmask 255.255.255.0 gw 192.168.0.1
+hashmask = 0.0.0.31 << 8 = 0.0.31.0
+netmask & hashmask = 255.255.255.0 & 0.0.31.0 = 0.0.31.0 = hashmask => use key!
+
+rtroute add 10.0.0.0 netmask 255.0.0.0 gw 192.168.0.250
+netmask & hashmask = 255.0.0.0 & 0.0.31.0 = 0.0.0.0 != hashmask => no hash key!
+
+
+In the latter case, RTnet adds the new route to the list of key-less network
+routes. This list is queried only if a network route lookup in the hash table
+fails. Thus, the network routing process effectively consists of two stages:
+the hash-key-based lookup and a potential query of the key-less list of routes.
+
+RTnet provides by default a pool of 16 network routes. This number can be
+modified in the source code (see ipv4/route.c). Network routes are only
+manually added or removed via rtroute.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap
new file mode 100644
index 0000000..52b2ced
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcap
@@ -0,0 +1,39 @@
+Real-Time Ethernet Capturing (RTcap)
+------------------------------------
+
+RTnet can capture incoming and outgoing Ethernet packets with a very low time
+stamp jitter, typically below 10 us (depends on the hardware).
+
+When it is configured and compiled with --enable-rtcap, some extensions will be
+added to the RTnet stack and an additional module rtcap.o will be created. This
+module has to be loaded *after* all NIC drivers are inserted and *before* any
+device is started or a RTmac discipline is attached to it. It will create two
+read-only Linux shadow network devices for every NIC:
+
+    <rtdevX>     (e.g. rteth0) and
+    <rtdevX>-mac (exception: loopback device will only be mirrored to "rtlo").
+
+The first capturing device mirrors any incoming packet the hardware reports to
+the stack and any outgoing packet sent on the local station using RTnet. The
+second one captures only packets which have been delayed by an active RTmac
+discipline. As the capturing time is dictated by the parent shadow device,
+packet lists can be out of chronological order, but this provides a deeper
+look into the influence of RTmac on the packet transmission process.
+
+After these shadow devices are started up using ifconfig, any capturing tool
+like tcpdump or Ethereal can be used for the actual analysis work. In order to
+get hold of any packet on the network, the real-time NIC should furthermore
+be switched to promiscuous mode when it is configured:
+
+    rtifconfig <rtdevX> up <IP> promisc
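+
+A capturing session could then look like this (tcpdump shown as an example;
+any libpcap-based tool works):
+
+    ifconfig rteth0 up
+    tcpdump -i rteth0 -w rt-traffic.pcap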
+
+If you notice any potential packet losses while capturing, you can try to
+increase the number of real-time buffers used for storing packets before they
+can be processed by Linux. The module parameter rtcap_rtskbs controls this
+number. It is set to 128 by default. Generally you should also tell RTcap to
+switch on the RTAI timer (module parameter: start_timer=1) and prevent any
+other module or program from doing so as well.
+
+The capturing support adds a slight overhead to both paths of packets,
+therefore the compilation parameter should only be switched on when the service
+is actually required.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg
new file mode 100644
index 0000000..a0d7ff2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtcfg
@@ -0,0 +1,135 @@
+                                    RTcfg
+                                    =====
+
+The Real-Time Configuration Service (RTcfg) provides a mechanism to start up
+RTnet nodes synchronously. It implements a rendezvous during the RTnet start-up
+process, exchanges MAC addresses and optionally IP routes, and distributes
+configuration data to all stations.
+
+RTcfg consists of a kernel module which can either be configured to run as a
+server or a client. The server takes a list of all expected stations in the
+network and waits for them to come up while broadcasting invitation messages to
+the clients. The clients wait for the invitation, then exchange the
+configuration with the server, and wait for all other clients to start up.
+After all configuration steps are performed, the stations can use a further
+rendezvous mechanism before starting the user application.
+
+
+Usage
+-----
+
+The RTcfg server and client functionality is controlled by the command line
+tool rtcfg. Note: Some features may not be implemented yet, so the
+respective options have no effect.
+
+
+Server Commands
+---------------
+
+rtcfg <dev> server [-p period] [-b burstrate] [-h <heartbeat>]
+      [-t <threshold>] [-r]
+
+Starts an RTcfg server for the specified device <dev>. The server then sends
+stage 1 configuration frames to new clients every 1000 ms. <period> (in
+milliseconds) can be used to override the interval value. The number of
+clients invited within one period is controlled by <burstrate> (default: 4).
+This value also defines the number of stage 2 configuration fragments the
+server should send as far as the client supports it (see also "announce").
+<heartbeat> specifies the Heartbeat period of the clients in milliseconds
+(default: 1000 ms), the value 0 turns the heartbeat mechanism off. <threshold>
+sets the number of missing heartbeats after which a client shall be considered
+dead (default: 2). If -r is given, the server automatically reports to be
+ready within its stage 1 configuration frame, thus disengading it from issuing
+an explicite "ready" command.
+
+rtcfg <dev> add <address> [-hw <hw_address>] [-stage1 <stage1_file>]
+      [-stage2 <stage2_file>] [-t <timeout>]
+
+Adds a client to the server's list of potential participants of the network
+connected to the specified device <dev>. <address> can be either an IP address
+(A.B.C.D) or a physical address (AA:BB:CC:DD:EE:FF). If a physical address is
+explicitly assigned using <hw_address>, the <address> parameter must define
+the client's IP address. Optionally, files can be specified which will be
+passed during the different configuration stages. If <stage1_file> is "-",
+rtcfg will read the stage 1 data from standard input. <timeout> (in
+milliseconds) defines the internal timeout after which a half-finished client
+configuration is reset to its initial state again. By default this reset is
+never performed.
+
+rtcfg <dev> del <address>
+
+Removes a client from the list of network participants. See above for details
+about the address format.
+
+rtcfg <dev> wait [-t <timeout>]
+
+Waits until both configuration stages for all clients in the server's list are
+completed. If <timeout> (in milliseconds) is given, rtcfg will return an error
+code when the configuration cannot be completed within the specified time. The
+default timeout is infinite.
+
+rtcfg <dev> ready [-t <timeout>]
+
+Reports that the server has completed its setup, generally including the RTmac
+startup phase, and waits until all other stations are reporting to be ready as
+well. If <timeout> (in milliseconds) is given, rtcfg will return an error code
+when the synchronisation cannot be completed within the specified time. The
+default timeout is infinite.
+
+rtcfg <dev> detach
+
+Stops the RTcfg server on the specified device <dev>. Afterwards, the device
+can be re-configured to act as server or client.
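+
+An exemplary server-side session combining the commands above (device name,
+address, and file name are illustrative):
+
+    rtcfg rteth0 server -h 2000
+    rtcfg rteth0 add 10.0.0.2 -stage2 config.tar
+    rtcfg rteth0 wait
+    rtcfg rteth0 ready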
+
+
+Client Commands
+---------------
+
+rtcfg <dev> client [-t <timeout>] [-c|-f <stage1_file>] [-m maxstations]
+
+Waits until the first configuration stage is completed for the device <dev>.
+If <timeout> (in milliseconds) is given, rtcfg will return an error code when
+the configuration cannot be completed within the specified time. The default
+timeout is infinite. The incoming configuration data is either sent to the
+standard output if -c is given or to <stage1_file> if specified. By default
+clients can synchronise with up to 32 other stations (including the server).
+This limit can be modified using the <maxstations> parameter.
+
+rtcfg <dev> announce [-t <timeout>] [-c|-f <stage2_file>] [-b burstrate] [-r]
+
+Sends a New Announcement frame over the device <dev> and waits until this
+second configuration stage is completed. If <timeout> (in milliseconds) is
+given, rtcfg will return an error code when the configuration cannot be
+completed within the specified time. The default timeout is infinite. If -c or
+-f is given, stage 2 configuration data is requested and either sent to the
+standard output or to <stage2_file>. <burstrate> controls the number of stage 2
+configuration fragments the client should accept (default: 4). The actual
+amount is negotiated according to both the client's and the server's capability
+(see also "server"). If -r is given, the client automatically reports to be
+ready within its announcement frame, thus relieving it from issuing an
+explicit "ready" command.
+
+rtcfg <dev> ready [-t <timeout>]
+
+Reports that the client has completed its setup and waits until all other
+stations are reporting to be ready as well. If <timeout> (in milliseconds) is
+given, rtcfg will return an error code when the synchronisation cannot be
+completed within the specified time. The default timeout is infinite.
+
+rtcfg <dev> detach
+
+Stops the RTcfg client on the specified device <dev>. Afterwards, the device
+can be re-configured to act as server or client.
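+
+An exemplary client-side session matching the server example above (file
+names are illustrative):
+
+    rtcfg rteth0 client -f stage1.cfg
+    rtcfg rteth0 announce -f stage2.tar
+    rtcfg rteth0 ready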
+
+
+Module Parameters
+-----------------
+
+start_timer     Set to zero if RTAI timer is already running. By default the
+                rtcfg module starts the timer when it is loaded.
+
+num_rtskbs      Number of realtime socket buffers used by the rtcfg module. You
+                may have to increase the default value of 32 when you are
+                working with multiple interfaces.
+
+
+2003-2005, Jan Kiszka <jan.kiszka-at-web.de>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac
new file mode 100644
index 0000000..008385b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtmac
@@ -0,0 +1,341 @@
+                     Real-Time Media Access Control (RTmac)
+                     ======================================
+
+RTmac is a module designed to be used with RTnet. It provides a media access
+control (MAC) infrastructure for RTnet. The actual access control mechanism is
+implemented by so-called discipline modules. The current version comes with a
+time division multiple access (TDMA) discipline. Because of RTmac's modular
+design, you can also easily attach your own MAC discipline optimised for the
+specific application.
+
+
+RTmac Layer
+===========
+
+Without RTmac:
+
+           +---------------+
+           |RT applications|
+           +-------v-------+
+                   |
+          +--------v---------+
+          |  RT UDP/IP stack |
+          +------------------+
+          |RT ethernet driver|
+          +--------v---------+
+                   |
+              +----v---+
+              |   NIC  |
+              +--------+
+
+With RTmac inserted:
+
+           +---------------+    +-------------------+
+           |RT applications|    |   Linux network   |
+           +-------v-------+    |stack (TCP/IP etc.)|
+                   |            +---------v---------+
+          +--------v---------+            |
+          |  RT UDP/IP stack |         +--v--+
+          +------------------+         |VNIC |
+          |      RTmac       |         +--v--+
+          |      Layer       |            |
+          | .--------------. <------------+
+          | |MAC algorithm | |
+          | `--------------´ |
+          +------------------+
+          |RT ethernet driver|
+          +--------v---------+
+                   |
+              +----v---+
+              |   NIC  |
+              +--------+
+
+RTmac, if loaded, has exclusive control over the transmission path of the
+network driver. Every outgoing packet is passed to RTmac, which forwards it to
+the MAC discipline. The discipline then decides when the packets can be sent
+to the hardware driver.
+
+
+
+TDMA - Time Division Multiple Access
+====================================
+
+The TDMA media access control discipline is based on a master/slave hierarchy.
+A network master periodically publishes so-called Synchronisation frames,
+forming elementary cycles. Network participants, including the master, have
+exclusively assigned access windows (time slots) within these cycles, defined
+relative to the Synchronisation frames. In order to catch potential breakdowns
+of the central master, additional backup masters can be set up which will take
+over sending Synchronisation frames in case the primary master fails to do so.
+
+A time slot can be used to transmit a single packet of up to a specified maximum
+size. This discipline revision supports flexible assignment of time slots to
+real-time network participants. It is possible to use multiple slots per cycle.
+Furthermore, a slot can be shared between participants by occupying it only
+every Nth cycle. Besides at least one payload slot per participant, slots have
+to be reserved for the Synchronisation frame and, optionally, for one or more
+backup Synchronisation frames. The concrete timing strongly depends on the
+capability of all network participants. Therefore, timing requirements like
+worst-case jitters or minimum slot gaps are not statically specified; they can
+be defined individually for every project.
+
+In contrast to earlier TDMA discipline revisions, the slave configuration is
+no longer distributed by the TDMA master. This means that the slaves have to
+be aware of their slot setup before sending any data to a TDMA-managed
+network. Therefore, the required settings either have to be stored on the
+slaves or, if a centralised management is desired, the RTnet configuration
+service RTcfg has to be used (see related documentation for further details).
+
+
+Slot Identification and Selection
+---------------------------------
+
+Time slots carry an internal ID number, unique per participant. These numbers
+are used when determining the slot in which an outgoing packet shall be
+transmitted. The TDMA discipline contains no automatic scheduling mechanism.
+Instead, the sender, i.e. a user or a service, either explicitly provides a
+desired slot ID or a default slot is used.
+
+  Slot ID | Description
+ ---------+-----------------------------------------------------------------
+     0    | default slot for RT; also default NRT slot if slot 1 is missing
+     1    | non-RT slot; if missing, slot 0 is used
+     2    | user slots, used for explicitly scheduled packets
+     :    |
+
+
+Configuration Files
+-------------------
+
+To ease the setup of TDMA-based networks, the rtnet start script is provided
+with the RTnet distribution. It is controlled by a configuration file which is
+typically named rtnet.conf and stored in /etc. By setting the TDMA_MODE
+variable in this file, the role of the station is set to either "master" or
+"slave".
+
+Beyond this common parameter, the start script supports two configuration modes
+for TDMA. In the simple mode, only the IPs of all slaves have to be listed in
+TDMA_SLAVES, the cycle period has to be provided in TDMA_CYCLE, and the slot
+offset difference must be specified in TDMA_OFFSET. Every station is then
+assigned a single time slot with the ID 0, starting with offset 0 for the
+master node, i.e. the master's payload frame will directly follow the
+Synchronisation frame. Further offsets are calculated by incrementing the
+previous value by TDMA_OFFSET for each further station.
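+
+An exemplary simple-mode fragment of rtnet.conf (addresses and time values
+are illustrative):
+
+    TDMA_MODE="master"
+    TDMA_SLAVES="10.0.0.2 10.0.0.3"
+    TDMA_CYCLE="5000"
+    TDMA_OFFSET="200"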
+
+In contrast, the extended mode allows a detailed configuration of every node.
+To enable this mode, a TDMA configuration file (typically /etc/tdma.conf) is
+required. The path of this file has to be provided to rtnet.conf in the
+variable TDMA_CONFIG, while TDMA_SLAVES, TDMA_CYCLE, and TDMA_OFFSET have to
+be disabled, e.g. by commenting them out. Besides TDMA-related parameters,
+individual stage-2 files can also be set for every slave node, overwriting the
+common STAGE_2_SRC variable in rtnet.conf (see RTcfg documentation for details
+about the configuration concept). The format of the TDMA configuration file is
+defined as follows:
+
+# Note: every station needs at least one time slot
+master:
+[ip 1.2.3.4]
+cycle <cycle_in_us>
+slot <id> <offset_in_us> [<phasing>/<period> [<size>]]
+[slot ...]
+
+# Slave with known MAC address, IP is assigned by the RTcfg server
+slave:
+ip 1.2.3.4
+mac AA:BB:CC:DD:EE:FF
+[stage2 <file>]
+slot ...
+
+# Slave with unknown MAC address, it is aware of its IP when starting
+slave:
+ip 1.2.3.4
+[stage2 <file>]
+slot ...
+
+# Slave with known MAC address without IP support
+slave:
+mac AA:BB:CC:DD:EE:FF
+[stage2 <file>]
+slot ...
+
+# Note:
+# - multiple backup masters can be set up, always the one with the smallest
+#   backup-slot value will take over in case of a failure
+# - the cycle period is already defined with the primary master
+backup-master:
+ip 1.2.3.4 (or IP+MAC or only MAC, see slave scenarios)
+backup-slot <offset_in_us>
+[stage2 <file>]
+slot ...
+
+
+Configuration Example
+---------------------
+
+An exemplary configuration consisting of two masters, one serving as backup,
+and three slaves is shown below. The slot period is expressed in the form
+<phasing>/<period>. For instance, 1/3 means that this slot will be used in
+every first of three cycles, while 3/3 means in every third of three.
+
+  +------+  +----------+  +---------+  +---------+  +----------+
+  |      |  | Master 2 |  | Slave A |  | Slave B |  | Master 1 |
+  | Sync |  |  Backup  |  | Slot 0  |  | Slot 0  |  |  Slot 0  |
+  |      |  |   Sync   |  | RT/NRT  |  |   RT    |  |  RT/NRT  |
+  | 1/1  |  |   1/1    |  |   1/1   |  |   1/1   |  |   1/1    |
+--+------+--+----------+--+---------+--+---------+--+----------+--...
+
+                                     +----------+
+                                     | Slave C  |
+                                     |  Slot 3  |
+                                     |    RT    |
+                                     |   3/3    |
+     +---------+                     +----------+
+     | Slave C |                     | Master 2 |
+     | Slot 0  |                     |  Slot 0  |
+     | RT/NRT  |                     |  RT/NRT  |
+     |   2/2   |                     |   2/3    |
+     +---------+        +---------+  +----------+             +------+
+     | Slave B |        | Slave C |  |  Slave A |             |      |
+     | Slot 1  |        | Slot 2  |  |  Slot 2  |             | Sync |
+     |   NRT   |        |   NRT   |  |    RT    |             |      |
+     |   1/2   |        |   1/4   |  |   1/3    |             | 1/1  |
+...--+---------+--------+---------+--+----------+-------------+------+-->
+
+A tdma.conf file describing this scenario is shown below (all time values are
+exemplary, only expressing relative relations):
+
+# Master 1
+master:
+ip 10.0.0.1
+cycle 5000
+slot 0 800
+
+# Master 2
+backup-master:
+ip 10.0.0.2
+backup-slot 200
+slot 0 1500 2/3
+
+# Slave A
+slave:
+ip 10.0.0.3
+slot 0 400
+slot 2 1500 1/3
+
+# Slave B
+slave:
+ip 10.0.0.4
+slot 0 600
+slot 1 1000 1/2
+
+# Slave C
+slave:
+ip 10.0.0.5
+slot 0 1000 2/2
+slot 2 1300 1/4
+slot 3 1500 3/3
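+
+For clarity, the <phasing>/<period> rule used above can be expressed as a
+small predicate (a sketch; RTnet's internal cycle counting may differ):
+
+    /* A slot with <phasing>/<period> is occupied in cycle number n
+       (counting from 1) whenever n % period == phasing % period,
+       e.g. 1/3 hits cycles 1, 4, 7, ... and 3/3 hits 3, 6, 9, ... */
+    int slot_occupied(unsigned n, unsigned phasing, unsigned period)
+    {
+            return (n % period) == (phasing % period);
+    }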
+
+
+Management Interface
+--------------------
+
+The TDMA discipline is managed by the command line tool tdmacfg. In the
+following, the usage of this tool is described. For typical setups, the rtnet
+start script manages the execution of tdmacfg.
+
+tdmacfg <dev> master <cycle_period> [-b <backup_offset>]
+        [-c calibration_rounds] [-i max_slot_id] [-m max_calibration_requests]
+
+Starts a TDMA master on the specified device <dev>. The cycle period length is
+given in microseconds using the <cycle_period> parameter. If <backup_offset>
+is provided, the master becomes a backup system. In case the main master
+fails, the backup master with the smallest <backup_offset> will start sending
+Synchronisation frames with the specified offset in microseconds relative to
+the scheduled cycle start. <calibration_rounds> specifies the number of clock
+calibration requests the master will send to any other potentially already
+active master during startup. By default, 100 rounds are performed. The
+calibration will be performed when the first slot is added. By default, a
+master can handle up to 64 calibration requests at the same time. This value
+can be adapted by specifying the <max_calibration_requests> parameter. The
+largest used slot ID is tunable by providing <max_slot_id> or will be limited
+to 7 if this parameter is omitted.
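+
+For example, the following call starts a master with a 5 ms cycle period
+(device name is illustrative):
+
+    tdmacfg rteth0 master 5000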
+
+tdmacfg <dev> slave [-c calibration_rounds] [-i max_slot_id]
+
+Starts a TDMA slave on the specified device <dev>. <calibration_rounds>
+specifies the number of clock calibration requests the slave sends to the
+active master during startup. By default, 100 rounds are performed. The
+calibration will be performed when the first slot is added. The largest used
+slot ID is tunable by providing <max_slot_id> or will be limited to 7 if this
+parameter is omitted.
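+
+An exemplary slave start using 100 calibration rounds:
+
+    tdmacfg rteth0 slave -c 100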
+
+tdmacfg <dev> slot <id> [<offset> [-p <phasing>/<period>] [-s <size>]
+        [-j joint_slot] [-l calibration_log_file] [-t calibration_timeout]]
+
+Adds, reconfigures, or removes a time slot for outgoing data on a started TDMA
+master or slave. <id> is used to distinguish between multiple slots. See above
+slot ID table for predefined values. If <offset> is given, the time slot is
+added or modified to send data with the specified offset in microseconds
+relative to the scheduled cycle start; if omitted, the slot is removed from
+the station's configuration.
+
+By default, a slot will be used in every cycle. When providing <phasing> and
+<period>, the slot will only be occupied in every <phasing>-th of <period>
+cycles. By assigning e.g. 1/2 to one and 2/2 to another slot, the usage of the
+physical time slot will alternate between both slot owners. The <size>
+parameter limits the maximum payload size in bytes which can be transmitted
+within this slot. If no <size> parameter is provided, the maximum size the
+hardware supports is applied. To share the same output queue among several
+slots, secondary slots can be attached to a primary <joint_slot>. The slot
+sizes must match for this purpose.
+
+The addition of the station's first slot will trigger the clock calibration
+process. To store the results of each calibration handshake, a
+<calibration_log_file> can be provided. By default, this command will not
+terminate until the calibration is completed. The <calibration_timeout>
+parameter can be used to specify an upper time limit.
+
+NOTE: Reconfiguring an existing slot during runtime can cause packet drops on
+the involved output channel. You should stop all applications using this slot
+before reconfiguring it.
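+
+An exemplary call adding slot 0 with an offset of 800 us, occupying every
+first of two cycles and limiting the payload to 1500 bytes:
+
+    tdmacfg rteth0 slot 0 800 -p 1/2 -s 1500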
+
+tdmacfg <dev> detach
+
+Detaches a master or slave from the given device <dev>. After this command,
+write access to the device is uncoordinated again and may interfere with
+remaining real-time network participants.
+
+
+
+NoMAC - Void Media Access Control
+=================================
+
+Foremost as a skeleton for new MAC implementations, the NoMAC discipline module
+is provided. It simply forwards every outgoing packet to the driver as soon as
+the stack passes it over. NoMAC is configured using the command line tool
+nomaccfg. To attach NoMAC to a real-time network adapter, call
+
+nomaccfg <dev> attach
+
+To detach it again, use
+
+nomaccfg <dev> detach
+
+
+
+VNIC configuration
+==================
+
+As soon as an RTmac discipline is loaded and appropriately configured for a
+real-time network adapter, a virtual network interface controller (VNIC) is
+provided to standard Linux. It is named "vnic<n>", where <n> is the number of
+the associated rteth device (e.g. rteth1 --> vnic1). You just have to configure
+the VNIC as a normal network device using ifconfig. You are even free to
+assign a different IP than the one used by the real-time interface.
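+
+For example (addresses are illustrative):
+
+    ifconfig vnic0 up 192.168.10.10 netmask 255.255.255.0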
+
+
+
+References
+==========
+
+ - Real-Time Media Access Control Framework (RTmac), revision 2.0
+ - TDMA Media Access Control Discipline, revision 2.1a
+ - RTnet Configuration Service (RTcfg), revision 1.7
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy
new file mode 100644
index 0000000..647cdb6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.rtnetproxy
@@ -0,0 +1,74 @@
+README.rtnetproxy
+===================
+08-Nov-2002, Mathias Koehrer <mathias_koehrer@yahoo.de>
+02-May-2008, Wolfgang Grandegger <wg@grandegger.com>
+
+
+RTnetproxy can be used to share a single network adapter for both realtime
+and non-realtime Ethernet traffic. TCP/IP, UDP and ARP can be used via RTnet
+(of course not in realtime!).
+
+RTnetproxy represents a network device to standard Linux and can be used
+like any other Linux network device (ifconfig for configuration); the name
+of the network device is "rtproxy".
+
+Setup:
+--------
+Get your RTnet working first! All IP addresses you are interested in have
+to be set via "rtifconfig ethX route solicit IP_ADDRESS"!
+
+     insmod rtnetproxy.o
+
+Now, you have a network device "rtproxy" ready to be used with Linux.
+Configure this network device using "ifconfig":
+
+Example:
+
+    ifconfig rtproxy up 192.168.10.10 netmask 255.255.255.0
+
+That's it!
+
+Configuration options:
+------------------------
+--enable-proxy: this enables RTnetproxy support, which is by default
+    restricted to IP-based protocols (TCP/IP!!!). Incoming ICMP frames are
+    interpreted directly by RTnet and are not forwarded to RTnetproxy. UDP
+    packets are forwarded if they are not requested by an RTnet application.
+
+--enable-proxy-arp: this option enables ARP support for the rtproxy Linux
+    network device. Incoming ARP replies are delivered to both the RTnet
+    and the Linux network stack. The rtproxy then gets attached to the
+    corresponding RTnet device, rteth0 by default.
+
+--disable-icmp: this option disables the RTnet IPv4 ICMP support. ICMP
+    will then be handled by the Linux network stack via the rtproxy Linux
+    network device.
+
+Important note:
+-----------------
+It is highly recommended to strictly separate realtime LAN traffic and non-
+realtime LAN traffic. For a configuration/setup phase, TCP/IP is sometimes
+very useful, but for realtime data exchange the LAN should be reserved for
+the realtime traffic using UDP!
+
+
+How it works internally:
+--------------------------
+RTnetproxy works on top of RTnet.
+All data to be sent out or received is actually copied between RTnet and
+RTnetproxy, so the performance is not as good as with the standard Linux
+network drivers.
+All incoming IPv4 frames with an IP protocol ID that is not handled by RTnet
+are passed to RTnetproxy.
+Incoming frames that are passed to RTnetproxy (TCP frames) slow down the
+realtime traffic a little bit, as all this is done in realtime mode context!
+
+
+Possible enhancements:
+-----------------------
+Pass incoming frames to RTnetproxy not only by checking the protocol ID but
+by actually checking whether a certain frame has been processed by RTnet or
+not. This would lead to a couple of changes in the RTnet implementation...
+
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp
new file mode 100644
index 0000000..73ccd8d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/README.tcp
@@ -0,0 +1,52 @@
+TCP implementation for RTnet
+----------------------------
+  The goal of implementing the TCP support for RTnet is to allow talking
+  to non-RTnet devices that only expose their features via a TCP interface.
+  TCP remains tricky to use under real-time constraints, so it may not be
+  the first choice when designing both sides from scratch.
+
+  TCP has been described in some 130 RFCs over the last 30 years (about
+  30 of them already outdated), which by itself complicates an already
+  complex stack.
+
+  To keep things simple, some features are missing, and some of them
+  could be improved. Below is a short list of misfeatures and wishlist
+  items.
+
+  *) PSH and URG packet flags are ignored and do not influence stack
+     or application behaviour.
+  *) TCP packet options like MSS or window scaling are neither parsed
+     in input packets nor generated.
+  *) The TCP stack suffers from the well-known silly window syndrome
+     (see RFC 813 for details). In short, SWS is a degradation of
+     throughput which develops over time during a long data transfer.
+     Removing this misfeature would not be a challenging task, but for
+     now it is present in the implementation. If your application uses
+     short TCP transfers, you won't notice any discomfort, but if you
+     would like to develop an FTP or HTTP server over RTnet TCP, keep
+     this warning in mind.
+  *) The server part of the stack is in an embryonic phase: it is
+     possible to create one server connection in a non-POSIX-compliant
+     way, but not more. The server connection uses the socket
+     descriptor that you _pass_ to accept(), not one _returned_ by the
+     call. As a consequence, the listen() connection queue is not
+     implemented, since it would have no use.
+  *) Half-closed connections, i.e. those entered via shutdown() calls,
+     are not implemented.
+  *) sendmsg() and recvmsg() accept only one-element I/O vectors.
+  *) Looking at BSD code, one can find up to seven timers related to
+     every connection. The RTnet implementation exploits the idea of a
+     timerwheel data structure to manage only one timer per connection:
+     the packet retransmission timer. Timerwheels could be used to
+     implement other kinds of timers, at the price of one additional
+     thread in the stack per kind of timer.
+     To simplify the stack logic, there are no timers for RTO,
+     connection establishment (the retransmission timer is reused),
+     delayed ACK, persist, keepalive (half-implemented), FIN_WAIT_2 and
+     TIME_WAIT.
+  *) In comparison with Berkeley sockets, lots of socket options are
+     not implemented. For now only SO_SNDTIMEO is implemented, and
+     SO_KEEPALIVE is half-implemented.
+  *) TCP congestion avoidance is not covered at all.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec
new file mode 100644
index 0000000..0745aab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTcfg.spec
@@ -0,0 +1,469 @@
+                     RTnet Configuration Service (RTcfg)
+                     ===================================
+
+                                Revision: 1.8
+
+
+RTcfg is a configuration service for setting up a RTnet network and
+distributing additional user-defined configuration data. This document
+describes the protocol and the user interface of RTcfg.
+
+
+
+Sequence Diagram
+================
+
+Normal Startup
+--------------
+
+Configuration                                                   Existing
+   Server                              New Client                Client
+      |                                     |                      |
+      |       Client Config, Stage 1        |                      |
+      |  (unicast/broadcast, single frame)  |   (if broadcasted)   |
+      |-----------------------------------> | -------------------->|
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | |   Set               |
+      |                                    | | Config 1            |
+      |                                    | |                     |
+      |                                    +-+                     |
+      .                                     .                      .
+      .                                     .                      .
+      |         Announce (broadcast)        |                      |
+      | <-----------------------------------|--------------------> |
+     +-+                                    |                     +-+
+     | |                                    |                     | |
+     | |                                    |                     | | Update
+     | |                                    |                     | | Tables
+     | |                                    |                     | |
+     | |                                    |  Announce (unicast) +-+
+     | | Update                             | <------------------- |
+     | | Tables                            +-+                     |
+     | |                                   | |                     |
+     | |                                   | | Update              |
+     | |                                   | | Tables              |
+     | |                                   | |                     |
+     | |      Client Config, Stage 2       +-+                     |
+     +-+    (unicast, multiple frames)      |                      |
+      | ----------------------------------> |                      |
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | | Receive             |
+      |                                    | | Config 2            |
+      |                                    | |                     |
+      |     Acknowledge Config (unicast)   +-+                     |
+      |<----------------------------------- |                      |
+      |                                     |                      |
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | | Process             |
+      |                                    | | Config 2            |
+      |                                    | |                     |
+      |                                    +-+                     |
+      .                                     .                      .
+      .                                     .                      .
+      |          Ready (broadcast)          |                      |
+      |<------------------------------------|--------------------->|
+      .                                     .                      .
+      .                                     .                      .
+      |          Ready (broadcast)          |                      |
+      |------------------------------------>|--------------------->|
+      |                                     |                      |
+
+
+
+Normal Operation
+----------------
+
+Configuration
+   Server                                Client A               Client B
+      |         Heartbeat (unicast)         |                      |
+      |<------------------------------------|                      |
+      |                                     |                      |
+      .                                     .                      .
+      .                                     .                      .
+      |         Heartbeat (unicast)         |                      |
+      |<-----------------------------------------------------------|
+      |                                     |                      |
+
+
+
+Failing Client
+--------------
+
+Configuration
+   Server                                Client A               Client B
+      |                                     |                      |
+     +-+                                    |                      |
+     | |                                    |                      |
+     | | Missing                            |                      |
+     | | Heartbeat                          |                      |
+     | | Detection                          |                      |
+     | |                                    |                      |
+     +-+      Dead Station (broadcast)      |                      |
+      | ----------------------------------> | -------------------> |
+     +-+                                   +-+                    +-+
+     | |                                   | |                    | |
+     | | Update                            | | Update             | | Update
+     | | Tables                            | | Tables             | | Tables
+     | |                                   | |                    | |
+     +-+                                   +-+                    +-+
+      |                                     |                      |
+
+
+
+Server Restart
+--------------
+
+Configuration                            Running                Running
+   Server                                Client A               Client B
+      |                                     |                      |
+      |       Client Config, Stage 1        |                      |
+      |  (unicast/broadcast, single frame)  |   (if broadcasted)   |
+      |-----------------------------------> | -------------------->|
+      |                                    +-+                     |
+      |                                    | |                     |
+      |                                    | | Receive             |
+      |                                    | | Config 1            |
+      |                                    | |                     |
+      |         Announce (unicast)         +-+                     |
+      |<----------------------------------- |                      |
+     +-+                                   +-+                     |
+     | |                                   | |                     |
+     | | Update                            | | Update              |
+     | | Client Status                     | | Server Address      |
+     | | and Tables                        | | and Tables          |
+     | |                                   | |                     |
+     +-+                                   +-+                     |
+      |                                     |                      |
+
+Note: The configuration of a restarted or replaced server must not differ from
+      the configuration the currently running clients originally received. The
+      only exceptions are the server's physical and logical addresses.
+
+
+
+Frame Formats
+=============
+
+RTcfg frames are identified by the hexadecimal Ethernet type 0x9022. All frame
+fields are encoded in network order (big endian). The first field consists of
+an identification byte as illustrated below. Currently, the version bits are
+zero in all frames, but they must be ignored in order to remain compatible
+with possible future extensions.
+
+ +---------------+------------------------+
+ |  Bits 7 - 5   |       Bits 4 - 0       |
+ | Frame Version |    Frame Identifier    |
+ +---------------+------------------------+
+
+When using RTmac, the lowest real-time priority is applied to RTcfg frames.
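+
+For illustration, the identification byte can be decomposed as follows (macro
+names are illustrative, not taken from the RTnet sources):
+
+    #define RTCFG_FRM_VERSION(b)  (((b) >> 5) & 0x07)  /* bits 7 - 5 */
+    #define RTCFG_FRM_ID(b)       ((b) & 0x1f)         /* bits 4 - 0 */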
+
+
+
+Stage 1 Configuration Frame
+---------------------------
+
+ +----------+----------------+----------------+----------------+ - -
+ |  ID: 0   | Client Address | Client Address | Server Address |
+ | (1 byte) |  Type (1 byte) |   (variable)   |   (variable)   |
+ +----------+----------------+----------------+----------------+ - -
+  - - +---------------+-----------------+-----------------+
+      | Stage 2 Burst |  Configuration  |  Configuration  |
+      | Rate (1 Byte) | Length (2 bytes)| Data (variable) |
+  - - +---------------+-----------------+-----------------+
+
+The overall frame length must not be greater than the MTU of the network
+interface (typical: 1500 bytes). It might be limited by the installed RTmac
+discipline.
+
+Valid address types are:
+
+  Symbolic Name    | Value | Address Length [Bytes per Field]
+ ------------------+-------+----------------------------------
+   RTCFG_ADDR_MAC  |   0   |                0
+   RTCFG_ADDR_IP   |   1   |                4
+   <extensible>    |  ...  |               ...
+
+Stage 1 Configuration frames are sent as unicast when either only physical
+client addresses are used (RTCFG_ADDR_MAC), or when the linkage of physical and
+logical (e.g. RTCFG_ADDR_IP) address is known. In any other case the frames
+are broadcasted to all stations.
+
+The Stage 2 Burst Rate field specifies the number of stage 2 configuration
+frames the server is able to send without receiving an Acknowledge
+Configuration frame. See below for the handshake mechanism to determine the
+actual burst rate.
+
+The configuration data of the first stage typically consists of parameters (or
+even shell commands) which are required for the new client to become part of
+an RTmac-managed network. If no data is available for this stage (e.g. when
+RTmac is not used), the server sets the Configuration Length field to zero.
+
+
+
+Announcement Frames
+-------------------
+
+New Announcement Frame:
+ +----------+----------------+----------------+----------+---------------+
+ |  ID: 1   | Client Address | Client Address |  Flags   | Stage 2 Burst |
+ | (1 byte) |  Type (1 byte) |   (variable)   | (1 byte) | Rate (1 byte) |
+ +----------+----------------+----------------+----------+---------------+
+
+Reply Announcement Frame:
+ +----------+----------------+----------------+----------+---------------+
+ |  ID: 2   | Client Address | Client Address |  Flags   | Padding Field |
+ | (1 byte) |  Type (1 byte) |   (variable)   | (1 byte) |   (1 byte)    |
+ +----------+----------------+----------------+----------+---------------+
+
+See "Stage 1 Configuration Frame" for valid address types and lengths.
+
+New Announcement frames are sent as broadcast so that every other client can
+update its ARP and routing table appropriately. In contrast, the Reply
+Announcement frame is sent directly to the new client. A Reply Announcement
+frame is also sent to the server if a client received a Stage 1 Configuration
+frame while already being in operation mode. This occurs when the server is
+restarted or replaced after a failure.
+
+Flags are encoded as follows:
+
+  Bit Number | Interpretation if set
+ ------------+---------------------------------------------------------------
+       0     | requests available stage 2 configuration data from the server
+       1     | client is ready (i.e. will not send an explicit Ready frame)
+      2-7    | <reserved>
+
+Furthermore, the client reports its own Stage 2 Burst Rate back to the server.
+The minimum of the server and the client value is selected as the actual burst
+rate. After the server has sent the corresponding number of Stage 2
+Configuration frames, it will wait for an Acknowledge Configuration frame
+from the client.
+
+
+
+Stage 2 Configuration Frames
+----------------------------
+
+Initial Frame:
+ +----------+----------+-----------------+------------------+ - -
+ |  ID: 3   |  Flags   | Active Stations | Heartbeat Period |
+ | (1 byte) | (1 byte) |    (4 bytes)    |    (2 bytes)     |
+ +----------+----------+-----------------+------------------+ - -
+  - - +----------------------+--------------------+
+      | Configuration Length | Configuration Data |
+      |      (4 bytes)       |     (variable)     |
+  - - +----------------------+--------------------+
+
+Subsequent Fragments:
+ +----------+-----------------+--------------------+
+ |  ID: 4   | Fragment Offset | Configuration Data |
+ | (1 byte) |    (4 bytes)    |     (variable)     |
+ +----------+-----------------+--------------------+
+
+The maximum length of a fragment is determined by the available MTU.
+
+Stage 2 Configuration frames are always sent as unicast.
+
+The Active Stations field contains the number of currently running stations,
+including the server, but excluding the new client. This number is used by the
+client to detect when all other clients have sent their Reply Announcement
+frames, and when all stations have reported to be ready.
+
+If the heartbeat mechanism shall be enabled on the new client, the Heartbeat
+Period field contains the client's period in milliseconds for sending Heartbeat
+frames. Otherwise it is set to zero.
+
+Flags are encoded as follows:
+
+  Bit Number | Interpretation if set
+ ------------+---------------------------------------------------------------
+       0     | <reserved>
+       1     | server is ready (i.e. will not send an explicit Ready frame)
+      2-7    | <reserved>
+
+The second configuration stage can be used to distribute user-defined
+configurations, applications, etc. (e.g. by sending a tar archive). If no
+data is available for this stage, the server sets the Configuration Length
+field to zero.
+
+
+
+Acknowledge Configuration Frames
+--------------------------------
+
+ +----------+--------------------+
+ |  ID: 5   | Acknowledge Length |
+ | (1 byte) |     (4 bytes)      |
+ +----------+--------------------+
+
+An Acknowledge Configuration frame is sent by a new client after it has either
+received the number of Stage 2 Configuration frames specified by the negotiated
+burst rate (see above), or the last expected Stage 2 Configuration frame has
+arrived.
+
+The Acknowledge Length field is set to the number of bytes successfully
+received so far. If the client has detected an inconsistent fragment, this
+number only reflects the amount of data which was correctly received. The
+server will then continue the Stage 2 Configuration frame transmission
+according to the specified offset.
+
+
+
+Ready Frame
+-----------
+
+ +----------+
+ |  ID: 6   |
+ | (1 byte) |
+ +----------+
+
+After a station has finished its setup procedures, it signals this state to all
+other stations by sending a Ready frame as broadcast. This allows the server
+and the clients to synchronise the completion of their configuration phase. The
+frame is not sent if the client has already set the Ready Bit in its New
+Announcement frame.
+
+
+
+Heartbeat Frame
+---------------
+
+ +----------+
+ |  ID: 7   |
+ | (1 byte) |
+ +----------+
+
+Every client has to send Heartbeat frames within the period specified in the
+Stage 2 Configuration frame as unicast to the server.
+
+
+
+Dead Station Frame
+------------------
+
+ +----------+----------------+--------------------+--------------------+
+ |  ID: 8   | Client Address |   Logical Client   |  Physical Client   |
+ | (1 byte) |  Type (1 byte) | Address (variable) | Address (32 bytes) |
+ +----------+----------------+--------------------+--------------------+
+
+See "Stage 1 Configuration Frame" for valid address types and lengths.
+
+When the server detects that a client failed to send a heartbeat frame within
+the specified maximum period, it broadcasts a Dead Station frame to all other
+clients. Every station will then remove the corresponding entries from its ARP
+and routing tables.
+
+
+
+Management Tool
+===============
+
+NOTE: The following specifications are OPTIONAL. They describe the internal
+      realisation of RTcfg as applied to the implementation in RTnet.
+
+The RTcfg server and client functionality is controlled by the command line
+tool rtcfg.
+
+
+
+Server Commands
+---------------
+
+rtcfg <dev> server [-p period] [-b burstrate] [-h <heartbeat>]
+      [-t <threshold>] [-r]
+
+Starts an RTcfg server for the specified device <dev>. The server then sends
+stage 1 configuration frames to new clients every 1000 ms. <period> (in
+milliseconds) can be used to override this interval. The number of
+clients invited within one period is controlled by <burstrate> (default: 4).
+This value also defines the number of stage 2 configuration fragments the
+server should send, as far as the client supports it (see also "announce").
+<heartbeat> specifies the Heartbeat period of the clients in milliseconds
+(default: 1000 ms); the value 0 turns the heartbeat mechanism off. <threshold>
+sets the number of missing heartbeats after which a client shall be considered
+dead (default: 2). If -r is given, the server automatically reports to be
+ready within its stage 1 configuration frame, thus relieving it from issuing
+an explicit "ready" command.
+
+rtcfg <dev> add <address> [-hw <hw_address>] [-stage1 <stage1_file>]
+      [-stage2 <stage2_file>] [-t <timeout>]
+
+Adds a client to the server's list of potential participants of the network
+connected to the specified device <dev>. <address> can be either an IP address
+(A.B.C.D) or a physical address (AA:BB:CC:DD:EE:FF). If a physical address is
+explicitly assigned using <hw_address>, the <address> parameter must define
+the client's IP address. Optionally, files can be specified which will be
+passed during the different configuration stages. If <stage1_file> is "-",
+rtcfg will read the stage 1 data from standard input. <timeout> (in
+milliseconds) defines the internal timeout after which a half-finished client
+configuration is reset to its initial state again. By default this reset is
+never performed.
+
+rtcfg <dev> del <address>
+
+Removes a client from the list of network participants. See above for details
+about the address format.
+
+rtcfg <dev> wait [-t <timeout>]
+
+Waits until both configuration stages for all clients in the server's list are
+completed. If <timeout> (in milliseconds) is given, rtcfg will return an error
+code when the configuration cannot be completed within the specified time. The
+default timeout is infinite.
+
+rtcfg <dev> ready [-t <timeout>]
+
+Reports that the server has completed its setup, generally including the RTmac
+startup phase, and waits until all other stations are reporting to be ready as
+well. If <timeout> (in milliseconds) is given, rtcfg will return an error code
+when the synchronisation cannot be completed within the specified time. The
+default timeout is infinite.
+
+rtcfg <dev> detach
+
+Stops the RTcfg server on the specified device <dev>. Afterwards, the device
+can be re-configured to act as server or client.
+
+
+
+Client Commands
+---------------
+
+rtcfg <dev> client [-t <timeout>] [-c|-f <stage1_file>] [-m maxstations]
+
+Waits until the first configuration stage is completed for the device <dev>.
+If <timeout> (in milliseconds) is given, rtcfg will return an error code when
+the configuration cannot be completed within the specified time. The default
+timeout is infinite. The incoming configuration data is either sent to the
+standard output if -c is given or to <stage1_file> if specified. By default
+clients can synchronise with up to 32 other stations (including the server).
+This limit can be modified using the <maxstations> parameter.
+
+rtcfg <dev> announce [-t <timeout>] [-c|-f <stage2_file>] [-b burstrate] [-r]
+
+Sends a New Announcement frame over the device <dev> and waits until this
+second configuration stage is completed. If <timeout> (in milliseconds) is
+given, rtcfg will return an error code when the configuration cannot be
+completed within the specified time. The default timeout is infinite. If -c or
+-f is given, stage 2 configuration data is requested and either sent to the
+standard output or to <stage2_file>. <burstrate> controls the number of stage 2
+configuration fragments the client should accept (default: 4). The actual
+amount is negotiated according to both the client's and the server's capability
+(see also "server"). If -r is given, the client automatically reports to be
+ready within its announcement frame, thus relieving it from issuing an
+explicit "ready" command.
+
+rtcfg <dev> ready [-t <timeout>]
+
+Reports that the client has completed its setup and waits until all other
+stations are reporting to be ready as well. If <timeout> (in milliseconds) is
+given, rtcfg will return an error code when the synchronisation cannot be
+completed within the specified time. The default timeout is infinite.
+
+rtcfg <dev> detach
+
+Stops the RTcfg client on the specified device <dev>. Afterwards, the device
+can be re-configured to act as server or client.
+
+
+2003-2005, Jan Kiszka <jan.kiszka-at-web.de>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec
new file mode 100644
index 0000000..4e22547
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTmac.spec
@@ -0,0 +1,44 @@
+               Real-Time Media Access Control Framework (RTmac)
+               ================================================
+
+                                Revision: 2.0
+
+
+This document describes the protocol header of the real-time media access
+control framework for RTnet. The actual control protocol is implemented by
+so-called disciplines, see related specifications.
+
+
+
+Frame Format
+============
+
+RTmac frames are identified by the hexadecimal Ethernet type 0x9021. All frame
+fields are encoded in network order (big endian). The version identifier of
+the RTmac header shall only be changed if the format becomes incompatible
+with the previous revision. Currently, this version field contains the
+hexadecimal value 0x02.
+
+
+
+RTmac Frame Header
+------------------
+
+ +----------------------+---------------+---------------+
+ |         Type         | Version: 0x02 |     Flags     |
+ |      (2 bytes)       |   (1 byte)    |    (1 byte)   |
+ +----------------------+---------------+---------------+
+
+Depending on the tunnelling flag, the type field either contains the
+identifier of the succeeding discipline frame or the Ethernet type of a
+tunnelled non-real-time packet introduced by this header.
+
+Flags are encoded as follows:
+
+  Bit Number | Interpretation if set
+ ------------+---------------------------------------------------------------
+       0     | tunnelling frame if set, otherwise discipline frame
+      1-7    | <reserved>
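+
+For illustration, the header above could be declared in C as follows (a
+sketch; field names are not taken from the RTnet sources):
+
+    #include <stdint.h>
+
+    struct rtmac_hdr {
+            uint16_t type;  /* discipline ID or tunnelled Ethernet type,
+                               big endian on the wire */
+            uint8_t  ver;   /* currently 0x02 */
+            uint8_t  flags; /* bit 0: tunnelling frame */
+    } __attribute__((packed));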
+
+
+2004, Jan Kiszka <jan.kiszka-at-web.de>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy
new file mode 100644
index 0000000..7db150f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/RTnet.oxy
@@ -0,0 +1,1150 @@
+# If you run "doxygen RTnet.oxy", a subdirectory RTnet.oxy.html will
+# be generated, where you can find the overview about RTnet.
+#
+# This overview is not meant as reference documentation for users but
+# as overview over the data structures of RTnet for developers.
+#
+# Doxygen requires the package graphviz to generate the graphics.
+
+# Doxyfile 1.3.7
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project
+#
+# All text after a hash (#) is considered a comment and will be ignored
+# The format is:
+#       TAG = value [value, ...]
+# For lists items can also be appended using:
+#       TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ")
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded 
+# by quotes) that should identify the project.
+
+PROJECT_NAME           = RTnet
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number. 
+# This could be handy for archiving the generated documentation or 
+# if some version control system is used.
+
+PROJECT_NUMBER         =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute) 
+# base path where the generated documentation will be put. 
+# If a relative path is entered, it will be relative to the location 
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY       = 
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create 
+# 2 levels of 10 sub-directories under the output directory of each output 
+# format and will distribute the generated files over these directories. 
+# Enabling this option can be useful when feeding doxygen a huge amount of source 
+# files, where putting all generated files in the same directory would otherwise 
+# cause performance problems for the file system.
+
+CREATE_SUBDIRS         = YES
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all 
+# documentation generated by doxygen is written. Doxygen will use this 
+# information to generate all constant output in the proper language. 
+# The default language is English, other supported languages are: 
+# Brazilian, Catalan, Chinese, Chinese-Traditional, Croatian, Czech, Danish, Dutch, 
+# Finnish, French, German, Greek, Hungarian, Italian, Japanese, Japanese-en 
+# (Japanese with English messages), Korean, Korean-en, Norwegian, Polish, Portuguese, 
+# Romanian, Russian, Serbian, Slovak, Slovene, Spanish, Swedish, and Ukrainian.
+
+OUTPUT_LANGUAGE        = English
+
+# This tag can be used to specify the encoding used in the generated output. 
+# The encoding is not always determined by the language that is chosen, 
+# but also whether or not the output is meant for Windows or non-Windows users. 
+# In case there is a difference, setting the USE_WINDOWS_ENCODING tag to YES 
+# forces the Windows encoding (this is the default for the Windows binary), 
+# whereas setting the tag to NO uses a Unix-style encoding (the default for 
+# all platforms other than Windows).
+
+USE_WINDOWS_ENCODING   = NO
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will 
+# include brief member descriptions after the members that are listed in 
+# the file and class documentation (similar to JavaDoc). 
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC      = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend 
+# the brief description of a member or function before the detailed description. 
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the 
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF           = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator 
+# that is used to form the text in various listings. Each string 
+# in this list, if found as the leading text of the brief description, will be 
+# stripped from the text and the result after processing the whole list, is used 
+# as the annotated text. Otherwise, the brief description is used as-is. If left 
+# blank, the following values are used ("$name" is automatically replaced with the 
+# name of the entity): "The $name class" "The $name widget" "The $name file" 
+# "is" "provides" "specifies" "contains" "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF       = 
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then 
+# Doxygen will generate a detailed section even if there is only a brief 
+# description.
+
+ALWAYS_DETAILED_SEC    = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all inherited 
+# members of a class in the documentation of that class as if those members were 
+# ordinary class members. Constructors, destructors and assignment operators of 
+# the base classes will not be shown.
+
+INLINE_INHERITED_MEMB  = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full 
+# path before files name in the file list and in the header files. If set 
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES        = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag 
+# can be used to strip a user-defined part of the path. Stripping is 
+# only done if one of the specified strings matches the left-hand part of 
+# the path. The tag can be used to show relative paths in the file list. 
+# If left blank the directory from which doxygen is run is used as the 
+# path to strip.
+
+STRIP_FROM_PATH        = 
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of 
+# the path mentioned in the documentation of a class, which tells 
+# the reader which header file to include in order to use a class. 
+# If left blank only the name of the header file containing the class 
+# definition is used. Otherwise one should specify the include paths that 
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH    = 
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter 
+# (but less readable) file names. This can be useful if your file system 
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES            = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen 
+# will interpret the first line (until the first dot) of a JavaDoc-style 
+# comment as the brief description. If set to NO, the JavaDoc 
+# comments will behave just like the Qt-style comments (thus requiring an 
+# explicit @brief command for a brief description).
+
+JAVADOC_AUTOBRIEF      = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen 
+# treat a multi-line C++ special comment block (i.e. a block of //! or /// 
+# comments) as a brief description. This used to be the default behaviour. 
+# The new default is to treat a multi-line C++ comment block as a detailed 
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the DETAILS_AT_TOP tag is set to YES then Doxygen 
+# will output the detailed description near the top, like JavaDoc.
+# If set to NO, the detailed description appears after the member 
+# documentation.
+
+DETAILS_AT_TOP         = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented 
+# member inherits the documentation from any documented member that it 
+# re-implements.
+
+INHERIT_DOCS           = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC 
+# tag is set to YES, then doxygen will reuse the documentation of the first 
+# member in the group (if any) for the other members of the group. By default 
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC   = NO
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab. 
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE               = 8
+
+# This tag can be used to specify a number of aliases that act 
+# as commands in the documentation. An alias has the form "name=value". 
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to 
+# put the command \sideeffect (or @sideeffect) in the documentation, which 
+# will result in a user-defined paragraph with heading "Side Effects:". 
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES                = 
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C sources 
+# only. Doxygen will then generate output that is more tailored for C. 
+# For instance, some of the names that are used will be different. The list 
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C  = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java sources 
+# only. Doxygen will then generate output that is more tailored for Java. 
+# For instance, namespaces will be presented as packages, qualified scopes 
+# will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA   = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of 
+# the same type (for instance a group of public functions) to be put as a 
+# subgroup of that type (e.g. under the Public Functions section). Set it to 
+# NO to prevent subgrouping. Alternatively, this can be done per class using 
+# the \nosubgrouping command.
+
+SUBGROUPING            = YES
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities 
+# are documented, even if no documentation is available. 
+# Private class members and static file members will be hidden unless 
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL            = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class 
+# will be included in the documentation.
+
+EXTRACT_PRIVATE        = NO
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file 
+# will be included in the documentation.
+
+EXTRACT_STATIC         = NO
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs) 
+# defined locally in source files will be included in the documentation. 
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES  = YES
+
+# This flag is only useful for Objective-C code. When set to YES local 
+# methods, which are defined in the implementation section but not in 
+# the interface, are included in the documentation. 
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS  = NO
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all 
+# undocumented members of documented classes, files or namespaces. 
+# If set to NO (the default) these members will be included in the 
+# various overviews, but no documentation section is generated. 
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS     = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all 
+# undocumented classes that are normally visible in the class hierarchy. 
+# If set to NO (the default) these classes will be included in the various 
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES     = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all 
+# friend (class|struct|union) declarations. 
+# If set to NO (the default) these declarations will be included in the 
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS  = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any 
+# documentation blocks found inside the body of a function. 
+# If set to NO (the default) these blocks will be appended to the 
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS      = NO
+
+# The INTERNAL_DOCS tag determines if documentation 
+# that is typed after a \internal command is included. If the tag is set 
+# to NO (the default) then the documentation will be excluded. 
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS          = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate 
+# file names in lower-case letters. If set to YES upper-case letters are also 
+# allowed. This is useful if you have classes or files whose names only differ 
+# in case and if your file system supports case sensitive file names. Windows 
+# users are advised to set this option to NO.
+
+CASE_SENSE_NAMES       = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen 
+# will show members with their full class and namespace scopes in the 
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES       = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen 
+# will put a list of the files that are included by a file in the documentation 
+# of that file.
+
+SHOW_INCLUDE_FILES     = YES
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline] 
+# is inserted in the documentation for inline members.
+
+INLINE_INFO            = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen 
+# will sort the (detailed) documentation of file and class members 
+# alphabetically by member name. If set to NO the members will appear in 
+# declaration order.
+
+SORT_MEMBER_DOCS       = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the 
+# brief documentation of file, namespace and class members alphabetically 
+# by member name. If set to NO (the default) the members will appear in 
+# declaration order.
+
+SORT_BRIEF_DOCS        = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be 
+# sorted by fully-qualified names, including namespaces. If set to 
+# NO (the default), the class list will be sorted only by class name, 
+# not including the namespace part. 
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the 
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME     = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or 
+# disable (NO) the todo list. This list is created by putting \todo 
+# commands in the documentation.
+
+GENERATE_TODOLIST      = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or 
+# disable (NO) the test list. This list is created by putting \test 
+# commands in the documentation.
+
+GENERATE_TESTLIST      = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or 
+# disable (NO) the bug list. This list is created by putting \bug 
+# commands in the documentation.
+
+GENERATE_BUGLIST       = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or 
+# disable (NO) the deprecated list. This list is created by putting 
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional 
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS       = 
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines 
+# the initial value of a variable or define consists of for it to appear in 
+# the documentation. If the initializer consists of more lines than specified 
+# here it will be hidden. Use a value of 0 to hide initializers completely. 
+# The appearance of the initializer of individual variables and defines in the 
+# documentation can be controlled using \showinitializer or \hideinitializer 
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES  = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated 
+# at the bottom of the documentation of classes and structs. If set to YES the 
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES        = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated 
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET                  = NO
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are 
+# generated by doxygen. Possible values are YES and NO. If left blank 
+# NO is used.
+
+WARNINGS               = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings 
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will 
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED   = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for 
+# potential errors in the documentation, such as not documenting some 
+# parameters in a documented function, or documenting parameters that 
+# don't exist, or using markup commands incorrectly.
+
+WARN_IF_DOC_ERROR      = YES
+
+# The WARN_FORMAT tag determines the format of the warning messages that 
+# doxygen can produce. The string should contain the $file, $line, and $text 
+# tags, which will be replaced by the file and line number from which the 
+# warning originated and the warning text.
+
+WARN_FORMAT            = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning 
+# and error messages should be written. If left blank the output is written 
+# to stderr.
+
+WARN_LOGFILE           = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain 
+# documented source files. You may enter file names like "myfile.cpp" or 
+# directories like "/usr/src/myproject". Separate the files or directories 
+# with spaces.
+
+INPUT                  = ..
+
+# If the value of the INPUT tag contains directories, you can use the 
+# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp 
+# and *.h) to filter out the source files in the directories. If left 
+# blank the following patterns are tested: 
+# *.c *.cc *.cxx *.cpp *.c++ *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh *.hxx *.hpp 
+# *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm
+
+FILE_PATTERNS          = 
+
+# The RECURSIVE tag can be used to specify whether or not subdirectories 
+# should be searched for input files as well. Possible values are YES and NO. 
+# If left blank NO is used.
+
+RECURSIVE              = YES
+
+# The EXCLUDE tag can be used to specify files and/or directories that should 
+# be excluded from the INPUT source files. This way you can easily exclude a 
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE                = 
+
+# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or directories 
+# that are symbolic links (a Unix filesystem feature) are excluded from the input.
+
+EXCLUDE_SYMLINKS       = NO
+
+# If the value of the INPUT tag contains directories, you can use the 
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude 
+# certain files from those directories.
+
+EXCLUDE_PATTERNS       = 
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or 
+# directories that contain example code fragments that are included (see 
+# the \include command).
+
+EXAMPLE_PATH           = 
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the 
+# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp 
+# and *.h) to filter out the source files in the directories. If left 
+# blank all files are included.
+
+EXAMPLE_PATTERNS       = 
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be 
+# searched for input files to be used with the \include or \dontinclude 
+# commands irrespective of the value of the RECURSIVE tag. 
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE      = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or 
+# directories that contain images that are included in the documentation (see 
+# the \image command).
+
+IMAGE_PATH             = 
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should 
+# invoke to filter for each input file. Doxygen will invoke the filter program 
+# by executing (via popen()) the command <filter> <input-file>, where <filter> 
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an 
+# input file. Doxygen will then use the output that the filter program writes 
+# to standard output.
+
+INPUT_FILTER           = 
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using 
+# INPUT_FILTER) will be used to filter the input files when producing source 
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES    = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will 
+# be generated. Documented entities will be cross-referenced with these sources. 
+# Note: To get rid of all source code in the generated output, make sure also 
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER         = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body 
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES         = YES
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct 
+# doxygen to hide any special comment blocks from generated source code 
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS    = YES
+
+# If the REFERENCED_BY_RELATION tag is set to YES (the default) 
+# then for each documented function all documented 
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES (the default) 
+# then for each documented function all documented entities 
+# called/used by that function will be listed.
+
+REFERENCES_RELATION    = YES
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen 
+# will generate a verbatim copy of the header file for each class for 
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS       = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index 
+# of all compounds will be generated. Enable this if the project 
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX     = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then 
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns 
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX    = 5
+
+# In case all classes in a project start with a common prefix, all 
+# classes will be put under the same header in the alphabetical index. 
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that 
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX          = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will 
+# generate HTML output.
+
+GENERATE_HTML          = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT            = RTnet.oxy.html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for 
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank 
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION    = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard header.
+
+HTML_HEADER            = 
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for 
+# each generated HTML page. If it is left blank doxygen will generate a 
+# standard footer.
+
+HTML_FOOTER            = 
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading 
+# style sheet that is used by each HTML page. It can be used to 
+# fine-tune the look of the HTML output. If the tag is left blank doxygen 
+# will generate a default style sheet. Note that doxygen will try to copy 
+# the style sheet file to the HTML output directory, so don't put your own 
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET        = 
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes, 
+# files or namespaces will be aligned in HTML using tables. If set to 
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS     = YES
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files 
+# will be generated that can be used as input for tools like the 
+# Microsoft HTML help workshop to generate a compressed HTML help file (.chm) 
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP      = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can 
+# be used to specify the file name of the resulting .chm file. You 
+# can add a path in front of the file if the result should not be 
+# written to the html output directory.
+
+CHM_FILE               = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can 
+# be used to specify the location (absolute path including file name) of 
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run 
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION           = 
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag 
+# controls whether a separate .chi index file is generated (YES) or whether 
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI           = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag 
+# controls whether a binary table of contents is generated (YES) or a 
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC             = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members 
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND             = NO
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at 
+# top of each HTML page. The value NO (the default) enables the index and 
+# the value YES disables it.
+
+DISABLE_INDEX          = NO
+
+# This tag can be used to set the number of enum values (range [1..20]) 
+# that doxygen will group on one line in the generated HTML documentation.
+
+ENUM_VALUES_PER_LINE   = 4
+
+# If the GENERATE_TREEVIEW tag is set to YES, a side panel will be
+# generated containing a tree-like index structure (just like the one that 
+# is generated for HTML Help). For this to work a browser that supports 
+# JavaScript, DHTML, CSS and frames is required (for instance Mozilla 1.0+, 
+# Netscape 6.0+, Internet Explorer 5.0+, or Konqueror). Windows users are 
+# probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW      = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be 
+# used to set the initial width (in pixels) of the frame in which the tree 
+# is shown.
+
+TREEVIEW_WIDTH         = 250
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will 
+# generate LaTeX output.
+
+GENERATE_LATEX         = NO
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT           = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be 
+# invoked. If left blank `latex' will be used as the default command name.
+
+LATEX_CMD_NAME         = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to 
+# generate an index for LaTeX. If left blank `makeindex' will be used as the 
+# default command name.
+
+MAKEINDEX_CMD_NAME     = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact 
+# LaTeX documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_LATEX          = NO
+
+# The PAPER_TYPE tag can be used to set the paper type that is used 
+# by the printer. Possible values are: a4, a4wide, letter, legal and 
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE             = a4wide
+
+# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX 
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES         = 
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for 
+# the generated latex document. The header should contain everything until 
+# the first chapter. If it is left blank doxygen will generate a 
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER           = 
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated 
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS         = NO
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of 
+# plain latex in the generated Makefile. Set this option to YES to get a 
+# higher quality PDF documentation.
+
+USE_PDFLATEX           = NO
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode 
+# command to the generated LaTeX files. This will instruct LaTeX to keep 
+# running if errors occur, instead of asking the user for help. 
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE        = NO
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not 
+# include the index chapters (such as File Index, Compound Index, etc.) 
+# in the output.
+
+LATEX_HIDE_INDICES     = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output. 
+# The RTF output is optimized for Word 97 and may not look very pretty with 
+# other RTF readers or editors.
+
+GENERATE_RTF           = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT             = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact 
+# RTF documents. This may be useful for small projects and may help to 
+# save some trees in general.
+
+COMPACT_RTF            = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated 
+# will contain hyperlink fields. The RTF file will 
+# contain links (just like the HTML output) instead of page references. 
+# This makes the output suitable for online browsing using WORD or other 
+# programs which support those fields. 
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS         = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's 
+# config file, i.e. a series of assignments. You only have to provide 
+# replacements; missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE    = 
+
+# Set optional variables used in the generation of an rtf document. 
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE    = 
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES Doxygen will 
+# generate man pages.
+
+GENERATE_MAN           = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT             = man
+
+# The MAN_EXTENSION tag determines the extension that is added to 
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION          = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output, 
+# then it will generate one additional man file for each entity 
+# documented in the real man page(s). These additional files 
+# only source the real man page, but without them the man command 
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS              = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will 
+# generate an XML file that captures the structure of 
+# the code including all documentation.
+
+GENERATE_XML           = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put. 
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be 
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT             = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_SCHEMA             = 
+
+# The XML_DTD tag can be used to specify an XML DTD, 
+# which can be used by a validating XML parser to check the 
+# syntax of the XML files.
+
+XML_DTD                = 
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will 
+# dump the program listings (including syntax highlighting 
+# and cross-referencing information) to the XML output. Note that 
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING     = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will 
+# generate an AutoGen Definitions (see autogen.sf.net) file 
+# that captures the structure of the code including all 
+# documentation. Note that this feature is still experimental 
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF   = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will 
+# generate a Perl module file that captures the structure of 
+# the code including all documentation. Note that this 
+# feature is still experimental and incomplete at the 
+# moment.
+
+GENERATE_PERLMOD       = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate 
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able 
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX          = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be 
+# nicely formatted so it can be parsed by a human reader.  This is useful 
+# if you want to understand what is going on.  On the other hand, if this 
+# tag is set to NO the size of the Perl module output will be much smaller 
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY         = YES
+
+# The names of the make variables in the generated doxyrules.make file 
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX. 
+# This is useful so different doxyrules.make files included by the same 
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX = 
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor   
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will 
+# evaluate all C-preprocessor directives found in the sources and include 
+# files.
+
+ENABLE_PREPROCESSING   = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro 
+# names in the source code. If set to NO (the default) only conditional 
+# compilation will be performed. Macro expansion can be done in a controlled 
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION        = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES 
+# then the macro expansion is limited to the macros specified with the 
+# PREDEFINED and EXPAND_AS_PREDEFINED tags.
+
+EXPAND_ONLY_PREDEF     = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files 
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES        = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that 
+# contain include files that are not input files but should be processed by 
+# the preprocessor.
+
+INCLUDE_PATH           = 
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard 
+# patterns (like *.h and *.hpp) to filter out the header-files in the 
+# directories. If left blank, the patterns specified with FILE_PATTERNS will 
+# be used.
+
+INCLUDE_FILE_PATTERNS  = 
+
+# The PREDEFINED tag can be used to specify one or more macro names that 
+# are defined before the preprocessor is started (similar to the -D option of 
+# gcc). The argument of the tag is a list of macros of the form: name 
+# or name=definition (no spaces). If the definition and the = are 
+# omitted =1 is assumed.
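+# As a purely illustrative (hypothetical) example, function attribute 
+# markers could be hidden from the parser with: 
+#   PREDEFINED = __attribute__(x)= 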
+
+PREDEFINED             = 
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then 
+# this tag can be used to specify a list of macro names that should be expanded. 
+# The macro definition that is found in the sources will be used. 
+# Use the PREDEFINED tag if you want to use a different macro definition.
+
+EXPAND_AS_DEFINED      = 
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then 
+# doxygen's preprocessor will remove all function-like macros that are alone 
+# on a line, have an all uppercase name, and do not end with a semicolon. Such 
+# function macros are typically used for boiler-plate code, and will confuse the 
+# parser if not removed.
+
+SKIP_FUNCTION_MACROS   = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references   
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles. 
+# Optionally an initial location of the external documentation 
+# can be added for each tagfile. The format of a tag file without 
+# this location is as follows: 
+#   TAGFILES = file1 file2 ... 
+# Adding location for the tag files is done as follows: 
+#   TAGFILES = file1=loc1 "file2 = loc2" ... 
+# where "loc1" and "loc2" can be relative or absolute paths or 
+# URLs. If a location is present for each tag, the installdox tool 
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path).
+# If a tag file is not located in the directory in which doxygen 
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES               = 
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create 
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE       = 
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed 
+# in the class index. If set to NO only the inherited external classes 
+# will be listed.
+
+ALLEXTERNALS           = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed 
+# in the modules index. If set to NO, only the current project's groups will 
+# be listed.
+
+EXTERNAL_GROUPS        = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script 
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH              = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool   
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will 
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base or 
+# super classes. Setting the tag to NO turns the diagrams off. Note that this 
+# option is superseded by the HAVE_DOT option below. This is only a fallback. It is 
+# recommended to install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS         = YES
+
+# If set to YES, the inheritance and collaboration graphs will hide 
+# inheritance and usage relations if the target is undocumented 
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS   = YES
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is 
+# available from the path. This tool is part of Graphviz, a graph visualization 
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section 
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT               = YES
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect inheritance relations. Setting this tag to YES will force the 
+# CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH            = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen 
+# will generate a graph for each documented class showing the direct and 
+# indirect implementation dependencies (inheritance, containment, and 
+# class reference variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH    = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and 
+# collaboration diagrams in a style similar to the OMG's Unified Modeling 
+# Language.
+
+UML_LOOK               = NO
+
+# If set to YES, the inheritance and collaboration graphs will show the 
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS     = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT 
+# tags are set to YES then doxygen will generate a graph for each documented 
+# file showing the direct and indirect include dependencies of the file with 
+# other documented files.
+
+INCLUDE_GRAPH          = YES
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and 
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each 
+# documented header file showing the documented files that directly or 
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH      = YES
+
+# If the CALL_GRAPH and HAVE_DOT tags are set to YES then doxygen will 
+# generate a call dependency graph for every global function or class method. 
+# Note that enabling this option will significantly increase the time of a run. 
+# So in most cases it will be better to enable call graphs for selected 
+# functions only using the \callgraph command.
+
+CALL_GRAPH             = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen 
+# will show a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY    = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images 
+# generated by dot. Possible values are png, jpg, or gif
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT       = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be 
+# found. If left blank, it is assumed the dot tool can be found on the path.
+
+DOT_PATH               = 
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that 
+# contain dot files that are included in the documentation (see the 
+# \dotfile command).
+
+DOTFILE_DIRS           = 
+
+# The MAX_DOT_GRAPH_WIDTH tag can be used to set the maximum allowed width 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_WIDTH    = 1024
+
+# The MAX_DOT_GRAPH_HEIGHT tag can be used to set the maximum allowed height 
+# (in pixels) of the graphs generated by dot. If a graph becomes larger than 
+# this value, doxygen will try to truncate the graph, so that it fits within 
+# the specified constraint. Beware that most browsers cannot cope with very 
+# large images.
+
+MAX_DOT_GRAPH_HEIGHT   = 1024
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the 
+# graphs generated by dot. A depth value of 3 means that only nodes reachable 
+# from the root by following a path via at most 3 edges will be shown. Nodes that 
+# lie farther from the root node will be omitted. Note that setting this option to 
+# 1 or 2 may greatly reduce the computation time needed for large code bases. Also 
+# note that a graph may be further truncated if the graph's image dimensions are 
+# not sufficient to fit the graph (see MAX_DOT_GRAPH_WIDTH and MAX_DOT_GRAPH_HEIGHT). 
+# If 0 is used for the depth value (the default), the graph is not depth-constrained.
+
+MAX_DOT_GRAPH_DEPTH    = 0
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will 
+# generate a legend page explaining the meaning of the various boxes and 
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND        = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will 
+# remove the intermediate dot files that are used to generate 
+# the various graphs.
+
+DOT_CLEANUP            = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to the search engine   
+#---------------------------------------------------------------------------
+
+# The SEARCHENGINE tag specifies whether or not a search engine should be 
+# used. If set to NO the values of all tags below this one will be ignored.
+
+SEARCHENGINE           = NO
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec
new file mode 100644
index 0000000..d9e189b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/doc/TDMA.spec
@@ -0,0 +1,597 @@
+                     TDMA Media Access Control Discipline
+                     ====================================
+
+                                Revision: 2.1a
+
+
+This document describes the second generation of a TDMA-based (Time Division
+Multiple Access) real-time media access control discipline for RTnet. Clock
+synchronisation is managed by a participant acting as a master. Additional
+backup masters are supported in order to compensate a failing master. Slave
+participants can be added in arbitrary order without influence on existing
+real-time communication. In the following, the TDMA protocol and its
+management interface are specified.
+
+
+
+Sequence Diagram
+================
+
+Normal Startup
+--------------
+
+  Master                             Slave A                Slave B
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Detect                           |                      |
+   | | Other Master                     |                      |   INIT
+   | | (3 x Cycle Period)               |                      |   PHASE
+   . .                                  |                      |
+   . .                                  |                      |
+   | |                                  |                      |
+   +-+   Synchronisation (broadcast)    |                      |
+- - | --------------------------------> | -------------------> | - - - - - - -
+    |                                  +-+                    +-+
+    |                                  | | Start              | | Start
+    |                                  | | Slot Timer         | | Slot Timer
+    |                                  +-+                    +-+
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+    |                                  +-+                     |
+    |                                  | | Slot                |
+    |                                  | | Timeout             |
+    |   Calibration Request (unicast)  +-+                     |
+    | <-------------------------------- |                      |
+   +-+                                  |                      |
+   | | Queue                            |                      |
+   | | Reply                            |                      |
+   +-+                                  |                      |
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+    |                                   |                     +-+
+    |                                   |                     | | Slot
+    |                                   |                     | | Timeout
+    |   Calibration Request (unicast)   |                     +-+
+    | <---------------------------------|--------------------- |
+   +-+                                  |                      |
+   | | Queue                            |                      |
+   | | Reply                            |                      |
+   +-+                                  |                      |
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Cycle Timeout                    |                      |
+   | |                                  |                      |
+   +-+   Synchronisation (broadcast)    |                      |   CALIBRATION
+    | --------------------------------->|--------------------->|   PHASE
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Slot Timeout                     |                      |
+   | |                                  |                      |
+   +-+  Calibration Reply (unicast)     |                      |
+    | --------------------------------> |                      |
+    |                                  +-+                     |
+    |                                  | | Calculate           |
+    |                                  | | Transmission        |
+    |                                  | | Delay               |
+    |                                  +-+                     |
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Slot Timeout                     |                      |
+   | |                                  |                      |
+   +-+  Calibration Reply (unicast)     |                      |
+    | ----------------------------------|--------------------> |
+    |                                   |                     +-+
+    |                                   |                     | | Calculate
+    |                                   |                     | | Transmission
+    |                                   |                     | | Delay
+    |                                   |                     +-+
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Cycle Timeout                    |                      |
+   | |                                  |                      |
+   +-+   Synchronisation (broadcast)    |                      |
+- - | --------------------------------> | -------------------> | - - - - - - -
+    |                                   |                      |
+
+Note: The calibration phase is repeated several times in order to estimate the
+      average transmission delay. The number of repetitions depends on the
+      expected variance of the measurements and has to be chosen appropriately.
+
+
+
+Failing Master
+--------------
+
+  Master                          Backup Master              Slave
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Cycle Timeout                    |                      |
+   | |                                  |                      |
+   +-+   Synchronisation (broadcast)    |                      |
+    | --------------------------------> | -------------------> |
+    |                                  +-+                    +-+
+    |                                  | | Sync With          | | Start
+    |                                  | | Alive Master       | | Slot Timer
+    |                                  +-+                    +-+
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+    |                                  +-+                     |
+    |                                  | | Backup Cycle        |
+    |                                  | | Timeout             |
+    |                                  | | (ignore)            |
+    |                                  +-+                     |
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+    |                                   |                     +-+
+    |                                   |                     | | Slot
+    |                                   |                     | | Timeout
+    |                                   |           Payload   +-+
+    |                                   |        <------------ |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+    X  Failure                          |                      |
+                                        .                      .
+                                        .                      .
+                                        |                      |
+                                       +-+                     |
+                                       | | Backup              |
+                                       | | Cycle               |
+                                       | | Timeout             |
+         Synchronisation (broadcast)   +-+                     |
+     <--------------------------------- | -------------------> |
+                                        |                     +-+
+                                        |                     | | Start
+                                        |                     | | Slot Timer
+                                        |                     +-+
+                                        |                      |
+                                        .                      .
+                                        .                      .
+                                        |                      |
+                                        |                     +-+
+                                        |                     | | Slot
+                                        |                     | | Timeout
+                                        |           Payload   +-+
+                                        |        <------------ |
+                                        |                      |
+
+
+
+Master Restart
+--------------
+
+  Master                          Backup Master              Slave
+   | |                                  |                      |
+   | |                                 +-+                     |
+   | | Detect                          | | Backup              |   INIT
+   | | Other Master                    | | Cycle               |   PHASE
+   | |                                 | | Timeout             |
+   +-+   Synchronisation (broadcast)   +-+                     |
+- - | <-------------------------------- | -------------------> | - - - - - - -
+   +-+                                  |                     +-+
+   | | Start                            |                     | | Start
+   | | Slot Timer                       |                     | | Slot Timer
+   +-+                                  |                     +-+
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Slot Timeout                     |                      |
+   | |                                  |                      |
+   +-+  Calibration Request (unicast)   |                      |   CALIBRATION
+    | --------------------------------> |                      |   PHASE
+    |                                  +-+                     |
+    |                                  | | Queue               |
+    |                                  | | Reply               |
+    |                                  +-+                     |
+    |                                   |                      |
+    .                                   .                      .
+    .        <continue calibration as described above>         .
+    .                                   .                      .
+    |                                   |                      |
+   +-+                                  |                      |
+   | | Cycle Timeout                    |                      |
+   | |                                  |                      |
+   +-+   Synchronisation (broadcast)    |                      |
+- - | --------------------------------> | -------------------> | - - - - - - -
+    |                                  +-+                    +-+
+    |                                  | | Sync With          | | Start
+    |                                  | | Alive Master       | | Slot Timer
+    |                                  +-+                    +-+
+    |                                   |                      |
+    .                                   .                      .
+    .                                   .                      .
+    |                                   |                      |
+    |                                  +-+                     |
+    |                                  | | Backup Cycle        |
+    |                                  | | Timeout             |
+    |                                  | | (ignore)            |
+    |                                  +-+                     |
+    |                                   |                      |
+
+
+
+Frame Formats
+=============
+
+TDMA frames are introduced by the generic RTmac discipline header as described
+in the related document. The hexadecimal RTmac type identifier is 0x0001. All
+frame fields are encoded in network byte order (big endian). Version
+identifiers of TDMA frames shall only be changed if the format becomes
+incompatible with the previous revision. Currently, all frames carry the
+hexadecimal value 0x0201.
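+
+For illustration, the leading fields shared by all TDMA frames might be
+mapped onto a C structure as sketched below. The struct and helper names
+are hypothetical and not part of RTnet; only the field widths and the
+big-endian encoding are taken from this specification.
+
+    #include <stdint.h>
+    #include <arpa/inet.h>      /* ntohs() */
+
+    /* Fields common to all TDMA frames (network byte order). */
+    struct tdma_frame_head {
+        uint16_t version;       /* currently 0x0201 */
+        uint16_t frame_id;      /* 0x0000 sync, 0x0010/0x0011 calibration */
+    } __attribute__((packed));
+
+    /* Return the host-order frame ID, or -1 on a version mismatch. */
+    static int tdma_check_head(const struct tdma_frame_head *head)
+    {
+        if (ntohs(head->version) != 0x0201)
+            return -1;
+        return ntohs(head->frame_id);
+    }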
+
+
+
+Synchronisation Frame
+---------------------
+
+ +------------------+------------------+----------------------+ - -
+ | Version: 0x0201  | Frame ID: 0x0000 |     Cycle Number     |
+ |    (2 bytes)     |    (2 bytes)     |      (4 bytes)       |
+ +------------------+------------------+----------------------+ - -
+  - - +-----------------------------+-----------------------------+
+      |   Transmission Time Stamp   | Scheduled Transmission Time |
+      |          (8 bytes)          |          (8 bytes)          |
+  - - +-----------------------------+-----------------------------+
+
+Synchronisation frames are sent as broadcast by the currently active master.
+They signal the beginning of a new elementary cycle and distribute the value
+of the reference clock.
+
+The Cycle Number field is incremented by one for every new cycle, and it is
+reset to zero on overflow. The Transmission Time Stamp contains the value of
+the reference clock, typically located on the master, in nanoseconds. It shall
+be acquired with minimum jitter relative to the physical packet transmission
+time. The Scheduled Transmission Time, also in nanoseconds, contains the
+reference time when the transmission was intended to be performed.
+
+By comparing the Transmission Time Stamp and the Scheduled Transmission Time,
+receivers of Synchronisation frames are able to reduce the deviation between
+claimed and actual transmission time on the master station. This helps to
+improve global time synchronisation. Furthermore, backup masters use the main
+master's Scheduled Transmission Time value when submitting their replacement
+Synchronisation frames, although these frames are scheduled for a different
+time slot. As a result, the slave will automatically compensate for the time
+shift of Synchronisation frames sent by backup masters.
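+
+For illustration only, the layout above can be mirrored by a packed C
+structure (struct and field names are ours, not part of the specification;
+the __beNN big-endian types are those of <linux/types.h>):
+
+        struct tdma_frm_sync {
+                __be16 version;       /* 0x0201 */
+                __be16 id;            /* Frame ID: 0x0000 */
+                __be32 cycle_no;      /* Cycle Number */
+                __be64 xmit_stamp;    /* Transmission Time Stamp [ns] */
+                __be64 sched_xmit;    /* Scheduled Transmission Time [ns] */
+        } __attribute__((packed));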
+
+
+
+Calibration Frames
+------------------
+
+Request Calibration Frame:
+ +------------------+------------------+-----------------------------+ - -
+ | Version: 0x0201  | Frame ID: 0x0010 |   Transmission Time Stamp   |
+ |    (2 bytes)     |    (2 bytes)     |          (8 bytes)          |
+ +------------------+------------------+-----------------------------+ - -
+  - - +----------------------+-----------------------------+
+      |     Reply Cycle      |      Reply Slot Offset      |
+      |   Number (4 bytes)   |          (8 bytes)          |
+  - - +----------------------+-----------------------------+
+
+Reply Calibration Frame:
+ +------------------+------------------+-----------------------------+ - -
+ | Version: 0x0201  | Frame ID: 0x0011 |  Request Transmission Time  |
+ |    (2 bytes)     |    (2 bytes)     |          (8 bytes)          |
+ +------------------+------------------+-----------------------------+ - -
+  - - +-----------------------------+-----------------------------+
+      |    Reception Time Stamp     |   Transmission Time Stamp   |
+      |          (8 bytes)          |          (8 bytes)          |
+  - - +-----------------------------+-----------------------------+
+
+Calibration frames are sent as unicast to the respective receiver. They are
+used to estimate the average delay between the transmission of Synchronisation
+frames by a master and their reception on the slave side. Request Calibration
+frames are sent by participants to the currently active master. The master
+returns one Reply Calibration frame for every request frame in a time slot
+specified by the sender.
+
+The Transmission Time Stamp fields in both frame types contain the value of
+the sender's local clock in nanoseconds. It shall be acquired with minimum
+jitter relative to the physical packet transmission time. The slave determines
+in which cycle (Reply Cycle Number) and with which offset relative to the
+cycle's Synchronisation frame (Reply Slot Offset) the master shall send the
+reply. Only time slots actually owned by the slave can be specified here, and
+the slave must not use the released slots for its own transmissions
+afterwards.
+
+The Transmission Time Stamp field of the Request Calibration frame is copied
+into the Request Transmission Time field of the Reply Calibration frame. On
+reception of a request frame, a local time stamp is acquired and stored in the
+Reception Time Stamp field of the corresponding reply frame. The acquisition
+shall be performed with minimum jitter relative to the physical packet
+reception. All times are in nanoseconds.
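+
+Again for illustration only, packed C views of both calibration frames
+(struct and field names are ours; __beNN types from <linux/types.h>):
+
+        struct tdma_frm_req_cal {
+                __be16 version;           /* 0x0201 */
+                __be16 id;                /* Frame ID: 0x0010 */
+                __be64 xmit_stamp;        /* Transmission Time Stamp [ns] */
+                __be32 reply_cycle;       /* Reply Cycle Number */
+                __be64 reply_slot_offset; /* Reply Slot Offset [ns] */
+        } __attribute__((packed));
+
+        struct tdma_frm_rpl_cal {
+                __be16 version;           /* 0x0201 */
+                __be16 id;                /* Frame ID: 0x0011 */
+                __be64 request_xmit_time; /* Request Transmission Time [ns] */
+                __be64 reception_stamp;   /* Reception Time Stamp [ns] */
+                __be64 xmit_stamp;        /* Transmission Time Stamp [ns] */
+        } __attribute__((packed));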
+
+
+
+Time Arithmetic
+===============
+
+Synchronisation on Global Clock
+-------------------------------
+
+            Master                    Slave
+              |                         |
+    T_sched  -|-   -   -   -   -   -   -|-  T'_sched
+              |                         |
+              |                         |
+     T_xmit  -|-    Synchronisation     |
+       /|\    | \        Frame          |
+        |     |  \       ---->          |
+     t_trans  |   \_________________    |
+        |     |                     \   |
+        |     |                      \  |
+       \|/    |                       \ |
+     T_recv  -|-   -   -   -   -   -   -|-  T'_recv
+              |                         |
+              .                         .
+              .                         .
+              |                         |
+          T  -|-   -   -   -   -   -   -|-  T'
+              |                         |
+
+Calculate the clock offset:
+        t_offs = T_recv - T'_recv =
+               = T_xmit + t_trans - T'_recv
+
+Calculate a global time:
+        T = T' + t_offs
+
+Calculate a time relative to a Synchronisation frame:
+        T' = T'_sched + t =
+           = T_sched - t_offs + t
+
+Symbols:
+        T_sched         Scheduled transmission time (global clock) of the
+                        Synchronisation frame. It is distributed in the
+                        Scheduled Transmission Time field of the
+                        Synchronisation frame.
+        T'_sched        T_sched in units of the slave's local clock
+        T_xmit          Actual transmission time (global clock) of the
+                        Synchronisation frame. It is distributed in the
+                        Transmission Time Stamp field of the
+                        Synchronisation frame.
+        t_trans         Average time between transmission of a frame by the
+                        master and its reception by the slave. This value is
+                        acquired during the calibration phase.
+        T_recv          Reception time of the Synchronisation frame in units
+                        of the global clock.
+        T'_recv         Reception time of the Synchronisation frame in units
+                        of the slave's local clock.
+        T, T'           An arbitrary time in global and local clock units.
+        t_offs          Offset between local and global clock.
+        t               An arbitrary offset relative to a Synchronisation
+                        frame
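+
+Put together in C, the three calculations above might look as follows
+(helper names are illustrative, not part of the specification; all values
+are signed 64-bit nanosecond counts, matching the frame fields):
+
+        #include <stdint.h>
+
+        /* t_offs = T_recv - T'_recv = T_xmit + t_trans - T'_recv */
+        static int64_t clock_offset(int64_t t_xmit, int64_t t_trans,
+                                    int64_t t_recv_local)
+        {
+                return t_xmit + t_trans - t_recv_local;
+        }
+
+        /* T = T' + t_offs */
+        static int64_t local_to_global(int64_t t_local, int64_t t_offs)
+        {
+                return t_local + t_offs;
+        }
+
+        /* T' = T_sched - t_offs + t */
+        static int64_t slot_time_local(int64_t t_sched, int64_t t_offs,
+                                       int64_t t)
+        {
+                return t_sched - t_offs + t;
+        }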
+
+
+
+Calibration of the Transmission Delay
+-------------------------------------
+
+            Master                    Slave
+              |                         |
+              |       Calibration      -|-  T'_xmit_req
+              |      Request Frame    / |
+              |          <----       /  |
+              |    _________________/   |
+              |   /                     |
+              |  /                      |
+              | /                       |
+ T_recv_req  -|-                        |
+              |                         |
+              .                         .
+              .                         .
+              |                         |
+ T_xmit_rpl  -|-      Calibration       |
+              | \     Reply Frame       |
+              |  \       ---->          |
+              |   \_________________    |
+              |                     \   |
+              |                      \  |
+              |                       \ |
+              |                        -|-  T'_recv_rpl
+              |                         |
+
+Calculate the transmission delay:
+        t_trans = 1/2 * ((T'_recv_rpl - T'_xmit_req) -
+                         (T_xmit_rpl - T_recv_req))
+
+The overall transmission delay shall be averaged over several calibration
+rounds. As the measurement is only performed against the main master, backup
+masters should be selected so that they show similar timing characteristics.
+
+Symbols:
+        T'_xmit_req     Time stamp taken on the transmission of a Calibration
+                        Request frame in units of the slave's local clock.
+                        This value is stored in the Transmission Time Stamp
+                        field of the request frame and later copied to the
+                        Request Transmission Time field of the corresponding
+                        reply frame.
+        T_recv_req      Time stamp taken on the reception of a Calibration
+                        Request frame in units of the master's local clock.
+                        This value is stored in the Reception Time Stamp field
+                        of the Calibration Reply frame.
+        T_xmit_rpl      Time stamp taken on the transmission of a Calibration
+                        Reply frame in units of the master's local clock. This
+                        value is stored in the Transmission Time Stamp field
+                        of the Calibration Reply frame.
+        T'_recv_rpl     Time stamp taken on the reception of a Calibration
+                        Reply frame in units of the slave's local clock.
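+
+A minimal C sketch of this round-trip computation (names are illustrative;
+a real implementation would additionally average the result over several
+rounds as described above):
+
+        #include <stdint.h>
+
+        /* t_trans = 1/2 * ((T'_recv_rpl - T'_xmit_req) -
+         *                  (T_xmit_rpl  - T_recv_req)) */
+        static int64_t transmission_delay(int64_t t_xmit_req_slave,
+                                          int64_t t_recv_req_master,
+                                          int64_t t_xmit_rpl_master,
+                                          int64_t t_recv_rpl_slave)
+        {
+                int64_t round_trip = t_recv_rpl_slave - t_xmit_req_slave;
+                int64_t master_gap = t_xmit_rpl_master - t_recv_req_master;
+
+                return (round_trip - master_gap) / 2;
+        }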
+
+
+
+Time Slots
+==========
+
+A time slot can be used to transmit a single packet of up to a specified
+maximum size. This TDMA discipline revision supports flexible assignment of
+time slots to real-time network participants. It is now possible to use
+multiple slots per cycle. Furthermore, a slot can be shared between
+participants by occupying it only every Nth cycle. Besides at least one
+payload slot per participant, slots have to be reserved for the
+Synchronisation frame and, optionally, for one or more backup Synchronisation
+frames. The concrete timing strongly depends on the capabilities of all
+network participants. Therefore, timing requirements such as worst-case
+jitter or minimum slot gaps are not specified here.
+
+In contrast to earlier TDMA discipline revisions, the slave configuration is
+no longer distributed by the TDMA master. This means that the slaves have to
+be aware of their slot setup before sending any data to a TDMA-managed
+network. Therefore, the required settings either have to be stored on the
+slaves or, if a centralised management is desired, the RTnet configuration
+service RTcfg has to be used (see related specification for further details).
+
+
+
+Slot Identification and Selection
+---------------------------------
+
+NOTE: The following specifications are OPTIONAL. They describe the internal
+      realisation of this TDMA discipline as applied to the first
+      implementation in RTnet.
+
+Time slots carry an internal ID number, unique per participant. These numbers
+are used when determining the slot in which an outgoing packet shall be
+transmitted. The TDMA discipline contains no automatic scheduling mechanism.
+Instead, the sender, i.e. a user or a service, either explicitly provides a
+desired slot ID or a default slot is used.
+
+  Slot ID | Description
+ ---------+-----------------------------------------------------------------
+     0    | default slot for RT; also default NRT slot if slot 1 is missing
+     1    | non-RT slot; if missing, slot 0 is used
+     2    | user slots, used for explicitly scheduled packets
+     :    |
+
+
+
+Configuration Example
+---------------------
+
+An exemplary configuration consisting of two masters, one serving as backup,
+and three slaves is shown below. The slot period is expressed in the form
+<phasing>/<period>. For instance, 1/3 means that this slot is used in every
+first of three cycles, while 3/3 means in every third of three (a short C
+sketch of this rule follows the diagram).
+
+  +------+  +----------+  +---------+  +---------+  +----------+
+  |      |  | Master 2 |  | Slave A |  | Slave B |  | Master 1 |
+  | Sync |  |  Backup  |  | Slot 0  |  | Slot 0  |  |  Slot 0  |
+  |      |  |   Sync   |  | RT/NRT  |  |   RT    |  |  RT/NRT  |
+  | 1/1  |  |   1/1    |  |   1/1   |  |   1/1   |  |   1/1    |
+--+------+--+----------+--+---------+--+---------+--+----------+--...
+
+                                     +----------+
+                                     | Slave C  |
+                                     |  Slot 3  |
+                                     |    RT    |
+                                     |   3/3    |
+     +---------+                     +----------+
+     | Slave C |                     | Master 2 |
+     | Slot 0  |                     |  Slot 0  |
+     | RT/NRT  |                     |  RT/NRT  |
+     |   2/2   |                     |   2/3    |
+     +---------+        +---------+  +----------+             +------+
+     | Slave B |        | Slave C |  |  Slave A |             |      |
+     | Slot 1  |        | Slot 2  |  |  Slot 2  |             | Sync |
+     |   NRT   |        |   NRT   |  |    RT    |             |      |
+     |   1/2   |        |   1/4   |  |   1/3    |             | 1/1  |
+...--+---------+--------+---------+--+----------+-------------+------+-->
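+
+Assuming cycles are numbered from zero and a slot configured as
+<phasing>/<period> is occupied whenever cycle_no % period == phasing - 1
+(our reading of the diagram above, not a normative definition), the
+occupancy check reduces to:
+
+        #include <stdint.h>
+
+        /* returns non-zero if the slot is used in the given cycle */
+        static int slot_fires(uint32_t cycle_no, uint32_t phasing,
+                              uint32_t period)
+        {
+                return (cycle_no % period) == (phasing - 1);
+        }
+
+Under this convention, Master 2's shared slot 0 (2/3) fires in cycles
+1, 4, 7, ..., while Slave C's slot 3 (3/3) fires in cycles 2, 5, 8, ...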
+
+
+
+Management Interface
+====================
+
+NOTE: The following specifications are OPTIONAL. They describe the internal
+      realisation of this TDMA discipline as applied to the first
+      implementation in RTnet.
+
+The TDMA discipline is managed by the command line tool tdmacfg. Its usage is
+described below.
+
+
+
+Commands
+--------
+
+tdmacfg <dev> master <cycle_period> [-b <backup_offset>]
+        [-c <calibration_rounds>] [-i <max_slot_id>]
+        [-m <max_calibration_requests>]
+
+Starts a TDMA master on the specified device <dev>. The cycle period length is
+given in microseconds using the <cycle_period> parameter. If <backup_offset>
+is provided, the master becomes a backup system. In case the main master
+fails, the backup master with the smallest <backup_offset> will start sending
+Synchronisation frames with the specified offset in microseconds relative to
+the scheduled cycle start. <calibration_rounds> specifies the number of clock
+calibration requests the master will send to any other potentially already
+active master during startup. By default, 100 rounds are performed. The
+calibration will be performed when the first slot is added. By default, a
+master can handle up to 64 calibration requests at the same time. This value
+can be adapted by specifying the <max_calibration_requests> parameter. The
+largest usable slot ID can be set via <max_slot_id>; it is limited to 7 if
+this parameter is omitted.
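+
+For illustration, the following hypothetical invocations (device name and
+values are examples only) start a main master with a 5 ms cycle and, on a
+second station, a backup master that would send its replacement
+Synchronisation frame 200 us into the cycle:
+
+        tdmacfg rteth0 master 5000
+        tdmacfg rteth0 master 5000 -b 200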
+
+tdmacfg <dev> slave [-c <calibration_rounds>] [-i <max_slot_id>]
+
+Starts a TDMA slave on the specified device <dev>. <calibration_rounds>
+specifies the number of clock calibration requests the slave sends to the
+active master during startup. By default, 100 rounds are performed. The
+calibration will be performed when the first slot is added. The largest
+usable slot ID can be set via <max_slot_id>; it is limited to 7 if this
+parameter is omitted.
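+
+An exemplary slave setup (values are illustrative only) performing 200
+calibration rounds and allowing slot IDs up to 15:
+
+        tdmacfg rteth0 slave -c 200 -i 15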
+
+tdmacfg <dev> slot <id> [<offset> [-p <phasing>/<period>] [-s <size>]
+        [-j <joint_slot>] [-l <calibration_log_file>]
+        [-t <calibration_timeout>]]
+
+Adds, reconfigures, or removes a time slot for outgoing data on a started TDMA
+master or slave. <id> is used to distinguish between multiple slots. See the
+slot ID table above for predefined values. If <offset> is given, the time
+slot is added or modified to send data with the specified offset in
+microseconds relative to the scheduled cycle start; if omitted, the slot is
+removed from the station's configuration.
+
+By default, a slot will be used in every cycle. When providing <phasing> and
+<period>, the slot will only be occupied in every <phasing>-th of <period>
+cycles. By assigning e.g. 1/2 to one and 2/2 to another slot, the usage of the
+physical time slot will alternate between both slot owners. The <size>
+parameter limits the maximum payload size in bytes which can be transmitted
+within this slot. If no <size> parameter is provided, the maximum size the
+hardware supports is applied. To share the same output queue among several
+slots, secondary slots can be attached to a primary <joint_slot>. The slot
+sizes must match for this purpose.
+
+The addition of the station's first slot will trigger the clock calibration
+process. To store the results of each calibration handshake, a
+<calibration_log_file> can be provided. By default, this command will not
+terminate until the calibration is completed. The <calibration_timeout>
+parameter can be used to specify an upper time limit.
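+
+Exemplary slot setups (offsets and sizes are illustrative only): slot 0
+placed 200 us into every cycle, plus a non-RT slot 1 at 400 us that is
+shared with another station by occupying only every first of two cycles:
+
+        tdmacfg rteth0 slot 0 200
+        tdmacfg rteth0 slot 1 400 -p 1/2 -s 1500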
+
+tdmacfg <dev> detach
+
+Detaches a master or slave from the given device <dev>. After this command,
+write access to the device is uncoordinated again and may interfere with
+remaining real-time network participants.
+
+
+2004, 2005, Jan Kiszka <jan.kiszka-at-web.de>
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c
new file mode 100644
index 0000000..c335b30
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/8139too.c
@@ -0,0 +1,1733 @@
+/***
+ * rt_8139too.c - Realtime driver for RTL8139-based network adapters;
+ * for more information, look to the end of this file or to '8139too.c'
+ *
+ * Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+ /*
+  * This Version was modified by Fabian Koch
+  * It includes a different implementation of the 'cards' module parameter
+  * we are using an array of integers to determine which cards to use
+  * for RTnet (e.g. cards=0,1,0)
+  *
+  * Thanks to Jan Kiszka for this idea
+  */
+
+#define DRV_NAME            "rt_8139too"
+#define DRV_VERSION         "0.9.24-rt0.7"
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/compiler.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/if.h>
+#include <linux/ethtool.h>
+#include <linux/rtnetlink.h>
+#include <linux/delay.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/completion.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/io.h>
+
+/* *** RTnet *** */
+#include <rtnet_port.h>
+
+#define MAX_UNITS               8
+#define DEFAULT_RX_POOL_SIZE    16
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+static int media[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = -1 };
+static unsigned int rx_pool_size = DEFAULT_RX_POOL_SIZE;
+module_param_array(cards, int, NULL, 0444);
+module_param_array(media, int, NULL, 0444);
+module_param(rx_pool_size, uint, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+MODULE_PARM_DESC(media, "8139too: Bits 4+9: force full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC(rx_pool_size, "number of receive buffers");
+
+/* *** RTnet *** */
+
+
+#define RTL8139_DRIVER_NAME   DRV_NAME " Fast Ethernet driver " DRV_VERSION
+#define PFX DRV_NAME ": "
+
+/* enable PIO instead of MMIO, if CONFIG_8139TOO_PIO is selected */
+/* *** RTnet ***
+#ifdef CONFIG_8139TOO_PIO
+#define USE_IO_OPS 1
+#endif
+ *** RTnet *** */
+
+/* Size of the in-memory receive ring. */
+#define RX_BUF_LEN_IDX        2        /* 0==8K, 1==16K, 2==32K, 3==64K */
+#define RX_BUF_LEN        (8192 << RX_BUF_LEN_IDX)
+#define RX_BUF_PAD        16
+#define RX_BUF_WRAP_PAD 2048 /* spare padding to handle lack of packet wrap */
+#define RX_BUF_TOT_LEN        (RX_BUF_LEN + RX_BUF_PAD + RX_BUF_WRAP_PAD)
+
+/* Number of Tx descriptor registers. */
+#define NUM_TX_DESC        4
+
+/* max supported ethernet frame size -- must be at least (rtdev->mtu+14+4).*/
+#define MAX_ETH_FRAME_SIZE        1536
+
+/* Size of the Tx bounce buffers -- must be at least (rtdev->mtu+14+4). */
+#define TX_BUF_SIZE        MAX_ETH_FRAME_SIZE
+#define TX_BUF_TOT_LEN        (TX_BUF_SIZE * NUM_TX_DESC)
+
+/* PCI Tuning Parameters
+   Threshold is bytes transferred to chip before transmission starts. */
+#define TX_FIFO_THRESH 256        /* In bytes, rounded down to 32 byte units. */
+
+/* The following settings are log_2(bytes)-4:  0 == 16 bytes .. 6==1024, 7==end of packet. */
+#define RX_FIFO_THRESH        7        /* Rx buffer level before first PCI xfer.  */
+#define RX_DMA_BURST        7        /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST        6        /* Maximum PCI burst, '6' is 1024 */
+#define TX_RETRY        8        /* 0-15.  retries = 16 + (TX_RETRY * 16) */
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (6*HZ)
+
+
+enum {
+	HAS_MII_XCVR = 0x010000,
+	HAS_CHIP_XCVR = 0x020000,
+	HAS_LNK_CHNG = 0x040000,
+};
+
+#define RTL_MIN_IO_SIZE 0x80
+#define RTL8139B_IO_SIZE 256
+
+#define RTL8129_CAPS        HAS_MII_XCVR
+#define RTL8139_CAPS        HAS_CHIP_XCVR|HAS_LNK_CHNG
+
+/* Indexes into board_info[] below; must stay in sync with that table.
+ * The numerous RTL8139 clones (SMC1211TX, DELTA8139, ADDTRON8139,
+ * DFE538TX, DFE690TXD, FE2000VX, ALLIED8139, ...) are all matched to
+ * the generic RTL8139 entry via the PCI device table. */
+typedef enum {
+	RTL8139 = 0,
+	RTL8129,
+} board_t;
+
+
+/* indexed by board_t, above */
+static struct {
+	const char *name;
+	u32 hw_flags;
+} board_info[] = {
+	{ "RealTek RTL8139", RTL8139_CAPS },
+	{ "RealTek RTL8129", RTL8129_CAPS },
+};
+
+
+static struct pci_device_id rtl8139_pci_tbl[] = {
+	{0x10ec, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x10ec, 0x8138, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1113, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1500, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x4033, 0x1360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1186, 0x1300, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1186, 0x1340, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x13d1, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1259, 0xa117, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1259, 0xa11e, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x14ea, 0xab06, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x14ea, 0xab07, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x11db, 0x1234, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1432, 0x9130, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x02ac, 0x1012, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x018a, 0x0106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x126c, 0x1211, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x1743, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+	{0x021b, 0x8139, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+
+#ifdef CONFIG_SH_SECUREEDGE5410
+	/* Bogus 8139 silicon reports 8129 without external PROM :-( */
+	{0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8139 },
+#endif
+#ifdef CONFIG_8139TOO_8129
+	{0x10ec, 0x8129, PCI_ANY_ID, PCI_ANY_ID, 0, 0, RTL8129 },
+#endif
+
+	/* some crazy cards report invalid vendor ids like
+	 * 0x0001 here.  The other ids are valid and constant,
+	 * so we simply don't match on the main vendor id.
+	 */
+	{PCI_ANY_ID, 0x8139, 0x10ec, 0x8139, 0, 0, RTL8139 },
+	{PCI_ANY_ID, 0x8139, 0x1186, 0x1300, 0, 0, RTL8139 },
+	{PCI_ANY_ID, 0x8139, 0x13d1, 0xab06, 0, 0, RTL8139 },
+
+	{0,}
+};
+MODULE_DEVICE_TABLE (pci, rtl8139_pci_tbl);
+
+/* The rest of these values should never change. */
+
+/* Symbolic offsets to registers. */
+enum RTL8139_registers {
+	MAC0 = 0,                /* Ethernet hardware address. */
+	MAR0 = 8,                /* Multicast filter. */
+	TxStatus0 = 0x10,        /* Transmit status (Four 32bit registers). */
+	TxAddr0 = 0x20,                /* Tx descriptors (also four 32bit). */
+	RxBuf = 0x30,
+	ChipCmd = 0x37,
+	RxBufPtr = 0x38,
+	RxBufAddr = 0x3A,
+	IntrMask = 0x3C,
+	IntrStatus = 0x3E,
+	TxConfig = 0x40,
+	ChipVersion = 0x43,
+	RxConfig = 0x44,
+	Timer = 0x48,                /* A general-purpose counter. */
+	RxMissed = 0x4C,        /* 24 bits valid, write clears. */
+	Cfg9346 = 0x50,
+	Config0 = 0x51,
+	Config1 = 0x52,
+	FlashReg = 0x54,
+	MediaStatus = 0x58,
+	Config3 = 0x59,
+	Config4 = 0x5A,                /* absent on RTL-8139A */
+	HltClk = 0x5B,
+	MultiIntr = 0x5C,
+	TxSummary = 0x60,
+	BasicModeCtrl = 0x62,
+	BasicModeStatus = 0x64,
+	NWayAdvert = 0x66,
+	NWayLPAR = 0x68,
+	NWayExpansion = 0x6A,
+	/* Undocumented registers, but required for proper operation. */
+	FIFOTMS = 0x70,                /* FIFO Control and test. */
+	CSCR = 0x74,                /* Chip Status and Configuration Register. */
+	PARA78 = 0x78,
+	PARA7c = 0x7c,                /* Magic transceiver parameter register. */
+	Config5 = 0xD8,                /* absent on RTL-8139A */
+};
+
+enum ClearBitMasks {
+	MultiIntrClear = 0xF000,
+	ChipCmdClear = 0xE2,
+	Config1Clear = (1<<7)|(1<<6)|(1<<3)|(1<<2)|(1<<1),
+};
+
+enum ChipCmdBits {
+	CmdReset = 0x10,
+	CmdRxEnb = 0x08,
+	CmdTxEnb = 0x04,
+	RxBufEmpty = 0x01,
+};
+
+/* Interrupt register bits, using my own meaningful names. */
+enum IntrStatusBits {
+	PCIErr = 0x8000,
+	PCSTimeout = 0x4000,
+	RxFIFOOver = 0x40,
+	RxUnderrun = 0x20,
+	RxOverflow = 0x10,
+	TxErr = 0x08,
+	TxOK = 0x04,
+	RxErr = 0x02,
+	RxOK = 0x01,
+
+	RxAckBits = RxFIFOOver | RxOverflow | RxOK,
+};
+
+enum TxStatusBits {
+	TxHostOwns = 0x2000,
+	TxUnderrun = 0x4000,
+	TxStatOK = 0x8000,
+	TxOutOfWindow = 0x20000000,
+	TxAborted = 0x40000000,
+	TxCarrierLost = 0x80000000,
+};
+enum RxStatusBits {
+	RxMulticast = 0x8000,
+	RxPhysical = 0x4000,
+	RxBroadcast = 0x2000,
+	RxBadSymbol = 0x0020,
+	RxRunt = 0x0010,
+	RxTooLong = 0x0008,
+	RxCRCErr = 0x0004,
+	RxBadAlign = 0x0002,
+	RxStatusOK = 0x0001,
+};
+
+/* Bits in RxConfig. */
+enum rx_mode_bits {
+	AcceptErr = 0x20,
+	AcceptRunt = 0x10,
+	AcceptBroadcast = 0x08,
+	AcceptMulticast = 0x04,
+	AcceptMyPhys = 0x02,
+	AcceptAllPhys = 0x01,
+};
+
+/* Bits in TxConfig. */
+enum tx_config_bits {
+
+	/* Interframe Gap Time. Only TxIFG96 doesn't violate IEEE 802.3 */
+	TxIFGShift = 24,
+	TxIFG84 = (0 << TxIFGShift),    /* 8.4us / 840ns (10 / 100Mbps) */
+	TxIFG88 = (1 << TxIFGShift),    /* 8.8us / 880ns (10 / 100Mbps) */
+	TxIFG92 = (2 << TxIFGShift),    /* 9.2us / 920ns (10 / 100Mbps) */
+	TxIFG96 = (3 << TxIFGShift),    /* 9.6us / 960ns (10 / 100Mbps) */
+
+	TxLoopBack = (1 << 18) | (1 << 17), /* enable loopback test mode */
+	TxCRC = (1 << 16),        /* DISABLE appending CRC to end of Tx packets */
+	TxClearAbt = (1 << 0),        /* Clear abort (WO) */
+	TxDMAShift = 8,                /* DMA burst value (0-7) is shifted this many bits */
+	TxRetryShift = 4,        /* TXRR value (0-15) is shifted this many bits */
+
+	TxVersionMask = 0x7C800000, /* mask out version bits 30-26, 23 */
+};
+
+/* Bits in Config1 */
+enum Config1Bits {
+	Cfg1_PM_Enable = 0x01,
+	Cfg1_VPD_Enable = 0x02,
+	Cfg1_PIO = 0x04,
+	Cfg1_MMIO = 0x08,
+	LWAKE = 0x10,                /* not on 8139, 8139A */
+	Cfg1_Driver_Load = 0x20,
+	Cfg1_LED0 = 0x40,
+	Cfg1_LED1 = 0x80,
+	SLEEP = (1 << 1),        /* only on 8139, 8139A */
+	PWRDN = (1 << 0),        /* only on 8139, 8139A */
+};
+
+/* Bits in Config3 */
+enum Config3Bits {
+	Cfg3_FBtBEn    = (1 << 0), /* 1 = Fast Back to Back */
+	Cfg3_FuncRegEn = (1 << 1), /* 1 = enable CardBus Function registers */
+	Cfg3_CLKRUN_En = (1 << 2), /* 1 = enable CLKRUN */
+	Cfg3_CardB_En  = (1 << 3), /* 1 = enable CardBus registers */
+	Cfg3_LinkUp    = (1 << 4), /* 1 = wake up on link up */
+	Cfg3_Magic     = (1 << 5), /* 1 = wake up on Magic Packet (tm) */
+	Cfg3_PARM_En   = (1 << 6), /* 0 = software can set twister parameters */
+	Cfg3_GNTSel    = (1 << 7), /* 1 = delay 1 clock from PCI GNT signal */
+};
+
+/* Bits in Config4 */
+enum Config4Bits {
+	LWPTN = (1 << 2),        /* not on 8139, 8139A */
+};
+
+/* Bits in Config5 */
+enum Config5Bits {
+	Cfg5_PME_STS     = (1 << 0), /* 1 = PCI reset resets PME_Status */
+	Cfg5_LANWake     = (1 << 1), /* 1 = enable LANWake signal */
+	Cfg5_LDPS        = (1 << 2), /* 0 = save power when link is down */
+	Cfg5_FIFOAddrPtr = (1 << 3), /* Realtek internal SRAM testing */
+	Cfg5_UWF         = (1 << 4), /* 1 = accept unicast wakeup frame */
+	Cfg5_MWF         = (1 << 5), /* 1 = accept multicast wakeup frame */
+	Cfg5_BWF         = (1 << 6), /* 1 = accept broadcast wakeup frame */
+};
+
+enum RxConfigBits {
+	/* rx fifo threshold */
+	RxCfgFIFOShift = 13,
+	RxCfgFIFONone = (7 << RxCfgFIFOShift),
+
+	/* Max DMA burst */
+	RxCfgDMAShift = 8,
+	RxCfgDMAUnlimited = (7 << RxCfgDMAShift),
+
+	/* rx ring buffer length */
+	RxCfgRcv8K = 0,
+	RxCfgRcv16K = (1 << 11),
+	RxCfgRcv32K = (1 << 12),
+	RxCfgRcv64K = (1 << 11) | (1 << 12),
+
+	/* Disable packet wrap at end of Rx buffer */
+	RxNoWrap = (1 << 7),
+};
+
+
+/* Twister tuning parameters from RealTek.
+   Completely undocumented, but required to tune bad links. */
+enum CSCRBits {
+	CSCR_LinkOKBit = 0x0400,
+	CSCR_LinkChangeBit = 0x0800,
+	CSCR_LinkStatusBits = 0x0f000,
+	CSCR_LinkDownOffCmd = 0x003c0,
+	CSCR_LinkDownCmd = 0x0f3c0,
+};
+
+
+enum Cfg9346Bits {
+	Cfg9346_Lock = 0x00,
+	Cfg9346_Unlock = 0xC0,
+};
+
+
+#define PARA78_default        0x78fa8388
+#define PARA7c_default        0xcb38de43        /* param[0][3] */
+#define PARA7c_xxx                0xcb38de43
+/*static const unsigned long param[4][4] = {
+	{0xcb39de43, 0xcb39ce43, 0xfb38de03, 0xcb38de43},
+	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+	{0xcb39de43, 0xcb39ce43, 0xcb39ce83, 0xcb39ce83},
+	{0xbb39de43, 0xbb39ce43, 0xbb39ce83, 0xbb39ce83}
+};*/
+
+typedef enum {
+	CH_8139 = 0,
+	CH_8139_K,
+	CH_8139A,
+	CH_8139B,
+	CH_8130,
+	CH_8139C,
+} chip_t;
+
+enum chip_flags {
+	HasHltClk = (1 << 0),
+	HasLWake = (1 << 1),
+};
+
+
+/* directly indexed by chip_t, above */
+static const struct {
+	const char *name;
+	u8 version; /* from RTL8139C docs */
+	u32 flags;
+} rtl_chip_info[] = {
+	{ "RTL-8139",
+	  0x40,
+	  HasHltClk,
+	},
+
+	{ "RTL-8139 rev K",
+	  0x60,
+	  HasHltClk,
+	},
+
+	{ "RTL-8139A",
+	  0x70,
+	  HasHltClk, /* XXX undocumented? */
+	},
+
+	{ "RTL-8139A rev G",
+	  0x72,
+	  HasHltClk, /* XXX undocumented? */
+	},
+
+	{ "RTL-8139B",
+	  0x78,
+	  HasLWake,
+	},
+
+	{ "RTL-8130",
+	  0x7C,
+	  HasLWake,
+	},
+
+	{ "RTL-8139C",
+	  0x74,
+	  HasLWake,
+	},
+
+	{ "RTL-8100",
+	  0x7A,
+	  HasLWake,
+	 },
+
+	{ "RTL-8100B/8139D",
+	  0x75,
+	  HasHltClk /* XXX undocumented? */
+	  | HasLWake,
+	},
+
+	{ "RTL-8101",
+	  0x77,
+	  HasLWake,
+	},
+};
+
+struct rtl_extra_stats {
+	unsigned long early_rx;
+	unsigned long tx_buf_mapped;
+	unsigned long tx_timeouts;
+	unsigned long rx_lost_in_ring;
+};
+
+struct rtl8139_private {
+	void *mmio_addr;
+	int drv_flags;
+	struct pci_dev *pci_dev;
+	struct net_device_stats stats;
+	unsigned char *rx_ring;
+	unsigned int cur_rx;        /* Index into the Rx buffer of next Rx pkt. */
+	unsigned int tx_flag;
+	unsigned long cur_tx;
+	unsigned long dirty_tx;
+	unsigned char *tx_buf[NUM_TX_DESC];        /* Tx bounce buffers */
+	unsigned char *tx_bufs;        /* Tx bounce buffer region. */
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_bufs_dma;
+	signed char phys[4];                /* MII device addresses. */
+	char twistie, twist_row, twist_col;        /* Twister tune state. */
+	unsigned int default_port:4;        /* Last rtdev->if_port value. */
+	unsigned int medialock:1;        /* Don't sense media type. */
+	rtdm_lock_t lock;
+	chip_t chipset;
+	pid_t thr_pid;
+	u32 rx_config;
+	struct rtl_extra_stats xstats;
+	int time_to_die;
+	struct mii_if_info mii;
+	rtdm_irq_t irq_handle;
+};
+
+MODULE_AUTHOR ("Jeff Garzik <jgarzik@mandrakesoft.com>");
+MODULE_DESCRIPTION ("RealTek RTL-8139 Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static int read_eeprom (void *ioaddr, int location, int addr_len);
+static int mdio_read (struct rtnet_device *rtdev, int phy_id, int location);
+static void mdio_write (struct rtnet_device *rtdev, int phy_id, int location, int val);
+
+
+static int rtl8139_open (struct rtnet_device *rtdev);
+static int rtl8139_close (struct rtnet_device *rtdev);
+static int rtl8139_interrupt (rtdm_irq_t *irq_handle);
+static int rtl8139_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev);
+
+static int rtl8139_ioctl(struct rtnet_device *, struct ifreq *rq, int cmd);
+static struct net_device_stats *rtl8139_get_stats(struct rtnet_device*rtdev);
+
+static void rtl8139_init_ring (struct rtnet_device *rtdev);
+static void rtl8139_set_rx_mode (struct rtnet_device *rtdev);
+static void __set_rx_mode (struct rtnet_device *rtdev);
+static void rtl8139_hw_start (struct rtnet_device *rtdev);
+
+#ifdef USE_IO_OPS
+
+#define RTL_R8(reg)                inb (((unsigned long)ioaddr) + (reg))
+#define RTL_R16(reg)                inw (((unsigned long)ioaddr) + (reg))
+#define RTL_R32(reg)                inl (((unsigned long)ioaddr) + (reg))
+#define RTL_W8(reg, val8)        outb ((val8), ((unsigned long)ioaddr) + (reg))
+#define RTL_W16(reg, val16)        outw ((val16), ((unsigned long)ioaddr) + (reg))
+#define RTL_W32(reg, val32)        outl ((val32), ((unsigned long)ioaddr) + (reg))
+#define RTL_W8_F                RTL_W8
+#define RTL_W16_F                RTL_W16
+#define RTL_W32_F                RTL_W32
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb(addr) inb((unsigned long)(addr))
+#define readw(addr) inw((unsigned long)(addr))
+#define readl(addr) inl((unsigned long)(addr))
+#define writeb(val,addr) outb((val),(unsigned long)(addr))
+#define writew(val,addr) outw((val),(unsigned long)(addr))
+#define writel(val,addr) outl((val),(unsigned long)(addr))
+
+#else
+
+/* write MMIO register, with flush */
+/* Flush avoids rtl8139 bug w/ posted MMIO writes */
+#define RTL_W8_F(reg, val8)        do { writeb ((val8), ioaddr + (reg)); readb (ioaddr + (reg)); } while (0)
+#define RTL_W16_F(reg, val16)        do { writew ((val16), ioaddr + (reg)); readw (ioaddr + (reg)); } while (0)
+#define RTL_W32_F(reg, val32)        do { writel ((val32), ioaddr + (reg)); readl (ioaddr + (reg)); } while (0)
+
+
+#define MMIO_FLUSH_AUDIT_COMPLETE 1
+#if MMIO_FLUSH_AUDIT_COMPLETE
+
+/* write MMIO register */
+#define RTL_W8(reg, val8)        writeb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16)        writew ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32)        writel ((val32), ioaddr + (reg))
+
+#else
+
+/* write MMIO register, then flush */
+#define RTL_W8                RTL_W8_F
+#define RTL_W16                RTL_W16_F
+#define RTL_W32                RTL_W32_F
+
+#endif /* MMIO_FLUSH_AUDIT_COMPLETE */
+
+/* read MMIO register */
+#define RTL_R8(reg)                readb (ioaddr + (reg))
+#define RTL_R16(reg)                readw (ioaddr + (reg))
+#define RTL_R32(reg)                readl (ioaddr + (reg))
+
+#endif /* USE_IO_OPS */
+
+
+static const u16 rtl8139_intr_mask =
+	PCIErr | PCSTimeout | RxUnderrun | RxOverflow | RxFIFOOver |
+	TxErr | TxOK | RxErr | RxOK;
+
+static const unsigned int rtl8139_rx_config =
+	RxCfgRcv32K | RxNoWrap |
+	(RX_FIFO_THRESH << RxCfgFIFOShift) |
+	(RX_DMA_BURST << RxCfgDMAShift);
+
+static const unsigned int rtl8139_tx_config =
+	TxIFG96 | (TX_DMA_BURST << TxDMAShift) | (TX_RETRY << TxRetryShift);
+
+
+
+
+static void rtl8139_chip_reset (void *ioaddr)
+{
+	int i;
+
+	/* Soft reset the chip. */
+	RTL_W8 (ChipCmd, CmdReset);
+
+	/* Check that the chip has finished the reset. */
+	for (i = 1000; i > 0; i--) {
+		barrier();
+		if ((RTL_R8 (ChipCmd) & CmdReset) == 0)
+			break;
+		udelay (10);
+	}
+}
+
+
+static int rtl8139_init_board (struct pci_dev *pdev,
+					 struct rtnet_device **dev_out)
+{
+	void *ioaddr;
+	struct rtnet_device *rtdev;
+	struct rtl8139_private *tp;
+	u8 tmp8;
+	int rc;
+	unsigned int i;
+#ifdef USE_IO_OPS
+	u32 pio_start, pio_end, pio_flags, pio_len;
+#endif
+	unsigned long mmio_start, mmio_flags, mmio_len;
+	u32 tmp;
+
+
+	*dev_out = NULL;
+
+	/* dev and rtdev->priv zeroed in alloc_etherdev */
+	rtdev=rt_alloc_etherdev(sizeof (struct rtl8139_private),
+				rx_pool_size + NUM_TX_DESC);
+	if (rtdev==NULL) {
+		rtdm_printk (KERN_ERR PFX "%s: Unable to alloc new net device\n", pci_name(pdev));
+		return -ENOMEM;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+	tp = rtdev->priv;
+	tp->pci_dev = pdev;
+
+	/* enable device (incl. PCI PM wakeup and hotplug setup) */
+	rc = pci_enable_device (pdev);
+	if (rc)
+		goto err_out;
+
+	rc = pci_request_regions (pdev, "rtnet8139too");
+	if (rc)
+		goto err_out;
+
+	/* enable PCI bus-mastering */
+	pci_set_master (pdev);
+
+	mmio_start = pci_resource_start (pdev, 1);
+	mmio_flags = pci_resource_flags (pdev, 1);
+	mmio_len = pci_resource_len (pdev, 1);
+
+	/* set this immediately, we need to know before
+	 * we talk to the chip directly */
+#ifdef USE_IO_OPS
+	pio_start = pci_resource_start (pdev, 0);
+	pio_end = pci_resource_end (pdev, 0);
+	pio_flags = pci_resource_flags (pdev, 0);
+	pio_len = pci_resource_len (pdev, 0);
+
+	/* make sure PCI base addr 0 is PIO */
+	if (!(pio_flags & IORESOURCE_IO)) {
+		rtdm_printk (KERN_ERR PFX "%s: region #0 not a PIO resource, aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+	/* check for weird/broken PCI region reporting */
+	if (pio_len < RTL_MIN_IO_SIZE) {
+		rtdm_printk (KERN_ERR PFX "%s: Invalid PCI I/O region size(s), aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+#else
+	/* make sure PCI base addr 1 is MMIO */
+	if (!(mmio_flags & IORESOURCE_MEM)) {
+		rtdm_printk(KERN_ERR PFX "%s: region #1 not an MMIO resource, aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+	if (mmio_len < RTL_MIN_IO_SIZE) {
+		rtdm_printk(KERN_ERR PFX "%s: Invalid PCI mem region size(s), aborting\n", pci_name(pdev));
+		rc = -ENODEV;
+		goto err_out;
+	}
+#endif
+
+#ifdef USE_IO_OPS
+	ioaddr = (void *) pio_start;
+	rtdev->base_addr = pio_start;
+	tp->mmio_addr = ioaddr;
+#else
+	/* ioremap MMIO region */
+	ioaddr = ioremap (mmio_start, mmio_len);
+	if (ioaddr == NULL) {
+		rtdm_printk(KERN_ERR PFX "%s: cannot remap MMIO, aborting\n", pci_name(pdev));
+		rc = -EIO;
+		goto err_out;
+	}
+	rtdev->base_addr = (long) ioaddr;
+	tp->mmio_addr = ioaddr;
+#endif /* USE_IO_OPS */
+
+	/* Bring old chips out of low-power mode. */
+	RTL_W8 (HltClk, 'R');
+
+	/* check for missing/broken hardware */
+	if (RTL_R32 (TxConfig) == 0xFFFFFFFF) {
+		rtdm_printk(KERN_ERR PFX "%s: Chip not responding, ignoring board\n", pci_name(pdev));
+		rc = -EIO;
+		goto err_out;
+	}
+
+	/* identify chip attached to board */
+	tmp = RTL_R8 (ChipVersion);
+	for (i = 0; i < ARRAY_SIZE (rtl_chip_info); i++)
+		if (tmp == rtl_chip_info[i].version) {
+			tp->chipset = i;
+			goto match;
+		}
+
+	rtdm_printk("rt8139too: unknown chip version, assuming RTL-8139\n");
+	rtdm_printk("rt8139too: TxConfig = 0x%08x\n", RTL_R32 (TxConfig));
+
+	tp->chipset = 0;
+
+match:
+	if (tp->chipset >= CH_8139B) {
+		u8 new_tmp8 = tmp8 = RTL_R8 (Config1);
+		if ((rtl_chip_info[tp->chipset].flags & HasLWake) &&
+		    (tmp8 & LWAKE))
+			new_tmp8 &= ~LWAKE;
+		new_tmp8 |= Cfg1_PM_Enable;
+		if (new_tmp8 != tmp8) {
+			RTL_W8 (Cfg9346, Cfg9346_Unlock);
+			RTL_W8 (Config1, new_tmp8);
+			RTL_W8 (Cfg9346, Cfg9346_Lock);
+		}
+		if (rtl_chip_info[tp->chipset].flags & HasLWake) {
+			tmp8 = RTL_R8 (Config4);
+			if (tmp8 & LWPTN) {
+				RTL_W8 (Cfg9346, Cfg9346_Unlock);
+				RTL_W8 (Config4, tmp8 & ~LWPTN);
+				RTL_W8 (Cfg9346, Cfg9346_Lock);
+			}
+		}
+	} else {
+		tmp8 = RTL_R8 (Config1);
+		tmp8 &= ~(SLEEP | PWRDN);
+		RTL_W8 (Config1, tmp8);
+	}
+
+	rtl8139_chip_reset (ioaddr);
+
+	*dev_out = rtdev;
+	return 0;
+
+err_out:
+#ifndef USE_IO_OPS
+	if (tp->mmio_addr) iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+	/* it's ok to call this even if we have no regions to free */
+	pci_release_regions (pdev);
+	rtdev_free(rtdev);
+	pci_set_drvdata (pdev, NULL);
+
+	return rc;
+}
+
+
+
+
+static int rtl8139_init_one (struct pci_dev *pdev,
+				       const struct pci_device_id *ent)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct rtl8139_private *tp;
+	int i, addr_len;
+	int option;
+	void *ioaddr;
+	static int board_idx = -1;
+
+	board_idx++;
+
+	if( cards[board_idx] == 0)
+		return -ENODEV;
+
+	/* when we're built into the kernel, the driver version message
+	 * is only printed if at least one 8139 board has been found
+	 */
+#ifndef MODULE
+	{
+		static int printed_version;
+		if (!printed_version++)
+			rtdm_printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+	}
+#endif
+
+	if ((i=rtl8139_init_board (pdev, &rtdev)) < 0)
+		return i;
+
+
+	tp = rtdev->priv;
+	ioaddr = tp->mmio_addr;
+
+	addr_len = read_eeprom (ioaddr, 0, 8) == 0x8129 ? 8 : 6;
+	for (i = 0; i < 3; i++)
+		((u16 *) (rtdev->dev_addr))[i] =
+		    le16_to_cpu (read_eeprom (ioaddr, i + 7, addr_len));
+
+	/* The Rtl8139-specific entries in the device structure. */
+	rtdev->open = rtl8139_open;
+	rtdev->stop = rtl8139_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->hard_start_xmit = rtl8139_start_xmit;
+	rtdev->do_ioctl = rtl8139_ioctl;
+	rtdev->get_stats = rtl8139_get_stats;
+
+	/*rtdev->set_multicast_list = rtl8139_set_rx_mode; */
+	rtdev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+
+	rtdev->irq = pdev->irq;
+
+	/* rtdev->priv/tp zeroed and aligned in init_etherdev */
+	tp = rtdev->priv;
+
+	/* note: tp->chipset set in rtl8139_init_board */
+	tp->drv_flags = board_info[ent->driver_data].hw_flags;
+	tp->mmio_addr = ioaddr;
+	rtdm_lock_init (&tp->lock);
+
+	if ( (i=rt_register_rtnetdev(rtdev)) )
+		goto err_out;
+
+	pci_set_drvdata (pdev, rtdev);
+
+	tp->phys[0] = 32;
+
+	/* The lower four bits are the media type. */
+	option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
+	if (option > 0) {
+		tp->mii.full_duplex = (option & 0x210) ? 1 : 0;
+		tp->default_port = option & 0xFF;
+		if (tp->default_port)
+			tp->medialock = 1;
+	}
+	if (tp->default_port) {
+		rtdm_printk(KERN_INFO "  Forcing %dMbps %s-duplex operation.\n",
+			    (option & 0x20 ? 100 : 10),
+			    (option & 0x10 ? "full" : "half"));
+		mdio_write(rtdev, tp->phys[0], 0,
+				   ((option & 0x20) ? 0x2000 : 0) |         /* 100Mbps? */
+				   ((option & 0x10) ? 0x0100 : 0)); /* Full duplex? */
+	}
+
+
+	/* Put the chip into low-power mode. */
+	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+		RTL_W8 (HltClk, 'H');        /* 'R' would leave the clock running. */
+
+	return 0;
+
+
+err_out:
+#ifndef USE_IO_OPS
+	if (tp->mmio_addr) iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+	/* it's ok to call this even if we have no regions to free */
+	pci_release_regions (pdev);
+	rtdev_free(rtdev);
+	pci_set_drvdata (pdev, NULL);
+
+	return i;
+}
+
+
+static void rtl8139_remove_one (struct pci_dev *pdev)
+{
+	struct rtnet_device *rtdev = pci_get_drvdata(pdev);
+
+#ifndef USE_IO_OPS
+	struct rtl8139_private *tp = rtdev->priv;
+
+	if (tp->mmio_addr)
+		iounmap (tp->mmio_addr);
+#endif /* !USE_IO_OPS */
+
+	/* it's ok to call this even if we have no regions to free */
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+
+	pci_release_regions(pdev);
+	pci_set_drvdata(pdev, NULL);
+
+	rtdev_free(rtdev);
+}
+
+
+/* Serial EEPROM section. */
+
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK        0x04        /* EEPROM shift clock. */
+#define EE_CS                        0x08        /* EEPROM chip select. */
+#define EE_DATA_WRITE        0x02        /* EEPROM chip data in. */
+#define EE_WRITE_0                0x00
+#define EE_WRITE_1                0x02
+#define EE_DATA_READ        0x01        /* EEPROM chip data out. */
+#define EE_ENB                        (0x80 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33MHz PCI, but 66MHz may change this.
+ */
+
+#define eeprom_delay()        readl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_WRITE_CMD        (5)
+#define EE_READ_CMD                (6)
+#define EE_ERASE_CMD        (7)
+
+static int read_eeprom (void *ioaddr, int location, int addr_len)
+{
+	int i;
+	unsigned retval = 0;
+	void *ee_addr = ioaddr + Cfg9346;
+	int read_cmd = location | (EE_READ_CMD << addr_len);
+
+	writeb (EE_ENB & ~EE_CS, ee_addr);
+	writeb (EE_ENB, ee_addr);
+	eeprom_delay ();
+
+	/* Shift the read command bits out. */
+	for (i = 4 + addr_len; i >= 0; i--) {
+		int dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+		writeb (EE_ENB | dataval, ee_addr);
+		eeprom_delay ();
+		writeb (EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay ();
+	}
+	writeb (EE_ENB, ee_addr);
+	eeprom_delay ();
+
+	for (i = 16; i > 0; i--) {
+		writeb (EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay ();
+		retval =
+		    (retval << 1) | ((readb (ee_addr) & EE_DATA_READ) ? 1 :
+				     0);
+		writeb (EE_ENB, ee_addr);
+		eeprom_delay ();
+	}
+
+	/* Terminate the EEPROM access. */
+	writeb (~EE_CS, ee_addr);
+	eeprom_delay ();
+
+	return retval;
+}
+
+/* MII serial management: mostly bogus for now. */
+/* Read and write the MII management registers using software-generated
+   serial MDIO protocol.
+   The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues. */
+#define MDIO_DIR                0x80
+#define MDIO_DATA_OUT        0x04
+#define MDIO_DATA_IN        0x02
+#define MDIO_CLK                0x01
+#define MDIO_WRITE0 (MDIO_DIR)
+#define MDIO_WRITE1 (MDIO_DIR | MDIO_DATA_OUT)
+
+#define mdio_delay(mdio_addr)        readb(mdio_addr)
+
+
+
+static char mii_2_8139_map[8] = {
+	BasicModeCtrl,
+	BasicModeStatus,
+	0,
+	0,
+	NWayAdvert,
+	NWayLPAR,
+	NWayExpansion,
+	0
+};
+
+#ifdef CONFIG_8139TOO_8129
+/* Synchronize the MII management interface by shifting 32 one bits out. */
+static void mdio_sync (void *mdio_addr)
+{
+	int i;
+
+	for (i = 32; i >= 0; i--) {
+		writeb (MDIO_WRITE1, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (MDIO_WRITE1 | MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+}
+#endif
+
+
+static int mdio_read (struct rtnet_device *rtdev, int phy_id, int location)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	int retval = 0;
+#ifdef CONFIG_8139TOO_8129
+	void *mdio_addr = tp->mmio_addr + Config4;
+	int mii_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	int i;
+#endif
+
+	if (phy_id > 31) {        /* Really a 8139.  Use internal registers. */
+		return location < 8 && mii_2_8139_map[location] ?
+		    readw (tp->mmio_addr + mii_2_8139_map[location]) : 0;
+	}
+
+#ifdef CONFIG_8139TOO_8129
+	mdio_sync (mdio_addr);
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (mii_cmd & (1 << i)) ? MDIO_DATA_OUT : 0;
+
+		writeb (MDIO_DIR | dataval, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (MDIO_DIR | dataval | MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		writeb (0, mdio_addr);
+		mdio_delay (mdio_addr);
+		retval = (retval << 1) | ((readb (mdio_addr) & MDIO_DATA_IN) ? 1 : 0);
+		writeb (MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+#endif
+
+	return (retval >> 1) & 0xffff;
+}
+
+
+static void mdio_write (struct rtnet_device *rtdev, int phy_id, int location,
+			int value)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+#ifdef CONFIG_8139TOO_8129
+	void *mdio_addr = tp->mmio_addr + Config4;
+	int mii_cmd = (0x5002 << 16) | (phy_id << 23) | (location << 18) | value;
+	int i;
+#endif
+
+	if (phy_id > 31) {        /* Really a 8139.  Use internal registers. */
+		void *ioaddr = tp->mmio_addr;
+		if (location == 0) {
+			RTL_W8 (Cfg9346, Cfg9346_Unlock);
+			RTL_W16 (BasicModeCtrl, value);
+			RTL_W8 (Cfg9346, Cfg9346_Lock);
+		} else if (location < 8 && mii_2_8139_map[location])
+			RTL_W16 (mii_2_8139_map[location], value);
+		return;
+	}
+
+#ifdef CONFIG_8139TOO_8129
+	mdio_sync (mdio_addr);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval =
+		    (mii_cmd & (1 << i)) ? MDIO_WRITE1 : MDIO_WRITE0;
+		writeb (dataval, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (dataval | MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		writeb (0, mdio_addr);
+		mdio_delay (mdio_addr);
+		writeb (MDIO_CLK, mdio_addr);
+		mdio_delay (mdio_addr);
+	}
+#endif
+}
+
+static int rtl8139_open (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	int retval;
+
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	retval = rtdm_irq_request(&tp->irq_handle, rtdev->irq,
+				  rtl8139_interrupt, RTDM_IRQTYPE_SHARED,
+				  rtdev->name, rtdev);
+	if (retval)
+		return retval;
+
+	tp->tx_bufs = dma_alloc_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
+					 &tp->tx_bufs_dma, GFP_ATOMIC);
+	tp->rx_ring = dma_alloc_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
+					 &tp->rx_ring_dma, GFP_ATOMIC);
+
+	if (tp->tx_bufs == NULL || tp->rx_ring == NULL) {
+		rtdm_irq_free(&tp->irq_handle);
+		if (tp->tx_bufs)
+			dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN,
+					  tp->tx_bufs, tp->tx_bufs_dma);
+		if (tp->rx_ring)
+			dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN,
+					  tp->rx_ring, tp->rx_ring_dma);
+
+		return -ENOMEM;
+	}
+	/* FIXME: create wrapper for duplex_lock vs. force_media
+	   tp->mii.full_duplex = tp->mii.duplex_lock; */
+	tp->tx_flag = (TX_FIFO_THRESH << 11) & 0x003f0000;
+	tp->twistie = 1;
+	tp->time_to_die = 0;
+
+	rtl8139_init_ring (rtdev);
+	rtl8139_hw_start (rtdev);
+
+	return 0;
+}
+
+
+static void rtl_check_media (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	u16 mii_lpa;
+
+	if (tp->phys[0] < 0)
+		return;
+
+	mii_lpa = mdio_read(rtdev, tp->phys[0], MII_LPA);
+	if (mii_lpa == 0xffff)
+		return;
+
+	tp->mii.full_duplex = (mii_lpa & LPA_100FULL) == LPA_100FULL ||
+		(mii_lpa & 0x00C0) == LPA_10FULL;
+}
+
+
+/* Start the hardware at open or resume. */
+static void rtl8139_hw_start (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	u32 i;
+	u8 tmp;
+
+	/* Bring old chips out of low-power mode. */
+	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+		RTL_W8 (HltClk, 'R');
+
+	rtl8139_chip_reset(ioaddr);
+
+	/* unlock Config[01234] and BMCR register writes */
+	RTL_W8_F (Cfg9346, Cfg9346_Unlock);
+	/* Restore our idea of the MAC address. */
+	RTL_W32_F (MAC0 + 0, cpu_to_le32 (*(u32 *) (rtdev->dev_addr + 0)));
+	RTL_W32_F (MAC0 + 4, cpu_to_le32 (*(u32 *) (rtdev->dev_addr + 4)));
+
+	tp->cur_rx = 0;
+
+	/* init Rx ring buffer DMA address */
+	RTL_W32_F (RxBuf, tp->rx_ring_dma);
+
+	/* Must enable Tx/Rx before setting transfer thresholds! */
+	RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+	tp->rx_config = rtl8139_rx_config | AcceptBroadcast | AcceptMyPhys;
+	RTL_W32 (RxConfig, tp->rx_config);
+
+	/* Check this value: the documentation for IFG contradicts itself. */
+	RTL_W32 (TxConfig, rtl8139_tx_config);
+
+	rtl_check_media (rtdev);
+
+	if (tp->chipset >= CH_8139B) {
+		/* Disable magic packet scanning, which is enabled
+		 * when PM is enabled in Config1.  It can be reenabled
+		 * via ETHTOOL_SWOL if desired.  */
+		RTL_W8 (Config3, RTL_R8 (Config3) & ~Cfg3_Magic);
+	}
+
+	/* Lock Config[01234] and BMCR register writes */
+	RTL_W8 (Cfg9346, Cfg9346_Lock);
+
+	/* init Tx buffer DMA addresses */
+	for (i = 0; i < NUM_TX_DESC; i++)
+		RTL_W32_F (TxAddr0 + (i * 4), tp->tx_bufs_dma + (tp->tx_buf[i] - tp->tx_bufs));
+
+	RTL_W32 (RxMissed, 0);
+
+	rtl8139_set_rx_mode (rtdev);
+
+	/* no early-rx interrupts */
+	RTL_W16 (MultiIntr, RTL_R16 (MultiIntr) & MultiIntrClear);
+
+	/* make sure RxTx has started */
+	tmp = RTL_R8 (ChipCmd);
+	if ((!(tmp & CmdRxEnb)) || (!(tmp & CmdTxEnb)))
+		RTL_W8 (ChipCmd, CmdRxEnb | CmdTxEnb);
+
+	/* Enable all known interrupts by setting the interrupt mask. */
+	RTL_W16 (IntrMask, rtl8139_intr_mask);
+
+	rtnetif_start_queue (rtdev);
+}
+
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void rtl8139_init_ring (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	int i;
+
+	tp->cur_rx = 0;
+	tp->cur_tx = 0;
+	tp->dirty_tx = 0;
+
+	for (i = 0; i < NUM_TX_DESC; i++)
+		tp->tx_buf[i] = &tp->tx_bufs[i * TX_BUF_SIZE];
+}
+
+
+static void rtl8139_tx_clear (struct rtl8139_private *tp)
+{
+	tp->cur_tx = 0;
+	tp->dirty_tx = 0;
+
+	/* XXX account for unsent Tx packets in tp->stats.tx_dropped */
+}
+
+
+
+static int rtl8139_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+
+	void *ioaddr = tp->mmio_addr;
+	unsigned int entry;
+	unsigned int len = skb->len;
+	rtdm_lockctx_t context;
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % NUM_TX_DESC;
+
+	if (likely(len < TX_BUF_SIZE)) {
+		if (unlikely(skb->xmit_stamp != NULL)) {
+			rtdm_lock_irqsave(context);
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+						       *skb->xmit_stamp);
+			/* typically, we are only copying a few bytes here */
+			rtskb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+		} else {
+			/* copy larger packets outside the lock */
+			rtskb_copy_and_csum_dev(skb, tp->tx_buf[entry]);
+			rtdm_lock_irqsave(context);
+		}
+	} else {
+		dev_kfree_rtskb(skb);
+		tp->stats.tx_dropped++;
+		return 0;
+	}
+
+
+	/* Note: the chip doesn't have auto-pad! */
+	rtdm_lock_get(&tp->lock);
+	RTL_W32_F (TxStatus0 + (entry * sizeof (u32)), tp->tx_flag | max(len, (unsigned int)ETH_ZLEN));
+	tp->cur_tx++;
+	wmb();
+	if ((tp->cur_tx - NUM_TX_DESC) == tp->dirty_tx)
+		rtnetif_stop_queue (rtdev);
+	rtdm_lock_put_irqrestore(&tp->lock, context);
+
+	dev_kfree_rtskb(skb);
+
+#ifdef DEBUG
+	rtdm_printk ("%s: Queued Tx packet size %u to slot %d.\n", rtdev->name, len, entry);
+#endif
+	return 0;
+}
+
+static int rtl8139_ioctl(struct rtnet_device *rtdev, struct ifreq *ifr, int cmd)
+{
+    struct rtl8139_private *tp = rtdev->priv;
+    void *ioaddr = tp->mmio_addr;
+    int nReturn = 0;
+    struct ethtool_value *value;
+
+    switch (cmd) {
+	case SIOCETHTOOL:
+	    /* TODO: user-safe parameter access, most probably one layer higher */
+	    value = (struct ethtool_value *)ifr->ifr_data;
+	    if (value->cmd == ETHTOOL_GLINK)
+	    {
+		if (RTL_R16(CSCR) & CSCR_LinkOKBit)
+		    value->data = 1;
+		else
+		    value->data = 0;
+	    }
+	    break;
+
+	default:
+	    nReturn = -EOPNOTSUPP;
+	    break;
+    }
+    return nReturn;
+}
+
+static struct net_device_stats *rtl8139_get_stats(struct rtnet_device*rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	return &tp->stats;
+}
+
+static void rtl8139_tx_interrupt (struct rtnet_device *rtdev,
+				  struct rtl8139_private *tp,
+				  void *ioaddr)
+{
+	unsigned long dirty_tx, tx_left;
+
+	dirty_tx = tp->dirty_tx;
+	tx_left = tp->cur_tx - dirty_tx;
+
+	while (tx_left > 0) {
+		int entry = dirty_tx % NUM_TX_DESC;
+		int txstatus;
+
+		txstatus = RTL_R32 (TxStatus0 + (entry * sizeof (u32)));
+
+		if (!(txstatus & (TxStatOK | TxUnderrun | TxAborted)))
+			break;        /* It still hasn't been Txed */
+
+		/* Note: TxCarrierLost is always asserted at 100mbps. */
+		if (txstatus & (TxOutOfWindow | TxAborted)) {
+			/* There was a major error; log it. */
+			rtdm_printk("%s: Transmit error, Tx status %8.8x.\n",
+				    rtdev->name, txstatus);
+			tp->stats.tx_errors++;
+			if (txstatus & TxAborted) {
+				tp->stats.tx_aborted_errors++;
+				RTL_W32 (TxConfig, TxClearAbt);
+				RTL_W16 (IntrStatus, TxErr);
+				wmb();
+			}
+			if (txstatus & TxCarrierLost)
+				tp->stats.tx_carrier_errors++;
+			if (txstatus & TxOutOfWindow)
+				tp->stats.tx_window_errors++;
+#ifdef ETHER_STATS
+			if ((txstatus & 0x0f000000) == 0x0f000000)
+				tp->stats.collisions16++;
+#endif
+		} else {
+			if (txstatus & TxUnderrun) {
+				/* Add 64 to the Tx FIFO threshold. */
+				if (tp->tx_flag < 0x00300000)
+					tp->tx_flag += 0x00020000;
+				tp->stats.tx_fifo_errors++;
+			}
+			tp->stats.collisions += (txstatus >> 24) & 15;
+			tp->stats.tx_bytes += txstatus & 0x7ff;
+			tp->stats.tx_packets++;
+		}
+
+		dirty_tx++;
+		tx_left--;
+	}
+
+	/* only wake the queue if we did work, and the queue is stopped */
+	if (tp->dirty_tx != dirty_tx) {
+		tp->dirty_tx = dirty_tx;
+		mb();
+		if (rtnetif_queue_stopped (rtdev))
+			rtnetif_wake_queue (rtdev);
+	}
+}
+
+
+/* TODO: clean this up!  Rx reset need not be this intensive */
+static void rtl8139_rx_err
+(u32 rx_status, struct rtnet_device *rtdev, struct rtl8139_private *tp, void *ioaddr)
+{
+/*        u8 tmp8;
+#ifndef CONFIG_8139_NEW_RX_RESET
+	int tmp_work;
+#endif */
+
+	/* RTnet-TODO: We really need an error manager to handle such issues... */
+	rtdm_printk("%s: FATAL - Ethernet frame had errors, status %8.8x.\n",
+		    rtdev->name, rx_status);
+}
+
+
+static void rtl8139_rx_interrupt (struct rtnet_device *rtdev,
+				  struct rtl8139_private *tp, void *ioaddr,
+				  nanosecs_abs_t *time_stamp)
+{
+	unsigned char *rx_ring;
+	u16 cur_rx;
+
+	rx_ring = tp->rx_ring;
+	cur_rx = tp->cur_rx;
+
+	while ((RTL_R8 (ChipCmd) & RxBufEmpty) == 0) {
+		int ring_offset = cur_rx % RX_BUF_LEN;
+		u32 rx_status;
+		unsigned int rx_size;
+		unsigned int pkt_size;
+		struct rtskb *skb;
+
+		rmb();
+
+		/* read size+status of next frame from DMA ring buffer */
+		rx_status = le32_to_cpu (*(u32 *) (rx_ring + ring_offset));
+		rx_size = rx_status >> 16;
+		pkt_size = rx_size - 4;
+
+		/* Packet copy from FIFO still in progress.
+		 * Theoretically, this should never happen
+		 * since EarlyRx is disabled.
+		 */
+		if (rx_size == 0xfff0) {
+			tp->xstats.early_rx++;
+			break;
+		}
+
+		/* If Rx err or invalid rx_size/rx_status received
+		 * (which happens if we get lost in the ring),
+		 * Rx process gets reset, so we abort any further
+		 * Rx processing.
+		 */
+		if ((rx_size > (MAX_ETH_FRAME_SIZE+4)) ||
+		    (rx_size < 8) ||
+		    (!(rx_status & RxStatusOK))) {
+			rtl8139_rx_err (rx_status, rtdev, tp, ioaddr);
+			return;
+		}
+
+		/* Malloc up new buffer, compatible with net-2e. */
+		/* Omit the four octet CRC from the length. */
+
+		/* TODO: consider allocating skb's outside of
+		 * interrupt context, both to speed interrupt processing,
+		 * and also to reduce the chances of having to
+		 * drop packets here under memory pressure.
+		 */
+
+		skb = rtnetdev_alloc_rtskb(rtdev, pkt_size + 2);
+		if (skb) {
+			skb->time_stamp = *time_stamp;
+			rtskb_reserve (skb, 2);        /* 16 byte align the IP fields. */
+
+
+			/* eth_copy_and_sum (skb, &rx_ring[ring_offset + 4], pkt_size, 0); */
+			memcpy (skb->data, &rx_ring[ring_offset + 4], pkt_size);
+			rtskb_put (skb, pkt_size);
+			skb->protocol = rt_eth_type_trans (skb, rtdev);
+			rtnetif_rx (skb);
+			tp->stats.rx_bytes += pkt_size;
+			tp->stats.rx_packets++;
+		} else {
+			rtdm_printk (KERN_WARNING "%s: Memory squeeze, dropping packet.\n", rtdev->name);
+			tp->stats.rx_dropped++;
+		}
+
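+		/* Skip the 4-byte rx header (status+size) plus the frame,
+		 * rounding the read pointer up to a dword boundary; the 8139
+		 * expects RxBufPtr (CAPR) to be written 16 bytes behind the
+		 * actual read position. */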
+		cur_rx = (cur_rx + rx_size + 4 + 3) & ~3;
+		RTL_W16 (RxBufPtr, cur_rx - 16);
+
+		if (RTL_R16 (IntrStatus) & RxAckBits)
+			RTL_W16_F (IntrStatus, RxAckBits);
+	}
+
+	tp->cur_rx = cur_rx;
+}
+
+
+static void rtl8139_weird_interrupt (struct rtnet_device *rtdev,
+				     struct rtl8139_private *tp,
+				     void *ioaddr,
+				     int status, int link_changed)
+{
+	rtdm_printk ("%s: Abnormal interrupt, status %8.8x.\n",
+		      rtdev->name, status);
+
+	/* Update the error count. */
+	tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+	RTL_W32 (RxMissed, 0);
+
+	if ((status & RxUnderrun) && link_changed && (tp->drv_flags & HAS_LNK_CHNG)) {
+		/* Really link-change on new chips. */
+		status &= ~RxUnderrun;
+	}
+
+	/* XXX along with rtl8139_rx_err, are we double-counting errors? */
+	if (status &
+	    (RxUnderrun | RxOverflow | RxErr | RxFIFOOver))
+		tp->stats.rx_errors++;
+
+	if (status & PCSTimeout)
+		tp->stats.rx_length_errors++;
+
+	if (status & (RxUnderrun | RxFIFOOver))
+		tp->stats.rx_fifo_errors++;
+
+	if (status & PCIErr) {
+		u16 pci_cmd_status;
+		pci_read_config_word (tp->pci_dev, PCI_STATUS, &pci_cmd_status);
+		pci_write_config_word (tp->pci_dev, PCI_STATUS, pci_cmd_status);
+
+		rtdm_printk (KERN_ERR "%s: PCI Bus error %4.4x.\n", rtdev->name, pci_cmd_status);
+	}
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int rtl8139_interrupt(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	int ackstat;
+	int status;
+	int link_changed = 0; /* avoid bogus "uninit" warning */
+	int saved_status = 0;
+	int ret = RTDM_IRQ_NONE;
+
+	rtdm_lock_get(&tp->lock);
+
+	status = RTL_R16(IntrStatus);
+
+	/* h/w no longer present (hotplug?) or major error, bail */
+	if (unlikely(status == 0xFFFF) || unlikely(!(status & rtl8139_intr_mask)))
+		goto out;
+
+	ret = RTDM_IRQ_HANDLED;
+
+	/* close possible race with dev_close */
+	if (unlikely(!rtnetif_running(rtdev))) {
+		RTL_W16(IntrMask, 0);
+		goto out;
+	}
+
+	/* Acknowledge all of the current interrupt sources ASAP, but
+	   first get an additional status bit from CSCR. */
+	if (unlikely(status & RxUnderrun))
+		link_changed = RTL_R16(CSCR) & CSCR_LinkChangeBit;
+
+	/* The chip takes special action when we clear RxAckBits,
+	 * so we clear them later in rtl8139_rx_interrupt
+	 */
+	ackstat = status & ~(RxAckBits | TxErr);
+	if (ackstat)
+		RTL_W16(IntrStatus, ackstat);
+
+	if (status & RxAckBits) {
+		saved_status |= RxAckBits;
+		rtl8139_rx_interrupt(rtdev, tp, ioaddr, &time_stamp);
+	}
+
+	/* Check uncommon events with one test. */
+	if (unlikely(status & (PCIErr | PCSTimeout | RxUnderrun | RxErr)))
+		rtl8139_weird_interrupt(rtdev, tp, ioaddr, status, link_changed);
+
+	if (status & (TxOK |TxErr)) {
+		rtl8139_tx_interrupt(rtdev, tp, ioaddr);
+		if (status & TxErr) {
+			RTL_W16(IntrStatus, TxErr);
+			saved_status |= TxErr;
+		}
+	}
+ out:
+	rtdm_lock_put(&tp->lock);
+
+	if (saved_status & RxAckBits)
+		rt_mark_stack_mgr(rtdev);
+
+	if (saved_status & TxErr)
+		rtnetif_err_tx(rtdev);
+
+	return ret;
+}
+
+
+static int rtl8139_close (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	rtdm_lockctx_t context;
+
+	printk ("%s: Shutting down ethercard, status was 0x%4.4x.\n", rtdev->name, RTL_R16 (IntrStatus));
+
+	rtnetif_stop_queue (rtdev);
+
+	rtdm_lock_get_irqsave (&tp->lock, context);
+	/* Stop the chip's Tx and Rx DMA processes. */
+	RTL_W8 (ChipCmd, 0);
+	/* Disable interrupts by clearing the interrupt mask. */
+	RTL_W16 (IntrMask, 0);
+	/* Update the error counts. */
+	tp->stats.rx_missed_errors += RTL_R32 (RxMissed);
+	RTL_W32 (RxMissed, 0);
+	rtdm_lock_put_irqrestore (&tp->lock, context);
+
+	rtdm_irq_free(&tp->irq_handle);
+
+	rt_stack_disconnect(rtdev);
+
+	rtl8139_tx_clear (tp);
+
+	dma_free_coherent(&tp->pci_dev->dev, RX_BUF_TOT_LEN, tp->rx_ring,
+			  tp->rx_ring_dma);
+	dma_free_coherent(&tp->pci_dev->dev, TX_BUF_TOT_LEN, tp->tx_bufs,
+			  tp->tx_bufs_dma);
+	tp->rx_ring = NULL;
+	tp->tx_bufs = NULL;
+
+	/* Green! Put the chip in low-power mode. */
+	RTL_W8 (Cfg9346, Cfg9346_Unlock);
+
+	if (rtl_chip_info[tp->chipset].flags & HasHltClk)
+		RTL_W8 (HltClk, 'H');        /* 'R' would leave the clock running. */
+
+	return 0;
+}
+
+
+
+/* Set or clear the multicast filter for this adaptor.
+   This routine is not state sensitive and need not be SMP locked. */
+static void __set_rx_mode (struct rtnet_device *rtdev)
+{
+	struct rtl8139_private *tp = rtdev->priv;
+	void *ioaddr = tp->mmio_addr;
+	u32 mc_filter[2];        /* Multicast hash filter */
+	int rx_mode;
+	u32 tmp;
+
+#ifdef DEBUG
+	rtdm_printk ("%s:   rtl8139_set_rx_mode(%4.4x) done -- Rx config %8.8lx.\n",
+			rtdev->name, rtdev->flags, RTL_R32 (RxConfig));
+#endif
+
+	/* Note: do not reorder, GCC is clever about common statements. */
+	if (rtdev->flags & IFF_PROMISC) {
+		/* Unconditionally log net taps. */
+		/*printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n", rtdev->name);*/
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else if (rtdev->flags & IFF_ALLMULTI) {
+		/* Too many to filter perfectly -- accept all multicasts. */
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else {
+		rx_mode = AcceptBroadcast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0;
+	}
+
+	/* We can safely update without stopping the chip. */
+	tmp = rtl8139_rx_config | rx_mode;
+	if (tp->rx_config != tmp) {
+		RTL_W32_F (RxConfig, tmp);
+		tp->rx_config = tmp;
+	}
+	RTL_W32_F (MAR0 + 0, mc_filter[0]);
+	RTL_W32_F (MAR0 + 4, mc_filter[1]);
+}
+
+static void rtl8139_set_rx_mode (struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct rtl8139_private *tp = rtdev->priv;
+
+	rtdm_lock_get_irqsave (&tp->lock, context);
+	__set_rx_mode(rtdev);
+	rtdm_lock_put_irqrestore (&tp->lock, context);
+}
+
+static struct pci_driver rtl8139_pci_driver = {
+	.name		= DRV_NAME,
+	.id_table	= rtl8139_pci_tbl,
+	.probe		= rtl8139_init_one,
+	.remove		= rtl8139_remove_one,
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+
+static int __init rtl8139_init_module (void)
+{
+	/* when we're a module, we always print a version message,
+	 * even if no 8139 board is found.
+	 */
+
+#ifdef MODULE
+	printk (KERN_INFO RTL8139_DRIVER_NAME "\n");
+#endif
+
+	return pci_register_driver (&rtl8139_pci_driver);
+}
+
+
+static void __exit rtl8139_cleanup_module (void)
+{
+	pci_unregister_driver (&rtl8139_pci_driver);
+}
+
+
+module_init(rtl8139_init_module);
+module_exit(rtl8139_cleanup_module);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig
new file mode 100644
index 0000000..d71d7ec
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Kconfig
@@ -0,0 +1,147 @@
+menu "Drivers"
+    depends on XENO_DRIVERS_NET
+
+comment "Common PCI Drivers"
+    depends on PCI
+
+config XENO_DRIVERS_NET_DRV_PCNET32
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "AMD PCnet32"
+
+
+config XENO_DRIVERS_NET_DRV_TULIP
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "DEC Tulip"
+
+
+config XENO_DRIVERS_NET_DRV_EEPRO100
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Intel EtherExpress PRO/100"
+    default y
+
+config XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT
+    depends on XENO_DRIVERS_NET && PCI
+    int "Command Timeout"
+    depends on XENO_DRIVERS_NET_DRV_EEPRO100
+    default 20
+    help
+    Timeout in microseconds of transmission or configuration commands that
+    are issued in real-time contexts.
+
+config XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+    depends on XENO_DRIVERS_NET && PCI
+    bool "Enable debugging and instrumentation"
+    depends on XENO_DRIVERS_NET_DRV_EEPRO100
+    help
+    This option switches on internal debugging code of the EEPRO/100 driver.
+    It also enables the collection of worst-case command delays in real-time
+    contexts in order to reduce the command timeout (which, effectively, will
+    also reduce the worst-case transmission latency).
+
+
+config XENO_DRIVERS_NET_DRV_E1000
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Intel(R) PRO/1000 (Gigabit)"
+    default y
+
+config XENO_DRIVERS_NET_DRV_E1000E
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "New Intel(R) PRO/1000 PCIe (Gigabit)"
+
+
+config XENO_DRIVERS_NET_DRV_NATSEMI
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "NatSemi"
+
+
+config XENO_DRIVERS_NET_DRV_8139
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Realtek 8139"
+    default y
+
+
+config XENO_DRIVERS_NET_DRV_VIA_RHINE
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "VIA Rhine"
+
+
+config XENO_DRIVERS_NET_DRV_IGB
+    select I2C
+    select I2C_ALGOBIT
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Intel(R) 82575 (Gigabit)"
+
+
+config XENO_DRIVERS_NET_DRV_R8169
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Realtek 8169 (Gigabit)"
+
+
+if PPC
+
+comment "Embedded MPC Drivers"
+    depends on XENO_DRIVERS_NET
+
+config XENO_DRIVERS_NET_DRV_FCC_ENET
+    depends on XENO_DRIVERS_NET
+    tristate "MPC8260 FCC Ethernet"
+
+
+config XENO_DRIVERS_NET_DRV_FEC_ENET
+    depends on XENO_DRIVERS_NET
+    tristate "MPC8xx FEC Ethernet"
+
+
+config XENO_DRIVERS_NET_DRV_SCC_ENET
+    depends on XENO_DRIVERS_NET
+    tristate "MPC8xx SCC Ethernet"
+
+
+config XENO_DRIVERS_NET_DRV_MPC52XX_FEC
+    depends on XENO_DRIVERS_NET
+    tristate "MPC52xx FEC Ethernet"
+
+endif
+
+
+comment "Misc Drivers"
+
+config XENO_DRIVERS_NET_DRV_LOOPBACK
+    depends on XENO_DRIVERS_NET
+    tristate "Loopback"
+    default y
+
+if ARM
+
+config XENO_DRIVERS_NET_DRV_AT91_ETHER
+    depends on XENO_DRIVERS_NET && SOC_AT91RM9200
+    select XENO_DRIVERS_NET_DRV_MACB
+    tristate "AT91RM9200 Board Ethernet Driver"
+
+config XENO_DRIVERS_NET_DRV_MACB
+    depends on XENO_DRIVERS_NET
+    select AT91_PROGRAMMABLE_CLOCKS if ARCH_AT91
+    tristate "Cadence MACB/GEM devices"
+    help
+    Driver for the internal MAC controller on AT91SAM926x microcontrollers.
+    Porting by Cristiano Mantovani and Stefano Banzi (Marposs SpA).
+
+endif
+
+if ARM64
+
+config XENO_DRIVERS_NET_FEC
+    depends on XENO_DRIVERS_NET
+    tristate "Freescale FEC"
+    depends on ARCH_MXC || SOC_IMX28
+    select PHYLIB
+    imply PTP_1588_CLOCK
+    help
+    Driver for the built-in 10/100 Fast Ethernet controller on Freescale
+    i.MX processors.
+
+endif
+
+source "drivers/xenomai/net/drivers/experimental/Kconfig"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile
new file mode 100644
index 0000000..3c07320
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/Makefile
@@ -0,0 +1,63 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_EXP_DRIVERS) += experimental/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000) += e1000/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000E) += e1000e/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MPC52XX_FEC) += mpc52xx_fec/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_TULIP) += tulip/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_IGB) += igb/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_FEC) += freescale/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_8139) += rt_8139too.o
+
+rt_8139too-y := 8139too.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_AT91_ETHER) += rt_at91_ether.o
+
+rt_at91_ether-y := at91_ether.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100) += rt_eepro100.o
+
+rt_eepro100-y := eepro100.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK) += rt_loopback.o
+
+rt_loopback-y := loopback.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_FCC_ENET) += rt_mpc8260_fcc_enet.o
+
+rt_mpc8260_fcc_enet-y := mpc8260_fcc_enet.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_SCC_ENET) += rt_mpc8xx_enet.o
+
+rt_mpc8xx_enet-y := mpc8xx_enet.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_FEC_ENET) += rt_mpc8xx_fec.o
+
+rt_mpc8xx_fec-y := mpc8xx_fec.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_NATSEMI) += rt_natsemi.o
+
+rt_natsemi-y := natsemi.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_PCNET32) += rt_pcnet32.o
+
+rt_pcnet32-y := pcnet32.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MACB) += rt_macb.o
+
+rt_macb-y := macb.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_VIA_RHINE) += rt_via-rhine.o
+
+rt_via-rhine-y := via-rhine.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_R8169) += rt_r8169.o
+
+rt_r8169-y := r8169.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169 b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169
new file mode 100644
index 0000000..69942f9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/README.r8169
@@ -0,0 +1,42 @@
+For general information on RTnet and Gigabit Ethernet, please have a look
+at the README.gigabit file in the RTnet 'Documentation' directory.
+
+This is the RTnet driver for NICs based on the Realtek RTL8169(S) chipset.
+The following cards should have this chipset:
+
+ o Buffalo LGY-PCI-GT (8169S)
+ o Corega CG-LAPCIGT (8169S)
+ o D-Link DGE-528T (8169S)
+ o Gigabyte 7N400 Pro2 Integrated Gigabit Ethernet (8110S)
+ o LevelOne GNC-0105T (8169S)
+ o Linksys EG1032v3 (8169S)
+ o Netgear GA511 PC Card (8169)
+ o PLANEX COMMUNICATIONS Inc. GN-1200TC (8169S)
+ o Surecom EP-320G-TX1 (8169S)
+ o US Robotics USR997902 (8169S)
+ o Xterasys XN-152 10/100/1000 NIC (8169)
+
+(see <http://www.openbsd.org/cgi-bin/man.cgi?query=re&arch=macppc&sektion=4>)
+
+So far this driver has only been tested with a D-Link DGE-528T; for other
+NICs you may have to extend the PCI device ID table within the driver.
+
+You can set various debugging levels while loading the module:
+
+DEBUG_RX_SYNC    1    Show received TDMA synchronisation frames
+DEBUG_RX_OTHER   2    Show other received packets
+DEBUG_TX_SYNC    4    Show sent TDMA synchronisation frames
+DEBUG_TX_OTHER   8    Show other sent packets
+DEBUG_RUN       16    Show general debugging info while running...
+
+(The debugging output is still work in progress; the DEBUG_RX_* levels do
+not work yet.)
+
+To see all sent packets (except TDMA sync frames) and general debugging
+output, just load the driver like this:
+
+  modprobe rt_r8169 debug=24
+ (DEBUG_TX_OTHER + DEBUG_RUN = 8 + 16 = 24)
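+
+To enable all five categories at once, just add the values up
+(1 + 2 + 4 + 8 + 16 = 31):
+
+  modprobe rt_r8169 debug=31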
+
+
+More documentation follows... =8-)
+                                                                 Klaus Keppler
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c
new file mode 100644
index 0000000..f8223b0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/at91_ether.c
@@ -0,0 +1,460 @@
+/*
+ * Ethernet driver for the Atmel AT91RM9200 (Thunder)
+ *
+ *  Copyright (C) 2003 SAN People (Pty) Ltd
+ *
+ * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
+ * Initial version by Rick Bronson 01/11/2003
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ *
+ * RTnet port:
+ * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org>
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/dma-mapping.h>
+#include <linux/ethtool.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include <linux/clk.h>
+#include <linux/gfp.h>
+#include <linux/phy.h>
+#include <linux/io.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_net.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtnet_port.h>
+#include <rtskb.h>
+#include "rt_macb.h"
+
+/* 1518 rounded up */
+#define MAX_RBUFF_SZ	0x600
+/* max number of receive buffers */
+#define MAX_RX_DESCR	9
+
+/* Initialize and start the Receiver and Transmit subsystems */
+static int at91ether_start(struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	dma_addr_t addr;
+	u32 ctl;
+	int i;
+
+	lp->rx_ring = dma_alloc_coherent(&lp->pdev->dev,
+					 (MAX_RX_DESCR *
+					  sizeof(struct macb_dma_desc)),
+					 &lp->rx_ring_dma, GFP_KERNEL);
+	if (!lp->rx_ring)
+		return -ENOMEM;
+
+	lp->rx_buffers = dma_alloc_coherent(&lp->pdev->dev,
+					    MAX_RX_DESCR * MAX_RBUFF_SZ,
+					    &lp->rx_buffers_dma, GFP_KERNEL);
+	if (!lp->rx_buffers) {
+		dma_free_coherent(&lp->pdev->dev,
+				  MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+				  lp->rx_ring, lp->rx_ring_dma);
+		lp->rx_ring = NULL;
+		return -ENOMEM;
+	}
+
+	addr = lp->rx_buffers_dma;
+	for (i = 0; i < MAX_RX_DESCR; i++) {
+		lp->rx_ring[i].addr = addr;
+		lp->rx_ring[i].ctrl = 0;
+		addr += MAX_RBUFF_SZ;
+	}
+
+	/* Set the Wrap bit on the last descriptor */
+	lp->rx_ring[MAX_RX_DESCR - 1].addr |= MACB_BIT(RX_WRAP);
+
+	/* Reset buffer index */
+	lp->rx_tail = 0;
+
+	/* Program address of descriptor list in Rx Buffer Queue register */
+	macb_writel(lp, RBQP, lp->rx_ring_dma);
+
+	/* Enable Receive and Transmit */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl | MACB_BIT(RE) | MACB_BIT(TE));
+
+	return 0;
+}
+
+/* Open the ethernet interface */
+static int at91ether_open(struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	u32 ctl;
+	int ret;
+
+	rt_stack_connect(dev, &STACK_manager);
+
+	/* Clear internal statistics */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl | MACB_BIT(CLRSTAT));
+
+	rtmacb_set_hwaddr(lp);
+
+	ret = at91ether_start(dev);
+	if (ret)
+		return ret;
+
+	/* Enable MAC interrupts */
+	macb_writel(lp, IER, MACB_BIT(RCOMP)	|
+			     MACB_BIT(RXUBR)	|
+			     MACB_BIT(ISR_TUND)	|
+			     MACB_BIT(ISR_RLE)	|
+			     MACB_BIT(TCOMP)	|
+			     MACB_BIT(ISR_ROVR)	|
+			     MACB_BIT(HRESP));
+
+	/* schedule a link state check */
+	phy_start(lp->phy_dev);
+
+	rtnetif_start_queue(dev);
+
+	return 0;
+}
+
+/* Close the interface */
+static int at91ether_close(struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	u32 ctl;
+
+	/* Disable Receiver and Transmitter */
+	ctl = macb_readl(lp, NCR);
+	macb_writel(lp, NCR, ctl & ~(MACB_BIT(TE) | MACB_BIT(RE)));
+
+	/* Disable MAC interrupts */
+	macb_writel(lp, IDR, MACB_BIT(RCOMP)	|
+			     MACB_BIT(RXUBR)	|
+			     MACB_BIT(ISR_TUND)	|
+			     MACB_BIT(ISR_RLE)	|
+			     MACB_BIT(TCOMP)	|
+			     MACB_BIT(ISR_ROVR) |
+			     MACB_BIT(HRESP));
+
+	rtnetif_stop_queue(dev);
+
+	dma_free_coherent(&lp->pdev->dev,
+				MAX_RX_DESCR * sizeof(struct macb_dma_desc),
+				lp->rx_ring, lp->rx_ring_dma);
+	lp->rx_ring = NULL;
+
+	dma_free_coherent(&lp->pdev->dev,
+				MAX_RX_DESCR * MAX_RBUFF_SZ,
+				lp->rx_buffers, lp->rx_buffers_dma);
+	lp->rx_buffers = NULL;
+
+	rt_stack_disconnect(dev);
+
+	return 0;
+}
+
+/* Transmit packet */
+static int at91ether_start_xmit(struct rtskb *skb, struct rtnet_device *dev)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+
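+	/* TSR.BNQ ("buffer not queued") is set while the RM9200 EMAC's single
+	 * transmit buffer is free, i.e. another frame may be handed over. */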
+	if (macb_readl(lp, TSR) & MACB_BIT(RM9200_BNQ)) {
+		rtnetif_stop_queue(dev);
+
+		/* Store packet information (to free when Tx completed) */
+		lp->skb = skb;
+		lp->skb_length = skb->len;
+		lp->skb_physaddr = dma_map_single(NULL, skb->data, skb->len,
+							DMA_TO_DEVICE);
+
+		/* Set address of the data in the Transmit Address register */
+		macb_writel(lp, TAR, lp->skb_physaddr);
+		/* Set length of the packet in the Transmit Control register */
+		macb_writel(lp, TCR, skb->len);
+
+	} else {
+		rtdev_err(dev, "%s called, but device is busy!\n", __func__);
+		return RTDEV_TX_BUSY;
+	}
+
+	return RTDEV_TX_OK;
+}
+
+/* Extract received frames from the buffer descriptors and send them to
+ * the upper layers. (Called from interrupt context)
+ */
+static bool at91ether_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp)
+{
+	struct macb *lp = rtnetdev_priv(dev);
+	unsigned char *p_recv;
+	struct rtskb *skb;
+	unsigned int pktlen;
+	bool ret = false;
+
+	while (lp->rx_ring[lp->rx_tail].addr & MACB_BIT(RX_USED)) {
+		p_recv = lp->rx_buffers + lp->rx_tail * MAX_RBUFF_SZ;
+		pktlen = MACB_BF(RX_FRMLEN, lp->rx_ring[lp->rx_tail].ctrl);
+		skb = rtnetdev_alloc_rtskb(dev, pktlen + 2);
+		if (skb) {
+			rtskb_reserve(skb, 2);
+			memcpy(rtskb_put(skb, pktlen), p_recv, pktlen);
+
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			lp->stats.rx_packets++;
+			lp->stats.rx_bytes += pktlen;
+			ret = true;
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+		} else {
+			lp->stats.rx_dropped++;
+		}
+
+		if (lp->rx_ring[lp->rx_tail].ctrl & MACB_BIT(RX_MHASH_MATCH))
+			lp->stats.multicast++;
+
+		/* reset ownership bit */
+		lp->rx_ring[lp->rx_tail].addr &= ~MACB_BIT(RX_USED);
+
+		/* wrap after last buffer */
+		if (lp->rx_tail == MAX_RX_DESCR - 1)
+			lp->rx_tail = 0;
+		else
+			lp->rx_tail++;
+	}
+
+	return ret;
+}
+
+/* MAC interrupt handler */
+static int at91ether_interrupt(rtdm_irq_t *irq_handle)
+{
+	void *dev_id = rtdm_irq_get_arg(irq_handle, void);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *dev = dev_id;
+	struct macb *lp = rtnetdev_priv(dev);
+	u32 intstatus, ctl;
+
+	/* MAC Interrupt Status register indicates what interrupts are pending.
+	 * It is automatically cleared once read.
+	 */
+	intstatus = macb_readl(lp, ISR);
+
+	/* Receive complete */
+	if ((intstatus & MACB_BIT(RCOMP)) && at91ether_rx(dev, &time_stamp))
+		rt_mark_stack_mgr(dev);
+
+	/* Transmit complete */
+	if (intstatus & MACB_BIT(TCOMP)) {
+		/* The TCOM bit is set even if the transmission failed */
+		if (intstatus & (MACB_BIT(ISR_TUND) | MACB_BIT(ISR_RLE)))
+			lp->stats.tx_errors++;
+
+		if (lp->skb) {
+			dev_kfree_rtskb(lp->skb);
+			lp->skb = NULL;
+			dma_unmap_single(NULL, lp->skb_physaddr, lp->skb_length, DMA_TO_DEVICE);
+			lp->stats.tx_packets++;
+			lp->stats.tx_bytes += lp->skb_length;
+		}
+		rtnetif_wake_queue(dev);
+	}
+
+	/* Work-around for EMAC Errata section 41.3.1 */
+	if (intstatus & MACB_BIT(RXUBR)) {
+		ctl = macb_readl(lp, NCR);
+		macb_writel(lp, NCR, ctl & ~MACB_BIT(RE));
+		macb_writel(lp, NCR, ctl | MACB_BIT(RE));
+	}
+
+	if (intstatus & MACB_BIT(ISR_ROVR))
+		rtdev_err(dev, "ROVR error\n");
+
+	return RTDM_IRQ_HANDLED;
+}
+
+#if defined(CONFIG_OF)
+static const struct of_device_id at91ether_dt_ids[] = {
+	{ .compatible = "cdns,at91rm9200-emac" },
+	{ .compatible = "cdns,emac" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, at91ether_dt_ids);
+#endif
+
+/* Detect MAC & PHY and perform ethernet interface initialization */
+static int __init at91ether_probe(struct platform_device *pdev)
+{
+	struct macb_platform_data *board_data = dev_get_platdata(&pdev->dev);
+	struct resource *regs;
+	struct rtnet_device *dev;
+	struct phy_device *phydev;
+	struct macb *lp;
+	int res;
+	u32 reg;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+	const char *mac;
+#endif
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs)
+		return -ENOENT;
+
+	dev = rt_alloc_etherdev(sizeof(struct macb), MAX_RX_DESCR * 2 + 2);
+	if (!dev)
+		return -ENOMEM;
+
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+
+	lp = rtnetdev_priv(dev);
+	lp->pdev = pdev;
+	lp->dev = dev;
+	rtdm_lock_init(&lp->lock);
+
+	/* physical base address */
+	dev->base_addr = regs->start;
+	lp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+	if (!lp->regs) {
+		res = -ENOMEM;
+		goto err_free_dev;
+	}
+
+	/* Clock */
+	lp->pclk = devm_clk_get(&pdev->dev, "ether_clk");
+	if (IS_ERR(lp->pclk)) {
+		res = PTR_ERR(lp->pclk);
+		goto err_free_dev;
+	}
+	clk_enable(lp->pclk);
+
+	lp->hclk = ERR_PTR(-ENOENT);
+	lp->tx_clk = ERR_PTR(-ENOENT);
+
+	/* Install the interrupt handler */
+	dev->irq = platform_get_irq(pdev, 0);
+	res = rtdm_irq_request(&lp->irq_handle, dev->irq, at91ether_interrupt, 0, dev->name, dev);
+	if (res)
+		goto err_disable_clock;
+
+	dev->open = at91ether_open;
+	dev->stop = at91ether_close;
+	dev->hard_start_xmit = at91ether_start_xmit;
+	dev->do_ioctl = rtmacb_ioctl;
+	dev->get_stats = rtmacb_get_stats;
+
+	platform_set_drvdata(pdev, dev);
+
+#if (LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0))
+	res = of_get_mac_address(pdev->dev.of_node, lp->dev->dev_addr);
+	if (res)
+#else
+	mac = of_get_mac_address(pdev->dev.of_node);
+	if (mac)
+		memcpy(lp->dev->dev_addr, mac, ETH_ALEN);
+	else
+#endif
+		rtmacb_get_hwaddr(lp);
+
+	res = of_get_phy_mode(pdev->dev.of_node);
+	if (res < 0) {
+		if (board_data && board_data->is_rmii)
+			lp->phy_interface = PHY_INTERFACE_MODE_RMII;
+		else
+			lp->phy_interface = PHY_INTERFACE_MODE_MII;
+	} else {
+		lp->phy_interface = res;
+	}
+
+	macb_writel(lp, NCR, 0);
+
+	reg = MACB_BF(CLK, MACB_CLK_DIV32) | MACB_BIT(BIG);
+	if (lp->phy_interface == PHY_INTERFACE_MODE_RMII)
+		reg |= MACB_BIT(RM9200_RMII);
+
+	macb_writel(lp, NCFGR, reg);
+
+	/* Register the network interface */
+	res = rt_register_rtnetdev(dev);
+	if (res)
+		goto err_irq_free;
+
+	res = rtmacb_mii_init(lp);
+	if (res)
+		goto err_out_unregister_netdev;
+
+	/* will be enabled in open() */
+	rtnetif_carrier_off(dev);
+
+	phydev = lp->phy_dev;
+	rtdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+				phydev->drv->name, dev_name(&phydev->dev),
+				phydev->irq);
+
+	/* Display ethernet banner */
+	rtdev_info(dev, "AT91 ethernet at 0x%08lx int=%d (%pM)\n",
+				dev->base_addr, dev->irq, dev->dev_addr);
+
+	return 0;
+
+err_out_unregister_netdev:
+	rt_unregister_rtnetdev(dev);
+err_irq_free:
+	rtdm_irq_free(&lp->irq_handle);
+err_disable_clock:
+	clk_disable(lp->pclk);
+err_free_dev:
+	rtdev_free(dev);
+	return res;
+}
+
+static int at91ether_remove(struct platform_device *pdev)
+{
+	struct rtnet_device *dev = platform_get_drvdata(pdev);
+	struct macb *lp = rtnetdev_priv(dev);
+
+	if (lp->phy_dev)
+		phy_disconnect(lp->phy_dev);
+
+	mdiobus_unregister(lp->mii_bus);
+	if (lp->phy_phony_net_device)
+		free_netdev(lp->phy_phony_net_device);
+	kfree(lp->mii_bus->irq);
+	rt_rtdev_disconnect(dev);
+	rtdm_irq_free(&lp->irq_handle);
+	mdiobus_free(lp->mii_bus);
+	rt_unregister_rtnetdev(dev);
+	clk_disable(lp->pclk);
+	rtdev_free(dev);
+
+	return 0;
+}
+
+static struct platform_driver at91ether_driver = {
+	.remove		= at91ether_remove,
+	.driver		= {
+		.name	= "at91_ether",
+		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(at91ether_dt_ids),
+	},
+};
+
+module_platform_driver_probe(at91ether_driver, at91ether_probe);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("AT91RM9200 EMAC Ethernet driver");
+MODULE_AUTHOR("Andrew Victor");
+MODULE_ALIAS("platform:at91_ether");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile
new file mode 100644
index 0000000..1c28452
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/Makefile
@@ -0,0 +1,8 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000) += rt_e1000.o
+
+rt_e1000-y := \
+	e1000_hw.o \
+	e1000_main.o \
+	e1000_param.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h
new file mode 100644
index 0000000..44f1efa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000.h
@@ -0,0 +1,391 @@
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/stddef.h>
+#include <linux/module.h>
+#include <linux/types.h>
+#include <asm/byteorder.h>
+#include <linux/init.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/pci.h>
+#include <linux/kernel.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/delay.h>
+#include <linux/timer.h>
+#include <linux/slab.h>
+#include <linux/vmalloc.h>
+#include <linux/interrupt.h>
+#include <linux/string.h>
+#include <linux/pagemap.h>
+#include <linux/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+#include <linux/capability.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <net/pkt_sched.h>
+#include <linux/list.h>
+#include <linux/reboot.h>
+#ifdef NETIF_F_ISO
+#undef NETIF_F_ISO
+#endif
+
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#endif
+#ifdef SIOCGMIIPHY
+#include <linux/mii.h>
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#undef NETIF_F_HW_VLAN_TX
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+// RTNET
+#include <rtnet_port.h>
+
+
+#define BAR_0		0
+#define BAR_1		1
+#define BAR_5		5
+
+#include "kcompat.h"
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#include "e1000_hw.h"
+
+#ifdef DBG
+#define E1000_DBG(args...) printk(KERN_DEBUG "e1000: " args)
+#else
+#define E1000_DBG(args...)
+#endif
+
+#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
+
+#define PFX "e1000: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args))
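+/* typical use: DPRINTK(PROBE, ERR, "EEPROM checksum invalid\n") -- the first
+ * argument selects the NETIF_MSG_* class, the second the KERN_* log level */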
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD                  256
+#define E1000_MAX_TXD                      256
+#define E1000_MIN_TXD                       80
+#define E1000_MAX_82544_TXD               4096
+
+#define E1000_DEFAULT_RXD                  256
+#define E1000_MAX_RXD                      256
+#define E1000_MIN_RXD                       80
+#define E1000_MAX_82544_RXD               4096
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
+#define E1000_RXBUFFER_2048  2048
+#define E1000_RXBUFFER_4096  4096
+#define E1000_RXBUFFER_8192  8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX       15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Flow Control Watermarks */
+#define E1000_FC_HIGH_DIFF 0x1638  /* High: 5688 bytes below Rx FIFO size */
+#define E1000_FC_LOW_DIFF 0x1640   /* Low:  5696 bytes below Rx FIFO size */
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE	16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define AUTO_ALL_MODES            0
+#define E1000_EEPROM_82544_APM    0x0004
+#define E1000_EEPROM_ICH8_APME    0x0004
+#define E1000_EEPROM_APME         0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#define E1000_MNG_VLAN_NONE -1
+#endif
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* only works for sizes that are powers of 2 */
+#define E1000_ROUNDUP(i, size) ((i) = (((i) + (size) - 1) & ~((size) - 1)))
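+/* e.g. with size = 16: i = 100 becomes 112, i = 112 stays 112; a size that
+ * is not a power of 2 would corrupt the value instead of rounding it up */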
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+	struct rtskb *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	uint16_t length;
+	uint16_t next_to_watch;
+};
+
+
+struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { uint64_t ps_page_dma[PS_PAGE_BUFFERS]; };
+
+struct e1000_tx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	rtdm_lock_t tx_lock;
+	uint16_t tdh;
+	uint16_t tdt;
+	boolean_t last_tx_tso;
+};
+
+struct e1000_rx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+	/* arrays of page information for packet split */
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+
+	/* cpu for rx queue */
+	int cpu;
+
+	uint16_t rdh;
+	uint16_t rdt;
+};
+
+#define E1000_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
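+/* The -1 above keeps next_to_use from ever catching up with next_to_clean,
+ * so a completely full ring cannot be confused with an empty one. */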
+
+#define E1000_RX_DESC_PS(R, i)	    \
+	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+	uint16_t mng_vlan_id;
+#endif
+	uint32_t bd_number;
+	uint32_t rx_buffer_len;
+	uint32_t part_num;
+	uint32_t wol;
+	uint32_t ksp3_port_a;
+	uint32_t smartspeed;
+	uint32_t en_mng_pt;
+	uint16_t link_speed;
+	uint16_t link_duplex;
+#ifdef CONFIG_E1000_NAPI
+	spinlock_t tx_queue_lock;
+#endif
+	atomic_t irq_sem;
+	struct work_struct reset_task;
+	uint8_t fc_autoneg;
+
+#ifdef ETHTOOL_PHYS_ID
+	struct timer_list blink_timer;
+	unsigned long led_status;
+#endif
+
+	/* TX */
+	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+	unsigned long tx_queue_len;
+	uint32_t txd_cmd;
+	uint32_t tx_int_delay;
+	uint32_t tx_abs_int_delay;
+	uint32_t gotcl;
+	uint64_t gotcl_old;
+	uint64_t tpt_old;
+	uint64_t colc_old;
+	uint32_t tx_timeout_count;
+	uint32_t tx_fifo_head;
+	uint32_t tx_head_addr;
+	uint32_t tx_fifo_size;
+	uint8_t  tx_timeout_factor;
+	atomic_t tx_fifo_stall;
+	boolean_t pcix_82544;
+	boolean_t detect_tx_hung;
+
+	/* RX */
+#ifdef CONFIG_E1000_NAPI
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
+#else
+	boolean_t (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring);
+#endif
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+			      struct e1000_rx_ring *rx_ring,
+				int cleaned_count);
+	struct e1000_rx_ring *rx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_NAPI
+	struct net_device *polling_netdev;  /* One per active queue */
+#endif
+	int num_tx_queues;
+	int num_rx_queues;
+
+	uint64_t hw_csum_err;
+	uint64_t hw_csum_good;
+	uint64_t rx_hdr_split;
+	uint32_t alloc_rx_buff_failed;
+	uint32_t rx_int_delay;
+	uint32_t rx_abs_int_delay;
+	boolean_t rx_csum;
+	unsigned int rx_ps_pages;
+	uint32_t gorcl;
+	uint64_t gorcl_old;
+	uint16_t rx_ps_bsize0;
+
+	/* Interrupt Throttle Rate */
+	uint32_t itr;
+
+	/* OS defined structs */
+	struct rtnet_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	rtdm_irq_t irq_handle;
+	boolean_t data_received;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+#ifdef ETHTOOL_TEST
+	uint32_t test_icr;
+	struct e1000_tx_ring test_tx_ring;
+	struct e1000_rx_ring test_rx_ring;
+#endif
+
+#ifdef E1000_COUNT_ICR
+	uint64_t icr_txdw;
+	uint64_t icr_txqe;
+	uint64_t icr_lsc;
+	uint64_t icr_rxseq;
+	uint64_t icr_rxdmt;
+	uint64_t icr_rxo;
+	uint64_t icr_rxt;
+	uint64_t icr_mdac;
+	uint64_t icr_rxcfg;
+	uint64_t icr_gpi;
+#endif
+
+	uint32_t *config_space;
+	int msg_enable;
+#ifdef CONFIG_PCI_MSI
+	boolean_t have_msi;
+#endif
+	/* to not mess up cache alignment, always add to the bottom */
+#ifdef NETIF_F_TSO
+	boolean_t tso_force;
+#endif
+	boolean_t smart_power_down;	/* phy smart power down */
+	unsigned long flags;
+
+	struct delayed_work watchdog_task;
+	struct delayed_work fifo_stall_task;
+	struct delayed_work phy_info_task;
+};
+
+enum e1000_state_t {
+	__E1000_DRIVER_TESTING,
+	__E1000_RESETTING,
+};
+#endif /* _E1000_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c
new file mode 100644
index 0000000..59c55ff
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.c
@@ -0,0 +1,9094 @@
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.c
+ * Shared functions for accessing and configuring the MAC
+ */
+
+
+#include "e1000_hw.h"
+
+static int32_t e1000_set_phy_type(struct e1000_hw *hw);
+static void e1000_phy_init_script(struct e1000_hw *hw);
+static int32_t e1000_setup_copper_link(struct e1000_hw *hw);
+static int32_t e1000_setup_fiber_serdes_link(struct e1000_hw *hw);
+static int32_t e1000_adjust_serdes_amplitude(struct e1000_hw *hw);
+static int32_t e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static int32_t e1000_config_mac_to_phy(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
+static void e1000_lower_mdi_clk(struct e1000_hw *hw, uint32_t *ctrl);
+static void e1000_shift_out_mdi_bits(struct e1000_hw *hw, uint32_t data,
+                                     uint16_t count);
+static uint16_t e1000_shift_in_mdi_bits(struct e1000_hw *hw);
+static int32_t e1000_phy_reset_dsp(struct e1000_hw *hw);
+static int32_t e1000_write_eeprom_spi(struct e1000_hw *hw, uint16_t offset,
+                                      uint16_t words, uint16_t *data);
+static int32_t e1000_write_eeprom_microwire(struct e1000_hw *hw,
+                                            uint16_t offset, uint16_t words,
+                                            uint16_t *data);
+static int32_t e1000_spi_eeprom_ready(struct e1000_hw *hw);
+static void e1000_raise_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
+static void e1000_lower_ee_clk(struct e1000_hw *hw, uint32_t *eecd);
+static void e1000_shift_out_ee_bits(struct e1000_hw *hw, uint16_t data,
+                                    uint16_t count);
+static int32_t e1000_write_phy_reg_ex(struct e1000_hw *hw, uint32_t reg_addr,
+                                      uint16_t phy_data);
+static int32_t e1000_read_phy_reg_ex(struct e1000_hw *hw,uint32_t reg_addr,
+                                     uint16_t *phy_data);
+static uint16_t e1000_shift_in_ee_bits(struct e1000_hw *hw, uint16_t count);
+static int32_t e1000_acquire_eeprom(struct e1000_hw *hw);
+static void e1000_release_eeprom(struct e1000_hw *hw);
+static void e1000_standby_eeprom(struct e1000_hw *hw);
+static int32_t e1000_set_vco_speed(struct e1000_hw *hw);
+static int32_t e1000_polarity_reversal_workaround(struct e1000_hw *hw);
+static int32_t e1000_set_phy_mode(struct e1000_hw *hw);
+static int32_t e1000_host_if_read_cookie(struct e1000_hw *hw, uint8_t *buffer);
+static uint8_t e1000_calculate_mng_checksum(char *buffer, uint32_t length);
+static int32_t e1000_configure_kmrn_for_10_100(struct e1000_hw *hw,
+                                               uint16_t duplex);
+static int32_t e1000_configure_kmrn_for_1000(struct e1000_hw *hw);
+
+/* IGP cable length table */
+static const
+uint16_t e1000_igp_cable_length_table[IGP01E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+      25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+      40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+      60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+      90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+      100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+      110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+
+static const
+uint16_t e1000_igp_2_cable_length_table[IGP02E1000_AGC_LENGTH_TABLE_SIZE] =
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
+
+
+/******************************************************************************
+ * Set the phy type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_set_phy_type(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_set_phy_type");
+
+    if (hw->mac_type == e1000_undefined)
+        return -E1000_ERR_PHY_TYPE;
+
+    switch (hw->phy_id) {
+    case M88E1000_E_PHY_ID:
+    case M88E1000_I_PHY_ID:
+    case M88E1011_I_PHY_ID:
+    case M88E1111_I_PHY_ID:
+        hw->phy_type = e1000_phy_m88;
+        break;
+    case IGP01E1000_I_PHY_ID:
+        if (hw->mac_type == e1000_82541 ||
+            hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            hw->phy_type = e1000_phy_igp;
+            break;
+        }
+        fallthrough;
+    case IGP03E1000_E_PHY_ID:
+        hw->phy_type = e1000_phy_igp_3;
+        break;
+    case IFE_E_PHY_ID:
+    case IFE_PLUS_E_PHY_ID:
+    case IFE_C_E_PHY_ID:
+        hw->phy_type = e1000_phy_ife;
+        break;
+    case GG82563_E_PHY_ID:
+        if (hw->mac_type == e1000_80003es2lan) {
+            hw->phy_type = e1000_phy_gg82563;
+            break;
+        }
+        fallthrough;
+    default:
+        /* Should never have loaded on this device */
+        hw->phy_type = e1000_phy_undefined;
+        return -E1000_ERR_PHY_TYPE;
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * IGP phy init script - initializes the GbE PHY
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_phy_init_script(struct e1000_hw *hw)
+{
+    uint32_t ret_val;
+    uint16_t phy_saved_data;
+
+    DEBUGFUNC("e1000_phy_init_script");
+
+    if (hw->phy_init_script) {
+        msec_delay(20);
+
+        /* Save off the current value of register 0x2F5B to be restored at
+         * the end of this routine. */
+        ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+        /* Disable the PHY transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+        msec_delay(20);
+
+        e1000_write_phy_reg(hw,0x0000,0x0140);
+
+        msec_delay(5);
+
+        switch (hw->mac_type) {
+        case e1000_82541:
+        case e1000_82547:
+            e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+            e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+            e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+            e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+            e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+            e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+            e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+            e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+            e1000_write_phy_reg(hw, 0x2010, 0x0008);
+            break;
+
+        case e1000_82541_rev_2:
+        case e1000_82547_rev_2:
+            e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+            break;
+        default:
+            break;
+        }
+
+        e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+        msec_delay(20);
+
+        /* Now enable the transmitter */
+        e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+        if (hw->mac_type == e1000_82547) {
+            uint16_t fused, fine, coarse;
+
+            /* Move to analog registers page */
+            e1000_read_phy_reg(hw, IGP01E1000_ANALOG_SPARE_FUSE_STATUS, &fused);
+
+            if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+                e1000_read_phy_reg(hw, IGP01E1000_ANALOG_FUSE_STATUS, &fused);
+
+                fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+                coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
+                if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+                    coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+                } else if (coarse == IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+                    fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+                fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+                        (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+                        (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_CONTROL, fused);
+                e1000_write_phy_reg(hw, IGP01E1000_ANALOG_FUSE_BYPASS,
+                                    IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+            }
+        }
+    }
+}
+
+/******************************************************************************
+ * Set the mac type member in the hw struct.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_set_mac_type(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_set_mac_type");
+
+    switch (hw->device_id) {
+    case E1000_DEV_ID_82542:
+        switch (hw->revision_id) {
+        case E1000_82542_2_0_REV_ID:
+            hw->mac_type = e1000_82542_rev2_0;
+            break;
+        case E1000_82542_2_1_REV_ID:
+            hw->mac_type = e1000_82542_rev2_1;
+            break;
+        default:
+            /* Invalid 82542 revision ID */
+            return -E1000_ERR_MAC_TYPE;
+        }
+        break;
+    case E1000_DEV_ID_82543GC_FIBER:
+    case E1000_DEV_ID_82543GC_COPPER:
+        hw->mac_type = e1000_82543;
+        break;
+    case E1000_DEV_ID_82544EI_COPPER:
+    case E1000_DEV_ID_82544EI_FIBER:
+    case E1000_DEV_ID_82544GC_COPPER:
+    case E1000_DEV_ID_82544GC_LOM:
+        hw->mac_type = e1000_82544;
+        break;
+    case E1000_DEV_ID_82540EM:
+    case E1000_DEV_ID_82540EM_LOM:
+    case E1000_DEV_ID_82540EP:
+    case E1000_DEV_ID_82540EP_LOM:
+    case E1000_DEV_ID_82540EP_LP:
+        hw->mac_type = e1000_82540;
+        break;
+    case E1000_DEV_ID_82545EM_COPPER:
+    case E1000_DEV_ID_82545EM_FIBER:
+        hw->mac_type = e1000_82545;
+        break;
+    case E1000_DEV_ID_82545GM_COPPER:
+    case E1000_DEV_ID_82545GM_FIBER:
+    case E1000_DEV_ID_82545GM_SERDES:
+        hw->mac_type = e1000_82545_rev_3;
+        break;
+    case E1000_DEV_ID_82546EB_COPPER:
+    case E1000_DEV_ID_82546EB_FIBER:
+    case E1000_DEV_ID_82546EB_QUAD_COPPER:
+        hw->mac_type = e1000_82546;
+        break;
+    case E1000_DEV_ID_82546GB_COPPER:
+    case E1000_DEV_ID_82546GB_FIBER:
+    case E1000_DEV_ID_82546GB_SERDES:
+    case E1000_DEV_ID_82546GB_PCIE:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER:
+    case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+        hw->mac_type = e1000_82546_rev_3;
+        break;
+    case E1000_DEV_ID_82541EI:
+    case E1000_DEV_ID_82541EI_MOBILE:
+    case E1000_DEV_ID_82541ER_LOM:
+        hw->mac_type = e1000_82541;
+        break;
+    case E1000_DEV_ID_82541ER:
+    case E1000_DEV_ID_82541GI:
+    case E1000_DEV_ID_82541GI_LF:
+    case E1000_DEV_ID_82541GI_MOBILE:
+        hw->mac_type = e1000_82541_rev_2;
+        break;
+    case E1000_DEV_ID_82547EI:
+    case E1000_DEV_ID_82547EI_MOBILE:
+        hw->mac_type = e1000_82547;
+        break;
+    case E1000_DEV_ID_82547GI:
+        hw->mac_type = e1000_82547_rev_2;
+        break;
+    case E1000_DEV_ID_82571EB_COPPER:
+    case E1000_DEV_ID_82571EB_FIBER:
+    case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82571EB_QUAD_COPPER:
+    case E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE:
+        hw->mac_type = e1000_82571;
+        break;
+    case E1000_DEV_ID_82572EI_COPPER:
+    case E1000_DEV_ID_82572EI_FIBER:
+    case E1000_DEV_ID_82572EI_SERDES:
+    case E1000_DEV_ID_82572EI:
+        hw->mac_type = e1000_82572;
+        break;
+    case E1000_DEV_ID_82573E:
+    case E1000_DEV_ID_82573E_IAMT:
+    case E1000_DEV_ID_82573L:
+        hw->mac_type = e1000_82573;
+        break;
+    case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+    case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+        hw->mac_type = e1000_80003es2lan;
+        break;
+    case E1000_DEV_ID_ICH8_IGP_M_AMT:
+    case E1000_DEV_ID_ICH8_IGP_AMT:
+    case E1000_DEV_ID_ICH8_IGP_C:
+    case E1000_DEV_ID_ICH8_IFE:
+    case E1000_DEV_ID_ICH8_IFE_GT:
+    case E1000_DEV_ID_ICH8_IFE_G:
+    case E1000_DEV_ID_ICH8_IGP_M:
+        hw->mac_type = e1000_ich8lan;
+        break;
+    default:
+        /* Should never have loaded on this device */
+        return -E1000_ERR_MAC_TYPE;
+    }
+
+    switch (hw->mac_type) {
+    case e1000_ich8lan:
+        hw->swfwhw_semaphore_present = TRUE;
+        hw->asf_firmware_present = TRUE;
+        break;
+    case e1000_80003es2lan:
+        hw->swfw_sync_present = TRUE;
+        fallthrough;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+        hw->eeprom_semaphore_present = TRUE;
+        fallthrough;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        hw->asf_firmware_present = TRUE;
+        break;
+    default:
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * Set media type and TBI compatibility.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * **************************************************************************/
+void
+e1000_set_media_type(struct e1000_hw *hw)
+{
+    uint32_t status;
+
+    DEBUGFUNC("e1000_set_media_type");
+
+    if (hw->mac_type != e1000_82543) {
+        /* tbi_compatibility is only valid on 82543 */
+        hw->tbi_compatibility_en = FALSE;
+    }
+
+    switch (hw->device_id) {
+    case E1000_DEV_ID_82545GM_SERDES:
+    case E1000_DEV_ID_82546GB_SERDES:
+    case E1000_DEV_ID_82571EB_SERDES:
+    case E1000_DEV_ID_82572EI_SERDES:
+    case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+        hw->media_type = e1000_media_type_internal_serdes;
+        break;
+    default:
+        switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+            hw->media_type = e1000_media_type_fiber;
+            break;
+        case e1000_ich8lan:
+        case e1000_82573:
+            /* The STATUS_TBIMODE bit is reserved or reused for this
+             * device.
+             */
+            hw->media_type = e1000_media_type_copper;
+            break;
+        default:
+            status = E1000_READ_REG(hw, STATUS);
+            if (status & E1000_STATUS_TBIMODE) {
+                hw->media_type = e1000_media_type_fiber;
+                /* tbi_compatibility not valid on fiber */
+                hw->tbi_compatibility_en = FALSE;
+            } else {
+                hw->media_type = e1000_media_type_copper;
+            }
+            break;
+        }
+    }
+}
+
+/******************************************************************************
+ * Reset the transmit and receive units; mask and clear all interrupts.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_reset_hw(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint32_t ctrl_ext;
+    uint32_t icr;
+    uint32_t manc;
+    uint32_t led_ctrl;
+    uint32_t timeout;
+    uint32_t extcnf_ctrl;
+    int32_t ret_val;
+
+    DEBUGFUNC("e1000_reset_hw");
+
+    /* For 82542 (rev 2.0), disable MWI before issuing a device reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+    }
+
+    if (hw->bus_type == e1000_bus_type_pci_express) {
+        /* Prevent the PCI-E bus from sticking if there is no TLP connection
+         * on the last TLP read/write transaction when MAC is reset.
+         */
+        if (e1000_disable_pciex_master(hw) != E1000_SUCCESS) {
+            DEBUGOUT("PCI-E Master disable polling has failed.\n");
+        }
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    E1000_WRITE_REG(hw, IMC, 0xffffffff);
+
+    /* Disable the Transmit and Receive units.  Then delay to allow
+     * any pending transactions to complete before we hit the MAC with
+     * the global reset.
+     */
+    E1000_WRITE_REG(hw, RCTL, 0);
+    E1000_WRITE_REG(hw, TCTL, E1000_TCTL_PSP);
+    E1000_WRITE_FLUSH(hw);
+
+    /* The tbi_compatibility_on Flag must be cleared when Rctl is cleared. */
+    hw->tbi_compatibility_on = FALSE;
+
+    /* Delay to allow any outstanding PCI transactions to complete before
+     * resetting the device
+     */
+    msec_delay(10);
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Must reset the PHY before resetting the MAC */
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_PHY_RST));
+        msec_delay(5);
+    }
+
+    /* Must acquire the MDIO ownership before MAC reset.
+     * Ownership defaults to firmware after a reset. */
+    if (hw->mac_type == e1000_82573) {
+        timeout = 10;
+
+        extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+        extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+        do {
+            E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+            extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+                break;
+            else
+                extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+            msec_delay(2);
+            timeout--;
+        } while (timeout);
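+        /* If the loop expires, MDIO ownership stays with the firmware and
+         * the reset proceeds anyway. */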
+    }
+
+    /* Workaround for ICH8 bit corruption issue in FIFO memory */
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Set Tx and Rx buffer allocation to 8k apiece. */
+        E1000_WRITE_REG(hw, PBA, E1000_PBA_8K);
+        /* Set Packet Buffer Size to 16k. */
+        E1000_WRITE_REG(hw, PBS, E1000_PBS_16K);
+    }
+
+    /* Issue a global reset to the MAC.  This will reset the chip's
+     * transmit, receive, DMA, and link units.  It will not affect
+     * the current PCI configuration.  The global reset bit is self-
+     * clearing, and should clear within a microsecond.
+     */
+    DEBUGOUT("Issuing a global reset to MAC\n");
+
+    switch (hw->mac_type) {
+        case e1000_82544:
+        case e1000_82540:
+        case e1000_82545:
+        case e1000_82546:
+        case e1000_82541:
+        case e1000_82541_rev_2:
+            /* These controllers can't ack the 64-bit write when issuing the
+             * reset, so use IO-mapping as a workaround to issue the reset */
+            E1000_WRITE_REG_IO(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_82545_rev_3:
+        case e1000_82546_rev_3:
+            /* Reset is performed on a shadow of the control register */
+            E1000_WRITE_REG(hw, CTRL_DUP, (ctrl | E1000_CTRL_RST));
+            break;
+        case e1000_ich8lan:
+            if (!hw->phy_reset_disable &&
+                e1000_check_phy_reset_block(hw) == E1000_SUCCESS) {
+                /* e1000_ich8lan PHY HW reset requires MAC CORE reset
+                 * at the same time to make sure the interface between
+                 * MAC and the external PHY is reset.
+                 */
+                ctrl |= E1000_CTRL_PHY_RST;
+            }
+
+            e1000_get_software_flag(hw);
+            E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            msec_delay(5);
+            break;
+        default:
+            E1000_WRITE_REG(hw, CTRL, (ctrl | E1000_CTRL_RST));
+            break;
+    }
+
+    /* After MAC reset, force reload of EEPROM to restore power-on settings to
+     * device.  Later controllers reload the EEPROM automatically, so just wait
+     * for reload to complete.
+     */
+    switch (hw->mac_type) {
+        case e1000_82542_rev2_0:
+        case e1000_82542_rev2_1:
+        case e1000_82543:
+        case e1000_82544:
+            /* Wait for reset to complete */
+            usec_delay(10);
+            ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+            ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+            E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+            E1000_WRITE_FLUSH(hw);
+            /* Wait for EEPROM reload */
+            msec_delay(2);
+            break;
+        case e1000_82541:
+        case e1000_82541_rev_2:
+        case e1000_82547:
+        case e1000_82547_rev_2:
+            /* Wait for EEPROM reload */
+            msec_delay(20);
+            break;
+        case e1000_82573:
+            if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+                usec_delay(10);
+                ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+                ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+                E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+                E1000_WRITE_FLUSH(hw);
+            }
+            fallthrough;
+        case e1000_82571:
+        case e1000_82572:
+        case e1000_ich8lan:
+        case e1000_80003es2lan:
+            ret_val = e1000_get_auto_rd_done(hw);
+            if (ret_val)
+                /* We don't want to continue accessing MAC registers. */
+                return ret_val;
+            break;
+        default:
+            /* Wait for EEPROM reload (it happens automatically) */
+            msec_delay(5);
+            break;
+    }
+
+    /* Disable HW ARPs */
+    manc = E1000_READ_REG(hw, MANC);
+    manc &= ~(E1000_MANC_ARP_EN | E1000_MANC_ARP_RES_EN);
+    E1000_WRITE_REG(hw, MANC, manc);
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        e1000_phy_init_script(hw);
+
+        /* Configure activity LED after PHY reset */
+        led_ctrl = E1000_READ_REG(hw, LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+    }
+
+    /* Clear interrupt mask to stop board from generating interrupts */
+    DEBUGOUT("Masking off all interrupts\n");
+    E1000_WRITE_REG(hw, IMC, 0xffffffff);
+
+    /* Clear any pending interrupt events. */
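+    /* (ICR is read-to-clear, so the read below performs the acknowledge.) */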
+    icr = E1000_READ_REG(hw, ICR);
+
+    /* If MWI was previously enabled, reenable it. */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        uint32_t kab = E1000_READ_REG(hw, KABGTXD);
+        kab |= E1000_KABGTXD_BGSQLBIAS;
+        E1000_WRITE_REG(hw, KABGTXD, kab);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Performs basic configuration of the adapter.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Assumes that the controller has previously been reset and is in a
+ * post-reset uninitialized state. Initializes the receive address registers,
+ * multicast table, and VLAN filter table. Calls routines to setup link
+ * configuration and flow control settings. Clears all on-chip counters. Leaves
+ * the transmit and receive units disabled and uninitialized.
+ *****************************************************************************/
+int32_t
+e1000_init_hw(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint32_t i;
+    int32_t ret_val;
+    uint16_t pcix_cmd_word;
+    uint16_t pcix_stat_hi_word;
+    uint16_t cmd_mmrbc;
+    uint16_t stat_mmrbc;
+    uint32_t mta_size;
+    uint32_t reg_data;
+    uint32_t ctrl_ext;
+
+    DEBUGFUNC("e1000_init_hw");
+
+    /* Initialize Identification LED */
+    ret_val = e1000_id_led_init(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Initializing Identification LED\n");
+        return ret_val;
+    }
+
+    /* Set the media type and TBI compatibility */
+    e1000_set_media_type(hw);
+
+    /* Disabling VLAN filtering. */
+    DEBUGOUT("Initializing the IEEE VLAN\n");
+    /* VET hardcoded to standard value and VFTA removed in ICH8 LAN */
+    if (hw->mac_type != e1000_ich8lan) {
+        if (hw->mac_type < e1000_82545_rev_3)
+            E1000_WRITE_REG(hw, VET, 0);
+        e1000_clear_vfta(hw);
+    }
+
+    /* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+        e1000_pci_clear_mwi(hw);
+        E1000_WRITE_REG(hw, RCTL, E1000_RCTL_RST);
+        E1000_WRITE_FLUSH(hw);
+        msec_delay(5);
+    }
+
+    /* Setup the receive address. This involves initializing all of the Receive
+     * Address Registers (RARs 0 - 15).
+     */
+    e1000_init_rx_addrs(hw);
+
+    /* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+    if (hw->mac_type == e1000_82542_rev2_0) {
+        E1000_WRITE_REG(hw, RCTL, 0);
+        E1000_WRITE_FLUSH(hw);
+        msec_delay(1);
+        if (hw->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+            e1000_pci_set_mwi(hw);
+    }
+
+    /* Zero out the Multicast HASH table */
+    DEBUGOUT("Zeroing the MTA\n");
+    mta_size = E1000_MC_TBL_SIZE;
+    if (hw->mac_type == e1000_ich8lan)
+        mta_size = E1000_MC_TBL_SIZE_ICH8LAN;
+    for (i = 0; i < mta_size; i++) {
+        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+        /* use write flush to prevent Memory Write Block (MWB) from
+         * occurring when accessing our register space */
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    /* Set the PCI priority bit correctly in the CTRL register.  This
+     * determines if the adapter gives priority to receives, or if it
+     * gives equal priority to transmits and receives.  Valid only on
+     * 82542 and 82543 silicon.
+     */
+    if (hw->dma_fairness && hw->mac_type <= e1000_82543) {
+        ctrl = E1000_READ_REG(hw, CTRL);
+        E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PRIOR);
+    }
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        /* Workaround for PCI-X problem when BIOS sets MMRBC incorrectly. */
+        if (hw->bus_type == e1000_bus_type_pcix) {
+            e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd_word);
+            e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI,
+                &pcix_stat_hi_word);
+            cmd_mmrbc = (pcix_cmd_word & PCIX_COMMAND_MMRBC_MASK) >>
+                PCIX_COMMAND_MMRBC_SHIFT;
+            stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+                PCIX_STATUS_HI_MMRBC_SHIFT;
+            if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+                stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+            if (cmd_mmrbc > stat_mmrbc) {
+                pcix_cmd_word &= ~PCIX_COMMAND_MMRBC_MASK;
+                pcix_cmd_word |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+                e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER,
+                    &pcix_cmd_word);
+            }
+        }
+        break;
+    }
+
+    /* More time needed for PHY to initialize */
+    if (hw->mac_type == e1000_ich8lan)
+        msec_delay(15);
+
+    /* Call a subroutine to configure the link and setup flow control. */
+    ret_val = e1000_setup_link(hw);
+
+    /* Set the transmit descriptor write-back policy */
+    if (hw->mac_type > e1000_82544) {
+        ctrl = E1000_READ_REG(hw, TXDCTL);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        switch (hw->mac_type) {
+        default:
+            break;
+        case e1000_82571:
+        case e1000_82572:
+        case e1000_82573:
+        case e1000_ich8lan:
+        case e1000_80003es2lan:
+            ctrl |= E1000_TXDCTL_COUNT_DESC;
+            break;
+        }
+        E1000_WRITE_REG(hw, TXDCTL, ctrl);
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        e1000_enable_tx_pkt_filtering(hw);
+    }
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_80003es2lan:
+        /* Enable retransmit on late collisions */
+        reg_data = E1000_READ_REG(hw, TCTL);
+        reg_data |= E1000_TCTL_RTLC;
+        E1000_WRITE_REG(hw, TCTL, reg_data);
+
+        /* Configure Gigabit Carry Extend Padding */
+        reg_data = E1000_READ_REG(hw, TCTL_EXT);
+        reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TCTL_EXT_GCEX;
+        E1000_WRITE_REG(hw, TCTL_EXT, reg_data);
+
+        /* Configure Transmit Inter-Packet Gap */
+        reg_data = E1000_READ_REG(hw, TIPG);
+        reg_data &= ~E1000_TIPG_IPGT_MASK;
+        reg_data |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+        E1000_WRITE_REG(hw, TIPG, reg_data);
+
+        reg_data = E1000_READ_REG_ARRAY(hw, FFLT, 0x0001);
+        reg_data &= ~0x00100000;
+        E1000_WRITE_REG_ARRAY(hw, FFLT, 0x0001, reg_data);
+        fallthrough;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_ich8lan:
+        ctrl = E1000_READ_REG(hw, TXDCTL1);
+        ctrl = (ctrl & ~E1000_TXDCTL_WTHRESH) | E1000_TXDCTL_FULL_TX_DESC_WB;
+        if (hw->mac_type >= e1000_82571)
+            ctrl |= E1000_TXDCTL_COUNT_DESC;
+        E1000_WRITE_REG(hw, TXDCTL1, ctrl);
+        break;
+    }
+
+    if (hw->mac_type == e1000_82573) {
+        uint32_t gcr = E1000_READ_REG(hw, GCR);
+        gcr |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+        E1000_WRITE_REG(hw, GCR, gcr);
+    }
+
+    /* Clear all of the statistics registers (clear on read).  It is
+     * important that we do this after we have tried to establish link
+     * because the symbol error count will increment wildly if there
+     * is no link.
+     */
+    e1000_clear_hw_cntrs(hw);
+
+    /* ICH8/Nahum No-snoop bits are opposite polarity.
+     * Set to snoop by default after reset. */
+    if (hw->mac_type == e1000_ich8lan)
+        e1000_set_pci_ex_no_snoop(hw, PCI_EX_82566_SNOOP_ALL);
+
+    if (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER ||
+        hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3) {
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        /* Relaxed ordering must be disabled to avoid a parity
+         * error crash in a PCI slot. */
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
+    return ret_val;
+}
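+
+/* Illustrative bring-up order, inferred from the comments above (a sketch,
+ * not a verbatim call sequence from the adapter code): the MAC type is
+ * classified from hw->device_id first, then:
+ *
+ *     e1000_reset_hw(hw);   // global reset; EEPROM settings reloaded
+ *     e1000_init_hw(hw);    // assumes a prior reset; calls
+ *                           // e1000_set_media_type() and e1000_setup_link()
+ */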
+
+/******************************************************************************
+ * Adjust SERDES output amplitude based on EEPROM setting.
+ *
+ * hw - Struct containing variables accessed by shared code.
+ *****************************************************************************/
+static int32_t
+e1000_adjust_serdes_amplitude(struct e1000_hw *hw)
+{
+    uint16_t eeprom_data;
+    int32_t  ret_val;
+
+    DEBUGFUNC("e1000_adjust_serdes_amplitude");
+
+    if (hw->media_type != e1000_media_type_internal_serdes)
+        return E1000_SUCCESS;
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+        break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    ret_val = e1000_read_eeprom(hw, EEPROM_SERDES_AMPLITUDE, 1, &eeprom_data);
+    if (ret_val) {
+        return ret_val;
+    }
+
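+    /* EEPROM_RESERVED_WORD means no amplitude override was programmed;
+     * in that case the PHY default is left untouched. */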
+    if (eeprom_data != EEPROM_RESERVED_WORD) {
+        /* Adjust SERDES output amplitude only. */
+        eeprom_data &= EEPROM_SERDES_AMPLITUDE_MASK;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_EXT_CTRL, eeprom_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control and link settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Determines which flow control settings to use. Calls the appropriate media-
+ * specific link configuration function. Configures the flow control settings.
+ * Assuming the adapter has a valid link partner, a valid link should be
+ * established. Assumes the hardware has previously been reset and the
+ * transmitter and receiver are not enabled.
+ *****************************************************************************/
+int32_t
+e1000_setup_link(struct e1000_hw *hw)
+{
+    uint32_t ctrl_ext;
+    int32_t ret_val;
+    uint16_t eeprom_data;
+
+    DEBUGFUNC("e1000_setup_link");
+
+    /* In the case of the phy reset being blocked, we already have a link.
+     * We do not have to set it up again. */
+    if (e1000_check_phy_reset_block(hw))
+        return E1000_SUCCESS;
+
+    /* Read and store word 0x0F of the EEPROM. This word contains bits
+     * that determine the hardware's default PAUSE (flow control) mode,
+     * a bit that determines whether the HW defaults to enabling or
+     * disabling auto-negotiation, and the direction of the
+     * SW defined pins. If there is no SW over-ride of the flow
+     * control setting, then the variable hw->fc will
+     * be initialized based on a value in the EEPROM.
+     */
+    if (hw->fc == e1000_fc_default) {
+        switch (hw->mac_type) {
+        case e1000_ich8lan:
+        case e1000_82573:
+            hw->fc = e1000_fc_full;
+            break;
+        default:
+            ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                        1, &eeprom_data);
+            if (ret_val) {
+                DEBUGOUT("EEPROM Read Error\n");
+                return -E1000_ERR_EEPROM;
+            }
+            if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) == 0)
+                hw->fc = e1000_fc_none;
+            else if ((eeprom_data & EEPROM_WORD0F_PAUSE_MASK) ==
+                    EEPROM_WORD0F_ASM_DIR)
+                hw->fc = e1000_fc_tx_pause;
+            else
+                hw->fc = e1000_fc_full;
+            break;
+        }
+    }
+
+    /* We want to save off the original Flow Control configuration just
+     * in case we get disconnected and then reconnected into a different
+     * hub or switch with different Flow Control capabilities.
+     */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        hw->fc &= (~e1000_fc_tx_pause);
+
+    if ((hw->mac_type < e1000_82543) && (hw->report_tx_early == 1))
+        hw->fc &= (~e1000_fc_rx_pause);
+
+    hw->original_fc = hw->fc;
+
+    DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc);
+
+    /* Take the 4 bits from EEPROM word 0x0F that determine the initial
+     * polarity value for the SW controlled pins, and setup the
+     * Extended Device Control reg with that info.
+     * This is needed because one of the SW controlled pins is used for
+     * signal detection.  So this should be done before e1000_setup_pcs_link()
+     * or e1000_phy_setup() is called.
+     */
+    if (hw->mac_type == e1000_82543) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_INIT_CONTROL2_REG,
+                                    1, &eeprom_data);
+        if (ret_val) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        ctrl_ext = ((eeprom_data & EEPROM_WORD0F_SWPDIO_EXT) <<
+                    SWDPIO__EXT_SHIFT);
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
+    /* Call the necessary subroutine to configure the link. */
+    ret_val = (hw->media_type == e1000_media_type_copper) ?
+              e1000_setup_copper_link(hw) :
+              e1000_setup_fiber_serdes_link(hw);
+
+    /* Initialize the flow control address, type, and PAUSE timer
+     * registers to their default values.  This is done even if flow
+     * control is disabled, because it does not hurt anything to
+     * initialize these registers.
+     */
+    DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+
+    /* FCAL/H and FCT are hardcoded to standard values in e1000_ich8lan. */
+    if (hw->mac_type != e1000_ich8lan) {
+        E1000_WRITE_REG(hw, FCT, FLOW_CONTROL_TYPE);
+        E1000_WRITE_REG(hw, FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+        E1000_WRITE_REG(hw, FCAL, FLOW_CONTROL_ADDRESS_LOW);
+    }
+
+    E1000_WRITE_REG(hw, FCTTV, hw->fc_pause_time);
+
+    /* Set the flow control receive threshold registers.  Normally,
+     * these registers will be set to a default threshold that may be
+     * adjusted later by the driver's runtime code.  However, if the
+     * ability to transmit pause frames is not enabled, then these
+     * registers will be set to 0.
+     */
+    if (!(hw->fc & e1000_fc_tx_pause)) {
+        E1000_WRITE_REG(hw, FCRTL, 0);
+        E1000_WRITE_REG(hw, FCRTH, 0);
+    } else {
+        /* We need to set up the Receive Threshold high and low water marks
+         * as well as (optionally) enabling the transmission of XON frames.
+         */
+        if (hw->fc_send_xon) {
+            E1000_WRITE_REG(hw, FCRTL, (hw->fc_low_water | E1000_FCRTL_XONE));
+            E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+        } else {
+            E1000_WRITE_REG(hw, FCRTL, hw->fc_low_water);
+            E1000_WRITE_REG(hw, FCRTH, hw->fc_high_water);
+        }
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+ * Sets up link for a fiber based or serdes based adapter
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Manipulates Physical Coding Sublayer functions in order to configure
+ * link. Assumes the hardware has been previously reset and the transmitter
+ * and receiver are not enabled.
+ *****************************************************************************/
+static int32_t
+e1000_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint32_t status;
+    uint32_t txcw = 0;
+    uint32_t i;
+    uint32_t signal = 0;
+    int32_t ret_val;
+
+    DEBUGFUNC("e1000_setup_fiber_serdes_link");
+
+    /* On 82571 and 82572 Fiber connections, SerDes loopback mode persists
+     * until explicitly turned off or a power cycle is performed.  A read to
+     * the register does not indicate its status.  Therefore, we ensure
+     * loopback mode is disabled during initialization.
+     */
+    if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572)
+        E1000_WRITE_REG(hw, SCTL, E1000_DISABLE_SERDES_LOOPBACK);
+
+    /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     * If we're on serdes media, adjust the output amplitude to value set in
+     * the EEPROM.
+     */
+    ctrl = E1000_READ_REG(hw, CTRL);
+    if (hw->media_type == e1000_media_type_fiber)
+        signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+
+    ret_val = e1000_adjust_serdes_amplitude(hw);
+    if (ret_val)
+        return ret_val;
+
+    /* Take the link out of reset */
+    ctrl &= ~(E1000_CTRL_LRST);
+
+    /* Adjust VCO speed to improve BER performance */
+    ret_val = e1000_set_vco_speed(hw);
+    if (ret_val)
+        return ret_val;
+
+    e1000_config_collision_dist(hw);
+
+    /* Check for a software override of the flow control settings, and setup
+     * the device accordingly.  If auto-negotiation is enabled, then software
+     * will have to set the "PAUSE" bits to the correct value in the Tranmsit
+     * Config Word Register (TXCW) and re-start auto-negotiation.  However, if
+     * auto-negotiation is disabled, then software will have to manually
+     * configure the two flow control enable bits in the CTRL register.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames, but
+     *          not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames but we do
+     *          not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     */
+    switch (hw->fc) {
+    case e1000_fc_none:
+        /* Flow control is completely disabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+        break;
+    case e1000_fc_rx_pause:
+        /* RX Flow control is enabled and TX Flow control is disabled by a
+         * software over-ride. Since there really isn't a way to advertise
+         * that we are capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE. Later, we will
+         * disable the adapter's ability to send PAUSE frames.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    case e1000_fc_tx_pause:
+        /* TX Flow control is enabled, and RX Flow control is disabled, by a
+         * software over-ride.
+         */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+        break;
+    case e1000_fc_full:
+        /* Flow control (both RX and TX) is enabled by a software over-ride. */
+        txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    /* Since auto-negotiation is enabled, take the link out of reset (the link
+     * will be in reset, because we previously reset the chip). This will
+     * restart auto-negotiation.  If auto-negotiation is successful then the
+     * link-up status bit will be set and the flow control enable bits (RFCE
+     * and TFCE) will be set according to their negotiated value.
+     */
+    DEBUGOUT("Auto-negotiation enabled\n");
+
+    E1000_WRITE_REG(hw, TXCW, txcw);
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    E1000_WRITE_FLUSH(hw);
+
+    hw->txcw = txcw;
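+    /* Cache TXCW so later link checks can restore the advertisement when
+     * restarting autonegotiation. */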
+    msec_delay(1);
+
+    /* If we have a signal (the cable is plugged in) then poll for a "Link-Up"
+     * indication in the Device Status Register.  Time-out if a link isn't
+     * seen in 500 milliseconds (Auto-negotiation should complete in
+     * less than 500 milliseconds even if the other end is doing it in SW).
+     * For internal serdes, we just assume a signal is present, then poll.
+     */
+    if (hw->media_type == e1000_media_type_internal_serdes ||
+       (E1000_READ_REG(hw, CTRL) & E1000_CTRL_SWDPIN1) == signal) {
+        DEBUGOUT("Looking for Link\n");
+        for (i = 0; i < (LINK_UP_TIMEOUT / 10); i++) {
+            msec_delay(10);
+            status = E1000_READ_REG(hw, STATUS);
+            if (status & E1000_STATUS_LU) break;
+        }
+        if (i == (LINK_UP_TIMEOUT / 10)) {
+            DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+            hw->autoneg_failed = 1;
+            /* AutoNeg failed to achieve a link, so we'll call
+             * e1000_check_for_link. This routine will force the link up if
+             * we detect a signal. This will allow us to communicate with
+             * non-autonegotiating link partners.
+             */
+            ret_val = e1000_check_for_link(hw);
+            if (ret_val) {
+                DEBUGOUT("Error while checking for link\n");
+                return ret_val;
+            }
+            hw->autoneg_failed = 0;
+        } else {
+            hw->autoneg_failed = 0;
+            DEBUGOUT("Valid Link Found\n");
+        }
+    } else {
+        DEBUGOUT("No Signal Detected\n");
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Make sure we have a valid PHY and change PHY mode before link setup.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_copper_link_preconfig(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_preconfig");
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    /* With 82543, we need to force speed and duplex on the MAC equal to what
+     * the PHY speed and duplex configuration is. In addition, we need to
+     * perform a hardware reset on the PHY to take it out of reset.
+     */
+    if (hw->mac_type > e1000_82543) {
+        ctrl |= E1000_CTRL_SLU;
+        ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+    } else {
+        ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX | E1000_CTRL_SLU);
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Make sure we have a valid PHY */
+    ret_val = e1000_detect_gig_phy(hw);
+    if (ret_val) {
+        DEBUGOUT("Error, did not detect valid phy.\n");
+        return ret_val;
+    }
+    DEBUGOUT1("Phy ID = %x \n", hw->phy_id);
+
+    /* Set PHY to class A mode (if necessary) */
+    ret_val = e1000_set_phy_mode(hw);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82545_rev_3) ||
+       (hw->mac_type == e1000_82546_rev_3)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+        phy_data |= 0x00000008;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (hw->mac_type <= e1000_82543 ||
+        hw->mac_type == e1000_82541 || hw->mac_type == e1000_82547 ||
+        hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2)
+        hw->phy_reset_disable = FALSE;
+
+    return E1000_SUCCESS;
+}
+
+
+/********************************************************************
+* Copper link setup for e1000_phy_igp series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_igp_setup(struct e1000_hw *hw)
+{
+    uint32_t led_ctrl;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_igp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    /* Wait 15ms for MAC to configure PHY from eeprom settings */
+    msec_delay(15);
+    if (hw->mac_type != e1000_ich8lan) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = E1000_READ_REG(hw, LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+    }
+
+    /* disable lplu d3 during driver init */
+    ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+    if (ret_val) {
+        DEBUGOUT("Error Disabling LPLU D3\n");
+        return ret_val;
+    }
+
+    /* disable lplu d0 during driver init */
+    ret_val = e1000_set_d0_lplu_state(hw, FALSE);
+    if (ret_val) {
+        DEBUGOUT("Error Disabling LPLU D0\n");
+        return ret_val;
+    }
+    /* Configure mdi-mdix settings */
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        hw->dsp_config_state = e1000_dsp_config_disabled;
+        /* Force MDI for earlier revs of the IGP PHY */
+        phy_data &= ~(IGP01E1000_PSCR_AUTO_MDIX | IGP01E1000_PSCR_FORCE_MDI_MDIX);
+        hw->mdix = 1;
+
+    } else {
+        hw->dsp_config_state = e1000_dsp_config_enabled;
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
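+        /* hw->mdix uses the same encoding as the other PHY setups:
+         * 0 = auto, 1 = force MDI, 2 = force MDI-X. */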
+        switch (hw->mdix) {
+        case 1:
+            phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 2:
+            phy_data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= IGP01E1000_PSCR_AUTO_MDIX;
+            break;
+        }
+    }
+    ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* set auto-master slave resolution settings */
+    if (hw->autoneg) {
+        e1000_ms_type phy_ms_setting = hw->master_slave;
+
+        if (hw->ffe_config_state == e1000_ffe_config_active)
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+
+        if (hw->dsp_config_state == e1000_dsp_config_activated)
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+
+        /* When the autonegotiation advertisement is 1000Mbps only, we
+         * should disable SmartSpeed and enable automatic Master/Slave
+         * resolution, the hardware default. */
+        if (hw->autoneg_advertised == ADVERTISE_1000_FULL) {
+            /* Disable SmartSpeed */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+            /* Set auto Master/Slave resolution process */
+            ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+            if (ret_val)
+                return ret_val;
+            phy_data &= ~CR_1000T_MS_ENABLE;
+            ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* load defaults for future use */
+        hw->original_master_slave = (phy_data & CR_1000T_MS_ENABLE) ?
+                                        ((phy_data & CR_1000T_MS_VALUE) ?
+                                         e1000_ms_force_master :
+                                         e1000_ms_force_slave) :
+                                         e1000_ms_auto;
+
+        switch (phy_ms_setting) {
+        case e1000_ms_force_master:
+            phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_force_slave:
+            phy_data |= CR_1000T_MS_ENABLE;
+            phy_data &= ~(CR_1000T_MS_VALUE);
+            break;
+        case e1000_ms_auto:
+            phy_data &= ~CR_1000T_MS_ENABLE;
+            fallthrough;
+        default:
+            break;
+        }
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_gg82563 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_ggp_setup(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+    uint32_t reg_data;
+
+    DEBUGFUNC("e1000_copper_link_ggp_setup");
+
+    if (!hw->phy_reset_disable) {
+
+        /* Enable CRS on TX for half-duplex operation. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+        /* Use 25MHz for both link down and 1000BASE-T for Tx clock */
+        phy_data |= GG82563_MSCR_TX_CLK_1000MBPS_25MHZ;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Options:
+         *   MDI/MDI-X = 0 (default)
+         *   0 - Auto for all speeds
+         *   1 - MDI mode
+         *   2 - MDI-X mode
+         *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+        switch (hw->mdix) {
+        case 1:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+            break;
+        case 2:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+            break;
+        case 0:
+        default:
+            phy_data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+            break;
+        }
+
+        /* Options:
+         *   disable_polarity_correction = 0 (default)
+         *       Automatic Correction for Reversed Cable Polarity
+         *   0 - Disabled
+         *   1 - Enabled
+         */
+        phy_data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        if (hw->disable_polarity_correction == 1)
+            phy_data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        /* SW Reset the PHY so all changes take effect */
+        ret_val = e1000_phy_reset(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Resetting the PHY\n");
+            return ret_val;
+        }
+    } /* phy_reset_disable */
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Bypass RX and TX FIFO's */
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL,
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS |
+                                       E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, phy_data);
+
+        if (ret_val)
+            return ret_val;
+
+        reg_data = E1000_READ_REG(hw, CTRL_EXT);
+        reg_data &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+        E1000_WRITE_REG(hw, CTRL_EXT, reg_data);
+
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Do not init these registers when the HW is in IAMT mode, since the
+         * firmware will have already initialized them.  We only initialize
+         * them if the HW is not in IAMT mode.
+         */
+        if (e1000_check_mng_mode(hw) == FALSE) {
+            /* Enable Electrical Idle on the PHY */
+            phy_data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+            ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+                                          phy_data);
+
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* Workaround: Disable padding in Kumeran interface in the MAC
+         * and in the PHY to avoid CRC errors.
+         */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        phy_data |= GG82563_ICR_DIS_PADDING;
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL,
+                                      phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_m88 series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_mgp_setup(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_mgp_setup");
+
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
+
+    /* Enable CRS on TX. This must be set for half-duplex operation. */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+    /* Options:
+     *   MDI/MDI-X = 0 (default)
+     *   0 - Auto for all speeds
+     *   1 - MDI mode
+     *   2 - MDI-X mode
+     *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+     */
+    phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+    switch (hw->mdix) {
+    case 1:
+        phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+        break;
+    case 2:
+        phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+        break;
+    case 3:
+        phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+        break;
+    case 0:
+    default:
+        phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+        break;
+    }
+
+    /* Options:
+     *   disable_polarity_correction = 0 (default)
+     *       Automatic Correction for Reversed Cable Polarity
+     *   0 - Disabled
+     *   1 - Enabled
+     */
+    phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+    if (hw->disable_polarity_correction == 1)
+        phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_revision < M88E1011_I_REV_4) {
+        /* Force TX_CLK in the Extended PHY Specific Control Register
+         * to 25MHz clock.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+        if ((hw->phy_revision == E1000_REVISION_2) &&
+            (hw->phy_id == M88E1111_I_PHY_ID)) {
+            /* Vidalia Phy, set the downshift counter to 5x */
+            phy_data &= ~(M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK);
+            phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            /* Configure Master and Slave downshift values */
+            phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+                              M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+            phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+                             M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+            ret_val = e1000_write_phy_reg(hw,
+                                        M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+            if (ret_val)
+               return ret_val;
+        }
+    }
+
+    /* SW Reset the PHY so all changes take effect */
+    ret_val = e1000_phy_reset(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Resetting the PHY\n");
+        return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Setup auto-negotiation and flow control advertisements,
+* and then perform auto-negotiation.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_copper_link_autoneg");
+
+    /* Perform some bounds checking on the hw->autoneg_advertised
+     * parameter.  If this variable is zero, then set it to the default.
+     */
+    hw->autoneg_advertised &= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* If autoneg_advertised is zero, we assume it was not defaulted
+     * by the calling code so we set to advertise full capability.
+     */
+    if (hw->autoneg_advertised == 0)
+        hw->autoneg_advertised = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+    /* IFE phy only supports 10/100 */
+    if (hw->phy_type == e1000_phy_ife)
+        hw->autoneg_advertised &= AUTONEG_ADVERTISE_10_100_ALL;
+
+    DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+    ret_val = e1000_phy_setup_autoneg(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Setting up Auto-Negotiation\n");
+        return ret_val;
+    }
+    DEBUGOUT("Restarting Auto-Neg\n");
+
+    /* Restart auto-negotiation by setting the Auto Neg Enable bit and
+     * the Auto Neg Restart bit in the PHY control register.
+     */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Does the user want to wait for Auto-Neg to complete here, or
+     * check at a later time (for example, callback routine).
+     */
+    if (hw->wait_autoneg_complete) {
+        ret_val = e1000_wait_autoneg(hw);
+        if (ret_val) {
+            DEBUGOUT("Error while waiting for autoneg to complete\n");
+            return ret_val;
+        }
+    }
+
+    hw->get_link_status = TRUE;
+
+    return E1000_SUCCESS;
+}
+
+/********************************************************************
+* Copper link setup for e1000_phy_ife (Fast Ethernet PHY) series.
+*
+* hw - Struct containing variables accessed by shared code
+*********************************************************************/
+static int32_t
+e1000_copper_link_ife_setup(struct e1000_hw *hw)
+{
+    if (hw->phy_reset_disable)
+        return E1000_SUCCESS;
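+    /* The 10/100-only IFE PHY needs no extra register setup here; its
+     * autoneg advertisement is trimmed in e1000_copper_link_autoneg(). */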
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Config the MAC and the PHY after link is up.
+*   1) Set up the MAC to the current PHY speed/duplex if we are on 82543.
+*      On newer silicon we only need to configure the collision distance
+*      in the Transmit Control Register.
+*   2) Set up flow control on the MAC to that established with
+*      the link partner.
+*   3) Config DSP to improve Gigabit link quality for some PHY revisions.
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_copper_link_postconfig(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    DEBUGFUNC("e1000_copper_link_postconfig");
+
+    if (hw->mac_type >= e1000_82544) {
+        e1000_config_collision_dist(hw);
+    } else {
+        ret_val = e1000_config_mac_to_phy(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring MAC to PHY settings\n");
+            return ret_val;
+        }
+    }
+    ret_val = e1000_config_fc_after_link_up(hw);
+    if (ret_val) {
+        DEBUGOUT("Error Configuring Flow Control\n");
+        return ret_val;
+    }
+
+    /* Config DSP to improve Giga link quality */
+    if (hw->phy_type == e1000_phy_igp) {
+        ret_val = e1000_config_dsp_after_link_change(hw, TRUE);
+        if (ret_val) {
+            DEBUGOUT("Error Configuring DSP after link up\n");
+            return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Detects which PHY is present and sets up the speed and duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_setup_copper_link(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t i;
+    uint16_t phy_data;
+    uint16_t reg_data;
+
+    DEBUGFUNC("e1000_setup_copper_link");
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        /* Set the mac to wait the maximum time between each
+         * iteration and increase the max iterations when
+         * polling the phy; this fixes erroneous timeouts at 10Mbps. */
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+        if (ret_val)
+            return ret_val;
+        reg_data |= 0x3F;
+        ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+        if (ret_val)
+            return ret_val;
+        fallthrough;
+    default:
+        break;
+    }
+
+    /* Check if it is a valid PHY and set PHY mode if necessary. */
+    ret_val = e1000_copper_link_preconfig(hw);
+    if (ret_val)
+        return ret_val;
+
+    switch (hw->mac_type) {
+    case e1000_80003es2lan:
+        /* Kumeran registers are write-only */
+        reg_data = E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT;
+        reg_data |= E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING;
+        ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_INB_CTRL,
+                                       reg_data);
+        if (ret_val)
+            return ret_val;
+        break;
+    default:
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_copper_link_igp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_m88) {
+        ret_val = e1000_copper_link_mgp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_copper_link_ggp_setup(hw);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_copper_link_ife_setup(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (hw->autoneg) {
+        /* Set up autoneg and flow control advertisement
+         * and perform autonegotiation */
+        ret_val = e1000_copper_link_autoneg(hw);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* PHY will be set to 10H, 10F, 100H, or 100F
+         * depending on the value of forced_speed_duplex. */
+        DEBUGOUT("Forcing speed and duplex\n");
+        ret_val = e1000_phy_force_speed_duplex(hw);
+        if (ret_val) {
+            DEBUGOUT("Error Forcing Speed and Duplex\n");
+            return ret_val;
+        }
+    }
+
+    /* Check link status. Wait up to 100 microseconds for link to become
+     * valid.
+     */
+    for (i = 0; i < 10; i++) {
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
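+        /* The link status bit is latched; read the register twice so
+         * phy_data reflects the current link state. */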
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            /* Config the MAC and PHY after link is up */
+            ret_val = e1000_copper_link_postconfig(hw);
+            if (ret_val)
+                return ret_val;
+
+            DEBUGOUT("Valid link established!!!\n");
+            return E1000_SUCCESS;
+        }
+        usec_delay(10);
+    }
+
+    DEBUGOUT("Unable to establish link!!!\n");
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Configure the MAC-to-PHY interface for 10/100Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_configure_kmrn_for_10_100(struct e1000_hw *hw, uint16_t duplex)
+{
+    int32_t ret_val = E1000_SUCCESS;
+    uint32_t tipg;
+    uint16_t reg_data;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = E1000_READ_REG(hw, TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_10_100;
+    E1000_WRITE_REG(hw, TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
+    if (duplex == HALF_DUPLEX)
+        reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+    else
+        reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
+
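+/******************************************************************************
+* Configure the MAC-to-PHY interface for 1000Mbps
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/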
+static int32_t
+e1000_configure_kmrn_for_1000(struct e1000_hw *hw)
+{
+    int32_t ret_val = E1000_SUCCESS;
+    uint16_t reg_data;
+    uint32_t tipg;
+
+    DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+    reg_data = E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT;
+    ret_val = e1000_write_kmrn_reg(hw, E1000_KUMCTRLSTA_OFFSET_HD_CTRL,
+                                   reg_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Configure Transmit Inter-Packet Gap */
+    tipg = E1000_READ_REG(hw, TIPG);
+    tipg &= ~E1000_TIPG_IPGT_MASK;
+    tipg |= DEFAULT_80003ES2LAN_TIPG_IPGT_1000;
+    E1000_WRITE_REG(hw, TIPG, tipg);
+
+    ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+
+    if (ret_val)
+        return ret_val;
+
+    reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+    ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Configures PHY autoneg and flow control advertisement settings
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t mii_autoneg_adv_reg;
+    uint16_t mii_1000t_ctrl_reg;
+
+    DEBUGFUNC("e1000_phy_setup_autoneg");
+
+    /* Read the MII Auto-Neg Advertisement Register (Address 4). */
+    ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    if (hw->phy_type != e1000_phy_ife) {
+        /* Read the MII 1000Base-T Control Register (Address 9). */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    } else {
+        mii_1000t_ctrl_reg = 0;
+    }
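+    /* The IFE PHY is 10/100-only, so there is no 1000Base-T control
+     * register; the final PHY_1000T_CTRL write is likewise skipped below. */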
+
+    /* Need to parse both autoneg_advertised and fc and set up
+     * the appropriate PHY registers.  First we will parse for
+     * autoneg_advertised software override.  Since we can advertise
+     * a plethora of combinations, we need to check each bit
+     * individually.
+     */
+
+    /* First we clear all the 10/100 mb speed bits in the Auto-Neg
+     * Advertisement Register (Address 4) and the 1000 mb speed bits in
+     * the 1000Base-T Control Register (Address 9).
+     */
+    mii_autoneg_adv_reg &= ~REG4_SPEED_MASK;
+    mii_1000t_ctrl_reg &= ~REG9_SPEED_MASK;
+
+    DEBUGOUT1("autoneg_advertised %x\n", hw->autoneg_advertised);
+
+    /* Do we want to advertise 10 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_HALF) {
+        DEBUGOUT("Advertise 10mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+    }
+
+    /* Do we want to advertise 10 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_10_FULL) {
+        DEBUGOUT("Advertise 10mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Half Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_HALF) {
+        DEBUGOUT("Advertise 100mb Half duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+    }
+
+    /* Do we want to advertise 100 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_100_FULL) {
+        DEBUGOUT("Advertise 100mb Full duplex\n");
+        mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+    }
+
+    /* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+    if (hw->autoneg_advertised & ADVERTISE_1000_HALF) {
+        DEBUGOUT("Advertise 1000mb Half duplex requested, request denied!\n");
+    }
+
+    /* Do we want to advertise 1000 Mb Full Duplex? */
+    if (hw->autoneg_advertised & ADVERTISE_1000_FULL) {
+        DEBUGOUT("Advertise 1000mb Full duplex\n");
+        mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+        if (hw->phy_type == e1000_phy_ife) {
+            DEBUGOUT("e1000_phy_ife is a 10/100 PHY. Gigabit speed is not supported.\n");
+        }
+    }
+
+    /* Check for a software override of the flow control settings, and
+     * setup the PHY advertisement registers accordingly.  If
+     * auto-negotiation is enabled, then software will have to set the
+     * "PAUSE" bits to the correct value in the Auto-Negotiation
+     * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-negotiation.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause frames
+     *          but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not support receiving pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     *  other:  No software override.  The flow control configuration
+     *          in the EEPROM is used.
+     */
+    switch (hw->fc) {
+    case e1000_fc_none: /* 0 */
+        /* Flow control (RX & TX) is completely disabled by a
+         * software over-ride.
+         */
+        mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case e1000_fc_rx_pause: /* 1 */
+        /* RX Flow control is enabled, and TX Flow control is
+         * disabled, by a software over-ride.
+         */
+        /* Since there really isn't a way to advertise that we are
+         * capable of RX Pause ONLY, we will advertise that we
+         * support both symmetric and asymmetric RX PAUSE.  Later
+         * (in e1000_config_fc_after_link_up) we will disable the
+         * hw's ability to send PAUSE frames.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case e1000_fc_tx_pause: /* 2 */
+        /* TX Flow control is enabled, and RX Flow control is
+         * disabled, by a software over-ride.
+         */
+        mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+        mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+        break;
+    case e1000_fc_full: /* 3 */
+        /* Flow control (both RX and TX) is enabled by a software
+         * over-ride.
+         */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+    if (ret_val)
+        return ret_val;
+
+    DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+    if (hw->phy_type != e1000_phy_ife) {
+        ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
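+
+/* Illustrative sketch only, kept under "#if 0" so it is never compiled: the
+ * mapping from the flow control policy to the PAUSE/ASM_DIR advertisement
+ * bits applied by the switch statement above, written as a pure helper over
+ * the NWAY_AR_* masks.  Editor's sketch, not part of the driver.
+ */
+#if 0
+static uint16_t
+e1000_fc_to_adv_bits(uint32_t fc, uint16_t mii_autoneg_adv_reg)
+{
+    switch (fc) {
+    case e1000_fc_none:
+        /* Advertise neither symmetric nor asymmetric PAUSE. */
+        mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case e1000_fc_rx_pause:
+    case e1000_fc_full:
+        /* RX-only cannot be advertised directly; advertise both. */
+        mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+        break;
+    case e1000_fc_tx_pause:
+        mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+        mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+        break;
+    default:
+        /* No software override; leave the register image untouched. */
+        break;
+    }
+    return mii_autoneg_adv_reg;
+}
+#endif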
+
+/******************************************************************************
+* Force PHY speed and duplex settings to hw->forced_speed_duplex
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    int32_t ret_val;
+    uint16_t mii_ctrl_reg;
+    uint16_t mii_status_reg;
+    uint16_t phy_data;
+    uint16_t i;
+
+    DEBUGFUNC("e1000_phy_force_speed_duplex");
+
+    /* Turn off Flow control if we are forcing speed and duplex. */
+    hw->fc = e1000_fc_none;
+
+    DEBUGOUT1("hw->fc = %d\n", hw->fc);
+
+    /* Read the Device Control Register. */
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Set the bits to Force Speed and Duplex in the Device Ctrl Reg. */
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(DEVICE_SPEED_MASK);
+
+    /* Clear the Auto Speed Detect Enable bit. */
+    ctrl &= ~E1000_CTRL_ASDE;
+
+    /* Read the MII Control Register. */
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    /* We need to disable autoneg in order to force link and duplex. */
+
+    mii_ctrl_reg &= ~MII_CR_AUTO_NEG_EN;
+
+    /* Are we forcing Full or Half Duplex? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+        hw->forced_speed_duplex == e1000_10_full) {
+        /* We want to force full duplex so we SET the full duplex bits in the
+         * Device and MII Control Registers.
+         */
+        ctrl |= E1000_CTRL_FD;
+        mii_ctrl_reg |= MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Full Duplex\n");
+    } else {
+        /* We want to force half duplex so we CLEAR the full duplex bits in
+         * the Device and MII Control Registers.
+         */
+        ctrl &= ~E1000_CTRL_FD;
+        mii_ctrl_reg &= ~MII_CR_FULL_DUPLEX;
+        DEBUGOUT("Half Duplex\n");
+    }
+
+    /* Are we forcing 100Mbps? */
+    if (hw->forced_speed_duplex == e1000_100_full ||
+       hw->forced_speed_duplex == e1000_100_half) {
+        /* Set the 100Mb bit and turn off the 1000Mb and 10Mb bits. */
+        ctrl |= E1000_CTRL_SPD_100;
+        mii_ctrl_reg |= MII_CR_SPEED_100;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+        DEBUGOUT("Forcing 100mb ");
+    } else {
+        /* Set the 10Mb bit and turn off the 1000Mb and 100Mb bits. */
+        ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+        mii_ctrl_reg |= MII_CR_SPEED_10;
+        mii_ctrl_reg &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+        DEBUGOUT("Forcing 10mb ");
+    }
+
+    e1000_config_collision_dist(hw);
+
+    /* Write the configured values back to the Device Control Reg. */
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* Clear Auto-Crossover to force MDI manually. M88E1000 requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        DEBUGOUT1("M88E1000 PSCR: %x \n", phy_data);
+
+        /* Need to reset the PHY or these changes will be ignored */
+        mii_ctrl_reg |= MII_CR_RESET;
+    /* Disable MDI-X support for 10/100 */
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IFE_PMC_AUTO_MDIX;
+        phy_data &= ~IFE_PMC_FORCE_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, phy_data);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+         * forced whenever speed or duplex are forced.
+         */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+        phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    /* Write back the modified PHY MII control register. */
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, mii_ctrl_reg);
+    if (ret_val)
+        return ret_val;
+
+    usec_delay(1);
+
+    /* The wait_autoneg_complete flag may be a little misleading here.
+     * Since we are forcing speed and duplex, Auto-Neg is not enabled.
+     * But we do want to delay for a period while forcing only so we
+     * don't generate false No Link messages.  So we will wait here
+     * only if the user has set wait_autoneg_complete to 1, which is
+     * the default.
+     */
+    if (hw->wait_autoneg_complete) {
+        /* We will wait for autoneg to complete. */
+        DEBUGOUT("Waiting for forced speed/duplex link.\n");
+        mii_status_reg = 0;
+
+        /* We will wait for the forced link to come up or the timeout to expire. */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            /* Read the MII Status Register and wait for the Link Status bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msec_delay(100);
+        }
+        if ((i == 0) &&
+           ((hw->phy_type == e1000_phy_m88) ||
+            (hw->phy_type == e1000_phy_gg82563))) {
+            /* We didn't get link.  Reset the DSP and wait again for link. */
+            ret_val = e1000_phy_reset_dsp(hw);
+            if (ret_val) {
+                DEBUGOUT("Error Resetting PHY DSP\n");
+                return ret_val;
+            }
+        }
+        /* This loop will early-out if the link condition has been met.  */
+        for (i = PHY_FORCE_TIME; i > 0; i--) {
+            if (mii_status_reg & MII_SR_LINK_STATUS) break;
+            msec_delay(100);
+            /* Read the MII Status Register and wait for the Link Status bit
+             * to be set.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    if (hw->phy_type == e1000_phy_m88) {
+        /* Because we reset the PHY above, we need to re-force TX_CLK in the
+         * Extended PHY Specific Control Register to 25MHz clock.  This value
+         * defaults back to a 2.5MHz clock when the PHY is reset.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_EPSCR_TX_CLK_25;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* In addition, because of the s/w reset above, we need to enable CRS on
+         * TX.  This must be set for both full and half duplex operation.
+         */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+        ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+            (!hw->autoneg) && (hw->forced_speed_duplex == e1000_10_full ||
+             hw->forced_speed_duplex == e1000_10_half)) {
+            ret_val = e1000_polarity_reversal_workaround(hw);
+            if (ret_val)
+                return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        /* The TX_CLK of the Extended PHY Specific Control Register defaults
+         * to 2.5MHz on a reset.  We need to re-force it back to 25MHz unless
+         * we are in a forced 10Mbps configuration. */
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+        if ((hw->forced_speed_duplex == e1000_10_full) ||
+            (hw->forced_speed_duplex == e1000_10_half))
+            phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ;
+        else
+            phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25MHZ;
+
+        /* Also due to the reset, we need to enable CRS on Tx. */
+        phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+
+        ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Sets the collision distance in the Transmit Control register
+*
+* hw - Struct containing variables accessed by shared code
+*
+* The collision distance depends only on the MAC type; no link, speed, or
+* duplex information is consulted.
+******************************************************************************/
+void
+e1000_config_collision_dist(struct e1000_hw *hw)
+{
+    uint32_t tctl, coll_dist;
+
+    DEBUGFUNC("e1000_config_collision_dist");
+
+    if (hw->mac_type < e1000_82543)
+        coll_dist = E1000_COLLISION_DISTANCE_82542;
+    else
+        coll_dist = E1000_COLLISION_DISTANCE;
+
+    tctl = E1000_READ_REG(hw, TCTL);
+
+    tctl &= ~E1000_TCTL_COLD;
+    tctl |= coll_dist << E1000_COLD_SHIFT;
+
+    E1000_WRITE_REG(hw, TCTL, tctl);
+    E1000_WRITE_FLUSH(hw);
+}
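+
+/* Worked example (editor's note): assuming the conventional values
+ * E1000_COLD_SHIFT = 12 and a collision distance of 63 (0x3F), the
+ * read-modify-write above yields
+ *
+ *     tctl = (tctl & ~E1000_TCTL_COLD) | (0x3F << 12)
+ *          = (tctl & ~E1000_TCTL_COLD) | 0x3F000
+ *
+ * i.e. the COLD field of TCTL is replaced without disturbing other bits.
+ */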
+
+/******************************************************************************
+* Sets MAC speed and duplex settings to reflect those negotiated by the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* The speed and duplex information is read from the PHY's specific status
+* register.
+******************************************************************************/
+static int32_t
+e1000_config_mac_to_phy(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_config_mac_to_phy");
+
+    /* 82544 or newer MAC: Auto Speed Detection takes care of
+     * MAC speed/duplex configuration.
+     */
+    if (hw->mac_type >= e1000_82544)
+        return E1000_SUCCESS;
+
+    /* Read the Device Control Register and set the bits to Force Speed
+     * and Duplex.
+     */
+    ctrl = E1000_READ_REG(hw, CTRL);
+    ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+    ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+    /* Set up duplex in the Device Control and Transmit Control
+     * registers depending on negotiated values.
+     */
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if (phy_data & M88E1000_PSSR_DPLX)
+        ctrl |= E1000_CTRL_FD;
+    else
+        ctrl &= ~E1000_CTRL_FD;
+
+    e1000_config_collision_dist(hw);
+
+    /* Set up speed in the Device Control register depending on
+     * negotiated values.
+     */
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+        ctrl |= E1000_CTRL_SPD_1000;
+    else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+        ctrl |= E1000_CTRL_SPD_100;
+
+    /* Write the configured values back to the Device Control Reg. */
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Forces the MAC's flow control settings.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sets the TFCE and RFCE bits in the device control register to reflect
+ * the adapter settings. TFCE and RFCE need to be explicitly set by
+ * software when a Copper PHY is used because autonegotiation is managed
+ * by the PHY rather than the MAC. Software must also configure these
+ * bits when link is forced on a fiber connection.
+ *****************************************************************************/
+int32_t
+e1000_force_mac_fc(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+
+    DEBUGFUNC("e1000_force_mac_fc");
+
+    /* Get the current configuration of the Device Control Register */
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Because we didn't get link via the internal auto-negotiation
+     * mechanism (we either forced link or we got link via PHY
+     * auto-neg), we have to manually enable/disable transmit and
+     * receive flow control.
+     *
+     * The "switch" statement below enables/disables flow control
+     * according to the "hw->fc" parameter.
+     *
+     * The possible values of the "fc" parameter are:
+     *      0:  Flow control is completely disabled
+     *      1:  Rx flow control is enabled (we can receive pause
+     *          frames but not send pause frames).
+     *      2:  Tx flow control is enabled (we can send pause frames
+     *          but we do not receive pause frames).
+     *      3:  Both Rx and TX flow control (symmetric) are enabled.
+     *  other:  No other values should be possible at this point.
+     */
+
+    switch (hw->fc) {
+    case e1000_fc_none:
+        ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+        break;
+    case e1000_fc_rx_pause:
+        ctrl &= (~E1000_CTRL_TFCE);
+        ctrl |= E1000_CTRL_RFCE;
+        break;
+    case e1000_fc_tx_pause:
+        ctrl &= (~E1000_CTRL_RFCE);
+        ctrl |= E1000_CTRL_TFCE;
+        break;
+    case e1000_fc_full:
+        ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+        break;
+    default:
+        DEBUGOUT("Flow control param set incorrectly\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    /* Disable TX Flow Control for 82542 (rev 2.0) */
+    if (hw->mac_type == e1000_82542_rev2_0)
+        ctrl &= (~E1000_CTRL_TFCE);
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Configures flow control settings after link is established
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Should be called immediately after a valid link has been established.
+ * Forces MAC flow control settings if link was forced. When in MII/GMII mode
+ * and autonegotiation is enabled, the MAC flow control settings will be set
+ * based on the flow control negotiated by the PHY. In TBI mode, the TFCE
+ * and RFCE bits will be automatically set to the negotiated flow control mode.
+ *****************************************************************************/
+int32_t
+e1000_config_fc_after_link_up(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t mii_status_reg;
+    uint16_t mii_nway_adv_reg;
+    uint16_t mii_nway_lp_ability_reg;
+    uint16_t speed;
+    uint16_t duplex;
+
+    DEBUGFUNC("e1000_config_fc_after_link_up");
+
+    /* Check for the case where we have fiber media and auto-neg failed
+     * so we had to force link.  In this case, we need to force the
+     * configuration of the MAC to match the "fc" parameter.
+     */
+    if (((hw->media_type == e1000_media_type_fiber) && (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_internal_serdes) &&
+         (hw->autoneg_failed)) ||
+        ((hw->media_type == e1000_media_type_copper) && (!hw->autoneg))) {
+        ret_val = e1000_force_mac_fc(hw);
+        if (ret_val) {
+            DEBUGOUT("Error forcing flow control settings\n");
+            return ret_val;
+        }
+    }
+
+    /* Check for the case where we have copper media and auto-neg is
+     * enabled.  In this case, we need to check and see if Auto-Neg
+     * has completed, and if so, how the PHY and link partner has
+     * flow control configured.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->autoneg) {
+        /* Read the MII Status Register and check to see if AutoNeg
+         * has completed.  We read this twice because this reg has
+         * some "sticky" (latched) bits.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_AUTONEG_COMPLETE) {
+            /* The AutoNeg process has completed, so we now need to
+             * read both the Auto Negotiation Advertisement Register
+             * (Address 4) and the Auto-Negotiation Base Page Ability
+             * Register (Address 5) to determine how flow control was
+             * negotiated.
+             */
+            ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+                                         &mii_nway_adv_reg);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+                                         &mii_nway_lp_ability_reg);
+            if (ret_val)
+                return ret_val;
+
+            /* Two bits in the Auto Negotiation Advertisement Register
+             * (Address 4) and two bits in the Auto Negotiation Base
+             * Page Ability Register (Address 5) determine flow control
+             * for both the PHY and the link partner.  The following
+             * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+             * 1999, describes these PAUSE resolution bits and how flow
+             * control is determined based upon these settings.
+             * NOTE:  DC = Don't Care
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+             *-------|---------|-------|---------|--------------------
+             *   0   |    0    |  DC   |   DC    | e1000_fc_none
+             *   0   |    1    |   0   |   DC    | e1000_fc_none
+             *   0   |    1    |   1   |    0    | e1000_fc_none
+             *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+             *   1   |    0    |   0   |   DC    | e1000_fc_none
+             *   1   |   DC    |   1   |   DC    | e1000_fc_full
+             *   1   |    1    |   0   |    0    | e1000_fc_none
+             *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+             *
+             */
+            /* Are both PAUSE bits set to 1?  If so, this implies
+             * Symmetric Flow Control is enabled at both ends.  The
+             * ASM_DIR bits are irrelevant per the spec.
+             *
+             * For Symmetric Flow Control:
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |   DC    |   1   |   DC    | e1000_fc_full
+             *
+             */
+            if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+                /* Now we need to check if the user selected RX-only
+                 * pause frames.  In that case, we had to advertise
+                 * FULL flow control because we could not advertise RX
+                 * ONLY. Hence, we must now check to see if we need to
+                 * turn OFF the TRANSMISSION of PAUSE frames.
+                 */
+                if (hw->original_fc == e1000_fc_full) {
+                    hw->fc = e1000_fc_full;
+                    DEBUGOUT("Flow Control = FULL.\n");
+                } else {
+                    hw->fc = e1000_fc_rx_pause;
+                    DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+                }
+            }
+            /* For receiving PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+             *
+             */
+            else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = e1000_fc_tx_pause;
+                DEBUGOUT("Flow Control = TX PAUSE frames only.\n");
+            }
+            /* For transmitting PAUSE frames ONLY.
+             *
+             *   LOCAL DEVICE  |   LINK PARTNER
+             * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+             *-------|---------|-------|---------|--------------------
+             *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+             *
+             */
+            else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+                     (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+                     !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+                     (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+                hw->fc = e1000_fc_rx_pause;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+            /* Per the IEEE spec, at this point flow control should be
+             * disabled.  However, we want to consider that we could
+             * be connected to a legacy switch that doesn't advertise
+             * desired flow control but on which flow control can be
+             * forced.  So if we advertised no flow control, that is
+             * what we will resolve to.  If we advertised some kind of
+             * receive capability (Rx Pause Only or Full Flow Control)
+             * and the link partner advertised none, we will configure
+             * ourselves to enable Rx Flow Control only.  We can do
+             * this safely for two reasons:  If the link partner really
+             * didn't want flow control enabled, and we enable Rx, no
+             * harm done since we won't be receiving any PAUSE frames
+             * anyway.  If the intent on the link partner was to have
+             * flow control enabled, then by us enabling RX only, we
+             * can at least receive pause frames and process them.
+             * This is a good idea because in most cases, since we are
+             * predominantly a server NIC, we will more often be asked
+             * to delay transmission of packets than to ask our link
+             * partner to pause transmission of frames.
+             */
+            else if ((hw->original_fc == e1000_fc_none ||
+                      hw->original_fc == e1000_fc_tx_pause) ||
+                      hw->fc_strict_ieee) {
+                hw->fc = e1000_fc_none;
+                DEBUGOUT("Flow Control = NONE.\n");
+            } else {
+                hw->fc = e1000_fc_rx_pause;
+                DEBUGOUT("Flow Control = RX PAUSE frames only.\n");
+            }
+
+            /* Now we need to do one last check...  If we auto-
+             * negotiated to HALF DUPLEX, flow control should not be
+             * enabled per IEEE 802.3 spec.
+             */
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+
+            if (duplex == HALF_DUPLEX)
+                hw->fc = e1000_fc_none;
+
+            /* Now we call a subroutine to actually force the MAC
+             * controller to use the correct flow control settings.
+             */
+            ret_val = e1000_force_mac_fc(hw);
+            if (ret_val) {
+                DEBUGOUT("Error forcing flow control settings\n");
+                return ret_val;
+            }
+        } else {
+            DEBUGOUT("Copper PHY and Auto Neg has not completed.\n");
+        }
+    }
+    return E1000_SUCCESS;
+}
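+
+/* Illustrative sketch only, kept under "#if 0": the PAUSE resolution table
+ * above expressed as a pure function of the local advertisement word, the
+ * link partner ability word, and the originally requested policy.  The
+ * fc_strict_ieee handling is omitted for brevity; this is an editor's
+ * sketch, not part of the driver.
+ */
+#if 0
+static uint32_t
+e1000_resolve_fc(uint16_t adv, uint16_t lp, uint32_t original_fc)
+{
+    /* Both sides advertise PAUSE: symmetric, unless we only wanted RX. */
+    if ((adv & NWAY_AR_PAUSE) && (lp & NWAY_LPAR_PAUSE))
+        return (original_fc == e1000_fc_full) ? e1000_fc_full
+                                              : e1000_fc_rx_pause;
+    /* We advertise ASM_DIR only; partner advertises PAUSE + ASM_DIR. */
+    if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+        (lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+        return e1000_fc_tx_pause;
+    /* We advertise PAUSE + ASM_DIR; partner advertises ASM_DIR only. */
+    if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+        !(lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+        return e1000_fc_rx_pause;
+    /* Otherwise: no flow control, except for the legacy-switch case. */
+    return (original_fc == e1000_fc_none ||
+            original_fc == e1000_fc_tx_pause) ? e1000_fc_none
+                                              : e1000_fc_rx_pause;
+}
+#endif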
+
+/******************************************************************************
+ * Checks to see if the link status of the hardware has changed.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Called by any function that needs to check the link status of the adapter.
+ *****************************************************************************/
+int32_t
+e1000_check_for_link(struct e1000_hw *hw)
+{
+    uint32_t rxcw = 0;
+    uint32_t ctrl;
+    uint32_t status;
+    uint32_t rctl;
+    uint32_t icr;
+    uint32_t signal = 0;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_check_for_link");
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    status = E1000_READ_REG(hw, STATUS);
+
+    /* On adapters with a MAC newer than 82544, SW Definable pin 1 will be
+     * set when the optics detect a signal. On older adapters, it will be
+     * cleared when there is a signal.  This applies to fiber media only.
+     */
+    if ((hw->media_type == e1000_media_type_fiber) ||
+        (hw->media_type == e1000_media_type_internal_serdes)) {
+        rxcw = E1000_READ_REG(hw, RXCW);
+
+        if (hw->media_type == e1000_media_type_fiber) {
+            signal = (hw->mac_type > e1000_82544) ? E1000_CTRL_SWDPIN1 : 0;
+            if (status & E1000_STATUS_LU)
+                hw->get_link_status = FALSE;
+        }
+    }
+
+    /* If we have a copper PHY then we only want to go out to the PHY
+     * registers to see if Auto-Neg has completed and/or if our link
+     * status has changed.  The get_link_status flag will be set if we
+     * receive a Link Status Change interrupt or we have Rx Sequence
+     * Errors.
+     */
+    if ((hw->media_type == e1000_media_type_copper) && hw->get_link_status) {
+        /* First we want to see if the MII Status Register reports
+         * link.  If so, then we want to get the current speed/duplex
+         * of the PHY.
+         * Read the register twice since the link bit is sticky.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (phy_data & MII_SR_LINK_STATUS) {
+            hw->get_link_status = FALSE;
+            /* Check if there was a downshift; this must be checked
+             * immediately after link-up. */
+            e1000_check_downshift(hw);
+
+            /* If we are on 82544 or 82543 silicon and speed/duplex
+             * are forced to 10H or 10F, then we will implement the polarity
+             * reversal workaround.  We disable interrupts first, and upon
+             * returning, restore the device's interrupt state to its previous
+             * value, except for the link status change interrupt, which will
+             * happen due to the execution of this workaround.
+             */
+
+            if ((hw->mac_type == e1000_82544 || hw->mac_type == e1000_82543) &&
+                (!hw->autoneg) &&
+                (hw->forced_speed_duplex == e1000_10_full ||
+                 hw->forced_speed_duplex == e1000_10_half)) {
+                E1000_WRITE_REG(hw, IMC, 0xffffffff);
+                ret_val = e1000_polarity_reversal_workaround(hw);
+                icr = E1000_READ_REG(hw, ICR);
+                E1000_WRITE_REG(hw, ICS, (icr & ~E1000_ICS_LSC));
+                E1000_WRITE_REG(hw, IMS, IMS_ENABLE_MASK);
+            }
+
+        } else {
+            /* No link detected */
+            e1000_config_dsp_after_link_change(hw, FALSE);
+            return 0;
+        }
+
+        /* If we are forcing speed/duplex, then we simply return since
+         * we have already determined whether we have link or not.
+         */
+        if (!hw->autoneg) return -E1000_ERR_CONFIG;
+
+        /* optimize the dsp settings for the igp phy */
+        e1000_config_dsp_after_link_change(hw, TRUE);
+
+        /* We have a M88E1000 PHY and Auto-Neg is enabled.  If we
+         * have Si on board that is 82544 or newer, Auto
+         * Speed Detection takes care of MAC speed/duplex
+         * configuration.  So we only need to configure Collision
+         * Distance in the MAC.  Otherwise, we need to force
+         * speed/duplex on the MAC to the current PHY speed/duplex
+         * settings.
+         */
+        if (hw->mac_type >= e1000_82544)
+            e1000_config_collision_dist(hw);
+        else {
+            ret_val = e1000_config_mac_to_phy(hw);
+            if (ret_val) {
+                DEBUGOUT("Error configuring MAC to PHY settings\n");
+                return ret_val;
+            }
+        }
+
+        /* Configure Flow Control now that Auto-Neg has completed. First, we
+         * need to restore the desired flow control settings because we may
+         * have had to re-autoneg with a different link partner.
+         */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+
+        /* At this point we know that we are on copper and we have
+         * auto-negotiated link.  These are conditions for checking the link
+         * partner capability register.  We use the link speed to determine if
+         * TBI compatibility needs to be turned on or off.  If the link is not
+         * at gigabit speed, then TBI compatibility is not needed.  If we are
+         * at gigabit speed, we turn on TBI compatibility.
+         */
+        if (hw->tbi_compatibility_en) {
+            uint16_t speed, duplex;
+            ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+            if (ret_val) {
+                DEBUGOUT("Error getting link speed and duplex\n");
+                return ret_val;
+            }
+            if (speed != SPEED_1000) {
+                /* If link speed is not set to gigabit speed, we do not need
+                 * to enable TBI compatibility.
+                 */
+                if (hw->tbi_compatibility_on) {
+                    /* If we were previously in TBI compatibility mode, turn it off. */
+                    rctl = E1000_READ_REG(hw, RCTL);
+                    rctl &= ~E1000_RCTL_SBP;
+                    E1000_WRITE_REG(hw, RCTL, rctl);
+                    hw->tbi_compatibility_on = FALSE;
+                }
+            } else {
+                /* If TBI compatibility was previously off, turn it on. For
+                 * compatibility with a TBI link partner, we will store bad
+                 * packets. Some frames have an additional byte on the end and
+                 * will look like CRC errors to the hardware.
+                 */
+                if (!hw->tbi_compatibility_on) {
+                    hw->tbi_compatibility_on = TRUE;
+                    rctl = E1000_READ_REG(hw, RCTL);
+                    rctl |= E1000_RCTL_SBP;
+                    E1000_WRITE_REG(hw, RCTL, rctl);
+                }
+            }
+        }
+    }
+    /* If we don't have link (auto-negotiation failed or link partner cannot
+     * auto-negotiate), the cable is plugged in (we have signal), and our
+     * link partner is not trying to auto-negotiate with us (we are receiving
+     * idles or data), we need to force link up. We also need to give
+     * auto-negotiation time to complete, in case the cable was just plugged
+     * in. The autoneg_failed flag does this.
+     */
+    else if ((((hw->media_type == e1000_media_type_fiber) &&
+              ((ctrl & E1000_CTRL_SWDPIN1) == signal)) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (!(status & E1000_STATUS_LU)) &&
+              (!(rxcw & E1000_RXCW_C))) {
+        if (hw->autoneg_failed == 0) {
+            hw->autoneg_failed = 1;
+            return 0;
+        }
+        DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+        /* Disable auto-negotiation in the TXCW register */
+        E1000_WRITE_REG(hw, TXCW, (hw->txcw & ~E1000_TXCW_ANE));
+
+        /* Force link-up and also force full-duplex. */
+        ctrl = E1000_READ_REG(hw, CTRL);
+        ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+
+        /* Configure Flow Control after forcing link up. */
+        ret_val = e1000_config_fc_after_link_up(hw);
+        if (ret_val) {
+            DEBUGOUT("Error configuring flow control\n");
+            return ret_val;
+        }
+    }
+    /* If we are forcing link and we are receiving /C/ ordered sets, re-enable
+     * auto-negotiation in the TXCW register and disable forced link in the
+     * Device Control register in an attempt to auto-negotiate with our link
+     * partner.
+     */
+    else if (((hw->media_type == e1000_media_type_fiber) ||
+              (hw->media_type == e1000_media_type_internal_serdes)) &&
+              (ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+        DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+        E1000_WRITE_REG(hw, TXCW, hw->txcw);
+        E1000_WRITE_REG(hw, CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+        hw->serdes_link_down = FALSE;
+    }
+    /* If link was forced (e.g. the link partner is a switch that does not
+     * auto-negotiate), check link status based on MAC synchronization for
+     * the internal serdes media type.
+     */
+    else if ((hw->media_type == e1000_media_type_internal_serdes) &&
+             !(E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+        /* SYNCH bit and IV bit are sticky. */
+        usec_delay(10);
+        if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, RXCW)) {
+            if (!(rxcw & E1000_RXCW_IV)) {
+                hw->serdes_link_down = FALSE;
+                DEBUGOUT("SERDES: Link is up.\n");
+            }
+        } else {
+            hw->serdes_link_down = TRUE;
+            DEBUGOUT("SERDES: Link is down.\n");
+        }
+    }
+    if ((hw->media_type == e1000_media_type_internal_serdes) &&
+        (E1000_TXCW_ANE & E1000_READ_REG(hw, TXCW))) {
+        hw->serdes_link_down = !(E1000_STATUS_LU & E1000_READ_REG(hw, STATUS));
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Detects the current speed and duplex settings of the hardware.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * speed - Speed of the connection
+ * duplex - Duplex setting of the connection
+ *****************************************************************************/
+int32_t
+e1000_get_speed_and_duplex(struct e1000_hw *hw,
+                           uint16_t *speed,
+                           uint16_t *duplex)
+{
+    uint32_t status;
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_get_speed_and_duplex");
+
+    if (hw->mac_type >= e1000_82543) {
+        status = E1000_READ_REG(hw, STATUS);
+        if (status & E1000_STATUS_SPEED_1000) {
+            *speed = SPEED_1000;
+            DEBUGOUT("1000 Mbs, ");
+        } else if (status & E1000_STATUS_SPEED_100) {
+            *speed = SPEED_100;
+            DEBUGOUT("100 Mbs, ");
+        } else {
+            *speed = SPEED_10;
+            DEBUGOUT("10 Mbs, ");
+        }
+
+        if (status & E1000_STATUS_FD) {
+            *duplex = FULL_DUPLEX;
+            DEBUGOUT("Full Duplex\n");
+        } else {
+            *duplex = HALF_DUPLEX;
+            DEBUGOUT(" Half Duplex\n");
+        }
+    } else {
+        DEBUGOUT("1000 Mbs, Full Duplex\n");
+        *speed = SPEED_1000;
+        *duplex = FULL_DUPLEX;
+    }
+
+    /* IGP01 PHY may advertise full duplex operation after speed downgrade even
+     * if it is operating at half duplex.  Here we set the duplex settings to
+     * match the duplex in the link partner's capabilities.
+     */
+    if (hw->phy_type == e1000_phy_igp && hw->speed_downgraded) {
+        ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        if (!(phy_data & NWAY_ER_LP_NWAY_CAPS))
+            *duplex = HALF_DUPLEX;
+        else {
+            ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy_data);
+            if (ret_val)
+                return ret_val;
+            if ((*speed == SPEED_100 && !(phy_data & NWAY_LPAR_100TX_FD_CAPS)) ||
+               (*speed == SPEED_10 && !(phy_data & NWAY_LPAR_10T_FD_CAPS)))
+                *duplex = HALF_DUPLEX;
+        }
+    }
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        if (*speed == SPEED_1000)
+            ret_val = e1000_configure_kmrn_for_1000(hw);
+        else
+            ret_val = e1000_configure_kmrn_for_10_100(hw, *duplex);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if ((hw->phy_type == e1000_phy_igp_3) && (*speed == SPEED_1000)) {
+        ret_val = e1000_kumeran_lock_loss_workaround(hw);
+        if (ret_val)
+            return ret_val;
+    }
+
+    return E1000_SUCCESS;
+}
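+
+/* Usage sketch only, kept under "#if 0": callers pass plain uint16_t slots
+ * and branch on the SPEED_xxx and HALF/FULL_DUPLEX values filled in above.
+ * Editor's sketch, not part of the driver.
+ */
+#if 0
+static void
+e1000_speed_duplex_example(struct e1000_hw *hw)
+{
+    uint16_t speed, duplex;
+
+    if (e1000_get_speed_and_duplex(hw, &speed, &duplex) != E1000_SUCCESS)
+        return;
+    if (speed == SPEED_1000 && duplex == FULL_DUPLEX) {
+        /* ... gigabit full-duplex specific setup ... */
+    }
+}
+#endif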
+
+/******************************************************************************
+* Blocks until autoneg completes or times out (~4.5 seconds)
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_wait_autoneg(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t i;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_wait_autoneg");
+    DEBUGOUT("Waiting for Auto-Neg to complete.\n");
+
+    /* We will wait for autoneg to complete or 4.5 seconds to expire. */
+    for (i = PHY_AUTO_NEG_TIME; i > 0; i--) {
+        /* Read the MII Status Register and wait for Auto-Neg
+         * Complete bit to be set.
+         */
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+        if (phy_data & MII_SR_AUTONEG_COMPLETE) {
+            return E1000_SUCCESS;
+        }
+        msec_delay(100);
+    }
+    return E1000_SUCCESS;
+}
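+
+/* Illustrative sketch only, kept under "#if 0": the "read the register
+ * twice" idiom used throughout this file.  The link and autoneg-complete
+ * bits in PHY_STATUS are latched, so the first read returns the sticky
+ * value and the second read returns the current state.  Editor's sketch,
+ * not part of the driver.
+ */
+#if 0
+static int32_t
+e1000_read_phy_status_fresh(struct e1000_hw *hw, uint16_t *status)
+{
+    int32_t ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, status);
+    if (ret_val)
+        return ret_val;
+    /* The second read reflects the live state of the latched bits. */
+    return e1000_read_phy_reg(hw, PHY_STATUS, status);
+}
+#endif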
+
+/******************************************************************************
+* Raises the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void
+e1000_raise_mdi_clk(struct e1000_hw *hw,
+                    uint32_t *ctrl)
+{
+    /* Raise the clock input to the Management Data Clock (by setting the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    E1000_WRITE_REG(hw, CTRL, (*ctrl | E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(10);
+}
+
+/******************************************************************************
+* Lowers the Management Data Clock
+*
+* hw - Struct containing variables accessed by shared code
+* ctrl - Device control register's current value
+******************************************************************************/
+static void
+e1000_lower_mdi_clk(struct e1000_hw *hw,
+                    uint32_t *ctrl)
+{
+    /* Lower the clock input to the Management Data Clock (by clearing the MDC
+     * bit), and then delay 10 microseconds.
+     */
+    E1000_WRITE_REG(hw, CTRL, (*ctrl & ~E1000_CTRL_MDC));
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(10);
+}
+
+/******************************************************************************
+* Shifts data bits out to the PHY
+*
+* hw - Struct containing variables accessed by shared code
+* data - Data to send out to the PHY
+* count - Number of bits to shift out
+*
+* Bits are shifted out in MSB to LSB order.
+******************************************************************************/
+static void
+e1000_shift_out_mdi_bits(struct e1000_hw *hw,
+                         uint32_t data,
+                         uint16_t count)
+{
+    uint32_t ctrl;
+    uint32_t mask;
+
+    /* We need to shift "count" number of bits out to the PHY. So, the value
+     * in the "data" parameter will be shifted out to the PHY one bit at a
+     * time. In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01;
+    mask <<= (count - 1);
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+    ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+    while (mask) {
+        /* A "1" is shifted out to the PHY by setting the MDIO bit to "1" and
+         * then raising and lowering the Management Data Clock. A "0" is
+         * shifted out to the PHY by setting the MDIO bit to "0" and then
+         * raising and lowering the clock.
+         */
+        if (data & mask)
+            ctrl |= E1000_CTRL_MDIO;
+        else
+            ctrl &= ~E1000_CTRL_MDIO;
+
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+        E1000_WRITE_FLUSH(hw);
+
+        usec_delay(10);
+
+        e1000_raise_mdi_clk(hw, &ctrl);
+        e1000_lower_mdi_clk(hw, &ctrl);
+
+        mask = mask >> 1;
+    }
+}
+
+/******************************************************************************
+* Shifts data bits in from the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Bits are shifted in in MSB to LSB order.
+******************************************************************************/
+static uint16_t
+e1000_shift_in_mdi_bits(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+    uint16_t data = 0;
+    uint8_t i;
+
+    /* In order to read a register from the PHY, we need to shift in a total
+     * of 18 bits from the PHY. The first two bit (turnaround) times are used
+     * to avoid contention on the MDIO pin when a read operation is performed.
+     * These two bits are ignored by us and thrown away. Bits are "shifted in"
+     * by raising the input to the Management Data Clock (setting the MDC bit),
+     * and then reading the value of the MDIO bit.
+     */
+    ctrl = E1000_READ_REG(hw, CTRL);
+
+    /* Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as input. */
+    ctrl &= ~E1000_CTRL_MDIO_DIR;
+    ctrl &= ~E1000_CTRL_MDIO;
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+    E1000_WRITE_FLUSH(hw);
+
+    /* Raise and Lower the clock before reading in the data. This accounts for
+     * the turnaround bits. The first clock occurred when we clocked out the
+     * last bit of the Register Address.
+     */
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    for (data = 0, i = 0; i < 16; i++) {
+        data = data << 1;
+        e1000_raise_mdi_clk(hw, &ctrl);
+        ctrl = E1000_READ_REG(hw, CTRL);
+        /* Check to see if we shifted in a "1". */
+        if (ctrl & E1000_CTRL_MDIO)
+            data |= 1;
+        e1000_lower_mdi_clk(hw, &ctrl);
+    }
+
+    e1000_raise_mdi_clk(hw, &ctrl);
+    e1000_lower_mdi_clk(hw, &ctrl);
+
+    return data;
+}
+
+int32_t
+e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask)
+{
+    uint32_t swfw_sync = 0;
+    uint32_t swmask = mask;
+    uint32_t fwmask = mask << 16;
+    int32_t timeout = 200;
+
+    DEBUGFUNC("e1000_swfw_sync_acquire");
+
+    if (hw->swfwhw_semaphore_present)
+        return e1000_get_software_flag(hw);
+
+    if (!hw->swfw_sync_present)
+        return e1000_get_hw_eeprom_semaphore(hw);
+
+    while (timeout) {
+        if (e1000_get_hw_eeprom_semaphore(hw))
+            return -E1000_ERR_SWFW_SYNC;
+
+        swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
+        if (!(swfw_sync & (fwmask | swmask)))
+            break;
+
+        /* Firmware (fwmask) or another software thread (swmask) is
+         * currently using the resource.
+         */
+        e1000_put_hw_eeprom_semaphore(hw);
+        msec_delay_irq(5);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+        return -E1000_ERR_SWFW_SYNC;
+    }
+
+    swfw_sync |= swmask;
+    E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+    return E1000_SUCCESS;
+}
+
+void
+e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask)
+{
+    uint32_t swfw_sync;
+    uint32_t swmask = mask;
+
+    DEBUGFUNC("e1000_swfw_sync_release");
+
+    if (hw->swfwhw_semaphore_present) {
+        e1000_release_software_flag(hw);
+        return;
+    }
+
+    if (!hw->swfw_sync_present) {
+        e1000_put_hw_eeprom_semaphore(hw);
+        return;
+    }
+
+    /* Block until the EEPROM semaphore can be taken. */
+    while (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS)
+        ; /* empty */
+
+    swfw_sync = E1000_READ_REG(hw, SW_FW_SYNC);
+    swfw_sync &= ~swmask;
+    E1000_WRITE_REG(hw, SW_FW_SYNC, swfw_sync);
+
+    e1000_put_hw_eeprom_semaphore(hw);
+}
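+
+/* Usage sketch only, kept under "#if 0": every PHY or NVM access brackets
+ * the hardware traffic with the software/firmware semaphore, as
+ * e1000_read_phy_reg() below does.  Editor's sketch, not part of the driver.
+ */
+#if 0
+static int32_t
+e1000_phy_access_example(struct e1000_hw *hw)
+{
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_PHY0_SM))
+        return -E1000_ERR_SWFW_SYNC;
+
+    /* ... access PHY registers owned by the PHY0 semaphore here ... */
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_PHY0_SM);
+    return E1000_SUCCESS;
+}
+#endif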
+
+/*****************************************************************************
+* Reads the value from a PHY register.  If the register is on a specific,
+* non-zero page, the page is selected first.
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to read
+******************************************************************************/
+int32_t
+e1000_read_phy_reg(struct e1000_hw *hw,
+                   uint32_t reg_addr,
+                   uint16_t *phy_data)
+{
+    uint32_t ret_val;
+    uint16_t swfw;
+
+    DEBUGFUNC("e1000_read_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (uint16_t)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_read_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                    phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
+
+int32_t
+e1000_read_phy_reg_ex(struct e1000_hw *hw,
+                      uint32_t reg_addr,
+                      uint16_t *phy_data)
+{
+    uint32_t i;
+    uint32_t mdic = 0;
+    const uint32_t phy_addr = 1;
+
+    DEBUGFUNC("e1000_read_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, and register address in the MDI
+         * Control register.  The MAC will take care of interfacing with the
+         * PHY to retrieve the desired data.
+         */
+        mdic = ((reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_READ));
+
+        E1000_WRITE_REG(hw, MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI read completed */
+        for (i = 0; i < 64; i++) {
+            usec_delay(50);
+            mdic = E1000_READ_REG(hw, MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Read did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+        if (mdic & E1000_MDIC_ERROR) {
+            DEBUGOUT("MDI Error\n");
+            return -E1000_ERR_PHY;
+        }
+        *phy_data = (uint16_t) mdic;
+    } else {
+        /* We must first send a preamble through the MDIO pin to signal the
+         * beginning of an MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the next few fields that are required for a read
+         * operation.  We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine five different times. The format of
+         * a MII read instruction consists of a shift out of 14 bits and is
+         * defined as follows:
+         *    <Preamble><SOF><Op Code><Phy Addr><Reg Addr>
+         * followed by a shift in of 18 bits.  The first two bits shifted in
+         * are TurnAround bits used to avoid contention on the MDIO pin when a
+         * READ operation is performed.  These two bits are thrown away,
+         * followed by a shift in of 16 bits which contain the desired data.
+         */
+        mdic = ((reg_addr) | (phy_addr << 5) |
+                (PHY_OP_READ << 10) | (PHY_SOF << 12));
+
+        e1000_shift_out_mdi_bits(hw, mdic, 14);
+
+        /* Now that we've shifted out the read command to the MII, we need to
+         * "shift in" the 16-bit value (18 total bits) of the requested PHY
+         * register's contents.
+         */
+        *phy_data = e1000_shift_in_mdi_bits(hw);
+    }
+    return E1000_SUCCESS;
+}
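+
+/* Worked example (editor's note): assuming the conventional constant values
+ * PHY_SOF = 0x1 and PHY_OP_READ = 0x2, a bit-banged read of PHY register 4
+ * on PHY address 1 shifts out the 14-bit command
+ *
+ *     (PHY_SOF << 12) | (PHY_OP_READ << 10) | (1 << 5) | 4
+ *   = 0x1000          | 0x0800              | 0x0020   | 0x0004
+ *   = 0x1824
+ *
+ * MSB-first, after the 32 preamble bits of ones.
+ */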
+
+/******************************************************************************
+* Writes a value to a PHY register
+*
+* hw - Struct containing variables accessed by shared code
+* reg_addr - address of the PHY register to write
+* data - data to write to the PHY
+******************************************************************************/
+int32_t
+e1000_write_phy_reg(struct e1000_hw *hw,
+                    uint32_t reg_addr,
+                    uint16_t phy_data)
+{
+    uint32_t ret_val;
+    uint16_t swfw;
+
+    DEBUGFUNC("e1000_write_phy_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    if ((hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) &&
+       (reg_addr > MAX_PHY_MULTI_PAGE_REG)) {
+        ret_val = e1000_write_phy_reg_ex(hw, IGP01E1000_PHY_PAGE_SELECT,
+                                         (uint16_t)reg_addr);
+        if (ret_val) {
+            e1000_swfw_sync_release(hw, swfw);
+            return ret_val;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        if (((reg_addr & MAX_PHY_REG_ADDRESS) > MAX_PHY_MULTI_PAGE_REG) ||
+            (hw->mac_type == e1000_80003es2lan)) {
+            /* Select Configuration Page */
+            if ((reg_addr & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+                ret_val = e1000_write_phy_reg_ex(hw, GG82563_PHY_PAGE_SELECT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            } else {
+                /* Use Alternative Page Select register to access
+                 * registers 30 and 31
+                 */
+                ret_val = e1000_write_phy_reg_ex(hw,
+                                                 GG82563_PHY_PAGE_SELECT_ALT,
+                          (uint16_t)((uint16_t)reg_addr >> GG82563_PAGE_SHIFT));
+            }
+
+            if (ret_val) {
+                e1000_swfw_sync_release(hw, swfw);
+                return ret_val;
+            }
+        }
+    }
+
+    ret_val = e1000_write_phy_reg_ex(hw, MAX_PHY_REG_ADDRESS & reg_addr,
+                                     phy_data);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return ret_val;
+}
+
+int32_t
+e1000_write_phy_reg_ex(struct e1000_hw *hw,
+                    uint32_t reg_addr,
+                    uint16_t phy_data)
+{
+    uint32_t i;
+    uint32_t mdic = 0;
+    const uint32_t phy_addr = 1;
+
+    DEBUGFUNC("e1000_write_phy_reg_ex");
+
+    if (reg_addr > MAX_PHY_REG_ADDRESS) {
+        DEBUGOUT1("PHY Address %d is out of range\n", reg_addr);
+        return -E1000_ERR_PARAM;
+    }
+
+    if (hw->mac_type > e1000_82543) {
+        /* Set up Op-code, Phy Address, register address, and data intended
+         * for the PHY register in the MDI Control register.  The MAC will take
+         * care of interfacing with the PHY to send the desired data.
+         */
+        mdic = (((uint32_t) phy_data) |
+                (reg_addr << E1000_MDIC_REG_SHIFT) |
+                (phy_addr << E1000_MDIC_PHY_SHIFT) |
+                (E1000_MDIC_OP_WRITE));
+
+        E1000_WRITE_REG(hw, MDIC, mdic);
+
+        /* Poll the ready bit to see if the MDI write completed */
+        for (i = 0; i < 641; i++) {
+            usec_delay(5);
+            mdic = E1000_READ_REG(hw, MDIC);
+            if (mdic & E1000_MDIC_READY) break;
+        }
+        if (!(mdic & E1000_MDIC_READY)) {
+            DEBUGOUT("MDI Write did not complete\n");
+            return -E1000_ERR_PHY;
+        }
+    } else {
+        /* We'll need to use the SW defined pins to shift the write command
+         * out to the PHY. We first send a preamble to the PHY to signal the
+         * beginning of the MII instruction.  This is done by sending 32
+         * consecutive "1" bits.
+         */
+        e1000_shift_out_mdi_bits(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+        /* Now combine the remaining required fields that will indicate a
+         * write operation. We use this method instead of calling the
+         * e1000_shift_out_mdi_bits routine for each field in the command. The
+         * format of a MII write instruction is as follows:
+         * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+         */
+        mdic = ((PHY_TURNAROUND) | (reg_addr << 2) | (phy_addr << 7) |
+                (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+        mdic <<= 16;
+        mdic |= (uint32_t) phy_data;
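+        /* For illustration, assuming the standard MII encodings
+         * PHY_SOF == 0x01, PHY_OP_WRITE == 0x01 and PHY_TURNAROUND == 0x02:
+         * the upper 16 bits of mdic hold the header
+         * <SOF><Op Code><Phy Addr><Reg Addr><Turnaround> and the lower 16
+         * bits hold the data, forming the 32-bit frame shifted out below.
+         */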
+
+        e1000_shift_out_mdi_bits(hw, mdic, 32);
+    }
+
+    return E1000_SUCCESS;
+}
+
+int32_t
+e1000_read_kmrn_reg(struct e1000_hw *hw,
+                    uint32_t reg_addr,
+                    uint16_t *data)
+{
+    uint32_t reg_val;
+    uint16_t swfw;
+    DEBUGFUNC("e1000_read_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    /* Write register address */
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) |
+              E1000_KUMCTRLSTA_REN;
+    E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
+    usec_delay(2);
+
+    /* Read the data returned */
+    reg_val = E1000_READ_REG(hw, KUMCTRLSTA);
+    *data = (uint16_t)reg_val;
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
+
+int32_t
+e1000_write_kmrn_reg(struct e1000_hw *hw,
+                     uint32_t reg_addr,
+                     uint16_t data)
+{
+    uint32_t reg_val;
+    uint16_t swfw;
+    DEBUGFUNC("e1000_write_kmrn_reg");
+
+    if ((hw->mac_type == e1000_80003es2lan) &&
+        (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+        swfw = E1000_SWFW_PHY1_SM;
+    } else {
+        swfw = E1000_SWFW_PHY0_SM;
+    }
+    if (e1000_swfw_sync_acquire(hw, swfw))
+        return -E1000_ERR_SWFW_SYNC;
+
+    reg_val = ((reg_addr << E1000_KUMCTRLSTA_OFFSET_SHIFT) &
+              E1000_KUMCTRLSTA_OFFSET) | data;
+    E1000_WRITE_REG(hw, KUMCTRLSTA, reg_val);
+    usec_delay(2);
+
+    e1000_swfw_sync_release(hw, swfw);
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Returns the PHY to the power-on reset state
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+    uint32_t ctrl, ctrl_ext;
+    uint32_t led_ctrl;
+    int32_t ret_val;
+    uint16_t swfw;
+
+    DEBUGFUNC("e1000_phy_hw_reset");
+
+    /* In the case of the phy reset being blocked, it's not an error, we
+     * simply return success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    DEBUGOUT("Resetting Phy...\n");
+
+    if (hw->mac_type > e1000_82543) {
+        if ((hw->mac_type == e1000_80003es2lan) &&
+            (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)) {
+            swfw = E1000_SWFW_PHY1_SM;
+        } else {
+            swfw = E1000_SWFW_PHY0_SM;
+        }
+        if (e1000_swfw_sync_acquire(hw, swfw)) {
+            e1000_release_software_semaphore(hw);
+            return -E1000_ERR_SWFW_SYNC;
+        }
+        /* Read the device control register and assert the E1000_CTRL_PHY_RST
+         * bit. Then, take it out of reset.
+         * For pre-e1000_82571 hardware, we delay for 10ms between the assert
+         * and deassert.  For e1000_82571 hardware and later, we instead delay
+         * for 100us between and 10ms after the deassertion.
+         */
+        ctrl = E1000_READ_REG(hw, CTRL);
+        E1000_WRITE_REG(hw, CTRL, ctrl | E1000_CTRL_PHY_RST);
+        E1000_WRITE_FLUSH(hw);
+
+        if (hw->mac_type < e1000_82571)
+            msec_delay(10);
+        else
+            usec_delay(100);
+
+        E1000_WRITE_REG(hw, CTRL, ctrl);
+        E1000_WRITE_FLUSH(hw);
+
+        if (hw->mac_type >= e1000_82571)
+            msec_delay_irq(10);
+        e1000_swfw_sync_release(hw, swfw);
+    } else {
+        /* Read the Extended Device Control Register, assert the PHY_RESET_DIR
+         * bit to put the PHY into reset. Then, take it out of reset.
+         */
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+        ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH(hw);
+        msec_delay(10);
+        ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+        E1000_WRITE_FLUSH(hw);
+    }
+    usec_delay(150);
+
+    if ((hw->mac_type == e1000_82541) || (hw->mac_type == e1000_82547)) {
+        /* Configure activity LED after PHY reset */
+        led_ctrl = E1000_READ_REG(hw, LEDCTL);
+        led_ctrl &= IGP_ACTIVITY_LED_MASK;
+        led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+        E1000_WRITE_REG(hw, LEDCTL, led_ctrl);
+    }
+
+    /* Wait for FW to finish PHY configuration. */
+    ret_val = e1000_get_phy_cfg_done(hw);
+    e1000_release_software_semaphore(hw);
+
+    if ((hw->mac_type == e1000_ich8lan) &&
+        (hw->phy_type == e1000_phy_igp_3)) {
+        ret_val = e1000_init_lcd_from_nvm(hw);
+        if (ret_val)
+            return ret_val;
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+* Resets the PHY
+*
+* hw - Struct containing variables accessed by shared code
+*
+* Sets bit 15 of the MII Control register
+******************************************************************************/
+int32_t
+e1000_phy_reset(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_phy_reset");
+
+    /* In the case of the phy reset being blocked, it's not an error, we
+     * simply return success without performing the reset. */
+    ret_val = e1000_check_phy_reset_block(hw);
+    if (ret_val)
+        return E1000_SUCCESS;
+
+    switch (hw->mac_type) {
+    case e1000_82541_rev_2:
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_ich8lan:
+        ret_val = e1000_phy_hw_reset(hw);
+        if (ret_val)
+            return ret_val;
+
+        break;
+    default:
+        ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= MII_CR_RESET;
+        ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+        if (ret_val)
+            return ret_val;
+
+        usec_delay(1);
+        break;
+    }
+
+    if (hw->phy_type == e1000_phy_igp || hw->phy_type == e1000_phy_igp_2)
+        e1000_phy_init_script(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Work-around for 82566 power-down: on D3 entry-
+* 1) disable gigabit link
+* 2) write VR power-down enable
+* 3) read it back
+* if successful continue, else issue LCD reset and repeat
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+void
+e1000_phy_powerdown_workaround(struct e1000_hw *hw)
+{
+    int32_t reg;
+    uint16_t phy_data;
+    int32_t retry = 0;
+
+    DEBUGFUNC("e1000_phy_powerdown_workaround");
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return;
+
+    do {
+        /* Disable link */
+        reg = E1000_READ_REG(hw, PHY_CTRL);
+        E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* Write VR power-down enable */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        e1000_write_phy_reg(hw, IGP3_VR_CTRL, phy_data |
+                            IGP3_VR_CTRL_MODE_SHUT);
+
+        /* Read it back and test */
+        e1000_read_phy_reg(hw, IGP3_VR_CTRL, &phy_data);
+        if ((phy_data & IGP3_VR_CTRL_MODE_SHUT) || retry)
+            break;
+
+        /* Issue PHY reset and repeat at most one more time */
+        reg = E1000_READ_REG(hw, CTRL);
+        E1000_WRITE_REG(hw, CTRL, reg | E1000_CTRL_PHY_RST);
+        retry++;
+    } while (retry);
+
+    return;
+
+}
+
+/******************************************************************************
+* Work-around for 82566 Kumeran PCS lock loss:
+* On link status change (i.e. PCI reset, speed change) and link is up and
+* speed is gigabit-
+* 0) if workaround is optionally disabled do nothing
+* 1) wait 1ms for Kumeran link to come up
+* 2) check Kumeran Diagnostic register PCS lock loss bit
+* 3) if not set the link is locked (all is good), otherwise...
+* 4) reset the PHY
+* 5) repeat up to 10 times
+* Note: this is only called for IGP3 copper when speed is 1gb.
+*
+* hw - struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    int32_t reg;
+    int32_t cnt;
+    uint16_t phy_data;
+
+    if (hw->kmrn_lock_loss_workaround_disabled)
+        return E1000_SUCCESS;
+
+    /* Make sure link is up before proceeding.  If not, just return.
+     * Attempting this while the link is negotiating fouls up link stability.
+     * The link status bit is latched low, so the register is read twice to
+     * get its current value. */
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+
+    if (phy_data & MII_SR_LINK_STATUS) {
+        for (cnt = 0; cnt < 10; cnt++) {
+            /* read once to clear */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+            /* and again to get new status */
+            ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* check for PCS lock */
+            if (!(phy_data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+                return E1000_SUCCESS;
+
+            /* Issue PHY reset */
+            e1000_phy_hw_reset(hw);
+            msec_delay_irq(5);
+        }
+        /* Disable GigE link negotiation */
+        reg = E1000_READ_REG(hw, PHY_CTRL);
+        E1000_WRITE_REG(hw, PHY_CTRL, reg | E1000_PHY_CTRL_GBE_DISABLE |
+                        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+
+        /* unable to acquire PCS lock */
+        return E1000_ERR_PHY;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Probes the expected PHY address for known PHY IDs
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+int32_t
+e1000_detect_gig_phy(struct e1000_hw *hw)
+{
+    int32_t phy_init_status, ret_val;
+    uint16_t phy_id_high, phy_id_low;
+    boolean_t match = FALSE;
+
+    DEBUGFUNC("e1000_detect_gig_phy");
+
+    /* The 82571 firmware may still be configuring the PHY.  In this
+     * case, we cannot access the PHY until the configuration is done.  So
+     * we explicitly set the PHY values. */
+    if (hw->mac_type == e1000_82571 ||
+        hw->mac_type == e1000_82572) {
+        hw->phy_id = IGP01E1000_I_PHY_ID;
+        hw->phy_type = e1000_phy_igp_2;
+        return E1000_SUCCESS;
+    }
+
+    /* ESB-2 PHY reads require e1000_phy_gg82563 to be set because of a work-
+     * around that forces PHY page 0 to be set or the reads fail.  The rest of
+     * the code in this routine uses e1000_read_phy_reg to read the PHY ID.
+     * So for ESB-2 we need to have this set so our reads won't fail.  If the
+     * attached PHY is not an e1000_phy_gg82563, the routines below will figure
+     * this out as well. */
+    if (hw->mac_type == e1000_80003es2lan)
+        hw->phy_type = e1000_phy_gg82563;
+
+    /* Read the PHY ID Registers to identify which PHY is onboard. */
+    ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id_high);
+    if (ret_val)
+        return ret_val;
+
+    hw->phy_id = (uint32_t) (phy_id_high << 16);
+    usec_delay(20);
+    ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id_low);
+    if (ret_val)
+        return ret_val;
+
+    hw->phy_id |= (uint32_t) (phy_id_low & PHY_REVISION_MASK);
+    hw->phy_revision = (uint32_t) phy_id_low & ~PHY_REVISION_MASK;
+
+    switch (hw->mac_type) {
+    case e1000_82543:
+        if (hw->phy_id == M88E1000_E_PHY_ID) match = TRUE;
+        break;
+    case e1000_82544:
+        if (hw->phy_id == M88E1000_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        if (hw->phy_id == M88E1011_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (hw->phy_id == IGP01E1000_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_82573:
+        if (hw->phy_id == M88E1111_I_PHY_ID) match = TRUE;
+        break;
+    case e1000_80003es2lan:
+        if (hw->phy_id == GG82563_E_PHY_ID) match = TRUE;
+        break;
+    case e1000_ich8lan:
+        if (hw->phy_id == IGP03E1000_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == IFE_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == IFE_PLUS_E_PHY_ID) match = TRUE;
+        if (hw->phy_id == IFE_C_E_PHY_ID) match = TRUE;
+        break;
+    default:
+        DEBUGOUT1("Invalid MAC type %d\n", hw->mac_type);
+        return -E1000_ERR_CONFIG;
+    }
+    phy_init_status = e1000_set_phy_type(hw);
+
+    if ((match) && (phy_init_status == E1000_SUCCESS)) {
+        DEBUGOUT1("PHY ID 0x%X detected\n", hw->phy_id);
+        return E1000_SUCCESS;
+    }
+    DEBUGOUT1("Invalid PHY ID 0x%X\n", hw->phy_id);
+    return -E1000_ERR_PHY;
+}
+
+/******************************************************************************
+* Resets the PHY's DSP
+*
+* hw - Struct containing variables accessed by shared code
+******************************************************************************/
+static int32_t
+e1000_phy_reset_dsp(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    DEBUGFUNC("e1000_phy_reset_dsp");
+
+    do {
+        if (hw->phy_type != e1000_phy_gg82563) {
+            ret_val = e1000_write_phy_reg(hw, 29, 0x001d);
+            if (ret_val) break;
+        }
+        ret_val = e1000_write_phy_reg(hw, 30, 0x00c1);
+        if (ret_val) break;
+        ret_val = e1000_write_phy_reg(hw, 30, 0x0000);
+        if (ret_val) break;
+        ret_val = E1000_SUCCESS;
+    } while (0);
+
+    return ret_val;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for igp PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_igp_get_info(struct e1000_hw *hw,
+                       struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data, polarity, min_length, max_length, average;
+
+    DEBUGFUNC("e1000_phy_igp_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and it is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    /* IGP01E1000 does not need to support extended 10BASE-T distance. */
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    /* IGP01E1000 always corrects polarity reversal. */
+    phy_info->polarity_correction = e1000_polarity_reversal_enabled;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (phy_data & IGP01E1000_PSSR_MDIX) >>
+                          IGP01E1000_PSSR_MDIX_SHIFT;
+
+    if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+       IGP01E1000_PSSR_SPEED_1000MBPS) {
+        /* Local/Remote Receiver Information are only valid at 1000 Mbps */
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT;
+        phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT;
+
+        /* Get cable length */
+        ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+        if (ret_val)
+            return ret_val;
+
+        /* Translate to old method */
+        average = (max_length + min_length) / 2;
+
+        if (average <= e1000_igp_cable_length_50)
+            phy_info->cable_length = e1000_cable_length_50;
+        else if (average <= e1000_igp_cable_length_80)
+            phy_info->cable_length = e1000_cable_length_50_80;
+        else if (average <= e1000_igp_cable_length_110)
+            phy_info->cable_length = e1000_cable_length_80_110;
+        else if (average <= e1000_igp_cable_length_140)
+            phy_info->cable_length = e1000_cable_length_110_140;
+        else
+            phy_info->cable_length = e1000_cable_length_140;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for ife PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_ife_get_info(struct e1000_hw *hw,
+                       struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data, polarity;
+
+    DEBUGFUNC("e1000_phy_ife_get_info");
+
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_normal;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+    phy_info->polarity_correction =
+                        (phy_data & IFE_PSC_AUTO_POLARITY_DISABLE) >>
+                        IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT;
+
+    if (phy_info->polarity_correction == e1000_polarity_reversal_enabled) {
+        ret_val = e1000_check_polarity(hw, &polarity);
+        if (ret_val)
+            return ret_val;
+    } else {
+        /* Polarity is forced. */
+        polarity = (phy_data & IFE_PSC_FORCE_POLARITY) >>
+                       IFE_PSC_FORCE_POLARITY_SHIFT;
+    }
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode =
+                     (phy_data & (IFE_PMC_AUTO_MDIX | IFE_PMC_FORCE_MDIX)) >>
+                     IFE_PMC_MDIX_MODE_SHIFT;
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers for m88 PHY only.
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_m88_get_info(struct e1000_hw *hw,
+                       struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data, polarity;
+
+    DEBUGFUNC("e1000_phy_m88_get_info");
+
+    /* The downshift status is checked only once, after link is established,
+     * and it is stored in the hw->speed_downgraded parameter. */
+    phy_info->downshift = (e1000_downshift)hw->speed_downgraded;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->extended_10bt_distance =
+        (phy_data & M88E1000_PSCR_10BT_EXT_DIST_ENABLE) >>
+        M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT;
+    phy_info->polarity_correction =
+        (phy_data & M88E1000_PSCR_POLARITY_REVERSAL) >>
+        M88E1000_PSCR_POLARITY_REVERSAL_SHIFT;
+
+    /* Check polarity status */
+    ret_val = e1000_check_polarity(hw, &polarity);
+    if (ret_val)
+        return ret_val;
+    phy_info->cable_polarity = polarity;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_info->mdix_mode = (phy_data & M88E1000_PSSR_MDIX) >>
+                          M88E1000_PSSR_MDIX_SHIFT;
+
+    if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+        /* Cable Length Estimation and Local/Remote Receiver Information
+         * are only valid at 1000 Mbps.
+         */
+        if (hw->phy_type != e1000_phy_gg82563) {
+            phy_info->cable_length = ((phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                                      M88E1000_PSSR_CABLE_LENGTH_SHIFT);
+        } else {
+            ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_info->cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+        }
+
+        ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_info->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS) >>
+                             SR_1000T_LOCAL_RX_STATUS_SHIFT;
+
+        phy_info->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS) >>
+                              SR_1000T_REMOTE_RX_STATUS_SHIFT;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+* Get PHY information from various PHY registers
+*
+* hw - Struct containing variables accessed by shared code
+* phy_info - PHY information structure
+******************************************************************************/
+int32_t
+e1000_phy_get_info(struct e1000_hw *hw,
+                   struct e1000_phy_info *phy_info)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_phy_get_info");
+
+    phy_info->cable_length = e1000_cable_length_undefined;
+    phy_info->extended_10bt_distance = e1000_10bt_ext_dist_enable_undefined;
+    phy_info->cable_polarity = e1000_rev_polarity_undefined;
+    phy_info->downshift = e1000_downshift_undefined;
+    phy_info->polarity_correction = e1000_polarity_reversal_undefined;
+    phy_info->mdix_mode = e1000_auto_x_mode_undefined;
+    phy_info->local_rx = e1000_1000t_rx_status_undefined;
+    phy_info->remote_rx = e1000_1000t_rx_status_undefined;
+
+    if (hw->media_type != e1000_media_type_copper) {
+        DEBUGOUT("PHY info is only valid for copper media\n");
+        return -E1000_ERR_CONFIG;
+    }
+
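+    /* The link status bit in PHY_STATUS is latched low, so read the register
+     * twice; the second read returns the current link state. */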
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    if ((phy_data & MII_SR_LINK_STATUS) != MII_SR_LINK_STATUS) {
+        DEBUGOUT("PHY info is only valid if link is up\n");
+        return -E1000_ERR_CONFIG;
+    }
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2)
+        return e1000_phy_igp_get_info(hw, phy_info);
+    else if (hw->phy_type == e1000_phy_ife)
+        return e1000_phy_ife_get_info(hw, phy_info);
+    else
+        return e1000_phy_m88_get_info(hw, phy_info);
+}
+
+int32_t
+e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_validate_mdi_settings");
+
+    if (!hw->autoneg && (hw->mdix == 0 || hw->mdix == 3)) {
+        DEBUGOUT("Invalid MDI setting detected\n");
+        hw->mdix = 1;
+        return -E1000_ERR_CONFIG;
+    }
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Sets up eeprom variables in the hw struct.  Must be called after mac_type
+ * is configured.  Additionally, if this is ICH8, the flash controller GbE
+ * registers must be mapped, or this will crash.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_init_eeprom_params(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd = E1000_READ_REG(hw, EECD);
+    int32_t ret_val = E1000_SUCCESS;
+    uint16_t eeprom_size;
+
+    DEBUGFUNC("e1000_init_eeprom_params");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->word_size = 64;
+        eeprom->opcode_bits = 3;
+        eeprom->address_bits = 6;
+        eeprom->delay_usec = 50;
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82540:
+    case e1000_82545:
+    case e1000_82545_rev_3:
+    case e1000_82546:
+    case e1000_82546_rev_3:
+        eeprom->type = e1000_eeprom_microwire;
+        eeprom->opcode_bits = 3;
+        eeprom->delay_usec = 50;
+        if (eecd & E1000_EECD_SIZE) {
+            eeprom->word_size = 256;
+            eeprom->address_bits = 8;
+        } else {
+            eeprom->word_size = 64;
+            eeprom->address_bits = 6;
+        }
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82541:
+    case e1000_82541_rev_2:
+    case e1000_82547:
+    case e1000_82547_rev_2:
+        if (eecd & E1000_EECD_TYPE) {
+            eeprom->type = e1000_eeprom_spi;
+            eeprom->opcode_bits = 8;
+            eeprom->delay_usec = 1;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->page_size = 32;
+                eeprom->address_bits = 16;
+            } else {
+                eeprom->page_size = 8;
+                eeprom->address_bits = 8;
+            }
+        } else {
+            eeprom->type = e1000_eeprom_microwire;
+            eeprom->opcode_bits = 3;
+            eeprom->delay_usec = 50;
+            if (eecd & E1000_EECD_ADDR_BITS) {
+                eeprom->word_size = 256;
+                eeprom->address_bits = 8;
+            } else {
+                eeprom->word_size = 64;
+                eeprom->address_bits = 6;
+            }
+        }
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82571:
+    case e1000_82572:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_82573:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = TRUE;
+        eeprom->use_eewr = TRUE;
+        if (e1000_is_onboard_nvm_eeprom(hw) == FALSE) {
+            eeprom->type = e1000_eeprom_flash;
+            eeprom->word_size = 2048;
+
+            /* Ensure that the Autonomous FLASH update bit is cleared due to
+             * Flash update issue on parts which use a FLASH for NVM. */
+            eecd &= ~E1000_EECD_AUPDEN;
+            E1000_WRITE_REG(hw, EECD, eecd);
+        }
+        break;
+    case e1000_80003es2lan:
+        eeprom->type = e1000_eeprom_spi;
+        eeprom->opcode_bits = 8;
+        eeprom->delay_usec = 1;
+        if (eecd & E1000_EECD_ADDR_BITS) {
+            eeprom->page_size = 32;
+            eeprom->address_bits = 16;
+        } else {
+            eeprom->page_size = 8;
+            eeprom->address_bits = 8;
+        }
+        eeprom->use_eerd = TRUE;
+        eeprom->use_eewr = FALSE;
+        break;
+    case e1000_ich8lan:
+    {
+        int32_t  i = 0;
+        uint32_t flash_size = E1000_READ_ICH8_REG(hw, ICH8_FLASH_GFPREG);
+
+        eeprom->type = e1000_eeprom_ich8;
+        eeprom->use_eerd = FALSE;
+        eeprom->use_eewr = FALSE;
+        eeprom->word_size = E1000_SHADOW_RAM_WORDS;
+
+        /* Zero the shadow RAM structure. But don't load it from NVM
+         * so as to save time for driver init */
+        if (hw->eeprom_shadow_ram != NULL) {
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                hw->eeprom_shadow_ram[i].modified = FALSE;
+                hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+            }
+        }
+
+        hw->flash_base_addr = (flash_size & ICH8_GFPREG_BASE_MASK) *
+                              ICH8_FLASH_SECTOR_SIZE;
+
+        hw->flash_bank_size = ((flash_size >> 16) & ICH8_GFPREG_BASE_MASK) + 1;
+        hw->flash_bank_size -= (flash_size & ICH8_GFPREG_BASE_MASK);
+        hw->flash_bank_size *= ICH8_FLASH_SECTOR_SIZE;
+        hw->flash_bank_size /= 2 * sizeof(uint16_t);
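+        /* Worked example, assuming GFPREG holds the flash base in bits 12:0
+         * and the limit in bits 28:16 (both in sector units) with
+         * ICH8_FLASH_SECTOR_SIZE == 4096: base 0x01 and limit 0x1F give
+         * (0x1F + 1 - 0x01) = 31 sectors = 126976 bytes, and dividing by
+         * 2 * sizeof(uint16_t) leaves 31744 16-bit words per bank.
+         */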
+
+        break;
+    }
+    default:
+        break;
+    }
+
+    if (eeprom->type == e1000_eeprom_spi) {
+        /* eeprom_size will be an enum [0..8] that maps to eeprom sizes 128B to
+         * 32KB (incremented by powers of 2).
+         */
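+        /* For example, assuming EEPROM_WORD_SIZE_SHIFT == 6: eeprom_size 0
+         * maps to 1 << 6 = 64 words (128B) and eeprom_size 8 maps to
+         * 1 << 14 = 16384 words (32KB) in the shift at the end of this block.
+         */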
+        if (hw->mac_type <= e1000_82547_rev_2) {
+            /* Set to default value for initial eeprom read. */
+            eeprom->word_size = 64;
+            ret_val = e1000_read_eeprom(hw, EEPROM_CFG, 1, &eeprom_size);
+            if (ret_val)
+                return ret_val;
+            eeprom_size = (eeprom_size & EEPROM_SIZE_MASK) >> EEPROM_SIZE_SHIFT;
+            /* 256B eeprom size was not supported in earlier hardware, so we
+             * bump eeprom_size up one to ensure that "1" (which maps to 256B)
+             * is never the result used in the shifting logic below. */
+            if (eeprom_size)
+                eeprom_size++;
+        } else {
+            eeprom_size = (uint16_t)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+                          E1000_EECD_SIZE_EX_SHIFT);
+        }
+
+        eeprom->word_size = 1 << (eeprom_size + EEPROM_WORD_SIZE_SHIFT);
+    }
+    return ret_val;
+}
+
+/******************************************************************************
+ * Raises the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void
+e1000_raise_ee_clk(struct e1000_hw *hw,
+                   uint32_t *eecd)
+{
+    /* Raise the clock input to the EEPROM (by setting the SK bit), and then
+     * wait <delay> microseconds.
+     */
+    *eecd = *eecd | E1000_EECD_SK;
+    E1000_WRITE_REG(hw, EECD, *eecd);
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Lowers the EEPROM's clock input.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * eecd - EECD's current value
+ *****************************************************************************/
+static void
+e1000_lower_ee_clk(struct e1000_hw *hw,
+                   uint32_t *eecd)
+{
+    /* Lower the clock input to the EEPROM (by clearing the SK bit), and then
+     * wait 50 microseconds.
+     */
+    *eecd = *eecd & ~E1000_EECD_SK;
+    E1000_WRITE_REG(hw, EECD, *eecd);
+    E1000_WRITE_FLUSH(hw);
+    usec_delay(hw->eeprom.delay_usec);
+}
+
+/******************************************************************************
+ * Shift data bits out to the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * data - data to send to the EEPROM
+ * count - number of bits to shift out
+ *****************************************************************************/
+static void
+e1000_shift_out_ee_bits(struct e1000_hw *hw,
+                        uint16_t data,
+                        uint16_t count)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd;
+    uint32_t mask;
+
+    /* We need to shift "count" bits out to the EEPROM. So, value in the
+     * "data" parameter will be shifted out to the EEPROM one bit at a time.
+     * In order to do this, "data" must be broken down into bits.
+     */
+    mask = 0x01 << (count - 1);
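+    /* e.g. with count == 16 the mask starts at 0x8000, so the MSB of "data"
+     * is driven onto DI first and the mask walks down one bit per clock. */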
+    eecd = E1000_READ_REG(hw, EECD);
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~E1000_EECD_DO;
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_DO;
+    }
+    do {
+        /* A "1" is shifted out to the EEPROM by setting bit "DI" to a "1",
+         * and then raising and then lowering the clock (the SK bit controls
+         * the clock input to the EEPROM).  A "0" is shifted out to the EEPROM
+         * by setting "DI" to "0" and then raising and then lowering the clock.
+         */
+        eecd &= ~E1000_EECD_DI;
+
+        if (data & mask)
+            eecd |= E1000_EECD_DI;
+
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+
+        usec_delay(eeprom->delay_usec);
+
+        e1000_raise_ee_clk(hw, &eecd);
+        e1000_lower_ee_clk(hw, &eecd);
+
+        mask = mask >> 1;
+
+    } while (mask);
+
+    /* We leave the "DI" bit set to "0" when we leave this routine. */
+    eecd &= ~E1000_EECD_DI;
+    E1000_WRITE_REG(hw, EECD, eecd);
+}
+
+/******************************************************************************
+ * Shift data bits in from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static uint16_t
+e1000_shift_in_ee_bits(struct e1000_hw *hw,
+                       uint16_t count)
+{
+    uint32_t eecd;
+    uint32_t i;
+    uint16_t data;
+
+    /* In order to read a register from the EEPROM, we need to shift 'count'
+     * bits in from the EEPROM. Bits are "shifted in" by raising the clock
+     * input to the EEPROM (setting the SK bit), and then reading the value of
+     * the "DO" bit.  During this "shifting in" process the "DI" bit should
+     * always be clear.
+     */
+
+    eecd = E1000_READ_REG(hw, EECD);
+
+    eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+    data = 0;
+
+    for (i = 0; i < count; i++) {
+        data = data << 1;
+        e1000_raise_ee_clk(hw, &eecd);
+
+        eecd = E1000_READ_REG(hw, EECD);
+
+        eecd &= ~(E1000_EECD_DI);
+        if (eecd & E1000_EECD_DO)
+            data |= 1;
+
+        e1000_lower_ee_clk(hw, &eecd);
+    }
+
+    return data;
+}
+
+/******************************************************************************
+ * Prepares EEPROM for access
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Lowers EEPROM clock. Clears input pin. Sets the chip select pin. This
+ * function should be called before issuing a command to the EEPROM.
+ *****************************************************************************/
+static int32_t
+e1000_acquire_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd, i = 0;
+
+    DEBUGFUNC("e1000_acquire_eeprom");
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+    eecd = E1000_READ_REG(hw, EECD);
+
+    if (hw->mac_type != e1000_82573) {
+        /* Request EEPROM Access */
+        if (hw->mac_type > e1000_82544) {
+            eecd |= E1000_EECD_REQ;
+            E1000_WRITE_REG(hw, EECD, eecd);
+            eecd = E1000_READ_REG(hw, EECD);
+            while ((!(eecd & E1000_EECD_GNT)) &&
+                  (i < E1000_EEPROM_GRANT_ATTEMPTS)) {
+                i++;
+                usec_delay(5);
+                eecd = E1000_READ_REG(hw, EECD);
+            }
+            if (!(eecd & E1000_EECD_GNT)) {
+                eecd &= ~E1000_EECD_REQ;
+                E1000_WRITE_REG(hw, EECD, eecd);
+                DEBUGOUT("Could not acquire EEPROM grant\n");
+                e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+                return -E1000_ERR_EEPROM;
+            }
+        }
+    }
+
+    /* Setup EEPROM for Read/Write */
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        /* Clear SK and DI */
+        eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+        E1000_WRITE_REG(hw, EECD, eecd);
+
+        /* Set CS */
+        eecd |= E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Clear SK and CS */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        E1000_WRITE_REG(hw, EECD, eecd);
+        usec_delay(1);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Returns EEPROM to a "standby" state
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_standby_eeprom(struct e1000_hw *hw)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd;
+
+    eecd = E1000_READ_REG(hw, EECD);
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+
+        /* Clock high */
+        eecd |= E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+
+        /* Select EEPROM */
+        eecd |= E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+
+        /* Clock low */
+        eecd &= ~E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+    } else if (eeprom->type == e1000_eeprom_spi) {
+        /* Toggle CS to flush commands */
+        eecd |= E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+        eecd &= ~E1000_EECD_CS;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(eeprom->delay_usec);
+    }
+}
+
+/******************************************************************************
+ * Terminates a command by inverting the EEPROM's chip select pin
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static void
+e1000_release_eeprom(struct e1000_hw *hw)
+{
+    uint32_t eecd;
+
+    DEBUGFUNC("e1000_release_eeprom");
+
+    eecd = E1000_READ_REG(hw, EECD);
+
+    if (hw->eeprom.type == e1000_eeprom_spi) {
+        eecd |= E1000_EECD_CS;  /* Pull CS high */
+        eecd &= ~E1000_EECD_SK; /* Lower SCK */
+
+        E1000_WRITE_REG(hw, EECD, eecd);
+
+        usec_delay(hw->eeprom.delay_usec);
+    } else if (hw->eeprom.type == e1000_eeprom_microwire) {
+        /* cleanup eeprom */
+
+        /* CS on Microwire is active-high */
+        eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+
+        E1000_WRITE_REG(hw, EECD, eecd);
+
+        /* Rising edge of clock */
+        eecd |= E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(hw->eeprom.delay_usec);
+
+        /* Falling edge of clock */
+        eecd &= ~E1000_EECD_SK;
+        E1000_WRITE_REG(hw, EECD, eecd);
+        E1000_WRITE_FLUSH(hw);
+        usec_delay(hw->eeprom.delay_usec);
+    }
+
+    /* Stop requesting EEPROM access */
+    if (hw->mac_type > e1000_82544) {
+        eecd &= ~E1000_EECD_REQ;
+        E1000_WRITE_REG(hw, EECD, eecd);
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+}
+
+/******************************************************************************
+ * Polls the SPI EEPROM status register until the EEPROM is ready.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_spi_eeprom_ready(struct e1000_hw *hw)
+{
+    uint16_t retry_count = 0;
+    uint8_t spi_stat_reg;
+
+    DEBUGFUNC("e1000_spi_eeprom_ready");
+
+    /* Read "Status Register" repeatedly until the LSB is cleared.  The
+     * EEPROM will signal that the command has been completed by clearing
+     * bit 0 of the internal status register.  If it's not cleared within
+     * 5 milliseconds, then error out.
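+     * (RDSR is the standard SPI EEPROM "read status register" opcode; bit 0
+     * is the write-in-progress flag, which EEPROM_STATUS_RDY_SPI is assumed
+     * to mask.)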
+     */
+    retry_count = 0;
+    do {
+        e1000_shift_out_ee_bits(hw, EEPROM_RDSR_OPCODE_SPI,
+                                hw->eeprom.opcode_bits);
+        spi_stat_reg = (uint8_t)e1000_shift_in_ee_bits(hw, 8);
+        if (!(spi_stat_reg & EEPROM_STATUS_RDY_SPI))
+            break;
+
+        usec_delay(5);
+        retry_count += 5;
+
+        e1000_standby_eeprom(hw);
+    } while (retry_count < EEPROM_MAX_RETRY_SPI);
+
+    /* ATMEL SPI write time could vary from 0-20mSec on 3.3V devices (and
+     * only 0-5mSec on 5V devices)
+     */
+    if (retry_count >= EEPROM_MAX_RETRY_SPI) {
+        DEBUGOUT("SPI EEPROM Status error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads 16 bit words from the EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - words read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom(struct e1000_hw *hw,
+                  uint16_t offset,
+                  uint16_t words,
+                  uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t i = 0;
+    int32_t ret_val;
+
+    DEBUGFUNC("e1000_read_eeprom");
+
+    /* A check for invalid values:  offset too large, too many words, and not
+     * enough words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT("\"words\" parameter out of bounds\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    /* FLASH reads without acquiring the semaphore are safe */
+    if (e1000_is_onboard_nvm_eeprom(hw) == TRUE &&
+        hw->eeprom.use_eerd == FALSE) {
+        switch (hw->mac_type) {
+        case e1000_80003es2lan:
+            break;
+        default:
+            /* Prepare the EEPROM for reading  */
+            if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+                return -E1000_ERR_EEPROM;
+            break;
+        }
+    }
+
+    if (eeprom->use_eerd == TRUE) {
+        ret_val = e1000_read_eeprom_eerd(hw, offset, words, data);
+        if ((e1000_is_onboard_nvm_eeprom(hw) == TRUE) ||
+            (hw->mac_type != e1000_82573))
+            e1000_release_eeprom(hw);
+        return ret_val;
+    }
+
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_read_eeprom_ich8(hw, offset, words, data);
+
+    if (eeprom->type == e1000_eeprom_spi) {
+        uint16_t word_in;
+        uint8_t read_opcode = EEPROM_READ_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) {
+            e1000_release_eeprom(hw);
+            return -E1000_ERR_EEPROM;
+        }
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            read_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the READ command (opcode + addr)  */
+        e1000_shift_out_ee_bits(hw, read_opcode, eeprom->opcode_bits);
+        e1000_shift_out_ee_bits(hw, (uint16_t)(offset*2), eeprom->address_bits);
+
+        /* Read the data.  The address of the eeprom internally increments with
+         * each byte (spi) being read, saving on the overhead of eeprom setup
+         * and tear-down.  The address counter will roll over if reading beyond
+         * the size of the eeprom, thus allowing the entire memory to be read
+         * starting from any offset. */
+        for (i = 0; i < words; i++) {
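+            /* The byte at the lower EEPROM address is clocked in first and
+             * lands in the high half of word_in; NVM words are stored low
+             * byte first, so swap the halves to recover the 16-bit value. */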
+            word_in = e1000_shift_in_ee_bits(hw, 16);
+            data[i] = (word_in >> 8) | (word_in << 8);
+        }
+    } else if (eeprom->type == e1000_eeprom_microwire) {
+        for (i = 0; i < words; i++) {
+            /* Send the READ command (opcode + addr)  */
+            e1000_shift_out_ee_bits(hw, EEPROM_READ_OPCODE_MICROWIRE,
+                                    eeprom->opcode_bits);
+            e1000_shift_out_ee_bits(hw, (uint16_t)(offset + i),
+                                    eeprom->address_bits);
+
+            /* Read the data.  For microwire, each word requires the overhead
+             * of eeprom setup and tear-down. */
+            data[i] = e1000_shift_in_ee_bits(hw, 16);
+            e1000_standby_eeprom(hw);
+        }
+    }
+
+    /* End this read operation */
+    e1000_release_eeprom(hw);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads 16 bit words from the EEPROM using the EERD register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - words read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom_eerd(struct e1000_hw *hw,
+                  uint16_t offset,
+                  uint16_t words,
+                  uint16_t *data)
+{
+    uint32_t i, eerd = 0;
+    int32_t error = 0;
+
+    for (i = 0; i < words; i++) {
+        eerd = ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) +
+                         E1000_EEPROM_RW_REG_START;
+
+        E1000_WRITE_REG(hw, EERD, eerd);
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_READ);
+
+        if (error) {
+            break;
+        }
+        data[i] = (E1000_READ_REG(hw, EERD) >> E1000_EEPROM_RW_REG_DATA);
+
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to the EEPROM using the EEWR register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write to
+ * data - words to write to the EEPROM
+ * words - number of words to write
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_eewr(struct e1000_hw *hw,
+                   uint16_t offset,
+                   uint16_t words,
+                   uint16_t *data)
+{
+    uint32_t    register_value = 0;
+    uint32_t    i              = 0;
+    int32_t     error          = 0;
+
+    if (e1000_swfw_sync_acquire(hw, E1000_SWFW_EEP_SM))
+        return -E1000_ERR_SWFW_SYNC;
+
+    for (i = 0; i < words; i++) {
+        register_value = (data[i] << E1000_EEPROM_RW_REG_DATA) |
+                         ((offset+i) << E1000_EEPROM_RW_ADDR_SHIFT) |
+                         E1000_EEPROM_RW_REG_START;
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+        if (error) {
+            break;
+        }
+
+        E1000_WRITE_REG(hw, EEWR, register_value);
+
+        error = e1000_poll_eerd_eewr_done(hw, E1000_EEPROM_POLL_WRITE);
+
+        if (error) {
+            break;
+        }
+    }
+
+    e1000_swfw_sync_release(hw, E1000_SWFW_EEP_SM);
+    return error;
+}
+
+/******************************************************************************
+ * Polls the status bit (bit 1) of the EERD or EEWR register to determine
+ * when the read or write is done.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd)
+{
+    uint32_t attempts = 100000;
+    uint32_t i, reg = 0;
+    int32_t done = E1000_ERR_EEPROM;
+
+    for (i = 0; i < attempts; i++) {
+        if (eerd == E1000_EEPROM_POLL_READ)
+            reg = E1000_READ_REG(hw, EERD);
+        else
+            reg = E1000_READ_REG(hw, EEWR);
+
+        if (reg & E1000_EEPROM_RW_REG_DONE) {
+            done = E1000_SUCCESS;
+            break;
+        }
+        usec_delay(5);
+    }
+
+    return done;
+}
+
+/***************************************************************************
+* Description:     Determines if the onboard NVM is FLASH or EEPROM.
+*
+* hw - Struct containing variables accessed by shared code
+****************************************************************************/
+boolean_t
+e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw)
+{
+    uint32_t eecd = 0;
+
+    DEBUGFUNC("e1000_is_onboard_nvm_eeprom");
+
+    if (hw->mac_type == e1000_ich8lan)
+        return FALSE;
+
+    if (hw->mac_type == e1000_82573) {
+        eecd = E1000_READ_REG(hw, EECD);
+
+        /* Isolate bits 15 & 16 */
+        eecd = ((eecd >> 15) & 0x03);
+
+        /* If both bits are set, device is Flash type */
+        if (eecd == 0x03) {
+            return FALSE;
+        }
+    }
+    return TRUE;
+}
+
+/******************************************************************************
+ * Verifies that the EEPROM has a valid checksum
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Reads the first 64 16 bit words of the EEPROM and sums the values read.
+ * If the sum of the 64 16 bit words is 0xBABA, the EEPROM's checksum is
+ * valid.
+ *****************************************************************************/
+int32_t
+e1000_validate_eeprom_checksum(struct e1000_hw *hw)
+{
+    uint16_t checksum = 0;
+    uint16_t i, eeprom_data;
+
+    DEBUGFUNC("e1000_validate_eeprom_checksum");
+
+    if ((hw->mac_type == e1000_82573) &&
+        (e1000_is_onboard_nvm_eeprom(hw) == FALSE)) {
+        /* Check bit 4 of word 10h.  If it is 0, firmware is done updating
+         * 10h-12h.  Checksum may need to be fixed. */
+        e1000_read_eeprom(hw, 0x10, 1, &eeprom_data);
+        if ((eeprom_data & 0x10) == 0) {
+            /* Read 0x23 and check bit 15.  This bit is a 1 when the checksum
+             * has already been fixed.  If the checksum is still wrong and this
+             * bit is a 1, we need to return bad checksum.  Otherwise, we need
+             * to set this bit to a 1 and update the checksum. */
+            e1000_read_eeprom(hw, 0x23, 1, &eeprom_data);
+            if ((eeprom_data & 0x8000) == 0) {
+                eeprom_data |= 0x8000;
+                e1000_write_eeprom(hw, 0x23, 1, &eeprom_data);
+                e1000_update_eeprom_checksum(hw);
+            }
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan) {
+        /* Drivers must allocate the shadow ram structure for the
+         * EEPROM checksum to be updated.  Otherwise, this bit as well
+         * as the checksum must both be set correctly for this
+         * validation to pass.
+         */
+        e1000_read_eeprom(hw, 0x19, 1, &eeprom_data);
+        if ((eeprom_data & 0x40) == 0) {
+            eeprom_data |= 0x40;
+            e1000_write_eeprom(hw, 0x19, 1, &eeprom_data);
+            e1000_update_eeprom_checksum(hw);
+        }
+    }
+
+    for (i = 0; i < (EEPROM_CHECKSUM_REG + 1); i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
+
+    if (checksum == (uint16_t) EEPROM_SUM)
+        return E1000_SUCCESS;
+    else {
+        DEBUGOUT("EEPROM Checksum Invalid\n");
+        return -E1000_ERR_EEPROM;
+    }
+}
+
+/******************************************************************************
+ * Calculates the EEPROM checksum and writes it to the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Sums the first 63 16 bit words of the EEPROM. Subtracts the sum from 0xBABA.
+ * Writes the difference to word offset 63 of the EEPROM.
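+ * For example, if words 0x00-0x3E sum to 0xB000, then 0xBABA - 0xB000 =
+ * 0x0ABA is written to word 0x3F so that the full 64-word sum is 0xBABA.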
+ *****************************************************************************/
+int32_t
+e1000_update_eeprom_checksum(struct e1000_hw *hw)
+{
+    uint32_t ctrl_ext;
+    uint16_t checksum = 0;
+    uint16_t i, eeprom_data;
+
+    DEBUGFUNC("e1000_update_eeprom_checksum");
+
+    for (i = 0; i < EEPROM_CHECKSUM_REG; i++) {
+        if (e1000_read_eeprom(hw, i, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        checksum += eeprom_data;
+    }
+    checksum = (uint16_t) EEPROM_SUM - checksum;
+    if (e1000_write_eeprom(hw, EEPROM_CHECKSUM_REG, 1, &checksum) < 0) {
+        DEBUGOUT("EEPROM Write Error\n");
+        return -E1000_ERR_EEPROM;
+    } else if (hw->eeprom.type == e1000_eeprom_flash) {
+        e1000_commit_shadow_ram(hw);
+    } else if (hw->eeprom.type == e1000_eeprom_ich8) {
+        e1000_commit_shadow_ram(hw);
+        /* Reload the EEPROM, or else modifications will not appear
+         * until after next adapter reset. */
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+        msec_delay(10);
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Parent function for writing words to the different EEPROM types.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - 16 bit word to be written to the EEPROM
+ *
+ * If e1000_update_eeprom_checksum is not called after this function, the
+ * EEPROM will most likely contain an invalid checksum.
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom(struct e1000_hw *hw,
+                   uint16_t offset,
+                   uint16_t words,
+                   uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    int32_t status = 0;
+
+    DEBUGFUNC("e1000_write_eeprom");
+
+    /* A check for invalid values: offset too large, too many words,
+     * or zero words.
+     */
+    if ((offset >= eeprom->word_size) || (words > eeprom->word_size - offset) ||
+       (words == 0)) {
+        DEBUGOUT("\"words\" parameter out of bounds\n");
+        return -E1000_ERR_EEPROM;
+    }
+
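+    /* Dispatch below by access method: EEWR-only parts first, then ICH8
+     * flash, then the bit-banged Microwire/SPI paths. */
+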
+    /* 82573 writes only through eewr */
+    if (eeprom->use_eewr == TRUE)
+        return e1000_write_eeprom_eewr(hw, offset, words, data);
+
+    if (eeprom->type == e1000_eeprom_ich8)
+        return e1000_write_eeprom_ich8(hw, offset, words, data);
+
+    /* Prepare the EEPROM for writing  */
+    if (e1000_acquire_eeprom(hw) != E1000_SUCCESS)
+        return -E1000_ERR_EEPROM;
+
+    if (eeprom->type == e1000_eeprom_microwire) {
+        status = e1000_write_eeprom_microwire(hw, offset, words, data);
+    } else {
+        status = e1000_write_eeprom_spi(hw, offset, words, data);
+        msec_delay(10);
+    }
+
+    /* Done with writing */
+    e1000_release_eeprom(hw);
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to a given offset in an SPI EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_spi(struct e1000_hw *hw,
+                       uint16_t offset,
+                       uint16_t words,
+                       uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint16_t widx = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_spi");
+
+    while (widx < words) {
+        uint8_t write_opcode = EEPROM_WRITE_OPCODE_SPI;
+
+        if (e1000_spi_eeprom_ready(hw)) return -E1000_ERR_EEPROM;
+
+        e1000_standby_eeprom(hw);
+
+        /*  Send the WRITE ENABLE command (8 bit opcode )  */
+        e1000_shift_out_ee_bits(hw, EEPROM_WREN_OPCODE_SPI,
+                                    eeprom->opcode_bits);
+
+        e1000_standby_eeprom(hw);
+
+        /* Some SPI eeproms use the 8th address bit embedded in the opcode */
+        if ((eeprom->address_bits == 8) && (offset >= 128))
+            write_opcode |= EEPROM_A8_OPCODE_SPI;
+
+        /* Send the Write command (8-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, write_opcode, eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (uint16_t)((offset + widx)*2),
+                                eeprom->address_bits);
+
+        /* Send the data */
+
+        /* Loop to allow for up to a whole page write (32 bytes) of the eeprom */
+        while (widx < words) {
+            uint16_t word_out = data[widx];
+            word_out = (word_out >> 8) | (word_out << 8);
+            e1000_shift_out_ee_bits(hw, word_out, 16);
+            widx++;
+
+            /* Some larger eeprom sizes are capable of a 32-byte PAGE WRITE
+             * operation, while the smaller eeproms are capable of an 8-byte
+             * PAGE WRITE operation.  Break the inner loop to pass new address
+             */
+            if ((((offset + widx)*2) % eeprom->page_size) == 0) {
+                e1000_standby_eeprom(hw);
+                break;
+            }
+        }
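+        /* e.g. with offset 0 and a 32-byte page, the modulo above fires
+         * once widx reaches 16 words (32 bytes), forcing a fresh WRITE
+         * command with the next address. */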
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Writes 16 bit words to a given offset in a Microwire EEPROM.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset within the EEPROM to be written to
+ * words - number of words to write
+ * data - pointer to array of 16 bit words to be written to the EEPROM
+ *
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_microwire(struct e1000_hw *hw,
+                             uint16_t offset,
+                             uint16_t words,
+                             uint16_t *data)
+{
+    struct e1000_eeprom_info *eeprom = &hw->eeprom;
+    uint32_t eecd;
+    uint16_t words_written = 0;
+    uint16_t i = 0;
+
+    DEBUGFUNC("e1000_write_eeprom_microwire");
+
+    /* Send the write enable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 11).  It's less work to include
+     * the 11 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This puts the
+     * EEPROM into write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWEN_OPCODE_MICROWIRE,
+                            (uint16_t)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+
+    /* Prepare the EEPROM */
+    e1000_standby_eeprom(hw);
+
+    while (words_written < words) {
+        /* Send the Write command (3-bit opcode + addr) */
+        e1000_shift_out_ee_bits(hw, EEPROM_WRITE_OPCODE_MICROWIRE,
+                                eeprom->opcode_bits);
+
+        e1000_shift_out_ee_bits(hw, (uint16_t)(offset + words_written),
+                                eeprom->address_bits);
+
+        /* Send the data */
+        e1000_shift_out_ee_bits(hw, data[words_written], 16);
+
+        /* Toggle the CS line.  This in effect tells the EEPROM to execute
+         * the previous command.
+         */
+        e1000_standby_eeprom(hw);
+
+        /* Read DO repeatedly until it is high (equal to '1').  The EEPROM will
+         * signal that the command has been completed by raising the DO signal.
+         * If DO does not go high in 10 milliseconds, then error out.
+         */
+        for (i = 0; i < 200; i++) {
+            eecd = E1000_READ_REG(hw, EECD);
+            if (eecd & E1000_EECD_DO) break;
+            usec_delay(50);
+        }
+        if (i == 200) {
+            DEBUGOUT("EEPROM Write did not complete\n");
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* Recover from write */
+        e1000_standby_eeprom(hw);
+
+        words_written++;
+    }
+
+    /* Send the write disable command to the EEPROM (3-bit opcode plus
+     * 6/8-bit dummy address beginning with 10).  It's less work to include
+     * the 10 of the dummy address as part of the opcode than it is to shift
+     * it over the correct number of bits for the address.  This takes the
+     * EEPROM out of write/erase mode.
+     */
+    e1000_shift_out_ee_bits(hw, EEPROM_EWDS_OPCODE_MICROWIRE,
+                            (uint16_t)(eeprom->opcode_bits + 2));
+
+    e1000_shift_out_ee_bits(hw, 0, (uint16_t)(eeprom->address_bits - 2));
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Flushes the cached EEPROM to NVM. This is done by writing the modified
+ * values from the EEPROM cache, and the unmodified values from the currently
+ * active bank, to the new bank.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_commit_shadow_ram(struct e1000_hw *hw)
+{
+    uint32_t attempts = 100000;
+    uint32_t eecd = 0;
+    uint32_t flop = 0;
+    uint32_t i = 0;
+    int32_t error = E1000_SUCCESS;
+    uint32_t old_bank_offset = 0;
+    uint32_t new_bank_offset = 0;
+    uint32_t sector_retries = 0;
+    uint8_t low_byte = 0;
+    uint8_t high_byte = 0;
+    uint8_t temp_byte = 0;
+    boolean_t sector_write_failed = FALSE;
+
+    if (hw->mac_type == e1000_82573) {
+        /* The flop register will be used to determine if flash type is STM */
+        flop = E1000_READ_REG(hw, FLOP);
+        for (i=0; i < attempts; i++) {
+            eecd = E1000_READ_REG(hw, EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            usec_delay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+
+        /* If STM opcode located in bits 15:8 of flop, reset firmware */
+        if ((flop & 0xFF00) == E1000_STM_OPCODE) {
+            E1000_WRITE_REG(hw, HICR, E1000_HICR_FW_RESET);
+        }
+
+        /* Perform the flash update */
+        E1000_WRITE_REG(hw, EECD, eecd | E1000_EECD_FLUPD);
+
+        for (i=0; i < attempts; i++) {
+            eecd = E1000_READ_REG(hw, EECD);
+            if ((eecd & E1000_EECD_FLUPD) == 0) {
+                break;
+            }
+            usec_delay(5);
+        }
+
+        if (i == attempts) {
+            return -E1000_ERR_EEPROM;
+        }
+    }
+
+    if (hw->mac_type == e1000_ich8lan && hw->eeprom_shadow_ram != NULL) {
+        /* We're writing to the opposite bank so if we're on bank 1,
+         * write to bank 0 etc.  We also need to erase the segment that
+         * is going to be written */
+        if (!(E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL)) {
+            new_bank_offset = hw->flash_bank_size * 2;
+            old_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 1);
+        } else {
+            old_bank_offset = hw->flash_bank_size * 2;
+            new_bank_offset = 0;
+            e1000_erase_ich8_4k_segment(hw, 0);
+        }
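+        /* Bank offsets are byte offsets: flash_bank_size counts words, so
+         * the opposite bank starts flash_bank_size * 2 bytes in, and word i
+         * below is addressed as (i << 1) plus the bank base. */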
+
+        do {
+            sector_write_failed = FALSE;
+            /* Loop over the shadow RAM, which is organized as words but
+             * written out a byte at a time. */
+            for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+                /* Determine whether to write the value stored
+                 * in the other NVM bank or a modified value stored
+                 * in the shadow RAM */
+                if (hw->eeprom_shadow_ram[i].modified == TRUE) {
+                    low_byte = (uint8_t)hw->eeprom_shadow_ram[i].eeprom_word;
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+                                         &temp_byte);
+                    usec_delay(100);
+                    error = e1000_verify_write_ich8_byte(hw,
+                                                 (i << 1) + new_bank_offset,
+                                                 low_byte);
+                    if (error != E1000_SUCCESS)
+                        sector_write_failed = TRUE;
+                    high_byte =
+                        (uint8_t)(hw->eeprom_shadow_ram[i].eeprom_word >> 8);
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+                                         &temp_byte);
+                    usec_delay(100);
+                } else {
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset,
+                                         &low_byte);
+                    usec_delay(100);
+                    error = e1000_verify_write_ich8_byte(hw,
+                                 (i << 1) + new_bank_offset, low_byte);
+                    if (error != E1000_SUCCESS)
+                        sector_write_failed = TRUE;
+                    e1000_read_ich8_byte(hw, (i << 1) + old_bank_offset + 1,
+                                         &high_byte);
+                }
+
+                /* If the word is 0x13, then make sure the signature bits
+                 * (15:14) are 11b until the commit has completed.
+                 * This will allow us to write 10b which indicates the
+                 * signature is valid.  We want to do this after the write
+                 * has completed so that we don't mark the segment valid
+                 * while the write is still in progress */
+                if (i == E1000_ICH8_NVM_SIG_WORD)
+                    high_byte = E1000_ICH8_NVM_SIG_MASK | high_byte;
+
+                error = e1000_verify_write_ich8_byte(hw,
+                             (i << 1) + new_bank_offset + 1, high_byte);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = TRUE;
+
+                if (sector_write_failed == FALSE) {
+                    /* Clear the now not used entry in the cache */
+                    hw->eeprom_shadow_ram[i].modified = FALSE;
+                    hw->eeprom_shadow_ram[i].eeprom_word = 0xFFFF;
+                }
+            }
+
+            /* Don't bother writing the segment valid bits if sector
+             * programming failed. */
+            if (sector_write_failed == FALSE) {
+                /* Finally validate the new segment by setting bits 15:14
+                 * to 10b in word 0x13.  This can be done without an erase
+                 * because these bits start out as 11b and we only need to
+                 * change bit 14 to 0b. */
+                e1000_read_ich8_byte(hw,
+                    E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+                    &high_byte);
+                high_byte &= 0xBF;
+                error = e1000_verify_write_ich8_byte(hw,
+                            E1000_ICH8_NVM_SIG_WORD * 2 + 1 + new_bank_offset,
+                            high_byte);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = TRUE;
+
+                /* And invalidate the previously valid segment by setting
+                 * the high byte of its signature word (0x13) to 0. This can
+                 * be done without an erase because flash erase sets all bits
+                 * to 1's, and 1's can be written to 0's without an erase. */
+                error = e1000_verify_write_ich8_byte(hw,
+                            E1000_ICH8_NVM_SIG_WORD * 2 + 1 + old_bank_offset,
+                            0);
+                if (error != E1000_SUCCESS)
+                    sector_write_failed = TRUE;
+            }
+        } while (++sector_retries < 10 && sector_write_failed == TRUE);
+    }
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads the adapter's part number from the EEPROM
+ *
+ * hw - Struct containing variables accessed by shared code
+ * part_num - Adapter's part number
+ *****************************************************************************/
+int32_t
+e1000_read_part_num(struct e1000_hw *hw,
+                    uint32_t *part_num)
+{
+    uint16_t offset = EEPROM_PBA_BYTE_1;
+    uint16_t eeprom_data;
+
+    DEBUGFUNC("e1000_read_part_num");
+
+    /* Get word 0 from EEPROM */
+    if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+    /* Save word 0 in upper half of part_num */
+    *part_num = (uint32_t) (eeprom_data << 16);
+
+    /* Get word 1 from EEPROM */
+    if (e1000_read_eeprom(hw, ++offset, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+    /* Save word 1 in lower half of part_num */
+    *part_num |= eeprom_data;
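+    /* e.g. word 0 = 0x1234 and word 1 = 0x5678 yield part_num = 0x12345678. */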
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Reads the adapter's MAC address from the EEPROM and inverts the LSB for the
+ * second function of dual function devices
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_read_mac_addr(struct e1000_hw * hw)
+{
+    uint16_t offset;
+    uint16_t eeprom_data, i;
+
+    DEBUGFUNC("e1000_read_mac_addr");
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i += 2) {
+        offset = i >> 1;
+        if (e1000_read_eeprom(hw, offset, 1, &eeprom_data) < 0) {
+            DEBUGOUT("EEPROM Read Error\n");
+            return -E1000_ERR_EEPROM;
+        }
+        hw->perm_mac_addr[i] = (uint8_t) (eeprom_data & 0x00FF);
+        hw->perm_mac_addr[i+1] = (uint8_t) (eeprom_data >> 8);
+    }
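+    /* The EEPROM stores the address little endian per word: e.g. word 0 =
+     * 0x2211 yields perm_mac_addr[0] = 0x11 and perm_mac_addr[1] = 0x22. */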
+
+    switch (hw->mac_type) {
+    default:
+        break;
+    case e1000_82546:
+    case e1000_82546_rev_3:
+    case e1000_82571:
+    case e1000_80003es2lan:
+        if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+            hw->perm_mac_addr[5] ^= 0x01;
+        break;
+    }
+
+    for (i = 0; i < NODE_ADDRESS_SIZE; i++)
+        hw->mac_addr[i] = hw->perm_mac_addr[i];
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Initializes receive address filters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Places the MAC address in receive address register 0 and clears the rest
+ * of the receive address registers. Clears the multicast table. Assumes
+ * the receiver is in reset when the routine is called.
+ *****************************************************************************/
+void
+e1000_init_rx_addrs(struct e1000_hw *hw)
+{
+    uint32_t i;
+    uint32_t rar_num;
+
+    DEBUGFUNC("e1000_init_rx_addrs");
+
+    /* Setup the receive address. */
+    DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+    e1000_rar_set(hw, hw->mac_addr, 0);
+
+    rar_num = E1000_RAR_ENTRIES;
+
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+        rar_num -= 1;
+    if (hw->mac_type == e1000_ich8lan)
+        rar_num = E1000_RAR_ENTRIES_ICH8LAN;
+
+    /* Zero out the other 15 receive addresses. */
+    DEBUGOUT("Clearing RAR[1-15]\n");
+    for (i = 1; i < rar_num; i++) {
+        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
+
+/******************************************************************************
+ * Updates the MAC's list of multicast addresses.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr_list - the list of new multicast addresses
+ * mc_addr_count - number of addresses
+ * pad - number of bytes between addresses in the list
+ * rar_used_count - offset at which to start adding mc addresses into the RARs
+ *
+ * The given list replaces any existing list. Clears the last 15 receive
+ * address registers and the multicast table. Uses receive address registers
+ * for the first 15 multicast addresses, and hashes the rest into the
+ * multicast table.
+ *****************************************************************************/
+void
+e1000_mc_addr_list_update(struct e1000_hw *hw,
+                          uint8_t *mc_addr_list,
+                          uint32_t mc_addr_count,
+                          uint32_t pad,
+                          uint32_t rar_used_count)
+{
+    uint32_t hash_value;
+    uint32_t i;
+    uint32_t num_rar_entry;
+    uint32_t num_mta_entry;
+
+    DEBUGFUNC("e1000_mc_addr_list_update");
+
+    /* Set the new number of MC addresses that we are being requested to use. */
+    hw->num_mc_addrs = mc_addr_count;
+
+    /* Clear RAR[1-15] */
+    DEBUGOUT(" Clearing RAR[1-15]\n");
+    num_rar_entry = E1000_RAR_ENTRIES;
+    if (hw->mac_type == e1000_ich8lan)
+        num_rar_entry = E1000_RAR_ENTRIES_ICH8LAN;
+    /* Reserve a spot for the Locally Administered Address to work around
+     * an 82571 issue in which a reset on one port will reload the MAC on
+     * the other port. */
+    if ((hw->mac_type == e1000_82571) && (hw->laa_is_present == TRUE))
+        num_rar_entry -= 1;
+
+    for (i = rar_used_count; i < num_rar_entry; i++) {
+        E1000_WRITE_REG_ARRAY(hw, RA, (i << 1), 0);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, RA, ((i << 1) + 1), 0);
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    /* Clear the MTA */
+    DEBUGOUT(" Clearing MTA\n");
+    num_mta_entry = E1000_NUM_MTA_REGISTERS;
+    if (hw->mac_type == e1000_ich8lan)
+        num_mta_entry = E1000_NUM_MTA_REGISTERS_ICH8LAN;
+    for (i = 0; i < num_mta_entry; i++) {
+        E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    /* Add the new addresses */
+    for (i = 0; i < mc_addr_count; i++) {
+        DEBUGOUT(" Adding the multicast addresses:\n");
+        DEBUGOUT7(" MC Addr #%d =%.2X %.2X %.2X %.2X %.2X %.2X\n", i,
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad)],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 1],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 2],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 3],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 4],
+                  mc_addr_list[i * (ETH_LENGTH_OF_ADDRESS + pad) + 5]);
+
+        hash_value = e1000_hash_mc_addr(hw,
+                                        mc_addr_list +
+                                        (i * (ETH_LENGTH_OF_ADDRESS + pad)));
+
+        DEBUGOUT1(" Hash value = 0x%03X\n", hash_value);
+
+        /* Place this multicast address in the RAR if there is room,
+         * else put it in the MTA
+         */
+        if (rar_used_count < num_rar_entry) {
+            e1000_rar_set(hw,
+                          mc_addr_list + (i * (ETH_LENGTH_OF_ADDRESS + pad)),
+                          rar_used_count);
+            rar_used_count++;
+        } else {
+            e1000_mta_set(hw, hash_value);
+        }
+    }
+    DEBUGOUT("MC Update Complete\n");
+}
+
+/******************************************************************************
+ * Hashes an address to determine its location in the multicast table
+ *
+ * hw - Struct containing variables accessed by shared code
+ * mc_addr - the multicast address to hash
+ *****************************************************************************/
+uint32_t
+e1000_hash_mc_addr(struct e1000_hw *hw,
+                   uint8_t *mc_addr)
+{
+    uint32_t hash_value = 0;
+
+    /* The portion of the address that is used for the hash table is
+     * determined by the mc_filter_type setting.
+     */
+    switch (hw->mc_filter_type) {
+    /* [0] [1] [2] [3] [4] [5]
+     * 01  AA  00  12  34  56
+     * LSB                 MSB
+     */
+    case 0:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [47:38] i.e. 0x158 for above example address */
+            hash_value = ((mc_addr[4] >> 6) | (((uint16_t) mc_addr[5]) << 2));
+        } else {
+            /* [47:36] i.e. 0x563 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+        }
+        break;
+    case 1:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [46:37] i.e. 0x2B1 for above example address */
+            hash_value = ((mc_addr[4] >> 5) | (((uint16_t) mc_addr[5]) << 3));
+        } else {
+            /* [46:35] i.e. 0xAC6 for above example address */
+            hash_value = ((mc_addr[4] >> 3) | (((uint16_t) mc_addr[5]) << 5));
+        }
+        break;
+    case 2:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [45:36] i.e. 0x163 for above example address */
+            hash_value = ((mc_addr[4] >> 4) | (((uint16_t) mc_addr[5]) << 4));
+        } else {
+            /* [45:34] i.e. 0x5D8 for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+        }
+        break;
+    case 3:
+        if (hw->mac_type == e1000_ich8lan) {
+            /* [43:34] i.e. 0x18D for above example address */
+            hash_value = ((mc_addr[4] >> 2) | (((uint16_t) mc_addr[5]) << 6));
+        } else {
+            /* [43:32] i.e. 0x634 for above example address */
+            hash_value = ((mc_addr[4]) | (((uint16_t) mc_addr[5]) << 8));
+        }
+        break;
+    }
+
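+    /* Working the type-0 case for the example address above: mc_addr[4] =
+     * 0x34 and mc_addr[5] = 0x56, so (0x34 >> 4) | (0x56 << 4) = 0x003 |
+     * 0x560 = 0x563, matching the [47:36] comment. */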
+    hash_value &= 0xFFF;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_value &= 0x3FF;
+
+    return hash_value;
+}
+
+/******************************************************************************
+ * Sets the bit in the multicast table corresponding to the hash value.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * hash_value - Multicast address hash value
+ *****************************************************************************/
+void
+e1000_mta_set(struct e1000_hw *hw,
+              uint32_t hash_value)
+{
+    uint32_t hash_bit, hash_reg;
+    uint32_t mta;
+    uint32_t temp;
+
+    /* The MTA is a register array of 128 32-bit registers.
+     * It is treated like an array of 4096 bits.  We want to set
+     * bit BitArray[hash_value]. So we figure out what register
+     * the bit is in, read it, OR in the new bit, then write
+     * back the new value.  The register is determined by the
+     * upper 7 bits of the hash value, and the bit within that
+     * register is determined by the lower 5 bits of the value.
+     */
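+    /* e.g. hash_value = 0x563 gives hash_reg = (0x563 >> 5) & 0x7F = 0x2B
+     * and hash_bit = 0x563 & 0x1F = 3, setting bit 3 of MTA[0x2B]. */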
+    hash_reg = (hash_value >> 5) & 0x7F;
+    if (hw->mac_type == e1000_ich8lan)
+        hash_reg &= 0x1F;
+    hash_bit = hash_value & 0x1F;
+
+    mta = E1000_READ_REG_ARRAY(hw, MTA, hash_reg);
+
+    mta |= (1 << hash_bit);
+
+    /* If we are on an 82544 and we are trying to write an odd offset
+     * in the MTA, save off the previous entry before writing and
+     * restore the old value after writing.
+     */
+    if ((hw->mac_type == e1000_82544) && ((hash_reg & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, MTA, (hash_reg - 1));
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, MTA, (hash_reg - 1), temp);
+        E1000_WRITE_FLUSH(hw);
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, MTA, hash_reg, mta);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
+
+/******************************************************************************
+ * Puts an ethernet address into a receive address register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * addr - Address to put into receive address register
+ * index - Receive address register to write
+ *****************************************************************************/
+void
+e1000_rar_set(struct e1000_hw *hw,
+              uint8_t *addr,
+              uint32_t index)
+{
+    uint32_t rar_low, rar_high;
+
+    /* HW expects these in little endian so we reverse the byte order
+     * from network order (big endian) to little endian
+     */
+    rar_low = ((uint32_t) addr[0] |
+               ((uint32_t) addr[1] << 8) |
+               ((uint32_t) addr[2] << 16) | ((uint32_t) addr[3] << 24));
+    rar_high = ((uint32_t) addr[4] | ((uint32_t) addr[5] << 8));
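+    /* e.g. addr[] = {0x00, 0x11, 0x22, 0x33, 0x44, 0x55} packs to
+     * rar_low = 0x33221100 and rar_high = 0x00005544 before the Address
+     * Valid bit is ORed in below. */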
+
+    /* Disable Rx and flush all Rx frames before enabling RSS to avoid Rx
+     * unit hang.
+     *
+     * Description:
+     * If there are any Rx frames queued up or otherwise present in the HW
+     * before RSS is enabled, and then we enable RSS, the HW Rx unit will
+     * hang.  To work around this issue, we have to disable receives and
+     * flush out all Rx frames before we enable RSS. To do so, we
+     * redirect all Rx traffic to manageability and then reset the HW.
+     * This flushes away Rx frames, and (since the redirection to
+     * manageability persists across resets) keeps new ones from coming in
+     * while we work.  Then, we clear the Address Valid (AV) bit for all MAC
+     * addresses and undo the re-direction to manageability.
+     * Now, frames are coming in again, but the MAC won't accept them, so
+     * far so good.  We now proceed to initialize RSS (if necessary) and
+     * configure the Rx unit.  Last, we re-enable the AV bits and continue
+     * on our merry way.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_80003es2lan:
+        if (hw->leave_av_bit_off == TRUE)
+            break;
+        fallthrough;
+    default:
+        /* Indicate to hardware the Address is Valid. */
+        rar_high |= E1000_RAH_AV;
+        break;
+    }
+
+    E1000_WRITE_REG_ARRAY(hw, RA, (index << 1), rar_low);
+    E1000_WRITE_FLUSH(hw);
+    E1000_WRITE_REG_ARRAY(hw, RA, ((index << 1) + 1), rar_high);
+    E1000_WRITE_FLUSH(hw);
+}
+
+/******************************************************************************
+ * Writes a value to the specified offset in the VLAN filter table.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - Offset in VLAN filter table to write
+ * value - Value to write into VLAN filter table
+ *****************************************************************************/
+void
+e1000_write_vfta(struct e1000_hw *hw,
+                 uint32_t offset,
+                 uint32_t value)
+{
+    uint32_t temp;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if ((hw->mac_type == e1000_82544) && ((offset & 0x1) == 1)) {
+        temp = E1000_READ_REG_ARRAY(hw, VFTA, (offset - 1));
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH(hw);
+        E1000_WRITE_REG_ARRAY(hw, VFTA, (offset - 1), temp);
+        E1000_WRITE_FLUSH(hw);
+    } else {
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, value);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
+
+/******************************************************************************
+ * Clears the VLAN filter table
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_clear_vfta(struct e1000_hw *hw)
+{
+    uint32_t offset;
+    uint32_t vfta_value = 0;
+    uint32_t vfta_offset = 0;
+    uint32_t vfta_bit_in_reg = 0;
+
+    if (hw->mac_type == e1000_ich8lan)
+        return;
+
+    if (hw->mac_type == e1000_82573) {
+        if (hw->mng_cookie.vlan_id != 0) {
+            /* The VFTA is a 4096-bit field, each bit identifying a single
+             * VLAN ID.  The following operations determine which 32-bit
+             * entry (i.e. offset) into the array, and which bit within that
+             * entry, correspond to the manageability unit's VLAN ID. */
+            vfta_offset = (hw->mng_cookie.vlan_id >>
+                           E1000_VFTA_ENTRY_SHIFT) &
+                          E1000_VFTA_ENTRY_MASK;
+            vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+                                    E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+        }
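+        /* e.g. a manageability VLAN ID of 100 selects 32-bit entry
+         * 100 >> 5 = 3 and bit 100 & 0x1F = 4 within it, the VFTA being
+         * 128 x 32 = 4096 bits. */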
+    }
+    for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+        /* If the offset we want to clear is the same offset of the
+         * manageability VLAN ID, then clear all bits except that of the
+         * manageability unit */
+        vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+        E1000_WRITE_REG_ARRAY(hw, VFTA, offset, vfta_value);
+        E1000_WRITE_FLUSH(hw);
+    }
+}
+
+int32_t
+e1000_id_led_init(struct e1000_hw * hw)
+{
+    uint32_t ledctl;
+    const uint32_t ledctl_mask = 0x000000FF;
+    const uint32_t ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+    const uint32_t ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+    uint16_t eeprom_data, i, temp;
+    const uint16_t led_mask = 0x0F;
+
+    DEBUGFUNC("e1000_id_led_init");
+
+    if (hw->mac_type < e1000_82540) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+
+    ledctl = E1000_READ_REG(hw, LEDCTL);
+    hw->ledctl_default = ledctl;
+    hw->ledctl_mode1 = hw->ledctl_default;
+    hw->ledctl_mode2 = hw->ledctl_default;
+
+    if (e1000_read_eeprom(hw, EEPROM_ID_LED_SETTINGS, 1, &eeprom_data) < 0) {
+        DEBUGOUT("EEPROM Read Error\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    if ((hw->mac_type == e1000_82573) &&
+        (eeprom_data == ID_LED_RESERVED_82573))
+        eeprom_data = ID_LED_DEFAULT_82573;
+    else if ((eeprom_data == ID_LED_RESERVED_0000) ||
+            (eeprom_data == ID_LED_RESERVED_FFFF)) {
+        if (hw->mac_type == e1000_ich8lan)
+            eeprom_data = ID_LED_DEFAULT_ICH8LAN;
+        else
+            eeprom_data = ID_LED_DEFAULT;
+    }
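+    /* eeprom_data packs one 4-bit LED setting per nibble: pass i below
+     * reads bits (4*i + 3):(4*i) of the word and rewrites byte i of the
+     * cached LEDCTL modes, e.g. i = 1 decodes bits 7:4 and patches LEDCTL
+     * bits 15:8. */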
+    for (i = 0; i < 4; i++) {
+        temp = (eeprom_data >> (i << 2)) & led_mask;
+        switch (temp) {
+        case ID_LED_ON1_DEF2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_ON1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_OFF1_DEF2:
+        case ID_LED_OFF1_ON2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode1 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+        switch (temp) {
+        case ID_LED_DEF1_ON2:
+        case ID_LED_ON1_ON2:
+        case ID_LED_OFF1_ON2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_on << (i << 3);
+            break;
+        case ID_LED_DEF1_OFF2:
+        case ID_LED_ON1_OFF2:
+        case ID_LED_OFF1_OFF2:
+            hw->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+            hw->ledctl_mode2 |= ledctl_off << (i << 3);
+            break;
+        default:
+            /* Do nothing */
+            break;
+        }
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Prepares the SW controllable LED for use and saves the current state of the LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_setup_led(struct e1000_hw *hw)
+{
+    uint32_t ledctl;
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_setup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No setup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn off PHY Smart Power Down (if enabled) */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                     &hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      (uint16_t)(hw->phy_spd_default &
+                                      ~IGP01E1000_GMII_SPD));
+        if (ret_val)
+            return ret_val;
+        fallthrough;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            ledctl = E1000_READ_REG(hw, LEDCTL);
+            /* Save current LEDCTL settings */
+            hw->ledctl_default = ledctl;
+            /* Turn off LED0 */
+            ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+                        E1000_LEDCTL_LED0_BLINK |
+                        E1000_LEDCTL_LED0_MODE_MASK);
+            ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+                       E1000_LEDCTL_LED0_MODE_SHIFT);
+            E1000_WRITE_REG(hw, LEDCTL, ledctl);
+        } else if (hw->media_type == e1000_media_type_copper)
+            E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/******************************************************************************
+ * Used on 82571 and later silicon that has LED blink bits.
+ * Callers must use their own timer and should have already called
+ * e1000_id_led_init().
+ * Call e1000_cleanup_led() to stop blinking.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_blink_led_start(struct e1000_hw *hw)
+{
+    int16_t  i;
+    uint32_t ledctl_blink = 0;
+
+    DEBUGFUNC("e1000_id_led_blink_on");
+
+    if (hw->mac_type < e1000_82571) {
+        /* Nothing to do */
+        return E1000_SUCCESS;
+    }
+    if (hw->media_type == e1000_media_type_fiber) {
+        /* always blink LED0 for PCI-E fiber */
+        ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+                     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+    } else {
+        /* set the blink bit for each LED that's "on" (0x0E) in ledctl_mode2 */
+        ledctl_blink = hw->ledctl_mode2;
+        for (i=0; i < 4; i++)
+            if (((hw->ledctl_mode2 >> (i * 8)) & 0xFF) ==
+                E1000_LEDCTL_MODE_LED_ON)
+                ledctl_blink |= (E1000_LEDCTL_LED0_BLINK << (i * 8));
+    }
+
+    E1000_WRITE_REG(hw, LEDCTL, ledctl_blink);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Restores the saved state of the SW controllable LED.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_cleanup_led(struct e1000_hw *hw)
+{
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_cleanup_led");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+    case e1000_82544:
+        /* No cleanup necessary */
+        break;
+    case e1000_82541:
+    case e1000_82547:
+    case e1000_82541_rev_2:
+    case e1000_82547_rev_2:
+        /* Turn on PHY Smart Power Down (if previously enabled) */
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO,
+                                      hw->phy_spd_default);
+        if (ret_val)
+            return ret_val;
+        fallthrough;
+    default:
+        if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+            break;
+        }
+        /* Restore LEDCTL settings */
+        E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_default);
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns on the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_led_on(struct e1000_hw *hw)
+{
+    uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+
+    DEBUGFUNC("e1000_led_on");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Set SW Definable Pin 0 to turn on the LED */
+        ctrl |= E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn on the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn on the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode2);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Turns off the software controllable LED
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+int32_t
+e1000_led_off(struct e1000_hw *hw)
+{
+    uint32_t ctrl = E1000_READ_REG(hw, CTRL);
+
+    DEBUGFUNC("e1000_led_off");
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+    case e1000_82543:
+        /* Clear SW Definable Pin 0 to turn off the LED */
+        ctrl &= ~E1000_CTRL_SWDPIN0;
+        ctrl |= E1000_CTRL_SWDPIO0;
+        break;
+    case e1000_82544:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Clear SW Definable Pin 0 to turn off the LED */
+            ctrl &= ~E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        }
+        break;
+    default:
+        if (hw->media_type == e1000_media_type_fiber) {
+            /* Set SW Definable Pin 0 to turn off the LED */
+            ctrl |= E1000_CTRL_SWDPIN0;
+            ctrl |= E1000_CTRL_SWDPIO0;
+        } else if (hw->phy_type == e1000_phy_ife) {
+            e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+                 (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+        } else if (hw->media_type == e1000_media_type_copper) {
+            E1000_WRITE_REG(hw, LEDCTL, hw->ledctl_mode1);
+            return E1000_SUCCESS;
+        }
+        break;
+    }
+
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Clears all hardware statistics counters.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_clear_hw_cntrs(struct e1000_hw *hw)
+{
+    volatile uint32_t temp;
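+    /* The statistics registers are clear-on-read: reading each one into a
+     * throwaway volatile is enough to zero it, and the volatile keeps the
+     * compiler from optimizing the reads away. */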
+
+    temp = E1000_READ_REG(hw, CRCERRS);
+    temp = E1000_READ_REG(hw, SYMERRS);
+    temp = E1000_READ_REG(hw, MPC);
+    temp = E1000_READ_REG(hw, SCC);
+    temp = E1000_READ_REG(hw, ECOL);
+    temp = E1000_READ_REG(hw, MCC);
+    temp = E1000_READ_REG(hw, LATECOL);
+    temp = E1000_READ_REG(hw, COLC);
+    temp = E1000_READ_REG(hw, DC);
+    temp = E1000_READ_REG(hw, SEC);
+    temp = E1000_READ_REG(hw, RLEC);
+    temp = E1000_READ_REG(hw, XONRXC);
+    temp = E1000_READ_REG(hw, XONTXC);
+    temp = E1000_READ_REG(hw, XOFFRXC);
+    temp = E1000_READ_REG(hw, XOFFTXC);
+    temp = E1000_READ_REG(hw, FCRUC);
+
+    if (hw->mac_type != e1000_ich8lan) {
+        temp = E1000_READ_REG(hw, PRC64);
+        temp = E1000_READ_REG(hw, PRC127);
+        temp = E1000_READ_REG(hw, PRC255);
+        temp = E1000_READ_REG(hw, PRC511);
+        temp = E1000_READ_REG(hw, PRC1023);
+        temp = E1000_READ_REG(hw, PRC1522);
+    }
+
+    temp = E1000_READ_REG(hw, GPRC);
+    temp = E1000_READ_REG(hw, BPRC);
+    temp = E1000_READ_REG(hw, MPRC);
+    temp = E1000_READ_REG(hw, GPTC);
+    temp = E1000_READ_REG(hw, GORCL);
+    temp = E1000_READ_REG(hw, GORCH);
+    temp = E1000_READ_REG(hw, GOTCL);
+    temp = E1000_READ_REG(hw, GOTCH);
+    temp = E1000_READ_REG(hw, RNBC);
+    temp = E1000_READ_REG(hw, RUC);
+    temp = E1000_READ_REG(hw, RFC);
+    temp = E1000_READ_REG(hw, ROC);
+    temp = E1000_READ_REG(hw, RJC);
+    temp = E1000_READ_REG(hw, TORL);
+    temp = E1000_READ_REG(hw, TORH);
+    temp = E1000_READ_REG(hw, TOTL);
+    temp = E1000_READ_REG(hw, TOTH);
+    temp = E1000_READ_REG(hw, TPR);
+    temp = E1000_READ_REG(hw, TPT);
+
+    if (hw->mac_type != e1000_ich8lan) {
+        temp = E1000_READ_REG(hw, PTC64);
+        temp = E1000_READ_REG(hw, PTC127);
+        temp = E1000_READ_REG(hw, PTC255);
+        temp = E1000_READ_REG(hw, PTC511);
+        temp = E1000_READ_REG(hw, PTC1023);
+        temp = E1000_READ_REG(hw, PTC1522);
+    }
+
+    temp = E1000_READ_REG(hw, MPTC);
+    temp = E1000_READ_REG(hw, BPTC);
+
+    if (hw->mac_type < e1000_82543) return;
+
+    temp = E1000_READ_REG(hw, ALGNERRC);
+    temp = E1000_READ_REG(hw, RXERRC);
+    temp = E1000_READ_REG(hw, TNCRS);
+    temp = E1000_READ_REG(hw, CEXTERR);
+    temp = E1000_READ_REG(hw, TSCTC);
+    temp = E1000_READ_REG(hw, TSCTFC);
+
+    if (hw->mac_type <= e1000_82544) return;
+
+    temp = E1000_READ_REG(hw, MGTPRC);
+    temp = E1000_READ_REG(hw, MGTPDC);
+    temp = E1000_READ_REG(hw, MGTPTC);
+
+    if (hw->mac_type <= e1000_82547_rev_2) return;
+
+    temp = E1000_READ_REG(hw, IAC);
+    temp = E1000_READ_REG(hw, ICRXOC);
+
+    if (hw->mac_type == e1000_ich8lan) return;
+
+    temp = E1000_READ_REG(hw, ICRXPTC);
+    temp = E1000_READ_REG(hw, ICRXATC);
+    temp = E1000_READ_REG(hw, ICTXPTC);
+    temp = E1000_READ_REG(hw, ICTXATC);
+    temp = E1000_READ_REG(hw, ICTXQEC);
+    temp = E1000_READ_REG(hw, ICTXQMTC);
+    temp = E1000_READ_REG(hw, ICRXDMTC);
+}
+
+/******************************************************************************
+ * Resets Adaptive IFS to its default state.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * Call this after e1000_init_hw. You may override the IFS defaults by setting
+ * hw->ifs_params_forced to TRUE. However, you must initialize
+ * hw->current_ifs_val, ifs_min_val, ifs_max_val, ifs_step_size, and ifs_ratio
+ * before calling this function.
+ *****************************************************************************/
+void
+e1000_reset_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_reset_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if (!hw->ifs_params_forced) {
+            hw->current_ifs_val = 0;
+            hw->ifs_min_val = IFS_MIN;
+            hw->ifs_max_val = IFS_MAX;
+            hw->ifs_step_size = IFS_STEP;
+            hw->ifs_ratio = IFS_RATIO;
+        }
+        hw->in_ifs_mode = FALSE;
+        E1000_WRITE_REG(hw, AIT, 0);
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
+
+/******************************************************************************
+ * Called during the callback/watchdog routine to update IFS value based on
+ * the ratio of transmits to collisions.
+ *
+ * hw - Struct containing variables accessed by shared code; the transmit
+ *      and collision counts since the last call are taken from
+ *      hw->tx_packet_delta and hw->collision_delta.
+ *****************************************************************************/
+void
+e1000_update_adaptive(struct e1000_hw *hw)
+{
+    DEBUGFUNC("e1000_update_adaptive");
+
+    if (hw->adaptive_ifs) {
+        if ((hw->collision_delta * hw->ifs_ratio) > hw->tx_packet_delta) {
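+            /* i.e. more than one collision per ifs_ratio transmits since
+             * the last call: stretch the IFS to back off. */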
+            if (hw->tx_packet_delta > MIN_NUM_XMITS) {
+                hw->in_ifs_mode = TRUE;
+                if (hw->current_ifs_val < hw->ifs_max_val) {
+                    if (hw->current_ifs_val == 0)
+                        hw->current_ifs_val = hw->ifs_min_val;
+                    else
+                        hw->current_ifs_val += hw->ifs_step_size;
+                    E1000_WRITE_REG(hw, AIT, hw->current_ifs_val);
+                }
+            }
+        } else {
+            if (hw->in_ifs_mode && (hw->tx_packet_delta <= MIN_NUM_XMITS)) {
+                hw->current_ifs_val = 0;
+                hw->in_ifs_mode = FALSE;
+                E1000_WRITE_REG(hw, AIT, 0);
+            }
+        }
+    } else {
+        DEBUGOUT("Not in Adaptive IFS mode!\n");
+    }
+}
+
+/******************************************************************************
+ * Adjusts the statistic counters when a frame is accepted by TBI_ACCEPT
+ *
+ * hw - Struct containing variables accessed by shared code
+ * stats - Statistics counters to be adjusted
+ * frame_len - The length of the frame in question
+ * mac_addr - The Ethernet destination address of the frame in question
+ *****************************************************************************/
+void
+e1000_tbi_adjust_stats(struct e1000_hw *hw,
+                       struct e1000_hw_stats *stats,
+                       uint32_t frame_len,
+                       uint8_t *mac_addr)
+{
+    uint64_t carry_bit;
+
+    /* First adjust the frame length. */
+    frame_len--;
+    /* We need to adjust the statistics counters, since the hardware
+     * counters overcount this packet as a CRC error and undercount
+     * the packet as a good packet
+     */
+    /* This packet should not be counted as a CRC error.    */
+    stats->crcerrs--;
+    /* This packet does count as a Good Packet Received.    */
+    stats->gprc++;
+
+    /* Adjust the Good Octets received counters             */
+    carry_bit = 0x80000000 & stats->gorcl;
+    stats->gorcl += frame_len;
+    /* If the high bit of Gorcl (the low 32 bits of the Good Octets
+     * Received Count) was one before the addition,
+     * AND it is zero after, then we lost the carry out,
+     * need to add one to Gorch (Good Octets Received Count High).
+     * This could be simplified if all environments supported
+     * 64-bit integers.
+     */
+    if (carry_bit && ((stats->gorcl & 0x80000000) == 0))
+        stats->gorch++;
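+    /* e.g. gorcl = 0xFFFFFFF0 plus frame_len = 0x20 leaves 0x00000010 in
+     * the low 32 bits: bit 31 was set before the add and clear after, so
+     * the lost carry is folded into gorch. */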
+    /* Is this a broadcast or multicast?  Check broadcast first,
+     * since the test for a multicast frame will test positive on
+     * a broadcast frame.
+     */
+    if ((mac_addr[0] == (uint8_t) 0xff) && (mac_addr[1] == (uint8_t) 0xff))
+        /* Broadcast packet */
+        stats->bprc++;
+    else if (*mac_addr & 0x01)
+        /* Multicast packet */
+        stats->mprc++;
+
+    if (frame_len == hw->max_frame_size) {
+        /* In this case, the hardware has overcounted the number of
+         * oversize frames.
+         */
+        if (stats->roc > 0)
+            stats->roc--;
+    }
+
+    /* Adjust the bin counters when the extra byte put the frame in the
+     * wrong bin. Remember that the frame_len was adjusted above.
+     */
+    if (frame_len == 64) {
+        stats->prc64++;
+        stats->prc127--;
+    } else if (frame_len == 127) {
+        stats->prc127++;
+        stats->prc255--;
+    } else if (frame_len == 255) {
+        stats->prc255++;
+        stats->prc511--;
+    } else if (frame_len == 511) {
+        stats->prc511++;
+        stats->prc1023--;
+    } else if (frame_len == 1023) {
+        stats->prc1023++;
+        stats->prc1522--;
+    } else if (frame_len == 1522) {
+        stats->prc1522++;
+    }
+}
+
+/******************************************************************************
+ * Gets the current PCI bus type, speed, and width of the hardware
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+void
+e1000_get_bus_info(struct e1000_hw *hw)
+{
+    uint32_t status;
+
+    switch (hw->mac_type) {
+    case e1000_82542_rev2_0:
+    case e1000_82542_rev2_1:
+        hw->bus_type = e1000_bus_type_unknown;
+        hw->bus_speed = e1000_bus_speed_unknown;
+        hw->bus_width = e1000_bus_width_unknown;
+        break;
+    case e1000_82572:
+    case e1000_82573:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        hw->bus_width = e1000_bus_width_pciex_1;
+        break;
+    case e1000_82571:
+    case e1000_ich8lan:
+    case e1000_80003es2lan:
+        hw->bus_type = e1000_bus_type_pci_express;
+        hw->bus_speed = e1000_bus_speed_2500;
+        hw->bus_width = e1000_bus_width_pciex_4;
+        break;
+    default:
+        status = E1000_READ_REG(hw, STATUS);
+        hw->bus_type = (status & E1000_STATUS_PCIX_MODE) ?
+                       e1000_bus_type_pcix : e1000_bus_type_pci;
+
+        if (hw->device_id == E1000_DEV_ID_82546EB_QUAD_COPPER) {
+            hw->bus_speed = (hw->bus_type == e1000_bus_type_pci) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_120;
+        } else if (hw->bus_type == e1000_bus_type_pci) {
+            hw->bus_speed = (status & E1000_STATUS_PCI66) ?
+                            e1000_bus_speed_66 : e1000_bus_speed_33;
+        } else {
+            switch (status & E1000_STATUS_PCIX_SPEED) {
+            case E1000_STATUS_PCIX_SPEED_66:
+                hw->bus_speed = e1000_bus_speed_66;
+                break;
+            case E1000_STATUS_PCIX_SPEED_100:
+                hw->bus_speed = e1000_bus_speed_100;
+                break;
+            case E1000_STATUS_PCIX_SPEED_133:
+                hw->bus_speed = e1000_bus_speed_133;
+                break;
+            default:
+                hw->bus_speed = e1000_bus_speed_reserved;
+                break;
+            }
+        }
+        hw->bus_width = (status & E1000_STATUS_BUS64) ?
+                        e1000_bus_width_64 : e1000_bus_width_32;
+        break;
+    }
+}
+
+/******************************************************************************
+ * Reads a value from one of the device's registers using port I/O (as opposed
+ * to memory mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to read from
+ *****************************************************************************/
+uint32_t
+e1000_read_reg_io(struct e1000_hw *hw,
+                  uint32_t offset)
+{
+    unsigned long io_addr = hw->io_base;
+    unsigned long io_data = hw->io_base + 4;
+
+    e1000_io_write(hw, io_addr, offset);
+    return e1000_io_read(hw, io_data);
+}
+
+/******************************************************************************
+ * Writes a value to one of the device's registers using port I/O (as opposed to
+ * memory mapped I/O). Only 82544 and newer devices support port I/O.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset to write to
+ * value - value to write
+ *****************************************************************************/
+void
+e1000_write_reg_io(struct e1000_hw *hw,
+                   uint32_t offset,
+                   uint32_t value)
+{
+    unsigned long io_addr = hw->io_base;
+    unsigned long io_data = hw->io_base + 4;
+
+    e1000_io_write(hw, io_addr, offset);
+    e1000_io_write(hw, io_data, value);
+}
+
+
+/******************************************************************************
+ * Estimates the cable length.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * min_length - The estimated minimum length
+ * max_length - The estimated maximum length
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * This function always returns a ranged length (minimum & maximum).
+ * For M88 PHYs, it maps the single value returned from the register onto a
+ * minimum and maximum range.
+ * For IGP PHYs, it calculates the range from the AGC registers.
+ *****************************************************************************/
+int32_t
+e1000_get_cable_length(struct e1000_hw *hw,
+                       uint16_t *min_length,
+                       uint16_t *max_length)
+{
+    int32_t ret_val;
+    uint16_t agc_value = 0;
+    uint16_t i, phy_data;
+    uint16_t cable_length;
+
+    DEBUGFUNC("e1000_get_cable_length");
+
+    *min_length = *max_length = 0;
+
+    /* Use the old method for PHYs older than IGP */
+    if (hw->phy_type == e1000_phy_m88) {
+
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+                       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+
+        /* Convert the enum value to ranged values */
+        switch (cable_length) {
+        case e1000_cable_length_50:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_50;
+            break;
+        case e1000_cable_length_50_80:
+            *min_length = e1000_igp_cable_length_50;
+            *max_length = e1000_igp_cable_length_80;
+            break;
+        case e1000_cable_length_80_110:
+            *min_length = e1000_igp_cable_length_80;
+            *max_length = e1000_igp_cable_length_110;
+            break;
+        case e1000_cable_length_110_140:
+            *min_length = e1000_igp_cable_length_110;
+            *max_length = e1000_igp_cable_length_140;
+            break;
+        case e1000_cable_length_140:
+            *min_length = e1000_igp_cable_length_140;
+            *max_length = e1000_igp_cable_length_170;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+        }
+    } else if (hw->phy_type == e1000_phy_gg82563) {
+        ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        cable_length = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+        switch (cable_length) {
+        case e1000_gg_cable_length_60:
+            *min_length = 0;
+            *max_length = e1000_igp_cable_length_60;
+            break;
+        case e1000_gg_cable_length_60_115:
+            *min_length = e1000_igp_cable_length_60;
+            *max_length = e1000_igp_cable_length_115;
+            break;
+        case e1000_gg_cable_length_115_150:
+            *min_length = e1000_igp_cable_length_115;
+            *max_length = e1000_igp_cable_length_150;
+            break;
+        case e1000_gg_cable_length_150:
+            *min_length = e1000_igp_cable_length_150;
+            *max_length = e1000_igp_cable_length_180;
+            break;
+        default:
+            return -E1000_ERR_PHY;
+        }
+    } else if (hw->phy_type == e1000_phy_igp) { /* For IGP PHY */
+        uint16_t cur_agc_value;
+        uint16_t min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+        uint16_t agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP01E1000_PHY_AGC_A,
+                                                          IGP01E1000_PHY_AGC_B,
+                                                          IGP01E1000_PHY_AGC_C,
+                                                          IGP01E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            cur_agc_value = phy_data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+            /* Value bound check. */
+            if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+                (cur_agc_value == 0))
+                return -E1000_ERR_PHY;
+
+            agc_value += cur_agc_value;
+
+            /* Update minimal AGC value. */
+            if (min_agc_value > cur_agc_value)
+                min_agc_value = cur_agc_value;
+        }
+
+        /* Remove the minimal AGC result for length < 50m */
+        if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * e1000_igp_cable_length_50) {
+            agc_value -= min_agc_value;
+
+            /* Get the average length of the remaining 3 channels */
+            agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+        } else {
+            /* Get the average length of all the 4 channels. */
+            agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+        }
+
+        /* Set the range of the calculated length. */
+        *min_length = ((e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) > 0) ?
+                       (e1000_igp_cable_length_table[agc_value] -
+                       IGP01E1000_AGC_RANGE) : 0;
+        *max_length = e1000_igp_cable_length_table[agc_value] +
+                      IGP01E1000_AGC_RANGE;
+    } else if (hw->phy_type == e1000_phy_igp_2 ||
+               hw->phy_type == e1000_phy_igp_3) {
+        uint16_t cur_agc_index, max_agc_index = 0;
+        uint16_t min_agc_index = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+        uint16_t agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+                                                         {IGP02E1000_PHY_AGC_A,
+                                                          IGP02E1000_PHY_AGC_B,
+                                                          IGP02E1000_PHY_AGC_C,
+                                                          IGP02E1000_PHY_AGC_D};
+        /* Read the AGC registers for all channels */
+        for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+            ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Get bits 15:9, which represent the combination of coarse and
+             * fine gain values.  The result is an index into the lookup
+             * table that yields the approximate cable length. */
+            cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                            IGP02E1000_AGC_LENGTH_MASK;
+
+            /* Array index bound check. */
+            if ((cur_agc_index >= IGP02E1000_AGC_LENGTH_TABLE_SIZE) ||
+                (cur_agc_index == 0))
+                return -E1000_ERR_PHY;
+
+            /* Track the min & max AGC indices; their table values are
+             * removed from the average below. */
+            if (e1000_igp_2_cable_length_table[min_agc_index] >
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                min_agc_index = cur_agc_index;
+            if (e1000_igp_2_cable_length_table[max_agc_index] <
+                e1000_igp_2_cable_length_table[cur_agc_index])
+                max_agc_index = cur_agc_index;
+
+            agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+        }
+
+        agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+                      e1000_igp_2_cable_length_table[max_agc_index]);
+        agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+        /* Calculate cable length with the error range of +/- 10 meters. */
+        *min_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+                       (agc_value - IGP02E1000_AGC_RANGE) : 0;
+        *max_length = agc_value + IGP02E1000_AGC_RANGE;
+    }
+
+    return E1000_SUCCESS;
+}
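+
+/* Worked example for the IGP02 branch above (hypothetical numbers): if the
+ * four AGC table lookups yield 40, 50, 60 and 120 meters, the 40 (min) and
+ * 120 (max) readings are dropped and the remaining two average to
+ * (50 + 60) / 2 = 55 m, so the reported range is 55 +/- IGP02E1000_AGC_RANGE
+ * meters, clamped at 0 on the low side. */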
+
+/******************************************************************************
+ * Check the cable polarity
+ *
+ * hw - Struct containing variables accessed by shared code
+ * polarity - output parameter : 0 - Polarity is not reversed
+ *                               1 - Polarity is reversed.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function simply reads the polarity bit in
+ * the PHY Status register.  For IGP PHYs, this bit is valid only if the link
+ * speed is 10 Mbps.  At 100 Mbps there is no polarity reversal, so this bit
+ * reads 0.  At 1000 Mbps the polarity status is in the
+ * IGP01E1000_PHY_PCS_INIT_REG.
+ *****************************************************************************/
+int32_t
+e1000_check_polarity(struct e1000_hw *hw,
+                     uint16_t *polarity)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_check_polarity");
+
+    if ((hw->phy_type == e1000_phy_m88) ||
+        (hw->phy_type == e1000_phy_gg82563)) {
+        /* Return the polarity bit from the PHY Specific Status register. */
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = (phy_data & M88E1000_PSSR_REV_POLARITY) >>
+                    M88E1000_PSSR_REV_POLARITY_SHIFT;
+    } else if (hw->phy_type == e1000_phy_igp ||
+              hw->phy_type == e1000_phy_igp_3 ||
+              hw->phy_type == e1000_phy_igp_2) {
+        /* Read the Status register to check the speed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        /* If speed is 1000 Mbps, must read the IGP01E1000_PHY_PCS_INIT_REG to
+         * find the polarity status */
+        if ((phy_data & IGP01E1000_PSSR_SPEED_MASK) ==
+           IGP01E1000_PSSR_SPEED_1000MBPS) {
+
+            /* Read the GIG initialization PCS register (0x00B4) */
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            /* Check the polarity bits */
+            *polarity = (phy_data & IGP01E1000_PHY_POLARITY_MASK) ? 1 : 0;
+        } else {
+            /* For 10 Mbps, read the polarity bit in the status register. (for
+             * 100 Mbps this bit is always 0) */
+            *polarity = phy_data & IGP01E1000_PSSR_POLARITY_REVERSED;
+        }
+    } else if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_EXTENDED_STATUS_CONTROL,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+        *polarity = (phy_data & IFE_PESC_POLARITY_REVERSED) >>
+                           IFE_PESC_POLARITY_REVERSED_SHIFT;
+    }
+    return E1000_SUCCESS;
+}
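+
+/* Sketch of a hypothetical caller (illustrative only, not from this file):
+ *
+ *     uint16_t polarity;
+ *     if (e1000_check_polarity(hw, &polarity) == E1000_SUCCESS && polarity)
+ *         DEBUGOUT("cable polarity is reversed\n");
+ */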
+
+/******************************************************************************
+ * Check if a downshift occurred
+ *
+ * hw - Struct containing variables accessed by shared code
+ * The result is stored in hw->speed_downgraded:
+ *                                0 - No downshift occurred.
+ *                                1 - Downshift occurred.
+ *
+ * returns: - E1000_ERR_XXX
+ *            E1000_SUCCESS
+ *
+ * For PHYs older than IGP, this function reads the Downshift bit in the PHY
+ * Specific Status register.  For IGP PHYs, it reads the Downgrade bit in the
+ * Link Health register.  In IGP this bit is latched high, so the driver must
+ * read it immediately after link is established.
+ *****************************************************************************/
+int32_t
+e1000_check_downshift(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_check_downshift");
+
+    if (hw->phy_type == e1000_phy_igp ||
+        hw->phy_type == e1000_phy_igp_3 ||
+        hw->phy_type == e1000_phy_igp_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_LINK_HEALTH,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & IGP01E1000_PLHR_SS_DOWNGRADE) ? 1 : 0;
+    } else if ((hw->phy_type == e1000_phy_m88) ||
+               (hw->phy_type == e1000_phy_gg82563)) {
+        ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS,
+                                     &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        hw->speed_downgraded = (phy_data & M88E1000_PSSR_DOWNSHIFT) >>
+                               M88E1000_PSSR_DOWNSHIFT_SHIFT;
+    } else if (hw->phy_type == e1000_phy_ife) {
+        /* e1000_phy_ife supports 10/100 speed only */
+        hw->speed_downgraded = FALSE;
+    }
+
+    return E1000_SUCCESS;
+}
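+
+/* Because the IGP downgrade bit is latched high (see above), a hypothetical
+ * caller would sample it right after link-up, for example:
+ *
+ *     e1000_check_downshift(hw);
+ *     if (hw->speed_downgraded)
+ *         DEBUGOUT("link speed was downshifted\n");
+ */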
+
+/*****************************************************************************
+ *
+ * 82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ * gigabit link is achieved to improve link quality.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_config_dsp_after_link_change(struct e1000_hw *hw,
+                                   boolean_t link_up)
+{
+    int32_t ret_val;
+    uint16_t phy_data, phy_saved_data, speed, duplex, i;
+    uint16_t dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+                                        {IGP01E1000_PHY_AGC_PARAM_A,
+                                        IGP01E1000_PHY_AGC_PARAM_B,
+                                        IGP01E1000_PHY_AGC_PARAM_C,
+                                        IGP01E1000_PHY_AGC_PARAM_D};
+    uint16_t min_length, max_length;
+
+    DEBUGFUNC("e1000_config_dsp_after_link_change");
+
+    if (hw->phy_type != e1000_phy_igp)
+        return E1000_SUCCESS;
+
+    if (link_up) {
+        ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+        if (ret_val) {
+            DEBUGOUT("Error getting link speed and duplex\n");
+            return ret_val;
+        }
+
+        if (speed == SPEED_1000) {
+
+            ret_val = e1000_get_cable_length(hw, &min_length, &max_length);
+            if (ret_val)
+                return ret_val;
+
+            if ((hw->dsp_config_state == e1000_dsp_config_enabled) &&
+                min_length >= e1000_igp_cable_length_50) {
+
+                for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                    ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i],
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+                    ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i],
+                                                  phy_data);
+                    if (ret_val)
+                        return ret_val;
+                }
+                hw->dsp_config_state = e1000_dsp_config_activated;
+            }
+
+            if ((hw->ffe_config_state == e1000_ffe_config_enabled) &&
+               (min_length < e1000_igp_cable_length_50)) {
+
+                uint16_t ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+                uint32_t idle_errs = 0;
+
+                /* clear previous idle error counts */
+                ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                             &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                for (i = 0; i < ffe_idle_err_timeout; i++) {
+                    usec_delay(1000);
+                    ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+                                                 &phy_data);
+                    if (ret_val)
+                        return ret_val;
+
+                    idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+                    if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+                        hw->ffe_config_state = e1000_ffe_config_active;
+
+                        ret_val = e1000_write_phy_reg(hw,
+                                    IGP01E1000_PHY_DSP_FFE,
+                                    IGP01E1000_PHY_DSP_FFE_CM_CP);
+                        if (ret_val)
+                            return ret_val;
+                        break;
+                    }
+
+                    if (idle_errs)
+                        ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+                }
+            }
+        }
+    } else {
+        if (hw->dsp_config_state == e1000_dsp_config_activated) {
+            /* Save off the current value of register 0x2F5B to be restored
+             * at the end of this routine. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+                ret_val = e1000_read_phy_reg(hw, dsp_reg_array[i], &phy_data);
+                if (ret_val)
+                    return ret_val;
+
+                phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+                phy_data |=  IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+                ret_val = e1000_write_phy_reg(hw, dsp_reg_array[i], phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->dsp_config_state = e1000_dsp_config_enabled;
+        }
+
+        if (hw->ffe_config_state == e1000_ffe_config_active) {
+            /* Save off the current value of register 0x2F5B to be restored
+             * at the end of this routine. */
+            ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            /* Disable the PHY transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_FORCE_GIGA);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_DSP_FFE,
+                                          IGP01E1000_PHY_DSP_FFE_DEFAULT);
+            if (ret_val)
+                return ret_val;
+
+            ret_val = e1000_write_phy_reg(hw, 0x0000,
+                                          IGP01E1000_IEEE_RESTART_AUTONEG);
+            if (ret_val)
+                return ret_val;
+
+            msec_delay_irq(20);
+
+            /* Now enable the transmitter */
+            ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+            if (ret_val)
+                return ret_val;
+
+            hw->ffe_config_state = e1000_ffe_config_enabled;
+        }
+    }
+    return E1000_SUCCESS;
+}
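+
+/* Illustrative call pattern (an assumption about the surrounding driver, not
+ * taken from this file): the routine is intended to run from the link
+ * watchdog whenever the link state changes, e.g.
+ *
+ *     e1000_config_dsp_after_link_change(hw, TRUE);    after link comes up
+ *     e1000_config_dsp_after_link_change(hw, FALSE);   after link drops
+ */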
+
+/*****************************************************************************
+ * Set PHY to class A mode
+ * Assumes the following operations will follow to enable the new class mode.
+ *  1. Do a PHY soft reset
+ *  2. Restart auto-negotiation or force link.
+ *
+ * hw - Struct containing variables accessed by shared code
+ ****************************************************************************/
+static int32_t
+e1000_set_phy_mode(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t eeprom_data;
+
+    DEBUGFUNC("e1000_set_phy_mode");
+
+    if ((hw->mac_type == e1000_82545_rev_3) &&
+        (hw->media_type == e1000_media_type_copper)) {
+        ret_val = e1000_read_eeprom(hw, EEPROM_PHY_CLASS_WORD, 1, &eeprom_data);
+        if (ret_val) {
+            return ret_val;
+        }
+
+        if ((eeprom_data != EEPROM_RESERVED_WORD) &&
+            (eeprom_data & EEPROM_PHY_CLASS_A)) {
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x000B);
+            if (ret_val)
+                return ret_val;
+            ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x8104);
+            if (ret_val)
+                return ret_val;
+
+            hw->phy_reset_disable = FALSE;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU state according to the active flag.  When
+ * activating LPLU this function also disables SmartSpeed, and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation
+ * advertisement meets standards of either 10, 10/100, or 10/100/1000 at all
+ * duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - TRUE to enable LPLU, FALSE to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_set_d3_lplu_state(struct e1000_hw *hw,
+                        boolean_t active)
+{
+    uint32_t phy_ctrl = 0;
+    int32_t ret_val;
+    uint16_t phy_data;
+    DEBUGFUNC("e1000_set_d3_lplu_state");
+
+    if (hw->phy_type != e1000_phy_igp && hw->phy_type != e1000_phy_igp_2
+        && hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    /* During normal driver activity LPLU should not be used, or the link
+     * will be negotiated up from the lowest speed (10 Mbps).  The capability
+     * is intended for Dx power-state transitions. */
+    if (hw->mac_type == e1000_82541_rev_2 || hw->mac_type == e1000_82547_rev_2) {
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &phy_data);
+        if (ret_val)
+            return ret_val;
+    } else if (hw->mac_type == e1000_ich8lan) {
+        /* The MAC writes into the PHY register based on the state transition
+         * and starts auto-negotiation.  The SW driver can override the
+         * settings in the CSR PHY power control (E1000_PHY_CTRL) register. */
+        phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data &= ~IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+                E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data &= ~IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+    } else if ((hw->autoneg_advertised == AUTONEG_ADVERTISE_SPEED_DEFAULT) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_ALL ) ||
+               (hw->autoneg_advertised == AUTONEG_ADVERTISE_10_100_ALL)) {
+
+        if (hw->mac_type == e1000_82541_rev_2 ||
+            hw->mac_type == e1000_82547_rev_2) {
+            phy_data |= IGP01E1000_GMII_FLEX_SPD;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, phy_data);
+            if (ret_val)
+                return ret_val;
+        } else {
+            if (hw->mac_type == e1000_ich8lan) {
+                phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+                E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+            } else {
+                phy_data |= IGP02E1000_PM_D3_LPLU;
+                ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+                                              phy_data);
+                if (ret_val)
+                    return ret_val;
+            }
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
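+
+/* Hypothetical suspend-path sketch (illustrative only): a driver might
+ * enable LPLU before entering D3 and disable it again on resume:
+ *
+ *     e1000_set_d3_lplu_state(hw, TRUE);     before entering D3
+ *     e1000_set_d3_lplu_state(hw, FALSE);    after returning to D0
+ */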
+
+/*****************************************************************************
+ *
+ * This function sets the LPLU D0 state according to the active flag.  When
+ * activating LPLU this function also disables SmartSpeed, and vice versa.
+ * LPLU will not be activated unless the device's autonegotiation
+ * advertisement meets standards of either 10, 10/100, or 10/100/1000 at all
+ * duplexes.
+ * hw: Struct containing variables accessed by shared code
+ * active - TRUE to enable LPLU, FALSE to disable LPLU.
+ *
+ * returns: - E1000_ERR_PHY if reading/writing the PHY fails
+ *            E1000_SUCCESS in any other case.
+ *
+ ****************************************************************************/
+
+int32_t
+e1000_set_d0_lplu_state(struct e1000_hw *hw,
+                        boolean_t active)
+{
+    uint32_t phy_ctrl = 0;
+    int32_t ret_val;
+    uint16_t phy_data;
+    DEBUGFUNC("e1000_set_d0_lplu_state");
+
+    if (hw->mac_type <= e1000_82547_rev_2)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        phy_ctrl = E1000_READ_REG(hw, PHY_CTRL);
+    } else {
+        ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+        if (ret_val)
+            return ret_val;
+    }
+
+    if (!active) {
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+            E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data &= ~IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* LPLU and SmartSpeed are mutually exclusive.  LPLU is used during
+         * Dx states where the power conservation is most important.  During
+         * driver activity we should enable SmartSpeed, so performance is
+         * maintained. */
+        if (hw->smart_speed == e1000_smart_speed_on) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data |= IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        } else if (hw->smart_speed == e1000_smart_speed_off) {
+            ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                         &phy_data);
+            if (ret_val)
+                return ret_val;
+
+            phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+            ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+                                          phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+
+    } else {
+
+        if (hw->mac_type == e1000_ich8lan) {
+            phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+            E1000_WRITE_REG(hw, PHY_CTRL, phy_ctrl);
+        } else {
+            phy_data |= IGP02E1000_PM_D0_LPLU;
+            ret_val = e1000_write_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+            if (ret_val)
+                return ret_val;
+        }
+
+        /* When LPLU is enabled we should disable SmartSpeed */
+        ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+        ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CONFIG, phy_data);
+        if (ret_val)
+            return ret_val;
+
+    }
+    return E1000_SUCCESS;
+}
+
+/******************************************************************************
+ * Change VCO speed register to improve Bit Error Rate performance of SERDES.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *****************************************************************************/
+static int32_t
+e1000_set_vco_speed(struct e1000_hw *hw)
+{
+    int32_t  ret_val;
+    uint16_t default_page = 0;
+    uint16_t phy_data;
+
+    DEBUGFUNC("e1000_set_vco_speed");
+
+    switch (hw->mac_type) {
+    case e1000_82545_rev_3:
+    case e1000_82546_rev_3:
+       break;
+    default:
+        return E1000_SUCCESS;
+    }
+
+    /* Set PHY register 30, page 5, bit 8 to 0 */
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, &default_page);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    /* Set PHY register 30, page 4, bit 11 to 1 */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, default_page);
+    if (ret_val)
+        return ret_val;
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function reads the cookie from ARC RAM.
+ *
+ * returns: - E1000_SUCCESS.
+ ****************************************************************************/
+int32_t
+e1000_host_if_read_cookie(struct e1000_hw * hw, uint8_t *buffer)
+{
+    uint8_t i;
+    uint32_t offset = E1000_MNG_DHCP_COOKIE_OFFSET;
+    uint8_t length = E1000_MNG_DHCP_COOKIE_LENGTH;
+
+    length = (length >> 2);
+    offset = (offset >> 2);
+
+    for (i = 0; i < length; i++) {
+        *((uint32_t *) buffer + i) =
+            E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset + i);
+    }
+    return E1000_SUCCESS;
+}
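+
+/* Note on the shifts above: HOST_IF is accessed as 32-bit words, so the byte
+ * offset and byte length are converted to DWORD units (>> 2).  For instance,
+ * a cookie length of 16 bytes becomes 4 DWORD reads.  The caller's buffer is
+ * assumed to be 32-bit aligned and at least E1000_MNG_DHCP_COOKIE_LENGTH
+ * bytes long. */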
+
+
+/*****************************************************************************
+ * This function checks whether the HOST IF is enabled for command operation
+ * and whether the previous command has completed.  It busy-waits while a
+ * previous command is still in progress.
+ *
+ * returns: - E1000_ERR_HOST_INTERFACE_COMMAND if the interface is not ready
+ *            or the wait times out
+ *          - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_enable_host_if(struct e1000_hw * hw)
+{
+    uint32_t hicr;
+    uint8_t i;
+
+    /* Check that the host interface is enabled. */
+    hicr = E1000_READ_REG(hw, HICR);
+    if ((hicr & E1000_HICR_EN) == 0) {
+        DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    /* check the previous command is completed */
+    for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+        hicr = E1000_READ_REG(hw, HICR);
+        if (!(hicr & E1000_HICR_C))
+            break;
+        msec_delay_irq(1);
+    }
+
+    if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+        DEBUGOUT("Previous command timeout failed .\n");
+        return -E1000_ERR_HOST_INTERFACE_COMMAND;
+    }
+    return E1000_SUCCESS;
+}
+
+/*****************************************************************************
+ * This function writes the buffer content at the given offset on the host
+ * interface.  It handles alignment so the writes are performed efficiently,
+ * and it accumulates the byte sum of the buffer into the *sum parameter.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_host_if_write(struct e1000_hw * hw, uint8_t *buffer,
+                        uint16_t length, uint16_t offset, uint8_t *sum)
+{
+    uint8_t *tmp;
+    uint8_t *bufptr = buffer;
+    uint32_t data = 0;
+    uint16_t remaining, i, j, prev_bytes;
+
+    /* *sum is a running byte sum of the data, not a checksum */
+
+    if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+        return -E1000_ERR_PARAM;
+    }
+
+    tmp = (uint8_t *)&data;
+    prev_bytes = offset & 0x3;
+    offset &= 0xFFFC;
+    offset >>= 2;
+
+    if (prev_bytes) {
+        data = E1000_READ_REG_ARRAY_DWORD(hw, HOST_IF, offset);
+        for (j = prev_bytes; j < sizeof(uint32_t); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset, data);
+        length -= j - prev_bytes;
+        offset++;
+    }
+
+    remaining = length & 0x3;
+    length -= remaining;
+
+    /* Calculate length in DWORDs */
+    length >>= 2;
+
+    /* The device driver writes the relevant command block into the
+     * ram area. */
+    for (i = 0; i < length; i++) {
+        for (j = 0; j < sizeof(uint32_t); j++) {
+            *(tmp + j) = *bufptr++;
+            *sum += *(tmp + j);
+        }
+
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
+    if (remaining) {
+        for (j = 0; j < sizeof(uint32_t); j++) {
+            if (j < remaining)
+                *(tmp + j) = *bufptr++;
+            else
+                *(tmp + j) = 0;
+
+            *sum += *(tmp + j);
+        }
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, offset + i, data);
+    }
+
+    return E1000_SUCCESS;
+}
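+
+/* Worked example (hypothetical values): writing 10 bytes at offset 6 gives
+ * prev_bytes = 2, so the first pass read-modify-writes the upper two bytes
+ * of the DWORD at index 1, leaving 8 bytes; those are written as two full
+ * DWORDs, and remaining = 0, so no zero-padded tail DWORD is needed. */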
+
+
+/*****************************************************************************
+ * This function writes the command header after performing the checksum
+ * calculation.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_write_cmd_header(struct e1000_hw * hw,
+                           struct e1000_host_mng_command_header * hdr)
+{
+    uint16_t i;
+    uint8_t sum;
+    uint8_t *buffer;
+
+    /* Write the whole command header structure, which includes the sum of
+     * the buffer */
+
+    uint16_t length = sizeof(struct e1000_host_mng_command_header);
+
+    sum = hdr->checksum;
+    hdr->checksum = 0;
+
+    buffer = (uint8_t *) hdr;
+    i = length;
+    while (i--)
+        sum += buffer[i];
+
+    hdr->checksum = 0 - sum;
+
+    length >>= 2;
+    /* The device driver writes the relevant command block into the ram area. */
+    for (i = 0; i < length; i++) {
+        E1000_WRITE_REG_ARRAY_DWORD(hw, HOST_IF, i, *((uint32_t *) hdr + i));
+        E1000_WRITE_FLUSH(hw);
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function indicates to the ARC that a new command is pending, which
+ * completes one write operation by the driver.
+ *
+ * returns  - E1000_SUCCESS for success.
+ ****************************************************************************/
+int32_t
+e1000_mng_write_commit(struct e1000_hw * hw)
+{
+    uint32_t hicr;
+
+    hicr = E1000_READ_REG(hw, HICR);
+    /* Setting this bit tells the ARC that a new command is pending. */
+    E1000_WRITE_REG(hw, HICR, hicr | E1000_HICR_C);
+
+    return E1000_SUCCESS;
+}
+
+
+/*****************************************************************************
+ * This function checks the mode of the firmware.
+ *
+ * returns  - TRUE when the firmware is in IAMT mode, FALSE otherwise.
+ ****************************************************************************/
+boolean_t
+e1000_check_mng_mode(struct e1000_hw *hw)
+{
+    uint32_t fwsm;
+
+    fwsm = E1000_READ_REG(hw, FWSM);
+
+    if (hw->mac_type == e1000_ich8lan) {
+        if ((fwsm & E1000_FWSM_MODE_MASK) ==
+            (E1000_MNG_ICH_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+            return TRUE;
+    } else if ((fwsm & E1000_FWSM_MODE_MASK) ==
+               (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT))
+        return TRUE;
+
+    return FALSE;
+}
+
+
+/*****************************************************************************
+ * This function writes the DHCP information to the host interface.
+ ****************************************************************************/
+int32_t
+e1000_mng_write_dhcp_info(struct e1000_hw * hw, uint8_t *buffer,
+                          uint16_t length)
+{
+    int32_t ret_val;
+    struct e1000_host_mng_command_header hdr;
+
+    hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+    hdr.command_length = length;
+    hdr.reserved1 = 0;
+    hdr.reserved2 = 0;
+    hdr.checksum = 0;
+
+    ret_val = e1000_mng_enable_host_if(hw);
+    if (ret_val == E1000_SUCCESS) {
+        ret_val = e1000_mng_host_if_write(hw, buffer, length, sizeof(hdr),
+                                          &(hdr.checksum));
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+            if (ret_val == E1000_SUCCESS)
+                ret_val = e1000_mng_write_commit(hw);
+        }
+    }
+    return ret_val;
+}
+
+
+/*****************************************************************************
+ * This function calculates the checksum.
+ *
+ * returns  - checksum of buffer contents.
+ ****************************************************************************/
+uint8_t
+e1000_calculate_mng_checksum(char *buffer, uint32_t length)
+{
+    uint8_t sum = 0;
+    uint32_t i;
+
+    if (!buffer)
+        return 0;
+
+    for (i = 0; i < length; i++)
+        sum += buffer[i];
+
+    return (uint8_t) (0 - sum);
+}
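+
+/* Property worth noting (illustrative): because the checksum is the two's
+ * complement of the byte sum, a buffer that already contains its checksum
+ * byte re-sums to zero:
+ *
+ *     buf[len] = e1000_calculate_mng_checksum(buf, len);
+ *     e1000_calculate_mng_checksum(buf, len + 1) now evaluates to 0.
+ */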
+
+/*****************************************************************************
+ * This function checks whether TX packet filtering needs to be enabled.
+ *
+ * returns  - TRUE if packet filtering is needed, FALSE otherwise.
+ ****************************************************************************/
+boolean_t
+e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+    /* called in init as well as watchdog timer functions */
+
+    int32_t ret_val, checksum;
+    boolean_t tx_filter = FALSE;
+    struct e1000_host_mng_dhcp_cookie *hdr = &(hw->mng_cookie);
+    uint8_t *buffer = (uint8_t *) &(hw->mng_cookie);
+
+    if (e1000_check_mng_mode(hw)) {
+        ret_val = e1000_mng_enable_host_if(hw);
+        if (ret_val == E1000_SUCCESS) {
+            ret_val = e1000_host_if_read_cookie(hw, buffer);
+            if (ret_val == E1000_SUCCESS) {
+                checksum = hdr->checksum;
+                hdr->checksum = 0;
+                if ((hdr->signature == E1000_IAMT_SIGNATURE) &&
+                    checksum == e1000_calculate_mng_checksum((char *)buffer,
+                                               E1000_MNG_DHCP_COOKIE_LENGTH)) {
+                    if (hdr->status &
+                        E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT)
+                        tx_filter = TRUE;
+                } else
+                    tx_filter = TRUE;
+            } else
+                tx_filter = TRUE;
+        }
+    }
+
+    hw->tx_pkt_filtering = tx_filter;
+    return tx_filter;
+}
+
+/******************************************************************************
+ * Verifies whether the hardware needs to allow ARPs to be processed by the
+ * host
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - TRUE/FALSE
+ *
+ *****************************************************************************/
+uint32_t
+e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+    uint32_t manc;
+    uint32_t fwsm, factps;
+
+    if (hw->asf_firmware_present) {
+        manc = E1000_READ_REG(hw, MANC);
+
+        if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+            !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+            return FALSE;
+        if (e1000_arc_subsystem_valid(hw) == TRUE) {
+            fwsm = E1000_READ_REG(hw, FWSM);
+            factps = E1000_READ_REG(hw, FACTPS);
+
+            if (((fwsm & E1000_FWSM_MODE_MASK) ==
+                (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT)) &&
+                (factps & E1000_FACTPS_MNGCG))
+                return TRUE;
+        } else
+            if ((manc & E1000_MANC_SMBUS_EN) && !(manc & E1000_MANC_ASF_EN))
+                return TRUE;
+    }
+    return FALSE;
+}
+
+static int32_t
+e1000_polarity_reversal_workaround(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t mii_status_reg;
+    uint16_t i;
+
+    /* Polarity reversal workaround for forced 10F/10H links. */
+
+    /* Disable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out once the no-link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register twice: the Link Status bit is
+         * latched-low, so the first read clears any stale latch and the
+         * second reflects the current state.  Wait for it to be clear.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0) break;
+        msec_delay_irq(100);
+    }
+
+    /* Recommended delay time after link has been lost */
+    msec_delay_irq(1000);
+
+    /* Now we will re-enable the transmitter on the PHY */
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+    if (ret_val)
+        return ret_val;
+    msec_delay_irq(50);
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+    if (ret_val)
+        return ret_val;
+
+    /* This loop will early-out if the link condition has been met. */
+    for (i = PHY_FORCE_TIME; i > 0; i--) {
+        /* Read the MII Status Register twice (the Link Status bit is
+         * latched-low) and wait for the Link Status bit to be set.
+         */
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+        if (ret_val)
+            return ret_val;
+
+        if (mii_status_reg & MII_SR_LINK_STATUS) break;
+        msec_delay_irq(100);
+    }
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Disables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+void
+e1000_set_pci_express_master_disable(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+
+    DEBUGFUNC("e1000_set_pci_express_master_disable");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return;
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+}
+
+/***************************************************************************
+ *
+ * Enables PCI-Express master access.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - none.
+ *
+ ***************************************************************************/
+void
+e1000_enable_pciex_master(struct e1000_hw *hw)
+{
+    uint32_t ctrl;
+
+    DEBUGFUNC("e1000_enable_pciex_master");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return;
+
+    ctrl = E1000_READ_REG(hw, CTRL);
+    ctrl &= ~E1000_CTRL_GIO_MASTER_DISABLE;
+    E1000_WRITE_REG(hw, CTRL, ctrl);
+}
+
+/*******************************************************************************
+ *
+ * Disables PCI-Express master access and verifies there are no pending requests
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_MASTER_REQUESTS_PENDING if the master disable bit
+ *            did not cause outstanding master requests to drain.
+ *            E1000_SUCCESS when master requests are disabled.
+ *
+ ******************************************************************************/
+int32_t
+e1000_disable_pciex_master(struct e1000_hw *hw)
+{
+    int32_t timeout = MASTER_DISABLE_TIMEOUT;   /* 80ms */
+
+    DEBUGFUNC("e1000_disable_pciex_master");
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    e1000_set_pci_express_master_disable(hw);
+
+    while (timeout) {
+        if (!(E1000_READ_REG(hw, STATUS) & E1000_STATUS_GIO_MASTER_ENABLE))
+            break;
+        else
+            usec_delay(100);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Master requests are pending.\n");
+        return -E1000_ERR_MASTER_REQUESTS_PENDING;
+    }
+
+    return E1000_SUCCESS;
+}
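+
+/* Sketch of the intended pairing (illustrative only): bus mastering is
+ * disabled around a reset and re-enabled afterwards, e.g.
+ *
+ *     if (e1000_disable_pciex_master(hw) != E1000_SUCCESS)
+ *         DEBUGOUT("PCI-E master disable polling has failed.\n");
+ *     ...perform the reset...
+ *     e1000_enable_pciex_master(hw);
+ */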
+
+/*******************************************************************************
+ *
+ * Check for EEPROM Auto Read bit done.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ *            E1000_SUCCESS in any other case.
+ *
+ ******************************************************************************/
+int32_t
+e1000_get_auto_rd_done(struct e1000_hw *hw)
+{
+    int32_t timeout = AUTO_READ_DONE_TIMEOUT;
+
+    DEBUGFUNC("e1000_get_auto_rd_done");
+
+    switch (hw->mac_type) {
+    default:
+        msec_delay(5);
+        break;
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+    case e1000_ich8lan:
+        while (timeout) {
+            if (E1000_READ_REG(hw, EECD) & E1000_EECD_AUTO_RD)
+                break;
+            else msec_delay(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("Auto read by HW from EEPROM has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    /* PHY configuration from NVM just starts after EECD_AUTO_RD sets to high.
+     * Need to wait for PHY configuration completion before accessing NVM
+     * and PHY. */
+    if (hw->mac_type == e1000_82573)
+        msec_delay(25);
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ * Checks if the PHY configuration is done
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the MAC fails to reset
+ *            E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+    int32_t timeout = PHY_CFG_TIMEOUT;
+    uint32_t cfg_mask = E1000_EEPROM_CFG_DONE;
+
+    DEBUGFUNC("e1000_get_phy_cfg_done");
+
+    switch (hw->mac_type) {
+    default:
+        msec_delay_irq(10);
+        break;
+    case e1000_80003es2lan:
+        /* Separate *_CFG_DONE_* bit for each port */
+        if (E1000_READ_REG(hw, STATUS) & E1000_STATUS_FUNC_1)
+            cfg_mask = E1000_EEPROM_CFG_DONE_PORT_1;
+        fallthrough;
+    case e1000_82571:
+    case e1000_82572:
+        while (timeout) {
+            if (E1000_READ_REG(hw, EEMNGCTL) & cfg_mask)
+                break;
+            else
+                msec_delay(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("MNG configuration cycle has not completed.\n");
+            return -E1000_ERR_RESET;
+        }
+        break;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Uses the combination of the SMBI and SWESMBI semaphore bits when resetting
+ * the adapter or accessing the EEPROM.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_EEPROM if the EEPROM cannot be accessed.
+ *            E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    int32_t timeout;
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_get_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return E1000_SUCCESS;
+
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Get the SW semaphore. */
+        if (e1000_get_software_semaphore(hw) != E1000_SUCCESS)
+            return -E1000_ERR_EEPROM;
+    }
+
+    /* Get the FW semaphore. */
+    timeout = hw->eeprom.word_size + 1;
+    while (timeout) {
+        swsm = E1000_READ_REG(hw, SWSM);
+        swsm |= E1000_SWSM_SWESMBI;
+        E1000_WRITE_REG(hw, SWSM, swsm);
+        /* if we managed to set the bit we got the semaphore. */
+        swsm = E1000_READ_REG(hw, SWSM);
+        if (swsm & E1000_SWSM_SWESMBI)
+            break;
+
+        usec_delay(50);
+        timeout--;
+    }
+
+    if (!timeout) {
+        /* Release semaphores */
+        e1000_put_hw_eeprom_semaphore(hw);
+        DEBUGOUT("Driver can't access the Eeprom - SWESMBI bit is set.\n");
+        return -E1000_ERR_EEPROM;
+    }
+
+    return E1000_SUCCESS;
+}
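+
+/* Typical usage (a sketch, not from this file): bracket NVM access with the
+ * acquire/release pair so firmware and software do not collide:
+ *
+ *     if (e1000_get_hw_eeprom_semaphore(hw) != E1000_SUCCESS)
+ *         return -E1000_ERR_EEPROM;
+ *     ...read or write the EEPROM...
+ *     e1000_put_hw_eeprom_semaphore(hw);
+ */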
+
+/***************************************************************************
+ * This function clears HW semaphore bits.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - None.
+ *
+ ***************************************************************************/
+void
+e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw)
+{
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_put_hw_eeprom_semaphore");
+
+    if (!hw->eeprom_semaphore_present)
+        return;
+
+    swsm = E1000_READ_REG(hw, SWSM);
+    if (hw->mac_type == e1000_80003es2lan) {
+        /* Release both semaphores. */
+        swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+    } else
+        swsm &= ~(E1000_SWSM_SWESMBI);
+    E1000_WRITE_REG(hw, SWSM, swsm);
+}
+
+/***************************************************************************
+ *
+ * Obtains the software semaphore bit (SMBI) before resetting the PHY.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_ERR_RESET if the semaphore cannot be obtained.
+ *            E1000_SUCCESS in any other case.
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_software_semaphore(struct e1000_hw *hw)
+{
+    int32_t timeout = hw->eeprom.word_size + 1;
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_get_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan)
+        return E1000_SUCCESS;
+
+    while (timeout) {
+        swsm = E1000_READ_REG(hw, SWSM);
+        /* If SMBI bit cleared, it is now set and we hold the semaphore */
+        if (!(swsm & E1000_SWSM_SMBI))
+            break;
+        msec_delay_irq(1);
+        timeout--;
+    }
+
+    if (!timeout) {
+        DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+        return -E1000_ERR_RESET;
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Releases the software semaphore bit (SMBI).
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+void
+e1000_release_software_semaphore(struct e1000_hw *hw)
+{
+    uint32_t swsm;
+
+    DEBUGFUNC("e1000_release_software_semaphore");
+
+    if (hw->mac_type != e1000_80003es2lan)
+        return;
+
+    swsm = E1000_READ_REG(hw, SWSM);
+    /* Release the SW semaphores.*/
+    swsm &= ~E1000_SWSM_SMBI;
+    E1000_WRITE_REG(hw, SWSM, swsm);
+}
+
+/******************************************************************************
+ * Checks if PHY reset is blocked due to SOL/IDER session, for example.
+ * Returning E1000_BLK_PHY_RESET isn't necessarily an error.  But it's up to
+ * the caller to figure out how to deal with it.
+ *
+ * hw - Struct containing variables accessed by shared code
+ *
+ * returns: - E1000_BLK_PHY_RESET
+ *            E1000_SUCCESS
+ *
+ *****************************************************************************/
+int32_t
+e1000_check_phy_reset_block(struct e1000_hw *hw)
+{
+    uint32_t manc = 0;
+    uint32_t fwsm = 0;
+
+    if (hw->mac_type == e1000_ich8lan) {
+        fwsm = E1000_READ_REG(hw, FWSM);
+        return (fwsm & E1000_FWSM_RSPCIPHY) ? E1000_SUCCESS
+                                            : E1000_BLK_PHY_RESET;
+    }
+
+    if (hw->mac_type > e1000_82547_rev_2)
+        manc = E1000_READ_REG(hw, MANC);
+    return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+            E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+uint8_t
+e1000_arc_subsystem_valid(struct e1000_hw *hw)
+{
+    uint32_t fwsm;
+
+    /* On 8257x silicon, registers in the range of 0x8800 - 0x8FFC
+     * may not be provided a DMA clock when no manageability features are
+     * enabled.  We do not want to perform any reads/writes to these registers
+     * if this is the case.  We read FWSM to determine the manageability mode.
+     */
+    switch (hw->mac_type) {
+    case e1000_82571:
+    case e1000_82572:
+    case e1000_82573:
+    case e1000_80003es2lan:
+        fwsm = E1000_READ_REG(hw, FWSM);
+        if ((fwsm & E1000_FWSM_MODE_MASK) != 0)
+            return TRUE;
+        break;
+    case e1000_ich8lan:
+        return TRUE;
+    default:
+        break;
+    }
+    return FALSE;
+}
+
+
+/******************************************************************************
+ * Configure PCI-Ex no-snoop
+ *
+ * hw - Struct containing variables accessed by shared code.
+ * no_snoop - Bitmap of no-snoop events.
+ *
+ * returns: E1000_SUCCESS
+ *
+ *****************************************************************************/
+int32_t
+e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop)
+{
+    uint32_t gcr_reg = 0;
+
+    DEBUGFUNC("e1000_set_pci_ex_no_snoop");
+
+    if (hw->bus_type == e1000_bus_type_unknown)
+        e1000_get_bus_info(hw);
+
+    if (hw->bus_type != e1000_bus_type_pci_express)
+        return E1000_SUCCESS;
+
+    if (no_snoop) {
+        gcr_reg = E1000_READ_REG(hw, GCR);
+        gcr_reg &= ~(PCI_EX_NO_SNOOP_ALL);
+        gcr_reg |= no_snoop;
+        E1000_WRITE_REG(hw, GCR, gcr_reg);
+    }
+    if (hw->mac_type == e1000_ich8lan) {
+        uint32_t ctrl_ext;
+
+        E1000_WRITE_REG(hw, GCR, PCI_EX_82566_SNOOP_ALL);
+
+        ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+        ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+        E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+    }
+
+    return E1000_SUCCESS;
+}
+
+/***************************************************************************
+ *
+ * Gets the software semaphore FLAG bit (SWFLAG).  SWFLAG is used to
+ * synchronize access to all shared resources between SW, FW, and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_get_software_flag(struct e1000_hw *hw)
+{
+    int32_t timeout = PHY_CFG_TIMEOUT;
+    uint32_t extcnf_ctrl;
+
+    DEBUGFUNC("e1000_get_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        while (timeout) {
+            extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+            extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+            E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+
+            extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+            if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+                break;
+            msec_delay_irq(1);
+            timeout--;
+        }
+
+        if (!timeout) {
+            DEBUGOUT("FW or HW locks the resource too long.\n");
+            return -E1000_ERR_CONFIG;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
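+
+/* The acquire/release pairing for SWFLAG is demonstrated by
+ * e1000_read_eeprom_ich8() later in this file, which takes the flag, reads
+ * the flash words, and releases the flag before returning. */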
+
+/***************************************************************************
+ *
+ * Releases the software semaphore FLAG bit (SWFLAG).  SWFLAG is used to
+ * synchronize access to all shared resources between SW, FW, and HW.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+void
+e1000_release_software_flag(struct e1000_hw *hw)
+{
+    uint32_t extcnf_ctrl;
+
+    DEBUGFUNC("e1000_release_software_flag");
+
+    if (hw->mac_type == e1000_ich8lan) {
+        extcnf_ctrl = E1000_READ_REG(hw, EXTCNF_CTRL);
+        extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+        E1000_WRITE_REG(hw, EXTCNF_CTRL, extcnf_ctrl);
+    }
+
+    return;
+}
+
+/***************************************************************************
+ *
+ * Disables dynamic power-down mode in the IFE PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw)
+{
+    uint16_t phy_data;
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_ife_disable_dynamic_power_down");
+
+    if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data |= IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+    }
+
+    return ret_val;
+}
+
+/***************************************************************************
+ *
+ * Enable dynamic power down mode in ife PHY.
+ * It can be used to work around the band-gap problem.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ ***************************************************************************/
+int32_t
+e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw)
+{
+    uint16_t phy_data;
+    int32_t ret_val = E1000_SUCCESS;
+
+    DEBUGFUNC("e1000_ife_enable_dynamic_power_down");
+
+    if (hw->phy_type == e1000_phy_ife) {
+        ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &phy_data);
+        if (ret_val)
+            return ret_val;
+
+        phy_data &= ~IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN;
+        ret_val = e1000_write_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, phy_data);
+    }
+
+    return ret_val;
+}
+
+/******************************************************************************
+ * Reads a 16 bit word or words from the EEPROM using the ICH8's flash access
+ * register.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to read
+ * data - word read from the EEPROM
+ * words - number of words to read
+ *****************************************************************************/
+int32_t
+e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+                       uint16_t *data)
+{
+    int32_t  error = E1000_SUCCESS;
+    uint32_t flash_bank = 0;
+    uint32_t act_offset = 0;
+    uint32_t bank_offset = 0;
+    uint16_t word = 0;
+    uint16_t i = 0;
+
+    /* We need to know which flash bank is valid.  If we did not allocate
+     * eeprom_shadow_ram, we may not be tracking flash_bank, so it cannot
+     * be trusted and must be re-read on every access.
+     */
+    /* Value of bit 22 corresponds to the flash bank we're on. */
+    flash_bank = (E1000_READ_REG(hw, EECD) & E1000_EECD_SEC1VAL) ? 1 : 0;
+
+    /* Adjust offset appropriately if we're on bank 1 - adjust for word size */
+    bank_offset = flash_bank * (hw->flash_bank_size * 2);
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    for (i = 0; i < words; i++) {
+        if (hw->eeprom_shadow_ram != NULL &&
+            hw->eeprom_shadow_ram[offset+i].modified == TRUE) {
+            data[i] = hw->eeprom_shadow_ram[offset+i].eeprom_word;
+        } else {
+            /* The NVM part needs a byte offset, hence * 2 */
+            act_offset = bank_offset + ((offset + i) * 2);
+            error = e1000_read_ich8_word(hw, act_offset, &word);
+            if (error != E1000_SUCCESS)
+                break;
+            data[i] = word;
+        }
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
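+
+/* Worked example (editorial, with an illustrative flash_bank_size of 0x800
+ * words) of the offset arithmetic above: if bank 1 is the valid bank
+ * (SEC1VAL set), bank_offset = 1 * (0x800 * 2) = 0x1000 bytes, so EEPROM
+ * word 0x10 is fetched from byte address 0x1000 + 0x10 * 2 = 0x1020.
+ */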
+
+/******************************************************************************
+ * Writes a 16 bit word or words to the EEPROM using the ICH8's flash access
+ * register.  The data is actually staged in the shadow RAM cache in the hw
+ * structure, hw->eeprom_shadow_ram.  e1000_commit_shadow_ram flushes this to
+ * the NVM when the NVM checksum is updated.
+ *
+ * hw - Struct containing variables accessed by shared code
+ * offset - offset of word in the EEPROM to write
+ * words - number of words to write
+ * data - words to write to the EEPROM
+ *****************************************************************************/
+int32_t
+e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset, uint16_t words,
+                        uint16_t *data)
+{
+    uint32_t i = 0;
+    int32_t error = E1000_SUCCESS;
+
+    error = e1000_get_software_flag(hw);
+    if (error != E1000_SUCCESS)
+        return error;
+
+    /* A driver can write to the NVM only if it has eeprom_shadow_ram
+     * allocated.  Subsequent reads to the modified words are read from
+     * this cached structure as well.  Writes will only go into this
+     * cached structure unless it's followed by a call to
+     * e1000_update_eeprom_checksum() where it will commit the changes
+     * and clear the "modified" field.
+     */
+    if (hw->eeprom_shadow_ram != NULL) {
+        for (i = 0; i < words; i++) {
+            if ((offset + i) < E1000_SHADOW_RAM_WORDS) {
+                hw->eeprom_shadow_ram[offset+i].modified = TRUE;
+                hw->eeprom_shadow_ram[offset+i].eeprom_word = data[i];
+            } else {
+                error = -E1000_ERR_EEPROM;
+                break;
+            }
+        }
+    } else {
+        /* Drivers have the option to not allocate eeprom_shadow_ram as long
+         * as they don't perform any NVM writes.  An attempt in doing so
+         * will result in this error.
+         */
+        error = -E1000_ERR_EEPROM;
+    }
+
+    e1000_release_software_flag(hw);
+
+    return error;
+}
+
+/******************************************************************************
+ * This function does initial flash setup so that a new read/write/erase cycle
+ * can be started.
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+int32_t
+e1000_ich8_cycle_init(struct e1000_hw *hw)
+{
+    union ich8_hws_flash_status hsfsts;
+    int32_t error = E1000_ERR_EEPROM;
+    int32_t i     = 0;
+
+    DEBUGFUNC("e1000_ich8_cycle_init");
+
+    hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+    /* Check the Flash Descriptor Valid bit in HW status */
+    if (hsfsts.hsf_status.fldesvalid == 0) {
+        DEBUGOUT("Flash descriptor invalid.  SW Sequencing must be used.");
+        return error;
+    }
+
+    /* Clear FCERR and DAEL in HW status by writing a 1 to each */
+    hsfsts.hsf_status.flcerr = 1;
+    hsfsts.hsf_status.dael = 1;
+
+    E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+
+    /* Either there should be a hardware cycle-in-progress bit to check
+     * before starting a new cycle, or FDONE should read as 1 after a
+     * hardware reset so it can indicate whether a cycle is in progress or
+     * has completed.  There should also be a software semaphore guarding
+     * FDONE and the cycle-in-progress bit, so that two threads cannot
+     * start a cycle at the same time */
+
+    if (hsfsts.hsf_status.flcinprog == 0) {
+        /* There is no cycle running at present, so we can start a cycle */
+        /* Begin by setting Flash Cycle Done. */
+        hsfsts.hsf_status.flcdone = 1;
+        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+        error = E1000_SUCCESS;
+    } else {
+        /* Otherwise, poll for some time so the current cycle has a chance
+         * to end before giving up. */
+        for (i = 0; i < ICH8_FLASH_COMMAND_TIMEOUT; i++) {
+            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcinprog == 0) {
+                error = E1000_SUCCESS;
+                break;
+            }
+            usec_delay(1);
+        }
+        if (error == E1000_SUCCESS) {
+            /* The previous cycle ended within the timeout;
+             * now set the Flash Cycle Done. */
+            hsfsts.hsf_status.flcdone = 1;
+            E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFSTS, hsfsts.regval);
+        } else {
+            DEBUGOUT("Flash controller busy, cannot get access");
+        }
+    }
+    return error;
+}
+
+/******************************************************************************
+ * This function starts a flash cycle and waits for its completion
+ *
+ * hw - The pointer to the hw structure
+ ****************************************************************************/
+int32_t
+e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout)
+{
+    union ich8_hws_flash_ctrl hsflctl;
+    union ich8_hws_flash_status hsfsts;
+    int32_t error = E1000_ERR_EEPROM;
+    uint32_t i = 0;
+
+    /* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+    hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+    hsflctl.hsf_ctrl.flcgo = 1;
+    E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+    /* wait till FDONE bit is set to 1 */
+    do {
+        hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+        if (hsfsts.hsf_status.flcdone == 1)
+            break;
+        usec_delay(1);
+        i++;
+    } while (i < timeout);
+    if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0) {
+        error = E1000_SUCCESS;
+    }
+    return error;
+}
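+
+/* Editorial summary of the flash access protocol implemented by the two
+ * helpers above and used by the read/write/erase routines below:
+ *   1. e1000_ich8_cycle_init() - clear stale FCERR/DAEL, wait out any cycle
+ *      already in progress, then set FDONE;
+ *   2. program HSFCTL with the byte count and cycle type;
+ *   3. write the 24-bit linear address to FADDR (and FDATA0 for writes);
+ *   4. e1000_ich8_flash_cycle() - set FLCGO and poll FDONE;
+ *   5. on FCERR, repeat the sequence up to ICH8_FLASH_CYCLE_REPEAT_COUNT
+ *      times.
+ */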
+
+/******************************************************************************
+ * Reads a byte or word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte or word to read.
+ * size - Size of data to read, 1=byte 2=word
+ * data - Pointer to the word to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
+                     uint32_t size, uint16_t* data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    uint32_t flash_linear_address;
+    uint32_t flash_data = 0;
+    int32_t error = -E1000_ERR_EEPROM;
+    int32_t count = 0;
+
+    DEBUGFUNC("e1000_read_ich8_data");
+
+    if (size < 1 || size > 2 || data == NULL ||
+        index > ICH8_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
+    flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        usec_delay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_READ;
+        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        /* TODO: TBD maybe check the index against the size of flash */
+
+        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+        error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+
+        /* If FCERR is set, clear it and retry the whole sequence a few more
+         * times; otherwise read the value out of Flash Data0, least
+         * significant byte first. */
+        if (error == E1000_SUCCESS) {
+            flash_data = E1000_READ_ICH8_REG(hw, ICH8_FLASH_FDATA0);
+            if (size == 1) {
+                *data = (uint8_t)(flash_data & 0x000000FF);
+            } else if (size == 2) {
+                *data = (uint16_t)(flash_data & 0x0000FFFF);
+            }
+            break;
+        } else {
+            /* If we get here, something went wrong; but when a flash cycle
+             * error is detected, it is worth retrying, up to
+             * ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes one or two bytes to the NVM using the ICH8 flash access registers.
+ *
+ * hw - The pointer to the hw structure
+ * index - The index of the byte/word to write.
+ * size - Size of data to write, 1=byte 2=word
+ * data - The byte(s) to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index, uint32_t size,
+                      uint16_t data)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    uint32_t flash_linear_address;
+    uint32_t flash_data = 0;
+    int32_t error = -E1000_ERR_EEPROM;
+    int32_t count = 0;
+
+    DEBUGFUNC("e1000_write_ich8_data");
+
+    if (size < 1 || size > 2 || data > size * 0xff ||
+        index > ICH8_FLASH_LINEAR_ADDR_MASK)
+        return error;
+
+    flash_linear_address = (ICH8_FLASH_LINEAR_ADDR_MASK & index) +
+                           hw->flash_base_addr;
+
+    do {
+        usec_delay(1);
+        /* Steps */
+        error = e1000_ich8_cycle_init(hw);
+        if (error != E1000_SUCCESS)
+            break;
+
+        hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+        /* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+        hsflctl.hsf_ctrl.fldbcount = size - 1;
+        hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_WRITE;
+        E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+        /* Write the last 24 bits of index into Flash Linear address field in
+         * Flash Address */
+        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+        if (size == 1)
+            flash_data = (uint32_t)data & 0x00FF;
+        else
+            flash_data = (uint32_t)data;
+
+        E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FDATA0, flash_data);
+
+        /* If FCERR is set, clear it and retry the whole sequence a few more
+         * times; otherwise we are done. */
+        error = e1000_ich8_flash_cycle(hw, ICH8_FLASH_COMMAND_TIMEOUT);
+        if (error == E1000_SUCCESS) {
+            break;
+        } else {
+            /* If we get here, something went wrong; but when a flash cycle
+             * error is detected, it is worth retrying, up to
+             * ICH8_FLASH_CYCLE_REPEAT_COUNT times.
+             */
+            hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+            if (hsfsts.hsf_status.flcerr == 1) {
+                /* Repeat for some time before giving up. */
+                continue;
+            } else if (hsfsts.hsf_status.flcdone == 0) {
+                DEBUGOUT("Timeout error - flash cycle did not complete.");
+                break;
+            }
+        }
+    } while (count++ < ICH8_FLASH_CYCLE_REPEAT_COUNT);
+
+    return error;
+}
+
+/******************************************************************************
+ * Reads a single byte from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to read.
+ * data - Pointer to a byte to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t* data)
+{
+    int32_t status = E1000_SUCCESS;
+    uint16_t word = 0;
+
+    status = e1000_read_ich8_data(hw, index, 1, &word);
+    if (status == E1000_SUCCESS) {
+        *data = (uint8_t)word;
+    }
+
+    return status;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ * Performs verification by reading back the value and then going through
+ * a retry algorithm before giving up.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * byte - The byte to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t byte)
+{
+    int32_t error = E1000_SUCCESS;
+    int32_t program_retries;
+    uint8_t temp_byte = 0;
+
+    e1000_write_ich8_byte(hw, index, byte);
+    usec_delay(100);
+
+    for (program_retries = 0; program_retries < 100; program_retries++) {
+        e1000_read_ich8_byte(hw, index, &temp_byte);
+        if (temp_byte == byte)
+            break;
+        usec_delay(10);
+        e1000_write_ich8_byte(hw, index, byte);
+        usec_delay(100);
+    }
+    if (program_retries == 100)
+        error = E1000_ERR_EEPROM;
+
+    return error;
+}
+
+/******************************************************************************
+ * Writes a single byte to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The index of the byte to write.
+ * data - The byte to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index, uint8_t data)
+{
+    int32_t status = E1000_SUCCESS;
+    uint16_t word = (uint16_t)data;
+
+    status = e1000_write_ich8_data(hw, index, 1, word);
+
+    return status;
+}
+
+/******************************************************************************
+ * Reads a word from the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to read.
+ * data - Pointer to a word to store the value read.
+ *****************************************************************************/
+int32_t
+e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t *data)
+{
+    int32_t status = E1000_SUCCESS;
+    status = e1000_read_ich8_data(hw, index, 2, data);
+    return status;
+}
+
+/******************************************************************************
+ * Writes a word to the NVM using the ICH8 flash access registers.
+ *
+ * hw - pointer to e1000_hw structure
+ * index - The starting byte index of the word to write.
+ * data - The word to write to the NVM.
+ *****************************************************************************/
+int32_t
+e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index, uint16_t data)
+{
+    int32_t status = E1000_SUCCESS;
+    status = e1000_write_ich8_data(hw, index, 2, data);
+    return status;
+}
+
+/******************************************************************************
+ * Erases the segment specified. Each segment is a 4K block. Segments are
+ * 0-based; segment N starts at byte offset 4096 * N from the flash base
+ * address.
+ *
+ * hw - pointer to e1000_hw structure
+ * segment - 0 for first segment, 1 for second segment, etc.
+ *****************************************************************************/
+int32_t
+e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment)
+{
+    union ich8_hws_flash_status hsfsts;
+    union ich8_hws_flash_ctrl hsflctl;
+    uint32_t flash_linear_address;
+    int32_t  count = 0;
+    int32_t  error = E1000_ERR_EEPROM;
+    int32_t  iteration, seg_size;
+    int32_t  sector_size;
+    int32_t  j = 0;
+    int32_t  error_flag = 0;
+
+    hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+
+    /* Determine HW Sector size: Read BERASE bits of Hw flash Status register */
+    /* 00: The Hw sector is 256 bytes, hence we need to erase 16
+     *     consecutive sectors.  The start index for the nth Hw sector can be
+     *     calculated as = segment * 4096 + n * 256
+     * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+     *     The start index for the nth Hw sector can be calculated
+     *     as = segment * 4096
+     * 10: Error condition
+     * 11: The Hw sector is 64K bytes; the code below then erases one
+     *     whole 64K sector. */
+    if (hsfsts.hsf_status.berasesz == 0x0) {
+        /* Hw sector size 256 */
+        sector_size = seg_size = ICH8_FLASH_SEG_SIZE_256;
+        iteration = ICH8_FLASH_SECTOR_SIZE / ICH8_FLASH_SEG_SIZE_256;
+    } else if (hsfsts.hsf_status.berasesz == 0x1) {
+        sector_size = seg_size = ICH8_FLASH_SEG_SIZE_4K;
+        iteration = 1;
+    } else if (hsfsts.hsf_status.berasesz == 0x3) {
+        sector_size = seg_size = ICH8_FLASH_SEG_SIZE_64K;
+        iteration = 1;
+    } else {
+        return error;
+    }
+
+    for (j = 0; j < iteration; j++) {
+        do {
+            count++;
+            /* Steps */
+            error = e1000_ich8_cycle_init(hw);
+            if (error != E1000_SUCCESS) {
+                error_flag = 1;
+                break;
+            }
+
+            /* Write a value 11 (block Erase) in Flash Cycle field in Hw flash
+             * Control */
+            hsflctl.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFCTL);
+            hsflctl.hsf_ctrl.flcycle = ICH8_CYCLE_ERASE;
+            E1000_WRITE_ICH8_REG16(hw, ICH8_FLASH_HSFCTL, hsflctl.regval);
+
+            /* Write the last 24 bits of an index within the block into Flash
+             * Linear address field in Flash Address.  This probably needs to
+             * be calculated here based off the on-chip segment size and the
+             * software segment size assumed (4K) */
+            /* TBD */
+            flash_linear_address = segment * sector_size + j * seg_size;
+            flash_linear_address &= ICH8_FLASH_LINEAR_ADDR_MASK;
+            flash_linear_address += hw->flash_base_addr;
+
+            E1000_WRITE_ICH8_REG(hw, ICH8_FLASH_FADDR, flash_linear_address);
+
+            error = e1000_ich8_flash_cycle(hw, 1000000);
+            /* If FCERR is set, clear it and retry the whole sequence a few
+             * more times; otherwise we are done */
+            if (error == E1000_SUCCESS) {
+                break;
+            } else {
+                hsfsts.regval = E1000_READ_ICH8_REG16(hw, ICH8_FLASH_HSFSTS);
+                if (hsfsts.hsf_status.flcerr == 1) {
+                    /* repeat for some time before giving up */
+                    continue;
+                } else if (hsfsts.hsf_status.flcdone == 0) {
+                    error_flag = 1;
+                    break;
+                }
+            }
+        } while ((count < ICH8_FLASH_CYCLE_REPEAT_COUNT) && !error_flag);
+        if (error_flag == 1)
+            break;
+    }
+    if (error_flag != 1)
+        error = E1000_SUCCESS;
+    return error;
+}
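+
+/* Worked example (editorial) for the common 4K case (berasesz == 01b):
+ * sector_size = seg_size = 4096 and iteration = 1, so erasing segment 2
+ * issues a single block-erase cycle at linear address 2 * 4096 = 0x2000,
+ * relative to hw->flash_base_addr.
+ */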
+
+/******************************************************************************
+ *
+ * Reverse duplex setting without breaking the link.
+ *
+ * hw: Struct containing variables accessed by shared code
+ *
+ *****************************************************************************/
+int32_t
+e1000_duplex_reversal(struct e1000_hw *hw)
+{
+    int32_t ret_val;
+    uint16_t phy_data;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    ret_val = e1000_read_phy_reg(hw, PHY_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data ^= MII_CR_FULL_DUPLEX;
+
+    ret_val = e1000_write_phy_reg(hw, PHY_CTRL, phy_data);
+    if (ret_val)
+        return ret_val;
+
+    ret_val = e1000_read_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, &phy_data);
+    if (ret_val)
+        return ret_val;
+
+    phy_data |= IGP3_PHY_MISC_DUPLEX_MANUAL_SET;
+    ret_val = e1000_write_phy_reg(hw, IGP3E1000_PHY_MISC_CTRL, phy_data);
+
+    return ret_val;
+}
+
+int32_t
+e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw,
+                                      uint32_t cnf_base_addr, uint32_t cnf_size)
+{
+    uint32_t ret_val = E1000_SUCCESS;
+    uint16_t word_addr, reg_data, reg_addr;
+    uint16_t i;
+
+    /* cnf_base_addr is in DWORD */
+    word_addr = (uint16_t)(cnf_base_addr << 1);
+
+    /* cnf_size is given in dwords */
+    for (i = 0; i < cnf_size; i++) {
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2), 1, &reg_data);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_read_eeprom(hw, (word_addr + i*2 + 1), 1, &reg_addr);
+        if (ret_val)
+            return ret_val;
+
+        ret_val = e1000_get_software_flag(hw);
+        if (ret_val != E1000_SUCCESS)
+            return ret_val;
+
+        ret_val = e1000_write_phy_reg_ex(hw, (uint32_t)reg_addr, reg_data);
+
+        e1000_release_software_flag(hw);
+    }
+
+    return ret_val;
+}
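+
+/* Layout assumed by the loop above: the extended configuration region is a
+ * sequence of dwords, each holding a (reg_data, reg_addr) word pair, so
+ * pair i occupies EEPROM words word_addr + 2*i and word_addr + 2*i + 1;
+ * each pair is then replayed to the PHY under the software flag.
+ */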
+
+
+int32_t
+e1000_init_lcd_from_nvm(struct e1000_hw *hw)
+{
+    uint32_t reg_data, cnf_base_addr, cnf_size, ret_val, loop;
+
+    if (hw->phy_type != e1000_phy_igp_3)
+        return E1000_SUCCESS;
+
+    /* Check if SW needs to configure the PHY */
+    reg_data = E1000_READ_REG(hw, FEXTNVM);
+    if (!(reg_data & FEXTNVM_SW_CONFIG))
+        return E1000_SUCCESS;
+
+    /* Wait for basic configuration to complete before proceeding */
+    loop = 0;
+    do {
+        reg_data = E1000_READ_REG(hw, STATUS) & E1000_STATUS_LAN_INIT_DONE;
+        usec_delay(100);
+        loop++;
+    } while ((!reg_data) && (loop < 50));
+
+    /* Clear the Init Done bit for the next init event */
+    reg_data = E1000_READ_REG(hw, STATUS);
+    reg_data &= ~E1000_STATUS_LAN_INIT_DONE;
+    E1000_WRITE_REG(hw, STATUS, reg_data);
+
+    /* Make sure HW does not configure LCD from PHY extended configuration
+       before SW configuration */
+    reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+    if ((reg_data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE) == 0x0000) {
+        reg_data = E1000_READ_REG(hw, EXTCNF_SIZE);
+        cnf_size = reg_data & E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH;
+        cnf_size >>= 16;
+        if (cnf_size) {
+            reg_data = E1000_READ_REG(hw, EXTCNF_CTRL);
+            cnf_base_addr = reg_data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER;
+            /* cnf_base_addr is in DWORD */
+            cnf_base_addr >>= 16;
+
+            /* Configure LCD from extended configuration region. */
+            ret_val = e1000_init_lcd_from_nvm_config_region(hw, cnf_base_addr,
+                                                            cnf_size);
+            if (ret_val)
+                return ret_val;
+        }
+    }
+
+    return E1000_SUCCESS;
+}
+
+
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h
new file mode 100644
index 0000000..93cbe37
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_hw.h
@@ -0,0 +1,3454 @@
+/*******************************************************************************
+
+  
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+  
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_hw.h
+ * Structures, enums, and macros for the MAC
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+
+
+/* Forward declarations of structures used by the shared code */
+struct e1000_hw;
+struct e1000_hw_stats;
+
+/* Enumerated types specific to the e1000 hardware */
+/* Media Access Controllers */
+typedef enum {
+    e1000_undefined = 0,
+    e1000_82542_rev2_0,
+    e1000_82542_rev2_1,
+    e1000_82543,
+    e1000_82544,
+    e1000_82540,
+    e1000_82545,
+    e1000_82545_rev_3,
+    e1000_82546,
+    e1000_82546_rev_3,
+    e1000_82541,
+    e1000_82541_rev_2,
+    e1000_82547,
+    e1000_82547_rev_2,
+    e1000_82571,
+    e1000_82572,
+    e1000_82573,
+    e1000_80003es2lan,
+    e1000_ich8lan,
+    e1000_num_macs
+} e1000_mac_type;
+
+typedef enum {
+    e1000_eeprom_uninitialized = 0,
+    e1000_eeprom_spi,
+    e1000_eeprom_microwire,
+    e1000_eeprom_flash,
+    e1000_eeprom_ich8,
+    e1000_eeprom_none, /* No NVM support */
+    e1000_num_eeprom_types
+} e1000_eeprom_type;
+
+/* Media Types */
+typedef enum {
+    e1000_media_type_copper = 0,
+    e1000_media_type_fiber = 1,
+    e1000_media_type_internal_serdes = 2,
+    e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+    e1000_10_half = 0,
+    e1000_10_full = 1,
+    e1000_100_half = 2,
+    e1000_100_full = 3
+} e1000_speed_duplex_type;
+
+/* Flow Control Settings */
+typedef enum {
+    e1000_fc_none = 0,
+    e1000_fc_rx_pause = 1,
+    e1000_fc_tx_pause = 2,
+    e1000_fc_full = 3,
+    e1000_fc_default = 0xFF
+} e1000_fc_type;
+
+struct e1000_shadow_ram {
+    uint16_t    eeprom_word;
+    boolean_t   modified;
+};
+
+/* PCI bus types */
+typedef enum {
+    e1000_bus_type_unknown = 0,
+    e1000_bus_type_pci,
+    e1000_bus_type_pcix,
+    e1000_bus_type_pci_express,
+    e1000_bus_type_reserved
+} e1000_bus_type;
+
+/* PCI bus speeds */
+typedef enum {
+    e1000_bus_speed_unknown = 0,
+    e1000_bus_speed_33,
+    e1000_bus_speed_66,
+    e1000_bus_speed_100,
+    e1000_bus_speed_120,
+    e1000_bus_speed_133,
+    e1000_bus_speed_2500,
+    e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+/* PCI bus widths */
+typedef enum {
+    e1000_bus_width_unknown = 0,
+    e1000_bus_width_32,
+    e1000_bus_width_64,
+    e1000_bus_width_pciex_1,
+    e1000_bus_width_pciex_2,
+    e1000_bus_width_pciex_4,
+    e1000_bus_width_reserved
+} e1000_bus_width;
+
+/* PHY status info structure and supporting enums */
+typedef enum {
+    e1000_cable_length_50 = 0,
+    e1000_cable_length_50_80,
+    e1000_cable_length_80_110,
+    e1000_cable_length_110_140,
+    e1000_cable_length_140,
+    e1000_cable_length_undefined = 0xFF
+} e1000_cable_length;
+
+typedef enum {
+    e1000_gg_cable_length_60 = 0,
+    e1000_gg_cable_length_60_115 = 1,
+    e1000_gg_cable_length_115_150 = 2,
+    e1000_gg_cable_length_150 = 4
+} e1000_gg_cable_length;
+
+typedef enum {
+    e1000_igp_cable_length_10  = 10,
+    e1000_igp_cable_length_20  = 20,
+    e1000_igp_cable_length_30  = 30,
+    e1000_igp_cable_length_40  = 40,
+    e1000_igp_cable_length_50  = 50,
+    e1000_igp_cable_length_60  = 60,
+    e1000_igp_cable_length_70  = 70,
+    e1000_igp_cable_length_80  = 80,
+    e1000_igp_cable_length_90  = 90,
+    e1000_igp_cable_length_100 = 100,
+    e1000_igp_cable_length_110 = 110,
+    e1000_igp_cable_length_115 = 115,
+    e1000_igp_cable_length_120 = 120,
+    e1000_igp_cable_length_130 = 130,
+    e1000_igp_cable_length_140 = 140,
+    e1000_igp_cable_length_150 = 150,
+    e1000_igp_cable_length_160 = 160,
+    e1000_igp_cable_length_170 = 170,
+    e1000_igp_cable_length_180 = 180
+} e1000_igp_cable_length;
+
+typedef enum {
+    e1000_10bt_ext_dist_enable_normal = 0,
+    e1000_10bt_ext_dist_enable_lower,
+    e1000_10bt_ext_dist_enable_undefined = 0xFF
+} e1000_10bt_ext_dist_enable;
+
+typedef enum {
+    e1000_rev_polarity_normal = 0,
+    e1000_rev_polarity_reversed,
+    e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+    e1000_downshift_normal = 0,
+    e1000_downshift_activated,
+    e1000_downshift_undefined = 0xFF
+} e1000_downshift;
+
+typedef enum {
+    e1000_smart_speed_default = 0,
+    e1000_smart_speed_on,
+    e1000_smart_speed_off
+} e1000_smart_speed;
+
+typedef enum {
+    e1000_polarity_reversal_enabled = 0,
+    e1000_polarity_reversal_disabled,
+    e1000_polarity_reversal_undefined = 0xFF
+} e1000_polarity_reversal;
+
+typedef enum {
+    e1000_auto_x_mode_manual_mdi = 0,
+    e1000_auto_x_mode_manual_mdix,
+    e1000_auto_x_mode_auto1,
+    e1000_auto_x_mode_auto2,
+    e1000_auto_x_mode_undefined = 0xFF
+} e1000_auto_x_mode;
+
+typedef enum {
+    e1000_1000t_rx_status_not_ok = 0,
+    e1000_1000t_rx_status_ok,
+    e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+    e1000_phy_m88 = 0,
+    e1000_phy_igp,
+    e1000_phy_igp_2,
+    e1000_phy_gg82563,
+    e1000_phy_igp_3,
+    e1000_phy_ife,
+    e1000_phy_undefined = 0xFF
+} e1000_phy_type;
+
+typedef enum {
+    e1000_ms_hw_default = 0,
+    e1000_ms_force_master,
+    e1000_ms_force_slave,
+    e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+    e1000_ffe_config_enabled = 0,
+    e1000_ffe_config_active,
+    e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+    e1000_dsp_config_disabled = 0,
+    e1000_dsp_config_enabled,
+    e1000_dsp_config_activated,
+    e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+struct e1000_phy_info {
+    e1000_cable_length cable_length;
+    e1000_10bt_ext_dist_enable extended_10bt_distance;
+    e1000_rev_polarity cable_polarity;
+    e1000_downshift downshift;
+    e1000_polarity_reversal polarity_correction;
+    e1000_auto_x_mode mdix_mode;
+    e1000_1000t_rx_status local_rx;
+    e1000_1000t_rx_status remote_rx;
+};
+
+struct e1000_phy_stats {
+    uint32_t idle_errors;
+    uint32_t receive_errors;
+};
+
+struct e1000_eeprom_info {
+    e1000_eeprom_type type;
+    uint16_t word_size;
+    uint16_t opcode_bits;
+    uint16_t address_bits;
+    uint16_t delay_usec;
+    uint16_t page_size;
+    boolean_t use_eerd;
+    boolean_t use_eewr;
+};
+
+/* Flex ASF Information */
+#define E1000_HOST_IF_MAX_SIZE  2048
+
+typedef enum {
+    e1000_byte_align = 0,
+    e1000_word_align = 1,
+    e1000_dword_align = 2
+} e1000_align_type;
+
+
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_EEPROM   1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_TYPE 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+
+/* Function prototypes */
+/* Initialization */
+int32_t e1000_reset_hw(struct e1000_hw *hw);
+int32_t e1000_init_hw(struct e1000_hw *hw);
+int32_t e1000_id_led_init(struct e1000_hw * hw);
+int32_t e1000_set_mac_type(struct e1000_hw *hw);
+void e1000_set_media_type(struct e1000_hw *hw);
+
+/* Link Configuration */
+int32_t e1000_setup_link(struct e1000_hw *hw);
+int32_t e1000_phy_setup_autoneg(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+int32_t e1000_config_fc_after_link_up(struct e1000_hw *hw);
+int32_t e1000_check_for_link(struct e1000_hw *hw);
+int32_t e1000_get_speed_and_duplex(struct e1000_hw *hw, uint16_t * speed, uint16_t * duplex);
+int32_t e1000_wait_autoneg(struct e1000_hw *hw);
+int32_t e1000_force_mac_fc(struct e1000_hw *hw);
+
+/* PHY */
+int32_t e1000_read_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *phy_data);
+int32_t e1000_write_phy_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
+int32_t e1000_phy_hw_reset(struct e1000_hw *hw);
+int32_t e1000_phy_reset(struct e1000_hw *hw);
+void e1000_phy_powerdown_workaround(struct e1000_hw *hw);
+int32_t e1000_kumeran_lock_loss_workaround(struct e1000_hw *hw);
+int32_t e1000_duplex_reversal(struct e1000_hw *hw);
+int32_t e1000_init_lcd_from_nvm_config_region(struct e1000_hw *hw, uint32_t cnf_base_addr, uint32_t cnf_size);
+int32_t e1000_init_lcd_from_nvm(struct e1000_hw *hw);
+int32_t e1000_detect_gig_phy(struct e1000_hw *hw);
+int32_t e1000_phy_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_phy_m88_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_phy_igp_get_info(struct e1000_hw *hw, struct e1000_phy_info *phy_info);
+int32_t e1000_get_cable_length(struct e1000_hw *hw, uint16_t *min_length, uint16_t *max_length);
+int32_t e1000_check_polarity(struct e1000_hw *hw, uint16_t *polarity);
+int32_t e1000_check_downshift(struct e1000_hw *hw);
+int32_t e1000_validate_mdi_setting(struct e1000_hw *hw);
+int32_t e1000_read_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t *data);
+int32_t e1000_write_kmrn_reg(struct e1000_hw *hw, uint32_t reg_addr, uint16_t data);
+
+/* EEPROM Functions */
+int32_t e1000_init_eeprom_params(struct e1000_hw *hw);
+boolean_t e1000_is_onboard_nvm_eeprom(struct e1000_hw *hw);
+int32_t e1000_read_eeprom_eerd(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
+int32_t e1000_write_eeprom_eewr(struct e1000_hw *hw, uint16_t offset, uint16_t words, uint16_t *data);
+int32_t e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int eerd);
+
+/* MNG HOST IF functions */
+uint32_t e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD   64
+#define E1000_HI_MAX_MNG_DATA_LENGTH    0x6F8   /* Host Interface data length */
+
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT  10      /* Time in ms to process MNG command */
+#define E1000_MNG_DHCP_COOKIE_OFFSET    0x6F0   /* Cookie offset */
+#define E1000_MNG_DHCP_COOKIE_LENGTH    0x10    /* Cookie length */
+#define E1000_MNG_IAMT_MODE             0x3
+#define E1000_MNG_ICH_IAMT_MODE         0x2
+#define E1000_IAMT_SIGNATURE            0x544D4149 /* Intel(R) Active Management Technology signature */
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING_SUPPORT 0x1 /* DHCP parsing enabled */
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN_SUPPORT    0x2 /* VLAN support enabled */
+#define E1000_VFTA_ENTRY_SHIFT                       0x5
+#define E1000_VFTA_ENTRY_MASK                        0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK              0x1F
+
+struct e1000_host_mng_command_header {
+    uint8_t command_id;
+    uint8_t checksum;
+    uint16_t reserved1;
+    uint16_t reserved2;
+    uint16_t command_length;
+};
+
+struct e1000_host_mng_command_info {
+    struct e1000_host_mng_command_header command_header;  /* Command header / command result header */
+    uint8_t command_data[E1000_HI_MAX_MNG_DATA_LENGTH];   /* Command data, 0..0x658 bytes long */
+};
+#ifdef E1000_BIG_ENDIAN
+struct e1000_host_mng_dhcp_cookie{
+    uint32_t signature;
+    uint16_t vlan_id;
+    uint8_t reserved0;
+    uint8_t status;
+    uint32_t reserved1;
+    uint8_t checksum;
+    uint8_t reserved3;
+    uint16_t reserved2;
+};
+#else
+struct e1000_host_mng_dhcp_cookie{
+    uint32_t signature;
+    uint8_t status;
+    uint8_t reserved0;
+    uint16_t vlan_id;
+    uint32_t reserved1;
+    uint16_t reserved2;
+    uint8_t reserved3;
+    uint8_t checksum;
+};
+#endif
+
+int32_t e1000_mng_write_dhcp_info(struct e1000_hw *hw, uint8_t *buffer,
+                                  uint16_t length);
+boolean_t e1000_check_mng_mode(struct e1000_hw *hw);
+boolean_t e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+int32_t e1000_mng_enable_host_if(struct e1000_hw *hw);
+int32_t e1000_mng_host_if_write(struct e1000_hw *hw, uint8_t *buffer,
+                            uint16_t length, uint16_t offset, uint8_t *sum);
+int32_t e1000_mng_write_cmd_header(struct e1000_hw* hw,
+                                   struct e1000_host_mng_command_header* hdr);
+
+int32_t e1000_mng_write_commit(struct e1000_hw *hw);
+
+int32_t e1000_read_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
+int32_t e1000_validate_eeprom_checksum(struct e1000_hw *hw);
+int32_t e1000_update_eeprom_checksum(struct e1000_hw *hw);
+int32_t e1000_write_eeprom(struct e1000_hw *hw, uint16_t reg, uint16_t words, uint16_t *data);
+int32_t e1000_read_part_num(struct e1000_hw *hw, uint32_t * part_num);
+int32_t e1000_read_mac_addr(struct e1000_hw * hw);
+int32_t e1000_swfw_sync_acquire(struct e1000_hw *hw, uint16_t mask);
+void e1000_swfw_sync_release(struct e1000_hw *hw, uint16_t mask);
+void e1000_release_software_flag(struct e1000_hw *hw);
+int32_t e1000_get_software_flag(struct e1000_hw *hw);
+
+/* Filters (multicast, vlan, receive) */
+void e1000_init_rx_addrs(struct e1000_hw *hw);
+void e1000_mc_addr_list_update(struct e1000_hw *hw, uint8_t * mc_addr_list, uint32_t mc_addr_count, uint32_t pad, uint32_t rar_used_count);
+uint32_t e1000_hash_mc_addr(struct e1000_hw *hw, uint8_t * mc_addr);
+void e1000_mta_set(struct e1000_hw *hw, uint32_t hash_value);
+void e1000_rar_set(struct e1000_hw *hw, uint8_t * mc_addr, uint32_t rar_index);
+void e1000_write_vfta(struct e1000_hw *hw, uint32_t offset, uint32_t value);
+void e1000_clear_vfta(struct e1000_hw *hw);
+
+/* LED functions */
+int32_t e1000_setup_led(struct e1000_hw *hw);
+int32_t e1000_cleanup_led(struct e1000_hw *hw);
+int32_t e1000_led_on(struct e1000_hw *hw);
+int32_t e1000_led_off(struct e1000_hw *hw);
+int32_t e1000_blink_led_start(struct e1000_hw *hw);
+
+/* Adaptive IFS Functions */
+
+/* Everything else */
+void e1000_clear_hw_cntrs(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+void e1000_tbi_adjust_stats(struct e1000_hw *hw, struct e1000_hw_stats *stats, uint32_t frame_len, uint8_t * mac_addr);
+void e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t * value);
+/* Port I/O is only supported on 82544 and newer */
+uint32_t e1000_io_read(struct e1000_hw *hw, unsigned long port);
+uint32_t e1000_read_reg_io(struct e1000_hw *hw, uint32_t offset);
+void e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value);
+void e1000_write_reg_io(struct e1000_hw *hw, uint32_t offset, uint32_t value);
+int32_t e1000_config_dsp_after_link_change(struct e1000_hw *hw, boolean_t link_up);
+int32_t e1000_set_d3_lplu_state(struct e1000_hw *hw, boolean_t active);
+int32_t e1000_set_d0_lplu_state(struct e1000_hw *hw, boolean_t active);
+void e1000_set_pci_express_master_disable(struct e1000_hw *hw);
+void e1000_enable_pciex_master(struct e1000_hw *hw);
+int32_t e1000_disable_pciex_master(struct e1000_hw *hw);
+int32_t e1000_get_auto_rd_done(struct e1000_hw *hw);
+int32_t e1000_get_phy_cfg_done(struct e1000_hw *hw);
+int32_t e1000_get_software_semaphore(struct e1000_hw *hw);
+void e1000_release_software_semaphore(struct e1000_hw *hw);
+int32_t e1000_check_phy_reset_block(struct e1000_hw *hw);
+int32_t e1000_get_hw_eeprom_semaphore(struct e1000_hw *hw);
+void e1000_put_hw_eeprom_semaphore(struct e1000_hw *hw);
+int32_t e1000_commit_shadow_ram(struct e1000_hw *hw);
+uint8_t e1000_arc_subsystem_valid(struct e1000_hw *hw);
+int32_t e1000_set_pci_ex_no_snoop(struct e1000_hw *hw, uint32_t no_snoop);
+
+int32_t e1000_read_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                             uint8_t *data);
+int32_t e1000_verify_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                                     uint8_t byte);
+int32_t e1000_write_ich8_byte(struct e1000_hw *hw, uint32_t index,
+                              uint8_t byte);
+int32_t e1000_read_ich8_word(struct e1000_hw *hw, uint32_t index,
+                             uint16_t *data);
+int32_t e1000_write_ich8_word(struct e1000_hw *hw, uint32_t index,
+                              uint16_t word);
+int32_t e1000_read_ich8_data(struct e1000_hw *hw, uint32_t index,
+                             uint32_t size, uint16_t *data);
+int32_t e1000_write_ich8_data(struct e1000_hw *hw, uint32_t index,
+                              uint32_t size, uint16_t data);
+int32_t e1000_read_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+                               uint16_t words, uint16_t *data);
+int32_t e1000_write_eeprom_ich8(struct e1000_hw *hw, uint16_t offset,
+                                uint16_t words, uint16_t *data);
+int32_t e1000_erase_ich8_4k_segment(struct e1000_hw *hw, uint32_t segment);
+int32_t e1000_ich8_cycle_init(struct e1000_hw *hw);
+int32_t e1000_ich8_flash_cycle(struct e1000_hw *hw, uint32_t timeout);
+int32_t e1000_phy_ife_get_info(struct e1000_hw *hw,
+                               struct e1000_phy_info *phy_info);
+int32_t e1000_ife_disable_dynamic_power_down(struct e1000_hw *hw);
+int32_t e1000_ife_enable_dynamic_power_down(struct e1000_hw *hw);
+
+#define E1000_READ_REG_IO(a, reg) \
+    e1000_read_reg_io((a), E1000_##reg)
+#define E1000_WRITE_REG_IO(a, reg, val) \
+    e1000_write_reg_io((a), E1000_##reg, val)
+
+/* PCI Device IDs */
+#define E1000_DEV_ID_82542               0x1000
+#define E1000_DEV_ID_82543GC_FIBER       0x1001
+#define E1000_DEV_ID_82543GC_COPPER      0x1004
+#define E1000_DEV_ID_82544EI_COPPER      0x1008
+#define E1000_DEV_ID_82544EI_FIBER       0x1009
+#define E1000_DEV_ID_82544GC_COPPER      0x100C
+#define E1000_DEV_ID_82544GC_LOM         0x100D
+#define E1000_DEV_ID_82540EM             0x100E
+#define E1000_DEV_ID_82540EM_LOM         0x1015
+#define E1000_DEV_ID_82540EP_LOM         0x1016
+#define E1000_DEV_ID_82540EP             0x1017
+#define E1000_DEV_ID_82540EP_LP          0x101E
+#define E1000_DEV_ID_82545EM_COPPER      0x100F
+#define E1000_DEV_ID_82545EM_FIBER       0x1011
+#define E1000_DEV_ID_82545GM_COPPER      0x1026
+#define E1000_DEV_ID_82545GM_FIBER       0x1027
+#define E1000_DEV_ID_82545GM_SERDES      0x1028
+#define E1000_DEV_ID_82546EB_COPPER      0x1010
+#define E1000_DEV_ID_82546EB_FIBER       0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER 0x101D
+#define E1000_DEV_ID_82541EI             0x1013
+#define E1000_DEV_ID_82541EI_MOBILE      0x1018
+#define E1000_DEV_ID_82541ER_LOM         0x1014
+#define E1000_DEV_ID_82541ER             0x1078
+#define E1000_DEV_ID_82547GI             0x1075
+#define E1000_DEV_ID_82541GI             0x1076
+#define E1000_DEV_ID_82541GI_MOBILE      0x1077
+#define E1000_DEV_ID_82541GI_LF          0x107C
+#define E1000_DEV_ID_82546GB_COPPER      0x1079
+#define E1000_DEV_ID_82546GB_FIBER       0x107A
+#define E1000_DEV_ID_82546GB_SERDES      0x107B
+#define E1000_DEV_ID_82546GB_PCIE        0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER 0x1099
+#define E1000_DEV_ID_82547EI             0x1019
+#define E1000_DEV_ID_82547EI_MOBILE      0x101A
+#define E1000_DEV_ID_82571EB_COPPER      0x105E
+#define E1000_DEV_ID_82571EB_FIBER       0x105F
+#define E1000_DEV_ID_82571EB_SERDES      0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER 0x10A4
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LOWPROFILE  0x10BC
+#define E1000_DEV_ID_82572EI_COPPER      0x107D
+#define E1000_DEV_ID_82572EI_FIBER       0x107E
+#define E1000_DEV_ID_82572EI_SERDES      0x107F
+#define E1000_DEV_ID_82572EI             0x10B9
+#define E1000_DEV_ID_82573E              0x108B
+#define E1000_DEV_ID_82573E_IAMT         0x108C
+#define E1000_DEV_ID_82573L              0x109A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT     0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT     0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT     0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT     0x10BB
+
+#define E1000_DEV_ID_ICH8_IGP_M_AMT      0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT        0x104A
+#define E1000_DEV_ID_ICH8_IGP_C          0x104B
+#define E1000_DEV_ID_ICH8_IFE            0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT         0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G          0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M          0x104D
+
+
+#define NODE_ADDRESS_SIZE 6
+#define ETH_LENGTH_OF_ADDRESS 6
+
+/* MAC decode size is 128K - This is the size of BAR0 */
+#define MAC_DECODE_SIZE (128 * 1024)
+
+#define E1000_82542_2_0_REV_ID 2
+#define E1000_82542_2_1_REV_ID 3
+#define E1000_REVISION_0       0
+#define E1000_REVISION_1       1
+#define E1000_REVISION_2       2
+#define E1000_REVISION_3       3
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+/* The sizes (in bytes) of an Ethernet packet */
+#define ENET_HEADER_SIZE             14
+#define MAXIMUM_ETHERNET_FRAME_SIZE  1518 /* With FCS */
+#define MINIMUM_ETHERNET_FRAME_SIZE  64   /* With FCS */
+#define ETHERNET_FCS_SIZE            4
+#define MAXIMUM_ETHERNET_PACKET_SIZE \
+    (MAXIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define MINIMUM_ETHERNET_PACKET_SIZE \
+    (MINIMUM_ETHERNET_FRAME_SIZE - ETHERNET_FCS_SIZE)
+#define CRC_LENGTH                   ETHERNET_FCS_SIZE
+#define MAX_JUMBO_FRAME_SIZE         0x3F00
+
+
+/* 802.1q VLAN Packet Sizes */
+#define VLAN_TAG_SIZE  4     /* 802.3ac tag (not DMAed) */
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+#define ETHERNET_IP_TYPE        0x0800  /* IP packets */
+#define ETHERNET_ARP_TYPE       0x0806  /* Address Resolution Protocol (ARP) */
+
+/* Packet Header defines */
+#define IP_PROTOCOL_TCP    6
+#define IP_PROTOCOL_UDP    0x11
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |         \
+    E1000_IMS_RXSEQ)
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
+
+/* Additional interrupts need to be handled for e1000_ich8lan:
+    DSW = The FW changed the status of the DISSW bit in FWSM
+    PHYINT = The LAN connected device generates an interrupt
+    EPRST = Manageability reset event */
+#define IMS_ICH8LAN_ENABLE_MASK (\
+    E1000_IMS_DSW   | \
+    E1000_IMS_PHYINT | \
+    E1000_IMS_EPRST)
+
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor. We
+ * reserve one of these spots for our directed address, allowing us room for
+ * E1000_RAR_ENTRIES - 1 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES 15
+#define E1000_RAR_ENTRIES_ICH8LAN  7
+
+#define MIN_NUMBER_OF_DESCRIPTORS 8
+#define MAX_NUMBER_OF_DESCRIPTORS 0xFFF8
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+    uint64_t buffer_addr; /* Address of the descriptor's data buffer */
+    uint16_t length;     /* Length of data DMAed into data buffer */
+    uint16_t csum;       /* Packet checksum */
+    uint8_t status;      /* Descriptor status */
+    uint8_t errors;      /* Descriptor Errors */
+    uint16_t special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+    struct {
+        uint64_t buffer_addr;
+        uint64_t reserved;
+    } read;
+    struct {
+        struct {
+            uint32_t mrq;              /* Multiple Rx Queues */
+            union {
+                uint32_t rss;          /* RSS Hash */
+                struct {
+                    uint16_t ip_id;    /* IP id */
+                    uint16_t csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            uint32_t status_error;     /* ext status/error */
+            uint16_t length;
+            uint16_t vlan;             /* VLAN tag */
+        } upper;
+    } wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+    struct {
+        /* one buffer for protocol header(s), three data buffers */
+        uint64_t buffer_addr[MAX_PS_BUFFERS];
+    } read;
+    struct {
+        struct {
+            uint32_t mrq;              /* Multiple Rx Queues */
+            union {
+                uint32_t rss;          /* RSS Hash */
+                struct {
+                    uint16_t ip_id;    /* IP id */
+                    uint16_t csum;     /* Packet Checksum */
+                } csum_ip;
+            } hi_dword;
+        } lower;
+        struct {
+            uint32_t status_error;     /* ext status/error */
+            uint16_t length0;          /* length of buffer 0 */
+            uint16_t vlan;             /* VLAN tag */
+        } middle;
+        struct {
+            uint16_t header_status;
+            uint16_t length[3];        /* length of buffers 1-3 */
+        } upper;
+        uint64_t reserved;
+    } wb; /* writeback */
+};
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
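+
+/* Editor's sketch (not part of the original header): a minimal helper
+ * showing how the status bits and "special" field masks above combine when
+ * inspecting a received legacy descriptor.  The helper name is illustrative
+ * only.  Returns 1 and stores the VLAN ID when the descriptor is complete
+ * and carries a tag, 0 otherwise.
+ */
+static inline int e1000_example_rxd_vlan_id(const struct e1000_rx_desc *desc,
+                                            uint16_t *vlan_id)
+{
+    /* Hardware has not written this descriptor back yet */
+    if (!(desc->status & E1000_RXD_STAT_DD))
+        return 0;
+
+    /* No IEEE VLAN tag on this packet */
+    if (!(desc->status & E1000_RXD_STAT_VP))
+        return 0;
+
+    /* The VLAN ID occupies the low 12 bits of the special field */
+    *vlan_id = desc->special & E1000_RXD_SPC_VLAN_MASK;
+    return 1;
+}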
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP        0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK  0x000003FF
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
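+
+/* Typical (illustrative) use of the mask above when completing receive
+ * descriptors: a frame is dropped rather than handed up the stack when
+ *
+ *     (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) != 0
+ */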
+
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+    uint64_t buffer_addr;       /* Address of the descriptor's data buffer */
+    union {
+        uint32_t data;
+        struct {
+            uint16_t length;    /* Data buffer length */
+            uint8_t cso;        /* Checksum offset */
+            uint8_t cmd;        /* Descriptor control */
+        } flags;
+    } lower;
+    union {
+        uint32_t data;
+        struct {
+            uint8_t status;     /* Descriptor status */
+            uint8_t css;        /* Checksum start */
+            uint16_t special;
+        } fields;
+    } upper;
+};
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
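+
+/* Illustrative sketch, not part of the original header: queueing one frame
+ * with a legacy transmit descriptor. The command bits live in the same
+ * dword as the 16-bit length, so they can simply be OR-ed together; RS
+ * requests a status writeback so DD can be polled later:
+ *
+ *     txd->buffer_addr = dma_addr;                   // bus address of the frame
+ *     txd->lower.data  = len | E1000_TXD_CMD_EOP |   // last buffer of packet
+ *                        E1000_TXD_CMD_IFCS |        // HW appends the CRC
+ *                        E1000_TXD_CMD_RS;           // report status
+ *     txd->upper.data  = 0;
+ *     ...
+ *     done = txd->upper.fields.status & E1000_TXD_STAT_DD;
+ */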
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+    union {
+        uint32_t ip_config;
+        struct {
+            uint8_t ipcss;      /* IP checksum start */
+            uint8_t ipcso;      /* IP checksum offset */
+            uint16_t ipcse;     /* IP checksum end */
+        } ip_fields;
+    } lower_setup;
+    union {
+        uint32_t tcp_config;
+        struct {
+            uint8_t tucss;      /* TCP checksum start */
+            uint8_t tucso;      /* TCP checksum offset */
+            uint16_t tucse;     /* TCP checksum end */
+        } tcp_fields;
+    } upper_setup;
+    uint32_t cmd_and_length;    /* Descriptor command and length */
+    union {
+        uint32_t data;
+        struct {
+            uint8_t status;     /* Descriptor status */
+            uint8_t hdr_len;    /* Header length */
+            uint16_t mss;       /* Maximum segment size */
+        } fields;
+    } tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+    uint64_t buffer_addr;       /* Address of the descriptor's data buffer */
+    union {
+        uint32_t data;
+        struct {
+            uint16_t length;    /* Data buffer length */
+            uint8_t typ_len_ext;        /* Descriptor type and length extension */
+            uint8_t cmd;        /* Descriptor control */
+        } flags;
+    } lower;
+    union {
+        uint32_t data;
+        struct {
+            uint8_t status;     /* Descriptor status */
+            uint8_t popts;      /* Packet Options */
+            uint16_t special;
+        } fields;
+    } upper;
+};
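+
+/* Illustrative sketch, not part of the original header: programming a
+ * context descriptor for TCP checksum insertion ahead of the data
+ * descriptors it governs. Offsets are bytes from the start of the frame;
+ * tucse = 0 means "checksum through the end of the packet", and 16 is the
+ * offset of the checksum field within the TCP header:
+ *
+ *     ctx->lower_setup.ip_config        = 0;     // no IP checksum offload
+ *     ctx->upper_setup.tcp_fields.tucss = tcp_off;
+ *     ctx->upper_setup.tcp_fields.tucso = tcp_off + 16;
+ *     ctx->upper_setup.tcp_fields.tucse = 0;
+ *     ctx->cmd_and_length = E1000_TXD_CMD_DEXT;  // extended descriptor
+ */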
+
+/* Filters */
+#define E1000_NUM_UNICAST          16   /* Unicast filter entries */
+#define E1000_MC_TBL_SIZE          128  /* Multicast Filter Table (4096 bits) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+#define E1000_NUM_UNICAST_ICH8LAN  7
+#define E1000_MC_TBL_SIZE_ICH8LAN  32
+
+/* Receive Address Register */
+struct e1000_rar {
+    volatile uint32_t low;      /* receive address low */
+    volatile uint32_t high;     /* receive address high */
+};
+
+/* Number of entries in the Multicast Table Array (MTA). */
+#define E1000_NUM_MTA_REGISTERS 128
+#define E1000_NUM_MTA_REGISTERS_ICH8LAN 32
+
+/* IPv4 Address Table Entry */
+struct e1000_ipv4_at_entry {
+    volatile uint32_t ipv4_addr;        /* IP Address (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Four wakeup IP addresses are supported */
+#define E1000_WAKEUP_IP_ADDRESS_COUNT_MAX 4
+#define E1000_IP4AT_SIZE                  E1000_WAKEUP_IP_ADDRESS_COUNT_MAX
+#define E1000_IP4AT_SIZE_ICH8LAN          3
+#define E1000_IP6AT_SIZE                  1
+
+/* IPv6 Address Table Entry */
+struct e1000_ipv6_at_entry {
+    volatile uint8_t ipv6_addr[16];
+};
+
+/* Flexible Filter Length Table Entry */
+struct e1000_fflt_entry {
+    volatile uint32_t length;   /* Flexible Filter Length (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Flexible Filter Mask Table Entry */
+struct e1000_ffmt_entry {
+    volatile uint32_t mask;     /* Flexible Filter Mask (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Flexible Filter Value Table Entry */
+struct e1000_ffvt_entry {
+    volatile uint32_t value;    /* Flexible Filter Value (RW) */
+    volatile uint32_t reserved;
+};
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+#define E1000_DISABLE_SERDES_LOOPBACK   0x0400
+
+/* Register Set. (82543, 82544)
+ *
+ * Registers are defined to be 32 bits and should be accessed as 32-bit values.
+ * These registers are physically located on the NIC, but are mapped into the
+ * host memory address space.
+ *
+ * RW - register is both readable and writable
+ * RO - register is read only
+ * WO - register is write only
+ * R/clr - register is read only and is cleared when read
+ * A - register array
+ */
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM register */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_RDTR1    0x02820  /* RX Delay Timer (1) - RW */
+#define E1000_RDBAL1   0x02900  /* RX Descriptor Base Address Low (1) - RW */
+#define E1000_RDBAH1   0x02904  /* RX Descriptor Base Address High (1) - RW */
+#define E1000_RDLEN1   0x02908  /* RX Descriptor Length (1) - RW */
+#define E1000_RDH1     0x02910  /* RX Descriptor Head (1) - RW */
+#define E1000_RDT1     0x02918  /* RX Descriptor Tail (1) - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* RX Configuration Word - RO */
+#define E1000_TCTL     0x00400  /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410  /* TX Inter-packet gap - RW */
+#define E1000_TBT      0x00448  /* TX Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define FEXTNVM_SW_CONFIG  0x0001
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEPROM Control */
+#define E1000_FLASH_UPDATES 1000
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDBAL    0x02800  /* RX Descriptor Base Address Low - RW */
+#define E1000_RDBAH    0x02804  /* RX Descriptor Base Address High - RW */
+#define E1000_RDLEN    0x02808  /* RX Descriptor Length - RW */
+#define E1000_RDH      0x02810  /* RX Descriptor Head - RW */
+#define E1000_RDT      0x02818  /* RX Descriptor Tail - RW */
+#define E1000_RDTR     0x02820  /* RX Delay Timer - RW */
+#define E1000_RDBAL0   E1000_RDBAL /* RX Desc Base Address Low (0) - RW */
+#define E1000_RDBAH0   E1000_RDBAH /* RX Desc Base Address High (0) - RW */
+#define E1000_RDLEN0   E1000_RDLEN /* RX Desc Length (0) - RW */
+#define E1000_RDH0     E1000_RDH   /* RX Desc Head (0) - RW */
+#define E1000_RDT0     E1000_RDT   /* RX Desc Tail (0) - RW */
+#define E1000_RDTR0    E1000_RDTR  /* RX Delay Timer (0) - RW */
+#define E1000_RXDCTL   0x02828  /* RX Descriptor Control queue 0 - RW */
+#define E1000_RXDCTL1  0x02928  /* RX Descriptor Control queue 1 - RW */
+#define E1000_RADV     0x0282C  /* RX Interrupt Absolute Delay Timer - RW */
+#define E1000_RSRPD    0x02C00  /* RX Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* TX DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* TX Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_TDBAL    0x03800  /* TX Descriptor Base Address Low - RW */
+#define E1000_TDBAH    0x03804  /* TX Descriptor Base Address High - RW */
+#define E1000_TDLEN    0x03808  /* TX Descriptor Length - RW */
+#define E1000_TDH      0x03810  /* TX Descriptor Head - RW */
+#define E1000_TDT      0x03818  /* TX Descriptor Tail - RW */
+#define E1000_TIDV     0x03820  /* TX Interrupt Delay Value - RW */
+#define E1000_TXDCTL   0x03828  /* TX Descriptor Control - RW */
+#define E1000_TADV     0x0382C  /* TX Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_TARC0    0x03840  /* TX Arbitration Count (0) */
+#define E1000_TDBAL1   0x03900  /* TX Desc Base Address Low (1) - RW */
+#define E1000_TDBAH1   0x03904  /* TX Desc Base Address High (1) - RW */
+#define E1000_TDLEN1   0x03908  /* TX Desc Length (1) - RW */
+#define E1000_TDH1     0x03910  /* TX Desc Head (1) - RW */
+#define E1000_TDT1     0x03918  /* TX Desc Tail (1) - RW */
+#define E1000_TXDCTL1  0x03928  /* TX Descriptor Control (1) - RW */
+#define E1000_TARC1    0x03940  /* TX Arbitration Count (1) */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control */
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KUMCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA     0x0003C  /* PHY address - RW */
+#define E1000_MANC2H     0x05860  /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC 0x05B5C /* Software-Firmware Synchronization - RW */
+
+#define E1000_GCR       0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_RETA      0x05C00 /* Redirection Table - RW Array */
+#define E1000_RSSRK     0x05C80 /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+
+/* Register Set (82542)
+ *
+ * Some of the 82542 registers are located at different offsets than they are
+ * in more current versions of the 8254x. Despite the difference in location,
+ * the registers function in the same manner.
+ */
+#define E1000_82542_CTRL     E1000_CTRL
+#define E1000_82542_CTRL_DUP E1000_CTRL_DUP
+#define E1000_82542_STATUS   E1000_STATUS
+#define E1000_82542_EECD     E1000_EECD
+#define E1000_82542_EERD     E1000_EERD
+#define E1000_82542_CTRL_EXT E1000_CTRL_EXT
+#define E1000_82542_FLA      E1000_FLA
+#define E1000_82542_MDIC     E1000_MDIC
+#define E1000_82542_SCTL     E1000_SCTL
+#define E1000_82542_FEXTNVM  E1000_FEXTNVM
+#define E1000_82542_FCAL     E1000_FCAL
+#define E1000_82542_FCAH     E1000_FCAH
+#define E1000_82542_FCT      E1000_FCT
+#define E1000_82542_VET      E1000_VET
+#define E1000_82542_RA       0x00040
+#define E1000_82542_ICR      E1000_ICR
+#define E1000_82542_ITR      E1000_ITR
+#define E1000_82542_ICS      E1000_ICS
+#define E1000_82542_IMS      E1000_IMS
+#define E1000_82542_IMC      E1000_IMC
+#define E1000_82542_RCTL     E1000_RCTL
+#define E1000_82542_RDTR     0x00108
+#define E1000_82542_RDBAL    0x00110
+#define E1000_82542_RDBAH    0x00114
+#define E1000_82542_RDLEN    0x00118
+#define E1000_82542_RDH      0x00120
+#define E1000_82542_RDT      0x00128
+#define E1000_82542_RDTR0    E1000_82542_RDTR
+#define E1000_82542_RDBAL0   E1000_82542_RDBAL
+#define E1000_82542_RDBAH0   E1000_82542_RDBAH
+#define E1000_82542_RDLEN0   E1000_82542_RDLEN
+#define E1000_82542_RDH0     E1000_82542_RDH
+#define E1000_82542_RDT0     E1000_82542_RDT
+#define E1000_82542_SRRCTL(_n) (0x280C + ((_n) << 8)) /* Split and Replication
+                                                       * RX Control - RW */
+#define E1000_82542_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_82542_RDBAH3   0x02B04 /* RX Desc Base High Queue 3 - RW */
+#define E1000_82542_RDBAL3   0x02B00 /* RX Desc Base Low Queue 3 - RW */
+#define E1000_82542_RDLEN3   0x02B08 /* RX Desc Length Queue 3 - RW */
+#define E1000_82542_RDH3     0x02B10 /* RX Desc Head Queue 3 - RW */
+#define E1000_82542_RDT3     0x02B18 /* RX Desc Tail Queue 3 - RW */
+#define E1000_82542_RDBAL2   0x02A00 /* RX Desc Base Low Queue 2 - RW */
+#define E1000_82542_RDBAH2   0x02A04 /* RX Desc Base High Queue 2 - RW */
+#define E1000_82542_RDLEN2   0x02A08 /* RX Desc Length Queue 2 - RW */
+#define E1000_82542_RDH2     0x02A10 /* RX Desc Head Queue 2 - RW */
+#define E1000_82542_RDT2     0x02A18 /* RX Desc Tail Queue 2 - RW */
+#define E1000_82542_RDTR1    0x00130
+#define E1000_82542_RDBAL1   0x00138
+#define E1000_82542_RDBAH1   0x0013C
+#define E1000_82542_RDLEN1   0x00140
+#define E1000_82542_RDH1     0x00148
+#define E1000_82542_RDT1     0x00150
+#define E1000_82542_FCRTH    0x00160
+#define E1000_82542_FCRTL    0x00168
+#define E1000_82542_FCTTV    E1000_FCTTV
+#define E1000_82542_TXCW     E1000_TXCW
+#define E1000_82542_RXCW     E1000_RXCW
+#define E1000_82542_MTA      0x00200
+#define E1000_82542_TCTL     E1000_TCTL
+#define E1000_82542_TCTL_EXT E1000_TCTL_EXT
+#define E1000_82542_TIPG     E1000_TIPG
+#define E1000_82542_TDBAL    0x00420
+#define E1000_82542_TDBAH    0x00424
+#define E1000_82542_TDLEN    0x00428
+#define E1000_82542_TDH      0x00430
+#define E1000_82542_TDT      0x00438
+#define E1000_82542_TIDV     0x00440
+#define E1000_82542_TBT      E1000_TBT
+#define E1000_82542_AIT      E1000_AIT
+#define E1000_82542_VFTA     0x00600
+#define E1000_82542_LEDCTL   E1000_LEDCTL
+#define E1000_82542_PBA      E1000_PBA
+#define E1000_82542_PBS      E1000_PBS
+#define E1000_82542_EEMNGCTL E1000_EEMNGCTL
+#define E1000_82542_EEARBC   E1000_EEARBC
+#define E1000_82542_FLASHT   E1000_FLASHT
+#define E1000_82542_EEWR     E1000_EEWR
+#define E1000_82542_FLSWCTL  E1000_FLSWCTL
+#define E1000_82542_FLSWDATA E1000_FLSWDATA
+#define E1000_82542_FLSWCNT  E1000_FLSWCNT
+#define E1000_82542_FLOP     E1000_FLOP
+#define E1000_82542_EXTCNF_CTRL  E1000_EXTCNF_CTRL
+#define E1000_82542_EXTCNF_SIZE  E1000_EXTCNF_SIZE
+#define E1000_82542_PHY_CTRL E1000_PHY_CTRL
+#define E1000_82542_ERT      E1000_ERT
+#define E1000_82542_RXDCTL   E1000_RXDCTL
+#define E1000_82542_RXDCTL1  E1000_RXDCTL1
+#define E1000_82542_RADV     E1000_RADV
+#define E1000_82542_RSRPD    E1000_RSRPD
+#define E1000_82542_TXDMAC   E1000_TXDMAC
+#define E1000_82542_KABGTXD  E1000_KABGTXD
+#define E1000_82542_TDFHS    E1000_TDFHS
+#define E1000_82542_TDFTS    E1000_TDFTS
+#define E1000_82542_TDFPC    E1000_TDFPC
+#define E1000_82542_TXDCTL   E1000_TXDCTL
+#define E1000_82542_TADV     E1000_TADV
+#define E1000_82542_TSPMT    E1000_TSPMT
+#define E1000_82542_CRCERRS  E1000_CRCERRS
+#define E1000_82542_ALGNERRC E1000_ALGNERRC
+#define E1000_82542_SYMERRS  E1000_SYMERRS
+#define E1000_82542_RXERRC   E1000_RXERRC
+#define E1000_82542_MPC      E1000_MPC
+#define E1000_82542_SCC      E1000_SCC
+#define E1000_82542_ECOL     E1000_ECOL
+#define E1000_82542_MCC      E1000_MCC
+#define E1000_82542_LATECOL  E1000_LATECOL
+#define E1000_82542_COLC     E1000_COLC
+#define E1000_82542_DC       E1000_DC
+#define E1000_82542_TNCRS    E1000_TNCRS
+#define E1000_82542_SEC      E1000_SEC
+#define E1000_82542_CEXTERR  E1000_CEXTERR
+#define E1000_82542_RLEC     E1000_RLEC
+#define E1000_82542_XONRXC   E1000_XONRXC
+#define E1000_82542_XONTXC   E1000_XONTXC
+#define E1000_82542_XOFFRXC  E1000_XOFFRXC
+#define E1000_82542_XOFFTXC  E1000_XOFFTXC
+#define E1000_82542_FCRUC    E1000_FCRUC
+#define E1000_82542_PRC64    E1000_PRC64
+#define E1000_82542_PRC127   E1000_PRC127
+#define E1000_82542_PRC255   E1000_PRC255
+#define E1000_82542_PRC511   E1000_PRC511
+#define E1000_82542_PRC1023  E1000_PRC1023
+#define E1000_82542_PRC1522  E1000_PRC1522
+#define E1000_82542_GPRC     E1000_GPRC
+#define E1000_82542_BPRC     E1000_BPRC
+#define E1000_82542_MPRC     E1000_MPRC
+#define E1000_82542_GPTC     E1000_GPTC
+#define E1000_82542_GORCL    E1000_GORCL
+#define E1000_82542_GORCH    E1000_GORCH
+#define E1000_82542_GOTCL    E1000_GOTCL
+#define E1000_82542_GOTCH    E1000_GOTCH
+#define E1000_82542_RNBC     E1000_RNBC
+#define E1000_82542_RUC      E1000_RUC
+#define E1000_82542_RFC      E1000_RFC
+#define E1000_82542_ROC      E1000_ROC
+#define E1000_82542_RJC      E1000_RJC
+#define E1000_82542_MGTPRC   E1000_MGTPRC
+#define E1000_82542_MGTPDC   E1000_MGTPDC
+#define E1000_82542_MGTPTC   E1000_MGTPTC
+#define E1000_82542_TORL     E1000_TORL
+#define E1000_82542_TORH     E1000_TORH
+#define E1000_82542_TOTL     E1000_TOTL
+#define E1000_82542_TOTH     E1000_TOTH
+#define E1000_82542_TPR      E1000_TPR
+#define E1000_82542_TPT      E1000_TPT
+#define E1000_82542_PTC64    E1000_PTC64
+#define E1000_82542_PTC127   E1000_PTC127
+#define E1000_82542_PTC255   E1000_PTC255
+#define E1000_82542_PTC511   E1000_PTC511
+#define E1000_82542_PTC1023  E1000_PTC1023
+#define E1000_82542_PTC1522  E1000_PTC1522
+#define E1000_82542_MPTC     E1000_MPTC
+#define E1000_82542_BPTC     E1000_BPTC
+#define E1000_82542_TSCTC    E1000_TSCTC
+#define E1000_82542_TSCTFC   E1000_TSCTFC
+#define E1000_82542_RXCSUM   E1000_RXCSUM
+#define E1000_82542_WUC      E1000_WUC
+#define E1000_82542_WUFC     E1000_WUFC
+#define E1000_82542_WUS      E1000_WUS
+#define E1000_82542_MANC     E1000_MANC
+#define E1000_82542_IPAV     E1000_IPAV
+#define E1000_82542_IP4AT    E1000_IP4AT
+#define E1000_82542_IP6AT    E1000_IP6AT
+#define E1000_82542_WUPL     E1000_WUPL
+#define E1000_82542_WUPM     E1000_WUPM
+#define E1000_82542_FFLT     E1000_FFLT
+#define E1000_82542_TDFH     0x08010
+#define E1000_82542_TDFT     0x08018
+#define E1000_82542_FFMT     E1000_FFMT
+#define E1000_82542_FFVT     E1000_FFVT
+#define E1000_82542_HOST_IF  E1000_HOST_IF
+#define E1000_82542_IAM         E1000_IAM
+#define E1000_82542_EEMNGCTL    E1000_EEMNGCTL
+#define E1000_82542_PSRCTL      E1000_PSRCTL
+#define E1000_82542_RAID        E1000_RAID
+#define E1000_82542_TARC0       E1000_TARC0
+#define E1000_82542_TDBAL1      E1000_TDBAL1
+#define E1000_82542_TDBAH1      E1000_TDBAH1
+#define E1000_82542_TDLEN1      E1000_TDLEN1
+#define E1000_82542_TDH1        E1000_TDH1
+#define E1000_82542_TDT1        E1000_TDT1
+#define E1000_82542_TXDCTL1     E1000_TXDCTL1
+#define E1000_82542_TARC1       E1000_TARC1
+#define E1000_82542_RFCTL       E1000_RFCTL
+#define E1000_82542_GCR         E1000_GCR
+#define E1000_82542_GSCL_1      E1000_GSCL_1
+#define E1000_82542_GSCL_2      E1000_GSCL_2
+#define E1000_82542_GSCL_3      E1000_GSCL_3
+#define E1000_82542_GSCL_4      E1000_GSCL_4
+#define E1000_82542_FACTPS      E1000_FACTPS
+#define E1000_82542_SWSM        E1000_SWSM
+#define E1000_82542_FWSM        E1000_FWSM
+#define E1000_82542_FFLT_DBG    E1000_FFLT_DBG
+#define E1000_82542_IAC         E1000_IAC
+#define E1000_82542_ICRXPTC     E1000_ICRXPTC
+#define E1000_82542_ICRXATC     E1000_ICRXATC
+#define E1000_82542_ICTXPTC     E1000_ICTXPTC
+#define E1000_82542_ICTXATC     E1000_ICTXATC
+#define E1000_82542_ICTXQEC     E1000_ICTXQEC
+#define E1000_82542_ICTXQMTC    E1000_ICTXQMTC
+#define E1000_82542_ICRXDMTC    E1000_ICRXDMTC
+#define E1000_82542_ICRXOC      E1000_ICRXOC
+#define E1000_82542_HICR        E1000_HICR
+
+#define E1000_82542_CPUVEC      E1000_CPUVEC
+#define E1000_82542_MRQC        E1000_MRQC
+#define E1000_82542_RETA        E1000_RETA
+#define E1000_82542_RSSRK       E1000_RSSRK
+#define E1000_82542_RSSIM       E1000_RSSIM
+#define E1000_82542_RSSIR       E1000_RSSIR
+#define E1000_82542_KUMCTRLSTA E1000_KUMCTRLSTA
+#define E1000_82542_SW_FW_SYNC E1000_SW_FW_SYNC
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+    uint64_t crcerrs;
+    uint64_t algnerrc;
+    uint64_t symerrs;
+    uint64_t rxerrc;
+    uint64_t mpc;
+    uint64_t scc;
+    uint64_t ecol;
+    uint64_t mcc;
+    uint64_t latecol;
+    uint64_t colc;
+    uint64_t dc;
+    uint64_t tncrs;
+    uint64_t sec;
+    uint64_t cexterr;
+    uint64_t rlec;
+    uint64_t xonrxc;
+    uint64_t xontxc;
+    uint64_t xoffrxc;
+    uint64_t xofftxc;
+    uint64_t fcruc;
+    uint64_t prc64;
+    uint64_t prc127;
+    uint64_t prc255;
+    uint64_t prc511;
+    uint64_t prc1023;
+    uint64_t prc1522;
+    uint64_t gprc;
+    uint64_t bprc;
+    uint64_t mprc;
+    uint64_t gptc;
+    uint64_t gorcl;
+    uint64_t gorch;
+    uint64_t gotcl;
+    uint64_t gotch;
+    uint64_t rnbc;
+    uint64_t ruc;
+    uint64_t rfc;
+    uint64_t roc;
+    uint64_t rjc;
+    uint64_t mgprc;
+    uint64_t mgpdc;
+    uint64_t mgptc;
+    uint64_t torl;
+    uint64_t torh;
+    uint64_t totl;
+    uint64_t toth;
+    uint64_t tpr;
+    uint64_t tpt;
+    uint64_t ptc64;
+    uint64_t ptc127;
+    uint64_t ptc255;
+    uint64_t ptc511;
+    uint64_t ptc1023;
+    uint64_t ptc1522;
+    uint64_t mptc;
+    uint64_t bptc;
+    uint64_t tsctc;
+    uint64_t tsctfc;
+    uint64_t iac;
+    uint64_t icrxptc;
+    uint64_t icrxatc;
+    uint64_t ictxptc;
+    uint64_t ictxatc;
+    uint64_t ictxqec;
+    uint64_t ictxqmtc;
+    uint64_t icrxdmtc;
+    uint64_t icrxoc;
+};
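+
+/* Illustrative sketch, not part of the original header: most MAC statistics
+ * registers are read/clear (R/clr), so a driver accumulates them into
+ * struct e1000_hw_stats instead of reading them in place. readl() is
+ * assumed to be the platform's 32-bit MMIO read:
+ *
+ *     stats->crcerrs += readl(hw->hw_addr + E1000_CRCERRS);
+ *     stats->mpc     += readl(hw->hw_addr + E1000_MPC);
+ *     stats->gprc    += readl(hw->hw_addr + E1000_GPRC);
+ */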
+
+/* Structure containing variables used by the shared code (e1000_hw.c) */
+struct e1000_hw {
+    uint8_t *hw_addr;
+    uint8_t *flash_address;
+    e1000_mac_type mac_type;
+    e1000_phy_type phy_type;
+    uint32_t phy_init_script;
+    e1000_media_type media_type;
+    void *back;
+    struct e1000_shadow_ram *eeprom_shadow_ram;
+    uint32_t flash_bank_size;
+    uint32_t flash_base_addr;
+    e1000_fc_type fc;
+    e1000_bus_speed bus_speed;
+    e1000_bus_width bus_width;
+    e1000_bus_type bus_type;
+    struct e1000_eeprom_info eeprom;
+    e1000_ms_type master_slave;
+    e1000_ms_type original_master_slave;
+    e1000_ffe_config ffe_config_state;
+    uint32_t asf_firmware_present;
+    uint32_t eeprom_semaphore_present;
+    uint32_t swfw_sync_present;
+    uint32_t swfwhw_semaphore_present;
+    unsigned long io_base;
+    uint32_t phy_id;
+    uint32_t phy_revision;
+    uint32_t phy_addr;
+    uint32_t original_fc;
+    uint32_t txcw;
+    uint32_t autoneg_failed;
+    uint32_t max_frame_size;
+    uint32_t min_frame_size;
+    uint32_t mc_filter_type;
+    uint32_t num_mc_addrs;
+    uint32_t collision_delta;
+    uint32_t tx_packet_delta;
+    uint32_t ledctl_default;
+    uint32_t ledctl_mode1;
+    uint32_t ledctl_mode2;
+    boolean_t tx_pkt_filtering;
+    struct e1000_host_mng_dhcp_cookie mng_cookie;
+    uint16_t phy_spd_default;
+    uint16_t autoneg_advertised;
+    uint16_t pci_cmd_word;
+    uint16_t fc_high_water;
+    uint16_t fc_low_water;
+    uint16_t fc_pause_time;
+    uint16_t current_ifs_val;
+    uint16_t ifs_min_val;
+    uint16_t ifs_max_val;
+    uint16_t ifs_step_size;
+    uint16_t ifs_ratio;
+    uint16_t device_id;
+    uint16_t vendor_id;
+    uint16_t subsystem_id;
+    uint16_t subsystem_vendor_id;
+    uint8_t revision_id;
+    uint8_t autoneg;
+    uint8_t mdix;
+    uint8_t forced_speed_duplex;
+    uint8_t wait_autoneg_complete;
+    uint8_t dma_fairness;
+    uint8_t mac_addr[NODE_ADDRESS_SIZE];
+    uint8_t perm_mac_addr[NODE_ADDRESS_SIZE];
+    boolean_t disable_polarity_correction;
+    boolean_t speed_downgraded;
+    e1000_smart_speed smart_speed;
+    e1000_dsp_config dsp_config_state;
+    boolean_t get_link_status;
+    boolean_t serdes_link_down;
+    boolean_t tbi_compatibility_en;
+    boolean_t tbi_compatibility_on;
+    boolean_t laa_is_present;
+    boolean_t phy_reset_disable;
+    boolean_t fc_send_xon;
+    boolean_t fc_strict_ieee;
+    boolean_t report_tx_early;
+    boolean_t adaptive_ifs;
+    boolean_t ifs_params_forced;
+    boolean_t in_ifs_mode;
+    boolean_t mng_reg_access_disabled;
+    boolean_t leave_av_bit_off;
+    boolean_t kmrn_lock_loss_workaround_disabled;
+};
+
+#define E1000_EEPROM_SWDPIN0   0x0001   /* SWDPIN 0 EEPROM Value */
+#define E1000_EEPROM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_EEPROM_RW_REG_DATA   16   /* Offset to data in EEPROM read/write registers */
+#define E1000_EEPROM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_EEPROM_RW_REG_START  1    /* First bit for telling part to start operation */
+#define E1000_EEPROM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_EEPROM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_EEPROM_POLL_READ     0    /* Flag for polling for read complete */
+/* Register Bit Masks */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex. 0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode. 0=little, 1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx, 1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal, 1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal, 1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on. 0=dis, 1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex. 0=half, 1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up. 0=no, 1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion
+                                                   by EEPROM/Flash */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66MHz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
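+
+/* Illustrative sketch, not part of the original header: decoding link state
+ * and speed from the Device Status register, assuming a readl()-style
+ * 32-bit MMIO read:
+ *
+ *     uint32_t status = readl(hw->hw_addr + E1000_STATUS);
+ *     int link_up = !!(status & E1000_STATUS_LU);
+ *     switch (status & E1000_STATUS_SPEED_MASK) {
+ *     case E1000_STATUS_SPEED_10:   speed = 10;   break;
+ *     case E1000_STATUS_SPEED_100:  speed = 100;  break;
+ *     default:                      speed = 1000; break;
+ *     }
+ */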
+
+/* EEPROM/Flash Control */
+#define E1000_EECD_SK        0x00000001 /* EEPROM Clock */
+#define E1000_EECD_CS        0x00000002 /* EEPROM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* EEPROM Data In */
+#define E1000_EECD_DO        0x00000008 /* EEPROM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* EEPROM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* EEPROM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* EEPROM Present */
+#define E1000_EECD_SIZE      0x00000200 /* EEPROM Size (0=64 word 1=256 word) */
+#define E1000_EECD_ADDR_BITS 0x00000400 /* EEPROM Addressing bits based on type
+                                         * (0-small, 1-large) */
+#define E1000_EECD_TYPE      0x00002000 /* EEPROM Type (1-SPI, 0-Microwire) */
+#ifndef E1000_EEPROM_GRANT_ATTEMPTS
+#define E1000_EEPROM_GRANT_ATTEMPTS 1000 /* EEPROM # attempts to gain grant */
+#endif
+#define E1000_EECD_AUTO_RD          0x00000200  /* EEPROM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* EEPROM Size */
+#define E1000_EECD_SIZE_EX_SHIFT    11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+#define E1000_STM_OPCODE     0xDB00
+#define E1000_HICR_FW_RESET  0xC0
+
+#define E1000_SHADOW_RAM_WORDS     2048
+#define E1000_ICH8_NVM_SIG_WORD    0x13
+#define E1000_ICH8_NVM_SIG_MASK    0xC0
+
+/* EEPROM Read */
+#define E1000_EERD_START      0x00000001 /* Start Read */
+#define E1000_EERD_DONE       0x00000010 /* Read Done */
+#define E1000_EERD_ADDR_SHIFT 8
+#define E1000_EERD_ADDR_MASK  0x0000FF00 /* Read Address */
+#define E1000_EERD_DATA_SHIFT 16
+#define E1000_EERD_DATA_MASK  0xFFFF0000 /* Read Data */
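+
+/* Illustrative sketch, not part of the original header: reading one EEPROM
+ * word through EERD - write the word address with the start bit, poll for
+ * completion, then extract the data (readl()/writel() are assumed MMIO
+ * accessors; a real loop would bound the polling with a timeout):
+ *
+ *     writel(E1000_EERD_START | (offset << E1000_EERD_ADDR_SHIFT),
+ *            hw->hw_addr + E1000_EERD);
+ *     do {
+ *         eerd = readl(hw->hw_addr + E1000_EERD);
+ *     } while (!(eerd & E1000_EERD_DONE));
+ *     word = (eerd & E1000_EERD_DATA_MASK) >> E1000_EERD_DATA_SHIFT;
+ */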
+
+/* SPI EEPROM Status Register */
+#define EEPROM_STATUS_RDY_SPI  0x01
+#define EEPROM_STATUS_WEN_SPI  0x02
+#define EEPROM_STATUS_BP0_SPI  0x04
+#define EEPROM_STATUS_BP1_SPI  0x08
+#define EEPROM_STATUS_WPEN_SPI 0x80
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000  /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000  /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000  /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
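+
+/* Illustrative sketch, not part of the original header: reading a PHY
+ * register over MDIO by composing an MDIC read command and polling READY
+ * (readl()/writel() are assumed MMIO accessors; bound the poll in
+ * practice):
+ *
+ *     writel(((uint32_t)reg << E1000_MDIC_REG_SHIFT) |
+ *            (hw->phy_addr << E1000_MDIC_PHY_SHIFT) | E1000_MDIC_OP_READ,
+ *            hw->hw_addr + E1000_MDIC);
+ *     do {
+ *         mdic = readl(hw->hw_addr + E1000_MDIC);
+ *     } while (!(mdic & E1000_MDIC_READY));
+ *     if (!(mdic & E1000_MDIC_ERROR))
+ *         data = mdic & E1000_MDIC_DATA_MASK;
+ */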
+
+#define E1000_KUMCTRLSTA_MASK           0x0000FFFF
+#define E1000_KUMCTRLSTA_OFFSET         0x001F0000
+#define E1000_KUMCTRLSTA_OFFSET_SHIFT   16
+#define E1000_KUMCTRLSTA_REN            0x00200000
+
+#define E1000_KUMCTRLSTA_OFFSET_FIFO_CTRL      0x00000000
+#define E1000_KUMCTRLSTA_OFFSET_CTRL           0x00000001
+#define E1000_KUMCTRLSTA_OFFSET_INB_CTRL       0x00000002
+#define E1000_KUMCTRLSTA_OFFSET_DIAG           0x00000003
+#define E1000_KUMCTRLSTA_OFFSET_TIMEOUTS       0x00000004
+#define E1000_KUMCTRLSTA_OFFSET_INB_PARAM      0x00000009
+#define E1000_KUMCTRLSTA_OFFSET_HD_CTRL        0x00000010
+#define E1000_KUMCTRLSTA_OFFSET_M2P_SERDES     0x0000001E
+#define E1000_KUMCTRLSTA_OFFSET_M2P_MODES      0x0000001F
+
+/* FIFO Control */
+#define E1000_KUMCTRLSTA_FIFO_CTRL_RX_BYPASS   0x00000008
+#define E1000_KUMCTRLSTA_FIFO_CTRL_TX_BYPASS   0x00000800
+
+/* In-Band Control */
+#define E1000_KUMCTRLSTA_INB_CTRL_LINK_STATUS_TX_TIMEOUT_DEFAULT    0x00000500
+#define E1000_KUMCTRLSTA_INB_CTRL_DIS_PADDING  0x00000010
+
+/* Half-Duplex Control */
+#define E1000_KUMCTRLSTA_HD_CTRL_10_100_DEFAULT 0x00000004
+#define E1000_KUMCTRLSTA_HD_CTRL_1000_DEFAULT  0x00000000
+
+#define E1000_KUMCTRLSTA_OFFSET_K0S_CTRL       0x0000001E
+
+#define E1000_KUMCTRLSTA_DIAG_FELPBK           0x2000
+#define E1000_KUMCTRLSTA_DIAG_NELPBK           0x1000
+
+#define E1000_KUMCTRLSTA_K0S_100_EN            0x2000
+#define E1000_KUMCTRLSTA_K0S_GBE_EN            0x1000
+#define E1000_KUMCTRLSTA_K0S_ENTRY_LATENCY_MASK   0x0003
+
+#define E1000_KABGTXD_BGSQLBIAS                0x00050000
+
+#define E1000_PHY_CTRL_SPD_EN                  0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU                0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU             0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE      0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE             0x00000040
+#define E1000_PHY_CTRL_B2B_EN                  0x00000080
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Receive Address */
+#define E1000_RAH_AV  0x80000000        /* Receive address valid */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* RX /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000 /* Transmit desc low threshold hit */
+#define E1000_ICR_SRPD          0x00010000 /* Small receive packet detected */
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit is asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurs */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+
+/* Interrupt Mask Clear */
+#define E1000_IMC_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMC_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMC_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMC_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMC_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMC_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMC_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMC_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMC_RXCFG     E1000_ICR_RXCFG     /* RX /c/ ordered set */
+#define E1000_IMC_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMC_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMC_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMC_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMC_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMC_SRPD      E1000_ICR_SRPD
+#define E1000_IMC_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMC_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMC_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMC_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMC_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMC_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMC_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMC_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMC_DSW       E1000_ICR_DSW
+#define E1000_IMC_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMC_EPRST     E1000_ICR_EPRST
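+
+/* Illustrative sketch, not part of the original header: ICR/ICS/IMS/IMC act
+ * as a set - IMS unmasks causes, IMC masks them, ICS raises them from
+ * software, and reading ICR returns and clears whatever is pending
+ * (readl()/writel() are assumed MMIO accessors):
+ *
+ *     writel(E1000_IMS_LSC | E1000_IMS_RXT0 | E1000_IMS_RXDMT0,
+ *            hw->hw_addr + E1000_IMS);       // unmask the causes we handle
+ *     ...
+ *     icr = readl(hw->hw_addr + E1000_ICR);  // read-to-clear
+ *     if (icr & E1000_ICR_LSC)
+ *         ;                                  // handle link status change
+ *     writel(~0u, hw->hw_addr + E1000_IMC);  // mask everything on shutdown
+ */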
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enable */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold, 1/2 of RDLEN */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold, 1/4 of RDLEN */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold, 1/8 of RDLEN */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
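+
+/* Illustrative sketch, not part of the original header: the SZ_* values
+ * above are only meaningful together with E1000_RCTL_BSEX, so a helper
+ * programming the receive buffer size must set both at once.  The
+ * function name is hypothetical; SZ_256 (0x00030000) doubles as the
+ * full 2-bit size-field mask. */
+static inline uint32_t e1000_rctl_set_bufsize_example(uint32_t rctl, int large)
+{
+    rctl &= ~(E1000_RCTL_SZ_256 | E1000_RCTL_BSEX);    /* clear size field + BSEX */
+    if (large)
+        rctl |= E1000_RCTL_SZ_16384 | E1000_RCTL_BSEX; /* 16384-byte buffers */
+    else
+        rctl |= E1000_RCTL_SZ_2048;                    /* 2048-byte buffers */
+    return rctl;
+}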
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
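+
+/* Illustrative sketch of the usage comment above, not part of the
+ * original header.  PSRCTL_ROUNDUP_EXAMPLE stands in for the driver's
+ * ROUNDUP(): round x up to the next multiple of mult. */
+#define PSRCTL_ROUNDUP_EXAMPLE(x, mult) ((((x) + (mult) - 1) / (mult)) * (mult))
+static inline uint32_t e1000_psrctl_example(uint32_t v0, uint32_t v1,
+                                            uint32_t v2, uint32_t v3)
+{
+    return ((PSRCTL_ROUNDUP_EXAMPLE(v0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+            E1000_PSRCTL_BSIZE0_MASK) |
+           ((PSRCTL_ROUNDUP_EXAMPLE(v1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+            E1000_PSRCTL_BSIZE1_MASK) |
+           ((PSRCTL_ROUNDUP_EXAMPLE(v2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+            E1000_PSRCTL_BSIZE2_MASK) |
+           ((PSRCTL_ROUNDUP_EXAMPLE(v3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+            E1000_PSRCTL_BSIZE3_MASK);
+}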
+
+/* SW_FW_SYNC definitions */
+#define E1000_SWFW_EEP_SM     0x0001
+#define E1000_SWFW_PHY0_SM    0x0002
+#define E1000_SWFW_PHY1_SM    0x0004
+#define E1000_SWFW_MAC_CSR_SM 0x0008
+
+/* Receive Descriptor */
+#define E1000_RDT_DELAY 0x0000ffff      /* Delay timer (1=1024us) */
+#define E1000_RDT_FPDB  0x80000000      /* Flush descriptor block */
+#define E1000_RDLEN_LEN 0x0007ff80      /* descriptor length */
+#define E1000_RDH_RDH   0x0000ffff      /* receive descriptor head */
+#define E1000_RDT_RDT   0x0000ffff      /* receive descriptor tail */
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Receive Descriptor Control */
+#define E1000_RXDCTL_PTHRESH 0x0000003F /* RXDCTL Prefetch Threshold */
+#define E1000_RXDCTL_HTHRESH 0x00003F00 /* RXDCTL Host Threshold */
+#define E1000_RXDCTL_WTHRESH 0x003F0000 /* RXDCTL Writeback Threshold */
+#define E1000_RXDCTL_GRAN    0x01000000 /* RXDCTL Granularity */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x000000FF /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x0000FF00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x00FF0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000 /* Enable the counting of desc.
+                                              still to be processed. */
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW    0x0000ffff     /* RxConfigWord mask */
+#define E1000_RXCW_NC    0x04000000     /* Receive config no carrier */
+#define E1000_RXCW_IV    0x08000000     /* Receive config invalid */
+#define E1000_RXCW_CC    0x10000000     /* Receive config change */
+#define E1000_RXCW_C     0x20000000     /* Receive config */
+#define E1000_RXCW_SYNCH 0x40000000     /* Receive config synch */
+#define E1000_RXCW_ANC   0x80000000     /* Auto-neg complete */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+/* Extended Transmit Control */
+#define E1000_TCTL_EXT_BST_MASK  0x000003FF /* Backoff Slot Time */
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+
+#define DEFAULT_80003ES2LAN_TCTL_EXT_GCEX   0x00010000
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Multiple Receive Queue Control */
+#define E1000_MRQC_ENABLE_MASK              0x00000003
+#define E1000_MRQC_ENABLE_RSS_2Q            0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT           0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK           0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP       0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4           0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX    0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX        0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6           0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP       0x00200000
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO      0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS 0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET 16       /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC 0x00000001 /* Link Status Changed */
+#define E1000_WUS_MAG  0x00000002 /* Magic Packet Received */
+#define E1000_WUS_EX   0x00000004 /* Directed Exact Received */
+#define E1000_WUS_MC   0x00000008 /* Directed Multicast Received */
+#define E1000_WUS_BC   0x00000010 /* Broadcast Received */
+#define E1000_WUS_ARP  0x00000020 /* ARP Request Packet Received */
+#define E1000_WUS_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Received */
+#define E1000_WUS_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Received */
+#define E1000_WUS_FLX0 0x00010000 /* Flexible Filter 0 Match */
+#define E1000_WUS_FLX1 0x00020000 /* Flexible Filter 1 Match */
+#define E1000_WUS_FLX2 0x00040000 /* Flexible Filter 2 Match */
+#define E1000_WUS_FLX3 0x00080000 /* Flexible Filter 3 Match */
+#define E1000_WUS_FLX_FILTERS 0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000 /* Enable Neighbor Discovery
+                                             * Filtering */
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000 /* Enable MAC address
+                                                    * filtering */
+#define E1000_MANC_EN_MNG2HOST   0x00200000 /* Enable MNG packets to host
+                                             * memory */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000 /* Enable IP address
+                                                    * filtering */
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN         0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+/* FW Semaphore Register */
+#define E1000_FWSM_MODE_MASK    0x0000000E /* FW mode */
+#define E1000_FWSM_MODE_SHIFT            1
+#define E1000_FWSM_FW_VALID     0x00008000 /* FW established a valid mode */
+
+#define E1000_FWSM_RSPCIPHY        0x00000040 /* Reset PHY on PCI reset */
+#define E1000_FWSM_DISSW           0x10000000 /* FW disable SW Write Access */
+#define E1000_FWSM_SKUSEL_MASK     0x60000000 /* LAN SKU select */
+#define E1000_FWSM_SKUEL_SHIFT     29
+#define E1000_FWSM_SKUSEL_EMB      0x0 /* Embedded SKU */
+#define E1000_FWSM_SKUSEL_CONS     0x1 /* Consumer SKU */
+#define E1000_FWSM_SKUSEL_PERF_100 0x2 /* Perf & Corp 10/100 SKU */
+#define E1000_FWSM_SKUSEL_PERF_GBE 0x3 /* Perf & Corp GbE SKU */
+
+/* FFLT Debug Register */
+#define E1000_FFLT_DBG_INVC     0x00100000 /* Invalid /C/ code handling */
+
+typedef enum {
+    e1000_mng_mode_none     = 0,
+    e1000_mng_mode_asf,
+    e1000_mng_mode_pt,
+    e1000_mng_mode_ipmi,
+    e1000_mng_mode_host_interface_only
+} e1000_mng_mode;
+
+/* Host Interface Control Register */
+#define E1000_HICR_EN           0x00000001  /* Enable Bit - RO */
+#define E1000_HICR_C            0x00000002  /* Driver sets this bit when done
+                                             * to put command in RAM */
+#define E1000_HICR_SV           0x00000004  /* Status Validity */
+#define E1000_HICR_FWR          0x00000080  /* FW reset. Set by the Host */
+
+/* Host Interface Command Interface - Address range 0x8800-0x8EFF */
+#define E1000_HI_MAX_DATA_LENGTH         252 /* Host Interface data length */
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH  1792 /* Number of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH  448 /* Number of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT         500 /* Time in ms to process HI command */
+
+struct e1000_host_command_header {
+    uint8_t command_id;
+    uint8_t command_length;
+    uint8_t command_options;   /* I/F bits for command, status for return */
+    uint8_t checksum;
+};
+struct e1000_host_command_info {
+    struct e1000_host_command_header command_header;  /* Command header (also used as the result header), 4 bytes */
+    uint8_t command_data[E1000_HI_MAX_DATA_LENGTH];   /* Command data, 0..252 bytes */
+};
+
+/* Host SMB register #0 */
+#define E1000_HSMC0R_CLKIN      0x00000001  /* SMB Clock in */
+#define E1000_HSMC0R_DATAIN     0x00000002  /* SMB Data in */
+#define E1000_HSMC0R_DATAOUT    0x00000004  /* SMB Data out */
+#define E1000_HSMC0R_CLKOUT     0x00000008  /* SMB Clock out */
+
+/* Host SMB register #1 */
+#define E1000_HSMC1R_CLKIN      E1000_HSMC0R_CLKIN
+#define E1000_HSMC1R_DATAIN     E1000_HSMC0R_DATAIN
+#define E1000_HSMC1R_DATAOUT    E1000_HSMC0R_DATAOUT
+#define E1000_HSMC1R_CLKOUT     E1000_HSMC0R_CLKOUT
+
+/* FW Status Register */
+#define E1000_FWSTS_FWS_MASK    0x000000FF  /* FW Status */
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+#define E1000_MDALIGN          4096
+
+/* PCI-Ex registers*/
+
+/* PCI-Ex Control Register */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCI_EX_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                             E1000_GCR_RXDSCW_NO_SNOOP      | \
+                             E1000_GCR_RXDSCR_NO_SNOOP      | \
+                             E1000_GCR_TXD_NO_SNOOP         | \
+                             E1000_GCR_TXDSCW_NO_SNOOP      | \
+                             E1000_GCR_TXDSCR_NO_SNOOP)
+
+#define PCI_EX_82566_SNOOP_ALL PCI_EX_NO_SNOOP_ALL
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+/* Function Active and Power State to MNG */
+#define E1000_FACTPS_FUNC0_POWER_STATE_MASK         0x00000003
+#define E1000_FACTPS_LAN0_VALID                     0x00000004
+#define E1000_FACTPS_FUNC0_AUX_EN                   0x00000008
+#define E1000_FACTPS_FUNC1_POWER_STATE_MASK         0x000000C0
+#define E1000_FACTPS_FUNC1_POWER_STATE_SHIFT        6
+#define E1000_FACTPS_LAN1_VALID                     0x00000100
+#define E1000_FACTPS_FUNC1_AUX_EN                   0x00000200
+#define E1000_FACTPS_FUNC2_POWER_STATE_MASK         0x00003000
+#define E1000_FACTPS_FUNC2_POWER_STATE_SHIFT        12
+#define E1000_FACTPS_IDE_ENABLE                     0x00004000
+#define E1000_FACTPS_FUNC2_AUX_EN                   0x00008000
+#define E1000_FACTPS_FUNC3_POWER_STATE_MASK         0x000C0000
+#define E1000_FACTPS_FUNC3_POWER_STATE_SHIFT        18
+#define E1000_FACTPS_SP_ENABLE                      0x00100000
+#define E1000_FACTPS_FUNC3_AUX_EN                   0x00200000
+#define E1000_FACTPS_FUNC4_POWER_STATE_MASK         0x03000000
+#define E1000_FACTPS_FUNC4_POWER_STATE_SHIFT        24
+#define E1000_FACTPS_IPMI_ENABLE                    0x04000000
+#define E1000_FACTPS_FUNC4_AUX_EN                   0x08000000
+#define E1000_FACTPS_MNGCG                          0x20000000
+#define E1000_FACTPS_LAN_FUNC_SEL                   0x40000000
+#define E1000_FACTPS_PM_STATE_CHANGED               0x80000000
+
+/* EEPROM Commands - Microwire */
+#define EEPROM_READ_OPCODE_MICROWIRE  0x6  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_MICROWIRE 0x5  /* EEPROM write opcode */
+#define EEPROM_ERASE_OPCODE_MICROWIRE 0x7  /* EEPROM erase opcode */
+#define EEPROM_EWEN_OPCODE_MICROWIRE  0x13 /* EEPROM erase/write enable */
+#define EEPROM_EWDS_OPCODE_MICROWIRE  0x10 /* EEPROM erase/write disable */
+
+/* EEPROM Commands - SPI */
+#define EEPROM_MAX_RETRY_SPI        5000 /* Max wait of 5ms, for RDY signal */
+#define EEPROM_READ_OPCODE_SPI      0x03  /* EEPROM read opcode */
+#define EEPROM_WRITE_OPCODE_SPI     0x02  /* EEPROM write opcode */
+#define EEPROM_A8_OPCODE_SPI        0x08  /* opcode bit-3 = address bit-8 */
+#define EEPROM_WREN_OPCODE_SPI      0x06  /* EEPROM set Write Enable latch */
+#define EEPROM_WRDI_OPCODE_SPI      0x04  /* EEPROM reset Write Enable latch */
+#define EEPROM_RDSR_OPCODE_SPI      0x05  /* EEPROM read Status register */
+#define EEPROM_WRSR_OPCODE_SPI      0x01  /* EEPROM write Status register */
+#define EEPROM_ERASE4K_OPCODE_SPI   0x20  /* EEPROM ERASE 4KB */
+#define EEPROM_ERASE64K_OPCODE_SPI  0xD8  /* EEPROM ERASE 64KB */
+#define EEPROM_ERASE256_OPCODE_SPI  0xDB  /* EEPROM ERASE 256B */
+
+/* EEPROM Size definitions */
+#define EEPROM_WORD_SIZE_SHIFT  6
+#define EEPROM_SIZE_SHIFT       10
+#define EEPROM_SIZE_MASK        0x1C00
+
+/* EEPROM Word Offsets */
+#define EEPROM_COMPAT                 0x0003
+#define EEPROM_ID_LED_SETTINGS        0x0004
+#define EEPROM_VERSION                0x0005
+#define EEPROM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define EEPROM_PHY_CLASS_WORD         0x0007
+#define EEPROM_INIT_CONTROL1_REG      0x000A
+#define EEPROM_INIT_CONTROL2_REG      0x000F
+#define EEPROM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define EEPROM_INIT_CONTROL3_PORT_B   0x0014
+#define EEPROM_INIT_3GIO_3            0x001A
+#define EEPROM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define EEPROM_INIT_CONTROL3_PORT_A   0x0024
+#define EEPROM_CFG                    0x0012
+#define EEPROM_FLASH_VERSION          0x0032
+#define EEPROM_CHECKSUM_REG           0x003F
+
+#define E1000_EEPROM_CFG_DONE         0x00040000   /* MNG config cycle done */
+#define E1000_EEPROM_CFG_DONE_PORT_1  0x00080000   /* ...for second port */
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_RESERVED_82573  0xF746
+#define ID_LED_DEFAULT_82573   0x1811
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2 << 12) | \
+                              (ID_LED_OFF1_OFF2 << 8) | \
+                              (ID_LED_DEF1_DEF2 << 4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+                                 (ID_LED_DEF1_OFF2 <<  8) | \
+                                 (ID_LED_DEF1_ON2  <<  4) | \
+                                 (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
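+
+/* Illustrative sketch, not part of the original header: an ID LED
+ * settings word carries one ID_LED_* code per LED, LED0 in bits 3:0
+ * through LED3 in bits 15:12 (see ID_LED_DEFAULT above). */
+static inline uint8_t id_led_code_example(uint16_t id_led_word, unsigned int led)
+{
+    return (id_led_word >> (led * 4)) & 0xF; /* e.g. returns ID_LED_DEF1_DEF2 */
+}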
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+
+/* Mask bits for SERDES amplitude adjustment in Word 6 of the EEPROM */
+#define EEPROM_SERDES_AMPLITUDE_MASK  0x000F
+
+/* Mask bit for PHY class in Word 7 of the EEPROM */
+#define EEPROM_PHY_CLASS_A   0x8000
+
+/* Mask bits for fields in Word 0x0a of the EEPROM */
+#define EEPROM_WORD0A_ILOS   0x0010
+#define EEPROM_WORD0A_SWDPIO 0x01E0
+#define EEPROM_WORD0A_LRST   0x0200
+#define EEPROM_WORD0A_FD     0x0400
+#define EEPROM_WORD0A_66MHZ  0x0800
+
+/* Mask bits for fields in Word 0x0f of the EEPROM */
+#define EEPROM_WORD0F_PAUSE_MASK 0x3000
+#define EEPROM_WORD0F_PAUSE      0x1000
+#define EEPROM_WORD0F_ASM_DIR    0x2000
+#define EEPROM_WORD0F_ANE        0x0800
+#define EEPROM_WORD0F_SWPDIO_EXT 0x00F0
+#define EEPROM_WORD0F_LPLU       0x0001
+
+/* Mask bits for fields in Word 0x10/0x20 of the EEPROM */
+#define EEPROM_WORD1020_GIGA_DISABLE         0x0010
+#define EEPROM_WORD1020_GIGA_DISABLE_NON_D0A 0x0008
+
+/* Mask bits for fields in Word 0x1a of the EEPROM */
+#define EEPROM_WORD1A_ASPM_MASK  0x000C
+
+/* For checksumming, the sum of all words in the EEPROM should equal 0xBABA. */
+#define EEPROM_SUM 0xBABA
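+
+/* Illustrative sketch, not part of the original header: the checksum
+ * rule above means summing words 0x0000..EEPROM_CHECKSUM_REG with
+ * 16-bit wrap-around must yield EEPROM_SUM.  read_word() is a
+ * hypothetical stand-in for the driver's EEPROM read routine. */
+static inline int eeprom_checksum_ok_example(uint16_t (*read_word)(uint16_t offset))
+{
+    uint16_t sum = 0, offset;
+
+    for (offset = 0; offset <= EEPROM_CHECKSUM_REG; offset++)
+        sum += read_word(offset);
+    return sum == EEPROM_SUM;
+}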
+
+/* EEPROM Map defines (WORD OFFSETS)*/
+#define EEPROM_NODE_ADDRESS_BYTE_0 0
+#define EEPROM_PBA_BYTE_1          8
+
+#define EEPROM_RESERVED_WORD          0xFFFF
+
+/* EEPROM Map Sizes (Byte Counts) */
+#define PBA_SIZE 4
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+/* Collision distance is a 0-based value that applies to
+ * half-duplex-capable hardware only. */
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLLISION_DISTANCE_82542  64
+#define E1000_FDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_HDX_COLLISION_DISTANCE    E1000_COLLISION_DISTANCE
+#define E1000_COLD_SHIFT                12
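+
+/* Illustrative sketch, not part of the original header: the collision
+ * threshold and distance land in the E1000_TCTL_CT (bits 11:4) and
+ * E1000_TCTL_COLD (bits 21:12) fields of TCTL via the shifts above. */
+static inline uint32_t e1000_tctl_collision_example(uint32_t tctl)
+{
+    tctl &= ~(E1000_TCTL_CT | E1000_TCTL_COLD);
+    tctl |= E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT;
+    tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+    return tctl;
+}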
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT        10
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_10_100 0x00000009
+#define DEFAULT_80003ES2LAN_TIPG_IPGT_1000   0x00000008
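+
+/* Illustrative sketch, not part of the original header: TIPG holds
+ * IPGT in bits 9:0, IPGR1 in bits 19:10 and IPGR2 in bits 29:20, so an
+ * 82543 copper default would be assembled as follows. */
+static inline uint32_t e1000_tipg_example(void)
+{
+    return (DEFAULT_82543_TIPG_IPGT_COPPER & E1000_TIPG_IPGT_MASK) |
+           (DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT) |
+           (DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT);
+}
+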
+#define E1000_TXDMAC_DPP 0x00000001
+
+/* Adaptive IFS defines */
+#define TX_THRESHOLD_START     8
+#define TX_THRESHOLD_INCREMENT 10
+#define TX_THRESHOLD_DECREMENT 1
+#define TX_THRESHOLD_STOP      190
+#define TX_THRESHOLD_DISABLE   0
+#define TX_THRESHOLD_TIMER_MS  10000
+#define MIN_NUM_XMITS          1000
+#define IFS_MAX                80
+#define IFS_STEP               10
+#define IFS_MIN                40
+#define IFS_RATIO              4
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_PCIE_WRITE_ENABLE 0x00000001
+#define E1000_EXTCNF_CTRL_PHY_WRITE_ENABLE  0x00000002
+#define E1000_EXTCNF_CTRL_D_UD_ENABLE       0x00000004
+#define E1000_EXTCNF_CTRL_D_UD_LATENCY      0x00000008
+#define E1000_EXTCNF_CTRL_D_UD_OWNER        0x00000010
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP 0x00000020
+#define E1000_EXTCNF_CTRL_MDIO_HW_OWNERSHIP 0x00000040
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER   0x0FFF0000
+
+#define E1000_EXTCNF_SIZE_EXT_PHY_LENGTH    0x000000FF
+#define E1000_EXTCNF_SIZE_EXT_DOCK_LENGTH   0x0000FF00
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH   0x00FF0000
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE  0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG            0x00000020
+
+/* PBA constants */
+#define E1000_PBA_8K 0x0008    /* 8KB, default Rx allocation */
+#define E1000_PBA_12K 0x000C    /* 12KB, default Rx allocation */
+#define E1000_PBA_16K 0x0010    /* 16KB, default Tx allocation */
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB, default Rx allocation */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* The historical defaults for the flow control values are given below. */
+#define FC_DEFAULT_HI_THRESH        (0x8000)    /* 32KB */
+#define FC_DEFAULT_LO_THRESH        (0x4000)    /* 16KB */
+#define FC_DEFAULT_TX_TIMER         (0x100)     /* ~130 us */
+
+/* PCIX Config space */
+#define PCIX_COMMAND_REGISTER    0xE6
+#define PCIX_STATUS_REGISTER_LO  0xE8
+#define PCIX_STATUS_REGISTER_HI  0xEA
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
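+
+/* Illustrative sketch, not part of the original header: the maximum
+ * memory read byte count is a 2-bit code (e.g. PCIX_STATUS_HI_MMRBC_2K)
+ * placed into the PCI-X command register with the mask/shift above. */
+static inline uint16_t pcix_set_mmrbc_example(uint16_t pcix_cmd, uint16_t code)
+{
+    pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+    pcix_cmd |= (code << PCIX_COMMAND_MMRBC_SHIFT) & PCIX_COMMAND_MMRBC_MASK;
+    return pcix_cmd;
+}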
+
+
+/* Number of bits required to shift right the "pause" bits from the
+ * EEPROM (bits 13:12) to the "pause" (bits 8:7) field in the TXCW register.
+ */
+#define PAUSE_SHIFT 5
+
+/* Number of bits required to shift left the "SWDPIO" bits from the
+ * EEPROM (bits 8:5) to the "SWDPIO" (bits 25:22) field in the CTRL register.
+ */
+#define SWDPIO_SHIFT 17
+
+/* Number of bits required to shift left the "SWDPIO_EXT" bits from the
+ * EEPROM word F (bits 7:4) to bits 11:8 of the Extended CTRL register.
+ */
+#define SWDPIO__EXT_SHIFT 4
+
+/* Number of bits required to shift left the "ILOS" bit from the EEPROM
+ * (bit 4) to the "ILOS" (bit 7) field in the CTRL register.
+ */
+#define ILOS_SHIFT  3
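+
+/* Illustrative sketch, not part of the original header: PAUSE_SHIFT
+ * moves the EEPROM word 0x0F pause bits (13:12) down to the TXCW pause
+ * bits (8:7); note 0x3000 >> 5 == E1000_TXCW_PAUSE_MASK (0x0180). */
+static inline uint32_t e1000_txcw_pause_example(uint16_t eeprom_word0f)
+{
+    return ((uint32_t)(eeprom_word0f & EEPROM_WORD0F_PAUSE_MASK)) >> PAUSE_SHIFT;
+}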
+
+
+#define RECEIVE_BUFFER_ALIGN_SIZE  (256)
+
+/* Number of milliseconds we wait for auto-negotiation to complete */
+#define LINK_UP_TIMEOUT             500
+
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for the EEPROM auto read done bit after MAC reset */
+#define AUTO_READ_DONE_TIMEOUT      10
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+
+#define E1000_TX_BUFFER_SIZE ((uint32_t)1514)
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+/* TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the RX descriptor with EOP set
+ *      errors = the 8 bit error field of the RX descriptor with EOP set
+ *      length = the sum of all the length fields of the RX descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      adapter->max_frame_size = the maximum frame length we want to accept.
+ *      adapter->min_frame_size = the minimum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT(adapter, status, errors, length, last_byte)) {
+ *      accept_frame = TRUE;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = FALSE;
+ *  }
+ *  ...
+ */
+
+#define TBI_ACCEPT(adapter, status, errors, length, last_byte) \
+    ((adapter)->tbi_compatibility_on && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > ((adapter)->min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= ((adapter)->max_frame_size + 1))) : \
+          (((length) > (adapter)->min_frame_size) && \
+           ((length) <= ((adapter)->max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+
+/* Structures, enums, and macros for the PHY */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CTRL         0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page TX */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define MAX_PHY_REG_ADDRESS        0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG     0xF   /* Registers equal on all pages */
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+#define IGP01E1000_IEEE_REGS_PAGE  0x0000
+#define IGP01E1000_IEEE_RESTART_AUTONEG 0x3300
+#define IGP01E1000_IEEE_FORCE_GIGA      0x0140
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG 0x10 /* PHY Specific Port Config Register */
+#define IGP01E1000_PHY_PORT_STATUS 0x11 /* PHY Specific Status Register */
+#define IGP01E1000_PHY_PORT_CTRL   0x12 /* PHY Specific Control Register */
+#define IGP01E1000_PHY_LINK_HEALTH 0x13 /* PHY Link Health Register */
+#define IGP01E1000_GMII_FIFO       0x14 /* GMII FIFO Register */
+#define IGP01E1000_PHY_CHANNEL_QUALITY 0x15 /* PHY Channel Quality Register */
+#define IGP02E1000_PHY_POWER_MGMT      0x19
+#define IGP01E1000_PHY_PAGE_SELECT     0x1F /* PHY Page Select Core Register */
+
+/* IGP01E1000 AGC Registers - store the cable length values */
+#define IGP01E1000_PHY_AGC_A        0x1172
+#define IGP01E1000_PHY_AGC_B        0x1272
+#define IGP01E1000_PHY_AGC_C        0x1472
+#define IGP01E1000_PHY_AGC_D        0x1872
+
+/* IGP02E1000 AGC Registers for cable length values */
+#define IGP02E1000_PHY_AGC_A        0x11B1
+#define IGP02E1000_PHY_AGC_B        0x12B1
+#define IGP02E1000_PHY_AGC_C        0x14B1
+#define IGP02E1000_PHY_AGC_D        0x18B1
+
+/* IGP01E1000 DSP Reset Register */
+#define IGP01E1000_PHY_DSP_RESET   0x1F33
+#define IGP01E1000_PHY_DSP_SET     0x1F71
+#define IGP01E1000_PHY_DSP_FFE     0x1F35
+
+#define IGP01E1000_PHY_CHANNEL_NUM    4
+#define IGP02E1000_PHY_CHANNEL_NUM    4
+
+#define IGP01E1000_PHY_AGC_PARAM_A    0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B    0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C    0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D    0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX        0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS 0x8000
+
+#define IGP01E1000_PHY_ANALOG_TX_STATE      0x2890
+#define IGP01E1000_PHY_ANALOG_CLASS_A       0x2000
+#define IGP01E1000_PHY_FORCE_ANALOG_ENABLE  0x0004
+#define IGP01E1000_PHY_DSP_FFE_CM_CP        0x0069
+
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT      0x002A
+/* IGP01E1000 PCS Initialization register - stores the polarity status when
+ * speed = 1000 Mbps. */
+#define IGP01E1000_PHY_PCS_INIT_REG  0x00B4
+#define IGP01E1000_PHY_PCS_CTRL_REG  0x00B5
+
+#define IGP01E1000_ANALOG_REGS_PAGE  0x20C0
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
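+
+/* Illustrative sketch, not part of the original header: GG82563_REG()
+ * packs the page into bits 15:5 and the offset into bits 4:0, so e.g.
+ * GG82563_REG(193, 16) == (193 << 5) | 16 == 0x1830. */
+static inline uint16_t gg82563_page_example(uint16_t reg_addr)
+{
+    return reg_addr >> GG82563_PAGE_SHIFT; /* recover the page number */
+}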
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD 0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS    0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS    0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS  0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS  0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS     0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE          0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR        0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT   0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE      0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* New Link Code Word page received */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Local device is Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
+
+/* Next Page TX Register */
+#define NPTX_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define NPTX_TOGGLE         0x0800 /* Toggles between exchanges
+                                    * of different NP
+                                    */
+#define NPTX_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                    * 0 = cannot comply with msg
+                                    */
+#define NPTX_MSG_PAGE       0x2000 /* formatted(1)/unformatted(0) pg */
+#define NPTX_NEXT_PAGE      0x8000 /* 1 = additional NP will follow
+                                    * 0 = sending last NP
+                                    */
+
+/* Link Partner Next Page Register */
+#define LP_RNPR_MSG_CODE_FIELD 0x0001 /* NP msg code or unformatted data */
+#define LP_RNPR_TOGGLE         0x0800 /* Toggles between exchanges
+                                       * of different NP
+                                       */
+#define LP_RNPR_ACKNOWLDGE2    0x1000 /* 1 = will comply with msg
+                                       * 0 = cannot comply with msg
+                                       */
+#define LP_RNPR_MSG_PAGE       0x2000  /* formatted(1)/unformatted(0) pg */
+#define LP_RNPR_ACKNOWLDGE     0x4000  /* 1 = ACK / 0 = NO ACK */
+#define LP_RNPR_NEXT_PAGE      0x8000  /* 1 = additional NP will follow
+                                        * 0 = sending last NP
+                                        */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local TX is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+#define SR_1000T_REMOTE_RX_STATUS_SHIFT          12
+#define SR_1000T_LOCAL_RX_STATUS_SHIFT           13
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT    5
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20            20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100           100
+
+/* Extended Status Register */
+#define IEEE_ESR_1000T_HD_CAPS 0x1000 /* 1000T HD capable */
+#define IEEE_ESR_1000T_FD_CAPS 0x2000 /* 1000T FD capable */
+#define IEEE_ESR_1000X_HD_CAPS 0x4000 /* 1000X HD capable */
+#define IEEE_ESR_1000X_FD_CAPS 0x8000 /* 1000X FD capable */
+
+#define PHY_TX_POLARITY_MASK   0x0100 /* register 10h bit 8 (polarity bit) */
+#define PHY_TX_NORMAL_POLARITY 0      /* register 10h bit 8 (normal polarity) */
+
+#define AUTO_POLARITY_DISABLE  0x0010 /* register 11h bit 4 */
+                                      /* (0=enable, 1=disable) */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010 /* 1=CLK125 low,
+                                                * 0=CLK125 toggling
+                                                */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040  /* 1000BASE-T: Auto crossover,
+                                                *  100BASE-TX/10BASE-T:
+                                                *  MDI Mode
+                                                */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060  /* Auto crossover enabled
+                                                * all speeds.
+                                                */
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE 0x0080
+                                        /* 1=Enable Extended 10BASE-T distance
+                                         * (Lower 10BASE-T RX Threshold)
+                                         * 0=Normal 10BASE-T RX Threshold */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+                                        /* 1=5-Bit interface in 100BASE-TX
+                                         * 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+#define M88E1000_PSCR_POLARITY_REVERSAL_SHIFT    1
+#define M88E1000_PSCR_AUTO_X_MODE_SHIFT          5
+#define M88E1000_PSCR_10BT_EXT_DIST_ENABLE_SHIFT 7
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380 /* 0=<50M;1=50-80M;2=80-110M;
+                                            * 3=110-140M;4=>140M */
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Full Duplex, 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 15:14 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_REV_POLARITY_SHIFT 1
+#define M88E1000_PSSR_DOWNSHIFT_SHIFT    5
+#define M88E1000_PSSR_MDIX_SHIFT         6
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
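+
+/* Illustrative sketch, not part of the original header: the cable
+ * length code (0..4, per the M88E1000_PSSR_CABLE_LENGTH comment above)
+ * is extracted with the matching mask and shift. */
+static inline uint16_t m88e1000_cable_length_example(uint16_t phy_status)
+{
+    return (phy_status & M88E1000_PSSR_CABLE_LENGTH) >>
+           M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+}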
+
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000 /* 1=Lost lock detect enabled.
+                                              * Will assert lost lock and bring
+                                              * link down if idle not seen
+                                              * within 1ms in 1000BASE-T
+                                              */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/* IGP01E1000 Specific Port Config Register - R/W */
+#define IGP01E1000_PSCFR_AUTO_MDIX_PAR_DETECT  0x0010
+#define IGP01E1000_PSCFR_PRE_EN                0x0020
+#define IGP01E1000_PSCFR_SMART_SPEED           0x0080
+#define IGP01E1000_PSCFR_DISABLE_TPLOOPBACK    0x0100
+#define IGP01E1000_PSCFR_DISABLE_JABBER        0x0400
+#define IGP01E1000_PSCFR_DISABLE_TRANSMIT      0x2000
+
+/* IGP01E1000 Specific Port Status Register - R/O */
+#define IGP01E1000_PSSR_AUTONEG_FAILED         0x0001 /* RO LH SC */
+#define IGP01E1000_PSSR_POLARITY_REVERSED      0x0002
+#define IGP01E1000_PSSR_CABLE_LENGTH           0x007C
+#define IGP01E1000_PSSR_FULL_DUPLEX            0x0200
+#define IGP01E1000_PSSR_LINK_UP                0x0400
+#define IGP01E1000_PSSR_MDIX                   0x0800
+#define IGP01E1000_PSSR_SPEED_MASK             0xC000 /* speed bits mask */
+#define IGP01E1000_PSSR_SPEED_10MBPS           0x4000
+#define IGP01E1000_PSSR_SPEED_100MBPS          0x8000
+#define IGP01E1000_PSSR_SPEED_1000MBPS         0xC000
+#define IGP01E1000_PSSR_CABLE_LENGTH_SHIFT     0x0002 /* shift right 2 */
+#define IGP01E1000_PSSR_MDIX_SHIFT             0x000B /* shift right 11 */
+
+/* IGP01E1000 Specific Port Control Register - R/W */
+#define IGP01E1000_PSCR_TP_LOOPBACK            0x0010
+#define IGP01E1000_PSCR_CORRECT_NC_SCMBLR      0x0200
+#define IGP01E1000_PSCR_TEN_CRS_SELECT         0x0400
+#define IGP01E1000_PSCR_FLIP_CHIP              0x0800
+#define IGP01E1000_PSCR_AUTO_MDIX              0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX         0x2000 /* 0-MDI, 1-MDIX */
+
+/* IGP01E1000 Specific Port Link Health Register */
+#define IGP01E1000_PLHR_SS_DOWNGRADE           0x8000
+#define IGP01E1000_PLHR_GIG_SCRAMBLER_ERROR    0x4000
+#define IGP01E1000_PLHR_MASTER_FAULT           0x2000
+#define IGP01E1000_PLHR_MASTER_RESOLUTION      0x1000
+#define IGP01E1000_PLHR_GIG_REM_RCVR_NOK       0x0800 /* LH */
+#define IGP01E1000_PLHR_IDLE_ERROR_CNT_OFLOW   0x0400 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_1             0x0200 /* LH */
+#define IGP01E1000_PLHR_DATA_ERR_0             0x0100
+#define IGP01E1000_PLHR_AUTONEG_FAULT          0x0040
+#define IGP01E1000_PLHR_AUTONEG_ACTIVE         0x0010
+#define IGP01E1000_PLHR_VALID_CHANNEL_D        0x0008
+#define IGP01E1000_PLHR_VALID_CHANNEL_C        0x0004
+#define IGP01E1000_PLHR_VALID_CHANNEL_B        0x0002
+#define IGP01E1000_PLHR_VALID_CHANNEL_A        0x0001
+
+/* IGP01E1000 Channel Quality Register */
+#define IGP01E1000_MSE_CHANNEL_D        0x000F
+#define IGP01E1000_MSE_CHANNEL_C        0x00F0
+#define IGP01E1000_MSE_CHANNEL_B        0x0F00
+#define IGP01E1000_MSE_CHANNEL_A        0xF000
+
+#define IGP02E1000_PM_SPD                         0x0001  /* Smart Power Down */
+#define IGP02E1000_PM_D3_LPLU                     0x0004  /* Enable LPLU in non-D0a modes */
+#define IGP02E1000_PM_D0_LPLU                     0x0002  /* Enable LPLU in D0a mode */
+
+/* IGP01E1000 DSP reset macros */
+#define DSP_RESET_ENABLE     0x0
+#define DSP_RESET_DISABLE    0x2
+#define E1000_MAX_DSP_RESETS 10
+
+/* IGP01E1000 & IGP02E1000 AGC Registers */
+
+#define IGP01E1000_AGC_LENGTH_SHIFT 7         /* Coarse - 13:11, Fine - 10:7 */
+#define IGP02E1000_AGC_LENGTH_SHIFT 9         /* Coarse - 15:13, Fine - 12:9 */
+
+/* IGP02E1000 AGC Register Length 7-bit mask (field sits at bits 15:9) */
+#define IGP02E1000_AGC_LENGTH_MASK  0x7F
+
+/* 7 bits (3 Coarse + 4 Fine) --> 128 optional values */
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE 128
+#define IGP02E1000_AGC_LENGTH_TABLE_SIZE 113
+
+/* The precision error of the cable length is +/- 10 meters */
+#define IGP01E1000_AGC_RANGE    10
+#define IGP02E1000_AGC_RANGE    15
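+
+/* Illustrative sketch, not part of the original header: an IGP02E1000
+ * AGC reading is shifted down by IGP02E1000_AGC_LENGTH_SHIFT, masked to
+ * its 7 bits and clamped to the length table before use as an index. */
+static inline uint16_t igp02_agc_index_example(uint16_t agc_reg)
+{
+    uint16_t idx = (agc_reg >> IGP02E1000_AGC_LENGTH_SHIFT) &
+                   IGP02E1000_AGC_LENGTH_MASK;
+
+    if (idx >= IGP02E1000_AGC_LENGTH_TABLE_SIZE)
+        idx = IGP02E1000_AGC_LENGTH_TABLE_SIZE - 1;
+    return idx;
+}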
+
+/* IGP01E1000 PCS Initialization register */
+/* bits 6:3 in the PCS registers store the channel polarity */
+#define IGP01E1000_PHY_POLARITY_MASK    0x0078
+
+/* IGP01E1000 GMII FIFO Register */
+#define IGP01E1000_GMII_FLEX_SPD               0x10 /* Enable flexible speed
+                                                     * on Link-Up */
+#define IGP01E1000_GMII_SPD                    0x20 /* Enable SPD */
+
+/* IGP01E1000 Analog Register */
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS       0x20D1
+#define IGP01E1000_ANALOG_FUSE_STATUS             0x20D0
+#define IGP01E1000_ANALOG_FUSE_CONTROL            0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS             0x20DE
+
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK            0xF000
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK            0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK          0x0070
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED        0x0100
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL    0x0002
+
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH        0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10            0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1               0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10              0x0500
+
+/* GG82563 PHY Specific Control Register (Page 0, Register 16) */
+#define GG82563_PSCR_DISABLE_JABBER             0x0001 /* 1=Disable Jabber */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Polarity Reversal Disabled */
+#define GG82563_PSCR_POWER_DOWN                 0x0004 /* 1=Power Down */
+#define GG82563_PSCR_COPPER_TRANSMITER_DISABLE  0x0008 /* 1=Transmitter Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI         0x0000 /* 00=Manual MDI configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX configuration */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Automatic crossover */
+#define GG82563_PSCR_ENALBE_EXTENDED_DISTANCE   0x0080 /* 1=Enable Extended Distance */
+#define GG82563_PSCR_ENERGY_DETECT_MASK         0x0300
+#define GG82563_PSCR_ENERGY_DETECT_OFF          0x0000 /* 00,01=Off */
+#define GG82563_PSCR_ENERGY_DETECT_RX           0x0200 /* 10=Sense on Rx only (Energy Detect) */
+#define GG82563_PSCR_ENERGY_DETECT_RX_TM        0x0300 /* 11=Sense and Tx NLP */
+#define GG82563_PSCR_FORCE_LINK_GOOD            0x0400 /* 1=Force Link Good */
+#define GG82563_PSCR_DOWNSHIFT_ENABLE           0x0800 /* 1=Enable Downshift */
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_MASK     0x7000
+#define GG82563_PSCR_DOWNSHIFT_COUNTER_SHIFT    12
+
+/* PHY Specific Status Register (Page 0, Register 17) */
+#define GG82563_PSSR_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR_POLARITY              0x0002 /* 1=Polarity Reversed */
+#define GG82563_PSSR_LINK                  0x0008 /* 1=Link is Up */
+#define GG82563_PSSR_ENERGY_DETECT         0x0010 /* 1=Sleep, 0=Active */
+#define GG82563_PSSR_DOWNSHIFT             0x0020 /* 1=Downshift */
+#define GG82563_PSSR_CROSSOVER_STATUS      0x0040 /* 1=MDIX, 0=MDI */
+#define GG82563_PSSR_RX_PAUSE_ENABLED      0x0100 /* 1=Receive Pause Enabled */
+#define GG82563_PSSR_TX_PAUSE_ENABLED      0x0200 /* 1=Transmit Pause Enabled */
+#define GG82563_PSSR_LINK_UP               0x0400 /* 1=Link Up */
+#define GG82563_PSSR_SPEED_DUPLEX_RESOLVED 0x0800 /* 1=Resolved */
+#define GG82563_PSSR_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR_DUPLEX                0x2000 /* 1=Full-Duplex */
+#define GG82563_PSSR_SPEED_MASK            0xC000
+#define GG82563_PSSR_SPEED_10MBPS          0x0000 /* 00=10Mbps */
+#define GG82563_PSSR_SPEED_100MBPS         0x4000 /* 01=100Mbps */
+#define GG82563_PSSR_SPEED_1000MBPS        0x8000 /* 10=1000Mbps */
+
+/* PHY Specific Status Register 2 (Page 0, Register 19) */
+#define GG82563_PSSR2_JABBER                0x0001 /* 1=Jabber */
+#define GG82563_PSSR2_POLARITY_CHANGED      0x0002 /* 1=Polarity Changed */
+#define GG82563_PSSR2_ENERGY_DETECT_CHANGED 0x0010 /* 1=Energy Detect Changed */
+#define GG82563_PSSR2_DOWNSHIFT_INTERRUPT   0x0020 /* 1=Downshift Detected */
+#define GG82563_PSSR2_MDI_CROSSOVER_CHANGE  0x0040 /* 1=Crossover Changed */
+#define GG82563_PSSR2_FALSE_CARRIER         0x0100 /* 1=False Carrier */
+#define GG82563_PSSR2_SYMBOL_ERROR          0x0200 /* 1=Symbol Error */
+#define GG82563_PSSR2_LINK_STATUS_CHANGED   0x0400 /* 1=Link Status Changed */
+#define GG82563_PSSR2_AUTO_NEG_COMPLETED    0x0800 /* 1=Auto-Neg Completed */
+#define GG82563_PSSR2_PAGE_RECEIVED         0x1000 /* 1=Page Received */
+#define GG82563_PSSR2_DUPLEX_CHANGED        0x2000 /* 1=Duplex Changed */
+#define GG82563_PSSR2_SPEED_CHANGED         0x4000 /* 1=Speed Changed */
+#define GG82563_PSSR2_AUTO_NEG_ERROR        0x8000 /* 1=Auto-Neg Error */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_10BT_POLARITY_FORCE           0x0002 /* 1=Force Negative Polarity */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_MASK       0x000C
+#define GG82563_PSCR2_1000MB_TEST_SELECT_NORMAL     0x0000 /* 00,01=Normal Operation */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_112NS      0x0008 /* 10=Select 112ns Sequence */
+#define GG82563_PSCR2_1000MB_TEST_SELECT_16NS       0x000C /* 11=Select 16ns Sequence */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG              0x2000 /* 1=Reverse Auto-Negotiation */
+#define GG82563_PSCR2_1000BT_DISABLE                0x4000 /* 1=Disable 1000BASE-T */
+#define GG82563_PSCR2_TRANSMITER_TYPE_MASK          0x8000
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_B      0x0000 /* 0=Class B */
+#define GG82563_PSCR2_TRANSMITTER_TYPE_CLASS_A      0x8000 /* 1=Class A */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK                    0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5MHZ           0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25MHZ           0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5MHZ         0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25MHZ          0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX               0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH               0x0007 /* 0 = <50M;
+                                                          1 = 50-80M;
+                                                          2 = 80-110M;
+                                                          3 = 110-140M;
+                                                          4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PHY_LEDS_EN                    0x0020 /* 1=PHY LEDs, 0=Kumeran Inband LEDs */
+#define GG82563_KMCR_FORCE_LINK_UP                  0x0040 /* 1=Force Link Up */
+#define GG82563_KMCR_SUPPRESS_SGMII_EPD_EXT         0x0080
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT_MASK     0x0400
+#define GG82563_KMCR_MDIO_BUS_SPEED_SELECT          0x0400 /* 1=6.25MHz, 0=0.8MHz */
+#define GG82563_KMCR_PASS_FALSE_CARRIER             0x0800
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE         0x0001 /* 1=Enable SERDES Electrical Idle */
+#define GG82563_PMCR_DISABLE_PORT                   0x0002 /* 1=Disable Port */
+#define GG82563_PMCR_DISABLE_SERDES                 0x0004 /* 1=Disable SERDES */
+#define GG82563_PMCR_REVERSE_AUTO_NEG               0x0008 /* 1=Enable Reverse Auto-Negotiation */
+#define GG82563_PMCR_DISABLE_1000_NON_D0            0x0010 /* 1=Disable 1000Mbps Auto-Neg in non D0 */
+#define GG82563_PMCR_DISABLE_1000                   0x0020 /* 1=Disable 1000Mbps Auto-Neg Always */
+#define GG82563_PMCR_REVERSE_AUTO_NEG_D0A           0x0040 /* 1=Enable D0a Reverse Auto-Negotiation */
+#define GG82563_PMCR_FORCE_POWER_STATE              0x0080 /* 1=Force Power State */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_MASK    0x0300
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_DR      0x0000 /* 00=Dr */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0U     0x0100 /* 01=D0u */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D0A     0x0200 /* 10=D0a */
+#define GG82563_PMCR_PROGRAMMED_POWER_STATE_D3      0x0300 /* 11=D3 */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING                     0x0010 /* Disable Padding Use */
+
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID  0x01410C50
+#define M88E1000_I_PHY_ID  0x01410C30
+#define M88E1011_I_PHY_ID  0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1000_12_PHY_ID M88E1000_E_PHY_ID
+#define M88E1000_14_PHY_ID M88E1000_E_PHY_ID
+#define M88E1011_I_REV_4   0x04
+#define M88E1111_I_PHY_ID  0x01410CC0
+#define L1LXT971A_PHY_ID   0x001378E0
+#define GG82563_E_PHY_ID   0x01410CA0
+
+
+/* Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define PHY_PAGE_SHIFT        5
+#define PHY_REG(page, reg)    \
+        (((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
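+
+/* Worked example (illustrative only, assuming MAX_PHY_REG_ADDRESS is the
+ * usual 5-bit register mask 0x1F): PHY_REG(769, 17) expands to
+ * (769 << 5) | (17 & 0x1F) = 0x6020 | 0x11 = 0x6031, i.e. page 769,
+ * register 17, which is IGP3_PHY_PORT_CTRL below.
+ */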
+
+#define IGP3_PHY_PORT_CTRL           \
+        PHY_REG(769, 17) /* Port General Configuration */
+#define IGP3_PHY_RATE_ADAPT_CTRL \
+        PHY_REG(769, 25) /* Rate Adapter Control Register */
+
+#define IGP3_KMRN_FIFO_CTRL_STATS \
+        PHY_REG(770, 16) /* KMRN FIFO's control/status register */
+#define IGP3_KMRN_POWER_MNG_CTRL \
+        PHY_REG(770, 17) /* KMRN Power Management Control Register */
+#define IGP3_KMRN_INBAND_CTRL \
+        PHY_REG(770, 18) /* KMRN Inband Control Register */
+#define IGP3_KMRN_DIAG \
+        PHY_REG(770, 19) /* KMRN Diagnostic register */
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS 0x0002 /* RX PCS is not synced */
+#define IGP3_KMRN_ACK_TIMEOUT \
+        PHY_REG(770, 20) /* KMRN Acknowledge Timeouts register */
+
+#define IGP3_VR_CTRL \
+        PHY_REG(776, 18) /* Voltage regulator control register */
+#define IGP3_VR_CTRL_MODE_SHUT       0x0200 /* Enter powerdown, shutdown VRs */
+
+#define IGP3_CAPABILITY \
+        PHY_REG(776, 19) /* IGP3 Capability Register */
+
+/* Capabilities for SKU Control  */
+#define IGP3_CAP_INITIATE_TEAM       0x0001 /* Able to initiate a team */
+#define IGP3_CAP_WFM                 0x0002 /* Support WoL and PXE */
+#define IGP3_CAP_ASF                 0x0004 /* Support ASF */
+#define IGP3_CAP_LPLU                0x0008 /* Support Low Power Link Up */
+#define IGP3_CAP_DC_AUTO_SPEED       0x0010 /* Support AC/DC Auto Link Speed */
+#define IGP3_CAP_SPD                 0x0020 /* Support Smart Power Down */
+#define IGP3_CAP_MULT_QUEUE          0x0040 /* Support 2 tx & 2 rx queues */
+#define IGP3_CAP_RSS                 0x0080 /* Support RSS */
+#define IGP3_CAP_8021PQ              0x0100 /* Support 802.1Q & 802.1p */
+#define IGP3_CAP_AMT_CB              0x0200 /* Support active manageability and circuit breaker */
+
+#define IGP3_PPC_JORDAN_EN           0x0001
+#define IGP3_PPC_JORDAN_GIGA_SPEED   0x0002
+
+#define IGP3_KMRN_PMC_EE_IDLE_LINK_DIS         0x0001
+#define IGP3_KMRN_PMC_K0S_ENTRY_LATENCY_MASK   0x001E
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_GIGA        0x0020
+#define IGP3_KMRN_PMC_K0S_MODE1_EN_100         0x0040
+
+#define IGP3E1000_PHY_MISC_CTRL                0x1B   /* Misc. Ctrl register */
+#define IGP3_PHY_MISC_DUPLEX_MANUAL_SET        0x1000 /* Duplex Manual Set */
+
+#define IGP3_KMRN_EXT_CTRL  PHY_REG(770, 18)
+#define IGP3_KMRN_EC_DIS_INBAND    0x0080
+
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330 /* 10/100 PHY */
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL   0x10  /* 100BaseTx Extended Status, Control and Address */
+#define IFE_PHY_SPECIAL_CONTROL           0x11  /* 100BaseTx PHY special control register */
+#define IFE_PHY_RCV_FALSE_CARRIER         0x13  /* 100BaseTx Receive False Carrier Counter */
+#define IFE_PHY_RCV_DISCONNECT            0x14  /* 100BaseTx Receive Disconnect Counter */
+#define IFE_PHY_RCV_ERROT_FRAME           0x15  /* 100BaseTx Receive Error Frame Counter */
+#define IFE_PHY_RCV_SYMBOL_ERR            0x16  /* Receive Symbol Error Counter */
+#define IFE_PHY_PREM_EOF_ERR              0x17  /* 100BaseTx Receive Premature End Of Frame Error Counter */
+#define IFE_PHY_RCV_EOF_ERR               0x18  /* 10BaseT Receive End Of Frame Error Counter */
+#define IFE_PHY_TX_JABBER_DETECT          0x19  /* 10BaseT Transmit Jabber Detect Counter */
+#define IFE_PHY_EQUALIZER                 0x1A  /* PHY Equalizer Control and Status */
+#define IFE_PHY_SPECIAL_CONTROL_LED       0x1B  /* PHY special control and LED configuration */
+#define IFE_PHY_MDIX_CONTROL              0x1C  /* MDI/MDI-X Control register */
+#define IFE_PHY_HWI_CONTROL               0x1D  /* Hardware Integrity Control (HWI) */
+
+#define IFE_PESC_REDUCED_POWER_DOWN_DISABLE  0x2000  /* Default 1 = Disable auto reduced power down */
+#define IFE_PESC_100BTX_POWER_DOWN           0x0400  /* Indicates the power state of 100BASE-TX */
+#define IFE_PESC_10BTX_POWER_DOWN            0x0200  /* Indicates the power state of 10BASE-T */
+#define IFE_PESC_POLARITY_REVERSED           0x0100  /* Indicates 10BASE-T polarity */
+#define IFE_PESC_PHY_ADDR_MASK               0x007C  /* Bit 6:2 for sampled PHY address */
+#define IFE_PESC_SPEED                       0x0002  /* Auto-negotiation speed result 1=100Mbs, 0=10Mbs */
+#define IFE_PESC_DUPLEX                      0x0001  /* Auto-negotiation duplex result 1=Full, 0=Half */
+#define IFE_PESC_POLARITY_REVERSED_SHIFT     8
+
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN   0x0100  /* 1 = Dynamic Power Down disabled */
+#define IFE_PSC_FORCE_POLARITY               0x0020  /* 1=Reversed Polarity, 0=Normal */
+#define IFE_PSC_AUTO_POLARITY_DISABLE        0x0010  /* 1=Auto Polarity Disabled, 0=Enabled */
+#define IFE_PSC_JABBER_FUNC_DISABLE          0x0001  /* 1=Jabber Disabled, 0=Normal Jabber Operation */
+#define IFE_PSC_FORCE_POLARITY_SHIFT         5
+#define IFE_PSC_AUTO_POLARITY_DISABLE_SHIFT  4
+
+#define IFE_PMC_AUTO_MDIX                    0x0080  /* 1=enable MDI/MDI-X feature, default 0=disabled */
+#define IFE_PMC_FORCE_MDIX                   0x0040  /* 1=force MDIX-X, 0=force MDI */
+#define IFE_PMC_MDIX_STATUS                  0x0020  /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_AUTO_MDIX_COMPLETE           0x0010  /* Resolution algorithm is completed */
+#define IFE_PMC_MDIX_MODE_SHIFT              6
+#define IFE_PHC_MDIX_RESET_ALL_MASK          0x0000  /* Disable auto MDI-X */
+
+#define IFE_PHC_HWI_ENABLE                   0x8000  /* Enable the HWI feature */
+#define IFE_PHC_ABILITY_CHECK                0x4000  /* 1= Test Passed, 0=failed */
+#define IFE_PHC_TEST_EXEC                    0x2000  /* PHY launch test pulses on the wire */
+#define IFE_PHC_HIGHZ                        0x0200  /* 1 = Open Circuit */
+#define IFE_PHC_LOWZ                         0x0400  /* 1 = Short Circuit */
+#define IFE_PHC_LOW_HIGH_Z_MASK              0x0600  /* Mask for indication type of problem on the line */
+#define IFE_PHC_DISTANCE_MASK                0x01FF  /* Mask for distance to the cable problem, in 80cm granularity */
+#define IFE_PHC_RESET_ALL_MASK               0x0000  /* Disable HWI */
+#define IFE_PSCL_PROBE_MODE                  0x0020  /* LED Probe mode */
+#define IFE_PSCL_PROBE_LEDS_OFF              0x0006  /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON               0x0007  /* Force LEDs 0 and 2 on */
+
+#define ICH8_FLASH_COMMAND_TIMEOUT           500   /* 500 ms, should be adjusted */
+#define ICH8_FLASH_CYCLE_REPEAT_COUNT        10    /* 10 cycles, should be adjusted */
+#define ICH8_FLASH_SEG_SIZE_256              256
+#define ICH8_FLASH_SEG_SIZE_4K               4096
+#define ICH8_FLASH_SEG_SIZE_64K              65536
+
+#define ICH8_CYCLE_READ                      0x0
+#define ICH8_CYCLE_RESERVED                  0x1
+#define ICH8_CYCLE_WRITE                     0x2
+#define ICH8_CYCLE_ERASE                     0x3
+
+#define ICH8_FLASH_GFPREG   0x0000
+#define ICH8_FLASH_HSFSTS   0x0004
+#define ICH8_FLASH_HSFCTL   0x0006
+#define ICH8_FLASH_FADDR    0x0008
+#define ICH8_FLASH_FDATA0   0x0010
+#define ICH8_FLASH_FRACC    0x0050
+#define ICH8_FLASH_FREG0    0x0054
+#define ICH8_FLASH_FREG1    0x0058
+#define ICH8_FLASH_FREG2    0x005C
+#define ICH8_FLASH_FREG3    0x0060
+#define ICH8_FLASH_FPR0     0x0074
+#define ICH8_FLASH_FPR1     0x0078
+#define ICH8_FLASH_SSFSTS   0x0090
+#define ICH8_FLASH_SSFCTL   0x0092
+#define ICH8_FLASH_PREOP    0x0094
+#define ICH8_FLASH_OPTYPE   0x0096
+#define ICH8_FLASH_OPMENU   0x0098
+
+#define ICH8_FLASH_REG_MAPSIZE      0x00A0
+#define ICH8_FLASH_SECTOR_SIZE      4096
+#define ICH8_GFPREG_BASE_MASK       0x1FFF
+#define ICH8_FLASH_LINEAR_ADDR_MASK 0x00FFFFFF
+
+/* ICH8 GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+    struct ich8_hsfsts {
+#ifdef E1000_BIG_ENDIAN
+        uint16_t reserved2      :6;
+        uint16_t fldesvalid     :1;
+        uint16_t flockdn        :1;
+        uint16_t flcdone        :1;
+        uint16_t flcerr         :1;
+        uint16_t dael           :1;
+        uint16_t berasesz       :2;
+        uint16_t flcinprog      :1;
+        uint16_t reserved1      :2;
+#else
+        uint16_t flcdone        :1;   /* bit 0 Flash Cycle Done */
+        uint16_t flcerr         :1;   /* bit 1 Flash Cycle Error */
+        uint16_t dael           :1;   /* bit 2 Direct Access error Log */
+        uint16_t berasesz       :2;   /* bit 4:3 Block/Sector Erase Size */
+        uint16_t flcinprog      :1;   /* bit 5 flash SPI cycle in Progress */
+        uint16_t reserved1      :2;   /* bit 7:6 Reserved */
+        uint16_t reserved2      :6;   /* bit 13:8 Reserved */
+        uint16_t fldesvalid     :1;   /* bit 14 Flash Descriptor Valid */
+        uint16_t flockdn        :1;   /* bit 15 Flash Configuration Lock-Down */
+#endif
+    } hsf_status;
+    uint16_t regval;
+};
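+
+/* Sketch of typical use (illustrative; the 16-bit read accessor below is
+ * an assumption, not taken from this driver): read the register into the
+ * union, then test the decoded bit-fields, e.g.
+ *
+ *     union ich8_hws_flash_status hsfsts;
+ *     hsfsts.regval = readw(hw->flash_address + ICH8_FLASH_HSFSTS);
+ *     if (hsfsts.hsf_status.flcinprog)
+ *             return -EBUSY;       a flash cycle is still in progress
+ */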
+
+/* ICH8 GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+    struct ich8_hsflctl {
+#ifdef E1000_BIG_ENDIAN
+        uint16_t fldbcount      :2;
+        uint16_t flockdn        :6;
+        uint16_t flcgo          :1;
+        uint16_t flcycle        :2;
+        uint16_t reserved       :5;
+#else
+        uint16_t flcgo          :1;   /* 0 Flash Cycle Go */
+        uint16_t flcycle        :2;   /* 2:1 Flash Cycle */
+        uint16_t reserved       :5;   /* 7:3 Reserved  */
+        uint16_t fldbcount      :2;   /* 9:8 Flash Data Byte Count */
+        uint16_t flockdn        :6;   /* 15:10 Reserved */
+#endif
+    } hsf_ctrl;
+    uint16_t regval;
+};
+
+/* ICH8 Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+    struct ich8_flracc {
+#ifdef E1000_BIG_ENDIAN
+        uint32_t gmwag          :8;
+        uint32_t gmrag          :8;
+        uint32_t grwa           :8;
+        uint32_t grra           :8;
+#else
+        uint32_t grra           :8;   /* 0:7 GbE region Read Access */
+        uint32_t grwa           :8;   /* 8:15 GbE region Write Access */
+        uint32_t gmrag          :8;   /* 23:16 GbE Master Read Access Grant  */
+        uint32_t gmwag          :8;   /* 31:24 GbE Master Write Access Grant */
+#endif
+    } hsf_flregacc;
+    uint32_t regval;
+};
+
+/* Miscellaneous PHY bit definitions. */
+#define PHY_PREAMBLE        0xFFFFFFFF
+#define PHY_SOF             0x01
+#define PHY_OP_READ         0x02
+#define PHY_OP_WRITE        0x01
+#define PHY_TURNAROUND      0x02
+#define PHY_PREAMBLE_SIZE   32
+#define MII_CR_SPEED_1000   0x0040
+#define MII_CR_SPEED_100    0x2000
+#define MII_CR_SPEED_10     0x0000
+#define E1000_PHY_ADDRESS   0x01
+#define PHY_AUTO_NEG_TIME   45  /* 4.5 Seconds */
+#define PHY_FORCE_TIME      20  /* 2.0 Seconds */
+#define PHY_REVISION_MASK   0xFFFFFFF0
+#define DEVICE_SPEED_MASK   0x00000300  /* Device Ctrl Reg Speed Mask */
+#define REG4_SPEED_MASK     0x01E0
+#define REG9_SPEED_MASK     0x0300
+#define ADVERTISE_10_HALF   0x0001
+#define ADVERTISE_10_FULL   0x0002
+#define ADVERTISE_100_HALF  0x0004
+#define ADVERTISE_100_FULL  0x0008
+#define ADVERTISE_1000_HALF 0x0010
+#define ADVERTISE_1000_FULL 0x0020
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT 0x002F  /* Everything but 1000-Half */
+#define AUTONEG_ADVERTISE_10_100_ALL    0x000F /* All 10/100 speeds */
+#define AUTONEG_ADVERTISE_10_ALL        0x0003 /* 10Mbps Full & Half speeds */
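+
+/* Worked example: AUTONEG_ADVERTISE_SPEED_DEFAULT is the OR of every
+ * ADVERTISE_* bit above except ADVERTISE_1000_HALF:
+ *     0x0001 | 0x0002 | 0x0004 | 0x0008 | 0x0020 = 0x002F
+ */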
+
+#endif /* _E1000_HW_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c
new file mode 100644
index 0000000..726b38a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_main.c
@@ -0,0 +1,3184 @@
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* Change Log
+ *
+ * Port to rtnet (0.9.3) by Mathias Koehrer. Base version: e1000-7.1.9
+ *             8-Aug-2006
+ *
+ * 7.0.36      10-Mar-2006
+ *   o fixups for compilation issues on older kernels
+ * 7.0.35      3-Mar-2006
+ * 7.0.34
+ *   o Major performance fixes by understanding relationship of rx_buffer_len
+ *     to window size growth.  _ps and legacy receive paths changed
+ *   o merge with kernel changes
+ *   o legacy receive path went back to single descriptor model for jumbos
+ * 7.0.33      3-Feb-2006
+ *   o Added another fix for the pass false carrier bit
+ * 7.0.32      24-Jan-2006
+ *   o Need to rebuild with new version number for the pass false carrier
+ *     fix in e1000_hw.c
+ * 7.0.30      18-Jan-2006
+ *   o fixup for tso workaround to disable it for pci-x
+ *   o fix mem leak on 82542
+ *   o fixes for 10 Mb/s connections and incorrect stats
+ * 7.0.28      01/06/2006
+ *   o hardware workaround to only set "speed mode" bit for 1G link.
+ * 7.0.26      12/23/2005
+ *   o wake on lan support modified for device ID 10B5
+ *   o fix dhcp + vlan issue not making it to the iAMT firmware
+ * 7.0.24      12/9/2005
+ *   o New hardware support for the Gigabit NIC embedded in the south bridge
+ *   o Fixes to the recycling logic (skb->tail) from IBM LTC
+ * 6.3.7	11/18/2005
+ *   o Honor eeprom setting for enabling/disabling Wake On Lan
+ * 6.3.5	11/17/2005
+ *   o Fix memory leak in rx ring handling for PCI Express adapters
+ * 6.3.4	11/8/05
+ *   o Patch from Jesper Juhl to remove redundant NULL checks for kfree
+ * 6.3.2	9/20/05
+ *   o Render logic that sets/resets DRV_LOAD as inline functions to
+ *     avoid code replication. If f/w is AMT then set DRV_LOAD only when
+ *     network interface is open.
+ *   o Handle DRV_LOAD set/reset in cases where AMT uses VLANs.
+ *   o Adjust PBA partitioning for Jumbo frames using MTU size and not
+ *     rx_buffer_len
+ * 6.3.1	9/19/05
+ *   o Use adapter->tx_timeout_factor in Tx Hung Detect logic
+ *      (e1000_clean_tx_irq)
+ *   o Support for 8086:10B5 device (Quad Port)
+ */
+
+char e1000_driver_name[] = "rt_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+#ifndef CONFIG_E1000_NAPI
+#define DRIVERNAPI
+#else
+#define DRIVERNAPI "-NAPI"
+#endif
+#define DRV_VERSION "7.1.9"DRIVERNAPI
+char e1000_driver_version[] = DRV_VERSION;
+static char e1000_copyright[] = "Copyright (c) 1999-2006 Intel Corporation.";
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+static struct pci_device_id e1000_pci_tbl[] = {
+	INTEL_E1000_ETHERNET_DEVICE(0x1000),
+	INTEL_E1000_ETHERNET_DEVICE(0x1001),
+	INTEL_E1000_ETHERNET_DEVICE(0x1004),
+	INTEL_E1000_ETHERNET_DEVICE(0x1008),
+	INTEL_E1000_ETHERNET_DEVICE(0x1009),
+	INTEL_E1000_ETHERNET_DEVICE(0x100C),
+	INTEL_E1000_ETHERNET_DEVICE(0x100D),
+	INTEL_E1000_ETHERNET_DEVICE(0x100E),
+	INTEL_E1000_ETHERNET_DEVICE(0x100F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1010),
+	INTEL_E1000_ETHERNET_DEVICE(0x1011),
+	INTEL_E1000_ETHERNET_DEVICE(0x1012),
+	INTEL_E1000_ETHERNET_DEVICE(0x1013),
+	INTEL_E1000_ETHERNET_DEVICE(0x1014),
+	INTEL_E1000_ETHERNET_DEVICE(0x1015),
+	INTEL_E1000_ETHERNET_DEVICE(0x1016),
+	INTEL_E1000_ETHERNET_DEVICE(0x1017),
+	INTEL_E1000_ETHERNET_DEVICE(0x1018),
+	INTEL_E1000_ETHERNET_DEVICE(0x1019),
+	INTEL_E1000_ETHERNET_DEVICE(0x101A),
+	INTEL_E1000_ETHERNET_DEVICE(0x101D),
+	INTEL_E1000_ETHERNET_DEVICE(0x101E),
+	INTEL_E1000_ETHERNET_DEVICE(0x1026),
+	INTEL_E1000_ETHERNET_DEVICE(0x1027),
+	INTEL_E1000_ETHERNET_DEVICE(0x1028),
+	INTEL_E1000_ETHERNET_DEVICE(0x1049),
+	INTEL_E1000_ETHERNET_DEVICE(0x104A),
+	INTEL_E1000_ETHERNET_DEVICE(0x104B),
+	INTEL_E1000_ETHERNET_DEVICE(0x104C),
+	INTEL_E1000_ETHERNET_DEVICE(0x104D),
+	INTEL_E1000_ETHERNET_DEVICE(0x105E),
+	INTEL_E1000_ETHERNET_DEVICE(0x105F),
+	INTEL_E1000_ETHERNET_DEVICE(0x1060),
+	INTEL_E1000_ETHERNET_DEVICE(0x1075),
+	INTEL_E1000_ETHERNET_DEVICE(0x1076),
+	INTEL_E1000_ETHERNET_DEVICE(0x1077),
+	INTEL_E1000_ETHERNET_DEVICE(0x1078),
+	INTEL_E1000_ETHERNET_DEVICE(0x1079),
+	INTEL_E1000_ETHERNET_DEVICE(0x107A),
+	INTEL_E1000_ETHERNET_DEVICE(0x107B),
+	INTEL_E1000_ETHERNET_DEVICE(0x107C),
+	INTEL_E1000_ETHERNET_DEVICE(0x107D),
+	INTEL_E1000_ETHERNET_DEVICE(0x107E),
+	INTEL_E1000_ETHERNET_DEVICE(0x107F),
+	INTEL_E1000_ETHERNET_DEVICE(0x108A),
+	INTEL_E1000_ETHERNET_DEVICE(0x108B),
+	INTEL_E1000_ETHERNET_DEVICE(0x108C),
+	INTEL_E1000_ETHERNET_DEVICE(0x1096),
+	INTEL_E1000_ETHERNET_DEVICE(0x1098),
+	INTEL_E1000_ETHERNET_DEVICE(0x1099),
+	INTEL_E1000_ETHERNET_DEVICE(0x109A),
+	INTEL_E1000_ETHERNET_DEVICE(0x10A4),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B5),
+	INTEL_E1000_ETHERNET_DEVICE(0x10B9),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BA),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BB),
+	INTEL_E1000_ETHERNET_DEVICE(0x10BC),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C4),
+	INTEL_E1000_ETHERNET_DEVICE(0x10C5),
+	/* required last entry */
+	{0,}
+};
+
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+int e1000_up(struct e1000_adapter *adapter);
+void e1000_down(struct e1000_adapter *adapter);
+void e1000_reinit_locked(struct e1000_adapter *adapter);
+void e1000_reset(struct e1000_adapter *adapter);
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx);
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *txdr);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rxdr);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+			     struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+			     struct e1000_rx_ring *rx_ring);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct rtnet_device *netdev);
+static int e1000_close(struct rtnet_device *netdev);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring);
+static void e1000_set_multi(struct rtnet_device *netdev);
+static void e1000_update_phy_info_task(struct work_struct *work);
+static void e1000_watchdog(struct work_struct *work);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
+static int e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev);
+static int e1000_intr(rtdm_irq_t *irq_handle);
+static boolean_t e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+static boolean_t e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    nanosecs_abs_t *time_stamp);
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
+void e1000_set_ethtool_ops(struct rtnet_device *netdev);
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct rtskb *skb);
+
+/* Exported from other modules */
+
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+static struct pci_driver e1000_driver = {
+	.name     = e1000_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = e1000_remove,
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver for rtnet");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+static int local_debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param_named(debug, local_debug, int, 0);
+MODULE_PARM_DESC(debug, "Debug level (0=none,...,16=all)");
+
+
+#define MAX_UNITS           8
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+
+
+#define kmalloc(a,b) rtdm_malloc(a)
+#define vmalloc(a) rtdm_malloc(a)
+#define kfree(a) rtdm_free(a)
+#define vfree(a) rtdm_free(a)
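+
+/* Note: the wrappers above route every kmalloc/vmalloc/kfree/vfree in this
+ * file to the RTDM real-time heap; the GFP flags argument of kmalloc() is
+ * silently dropped, so kmalloc(size, GFP_KERNEL) below really means
+ * rtdm_malloc(size). */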
+
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+
+static int __init
+e1000_init_module(void)
+{
+	int ret;
+	printk(KERN_INFO "%s - version %s\n",
+	       e1000_driver_string, e1000_driver_version);
+
+	printk(KERN_INFO "%s\n", e1000_copyright);
+
+	ret = pci_register_driver(&e1000_driver);
+	return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+
+static void __exit
+e1000_exit_module(void)
+{
+	pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int flags, err = 0;
+
+	flags = RTDM_IRQTYPE_SHARED;
+#ifdef CONFIG_PCI_MSI
+	if (adapter->hw.mac_type > e1000_82547_rev_2) {
+		adapter->have_msi = TRUE;
+		if ((err = pci_enable_msi(adapter->pdev))) {
+			DPRINTK(PROBE, ERR,
+			 "Unable to allocate MSI interrupt Error: %d\n", err);
+			adapter->have_msi = FALSE;
+		}
+	}
+	if (adapter->have_msi)
+		flags = 0;
+#endif
+	rt_stack_connect(netdev, &STACK_manager);
+	if ((err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+				    e1000_intr, flags, netdev->name, netdev)))
+		DPRINTK(PROBE, ERR,
+		    "Unable to allocate interrupt Error: %d\n", err);
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	// struct rtnet_device *netdev = adapter->netdev;
+
+	rtdm_irq_free(&adapter->irq_handle);
+
+#ifdef CONFIG_PCI_MSI
+	if (adapter->have_msi)
+		pci_disable_msi(adapter->pdev);
+#endif
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	atomic_inc(&adapter->irq_sem);
+	E1000_WRITE_REG(&adapter->hw, IMC, ~0);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+		E1000_WRITE_REG(&adapter->hw, IMS, IMS_ENABLE_MASK);
+		E1000_WRITE_FLUSH(&adapter->hw);
+	}
+}
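+
+/* Illustrative nesting example: irq_sem makes disable/enable re-entrant.
+ * Starting from 0 (interrupts enabled):
+ *
+ *     e1000_irq_disable();   irq_sem 0 -> 1, IMC masks all causes
+ *     e1000_irq_disable();   irq_sem 1 -> 2
+ *     e1000_irq_enable();    irq_sem 2 -> 1, IMS not rewritten yet
+ *     e1000_irq_enable();    irq_sem 1 -> 0, interrupts enabled again
+ */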
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+
+static void
+e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+	uint32_t extcnf;
+
+	/* Let firmware take over control of the h/w */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_ich8lan:
+		extcnf = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				extcnf & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|FWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+
+static void
+e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	uint32_t ctrl_ext;
+	uint32_t swsm;
+	uint32_t extcnf;
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac_type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, SWSM);
+		E1000_WRITE_REG(&adapter->hw, SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_ich8lan:
+		extcnf = E1000_READ_REG(&adapter->hw, EXTCNF_CTRL);
+		E1000_WRITE_REG(&adapter->hw, EXTCNF_CTRL,
+				extcnf | E1000_EXTCNF_CTRL_SWFLAG);
+		break;
+	default:
+		break;
+	}
+}
+
+int
+e1000_up(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int i;
+
+	/* hardware has been reset, we need to reload some things */
+
+	e1000_set_multi(netdev);
+
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
+	}
+
+	// TODO makoehre adapter->tx_queue_len = netdev->tx_queue_len;
+
+	schedule_delayed_work(&adapter->watchdog_task, 1);
+
+	e1000_irq_enable(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000_reset ***
+ *
+ **/
+
+static void e1000_power_up_phy(struct e1000_adapter *adapter)
+{
+	uint16_t mii_reg = 0;
+
+	/* Just clear the power down bit to wake the phy back up */
+	if (adapter->hw.media_type == e1000_media_type_copper) {
+		/* according to the manual, the phy will retain its
+		 * settings across a power-down/up cycle */
+		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+		mii_reg &= ~MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+	}
+}
+
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+	boolean_t mng_mode_enabled = (adapter->hw.mac_type >= e1000_82571) &&
+				      e1000_check_mng_mode(&adapter->hw);
+	/* Power down the PHY so no link is implied when interface is down *
+	 * The PHY cannot be powered down if any of the following is TRUE *
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && adapter->hw.mac_type >= e1000_82540 &&
+	   adapter->hw.mac_type != e1000_ich8lan &&
+	   adapter->hw.media_type == e1000_media_type_copper &&
+	   !(E1000_READ_REG(&adapter->hw, MANC) & E1000_MANC_SMBUS_EN) &&
+	   !mng_mode_enabled &&
+	   !e1000_check_phy_reset_block(&adapter->hw)) {
+		uint16_t mii_reg = 0;
+		e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &mii_reg);
+		mii_reg |= MII_CR_POWER_DOWN;
+		e1000_write_phy_reg(&adapter->hw, PHY_CTRL, mii_reg);
+		mdelay(1);
+	}
+}
+
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{
+	cancel_work_sync(&adapter->reset_task);
+	cancel_delayed_work_sync(&adapter->watchdog_task);
+	cancel_delayed_work_sync(&adapter->phy_info_task);
+	cancel_delayed_work_sync(&adapter->fifo_stall_task);
+}
+
+void
+e1000_down(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+
+	e1000_irq_disable(adapter);
+
+	e1000_down_and_stop(adapter);
+
+	// TODO makoehre     netdev->tx_queue_len = adapter->tx_queue_len;
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+	rtnetif_carrier_off(netdev);
+	rtnetif_stop_queue(netdev);
+
+	e1000_reset(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
+}
+
+void
+e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	if (test_and_set_bit(__E1000_RESETTING, &adapter->flags))
+		msleep(1);
+	e1000_down(adapter);
+	e1000_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->flags);
+}
+
+void
+e1000_reset(struct e1000_adapter *adapter)
+{
+	uint32_t pba;
+	uint16_t fc_high_water_mark = E1000_FC_HIGH_DIFF;
+
+	/* Repartition Pba for greater than 9k mtu
+	 * To take effect CTRL.RST is required.
+	 */
+
+	switch (adapter->hw.mac_type) {
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		pba = E1000_PBA_30K;
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		pba = E1000_PBA_38K;
+		break;
+	case e1000_82573:
+		pba = E1000_PBA_12K;
+		break;
+	case e1000_ich8lan:
+		pba = E1000_PBA_8K;
+		break;
+	default:
+		pba = E1000_PBA_48K;
+		break;
+	}
+
+	if ((adapter->hw.mac_type != e1000_82573) &&
+	   (adapter->netdev->mtu > E1000_RXBUFFER_8192))
+		pba -= 8; /* allocate more FIFO for Tx */
+
+
+	if (adapter->hw.mac_type == e1000_82547) {
+		adapter->tx_fifo_head = 0;
+		adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+		adapter->tx_fifo_size =
+			(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+		atomic_set(&adapter->tx_fifo_stall, 0);
+	}
+
+	E1000_WRITE_REG(&adapter->hw, PBA, pba);
+
+	/* flow control settings */
+	/* Set the FC high water mark to 90% of the FIFO size.
+	 * Required to clear last 3 LSB */
+	fc_high_water_mark = ((pba * 9216)/10) & 0xFFF8;
+	/* We can't use 90% on small FIFOs because the remainder
+	 * would be less than 1 full frame.  In this case, we size
+	 * it to allow at least a full frame above the high water
+	 * mark. */
+	if (pba < E1000_PBA_16K)
+		fc_high_water_mark = (pba * 1024) - 1600;
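+	/*
+	 * Worked example (illustrative): with the default pba of
+	 * E1000_PBA_48K (pba == 48, in KB units), the formula above gives
+	 * (48 * 9216) / 10 = 44236, masked down to 44232 (0xACC8) bytes,
+	 * and the small-FIFO branch is skipped since 48 >= 16.
+	 */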
+
+	adapter->hw.fc_high_water = fc_high_water_mark;
+	adapter->hw.fc_low_water = fc_high_water_mark - 8;
+	if (adapter->hw.mac_type == e1000_80003es2lan)
+		adapter->hw.fc_pause_time = 0xFFFF;
+	else
+		adapter->hw.fc_pause_time = E1000_FC_PAUSE_TIME;
+	adapter->hw.fc_send_xon = 1;
+	adapter->hw.fc = adapter->hw.original_fc;
+
+	/* Allow time for pending master requests to run */
+	e1000_reset_hw(&adapter->hw);
+	if (adapter->hw.mac_type >= e1000_82544)
+		E1000_WRITE_REG(&adapter->hw, WUC, 0);
+	if (e1000_init_hw(&adapter->hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	E1000_WRITE_REG(&adapter->hw, VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	E1000_WRITE_REG(&adapter->hw, AIT, 0);  // Set adaptive interframe spacing to zero
+
+	// e1000_reset_adaptive(&adapter->hw);
+	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+
+	if (!adapter->smart_power_down &&
+	    (adapter->hw.mac_type == e1000_82571 ||
+	     adapter->hw.mac_type == e1000_82572)) {
+		uint16_t phy_data = 0;
+		/* speed up time to link by disabling smart power down, ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed */
+		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				   &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				    phy_data);
+	}
+
+}
+
+static void
+e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter =
+		container_of(work, struct e1000_adapter, reset_task);
+
+	e1000_reinit_locked(adapter);
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+
+static int e1000_probe(struct pci_dev *pdev,
+	    const struct pci_device_id *ent)
+{
+	struct rtnet_device *netdev;
+	struct e1000_adapter *adapter;
+	unsigned long mmio_start, mmio_len;
+	unsigned long flash_start, flash_len;
+
+	static int cards_found = 0;
+	static int e1000_ksp3_port_a = 0; /* global ksp3 port a indication */
+	int i, err;
+	uint16_t eeprom_data;
+	uint16_t eeprom_apme_mask = E1000_EEPROM_APME;
+
+	if (cards[cards_found++] == 0)
+		return -ENODEV;
+
+	if ((err = pci_enable_device(pdev)))
+		return err;
+
+	if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64))) ||
+	    (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64)))) {
+		if ((err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) &&
+		    (err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(32)))) {
+			E1000_ERR("No usable DMA configuration, aborting\n");
+			return err;
+		}
+	}
+
+	if ((err = pci_request_regions(pdev, e1000_driver_name)))
+		return err;
+
+	pci_set_master(pdev);
+
+	netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter), 48);
+	if (!netdev) {
+		err = -ENOMEM;
+		goto err_alloc_etherdev;
+	}
+	memset(netdev->priv, 0, sizeof(struct e1000_adapter));
+
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+
+
+	// SET_NETDEV_DEV(netdev, &pdev->dev);
+	netdev->vers = RTDEV_VERS_2_0;
+	netdev->sysbind = &pdev->dev;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev->priv;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.back = adapter;
+	adapter->msg_enable = (1 << local_debug) - 1;
+
+	mmio_start = pci_resource_start(pdev, BAR_0);
+	mmio_len = pci_resource_len(pdev, BAR_0);
+
+	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	if (!adapter->hw.hw_addr) {
+		err = -EIO;
+		goto err_ioremap;
+	}
+
+	for (i = BAR_1; i <= BAR_5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+			adapter->hw.io_base = pci_resource_start(pdev, i);
+			break;
+		}
+	}
+
+	netdev->open = &e1000_open;
+	netdev->stop = &e1000_close;
+	netdev->hard_start_xmit = &e1000_xmit_frame;
+	// netdev->get_stats = &e1000_get_stats;
+	// netdev->set_multicast_list = &e1000_set_multi;
+	// netdev->set_mac_address = &e1000_set_mac;
+	// netdev->change_mtu = &e1000_change_mtu;
+	// netdev->do_ioctl = &e1000_ioctl;
+	// e1000_set_ethtool_ops(netdev);
+	strcpy(netdev->name, pci_name(pdev));
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+	netdev->base_addr = adapter->hw.io_base;
+
+	adapter->bd_number = cards_found - 1;
+
+	/* setup the private structure */
+
+	if ((err = e1000_sw_init(adapter)))
+		goto err_sw_init;
+
+	/* Flash BAR mapping must happen after e1000_sw_init
+	 * because it depends on mac_type */
+	if ((adapter->hw.mac_type == e1000_ich8lan) &&
+	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		flash_start = pci_resource_start(pdev, 1);
+		flash_len = pci_resource_len(pdev, 1);
+		adapter->hw.flash_address = ioremap(flash_start, flash_len);
+		if (!adapter->hw.flash_address) {
+			err = -EIO;
+			goto err_flashmap;
+		}
+	}
+
+	if ((err = e1000_check_phy_reset_block(&adapter->hw)))
+		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+	/* if ksp3, indicate if it's port a being setup */
+	if (pdev->device == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 &&
+			e1000_ksp3_port_a == 0)
+		adapter->ksp3_port_a = 1;
+	e1000_ksp3_port_a++;
+	/* Reset for multiple KSP3 adapters */
+	if (e1000_ksp3_port_a == 4)
+		e1000_ksp3_port_a = 0;
+
+	netdev->features |= NETIF_F_LLTX;
+
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+
+	/* initialize eeprom parameters */
+
+	if (e1000_init_eeprom_params(&adapter->hw)) {
+		E1000_ERR("EEPROM initialization failed\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* before reading the EEPROM, reset the controller to
+	 * put the device in a known good starting state */
+
+	e1000_reset_hw(&adapter->hw);
+
+	/* make sure the EEPROM is good */
+
+	if (e1000_validate_eeprom_checksum(&adapter->hw) < 0) {
+		DPRINTK(PROBE, ERR, "The EEPROM Checksum Is Not Valid\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* copy the MAC address out of the EEPROM */
+
+	if (e1000_read_mac_addr(&adapter->hw))
+		DPRINTK(PROBE, ERR, "EEPROM Read Error\n");
+	memcpy(netdev->dev_addr, adapter->hw.mac_addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, adapter->hw.mac_addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	e1000_read_part_num(&adapter->hw, &(adapter->part_num));
+
+	e1000_get_bus_info(&adapter->hw);
+
+	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog);
+	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
+			  e1000_82547_tx_fifo_stall_task);
+	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+	/* we're going to reset, so assume we have no link for now */
+
+	rtnetif_carrier_off(netdev);
+	rtnetif_stop_queue(netdev);
+
+	e1000_check_options(adapter);
+
+	/* Initial Wake on LAN setting
+	 * If APM wake is enabled in the EEPROM,
+	 * enable the ACPI Magic Packet filter
+	 */
+
+	switch (adapter->hw.mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+	case e1000_82543:
+		break;
+	case e1000_82544:
+		e1000_read_eeprom(&adapter->hw,
+			EEPROM_INIT_CONTROL2_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_82544_APM;
+		break;
+	case e1000_ich8lan:
+		e1000_read_eeprom(&adapter->hw,
+			EEPROM_INIT_CONTROL1_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_ICH8_APME;
+		break;
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82571:
+	case e1000_80003es2lan:
+		if (E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_FUNC_1){
+			e1000_read_eeprom(&adapter->hw,
+				EEPROM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+			break;
+		}
+		fallthrough;
+	default:
+		e1000_read_eeprom(&adapter->hw,
+			EEPROM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+		break;
+	}
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	/* print bus type/speed/width info */
+	{
+	struct e1000_hw *hw = &adapter->hw;
+	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+		((hw->bus_type == e1000_bus_type_pcix) ? "-X" :
+		 (hw->bus_type == e1000_bus_type_pci_express ? " Express":"")),
+		((hw->bus_speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		 (hw->bus_speed == e1000_bus_speed_133) ? "133MHz" :
+		 (hw->bus_speed == e1000_bus_speed_120) ? "120MHz" :
+		 (hw->bus_speed == e1000_bus_speed_100) ? "100MHz" :
+		 (hw->bus_speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+		((hw->bus_width == e1000_bus_width_64) ? "64-bit" :
+		 (hw->bus_width == e1000_bus_width_pciex_4) ? "Width x4" :
+		 (hw->bus_width == e1000_bus_width_pciex_1) ? "Width x1" :
+		 "32-bit"));
+	}
+
+	printk(KERN_INFO "e1000: hw ");
+	for (i = 0; i < 6; i++)
+		printk(KERN_CONT "%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (adapter->hw.mac_type != e1000_82573 ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	strcpy(netdev->name, "rteth%d");
+	if ((err = rt_register_rtnetdev(netdev)))
+		goto err_register;
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	return 0;
+
+err_register:
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+err_flashmap:
+err_sw_init:
+err_eeprom:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	uint32_t manc;
+
+	e1000_down_and_stop(adapter);
+
+	if (adapter->hw.mac_type >= e1000_82540 &&
+	   adapter->hw.mac_type != e1000_ich8lan &&
+	   adapter->hw.media_type == e1000_media_type_copper) {
+		manc = E1000_READ_REG(&adapter->hw, MANC);
+		if (manc & E1000_MANC_SMBUS_EN) {
+			manc |= E1000_MANC_ARP_EN;
+			E1000_WRITE_REG(&adapter->hw, MANC, manc);
+		}
+	}
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	rt_unregister_rtnetdev(netdev);
+
+	if (!e1000_check_phy_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_regions(pdev);
+
+	rtdev_free(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+
+static int e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_id = pdev->subsystem_device;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->pci_cmd_word);
+
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_FRAME_SIZE;
+	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
+	hw->max_frame_size = netdev->mtu +
+			     ENET_HEADER_SIZE + ETHERNET_FCS_SIZE;
+	hw->min_frame_size = MINIMUM_ETHERNET_FRAME_SIZE;
+
+	/* identify the MAC */
+
+	if (e1000_set_mac_type(hw)) {
+		DPRINTK(PROBE, ERR, "Unknown MAC Type\n");
+		return -EIO;
+	}
+
+	switch (hw->mac_type) {
+	default:
+		break;
+	case e1000_82541:
+	case e1000_82547:
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		hw->phy_init_script = 1;
+		break;
+	}
+
+	e1000_set_media_type(hw);
+
+	hw->wait_autoneg_complete = FALSE;
+	hw->tbi_compatibility_en = TRUE;
+	hw->adaptive_ifs = FALSE;
+
+	/* Copper options */
+
+	if (hw->media_type == e1000_media_type_copper) {
+		hw->mdix = AUTO_ALL_MODES;
+		hw->disable_polarity_correction = FALSE;
+		hw->master_slave = E1000_MASTER_SLAVE;
+	}
+
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	atomic_set(&adapter->irq_sem, 1);
+
+	return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ *
+ * We allocate one ring per queue at run-time since we don't know the
+ * number of queues at compile-time.  The polling_netdev array is
+ * intended for Multiqueue, but should work fine with a single queue.
+ **/
+
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	int size;
+
+	size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+	adapter->tx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+	memset(adapter->tx_ring, 0, size);
+
+	size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
+	adapter->rx_ring = kmalloc(size, GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+	memset(adapter->rx_ring, 0, size);
+
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+
+static int
+e1000_open(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__E1000_DRIVER_TESTING, &adapter->flags))
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+
+	if ((err = e1000_setup_all_tx_resources(adapter)))
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+
+	if ((err = e1000_setup_all_rx_resources(adapter)))
+		goto err_setup_rx;
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_up;
+
+	e1000_power_up_phy(adapter);
+
+	if ((err = e1000_up(adapter)))
+		goto err_up;
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now open */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	/* Wait for the hardware to come up */
+	msleep(3000);
+
+	return E1000_SUCCESS;
+
+err_up:
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+	e1000_reset(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+
+static int
+e1000_close(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->flags));
+	e1000_down(adapter);
+	e1000_power_down_phy(adapter);
+	e1000_free_irq(adapter);
+
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
+
+
+	/* If AMT is enabled, let the firmware know that the network
+	 * interface is now closed */
+	if (adapter->hw.mac_type == e1000_82573 &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_release_hw_control(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static boolean_t
+e1000_check_64k_bound(struct e1000_adapter *adapter,
+		      void *start, unsigned long len)
+{
+	unsigned long begin = (unsigned long) start;
+	unsigned long end = begin + len;
+
+	/* First rev 82545 and 82546 need to not allow any memory
+	 * write location to cross 64k boundary due to errata 23 */
+	if (adapter->hw.mac_type == e1000_82545 ||
+	    adapter->hw.mac_type == e1000_82546) {
+		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+	}
+
+	return TRUE;
+}
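+
+/* A note on the check above: begin and end - 1 lie in the same 64 kB
+ * region exactly when their addresses agree in all bits above bit 15,
+ * i.e. when (begin ^ (end - 1)) >> 16 == 0.  For example, begin = 0xfff0
+ * with len = 0x20 gives end - 1 = 0x1000f and begin ^ (end - 1) = 0x1ffff,
+ * a non-zero shift result: the buffer crosses the boundary and FALSE is
+ * returned so that the caller re-allocates.
+ */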
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @txdr:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+static int
+e1000_setup_tx_resources(struct e1000_adapter *adapter,
+			 struct e1000_tx_ring *txdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct e1000_buffer) * txdr->count;
+	txdr->buffer_info = vmalloc(size);
+	if (!txdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(txdr->buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+
+	txdr->size = txdr->count * sizeof(struct e1000_tx_desc);
+	E1000_ROUNDUP(txdr->size, 4096);
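+	/* For example, 256 legacy Tx descriptors of 16 bytes each occupy
+	 * exactly 4096 bytes, while 80 descriptors (1280 bytes) are padded
+	 * up to a full 4096-byte page by the round-up above. */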
+
+	txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size, &txdr->dma,
+					GFP_ATOMIC);
+	if (!txdr->desc) {
+setup_tx_desc_die:
+		vfree(txdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+		void *olddesc = txdr->desc;
+		dma_addr_t olddma = txdr->dma;
+		DPRINTK(TX_ERR, ERR, "txdr align check failed: %u bytes "
+				     "at %p\n", txdr->size, txdr->desc);
+		/* Try again, without freeing the previous */
+		txdr->desc = dma_alloc_coherent(&pdev->dev, txdr->size,
+						&txdr->dma, GFP_ATOMIC);
+		/* Failed allocation, critical failure */
+		if (!txdr->desc) {
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+					  olddma);
+			goto setup_tx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, txdr->desc, txdr->size)) {
+			/* give up */
+			dma_free_coherent(&pdev->dev, txdr->size, txdr->desc,
+					  txdr->dma);
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+					  olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the transmit descriptor ring\n");
+			vfree(txdr->buffer_info);
+			return -ENOMEM;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			dma_free_coherent(&pdev->dev, txdr->size, olddesc,
+					  olddma);
+		}
+	}
+	memset(txdr->desc, 0, txdr->size);
+
+	txdr->next_to_use = 0;
+	txdr->next_to_clean = 0;
+	rtdm_lock_init(&txdr->tx_lock);
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, one or more of the rings
+ * may already be populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+
+static void
+e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	uint64_t tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t tdlen, tctl, tipg, tarc;
+	uint32_t ipgr1, ipgr2;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+
+	switch (adapter->num_tx_queues) {
+	case 1:
+	default:
+		tdba = adapter->tx_ring[0].dma;
+		tdlen = adapter->tx_ring[0].count *
+			sizeof(struct e1000_tx_desc);
+		E1000_WRITE_REG(hw, TDLEN, tdlen);
+		E1000_WRITE_REG(hw, TDBAH, (tdba >> 32));
+		E1000_WRITE_REG(hw, TDBAL, (tdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, TDT, 0);
+		E1000_WRITE_REG(hw, TDH, 0);
+		adapter->tx_ring[0].tdh = E1000_TDH;
+		adapter->tx_ring[0].tdt = E1000_TDT;
+		break;
+	}
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+
+	if (hw->media_type == e1000_media_type_fiber ||
+	    hw->media_type == e1000_media_type_internal_serdes)
+		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+	else
+		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+	switch (hw->mac_type) {
+	case e1000_82542_rev2_0:
+	case e1000_82542_rev2_1:
+		tipg = DEFAULT_82542_TIPG_IPGT;
+		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+		break;
+	case e1000_80003es2lan:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+		break;
+	default:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+		break;
+	}
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	E1000_WRITE_REG(hw, TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+
+	E1000_WRITE_REG(hw, TIDV, adapter->tx_int_delay);
+	if (hw->mac_type >= e1000_82540)
+		E1000_WRITE_REG(hw, TADV, adapter->tx_abs_int_delay);
+
+	/* Program the Transmit Control Register */
+
+	tctl = E1000_READ_REG(hw, TCTL);
+
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+#ifdef DISABLE_MULR
+	/* disable Multiple Reads for debugging */
+	tctl &= ~E1000_TCTL_MULR;
+#endif
+
+	if (hw->mac_type == e1000_82571 || hw->mac_type == e1000_82572) {
+		tarc = E1000_READ_REG(hw, TARC0);
+		tarc |= ((1 << 25) | (1 << 21));
+		E1000_WRITE_REG(hw, TARC0, tarc);
+		tarc = E1000_READ_REG(hw, TARC1);
+		tarc |= (1 << 25);
+		if (tctl & E1000_TCTL_MULR)
+			tarc &= ~(1 << 28);
+		else
+			tarc |= (1 << 28);
+		E1000_WRITE_REG(hw, TARC1, tarc);
+	} else if (hw->mac_type == e1000_80003es2lan) {
+		tarc = E1000_READ_REG(hw, TARC0);
+		tarc |= 1;
+		if (hw->media_type == e1000_media_type_internal_serdes)
+			tarc |= (1 << 20);
+		E1000_WRITE_REG(hw, TARC0, tarc);
+		tarc = E1000_READ_REG(hw, TARC1);
+		tarc |= 1;
+		E1000_WRITE_REG(hw, TARC1, tarc);
+	}
+
+	e1000_config_collision_dist(hw);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_IDE | E1000_TXD_CMD_EOP |
+		E1000_TXD_CMD_IFCS;
+
+	if (hw->mac_type < e1000_82543)
+		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+	else
+		adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	/* Cache if we're 82544 running in PCI-X because we'll
+	 * need this to apply a workaround later in the send path. */
+	if (hw->mac_type == e1000_82544 &&
+	    hw->bus_type == e1000_bus_type_pcix)
+		adapter->pcix_82544 = 1;
+
+	E1000_WRITE_REG(hw, TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rxdr:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+
+static int
+e1000_setup_rx_resources(struct e1000_adapter *adapter,
+			 struct e1000_rx_ring *rxdr)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct e1000_buffer) * rxdr->count;
+	rxdr->buffer_info = vmalloc(size);
+	if (!rxdr->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->buffer_info, 0, size);
+
+	size = sizeof(struct e1000_ps_page) * rxdr->count;
+	rxdr->ps_page = kmalloc(size, GFP_KERNEL);
+	if (!rxdr->ps_page) {
+		vfree(rxdr->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->ps_page, 0, size);
+
+	size = sizeof(struct e1000_ps_page_dma) * rxdr->count;
+	rxdr->ps_page_dma = kmalloc(size, GFP_KERNEL);
+	if (!rxdr->ps_page_dma) {
+		vfree(rxdr->buffer_info);
+		kfree(rxdr->ps_page);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rxdr->ps_page_dma, 0, size);
+
+	if (adapter->hw.mac_type <= e1000_82547_rev_2)
+		desc_len = sizeof(struct e1000_rx_desc);
+	else
+		desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+
+	rxdr->size = rxdr->count * desc_len;
+	E1000_ROUNDUP(rxdr->size, 4096);
+
+	rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size, &rxdr->dma,
+					GFP_ATOMIC);
+
+	if (!rxdr->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+		vfree(rxdr->buffer_info);
+		kfree(rxdr->ps_page);
+		kfree(rxdr->ps_page_dma);
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+		void *olddesc = rxdr->desc;
+		dma_addr_t olddma = rxdr->dma;
+		DPRINTK(RX_ERR, ERR, "rxdr align check failed: %u bytes "
+				     "at %p\n", rxdr->size, rxdr->desc);
+		/* Try again, without freeing the previous */
+		rxdr->desc = dma_alloc_coherent(&pdev->dev, rxdr->size,
+						&rxdr->dma, GFP_ATOMIC);
+		/* Failed allocation, critical failure */
+		if (!rxdr->desc) {
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+					  olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, rxdr->desc, rxdr->size)) {
+			/* give up */
+			dma_free_coherent(&pdev->dev, rxdr->size, rxdr->desc,
+					   rxdr->dma);
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+					  olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			dma_free_coherent(&pdev->dev, rxdr->size, olddesc,
+					  olddma);
+		}
+	}
+	memset(rxdr->desc, 0, rxdr->size);
+
+	rxdr->next_to_clean = 0;
+	rxdr->next_to_use = 0;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ *				  (Descriptors) for all queues
+ * @adapter: board private structure
+ *
+ * If this function returns with an error, one or more of the rings
+ * may already be populated (while the rest are not).  It is the
+ * caller's duty to clean up those orphaned rings.
+ *
+ * Return 0 on success, negative on failure
+ **/
+
+int
+e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
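+/* PAGE_USE_COUNT(S) counts the pages backing S bytes: the whole pages
+ * plus one for any remainder; with 4 kB pages PAGE_USE_COUNT(6000) is
+ * (6000 >> 12) + 1 = 2.  It goes unused below because rx_ps_pages is
+ * forced to 0, keeping packet split disabled. */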
+static void
+e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	uint32_t rctl;
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	uint32_t pages = 0;
+#endif
+
+	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* FIXME: disable the stripping of CRC because it breaks
+	 * BMC firmware connected over SMBUS
+	if (adapter->hw.mac_type > e1000_82543)
+		rctl |= E1000_RCTL_SECRC;
+	*/
+
+	if (adapter->hw.tbi_compatibility_on == 1)
+		rctl |= E1000_RCTL_SBP;
+	else
+		rctl &= ~E1000_RCTL_SBP;
+
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
+	switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+	}
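+
+	/* The two-bit RCTL buffer-size field natively encodes 2048, 1024,
+	 * 512 and 256 bytes; with the BSEX (buffer size extension) bit set
+	 * it selects the extended 4096/8192/16384 sizes instead, which is
+	 * why the small-buffer cases above clear BSEX while the large ones
+	 * keep the bit set at the top of the function. */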
+
+	adapter->rx_ps_pages = 0;
+
+	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+
+static void
+e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	uint64_t rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rdlen, rctl, rxcsum, ctrl_ext;
+
+	{
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = NULL; /* unused */
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+	}
+
+	/* disable receives while setting up the descriptors */
+	rctl = E1000_READ_REG(hw, RCTL);
+	E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+
+	/* set the Receive Delay Timer Register */
+	E1000_WRITE_REG(hw, RDTR, adapter->rx_int_delay);
+
+	if (hw->mac_type >= e1000_82540) {
+		E1000_WRITE_REG(hw, RADV, adapter->rx_abs_int_delay);
+		if (adapter->itr > 1)
+			E1000_WRITE_REG(hw, ITR,
+				1000000000 / (adapter->itr * 256));
+	}
+
+	if (hw->mac_type >= e1000_82571) {
+		ctrl_ext = E1000_READ_REG(hw, CTRL_EXT);
+		/* Reset delay timers after every interrupt */
+		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+		E1000_WRITE_REG(hw, CTRL_EXT, ctrl_ext);
+		E1000_WRITE_REG(hw, IAM, ~0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	switch (adapter->num_rx_queues) {
+	case 1:
+	default:
+		rdba = adapter->rx_ring[0].dma;
+		E1000_WRITE_REG(hw, RDLEN, rdlen);
+		E1000_WRITE_REG(hw, RDBAH, (rdba >> 32));
+		E1000_WRITE_REG(hw, RDBAL, (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, RDT, 0);
+		E1000_WRITE_REG(hw, RDH, 0);
+		adapter->rx_ring[0].rdh = E1000_RDH;
+		adapter->rx_ring[0].rdt = E1000_RDT;
+		break;
+	}
+
+	/* Enable 82543 Receive Checksum Offload for TCP and UDP */
+	if (hw->mac_type >= e1000_82543) {
+		rxcsum = E1000_READ_REG(hw, RXCSUM);
+		if (adapter->rx_csum == TRUE) {
+			rxcsum |= E1000_RXCSUM_TUOFL;
+
+		} else {
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+			/* don't need to clear IPPCSE as it defaults to 0 */
+		}
+		E1000_WRITE_REG(hw, RXCSUM, rxcsum);
+	}
+
+
+	/* Enable Receives */
+	E1000_WRITE_REG(hw, RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+
+static void
+e1000_free_tx_resources(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+
+void
+e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void
+e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+			struct e1000_buffer *buffer_info)
+{
+	if (buffer_info->dma) {
+		dma_unmap_page(&adapter->pdev->dev,
+			       buffer_info->dma,
+			       buffer_info->length,
+			       DMA_TO_DEVICE);
+	}
+	if (buffer_info->skb)
+		kfree_rtskb(buffer_info->skb);
+	memset(buffer_info, 0, sizeof(struct e1000_buffer));
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+
+static void
+e1000_clean_tx_ring(struct e1000_adapter *adapter,
+		    struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->last_tx_tso = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+
+static void
+e1000_free_rx_resources(struct e1000_adapter *adapter,
+			struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+	kfree(rx_ring->ps_page);
+	rx_ring->ps_page = NULL;
+	kfree(rx_ring->ps_page_dma);
+	rx_ring->ps_page_dma = NULL;
+
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+
+void
+e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+
+static void
+e1000_clean_rx_ring(struct e1000_adapter *adapter,
+		    struct e1000_rx_ring *rx_ring)
+{
+	struct e1000_buffer *buffer_info;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		if (buffer_info->skb) {
+			dma_unmap_single(&pdev->dev,
+					 buffer_info->dma,
+					 buffer_info->length,
+					 DMA_FROM_DEVICE);
+
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct e1000_buffer) * rx_ring->count;
+	memset(rx_ring->buffer_info, 0, size);
+	size = sizeof(struct e1000_ps_page) * rx_ring->count;
+	memset(rx_ring->ps_page, 0, size);
+	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
+	memset(rx_ring->ps_page_dma, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+static void
+e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	uint32_t rctl;
+
+	e1000_pci_clear_mwi(&adapter->hw);
+
+	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+	rctl |= E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (rtnetif_running(netdev))
+		e1000_clean_all_rx_rings(adapter);
+}
+
+static void
+e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	uint32_t rctl;
+
+	rctl = E1000_READ_REG(&adapter->hw, RCTL);
+	rctl &= ~E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (adapter->hw.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(&adapter->hw);
+
+	if (rtnetif_running(netdev)) {
+		/* No need to loop, because 82542 supports only 1 queue */
+		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
+		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+	}
+}
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+
+static void
+e1000_set_multi(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rctl;
+	int i, rar_entries = E1000_RAR_ENTRIES;
+	int mta_reg_count = (hw->mac_type == e1000_ich8lan) ?
+				E1000_NUM_MTA_REGISTERS_ICH8LAN :
+				E1000_NUM_MTA_REGISTERS;
+
+	if (adapter->hw.mac_type == e1000_ich8lan)
+		rar_entries = E1000_RAR_ENTRIES_ICH8LAN;
+
+	/* reserve RAR[14] for LAA over-write work-around */
+	if (adapter->hw.mac_type == e1000_82571)
+		rar_entries--;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = E1000_READ_REG(hw, RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= E1000_RCTL_MPE;
+		rctl &= ~E1000_RCTL_UPE;
+	} else {
+		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+	}
+
+	E1000_WRITE_REG(hw, RCTL, rctl);
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_enter_82542_rst(adapter);
+
+	/* Clear the exact-match filters 1-14; RAR 0 holds the station
+	 * MAC address and is left untouched.  On 82571 controllers the
+	 * last entry is reserved for the LAA work-around above, so only
+	 * entries 1-13 are cleared here.
+	 */
+
+	for (i = 1; i < rar_entries; i++) {
+		E1000_WRITE_REG_ARRAY(hw, RA, i << 1, 0);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, RA, (i << 1) + 1, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
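+
+	/* Each receive-address entry spans two 32-bit registers (the low
+	 * and high halves of the 48-bit address plus the valid bit), hence
+	 * the RA array offsets i << 1 and (i << 1) + 1 above. */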
+
+	/* clear the old settings from the multicast hash table */
+
+	for (i = 0; i < mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, MTA, i, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	if (hw->mac_type == e1000_82542_rev2_0)
+		e1000_leave_82542_rst(adapter);
+}
+
+/**
+ * e1000_update_phy_info_task - get phy info
+ * @work: work struct contained inside adapter struct
+ *
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     phy_info_task.work);
+	e1000_phy_get_info(&adapter->hw, &adapter->phy_info);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - reset Tx FIFO pointers once the FIFO drains
+ * @work: work struct contained inside adapter struct
+ **/
+
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     fifo_stall_task.work);
+	struct rtnet_device *netdev = adapter->netdev;
+	uint32_t tctl;
+
+	if (atomic_read(&adapter->tx_fifo_stall)) {
+		if ((E1000_READ_REG(&adapter->hw, TDT) ==
+		    E1000_READ_REG(&adapter->hw, TDH)) &&
+		   (E1000_READ_REG(&adapter->hw, TDFT) ==
+		    E1000_READ_REG(&adapter->hw, TDFH)) &&
+		   (E1000_READ_REG(&adapter->hw, TDFTS) ==
+		    E1000_READ_REG(&adapter->hw, TDFHS))) {
+			tctl = E1000_READ_REG(&adapter->hw, TCTL);
+			E1000_WRITE_REG(&adapter->hw, TCTL,
+					tctl & ~E1000_TCTL_EN);
+			E1000_WRITE_REG(&adapter->hw, TDFT,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TDFH,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TDFTS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TDFHS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+			E1000_WRITE_FLUSH(&adapter->hw);
+
+			adapter->tx_fifo_head = 0;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+			rtnetif_wake_queue(netdev);
+		} else {
+			schedule_delayed_work(&adapter->fifo_stall_task, 1);
+		}
+	}
+}
+
+/**
+ * e1000_watchdog - work function
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_watchdog(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     watchdog_task.work);
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_tx_ring *txdr = adapter->tx_ring;
+	uint32_t link, tctl;
+	int32_t ret_val;
+
+	ret_val = e1000_check_for_link(&adapter->hw);
+	if ((ret_val == E1000_ERR_PHY) &&
+	    (adapter->hw.phy_type == e1000_phy_igp_3) &&
+	    (E1000_READ_REG(&adapter->hw, CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kumeran_lock_loss_workaround() */
+		DPRINTK(LINK, INFO,
+			"Gigabit has been disabled, downgrading speed\n");
+	}
+	if (adapter->hw.mac_type == e1000_82573) {
+		e1000_enable_tx_pkt_filtering(&adapter->hw);
+	}
+
+	if ((adapter->hw.media_type == e1000_media_type_internal_serdes) &&
+	   !(E1000_READ_REG(&adapter->hw, TXCW) & E1000_TXCW_ANE))
+		link = !adapter->hw.serdes_link_down;
+	else
+		link = E1000_READ_REG(&adapter->hw, STATUS) & E1000_STATUS_LU;
+
+	if (link) {
+		if (!rtnetif_carrier_ok(netdev)) {
+			boolean_t txb2b = 1;
+			e1000_get_speed_and_duplex(&adapter->hw,
+						   &adapter->link_speed,
+						   &adapter->link_duplex);
+
+			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s\n",
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
+			       "Full Duplex" : "Half Duplex");
+
+			/* tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor */
+			// TODO makoehre netdev->tx_queue_len = adapter->tx_queue_len;
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = 0;
+				// TODO makoehre netdev->tx_queue_len = 10;
+				adapter->tx_timeout_factor = 8;
+				break;
+			case SPEED_100:
+				txb2b = 0;
+				// TODO makoehre netdev->tx_queue_len = 100;
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			if ((adapter->hw.mac_type == e1000_82571 ||
+			     adapter->hw.mac_type == e1000_82572) &&
+			    txb2b == 0) {
+#define SPEED_MODE_BIT (1 << 21)
+				uint32_t tarc0;
+				tarc0 = E1000_READ_REG(&adapter->hw, TARC0);
+				tarc0 &= ~SPEED_MODE_BIT;
+				E1000_WRITE_REG(&adapter->hw, TARC0, tarc0);
+			}
+
+
+			/* enable transmits in the hardware, need to do this
+			 * after setting TARC0 */
+			tctl = E1000_READ_REG(&adapter->hw, TCTL);
+			tctl |= E1000_TCTL_EN;
+			E1000_WRITE_REG(&adapter->hw, TCTL, tctl);
+
+			rtnetif_carrier_on(netdev);
+			rtnetif_wake_queue(netdev);
+			schedule_delayed_work(&adapter->phy_info_task, 2 * HZ);
+			adapter->smartspeed = 0;
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			DPRINTK(LINK, INFO, "NIC Link is Down\n");
+			rtnetif_carrier_off(netdev);
+			rtnetif_stop_queue(netdev);
+			schedule_delayed_work(&adapter->phy_info_task, 2 * HZ);
+
+			/* 80003ES2LAN work-around for the packet-buffer
+			 * issue on a link-down event: receives were
+			 * disabled in the ISR, so reset the device here
+			 * in the watchdog
+			 */
+			if (adapter->hw.mac_type == e1000_80003es2lan)
+				/* reset device */
+				schedule_work(&adapter->reset_task);
+		}
+
+		e1000_smartspeed(adapter);
+	}
+
+
+	adapter->hw.tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	adapter->hw.collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorcl = adapter->stats.gorcl - adapter->gorcl_old;
+	adapter->gorcl_old = adapter->stats.gorcl;
+	adapter->gotcl = adapter->stats.gotcl - adapter->gotcl_old;
+	adapter->gotcl_old = adapter->stats.gotcl;
+
+	// e1000_update_adaptive(&adapter->hw);
+
+	if (!rtnetif_carrier_ok(netdev)) {
+		if (E1000_DESC_UNUSED(txdr) + 1 < txdr->count) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context). */
+			adapter->tx_timeout_count++;
+			schedule_work(&adapter->reset_task);
+		}
+	}
+
+	/* Dynamic mode for Interrupt Throttle Rate (ITR) */
+	if (adapter->hw.mac_type >= e1000_82540 && adapter->itr == 1) {
+		/* Symmetric Tx/Rx gets a reduced ITR=2000; totally
+		 * asymmetric Tx or Rx gets ITR=8000; everything else
+		 * falls between 2000 and 8000. */
+		uint32_t goc = (adapter->gotcl + adapter->gorcl) / 10000;
+		uint32_t dif = (adapter->gotcl > adapter->gorcl ?
+			adapter->gotcl - adapter->gorcl :
+			adapter->gorcl - adapter->gotcl) / 10000;
+		uint32_t itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+		E1000_WRITE_REG(&adapter->hw, ITR, 1000000000 / (itr * 256));
+	}
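+
+	/* Worked example for the heuristic above: 8 MB sent and 8 MB
+	 * received since the last run give dif = 0 and itr = 2000; 8 MB
+	 * sent with nothing received give dif == goc and itr = 8000.  The
+	 * ITR register counts 256 ns units, hence 1000000000 / (itr * 256)
+	 * for roughly itr interrupts per second. */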
+
+	/* Cause software interrupt to ensure rx ring is cleaned */
+	E1000_WRITE_REG(&adapter->hw, ICS, E1000_ICS_RXDMT0);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = TRUE;
+
+	/* With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	if (adapter->hw.mac_type == e1000_82571 && adapter->hw.laa_is_present)
+		e1000_rar_set(&adapter->hw, adapter->hw.mac_addr, 0);
+
+	/* Reschedule the task */
+	schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
+}
+
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+
+static boolean_t
+e1000_tx_csum(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	      struct rtskb *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	uint8_t css;
+
+	if (likely(skb->ip_summed == CHECKSUM_PARTIAL)) {
+		css = skb->h.raw - skb->data;
+
+		i = tx_ring->next_to_use;
+		buffer_info = &tx_ring->buffer_info[i];
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+		context_desc->upper_setup.tcp_fields.tucss = css;
+		context_desc->upper_setup.tcp_fields.tucso = css + skb->csum;
+		context_desc->upper_setup.tcp_fields.tucse = 0;
+		context_desc->tcp_seg_setup.data = 0;
+		context_desc->cmd_and_length = cpu_to_le32(E1000_TXD_CMD_DEXT);
+
+		buffer_info->time_stamp = jiffies;
+
+		if (unlikely(++i == tx_ring->count)) i = 0;
+		tx_ring->next_to_use = i;
+
+		return TRUE;
+	}
+
+	return FALSE;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
+static int
+e1000_tx_map(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	     struct rtskb *skb, unsigned int first, unsigned int max_per_txd,
+	     unsigned int nr_frags, unsigned int mss)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned int len = skb->len;
+	unsigned int offset = 0, size, count = 0, i;
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+		/* Work-around for errata 10; it applies to all
+		 * controllers in PCI-X mode.  The fix is to make sure
+		 * that the first descriptor of a packet is smaller
+		 * than 2048 - 16 - 16 (or 2016) bytes.
+		 */
+		if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+				(size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords. */
+		if (unlikely(adapter->pcix_82544 &&
+		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		   size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		buffer_info->dma =
+			dma_map_single(&adapter->pdev->dev,
+				       skb->data + offset,
+				       size,
+				       DMA_TO_DEVICE);
+		buffer_info->time_stamp = jiffies;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+
+	i = (i == 0) ? tx_ring->count - 1 : i - 1;
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return count;
+}
+
+static void
+e1000_tx_queue(struct e1000_adapter *adapter, struct e1000_tx_ring *tx_ring,
+	       int tx_flags, int count, nanosecs_abs_t *xmit_stamp)
+{
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	uint32_t txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+
+
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	i = tx_ring->next_to_use;
+
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+	if (xmit_stamp)
+		*xmit_stamp = cpu_to_be64(rtdm_clock_read() + *xmit_stamp);
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time.  This gives the Tx FIFO an opportunity to
+ * flush all packets.  When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+
+#define E1000_FIFO_HDR			0x10
+#define E1000_82547_PAD_LEN		0x3E0
+
+static int
+e1000_82547_fifo_workaround(struct e1000_adapter *adapter, struct rtskb *skb)
+{
+	uint32_t fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+	uint32_t skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+	E1000_ROUNDUP(skb_fifo_len, E1000_FIFO_HDR);
+
+	if (adapter->link_duplex != HALF_DUPLEX)
+		goto no_fifo_stall_required;
+
+	if (atomic_read(&adapter->tx_fifo_stall))
+		return 1;
+
+	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+		atomic_set(&adapter->tx_fifo_stall, 1);
+		return 1;
+	}
+
+no_fifo_stall_required:
+	adapter->tx_fifo_head += skb_fifo_len;
+	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+		adapter->tx_fifo_head -= adapter->tx_fifo_size;
+	return 0;
+}
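+
+/* Example of the arithmetic above: a 1000-byte frame becomes 1000 + 16 =
+ * 1016 FIFO bytes, rounded up to 1024.  With only 32 bytes left before
+ * the FIFO wrap point (fifo_space = 32), 1024 >= 0x3E0 + 32 holds, so
+ * the frame is held back and the stall task later resets the FIFO
+ * pointers once the FIFO has drained.
+ */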
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
+static int
+e1000_transfer_dhcp_info(struct e1000_adapter *adapter, struct rtskb *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	uint16_t length, offset;
+	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+		struct ethhdr *eth = (struct ethhdr *) skb->data;
+		if ((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((uint8_t *)skb->data+14);
+			if (IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((uint8_t *)ip +
+						(ip->ihl << 2));
+				if (ntohs(udp->dest) == 67) {
+					offset = (uint8_t *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(uint8_t *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
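+/* TXD_USE_COUNT(S, X) is a conservative upper bound on the descriptors
+ * needed for S bytes split into 2^X-byte chunks: e.g. a 6000-byte frame
+ * with X = 12 yields (6000 >> 12) + 1 = 2 descriptors, and an exact
+ * multiple of 4096 still counts one extra. */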
+static int
+e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_tx_ring *tx_ring;
+	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+	unsigned int tx_flags = 0;
+	unsigned int len = skb->len;
+	rtdm_lockctx_t context;
+	unsigned int nr_frags = 0;
+	unsigned int mss = 0;
+	int count = 0;
+
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow.  Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues.  If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
+	tx_ring = adapter->tx_ring;
+
+	if (unlikely(skb->len <= 0)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		count++;
+
+
+	count += TXD_USE_COUNT(len, max_txd_pwr);
+
+	if (adapter->pcix_82544)
+		count++;
+
+	/* Work-around for errata 10; it applies to all controllers
+	 * in PCI-X mode, so add one more descriptor to the count.
+	 */
+	if (unlikely((adapter->hw.bus_type == e1000_bus_type_pcix) &&
+			(len > 2015)))
+		count++;
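+
+	/* At this point count is a conservative descriptor budget: an
+	 * optional checksum context descriptor, the data descriptors
+	 * for len bytes, plus the 82544 PCI-X and errata-10 spares
+	 * added above; the queue is stopped below unless count + 2
+	 * free descriptors remain. */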
+
+
+	if (adapter->hw.tx_pkt_filtering &&
+	    (adapter->hw.mac_type == e1000_82573))
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	rtdm_lock_get_irqsave(&tx_ring->tx_lock, context);
+
+	/* need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time */
+	if (unlikely(E1000_DESC_UNUSED(tx_ring) < count + 2)) {
+		rtnetif_stop_queue(netdev);
+		rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context);
+		rtdm_printk("FATAL: rt_e1000 ran into tail close to head situation!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(adapter->hw.mac_type == e1000_82547)) {
+		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+			rtnetif_stop_queue(netdev);
+			rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context);
+
+			/* FIXME: warn the user earlier, i.e. on startup if
+			   half-duplex is detected! */
+			rtdm_printk("FATAL: rt_e1000 ran into 82547 "
+				    "controller bug!\n");
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+	first = tx_ring->next_to_use;
+
+	if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+		tx_flags |= E1000_TX_FLAGS_CSUM;
+
+	e1000_tx_queue(adapter, tx_ring, tx_flags,
+		       e1000_tx_map(adapter, tx_ring, skb, first,
+				    max_per_txd, nr_frags, mss),
+		       skb->xmit_stamp);
+
+	rtdm_lock_put_irqrestore(&tx_ring->tx_lock, context);
+
+	return NETDEV_TX_OK;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ * @pt_regs: CPU registers structure
+ **/
+
+static int
+e1000_intr(rtdm_irq_t *irq_handle)
+    /* int irq, void *data, struct pt_regs *regs) */
+{
+
+	struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	uint32_t rctl, icr = E1000_READ_REG(hw, ICR);
+	int i;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	if (unlikely(!icr)) {
+		return RTDM_IRQ_NONE;  /* Not our interrupt */
+	}
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+		hw->get_link_status = 1;
+		/* 80003ES2LAN work-around for the packet-buffer issue
+		 * on a link-down event: disable receives here in the
+		 * ISR; the adapter is reset later in the watchdog
+		 */
+		if (rtnetif_carrier_ok(netdev) &&
+		    (adapter->hw.mac_type == e1000_80003es2lan)) {
+			/* disable receives */
+			rctl = E1000_READ_REG(hw, RCTL);
+			E1000_WRITE_REG(hw, RCTL, rctl & ~E1000_RCTL_EN);
+		}
+		/* FIXME: we need to handle this via some yet-to-be-invented
+		   error manager (Linux bottom-half and/or kthread)
+		mod_timer(&adapter->watchdog_timer, jiffies);*/
+	}
+
+	/* Writing IMC and IMS is needed for 82547.
+	 * Due to Hub Link bus being occupied, an interrupt
+	 * de-assertion message is not able to be sent.
+	 * When an interrupt assertion message is generated later,
+	 * two messages are re-ordered and sent out.
+	 * That causes APIC to think 82547 is in de-assertion
+	 * state, while 82547 is in assertion state, resulting
+	 * in dead lock. Writing IMC forces 82547 into
+	 * de-assertion state.
+	 */
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2) {
+		atomic_inc(&adapter->irq_sem);
+		E1000_WRITE_REG(hw, IMC, ~0);
+	}
+
+	adapter->data_received = 0;
+
+	for (i = 0; i < E1000_MAX_INTR; i++)
+		if (unlikely(!e1000_clean_rx_irq(adapter, adapter->rx_ring,
+						 &time_stamp) &
+		   !e1000_clean_tx_irq(adapter, adapter->tx_ring)))
+			break;
+
+	if (hw->mac_type == e1000_82547 || hw->mac_type == e1000_82547_rev_2)
+		e1000_irq_enable(adapter);
+
+
+	if (adapter->data_received)
+		rt_mark_stack_mgr(netdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+e1000_clean_tx_irq(struct e1000_adapter *adapter,
+		   struct e1000_tx_ring *tx_ring)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	boolean_t cleaned = FALSE;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+		for (cleaned = FALSE; !cleaned; ) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			memset(tx_desc, 0, sizeof(struct e1000_tx_desc));
+
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+
+
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	if (unlikely(cleaned && rtnetif_queue_stopped(netdev) &&
+		     rtnetif_carrier_ok(netdev))) {
+		rtdm_lock_get(&tx_ring->tx_lock);
+		if (rtnetif_queue_stopped(netdev) &&
+		    (E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD))
+			rtnetif_wake_queue(netdev);
+		rtdm_lock_put(&tx_ring->tx_lock);
+	}
+
+	if (adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware; this serializes
+		 * the check with the clearing of time_stamp and the
+		 * movement of i */
+		adapter->detect_tx_hung = FALSE;
+		if (tx_ring->buffer_info[eop].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+			       (adapter->tx_timeout_factor * HZ))
+		    && !(E1000_READ_REG(&adapter->hw, STATUS) &
+			 E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				(unsigned long)(tx_ring - adapter->tx_ring),
+				readl(adapter->hw.hw_addr + tx_ring->tdh),
+				readl(adapter->hw.hw_addr + tx_ring->tdt),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[eop].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->upper.fields.status);
+			rtnetif_stop_queue(netdev);
+		}
+	}
+	return cleaned;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:        receive descriptor csum field
+ * @sk_buff:     socket buffer with received data
+ **/
+
+static void
+e1000_rx_checksum(struct e1000_adapter *adapter,
+		  uint32_t status_err, uint32_t csum,
+		  struct rtskb *skb)
+{
+	uint16_t status = (uint16_t)status_err;
+	uint8_t errors = (uint8_t)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* 82543 or newer only */
+	if (unlikely(adapter->hw.mac_type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if (adapter->hw.mac_type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
+			return;
+	} else {
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (adapter->hw.mac_type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		csum = ntohl(csum ^ 0xFFFF);
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_PARTIAL;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ **/
+
+static boolean_t
+e1000_clean_rx_irq(struct e1000_adapter *adapter,
+		   struct e1000_rx_ring *rx_ring,
+		   nanosecs_abs_t *time_stamp)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	uint32_t length;
+	uint8_t last_byte;
+	unsigned int i;
+	int cleaned_count = 0;
+	boolean_t cleaned = FALSE;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct rtskb *skb, *next_skb;
+		u8 status;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+		next_skb = next_buffer->skb;
+		prefetch(next_skb->data - NET_IP_ALIGN);
+
+		cleaned = TRUE;
+		cleaned_count++;
+		dma_unmap_single(&pdev->dev,
+				 buffer_info->dma,
+				 buffer_info->length,
+				 DMA_FROM_DEVICE);
+
+		length = le16_to_cpu(rx_desc->length);
+
+		if (unlikely(!(status & E1000_RXD_STAT_EOP))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+			last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(&adapter->hw, status,
+				      rx_desc->errors, length, last_byte)) {
+				length--;
+			} else {
+				/* recycle */
+				buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		/* no copybreak here: the received buffer is handed up
+		 * the stack unmodified */
+		rtskb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (uint32_t)(status) |
+				  ((uint32_t)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+		skb->time_stamp = *time_stamp;
+		rtnetif_rx(skb);
+		adapter->data_received = 1; /* flag checked in e1000_intr() */
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	return cleaned;
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ **/
+
+static void
+e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+		       struct e1000_rx_ring *rx_ring,
+		       int cleaned_count)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct rtskb *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		if (!(skb = buffer_info->skb))
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+		else {
+			rtskb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct rtskb *oldskb = skb;
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				kfree_rtskb(oldskb);
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				kfree_rtskb(skb);
+				kfree_rtskb(oldskb);
+				break; /* while !buffer_info->skb */
+			} else {
+				/* Use new allocation */
+				kfree_rtskb(oldskb);
+			}
+		}
+		/* Make buffer alignment 2 beyond a 16-byte boundary;
+		 * this results in a 16-byte aligned IP header after
+		 * the 14-byte MAC header is removed
+		 */
+		rtskb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+		buffer_info->length = adapter->rx_buffer_len;
+map_skb:
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data,
+						  adapter->rx_buffer_len,
+						  DMA_FROM_DEVICE);
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter,
+					(void *)(unsigned long)buffer_info->dma,
+					adapter->rx_buffer_len)) {
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
+			kfree_rtskb(skb);
+			buffer_info->skb = NULL;
+
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 DMA_FROM_DEVICE);
+
+			break; /* while !buffer_info->skb */
+		}
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
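+
+		/* The tail register is written one entry behind
+		 * next_to_use so that the hardware never owns the
+		 * descriptor the driver will fill next. */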
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
+}
+
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: board private structure
+ **/
+
+static void
+e1000_smartspeed(struct e1000_adapter *adapter)
+{
+	uint16_t phy_status;
+	uint16_t phy_ctrl;
+
+	if ((adapter->hw.phy_type != e1000_phy_igp) || !adapter->hw.autoneg ||
+	   !(adapter->hw.autoneg_advertised & ADVERTISE_1000_FULL))
+		return;
+
+	if (adapter->smartspeed == 0) {
+		/* If the Master/Slave config fault is asserted twice,
+		 * we assume back-to-back faults */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT)) return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
+			phy_ctrl &= ~CR_1000T_MS_ENABLE;
+			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
+					    phy_ctrl);
+			adapter->smartspeed++;
+			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+			   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL,
+					       &phy_ctrl)) {
+				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+					     MII_CR_RESTART_AUTO_NEG);
+				e1000_write_phy_reg(&adapter->hw, PHY_CTRL,
+						    phy_ctrl);
+			}
+		}
+		return;
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+		/* If still no link, perhaps using 2/3 pair cable */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		phy_ctrl |= CR_1000T_MS_ENABLE;
+		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
+		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+		   !e1000_read_phy_reg(&adapter->hw, PHY_CTRL, &phy_ctrl)) {
+			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+				     MII_CR_RESTART_AUTO_NEG);
+			e1000_write_phy_reg(&adapter->hw, PHY_CTRL, phy_ctrl);
+		}
+	}
+	/* Restart process after E1000_SMARTSPEED_MAX iterations */
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+		adapter->smartspeed = 0;
+}
+
+
+
+void
+e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+#ifdef HAVE_PCI_SET_MWI
+	int ret_val = pci_set_mwi(adapter->pdev);
+
+	if (ret_val)
+		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+#else
+	pci_write_config_word(adapter->pdev, PCI_COMMAND,
+			      adapter->hw.pci_cmd_word |
+			      PCI_COMMAND_INVALIDATE);
+#endif
+}
+
+void
+e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+#ifdef HAVE_PCI_SET_MWI
+	pci_clear_mwi(adapter->pdev);
+#else
+	pci_write_config_word(adapter->pdev, PCI_COMMAND,
+			      adapter->hw.pci_cmd_word &
+			      ~PCI_COMMAND_INVALIDATE);
+#endif
+}
+
+void
+e1000_read_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void
+e1000_write_pci_cfg(struct e1000_hw *hw, uint32_t reg, uint16_t *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+uint32_t
+e1000_io_read(struct e1000_hw *hw, unsigned long port)
+{
+	return inl(port);
+}
+
+void
+e1000_io_write(struct e1000_hw *hw, unsigned long port, uint32_t value)
+{
+	outl(value, port);
+}
+
+
+int
+e1000_set_spd_dplx(struct e1000_adapter *adapter, uint16_t spddplx)
+{
+	adapter->hw.autoneg = 0;
+
+	/* Fiber NICs only support 1000 Mbps full duplex */
+	if ((adapter->hw.media_type == e1000_media_type_fiber) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		adapter->hw.forced_speed_duplex = e1000_10_half;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		adapter->hw.forced_speed_duplex = e1000_10_full;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		adapter->hw.forced_speed_duplex = e1000_100_half;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		adapter->hw.forced_speed_duplex = e1000_100_full;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		adapter->hw.autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h
new file mode 100644
index 0000000..8de3048
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_osdep.h
@@ -0,0 +1,148 @@
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Glue for the OS-independent part of e1000;
+ * includes the register access macros.
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <linux/interrupt.h>
+#include <linux/sched.h>
+#include "kcompat.h"
+
+#define usec_delay(x) udelay(x)
+#ifndef msec_delay
+#define msec_delay(x)	do { if(in_interrupt()) { \
+				/* Don't mdelay in interrupt context! */ \
+				BUG(); \
+			} else { \
+				msleep(x); \
+			} } while (0)
+
+/* Some workarounds require millisecond delays and are run during interrupt
+ * context.  Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+#define msec_delay_irq(x) mdelay(x)
+#endif
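+
+/*
+ * Illustrative use of the two delay flavors (hypothetical call sites):
+ * probe/reset paths that may sleep use msec_delay(), while PHY
+ * workarounds running from the link-status-change interrupt must
+ * busy-wait with msec_delay_irq(), e.g.
+ *
+ *	msec_delay(10);		// process context: sleeps via msleep()
+ *	msec_delay_irq(10);	// interrupt context: spins via mdelay()
+ */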
+
+#define PCI_COMMAND_REGISTER   PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+
+typedef enum {
+#undef FALSE
+    FALSE = 0,
+#undef TRUE
+    TRUE = 1
+} boolean_t;
+
+#define MSGOUT(S, A, B)	printk(KERN_DEBUG S "\n", A, B)
+
+#ifdef DBG
+#define DEBUGOUT(S)		printk(KERN_DEBUG S "\n")
+#define DEBUGOUT1(S, A...)	printk(KERN_DEBUG S "\n", A)
+#else
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+#endif
+
+#define DEBUGFUNC(F) DEBUGOUT(F)
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+#ifdef __BIG_ENDIAN
+#define E1000_BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+#define E1000_WRITE_REG(a, reg, value) ( \
+    writel((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg))))
+
+#define E1000_READ_REG(a, reg) ( \
+    readl((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg)))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	(offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + \
+	(((a)->mac_type >= e1000_82543) ? E1000_##reg : E1000_82542_##reg) + \
+	(offset)))
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, STATUS)
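+
+/*
+ * Typical access pattern (illustrative only): posted MMIO writes are
+ * forced out by reading STATUS back through E1000_WRITE_FLUSH, e.g.
+ *
+ *	E1000_WRITE_REG(hw, IMC, ~0);	// mask all interrupts
+ *	E1000_WRITE_FLUSH(hw);		// readl() of STATUS posts the write
+ *
+ * where "hw" is the struct e1000_hw carrying hw_addr and mac_type.
+ */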
+
+#define E1000_WRITE_ICH8_REG(a, reg, value) ( \
+    writel((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG(a, reg) ( \
+    readl((a)->flash_address + reg))
+
+#define E1000_WRITE_ICH8_REG16(a, reg, value) ( \
+    writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_ICH8_REG16(a, reg) ( \
+    readw((a)->flash_address + reg))
+
+
+#endif /* _E1000_OSDEP_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c
new file mode 100644
index 0000000..42e94d5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/e1000_param.c
@@ -0,0 +1,906 @@
+/*******************************************************************************
+
+  
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+  
+  This program is free software; you can redistribute it and/or modify it 
+  under the terms of the GNU General Public License as published by the Free 
+  Software Foundation; either version 2 of the License, or (at your option) 
+  any later version.
+  
+  This program is distributed in the hope that it will be useful, but WITHOUT 
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or 
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for 
+  more details.
+  
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59 
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+  
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+  
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user-specified value and the
+ * user asking for the default value.
+ * The true default values are loaded in when e1000_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define E1000_PARAM(X, desc) \
+	static const int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	MODULE_PARM(X, "1-" __MODULE_STRING(E1000_MAX_NIC) "i"); \
+	MODULE_PARM_DESC(X, desc);
+#else
+#define E1000_PARAM(X, desc) \
+	static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	static int num_##X = 0; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
+#endif
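+
+/*
+ * Usage sketch (hypothetical module name and values): with
+ * module_param_array support, each option takes one comma-separated
+ * value per adapter, e.g.
+ *
+ *	modprobe rt_e1000 TxDescriptors=256,512 Speed=0,100
+ *
+ * gives board 0 256 Tx descriptors and board 1 512, with board 1
+ * additionally restricted to linking at 100 Mbps.
+ */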
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 10   - only link at 10 Mbps
+ *  - 100  - only link at 100 Mbps
+ *  - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ *  - 0 - auto-negotiate for duplex
+ *  - 1 - only link at half duplex
+ *  - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below:
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
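+
+/* Worked example: AutoNeg=0x29 sets bits 0, 3 and 5, i.e. it
+ * advertises 10/Half, 100/Full and 1000/Full (see the an_list table in
+ * e1000_check_copper_options() for the full decoding).
+ */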
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ *  - 0 - No Flow Control
+ *  - 1 - Rx only, respond to PAUSE frames but do not generate them
+ *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ *  - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+
+E1000_PARAM(FlowControl, "Flow Control setting");
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables all checksum offload
+ *  - 1 - enables receive IP/TCP/UDP checksum offload
+ *        on 82543 and newer NICs
+ *
+ * Default Value: 1
+ */
+
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0 for rtnet
+ */
+
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0
+ */
+
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0 for rtnet
+ */
+
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic)
+ *
+ * Default Value: 0 for rtnet
+ */
+
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+#define DEFAULT_RDTR                   0
+#define MAX_RXDELAY               0xFFFF
+#define MIN_RXDELAY                    0
+
+#define DEFAULT_RADV                   0
+#define MAX_RXABSDELAY            0xFFFF
+#define MIN_RXABSDELAY                 0
+
+#define DEFAULT_TIDV                   0 
+#define MAX_TXDELAY               0xFFFF
+#define MIN_TXDELAY                    0
+
+#define DEFAULT_TADV                   0
+#define MAX_TXABSDELAY            0xFFFF
+#define MIN_TXABSDELAY                 0
+
+#define DEFAULT_ITR                    0
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	char *name;
+	char *err;
+	int  def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int e1000_validate_option(int *value, struct e1000_option *opt,
+		struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+					"%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+	       opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+
+void e1000_check_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+	if (bd >= E1000_MAX_NIC) {
+		DPRINTK(PROBE, NOTICE,
+		       "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+#ifndef module_param_array
+		bd = E1000_MAX_NIC;
+#endif
+	}
+
+	{ /* Transmit Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_TXD),
+			.def  = E1000_DEFAULT_TXD,
+			.arg  = { .r = { .min = E1000_MIN_TXD }}
+		};
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+		opt.arg.r.max = mac_type < e1000_82544 ?
+			E1000_MAX_TXD : E1000_MAX_82544_TXD;
+
+#ifdef module_param_array
+		if (num_TxDescriptors > bd) {
+#endif
+			tx_ring->count = TxDescriptors[bd];
+			e1000_validate_option(&tx_ring->count, &opt, adapter);
+			E1000_ROUNDUP(tx_ring->count,
+						REQ_TX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			tx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			tx_ring[i].count = tx_ring->count;
+	}
+	{ /* Receive Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_RXD),
+			.def  = E1000_DEFAULT_RXD,
+			.arg  = { .r = { .min = E1000_MIN_RXD }}
+		};
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		e1000_mac_type mac_type = adapter->hw.mac_type;
+		opt.arg.r.max = mac_type < e1000_82544 ? E1000_MAX_RXD :
+			E1000_MAX_82544_RXD;
+
+#ifdef module_param_array
+		if (num_RxDescriptors > bd) {
+#endif
+			rx_ring->count = RxDescriptors[bd];
+			e1000_validate_option(&rx_ring->count, &opt, adapter);
+			E1000_ROUNDUP(rx_ring->count,
+						REQ_RX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			rx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rx_ring[i].count = rx_ring->count;
+	}
+	{ /* Checksum Offload Enable/Disable */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Checksum Offload",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_XsumRX > bd) {
+#endif
+			int rx_csum = XsumRX[bd];
+			e1000_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+#ifdef module_param_array
+		} else {
+			adapter->rx_csum = opt.def;
+		}
+#endif
+	}
+	{ /* Flow Control */
+
+		struct e1000_opt_list fc_list[] =
+			{{ e1000_fc_none,    "Flow Control Disabled" },
+			 { e1000_fc_rx_pause,"Flow Control Receive Only" },
+			 { e1000_fc_tx_pause,"Flow Control Transmit Only" },
+			 { e1000_fc_full,    "Flow Control Enabled" },
+			 { e1000_fc_default, "Flow Control Hardware Default" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Flow Control",
+			.err  = "reading default settings from EEPROM",
+			.def  = e1000_fc_default,
+			.arg  = { .l = { .nr = ARRAY_SIZE(fc_list),
+					 .p = fc_list }}
+		};
+
+#ifdef module_param_array
+		if (num_FlowControl > bd) {
+#endif
+			int fc = FlowControl[bd];
+			e1000_validate_option(&fc, &opt, adapter);
+			adapter->hw.fc = adapter->hw.original_fc = fc;
+#ifdef module_param_array
+		} else {
+			adapter->hw.fc = adapter->hw.original_fc = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxIntDelay > bd) {
+#endif
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxAbsIntDelay > bd) {
+#endif
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_RxIntDelay > bd) {
+#endif
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_RxAbsIntDelay > bd) {
+#endif
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+								adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Interrupt Throttling Rate */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR }}
+		};
+
+#ifdef module_param_array
+		if (num_InterruptThrottleRate > bd) {
+#endif
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+					opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				break;
+			default:
+				e1000_validate_option(&adapter->itr, &opt,
+					adapter);
+				break;
+			}
+#ifdef module_param_array
+		} else {
+			adapter->itr = opt.def;
+		}
+#endif
+	}
+	{ /* Smart Power Down */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+#ifdef module_param_array
+		if (num_SmartPowerDownEnable > bd) {
+#endif
+			int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			adapter->smart_power_down = spd;
+#ifdef module_param_array
+		} else {
+			adapter->smart_power_down = opt.def;
+		}
+#endif
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_KumeranLockLoss > bd) {
+#endif
+			int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !kmrn_lock_loss;
+#ifdef module_param_array
+		} else {
+			adapter->hw.kmrn_lock_loss_workaround_disabled = !opt.def;
+		}
+#endif
+	}
+
+	switch (adapter->hw.media_type) {
+	case e1000_media_type_fiber:
+	case e1000_media_type_internal_serdes:
+		e1000_check_fiber_options(adapter);
+		break;
+	case e1000_media_type_copper:
+		e1000_check_copper_options(adapter);
+		break;
+	default:
+		BUG();
+	}
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+	if ((Speed[bd] != OPTION_UNSET)) {
+#else
+	if (num_Speed > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((Duplex[bd] != OPTION_UNSET)) {
+#else
+	if (num_Duplex > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
+#else
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+#endif
+		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+				 "not valid for fiber adapters, "
+				 "parameter ignored\n");
+	}
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+
+static void e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+	int speed, dplx, an;
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+#endif
+
+	{ /* Speed */
+		struct e1000_opt_list speed_list[] = {{          0, "" },
+						      {   SPEED_10, "" },
+						      {  SPEED_100, "" },
+						      { SPEED_1000, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Speed",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
+					 .p = speed_list }}
+		};
+
+#ifdef module_param_array
+		if (num_Speed > bd) {
+#endif
+			speed = Speed[bd];
+			e1000_validate_option(&speed, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			speed = opt.def;
+		}
+#endif
+	}
+	{ /* Duplex */
+		struct e1000_opt_list dplx_list[] = {{           0, "" },
+						     { HALF_DUPLEX, "" },
+						     { FULL_DUPLEX, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Duplex",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(dplx_list),
+					 .p = dplx_list }}
+		};
+
+		if (e1000_check_phy_reset_block(&adapter->hw)) {
+			DPRINTK(PROBE, INFO,
+				"Link active due to SoL/IDER Session. "
+			        "Speed/Duplex/AutoNeg parameter ignored.\n");
+			return;
+		}
+#ifdef module_param_array
+		if (num_Duplex > bd) {
+#endif
+			dplx = Duplex[bd];
+			e1000_validate_option(&dplx, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			dplx = opt.def;
+		}
+#endif
+	}
+
+#ifdef module_param_array
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+#else
+	if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
+#endif
+		DPRINTK(PROBE, INFO,
+		       "AutoNeg specified along with Speed or Duplex, "
+		       "parameter ignored\n");
+		adapter->hw.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+	} else { /* Autoneg */
+		struct e1000_opt_list an_list[] =
+			#define AA "AutoNeg advertising "
+			{{ 0x01, AA "10/HD" },
+			 { 0x02, AA "10/FD" },
+			 { 0x03, AA "10/FD, 10/HD" },
+			 { 0x04, AA "100/HD" },
+			 { 0x05, AA "100/HD, 10/HD" },
+			 { 0x06, AA "100/HD, 10/FD" },
+			 { 0x07, AA "100/HD, 10/FD, 10/HD" },
+			 { 0x08, AA "100/FD" },
+			 { 0x09, AA "100/FD, 10/HD" },
+			 { 0x0a, AA "100/FD, 10/FD" },
+			 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+			 { 0x0c, AA "100/FD, 100/HD" },
+			 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+			 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+			 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x20, AA "1000/FD" },
+			 { 0x21, AA "1000/FD, 10/HD" },
+			 { 0x22, AA "1000/FD, 10/FD" },
+			 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+			 { 0x24, AA "1000/FD, 100/HD" },
+			 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+			 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+			 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x28, AA "1000/FD, 100/FD" },
+			 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+			 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+			 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+			 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+			 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "AutoNeg",
+			.err  = "parameter ignored",
+			.def  = AUTONEG_ADV_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(an_list),
+					 .p = an_list }}
+		};
+
+#ifdef module_param_array
+		if (num_AutoNeg > bd) {
+#endif
+			an = AutoNeg[bd];
+			e1000_validate_option(&an, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			an = opt.def;
+		}
+#endif
+		adapter->hw.autoneg_advertised = an;
+	}
+
+	switch (speed + dplx) {
+	case 0:
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+#ifdef module_param_array
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+#else
+		if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
+#endif
+			DPRINTK(PROBE, INFO,
+			       "Speed and duplex autonegotiation enabled\n");
+		break;
+	case HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Half Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_100_HALF;
+		break;
+	case FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_FULL |
+		                                 ADVERTISE_100_FULL |
+		                                 ADVERTISE_1000_FULL;
+		break;
+	case SPEED_10:
+		DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_10_HALF |
+		                                 ADVERTISE_10_FULL;
+		break;
+	case SPEED_10 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_10 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_10_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100:
+		DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"100 Mbps only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_100_HALF |
+		                                 ADVERTISE_100_FULL;
+		break;
+	case SPEED_100 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_half;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_100 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 0;
+		adapter->hw.forced_speed_duplex = e1000_100_full;
+		adapter->hw.autoneg_advertised = 0;
+		break;
+	case SPEED_1000:
+		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+			"Duplex\n");
+		DPRINTK(PROBE, INFO,
+			"Using Autonegotiation at 1000 Mbps "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO,
+			"Half Duplex is not supported at 1000 Mbps\n");
+		DPRINTK(PROBE, INFO,
+			"Using Autonegotiation at 1000 Mbps "
+			"Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO,
+		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		adapter->hw.autoneg = adapter->fc_autoneg = 1;
+		adapter->hw.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
+	if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+		DPRINTK(PROBE, INFO,
+			"Speed, AutoNeg and MDI-X specifications are "
+			"incompatible. Setting MDI-X to a compatible value.\n");
+	}
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h
new file mode 100644
index 0000000..0acb218
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000/kcompat.h
@@ -0,0 +1,446 @@
+/*******************************************************************************
+
+
+  Copyright(c) 1999 - 2006 Intel Corporation. All rights reserved.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms of the GNU General Public License as published by the Free
+  Software Foundation; either version 2 of the License, or (at your option)
+  any later version.
+
+  This program is distributed in the hope that it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc., 59
+  Temple Place - Suite 330, Boston, MA  02111-1307, USA.
+
+  The full GNU General Public License is included in this distribution in the
+  file called LICENSE.
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/pagemap.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <asm/io.h>
+
+#include <rtnet_port.h>
+
+#ifndef IRQ_HANDLED
+#define irqreturn_t void
+#define IRQ_HANDLED
+#define IRQ_NONE
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+/* Useful settings for rtnet */
+#undef MAX_SKB_FRAGS
+#undef NETIF_F_TSO
+#undef E1000_COUNT_ICR
+#undef NETIF_F_HW_VLAN_TX
+#undef CONFIG_NET_POLL_CONTROLLER
+#undef ETHTOOL_OPS_COMPAT
+#undef ETHTOOL_GPERMADDR
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x)	kfree(x)
+#endif
+
+#undef E1000_NAPI
+#undef CONFIG_E1000_NAPI
+
+#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT 1
+
+
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+#undef CONFIG_PM
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
+
+/*****************************************************************************/
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+/*****************************************************************************/
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+/*****************************************************************************/
+/* Installations whose ethtool version lacks eeprom, adapter id, or statistics
+ * support */
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+	uint32_t cmd;
+	char	 driver[32];
+	char	 version[32];
+	char	 fw_version[32];
+	char	 bus_info[32];
+	char	 reserved1[32];
+	char	 reserved2[16];
+	uint32_t n_stats;
+	uint32_t testinfo_len;
+	uint32_t eedump_len;
+	uint32_t regdump_len;
+};
+
+struct ethtool_stats {
+	uint32_t cmd;
+	uint32_t n_stats;
+	uint64_t data[0];
+};
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+	ETH_SS_TEST             = 0,
+	ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+	u32     cmd;            /* ETHTOOL_GSTRINGS */
+	u32     string_set;     /* string set id, e.g. ETH_SS_TEST, etc. */
+	u32     len;            /* number of strings in the string set */
+	u8      data[0];
+};
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+	ETH_TEST_FL_OFFLINE	= (1 << 0),
+	ETH_TEST_FL_FAILED	= (1 << 1),
+};
+struct ethtool_test {
+	uint32_t cmd;
+	uint32_t flags;
+	uint32_t reserved;
+	uint32_t len;
+	uint64_t data[0];
+};
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+	uint32_t cmd;
+	uint32_t magic;
+	uint32_t offset;
+	uint32_t len;
+	uint8_t	 data[0];
+};
+
+struct ethtool_value {
+	uint32_t cmd;
+	uint32_t data;
+};
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* Ethtool version without link support */
+#endif /* Ethtool version without eeprom support */
+#endif /* Ethtool version without test support */
+#endif /* Ethtool version without strings support */
+#endif /* Ethtool version without adapter id support */
+#endif /* Ethtool version without statistics support */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS		0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+	u32	cmd;
+	u32	version; /* driver-specific, indicates different chips/revs */
+	u32	len; /* bytes */
+	u8	data[0];
+};
+#endif
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK		0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+	u32	cmd;	/* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32	rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	rx_coalesce_usecs_irq;
+	u32	rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32	tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	tx_coalesce_usecs_irq;
+	u32	tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates.  Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32	stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates.  Some drivers
+	 * only implement one of RX or TX adaptive coalescing.  Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32	use_adaptive_rx_coalesce;
+	u32	use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32	pkt_rate_low;
+	u32	rx_coalesce_usecs_low;
+	u32	rx_max_coalesced_frames_low;
+	u32	tx_coalesce_usecs_low;
+	u32	tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate (measured in packets per second)
+	 * is above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32	pkt_rate_high;
+	u32	rx_coalesce_usecs_high;
+	u32	rx_max_coalesced_frames_high;
+	u32	tx_coalesce_usecs_high;
+	u32	tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds.  Must not be zero.
+	 */
+	u32	rate_sample_interval;
+};
+#endif
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+	u32	cmd;	/* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes.  These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32	rx_max_pending;
+	u32	rx_mini_max_pending;
+	u32	rx_jumbo_max_pending;
+	u32	tx_max_pending;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32	rx_pending;
+	u32	rx_mini_pending;
+	u32	rx_jumbo_pending;
+	u32	tx_pending;
+};
+#endif
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+	u32	cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autoneg' here non-zero to have the
+	 * pause parameters be auto-negotiated too.  In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32	autoneg;
+	u32	rx_pause;
+	u32	tx_pause;
+};
+#endif
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
+					    * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
+					    * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST		0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef NET_IP_ALIGN
+#define NET_IP_ALIGN 2
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0 /* driver took care of the packet */
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1 /* driver tx path was busy */
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1 /* driver tx lock was already taken */
+#endif
+
+/* if we do not have the infrastructure to detect if skb_header is cloned *
+ * just return false in all cases */
+#ifndef SKB_DATAREF_SHIFT
+#define skb_header_cloned(x) 0
+#endif /* SKB_DATAREF_SHIFT not defined */
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#define USE_DRIVER_SHUTDOWN_HANDLER
+
+#ifndef SA_PROBEIRQ
+#define SA_PROBEIRQ 0
+#endif
+
+#endif /* _KCOMPAT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c
new file mode 100644
index 0000000..e1159e5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/80003es2lan.c
@@ -0,0 +1,1515 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 80003ES2LAN Gigabit Ethernet Controller (Copper)
+ * 80003ES2LAN Gigabit Ethernet Controller (Serdes)
+ */
+
+#include "e1000.h"
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL	 0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL	 0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL	 0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE	 0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS	 0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS	 0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING	 0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT	 0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE		 0x2000
+
+#define E1000_KMRNCTRLSTA_OPMODE_MASK		 0x000C
+#define E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO	 0x0004
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN	 0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN	 0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN	 0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE	 0x0002 /* 1=Reversal Disab. */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK	 0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI		 0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX	 0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO	 0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG		 0x2000
+						/* 1=Reverse Auto-Negotiation */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK		 0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5		 0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25		 0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_25		 0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX		 0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+#define GG82563_DSPD_CABLE_LENGTH		 0x0007 /* 0 = <50M
+							   1 = 50-80M
+							   2 = 80-110M
+							   3 = 110-140M
+							   4 = >140M */
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER		 0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY  0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE	 0x0001
+					   /* 1=Enable SERDES Electrical Idle */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING			 0x0010 /* Disable Padding */
+
+/*
+ * A table for the GG82563 cable length where the range is defined
+ * with a lower bound at "index" and the upper bound at
+ * "index + 5".
+ */
+static const u16 e1000_gg82563_cable_length_table[] = {
+	 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_gg82563_cable_length_table)
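+
+/* Worked example: a DSP distance index of 2 brackets the cable length
+ * between e1000_gg82563_cable_length_table[2] = 115 and
+ * e1000_gg82563_cable_length_table[2 + 5] = 150 meters, per the
+ * lower/upper bound scheme described above.
+ */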
+
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+                                            u16 *data);
+static s32  e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+                                             u16 data);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/**
+ *  e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type	= e1000_phy_none;
+		return 0;
+	} else {
+		phy->ops.power_up = e1000_power_up_phy_copper;
+		phy->ops.power_down = e1000_power_down_phy_copper_80003es2lan;
+	}
+
+	phy->addr		= 1;
+	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us      = 100;
+	phy->type		= e1000_phy_gg82563;
+
+	/* This can only be done after all function pointers are set up. */
+	ret_val = e1000e_get_phy_id(hw);
+
+	/* Verify phy id */
+	if (phy->id != GG82563_E_PHY_ID)
+		return -E1000_ERR_PHY;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u16 size;
+
+	nvm->opcode_bits	= 8;
+	nvm->delay_usec	 = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size    = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size    = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	nvm->type = e1000_nvm_eeprom_spi;
+
+	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+			  E1000_EECD_SIZE_EX_SHIFT);
+
+	/*
+	 * Added to a constant, "size" becomes the left-shift value
+	 * for setting word_size.
+	 */
+	size += NVM_WORD_SIZE_BASE_SHIFT;
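+	/*
+	 * Worked example (assuming NVM_WORD_SIZE_BASE_SHIFT is 6, as in
+	 * mainline e1000e): an EECD size field of 2 gives
+	 * word_size = 1 << (2 + 6) = 256 words.
+	 */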
+
+	/* EEPROM access above 16k is unsupported */
+	if (size > 14)
+		size = 14;
+	nvm->word_size	= 1 << size;
+
+	return 0;
+}
+
+/**
+ *  e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_mac_operations *func = &mac->ops;
+
+	/* Set media type */
+	switch (adapter->pdev->device) {
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* FWSM register */
+	mac->has_fwsm = true;
+	/* ARC supported; valid only if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+	        (er32(FWSM) & E1000_FWSM_MODE_MASK)
+	                ? true : false;
+	/* Adaptive IFS not supported */
+	mac->adaptive_ifs = false;
+
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->setup_physical_interface = e1000_setup_copper_link_80003es2lan;
+		func->check_for_link = e1000e_check_for_copper_link;
+		break;
+	case e1000_media_type_fiber:
+		func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+		func->check_for_link = e1000e_check_for_fiber_link;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->setup_physical_interface = e1000e_setup_fiber_serdes_link;
+		func->check_for_link = e1000e_check_for_serdes_link;
+		break;
+	default:
+		return -E1000_ERR_CONFIG;
+		break;
+	}
+
+	/* set lan id for port to determine which phy lock to use */
+	hw->mac.ops.set_lan_id(hw);
+
+	return 0;
+}
+
+static s32 e1000_get_variants_80003es2lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	s32 rc;
+
+	rc = e1000_init_mac_params_80003es2lan(adapter);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_nvm_params_80003es2lan(hw);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_phy_params_80003es2lan(hw);
+	if (rc)
+		return rc;
+
+	return 0;
+}
+
+/**
+ *  e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to acquire access rights to the correct PHY.
+ **/
+static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_release_phy_80003es2lan - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.
+ **/
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_acquire_mac_csr_80003es2lan - Acquire rights to access Kumeran register
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the semaphore to access the Kumeran interface.
+ *
+ **/
+static s32 e1000_acquire_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = E1000_SWFW_CSR_SM;
+
+	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_release_mac_csr_80003es2lan - Release rights to access Kumeran Register
+ *  @hw: pointer to the HW structure
+ *
+ *  Release the semaphore used to access the Kumeran interface
+ **/
+static void e1000_release_mac_csr_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	mask = E1000_SWFW_CSR_SM;
+
+	e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the semaphore to access the EEPROM.
+ **/
+static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_acquire_nvm(hw);
+
+	if (ret_val)
+		e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Release the semaphore used to access the EEPROM.
+ **/
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	e1000e_release_nvm(hw);
+	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 i = 0;
+	s32 timeout = 50;
+
+	while (i < timeout) {
+		if (e1000e_get_hw_semaphore(hw))
+			return -E1000_ERR_SWFW_SYNC;
+
+		swfw_sync = er32(SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/*
+		 * Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask)
+		 */
+		e1000e_put_hw_semaphore(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		e_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		return -E1000_ERR_SWFW_SYNC;
+	}
+
+	swfw_sync |= swmask;
+	ew32(SW_FW_SYNC, swfw_sync);
+
+	e1000e_put_hw_semaphore(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
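+	/*
+	 * Releasing must not fail, so spin until the HW semaphore
+	 * protecting SW_FW_SYNC is ours, then clear our lock bits.
+	 */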
+	while (e1000e_get_hw_semaphore(hw) != 0)
+		; /* Empty */
+
+	swfw_sync = er32(SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	ew32(SW_FW_SYNC, swfw_sync);
+
+	e1000e_put_hw_semaphore(hw);
+}
+
+/**
+ *  e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @data: pointer to the data returned from the operation
+ *
+ *  Read the GG82563 PHY register.
+ **/
+static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+						  u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
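+	/* The page index is carried in the upper bits of the offset. */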
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		return ret_val;
+	}
+
+	if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+		/*
+		 * The "ready" bit in the MDIC register may be incorrectly set
+		 * before the device has completed the "Page Select" MDI
+		 * transaction.  So we wait 200us after each MDI command...
+		 */
+		udelay(200);
+
+		/* ...and verify the command was successful. */
+		ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+		if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+			ret_val = -E1000_ERR_PHY;
+			e1000_release_phy_80003es2lan(hw);
+			return ret_val;
+		}
+
+		udelay(200);
+
+		ret_val = e1000e_read_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+
+		udelay(200);
+	} else {
+		ret_val = e1000e_read_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+	}
+
+	e1000_release_phy_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to write to
+ *  @data: value to write to the register
+ *
+ *  Write to the GG82563 PHY register.
+ **/
+static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+						   u32 offset, u16 data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000e_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		return ret_val;
+	}
+
+	if (hw->dev_spec.e80003es2lan.mdic_wa_enable) {
+		/*
+		 * The "ready" bit in the MDIC register may be incorrectly set
+		 * before the device has completed the "Page Select" MDI
+		 * transaction.  So we wait 200us after each MDI command...
+		 */
+		udelay(200);
+
+		/* ...and verify the command was successful. */
+		ret_val = e1000e_read_phy_reg_mdic(hw, page_select, &temp);
+
+		if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+			e1000_release_phy_80003es2lan(hw);
+			return -E1000_ERR_PHY;
+		}
+
+		udelay(200);
+
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+
+		udelay(200);
+	} else {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+		                                  MAX_PHY_REG_ADDRESS & offset,
+		                                  data);
+	}
+
+	e1000_release_phy_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the NVM to be written to
+ *  @words: number of words to write
+ *  @data: buffer of data to write to the NVM
+ *
+ *  Write "words" of data to the ESB2 NVM.
+ **/
+static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+				       u16 words, u16 *data)
+{
+	return e1000e_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ *  e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ *  @hw: pointer to the HW structure
+ *
+ *  Wait a specific amount of time for manageability processes to complete.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	if (hw->bus.func == 1)
+		mask = E1000_NVM_CFG_DONE_PORT_1;
+
+	while (timeout) {
+		if (er32(EEMNGCTL) & mask)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout) {
+		e_dbg("MNG configuration cycle has not completed.\n");
+		return -E1000_ERR_RESET;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the speed and duplex settings onto the PHY.  This is a
+ *  function pointer entry point called by the phy module.
+ **/
+static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("GG82563 PSCR: %X\n", phy_data);
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	/* Reset the phy to commit changes. */
+	phy_data |= MII_CR_RESET;
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	udelay(1);
+
+	if (hw->phy.autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link "
+			 "on GG82563 phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link) {
+			/*
+			 * We didn't get link.
+			 * Reset the DSP and cross our fingers.
+			 */
+			ret_val = e1000e_phy_reset_dsp(hw);
+			if (ret_val)
+				return ret_val;
+		}
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Resetting the phy means we need to verify the TX_CLK corresponds
+	 * to the link speed.  10Mbps -> 2.5MHz, else 25MHz.
+	 */
+	phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+	if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+		phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+	else
+		phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_80003es2lan - Set approximate cable length
+ *  @hw: pointer to the HW structure
+ *
+ *  Find the approximate cable length as measured by the GG82563 PHY.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data, index;
+
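+	/*
+	 * The PHY DSP reports a coarse distance index; the table entries
+	 * at index and index + 5 bound the estimate and their midpoint is
+	 * reported as the cable length.
+	 */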
+	ret_val = e1e_rphy(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+
+	if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+	phy->max_cable_length = e1000_gg82563_cable_length_table[index + 5];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to speed buffer
+ *  @duplex: pointer to duplex buffer
+ *
+ *  Retrieve the current speed and duplex configuration.
+ **/
+static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+					      u16 *duplex)
+{
+	s32 ret_val;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000e_get_speed_and_duplex_copper(hw, speed,
+							     duplex);
+		hw->phy.ops.cfg_on_link_up(hw);
+	} else {
+		ret_val = e1000e_get_speed_and_duplex_fiber_serdes(hw,
+								  speed,
+								  duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Perform a global reset to the ESB2 controller.
+ **/
+static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000e_disable_pcie_master(hw);
+	if (ret_val)
+		e_dbg("PCI-E Master disable polling has failed.\n");
+
+	e_dbg("Masking off all interrupts\n");
+	ew32(IMC, 0xffffffff);
+
+	ew32(RCTL, 0);
+	ew32(TCTL, E1000_TCTL_PSP);
+	e1e_flush();
+
+	usleep_range(10000, 20000);
+
+	ctrl = er32(CTRL);
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	e_dbg("Issuing a global reset to MAC\n");
+	ew32(CTRL, ctrl | E1000_CTRL_RST);
+	e1000_release_phy_80003es2lan(hw);
+
+	ret_val = e1000e_get_auto_rd_done(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		return ret_val;
+
+	/* Clear any pending interrupt events. */
+	ew32(IMC, 0xffffffff);
+	er32(ICR);
+
+	ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ **/
+static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 kum_reg_data;
+	u16 i;
+
+	e1000_initialize_hw_bits_80003es2lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000e_id_led_init(hw);
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+
+	/* Disabling VLAN filtering */
+	e_dbg("Initializing the IEEE VLAN\n");
+	mac->ops.clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	e_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000e_setup_link(hw);
+
+	/* Disable IBIST slave mode (far-end loopback) */
+	e1000_read_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					&kum_reg_data);
+	kum_reg_data |= E1000_KMRNCTRLSTA_IBIST_DISABLE;
+	e1000_write_kmrn_reg_80003es2lan(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+					 kum_reg_data);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = er32(TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	ew32(TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	reg_data = er32(TXDCTL(1));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		   E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	ew32(TXDCTL(1), reg_data);
+
+	/* Enable retransmit on late collisions */
+	reg_data = er32(TCTL);
+	reg_data |= E1000_TCTL_RTLC;
+	ew32(TCTL, reg_data);
+
+	/* Configure Gigabit Carry Extend Padding */
+	reg_data = er32(TCTL_EXT);
+	reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+	reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+	ew32(TCTL_EXT, reg_data);
+
+	/* Configure Transmit Inter-Packet Gap */
+	reg_data = er32(TIPG);
+	reg_data &= ~E1000_TIPG_IPGT_MASK;
+	reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	ew32(TIPG, reg_data);
+
+	reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+	reg_data &= ~0x00100000;
+	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+	/* default to true to enable the MDIC W/A */
+	hw->dev_spec.e80003es2lan.mdic_wa_enable = true;
+
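+	/*
+	 * If the Kumeran interface reports in-band MDIO operating mode,
+	 * the MDIC workaround in the PHY read/write paths is not needed
+	 * and is switched off below.
+	 */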
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+	                              E1000_KMRNCTRLSTA_OFFSET >>
+	                              E1000_KMRNCTRLSTA_OFFSET_SHIFT,
+	                              &i);
+	if (!ret_val) {
+		if ((i & E1000_KMRNCTRLSTA_OPMODE_MASK) ==
+		     E1000_KMRNCTRLSTA_OPMODE_INBAND_MDIO)
+			hw->dev_spec.e80003es2lan.mdic_wa_enable = false;
+	}
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	/* Transmit Descriptor Control 0 */
+	reg = er32(TXDCTL(0));
+	reg |= (1 << 22);
+	ew32(TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = er32(TXDCTL(1));
+	reg |= (1 << 22);
+	ew32(TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = er32(TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	if (hw->phy.media_type != e1000_media_type_copper)
+		reg &= ~(1 << 20);
+	ew32(TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = er32(TARC(1));
+	if (er32(TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	ew32(TARC(1), reg);
+}
+
+/**
+ *  e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ *  @hw: pointer to the HW structure
+ *
+ *  Setup some GG82563 PHY registers for obtaining link
+ **/
+static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl_ext;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_MAC_SPEC_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+	/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+	data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+	ret_val = e1e_wphy(hw, GG82563_PHY_MAC_SPEC_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+	switch (phy->mdix) {
+	case 1:
+		data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+		break;
+	case 2:
+		data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+		break;
+	case 0:
+	default:
+		data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+		break;
+	}
+
+	/*
+	 * Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+	if (phy->disable_polarity_correction)
+		data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	/* SW Reset the PHY so all changes take effect */
+	ret_val = e1000e_commit_phy(hw);
+	if (ret_val) {
+		e_dbg("Error Resetting the PHY\n");
+		return ret_val;
+	}
+
+	/* Bypass Rx and Tx FIFO's */
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+					E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+					E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+				       E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+				       &data);
+	if (ret_val)
+		return ret_val;
+	data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+					data);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+	if (ret_val)
+		return ret_val;
+
+	data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+	ret_val = e1e_wphy(hw, GG82563_PHY_SPEC_CTRL_2, data);
+	if (ret_val)
+		return ret_val;
+
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+	ew32(CTRL_EXT, ctrl_ext);
+
+	ret_val = e1e_rphy(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Do not init these registers when the HW is in IAMT mode, since the
+	 * firmware will have already initialized them.  We only initialize
+	 * them if the HW is not in IAMT mode.
+	 */
+	if (!e1000e_check_mng_mode(hw)) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1e_wphy(hw, GG82563_PHY_PWR_MGMT_CTRL, data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1e_rphy(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1e_wphy(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/**
+ *  e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Essentially a wrapper for setting up all things "copper" related.
+ *  This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 4),
+	                                           0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+	                                          &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw, GG82563_REG(0x34, 9),
+	                                           reg_data);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000_read_kmrn_reg_80003es2lan(hw,
+				      E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+				      &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+					E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+					reg_data);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_setup_copper_link(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_on_link_up_80003es2lan - es2 link configuration after link-up
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the KMRN interface on link-up by applying last-minute
+ *  quirks for 10/100 or gigabit operation as appropriate.
+ **/
+static s32 e1000_cfg_on_link_up_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 speed;
+	u16 duplex;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000e_get_speed_and_duplex_copper(hw, &speed,
+		                                             &duplex);
+		if (ret_val)
+			return ret_val;
+
+		if (speed == SPEED_1000)
+			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+		else
+			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw, duplex);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ *  @hw: pointer to the HW structure
+ *  @duplex: current duplex setting
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	ew32(TIPG, tipg);
+
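+	/*
+	 * Read the KMRN mode control register until two consecutive reads
+	 * return the same value (bounded by GG82563_MAX_KMRN_RETRY) so a
+	 * stable snapshot is modified below.
+	 */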
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val = e1000_write_kmrn_reg_80003es2lan(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = er32(TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	ew32(TIPG, tipg);
+
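+	/* As above: loop until two consecutive reads of the register agree. */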
+	do {
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, GG82563_PHY_KMRN_MODE_CTRL, &reg_data2);
+		if (ret_val)
+			return ret_val;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+	ret_val = e1e_wphy(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_kmrn_reg_80003es2lan - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquire semaphore, then read the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release the semaphore before exiting.
+ **/
+static s32 e1000_read_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					   u16 *data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
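+	/*
+	 * Program KMRNCTRLSTA: the register offset goes into the address
+	 * field and REN requests a read; the result shows up in the low
+	 * 16 bits after a short delay.
+	 */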
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	e1000_release_mac_csr_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_kmrn_reg_80003es2lan - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquire semaphore, then write the data to PHY register
+ *  at the offset using the kumeran interface.  Release semaphore
+ *  before exiting.
+ **/
+static s32 e1000_write_kmrn_reg_80003es2lan(struct e1000_hw *hw, u32 offset,
+					    u16 data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	ret_val = e1000_acquire_mac_csr_80003es2lan(hw);
+	if (ret_val)
+		return ret_val;
+
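+	/* For writes, the 16-bit payload occupies the low data field. */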
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | data;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	e1000_release_mac_csr_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_80003es2lan - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/*
+	 * If there's an alternate MAC address place it in RAR0
+	 * so that it will override the Si installed default perm
+	 * address.
+	 */
+	ret_val = e1000_check_alt_mac_addr_generic(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(hw->mac.ops.check_mng_mode(hw) ||
+	      hw->phy.ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+	e1000e_clear_hw_cntrs_base(hw);
+
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations es2_mac_ops = {
+	.read_mac_addr		= e1000_read_mac_addr_80003es2lan,
+	.id_led_init		= e1000e_id_led_init,
+	.blink_led		= e1000e_blink_led_generic,
+	.check_mng_mode		= e1000e_check_mng_mode_generic,
+	/* check_for_link dependent on media type */
+	.cleanup_led		= e1000e_cleanup_led_generic,
+	.clear_hw_cntrs		= e1000_clear_hw_cntrs_80003es2lan,
+	.get_bus_info		= e1000e_get_bus_info_pcie,
+	.set_lan_id		= e1000_set_lan_id_multi_port_pcie,
+	.get_link_up_info	= e1000_get_link_up_info_80003es2lan,
+	.led_on			= e1000e_led_on_generic,
+	.led_off		= e1000e_led_off_generic,
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
+	.write_vfta		= e1000_write_vfta_generic,
+	.clear_vfta		= e1000_clear_vfta_generic,
+	.reset_hw		= e1000_reset_hw_80003es2lan,
+	.init_hw		= e1000_init_hw_80003es2lan,
+	.setup_link		= e1000e_setup_link,
+	/* setup_physical_interface dependent on media type */
+	.setup_led		= e1000e_setup_led_generic,
+};
+
+static const struct e1000_phy_operations es2_phy_ops = {
+	.acquire		= e1000_acquire_phy_80003es2lan,
+	.check_polarity		= e1000_check_polarity_m88,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit		 	= e1000e_phy_sw_reset,
+	.force_speed_duplex 	= e1000_phy_force_speed_duplex_80003es2lan,
+	.get_cfg_done       	= e1000_get_cfg_done_80003es2lan,
+	.get_cable_length   	= e1000_get_cable_length_80003es2lan,
+	.get_info       	= e1000e_get_phy_info_m88,
+	.read_reg       	= e1000_read_phy_reg_gg82563_80003es2lan,
+	.release		= e1000_release_phy_80003es2lan,
+	.reset		  	= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state  	= NULL,
+	.set_d3_lplu_state  	= e1000e_set_d3_lplu_state,
+	.write_reg      	= e1000_write_phy_reg_gg82563_80003es2lan,
+	.cfg_on_link_up      	= e1000_cfg_on_link_up_80003es2lan,
+};
+
+static const struct e1000_nvm_operations es2_nvm_ops = {
+	.acquire		= e1000_acquire_nvm_80003es2lan,
+	.read			= e1000e_read_nvm_eerd,
+	.release		= e1000_release_nvm_80003es2lan,
+	.update			= e1000e_update_nvm_checksum_generic,
+	.valid_led_default	= e1000e_valid_led_default,
+	.validate		= e1000e_validate_nvm_checksum_generic,
+	.write			= e1000_write_nvm_80003es2lan,
+};
+
+const struct e1000_info e1000_es2_info = {
+	.mac			= e1000_80003es2lan,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_RX_NEEDS_RESTART /* errata */
+				  | FLAG_TARC_SET_BIT_ZERO /* errata */
+				  | FLAG_APME_CHECK_PORT_B
+				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+				  | FLAG_TIPG_MEDIUM_FOR_80003ESLAN,
+	.flags2			= FLAG2_DMA_BURST,
+	.pba			= 38,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_80003es2lan,
+	.mac_ops		= &es2_mac_ops,
+	.phy_ops		= &es2_phy_ops,
+	.nvm_ops		= &es2_nvm_ops,
+};
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c
new file mode 100644
index 0000000..1a3fa39
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/82571.c
@@ -0,0 +1,2112 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 82571EB Gigabit Ethernet Controller
+ * 82571EB Gigabit Ethernet Controller (Copper)
+ * 82571EB Gigabit Ethernet Controller (Fiber)
+ * 82571EB Dual Port Gigabit Mezzanine Adapter
+ * 82571EB Quad Port Gigabit Mezzanine Adapter
+ * 82571PT Gigabit PT Quad Port Server ExpressModule
+ * 82572EI Gigabit Ethernet Controller (Copper)
+ * 82572EI Gigabit Ethernet Controller (Fiber)
+ * 82572EI Gigabit Ethernet Controller
+ * 82573V Gigabit Ethernet Controller (Copper)
+ * 82573E Gigabit Ethernet Controller (Copper)
+ * 82573L Gigabit Ethernet Controller
+ * 82574L Gigabit Network Connection
+ * 82583V Gigabit Network Connection
+ */
+
+#include "e1000.h"
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+			      (ID_LED_OFF1_ON2  <<  8) | \
+			      (ID_LED_DEF1_DEF2 <<  4) | \
+			      (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+#define AN_RETRY_COUNT          5 /* Autoneg Retry Count value */
+#define E1000_BASE1000T_STATUS          10
+#define E1000_IDLE_ERROR_COUNT_MASK     0xFF
+#define E1000_RECEIVE_ERROR_COUNTER     21
+#define E1000_RECEIVE_ERROR_MAX         0xFFFF
+
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw);
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw);
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw);
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+				      u16 words, u16 *data);
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+static s32 e1000_setup_link_82571(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw);
+static s32 e1000_led_on_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw);
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active);
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active);
+
+/**
+ *  e1000_init_phy_params_82571 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type = e1000_phy_none;
+		return 0;
+	}
+
+	phy->addr			 = 1;
+	phy->autoneg_mask		 = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us		 = 100;
+
+	phy->ops.power_up		 = e1000_power_up_phy_copper;
+	phy->ops.power_down		 = e1000_power_down_phy_copper_82571;
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		phy->type		 = e1000_phy_igp_2;
+		break;
+	case e1000_82573:
+		phy->type		 = e1000_phy_m88;
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		phy->type		 = e1000_phy_bm;
+		phy->ops.acquire = e1000_get_hw_semaphore_82574;
+		phy->ops.release = e1000_put_hw_semaphore_82574;
+		phy->ops.set_d0_lplu_state = e1000_set_d0_lplu_state_82574;
+		phy->ops.set_d3_lplu_state = e1000_set_d3_lplu_state_82574;
+		break;
+	default:
+		return -E1000_ERR_PHY;
+	}
+
+	/* This can only be done after all function pointers are setup. */
+	ret_val = e1000_get_phy_id_82571(hw);
+	if (ret_val) {
+		e_dbg("Error getting PHY ID\n");
+		return ret_val;
+	}
+
+	/* Verify phy id */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		if (phy->id != IGP01E1000_I_PHY_ID)
+			ret_val = -E1000_ERR_PHY;
+		break;
+	case e1000_82573:
+		if (phy->id != M88E1111_I_PHY_ID)
+			ret_val = -E1000_ERR_PHY;
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		if (phy->id != BME1000_E_PHY_ID_R2)
+			ret_val = -E1000_ERR_PHY;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	if (ret_val)
+		e_dbg("PHY ID unknown: type = 0x%08x\n", phy->id);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u16 size;
+
+	nvm->opcode_bits = 8;
+	nvm->delay_usec = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (((eecd >> 15) & 0x3) == 0x3) {
+			nvm->type = e1000_nvm_flash_hw;
+			nvm->word_size = 2048;
+			/*
+			 * Autonomous Flash update bit must be cleared due
+			 * to Flash update issue.
+			 */
+			eecd &= ~E1000_EECD_AUPDEN;
+			ew32(EECD, eecd);
+			break;
+		}
+		fallthrough;
+	default:
+		nvm->type = e1000_nvm_eeprom_spi;
+		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+				  E1000_EECD_SIZE_EX_SHIFT);
+		/*
+		 * Added to a constant, "size" becomes the left-shift value
+		 * for setting word_size.
+		 */
+		size += NVM_WORD_SIZE_BASE_SHIFT;
+
+		/* EEPROM access above 16k is unsupported */
+		if (size > 14)
+			size = 14;
+		nvm->word_size	= 1 << size;
+		break;
+	}
+
+	/* Function Pointers */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		nvm->ops.acquire = e1000_get_hw_semaphore_82574;
+		nvm->ops.release = e1000_put_hw_semaphore_82574;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_init_mac_params_82571 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_init_mac_params_82571(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_mac_operations *func = &mac->ops;
+	u32 swsm = 0;
+	u32 swsm2 = 0;
+	bool force_clear_smbi = false;
+
+	/* Set media type */
+	switch (adapter->pdev->device) {
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82572EI_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Adaptive IFS supported */
+	mac->adaptive_ifs = true;
+
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->setup_physical_interface = e1000_setup_copper_link_82571;
+		func->check_for_link = e1000e_check_for_copper_link;
+		func->get_link_up_info = e1000e_get_speed_and_duplex_copper;
+		break;
+	case e1000_media_type_fiber:
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
+		func->check_for_link = e1000e_check_for_fiber_link;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->setup_physical_interface =
+			e1000_setup_fiber_serdes_link_82571;
+		func->check_for_link = e1000_check_for_serdes_link_82571;
+		func->get_link_up_info =
+			e1000e_get_speed_and_duplex_fiber_serdes;
+		break;
+	default:
+		return -E1000_ERR_CONFIG;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		func->set_lan_id = e1000_set_lan_id_single_port;
+		func->check_mng_mode = e1000e_check_mng_mode_generic;
+		func->led_on = e1000e_led_on_generic;
+		func->blink_led = e1000e_blink_led_generic;
+
+		/* FWSM register */
+		mac->has_fwsm = true;
+		/*
+		 * ARC supported; valid only if manageability features are
+		 * enabled.
+		 */
+		mac->arc_subsystem_valid =
+			(er32(FWSM) & E1000_FWSM_MODE_MASK)
+			? true : false;
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		func->set_lan_id = e1000_set_lan_id_single_port;
+		func->check_mng_mode = e1000_check_mng_mode_82574;
+		func->led_on = e1000_led_on_82574;
+		break;
+	default:
+		func->check_mng_mode = e1000e_check_mng_mode_generic;
+		func->led_on = e1000e_led_on_generic;
+		func->blink_led = e1000e_blink_led_generic;
+
+		/* FWSM register */
+		mac->has_fwsm = true;
+		break;
+	}
+
+	/*
+	 * Ensure that the inter-port SWSM.SMBI lock bit is clear before
+	 * first NVM or PHY access. This should be done for single-port
+	 * devices, and for one port only on dual-port devices so that
+	 * for those devices we can still use the SMBI lock to synchronize
+	 * inter-port accesses to the PHY & NVM.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		swsm2 = er32(SWSM2);
+
+		if (!(swsm2 & E1000_SWSM2_LOCK)) {
+			/* Only do this for the first interface on this card */
+			ew32(SWSM2, swsm2 | E1000_SWSM2_LOCK);
+			force_clear_smbi = true;
+		} else {
+			force_clear_smbi = false;
+		}
+		break;
+	default:
+		force_clear_smbi = true;
+		break;
+	}
+
+	if (force_clear_smbi) {
+		/* Make sure SWSM.SMBI is clear */
+		swsm = er32(SWSM);
+		if (swsm & E1000_SWSM_SMBI) {
+			/* This bit should not be set on a first interface, and
+			 * indicates that the bootagent or EFI code has
+			 * improperly left this bit enabled
+			 */
+			e_dbg("Please update your 82571 Bootagent\n");
+		}
+		ew32(SWSM, swsm & ~E1000_SWSM_SMBI);
+	}
+
+	/*
+	 * Initialize device specific counter of SMBI acquisition
+	 * timeouts.
+	 */
+	hw->dev_spec.e82571.smb_counter = 0;
+
+	return 0;
+}
+
+static s32 e1000_get_variants_82571(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	static int global_quad_port_a; /* global port a indication */
+	struct pci_dev *pdev = adapter->pdev;
+	int is_port_b = er32(STATUS) & E1000_STATUS_FUNC_1;
+	s32 rc;
+
+	rc = e1000_init_mac_params_82571(adapter);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_nvm_params_82571(hw);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_phy_params_82571(hw);
+	if (rc)
+		return rc;
+
+	/* tag quad port adapters first, it's used below */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+		adapter->flags |= FLAG_IS_QUAD_PORT;
+		/* mark the first port */
+		if (global_quad_port_a == 0)
+			adapter->flags |= FLAG_IS_QUAD_PORT_A;
+		/* Reset for multiple quad port adapters */
+		global_quad_port_a++;
+		if (global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	default:
+		break;
+	}
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82571:
+		/* these dual ports don't have WoL on port B at all */
+		if (((pdev->device == E1000_DEV_ID_82571EB_FIBER) ||
+		     (pdev->device == E1000_DEV_ID_82571EB_SERDES) ||
+		     (pdev->device == E1000_DEV_ID_82571EB_COPPER)) &&
+		    (is_port_b))
+			adapter->flags &= ~FLAG_HAS_WOL;
+		/* quad ports only support WoL on port A */
+		if (adapter->flags & FLAG_IS_QUAD_PORT &&
+		    (!(adapter->flags & FLAG_IS_QUAD_PORT_A)))
+			adapter->flags &= ~FLAG_HAS_WOL;
+		/* Does not support WoL on any port */
+		if (pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD)
+			adapter->flags &= ~FLAG_HAS_WOL;
+		break;
+	case e1000_82573:
+		if (pdev->device == E1000_DEV_ID_82573L) {
+			adapter->flags |= FLAG_HAS_JUMBO_FRAMES;
+			adapter->max_hw_frame_size = DEFAULT_JUMBO;
+		}
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_id = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * The 82571 firmware may still be configuring the PHY.
+		 * In this case, we cannot access the PHY until the
+		 * configuration is done.  So we explicitly set the
+		 * PHY ID.
+		 */
+		phy->id = IGP01E1000_I_PHY_ID;
+		break;
+	case e1000_82573:
+		return e1000e_get_phy_id(hw);
+	case e1000_82574:
+	case e1000_82583:
+		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id = (u32)(phy_id << 16);
+		udelay(20);
+		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+		if (ret_val)
+			return ret_val;
+
+		phy->id |= (u32)(phy_id);
+		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+		break;
+	default:
+		return -E1000_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 sw_timeout = hw->nvm.word_size + 1;
+	s32 fw_timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/*
+	 * If we have timed out 3 times on trying to acquire
+	 * the inter-port SMBI semaphore, there is old code
+	 * operating on the other port, and it is not
+	 * releasing SMBI. Modify the number of times that
+	 * we try for the semaphore to interwork with this
+	 * older code.
+	 */
+	if (hw->dev_spec.e82571.smb_counter > 2)
+		sw_timeout = 1;
+
+	/* Get the SW semaphore */
+	while (i < sw_timeout) {
+		swsm = er32(SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == sw_timeout) {
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
+		hw->dev_spec.e82571.smb_counter++;
+	}
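+
+	/*
+	 * Even when SMBI times out, fall through and try the SW/FW
+	 * semaphore anyway; the failure is only counted so later attempts
+	 * use a shorter SMBI timeout (see above).
+	 */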
+	/* Get the FW semaphore. */
+	for (i = 0; i < fw_timeout; i++) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (er32(SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == fw_timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82571(hw);
+		e_dbg("Driver can't access the NVM\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = er32(SWSM);
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+	ew32(SWSM, swsm);
+}
+
+/**
+ *  e1000_get_hw_semaphore_82573 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore during reset.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+	s32 ret_val = 0;
+	s32 i = 0;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	do {
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+			break;
+
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+		usleep_range(2000, 4000);
+		i++;
+	} while (i < MDIO_OWNERSHIP_TIMEOUT);
+
+	if (i == MDIO_OWNERSHIP_TIMEOUT) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_82573(hw);
+		e_dbg("Driver can't access the PHY\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82573 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used during reset.
+ *
+ **/
+static void e1000_put_hw_semaphore_82573(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+static DEFINE_MUTEX(swflag_mutex);
+
+/**
+ *  e1000_get_hw_semaphore_82574 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM.
+ *
+ **/
+static s32 e1000_get_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	mutex_lock(&swflag_mutex);
+	ret_val = e1000_get_hw_semaphore_82573(hw);
+	if (ret_val)
+		mutex_unlock(&swflag_mutex);
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82574 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ *
+ **/
+static void e1000_put_hw_semaphore_82574(struct e1000_hw *hw)
+{
+	e1000_put_hw_semaphore_82573(hw);
+	mutex_unlock(&swflag_mutex);
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82574 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.
+ *  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u32 data = er32(POEMB);
+
+	if (active)
+		data |= E1000_PHY_CTRL_D0A_LPLU;
+	else
+		data &= ~E1000_PHY_CTRL_D0A_LPLU;
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82574 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  when active is true, else clear lplu for D3. LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 e1000_set_d3_lplu_state_82574(struct e1000_hw *hw, bool active)
+{
+	u32 data = er32(POEMB);
+
+	if (!active) {
+		data &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+	} else if ((hw->phy.autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (hw->phy.autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_PHY_CTRL_NOND0A_LPLU;
+	}
+
+	ew32(POEMB, data);
+	return 0;
+}
+
+/**
+ *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ *  Then for non-82573 hardware, set the EEPROM access request bit and wait
+ *  for EEPROM access grant bit.  If the access grant bit is not set, release
+ *  hardware semaphore.
+ **/
+static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = e1000_get_hw_semaphore_82571(hw);
+	if (ret_val)
+		return ret_val;
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		break;
+	default:
+		ret_val = e1000e_acquire_nvm(hw);
+		break;
+	}
+
+	if (ret_val)
+		e1000_put_hw_semaphore_82571(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+static void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+	e1000e_release_nvm(hw);
+	e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ *  e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000e_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+				 u16 *data)
+{
+	s32 ret_val;
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+		ret_val = e1000e_write_nvm_spi(hw, offset, words, data);
+		break;
+	default:
+		ret_val = -E1000_ERR_NVM;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	u32 eecd;
+	s32 ret_val;
+	u16 i;
+
+	ret_val = e1000e_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * If our nvm is an EEPROM, then we're done
+	 * otherwise, commit the checksum to the flash NVM.
+	 */
+	if (hw->nvm.type != e1000_nvm_flash_hw)
+		return ret_val;
+
+	/* Check for pending operations. */
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		usleep_range(1000, 2000);
+		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES)
+		return -E1000_ERR_NVM;
+
+	/* Reset the firmware if using STM opcode. */
+	if ((er32(FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+		/*
+		 * The enabling of and the actual reset must be done
+		 * in two write cycles.
+		 */
+		ew32(HICR, E1000_HICR_FW_RESET_ENABLE);
+		e1e_flush();
+		ew32(HICR, E1000_HICR_FW_RESET);
+	}
+
+	/* Commit the write to flash */
+	eecd = er32(EECD) | E1000_EECD_FLUPD;
+	ew32(EECD, eecd);
+
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		usleep_range(1000, 2000);
+		if ((er32(EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES)
+		return -E1000_ERR_NVM;
+
+	return 0;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	if (hw->nvm.type == e1000_nvm_flash_hw)
+		e1000_fix_nvm_checksum_82571(hw);
+
+	return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ *  e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  After checking for invalid values, poll the EEPROM to ensure the previous
+ *  command has completed before trying to write the next word.  After write
+ *  poll for completion.
+ *
+ *  If e1000e_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+				      u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eewr = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
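+	/*
+	 * Each EEWR cycle packs the word into the data field and the
+	 * target address into the address field, then sets START; poll
+	 * for completion both before and after issuing the write.
+	 */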
+	for (i = 0; i < words; i++) {
+		eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
+		       ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+		       E1000_NVM_RW_REG_START;
+
+		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+
+		ew32(EEWR, eewr);
+
+		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_82571 - Poll for configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the management control register for the config done bit to be set.
+ **/
+static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+
+	while (timeout) {
+		if (er32(EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout) {
+		e_dbg("MNG configuration cycle has not completed.\n");
+		return -E1000_ERR_RESET;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When activating LPLU
+ *  this function also disables smart speed and vice versa.  LPLU will not be
+ *  activated unless the device autonegotiation advertisement meets standards
+ *  of either 10 or 10/100 or 10/100/1000 at all duplexes.  This is a function
+ *  pointer entry point only called by PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (active) {
+		data |= IGP02E1000_PM_D0_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+		if (ret_val)
+			return ret_val;
+	} else {
+		data &= ~IGP02E1000_PM_D0_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_reset_hw_82571 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.
+ **/
+static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+	u32 ctrl, ctrl_ext;
+	s32 ret_val;
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000e_disable_pcie_master(hw);
+	if (ret_val)
+		e_dbg("PCI-E Master disable polling has failed.\n");
+
+	e_dbg("Masking off all interrupts\n");
+	ew32(IMC, 0xffffffff);
+
+	ew32(RCTL, 0);
+	ew32(TCTL, E1000_TCTL_PSP);
+	e1e_flush();
+
+	usleep_range(10000, 20000);
+
+	/*
+	 * Must acquire the MDIO ownership before MAC reset.
+	 * Ownership defaults to firmware after a reset.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82573:
+		ret_val = e1000_get_hw_semaphore_82573(hw);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		ret_val = e1000_get_hw_semaphore_82574(hw);
+		break;
+	default:
+		break;
+	}
+	if (ret_val)
+		e_dbg("Cannot acquire MDIO ownership\n");
+
+	ctrl = er32(CTRL);
+
+	e_dbg("Issuing a global reset to MAC\n");
+	ew32(CTRL, ctrl | E1000_CTRL_RST);
+
+	/* Must release MDIO ownership and mutex after MAC reset. */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		e1000_put_hw_semaphore_82574(hw);
+		break;
+	default:
+		break;
+	}
+
+	if (hw->nvm.type == e1000_nvm_flash_hw) {
+		udelay(10);
+		ctrl_ext = er32(CTRL_EXT);
+		ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+		ew32(CTRL_EXT, ctrl_ext);
+		e1e_flush();
+	}
+
+	ret_val = e1000e_get_auto_rd_done(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		return ret_val;
+
+	/*
+	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+	 * Need to wait for Phy configuration completion before accessing
+	 * NVM and Phy.
+	 */
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		msleep(25);
+		break;
+	default:
+		break;
+	}
+
+	/* Clear any pending interrupt events. */
+	ew32(IMC, 0xffffffff);
+	er32(ICR);
+
+	if (hw->mac.type == e1000_82571) {
+		/* Install any alternate MAC address into RAR0 */
+		ret_val = e1000_check_alt_mac_addr_generic(hw);
+		if (ret_val)
+			return ret_val;
+
+		e1000e_set_laa_state_82571(hw, true);
+	}
+
+	/* Reinitialize the 82571 serdes link state machine */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes)
+		hw->mac.serdes_link_state = e1000_serdes_link_down;
+
+	return 0;
+}
+
+/**
+ *  e1000_init_hw_82571 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 i, rar_count = mac->rar_entry_count;
+
+	e1000_initialize_hw_bits_82571(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000e_id_led_init(hw);
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+	/* This is not fatal and we should not stop init due to this */
+
+	/* Disabling VLAN filtering */
+	e_dbg("Initializing the IEEE VLAN\n");
+	mac->ops.clear_vfta(hw);
+
+	/*
+	 * Setup the receive address.
+	 * If a locally administered address was assigned to the 82571, we
+	 * must reserve a RAR for it to work around an issue where resetting
+	 * one port will reload the MAC on the other port.
+	 */
+	if (e1000e_get_laa_state_82571(hw))
+		rar_count--;
+	e1000e_init_rx_addrs(hw, rar_count);
+
+	/* Zero out the Multicast HASH table */
+	e_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link_82571(hw);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = er32(TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		   E1000_TXDCTL_FULL_TX_DESC_WB |
+		   E1000_TXDCTL_COUNT_DESC;
+	ew32(TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	switch (mac->type) {
+	case e1000_82573:
+		e1000e_enable_tx_pkt_filtering(hw);
+		fallthrough;
+	case e1000_82574:
+	case e1000_82583:
+		reg_data = er32(GCR);
+		reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+		ew32(GCR, reg_data);
+		break;
+	default:
+		reg_data = er32(TXDCTL(1));
+		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+			   E1000_TXDCTL_FULL_TX_DESC_WB |
+			   E1000_TXDCTL_COUNT_DESC;
+		ew32(TXDCTL(1), reg_data);
+		break;
+	}
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82571(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	/* Transmit Descriptor Control 0 */
+	reg = er32(TXDCTL(0));
+	reg |= (1 << 22);
+	ew32(TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = er32(TXDCTL(1));
+	reg |= (1 << 22);
+	ew32(TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = er32(TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+		break;
+	default:
+		break;
+	}
+	ew32(TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = er32(TARC(1));
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg &= ~((1 << 29) | (1 << 30));
+		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+		if (er32(TCTL) & E1000_TCTL_MULR)
+			reg &= ~(1 << 28);
+		else
+			reg |= (1 << 28);
+		ew32(TARC(1), reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Device Control */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(CTRL);
+		reg &= ~(1 << 29);
+		ew32(CTRL, reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Extended Device Control */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(CTRL_EXT);
+		reg &= ~(1 << 23);
+		reg |= (1 << 22);
+		ew32(CTRL_EXT, reg);
+		break;
+	default:
+		break;
+	}
+
+	if (hw->mac.type == e1000_82571) {
+		reg = er32(PBA_ECC);
+		reg |= E1000_PBA_ECC_CORR_EN;
+		ew32(PBA_ECC, reg);
+	}
+	/*
+	 * Workaround for hardware errata.
+	 * Ensure that DMA Dynamic Clock gating is disabled on 82571 and 82572
+	 */
+	if ((hw->mac.type == e1000_82571) ||
+	    (hw->mac.type == e1000_82572)) {
+		reg = er32(CTRL_EXT);
+		reg &= ~E1000_CTRL_EXT_DMA_DYN_CLK_EN;
+		ew32(CTRL_EXT, reg);
+	}
+
+	/* PCI-Ex Control Registers */
+	switch (hw->mac.type) {
+	case e1000_82574:
+	case e1000_82583:
+		reg = er32(GCR);
+		reg |= (1 << 22);
+		ew32(GCR, reg);
+
+		/*
+		 * Workaround for hardware errata documented in the errata
+		 * docs: fixes an issue where error-prone or unreliable PCIe
+		 * completions occur, particularly with ASPM enabled.
+		 * Without the fix, the issue can cause Tx timeouts.
+		 */
+		reg = er32(GCR2);
+		reg |= 1;
+		ew32(GCR2, reg);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ *  e1000_clear_vfta_82571 - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+	u32 offset;
+	u32 vfta_value = 0;
+	u32 vfta_offset = 0;
+	u32 vfta_bit_in_reg = 0;
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (hw->mng_cookie.vlan_id != 0) {
+			/*
+			 * The VFTA is a 4096-bit field, each bit identifying
+			 * a single VLAN ID.  The following operations
+			 * determine which 32-bit entry (i.e. offset) of the
+			 * array holds the VLAN ID (i.e. bit) of the
+			 * manageability unit.
+			 */
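+			/*
+			 * Worked example (assuming the usual shift/mask
+			 * values of 5, 0x7F and 0x1F): VLAN ID 100 lands
+			 * in VFTA[100 >> 5] = VFTA[3], bit 100 & 0x1F = 4.
+			 */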
+			vfta_offset = (hw->mng_cookie.vlan_id >>
+				       E1000_VFTA_ENTRY_SHIFT) &
+				      E1000_VFTA_ENTRY_MASK;
+			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+					       E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+		}
+		break;
+	default:
+		break;
+	}
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		/*
+		 * If the offset we want to clear is the same offset of the
+		 * manageability VLAN ID, then clear all bits except that of
+		 * the manageability unit.
+		 */
+		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+		e1e_flush();
+	}
+}
+
+/**
+ *  e1000_check_mng_mode_82574 - Check manageability is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the NVM Initialization Control Word 2 and returns true
+ *  (>0) if any manageability is enabled, else false (0).
+ **/
+static bool e1000_check_mng_mode_82574(struct e1000_hw *hw)
+{
+	u16 data;
+
+	e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+	return (data & E1000_NVM_INIT_CTRL2_MNGM) != 0;
+}
+
+/**
+ *  e1000_led_on_82574 - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+static s32 e1000_led_on_82574(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	u32 i;
+
+	ctrl = hw->mac.ledctl_mode2;
+	if (!(E1000_STATUS_LU & er32(STATUS))) {
+		/*
+		 * If no link, then turn LED on by setting the invert bit
+		 * for each LED that's "on" (0x0E) in ledctl_mode2.
+		 */
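+		/*
+		 * E.g. (sketch) if byte i of ledctl_mode2 is 0x0E, the
+		 * loop below ORs in the invert bit for LED i:
+		 * ctrl |= E1000_LEDCTL_LED0_IVRT << (i * 8).
+		 */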
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ctrl |= (E1000_LEDCTL_LED0_IVRT << (i * 8));
+	}
+	ew32(LEDCTL, ctrl);
+
+	return 0;
+}
+
+/**
+ *  e1000_check_phy_82574 - check 82574 phy hung state
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns whether phy is hung or not
+ **/
+bool e1000_check_phy_82574(struct e1000_hw *hw)
+{
+	u16 status_1kbt = 0;
+	u16 receive_errors = 0;
+	bool phy_hung = false;
+	s32 ret_val = 0;
+
+	/*
+	 * Read the PHY Receive Error counter first; if it is at max (all
+	 * F's), then read the Base1000T status register.  If both are at
+	 * max, the PHY is hung.
+	 */
+	ret_val = e1e_rphy(hw, E1000_RECEIVE_ERROR_COUNTER, &receive_errors);
+
+	if (ret_val)
+		goto out;
+	if (receive_errors == E1000_RECEIVE_ERROR_MAX)  {
+		ret_val = e1e_rphy(hw, E1000_BASE1000T_STATUS, &status_1kbt);
+		if (ret_val)
+			goto out;
+		if ((status_1kbt & E1000_IDLE_ERROR_COUNT_MASK) ==
+		    E1000_IDLE_ERROR_COUNT_MASK)
+			phy_hung = true;
+	}
+out:
+	return phy_hung;
+}
+
+/**
+ *  e1000_setup_link_82571 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+	/*
+	 * 82573 does not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (hw->fc.requested_mode == e1000_fc_default)
+			hw->fc.requested_mode = e1000_fc_full;
+		break;
+	default:
+		break;
+	}
+
+	return e1000e_setup_link(hw);
+}
+
+/**
+ *  e1000_setup_copper_link_82571 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link, once link is established calls to configure collision distance
+ *  and flow control are called.
+ **/
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	switch (hw->phy.type) {
+	case e1000_phy_m88:
+	case e1000_phy_bm:
+		ret_val = e1000e_copper_link_setup_m88(hw);
+		break;
+	case e1000_phy_igp_2:
+		ret_val = e1000e_copper_link_setup_igp(hw);
+		break;
+	default:
+		return -E1000_ERR_PHY;
+	}
+
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_setup_copper_link(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes links.
+ *  Upon successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * If SerDes loopback mode is entered, there is no form
+		 * of reset to take the adapter out of that mode.  So we
+		 * have to explicitly take the adapter out of loopback
+		 * mode.  This prevents drivers from twiddling their thumbs
+		 * if another tool failed to take it out of loopback mode.
+		 */
+		ew32(SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+		break;
+	default:
+		break;
+	}
+
+	return e1000e_setup_fiber_serdes_link(hw);
+}
+
+/**
+ *  e1000_check_for_serdes_link_82571 - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Reports the link state as up or down.
+ *
+ *  If autonegotiation is supported by the link partner, the link state is
+ *  determined by the result of autonegotiation. This is the most likely case.
+ *  If autonegotiation is not supported by the link partner, and the link
+ *  has a valid signal, force the link up.
+ *
+ *  The link state is represented internally here by 4 states:
+ *
+ *  1) down
+ *  2) autoneg_progress
+ *  3) autoneg_complete (the link successfully autonegotiated)
+ *  4) forced_up (the link has been forced up, it did not autonegotiate)
+ *
+ **/
+static s32 e1000_check_for_serdes_link_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	u32 txcw;
+	u32 i;
+	s32 ret_val = 0;
+
+	ctrl = er32(CTRL);
+	status = er32(STATUS);
+	rxcw = er32(RXCW);
+
+	if ((rxcw & E1000_RXCW_SYNCH) && !(rxcw & E1000_RXCW_IV)) {
+
+		/* Receiver is synchronized with no invalid bits.  */
+		switch (mac->serdes_link_state) {
+		case e1000_serdes_link_autoneg_complete:
+			if (!(status & E1000_STATUS_LU)) {
+				/*
+				 * We have lost link, retry autoneg before
+				 * reporting link failure
+				 */
+				mac->serdes_link_state =
+				    e1000_serdes_link_autoneg_progress;
+				mac->serdes_has_link = false;
+				e_dbg("AN_UP     -> AN_PROG\n");
+			} else {
+				mac->serdes_has_link = true;
+			}
+			break;
+
+		case e1000_serdes_link_forced_up:
+			/*
+			 * If we are receiving /C/ ordered sets, re-enable
+			 * auto-negotiation in the TXCW register and disable
+			 * forced link in the Device Control register in an
+			 * attempt to auto-negotiate with our link partner.
+			 * If the partner code word is null, stop forcing
+			 * and restart auto negotiation.
+			 */
+			if ((rxcw & E1000_RXCW_C) || !(rxcw & E1000_RXCW_CW))  {
+				/* Enable autoneg, and unforce link up */
+				ew32(TXCW, mac->txcw);
+				ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+				mac->serdes_link_state =
+				    e1000_serdes_link_autoneg_progress;
+				mac->serdes_has_link = false;
+				e_dbg("FORCED_UP -> AN_PROG\n");
+			} else {
+				mac->serdes_has_link = true;
+			}
+			break;
+
+		case e1000_serdes_link_autoneg_progress:
+			if (rxcw & E1000_RXCW_C) {
+				/*
+				 * We received /C/ ordered sets, meaning the
+				 * link partner has autonegotiated, and we can
+				 * trust the Link Up (LU) status bit.
+				 */
+				if (status & E1000_STATUS_LU) {
+					mac->serdes_link_state =
+					    e1000_serdes_link_autoneg_complete;
+					e_dbg("AN_PROG   -> AN_UP\n");
+					mac->serdes_has_link = true;
+				} else {
+					/* Autoneg completed, but failed. */
+					mac->serdes_link_state =
+					    e1000_serdes_link_down;
+					e_dbg("AN_PROG   -> DOWN\n");
+				}
+			} else {
+				/*
+				 * The link partner did not autoneg.
+				 * Force link up and full duplex, and change
+				 * state to forced.
+				 */
+				ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+				ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+				ew32(CTRL, ctrl);
+
+				/* Configure Flow Control after link up. */
+				ret_val = e1000e_config_fc_after_link_up(hw);
+				if (ret_val) {
+					e_dbg("Error config flow control\n");
+					break;
+				}
+				mac->serdes_link_state =
+				    e1000_serdes_link_forced_up;
+				mac->serdes_has_link = true;
+				e_dbg("AN_PROG   -> FORCED_UP\n");
+			}
+			break;
+
+		case e1000_serdes_link_down:
+		default:
+			/*
+			 * The link was down but the receiver has now gained
+			 * valid sync, so let's see if we can bring the link
+			 * up.
+			 */
+			ew32(TXCW, mac->txcw);
+			ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+			mac->serdes_link_state =
+			    e1000_serdes_link_autoneg_progress;
+			mac->serdes_has_link = false;
+			e_dbg("DOWN      -> AN_PROG\n");
+			break;
+		}
+	} else {
+		if (!(rxcw & E1000_RXCW_SYNCH)) {
+			mac->serdes_has_link = false;
+			mac->serdes_link_state = e1000_serdes_link_down;
+			e_dbg("ANYSTATE  -> DOWN\n");
+		} else {
+			/*
+			 * Check several times; if Sync and Config are
+			 * both consistently 1, simply ignore the
+			 * Invalid bit and restart autonegotiation.
+			 */
+			for (i = 0; i < AN_RETRY_COUNT; i++) {
+				udelay(10);
+				rxcw = er32(RXCW);
+				if ((rxcw & E1000_RXCW_IV) &&
+				    !((rxcw & E1000_RXCW_SYNCH) &&
+				      (rxcw & E1000_RXCW_C))) {
+					mac->serdes_has_link = false;
+					mac->serdes_link_state =
+					    e1000_serdes_link_down;
+					e_dbg("ANYSTATE  -> DOWN\n");
+					break;
+				}
+			}
+
+			if (i == AN_RETRY_COUNT) {
+				txcw = er32(TXCW);
+				txcw |= E1000_TXCW_ANE;
+				ew32(TXCW, txcw);
+				mac->serdes_link_state =
+				    e1000_serdes_link_autoneg_progress;
+				mac->serdes_has_link = false;
+				e_dbg("ANYSTATE  -> AN_PROG\n");
+			}
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_82571 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+	case e1000_82574:
+	case e1000_82583:
+		if (*data == ID_LED_RESERVED_F746)
+			*data = ID_LED_DEFAULT_82573;
+		break;
+	default:
+		if (*data == ID_LED_RESERVED_0000 ||
+		    *data == ID_LED_RESERVED_FFFF)
+			*data = ID_LED_DEFAULT;
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_laa_state_82571 - Get locally administered address state
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieve and return the current locally administered address state.
+ **/
+bool e1000e_get_laa_state_82571(struct e1000_hw *hw)
+{
+	if (hw->mac.type != e1000_82571)
+		return false;
+
+	return hw->dev_spec.e82571.laa_is_present;
+}
+
+/**
+ *  e1000e_set_laa_state_82571 - Set locally administered address state
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable locally administered address
+ *
+ *  Enable/Disable the current locally administered address state.
+ **/
+void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+	if (hw->mac.type != e1000_82571)
+		return;
+
+	hw->dev_spec.e82571.laa_is_present = state;
+
+	/* If workaround is activated... */
+	if (state)
+		/*
+		 * Hold a copy of the LAA in RAR[14].  This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed, the actual LAA is in one of the RARs and no
+		 * incoming packets directed to this port are dropped.
+		 * Eventually the LAA will be in RAR[0] and RAR[14].
+		 */
+		e1000e_rar_set(hw, hw->mac.addr, hw->mac.rar_entry_count - 1);
+}
+
+/**
+ *  e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies that the EEPROM has completed the update.  After updating the
+ *  EEPROM, we need to check bit 15 in word 0x23 for the checksum fix.  If
+ *  the checksum fix is not implemented, we need to set the bit and update
+ *  the checksum.  Otherwise, if bit 15 is set and the checksum is incorrect,
+ *  we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 data;
+
+	if (nvm->type != e1000_nvm_flash_hw)
+		return 0;
+
+	/*
+	 * Check bit 4 of word 10h.  If it is 0, firmware is done updating
+	 * 10h-12h.  Checksum may need to be fixed.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!(data & 0x10)) {
+		/*
+		 * Read 0x23 and check bit 15.  This bit is a 1
+		 * when the checksum has already been fixed.  If
+		 * the checksum is still wrong and this bit is a
+		 * 1, we need to return bad checksum.  Otherwise,
+		 * we need to set this bit to a 1 and update the
+		 * checksum.
+		 */
+		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+		if (ret_val)
+			return ret_val;
+
+		if (!(data & 0x8000)) {
+			data |= 0x8000;
+			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+			if (ret_val)
+				return ret_val;
+			ret_val = e1000e_update_nvm_checksum(hw);
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_read_mac_addr_82571 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type == e1000_82571) {
+		/*
+		 * If there's an alternate MAC address place it in RAR0
+		 * so that it will override the Si installed default perm
+		 * address.
+		 */
+		ret_val = e1000_check_alt_mac_addr_generic(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_mac_addr_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!(phy->ops.check_reset_block))
+		return;
+
+	/*
+	 * If the management interface is not enabled and PHY resets are
+	 * not blocked, then power down.
+	 */
+	if (!(mac->ops.check_mng_mode(hw) || phy->ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+	e1000e_clear_hw_cntrs_base(hw);
+
+	er32(PRC64);
+	er32(PRC127);
+	er32(PRC255);
+	er32(PRC511);
+	er32(PRC1023);
+	er32(PRC1522);
+	er32(PTC64);
+	er32(PTC127);
+	er32(PTC255);
+	er32(PTC511);
+	er32(PTC1023);
+	er32(PTC1522);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	er32(ICRXPTC);
+	er32(ICRXATC);
+	er32(ICTXPTC);
+	er32(ICTXATC);
+	er32(ICTXQEC);
+	er32(ICTXQMTC);
+	er32(ICRXDMTC);
+}
+
+static const struct e1000_mac_operations e82571_mac_ops = {
+	/* .check_mng_mode: mac type dependent */
+	/* .check_for_link: media type dependent */
+	.id_led_init		= e1000e_id_led_init,
+	.cleanup_led		= e1000e_cleanup_led_generic,
+	.clear_hw_cntrs		= e1000_clear_hw_cntrs_82571,
+	.get_bus_info		= e1000e_get_bus_info_pcie,
+	.set_lan_id		= e1000_set_lan_id_multi_port_pcie,
+	/* .get_link_up_info: media type dependent */
+	/* .led_on: mac type dependent */
+	.led_off		= e1000e_led_off_generic,
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
+	.write_vfta		= e1000_write_vfta_generic,
+	.clear_vfta		= e1000_clear_vfta_82571,
+	.reset_hw		= e1000_reset_hw_82571,
+	.init_hw		= e1000_init_hw_82571,
+	.setup_link		= e1000_setup_link_82571,
+	/* .setup_physical_interface: media type dependent */
+	.setup_led		= e1000e_setup_led_generic,
+	.read_mac_addr		= e1000_read_mac_addr_82571,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_igp = {
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_igp,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit			= NULL,
+	.force_speed_duplex	= e1000e_phy_force_speed_duplex_igp,
+	.get_cfg_done		= e1000_get_cfg_done_82571,
+	.get_cable_length	= e1000e_get_cable_length_igp_2,
+	.get_info		= e1000e_get_phy_info_igp,
+	.read_reg		= e1000e_read_phy_reg_igp,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
+	.write_reg		= e1000e_write_phy_reg_igp,
+	.cfg_on_link_up		= NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_m88 = {
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_m88,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit			= e1000e_phy_sw_reset,
+	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
+	.get_cfg_done		= e1000e_get_cfg_done,
+	.get_cable_length	= e1000e_get_cable_length_m88,
+	.get_info		= e1000e_get_phy_info_m88,
+	.read_reg		= e1000e_read_phy_reg_m88,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
+	.write_reg		= e1000e_write_phy_reg_m88,
+	.cfg_on_link_up		= NULL,
+};
+
+static const struct e1000_phy_operations e82_phy_ops_bm = {
+	.acquire		= e1000_get_hw_semaphore_82571,
+	.check_polarity		= e1000_check_polarity_m88,
+	.check_reset_block	= e1000e_check_reset_block_generic,
+	.commit			= e1000e_phy_sw_reset,
+	.force_speed_duplex	= e1000e_phy_force_speed_duplex_m88,
+	.get_cfg_done		= e1000e_get_cfg_done,
+	.get_cable_length	= e1000e_get_cable_length_m88,
+	.get_info		= e1000e_get_phy_info_m88,
+	.read_reg		= e1000e_read_phy_reg_bm2,
+	.release		= e1000_put_hw_semaphore_82571,
+	.reset			= e1000e_phy_hw_reset_generic,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_82571,
+	.set_d3_lplu_state	= e1000e_set_d3_lplu_state,
+	.write_reg		= e1000e_write_phy_reg_bm2,
+	.cfg_on_link_up		= NULL,
+};
+
+static const struct e1000_nvm_operations e82571_nvm_ops = {
+	.acquire		= e1000_acquire_nvm_82571,
+	.read			= e1000e_read_nvm_eerd,
+	.release		= e1000_release_nvm_82571,
+	.update			= e1000_update_nvm_checksum_82571,
+	.valid_led_default	= e1000_valid_led_default_82571,
+	.validate		= e1000_validate_nvm_checksum_82571,
+	.write			= e1000_write_nvm_82571,
+};
+
+const struct e1000_info e1000_82571_info = {
+	.mac			= e1000_82571,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_RESET_OVERWRITES_LAA /* errata */
+				  | FLAG_TARC_SPEED_MODE_BIT /* errata */
+				  | FLAG_APME_CHECK_PORT_B,
+	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+				  | FLAG2_DMA_BURST,
+	.pba			= 38,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_igp,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82572_info = {
+	.mac			= e1000_82572,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_TARC_SPEED_MODE_BIT, /* errata */
+	.flags2			= FLAG2_DISABLE_ASPM_L1 /* errata 13 */
+				  | FLAG2_DMA_BURST,
+	.pba			= 38,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_igp,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82573_info = {
+	.mac			= e1000_82573,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_SWSM_ON_LOAD,
+	.flags2			= FLAG2_DISABLE_ASPM_L1
+				  | FLAG2_DISABLE_ASPM_L0S,
+	.pba			= 20,
+	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_m88,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82574_info = {
+	.mac			= e1000_82574,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_MSIX
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_CTRLEXT_ON_LOAD,
+	.flags2			= FLAG2_CHECK_PHY_HANG
+				  | FLAG2_DISABLE_ASPM_L0S
+				  | FLAG2_NO_DISABLE_RX,
+	.pba			= 32,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_bm,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
+const struct e1000_info e1000_82583_info = {
+	.mac			= e1000_82583,
+	.flags			= FLAG_HAS_HW_VLAN_FILTER
+				  | FLAG_HAS_WOL
+				  | FLAG_APME_IN_CTRL3
+				  | FLAG_HAS_SMART_POWER_DOWN
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_HAS_CTRLEXT_ON_LOAD,
+	.flags2			= FLAG2_DISABLE_ASPM_L0S
+				  | FLAG2_NO_DISABLE_RX,
+	.pba			= 32,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_82571,
+	.mac_ops		= &e82571_mac_ops,
+	.phy_ops		= &e82_phy_ops_bm,
+	.nvm_ops		= &e82571_nvm_ops,
+};
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile
new file mode 100644
index 0000000..6a488cb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/Makefile
@@ -0,0 +1,12 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000E) += rt_e1000e.o
+
+rt_e1000e-y := \
+	82571.o \
+	80003es2lan.o \
+	ich8lan.o \
+	lib.o \
+	netdev.o \
+	param.o \
+	phy.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h
new file mode 100644
index 0000000..ffa4c02
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/defines.h
@@ -0,0 +1,852 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC         E1000_WUFC_LNKC
+#define E1000_WUS_MAG          E1000_WUFC_MAG
+#define E1000_WUS_EX           E1000_WUFC_EX
+#define E1000_WUS_MC           E1000_WUFC_MC
+#define E1000_WUS_BC           E1000_WUFC_BC
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_LPCD  0x00000004     /* LCD Power Cycle Done */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_FORCE_SMBUS 0x00000004 /* Force SMBus mode */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_DMA_DYN_CLK_EN 0x00080000 /* DMA Dynamic Clock Gating */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_EIAME          0x01000000
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_LSECCK         0x00001000
+#define E1000_CTRL_EXT_PHYPDEN        0x00100000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
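+/*
+ * Typical use (sketch): drop a completed receive on any frame error,
+ * e.g. if (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK) the packet
+ * is discarded rather than passed up the stack.
+ */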
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST   0x00200000
+
+#define E1000_MANC2H_PORT_623    0x00000020 /* Port 0x26f */
+#define E1000_MANC2H_PORT_664    0x00000040 /* Port 0x298 */
+#define E1000_MDEF_PORT_623      0x00000800 /* Port 0x26f */
+#define E1000_MDEF_PORT_664      0x00000400 /* Port 0x298 */
+
+/* Receive Control */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enab */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* Rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* Rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* Rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* Rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* Rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* Rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* Rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* Rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
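+/*
+ * Sketch: buffer sizes above 2048 need the extension bit as well, e.g.
+ * rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX, while the default
+ * 2048-byte size is E1000_RCTL_SZ_2048 with BSEX clear.
+ */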
+
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
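+/*
+ * Worked example for the usage sketch above: value0 = 256 gives
+ * (256 >> 7) & E1000_PSRCTL_BSIZE0_MASK = 2 (256 bytes in 128-byte
+ * units); value1 = 4096 gives (4096 >> 2) & E1000_PSRCTL_BSIZE1_MASK
+ * = 0x400 (4 KB in 1 KB units at bits 13:8).
+ */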
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_CSR_SM   0x8
+
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_LANPHYPC_OVERRIDE 0x00010000 /* SW control of LANPHYPC */
+#define E1000_CTRL_LANPHYPC_VALUE    0x00020000 /* SW value of LANPHYPC */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+
+/*
+ * Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion by NVM */
+#define E1000_STATUS_PHYRA      0x00000400      /* PHY Reset Asserted */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+				ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG      (ADVERTISE_10_HALF | ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED     (ADVERTISE_10_HALF | ADVERTISE_10_FULL)
+#define E1000_ALL_HALF_DUPLEX  (ADVERTISE_10_HALF | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_PHY_LED0_MODE_MASK          0x00000007
+#define E1000_PHY_LED0_IVRT               0x00000008
+#define E1000_PHY_LED0_MASK               0x0000001F
+
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+
+/* Transmit Control */
+#define E1000_TCTL_EN     0x00000002    /* enable Tx */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+
+/* Transmit Arbitration Count */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+
+/* Header split receive */
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
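+/*
+ * Sketch: these fold into TCTL, e.g.
+ * tctl = (tctl & ~E1000_TCTL_CT) |
+ *        (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+ */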
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
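+/*
+ * Sketch of how the defaults compose into the TIPG register:
+ * tipg = DEFAULT_82543_TIPG_IPGT_COPPER |
+ *        (DEFAULT_82543_TIPG_IPGR1 << E1000_TIPG_IPGR1_SHIFT) |
+ *        (DEFAULT_82543_TIPG_IPGR2 << E1000_TIPG_IPGR2_SHIFT);
+ */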
+
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE       0x00000008
+#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_CTRL_GATE_PHY_CFG           0x00000080
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
+
+#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS           0x00050000
+
+/* PBA constants */
+#define E1000_PBA_8K  0x0008    /* 8KB */
+#define E1000_PBA_16K 0x0010    /* 16KB */
+
+#define E1000_PBS_16K E1000_PBA_16K
+
+#define IFS_MAX       80
+#define IFS_MIN       40
+#define IFS_RATIO     4
+#define IFS_STEP      10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+#define E1000_SWSM2_LOCK        0x00000002 /* Secondary driver semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* Rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* Rx desc min. threshold (0) */
+#define E1000_ICR_RXT0          0x00000080 /* Rx timer intr (ring 0) */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXQ0          0x00100000 /* Rx Queue 0 Interrupt */
+#define E1000_ICR_RXQ1          0x00200000 /* Rx Queue 1 Interrupt */
+#define E1000_ICR_TXQ0          0x00400000 /* Tx Queue 0 Interrupt */
+#define E1000_ICR_TXQ1          0x00800000 /* Tx Queue 1 Interrupt */
+#define E1000_ICR_OTHER         0x01000000 /* Other Interrupts */
+
+/* PBA ECC Register */
+#define E1000_PBA_ECC_COUNTER_MASK  0xFFF00000 /* ECC counter mask */
+#define E1000_PBA_ECC_COUNTER_SHIFT 20         /* ECC counter shift value */
+#define E1000_PBA_ECC_CORR_EN       0x00000001 /* ECC correction enable */
+#define E1000_PBA_ECC_STAT_CLR      0x00000002 /* Clear ECC error counter */
+#define E1000_PBA_ECC_INT_EN        0x00000004 /* Enable ICR bit 5 for ECC */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
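+/*
+ * Sketch: a driver typically unmasks exactly these causes when the
+ * interface is brought up, e.g. ew32(IMS, IMS_ENABLE_MASK);
+ */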
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* Rx timer intr */
+#define E1000_IMS_RXQ0      E1000_ICR_RXQ0      /* Rx Queue 0 Interrupt */
+#define E1000_IMS_RXQ1      E1000_ICR_RXQ1      /* Rx Queue 1 Interrupt */
+#define E1000_IMS_TXQ0      E1000_ICR_TXQ0      /* Tx Queue 0 Interrupt */
+#define E1000_IMS_TXQ1      E1000_ICR_TXQ1      /* Tx Queue 1 Interrupt */
+#define E1000_IMS_OTHER     E1000_ICR_OTHER     /* Other Interrupts */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* Rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* Rx desc min. threshold */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of desc. still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* 802.1q VLAN Packet Size */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES     15
+#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+
+/* Error Codes */
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET    9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT               50
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2-millisecond intervals we wait while acquiring MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT      10
+/* Number of milliseconds we wait for NVM auto read to complete after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW asym pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
+#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
+#define E1000_RXCW_C          0x20000000        /* Receive config */
+#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+			   E1000_GCR_RXDSCW_NO_SNOOP      | \
+			   E1000_GCR_RXDSCR_NO_SNOOP      | \
+			   E1000_GCR_TXD_NO_SNOOP         | \
+			   E1000_GCR_TXDSCW_NO_SNOOP      | \
+			   E1000_GCR_TXDSCR_NO_SNOOP)
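+
+/*
+ * Illustrative usage (sketch, not part of the original header): a MAC
+ * setup path can disable PCIe snooping for all descriptor and data
+ * traffic via the helper declared in e1000.h:
+ *
+ *	e1000e_set_pcie_no_snoop(hw, PCIE_NO_SNOOP_ALL);
+ */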
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX=1, half duplex=0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP 100TX Full Dplx Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS     0x0001 /* LP has Auto Neg Capability */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+					/* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+					/* 0=Automatic Master/Slave config */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+#define PHY_CONTROL_LB   0x4000 /* PHY Loopback bit */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
+/* NVM Addressing bits based on type (0-small, 1-large) */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SEC1VAL_VALID_MASK (E1000_EECD_AUTO_RD | E1000_EECD_PRES)
+
+#define E1000_NVM_RW_REG_DATA   16   /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES  2000
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_3GIO_3            0x001A
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_CFG                    0x0012
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+
+#define E1000_NVM_INIT_CTRL2_MNGM 0x6000 /* Manageability Operation Mode mask */
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_PAUSE            0x1000
+#define NVM_WORD0F_ASM_DIR          0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK  0x000C
+
+/* Mask bits for fields in Word 0x03 of the EEPROM */
+#define NVM_COMPAT_LOM    0x0800
+
+/* length of string needed to store PBA number */
+#define E1000_PBANUM_LENGTH             11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
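+
+/*
+ * Checksum sketch (illustrative only): the generic validate routine
+ * declared in e1000.h sums NVM words 0x0000..NVM_CHECKSUM_REG and
+ * compares the 16-bit total against NVM_SUM, roughly:
+ *
+ *	u16 i, word, sum = 0;
+ *	for (i = 0; i <= NVM_CHECKSUM_REG; i++) {
+ *		e1000_read_nvm(hw, i, 1, &word);
+ *		sum += word;
+ *	}
+ *	return (sum == NVM_SUM) ? 0 : -E1000_ERR_NVM;
+ */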
+
+/* PBA (printed board assembly) number words */
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_PBA_PTR_GUARD          0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+			      (ID_LED_OFF1_OFF2 <<  8) | \
+			      (ID_LED_DEF1_DEF2 <<  4) | \
+			      (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
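+
+/*
+ * Worked example (illustrative): expanding ID_LED_DEFAULT with the
+ * 4-bit mode values above (one field per LED) gives
+ *	(0x8 << 12) | (0x9 << 8) | (0x1 << 4) | 0x1 = 0x8911
+ */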
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCI_HEADER_TYPE_REGISTER     0x0E
+#define PCIE_LINK_STATUS             0x12
+
+#define PCI_HEADER_TYPE_MULTIFUNC    0x80
+#define PCIE_LINK_WIDTH_MASK         0x3F0
+#define PCIE_LINK_WIDTH_SHIFT        4
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID    0x01410C50
+#define M88E1000_I_PHY_ID    0x01410C30
+#define M88E1011_I_PHY_ID    0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define GG82563_E_PHY_ID     0x01410CA0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+#define BME1000_E_PHY_ID     0x01410CB0
+#define BME1000_E_PHY_ID_R2  0x01410CB1
+#define I82577_E_PHY_ID      0x01540050
+#define I82578_E_PHY_ID      0x004DD040
+#define I82579_E_PHY_ID      0x01540090
+#define I217_E_PHY_ID        0x015400A0
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+					       /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/*
+ * Applies to a PSCR bit not defined in this header:
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX 0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/* 0=<50M; 1=50-80M; 2=80-110M; 3=110-140M; 4=>140M */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 15:14 */
+#define M88E1000_PSSR_1000MBS            0x8000 /* bits 15:14 = 10b, 1000 Mb/s */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+
+#define I82578_EPSCR_DOWNSHIFT_ENABLE          0x0020
+#define I82578_EPSCR_DOWNSHIFT_COUNTER_MASK    0x001C
+
+/* BME1000 PHY Specific Control Register */
+#define BME1000_PSCR_ENABLE_DOWNSHIFT   0x0800 /* 1 = enable downshift */
+
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) \
+	(((page) << PHY_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
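+
+/*
+ * Worked example (illustrative): with PHY_PAGE_SHIFT = 5 and a 5-bit
+ * register field, PHY_REG(769, 17) packs page 769 and register 17 as
+ *	(769 << 5) | 17 = 0x6020 | 0x11 = 0x6031
+ */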
+
+/*
+ * Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+	(((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+	GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_PAGE_SELECT         \
+	GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+	GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+	GG82563_REG(0, 29) /* Alternate Page Select */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+	GG82563_REG(2, 21) /* MAC Specific Control Register */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+	GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+	GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+	GG82563_REG(193, 20) /* Power Management Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_INBAND_CTRL         \
+	GG82563_REG(194, 18) /* Inband Control */
+
+/* MDI Control */
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_ERROR     0x40000000
+
+/* SerDes Control */
+#define E1000_GEN_POLL_TIMEOUT          640
+
+/* FW Semaphore */
+#define E1000_FWSM_WLOCK_MAC_MASK	0x0380
+#define E1000_FWSM_WLOCK_MAC_SHIFT	7
+
+#endif /* _E1000_DEFINES_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h
new file mode 100644
index 0000000..d6fa3d4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/e1000.h
@@ -0,0 +1,764 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include <linux/bitops.h>
+#include <linux/types.h>
+#include <linux/timer.h>
+#include <linux/workqueue.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+#include <linux/crc32.h>
+#include <linux/if_vlan.h>
+#include <linux/ethtool.h>
+
+#include <rtnet_port.h>
+
+#include "hw.h"
+
+struct e1000_info;
+
+#define e_dbg(format, arg...) \
+	pr_debug(format, ## arg)
+#define e_err(format, arg...) \
+	pr_err(format, ## arg)
+#define e_info(format, arg...) \
+	pr_info(format, ## arg)
+#define e_warn(format, arg...) \
+	pr_warn(format, ## arg)
+#define e_notice(format, arg...) \
+	pr_notice(format, ## arg)
+
+
+/* Interrupt modes, as used by the IntMode parameter */
+#define E1000E_INT_MODE_LEGACY		0
+#define E1000E_INT_MODE_MSI		1
+#define E1000E_INT_MODE_MSIX		2
+
+/* Tx/Rx descriptor defines */
+#define E1000_DEFAULT_TXD		256
+#define E1000_MAX_TXD			4096
+#define E1000_MIN_TXD			64
+
+#define E1000_DEFAULT_RXD		256
+#define E1000_MAX_RXD			4096
+#define E1000_MIN_RXD			64
+
+#define E1000_MIN_ITR_USECS		10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS		10000 /* 100    irq/sec */
+
+/* Early Receive defines */
+#define E1000_ERT_2048			0x100
+
+#define E1000_FC_PAUSE_TIME		0x0680 /* 858 usec */
+
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE		16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES			0
+#define E1000_EEPROM_APME		0x0400
+
+#define E1000_MNG_VLAN_NONE		(-1)
+
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS			(MAX_PS_BUFFERS - 1)
+
+#define DEFAULT_JUMBO			9234
+
+/* BM/HV Specific Registers */
+#define BM_PORT_CTRL_PAGE                 769
+
+#define PHY_UPPER_SHIFT                   21
+#define BM_PHY_REG(page, reg) \
+	(((reg) & MAX_PHY_REG_ADDRESS) |\
+	 (((page) & 0xFFFF) << PHY_PAGE_SHIFT) |\
+	 (((reg) & ~MAX_PHY_REG_ADDRESS) << (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)))
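+
+/*
+ * Worked example (illustrative): BM_PHY_REG() keeps the low 5 register
+ * bits in place, puts the page in bits 20:5, and relocates register
+ * bits 5 and up to bit 21 and up.  E.g. page 800 (BM_WUC_PAGE), reg 16:
+ *	(16 & 0x1F) | ((800 & 0xFFFF) << 5) = 0x10 | 0x6400 = 0x6410
+ */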
+
+/* PHY Wakeup Registers and defines */
+#define BM_PORT_GEN_CFG PHY_REG(BM_PORT_CTRL_PAGE, 17)
+#define BM_RCTL         PHY_REG(BM_WUC_PAGE, 0)
+#define BM_WUC          PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC         PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS          PHY_REG(BM_WUC_PAGE, 3)
+#define BM_RAR_L(_i)    (BM_PHY_REG(BM_WUC_PAGE, 16 + ((_i) << 2)))
+#define BM_RAR_M(_i)    (BM_PHY_REG(BM_WUC_PAGE, 17 + ((_i) << 2)))
+#define BM_RAR_H(_i)    (BM_PHY_REG(BM_WUC_PAGE, 18 + ((_i) << 2)))
+#define BM_RAR_CTRL(_i) (BM_PHY_REG(BM_WUC_PAGE, 19 + ((_i) << 2)))
+#define BM_MTA(_i)      (BM_PHY_REG(BM_WUC_PAGE, 128 + ((_i) << 1)))
+
+#define BM_RCTL_UPE           0x0001          /* Unicast Promiscuous Mode */
+#define BM_RCTL_MPE           0x0002          /* Multicast Promiscuous Mode */
+#define BM_RCTL_MO_SHIFT      3               /* Multicast Offset Shift */
+#define BM_RCTL_MO_MASK       (3 << 3)        /* Multicast Offset Mask */
+#define BM_RCTL_BAM           0x0020          /* Broadcast Accept Mode */
+#define BM_RCTL_PMCF          0x0040          /* Pass MAC Control Frames */
+#define BM_RCTL_RFCE          0x0080          /* Rx Flow Control Enable */
+
+#define HV_STATS_PAGE	778
+#define HV_SCC_UPPER	PHY_REG(HV_STATS_PAGE, 16) /* Single Collision Count */
+#define HV_SCC_LOWER	PHY_REG(HV_STATS_PAGE, 17)
+#define HV_ECOL_UPPER	PHY_REG(HV_STATS_PAGE, 18) /* Excessive Coll. Count */
+#define HV_ECOL_LOWER	PHY_REG(HV_STATS_PAGE, 19)
+#define HV_MCC_UPPER	PHY_REG(HV_STATS_PAGE, 20) /* Multiple Coll. Count */
+#define HV_MCC_LOWER	PHY_REG(HV_STATS_PAGE, 21)
+#define HV_LATECOL_UPPER PHY_REG(HV_STATS_PAGE, 23) /* Late Collision Count */
+#define HV_LATECOL_LOWER PHY_REG(HV_STATS_PAGE, 24)
+#define HV_COLC_UPPER	PHY_REG(HV_STATS_PAGE, 25) /* Collision Count */
+#define HV_COLC_LOWER	PHY_REG(HV_STATS_PAGE, 26)
+#define HV_DC_UPPER	PHY_REG(HV_STATS_PAGE, 27) /* Defer Count */
+#define HV_DC_LOWER	PHY_REG(HV_STATS_PAGE, 28)
+#define HV_TNCRS_UPPER	PHY_REG(HV_STATS_PAGE, 29) /* Transmit with no CRS */
+#define HV_TNCRS_LOWER	PHY_REG(HV_STATS_PAGE, 30)
+
+#define E1000_FCRTV_PCH     0x05F40 /* PCH Flow Control Refresh Timer Value */
+
+/* BM PHY Copper Specific Status */
+#define BM_CS_STATUS                      17
+#define BM_CS_STATUS_LINK_UP              0x0400
+#define BM_CS_STATUS_RESOLVED             0x0800
+#define BM_CS_STATUS_SPEED_MASK           0xC000
+#define BM_CS_STATUS_SPEED_1000           0x8000
+
+/* 82577 Mobile Phy Status Register */
+#define HV_M_STATUS                       26
+#define HV_M_STATUS_AUTONEG_COMPLETE      0x1000
+#define HV_M_STATUS_SPEED_MASK            0x0300
+#define HV_M_STATUS_SPEED_1000            0x0200
+#define HV_M_STATUS_LINK_UP               0x0040
+
+#define E1000_ICH_FWSM_PCIM2PCI		0x01000000 /* ME PCIm-to-PCI active */
+#define E1000_ICH_FWSM_PCIM2PCI_COUNT	2000
+
+/* Time to wait before putting the device into D3 if there's no link (in ms). */
+#define LINK_TIMEOUT		100
+
+#define DEFAULT_RDTR			0
+#define DEFAULT_RADV			8
+#define BURST_RDTR			0x20
+#define BURST_RADV			0x20
+
+/*
+ * In the case of WTHRESH, the 82571/2 hardware appears to write back 4
+ * descriptors when WTHRESH=5 and 3 descriptors when WTHRESH=4; since we
+ * want 64 bytes (4 descriptors) written back at a time, set it to 5.
+ */
+#define E1000_TXDCTL_DMA_BURST_ENABLE                          \
+	(E1000_TXDCTL_GRAN | /* set descriptor granularity */  \
+	 E1000_TXDCTL_COUNT_DESC |                             \
+	 (5 << 16) | /* wthresh must be +1 more than desired */\
+	 (1 << 8)  | /* hthresh */                             \
+	 0x1f)       /* pthresh */
+
+#define E1000_RXDCTL_DMA_BURST_ENABLE                          \
+	(0x01000000 | /* set descriptor granularity */         \
+	 (4 << 16)  | /* set writeback threshold    */         \
+	 (4 << 8)   | /* set host threshold         */         \
+	 0x20)        /* set prefetch threshold     */
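+
+/*
+ * For reference (arithmetic expansion of the two macros above, nothing
+ * new is configured here):
+ *	E1000_TXDCTL_DMA_BURST_ENABLE = 0x0145011F
+ *	E1000_RXDCTL_DMA_BURST_ENABLE = 0x01040420
+ */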
+
+/* Flush Partial Descriptors: use an unsigned shift into bit 31 */
+#define E1000_TIDV_FPD (1U << 31)
+#define E1000_RDTR_FPD (1U << 31)
+
+enum e1000_boards {
+	board_82571,
+	board_82572,
+	board_82573,
+	board_82574,
+	board_82583,
+	board_80003es2lan,
+	board_ich8lan,
+	board_ich9lan,
+	board_ich10lan,
+	board_pchlan,
+	board_pch2lan,
+	board_pch_lpt,
+};
+
+struct e1000_ps_page {
+	struct page *page;
+	u64 dma; /* must be u64 - written to hw */
+};
+
+/*
+ * wrappers around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct e1000_buffer {
+	dma_addr_t dma;
+	struct rtskb *skb;
+	union {
+		/* Tx */
+		struct {
+			unsigned long time_stamp;
+			u16 length;
+			u16 next_to_watch;
+			unsigned int segs;
+			unsigned int bytecount;
+			u16 mapped_as_page;
+		};
+		/* Rx */
+		struct {
+			/* arrays of page information for packet split */
+			struct e1000_ps_page *ps_pages;
+			struct page *page;
+		};
+	};
+};
+
+struct e1000_ring {
+	void *desc;			/* pointer to ring memory  */
+	dma_addr_t dma;			/* phys address of ring    */
+	unsigned int size;		/* length of ring in bytes */
+	unsigned int count;		/* number of desc. in ring */
+
+	u16 next_to_use;
+	u16 next_to_clean;
+
+	u16 head;
+	u16 tail;
+
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+	char name[IFNAMSIZ + 5];
+	u32 ims_val;
+	u32 itr_val;
+	u16 itr_register;
+	int set_itr;
+
+	struct rtskb *rx_skb_top;
+
+	rtdm_lock_t lock;
+};
+
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+	u16 bmcr;		/* basic mode control register    */
+	u16 bmsr;		/* basic mode status register     */
+	u16 advertise;		/* auto-negotiation advertisement */
+	u16 lpa;		/* link partner ability register  */
+	u16 expansion;		/* auto-negotiation expansion reg */
+	u16 ctrl1000;		/* 1000BASE-T control register    */
+	u16 stat1000;		/* 1000BASE-T status register     */
+	u16 estatus;		/* extended status register       */
+};
+
+/* board specific private data structure */
+struct e1000_adapter {
+	struct timer_list watchdog_timer;
+	struct timer_list phy_info_timer;
+	struct timer_list blink_timer;
+
+	struct work_struct reset_task;
+	struct work_struct watchdog_task;
+
+	const struct e1000_info *ei;
+
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u16 mng_vlan_id;
+	u16 link_speed;
+	u16 link_duplex;
+	u16 eeprom_vers;
+
+	/* track device up/down/testing state */
+	unsigned long state;
+
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	/*
+	 * Tx
+	 */
+	struct e1000_ring *tx_ring /* One per active queue */
+						____cacheline_aligned_in_smp;
+
+	struct napi_struct napi;
+
+	unsigned int restart_queue;
+	u32 txd_cmd;
+
+	bool detect_tx_hung;
+	u8 tx_timeout_factor;
+
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+
+	/* Tx stats */
+	u64 tpt_old;
+	u64 colc_old;
+	u32 gotc;
+	u64 gotc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u32 tx_dma_failed;
+
+	/*
+	 * Rx
+	 */
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			  nanosecs_abs_t *time_stamp)
+						____cacheline_aligned_in_smp;
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+			      int cleaned_count, gfp_t gfp);
+	struct e1000_ring *rx_ring;
+
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+
+	/* Rx stats */
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 gorc;
+	u64 gorc_old;
+	u32 alloc_rx_buff_failed;
+	u32 rx_dma_failed;
+
+	unsigned int rx_ps_pages;
+	u16 rx_ps_bsize0;
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+	/* OS defined structs */
+	struct rtnet_device *netdev;
+	struct pci_dev *pdev;
+
+	rtdm_irq_t irq_handle;
+	rtdm_irq_t rx_irq_handle;
+	rtdm_irq_t tx_irq_handle;
+	rtdm_nrtsig_t mod_timer_sig;
+	rtdm_nrtsig_t downshift_sig;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+
+	spinlock_t stats64_lock;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+	/* Snapshot of PHY registers */
+	struct e1000_phy_regs phy_regs;
+
+	struct e1000_ring test_tx_ring;
+	struct e1000_ring test_rx_ring;
+	u32 test_icr;
+
+	u32 msg_enable;
+	unsigned int num_vectors;
+	struct msix_entry *msix_entries;
+	int int_mode;
+	u32 eiac_mask;
+
+	u32 eeprom_wol;
+	u32 wol;
+	u32 pba;
+	u32 max_hw_frame_size;
+
+	bool fc_autoneg;
+
+	unsigned int flags;
+	unsigned int flags2;
+	struct work_struct downshift_task;
+	struct work_struct update_phy_task;
+	struct work_struct print_hang_task;
+
+	bool idle_check;
+	int phy_hang_count;
+};
+
+struct e1000_info {
+	enum e1000_mac_type	mac;
+	unsigned int		flags;
+	unsigned int		flags2;
+	u32			pba;
+	u32			max_hw_frame_size;
+	s32			(*get_variants)(struct e1000_adapter *);
+	const struct e1000_mac_operations *mac_ops;
+	const struct e1000_phy_operations *phy_ops;
+	const struct e1000_nvm_operations *nvm_ops;
+};
+
+/* hardware capability, feature, and workaround flags */
+#define FLAG_HAS_AMT                      (1 << 0)
+#define FLAG_HAS_FLASH                    (1 << 1)
+#define FLAG_HAS_HW_VLAN_FILTER           (1 << 2)
+#define FLAG_HAS_WOL                      (1 << 3)
+#define FLAG_HAS_ERT                      (1 << 4)
+#define FLAG_HAS_CTRLEXT_ON_LOAD          (1 << 5)
+#define FLAG_HAS_SWSM_ON_LOAD             (1 << 6)
+#define FLAG_HAS_JUMBO_FRAMES             (1 << 7)
+#define FLAG_READ_ONLY_NVM                (1 << 8)
+#define FLAG_IS_ICH                       (1 << 9)
+#define FLAG_HAS_MSIX                     (1 << 10)
+#define FLAG_HAS_SMART_POWER_DOWN         (1 << 11)
+#define FLAG_IS_QUAD_PORT_A               (1 << 12)
+#define FLAG_IS_QUAD_PORT                 (1 << 13)
+#define FLAG_TIPG_MEDIUM_FOR_80003ESLAN   (1 << 14)
+#define FLAG_APME_IN_WUC                  (1 << 15)
+#define FLAG_APME_IN_CTRL3                (1 << 16)
+#define FLAG_APME_CHECK_PORT_B            (1 << 17)
+#define FLAG_DISABLE_FC_PAUSE_TIME        (1 << 18)
+#define FLAG_NO_WAKE_UCAST                (1 << 19)
+#define FLAG_MNG_PT_ENABLED               (1 << 20)
+#define FLAG_RESET_OVERWRITES_LAA         (1 << 21)
+#define FLAG_TARC_SPEED_MODE_BIT          (1 << 22)
+#define FLAG_TARC_SET_BIT_ZERO            (1 << 23)
+#define FLAG_RX_NEEDS_RESTART             (1 << 24)
+#define FLAG_LSC_GIG_SPEED_DROP           (1 << 25)
+#define FLAG_SMART_POWER_DOWN             (1 << 26)
+#define FLAG_MSI_ENABLED                  (1 << 27)
+/* reserved (1 << 28) */
+#define FLAG_TSO_FORCE                    (1 << 29)
+#define FLAG_RX_RESTART_NOW               (1 << 30)
+#define FLAG_MSI_TEST_FAILED              (1U << 31)
+
+#define FLAG2_CRC_STRIPPING               (1 << 0)
+#define FLAG2_HAS_PHY_WAKEUP              (1 << 1)
+#define FLAG2_IS_DISCARDING               (1 << 2)
+#define FLAG2_DISABLE_ASPM_L1             (1 << 3)
+#define FLAG2_HAS_PHY_STATS               (1 << 4)
+#define FLAG2_HAS_EEE                     (1 << 5)
+#define FLAG2_DMA_BURST                   (1 << 6)
+#define FLAG2_DISABLE_ASPM_L0S            (1 << 7)
+#define FLAG2_DISABLE_AIM                 (1 << 8)
+#define FLAG2_CHECK_PHY_HANG              (1 << 9)
+#define FLAG2_NO_DISABLE_RX               (1 << 10)
+#define FLAG2_PCIM2PCI_ARBITER_WA         (1 << 11)
+
+#define E1000_RX_DESC_PS(R, i)	    \
+	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
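+
+/*
+ * Usage sketch (illustrative): given a ring and an index, the accessors
+ * above cast the raw descriptor memory to the right layout, e.g.:
+ *
+ *	struct e1000_tx_desc *txd = E1000_TX_DESC(*tx_ring, i);
+ *	union e1000_rx_desc_extended *rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+ */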
+
+enum e1000_state_t {
+	__E1000_TESTING,
+	__E1000_RESETTING,
+	__E1000_ACCESS_SHARED_RESOURCE,
+	__E1000_DOWN
+};
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+extern char e1000e_driver_name[];
+extern const char e1000e_driver_version[];
+
+extern void e1000e_check_options(struct e1000_adapter *adapter);
+extern void e1000e_set_ethtool_ops(struct net_device *netdev);
+
+extern int e1000e_up(struct e1000_adapter *adapter);
+extern void e1000e_down(struct e1000_adapter *adapter);
+extern void e1000e_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000e_reset(struct e1000_adapter *adapter);
+extern void e1000e_power_up_phy(struct e1000_adapter *adapter);
+extern int e1000e_setup_rx_resources(struct e1000_adapter *adapter);
+extern int e1000e_setup_tx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_rx_resources(struct e1000_adapter *adapter);
+extern void e1000e_free_tx_resources(struct e1000_adapter *adapter);
+extern struct rtnl_link_stats64 *e1000e_get_stats64(struct net_device *netdev,
+						    struct rtnl_link_stats64
+						    *stats);
+extern void e1000e_set_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter);
+extern void e1000e_get_hw_control(struct e1000_adapter *adapter);
+extern void e1000e_release_hw_control(struct e1000_adapter *adapter);
+
+extern unsigned int copybreak;
+
+extern char *e1000e_get_hw_dev_name(struct e1000_hw *hw);
+
+extern const struct e1000_info e1000_82571_info;
+extern const struct e1000_info e1000_82572_info;
+extern const struct e1000_info e1000_82573_info;
+extern const struct e1000_info e1000_82574_info;
+extern const struct e1000_info e1000_82583_info;
+extern const struct e1000_info e1000_ich8_info;
+extern const struct e1000_info e1000_ich9_info;
+extern const struct e1000_info e1000_ich10_info;
+extern const struct e1000_info e1000_pch_info;
+extern const struct e1000_info e1000_pch2_info;
+extern const struct e1000_info e1000_pch_lpt_info;
+extern const struct e1000_info e1000_es2_info;
+
+extern s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+					 u32 pba_num_size);
+
+extern s32  e1000e_commit_phy(struct e1000_hw *hw);
+
+extern bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw);
+
+extern bool e1000e_get_laa_state_82571(struct e1000_hw *hw);
+extern void e1000e_set_laa_state_82571(struct e1000_hw *hw, bool state);
+
+extern void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw);
+extern void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+						 bool state);
+extern void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+extern void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw);
+extern void e1000_resume_workarounds_pchlan(struct e1000_hw *hw);
+extern s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable);
+extern s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable);
+extern void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw);
+
+extern s32 e1000e_check_for_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_fiber_link(struct e1000_hw *hw);
+extern s32 e1000e_check_for_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_setup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_cleanup_led_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_on_generic(struct e1000_hw *hw);
+extern s32 e1000e_led_off_generic(struct e1000_hw *hw);
+extern s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw);
+extern void e1000_set_lan_id_single_port(struct e1000_hw *hw);
+extern s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex);
+extern s32 e1000e_disable_pcie_master(struct e1000_hw *hw);
+extern s32 e1000e_get_auto_rd_done(struct e1000_hw *hw);
+extern s32 e1000e_id_led_init(struct e1000_hw *hw);
+extern void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw);
+extern s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw);
+extern s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw);
+extern s32 e1000e_setup_link(struct e1000_hw *hw);
+extern void e1000_clear_vfta_generic(struct e1000_hw *hw);
+extern void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+extern void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+					       u8 *mc_addr_list,
+					       u32 mc_addr_count);
+extern void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+extern s32 e1000e_set_fc_watermarks(struct e1000_hw *hw);
+extern void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop);
+extern s32 e1000e_get_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data);
+extern void e1000e_config_collision_dist(struct e1000_hw *hw);
+extern s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw);
+extern s32 e1000e_force_mac_fc(struct e1000_hw *hw);
+extern s32 e1000e_blink_led_generic(struct e1000_hw *hw);
+extern void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+extern s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+extern void e1000e_reset_adaptive(struct e1000_hw *hw);
+extern void e1000e_update_adaptive(struct e1000_hw *hw);
+
+extern s32 e1000e_setup_copper_link(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_id(struct e1000_hw *hw);
+extern void e1000e_put_hw_semaphore(struct e1000_hw *hw);
+extern s32 e1000e_check_reset_block_generic(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_igp(struct e1000_hw *hw);
+extern s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page);
+extern s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					  u16 *data);
+extern s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw);
+extern s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+extern s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset,
+					   u16 data);
+extern s32 e1000e_phy_sw_reset(struct e1000_hw *hw);
+extern s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_cfg_done(struct e1000_hw *hw);
+extern s32 e1000e_get_cable_length_m88(struct e1000_hw *hw);
+extern s32 e1000e_get_phy_info_m88(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw);
+extern enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id);
+extern s32 e1000e_determine_phy_address(struct e1000_hw *hw);
+extern s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+						 u16 *phy_reg);
+extern s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw,
+						  u16 *phy_reg);
+extern s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data);
+extern void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+extern s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+					u16 data);
+extern s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset,
+				       u16 *data);
+extern s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+			       u32 usec_interval, bool *success);
+extern s32 e1000e_phy_reset_dsp(struct e1000_hw *hw);
+extern void e1000_power_up_phy_copper(struct e1000_hw *hw);
+extern void e1000_power_down_phy_copper(struct e1000_hw *hw);
+extern s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000e_check_downshift(struct e1000_hw *hw);
+extern s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data);
+extern s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					u16 *data);
+extern s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+				      u16 *data);
+extern s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data);
+extern s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset,
+					 u16 data);
+extern s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset,
+				       u16 data);
+extern s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw);
+extern s32 e1000_copper_link_setup_82577(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_82577(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_82577(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw);
+extern s32 e1000_get_cable_length_82577(struct e1000_hw *hw);
+
+extern s32 e1000_check_polarity_m88(struct e1000_hw *hw);
+extern s32 e1000_get_phy_info_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_ife(struct e1000_hw *hw);
+extern s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw);
+extern s32 e1000_check_polarity_igp(struct e1000_hw *hw);
+extern bool e1000_check_phy_82574(struct e1000_hw *hw);
+
+static inline s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+	return hw->phy.ops.reset(hw);
+}
+
+static inline s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+	return hw->phy.ops.check_reset_block(hw);
+}
+
+static inline s32 e1e_rphy(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return hw->phy.ops.read_reg(hw, offset, data);
+}
+
+static inline s32 e1e_rphy_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return hw->phy.ops.read_reg_locked(hw, offset, data);
+}
+
+static inline s32 e1e_wphy(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return hw->phy.ops.write_reg(hw, offset, data);
+}
+
+static inline s32 e1e_wphy_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return hw->phy.ops.write_reg_locked(hw, offset, data);
+}
+
+static inline s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+	return hw->phy.ops.get_cable_length(hw);
+}
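+
+/*
+ * Usage sketch (illustrative, not part of the original header): reading
+ * the IEEE PHY status register through the wrappers above:
+ *
+ *	u16 bmsr;
+ *	s32 ret = e1e_rphy(hw, PHY_STATUS, &bmsr);
+ *	bool link_up = !ret && (bmsr & MII_SR_LINK_STATUS);
+ */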
+
+extern s32 e1000e_acquire_nvm(struct e1000_hw *hw);
+extern s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw);
+extern s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+extern s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+extern s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw);
+extern void e1000e_release_nvm(struct e1000_hw *hw);
+extern void e1000e_reload_nvm(struct e1000_hw *hw);
+extern s32 e1000_read_mac_addr_generic(struct e1000_hw *hw);
+
+static inline s32 e1000e_read_mac_addr(struct e1000_hw *hw)
+{
+	if (hw->mac.ops.read_mac_addr)
+		return hw->mac.ops.read_mac_addr(hw);
+
+	return e1000_read_mac_addr_generic(hw);
+}
+
+static inline s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+	return hw->nvm.ops.validate(hw);
+}
+
+static inline s32 e1000e_update_nvm_checksum(struct e1000_hw *hw)
+{
+	return hw->nvm.ops.update(hw);
+}
+
+static inline s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	return hw->nvm.ops.read(hw, offset, words, data);
+}
+
+static inline s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	return hw->nvm.ops.write(hw, offset, words, data);
+}
+
+static inline s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+	return hw->phy.ops.get_info(hw);
+}
+
+static inline s32 e1000e_check_mng_mode(struct e1000_hw *hw)
+{
+	return hw->mac.ops.check_mng_mode(hw);
+}
+
+extern bool e1000e_check_mng_mode_generic(struct e1000_hw *hw);
+extern bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw);
+extern s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length);
+
+static inline u32 __er32(struct e1000_hw *hw, unsigned long reg)
+{
+	return readl(hw->hw_addr + reg);
+}
+
+static inline void __ew32(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+	writel(val, hw->hw_addr + reg);
+}
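+
+/*
+ * Note: drivers normally go through the er32()/ew32() macros defined in
+ * hw.h, which expand to these helpers, e.g. (illustrative):
+ *
+ *	u32 icr = er32(ICR);
+ *	ew32(IMC, 0xffffffff);
+ *	e1e_flush();
+ */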
+
+#endif /* _E1000_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h
new file mode 100644
index 0000000..247f79e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/hw.h
@@ -0,0 +1,997 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include <linux/types.h>
+
+struct e1000_hw;
+struct e1000_adapter;
+
+#include "defines.h"
+
+#define er32(reg)	__er32(hw, E1000_##reg)
+#define ew32(reg,val)	__ew32(hw, E1000_##reg, (val))
+#define e1e_flush()	er32(STATUS)
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) \
+	(writel((value), ((a)->hw_addr + reg + ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) \
+	(readl((a)->hw_addr + reg + ((offset) << 2)))
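+
+/*
+ * Illustrative usage: array registers such as the Multicast Table Array
+ * are accessed by element index, e.g. clearing MTA entry i:
+ *
+ *	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+ */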
+
+enum e1e_registers {
+	E1000_CTRL     = 0x00000, /* Device Control - RW */
+	E1000_STATUS   = 0x00008, /* Device Status - RO */
+	E1000_EECD     = 0x00010, /* EEPROM/Flash Control - RW */
+	E1000_EERD     = 0x00014, /* EEPROM Read - RW */
+	E1000_CTRL_EXT = 0x00018, /* Extended Device Control - RW */
+	E1000_FLA      = 0x0001C, /* Flash Access - RW */
+	E1000_MDIC     = 0x00020, /* MDI Control - RW */
+	E1000_SCTL     = 0x00024, /* SerDes Control - RW */
+	E1000_FCAL     = 0x00028, /* Flow Control Address Low - RW */
+	E1000_FCAH     = 0x0002C, /* Flow Control Address High -RW */
+	E1000_FEXTNVM4 = 0x00024, /* Future Extended NVM 4 - RW */
+	E1000_FEXTNVM  = 0x00028, /* Future Extended NVM - RW */
+	E1000_FCT      = 0x00030, /* Flow Control Type - RW */
+	E1000_VET      = 0x00038, /* VLAN Ether Type - RW */
+	E1000_ICR      = 0x000C0, /* Interrupt Cause Read - R/clr */
+	E1000_ITR      = 0x000C4, /* Interrupt Throttling Rate - RW */
+	E1000_ICS      = 0x000C8, /* Interrupt Cause Set - WO */
+	E1000_IMS      = 0x000D0, /* Interrupt Mask Set - RW */
+	E1000_IMC      = 0x000D8, /* Interrupt Mask Clear - WO */
+	E1000_EIAC_82574 = 0x000DC, /* Ext. Interrupt Auto Clear - RW */
+	E1000_IAM      = 0x000E0, /* Interrupt Acknowledge Auto Mask */
+	E1000_IVAR     = 0x000E4, /* Interrupt Vector Allocation - RW */
+	E1000_EITR_82574_BASE = 0x000E8, /* Interrupt Throttling - RW */
+#define E1000_EITR_82574(_n) (E1000_EITR_82574_BASE + ((_n) << 2))
+	E1000_RCTL     = 0x00100, /* Rx Control - RW */
+	E1000_FCTTV    = 0x00170, /* Flow Control Transmit Timer Value - RW */
+	E1000_TXCW     = 0x00178, /* Tx Configuration Word - RW */
+	E1000_RXCW     = 0x00180, /* Rx Configuration Word - RO */
+	E1000_TCTL     = 0x00400, /* Tx Control - RW */
+	E1000_TCTL_EXT = 0x00404, /* Extended Tx Control - RW */
+	E1000_TIPG     = 0x00410, /* Tx Inter-packet gap -RW */
+	E1000_AIT      = 0x00458, /* Adaptive Interframe Spacing Throttle -RW */
+	E1000_LEDCTL   = 0x00E00, /* LED Control - RW */
+	E1000_EXTCNF_CTRL  = 0x00F00, /* Extended Configuration Control */
+	E1000_EXTCNF_SIZE  = 0x00F08, /* Extended Configuration Size */
+	E1000_PHY_CTRL     = 0x00F10, /* PHY Control Register in CSR */
+#define E1000_POEMB	E1000_PHY_CTRL	/* PHY OEM Bits */
+	E1000_PBA      = 0x01000, /* Packet Buffer Allocation - RW */
+	E1000_PBS      = 0x01008, /* Packet Buffer Size */
+	E1000_EEMNGCTL = 0x01010, /* MNG EEprom Control */
+	E1000_EEWR     = 0x0102C, /* EEPROM Write Register - RW */
+	E1000_FLOP     = 0x0103C, /* FLASH Opcode Register */
+	E1000_PBA_ECC  = 0x01100, /* PBA ECC Register */
+	E1000_ERT      = 0x02008, /* Early Rx Threshold - RW */
+	E1000_FCRTL    = 0x02160, /* Flow Control Receive Threshold Low - RW */
+	E1000_FCRTH    = 0x02168, /* Flow Control Receive Threshold High - RW */
+	E1000_PSRCTL   = 0x02170, /* Packet Split Receive Control - RW */
+	E1000_RDBAL    = 0x02800, /* Rx Descriptor Base Address Low - RW */
+	E1000_RDBAH    = 0x02804, /* Rx Descriptor Base Address High - RW */
+	E1000_RDLEN    = 0x02808, /* Rx Descriptor Length - RW */
+	E1000_RDH      = 0x02810, /* Rx Descriptor Head - RW */
+	E1000_RDT      = 0x02818, /* Rx Descriptor Tail - RW */
+	E1000_RDTR     = 0x02820, /* Rx Delay Timer - RW */
+	E1000_RXDCTL_BASE = 0x02828, /* Rx Descriptor Control - RW */
+#define E1000_RXDCTL(_n)   (E1000_RXDCTL_BASE + ((_n) << 8))
+	E1000_RADV     = 0x0282C, /* Rx Interrupt Absolute Delay Timer - RW */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL_REG(current_rx_queue)
+ *
+ */
+#define E1000_RDBAL_REG(_n)   (E1000_RDBAL + ((_n) << 8))
+	E1000_KABGTXD  = 0x03004, /* AFE Band Gap Transmit Ref Data */
+	E1000_TDBAL    = 0x03800, /* Tx Descriptor Base Address Low - RW */
+	E1000_TDBAH    = 0x03804, /* Tx Descriptor Base Address High - RW */
+	E1000_TDLEN    = 0x03808, /* Tx Descriptor Length - RW */
+	E1000_TDH      = 0x03810, /* Tx Descriptor Head - RW */
+	E1000_TDT      = 0x03818, /* Tx Descriptor Tail - RW */
+	E1000_TIDV     = 0x03820, /* Tx Interrupt Delay Value - RW */
+	E1000_TXDCTL_BASE = 0x03828, /* Tx Descriptor Control - RW */
+#define E1000_TXDCTL(_n)   (E1000_TXDCTL_BASE + ((_n) << 8))
+	E1000_TADV     = 0x0382C, /* Tx Interrupt Absolute Delay Val - RW */
+	E1000_TARC_BASE = 0x03840, /* Tx Arbitration Count (0) */
+#define E1000_TARC(_n)   (E1000_TARC_BASE + ((_n) << 8))
+	E1000_CRCERRS  = 0x04000, /* CRC Error Count - R/clr */
+	E1000_ALGNERRC = 0x04004, /* Alignment Error Count - R/clr */
+	E1000_SYMERRS  = 0x04008, /* Symbol Error Count - R/clr */
+	E1000_RXERRC   = 0x0400C, /* Receive Error Count - R/clr */
+	E1000_MPC      = 0x04010, /* Missed Packet Count - R/clr */
+	E1000_SCC      = 0x04014, /* Single Collision Count - R/clr */
+	E1000_ECOL     = 0x04018, /* Excessive Collision Count - R/clr */
+	E1000_MCC      = 0x0401C, /* Multiple Collision Count - R/clr */
+	E1000_LATECOL  = 0x04020, /* Late Collision Count - R/clr */
+	E1000_COLC     = 0x04028, /* Collision Count - R/clr */
+	E1000_DC       = 0x04030, /* Defer Count - R/clr */
+	E1000_TNCRS    = 0x04034, /* Tx-No CRS - R/clr */
+	E1000_SEC      = 0x04038, /* Sequence Error Count - R/clr */
+	E1000_CEXTERR  = 0x0403C, /* Carrier Extension Error Count - R/clr */
+	E1000_RLEC     = 0x04040, /* Receive Length Error Count - R/clr */
+	E1000_XONRXC   = 0x04048, /* XON Rx Count - R/clr */
+	E1000_XONTXC   = 0x0404C, /* XON Tx Count - R/clr */
+	E1000_XOFFRXC  = 0x04050, /* XOFF Rx Count - R/clr */
+	E1000_XOFFTXC  = 0x04054, /* XOFF Tx Count - R/clr */
+	E1000_FCRUC    = 0x04058, /* Flow Control Rx Unsupported Count- R/clr */
+	E1000_PRC64    = 0x0405C, /* Packets Rx (64 bytes) - R/clr */
+	E1000_PRC127   = 0x04060, /* Packets Rx (65-127 bytes) - R/clr */
+	E1000_PRC255   = 0x04064, /* Packets Rx (128-255 bytes) - R/clr */
+	E1000_PRC511   = 0x04068, /* Packets Rx (255-511 bytes) - R/clr */
+	E1000_PRC1023  = 0x0406C, /* Packets Rx (512-1023 bytes) - R/clr */
+	E1000_PRC1522  = 0x04070, /* Packets Rx (1024-1522 bytes) - R/clr */
+	E1000_GPRC     = 0x04074, /* Good Packets Rx Count - R/clr */
+	E1000_BPRC     = 0x04078, /* Broadcast Packets Rx Count - R/clr */
+	E1000_MPRC     = 0x0407C, /* Multicast Packets Rx Count - R/clr */
+	E1000_GPTC     = 0x04080, /* Good Packets Tx Count - R/clr */
+	E1000_GORCL    = 0x04088, /* Good Octets Rx Count Low - R/clr */
+	E1000_GORCH    = 0x0408C, /* Good Octets Rx Count High - R/clr */
+	E1000_GOTCL    = 0x04090, /* Good Octets Tx Count Low - R/clr */
+	E1000_GOTCH    = 0x04094, /* Good Octets Tx Count High - R/clr */
+	E1000_RNBC     = 0x040A0, /* Rx No Buffers Count - R/clr */
+	E1000_RUC      = 0x040A4, /* Rx Undersize Count - R/clr */
+	E1000_RFC      = 0x040A8, /* Rx Fragment Count - R/clr */
+	E1000_ROC      = 0x040AC, /* Rx Oversize Count - R/clr */
+	E1000_RJC      = 0x040B0, /* Rx Jabber Count - R/clr */
+	E1000_MGTPRC   = 0x040B4, /* Management Packets Rx Count - R/clr */
+	E1000_MGTPDC   = 0x040B8, /* Management Packets Dropped Count - R/clr */
+	E1000_MGTPTC   = 0x040BC, /* Management Packets Tx Count - R/clr */
+	E1000_TORL     = 0x040C0, /* Total Octets Rx Low - R/clr */
+	E1000_TORH     = 0x040C4, /* Total Octets Rx High - R/clr */
+	E1000_TOTL     = 0x040C8, /* Total Octets Tx Low - R/clr */
+	E1000_TOTH     = 0x040CC, /* Total Octets Tx High - R/clr */
+	E1000_TPR      = 0x040D0, /* Total Packets Rx - R/clr */
+	E1000_TPT      = 0x040D4, /* Total Packets Tx - R/clr */
+	E1000_PTC64    = 0x040D8, /* Packets Tx (64 bytes) - R/clr */
+	E1000_PTC127   = 0x040DC, /* Packets Tx (65-127 bytes) - R/clr */
+	E1000_PTC255   = 0x040E0, /* Packets Tx (128-255 bytes) - R/clr */
+	E1000_PTC511   = 0x040E4, /* Packets Tx (256-511 bytes) - R/clr */
+	E1000_PTC1023  = 0x040E8, /* Packets Tx (512-1023 bytes) - R/clr */
+	E1000_PTC1522  = 0x040EC, /* Packets Tx (1024-1522 Bytes) - R/clr */
+	E1000_MPTC     = 0x040F0, /* Multicast Packets Tx Count - R/clr */
+	E1000_BPTC     = 0x040F4, /* Broadcast Packets Tx Count - R/clr */
+	E1000_TSCTC    = 0x040F8, /* TCP Segmentation Context Tx - R/clr */
+	E1000_TSCTFC   = 0x040FC, /* TCP Segmentation Context Tx Fail - R/clr */
+	E1000_IAC      = 0x04100, /* Interrupt Assertion Count */
+	E1000_ICRXPTC  = 0x04104, /* Irq Cause Rx Packet Timer Expire Count */
+	E1000_ICRXATC  = 0x04108, /* Irq Cause Rx Abs Timer Expire Count */
+	E1000_ICTXPTC  = 0x0410C, /* Irq Cause Tx Packet Timer Expire Count */
+	E1000_ICTXATC  = 0x04110, /* Irq Cause Tx Abs Timer Expire Count */
+	E1000_ICTXQEC  = 0x04118, /* Irq Cause Tx Queue Empty Count */
+	E1000_ICTXQMTC = 0x0411C, /* Irq Cause Tx Queue MinThreshold Count */
+	E1000_ICRXDMTC = 0x04120, /* Irq Cause Rx Desc MinThreshold Count */
+	E1000_ICRXOC   = 0x04124, /* Irq Cause Receiver Overrun Count */
+	E1000_RXCSUM   = 0x05000, /* Rx Checksum Control - RW */
+	E1000_RFCTL    = 0x05008, /* Receive Filter Control */
+	E1000_MTA      = 0x05200, /* Multicast Table Array - RW Array */
+	E1000_RAL_BASE = 0x05400, /* Receive Address Low - RW */
+#define E1000_RAL(_n)   (E1000_RAL_BASE + ((_n) * 8))
+#define E1000_RA        (E1000_RAL(0))
+	E1000_RAH_BASE = 0x05404, /* Receive Address High - RW */
+#define E1000_RAH(_n)   (E1000_RAH_BASE + ((_n) * 8))
+	E1000_SHRAL_PCH_LPT_BASE = 0x05408,
+#define E1000_SHRAL_PCH_LPT(_n)   (E1000_SHRAL_PCH_LPT_BASE + ((_n) * 8))
+	E1000_SHRAH_PCH_LPT_BASE = 0x0540C,
+#define E1000_SHRAH_PCH_LPT(_n)   (E1000_SHRAH_PCH_LPT_BASE + ((_n) * 8))
+	E1000_VFTA     = 0x05600, /* VLAN Filter Table Array - RW Array */
+	E1000_WUC      = 0x05800, /* Wakeup Control - RW */
+	E1000_WUFC     = 0x05808, /* Wakeup Filter Control - RW */
+	E1000_WUS      = 0x05810, /* Wakeup Status - RO */
+	E1000_MANC     = 0x05820, /* Management Control - RW */
+	E1000_FFLT     = 0x05F00, /* Flexible Filter Length Table - RW Array */
+	E1000_HOST_IF  = 0x08800, /* Host Interface */
+
+	E1000_KMRNCTRLSTA = 0x00034, /* MAC-PHY interface - RW */
+	E1000_MANC2H    = 0x05860, /* Management Control To Host - RW */
+	E1000_MDEF_BASE = 0x05890, /* Management Decision Filters */
+#define E1000_MDEF(_n)   (E1000_MDEF_BASE + ((_n) * 4))
+	E1000_SW_FW_SYNC = 0x05B5C, /* Software-Firmware Synchronization - RW */
+	E1000_GCR	= 0x05B00, /* PCI-Ex Control */
+	E1000_GCR2      = 0x05B64, /* PCI-Ex Control #2 */
+	E1000_FACTPS    = 0x05B30, /* Function Active and Power State to MNG */
+	E1000_SWSM      = 0x05B50, /* SW Semaphore */
+	E1000_FWSM      = 0x05B54, /* FW Semaphore */
+	E1000_SWSM2     = 0x05B58, /* Driver-only SW semaphore */
+	E1000_FFLT_DBG  = 0x05F04, /* Debug Register */
+	E1000_PCH_RAICC_BASE = 0x05F50, /* Receive Address Initial CRC */
+#define E1000_PCH_RAICC(_n)	(E1000_PCH_RAICC_BASE + ((_n) * 4))
+#define E1000_CRC_OFFSET	E1000_PCH_RAICC_BASE
+	E1000_HICR      = 0x08F00, /* Host Interface Control */
+};
+
+#define E1000_MAX_PHY_ADDR		4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG	0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS	0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL	0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH	0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT	0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT	0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT		22   /* Page Select for BM */
+#define IGP_PAGE_SHIFT			5
+#define PHY_REG_MASK			0x1F
+
+#define BM_WUC_PAGE			800
+#define BM_WUC_ADDRESS_OPCODE		0x11
+#define BM_WUC_DATA_OPCODE		0x12
+#define BM_WUC_ENABLE_PAGE		769
+#define BM_WUC_ENABLE_REG		17
+#define BM_WUC_ENABLE_BIT		(1 << 2)
+#define BM_WUC_HOST_WU_BIT		(1 << 4)
+#define BM_WUC_ME_WU_BIT		(1 << 5)
+
+#define BM_WUC	PHY_REG(BM_WUC_PAGE, 1)
+#define BM_WUFC PHY_REG(BM_WUC_PAGE, 2)
+#define BM_WUS	PHY_REG(BM_WUC_PAGE, 3)
+
+#define IGP01E1000_PHY_PCS_INIT_REG	0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK	0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX	0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX	0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED	0x0080
+
+#define IGP02E1000_PM_SPD		0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU		0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU		0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE	0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED	0x0002
+#define IGP01E1000_PSSR_MDIX			0x0800
+#define IGP01E1000_PSSR_SPEED_MASK		0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS		0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM		4
+#define IGP02E1000_PHY_AGC_A			0x11B1
+#define IGP02E1000_PHY_AGC_B			0x12B1
+#define IGP02E1000_PHY_AGC_C			0x14B1
+#define IGP02E1000_PHY_AGC_D			0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT	9 /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK	0x7F
+#define IGP02E1000_AGC_RANGE		15
+
+/* manage.c */
+#define E1000_VFTA_ENTRY_SHIFT		5
+#define E1000_VFTA_ENTRY_MASK		0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK	0x1F
+
+#define E1000_HICR_EN			0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C			0x02
+#define E1000_HICR_FW_RESET_ENABLE	0x40
+#define E1000_HICR_FW_RESET		0x80
+
+#define E1000_FWSM_MODE_MASK		0xE
+#define E1000_FWSM_MODE_SHIFT		1
+
+#define E1000_MNG_IAMT_MODE		0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH	0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET	0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT	10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD	64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING	0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2
+
+/* nvm.c */
+#define E1000_STM_OPCODE  0xDB00
+
+#define E1000_KMRNCTRLSTA_OFFSET	0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT	16
+#define E1000_KMRNCTRLSTA_REN		0x00200000
+#define E1000_KMRNCTRLSTA_CTRL_OFFSET	0x1    /* Kumeran Control */
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET	0x3    /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_TIMEOUTS	0x4    /* Kumeran Timeouts */
+#define E1000_KMRNCTRLSTA_INBAND_PARAM	0x9    /* Kumeran InBand Parameters */
+#define E1000_KMRNCTRLSTA_IBIST_DISABLE	0x0200 /* Kumeran IBIST Disable */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK	0x1000 /* Nearend Loopback mode */
+#define E1000_KMRNCTRLSTA_K1_CONFIG	0x7
+#define E1000_KMRNCTRLSTA_K1_ENABLE	0x0002
+#define E1000_KMRNCTRLSTA_HD_CTRL	0x10   /* Kumeran HD Control */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL	0x10
+#define IFE_PHY_SPECIAL_CONTROL		0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED	0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL		0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED	0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE		0x0010
+#define IFE_PSC_FORCE_POLARITY			0x0020
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE		0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF		0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON		0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS	0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX	0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX	0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#define E1000_CABLE_LENGTH_UNDEFINED	0xFF
+
+#define E1000_DEV_ID_82571EB_COPPER		0x105E
+#define E1000_DEV_ID_82571EB_FIBER		0x105F
+#define E1000_DEV_ID_82571EB_SERDES		0x1060
+#define E1000_DEV_ID_82571EB_QUAD_COPPER	0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER	0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER		0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP	0x10BC
+#define E1000_DEV_ID_82571EB_SERDES_DUAL	0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD	0x10DA
+#define E1000_DEV_ID_82572EI_COPPER		0x107D
+#define E1000_DEV_ID_82572EI_FIBER		0x107E
+#define E1000_DEV_ID_82572EI_SERDES		0x107F
+#define E1000_DEV_ID_82572EI			0x10B9
+#define E1000_DEV_ID_82573E			0x108B
+#define E1000_DEV_ID_82573E_IAMT		0x108C
+#define E1000_DEV_ID_82573L			0x109A
+#define E1000_DEV_ID_82574L			0x10D3
+#define E1000_DEV_ID_82574LA			0x10F6
+#define E1000_DEV_ID_82583V                     0x150C
+
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT	0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT	0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT	0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT	0x10BB
+
+#define E1000_DEV_ID_ICH8_82567V_3		0x1501
+#define E1000_DEV_ID_ICH8_IGP_M_AMT		0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT		0x104A
+#define E1000_DEV_ID_ICH8_IGP_C			0x104B
+#define E1000_DEV_ID_ICH8_IFE			0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT		0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G			0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M			0x104D
+#define E1000_DEV_ID_ICH9_IGP_AMT		0x10BD
+#define E1000_DEV_ID_ICH9_BM			0x10E5
+#define E1000_DEV_ID_ICH9_IGP_M_AMT		0x10F5
+#define E1000_DEV_ID_ICH9_IGP_M			0x10BF
+#define E1000_DEV_ID_ICH9_IGP_M_V		0x10CB
+#define E1000_DEV_ID_ICH9_IGP_C			0x294C
+#define E1000_DEV_ID_ICH9_IFE			0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT		0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G			0x10C2
+#define E1000_DEV_ID_ICH10_R_BM_LM		0x10CC
+#define E1000_DEV_ID_ICH10_R_BM_LF		0x10CD
+#define E1000_DEV_ID_ICH10_R_BM_V		0x10CE
+#define E1000_DEV_ID_ICH10_D_BM_LM		0x10DE
+#define E1000_DEV_ID_ICH10_D_BM_LF		0x10DF
+#define E1000_DEV_ID_ICH10_D_BM_V		0x1525
+#define E1000_DEV_ID_PCH_M_HV_LM		0x10EA
+#define E1000_DEV_ID_PCH_M_HV_LC		0x10EB
+#define E1000_DEV_ID_PCH_D_HV_DM		0x10EF
+#define E1000_DEV_ID_PCH_D_HV_DC		0x10F0
+#define E1000_DEV_ID_PCH2_LV_LM			0x1502
+#define E1000_DEV_ID_PCH2_LV_V			0x1503
+#define E1000_DEV_ID_PCH_LPT_I217_LM		0x153A
+#define E1000_DEV_ID_PCH_LPT_I217_V		0x153B
+#define E1000_DEV_ID_PCH_LPTLP_I218_LM		0x155A
+#define E1000_DEV_ID_PCH_LPTLP_I218_V		0x1559
+
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_1 1
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+
+enum e1000_mac_type {
+	e1000_82571,
+	e1000_82572,
+	e1000_82573,
+	e1000_82574,
+	e1000_82583,
+	e1000_80003es2lan,
+	e1000_ich8lan,
+	e1000_ich9lan,
+	e1000_ich10lan,
+	e1000_pchlan,
+	e1000_pch2lan,
+	e1000_pch_lpt,
+};
+
+enum e1000_media_type {
+	e1000_media_type_unknown = 0,
+	e1000_media_type_copper = 1,
+	e1000_media_type_fiber = 2,
+	e1000_media_type_internal_serdes = 3,
+	e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+	e1000_nvm_unknown = 0,
+	e1000_nvm_none,
+	e1000_nvm_eeprom_spi,
+	e1000_nvm_flash_hw,
+	e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+	e1000_nvm_override_none = 0,
+	e1000_nvm_override_spi_small,
+	e1000_nvm_override_spi_large
+};
+
+enum e1000_phy_type {
+	e1000_phy_unknown = 0,
+	e1000_phy_none,
+	e1000_phy_m88,
+	e1000_phy_igp,
+	e1000_phy_igp_2,
+	e1000_phy_gg82563,
+	e1000_phy_igp_3,
+	e1000_phy_ife,
+	e1000_phy_bm,
+	e1000_phy_82578,
+	e1000_phy_82577,
+	e1000_phy_82579,
+	e1000_phy_i217,
+};
+
+enum e1000_bus_width {
+	e1000_bus_width_unknown = 0,
+	e1000_bus_width_pcie_x1,
+	e1000_bus_width_pcie_x2,
+	e1000_bus_width_pcie_x4 = 4,
+	e1000_bus_width_32,
+	e1000_bus_width_64,
+	e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+	e1000_1000t_rx_status_not_ok = 0,
+	e1000_1000t_rx_status_ok,
+	e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+	e1000_rev_polarity_normal = 0,
+	e1000_rev_polarity_reversed,
+	e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+	e1000_fc_none = 0,
+	e1000_fc_rx_pause,
+	e1000_fc_tx_pause,
+	e1000_fc_full,
+	e1000_fc_default = 0xFF
+};
+
+enum e1000_ms_type {
+	e1000_ms_hw_default = 0,
+	e1000_ms_force_master,
+	e1000_ms_force_slave,
+	e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+	e1000_smart_speed_default = 0,
+	e1000_smart_speed_on,
+	e1000_smart_speed_off
+};
+
+enum e1000_serdes_link_state {
+	e1000_serdes_link_down = 0,
+	e1000_serdes_link_autoneg_progress,
+	e1000_serdes_link_autoneg_complete,
+	e1000_serdes_link_forced_up
+};
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+	__le64 buffer_addr; /* Address of the descriptor's data buffer */
+	__le16 length;      /* Length of data DMAed into data buffer */
+	__le16 csum;	/* Packet checksum */
+	u8  status;      /* Descriptor status */
+	u8  errors;      /* Descriptor Errors */
+	__le16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+	struct {
+		__le64 buffer_addr;
+		__le64 reserved;
+	} read;
+	struct {
+		struct {
+			__le32 mrq;	      /* Multiple Rx Queues */
+			union {
+				__le32 rss;	    /* RSS Hash */
+				struct {
+					__le16 ip_id;  /* IP id */
+					__le16 csum;   /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error;     /* ext status/error */
+			__le16 length;
+			__le16 vlan;	     /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+	struct {
+		/* one buffer for protocol header(s), three data buffers */
+		__le64 buffer_addr[MAX_PS_BUFFERS];
+	} read;
+	struct {
+		struct {
+			__le32 mrq;	      /* Multiple Rx Queues */
+			union {
+				__le32 rss;	      /* RSS Hash */
+				struct {
+					__le16 ip_id;    /* IP id */
+					__le16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error;     /* ext status/error */
+			__le16 length0;	  /* length of buffer 0 */
+			__le16 vlan;	     /* VLAN tag */
+		} middle;
+		struct {
+			__le16 header_status;
+			__le16 length[3];	/* length of buffers 1-3 */
+		} upper;
+		__le64 reserved;
+	} wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+	__le64 buffer_addr;      /* Address of the descriptor's data buffer */
+	union {
+		__le32 data;
+		struct {
+			__le16 length;    /* Data buffer length */
+			u8 cso;	/* Checksum offset */
+			u8 cmd;	/* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		__le32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 css;	/* Checksum start */
+			__le16 special;
+		} fields;
+	} upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+	union {
+		__le32 ip_config;
+		struct {
+			u8 ipcss;      /* IP checksum start */
+			u8 ipcso;      /* IP checksum offset */
+			__le16 ipcse;     /* IP checksum end */
+		} ip_fields;
+	} lower_setup;
+	union {
+		__le32 tcp_config;
+		struct {
+			u8 tucss;      /* TCP checksum start */
+			u8 tucso;      /* TCP checksum offset */
+			__le16 tucse;     /* TCP checksum end */
+		} tcp_fields;
+	} upper_setup;
+	__le32 cmd_and_length;
+	union {
+		__le32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 hdr_len;    /* Header length */
+			__le16 mss;       /* Maximum segment size */
+		} fields;
+	} tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+	__le64 buffer_addr;   /* Address of the descriptor's buffer address */
+	union {
+		__le32 data;
+		struct {
+			__le16 length;    /* Data buffer length */
+			u8 typ_len_ext;
+			u8 cmd;
+		} flags;
+	} lower;
+	union {
+		__le32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 popts;      /* Packet Options */
+			__le16 special;
+		} fields;
+	} upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 tor;
+	u64 tot;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+};
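+
+/*
+ * Illustrative sketch (not part of the driver): most counters above shadow
+ * the read-to-clear (R/clr) statistics registers in the enum earlier, so a
+ * typical update path accumulates each register into its 64-bit field,
+ * e.g. (assuming the er32() register accessor used elsewhere in this
+ * driver):
+ *
+ *	stats->gprc += er32(GPRC);	// reading also clears the HW counter
+ *	stats->mpc  += er32(MPC);
+ */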
+
+struct e1000_phy_stats {
+	u32 idle_errors;
+	u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8  status;
+	u8  reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8  reserved3;
+	u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8  command_id;
+	u8  checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+/* Function pointers and static data for the MAC. */
+struct e1000_mac_operations {
+	s32  (*id_led_init)(struct e1000_hw *);
+	s32  (*blink_led)(struct e1000_hw *);
+	bool (*check_mng_mode)(struct e1000_hw *);
+	s32  (*check_for_link)(struct e1000_hw *);
+	s32  (*cleanup_led)(struct e1000_hw *);
+	void (*clear_hw_cntrs)(struct e1000_hw *);
+	void (*clear_vfta)(struct e1000_hw *);
+	s32  (*get_bus_info)(struct e1000_hw *);
+	void (*set_lan_id)(struct e1000_hw *);
+	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+	s32  (*led_on)(struct e1000_hw *);
+	s32  (*led_off)(struct e1000_hw *);
+	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32);
+	s32  (*reset_hw)(struct e1000_hw *);
+	s32  (*init_hw)(struct e1000_hw *);
+	s32  (*setup_link)(struct e1000_hw *);
+	s32  (*setup_physical_interface)(struct e1000_hw *);
+	s32  (*setup_led)(struct e1000_hw *);
+	void (*write_vfta)(struct e1000_hw *, u32, u32);
+	void (*config_collision_dist)(struct e1000_hw *);
+	void (*rar_set)(struct e1000_hw *, u8 *, u32);
+	s32  (*read_mac_addr)(struct e1000_hw *);
+};
+
+/*
+ * When to use various PHY register access functions:
+ *
+ *                 Func   Caller
+ *   Function      Does   Does    When to use
+ *   ~~~~~~~~~~~~  ~~~~~  ~~~~~~  ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
+ *   X_reg         L,P,A  n/a     for simple PHY reg accesses
+ *   X_reg_locked  P,A    L       for multiple accesses of different regs
+ *                                on different pages
+ *   X_reg_page    A      L,P     for multiple accesses of different regs
+ *                                on the same page
+ *
+ * Where X=[read|write], L=locking, P=sets page, A=register access
+ *
+ */
+struct e1000_phy_operations {
+	s32  (*acquire)(struct e1000_hw *);
+	s32  (*cfg_on_link_up)(struct e1000_hw *);
+	s32  (*check_polarity)(struct e1000_hw *);
+	s32  (*check_reset_block)(struct e1000_hw *);
+	s32  (*commit)(struct e1000_hw *);
+	s32  (*force_speed_duplex)(struct e1000_hw *);
+	s32  (*get_cfg_done)(struct e1000_hw *hw);
+	s32  (*get_cable_length)(struct e1000_hw *);
+	s32  (*get_info)(struct e1000_hw *);
+	s32  (*set_page)(struct e1000_hw *, u16);
+	s32  (*read_reg)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_reg_locked)(struct e1000_hw *, u32, u16 *);
+	s32  (*read_reg_page)(struct e1000_hw *, u32, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32  (*reset)(struct e1000_hw *);
+	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
+	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
+	s32  (*write_reg)(struct e1000_hw *, u32, u16);
+	s32  (*write_reg_locked)(struct e1000_hw *, u32, u16);
+	s32  (*write_reg_page)(struct e1000_hw *, u32, u16);
+	void (*power_up)(struct e1000_hw *);
+	void (*power_down)(struct e1000_hw *);
+};
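+
+/*
+ * Illustrative sketch (not part of the driver) of the access patterns from
+ * the table above: to read several registers on different pages, the caller
+ * takes the lock once and uses the *_locked variants, which still set the
+ * page for each access (regA/regB and the result variables are hypothetical
+ * placeholders):
+ *
+ *	ret = hw->phy.ops.acquire(hw);			// caller does L
+ *	if (!ret) {
+ *		hw->phy.ops.read_reg_locked(hw, regA, &a); // func does P,A
+ *		hw->phy.ops.read_reg_locked(hw, regB, &b);
+ *		hw->phy.ops.release(hw);
+ *	}
+ */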
+
+/* Function pointers for the NVM. */
+struct e1000_nvm_operations {
+	s32  (*acquire)(struct e1000_hw *);
+	s32  (*read)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32  (*update)(struct e1000_hw *);
+	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+	s32  (*validate)(struct e1000_hw *);
+	s32  (*write)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+	struct e1000_mac_operations ops;
+	u8 addr[ETH_ALEN];
+	u8 perm_addr[ETH_ALEN];
+
+	enum e1000_mac_type type;
+
+	u32 collision_delta;
+	u32 ledctl_default;
+	u32 ledctl_mode1;
+	u32 ledctl_mode2;
+	u32 mc_filter_type;
+	u32 tx_packet_delta;
+	u32 txcw;
+
+	u16 current_ifs_val;
+	u16 ifs_max_val;
+	u16 ifs_min_val;
+	u16 ifs_ratio;
+	u16 ifs_step_size;
+	u16 mta_reg_count;
+
+	/* Maximum size of the MTA register table in all supported adapters */
+	#define MAX_MTA_REG 128
+	u32 mta_shadow[MAX_MTA_REG];
+	u16 rar_entry_count;
+
+	u8  forced_speed_duplex;
+
+	bool adaptive_ifs;
+	bool has_fwsm;
+	bool arc_subsystem_valid;
+	bool autoneg;
+	bool autoneg_failed;
+	bool get_link_status;
+	bool in_ifs_mode;
+	bool serdes_has_link;
+	bool tx_pkt_filtering;
+	enum e1000_serdes_link_state serdes_link_state;
+};
+
+struct e1000_phy_info {
+	struct e1000_phy_operations ops;
+
+	enum e1000_phy_type type;
+
+	enum e1000_1000t_rx_status local_rx;
+	enum e1000_1000t_rx_status remote_rx;
+	enum e1000_ms_type ms_type;
+	enum e1000_ms_type original_ms_type;
+	enum e1000_rev_polarity cable_polarity;
+	enum e1000_smart_speed smart_speed;
+
+	u32 addr;
+	u32 id;
+	u32 reset_delay_us; /* in usec */
+	u32 revision;
+
+	enum e1000_media_type media_type;
+
+	u16 autoneg_advertised;
+	u16 autoneg_mask;
+	u16 cable_length;
+	u16 max_cable_length;
+	u16 min_cable_length;
+
+	u8 mdix;
+
+	bool disable_polarity_correction;
+	bool is_mdix;
+	bool polarity_correction;
+	bool speed_downgraded;
+	bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+	struct e1000_nvm_operations ops;
+
+	enum e1000_nvm_type type;
+	enum e1000_nvm_override override;
+
+	u32 flash_bank_size;
+	u32 flash_base_addr;
+
+	u16 word_size;
+	u16 delay_usec;
+	u16 address_bits;
+	u16 opcode_bits;
+	u16 page_size;
+};
+
+struct e1000_bus_info {
+	enum e1000_bus_width width;
+
+	u16 func;
+};
+
+struct e1000_fc_info {
+	u32 high_water;          /* Flow control high-water mark */
+	u32 low_water;           /* Flow control low-water mark */
+	u16 pause_time;          /* Flow control pause timer */
+	u16 refresh_time;        /* Flow control refresh timer */
+	bool send_xon;           /* Flow control send XON */
+	bool strict_ieee;        /* Strict IEEE mode */
+	enum e1000_fc_mode current_mode; /* FC mode in effect */
+	enum e1000_fc_mode requested_mode; /* FC mode requested by caller */
+};
+
+struct e1000_dev_spec_82571 {
+	bool laa_is_present;
+	u32 smb_counter;
+};
+
+struct e1000_dev_spec_80003es2lan {
+	bool  mdic_wa_enable;
+};
+
+struct e1000_shadow_ram {
+	u16  value;
+	bool modified;
+};
+
+#define E1000_ICH8_SHADOW_RAM_WORDS		2048
+
+struct e1000_dev_spec_ich8lan {
+	bool kmrn_lock_loss_workaround_enabled;
+	struct e1000_shadow_ram shadow_ram[E1000_ICH8_SHADOW_RAM_WORDS];
+	bool nvm_k1_enabled;
+	bool eee_disable;
+	u16 eee_lp_ability;
+};
+
+struct e1000_hw {
+	struct e1000_adapter *adapter;
+
+	u8 __iomem *hw_addr;
+	u8 __iomem *flash_address;
+
+	struct e1000_mac_info  mac;
+	struct e1000_fc_info   fc;
+	struct e1000_phy_info  phy;
+	struct e1000_nvm_info  nvm;
+	struct e1000_bus_info  bus;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+	union {
+		struct e1000_dev_spec_82571	e82571;
+		struct e1000_dev_spec_80003es2lan e80003es2lan;
+		struct e1000_dev_spec_ich8lan	ich8lan;
+	} dev_spec;
+};
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c
new file mode 100644
index 0000000..8bdcf3d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/ich8lan.c
@@ -0,0 +1,4446 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/*
+ * 82562G 10/100 Network Connection
+ * 82562G-2 10/100 Network Connection
+ * 82562GT 10/100 Network Connection
+ * 82562GT-2 10/100 Network Connection
+ * 82562V 10/100 Network Connection
+ * 82562V-2 10/100 Network Connection
+ * 82566DC-2 Gigabit Network Connection
+ * 82566DC Gigabit Network Connection
+ * 82566DM-2 Gigabit Network Connection
+ * 82566DM Gigabit Network Connection
+ * 82566MC Gigabit Network Connection
+ * 82566MM Gigabit Network Connection
+ * 82567LM Gigabit Network Connection
+ * 82567LF Gigabit Network Connection
+ * 82567V Gigabit Network Connection
+ * 82567LM-2 Gigabit Network Connection
+ * 82567LF-2 Gigabit Network Connection
+ * 82567V-2 Gigabit Network Connection
+ * 82567LF-3 Gigabit Network Connection
+ * 82567LM-3 Gigabit Network Connection
+ * 82567LM-4 Gigabit Network Connection
+ * 82577LM Gigabit Network Connection
+ * 82577LC Gigabit Network Connection
+ * 82578DM Gigabit Network Connection
+ * 82578DC Gigabit Network Connection
+ * 82579LM Gigabit Network Connection
+ * 82579V Gigabit Network Connection
+ */
+
+#include "e1000.h"
+
+#define ICH_FLASH_GFPREG		0x0000
+#define ICH_FLASH_HSFSTS		0x0004
+#define ICH_FLASH_HSFCTL		0x0006
+#define ICH_FLASH_FADDR			0x0008
+#define ICH_FLASH_FDATA0		0x0010
+#define ICH_FLASH_PR0			0x0074
+
+#define ICH_FLASH_READ_COMMAND_TIMEOUT	500
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT	500
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT	3000000
+#define ICH_FLASH_LINEAR_ADDR_MASK	0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT	10
+
+#define ICH_CYCLE_READ			0
+#define ICH_CYCLE_WRITE			2
+#define ICH_CYCLE_ERASE			3
+
+#define FLASH_GFPREG_BASE_MASK		0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT		12
+
+#define ICH_FLASH_SEG_SIZE_256		256
+#define ICH_FLASH_SEG_SIZE_4K		4096
+#define ICH_FLASH_SEG_SIZE_8K		8192
+#define ICH_FLASH_SEG_SIZE_64K		65536
+
+
+#define E1000_ICH_FWSM_RSPCIPHY	0x00000040 /* Reset PHY on PCI Reset */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID		0x00008000
+
+#define E1000_ICH_MNG_IAMT_MODE		0x2
+
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+				 (ID_LED_DEF1_OFF2 <<  8) | \
+				 (ID_LED_DEF1_ON2  <<  4) | \
+				 (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD		0x13
+#define E1000_ICH_NVM_SIG_MASK		0xC000
+#define E1000_ICH_NVM_VALID_SIG_MASK    0xC0
+#define E1000_ICH_NVM_SIG_VALUE         0x80
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT	1500
+
+#define E1000_FEXTNVM_SW_CONFIG		1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M :/ */
+
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_MASK    0x0C000000
+#define E1000_FEXTNVM3_PHY_CFG_COUNTER_50MSEC  0x08000000
+
+#define E1000_FEXTNVM4_BEACON_DURATION_MASK    0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_8USEC   0x7
+#define E1000_FEXTNVM4_BEACON_DURATION_16USEC  0x3
+
+#define PCIE_ICH8_SNOOP_ALL		PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES		7
+#define E1000_PCH2_RAR_ENTRIES		5 /* RAR[0], SHRA[0-3] */
+#define E1000_PCH_LPT_RAR_ENTRIES	12 /* RAR[0], SHRA[0-10] */
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+			   ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG  PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL    PHY_REG(776, 18) /* Voltage Regulator Control */
+
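+/*
+ * Illustrative arithmetic (not part of the driver): PHY_REG() packs the
+ * page number into the upper bits of the register offset, so for instance:
+ *
+ *	HV_PM_CTRL == PHY_REG(770, 17)
+ *	           == (770 << 5) | 17 == 0x6040 | 0x11 == 0x6051
+ */
+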
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS	0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN	0x0200
+
+#define HV_LED_CONFIG		PHY_REG(768, 30) /* LED Configuration */
+
+#define SW_FLAG_TIMEOUT    1000 /* SW Semaphore flag timeout in milliseconds */
+
+/* SMBus Control Phy Register */
+#define CV_SMB_CTRL		PHY_REG(769, 23)
+#define CV_SMB_CTRL_FORCE_SMBUS	0x0001
+
+/* SMBus Address Phy Register */
+#define HV_SMB_ADDR            PHY_REG(768, 26)
+#define HV_SMB_ADDR_MASK       0x007F
+#define HV_SMB_ADDR_PEC_EN     0x0200
+#define HV_SMB_ADDR_VALID      0x0080
+#define HV_SMB_ADDR_FREQ_MASK           0x1100
+#define HV_SMB_ADDR_FREQ_LOW_SHIFT      8
+#define HV_SMB_ADDR_FREQ_HIGH_SHIFT     12
+
+/* PHY Power Management Control */
+#define HV_PM_CTRL		PHY_REG(770, 17)
+#define HV_PM_CTRL_PLL_STOP_IN_K1_GIGA	0x100
+
+/* PHY Low Power Idle Control */
+#define I82579_LPI_CTRL				PHY_REG(772, 20)
+#define I82579_LPI_CTRL_ENABLE_MASK		0x6000
+#define I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT	0x80
+
+/* EMI Registers */
+#define I82579_EMI_ADDR         0x10
+#define I82579_EMI_DATA         0x11
+#define I82579_LPI_UPDATE_TIMER 0x4805	/* in 40ns units + 40 ns base value */
+
+#define I217_EEE_ADVERTISEMENT  0x8001	/* IEEE MMD Register 7.60 */
+#define I217_EEE_LP_ABILITY     0x8002	/* IEEE MMD Register 7.61 */
+#define I217_EEE_100_SUPPORTED  (1 << 1)	/* 100BaseTx EEE supported */
+
+/* Intel Rapid Start Technology Support */
+#define I217_PROXY_CTRL                 PHY_REG(BM_WUC_PAGE, 70)
+#define I217_PROXY_CTRL_AUTO_DISABLE    0x0080
+#define I217_SxCTRL                     PHY_REG(BM_PORT_CTRL_PAGE, 28)
+#define I217_SxCTRL_MASK                0x1000
+#define I217_CGFREG                     PHY_REG(772, 29)
+#define I217_CGFREG_MASK                0x0002
+#define I217_MEMPWR                     PHY_REG(772, 26)
+#define I217_MEMPWR_MASK                0x0010
+
+/* Strapping Option Register - RO */
+#define E1000_STRAP                     0x0000C
+#define E1000_STRAP_SMBUS_ADDRESS_MASK  0x00FE0000
+#define E1000_STRAP_SMBUS_ADDRESS_SHIFT 17
+#define E1000_STRAP_SMT_FREQ_MASK       0x00003000
+#define E1000_STRAP_SMT_FREQ_SHIFT      12
+
+/* OEM Bits Phy Register */
+#define HV_OEM_BITS            PHY_REG(768, 25)
+#define HV_OEM_BITS_LPLU       0x0004 /* Low Power Link Up */
+#define HV_OEM_BITS_GBE_DIS    0x0040 /* Gigabit Disable */
+#define HV_OEM_BITS_RESTART_AN 0x0400 /* Restart Auto-negotiation */
+
+#define E1000_NVM_K1_CONFIG 0x1B /* NVM K1 Config Word */
+#define E1000_NVM_K1_ENABLE 0x1  /* NVM Enable K1 bit */
+
+/* KMRN Mode Control */
+#define HV_KMRN_MODE_CTRL      PHY_REG(769, 16)
+#define HV_KMRN_MDIO_SLOW      0x0400
+
+/* KMRN FIFO Control and Status */
+#define HV_KMRN_FIFO_CTRLSTA                  PHY_REG(770, 16)
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK    0x7000
+#define HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT   12
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+	struct ich8_hsfsts {
+		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
+		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
+		u16 dael       :1; /* bit 2 Direct Access error Log */
+		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
+		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
+		u16 reserved1  :2; /* bit 7:6 Reserved */
+		u16 reserved2  :6; /* bit 13:8 Reserved */
+		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
+	} hsf_status;
+	u16 regval;
+};
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+	struct ich8_hsflctl {
+		u16 flcgo      :1;   /* 0 Flash Cycle Go */
+		u16 flcycle    :2;   /* 2:1 Flash Cycle */
+		u16 reserved   :5;   /* 7:3 Reserved  */
+		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
+		u16 flockdn    :6;   /* 15:10 Reserved */
+	} hsf_ctrl;
+	u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+	struct ich8_flracc {
+		u32 grra      :8; /* 0:7 GbE region Read Access */
+		u32 grwa      :8; /* 8:15 GbE region Write Access */
+		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
+		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
+	} hsf_flregacc;
+	u16 regval;
+};
+
+/* ICH Flash Protected Region */
+union ich8_flash_protected_range {
+	struct ich8_pr {
+		u32 base:13;     /* 0:12 Protected Range Base */
+		u32 reserved1:2; /* 13:14 Reserved */
+		u32 rpe:1;       /* 15 Read Protection Enable */
+		u32 limit:13;    /* 16:28 Protected Range Limit */
+		u32 reserved2:2; /* 29:30 Reserved */
+		u32 wpe:1;       /* 31 Write Protection Enable */
+	} range;
+	u32 regval;
+};
+
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+						u32 offset, u8 byte);
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 *data);
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u16 *data);
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 size, u16 *data);
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw);
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw);
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw);
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw);
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw);
+static s32  e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link);
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw);
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index);
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw);
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate);
+
+static inline u16 __er16flash(struct e1000_hw *hw, unsigned long reg)
+{
+	return readw(hw->flash_address + reg);
+}
+
+static inline u32 __er32flash(struct e1000_hw *hw, unsigned long reg)
+{
+	return readl(hw->flash_address + reg);
+}
+
+static inline void __ew16flash(struct e1000_hw *hw, unsigned long reg, u16 val)
+{
+	writew(val, hw->flash_address + reg);
+}
+
+static inline void __ew32flash(struct e1000_hw *hw, unsigned long reg, u32 val)
+{
+	writel(val, hw->flash_address + reg);
+}
+
+#define er16flash(reg)		__er16flash(hw, (reg))
+#define er32flash(reg)		__er32flash(hw, (reg))
+#define ew16flash(reg,val)	__ew16flash(hw, (reg), (val))
+#define ew32flash(reg,val)	__ew32flash(hw, (reg), (val))
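+
+/*
+ * Illustrative sketch (not part of the driver): these accessors read and
+ * write the hardware-sequencing flash registers through hw->flash_address.
+ * A typical poll of the cycle-done bit via the HSFSTS union defined above
+ * would look like:
+ *
+ *	union ich8_hws_flash_status hsfsts;
+ *
+ *	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+ *	if (hsfsts.hsf_status.flcdone)
+ *		;	// flash cycle completed
+ */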
+
+static void e1000_toggle_lanphypc_value_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_LANPHYPC_OVERRIDE;
+	ctrl &= ~E1000_CTRL_LANPHYPC_VALUE;
+	ew32(CTRL, ctrl);
+	e1e_flush();
+	udelay(10);
+	ctrl &= ~E1000_CTRL_LANPHYPC_OVERRIDE;
+	ew32(CTRL, ctrl);
+}
+
+/**
+ *  e1000_init_phy_params_pchlan - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 fwsm;
+	s32 ret_val = 0;
+
+	phy->addr                     = 1;
+	phy->reset_delay_us           = 100;
+
+	phy->ops.set_page             = e1000_set_page_igp;
+	phy->ops.read_reg             = e1000_read_phy_reg_hv;
+	phy->ops.read_reg_locked      = e1000_read_phy_reg_hv_locked;
+	phy->ops.read_reg_page        = e1000_read_phy_reg_page_hv;
+	phy->ops.set_d0_lplu_state    = e1000_set_lplu_state_pchlan;
+	phy->ops.set_d3_lplu_state    = e1000_set_lplu_state_pchlan;
+	phy->ops.write_reg            = e1000_write_phy_reg_hv;
+	phy->ops.write_reg_locked     = e1000_write_phy_reg_hv_locked;
+	phy->ops.write_reg_page       = e1000_write_phy_reg_page_hv;
+	phy->ops.power_up             = e1000_power_up_phy_copper;
+	phy->ops.power_down           = e1000_power_down_phy_copper_ich8lan;
+	phy->autoneg_mask             = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+
+	/*
+	 * The MAC-PHY interconnect may still be in SMBus mode
+	 * after Sx->S0.  If the manageability engine (ME) is
+	 * disabled, then toggle the LANPHYPC Value bit to force
+	 * the interconnect to PCIe mode.
+	 */
+	fwsm = er32(FWSM);
+	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) && !e1000_check_reset_block(hw)) {
+		e1000_toggle_lanphypc_value_ich8lan(hw);
+		msleep(50);
+
+		/*
+		 * Gate automatic PHY configuration by hardware on
+		 * non-managed 82579
+		 */
+		if (hw->mac.type == e1000_pch2lan)
+			e1000_gate_hw_phy_config_ich8lan(hw, true);
+	}
+
+	/*
+	 * Reset the PHY before any access to it.  Doing so ensures that
+	 * the PHY is in a known good state before we read/write PHY registers.
+	 * The generic reset is sufficient here, because we haven't determined
+	 * the PHY type yet.
+	 */
+	ret_val = e1000e_phy_hw_reset_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/* Ungate automatic PHY configuration on non-managed 82579 */
+	if ((hw->mac.type == e1000_pch2lan) &&
+	    !(fwsm & E1000_ICH_FWSM_FW_VALID)) {
+		usleep_range(10000, 20000);
+		e1000_gate_hw_phy_config_ich8lan(hw, false);
+	}
+
+	phy->id = e1000_phy_unknown;
+	switch (hw->mac.type) {
+	default:
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+		if ((phy->id != 0) && (phy->id != PHY_REVISION_MASK))
+			break;
+		fallthrough;
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		/*
+		 * In case the PHY needs to be in mdio slow mode,
+		 * set slow mode and try to get the PHY id again.
+		 */
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+		break;
+	}
+	phy->type = e1000e_get_phy_type_from_id(phy->id);
+
+	switch (phy->type) {
+	case e1000_phy_82577:
+	case e1000_phy_82579:
+	case e1000_phy_i217:
+		phy->ops.check_polarity = e1000_check_polarity_82577;
+		phy->ops.force_speed_duplex =
+		    e1000_phy_force_speed_duplex_82577;
+		phy->ops.get_cable_length = e1000_get_cable_length_82577;
+		phy->ops.get_info = e1000_get_phy_info_82577;
+		phy->ops.commit = e1000e_phy_sw_reset;
+		break;
+	case e1000_phy_82578:
+		phy->ops.check_polarity = e1000_check_polarity_m88;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+		phy->ops.get_cable_length = e1000e_get_cable_length_m88;
+		phy->ops.get_info = e1000e_get_phy_info_m88;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 i = 0;
+
+	phy->addr			= 1;
+	phy->reset_delay_us		= 100;
+
+	phy->ops.power_up               = e1000_power_up_phy_copper;
+	phy->ops.power_down             = e1000_power_down_phy_copper_ich8lan;
+
+	/*
+	 * We may need to do this twice - once for IGP and if that fails,
+	 * we'll set BM func pointers and try again
+	 */
+	ret_val = e1000e_determine_phy_address(hw);
+	if (ret_val) {
+		phy->ops.write_reg = e1000e_write_phy_reg_bm;
+		phy->ops.read_reg  = e1000e_read_phy_reg_bm;
+		ret_val = e1000e_determine_phy_address(hw);
+		if (ret_val) {
+			e_dbg("Cannot determine PHY addr. Erroring out\n");
+			return ret_val;
+		}
+	}
+
+	phy->id = 0;
+	while ((e1000_phy_unknown == e1000e_get_phy_type_from_id(phy->id)) &&
+	       (i++ < 100)) {
+		usleep_range(1000, 2000);
+		ret_val = e1000e_get_phy_id(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Verify phy id */
+	switch (phy->id) {
+	case IGP03E1000_E_PHY_ID:
+		phy->type = e1000_phy_igp_3;
+		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_reg_locked = e1000e_read_phy_reg_igp_locked;
+		phy->ops.write_reg_locked = e1000e_write_phy_reg_igp_locked;
+		phy->ops.get_info = e1000e_get_phy_info_igp;
+		phy->ops.check_polarity = e1000_check_polarity_igp;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_igp;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy->type = e1000_phy_ife;
+		phy->autoneg_mask = E1000_ALL_NOT_GIG;
+		phy->ops.get_info = e1000_get_phy_info_ife;
+		phy->ops.check_polarity = e1000_check_polarity_ife;
+		phy->ops.force_speed_duplex = e1000_phy_force_speed_duplex_ife;
+		break;
+	case BME1000_E_PHY_ID:
+		phy->type = e1000_phy_bm;
+		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		phy->ops.read_reg = e1000e_read_phy_reg_bm;
+		phy->ops.write_reg = e1000e_write_phy_reg_bm;
+		phy->ops.commit = e1000e_phy_sw_reset;
+		phy->ops.get_info = e1000e_get_phy_info_m88;
+		phy->ops.check_polarity = e1000_check_polarity_m88;
+		phy->ops.force_speed_duplex = e1000e_phy_force_speed_duplex_m88;
+		break;
+	default:
+		return -E1000_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific NVM parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 gfpreg, sector_base_addr, sector_end_addr;
+	u16 i;
+
+	/* Can't read flash registers if the register set isn't mapped. */
+	if (!hw->flash_address) {
+		e_dbg("ERROR: Flash registers not mapped\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	nvm->type = e1000_nvm_flash_sw;
+
+	gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+	/*
+	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
+	 * Add 1 to sector_end_addr since this sector is included in
+	 * the overall size.
+	 */
+	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+	/* flash_base_addr is byte-aligned */
+	nvm->flash_base_addr = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+
+	/*
+	 * find total size of the NVM, then cut in half since the total
+	 * size represents two separate NVM banks.
+	 */
+	nvm->flash_bank_size = (sector_end_addr - sector_base_addr)
+				<< FLASH_SECTOR_ADDR_SHIFT;
+	nvm->flash_bank_size /= 2;
+	/* Adjust to word count */
+	nvm->flash_bank_size /= sizeof(u16);
+
+	nvm->word_size = E1000_ICH8_SHADOW_RAM_WORDS;
+
+	/* Clear shadow ram */
+	for (i = 0; i < nvm->word_size; i++) {
+		dev_spec->shadow_ram[i].modified = false;
+		dev_spec->shadow_ram[i].value    = 0xFFFF;
+	}
+
+	return 0;
+}
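+
+/*
+ * Worked example for the bank-size math above (the GFPREG value is
+ * illustrative, not from real hardware): with gfpreg = 0x0A7F0001, the base
+ * sector is 0x0001 and the end sector is 0x0A7F + 1 = 0x0A80, giving
+ * (0x0A80 - 0x0001) << 12 bytes of flash.  Half of that is one NVM bank,
+ * and dividing by sizeof(u16) converts the byte count to a word count.
+ */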
+
+/**
+ *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific MAC parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_mac_params_ich8lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+
+	/* Set media type function pointer */
+	hw->phy.media_type = e1000_media_type_copper;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 32;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+	if (mac->type == e1000_ich8lan)
+		mac->rar_entry_count--;
+	/* FWSM register */
+	mac->has_fwsm = true;
+	/* ARC subsystem not supported */
+	mac->arc_subsystem_valid = false;
+	/* Adaptive IFS supported */
+	mac->adaptive_ifs = true;
+
+	/* LED operations */
+	switch (mac->type) {
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+	case e1000_ich10lan:
+		/* check management mode */
+		mac->ops.check_mng_mode = e1000_check_mng_mode_ich8lan;
+		/* ID LED init */
+		mac->ops.id_led_init = e1000e_id_led_init;
+		/* blink LED */
+		mac->ops.blink_led = e1000e_blink_led_generic;
+		/* setup LED */
+		mac->ops.setup_led = e1000e_setup_led_generic;
+		/* cleanup LED */
+		mac->ops.cleanup_led = e1000_cleanup_led_ich8lan;
+		/* turn on/off LED */
+		mac->ops.led_on = e1000_led_on_ich8lan;
+		mac->ops.led_off = e1000_led_off_ich8lan;
+		break;
+	case e1000_pch_lpt:
+	case e1000_pchlan:
+	case e1000_pch2lan:
+		/* check management mode */
+		mac->ops.check_mng_mode = e1000_check_mng_mode_pchlan;
+		/* ID LED init */
+		mac->ops.id_led_init = e1000_id_led_init_pchlan;
+		/* setup LED */
+		mac->ops.setup_led = e1000_setup_led_pchlan;
+		/* cleanup LED */
+		mac->ops.cleanup_led = e1000_cleanup_led_pchlan;
+		/* turn on/off LED */
+		mac->ops.led_on = e1000_led_on_pchlan;
+		mac->ops.led_off = e1000_led_off_pchlan;
+		break;
+	default:
+		break;
+	}
+
+	if (mac->type == e1000_pch_lpt) {
+		mac->rar_entry_count = E1000_PCH_LPT_RAR_ENTRIES;
+		mac->ops.rar_set = e1000_rar_set_pch_lpt;
+	}
+
+	/* Enable PCS Lock-loss workaround for ICH8 */
+	if (mac->type == e1000_ich8lan)
+		e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw, true);
+
+	/* Gate automatic PHY configuration by hardware on managed
+	 * 82579 and i217
+	 */
+	if ((mac->type == e1000_pch2lan || mac->type == e1000_pch_lpt) &&
+	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+		e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+	return 0;
+}
+
+/**
+ *  e1000_set_eee_pchlan - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.  The bits in
+ *  the LPI Control register will remain set only if/when link is up.
+ **/
+static s32 e1000_set_eee_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	s32 ret_val = 0;
+	u16 phy_reg;
+
+	if ((hw->phy.type != e1000_phy_82579) &&
+	    (hw->phy.type != e1000_phy_i217))
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+	if (ret_val)
+		return ret_val;
+
+	if (dev_spec->eee_disable)
+		phy_reg &= ~I82579_LPI_CTRL_ENABLE_MASK;
+	else
+		phy_reg |= I82579_LPI_CTRL_ENABLE_MASK;
+
+	ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+
+	if (ret_val)
+		return ret_val;
+
+	if ((hw->phy.type == e1000_phy_i217) && !dev_spec->eee_disable) {
+		/* Save off link partner's EEE ability */
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+					  I217_EEE_LP_ABILITY);
+		if (ret_val)
+			goto release;
+		e1e_rphy_locked(hw, I82579_EMI_DATA, &dev_spec->eee_lp_ability);
+
+		/* EEE is not supported in 100Half, so ignore the partner's
+		 * 100Mb EEE ability if full-duplex is not advertised.
+		 */
+		e1e_rphy_locked(hw, PHY_LP_ABILITY, &phy_reg);
+		if (!(phy_reg & NWAY_LPAR_100TX_FD_CAPS))
+			dev_spec->eee_lp_ability &= ~I217_EEE_100_SUPPORTED;
+release:
+		hw->phy.ops.release(hw);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_check_for_copper_link_ich8lan - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+static s32 e1000_check_for_copper_link_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+	u16 phy_reg;
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = 0;
+		goto out;
+	}
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type == e1000_pchlan) {
+		ret_val = e1000_k1_gig_workaround_hv(hw, link);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Clear link partner's EEE ability */
+	hw->dev_spec.ich8lan.eee_lp_ability = 0;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = false;
+
+	switch (hw->mac.type) {
+	case e1000_pch2lan:
+		ret_val = e1000_k1_workaround_lv(hw);
+		if (ret_val)
+			goto out;
+		fallthrough;
+	case e1000_pchlan:
+		if (hw->phy.type == e1000_phy_82578) {
+			ret_val = e1000_link_stall_workaround_hv(hw);
+			if (ret_val)
+				goto out;
+		}
+
+		/*
+		 * Workaround for PCHx parts in half-duplex:
+		 * Set the number of preambles removed from the packet
+		 * when it is passed from the PHY to the MAC to prevent
+		 * the MAC from misinterpreting the packet type.
+		 */
+		e1e_rphy(hw, HV_KMRN_FIFO_CTRLSTA, &phy_reg);
+		phy_reg &= ~HV_KMRN_FIFO_CTRLSTA_PREAMBLE_MASK;
+
+		if ((er32(STATUS) & E1000_STATUS_FD) != E1000_STATUS_FD)
+			phy_reg |= (1 << HV_KMRN_FIFO_CTRLSTA_PREAMBLE_SHIFT);
+
+		e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, phy_reg);
+		break;
+	default:
+		break;
+	}
+
+	/*
+	 * Check if there was a downshift; this must be checked
+	 * immediately after link-up.
+	 */
+	e1000e_check_downshift(hw);
+
+	/* Enable/Disable EEE after link up */
+	ret_val = e1000_set_eee_pchlan(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000e_config_collision_dist(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000e_config_fc_after_link_up(hw);
+	if (ret_val)
+		e_dbg("Error configuring flow control\n");
+
+out:
+	return ret_val;
+}
+
+static s32 e1000_get_variants_ich8lan(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	s32 rc;
+
+	rc = e1000_init_mac_params_ich8lan(adapter);
+	if (rc)
+		return rc;
+
+	rc = e1000_init_nvm_params_ich8lan(hw);
+	if (rc)
+		return rc;
+
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+	case e1000_ich10lan:
+		rc = e1000_init_phy_params_ich8lan(hw);
+		break;
+	case e1000_pchlan:
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		rc = e1000_init_phy_params_pchlan(hw);
+		break;
+	default:
+		break;
+	}
+	if (rc)
+		return rc;
+
+	/*
+	 * Disable Jumbo Frame support on parts with Intel 10/100 PHY or
+	 * on parts with MACsec enabled in NVM (reflected in CTRL_EXT).
+	 */
+	if ((adapter->hw.phy.type == e1000_phy_ife) ||
+	    ((adapter->hw.mac.type >= e1000_pch2lan) &&
+	     (!(er32(CTRL_EXT) & E1000_CTRL_EXT_LSECCK)))) {
+		adapter->flags &= ~FLAG_HAS_JUMBO_FRAMES;
+		adapter->max_hw_frame_size = ETH_FRAME_LEN + ETH_FCS_LEN;
+
+		hw->mac.ops.blink_led = NULL;
+	}
+
+	if ((adapter->hw.mac.type == e1000_ich8lan) &&
+	    (adapter->hw.phy.type != e1000_phy_ife))
+		adapter->flags |= FLAG_LSC_GIG_SPEED_DROP;
+
+	/* Enable workaround for 82579 w/ ME enabled */
+	if ((adapter->hw.mac.type == e1000_pch2lan) &&
+	    (er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+		adapter->flags2 |= FLAG2_PCIM2PCI_ARBITER_WA;
+
+	/* Disable EEE by default until IEEE802.3az spec is finalized */
+	if (adapter->flags2 & FLAG2_HAS_EEE)
+		adapter->hw.dev_spec.ich8lan.eee_disable = true;
+
+	return 0;
+}
+
+static DEFINE_MUTEX(nvm_mutex);
+
+/**
+ *  e1000_acquire_nvm_ich8lan - Acquire NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the mutex for performing NVM operations.
+ **/
+static s32 e1000_acquire_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_lock(&nvm_mutex);
+
+	return 0;
+}
+
+/**
+ *  e1000_release_nvm_ich8lan - Release NVM mutex
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the mutex used while performing NVM operations.
+ **/
+static void e1000_release_nvm_ich8lan(struct e1000_hw *hw)
+{
+	mutex_unlock(&nvm_mutex);
+}
+
+/**
+ *  e1000_acquire_swflag_ich8lan - Acquire software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the software control flag for performing PHY and select
+ *  MAC CSR accesses.
+ **/
+static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = 0;
+
+	if (test_and_set_bit(__E1000_ACCESS_SHARED_RESOURCE,
+			     &hw->adapter->state)) {
+		WARN(1, "e1000e: %s: contention for Phy access\n",
+		     hw->adapter->netdev->name);
+		return -E1000_ERR_PHY;
+	}
+
+	while (timeout) {
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+		if (!(extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG))
+			break;
+
+		mdelay(1);
+		timeout--;
+	}
+
+	if (!timeout) {
+		e_dbg("SW has already locked the resource.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	timeout = SW_FLAG_TIMEOUT;
+
+	extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+
+	while (timeout) {
+		extcnf_ctrl = er32(EXTCNF_CTRL);
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+			break;
+
+		mdelay(1);
+		timeout--;
+	}
+
+	if (!timeout) {
+		e_dbg("Failed to acquire the semaphore, FW or HW has it: "
+		      "FWSM=0x%8.8x EXTCNF_CTRL=0x%8.8x)\n",
+		      er32(FWSM), extcnf_ctrl);
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	if (ret_val)
+		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_release_swflag_ich8lan - Release software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the software control flag for performing PHY and select
+ *  MAC CSR accesses.
+ **/
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+
+	if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG) {
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		ew32(EXTCNF_CTRL, extcnf_ctrl);
+	} else {
+		e_dbg("Semaphore unexpectedly released by sw/fw/hw\n");
+	}
+
+	clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+}
+
+/**
+ *  e1000_check_mng_mode_ich8lan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has any manageability enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       ((fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ *  e1000_check_mng_mode_pchlan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has iAMT enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_pchlan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
+	return (fwsm & E1000_ICH_FWSM_FW_VALID) &&
+	       (fwsm & (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
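+
+/*
+ * Illustrative arithmetic for the two mode checks above (not part of the
+ * driver): with E1000_FWSM_MODE_MASK = 0xE and E1000_FWSM_MODE_SHIFT = 1,
+ * the ich8lan variant requires the FWSM mode field to equal
+ * E1000_ICH_MNG_IAMT_MODE exactly ((0x2 << 1) == 0x4), while the pchlan
+ * variant only tests that the iAMT mode bit (0x4) is set anywhere in FWSM.
+ */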
+
+/**
+ *  e1000_rar_set_pch_lpt - Set receive address registers
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address register array at index to the address passed
+ *  in by addr. For LPT, RAR[0] is the base address register that is to
+ *  contain the MAC address. SHRA[0-10] are the shared receive address
+ *  registers that are shared between the Host and manageability engine (ME).
+ **/
+static void e1000_rar_set_pch_lpt(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+	u32 wlock_mac;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32)addr[0] | ((u32)addr[1] << 8) |
+		   ((u32)addr[2] << 16) | ((u32)addr[3] << 24));
+
+	rar_high = ((u32)addr[4] | ((u32)addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	if (index == 0) {
+		ew32(RAL(index), rar_low);
+		e1e_flush();
+		ew32(RAH(index), rar_high);
+		e1e_flush();
+		return;
+	}
+
+	/* The manageability engine (ME) can lock certain SHRAR registers that
+	 * it is using - those registers are unavailable for use.
+	 */
+	if (index < hw->mac.rar_entry_count) {
+		wlock_mac = er32(FWSM) & E1000_FWSM_WLOCK_MAC_MASK;
+		wlock_mac >>= E1000_FWSM_WLOCK_MAC_SHIFT;
+
+		/* Check if all SHRAR registers are locked */
+		if (wlock_mac == 1)
+			goto out;
+
+		if ((wlock_mac == 0) || (index <= wlock_mac)) {
+			s32 ret_val;
+
+			ret_val = e1000_acquire_swflag_ich8lan(hw);
+
+			if (ret_val)
+				goto out;
+
+			ew32(SHRAL_PCH_LPT(index - 1), rar_low);
+			e1e_flush();
+			ew32(SHRAH_PCH_LPT(index - 1), rar_high);
+			e1e_flush();
+
+			e1000_release_swflag_ich8lan(hw);
+
+			/* verify the register updates */
+			if ((er32(SHRAL_PCH_LPT(index - 1)) == rar_low) &&
+			    (er32(SHRAH_PCH_LPT(index - 1)) == rar_high))
+				return;
+		}
+	}
+
+out:
+	e_dbg("Failed to write receive address at index %d\n", index);
+}
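+
+/*
+ * Worked example for the byte packing above (the address is illustrative):
+ * for MAC 00:11:22:33:44:55, addr[0..5] = {0x00,0x11,0x22,0x33,0x44,0x55},
+ * so rar_low = 0x33221100 and rar_high = 0x00005544 | E1000_RAH_AV.
+ */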
+
+/**
+ *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if firmware is blocking the reset of the PHY.
+ *  This is a function pointer entry point only called by
+ *  reset routines.
+ **/
+static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	fwsm = er32(FWSM);
+
+	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? 0 : E1000_BLK_PHY_RESET;
+}
+
+/**
+ *  e1000_write_smbus_addr - Write SMBus address to PHY needed during Sx states
+ *  @hw: pointer to the HW structure
+ *
+ *  Assumes semaphore already acquired.
+ **/
+static s32 e1000_write_smbus_addr(struct e1000_hw *hw)
+{
+	u16 phy_data;
+	u32 strap = er32(STRAP);
+	u32 freq = (strap & E1000_STRAP_SMT_FREQ_MASK) >>
+	    E1000_STRAP_SMT_FREQ_SHIFT;
+	s32 ret_val = 0;
+
+	strap &= E1000_STRAP_SMBUS_ADDRESS_MASK;
+
+	ret_val = e1000_read_phy_reg_hv_locked(hw, HV_SMB_ADDR, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~HV_SMB_ADDR_MASK;
+	phy_data |= (strap >> E1000_STRAP_SMBUS_ADDRESS_SHIFT);
+	phy_data |= HV_SMB_ADDR_PEC_EN | HV_SMB_ADDR_VALID;
+
+	if (hw->phy.type == e1000_phy_i217) {
+		/* Restore SMBus frequency */
+		if (freq--) {
+			phy_data &= ~HV_SMB_ADDR_FREQ_MASK;
+			phy_data |= (freq & (1 << 0)) <<
+			    HV_SMB_ADDR_FREQ_LOW_SHIFT;
+			phy_data |= (freq & (1 << 1)) <<
+			    (HV_SMB_ADDR_FREQ_HIGH_SHIFT - 1);
+		} else {
+			e_dbg("Unsupported SMB frequency in PHY\n");
+		}
+	}
+
+	ret_val = e1000_write_phy_reg_hv_locked(hw, HV_SMB_ADDR, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_sw_lcd_config_ich8lan - SW-based LCD Configuration
+ *  @hw:   pointer to the HW structure
+ *
+ *  SW should configure the LCD from the NVM extended configuration region
+ *  as a workaround for certain parts.
+ **/
+static s32 e1000_sw_lcd_config_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val = 0;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+		if (phy->type != e1000_phy_igp_3)
+			return ret_val;
+
+		if ((hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_AMT) ||
+		    (hw->adapter->pdev->device == E1000_DEV_ID_ICH8_IGP_C)) {
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+			break;
+		}
+		fallthrough;
+	case e1000_pchlan:
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		break;
+	default:
+		return ret_val;
+	}
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	data = er32(FEXTNVM);
+	if (!(data & sw_cfg_mask))
+		goto out;
+
+	/*
+	 * Make sure HW does not configure LCD from PHY
+	 * extended configuration before SW configuration
+	 */
+	data = er32(EXTCNF_CTRL);
+	if ((hw->mac.type < e1000_pch2lan) &&
+	    (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE))
+		goto out;
+
+	cnf_size = er32(EXTCNF_SIZE);
+	cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+	cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+	if (!cnf_size)
+		goto out;
+
+	cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+	cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+	if (((hw->mac.type == e1000_pchlan) &&
+	     !(data & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)) ||
+	    (hw->mac.type > e1000_pchlan)) {
+		/*
+		 * HW configures the SMBus address and LEDs when the
+		 * OEM and LCD Write Enable bits are set in the NVM.
+		 * When both NVM bits are cleared, SW will configure
+		 * them instead.
+		 */
+		ret_val = e1000_write_smbus_addr(hw);
+		if (ret_val)
+			goto out;
+
+		data = er32(LEDCTL);
+		ret_val = e1000_write_phy_reg_hv_locked(hw, HV_LED_CONFIG,
+							(u16)data);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Configure LCD from extended configuration region. */
+
+	/* cnf_base_addr is in DWORD */
+	word_addr = (u16)(cnf_base_addr << 1);
+
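+	/* Each extended configuration entry is a pair of NVM words:
+	 * the register data followed by the PHY register address.
+	 */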
+	for (i = 0; i < cnf_size; i++) {
+		ret_val = e1000_read_nvm(hw, (word_addr + i * 2), 1,
+					 &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_nvm(hw, (word_addr + i * 2 + 1),
+					 1, &reg_addr);
+		if (ret_val)
+			goto out;
+
+		/* Save off the PHY page for future writes. */
+		if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+			phy_page = reg_data;
+			continue;
+		}
+
+		reg_addr &= PHY_REG_MASK;
+		reg_addr |= phy_page;
+
+		ret_val = phy->ops.write_reg_locked(hw, (u32)reg_addr,
+						    reg_data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000_k1_gig_workaround_hv - K1 Si workaround
+ *  @hw:   pointer to the HW structure
+ *  @link: link up bool flag
+ *
+ *  If K1 is enabled for 1Gbps, the MAC might stall when transitioning
+ *  from a lower speed.  This workaround disables K1 whenever link is at 1Gbps.
+ *  If link is down, the function will restore the default K1 setting located
+ *  in the NVM.
+ **/
+static s32 e1000_k1_gig_workaround_hv(struct e1000_hw *hw, bool link)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	bool k1_enable = hw->dev_spec.ich8lan.nvm_k1_enabled;
+
+	if (hw->mac.type != e1000_pchlan)
+		goto out;
+
+	/* Wrap the whole flow with the sw flag */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	/* Disable K1 when link is 1Gbps, otherwise use the NVM setting */
+	if (link) {
+		if (hw->phy.type == e1000_phy_82578) {
+			ret_val = hw->phy.ops.read_reg_locked(hw, BM_CS_STATUS,
+			                                          &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= BM_CS_STATUS_LINK_UP |
+			              BM_CS_STATUS_RESOLVED |
+			              BM_CS_STATUS_SPEED_MASK;
+
+			if (status_reg == (BM_CS_STATUS_LINK_UP |
+			                   BM_CS_STATUS_RESOLVED |
+			                   BM_CS_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		if (hw->phy.type == e1000_phy_82577) {
+			ret_val = hw->phy.ops.read_reg_locked(hw, HV_M_STATUS,
+			                                          &status_reg);
+			if (ret_val)
+				goto release;
+
+			status_reg &= HV_M_STATUS_LINK_UP |
+			              HV_M_STATUS_AUTONEG_COMPLETE |
+			              HV_M_STATUS_SPEED_MASK;
+
+			if (status_reg == (HV_M_STATUS_LINK_UP |
+			                   HV_M_STATUS_AUTONEG_COMPLETE |
+			                   HV_M_STATUS_SPEED_1000))
+				k1_enable = false;
+		}
+
+		/* Link stall fix for link up */
+		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+		                                           0x0100);
+		if (ret_val)
+			goto release;
+
+	} else {
+		/* Link stall fix for link down */
+		ret_val = hw->phy.ops.write_reg_locked(hw, PHY_REG(770, 19),
+		                                           0x4100);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000_configure_k1_ich8lan(hw, k1_enable);
+
+release:
+	hw->phy.ops.release(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_configure_k1_ich8lan - Configure K1 power state
+ *  @hw: pointer to the HW structure
+ *  @k1_enable: K1 state to configure
+ *
+ *  Configure the K1 power state based on the provided parameter.
+ *  Assumes semaphore already acquired.
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ **/
+s32 e1000_configure_k1_ich8lan(struct e1000_hw *hw, bool k1_enable)
+{
+	s32 ret_val = 0;
+	u32 ctrl_reg = 0;
+	u32 ctrl_ext = 0;
+	u32 reg = 0;
+	u16 kmrn_reg = 0;
+
+	ret_val = e1000e_read_kmrn_reg_locked(hw,
+	                                     E1000_KMRNCTRLSTA_K1_CONFIG,
+	                                     &kmrn_reg);
+	if (ret_val)
+		goto out;
+
+	if (k1_enable)
+		kmrn_reg |= E1000_KMRNCTRLSTA_K1_ENABLE;
+	else
+		kmrn_reg &= ~E1000_KMRNCTRLSTA_K1_ENABLE;
+
+	ret_val = e1000e_write_kmrn_reg_locked(hw,
+	                                      E1000_KMRNCTRLSTA_K1_CONFIG,
+	                                      kmrn_reg);
+	if (ret_val)
+		goto out;
+
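+	/* Briefly force the MAC speed with the speed-select bypass set so
+	 * the new K1 configuration takes effect, then restore the original
+	 * CTRL and CTRL_EXT values.
+	 */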
+	udelay(20);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_reg = er32(CTRL);
+
+	reg = ctrl_reg & ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+	reg |= E1000_CTRL_FRCSPD;
+	ew32(CTRL, reg);
+
+	ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_SPD_BYPS);
+	e1e_flush();
+	udelay(20);
+	ew32(CTRL, ctrl_reg);
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+	udelay(20);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_oem_bits_config_ich8lan - SW-based OEM bits configuration
+ *  @hw:       pointer to the HW structure
+ *  @d0_state: boolean if entering d0 or d3 device state
+ *
+ *  SW will configure Gbe Disable and LPLU based on the NVM. The four bits are
+ *  collectively called OEM bits.  The OEM Write Enable bit and SW Config bit
+ *  in NVM determine whether HW should configure LPLU and Gbe Disable.
+ **/
+static s32 e1000_oem_bits_config_ich8lan(struct e1000_hw *hw, bool d0_state)
+{
+	s32 ret_val = 0;
+	u32 mac_reg;
+	u16 oem_reg;
+
+	if (hw->mac.type < e1000_pchlan)
+		return ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	if (hw->mac.type == e1000_pchlan) {
+		mac_reg = er32(EXTCNF_CTRL);
+		if (mac_reg & E1000_EXTCNF_CTRL_OEM_WRITE_ENABLE)
+			goto out;
+	}
+
+	mac_reg = er32(FEXTNVM);
+	if (!(mac_reg & E1000_FEXTNVM_SW_CONFIG_ICH8M))
+		goto out;
+
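+	/* Mirror the GbE-disable and LPLU bits from the MAC's PHY_CTRL
+	 * register into the PHY's OEM bits register.
+	 */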
+	mac_reg = er32(PHY_CTRL);
+
+	ret_val = hw->phy.ops.read_reg_locked(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	oem_reg &= ~(HV_OEM_BITS_GBE_DIS | HV_OEM_BITS_LPLU);
+
+	if (d0_state) {
+		if (mac_reg & E1000_PHY_CTRL_GBE_DISABLE)
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & E1000_PHY_CTRL_D0A_LPLU)
+			oem_reg |= HV_OEM_BITS_LPLU;
+
+		/* Set Restart auto-neg to activate the bits */
+		if (!e1000_check_reset_block(hw))
+			oem_reg |= HV_OEM_BITS_RESTART_AN;
+	} else {
+		if (mac_reg & (E1000_PHY_CTRL_GBE_DISABLE |
+			       E1000_PHY_CTRL_NOND0A_GBE_DISABLE))
+			oem_reg |= HV_OEM_BITS_GBE_DIS;
+
+		if (mac_reg & (E1000_PHY_CTRL_D0A_LPLU |
+			       E1000_PHY_CTRL_NOND0A_LPLU))
+			oem_reg |= HV_OEM_BITS_LPLU;
+	}
+
+	ret_val = hw->phy.ops.write_reg_locked(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+
+/**
+ *  e1000_set_mdio_slow_mode_hv - Set slow MDIO access mode
+ *  @hw:   pointer to the HW structure
+ **/
+static s32 e1000_set_mdio_slow_mode_hv(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, HV_KMRN_MODE_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data |= HV_KMRN_MDIO_SLOW;
+
+	ret_val = e1e_wphy(hw, HV_KMRN_MODE_CTRL, data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_hv_phy_workarounds_ich8lan - apply PHY workarounds after reset
+ *  @hw: pointer to the HW structure
+ *
+ *  A series of PHY workarounds to be done after every PHY reset.
+ **/
+static s32 e1000_hv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 phy_data;
+
+	if (hw->mac.type != e1000_pchlan)
+		return ret_val;
+
+	/* Set MDIO slow mode before any other MDIO access */
+	if (hw->phy.type == e1000_phy_82577) {
+		ret_val = e1000_set_mdio_slow_mode_hv(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (((hw->phy.type == e1000_phy_82577) &&
+	     ((hw->phy.revision == 1) || (hw->phy.revision == 2))) ||
+	    ((hw->phy.type == e1000_phy_82578) && (hw->phy.revision == 1))) {
+		/* Disable generation of early preamble */
+		ret_val = e1e_wphy(hw, PHY_REG(769, 25), 0x4431);
+		if (ret_val)
+			return ret_val;
+
+		/* Preamble tuning for SSC */
+		ret_val = e1e_wphy(hw, HV_KMRN_FIFO_CTRLSTA, 0xA204);
+		if (ret_val)
+			return ret_val;
+	}
+
+	if (hw->phy.type == e1000_phy_82578) {
+		/*
+		 * Return registers to default by doing a soft reset then
+		 * writing 0x3140 to the control register.
+		 */
+		if (hw->phy.revision < 2) {
+			e1000e_phy_sw_reset(hw);
+			ret_val = e1e_wphy(hw, PHY_CONTROL, 0x3140);
+		}
+	}
+
+	/* Select page 0 */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	hw->phy.addr = 1;
+	ret_val = e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, 0);
+	hw->phy.ops.release(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Configure the K1 Si workaround during phy reset assuming there is
+	 * link so that it disables K1 if the link is at 1Gbps.
+	 */
+	ret_val = e1000_k1_gig_workaround_hv(hw, true);
+	if (ret_val)
+		goto out;
+
+	/* Workaround for link disconnects on a busy hub in half duplex */
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+	ret_val = hw->phy.ops.read_reg_locked(hw, BM_PORT_GEN_CFG, &phy_data);
+	if (ret_val)
+		goto release;
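+	/* Keep only the low byte of BM_PORT_GEN_CFG */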
+	ret_val = hw->phy.ops.write_reg_locked(hw, BM_PORT_GEN_CFG,
+					       phy_data & 0x00FF);
+release:
+	hw->phy.ops.release(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copy_rx_addrs_to_phy_ich8lan - Copy Rx addresses from MAC to PHY
+ *  @hw:   pointer to the HW structure
+ **/
+void e1000_copy_rx_addrs_to_phy_ich8lan(struct e1000_hw *hw)
+{
+	u32 mac_reg;
+	u16 i, phy_reg = 0;
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return;
+	ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+	if (ret_val)
+		goto release;
+
+	/* Copy both RAL/H (rar_entry_count) and SHRAL/H (+4) to PHY */
+	for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+		mac_reg = er32(RAL(i));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_L(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_M(i),
+					   (u16)((mac_reg >> 16) & 0xFFFF));
+
+		mac_reg = er32(RAH(i));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_H(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_RAR_CTRL(i),
+					   (u16)((mac_reg & E1000_RAH_AV)
+						 >> 16));
+	}
+
+	e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+release:
+	hw->phy.ops.release(hw);
+}
+
+/**
+ *  e1000_lv_jumbo_workaround_ich8lan - required for jumbo frame operation
+ *  with 82579 PHY
+ *  @hw: pointer to the HW structure
+ *  @enable: flag to enable/disable workaround when enabling/disabling jumbos
+ **/
+s32 e1000_lv_jumbo_workaround_ich8lan(struct e1000_hw *hw, bool enable)
+{
+	s32 ret_val = 0;
+	u16 phy_reg, data;
+	u32 mac_reg;
+	u16 i;
+
+	if (hw->mac.type < e1000_pch2lan)
+		goto out;
+
+	/* disable Rx path while enabling/disabling workaround */
+	e1e_rphy(hw, PHY_REG(769, 20), &phy_reg);
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg | (1 << 14));
+	if (ret_val)
+		goto out;
+
+	if (enable) {
+		/*
+		 * Write Rx addresses (rar_entry_count for RAL/H, +4 for
+		 * SHRAL/H) and initial CRC values to the MAC
+		 */
+		for (i = 0; i < (hw->mac.rar_entry_count + 4); i++) {
+			u8 mac_addr[ETH_ALEN] = {0};
+			u32 addr_high, addr_low;
+
+			addr_high = er32(RAH(i));
+			if (!(addr_high & E1000_RAH_AV))
+				continue;
+			addr_low = er32(RAL(i));
+			mac_addr[0] = (addr_low & 0xFF);
+			mac_addr[1] = ((addr_low >> 8) & 0xFF);
+			mac_addr[2] = ((addr_low >> 16) & 0xFF);
+			mac_addr[3] = ((addr_low >> 24) & 0xFF);
+			mac_addr[4] = (addr_high & 0xFF);
+			mac_addr[5] = ((addr_high >> 8) & 0xFF);
+
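+			/* Program the initial CRC for this address: the
+			 * one's complement of its little-endian CRC-32.
+			 */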
+			ew32(PCH_RAICC(i), ~ether_crc_le(ETH_ALEN, mac_addr));
+		}
+
+		/* Write Rx addresses to the PHY */
+		e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+		/* Enable jumbo frame workaround in the MAC */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(1 << 14);
+		mac_reg |= (7 << 15);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg |= E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						&data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data | (1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						&data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Enable jumbo frame workaround in the PHY */
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		data |= (0x37 << 5);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 16), &data);
+		data &= ~(1 << 13);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(776, 20), &data);
+		data &= ~(0x3FF << 2);
+		data |= (0x1A << 2);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+		if (ret_val)
+			goto out;
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0xF100);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, HV_PM_CTRL, &data);
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data | (1 << 10));
+		if (ret_val)
+			goto out;
+	} else {
+		/* Write MAC register values back to h/w defaults */
+		mac_reg = er32(FFLT_DBG);
+		mac_reg &= ~(0xF << 14);
+		ew32(FFLT_DBG, mac_reg);
+
+		mac_reg = er32(RCTL);
+		mac_reg &= ~E1000_RCTL_SECRC;
+		ew32(RCTL, mac_reg);
+
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						&data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_CTRL_OFFSET,
+						data & ~(1 << 0));
+		if (ret_val)
+			goto out;
+		ret_val = e1000e_read_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						&data);
+		if (ret_val)
+			goto out;
+		data &= ~(0xF << 8);
+		data |= (0xB << 8);
+		ret_val = e1000e_write_kmrn_reg(hw,
+						E1000_KMRNCTRLSTA_HD_CTRL,
+						data);
+		if (ret_val)
+			goto out;
+
+		/* Write PHY register values back to h/w defaults */
+		e1e_rphy(hw, PHY_REG(769, 23), &data);
+		data &= ~(0x7F << 5);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 23), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(769, 16), &data);
+		data |= (1 << 13);
+		ret_val = e1e_wphy(hw, PHY_REG(769, 16), data);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, PHY_REG(776, 20), &data);
+		data &= ~(0x3FF << 2);
+		data |= (0x8 << 2);
+		ret_val = e1e_wphy(hw, PHY_REG(776, 20), data);
+		if (ret_val)
+			goto out;
+		ret_val = e1e_wphy(hw, PHY_REG(776, 23), 0x7E00);
+		if (ret_val)
+			goto out;
+		e1e_rphy(hw, HV_PM_CTRL, &data);
+		ret_val = e1e_wphy(hw, HV_PM_CTRL, data & ~(1 << 10));
+		if (ret_val)
+			goto out;
+	}
+
+	/* re-enable Rx path after enabling/disabling workaround */
+	ret_val = e1e_wphy(hw, PHY_REG(769, 20), phy_reg & ~(1 << 14));
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_lv_phy_workarounds_ich8lan - apply PHY workarounds after reset
+ *  @hw: pointer to the HW structure
+ *
+ *  A series of PHY workarounds to be done after every PHY reset.
+ **/
+static s32 e1000_lv_phy_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (hw->mac.type < e1000_pch2lan)
+		goto out;
+
+	/* Set MDIO slow mode before any other MDIO access */
+	ret_val = e1000_set_mdio_slow_mode_hv(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_k1_workaround_lv - K1 Si workaround
+ *  @hw:   pointer to the HW structure
+ *
+ *  Workaround to set the K1 beacon duration for 82579 parts
+ **/
+static s32 e1000_k1_workaround_lv(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 status_reg = 0;
+	u32 mac_reg;
+	u16 phy_reg;
+
+	if (hw->mac.type != e1000_pch2lan)
+		goto out;
+
+	/* Set K1 beacon duration based on 1Gbps speed or otherwise */
+	ret_val = e1e_rphy(hw, HV_M_STATUS, &status_reg);
+	if (ret_val)
+		goto out;
+
+	if ((status_reg & (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE))
+	    == (HV_M_STATUS_LINK_UP | HV_M_STATUS_AUTONEG_COMPLETE)) {
+		mac_reg = er32(FEXTNVM4);
+		mac_reg &= ~E1000_FEXTNVM4_BEACON_DURATION_MASK;
+
+		ret_val = e1e_rphy(hw, I82579_LPI_CTRL, &phy_reg);
+		if (ret_val)
+			goto out;
+
+		if (status_reg & HV_M_STATUS_SPEED_1000) {
+			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_8USEC;
+			phy_reg &= ~I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+		} else {
+			mac_reg |= E1000_FEXTNVM4_BEACON_DURATION_16USEC;
+			phy_reg |= I82579_LPI_CTRL_FORCE_PLL_LOCK_COUNT;
+		}
+		ew32(FEXTNVM4, mac_reg);
+		ret_val = e1e_wphy(hw, I82579_LPI_CTRL, phy_reg);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_gate_hw_phy_config_ich8lan - disable PHY config via hardware
+ *  @hw:   pointer to the HW structure
+ *  @gate: boolean set to true to gate, false to ungate
+ *
+ *  Gate/ungate the automatic PHY configuration via hardware; perform
+ *  the configuration via software instead.
+ **/
+static void e1000_gate_hw_phy_config_ich8lan(struct e1000_hw *hw, bool gate)
+{
+	u32 extcnf_ctrl;
+
+	if (hw->mac.type != e1000_pch2lan)
+		return;
+
+	extcnf_ctrl = er32(EXTCNF_CTRL);
+
+	if (gate)
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+	else
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_GATE_PHY_CFG;
+
+	ew32(EXTCNF_CTRL, extcnf_ctrl);
+}
+
+/**
+ *  e1000_lan_init_done_ich8lan - Check for PHY config completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check the appropriate indication the MAC has finished configuring the
+ *  PHY after a software reset.
+ **/
+static void e1000_lan_init_done_ich8lan(struct e1000_hw *hw)
+{
+	u32 data, loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+
+	/* Wait for basic configuration to complete before proceeding */
+	do {
+		data = er32(STATUS);
+		data &= E1000_STATUS_LAN_INIT_DONE;
+		udelay(100);
+	} while ((!data) && --loop);
+
+	/*
+	 * If basic configuration is incomplete before the above loop
+	 * count reaches 0, loading the configuration from NVM will
+	 * leave the PHY in a bad state possibly resulting in no link.
+	 */
+	if (loop == 0)
+		e_dbg("LAN_INIT_DONE not set, increase timeout\n");
+
+	/* Clear the Init Done bit for the next init event */
+	data = er32(STATUS);
+	data &= ~E1000_STATUS_LAN_INIT_DONE;
+	ew32(STATUS, data);
+}
+
+/**
+ *  e1000_post_phy_reset_ich8lan - Perform steps required after a PHY reset
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_post_phy_reset_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 reg;
+
+	if (e1000_check_reset_block(hw))
+		goto out;
+
+	/* Allow time for h/w to get to quiescent state after reset */
+	usleep_range(10000, 20000);
+
+	/* Perform any necessary post-reset workarounds */
+	switch (hw->mac.type) {
+	case e1000_pchlan:
+		ret_val = e1000_hv_phy_workarounds_ich8lan(hw);
+		if (ret_val)
+			goto out;
+		break;
+	case e1000_pch2lan:
+		ret_val = e1000_lv_phy_workarounds_ich8lan(hw);
+		if (ret_val)
+			goto out;
+		break;
+	default:
+		break;
+	}
+
+	/* Clear the host wakeup bit after lcd reset */
+	if (hw->mac.type >= e1000_pchlan) {
+		e1e_rphy(hw, BM_PORT_GEN_CFG, &reg);
+		reg &= ~BM_WUC_HOST_WU_BIT;
+		e1e_wphy(hw, BM_PORT_GEN_CFG, reg);
+	}
+
+	/* Configure the LCD with the extended configuration region in NVM */
+	ret_val = e1000_sw_lcd_config_ich8lan(hw);
+	if (ret_val)
+		goto out;
+
+	/* Configure the LCD with the OEM bits in NVM */
+	ret_val = e1000_oem_bits_config_ich8lan(hw, true);
+
+	if (hw->mac.type == e1000_pch2lan) {
+		/* Ungate automatic PHY configuration on non-managed 82579 */
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+			usleep_range(10000, 20000);
+			e1000_gate_hw_phy_config_ich8lan(hw, false);
+		}
+
+		/* Set EEE LPI Update Timer to 200usec */
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_ADDR,
+						       I82579_LPI_UPDATE_TIMER);
+		if (ret_val)
+			goto release;
+		ret_val = hw->phy.ops.write_reg_locked(hw, I82579_EMI_DATA,
+						       0x1387);
+release:
+		hw->phy.ops.release(hw);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY
+ *  This is a function pointer entry point called by drivers
+ *  or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* Gate automatic PHY configuration by hardware on non-managed 82579 */
+	if ((hw->mac.type == e1000_pch2lan) &&
+	    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+		e1000_gate_hw_phy_config_ich8lan(hw, true);
+
+	ret_val = e1000e_phy_hw_reset_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	return e1000_post_phy_reset_ich8lan(hw);
+}
+
+/**
+ *  e1000_set_lplu_state_pchlan - Set Low Power Link Up state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU state according to the active flag.  For PCH, if the OEM
+ *  write bits are disabled in the NVM, writing the LPLU bits in the MAC will
+ *  not set the PHY speed.  This function manually sets the LPLU bit and
+ *  restarts auto-neg as the hardware would.  D3 and D0 LPLU call the same
+ *  function since they configure the same bit.
+ **/
+static s32 e1000_set_lplu_state_pchlan(struct e1000_hw *hw, bool active)
+{
+	s32 ret_val = 0;
+	u16 oem_reg;
+
+	ret_val = e1e_rphy(hw, HV_OEM_BITS, &oem_reg);
+	if (ret_val)
+		goto out;
+
+	if (active)
+		oem_reg |= HV_OEM_BITS_LPLU;
+	else
+		oem_reg &= ~HV_OEM_BITS_LPLU;
+
+	oem_reg |= HV_OEM_BITS_RESTART_AN;
+	ret_val = e1e_wphy(hw, HV_OEM_BITS, oem_reg);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val = 0;
+	u16 data;
+
+	if (phy->type == e1000_phy_ife)
+		return ret_val;
+
+	phy_ctrl = er32(PHY_CTRL);
+
+	if (active) {
+		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+		if (ret_val)
+			return ret_val;
+	} else {
+		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D3 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val;
+	u16 data;
+
+	phy_ctrl = er32(PHY_CTRL);
+
+	if (!active) {
+		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+		ew32(PHY_CTRL, phy_ctrl);
+
+		if (phy->type != e1000_phy_igp_3)
+			return 0;
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_valid_nvm_bank_detect_ich8lan - detect the valid NVM bank (0 or 1)
+ *  @hw: pointer to the HW structure
+ *  @bank:  pointer to the variable that returns the active bank
+ *
+ *  Reads signature byte from the NVM using the flash access registers.
+ *  Word 0x13 bits 15:14 = 10b indicate a valid signature for that bank.
+ **/
+static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+	u32 eecd;
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 bank1_offset = nvm->flash_bank_size * sizeof(u16);
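+	/* Byte offset of the high byte of the bank signature word */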
+	u32 act_offset = E1000_ICH_NVM_SIG_WORD * 2 + 1;
+	u8 sig_byte = 0;
+	s32 ret_val = 0;
+
+	switch (hw->mac.type) {
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		eecd = er32(EECD);
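+		/* Trust EECD's bank-select bit only when the device
+		 * reports it valid; otherwise fall back to reading the
+		 * bank signatures from the flash below.
+		 */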
+		if ((eecd & E1000_EECD_SEC1VAL_VALID_MASK) ==
+		    E1000_EECD_SEC1VAL_VALID_MASK) {
+			if (eecd & E1000_EECD_SEC1VAL)
+				*bank = 1;
+			else
+				*bank = 0;
+
+			return 0;
+		}
+		e_dbg("Unable to determine valid NVM bank via EEC - "
+		       "reading flash signature\n");
+		fallthrough;
+	default:
+		/* set bank to 0 in case flash read fails */
+		*bank = 0;
+
+		/* Check bank 0 */
+		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset,
+		                                        &sig_byte);
+		if (ret_val)
+			return ret_val;
+		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+		    E1000_ICH_NVM_SIG_VALUE) {
+			*bank = 0;
+			return 0;
+		}
+
+		/* Check bank 1 */
+		ret_val = e1000_read_flash_byte_ich8lan(hw, act_offset +
+		                                        bank1_offset,
+		                                        &sig_byte);
+		if (ret_val)
+			return ret_val;
+		if ((sig_byte & E1000_ICH_NVM_VALID_SIG_MASK) ==
+		    E1000_ICH_NVM_SIG_VALUE) {
+			*bank = 1;
+			return 0;
+		}
+
+		e_dbg("ERROR: No valid NVM bank present\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in words) of the word(s) to read.
+ *  @words: Size of data to read in words
+ *  @data: Pointer to the word(s) to read at offset.
+ *
+ *  Reads a word(s) from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 act_offset;
+	s32 ret_val = 0;
+	u32 bank = 0;
+	u16 i, word;
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	nvm->ops.acquire(hw);
+
+	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val) {
+		e_dbg("Could not detect valid bank, assuming bank 0\n");
+		bank = 0;
+	}
+
+	act_offset = (bank) ? nvm->flash_bank_size : 0;
+	act_offset += offset;
+
+	ret_val = 0;
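+	/* Serve modified words from the shadow RAM; read everything else
+	 * from the flash.
+	 */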
+	for (i = 0; i < words; i++) {
+		if (dev_spec->shadow_ram[offset+i].modified) {
+			data[i] = dev_spec->shadow_ram[offset+i].value;
+		} else {
+			ret_val = e1000_read_flash_word_ich8lan(hw,
+								act_offset + i,
+								&word);
+			if (ret_val)
+				break;
+			data[i] = word;
+		}
+	}
+
+	nvm->ops.release(hw);
+
+out:
+	if (ret_val)
+		e_dbg("NVM read error: %d\n", ret_val);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_init_ich8lan - Initialize flash
+ *  @hw: pointer to the HW structure
+ *
+ *  This function does initial flash setup so that a new read/write/erase cycle
+ *  can be started.
+ **/
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+	/* Check if the flash descriptor is valid */
+	if (hsfsts.hsf_status.fldesvalid == 0) {
+		e_dbg("Flash descriptor invalid.  "
+			 "SW Sequencing must be used.\n");
+		return -E1000_ERR_NVM;
+	}
+
+	/* Clear FCERR and DAEL in hw status by writing 1 */
+	hsfsts.hsf_status.flcerr = 1;
+	hsfsts.hsf_status.dael = 1;
+
+	ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	/*
+	 * Either the hardware exposes a cycle-in-progress bit that we can
+	 * check before starting a new cycle, or the FDONE bit reads as 1
+	 * after a hardware reset, which can then be used as an indication
+	 * of whether a cycle is in progress or has completed.
+	 */
+
+	if (hsfsts.hsf_status.flcinprog == 0) {
+		/*
+		 * There is no cycle running at present,
+		 * so we can start a cycle.
+		 * Begin by setting Flash Cycle Done.
+		 */
+		hsfsts.hsf_status.flcdone = 1;
+		ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+		ret_val = 0;
+	} else {
+		s32 i = 0;
+
+		/*
+		 * Otherwise poll for some time so the current
+		 * cycle has a chance to end before giving up.
+		 */
+		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+			hsfsts.regval = __er16flash(hw, ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcinprog == 0) {
+				ret_val = 0;
+				break;
+			}
+			udelay(1);
+		}
+		if (ret_val == 0) {
+			/*
+			 * The previous cycle completed in time, so set
+			 * the Flash Cycle Done bit and proceed.
+			 */
+			hsfsts.hsf_status.flcdone = 1;
+			ew16flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+		} else {
+			e_dbg("Flash controller busy, cannot get access\n");
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ *  @hw: pointer to the HW structure
+ *  @timeout: maximum time to wait for completion
+ *
+ *  This function starts a flash cycle and waits for its completion.
+ **/
+static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+	union ich8_hws_flash_ctrl hsflctl;
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+	u32 i = 0;
+
+	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+	hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+	hsflctl.hsf_ctrl.flcgo = 1;
+	ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+	/* wait till FDONE bit is set to 1 */
+	do {
+		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+		if (hsfsts.hsf_status.flcdone == 1)
+			break;
+		udelay(1);
+	} while (i++ < timeout);
+
+	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
+		return 0;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_flash_word_ich8lan - Read word from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: offset to data location
+ *  @data: pointer to the location for storing the data
+ *
+ *  Reads the flash word at offset into data.  Offset is converted
+ *  to bytes before read.
+ **/
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u16 *data)
+{
+	/* Must convert offset into bytes. */
+	offset <<= 1;
+
+	return e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+}
+
+/**
+ *  e1000_read_flash_byte_ich8lan - Read byte from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to read.
+ *  @data: Pointer to a byte to store the value read.
+ *
+ *  Reads a single byte from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 *data)
+{
+	s32 ret_val;
+	u16 word = 0;
+
+	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 1, &word);
+	if (ret_val)
+		return ret_val;
+
+	*data = (u8)word;
+
+	return 0;
+}
+
+/**
+ *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte or word to read.
+ *  @size: Size of data to read, 1=byte 2=word
+ *  @data: Pointer to the word to store the value read.
+ *
+ *  Reads a byte or word from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					 u8 size, u16 *data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+
+	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		return -E1000_ERR_NVM;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			    hw->nvm.flash_base_addr;
+
+	do {
+		udelay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != 0)
+			break;
+
+		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+						ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+		/*
+		 * If FCERR is set, clear it and retry the whole
+		 * sequence a few more times; otherwise read the
+		 * result from Flash Data0 (least significant byte
+		 * first).
+		 */
+		if (ret_val == 0) {
+			flash_data = er32flash(ICH_FLASH_FDATA0);
+			if (size == 1)
+				*data = (u8)(flash_data & 0x000000FF);
+			else if (size == 2)
+				*data = (u16)(flash_data & 0x0000FFFF);
+			break;
+		} else {
+			/*
+			 * If we've gotten here, then things are probably
+			 * completely hosed, but if the error condition is
+			 * detected, it won't hurt to give it another try...
+			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				e_dbg("Timeout error - flash cycle "
+					 "did not complete.\n");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in words) of the word(s) to write.
+ *  @words: Size of data to write in words
+ *  @data: Pointer to the word(s) to write at offset.
+ *
+ *  Writes the word(s) to the shadow RAM.  The flash itself is updated
+ *  when the NVM checksum is committed.
+ **/
+static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u16 i;
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	nvm->ops.acquire(hw);
+
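+	/* Only stage the words in the shadow RAM here; the actual flash
+	 * write happens in e1000_update_nvm_checksum_ich8lan().
+	 */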
+	for (i = 0; i < words; i++) {
+		dev_spec->shadow_ram[offset+i].modified = true;
+		dev_spec->shadow_ram[offset+i].value = data[i];
+	}
+
+	nvm->ops.release(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  The NVM checksum is updated by calling the generic update_nvm_checksum,
+ *  which writes the checksum to the shadow ram.  The changes in the shadow
+ *  ram are then committed to the EEPROM by processing each bank at a time
+ *  checking for the modified bit and writing only the pending changes.
+ *  After a successful commit, the shadow ram is cleared and is ready for
+ *  future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1000e_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		goto out;
+
+	if (nvm->type != e1000_nvm_flash_sw)
+		goto out;
+
+	nvm->ops.acquire(hw);
+
+	/*
+	 * We're writing to the opposite bank so if we're on bank 1,
+	 * write to bank 0 etc.  We also need to erase the segment that
+	 * is going to be written
+	 */
+	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val) {
+		e_dbg("Could not detect valid bank, assuming bank 0\n");
+		bank = 0;
+	}
+
+	if (bank == 0) {
+		new_bank_offset = nvm->flash_bank_size;
+		old_bank_offset = 0;
+		ret_val = e1000_erase_flash_bank_ich8lan(hw, 1);
+		if (ret_val)
+			goto release;
+	} else {
+		old_bank_offset = nvm->flash_bank_size;
+		new_bank_offset = 0;
+		ret_val = e1000_erase_flash_bank_ich8lan(hw, 0);
+		if (ret_val)
+			goto release;
+	}
+
+	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+		/*
+		 * Determine whether to write the value stored
+		 * in the other NVM bank or a modified value stored
+		 * in the shadow RAM
+		 */
+		if (dev_spec->shadow_ram[i].modified) {
+			data = dev_spec->shadow_ram[i].value;
+		} else {
+			ret_val = e1000_read_flash_word_ich8lan(hw, i +
+			                                        old_bank_offset,
+			                                        &data);
+			if (ret_val)
+				break;
+		}
+
+		/*
+		 * If the word is 0x13, then make sure the signature bits
+		 * (15:14) are 11b until the commit has completed.
+		 * This will allow us to write 10b which indicates the
+		 * signature is valid.  We want to do this after the write
+		 * has completed so that we don't mark the segment valid
+		 * while the write is still in progress
+		 */
+		if (i == E1000_ICH_NVM_SIG_WORD)
+			data |= E1000_ICH_NVM_SIG_MASK;
+
+		/* Convert offset to bytes. */
+		act_offset = (i + new_bank_offset) << 1;
+
+		udelay(100);
+		/* Write the bytes to the new bank. */
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+							       act_offset,
+							       (u8)data);
+		if (ret_val)
+			break;
+
+		udelay(100);
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+							  act_offset + 1,
+							  (u8)(data >> 8));
+		if (ret_val)
+			break;
+	}
+
+	/*
+	 * Don't bother writing the segment valid bits if sector
+	 * programming failed.
+	 */
+	if (ret_val) {
+		/* Possibly read-only, see e1000e_write_protect_nvm_ich8lan() */
+		e_dbg("Flash commit failed.\n");
+		goto release;
+	}
+
+	/*
+	 * Finally, validate the new segment by setting bits 15:14
+	 * to 10b in word 0x13.  This can be done without an erase
+	 * since these bits start out as 11b and we only need to
+	 * change bit 14 to 0b.
+	 */
+	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+	ret_val = e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+	if (ret_val)
+		goto release;
+
+	data &= 0xBFFF;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+						       act_offset * 2 + 1,
+						       (u8)(data >> 8));
+	if (ret_val)
+		goto release;
+
+	/*
+	 * And invalidate the previously valid segment by setting
+	 * its signature word (0x13) high_byte to 0b. This can be
+	 * done without an erase because flash erase sets all bits
+	 * to 1's. We can write 1's to 0's without an erase
+	 */
+	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+	if (ret_val)
+		goto release;
+
+	/* Great!  Everything worked, we can now clear the cached entries. */
+	for (i = 0; i < E1000_ICH8_SHADOW_RAM_WORDS; i++) {
+		dev_spec->shadow_ram[i].modified = false;
+		dev_spec->shadow_ram[i].value = 0xFFFF;
+	}
+
+release:
+	nvm->ops.release(hw);
+
+	/*
+	 * Reload the EEPROM, or else modifications will not appear
+	 * until after the next adapter reset.
+	 */
+	if (!ret_val) {
+		e1000e_reload_nvm(hw);
+		usleep_range(10000, 20000);
+	}
+
+out:
+	if (ret_val)
+		e_dbg("NVM update error: %d\n", ret_val);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Check to see if checksum needs to be fixed by reading bit 6 in word 0x19.
+ *  If the bit is 0, the EEPROM has been modified but the checksum was not
+ *  calculated; in that case we calculate the checksum and set bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 data;
+
+	/*
+	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
+	 * needs to be fixed.  This bit is an indication that the NVM
+	 * was prepared by OEM software and did not calculate the
+	 * checksum...a likely scenario.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+	if (ret_val)
+		return ret_val;
+
+	if ((data & 0x40) == 0) {
+		data |= 0x40;
+		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1000e_update_nvm_checksum(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return e1000e_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ *  e1000e_write_protect_nvm_ich8lan - Make the NVM read-only
+ *  @hw: pointer to the HW structure
+ *
+ *  To prevent malicious write/erase of the NVM, set it to be read-only
+ *  so that the hardware ignores all write/erase cycles of the NVM via
+ *  the flash control registers.  The shadow-ram copy of the NVM will
+ *  still be updated, however any updates to this copy will not stick
+ *  across driver reloads.
+ **/
+void e1000e_write_protect_nvm_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_flash_protected_range pr0;
+	union ich8_hws_flash_status hsfsts;
+	u32 gfpreg;
+
+	nvm->ops.acquire(hw);
+
+	gfpreg = er32flash(ICH_FLASH_GFPREG);
+
+	/* Write-protect GbE Sector of NVM */
+	pr0.regval = er32flash(ICH_FLASH_PR0);
+	pr0.range.base = gfpreg & FLASH_GFPREG_BASE_MASK;
+	pr0.range.limit = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK);
+	pr0.range.wpe = true;
+	ew32flash(ICH_FLASH_PR0, pr0.regval);
+
+	/*
+	 * Lock down a subset of GbE Flash Control Registers, e.g.
+	 * PR0 to prevent the write-protection from being lifted.
+	 * Once FLOCKDN is set, the registers protected by it cannot
+	 * be written until FLOCKDN is cleared by a hardware reset.
+	 */
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+	hsfsts.hsf_status.flockdn = true;
+	ew32flash(ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	nvm->ops.release(hw);
+}
+
+/**
+ *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte/word to read.
+ *  @size: Size of data to read, 1=byte 2=word
+ *  @data: The byte(s) to write to the NVM.
+ *
+ *  Writes one/two bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+					  u8 size, u16 data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val;
+	u8 count = 0;
+
+	if (size < 1 || size > 2 || data > size * 0xff ||
+	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		return -E1000_ERR_NVM;
+
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+			    hw->nvm.flash_base_addr;
+
+	do {
+		udelay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val)
+			break;
+
+		hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+		ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		ew32flash(ICH_FLASH_FADDR, flash_linear_addr);
+
+		if (size == 1)
+			flash_data = (u32)data & 0x00FF;
+		else
+			flash_data = (u32)data;
+
+		ew32flash(ICH_FLASH_FDATA0, flash_data);
+
+		/*
+		 * If FCERR is set, clear it and retry the whole
+		 * sequence a few more times; otherwise we are done.
+		 */
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+					       ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+		if (!ret_val)
+			break;
+
+		/*
+		 * If we're here, then things are most likely
+		 * completely hosed, but if the error condition
+		 * is detected, it won't hurt to give it another
+		 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+		 */
+		hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+		if (hsfsts.hsf_status.flcerr == 1)
+			/* Repeat for some time before giving up. */
+			continue;
+		if (hsfsts.hsf_status.flcdone == 0) {
+			e_dbg("Timeout error - flash cycle "
+				 "did not complete.");
+			break;
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The index of the byte to read.
+ *  @data: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+					  u8 data)
+{
+	u16 word = (u16)data;
+
+	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+ *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to write.
+ *  @byte: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ *  Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+						u32 offset, u8 byte)
+{
+	s32 ret_val;
+	u16 program_retries;
+
+	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+	if (!ret_val)
+		return ret_val;
+
+	for (program_retries = 0; program_retries < 100; program_retries++) {
+		e_dbg("Retrying Byte %2.2X at offset %u\n", byte, offset);
+		udelay(100);
+		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+		if (!ret_val)
+			break;
+	}
+	if (program_retries == 100)
+		return -E1000_ERR_NVM;
+
+	return 0;
+}
+
+/**
+ *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ *  @hw: pointer to the HW structure
+ *  @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ *  Erases the bank specified. Each bank is a 4k block. Banks are 0 based.
+ *  bank N is 4096 * N + flash_reg_addr.
+ **/
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	/* bank size is in 16bit words - adjust to bytes */
+	u32 flash_bank_size = nvm->flash_bank_size * 2;
+	s32 ret_val;
+	s32 count = 0;
+	s32 j, iteration, sector_size;
+
+	hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+
+	/*
+	 * Determine HW Sector size: Read BERASE bits of hw flash status
+	 * register
+	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
+	 *     consecutive sectors.  The start index for the nth Hw sector
+	 *     can be calculated as = bank * 4096 + n * 256
+	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+	 *     The start index for the nth Hw sector can be calculated
+	 *     as = bank * 4096
+	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+	 *     (ich9 only, otherwise error condition)
+	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+	 */
+	switch (hsfsts.hsf_status.berasesz) {
+	case 0:
+		/* Hw sector size 256 */
+		sector_size = ICH_FLASH_SEG_SIZE_256;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+		break;
+	case 1:
+		sector_size = ICH_FLASH_SEG_SIZE_4K;
+		iteration = 1;
+		break;
+	case 2:
+		sector_size = ICH_FLASH_SEG_SIZE_8K;
+		iteration = 1;
+		break;
+	case 3:
+		sector_size = ICH_FLASH_SEG_SIZE_64K;
+		iteration = 1;
+		break;
+	default:
+		return -E1000_ERR_NVM;
+	}
+
+	/* Start with the base address, then add the sector offset. */
+	flash_linear_addr = hw->nvm.flash_base_addr;
+	flash_linear_addr += (bank) ? flash_bank_size : 0;
+
+	for (j = 0; j < iteration ; j++) {
+		do {
+			/* Steps */
+			ret_val = e1000_flash_cycle_init_ich8lan(hw);
+			if (ret_val)
+				return ret_val;
+
+			/*
+			 * Write a value 11 (block Erase) in Flash
+			 * Cycle field in hw flash control
+			 */
+			hsflctl.regval = er16flash(ICH_FLASH_HSFCTL);
+			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+			ew16flash(ICH_FLASH_HSFCTL, hsflctl.regval);
+
+			/*
+			 * Write the last 24 bits of an index within the
+			 * block into Flash Linear address field in Flash
+			 * Address.
+			 */
+			ew32flash(ICH_FLASH_FADDR,
+				  flash_linear_addr + (j * sector_size));
+
+			ret_val = e1000_flash_cycle_ich8lan(hw,
+					       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+			if (ret_val == 0)
+				break;
+
+			/*
+			 * Check if FCERR is set to 1.  If 1,
+			 * clear it and try the whole sequence
+			 * a few more times else Done
+			 */
+			hsfsts.regval = er16flash(ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1)
+				/* repeat for some time before giving up */
+				continue;
+			else if (hsfsts.hsf_status.flcdone == 0)
+				return ret_val;
+		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_valid_led_default_ich8lan - Set the default LED settings
+ *  @hw: pointer to the HW structure
+ *  @data: Pointer to the LED settings
+ *
+ *  Reads the LED default settings from the NVM into data.  If the NVM LED
+ *  setting is all 0's or all F's, set the LED default to a valid LED default
+ *  setting.
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 ||
+	    *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT_ICH8LAN;
+
+	return 0;
+}
+
+/**
+ *  e1000_id_led_init_pchlan - store LED configurations
+ *  @hw: pointer to the HW structure
+ *
+ *  PCH does not control LEDs via the LEDCTL register; rather, it uses
+ *  the PHY LED configuration register.
+ *
+ *  PCH also does not have an "always on" or "always off" mode which
+ *  complicates the ID feature.  Instead of using the "on" mode to indicate
+ *  in ledctl_mode2 the LEDs to use for ID (see e1000e_id_led_init()),
+ *  use "link_up" mode.  The LEDs will still ID on request if there is no
+ *  link based on logic in e1000_led_[on|off]_pchlan().
+ **/
+static s32 e1000_id_led_init_pchlan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LINK_UP;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LINK_UP | E1000_PHY_LED0_IVRT;
+	u16 data, i, temp, shift;
+
+	/* Get default ID LED modes */
+	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = er32(LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
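+	/* Each LED's mode occupies 4 bits in the NVM word, while each
+	 * PHY LED configuration field is 5 bits wide.
+	 */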
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & E1000_LEDCTL_LED0_MODE_MASK;
+		shift = (i * 5);
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_on << shift);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode1 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_on << shift);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(E1000_PHY_LED0_MASK << shift);
+			mac->ledctl_mode2 |= (ledctl_off << shift);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ *  @hw: pointer to the HW structure
+ *
+ *  ICH8 uses the PCI Express bus, but does not contain a PCI Express
+ *  Capability register, so the bus width is hard coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+
+	ret_val = e1000e_get_bus_info_pcie(hw);
+
+	/*
+	 * ICH devices are "PCI Express"-ish.  They have
+	 * a configuration space, but do not contain
+	 * PCI Express Capability registers, so bus width
+	 * must be hardcoded.
+	 */
+	if (bus->width == e1000_bus_width_unknown)
+		bus->width = e1000_bus_width_pcie_x1;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_ich8lan - Reset the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a full reset of the hardware which includes a reset of the PHY and
+ *  MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u16 reg;
+	u32 ctrl, kab;
+	s32 ret_val;
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000e_disable_pcie_master(hw);
+	if (ret_val)
+		e_dbg("PCI-E Master disable polling has failed.\n");
+
+	e_dbg("Masking off all interrupts\n");
+	ew32(IMC, 0xffffffff);
+
+	/*
+	 * Disable the Transmit and Receive units.  Then delay to allow
+	 * any pending transactions to complete before we hit the MAC
+	 * with the global reset.
+	 */
+	ew32(RCTL, 0);
+	ew32(TCTL, E1000_TCTL_PSP);
+	e1e_flush();
+
+	usleep_range(10000, 20000);
+
+	/* Workaround for ICH8 bit corruption issue in FIFO memory */
+	if (hw->mac.type == e1000_ich8lan) {
+		/* Set Tx and Rx buffer allocation to 8k apiece. */
+		ew32(PBA, E1000_PBA_8K);
+		/* Set Packet Buffer Size to 16k. */
+		ew32(PBS, E1000_PBS_16K);
+	}
+
+	if (hw->mac.type == e1000_pchlan) {
+		/* Save the NVM K1 bit setting */
+		ret_val = e1000_read_nvm(hw, E1000_NVM_K1_CONFIG, 1, &reg);
+		if (ret_val)
+			return ret_val;
+
+		if (reg & E1000_NVM_K1_ENABLE)
+			dev_spec->nvm_k1_enabled = true;
+		else
+			dev_spec->nvm_k1_enabled = false;
+	}
+
+	ctrl = er32(CTRL);
+
+	if (!e1000_check_reset_block(hw)) {
+		/*
+		 * Full-chip reset requires MAC and PHY reset at the same
+		 * time to make sure the interface between MAC and the
+		 * external PHY is reset.
+		 */
+		ctrl |= E1000_CTRL_PHY_RST;
+
+		/*
+		 * Gate automatic PHY configuration by hardware on
+		 * non-managed 82579
+		 */
+		if ((hw->mac.type == e1000_pch2lan) &&
+		    !(er32(FWSM) & E1000_ICH_FWSM_FW_VALID))
+			e1000_gate_hw_phy_config_ich8lan(hw, true);
+	}
+	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	e_dbg("Issuing a global reset to ich8lan\n");
+	ew32(CTRL, (ctrl | E1000_CTRL_RST));
+	/* cannot issue a flush here because it hangs the hardware */
+	msleep(20);
+
+	if (!ret_val)
+		clear_bit(__E1000_ACCESS_SHARED_RESOURCE, &hw->adapter->state);
+
+	if (ctrl & E1000_CTRL_PHY_RST) {
+		ret_val = hw->phy.ops.get_cfg_done(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_post_phy_reset_ich8lan(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * For PCH, this write will make sure that any noise
+	 * will be detected as a CRC error and be dropped rather than show up
+	 * as a bad packet to the DMA engine.
+	 */
+	if (hw->mac.type == e1000_pchlan)
+		ew32(CRC_OFFSET, 0x65656565);
+
+	ew32(IMC, 0xffffffff);
+	er32(ICR);
+
+	kab = er32(KABGTXD);
+	kab |= E1000_KABGTXD_BGSQLBIAS;
+	ew32(KABGTXD, kab);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_ich8lan - Initialize the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Prepares the hardware for transmit and receive by doing the following:
+ *   - initialize hardware bits
+ *   - initialize LED identification
+ *   - setup receive address registers
+ *   - setup flow control
+ *   - setup transmit descriptors
+ *   - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl_ext, txdctl, snoop;
+	s32 ret_val;
+	u16 i;
+
+	e1000_initialize_hw_bits_ich8lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = mac->ops.id_led_init(hw);
+	if (ret_val)
+		e_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+
+	/* Setup the receive address. */
+	e1000e_init_rx_addrs(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	e_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/*
+	 * The 82578 Rx buffer will stall if wakeup is enabled in host and
+	 * the ME.  Disable wakeup by clearing the host wakeup bit.
+	 * Reset the phy after disabling host wakeup to reset the Rx buffer.
+	 */
+	if (hw->phy.type == e1000_phy_82578) {
+		e1e_rphy(hw, BM_PORT_GEN_CFG, &i);
+		i &= ~BM_WUC_HOST_WU_BIT;
+		e1e_wphy(hw, BM_PORT_GEN_CFG, i);
+		ret_val = e1000_phy_hw_reset_ich8lan(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link_ich8lan(hw);
+
+	/* Set the transmit descriptor write-back policy for both queues */
+	txdctl = er32(TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	ew32(TXDCTL(0), txdctl);
+	txdctl = er32(TXDCTL(1));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+		 E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	ew32(TXDCTL(1), txdctl);
+
+	/*
+	 * ICH8 has opposite polarity of no_snoop bits.
+	 * By default, we should use snoop behavior.
+	 */
+	if (mac->type == e1000_ich8lan)
+		snoop = PCIE_ICH8_SNOOP_ALL;
+	else
+		snoop = (u32) ~(PCIE_NO_SNOOP_ALL);
+	e1000e_set_pcie_no_snoop(hw, snoop);
+
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+	ew32(CTRL_EXT, ctrl_ext);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_ich8lan(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets/Clears required hardware bits necessary for correctly setting up the
+ *  hardware for transmit and receive.
+ **/
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	/* Extended Device Control */
+	reg = er32(CTRL_EXT);
+	reg |= (1 << 22);
+	/* Enable PHY low-power state when MAC is at D3 w/o WoL */
+	if (hw->mac.type >= e1000_pchlan)
+		reg |= E1000_CTRL_EXT_PHYPDEN;
+	ew32(CTRL_EXT, reg);
+
+	/* Transmit Descriptor Control 0 */
+	reg = er32(TXDCTL(0));
+	reg |= (1 << 22);
+	ew32(TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = er32(TXDCTL(1));
+	reg |= (1 << 22);
+	ew32(TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = er32(TARC(0));
+	if (hw->mac.type == e1000_ich8lan)
+		reg |= (1 << 28) | (1 << 29);
+	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+	ew32(TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = er32(TARC(1));
+	if (er32(TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	reg |= (1 << 24) | (1 << 26) | (1 << 30);
+	ew32(TARC(1), reg);
+
+	/* Device Status */
+	if (hw->mac.type == e1000_ich8lan) {
+		reg = er32(STATUS);
+		reg &= ~(1 << 31);
+		ew32(STATUS, reg);
+	}
+
+	/*
+	 * Work around a descriptor data corruption issue seen with NFSv2
+	 * UDP traffic by simply disabling the NFS filtering capability.
+	 */
+	reg = er32(RFCTL);
+	reg |= (E1000_RFCTL_NFSW_DIS | E1000_RFCTL_NFSR_DIS);
+	ew32(RFCTL, reg);
+}
+
+/**
+ *  e1000_setup_link_ich8lan - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	if (e1000_check_reset_block(hw))
+		return 0;
+
+	/*
+	 * ICH parts do not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	if (hw->fc.requested_mode == e1000_fc_default) {
+		/* Workaround h/w hang when Tx flow control enabled */
+		if (hw->mac.type == e1000_pchlan)
+			hw->fc.requested_mode = e1000_fc_rx_pause;
+		else
+			hw->fc.requested_mode = e1000_fc_full;
+	}
+
+	/*
+	 * Save off the requested flow control mode for use later.  Depending
+	 * on the link partner's capabilities, we may or may not use this mode.
+	 */
+	hw->fc.current_mode = hw->fc.requested_mode;
+
+	e_dbg("After fix-ups FlowControl is now = %x\n",
+		hw->fc.current_mode);
+
+	/* Continue to configure the copper link. */
+	ret_val = e1000_setup_copper_link_ich8lan(hw);
+	if (ret_val)
+		return ret_val;
+
+	ew32(FCTTV, hw->fc.pause_time);
+	if ((hw->phy.type == e1000_phy_82578) ||
+	    (hw->phy.type == e1000_phy_82579) ||
+	    (hw->phy.type == e1000_phy_i217) ||
+	    (hw->phy.type == e1000_phy_82577)) {
+		ew32(FCRTV_PCH, hw->fc.refresh_time);
+
+		ret_val = e1e_wphy(hw, PHY_REG(BM_PORT_CTRL_PAGE, 27),
+				   hw->fc.pause_time);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the Kumeran interface to the PHY to wait the appropriate time
+ *  when polling the PHY, then calls the generic setup_copper_link to finish
+ *  configuring the copper link.
+ **/
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ew32(CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each iteration
+	 * and increase the max iterations when polling the phy;
+	 * this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_TIMEOUTS, 0xFFFF);
+	if (ret_val)
+		return ret_val;
+	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+	                               &reg_data);
+	if (ret_val)
+		return ret_val;
+	reg_data |= 0x3F;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_INBAND_PARAM,
+	                                reg_data);
+	if (ret_val)
+		return ret_val;
+
+	switch (hw->phy.type) {
+	case e1000_phy_igp_3:
+		ret_val = e1000e_copper_link_setup_igp(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_bm:
+	case e1000_phy_82578:
+		ret_val = e1000e_copper_link_setup_m88(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_82577:
+	case e1000_phy_82579:
+	case e1000_phy_i217:
+		ret_val = e1000_copper_link_setup_82577(hw);
+		if (ret_val)
+			return ret_val;
+		break;
+	case e1000_phy_ife:
+		ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+		if (ret_val)
+			return ret_val;
+
+		reg_data &= ~IFE_PMC_AUTO_MDIX;
+
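+		/* mdix: 1 => force MDI, 2 => force MDI-X, 0 (or any other
+		 * value) => automatic crossover
+		 */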
+		switch (hw->phy.mdix) {
+		case 1:
+			reg_data &= ~IFE_PMC_FORCE_MDIX;
+			break;
+		case 2:
+			reg_data |= IFE_PMC_FORCE_MDIX;
+			break;
+		case 0:
+		default:
+			reg_data |= IFE_PMC_AUTO_MDIX;
+			break;
+		}
+		ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+		if (ret_val)
+			return ret_val;
+		break;
+	default:
+		break;
+	}
+	return e1000e_setup_copper_link(hw);
+}
+
+/**
+ *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to store current link speed
+ *  @duplex: pointer to store the current link duplex
+ *
+ *  Calls the generic get_speed_and_duplex to retrieve the current link
+ *  information and then calls the Kumeran lock loss workaround for links at
+ *  gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+					  u16 *duplex)
+{
+	s32 ret_val;
+
+	ret_val = e1000e_get_speed_and_duplex_copper(hw, speed, duplex);
+	if (ret_val)
+		return ret_val;
+
+	if ((hw->mac.type == e1000_ich8lan) &&
+	    (hw->phy.type == e1000_phy_igp_3) &&
+	    (*speed == SPEED_1000)) {
+		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ *  @hw: pointer to the HW structure
+ *
+ *  Work-around for 82566 Kumeran PCS lock loss:
+ *  On link status change (i.e. PCI reset, speed change) and link is up and
+ *  speed is gigabit-
+ *    0) if workaround is optionally disabled do nothing
+ *    1) wait 1ms for Kumeran link to come up
+ *    2) check Kumeran Diagnostic register PCS lock loss bit
+ *    3) if not set the link is locked (all is good), otherwise...
+ *    4) reset the PHY
+ *    5) repeat up to 10 times
+ *  Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 phy_ctrl;
+	s32 ret_val;
+	u16 i, data;
+	bool link;
+
+	if (!dev_spec->kmrn_lock_loss_workaround_enabled)
+		return 0;
+
+	/*
+	 * Make sure link is up before proceeding.  If not, just return.
+	 * Attempting this while link is negotiating fouls up link stability.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (!link)
+		return 0;
+
+	for (i = 0; i < 10; i++) {
+		/* read once to clear */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+		/* and again to get new status */
+		ret_val = e1e_rphy(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			return ret_val;
+
+		/* check for PCS lock */
+		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS))
+			return 0;
+
+		/* Issue PHY reset */
+		e1000_phy_hw_reset(hw);
+		mdelay(5);
+	}
+	/* Disable GigE link negotiation */
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+		     E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+	ew32(PHY_CTRL, phy_ctrl);
+
+	/*
+	 * Call gig speed drop workaround on Gig disable before accessing
+	 * any PHY registers
+	 */
+	e1000e_gig_downshift_workaround_ich8lan(hw);
+
+	/* unable to acquire PCS lock */
+	return -E1000_ERR_PHY;
+}
+
+/**
+ *  e1000e_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ *  @hw: pointer to the HW structure
+ *  @state: boolean value used to set the current Kumeran workaround state
+ *
+ *  If ICH8, set the current Kumeran workaround state (enabled - true
+ *  /disabled - false).
+ **/
+void e1000e_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+						 bool state)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+
+	if (hw->mac.type != e1000_ich8lan) {
+		e_dbg("Workaround applies to ICH8 only.\n");
+		return;
+	}
+
+	dev_spec->kmrn_lock_loss_workaround_enabled = state;
+}
+
+/**
+ *  e1000e_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ *  @hw: pointer to the HW structure
+ *
+ *  Workaround for 82566 power-down on D3 entry:
+ *    1) disable gigabit link
+ *    2) write VR power-down enable
+ *    3) read it back
+ *  Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000e_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+	u16 data;
+	u8  retry = 0;
+
+	if (hw->phy.type != e1000_phy_igp_3)
+		return;
+
+	/* Try the workaround twice (if needed) */
+	do {
+		/* Disable link */
+		reg = er32(PHY_CTRL);
+		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+			E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+		ew32(PHY_CTRL, reg);
+
+		/*
+		 * Call gig speed drop workaround on Gig disable before
+		 * accessing any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000e_gig_downshift_workaround_ich8lan(hw);
+
+		/* Write VR power-down enable */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		e1e_wphy(hw, IGP3_VR_CTRL, data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+		/* Read it back and test */
+		e1e_rphy(hw, IGP3_VR_CTRL, &data);
+		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+			break;
+
+		/* Issue PHY reset and repeat at most one more time */
+		reg = er32(CTRL);
+		ew32(CTRL, reg | E1000_CTRL_PHY_RST);
+		retry++;
+	} while (retry);
+}
+
+/**
+ *  e1000e_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ *  @hw: pointer to the HW structure
+ *
+ *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ *  LPLU, Gig disable, MDIC PHY reset):
+ *    1) Set Kumeran Near-end loopback
+ *    2) Clear Kumeran Near-end loopback
+ *  Should only be called for ICH8[m] devices with any 1G Phy.
+ **/
+void e1000e_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 reg_data;
+
+	if ((hw->mac.type != e1000_ich8lan) || (hw->phy.type == e1000_phy_ife))
+		return;
+
+	ret_val = e1000e_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				      &reg_data);
+	if (ret_val)
+		return;
+	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				       reg_data);
+	if (ret_val)
+		return;
+	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000e_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+				       reg_data);
+}
+
+/**
+ *  e1000_suspend_workarounds_ich8lan - workarounds needed during S0->Sx
+ *  @hw: pointer to the HW structure
+ *
+ *  During S0 to Sx transition, it is possible the link remains at gig
+ *  instead of negotiating to a lower speed.  Before going to Sx, set
+ *  'LPLU Enabled' and 'Gig Disable' to force link speed negotiation
+ *  to a lower speed.  For PCH and newer parts, the OEM bits PHY register
+ *  (LED, GbE disable and LPLU configurations) also needs to be written.
+ *  Parts that support (and are linked to a partner which supports) EEE in
+ *  100Mbps should disable LPLU since 100Mbps w/ EEE requires less power
+ *  than 10Mbps w/o EEE.
+ **/
+void e1000_suspend_workarounds_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec = &hw->dev_spec.ich8lan;
+	u32 phy_ctrl;
+	s32 ret_val;
+
+	phy_ctrl = er32(PHY_CTRL);
+	phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU | E1000_PHY_CTRL_GBE_DISABLE;
+
+	if (hw->phy.type == e1000_phy_i217) {
+		u16 phy_reg;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+
+		if (!dev_spec->eee_disable) {
+			u16 eee_advert;
+
+			ret_val = e1e_wphy_locked(hw, I82579_EMI_ADDR,
+						  I217_EEE_ADVERTISEMENT);
+			if (ret_val)
+				goto release;
+			e1e_rphy_locked(hw, I82579_EMI_DATA, &eee_advert);
+
+			/* Disable LPLU if both link partners support 100BaseT
+			 * EEE and 100Full is advertised on both ends of the
+			 * link.
+			 */
+			if ((eee_advert & I217_EEE_100_SUPPORTED) &&
+			    (dev_spec->eee_lp_ability &
+			     I217_EEE_100_SUPPORTED) &&
+			    (hw->phy.autoneg_advertised & ADVERTISE_100_FULL))
+				phy_ctrl &= ~(E1000_PHY_CTRL_D0A_LPLU |
+					      E1000_PHY_CTRL_NOND0A_LPLU);
+		}
+
+		/* For i217 Intel Rapid Start Technology support,
+		 * when the system is going into Sx and no manageability engine
+		 * is present, the driver must configure proxy to reset only on
+		 * power good.  LPI (Low Power Idle) state must also reset only
+		 * on power good, as well as the MTA (Multicast table array).
+		 * The SMBus release must also be disabled on LCD reset.
+		 */
+		if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+
+			/* Enable proxy to reset only on power good. */
+			e1e_rphy_locked(hw, I217_PROXY_CTRL, &phy_reg);
+			phy_reg |= I217_PROXY_CTRL_AUTO_DISABLE;
+			e1e_wphy_locked(hw, I217_PROXY_CTRL, phy_reg);
+
+			/* Set bit enable LPI (EEE) to reset only on
+			 * power good.
+			 */
+			e1e_rphy_locked(hw, I217_SxCTRL, &phy_reg);
+			phy_reg |= I217_SxCTRL_MASK;
+			e1e_wphy_locked(hw, I217_SxCTRL, phy_reg);
+
+			/* Disable the SMB release on LCD reset. */
+			e1e_rphy_locked(hw, I217_MEMPWR, &phy_reg);
+			phy_reg &= ~I217_MEMPWR;
+			e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
+		}
+
+		/* Enable MTA to reset for Intel Rapid Start Technology
+		 * Support
+		 */
+		e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+		phy_reg |= I217_CGFREG_MASK;
+		e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
+
+release:
+		hw->phy.ops.release(hw);
+	}
+out:
+	ew32(PHY_CTRL, phy_ctrl);
+
+	if (hw->mac.type == e1000_ich8lan)
+		e1000e_gig_downshift_workaround_ich8lan(hw);
+
+	if (hw->mac.type >= e1000_pchlan) {
+		e1000_oem_bits_config_ich8lan(hw, false);
+		e1000_phy_hw_reset_ich8lan(hw);
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return;
+		e1000_write_smbus_addr(hw);
+		hw->phy.ops.release(hw);
+	}
+}
+
+/**
+ *  e1000_resume_workarounds_pchlan - workarounds needed during Sx->S0
+ *  @hw: pointer to the HW structure
+ *
+ *  During Sx to S0 transitions on non-managed devices or managed devices
+ *  on which PHY resets are not blocked, if the PHY registers cannot be
+ *  accessed properly by the s/w, toggle the LANPHYPC value to power cycle
+ *  the PHY.
+ *  On i217, setup Intel Rapid Start Technology.
+ **/
+void e1000_resume_workarounds_pchlan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	if (hw->mac.type != e1000_pch2lan)
+		return;
+
+	fwsm = er32(FWSM);
+	if (!(fwsm & E1000_ICH_FWSM_FW_VALID) || !e1000_check_reset_block(hw)) {
+		u16 phy_id1, phy_id2;
+		s32 ret_val;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val) {
+			e_dbg("Failed to acquire PHY semaphore in resume\n");
+			return;
+		}
+
+		/* For i217 Intel Rapid Start Technology support when the system
+		 * is transitioning from Sx and no manageability engine is present
+		 * configure SMBus to restore on reset, disable proxy, and enable
+		 * the reset on MTA (Multicast table array).
+		 */
+		if (hw->phy.type == e1000_phy_i217) {
+			u16 phy_reg;
+
+			ret_val = hw->phy.ops.acquire(hw);
+			if (ret_val) {
+				e_dbg("Failed to setup iRST\n");
+				return;
+			}
+
+			if (!(er32(FWSM) & E1000_ICH_FWSM_FW_VALID)) {
+				/* Restore clear on SMB if no manageability
+				 * engine is present
+				 */
+				ret_val = e1e_rphy_locked(hw, I217_MEMPWR,
+							  &phy_reg);
+				if (ret_val)
+					goto _release;
+				phy_reg |= I217_MEMPWR_MASK;
+				e1e_wphy_locked(hw, I217_MEMPWR, phy_reg);
+
+				/* Disable Proxy */
+				e1e_wphy_locked(hw, I217_PROXY_CTRL, 0);
+			}
+			/* Enable reset on MTA */
+			ret_val = e1e_rphy_locked(hw, I217_CGFREG, &phy_reg);
+			if (ret_val)
+				goto _release;
+			phy_reg &= ~I217_CGFREG_MASK;
+			e1e_wphy_locked(hw, I217_CGFREG, phy_reg);
+_release:
+			if (ret_val)
+				e_dbg("Error %d in resume workarounds\n",
+				      ret_val);
+			hw->phy.ops.release(hw);
+		}
+
+		/* Test access to the PHY registers by reading the ID regs */
+		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID1, &phy_id1);
+		if (ret_val)
+			goto release;
+		ret_val = hw->phy.ops.read_reg_locked(hw, PHY_ID2, &phy_id2);
+		if (ret_val)
+			goto release;
+
+		if (hw->phy.id == ((u32)(phy_id1 << 16) |
+				   (u32)(phy_id2 & PHY_REVISION_MASK)))
+			goto release;
+
+		e1000_toggle_lanphypc_value_ich8lan(hw);
+
+		hw->phy.ops.release(hw);
+		msleep(50);
+		e1000_phy_hw_reset(hw);
+		msleep(50);
+		return;
+	}
+
+release:
+	hw->phy.ops.release(hw);
+
+	return;
+}
+
+/**
+ *  e1000_cleanup_led_ich8lan - Restore the default LED operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED, 0);
+
+	ew32(LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ *  e1000_led_on_ich8lan - Turn LEDs on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn on the LEDs.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode2);
+	return 0;
+}
+
+/**
+ *  e1000_led_off_ich8lan - Turn LEDs off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn off the LEDs.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+	if (hw->phy.type == e1000_phy_ife)
+		return e1e_wphy(hw, IFE_PHY_SPECIAL_CONTROL_LED,
+				(IFE_PSCL_PROBE_MODE |
+				 IFE_PSCL_PROBE_LEDS_OFF));
+
+	ew32(LEDCTL, hw->mac.ledctl_mode1);
+	return 0;
+}
+
+/**
+ *  e1000_setup_led_pchlan - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use.
+ **/
+static s32 e1000_setup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_mode1);
+}
+
+/**
+ *  e1000_cleanup_led_pchlan - Restore the default LED operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_pchlan(struct e1000_hw *hw)
+{
+	return e1e_wphy(hw, HV_LED_CONFIG, (u16)hw->mac.ledctl_default);
+}
+
+/**
+ *  e1000_led_on_pchlan - Turn LEDs on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn on the LEDs.
+ **/
+static s32 e1000_led_on_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode2;
+	u32 i, led;
+
+	/*
+	 * If there is no link, turn the LED on by setting the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode2: a link-up
+	 * LED is off when link is down, so inverting it turns it on.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ *  e1000_led_off_pchlan - Turn LEDs off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn off the LEDs.
+ **/
+static s32 e1000_led_off_pchlan(struct e1000_hw *hw)
+{
+	u16 data = (u16)hw->mac.ledctl_mode1;
+	u32 i, led;
+
+	/*
+	 * If there is no link, turn the LED off by clearing the invert bit
+	 * for each LED whose mode is "link_up" in ledctl_mode1.
+	 */
+	if (!(er32(STATUS) & E1000_STATUS_LU)) {
+		for (i = 0; i < 3; i++) {
+			led = (data >> (i * 5)) & E1000_PHY_LED0_MASK;
+			if ((led & E1000_PHY_LED0_MODE_MASK) !=
+			    E1000_LEDCTL_MODE_LINK_UP)
+				continue;
+			if (led & E1000_PHY_LED0_IVRT)
+				data &= ~(E1000_PHY_LED0_IVRT << (i * 5));
+			else
+				data |= (E1000_PHY_LED0_IVRT << (i * 5));
+		}
+	}
+
+	return e1e_wphy(hw, HV_LED_CONFIG, data);
+}
+
+/**
+ *  e1000_get_cfg_done_ich8lan - Read config done bit after Full or PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Read appropriate register for the config done bit for completion status
+ *  and configure the PHY through s/w for EEPROM-less parts.
+ *
+ *  NOTE: some EEPROM-less silicon will fail trying to read the config done
+ *  bit, so only an error is logged and initialization continues.  If we
+ *  were to return an error, EEPROM-less silicon would never be able to be
+ *  reset or change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 bank = 0;
+	u32 status;
+
+	e1000e_get_cfg_done(hw);
+
+	/* Wait for indication from h/w that it has completed basic config */
+	if (hw->mac.type >= e1000_ich10lan) {
+		e1000_lan_init_done_ich8lan(hw);
+	} else {
+		ret_val = e1000e_get_auto_rd_done(hw);
+		if (ret_val) {
+			/*
+			 * When auto config read does not complete, do not
+			 * return with an error. This can happen in situations
+			 * where there is no eeprom and prevents getting link.
+			 */
+			e_dbg("Auto Read Done did not complete\n");
+			ret_val = 0;
+		}
+	}
+
+	/* Clear PHY Reset Asserted bit */
+	status = er32(STATUS);
+	if (status & E1000_STATUS_PHYRA)
+		ew32(STATUS, status & ~E1000_STATUS_PHYRA);
+	else
+		e_dbg("PHY Reset Asserted not set - needs delay\n");
+
+	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
+	if (hw->mac.type <= e1000_ich9lan) {
+		if (((er32(EECD) & E1000_EECD_PRES) == 0) &&
+		    (hw->phy.type == e1000_phy_igp_3)) {
+			e1000e_phy_init_script_igp3(hw);
+		}
+	} else {
+		if (e1000_valid_nvm_bank_detect_ich8lan(hw, &bank)) {
+			/* Maybe we should do a basic PHY config */
+			e_dbg("EEPROM not present\n");
+			ret_val = -E1000_ERR_CONFIG;
+		}
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off the link during
+ * a driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(hw->mac.ops.check_mng_mode(hw) ||
+	      hw->phy.ops.check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears hardware counters specific to the silicon family and calls
+ *  clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+	u16 phy_data;
+	s32 ret_val;
+
+	e1000e_clear_hw_cntrs_base(hw);
+
+	er32(ALGNERRC);
+	er32(RXERRC);
+	er32(TNCRS);
+	er32(CEXTERR);
+	er32(TSCTC);
+	er32(TSCTFC);
+
+	er32(MGTPRC);
+	er32(MGTPDC);
+	er32(MGTPTC);
+
+	er32(IAC);
+	er32(ICRXOC);
+
+	/* Clear PHY statistics registers */
+	if ((hw->phy.type == e1000_phy_82578) ||
+	    (hw->phy.type == e1000_phy_82579) ||
+	    (hw->phy.type == e1000_phy_i217) ||
+	    (hw->phy.type == e1000_phy_82577)) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return;
+		ret_val = hw->phy.ops.set_page(hw,
+					       HV_STATS_PAGE << IGP_PAGE_SHIFT);
+		if (ret_val)
+			goto release;
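+		/* Each PHY statistics counter is split into an upper and a
+		 * lower 16-bit register; like the MAC counters they are
+		 * cleared by reading them.
+		 */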
+		hw->phy.ops.read_reg_page(hw, HV_SCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_SCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_ECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_ECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_MCC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_MCC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_LATECOL_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_LATECOL_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_COLC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_COLC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_DC_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_DC_LOWER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_TNCRS_UPPER, &phy_data);
+		hw->phy.ops.read_reg_page(hw, HV_TNCRS_LOWER, &phy_data);
+release:
+		hw->phy.ops.release(hw);
+	}
+}
+
+static const struct e1000_mac_operations ich8_mac_ops = {
+	.id_led_init		= e1000e_id_led_init,
+	/* check_mng_mode dependent on mac type */
+	.check_for_link		= e1000_check_for_copper_link_ich8lan,
+	/* cleanup_led dependent on mac type */
+	.clear_hw_cntrs		= e1000_clear_hw_cntrs_ich8lan,
+	.get_bus_info		= e1000_get_bus_info_ich8lan,
+	.set_lan_id		= e1000_set_lan_id_single_port,
+	.get_link_up_info	= e1000_get_link_up_info_ich8lan,
+	/* led_on dependent on mac type */
+	/* led_off dependent on mac type */
+	.update_mc_addr_list	= e1000e_update_mc_addr_list_generic,
+	.reset_hw		= e1000_reset_hw_ich8lan,
+	.init_hw		= e1000_init_hw_ich8lan,
+	.setup_link		= e1000_setup_link_ich8lan,
+	.setup_physical_interface = e1000_setup_copper_link_ich8lan,
+	/* id_led_init dependent on mac type */
+};
+
+static const struct e1000_phy_operations ich8_phy_ops = {
+	.acquire		= e1000_acquire_swflag_ich8lan,
+	.check_reset_block	= e1000_check_reset_block_ich8lan,
+	.commit			= NULL,
+	.get_cfg_done		= e1000_get_cfg_done_ich8lan,
+	.get_cable_length	= e1000e_get_cable_length_igp_2,
+	.read_reg		= e1000e_read_phy_reg_igp,
+	.release		= e1000_release_swflag_ich8lan,
+	.reset			= e1000_phy_hw_reset_ich8lan,
+	.set_d0_lplu_state	= e1000_set_d0_lplu_state_ich8lan,
+	.set_d3_lplu_state	= e1000_set_d3_lplu_state_ich8lan,
+	.write_reg		= e1000e_write_phy_reg_igp,
+};
+
+static const struct e1000_nvm_operations ich8_nvm_ops = {
+	.acquire		= e1000_acquire_nvm_ich8lan,
+	.read			= e1000_read_nvm_ich8lan,
+	.release		= e1000_release_nvm_ich8lan,
+	.update			= e1000_update_nvm_checksum_ich8lan,
+	.valid_led_default	= e1000_valid_led_default_ich8lan,
+	.validate		= e1000_validate_nvm_checksum_ich8lan,
+	.write			= e1000_write_nvm_ich8lan,
+};
+
+const struct e1000_info e1000_ich8_info = {
+	.mac			= e1000_ich8lan,
+	.flags			= FLAG_HAS_WOL
+				  | FLAG_IS_ICH
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_APME_IN_WUC,
+	.pba			= 8,
+	.max_hw_frame_size	= ETH_FRAME_LEN + ETH_FCS_LEN,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich9_info = {
+	.mac			= e1000_ich9lan,
+	.flags			= FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_ERT
+				  | FLAG_HAS_FLASH
+				  | FLAG_APME_IN_WUC,
+	.pba			= 10,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_ich10_info = {
+	.mac			= e1000_ich10lan,
+	.flags			= FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_ERT
+				  | FLAG_HAS_FLASH
+				  | FLAG_APME_IN_WUC,
+	.pba			= 10,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_info = {
+	.mac			= e1000_pchlan,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_DISABLE_FC_PAUSE_TIME /* errata */
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS,
+	.pba			= 26,
+	.max_hw_frame_size	= 4096,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch2_info = {
+	.mac			= e1000_pch2lan,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS
+				  | FLAG2_HAS_EEE,
+	.pba			= 26,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
+
+const struct e1000_info e1000_pch_lpt_info = {
+	.mac			= e1000_pch_lpt,
+	.flags			= FLAG_IS_ICH
+				  | FLAG_HAS_WOL
+				  | FLAG_HAS_CTRLEXT_ON_LOAD
+				  | FLAG_HAS_AMT
+				  | FLAG_HAS_FLASH
+				  | FLAG_HAS_JUMBO_FRAMES
+				  | FLAG_APME_IN_WUC,
+	.flags2			= FLAG2_HAS_PHY_STATS
+				  | FLAG2_HAS_EEE,
+	.pba			= 26,
+	.max_hw_frame_size	= DEFAULT_JUMBO,
+	.get_variants		= e1000_get_variants_ich8lan,
+	.mac_ops		= &ich8_mac_ops,
+	.phy_ops		= &ich8_phy_ops,
+	.nvm_ops		= &ich8_nvm_ops,
+};
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c
new file mode 100644
index 0000000..fbdccdc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/lib.c
@@ -0,0 +1,2693 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000.h"
+
+enum e1000_mng_mode {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG		0x20000000
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE		0x544D4149
+
+/**
+ *  e1000e_get_bus_info_pcie - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000e_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_bus_info *bus = &hw->bus;
+	struct e1000_adapter *adapter = hw->adapter;
+	u16 pcie_link_status, cap_offset;
+
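+	/* pci_pcie_cap() returns 0 when the device exposes no PCI Express
+	 * capability (e.g. ICH parts); the width is then left unknown and
+	 * fixed up by the MAC-specific get_bus_info routine.
+	 */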
+	cap_offset = pci_pcie_cap(adapter->pdev);
+	if (!cap_offset) {
+		bus->width = e1000_bus_width_unknown;
+	} else {
+		pci_read_config_word(adapter->pdev,
+				     cap_offset + PCIE_LINK_STATUS,
+				     &pcie_link_status);
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCIE_LINK_WIDTH_MASK) >>
+						    PCIE_LINK_WIDTH_SHIFT);
+	}
+
+	mac->ops.set_lan_id(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_set_lan_id_multi_port_pcie - Set LAN id for PCIe multiple port devices
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines the LAN function id by reading memory-mapped registers
+ *  and swaps the port value if requested.
+ **/
+void e1000_set_lan_id_multi_port_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	u32 reg;
+
+	/*
+	 * The status register reports the correct function number
+	 * for the device regardless of function swap state.
+	 */
+	reg = er32(STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+}
+
+/**
+ *  e1000_set_lan_id_single_port - Set LAN id for a single port device
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the LAN function id to zero for a single port device.
+ **/
+void e1000_set_lan_id_single_port(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+
+	bus->func = 0;
+}
+
+/**
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+		e1e_flush();
+	}
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	e1e_flush();
+}
+
+/**
+ *  e1000e_init_rx_addrs - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: receive address registers
+ *
+ *  Setup the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void e1000e_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+	u8 mac_addr[ETH_ALEN] = {0};
+
+	/* Setup the receive address */
+	e_dbg("Programming MAC Address into RAR[0]\n");
+
+	e1000e_rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	e_dbg("Clearing RAR[1-%u]\n", rar_count - 1);
+	for (i = 1; i < rar_count; i++)
+		e1000e_rar_set(hw, mac_addr, i);
+}
+
+/**
+ *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the NVM for an alternate MAC address.  An alternate MAC address
+ *  can be set up by pre-boot software and must be treated like a permanent
+ *  address, overriding the actual permanent MAC address. If an
+ *  alternate MAC address is found it is programmed into RAR0, replacing
+ *  the permanent address that was installed into RAR0 by the Si on reset.
+ *  This function will return SUCCESS unless it encounters an error while
+ *  reading the EEPROM.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = 0;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ALEN];
+
+	ret_val = e1000_read_nvm(hw, NVM_COMPAT, 1, &nvm_data);
+	if (ret_val)
+		goto out;
+
+	/* Check for LOM (vs. NIC) or one of the valid mezzanine cards */
+	if (!((nvm_data & NVM_COMPAT_LOM) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_DUAL) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES_QUAD) ||
+	      (hw->adapter->pdev->device == E1000_DEV_ID_82571EB_SERDES)))
+		goto out;
+
+	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+	                         &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+	    (nvm_alt_mac_addr_offset == 0x0000))
+		/* There is no Alternate MAC Address */
+		goto out;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
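+	/* Each 16-bit NVM word holds two bytes of the MAC address, low
+	 * byte first, so ETH_ALEN / 2 words are read.
+	 */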
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (is_multicast_ether_addr(alt_mac_addr)) {
+		e_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+		goto out;
+	}
+
+	/*
+	 * We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	e1000e_rar_set(hw, alt_mac_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_rar_set - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void e1000e_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+	/*
+	 * HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] |
+		   ((u32) addr[1] << 8) |
+		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	/*
+	 * Some bridges will combine consecutive 32-bit writes into
+	 * a single burst write, which will malfunction on some parts.
+	 * The flushes avoid this.
+	 */
+	ew32(RAL(index), rar_low);
+	e1e_flush();
+	ew32(RAH(index), rar_high);
+	e1e_flush();
+}
+
+/**
+ *  e1000_hash_mc_addr - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  See
+ *  e1000_mta_set_generic()
+ **/
+static u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	u32 hash_value, hash_mask;
+	u8 bit_shift = 0;
+
+	/* Register count multiplied by bits per register */
+	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+	/*
+	 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask.
+	 */
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;
+
+	/*
+	 * The portion of the address that is used for the hash table
+	 * is determined by the mc_filter_type setting.
+	 * The algorithm is such that there is a total of 8 bits of shifting.
+	 * The bit_shift for a mc_filter_type of 0 represents the number of
+	 * left-shifts where the MSB of mc_addr[5] would still fall within
+	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
+	 * of 8 bits of shifting, then mc_addr[4] will shift right the
+	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+	 * cases are a variation of this algorithm...essentially raising the
+	 * number of bits to shift mc_addr[5] left, while still keeping the
+	 * 8-bit shifting total.
+	 *
+	 * For example, given the following Destination MAC Address and an
+	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+	 * we can see that the bit_shift for case 0 is 4.  These are the hash
+	 * values resulting from each mc_filter_type...
+	 * [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB		 MSB
+	 *
+	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+	 */
+	switch (hw->mac.mc_filter_type) {
+	default:
+	case 0:
+		break;
+	case 1:
+		bit_shift += 1;
+		break;
+	case 2:
+		bit_shift += 2;
+		break;
+	case 3:
+		bit_shift += 4;
+		break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+				  (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
+
+/**
+ *  e1000e_update_mc_addr_list_generic - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates entire Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void e1000e_update_mc_addr_list_generic(struct e1000_hw *hw,
+					u8 *mc_addr_list, u32 mc_addr_count)
+{
+	u32 hash_value, hash_bit, hash_reg;
+	int i;
+
+	/* clear mta_shadow */
+	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+	/* update mta_shadow from mc_addr_list */
+	for (i = 0; (u32) i < mc_addr_count; i++) {
+		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+
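+		/* The low 5 bits of the hash select a bit within one 32-bit
+		 * MTA register; the remaining bits select the register itself.
+		 */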
+		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+
+		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		mc_addr_list += (ETH_ALEN);
+	}
+
+	/* replace the entire MTA table */
+	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, hw->mac.mta_shadow[i]);
+	e1e_flush();
+}
+
+/**
+ *  e1000e_clear_hw_cntrs_base - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000e_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+	er32(CRCERRS);
+	er32(SYMERRS);
+	er32(MPC);
+	er32(SCC);
+	er32(ECOL);
+	er32(MCC);
+	er32(LATECOL);
+	er32(COLC);
+	er32(DC);
+	er32(SEC);
+	er32(RLEC);
+	er32(XONRXC);
+	er32(XONTXC);
+	er32(XOFFRXC);
+	er32(XOFFTXC);
+	er32(FCRUC);
+	er32(GPRC);
+	er32(BPRC);
+	er32(MPRC);
+	er32(GPTC);
+	er32(GORCL);
+	er32(GORCH);
+	er32(GOTCL);
+	er32(GOTCH);
+	er32(RNBC);
+	er32(RUC);
+	er32(RFC);
+	er32(ROC);
+	er32(RJC);
+	er32(TORL);
+	er32(TORH);
+	er32(TOTL);
+	er32(TOTH);
+	er32(TPR);
+	er32(TPT);
+	er32(MPTC);
+	er32(BPTC);
+}
+
+/**
+ *  e1000e_check_for_copper_link - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 e1000e_check_for_copper_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status)
+		return 0;
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link)
+		return ret_val; /* No link detected */
+
+	mac->get_link_status = false;
+
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	e1000e_check_downshift(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		return ret_val;
+	}
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000e_config_collision_dist(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000e_config_fc_after_link_up(hw);
+	if (ret_val)
+		e_dbg("Error configuring flow control\n");
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_check_for_fiber_link - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_fiber_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	status = er32(STATUS);
+	rxcw = er32(RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), the cable is plugged in (we have signal),
+	 * and our link partner is not trying to auto-negotiate with us (we
+	 * are receiving idles or data), we need to force link up. We also
+	 * need to give auto-negotiation time to complete, in case the cable
+	 * was just plugged in. The autoneg_failed flag does this.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
+	    (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			return 0;
+		}
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = er32(CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		ew32(CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000e_config_fc_after_link_up(hw);
+		if (ret_val) {
+			e_dbg("Error configuring flow control\n");
+			return ret_val;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+		ew32(TXCW, mac->txcw);
+		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = true;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_check_for_serdes_link - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000e_check_for_serdes_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+	status = er32(STATUS);
+	rxcw = er32(RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), and our link partner is not trying to
+	 * auto-negotiate with us (we are receiving idles or data),
+	 * we need to force link up. We also need to give auto-negotiation
+	 * time to complete.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+	if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			return 0;
+		}
+		e_dbg("NOT Rx'ing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		ew32(TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = er32(CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		ew32(CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000e_config_fc_after_link_up(hw);
+		if (ret_val) {
+			e_dbg("Error configuring flow control\n");
+			return ret_val;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		e_dbg("Rx'ing /C/, enable AutoNeg and stop forcing link.\n");
+		ew32(TXCW, mac->txcw);
+		ew32(CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = true;
+	} else if (!(E1000_TXCW_ANE & er32(TXCW))) {
+		/*
+		 * If we force link for non-auto-negotiation switch, check
+		 * link status based on MAC synchronization for internal
+		 * serdes media type.
+		 */
+		/* SYNCH bit and IV bit are sticky. */
+		udelay(10);
+		rxcw = er32(RXCW);
+		if (rxcw & E1000_RXCW_SYNCH) {
+			if (!(rxcw & E1000_RXCW_IV)) {
+				mac->serdes_has_link = true;
+				e_dbg("SERDES: Link up - forced.\n");
+			}
+		} else {
+			mac->serdes_has_link = false;
+			e_dbg("SERDES: Link down - force failed.\n");
+		}
+	}
+
+	if (E1000_TXCW_ANE & er32(TXCW)) {
+		status = er32(STATUS);
+		if (status & E1000_STATUS_LU) {
+			/* SYNCH bit and IV bit are sticky, so reread rxcw.  */
+			udelay(10);
+			rxcw = er32(RXCW);
+			if (rxcw & E1000_RXCW_SYNCH) {
+				if (!(rxcw & E1000_RXCW_IV)) {
+					mac->serdes_has_link = true;
+					e_dbg("SERDES: Link up - autoneg "
+					   "completed successfully.\n");
+				} else {
+					mac->serdes_has_link = false;
+					e_dbg("SERDES: Link down - invalid "
+					   "codewords detected in autoneg.\n");
+				}
+			} else {
+				mac->serdes_has_link = false;
+				e_dbg("SERDES: Link down - no sync.\n");
+			}
+		} else {
+			mac->serdes_has_link = false;
+			e_dbg("SERDES: Link down - autoneg failed\n");
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_set_default_fc_generic - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+static s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 nvm_data;
+
+	/*
+	 * Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
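+	/* Neither bit set => no flow control, ASM_DIR alone => Tx pause
+	 * only, any combination including the PAUSE bit => full.
+	 */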
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.requested_mode = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.requested_mode = e1000_fc_tx_pause;
+	else
+		hw->fc.requested_mode = e1000_fc_full;
+
+	return 0;
+}
+
+/**
+ *  e1000e_setup_link - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 e1000e_setup_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+
+	/*
+	 * In the case of the phy reset being blocked, we already have a link.
+	 * We do not need to set it up again.
+	 */
+	if (e1000_check_reset_block(hw))
+		return 0;
+
+	/*
+	 * If requested flow control is set to default, set flow control
+	 * based on the EEPROM flow control settings.
+	 */
+	if (hw->fc.requested_mode == e1000_fc_default) {
+		ret_val = e1000_set_default_fc_generic(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/*
+	 * Save off the requested flow control mode for use later.  Depending
+	 * on the link partner's capabilities, we may or may not use this mode.
+	 */
+	hw->fc.current_mode = hw->fc.requested_mode;
+
+	e_dbg("After fix-ups FlowControl is now = %x\n",
+		hw->fc.current_mode);
+
+	/* Call the necessary media_type subroutine to configure the link. */
+	ret_val = mac->ops.setup_physical_interface(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	e_dbg("Initializing the Flow Control address, type and timer regs\n");
+	ew32(FCT, FLOW_CONTROL_TYPE);
+	ew32(FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	ew32(FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	ew32(FCTTV, hw->fc.pause_time);
+
+	return e1000e_set_fc_watermarks(hw);
+}
+
+/**
+ *  e1000_commit_fc_settings_generic - Configure flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Write the flow control settings to the Transmit Config Word Register (TXCW)
+ *  based on the flow control settings in e1000_mac_info.
+ **/
+static s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 txcw;
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the device accordingly.  If auto-negotiation is enabled, then
+	 * software will have to set the "PAUSE" bits to the correct value in
+	 * the Transmit Config Word Register (TXCW) and re-start auto-
+	 * negotiation.  However, if auto-negotiation is disabled, then
+	 * software will have to manually configure the two flow control enable
+	 * bits in the CTRL register.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames,
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames but we
+	 *          do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 */
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		/* Flow control completely disabled by a software over-ride. */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is disabled
+		 * by a software over-ride. Since there really isn't a way to
+		 * advertise that we are capable of Rx Pause ONLY, we will
+		 * advertise that we support both symmetric and asymmetric Rx
+		 * PAUSE.  Later, we will disable the adapter's ability to send
+		 * PAUSE frames.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is disabled,
+		 * by a software over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
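+	/*
+	 * In every mode above, ANE and FD are advertised in TXCW; only the
+	 * PAUSE/ASM_DIR bits vary with the requested flow control mode.
+	 */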
+	ew32(TXCW, txcw);
+	mac->txcw = txcw;
+
+	return 0;
+}
+
+/**
+ *  e1000_poll_fiber_serdes_link_generic - Poll for link up
+ *  @hw: pointer to the HW structure
+ *
+ *  Polls for link up by reading the status register, if link fails to come
+ *  up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+static s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 i, status;
+	s32 ret_val;
+
+	/*
+	 * If we have a signal (the cable is plugged in, or assumed true for
+	 * serdes media) then poll for a "Link-Up" indication in the Device
+	 * Status Register.  Time out if a link isn't seen within 500
+	 * milliseconds (auto-negotiation should complete in less than that,
+	 * even if the other end is doing it in SW).
+	 */
+	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+		usleep_range(10000, 20000);
+		status = er32(STATUS);
+		if (status & E1000_STATUS_LU)
+			break;
+	}
+	if (i == FIBER_LINK_UP_LIMIT) {
+		e_dbg("Never got a valid link from auto-neg!!!\n");
+		mac->autoneg_failed = 1;
+		/*
+		 * AutoNeg failed to achieve a link, so we'll call
+		 * mac->check_for_link. This routine will force the
+		 * link up if we detect a signal. This will allow us to
+		 * communicate with non-autonegotiating link partners.
+		 */
+		ret_val = mac->ops.check_for_link(hw);
+		if (ret_val) {
+			e_dbg("Error while checking for link\n");
+			return ret_val;
+		}
+		mac->autoneg_failed = 0;
+	} else {
+		mac->autoneg_failed = 0;
+		e_dbg("Valid Link Found\n");
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_setup_fiber_serdes_link - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes
+ *  links.  Upon successful setup, poll for link.
+ **/
+s32 e1000e_setup_fiber_serdes_link(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	ctrl = er32(CTRL);
+
+	/* Take the link out of reset */
+	ctrl &= ~E1000_CTRL_LRST;
+
+	e1000e_config_collision_dist(hw);
+
+	ret_val = e1000_commit_fc_settings_generic(hw);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Since auto-negotiation is enabled, take the link out of reset (the
+	 * link will be in reset, because we previously reset the chip). This
+	 * will restart auto-negotiation.  If auto-negotiation is successful
+	 * then the link-up status bit will be set and the flow control enable
+	 * bits (RFCE and TFCE) will be set according to their negotiated value.
+	 */
+	e_dbg("Auto-negotiation enabled\n");
+
+	ew32(CTRL, ctrl);
+	e1e_flush();
+	usleep_range(1000, 2000);
+
+	/*
+	 * For these adapters, the SW definable pin 1 is set when the optics
+	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
+	 * indication.
+	 */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+	    (er32(CTRL) & E1000_CTRL_SWDPIN1)) {
+		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+	} else {
+		e_dbg("No signal detected\n");
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000e_config_collision_dist(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	tctl = er32(TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	ew32(TCTL, tctl);
+	e1e_flush();
+}
+
+/**
+ *  e1000e_set_fc_watermarks - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then set XON frame
+ *  transmission as well.
+ **/
+s32 e1000e_set_fc_watermarks(struct e1000_hw *hw)
+{
+	u32 fcrtl = 0, fcrth = 0;
+
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
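+	/*
+	 * Note: e1000_fc_tx_pause and e1000_fc_full both have the Tx bit
+	 * set, so this bitwise test covers every mode in which we may send
+	 * PAUSE frames.
+	 */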
+	if (hw->fc.current_mode & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		fcrtl |= E1000_FCRTL_XONE;
+		fcrth = hw->fc.high_water;
+	}
+	ew32(FCRTL, fcrtl);
+	ew32(FCRTH, fcrth);
+
+	return 0;
+}
+
+/**
+ *  e1000e_force_mac_fc - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000e_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	ctrl = er32(CTRL);
+
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disables flow control
+	 * according to the "hw->fc.current_mode" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	e_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ew32(CTRL, ctrl);
+
+	return 0;
+}
+
+/**
+ *  e1000e_config_fc_after_link_up - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 e1000e_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = 0;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+	/*
+	 * Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes)
+			ret_val = e1000e_force_mac_fc(hw);
+	} else {
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ret_val = e1000e_force_mac_fc(hw);
+	}
+
+	if (ret_val) {
+		e_dbg("Error forcing flow control settings\n");
+		return ret_val;
+	}
+
+	/*
+	 * Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/*
+		 * Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+		ret_val = e1e_rphy(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			return ret_val;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+			e_dbg("Copper PHY and Auto Neg has not completed.\n");
+			return ret_val;
+		}
+
+		/*
+		 * The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto_Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
+		 */
+		ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_nway_adv_reg);
+		if (ret_val)
+			return ret_val;
+		ret_val =
+		    e1e_rphy(hw, PHY_LP_ABILITY, &mii_nway_lp_ability_reg);
+		if (ret_val)
+			return ret_val;
+
+		/*
+		 * Two bits in the Auto Negotiation Advertisement Register
+		 * (Address 4) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (Address 5) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *
+		 */
+		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx ONLY
+			 * of pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise Rx
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == e1000_fc_full) {
+				hw->fc.current_mode = e1000_fc_full;
+				e_dbg("Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = e1000_fc_rx_pause;
+				e_dbg("Flow Control = Rx PAUSE frames only.\n");
+			}
+		}
+		/*
+		 * For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_tx_pause;
+			e_dbg("Flow Control = Tx PAUSE frames only.\n");
+		}
+		/*
+		 * For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			e_dbg("Flow Control = Rx PAUSE frames only.\n");
+		} else {
+			/*
+			 * Per the IEEE spec, at this point flow control
+			 * should be disabled.
+			 */
+			hw->fc.current_mode = e1000_fc_none;
+			e_dbg("Flow Control = NONE.\n");
+		}
+
+		/*
+		 * Now we need to do one last check...  If we auto-
+		 * negotiated to HALF DUPLEX, flow control should not be
+		 * enabled per IEEE 802.3 spec.
+		 */
+		ret_val = mac->ops.get_link_up_info(hw, &speed, &duplex);
+		if (ret_val) {
+			e_dbg("Error getting link speed and duplex\n");
+			return ret_val;
+		}
+
+		if (duplex == HALF_DUPLEX)
+			hw->fc.current_mode = e1000_fc_none;
+
+		/*
+		 * Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		ret_val = e1000e_force_mac_fc(hw);
+		if (ret_val) {
+			e_dbg("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 e1000e_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	u32 status;
+
+	status = er32(STATUS);
+	if (status & E1000_STATUS_SPEED_1000)
+		*speed = SPEED_1000;
+	else if (status & E1000_STATUS_SPEED_100)
+		*speed = SPEED_100;
+	else
+		*speed = SPEED_10;
+
+	if (status & E1000_STATUS_FD)
+		*duplex = FULL_DUPLEX;
+	else
+		*duplex = HALF_DUPLEX;
+
+	e_dbg("%u Mbps, %s Duplex\n",
+	      *speed == SPEED_1000 ? 1000 : *speed == SPEED_100 ? 100 : 10,
+	      *duplex == FULL_DUPLEX ? "Full" : "Half");
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_speed_and_duplex_fiber_serdes - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Sets the speed and duplex to gigabit full duplex (the only possible option)
+ *  for fiber/serdes links.
+ **/
+s32 e1000e_get_speed_and_duplex_fiber_serdes(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	*speed = SPEED_1000;
+	*duplex = FULL_DUPLEX;
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_hw_semaphore - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000e_get_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
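+	/*
+	 * Acquisition is two-stage: first wait for the SMBI bit to clear
+	 * (no other agent holds the semaphore), then set SWESMBI and read
+	 * it back to confirm that we, and not firmware, latched it.
+	 */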
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = er32(SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		e_dbg("Driver can't access device - SMBI bit is set.\n");
+		return -E1000_ERR_NVM;
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (er32(SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		e1000e_put_hw_semaphore(hw);
+		e_dbg("Driver can't access the NVM\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_put_hw_semaphore - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000e_put_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = er32(SWSM);
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+	ew32(SWSM, swsm);
+}
+
+/**
+ *  e1000e_get_auto_rd_done - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000e_get_auto_rd_done(struct e1000_hw *hw)
+{
+	s32 i = 0;
+
+	while (i < AUTO_READ_DONE_TIMEOUT) {
+		if (er32(EECD) & E1000_EECD_AUTO_RD)
+			break;
+		usleep_range(1000, 2000);
+		i++;
+	}
+
+	if (i == AUTO_READ_DONE_TIMEOUT) {
+		e_dbg("Auto read by HW from NVM has not completed.\n");
+		return -E1000_ERR_RESET;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_valid_led_default - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000e_valid_led_default(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		return ret_val;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT;
+
+	return 0;
+}
+
+/**
+ *  e1000e_id_led_init - Initialize ID LED control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the ID LED settings from the NVM and derives the LEDCTL values
+ *  (ledctl_mode1/ledctl_mode2) later used to drive the LEDs off and on.
+ **/
+s32 e1000e_id_led_init(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 data, i, temp;
+	const u16 led_mask = 0x0F;
+
+	ret_val = hw->nvm.ops.valid_led_default(hw, &data);
+	if (ret_val)
+		return ret_val;
+
+	mac->ledctl_default = er32(LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
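+	/*
+	 * The ID LED word is decoded one nibble per LED: nibble i selects
+	 * the mode1/mode2 behavior of LED i, and each LED occupies an
+	 * 8-bit field in LEDCTL, hence the (i << 3) register shifts.
+	 */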
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_setup_led_generic - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.
+ **/
+s32 e1000e_setup_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl;
+
+	if (hw->mac.ops.setup_led != e1000e_setup_led_generic)
+		return -E1000_ERR_CONFIG;
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		ledctl = er32(LEDCTL);
+		hw->mac.ledctl_default = ledctl;
+		/* Turn off LED0 */
+		ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+		            E1000_LEDCTL_LED0_BLINK |
+		            E1000_LEDCTL_LED0_MODE_MASK);
+		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+		           E1000_LEDCTL_LED0_MODE_SHIFT);
+		ew32(LEDCTL, ledctl);
+	} else if (hw->phy.media_type == e1000_media_type_copper) {
+		ew32(LEDCTL, hw->mac.ledctl_mode1);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_cleanup_led_generic - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 e1000e_cleanup_led_generic(struct e1000_hw *hw)
+{
+	ew32(LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ *  e1000e_blink_led_generic - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 e1000e_blink_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		/* always blink LED0 for PCI-E fiber */
+		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+	} else {
+		/*
+		 * set the blink bit for each LED that's "on" (0x0E)
+		 * in ledctl_mode2
+		 */
+		ledctl_blink = hw->mac.ledctl_mode2;
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+						 (i * 8));
+	}
+
+	ew32(LEDCTL, ledctl_blink);
+
+	return 0;
+}
+
+/**
+ *  e1000e_led_on_generic - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+s32 e1000e_led_on_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = er32(CTRL);
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		ew32(CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		ew32(LEDCTL, hw->mac.ledctl_mode2);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_led_off_generic - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 e1000e_led_off_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = er32(CTRL);
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		ew32(CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		ew32(LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_set_pcie_no_snoop - Set PCI-express capabilities
+ *  @hw: pointer to the HW structure
+ *  @no_snoop: bitmap of snoop events
+ *
+ *  Sets the PCI-Express no-snoop bits for the events enabled in 'no_snoop'.
+ **/
+void e1000e_set_pcie_no_snoop(struct e1000_hw *hw, u32 no_snoop)
+{
+	u32 gcr;
+
+	if (no_snoop) {
+		gcr = er32(GCR);
+		gcr &= ~(PCIE_NO_SNOOP_ALL);
+		gcr |= no_snoop;
+		ew32(GCR, gcr);
+	}
+}
+
+/**
+ *  e1000e_disable_pcie_master - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ *  the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 e1000e_disable_pcie_master(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	ew32(CTRL, ctrl);
+
+	while (timeout) {
+		if (!(er32(STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		udelay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		e_dbg("Master requests are pending.\n");
+		return -E1000_ERR_MASTER_REQUESTS_PENDING;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_reset_adaptive - Reset Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000e_reset_adaptive(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	mac->current_ifs_val = 0;
+	mac->ifs_min_val = IFS_MIN;
+	mac->ifs_max_val = IFS_MAX;
+	mac->ifs_step_size = IFS_STEP;
+	mac->ifs_ratio = IFS_RATIO;
+
+	mac->in_ifs_mode = false;
+	ew32(AIT, 0);
+out:
+	return;
+}
+
+/**
+ *  e1000e_update_adaptive - Update Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Update the Adaptive Interframe Spacing Throttle value based on the
+ *  time between transmitted packets and time between collisions.
+ **/
+void e1000e_update_adaptive(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	if (!mac->adaptive_ifs) {
+		e_dbg("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
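+	/*
+	 * Throttle when collisions are high relative to the transmit rate:
+	 * with an ifs_ratio of 4, for example, this triggers on more than
+	 * one collision per four transmitted packets, provided enough
+	 * packets (> MIN_NUM_XMITS) were sent; AIT then grows by
+	 * ifs_step_size up to ifs_max_val.
+	 */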
+	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+			mac->in_ifs_mode = true;
+			if (mac->current_ifs_val < mac->ifs_max_val) {
+				if (!mac->current_ifs_val)
+					mac->current_ifs_val = mac->ifs_min_val;
+				else
+					mac->current_ifs_val +=
+						mac->ifs_step_size;
+				ew32(AIT, mac->current_ifs_val);
+			}
+		}
+	} else {
+		if (mac->in_ifs_mode &&
+		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+			mac->current_ifs_val = 0;
+			mac->in_ifs_mode = false;
+			ew32(AIT, 0);
+		}
+	}
+out:
+	return;
+}
+
+/**
+ *  e1000_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EECD register value
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	ew32(EECD, *eecd);
+	e1e_flush();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u32 mask;
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
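+	/* Shift out MSB first: e.g. data=0x5, count=3 drives DI to 1,0,1 */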
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		ew32(EECD, eecd);
+		e1e_flush();
+
+		udelay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	ew32(EECD, eecd);
+}
+
+/**
+ *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+	eecd = er32(EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
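+	/*
+	 * Clock bits in MSB first: shift 'data' left, pulse SK high and
+	 * sample the DO line into the low-order bit, then lower SK.
+	 */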
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		e1000_raise_eec_clk(hw, &eecd);
+
+		eecd = er32(EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
+
+/**
+ *  e1000e_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+s32 e1000e_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+	u32 attempts = 100000;
+	u32 i, reg = 0;
+
+	for (i = 0; i < attempts; i++) {
+		if (ee_reg == E1000_NVM_POLL_READ)
+			reg = er32(EERD);
+		else
+			reg = er32(EEWR);
+
+		if (reg & E1000_NVM_RW_REG_DONE)
+			return 0;
+
+		udelay(5);
+	}
+
+	return -E1000_ERR_NVM;
+}
+
+/**
+ *  e1000e_acquire_nvm - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000e_acquire_nvm(struct e1000_hw *hw)
+{
+	u32 eecd = er32(EECD);
+	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+
+	ew32(EECD, eecd | E1000_EECD_REQ);
+	eecd = er32(EECD);
+
+	while (timeout) {
+		if (eecd & E1000_EECD_GNT)
+			break;
+		udelay(5);
+		eecd = er32(EECD);
+		timeout--;
+	}
+
+	if (!timeout) {
+		eecd &= ~E1000_EECD_REQ;
+		ew32(EECD, eecd);
+		e_dbg("Could not acquire NVM grant\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Toggle CS to flush commands */
+		eecd |= E1000_EECD_CS;
+		ew32(EECD, eecd);
+		e1e_flush();
+		udelay(nvm->delay_usec);
+		eecd &= ~E1000_EECD_CS;
+		ew32(EECD, eecd);
+		e1e_flush();
+		udelay(nvm->delay_usec);
+	}
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	eecd = er32(EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ *  e1000e_release_nvm - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000e_release_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	e1000_stop_nvm(hw);
+
+	eecd = er32(EECD);
+	eecd &= ~E1000_EECD_REQ;
+	ew32(EECD, eecd);
+}
+
+/**
+ *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Setups the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = er32(EECD);
+	u8 spi_stat_reg;
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		u16 timeout = NVM_MAX_RETRY_SPI;
+
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		ew32(EECD, eecd);
+		e1e_flush();
+		udelay(1);
+
+		/*
+		 * Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+						 hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			udelay(5);
+			e1000_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			e_dbg("SPI NVM Status error\n");
+			return -E1000_ERR_NVM;
+		}
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000e_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = 0;
+
+	/*
+	 * A check for invalid values: offset too large, too many words for
+	 * the given offset, or zero words requested.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		ew32(EERD, eerd);
+		ret_val = e1000e_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (er32(EERD) >> E1000_NVM_RW_REG_DATA);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000e_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000e_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 widx = 0;
+
+	/*
+	 * A check for invalid values: offset too large, too many words for
+	 * the given offset, or zero words requested.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		e_dbg("nvm parameter(s) out of bounds\n");
+		return -E1000_ERR_NVM;
+	}
+
+	ret_val = nvm->ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = e1000_ready_nvm_eeprom(hw);
+		if (ret_val) {
+			nvm->ops.release(hw);
+			return ret_val;
+		}
+
+		e1000_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+					 nvm->opcode_bits);
+
+		e1000_standby_nvm(hw);
+
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+					 nvm->address_bits);
+
+		/* Loop to allow writing up to a whole EEPROM page at a time */
+		while (widx < words) {
+			u16 word_out = data[widx];
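+			/*
+			 * Words are stored in the EEPROM low byte first but
+			 * shift out MSB first, so swap the bytes here.
+			 */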
+			word_out = (word_out >> 8) | (word_out << 8);
+			e1000_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				e1000_standby_nvm(hw);
+				break;
+			}
+		}
+	}
+
+	usleep_range(10000, 20000);
+	nvm->ops.release(hw);
+	return 0;
+}
+
+/**
+ *  e1000_read_pba_string_generic - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *  @pba_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ **/
+s32 e1000_read_pba_string_generic(struct e1000_hw *hw, u8 *pba_num,
+				  u32 pba_num_size)
+{
+	s32 ret_val;
+	u16 nvm_data;
+	u16 pba_ptr;
+	u16 offset;
+	u16 length;
+
+	if (pba_num == NULL) {
+		e_dbg("PBA string buffer was null\n");
+		ret_val = E1000_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &pba_ptr);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	/*
+	 * If nvm_data is not the pointer guard, the PBA must be in legacy
+	 * format, which means pba_ptr is actually our second data word for
+	 * the PBA number and we can decode it into an ASCII string.
+	 */
+	if (nvm_data != NVM_PBA_PTR_GUARD) {
+		e_dbg("NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (pba_num_size < 11) {
+			e_dbg("PBA string buffer too small\n");
+			return E1000_ERR_NO_SPACE;
+		}
+
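+		/*
+		 * Worked example: nvm_data 0xC567 and pba_ptr 0x2103 decode
+		 * to the legacy PBA string "C56721-003" (index 7 holds a
+		 * raw 0 nibble that becomes the '0' digit below).
+		 */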
+		/* extract hex string from data and pba_ptr */
+		pba_num[0] = (nvm_data >> 12) & 0xF;
+		pba_num[1] = (nvm_data >> 8) & 0xF;
+		pba_num[2] = (nvm_data >> 4) & 0xF;
+		pba_num[3] = nvm_data & 0xF;
+		pba_num[4] = (pba_ptr >> 12) & 0xF;
+		pba_num[5] = (pba_ptr >> 8) & 0xF;
+		pba_num[6] = '-';
+		pba_num[7] = 0;
+		pba_num[8] = (pba_ptr >> 4) & 0xF;
+		pba_num[9] = pba_ptr & 0xF;
+
+		/* put a null character on the end of our string */
+		pba_num[10] = '\0';
+
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (pba_num[offset] < 0xA)
+				pba_num[offset] += '0';
+			else if (pba_num[offset] < 0x10)
+				pba_num[offset] += 'A' - 0xA;
+		}
+
+		goto out;
+	}
+
+	ret_val = e1000_read_nvm(hw, pba_ptr, 1, &length);
+	if (ret_val) {
+		e_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		e_dbg("NVM PBA number section invalid length\n");
+		ret_val = E1000_ERR_NVM_PBA_SECTION;
+		goto out;
+	}
+	/* check if pba_num buffer is big enough */
+	if (pba_num_size < (((u32)length * 2) - 1)) {
+		e_dbg("PBA string buffer too small\n");
+		ret_val = E1000_ERR_NO_SPACE;
+		goto out;
+	}
+
+	/* trim pba length from start of string */
+	pba_ptr++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = e1000_read_nvm(hw, pba_ptr + offset, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			goto out;
+		}
+		pba_num[offset * 2] = (u8)(nvm_data >> 8);
+		pba_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+	}
+	pba_num[offset * 2] = '\0';
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_generic - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from the EEPROM and stores the value.
+ *  Since devices with two ports use the same EEPROM, we increment the
+ *  last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	rar_high = er32(RAH(0));
+	rar_low = er32(RAL(0));
+
+	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+	for (i = 0; i < ETH_ALEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+	return 0;
+}
+
+/**
+ *  e1000e_validate_nvm_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000e_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
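+	/*
+	 * Words 0x00 through NVM_CHECKSUM_REG inclusive must sum to NVM_SUM
+	 * (0xBABA); the checksum word itself was written so that the total
+	 * comes out to exactly that constant.
+	 */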
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error\n");
+			return ret_val;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		e_dbg("NVM Checksum Invalid\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_update_nvm_checksum_generic - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000e_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			e_dbg("NVM Read Error while updating checksum.\n");
+			return ret_val;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+	if (ret_val)
+		e_dbg("NVM Write Error while updating checksum.\n");
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_reload_nvm - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000e_reload_nvm(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+
+	udelay(10);
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+}
+
+/**
+ *  e1000_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to EEPROM
+ *  @length: size of EEPROM to calculate a checksum for
+ *
+ *  Calculates the checksum for some buffer on a specified length.  The
+ *  checksum calculated is returned.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8  sum = 0;
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
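+	/* Two's complement: adding this value back to the byte sum gives 0 */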
+	return (u8) (0 - sum);
+}
+
+/**
+ *  e1000_mng_enable_host_if - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 upon success, else -E1000_ERR_HOST_INTERFACE_COMMAND.
+ *
+ *  This function checks whether the HOST IF is enabled for command operation
+ *  and also checks whether the previous command has completed.  It busy-waits
+ *  if the previous command is not yet complete.
+ **/
+static s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+	u32 hicr;
+	u8 i;
+
+	if (!(hw->mac.arc_subsystem_valid)) {
+		e_dbg("ARC subsystem not valid.\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	/* Check that the host interface is enabled. */
+	hicr = er32(HICR);
+	if ((hicr & E1000_HICR_EN) == 0) {
+		e_dbg("E1000_HOST_EN bit disabled.\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+	/* check the previous command is completed */
+	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+		hicr = er32(HICR);
+		if (!(hicr & E1000_HICR_C))
+			break;
+		mdelay(1);
+	}
+
+	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+		e_dbg("Previous command timed out.\n");
+		return -E1000_ERR_HOST_INTERFACE_COMMAND;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_check_mng_mode_generic - check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the firmware semaphore register and returns true (>0) if
+ *  manageability is enabled, else false (0).
+ **/
+bool e1000e_check_mng_mode_generic(struct e1000_hw *hw)
+{
+	u32 fwsm = er32(FWSM);
+
+	return (fwsm & E1000_FWSM_MODE_MASK) ==
+		(E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT);
+}
+
+/**
+ *  e1000e_enable_tx_pkt_filtering - Enable packet filtering on Tx
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ **/
+bool e1000e_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+	u32 *buffer = (u32 *)&hw->mng_cookie;
+	u32 offset;
+	s32 ret_val, hdr_csum, csum;
+	u8 i, len;
+
+	hw->mac.tx_pkt_filtering = true;
+
+	/* No manageability, no filtering */
+	if (!e1000e_check_mng_mode(hw)) {
+		hw->mac.tx_pkt_filtering = false;
+		goto out;
+	}
+
+	/*
+	 * If we can't read from the host interface for whatever
+	 * reason, disable filtering.
+	 */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val) {
+		hw->mac.tx_pkt_filtering = false;
+		goto out;
+	}
+
+	/* Read in the header.  Length and offset are in dwords. */
+	len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+	for (i = 0; i < len; i++)
+		*(buffer + i) = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset + i);
+	hdr_csum = hdr->checksum;
+	hdr->checksum = 0;
+	csum = e1000_calculate_checksum((u8 *)hdr,
+					E1000_MNG_DHCP_COOKIE_LENGTH);
+	/*
+	 * If either the checksums or signature don't match, then
+	 * the cookie area isn't considered valid, in which case we
+	 * take the safe route of assuming Tx filtering is enabled.
+	 */
+	if ((hdr_csum != csum) || (hdr->signature != E1000_IAMT_SIGNATURE)) {
+		hw->mac.tx_pkt_filtering = true;
+		goto out;
+	}
+
+	/* Cookie area is valid, make the final check for filtering. */
+	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING)) {
+		hw->mac.tx_pkt_filtering = false;
+		goto out;
+	}
+
+out:
+	return hw->mac.tx_pkt_filtering;
+}
+
+/**
+ *  e1000_mng_write_cmd_header - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after computing its checksum.
+ **/
+static s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+				  struct e1000_host_mng_command_header *hdr)
+{
+	u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+	/* Write the whole command header structure with new checksum. */
+
+	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+	length >>= 2;
+	/* Write the relevant command block into the ram area. */
+	for (i = 0; i < length; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, i,
+					    *((u32 *) hdr + i));
+		e1e_flush();
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000_mng_host_if_write - Write to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the given offset on the host
+ *  interface.  It handles dword alignment so the writes are done efficiently,
+ *  and accumulates the sum of the written data in the *sum parameter.
+ **/
+static s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer,
+				   u16 length, u16 offset, u8 *sum)
+{
+	u8 *tmp;
+	u8 *bufptr = buffer;
+	u32 data = 0;
+	u16 remaining, i, j, prev_bytes;
+
+	/* 'sum' accumulates a plain sum of the data; it is not a checksum */
+
+	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH)
+		return -E1000_ERR_PARAM;
+
+	tmp = (u8 *)&data;
+	prev_bytes = offset & 0x3;
+	offset >>= 2;
+
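+	/* Merge an unaligned head into the existing dword at 'offset' */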
+	if (prev_bytes) {
+		data = E1000_READ_REG_ARRAY(hw, E1000_HOST_IF, offset);
+		for (j = prev_bytes; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset, data);
+		length -= j - prev_bytes;
+		offset++;
+	}
+
+	remaining = length & 0x3;
+	length -= remaining;
+
+	/* Calculate length in DWORDs */
+	length >>= 2;
+
+	/*
+	 * The device driver writes the relevant command block into the
+	 * ram area.
+	 */
+	for (i = 0; i < length; i++) {
+		for (j = 0; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+	}
+	if (remaining) {
+		for (j = 0; j < sizeof(u32); j++) {
+			if (j < remaining)
+				*(tmp + j) = *bufptr++;
+			else
+				*(tmp + j) = 0;
+
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY(hw, E1000_HOST_IF, offset + i, data);
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_mng_write_dhcp_info - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000e_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+	struct e1000_host_mng_command_header hdr;
+	s32 ret_val;
+	u32 hicr;
+
+	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+	hdr.command_length = length;
+	hdr.reserved1 = 0;
+	hdr.reserved2 = 0;
+	hdr.checksum = 0;
+
+	/* Enable the host interface */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Populate the host interface with the contents of "buffer". */
+	ret_val = e1000_mng_host_if_write(hw, buffer, length,
+					  sizeof(hdr), &(hdr.checksum));
+	if (ret_val)
+		return ret_val;
+
+	/* Write the manageability command header */
+	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+	if (ret_val)
+		return ret_val;
+
+	/* Tell the ARC a new command is pending. */
+	hicr = er32(HICR);
+	ew32(HICR, hicr | E1000_HICR_C);
+
+	return 0;
+}
+
+/**
+ *  e1000e_enable_mng_pass_thru - Check if management passthrough is needed
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies the hardware needs to leave interface enabled so that frames can
+ *  be directed to and from the management interface.
+ **/
+bool e1000e_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = false;
+
+	manc = er32(MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
+		goto out;
+
+	if (hw->mac.has_fwsm) {
+		fwsm = er32(FWSM);
+		factps = er32(FACTPS);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = true;
+			goto out;
+		}
+	} else if ((hw->mac.type == e1000_82574) ||
+		   (hw->mac.type == e1000_82583)) {
+		u16 data;
+
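+		/*
+		 * For 82574/82583, the manageability mode is read from NVM
+		 * init control word 2 rather than the FWSM register.
+		 */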
+		factps = er32(FACTPS);
+		e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((data & E1000_NVM_INIT_CTRL2_MNGM) ==
+		     (e1000_mng_mode_pt << 13))) {
+			ret_val = true;
+			goto out;
+		}
+	} else if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = true;
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c
new file mode 100644
index 0000000..20073aa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/netdev.c
@@ -0,0 +1,4423 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/interrupt.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if_vlan.h>
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#include <linux/version.h>
+#include <linux/pm_qos.h>
+#include <linux/pm_runtime.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#include <linux/pci-aspm.h>
+#endif
+
+#include "e1000.h"
+
+#define RT_E1000E_NUM_RXD	64
+
+#define DRV_EXTRAVERSION "-k-rt"
+
+#define DRV_VERSION "1.5.1" DRV_EXTRAVERSION
+char e1000e_driver_name[] = "rt_e1000e";
+const char e1000e_driver_version[] = DRV_VERSION;
+
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state);
+
+static const struct e1000_info *e1000_info_tbl[] = {
+	[board_82571]		= &e1000_82571_info,
+	[board_82572]		= &e1000_82572_info,
+	[board_82573]		= &e1000_82573_info,
+	[board_82574]		= &e1000_82574_info,
+	[board_82583]		= &e1000_82583_info,
+	[board_80003es2lan]	= &e1000_es2_info,
+	[board_ich8lan]		= &e1000_ich8_info,
+	[board_ich9lan]		= &e1000_ich9_info,
+	[board_ich10lan]	= &e1000_ich10_info,
+	[board_pchlan]		= &e1000_pch_info,
+	[board_pch2lan]		= &e1000_pch2_info,
+	[board_pch_lpt]		= &e1000_pch_lpt_info,
+};
+
+struct e1000_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+#define E1000_RDFH	0x02410	/* Rx Data FIFO Head - RW */
+#define E1000_RDFT	0x02418	/* Rx Data FIFO Tail - RW */
+#define E1000_RDFHS	0x02420	/* Rx Data FIFO Head Saved - RW */
+#define E1000_RDFTS	0x02428	/* Rx Data FIFO Tail Saved - RW */
+#define E1000_RDFPC	0x02430	/* Rx Data FIFO Packet Count - RW */
+
+#define E1000_TDFH	0x03410	/* Tx Data FIFO Head - RW */
+#define E1000_TDFT	0x03418	/* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS	0x03420	/* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS	0x03428	/* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC	0x03430	/* Tx Data FIFO Packet Count - RW */
+
+static const struct e1000_reg_info e1000_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* Rx Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN, "RDLEN"},
+	{E1000_RDH, "RDH"},
+	{E1000_RDT, "RDT"},
+	{E1000_RDTR, "RDTR"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_ERT, "ERT"},
+	{E1000_RDBAL, "RDBAL"},
+	{E1000_RDBAH, "RDBAH"},
+	{E1000_RDFH, "RDFH"},
+	{E1000_RDFT, "RDFT"},
+	{E1000_RDFHS, "RDFHS"},
+	{E1000_RDFTS, "RDFTS"},
+	{E1000_RDFPC, "RDFPC"},
+
+	/* Tx Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL, "TDBAL"},
+	{E1000_TDBAH, "TDBAH"},
+	{E1000_TDLEN, "TDLEN"},
+	{E1000_TDH, "TDH"},
+	{E1000_TDT, "TDT"},
+	{E1000_TIDV, "TIDV"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TADV, "TADV"},
+	{E1000_TARC(0), "TARC"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFTS, "TDFTS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/*
+ * e1000_regdump - register printout routine
+ */
+static void e1000_regdump(struct e1000_hw *hw, struct e1000_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
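+	/*
+	 * RXDCTL, TXDCTL and TARC are per-queue registers; for those,
+	 * read and print both the queue 0 and queue 1 instances.
+	 */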
+	switch (reginfo->ofs) {
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_RXDCTL(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TXDCTL(n));
+		break;
+	case E1000_TARC(0):
+		for (n = 0; n < 2; n++)
+			regs[n] = __er32(hw, E1000_TARC(n));
+		break;
+	default:
+		printk(KERN_INFO "%-15s %08x\n",
+		       reginfo->name, __er32(hw, reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-1]");
+	printk(KERN_INFO "%-15s ", rname);
+	for (n = 0; n < 2; n++)
+		printk(KERN_CONT "%08x ", regs[n]);
+	printk(KERN_CONT "\n");
+}
+
+/*
+ * e1000e_dump - Print registers, Tx-ring and Rx-ring
+ */
+static void e1000e_dump(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_reg_info *reginfo;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc;
+	struct my_u0 {
+		u64 a;
+		u64 b;
+	} *u0;
+	struct e1000_buffer *buffer_info;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_packet_split *rx_desc_ps;
+	union e1000_rx_desc_extended *rx_desc;
+	struct my_u1 {
+		u64 a;
+		u64 b;
+		u64 c;
+		u64 d;
+	} *u1;
+	u32 staterr;
+	int i = 0;
+
+	if (!netif_msg_hw(adapter))
+		return;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		printk(KERN_INFO "Device Name     state            "
+		       "trans_start      last_rx\n");
+		printk(KERN_INFO "%-15s\n", netdev->name);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	printk(KERN_INFO " Register Name   Value\n");
+	for (reginfo = (struct e1000_reg_info *)e1000_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		e1000_regdump(hw, reginfo);
+	}
+
+	/* Print Tx Ring Summary */
+	if (!netdev || !rtnetif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "Tx Ring Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC] [bi(ntc)->dma  ]"
+	       " leng ntw timestamp\n");
+	buffer_info = &tx_ring->buffer_info[tx_ring->next_to_clean];
+	printk(KERN_INFO " %5d %5X %5X %016llX %04X %3X %016llX\n",
+	       0, tx_ring->next_to_use, tx_ring->next_to_clean,
+	       (unsigned long long)buffer_info->dma,
+	       buffer_info->length,
+	       buffer_info->next_to_watch,
+	       (unsigned long long)buffer_info->time_stamp);
+
+	/* Print Tx Ring */
+	if (!netif_msg_tx_done(adapter))
+		goto rx_ring_summary;
+
+	dev_info(&adapter->pdev->dev, "Tx Ring Dump\n");
+
+	/* Transmit Descriptor Formats - DEXT[29] is 0 (Legacy) or 1 (Extended)
+	 *
+	 * Legacy Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0] (Reserved on Write Back)       |
+	 *   +--------------------------------------------------------------+
+	 * 8 | Special  |    CSS     | Status |  CMD    |  CSO   |  Length  |
+	 *   +--------------------------------------------------------------+
+	 *   63       48 47        36 35    32 31     24 23    16 15        0
+	 *
+	 * Extended Context Descriptor (DTYP=0x0) for TSO or checksum offload
+	 *   63      48 47    40 39       32 31             16 15    8 7      0
+	 *   +----------------------------------------------------------------+
+	 * 0 |  TUCSE  | TUCS0  |   TUCSS   |     IPCSE       | IPCS0 | IPCSS |
+	 *   +----------------------------------------------------------------+
+	 * 8 |   MSS   | HDRLEN | RSV | STA | TUCMD | DTYP |      PAYLEN      |
+	 *   +----------------------------------------------------------------+
+	 *   63      48 47    40 39 36 35 32 31   24 23  20 19                0
+	 *
+	 * Extended Data Descriptor (DTYP=0x1)
+	 *   +----------------------------------------------------------------+
+	 * 0 |                     Buffer Address [63:0]                      |
+	 *   +----------------------------------------------------------------+
+	 * 8 | VLAN tag |  POPTS  | Rsvd | Status | Command | DTYP |  DTALEN  |
+	 *   +----------------------------------------------------------------+
+	 *   63       48 47     40 39  36 35    32 31     24 23  20 19        0
+	 */
+	printk(KERN_INFO "Tl[desc]     [address 63:0  ] [SpeCssSCmCsLen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Legacy format\n");
+	printk(KERN_INFO "Tc[desc]     [Ce CoCsIpceCoS] [MssHlRSCm0Plen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Context format\n");
+	printk(KERN_INFO "Td[desc]     [address 63:0  ] [VlaPoRSCm1Dlen]"
+	       " [bi->dma       ] leng  ntw timestamp        bi->skb "
+	       "<-- Ext Data format\n");
+	for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+		u0 = (struct my_u0 *)tx_desc;
+		printk(KERN_INFO "T%c[0x%03X]    %016llX %016llX %016llX "
+		       "%04X  %3X %016llX %p",
+		       (!(le64_to_cpu(u0->b) & (1 << 29)) ? 'l' :
+			((le64_to_cpu(u0->b) & (1 << 20)) ? 'd' : 'c')), i,
+		       (unsigned long long)le64_to_cpu(u0->a),
+		       (unsigned long long)le64_to_cpu(u0->b),
+		       (unsigned long long)buffer_info->dma,
+		       buffer_info->length, buffer_info->next_to_watch,
+		       (unsigned long long)buffer_info->time_stamp,
+		       buffer_info->skb);
+		if (i == tx_ring->next_to_use && i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC/U\n");
+		else if (i == tx_ring->next_to_use)
+			printk(KERN_CONT " NTU\n");
+		else if (i == tx_ring->next_to_clean)
+			printk(KERN_CONT " NTC\n");
+		else
+			printk(KERN_CONT "\n");
+
+		if (netif_msg_pktdata(adapter) && buffer_info->dma != 0)
+			print_hex_dump(KERN_INFO, "", DUMP_PREFIX_ADDRESS,
+				       16, 1, phys_to_virt(buffer_info->dma),
+				       buffer_info->length, true);
+	}
+
+	/* Print Rx Ring Summary */
+rx_ring_summary:
+	dev_info(&adapter->pdev->dev, "Rx Ring Summary\n");
+	printk(KERN_INFO "Queue [NTU] [NTC]\n");
+	printk(KERN_INFO " %5d %5X %5X\n", 0,
+	       rx_ring->next_to_use, rx_ring->next_to_clean);
+
+	/* Print Rx Ring */
+	if (!netif_msg_rx_status(adapter))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "Rx Ring Dump\n");
+	switch (adapter->rx_ps_pages) {
+	case 1:
+	case 2:
+	case 3:
+		/* [Extended] Packet Split Receive Descriptor Format
+		 *
+		 *    +-----------------------------------------------------+
+		 *  0 |                Buffer Address 0 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 *  8 |                Buffer Address 1 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 16 |                Buffer Address 2 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 * 24 |                Buffer Address 3 [63:0]              |
+		 *    +-----------------------------------------------------+
+		 */
+		printk(KERN_INFO "R  [desc]      [buffer 0 63:0 ] "
+		       "[buffer 1 63:0 ] "
+		       "[buffer 2 63:0 ] [buffer 3 63:0 ] [bi->dma       ] "
+		       "[bi->skb] <-- Ext Pkt Split format\n");
+		/* [Extended] Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31     13 12    8 7    4 3        0
+		 *   +------------------------------------------------------+
+		 * 0 | Packet   | IP     |  Rsvd   | MRQ   | Rsvd | MRQ RSS |
+		 *   | Checksum | Ident  |         | Queue |      |  Type   |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31            20 19               0
+		 */
+		printk(KERN_INFO "RWB[desc]      [ck ipid mrqhsh] "
+		       "[vl   l0 ee  es] "
+		       "[ l3  l2  l1 hs] [reserved      ] ---------------- "
+		       "[bi->skb] <-- Ext Rx Write-Back format\n");
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc_ps = E1000_RX_DESC_PS(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc_ps;
+			staterr =
+			    le32_to_cpu(rx_desc_ps->wb.middle.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+				       "%016llX %016llX %016llX "
+				       "---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+				       "%016llX %016llX %016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)le64_to_cpu(u1->c),
+				       (unsigned long long)le64_to_cpu(u1->d),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						DUMP_PREFIX_ADDRESS, 16, 1,
+						phys_to_virt(buffer_info->dma),
+						adapter->rx_ps_bsize0, true);
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+		}
+		break;
+	default:
+	case 0:
+		/* Extended Receive Descriptor (Read) Format
+		 *
+		 *   +-----------------------------------------------------+
+		 * 0 |                Buffer Address [63:0]                |
+		 *   +-----------------------------------------------------+
+		 * 8 |                      Reserved                       |
+		 *   +-----------------------------------------------------+
+		 */
+		printk(KERN_INFO "R  [desc]      [buf addr 63:0 ] "
+		       "[reserved 63:0 ] [bi->dma       ] "
+		       "[bi->skb] <-- Ext (Read) format\n");
+		/* Extended Receive Descriptor (Write-Back) Format
+		 *
+		 *   63       48 47    32 31    24 23            4 3        0
+		 *   +------------------------------------------------------+
+		 *   |     RSS Hash      |        |               |         |
+		 * 0 +-------------------+  Rsvd  |   Reserved    | MRQ RSS |
+		 *   | Packet   | IP     |        |               |  Type   |
+		 *   | Checksum | Ident  |        |               |         |
+		 *   +------------------------------------------------------+
+		 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+		 *   +------------------------------------------------------+
+		 *   63       48 47    32 31            20 19               0
+		 */
+		printk(KERN_INFO "RWB[desc]      [cs ipid    mrq] "
+		       "[vt   ln xe  xs] "
+		       "[bi->skb] <-- Ext (Write-Back) format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			buffer_info = &rx_ring->buffer_info[i];
+			rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+			u1 = (struct my_u1 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				printk(KERN_INFO "RWB[0x%03X]     %016llX "
+				       "%016llX ---------------- %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       buffer_info->skb);
+			} else {
+				printk(KERN_INFO "R  [0x%03X]     %016llX "
+				       "%016llX %016llX %p", i,
+				       (unsigned long long)le64_to_cpu(u1->a),
+				       (unsigned long long)le64_to_cpu(u1->b),
+				       (unsigned long long)buffer_info->dma,
+				       buffer_info->skb);
+
+				if (netif_msg_pktdata(adapter))
+					print_hex_dump(KERN_INFO, "",
+						       DUMP_PREFIX_ADDRESS,
+						       16, 1,
+						       phys_to_virt(buffer_info->dma),
+						       adapter->rx_buffer_len,
+						       true);
+			}
+
+			if (i == rx_ring->next_to_use)
+				printk(KERN_CONT " NTU\n");
+			else if (i == rx_ring->next_to_clean)
+				printk(KERN_CONT " NTC\n");
+			else
+				printk(KERN_CONT "\n");
+		}
+	}
+
+exit:
+	return;
+}
+
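+/*
+ * The two helpers below run in Linux (non-RT) context: the real-time
+ * ISR pends the corresponding rtdm_nrtsig, since mod_timer() and
+ * schedule_work() must not be called from the RTDM interrupt domain.
+ */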
+void e1000e_mod_watchdog_timer(rtdm_nrtsig_t *nrt_sig, void *data)
+{
+	struct timer_list *timer = data;
+
+	mod_timer(timer, jiffies + 1);
+}
+
+void e1000e_trigger_downshift(rtdm_nrtsig_t *nrt_sig, void *data)
+{
+	struct work_struct *downshift_task = data;
+
+	schedule_work(downshift_task);
+}
+
+/**
+ * e1000_desc_unused - calculate if we have unused descriptors
+ * @ring: Tx or Rx ring whose free slots are counted
+ **/
+static int e1000_desc_unused(struct e1000_ring *ring)
+{
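+	/*
+	 * One slot is always left empty so that next_to_use ==
+	 * next_to_clean unambiguously means "ring empty".  Example:
+	 * count = 64, next_to_use = 60, next_to_clean = 4 gives
+	 * 64 + 4 - 60 - 1 = 7 unused descriptors.
+	 */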
+	if (ring->next_to_clean > ring->next_to_use)
+		return ring->next_to_clean - ring->next_to_use - 1;
+
+	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:	receive descriptor csum field
+ * @skb:         rtskb with received data
+ **/
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+			      u32 csum, struct rtskb *skb)
+{
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
+
+	/* the Ignore Checksum bit is set, so skip verification */
+	if (status & E1000_RXD_STAT_IXSM)
+		return;
+	/* TCP/UDP checksum error bit is set */
+	if (errors & E1000_RXD_ERR_TCPE) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+
+	/* TCP/UDP Checksum has not been calculated */
+	if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+		return;
+
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (status & E1000_RXD_STAT_TCPCS) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else {
+		/*
+		 * IP fragment with UDP payload
+		 * Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		__sum16 sum = (__force __sum16)htons(csum);
+		skb->csum = csum_unfold(~sum);
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000e_update_tail_wa - helper function for e1000e_update_[rt]dt_wa()
+ * @hw: pointer to the HW structure
+ * @tail: address of tail descriptor register
+ * @i: value to write to tail descriptor register
+ *
+ * When updating the tail register, the ME could be accessing Host CSR
+ * registers at the same time.  Normally, this is handled in h/w by an
+ * arbiter, but some parts have a bug that acknowledges Host accesses
+ * later than it should, which can leave the descriptor register with an
+ * incorrect value.  Work around this by checking the FWSM register,
+ * which has bit 24 set while the ME is accessing Host CSR registers;
+ * if it is set, wait and retry a bounded number of times.
+ **/
+static inline s32 e1000e_update_tail_wa(struct e1000_hw *hw, u8 __iomem *tail,
+					unsigned int i)
+{
+	unsigned int j = 0;
+
+	while ((j++ < E1000_ICH_FWSM_PCIM2PCI_COUNT) &&
+	       (er32(FWSM) & E1000_ICH_FWSM_PCIM2PCI))
+		udelay(50);
+
+	writel(i, tail);
+
+	if ((j == E1000_ICH_FWSM_PCIM2PCI_COUNT) && (i != readl(tail)))
+		return E1000_ERR_SWFW_SYNC;
+
+	return 0;
+}
+
+static void e1000e_update_rdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->rx_ring->tail);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (e1000e_update_tail_wa(hw, tail, i)) {
+		u32 rctl = er32(RCTL);
+		ew32(RCTL, rctl & ~E1000_RCTL_EN);
+		e_err("ME firmware caused invalid RDT - resetting\n");
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+	}
+}
+
+static void e1000e_update_tdt_wa(struct e1000_adapter *adapter, unsigned int i)
+{
+	u8 __iomem *tail = (adapter->hw.hw_addr + adapter->tx_ring->tail);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (e1000e_update_tail_wa(hw, tail, i)) {
+		u32 tctl = er32(TCTL);
+		ew32(TCTL, tctl & ~E1000_TCTL_EN);
+		e_err("ME firmware caused invalid TDT - resetting\n");
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+	}
+}
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers
+ * @adapter: address of board private structure
+ * @cleaned_count: number of descriptors to refill
+ * @gfp: allocation flags (unused by the rtskb allocator, kept for
+ *	 signature compatibility)
+ **/
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   int cleaned_count, gfp_t gfp)
+{
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_extended *rx_desc;
+	struct e1000_buffer *buffer_info;
+	struct rtskb *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			rtskb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		skb = rtnetdev_alloc_rtskb(adapter->netdev, bufsz);
+		if (!skb) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+		rtskb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+map_skb:
+		buffer_info->dma = rtskb_data_dma_addr(skb, 0);
+
+		rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+		rx_desc->read.buffer_addr = cpu_to_le64(buffer_info->dma);
+
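+		/*
+		 * Kick the tail pointer only every E1000_RX_BUFFER_WRITE-th
+		 * descriptor (the mask test requires a power-of-two batch
+		 * size), keeping MMIO writes off the per-buffer fast path.
+		 */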
+		if (unlikely(!(i & (E1000_RX_BUFFER_WRITE - 1)))) {
+			/*
+			 * Force memory writes to complete before letting h/w
+			 * know there are new descriptors to fetch.  (Only
+			 * applicable for weak-ordered memory model archs,
+			 * such as IA-64).
+			 */
+			wmb();
+			if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+				e1000e_update_rdt_wa(adapter, i);
+			else
+				writel(i, adapter->hw.hw_addr + rx_ring->tail);
+		}
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	rx_ring->next_to_use = i;
+}
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ * @time_stamp: arrival time taken in the interrupt handler
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+			       nanosecs_abs_t *time_stamp)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	union e1000_rx_desc_extended *rx_desc, *next_rxd;
+	struct e1000_buffer *buffer_info, *next_buffer;
+	u32 length, staterr;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool data_received = false;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_EXT(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		struct rtskb *skb;
+
+		rmb();	/* read descriptor and rx_buffer_info after status DD */
+
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		i++;
+		if (i == rx_ring->count)
+			i = 0;
+		next_rxd = E1000_RX_DESC_EXT(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned_count++;
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->wb.upper.length);
+
+		/*
+		 * !EOP means multiple descriptors were used to store a single
+		 * packet, if that's the case we need to toss it.  In fact, we
+		 * need to toss every packet with the EOP bit clear and the
+		 * next frame that _does_ have the EOP bit set, as it is by
+		 * definition only a frame fragment
+		 */
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP)))
+			adapter->flags2 |= FLAG2_IS_DISCARDING;
+
+		if (adapter->flags2 & FLAG2_IS_DISCARDING) {
+			/* All receives must fit into a single buffer */
+			e_dbg("Receive packet consumed multiple buffers\n");
+			/* recycle */
+			buffer_info->skb = skb;
+			if (staterr & E1000_RXD_STAT_EOP)
+				adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+			goto next_desc;
+		}
+
+		if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK) {
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		/* adjust length to remove Ethernet CRC */
+		if (!(adapter->flags2 & FLAG2_CRC_STRIPPING))
+			length -= 4;
+
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		rtskb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.
+					      csum_ip.csum), skb);
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+		skb->time_stamp = *time_stamp;
+		rtnetif_rx(skb);
+		data_received = true;
+
+next_desc:
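+		/* clear the status byte (including DD) so a stale
+		 * descriptor is not treated as completed after the ring
+		 * wraps around */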
+		rx_desc->wb.upper.status_error &= cpu_to_le32(~0xFF);
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= E1000_RX_BUFFER_WRITE) {
+			adapter->alloc_rx_buf(adapter, cleaned_count,
+					      GFP_ATOMIC);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = e1000_desc_unused(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, cleaned_count, GFP_ATOMIC);
+
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->total_rx_packets += total_rx_packets;
+	return data_received;
+}
+
+static void e1000_put_txbuf(struct e1000_adapter *adapter,
+			     struct e1000_buffer *buffer_info)
+{
+	buffer_info->dma = 0;
+	if (buffer_info->skb) {
+		kfree_rtskb(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
+	buffer_info->time_stamp = 0;
+}
+
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ *
+ * The return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned.
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+	unsigned int count = 0;
+	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
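+	/*
+	 * next_to_watch is the index of the packet's last descriptor (the
+	 * one sent with the RS bit).  Once hardware sets its DD bit, every
+	 * buffer up to and including eop can be reclaimed.
+	 */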
+	while ((eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) &&
+	       (count < tx_ring->count)) {
+		bool cleaned = false;
+		rmb(); /* read buffer_info after eop_desc */
+		for (; !cleaned; count++) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+			if (cleaned) {
+				total_tx_packets += buffer_info->segs;
+				total_tx_bytes += buffer_info->bytecount;
+			}
+
+			e1000_put_txbuf(adapter, buffer_info);
+			tx_desc->upper.data = 0;
+
+			i++;
+			if (i == tx_ring->count)
+				i = 0;
+		}
+
+		if (i == tx_ring->next_to_use)
+			break;
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	if (count && rtnetif_carrier_ok(netdev) &&
+	    e1000_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (rtnetif_queue_stopped(netdev) &&
+		    !(test_bit(__E1000_DOWN, &adapter->state))) {
+			rtnetif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/*
+		 * Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
+		adapter->detect_tx_hung = 0;
+		if (tx_ring->buffer_info[i].time_stamp &&
+		    time_after(jiffies, tx_ring->buffer_info[i].time_stamp
+			       + (adapter->tx_timeout_factor * HZ)) &&
+		    !(er32(STATUS) & E1000_STATUS_TXOFF)) {
+			rtnetif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	return count < tx_ring->count;
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ **/
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		buffer_info->dma = 0;
+
+		if (buffer_info->skb) {
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	/* there also may be some cached data from a chained receive */
+	if (rx_ring->rx_skb_top) {
+		kfree_rtskb(rx_ring->rx_skb_top);
+		rx_ring->rx_skb_top = NULL;
+	}
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	adapter->flags2 &= ~FLAG2_IS_DISCARDING;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->head);
+	writel(0, adapter->hw.hw_addr + rx_ring->tail);
+}
+
+static void e1000e_downshift_workaround(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, downshift_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	e1000e_gig_downshift_workaround_ich8lan(&adapter->hw);
+}
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq_handle: RTDM handle of the MSI interrupt
+ **/
+static int e1000_intr_msi(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	struct e1000_hw *hw = &adapter->hw;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	u32 icr = er32(ICR);
+
+	/*
+	 * reading ICR disables interrupts using IAM
+	 */
+
+	if (icr & E1000_ICR_LSC) {
+		hw->mac.get_link_status = 1;
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
+		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+		    (!(er32(STATUS) & E1000_STATUS_LU)))
+			rtdm_schedule_nrt_work(&adapter->downshift_task);
+
+		/*
+		 * 80003ES2LAN workaround-- For packet buffer work-around on
+		 * link down event; disable receives here in the ISR and reset
+		 * adapter in watchdog
+		 */
+		if (rtnetif_carrier_ok(adapter->netdev) &&
+		    adapter->flags & FLAG_RX_NEEDS_RESTART) {
+			/* disable receives */
+			u32 rctl = er32(RCTL);
+			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			rtdm_nrtsig_pend(&adapter->mod_timer_sig);
+	}
+
+	if (!e1000_clean_tx_irq(adapter))
+		/* Ring was not completely cleaned, so fire another interrupt */
+		ew32(ICS, adapter->tx_ring->ims_val);
+
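+	/* packets were queued by rtnetif_rx(); wake the RTnet stack
+	 * manager to push them up the stack */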
+	if (e1000_clean_rx_irq(adapter, &time_stamp))
+		rt_mark_stack_mgr(adapter->netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq_handle: RTDM handle of the legacy interrupt
+ **/
+static int e1000_intr(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	struct e1000_hw *hw = &adapter->hw;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	u32 rctl, icr = er32(ICR);
+
+	if (!icr || test_bit(__E1000_DOWN, &adapter->state))
+		return RTDM_IRQ_NONE;  /* Not our interrupt */
+
+	/*
+	 * IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
+	if (!(icr & E1000_ICR_INT_ASSERTED))
+		return RTDM_IRQ_NONE;
+
+	/*
+	 * Interrupt Auto-Mask...upon reading ICR,
+	 * interrupts are masked.  No need for the
+	 * IMC write
+	 */
+
+	if (icr & E1000_ICR_LSC) {
+		hw->mac.get_link_status = 1;
+		/*
+		 * ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers
+		 */
+		if ((adapter->flags & FLAG_LSC_GIG_SPEED_DROP) &&
+		    (!(er32(STATUS) & E1000_STATUS_LU)))
+			rtdm_nrtsig_pend(&adapter->downshift_sig);
+
+		/*
+		 * 80003ES2LAN workaround--
+		 * For packet buffer work-around on link down event;
+		 * disable receives here in the ISR and
+		 * reset adapter in watchdog
+		 */
+		if (rtnetif_carrier_ok(adapter->netdev) &&
+		    (adapter->flags & FLAG_RX_NEEDS_RESTART)) {
+			/* disable receives */
+			rctl = er32(RCTL);
+			ew32(RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			rtdm_nrtsig_pend(&adapter->mod_timer_sig);
+	}
+
+	if (!e1000_clean_tx_irq(adapter))
+		/* Ring was not completely cleaned, so fire another interrupt */
+		ew32(ICS, adapter->tx_ring->ims_val);
+
+	if (e1000_clean_rx_irq(adapter, &time_stamp))
+		rt_mark_stack_mgr(adapter->netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
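+/*
+ * The MSI-X "other causes" vector only deals with link-state changes,
+ * so it is requested as a regular Linux interrupt (see
+ * e1000_request_msix()) instead of going through the RTDM layer.
+ */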
+static irqreturn_t e1000_msix_other(int irq, void *data)
+{
+	struct rtnet_device *netdev = data;
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = er32(ICR);
+
+	if (!(icr & E1000_ICR_INT_ASSERTED)) {
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			ew32(IMS, E1000_IMS_OTHER);
+		return IRQ_NONE;
+	}
+
+	if (icr & adapter->eiac_mask)
+		ew32(ICS, (icr & adapter->eiac_mask));
+
+	if (icr & E1000_ICR_OTHER) {
+		if (!(icr & E1000_ICR_LSC))
+			goto no_link_interrupt;
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__E1000_DOWN, &adapter->state))
+			mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+no_link_interrupt:
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		ew32(IMS, E1000_IMS_LSC | E1000_IMS_OTHER);
+
+	return IRQ_HANDLED;
+}
+
+static int e1000_intr_msix_tx(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+
+	adapter->total_tx_bytes = 0;
+	adapter->total_tx_packets = 0;
+
+	if (!e1000_clean_tx_irq(adapter))
+		/* Ring was not completely cleaned, so fire another interrupt */
+		ew32(ICS, tx_ring->ims_val);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int e1000_intr_msix_rx(rtdm_irq_t *irq_handle)
+{
+	struct e1000_adapter *adapter =
+		rtdm_irq_get_arg(irq_handle, struct e1000_adapter);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	/* Write the ITR value calculated at the end of the
+	 * previous interrupt.
+	 */
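+	/* itr_val is in interrupts/s; the register takes the interval in
+	 * 256 ns units, e.g. itr_val = 20000 gives 10^9 / (20000 * 256)
+	 * = 195 */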
+	if (adapter->rx_ring->set_itr) {
+		writel(1000000000 / (adapter->rx_ring->itr_val * 256),
+		       adapter->hw.hw_addr + adapter->rx_ring->itr_register);
+		adapter->rx_ring->set_itr = 0;
+	}
+
+	if (e1000_clean_rx_irq(adapter, &time_stamp))
+		rt_mark_stack_mgr(adapter->netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_configure_msix - Configure MSI-X hardware
+ * @adapter: board private structure
+ *
+ * e1000_configure_msix sets up the hardware to properly
+ * generate MSI-X interrupts.
+ **/
+static void e1000_configure_msix(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	int vector = 0;
+	u32 ctrl_ext, ivar = 0;
+
+	adapter->eiac_mask = 0;
+
+	/* Workaround issue with spurious interrupts on 82574 in MSI-X mode */
+	if (hw->mac.type == e1000_82574) {
+		u32 rfctl = er32(RFCTL);
+		rfctl |= E1000_RFCTL_ACK_DIS;
+		ew32(RFCTL, rfctl);
+	}
+
+#define E1000_IVAR_INT_ALLOC_VALID	0x8
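+	/*
+	 * IVAR packs one entry per interrupt cause: the vector number
+	 * plus the ALLOC_VALID bit.  As programmed below, RXQ0 lands in
+	 * bits 3:0, TXQ0 in bits 11:8 and the other causes in bits 19:16.
+	 */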
+	/* Configure Rx vector */
+	rx_ring->ims_val = E1000_IMS_RXQ0;
+	adapter->eiac_mask |= rx_ring->ims_val;
+	if (rx_ring->itr_val)
+		writel(1000000000 / (rx_ring->itr_val * 256),
+		       hw->hw_addr + rx_ring->itr_register);
+	else
+		writel(1, hw->hw_addr + rx_ring->itr_register);
+	ivar = E1000_IVAR_INT_ALLOC_VALID | vector;
+
+	/* Configure Tx vector */
+	tx_ring->ims_val = E1000_IMS_TXQ0;
+	vector++;
+	if (tx_ring->itr_val)
+		writel(1000000000 / (tx_ring->itr_val * 256),
+		       hw->hw_addr + tx_ring->itr_register);
+	else
+		writel(1, hw->hw_addr + tx_ring->itr_register);
+	adapter->eiac_mask |= tx_ring->ims_val;
+	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 8);
+
+	/* set vector for Other Causes, e.g. link changes */
+	vector++;
+	ivar |= ((E1000_IVAR_INT_ALLOC_VALID | vector) << 16);
+	if (rx_ring->itr_val)
+		writel(1000000000 / (rx_ring->itr_val * 256),
+		       hw->hw_addr + E1000_EITR_82574(vector));
+	else
+		writel(1, hw->hw_addr + E1000_EITR_82574(vector));
+
+	/* Cause Tx interrupts on every write back */
+	ivar |= (1 << 31);
+
+	ew32(IVAR, ivar);
+
+	/* enable MSI-X PBA support */
+	ctrl_ext = er32(CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_PBA_CLR;
+
+	/* Auto-Mask Other interrupts upon ICR read */
+#define E1000_EIAC_MASK_82574   0x01F00000
+	ew32(IAM, ~E1000_EIAC_MASK_82574 | E1000_IMS_OTHER);
+	ctrl_ext |= E1000_CTRL_EXT_EIAME;
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+}
+
+void e1000e_reset_interrupt_capability(struct e1000_adapter *adapter)
+{
+	if (adapter->msix_entries) {
+		pci_disable_msix(adapter->pdev);
+		kfree(adapter->msix_entries);
+		adapter->msix_entries = NULL;
+	} else if (adapter->flags & FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~FLAG_MSI_ENABLED;
+	}
+}
+
+/**
+ * e1000e_set_interrupt_capability - set MSI or MSI-X if supported
+ * @adapter: board private structure
+ *
+ * Attempt to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+void e1000e_set_interrupt_capability(struct e1000_adapter *adapter)
+{
+	int err;
+	int i;
+
+	switch (adapter->int_mode) {
+	case E1000E_INT_MODE_MSIX:
+		if (adapter->flags & FLAG_HAS_MSIX) {
+			adapter->num_vectors = 3; /* RxQ0, TxQ0 and other */
+			adapter->msix_entries = kcalloc(adapter->num_vectors,
+						      sizeof(struct msix_entry),
+						      GFP_KERNEL);
+			if (adapter->msix_entries) {
+				for (i = 0; i < adapter->num_vectors; i++)
+					adapter->msix_entries[i].entry = i;
+
+				err = pci_enable_msix_range(adapter->pdev,
+							adapter->msix_entries,
+							adapter->num_vectors,
+							adapter->num_vectors);
+				if (err == 0)
+					return;
+			}
+			/* MSI-X failed, so fall through and try MSI */
+			e_err("Failed to initialize MSI-X interrupts.  "
+			      "Falling back to MSI interrupts.\n");
+			e1000e_reset_interrupt_capability(adapter);
+		}
+		adapter->int_mode = E1000E_INT_MODE_MSI;
+		fallthrough;
+	case E1000E_INT_MODE_MSI:
+		if (!pci_enable_msi(adapter->pdev)) {
+			adapter->flags |= FLAG_MSI_ENABLED;
+		} else {
+			adapter->int_mode = E1000E_INT_MODE_LEGACY;
+			e_err("Failed to initialize MSI interrupts.  Falling "
+			      "back to legacy interrupts.\n");
+		}
+		fallthrough;
+	case E1000E_INT_MODE_LEGACY:
+		/* Don't do anything; this is the system default */
+		break;
+	}
+
+	/* MSI and legacy modes reach this point and use a single vector */
+	adapter->num_vectors = 1;
+}
+
+/**
+ * e1000_request_msix - Initialize MSI-X interrupts
+ * @adapter: board private structure
+ *
+ * e1000_request_msix allocates MSI-X vectors and requests interrupts from the
+ * kernel.
+ **/
+static int e1000_request_msix(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int err = 0, vector = 0;
+
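+	/* the "-rx-0"/"-tx-0" suffix adds 5 characters, so the base name
+	 * must leave that much room within IFNAMSIZ */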
+	if (strlen(netdev->name) < (IFNAMSIZ - 5))
+		snprintf(adapter->rx_ring->name,
+			 sizeof(adapter->rx_ring->name) - 1,
+			 "%s-rx-0", netdev->name);
+	else
+		memcpy(adapter->rx_ring->name, netdev->name, IFNAMSIZ);
+	err = rtdm_irq_request(&adapter->rx_irq_handle,
+			       adapter->msix_entries[vector].vector,
+			       e1000_intr_msix_rx, 0, adapter->rx_ring->name,
+			       adapter);
+	if (err)
+		goto out;
+	adapter->rx_ring->itr_register = E1000_EITR_82574(vector);
+	adapter->rx_ring->itr_val = adapter->itr;
+	vector++;
+
+	if (strlen(netdev->name) < (IFNAMSIZ - 5))
+		snprintf(adapter->tx_ring->name,
+			 sizeof(adapter->tx_ring->name) - 1,
+			 "%s-tx-0", netdev->name);
+	else
+		memcpy(adapter->tx_ring->name, netdev->name, IFNAMSIZ);
+	err = rtdm_irq_request(&adapter->tx_irq_handle,
+			       adapter->msix_entries[vector].vector,
+			       e1000_intr_msix_tx, 0, adapter->tx_ring->name,
+			       adapter);
+	if (err)
+		goto out;
+	adapter->tx_ring->itr_register = E1000_EITR_82574(vector);
+	adapter->tx_ring->itr_val = adapter->itr;
+	vector++;
+
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  e1000_msix_other, 0, netdev->name, netdev);
+	if (err)
+		goto out;
+
+	e1000_configure_msix(adapter);
+	return 0;
+out:
+	return err;
+}
+
+/**
+ * e1000_request_irq - initialize interrupts
+ * @adapter: board private structure
+ *
+ * Attempts to configure interrupts using the best available
+ * capabilities of the hardware and kernel.
+ **/
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int err;
+
+	if (adapter->msix_entries) {
+		err = e1000_request_msix(adapter);
+		if (!err)
+			return err;
+		/* fall back to MSI */
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = E1000E_INT_MODE_MSI;
+		e1000e_set_interrupt_capability(adapter);
+	}
+	if (adapter->flags & FLAG_MSI_ENABLED) {
+		err = rtdm_irq_request(&adapter->irq_handle,
+				       adapter->pdev->irq, e1000_intr_msi,
+				       0, netdev->name, adapter);
+		if (!err)
+			return err;
+
+		/* fall back to legacy interrupt */
+		e1000e_reset_interrupt_capability(adapter);
+		adapter->int_mode = E1000E_INT_MODE_LEGACY;
+	}
+
+	err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+			       e1000_intr, 0, netdev->name, adapter);
+	if (err)
+		e_err("Unable to allocate interrupt, Error: %d\n", err);
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+
+	if (adapter->msix_entries) {
+		int vector = 0;
+
+		rtdm_irq_disable(&adapter->rx_irq_handle);
+		rtdm_irq_free(&adapter->rx_irq_handle);
+		vector++;
+
+		rtdm_irq_disable(&adapter->tx_irq_handle);
+		rtdm_irq_free(&adapter->tx_irq_handle);
+		vector++;
+
+		/* Other Causes interrupt vector */
+		free_irq(adapter->msix_entries[vector].vector, netdev);
+		return;
+	}
+
+	if (adapter->flags & FLAG_MSI_ENABLED)
+		rtdm_irq_disable(&adapter->irq_handle);
+	rtdm_irq_free(&adapter->irq_handle);
+}
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	ew32(IMC, ~0);
+	if (adapter->msix_entries)
+		ew32(EIAC_82574, 0);
+	e1e_flush();
+
+	if (adapter->msix_entries) {
+		int i;
+		for (i = 0; i < adapter->num_vectors; i++)
+			synchronize_irq(adapter->msix_entries[i].vector);
+	} else {
+		synchronize_irq(adapter->pdev->irq);
+	}
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->msix_entries) {
+		ew32(EIAC_82574, adapter->eiac_mask & E1000_EIAC_MASK_82574);
+		ew32(IMS, adapter->eiac_mask | E1000_IMS_OTHER | E1000_IMS_LSC);
+	} else {
+		ew32(IMS, IMS_ENABLE_MASK);
+	}
+	e1e_flush();
+}
+
+/**
+ * e1000e_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ **/
+void e1000e_get_hw_control(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware know the driver has taken over */
+	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm | E1000_SWSM_DRV_LOAD);
+	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+	}
+}
+
+/**
+ * e1000e_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000e_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT versions (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ **/
+void e1000e_release_hw_control(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware take over control of h/w */
+	if (adapter->flags & FLAG_HAS_SWSM_ON_LOAD) {
+		swsm = er32(SWSM);
+		ew32(SWSM, swsm & ~E1000_SWSM_DRV_LOAD);
+	} else if (adapter->flags & FLAG_HAS_CTRLEXT_ON_LOAD) {
+		ctrl_ext = er32(CTRL_EXT);
+		ew32(CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+	}
+}
+
+/**
+ * e1000_alloc_ring_dma - allocate DMA-coherent memory for a ring structure
+ * @adapter: board private structure
+ * @ring: ring whose descriptor memory is allocated
+ **/
+static int e1000_alloc_ring_dma(struct e1000_adapter *adapter,
+				struct e1000_ring *ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	ring->desc = dma_alloc_coherent(&pdev->dev, ring->size, &ring->dma,
+					GFP_KERNEL);
+	if (!ring->desc)
+		return -ENOMEM;
+
+	return 0;
+}
+
+/**
+ * e1000e_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Return 0 on success, negative on failure
+ **/
+int e1000e_setup_tx_resources(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	int err = -ENOMEM, size;
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	tx_ring->buffer_info = vzalloc(size);
+	if (!tx_ring->buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+
+	err = e1000_alloc_ring_dma(adapter, tx_ring);
+	if (err)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	return 0;
+err:
+	vfree(tx_ring->buffer_info);
+	e_err("Unable to allocate memory for the transmit descriptor ring\n");
+	return err;
+}
+
+/**
+ * e1000e_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+int e1000e_setup_rx_resources(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	int size, desc_len, err = -ENOMEM;
+
+	size = sizeof(struct e1000_buffer) * rx_ring->count;
+	rx_ring->buffer_info = vzalloc(size);
+	if (!rx_ring->buffer_info)
+		goto err;
+
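+	/* the packet-split layout is the largest descriptor format, so
+	 * sizing the ring with it also covers the extended descriptors
+	 * this driver actually uses */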
+	desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * desc_len;
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	err = e1000_alloc_ring_dma(adapter, rx_ring);
+	if (err)
+		goto err;
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->rx_skb_top = NULL;
+
+	return 0;
+
+err:
+	vfree(rx_ring->buffer_info);
+	e_err("Unable to allocate memory for the receive descriptor ring\n");
+	return err;
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ **/
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_put_txbuf(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->head);
+	writel(0, adapter->hw.hw_addr + tx_ring->tail);
+}
+
+/**
+ * e1000e_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+void e1000e_free_tx_resources(struct e1000_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+
+	e1000_clean_tx_ring(adapter);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000e_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+void e1000e_free_rx_resources(struct e1000_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	int i;
+
+	e1000_clean_rx_ring(adapter);
+
+	for (i = 0; i < rx_ring->count; i++)
+		kfree(rx_ring->buffer_info[i].ps_pages);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	adapter->tx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		goto err;
+
+	rtdm_lock_init(&adapter->tx_ring->lock);
+
+	adapter->rx_ring = kzalloc(sizeof(struct e1000_ring), GFP_KERNEL);
+	if (!adapter->rx_ring)
+		goto err;
+
+	return 0;
+err:
+	e_err("Unable to allocate memory for queues\n");
+	kfree(adapter->rx_ring);
+	kfree(adapter->tx_ring);
+	return -ENOMEM;
+}
+
+static void e1000_vlan_rx_add_vid(struct rtnet_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	/* don't update vlan cookie if already programmed */
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id))
+		return;
+
+	/* add VID to filter table */
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta |= (1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
+
+	set_bit(vid, adapter->active_vlans);
+}
+
+static void e1000_vlan_rx_kill_vid(struct rtnet_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vfta, index;
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000e_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		index = (vid >> 5) & 0x7F;
+		vfta = E1000_READ_REG_ARRAY(hw, E1000_VFTA, index);
+		vfta &= ~(1 << (vid & 0x1F));
+		hw->mac.ops.write_vfta(hw, index, vfta);
+	}
+
+	clear_bit(vid, adapter->active_vlans);
+}
+
+/**
+ * e1000e_vlan_filter_disable - helper to disable hw VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_disable(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		/* disable VLAN receive filtering */
+		rctl = er32(RCTL);
+		rctl &= ~(E1000_RCTL_VFE | E1000_RCTL_CFIEN);
+		ew32(RCTL, rctl);
+
+		if (adapter->mng_vlan_id != (u16)E1000_MNG_VLAN_NONE) {
+			e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+			adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+		}
+	}
+}
+
+/**
+ * e1000e_vlan_filter_enable - helper to enable HW VLAN filtering
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_filter_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER) {
+		/* enable VLAN receive filtering */
+		rctl = er32(RCTL);
+		rctl |= E1000_RCTL_VFE;
+		rctl &= ~E1000_RCTL_CFIEN;
+		ew32(RCTL, rctl);
+	}
+}
+
+/**
+ * e1000e_vlan_strip_disable - helper to disable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_disable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	/* disable VLAN tag insert/strip */
+	ctrl = er32(CTRL);
+	ctrl &= ~E1000_CTRL_VME;
+	ew32(CTRL, ctrl);
+}
+
+/**
+ * e1000e_vlan_strip_enable - helper to enable HW VLAN stripping
+ * @adapter: board private structure to initialize
+ **/
+static void e1000e_vlan_strip_enable(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	/* enable VLAN tag insert/strip */
+	ctrl = er32(CTRL);
+	ctrl |= E1000_CTRL_VME;
+	ew32(CTRL, ctrl);
+}
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+
+	if (adapter->hw.mng_cookie.status &
+	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+		e1000_vlan_rx_add_vid(netdev, vid);
+		adapter->mng_vlan_id = vid;
+	}
+
+	if ((old_vid != (u16)E1000_MNG_VLAN_NONE) && (vid != old_vid))
+		e1000_vlan_rx_kill_vid(netdev, old_vid);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+	u16 vid;
+
+	e1000_vlan_rx_add_vid(adapter->netdev, 0);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+		e1000_vlan_rx_add_vid(adapter->netdev, vid);
+}
+
+static void e1000_init_manageability_pt(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 manc, manc2h, mdef, i, j;
+
+	if (!(adapter->flags & FLAG_MNG_PT_ENABLED))
+		return;
+
+	manc = er32(MANC);
+
+	/*
+	 * Enable receiving management packets to the host. This will probably
+	 * generate destination unreachable messages from the host OS, but
+	 * the packets will be handled on SMBUS
+	 */
+	manc |= E1000_MANC_EN_MNG2HOST;
+	manc2h = er32(MANC2H);
+
+	switch (hw->mac.type) {
+	default:
+		manc2h |= (E1000_MANC2H_PORT_623 | E1000_MANC2H_PORT_664);
+		break;
+	case e1000_82574:
+	case e1000_82583:
+		/*
+		 * Check if IPMI pass-through decision filter already exists;
+		 * if so, enable it.
+		 */
+		for (i = 0, j = 0; i < 8; i++) {
+			mdef = er32(MDEF(i));
+
+			/* Ignore filters with anything other than IPMI ports */
+			if (mdef & ~(E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+				continue;
+
+			/* Enable this decision filter in MANC2H */
+			if (mdef)
+				manc2h |= (1 << i);
+
+			j |= mdef;
+		}
+
+		if (j == (E1000_MDEF_PORT_623 | E1000_MDEF_PORT_664))
+			break;
+
+		/* Create new decision filter in an empty filter */
+		for (i = 0, j = 0; i < 8; i++)
+			if (er32(MDEF(i)) == 0) {
+				ew32(MDEF(i), (E1000_MDEF_PORT_623 |
+					       E1000_MDEF_PORT_664));
+				manc2h |= (1 << 1);
+				j++;
+				break;
+			}
+
+		if (!j)
+			e_warn("Unable to create IPMI pass-through filter\n");
+		break;
+	}
+
+	ew32(MANC2H, manc2h);
+	ew32(MANC, manc);
+}
+
+/**
+ * e1000_configure_tx - Configure Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	u64 tdba;
+	u32 tdlen, tctl, tipg, tarc;
+	u32 ipgr1, ipgr2;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	tdba = tx_ring->dma;
+	tdlen = tx_ring->count * sizeof(struct e1000_tx_desc);
+	ew32(TDBAL, (tdba & DMA_BIT_MASK(32)));
+	ew32(TDBAH, (tdba >> 32));
+	ew32(TDLEN, tdlen);
+	ew32(TDH, 0);
+	ew32(TDT, 0);
+	tx_ring->head = E1000_TDH;
+	tx_ring->tail = E1000_TDT;
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+	tipg = DEFAULT_82543_TIPG_IPGT_COPPER;          /*  8  */
+	ipgr1 = DEFAULT_82543_TIPG_IPGR1;               /*  8  */
+	ipgr2 = DEFAULT_82543_TIPG_IPGR2;               /*  6  */
+
+	if (adapter->flags & FLAG_TIPG_MEDIUM_FOR_80003ESLAN)
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2; /*  7  */
+
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	ew32(TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+	ew32(TIDV, adapter->tx_int_delay);
+	/* Tx irq moderation */
+	ew32(TADV, adapter->tx_abs_int_delay);
+
+	if (adapter->flags2 & FLAG2_DMA_BURST) {
+		u32 txdctl = er32(TXDCTL(0));
+		txdctl &= ~(E1000_TXDCTL_PTHRESH | E1000_TXDCTL_HTHRESH |
+			    E1000_TXDCTL_WTHRESH);
+		/*
+		 * set up some performance related parameters to encourage the
+		 * hardware to use the bus more efficiently in bursts, depends
+		 * on the tx_int_delay to be enabled,
+		 * wthresh = 5 ==> burst write a cacheline (64 bytes) at a time
+		 * hthresh = 1 ==> prefetch when one or more available
+		 * pthresh = 0x1f ==> prefetch if internal cache 31 or less
+		 * BEWARE: this seems to work but should be considered first if
+		 * there are Tx hangs or other Tx related bugs
+		 */
+		txdctl |= E1000_TXDCTL_DMA_BURST_ENABLE;
+		ew32(TXDCTL(0), txdctl);
+		/* erratum work around: set txdctl the same for both queues */
+		ew32(TXDCTL(1), txdctl);
+	}
+
+	/* Program the Transmit Control Register */
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	if (adapter->flags & FLAG_TARC_SPEED_MODE_BIT) {
+		tarc = er32(TARC(0));
+		/*
+		 * set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later
+		 */
+#define SPEED_MODE_BIT (1 << 21)
+		tarc |= SPEED_MODE_BIT;
+		ew32(TARC(0), tarc);
+	}
+
+	/* errata: program both queues to unweighted RR */
+	if (adapter->flags & FLAG_TARC_SET_BIT_ZERO) {
+		tarc = er32(TARC(0));
+		tarc |= 1;
+		ew32(TARC(0), tarc);
+		tarc = er32(TARC(1));
+		tarc |= 1;
+		ew32(TARC(1), tarc);
+	}
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+	/* enable Report Status bit */
+	adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	ew32(TCTL, tctl);
+
+	e1000e_config_collision_dist(hw);
+}
+
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			   (((S) & (PAGE_SIZE - 1)) ? 1 : 0))
+
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: board private structure
+ **/
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl, rfctl;
+
+	/* Workaround Si errata on PCHx - configure jumbo frame flow */
+	if (hw->mac.type >= e1000_pch2lan) {
+		s32 ret_val;
+
+		if (adapter->netdev->mtu > ETH_DATA_LEN)
+			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, true);
+		else
+			ret_val = e1000_lv_jumbo_workaround_ich8lan(hw, false);
+
+		if (ret_val)
+			e_dbg("failed to enable jumbo frame workaround mode\n");
+	}
+
+	/* Program MC offset vector base */
+	rctl = er32(RCTL);
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* Do not Store bad packets */
+	rctl &= ~E1000_RCTL_SBP;
+
+	/* Enable Long Packet receive */
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Some systems expect that the CRC is included in SMBUS traffic. The
+	 * hardware strips the CRC before sending to both SMBUS (BMC) and to
+	 * host memory when this is enabled
+	 */
+	if (adapter->flags2 & FLAG2_CRC_STRIPPING)
+		rctl |= E1000_RCTL_SECRC;
+
+	/* Workaround Si errata on 82577 PHY - configure IPG for jumbos */
+	if ((hw->phy.type == e1000_phy_82577) && (rctl & E1000_RCTL_LPE)) {
+		u16 phy_data;
+
+		e1e_rphy(hw, PHY_REG(770, 26), &phy_data);
+		phy_data &= 0xfff8;
+		phy_data |= (1 << 2);
+		e1e_wphy(hw, PHY_REG(770, 26), phy_data);
+
+		e1e_rphy(hw, 22, &phy_data);
+		phy_data &= 0x0fff;
+		phy_data |= (1 << 14);
+		e1e_wphy(hw, 0x10, 0x2823);
+		e1e_wphy(hw, 0x11, 0x0003);
+		e1e_wphy(hw, 22, phy_data);
+	}
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
+	switch (adapter->rx_buffer_len) {
+	case 2048:
+	default:
+		rctl |= E1000_RCTL_SZ_2048;
+		rctl &= ~E1000_RCTL_BSEX;
+		break;
+	case 4096:
+		rctl |= E1000_RCTL_SZ_4096;
+		break;
+	case 8192:
+		rctl |= E1000_RCTL_SZ_8192;
+		break;
+	case 16384:
+		rctl |= E1000_RCTL_SZ_16384;
+		break;
+	}
+
+	/* Enable Extended Status in all Receive Descriptors */
+	rfctl = er32(RFCTL);
+	rfctl |= E1000_RFCTL_EXTEN;
+
+	adapter->rx_ps_pages = 0;
+
+	ew32(RFCTL, rfctl);
+	ew32(RCTL, rctl);
+	/* just started the receive unit, no need to restart */
+	adapter->flags &= ~FLAG_RX_RESTART_NOW;
+}
+
+/**
+ * e1000_configure_rx - Configure Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_ring *rx_ring = adapter->rx_ring;
+	u64 rdba;
+	u32 rdlen, rctl, rxcsum, ctrl_ext;
+
+	rdlen = rx_ring->count * sizeof(union e1000_rx_desc_extended);
+	adapter->clean_rx = e1000_clean_rx_irq;
+	adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+
+	/* disable receives while setting up the descriptors */
+	rctl = er32(RCTL);
+	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+		ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	e1e_flush();
+	usleep_range(10000, 20000);
+
+	if (adapter->flags2 & FLAG2_DMA_BURST) {
+		/*
+		 * set the writeback threshold (only takes effect if the RDTR
+		 * is set); set GRAN=1 and write back up to 0x4 worth, and
+		 * enable prefetching of 0x20 Rx descriptors:
+		 *   granularity = 01
+		 *   wthresh     = 0x04
+		 *   hthresh     = 0x04
+		 *   pthresh     = 0x20
+		 */
+		ew32(RXDCTL(0), E1000_RXDCTL_DMA_BURST_ENABLE);
+		ew32(RXDCTL(1), E1000_RXDCTL_DMA_BURST_ENABLE);
+
+		/*
+		 * override the delay timers for enabling bursting, only if
+		 * the value was not set by the user via module options
+		 */
+		if (adapter->rx_int_delay == DEFAULT_RDTR)
+			adapter->rx_int_delay = BURST_RDTR;
+		if (adapter->rx_abs_int_delay == DEFAULT_RADV)
+			adapter->rx_abs_int_delay = BURST_RADV;
+	}
+
+	/* set the Receive Delay Timer Register */
+	ew32(RDTR, adapter->rx_int_delay);
+
+	/* irq moderation */
+	ew32(RADV, adapter->rx_abs_int_delay);
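+	/*
+	 * The ITR register counts in 256ns units, so an interrupts/sec
+	 * setting converts as 10^9 / (itr * 256).
+	 */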
+	if ((adapter->itr_setting != 0) && (adapter->itr != 0))
+		ew32(ITR, 1000000000 / (adapter->itr * 256));
+
+	ctrl_ext = er32(CTRL_EXT);
+	ew32(CTRL_EXT, ctrl_ext);
+	e1e_flush();
+
+	/*
+	 * Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	rdba = rx_ring->dma;
+	ew32(RDBAL, (rdba & DMA_BIT_MASK(32)));
+	ew32(RDBAH, (rdba >> 32));
+	ew32(RDLEN, rdlen);
+	ew32(RDH, 0);
+	ew32(RDT, 0);
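+	/*
+	 * head/tail cache the register offsets; the ring pointers are bumped
+	 * later by writing to hw->hw_addr plus these offsets.
+	 */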
+	rx_ring->head = E1000_RDH;
+	rx_ring->tail = E1000_RDT;
+
+	/* Enable Receive Checksum Offload for TCP and UDP */
+	rxcsum = er32(RXCSUM);
+	if (adapter->netdev->features & NETIF_F_RXCSUM) {
+		rxcsum |= E1000_RXCSUM_TUOFL;
+	} else {
+		rxcsum &= ~E1000_RXCSUM_TUOFL;
+		/* no need to clear IPPCSE as it defaults to 0 */
+	}
+	ew32(RXCSUM, rxcsum);
+
+	/* Enable Receives */
+	ew32(RCTL, rctl);
+}
+
+/**
+ *  e1000_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates the Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+static void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+				      u32 mc_addr_count)
+{
+	hw->mac.ops.update_mc_addr_list(hw, mc_addr_list, mc_addr_count);
+}
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000_set_multi(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = er32(RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		rctl &= ~E1000_RCTL_VFE;
+		/* Do not hardware filter VLANs in promisc mode */
+		e1000e_vlan_filter_disable(adapter);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			rctl &= ~E1000_RCTL_UPE;
+		} else {
+			rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+		}
+		e1000e_vlan_filter_enable(adapter);
+	}
+
+	ew32(RCTL, rctl);
+
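+	/*
+	 * The RTnet port keeps no per-socket multicast list, so program an
+	 * empty MTA here; multicast reception relies on the MPE/UPE bits
+	 * set above.
+	 */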
+	e1000_update_mc_addr_list(hw, NULL, 0);
+
+	if (netdev->features & NETIF_F_HW_VLAN_CTAG_RX)
+		e1000e_vlan_strip_enable(adapter);
+	else
+		e1000e_vlan_strip_disable(adapter);
+}
+
+/**
+ * e1000_configure - configure the hardware for Rx and Tx
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+	e1000_set_multi(adapter->netdev);
+
+	e1000_restore_vlan(adapter);
+	e1000_init_manageability_pt(adapter);
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	adapter->alloc_rx_buf(adapter, e1000_desc_unused(adapter->rx_ring),
+			      GFP_KERNEL);
+}
+
+/**
+ * e1000e_power_up_phy - restore link in case the phy was powered down
+ * @adapter: address of board private structure
+ *
+ * The phy may be powered down to save power and turn off link when the
+ * driver is unloaded and wake on lan is not enabled (among others)
+ * *** this routine MUST be followed by a call to e1000e_reset ***
+ **/
+void e1000e_power_up_phy(struct e1000_adapter *adapter)
+{
+	if (adapter->hw.phy.ops.power_up)
+		adapter->hw.phy.ops.power_up(&adapter->hw);
+
+	adapter->hw.mac.ops.setup_link(&adapter->hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down the PHY
+ * @adapter: board private structure
+ *
+ * Power down the PHY so no link is implied when interface is down.
+ * The PHY cannot be powered down if management or WoL is active.
+ */
+static void e1000_power_down_phy(struct e1000_adapter *adapter)
+{
+	/* WoL is enabled */
+	if (adapter->wol)
+		return;
+
+	if (adapter->hw.phy.ops.power_down)
+		adapter->hw.phy.ops.power_down(&adapter->hw);
+}
+
+/**
+ * e1000e_reset - bring the hardware into a known good state
+ * @adapter: board private structure
+ *
+ * This function boots the hardware and enables some settings that
+ * require a configuration cycle of the hardware - those cannot be
+ * set/changed during runtime. After reset the device needs to be
+ * properly configured for Rx, Tx etc.
+ */
+void e1000e_reset(struct e1000_adapter *adapter)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_fc_info *fc = &adapter->hw.fc;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tx_space, min_tx_space, min_rx_space;
+	u32 pba = adapter->pba;
+	u16 hwm;
+
+	/* reset Packet Buffer Allocation to default */
+	ew32(PBA, pba);
+
+	if (adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) {
+		/*
+		 * To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB.
+		 */
+		pba = er32(PBA);
+		/* upper 16 bits hold the Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits hold the Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/*
+		 * the Tx FIFO also stores 16 bytes of descriptor information
+		 * per packet, but don't count the Ethernet FCS because the
+		 * hardware appends it
+		 */
+		min_tx_space = (adapter->max_frame_size +
+				sizeof(struct e1000_tx_desc) -
+				ETH_FCS_LEN) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = adapter->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/*
+		 * If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation
+		 */
+		if ((tx_space < min_tx_space) &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba -= min_tx_space - tx_space;
+
+			/*
+			 * if short on Rx space, Rx wins and must trump Tx
+			 * adjustment or use Early Receive if available
+			 */
+			if ((pba < min_rx_space) &&
+			    (!(adapter->flags & FLAG_HAS_ERT)))
+				/* ERT enabled in e1000_configure_rx */
+				pba = min_rx_space;
+		}
+
+		ew32(PBA, pba);
+	}
+
+	/*
+	 * flow control settings
+	 *
+	 * The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, and
+	 * - the full Rx FIFO size minus the early receive size (for parts
+	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
+	 * - the full Rx FIFO size minus one full frame
+	 */
+	if (adapter->flags & FLAG_DISABLE_FC_PAUSE_TIME)
+		fc->pause_time = 0xFFFF;
+	else
+		fc->pause_time = E1000_FC_PAUSE_TIME;
+	fc->send_xon = 1;
+	fc->current_mode = fc->requested_mode;
+
+	switch (hw->mac.type) {
+	default:
+		if ((adapter->flags & FLAG_HAS_ERT) &&
+		    (adapter->netdev->mtu > ETH_DATA_LEN))
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - (E1000_ERT_2048 << 3)));
+		else
+			hwm = min(((pba << 10) * 9 / 10),
+				  ((pba << 10) - adapter->max_frame_size));
+
+		fc->high_water = hwm & E1000_FCRTH_RTH; /* 8-byte granularity */
+		fc->low_water = fc->high_water - 8;
+		break;
+	case e1000_pchlan:
+		/*
+		 * Workaround PCH LOM adapter hangs with certain network
+		 * loads.  If hangs persist, try disabling Tx flow control.
+		 */
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			fc->high_water = 0x3500;
+			fc->low_water  = 0x1500;
+		} else {
+			fc->high_water = 0x5000;
+			fc->low_water  = 0x3000;
+		}
+		fc->refresh_time = 0x1000;
+		break;
+	case e1000_pch2lan:
+	case e1000_pch_lpt:
+		fc->high_water = 0x05C20;
+		fc->low_water = 0x05048;
+		fc->pause_time = 0x0650;
+		fc->refresh_time = 0x0400;
+		if (adapter->netdev->mtu > ETH_DATA_LEN) {
+			pba = 14;
+			ew32(PBA, pba);
+		}
+		break;
+	}
+
+	/*
+	 * Disable Adaptive Interrupt Moderation if 2 full packets cannot
+	 * fit in receive buffer and early-receive not supported.
+	 */
+	if (adapter->itr_setting & 0x3) {
+		if (((adapter->max_frame_size * 2) > (pba << 10)) &&
+		    !(adapter->flags & FLAG_HAS_ERT)) {
+			if (!(adapter->flags2 & FLAG2_DISABLE_AIM)) {
+				dev_info(&adapter->pdev->dev,
+					"Interrupt Throttle Rate turned off\n");
+				adapter->flags2 |= FLAG2_DISABLE_AIM;
+				ew32(ITR, 0);
+			}
+		} else if (adapter->flags2 & FLAG2_DISABLE_AIM) {
+			dev_info(&adapter->pdev->dev,
+				 "Interrupt Throttle Rate turned on\n");
+			adapter->flags2 &= ~FLAG2_DISABLE_AIM;
+			adapter->itr = 20000;
+			ew32(ITR, 1000000000 / (adapter->itr * 256));
+		}
+	}
+
+	/* Allow time for pending master requests to run */
+	mac->ops.reset_hw(hw);
+
+	/*
+	 * For parts with AMT enabled, let the firmware know
+	 * that the network interface is in control
+	 */
+	if (adapter->flags & FLAG_HAS_AMT)
+		e1000e_get_hw_control(adapter);
+
+	ew32(WUC, 0);
+
+	if (mac->ops.init_hw(hw))
+		e_err("Hardware Error\n");
+
+	e1000_update_mng_vlan(adapter);
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	ew32(VET, ETH_P_8021Q);
+
+	e1000e_reset_adaptive(hw);
+
+	if (!rtnetif_running(adapter->netdev) &&
+	    !test_bit(__E1000_TESTING, &adapter->state)) {
+		e1000_power_down_phy(adapter);
+		return;
+	}
+
+	e1000_get_phy_info(hw);
+
+	if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN) &&
+	    !(adapter->flags & FLAG_SMART_POWER_DOWN)) {
+		u16 phy_data = 0;
+		/*
+		 * speed up time to link by disabling smart power down, ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed
+		 */
+		e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, phy_data);
+	}
+}
+
+int e1000e_up(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* hardware has been reset, we need to reload some things */
+	e1000_configure(adapter);
+
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	if (adapter->msix_entries)
+		e1000_configure_msix(adapter);
+	e1000_irq_enable(adapter);
+
+	rtnetif_start_queue(adapter->netdev);
+
+	/* fire a link change interrupt to start the watchdog */
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
+
+	return 0;
+}
+
+static void e1000e_flush_descriptors(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (!(adapter->flags2 & FLAG2_DMA_BURST))
+		return;
+
+	/* flush pending descriptor writebacks to memory */
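+	/* (FPD forces the write-back instead of waiting for the timers) */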
+	ew32(TIDV, adapter->tx_int_delay | E1000_TIDV_FPD);
+	ew32(RDTR, adapter->rx_int_delay | E1000_RDTR_FPD);
+
+	/* execute the writes immediately */
+	e1e_flush();
+}
+
+void e1000e_down(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl, rctl;
+
+	/*
+	 * signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer
+	 */
+	set_bit(__E1000_DOWN, &adapter->state);
+
+	/* disable receives in the hardware */
+	rctl = er32(RCTL);
+	if (!(adapter->flags2 & FLAG2_NO_DISABLE_RX))
+		ew32(RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+	rtnetif_stop_queue(netdev);
+
+	/* disable transmits in the hardware */
+	tctl = er32(TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	ew32(TCTL, tctl);
+
+	/* flush both disables and wait for them to finish */
+	e1e_flush();
+	usleep_range(10000, 20000);
+
+	e1000_irq_disable(adapter);
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	rtnetif_carrier_off(netdev);
+
+	e1000e_flush_descriptors(adapter);
+	e1000_clean_tx_ring(adapter);
+	e1000_clean_rx_ring(adapter);
+
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+
+	if (!pci_channel_offline(adapter->pdev))
+		e1000e_reset(adapter);
+
+	/*
+	 * TODO: for power management, we could drop the link and
+	 * pci_disable_device here.
+	 */
+}
+
+void e1000e_reinit_locked(struct e1000_adapter *adapter)
+{
+	might_sleep();
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+	e1000e_down(adapter);
+	e1000e_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->state);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+
+	adapter->rx_buffer_len = ETH_FRAME_LEN + VLAN_HLEN + ETH_FCS_LEN;
+	adapter->rx_ps_bsize0 = 128;
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN;
+	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+	spin_lock_init(&adapter->stats64_lock);
+
+	e1000e_set_interrupt_capability(adapter);
+
+	if (e1000_alloc_queues(adapter))
+		return -ENOMEM;
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	e1000_irq_disable(adapter);
+
+	set_bit(__E1000_DOWN, &adapter->state);
+	return 0;
+}
+
+/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+	struct rtnet_device *netdev = data;
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = er32(ICR);
+
+	e_dbg("icr is %08X\n", icr);
+	if (icr & E1000_ICR_RXSEQ) {
+		adapter->flags &= ~FLAG_MSI_TEST_FAILED;
+		wmb();
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	/* poll_enable hasn't been called yet, so don't need disable */
+	/* clear any pending events */
+	er32(ICR);
+
+	/* free the real vector and request a test handler */
+	e1000_free_irq(adapter);
+	e1000e_reset_interrupt_capability(adapter);
+
+	/* Assume that the test fails; if it succeeds, the test
+	 * MSI irq handler will clear this flag */
+	adapter->flags |= FLAG_MSI_TEST_FAILED;
+
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		goto msi_test_failed;
+
+	err = request_irq(adapter->pdev->irq, e1000_intr_msi_test, 0,
+			  netdev->name, netdev);
+	if (err) {
+		pci_disable_msi(adapter->pdev);
+		goto msi_test_failed;
+	}
+
+	wmb();
+
+	e1000_irq_enable(adapter);
+
+	/* fire an unusual interrupt on the test handler */
+	ew32(ICS, E1000_ICS_RXSEQ);
+	e1e_flush();
+	msleep(50);
+
+	e1000_irq_disable(adapter);
+
+	rmb();
+
+	if (adapter->flags & FLAG_MSI_TEST_FAILED) {
+		adapter->int_mode = E1000E_INT_MODE_LEGACY;
+		e_info("MSI interrupt test failed, using legacy interrupt.\n");
+	} else
+		e_dbg("MSI interrupt test succeeded!\n");
+
+	free_irq(adapter->pdev->irq, netdev);
+	pci_disable_msi(adapter->pdev);
+
+msi_test_failed:
+	e1000e_set_interrupt_capability(adapter);
+	return e1000_request_irq(adapter);
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds or INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+	int err;
+	u16 pci_cmd;
+
+	if (!(adapter->flags & FLAG_MSI_ENABLED))
+		return 0;
+
+	/* disable SERR in case the MSI write causes a master abort */
+	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+	if (pci_cmd & PCI_COMMAND_SERR)
+		pci_write_config_word(adapter->pdev, PCI_COMMAND,
+				      pci_cmd & ~PCI_COMMAND_SERR);
+
+	err = e1000_test_msi_interrupt(adapter);
+
+	/* re-enable SERR */
+	if (pci_cmd & PCI_COMMAND_SERR) {
+		pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+		pci_cmd |= PCI_COMMAND_SERR;
+		pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+	}
+
+	return err;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int e1000_open(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__E1000_TESTING, &adapter->state))
+		return -EBUSY;
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	rtnetif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = e1000e_setup_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = e1000e_setup_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now open and reset the part to a known state.
+	 */
+	if (adapter->flags & FLAG_HAS_AMT) {
+		e1000e_get_hw_control(adapter);
+		e1000e_reset(adapter);
+	}
+
+	e1000e_power_up_phy(adapter);
+
+	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN))
+		e1000_update_mng_vlan(adapter);
+
+	/*
+	 * before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to setup our
+	 * clean_rx handler before we do so.
+	 */
+	e1000_configure(adapter);
+
+	rt_stack_connect(netdev, &STACK_manager);
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/*
+	 * Work around PCIe errata with MSI interrupts causing some chipsets to
+	 * ignore e1000e MSI messages, which means we need to test our MSI
+	 * interrupt now
+	 */
+	if (adapter->int_mode != E1000E_INT_MODE_LEGACY) {
+		err = e1000_test_msi(adapter);
+		if (err) {
+			e_err("Interrupt allocation failed\n");
+			goto err_req_irq;
+		}
+	}
+
+	/* From here on the code is the same as e1000e_up() */
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	e1000_irq_enable(adapter);
+
+	rtnetif_start_queue(netdev);
+
+	adapter->idle_check = true;
+	pm_runtime_put(&pdev->dev);
+
+	/* fire a link status change interrupt to start the watchdog */
+	if (adapter->msix_entries)
+		ew32(ICS, E1000_ICS_LSC | E1000_ICR_OTHER);
+	else
+		ew32(ICS, E1000_ICS_LSC);
+
+	return 0;
+
+err_req_irq:
+	e1000e_release_hw_control(adapter);
+	e1000_power_down_phy(adapter);
+	e1000e_free_rx_resources(adapter);
+err_setup_rx:
+	e1000e_free_tx_resources(adapter);
+err_setup_tx:
+	e1000e_reset(adapter);
+	pm_runtime_put_sync(&pdev->dev);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int e1000_close(struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct pci_dev *pdev = adapter->pdev;
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+
+	pm_runtime_get_sync(&pdev->dev);
+
+	if (!test_bit(__E1000_DOWN, &adapter->state)) {
+		e1000e_down(adapter);
+		e1000_free_irq(adapter);
+	}
+	e1000_power_down_phy(adapter);
+
+	rt_stack_disconnect(netdev);
+
+	e1000e_free_tx_resources(adapter);
+	e1000e_free_rx_resources(adapter);
+
+	/*
+	 * kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it)
+	 */
+	if (adapter->hw.mng_cookie.status &
+	    E1000_MNG_DHCP_COOKIE_STATUS_VLAN)
+		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+
+	/*
+	 * If AMT is enabled, let the firmware know that the network
+	 * interface is now closed
+	 */
+	if ((adapter->flags & FLAG_HAS_AMT) &&
+	    !test_bit(__E1000_TESTING, &adapter->state))
+		e1000e_release_hw_control(adapter);
+
+	pm_runtime_put_sync(&pdev->dev);
+
+	return 0;
+}
+
+/**
+ * e1000e_update_phy_task - work thread to update phy
+ * @work: pointer to our work struct
+ *
+ * this worker thread exists because reading the phy requires
+ * acquiring a semaphore, and we may msleep while waiting for
+ * it, which is not allowed in timer context.
+ **/
+static void e1000e_update_phy_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, update_phy_task);
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	e1000_get_phy_info(&adapter->hw);
+}
+
+/*
+ * Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void e1000_update_phy_info(struct timer_list *t)
+{
+	struct e1000_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+#else /* < 4.14 */
+static void e1000_update_phy_info(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+#endif /* < 4.14 */
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	rtdm_schedule_nrt_work(&adapter->update_phy_task);
+}
+
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_phy_regs *phy = &adapter->phy_regs;
+
+	if ((er32(STATUS) & E1000_STATUS_LU) &&
+	    (adapter->hw.phy.media_type == e1000_media_type_copper)) {
+		int ret_val;
+
+		ret_val  = e1e_rphy(hw, PHY_CONTROL, &phy->bmcr);
+		ret_val |= e1e_rphy(hw, PHY_STATUS, &phy->bmsr);
+		ret_val |= e1e_rphy(hw, PHY_AUTONEG_ADV, &phy->advertise);
+		ret_val |= e1e_rphy(hw, PHY_LP_ABILITY, &phy->lpa);
+		ret_val |= e1e_rphy(hw, PHY_AUTONEG_EXP, &phy->expansion);
+		ret_val |= e1e_rphy(hw, PHY_1000T_CTRL, &phy->ctrl1000);
+		ret_val |= e1e_rphy(hw, PHY_1000T_STATUS, &phy->stat1000);
+		ret_val |= e1e_rphy(hw, PHY_EXT_STATUS, &phy->estatus);
+		if (ret_val)
+			e_warn("Error reading PHY register\n");
+	} else {
+		/*
+		 * Do not read PHY registers if link is not up
+		 * Set values to typical power-on defaults
+		 */
+		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+			     BMSR_ERCAP);
+		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+				  ADVERTISE_ALL | ADVERTISE_CSMA);
+		phy->lpa = 0;
+		phy->expansion = EXPANSION_ENABLENPAGE;
+		phy->ctrl1000 = ADVERTISE_1000FULL;
+		phy->stat1000 = 0;
+		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+	}
+}
+
+static void e1000_print_link_info(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl = er32(CTRL);
+
+	/* Link status message must follow this format for user tools */
+	printk(KERN_INFO "e1000e: %s NIC Link is Up %d Mbps %s, "
+	       "Flow Control: %s\n",
+	       adapter->netdev->name,
+	       adapter->link_speed,
+	       (adapter->link_duplex == FULL_DUPLEX) ?
+	       "Full Duplex" : "Half Duplex",
+	       ((ctrl & E1000_CTRL_TFCE) && (ctrl & E1000_CTRL_RFCE)) ?
+	       "Rx/Tx" :
+	       ((ctrl & E1000_CTRL_RFCE) ? "Rx" :
+		((ctrl & E1000_CTRL_TFCE) ? "Tx" : "None")));
+}
+
+static bool e1000e_has_link(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = false;
+	s32 ret_val = 0;
+
+	/*
+	 * get_link_status is set on LSC (link status) interrupt or
+	 * Rx sequence error interrupt.  get_link_status will stay
+	 * false until the check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (hw->mac.get_link_status) {
+			ret_val = hw->mac.ops.check_for_link(hw);
+			link_active = !hw->mac.get_link_status;
+		} else {
+			link_active = true;
+		}
+		break;
+	case e1000_media_type_fiber:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = !!(er32(STATUS) & E1000_STATUS_LU);
+		break;
+	case e1000_media_type_internal_serdes:
+		ret_val = hw->mac.ops.check_for_link(hw);
+		link_active = adapter->hw.mac.serdes_has_link;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
+	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+	    (er32(CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+		e_info("Gigabit has been disabled, downgrading speed\n");
+	}
+
+	return link_active;
+}
+
+static void e1000e_enable_receives(struct e1000_adapter *adapter)
+{
+	/* make sure the receive unit is started */
+	if ((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+	    (adapter->flags & FLAG_RX_RESTART_NOW)) {
+		struct e1000_hw *hw = &adapter->hw;
+		u32 rctl = er32(RCTL);
+		ew32(RCTL, rctl | E1000_RCTL_EN);
+		adapter->flags &= ~FLAG_RX_RESTART_NOW;
+	}
+}
+
+static void e1000e_check_82574_phy_workaround(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/*
+	 * With 82574 controllers, PHY needs to be checked periodically
+	 * for hung state and reset, if two calls return true
+	 */
+	if (e1000_check_phy_82574(hw))
+		adapter->phy_hang_count++;
+	else
+		adapter->phy_hang_count = 0;
+
+	if (adapter->phy_hang_count > 1) {
+		adapter->phy_hang_count = 0;
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+	}
+}
+
+/**
+ * e1000_watchdog - Timer Call-back
+ * @data: pointer to adapter cast into an unsigned long (pre-4.14 timer
+ * API; on 4.14+ kernels the argument is the timer_list pointer)
+ **/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void e1000_watchdog(struct timer_list *t)
+{
+	struct e1000_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+#else /* < 4.14 */
+static void e1000_watchdog(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+#endif /* < 4.14 */
+
+	/* Do the rest outside of interrupt context */
+	rtdm_schedule_nrt_work(&adapter->watchdog_task);
+
+	/* TODO: make this use queue_delayed_work() */
+}
+
+static void e1000_watchdog_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+					struct e1000_adapter, watchdog_task);
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_phy_info *phy = &adapter->hw.phy;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 link, tctl;
+
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	link = e1000e_has_link(adapter);
+	if ((rtnetif_carrier_ok(netdev)) && link) {
+		e1000e_enable_receives(adapter);
+		goto link_up;
+	}
+
+	if ((e1000e_enable_tx_pkt_filtering(hw)) &&
+	    (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id))
+		e1000_update_mng_vlan(adapter);
+
+	if (link) {
+		if (!rtnetif_carrier_ok(netdev)) {
+			bool txb2b = true;
+
+			/* update snapshot of PHY registers on LSC */
+			e1000_phy_read_status(adapter);
+			mac->ops.get_link_up_info(&adapter->hw,
+						   &adapter->link_speed,
+						   &adapter->link_duplex);
+			e1000_print_link_info(adapter);
+			/*
+			 * On supported PHYs, check for duplex mismatch only
+			 * if link has autonegotiated at 10/100 half
+			 */
+			if ((hw->phy.type == e1000_phy_igp_3 ||
+			     hw->phy.type == e1000_phy_bm) &&
+			    hw->mac.autoneg &&
+			    (adapter->link_speed == SPEED_10 ||
+			     adapter->link_speed == SPEED_100) &&
+			    (adapter->link_duplex == HALF_DUPLEX)) {
+				u16 autoneg_exp;
+
+				e1e_rphy(hw, PHY_AUTONEG_EXP, &autoneg_exp);
+
+				if (!(autoneg_exp & NWAY_ER_LP_NWAY_CAPS))
+					e_info("Autonegotiated half duplex but"
+					       " link partner cannot autoneg. "
+					       " Try forcing full duplex if "
+					       "link gets many collisions.\n");
+			}
+
+			/* adjust timeout factor according to speed/duplex */
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = false;
+				adapter->tx_timeout_factor = 16;
+				break;
+			case SPEED_100:
+				txb2b = false;
+				adapter->tx_timeout_factor = 10;
+				break;
+			}
+
+			/*
+			 * workaround: re-program speed mode bit after
+			 * link-up event
+			 */
+			if ((adapter->flags & FLAG_TARC_SPEED_MODE_BIT) &&
+			    !txb2b) {
+				u32 tarc0;
+				tarc0 = er32(TARC(0));
+				tarc0 &= ~SPEED_MODE_BIT;
+				ew32(TARC(0), tarc0);
+			}
+
+			/*
+			 * disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues
+			 */
+			if (!(adapter->flags & FLAG_TSO_FORCE)) {
+				switch (adapter->link_speed) {
+				case SPEED_10:
+				case SPEED_100:
+					e_info("10/100 speed: disabling TSO\n");
+					netdev->features &= ~NETIF_F_TSO;
+					netdev->features &= ~NETIF_F_TSO6;
+					break;
+				case SPEED_1000:
+					netdev->features |= NETIF_F_TSO;
+					netdev->features |= NETIF_F_TSO6;
+					break;
+				default:
+					/* oops */
+					break;
+				}
+			}
+
+			/*
+			 * enable transmits in the hardware, need to do this
+			 * after setting TARC(0)
+			 */
+			tctl = er32(TCTL);
+			tctl |= E1000_TCTL_EN;
+			ew32(TCTL, tctl);
+
+			/*
+			 * Perform any post-link-up configuration before
+			 * reporting link up.
+			 */
+			if (phy->ops.cfg_on_link_up)
+				phy->ops.cfg_on_link_up(hw);
+
+			rtnetif_carrier_on(netdev);
+
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			/* Link status message must follow this format */
+			printk(KERN_INFO "e1000e: %s NIC Link is Down\n",
+			       adapter->netdev->name);
+			rtnetif_carrier_off(netdev);
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+
+			if (adapter->flags & FLAG_RX_NEEDS_RESTART)
+				rtdm_schedule_nrt_work(&adapter->reset_task);
+		}
+	}
+
+link_up:
+	spin_lock(&adapter->stats64_lock);
+
+	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+	adapter->gorc_old = adapter->stats.gorc;
+	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+	adapter->gotc_old = adapter->stats.gotc;
+	spin_unlock(&adapter->stats64_lock);
+
+	e1000e_update_adaptive(&adapter->hw);
+
+	if (!rtnetif_carrier_ok(netdev) &&
+	    (e1000_desc_unused(tx_ring) + 1 < tx_ring->count)) {
+		/*
+		 * We've lost link, so the controller stops DMA,
+		 * but we've got queued Tx work that's never going
+		 * to get done, so reset controller to flush Tx.
+		 * (Do the reset outside of interrupt context).
+		 */
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+		/* return immediately since reset is imminent */
+		return;
+	}
+
+	/* Simple mode for Interrupt Throttle Rate (ITR) */
+	if (adapter->itr_setting == 4) {
+		/*
+		 * Symmetric Tx/Rx gets a reduced ITR=2000;
+		 * Total asymmetrical Tx or Rx gets ITR=8000;
+		 * everyone else is between 2000-8000.
+		 */
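+		/* i.e. itr = 2000 + 6000 * |gotc - gorc| / (gotc + gorc) */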
+		u32 goc = (adapter->gotc + adapter->gorc) / 10000;
+		u32 dif = (adapter->gotc > adapter->gorc ?
+			    adapter->gotc - adapter->gorc :
+			    adapter->gorc - adapter->gotc) / 10000;
+		u32 itr = goc > 0 ? (dif * 6000 / goc + 2000) : 8000;
+
+		ew32(ITR, 1000000000 / (itr * 256));
+	}
+
+	/* Cause software interrupt to ensure Rx ring is cleaned */
+	if (adapter->msix_entries)
+		ew32(ICS, adapter->rx_ring->ims_val);
+	else
+		ew32(ICS, E1000_ICS_RXDMT0);
+
+	/* flush pending descriptors to memory before detecting Tx hang */
+	e1000e_flush_descriptors(adapter);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = 1;
+
+	/*
+	 * With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0]
+	 */
+	if (e1000e_get_laa_state_82571(hw))
+		e1000e_rar_set(hw, adapter->hw.mac.addr, 0);
+
+	if (adapter->flags2 & FLAG2_CHECK_PHY_HANG)
+		e1000e_check_82574_phy_workaround(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		mod_timer(&adapter->watchdog_timer,
+			  round_jiffies(jiffies + 2 * HZ));
+}
+
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+#define E1000_MAX_PER_TXD	8192
+#define E1000_MAX_TXD_PWR	12
+
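+/*
+ * The RTnet port transmits each rtskb as one linear buffer: the map step
+ * fills a single buffer_info entry and always yields one descriptor (no
+ * scatter-gather, unlike the mainline driver).
+ */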
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct rtskb *skb, unsigned int first)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_buffer *buffer_info;
+	unsigned int offset = 0, size, i;
+
+	i = tx_ring->next_to_use;
+
+	buffer_info = &tx_ring->buffer_info[i];
+	size = skb->len;
+
+	buffer_info->length = size;
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+	buffer_info->dma = rtskb_data_dma_addr(skb, offset);
+	buffer_info->mapped_as_page = false;
+
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[i].segs = 1;
+	tx_ring->buffer_info[i].bytecount = size;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return 1;
+}
+
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+			   int tx_flags, int count)
+{
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+
+	if (tx_flags & E1000_TX_FLAGS_CSUM) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	if (tx_flags & E1000_TX_FLAGS_VLAN) {
+		txd_lower |= E1000_TXD_CMD_VLE;
+		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+	}
+
+	i = tx_ring->next_to_use;
+
+	do {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+
+		i++;
+		if (i == tx_ring->count)
+			i = 0;
+	} while (--count > 0);
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
+	/*
+	 * Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64).
+	 */
+	wmb();
+
+	tx_ring->next_to_use = i;
+
+	if (adapter->flags2 & FLAG2_PCIM2PCI_ARBITER_WA)
+		e1000e_update_tdt_wa(adapter, i);
+	else
+		writel(i, adapter->hw.hw_addr + tx_ring->tail);
+
+	/*
+	 * we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems
+	 */
+	mmiowb();
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
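+/*
+ * Copy DHCP client frames (IPv4/UDP to port 67) to the manageability
+ * firmware when Tx packet filtering is enabled, so the BMC can follow
+ * the address lease; all other traffic is left untouched.
+ */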
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+				    struct rtskb *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	u16 length, offset;
+
+	if (skb->len <= MINIMUM_DHCP_PACKET_SIZE)
+		return 0;
+
+	if (((struct ethhdr *) skb->data)->h_proto != htons(ETH_P_IP))
+		return 0;
+
+	{
+		const struct iphdr *ip = (struct iphdr *)((u8 *)skb->data+14);
+		struct udphdr *udp;
+
+		if (ip->protocol != IPPROTO_UDP)
+			return 0;
+
+		udp = (struct udphdr *)((u8 *)ip + (ip->ihl << 2));
+		if (ntohs(udp->dest) != 67)
+			return 0;
+
+		offset = (u8 *)udp + 8 - skb->data;
+		length = skb->len - offset;
+		return e1000e_mng_write_dhcp_info(hw, (u8 *)udp + 8, length);
+	}
+}
+
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
+static int e1000_xmit_frame(struct rtskb *skb, struct rtnet_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_ring *tx_ring = adapter->tx_ring;
+	rtdm_lockctx_t context;
+	unsigned int first;
+	unsigned int tx_flags = 0;
+	int count = 0;
+
+	if (test_bit(__E1000_DOWN, &adapter->state)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->len <= 0) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (adapter->hw.mac.tx_pkt_filtering)
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	rtdm_lock_get_irqsave(&tx_ring->lock, context);
+
+	first = tx_ring->next_to_use;
+
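+	/*
+	 * RTnet time-stamping hook: if the caller left an xmit_stamp pointer
+	 * into the frame, patch in the transmit time, applying the stored
+	 * value as an offset, in network byte order.
+	 */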
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	/* if count is 0 then mapping error has occurred */
+	count = e1000_tx_map(adapter, skb, first);
+	if (count) {
+		e1000_tx_queue(adapter, tx_flags, count);
+		rtdm_lock_put_irqrestore(&tx_ring->lock, context);
+	} else {
+		tx_ring->buffer_info[first].time_stamp = 0;
+		tx_ring->next_to_use = first;
+		rtdm_lock_put_irqrestore(&tx_ring->lock, context);
+		kfree_rtskb(skb);
+	}
+
+	return NETDEV_TX_OK;
+}
+
+static void e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter;
+	adapter = container_of(work, struct e1000_adapter, reset_task);
+
+	/* don't run the task if already down */
+	if (test_bit(__E1000_DOWN, &adapter->state))
+		return;
+
+	if (!((adapter->flags & FLAG_RX_NEEDS_RESTART) &&
+	      (adapter->flags & FLAG_RX_RESTART_NOW))) {
+		e1000e_dump(adapter);
+		e_err("Reset adapter\n");
+	}
+	e1000e_reinit_locked(adapter);
+}
+
+static int e1000_init_phy_wakeup(struct e1000_adapter *adapter, u32 wufc)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 i, mac_reg;
+	u16 phy_reg, wuc_enable;
+	int retval = 0;
+
+	/* copy MAC RARs to PHY RARs */
+	e1000_copy_rx_addrs_to_phy_ich8lan(hw);
+
+	retval = hw->phy.ops.acquire(hw);
+	if (retval) {
+		e_err("Could not acquire PHY\n");
+		return retval;
+	}
+
+	/* Enable access to wakeup registers on and set page to BM_WUC_PAGE */
+	retval = e1000_enable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+	if (retval)
+		goto out;
+
+	/* copy MAC MTA to PHY MTA - only needed for pchlan */
+	for (i = 0; i < adapter->hw.mac.mta_reg_count; i++) {
+		mac_reg = E1000_READ_REG_ARRAY(hw, E1000_MTA, i);
+		hw->phy.ops.write_reg_page(hw, BM_MTA(i),
+					   (u16)(mac_reg & 0xFFFF));
+		hw->phy.ops.write_reg_page(hw, BM_MTA(i) + 1,
+					   (u16)((mac_reg >> 16) & 0xFFFF));
+	}
+
+	/* configure PHY Rx Control register */
+	hw->phy.ops.read_reg_page(&adapter->hw, BM_RCTL, &phy_reg);
+	mac_reg = er32(RCTL);
+	if (mac_reg & E1000_RCTL_UPE)
+		phy_reg |= BM_RCTL_UPE;
+	if (mac_reg & E1000_RCTL_MPE)
+		phy_reg |= BM_RCTL_MPE;
+	phy_reg &= ~(BM_RCTL_MO_MASK);
+	if (mac_reg & E1000_RCTL_MO_3)
+		phy_reg |= (((mac_reg & E1000_RCTL_MO_3) >> E1000_RCTL_MO_SHIFT)
+				<< BM_RCTL_MO_SHIFT);
+	if (mac_reg & E1000_RCTL_BAM)
+		phy_reg |= BM_RCTL_BAM;
+	if (mac_reg & E1000_RCTL_PMCF)
+		phy_reg |= BM_RCTL_PMCF;
+	mac_reg = er32(CTRL);
+	if (mac_reg & E1000_CTRL_RFCE)
+		phy_reg |= BM_RCTL_RFCE;
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_RCTL, phy_reg);
+
+	/* enable PHY wakeup in MAC register */
+	ew32(WUFC, wufc);
+	ew32(WUC, E1000_WUC_PHY_WAKE | E1000_WUC_PME_EN);
+
+	/* configure and enable PHY wakeup in PHY registers */
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUFC, wufc);
+	hw->phy.ops.write_reg_page(&adapter->hw, BM_WUC, E1000_WUC_PME_EN);
+
+	/* activate PHY wakeup */
+	wuc_enable |= BM_WUC_ENABLE_BIT | BM_WUC_HOST_WU_BIT;
+	retval = e1000_disable_phy_wakeup_reg_access_bm(hw, &wuc_enable);
+	if (retval)
+		e_err("Could not set PHY Host Wakeup bit\n");
+out:
+	hw->phy.ops.release(hw);
+
+	return retval;
+}
+
+static int __e1000_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			    bool runtime)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, ctrl_ext, rctl, status;
+	/* Runtime suspend should only enable wakeup for link changes */
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+	int retval = 0;
+
+	rtnetif_device_detach(netdev);
+
+	if (rtnetif_running(netdev)) {
+		WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+		e1000e_down(adapter);
+		e1000_free_irq(adapter);
+	}
+	e1000e_reset_interrupt_capability(adapter);
+
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+
+	status = er32(STATUS);
+	if (status & E1000_STATUS_LU)
+		wufc &= ~E1000_WUFC_LNKC;
+
+	if (wufc) {
+		e1000_setup_rctl(adapter);
+		e1000_set_multi(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & E1000_WUFC_MC) {
+			rctl = er32(RCTL);
+			rctl |= E1000_RCTL_MPE;
+			ew32(RCTL, rctl);
+		}
+
+		ctrl = er32(CTRL);
+		/* advertise wake from D3Cold */
+		#define E1000_CTRL_ADVD3WUC 0x00100000
+		/* phy power management enable */
+		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+		ctrl |= E1000_CTRL_ADVD3WUC;
+		if (!(adapter->flags2 & FLAG2_HAS_PHY_WAKEUP))
+			ctrl |= E1000_CTRL_EN_PHY_PWR_MGMT;
+		ew32(CTRL, ctrl);
+
+		if (adapter->hw.phy.media_type == e1000_media_type_fiber ||
+		    adapter->hw.phy.media_type ==
+		    e1000_media_type_internal_serdes) {
+			/* keep the laser running in D3 */
+			ctrl_ext = er32(CTRL_EXT);
+			ctrl_ext |= E1000_CTRL_EXT_SDP3_DATA;
+			ew32(CTRL_EXT, ctrl_ext);
+		}
+
+		if (adapter->flags & FLAG_IS_ICH)
+			e1000_suspend_workarounds_ich8lan(&adapter->hw);
+
+		/* Allow time for pending master requests to run */
+		e1000e_disable_pcie_master(&adapter->hw);
+
+		if (adapter->flags2 & FLAG2_HAS_PHY_WAKEUP) {
+			/* enable wakeup by the PHY */
+			retval = e1000_init_phy_wakeup(adapter, wufc);
+			if (retval)
+				return retval;
+		} else {
+			/* enable wakeup by the MAC */
+			ew32(WUFC, wufc);
+			ew32(WUC, E1000_WUC_PME_EN);
+		}
+	} else {
+		ew32(WUC, 0);
+		ew32(WUFC, 0);
+	}
+
+	*enable_wake = !!wufc;
+
+	/* make sure adapter isn't asleep if manageability is enabled */
+	if ((adapter->flags & FLAG_MNG_PT_ENABLED) ||
+	    (hw->mac.ops.check_mng_mode(hw)))
+		*enable_wake = true;
+
+	if (adapter->hw.phy.type == e1000_phy_igp_3)
+		e1000e_igp3_phy_powerdown_workaround_ich8lan(&adapter->hw);
+
+	/*
+	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	e1000e_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+static void e1000_power_off(struct pci_dev *pdev, bool sleep, bool wake)
+{
+	if (sleep && wake) {
+		pci_prepare_to_sleep(pdev);
+		return;
+	}
+
+	pci_wake_from_d3(pdev, wake);
+	pci_set_power_state(pdev, PCI_D3hot);
+}
+
+static void e1000_complete_shutdown(struct pci_dev *pdev, bool sleep,
+				    bool wake)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	/*
+	 * The pci-e switch on some quad port adapters will report a
+	 * correctable error when the MAC transitions from D0 to D3.  To
+	 * prevent this we need to mask off the correctable errors on the
+	 * downstream port of the pci-e switch.
+	 */
+	if (adapter->flags & FLAG_IS_QUAD_PORT) {
+		struct pci_dev *us_dev = pdev->bus->self;
+		int pos = pci_pcie_cap(us_dev);
+		u16 devctl;
+
+		pci_read_config_word(us_dev, pos + PCI_EXP_DEVCTL, &devctl);
+		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL,
+				      (devctl & ~PCI_EXP_DEVCTL_CERE));
+
+		e1000_power_off(pdev, sleep, wake);
+
+		pci_write_config_word(us_dev, pos + PCI_EXP_DEVCTL, devctl);
+	} else {
+		e1000_power_off(pdev, sleep, wake);
+	}
+}
+
+static void __e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+	int pos;
+	u16 reg16;
+
+	/*
+	 * Both device and parent should have the same ASPM setting.
+	 * Disable ASPM in downstream component first and then upstream.
+	 */
+	pos = pci_pcie_cap(pdev);
+	pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+	reg16 &= ~state;
+	pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
+
+	if (!pdev->bus->self)
+		return;
+
+	pos = pci_pcie_cap(pdev->bus->self);
+	pci_read_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, &reg16);
+	reg16 &= ~state;
+	pci_write_config_word(pdev->bus->self, pos + PCI_EXP_LNKCTL, reg16);
+}
+
+static void e1000e_disable_aspm(struct pci_dev *pdev, u16 state)
+{
+	dev_info(&pdev->dev, "Disabling ASPM %s %s\n",
+		 (state & PCIE_LINK_STATE_L0S) ? "L0s" : "",
+		 (state & PCIE_LINK_STATE_L1) ? "L1" : "");
+
+	__e1000e_disable_aspm(pdev, state);
+}
+
+static void e1000_shutdown(struct pci_dev *pdev)
+{
+	bool wake = false;
+
+	__e1000_shutdown(pdev, &wake, false);
+
+	if (system_state == SYSTEM_POWER_OFF)
+		e1000_complete_shutdown(pdev, false, wake);
+}
+
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	rtnetif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (rtnetif_running(netdev))
+		e1000e_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	struct e1000_hw *hw = &adapter->hw;
+	u16 aspm_disable_flag = 0;
+	int err;
+	pci_ers_result_t result;
+
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L0S)
+		aspm_disable_flag = PCIE_LINK_STATE_L0S;
+	if (adapter->flags2 & FLAG2_DISABLE_ASPM_L1)
+		aspm_disable_flag |= PCIE_LINK_STATE_L1;
+	if (aspm_disable_flag)
+		e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pdev->state_saved = true;
+		pci_restore_state(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		e1000e_reset(adapter);
+		ew32(WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	pci_aer_clear_nonfatal_status(pdev);
+
+	return result;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+
+	e1000_init_manageability_pt(adapter);
+
+	if (rtnetif_running(netdev)) {
+		if (e1000e_up(adapter)) {
+			dev_err(&pdev->dev,
+				"can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	rtnetif_device_attach(netdev);
+
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
+	 * is up.  For all other cases, let the f/w know that the h/w is now
+	 * under the control of the driver.
+	 */
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_get_hw_control(adapter);
+
+}
+
+static void e1000_print_device_info(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct rtnet_device *netdev = adapter->netdev;
+	u32 ret_val;
+	u8 pba_str[E1000_PBANUM_LENGTH];
+
+	/* print bus type/speed/width info */
+	e_info("(PCI Express:2.5GT/s:%s) %pM\n",
+	       /* bus width */
+	       ((hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+		"Width x1"),
+	       /* MAC address */
+	       netdev->dev_addr);
+	e_info("Intel(R) PRO/%s Network Connection\n",
+	       (hw->phy.type == e1000_phy_ife) ? "10/100" : "1000");
+	ret_val = e1000_read_pba_string_generic(hw, pba_str,
+						E1000_PBANUM_LENGTH);
+	if (ret_val)
+		strncpy((char *)pba_str, "Unknown", sizeof(pba_str) - 1);
+	e_info("MAC: %d, PHY: %d, PBA No: %s\n",
+	       hw->mac.type, hw->phy.type, pba_str);
+}
+
+static void e1000_eeprom_checks(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int ret_val;
+	u16 buf = 0;
+
+	if (hw->mac.type != e1000_82573)
+		return;
+
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &buf);
+	if (!ret_val && (!(le16_to_cpu(buf) & (1 << 0)))) {
+		/* Deep Smart Power Down (DSPD) */
+		dev_warn(&adapter->pdev->dev,
+			 "Warning: detected DSPD enabled in EEPROM\n");
+	}
+}
+
+static dma_addr_t e1000_map_rtskb(struct rtnet_device *netdev,
+				  struct rtskb *skb)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+	dma_addr_t addr;
+
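+	/*
+	 * rtskbs are mapped once up front and reused for both Rx and Tx,
+	 * hence the bidirectional mapping of the full buffer.
+	 */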
+	addr = dma_map_single(dev, skb->buf_start, RTSKB_SIZE,
+			      DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, addr)) {
+		dev_err(dev, "DMA map failed\n");
+		return RTSKB_UNMAPPED;
+	}
+	return addr;
+}
+
+static void e1000_unmap_rtskb(struct rtnet_device *netdev,
+			      struct rtskb *skb)
+{
+	struct e1000_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+
+	dma_unmap_single(dev, skb->buf_dma_addr, RTSKB_SIZE,
+			 DMA_BIDIRECTIONAL);
+}
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int e1000_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct rtnet_device *netdev;
+	struct e1000_adapter *adapter;
+	struct e1000_hw *hw;
+	const struct e1000_info *ei = e1000_info_tbl[ent->driver_data];
+	resource_size_t mmio_start, mmio_len;
+	resource_size_t flash_start, flash_len;
+
+	static int cards_found;
+	u16 aspm_disable_flag = 0;
+	int i, err, pci_using_dac;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+
+	if (ei->flags2 & FLAG2_DISABLE_ASPM_L0S)
+		aspm_disable_flag = PCIE_LINK_STATE_L0S;
+	if (ei->flags2 & FLAG2_DISABLE_ASPM_L1)
+		aspm_disable_flag |= PCIE_LINK_STATE_L1;
+	if (aspm_disable_flag)
+		e1000e_disable_aspm(pdev, aspm_disable_flag);
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	pci_using_dac = 0;
+	err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(64));
+	if (!err) {
+		err = dma_set_coherent_mask(&pdev->dev, DMA_BIT_MASK(64));
+		if (!err)
+			pci_using_dac = 1;
+	} else {
+		err = dma_set_mask(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			err = dma_set_coherent_mask(&pdev->dev,
+						    DMA_BIT_MASK(32));
+			if (err) {
+				dev_err(&pdev->dev, "No usable DMA "
+					"configuration, aborting\n");
+				goto err_dma;
+			}
+		}
+	}
+
+	err = pci_request_selected_regions_exclusive(pdev,
+					  pci_select_bars(pdev, IORESOURCE_MEM),
+					  e1000e_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	/* AER (Advanced Error Reporting) hooks */
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+	/* PCI config space info */
+	err = pci_save_state(pdev);
+	if (err)
+		goto err_alloc_etherdev;
+
+	err = -ENOMEM;
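+	/*
+	 * rtskb pool sizing: room for two full Rx rings plus, presumably,
+	 * the 256-descriptor Tx ring configured further down.
+	 */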
+	netdev = rt_alloc_etherdev(sizeof(*adapter),
+				2 * RT_E1000E_NUM_RXD + 256);
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	rtdev_alloc_name(netdev, "rteth%d");
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+	netdev->vers = RTDEV_VERS_2_0;
+	netdev->sysbind = &pdev->dev;
+
+	netdev->irq = pdev->irq;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev->priv;
+	hw = &adapter->hw;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->ei = ei;
+	adapter->pba = ei->pba;
+	adapter->flags = ei->flags;
+	adapter->flags2 = ei->flags2;
+	adapter->hw.adapter = adapter;
+	adapter->hw.mac.type = ei->mac;
+	adapter->max_hw_frame_size = ei->max_hw_frame_size;
+	adapter->msg_enable = (1 << NETIF_MSG_DRV | NETIF_MSG_PROBE) - 1;
+
+	mmio_start = pci_resource_start(pdev, 0);
+	mmio_len = pci_resource_len(pdev, 0);
+
+	err = -EIO;
+	adapter->hw.hw_addr = ioremap(mmio_start, mmio_len);
+	if (!adapter->hw.hw_addr)
+		goto err_ioremap;
+
+	if ((adapter->flags & FLAG_HAS_FLASH) &&
+	    (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		flash_start = pci_resource_start(pdev, 1);
+		flash_len = pci_resource_len(pdev, 1);
+		adapter->hw.flash_address = ioremap(flash_start, flash_len);
+		if (!adapter->hw.flash_address)
+			goto err_flashmap;
+	}
+
+	/* construct the net_device struct */
+	netdev->open = e1000_open;
+	netdev->stop = e1000_close;
+	netdev->hard_start_xmit = e1000_xmit_frame;
+	//netdev->get_stats = e1000_get_stats;
+	netdev->map_rtskb = e1000_map_rtskb;
+	netdev->unmap_rtskb = e1000_unmap_rtskb;
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	netdev->mem_start = mmio_start;
+	netdev->mem_end = mmio_start + mmio_len;
+
+	adapter->bd_number = cards_found++;
+
+	e1000e_check_options(adapter);
+
+	/* setup adapter struct */
+	err = e1000_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+
+	err = ei->get_variants(adapter);
+	if (err)
+		goto err_hw_init;
+
+	if ((adapter->flags & FLAG_IS_ICH) &&
+	    (adapter->flags & FLAG_READ_ONLY_NVM))
+		e1000e_write_protect_nvm_ich8lan(&adapter->hw);
+
+	hw->mac.ops.get_bus_info(&adapter->hw);
+
+	adapter->hw.phy.autoneg_wait_to_complete = 0;
+
+	/* Copper options */
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		adapter->hw.phy.mdix = AUTO_ALL_MODES;
+		adapter->hw.phy.disable_polarity_correction = 0;
+		adapter->hw.phy.ms_type = e1000_ms_hw_default;
+	}
+
+	if (e1000_check_reset_block(&adapter->hw))
+		e_info("PHY reset is blocked due to SOL/IDER session.\n");
+
+	/* Set initial default active device features */
+	netdev->features = (NETIF_F_SG |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_HW_CSUM);
+
+	if (adapter->flags & FLAG_HAS_HW_VLAN_FILTER)
+		netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+
+	if (pci_using_dac) {
+		netdev->features |= NETIF_F_HIGHDMA;
+	}
+
+	if (e1000e_enable_mng_pass_thru(&adapter->hw))
+		adapter->flags |= FLAG_MNG_PT_ENABLED;
+
+	/*
+	 * before reading the NVM, reset the controller to
+	 * put the device in a known good starting state
+	 */
+	adapter->hw.mac.ops.reset_hw(&adapter->hw);
+
+	/*
+	 * systems with ASPM and others may see the checksum fail on the first
+	 * attempt. Let's give it a few tries
+	 */
+	for (i = 0;; i++) {
+		if (e1000_validate_nvm_checksum(&adapter->hw) >= 0)
+			break;
+		if (i == 2) {
+			e_err("The NVM Checksum Is Not Valid\n");
+			err = -EIO;
+			goto err_eeprom;
+		}
+	}
+
+	e1000_eeprom_checks(adapter);
+
+	/* copy the MAC address */
+	if (e1000e_read_mac_addr(&adapter->hw))
+		e_err("NVM Read Error while reading MAC address\n");
+
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+	timer_setup(&adapter->watchdog_timer, e1000_watchdog, 0);
+	timer_setup(&adapter->phy_info_timer, e1000_update_phy_info, 0);
+#else /* < 4.14 */
+	init_timer(&adapter->watchdog_timer);
+	adapter->watchdog_timer.function = e1000_watchdog;
+	adapter->watchdog_timer.data = (unsigned long) adapter;
+
+	init_timer(&adapter->phy_info_timer);
+	adapter->phy_info_timer.function = e1000_update_phy_info;
+	adapter->phy_info_timer.data = (unsigned long) adapter;
+#endif /* < 4.14 */
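+	/*
+	 * On kernels >= 4.14 the timer callback receives the timer_list
+	 * pointer (resolved back to the adapter via from_timer()); older
+	 * kernels pass the adapter through the legacy .data field instead.
+	 */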
+
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+	INIT_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+	INIT_WORK(&adapter->downshift_task, e1000e_downshift_workaround);
+	INIT_WORK(&adapter->update_phy_task, e1000e_update_phy_task);
+
+	rtdm_nrtsig_init(&adapter->mod_timer_sig, e1000e_mod_watchdog_timer,
+			(void *)&adapter->watchdog_timer);
+	rtdm_nrtsig_init(&adapter->downshift_sig, e1000e_trigger_downshift,
+			&adapter->downshift_task);
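+	/*
+	 * The nrtsig handlers run in regular Linux context; they let the
+	 * real-time paths defer watchdog timer re-arming and the downshift
+	 * workaround instead of calling Linux services from Xenomai context.
+	 */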
+
+	/* Initialize link parameters. User can change them with ethtool */
+	adapter->hw.mac.autoneg = 1;
+	adapter->fc_autoneg = 1;
+	adapter->hw.fc.requested_mode = e1000_fc_default;
+	adapter->hw.fc.current_mode = e1000_fc_default;
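+	/* 0x2f advertises 10/100 half and full duplex plus 1000 full,
+	 * i.e. the usual AUTONEG_ADVERTISE_SPEED_DEFAULT mask */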
+	adapter->hw.phy.autoneg_advertised = 0x2f;
+
+	/* ring size defaults */
+	adapter->rx_ring->count = RT_E1000E_NUM_RXD;
+	adapter->tx_ring->count = 256;
+
+	/*
+	 * Initial Wake on LAN setting - If APM wake is enabled in
+	 * the EEPROM, enable the ACPI Magic Packet filter
+	 */
+	if (adapter->flags & FLAG_APME_IN_WUC) {
+		/* APME bit in EEPROM is mapped to WUC.APME */
+		eeprom_data = er32(WUC);
+		eeprom_apme_mask = E1000_WUC_APME;
+		if ((hw->mac.type > e1000_ich10lan) &&
+		    (eeprom_data & E1000_WUC_PHY_WAKE))
+			adapter->flags2 |= FLAG2_HAS_PHY_WAKEUP;
+	} else if (adapter->flags & FLAG_APME_IN_CTRL3) {
+		if (adapter->flags & FLAG_APME_CHECK_PORT_B &&
+		    (adapter->hw.bus.func == 1))
+			e1000_read_nvm(&adapter->hw,
+				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+		else
+			e1000_read_nvm(&adapter->hw,
+				NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+	}
+
+	/* fetch WoL from EEPROM */
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/*
+	 * now that we have the eeprom settings, apply the special cases
+	 * where the eeprom may be wrong or the board simply won't support
+	 * wake on lan on a particular port
+	 */
+	if (!(adapter->flags & FLAG_HAS_WOL))
+		adapter->eeprom_wol = 0;
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
+	device_set_wakeup_enable(&adapter->pdev->dev, adapter->wol);
+
+	/* save off EEPROM version number */
+	e1000_read_nvm(&adapter->hw, 5, 1, &adapter->eeprom_vers);
+
+	/* reset the hardware with the new settings */
+	e1000e_reset(adapter);
+
+	/*
+	 * If the controller has AMT, do not set DRV_LOAD until the interface
+	 * is up.  For all other cases, let the f/w know that the h/w is now
+	 * under the control of the driver.
+	 */
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_get_hw_control(adapter);
+
+	strncpy(netdev->name, "rteth%d", sizeof(netdev->name) - 1);
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_register;
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	rtnetif_carrier_off(netdev);
+
+	e1000_print_device_info(adapter);
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_put_noidle(&pdev->dev);
+
+	return 0;
+
+err_register:
+	rtdm_nrtsig_destroy(&adapter->downshift_sig);
+	rtdm_nrtsig_destroy(&adapter->mod_timer_sig);
+	if (!(adapter->flags & FLAG_HAS_AMT))
+		e1000e_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+err_hw_init:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	e1000e_reset_interrupt_capability(adapter);
+err_flashmap:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev->priv;
+	bool down = test_bit(__E1000_DOWN, &adapter->state);
+
+	/*
+	 * The timers may be rescheduled, so explicitly disable them
+	 * from being rescheduled.
+	 */
+	if (!down)
+		set_bit(__E1000_DOWN, &adapter->state);
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	rtdm_nrtsig_destroy(&adapter->downshift_sig);
+	rtdm_nrtsig_destroy(&adapter->mod_timer_sig);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+	cancel_work_sync(&adapter->downshift_task);
+	cancel_work_sync(&adapter->update_phy_task);
+
+	if (!(netdev->flags & IFF_UP))
+		e1000_power_down_phy(adapter);
+
+	/* Don't lie to e1000_close() down the road. */
+	if (!down)
+		clear_bit(__E1000_DOWN, &adapter->state);
+	rt_unregister_rtnetdev(netdev);
+
+	if (pci_dev_run_wake(pdev))
+		pm_runtime_get_noresume(&pdev->dev);
+
+	/*
+	 * Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	e1000e_release_hw_control(adapter);
+
+	e1000e_reset_interrupt_capability(adapter);
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	rtdev_free(netdev);
+
+	/* AER disable */
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/* PCI Error Recovery (ERS) */
+static struct pci_error_handlers e1000_err_handler = {
+	.error_detected = e1000_io_error_detected,
+	.slot_reset = e1000_io_slot_reset,
+	.resume = e1000_io_resume,
+};
+
+static const struct pci_device_id e1000_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_COPPER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_FIBER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_COPPER_LP), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_QUAD_FIBER), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_DUAL), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571EB_SERDES_QUAD), board_82571 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82571PT_QUAD_COPPER), board_82571 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI), board_82572 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_COPPER), board_82572 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_FIBER), board_82572 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82572EI_SERDES), board_82572 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E), board_82573 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573E_IAMT), board_82573 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82573L), board_82573 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574L), board_82574 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82574LA), board_82574 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82583V), board_82583 },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_DPT),
+	  board_80003es2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_COPPER_SPT),
+	  board_80003es2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_DPT),
+	  board_80003es2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_80003ES2LAN_SERDES_SPT),
+	  board_80003es2lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_G), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IFE_GT), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_AMT), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_C), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_IGP_M_AMT), board_ich8lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH8_82567V_3), board_ich8lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_G), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IFE_GT), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_AMT), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_C), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_BM), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_AMT), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH9_IGP_M_V), board_ich9lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LM), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_LF), board_ich9lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_R_BM_V), board_ich9lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LM), board_ich10lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_LF), board_ich10lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_ICH10_D_BM_V), board_ich10lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LM), board_pchlan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_M_HV_LC), board_pchlan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DM), board_pchlan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_D_HV_DC), board_pchlan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_LM), board_pch2lan },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH2_LV_V), board_pch2lan },
+
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_LM), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPT_I217_V), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_LM), board_pch_lpt },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_PCH_LPTLP_I218_V), board_pch_lpt },
+
+	{ }	/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
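+/* Exporting the ID table lets udev/modprobe autoload this module when a
+ * matching PCI device is discovered. */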
+
+/* PCI Device API Driver */
+static struct pci_driver e1000_driver = {
+	.name     = e1000e_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = e1000_remove,
+	.shutdown = e1000_shutdown,
+	.err_handler = &e1000_err_handler
+};
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+	int ret;
+	pr_info("Intel(R) PRO/1000 Network Driver - %s\n",
+		e1000e_driver_version);
+	pr_info("Copyright(c) 1999 - 2011 Intel Corporation.\n");
+	ret = pci_register_driver(&e1000_driver);
+
+	return ret;
+}
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+	pci_unregister_driver(&e1000_driver);
+}
+module_exit(e1000_exit_module);
+
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+/* e1000_main.c */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c
new file mode 100644
index 0000000..22a6f5a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/param.c
@@ -0,0 +1,484 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/netdevice.h>
+#include <linux/pci.h>
+
+#include "e1000.h"
+
+/*
+ * This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+#define COPYBREAK_DEFAULT 256
+unsigned int copybreak = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
+/*
+ * All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#define E1000_PARAM(X, desc)					\
+	static int X[E1000_MAX_NIC+1]				\
+		= E1000_PARAM_INIT;				\
+	static unsigned int num_##X;				\
+	module_param_array_named(X, X, int, &num_##X, 0);	\
+	MODULE_PARM_DESC(X, desc);
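+/*
+ * For illustration: E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay")
+ * expands to a TxIntDelay[E1000_MAX_NIC + 1] array preset to OPTION_UNSET,
+ * a num_TxIntDelay count of the values actually supplied, and the module
+ * parameter glue, so per-port values can be passed at load time, e.g.
+ * "TxIntDelay=0,0" for the first two ports.
+ */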
+
+/*
+ * Transmit Interrupt Delay in units of 1.024 microseconds
+ * Tx interrupt delay needs to typically be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ *
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV 0
+#define MAX_TXDELAY 0xFFFF
+#define MIN_TXDELAY 0
+
+/*
+ * Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ * 
+ *
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV 0
+#define MAX_TXABSDELAY 0xFFFF
+#define MIN_TXABSDELAY 0
+
+/*
+ * Receive Interrupt Delay in units of 1.024 microseconds
+ * hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define MAX_RXDELAY 0xFFFF
+#define MIN_RXDELAY 0
+
+/*
+ * Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define MAX_RXABSDELAY 0xFFFF
+#define MIN_RXABSDELAY 0
+
+/*
+ * Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ *
+ * Default Value: 0 for rtnet
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR 0
+#define MAX_ITR 100000
+#define MIN_ITR 100
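+/* An ITR of 0 disables interrupt throttling entirely: every packet raises
+ * an immediate interrupt, trading host CPU load for the deterministic
+ * latency RTnet is after. */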
+
+/* IntMode (Interrupt Mode)
+ *
+ * Valid Range: 0 - 2
+ *
+ * Default Value: 2 (MSI-X)
+ */
+E1000_PARAM(IntMode, "Interrupt Mode");
+#define MAX_INTMODE	2
+#define MIN_INTMODE	0
+
+/*
+ * Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/*
+ * Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+/*
+ * Write Protect NVM
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(WriteProtectNVM, "Write-protect NVM [WARNING: disabling this can lead to corrupted NVM]");
+
+/*
+ * Enable CRC Stripping
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(CrcStripping, "Enable CRC Stripping, disable if your BMC needs " \
+                          "the CRC");
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int e1000_validate_option(unsigned int *value,
+				 const struct e1000_option *opt,
+				 struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			e_info("%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			e_info("%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			e_info("%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					e_info("%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	e_info("Invalid %s value specified (%i) %s\n", opt->name, *value,
+	       opt->err);
+	*value = opt->def;
+	return -1;
+}
+
+/**
+ * e1000e_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void e1000e_check_options(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int bd = adapter->bd_number;
+
+	if (bd >= E1000_MAX_NIC) {
+		e_notice("Warning: no configuration for board #%i\n", bd);
+		e_notice("Using defaults for all values\n");
+	}
+
+	{ /* Transmit Interrupt Delay */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY } }
+		};
+
+		if (num_TxIntDelay > bd) {
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY } }
+		};
+
+		if (num_TxAbsIntDelay > bd) {
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Interrupt Delay */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY } }
+		};
+
+		if (num_RxIntDelay > bd) {
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY } }
+		};
+
+		if (num_RxAbsIntDelay > bd) {
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+					      adapter);
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+	}
+	{ /* Interrupt Throttling Rate */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of "
+				__MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR } }
+		};
+
+		if (num_InterruptThrottleRate > bd) {
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				e_info("%s turned off\n", opt.name);
+				break;
+			case 1:
+				e_info("%s set to dynamic mode\n", opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 3:
+				e_info("%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 4:
+				e_info("%s set to simplified (2000-8000 ints) "
+				       "mode\n", opt.name);
+				adapter->itr_setting = 4;
+				break;
+			default:
+				/*
+				 * Save the setting, because the dynamic bits
+				 * change itr.
+				 */
+				if (e1000_validate_option(&adapter->itr, &opt,
+							  adapter) &&
+				    (adapter->itr == 3)) {
+					/*
+					 * In case of invalid user value,
+					 * default to conservative mode.
+					 */
+					adapter->itr_setting = adapter->itr;
+					adapter->itr = 20000;
+				} else {
+					/*
+					 * Clear the lower two bits because
+					 * they are used as control.
+					 */
+					adapter->itr_setting =
+						adapter->itr & ~3;
+				}
+				break;
+			}
+		} else {
+			adapter->itr_setting = opt.def;
+			adapter->itr = 0;
+		}
+	}
+	{ /* Interrupt Mode */
+		static const struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Mode",
+			.err  = "defaulting to 2 (MSI-X)",
+			.def  = E1000E_INT_MODE_MSIX,
+			.arg  = { .r = { .min = MIN_INTMODE,
+					 .max = MAX_INTMODE } }
+		};
+
+		if (num_IntMode > bd) {
+			unsigned int int_mode = IntMode[bd];
+			e1000_validate_option(&int_mode, &opt, adapter);
+			adapter->int_mode = int_mode;
+		} else {
+			adapter->int_mode = opt.def;
+		}
+	}
+	{ /* Smart Power Down */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+		if (num_SmartPowerDownEnable > bd) {
+			unsigned int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			if ((adapter->flags & FLAG_HAS_SMART_POWER_DOWN)
+			    && spd)
+				adapter->flags |= FLAG_SMART_POWER_DOWN;
+		}
+	}
+	{ /* CRC Stripping */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "CRC Stripping",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_CrcStripping > bd) {
+			unsigned int crc_stripping = CrcStripping[bd];
+			e1000_validate_option(&crc_stripping, &opt, adapter);
+			if (crc_stripping == OPTION_ENABLED)
+				adapter->flags2 |= FLAG2_CRC_STRIPPING;
+		} else {
+			adapter->flags2 |= FLAG2_CRC_STRIPPING;
+		}
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (num_KumeranLockLoss > bd) {
+			unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			if (hw->mac.type == e1000_ich8lan)
+				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+								kmrn_lock_loss);
+		} else {
+			if (hw->mac.type == e1000_ich8lan)
+				e1000e_set_kmrn_lock_loss_workaround_ich8lan(hw,
+								       opt.def);
+		}
+	}
+	{ /* Write-protect NVM */
+		static const struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Write-protect NVM",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+		if (adapter->flags & FLAG_IS_ICH) {
+			if (num_WriteProtectNVM > bd) {
+				unsigned int write_protect_nvm = WriteProtectNVM[bd];
+				e1000_validate_option(&write_protect_nvm, &opt,
+						      adapter);
+				if (write_protect_nvm)
+					adapter->flags |= FLAG_READ_ONLY_NVM;
+			} else {
+				if (opt.def)
+					adapter->flags |= FLAG_READ_ONLY_NVM;
+			}
+		}
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c
new file mode 100644
index 0000000..9ec7835
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/e1000e/phy.c
@@ -0,0 +1,3385 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2011 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/delay.h>
+
+#include "e1000.h"
+
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+static s32 e1000_wait_autoneg(struct e1000_hw *hw);
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg);
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+					  u16 *data, bool read, bool page_set);
+static u32 e1000_get_phy_addr_for_hv_page(u32 page);
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+                                          u16 *data, bool read);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_m88_cable_length_table)
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21, 0, 0, 0, 3,
+	6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41, 6, 10, 14, 18, 22,
+	26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61, 21, 26, 31, 35, 40,
+	44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82, 40, 45, 51, 56, 61,
+	66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104, 60, 66, 72, 77, 82,
+	87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121, 83, 89, 95,
+	100, 105, 109, 113, 116, 119, 122, 124, 104, 109, 114, 118, 121,
+	124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_igp_2_cable_length_table)
+
+#define BM_PHY_REG_PAGE(offset) \
+	((u16)(((offset) >> PHY_PAGE_SHIFT) & 0xFFFF))
+#define BM_PHY_REG_NUM(offset) \
+	((u16)(((offset) & MAX_PHY_REG_ADDRESS) |\
+	 (((offset) >> (PHY_UPPER_SHIFT - PHY_PAGE_SHIFT)) &\
+		~MAX_PHY_REG_ADDRESS)))
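+/*
+ * BM PHY offsets pack the page number above PHY_PAGE_SHIFT and the
+ * register number in the low bits; the two macros above recover them,
+ * e.g. BM_PHY_REG_PAGE(PHY_REG(769, 17)) yields 769 and
+ * BM_PHY_REG_NUM(PHY_REG(769, 17)) yields 17.
+ */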
+
+#define HV_INTC_FC_PAGE_START             768
+#define I82578_ADDR_REG                   29
+#define I82577_ADDR_REG                   16
+#define I82577_CFG_REG                    22
+#define I82577_CFG_ASSERT_CRS_ON_TX       (1 << 15)
+#define I82577_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82577_CTRL_REG                   23
+
+/* 82577 specific PHY registers */
+#define I82577_PHY_CTRL_2            18
+#define I82577_PHY_STATUS_2          26
+#define I82577_PHY_DIAG_STATUS       31
+
+/* I82577 PHY Status 2 */
+#define I82577_PHY_STATUS2_REV_POLARITY   0x0400
+#define I82577_PHY_STATUS2_MDIX           0x0800
+#define I82577_PHY_STATUS2_SPEED_MASK     0x0300
+#define I82577_PHY_STATUS2_SPEED_1000MBPS 0x0200
+
+/* I82577 PHY Control 2 */
+#define I82577_PHY_CTRL2_AUTO_MDIX        0x0400
+#define I82577_PHY_CTRL2_FORCE_MDI_MDIX   0x0200
+
+/* I82577 PHY Diagnostics Status */
+#define I82577_DSTATUS_CABLE_LENGTH       0x03FC
+#define I82577_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* BM PHY Copper Specific Control 1 */
+#define BM_CS_CTRL1                       16
+
+#define HV_MUX_DATA_CTRL               PHY_REG(776, 16)
+#define HV_MUX_DATA_CTRL_GEN_TO_MAC    0x0400
+#define HV_MUX_DATA_CTRL_FORCE_SPEED   0x0004
+
+/**
+ *  e1000e_check_reset_block_generic - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return 0, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000e_check_reset_block_generic(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	manc = er32(MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+	       E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ *  e1000e_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 e1000e_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_id;
+	u16 retry_count = 0;
+
+	if (!(phy->ops.read_reg))
+		goto out;
+
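+	/*
+	 * Two attempts: right after power-up a PHY may momentarily return
+	 * an invalid ID of all zeros or all ones (the latter masks down to
+	 * PHY_REVISION_MASK), so re-read once before giving up.
+	 */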
+	while (retry_count < 2) {
+		ret_val = e1e_rphy(hw, PHY_ID1, &phy_id);
+		if (ret_val)
+			goto out;
+
+		phy->id = (u32)(phy_id << 16);
+		udelay(20);
+		ret_val = e1e_rphy(hw, PHY_ID2, &phy_id);
+		if (ret_val)
+			goto out;
+
+		phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+		phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+		if (phy->id != 0 && phy->id != PHY_REVISION_MASK)
+			goto out;
+
+		retry_count++;
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_reset_dsp - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+s32 e1000e_phy_reset_dsp(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		return ret_val;
+
+	return e1e_wphy(hw, M88E1000_PHY_GEN_CONTROL, 0);
+}
+
+/**
+ *  e1000e_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+s32 e1000e_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		e_dbg("PHY Address %d is out of range\n", offset);
+		return -E1000_ERR_PARAM;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_READ));
+
+	ew32(MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI read completed
+	 * Increasing the time out as testing showed failures with
+	 * the lower time out
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = er32(MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		e_dbg("MDI Read did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		e_dbg("MDI Error\n");
+		return -E1000_ERR_PHY;
+	}
+	*data = (u16) mdic;
+
+	/*
+	 * Allow some time after each MDIC transaction to avoid
+	 * reading duplicate data in the next MDIC transaction.
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
+	return 0;
+}
+
+/**
+ *  e1000e_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000e_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		e_dbg("PHY Address %d is out of range\n", offset);
+		return -E1000_ERR_PARAM;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = (((u32)data) |
+		(offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_WRITE));
+
+	ew32(MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI write completed.  The
+	 * timeout was increased after testing showed failures with the
+	 * lower value.
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = er32(MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		e_dbg("MDI Write did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		e_dbg("MDI Error\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/*
+	 * Allow some time after each MDIC transaction to avoid
+	 * reading duplicate data in the next MDIC transaction.
+	 */
+	if (hw->mac.type == e1000_pch2lan)
+		udelay(100);
+
+	return 0;
+}
+
+/**
+ *  e1000e_read_phy_reg_m88 - Read m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_phy_reg_m88 - Write m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+	hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_set_page_igp - Set page as on IGP-like PHY(s)
+ *  @hw: pointer to the HW structure
+ *  @page: page to set (shifted left when necessary)
+ *
+ *  Sets PHY page required for PHY register access.  Assumes semaphore is
+ *  already acquired.  Note, this function sets phy.addr to 1 so the caller
+ *  must set it appropriately (if necessary) after this function returns.
+ **/
+s32 e1000_set_page_igp(struct e1000_hw *hw, u16 page)
+{
+	e_dbg("Setting page 0x%x\n", page);
+
+	hw->phy.addr = 1;
+
+	return e1000e_write_phy_reg_mdic(hw, IGP01E1000_PHY_PAGE_SELECT, page);
+}
+
+/**
+ *  __e1000e_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+static s32 __e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data,
+                                    bool locked)
+{
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (u16)offset);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+release:
+	if (!locked)
+		hw->phy.ops.release(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores the
+ *  retrieved information in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_read_phy_reg_igp_locked - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000e_read_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ *  __e1000e_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data,
+                                     bool locked)
+{
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000e_write_phy_reg_mdic(hw,
+						    IGP01E1000_PHY_PAGE_SELECT,
+						    (u16)offset);
+		if (ret_val)
+			goto release;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+release:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_write_phy_reg_igp_locked - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_phy_reg_igp_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000e_write_phy_reg_igp(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_read_kmrn_reg - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data,
+                                 bool locked)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	kmrnctrlsta = er32(KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_read_kmrn_reg -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset using the
+ *  kumeran interface.  The information retrieved is stored in data.
+ *  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_read_kmrn_reg_locked -  Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the kumeran interface.  The
+ *  information retrieved is stored in data.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_read_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ *  __e1000_write_kmrn_reg - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *
+ *  Acquires semaphore, if necessary.  Then writes the data to the PHY register
+ *  at the offset using the kumeran interface.  Release any acquired semaphores
+ *  before exiting.
+ **/
+static s32 __e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data,
+                                  bool locked)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val = 0;
+
+	if (!locked) {
+		if (!(hw->phy.ops.acquire))
+			goto out;
+
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+		       E1000_KMRNCTRLSTA_OFFSET) | data;
+	ew32(KMRNCTRLSTA, kmrnctrlsta);
+	e1e_flush();
+
+	udelay(2);
+
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_kmrn_reg -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to the PHY register at the offset
+ *  using the kumeran interface.  Release the acquired semaphore before exiting.
+ **/
+s32 e1000e_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, false);
+}
+
+/**
+ *  e1000e_write_kmrn_reg_locked -  Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to the PHY register at the offset using the kumeran interface.
+ *  Assumes semaphore already acquired.
+ **/
+s32 e1000e_write_kmrn_reg_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_kmrn_reg(hw, offset, data, true);
+}
+
+/**
+ *  e1000_copper_link_setup_82577 - Setup 82577 PHY for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 e1000_copper_link_setup_82577(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = e1e_rphy(hw, I82577_CFG_REG, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= I82577_CFG_ASSERT_CRS_ON_TX;
+
+	/* Enable downshift */
+	phy_data |= I82577_CFG_ENABLE_DOWNSHIFT;
+
+	ret_val = e1e_wphy(hw, I82577_CFG_REG, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
+ *  and downshift values are also set.
+ **/
+s32 e1000e_copper_link_setup_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* For BM PHY this bit is downshift enable */
+	if (phy->type != e1000_phy_bm)
+		phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+	/*
+	 * Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+		break;
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/*
+	 * Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	/* Enable downshift on BM (disabled by default) */
+	if (phy->type == e1000_phy_bm)
+		phy_data |= BME1000_PSCR_ENABLE_DOWNSHIFT;
+
+	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	if ((phy->type == e1000_phy_m88) &&
+	    (phy->revision < E1000_REVISION_4) &&
+	    (phy->id != BME1000_E_PHY_ID_R2)) {
+		/*
+		 * Force TX_CLK in the Extended PHY Specific Control Register
+		 * to 25MHz clock.
+		 */
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+		if ((phy->revision == 2) &&
+		    (phy->id == M88E1111_I_PHY_ID)) {
+			/* 82573L PHY - set the downshift counter to 5x. */
+			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+		} else {
+			/* Configure Master and Slave downshift values */
+			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+		}
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	if ((phy->type == e1000_phy_bm) && (phy->id == BME1000_E_PHY_ID_R2)) {
+		/* Set PHY page 0, register 29 to 0x0003 */
+		ret_val = e1e_wphy(hw, 29, 0x0003);
+		if (ret_val)
+			return ret_val;
+
+		/* Set PHY page 0, register 30 to 0x0000 */
+		ret_val = e1e_wphy(hw, 30, 0x0000);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Commit the changes. */
+	ret_val = e1000e_commit_phy(hw);
+	if (ret_val) {
+		e_dbg("Error committing the PHY changes\n");
+		return ret_val;
+	}
+
+	if (phy->type == e1000_phy_82578) {
+		ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* 82578 PHY - set the downshift count to 1x. */
+		phy_data |= I82578_EPSCR_DOWNSHIFT_ENABLE;
+		phy_data &= ~I82578_EPSCR_DOWNSHIFT_COUNTER_MASK;
+		ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return 0;
+}
+
+/**
+ *  e1000e_copper_link_setup_igp - Setup igp PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHYs.
+ **/
+s32 e1000e_copper_link_setup_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1000_phy_hw_reset(hw);
+	if (ret_val) {
+		e_dbg("Error resetting the PHY.\n");
+		return ret_val;
+	}
+
+	/*
+	 * Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+	 * timeout issues when LFS is enabled.
+	 */
+	msleep(100);
+
+	/* disable lplu d0 during driver init */
+	ret_val = e1000_set_d0_lplu_state(hw, false);
+	if (ret_val) {
+		e_dbg("Error Disabling LPLU D0\n");
+		return ret_val;
+	}
+	/* Configure mdi-mdix settings */
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+	if (ret_val)
+		return ret_val;
+
+	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+	switch (phy->mdix) {
+	case 1:
+		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 2:
+		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 0:
+	default:
+		data |= IGP01E1000_PSCR_AUTO_MDIX;
+		break;
+	}
+	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, data);
+	if (ret_val)
+		return ret_val;
+
+	/* set auto-master slave resolution settings */
+	if (hw->mac.autoneg) {
+		/*
+		 * when autonegotiation advertisement is only 1000Mbps then we
+		 * should disable SmartSpeed and enable Auto MasterSlave
+		 * resolution as hardware default.
+		 */
+		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+			/* Disable SmartSpeed */
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+
+			/* Set auto Master/Slave resolution process */
+			ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~CR_1000T_MS_ENABLE;
+			ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+			if (ret_val)
+				return ret_val;
+		}
+
+		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &data);
+		if (ret_val)
+			return ret_val;
+
+		/* load defaults for future use */
+		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+			((data & CR_1000T_MS_VALUE) ?
+			e1000_ms_force_master :
+			e1000_ms_force_slave) :
+			e1000_ms_auto;
+
+		switch (phy->ms_type) {
+		case e1000_ms_force_master:
+			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_force_slave:
+			data |= CR_1000T_MS_ENABLE;
+			data &= ~(CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_auto:
+			data &= ~CR_1000T_MS_ENABLE;
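+			/* fall through */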
+		default:
+			break;
+		}
+		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, data);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register and, if the PHY is already set up for auto-negotiation, then
+ *  returns successfully.  Otherwise, sets up advertisement and flow control
+ *  to the appropriate values for the desired auto-negotiation.
+ **/
+static s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg = 0;
+
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	ret_val = e1e_rphy(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+	if (ret_val)
+		return ret_val;
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		/* Read the MII 1000Base-T Control Register (Address 9). */
+		ret_val = e1e_rphy(hw, PHY_1000T_CTRL, &mii_1000t_ctrl_reg);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/*
+	 * Need to parse both autoneg_advertised and fc and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/*
+	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the  1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+				 NWAY_AR_100TX_HD_CAPS |
+				 NWAY_AR_10T_FD_CAPS   |
+				 NWAY_AR_10T_HD_CAPS);
+	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+	e_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+	/* Do we want to advertise 10 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+		e_dbg("Advertise 10mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+	}
+
+	/* Do we want to advertise 10 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+		e_dbg("Advertise 10mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+		e_dbg("Advertise 100mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+		e_dbg("Advertise 100mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+	}
+
+	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+		e_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+	/* Do we want to advertise 1000 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+		e_dbg("Advertise 1000mb Full duplex\n");
+		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+	}
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the PHY advertisement registers accordingly.  If
+	 * auto-negotiation is enabled, then software will have to set the
+	 * "PAUSE" bits to the correct value in the Auto-Negotiation
+	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+	 * negotiation.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames
+	 *	  but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *	  but we do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No software override.  The flow control configuration
+	 *	  in the EEPROM is used.
+	 */
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		/*
+		 * Flow control (Rx & Tx) is completely disabled by a
+		 * software over-ride.
+		 */
+		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled, and Tx Flow control is
+		 * disabled, by a software over-ride.
+		 *
+		 * Since there really isn't a way to advertise that we are
+		 * capable of Rx Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric Rx PAUSE.  Later
+		 * (in e1000e_config_fc_after_link_up) we will disable the
+		 * hw's ability to send PAUSE frames.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		e_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		return ret_val;
+	}
+
+	ret_val = e1e_wphy(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL)
+		ret_val = e1e_wphy(hw, PHY_1000T_CTRL, mii_1000t_ctrl_reg);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on autoneg advertisement parameter, then
+ *  configure to advertise the full capability.  Setup the PHY to autoneg
+ *  and restart the negotiation process between the link partner.  If
+ *  autoneg_wait_to_complete, then wait for autoneg to complete before exiting.
+ **/
+static s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	/*
+	 * Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/*
+	 * If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	e_dbg("Reconfiguring auto-neg advertisement params\n");
+	ret_val = e1000_phy_setup_autoneg(hw);
+	if (ret_val) {
+		e_dbg("Error Setting up Auto-Negotiation\n");
+		return ret_val;
+	}
+	e_dbg("Restarting Auto-Neg\n");
+
+	/*
+	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, via a callback routine)?
+	 */
+	if (phy->autoneg_wait_to_complete) {
+		ret_val = e1000_wait_autoneg(hw);
+		if (ret_val) {
+			e_dbg("Error while waiting for autoneg to complete\n");
+			return ret_val;
+		}
+	}
+
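+	/* Force the next link check to re-read the link status from the PHY. */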
+	hw->mac.get_link_status = 1;
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_setup_copper_link - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  collision distance and flow control are configured.  If link is not
+ *  established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000e_setup_copper_link(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			return ret_val;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		e_dbg("Forcing Speed and Duplex\n");
+		ret_val = e1000_phy_force_speed_duplex(hw);
+		if (ret_val) {
+			e_dbg("Error Forcing Speed and Duplex\n");
+			return ret_val;
+		}
+	}
+
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = e1000e_phy_has_link_generic(hw,
+					     COPPER_LINK_UP_LIMIT,
+					     10,
+					     &link);
+	if (ret_val)
+		return ret_val;
+
+	if (link) {
+		e_dbg("Valid link established!!!\n");
+		e1000e_config_collision_dist(hw);
+		ret_val = e1000e_config_fc_after_link_up(hw);
+	} else {
+		e_dbg("Unable to establish link!!!\n");
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns 0 if
+ *  link comes up successfully, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000e_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("IGP PSCR: %X\n", phy_data);
+
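+	/* Allow the forced MDI configuration to settle before polling for link. */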
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+						     PHY_FORCE_LIMIT,
+						     100000,
+						     &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+						     PHY_FORCE_LIMIT,
+						     100000,
+						     &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on Tx must be set.  Returns 0 upon
+ *  successful completion, else the corresponding error code.
+ **/
+s32 e1000e_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e_dbg("M88E1000 PSCR: %X\n", phy_data);
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Reset the phy to commit changes. */
+	ret_val = e1000e_commit_phy(hw);
+	if (ret_val)
+		return ret_val;
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+
+		if (!link) {
+			if (hw->phy.type != e1000_phy_m88) {
+				e_dbg("Link taking longer than expected.\n");
+			} else {
+				/*
+				 * We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = e1e_wphy(hw, M88E1000_PHY_PAGE_SELECT,
+						   0x001d);
+				if (ret_val)
+					return ret_val;
+				ret_val = e1000e_phy_reset_dsp(hw);
+				if (ret_val)
+					return ret_val;
+			}
+		}
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+						     100000, &link);
+		if (ret_val)
+			return ret_val;
+	}
+
+	if (hw->phy.type != e1000_phy_m88)
+		return 0;
+
+	ret_val = e1e_rphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = e1e_wphy(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1e_wphy(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_ife - Force PHY speed & duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces the speed and duplex settings of the PHY.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+s32 e1000_phy_force_speed_duplex_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	/* Disable MDI-X support for 10/100 */
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IFE_PMC_AUTO_MDIX;
+	data &= ~IFE_PMC_FORCE_MDIX;
+
+	ret_val = e1e_wphy(hw, IFE_PHY_MDIX_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	e_dbg("IFE PMC: %X\n", data);
+
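+	/* Allow the MDI-X changes to settle before polling for link. */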
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on IFE phy.\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+void e1000e_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.current_mode = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = er32(CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		e_dbg("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		e_dbg("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+		e_dbg("Forcing 100mb\n");
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl |= MII_CR_SPEED_10;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		e_dbg("Forcing 10mb\n");
+	}
+
+	e1000e_config_collision_dist(hw);
+
+	ew32(CTRL, ctrl);
+}
+
+/**
+ *  e1000e_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, failure returns a negative error code
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000e_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   &data);
+			if (ret_val)
+				return ret_val;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG,
+					   data);
+			if (ret_val)
+				return ret_val;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = e1e_wphy(hw, IGP02E1000_PHY_POWER_MGMT, data);
+		if (ret_val)
+			return ret_val;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_CONFIG, &data);
+		if (ret_val)
+			return ret_val;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1e_wphy(hw, IGP01E1000_PHY_PORT_CONFIG, data);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_check_downshift - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, failure returns a negative error code
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000e_check_downshift(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	switch (phy->type) {
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+	case e1000_phy_bm:
+	case e1000_phy_82578:
+		offset	= M88E1000_PHY_SPEC_STATUS;
+		mask	= M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp_3:
+		offset	= IGP01E1000_PHY_LINK_HEALTH;
+		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = false;
+		return 0;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	/*
+	 * Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset	= IGP01E1000_PHY_PCS_INIT_REG;
+		mask	= IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/*
+		 * This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset	= IGP01E1000_PHY_PORT_STATUS;
+		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_ife - Check cable polarity for IFE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Polarity is determined based on whether the polarity reversal feature
+ *  is enabled.
+ **/
+s32 e1000_check_polarity_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	/*
+	 * Polarity is determined based on the reversal feature being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset = IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask = IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset = IFE_PHY_SPECIAL_CONTROL;
+		mask = IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = e1e_rphy(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = (phy_data & mask)
+		                       ? e1000_rev_polarity_reversed
+		                       : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_wait_autoneg - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+static s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
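+		/*
+		 * Read PHY_STATUS twice, presumably because some status
+		 * bits are latched and the first read can return stale
+		 * state (the same sticky-bit pattern handled in
+		 * e1000e_phy_has_link_generic()).
+		 */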
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msleep(100);
+	}
+
+	/*
+	 * PHY_AUTO_NEG_LIMIT expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_has_link_generic - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000e_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+			       u32 usec_interval, bool *success)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	for (i = 0; i < iterations; i++) {
+		/*
+		 * Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky.  No harm doing
+		 * it across the board.
+		 */
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val) {
+			/*
+			 * If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			if (usec_interval >= 1000)
+				mdelay(usec_interval/1000);
+			else
+				udelay(usec_interval);
+		}
+		ret_val = e1e_rphy(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			mdelay(usec_interval/1000);
+		else
+			udelay(usec_interval);
+	}
+
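+	/* Link is up only if we exited before exhausting all iterations. */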
+	*success = (i < iterations);
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has five
+ *  possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 e1000e_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+	        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain value, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.
+ **/
+s32 e1000e_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+	       IGP02E1000_PHY_AGC_A,
+	       IGP02E1000_PHY_AGC_B,
+	       IGP02E1000_PHY_AGC_C,
+	       IGP02E1000_PHY_AGC_D
+	};
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1e_rphy(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/*
+		 * Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values.  The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+				IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0))
+			return -E1000_ERR_PHY;
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
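+	/*
+	 * Drop the shortest and longest per-channel estimates and average
+	 * the two remaining channels (four channels minus min and max).
+	 */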
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+		      e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+				 (agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u16 phy_data;
+	bool link;
+
+	if (phy->media_type != e1000_media_type_copper) {
+		e_dbg("Phy info is only valid for copper media\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->polarity_correction = (phy_data &
+				    M88E1000_PSCR_POLARITY_REVERSAL);
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX);
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 e1000e_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		return ret_val;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		return -E1000_ERR_CONFIG;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = e1e_rphy(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX);
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			return ret_val;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ife - Retrieves various IFE PHY states
+ *  @hw: pointer to the HW structure
+ *
+ *  Populates "phy" structure with various feature states.
+ **/
+s32 e1000_get_phy_info_ife(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		goto out;
+	phy->polarity_correction = !(data & IFE_PSC_AUTO_POLARITY_DISABLE);
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = e1e_rphy(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? true : false;
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_sw_reset - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register,
+ *  setting the reset bit, and writing the register back to the PHY.
+ **/
+s32 e1000e_phy_sw_reset(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		return ret_val;
+
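+	/* Brief delay to let the reset bit take hold before returning. */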
+	udelay(1);
+
+	return ret_val;
+}
+
+/**
+ *  e1000e_phy_hw_reset_generic - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ **/
+s32 e1000e_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u32 ctrl;
+
+	ret_val = e1000_check_reset_block(hw);
+	if (ret_val)
+		return 0;
+
+	ret_val = phy->ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ctrl = er32(CTRL);
+	ew32(CTRL, ctrl | E1000_CTRL_PHY_RST);
+	e1e_flush();
+
+	udelay(phy->reset_delay_us);
+
+	ew32(CTRL, ctrl);
+	e1e_flush();
+
+	udelay(150);
+
+	phy->ops.release(hw);
+
+	return e1000_get_phy_cfg_done(hw);
+}
+
+/**
+ *  e1000e_get_cfg_done - Generic configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Generic function to wait 10 milliseconds for configuration to complete
+ *  and return success.
+ **/
+s32 e1000e_get_cfg_done(struct e1000_hw *hw)
+{
+	mdelay(10);
+	return 0;
+}
+
+/**
+ *  e1000e_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000e_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	e_dbg("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	e1e_wphy(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	e1e_wphy(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	e1e_wphy(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	e1e_wphy(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to Tx amplitude in Gig mode */
+	e1e_wphy(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	e1e_wphy(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	e1e_wphy(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	e1e_wphy(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	e1e_wphy(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	e1e_wphy(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	e1e_wphy(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	e1e_wphy(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	e1e_wphy(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	e1e_wphy(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	e1e_wphy(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	e1e_wphy(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	e1e_wphy(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	e1e_wphy(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	e1e_wphy(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	e1e_wphy(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	e1e_wphy(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	e1e_wphy(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	e1e_wphy(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	e1e_wphy(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	e1e_wphy(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	e1e_wphy(hw, 0x1798, 0xD008);
+	/*
+	 * Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	e1e_wphy(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	e1e_wphy(hw, 0x187A, 0x0800);
+	/*
+	 * Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	e1e_wphy(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	e1e_wphy(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	e1e_wphy(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1e_wphy(hw, 0x0000, 0x1340);
+
+	return 0;
+}
+
+/* Internal function pointers */
+
+/**
+ *  e1000_get_phy_cfg_done - Generic PHY configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if silicon family did not implement a family specific
+ *  get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.get_cfg_done)
+		return hw->phy.ops.get_cfg_done(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  When the silicon family has not implemented a forced speed/duplex
+ *  function for the PHY, simply return 0.
+ **/
+static s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.force_speed_duplex)
+		return hw->phy.ops.force_speed_duplex(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000e_get_phy_type_from_id - Get PHY type from id
+ *  @phy_id: phy_id read from the phy
+ *
+ *  Returns the phy type from the id.
+ **/
+enum e1000_phy_type e1000e_get_phy_type_from_id(u32 phy_id)
+{
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id) {
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	case BME1000_E_PHY_ID:
+	case BME1000_E_PHY_ID_R2:
+		phy_type = e1000_phy_bm;
+		break;
+	case I82578_E_PHY_ID:
+		phy_type = e1000_phy_82578;
+		break;
+	case I82577_E_PHY_ID:
+		phy_type = e1000_phy_82577;
+		break;
+	case I82579_E_PHY_ID:
+		phy_type = e1000_phy_82579;
+		break;
+	case I217_E_PHY_ID:
+		phy_type = e1000_phy_i217;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
+
+/**
+ *  e1000e_determine_phy_address - Determines PHY address.
+ *  @hw: pointer to the HW structure
+ *
+ *  This uses a trial and error method to loop through possible PHY
+ *  addresses. It tests each by reading the PHY ID registers and
+ *  checking for a match.
+ **/
+s32 e1000e_determine_phy_address(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_PHY_TYPE;
+	u32 phy_addr = 0;
+	u32 i;
+	enum e1000_phy_type phy_type = e1000_phy_unknown;
+
+	hw->phy.id = phy_type;
+
+	for (phy_addr = 0; phy_addr < E1000_MAX_PHY_ADDR; phy_addr++) {
+		hw->phy.addr = phy_addr;
+		i = 0;
+
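+		/*
+		 * Retry the ID read up to 10 times per address, sleeping
+		 * between attempts in case the PHY is still coming up.
+		 */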
+		do {
+			e1000e_get_phy_id(hw);
+			phy_type = e1000e_get_phy_type_from_id(hw->phy.id);
+
+			/*
+			 * If phy_type is valid, break - we found our
+			 * PHY address
+			 */
+			if (phy_type != e1000_phy_unknown) {
+				ret_val = 0;
+				goto out;
+			}
+			usleep_range(1000, 2000);
+			i++;
+		} while (i < 10);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_addr_for_bm_page - Retrieve PHY page address
+ *  @page: page to access
+ *  @reg: register to access
+ *
+ *  Returns the PHY address for the page requested.
+ **/
+static u32 e1000_get_phy_addr_for_bm_page(u32 page, u32 reg)
+{
+	u32 phy_addr = 2;
+
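+	/*
+	 * Registers on pages 768 and above, page 0 register 25, and the
+	 * page-select register (31) are only reachable at PHY address 1;
+	 * everything else uses address 2.
+	 */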
+	if ((page >= 768) || (page == 0 && reg == 25) || (reg == 31))
+		phy_addr = 1;
+
+	return phy_addr;
+}
+
+/**
+ *  e1000e_write_phy_reg_bm - Write BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u32 page = offset >> IGP_PAGE_SHIFT;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, false);
+		goto out;
+	}
+
+	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
+		/*
+		 * Page select is register 31 for phy address 1 and 22 for
+		 * phy address 2 and 3. Page select is shifted only for
+		 * phy address 1.
+		 */
+		if (hw->phy.addr == 1) {
+			page_shift = IGP_PAGE_SHIFT;
+			page_select = IGP01E1000_PHY_PAGE_SELECT;
+		} else {
+			page_shift = 0;
+			page_select = BM_PHY_PAGE_SELECT;
+		}
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+		                                    (page << page_shift));
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+	                                    data);
+
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000e_read_phy_reg_bm - Read BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u32 page = offset >> IGP_PAGE_SHIFT;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, false);
+		goto out;
+	}
+
+	hw->phy.addr = e1000_get_phy_addr_for_bm_page(page, offset);
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		u32 page_shift, page_select;
+
+		/*
+		 * Page select is register 31 for phy address 1 and 22 for
+		 * phy address 2 and 3. Page select is shifted only for
+		 * phy address 1.
+		 */
+		if (hw->phy.addr == 1) {
+			page_shift = IGP_PAGE_SHIFT;
+			page_select = IGP01E1000_PHY_PAGE_SELECT;
+		} else {
+			page_shift = 0;
+			page_select = BM_PHY_PAGE_SELECT;
+		}
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, page_select,
+		                                    (page << page_shift));
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+	                                   data);
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000e_read_phy_reg_bm2 - Read BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000e_read_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, false);
+		goto out;
+	}
+
+	hw->phy.addr = 1;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+						    page);
+
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					   data);
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000e_write_phy_reg_bm2 - Write BM PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000e_write_phy_reg_bm2(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u16 page = (u16)(offset >> IGP_PAGE_SHIFT);
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, false);
+		goto out;
+	}
+
+	hw->phy.addr = 1;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		/* Page is shifted left, PHY expects (page x 32) */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_PHY_PAGE_SELECT,
+						    page);
+
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					    data);
+
+out:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  e1000_enable_phy_wakeup_reg_access_bm - enable access to BM wakeup registers
+ *  @hw: pointer to the HW structure
+ *  @phy_reg: pointer to store original contents of BM_WUC_ENABLE_REG
+ *
+ *  Assumes semaphore already acquired and phy_reg points to a valid memory
+ *  address to store contents of the BM_WUC_ENABLE_REG register.
+ **/
+s32 e1000_enable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+	s32 ret_val;
+	u16 temp;
+
+	/* All page select, port ctrl and wakeup registers use phy address 1 */
+	hw->phy.addr = 1;
+
+	/* Select Port Control Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+	if (ret_val) {
+		e_dbg("Could not set Port Control page\n");
+		goto out;
+	}
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, phy_reg);
+	if (ret_val) {
+		e_dbg("Could not read PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+		goto out;
+	}
+
+	/*
+	 * Enable both PHY wakeup mode and Wakeup register page writes.
+	 * Prevent a power state change by disabling ME and Host PHY wakeup.
+	 */
+	temp = *phy_reg;
+	temp |= BM_WUC_ENABLE_BIT;
+	temp &= ~(BM_WUC_ME_WU_BIT | BM_WUC_HOST_WU_BIT);
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, temp);
+	if (ret_val) {
+		e_dbg("Could not write PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+		goto out;
+	}
+
+	/* Select Host Wakeup Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_WUC_PAGE << IGP_PAGE_SHIFT));
+
+	/* caller now able to write registers on the Wakeup registers page */
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_disable_phy_wakeup_reg_access_bm - disable access to BM wakeup regs
+ *  @hw: pointer to the HW structure
+ *  @phy_reg: pointer to original contents of BM_WUC_ENABLE_REG
+ *
+ *  Restore BM_WUC_ENABLE_REG to its original value.
+ *
+ *  Assumes semaphore already acquired and *phy_reg is the contents of the
+ *  BM_WUC_ENABLE_REG before register(s) on BM_WUC_PAGE were accessed by
+ *  caller.
+ **/
+s32 e1000_disable_phy_wakeup_reg_access_bm(struct e1000_hw *hw, u16 *phy_reg)
+{
+	s32 ret_val = 0;
+
+	/* Select Port Control Registers page */
+	ret_val = e1000_set_page_igp(hw, (BM_PORT_CTRL_PAGE << IGP_PAGE_SHIFT));
+	if (ret_val) {
+		e_dbg("Could not set Port Control page\n");
+		goto out;
+	}
+
+	/* Restore 769.17 to its original value */
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ENABLE_REG, *phy_reg);
+	if (ret_val)
+		e_dbg("Could not restore PHY register %d.%d\n",
+		      BM_PORT_CTRL_PAGE, BM_WUC_ENABLE_REG);
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_access_phy_wakeup_reg_bm - Read/write BM PHY wakeup register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read or written
+ *  @data: pointer to the data to read or write
+ *  @read: determines if operation is read or write
+ *  @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ *  Read the PHY register at offset and store the retrieved information in
+ *  data, or write data to PHY register at offset.  Note the procedure to
+ *  access the PHY wakeup registers is different from that for the other PHY
+ *  registers.  It works as follows:
+ *  1) Set 769.17.2 (page 769, register 17, bit 2) = 1
+ *  2) Set page to 800 for host (801 for the manageability engine)
+ *  3) Write the address using the address opcode (0x11)
+ *  4) Read or write the data using the data opcode (0x12)
+ *  5) Restore 769.17.2 to its original value
+ *
+ *  Steps 1 and 2 are done by e1000_enable_phy_wakeup_reg_access_bm() and
+ *  step 5 is done by e1000_disable_phy_wakeup_reg_access_bm().
+ *
+ *  Assumes semaphore is already acquired.  When page_set==true, assumes
+ *  the PHY page is set to BM_WUC_PAGE (i.e. a function in the call stack
+ *  is responsible for calls to e1000_[enable|disable]_phy_wakeup_reg_bm()).
+ **/
+static s32 e1000_access_phy_wakeup_reg_bm(struct e1000_hw *hw, u32 offset,
+					  u16 *data, bool read, bool page_set)
+{
+	s32 ret_val;
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 phy_reg = 0;
+
+	/* Gig must be disabled for MDIO accesses to Host Wakeup reg page */
+	if ((hw->mac.type == e1000_pchlan) &&
+	    (!(er32(PHY_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)))
+		e_dbg("Attempting to access page %d while gig enabled.\n",
+		      page);
+
+	if (!page_set) {
+		/* Enable access to PHY wakeup registers */
+		ret_val = e1000_enable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+		if (ret_val) {
+			e_dbg("Could not enable PHY wakeup reg access\n");
+			goto out;
+		}
+	}
+
+	e_dbg("Accessing PHY page %d reg 0x%x\n", page, reg);
+
+	/* Write the Wakeup register page offset value using opcode 0x11 */
+	ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_ADDRESS_OPCODE, reg);
+	if (ret_val) {
+		e_dbg("Could not write address opcode to page %d\n", page);
+		goto out;
+	}
+
+	if (read) {
+		/* Read the Wakeup register page value using opcode 0x12 */
+		ret_val = e1000e_read_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+		                                   data);
+	} else {
+		/* Write the Wakeup register page value using opcode 0x12 */
+		ret_val = e1000e_write_phy_reg_mdic(hw, BM_WUC_DATA_OPCODE,
+						    *data);
+	}
+
+	if (ret_val) {
+		e_dbg("Could not access PHY reg %d.%d\n", page, reg);
+		goto out;
+	}
+
+	if (!page_set)
+		ret_val = e1000_disable_phy_wakeup_reg_access_bm(hw, &phy_reg);
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake on lan is not enabled, restore the link to its
+ * previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down the PHY to save power when the interface is down or being
+ * unloaded and wake on lan is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1e_rphy(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	e1e_wphy(hw, PHY_CONTROL, mii_reg);
+	usleep_range(1000, 2000);
+}
+
+/**
+ *  e1000e_commit_phy - Soft PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a soft PHY reset on those that apply. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000e_commit_phy(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.commit)
+		return hw->phy.ops.commit(hw);
+
+	return 0;
+}
+
+/**
+ *  e1000_set_d0_lplu_state - Sets low power link up state for D0
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, failure returns a negative error code
+ *
+ *  The low power link up (lplu) state is set to the power management level D0
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D0
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+static s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->phy.ops.set_d0_lplu_state)
+		return hw->phy.ops.set_d0_lplu_state(hw, active);
+
+	return 0;
+}
+
+/**
+ *  __e1000_read_phy_reg_hv -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *  @locked: semaphore has already been acquired or not
+ *  @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphore before exiting.
+ **/
+static s32 __e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data,
+				   bool locked, bool page_set)
+{
+	s32 ret_val;
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, data,
+							 true, page_set);
+		goto out;
+	}
+
+	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+		                                         data, true);
+		goto out;
+	}
+
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
+
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
+
+			hw->phy.addr = phy_addr;
+
+			if (ret_val)
+				goto out;
+		}
+	}
+
+	e_dbg("reading PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
+
+	ret_val = e1000e_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+	                                  data);
+out:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_hv -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore then reads the PHY register at offset and stores
+ *  the retrieved information in data.  Release the acquired semaphore
+ *  before exiting.
+ **/
+s32 e1000_read_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ *  e1000_read_phy_reg_hv_locked -  Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired.
+ **/
+s32 e1000_read_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ *  e1000_read_phy_reg_page_hv - Read HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset and stores the retrieved information
+ *  in data.  Assumes semaphore already acquired and page already set.
+ **/
+s32 e1000_read_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return __e1000_read_phy_reg_hv(hw, offset, data, true, true);
+}
+
+/**
+ *  __e1000_write_phy_reg_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *  @locked: semaphore has already been acquired or not
+ *  @page_set: BM_WUC_PAGE already set and access enabled
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+static s32 __e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data,
+				    bool locked, bool page_set)
+{
+	s32 ret_val;
+	u16 page = BM_PHY_REG_PAGE(offset);
+	u16 reg = BM_PHY_REG_NUM(offset);
+	u32 phy_addr = hw->phy.addr = e1000_get_phy_addr_for_hv_page(page);
+
+	if (!locked) {
+		ret_val = hw->phy.ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Page 800 works differently than the rest so it has its own func */
+	if (page == BM_WUC_PAGE) {
+		ret_val = e1000_access_phy_wakeup_reg_bm(hw, offset, &data,
+							 false, page_set);
+		goto out;
+	}
+
+	if (page > 0 && page < HV_INTC_FC_PAGE_START) {
+		ret_val = e1000_access_phy_debug_regs_hv(hw, offset,
+		                                         &data, false);
+		goto out;
+	}
+
+	if (!page_set) {
+		if (page == HV_INTC_FC_PAGE_START)
+			page = 0;
+
+		/*
+		 * Workaround MDIO accesses being disabled after entering IEEE
+		 * Power Down (when bit 11 of the PHY Control register is set)
+		 */
+		if ((hw->phy.type == e1000_phy_82578) &&
+		    (hw->phy.revision >= 1) &&
+		    (hw->phy.addr == 2) &&
+		    ((MAX_PHY_REG_ADDRESS & reg) == 0) && (data & (1 << 11))) {
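+			/*
+			 * Write 0x7EFF to debug register 0x43
+			 * ((1 << 6) | 0x3) first; this appears to keep MDIO
+			 * usable across the power-down write (exact register
+			 * semantics are not documented here).
+			 */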
+			u16 data2 = 0x7EFF;
+			ret_val = e1000_access_phy_debug_regs_hv(hw,
+								 (1 << 6) | 0x3,
+								 &data2, false);
+			if (ret_val)
+				goto out;
+		}
+
+		if (reg > MAX_PHY_MULTI_PAGE_REG) {
+			/* Page is shifted left, PHY expects (page x 32) */
+			ret_val = e1000_set_page_igp(hw,
+						     (page << IGP_PAGE_SHIFT));
+
+			hw->phy.addr = phy_addr;
+
+			if (ret_val)
+				goto out;
+		}
+	}
+
+	e_dbg("writing PHY page %d (or 0x%x shifted) reg 0x%x\n", page,
+	      page << IGP_PAGE_SHIFT, reg);
+
+	ret_val = e1000e_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & reg,
+	                                  data);
+
+out:
+	if (!locked)
+		hw->phy.ops.release(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore then writes the data to PHY register at the offset.
+ *  Release the acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, false, false);
+}
+
+/**
+ *  e1000_write_phy_reg_hv_locked - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.  Assumes semaphore
+ *  already acquired.
+ **/
+s32 e1000_write_phy_reg_hv_locked(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, false);
+}
+
+/**
+ *  e1000_write_phy_reg_page_hv - Write HV PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset.  Assumes semaphore
+ *  already acquired and page already set.
+ **/
+s32 e1000_write_phy_reg_page_hv(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return __e1000_write_phy_reg_hv(hw, offset, data, true, true);
+}
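+
+/*
+ * Wrapper matrix for the HV accessors above, by (locked, page_set):
+ *   e1000_write_phy_reg_hv         (false, false)  acquires the semaphore and sets the page
+ *   e1000_write_phy_reg_hv_locked  (true,  false)  caller already holds the semaphore
+ *   e1000_write_phy_reg_page_hv    (true,  true)   semaphore held and page already set
+ * The read-side wrappers follow the same pattern.
+ */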
+
+/**
+ *  e1000_get_phy_addr_for_hv_page - Get PHY address based on page
+ *  @page: page to be accessed
+ *
+ *  Returns the PHY address to use for the given page: 1 for pages at or
+ *  above HV_INTC_FC_PAGE_START, 2 otherwise.
+ **/
+static u32 e1000_get_phy_addr_for_hv_page(u32 page)
+{
+	u32 phy_addr = 2;
+
+	if (page >= HV_INTC_FC_PAGE_START)
+		phy_addr = 1;
+
+	return phy_addr;
+}
+
+/**
+ *  e1000_access_phy_debug_regs_hv - Read/write HV PHY vendor-specific registers
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read or written
+ *  @data: pointer to the data to be read or written
+ *  @read: determines if operation is read or write
+ *
+ *  Reads or writes the PHY register at offset, using data as the
+ *  destination or source.  Assumes the semaphore has already been acquired.
+ *  Note that these registers are accessed through an address port and a
+ *  data port, with PHY address 2 and without using pages.
+ **/
+static s32 e1000_access_phy_debug_regs_hv(struct e1000_hw *hw, u32 offset,
+                                          u16 *data, bool read)
+{
+	s32 ret_val;
+	u32 addr_reg = 0;
+	u32 data_reg = 0;
+
+	/* This takes care of the difference between desktop and mobile PHYs */
+	addr_reg = (hw->phy.type == e1000_phy_82578) ?
+	           I82578_ADDR_REG : I82577_ADDR_REG;
+	data_reg = addr_reg + 1;
+
+	/* All operations in this function are phy address 2 */
+	hw->phy.addr = 2;
+
+	/* masking with 0x3F to remove the page from offset */
+	ret_val = e1000e_write_phy_reg_mdic(hw, addr_reg, (u16)offset & 0x3F);
+	if (ret_val) {
+		e_dbg("Could not write the Address Offset port register\n");
+		goto out;
+	}
+
+	/* Read or write the data value next */
+	if (read)
+		ret_val = e1000e_read_phy_reg_mdic(hw, data_reg, data);
+	else
+		ret_val = e1000e_write_phy_reg_mdic(hw, data_reg, *data);
+
+	if (ret_val) {
+		e_dbg("Could not access the Data port register\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_link_stall_workaround_hv - Si workaround
+ *  @hw: pointer to the HW structure
+ *
+ *  This function works around a Si bug where the link partner can get
+ *  a link up indication before the PHY does.  If small packets are sent
+ *  by the link partner they can be placed in the packet buffer without
+ *  being properly accounted for by the PHY and will stall preventing
+ *  further packets from being received.  The workaround is to clear the
+ *  packet buffer after the PHY detects link up.
+ **/
+s32 e1000_link_stall_workaround_hv(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 data;
+
+	if (hw->phy.type != e1000_phy_82578)
+		goto out;
+
+	/* Do not apply the workaround if PHY loopback (bit 14) is set */
+	e1e_rphy(hw, PHY_CONTROL, &data);
+	if (data & PHY_CONTROL_LB)
+		goto out;
+
+	/* check if link is up and at 1Gbps */
+	ret_val = e1e_rphy(hw, BM_CS_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	data &= BM_CS_STATUS_LINK_UP |
+	        BM_CS_STATUS_RESOLVED |
+	        BM_CS_STATUS_SPEED_MASK;
+
+	if (data != (BM_CS_STATUS_LINK_UP |
+	             BM_CS_STATUS_RESOLVED |
+	             BM_CS_STATUS_SPEED_1000))
+		goto out;
+
+	mdelay(200);
+
+	/* flush the packets in the fifo buffer */
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC |
+			   HV_MUX_DATA_CTRL_FORCE_SPEED);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1e_wphy(hw, HV_MUX_DATA_CTRL, HV_MUX_DATA_CTRL_GEN_TO_MAC);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_82577 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & I82577_PHY_STATUS2_REV_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_82577 - Force speed/duplex for I82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.
+ **/
+s32 e1000_phy_force_speed_duplex_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = e1e_rphy(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000e_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1e_wphy(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		e_dbg("Waiting for forced speed/duplex link on 82577 phy\n");
+
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			e_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = e1000e_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_82577 - Retrieve I82577 PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length and the local and remote receiver status.
+ **/
+s32 e1000_get_phy_info_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = e1000e_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		e_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = e1000_check_polarity_82577(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1e_rphy(hw, I82577_PHY_STATUS_2, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & I82577_PHY_STATUS2_MDIX) ? true : false;
+
+	if ((data & I82577_PHY_STATUS2_SPEED_MASK) ==
+	    I82577_PHY_STATUS2_SPEED_1000MBPS) {
+		ret_val = hw->phy.ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1e_rphy(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_82577 - Determine cable length for 82577 PHY
+ *  @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 e1000_get_cable_length_82577(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, length;
+
+	ret_val = e1e_rphy(hw, I82577_PHY_DIAG_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	length = (phy_data & I82577_DSTATUS_CABLE_LENGTH) >>
+	         I82577_DSTATUS_CABLE_LENGTH_SHIFT;
+
+	if (length == E1000_CABLE_LENGTH_UNDEFINED)
+		ret_val = -E1000_ERR_PHY;
+
+	phy->cable_length = length;
+
+out:
+	return ret_val;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c
new file mode 100644
index 0000000..1985bef
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eepro100.c
@@ -0,0 +1,1854 @@
+/* rtnet/drivers/eepro100-rt.c: An Intel i82557-559 Real-Time-Ethernet driver for Linux. */
+/*
+	RTnet porting 2002 by Jan Kiszka <Jan.Kiszka@web.de>
+	Originally written 1996-1999 by Donald Becker.
+
+	The driver also contains updates by different kernel developers
+	(see incomplete list below).
+	Current maintainer is Andrey V. Savochkin <saw@saw.sw.com.sg>.
+	Please use this email address and linux-kernel mailing list for bug reports.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	This driver is for the Intel EtherExpress Pro100 (Speedo3) design.
+	It should work with all i82557/558/559 boards.
+
+	Version history:
+	1998 Apr - 2000 Feb  Andrey V. Savochkin <saw@saw.sw.com.sg>
+		Serious fixes for multicast filter list setting, TX timeout routine;
+		RX ring refilling logic;  other stuff
+	2000 Feb  Jeff Garzik <jgarzik@mandrakesoft.com>
+		Convert to new PCI driver interface
+	2000 Mar 24  Dragan Stancevic <visitor@valinux.com>
+		Disabled FC and ER, to avoid lockups when we get FCP interrupts.
+	2000 Jul 17 Goutham Rao <goutham.rao@intel.com>
+		PCI DMA API fixes, adding pci_dma_sync_single calls where necessary
+
+	2002 May 16 Jan Kiszka <Jan.Kiszka@web.de>
+		Ported to RTnet (RTAI version)
+*/
+
+static const char *version =
+"eepro100-rt.c:1.36-RTnet-0.8 2002-2006 Jan Kiszka <Jan.Kiszka@web.de>\n"
+"eepro100-rt.c: based on eepro100.c 1.36 by D. Becker, A. V. Savochkin and others\n";
+
+/* A few user-configurable values that apply to all boards.
+   First set is undocumented and spelled per Intel recommendations. */
+
+static int txfifo = 8;		/* Tx FIFO threshold in 4 byte units, 0-15 */
+static int rxfifo = 8;		/* Rx FIFO threshold, default 32 bytes. */
+/* Tx/Rx DMA burst length, 0-127, 0 == no preemption, tx==128 -> disabled. */
+static int txdmacount = 128;
+static int rxdmacount /* = 0 */;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast) */
+static int multicast_filter_limit = 64;
+
+/* 'options' is used to pass a transceiver override or full-duplex flag
+   e.g. "options=16" for FD, "options=32" for 100mbps-only. */
+static int full_duplex[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int options[] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int local_debug = -1;	/* The debug level */
+
+/* A few values that may be tweaked. */
+/* The ring sizes should be a power of two for efficiency. */
+#define TX_RING_SIZE	32
+#define RX_RING_SIZE	8 /* RX_RING_SIZE*2 rtskbs will be preallocated */
+/* How many slots the multicast filter setup may take.
+   Do not decrease without changing the set_rx_mode() implementation. */
+#define TX_MULTICAST_SIZE   2
+#define TX_MULTICAST_RESERV (TX_MULTICAST_SIZE*2)
+/* Actual number of TX packets queued, must be
+   <= TX_RING_SIZE-TX_MULTICAST_RESERV. */
+#define TX_QUEUE_LIMIT  (TX_RING_SIZE-TX_MULTICAST_RESERV)
+/* Hysteresis marking queue as no longer full. */
+#define TX_QUEUE_UNFULL (TX_QUEUE_LIMIT-4)
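+/* The in-flight count is cur_tx - dirty_tx: speedo_start_xmit() stops the
+   queue once it reaches TX_QUEUE_LIMIT and the interrupt handler wakes it
+   again only after it drops below TX_QUEUE_UNFULL, so the queue does not
+   thrash between full and not-full at the boundary. */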
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT		(2*HZ)
+/* Size of a pre-allocated Rx buffer: <Ethernet MTU> + slack. */
+#define PKT_BUF_SZ		VLAN_ETH_FRAME_LEN
+
+#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/version.h>
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/spinlock.h>
+#include <linux/init.h>
+#include <linux/mii.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+
+// *** RTnet ***
+#include <linux/if_vlan.h>
+#include <rtnet_port.h>
+
+#define MAX_UNITS               8
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+// *** RTnet ***
+
+MODULE_AUTHOR("Maintainer: Jan Kiszka <Jan.Kiszka@web.de>");
+MODULE_DESCRIPTION("Intel i82557/i82558/i82559 PCI EtherExpressPro driver");
+MODULE_LICENSE("GPL");
+module_param_named(debug, local_debug, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+module_param(txfifo, int, 0444);
+module_param(rxfifo, int, 0444);
+module_param(txdmacount, int, 0444);
+module_param(rxdmacount, int, 0444);
+module_param(max_interrupt_work, int, 0444);
+module_param(multicast_filter_limit, int, 0444);
+MODULE_PARM_DESC(debug, "eepro100 debug level (0-6)");
+MODULE_PARM_DESC(options, "eepro100: Bits 0-3: transceiver type, bit 4: full duplex, bit 5: 100Mbps");
+MODULE_PARM_DESC(full_duplex, "eepro100 full duplex setting(s) (1)");
+MODULE_PARM_DESC(txfifo, "eepro100 Tx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(rxfifo, "eepro100 Rx FIFO threshold in 4 byte units, (0-15)");
+MODULE_PARM_DESC(txdmacount, "eepro100 Tx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(rxdmacount, "eepro100 Rx DMA burst length; 128 - disable (0-128)");
+MODULE_PARM_DESC(max_interrupt_work, "eepro100 maximum events handled per interrupt");
+MODULE_PARM_DESC(multicast_filter_limit, "eepro100 maximum number of filtered multicast addresses");
+
+#define RUN_AT(x) (jiffies + (x))
+
+// *** RTnet - no power management ***
+#undef pci_set_power_state
+#define pci_set_power_state null_set_power_state
+static inline int null_set_power_state(struct pci_dev *dev, int state)
+{
+	return 0;
+}
+// *** RTnet ***
+
+#define netdevice_start(dev)
+#define netdevice_stop(dev)
+#define netif_set_tx_timeout(dev, tf, tm) \
+								do { \
+									(dev)->tx_timeout = (tf); \
+									(dev)->watchdog_timeo = (tm); \
+								} while(0)
+
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+static int speedo_debug = 1;
+#else
+#define speedo_debug 0
+#endif
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This device driver is designed for the Intel i82557 "Speedo3" chip, Intel's
+single-chip fast Ethernet controller for PCI, as used on the Intel
+EtherExpress Pro 100 adapter.
+
+II. Board-specific settings
+
+PCI bus devices are configured by the system at boot time, so no jumpers
+need to be set on the board.  The system BIOS should be set to assign the
+PCI INTA signal to an otherwise unused system IRQ line.  While it's
+possible to share PCI interrupt lines, it negatively impacts performance and
+only recent kernels support it.
+
+III. Driver operation
+
+IIIA. General
+The Speedo3 is very similar to other Intel network chips, that is to say
+"apparently designed on a different planet".  This chips retains the complex
+Rx and Tx descriptors and multiple buffers pointers as previous chips, but
+also has simplified Tx and Rx buffer modes.  This driver uses the "flexible"
+Tx mode, but in a simplified lower-overhead manner: it associates only a
+single buffer descriptor with each frame descriptor.
+
+Despite the extra space overhead in each receive skbuff, the driver must use
+the simplified Rx buffer mode to assure that only a single data buffer is
+associated with each RxFD. The driver implements this by reserving space
+for the Rx descriptor at the head of each Rx skbuff.
+
+The Speedo-3 has receive and command unit base addresses that are added to
+almost all descriptor pointers.  The driver sets these to zero, so that all
+pointer fields are absolute addresses.
+
+The System Control Block (SCB) of some previous Intel chips exists on the
+chip in both PCI I/O and memory space.  This driver uses the I/O space
+registers, but might switch to memory mapped mode to better support non-x86
+processors.
+
+IIIB. Transmit structure
+
+The driver must use the complex Tx command+descriptor mode in order to
+have an indirect pointer to the skbuff data section.  Each Tx command block
+(TxCB) is associated with two immediately appended Tx Buffer Descriptors
+(TxBDs).  A fixed ring of these TxCB+TxBD pairs is kept as part of the
+speedo_private data structure for each adapter instance.
+
+The newer i82558 explicitly supports this structure, and can read the two
+TxBDs in the same PCI burst as the TxCB.
+
+This ring structure is used for all normal transmit packets, but the
+transmit packet descriptors aren't long enough for most non-Tx commands such
+as CmdConfigure.  This is complicated by the possibility that the chip has
+already loaded the link address in the previous descriptor.  So for these
+commands we convert the next free descriptor on the ring to a NoOp, and point
+that descriptor's link to the complex command.
+
+An additional complexity of these non-transmit commands is that they may be
+added asynchronously to the normal transmit queue, so we disable interrupts
+whenever the Tx descriptor ring is manipulated.
+
+A notable aspect of these special configure commands is that they do
+work with the normal Tx ring entry scavenge method.  The Tx ring scavenge
+is done at interrupt time using the 'dirty_tx' index, and checking for the
+command-complete bit.  The setup frames may have their NoOp command on the
+Tx ring marked as complete before the linked setup command itself has
+completed, but this is not a problem.  The tx_ring entry can still be safely
+reused, as the tx_skbuff[] entry is always empty for config_cmd and mc_setup
+frames.
+
+Commands may have bits set, e.g. CmdSuspend, in the command word to either
+suspend or stop the transmit/command unit.  This driver always flags the last
+command with CmdSuspend, erases the CmdSuspend in the previous command, and
+then issues a CU_RESUME.
+Note: Watch out for the potential race condition here: imagine
+	erasing the previous suspend
+		the chip processes the previous command
+		the chip processes the final command, and suspends
+	doing the CU_RESUME
+		the chip processes the next-yet-valid post-final-command.
+So blindly sending a CU_RESUME is only safe if we do it immediately after
+erasing the previous CmdSuspend, without the possibility of an
+intervening delay.  Thus the resume command is always within the
+interrupts-disabled region.  This is a timing dependence, but handling this
+condition in a timing-independent way would considerably complicate the code.
+
+Note: In previous generation Intel chips, restarting the command unit was a
+notoriously slow process.  This is presumably no longer true.
+
+IIIC. Receive structure
+
+Because of the bus-master support on the Speedo3 this driver uses the new
+SKBUFF_RX_COPYBREAK scheme, rather than a fixed intermediate receive buffer.
+This scheme allocates full-sized skbuffs as receive buffers.  The value
+SKBUFF_RX_COPYBREAK is used as the copying breakpoint: it is chosen to
+trade-off the memory wasted by passing the full-sized skbuff to the queue
+layer for all frames vs. the copying cost of copying a frame to a
+correctly-sized skbuff.
+
+For small frames the copying cost is negligible (esp. considering that we
+are pre-loading the cache with immediately useful header information), so we
+allocate a new, minimally-sized skbuff.  For large frames the copying cost
+is non-trivial, and the larger copy might flush the cache of useful data, so
+we pass up the skbuff the packet was received into.
+
+IV. Notes
+
+Thanks to Steve Williams of Intel for arranging the non-disclosure agreement
+that stated that I could disclose the information.  But I still resent
+having to sign an Intel NDA when I'm helping Intel sell their own product!
+
+*/
+
+static int speedo_found1(struct pci_dev *pdev, long ioaddr, int fnd_cnt, int acpi_idle_state);
+
+enum pci_flags_bit {
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+static inline unsigned int io_inw(unsigned long port)
+{
+	return inw(port);
+}
+static inline void io_outw(unsigned int val, unsigned long port)
+{
+	outw(val, port);
+}
+
+#ifndef USE_IO
+/* Currently alpha headers define in/out macros.
+   Undefine them.  2000/03/30  SAW */
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb(addr) readb((void *)(addr))
+#define inw(addr) readw((void *)(addr))
+#define inl(addr) readl((void *)(addr))
+#define outb(val, addr) writeb(val, (void *)(addr))
+#define outw(val, addr) writew(val, (void *)(addr))
+#define outl(val, addr) writel(val, (void *)(addr))
+#endif
+
+/* How to wait for the command unit to accept a command.
+   Typically this takes 0 ticks. */
+static inline void wait_for_cmd_done(long cmd_ioaddr)
+{
+	int wait = 1000;
+	do
+		udelay(1);
+	while (inb(cmd_ioaddr) && --wait >= 0);
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	if (wait < 0)
+		printk(KERN_ALERT "eepro100: wait_for_cmd_done timeout!\n");
+#endif
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDSTATS
+static inline int rt_wait_for_cmd_done(long cmd_ioaddr, const char *cmd)
+{
+    int wait = CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT;
+    nanosecs_abs_t t0, t1;	/* FIXME: leftover timing-stats code, t1 unused */
+
+    t0 = rtdm_clock_read();
+    while (inb(cmd_ioaddr) != 0) {
+	if (wait-- == 0) {
+	    rtdm_printk(KERN_ALERT "eepro100: rt_wait_for_cmd_done(%s) "
+			"timeout!\n", cmd);
+	    return 1;
+	}
+	rtdm_task_busy_sleep(1000);
+    }
+    return 0;
+}
+#else
+static inline int rt_wait_for_cmd_done(long cmd_ioaddr, const char *cmd)
+{
+    int wait = CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_CMDTIMEOUT;
+
+    while (inb(cmd_ioaddr) != 0) {
+	if (wait-- == 0)
+	    return 1;
+	rtdm_task_busy_sleep(1000);
+    }
+    return 0;
+}
+#endif
+
+/* Offsets to the various registers.
+   All accesses need not be longword aligned. */
+enum speedo_offsets {
+	SCBStatus = 0, SCBCmd = 2,	/* Rx/Command Unit command and status. */
+	SCBPointer = 4,				/* General purpose pointer. */
+	SCBPort = 8,				/* Misc. commands and operands.  */
+	SCBflash = 12, SCBeeprom = 14, /* EEPROM and flash memory control. */
+	SCBCtrlMDI = 16,			/* MDI interface control. */
+	SCBEarlyRx = 20,			/* Early receive byte count. */
+};
+/* Commands that can be put in a command list entry. */
+enum commands {
+	CmdNOp = 0, CmdIASetup = 0x10000, CmdConfigure = 0x20000,
+	CmdMulticastList = 0x30000, CmdTx = 0x40000, CmdTDR = 0x50000,
+	CmdDump = 0x60000, CmdDiagnose = 0x70000,
+	CmdSuspend = 0x40000000,	/* Suspend after completion. */
+	CmdIntr = 0x20000000,		/* Interrupt after completion. */
+	CmdTxFlex = 0x00080000,		/* Use "Flexible mode" for CmdTx command. */
+};
+/* Clear CmdSuspend (1<<30) without interfering with the card's access to the
+   status bits.  Previous driver versions used separate 16 bit fields for
+   commands and statuses.  --SAW
+ */
+#if defined(__alpha__)
+# define clear_suspend(cmd)  clear_bit(30, &(cmd)->cmd_status);
+#else
+# if defined(__LITTLE_ENDIAN)
+#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x4000
+# elif defined(__BIG_ENDIAN)
+#  define clear_suspend(cmd)  ((__u16 *)&(cmd)->cmd_status)[1] &= ~0x0040
+# else
+#  error Unsupported byteorder
+# endif
+#endif
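+/* Why the masks differ: cmd_status is little-endian in memory and bit 30
+   lives in byte 3.  Viewed as two host-order u16 halves, byte 3 is the high
+   byte of the second half on little-endian CPUs (mask 0x4000) but its low
+   byte on big-endian CPUs (mask 0x0040). */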
+
+enum SCBCmdBits {
+	SCBMaskCmdDone=0x8000, SCBMaskRxDone=0x4000, SCBMaskCmdIdle=0x2000,
+	SCBMaskRxSuspend=0x1000, SCBMaskEarlyRx=0x0800, SCBMaskFlowCtl=0x0400,
+	SCBTriggerIntr=0x0200, SCBMaskAll=0x0100,
+	/* The rest are Rx and Tx commands. */
+	CUStart=0x0010, CUResume=0x0020, CUStatsAddr=0x0040, CUShowStats=0x0050,
+	CUCmdBase=0x0060,	/* CU Base address (set to zero) . */
+	CUDumpStats=0x0070, /* Dump then reset stats counters. */
+	RxStart=0x0001, RxResume=0x0002, RxAbort=0x0004, RxAddrLoad=0x0006,
+	RxResumeNoResources=0x0007,
+};
+
+enum SCBPort_cmds {
+	PortReset=0, PortSelfTest=1, PortPartialReset=2, PortDump=3,
+};
+
+/* The Speedo3 Rx and Tx frame/buffer descriptors. */
+struct descriptor {                         /* A generic descriptor. */
+	s32 cmd_status;				/* All command and status fields. */
+	u32 link;                                   /* struct descriptor *  */
+	unsigned char params[0];
+};
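+/* params[0] is the pre-C99 flexible-array idiom: command-specific parameter
+   bytes (e.g. the station address for CmdIASetup, or the CONFIG_DATA_SIZE
+   bytes of a CmdConfigure) follow the two-word header in memory. */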
+
+/* The Speedo3 Rx and Tx buffer descriptors. */
+struct RxFD {					/* Receive frame descriptor. */
+	s32 status;
+	u32 link;					/* struct RxFD * */
+	u32 rx_buf_addr;			/* void * */
+	u32 count;
+};
+
+/* Selected elements of the Tx/RxFD.status word. */
+enum RxFD_bits {
+	RxComplete=0x8000, RxOK=0x2000,
+	RxErrCRC=0x0800, RxErrAlign=0x0400, RxErrTooBig=0x0200, RxErrSymbol=0x0010,
+	RxEth2Type=0x0020, RxNoMatch=0x0004, RxNoIAMatch=0x0002,
+	TxUnderrun=0x1000,  StatusComplete=0x8000,
+};
+
+#define CONFIG_DATA_SIZE 22
+struct TxFD {					/* Transmit frame descriptor set. */
+	s32 status;
+	u32 link;					/* void * */
+	u32 tx_desc_addr;			/* Always points to the tx_buf_addr element. */
+	s32 count;					/* # of TBD (=1), Tx start thresh., etc. */
+	/* This constitutes two "TBD" entries -- we only use one. */
+#define TX_DESCR_BUF_OFFSET 16
+	u32 tx_buf_addr0;			/* void *, frame to be transmitted.  */
+	s32 tx_buf_size0;			/* Length of Tx frame. */
+	u32 tx_buf_addr1;			/* void *, frame to be transmitted.  */
+	s32 tx_buf_size1;			/* Length of Tx frame. */
+	/* the structure must have space for at least CONFIG_DATA_SIZE starting
+	 * from tx_desc_addr field */
+};
+
+/* Multicast filter setting block.  --SAW */
+struct speedo_mc_block {
+	struct speedo_mc_block *next;
+	unsigned int tx;
+	dma_addr_t frame_dma;
+	unsigned int len;
+	struct descriptor frame __attribute__ ((__aligned__(16)));
+};
+
+/* Elements of the dump_statistics block. This block must be lword aligned. */
+struct speedo_stats {
+	u32 tx_good_frames;
+	u32 tx_coll16_errs;
+	u32 tx_late_colls;
+	u32 tx_underruns;
+	u32 tx_lost_carrier;
+	u32 tx_deferred;
+	u32 tx_one_colls;
+	u32 tx_multi_colls;
+	u32 tx_total_colls;
+	u32 rx_good_frames;
+	u32 rx_crc_errs;
+	u32 rx_align_errs;
+	u32 rx_resource_errs;
+	u32 rx_overrun_errs;
+	u32 rx_colls_errs;
+	u32 rx_runt_errs;
+	u32 done_marker;
+};
+
+enum Rx_ring_state_bits {
+	RrNoMem=1, RrPostponed=2, RrNoResources=4, RrOOMReported=8,
+};
+
+/* Do not change the position (alignment) of the first few elements!
+   The later elements are grouped for cache locality.
+
+   Unfortunately, all the positions have been shifted since then.
+   A new re-alignment is required.  2000/03/06  SAW */
+struct speedo_private {
+	struct TxFD *tx_ring;			/* Commands (usually CmdTxPacket). */
+	struct RxFD *rx_ringp[RX_RING_SIZE];	/* Rx descriptor, used as ring. */
+
+	// *** RTnet ***
+	/* The addresses of Tx/Rx-in-place packets/buffers. */
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	// *** RTnet ***
+
+	/* Mapped addresses of the rings. */
+	dma_addr_t tx_ring_dma;
+#define TX_RING_ELEM_DMA(sp, n) ((sp)->tx_ring_dma + (n)*sizeof(struct TxFD))
+	dma_addr_t rx_ring_dma[RX_RING_SIZE];
+	struct descriptor *last_cmd;		/* Last command sent. */
+	unsigned int cur_tx, dirty_tx;		/* The ring entries to be free()ed. */
+	rtdm_lock_t lock;					/* Group with Tx control cache line. */
+	u32 tx_threshold;					/* The value for txdesc.count. */
+	struct RxFD *last_rxf;				/* Last filled RX buffer. */
+	dma_addr_t last_rxf_dma;
+	unsigned int cur_rx, dirty_rx;		/* The next free ring entry */
+	long last_rx_time;			/* Last Rx, in jiffies, to handle Rx hang. */
+	struct net_device_stats stats;
+	struct speedo_stats *lstats;
+	dma_addr_t lstats_dma;
+	int chip_id;
+	struct pci_dev *pdev;
+	struct speedo_mc_block *mc_setup_head;/* Multicast setup frame list head. */
+	struct speedo_mc_block *mc_setup_tail;/* Multicast setup frame list tail. */
+	long in_interrupt;					/* Word-aligned rtdev->interrupt */
+	unsigned char acpi_pwr;
+	signed char rx_mode;					/* Current PROMISC/ALLMULTI setting. */
+	unsigned int tx_full:1;				/* The Tx queue is full. */
+	unsigned int full_duplex:1;			/* Full-duplex operation requested. */
+	unsigned int flow_ctrl:1;			/* Use 802.3x flow control. */
+	unsigned int rx_bug:1;				/* Work around receiver hang errata. */
+	unsigned char default_port:8;		/* Last rtdev->if_port value. */
+	unsigned char rx_ring_state;		/* RX ring status flags. */
+	unsigned short phy[2];				/* PHY media interfaces available. */
+	unsigned short advertising;			/* Current PHY advertised caps. */
+	unsigned short partner;				/* Link partner caps. */
+	rtdm_irq_t irq_handle;
+};
+
+/* The parameters for a CmdConfigure operation.
+   There are so many options that it would be difficult to document each bit.
+   We mostly use the default or recommended settings. */
+static const char i82558_config_cmd[CONFIG_DATA_SIZE] = {
+	22, 0x08, 0, 1,  0, 0, 0x22, 0x03,  1, /* 1=Use MII  0=Use AUI */
+	0, 0x2E, 0,  0x60, 0x08, 0x88,
+	0x68, 0, 0x40, 0xf2, 0x84,		/* Disable FC */
+	0x31, 0x05, };
+
+/* PHY media interface chips. */
+enum phy_chips { NonSuchPhy=0, I82553AB, I82553C, I82503, DP83840, S80C240,
+					 S80C24, I82555, DP83840A=10, };
+#define EE_READ_CMD		(6)
+
+static int eepro100_init_one(struct pci_dev *pdev,
+		const struct pci_device_id *ent);
+static void eepro100_remove_one (struct pci_dev *pdev);
+
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len);
+static int mdio_read(long ioaddr, int phy_id, int location);
+static int speedo_open(struct rtnet_device *rtdev);
+static void speedo_resume(struct rtnet_device *rtdev);
+static void speedo_init_rx_ring(struct rtnet_device *rtdev);
+static int speedo_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static void speedo_refill_rx_buffers(struct rtnet_device *rtdev, int force);
+static int speedo_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp);
+static void speedo_tx_buffer_gc(struct rtnet_device *rtdev);
+static int speedo_interrupt(rtdm_irq_t *irq_handle);
+static int speedo_close(struct rtnet_device *rtdev);
+static void set_rx_mode(struct rtnet_device *rtdev);
+static void speedo_show_state(struct rtnet_device *rtdev);
+static struct net_device_stats *speedo_get_stats(struct rtnet_device *rtdev);
+
+
+static inline void speedo_write_flush(long ioaddr)
+{
+	/* Flush previous PCI writes through intermediate bridges
+	 * by doing a benign read */
+	(void)readb((void *)(ioaddr + SCBStatus));
+}
+
+static int eepro100_init_one (struct pci_dev *pdev,
+		const struct pci_device_id *ent)
+{
+	unsigned long ioaddr;
+	int irq;
+	int acpi_idle_state = 0, pm;
+	static int cards_found = -1;
+
+	static int did_version /* = 0 */;		/* Already printed version info. */
+	if (speedo_debug > 0  &&  did_version++ == 0)
+		printk(version);
+
+	// *** RTnet ***
+	cards_found++;
+	if (cards[cards_found] == 0)
+		goto err_out_none;
+	// *** RTnet ***
+
+	if (!request_region(pci_resource_start(pdev, 1),
+			pci_resource_len(pdev, 1), "eepro100")) {
+		printk (KERN_ERR "eepro100: cannot reserve I/O ports\n");
+		goto err_out_none;
+	}
+	if (!request_mem_region(pci_resource_start(pdev, 0),
+			pci_resource_len(pdev, 0), "eepro100")) {
+		printk (KERN_ERR "eepro100: cannot reserve MMIO region\n");
+		goto err_out_free_pio_region;
+	}
+
+	irq = pdev->irq;
+#ifdef USE_IO
+	ioaddr = pci_resource_start(pdev, 1);
+	if (speedo_debug > 2)
+		printk("Found Intel i82557 PCI Speedo at I/O %#lx, IRQ %d.\n",
+			   ioaddr, irq);
+#else
+	ioaddr = (unsigned long)ioremap(pci_resource_start(pdev, 0),
+									pci_resource_len(pdev, 0));
+	if (!ioaddr) {
+		printk(KERN_ERR "eepro100: cannot remap MMIO region %llx @ %llx\n",
+		       (unsigned long long)pci_resource_len(pdev, 0),
+		       (unsigned long long)pci_resource_start(pdev, 0));
+		goto err_out_free_mmio_region;
+	}
+	if (speedo_debug > 2)
+		printk("Found Intel i82557 PCI Speedo, MMIO at %#llx, IRQ %d.\n",
+		       (unsigned long long)pci_resource_start(pdev, 0), irq);
+#endif
+
+	/* save the power state before pci_enable_device overwrites it */
+	pm = pci_find_capability(pdev, PCI_CAP_ID_PM);
+	if (pm) {
+		u16 pwr_command;
+		pci_read_config_word(pdev, pm + PCI_PM_CTRL, &pwr_command);
+		acpi_idle_state = pwr_command & PCI_PM_CTRL_STATE_MASK;
+	}
+
+	if (pci_enable_device(pdev))
+		goto err_out_free_mmio_region;
+
+	pci_set_master(pdev);
+
+	if (speedo_found1(pdev, ioaddr, cards_found, acpi_idle_state) != 0)
+		goto err_out_iounmap;
+
+	return 0;
+
+err_out_iounmap: ;
+#ifndef USE_IO
+	iounmap ((void *)ioaddr);
+#endif
+err_out_free_mmio_region:
+	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+err_out_free_pio_region:
+	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+err_out_none:
+	return -ENODEV;
+}
+
+static int speedo_found1(struct pci_dev *pdev,
+		long ioaddr, int card_idx, int acpi_idle_state)
+{
+	// *** RTnet ***
+	struct rtnet_device *rtdev = NULL;
+	// *** RTnet ***
+
+	struct speedo_private *sp;
+	const char *product;
+	int i, option;
+	u16 eeprom[0x100];
+	int size;
+	void *tx_ring_space;
+	dma_addr_t tx_ring_dma;
+
+	size = TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats);
+	tx_ring_space = dma_alloc_coherent(&pdev->dev, size, &tx_ring_dma,
+					   GFP_ATOMIC);
+	if (tx_ring_space == NULL)
+		return -1;
+
+	// *** RTnet ***
+	rtdev = rt_alloc_etherdev(sizeof(struct speedo_private),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "eepro100: Could not allocate ethernet device.\n");
+		dma_free_coherent(&pdev->dev, size, tx_ring_space, tx_ring_dma);
+		return -1;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	memset(rtdev->priv, 0, sizeof(struct speedo_private));
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+	// *** RTnet ***
+
+	if (rtdev->mem_start > 0)
+		option = rtdev->mem_start;
+	else if (card_idx >= 0  &&  options[card_idx] >= 0)
+		option = options[card_idx];
+	else
+		option = 0;
+
+	/* Read the station address EEPROM before doing the reset.
+	   Nominally this should even be done before accepting the device, but
+	   then we wouldn't have a device name with which to report the error.
+	   The size test is for 6 bit vs. 8 bit address serial EEPROMs.
+	*/
+	{
+		unsigned long iobase;
+		int read_cmd, ee_size;
+		u16 sum;
+		int j;
+
+		/* Use IO only to avoid postponed writes and satisfy EEPROM timing
+		   requirements. */
+		iobase = pci_resource_start(pdev, 1);
+		if ((do_eeprom_cmd(iobase, EE_READ_CMD << 24, 27) & 0xffe0000)
+			== 0xffe0000) {
+			ee_size = 0x100;
+			read_cmd = EE_READ_CMD << 24;
+		} else {
+			ee_size = 0x40;
+			read_cmd = EE_READ_CMD << 22;
+		}
+
+		for (j = 0, i = 0, sum = 0; i < ee_size; i++) {
+			u16 value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);
+			eeprom[i] = value;
+			sum += value;
+			if (i < 3) {
+				rtdev->dev_addr[j++] = value;
+				rtdev->dev_addr[j++] = value >> 8;
+			}
+		}
+		if (sum != 0xBABA)
+			printk(KERN_WARNING "%s: Invalid EEPROM checksum %#4.4x, "
+				   "check settings before activating this device!\n",
+				   rtdev->name, sum);
+		/* Don't  unregister_netdev(dev);  as the EEPro may actually be
+		   usable, especially if the MAC address is set later.
+		   On the other hand, it may be unusable if MDI data is corrupted. */
+	}
+
+	/* Reset the chip: stop Tx and Rx processes and clear counters.
+	   This takes less than 10usec and will easily finish before the next
+	   action. */
+	outl(PortReset, ioaddr + SCBPort);
+	inl(ioaddr + SCBPort);
+	udelay(10);
+
+	if (eeprom[3] & 0x0100)
+		product = "OEM i82557/i82558 10/100 Ethernet";
+	else
+		product = pci_name(pdev);
+
+	printk(KERN_INFO "%s: %s, ", rtdev->name, product);
+
+	for (i = 0; i < 5; i++)
+		printk("%2.2X:", rtdev->dev_addr[i]);
+	printk("%2.2X, ", rtdev->dev_addr[i]);
+#ifdef USE_IO
+	printk("I/O at %#3lx, ", ioaddr);
+#endif
+	printk("IRQ %d.\n", pdev->irq);
+
+	outl(PortReset, ioaddr + SCBPort);
+	inl(ioaddr + SCBPort);
+	udelay(10);
+
+	/* Return the chip to its original power state. */
+	pci_set_power_state(pdev, acpi_idle_state);
+
+	rtdev->base_addr = ioaddr;
+	rtdev->irq = pdev->irq;
+
+	sp = rtdev->priv;
+	sp->pdev = pdev;
+	sp->acpi_pwr = acpi_idle_state;
+	sp->tx_ring = tx_ring_space;
+	sp->tx_ring_dma = tx_ring_dma;
+	sp->lstats = (struct speedo_stats *)(sp->tx_ring + TX_RING_SIZE);
+	sp->lstats_dma = TX_RING_ELEM_DMA(sp, TX_RING_SIZE);
+
+	sp->full_duplex = option >= 0 && (option & 0x10) ? 1 : 0;
+	if (card_idx >= 0) {
+		if (full_duplex[card_idx] >= 0)
+			sp->full_duplex = full_duplex[card_idx];
+	}
+	sp->default_port = option >= 0 ? (option & 0x0f) : 0;
+
+	sp->phy[0] = eeprom[6];
+	sp->phy[1] = eeprom[7];
+	sp->rx_bug = (eeprom[3] & 0x03) == 3 ? 0 : 1;
+	if ((pdev->device > 0x1030 && pdev->device < 0x1039) ||
+	    (pdev->device == 0x2449)) {
+		sp->chip_id = 1;
+	}
+
+	if (sp->rx_bug)
+		printk(KERN_ERR "  *** Receiver lock-up bug detected ***\n"
+		       KERN_ERR "  Your device may not work reliably!\n");
+
+	// *** RTnet ***
+	/* The Speedo-specific entries in the device structure. */
+	rtdev->open = &speedo_open;
+	rtdev->hard_start_xmit = &speedo_start_xmit;
+	rtdev->stop = &speedo_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->get_stats = &speedo_get_stats;
+	//rtdev->do_ioctl = NULL;
+
+	if ( (i=rt_register_rtnetdev(rtdev)) )
+	{
+		dma_free_coherent(&pdev->dev, size, tx_ring_space, tx_ring_dma);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+	pci_set_drvdata (pdev, rtdev);
+	// *** RTnet ***
+
+	return 0;
+}
+
+/* Serial EEPROM section.
+   A "bit" grungy, but we work our way through bit-by-bit :->. */
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK	0x01	/* EEPROM shift clock. */
+#define EE_CS			0x02	/* EEPROM chip select. */
+#define EE_DATA_WRITE	0x04	/* EEPROM chip data in. */
+#define EE_DATA_READ	0x08	/* EEPROM chip data out. */
+#define EE_ENB			(0x4800 | EE_CS)
+#define EE_WRITE_0		0x4802
+#define EE_WRITE_1		0x4806
+#define EE_OFFSET		SCBeeprom
+
+/* The fixes for the code were kindly provided by Dragan Stancevic
+   <visitor@valinux.com> to strictly follow Intel specifications of EEPROM
+   access timing.
+   The publicly available sheet 64486302 (sec. 3.1) specifies 1us access
+   interval for serial EEPROM.  However, it looks like there is an
+   additional requirement dictating larger udelay's in the code below.
+   2000/05/24  SAW */
+static int do_eeprom_cmd(long ioaddr, int cmd, int cmd_len)
+{
+	unsigned retval = 0;
+	long ee_addr = ioaddr + SCBeeprom;
+
+	io_outw(EE_ENB, ee_addr); udelay(2);
+	io_outw(EE_ENB | EE_SHIFT_CLK, ee_addr); udelay(2);
+
+	/* Shift the command bits out. */
+	do {
+		short dataval = (cmd & (1 << cmd_len)) ? EE_WRITE_1 : EE_WRITE_0;
+		io_outw(dataval, ee_addr); udelay(2);
+		io_outw(dataval | EE_SHIFT_CLK, ee_addr); udelay(2);
+		retval = (retval << 1) | ((io_inw(ee_addr) & EE_DATA_READ) ? 1 : 0);
+	} while (--cmd_len >= 0);
+	io_outw(EE_ENB, ee_addr); udelay(2);
+
+	/* Terminate the EEPROM access. */
+	io_outw(EE_ENB & ~EE_CS, ee_addr);
+	return retval;
+}
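+
+/* Example (as used in speedo_found1 above): EEPROM word i is read by
+   clocking out the read opcode and address, after which the low 16 bits of
+   the return value hold the data:
+       value = do_eeprom_cmd(iobase, read_cmd | (i << 16), 27);  */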
+
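+/* The MDI control word appears to pack the opcode into bits 26-27 (read is
+   0x08000000), the PHY address into bits 21-25 and the register number into
+   bits 16-20; bit 28 (0x10000000) is polled as the completion flag. */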
+static int mdio_read(long ioaddr, int phy_id, int location)
+{
+	int val, boguscnt = 64*10;		/* <64 usec. to complete, typ 27 ticks */
+	outl(0x08000000 | (location<<16) | (phy_id<<21), ioaddr + SCBCtrlMDI);
+	do {
+		val = inl(ioaddr + SCBCtrlMDI);
+		if (--boguscnt < 0) {
+			printk(KERN_ERR " mdio_read() timed out with val = %8.8x.\n", val);
+			break;
+		}
+	} while (! (val & 0x10000000));
+	return val & 0xffff;
+}
+
+
+static int
+speedo_open(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int retval;
+
+	if (speedo_debug > 1)
+		printk(KERN_DEBUG "%s: speedo_open() irq %d.\n", rtdev->name, rtdev->irq);
+
+	pci_set_power_state(sp->pdev, 0);
+
+	/* Set up the Tx queue early. */
+	sp->cur_tx = 0;
+	sp->dirty_tx = 0;
+	sp->last_cmd = 0;
+	sp->tx_full = 0;
+	rtdm_lock_init(&sp->lock);
+	sp->in_interrupt = 0;
+
+	// *** RTnet ***
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	retval = rtdm_irq_request(&sp->irq_handle, rtdev->irq,
+				  speedo_interrupt, RTDM_IRQTYPE_SHARED,
+				  "rt_eepro100", rtdev);
+	if (retval) {
+		return retval;
+	}
+	// *** RTnet ***
+
+	rtdev->if_port = sp->default_port;
+
+	speedo_init_rx_ring(rtdev);
+
+	/* Fire up the hardware. */
+	outw(SCBMaskAll, ioaddr + SCBCmd);
+	speedo_write_flush(ioaddr);
+	speedo_resume(rtdev);
+
+	netdevice_start(rtdev);
+	rtnetif_start_queue(rtdev);
+
+	/* Setup the chip and configure the multicast list. */
+	sp->mc_setup_head = NULL;
+	sp->mc_setup_tail = NULL;
+	sp->flow_ctrl = sp->partner = 0;
+	sp->rx_mode = -1;			/* Invalid -> always reset the mode. */
+	set_rx_mode(rtdev);
+	if ((sp->phy[0] & 0x8000) == 0)
+		sp->advertising = mdio_read(ioaddr, sp->phy[0] & 0x1f, 4);
+
+	if (mdio_read(ioaddr, sp->phy[0] & 0x1f, MII_BMSR) & BMSR_LSTATUS)
+		rtnetif_carrier_on(rtdev);
+	else
+		rtnetif_carrier_off(rtdev);
+
+	if (speedo_debug > 2) {
+		printk(KERN_DEBUG "%s: Done speedo_open(), status %8.8x.\n",
+			   rtdev->name, inw(ioaddr + SCBStatus));
+	}
+
+	/* No need to wait for the command unit to accept here. */
+	if ((sp->phy[0] & 0x8000) == 0)
+		mdio_read(ioaddr, sp->phy[0] & 0x1f, 0);
+
+	return 0;
+}
+
+/* Start the chip hardware after a full reset. */
+static void speedo_resume(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+
+	/* Start with a Tx threshold of 256 (0x..20.... 8 byte units). */
+	sp->tx_threshold = 0x01208000;
+
+	/* Set the segment registers to '0'. */
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outl(0, ioaddr + SCBPointer);
+	/* impose a delay to avoid a bug */
+	inl(ioaddr + SCBPointer);
+	udelay(10);
+	outb(RxAddrLoad, ioaddr + SCBCmd);
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outb(CUCmdBase, ioaddr + SCBCmd);
+
+	/* Load the statistics block and rx ring addresses. */
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outl(sp->lstats_dma, ioaddr + SCBPointer);
+	outb(CUStatsAddr, ioaddr + SCBCmd);
+	sp->lstats->done_marker = 0;
+
+	if (sp->rx_ringp[sp->cur_rx % RX_RING_SIZE] == NULL) {
+		if (speedo_debug > 2)
+			printk(KERN_DEBUG "%s: NULL cur_rx in speedo_resume().\n",
+					rtdev->name);
+	} else {
+		wait_for_cmd_done(ioaddr + SCBCmd);
+		outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+			 ioaddr + SCBPointer);
+		outb(RxStart, ioaddr + SCBCmd);
+	}
+
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outb(CUDumpStats, ioaddr + SCBCmd);
+	udelay(30);
+
+	/* Fill the first command with our physical address. */
+	{
+		struct descriptor *ias_cmd;
+
+		ias_cmd =
+			(struct descriptor *)&sp->tx_ring[sp->cur_tx++ % TX_RING_SIZE];
+		/* Avoid a bug(?!) here by marking the command already completed. */
+		ias_cmd->cmd_status = cpu_to_le32((CmdSuspend | CmdIASetup) | 0xa000);
+		ias_cmd->link =
+			cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+		memcpy(ias_cmd->params, rtdev->dev_addr, 6);
+		sp->last_cmd = ias_cmd;
+	}
+
+	/* Start the chip's Tx process and unmask interrupts. */
+	wait_for_cmd_done(ioaddr + SCBCmd);
+	outl(TX_RING_ELEM_DMA(sp, sp->dirty_tx % TX_RING_SIZE),
+		 ioaddr + SCBPointer);
+	/* We are not ACK-ing FCP and ER in the interrupt handler yet so they should
+	   remain masked --Dragan */
+	outw(CUStart | SCBMaskEarlyRx | SCBMaskFlowCtl, ioaddr + SCBCmd);
+}
+
+static void speedo_show_state(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	unsigned int i;
+
+	/* Print a few items for debugging. */
+	if (speedo_debug > 0) {
+		printk(KERN_DEBUG "%s: Tx ring dump,  Tx queue %u / %u:\n", rtdev->name,
+			   sp->cur_tx, sp->dirty_tx);
+		for (i = 0; i < TX_RING_SIZE; i++)
+			printk(KERN_DEBUG "%s:  %c%c%2d %8.8x.\n", rtdev->name,
+				   i == sp->dirty_tx % TX_RING_SIZE ? '*' : ' ',
+				   i == sp->cur_tx % TX_RING_SIZE ? '=' : ' ',
+				   i, sp->tx_ring[i].status);
+	}
+	printk(KERN_DEBUG "%s: Printing Rx ring"
+		   " (next to receive into %u, dirty index %u).\n",
+		   rtdev->name, sp->cur_rx, sp->dirty_rx);
+
+	for (i = 0; i < RX_RING_SIZE; i++)
+		printk(KERN_DEBUG "%s: %c%c%c%2d %8.8x.\n", rtdev->name,
+			   sp->rx_ringp[i] == sp->last_rxf ? 'l' : ' ',
+			   i == sp->dirty_rx % RX_RING_SIZE ? '*' : ' ',
+			   i == sp->cur_rx % RX_RING_SIZE ? '=' : ' ',
+			   i, (sp->rx_ringp[i] != NULL) ?
+					   (unsigned)sp->rx_ringp[i]->status : 0);
+
+	{
+		long ioaddr = rtdev->base_addr;
+		int phy_num = sp->phy[0] & 0x1f;
+		for (i = 0; i < 16; i++) {
+			/* FIXME: what does it mean?  --SAW */
+			if (i == 6) i = 21;
+			printk(KERN_DEBUG "%s:  PHY index %d register %d is %4.4x.\n",
+				   rtdev->name, phy_num, i, mdio_read(ioaddr, phy_num, i));
+		}
+	}
+}
+
+static struct net_device_stats *speedo_get_stats(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	return &sp->stats;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void
+speedo_init_rx_ring(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	struct RxFD *rxf, *last_rxf = NULL;
+	dma_addr_t last_rxf_dma = 0 /* to shut up the compiler */;
+	int i;
+
+	sp->cur_rx = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *skb;
+		skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ + 2 + sizeof(struct RxFD));
+		sp->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;			/* OK.  Just initially short of Rx bufs. */
+		// *** RTnet ***
+		rtskb_reserve(skb, 2);  /* IP header alignment */
+		// *** RTnet ***
+		rxf = (struct RxFD *)skb->tail;
+		sp->rx_ringp[i] = rxf;
+		sp->rx_ring_dma[i] =
+			dma_map_single(&sp->pdev->dev, rxf,
+				       PKT_BUF_SZ + sizeof(struct RxFD),
+				       DMA_BIDIRECTIONAL);
+		rtskb_reserve(skb, sizeof(struct RxFD));
+		if (last_rxf) {
+			last_rxf->link = cpu_to_le32(sp->rx_ring_dma[i]);
+			dma_sync_single_for_device(&sp->pdev->dev, last_rxf_dma,
+						   sizeof(struct RxFD),
+						   DMA_TO_DEVICE);
+		}
+		last_rxf = rxf;
+		last_rxf_dma = sp->rx_ring_dma[i];
+		rxf->status = cpu_to_le32(0x00000001);	/* '1' is flag value only. */
+		rxf->link = 0;						/* None yet. */
+		/* This field unused by i82557. */
+		rxf->rx_buf_addr = 0xffffffff;
+		rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
+		dma_sync_single_for_device(&sp->pdev->dev, sp->rx_ring_dma[i],
+					   sizeof(struct RxFD), DMA_TO_DEVICE);
+	}
+	sp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+	/* Mark the last entry as end-of-list. */
+	last_rxf->status = cpu_to_le32(0xC0000002);	/* '2' is flag value only. */
+	dma_sync_single_for_device(&sp->pdev->dev,
+				   sp->rx_ring_dma[RX_RING_SIZE-1],
+				   sizeof(struct RxFD), DMA_TO_DEVICE);
+	sp->last_rxf = last_rxf;
+	sp->last_rxf_dma = last_rxf_dma;
+}
+
+static int
+speedo_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int entry;
+	// *** RTnet ***
+	rtdm_lockctx_t context;
+
+	/* Prevent interrupts from changing the Tx ring from underneath us. */
+	rtdm_lock_get_irqsave(&sp->lock, context);
+	// *** RTnet ***
+
+	/* Check if there is enough space. */
+	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+		// *** RTnet ***
+		rtnetif_stop_queue(rtdev);
+		sp->tx_full = 1;
+
+		rtdm_lock_put_irqrestore(&sp->lock, context);
+
+		rtdm_printk(KERN_ERR "%s: incorrect tbusy state, fixed.\n", rtdev->name);
+		// *** RTnet ***
+
+		return 1;
+	}
+
+	/* Calculate the Tx descriptor entry. */
+	entry = sp->cur_tx++ % TX_RING_SIZE;
+
+	sp->tx_skbuff[entry] = skb;
+	sp->tx_ring[entry].status =
+		cpu_to_le32(CmdSuspend | CmdTx | CmdTxFlex);
+	if (!(entry & ((TX_RING_SIZE>>2)-1)))
+		sp->tx_ring[entry].status |= cpu_to_le32(CmdIntr);
+	sp->tx_ring[entry].link =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, sp->cur_tx % TX_RING_SIZE));
+	sp->tx_ring[entry].tx_desc_addr =
+		cpu_to_le32(TX_RING_ELEM_DMA(sp, entry) + TX_DESCR_BUF_OFFSET);
+	/* The data region is always in one buffer descriptor. */
+	sp->tx_ring[entry].count = cpu_to_le32(sp->tx_threshold);
+	sp->tx_ring[entry].tx_buf_addr0 =
+		cpu_to_le32(dma_map_single(&sp->pdev->dev, skb->data,
+					   skb->len, DMA_TO_DEVICE));
+	sp->tx_ring[entry].tx_buf_size0 = cpu_to_le32(skb->len);
+
+// *** RTnet ***
+// Disabled to gain shorter worst-case execution times.
+// Hope this bug is not relevant for us
+
+	/* Trigger the command unit resume. */
+	if (rt_wait_for_cmd_done(ioaddr + SCBCmd, __FUNCTION__) != 0) {
+		rtdm_lock_put_irqrestore(&sp->lock, context);
+
+		return 1;
+	}
+
+	/* get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+// *** RTnet ***
+
+	clear_suspend(sp->last_cmd);
+	/* We want the time window between clearing suspend flag on the previous
+	   command and resuming CU to be as small as possible.
+	   Interrupts in between are very undesired.  --SAW */
+	outb(CUResume, ioaddr + SCBCmd);
+	sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+	/* Leave room for set_rx_mode(): if no more space is left than is
+	   reserved for the multicast filter, mark the ring as full. */
+	if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+		rtnetif_stop_queue(rtdev);
+		sp->tx_full = 1;
+	}
+
+	// *** RTnet ***
+	rtdm_lock_put_irqrestore(&sp->lock, context);
+	// *** RTnet ***
+
+	return 0;
+}
+
+static void speedo_tx_buffer_gc(struct rtnet_device *rtdev)
+{
+	unsigned int dirty_tx;
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+
+	dirty_tx = sp->dirty_tx;
+	while ((int)(sp->cur_tx - dirty_tx) > 0) {
+		int entry = dirty_tx % TX_RING_SIZE;
+		int status = le32_to_cpu(sp->tx_ring[entry].status);
+
+		if (speedo_debug > 5)
+			printk(KERN_DEBUG " scavenge candidate %d status %4.4x.\n",
+				   entry, status);
+		if ((status & StatusComplete) == 0)
+			break;			/* It still hasn't been processed. */
+		if (status & TxUnderrun)
+			if (sp->tx_threshold < 0x01e08000) {
+				if (speedo_debug > 2)
+					printk(KERN_DEBUG "%s: TX underrun, threshold adjusted.\n",
+						   rtdev->name);
+				sp->tx_threshold += 0x00040000;
+			}
+		/* Free the original skb. */
+		if (sp->tx_skbuff[entry]) {
+			sp->stats.tx_packets++;	/* Count only user packets. */
+			sp->stats.tx_bytes += sp->tx_skbuff[entry]->len;
+			dma_unmap_single(&sp->pdev->dev,
+					 le32_to_cpu(sp->tx_ring[entry].tx_buf_addr0),
+					 sp->tx_skbuff[entry]->len, DMA_TO_DEVICE);
+
+			// *** RTnet ***
+			dev_kfree_rtskb(sp->tx_skbuff[entry]);
+			// *** RTnet ***
+
+			sp->tx_skbuff[entry] = 0;
+		}
+		dirty_tx++;
+	}
+
+// *** RTnet ***
+// *** RTnet ***
+
+	sp->dirty_tx = dirty_tx;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int speedo_interrupt(rtdm_irq_t *irq_handle)
+{
+	// *** RTnet ***
+	nanosecs_abs_t      time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev     =
+	rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	int ret = RTDM_IRQ_NONE;
+	// *** RTnet ***
+
+	struct speedo_private *sp;
+	long ioaddr, boguscnt = max_interrupt_work;
+	unsigned short status;
+
+
+	ioaddr = rtdev->base_addr;
+	sp = (struct speedo_private *)rtdev->priv;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	/* A lock to prevent simultaneous entry on SMP machines. */
+	if (test_and_set_bit(0, (void*)&sp->in_interrupt)) {
+		rtdm_printk(KERN_ERR"%s: SMP simultaneous entry of an interrupt handler.\n",
+			   rtdev->name);
+		sp->in_interrupt = 0;	/* Avoid halting machine. */
+		return ret;
+	}
+#endif
+
+	do {
+		status = inw(ioaddr + SCBStatus);
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		/* Will change from 0xfc00 to 0xff00 when we start handling
+		   FCP and ER interrupts --Dragan */
+		outw(status & 0xfc00, ioaddr + SCBStatus);
+		speedo_write_flush(ioaddr);
+
+		if (speedo_debug > 4)
+			rtdm_printk(KERN_DEBUG "%s: interrupt  status=%#4.4x.\n",
+				   rtdev->name, status);
+
+		if ((status & 0xfc00) == 0)
+			break;
+
+		ret = RTDM_IRQ_HANDLED;
+
+		/* Always check if all rx buffers are allocated.  --SAW */
+		speedo_refill_rx_buffers(rtdev, 0);
+
+		if ((status & 0x5000) ||	/* Packet received, or Rx error. */
+			(sp->rx_ring_state&(RrNoMem|RrPostponed)) == RrPostponed)
+									/* Need to gather the postponed packet. */
+			speedo_rx(rtdev, &packets, &time_stamp);
+
+		if (status & 0x1000) {
+			rtdm_lock_get(&sp->lock);
+			if ((status & 0x003c) == 0x0028) {		/* No more Rx buffers. */
+				struct RxFD *rxf;
+				rtdm_printk(KERN_WARNING "%s: card reports no RX buffers.\n",
+						rtdev->name);
+				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+				if (rxf == NULL) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: NULL cur_rx in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else if (rxf == sp->last_rxf) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: cur_rx is last in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else
+					outb(RxResumeNoResources, ioaddr + SCBCmd);
+			} else if ((status & 0x003c) == 0x0008) { /* No resources. */
+				struct RxFD *rxf;
+				rtdm_printk(KERN_WARNING "%s: card reports no resources.\n",
+						rtdev->name);
+				rxf = sp->rx_ringp[sp->cur_rx % RX_RING_SIZE];
+				if (rxf == NULL) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: NULL cur_rx in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else if (rxf == sp->last_rxf) {
+					if (speedo_debug > 2)
+						rtdm_printk(KERN_DEBUG
+								"%s: cur_rx is last in speedo_interrupt().\n",
+								rtdev->name);
+					sp->rx_ring_state |= RrNoMem|RrNoResources;
+				} else {
+					/* Restart the receiver. */
+					outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+						 ioaddr + SCBPointer);
+					outb(RxStart, ioaddr + SCBCmd);
+				}
+			}
+			sp->stats.rx_errors++;
+			rtdm_lock_put(&sp->lock);
+		}
+
+		if ((sp->rx_ring_state&(RrNoMem|RrNoResources)) == RrNoResources) {
+			rtdm_printk(KERN_WARNING
+					"%s: restart the receiver after a possible hang.\n",
+					rtdev->name);
+			rtdm_lock_get(&sp->lock);
+			/* Restart the receiver.
+			   I'm not sure if it's always right to restart the receiver
+			   here but I don't know another way to prevent receiver hangs.
+			   1999/12/25 SAW */
+			outl(sp->rx_ring_dma[sp->cur_rx % RX_RING_SIZE],
+				 ioaddr + SCBPointer);
+			outb(RxStart, ioaddr + SCBCmd);
+			sp->rx_ring_state &= ~RrNoResources;
+			rtdm_lock_put(&sp->lock);
+		}
+
+		/* User interrupt, Command/Tx unit interrupt or CU not active. */
+		if (status & 0xA400) {
+			rtdm_lock_get(&sp->lock);
+			speedo_tx_buffer_gc(rtdev);
+			if (sp->tx_full
+				&& (int)(sp->cur_tx - sp->dirty_tx) < TX_QUEUE_UNFULL) {
+				/* The ring is no longer full. */
+				sp->tx_full = 0;
+				rtnetif_wake_queue(rtdev); /* Attention: under a spinlock.  --SAW */
+			}
+			rtdm_lock_put(&sp->lock);
+		}
+
+		if (--boguscnt < 0) {
+			rtdm_printk(KERN_ERR "%s: Too much work at interrupt, status=0x%4.4x.\n",
+				   rtdev->name, status);
+			/* Clear all interrupt sources. */
+			/* Will change from 0xfc00 to 0xff00 when we start handling
+			   FCP and ER interrupts --Dragan */
+			outw(0xfc00, ioaddr + SCBStatus);
+			break;
+		}
+	} while (1);
+
+	if (speedo_debug > 3)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status=%#4.4x.\n",
+			   rtdev->name, inw(ioaddr + SCBStatus));
+
+	clear_bit(0, (void*)&sp->in_interrupt);
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return ret;
+}
+
+static inline struct RxFD *speedo_rx_alloc(struct rtnet_device *rtdev, int entry)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	struct RxFD *rxf;
+	struct rtskb *skb;
+	/* Get a fresh skbuff to replace the consumed one. */
+	skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ + 2 + sizeof(struct RxFD));
+	sp->rx_skbuff[entry] = skb;
+	if (skb == NULL) {
+		sp->rx_ringp[entry] = NULL;
+		return NULL;
+	}
+	rtskb_reserve(skb, 2);  /* IP header alignment */
+	rxf = sp->rx_ringp[entry] = (struct RxFD *)skb->tail;
+	sp->rx_ring_dma[entry] =
+		dma_map_single(&sp->pdev->dev, rxf,
+			       PKT_BUF_SZ + sizeof(struct RxFD),
+			       DMA_FROM_DEVICE);
+	rtskb_reserve(skb, sizeof(struct RxFD));
+	rxf->rx_buf_addr = 0xffffffff;
+	dma_sync_single_for_device(&sp->pdev->dev, sp->rx_ring_dma[entry],
+				   sizeof(struct RxFD), DMA_TO_DEVICE);
+	return rxf;
+}
+
+static inline void speedo_rx_link(struct rtnet_device *rtdev, int entry,
+								  struct RxFD *rxf, dma_addr_t rxf_dma)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	rxf->status = cpu_to_le32(0xC0000001);  /* '1' for driver use only. */
+	rxf->link = 0;			/* None yet. */
+	rxf->count = cpu_to_le32(PKT_BUF_SZ << 16);
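+	/* Append to the hardware list: point the old tail descriptor at the
+	 * new one and clear its EL/S (end-of-list/suspend) bits so the
+	 * receiver can advance past it. */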
+	sp->last_rxf->link = cpu_to_le32(rxf_dma);
+	sp->last_rxf->status &= cpu_to_le32(~0xC0000000);
+	dma_sync_single_for_device(&sp->pdev->dev, sp->last_rxf_dma,
+				   sizeof(struct RxFD), DMA_TO_DEVICE);
+	sp->last_rxf = rxf;
+	sp->last_rxf_dma = rxf_dma;
+}
+
+static int speedo_refill_rx_buf(struct rtnet_device *rtdev, int force)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	int entry;
+	struct RxFD *rxf;
+
+	entry = sp->dirty_rx % RX_RING_SIZE;
+	if (sp->rx_skbuff[entry] == NULL) {
+		rxf = speedo_rx_alloc(rtdev, entry);
+		if (rxf == NULL) {
+			unsigned int forw;
+			int forw_entry;
+			if (speedo_debug > 2 || !(sp->rx_ring_state & RrOOMReported)) {
+				// *** RTnet ***
+				rtdm_printk(KERN_WARNING "%s: can't fill rx buffer (force %d)!\n",
+						rtdev->name, force);
+				// *** RTnet ***
+				sp->rx_ring_state |= RrOOMReported;
+			}
+			if (!force)
+				return -1;	/* Better luck next time!  */
+			/* Borrow an skb from one of next entries. */
+			for (forw = sp->dirty_rx + 1; forw != sp->cur_rx; forw++)
+				if (sp->rx_skbuff[forw % RX_RING_SIZE] != NULL)
+					break;
+			if (forw == sp->cur_rx)
+				return -1;
+			forw_entry = forw % RX_RING_SIZE;
+			sp->rx_skbuff[entry] = sp->rx_skbuff[forw_entry];
+			sp->rx_skbuff[forw_entry] = NULL;
+			rxf = sp->rx_ringp[forw_entry];
+			sp->rx_ringp[forw_entry] = NULL;
+			sp->rx_ringp[entry] = rxf;
+		}
+	} else {
+		rxf = sp->rx_ringp[entry];
+	}
+	speedo_rx_link(rtdev, entry, rxf, sp->rx_ring_dma[entry]);
+	sp->dirty_rx++;
+	sp->rx_ring_state &= ~(RrNoMem|RrOOMReported); /* Mark the progress. */
+	return 0;
+}
+
+static void speedo_refill_rx_buffers(struct rtnet_device *rtdev, int force)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+
+	/* Refill the RX ring. */
+	while ((int)(sp->cur_rx - sp->dirty_rx) > 0 &&
+			speedo_refill_rx_buf(rtdev, force) != -1);
+}
+
+static int
+speedo_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	int entry = sp->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = sp->dirty_rx + RX_RING_SIZE - sp->cur_rx;
+	int alloc_ok = 1;
+
+	if (speedo_debug > 4)
+		rtdm_printk(KERN_DEBUG " In speedo_rx().\n");
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while (sp->rx_ringp[entry] != NULL) {
+		int status;
+		int pkt_len;
+
+		dma_sync_single_for_cpu(&sp->pdev->dev, sp->rx_ring_dma[entry],
+					sizeof(struct RxFD), DMA_FROM_DEVICE);
+		status = le32_to_cpu(sp->rx_ringp[entry]->status);
+		pkt_len = le32_to_cpu(sp->rx_ringp[entry]->count) & 0x3fff;
+
+		if (!(status & RxComplete))
+			break;
+
+		if (--rx_work_limit < 0)
+			break;
+
+		/* Check for a rare out-of-memory case: the current buffer is
+		   the last buffer allocated in the RX ring.  --SAW */
+		if (sp->last_rxf == sp->rx_ringp[entry]) {
+			/* Postpone the packet.  It'll be reaped at an interrupt when this
+			   packet is no longer the last packet in the ring. */
+			if (speedo_debug > 2)
+				rtdm_printk(KERN_DEBUG "%s: RX packet postponed!\n",
+					   rtdev->name);
+			sp->rx_ring_state |= RrPostponed;
+			break;
+		}
+
+		if (speedo_debug > 4)
+			rtdm_printk(KERN_DEBUG "  speedo_rx() status %8.8x len %d.\n", status,
+				   pkt_len);
+		if ((status & (RxErrTooBig|RxOK|0x0f90)) != RxOK) {
+			if (status & RxErrTooBig)
+				rtdm_printk(KERN_ERR "%s: Ethernet frame overran the Rx buffer, "
+					   "status %8.8x!\n", rtdev->name, status);
+			else if (! (status & RxOK)) {
+				/* There was a fatal error.  This *should* be impossible. */
+				sp->stats.rx_errors++;
+				rtdm_printk(KERN_ERR "%s: Anomalous event in speedo_rx(), "
+					   "status %8.8x.\n",
+					   rtdev->name, status);
+			}
+		} else {
+			struct rtskb *skb;
+
+// *** RTnet ***
+			{
+// *** RTnet ***
+				/* Pass up the already-filled skbuff. */
+				skb = sp->rx_skbuff[entry];
+				if (skb == NULL) {
+					rtdm_printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n",
+						   rtdev->name);
+					break;
+				}
+				sp->rx_skbuff[entry] = NULL;
+				rtskb_put(skb, pkt_len);
+				sp->rx_ringp[entry] = NULL;
+				dma_unmap_single(&sp->pdev->dev,
+						 sp->rx_ring_dma[entry],
+						 PKT_BUF_SZ + sizeof(struct RxFD),
+						 DMA_FROM_DEVICE);
+			}
+			skb->protocol = rt_eth_type_trans(skb, rtdev);
+			//rtmac
+			skb->time_stamp = *time_stamp;
+			//rtmac
+			rtnetif_rx(skb);
+			(*packets)++;
+			sp->stats.rx_packets++;
+			sp->stats.rx_bytes += pkt_len;
+		}
+		entry = (++sp->cur_rx) % RX_RING_SIZE;
+		sp->rx_ring_state &= ~RrPostponed;
+		/* Refill the recently taken buffers.
+		   Do it one-by-one to handle traffic bursts better. */
+		if (alloc_ok && speedo_refill_rx_buf(rtdev, 0) == -1)
+			alloc_ok = 0;
+	}
+
+	/* Try hard to refill the recently taken buffers. */
+	speedo_refill_rx_buffers(rtdev, 1);
+
+	sp->last_rx_time = jiffies;
+
+	return 0;
+}
+
+static int
+speedo_close(struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	int i;
+
+	netdevice_stop(rtdev);
+	rtnetif_stop_queue(rtdev);
+
+	if (speedo_debug > 1)
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n",
+			   rtdev->name, inw(ioaddr + SCBStatus));
+
+	/* Shutdown procedure according to Intel's e100 */
+	outl(PortPartialReset, ioaddr + SCBPort);
+	speedo_write_flush(ioaddr); udelay(20);
+
+	outl(PortReset, ioaddr + SCBPort);
+	speedo_write_flush(ioaddr); udelay(20);
+
+	outw(SCBMaskAll, ioaddr + SCBCmd);
+	speedo_write_flush(ioaddr);
+
+	// *** RTnet ***
+	if ((i = rtdm_irq_free(&sp->irq_handle)) < 0)
+		return i;
+
+	rt_stack_disconnect(rtdev);
+
+	// *** RTnet ***
+
+	/* Print a few items for debugging. */
+	if (speedo_debug > 3)
+		speedo_show_state(rtdev);
+
+	/* Free all the skbuffs in the Rx and Tx queues. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *skb = sp->rx_skbuff[i];
+		sp->rx_skbuff[i] = NULL;
+		/* Clear the Rx descriptors. */
+		if (skb) {
+			dma_unmap_single(&sp->pdev->dev,
+					 sp->rx_ring_dma[i],
+					 PKT_BUF_SZ + sizeof(struct RxFD),
+					 DMA_FROM_DEVICE);
+			dev_kfree_rtskb(skb);
+		}
+	}
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		struct rtskb *skb = sp->tx_skbuff[i];
+		sp->tx_skbuff[i] = NULL;
+		/* Clear the Tx descriptors. */
+		if (skb) {
+			dma_unmap_single(&sp->pdev->dev,
+					 le32_to_cpu(sp->tx_ring[i].tx_buf_addr0),
+					 skb->len, DMA_TO_DEVICE);
+
+			// *** RTnet ***
+			dev_kfree_rtskb(skb);
+			// *** RTnet ***
+		}
+	}
+
+// *** RTnet ***
+// *** RTnet ***
+
+	pci_set_power_state(sp->pdev, 2);
+
+	return 0;
+}
+
+
+/* Set or clear the multicast filter for this adaptor.
+   This is very ugly with Intel chips -- we usually have to execute an
+   entire configuration command, plus process a multicast command.
+   This is complicated.  We must put a large configuration command and
+   an arbitrarily-sized multicast command in the transmit list.
+   To minimize the disruption -- the previous command might have already
+   loaded the link -- we convert the current command block, normally a Tx
+   command, into a no-op and link it to the new command.
+*/
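+/* Flow sketch of that trick (not extra driver logic, just the sequence the
+   code below follows): the new command is queued with CmdSuspend set, then
+   the previous tail has its suspend bit cleared and the command unit is
+   kicked:
+
+	clear_suspend(last_cmd);           // old tail no longer suspends
+	outb(CUResume, ioaddr + SCBCmd);   // CU resumes into the new command
+*/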
+static void set_rx_mode(struct rtnet_device *rtdev)
+{
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	struct descriptor *last_cmd;
+	char new_rx_mode;
+	//unsigned long flags;
+	int entry/*, i*/;
+
+	if (rtdev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		new_rx_mode = 3;
+	} else if (rtdev->flags & IFF_ALLMULTI) {
+		new_rx_mode = 1;
+	} else
+		new_rx_mode = 0;
+
+	if (speedo_debug > 3)
+		printk(KERN_DEBUG "%s: set_rx_mode %d -> %d\n", rtdev->name,
+				sp->rx_mode, new_rx_mode);
+
+	if ((int)(sp->cur_tx - sp->dirty_tx) > TX_RING_SIZE - TX_MULTICAST_SIZE) {
+	    /* The Tx ring is full -- don't add anything!  Hope the mode will be
+		 * set again later. */
+		sp->rx_mode = -1;
+		return;
+	}
+
+	if (new_rx_mode != sp->rx_mode) {
+		u8 *config_cmd_data;
+
+		//spin_lock_irqsave(&sp->lock, flags); --- disabled for now as it runs before irq handler is active
+		entry = sp->cur_tx++ % TX_RING_SIZE;
+		last_cmd = sp->last_cmd;
+		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+		sp->tx_skbuff[entry] = NULL;			/* Redundant. */
+		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdConfigure);
+		sp->tx_ring[entry].link =
+			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
+		config_cmd_data = (void *)&sp->tx_ring[entry].tx_desc_addr;
+		/* Construct a full CmdConfig frame. */
+		memcpy(config_cmd_data, i82558_config_cmd, CONFIG_DATA_SIZE);
+		config_cmd_data[1] = (txfifo << 4) | rxfifo;
+		config_cmd_data[4] = rxdmacount;
+		config_cmd_data[5] = txdmacount + 0x80;
+		config_cmd_data[15] |= (new_rx_mode & 2) ? 1 : 0;
+		/* 0x80 doesn't disable FC; 0x84 does.
+		   Disable flow control since we are not ACK-ing any FC interrupts
+		   for now. --Dragan */
+		config_cmd_data[19] = 0x84;
+		config_cmd_data[19] |= sp->full_duplex ? 0x40 : 0;
+		config_cmd_data[21] = (new_rx_mode & 1) ? 0x0D : 0x05;
+		if (sp->phy[0] & 0x8000) {			/* Use the AUI port instead. */
+			config_cmd_data[15] |= 0x80;
+			config_cmd_data[8] = 0;
+		}
+		/* Trigger the command unit resume. */
+		wait_for_cmd_done(ioaddr + SCBCmd);
+		clear_suspend(last_cmd);
+		outb(CUResume, ioaddr + SCBCmd);
+		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+			rtnetif_stop_queue(rtdev);
+			sp->tx_full = 1;
+		}
+		//spin_unlock_irqrestore(&sp->lock, flags);
+	}
+
+	if (new_rx_mode == 0) {
+		/* The simple case of 0-3 multicast list entries occurs often, and
+		   fits within one tx_ring[] entry. */
+		/*struct dev_mc_list *mclist;*/
+		u16 *setup_params/*, *eaddrs*/;
+
+		//spin_lock_irqsave(&sp->lock, flags); --- disabled for now as it runs before irq handler is active
+		entry = sp->cur_tx++ % TX_RING_SIZE;
+		last_cmd = sp->last_cmd;
+		sp->last_cmd = (struct descriptor *)&sp->tx_ring[entry];
+
+		sp->tx_skbuff[entry] = NULL;
+		sp->tx_ring[entry].status = cpu_to_le32(CmdSuspend | CmdMulticastList);
+		sp->tx_ring[entry].link =
+			cpu_to_le32(TX_RING_ELEM_DMA(sp, (entry + 1) % TX_RING_SIZE));
+		sp->tx_ring[entry].tx_desc_addr = 0; /* Really MC list count. */
+		setup_params = (u16 *)&sp->tx_ring[entry].tx_desc_addr;
+		*setup_params++ = cpu_to_le16(0); /* mc_count */
+// *** RTnet ***
+// *** RTnet ***
+
+		wait_for_cmd_done(ioaddr + SCBCmd);
+		clear_suspend(last_cmd);
+		/* Immediately trigger the command unit resume. */
+		outb(CUResume, ioaddr + SCBCmd);
+
+		if ((int)(sp->cur_tx - sp->dirty_tx) >= TX_QUEUE_LIMIT) {
+			rtnetif_stop_queue(rtdev);
+			sp->tx_full = 1;
+		}
+		//spin_unlock_irqrestore(&sp->lock, flags);
+// *** RTnet ***
+// *** RTnet ***
+	}
+
+	sp->rx_mode = new_rx_mode;
+}
+
+
+static void eepro100_remove_one (struct pci_dev *pdev)
+{
+	// *** RTnet ***
+	struct rtnet_device *rtdev = pci_get_drvdata (pdev);
+
+	struct speedo_private *sp = (struct speedo_private *)rtdev->priv;
+
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+	// *** RTnet ***
+
+	release_region(pci_resource_start(pdev, 1), pci_resource_len(pdev, 1));
+	release_mem_region(pci_resource_start(pdev, 0), pci_resource_len(pdev, 0));
+
+#ifndef USE_IO
+	iounmap((char *)rtdev->base_addr);
+#endif
+
+	dma_free_coherent(
+		&pdev->dev,
+		TX_RING_SIZE * sizeof(struct TxFD) + sizeof(struct speedo_stats),
+		sp->tx_ring, sp->tx_ring_dma);
+	pci_disable_device(pdev);
+
+	// *** RTnet ***
+	rtdev_free(rtdev);
+	// *** RTnet ***
+}
+
+static struct pci_device_id eepro100_pci_tbl[] = {
+	{ PCI_VENDOR_ID_INTEL, 0x1229, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1209, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1029, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1030, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1031, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1032, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1033, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1034, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1035, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1036, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1037, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1038, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1039, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103A, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103B, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103C, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103D, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x103E, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1092, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1227, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x1228, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x2449, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x2459, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x245D, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x27DC, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x5200, PCI_ANY_ID, PCI_ANY_ID, },
+	{ PCI_VENDOR_ID_INTEL, 0x5201, PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0,}
+};
+MODULE_DEVICE_TABLE(pci, eepro100_pci_tbl);
+
+static struct pci_driver eepro100_driver = {
+	.name		= "eepro100_rt",
+	.id_table	= eepro100_pci_tbl,
+	.probe		= eepro100_init_one,
+	.remove		= eepro100_remove_one,
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+static int __init eepro100_init_module(void)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG
+	if (local_debug >= 0 && speedo_debug != local_debug)
+		printk(KERN_INFO "eepro100.c: Debug level is %d.\n", local_debug);
+	if (local_debug >= 0)
+		speedo_debug = local_debug;
+#else  /* !CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG */
+	local_debug = speedo_debug; /* touch debug variable */
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_EEPRO100_DBG */
+
+	return pci_register_driver(&eepro100_driver);
+}
+
+static void __exit eepro100_cleanup_module(void)
+{
+	pci_unregister_driver(&eepro100_driver);
+}
+
+module_init(eepro100_init_module);
+module_exit(eepro100_cleanup_module);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c
new file mode 100644
index 0000000..e6bf2d6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/eth1394.c
@@ -0,0 +1,1536 @@
+/*
+ * eth1394.c -- RTnet driver for Ethernet emulation over FireWire
+ *              (adapted from Linux1394)
+ *
+ * Copyright (C) 2005 Zhang Yuchen <yuchen623@gmail.com>
+ *
+ * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/if_arp.h>
+#include <linux/if_ether.h>
+#include <linux/ip.h>
+#include <linux/in.h>
+#include <linux/bitops.h>
+#include <linux/uaccess.h>
+#include <net/arp.h>
+
+#define rtos_spinlock_t rtdm_lock_t
+#define nanosecs_abs_t  nanosecs_t
+
+#include <rt_eth1394.h>
+
+#include <rtnet_port.h>
+
+#include <ieee1394_types.h>
+#include <ieee1394_core.h>
+#include <ieee1394_transactions.h>
+#include <ieee1394.h>
+#include <highlevel.h>
+#include <iso.h>
+
+#define driver_name	"RT-ETH1394"
+
+
+#define ETH1394_PRINT_G(level, fmt, args...) \
+	rtdm_printk(level "%s: " fmt, driver_name, ## args)
+
+#define ETH1394_PRINT(level, dev_name, fmt, args...) \
+	rtdm_printk(level "%s: %s: " fmt, driver_name, dev_name, ## args)
+
+//#define ETH1394_DEBUG 1
+
+#ifdef ETH1394_DEBUG
+#define DEBUGP(fmt, args...) \
+	rtdm_printk(KERN_ERR "%s:%s[%d]: " fmt "\n", driver_name, __FUNCTION__, __LINE__, ## args)
+#else
+#define DEBUGP(fmt, args...)
+#endif
+
+#define TRACE() rtdm_printk(KERN_ERR "%s:%s[%d] ---- TRACE\n", driver_name, __FUNCTION__, __LINE__)
+
+/* Change this to IEEE1394_SPEED_S100 to make testing easier */
+#define ETH1394_SPEED_DEF	0x03 /*IEEE1394_SPEED_MAX*/
+
+/* For now, this needs to be 1500, so that XP works with us */
+#define ETH1394_DATA_LEN		1500/*ETH_DATA_LEN*/
+
+struct fragment_info {
+	struct list_head list;
+	int offset;
+	int len;
+};
+
+struct partial_datagram {
+	struct list_head list;
+	u16 dgl;
+	u16 dg_size;
+	u16 ether_type;
+	struct rtskb *skb;
+	char *pbuf;
+	struct list_head frag_info;
+};
+
+static const u16 eth1394_speedto_maxpayload[] = {
+/*     S100, S200, S400, S800, S1600, S3200 */
+	512, 1024, 2048, 4096,  4096,  4096
+};
+
+static struct hpsb_highlevel eth1394_highlevel;
+
+/* Use common.lf to determine header len */
+static const int hdr_type_len[] = {
+	sizeof (struct eth1394_uf_hdr),
+	sizeof (struct eth1394_ff_hdr),
+	sizeof (struct eth1394_sf_hdr),
+	sizeof (struct eth1394_sf_hdr)
+};
+
+/* The max_partial_datagrams parameter is the maximum number of fragmented
+ * datagrams per node that eth1394 will keep in memory.  Providing an upper
+ * bound allows us to limit the amount of memory that partial datagrams
+ * consume in the event that some partial datagrams are never completed.  This
+ * should probably change to a sysctl item or the like if possible.
+ */
+static int max_partial_datagrams = 25;
+module_param(max_partial_datagrams, int, 0444);
+MODULE_PARM_DESC(max_partial_datagrams,
+		 "Maximum number of partially received fragmented datagrams "
+		 "(default = 25).");
+
+
+static int eth1394_header(struct rtskb *skb, struct rtnet_device *dev,
+			    unsigned short type, void *daddr, void *saddr,
+			    unsigned len);
+
+static int eth1394_write(struct hpsb_host *host,struct hpsb_packet *packet, unsigned int length);
+
+static inline void purge_partial_datagram(struct list_head *old);
+static int eth1394_tx(struct rtskb *skb, struct rtnet_device *dev);
+static void eth1394_iso(struct hpsb_iso *iso, void *arg);
+
+/* Function for incoming 1394 packets */
+static struct hpsb_address_ops eth1394_ops = {
+	.write =	eth1394_write,
+};
+
+static void eth1394_add_host (struct hpsb_host *host);
+static void eth1394_remove_host (struct hpsb_host *host);
+static void eth1394_host_reset (struct hpsb_host *host);
+
+/* Ieee1394 highlevel driver functions */
+static struct hpsb_highlevel eth1394_highlevel = {
+	.name =		driver_name,
+	.add_host =	eth1394_add_host,
+	.remove_host =	eth1394_remove_host,
+	.host_reset =	eth1394_host_reset,
+};
+
+static void eth1394_iso_shutdown(struct eth1394_priv *priv)
+{
+	priv->bc_state = ETHER1394_BC_CLOSED;
+
+	if (priv->iso != NULL) {
+		//~ if (!in_interrupt())
+			hpsb_iso_shutdown(priv->iso);
+		priv->iso = NULL;
+	}
+}
+
+static int eth1394_init_bc(struct rtnet_device *dev)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+
+	/* First time sending?  Need a broadcast channel for ARP and for
+	 * listening on */
+	if (priv->bc_state == ETHER1394_BC_CHECK) {
+		quadlet_t bc;
+
+		/* Get the local copy of the broadcast channel and check its
+		 * validity (the IRM should validate it for us) */
+
+		bc = priv->host->csr.broadcast_channel;
+
+		if ((bc & 0x80000000) != 0x80000000) { //used to be 0xc0000000
+			/* broadcast channel not validated yet */
+			ETH1394_PRINT(KERN_WARNING, dev->name,
+				      "Error BROADCAST_CHANNEL register valid "
+				      "bit not set, can't send IP traffic\n");
+
+			eth1394_iso_shutdown(priv);
+
+			return -EAGAIN;
+		}
+		if (priv->broadcast_channel != (bc & 0x3f)) {
+			/* This really shouldn't be possible, but just in case
+			 * the IEEE 1394 spec changes regarding broadcast
+			 * channels in the future. */
+
+			eth1394_iso_shutdown(priv);
+
+			//~ if (in_interrupt())
+				//~ return -EAGAIN;
+
+			priv->broadcast_channel = bc & 0x3f;
+			ETH1394_PRINT(KERN_INFO, dev->name,
+				      "Changing to broadcast channel %d...\n",
+				      priv->broadcast_channel);
+
+			priv->iso = hpsb_iso_recv_init(priv->host, 16 * 4096,
+						       16, priv->broadcast_channel, HPSB_ISO_DMA_PACKET_PER_BUFFER,
+						       1, eth1394_iso, 0, "eth1394_iso", IEEE1394_PRIORITY_HIGHEST);
+
+			if (priv->iso == NULL) {
+				ETH1394_PRINT(KERN_ERR, dev->name,
+					      "failed to change broadcast "
+					      "channel\n");
+				return -EAGAIN;
+			}
+		}
+		if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0) {
+			ETH1394_PRINT(KERN_ERR, dev->name,
+				      "Could not start data stream reception\n");
+
+			eth1394_iso_shutdown(priv);
+
+			return -EAGAIN;
+		}
+		priv->bc_state = ETHER1394_BC_OPENED;
+	}
+
+	return 0;
+}
+
+static int eth1394_open (struct rtnet_device *dev)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	rtdm_lockctx_t context;
+	int ret;
+
+	/* Something bad happened, don't even try */
+	if (priv->bc_state == ETHER1394_BC_CLOSED)
+		return -EAGAIN;
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	ret = eth1394_init_bc(dev);
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	if (ret)
+		return ret;
+	rt_stack_connect(dev,&STACK_manager);
+	rtnetif_start_queue (dev);
+	return 0;
+}
+
+static int eth1394_stop (struct rtnet_device *dev)
+{
+	rtnetif_stop_queue (dev);
+	rt_stack_disconnect(dev);
+	return 0;
+}
+
+/* Return statistics to the caller */
+static struct net_device_stats *eth1394_stats (struct rtnet_device *dev)
+{
+	return &(((struct eth1394_priv *)dev->priv)->stats);
+}
+
+static inline void eth1394_register_limits(int nodeid, u16 maxpayload,
+					     unsigned char sspd,
+					     struct eth1394_priv *priv)
+{
+
+	if (nodeid < 0 || nodeid >= ALL_NODES) {
+		ETH1394_PRINT_G (KERN_ERR, "Cannot register invalid nodeid %d\n", nodeid);
+		return;
+	}
+
+	priv->maxpayload[nodeid]	= maxpayload;
+	priv->sspd[nodeid]		= sspd;
+	priv->maxpayload[ALL_NODES] = min(priv->maxpayload[ALL_NODES], maxpayload);
+	priv->sspd[ALL_NODES] = min(priv->sspd[ALL_NODES], sspd);
+
+	return;
+}
+
+
+static void eth1394_reset_priv (struct rtnet_device *dev, int set_mtu)
+{
+	rtdm_lockctx_t context;
+	int i;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	struct hpsb_host *host = priv->host;
+	int phy_id = NODEID_TO_NODE(host->node_id);
+	u16 maxpayload = 1 << (host->csr.max_rec + 1);
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	/* Clear the speed/payload/offset tables */
+	memset (priv->maxpayload, 0, sizeof (priv->maxpayload));
+	memset (priv->sspd, 0, sizeof (priv->sspd));
+
+	priv->sspd[ALL_NODES] = ETH1394_SPEED_DEF;
+	priv->maxpayload[ALL_NODES] = eth1394_speedto_maxpayload[priv->sspd[ALL_NODES]];
+
+	priv->bc_state = ETHER1394_BC_CHECK;
+
+	/* Register our limits now */
+	eth1394_register_limits(phy_id, maxpayload,
+				    host->speed_map[(phy_id << 6) + phy_id], priv);
+
+	/* We'll use our maxpayload as the default mtu */
+	if (set_mtu) {
+		dev->mtu = min(ETH1394_DATA_LEN, (int)(priv->maxpayload[phy_id] -
+			       (sizeof(union eth1394_hdr) + ETHER1394_GASP_OVERHEAD)));
+
+		//~ /* Set our hardware address while we're at it */
+		//~ *(u64*)dev->dev_addr = guid;
+		//~ *(u64*)dev->broadcast = ~0x0ULL;
+		*(u16*)dev->dev_addr = LOCAL_BUS | phy_id; //we directly use the FireWire address as our MAC address
+		*(u16*)dev->broadcast = LOCAL_BUS | ALL_NODES;
+	}
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	for (i = 0; i < ALL_NODES; i++) {
+		struct list_head *lh, *n;
+
+		rtdm_lock_get_irqsave(&priv->pdg[i].lock, context);
+		if (!set_mtu) {
+			list_for_each_safe(lh, n, &priv->pdg[i].list) {
+				//~ purge_partial_datagram(lh);
+			}
+		}
+		INIT_LIST_HEAD(&(priv->pdg[i].list));
+		priv->pdg[i].sz = 0;
+		rtdm_lock_put_irqrestore(&priv->pdg[i].lock, context);
+	}
+
+}
+
+static void eth1394_add_host (struct hpsb_host *host)
+{
+	int i;
+	struct host_info *hi = NULL;
+
+	//*******RTnet********
+	struct rtnet_device *dev = NULL;
+	//
+	struct eth1394_priv *priv;
+
+	/* We should really have our own alloc_hpsbdev() function in
+	 * net_init.c instead of calling the one for ethernet then hijacking
+	 * it for ourselves.  That way we'd be a real networking device. */
+
+	//******RTnet******
+
+	dev = rt_alloc_etherdev(sizeof (struct eth1394_priv),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to allocate "
+				 "etherdevice for IEEE 1394 device\n");
+		goto free_dev;
+	}
+	rtdev_alloc_name(dev, "rteth%d");
+	memset(dev->priv, 0, sizeof(struct eth1394_priv));
+	rt_rtdev_connect(dev, &RTDEV_manager);
+
+	//dev->init = eth1394_init_dev;
+
+	dev->vers = RTDEV_VERS_2_0;
+	dev->open = eth1394_open;
+	dev->hard_start_xmit = eth1394_tx;
+	dev->stop = eth1394_stop;
+	dev->hard_header = eth1394_header;
+	dev->get_stats = eth1394_stats;
+	dev->flags		= IFF_BROADCAST | IFF_MULTICAST;
+	dev->addr_len		= ETH_ALEN;
+	dev->hard_header_len	= ETH_HLEN;
+	dev->type		= ARPHRD_IEEE1394;
+
+	//rtdev->do_ioctl = NULL;
+	priv = (struct eth1394_priv *)dev->priv;
+
+	rtdm_lock_init(&priv->lock);
+	priv->host = host;
+
+	for (i = 0; i < ALL_NODES; i++) {
+		rtdm_lock_init(&priv->pdg[i].lock);
+		INIT_LIST_HEAD(&priv->pdg[i].list);
+		priv->pdg[i].sz = 0;
+	}
+
+	hi = hpsb_create_hostinfo(&eth1394_highlevel, host, sizeof(*hi));
+	if (hi == NULL) {
+		ETH1394_PRINT_G (KERN_ERR, "Out of memory trying to create "
+				 "hostinfo for IEEE 1394 device\n");
+		goto free_hi;
+	}
+
+	if (rt_register_rtnetdev(dev)) {
+		ETH1394_PRINT (KERN_ERR, dev->name, "Error registering network driver\n");
+		goto free_hi;
+	}
+
+	ETH1394_PRINT (KERN_ERR, dev->name, "IEEE-1394 IPv4 over 1394 Ethernet\n");
+
+	hi->host = host;
+	hi->dev = dev;
+
+	eth1394_reset_priv (dev, 1);
+
+	/* Ignore validity in hopes that it will be set in the future.  It'll
+	 * be checked when the eth device is opened. */
+	priv->broadcast_channel = host->csr.broadcast_channel & 0x3f;
+
+	priv->iso = hpsb_iso_recv_init(host, (ETHER1394_GASP_BUFFERS * 2 *
+					      2048), // XXX workaround for limitation in rawiso
+					      //(1 << (host->csr.max_rec + 1))),
+				       ETHER1394_GASP_BUFFERS,
+				       priv->broadcast_channel,
+				       HPSB_ISO_DMA_PACKET_PER_BUFFER,
+				       1, eth1394_iso, 0, "eth1394_iso", IEEE1394_PRIORITY_HIGHEST);
+
+
+
+	if (priv->iso == NULL) {
+		ETH1394_PRINT(KERN_ERR, dev->name,
+			      "Could not allocate isochronous receive context "
+			      "for the broadcast channel\n");
+		priv->bc_state = ETHER1394_BC_ERROR;
+		goto unregister_dev;
+	} else {
+		if (hpsb_iso_recv_start(priv->iso, -1, (1 << 3), -1) < 0){
+			priv->bc_state = ETHER1394_BC_STOPPED;
+			goto unregister_dev;
+		}
+		else
+			priv->bc_state = ETHER1394_BC_RUNNING;
+	}
+
+	hpsb_register_addrspace(&eth1394_highlevel, host, &eth1394_ops, ETHER1394_REGION_ADDR,
+				 ETHER1394_REGION_ADDR_END);
+
+	return;
+
+unregister_dev:
+	rt_unregister_rtnetdev(dev);
+free_hi:
+	hpsb_destroy_hostinfo(&eth1394_highlevel, host);
+free_dev:
+	rtdev_free(dev);
+
+	return;
+}
+
+static void eth1394_remove_host (struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+
+	if (hi != NULL) {
+		struct eth1394_priv *priv = (struct eth1394_priv *)hi->dev->priv;
+
+		eth1394_iso_shutdown(priv);
+
+		if (hi->dev) {
+			rt_stack_disconnect(hi->dev);
+			rt_unregister_rtnetdev (hi->dev);
+			rtdev_free(hi->dev);
+		}
+	}
+	return;
+}
+
+static void eth1394_host_reset (struct hpsb_host *host)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	struct rtnet_device *dev;
+
+	/* This can happen for hosts that we don't use */
+	if (hi == NULL)
+		return;
+
+	dev = hi->dev;
+
+	/* Reset our private host data, but not our mtu */
+	rtnetif_stop_queue (dev);
+	eth1394_reset_priv (dev, 1);
+	rtnetif_wake_queue (dev);
+}
+
+
+/******************************************
+ * HW Header net device functions
+ ******************************************/
+/* These functions have been adapted from net/ethernet/eth.c */
+
+
+/* Create a fake MAC header for an arbitrary protocol layer.
+ * saddr=NULL means use device source address
+ * daddr=NULL means leave destination address (eg unresolved arp). */
+static int eth1394_header(struct rtskb *skb, struct rtnet_device *dev,
+			    unsigned short type, void *daddr, void *saddr,
+			    unsigned len)
+{
+	struct ethhdr *eth = (struct ethhdr *)rtskb_push(skb,ETH_HLEN);
+	memset(eth, 0, sizeof(*eth));
+
+	eth->h_proto = htons(type);
+
+	if (saddr)
+		memcpy(eth->h_source, saddr, sizeof(nodeid_t));
+	else
+		memcpy(eth->h_source, dev->dev_addr, sizeof(nodeid_t));
+
+	if (dev->flags & (IFF_LOOPBACK|IFF_NOARP))
+	{
+		memset(eth->h_dest, 0, dev->addr_len);
+		return(dev->hard_header_len);
+	}
+
+	if (daddr)
+	{
+		memcpy(eth->h_dest,daddr, sizeof(nodeid_t));
+		return dev->hard_header_len;
+	}
+
+	return -dev->hard_header_len;
+
+}
+
+
+/******************************************
+ * Datagram reception code
+ ******************************************/
+
+/* Copied from net/ethernet/eth.c */
+static inline u16 eth1394_type_trans(struct rtskb *skb,
+				       struct rtnet_device *dev)
+{
+	struct ethhdr *eth;
+	unsigned char *rawp;
+
+	skb->mac.raw = skb->data;
+	rtskb_pull (skb, ETH_HLEN);
+	eth = (struct ethhdr*)skb->mac.raw;
+
+	if (*eth->h_dest & 1) {
+		if (memcmp(eth->h_dest, dev->broadcast, dev->addr_len)==0)
+			skb->pkt_type = PACKET_BROADCAST;
+	} else {
+		if (memcmp(eth->h_dest, dev->dev_addr, dev->addr_len))
+			skb->pkt_type = PACKET_OTHERHOST;
+	}
+
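+	/* EtherType values start at 1536 (0x600); smaller values in this
+	 * field are 802.3 frame lengths, handled below. */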
+	if (ntohs (eth->h_proto) >= 1536)
+		return eth->h_proto;
+
+	rawp = skb->data;
+
+	if (*(unsigned short *)rawp == 0xFFFF)
+		return htons (ETH_P_802_3);
+
+	return htons (ETH_P_802_2);
+}
+
+/* Parse an encapsulated IP1394 header into an ethernet frame packet.
+ * We also perform ARP translation here, if need be.  */
+static inline u16 eth1394_parse_encap(struct rtskb *skb,
+					struct rtnet_device *dev,
+					nodeid_t srcid, nodeid_t destid,
+					u16 ether_type)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	unsigned short ret = 0;
+
+	/* If this is an ARP packet, convert it. First, we want to make
+	 * use of some of the fields, since they tell us a little bit
+	 * about the sending machine.  */
+	if (ether_type == __constant_htons (ETH_P_ARP)) {
+		rtdm_lockctx_t context;
+		struct eth1394_arp *arp1394 =
+				(struct eth1394_arp*)((u8 *)skb->data);
+		struct arphdr *arp =
+				(struct arphdr *)((u8 *)skb->data);
+		unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+		u8 max_rec = min(priv->host->csr.max_rec,
+				 (u8)(arp1394->max_rec));
+		int sspd = arp1394->sspd;
+		u16 maxpayload;
+		/* Sanity check. MacOSX seems to be sending us 131 in this
+		 * field (at least on my Panther G5). Not sure why. */
+		if (sspd > 5 || sspd < 0)
+			sspd = 0;
+
+		maxpayload = min(eth1394_speedto_maxpayload[sspd], (u16)(1 << (max_rec + 1)));
+
+
+
+		/* Update our speed/payload/fifo_offset table */
+		rtdm_lock_get_irqsave(&priv->lock, context);
+		eth1394_register_limits(NODEID_TO_NODE(srcid), maxpayload,
+					  arp1394->sspd,
+						priv);
+		rtdm_lock_put_irqrestore(&priv->lock, context);
+
+		/* Now that we're done with the 1394-specific stuff, we'll
+		 * need to alter some of the data.  Believe it or not, all
+		 * that needs to be done is: the sender_IP_address needs to be
+		 * moved, the destination hardware address gets stuffed
+		 * in, and the hardware address length is set to 8.
+		 *
+		 * IMPORTANT: The code below overwrites 1394-specific data
+		 * that is needed above, so keep the call to
+		 * eth1394_register_limits() before munging the data for the
+		 * higher-level IP stack. */
+
+		arp->ar_hln = ETH_ALEN;
+		arp_ptr += arp->ar_hln;		/* skip over sender unique id */
+		*(u32*)arp_ptr = arp1394->sip;	/* move sender IP addr */
+		arp_ptr += arp->ar_pln;		/* skip over sender IP addr */
+
+		if (arp->ar_op == 1)
+			/* just set ARP req target unique ID to 0 */
+			memset(arp_ptr, 0, ETH_ALEN);
+		else
+			memcpy(arp_ptr, dev->dev_addr, ETH_ALEN);
+	}
+
+	/* Now add the ethernet header. */
+	//no need to add ethernet header now, since we did not get rid of it on the sending side
+	if (dev->hard_header (skb, dev, __constant_ntohs (ether_type),
+			      &destid, &srcid, skb->len) >= 0)
+		ret = eth1394_type_trans(skb, dev);
+
+	return ret;
+}
+
+static inline int fragment_overlap(struct list_head *frag_list, int offset, int len)
+{
+	struct list_head *lh;
+	struct fragment_info *fi;
+
+	list_for_each(lh, frag_list) {
+		fi = list_entry(lh, struct fragment_info, list);
+
+		if ( ! ((offset > (fi->offset + fi->len - 1)) ||
+		       ((offset + len - 1) < fi->offset)))
+			return 1;
+	}
+	return 0;
+}
+
+static inline struct list_head *find_partial_datagram(struct list_head *pdgl, int dgl)
+{
+	struct list_head *lh;
+	struct partial_datagram *pd;
+
+	list_for_each(lh, pdgl) {
+		pd = list_entry(lh, struct partial_datagram, list);
+		if (pd->dgl == dgl)
+			return lh;
+	}
+	return NULL;
+}
+
+/* Assumes that new fragment does not overlap any existing fragments */
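+/* Worked example: with fragments [0,511] and [1024,1535] already recorded,
+ * adding [512,1023] first extends [0,511] to [0,1023] and then glues it to
+ * [1024,1535], leaving a single entry covering the whole [0,1535] range. */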
+static inline int new_fragment(struct list_head *frag_info, int offset, int len)
+{
+	struct list_head *lh;
+	struct fragment_info *fi, *fi2, *new;
+
+	list_for_each(lh, frag_info) {
+		fi = list_entry(lh, struct fragment_info, list);
+		if ((fi->offset + fi->len) == offset) {
+			/* The new fragment can be tacked on to the end */
+			fi->len += len;
+			/* Did the new fragment plug a hole? */
+			fi2 = list_entry(lh->next, struct fragment_info, list);
+			if ((fi->offset + fi->len) == fi2->offset) {
+				/* glue fragments together */
+				fi->len += fi2->len;
+				list_del(lh->next);
+				kfree(fi2);
+			}
+			return 0;
+		} else if ((offset + len) == fi->offset) {
+			/* The new fragment can be tacked on to the beginning */
+			fi->offset = offset;
+			fi->len += len;
+			/* Did the new fragment plug a hole? */
+			fi2 = list_entry(lh->prev, struct fragment_info, list);
+			if ((fi2->offset + fi2->len) == fi->offset) {
+				/* glue fragments together */
+				fi2->len += fi->len;
+				list_del(lh);
+				kfree(fi);
+			}
+			return 0;
+		} else if (offset > (fi->offset + fi->len)) {
+			break;
+		} else if ((offset + len) < fi->offset) {
+			lh = lh->prev;
+			break;
+		}
+	}
+
+	new = kmalloc(sizeof(struct fragment_info), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	new->offset = offset;
+	new->len = len;
+
+	list_add(&new->list, lh);
+
+	return 0;
+}
+
+static inline int new_partial_datagram(struct rtnet_device *dev,
+				       struct list_head *pdgl, int dgl,
+				       int dg_size, char *frag_buf,
+				       int frag_off, int frag_len)
+{
+	struct partial_datagram *new;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+
+	new = kmalloc(sizeof(struct partial_datagram), GFP_ATOMIC);
+	if (!new)
+		return -ENOMEM;
+
+	INIT_LIST_HEAD(&new->frag_info);
+
+	if (new_fragment(&new->frag_info, frag_off, frag_len) < 0) {
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	new->dgl = dgl;
+	new->dg_size = dg_size;
+
+	new->skb = rtnetdev_alloc_rtskb(dev, dg_size + dev->hard_header_len + 15);
+	if (!new->skb) {
+		struct fragment_info *fi = list_entry(new->frag_info.next,
+						      struct fragment_info,
+						      list);
+		kfree(fi);
+		kfree(new);
+		return -ENOMEM;
+	}
+
+	rtskb_reserve(new->skb, (dev->hard_header_len + 15) & ~15);
+	new->pbuf = rtskb_put(new->skb, dg_size);
+	memcpy(new->pbuf + frag_off, frag_buf, frag_len);
+
+	list_add(&new->list, pdgl);
+
+	return 0;
+}
+
+static inline int update_partial_datagram(struct list_head *pdgl, struct list_head *lh,
+					  char *frag_buf, int frag_off, int frag_len)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+
+	if (new_fragment(&pd->frag_info, frag_off, frag_len) < 0) {
+		return -ENOMEM;
+	}
+
+	memcpy(pd->pbuf + frag_off, frag_buf, frag_len);
+
+	/* Move list entry to beginning of list so that oldest partial
+	 * datagrams percolate to the end of the list */
+	list_del(lh);
+	list_add(lh, pdgl);
+
+	return 0;
+}
+
+static inline void purge_partial_datagram(struct list_head *old)
+{
+	struct partial_datagram *pd = list_entry(old, struct partial_datagram, list);
+	struct list_head *lh, *n;
+
+	list_for_each_safe(lh, n, &pd->frag_info) {
+		struct fragment_info *fi = list_entry(lh, struct fragment_info, list);
+		list_del(lh);
+		kfree(fi);
+	}
+	list_del(old);
+	kfree_rtskb(pd->skb);
+	kfree(pd);
+}
+
+static inline int is_datagram_complete(struct list_head *lh, int dg_size)
+{
+	struct partial_datagram *pd = list_entry(lh, struct partial_datagram, list);
+	struct fragment_info *fi = list_entry(pd->frag_info.next,
+					      struct fragment_info, list);
+
+	return (fi->len == dg_size);
+}
+
+
+
+
+/* Packet reception. We convert the IP1394 encapsulation header to an
+ * ethernet header, and fill it with some of our other fields. This is
+ * an incoming packet from the 1394 bus.  */
+static int eth1394_data_handler(struct rtnet_device *dev, int srcid, int destid,
+				  char *buf, int len, nanosecs_abs_t time_stamp)
+{
+	struct rtskb *skb;
+	rtdm_lockctx_t context;
+	struct eth1394_priv *priv;
+	union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
+	u16 ether_type = 0;  /* initialized to clear warning */
+	int hdr_len;
+
+	//~ nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	priv = (struct eth1394_priv *)dev->priv;
+
+	/* First, did we receive a fragmented or unfragmented datagram? */
+	hdr->words.word1 = ntohs(hdr->words.word1);
+
+	hdr_len = hdr_type_len[hdr->common.lf];
+
+	if (hdr->common.lf == ETH1394_HDR_LF_UF) {
+		DEBUGP("a single datagram has been received\n");
+		/* An unfragmented datagram has been received by the ieee1394
+		 * bus. Build an skbuff around it so we can pass it to the
+		 * high level network layer. */
+
+		//~ if(rtpkb_acquire((struct rtpkb*)packet, &priv->skb_pool)){
+			//~ HPSB_PRINT (KERN_ERR, "eth1394 rx: low on mem\n");
+			//~ priv->stats.rx_dropped++;
+			//~ return -1;
+		//~ }
+
+		skb = rtnetdev_alloc_rtskb(dev, len + dev->hard_header_len + 15);
+		if (!skb) {
+			ETH1394_PRINT_G(KERN_ERR, "eth1394 rx: low on mem\n");
+			priv->stats.rx_dropped++;
+			return -1;
+		}
+		//~ skb = (struct rtskb *)packet;//we can do this, because these two belong to the same common object, rtpkb.
+		//~ rtpkb_put(skb, len-hdr_len);
+		//~ skb->data = (u8 *)packet->data + hdr_len; //we jump over the 1394-specific fragment overhead
+		//~ rtskb_put(skb, );
+		rtskb_reserve(skb, (dev->hard_header_len + 15) & ~15); //we reserve the space to put in the fake MAC address
+		memcpy(rtskb_put(skb, len - hdr_len), buf + hdr_len, len - hdr_len);
+		ether_type = hdr->uf.ether_type;
+	} else {
+		/* A datagram fragment has been received, now the fun begins. */
+		struct list_head *pdgl, *lh;
+		struct partial_datagram *pd;
+		int fg_off;
+		int fg_len = len - hdr_len;
+		int dg_size;
+		int dgl;
+		int retval;
+		int sid = NODEID_TO_NODE(srcid);
+		struct pdg_list *pdg = &(priv->pdg[sid]);
+
+		DEBUGP("a datagram fragment has been received\n");
+		hdr->words.word3 = ntohs(hdr->words.word3);
+		/* The 4th header word is reserved so no need to do ntohs() */
+
+		if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+			//first fragment
+			ether_type = hdr->ff.ether_type;
+			dgl = hdr->ff.dgl;
+			dg_size = hdr->ff.dg_size + 1;
+			fg_off = 0;
+		} else {
+			hdr->words.word2 = ntohs(hdr->words.word2);
+			dgl = hdr->sf.dgl;
+			dg_size = hdr->sf.dg_size + 1;
+			fg_off = hdr->sf.fg_off;
+		}
+		rtdm_lock_get_irqsave(&pdg->lock, context);
+
+		pdgl = &(pdg->list);
+		lh = find_partial_datagram(pdgl, dgl);
+
+		if (lh == NULL) {
+			if (pdg->sz == max_partial_datagrams) {
+				/* remove the oldest */
+				purge_partial_datagram(pdgl->prev);
+				pdg->sz--;
+			}
+
+			retval = new_partial_datagram(dev, pdgl, dgl, dg_size,
+						      buf + hdr_len, fg_off,
+						      fg_len);
+			if (retval < 0) {
+				rtdm_lock_put_irqrestore(&pdg->lock, context);
+				goto bad_proto;
+			}
+			pdg->sz++;
+			lh = find_partial_datagram(pdgl, dgl);
+		} else {
+			struct partial_datagram *pd;
+
+			pd = list_entry(lh, struct partial_datagram, list);
+
+			if (fragment_overlap(&pd->frag_info, fg_off, fg_len)) {
+				/* Overlapping fragments, obliterate old
+				 * datagram and start new one. */
+				purge_partial_datagram(lh);
+				retval = new_partial_datagram(dev, pdgl, dgl,
+							      dg_size,
+							      buf + hdr_len,
+							      fg_off, fg_len);
+				if (retval < 0) {
+					pdg->sz--;
+					rtdm_lock_put_irqrestore(&pdg->lock, context);
+					goto bad_proto;
+				}
+			} else {
+				retval = update_partial_datagram(pdgl, lh,
+								 buf + hdr_len,
+								 fg_off, fg_len);
+				if (retval < 0) {
+					/* Couldn't save off fragment anyway
+					 * so might as well obliterate the
+					 * datagram now. */
+					purge_partial_datagram(lh);
+					pdg->sz--;
+					rtdm_lock_put_irqrestore(&pdg->lock, context);
+					goto bad_proto;
+				}
+			} /* fragment overlap */
+		} /* new datagram or add to existing one */
+
+		pd = list_entry(lh, struct partial_datagram, list);
+
+		if (hdr->common.lf == ETH1394_HDR_LF_FF) {
+			pd->ether_type = ether_type;
+		}
+
+		if (is_datagram_complete(lh, dg_size)) {
+			ether_type = pd->ether_type;
+			pdg->sz--;
+			//skb = skb_get(pd->skb);
+			skb = pd->skb;
+			purge_partial_datagram(lh);
+			rtdm_lock_put_irqrestore(&pdg->lock, context);
+		} else {
+			/* Datagram is not complete, we're done for the
+			 * moment. */
+			rtdm_lock_put_irqrestore(&pdg->lock, context);
+			return 0;
+		}
+	} /* unfragmented datagram or fragmented one */
+
+	/* Write metadata, and then pass to the receive level */
+	skb->ip_summed = CHECKSUM_UNNECESSARY;	/* don't check it */
+
+	/* Parse the encapsulation header. This actually does the job of
+	 * converting to an ethernet frame header, as well as ARP
+	 * conversion if needed. ARP conversion is easier in this
+	 * direction, since we are using ethernet as our backend.  */
+	skb->protocol = eth1394_parse_encap(skb, dev, srcid, destid,
+					      ether_type);
+
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	if (!skb->protocol) {
+		DEBUG_PRINT("pointer to %s(%s)%d\n",__FILE__,__FUNCTION__,__LINE__);
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		//dev_kfree_skb_any(skb);
+		kfree_rtskb(skb);
+		goto bad_proto;
+	}
+
+	skb->time_stamp = time_stamp;
+	/*if (netif_rx(skb) == NET_RX_DROP) {
+		priv->stats.rx_errors++;
+		priv->stats.rx_dropped++;
+		goto bad_proto;
+	}*/
+	rtnetif_rx(skb); //finally, we deliver the packet
+
+	/* Statistics */
+	priv->stats.rx_packets++;
+	priv->stats.rx_bytes += skb->len;
+	rt_mark_stack_mgr(dev);
+
+bad_proto:
+	if (rtnetif_queue_stopped(dev))
+		rtnetif_wake_queue(dev);
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	//dev->last_rx = jiffies;
+
+	return 0;
+}
+
+
+static int eth1394_write(struct hpsb_host *host, struct hpsb_packet *packet, unsigned int length)
+{
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, host);
+	int ret;
+
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				host->driver->name);
+		return RCODE_ADDRESS_ERROR;
+	}
+
+	//we need to parse the packet now
+	ret = eth1394_data_handler(hi->dev, packet->header[1]>>16, //source id
+							 packet->header[0]>>16, //dest id
+							 (char *)packet->data, //data
+							packet->data_size, packet->time_stamp);
+	//we only get the request packet; serve it, but don't free it, since it does not belong to us
+
+	if (ret)
+		return RCODE_ADDRESS_ERROR;
+	else
+		return RCODE_COMPLETE;
+}
+
+
+/**
+ * Callback function for the broadcast channel,
+ * called from hpsb_iso_wake().
+ */
+static void eth1394_iso(struct hpsb_iso *iso, void *arg)
+{
+	quadlet_t *data;
+	char *buf;
+	struct rtnet_device *dev;
+	unsigned int len;
+	u32 specifier_id;
+	u16 source_id;
+	int i;
+	int nready;
+
+	struct host_info *hi = hpsb_get_hostinfo(&eth1394_highlevel, iso->host);
+	if (hi == NULL) {
+		ETH1394_PRINT_G(KERN_ERR, "Could not find net device for host %s\n",
+				iso->host->driver->name);
+		return;
+	}
+
+	dev = hi->dev;
+
+	nready = hpsb_iso_n_ready(iso);
+	for (i = 0; i < nready; i++) {
+		struct hpsb_iso_packet_info *info =
+			&iso->infos[(iso->first_packet + i) % iso->buf_packets];
+		data = (quadlet_t*) (iso->data_buf.kvirt + info->offset);
+
+		/* skip over GASP header */
+		buf = (char *)data + 8;
+		len = info->len - 8;
+
+		specifier_id = (((be32_to_cpu(data[0]) & 0xffff) << 8) |
+				((be32_to_cpu(data[1]) & 0xff000000) >> 24));
+		source_id = be32_to_cpu(data[0]) >> 16;
+
+		if (info->channel != (iso->host->csr.broadcast_channel & 0x3f) ||
+				specifier_id != ETHER1394_GASP_SPECIFIER_ID) {
+			/* This packet is not for us */
+			continue;
+		}
+		eth1394_data_handler(dev, source_id, LOCAL_BUS | ALL_NODES,
+				       buf, len, rtdm_clock_read());
+	}
+
+	hpsb_iso_recv_release_packets(iso, i);
+
+	//dev->last_rx = jiffies;
+}
+
+/******************************************
+ * Datagram transmission code
+ ******************************************/
+
+/* Convert a standard ARP packet to 1394 ARP. The first 8 bytes (the entire
+ * arphdr) is the same format as the ip1394 header, so they overlap.  The rest
+ * needs to be munged a bit.  The remainder of the arphdr is formatted based
+ * on hwaddr len and ipaddr len.  We know what they'll be, so it's easy to
+ * judge.
+ *
+ * Now that the EUI is used for the hardware address all we need to do to make
+ * this work for 1394 is to insert 2 quadlets that contain max_rec size,
+ * speed, and unicast FIFO address information between the sender_unique_id
+ * and the IP addresses.
+ */
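+/* Rough resulting layout (a sketch; field names follow struct eth1394_arp in
+ * this driver, exact widths per RFC 2734):
+ *
+ *	arphdr (first 8 bytes, shared layout) | sender unique ID |
+ *	max_rec | sspd | fifo_hi | fifo_lo | sender IP | target IP
+ */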
+
+//we don't need the EUI id now. fifo_hi should contain the bus id and node id;
+//fifo_lo should contain the highest 32 bits of the in-node address.
+static inline void eth1394_arp_to_1394arp(struct rtskb *skb,
+					    struct rtnet_device *dev)
+{
+	struct eth1394_priv *priv = (struct eth1394_priv *)(dev->priv);
+	u16 phy_id = NODEID_TO_NODE(priv->host->node_id);
+
+	struct arphdr *arp = (struct arphdr *)skb->data;
+	unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+	struct eth1394_arp *arp1394 = (struct eth1394_arp *)skb->data;
+
+	arp1394->hw_addr_len	= 6;
+	arp1394->sip		= *(u32*)(arp_ptr + ETH_ALEN);
+	arp1394->max_rec	= priv->host->csr.max_rec;
+	arp1394->sspd		= priv->sspd[phy_id];
+
+	return;
+}
+
+/* We need to encapsulate the standard header with our own. We use the
+ * ethernet header's proto for our own. */
+static inline unsigned int eth1394_encapsulate_prep(unsigned int max_payload,
+						      int proto,
+						      union eth1394_hdr *hdr,
+						      u16 dg_size, u16 dgl)
+{
+	unsigned int adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_UF];
+
+	/* Does it all fit in one packet? */
+	if (dg_size <= adj_max_payload) {
+		hdr->uf.lf = ETH1394_HDR_LF_UF;
+		hdr->uf.ether_type = proto;
+	} else {
+		hdr->ff.lf = ETH1394_HDR_LF_FF;
+		hdr->ff.ether_type = proto;
+		hdr->ff.dg_size = dg_size - 1;
+		hdr->ff.dgl = dgl;
+		adj_max_payload = max_payload - hdr_type_len[ETH1394_HDR_LF_FF];
+	}
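+	/* Ceiling division: e.g. (illustrative numbers) adj_max_payload =
+	 * 1020 and dg_size = 3000 give (3000 + 1019) / 1020 = 3 fragments. */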
+	return((dg_size + (adj_max_payload - 1)) / adj_max_payload);
+}
+
+static inline unsigned int eth1394_encapsulate(struct rtskb *skb,
+						 unsigned int max_payload,
+						 union eth1394_hdr *hdr)
+{
+	union eth1394_hdr *bufhdr;
+	int ftype = hdr->common.lf;
+	int hdrsz = hdr_type_len[ftype];
+	unsigned int adj_max_payload = max_payload - hdrsz;
+
+	switch(ftype) {
+	case ETH1394_HDR_LF_UF:
+		bufhdr = (union eth1394_hdr *)rtskb_push(skb, hdrsz);
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = hdr->words.word2;
+		break;
+
+	case ETH1394_HDR_LF_FF:
+		bufhdr = (union eth1394_hdr *)rtskb_push(skb, hdrsz);
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = hdr->words.word2;
+		bufhdr->words.word3 = htons(hdr->words.word3);
+		bufhdr->words.word4 = 0;
+
+		/* Set frag type here for future interior fragments */
+		hdr->common.lf = ETH1394_HDR_LF_IF;
+		hdr->sf.fg_off = 0;
+		break;
+
+	default:
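+		/* Interior/last fragment: the new fragment header is written
+		 * over the tail bytes of the previous, already-transmitted
+		 * fragment, so no extra headroom is needed. */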
+		hdr->sf.fg_off += adj_max_payload;
+		bufhdr = (union eth1394_hdr *)rtskb_pull(skb, adj_max_payload);
+		if (max_payload >= skb->len)
+			hdr->common.lf = ETH1394_HDR_LF_LF;
+		bufhdr->words.word1 = htons(hdr->words.word1);
+		bufhdr->words.word2 = htons(hdr->words.word2);
+		bufhdr->words.word3 = htons(hdr->words.word3);
+		bufhdr->words.word4 = 0;
+	}
+
+	return min(max_payload, skb->len);
+}
+
+//just allocate an hpsb_packet header, without payload.
+static inline struct hpsb_packet *eth1394_alloc_common_packet(struct hpsb_host *host, unsigned int priority)
+{
+	struct hpsb_packet *p;
+
+	p = hpsb_alloc_packet(0,&host->pool, priority);
+	if (p) {
+		p->host = host;
+		p->data = NULL;
+		p->generation = get_hpsb_generation(host);
+		p->type = hpsb_async;
+	}
+	return p;
+}
+
+//prepare an asynchronous write packet
+static inline int eth1394_prep_write_packet(struct hpsb_packet *p,
+					      struct hpsb_host *host,
+					      nodeid_t node, u64 addr,
+					      void * data, int tx_len)
+{
+	p->node_id = node;
+
+	p->tcode = TCODE_WRITEB;
+
+	p->header[1] = (host->node_id << 16) | (addr >> 32);
+	p->header[2] = addr & 0xffffffff;
+
+	p->header_size = 16;
+	p->expect_response = 1;
+
+	if (hpsb_get_tlabel(p)) {
+		ETH1394_PRINT_G(KERN_ERR, "No more tlabels left while sending "
+				"to node " NODE_BUS_FMT "\n", NODE_BUS_ARGS(host, node));
+		return -1;
+	}
+	p->header[0] = (p->node_id << 16) | (p->tlabel << 10)
+		| (1 << 8) | (TCODE_WRITEB << 4);
+
+	p->header[3] = tx_len << 16;
+	p->data_size = tx_len + (tx_len % 4 ? 4 - (tx_len % 4) : 0);
+	p->data = (quadlet_t*)data;
+
+	return 0;
+}
+
+//prepare gasp packet from skb.
+static inline void eth1394_prep_gasp_packet(struct hpsb_packet *p,
+					      struct eth1394_priv *priv,
+					      struct rtskb *skb, int length)
+{
+	p->header_size = 4;
+	p->tcode = TCODE_STREAM_DATA;
+
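+	/* Isochronous stream header: data_length(16) | tag(2) | channel(6) |
+	 * tcode(4) | sy(4); tag 3 marks a GASP stream, sy is left 0. */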
+	p->header[0] = (length << 16) | (3 << 14)
+		| ((priv->broadcast_channel) << 8)
+		| (TCODE_STREAM_DATA << 4);
+	p->data_size = length;
+	p->data = ((quadlet_t*)skb->data) - 2; //we need 64 bits for the extra spec_id and GASP version.
+	p->data[0] = cpu_to_be32((priv->host->node_id << 16) |
+				      ETHER1394_GASP_SPECIFIER_ID_HI);
+	p->data[1] = cpu_to_be32((ETHER1394_GASP_SPECIFIER_ID_LO << 24) |
+				      ETHER1394_GASP_VERSION);
+
+	/* Setting the node id to ALL_NODES (not LOCAL_BUS | ALL_NODES)
+	 * prevents hpsb_send_packet() from setting the speed to an arbitrary
+	 * value based on packet->node_id if packet->node_id is not set. */
+	p->node_id = ALL_NODES;
+	p->speed_code = priv->sspd[ALL_NODES];
+}
+
+
+static inline void eth1394_free_packet(struct hpsb_packet *packet)
+{
+	if (packet->tcode != TCODE_STREAM_DATA)
+		hpsb_free_tlabel(packet);
+	hpsb_free_packet(packet);
+}
+
+static void eth1394_complete_cb(struct hpsb_packet *packet, void *__ptask);
+
+
+/**
+ * This function does the actual call to hpsb_send_packet().
+ * Before that, it also constructs the FireWire packet according
+ * to ptask.
+ */
+static int eth1394_send_packet(struct packet_task *ptask, unsigned int tx_len, nanosecs_abs_t *xmit_stamp)
+{
+	struct eth1394_priv *priv = ptask->priv;
+	struct hpsb_packet *packet = NULL;
+	int ret;
+
+	packet = eth1394_alloc_common_packet(priv->host, ptask->priority);
+	if (!packet) {
+		ret = -ENOMEM;
+		return ret;
+	}
+	if(xmit_stamp)
+		packet->xmit_stamp = xmit_stamp;
+
+	if (ptask->tx_type == ETH1394_GASP) {
+		int length = tx_len + (2 * sizeof(quadlet_t)); //for the extra gasp overhead
+
+		eth1394_prep_gasp_packet(packet, priv, ptask->skb, length);
+	} else if (eth1394_prep_write_packet(packet, priv->host,
+					       ptask->dest_node,
+					       ptask->addr, ptask->skb->data,
+					       tx_len)) {
+		hpsb_free_packet(packet);
+		return -1;
+	}
+
+	ptask->packet = packet;
+	hpsb_set_packet_complete_task(ptask->packet, eth1394_complete_cb,
+				      ptask);
+
+	ret = hpsb_send_packet(packet);
+	if (ret != 0) {
+		eth1394_free_packet(packet);
+	}
+
+	return ret;
+}
+
+
+/* Task function to be run when a datagram transmission is completed */
+static inline void eth1394_dg_complete(struct packet_task *ptask, int fail)
+{
+	struct rtskb *skb = ptask->skb;
+	struct rtnet_device *dev = skb->rtdev;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	rtdm_lockctx_t context;
+
+	/* Statistics */
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	if (fail) {
+		priv->stats.tx_dropped++;
+		priv->stats.tx_errors++;
+	} else {
+		priv->stats.tx_bytes += skb->len;
+		priv->stats.tx_packets++;
+	}
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	//dev_kfree_skb_any(skb);
+	kfree_rtskb(skb);
+	//~ kmem_cache_free(packet_task_cache, ptask);
+	//mark this ptask slot as free again
+	ptask->packet = NULL;
+}
+
+
+/* Callback for when a packet has been sent and the status of that packet is
+ * known */
+static void eth1394_complete_cb(struct hpsb_packet *packet, void *__ptask)
+{
+	struct packet_task *ptask = (struct packet_task *)__ptask;
+	int fail = 0;
+
+	if (packet->tcode != TCODE_STREAM_DATA)
+		fail = hpsb_packet_success(packet);
+
+	//we have no right to free the packet, since it belongs to the RT-FireWire kernel.
+	//~ eth1394_free_packet(packet);
+
+	ptask->outstanding_pkts--;
+	if (ptask->outstanding_pkts > 0 && !fail)
+	{
+		int tx_len;
+
+		/* Add the encapsulation header to the fragment */
+		tx_len = eth1394_encapsulate(ptask->skb, ptask->max_payload,
+					       &ptask->hdr);
+		if (eth1394_send_packet(ptask, tx_len, NULL))
+			eth1394_dg_complete(ptask, 1);
+	} else {
+		eth1394_dg_complete(ptask, fail);
+	}
+}
+
+
+
+/**
+ * Transmit a packet (called by the RTnet stack).
+ * This is dev->hard_start_xmit.
+ */
+static int eth1394_tx (struct rtskb *skb, struct rtnet_device *dev)
+{
+
+	struct ethhdr *eth;
+	struct eth1394_priv *priv = (struct eth1394_priv *)dev->priv;
+	int proto;
+	rtdm_lockctx_t context;
+	nodeid_t dest_node;
+	eth1394_tx_type tx_type;
+	int ret = 0;
+	unsigned int tx_len;
+	unsigned int max_payload;
+	u16 dg_size;
+	u16 dgl;
+
+	//try to find an available ptask struct; if none is free, we cannot send the packet
+	struct packet_task *ptask = NULL;
+	int i;
+	for (i = 0; i < 20; i++) {
+		if (priv->ptask_list[i].packet == NULL) {
+			ptask = &priv->ptask_list[i];
+			break;
+		}
+	}
+	if (ptask == NULL)
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	if (priv->bc_state == ETHER1394_BC_CLOSED) {
+		ETH1394_PRINT(KERN_ERR, dev->name,
+			      "Cannot send packet, no broadcast channel available.\n");
+		ret = -EAGAIN;
+		rtdm_lock_put_irqrestore(&priv->lock, context);
+		goto fail;
+	}
+	if ((ret = eth1394_init_bc(dev))) {
+		rtdm_lock_put_irqrestore(&priv->lock, context);
+		goto fail;
+	}
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+	//if ((skb = skb_share_check (skb, kmflags)) == NULL) {
+	//	ret = -ENOMEM;
+	//	goto fail;
+	//}
+
+	/* Get rid of the fake eth1394 header, but save a pointer */
+	eth = (struct ethhdr*)skb->data;
+	rtskb_pull(skb, ETH_HLEN);
+	//dont get rid of the fake eth1394 header, since we need it on the receiving side
+	//eth = (struct ethhdr*)skb->data;
+
+	//~ //find the node id via our fake MAC address
+	//~ ne = hpsb_guid_get_entry(be64_to_cpu(*(u64*)eth->h_dest));
+	//~ if (!ne)
+		//~ dest_node = LOCAL_BUS | ALL_NODES;
+	//~ else
+		//~ dest_node = ne->nodeid;
+	//now it is much easier
+	dest_node = *(u16*)eth->h_dest;
+	if (dest_node != 0xffff)
+		DEBUGP("%s: dest_node is %x\n", __FUNCTION__, dest_node);
+
+	proto = eth->h_proto;
+
+	/* If this is an ARP packet, convert it */
+	if (proto == __constant_htons (ETH_P_ARP))
+		eth1394_arp_to_1394arp (skb, dev);
+
+	max_payload = priv->maxpayload[NODEID_TO_NODE(dest_node)];
+	DEBUGP("%s: max_payload is %d\n", __FUNCTION__, max_payload);
+
+	/* This check should be unnecessary, but we'll keep it for safety for
+	 * a while longer. */
+	if (max_payload < 512) {
+		DEBUGP("max_payload too small: %d   (setting to 512)\n",
+			      max_payload);
+		max_payload = 512;
+	}
+
+	/* Set the transmission type for the packet.  ARP packets and IP
+	 * broadcast packets are sent via GASP. */
+	if (memcmp(eth->h_dest, dev->broadcast, sizeof(nodeid_t)) == 0 ||
+	    proto == __constant_htons(ETH_P_ARP) ||
+	    (proto == __constant_htons(ETH_P_IP) &&
+	     IN_MULTICAST(__constant_ntohl(skb->nh.iph->daddr)))) {
+		tx_type = ETH1394_GASP;
+		max_payload -= ETHER1394_GASP_OVERHEAD; /* GASP packets carry extra header overhead */
+	} else {
+		tx_type = ETH1394_WRREQ;
+	}
+
+	dg_size = skb->len;
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	dgl = priv->dgl[NODEID_TO_NODE(dest_node)];
+	if (max_payload < dg_size + hdr_type_len[ETH1394_HDR_LF_UF])
+		priv->dgl[NODEID_TO_NODE(dest_node)]++;
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	ptask->hdr.words.word1 = 0;
+	ptask->hdr.words.word2 = 0;
+	ptask->hdr.words.word3 = 0;
+	ptask->hdr.words.word4 = 0;
+	ptask->skb = skb;
+	ptask->priv = priv;
+	ptask->tx_type = tx_type;
+
+	if (tx_type != ETH1394_GASP) {
+		u64 addr;
+
+		/* This test is just temporary until ConfigROM support has
+		 * been added to eth1394.  Until then, we need an ARP packet
+		 * after a bus reset from the current destination node so that
+		 * we can get FIFO information. */
+		//~ if (priv->fifo[NODEID_TO_NODE(dest_node)] == 0ULL) {
+			//~ ret = -EAGAIN;
+			//~ goto fail;
+		//~ }
+
+		//~ rtos_spin_lock_irqsave(&priv->lock, flags);
+		//~ addr = priv->fifo[NODEID_TO_NODE(dest_node)];
+		addr =  ETHER1394_REGION_ADDR;
+		//~ rtos_spin_unlock_irqrestore(&priv->lock, flags);
+
+		ptask->addr = addr;
+		ptask->dest_node = dest_node;
+	}
+
+	ptask->tx_type = tx_type;
+	ptask->max_payload = max_payload;
+	ptask->outstanding_pkts = eth1394_encapsulate_prep(max_payload, proto,
+							     &ptask->hdr, dg_size,
+							     dgl);
+
+	/* Add the encapsulation header to the fragment */
+	tx_len = eth1394_encapsulate(skb, max_payload, &ptask->hdr);
+	//dev->trans_start = jiffies;
+	//~ if(skb->xmit_stamp)
+		//~ *skb->xmit_stamp = cpu_to_be64(rtos_get_time() + *skb->xmit_stamp);
+
+
+	if (eth1394_send_packet(ptask, tx_len, skb->xmit_stamp))
+		goto fail;
+
+	rtnetif_wake_queue(dev);
+	return 0;
+fail:
+	if (ptask != NULL) {
+		//~ kmem_cache_free(packet_task_cache, ptask);
+		ptask->packet = NULL;
+		ptask = NULL;
+	}
+	}
+
+	if (skb != NULL)
+		dev_kfree_rtskb(skb);
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+	priv->stats.tx_dropped++;
+	priv->stats.tx_errors++;
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	if (rtnetif_queue_stopped(dev))
+		rtnetif_wake_queue(dev);
+
+	return 0;  /* returning non-zero causes serious problems */
+}
+
+static int eth1394_init(void)
+{
+	hpsb_register_highlevel(&eth1394_highlevel);
+
+	return 0;
+}
+
+static void eth1394_exit(void)
+{
+	hpsb_unregister_highlevel(&eth1394_highlevel);
+}
+
+module_init(eth1394_init);
+module_exit(eth1394_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c
new file mode 100644
index 0000000..94e0c93
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/3c59x.c
@@ -0,0 +1,2752 @@
+#warning  *********************************************************************
+#warning  This driver is probably not real-time safe! Under certain conditions
+#warning  it can cause interrupt locks of up to 1 second (issue_and_wait). We
+#warning  need a rewrite of critical parts, but we are lacking the knowledge
+#warning  about the hardware details (e.g. how long does a normal delay take =>
+#warning  apply this value and throw an error message on timeouts).
+#warning  *********************************************************************
+
+/* EtherLinkXL.c: A 3Com EtherLink PCI III/XL ethernet driver for linux / RTnet. */
+/*
+  RTnet porting 2002 by Mathias Koehrer (mathias_koehrer@yahoo.de)
+  -- Support only for PCI boards, EISA stuff ignored...
+
+  Originally written 1996-1999 by Donald Becker.
+
+  This software may be used and distributed according to the terms
+  of the GNU General Public License, incorporated herein by reference.
+
+  This driver is for the 3Com "Vortex" and "Boomerang" series ethercards.
+  Members of the series include Fast EtherLink 3c590/3c592/3c595/3c597
+  and the EtherLink XL 3c900 and 3c905 cards.
+
+  Problem reports and questions should be directed to
+  vortex@scyld.com
+
+  The author may be reached as becker@scyld.com, or C/O
+  Scyld Computing Corporation
+  410 Severn Ave., Suite 210
+  Annapolis MD 21403
+
+  Linux Kernel Additions:
+
+  0.99H+lk0.9 - David S. Miller - softnet, PCI DMA updates
+  0.99H+lk1.0 - Jeff Garzik <jgarzik@mandrakesoft.com>
+  Remove compatibility defines for kernel versions < 2.2.x.
+  Update for new 2.3.x module interface
+  LK1.1.2 (March 19, 2000)
+  * New PCI interface (jgarzik)
+
+  LK1.1.3 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+  - Merged with 3c575_cb.c
+  - Don't set RxComplete in boomerang interrupt enable reg
+  - spinlock in vortex_timer to protect mdio functions
+  - disable local interrupts around call to vortex_interrupt in
+  vortex_tx_timeout() (So vortex_interrupt can use spin_lock())
+  - Select window 3 in vortex_timer()'s write to Wn3_MAC_Ctrl
+  - In vortex_start_xmit(), move the lock to _after_ we've altered
+  vp->cur_tx and vp->tx_full.  This defeats the race between
+  vortex_start_xmit() and vortex_interrupt which was identified
+  by Bogdan Costescu.
+  - Merged back support for six new cards from various sources
+  - Set vortex_have_pci if pci_module_init returns zero (fixes cardbus
+  insertion oops)
+  - Tell it that 3c905C has NWAY for 100bT autoneg
+  - Fix handling of SetStatusEnd in 'Too much work..' code, as
+  per 2.3.99's 3c575_cb (Dave Hinds).
+  - Split ISR into two for vortex & boomerang
+  - Fix MOD_INC/DEC races
+  - Handle resource allocation failures.
+  - Fix 3CCFE575CT LED polarity
+  - Make tx_interrupt_mitigation the default
+
+  LK1.1.4 25 April 2000, Andrew Morton <andrewm@uow.edu.au>
+  - Add extra TxReset to vortex_up() to fix 575_cb hotplug initialisation probs.
+  - Put vortex_info_tbl into __devinitdata
+  - In the vortex_error StatsFull HACK, disable stats in vp->intr_enable as well
+  as in the hardware.
+  - Increased the loop counter in issue_and_wait from 2,000 to 4,000.
+
+  LK1.1.5 28 April 2000, andrewm
+  - Added powerpc defines (John Daniel <jdaniel@etresoft.com> said these work...)
+  - Some extra diagnostics
+  - In vortex_error(), reset the Tx on maxCollisions.  Otherwise most
+  chips usually get a Tx timeout.
+  - Added extra_reset module parm
+  - Replaced some inline timer manip with mod_timer
+  (François Romieu <Francois.Romieu@nic.fr>)
+  - In vortex_up(), don't make Wn3_config initialisation dependent upon has_nway
+  (this came across from 3c575_cb).
+
+  LK1.1.6 06 Jun 2000, andrewm
+  - Backed out the PPC defines.
+  - Use del_timer_sync(), mod_timer().
+  - Fix wrapped ulong comparison in boomerang_rx()
+  - Add IS_TORNADO, use it to suppress 3c905C checksum error msg
+  (Donald Becker, I Lee Hetherington <ilh@sls.lcs.mit.edu>)
+  - Replace union wn3_config with BFINS/BFEXT manipulation for
+  sparc64 (Pete Zaitcev, Peter Jones)
+  - In vortex_error, do_tx_reset and vortex_tx_timeout(Vortex):
+  do a netif_wake_queue() to better recover from errors. (Anders Pedersen,
+  Donald Becker)
+  - Print a warning on out-of-memory (rate limited to 1 per 10 secs)
+  - Added two more Cardbus 575 NICs: 5b57 and 6564 (Paul Wagland)
+
+  LK1.1.7 2 Jul 2000 andrewm
+  - Better handling of shared IRQs
+  - Reset the transmitter on a Tx reclaim error
+  - Fixed crash under OOM during vortex_open() (Mark Hemment)
+  - Fix Rx cessation problem during OOM (help from Mark Hemment)
+  - The spinlocks around the mdio access were blocking interrupts for 300uS.
+  Fix all this to use spin_lock_bh() within mdio_read/write
+  - Only write to TxFreeThreshold if it's a boomerang - other NICs don't
+  have one.
+  - Added 802.3x MAC-layer flow control support
+
+  LK1.1.8 13 Aug 2000 andrewm
+  - Ignore request_region() return value - already reserved if Cardbus.
+  - Merged some additional Cardbus flags from Don's 0.99Qk
+  - Some fixes for 3c556 (Fred Maciel)
+  - Fix for EISA initialisation (Jan Rekorajski)
+  - Renamed MII_XCVR_PWR and EEPROM_230 to align with 3c575_cb and D. Becker's drivers
+  - Fixed MII_XCVR_PWR for 3CCFE575CT
+  - Added INVERT_LED_PWR, used it.
+  - Backed out the extra_reset stuff
+
+  LK1.1.9 12 Sep 2000 andrewm
+  - Backed out the tx_reset_resume flags.  It was a no-op.
+  - In vortex_error, don't reset the Tx on txReclaim errors
+  - In vortex_error, don't reset the Tx on maxCollisions errors.
+  Hence backed out all the DownListPtr logic here.
+  - In vortex_error, give Tornado cards a partial TxReset on
+  maxCollisions (David Hinds).	Defined MAX_COLLISION_RESET for this.
+  - Redid some driver flags and device names based on pcmcia_cs-3.1.20.
+  - Fixed a bug where, if vp->tx_full is set when the interface
+  is downed, it remains set when the interface is upped.  Bad
+  things happen.
+
+  LK1.1.10 17 Sep 2000 andrewm
+  - Added EEPROM_8BIT for 3c555 (Fred Maciel)
+  - Added experimental support for the 3c556B Laptop Hurricane (Louis Gerbarg)
+  - Add HAS_NWAY to "3c900 Cyclone 10Mbps TPO"
+
+  LK1.1.11 13 Nov 2000 andrewm
+  - Dump MOD_INC/DEC_USE_COUNT, use SET_MODULE_OWNER
+
+  LK1.1.12 1 Jan 2001 andrewm (2.4.0-pre1)
+  - Call pci_enable_device before we request our IRQ (Tobias Ringstrom)
+  - Add 3c590 PCI latency timer hack to vortex_probe1 (from 0.99Ra)
+  - Added extended issue_and_wait for the 3c905CX.
+  - Look for an MII on PHY index 24 first (3c905CX oddity).
+  - Add HAS_NWAY to 3cSOHO100-TX (Brett Frankenberger)
+  - Don't free skbs we don't own on oom path in vortex_open().
+
+  LK1.1.13 27 Jan 2001
+  - Added explicit `medialock' flag so we can truly
+  lock the media type down with `options'.
+  - "check ioremap return and some tidbits" (Arnaldo Carvalho de Melo <acme@conectiva.com.br>)
+  - Added and used EEPROM_NORESET for 3c556B PM resumes.
+  - Fixed leakage of vp->rx_ring.
+  - Break out separate HAS_HWCKSM device capability flag.
+  - Kill vp->tx_full (ANK)
+  - Merge zerocopy fragment handling (ANK?)
+
+  LK1.1.14 15 Feb 2001
+  - Enable WOL.  Can be turned on with `enable_wol' module option.
+  - EISA and PCI initialisation fixes (jgarzik, Manfred Spraul)
+  - If a device's internalconfig register reports it has NWAY,
+  use it, even if autoselect is enabled.
+
+  LK1.1.15 6 June 2001 akpm
+  - Prevent double counting of received bytes (Lars Christensen)
+  - Add ethtool support (jgarzik)
+  - Add module parm descriptions (Andrzej M. Krzysztofowicz)
+  - Implemented alloc_etherdev() API
+  - Special-case the 'Tx error 82' message.
+
+  LK1.1.16 18 July 2001 akpm
+  - Make NETIF_F_SG dependent upon nr_free_highpages(), not on CONFIG_HIGHMEM
+  - Lessen verbosity of bootup messages
+  - Fix WOL - use new PM API functions.
+  - Use netif_running() instead of vp->open in suspend/resume.
+  - Don't reset the interface logic on open/close/rmmod.  It upsets
+  autonegotiation, and hence DHCP (from 0.99T).
+  - Back out EEPROM_NORESET flag because of the above (we do it for all
+  NICs).
+  - Correct 3c982 identification string
+  - Rename wait_for_completion() to issue_and_wait() to avoid completion.h
+  clash.
+
+  - See http://www.uow.edu.au/~andrewm/linux/#3c59x-2.3 for more details.
+  - Also see Documentation/networking/vortex.txt
+*/
+
+/*
+ * FIXME: This driver _could_ support MTU changing, but doesn't.  See Don's hamachi.c implementation
+ * as well as other drivers
+ *
+ * NOTE: If you make 'vortex_debug' a constant (#define vortex_debug 0) the driver shrinks by 2k
+ * due to dead code elimination.  There will be some performance benefits from this due to
+ * elimination of all the tests and reduced cache footprint.
+ */
+
+
+#define DRV_NAME	"3c59x"
+#define DRV_VERSION	"LK1.1.16"
+#define DRV_RELDATE	"19 July 2001"
+
+
+
+/* A few values that may be tweaked. */
+/* Keep the ring sizes a power of two for efficiency. */
+#define TX_RING_SIZE	16
+#define RX_RING_SIZE	8 /*** RTnet ***/
+#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
+
+/* "Knobs" that adjust features and parameters. */
+/* The copy breakpoint for the copy-only-tiny-frames scheme (rx_copybreak)
+   was removed in this RTnet port; see the disabled MODULE_PARM entries
+   further below. */
+/*** RTnet ***/
+/* The MTU is fixed at the standard Ethernet size in this RTnet port. */
+static const int mtu = 1500;
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 32;
+/* Tx timeout interval (millisecs) */
+// *** RTnet ***
+//static int watchdog = 5000;
+// *** RTnet ***
+
+/* Allow aggregation of Tx interrupts.	Saves CPU load at the cost
+ * of possible Tx stalls if the system is blocking interrupts
+ * somewhere else.  Undefine this to disable.
+ */
+#define tx_interrupt_mitigation 1
+
+/* Put out somewhat more debugging messages. (0: no msg, 1 minimal .. 6). */
+#define vortex_debug debug
+#ifdef VORTEX_DEBUG
+static int vortex_debug = VORTEX_DEBUG;
+#else
+static int vortex_debug = 1;
+#endif
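+/* Note: the vortex_debug macro above aliases every use of vortex_debug to
+ * the 'debug' module parameter declared further below via
+ * module_param(debug, int, 0444); the static definition here therefore
+ * actually defines the variable named 'debug'. */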
+
+#ifndef __OPTIMIZE__
+#error You must compile this file with the correct options!
+#error See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/in.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ethtool.h>
+#include <linux/highmem.h>
+#include <linux/uaccess.h>
+#include <asm/irq.h>			/* For NR_IRQS only. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+
+// *** RTnet ***
+#include <rtnet_port.h>
+
+static int cards = INT_MAX;
+module_param(cards, int, 0444);
+MODULE_PARM_DESC(cards, "number of cards to be supported");
+// *** RTnet ***
+
+/* Kernel compatibility defines, some common to David Hinds' PCMCIA package.
+   This is only in the support-all-kernels source code. */
+
+#define RUN_AT(x) (jiffies + (x))
+
+#include <linux/delay.h>
+
+// *** RTnet - no power management ***
+#undef pci_set_power_state
+#define pci_set_power_state null_set_power_state
+static inline int null_set_power_state(struct pci_dev *dev, int state)
+{
+	return 0;
+}
+// *** RTnet ***
+
+
+static char version[] =
+	DRV_NAME " for RTnet : Donald Becker and others. www.scyld.com/network/vortex.html\n";
+
+MODULE_AUTHOR("Donald Becker <becker@scyld.com>");
+MODULE_DESCRIPTION("3Com 3c59x/3c9xx ethernet driver for RTnet "
+		DRV_VERSION " " DRV_RELDATE);
+MODULE_LICENSE("GPL");
+
+/* Operational parameters that are not usually changed. */
+
+/* The Vortex size is twice that of the original EtherLinkIII series: the
+   runtime register window, window 1, is now always mapped in.
+   The Boomerang size is twice as large as the Vortex -- it has additional
+   bus master control registers. */
+#define VORTEX_TOTAL_SIZE 0x20
+#define BOOMERANG_TOTAL_SIZE 0x40
+
+/* Set iff a MII transceiver on any interface requires mdio preamble.
+   This is only set with the original DP83840 on older 3c905 boards, so the
+   extra code size of a per-interface flag is not worthwhile. */
+static char mii_preamble_required;
+
+#define PFX DRV_NAME ": "
+
+
+
+/*
+  Theory of Operation
+
+  I. Board Compatibility
+
+  This device driver is designed for the 3Com FastEtherLink and FastEtherLink
+  XL, 3Com's PCI to 10/100baseT adapters.  It also works with the 10Mbps
+  versions of the FastEtherLink cards.	The supported product IDs are
+  3c590, 3c592, 3c595, 3c597, 3c900, 3c905
+
+  The related ISA 3c515 is supported with a separate driver, 3c515.c, included
+  with the kernel source or available from
+  cesdis.gsfc.nasa.gov:/pub/linux/drivers/3c515.html
+
+  II. Board-specific settings
+
+  PCI bus devices are configured by the system at boot time, so no jumpers
+  need to be set on the board.	The system BIOS should be set to assign the
+  PCI INTA signal to an otherwise unused system IRQ line.
+
+  The EEPROM settings for media type and forced-full-duplex are observed.
+  The EEPROM media type should be left at the default "autoselect" unless using
+  10base2 or AUI connections which cannot be reliably detected.
+
+  III. Driver operation
+
+  The 3c59x series use an interface that's very similar to the previous 3c5x9
+  series.  The primary interface is two programmed-I/O FIFOs, with an
+  alternate single-contiguous-region bus-master transfer (see next).
+
+  The 3c900 "Boomerang" series uses a full-bus-master interface with separate
+  lists of transmit and receive descriptors, similar to the AMD LANCE/PCnet,
+  DEC Tulip and Intel Speedo3.	The first chip version retains a compatible
+  programmed-I/O interface that has been removed in 'B' and subsequent board
+  revisions.
+
+  One extension that is advertised in a very large font is that the adapters
+  are capable of being bus masters.  On the Vortex chip this capability was
+  only for a single contiguous region making it far less useful than the full
+  bus master capability.  There is a significant performance impact of taking
+  an extra interrupt or polling for the completion of each transfer, as well
+  as difficulty sharing the single transfer engine between the transmit and
+  receive threads.  Using DMA transfers is a win only with large blocks or
+  with the flawed versions of the Intel Orion motherboard PCI controller.
+
+  The Boomerang chip's full-bus-master interface is useful, and has the
+  currently-unused advantages over other similar chips that queued transmit
+  packets may be reordered and receive buffer groups are associated with a
+  single frame.
+
+  With full-bus-master support, this driver uses a "RX_COPYBREAK" scheme.
+  Rather than a fixed intermediate receive buffer, this scheme allocates
+  full-sized skbuffs as receive buffers.  The value RX_COPYBREAK is used as
+  the copying breakpoint: it is chosen to trade-off the memory wasted by
+  passing the full-sized skbuff to the queue layer for all frames vs. the
+  copying cost of copying a frame to a correctly-sized skbuff.
+
+  IIIC. Synchronization
+  The driver runs as two independent, single-threaded flows of control.  One
+  is the send-packet routine, which enforces single-threaded use by the
+  dev->tbusy flag.  The other thread is the interrupt handler, which is single
+  threaded by the hardware and other software.
+
+  IV. Notes
+
+  Thanks to Cameron Spitzer and Terry Murphy of 3Com for providing development
+  3c590, 3c595, and 3c900 boards.
+  The name "Vortex" is the internal 3Com project name for the PCI ASIC, and
+  the EISA version is called "Demon".  According to Terry these names come
+  from rides at the local amusement park.
+
+  The new chips support both ethernet (1.5K) and FDDI (4.5K) packet sizes!
+  This driver only supports ethernet packets because of the skbuff allocation
+  limit of 4K.
+*/
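+
+/* A minimal sketch of the RX_COPYBREAK decision described above
+ * (illustration only; rx_copybreak is the original driver's tunable and is
+ * not a parameter of this RTnet port):
+ *
+ *	if (pkt_len < rx_copybreak) {
+ *		// copy the frame into a freshly allocated, right-sized
+ *		// buffer and recycle the full-sized ring buffer
+ *	} else {
+ *		// hand the full-sized buffer up the stack and refill the
+ *		// ring with a newly allocated one
+ *	}
+ */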
+
+/* This table drives the PCI probe routines.  It's mostly boilerplate in all
+   of the drivers, and will likely be provided by some future kernel.
+*/
+enum pci_flags_bit {
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+enum {	IS_VORTEX=1, IS_BOOMERANG=2, IS_CYCLONE=4, IS_TORNADO=8,
+	EEPROM_8BIT=0x10,	/* AKPM: Uses 0x230 as the base bitmaps for EEPROM reads */
+	HAS_PWR_CTRL=0x20, HAS_MII=0x40, HAS_NWAY=0x80, HAS_CB_FNS=0x100,
+	INVERT_MII_PWR=0x200, INVERT_LED_PWR=0x400, MAX_COLLISION_RESET=0x800,
+	EEPROM_OFFSET=0x1000, HAS_HWCKSM=0x2000 };
+
+enum vortex_chips {
+	CH_3C590 = 0,
+	CH_3C592,
+	CH_3C597,
+	CH_3C595_1,
+	CH_3C595_2,
+
+	CH_3C595_3,
+	CH_3C900_1,
+	CH_3C900_2,
+	CH_3C900_3,
+	CH_3C900_4,
+
+	CH_3C900_5,
+	CH_3C900B_FL,
+	CH_3C905_1,
+	CH_3C905_2,
+	CH_3C905B_1,
+
+	CH_3C905B_2,
+	CH_3C905B_FX,
+	CH_3C905C,
+	CH_3C980,
+	CH_3C9805,
+
+	CH_3CSOHO100_TX,
+	CH_3C555,
+	CH_3C556,
+	CH_3C556B,
+	CH_3C575,
+
+	CH_3C575_1,
+	CH_3CCFE575,
+	CH_3CCFE575CT,
+	CH_3CCFE656,
+	CH_3CCFEM656,
+
+	CH_3CCFEM656_1,
+	CH_3C450,
+};
+
+
+/* note: this array is directly indexed by the enums above, and MUST be
+ * kept in sync with both those enums and the PCI device table below
+ */
+static struct vortex_chip_info {
+	const char *name;
+	int flags;
+	int drv_flags;
+	int io_size;
+} vortex_info_tbl[] = {
+#define EISA_TBL_OFFSET	0		/* Offset of this entry for vortex_eisa_init */
+	{"3c590 Vortex 10Mbps",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c592 EISA 10Mbps Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c597 EISA Fast Demon/Vortex",					/* AKPM: from Don's 3c59x_cb.c 0.49H */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c595 Vortex 100baseTx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c595 Vortex 100baseT4",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+
+	{"3c595 Vortex 100base-MII",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_VORTEX, 32, },
+	{"3c900 Boomerang 10baseT",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
+	{"3c900 Boomerang 10Mbps Combo",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG, 64, },
+	{"3c900 Cyclone 10Mbps TPO",						/* AKPM: from Don's 0.99M */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c900 Cyclone 10Mbps Combo",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+
+	{"3c900 Cyclone 10Mbps TPC",						/* AKPM: from Don's 0.99M */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c900B-FL Cyclone 10base-FL",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c905 Boomerang 100baseTx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
+	{"3c905 Boomerang 100baseT4",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII, 64, },
+	{"3c905B Cyclone 100baseTx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+
+	{"3c905B Cyclone 10/100/BNC",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c905B-FX Cyclone 100baseFx",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c905C Tornado",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c980 Cyclone",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+	{"3c982 Dual Port Server Cyclone",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_HWCKSM, 128, },
+
+	{"3cSOHO100-TX Hurricane",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_HWCKSM, 128, },
+	{"3c555 Laptop Hurricane",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|EEPROM_8BIT|HAS_HWCKSM, 128, },
+	{"3c556 Laptop Tornado",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_8BIT|HAS_CB_FNS|INVERT_MII_PWR|
+	 HAS_HWCKSM, 128, },
+	{"3c556B Laptop Hurricane",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|EEPROM_OFFSET|HAS_CB_FNS|INVERT_MII_PWR|
+	 HAS_HWCKSM, 128, },
+	{"3c575 [Megahertz] 10/100 LAN	CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+
+	{"3c575 Boomerang CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_BOOMERANG|HAS_MII|EEPROM_8BIT, 128, },
+	{"3CCFE575BT Cyclone CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|
+	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
+	{"3CCFE575CT Tornado CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+	{"3CCFE656 Cyclone CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
+	{"3CCFEM656B Cyclone+Winmodem CardBus",
+	 PCI_USES_IO|PCI_USES_MASTER, IS_CYCLONE|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 INVERT_LED_PWR|HAS_HWCKSM, 128, },
+
+	{"3CXFEM656C Tornado+Winmodem CardBus",			/* From pcmcia-cs-3.1.5 */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_CB_FNS|EEPROM_8BIT|INVERT_MII_PWR|
+	 MAX_COLLISION_RESET|HAS_HWCKSM, 128, },
+	{"3c450 HomePNA Tornado",						/* AKPM: from Don's 0.99Q */
+	 PCI_USES_IO|PCI_USES_MASTER, IS_TORNADO|HAS_NWAY|HAS_HWCKSM, 128, },
+	{0,}, /* 0 terminated list. */
+};
+
+
+static struct pci_device_id vortex_pci_tbl[] = {
+	{ 0x10B7, 0x5900, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C590 },
+	{ 0x10B7, 0x5920, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C592 },
+	{ 0x10B7, 0x5970, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C597 },
+	{ 0x10B7, 0x5950, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_1 },
+	{ 0x10B7, 0x5951, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_2 },
+
+	{ 0x10B7, 0x5952, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C595_3 },
+	{ 0x10B7, 0x9000, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_1 },
+	{ 0x10B7, 0x9001, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_2 },
+	{ 0x10B7, 0x9004, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_3 },
+	{ 0x10B7, 0x9005, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_4 },
+
+	{ 0x10B7, 0x9006, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900_5 },
+	{ 0x10B7, 0x900A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C900B_FL },
+	{ 0x10B7, 0x9050, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_1 },
+	{ 0x10B7, 0x9051, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905_2 },
+	{ 0x10B7, 0x9055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_1 },
+
+	{ 0x10B7, 0x9058, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_2 },
+	{ 0x10B7, 0x905A, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905B_FX },
+	{ 0x10B7, 0x9200, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C905C },
+	{ 0x10B7, 0x9800, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C980 },
+	{ 0x10B7, 0x9805, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C9805 },
+
+	{ 0x10B7, 0x7646, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CSOHO100_TX },
+	{ 0x10B7, 0x5055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C555 },
+	{ 0x10B7, 0x6055, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556 },
+	{ 0x10B7, 0x6056, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C556B },
+	{ 0x10B7, 0x5b57, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575 },
+
+	{ 0x10B7, 0x5057, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C575_1 },
+	{ 0x10B7, 0x5157, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575 },
+	{ 0x10B7, 0x5257, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE575CT },
+	{ 0x10B7, 0x6560, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFE656 },
+	{ 0x10B7, 0x6562, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656 },
+
+	{ 0x10B7, 0x6564, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3CCFEM656_1 },
+	{ 0x10B7, 0x4500, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CH_3C450 },
+	{0,}						/* 0 terminated list. */
+};
+MODULE_DEVICE_TABLE(pci, vortex_pci_tbl);
+
+
+/* Operational definitions.
+   These are not used by other compilation units and thus are not
+   exported in a ".h" file.
+
+   First the windows.  There are eight register windows, with the command
+   and status registers available in each.
+*/
+#define EL3WINDOW(win_num) outw(SelectWindow + (win_num), ioaddr + EL3_CMD)
+#define EL3_CMD 0x0e
+#define EL3_STATUS 0x0e
+
+/* The top five bits written to EL3_CMD are a command, the lower
+   11 bits are the parameter, if applicable.
+   Note that 11 parameter bits were fine for ethernet, but the new chips
+   can handle FDDI-length frames (~4500 octets); parameters now count
+   32-bit 'Dwords' rather than octets. */
+
+enum vortex_cmd {
+	TotalReset = 0<<11, SelectWindow = 1<<11, StartCoax = 2<<11,
+	RxDisable = 3<<11, RxEnable = 4<<11, RxReset = 5<<11,
+	UpStall = 6<<11, UpUnstall = (6<<11)+1,
+	DownStall = (6<<11)+2, DownUnstall = (6<<11)+3,
+	RxDiscard = 8<<11, TxEnable = 9<<11, TxDisable = 10<<11, TxReset = 11<<11,
+	FakeIntr = 12<<11, AckIntr = 13<<11, SetIntrEnb = 14<<11,
+	SetStatusEnb = 15<<11, SetRxFilter = 16<<11, SetRxThreshold = 17<<11,
+	SetTxThreshold = 18<<11, SetTxStart = 19<<11,
+	StartDMAUp = 20<<11, StartDMADown = (20<<11)+1, StatsEnable = 21<<11,
+	StatsDisable = 22<<11, StopCoax = 23<<11, SetFilterBit = 25<<11,};
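+
+/* Example: a command is issued by writing the opcode plus its 11-bit
+ * parameter to EL3_CMD, e.g. selecting register window 7:
+ *
+ *	outw(SelectWindow + 7, ioaddr + EL3_CMD);	// == EL3WINDOW(7)
+ */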
+
+/* The SetRxFilter command accepts the following classes: */
+enum RxFilter {
+	RxStation = 1, RxMulticast = 2, RxBroadcast = 4, RxProm = 8 };
+
+/* Bits in the general status register. */
+enum vortex_status {
+	IntLatch = 0x0001, HostError = 0x0002, TxComplete = 0x0004,
+	TxAvailable = 0x0008, RxComplete = 0x0010, RxEarly = 0x0020,
+	IntReq = 0x0040, StatsFull = 0x0080,
+	DMADone = 1<<8, DownComplete = 1<<9, UpComplete = 1<<10,
+	DMAInProgress = 1<<11,			/* DMA controller is still busy.*/
+	CmdInProgress = 1<<12,			/* EL3_CMD is still busy.*/
+};
+
+/* Register window 1 offsets, the window used in normal operation.
+   On the Vortex this window is always mapped at offsets 0x10-0x1f. */
+enum Window1 {
+	TX_FIFO = 0x10,  RX_FIFO = 0x10,  RxErrors = 0x14,
+	RxStatus = 0x18,  Timer=0x1A, TxStatus = 0x1B,
+	TxFree = 0x1C, /* Remaining free bytes in Tx buffer. */
+};
+enum Window0 {
+	Wn0EepromCmd = 10,		/* Window 0: EEPROM command register. */
+	Wn0EepromData = 12,		/* Window 0: EEPROM results register. */
+	IntrStatus=0x0E,		/* Valid in all windows. */
+};
+enum Win0_EEPROM_bits {
+	EEPROM_Read = 0x80, EEPROM_WRITE = 0x40, EEPROM_ERASE = 0xC0,
+	EEPROM_EWENB = 0x30,		/* Enable erasing/writing for 10 msec. */
+	EEPROM_EWDIS = 0x00,		/* Disable EWENB before 10 msec timeout. */
+};
+/* EEPROM locations. */
+enum eeprom_offset {
+	PhysAddr01=0, PhysAddr23=1, PhysAddr45=2, ModelID=3,
+	EtherLink3ID=7, IFXcvrIO=8, IRQLine=9,
+	NodeAddr01=10, NodeAddr23=11, NodeAddr45=12,
+	DriverTune=13, Checksum=15};
+
+enum Window2 {			/* Window 2. */
+	Wn2_ResetOptions=12,
+};
+enum Window3 {			/* Window 3: MAC/config bits. */
+	Wn3_Config=0, Wn3_MAC_Ctrl=6, Wn3_Options=8,
+};
+
+#define BFEXT(value, offset, bitcount)					\
+	((((unsigned long)(value)) >> (offset)) & ((1 << (bitcount)) - 1))
+
+#define BFINS(lhs, rhs, offset, bitcount)				\
+	(((lhs) & ~((((1 << (bitcount)) - 1)) << (offset))) |		\
+		(((rhs) & ((1 << (bitcount)) - 1)) << (offset)))
+
+#define RAM_SIZE(v)		BFEXT(v, 0, 3)
+#define RAM_WIDTH(v)	BFEXT(v, 3, 1)
+#define RAM_SPEED(v)	BFEXT(v, 4, 2)
+#define ROM_SIZE(v)		BFEXT(v, 6, 2)
+#define RAM_SPLIT(v)	BFEXT(v, 16, 2)
+#define XCVR(v)			BFEXT(v, 20, 4)
+#define AUTOSELECT(v)	BFEXT(v, 24, 1)
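+
+/* Worked example, using a hypothetical config = 0x01000000 read from
+ * Wn3_Config: XCVR(config) = BFEXT(config, 20, 4) = (config >> 20) & 0xf = 0
+ * (XCVR_10baseT) and AUTOSELECT(config) = (config >> 24) & 0x1 = 1;
+ * BFINS(config, XCVR_NWAY, 20, 4) writes 8 back into the same 4-bit field. */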
+
+enum Window4 {		/* Window 4: Xcvr/media bits. */
+	Wn4_FIFODiag = 4, Wn4_NetDiag = 6, Wn4_PhysicalMgmt=8, Wn4_Media = 10,
+};
+enum Win4_Media_bits {
+	Media_SQE = 0x0008,		/* Enable SQE error counting for AUI. */
+	Media_10TP = 0x00C0,	/* Enable link beat and jabber for 10baseT. */
+	Media_Lnk = 0x0080,		/* Enable just link beat for 100TX/100FX. */
+	Media_LnkBeat = 0x0800,
+};
+enum Window7 {					/* Window 7: Bus Master control. */
+	Wn7_MasterAddr = 0, Wn7_MasterLen = 6, Wn7_MasterStatus = 12,
+};
+/* Boomerang bus master control registers. */
+enum MasterCtrl {
+	PktStatus = 0x20, DownListPtr = 0x24, FragAddr = 0x28, FragLen = 0x2c,
+	TxFreeThreshold = 0x2f, UpPktStatus = 0x30, UpListPtr = 0x38,
+};
+
+/* The Rx and Tx descriptor lists.
+   Caution Alpha hackers: these types are 32 bits!  Note also the 8 byte
+   alignment constraint on tx_ring[] and rx_ring[]. */
+#define LAST_FRAG	0x80000000			/* Last Addr/Len pair in descriptor. */
+#define DN_COMPLETE	0x00010000			/* This packet has been downloaded */
+struct boom_rx_desc {
+	u32 next;					/* Last entry points to 0.   */
+	s32 status;
+	u32 addr;					/* Up to 63 addr/len pairs possible. */
+	s32 length;					/* Set LAST_FRAG to indicate last pair. */
+};
+/* Values for the Rx status entry. */
+enum rx_desc_status {
+	RxDComplete=0x00008000, RxDError=0x4000,
+	/* See boomerang_rx() for actual error bits */
+	IPChksumErr=1<<25, TCPChksumErr=1<<26, UDPChksumErr=1<<27,
+	IPChksumValid=1<<29, TCPChksumValid=1<<30, UDPChksumValid=1<<31,
+};
+
+// *** RTnet ***
+//#ifdef MAX_SKB_FRAGS
+//#define DO_ZEROCOPY 1
+//#else
+#define DO_ZEROCOPY 0
+//#endif
+
+struct boom_tx_desc {
+	u32 next;					/* Last entry points to 0.   */
+	s32 status;					/* bits 0:12 length, others see below.	*/
+#if DO_ZEROCOPY
+	struct {
+		u32 addr;
+		s32 length;
+	} frag[1+MAX_SKB_FRAGS];
+#else
+	u32 addr;
+	s32 length;
+#endif
+};
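+
+/* Note: with DO_ZEROCOPY forced to 0 for RTnet, each Tx descriptor carries
+ * exactly one addr/len pair; the zerocopy fragment path of the original
+ * driver is compiled out. */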
+
+/* Values for the Tx status entry. */
+enum tx_desc_status {
+	CRCDisable=0x2000, TxDComplete=0x8000,
+	AddIPChksum=0x02000000, AddTCPChksum=0x04000000, AddUDPChksum=0x08000000,
+	TxIntrUploaded=0x80000000,		/* IRQ when in FIFO, but maybe not sent. */
+};
+
+/* Chip features we care about in vp->capabilities, read from the EEPROM. */
+enum ChipCaps { CapBusMaster=0x20, CapPwrMgmt=0x2000 };
+
+struct vortex_private {
+	/* The Rx and Tx rings should be quad-word-aligned. */
+	struct boom_rx_desc* rx_ring;
+	struct boom_tx_desc* tx_ring;
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_ring_dma;
+	/* The addresses of transmit- and receive-in-place skbuffs. */
+
+	// *** RTnet ***
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	// *** RTnet ***
+
+	struct rtnet_device *next_module;		/* NULL if PCI device */
+	unsigned int cur_rx, cur_tx;		/* The next free ring entry */
+	unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
+	struct net_device_stats stats;
+	struct rtskb *tx_skb;				/* Packet being eaten by bus master ctrl.  */
+	dma_addr_t tx_skb_dma;				/* Allocated DMA address for bus master ctrl DMA.   */
+
+	/* PCI configuration space information. */
+	struct pci_dev *pdev;
+	char *cb_fn_base;					/* CardBus function status addr space. */
+
+	/* Some values here are only for performance evaluation and path-coverage */
+	int rx_nocopy, rx_copy, queued_packet, rx_csumhits;
+	int card_idx;
+
+	/* The remainder are related to chip state, mostly media selection. */
+	struct timer_list timer;			/* Media selection timer. */
+	struct timer_list rx_oom_timer;		/* Rx skb allocation retry timer */
+	int options;						/* User-settable misc. driver options. */
+	unsigned int media_override:4,		/* Passed-in media type. */
+		default_media:4,				/* Read from the EEPROM/Wn3_Config. */
+		full_duplex:1, force_fd:1, autoselect:1,
+		bus_master:1,					/* Vortex can only do a fragment bus-m. */
+		full_bus_master_tx:1, full_bus_master_rx:2, /* Boomerang  */
+		flow_ctrl:1,					/* Use 802.3x flow control (PAUSE only) */
+		partner_flow_ctrl:1,			/* Partner supports flow control */
+		has_nway:1,
+		enable_wol:1,					/* Wake-on-LAN is enabled */
+		pm_state_valid:1,				/* power_state[] has sane contents */
+		open:1,
+		medialock:1,
+		must_free_region:1;				/* Flag: if zero, Cardbus owns the I/O region */
+	int drv_flags;
+	u16 status_enable;
+	u16 intr_enable;
+	u16 available_media;				/* From Wn3_Options. */
+	u16 capabilities, info1, info2;		/* Various, from EEPROM. */
+	u16 advertising;					/* NWay media advertisement */
+	unsigned char phys[2];				/* MII device addresses. */
+	u16 deferred;						/* Resend these interrupts when we
+								 * bail from the ISR */
+	u16 io_size;						/* Size of PCI region (for release_region) */
+	rtdm_lock_t lock;					/* Serialise access to device & its vortex_private */
+	spinlock_t mdio_lock;				/* Serialise access to mdio hardware */
+	u32 power_state[16];
+	rtdm_irq_t irq_handle;
+};
+
+/* The action to take with a media selection timer tick.
+   Note that we deviate from the 3Com order by checking 10base2 before AUI.
+*/
+enum xcvr_types {
+	XCVR_10baseT=0, XCVR_AUI, XCVR_10baseTOnly, XCVR_10base2, XCVR_100baseTx,
+	XCVR_100baseFx, XCVR_MII=6, XCVR_NWAY=8, XCVR_ExtMII=9, XCVR_Default=10,
+};
+
+static struct media_table {
+	char *name;
+	unsigned int media_bits:16,		/* Bits to set in Wn4_Media register. */
+		mask:8,						/* The transceiver-present bit in Wn3_Config.*/
+		next:8;						/* The media type to try next. */
+	int wait;						/* Time before we check media status. */
+} media_tbl[] = {
+	{	"10baseT",   Media_10TP,0x08, XCVR_10base2, (14*HZ)/10},
+	{ "10Mbs AUI", Media_SQE, 0x20, XCVR_Default, (1*HZ)/10},
+	{ "undefined", 0,			0x80, XCVR_10baseT, 10000},
+	{ "10base2",   0,			0x10, XCVR_AUI,		(1*HZ)/10},
+	{ "100baseTX", Media_Lnk, 0x02, XCVR_100baseFx, (14*HZ)/10},
+	{ "100baseFX", Media_Lnk, 0x04, XCVR_MII,		(14*HZ)/10},
+	{ "MII",               0,			0x41, XCVR_10baseT, 3*HZ },
+	{ "undefined", 0,			0x01, XCVR_10baseT, 10000},
+	{ "Autonegotiate", 0,		0x41, XCVR_10baseT, 3*HZ},
+	{ "MII-External",      0,		0x41, XCVR_10baseT, 3*HZ },
+	{ "Default",   0,			0xFF, XCVR_10baseT, 10000},
+};
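+
+/* Note: media_tbl forms a search order through its 'next' fields;
+ * vortex_up() starts at XCVR_100baseTx and follows 'next' until it reaches
+ * an entry whose 'mask' bit is set in vp->available_media. */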
+
+static int vortex_probe1(struct pci_dev *pdev, long ioaddr, int irq,
+			int chip_idx, int card_idx);
+static void vortex_up(struct rtnet_device *rtdev);
+static void vortex_down(struct rtnet_device *rtdev);
+static int vortex_open(struct rtnet_device *rtdev);
+static void mdio_sync(long ioaddr, int bits);
+static int mdio_read(struct rtnet_device *rtdev, int phy_id, int location);
+static void mdio_write(struct rtnet_device *vp, int phy_id, int location, int value);
+
+// *** RTnet ***
+//static void vortex_timer(unsigned long arg);
+//static void rx_oom_timer(unsigned long arg);
+// *** RTnet ***
+
+static int vortex_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int boomerang_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int vortex_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int boomerang_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int vortex_interrupt(rtdm_irq_t *irq_handle);
+static int boomerang_interrupt(rtdm_irq_t *irq_handle);
+static int vortex_close(struct rtnet_device *rtdev);
+static void dump_tx_ring(struct rtnet_device *rtdev);
+
+static void update_stats(long ioaddr, struct rtnet_device *dev);
+static struct net_device_stats *vortex_get_stats(struct rtnet_device *rtdev);
+
+static void set_rx_mode(struct rtnet_device *rtdev);
+
+// *** RTnet ***
+//static int vortex_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+//static void vortex_tx_timeout(struct net_device *dev);
+// *** RTnet ***
+
+static void acpi_set_WOL(struct rtnet_device *rtdev);
+
+/* This driver uses 'options' to pass the media type, full-duplex flag, etc. */
+/* Option count limit only -- unlimited interfaces are supported. */
+#define MAX_UNITS 8
+static int options[MAX_UNITS] = { -1, -1, -1, -1, -1, -1, -1, -1,};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int hw_checksums[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int flow_ctrl[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int enable_wol[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+module_param(debug, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+module_param_array(hw_checksums, int, NULL, 0444);
+module_param_array(flow_ctrl, int, NULL, 0444);
+module_param_array(enable_wol, int, NULL, 0444);
+/*** RTnet ***
+     MODULE_PARM(rx_copybreak, "i");
+     *** RTnet ***/
+module_param(max_interrupt_work, int, 0444);
+/*** RTnet ***
+     MODULE_PARM(compaq_ioaddr, "i");
+     MODULE_PARM(compaq_irq, "i");
+     MODULE_PARM(compaq_device_id, "i");
+     MODULE_PARM(watchdog, "i");
+     *** RTnet ***/
+MODULE_PARM_DESC(debug, "3c59x debug level (0-6)");
+MODULE_PARM_DESC(options, "3c59x: Bits 0-3: media type, bit 4: bus mastering, bit 9: full duplex");
+MODULE_PARM_DESC(full_duplex, "3c59x full duplex setting(s) (1)");
+MODULE_PARM_DESC(hw_checksums, "3c59x Hardware checksum checking by adapter(s) (0-1)");
+MODULE_PARM_DESC(flow_ctrl, "3c59x 802.3x flow control usage (PAUSE only) (0-1)");
+MODULE_PARM_DESC(enable_wol, "3c59x: Turn on Wake-on-LAN for adapter(s) (0-1)");
+/*** RTnet ***
+     MODULE_PARM_DESC(rx_copybreak, "3c59x copy breakpoint for copy-only-tiny-frames");
+     *** RTnet ***/
+MODULE_PARM_DESC(max_interrupt_work, "3c59x maximum events handled per interrupt");
+/*** RTnet ***
+     MODULE_PARM_DESC(compaq_ioaddr, "3c59x PCI I/O base address (Compaq BIOS problem workaround)");
+     MODULE_PARM_DESC(compaq_irq, "3c59x PCI IRQ number (Compaq BIOS problem workaround)");
+     MODULE_PARM_DESC(compaq_device_id, "3c59x PCI device ID (Compaq BIOS problem workaround)");
+     MODULE_PARM_DESC(watchdog, "3c59x transmit timeout in milliseconds");
+     *** RTnet ***/
+
+/* #define dev_alloc_skb dev_alloc_skb_debug */
+
+/* A list of all installed Vortex EISA devices, for removing the driver module. */
+static struct rtnet_device *root_vortex_eisa_dev;
+
+/* Variables to work-around the Compaq PCI BIOS32 problem. */
+// *** RTnet ***
+//static int compaq_ioaddr, compaq_irq, compaq_device_id = 0x5900;
+// *** RTnet ***
+
+static int vortex_cards_found;
+
+
+/* returns 0 on success, or negative on error */
+static int vortex_init_one (struct pci_dev *pdev,
+			const struct pci_device_id *ent)
+{
+	int rc;
+
+	if( vortex_cards_found >= cards )
+		return -ENODEV;
+
+	/* wake up and enable device */
+	if (pci_enable_device (pdev)) {
+		rc = -EIO;
+	} else {
+		rc = vortex_probe1 (pdev, pci_resource_start (pdev, 0), pdev->irq,
+				ent->driver_data, vortex_cards_found);
+		if (rc == 0)
+			vortex_cards_found++;
+	}
+	return rc;
+}
+
+/*
+ * Start up the PCI device which is described by *pdev.
+ * Return 0 on success.
+ *
+ * NOTE: pdev can be NULL, for the case of an EISA driver
+ */
+static int vortex_probe1(struct pci_dev *pdev,
+			long ioaddr, int irq,
+			int chip_idx, int card_idx)
+{
+	// *** RTnet ***
+	struct rtnet_device *rtdev = NULL;
+	// *** RTnet ***
+
+	struct vortex_private *vp;
+	int option;
+	unsigned int eeprom[0x40], checksum = 0;		/* EEPROM contents */
+	int i, step;
+	static int printed_version;
+	int retval, print_info;
+	struct vortex_chip_info * const vci = &vortex_info_tbl[chip_idx];
+	const char *print_name;
+
+
+
+	if (!printed_version) {
+		printk (version);
+		printed_version = 1;
+	}
+
+	print_name = pdev ? pci_name(pdev) : "3c59x";
+
+	// *** RTnet ***
+	rtdev = rt_alloc_etherdev(sizeof(*vp), RX_RING_SIZE * 2 + TX_RING_SIZE);
+	retval = -ENOMEM;
+	if (!rtdev) {
+		printk (KERN_ERR PFX "unable to allocate etherdev, aborting\n");
+		goto out;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	memset(rtdev->priv, 0, sizeof(*vp));
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	// *** RTnet ***
+
+	vp = rtdev->priv;
+
+	/* The lower four bits are the media type. */
+	if (rtdev->mem_start) {
+		/*
+		 * The 'options' param is passed in as the third arg to the
+		 * LILO 'ether=' argument for non-modular use
+		 */
+		option = rtdev->mem_start;
+	}
+	else if (card_idx < MAX_UNITS)
+		option = options[card_idx];
+	else
+		option = -1;
+
+	if (option > 0) {
+		if (option & 0x8000)
+			vortex_debug = 7;
+		if (option & 0x4000)
+			vortex_debug = 2;
+		if (option & 0x0400)
+			vp->enable_wol = 1;
+	}
+
+	print_info = (vortex_debug > 1);
+	if (print_info)
+		printk (KERN_INFO "See Documentation/networking/vortex.txt\n");
+
+	printk(KERN_INFO "%s: 3Com %s %s at 0x%lx. Vers " DRV_VERSION "\n",
+		print_name,
+		pdev ? "PCI" : "EISA",
+		vci->name,
+		ioaddr);
+
+	rtdev->base_addr = ioaddr;
+	rtdev->irq = irq;
+	rtdev->mtu = mtu;
+	vp->drv_flags = vci->drv_flags;
+	vp->has_nway = (vci->drv_flags & HAS_NWAY) ? 1 : 0;
+	vp->io_size = vci->io_size;
+	vp->card_idx = card_idx;
+
+	/* module list only for EISA devices */
+	if (pdev == NULL) {
+		vp->next_module = root_vortex_eisa_dev;
+		root_vortex_eisa_dev = rtdev;
+	}
+
+	/* PCI-only startup logic */
+	if (pdev) {
+		/* EISA resources already marked, so only PCI needs to do this here */
+		/* Ignore return value, because Cardbus drivers already allocate for us */
+		if (!request_region(ioaddr, vci->io_size, print_name))
+			printk(KERN_INFO "rt_3c59x: request region failed\n");
+		else
+			vp->must_free_region = 1;
+
+		/* enable bus-mastering if necessary */
+		if (vci->flags & PCI_USES_MASTER)
+			pci_set_master (pdev);
+
+		if (vci->drv_flags & IS_VORTEX) {
+			u8 pci_latency;
+			u8 new_latency = 248;
+
+			/* Check the PCI latency value.  On the 3c590 series the latency timer
+			   must be set to the maximum value to avoid data corruption that occurs
+			   when the timer expires during a transfer.  This bug exists in the
+			   Vortex chips only. */
+			pci_read_config_byte(pdev, PCI_LATENCY_TIMER, &pci_latency);
+			if (pci_latency < new_latency) {
+				printk(KERN_INFO "%s: Overriding PCI latency"
+					" timer (CFLT) setting of %d, new value is %d.\n",
+					print_name, pci_latency, new_latency);
+				pci_write_config_byte(pdev, PCI_LATENCY_TIMER, new_latency);
+			}
+		}
+	}
+
+	rtdm_lock_init(&vp->lock);
+	spin_lock_init(&vp->mdio_lock);
+	vp->pdev = pdev;
+
+	/* Makes sure rings are at least 16 byte aligned. */
+	vp->rx_ring = dma_alloc_coherent(
+			&pdev->dev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE
+			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			&vp->rx_ring_dma,
+			GFP_ATOMIC);
+	retval = -ENOMEM;
+	if (vp->rx_ring == NULL)
+		goto free_region;
+
+	vp->tx_ring = (struct boom_tx_desc *)(vp->rx_ring + RX_RING_SIZE);
+	vp->tx_ring_dma = vp->rx_ring_dma + sizeof(struct boom_rx_desc) * RX_RING_SIZE;
+
+	/* if we are a PCI driver, we store info in pdev->driver_data
+	 * instead of a module list */
+	if (pdev)
+		pci_set_drvdata(pdev, rtdev);
+
+	vp->media_override = 7;
+	if (option >= 0) {
+		vp->media_override = ((option & 7) == 2)  ?  0	:  option & 15;
+		if (vp->media_override != 7)
+			vp->medialock = 1;
+		vp->full_duplex = (option & 0x200) ? 1 : 0;
+		vp->bus_master = (option & 16) ? 1 : 0;
+	}
+
+	if (card_idx < MAX_UNITS) {
+		if (full_duplex[card_idx] > 0)
+			vp->full_duplex = 1;
+		if (flow_ctrl[card_idx] > 0)
+			vp->flow_ctrl = 1;
+		if (enable_wol[card_idx] > 0)
+			vp->enable_wol = 1;
+	}
+
+	vp->force_fd = vp->full_duplex;
+	vp->options = option;
+
+	/* Read the station address from the EEPROM. */
+	EL3WINDOW(0);
+	{
+		int base;
+
+		if (vci->drv_flags & EEPROM_8BIT)
+			base = 0x230;
+		else if (vci->drv_flags & EEPROM_OFFSET)
+			base = EEPROM_Read + 0x30;
+		else
+			base = EEPROM_Read;
+
+		for (i = 0; i < 0x40; i++) {
+			int timer;
+			outw(base + i, ioaddr + Wn0EepromCmd);
+			/* Pause for at least 162 us for the read to take place. */
+			for (timer = 10; timer >= 0; timer--) {
+				udelay(162);
+				if ((inw(ioaddr + Wn0EepromCmd) & 0x8000) == 0)
+					break;
+			}
+			eeprom[i] = inw(ioaddr + Wn0EepromData);
+		}
+	}
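+	/* The EEPROM image is protected by an XOR checksum over words
+	 * 0x00-0x17 (extended through word 0x20 on newer layouts), folded to
+	 * 8 bits; a valid image folds to 0. */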
+	for (i = 0; i < 0x18; i++)
+		checksum ^= eeprom[i];
+	checksum = (checksum ^ (checksum >> 8)) & 0xff;
+	if (checksum != 0x00) {		/* Grrr, needless incompatible change by 3Com. */
+		while (i < 0x21)
+			checksum ^= eeprom[i++];
+		checksum = (checksum ^ (checksum >> 8)) & 0xff;
+	}
+	if ((checksum != 0x00) && !(vci->drv_flags & IS_TORNADO))
+		printk(" ***INVALID CHECKSUM %4.4x*** ", checksum);
+
+	for (i = 0; i < 3; i++)
+		((u16 *)rtdev->dev_addr)[i] = htons(eeprom[i + 10]);
+	if (print_info) {
+		for (i = 0; i < 6; i++)
+			printk("%c%2.2x", i ? ':' : ' ', rtdev->dev_addr[i]);
+	}
+	EL3WINDOW(2);
+	for (i = 0; i < 6; i++)
+		outb(rtdev->dev_addr[i], ioaddr + i);
+
+#ifdef __sparc__
+	if (print_info)
+		printk(", IRQ %s\n", __irq_itoa(rtdev->irq));
+#else
+	if (print_info)
+		printk(", IRQ %d\n", rtdev->irq);
+	/* Tell them about an invalid IRQ. */
+	if (rtdev->irq <= 0 || rtdev->irq >= NR_IRQS)
+		printk(KERN_WARNING " *** Warning: IRQ %d is unlikely to work! ***\n",
+			rtdev->irq);
+#endif
+
+	EL3WINDOW(4);
+	step = (inb(ioaddr + Wn4_NetDiag) & 0x1e) >> 1;
+	if (print_info) {
+		printk(KERN_INFO "  product code %02x%02x rev %02x.%d date %02d-"
+			"%02d-%02d\n", eeprom[6]&0xff, eeprom[6]>>8, eeprom[0x14],
+			step, (eeprom[4]>>5) & 15, eeprom[4] & 31, eeprom[4]>>9);
+	}
+
+
+	if (pdev && vci->drv_flags & HAS_CB_FNS) {
+		unsigned long fn_st_addr;			/* Cardbus function status space */
+		unsigned short n;
+
+		fn_st_addr = pci_resource_start (pdev, 2);
+		if (fn_st_addr) {
+			vp->cb_fn_base = ioremap(fn_st_addr, 128);
+			retval = -ENOMEM;
+			if (!vp->cb_fn_base)
+				goto free_ring;
+		}
+		if (print_info) {
+			printk(KERN_INFO "%s: CardBus functions mapped %8.8lx->%p\n",
+				print_name, fn_st_addr, vp->cb_fn_base);
+		}
+		EL3WINDOW(2);
+
+		n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		if (vp->drv_flags & INVERT_LED_PWR)
+			n |= 0x10;
+		if (vp->drv_flags & INVERT_MII_PWR)
+			n |= 0x4000;
+		outw(n, ioaddr + Wn2_ResetOptions);
+	}
+
+	/* Extract our information from the EEPROM data. */
+	vp->info1 = eeprom[13];
+	vp->info2 = eeprom[15];
+	vp->capabilities = eeprom[16];
+
+	if (vp->info1 & 0x8000) {
+		vp->full_duplex = 1;
+		if (print_info)
+			printk(KERN_INFO "Full duplex capable\n");
+	}
+
+	{
+		static const char * ram_split[] = {"5:3", "3:1", "1:1", "3:5"};
+		unsigned int config;
+		EL3WINDOW(3);
+		vp->available_media = inw(ioaddr + Wn3_Options);
+		if ((vp->available_media & 0xff) == 0)		/* Broken 3c916 */
+			vp->available_media = 0x40;
+		config = inl(ioaddr + Wn3_Config);
+		if (print_info) {
+			printk(KERN_DEBUG "  Internal config register is %4.4x, "
+				"transceivers %#x.\n", config, inw(ioaddr + Wn3_Options));
+			printk(KERN_INFO "  %dK %s-wide RAM %s Rx:Tx split, %s%s interface.\n",
+				8 << RAM_SIZE(config),
+				RAM_WIDTH(config) ? "word" : "byte",
+				ram_split[RAM_SPLIT(config)],
+				AUTOSELECT(config) ? "autoselect/" : "",
+				XCVR(config) > XCVR_ExtMII ? "<invalid transceiver>" :
+				media_tbl[XCVR(config)].name);
+		}
+		vp->default_media = XCVR(config);
+		if (vp->default_media == XCVR_NWAY)
+			vp->has_nway = 1;
+		vp->autoselect = AUTOSELECT(config);
+	}
+
+	if (vp->media_override != 7) {
+		printk(KERN_INFO "%s:  Media override to transceiver type %d (%s).\n",
+			print_name, vp->media_override,
+			media_tbl[vp->media_override].name);
+		rtdev->if_port = vp->media_override;
+	} else
+		rtdev->if_port = vp->default_media;
+
+	if (rtdev->if_port == XCVR_MII || rtdev->if_port == XCVR_NWAY) {
+		int phy, phy_idx = 0;
+		EL3WINDOW(4);
+		mii_preamble_required++;
+		mii_preamble_required++;
+		mdio_read(rtdev, 24, 1);
+		for (phy = 0; phy < 32 && phy_idx < 1; phy++) {
+			int mii_status, phyx;
+
+			/*
+			 * For the 3c905CX we look at index 24 first, because it bogusly
+			 * reports an external PHY at all indices
+			 */
+			if (phy == 0)
+				phyx = 24;
+			else if (phy <= 24)
+				phyx = phy - 1;
+			else
+				phyx = phy;
+			mii_status = mdio_read(rtdev, phyx, 1);
+			if (mii_status	&&  mii_status != 0xffff) {
+				vp->phys[phy_idx++] = phyx;
+				if (print_info) {
+					printk(KERN_INFO "  MII transceiver found at address %d,"
+						" status %4x.\n", phyx, mii_status);
+				}
+				if ((mii_status & 0x0040) == 0)
+					mii_preamble_required++;
+			}
+		}
+		mii_preamble_required--;
+		if (phy_idx == 0) {
+			printk(KERN_WARNING"  ***WARNING*** No MII transceivers found!\n");
+			vp->phys[0] = 24;
+		} else {
+			vp->advertising = mdio_read(rtdev, vp->phys[0], 4);
+			if (vp->full_duplex) {
+				/* Only advertise the FD media types. */
+				vp->advertising &= ~0x02A0;
+				mdio_write(rtdev, vp->phys[0], 4, vp->advertising);
+			}
+		}
+	}
+
+	if (vp->capabilities & CapBusMaster) {
+		vp->full_bus_master_tx = 1;
+		if (print_info) {
+			printk(KERN_INFO "  Enabling bus-master transmits and %s receives.\n",
+				(vp->info2 & 1) ? "early" : "whole-frame" );
+		}
+		vp->full_bus_master_rx = (vp->info2 & 1) ? 1 : 2;
+		vp->bus_master = 0;		/* AKPM: vortex only */
+	}
+
+	// *** RTnet ***
+	/* The 3c59x-specific entries in the device structure. */
+	rtdev->open = vortex_open;
+	if (vp->full_bus_master_tx) {
+		rtdev->hard_start_xmit = boomerang_start_xmit;
+		/* Actually, it still should work with iommu. */
+		rtdev->features |= NETIF_F_SG;
+		if (((hw_checksums[card_idx] == -1) && (vp->drv_flags & HAS_HWCKSM)) ||
+			(hw_checksums[card_idx] == 1)) {
+			rtdev->features |= NETIF_F_IP_CSUM;
+		}
+	} else {
+		rtdev->hard_start_xmit = vortex_start_xmit;
+	}
+	rtdev->get_stats = vortex_get_stats;
+
+	if (print_info) {
+		printk(KERN_INFO "%s: scatter/gather %sabled. h/w checksums %sabled\n",
+			print_name,
+			(rtdev->features & NETIF_F_SG) ? "en":"dis",
+			(rtdev->features & NETIF_F_IP_CSUM) ? "en":"dis");
+	}
+
+	rtdev->stop = vortex_close;
+	retval = rt_register_rtnetdev(rtdev);
+	if (retval) {
+		printk(KERN_ERR "rt_3c59x: rtnet device registration failed %d\n",retval);
+		goto free_ring;
+	}
+	return 0;
+
+	// *** RTnet ***
+
+  free_ring:
+	dma_free_coherent(&pdev->dev,
+		    sizeof(struct boom_rx_desc) * RX_RING_SIZE
+		    + sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+		    vp->rx_ring,
+		    vp->rx_ring_dma);
+  free_region:
+	if (vp->must_free_region)
+		release_region(ioaddr, vci->io_size);
+	rtdev_free (rtdev);
+	printk(KERN_ERR PFX "vortex_probe1 fails.  Returns %d\n", retval);
+  out:
+	return retval;
+}
+
+static void
+issue_and_wait(struct rtnet_device *rtdev, int cmd)
+{
+	int i;
+
+	outw(cmd, rtdev->base_addr + EL3_CMD);
+	for (i = 0; i < 2000; i++) {
+		if (!(inw(rtdev->base_addr + EL3_STATUS) & CmdInProgress))
+			return;
+	}
+
+	/* OK, that didn't work.  Do it the slow way.  One second */
+	for (i = 0; i < 100000; i++) {
+		if (!(inw(rtdev->base_addr + EL3_STATUS) & CmdInProgress)) {
+			if (vortex_debug > 1)
+				rtdm_printk(KERN_INFO "%s: command 0x%04x took %d usecs\n",
+					rtdev->name, cmd, i * 10);
+			return;
+		}
+		udelay(10);
+	}
+	rtdm_printk(KERN_ERR "%s: command 0x%04x did not complete! Status=0x%x\n",
+		rtdev->name, cmd, inw(rtdev->base_addr + EL3_STATUS));
+}
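+
+/* Note: the slow path above busy-waits up to 100000 * 10us = 1 s, which is
+ * the "interrupt locks of up to 1 second (issue_and_wait)" that the
+ * #warning block at the top of this file refers to. */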
+
+static void
+vortex_up(struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	unsigned int config;
+	int i;
+
+	if (vp->pdev && vp->enable_wol) {
+		pci_set_power_state(vp->pdev, 0);	/* Go active */
+		pci_restore_state(vp->pdev, vp->power_state);
+	}
+
+	/* Before initializing select the active media port. */
+	EL3WINDOW(3);
+	config = inl(ioaddr + Wn3_Config);
+
+	if (vp->media_override != 7) {
+		printk(KERN_INFO "%s: Media override to transceiver %d (%s).\n",
+			rtdev->name, vp->media_override,
+			media_tbl[vp->media_override].name);
+		rtdev->if_port = vp->media_override;
+	} else if (vp->autoselect) {
+		if (vp->has_nway) {
+			if (vortex_debug > 1)
+				printk(KERN_INFO "%s: using NWAY device table, not %d\n",
+					rtdev->name, rtdev->if_port);
+			rtdev->if_port = XCVR_NWAY;
+		} else {
+			/* Find first available media type, starting with 100baseTx. */
+			rtdev->if_port = XCVR_100baseTx;
+			while (! (vp->available_media & media_tbl[rtdev->if_port].mask))
+				rtdev->if_port = media_tbl[rtdev->if_port].next;
+			if (vortex_debug > 1)
+				printk(KERN_INFO "%s: first available media type: %s\n",
+					rtdev->name, media_tbl[rtdev->if_port].name);
+		}
+	} else {
+		rtdev->if_port = vp->default_media;
+		if (vortex_debug > 1)
+			printk(KERN_INFO "%s: using default media %s\n",
+				rtdev->name, media_tbl[rtdev->if_port].name);
+	}
+
+	init_timer(&vp->timer);
+	vp->timer.expires = RUN_AT(media_tbl[rtdev->if_port].wait);
+	vp->timer.data = (unsigned long)rtdev;
+	// *** RTnet  vp->timer.function = vortex_timer;		/* timer handler */
+	// *** RTnet  add_timer(&vp->timer);
+
+	init_timer(&vp->rx_oom_timer);
+	vp->rx_oom_timer.data = (unsigned long)rtdev;
+	// **** RTnet *** vp->rx_oom_timer.function = rx_oom_timer;
+
+	if (vortex_debug > 1)
+		printk(KERN_DEBUG "%s: Initial media type %s.\n",
+			rtdev->name, media_tbl[rtdev->if_port].name);
+
+	vp->full_duplex = vp->force_fd;
+	config = BFINS(config, rtdev->if_port, 20, 4);
+	if (vortex_debug > 6)
+		printk(KERN_DEBUG "vortex_up(): writing 0x%x to InternalConfig\n", config);
+	outl(config, ioaddr + Wn3_Config);
+
+	if (rtdev->if_port == XCVR_MII || rtdev->if_port == XCVR_NWAY) {
+		int mii_reg1, mii_reg5;
+		EL3WINDOW(4);
+		/* Read BMSR (reg1) only to clear old status. */
+		mii_reg1 = mdio_read(rtdev, vp->phys[0], 1);
+		mii_reg5 = mdio_read(rtdev, vp->phys[0], 5);
+		if (mii_reg5 == 0xffff	||  mii_reg5 == 0x0000)
+			;					/* No MII device or no link partner report */
+		else if ((mii_reg5 & 0x0100) != 0	/* 100baseTx-FD */
+			|| (mii_reg5 & 0x00C0) == 0x0040) /* 10T-FD, but not 100-HD */
+			vp->full_duplex = 1;
+		vp->partner_flow_ctrl = ((mii_reg5 & 0x0400) != 0);
+		if (vortex_debug > 1)
+			printk(KERN_INFO "%s: MII #%d status %4.4x, link partner capability %4.4x,"
+				" info1 %04x, setting %s-duplex.\n",
+				rtdev->name, vp->phys[0],
+				mii_reg1, mii_reg5,
+				vp->info1, ((vp->info1 & 0x8000) || vp->full_duplex) ? "full" : "half");
+		EL3WINDOW(3);
+	}
+
+	/* Set the full-duplex bit. */
+	outw(	((vp->info1 & 0x8000) || vp->full_duplex ? 0x20 : 0) |
+		(rtdev->mtu > 1500 ? 0x40 : 0) |
+		((vp->full_duplex && vp->flow_ctrl && vp->partner_flow_ctrl) ? 0x100 : 0),
+		ioaddr + Wn3_MAC_Ctrl);
+
+	if (vortex_debug > 1) {
+		printk(KERN_DEBUG "%s: vortex_up() InternalConfig %8.8x.\n",
+			rtdev->name, config);
+	}
+
+	issue_and_wait(rtdev, TxReset);
+	/*
+	 * Don't reset the PHY - that upsets autonegotiation during DHCP operations.
+	 */
+	issue_and_wait(rtdev, RxReset|0x04);
+
+	outw(SetStatusEnb | 0x00, ioaddr + EL3_CMD);
+
+	if (vortex_debug > 1) {
+		EL3WINDOW(4);
+		printk(KERN_DEBUG "%s: vortex_up() irq %d media status %4.4x.\n",
+			rtdev->name, rtdev->irq, inw(ioaddr + Wn4_Media));
+	}
+
+	/* Set the station address and mask in window 2 each time opened. */
+	EL3WINDOW(2);
+	for (i = 0; i < 6; i++)
+		outb(rtdev->dev_addr[i], ioaddr + i);
+	for (; i < 12; i+=2)
+		outw(0, ioaddr + i);
+
+	if (vp->cb_fn_base) {
+		unsigned short n = inw(ioaddr + Wn2_ResetOptions) & ~0x4010;
+		if (vp->drv_flags & INVERT_LED_PWR)
+			n |= 0x10;
+		if (vp->drv_flags & INVERT_MII_PWR)
+			n |= 0x4000;
+		outw(n, ioaddr + Wn2_ResetOptions);
+	}
+
+	if (rtdev->if_port == XCVR_10base2)
+		/* Start the thinnet transceiver. We should really wait 50ms...*/
+		outw(StartCoax, ioaddr + EL3_CMD);
+	if (rtdev->if_port != XCVR_NWAY) {
+		EL3WINDOW(4);
+		outw((inw(ioaddr + Wn4_Media) & ~(Media_10TP|Media_SQE)) |
+			media_tbl[rtdev->if_port].media_bits, ioaddr + Wn4_Media);
+	}
+
+	/* Switch to the stats window, and clear all stats by reading. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+	EL3WINDOW(6);
+	for (i = 0; i < 10; i++)
+		inb(ioaddr + i);
+	inw(ioaddr + 10);
+	inw(ioaddr + 12);
+	/* New: On the Vortex we must also clear the BadSSD counter. */
+	EL3WINDOW(4);
+	inb(ioaddr + 12);
+	/* ..and on the Boomerang we enable the extra statistics bits. */
+	outw(0x0040, ioaddr + Wn4_NetDiag);
+
+	/* Switch to register set 7 for normal use. */
+	EL3WINDOW(7);
+
+	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+		vp->cur_rx = vp->dirty_rx = 0;
+		/* Initialize the RxEarly register as recommended. */
+		outw(SetRxThreshold + (1536>>2), ioaddr + EL3_CMD);
+		outl(0x0020, ioaddr + PktStatus);
+		outl(vp->rx_ring_dma, ioaddr + UpListPtr);
+	}
+	if (vp->full_bus_master_tx) {		/* Boomerang bus master Tx. */
+		vp->cur_tx = vp->dirty_tx = 0;
+		if (vp->drv_flags & IS_BOOMERANG)
+			outb(PKT_BUF_SZ>>8, ioaddr + TxFreeThreshold); /* Room for a packet. */
+		/* Clear the Rx, Tx rings. */
+		for (i = 0; i < RX_RING_SIZE; i++)	/* AKPM: this is done in vortex_open, too */
+			vp->rx_ring[i].status = 0;
+		for (i = 0; i < TX_RING_SIZE; i++)
+			vp->tx_skbuff[i] = 0;
+		outl(0, ioaddr + DownListPtr);
+	}
+	/* Set receiver mode: presumably accept broadcast and physical addresses only. */
+	set_rx_mode(rtdev);
+	outw(StatsEnable, ioaddr + EL3_CMD); /* Turn on statistics. */
+
+//	issue_and_wait(dev, SetTxStart|0x07ff);
+	outw(RxEnable, ioaddr + EL3_CMD); /* Enable the receiver. */
+	outw(TxEnable, ioaddr + EL3_CMD); /* Enable transmitter. */
+	/* Allow status bits to be seen. */
+	vp->status_enable = SetStatusEnb | HostError|IntReq|StatsFull|TxComplete|
+		(vp->full_bus_master_tx ? DownComplete : TxAvailable) |
+		(vp->full_bus_master_rx ? UpComplete : RxComplete) |
+		(vp->bus_master ? DMADone : 0);
+	vp->intr_enable = SetIntrEnb | IntLatch | TxAvailable |
+		(vp->full_bus_master_rx ? 0 : RxComplete) |
+		StatsFull | HostError | TxComplete | IntReq
+		| (vp->bus_master ? DMADone : 0) | UpComplete | DownComplete;
+	outw(vp->status_enable, ioaddr + EL3_CMD);
+	/* Ack all pending events, and set active indicator mask. */
+	outw(AckIntr | IntLatch | TxAvailable | RxEarly | IntReq,
+		ioaddr + EL3_CMD);
+	outw(vp->intr_enable, ioaddr + EL3_CMD);
+	if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
+		writel(0x8000, vp->cb_fn_base + 4);
+	rtnetif_start_queue (rtdev);
+}
+
+static int
+vortex_open(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int i;
+	int retval;
+
+	// *** RTnet ***
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	if ((retval = rtdm_irq_request(&vp->irq_handle, rtdev->irq,
+						(vp->full_bus_master_rx ? boomerang_interrupt : vortex_interrupt),
+						0, "rt_3c59x", rtdev))) {
+		printk(KERN_ERR "%s: Could not reserve IRQ %d\n", rtdev->name, rtdev->irq);
+		goto out;
+	}
+	// *** RTnet ***
+
+	if (vp->full_bus_master_rx) { /* Boomerang bus master. */
+		if (vortex_debug > 2)
+			printk(KERN_DEBUG "%s:	Filling in the Rx ring.\n", rtdev->name);
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			struct rtskb *skb; // *** RTnet
+			vp->rx_ring[i].next = cpu_to_le32(vp->rx_ring_dma + sizeof(struct boom_rx_desc) * (i+1));
+			vp->rx_ring[i].status = 0;	/* Clear complete bit. */
+			vp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ | LAST_FRAG);
+			skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+			vp->rx_skbuff[i] = skb;
+			if (skb == NULL)
+				break;			/* Bad news!  */
+			// *** RTnet ***
+			rtskb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+			vp->rx_ring[i].addr = cpu_to_le32(pci_map_single(vp->pdev,
+													skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+			// *** RTnet ***
+		}
+		if (i != RX_RING_SIZE) {
+			int j;
+			printk(KERN_EMERG "%s: no memory for rx ring\n", rtdev->name);
+			for (j = 0; j < i; j++) {
+				if (vp->rx_skbuff[j]) {
+					dev_kfree_rtskb(vp->rx_skbuff[j]);
+					vp->rx_skbuff[j] = 0;
+				}
+			}
+			retval = -ENOMEM;
+			goto out_free_irq;
+		}
+		/* Wrap the ring. */
+		vp->rx_ring[i-1].next = cpu_to_le32(vp->rx_ring_dma);
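+		/*
+		 * The descriptors are chained by bus address: entry i points at
+		 * rx_ring_dma + (i + 1) * sizeof(struct boom_rx_desc), and the
+		 * wrap above points the last entry back at rx_ring_dma, so the
+		 * UpListPtr DMA engine can cycle the ring without CPU help.
+		 */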
+	}
+
+	vortex_up(rtdev);
+	return 0;
+
+  out_free_irq:
+
+	// *** RTnet ***
+	if ( (i=rtdm_irq_free(&vp->irq_handle))<0 )
+		return i;
+	rt_stack_disconnect(rtdev);
+	// *** RTnet ***
+  out:
+	if (vortex_debug > 1)
+		printk(KERN_ERR "%s: vortex_open() fails: returning %d\n", rtdev->name, retval);
+	return retval;
+}
+
+/*
+ * Handle uncommon interrupt sources.  This is a separate routine to minimize
+ * the cache impact.
+ */
+static void
+vortex_error(struct rtnet_device *rtdev, int status, nanosecs_abs_t *time_stamp)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int do_tx_reset = 0, reset_mask = 0;
+	unsigned char tx_status = 0;
+	int packets=0;
+
+	if (vortex_debug > 2) {
+		rtdm_printk(KERN_ERR "%s: vortex_error(), status=0x%x\n", rtdev->name, status);
+	}
+
+	if (status & TxComplete) {			/* Really "TxError" for us. */
+		tx_status = inb(ioaddr + TxStatus);
+		/* Presumably a tx-timeout. We must merely re-enable. */
+		if (vortex_debug > 2
+			|| (tx_status != 0x88 && vortex_debug > 0)) {
+			rtdm_printk(KERN_ERR "%s: Transmit error, Tx status register %2.2x.\n",
+				rtdev->name, tx_status);
+			if (tx_status == 0x82) {
+				rtdm_printk(KERN_ERR "Probably a duplex mismatch.  See "
+					"Documentation/networking/vortex.txt\n");
+			}
+			dump_tx_ring(rtdev);
+		}
+		if (tx_status & 0x14)  vp->stats.tx_fifo_errors++;
+		if (tx_status & 0x38)  vp->stats.tx_aborted_errors++;
+		outb(0, ioaddr + TxStatus);
+		if (tx_status & 0x30) {			/* txJabber or txUnderrun */
+			do_tx_reset = 1;
+		} else if ((tx_status & 0x08) && (vp->drv_flags & MAX_COLLISION_RESET)) {	/* maxCollisions */
+			do_tx_reset = 1;
+			reset_mask = 0x0108;		/* Reset interface logic, but not download logic */
+		} else {						/* Merely re-enable the transmitter. */
+			outw(TxEnable, ioaddr + EL3_CMD);
+		}
+	}
+
+	if (status & RxEarly) {				/* Rx early is unused. */
+		vortex_rx(rtdev, &packets, time_stamp);
+		outw(AckIntr | RxEarly, ioaddr + EL3_CMD);
+	}
+	if (status & StatsFull) {			/* Empty statistics. */
+		static int DoneDidThat;
+		if (vortex_debug > 4)
+			rtdm_printk(KERN_DEBUG "%s: Updating stats.\n", rtdev->name);
+		// *** RTnet *** update_stats(ioaddr, dev);
+		/* HACK: Disable statistics as an interrupt source. */
+		/* This occurs when we have the wrong media type! */
+		if (DoneDidThat == 0  &&
+			inw(ioaddr + EL3_STATUS) & StatsFull) {
+			rtdm_printk(KERN_WARNING "%s: Updating statistics failed, disabling "
+				"stats as an interrupt source.\n", rtdev->name);
+			EL3WINDOW(5);
+			outw(SetIntrEnb | (inw(ioaddr + 10) & ~StatsFull), ioaddr + EL3_CMD);
+			vp->intr_enable &= ~StatsFull;
+			EL3WINDOW(7);
+			DoneDidThat++;
+		}
+	}
+	if (status & IntReq) {		/* Restore all interrupt sources.  */
+		outw(vp->status_enable, ioaddr + EL3_CMD);
+		outw(vp->intr_enable, ioaddr + EL3_CMD);
+	}
+	if (status & HostError) {
+		u16 fifo_diag;
+		EL3WINDOW(4);
+		fifo_diag = inw(ioaddr + Wn4_FIFODiag);
+		rtdm_printk(KERN_ERR "%s: Host error, FIFO diagnostic register %4.4x.\n",
+			rtdev->name, fifo_diag);
+		/* Adapter failure requires Tx/Rx reset and reinit. */
+		if (vp->full_bus_master_tx) {
+			int bus_status = inl(ioaddr + PktStatus);
+			/* 0x80000000 PCI master abort. */
+			/* 0x40000000 PCI target abort. */
+			if (vortex_debug)
+				rtdm_printk(KERN_ERR "%s: PCI bus error, bus status %8.8x\n", rtdev->name, bus_status);
+
+			/* In this case, blow the card away */
+			vortex_down(rtdev);
+			issue_and_wait(rtdev, TotalReset | 0xff);
+			vortex_up(rtdev);		/* AKPM: bug.  vortex_up() assumes that the rx ring is full. It may not be. */
+		} else if (fifo_diag & 0x0400)
+			do_tx_reset = 1;
+		if (fifo_diag & 0x3000) {
+			/* Reset Rx fifo and upload logic */
+			issue_and_wait(rtdev, RxReset|0x07);
+			/* Set the Rx filter to the current state. */
+			set_rx_mode(rtdev);
+			outw(RxEnable, ioaddr + EL3_CMD); /* Re-enable the receiver. */
+			outw(AckIntr | HostError, ioaddr + EL3_CMD);
+		}
+	}
+
+	if (do_tx_reset) {
+		issue_and_wait(rtdev, TxReset|reset_mask);
+		outw(TxEnable, ioaddr + EL3_CMD);
+		if (!vp->full_bus_master_tx)
+			rtnetif_wake_queue(rtdev);
+	}
+}
+
+static int
+vortex_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	rtdm_lockctx_t context;
+
+	/* Put out the doubleword header... */
+	outl(skb->len, ioaddr + TX_FIFO);
+	if (vp->bus_master) {
+		/* Set the bus-master controller to transfer the packet. */
+		int len = (skb->len + 3) & ~3;
+		outl(	vp->tx_skb_dma = pci_map_single(vp->pdev, skb->data,
+							len, PCI_DMA_TODEVICE),
+			ioaddr + Wn7_MasterAddr);
+		outw(len, ioaddr + Wn7_MasterLen);
+		vp->tx_skb = skb;
+
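+		/*
+		 * RTnet timestamping sketch: when the caller supplies an
+		 * xmit_stamp slot, it holds a relative offset on entry; the
+		 * driver writes back (offset + current time) in big-endian
+		 * order, inside the IRQ-protected section so the stamp is taken
+		 * as close as possible to the StartDMADown kick below.
+		 */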
+		rtdm_lock_irqsave(context);
+		if (unlikely(skb->xmit_stamp != NULL))
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+						*skb->xmit_stamp);
+		outw(StartDMADown, ioaddr + EL3_CMD);
+		rtdm_lock_irqrestore(context);
+
+		/* rtnetif_wake_queue() will be called at the DMADone interrupt. */
+	} else {
+		rtdm_printk("rt_3x59x: UNSUPPORTED CODE PATH (device is lacking DMA support)!\n");
+		/* ... and the packet rounded to a doubleword. */
+		outsl(ioaddr + TX_FIFO, skb->data, (skb->len + 3) >> 2);
+		dev_kfree_rtskb (skb);
+		if (inw(ioaddr + TxFree) > 1536) {
+			rtnetif_start_queue (rtdev);	/* AKPM: redundant? */
+		} else {
+			/* Interrupt us when the FIFO has room for max-sized packet. */
+			rtnetif_stop_queue(rtdev);
+			outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+		}
+	}
+
+	//rtdev->trans_start = jiffies;
+
+	/* Clear the Tx status stack. */
+	{
+		int tx_status;
+		int i = 32;
+
+		while (--i > 0	&&	(tx_status = inb(ioaddr + TxStatus)) > 0) {
+			if (tx_status & 0x3C) {		/* A Tx-disabling error occurred.  */
+				if (vortex_debug > 2)
+					printk(KERN_DEBUG "%s: Tx error, status %2.2x.\n",
+						rtdev->name, tx_status);
+				if (tx_status & 0x04) vp->stats.tx_fifo_errors++;
+				if (tx_status & 0x38) vp->stats.tx_aborted_errors++;
+				if (tx_status & 0x30) {
+					issue_and_wait(rtdev, TxReset);
+				}
+				outw(TxEnable, ioaddr + EL3_CMD);
+			}
+			outb(0x00, ioaddr + TxStatus); /* Pop the status stack. */
+		}
+	}
+	return 0;
+}
+
+static int
+boomerang_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	/* Calculate the next Tx descriptor entry. */
+	int entry = vp->cur_tx % TX_RING_SIZE;
+	struct boom_tx_desc *prev_entry = &vp->tx_ring[(vp->cur_tx-1) % TX_RING_SIZE];
+	rtdm_lockctx_t context;
+
+	if (vortex_debug > 6) {
+		rtdm_printk(KERN_DEBUG "boomerang_start_xmit()\n");
+		if (vortex_debug > 3)
+			rtdm_printk(KERN_DEBUG "%s: Trying to send a packet, Tx index %d.\n",
+				rtdev->name, vp->cur_tx);
+	}
+
+	if (vp->cur_tx - vp->dirty_tx >= TX_RING_SIZE) {
+		if (vortex_debug > 0)
+			rtdm_printk(KERN_WARNING "%s: BUG! Tx Ring full, refusing to send buffer.\n",
+				rtdev->name);
+		rtnetif_stop_queue(rtdev);
+		return 1;
+	}
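+	/*
+	 * cur_tx and dirty_tx are free-running counters, so their difference
+	 * is the number of descriptors in flight, and entry wraps via the
+	 * modulo above.  For example, if TX_RING_SIZE were 16, cur_tx = 18 and
+	 * dirty_tx = 3 would mean 15 in flight: this packet still fits, and
+	 * the post-increment check further down then stops the queue.
+	 */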
+
+	vp->tx_skbuff[entry] = skb;
+
+	vp->tx_ring[entry].next = 0;
+#if DO_ZEROCOPY
+	if (skb->ip_summed != CHECKSUM_HW)
+		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+	else
+		vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded | AddTCPChksum);
+
+	if (!skb_shinfo(skb)->nr_frags) {
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev,
+														skb->data, skb->len, PCI_DMA_TODEVICE));
+		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len | LAST_FRAG);
+	} else {
+		int i;
+
+		vp->tx_ring[entry].frag[0].addr = cpu_to_le32(pci_map_single(vp->pdev,
+														skb->data, skb->len, PCI_DMA_TODEVICE));
+		vp->tx_ring[entry].frag[0].length = cpu_to_le32(skb->len);
+
+		for (i = 0; i < skb_shinfo(skb)->nr_frags; i++) {
+			skb_frag_t *frag = &skb_shinfo(skb)->frags[i];
+
+			vp->tx_ring[entry].frag[i+1].addr =
+				cpu_to_le32(pci_map_single(vp->pdev, // *** RTnet: page mapping correct? Or is this code never used?
+								(void*)page_address(frag->page) + frag->page_offset,
+								frag->size, PCI_DMA_TODEVICE));
+
+			if (i == skb_shinfo(skb)->nr_frags-1)
+				vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size|LAST_FRAG);
+			else
+				vp->tx_ring[entry].frag[i+1].length = cpu_to_le32(frag->size);
+		}
+	}
+#else
+	vp->tx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev,
+											skb->data, skb->len, PCI_DMA_TODEVICE));
+	vp->tx_ring[entry].length = cpu_to_le32(skb->len | LAST_FRAG);
+	vp->tx_ring[entry].status = cpu_to_le32(skb->len | TxIntrUploaded);
+#endif
+
+	// *** RTnet ***
+	rtdm_irq_disable(&vp->irq_handle);
+	rtdm_lock_get(&vp->lock);
+	// *** RTnet ***
+
+	/* Wait for the stall to complete. */
+	issue_and_wait(rtdev, DownStall);
+
+	rtdm_lock_irqsave(context);
+	if (unlikely(skb->xmit_stamp != NULL))
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	prev_entry->next = cpu_to_le32(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc));
+	if (inl(ioaddr + DownListPtr) == 0) {
+		outl(vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc), ioaddr + DownListPtr);
+		vp->queued_packet++;
+	}
+
+	vp->cur_tx++;
+	if (vp->cur_tx - vp->dirty_tx > TX_RING_SIZE - 1) {
+		rtnetif_stop_queue (rtdev);
+	} else {					/* Clear previous interrupt enable. */
+#if defined(tx_interrupt_mitigation)
+		/* Dubious. If in boomerang_interrupt the "faster" cyclone ifdef
+		 * were selected, this would corrupt DN_COMPLETE. No?
+		 */
+		prev_entry->status &= cpu_to_le32(~TxIntrUploaded);
+#endif
+	}
+	outw(DownUnstall, ioaddr + EL3_CMD);
+	rtdm_lock_put_irqrestore(&vp->lock, context);
+	rtdm_irq_enable(&vp->irq_handle);
+	//rtdev->trans_start = jiffies;
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+
+/*
+ * This is the ISR for the vortex series chips.
+ * full_bus_master_tx == 0 && full_bus_master_rx == 0
+ */
+
+static int vortex_interrupt(rtdm_irq_t *irq_handle)
+{
+	// *** RTnet ***
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	// *** RTnet ***
+
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr;
+	int status;
+	int work_done = max_interrupt_work;
+
+	ioaddr = rtdev->base_addr;
+	rtdm_lock_get(&vp->lock);
+
+	status = inw(ioaddr + EL3_STATUS);
+
+	if (vortex_debug > 6)
+		printk("vortex_interrupt(). status=0x%4x\n", status);
+
+	if ((status & IntLatch) == 0)
+		goto handler_exit;		/* No interrupt: shared IRQs cause this */
+
+	if (status & IntReq) {
+		status |= vp->deferred;
+		vp->deferred = 0;
+	}
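+	/*
+	 * vp->deferred accumulates event bits that were masked off during an
+	 * earlier "too much work" pass (see the work_done handling below);
+	 * the IntReq test above folds them back into this run's status word
+	 * so no event is lost.
+	 */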
+
+	if (status == 0xffff)		/* h/w no longer present (hotplug)? */
+		goto handler_exit;
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+			rtdev->name, status, inb(ioaddr + Timer));
+
+	do {
+		if (vortex_debug > 5)
+			rtdm_printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+				rtdev->name, status);
+		if (status & RxComplete)
+			vortex_rx(rtdev, &packets, &time_stamp);
+
+		if (status & TxAvailable) {
+			if (vortex_debug > 5)
+				rtdm_printk(KERN_DEBUG "	TX room bit was handled.\n");
+			/* There's room in the FIFO for a full-sized packet. */
+			outw(AckIntr | TxAvailable, ioaddr + EL3_CMD);
+			rtnetif_wake_queue (rtdev);
+		}
+
+		if (status & DMADone) {
+			if (inw(ioaddr + Wn7_MasterStatus) & 0x1000) {
+				outw(0x1000, ioaddr + Wn7_MasterStatus); /* Ack the event. */
+				pci_unmap_single(vp->pdev, vp->tx_skb_dma, (vp->tx_skb->len + 3) & ~3, PCI_DMA_TODEVICE);
+				dev_kfree_rtskb(vp->tx_skb); /* Release the transferred buffer */
+				if (inw(ioaddr + TxFree) > 1536) {
+					/*
+					 * AKPM: FIXME: I don't think we need this.  If the queue was stopped due to
+					 * insufficient FIFO room, the TxAvailable test will succeed and call
+					 * rtnetif_wake_queue()
+					 */
+					rtnetif_wake_queue(rtdev);
+				} else { /* Interrupt when FIFO has room for max-sized packet. */
+					outw(SetTxThreshold + (1536>>2), ioaddr + EL3_CMD);
+					rtnetif_stop_queue(rtdev);
+				}
+			}
+		}
+		/* Check for all uncommon interrupts at once. */
+		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq)) {
+			if (status == 0xffff)
+				break;
+			vortex_error(rtdev, status, &time_stamp);
+		}
+
+		if (--work_done < 0) {
+			rtdm_printk(KERN_WARNING "%s: Too much work in interrupt, status "
+				"%4.4x.\n", rtdev->name, status);
+			/* Disable all pending interrupts. */
+			do {
+				vp->deferred |= status;
+				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+					ioaddr + EL3_CMD);
+				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+			/* The timer will reenable interrupts. */
+			mod_timer(&vp->timer, jiffies + 1*HZ);
+			break;
+		}
+		/* Acknowledge the IRQ. */
+		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+	} while ((status = inw(ioaddr + EL3_STATUS)) & (IntLatch | RxComplete));
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+			rtdev->name, status);
+  handler_exit:
+	rtdm_lock_put(&vp->lock);
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/*
+ * This is the ISR for the boomerang series chips.
+ * full_bus_master_tx == 1 && full_bus_master_rx == 1
+ */
+
+static int boomerang_interrupt(rtdm_irq_t *irq_handle)
+{
+	// *** RTnet ***
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	// *** RTnet ***
+
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr;
+	int status;
+	int work_done = max_interrupt_work;
+
+	ioaddr = rtdev->base_addr;
+
+	/*
+	 * It seems dopey to put the spinlock this early, but we could race against vortex_tx_timeout
+	 * and boomerang_start_xmit
+	 */
+	rtdm_lock_get(&vp->lock);
+
+	status = inw(ioaddr + EL3_STATUS);
+
+	if (vortex_debug > 6)
+		rtdm_printk(KERN_DEBUG "boomerang_interrupt. status=0x%4x\n", status);
+
+	if ((status & IntLatch) == 0)
+		goto handler_exit;		/* No interrupt: shared IRQs can cause this */
+
+	if (status == 0xffff) {		/* h/w no longer present (hotplug)? */
+		if (vortex_debug > 1)
+			rtdm_printk(KERN_DEBUG "boomerang_interrupt(1): status = 0xffff\n");
+		goto handler_exit;
+	}
+
+	if (status & IntReq) {
+		status |= vp->deferred;
+		vp->deferred = 0;
+	}
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: interrupt, status %4.4x, latency %d ticks.\n",
+			rtdev->name, status, inb(ioaddr + Timer));
+	do {
+		if (vortex_debug > 5)
+			rtdm_printk(KERN_DEBUG "%s: In interrupt loop, status %4.4x.\n",
+				rtdev->name, status);
+		if (status & UpComplete) {
+			outw(AckIntr | UpComplete, ioaddr + EL3_CMD);
+			if (vortex_debug > 5)
+				rtdm_printk(KERN_DEBUG "boomerang_interrupt->boomerang_rx\n");
+			boomerang_rx(rtdev, &packets, &time_stamp);
+		}
+
+		if (status & DownComplete) {
+			unsigned int dirty_tx = vp->dirty_tx;
+
+			outw(AckIntr | DownComplete, ioaddr + EL3_CMD);
+			while (vp->cur_tx - dirty_tx > 0) {
+				int entry = dirty_tx % TX_RING_SIZE;
+				if (inl(ioaddr + DownListPtr) ==
+					vp->tx_ring_dma + entry * sizeof(struct boom_tx_desc))
+					break;			/* It still hasn't been processed. */
+
+				if (vp->tx_skbuff[entry]) {
+					struct rtskb *skb = vp->tx_skbuff[entry];
+#if DO_ZEROCOPY
+					int i;
+					for (i=0; i<=skb_shinfo(skb)->nr_frags; i++)
+						pci_unmap_single(vp->pdev,
+								le32_to_cpu(vp->tx_ring[entry].frag[i].addr),
+								le32_to_cpu(vp->tx_ring[entry].frag[i].length)&0xFFF,
+								PCI_DMA_TODEVICE);
+#else
+					pci_unmap_single(vp->pdev,
+							le32_to_cpu(vp->tx_ring[entry].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+					dev_kfree_rtskb(skb);
+					vp->tx_skbuff[entry] = 0;
+				} else {
+					rtdm_printk(KERN_DEBUG "boomerang_interrupt: no skb!\n");
+				}
+				/* vp->stats.tx_packets++;  Counted below. */
+				dirty_tx++;
+			}
+			vp->dirty_tx = dirty_tx;
+			if (vp->cur_tx - dirty_tx <= TX_RING_SIZE - 1) {
+				if (vortex_debug > 6)
+					rtdm_printk(KERN_DEBUG "boomerang_interrupt: wake queue\n");
+				rtnetif_wake_queue (rtdev);
+			}
+		}
+
+		/* Check for all uncommon interrupts at once. */
+		if (status & (HostError | RxEarly | StatsFull | TxComplete | IntReq))
+			vortex_error(rtdev, status, &time_stamp);
+
+		if (--work_done < 0) {
+			rtdm_printk(KERN_WARNING "%s: Too much work in interrupt, status "
+				"%4.4x.\n", rtdev->name, status);
+			/* Disable all pending interrupts. */
+			do {
+				vp->deferred |= status;
+				outw(SetStatusEnb | (~vp->deferred & vp->status_enable),
+					ioaddr + EL3_CMD);
+				outw(AckIntr | (vp->deferred & 0x7ff), ioaddr + EL3_CMD);
+			} while ((status = inw(ioaddr + EL3_CMD)) & IntLatch);
+			/* The timer will reenable interrupts. */
+			mod_timer(&vp->timer, jiffies + 1*HZ);
+			break;
+		}
+		/* Acknowledge the IRQ. */
+		outw(AckIntr | IntReq | IntLatch, ioaddr + EL3_CMD);
+		if (vp->cb_fn_base)			/* The PCMCIA people are idiots.  */
+			writel(0x8000, vp->cb_fn_base + 4);
+
+	} while ((status = inw(ioaddr + EL3_STATUS)) & IntLatch);
+
+	if (vortex_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status %4.4x.\n",
+			rtdev->name, status);
+  handler_exit:
+	rtdm_lock_put(&vp->lock);
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int vortex_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int i;
+	short rx_status;
+
+	if (vortex_debug > 5)
+		printk(KERN_DEBUG "vortex_rx(): status %4.4x, rx_status %4.4x.\n",
+			inw(ioaddr+EL3_STATUS), inw(ioaddr+RxStatus));
+	while ((rx_status = inw(ioaddr + RxStatus)) > 0) {
+		if (rx_status & 0x4000) { /* Error, update stats. */
+			unsigned char rx_error = inb(ioaddr + RxErrors);
+			if (vortex_debug > 2)
+				printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+			vp->stats.rx_errors++;
+			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
+			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
+			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
+			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
+			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
+		} else {
+			/* The packet length: up to 4.5K. */
+			int pkt_len = rx_status & 0x1fff;
+			struct rtskb *skb;
+
+			skb = rtnetdev_alloc_rtskb(rtdev, pkt_len + 5);
+			if (vortex_debug > 4)
+				printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+					pkt_len, rx_status);
+			if (skb != NULL) {
+				rtskb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+				/* 'skb_put()' points to the start of sk_buff data area. */
+				if (vp->bus_master &&
+					! (inw(ioaddr + Wn7_MasterStatus) & 0x8000)) {
+					dma_addr_t dma = pci_map_single(vp->pdev,
+									rtskb_put(skb, pkt_len),
+									pkt_len, PCI_DMA_FROMDEVICE);
+					outl(dma, ioaddr + Wn7_MasterAddr);
+					outw((skb->len + 3) & ~3, ioaddr + Wn7_MasterLen);
+					outw(StartDMAUp, ioaddr + EL3_CMD);
+					while (inw(ioaddr + Wn7_MasterStatus) & 0x8000)
+						;
+					pci_unmap_single(vp->pdev, dma, pkt_len, PCI_DMA_FROMDEVICE);
+				} else {
+					insl(ioaddr + RX_FIFO, rtskb_put(skb, pkt_len),
+						(pkt_len + 3) >> 2);
+				}
+				outw(RxDiscard, ioaddr + EL3_CMD); /* Pop top Rx packet. */
+				skb->protocol = rt_eth_type_trans(skb, rtdev);
+				skb->time_stamp = *time_stamp;
+				rtnetif_rx(skb);
+				//rtdev->last_rx = jiffies;
+				vp->stats.rx_packets++;
+				(*packets)++;
+
+				/* Wait a limited time to go to next packet. */
+				for (i = 200; i >= 0; i--)
+					if ( ! (inw(ioaddr + EL3_STATUS) & CmdInProgress))
+						break;
+				continue;
+			} else if (vortex_debug > 0)
+				rtdm_printk(KERN_NOTICE "%s: No memory to allocate a sk_buff of "
+					"size %d.\n", rtdev->name, pkt_len);
+		}
+		vp->stats.rx_dropped++;
+		issue_and_wait(rtdev, RxDiscard);
+	}
+
+	return 0;
+}
+
+static int
+boomerang_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int entry = vp->cur_rx % RX_RING_SIZE;
+	long ioaddr = rtdev->base_addr;
+	int rx_status;
+	int rx_work_limit = vp->dirty_rx + RX_RING_SIZE - vp->cur_rx;
+
+
+	if (vortex_debug > 5)
+		rtdm_printk(KERN_DEBUG "boomerang_rx(): status %4.4x\n", inw(ioaddr+EL3_STATUS));
+
+	while ((rx_status = le32_to_cpu(vp->rx_ring[entry].status)) & RxDComplete){
+		if (--rx_work_limit < 0)
+			break;
+		if (rx_status & RxDError) { /* Error, update stats. */
+			unsigned char rx_error = rx_status >> 16;
+			if (vortex_debug > 2)
+				rtdm_printk(KERN_DEBUG " Rx error: status %2.2x.\n", rx_error);
+			vp->stats.rx_errors++;
+			if (rx_error & 0x01)  vp->stats.rx_over_errors++;
+			if (rx_error & 0x02)  vp->stats.rx_length_errors++;
+			if (rx_error & 0x04)  vp->stats.rx_frame_errors++;
+			if (rx_error & 0x08)  vp->stats.rx_crc_errors++;
+			if (rx_error & 0x10)  vp->stats.rx_length_errors++;
+		} else {
+			/* The packet length: up to 4.5K. */
+			int pkt_len = rx_status & 0x1fff;
+			struct rtskb *skb;
+			dma_addr_t dma = le32_to_cpu(vp->rx_ring[entry].addr);
+
+			if (vortex_debug > 4)
+				rtdm_printk(KERN_DEBUG "Receiving packet size %d status %4.4x.\n",
+					pkt_len, rx_status);
+
+			/* Check if the packet is long enough to just accept without
+			   copying to a properly sized skbuff. */
+			{
+/*** RTnet ***/
+				/* Pass up the skbuff already on the Rx ring. */
+				skb = vp->rx_skbuff[entry];
+				vp->rx_skbuff[entry] = NULL;
+				rtskb_put(skb, pkt_len);
+				pci_unmap_single(vp->pdev, dma, PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				vp->rx_nocopy++;
+			}
+			skb->protocol = rt_eth_type_trans(skb, rtdev);
+			skb->time_stamp = *time_stamp;
+			{					/* Use hardware checksum info. */
+				int csum_bits = rx_status & 0xee000000;
+				if (csum_bits &&
+					(csum_bits == (IPChksumValid | TCPChksumValid) ||
+						csum_bits == (IPChksumValid | UDPChksumValid))) {
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+					vp->rx_csumhits++;
+				}
+			}
+			rtnetif_rx(skb);
+			//rtdev->last_rx = jiffies;
+			vp->stats.rx_packets++;
+			(*packets)++;
+		}
+		entry = (++vp->cur_rx) % RX_RING_SIZE;
+	}
+	/* Refill the Rx ring buffers. */
+	for (; vp->cur_rx - vp->dirty_rx > 0; vp->dirty_rx++) {
+		struct rtskb *skb;
+		entry = vp->dirty_rx % RX_RING_SIZE;
+		if (vp->rx_skbuff[entry] == NULL) {
+			skb = rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+			if (skb == NULL) {
+				static unsigned long last_jif;
+				if ((jiffies - last_jif) > 10 * HZ) {
+					rtdm_printk(KERN_WARNING "%s: memory shortage\n", rtdev->name);
+					last_jif = jiffies;
+				}
+				if ((vp->cur_rx - vp->dirty_rx) == RX_RING_SIZE) {
+					// *** RTnet *** mod_timer(&vp->rx_oom_timer, RUN_AT(HZ * 1));
+					;
+				}
+				break;			/* Bad news!  */
+			}
+			rtskb_reserve(skb, 2);	/* Align IP on 16 byte boundaries */
+			vp->rx_ring[entry].addr = cpu_to_le32(pci_map_single(vp->pdev,
+													skb->tail, PKT_BUF_SZ, PCI_DMA_FROMDEVICE));
+			vp->rx_skbuff[entry] = skb;
+		}
+		vp->rx_ring[entry].status = 0;	/* Clear complete bit. */
+		outw(UpUnstall, ioaddr + EL3_CMD);
+	}
+	return 0;
+}
+
+/*
+ * If we hit a total OOM refilling the Rx ring, the stock driver polls once a
+ * second for memory; that rx_oom_timer handler is disabled in this RTnet port
+ * (see the commented-out hook in vortex_up()), so there is currently no way
+ * to restart the rx process after such a failure.
+ */
+static void
+vortex_down(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+
+	rtnetif_stop_queue (rtdev);
+
+	del_timer_sync(&vp->rx_oom_timer);
+	del_timer_sync(&vp->timer);
+
+	/* Turn off statistics ASAP.  We update vp->stats below. */
+	outw(StatsDisable, ioaddr + EL3_CMD);
+
+	/* Disable the receiver and transmitter. */
+	outw(RxDisable, ioaddr + EL3_CMD);
+	outw(TxDisable, ioaddr + EL3_CMD);
+
+	if (rtdev->if_port == XCVR_10base2)
+		/* Turn off thinnet power.  Green! */
+		outw(StopCoax, ioaddr + EL3_CMD);
+
+	outw(SetIntrEnb | 0x0000, ioaddr + EL3_CMD);
+
+	// *** RTnet ***  update_stats(ioaddr, dev);
+	if (vp->full_bus_master_rx)
+		outl(0, ioaddr + UpListPtr);
+	if (vp->full_bus_master_tx)
+		outl(0, ioaddr + DownListPtr);
+
+	if (vp->pdev && vp->enable_wol) {
+		pci_save_state(vp->pdev, vp->power_state);
+		acpi_set_WOL(rtdev);
+	}
+}
+
+static int
+vortex_close(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int i;
+
+	// rtnet_device is always present after vortex_open was called.
+	//if (netif_device_present(dev))
+	//	vortex_down(dev);
+	vortex_down(rtdev);
+
+	if (vortex_debug > 1) {
+		printk(KERN_DEBUG"%s: vortex_close() status %4.4x, Tx status %2.2x.\n",
+			rtdev->name, inw(ioaddr + EL3_STATUS), inb(ioaddr + TxStatus));
+		printk(KERN_DEBUG "%s: vortex close stats: rx_nocopy %d rx_copy %d"
+			" tx_queued %d Rx pre-checksummed %d.\n",
+			rtdev->name, vp->rx_nocopy, vp->rx_copy, vp->queued_packet, vp->rx_csumhits);
+	}
+
+#if DO_ZEROCOPY
+	if (	vp->rx_csumhits &&
+		((vp->drv_flags & HAS_HWCKSM) == 0) &&
+		(hw_checksums[vp->card_idx] == -1)) {
+		printk(KERN_WARNING "%s supports hardware checksums, and we're not using them!\n", rtdev->name);
+		printk(KERN_WARNING "Please see http://www.uow.edu.au/~andrewm/zerocopy.html\n");
+	}
+#endif
+
+	// *** RTnet ***
+	if ( (i=rtdm_irq_free(&vp->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(rtdev);
+
+	// *** RTnet ***
+
+	if (vp->full_bus_master_rx) { /* Free Boomerang bus master Rx buffers. */
+		for (i = 0; i < RX_RING_SIZE; i++)
+			if (vp->rx_skbuff[i]) {
+				pci_unmap_single(	vp->pdev, le32_to_cpu(vp->rx_ring[i].addr),
+						PKT_BUF_SZ, PCI_DMA_FROMDEVICE);
+				dev_kfree_rtskb(vp->rx_skbuff[i]);
+				vp->rx_skbuff[i] = 0;
+			}
+	}
+	if (vp->full_bus_master_tx) { /* Free Boomerang bus master Tx buffers. */
+		for (i = 0; i < TX_RING_SIZE; i++) {
+			if (vp->tx_skbuff[i]) {
+				struct rtskb *skb = vp->tx_skbuff[i];
+#if DO_ZEROCOPY
+				int k;
+
+				for (k=0; k<=skb_shinfo(skb)->nr_frags; k++)
+					pci_unmap_single(vp->pdev,
+							le32_to_cpu(vp->tx_ring[i].frag[k].addr),
+							le32_to_cpu(vp->tx_ring[i].frag[k].length)&0xFFF,
+							PCI_DMA_TODEVICE);
+#else
+				pci_unmap_single(vp->pdev, le32_to_cpu(vp->tx_ring[i].addr), skb->len, PCI_DMA_TODEVICE);
+#endif
+				dev_kfree_rtskb(skb);
+				vp->tx_skbuff[i] = 0;
+			}
+		}
+	}
+
+	return 0;
+}
+
+static void
+dump_tx_ring(struct rtnet_device *rtdev)
+{
+	if (vortex_debug > 0) {
+		struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+		long ioaddr = rtdev->base_addr;
+
+		if (vp->full_bus_master_tx) {
+			int i;
+			int stalled = inl(ioaddr + PktStatus) & 0x04;	/* Possibly racy, but it's only debug stuff */
+
+			rtdm_printk(KERN_ERR "	Flags; bus-master %d, dirty %d(%d) current %d(%d)\n",
+				vp->full_bus_master_tx,
+				vp->dirty_tx, vp->dirty_tx % TX_RING_SIZE,
+				vp->cur_tx, vp->cur_tx % TX_RING_SIZE);
+			rtdm_printk(KERN_ERR "	Transmit list %8.8x vs. %p.\n",
+				inl(ioaddr + DownListPtr),
+				&vp->tx_ring[vp->dirty_tx % TX_RING_SIZE]);
+			issue_and_wait(rtdev, DownStall);
+			for (i = 0; i < TX_RING_SIZE; i++) {
+				rtdm_printk(KERN_ERR "	%d: @%p  length %8.8x status %8.8x\n", i,
+					&vp->tx_ring[i],
+#if DO_ZEROCOPY
+					le32_to_cpu(vp->tx_ring[i].frag[0].length),
+#else
+					le32_to_cpu(vp->tx_ring[i].length),
+#endif
+					le32_to_cpu(vp->tx_ring[i].status));
+			}
+			if (!stalled)
+				outw(DownUnstall, ioaddr + EL3_CMD);
+		}
+	}
+}
+
+static struct net_device_stats *vortex_get_stats(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	rtdm_lockctx_t flags;
+
+	if (rtnetif_device_present(rtdev)) {	/* AKPM: Used to be netif_running */
+		rtdm_lock_get_irqsave (&vp->lock, flags);
+		update_stats(rtdev->base_addr, rtdev);
+		rtdm_lock_put_irqrestore (&vp->lock, flags);
+	}
+	return &vp->stats;
+}
+
+/*  Update statistics.
+    Unlike with the EL3 we need not worry about interrupts changing
+    the window setting from underneath us, but we must still guard
+    against a race condition with a StatsUpdate interrupt updating the
+    table.  This is done by checking that the ASM (!) code generated uses
+    atomic updates with '+='.
+*/
+static void update_stats(long ioaddr, struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int old_window = inw(ioaddr + EL3_CMD);
+
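+	/*
+	 * Reading EL3_CMD returns the status word, whose top three bits
+	 * (15:13) encode the currently selected register window; this is what
+	 * lets the original window be restored at the end of this function
+	 * via EL3WINDOW(old_window >> 13).
+	 */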
+	if (old_window == 0xffff)	/* Chip suspended or ejected. */
+		return;
+	/* Unlike the 3c5x9 we need not turn off stats updates while reading. */
+	/* Switch to the stats window, and read everything. */
+	EL3WINDOW(6);
+	vp->stats.tx_carrier_errors		+= inb(ioaddr + 0);
+	vp->stats.tx_heartbeat_errors	+= inb(ioaddr + 1);
+	/* Multiple collisions. */		inb(ioaddr + 2);
+	vp->stats.collisions			+= inb(ioaddr + 3);
+	vp->stats.tx_window_errors		+= inb(ioaddr + 4);
+	vp->stats.rx_fifo_errors		+= inb(ioaddr + 5);
+	vp->stats.tx_packets			+= inb(ioaddr + 6);
+	vp->stats.tx_packets			+= (inb(ioaddr + 9)&0x30) << 4;
+	/* Rx packets	*/				inb(ioaddr + 7);   /* Must read to clear */
+	/* Tx deferrals */				inb(ioaddr + 8);
+	/* Don't bother with register 9, an extension of registers 6&7.
+	   If we do use the 6&7 values the atomic update assumption above
+	   is invalid. */
+	vp->stats.rx_bytes += inw(ioaddr + 10);
+	vp->stats.tx_bytes += inw(ioaddr + 12);
+	/* New: On the Vortex we must also clear the BadSSD counter. */
+	EL3WINDOW(4);
+	inb(ioaddr + 12);
+
+	{
+		u8 up = inb(ioaddr + 13);
+		vp->stats.rx_bytes += (up & 0x0f) << 16;
+		vp->stats.tx_bytes += (up & 0xf0) << 12;
+	}
+
+	EL3WINDOW(old_window >> 13);
+	return;
+}
+
+/* Pre-Cyclone chips have no documented multicast filter, so the only
+   multicast setting is to receive all multicast frames.  At least
+   the chip has a very clean way to set the mode, unlike many others. */
+static void set_rx_mode(struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	int new_mode;
+
+	if (rtdev->flags & IFF_PROMISC) {
+		if (vortex_debug > 0)
+			printk(KERN_NOTICE "%s: Setting promiscuous mode.\n", rtdev->name);
+		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast|RxProm;
+	} else	if (rtdev->flags & IFF_ALLMULTI) {
+		new_mode = SetRxFilter|RxStation|RxMulticast|RxBroadcast;
+	} else
+		new_mode = SetRxFilter | RxStation | RxBroadcast;
+
+	outw(new_mode, ioaddr + EL3_CMD);
+}
+
+/* MII transceiver control section.
+   Read and write the MII registers using software-generated serial
+   MDIO protocol.  See the MII specifications or DP83840A data sheet
+   for details. */
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues. */
+#define mdio_delay() inl(mdio_addr)
+
+#define MDIO_SHIFT_CLK	0x01
+#define MDIO_DIR_WRITE	0x04
+#define MDIO_DATA_WRITE0 (0x00 | MDIO_DIR_WRITE)
+#define MDIO_DATA_WRITE1 (0x02 | MDIO_DIR_WRITE)
+#define MDIO_DATA_READ	0x02
+#define MDIO_ENB_IN		0x00
+
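+/* The command word shifted out by mdio_read() below packs the MII management
+   frame: the start and read-opcode bits come from the 0xf6 constant, followed
+   by the 5-bit PHY address and the 5-bit register address.  A sketch,
+   assuming a read of BMSR (register 1) on PHY 0:
+
+     read_cmd = (0xf6 << 10) | (0 << 5) | 1 = 0x3d801
+
+   of which bits 14..0 are clocked out MSB first before the bus is turned
+   around to read back the 16 data bits. */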
+/* Generate the preamble required for initial synchronization and
+   a few older transceivers. */
+static void mdio_sync(long ioaddr, int bits)
+{
+	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	/* Establish sync by sending at least 32 logic ones. */
+	while (-- bits >= 0) {
+		outw(MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outw(MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+}
+
+static int mdio_read(struct rtnet_device *rtdev, int phy_id, int location)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	int i;
+	long ioaddr = rtdev->base_addr;
+	int read_cmd = (0xf6 << 10) | (phy_id << 5) | location;
+	unsigned int retval = 0;
+	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+
+	spin_lock_bh(&vp->mdio_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the read command bits out. */
+	for (i = 14; i >= 0; i--) {
+		int dataval = (read_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		mdio_delay();
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		retval = (retval << 1) | ((inw(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	spin_unlock_bh(&vp->mdio_lock);
+	return retval & 0x20000 ? 0xffff : retval>>1 & 0xffff;
+}
+
+static void mdio_write(struct rtnet_device *rtdev, int phy_id, int location, int value)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int write_cmd = 0x50020000 | (phy_id << 23) | (location << 18) | value;
+	long mdio_addr = ioaddr + Wn4_PhysicalMgmt;
+	int i;
+
+	spin_lock_bh(&vp->mdio_lock);
+
+	if (mii_preamble_required)
+		mdio_sync(ioaddr, 32);
+
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (write_cmd&(1<<i)) ? MDIO_DATA_WRITE1 : MDIO_DATA_WRITE0;
+		outw(dataval, mdio_addr);
+		mdio_delay();
+		outw(dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Leave the interface idle. */
+	for (i = 1; i >= 0; i--) {
+		outw(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		outw(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	spin_unlock_bh(&vp->mdio_lock);
+	return;
+}
+
+/* ACPI: Advanced Configuration and Power Interface. */
+/* Set Wake-On-LAN mode and put the board into D3 (power-down) state. */
+static void acpi_set_WOL(struct rtnet_device *rtdev)
+{
+	struct vortex_private *vp = (struct vortex_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+
+	/* Power up on: 1==Downloaded Filter, 2==Magic Packets, 4==Link Status. */
+	EL3WINDOW(7);
+	outw(2, ioaddr + 0x0c);
+	/* The RxFilter must accept the WOL frames. */
+	outw(SetRxFilter|RxStation|RxMulticast|RxBroadcast, ioaddr + EL3_CMD);
+	outw(RxEnable, ioaddr + EL3_CMD);
+
+	/* Change the power state to D3; RxEnable doesn't take effect. */
+	pci_enable_wake(vp->pdev, 0, 1);
+	pci_set_power_state(vp->pdev, 3);
+}
+
+
+static void vortex_remove_one (struct pci_dev *pdev)
+{
+	struct vortex_private *vp;
+	// *** RTnet ***
+	struct rtnet_device *rtdev = pci_get_drvdata (pdev);
+
+	if (!rtdev) {
+		printk("vortex_remove_one called for EISA device!\n");
+		BUG();
+	}
+
+	vp = rtdev->priv;
+
+	/* AKPM: FIXME: we should have
+	 *	if (vp->cb_fn_base) iounmap(vp->cb_fn_base);
+	 * here
+	 */
+	rt_unregister_rtnetdev(rtdev);
+	/* Should really use issue_and_wait() here */
+	outw(TotalReset|0x14, rtdev->base_addr + EL3_CMD);
+
+	if (vp->pdev && vp->enable_wol) {
+		pci_set_power_state(vp->pdev, 0);	/* Go active */
+		if (vp->pm_state_valid)
+			pci_restore_state(vp->pdev, vp->power_state);
+	}
+
+	dma_free_coherent(&pdev->dev,
+			sizeof(struct boom_rx_desc) * RX_RING_SIZE
+			+ sizeof(struct boom_tx_desc) * TX_RING_SIZE,
+			vp->rx_ring,
+			vp->rx_ring_dma);
+	if (vp->must_free_region)
+		release_region(rtdev->base_addr, vp->io_size);
+	// *** RTnet ***
+	rtdev_free(rtdev);
+	// *** RTnet ***
+}
+
+
+static struct pci_driver vortex_driver = {
+	.name		= "3c59x_rt",
+	.probe		= vortex_init_one,
+	.remove		= vortex_remove_one,
+	.id_table	= vortex_pci_tbl,
+#ifdef CONFIG_PM
+	.suspend	= NULL,
+	.resume		= NULL,
+#endif
+};
+
+
+static int vortex_have_pci;
+
+
+static int __init vortex_init (void)
+{
+	int pci_rc;
+
+	pci_rc = pci_register_driver(&vortex_driver);
+
+	if (pci_rc == 0)
+		vortex_have_pci = 1;
+
+	return (vortex_have_pci) ? 0 : -ENODEV;
+}
+
+
+static void __exit vortex_cleanup (void)
+{
+	if (vortex_have_pci)
+		pci_unregister_driver (&vortex_driver);
+}
+
+module_init(vortex_init);
+module_exit(vortex_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig
new file mode 100644
index 0000000..4620c94
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Kconfig
@@ -0,0 +1,17 @@
+config XENO_DRIVERS_NET_EXP_DRIVERS
+    depends on XENO_DRIVERS_NET && PCI
+    bool "Experimental Drivers"
+
+if XENO_DRIVERS_NET_EXP_DRIVERS
+
+config XENO_DRIVERS_NET_DRV_3C59X
+    depends on PCI
+    tristate "3Com 59x"
+
+config XENO_DRIVERS_NET_DRV_E1000_NEW
+    depends on PCI
+    tristate "New Intel(R) PRO/1000 (Gigabit)"
+
+source "drivers/xenomai/net/drivers/experimental/rt2500/Kconfig"
+
+endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile
new file mode 100644
index 0000000..eddd29d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_RT2500) += rt2500/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000_NEW) += e1000/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_3C59X) += rt_3c59x.o
+
+rt_3c59x-y := 3c59x.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile
new file mode 100644
index 0000000..be144c4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/Makefile
@@ -0,0 +1,19 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_E1000_NEW) += rt_e1000_new.o
+
+rt_e1000_new-y := \
+	e1000_80003es2lan.o \
+	e1000_82540.o \
+	e1000_82541.o \
+	e1000_82542.o \
+	e1000_82543.o \
+	e1000_82571.o \
+	e1000_api.o \
+	e1000_ich8lan.o \
+	e1000_mac.o \
+	e1000_main.o \
+	e1000_manage.o \
+	e1000_nvm.o \
+	e1000_param.o \
+	e1000_phy.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h
new file mode 100644
index 0000000..8b9830b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000.h
@@ -0,0 +1,425 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _E1000_H_
+#define _E1000_H_
+
+#include "kcompat.h"
+
+#include "e1000_api.h"
+
+#define BAR_0		0
+#define BAR_1		1
+#define BAR_5		5
+
+#define INTEL_E1000_ETHERNET_DEVICE(device_id) {\
+	PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+
+struct e1000_adapter;
+
+#define E1000_DBG(args...)
+
+#define E1000_ERR(args...) printk(KERN_ERR "e1000: " args)
+
+#define PFX "e1000: "
+#define DPRINTK(nlevel, klevel, fmt, args...) \
+	(void)((NETIF_MSG_##nlevel & adapter->msg_enable) && \
+	printk(KERN_##klevel PFX "%s: %s: " fmt, adapter->netdev->name, \
+		__FUNCTION__ , ## args))
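+/* Usage sketch (hypothetical call site): DPRINTK(PROBE, INFO, "link up\n")
+ * prints only when NETIF_MSG_PROBE is set in adapter->msg_enable, expanding
+ * to printk(KERN_INFO "e1000: <netdev>: <function>: link up\n"); note the
+ * macro requires a local "adapter" variable to be in scope. */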
+
+#define E1000_MAX_INTR 10
+
+/* TX/RX descriptor defines */
+#define E1000_DEFAULT_TXD                  256
+#define E1000_MAX_TXD                      256
+#define E1000_MIN_TXD                       80
+#define E1000_MAX_82544_TXD               4096
+
+#define E1000_DEFAULT_RXD                  256
+#define E1000_MAX_RXD                      256
+
+#define E1000_MIN_RXD                       80
+#define E1000_MAX_82544_RXD               4096
+
+#define E1000_MIN_ITR_USECS                 10 /* 100000 irq/sec */
+#define E1000_MAX_ITR_USECS              10000 /* 100    irq/sec */
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_MAX_TX_QUEUES                  4
+#endif
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define E1000_RXBUFFER_128   128    /* Used for packet split */
+#define E1000_RXBUFFER_256   256    /* Used for packet split */
+#define E1000_RXBUFFER_512   512
+#define E1000_RXBUFFER_1024  1024
+#define E1000_RXBUFFER_2048  2048
+#define E1000_RXBUFFER_4096  4096
+#define E1000_RXBUFFER_8192  8192
+#define E1000_RXBUFFER_16384 16384
+
+/* SmartSpeed delimiters */
+#define E1000_SMARTSPEED_DOWNSHIFT 3
+#define E1000_SMARTSPEED_MAX       15
+
+/* Packet Buffer allocations */
+#define E1000_PBA_BYTES_SHIFT 0xA
+#define E1000_TX_HEAD_ADDR_SHIFT 7
+#define E1000_PBA_TX_MASK 0xFFFF0000
+
+/* Early Receive defines */
+#define E1000_ERT_2048 0x100
+
+#define E1000_FC_PAUSE_TIME 0x0680 /* 858 usec */
+
+/* How many Tx Descriptors do we need to call netif_wake_queue ? */
+#define E1000_TX_QUEUE_WAKE	16
+/* How many Rx Buffers do we bundle into one write to the hardware ? */
+#define E1000_RX_BUFFER_WRITE	16	/* Must be power of 2 */
+
+#define AUTO_ALL_MODES            0
+#define E1000_EEPROM_82544_APM    0x0004
+#define E1000_EEPROM_APME         0x0400
+
+#ifndef E1000_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define E1000_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#define E1000_MNG_VLAN_NONE -1
+#endif
+/* Number of packet split data buffers (not including the header buffer) */
+#define PS_PAGE_BUFFERS (MAX_PS_BUFFERS - 1)
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer */
+struct e1000_buffer {
+	struct rtskb *skb;
+	dma_addr_t dma;
+	unsigned long time_stamp;
+	u16 length;
+	u16 next_to_watch;
+};
+
+struct e1000_rx_buffer {
+	struct rtskb *skb;
+	dma_addr_t dma;
+	struct page *page;
+};
+
+#ifdef CONFIG_E1000_MQ
+struct e1000_queue_stats {
+	u64 packets;
+	u64 bytes;
+};
+#endif
+
+struct e1000_ps_page { struct page *ps_page[PS_PAGE_BUFFERS]; };
+struct e1000_ps_page_dma { u64 ps_page_dma[PS_PAGE_BUFFERS]; };
+
+struct e1000_tx_ring {
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+	/* array of buffer information structs */
+	struct e1000_buffer *buffer_info;
+
+#ifdef CONFIG_E1000_MQ
+	/* for tx ring cleanup - needed for multiqueue */
+	spinlock_t tx_queue_lock;
+#endif
+	rtdm_lock_t tx_lock;
+	u16 tdh;
+	u16 tdt;
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats tx_stats;
+#endif
+	bool last_tx_tso;
+};
+
+struct e1000_rx_ring {
+	struct e1000_adapter *adapter; /* back link */
+	/* pointer to the descriptor ring memory */
+	void *desc;
+	/* physical address of the descriptor ring */
+	dma_addr_t dma;
+	/* length of descriptor ring in bytes */
+	unsigned int size;
+	/* number of descriptors in the ring */
+	unsigned int count;
+	/* next descriptor to associate a buffer with */
+	unsigned int next_to_use;
+	/* next descriptor to check for DD status bit */
+	unsigned int next_to_clean;
+#ifdef CONFIG_E1000_NAPI
+	struct napi_struct napi;
+#endif
+	/* array of buffer information structs */
+	struct e1000_rx_buffer *buffer_info;
+	/* arrays of page information for packet split */
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *rx_skb_top;
+
+	/* cpu for rx queue */
+	int cpu;
+
+	u16 rdh;
+	u16 rdt;
+#ifdef CONFIG_E1000_MQ
+	struct e1000_queue_stats rx_stats;
+#endif
+};
+
+#define E1000_DESC_UNUSED(R) \
+	((((R)->next_to_clean > (R)->next_to_use) ? 0 : (R)->count) + \
+	(R)->next_to_clean - (R)->next_to_use - 1)
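+/* Worked example for E1000_DESC_UNUSED: with count = 256, next_to_use = 10
+ * and next_to_clean = 5, the conditional selects count (clean <= use), so
+ * the macro yields 256 + 5 - 10 - 1 = 250 unused slots; one slot is always
+ * kept free so a full ring can be told apart from an empty one. */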
+
+#define E1000_RX_DESC_PS(R, i)	    \
+	(&(((union e1000_rx_desc_packet_split *)((R).desc))[i]))
+#define E1000_RX_DESC_EXT(R, i)	    \
+	(&(((union e1000_rx_desc_extended *)((R).desc))[i]))
+#define E1000_GET_DESC(R, i, type)	(&(((struct type *)((R).desc))[i]))
+#define E1000_RX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_rx_desc)
+#define E1000_TX_DESC(R, i)		E1000_GET_DESC(R, i, e1000_tx_desc)
+#define E1000_CONTEXT_DESC(R, i)	E1000_GET_DESC(R, i, e1000_context_desc)
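+/* Note that these macros take the ring by value -- (R).desc, not (R)->desc --
+ * so a typical (hypothetical) call site dereferences the ring pointer first,
+ * e.g.: struct e1000_rx_desc *desc = E1000_RX_DESC(*rx_ring, i); */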
+
+#ifdef SIOCGMIIPHY
+/* PHY register snapshot values */
+struct e1000_phy_regs {
+	u16 bmcr;		/* basic mode control register    */
+	u16 bmsr;		/* basic mode status register     */
+	u16 advertise;		/* auto-negotiation advertisement */
+	u16 lpa;		/* link partner ability register  */
+	u16 expansion;		/* auto-negotiation expansion reg */
+	u16 ctrl1000;		/* 1000BASE-T control register    */
+	u16 stat1000;		/* 1000BASE-T status register     */
+	u16 estatus;		/* extended status register       */
+};
+#endif
+
+/* board specific private data structure */
+
+struct e1000_adapter {
+#ifdef NETIF_F_HW_VLAN_TX
+	struct vlan_group *vlgrp;
+	u16 mng_vlan_id;
+#endif
+	u32 bd_number;
+	u32 rx_buffer_len;
+	u32 wol;
+	u32 smartspeed;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+	rtdm_lock_t  stats_lock;
+#ifdef CONFIG_E1000_NAPI
+	spinlock_t tx_queue_lock;
+#endif
+	atomic_t irq_sem;
+	unsigned int total_tx_bytes;
+	unsigned int total_tx_packets;
+	unsigned int total_rx_bytes;
+	unsigned int total_rx_packets;
+	/* Interrupt Throttle Rate */
+	u32 itr;
+	u32 itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	bool fc_autoneg;
+
+#ifdef ETHTOOL_PHYS_ID
+	struct timer_list blink_timer;
+	unsigned long led_status;
+#endif
+
+	/* TX */
+	struct e1000_tx_ring *tx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_MQ
+	struct e1000_tx_ring **cpu_tx_ring; /* per-cpu */
+#endif
+	unsigned int restart_queue;
+	unsigned long tx_queue_len;
+	u32 txd_cmd;
+	u32 tx_int_delay;
+	u32 tx_abs_int_delay;
+	u32 gotc;
+	u64 gotc_old;
+	u64 tpt_old;
+	u64 colc_old;
+	u32 tx_timeout_count;
+	u32 tx_fifo_head;
+	u32 tx_head_addr;
+	u32 tx_fifo_size;
+	u8 tx_timeout_factor;
+	atomic_t tx_fifo_stall;
+	bool pcix_82544;
+	bool detect_tx_hung;
+
+	/* RX */
+#ifdef CONFIG_E1000_NAPI
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       int *work_done, int work_to_do);
+#else
+	bool (*clean_rx) (struct e1000_adapter *adapter,
+			       struct e1000_rx_ring *rx_ring,
+			       nanosecs_abs_t *time_stamp);
+#endif
+	void (*alloc_rx_buf) (struct e1000_adapter *adapter,
+			      struct e1000_rx_ring *rx_ring,
+				int cleaned_count);
+	struct e1000_rx_ring *rx_ring;      /* One per active queue */
+#ifdef CONFIG_E1000_NAPI
+	//struct napi_struct napi;
+#endif
+	int num_tx_queues;
+	int num_rx_queues;
+
+	u64 hw_csum_err;
+	u64 hw_csum_good;
+	u64 rx_hdr_split;
+	u32 alloc_rx_buff_failed;
+	u32 rx_int_delay;
+	u32 rx_abs_int_delay;
+	bool rx_csum;
+	unsigned int rx_ps_pages;
+	u32 gorc;
+	u64 gorc_old;
+	u16 rx_ps_bsize0;
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+
+	/* OS defined structs */
+	struct rtnet_device *netdev;
+	struct pci_dev *pdev;
+	struct net_device_stats net_stats;
+
+	rtdm_irq_t irq_handle;
+	char  data_received;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+	struct e1000_phy_stats phy_stats;
+
+#ifdef SIOCGMIIPHY
+	/* Snapshot of PHY registers */
+	struct e1000_phy_regs phy_regs;
+#endif
+
+#ifdef ETHTOOL_TEST
+	u32 test_icr;
+	struct e1000_tx_ring test_tx_ring;
+	struct e1000_rx_ring test_rx_ring;
+#endif
+
+
+	int msg_enable;
+	/* to not mess up cache alignment, always add to the bottom */
+	unsigned long state;
+	u32 eeprom_wol;
+
+	u32 *config_space;
+
+	/* hardware capability, feature, and workaround flags */
+	unsigned int flags;
+
+	struct work_struct reset_task;
+	struct delayed_work watchdog_task;
+	struct delayed_work fifo_stall_task;
+	struct delayed_work phy_info_task;
+};
+
+#define E1000_FLAG_HAS_SMBUS                (1 << 0)
+#define E1000_FLAG_HAS_MANC2H               (1 << 1)
+#define E1000_FLAG_HAS_MSI                  (1 << 2)
+#define E1000_FLAG_MSI_ENABLED              (1 << 3)
+#define E1000_FLAG_HAS_INTR_MODERATION      (1 << 4)
+#define E1000_FLAG_RX_NEEDS_RESTART         (1 << 5)
+#define E1000_FLAG_BAD_TX_CARRIER_STATS_FD  (1 << 6)
+#define E1000_FLAG_INT_ASSERT_AUTO_MASK     (1 << 7)
+#define E1000_FLAG_QUAD_PORT_A              (1 << 8)
+#define E1000_FLAG_SMART_POWER_DOWN         (1 << 9)
+#ifdef NETIF_F_TSO
+#define E1000_FLAG_HAS_TSO                  (1 << 10)
+#ifdef NETIF_F_TSO6
+#define E1000_FLAG_HAS_TSO6                 (1 << 11)
+#endif
+#define E1000_FLAG_TSO_FORCE                (1 << 12)
+#endif
+#define E1000_FLAG_RX_RESTART_NOW           (1 << 13)
+
+enum e1000_state_t {
+	__E1000_TESTING,
+	__E1000_RESETTING,
+	__E1000_DOWN
+};
+
+extern char e1000_driver_name[];
+extern const char e1000_driver_version[];
+
+extern void e1000_power_up_phy(struct e1000_hw *hw);
+
+extern void e1000_set_ethtool_ops(struct net_device *netdev);
+extern void e1000_check_options(struct e1000_adapter *adapter);
+
+extern int e1000_up(struct e1000_adapter *adapter);
+extern void e1000_down(struct e1000_adapter *adapter);
+extern void e1000_reinit_locked(struct e1000_adapter *adapter);
+extern void e1000_reset(struct e1000_adapter *adapter);
+extern int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx);
+extern int e1000_setup_all_rx_resources(struct e1000_adapter *adapter);
+extern int e1000_setup_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_rx_resources(struct e1000_adapter *adapter);
+extern void e1000_free_all_tx_resources(struct e1000_adapter *adapter);
+extern void e1000_update_stats(struct e1000_adapter *adapter);
+#ifdef ETHTOOL_OPS_COMPAT
+extern int ethtool_ioctl(struct ifreq *ifr);
+#endif
+
+#endif /* _E1000_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c
new file mode 100644
index 0000000..2ef70d6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.c
@@ -0,0 +1,1401 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_80003es2lan
+ */
+
+#include "e1000_api.h"
+#include "e1000_80003es2lan.h"
+
+static s32  e1000_init_phy_params_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_acquire_phy_80003es2lan(struct e1000_hw *hw);
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw);
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                   u32 offset,
+                                                   u16 *data);
+static s32  e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                    u32 offset,
+                                                    u16 data);
+static s32  e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+                                        u16 words, u16 *data);
+static s32  e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_get_cable_length_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+                                               u16 *duplex);
+static s32  e1000_reset_hw_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_init_hw_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static s32  e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex);
+static s32  e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw);
+static s32  e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw);
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask);
+static s32  e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw);
+
+/*
+ * Cable length table for the GG82563 PHY: a DSP distance reading of
+ * "index" gives a lower bound of table[index] and an upper bound of
+ * table[index + 5].
+ */
+static const u16 e1000_gg82563_cable_length_table[] =
+         { 0, 60, 115, 150, 150, 60, 115, 150, 180, 180, 0xFF };
+#define GG82563_CABLE_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_gg82563_cable_length_table) / \
+                 sizeof(e1000_gg82563_cable_length_table[0]))
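+
+/*
+ * Example: a DSP distance reading of 1 yields a minimum length of
+ * table[1] = 60m and a maximum of table[6] = 115m.
+ */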
+
+/**
+ *  e1000_init_phy_params_80003es2lan - Init ESB2 PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_80003es2lan");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type        = e1000_phy_none;
+		goto out;
+	}
+
+	func->power_up_phy = e1000_power_up_phy_copper;
+	func->power_down_phy = e1000_power_down_phy_copper_80003es2lan;
+
+	phy->addr                = 1;
+	phy->autoneg_mask        = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us      = 100;
+	phy->type                = e1000_phy_gg82563;
+
+	func->acquire_phy        = e1000_acquire_phy_80003es2lan;
+	func->check_polarity     = e1000_check_polarity_m88;
+	func->check_reset_block  = e1000_check_reset_block_generic;
+	func->commit_phy         = e1000_phy_sw_reset_generic;
+	func->get_cfg_done       = e1000_get_cfg_done_80003es2lan;
+	func->get_phy_info       = e1000_get_phy_info_m88;
+	func->release_phy        = e1000_release_phy_80003es2lan;
+	func->reset_phy          = e1000_phy_hw_reset_generic;
+	func->set_d3_lplu_state  = e1000_set_d3_lplu_state_generic;
+
+	func->force_speed_duplex = e1000_phy_force_speed_duplex_80003es2lan;
+	func->get_cable_length   = e1000_get_cable_length_80003es2lan;
+	func->read_phy_reg       = e1000_read_phy_reg_gg82563_80003es2lan;
+	func->write_phy_reg      = e1000_write_phy_reg_gg82563_80003es2lan;
+
+	/* This can only be done after all function pointers are set up. */
+	ret_val = e1000_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+
+	/* Verify phy id */
+	if (phy->id != GG82563_E_PHY_ID) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_80003es2lan - Init ESB2 NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u16 size;
+
+	DEBUGFUNC("e1000_init_nvm_params_80003es2lan");
+
+	nvm->opcode_bits        = 8;
+	nvm->delay_usec         = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size    = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size    = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	nvm->type               = e1000_nvm_eeprom_spi;
+
+	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+	                  E1000_EECD_SIZE_EX_SHIFT);
+
+	/*
+	 * Added to a constant, "size" becomes the left-shift value
+	 * for setting word_size.
+	 */
+	size += NVM_WORD_SIZE_BASE_SHIFT;
+
+	/* EEPROM access above 16k is unsupported */
+	if (size > 14)
+		size = 14;
+	nvm->word_size	= 1 << size;
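+	/* word_size is in 16-bit words; the cap above is 1 << 14 = 16K words */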
+
+	/* Function Pointers */
+	func->acquire_nvm       = e1000_acquire_nvm_80003es2lan;
+	func->read_nvm          = e1000_read_nvm_eerd;
+	func->release_nvm       = e1000_release_nvm_80003es2lan;
+	func->update_nvm        = e1000_update_nvm_checksum_generic;
+	func->valid_led_default = e1000_valid_led_default_generic;
+	func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+	func->write_nvm         = e1000_write_nvm_80003es2lan;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_80003es2lan - Init ESB2 MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_80003es2lan");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+	        (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
+	                ? TRUE : FALSE;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pcie_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_80003es2lan;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_80003es2lan;
+	/* link setup */
+	func->setup_link = e1000_setup_link_generic;
+	/* physical interface link setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_80003es2lan
+	                : e1000_setup_fiber_serdes_link_generic;
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->check_for_link = e1000_check_for_copper_link_generic;
+		break;
+	case e1000_media_type_fiber:
+		func->check_for_link = e1000_check_for_fiber_link_generic;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->check_for_link = e1000_check_for_serdes_link_generic;
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+	/* check management mode */
+	func->check_mng_mode = e1000_check_mng_mode_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* read mac address */
+	func->read_mac_addr = e1000_read_mac_addr_80003es2lan;
+	/* blink LED */
+	func->blink_led = e1000_blink_led_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_80003es2lan;
+	/* link info */
+	func->get_link_up_info = e1000_get_link_up_info_80003es2lan;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_80003es2lan - Init ESB2 func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_80003es2lan");
+
+	hw->func.init_mac_params = e1000_init_mac_params_80003es2lan;
+	hw->func.init_nvm_params = e1000_init_nvm_params_80003es2lan;
+	hw->func.init_phy_params = e1000_init_phy_params_80003es2lan;
+}
+
+/**
+ *  e1000_acquire_phy_80003es2lan - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to acquire access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_acquire_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	DEBUGFUNC("e1000_acquire_phy_80003es2lan");
+
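+	/*
+	 * Take the per-port PHY semaphore (function 0 or 1) together with
+	 * the shared CSR semaphore.
+	 */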
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	mask |= E1000_SWFW_CSR_SM;
+
+	return e1000_acquire_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_release_phy_80003es2lan - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static void e1000_release_phy_80003es2lan(struct e1000_hw *hw)
+{
+	u16 mask;
+
+	DEBUGFUNC("e1000_release_phy_80003es2lan");
+
+	mask = hw->bus.func ? E1000_SWFW_PHY1_SM : E1000_SWFW_PHY0_SM;
+	mask |= E1000_SWFW_CSR_SM;
+
+	e1000_release_swfw_sync_80003es2lan(hw, mask);
+}
+
+/**
+ *  e1000_acquire_nvm_80003es2lan - Acquire rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the semaphore to access the EEPROM.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_acquire_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_acquire_nvm_80003es2lan");
+
+	ret_val = e1000_acquire_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_acquire_nvm_generic(hw);
+
+	if (ret_val)
+		e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_80003es2lan - Relinquish rights to access NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  Release the semaphore used to access the EEPROM.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static void e1000_release_nvm_80003es2lan(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_release_nvm_80003es2lan");
+
+	e1000_release_nvm_generic(hw);
+	e1000_release_swfw_sync_80003es2lan(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  e1000_acquire_swfw_sync_80003es2lan - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 e1000_acquire_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = E1000_SUCCESS;
+	s32 i = 0, timeout = 200;
+
+	DEBUGFUNC("e1000_acquire_swfw_sync_80003es2lan");
+
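+	/*
+	 * Two-level locking: the generic hardware semaphore only guards
+	 * access to the SW_FW_SYNC register itself, while the bits set in
+	 * SW_FW_SYNC are what actually reserve the PHY/NVM resources
+	 * between software and firmware.
+	 */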
+	while (i < timeout) {
+		if (e1000_get_hw_semaphore_generic(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/*
+		 * Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask)
+		 */
+		e1000_put_hw_semaphore_generic(hw);
+		msec_delay_irq(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		DEBUGOUT("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+	e1000_put_hw_semaphore_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_swfw_sync_80003es2lan - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void e1000_release_swfw_sync_80003es2lan(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	DEBUGFUNC("e1000_release_swfw_sync_80003es2lan");
+
+	while (e1000_get_hw_semaphore_generic(hw) != E1000_SUCCESS)
+		; /* Empty */
+
+	swfw_sync = E1000_READ_REG(hw, E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	E1000_WRITE_REG(hw, E1000_SW_FW_SYNC, swfw_sync);
+
+	e1000_put_hw_semaphore_generic(hw);
+}
+
+/**
+ *  e1000_read_phy_reg_gg82563_80003es2lan - Read GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @data: pointer to the data returned from the operation
+ *
+ *  Read the GG82563 PHY register.  This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_read_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                  u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	DEBUGFUNC("e1000_read_phy_reg_gg82563_80003es2lan");
+
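+	/*
+	 * The register offset encodes the PHY page in its upper bits
+	 * (offset >> GG82563_PAGE_SHIFT) and the register number within
+	 * the page in the bits covered by MAX_PHY_REG_ADDRESS.
+	 */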
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		goto out;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
+	 * before the device has completed the "Page Select" MDI
+	 * transaction.  So we wait 200us after each MDI command...
+	 */
+	usec_delay(200);
+
+	/* ...and verify the command was successful. */
+	ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+	if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+		ret_val = -E1000_ERR_PHY;
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+	usec_delay(200);
+
+	ret_val = e1000_read_phy_reg_mdic(hw,
+	                                 MAX_PHY_REG_ADDRESS & offset,
+	                                 data);
+
+	usec_delay(200);
+	e1000_release_phy_80003es2lan(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_gg82563_80003es2lan - Write GG82563 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @data: value to write to the register
+ *
+ *  Write to the GG82563 PHY register.  This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_write_phy_reg_gg82563_80003es2lan(struct e1000_hw *hw,
+                                                   u32 offset, u16 data)
+{
+	s32 ret_val;
+	u32 page_select;
+	u16 temp;
+
+	DEBUGFUNC("e1000_write_phy_reg_gg82563_80003es2lan");
+
+	ret_val = e1000_acquire_phy_80003es2lan(hw);
+	if (ret_val)
+		goto out;
+
+	/* Select Configuration Page */
+	if ((offset & MAX_PHY_REG_ADDRESS) < GG82563_MIN_ALT_REG) {
+		page_select = GG82563_PHY_PAGE_SELECT;
+	} else {
+		/*
+		 * Use Alternative Page Select register to access
+		 * registers 30 and 31
+		 */
+		page_select = GG82563_PHY_PAGE_SELECT_ALT;
+	}
+
+	temp = (u16)((u16)offset >> GG82563_PAGE_SHIFT);
+	ret_val = e1000_write_phy_reg_mdic(hw, page_select, temp);
+	if (ret_val) {
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+	/*
+	 * The "ready" bit in the MDIC register may be incorrectly set
+	 * before the device has completed the "Page Select" MDI
+	 * transaction.  So we wait 200us after each MDI command...
+	 */
+	usec_delay(200);
+
+	/* ...and verify the command was successful. */
+	ret_val = e1000_read_phy_reg_mdic(hw, page_select, &temp);
+
+	if (((u16)offset >> GG82563_PAGE_SHIFT) != temp) {
+		ret_val = -E1000_ERR_PHY;
+		e1000_release_phy_80003es2lan(hw);
+		goto out;
+	}
+
+	usec_delay(200);
+
+	ret_val = e1000_write_phy_reg_mdic(hw,
+	                                  MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+	usec_delay(200);
+	e1000_release_phy_80003es2lan(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_80003es2lan - Write to ESB2 NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the register to read
+ *  @words: number of words to write
+ *  @data: buffer of data to write to the NVM
+ *
+ *  Write "words" of data to the ESB2 NVM.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_write_nvm_80003es2lan(struct e1000_hw *hw, u16 offset,
+                            u16 words, u16 *data)
+{
+	DEBUGFUNC("e1000_write_nvm_80003es2lan");
+
+	return e1000_write_nvm_spi(hw, offset, words, data);
+}
+
+/**
+ *  e1000_get_cfg_done_80003es2lan - Wait for configuration to complete
+ *  @hw: pointer to the HW structure
+ *
+ *  Wait a specific amount of time for manageability processes to complete.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cfg_done_80003es2lan(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	DEBUGFUNC("e1000_get_cfg_done_80003es2lan");
+
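+	/* Each LAN port has its own configuration-done bit in EEMNGCTL */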
+	if (hw->bus.func == 1)
+		mask = E1000_NVM_CFG_DONE_PORT_1;
+
+	while (timeout) {
+		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & mask)
+			break;
+		msec_delay(1);
+		timeout--;
+	}
+	if (!timeout) {
+		DEBUGOUT("MNG configuration cycle has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_80003es2lan - Force PHY speed and duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the speed and duplex settings onto the PHY.  This is a
+ *  function pointer entry point called by the phy module.
+ **/
+static s32 e1000_phy_force_speed_duplex_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_80003es2lan");
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~GG82563_PSCR_CROSSOVER_MODE_AUTO;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("GG82563 PSCR: %X\n", phy_data);
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	/* Reset the phy to commit changes. */
+	phy_data |= MII_CR_RESET;
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+	if (hw->phy.autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link "
+		         "on GG82563 phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+		                                     100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			/*
+			 * We didn't get link.
+			 * Reset the DSP and cross our fingers.
+			 */
+			ret_val = e1000_phy_reset_dsp_generic(hw);
+			if (ret_val)
+				goto out;
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_LIMIT,
+		                                     100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Resetting the phy means we need to verify the TX_CLK corresponds
+	 * to the link speed.  10Mbps -> 2.5MHz, else 25MHz.
+	 */
+	phy_data &= ~GG82563_MSCR_TX_CLK_MASK;
+	if (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED)
+		phy_data |= GG82563_MSCR_TX_CLK_10MBPS_2_5;
+	else
+		phy_data |= GG82563_MSCR_TX_CLK_100MBPS_25;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	phy_data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_80003es2lan - Set approximate cable length
+ *  @hw: pointer to the HW structure
+ *
+ *  Find the approximate cable length as measured by the GG82563 PHY.
+ *  This is a function pointer entry point called by the phy module.
+ **/
+static s32 e1000_get_cable_length_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	DEBUGFUNC("e1000_get_cable_length_80003es2lan");
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_DSP_DISTANCE, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = phy_data & GG82563_DSPD_CABLE_LENGTH;
+	/* Guard against readings whose upper bound would run off the table */
+	if (index >= GG82563_CABLE_LENGTH_TABLE_SIZE - 5) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	phy->min_cable_length = e1000_gg82563_cable_length_table[index];
+	phy->max_cable_length = e1000_gg82563_cable_length_table[index+5];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_80003es2lan - Report speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to speed buffer
+ *  @duplex: pointer to duplex buffer
+ *
+ *  Retrieve the current speed and duplex configuration.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_get_link_up_info_80003es2lan(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_link_up_info_80003es2lan");
+
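+	/*
+	 * For copper links the KMRN interface must be reconfigured to
+	 * match the speed that was just read back.
+	 */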
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		ret_val = e1000_get_speed_and_duplex_copper_generic(hw,
+		                                                    speed,
+		                                                    duplex);
+		if (ret_val)
+			goto out;
+		if (*speed == SPEED_1000)
+			ret_val = e1000_cfg_kmrn_1000_80003es2lan(hw);
+		else
+			ret_val = e1000_cfg_kmrn_10_100_80003es2lan(hw,
+			                                      *duplex);
+	} else {
+		ret_val = e1000_get_speed_and_duplex_fiber_serdes_generic(hw,
+		                                                  speed,
+		                                                  duplex);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_80003es2lan - Reset the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Perform a global reset to the ESB2 controller.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl, icr;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_reset_hw_80003es2lan");
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to MAC\n");
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		goto out;
+
+	/* Clear any pending interrupt events. */
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	ret_val = e1000_check_alt_mac_addr_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_80003es2lan - Initialize the ESB2 controller
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize the hw bits, LED, VFTA, MTA, link and hw counters.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_80003es2lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_80003es2lan");
+
+	e1000_initialize_hw_bits_80003es2lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+	           E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+	           E1000_TXDCTL_FULL_TX_DESC_WB | E1000_TXDCTL_COUNT_DESC;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+
+	/* Enable retransmit on late collisions */
+	reg_data = E1000_READ_REG(hw, E1000_TCTL);
+	reg_data |= E1000_TCTL_RTLC;
+	E1000_WRITE_REG(hw, E1000_TCTL, reg_data);
+
+	/* Configure Gigabit Carry Extend Padding */
+	reg_data = E1000_READ_REG(hw, E1000_TCTL_EXT);
+	reg_data &= ~E1000_TCTL_EXT_GCEX_MASK;
+	reg_data |= DEFAULT_TCTL_EXT_GCEX_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TCTL_EXT, reg_data);
+
+	/* Configure Transmit Inter-Packet Gap */
+	reg_data = E1000_READ_REG(hw, E1000_TIPG);
+	reg_data &= ~E1000_TIPG_IPGT_MASK;
+	reg_data |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, reg_data);
+
+	reg_data = E1000_READ_REG_ARRAY(hw, E1000_FFLT, 0x0001);
+	reg_data &= ~0x00100000;
+	E1000_WRITE_REG_ARRAY(hw, E1000_FFLT, 0x0001, reg_data);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_80003es2lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_80003es2lan - Init hw bits of ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes required hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_80003es2lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("e1000_initialize_hw_bits_80003es2lan");
+
+	if (hw->mac.disable_hw_init_bits)
+		goto out;
+
+	/* Transmit Descriptor Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	if (hw->phy.media_type != e1000_media_type_copper)
+		reg &= ~(1 << 20);
+	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TARC(1));
+	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+out:
+	return;
+}
+
+/**
+ *  e1000_copper_link_setup_gg82563_80003es2lan - Configure GG82563 Link
+ *  @hw: pointer to the HW structure
+ *
+ *  Setup some GG82563 PHY registers for obtaining link
+ **/
+static s32 e1000_copper_link_setup_gg82563_80003es2lan(struct e1000_hw *hw)
+{
+	struct   e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u32 ctrl_ext;
+	u16 data;
+
+	DEBUGFUNC("e1000_copper_link_setup_gg82563_80003es2lan");
+
+	if (!phy->reset_disable) {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+		                             &data);
+		if (ret_val)
+			goto out;
+
+		data |= GG82563_MSCR_ASSERT_CRS_ON_TX;
+		/* Use 25MHz for both link down and 1000Base-T for Tx clock. */
+		data |= GG82563_MSCR_TX_CLK_1000MBPS_25;
+
+		ret_val = e1000_write_phy_reg(hw, GG82563_PHY_MAC_SPEC_CTRL,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * Options:
+		 *   MDI/MDI-X = 0 (default)
+		 *   0 - Auto for all speeds
+		 *   1 - MDI mode
+		 *   2 - MDI-X mode
+		 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+		 */
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL, &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~GG82563_PSCR_CROSSOVER_MODE_MASK;
+
+		switch (phy->mdix) {
+		case 1:
+			data |= GG82563_PSCR_CROSSOVER_MODE_MDI;
+			break;
+		case 2:
+			data |= GG82563_PSCR_CROSSOVER_MODE_MDIX;
+			break;
+		case 0:
+		default:
+			data |= GG82563_PSCR_CROSSOVER_MODE_AUTO;
+			break;
+		}
+
+		/*
+		 * Options:
+		 *   disable_polarity_correction = 0 (default)
+		 *       Automatic Correction for Reversed Cable Polarity
+		 *   0 - Disabled
+		 *   1 - Enabled
+		 */
+		data &= ~GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+		if (phy->disable_polarity_correction)
+			data |= GG82563_PSCR_POLARITY_REVERSAL_DISABLE;
+
+		ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL, data);
+		if (ret_val)
+			goto out;
+
+		/* SW Reset the PHY so all changes take effect */
+		ret_val = e1000_phy_commit(hw);
+		if (ret_val) {
+			DEBUGOUT("Error Resetting the PHY\n");
+			goto out;
+		}
+
+	}
+
+	/* Bypass Rx and Tx FIFOs */
+	ret_val = e1000_write_kmrn_reg(hw,
+	                        E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL,
+	                        E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS |
+	                                E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_kmrn_reg(hw,
+	                              E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+	                              &data);
+	if (ret_val)
+		goto out;
+	data |= E1000_KMRNCTRLSTA_OPMODE_E_IDLE;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE,
+	                               data);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~GG82563_PSCR2_REVERSE_AUTO_NEG;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_SPEC_CTRL_2, data);
+	if (ret_val)
+		goto out;
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext &= ~(E1000_CTRL_EXT_LINK_MODE_MASK);
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_PWR_MGMT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Do not init these registers when the HW is in IAMT mode, since the
+	 * firmware will have already initialized them.  We only initialize
+	 * them if the HW is not in IAMT mode.
+	 */
+	if (!(e1000_check_mng_mode(hw))) {
+		/* Enable Electrical Idle on the PHY */
+		data |= GG82563_PMCR_ENABLE_ELECTRICAL_IDLE;
+		ret_val = e1000_write_phy_reg(hw,
+		                             GG82563_PHY_PWR_MGMT_CTRL,
+		                             data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw,
+		                            GG82563_PHY_KMRN_MODE_CTRL,
+		                            &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+		ret_val = e1000_write_phy_reg(hw,
+		                             GG82563_PHY_KMRN_MODE_CTRL,
+		                             data);
+
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * Workaround: Disable padding in Kumeran interface in the MAC
+	 * and in the PHY to avoid CRC errors.
+	 */
+	ret_val = e1000_read_phy_reg(hw, GG82563_PHY_INBAND_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data |= GG82563_ICR_DIS_PADDING;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_INBAND_CTRL, data);
+	if (ret_val)
+		goto out;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_80003es2lan - Setup Copper Link for ESB2
+ *  @hw: pointer to the HW structure
+ *
+ *  Essentially a wrapper for setting up all things "copper" related.
+ *  This is a function pointer entry point called by the mac module.
+ **/
+static s32 e1000_setup_copper_link_80003es2lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32  ret_val;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_setup_copper_link_80003es2lan");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each
+	 * iteration and increase the max iterations when
+	 * polling the phy; this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw,
+	                              E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+	                              &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_INB_CTRL,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_copper_link_setup_gg82563_80003es2lan(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_10_100_80003es2lan - Apply "quirks" for 10/100 operation
+ *  @hw: pointer to the HW structure
+ *  @duplex: current duplex setting
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  10/100 operation.
+ **/
+static s32 e1000_cfg_kmrn_10_100_80003es2lan(struct e1000_hw *hw, u16 duplex)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 tipg;
+	u32 i = 0;
+	u16 reg_data, reg_data2;
+
+	DEBUGFUNC("e1000_configure_kmrn_for_10_100");
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = E1000_READ_REG(hw, E1000_TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_10_100_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
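+	/*
+	 * Kumeran reads may be unreliable; read the register twice and
+	 * retry (up to GG82563_MAX_KMRN_RETRY times) until two consecutive
+	 * reads match.
+	 */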
+	do {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data2);
+		if (ret_val)
+			goto out;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	if (duplex == HALF_DUPLEX)
+		reg_data |= GG82563_KMCR_PASS_FALSE_CARRIER;
+	else
+		reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cfg_kmrn_1000_80003es2lan - Apply "quirks" for gigabit operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the KMRN interface by applying last minute quirks for
+ *  gigabit operation.
+ **/
+static s32 e1000_cfg_kmrn_1000_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 reg_data, reg_data2;
+	u32 tipg;
+	u32 i = 0;
+
+	DEBUGFUNC("e1000_configure_kmrn_for_1000");
+
+	reg_data = E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT;
+	ret_val = e1000_write_kmrn_reg(hw,
+	                               E1000_KMRNCTRLSTA_OFFSET_HD_CTRL,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+
+	/* Configure Transmit Inter-Packet Gap */
+	tipg = E1000_READ_REG(hw, E1000_TIPG);
+	tipg &= ~E1000_TIPG_IPGT_MASK;
+	tipg |= DEFAULT_TIPG_IPGT_1000_80003ES2LAN;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
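+	/* Validate the Kumeran read, as in the 10/100 path above */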
+	do {
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL,
+		                             &reg_data2);
+		if (ret_val)
+			goto out;
+		i++;
+	} while ((reg_data != reg_data2) && (i < GG82563_MAX_KMRN_RETRY));
+
+	reg_data &= ~GG82563_KMCR_PASS_FALSE_CARRIER;
+	ret_val = e1000_write_phy_reg(hw, GG82563_PHY_KMRN_MODE_CTRL, reg_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_80003es2lan - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_80003es2lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_mac_addr_80003es2lan");
+	if (e1000_check_alt_mac_addr_generic(hw))
+		ret_val = e1000_read_mac_addr_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_80003es2lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * When powering down the PHY to save power, or turning off link during a
+ * driver unload when wake on lan is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_80003es2lan(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+}
+
+/**
+ *  e1000_clear_hw_cntrs_80003es2lan - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_80003es2lan(struct e1000_hw *hw)
+{
+	volatile u32 temp;
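+	/* temp is volatile so the clear-on-read accesses are not optimized away */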
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_80003es2lan");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+
+	temp = E1000_READ_REG(hw, E1000_IAC);
+	temp = E1000_READ_REG(hw, E1000_ICRXOC);
+
+	temp = E1000_READ_REG(hw, E1000_ICRXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICTXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQEC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQMTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h
new file mode 100644
index 0000000..ec84d27
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_80003es2lan.h
@@ -0,0 +1,95 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_80003ES2LAN_H_
+#define _E1000_80003ES2LAN_H_
+
+#define E1000_KMRNCTRLSTA_OFFSET_FIFO_CTRL       0x00
+#define E1000_KMRNCTRLSTA_OFFSET_INB_CTRL        0x02
+#define E1000_KMRNCTRLSTA_OFFSET_HD_CTRL         0x10
+#define E1000_KMRNCTRLSTA_OFFSET_MAC2PHY_OPMODE  0x1F
+
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_RX_BYPASS    0x0008
+#define E1000_KMRNCTRLSTA_FIFO_CTRL_TX_BYPASS    0x0800
+#define E1000_KMRNCTRLSTA_INB_CTRL_DIS_PADDING   0x0010
+
+#define E1000_KMRNCTRLSTA_HD_CTRL_10_100_DEFAULT 0x0004
+#define E1000_KMRNCTRLSTA_HD_CTRL_1000_DEFAULT   0x0000
+#define E1000_KMRNCTRLSTA_OPMODE_E_IDLE          0x2000
+
+#define E1000_TCTL_EXT_GCEX_MASK 0x000FFC00 /* Gigabit Carry Extend Padding */
+#define DEFAULT_TCTL_EXT_GCEX_80003ES2LAN        0x00010000
+
+#define DEFAULT_TIPG_IPGT_1000_80003ES2LAN       0x8
+#define DEFAULT_TIPG_IPGT_10_100_80003ES2LAN     0x9
+
+/* GG82563 PHY Specific Status Register (Page 0, Register 16) */
+#define GG82563_PSCR_POLARITY_REVERSAL_DISABLE  0x0002 /* 1=Reversal Disabled */
+#define GG82563_PSCR_CROSSOVER_MODE_MASK        0x0060
+#define GG82563_PSCR_CROSSOVER_MODE_MDI         0x0000 /* 00=Manual MDI */
+#define GG82563_PSCR_CROSSOVER_MODE_MDIX        0x0020 /* 01=Manual MDIX */
+#define GG82563_PSCR_CROSSOVER_MODE_AUTO        0x0060 /* 11=Auto crossover */
+
+/* PHY Specific Control Register 2 (Page 0, Register 26) */
+#define GG82563_PSCR2_REVERSE_AUTO_NEG          0x2000
+                                               /* 1=Reverse Auto-Negotiation */
+
+/* MAC Specific Control Register (Page 2, Register 21) */
+/* Tx clock speed for Link Down and 1000BASE-T for the following speeds */
+#define GG82563_MSCR_TX_CLK_MASK                0x0007
+#define GG82563_MSCR_TX_CLK_10MBPS_2_5          0x0004
+#define GG82563_MSCR_TX_CLK_100MBPS_25          0x0005
+#define GG82563_MSCR_TX_CLK_1000MBPS_2_5        0x0006
+#define GG82563_MSCR_TX_CLK_1000MBPS_25         0x0007
+
+#define GG82563_MSCR_ASSERT_CRS_ON_TX           0x0010 /* 1=Assert */
+
+/* DSP Distance Register (Page 5, Register 26) */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define GG82563_DSPD_CABLE_LENGTH               0x0007
+
+/* Kumeran Mode Control Register (Page 193, Register 16) */
+#define GG82563_KMCR_PASS_FALSE_CARRIER         0x0800
+
+/* Max number of times Kumeran read/write should be validated */
+#define GG82563_MAX_KMRN_RETRY                  0x5
+
+/* Power Management Control Register (Page 193, Register 20) */
+#define GG82563_PMCR_ENABLE_ELECTRICAL_IDLE     0x0001
+                                          /* 1=Enable SERDES Electrical Idle */
+
+/* In-Band Control Register (Page 194, Register 18) */
+#define GG82563_ICR_DIS_PADDING                 0x0010 /* Disable Padding */
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c
new file mode 100644
index 0000000..6e6e1f0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82540.c
@@ -0,0 +1,680 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82540
+ * e1000_82545
+ * e1000_82546
+ * e1000_82545_rev_3
+ * e1000_82546_rev_3
+ */
+
+#include "e1000_api.h"
+
+static s32  e1000_init_phy_params_82540(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82540(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82540(struct e1000_hw *hw);
+static s32  e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw);
+static s32  e1000_init_hw_82540(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82540(struct e1000_hw *hw);
+static s32  e1000_set_phy_mode_82540(struct e1000_hw *hw);
+static s32  e1000_set_vco_speed_82540(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82540(struct e1000_hw *hw);
+static s32  e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw);
+
+/**
+ * e1000_init_phy_params_82540 - Init PHY func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82540(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	phy->addr                       = 1;
+	phy->autoneg_mask               = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us             = 10000;
+	phy->type                       = e1000_phy_m88;
+
+	/* Function Pointers */
+	func->check_polarity            = e1000_check_polarity_m88;
+	func->commit_phy                = e1000_phy_sw_reset_generic;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_m88;
+	func->get_cable_length          = e1000_get_cable_length_m88;
+	func->get_cfg_done              = e1000_get_cfg_done_generic;
+	func->read_phy_reg              = e1000_read_phy_reg_m88;
+	func->reset_phy                 = e1000_phy_hw_reset_generic;
+	func->write_phy_reg             = e1000_write_phy_reg_m88;
+	func->get_phy_info              = e1000_get_phy_info_m88;
+	func->power_up_phy              = e1000_power_up_phy_copper;
+	func->power_down_phy            = e1000_power_down_phy_copper_82540;
+
+	ret_val = e1000_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+
+	/* Verify phy id */
+	switch (hw->mac.type) {
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		if (phy->id == M88E1011_I_PHY_ID)
+			break;
+		fallthrough;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_init_nvm_params_82540 - Init NVM func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82540(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	DEBUGFUNC("e1000_init_nvm_params_82540");
+
+	nvm->type               = e1000_nvm_eeprom_microwire;
+	nvm->delay_usec         = 50;
+	nvm->opcode_bits        = 3;
+	switch (nvm->override) {
+	case e1000_nvm_override_microwire_large:
+		nvm->address_bits       = 8;
+		nvm->word_size          = 256;
+		break;
+	case e1000_nvm_override_microwire_small:
+		nvm->address_bits       = 6;
+		nvm->word_size          = 64;
+		break;
+	default:
+		nvm->address_bits       = eecd & E1000_EECD_SIZE ? 8 : 6;
+		nvm->word_size          = eecd & E1000_EECD_SIZE ? 256 : 64;
+		break;
+	}
+
+	/* Function Pointers */
+	func->acquire_nvm        = e1000_acquire_nvm_generic;
+	func->read_nvm           = e1000_read_nvm_microwire;
+	func->release_nvm        = e1000_release_nvm_generic;
+	func->update_nvm         = e1000_update_nvm_checksum_generic;
+	func->valid_led_default  = e1000_valid_led_default_generic;
+	func->validate_nvm       = e1000_validate_nvm_checksum_generic;
+	func->write_nvm          = e1000_write_nvm_microwire;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ * e1000_init_mac_params_82540 - Init MAC func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82540(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_82540");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82545GM_FIBER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	case E1000_DEV_ID_82545GM_SERDES:
+	case E1000_DEV_ID_82546GB_SERDES:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pci_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82540;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82540;
+	/* link setup */
+	func->setup_link = e1000_setup_link_generic;
+	/* physical interface setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_82540
+	                : e1000_setup_fiber_serdes_link_82540;
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->check_for_link = e1000_check_for_copper_link_generic;
+		break;
+	case e1000_media_type_fiber:
+		func->check_for_link = e1000_check_for_fiber_link_generic;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->check_for_link = e1000_check_for_serdes_link_generic;
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+	/* link info */
+	func->get_link_up_info =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_get_speed_and_duplex_copper_generic
+	                : e1000_get_speed_and_duplex_fiber_serdes_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82540;
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_init_function_pointers_82540 - Init func ptrs.
+ * @hw: pointer to the HW structure
+ *
+ * The only function explicitly called by the api module to initialize
+ * all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82540(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82540");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82540;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82540;
+	hw->func.init_phy_params = e1000_init_phy_params_82540;
+}
+
+/**
+ *  e1000_reset_hw_82540 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82540(struct e1000_hw *hw)
+{
+	u32 ctrl, icr, manc;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_reset_hw_82540");
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete
+	 * before resetting the device.
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to 82540/82545/82546 MAC\n");
+	switch (hw->mac.type) {
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+		E1000_WRITE_REG(hw, E1000_CTRL_DUP, ctrl | E1000_CTRL_RST);
+		break;
+	default:
+		/*
+		 * These controllers can't ack the 64-bit write when
+		 * issuing the reset, so we use IO-mapping as a
+		 * workaround to issue the reset.
+		 */
+		E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+		break;
+	}
+
+	/* Wait for EEPROM reload */
+	msec_delay(5);
+
+	/* Disable HW ARPs on ASF enabled adapters */
+	manc = E1000_READ_REG(hw, E1000_MANC);
+	manc &= ~E1000_MANC_ARP_EN;
+	E1000_WRITE_REG(hw, E1000_MANC, manc);
+
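+	/* Mask off and clear any pending interrupt events */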
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82540 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_82540(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 txdctl, ctrl_ext;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_82540");
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	if (mac->type < e1000_82545_rev_3)
+		E1000_WRITE_REG(hw, E1000_VET, 0);
+
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		/*
+		 * Avoid back to back register writes by adding the register
+		 * read (flush).  This is to protect against some strange
+		 * bridge configurations that may issue Memory Write Block
+		 * (MWB) to our register space.  The *_rev_3 hardware at
+		 * least doesn't respond correctly to every other dword in an
+		 * MWB to our register space.
+		 */
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	if (mac->type < e1000_82545_rev_3)
+		e1000_pcix_mmrbc_workaround_generic(hw);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+	         E1000_TXDCTL_FULL_TX_DESC_WB;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82540(hw);
+
+	if ((hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER) ||
+	    (hw->device_id == E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)) {
+		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		/*
+		 * Relaxed ordering must be disabled to avoid a parity
+		 * error crash in a PCI slot.
+		 */
+		ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82540 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  We then check for link; once link is established,
+ *  collision distance and flow control are configured.  If link is not
+ *  established, we return -E1000_ERR_PHY (-2).  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_setup_copper_link_82540(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_setup_copper_link_82540");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	ret_val = e1000_set_phy_mode_82540(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type == e1000_82545_rev_3 ||
+	    hw->mac.type == e1000_82546_rev_3) {
+		ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &data);
+		if (ret_val)
+			goto out;
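+		/*
+		 * Set bit 3 of the M88 PHY Specific Control register on the
+		 * rev 3 parts; this magic value is carried over as-is from
+		 * the original Intel driver, which leaves it undocumented.
+		 */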
+		data |= 0x00000008;
+		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_copper_link_setup_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_82540 - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the output amplitude to the value in the EEPROM and adjust the VCO
+ *  speed to improve Bit Error Rate (BER) performance.  Configures collision
+ *  distance and flow control for fiber and serdes links.  Upon successful
+ *  setup, poll for link.  This is a function pointer entry point called by
+ *  the api module.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82540(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_fiber_serdes_link_82540");
+
+	switch (mac->type) {
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+		if (hw->phy.media_type == e1000_media_type_internal_serdes) {
+			/*
+			 * If we're on serdes media, adjust the output
+			 * amplitude to value set in the EEPROM.
+			 */
+			ret_val = e1000_adjust_serdes_amplitude_82540(hw);
+			if (ret_val)
+				goto out;
+		}
+		/* Adjust VCO speed to improve BER performance */
+		ret_val = e1000_set_vco_speed_82540(hw);
+		if (ret_val)
+			goto out;
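+		/* fall through */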
+	default:
+		break;
+	}
+
+	ret_val = e1000_setup_fiber_serdes_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_adjust_serdes_amplitude_82540 - Adjust amplitude based on EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Adjust the SERDES output amplitude based on the EEPROM settings.
+ **/
+static s32 e1000_adjust_serdes_amplitude_82540(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_adjust_serdes_amplitude_82540");
+
+	ret_val = e1000_read_nvm(hw, NVM_SERDES_AMPLITUDE, 1, &nvm_data);
+	if (ret_val)
+		goto out;
+
+	if (nvm_data != NVM_RESERVED_WORD) {
+		/* Adjust serdes output amplitude only. */
+		nvm_data &= NVM_SERDES_AMPLITUDE_MASK;
+		ret_val = e1000_write_phy_reg(hw,
+		                             M88E1000_PHY_EXT_CTRL,
+		                             nvm_data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_vco_speed_82540 - Set VCO speed for better performance
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the VCO speed to improve Bit Error Rate (BER) performance.
+ **/
+static s32 e1000_set_vco_speed_82540(struct e1000_hw *hw)
+{
+	s32  ret_val = E1000_SUCCESS;
+	u16 default_page = 0;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_set_vco_speed_82540");
+
+	/* Set PHY register 30, page 5, bit 8 to 0 */
+
+	ret_val = e1000_read_phy_reg(hw,
+	                            M88E1000_PHY_PAGE_SELECT,
+	                            &default_page);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0005);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~M88E1000_PHY_VCO_REG_BIT8;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Set PHY register 30, page 4, bit 11 to 1 */
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0004);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PHY_VCO_REG_BIT11;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+	                              default_page);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_phy_mode_82540 - Set PHY to class A mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the PHY to class A mode and assumes the following operations will
+ *  follow to enable the new class mode:
+ *    1.  Do a PHY soft reset.
+ *    2.  Restart auto-negotiation or force link.
+ **/
+static s32 e1000_set_phy_mode_82540(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_set_phy_mode_82540");
+
+	if (hw->mac.type != e1000_82545_rev_3)
+		goto out;
+
+	ret_val = e1000_read_nvm(hw, NVM_PHY_CLASS_WORD, 1, &nvm_data);
+	if (ret_val) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	if ((nvm_data != NVM_RESERVED_WORD) && (nvm_data & NVM_PHY_CLASS_A)) {
+		ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT,
+		                              0x000B);
+		if (ret_val) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		ret_val = e1000_write_phy_reg(hw,
+		                              M88E1000_PHY_GEN_CONTROL,
+		                              0x8104);
+		if (ret_val) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		phy->reset_disable = FALSE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82540 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off the link
+ * during a driver unload when Wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82540(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82540 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82540(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82540");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
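+	/* The statistics registers are clear-on-read; the values are discarded */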
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c
new file mode 100644
index 0000000..a0d5c88
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.c
@@ -0,0 +1,1328 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82541
+ * e1000_82547
+ * e1000_82541_rev_2
+ * e1000_82547_rev_2
+ */
+
+#include "e1000_api.h"
+#include "e1000_82541.h"
+
+static s32  e1000_init_phy_params_82541(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82541(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82541(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82541(struct e1000_hw *hw);
+static s32  e1000_init_hw_82541(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+                                         u16 *duplex);
+static s32  e1000_phy_hw_reset_82541(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82541(struct e1000_hw *hw);
+static s32  e1000_check_for_link_82541(struct e1000_hw *hw);
+static s32  e1000_get_cable_length_igp_82541(struct e1000_hw *hw);
+static s32  e1000_set_d3_lplu_state_82541(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_setup_led_82541(struct e1000_hw *hw);
+static s32  e1000_cleanup_led_82541(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw);
+static s32  e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+                                                     bool link_up);
+static s32  e1000_phy_init_script_82541(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw);
+
+static const u16 e1000_igp_cable_length_table[] =
+    { 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+      5, 10, 10, 10, 10, 10, 10, 10, 20, 20, 20, 20, 20, 25, 25, 25,
+      25, 25, 25, 25, 30, 30, 30, 30, 40, 40, 40, 40, 40, 40, 40, 40,
+      40, 50, 50, 50, 50, 50, 50, 50, 60, 60, 60, 60, 60, 60, 60, 60,
+      60, 70, 70, 70, 70, 70, 70, 80, 80, 80, 80, 80, 80, 90, 90, 90,
+      90, 90, 90, 90, 90, 90, 100, 100, 100, 100, 100, 100, 100, 100, 100, 100,
+      100, 100, 100, 100, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110, 110,
+      110, 110, 110, 110, 110, 110, 120, 120, 120, 120, 120, 120, 120, 120, 120, 120};
+#define IGP01E1000_AGC_LENGTH_TABLE_SIZE \
+                (sizeof(e1000_igp_cable_length_table) / \
+                 sizeof(e1000_igp_cable_length_table[0]))
+
+struct e1000_dev_spec_82541 {
+	e1000_dsp_config dsp_config;
+	e1000_ffe_config ffe_config;
+	u16 spd_default;
+	bool phy_init_script;
+};
+
+/**
+ *  e1000_init_phy_params_82541 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82541(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82541");
+
+	phy->addr                       = 1;
+	phy->autoneg_mask               = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us             = 10000;
+	phy->type                       = e1000_phy_igp;
+
+	/* Function Pointers */
+	func->check_polarity            = e1000_check_polarity_igp;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_igp;
+	func->get_cable_length          = e1000_get_cable_length_igp_82541;
+	func->get_cfg_done              = e1000_get_cfg_done_generic;
+	func->get_phy_info              = e1000_get_phy_info_igp;
+	func->read_phy_reg              = e1000_read_phy_reg_igp;
+	func->reset_phy                 = e1000_phy_hw_reset_82541;
+	func->set_d3_lplu_state         = e1000_set_d3_lplu_state_82541;
+	func->write_phy_reg             = e1000_write_phy_reg_igp;
+	func->power_up_phy              = e1000_power_up_phy_copper;
+	func->power_down_phy            = e1000_power_down_phy_copper_82541;
+
+	ret_val = e1000_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+
+	/* Verify phy id */
+	if (phy->id != IGP01E1000_I_PHY_ID) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82541 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82541(struct e1000_hw *hw)
+{
+	struct   e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	s32  ret_val = E1000_SUCCESS;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u16 size;
+
+	DEBUGFUNC("e1000_init_nvm_params_82541");
+
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->type = e1000_nvm_eeprom_spi;
+		eecd |= E1000_EECD_ADDR_BITS;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->type = e1000_nvm_eeprom_spi;
+		eecd &= ~E1000_EECD_ADDR_BITS;
+		break;
+	case e1000_nvm_override_microwire_large:
+		nvm->type = e1000_nvm_eeprom_microwire;
+		eecd |= E1000_EECD_SIZE;
+		break;
+	case e1000_nvm_override_microwire_small:
+		nvm->type = e1000_nvm_eeprom_microwire;
+		eecd &= ~E1000_EECD_SIZE;
+		break;
+	default:
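+		/* No override: infer the EEPROM type from the EECD strapping */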
+		nvm->type = eecd & E1000_EECD_TYPE
+		            ? e1000_nvm_eeprom_spi
+		            : e1000_nvm_eeprom_microwire;
+		break;
+	}
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		nvm->address_bits       = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 16 : 8;
+		nvm->delay_usec         = 1;
+		nvm->opcode_bits        = 8;
+		nvm->page_size          = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 32 : 8;
+
+		/* Function Pointers */
+		func->acquire_nvm       = e1000_acquire_nvm_generic;
+		func->read_nvm          = e1000_read_nvm_spi;
+		func->release_nvm       = e1000_release_nvm_generic;
+		func->update_nvm        = e1000_update_nvm_checksum_generic;
+		func->valid_led_default = e1000_valid_led_default_generic;
+		func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+		func->write_nvm         = e1000_write_nvm_spi;
+
+		/*
+		 * nvm->word_size must be discovered after the pointers
+		 * are set so we can verify the size from the nvm image
+		 * itself.  Temporarily set it to a dummy value so the
+		 * read will work.
+		 */
+		nvm->word_size = 64;
+		ret_val = e1000_read_nvm(hw, NVM_CFG, 1, &size);
+		if (ret_val)
+			goto out;
+		size = (size & NVM_SIZE_MASK) >> NVM_SIZE_SHIFT;
+		/*
+		 * if size != 0, it can be added to a constant and become
+		 * the left-shift value to set the word_size.  Otherwise,
+		 * word_size stays at 64.
+		 */
+		if (size) {
+			size += NVM_WORD_SIZE_BASE_SHIFT_82541;
+			nvm->word_size = 1 << size;
+		}
+	} else {
+		nvm->address_bits       = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 8 : 6;
+		nvm->delay_usec         = 50;
+		nvm->opcode_bits        = 3;
+		nvm->word_size          = (eecd & E1000_EECD_ADDR_BITS)
+		                          ? 256 : 64;
+
+		/* Function Pointers */
+		func->acquire_nvm       = e1000_acquire_nvm_generic;
+		func->read_nvm          = e1000_read_nvm_microwire;
+		func->release_nvm       = e1000_release_nvm_generic;
+		func->update_nvm        = e1000_update_nvm_checksum_generic;
+		func->valid_led_default = e1000_valid_led_default_generic;
+		func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+		func->write_nvm         = e1000_write_nvm_microwire;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_mac_params_82541 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82541(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_init_mac_params_82541");
+
+	/* Set media type */
+	hw->phy.media_type = e1000_media_type_copper;
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+
+	/* Function Pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pci_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82541;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82541;
+	/* link setup */
+	func->setup_link = e1000_setup_link_generic;
+	/* physical interface link setup */
+	func->setup_physical_interface = e1000_setup_copper_link_82541;
+	/* check for link */
+	func->check_for_link = e1000_check_for_link_82541;
+	/* link info */
+	func->get_link_up_info = e1000_get_link_up_info_82541;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_82541;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_82541;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82541;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82541);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82541 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82541(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82541");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82541;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82541;
+	hw->func.init_phy_params = e1000_init_phy_params_82541;
+}
+
+/**
+ *  e1000_reset_hw_82541 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82541(struct e1000_hw *hw)
+{
+	u32 ledctl, ctrl, icr, manc;
+
+	DEBUGFUNC("e1000_reset_hw_82541");
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete
+	 * before resetting the device.
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Must reset the Phy before resetting the MAC */
+	if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_PHY_RST));
+		msec_delay(5);
+	}
+
+	DEBUGOUT("Issuing a global reset to 82541/82547 MAC\n");
+	switch (hw->mac.type) {
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		/*
+		 * These controllers can't ack the 64-bit write when
+		 * issuing the reset, so we use IO-mapping as a
+		 * workaround to issue the reset.
+		 */
+		E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+		break;
+	default:
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+		break;
+	}
+
+	/* Wait for NVM reload */
+	msec_delay(20);
+
+	/* Disable HW ARPs on ASF enabled adapters */
+	manc = E1000_READ_REG(hw, E1000_MANC);
+	manc &= ~E1000_MANC_ARP_EN;
+	E1000_WRITE_REG(hw, E1000_MANC, manc);
+
+	if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+		e1000_phy_init_script_82541(hw);
+
+		/* Configure activity LED after Phy reset */
+		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+		ledctl &= IGP_ACTIVITY_LED_MASK;
+		ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+	}
+
+	/* Once again, mask the interrupts */
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+
+	/* Clear any pending interrupt events. */
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_hw_82541 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_82541(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 i, txdctl;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_init_hw_82541");
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		/*
+		 * Avoid back to back register writes by adding the register
+		 * read (flush).  This is to protect against some strange
+		 * bridge configurations that may issue Memory Write Block
+		 * (MWB) to our register space.
+		 */
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+	         E1000_TXDCTL_FULL_TX_DESC_WB;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82541(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_get_link_up_info_82541 - Report speed and duplex
+ * @hw: pointer to the HW structure
+ * @speed: pointer to speed buffer
+ * @duplex: pointer to duplex buffer
+ *
+ * Retrieve the current speed and duplex configuration.
+ * This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_get_link_up_info_82541(struct e1000_hw *hw, u16 *speed,
+                                        u16 *duplex)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_get_link_up_info_82541");
+
+	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+	if (ret_val)
+		goto out;
+
+	if (!phy->speed_downgraded)
+		goto out;
+
+	/*
+	 * IGP01 PHY may advertise full duplex operation after speed
+	 * downgrade even if it is operating at half duplex.
+	 * Here we set the duplex settings to match the duplex in the
+	 * link partner's capabilities.
+	 */
+	ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_EXP, &data);
+	if (ret_val)
+		goto out;
+
+	if (!(data & NWAY_ER_LP_NWAY_CAPS)) {
+		*duplex = HALF_DUPLEX;
+	} else {
+		ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY, &data);
+		if (ret_val)
+			goto out;
+
+		if (*speed == SPEED_100) {
+			if (!(data & NWAY_LPAR_100TX_FD_CAPS))
+				*duplex = HALF_DUPLEX;
+		} else if (*speed == SPEED_10) {
+			if (!(data & NWAY_LPAR_10T_FD_CAPS))
+				*duplex = HALF_DUPLEX;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_82541 - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_phy_hw_reset_82541(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 ledctl;
+
+	DEBUGFUNC("e1000_phy_hw_reset_82541");
+
+	ret_val = e1000_phy_hw_reset_generic(hw);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_init_script_82541(hw);
+
+	if ((hw->mac.type == e1000_82541) || (hw->mac.type == e1000_82547)) {
+		/* Configure activity LED after PHY reset */
+		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+		ledctl &= IGP_ACTIVITY_LED_MASK;
+		ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82541 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  We then check for link; once link is established,
+ *  collision distance and flow control are configured.  If link is not
+ *  established, we return -E1000_ERR_PHY (-2).  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_setup_copper_link_82541(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32  ret_val;
+	u32 ctrl, ledctl;
+
+	DEBUGFUNC("e1000_setup_copper_link_82541");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	hw->phy.reset_disable = FALSE;
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	/* Earlier revs of the IGP phy require us to force MDI. */
+	if (hw->mac.type == e1000_82541 || hw->mac.type == e1000_82547) {
+		dev_spec->dsp_config = e1000_dsp_config_disabled;
+		phy->mdix = 1;
+	} else {
+		dev_spec->dsp_config = e1000_dsp_config_enabled;
+	}
+
+	ret_val = e1000_copper_link_setup_igp(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.autoneg) {
+		if (dev_spec->ffe_config == e1000_ffe_config_active)
+			dev_spec->ffe_config = e1000_ffe_config_enabled;
+	}
+
+	/* Configure activity LED after Phy reset */
+	ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+	ledctl &= IGP_ACTIVITY_LED_MASK;
+	ledctl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+	E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_link_82541 - Check/Store link connection
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks the link condition of the adapter and stores the
+ *  results in the hw->mac structure. This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_check_for_link_82541(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_check_for_link_82541");
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
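+	/* One poll of the PHY status register, with no inter-read delay */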
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		ret_val = e1000_config_dsp_after_link_change_82541(hw, FALSE);
+		goto out; /* No link detected */
+	}
+
+	mac->get_link_status = FALSE;
+
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	e1000_check_downshift_generic(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_config_dsp_after_link_change_82541(hw, TRUE);
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000_config_collision_dist_generic(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000_config_fc_after_link_up_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error configuring flow control\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_dsp_after_link_change_82541 - Config DSP after link
+ *  @hw: pointer to the HW structure
+ *  @link_up: boolean flag for link up status
+ *
+ *  Returns -E1000_ERR_PHY when failing to read/write the PHY; otherwise
+ *  returns E1000_SUCCESS.
+ *
+ *  82541_rev_2 & 82547_rev_2 have the capability to configure the DSP when a
+ *  gigabit link is achieved to improve link quality.
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_config_dsp_after_link_change_82541(struct e1000_hw *hw,
+                                                    bool link_up)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+	u32 idle_errs = 0;
+	u16 phy_data, phy_saved_data, speed, duplex, i;
+	u16 ffe_idle_err_timeout = FFE_IDLE_ERR_COUNT_TIMEOUT_20;
+	u16 dsp_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+	                                           {IGP01E1000_PHY_AGC_PARAM_A,
+	                                            IGP01E1000_PHY_AGC_PARAM_B,
+	                                            IGP01E1000_PHY_AGC_PARAM_C,
+	                                            IGP01E1000_PHY_AGC_PARAM_D};
+
+	DEBUGFUNC("e1000_config_dsp_after_link_change_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	if (link_up) {
+		ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			DEBUGOUT("Error getting link speed and duplex\n");
+			goto out;
+		}
+
+		if (speed != SPEED_1000) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		if ((dev_spec->dsp_config == e1000_dsp_config_enabled) &&
+		    phy->min_cable_length >= 50) {
+
+			for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+				ret_val = e1000_read_phy_reg(hw,
+				                            dsp_reg_array[i],
+				                            &phy_data);
+				if (ret_val)
+					goto out;
+
+				phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+
+				ret_val = e1000_write_phy_reg(hw,
+				                             dsp_reg_array[i],
+				                             phy_data);
+				if (ret_val)
+					goto out;
+			}
+			dev_spec->dsp_config = e1000_dsp_config_activated;
+		}
+
+		if ((dev_spec->ffe_config != e1000_ffe_config_enabled) ||
+		    (phy->min_cable_length >= 50)) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		/* clear previous idle error counts */
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
+		for (i = 0; i < ffe_idle_err_timeout; i++) {
+			usec_delay(1000);
+			ret_val = e1000_read_phy_reg(hw,
+			                            PHY_1000T_STATUS,
+			                            &phy_data);
+			if (ret_val)
+				goto out;
+
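+			/* Accumulate the idle error count bits of 1000T status */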
+			idle_errs += (phy_data & SR_1000T_IDLE_ERROR_CNT);
+			if (idle_errs > SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT) {
+				dev_spec->ffe_config = e1000_ffe_config_active;
+
+				ret_val = e1000_write_phy_reg(hw,
+				                  IGP01E1000_PHY_DSP_FFE,
+				                  IGP01E1000_PHY_DSP_FFE_CM_CP);
+				if (ret_val)
+					goto out;
+				break;
+			}
+
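+			/* Extend the polling window once any idle errors appear */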
+			if (idle_errs)
+				ffe_idle_err_timeout =
+				                 FFE_IDLE_ERR_COUNT_TIMEOUT_100;
+		}
+	} else {
+		if (dev_spec->dsp_config == e1000_dsp_config_activated) {
+			/*
+			 * Save off the current value of register 0x2F5B
+			 * to be restored at the end of the routines.
+			 */
+			ret_val = e1000_read_phy_reg(hw,
+			                            0x2F5B,
+			                            &phy_saved_data);
+			if (ret_val)
+				goto out;
+
+			/* Disable the PHY transmitter */
+			ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+			if (ret_val)
+				goto out;
+
+			msec_delay_irq(20);
+
+			ret_val = e1000_write_phy_reg(hw,
+			                             0x0000,
+			                             IGP01E1000_IEEE_FORCE_GIG);
+			if (ret_val)
+				goto out;
+			for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+				ret_val = e1000_read_phy_reg(hw,
+				                            dsp_reg_array[i],
+				                            &phy_data);
+				if (ret_val)
+					goto out;
+
+				phy_data &= ~IGP01E1000_PHY_EDAC_MU_INDEX;
+				phy_data |= IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS;
+
+				ret_val = e1000_write_phy_reg(hw,
+				                             dsp_reg_array[i],
+				                             phy_data);
+				if (ret_val)
+					goto out;
+			}
+
+			ret_val = e1000_write_phy_reg(hw,
+			                       0x0000,
+			                       IGP01E1000_IEEE_RESTART_AUTONEG);
+			if (ret_val)
+				goto out;
+
+			msec_delay_irq(20);
+
+			/* Now enable the transmitter */
+			ret_val = e1000_write_phy_reg(hw,
+			                             0x2F5B,
+			                             phy_saved_data);
+			if (ret_val)
+				goto out;
+
+			dev_spec->dsp_config = e1000_dsp_config_enabled;
+		}
+
+		if (dev_spec->ffe_config != e1000_ffe_config_active) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		/*
+		 * Save off the current value of register 0x2F5B
+		 * to be restored at the end of the routines.
+		 */
+		ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+		if (ret_val)
+			goto out;
+
+		/* Disable the PHY transmitter */
+		ret_val = e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+		if (ret_val)
+			goto out;
+
+		msec_delay_irq(20);
+
+		ret_val = e1000_write_phy_reg(hw,
+		                             0x0000,
+		                             IGP01E1000_IEEE_FORCE_GIG);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_DSP_FFE,
+		                             IGP01E1000_PHY_DSP_FFE_DEFAULT);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_write_phy_reg(hw,
+		                             0x0000,
+		                             IGP01E1000_IEEE_RESTART_AUTONEG);
+		if (ret_val)
+			goto out;
+
+		msec_delay_irq(20);
+
+		/* Now enable the transmitter */
+		ret_val = e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+		if (ret_val)
+			goto out;
+
+		dev_spec->ffe_config = e1000_ffe_config_enabled;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cable_length_igp_82541 - Determine cable length for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain values, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.  This is a function pointer entry point called by the
+ *  api module.
+ **/
+static s32 e1000_get_cable_length_igp_82541(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, data;
+	u16 cur_agc_value, agc_value = 0;
+	u16 min_agc_value = IGP01E1000_AGC_LENGTH_TABLE_SIZE;
+	u16 agc_reg_array[IGP01E1000_PHY_CHANNEL_NUM] =
+	                                                 {IGP01E1000_PHY_AGC_A,
+	                                                  IGP01E1000_PHY_AGC_B,
+	                                                  IGP01E1000_PHY_AGC_C,
+	                                                  IGP01E1000_PHY_AGC_D};
+
+	DEBUGFUNC("e1000_get_cable_length_igp_82541");
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP01E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &data);
+		if (ret_val)
+			goto out;
+
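+		/* Shift out the lower bits to get the cable length table index */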
+		cur_agc_value = data >> IGP01E1000_AGC_LENGTH_SHIFT;
+
+		/* Bounds checking */
+		if ((cur_agc_value >= IGP01E1000_AGC_LENGTH_TABLE_SIZE - 1) ||
+		    (cur_agc_value == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		agc_value += cur_agc_value;
+
+		if (min_agc_value > cur_agc_value)
+			min_agc_value = cur_agc_value;
+	}
+
+	/* Remove the minimal AGC result for length < 50m */
+	if (agc_value < IGP01E1000_PHY_CHANNEL_NUM * 50) {
+		agc_value -= min_agc_value;
+		/* Average the three remaining channels for the length. */
+		agc_value /= (IGP01E1000_PHY_CHANNEL_NUM - 1);
+	} else {
+		/* Average the channels for the length. */
+		agc_value /= IGP01E1000_PHY_CHANNEL_NUM;
+	}
+
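+	/* Report a +/- IGP01E1000_AGC_RANGE window around the table value,
+	 * clamping the minimum at zero. */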
+	phy->min_cable_length = (e1000_igp_cable_length_table[agc_value] >
+	                         IGP01E1000_AGC_RANGE)
+	                        ? (e1000_igp_cable_length_table[agc_value] -
+	                           IGP01E1000_AGC_RANGE)
+	                        : 0;
+	phy->max_cable_length = e1000_igp_cable_length_table[agc_value] +
+	                        IGP01E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_82541 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Returns 0 (E1000_SUCCESS) on success, a negative error code on failure.
+ *
+ *  The low power link up (LPLU) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true; otherwise LPLU is cleared
+ *  for D3 and SmartSpeed is enabled.  LPLU and SmartSpeed are mutually
+ *  exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by the
+ *  api module.
+ **/
+static s32 e1000_set_d3_lplu_state_82541(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d3_lplu_state_82541");
+
+	switch (hw->mac.type) {
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		break;
+	default:
+		ret_val = e1000_set_d3_lplu_state_generic(hw, active);
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_GMII_FIFO, &data);
+	if (ret_val)
+		goto out;
+
+	if (!active) {
+		data &= ~IGP01E1000_GMII_FLEX_SPD;
+		ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, data);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP01E1000_GMII_FLEX_SPD;
+		ret_val = e1000_write_phy_reg(hw, IGP01E1000_GMII_FIFO, data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                            IGP01E1000_PHY_PORT_CONFIG,
+		                            &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_led_82541 - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.  This is a function pointer entry
+ *  point called by the api module.
+ **/
+static s32 e1000_setup_led_82541(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_setup_led_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	ret_val = e1000_read_phy_reg(hw,
+	                            IGP01E1000_GMII_FIFO,
+	                            &dev_spec->spd_default);
+	if (ret_val)
+		goto out;
+
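+	/* Write spd_default back with the GMII_SPD bit cleared while the LED
+	 * is under software control; cleanup_led restores the saved value. */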
+	ret_val = e1000_write_phy_reg(hw,
+	                             IGP01E1000_GMII_FIFO,
+	                             (u16)(dev_spec->spd_default &
+	                                        ~IGP01E1000_GMII_SPD));
+	if (ret_val)
+		goto out;
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cleanup_led_82541 - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.  This is a function pointer
+ *  entry point called by the api module.
+ **/
+static s32 e1000_cleanup_led_82541(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_cleanup_led_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	ret_val = e1000_write_phy_reg(hw,
+	                             IGP01E1000_GMII_FIFO,
+	                             dev_spec->spd_default);
+	if (ret_val)
+		goto out;
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_init_script_82541 - Initialize GbE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the IGP PHY.
+ **/
+static s32 e1000_phy_init_script_82541(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+	s32 ret_val;
+	u16 phy_saved_data;
+
+	DEBUGFUNC("e1000_phy_init_script_82541");
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	if (!dev_spec->phy_init_script) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/* Delay after phy reset to enable NVM configuration to load */
+	msec_delay(20);
+
+	/*
+	 * Save off the current value of register 0x2F5B to be restored at
+	 * the end of this routine.
+	 */
+	ret_val = e1000_read_phy_reg(hw, 0x2F5B, &phy_saved_data);
+
+	/* Disable the PHY transmitter */
+	e1000_write_phy_reg(hw, 0x2F5B, 0x0003);
+
+	msec_delay(20);
+
+	e1000_write_phy_reg(hw, 0x0000, 0x0140);
+
+	msec_delay(5);
+
+	switch (hw->mac.type) {
+	case e1000_82541:
+	case e1000_82547:
+		e1000_write_phy_reg(hw, 0x1F95, 0x0001);
+
+		e1000_write_phy_reg(hw, 0x1F71, 0xBD21);
+
+		e1000_write_phy_reg(hw, 0x1F79, 0x0018);
+
+		e1000_write_phy_reg(hw, 0x1F30, 0x1600);
+
+		e1000_write_phy_reg(hw, 0x1F31, 0x0014);
+
+		e1000_write_phy_reg(hw, 0x1F32, 0x161C);
+
+		e1000_write_phy_reg(hw, 0x1F94, 0x0003);
+
+		e1000_write_phy_reg(hw, 0x1F96, 0x003F);
+
+		e1000_write_phy_reg(hw, 0x2010, 0x0008);
+		break;
+	case e1000_82541_rev_2:
+	case e1000_82547_rev_2:
+		e1000_write_phy_reg(hw, 0x1F73, 0x0099);
+		break;
+	default:
+		break;
+	}
+
+	e1000_write_phy_reg(hw, 0x0000, 0x3300);
+
+	msec_delay(20);
+
+	/* Now enable the transmitter */
+	e1000_write_phy_reg(hw, 0x2F5B, phy_saved_data);
+
+	if (hw->mac.type == e1000_82547) {
+		u16 fused, fine, coarse;
+
+		/* Move to analog registers page */
+		e1000_read_phy_reg(hw,
+		                  IGP01E1000_ANALOG_SPARE_FUSE_STATUS,
+		                  &fused);
+
+		if (!(fused & IGP01E1000_ANALOG_SPARE_FUSE_ENABLED)) {
+			e1000_read_phy_reg(hw,
+			                  IGP01E1000_ANALOG_FUSE_STATUS,
+			                  &fused);
+
+			fine = fused & IGP01E1000_ANALOG_FUSE_FINE_MASK;
+			coarse = fused & IGP01E1000_ANALOG_FUSE_COARSE_MASK;
+
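+			/* Nudge the coarse/fine analog trim toward the
+			 * threshold, per the original Intel fuse workaround. */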
+			if (coarse > IGP01E1000_ANALOG_FUSE_COARSE_THRESH) {
+				coarse -= IGP01E1000_ANALOG_FUSE_COARSE_10;
+				fine -= IGP01E1000_ANALOG_FUSE_FINE_1;
+			} else if (coarse ==
+			           IGP01E1000_ANALOG_FUSE_COARSE_THRESH)
+				fine -= IGP01E1000_ANALOG_FUSE_FINE_10;
+
+			fused = (fused & IGP01E1000_ANALOG_FUSE_POLY_MASK) |
+			        (fine & IGP01E1000_ANALOG_FUSE_FINE_MASK) |
+			        (coarse & IGP01E1000_ANALOG_FUSE_COARSE_MASK);
+
+			e1000_write_phy_reg(hw,
+			                   IGP01E1000_ANALOG_FUSE_CONTROL,
+			                   fused);
+			e1000_write_phy_reg(hw,
+			              IGP01E1000_ANALOG_FUSE_BYPASS,
+			              IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL);
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_script_state_82541 - Enable/Disable PHY init script
+ *  @hw: pointer to the HW structure
+ *  @state: boolean value used to enable/disable PHY init script
+ *
+ *  Allows the driver to enable/disable the PHY init script, if the PHY is an
+ *  IGP PHY.  This is a function pointer entry point called by the api module.
+ **/
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82541 *dev_spec;
+
+	DEBUGFUNC("e1000_init_script_state_82541");
+
+	if (hw->phy.type != e1000_phy_igp) {
+		DEBUGOUT("Initialization script not necessary.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82541 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	dev_spec->phy_init_script = state;
+
+out:
+	return;
+}
+
+/**
+ * e1000_power_down_phy_copper_82541 - Remove link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off the link
+ * during a driver unload when Wake on LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82541(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(E1000_READ_REG(hw, E1000_MANC) & E1000_MANC_SMBUS_EN))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82541 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82541(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82541");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
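+	/* The statistics registers are clear-on-read; the values are discarded */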
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h
new file mode 100644
index 0000000..8588606
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82541.h
@@ -0,0 +1,84 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82541_H_
+#define _E1000_82541_H_
+
+#define NVM_WORD_SIZE_BASE_SHIFT_82541 (NVM_WORD_SIZE_BASE_SHIFT + 1)
+
+#define IGP01E1000_PHY_CHANNEL_NUM                    4
+
+#define IGP01E1000_PHY_AGC_A                     0x1172
+#define IGP01E1000_PHY_AGC_B                     0x1272
+#define IGP01E1000_PHY_AGC_C                     0x1472
+#define IGP01E1000_PHY_AGC_D                     0x1872
+
+#define IGP01E1000_PHY_AGC_PARAM_A               0x1171
+#define IGP01E1000_PHY_AGC_PARAM_B               0x1271
+#define IGP01E1000_PHY_AGC_PARAM_C               0x1471
+#define IGP01E1000_PHY_AGC_PARAM_D               0x1871
+
+#define IGP01E1000_PHY_EDAC_MU_INDEX             0xC000
+#define IGP01E1000_PHY_EDAC_SIGN_EXT_9_BITS      0x8000
+
+#define IGP01E1000_PHY_DSP_RESET                 0x1F33
+
+#define IGP01E1000_PHY_DSP_FFE                   0x1F35
+#define IGP01E1000_PHY_DSP_FFE_CM_CP             0x0069
+#define IGP01E1000_PHY_DSP_FFE_DEFAULT           0x002A
+
+#define IGP01E1000_IEEE_FORCE_GIG                0x0140
+#define IGP01E1000_IEEE_RESTART_AUTONEG          0x3300
+
+#define IGP01E1000_AGC_LENGTH_SHIFT                   7
+#define IGP01E1000_AGC_RANGE                         10
+
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_20                20
+#define FFE_IDLE_ERR_COUNT_TIMEOUT_100              100
+
+#define IGP01E1000_ANALOG_FUSE_STATUS            0x20D0
+#define IGP01E1000_ANALOG_SPARE_FUSE_STATUS      0x20D1
+#define IGP01E1000_ANALOG_FUSE_CONTROL           0x20DC
+#define IGP01E1000_ANALOG_FUSE_BYPASS            0x20DE
+
+#define IGP01E1000_ANALOG_SPARE_FUSE_ENABLED     0x0100
+#define IGP01E1000_ANALOG_FUSE_FINE_MASK         0x0F80
+#define IGP01E1000_ANALOG_FUSE_COARSE_MASK       0x0070
+#define IGP01E1000_ANALOG_FUSE_COARSE_THRESH     0x0040
+#define IGP01E1000_ANALOG_FUSE_COARSE_10         0x0010
+#define IGP01E1000_ANALOG_FUSE_FINE_1            0x0080
+#define IGP01E1000_ANALOG_FUSE_FINE_10           0x0500
+#define IGP01E1000_ANALOG_FUSE_POLY_MASK         0xF000
+#define IGP01E1000_ANALOG_FUSE_ENABLE_SW_CONTROL 0x0002
+
+#define IGP01E1000_MSE_CHANNEL_D                 0x000F
+#define IGP01E1000_MSE_CHANNEL_C                 0x00F0
+#define IGP01E1000_MSE_CHANNEL_B                 0x0F00
+#define IGP01E1000_MSE_CHANNEL_A                 0xF000
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c
new file mode 100644
index 0000000..55fd6d6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82542.c
@@ -0,0 +1,543 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82542 (rev 1 & 2)
+ */
+
+#include "e1000_api.h"
+
+static s32  e1000_init_phy_params_82542(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82542(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82542(struct e1000_hw *hw);
+static s32  e1000_get_bus_info_82542(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82542(struct e1000_hw *hw);
+static s32  e1000_init_hw_82542(struct e1000_hw *hw);
+static s32  e1000_setup_link_82542(struct e1000_hw *hw);
+static s32  e1000_led_on_82542(struct e1000_hw *hw);
+static s32  e1000_led_off_82542(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw);
+
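+/*
+ * Per-device private state.  dma_fairness selects whether init_hw sets
+ * the E1000_CTRL_PRIOR bit (see e1000_init_hw_82542 below).
+ */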
+struct e1000_dev_spec_82542 {
+	bool dma_fairness;
+};
+
+/**
+ *  e1000_init_phy_params_82542 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82542(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82542");
+
+	phy->type               = e1000_phy_none;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82542 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82542(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+
+	DEBUGFUNC("e1000_init_nvm_params_82542");
+
+	nvm->address_bits       =  6;
+	nvm->delay_usec         = 50;
+	nvm->opcode_bits        =  3;
+	nvm->type               = e1000_nvm_eeprom_microwire;
+	nvm->word_size          = 64;
+
+	/* Function Pointers */
+	func->read_nvm          = e1000_read_nvm_microwire;
+	func->release_nvm       = e1000_stop_nvm;
+	func->write_nvm         = e1000_write_nvm_microwire;
+	func->update_nvm        = e1000_update_nvm_checksum_generic;
+	func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82542 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82542(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_82542");
+
+	/* Set media type */
+	hw->phy.media_type = e1000_media_type_fiber;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_82542;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82542;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82542;
+	/* link setup */
+	func->setup_link = e1000_setup_link_82542;
+	/* phy/fiber/serdes setup */
+	func->setup_physical_interface = e1000_setup_fiber_serdes_link_generic;
+	/* check for link */
+	func->check_for_link = e1000_check_for_fiber_link_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_82542;
+	func->led_off = e1000_led_off_82542;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82542;
+	/* link info */
+	func->get_link_up_info = e1000_get_speed_and_duplex_fiber_serdes_generic;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82542);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82542 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82542(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82542");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82542;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82542;
+	hw->func.init_phy_params = e1000_init_phy_params_82542;
+}
+
+/**
+ *  e1000_get_bus_info_82542 - Obtain bus information for adapter
+ *  @hw: pointer to the HW structure
+ *
+ *  This will obtain information about the HW bus to which the
+ *  adapter is attached and store it in the hw structure.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_get_bus_info_82542(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_get_bus_info_82542");
+
+	hw->bus.type = e1000_bus_type_pci;
+	hw->bus.speed = e1000_bus_speed_unknown;
+	hw->bus.width = e1000_bus_width_unknown;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_hw_82542 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82542(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val = E1000_SUCCESS;
+	u32 ctrl, icr;
+
+	DEBUGFUNC("e1000_reset_hw_82542");
+
+	if (hw->revision_id == E1000_REVISION_2) {
+		DEBUGOUT("Disabling MWI on 82542 rev 2\n");
+		e1000_pci_clear_mwi(hw);
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete before
+	 * resetting the device
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to 82542/82543 MAC\n");
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	e1000_reload_nvm(hw);
+	msec_delay(2);
+
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	if (hw->revision_id == E1000_REVISION_2) {
+		if (bus->pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+			e1000_pci_set_mwi(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82542 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_hw_82542(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82542 *dev_spec;
+	s32 ret_val = E1000_SUCCESS;
+	u32 ctrl;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_82542");
+
+	dev_spec = (struct e1000_dev_spec_82542 *)hw->dev_spec;
+
+	/* Disabling VLAN filtering */
+	E1000_WRITE_REG(hw, E1000_VET, 0);
+	e1000_clear_vfta(hw);
+
+	/* For 82542 (rev 2.0), disable MWI and put the receiver into reset */
+	if (hw->revision_id == E1000_REVISION_2) {
+		DEBUGOUT("Disabling MWI on 82542 rev 2.0\n");
+		e1000_pci_clear_mwi(hw);
+		E1000_WRITE_REG(hw, E1000_RCTL, E1000_RCTL_RST);
+		E1000_WRITE_FLUSH(hw);
+		msec_delay(5);
+	}
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* For 82542 (rev 2.0), take the receiver out of reset and enable MWI */
+	if (hw->revision_id == E1000_REVISION_2) {
+		E1000_WRITE_REG(hw, E1000_RCTL, 0);
+		E1000_WRITE_FLUSH(hw);
+		msec_delay(1);
+		if (hw->bus.pci_cmd_word & CMD_MEM_WRT_INVALIDATE)
+			e1000_pci_set_mwi(hw);
+	}
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/*
+	 * Set the PCI priority bit correctly in the CTRL register.  This
+	 * determines if the adapter gives priority to receives, or if it
+	 * gives equal priority to transmits and receives.
+	 */
+	if (dev_spec->dma_fairness) {
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+	}
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link_82542(hw);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82542(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_link_82542 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.  This is a function
+ *  pointer entry point called by the api module.
+ **/
+static s32 e1000_setup_link_82542(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_link_82542");
+
+	ret_val = e1000_set_default_fc_generic(hw);
+	if (ret_val)
+		goto out;
+
+	hw->fc.type &= ~e1000_fc_tx_pause;
+
+	if (mac->report_tx_early == 1)
+		hw->fc.type &= ~e1000_fc_rx_pause;
+
+	/*
+	 * We want to save off the original Flow Control configuration just in
+	 * case we get disconnected and then reconnected into a different hub
+	 * or switch with different Flow Control capabilities.
+	 */
+	hw->fc.original_type = hw->fc.type;
+
+	DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type);
+
+	/* Call the necessary subroutine to configure the link. */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	DEBUGOUT("Initializing Flow Control address, type and timer regs\n");
+
+	E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+	E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_led_on_82542 - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_on_82542(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_on_82542");
+
+	ctrl |= E1000_CTRL_SWDPIN0;
+	ctrl |= E1000_CTRL_SWDPIO0;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_82542 - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_off_82542(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_off_82542");
+
+	ctrl &= ~E1000_CTRL_SWDPIN0;
+	ctrl |= E1000_CTRL_SWDPIO0;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_translate_register_82542 - Translate the proper register offset
+ *  @reg: e1000 register to be read
+ *
+ *  Registers in 82542 are located at different offsets than other adapters
+ *  even though they function in the same manner.  This function takes in
+ *  the name of the register to read and returns the correct offset for
+ *  82542 silicon.
+ **/
+u32 e1000_translate_register_82542(u32 reg)
+{
+	/*
+	 * Some of the 82542 registers are located at different
+	 * offsets than they are in newer adapters.
+	 * Despite the difference in location, the registers
+	 * function in the same manner.
+	 */
+	switch (reg) {
+	case E1000_RA:
+		reg = 0x00040;
+		break;
+	case E1000_RDTR:
+		reg = 0x00108;
+		break;
+	case E1000_RDBAL(0):
+		reg = 0x00110;
+		break;
+	case E1000_RDBAH(0):
+		reg = 0x00114;
+		break;
+	case E1000_RDLEN(0):
+		reg = 0x00118;
+		break;
+	case E1000_RDH(0):
+		reg = 0x00120;
+		break;
+	case E1000_RDT(0):
+		reg = 0x00128;
+		break;
+	case E1000_RDBAL(1):
+		reg = 0x00138;
+		break;
+	case E1000_RDBAH(1):
+		reg = 0x0013C;
+		break;
+	case E1000_RDLEN(1):
+		reg = 0x00140;
+		break;
+	case E1000_RDH(1):
+		reg = 0x00148;
+		break;
+	case E1000_RDT(1):
+		reg = 0x00150;
+		break;
+	case E1000_FCRTH:
+		reg = 0x00160;
+		break;
+	case E1000_FCRTL:
+		reg = 0x00168;
+		break;
+	case E1000_MTA:
+		reg = 0x00200;
+		break;
+	case E1000_TDBAL(0):
+		reg = 0x00420;
+		break;
+	case E1000_TDBAH(0):
+		reg = 0x00424;
+		break;
+	case E1000_TDLEN(0):
+		reg = 0x00428;
+		break;
+	case E1000_TDH(0):
+		reg = 0x00430;
+		break;
+	case E1000_TDT(0):
+		reg = 0x00438;
+		break;
+	case E1000_TIDV:
+		reg = 0x00440;
+		break;
+	case E1000_VFTA:
+		reg = 0x00600;
+		break;
+	case E1000_TDFH:
+		reg = 0x08010;
+		break;
+	case E1000_TDFT:
+		reg = 0x08018;
+		break;
+	default:
+		break;
+	}
+
+	return reg;
+}
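+
+/*
+ * Example: an E1000_RDT(0) access resolves to offset 0x00128 on 82542
+ * silicon rather than the offset used by later MACs.  Callers are
+ * expected to route 82542 register accesses through this helper; the
+ * exact call sites live in the register access layer, not in this file.
+ */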
+
+/**
+ *  e1000_clear_hw_cntrs_82542 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82542(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82542");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
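+	/*
+	 * These counters are clear-on-read; the volatile temp keeps the
+	 * compiler from optimizing the discarded reads away.
+	 */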
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c
new file mode 100644
index 0000000..5ff9a58
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.c
@@ -0,0 +1,1654 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82543
+ * e1000_82544
+ */
+
+#include "e1000_api.h"
+#include "e1000_82543.h"
+
+static s32  e1000_init_phy_params_82543(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82543(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82543(struct e1000_hw *hw);
+static s32  e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+                                     u16 *data);
+static s32  e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset,
+                                      u16 data);
+static s32  e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw);
+static s32  e1000_phy_hw_reset_82543(struct e1000_hw *hw);
+static s32  e1000_reset_hw_82543(struct e1000_hw *hw);
+static s32  e1000_init_hw_82543(struct e1000_hw *hw);
+static s32  e1000_setup_link_82543(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82543(struct e1000_hw *hw);
+static s32  e1000_setup_fiber_link_82543(struct e1000_hw *hw);
+static s32  e1000_check_for_copper_link_82543(struct e1000_hw *hw);
+static s32  e1000_check_for_fiber_link_82543(struct e1000_hw *hw);
+static s32  e1000_led_on_82543(struct e1000_hw *hw);
+static s32  e1000_led_off_82543(struct e1000_hw *hw);
+static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset,
+                                   u32 value);
+static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value);
+static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw);
+static s32  e1000_config_mac_to_phy_82543(struct e1000_hw *hw);
+static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw);
+static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+static s32  e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw);
+static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl);
+static u16  e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw);
+static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+                                           u16 count);
+static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw);
+static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state);
+
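+/*
+ * Per-device private state.  tbi_compatibility is a bitmask of
+ * TBI_COMPAT_ENABLED and TBI_SBP_ENABLED (see the helpers below);
+ * dma_fairness gates the E1000_CTRL_PRIOR setting in init_hw.
+ */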
+struct e1000_dev_spec_82543 {
+	u32  tbi_compatibility;
+	bool dma_fairness;
+	bool init_phy_disabled;
+};
+
+/**
+ *  e1000_init_phy_params_82543 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82543(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82543");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type               = e1000_phy_none;
+		goto out;
+	} else {
+		func->power_up_phy      = e1000_power_up_phy_copper;
+		func->power_down_phy    = e1000_power_down_phy_copper;
+	}
+
+	phy->addr                       = 1;
+	phy->autoneg_mask               = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us             = 10000;
+	phy->type                       = e1000_phy_m88;
+
+	/* Function Pointers */
+	func->check_polarity            = e1000_check_polarity_m88;
+	func->commit_phy                = e1000_phy_sw_reset_generic;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_82543;
+	func->get_cable_length          = e1000_get_cable_length_m88;
+	func->get_cfg_done              = e1000_get_cfg_done_generic;
+	func->read_phy_reg              = (hw->mac.type == e1000_82543)
+	                                  ? e1000_read_phy_reg_82543
+	                                  : e1000_read_phy_reg_m88;
+	func->reset_phy                 = (hw->mac.type == e1000_82543)
+	                                  ? e1000_phy_hw_reset_82543
+	                                  : e1000_phy_hw_reset_generic;
+	func->write_phy_reg             = (hw->mac.type == e1000_82543)
+	                                  ? e1000_write_phy_reg_82543
+	                                  : e1000_write_phy_reg_m88;
+	func->get_phy_info              = e1000_get_phy_info_m88;
+
+	/*
+	 * The external PHY of the 82543 can be in a funky state.
+	 * Resetting helps us read the PHY registers for acquiring
+	 * the PHY ID.
+	 */
+	if (!e1000_init_phy_disabled_82543(hw)) {
+		ret_val = e1000_phy_hw_reset(hw);
+		if (ret_val) {
+			DEBUGOUT("Resetting PHY during init failed.\n");
+			goto out;
+		}
+		msec_delay(20);
+	}
+
+	ret_val = e1000_get_phy_id(hw);
+	if (ret_val)
+		goto out;
+
+	/* Verify phy id */
+	switch (hw->mac.type) {
+	case e1000_82543:
+		if (phy->id != M88E1000_E_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		break;
+	case e1000_82544:
+		if (phy->id != M88E1000_I_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+		break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82543 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82543(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+
+	DEBUGFUNC("e1000_init_nvm_params_82543");
+
+	nvm->type               = e1000_nvm_eeprom_microwire;
+	nvm->word_size          = 64;
+	nvm->delay_usec         = 50;
+	nvm->address_bits       =  6;
+	nvm->opcode_bits        =  3;
+
+	/* Function Pointers */
+	func->read_nvm          = e1000_read_nvm_microwire;
+	func->update_nvm        = e1000_update_nvm_checksum_generic;
+	func->valid_led_default = e1000_valid_led_default_generic;
+	func->validate_nvm      = e1000_validate_nvm_checksum_generic;
+	func->write_nvm         = e1000_write_nvm_microwire;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82543 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_init_mac_params_82543");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82544EI_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pci_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82543;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82543;
+	/* link setup */
+	func->setup_link = e1000_setup_link_82543;
+	/* physical interface setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_82543
+	                : e1000_setup_fiber_link_82543;
+	/* check for link */
+	func->check_for_link =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_check_for_copper_link_82543
+	                : e1000_check_for_fiber_link_82543;
+	/* link info */
+	func->get_link_up_info =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_get_speed_and_duplex_copper_generic
+	                : e1000_get_speed_and_duplex_fiber_serdes_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_82543;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_82543;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_82543;
+	func->led_off = e1000_led_off_82543;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82543;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82543);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+	if (ret_val)
+		goto out;
+
+	/* Set tbi compatibility */
+	if ((hw->mac.type != e1000_82543) ||
+	    (hw->phy.media_type == e1000_media_type_fiber))
+		e1000_set_tbi_compatibility_82543(hw, FALSE);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82543 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82543(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82543");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82543;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82543;
+	hw->func.init_phy_params = e1000_init_phy_params_82543;
+}
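+
+/*
+ * Bring-up sketch (the exact call sites live in the api module, not in
+ * this file): install the hooks, then run them with init_mac_params ahead
+ * of init_phy_params, since the latter keys off hw->phy.media_type, which
+ * the former sets.
+ *
+ *	e1000_init_function_pointers_82543(hw);
+ *	hw->func.init_mac_params(hw);
+ *	hw->func.init_nvm_params(hw);
+ *	hw->func.init_phy_params(hw);
+ */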
+
+/**
+ *  e1000_tbi_compatibility_enabled_82543 - Returns TBI compat status
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns the current status of 10-bit Interface (TBI) compatibility
+ *  (enabled/disabled).
+ **/
+static bool e1000_tbi_compatibility_enabled_82543(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+	bool state = FALSE;
+
+	DEBUGFUNC("e1000_tbi_compatibility_enabled_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	state = (dev_spec->tbi_compatibility & TBI_COMPAT_ENABLED)
+	        ? TRUE : FALSE;
+
+out:
+	return state;
+}
+
+/**
+ *  e1000_set_tbi_compatibility_82543 - Set TBI compatibility
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable TBI compatibility
+ *
+ *  Enables or disables 10-bit Interface (TBI) compatibility.
+ **/
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+
+	DEBUGFUNC("e1000_set_tbi_compatibility_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	if (state)
+		dev_spec->tbi_compatibility |= TBI_COMPAT_ENABLED;
+	else
+		dev_spec->tbi_compatibility &= ~TBI_COMPAT_ENABLED;
+
+out:
+	return;
+}
+
+/**
+ *  e1000_tbi_sbp_enabled_82543 - Returns TBI SBP status
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns the current status of 10-bit Interface (TBI) store bad packet (SBP)
+ *  (enabled/disabled).
+ **/
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+	bool state = FALSE;
+
+	DEBUGFUNC("e1000_tbi_sbp_enabled_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		DEBUGOUT("TBI compatibility workaround for 82543 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	state = (dev_spec->tbi_compatibility & TBI_SBP_ENABLED)
+	        ? TRUE : FALSE;
+
+out:
+	return state;
+}
+
+/**
+ *  e1000_set_tbi_sbp_82543 - Set TBI SBP
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable TBI store bad packet
+ *
+ *  Enables or disables 10-bit Interface (TBI) store bad packet (SBP).
+ **/
+static void e1000_set_tbi_sbp_82543(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+
+	DEBUGFUNC("e1000_set_tbi_sbp_82543");
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (state && e1000_tbi_compatibility_enabled_82543(hw))
+		dev_spec->tbi_compatibility |= TBI_SBP_ENABLED;
+	else
+		dev_spec->tbi_compatibility &= ~TBI_SBP_ENABLED;
+
+	return;
+}
+
+/**
+ *  e1000_init_phy_disabled_82543 - Returns init PHY status
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns the current status of whether PHY initialization is disabled.
+ *  True if PHY initialization is disabled, else false.
+ **/
+static bool e1000_init_phy_disabled_82543(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82543 *dev_spec;
+	bool ret_val;
+
+	DEBUGFUNC("e1000_init_phy_disabled_82543");
+
+	if (hw->mac.type != e1000_82543) {
+		ret_val = FALSE;
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = FALSE;
+		goto out;
+	}
+
+	ret_val = dev_spec->init_phy_disabled;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_tbi_adjust_stats_82543 - Adjust stats when TBI enabled
+ *  @hw: pointer to the HW structure
+ *  @stats: Struct containing statistic register values
+ *  @frame_len: The length of the frame in question
+ *  @mac_addr: The Ethernet destination address of the frame in question
+ *  @max_frame_size: The maximum frame size
+ *
+ *  Adjusts the statistics counters when a frame is accepted by TBI_ACCEPT.
+ **/
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+                                  struct e1000_hw_stats *stats, u32 frame_len,
+                                  u8 *mac_addr, u32 max_frame_size)
+{
+	if (!(e1000_tbi_sbp_enabled_82543(hw)))
+		goto out;
+
+	/* First adjust the frame length for the extra byte on the end. */
+	frame_len--;
+	/*
+	 * We need to adjust the statistics counters, since the hardware
+	 * counters overcount this packet as a CRC error and undercount
+	 * the packet as a good packet
+	 */
+	/* This packet should not be counted as a CRC error.    */
+	stats->crcerrs--;
+	/* This packet does count as a Good Packet Received.    */
+	stats->gprc++;
+
+	/* Adjust the Good Octets received counters             */
+	stats->gorc += frame_len;
+
+	/*
+	 * Is this a broadcast or multicast?  Check broadcast first,
+	 * since the test for a multicast frame will test positive on
+	 * a broadcast frame.
+	 */
+	if ((mac_addr[0] == 0xff) && (mac_addr[1] == 0xff))
+		/* Broadcast packet */
+		stats->bprc++;
+	else if (*mac_addr & 0x01)
+		/* Multicast packet */
+		stats->mprc++;
+
+	/*
+	 * If the adjusted frame length equals the maximum, the hardware has
+	 * overcounted the number of oversize frames.
+	 */
+	if ((frame_len == max_frame_size) && (stats->roc > 0))
+		stats->roc--;
+
+	/*
+	 * Adjust the bin counters when the extra byte put the frame in the
+	 * wrong bin. Remember that the frame_len was adjusted above.
+	 */
+	if (frame_len == 64) {
+		stats->prc64++;
+		stats->prc127--;
+	} else if (frame_len == 127) {
+		stats->prc127++;
+		stats->prc255--;
+	} else if (frame_len == 255) {
+		stats->prc255++;
+		stats->prc511--;
+	} else if (frame_len == 511) {
+		stats->prc511++;
+		stats->prc1023--;
+	} else if (frame_len == 1023) {
+		stats->prc1023++;
+		stats->prc1522--;
+	} else if (frame_len == 1522) {
+		stats->prc1522++;
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_read_phy_reg_82543 - Read PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY at offset and stores the information read to data.
+ **/
+static s32 e1000_read_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	u32 mdic;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_phy_reg_82543");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * We must first send a preamble through the MDIO pin to signal the
+	 * beginning of an MII instruction.  This is done by sending 32
+	 * consecutive "1" bits.
+	 */
+	e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+	/*
+	 * Now combine the next few fields that are required for a read
+	 * operation.  We use this method instead of calling the
+	 * e1000_shift_out_mdi_bits routine five different times.  The format
+	 * of an MII read instruction consists of a shift out of 14 bits and
+	 * is defined as follows:
+	 * 	<Preamble><SOF><Op Code><Phy Addr><Offset>
+	 * followed by a shift in of 18 bits.  The first two bits shifted in
+	 * are TurnAround bits used to avoid contention on the MDIO pin when a
+	 * READ operation is performed.  These two bits are thrown away,
+	 * followed by a shift in of 16 bits which contain the desired data.
+	 */
+	mdic = (offset | (hw->phy.addr << 5) |
+		(PHY_OP_READ << 10) | (PHY_SOF << 12));
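+	/*
+	 * Resulting 14-bit frame, per the shifts above: bits [13:12] SOF,
+	 * [11:10] read opcode, [9:5] PHY address, [4:0] register offset.
+	 */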
+
+	e1000_shift_out_mdi_bits_82543(hw, mdic, 14);
+
+	/*
+	 * Now that we've shifted out the read command to the MII, we need to
+	 * "shift in" the 16-bit value (18 total bits, counting the turnaround
+	 * bits) of the requested PHY register.
+	 */
+	*data = e1000_shift_in_mdi_bits_82543(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_82543 - Write PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be written
+ *  @data: pointer to the data to be written at offset
+ *
+ *  Writes data to the PHY at offset.
+ **/
+static s32 e1000_write_phy_reg_82543(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	u32 mdic;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_phy_reg_82543");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * We'll need to use the SW defined pins to shift the write command
+	 * out to the PHY. We first send a preamble to the PHY to signal the
+	 * beginning of the MII instruction.  This is done by sending 32
+	 * consecutive "1" bits.
+	 */
+	e1000_shift_out_mdi_bits_82543(hw, PHY_PREAMBLE, PHY_PREAMBLE_SIZE);
+
+	/*
+	 * Now combine the remaining required fields that will indicate a
+	 * write operation. We use this method instead of calling the
+	 * e1000_shift_out_mdi_bits routine for each field in the command. The
+	 * format of a MII write instruction is as follows:
+	 * <Preamble><SOF><Op Code><Phy Addr><Reg Addr><Turnaround><Data>.
+	 */
+	mdic = ((PHY_TURNAROUND) | (offset << 2) | (hw->phy.addr << 7) |
+	        (PHY_OP_WRITE << 12) | (PHY_SOF << 14));
+	mdic <<= 16;
+	mdic |= (u32) data;
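+	/*
+	 * Resulting 32-bit frame, per the shifts above: bits [31:30] SOF,
+	 * [29:28] write opcode, [27:23] PHY address, [22:18] register
+	 * offset, [17:16] turnaround, [15:0] the data word.
+	 */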
+
+	e1000_shift_out_mdi_bits_82543(hw, mdic, 32);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_raise_mdi_clk_82543 - Raise Management Data Input clock
+ *  @hw: pointer to the HW structure
+ *  @ctrl: pointer to the control register
+ *
+ *  Raise the management data input clock by setting the MDC bit in the control
+ *  register.
+ **/
+static void e1000_raise_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+	/*
+	 * Raise the clock input to the Management Data Clock (by setting the
+	 * MDC bit), and then delay a sufficient amount of time.
+	 */
+	E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl | E1000_CTRL_MDC));
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(10);
+}
+
+/**
+ *  e1000_lower_mdi_clk_82543 - Lower Management Data Input clock
+ *  @hw: pointer to the HW structure
+ *  @ctrl: pointer to the control register
+ *
+ *  Lower the management data input clock by clearing the MDC bit in the
+ *  control register.
+ **/
+static void e1000_lower_mdi_clk_82543(struct e1000_hw *hw, u32 *ctrl)
+{
+	/*
+	 * Lower the clock input to the Management Data Clock (by clearing the
+	 * MDC bit), and then delay a sufficient amount of time.
+	 */
+	E1000_WRITE_REG(hw, E1000_CTRL, (*ctrl & ~E1000_CTRL_MDC));
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(10);
+}
+
+/**
+ *  e1000_shift_out_mdi_bits_82543 - Shift data bits out to the PHY
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the PHY
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the PHY.  So, the value in the
+ *  "data" parameter will be shifted out to the PHY one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_mdi_bits_82543(struct e1000_hw *hw, u32 data,
+                                           u16 count)
+{
+	u32 ctrl, mask;
+
+	/*
+	 * We need to shift "count" number of bits out to the PHY.  So, the
+	 * value in the "data" parameter will be shifted out to the PHY one
+	 * bit at a time.  In order to do this, "data" must be broken down
+	 * into bits.
+	 */
+	mask = 0x01;
+	mask <<= (count - 1);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Set MDIO_DIR and MDC_DIR direction bits to be used as output pins. */
+	ctrl |= (E1000_CTRL_MDIO_DIR | E1000_CTRL_MDC_DIR);
+
+	while (mask) {
+		/*
+		 * A "1" is shifted out to the PHY by setting the MDIO bit to
+		 * "1" and then raising and lowering the Management Data Clock.
+		 * A "0" is shifted out to the PHY by setting the MDIO bit to
+		 * "0" and then raising and lowering the clock.
+		 */
+		if (data & mask)
+			ctrl |= E1000_CTRL_MDIO;
+		else
+			ctrl &= ~E1000_CTRL_MDIO;
+
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		E1000_WRITE_FLUSH(hw);
+
+		usec_delay(10);
+
+		e1000_raise_mdi_clk_82543(hw, &ctrl);
+		e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+		mask >>= 1;
+	}
+}
+
+/**
+ *  e1000_shift_in_mdi_bits_82543 - Shift data bits in from the PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  In order to read a register from the PHY, we need to shift 18 bits
+ *  in from the PHY.  Bits are "shifted in" by raising the clock input to
+ *  the PHY (setting the MDC bit), and then reading the value of the data out
+ *  MDIO bit.
+ **/
+static u16 e1000_shift_in_mdi_bits_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	u16 data = 0;
+	u8 i;
+
+	/*
+	 * In order to read a register from the PHY, we need to shift in a
+	 * total of 18 bits from the PHY.  The first two bit (turnaround)
+	 * times are used to avoid contention on the MDIO pin when a read
+	 * operation is performed.  These two bits are ignored by us and
+	 * thrown away.  Bits are "shifted in" by raising the input to the
+	 * Management Data Clock (setting the MDC bit) and then reading the
+	 * value of the MDIO bit.
+	 */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/*
+	 * Clear MDIO_DIR (SWDPIO1) to indicate this bit is to be used as
+	 * input.
+	 */
+	ctrl &= ~E1000_CTRL_MDIO_DIR;
+	ctrl &= ~E1000_CTRL_MDIO;
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+
+	/*
+	 * Raise and lower the clock before reading in the data.  This accounts
+	 * for the turnaround bits.  The first clock occurred when we clocked
+	 * out the last bit of the Register Address.
+	 */
+	e1000_raise_mdi_clk_82543(hw, &ctrl);
+	e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+	for (data = 0, i = 0; i < 16; i++) {
+		data <<= 1;
+		e1000_raise_mdi_clk_82543(hw, &ctrl);
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		/* Check to see if we shifted in a "1". */
+		if (ctrl & E1000_CTRL_MDIO)
+			data |= 1;
+		e1000_lower_mdi_clk_82543(hw, &ctrl);
+	}
+
+	e1000_raise_mdi_clk_82543(hw, &ctrl);
+	e1000_lower_mdi_clk_82543(hw, &ctrl);
+
+	return data;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_82543 - Force speed/duplex for PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the function to force speed and duplex for the m88 PHY, and
+ *  if the PHY is not auto-negotiating and the speed is forced to 10Mbit,
+ *  then call the function for polarity reversal workaround.
+ **/
+static s32 e1000_phy_force_speed_duplex_82543(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_82543");
+
+	ret_val = e1000_phy_force_speed_duplex_m88(hw);
+	if (ret_val)
+		goto out;
+
+	if (!hw->mac.autoneg &&
+	    (hw->mac.forced_speed_duplex & E1000_ALL_10_SPEED))
+		ret_val = e1000_polarity_reversal_workaround_82543(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_polarity_reversal_workaround_82543 - Workaround polarity reversal
+ *  @hw: pointer to the HW structure
+ *
+ *  When forcing link to 10 Full or 10 Half, the PHY can reverse the polarity
+ *  inadvertently.  To work around the issue, we disable the transmitter on
+ *  the PHY until we have established the link partner's link parameters.
+ **/
+static s32 e1000_polarity_reversal_workaround_82543(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 mii_status_reg;
+	u16 i;
+	bool link;
+
+	/* Polarity reversal workaround for forced 10F/10H links. */
+
+	/* Disable the transmitter on the PHY */
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFFF);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * This loop will early-out if the NO link condition has been met.
+	 * In other words, DO NOT use e1000_phy_has_link_generic() here.
+	 */
+	for (i = PHY_FORCE_TIME; i > 0; i--) {
+		/*
+		 * Read the MII Status Register and wait for Link Status bit
+		 * to be clear.
+		 */
+
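+		/*
+		 * The link status bit in PHY_STATUS is latched low, hence
+		 * the back-to-back reads to get its current value.
+		 */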
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if ((mii_status_reg & ~MII_SR_LINK_STATUS) == 0)
+			break;
+		msec_delay_irq(100);
+	}
+
+	/* Recommended delay time after link has been lost */
+	msec_delay_irq(1000);
+
+	/* Now we will re-enable the transmitter on the PHY */
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0019);
+	if (ret_val)
+		goto out;
+	msec_delay_irq(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFFF0);
+	if (ret_val)
+		goto out;
+	msec_delay_irq(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xFF00);
+	if (ret_val)
+		goto out;
+	msec_delay_irq(50);
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0x0000);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_PAGE_SELECT, 0x0000);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Read the MII Status Register and wait for Link Status bit
+	 * to be set.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, PHY_FORCE_TIME, 100000, &link);
+	if (ret_val)
+		goto out;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_82543 - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the PHY_RESET_DIR bit in the extended device control register
+ *  to put the PHY into a reset and waits for completion.  Once the reset
+ *  has been accomplished, the SDP4 data bit is set to take the PHY out
+ *  of reset.  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_phy_hw_reset_82543(struct e1000_hw *hw)
+{
+	struct e1000_functions *func = &hw->func;
+	u32 ctrl_ext;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_phy_hw_reset_82543");
+
+	/*
+	 * Read the Extended Device Control Register, assert the PHY_RESET_DIR
+	 * bit to put the PHY into reset...
+	 */
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_SDP4_DIR;
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP4_DATA;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/* ...then take it out of reset. */
+	ctrl_ext |= E1000_CTRL_EXT_SDP4_DATA;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(150);
+
+	ret_val = func->get_cfg_done(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_82543 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82543(struct e1000_hw *hw)
+{
+	u32 ctrl, icr;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_reset_hw_82543");
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	e1000_set_tbi_sbp_82543(hw, FALSE);
+
+	/*
+	 * Delay to allow any outstanding PCI transactions to complete before
+	 * resetting the device
+	 */
+	msec_delay(10);
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to 82543/82544 MAC\n");
+	if (hw->mac.type == e1000_82543) {
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+	} else {
+		/*
+		 * The 82544 can't ACK the 64-bit write when issuing the
+		 * reset, so use IO-mapping as a workaround.
+		 */
+		E1000_WRITE_REG_IO(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+	}
+
+	/*
+	 * After MAC reset, force reload of NVM to restore power-on
+	 * settings to device.
+	 */
+	e1000_reload_nvm(hw);
+	msec_delay(2);
+
+	/* Masking off and clearing any pending interrupts */
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82543 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 e1000_init_hw_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82543 *dev_spec;
+	u32 ctrl;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_82543");
+
+	dev_spec = (struct e1000_dev_spec_82543 *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Disabling VLAN filtering */
+	E1000_WRITE_REG(hw, E1000_VET, 0);
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/*
+	 * Set the PCI priority bit correctly in the CTRL register.  This
+	 * determines if the adapter gives priority to receives, or if it
+	 * gives equal priority to transmits and receives.
+	 */
+	if (hw->mac.type == e1000_82543 && dev_spec->dma_fairness) {
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PRIOR);
+	}
+
+	e1000_pcix_mmrbc_workaround_generic(hw);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82543(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_link_82543 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM to determine the initial polarity value and write the
+ *  extended device control register with the information before calling
+ *  the generic setup link function, which does the following:
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82543(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+	s32  ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_setup_link_82543");
+
+	/*
+	 * Take the 4 bits from NVM word 0xF that determine the initial
+	 * polarity value for the SW controlled pins, and setup the
+	 * Extended Device Control reg with that info.
+	 * This is needed because one of the SW controlled pins is used for
+	 * signal detection.  So this should be done before phy setup.
+	 */
+	if (hw->mac.type == e1000_82543) {
+		ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+		ctrl_ext = ((data & NVM_WORD0F_SWPDIO_EXT_MASK) <<
+		            NVM_SWDPIO_EXT_SHIFT);
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	}
+
+	ret_val = e1000_setup_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_82543 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we check
+ *  for link; once link is established, collision distance and flow control
+ *  are configured.
+ **/
+static s32 e1000_setup_copper_link_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_setup_copper_link_82543");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL) | E1000_CTRL_SLU;
+	/*
+	 * With 82543, we need to force speed and duplex on the MAC
+	 * equal to what the PHY speed and duplex configuration is.
+	 * In addition, we need to perform a hardware reset on the
+	 * PHY to take it out of reset.
+	 */
+	if (hw->mac.type == e1000_82543) {
+		ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		ret_val = e1000_phy_hw_reset(hw);
+		if (ret_val)
+			goto out;
+		hw->phy.reset_disable = FALSE;
+	} else {
+		ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	}
+
+	/* Set MDI/MDI-X, Polarity Reversal, and downshift settings */
+	ret_val = e1000_copper_link_setup_m88(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		DEBUGOUT("Forcing Speed and Duplex\n");
+		ret_val = e1000_phy_force_speed_duplex_82543(hw);
+		if (ret_val) {
+			DEBUGOUT("Error Forcing Speed and Duplex\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw,
+	                                     COPPER_LINK_UP_LIMIT,
+	                                     10,
+	                                     &link);
+	if (ret_val)
+		goto out;
+
+
+	if (link) {
+		DEBUGOUT("Valid link established!!!\n");
+		/* Config the MAC and PHY after link is up */
+		if (hw->mac.type == e1000_82544) {
+			e1000_config_collision_dist_generic(hw);
+		} else {
+			ret_val = e1000_config_mac_to_phy_82543(hw);
+			if (ret_val)
+				goto out;
+		}
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+	} else {
+		DEBUGOUT("Unable to establish link!!!\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_link_82543 - Setup link for fiber
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber links.  Upon
+ *  successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_link_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_setup_fiber_link_82543");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Take the link out of reset */
+	ctrl &= ~E1000_CTRL_LRST;
+
+	e1000_config_collision_dist_generic(hw);
+
+	ret_val = e1000_commit_fc_settings_generic(hw);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT("Auto-negotiation enabled\n");
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+	msec_delay(1);
+
+	/*
+	 * For these adapters, the SW definable pin 1 is cleared when the
+	 * optics detect a signal.  If we have a signal, then poll for a
+	 * "Link-Up" indication.
+	 */
+	if (!(E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+	} else {
+		DEBUGOUT("No signal detected\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_copper_link_82543 - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the phy for link, if link exists, do the following:
+ *   - check for downshift
+ *   - do polarity workaround (if necessary)
+ *   - configure collision distance
+ *   - configure flow control after link up
+ *   - configure tbi compatibility
+ **/
+static s32 e1000_check_for_copper_link_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 icr, rctl;
+	s32 ret_val;
+	u16 speed, duplex;
+	bool link;
+
+	DEBUGFUNC("e1000_check_for_copper_link_82543");
+
+	if (!mac->get_link_status) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = FALSE;
+
+	e1000_check_downshift_generic(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we can return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		/*
+		 * If speed and duplex are forced to 10H or 10F, then we will
+		 * implement the polarity reversal workaround.  We disable
+		 * interrupts first, and upon returning, restore the device's
+		 * interrupt state to its previous value except for the link
+		 * status change interrupt, which will happen due to the
+		 * execution of this workaround.
+		 */
+		if (mac->forced_speed_duplex & E1000_ALL_10_SPEED) {
+			E1000_WRITE_REG(hw, E1000_IMC, 0xFFFFFFFF);
+			ret_val = e1000_polarity_reversal_workaround_82543(hw);
+			icr = E1000_READ_REG(hw, E1000_ICR);
+			E1000_WRITE_REG(hw, E1000_ICS, (icr & ~E1000_ICS_LSC));
+			E1000_WRITE_REG(hw, E1000_IMS, IMS_ENABLE_MASK);
+		}
+
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * We have a M88E1000 PHY and Auto-Neg is enabled.  If we
+	 * have Si on board that is 82544 or newer, Auto
+	 * Speed Detection takes care of MAC speed/duplex
+	 * configuration.  So we only need to configure Collision
+	 * Distance in the MAC.  Otherwise, we need to force
+	 * speed/duplex on the MAC to the current PHY speed/duplex
+	 * settings.
+	 */
+	if (mac->type == e1000_82544)
+		e1000_config_collision_dist_generic(hw);
+	else {
+		ret_val = e1000_config_mac_to_phy_82543(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring MAC to PHY settings\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000_config_fc_after_link_up_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error configuring flow control\n");
+	}
+
+	/*
+	 * At this point we know that we are on copper and we have
+	 * auto-negotiated link.  These are conditions for checking the link
+	 * partner capability register.  We use the link speed to determine if
+	 * TBI compatibility needs to be turned on or off.  If the link is not
+	 * at gigabit speed, then TBI compatibility is not needed.  If we are
+	 * at gigabit speed, we turn on TBI compatibility.
+	 */
+	if (e1000_tbi_compatibility_enabled_82543(hw)) {
+		ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			DEBUGOUT("Error getting link speed and duplex\n");
+			return ret_val;
+		}
+		if (speed != SPEED_1000) {
+			/*
+			 * If link speed is not set to gigabit speed,
+			 * we do not need to enable TBI compatibility.
+			 */
+			if (e1000_tbi_sbp_enabled_82543(hw)) {
+				/*
+				 * If we previously were in the mode,
+				 * turn it off.
+				 */
+				e1000_set_tbi_sbp_82543(hw, FALSE);
+				rctl = E1000_READ_REG(hw, E1000_RCTL);
+				rctl &= ~E1000_RCTL_SBP;
+				E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+			}
+		} else {
+			/*
+			 * If TBI compatibility was previously off,
+			 * turn it on. For compatibility with a TBI link
+			 * partner, we will store bad packets. Some
+			 * frames have an additional byte on the end and
+			 * will look like CRC errors to the hardware.
+			 */
+			if (!e1000_tbi_sbp_enabled_82543(hw)) {
+				e1000_set_tbi_sbp_82543(hw, TRUE);
+				rctl = E1000_READ_REG(hw, E1000_RCTL);
+				rctl |= E1000_RCTL_SBP;
+				E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+			}
+		}
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_fiber_link_82543 - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+static s32 e1000_check_for_fiber_link_82543(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw, ctrl, status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_check_for_fiber_link_82543");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), the cable is plugged in (we have signal),
+	 * and our link partner is not trying to auto-negotiate with us (we
+	 * are receiving idles or data), we need to force link up. We also
+	 * need to give auto-negotiation time to complete, in case the cable
+	 * was just plugged in. The autoneg_failed flag does this.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 0 == have signal */
+	if ((!(ctrl & E1000_CTRL_SWDPIN1)) &&
+	    (!(status & E1000_STATUS_LU)) &&
+	    (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			ret_val = 0;
+			goto out;
+		}
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring flow control\n");
+			goto out;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = TRUE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_mac_to_phy_82543 - Configure MAC to PHY settings
+ *  @hw: pointer to the HW structure
+ *
+ *  For the 82543 silicon, we need to set the MAC to match the settings
+ *  of the PHY, even if the PHY is auto-negotiating.
+ **/
+static s32 e1000_config_mac_to_phy_82543(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_config_mac_to_phy_82543");
+
+	/* Set the bits to force speed and duplex */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~(E1000_CTRL_SPD_SEL | E1000_CTRL_ILOS);
+
+	/*
+	 * Set up duplex in the Device Control and Transmit Control
+	 * registers depending on negotiated values.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	ctrl &= ~E1000_CTRL_FD;
+	if (phy_data & M88E1000_PSSR_DPLX)
+		ctrl |= E1000_CTRL_FD;
+
+	e1000_config_collision_dist_generic(hw);
+
+	/*
+	 * Set up speed in the Device Control register depending on
+	 * negotiated values.
+	 */
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS)
+		ctrl |= E1000_CTRL_SPD_1000;
+	else if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_100MBS)
+		ctrl |= E1000_CTRL_SPD_100;
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_vfta_82543 - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: the 32-bit offset at which to write the value.
+ *  @value: the 32-bit value to write at location offset.
+ *
+ *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ *  table.
+ **/
+static void e1000_write_vfta_82543(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	u32 temp;
+
+	DEBUGFUNC("e1000_write_vfta_82543");
+
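+	/*
+	 * 82544 special case (mirroring e1000_mta_set_82543() below):
+	 * for an odd offset, save the preceding even entry, write the
+	 * new value, then rewrite the saved entry, flushing after each
+	 * write.
+	 */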
+	if ((hw->mac.type == e1000_82544) && (offset & 1)) {
+		temp = E1000_READ_REG_ARRAY(hw, E1000_VFTA, offset - 1);
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset - 1, temp);
+		E1000_WRITE_FLUSH(hw);
+	} else {
+		e1000_write_vfta_generic(hw, offset, value);
+	}
+}
+
+/**
+ *  e1000_mta_set_82543 - Set multicast filter table address
+ *  @hw: pointer to the HW structure
+ *  @hash_value: determines the MTA register and bit to set
+ *
+ *  The multicast table address is a register array of 32-bit registers.
+ *  The hash_value is used to determine what register the bit is in, the
+ *  current value is read, the new bit is OR'd in and the new value is
+ *  written back into the register.
+ **/
+static void e1000_mta_set_82543(struct e1000_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg, mta, temp;
+
+	DEBUGFUNC("e1000_mta_set_82543");
+
+	hash_reg = (hash_value >> 5);
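+	/* e.g. hash_value 0x65: register index 0x65 >> 5 = 3, bit 0x65 & 0x1F = 5 */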
+
+	/*
+	 * If we are on an 82544 and we are trying to write an odd offset
+	 * in the MTA, save off the previous entry before writing and
+	 * restore the old value after writing.
+	 */
+	if ((hw->mac.type == e1000_82544) && (hash_reg & 1)) {
+		hash_reg &= (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+		mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+		mta |= (1 << hash_bit);
+		temp = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg - 1);
+
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg - 1, temp);
+		E1000_WRITE_FLUSH(hw);
+	} else {
+		e1000_mta_set_generic(hw, hash_value);
+	}
+}
+
+/**
+ *  e1000_led_on_82543 - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_on_82543(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_on_82543");
+
+	if (hw->mac.type == e1000_82544 &&
+	    hw->phy.media_type == e1000_media_type_copper) {
+		/* Clear SW-definable Pin 0 to turn on the LED */
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	} else {
+		/* Fiber 82544 and all 82543 use this method */
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	}
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_82543 - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off.  This is a function pointer entry point
+ *  called by the api module.
+ **/
+static s32 e1000_led_off_82543(struct e1000_hw *hw)
+{
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGFUNC("e1000_led_off_82543");
+
+	if (hw->mac.type == e1000_82544 &&
+	    hw->phy.media_type == e1000_media_type_copper) {
+		/* Set SW-definable Pin 0 to turn off the LED */
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	} else {
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+	}
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82543 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82543(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82543");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h
new file mode 100644
index 0000000..6e6fe82
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82543.h
@@ -0,0 +1,44 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82543_H_
+#define _E1000_82543_H_
+
+#define PHY_PREAMBLE      0xFFFFFFFF
+#define PHY_PREAMBLE_SIZE 32
+#define PHY_SOF           0x1
+#define PHY_OP_READ       0x2
+#define PHY_OP_WRITE      0x1
+#define PHY_TURNAROUND    0x2
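+
+/*
+ * The defines above frame a bit-banged MDIO access over the 82543's
+ * software-definable pins; a clause-22-style frame is (sketch):
+ * <32-bit preamble><SOF><opcode><PHY addr><reg addr><turnaround><data>
+ */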
+
+#define TBI_COMPAT_ENABLED 0x1 /* Global "knob" for the workaround */
+/* If TBI_COMPAT_ENABLED, then this is the current state (on/off) */
+#define TBI_SBP_ENABLED    0x2
+
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c
new file mode 100644
index 0000000..af32a34
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.c
@@ -0,0 +1,1430 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_82571
+ * e1000_82572
+ * e1000_82573
+ * e1000_82574
+ */
+
+#include "e1000_api.h"
+#include "e1000_82571.h"
+
+static s32  e1000_init_phy_params_82571(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_82571(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_82571(struct e1000_hw *hw);
+static s32  e1000_acquire_nvm_82571(struct e1000_hw *hw);
+static void e1000_release_nvm_82571(struct e1000_hw *hw);
+static s32  e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset,
+                                  u16 words, u16 *data);
+static s32  e1000_update_nvm_checksum_82571(struct e1000_hw *hw);
+static s32  e1000_validate_nvm_checksum_82571(struct e1000_hw *hw);
+static s32  e1000_get_cfg_done_82571(struct e1000_hw *hw);
+static s32  e1000_set_d0_lplu_state_82571(struct e1000_hw *hw,
+                                          bool active);
+static s32  e1000_reset_hw_82571(struct e1000_hw *hw);
+static s32  e1000_init_hw_82571(struct e1000_hw *hw);
+static void e1000_clear_vfta_82571(struct e1000_hw *hw);
+static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
+                                           u8 *mc_addr_list, u32 mc_addr_count,
+                                           u32 rar_used_count, u32 rar_count);
+static s32  e1000_setup_link_82571(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_82571(struct e1000_hw *hw);
+static s32  e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw);
+static s32  e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data);
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw);
+static s32  e1000_get_hw_semaphore_82571(struct e1000_hw *hw);
+static s32  e1000_fix_nvm_checksum_82571(struct e1000_hw *hw);
+static s32  e1000_get_phy_id_82571(struct e1000_hw *hw);
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw);
+static s32  e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+                                       u16 words, u16 *data);
+static s32  e1000_read_mac_addr_82571(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw);
+
+struct e1000_dev_spec_82571 {
+	bool laa_is_present;
+};
+
+/**
+ *  e1000_init_phy_params_82571 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_phy_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_phy_params_82571");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type        = e1000_phy_none;
+		goto out;
+	}
+
+	phy->addr                        = 1;
+	phy->autoneg_mask                = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us              = 100;
+
+	func->acquire_phy                = e1000_get_hw_semaphore_82571;
+	func->check_polarity             = e1000_check_polarity_igp;
+	func->check_reset_block          = e1000_check_reset_block_generic;
+	func->release_phy                = e1000_put_hw_semaphore_82571;
+	func->reset_phy                  = e1000_phy_hw_reset_generic;
+	func->set_d0_lplu_state          = e1000_set_d0_lplu_state_82571;
+	func->set_d3_lplu_state          = e1000_set_d3_lplu_state_generic;
+	func->power_up_phy               = e1000_power_up_phy_copper;
+	func->power_down_phy             = e1000_power_down_phy_copper_82571;
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		phy->type                = e1000_phy_igp_2;
+		func->get_cfg_done       = e1000_get_cfg_done_82571;
+		func->get_phy_info       = e1000_get_phy_info_igp;
+		func->force_speed_duplex = e1000_phy_force_speed_duplex_igp;
+		func->get_cable_length   = e1000_get_cable_length_igp_2;
+		func->read_phy_reg       = e1000_read_phy_reg_igp;
+		func->write_phy_reg      = e1000_write_phy_reg_igp;
+
+		/* This uses the above function pointers */
+		ret_val = e1000_get_phy_id_82571(hw);
+
+		/* Verify PHY ID */
+		if (phy->id != IGP01E1000_I_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		break;
+	case e1000_82573:
+		phy->type                = e1000_phy_m88;
+		func->get_cfg_done       = e1000_get_cfg_done_generic;
+		func->get_phy_info       = e1000_get_phy_info_m88;
+		func->commit_phy         = e1000_phy_sw_reset_generic;
+		func->force_speed_duplex = e1000_phy_force_speed_duplex_m88;
+		func->get_cable_length   = e1000_get_cable_length_m88;
+		func->read_phy_reg       = e1000_read_phy_reg_m88;
+		func->write_phy_reg      = e1000_write_phy_reg_m88;
+
+		/* This uses the above function pointers */
+		ret_val = e1000_get_phy_id_82571(hw);
+
+		/* Verify PHY ID */
+		if (phy->id != M88E1111_I_PHY_ID) {
+			ret_val = -E1000_ERR_PHY;
+			DEBUGOUT1("PHY ID unknown: type = 0x%08x\n", phy->id);
+			goto out;
+		}
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_82571 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_nvm_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u16 size;
+
+	DEBUGFUNC("e1000_init_nvm_params_82571");
+
+	nvm->opcode_bits          = 8;
+	nvm->delay_usec           = 1;
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size    = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size    = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size    = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ? 16 : 8;
+		break;
+	}
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		if (((eecd >> 15) & 0x3) == 0x3) {
+			nvm->type = e1000_nvm_flash_hw;
+			nvm->word_size = 2048;
+			/*
+			 * Autonomous Flash update bit must be cleared due
+			 * to Flash update issue.
+			 */
+			eecd &= ~E1000_EECD_AUPDEN;
+			E1000_WRITE_REG(hw, E1000_EECD, eecd);
+			break;
+		}
+		fallthrough;
+	default:
+		nvm->type	= e1000_nvm_eeprom_spi;
+		size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+		                  E1000_EECD_SIZE_EX_SHIFT);
+		/*
+		 * Added to a constant, "size" becomes the left-shift value
+		 * for setting word_size.
+		 */
+		size += NVM_WORD_SIZE_BASE_SHIFT;
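+		/*
+		 * e.g. a size field of 4 with an assumed base shift of 6
+		 * gives word_size = 1 << 10 = 1024 words.
+		 */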
+
+		/* EEPROM access above 16k is unsupported */
+		if (size > 14)
+			size = 14;
+		nvm->word_size	= 1 << size;
+		break;
+	}
+
+	/* Function Pointers */
+	func->acquire_nvm       = e1000_acquire_nvm_82571;
+	func->read_nvm          = (hw->mac.type == e1000_82573)
+	                          ? e1000_read_nvm_eerd
+	                          : e1000_read_nvm_spi;
+	func->release_nvm       = e1000_release_nvm_82571;
+	func->update_nvm        = e1000_update_nvm_checksum_82571;
+	func->validate_nvm      = e1000_validate_nvm_checksum_82571;
+	func->valid_led_default = e1000_valid_led_default_82571;
+	func->write_nvm         = e1000_write_nvm_82571;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_init_mac_params_82571 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  This is a function pointer entry point called by the api module.
+ **/
+static s32 e1000_init_mac_params_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_82571");
+
+	/* Set media type */
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+		hw->phy.media_type = e1000_media_type_fiber;
+		break;
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+	case E1000_DEV_ID_82572EI_SERDES:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	default:
+		hw->phy.media_type = e1000_media_type_copper;
+		break;
+	}
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_RAR_ENTRIES;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+	        (E1000_READ_REG(hw, E1000_FWSM) & E1000_FWSM_MODE_MASK)
+	                ? TRUE : FALSE;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_pcie_generic;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_82571;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_82571;
+	/* link setup */
+	func->setup_link = e1000_setup_link_82571;
+	/* physical interface link setup */
+	func->setup_physical_interface =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_setup_copper_link_82571
+	                : e1000_setup_fiber_serdes_link_82571;
+	/* check for link */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		func->check_for_link = e1000_check_for_copper_link_generic;
+		break;
+	case e1000_media_type_fiber:
+		func->check_for_link = e1000_check_for_fiber_link_generic;
+		break;
+	case e1000_media_type_internal_serdes:
+		func->check_for_link = e1000_check_for_serdes_link_generic;
+		break;
+	default:
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+	/* check management mode */
+	func->check_mng_mode = e1000_check_mng_mode_generic;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_82571;
+	/* writing VFTA */
+	func->write_vfta = e1000_write_vfta_generic;
+	/* clearing VFTA */
+	func->clear_vfta = e1000_clear_vfta_82571;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* read mac address */
+	func->read_mac_addr = e1000_read_mac_addr_82571;
+	/* blink LED */
+	func->blink_led = e1000_blink_led_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_generic;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_generic;
+	func->led_off = e1000_led_off_generic;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_82571;
+	/* link info */
+	func->get_link_up_info =
+	        (hw->phy.media_type == e1000_media_type_copper)
+	                ? e1000_get_speed_and_duplex_copper_generic
+	                : e1000_get_speed_and_duplex_fiber_serdes_generic;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_82571);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_82571 - Init func ptrs.
+ *  @hw: pointer to the HW structure
+ *
+ *  The only function explicitly called by the api module to initialize
+ *  all function pointers and parameters.
+ **/
+void e1000_init_function_pointers_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_82571");
+
+	hw->func.init_mac_params = e1000_init_mac_params_82571;
+	hw->func.init_nvm_params = e1000_init_nvm_params_82571;
+	hw->func.init_phy_params = e1000_init_phy_params_82571;
+}
+
+/**
+ *  e1000_get_phy_id_82571 - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+static s32 e1000_get_phy_id_82571(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_get_phy_id_82571");
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * The 82571 firmware may still be configuring the PHY.
+		 * In this case, we cannot access the PHY until the
+		 * configuration is done.  So we explicitly set the
+		 * PHY ID.
+		 */
+		phy->id = IGP01E1000_I_PHY_ID;
+		break;
+	case e1000_82573:
+		ret_val = e1000_get_phy_id(hw);
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_hw_semaphore_82571 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 e1000_get_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = E1000_SUCCESS;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	DEBUGFUNC("e1000_get_hw_semaphore_82571");
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
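+		/* (If firmware owns it, SWESMBI stays clear and we retry.) */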
+		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		usec_delay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_generic(hw);
+		DEBUGOUT("Driver can't access the NVM\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_put_hw_semaphore_82571 - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+static void e1000_put_hw_semaphore_82571(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	DEBUGFUNC("e1000_put_hw_semaphore_82571");
+
+	swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+	swsm &= ~E1000_SWSM_SWESMBI;
+
+	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ *  e1000_acquire_nvm_82571 - Request for access to the EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  To gain access to the EEPROM, first we must obtain a hardware semaphore.
+ *  Then for non-82573 hardware, set the EEPROM access request bit and wait
+ *  for the EEPROM access grant bit.  If the grant bit is not set, release the
+ *  hardware semaphore.
+ **/
+static s32 e1000_acquire_nvm_82571(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_acquire_nvm_82571");
+
+	ret_val = e1000_get_hw_semaphore_82571(hw);
+	if (ret_val)
+		goto out;
+
+	if (hw->mac.type != e1000_82573)
+		ret_val = e1000_acquire_nvm_generic(hw);
+
+	if (ret_val)
+		e1000_put_hw_semaphore_82571(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_release_nvm_82571 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+static void e1000_release_nvm_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_release_nvm_82571");
+
+	e1000_release_nvm_generic(hw);
+	e1000_put_hw_semaphore_82571(hw);
+}
+
+/**
+ *  e1000_write_nvm_82571 - Write to EEPROM using appropriate interface
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  For non-82573 silicon, write data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_82571(struct e1000_hw *hw, u16 offset, u16 words,
+                                 u16 *data)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_nvm_82571");
+
+	switch (hw->mac.type) {
+	case e1000_82573:
+		ret_val = e1000_write_nvm_eewr_82571(hw, offset, words, data);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+		ret_val = e1000_write_nvm_spi(hw, offset, words, data);
+		break;
+	default:
+		ret_val = -E1000_ERR_NVM;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_82571 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+static s32 e1000_update_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	u32 eecd;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_82571");
+
+	ret_val = e1000_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * If our nvm is an EEPROM, then we're done;
+	 * otherwise, commit the checksum to the flash NVM.
+	 */
+	if (hw->nvm.type != e1000_nvm_flash_hw)
+		goto out;
+
+	/* Check for pending operations. */
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		msec_delay(1);
+		if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Reset the firmware if using STM opcode. */
+	if ((E1000_READ_REG(hw, E1000_FLOP) & 0xFF00) == E1000_STM_OPCODE) {
+		/*
+		 * The enabling of and the actual reset must be done
+		 * in two write cycles.
+		 */
+		E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET_ENABLE);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG(hw, E1000_HICR, E1000_HICR_FW_RESET);
+	}
+
+	/* Commit the write to flash */
+	eecd = E1000_READ_REG(hw, E1000_EECD) | E1000_EECD_FLUPD;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+
+	for (i = 0; i < E1000_FLASH_UPDATES; i++) {
+		msec_delay(1);
+		if ((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_FLUPD) == 0)
+			break;
+	}
+
+	if (i == E1000_FLASH_UPDATES) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_82571 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 e1000_validate_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_validate_nvm_checksum_82571");
+
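+	/* Flash-based NVM may need the one-time checksum fix-up first. */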
+	if (hw->nvm.type == e1000_nvm_flash_hw)
+		e1000_fix_nvm_checksum_82571(hw);
+
+	return e1000_validate_nvm_checksum_generic(hw);
+}
+
+/**
+ *  e1000_write_nvm_eewr_82571 - Write to EEPROM for 82573 silicon
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  After checking for invalid values, poll the EEPROM to ensure the previous
+ *  command has completed before trying to write the next word.  After the
+ *  write, poll for completion.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+static s32 e1000_write_nvm_eewr_82571(struct e1000_hw *hw, u16 offset,
+                                      u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eewr = 0;
+	s32 ret_val = 0;
+
+	DEBUGFUNC("e1000_write_nvm_eewr_82571");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * or zero words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eewr = (data[i] << E1000_NVM_RW_REG_DATA) |
+		       ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+		       E1000_NVM_RW_REG_START;
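+		/*
+		 * Each EEWR write packs the data word, the target word
+		 * address and the START bit into one register write;
+		 * e1000_poll_eerd_eewr_done() then waits for the hardware
+		 * to report completion before and after the write.
+		 */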
+
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+
+		E1000_WRITE_REG(hw, E1000_EEWR, eewr);
+
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_WRITE);
+		if (ret_val)
+			break;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_82571 - Poll for configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Polls the management control register until the config done bit is set.
+ **/
+static s32 e1000_get_cfg_done_82571(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_get_cfg_done_82571");
+
+	while (timeout) {
+		if (E1000_READ_REG(hw, E1000_EEMNGCTL) & E1000_NVM_CFG_DONE_PORT_0)
+			break;
+		msec_delay(1);
+		timeout--;
+	}
+	if (!timeout) {
+		DEBUGOUT("MNG configuration cycle has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_82571 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When activating LPLU
+ *  this function also disables smart speed and vice versa.  LPLU will not be
+ *  activated unless the device autonegotiation advertisement is 10,
+ *  10/100, or 10/100/1000 at all duplexes.  This is a function
+ *  pointer entry point only called by PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_82571(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d0_lplu_state_82571");
+
+	ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (active) {
+		data |= IGP02E1000_PM_D0_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             &data);
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP01E1000_PHY_PORT_CONFIG,
+		                              data);
+		if (ret_val)
+			goto out;
+	} else {
+		data &= ~IGP02E1000_PM_D0_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_82571 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 e1000_reset_hw_82571(struct e1000_hw *hw)
+{
+	u32 ctrl, extcnf_ctrl, ctrl_ext, icr;
+	s32 ret_val;
+	u16 i = 0;
+
+	DEBUGFUNC("e1000_reset_hw_82571");
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when the MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/*
+	 * Must acquire the MDIO ownership before MAC reset.
+	 * Ownership defaults to firmware after a reset.
+	 */
+	if (hw->mac.type == e1000_82573) {
+		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+		do {
+			E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+			extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+
+			if (extcnf_ctrl & E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP)
+				break;
+
+			extcnf_ctrl |= E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP;
+
+			msec_delay(2);
+			i++;
+		} while (i < MDIO_OWNERSHIP_TIMEOUT);
+	}
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	DEBUGOUT("Issuing a global reset to MAC\n");
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	if (hw->nvm.type == e1000_nvm_flash_hw) {
+		usec_delay(10);
+		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val)
+		/* We don't want to continue accessing MAC registers. */
+		goto out;
+
+	/*
+	 * Phy configuration from NVM just starts after EECD_AUTO_RD is set.
+	 * Need to wait for Phy configuration completion before accessing
+	 * NVM and Phy.
+	 */
+	if (hw->mac.type == e1000_82573)
+		msec_delay(25);
+
+	/* Clear any pending interrupt events. */
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	if (!(e1000_check_alt_mac_addr_generic(hw)))
+		e1000_set_laa_state_82571(hw, TRUE);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_82571 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware, readying it for operation.
+ **/
+static s32 e1000_init_hw_82571(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 reg_data;
+	s32 ret_val;
+	u16 i, rar_count = mac->rar_entry_count;
+
+	DEBUGFUNC("e1000_init_hw_82571");
+
+	e1000_initialize_hw_bits_82571(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	DEBUGOUT("Initializing the IEEE VLAN\n");
+	e1000_clear_vfta(hw);
+
+	/* Setup the receive address. */
+	/*
+	 * If, however, a locally administered address was assigned to the
+	 * 82571, we must reserve a RAR for it to work around an issue where
+	 * resetting one port will reload the MAC on the other port.
+	 */
+	if (e1000_get_laa_state_82571(hw))
+		rar_count--;
+	e1000_init_rx_addrs_generic(hw, rar_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Set the transmit descriptor write-back policy */
+	reg_data = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+	           E1000_TXDCTL_FULL_TX_DESC_WB |
+	           E1000_TXDCTL_COUNT_DESC;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg_data);
+
+	/* ...for both queues. */
+	if (mac->type != e1000_82573) {
+		reg_data = E1000_READ_REG(hw, E1000_TXDCTL(1));
+		reg_data = (reg_data & ~E1000_TXDCTL_WTHRESH) |
+		           E1000_TXDCTL_FULL_TX_DESC_WB |
+		           E1000_TXDCTL_COUNT_DESC;
+		E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg_data);
+	} else {
+		e1000_enable_tx_pkt_filtering(hw);
+		reg_data = E1000_READ_REG(hw, E1000_GCR);
+		reg_data |= E1000_GCR_L1_ACT_WITHOUT_L0S_RX;
+		E1000_WRITE_REG(hw, E1000_GCR, reg_data);
+	}
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_82571(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_82571 - Initialize hardware-dependent bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hardware-dependent bits needed for normal operation.
+ **/
+static void e1000_initialize_hw_bits_82571(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("e1000_initialize_hw_bits_82571");
+
+	if (hw->mac.disable_hw_init_bits)
+		goto out;
+
+	/* Transmit Descriptor Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TARC(0));
+	reg &= ~(0xF << 27); /* 30:27 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg |= (1 << 23) | (1 << 24) | (1 << 25) | (1 << 26);
+		break;
+	default:
+		break;
+	}
+	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TARC(1));
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		reg &= ~((1 << 29) | (1 << 30));
+		reg |= (1 << 22) | (1 << 24) | (1 << 25) | (1 << 26);
+		if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+			reg &= ~(1 << 28);
+		else
+			reg |= (1 << 28);
+		E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+		break;
+	default:
+		break;
+	}
+
+	/* Device Control */
+	if (hw->mac.type == e1000_82573) {
+		reg = E1000_READ_REG(hw, E1000_CTRL);
+		reg &= ~(1 << 29);
+		E1000_WRITE_REG(hw, E1000_CTRL, reg);
+	}
+
+	/* Extended Device Control */
+	if (hw->mac.type == e1000_82573) {
+		reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		reg &= ~(1 << 23);
+		reg |= (1 << 22);
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_clear_vfta_82571 - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+static void e1000_clear_vfta_82571(struct e1000_hw *hw)
+{
+	u32 offset;
+	u32 vfta_value = 0;
+	u32 vfta_offset = 0;
+	u32 vfta_bit_in_reg = 0;
+
+	DEBUGFUNC("e1000_clear_vfta_82571");
+
+	if (hw->mac.type == e1000_82573) {
+		if (hw->mng_cookie.vlan_id != 0) {
+			/*
+			 * The VFTA is a 4096-bit field, each bit
+			 * identifying a single VLAN ID.  The following
+			 * operations determine the 32-bit entry (i.e.
+			 * offset) and bit within it that correspond to
+			 * the VLAN ID of the manageability unit.
+			 */
+			vfta_offset = (hw->mng_cookie.vlan_id >>
+			               E1000_VFTA_ENTRY_SHIFT) &
+			              E1000_VFTA_ENTRY_MASK;
+			vfta_bit_in_reg = 1 << (hw->mng_cookie.vlan_id &
+			                       E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
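+			/*
+			 * e.g. VLAN ID 100 (0x64): entry 100 >> 5 = 3,
+			 * bit 100 & 0x1F = 4, assuming the usual 5-bit
+			 * entry shift and 32-bit entries.
+			 */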
+		}
+	}
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		/*
+		 * If the offset we want to clear is the same offset of the
+		 * manageability VLAN ID, then clear all bits except that of
+		 * the manageability unit.
+		 */
+		vfta_value = (offset == vfta_offset) ? vfta_bit_in_reg : 0;
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, vfta_value);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_update_mc_addr_list_82571 - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.
+ **/
+static void e1000_update_mc_addr_list_82571(struct e1000_hw *hw,
+                                           u8 *mc_addr_list, u32 mc_addr_count,
+                                           u32 rar_used_count, u32 rar_count)
+{
+	DEBUGFUNC("e1000_update_mc_addr_list_82571");
+
+	if (e1000_get_laa_state_82571(hw))
+		rar_count--;
+
+	e1000_update_mc_addr_list_generic(hw, mc_addr_list, mc_addr_count,
+	                                  rar_used_count, rar_count);
+}
+
+/**
+ *  e1000_setup_link_82571 - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_setup_link_82571");
+
+	/*
+	 * 82573 does not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	if (hw->mac.type == e1000_82573)
+		hw->fc.type = e1000_fc_full;
+
+	return e1000_setup_link_generic(hw);
+}
+
+/**
+ *  e1000_setup_copper_link_82571 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we
+ *  check for link; once link is established, collision distance and flow
+ *  control are configured.
+ **/
+static s32 e1000_setup_copper_link_82571(struct e1000_hw *hw)
+{
+	u32 ctrl, led_ctrl;
+	s32  ret_val;
+
+	DEBUGFUNC("e1000_setup_copper_link_82571");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	switch (hw->phy.type) {
+	case e1000_phy_m88:
+		ret_val = e1000_copper_link_setup_m88(hw);
+		break;
+	case e1000_phy_igp_2:
+		ret_val = e1000_copper_link_setup_igp(hw);
+		/* Setup activity LED */
+		led_ctrl = E1000_READ_REG(hw, E1000_LEDCTL);
+		led_ctrl &= IGP_ACTIVITY_LED_MASK;
+		led_ctrl |= (IGP_ACTIVITY_LED_ENABLE | IGP_LED3_MODE);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, led_ctrl);
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_82571 - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes links.
+ *  Upon successful setup, poll for link.
+ **/
+static s32 e1000_setup_fiber_serdes_link_82571(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_setup_fiber_serdes_link_82571");
+
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+		/*
+		 * If SerDes loopback mode is entered, there is no form
+		 * of reset to take the adapter out of that mode.  So we
+		 * have to explicitly take the adapter out of loopback
+		 * mode.  This prevents drivers from twiddling their thumbs
+		 * if another tool failed to take it out of loopback mode.
+		 */
+		E1000_WRITE_REG(hw, E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+		break;
+	default:
+		break;
+	}
+
+	return e1000_setup_fiber_serdes_link_generic(hw);
+}
+
+/**
+ *  e1000_valid_led_default_82571 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 e1000_valid_led_default_82571(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_valid_led_default_82571");
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (hw->mac.type == e1000_82573 &&
+	    *data == ID_LED_RESERVED_F746)
+		*data = ID_LED_DEFAULT_82573;
+	else if (*data == ID_LED_RESERVED_0000 ||
+	         *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT;
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_laa_state_82571 - Get locally administered address state
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieve and return the current locally administered address state.
+ **/
+bool e1000_get_laa_state_82571(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82571 *dev_spec;
+	bool state = FALSE;
+
+	DEBUGFUNC("e1000_get_laa_state_82571");
+
+	if (hw->mac.type != e1000_82571)
+		goto out;
+
+	dev_spec = (struct e1000_dev_spec_82571 *)hw->dev_spec;
+
+	state = dev_spec->laa_is_present;
+
+out:
+	return state;
+}
+
+/**
+ *  e1000_set_laa_state_82571 - Set locally administered address state
+ *  @hw: pointer to the HW structure
+ *  @state: enable/disable locally administered address
+ *
+ *  Enable/Disable the current locally administered address state.
+ **/
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state)
+{
+	struct e1000_dev_spec_82571 *dev_spec;
+
+	DEBUGFUNC("e1000_set_laa_state_82571");
+
+	if (hw->mac.type != e1000_82571)
+		goto out;
+
+	dev_spec = (struct e1000_dev_spec_82571 *)hw->dev_spec;
+
+	dev_spec->laa_is_present = state;
+
+	/* If the workaround is activated... */
+	if (state) {
+		/*
+		 * Hold a copy of the LAA in RAR[14].  This is done so that
+		 * between the time RAR[0] gets clobbered and the time it
+		 * gets fixed, the actual LAA is in one of the RARs and no
+		 * incoming packets directed to this port are dropped.
+		 * Eventually the LAA will be in RAR[0] and RAR[14].
+		 */
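+		/* With the assumed 15 RAR entries, this is RAR[14]. */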
+		e1000_rar_set_generic(hw, hw->mac.addr,
+		                      hw->mac.rar_entry_count - 1);
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_fix_nvm_checksum_82571 - Fix EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies that the EEPROM has completed the update.  After updating the
+ *  EEPROM, we need to check bit 15 in word 0x23 for the checksum fix.  If
+ *  the checksum fix is not implemented, we need to set the bit and update
+ *  the checksum.  Otherwise, if bit 15 is set and the checksum is incorrect,
+ *  we need to return bad checksum.
+ **/
+static s32 e1000_fix_nvm_checksum_82571(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_fix_nvm_checksum_82571");
+
+	if (nvm->type != e1000_nvm_flash_hw)
+		goto out;
+
+	/*
+	 * Check bit 4 of word 10h.  If it is 0, firmware is done updating
+	 * 10h-12h.  Checksum may need to be fixed.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x10, 1, &data);
+	if (ret_val)
+		goto out;
+
+	if (!(data & 0x10)) {
+		/*
+		 * Read 0x23 and check bit 15.  This bit is a 1
+		 * when the checksum has already been fixed.  If
+		 * the checksum is still wrong and this bit is a
+		 * 1, we need to return bad checksum.  Otherwise,
+		 * we need to set this bit to a 1 and update the
+		 * checksum.
+		 */
+		ret_val = e1000_read_nvm(hw, 0x23, 1, &data);
+		if (ret_val)
+			goto out;
+
+		if (!(data & 0x8000)) {
+			data |= 0x8000;
+			ret_val = e1000_write_nvm(hw, 0x23, 1, &data);
+			if (ret_val)
+				goto out;
+			ret_val = e1000_update_nvm_checksum(hw);
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_82571 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 e1000_read_mac_addr_82571(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_mac_addr_82571");
+	if (e1000_check_alt_mac_addr_generic(hw))
+		ret_val = e1000_read_mac_addr_generic(hw);
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_82571 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_82571(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_82571 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void e1000_clear_hw_cntrs_82571(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_82571");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+	temp = E1000_READ_REG(hw, E1000_PRC64);
+	temp = E1000_READ_REG(hw, E1000_PRC127);
+	temp = E1000_READ_REG(hw, E1000_PRC255);
+	temp = E1000_READ_REG(hw, E1000_PRC511);
+	temp = E1000_READ_REG(hw, E1000_PRC1023);
+	temp = E1000_READ_REG(hw, E1000_PRC1522);
+	temp = E1000_READ_REG(hw, E1000_PTC64);
+	temp = E1000_READ_REG(hw, E1000_PTC127);
+	temp = E1000_READ_REG(hw, E1000_PTC255);
+	temp = E1000_READ_REG(hw, E1000_PTC511);
+	temp = E1000_READ_REG(hw, E1000_PTC1023);
+	temp = E1000_READ_REG(hw, E1000_PTC1522);
+
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+
+	temp = E1000_READ_REG(hw, E1000_IAC);
+	temp = E1000_READ_REG(hw, E1000_ICRXOC);
+
+	temp = E1000_READ_REG(hw, E1000_ICRXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXPTC);
+	temp = E1000_READ_REG(hw, E1000_ICTXATC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQEC);
+	temp = E1000_READ_REG(hw, E1000_ICTXQMTC);
+	temp = E1000_READ_REG(hw, E1000_ICRXDMTC);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h
new file mode 100644
index 0000000..75ea2a2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_82571.h
@@ -0,0 +1,40 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_82571_H_
+#define _E1000_82571_H_
+
+#define ID_LED_RESERVED_F746 0xF746
+#define ID_LED_DEFAULT_82573 ((ID_LED_DEF1_DEF2 << 12) | \
+                              (ID_LED_OFF1_ON2  <<  8) | \
+                              (ID_LED_DEF1_DEF2 <<  4) | \
+                              (ID_LED_DEF1_DEF2))
+
+#define E1000_GCR_L1_ACT_WITHOUT_L0S_RX 0x08000000
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c
new file mode 100644
index 0000000..f434bf2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.c
@@ -0,0 +1,1164 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+#include "e1000_phy.h"
+
+/**
+ *  e1000_init_mac_params - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the MAC
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_mac_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_mac_params) {
+		ret_val = hw->func.init_mac_params(hw);
+		if (ret_val) {
+			DEBUGOUT("MAC Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("mac.init_mac_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the NVM
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_nvm_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_nvm_params) {
+		ret_val = hw->func.init_nvm_params(hw);
+		if (ret_val) {
+			DEBUGOUT("NVM Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("nvm.init_nvm_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_phy_params - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function initializes the function pointers for the PHY
+ *  set of functions.  Called by drivers or by e1000_setup_init_funcs.
+ **/
+s32 e1000_init_phy_params(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	if (hw->func.init_phy_params) {
+		ret_val = hw->func.init_phy_params(hw);
+		if (ret_val) {
+			DEBUGOUT("PHY Initialization Error\n");
+			goto out;
+		}
+	} else {
+		DEBUGOUT("phy.init_phy_params was NULL\n");
+		ret_val = -E1000_ERR_CONFIG;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_mac_type - Sets MAC type
+ *  @hw: pointer to the HW structure
+ *
+ *  This function sets the mac type of the adapter based on the
+ *  device ID stored in the hw structure.
+ *  MUST BE FIRST FUNCTION CALLED (explicitly or through
+ *  e1000_setup_init_funcs()).
+ **/
+s32 e1000_set_mac_type(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_set_mac_type");
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+		mac->type = e1000_82542;
+		break;
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+		mac->type = e1000_82543;
+		break;
+	case E1000_DEV_ID_82544EI_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82544GC_COPPER:
+	case E1000_DEV_ID_82544GC_LOM:
+		mac->type = e1000_82544;
+		break;
+	case E1000_DEV_ID_82540EM:
+	case E1000_DEV_ID_82540EM_LOM:
+	case E1000_DEV_ID_82540EP:
+	case E1000_DEV_ID_82540EP_LOM:
+	case E1000_DEV_ID_82540EP_LP:
+		mac->type = e1000_82540;
+		break;
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+		mac->type = e1000_82545;
+		break;
+	case E1000_DEV_ID_82545GM_COPPER:
+	case E1000_DEV_ID_82545GM_FIBER:
+	case E1000_DEV_ID_82545GM_SERDES:
+		mac->type = e1000_82545_rev_3;
+		break;
+	case E1000_DEV_ID_82546EB_COPPER:
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+		mac->type = e1000_82546;
+		break;
+	case E1000_DEV_ID_82546GB_COPPER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82546GB_SERDES:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		mac->type = e1000_82546_rev_3;
+		break;
+	case E1000_DEV_ID_82541EI:
+	case E1000_DEV_ID_82541EI_MOBILE:
+	case E1000_DEV_ID_82541ER_LOM:
+		mac->type = e1000_82541;
+		break;
+	case E1000_DEV_ID_82541ER:
+	case E1000_DEV_ID_82541GI:
+	case E1000_DEV_ID_82541GI_LF:
+	case E1000_DEV_ID_82541GI_MOBILE:
+		mac->type = e1000_82541_rev_2;
+		break;
+	case E1000_DEV_ID_82547EI:
+	case E1000_DEV_ID_82547EI_MOBILE:
+		mac->type = e1000_82547;
+		break;
+	case E1000_DEV_ID_82547GI:
+		mac->type = e1000_82547_rev_2;
+		break;
+	case E1000_DEV_ID_82571EB_COPPER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_SERDES_DUAL:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+		mac->type = e1000_82571;
+		break;
+	case E1000_DEV_ID_82572EI:
+	case E1000_DEV_ID_82572EI_COPPER:
+	case E1000_DEV_ID_82572EI_FIBER:
+	case E1000_DEV_ID_82572EI_SERDES:
+		mac->type = e1000_82572;
+		break;
+	case E1000_DEV_ID_82573E:
+	case E1000_DEV_ID_82573E_IAMT:
+	case E1000_DEV_ID_82573L:
+		mac->type = e1000_82573;
+		break;
+	case E1000_DEV_ID_80003ES2LAN_COPPER_DPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_DPT:
+	case E1000_DEV_ID_80003ES2LAN_COPPER_SPT:
+	case E1000_DEV_ID_80003ES2LAN_SERDES_SPT:
+		mac->type = e1000_80003es2lan;
+		break;
+	case E1000_DEV_ID_ICH8_IFE:
+	case E1000_DEV_ID_ICH8_IFE_GT:
+	case E1000_DEV_ID_ICH8_IFE_G:
+	case E1000_DEV_ID_ICH8_IGP_M:
+	case E1000_DEV_ID_ICH8_IGP_M_AMT:
+	case E1000_DEV_ID_ICH8_IGP_AMT:
+	case E1000_DEV_ID_ICH8_IGP_C:
+		mac->type = e1000_ich8lan;
+		break;
+	case E1000_DEV_ID_ICH9_IFE:
+	case E1000_DEV_ID_ICH9_IFE_GT:
+	case E1000_DEV_ID_ICH9_IFE_G:
+	case E1000_DEV_ID_ICH9_IGP_AMT:
+	case E1000_DEV_ID_ICH9_IGP_C:
+		mac->type = e1000_ich9lan;
+		break;
+	default:
+		/* Should never have loaded on this device */
+		ret_val = -E1000_ERR_MAC_INIT;
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_init_funcs - Initializes function pointers
+ *  @hw: pointer to the HW structure
+ *  @init_device: TRUE will initialize the rest of the function pointers
+ *                 getting the device ready for use.  FALSE will only set
+ *                 MAC type and the function pointers for the other init
+ *                 functions.  Passing FALSE will not generate any hardware
+ *                 reads or writes.
+ *
+ *  This function must be called by a driver in order to use the rest
+ *  of the 'shared' code files. Called by drivers only.
+ **/
+s32 e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device)
+{
+	s32 ret_val;
+
+	/* Can't do much good without knowing the MAC type. */
+	ret_val = e1000_set_mac_type(hw);
+	if (ret_val) {
+		DEBUGOUT("ERROR: MAC type could not be set properly.\n");
+		goto out;
+	}
+
+	if (!hw->hw_addr) {
+		DEBUGOUT("ERROR: Registers not mapped\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * Init some function pointers that all currently point to generic
+	 * implementations. We do this first so that a driver module can
+	 * override them afterwards.
+	 */
+	hw->func.config_collision_dist = e1000_config_collision_dist_generic;
+	hw->func.rar_set = e1000_rar_set_generic;
+	hw->func.validate_mdi_setting = e1000_validate_mdi_setting_generic;
+	hw->func.mng_host_if_write = e1000_mng_host_if_write_generic;
+	hw->func.mng_write_cmd_header = e1000_mng_write_cmd_header_generic;
+	hw->func.mng_enable_host_if = e1000_mng_enable_host_if_generic;
+	hw->func.wait_autoneg = e1000_wait_autoneg_generic;
+	hw->func.reload_nvm = e1000_reload_nvm_generic;
+
+	/*
+	 * Set up the init function pointers. These are functions within the
+	 * adapter family file that set up the function pointers for the rest
+	 * of the functions in that family.
+	 */
+	switch (hw->mac.type) {
+	case e1000_82542:
+		e1000_init_function_pointers_82542(hw);
+		break;
+	case e1000_82543:
+	case e1000_82544:
+		e1000_init_function_pointers_82543(hw);
+		break;
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		e1000_init_function_pointers_82540(hw);
+		break;
+	case e1000_82541:
+	case e1000_82541_rev_2:
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		e1000_init_function_pointers_82541(hw);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+		e1000_init_function_pointers_82571(hw);
+		break;
+	case e1000_80003es2lan:
+		e1000_init_function_pointers_80003es2lan(hw);
+		break;
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		e1000_init_function_pointers_ich8lan(hw);
+		break;
+	default:
+		DEBUGOUT("Hardware not supported\n");
+		ret_val = -E1000_ERR_CONFIG;
+		break;
+	}
+
+	/*
+	 * Initialize the rest of the function pointers. These require some
+	 * register reads/writes in some cases.
+	 */
+	if (!ret_val && init_device) {
+		ret_val = e1000_init_mac_params(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_init_nvm_params(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_init_phy_params(hw);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
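+
+/*
+ * Example bring-up sequence (illustrative sketch, not part of the
+ * original driver): a driver that has mapped hw->hw_addr and filled
+ * in hw->device_id during PCI probe would typically continue with:
+ *
+ *	s32 err = e1000_setup_init_funcs(hw, TRUE);
+ *
+ *	if (!err)
+ *		err = e1000_reset_hw(hw);
+ *	if (!err)
+ *		err = e1000_init_hw(hw);
+ *	if (!err)
+ *		err = e1000_setup_link(hw);
+ */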
+
+/**
+ *  e1000_remove_device - Free device specific structure
+ *  @hw: pointer to the HW structure
+ *
+ *  If a device specific structure was allocated, this function will
+ *  free it. This is a function pointer entry point called by drivers.
+ **/
+void e1000_remove_device(struct e1000_hw *hw)
+{
+	if (hw->func.remove_device)
+		hw->func.remove_device(hw);
+}
+
+/**
+ *  e1000_get_bus_info - Obtain bus information for adapter
+ *  @hw: pointer to the HW structure
+ *
+ *  This will obtain information about the HW bus to which the
+ *  adapter is attached and store it in the hw structure. This is a
+ *  function pointer entry point called by drivers.
+ **/
+s32 e1000_get_bus_info(struct e1000_hw *hw)
+{
+	if (hw->func.get_bus_info)
+		return hw->func.get_bus_info(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  This clears the VLAN filter table on the adapter. This is a function
+ *  pointer entry point called by drivers.
+ **/
+void e1000_clear_vfta(struct e1000_hw *hw)
+{
+	if (hw->func.clear_vfta)
+		hw->func.clear_vfta(hw);
+}
+
+/**
+ *  e1000_write_vfta - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: the 32-bit offset at which to write the value.
+ *  @value: the 32-bit value to write at location offset.
+ *
+ *  This writes a 32-bit value to a 32-bit offset in the VLAN filter
+ *  table. This is a function pointer entry point called by drivers.
+ **/
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	if (hw->func.write_vfta)
+		hw->func.write_vfta(hw, offset, value);
+}
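+
+/*
+ * Example (hypothetical sketch): enabling receive filtering for one
+ * 12-bit VLAN ID 'vid'.  Each VFTA entry covers 32 IDs, so a real
+ * driver would merge the new bit with the entry's current contents:
+ *
+ *	u32 index = (vid >> 5) & 0x7F;	// which 32-bit VFTA entry
+ *	u32 bit = 1 << (vid & 0x1F);	// which bit within that entry
+ *
+ *	e1000_write_vfta(hw, index, bit);
+ */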
+
+/**
+ *  e1000_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.  Currently no func pointer
+ *  exists and all implementations are handled in the generic version of this
+ *  function.
+ **/
+void e1000_update_mc_addr_list(struct e1000_hw *hw, u8 *mc_addr_list,
+                               u32 mc_addr_count, u32 rar_used_count,
+                               u32 rar_count)
+{
+	if (hw->func.update_mc_addr_list)
+		hw->func.update_mc_addr_list(hw,
+		                             mc_addr_list,
+		                             mc_addr_count,
+		                             rar_used_count,
+		                             rar_count);
+}
+
+/**
+ *  e1000_force_mac_fc - Force MAC flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings. Currently no func pointer exists
+ *  and all implementations are handled in the generic version of this
+ *  function.
+ **/
+s32 e1000_force_mac_fc(struct e1000_hw *hw)
+{
+	return e1000_force_mac_fc_generic(hw);
+}
+
+/**
+ *  e1000_check_for_link - Check/Store link connection
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks the link condition of the adapter and stores the
+ *  results in the hw->mac structure. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_check_for_link(struct e1000_hw *hw)
+{
+	if (hw->func.check_for_link)
+		return hw->func.check_for_link(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_check_mng_mode - Check management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has manageability enabled.
+ *  This is a function pointer entry point called by drivers.
+ **/
+bool e1000_check_mng_mode(struct e1000_hw *hw)
+{
+	if (hw->func.check_mng_mode)
+		return hw->func.check_mng_mode(hw);
+
+	return FALSE;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info(struct e1000_hw *hw, u8 *buffer, u16 length)
+{
+	return e1000_mng_write_dhcp_info_generic(hw, buffer, length);
+}
+
+/**
+ *  e1000_reset_hw - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_reset_hw(struct e1000_hw *hw)
+{
+	if (hw->func.reset_hw)
+		return hw->func.reset_hw(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_init_hw - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation. This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_init_hw(struct e1000_hw *hw)
+{
+	if (hw->func.init_hw)
+		return hw->func.init_hw(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_link - Configures link and flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  This configures link and flow control settings for the adapter. This
+ *  is a function pointer entry point called by drivers. While modules can
+ *  also call this, they probably call their own version of this function.
+ **/
+s32 e1000_setup_link(struct e1000_hw *hw)
+{
+	if (hw->func.setup_link)
+		return hw->func.setup_link(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_get_speed_and_duplex - Returns current speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to a 16-bit value to store the speed
+ *  @duplex: pointer to a 16-bit value to store the duplex.
+ *
+ *  This returns the speed and duplex of the adapter in the two 'out'
+ *  variables passed in. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed, u16 *duplex)
+{
+	if (hw->func.get_link_up_info)
+		return hw->func.get_link_up_info(hw, speed, duplex);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_setup_led - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_setup_led(struct e1000_hw *hw)
+{
+	if (hw->func.setup_led)
+		return hw->func.setup_led(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_cleanup_led - Restores SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This restores the SW controllable LED to the value saved off by
+ *  e1000_setup_led. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_cleanup_led(struct e1000_hw *hw)
+{
+	if (hw->func.cleanup_led)
+		return hw->func.cleanup_led(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_blink_led - Blink SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This starts the adapter LED blinking. Request the LED to be setup first
+ *  and cleaned up after. This is a function pointer entry point called by
+ *  drivers.
+ **/
+s32 e1000_blink_led(struct e1000_hw *hw)
+{
+	if (hw->func.blink_led)
+		return hw->func.blink_led(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on - Turn on SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED on. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_on(struct e1000_hw *hw)
+{
+	if (hw->func.led_on)
+		return hw->func.led_on(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off - Turn off SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Turns the SW defined LED off. This is a function pointer entry point
+ *  called by drivers.
+ **/
+s32 e1000_led_off(struct e1000_hw *hw)
+{
+	if (hw->func.led_off)
+		return hw->func.led_off(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_reset_adaptive - Reset adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the adaptive IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_reset_adaptive(struct e1000_hw *hw)
+{
+	e1000_reset_adaptive_generic(hw);
+}
+
+/**
+ *  e1000_update_adaptive - Update adaptive IFS
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates adapter IFS. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_update_adaptive(struct e1000_hw *hw)
+{
+	/* TODO: adaptive IFS updates are currently disabled in this port;
+	 * the generic implementation below is intentionally not called. */
+	(void)hw;
+	/* e1000_update_adaptive_generic(hw); */
+}
+
+/**
+ *  e1000_disable_pcie_master - Disable PCI-Express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests. Currently no func pointer exists and all implementations are
+ *  handled in the generic version of this function.
+ **/
+s32 e1000_disable_pcie_master(struct e1000_hw *hw)
+{
+	return e1000_disable_pcie_master_generic(hw);
+}
+
+/**
+ *  e1000_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup.
+ **/
+void e1000_config_collision_dist(struct e1000_hw *hw)
+{
+	if (hw->func.config_collision_dist)
+		hw->func.config_collision_dist(hw);
+}
+
+/**
+ *  e1000_rar_set - Sets a receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: address to set the RAR to
+ *  @index: the RAR to set
+ *
+ *  Sets a Receive Address Register (RAR) to the specified address.
+ **/
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	if (hw->func.rar_set)
+		hw->func.rar_set(hw, addr, index);
+}
+
+/**
+ *  e1000_validate_mdi_setting - Ensures valid MDI/MDIX SW state
+ *  @hw: pointer to the HW structure
+ *
+ *  Ensures that the MDI/MDIX SW state is valid.
+ **/
+s32 e1000_validate_mdi_setting(struct e1000_hw *hw)
+{
+	if (hw->func.validate_mdi_setting)
+		return hw->func.validate_mdi_setting(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mta_set - Sets multicast table bit
+ *  @hw: pointer to the HW structure
+ *  @hash_value: Multicast hash value.
+ *
+ *  This sets the bit in the multicast table corresponding to the
+ *  hash value.  This is a function pointer entry point called by drivers.
+ **/
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+	if (hw->func.mta_set)
+		hw->func.mta_set(hw, hash_value);
+}
+
+/**
+ *  e1000_hash_mc_addr - Determines address location in multicast table
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: Multicast address to hash.
+ *
+ *  This hashes an address to determine its location in the multicast
+ *  table. Currently no func pointer exists and all implementations
+ *  are handled in the generic version of this function.
+ **/
+u32 e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	return e1000_hash_mc_addr_generic(hw, mc_addr);
+}
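+
+/*
+ * Example (illustrative sketch): programming a packed multicast list,
+ * where mc_list holds mc_count consecutive 6-byte addresses and RAR 0
+ * is reserved for the station address:
+ *
+ *	e1000_update_mc_addr_list(hw, mc_list, mc_count,
+ *	                          1, hw->mac.rar_entry_count);
+ */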
+
+/**
+ *  e1000_enable_tx_pkt_filtering - Enable packet filtering on TX
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw)
+{
+	return e1000_enable_tx_pkt_filtering_generic(hw);
+}
+
+/**
+ *  e1000_mng_host_if_write - Writes to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the given offset on the host
+ *  interface.  It also handles alignment so that the writes are done in the
+ *  most efficient way, and accumulates the sum of the data in the *sum
+ *  parameter.
+ **/
+s32 e1000_mng_host_if_write(struct e1000_hw *hw, u8 *buffer, u16 length,
+                            u16 offset, u8 *sum)
+{
+	if (hw->func.mng_host_if_write)
+		return hw->func.mng_host_if_write(hw, buffer, length, offset,
+		                                  sum);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_write_cmd_header - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after performing the checksum calculation.
+ **/
+s32 e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                               struct e1000_host_mng_command_header *hdr)
+{
+	if (hw->func.mng_write_cmd_header)
+		return hw->func.mng_write_cmd_header(hw, hdr);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_mng_enable_host_if - Checks host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the HOST IF is enabled for command operation
+ *  and also checks whether the previous command has completed.  It busy-waits
+ *  if the previous command is not yet completed.
+ **/
+s32 e1000_mng_enable_host_if(struct e1000_hw *hw)
+{
+	if (hw->func.mng_enable_host_if)
+		return hw->func.mng_enable_host_if(hw);
+
+	return E1000_NOT_IMPLEMENTED;
+}
+
+/**
+ *  e1000_wait_autoneg - Waits for autonegotiation completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for autoneg to complete. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_wait_autoneg(struct e1000_hw *hw)
+{
+	if (hw->func.wait_autoneg)
+		return hw->func.wait_autoneg(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_check_reset_block - Verifies PHY can be reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if the PHY is in a state that can be reset or if manageability
+ *  has it tied up. This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_check_reset_block(struct e1000_hw *hw)
+{
+	if (hw->func.check_reset_block)
+		return hw->func.check_reset_block(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_phy_reg - Reads PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the buffer to store the 16-bit read.
+ *
+ *  Reads the PHY register and returns the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	if (hw->func.read_phy_reg)
+		return hw->func.read_phy_reg(hw, offset, data);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_write_phy_reg - Writes PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the PHY register at offset with the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	if (hw->func.write_phy_reg)
+		return hw->func.write_phy_reg(hw, offset, data);
+
+	return E1000_SUCCESS;
+}
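+
+/*
+ * Example (hypothetical sketch): read-modify-write of a PHY register
+ * through the entry points above; PHY_CONTROL and MII_CR_AUTO_NEG_EN
+ * stand in for whatever register and bit the caller actually needs:
+ *
+ *	u16 phy_data;
+ *	s32 err = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+ *
+ *	if (!err) {
+ *		phy_data |= MII_CR_AUTO_NEG_EN;
+ *		err = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+ *	}
+ */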
+
+/**
+ *  e1000_read_kmrn_reg - Reads register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to read
+ *  @data: the location to store the 16-bit value read.
+ *
+ *  Reads a register out of the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	return e1000_read_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_write_kmrn_reg - Writes register using Kumeran interface
+ *  @hw: pointer to the HW structure
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes a register to the Kumeran interface. Currently no func pointer
+ *  exists and all implementations are handled in the generic version of
+ *  this function.
+ **/
+s32 e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	return e1000_write_kmrn_reg_generic(hw, offset, data);
+}
+
+/**
+ *  e1000_get_cable_length - Retrieves cable length estimation
+ *  @hw: pointer to the HW structure
+ *
+ *  This function estimates the cable length and stores the estimates in
+ *  hw->phy.min_length and hw->phy.max_length. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_get_cable_length(struct e1000_hw *hw)
+{
+	if (hw->func.get_cable_length)
+		return hw->func.get_cable_length(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_info - Retrieves PHY information from registers
+ *  @hw: pointer to the HW structure
+ *
+ *  This function gets some information from various PHY registers and
+ *  populates hw->phy values with it. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_get_phy_info(struct e1000_hw *hw)
+{
+	if (hw->func.get_phy_info)
+		return hw->func.get_phy_info(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_hw_reset - Hard PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a hard PHY reset. This is a function pointer entry point called
+ *  by drivers.
+ **/
+s32 e1000_phy_hw_reset(struct e1000_hw *hw)
+{
+	if (hw->func.reset_phy)
+		return hw->func.reset_phy(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_commit - Soft PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs a soft PHY reset on those that apply. This is a function pointer
+ *  entry point called by drivers.
+ **/
+s32 e1000_phy_commit(struct e1000_hw *hw)
+{
+	if (hw->func.commit_phy)
+		return hw->func.commit_phy(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d0_lplu_state - Sets low power link up state for D0
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D0
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D0
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->func.set_d0_lplu_state)
+		return hw->func.set_d0_lplu_state(hw, active);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+	if (hw->func.set_d3_lplu_state)
+		return hw->func.set_d3_lplu_state(hw, active);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_read_mac_addr - Reads MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MAC address out of the adapter and stores it in the HW structure.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_mac_addr(struct e1000_hw *hw)
+{
+	if (hw->func.read_mac_addr)
+		return hw->func.read_mac_addr(hw);
+
+	return e1000_read_mac_addr_generic(hw);
+}
+
+/**
+ *  e1000_read_pba_num - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ *  Currently no func pointer exists and all implementations are handled in the
+ *  generic version of this function.
+ **/
+s32 e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num)
+{
+	return e1000_read_pba_num_generic(hw, pba_num);
+}
+
+/**
+ *  e1000_validate_nvm_checksum - Verifies NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Validates the NVM checksum is correct. This is a function pointer entry
+ *  point called by drivers.
+ **/
+s32 e1000_validate_nvm_checksum(struct e1000_hw *hw)
+{
+	if (hw->func.validate_nvm)
+		return hw->func.validate_nvm(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_update_nvm_checksum - Updates NVM (EEPROM) checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the NVM checksum. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+s32 e1000_update_nvm_checksum(struct e1000_hw *hw)
+{
+	if (hw->func.update_nvm)
+		return hw->func.update_nvm(hw);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_reload_nvm - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000_reload_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.reload_nvm)
+		hw->func.reload_nvm(hw);
+}
+
+/**
+ *  e1000_read_nvm - Reads NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to read
+ *  @words: number of 16-bit words to read
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Reads 16-bit chunks of data from the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	if (hw->func.read_nvm)
+		return hw->func.read_nvm(hw, offset, words, data);
+
+	return -E1000_ERR_CONFIG;
+}
+
+/**
+ *  e1000_write_nvm - Writes to NVM (EEPROM)
+ *  @hw: pointer to the HW structure
+ *  @offset: the word offset to write
+ *  @words: number of 16-bit words to write
+ *  @data: pointer to the properly sized buffer for the data.
+ *
+ *  Writes 16-bit chunks of data to the NVM (EEPROM). This is a function
+ *  pointer entry point called by drivers.
+ **/
+s32 e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	if (hw->func.write_nvm)
+		return hw->func.write_nvm(hw, offset, words, data);
+
+	return E1000_SUCCESS;
+}
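+
+/*
+ * Example (illustrative sketch): rewriting one NVM word at word offset
+ * 'offset', then refreshing and re-validating the EEPROM checksum:
+ *
+ *	u16 word = new_value;	// hypothetical 16-bit payload
+ *	s32 err = e1000_write_nvm(hw, offset, 1, &word);
+ *
+ *	if (!err)
+ *		err = e1000_update_nvm_checksum(hw);
+ *	if (!err)
+ *		err = e1000_validate_nvm_checksum(hw);
+ */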
+
+/**
+ *  e1000_write_8bit_ctrl_reg - Writes 8bit Control register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset
+ *  @offset: the register to write
+ *  @data: the value to write.
+ *
+ *  Writes the PHY register at offset with the value in data.
+ *  This is a function pointer entry point called by drivers.
+ **/
+s32 e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg, u32 offset,
+                              u8 data)
+{
+	return e1000_write_8bit_ctrl_reg_generic(hw, reg, offset, data);
+}
+
+/**
+ * e1000_power_up_phy - Restores link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may have been powered down to save power, to turn off link when
+ * the driver is unloaded, or when wake-on-LAN is not enabled (among other
+ * reasons). This powers it back up and reconfigures the link.
+ **/
+void e1000_power_up_phy(struct e1000_hw *hw)
+{
+	if (hw->func.power_up_phy)
+		hw->func.power_up_phy(hw);
+
+	e1000_setup_link(hw);
+}
+
+/**
+ * e1000_power_down_phy - Power down PHY
+ * @hw: pointer to the HW structure
+ *
+ * The PHY may be powered down to save power, to turn off link when the
+ * driver is unloaded, or when wake-on-LAN is not enabled (among other
+ * reasons).
+ **/
+void e1000_power_down_phy(struct e1000_hw *hw)
+{
+	if (hw->func.power_down_phy)
+		hw->func.power_down_phy(hw);
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h
new file mode 100644
index 0000000..4c646c8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_api.h
@@ -0,0 +1,166 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_API_H_
+#define _E1000_API_H_
+
+#include "e1000_hw.h"
+
+extern void    e1000_init_function_pointers_82542(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82543(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82540(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82571(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_82541(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_80003es2lan(struct e1000_hw *hw);
+extern void    e1000_init_function_pointers_ich8lan(struct e1000_hw *hw);
+
+s32  e1000_set_mac_type(struct e1000_hw *hw);
+s32  e1000_setup_init_funcs(struct e1000_hw *hw, bool init_device);
+s32  e1000_init_mac_params(struct e1000_hw *hw);
+s32  e1000_init_nvm_params(struct e1000_hw *hw);
+s32  e1000_init_phy_params(struct e1000_hw *hw);
+void e1000_remove_device(struct e1000_hw *hw);
+s32  e1000_get_bus_info(struct e1000_hw *hw);
+void e1000_clear_vfta(struct e1000_hw *hw);
+void e1000_write_vfta(struct e1000_hw *hw, u32 offset, u32 value);
+s32  e1000_force_mac_fc(struct e1000_hw *hw);
+s32  e1000_check_for_link(struct e1000_hw *hw);
+s32  e1000_reset_hw(struct e1000_hw *hw);
+s32  e1000_init_hw(struct e1000_hw *hw);
+s32  e1000_setup_link(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex(struct e1000_hw *hw, u16 *speed,
+                                u16 *duplex);
+s32  e1000_disable_pcie_master(struct e1000_hw *hw);
+void e1000_config_collision_dist(struct e1000_hw *hw);
+void e1000_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+void e1000_mta_set(struct e1000_hw *hw, u32 hash_value);
+u32  e1000_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr);
+void e1000_update_mc_addr_list(struct e1000_hw *hw,
+                               u8 *mc_addr_list, u32 mc_addr_count,
+                               u32 rar_used_count, u32 rar_count);
+s32  e1000_setup_led(struct e1000_hw *hw);
+s32  e1000_cleanup_led(struct e1000_hw *hw);
+s32  e1000_check_reset_block(struct e1000_hw *hw);
+s32  e1000_blink_led(struct e1000_hw *hw);
+s32  e1000_led_on(struct e1000_hw *hw);
+s32  e1000_led_off(struct e1000_hw *hw);
+void e1000_reset_adaptive(struct e1000_hw *hw);
+void e1000_update_adaptive(struct e1000_hw *hw);
+s32  e1000_get_cable_length(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting(struct e1000_hw *hw);
+s32  e1000_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+                               u32 offset, u8 data);
+s32  e1000_get_phy_info(struct e1000_hw *hw);
+s32  e1000_phy_hw_reset(struct e1000_hw *hw);
+s32  e1000_phy_commit(struct e1000_hw *hw);
+void e1000_power_up_phy(struct e1000_hw *hw);
+void e1000_power_down_phy(struct e1000_hw *hw);
+s32  e1000_read_mac_addr(struct e1000_hw *hw);
+s32  e1000_read_pba_num(struct e1000_hw *hw, u32 *pba_num);
+void e1000_reload_nvm(struct e1000_hw *hw);
+s32  e1000_update_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_validate_nvm_checksum(struct e1000_hw *hw);
+s32  e1000_read_nvm(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_write_kmrn_reg(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_nvm(struct e1000_hw *hw, u16 offset, u16 words,
+                     u16 *data);
+s32  e1000_wait_autoneg(struct e1000_hw *hw);
+s32  e1000_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  e1000_set_d0_lplu_state(struct e1000_hw *hw, bool active);
+bool e1000_check_mng_mode(struct e1000_hw *hw);
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write(struct e1000_hw *hw,
+                             u8 *buffer, u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header(struct e1000_hw *hw,
+                                struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info(struct e1000_hw *hw,
+                               u8 *buffer, u16 length);
+void e1000_tbi_adjust_stats_82543(struct e1000_hw *hw,
+                                  struct e1000_hw_stats *stats,
+                                  u32 frame_len, u8 *mac_addr,
+                                  u32 max_frame_size);
+void e1000_set_tbi_compatibility_82543(struct e1000_hw *hw,
+                                       bool state);
+bool e1000_tbi_sbp_enabled_82543(struct e1000_hw *hw);
+u32  e1000_translate_register_82542(u32 reg);
+void e1000_init_script_state_82541(struct e1000_hw *hw, bool state);
+bool e1000_get_laa_state_82571(struct e1000_hw *hw);
+void e1000_set_laa_state_82571(struct e1000_hw *hw, bool state);
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+                                                 bool state);
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw);
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw);
+
+/*
+ * TBI_ACCEPT macro definition:
+ *
+ * This macro requires:
+ *      adapter = a pointer to struct e1000_hw
+ *      status = the 8 bit status field of the Rx descriptor with EOP set
+ *      errors = the 8 bit error field of the Rx descriptor with EOP set
+ *      length = the sum of all the length fields of the Rx descriptors that
+ *               make up the current frame
+ *      last_byte = the last byte of the frame DMAed by the hardware
+ *      min_frame_size = the minimum frame length we want to accept.
+ *      max_frame_size = the maximum frame length we want to accept.
+ *
+ * This macro is a conditional that should be used in the interrupt
+ * handler's Rx processing routine when RxErrors have been detected.
+ *
+ * Typical use:
+ *  ...
+ *  if (TBI_ACCEPT) {
+ *      accept_frame = TRUE;
+ *      e1000_tbi_adjust_stats(adapter, MacAddress);
+ *      frame_length--;
+ *  } else {
+ *      accept_frame = FALSE;
+ *  }
+ *  ...
+ */
+
+/* The carrier extension symbol, as received by the NIC. */
+#define CARRIER_EXTENSION   0x0F
+
+#define TBI_ACCEPT(a, status, errors, length, last_byte, min_frame_size, max_frame_size) \
+    (e1000_tbi_sbp_enabled_82543(a) && \
+     (((errors) & E1000_RXD_ERR_FRAME_ERR_MASK) == E1000_RXD_ERR_CE) && \
+     ((last_byte) == CARRIER_EXTENSION) && \
+     (((status) & E1000_RXD_STAT_VP) ? \
+          (((length) > (min_frame_size - VLAN_TAG_SIZE)) && \
+           ((length) <= (max_frame_size + 1))) : \
+          (((length) > min_frame_size) && \
+           ((length) <= (max_frame_size + VLAN_TAG_SIZE + 1)))))
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h
new file mode 100644
index 0000000..37f3511
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_defines.h
@@ -0,0 +1,1397 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_APME       0x00000001 /* APM Enable */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+#define E1000_WUC_PME_STATUS 0x00000004 /* PME Status */
+#define E1000_WUC_APMPME     0x00000008 /* Assert PME on APM Wakeup */
+#define E1000_WUC_LSCWE      0x00000010 /* Link Status wake up enable */
+#define E1000_WUC_LSCWO      0x00000020 /* Link Status wake up override */
+#define E1000_WUC_SPM        0x80000000 /* Enable SPM */
+#define E1000_WUC_PHY_WAKE   0x00000100 /* if PHY supports wakeup */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+#define E1000_WUFC_ARP  0x00000020 /* ARP Request Packet Wakeup Enable */
+#define E1000_WUFC_IPV4 0x00000040 /* Directed IPv4 Packet Wakeup Enable */
+#define E1000_WUFC_IPV6 0x00000080 /* Directed IPv6 Packet Wakeup Enable */
+#define E1000_WUFC_IGNORE_TCO   0x00008000 /* Ignore WakeOn TCO packets */
+#define E1000_WUFC_FLX0 0x00010000 /* Flexible Filter 0 Enable */
+#define E1000_WUFC_FLX1 0x00020000 /* Flexible Filter 1 Enable */
+#define E1000_WUFC_FLX2 0x00040000 /* Flexible Filter 2 Enable */
+#define E1000_WUFC_FLX3 0x00080000 /* Flexible Filter 3 Enable */
+#define E1000_WUFC_ALL_FILTERS  0x000F00FF /* Mask for all wakeup filters */
+#define E1000_WUFC_FLX_OFFSET   16 /* Offset to the Flexible Filters bits */
+#define E1000_WUFC_FLX_FILTERS  0x000F0000 /* Mask for the 4 flexible filters */
+
+/* Wake Up Status */
+#define E1000_WUS_LNKC         E1000_WUFC_LNKC
+#define E1000_WUS_MAG          E1000_WUFC_MAG
+#define E1000_WUS_EX           E1000_WUFC_EX
+#define E1000_WUS_MC           E1000_WUFC_MC
+#define E1000_WUS_BC           E1000_WUFC_BC
+#define E1000_WUS_ARP          E1000_WUFC_ARP
+#define E1000_WUS_IPV4         E1000_WUFC_IPV4
+#define E1000_WUS_IPV6         E1000_WUFC_IPV6
+#define E1000_WUS_FLX0         E1000_WUFC_FLX0
+#define E1000_WUS_FLX1         E1000_WUFC_FLX1
+#define E1000_WUS_FLX2         E1000_WUFC_FLX2
+#define E1000_WUS_FLX3         E1000_WUFC_FLX3
+#define E1000_WUS_FLX_FILTERS  E1000_WUFC_FLX_FILTERS
+
+/* Wake Up Packet Length */
+#define E1000_WUPL_LENGTH_MASK 0x0FFF   /* Only the lower 12 bits are valid */
+
+/* Four Flexible Filters are supported */
+#define E1000_FLEXIBLE_FILTER_COUNT_MAX 4
+
+/* Each Flexible Filter is at most 128 (0x80) bytes in length */
+#define E1000_FLEXIBLE_FILTER_SIZE_MAX  128
+
+#define E1000_FFLT_SIZE E1000_FLEXIBLE_FILTER_COUNT_MAX
+#define E1000_FFMT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+#define E1000_FFVT_SIZE E1000_FLEXIBLE_FILTER_SIZE_MAX
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_GPI0_EN   0x00000001 /* Maps SDP4 to GPI0 */
+#define E1000_CTRL_EXT_GPI1_EN   0x00000002 /* Maps SDP5 to GPI1 */
+#define E1000_CTRL_EXT_PHYINT_EN E1000_CTRL_EXT_GPI1_EN
+#define E1000_CTRL_EXT_GPI2_EN   0x00000004 /* Maps SDP6 to GPI2 */
+#define E1000_CTRL_EXT_GPI3_EN   0x00000008 /* Maps SDP7 to GPI3 */
+/* Reserved (bits 4,5) in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DATA 0x00000010 /* Value of SW Definable Pin 4 */
+#define E1000_CTRL_EXT_SDP5_DATA 0x00000020 /* Value of SW Definable Pin 5 */
+#define E1000_CTRL_EXT_PHY_INT   E1000_CTRL_EXT_SDP5_DATA
+#define E1000_CTRL_EXT_SDP6_DATA 0x00000040 /* Value of SW Definable Pin 6 */
+#define E1000_CTRL_EXT_SDP7_DATA 0x00000080 /* Value of SW Definable Pin 7 */
+/* SDP 4/5 (bits 8,9) are reserved in >= 82575 */
+#define E1000_CTRL_EXT_SDP4_DIR  0x00000100 /* Direction of SDP4 0=in 1=out */
+#define E1000_CTRL_EXT_SDP5_DIR  0x00000200 /* Direction of SDP5 0=in 1=out */
+#define E1000_CTRL_EXT_SDP6_DIR  0x00000400 /* Direction of SDP6 0=in 1=out */
+#define E1000_CTRL_EXT_SDP7_DIR  0x00000800 /* Direction of SDP7 0=in 1=out */
+#define E1000_CTRL_EXT_ASDCHK    0x00001000 /* Initiate an ASD sequence */
+#define E1000_CTRL_EXT_EE_RST    0x00002000 /* Reinitialize from EEPROM */
+#define E1000_CTRL_EXT_IPS       0x00004000 /* Invert Power State */
+#define E1000_CTRL_EXT_SPD_BYPS  0x00008000 /* Speed Select Bypass */
+#define E1000_CTRL_EXT_RO_DIS    0x00020000 /* Relaxed Ordering disable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK 0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_GMII 0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_TBI  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_KMRN    0x00000000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES  0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIX_SERDES  0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII   0x00800000
+#define E1000_CTRL_EXT_EIAME          0x01000000
+#define E1000_CTRL_EXT_IRCA           0x00000001
+#define E1000_CTRL_EXT_WR_WMARK_MASK  0x03000000
+#define E1000_CTRL_EXT_WR_WMARK_256   0x00000000
+#define E1000_CTRL_EXT_WR_WMARK_320   0x01000000
+#define E1000_CTRL_EXT_WR_WMARK_384   0x02000000
+#define E1000_CTRL_EXT_WR_WMARK_448   0x03000000
+#define E1000_CTRL_EXT_CANC           0x04000000 /* Interrupt delay cancellation */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000 /* Driver loaded bit for FW */
+/* IAME enable bit (27) was removed in >= 82575 */
+#define E1000_CTRL_EXT_IAME           0x08000000 /* Interrupt acknowledge Auto-mask */
+#define E1000_CTRL_EXT_INT_TIMER_CLR  0x20000000 /* Clear Interrupt timers after IMS clear */
+#define E1000_CRTL_EXT_PB_PAREN       0x01000000 /* packet buffer parity error detection enabled */
+#define E1000_CTRL_EXT_DF_PAREN       0x02000000 /* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_GHOST_PAREN    0x40000000
+#define E1000_CTRL_EXT_PBA_CLR        0x80000000 /* PBA Clear */
+#define E1000_I2CCMD_REG_ADDR_SHIFT   16
+#define E1000_I2CCMD_REG_ADDR         0x00FF0000
+#define E1000_I2CCMD_PHY_ADDR_SHIFT   24
+#define E1000_I2CCMD_PHY_ADDR         0x07000000
+#define E1000_I2CCMD_OPCODE_READ      0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE     0x00000000
+#define E1000_I2CCMD_RESET            0x10000000
+#define E1000_I2CCMD_READY            0x20000000
+#define E1000_I2CCMD_INTERRUPT_ENA    0x40000000
+#define E1000_I2CCMD_ERROR            0x80000000
+#define E1000_MAX_SGMII_PHY_REG_ADDR  255
+#define E1000_I2CCMD_PHY_TIMEOUT      200
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_IPCS     0x40    /* IP xsum calculated */
+#define E1000_RXD_STAT_PIF      0x80    /* passed in-exact filter */
+#define E1000_RXD_STAT_CRCV     0x100   /* Speculative CRC Valid */
+#define E1000_RXD_STAT_IPIDV    0x200   /* IP identification valid */
+#define E1000_RXD_STAT_UDPV     0x400   /* Valid UDP checksum */
+#define E1000_RXD_STAT_DYNINT   0x800   /* Pkt caused INT via DYNINT */
+#define E1000_RXD_STAT_ACK      0x8000  /* ACK Packet indication */
+#define E1000_RXD_ERR_CE        0x01    /* CRC Error */
+#define E1000_RXD_ERR_SE        0x02    /* Symbol Error */
+#define E1000_RXD_ERR_SEQ       0x04    /* Sequence Error */
+#define E1000_RXD_ERR_CXE       0x10    /* Carrier Extension Error */
+#define E1000_RXD_ERR_TCPE      0x20    /* TCP/UDP Checksum Error */
+#define E1000_RXD_ERR_IPE       0x40    /* IP Checksum Error */
+#define E1000_RXD_ERR_RXE       0x80    /* Rx Data Error */
+#define E1000_RXD_SPC_VLAN_MASK 0x0FFF  /* VLAN ID is in lower 12 bits */
+#define E1000_RXD_SPC_PRI_MASK  0xE000  /* Priority is in upper 3 bits */
+#define E1000_RXD_SPC_PRI_SHIFT 13
+#define E1000_RXD_SPC_CFI_MASK  0x1000  /* CFI is bit 12 */
+#define E1000_RXD_SPC_CFI_SHIFT 12
+
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* mask to determine if packets should be dropped due to frame errors */
+#define E1000_RXD_ERR_FRAME_ERR_MASK ( \
+    E1000_RXD_ERR_CE  |                \
+    E1000_RXD_ERR_SE  |                \
+    E1000_RXD_ERR_SEQ |                \
+    E1000_RXD_ERR_CXE |                \
+    E1000_RXD_ERR_RXE)
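+
+/*
+ * Example (illustrative sketch): how an Rx cleanup routine typically
+ * consumes the status and error masks above once a descriptor is done:
+ *
+ *	if (status & E1000_RXD_STAT_DD) {
+ *		if ((status & E1000_RXD_STAT_EOP) &&
+ *		    (errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+ *			// drop: CRC, symbol, sequence, CE or Rx data error
+ *		} else {
+ *			// hand the frame up the stack
+ *		}
+ *	}
+ */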
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+    E1000_RXDEXT_STATERR_CE  |            \
+    E1000_RXDEXT_STATERR_SE  |            \
+    E1000_RXDEXT_STATERR_SEQ |            \
+    E1000_RXDEXT_STATERR_CXE |            \
+    E1000_RXDEXT_STATERR_RXE)
+
+#define E1000_MRQC_ENABLE_MASK                 0x00000007
+#define E1000_MRQC_ENABLE_RSS_2Q               0x00000001
+#define E1000_MRQC_ENABLE_RSS_INT              0x00000004
+#define E1000_MRQC_RSS_FIELD_MASK              0xFFFF0000
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6_EX           0x00080000
+#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
+
+#define E1000_RXDPS_HDRSTAT_HDRSP              0x00008000
+#define E1000_RXDPS_HDRSTAT_HDRLEN_MASK        0x000003FF
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_R_ON_FORCE    0x00000004 /* Reset on Force TCO - RO */
+#define E1000_MANC_RMCP_EN       0x00000100 /* Enable RMCP 026Fh Filtering */
+#define E1000_MANC_0298_EN       0x00000200 /* Enable RMCP 0298h Filtering */
+#define E1000_MANC_IPV4_EN       0x00000400 /* Enable IPv4 */
+#define E1000_MANC_IPV6_EN       0x00000800 /* Enable IPv6 */
+#define E1000_MANC_SNAP_EN       0x00001000 /* Accept LLC/SNAP */
+#define E1000_MANC_ARP_EN        0x00002000 /* Enable ARP Request Filtering */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_NEIGHBOR_EN   0x00004000
+#define E1000_MANC_ARP_RES_EN    0x00008000 /* Enable ARP response Filtering */
+#define E1000_MANC_TCO_RESET     0x00010000 /* TCO Reset Occurred */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_REPORT_STATUS 0x00040000 /* Status Reporting Enabled */
+#define E1000_MANC_RCV_ALL       0x00080000 /* Receive All Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+/* Enable MNG packets to host memory */
+#define E1000_MANC_EN_MNG2HOST   0x00200000
+/* Enable IP address filtering */
+#define E1000_MANC_EN_IP_ADDR_FILTER    0x00400000
+#define E1000_MANC_EN_XSUM_FILTER   0x00800000 /* Enable checksum filtering */
+#define E1000_MANC_BR_EN            0x01000000 /* Enable broadcast filtering */
+#define E1000_MANC_SMB_REQ       0x01000000 /* SMBus Request */
+#define E1000_MANC_SMB_GNT       0x02000000 /* SMBus Grant */
+#define E1000_MANC_SMB_CLK_IN    0x04000000 /* SMBus Clock In */
+#define E1000_MANC_SMB_DATA_IN   0x08000000 /* SMBus Data In */
+#define E1000_MANC_SMB_DATA_OUT  0x10000000 /* SMBus Data Out */
+#define E1000_MANC_SMB_CLK_OUT   0x20000000 /* SMBus Clock Out */
+
+#define E1000_MANC_SMB_DATA_OUT_SHIFT  28 /* SMBus Data Out Shift */
+#define E1000_MANC_SMB_CLK_OUT_SHIFT   29 /* SMBus Clock Out Shift */
+
+/* Receive Control */
+#define E1000_RCTL_RST            0x00000001    /* Software reset */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enable */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_NO         0x00000000    /* no loopback mode */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_SLP        0x00000080    /* serial link loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_DTYP_MASK      0x00000C00    /* Descriptor type mask */
+#define E1000_RCTL_DTYP_PS        0x00000400    /* Packet Split descriptor */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_QUAT     0x00000100    /* rx desc min threshold size */
+#define E1000_RCTL_RDMTS_EIGTH    0x00000200    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_MO_0           0x00000000    /* multicast offset 11:0 */
+#define E1000_RCTL_MO_1           0x00001000    /* multicast offset 12:1 */
+#define E1000_RCTL_MO_2           0x00002000    /* multicast offset 13:2 */
+#define E1000_RCTL_MO_3           0x00003000    /* multicast offset 15:4 */
+#define E1000_RCTL_MDR            0x00004000    /* multicast desc ring 0 */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 0 */
+#define E1000_RCTL_SZ_2048        0x00000000    /* rx buffer size 2048 */
+#define E1000_RCTL_SZ_1024        0x00010000    /* rx buffer size 1024 */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+/* these buffer sizes are valid if E1000_RCTL_BSEX is 1 */
+#define E1000_RCTL_SZ_16384       0x00010000    /* rx buffer size 16384 */
+#define E1000_RCTL_SZ_8192        0x00020000    /* rx buffer size 8192 */
+#define E1000_RCTL_SZ_4096        0x00030000    /* rx buffer size 4096 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_CFI            0x00100000    /* canonical form indicator */
+#define E1000_RCTL_DPF            0x00400000    /* discard pause frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_BSEX           0x02000000    /* Buffer size extension */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+#define E1000_RCTL_FLXBUF_MASK    0x78000000    /* Flexible buffer size */
+#define E1000_RCTL_FLXBUF_SHIFT   27            /* Flexible buffer shift */
+
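+/*
+ * Illustrative sketch only, not used by the driver (the e1000_example_*
+ * helpers in this file are examples; u32/u16 from <linux/types.h> are
+ * assumed to be in scope): composing an RCTL value for 4096-byte receive
+ * buffers.  Sizes above 2048 bytes reuse the E1000_RCTL_SZ_* encoding and
+ * additionally require E1000_RCTL_BSEX.
+ */
+static inline u32 e1000_example_rctl_4096(void)
+{
+	u32 rctl = E1000_RCTL_EN | E1000_RCTL_BAM;	/* receiver on, accept broadcast */
+
+	rctl |= E1000_RCTL_SZ_4096 | E1000_RCTL_BSEX;	/* 0x00030000 with BSEX=1 => 4096 */
+	return rctl;
+}
+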
+/*
+ * Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+
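+/*
+ * Illustrative sketch only, not used by the driver: building PSRCTL from
+ * the defaults in the usage comment above (value0=256, value1=4096,
+ * value2=4096, value3=0).  ROUNDUP() is assumed by that comment and is
+ * defined locally here just for the example.
+ */
+#define E1000_EXAMPLE_ROUNDUP(x, mult) ((((x) + (mult) - 1) / (mult)) * (mult))
+static inline u32 e1000_example_psrctl_defaults(void)
+{
+	return ((E1000_EXAMPLE_ROUNDUP(256, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+		E1000_PSRCTL_BSIZE0_MASK) |	/* 256/128 = 2 in bits 6:0 */
+	       ((E1000_EXAMPLE_ROUNDUP(4096, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+		E1000_PSRCTL_BSIZE1_MASK) |	/* 4096/1024 = 4 in bits 13:8 */
+	       ((E1000_EXAMPLE_ROUNDUP(4096, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+		E1000_PSRCTL_BSIZE2_MASK);	/* bits 21:16; BSIZE3 default is 0 */
+}
+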
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_CSR_SM   0x8
+
+/* FACTPS Definitions */
+#define E1000_FACTPS_LFS    0x40000000  /* LAN Function Select */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_BEM      0x00000002  /* Endian Mode.0=little,1=big */
+#define E1000_CTRL_PRIOR    0x00000004  /* Priority on PCI. 0=rx,1=fair */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /*Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_TME      0x00000010  /* Test mode. 0=normal,1=test */
+#define E1000_CTRL_SLE      0x00000020  /* Serial Link on 0=dis,1=en */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_10   0x00000000  /* Force 10Mb */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_BEM32    0x00000400  /* Big Endian 32 mode */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+#define E1000_CTRL_D_UD_EN  0x00002000  /* Dock/Undock enable */
+#define E1000_CTRL_D_UD_POLARITY 0x00004000 /* Defined polarity of Dock/Undock indication in SDP[0] */
+#define E1000_CTRL_FORCE_PHY_RESET 0x00008000 /* Reset both PHY ports, through PHYRST_N pin */
+#define E1000_CTRL_EXT_LINK_EN 0x00010000 /* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SWDPIN2  0x00100000  /* SWDPIN 2 value */
+#define E1000_CTRL_SWDPIN3  0x00200000  /* SWDPIN 3 value */
+#define E1000_CTRL_SWDPIO0  0x00400000  /* SWDPIN 0 Input or output */
+#define E1000_CTRL_SWDPIO1  0x00800000  /* SWDPIN 1 input or output */
+#define E1000_CTRL_SWDPIO2  0x01000000  /* SWDPIN 2 input or output */
+#define E1000_CTRL_SWDPIO3  0x02000000  /* SWDPIN 3 input or output */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_RTE      0x20000000  /* Routing tag enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+#define E1000_CTRL_SW2FW_INT 0x02000000  /* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+#define E1000_CTRL_PHY_RESET_DIR  E1000_CTRL_SWDPIO0
+#define E1000_CTRL_PHY_RESET      E1000_CTRL_SWDPIN0
+#define E1000_CTRL_MDIO_DIR       E1000_CTRL_SWDPIO2
+#define E1000_CTRL_MDIO           E1000_CTRL_SWDPIN2
+#define E1000_CTRL_MDC_DIR        E1000_CTRL_SWDPIO3
+#define E1000_CTRL_MDC            E1000_CTRL_SWDPIN3
+#define E1000_CTRL_PHY_RESET_DIR4 E1000_CTRL_EXT_SDP4_DIR
+#define E1000_CTRL_PHY_RESET4     E1000_CTRL_EXT_SDP4_DATA
+
+#define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_PCS_LCTL_FLV_LINK_UP       1
+#define E1000_PCS_LCTL_FSV_10            0
+#define E1000_PCS_LCTL_FSV_100           2
+#define E1000_PCS_LCTL_FSV_1000          4
+#define E1000_PCS_LCTL_FDV_FULL          8
+#define E1000_PCS_LCTL_FSD               0x10
+#define E1000_PCS_LCTL_FORCE_LINK        0x20
+#define E1000_PCS_LCTL_LOW_LINK_LATCH    0x40
+#define E1000_PCS_LCTL_AN_ENABLE         0x10000
+#define E1000_PCS_LCTL_AN_RESTART        0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
+#define E1000_PCS_LCTL_AN_SGMII_BYPASS   0x80000
+#define E1000_PCS_LCTL_AN_SGMII_TRIGGER  0x100000
+#define E1000_PCS_LCTL_FAST_LINK_TIMER   0x1000000
+#define E1000_PCS_LCTL_LINK_OK_FIX       0x2000000
+#define E1000_PCS_LCTL_CRS_ON_NI         0x4000000
+#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
+
+#define E1000_PCS_LSTS_LINK_OK           1
+#define E1000_PCS_LSTS_SPEED_10          0
+#define E1000_PCS_LSTS_SPEED_100         2
+#define E1000_PCS_LSTS_SPEED_1000        4
+#define E1000_PCS_LSTS_DUPLEX_FULL       8
+#define E1000_PCS_LSTS_SYNK_OK           0x10
+#define E1000_PCS_LSTS_AN_COMPLETE       0x10000
+#define E1000_PCS_LSTS_AN_PAGE_RX        0x20000
+#define E1000_PCS_LSTS_AN_TIMED_OUT      0x40000
+#define E1000_PCS_LSTS_AN_REMOTE_FAULT   0x80000
+#define E1000_PCS_LSTS_AN_ERROR_RWS      0x100000
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_0     0x00000000      /* Function 0 */
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_TBIMODE    0x00000020      /* TBI mode */
+#define E1000_STATUS_SPEED_MASK 0x000000C0
+#define E1000_STATUS_SPEED_10   0x00000000      /* Speed 10Mb/s */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+#define E1000_STATUS_LAN_INIT_DONE 0x00000200   /* Lan Init Completion by NVM */
+#define E1000_STATUS_ASDV       0x00000300      /* Auto speed detect value */
+#define E1000_STATUS_DOCK_CI    0x00000800      /* Change in Dock/Undock state. Clear on write '0'. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000 /* Status of Master requests. */
+#define E1000_STATUS_MTXCKOK    0x00000400      /* MTX clock running OK */
+#define E1000_STATUS_PCI66      0x00000800      /* In 66MHz slot */
+#define E1000_STATUS_BUS64      0x00001000      /* In 64 bit slot */
+#define E1000_STATUS_PCIX_MODE  0x00002000      /* PCI-X mode */
+#define E1000_STATUS_PCIX_SPEED 0x0000C000      /* PCI-X bus speed */
+#define E1000_STATUS_BMC_SKU_0  0x00100000 /* BMC USB redirect disabled */
+#define E1000_STATUS_BMC_SKU_1  0x00200000 /* BMC SRAM disabled */
+#define E1000_STATUS_BMC_SKU_2  0x00400000 /* BMC SDRAM disabled */
+#define E1000_STATUS_BMC_CRYPTO 0x00800000 /* BMC crypto disabled */
+#define E1000_STATUS_BMC_LITE   0x01000000 /* BMC external code execution disabled */
+#define E1000_STATUS_RGMII_ENABLE 0x02000000 /* RGMII disabled */
+#define E1000_STATUS_FUSE_8       0x04000000
+#define E1000_STATUS_FUSE_9       0x08000000
+#define E1000_STATUS_SERDES0_DIS  0x10000000 /* SERDES disabled on port 0 */
+#define E1000_STATUS_SERDES1_DIS  0x20000000 /* SERDES disabled on port 1 */
+
+/* Constants used to interpret the masked PCI-X bus speed. */
+#define E1000_STATUS_PCIX_SPEED_66  0x00000000 /* PCI-X bus speed  50-66 MHz */
+#define E1000_STATUS_PCIX_SPEED_100 0x00004000 /* PCI-X bus speed  66-100 MHz */
+#define E1000_STATUS_PCIX_SPEED_133 0x00008000 /* PCI-X bus speed 100-133 MHz */
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
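+/*
+ * Illustrative sketch only, not used by the driver: decoding the STATUS
+ * register speed/duplex fields into the generic constants above.
+ */
+static inline void e1000_example_decode_status(u32 status, u16 *speed, u16 *duplex)
+{
+	if (status & E1000_STATUS_SPEED_1000)
+		*speed = SPEED_1000;
+	else if (status & E1000_STATUS_SPEED_100)
+		*speed = SPEED_100;
+	else
+		*speed = SPEED_10;
+
+	*duplex = (status & E1000_STATUS_FD) ? FULL_DUPLEX : HALF_DUPLEX;
+}
+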
+#define PHY_FORCE_TIME   20
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX  (ADVERTISE_10_HALF  | ADVERTISE_10_FULL  | \
+                                 ADVERTISE_100_HALF | ADVERTISE_100_FULL | \
+                                 ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG       (ADVERTISE_10_HALF  | ADVERTISE_10_FULL  | \
+                                 ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED     (ADVERTISE_100_HALF | ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED      (ADVERTISE_10_HALF  | ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX   (ADVERTISE_10_FULL  | ADVERTISE_100_FULL | \
+                                 ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX   (ADVERTISE_10_HALF  | ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
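+/*
+ * Illustrative sketch only, not used by the driver: clamping a requested
+ * advertisement mask to the supported set (this drops 1000/half, which is
+ * neither supported nor spec-compliant), falling back to the default when
+ * nothing survives.
+ */
+static inline u16 e1000_example_clamp_autoneg(u16 requested)
+{
+	u16 adv = requested & E1000_ALL_SPEED_DUPLEX;
+
+	return adv ? adv : AUTONEG_ADVERTISE_SPEED_DEFAULT;
+}
+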
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_MASK       0x0000000F
+#define E1000_LEDCTL_LED0_MODE_SHIFT      0
+#define E1000_LEDCTL_LED0_BLINK_RATE      0x00000020
+#define E1000_LEDCTL_LED0_IVRT            0x00000040
+#define E1000_LEDCTL_LED0_BLINK           0x00000080
+#define E1000_LEDCTL_LED1_MODE_MASK       0x00000F00
+#define E1000_LEDCTL_LED1_MODE_SHIFT      8
+#define E1000_LEDCTL_LED1_BLINK_RATE      0x00002000
+#define E1000_LEDCTL_LED1_IVRT            0x00004000
+#define E1000_LEDCTL_LED1_BLINK           0x00008000
+#define E1000_LEDCTL_LED2_MODE_MASK       0x000F0000
+#define E1000_LEDCTL_LED2_MODE_SHIFT      16
+#define E1000_LEDCTL_LED2_BLINK_RATE      0x00200000
+#define E1000_LEDCTL_LED2_IVRT            0x00400000
+#define E1000_LEDCTL_LED2_BLINK           0x00800000
+#define E1000_LEDCTL_LED3_MODE_MASK       0x0F000000
+#define E1000_LEDCTL_LED3_MODE_SHIFT      24
+#define E1000_LEDCTL_LED3_BLINK_RATE      0x20000000
+#define E1000_LEDCTL_LED3_IVRT            0x40000000
+#define E1000_LEDCTL_LED3_BLINK           0x80000000
+
+#define E1000_LEDCTL_MODE_LINK_10_1000  0x0
+#define E1000_LEDCTL_MODE_LINK_100_1000 0x1
+#define E1000_LEDCTL_MODE_LINK_UP       0x2
+#define E1000_LEDCTL_MODE_ACTIVITY      0x3
+#define E1000_LEDCTL_MODE_LINK_ACTIVITY 0x4
+#define E1000_LEDCTL_MODE_LINK_10       0x5
+#define E1000_LEDCTL_MODE_LINK_100      0x6
+#define E1000_LEDCTL_MODE_LINK_1000     0x7
+#define E1000_LEDCTL_MODE_PCIX_MODE     0x8
+#define E1000_LEDCTL_MODE_FULL_DUPLEX   0x9
+#define E1000_LEDCTL_MODE_COLLISION     0xA
+#define E1000_LEDCTL_MODE_BUS_SPEED     0xB
+#define E1000_LEDCTL_MODE_BUS_SIZE      0xC
+#define E1000_LEDCTL_MODE_PAUSED        0xD
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_DTYP_D     0x00100000 /* Data Descriptor */
+#define E1000_TXD_DTYP_C     0x00000000 /* Context Descriptor */
+#define E1000_TXD_POPTS_SHIFT 8         /* POPTS shift */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_IC     0x04000000 /* Insert Checksum */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_RPS    0x10000000 /* Report Packet Sent */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_CMD_VLE    0x40000000 /* Add VLAN tag */
+#define E1000_TXD_CMD_IDE    0x80000000 /* Enable Tidv register */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+#define E1000_TXD_STAT_EC    0x00000002 /* Excess Collisions */
+#define E1000_TXD_STAT_LC    0x00000004 /* Late Collisions */
+#define E1000_TXD_STAT_TU    0x00000008 /* Transmit underrun */
+#define E1000_TXD_CMD_TCP    0x01000000 /* TCP packet */
+#define E1000_TXD_CMD_IP     0x02000000 /* IP packet */
+#define E1000_TXD_CMD_TSE    0x04000000 /* TCP Seg enable */
+#define E1000_TXD_STAT_TC    0x00000004 /* Tx Underrun */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_RST    0x00000001    /* software reset */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_BCE    0x00000004    /* busy check enable */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_SWXOFF 0x00400000    /* SW Xoff transmission */
+#define E1000_TCTL_PBE    0x00800000    /* Packet Burst Enable */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+#define E1000_TCTL_NRTU   0x02000000    /* No Re-transmit on underrun */
+#define E1000_TCTL_MULR   0x10000000    /* Multiple request support */
+
+/* Transmit Arbitration Count */
+#define E1000_TARC0_ENABLE     0x00000400   /* Enable Tx Queue 0 */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_PCSS_MASK 0x000000FF   /* Packet Checksum Start */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_IPV6OFL   0x00000400   /* IPv6 checksum offload */
+#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
+#define E1000_RXCSUM_IPPCSE    0x00001000   /* IP payload checksum enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_ISCSI_DIS           0x00000001
+#define E1000_RFCTL_ISCSI_DWC_MASK      0x0000003E
+#define E1000_RFCTL_ISCSI_DWC_SHIFT     1
+#define E1000_RFCTL_NFSW_DIS            0x00000040
+#define E1000_RFCTL_NFSR_DIS            0x00000080
+#define E1000_RFCTL_NFS_VER_MASK        0x00000300
+#define E1000_RFCTL_NFS_VER_SHIFT       8
+#define E1000_RFCTL_IPV6_DIS            0x00000400
+#define E1000_RFCTL_IPV6_XSUM_DIS       0x00000800
+#define E1000_RFCTL_ACK_DIS             0x00001000
+#define E1000_RFCTL_ACKD_DIS            0x00002000
+#define E1000_RFCTL_IPFRSP_DIS          0x00004000
+#define E1000_RFCTL_EXTEN               0x00008000
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_NEW_IPV6_EXT_DIS    0x00020000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
+
+/* Default values for the transmit IPG register */
+#define DEFAULT_82542_TIPG_IPGT        10
+#define DEFAULT_82543_TIPG_IPGT_FIBER  9
+#define DEFAULT_82543_TIPG_IPGT_COPPER 8
+
+#define E1000_TIPG_IPGT_MASK  0x000003FF
+#define E1000_TIPG_IPGR1_MASK 0x000FFC00
+#define E1000_TIPG_IPGR2_MASK 0x3FF00000
+
+#define DEFAULT_82542_TIPG_IPGR1 2
+#define DEFAULT_82543_TIPG_IPGR1 8
+#define E1000_TIPG_IPGR1_SHIFT  10
+
+#define DEFAULT_82542_TIPG_IPGR2 10
+#define DEFAULT_82543_TIPG_IPGR2 6
+#define DEFAULT_80003ES2LAN_TIPG_IPGR2 7
+#define E1000_TIPG_IPGR2_SHIFT  20
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+
+#define ETHERNET_FCS_SIZE       4
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* Extended Configuration Control and Size */
+#define E1000_EXTCNF_CTRL_MDIO_SW_OWNERSHIP      0x00000020
+#define E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE       0x00000001
+#define E1000_EXTCNF_CTRL_SWFLAG                 0x00000020
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK   0x00FF0000
+#define E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT          16
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK   0x0FFF0000
+#define E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT          16
+
+#define E1000_PHY_CTRL_SPD_EN             0x00000001
+#define E1000_PHY_CTRL_D0A_LPLU           0x00000002
+#define E1000_PHY_CTRL_NOND0A_LPLU        0x00000004
+#define E1000_PHY_CTRL_NOND0A_GBE_DISABLE 0x00000008
+#define E1000_PHY_CTRL_GBE_DISABLE        0x00000040
+
+#define E1000_KABGTXD_BGSQLBIAS           0x00050000
+
+/* PBA constants */
+#define E1000_PBA_8K  0x0008    /* 8KB */
+#define E1000_PBA_12K 0x000C    /* 12KB */
+#define E1000_PBA_16K 0x0010    /* 16KB */
+#define E1000_PBA_20K 0x0014
+#define E1000_PBA_22K 0x0016
+#define E1000_PBA_24K 0x0018
+#define E1000_PBA_30K 0x001E
+#define E1000_PBA_32K 0x0020
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_38K 0x0026
+#define E1000_PBA_40K 0x0028
+#define E1000_PBA_48K 0x0030    /* 48KB */
+#define E1000_PBA_64K 0x0040    /* 64KB */
+
+#define E1000_PBS_16K E1000_PBA_16K
+#define E1000_PBS_24K E1000_PBA_24K
+
+#define IFS_MAX       80
+#define IFS_MIN       40
+#define IFS_RATIO     4
+#define IFS_STEP      10
+#define MIN_NUM_XMITS 1000
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+#define E1000_SWSM_WMNG         0x00000004 /* Wake MNG Clock */
+#define E1000_SWSM_DRV_LOAD     0x00000008 /* Driver Loaded Bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_TXQE          0x00000002 /* Transmit Queue empty */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXO           0x00000040 /* rx overrun */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_MDAC          0x00000200 /* MDIO access complete */
+#define E1000_ICR_RXCFG         0x00000400 /* Rx /c/ ordered set */
+#define E1000_ICR_GPI_EN0       0x00000800 /* GP Int 0 */
+#define E1000_ICR_GPI_EN1       0x00001000 /* GP Int 1 */
+#define E1000_ICR_GPI_EN2       0x00002000 /* GP Int 2 */
+#define E1000_ICR_GPI_EN3       0x00004000 /* GP Int 3 */
+#define E1000_ICR_TXD_LOW       0x00008000
+#define E1000_ICR_SRPD          0x00010000
+#define E1000_ICR_ACK           0x00020000 /* Receive Ack frame */
+#define E1000_ICR_MNG           0x00040000 /* Manageability event */
+#define E1000_ICR_DOCK          0x00080000 /* Dock/Undock */
+#define E1000_ICR_INT_ASSERTED  0x80000000 /* If this bit is asserted, the driver should claim the interrupt */
+#define E1000_ICR_RXD_FIFO_PAR0 0x00100000 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR0 0x00200000 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICR_HOST_ARB_PAR  0x00400000 /* host arb read buffer parity error */
+#define E1000_ICR_PB_PAR        0x00800000 /* packet buffer parity error */
+#define E1000_ICR_RXD_FIFO_PAR1 0x01000000 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICR_TXD_FIFO_PAR1 0x02000000 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICR_ALL_PARITY    0x03F00000 /* all parity error bits */
+#define E1000_ICR_DSW           0x00000020 /* FW changed the status of DISSW bit in the FWSM */
+#define E1000_ICR_PHYINT        0x00001000 /* LAN connected device generates an interrupt */
+#define E1000_ICR_EPRST         0x00100000 /* ME hardware reset occurred */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_TCP_TIMER    0x40000000 /* TCP Timer */
+#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+#define E1000_TCPTIMER_KS       0x00000100 /* KickStart */
+#define E1000_TCPTIMER_COUNT_ENABLE       0x00000200 /* Count Enable */
+#define E1000_TCPTIMER_COUNT_FINISH       0x00000400 /* Count finish */
+#define E1000_TCPTIMER_LOOP     0x00000800 /* Loop */
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ */
+#define POLL_IMS_ENABLE_MASK ( \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ)
+
+/*
+ * This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+    E1000_IMS_RXT0   |    \
+    E1000_IMS_TXDW   |    \
+    E1000_IMS_RXDMT0 |    \
+    E1000_IMS_RXSEQ  |    \
+    E1000_IMS_LSC)
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_IMS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_IMS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_IMS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_IMS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_IMS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_IMS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_IMS_SRPD      E1000_ICR_SRPD
+#define E1000_IMS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_IMS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_IMS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_IMS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_IMS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_IMS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_IMS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_IMS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_IMS_DSW       E1000_ICR_DSW
+#define E1000_IMS_PHYINT    E1000_ICR_PHYINT
+#define E1000_IMS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EIMS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EIMS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EIMS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EIMS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EIMS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EIMS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EIMS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EIMS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_ICS_TXQE      E1000_ICR_TXQE      /* Transmit Queue empty */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_RXO       E1000_ICR_RXO       /* rx overrun */
+#define E1000_ICS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_ICS_MDAC      E1000_ICR_MDAC      /* MDIO access complete */
+#define E1000_ICS_RXCFG     E1000_ICR_RXCFG     /* Rx /c/ ordered set */
+#define E1000_ICS_GPI_EN0   E1000_ICR_GPI_EN0   /* GP Int 0 */
+#define E1000_ICS_GPI_EN1   E1000_ICR_GPI_EN1   /* GP Int 1 */
+#define E1000_ICS_GPI_EN2   E1000_ICR_GPI_EN2   /* GP Int 2 */
+#define E1000_ICS_GPI_EN3   E1000_ICR_GPI_EN3   /* GP Int 3 */
+#define E1000_ICS_TXD_LOW   E1000_ICR_TXD_LOW
+#define E1000_ICS_SRPD      E1000_ICR_SRPD
+#define E1000_ICS_ACK       E1000_ICR_ACK       /* Receive Ack frame */
+#define E1000_ICS_MNG       E1000_ICR_MNG       /* Manageability event */
+#define E1000_ICS_DOCK      E1000_ICR_DOCK      /* Dock/Undock */
+#define E1000_ICS_RXD_FIFO_PAR0 E1000_ICR_RXD_FIFO_PAR0 /* queue 0 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR0 E1000_ICR_TXD_FIFO_PAR0 /* queue 0 Tx descriptor FIFO parity error */
+#define E1000_ICS_HOST_ARB_PAR  E1000_ICR_HOST_ARB_PAR  /* host arb read buffer parity error */
+#define E1000_ICS_PB_PAR        E1000_ICR_PB_PAR        /* packet buffer parity error */
+#define E1000_ICS_RXD_FIFO_PAR1 E1000_ICR_RXD_FIFO_PAR1 /* queue 1 Rx descriptor FIFO parity error */
+#define E1000_ICS_TXD_FIFO_PAR1 E1000_ICR_TXD_FIFO_PAR1 /* queue 1 Tx descriptor FIFO parity error */
+#define E1000_ICS_DSW       E1000_ICR_DSW
+#define E1000_ICS_PHYINT    E1000_ICR_PHYINT
+#define E1000_ICS_EPRST     E1000_ICR_EPRST
+
+/* Extended Interrupt Cause Set */
+#define E1000_EICS_RX_QUEUE0    E1000_EICR_RX_QUEUE0 /* Rx Queue 0 Interrupt */
+#define E1000_EICS_RX_QUEUE1    E1000_EICR_RX_QUEUE1 /* Rx Queue 1 Interrupt */
+#define E1000_EICS_RX_QUEUE2    E1000_EICR_RX_QUEUE2 /* Rx Queue 2 Interrupt */
+#define E1000_EICS_RX_QUEUE3    E1000_EICR_RX_QUEUE3 /* Rx Queue 3 Interrupt */
+#define E1000_EICS_TX_QUEUE0    E1000_EICR_TX_QUEUE0 /* Tx Queue 0 Interrupt */
+#define E1000_EICS_TX_QUEUE1    E1000_EICR_TX_QUEUE1 /* Tx Queue 1 Interrupt */
+#define E1000_EICS_TX_QUEUE2    E1000_EICR_TX_QUEUE2 /* Tx Queue 2 Interrupt */
+#define E1000_EICS_TX_QUEUE3    E1000_EICR_TX_QUEUE3 /* Tx Queue 3 Interrupt */
+#define E1000_EICS_TCP_TIMER    E1000_EICR_TCP_TIMER /* TCP Timer */
+#define E1000_EICS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Transmit Descriptor Control */
+#define E1000_TXDCTL_PTHRESH 0x0000003F /* TXDCTL Prefetch Threshold */
+#define E1000_TXDCTL_HTHRESH 0x00003F00 /* TXDCTL Host Threshold */
+#define E1000_TXDCTL_WTHRESH 0x003F0000 /* TXDCTL Writeback Threshold */
+#define E1000_TXDCTL_GRAN    0x01000000 /* TXDCTL Granularity */
+#define E1000_TXDCTL_LWTHRESH 0xFE000000 /* TXDCTL Low Threshold */
+#define E1000_TXDCTL_FULL_TX_DESC_WB 0x01010000 /* GRAN=1, WTHRESH=1 */
+#define E1000_TXDCTL_MAX_TX_DESC_PREFETCH 0x0100001F /* GRAN=1, PTHRESH=31 */
+/* Enable the counting of descriptors still to be processed. */
+#define E1000_TXDCTL_COUNT_DESC 0x00400000
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/*
+ * Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAR_ENTRIES     15
+#define E1000_RAH_AV  0x80000000        /* Receive address valid */
+
+/* Error Codes */
+#define E1000_SUCCESS      0
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_PHY_TYPE 6
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_ERR_HOST_INTERFACE_COMMAND 11
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define FIBER_LINK_UP_LIMIT               50
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2-millisecond intervals we wait to acquire MDIO ownership. */
+#define MDIO_OWNERSHIP_TIMEOUT      10
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
+
+/* Flow Control */
+#define E1000_FCRTH_RTH  0x0000FFF8     /* Mask Bits[15:3] for RTH */
+#define E1000_FCRTH_XFCE 0x80000000     /* External Flow Control Enable */
+#define E1000_FCRTL_RTL  0x0000FFF8     /* Mask Bits[15:3] for RTL */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
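+/*
+ * Illustrative sketch only, not used by the driver: the watermarks are
+ * programmed in bytes, but only bits [15:3] are implemented, so the masks
+ * round values down to 8-byte granularity.
+ */
+static inline u32 e1000_example_fcrtl(u32 low_water_bytes)
+{
+	/* XONE additionally enables XON frame transmission at the low mark */
+	return (low_water_bytes & E1000_FCRTL_RTL) | E1000_FCRTL_XONE;
+}
+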
+/* Transmit Configuration Word */
+#define E1000_TXCW_FD         0x00000020        /* TXCW full duplex */
+#define E1000_TXCW_HD         0x00000040        /* TXCW half duplex */
+#define E1000_TXCW_PAUSE      0x00000080        /* TXCW sym pause request */
+#define E1000_TXCW_ASM_DIR    0x00000100        /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE_MASK 0x00000180        /* TXCW pause request mask */
+#define E1000_TXCW_RF         0x00003000        /* TXCW remote fault */
+#define E1000_TXCW_NP         0x00008000        /* TXCW next page */
+#define E1000_TXCW_CW         0x0000ffff        /* TxConfigWord mask */
+#define E1000_TXCW_TXC        0x40000000        /* Transmit Config control */
+#define E1000_TXCW_ANE        0x80000000        /* Auto-neg enable */
+
+/* Receive Configuration Word */
+#define E1000_RXCW_CW         0x0000ffff        /* RxConfigWord mask */
+#define E1000_RXCW_NC         0x04000000        /* Receive config no carrier */
+#define E1000_RXCW_IV         0x08000000        /* Receive config invalid */
+#define E1000_RXCW_CC         0x10000000        /* Receive config change */
+#define E1000_RXCW_C          0x20000000        /* Receive config */
+#define E1000_RXCW_SYNCH      0x40000000        /* Receive config synch */
+#define E1000_RXCW_ANC        0x80000000        /* Auto-neg complete */
+
+/* PCI Express Control */
+#define E1000_GCR_RXD_NO_SNOOP          0x00000001
+#define E1000_GCR_RXDSCW_NO_SNOOP       0x00000002
+#define E1000_GCR_RXDSCR_NO_SNOOP       0x00000004
+#define E1000_GCR_TXD_NO_SNOOP          0x00000008
+#define E1000_GCR_TXDSCW_NO_SNOOP       0x00000010
+#define E1000_GCR_TXDSCR_NO_SNOOP       0x00000020
+
+#define PCIE_NO_SNOOP_ALL (E1000_GCR_RXD_NO_SNOOP         | \
+                           E1000_GCR_RXDSCW_NO_SNOOP      | \
+                           E1000_GCR_RXDSCR_NO_SNOOP      | \
+                           E1000_GCR_TXD_NO_SNOOP         | \
+                           E1000_GCR_TXDSCW_NO_SNOOP      | \
+                           E1000_GCR_TXDSCR_NO_SNOOP)
+
+/* PHY Control Register */
+#define MII_CR_SPEED_SELECT_MSB 0x0040  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_COLL_TEST_ENABLE 0x0080  /* Collision test enable */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_ISOLATE          0x0400  /* Isolate PHY from MII */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_SPEED_SELECT_LSB 0x2000  /* bits 6,13: 10=1000, 01=100, 00=10 */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_EXTENDED_CAPS     0x0001 /* Extended register capabilities */
+#define MII_SR_JABBER_DETECT     0x0002 /* Jabber Detected */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_CAPS      0x0008 /* Auto Neg Capable */
+#define MII_SR_REMOTE_FAULT      0x0010 /* Remote Fault Detect */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+#define MII_SR_PREAMBLE_SUPPRESS 0x0040 /* Preamble may be suppressed */
+#define MII_SR_EXTENDED_STATUS   0x0100 /* Ext. status info in Reg 0x0F */
+#define MII_SR_100T2_HD_CAPS     0x0200 /* 100T2 Half Duplex Capable */
+#define MII_SR_100T2_FD_CAPS     0x0400 /* 100T2 Full Duplex Capable */
+#define MII_SR_10T_HD_CAPS       0x0800 /* 10T   Half Duplex Capable */
+#define MII_SR_10T_FD_CAPS       0x1000 /* 10T   Full Duplex Capable */
+#define MII_SR_100X_HD_CAPS      0x2000 /* 100X  Half Duplex Capable */
+#define MII_SR_100X_FD_CAPS      0x4000 /* 100X  Full Duplex Capable */
+#define MII_SR_100T4_CAPS        0x8000 /* 100T4 Capable */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_SELECTOR_FIELD   0x0001   /* indicates IEEE 802.3 CSMA/CD */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_100T4_CAPS       0x0200   /* 100T4 Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+#define NWAY_AR_REMOTE_FAULT     0x2000   /* Remote Fault detected */
+#define NWAY_AR_NEXT_PAGE        0x8000   /* Next Page ability supported */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_SELECTOR_FIELD 0x0000 /* LP protocol selector field */
+#define NWAY_LPAR_10T_HD_CAPS    0x0020 /* LP is 10T   Half Duplex Capable */
+#define NWAY_LPAR_10T_FD_CAPS    0x0040 /* LP is 10T   Full Duplex Capable */
+#define NWAY_LPAR_100TX_HD_CAPS  0x0080 /* LP is 100TX Half Duplex Capable */
+#define NWAY_LPAR_100TX_FD_CAPS  0x0100 /* LP is 100TX Full Duplex Capable */
+#define NWAY_LPAR_100T4_CAPS     0x0200 /* LP is 100T4 Capable */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+#define NWAY_LPAR_REMOTE_FAULT   0x2000 /* LP has detected Remote Fault */
+#define NWAY_LPAR_ACKNOWLEDGE    0x4000 /* LP has rx'd link code word */
+#define NWAY_LPAR_NEXT_PAGE      0x8000 /* Next Page ability supported */
+
+/* Autoneg Expansion Register */
+#define NWAY_ER_LP_NWAY_CAPS      0x0001 /* LP has Auto Neg Capability */
+#define NWAY_ER_PAGE_RXD          0x0002 /* New Page received */
+#define NWAY_ER_NEXT_PAGE_CAPS    0x0004 /* Next Page able */
+#define NWAY_ER_LP_NEXT_PAGE_CAPS 0x0008 /* LP is Next Page able */
+#define NWAY_ER_PAR_DETECT_FAULT  0x0010 /* Parallel Detection Fault */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_ASYM_PAUSE      0x0080 /* Advertise asymmetric pause bit */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_REPEATER_DTE    0x0400 /* 1=Repeater/switch device port */
+                                        /* 0=DTE device */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+                                        /* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+                                        /* 0=Automatic Master/Slave config */
+#define CR_1000T_TEST_MODE_NORMAL 0x0000 /* Normal Operation */
+#define CR_1000T_TEST_MODE_1     0x2000 /* Transmit Waveform test */
+#define CR_1000T_TEST_MODE_2     0x4000 /* Master Transmit Jitter test */
+#define CR_1000T_TEST_MODE_3     0x6000 /* Slave Transmit Jitter test */
+#define CR_1000T_TEST_MODE_4     0x8000 /* Transmitter Distortion test */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_IDLE_ERROR_CNT   0x00FF /* Num idle errors since last read */
+#define SR_1000T_ASYM_PAUSE_DIR   0x0100 /* LP asymmetric pause direction bit */
+#define SR_1000T_LP_HD_CAPS       0x0400 /* LP is 1000T HD capable */
+#define SR_1000T_LP_FD_CAPS       0x0800 /* LP is 1000T FD capable */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+#define SR_1000T_MS_CONFIG_RES    0x4000 /* 1=Local Tx is Master, 0=Slave */
+#define SR_1000T_MS_CONFIG_FAULT  0x8000 /* Master/Slave config fault */
+
+#define SR_1000T_PHY_EXCESSIVE_IDLE_ERR_COUNT 5
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_AUTONEG_EXP  0x06 /* Autoneg Expansion Reg */
+#define PHY_NEXT_PAGE_TX 0x07 /* Next Page Tx */
+#define PHY_LP_NEXT_PAGE 0x08 /* Link Partner Next Page */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+#define PHY_EXT_STATUS   0x0F /* Extended Status Reg */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_FWE_MASK  0x00000030
+#define E1000_EECD_FWE_DIS   0x00000010 /* Disable FLASH writes */
+#define E1000_EECD_FWE_EN    0x00000020 /* Enable FLASH writes */
+#define E1000_EECD_FWE_SHIFT 4
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+#define E1000_EECD_SIZE      0x00000200 /* NVM Size (0=64 word 1=256 word) */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_EECD_TYPE      0x00002000 /* NVM Type (1-SPI, 0-Microwire) */
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_NVADDS    0x00018000 /* NVM Address Size */
+#define E1000_EECD_SELSHAD   0x00020000 /* Select Shadow RAM */
+#define E1000_EECD_INITSRAM  0x00040000 /* Initialize Shadow RAM */
+#define E1000_EECD_FLUPD     0x00080000 /* Update FLASH */
+#define E1000_EECD_AUPDEN    0x00100000 /* Enable Autonomous FLASH update */
+#define E1000_EECD_SHADV     0x00200000 /* Shadow RAM Data Valid */
+#define E1000_EECD_SEC1VAL   0x00400000 /* Sector One Valid */
+#define E1000_EECD_SECVAL_SHIFT      22
+
+#define E1000_NVM_SWDPIN0   0x0001   /* SWDPIN 0 NVM Value */
+#define E1000_NVM_LED_LOGIC 0x0020   /* Led Logic Word */
+#define E1000_NVM_RW_REG_DATA   16   /* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_WRITE    1    /* Flag for polling for write complete */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+#define E1000_FLASH_UPDATES  2000
+
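+/*
+ * Illustrative sketch only, not used by the driver: the EERD-style read
+ * protocol implied by the E1000_NVM_RW_* constants.  The rd32()/wr32()
+ * callbacks stand in for the register accessors and are assumed, not part
+ * of this header; a real implementation would also bound the poll.
+ */
+static inline u16 e1000_example_eerd_read(u32 (*rd32)(void),
+					  void (*wr32)(u32), u16 offset)
+{
+	u32 eerd;
+
+	/* start a read: the word address sits above the start bit */
+	wr32(((u32)offset << E1000_NVM_RW_ADDR_SHIFT) | E1000_NVM_RW_REG_START);
+
+	do {						/* poll for completion */
+		eerd = rd32();
+	} while (!(eerd & E1000_NVM_RW_REG_DONE));
+
+	return (u16)(eerd >> E1000_NVM_RW_REG_DATA);	/* data in the high word */
+}
+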
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004
+#define NVM_VERSION                0x0005
+#define NVM_SERDES_AMPLITUDE       0x0006 /* For SERDES output amplitude adjustment. */
+#define NVM_PHY_CLASS_WORD         0x0007
+#define NVM_INIT_CONTROL1_REG      0x000A
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_SWDEF_PINS_CTRL_PORT_1 0x0010
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_3GIO_3            0x001A
+#define NVM_SWDEF_PINS_CTRL_PORT_0 0x0020
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_CFG                    0x0012
+#define NVM_FLASH_VERSION          0x0032
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x40000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x80000 /* ...for second port */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_PAUSE            0x1000
+#define NVM_WORD0F_ASM_DIR          0x2000
+#define NVM_WORD0F_ANE              0x0800
+#define NVM_WORD0F_SWPDIO_EXT_MASK  0x00F0
+#define NVM_WORD0F_LPLU             0x0001
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+#define NVM_WORD1A_ASPM_MASK  0x000C
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
+
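+/*
+ * Illustrative sketch only, not used by the driver: validating the NVM
+ * checksum.  The read_word() callback stands in for the controller-
+ * specific NVM read routine and is assumed, not part of this header.
+ */
+static inline int e1000_example_nvm_checksum_ok(u16 (*read_word)(u16 offset))
+{
+	u16 checksum = 0;
+	u16 i;
+
+	/* 16-bit wraparound is intended: words 0x00..0x3F must sum to 0xBABA */
+	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
+		checksum += read_word(i);
+
+	return checksum == NVM_SUM;
+}
+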
+#define NVM_MAC_ADDR_OFFSET        0
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_RESERVED_WORD          0xFFFF
+#define NVM_PHY_CLASS_A            0x8000
+#define NVM_SERDES_AMPLITUDE_MASK  0x000F
+#define NVM_SIZE_MASK              0x1C00
+#define NVM_SIZE_SHIFT             10
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+#define NVM_SWDPIO_EXT_SHIFT       4
+
+/* NVM Commands - Microwire */
+#define NVM_READ_OPCODE_MICROWIRE  0x6  /* NVM read opcode */
+#define NVM_WRITE_OPCODE_MICROWIRE 0x5  /* NVM write opcode */
+#define NVM_ERASE_OPCODE_MICROWIRE 0x7  /* NVM erase opcode */
+#define NVM_EWEN_OPCODE_MICROWIRE  0x13 /* NVM erase/write enable */
+#define NVM_EWDS_OPCODE_MICROWIRE  0x10 /* NVM erase/write disable */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_WRDI_OPCODE_SPI        0x04 /* NVM reset Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+#define NVM_WRSR_OPCODE_SPI        0x01 /* NVM write Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+#define NVM_STATUS_WEN_SPI         0x02
+#define NVM_STATUS_BP0_SPI         0x04
+#define NVM_STATUS_BP1_SPI         0x08
+#define NVM_STATUS_WPEN_SPI        0x80
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+                              (ID_LED_OFF1_OFF2 <<  8) | \
+                              (ID_LED_DEF1_DEF2 <<  4) | \
+                              (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIX_COMMAND_REGISTER        0xE6
+#define PCIX_STATUS_REGISTER_LO      0xE8
+#define PCIX_STATUS_REGISTER_HI      0xEA
+#define PCI_HEADER_TYPE_REGISTER     0x0E
+#define PCIE_LINK_STATUS             0x12
+
+#define PCIX_COMMAND_MMRBC_MASK      0x000C
+#define PCIX_COMMAND_MMRBC_SHIFT     0x2
+#define PCIX_STATUS_HI_MMRBC_MASK    0x0060
+#define PCIX_STATUS_HI_MMRBC_SHIFT   0x5
+#define PCIX_STATUS_HI_MMRBC_4K      0x3
+#define PCIX_STATUS_HI_MMRBC_2K      0x2
+#define PCIX_STATUS_LO_FUNC_MASK     0x7
+#define PCI_HEADER_TYPE_MULTIFUNC    0x80
+#define PCIE_LINK_WIDTH_MASK         0x3F0
+#define PCIE_LINK_WIDTH_SHIFT        4
+
+#ifndef ETH_ADDR_LEN
+#define ETH_ADDR_LEN                 6
+#endif
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/*
+ * I = Integrated
+ * E = External
+ */
+#define M88E1000_E_PHY_ID    0x01410C50
+#define M88E1000_I_PHY_ID    0x01410C30
+#define M88E1011_I_PHY_ID    0x01410C20
+#define IGP01E1000_I_PHY_ID  0x02A80380
+#define M88E1011_I_REV_4     0x04
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define GG82563_E_PHY_ID     0x01410CA0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define IFE_E_PHY_ID         0x02A80330
+#define IFE_PLUS_E_PHY_ID    0x02A80320
+#define IFE_C_E_PHY_ID       0x02A80310
+#define M88_VENDOR           0x0141
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_INT_ENABLE        0x12  /* Interrupt Enable Register */
+#define M88E1000_INT_STATUS        0x13  /* Interrupt Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+#define M88E1000_RX_ERR_CNTR       0x15  /* Receive Error Counter */
+
+#define M88E1000_PHY_EXT_CTRL      0x1A  /* PHY extend control register */
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+#define M88E1000_PHY_VCO_REG_BIT8  0x100 /* Bits 8 & 11 are adjusted for */
+#define M88E1000_PHY_VCO_REG_BIT11 0x800    /* improved BER performance */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_JABBER_DISABLE    0x0001 /* 1=Jabber Function disabled */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+#define M88E1000_PSCR_SQE_TEST          0x0004 /* 1=SQE Test enabled */
+/* 1=CLK125 low, 0=CLK125 toggling */
+#define M88E1000_PSCR_CLK125_DISABLE    0x0010
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+                                               /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/*
+ * 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+#define M88E1000_PSCR_EN_10BT_EXT_DIST 0x0080
+/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_MII_5BIT_ENABLE      0x0100
+#define M88E1000_PSCR_SCRAMBLER_DISABLE    0x0200 /* 1=Scrambler disable */
+#define M88E1000_PSCR_FORCE_LINK_GOOD      0x0400 /* 1=Force link good */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_JABBER             0x0001 /* 1=Jabber */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/*
+ * 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_LINK               0x0400 /* 1=Link up, 0=Link down */
+#define M88E1000_PSSR_SPD_DPLX_RESOLVED  0x0800 /* 1=Speed & Duplex resolved */
+#define M88E1000_PSSR_PAGE_RCVD          0x1000 /* 1=Page received */
+#define M88E1000_PSSR_DPLX               0x2000 /* 1=Duplex 0=Half Duplex */
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_10MBS              0x0000 /* 00=10Mbs */
+#define M88E1000_PSSR_100MBS             0x4000 /* 01=100Mbs */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
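+/*
+ * Illustrative sketch only, not used by the driver: extracting the 3-bit
+ * cable-length index (ranges tabulated above) from a PHY Specific Status
+ * value.
+ */
+static inline u16 e1000_example_m88_cable_index(u16 pssr)
+{
+	return (pssr & M88E1000_PSSR_CABLE_LENGTH) >>
+	       M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+}
+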
+/* M88E1000 Extended PHY Specific Control Register */
+#define M88E1000_EPSCR_FIBER_LOOPBACK 0x4000 /* 1=Fiber loopback */
+/*
+ * 1 = Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+#define M88E1000_EPSCR_DOWN_NO_IDLE   0x8000
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_2X   0x0400
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_3X   0x0800
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_4X   0x0C00
+/*
+ * Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_DIS   0x0000
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_2X    0x0200
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_3X    0x0300
+#define M88E1000_EPSCR_TX_CLK_2_5     0x0060 /* 2.5 MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+#define M88E1000_EPSCR_TX_CLK_0       0x0000 /* NO  TX_CLK */
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_1X    0x0000
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_2X    0x0200
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_3X    0x0400
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_4X    0x0600
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_6X    0x0A00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_7X    0x0C00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_8X    0x0E00
+
+/*
+ * Bits...
+ * 15-5: page
+ * 4-0: register offset
+ */
+#define GG82563_PAGE_SHIFT        5
+#define GG82563_REG(page, reg)    \
+        (((page) << GG82563_PAGE_SHIFT) | ((reg) & MAX_PHY_REG_ADDRESS))
+#define GG82563_MIN_ALT_REG       30
+
+/* GG82563 Specific Registers */
+#define GG82563_PHY_SPEC_CTRL           \
+        GG82563_REG(0, 16) /* PHY Specific Control */
+#define GG82563_PHY_SPEC_STATUS         \
+        GG82563_REG(0, 17) /* PHY Specific Status */
+#define GG82563_PHY_INT_ENABLE          \
+        GG82563_REG(0, 18) /* Interrupt Enable */
+#define GG82563_PHY_SPEC_STATUS_2       \
+        GG82563_REG(0, 19) /* PHY Specific Status 2 */
+#define GG82563_PHY_RX_ERR_CNTR         \
+        GG82563_REG(0, 21) /* Receive Error Counter */
+#define GG82563_PHY_PAGE_SELECT         \
+        GG82563_REG(0, 22) /* Page Select */
+#define GG82563_PHY_SPEC_CTRL_2         \
+        GG82563_REG(0, 26) /* PHY Specific Control 2 */
+#define GG82563_PHY_PAGE_SELECT_ALT     \
+        GG82563_REG(0, 29) /* Alternate Page Select */
+#define GG82563_PHY_TEST_CLK_CTRL       \
+        GG82563_REG(0, 30) /* Test Clock Control (use reg. 29 to select) */
+
+#define GG82563_PHY_MAC_SPEC_CTRL       \
+        GG82563_REG(2, 21) /* MAC Specific Control Register */
+#define GG82563_PHY_MAC_SPEC_CTRL_2     \
+        GG82563_REG(2, 26) /* MAC Specific Control 2 */
+
+#define GG82563_PHY_DSP_DISTANCE    \
+        GG82563_REG(5, 26) /* DSP Distance */
+
+/* Page 193 - Port Control Registers */
+#define GG82563_PHY_KMRN_MODE_CTRL   \
+        GG82563_REG(193, 16) /* Kumeran Mode Control */
+#define GG82563_PHY_PORT_RESET          \
+        GG82563_REG(193, 17) /* Port Reset */
+#define GG82563_PHY_REVISION_ID         \
+        GG82563_REG(193, 18) /* Revision ID */
+#define GG82563_PHY_DEVICE_ID           \
+        GG82563_REG(193, 19) /* Device ID */
+#define GG82563_PHY_PWR_MGMT_CTRL       \
+        GG82563_REG(193, 20) /* Power Management Control */
+#define GG82563_PHY_RATE_ADAPT_CTRL     \
+        GG82563_REG(193, 25) /* Rate Adaptation Control */
+
+/* Page 194 - KMRN Registers */
+#define GG82563_PHY_KMRN_FIFO_CTRL_STAT \
+        GG82563_REG(194, 16) /* FIFO's Control/Status */
+#define GG82563_PHY_KMRN_CTRL           \
+        GG82563_REG(194, 17) /* Control */
+#define GG82563_PHY_INBAND_CTRL         \
+        GG82563_REG(194, 18) /* Inband Control */
+#define GG82563_PHY_KMRN_DIAGNOSTIC     \
+        GG82563_REG(194, 19) /* Diagnostic */
+#define GG82563_PHY_ACK_TIMEOUTS        \
+        GG82563_REG(194, 20) /* Acknowledge Timeouts */
+#define GG82563_PHY_ADV_ABILITY         \
+        GG82563_REG(194, 21) /* Advertised Ability */
+#define GG82563_PHY_LINK_PARTNER_ADV_ABILITY \
+        GG82563_REG(194, 23) /* Link Partner Advertised Ability */
+#define GG82563_PHY_ADV_NEXT_PAGE       \
+        GG82563_REG(194, 24) /* Advertised Next Page */
+#define GG82563_PHY_LINK_PARTNER_ADV_NEXT_PAGE \
+        GG82563_REG(194, 25) /* Link Partner Advertised Next page */
+#define GG82563_PHY_KMRN_MISC           \
+        GG82563_REG(194, 26) /* Misc. */
+
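+/*
+ * Illustrative sketch only, not used by the driver: a GG82563_REG() value
+ * encodes page and in-page offset, so registers beyond the multi-page
+ * range need a page-select write first.  The write_reg()/read_reg()
+ * callbacks stand in for the raw MDIC helpers and are assumed, not part
+ * of this header.
+ */
+static inline u16 e1000_example_gg82563_read(void (*write_reg)(u32, u16),
+					     u16 (*read_reg)(u32), u32 reg)
+{
+	if (reg > MAX_PHY_MULTI_PAGE_REG)		/* select the page first */
+		write_reg(GG82563_PHY_PAGE_SELECT,
+			  (u16)(reg >> GG82563_PAGE_SHIFT));
+
+	return read_reg(reg & MAX_PHY_REG_ADDRESS);	/* then the in-page offset */
+}
+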
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
+
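+/*
+ * Illustrative sketch only, not used by the driver: composing an MDIC read
+ * command for PHY address 'phy', register 'reg'.  The caller writes this
+ * value to MDIC, polls for E1000_MDIC_READY, checks E1000_MDIC_ERROR and
+ * then takes the result from E1000_MDIC_DATA_MASK.
+ */
+static inline u32 e1000_example_mdic_read_cmd(u32 phy, u32 reg)
+{
+	return ((reg << E1000_MDIC_REG_SHIFT) & E1000_MDIC_REG_MASK) |
+	       ((phy << E1000_MDIC_PHY_SHIFT) & E1000_MDIC_PHY_MASK) |
+	       E1000_MDIC_OP_READ;
+}
+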
+/* SerDes Control */
+#define E1000_GEN_CTL_READY             0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT     8
+#define E1000_GEN_POLL_TIMEOUT          640
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c
new file mode 100644
index 0000000..a249f17
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ethtool.c
@@ -0,0 +1,2207 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* ethtool support for e1000 */
+
+#include <linux/netdevice.h>
+
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+
+#include "e1000.h"
+#include "e1000_82541.h"
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+
+#ifdef ETHTOOL_OPS_COMPAT
+#include "kcompat_ethtool.c"
+#endif
+
+#ifdef ETHTOOL_GSTATS
+struct e1000_stats {
+	char stat_string[ETH_GSTRING_LEN];
+	int sizeof_stat;
+	int stat_offset;
+};
+
+#define E1000_STAT(m) sizeof(((struct e1000_adapter *)0)->m), \
+		      offsetof(struct e1000_adapter, m)
+static const struct e1000_stats e1000_gstrings_stats[] = {
+	{ "rx_packets", E1000_STAT(stats.gprc) },
+	{ "tx_packets", E1000_STAT(stats.gptc) },
+	{ "rx_bytes", E1000_STAT(stats.gorc) },
+	{ "tx_bytes", E1000_STAT(stats.gotc) },
+	{ "rx_broadcast", E1000_STAT(stats.bprc) },
+	{ "tx_broadcast", E1000_STAT(stats.bptc) },
+	{ "rx_multicast", E1000_STAT(stats.mprc) },
+	{ "tx_multicast", E1000_STAT(stats.mptc) },
+	{ "rx_errors", E1000_STAT(net_stats.rx_errors) },
+	{ "tx_errors", E1000_STAT(net_stats.tx_errors) },
+	{ "tx_dropped", E1000_STAT(net_stats.tx_dropped) },
+	{ "multicast", E1000_STAT(stats.mprc) },
+	{ "collisions", E1000_STAT(stats.colc) },
+	{ "rx_length_errors", E1000_STAT(net_stats.rx_length_errors) },
+	{ "rx_over_errors", E1000_STAT(net_stats.rx_over_errors) },
+	{ "rx_crc_errors", E1000_STAT(stats.crcerrs) },
+	{ "rx_frame_errors", E1000_STAT(net_stats.rx_frame_errors) },
+	{ "rx_no_buffer_count", E1000_STAT(stats.rnbc) },
+	{ "rx_missed_errors", E1000_STAT(stats.mpc) },
+	{ "tx_aborted_errors", E1000_STAT(stats.ecol) },
+	{ "tx_carrier_errors", E1000_STAT(stats.tncrs) },
+	{ "tx_fifo_errors", E1000_STAT(net_stats.tx_fifo_errors) },
+	{ "tx_heartbeat_errors", E1000_STAT(net_stats.tx_heartbeat_errors) },
+	{ "tx_window_errors", E1000_STAT(stats.latecol) },
+	{ "tx_abort_late_coll", E1000_STAT(stats.latecol) },
+	{ "tx_deferred_ok", E1000_STAT(stats.dc) },
+	{ "tx_single_coll_ok", E1000_STAT(stats.scc) },
+	{ "tx_multi_coll_ok", E1000_STAT(stats.mcc) },
+	{ "tx_timeout_count", E1000_STAT(tx_timeout_count) },
+	{ "tx_restart_queue", E1000_STAT(restart_queue) },
+	{ "rx_long_length_errors", E1000_STAT(stats.roc) },
+	{ "rx_short_length_errors", E1000_STAT(stats.ruc) },
+	{ "rx_align_errors", E1000_STAT(stats.algnerrc) },
+	{ "tx_tcp_seg_good", E1000_STAT(stats.tsctc) },
+	{ "tx_tcp_seg_failed", E1000_STAT(stats.tsctfc) },
+	{ "rx_flow_control_xon", E1000_STAT(stats.xonrxc) },
+	{ "rx_flow_control_xoff", E1000_STAT(stats.xoffrxc) },
+	{ "tx_flow_control_xon", E1000_STAT(stats.xontxc) },
+	{ "tx_flow_control_xoff", E1000_STAT(stats.xofftxc) },
+	{ "rx_long_byte_count", E1000_STAT(stats.gorc) },
+	{ "rx_csum_offload_good", E1000_STAT(hw_csum_good) },
+	{ "rx_csum_offload_errors", E1000_STAT(hw_csum_err) },
+	{ "rx_header_split", E1000_STAT(rx_hdr_split) },
+	{ "alloc_rx_buff_failed", E1000_STAT(alloc_rx_buff_failed) },
+	{ "tx_smbus", E1000_STAT(stats.mgptc) },
+	{ "rx_smbus", E1000_STAT(stats.mgprc) },
+	{ "dropped_smbus", E1000_STAT(stats.mgpdc) },
+};
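+/* Each entry pairs a display string with the size and offset of a field
+ * in struct e1000_adapter; the ethtool stats callback (later in this
+ * file) is expected to walk this table and copy sizeof_stat bytes from
+ * (char *)adapter + stat_offset into the u64 result array.
+ */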
+
+#ifdef CONFIG_E1000_MQ
+#define E1000_QUEUE_STATS_LEN \
+	(((((struct e1000_adapter *)netdev_priv(netdev))->num_rx_queues > 1 ? \
+	   ((struct e1000_adapter *)netdev_priv(netdev))->num_rx_queues : 0) + \
+	  (((struct e1000_adapter *)netdev_priv(netdev))->num_tx_queues > 1 ? \
+	   ((struct e1000_adapter *)netdev_priv(netdev))->num_tx_queues : 0)) * \
+	 (sizeof(struct e1000_queue_stats) / sizeof(u64)))
+#else
+#define E1000_QUEUE_STATS_LEN 0
+#endif
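+/* E1000_QUEUE_STATS_LEN counts one u64 slot per per-queue counter: each
+ * struct e1000_queue_stats contributes sizeof(...)/sizeof(u64) entries,
+ * and a ring direction is only counted when it has more than one queue.
+ */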
+#define E1000_GLOBAL_STATS_LEN	\
+	(sizeof(e1000_gstrings_stats) / sizeof(struct e1000_stats))
+#define E1000_STATS_LEN (E1000_GLOBAL_STATS_LEN + E1000_QUEUE_STATS_LEN)
+#endif /* ETHTOOL_GSTATS */
+#ifdef ETHTOOL_TEST
+static const char e1000_gstrings_test[][ETH_GSTRING_LEN] = {
+	"Register test  (offline)", "Eeprom test    (offline)",
+	"Interrupt test (offline)", "Loopback test  (offline)",
+	"Link test   (on/offline)"
+};
+#define E1000_TEST_LEN (sizeof(e1000_gstrings_test) / ETH_GSTRING_LEN)
+#endif /* ETHTOOL_TEST */
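+/* The five strings above map, in order, onto the data[] slots filled in
+ * by e1000_diag_test() below: data[0] register, data[1] EEPROM,
+ * data[2] interrupt, data[3] loopback and data[4] link.
+ */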
+
+static int e1000_get_settings(struct net_device *netdev,
+                              struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 status;
+
+	if (hw->phy.media_type == e1000_media_type_copper) {
+
+		ecmd->supported = (SUPPORTED_10baseT_Half |
+		                   SUPPORTED_10baseT_Full |
+		                   SUPPORTED_100baseT_Half |
+		                   SUPPORTED_100baseT_Full |
+		                   SUPPORTED_1000baseT_Full|
+		                   SUPPORTED_Autoneg |
+		                   SUPPORTED_TP);
+		if (hw->phy.type == e1000_phy_ife)
+			ecmd->supported &= ~SUPPORTED_1000baseT_Full;
+		ecmd->advertising = ADVERTISED_TP;
+
+		if (hw->mac.autoneg == 1) {
+			ecmd->advertising |= ADVERTISED_Autoneg;
+			/* the e1000 autoneg seems to match ethtool nicely */
+			ecmd->advertising |= hw->phy.autoneg_advertised;
+		}
+
+		ecmd->port = PORT_TP;
+		ecmd->phy_address = hw->phy.addr;
+
+		if (hw->mac.type == e1000_82543)
+			ecmd->transceiver = XCVR_EXTERNAL;
+		else
+			ecmd->transceiver = XCVR_INTERNAL;
+
+	} else {
+		ecmd->supported   = (SUPPORTED_1000baseT_Full |
+				     SUPPORTED_FIBRE |
+				     SUPPORTED_Autoneg);
+
+		ecmd->advertising = (ADVERTISED_1000baseT_Full |
+				     ADVERTISED_FIBRE |
+				     ADVERTISED_Autoneg);
+
+		ecmd->port = PORT_FIBRE;
+
+		if (hw->mac.type >= e1000_82545)
+			ecmd->transceiver = XCVR_INTERNAL;
+		else
+			ecmd->transceiver = XCVR_EXTERNAL;
+	}
+
+	status = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+
+	if (status & E1000_STATUS_LU) {
+
+		if ((status & E1000_STATUS_SPEED_1000) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->speed = SPEED_1000;
+		else if (status & E1000_STATUS_SPEED_100)
+			ecmd->speed = SPEED_100;
+		else
+			ecmd->speed = SPEED_10;
+
+		if ((status & E1000_STATUS_FD) ||
+		    hw->phy.media_type != e1000_media_type_copper)
+			ecmd->duplex = DUPLEX_FULL;
+		else
+			ecmd->duplex = DUPLEX_HALF;
+	} else {
+		ecmd->speed = -1;
+		ecmd->duplex = -1;
+	}
+
+	ecmd->autoneg = ((hw->phy.media_type == e1000_media_type_fiber) ||
+			 hw->mac.autoneg) ? AUTONEG_ENABLE : AUTONEG_DISABLE;
+	return 0;
+}
+
+static int e1000_set_settings(struct net_device *netdev,
+                              struct ethtool_cmd *ecmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* When SoL/IDER sessions are active, autoneg/speed/duplex
+	 * cannot be changed */
+	if (e1000_check_reset_block(hw)) {
+		DPRINTK(DRV, ERR, "Cannot change link characteristics "
+		        "when SoL/IDER is active.\n");
+		return -EINVAL;
+	}
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (ecmd->autoneg == AUTONEG_ENABLE) {
+		hw->mac.autoneg = 1;
+		if (hw->phy.media_type == e1000_media_type_fiber)
+			hw->phy.autoneg_advertised = ADVERTISED_1000baseT_Full |
+			                             ADVERTISED_FIBRE |
+			                             ADVERTISED_Autoneg;
+		else
+			hw->phy.autoneg_advertised = ecmd->advertising |
+			                             ADVERTISED_TP |
+			                             ADVERTISED_Autoneg;
+		ecmd->advertising = hw->phy.autoneg_advertised;
+		if (adapter->fc_autoneg)
+			hw->fc.original_type = e1000_fc_default;
+	} else {
+		if (e1000_set_spd_dplx(adapter, ecmd->speed + ecmd->duplex)) {
+			clear_bit(__E1000_RESETTING, &adapter->state);
+			return -EINVAL;
+		}
+	}
+
+	/* reset the link */
+
+	if (netif_running(adapter->netdev)) {
+		e1000_down(adapter);
+		e1000_up(adapter);
+	} else {
+		e1000_reset(adapter);
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return 0;
+}
+
+static void e1000_get_pauseparam(struct net_device *netdev,
+                                 struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	pause->autoneg =
+		(adapter->fc_autoneg ? AUTONEG_ENABLE : AUTONEG_DISABLE);
+
+	if (hw->fc.type == e1000_fc_rx_pause)
+		pause->rx_pause = 1;
+	else if (hw->fc.type == e1000_fc_tx_pause)
+		pause->tx_pause = 1;
+	else if (hw->fc.type == e1000_fc_full) {
+		pause->rx_pause = 1;
+		pause->tx_pause = 1;
+	}
+}
+
+static int e1000_set_pauseparam(struct net_device *netdev,
+                                struct ethtool_pauseparam *pause)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 0;
+
+	adapter->fc_autoneg = pause->autoneg;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (pause->rx_pause && pause->tx_pause)
+		hw->fc.type = e1000_fc_full;
+	else if (pause->rx_pause && !pause->tx_pause)
+		hw->fc.type = e1000_fc_rx_pause;
+	else if (!pause->rx_pause && pause->tx_pause)
+		hw->fc.type = e1000_fc_tx_pause;
+	else if (!pause->rx_pause && !pause->tx_pause)
+		hw->fc.type = e1000_fc_none;
+
+	hw->fc.original_type = hw->fc.type;
+
+	if (adapter->fc_autoneg == AUTONEG_ENABLE) {
+		hw->fc.type = e1000_fc_default;
+		if (netif_running(adapter->netdev)) {
+			e1000_down(adapter);
+			e1000_up(adapter);
+		} else {
+			e1000_reset(adapter);
+		}
+	} else {
+		retval = ((hw->phy.media_type == e1000_media_type_fiber) ?
+			  e1000_setup_link(hw) : e1000_force_mac_fc(hw));
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return retval;
+}
+
+static u32 e1000_get_rx_csum(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->rx_csum;
+}
+
+static int e1000_set_rx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	adapter->rx_csum = data;
+
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	else
+		e1000_reset(adapter);
+	return 0;
+}
+
+static u32 e1000_get_tx_csum(struct net_device *netdev)
+{
+	return (netdev->features & NETIF_F_HW_CSUM) != 0;
+}
+
+static int e1000_set_tx_csum(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->hw.mac.type < e1000_82543) {
+		if (!data)
+			return -EINVAL;
+		return 0;
+	}
+
+	if (data)
+		netdev->features |= NETIF_F_HW_CSUM;
+	else
+		netdev->features &= ~NETIF_F_HW_CSUM;
+
+	return 0;
+}
+
+#ifdef NETIF_F_TSO
+static int e1000_set_tso(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int i;
+	struct net_device *v_netdev;
+	if (!(adapter->flags & E1000_FLAG_HAS_TSO))
+		return data ? -EINVAL : 0;
+
+	if (data) {
+		netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		if (adapter->flags & E1000_FLAG_HAS_TSO6)
+			netdev->features |= NETIF_F_TSO6;
+#endif
+	} else {
+		netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+		if (adapter->flags & E1000_FLAG_HAS_TSO6)
+			netdev->features &= ~NETIF_F_TSO6;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+		/* disable TSO on all VLANs if they're present */
+		if (!adapter->vlgrp)
+			goto tso_out;
+		for (i = 0; i < VLAN_N_VID; i++) {
+			v_netdev = vlan_group_get_device(adapter->vlgrp, i);
+			if (!v_netdev)
+				continue;
+
+			v_netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+			if (adapter->flags & E1000_FLAG_HAS_TSO6)
+				v_netdev->features &= ~NETIF_F_TSO6;
+#endif
+			vlan_group_set_device(adapter->vlgrp, i, v_netdev);
+		}
+#endif
+	}
+
+#ifdef NETIF_F_HW_VLAN_TX
+tso_out:
+#endif
+	DPRINTK(PROBE, INFO, "TSO is %s\n", data ? "Enabled" : "Disabled");
+	adapter->flags |= E1000_FLAG_TSO_FORCE;
+	return 0;
+}
+#endif /* NETIF_F_TSO */
+
+static u32 e1000_get_msglevel(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->msg_enable;
+}
+
+static void e1000_set_msglevel(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	adapter->msg_enable = data;
+}
+
+static int e1000_get_regs_len(struct net_device *netdev)
+{
+#define E1000_REGS_LEN 32
+	return E1000_REGS_LEN * sizeof(u32);
+}
+
+static void e1000_get_regs(struct net_device *netdev,
+                           struct ethtool_regs *regs, void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 *regs_buff = p;
+	u16 phy_data;
+
+	memset(p, 0, E1000_REGS_LEN * sizeof(u32));
+
+	regs->version = (1 << 24) | (hw->revision_id << 16) | hw->device_id;
+
+	regs_buff[0]  = E1000_READ_REG(hw, E1000_CTRL);
+	regs_buff[1]  = E1000_READ_REG(hw, E1000_STATUS);
+
+	regs_buff[2]  = E1000_READ_REG(hw, E1000_RCTL);
+	regs_buff[3]  = E1000_READ_REG(hw, E1000_RDLEN(0));
+	regs_buff[4]  = E1000_READ_REG(hw, E1000_RDH(0));
+	regs_buff[5]  = E1000_READ_REG(hw, E1000_RDT(0));
+	regs_buff[6]  = E1000_READ_REG(hw, E1000_RDTR);
+
+	regs_buff[7]  = E1000_READ_REG(hw, E1000_TCTL);
+	regs_buff[8]  = E1000_READ_REG(hw, E1000_TDLEN(0));
+	regs_buff[9]  = E1000_READ_REG(hw, E1000_TDH(0));
+	regs_buff[10] = E1000_READ_REG(hw, E1000_TDT(0));
+	regs_buff[11] = E1000_READ_REG(hw, E1000_TIDV);
+
+	regs_buff[12] = adapter->hw.phy.type;  /* PHY type (IGP=1, M88=0) */
+	if (hw->phy.type == e1000_phy_igp) {
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_A);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_A &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_B);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_B &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[14] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_C);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_C &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[15] = (u32)phy_data; /* cable length */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_AGC_D);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_AGC_D &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[16] = (u32)phy_data; /* cable length */
+		regs_buff[17] = 0; /* extended 10bt distance (not needed) */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[18] = (u32)phy_data; /* cable polarity */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT,
+				    IGP01E1000_PHY_PCS_INIT_REG);
+		e1000_read_phy_reg(hw, IGP01E1000_PHY_PCS_INIT_REG &
+				   IGP01E1000_PHY_PAGE_SELECT, &phy_data);
+		regs_buff[19] = (u32)phy_data; /* cable polarity */
+		regs_buff[20] = 0; /* polarity correction enabled (always) */
+		regs_buff[22] = 0; /* phy receive errors (unavailable) */
+		regs_buff[23] = regs_buff[18]; /* mdix mode */
+		e1000_write_phy_reg(hw, IGP01E1000_PHY_PAGE_SELECT, 0x0);
+	} else {
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+		regs_buff[13] = (u32)phy_data; /* cable length */
+		regs_buff[14] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[15] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[16] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+		regs_buff[17] = (u32)phy_data; /* extended 10bt distance */
+		regs_buff[18] = regs_buff[13]; /* cable polarity */
+		regs_buff[19] = 0;  /* Dummy (to align w/ IGP phy reg dump) */
+		regs_buff[20] = regs_buff[17]; /* polarity correction */
+		/* phy receive errors */
+		regs_buff[22] = adapter->phy_stats.receive_errors;
+		regs_buff[23] = regs_buff[13]; /* mdix mode */
+	}
+	regs_buff[21] = adapter->phy_stats.idle_errors;  /* phy idle errors */
+	e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+	regs_buff[24] = (u32)phy_data;  /* phy local receiver status */
+	regs_buff[25] = regs_buff[24];  /* phy remote receiver status */
+	if (hw->mac.type >= e1000_82540 &&
+	    hw->mac.type < e1000_82571 &&
+	    hw->phy.media_type == e1000_media_type_copper) {
+		regs_buff[26] = E1000_READ_REG(hw, E1000_MANC);
+	}
+}
+
+static int e1000_get_eeprom_len(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	return adapter->hw.nvm.word_size * 2;
+}
+
+static int e1000_get_eeprom(struct net_device *netdev,
+                            struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	int first_word, last_word;
+	int ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EINVAL;
+
+	eeprom->magic = hw->vendor_id | (hw->device_id << 16);
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
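+	/* The NVM is word-addressable, so the byte-based ethtool offset and
+	 * length are converted to an inclusive [first_word, last_word] word
+	 * range; an odd starting byte is compensated for in the memcpy below.
+	 */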
+
+	eeprom_buff = kmalloc(sizeof(u16) *
+			(last_word - first_word + 1), GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	if (hw->nvm.type == e1000_nvm_eeprom_spi)
+		ret_val = e1000_read_nvm(hw, first_word,
+		                         last_word - first_word + 1,
+		                         eeprom_buff);
+	else {
+		for (i = 0; i < last_word - first_word + 1; i++)
+			if ((ret_val = e1000_read_nvm(hw, first_word + i, 1,
+			                              &eeprom_buff[i])))
+				break;
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(bytes, (u8 *)eeprom_buff + (eeprom->offset & 1),
+			eeprom->len);
+	kfree(eeprom_buff);
+
+	return ret_val;
+}
+
+static int e1000_set_eeprom(struct net_device *netdev,
+                            struct ethtool_eeprom *eeprom, u8 *bytes)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u16 *eeprom_buff;
+	void *ptr;
+	int max_len, first_word, last_word, ret_val = 0;
+	u16 i;
+
+	if (eeprom->len == 0)
+		return -EOPNOTSUPP;
+
+	if (eeprom->magic != (hw->vendor_id | (hw->device_id << 16)))
+		return -EFAULT;
+
+	max_len = hw->nvm.word_size * 2;
+
+	first_word = eeprom->offset >> 1;
+	last_word = (eeprom->offset + eeprom->len - 1) >> 1;
+	eeprom_buff = kmalloc(max_len, GFP_KERNEL);
+	if (!eeprom_buff)
+		return -ENOMEM;
+
+	ptr = (void *)eeprom_buff;
+
+	if (eeprom->offset & 1) {
+		/* need read/modify/write of first changed EEPROM word */
+		/* only the second byte of the word is being modified */
+		ret_val = e1000_read_nvm(hw, first_word, 1,
+					    &eeprom_buff[0]);
+		ptr++;
+	}
+	if (((eeprom->offset + eeprom->len) & 1) && (ret_val == 0)) {
+		/* need read/modify/write of last changed EEPROM word */
+		/* only the first byte of the word is being modified */
+		ret_val = e1000_read_nvm(hw, last_word, 1,
+		                  &eeprom_buff[last_word - first_word]);
+	}
+
+	/* Device's eeprom is always little-endian, word addressable */
+	for (i = 0; i < last_word - first_word + 1; i++)
+		le16_to_cpus(&eeprom_buff[i]);
+
+	memcpy(ptr, bytes, eeprom->len);
+
+	for (i = 0; i < last_word - first_word + 1; i++)
+		eeprom_buff[i] = cpu_to_le16(eeprom_buff[i]);
+
+	ret_val = e1000_write_nvm(hw, first_word,
+	                          last_word - first_word + 1, eeprom_buff);
+
+	/* Update the checksum over the first part of the EEPROM if needed
+	 * and flush shadow RAM for 82573 controllers */
+	if ((ret_val == 0) && ((first_word <= NVM_CHECKSUM_REG) ||
+				(hw->mac.type == e1000_82573)))
+		e1000_update_nvm_checksum(hw);
+
+	kfree(eeprom_buff);
+	return ret_val;
+}
+
+static void e1000_get_drvinfo(struct net_device *netdev,
+                              struct ethtool_drvinfo *drvinfo)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	char firmware_version[32];
+	u16 eeprom_data;
+
+	strncpy(drvinfo->driver,  e1000_driver_name, 32);
+	strncpy(drvinfo->version, e1000_driver_version, 32);
+
+	/* EEPROM image version # is reported as firmware version # for
+	 * 8257{1|2|3} controllers */
+	e1000_read_nvm(&adapter->hw, 5, 1, &eeprom_data);
+	switch (adapter->hw.mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		sprintf(firmware_version, "%d.%d-%d",
+			(eeprom_data & 0xF000) >> 12,
+			(eeprom_data & 0x0FF0) >> 4,
+			eeprom_data & 0x000F);
+		break;
+	default:
+		sprintf(firmware_version, "N/A");
+	}
+
+	strncpy(drvinfo->fw_version, firmware_version, 32);
+	strncpy(drvinfo->bus_info, pci_name(adapter->pdev), 32);
+	drvinfo->n_stats = E1000_STATS_LEN;
+	drvinfo->testinfo_len = E1000_TEST_LEN;
+	drvinfo->regdump_len = e1000_get_regs_len(netdev);
+	drvinfo->eedump_len = e1000_get_eeprom_len(netdev);
+}
+
+static void e1000_get_ringparam(struct net_device *netdev,
+                                struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	e1000_mac_type mac_type = adapter->hw.mac.type;
+	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+	struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+
+	ring->rx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_RXD :
+		E1000_MAX_82544_RXD;
+	ring->tx_max_pending = (mac_type < e1000_82544) ? E1000_MAX_TXD :
+		E1000_MAX_82544_TXD;
+	ring->rx_mini_max_pending = 0;
+	ring->rx_jumbo_max_pending = 0;
+	ring->rx_pending = rx_ring->count;
+	ring->tx_pending = tx_ring->count;
+	ring->rx_mini_pending = 0;
+	ring->rx_jumbo_pending = 0;
+}
+
+static int e1000_set_ringparam(struct net_device *netdev,
+                               struct ethtool_ringparam *ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	e1000_mac_type mac_type = adapter->hw.mac.type;
+	struct e1000_tx_ring *tx_ring, *tx_old;
+	struct e1000_rx_ring *rx_ring, *rx_old;
+	int i, err, tx_ring_size, rx_ring_size;
+
+	if ((ring->rx_mini_pending) || (ring->rx_jumbo_pending))
+		return -EINVAL;
+
+	tx_ring_size = sizeof(struct e1000_tx_ring) * adapter->num_tx_queues;
+	rx_ring_size = sizeof(struct e1000_rx_ring) * adapter->num_rx_queues;
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+
+	if (netif_running(adapter->netdev))
+		e1000_down(adapter);
+
+	tx_old = adapter->tx_ring;
+	rx_old = adapter->rx_ring;
+
+	err = -ENOMEM;
+	tx_ring = kzalloc(tx_ring_size, GFP_KERNEL);
+	if (!tx_ring)
+		goto err_alloc_tx;
+	/* use a memcpy to save any previously configured
+	 * items like napi structs from having to be
+	 * reinitialized */
+	memcpy(tx_ring, tx_old, tx_ring_size);
+
+	rx_ring = kzalloc(rx_ring_size, GFP_KERNEL);
+	if (!rx_ring)
+		goto err_alloc_rx;
+	memcpy(rx_ring, rx_old, rx_ring_size);
+
+	adapter->tx_ring = tx_ring;
+	adapter->rx_ring = rx_ring;
+
+	rx_ring->count = max(ring->rx_pending, (u32)E1000_MIN_RXD);
+	rx_ring->count = min(rx_ring->count, (u32)(mac_type < e1000_82544 ?
+			     E1000_MAX_RXD : E1000_MAX_82544_RXD));
+	rx_ring->count = ALIGN(rx_ring->count, REQ_RX_DESCRIPTOR_MULTIPLE);
+
+	tx_ring->count = max(ring->tx_pending, (u32)E1000_MIN_TXD);
+	tx_ring->count = min(tx_ring->count, (u32)(mac_type < e1000_82544 ?
+			     E1000_MAX_TXD : E1000_MAX_82544_TXD));
+	tx_ring->count = ALIGN(tx_ring->count, REQ_TX_DESCRIPTOR_MULTIPLE);
+
+	/* overwrite the counts with the new values */
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		tx_ring[i].count = tx_ring->count;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		rx_ring[i].count = rx_ring->count;
+
+	if (netif_running(adapter->netdev)) {
+		/* Try to get new resources before deleting old */
+		if ((err = e1000_setup_all_rx_resources(adapter)))
+			goto err_setup_rx;
+		if ((err = e1000_setup_all_tx_resources(adapter)))
+			goto err_setup_tx;
+
+		/* restore the old in order to free it,
+		 * then add in the new */
+		adapter->rx_ring = rx_old;
+		adapter->tx_ring = tx_old;
+		e1000_free_all_rx_resources(adapter);
+		e1000_free_all_tx_resources(adapter);
+		kfree(tx_old);
+		kfree(rx_old);
+		adapter->rx_ring = rx_ring;
+		adapter->tx_ring = tx_ring;
+		if ((err = e1000_up(adapter)))
+			goto err_setup;
+	}
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return 0;
+err_setup_tx:
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	adapter->rx_ring = rx_old;
+	adapter->tx_ring = tx_old;
+	kfree(rx_ring);
+err_alloc_rx:
+	kfree(tx_ring);
+err_alloc_tx:
+	e1000_up(adapter);
+err_setup:
+	clear_bit(__E1000_RESETTING, &adapter->state);
+	return err;
+}
+
+static bool reg_pattern_test(struct e1000_adapter *adapter, u64 *data,
+			     int reg, int offset, u32 mask, u32 write)
+{
+	u32 pat, val;
+	static const u32 test[] =
+		{0x5A5A5A5A, 0xA5A5A5A5, 0x00000000, 0xFFFFFFFF};
+	for (pat = 0; pat < ARRAY_SIZE(test); pat++) {
+		E1000_WRITE_REG_ARRAY(&adapter->hw, reg, offset,
+		                      (test[pat] & write));
+		val = E1000_READ_REG_ARRAY(&adapter->hw, reg, offset);
+		if (val != (test[pat] & write & mask)) {
+			DPRINTK(DRV, ERR, "pattern test reg %04X failed: got "
+			        "0x%08X expected 0x%08X\n",
+			        E1000_REGISTER(&adapter->hw, reg) + offset,
+			        val, (test[pat] & write & mask));
+			*data = E1000_REGISTER(&adapter->hw, reg);
+			return 1;
+		}
+	}
+	return 0;
+}
+
+static bool reg_set_and_check(struct e1000_adapter *adapter, u64 *data,
+			      int reg, u32 mask, u32 write)
+{
+	u32 val;
+	E1000_WRITE_REG(&adapter->hw, reg, write & mask);
+	val = E1000_READ_REG(&adapter->hw, reg);
+	if ((write & mask) != (val & mask)) {
+		DPRINTK(DRV, ERR, "set/check reg %04X test failed: got 0x%08X"
+		        "expected 0x%08X\n", reg, (val & mask), (write & mask));
+		*data = E1000_REGISTER(&adapter->hw, reg);
+		return 1;
+	}
+	return 0;
+}
+#define REG_PATTERN_TEST_ARRAY(reg, offset, mask, write)                       \
+	do {                                                                   \
+		if (reg_pattern_test(adapter, data, reg, offset, mask, write)) \
+			return 1;                                              \
+	} while (0)
+#define REG_PATTERN_TEST(reg, mask, write)                                     \
+	REG_PATTERN_TEST_ARRAY(reg, 0, mask, write)
+
+#define REG_SET_AND_CHECK(reg, mask, write)                                    \
+	do {                                                                   \
+		if (reg_set_and_check(adapter, data, reg, mask, write))       \
+			return 1;                                              \
+	} while (0)
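+/* On the first mismatch these wrappers make the enclosing test function
+ * return 1, with the offending register offset left behind in *data for
+ * e1000_diag_test() to report.
+ */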
+
+static int e1000_reg_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	u32 value, before, after;
+	u32 i, toggle;
+
+	/* The status register is Read Only, so a write should fail.
+	 * Some bits that get toggled are ignored.
+	 */
+	switch (mac->type) {
+	/* there are several bits on newer hardware that are r/w */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		toggle = 0x7FFFF3FF;
+		break;
+	case e1000_82573:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		toggle = 0x7FFFF033;
+		break;
+	default:
+		toggle = 0xFFFFF833;
+		break;
+	}
+
+	before = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+	value = (E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle);
+	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, toggle);
+	after = E1000_READ_REG(&adapter->hw, E1000_STATUS) & toggle;
+	if (value != after) {
+		DPRINTK(DRV, ERR, "failed STATUS register test got: "
+		        "0x%08X expected: 0x%08X\n", after, value);
+		*data = 1;
+		return 1;
+	}
+	/* restore previous status */
+	E1000_WRITE_REG(&adapter->hw, E1000_STATUS, before);
+
+	if ((mac->type != e1000_ich8lan) &&
+	    (mac->type != e1000_ich9lan)) {
+		REG_PATTERN_TEST(E1000_FCAL, 0xFFFFFFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_FCAH, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_FCT, 0x0000FFFF, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_VET, 0x0000FFFF, 0xFFFFFFFF);
+	}
+
+	REG_PATTERN_TEST(E1000_RDTR, 0x0000FFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_RDLEN(0), 0x000FFF80, 0x000FFFFF);
+	REG_PATTERN_TEST(E1000_RDH(0), 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_RDT(0), 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_FCRTH, 0x0000FFF8, 0x0000FFF8);
+	REG_PATTERN_TEST(E1000_FCTTV, 0x0000FFFF, 0x0000FFFF);
+	REG_PATTERN_TEST(E1000_TIPG, 0x3FFFFFFF, 0x3FFFFFFF);
+	REG_PATTERN_TEST(E1000_TDBAH(0), 0xFFFFFFFF, 0xFFFFFFFF);
+	REG_PATTERN_TEST(E1000_TDLEN(0), 0x000FFF80, 0x000FFFFF);
+
+	REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x00000000);
+
+	before = (((mac->type == e1000_ich8lan) ||
+		   (mac->type == e1000_ich9lan)) ? 0x06C3B33E : 0x06DFB3FE);
+	REG_SET_AND_CHECK(E1000_RCTL, before, 0x003FFFFB);
+	REG_SET_AND_CHECK(E1000_TCTL, 0xFFFFFFFF, 0x00000000);
+
+	if (mac->type >= e1000_82543) {
+
+		REG_SET_AND_CHECK(E1000_RCTL, before, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
+		if ((mac->type != e1000_ich8lan) &&
+		    (mac->type != e1000_ich9lan))
+			REG_PATTERN_TEST(E1000_TXCW, 0xC000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFFFF0, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_TIDV, 0x0000FFFF, 0x0000FFFF);
+		for (i = 0; i < mac->rar_entry_count; i++) {
+			REG_PATTERN_TEST_ARRAY(E1000_RA, ((i << 1) + 1),
+			                       0x8003FFFF, 0xFFFFFFFF);
+		}
+
+	} else {
+
+		REG_SET_AND_CHECK(E1000_RCTL, 0xFFFFFFFF, 0x01FFFFFF);
+		REG_PATTERN_TEST(E1000_RDBAL(0), 0xFFFFF000, 0xFFFFFFFF);
+		REG_PATTERN_TEST(E1000_TXCW, 0x0000FFFF, 0x0000FFFF);
+		REG_PATTERN_TEST(E1000_TDBAL(0), 0xFFFFF000, 0xFFFFFFFF);
+
+	}
+
+	for (i = 0; i < mac->mta_reg_count; i++)
+		REG_PATTERN_TEST_ARRAY(E1000_MTA, i, 0xFFFFFFFF, 0xFFFFFFFF);
+
+	*data = 0;
+	return 0;
+}
+
+static int e1000_eeprom_test(struct e1000_adapter *adapter, u64 *data)
+{
+	u16 temp;
+	u16 checksum = 0;
+	u16 i;
+
+	*data = 0;
+	/* Read and add up the contents of the EEPROM */
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		if ((e1000_read_nvm(&adapter->hw, i, 1, &temp)) < 0) {
+			*data = 1;
+			break;
+		}
+		checksum += temp;
+	}
+
+	/* If Checksum is not Correct return error else test passed */
+	if ((checksum != (u16) NVM_SUM) && !(*data))
+		*data = 2;
+
+	return *data;
+}
+
+static irqreturn_t e1000_test_intr(int irq, void *data)
+{
+	struct net_device *netdev = (struct net_device *) data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	adapter->test_icr |= E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+	return IRQ_HANDLED;
+}
+
+static int e1000_intr_test(struct e1000_adapter *adapter, u64 *data)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 mask, i = 0, shared_int = TRUE;
+	u32 irq = adapter->pdev->irq;
+
+	*data = 0;
+
+	/* NOTE: we don't test MSI interrupts here, yet */
+	/* Hook up test interrupt handler just for this test */
+	if (!request_irq(irq, &e1000_test_intr, IRQF_PROBE_SHARED, netdev->name,
+	                 netdev))
+		shared_int = FALSE;
+	else if (request_irq(irq, &e1000_test_intr, IRQF_SHARED,
+	         netdev->name, netdev)) {
+		*data = 1;
+		return -1;
+	}
+	DPRINTK(HW, INFO, "testing %s interrupt\n",
+	        (shared_int ? "shared" : "unshared"));
+
+	/* Disable all the interrupts */
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Test each interrupt */
+	for (; i < 10; i++) {
+
+		if (((adapter->hw.mac.type == e1000_ich8lan) ||
+		     (adapter->hw.mac.type == e1000_ich9lan)) && i == 8)
+			continue;
+
+		/* Interrupt to test */
+		mask = 1 << i;
+
+		if (!shared_int) {
+			/* Disable the interrupt to be reported in
+			 * the cause register and then force the same
+			 * interrupt and see if one gets posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			E1000_WRITE_REG(&adapter->hw, E1000_IMC, mask);
+			E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
+			msleep(10);
+
+			if (adapter->test_icr & mask) {
+				*data = 3;
+				break;
+			}
+		}
+
+		/* Enable the interrupt to be reported in
+		 * the cause register and then force the same
+		 * interrupt and see if one gets posted.  If
+		 * an interrupt was not posted to the bus, the
+		 * test failed.
+		 */
+		adapter->test_icr = 0;
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, mask);
+		E1000_WRITE_REG(&adapter->hw, E1000_ICS, mask);
+		msleep(10);
+
+		if (!(adapter->test_icr & mask)) {
+			*data = 4;
+			break;
+		}
+
+		if (!shared_int) {
+			/* Disable the other interrupts to be reported in
+			 * the cause register and then force the other
+			 * interrupts and see if any get posted.  If
+			 * an interrupt was posted to the bus, the
+			 * test failed.
+			 */
+			adapter->test_icr = 0;
+			E1000_WRITE_REG(&adapter->hw, E1000_IMC,
+			                ~mask & 0x00007FFF);
+			E1000_WRITE_REG(&adapter->hw, E1000_ICS,
+			                ~mask & 0x00007FFF);
+			msleep(10);
+
+			if (adapter->test_icr) {
+				*data = 5;
+				break;
+			}
+		}
+	}
+
+	/* Disable all the interrupts */
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, 0xFFFFFFFF);
+	msleep(10);
+
+	/* Unhook test interrupt handler */
+	free_irq(irq, netdev);
+
+	return *data;
+}
+
+static void e1000_free_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i;
+
+	if (tx_ring->desc && tx_ring->buffer_info) {
+		for (i = 0; i < tx_ring->count; i++) {
+			if (tx_ring->buffer_info[i].dma)
+				pci_unmap_single(pdev, tx_ring->buffer_info[i].dma,
+						 tx_ring->buffer_info[i].length,
+						 PCI_DMA_TODEVICE);
+			if (tx_ring->buffer_info[i].skb)
+				dev_kfree_skb(tx_ring->buffer_info[i].skb);
+		}
+	}
+
+	if (rx_ring->desc && rx_ring->buffer_info) {
+		for (i = 0; i < rx_ring->count; i++) {
+			if (rx_ring->buffer_info[i].dma)
+				pci_unmap_single(pdev, rx_ring->buffer_info[i].dma,
+						 E1000_RXBUFFER_2048,
+						 PCI_DMA_FROMDEVICE);
+			if (rx_ring->buffer_info[i].skb)
+				dev_kfree_skb(rx_ring->buffer_info[i].skb);
+		}
+	}
+
+	if (tx_ring->desc) {
+		dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+				  tx_ring->dma);
+		tx_ring->desc = NULL;
+	}
+	if (rx_ring->desc) {
+		dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+				  rx_ring->dma);
+		rx_ring->desc = NULL;
+	}
+
+	kfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+	kfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+}
+
+static int e1000_setup_desc_rings(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	u32 rctl;
+	int i, ret_val;
+
+	/* Setup Tx descriptor ring and Tx buffers */
+
+	if (!tx_ring->count)
+		tx_ring->count = E1000_DEFAULT_TXD;
+
+	if (!(tx_ring->buffer_info = kcalloc(tx_ring->count,
+	                                     sizeof(struct e1000_buffer),
+	                                     GFP_KERNEL))) {
+		ret_val = 1;
+		goto err_nomem;
+	}
+
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
+	if (!(tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+	                                         &tx_ring->dma, GFP_ATOMIC))) {
+		ret_val = 2;
+		goto err_nomem;
+	}
+	tx_ring->next_to_use = tx_ring->next_to_clean = 0;
+
+	E1000_WRITE_REG(&adapter->hw, E1000_TDBAL(0),
+			((u64) tx_ring->dma & 0x00000000FFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDBAH(0), ((u64) tx_ring->dma >> 32));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDLEN(0),
+			tx_ring->count * sizeof(struct e1000_tx_desc));
+	E1000_WRITE_REG(&adapter->hw, E1000_TDH(0), 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
+			E1000_TCTL_MULR |
+			E1000_TCTL_PSP | E1000_TCTL_EN |
+			E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT |
+			E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+
+	for (i = 0; i < tx_ring->count; i++) {
+		struct e1000_tx_desc *tx_desc = E1000_TX_DESC(*tx_ring, i);
+		struct sk_buff *skb;
+		unsigned int size = 1024;
+
+		if (!(skb = alloc_skb(size, GFP_KERNEL))) {
+			ret_val = 3;
+			goto err_nomem;
+		}
+		skb_put(skb, size);
+		tx_ring->buffer_info[i].skb = skb;
+		tx_ring->buffer_info[i].length = skb->len;
+		tx_ring->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, skb->len,
+				       PCI_DMA_TODEVICE);
+		tx_desc->buffer_addr = cpu_to_le64(tx_ring->buffer_info[i].dma);
+		tx_desc->lower.data = cpu_to_le32(skb->len);
+		tx_desc->lower.data |= cpu_to_le32(E1000_TXD_CMD_EOP |
+						   E1000_TXD_CMD_IFCS);
+		if (adapter->hw.mac.type < e1000_82543)
+			tx_desc->lower.data |= E1000_TXD_CMD_RPS;
+		else
+			tx_desc->lower.data |= E1000_TXD_CMD_RS;
+
+		tx_desc->upper.data = 0;
+	}
+
+	/* Setup Rx descriptor ring and Rx buffers */
+
+	if (!rx_ring->count)
+		rx_ring->count = E1000_DEFAULT_RXD;
+
+	if (!(rx_ring->buffer_info = kcalloc(rx_ring->count,
+	                                     sizeof(struct e1000_rx_buffer),
+	                                     GFP_KERNEL))) {
+		ret_val = 4;
+		goto err_nomem;
+	}
+
+	rx_ring->size = rx_ring->count * sizeof(struct e1000_rx_desc);
+	if (!(rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+	                                         &rx_ring->dma, GFP_ATOMIC))) {
+		ret_val = 5;
+		goto err_nomem;
+	}
+	rx_ring->next_to_use = rx_ring->next_to_clean = 0;
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDBAL(0),
+			((u64) rx_ring->dma & 0xFFFFFFFF));
+	E1000_WRITE_REG(&adapter->hw, E1000_RDBAH(0), ((u64) rx_ring->dma >> 32));
+	E1000_WRITE_REG(&adapter->hw, E1000_RDLEN(0), rx_ring->size);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDH(0), 0);
+	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), 0);
+	rctl = E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_SZ_2048 |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+
+	for (i = 0; i < rx_ring->count; i++) {
+		struct e1000_rx_desc *rx_desc = E1000_RX_DESC(*rx_ring, i);
+		struct sk_buff *skb;
+
+		if (!(skb = alloc_skb(E1000_RXBUFFER_2048 + NET_IP_ALIGN,
+				GFP_KERNEL))) {
+			ret_val = 6;
+			goto err_nomem;
+		}
+		skb_reserve(skb, NET_IP_ALIGN);
+		rx_ring->buffer_info[i].skb = skb;
+		rx_ring->buffer_info[i].dma =
+			pci_map_single(pdev, skb->data, E1000_RXBUFFER_2048,
+				       PCI_DMA_FROMDEVICE);
+		rx_desc->buffer_addr = cpu_to_le64(rx_ring->buffer_info[i].dma);
+		memset(skb->data, 0x00, skb->len);
+	}
+
+	return 0;
+
+err_nomem:
+	e1000_free_desc_rings(adapter);
+	return ret_val;
+}
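+/* The ret_val codes 1..6 above identify which allocation step failed
+ * (Tx buffer_info, Tx descriptors, Tx skbs, Rx buffer_info, Rx
+ * descriptors, Rx skbs); err_nomem unwinds via e1000_free_desc_rings(),
+ * which copes with partially built rings.
+ */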
+
+static void e1000_phy_disable_receiver(struct e1000_adapter *adapter)
+{
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_write_phy_reg(&adapter->hw, 29, 0x001F);
+	e1000_write_phy_reg(&adapter->hw, 30, 0x8FFC);
+	e1000_write_phy_reg(&adapter->hw, 29, 0x001A);
+	e1000_write_phy_reg(&adapter->hw, 30, 0x8FF0);
+}
+
+static void e1000_phy_reset_clk_and_crs(struct e1000_adapter *adapter)
+{
+	u16 phy_reg;
+
+	/* Because we reset the PHY above, we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock.  This
+	 * value defaults back to a 2.5MHz clock when the PHY is reset.
+	 */
+	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_EPSCR_TX_CLK_25;
+	e1000_write_phy_reg(&adapter->hw,
+		M88E1000_EXT_PHY_SPEC_CTRL, phy_reg);
+
+	/* In addition, because of the s/w reset above, we need to enable
+	 * CRS on TX.  This must be set for both full and half duplex
+	 * operation.
+	 */
+	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+	phy_reg |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	e1000_write_phy_reg(&adapter->hw,
+		M88E1000_PHY_SPEC_CTRL, phy_reg);
+}
+
+static int e1000_nonintegrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	u32 ctrl_reg;
+	u16 phy_reg;
+
+	/* Setup the Device Control Register for PHY loopback test. */
+
+	ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+	ctrl_reg |= (E1000_CTRL_ILOS |		/* Invert Loss-Of-Signal */
+		     E1000_CTRL_FRCSPD |	/* Set the Force Speed Bit */
+		     E1000_CTRL_FRCDPX |	/* Set the Force Duplex Bit */
+		     E1000_CTRL_SPD_1000 |	/* Force Speed to 1000 */
+		     E1000_CTRL_FD);		/* Force Duplex to FULL */
+
+	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg);
+
+	/* Read the PHY Specific Control Register (0x10) */
+	e1000_read_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, &phy_reg);
+
+	/* Clear Auto-Crossover bits in PHY Specific Control Register
+	 * (bits 6:5).
+	 */
+	phy_reg &= ~M88E1000_PSCR_AUTO_X_MODE;
+	e1000_write_phy_reg(&adapter->hw, M88E1000_PHY_SPEC_CTRL, phy_reg);
+
+	/* Perform software reset on the PHY */
+	e1000_phy_commit(&adapter->hw);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8100);
+
+	/* Wait for reset to complete. */
+	udelay(500);
+
+	/* Have to setup TX_CLK and TX_CRS after software reset */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Write out to PHY registers 29 and 30 to disable the Receiver. */
+	e1000_phy_disable_receiver(adapter);
+
+	/* Set the loopback bit in the PHY control register. */
+	e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg);
+	phy_reg |= MII_CR_LOOPBACK;
+	e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_reg);
+
+	/* Setup TX_CLK and TX_CRS one more time. */
+	e1000_phy_reset_clk_and_crs(adapter);
+
+	/* Check Phy Configuration */
+	e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg);
+	if (phy_reg != 0x4100)
+		return 9;
+
+	e1000_read_phy_reg(&adapter->hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_reg);
+	if (phy_reg != 0x0070)
+		return 10;
+
+	e1000_read_phy_reg(&adapter->hw, 29, &phy_reg);
+	if (phy_reg != 0x001A)
+		return 11;
+
+	return 0;
+}
+
+static int e1000_integrated_phy_loopback(struct e1000_adapter *adapter)
+{
+	u32 ctrl_reg = 0;
+	u32 stat_reg = 0;
+
+	adapter->hw.mac.autoneg = FALSE;
+
+	if (adapter->hw.phy.type == e1000_phy_m88) {
+		/* Auto-MDI/MDIX Off */
+		e1000_write_phy_reg(&adapter->hw,
+				    M88E1000_PHY_SPEC_CTRL, 0x0808);
+		/* reset to update Auto-MDI/MDIX */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x9140);
+		/* autoneg off */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x8140);
+	} else if (adapter->hw.phy.type == e1000_phy_gg82563)
+		e1000_write_phy_reg(&adapter->hw,
+		                    GG82563_PHY_KMRN_MODE_CTRL,
+		                    0x1CC);
+
+	ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+
+	if (adapter->hw.phy.type == e1000_phy_ife) {
+		/* force 100, set loopback */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x6100);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_100 |/* Force Speed to 100 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	} else {
+		/* force 1000, set loopback */
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, 0x4140);
+
+		/* Now set up the MAC to the same speed/duplex as the PHY. */
+		ctrl_reg = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl_reg &= ~E1000_CTRL_SPD_SEL; /* Clear the speed sel bits */
+		ctrl_reg |= (E1000_CTRL_FRCSPD | /* Set the Force Speed Bit */
+			     E1000_CTRL_FRCDPX | /* Set the Force Duplex Bit */
+			     E1000_CTRL_SPD_1000 |/* Force Speed to 1000 */
+			     E1000_CTRL_FD);	 /* Force Duplex to FULL */
+	}
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper &&
+	   adapter->hw.phy.type == e1000_phy_m88) {
+		ctrl_reg |= E1000_CTRL_ILOS; /* Invert Loss of Signal */
+	} else {
+		/* Set the ILOS bit on the fiber Nic if half duplex link is
+		 * detected. */
+		stat_reg = E1000_READ_REG(&adapter->hw, E1000_STATUS);
+		if ((stat_reg & E1000_STATUS_FD) == 0)
+			ctrl_reg |= (E1000_CTRL_ILOS | E1000_CTRL_SLU);
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl_reg);
+
+	/* Disable the receiver on the PHY so when a cable is plugged in, the
+	 * PHY does not begin to autoneg when a cable is reconnected to the NIC.
+	 */
+	if (adapter->hw.phy.type == e1000_phy_m88)
+		e1000_phy_disable_receiver(adapter);
+
+	udelay(500);
+
+	return 0;
+}
+
+static int e1000_set_82571_fiber_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	int link = 0;
+
+	/* special requirements for 82571/82572 fiber adapters */
+
+	/* jump through hoops to make sure link is up because serdes
+	 * link is hardwired up */
+	ctrl |= E1000_CTRL_SLU;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/* disable autoneg */
+	ctrl = E1000_READ_REG(hw, E1000_TXCW);
+	ctrl &= ~(1U << 31);
+	E1000_WRITE_REG(hw, E1000_TXCW, ctrl);
+
+	link = (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU);
+
+	if (!link) {
+		/* set invert loss of signal */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_ILOS;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	}
+
+	/* special write to serdes control register to enable SerDes analog
+	 * loopback */
+#define E1000_SERDES_LB_ON 0x410
+	E1000_WRITE_REG(hw, E1000_SCTL, E1000_SERDES_LB_ON);
+	msleep(10);
+
+	return 0;
+}
+
+static int e1000_set_phy_loopback(struct e1000_adapter *adapter)
+{
+	u16 phy_reg = 0;
+	u16 count = 0;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82543:
+		if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+			/* Attempt to setup Loopback mode on Non-integrated PHY.
+			 * Some PHY registers get corrupted at random, so
+			 * attempt this 10 times.
+			 */
+			while (e1000_nonintegrated_phy_loopback(adapter) &&
+			      count++ < 10);
+			if (count < 11)
+				return 0;
+		}
+		break;
+
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+	case e1000_82547:
+	case e1000_82547_rev_2:
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		return e1000_integrated_phy_loopback(adapter);
+
+	default:
+		/* Default PHY loopback work is to read the MII
+		 * control register and assert bit 14 (loopback mode).
+		 */
+		e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_reg);
+		phy_reg |= MII_CR_LOOPBACK;
+		e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_reg);
+		return 0;
+	}
+
+	return 8;
+}
+
+/* only call this for fiber/serdes connections to es2lan */
+static int e1000_set_es2lan_mac_loopback(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrlext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	u32 ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* save CTRL_EXT to restore later, reuse an empty variable (unused
+	   on mac_type 80003es2lan) */
+	adapter->tx_fifo_head = ctrlext;
+
+	/* clear the serdes mode bits, putting the device into mac loopback */
+	ctrlext &= ~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrlext);
+
+	/* force speed to 1000/FD, link up */
+	ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+	ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+	         E1000_CTRL_SPD_1000 | E1000_CTRL_FD);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/* set mac loopback */
+	ctrl = E1000_READ_REG(hw, E1000_RCTL);
+	ctrl |= E1000_RCTL_LBM_MAC;
+	E1000_WRITE_REG(hw, E1000_RCTL, ctrl);
+
+	/* set testing mode parameters (no need to reset later) */
+#define KMRNCTRLSTA_OPMODE (0x1F << 16)
+#define KMRNCTRLSTA_OPMODE_1GB_FD_GMII 0x0582
+	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA,
+		(KMRNCTRLSTA_OPMODE | KMRNCTRLSTA_OPMODE_1GB_FD_GMII));
+
+	return 0;
+}
+
+static int e1000_setup_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	if (hw->phy.media_type == e1000_media_type_fiber ||
+	    hw->phy.media_type == e1000_media_type_internal_serdes) {
+		switch (hw->mac.type) {
+		case e1000_80003es2lan:
+			return e1000_set_es2lan_mac_loopback(adapter);
+		case e1000_82545:
+		case e1000_82546:
+		case e1000_82545_rev_3:
+		case e1000_82546_rev_3:
+			return e1000_set_phy_loopback(adapter);
+		case e1000_82571:
+		case e1000_82572:
+			return e1000_set_82571_fiber_loopback(adapter);
+		default:
+			rctl = E1000_READ_REG(hw, E1000_RCTL);
+			rctl |= E1000_RCTL_LBM_TCVR;
+			E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+			return 0;
+		}
+	} else if (hw->phy.media_type == e1000_media_type_copper)
+		return e1000_set_phy_loopback(adapter);
+
+	return 7;
+}
+
+static void e1000_loopback_cleanup(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+	u16 phy_reg;
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+
+	switch (hw->mac.type) {
+	case e1000_80003es2lan:
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes) {
+			/* restore CTRL_EXT, stealing space from tx_fifo_head */
+			E1000_WRITE_REG(hw, E1000_CTRL_EXT, adapter->tx_fifo_head);
+			adapter->tx_fifo_head = 0;
+		}
+		fallthrough;
+	case e1000_82571:
+	case e1000_82572:
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes) {
+#define E1000_SERDES_LB_OFF 0x400
+			E1000_WRITE_REG(hw, E1000_SCTL, E1000_SERDES_LB_OFF);
+			msleep(10);
+			break;
+		}
+		fallthrough;
+	case e1000_82545:
+	case e1000_82546:
+	case e1000_82545_rev_3:
+	case e1000_82546_rev_3:
+	default:
+		hw->mac.autoneg = TRUE;
+		if (hw->phy.type == e1000_phy_gg82563)
+			e1000_write_phy_reg(hw,
+					    GG82563_PHY_KMRN_MODE_CTRL,
+					    0x180);
+		e1000_read_phy_reg(hw, PHY_CONTROL, &phy_reg);
+		if (phy_reg & MII_CR_LOOPBACK) {
+			phy_reg &= ~MII_CR_LOOPBACK;
+			e1000_write_phy_reg(hw, PHY_CONTROL, phy_reg);
+			e1000_phy_commit(hw);
+		}
+		break;
+	}
+}
+
+static void e1000_create_lbtest_frame(struct sk_buff *skb,
+                                      unsigned int frame_size)
+{
+	memset(skb->data, 0xFF, frame_size);
+	frame_size &= ~1;
+	memset(&skb->data[frame_size / 2], 0xAA, frame_size / 2 - 1);
+	memset(&skb->data[frame_size / 2 + 10], 0xBE, 1);
+	memset(&skb->data[frame_size / 2 + 12], 0xAF, 1);
+}
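+/* The test frame is 0xFF in its first half and 0xAA in the second, with
+ * 0xBE/0xAF marker bytes placed just past the midpoint;
+ * e1000_check_lbtest_frame() below checks one leading 0xFF byte and both
+ * markers to decide whether a looped-back frame survived intact.
+ */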
+
+static int e1000_check_lbtest_frame(struct sk_buff *skb, unsigned int frame_size)
+{
+	frame_size &= ~1;
+	if (*(skb->data + 3) == 0xFF) {
+		if ((*(skb->data + frame_size / 2 + 10) == 0xBE) &&
+		   (*(skb->data + frame_size / 2 + 12) == 0xAF)) {
+			return 0;
+		}
+	}
+	return 13;
+}
+
+static int e1000_run_loopback_test(struct e1000_adapter *adapter)
+{
+	struct e1000_tx_ring *tx_ring = &adapter->test_tx_ring;
+	struct e1000_rx_ring *rx_ring = &adapter->test_rx_ring;
+	struct pci_dev *pdev = adapter->pdev;
+	int i, j, k, l, lc, good_cnt, ret_val = 0;
+	unsigned long time;
+
+	E1000_WRITE_REG(&adapter->hw, E1000_RDT(0), rx_ring->count - 1);
+
+	/* Calculate the loop count based on the largest descriptor ring
+	 * The idea is to wrap the largest ring a number of times using 64
+	 * send/receive pairs during each loop
+	 */
+
+	if (rx_ring->count <= tx_ring->count)
+		lc = ((tx_ring->count / 64) * 2) + 1;
+	else
+		lc = ((rx_ring->count / 64) * 2) + 1;
+
+	k = l = 0;
+	for (j = 0; j <= lc; j++) { /* loop count loop */
+		for (i = 0; i < 64; i++) { /* send the packets */
+			e1000_create_lbtest_frame(tx_ring->buffer_info[k].skb,
+					1024);
+			pci_dma_sync_single_for_device(pdev,
+					tx_ring->buffer_info[k].dma,
+					tx_ring->buffer_info[k].length,
+					PCI_DMA_TODEVICE);
+			if (unlikely(++k == tx_ring->count))
+				k = 0;
+		}
+		E1000_WRITE_REG(&adapter->hw, E1000_TDT(0), k);
+		msleep(200);
+		time = jiffies; /* set the start time for the receive */
+		good_cnt = 0;
+		do { /* receive the sent packets */
+			pci_dma_sync_single_for_cpu(pdev,
+					rx_ring->buffer_info[l].dma,
+					E1000_RXBUFFER_2048,
+					PCI_DMA_FROMDEVICE);
+
+			ret_val = e1000_check_lbtest_frame(
+					rx_ring->buffer_info[l].skb,
+					1024);
+			if (!ret_val)
+				good_cnt++;
+			if (unlikely(++l == rx_ring->count))
+				l = 0;
+			/* time + 20 jiffies is more than enough time to
+			 * complete the receives; if it's exceeded, break
+			 * and error off
+			 */
+		} while (good_cnt < 64 && time_before(jiffies, time + 20));
+		if (good_cnt != 64) {
+			ret_val = 13; /* ret_val is the same as mis-compare */
+			break;
+		}
+		if (time_after_eq(jiffies, time + 20)) {
+			ret_val = 14; /* error code for time out error */
+			break;
+		}
+	} /* end loop count loop */
+	return ret_val;
+}
+
+static int e1000_loopback_test(struct e1000_adapter *adapter, u64 *data)
+{
+	/* PHY loopback cannot be performed if SoL/IDER
+	 * sessions are active */
+	if (e1000_check_reset_block(&adapter->hw)) {
+		DPRINTK(DRV, ERR, "Cannot do PHY loopback test "
+		        "when SoL/IDER is active.\n");
+		*data = 0;
+		goto out;
+	}
+
+	if ((*data = e1000_setup_desc_rings(adapter)))
+		goto out;
+	if ((*data = e1000_setup_loopback_test(adapter)))
+		goto err_loopback;
+	*data = e1000_run_loopback_test(adapter);
+	e1000_loopback_cleanup(adapter);
+
+err_loopback:
+	e1000_free_desc_rings(adapter);
+out:
+	return *data;
+}
+
+static int e1000_link_test(struct e1000_adapter *adapter, u64 *data)
+{
+	*data = 0;
+	if (adapter->hw.phy.media_type == e1000_media_type_internal_serdes) {
+		int i = 0;
+		adapter->hw.mac.serdes_has_link = FALSE;
+
+		/* On some blade server designs, link establishment
+		 * could take as long as 2-3 minutes */
+		do {
+			e1000_check_for_link(&adapter->hw);
+			if (adapter->hw.mac.serdes_has_link == TRUE)
+				return *data;
+			msleep(20);
+		} while (i++ < 3750);
+
+		*data = 1;
+	} else {
+		e1000_check_for_link(&adapter->hw);
+		if (adapter->hw.mac.autoneg)
+			msleep(4000);
+
+		if (!(E1000_READ_REG(&adapter->hw, E1000_STATUS) & E1000_STATUS_LU)) {
+			*data = 1;
+		}
+	}
+	return *data;
+}
+
+static int e1000_diag_test_count(struct net_device *netdev)
+{
+	return E1000_TEST_LEN;
+}
+
+static void e1000_diag_test(struct net_device *netdev,
+                            struct ethtool_test *eth_test, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u16 autoneg_advertised;
+	u8 forced_speed_duplex, autoneg;
+	bool if_running = netif_running(netdev);
+
+	set_bit(__E1000_TESTING, &adapter->state);
+	if (eth_test->flags == ETH_TEST_FL_OFFLINE) {
+		/* Offline tests */
+
+		/* save speed, duplex, autoneg settings */
+		autoneg_advertised = adapter->hw.phy.autoneg_advertised;
+		forced_speed_duplex = adapter->hw.mac.forced_speed_duplex;
+		autoneg = adapter->hw.mac.autoneg;
+
+		DPRINTK(HW, INFO, "offline testing starting\n");
+
+		/* Link test performed before hardware reset so autoneg doesn't
+		 * interfere with test result */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		if (if_running)
+			/* indicate we're in test mode */
+			dev_close(netdev);
+		else
+			e1000_reset(adapter);
+
+		if (e1000_reg_test(adapter, &data[0]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_eeprom_test(adapter, &data[1]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		if (e1000_intr_test(adapter, &data[2]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		e1000_reset(adapter);
+		/* make sure the phy is powered up */
+		if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+			e1000_power_up_phy(&adapter->hw);
+			e1000_setup_link(&adapter->hw);
+		}
+		if (e1000_loopback_test(adapter, &data[3]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* restore speed, duplex, autoneg settings */
+		adapter->hw.phy.autoneg_advertised = autoneg_advertised;
+		adapter->hw.mac.forced_speed_duplex = forced_speed_duplex;
+		adapter->hw.mac.autoneg = autoneg;
+
+		/* force this routine to wait until autoneg complete/timeout */
+		adapter->hw.phy.autoneg_wait_to_complete = TRUE;
+		e1000_reset(adapter);
+		adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+
+		clear_bit(__E1000_TESTING, &adapter->state);
+		if (if_running)
+			dev_open(netdev);
+	} else {
+		DPRINTK(HW, INFO, "online testing starting\n");
+		/* Online tests */
+		if (e1000_link_test(adapter, &data[4]))
+			eth_test->flags |= ETH_TEST_FL_FAILED;
+
+		/* Online tests aren't run; pass by default */
+		data[0] = 0;
+		data[1] = 0;
+		data[2] = 0;
+		data[3] = 0;
+
+		clear_bit(__E1000_TESTING, &adapter->state);
+	}
+	msleep_interruptible(4 * 1000);
+}
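+
+/*
+ * Note (illustrative): ethtool reads the results through the data[]
+ * array filled in above; this driver's index layout is
+ *   data[0] register test, data[1] eeprom test, data[2] interrupt
+ *   test, data[3] loopback test, data[4] link test,
+ * with any non-zero entry marking that test as failed.
+ */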
+
+static int e1000_wol_exclusion(struct e1000_adapter *adapter,
+                               struct ethtool_wolinfo *wol)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int retval = 1; /* fail by default */
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82542:
+	case E1000_DEV_ID_82543GC_FIBER:
+	case E1000_DEV_ID_82543GC_COPPER:
+	case E1000_DEV_ID_82544EI_FIBER:
+	case E1000_DEV_ID_82546EB_QUAD_COPPER:
+	case E1000_DEV_ID_82545EM_FIBER:
+	case E1000_DEV_ID_82545EM_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		/* these don't support WoL at all */
+		wol->supported = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+	case E1000_DEV_ID_82571EB_SERDES:
+	case E1000_DEV_ID_82571EB_COPPER:
+		/* Wake events not supported on port B */
+		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* quad port adapters only support WoL on port A */
+		if (!(adapter->flags & E1000_FLAG_QUAD_PORT_A)) {
+			wol->supported = 0;
+			break;
+		}
+		/* return success for non excluded adapter ports */
+		retval = 0;
+		break;
+	default:
+		/* dual port cards only support WoL on port A from now on,
+		 * unless it was enabled in the EEPROM for port B,
+		 * so exclude FUNC_1 ports from having WoL enabled */
+		if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_FUNC_1 &&
+		    !adapter->eeprom_wol) {
+			wol->supported = 0;
+			break;
+		}
+
+		retval = 0;
+	}
+
+	return retval;
+}
+
+static void e1000_get_wol(struct net_device *netdev,
+                          struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	wol->supported = WAKE_UCAST | WAKE_MCAST |
+	                 WAKE_BCAST | WAKE_MAGIC;
+	wol->wolopts = 0;
+
+	/* this function will set ->supported = 0 and return 1 if wol is not
+	 * supported by this hardware */
+	if (e1000_wol_exclusion(adapter, wol))
+		return;
+
+	/* apply any specific unsupported masks here */
+	switch (adapter->hw.device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		/* KSP3 does not support UCAST wake-ups */
+		wol->supported &= ~WAKE_UCAST;
+
+		if (adapter->wol & E1000_WUFC_EX)
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+		break;
+	default:
+		break;
+	}
+
+	if (adapter->wol & E1000_WUFC_EX)
+		wol->wolopts |= WAKE_UCAST;
+	if (adapter->wol & E1000_WUFC_MC)
+		wol->wolopts |= WAKE_MCAST;
+	if (adapter->wol & E1000_WUFC_BC)
+		wol->wolopts |= WAKE_BCAST;
+	if (adapter->wol & E1000_WUFC_MAG)
+		wol->wolopts |= WAKE_MAGIC;
+
+	return;
+}
+
+static int e1000_set_wol(struct net_device *netdev,
+                         struct ethtool_wolinfo *wol)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (wol->wolopts & (WAKE_PHY | WAKE_ARP | WAKE_MAGICSECURE))
+		return -EOPNOTSUPP;
+
+	if (e1000_wol_exclusion(adapter, wol))
+		return wol->wolopts ? -EOPNOTSUPP : 0;
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+		if (wol->wolopts & WAKE_UCAST) {
+			DPRINTK(DRV, ERR, "Interface does not support "
+		        "directed (unicast) frame wake-up packets\n");
+			return -EOPNOTSUPP;
+		}
+		break;
+	default:
+		break;
+	}
+
+	/* these settings will always override what we currently have */
+	adapter->wol = 0;
+
+	if (wol->wolopts & WAKE_UCAST)
+		adapter->wol |= E1000_WUFC_EX;
+	if (wol->wolopts & WAKE_MCAST)
+		adapter->wol |= E1000_WUFC_MC;
+	if (wol->wolopts & WAKE_BCAST)
+		adapter->wol |= E1000_WUFC_BC;
+	if (wol->wolopts & WAKE_MAGIC)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	return 0;
+}
+
+/* toggle LED 4 times per second = 2 "blinks" per second */
+#define E1000_ID_INTERVAL	(HZ/4)
+
+/* bit defines for adapter->led_status */
+#define E1000_LED_ON		0
+
+static void e1000_led_blink_callback(unsigned long data)
+{
+	struct e1000_adapter *adapter = (struct e1000_adapter *) data;
+
+	if (test_and_change_bit(E1000_LED_ON, &adapter->led_status))
+		e1000_led_off(&adapter->hw);
+	else
+		e1000_led_on(&adapter->hw);
+
+	mod_timer(&adapter->blink_timer, jiffies + E1000_ID_INTERVAL);
+}
+
+static int e1000_phys_id(struct net_device *netdev, u32 data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (!data)
+		data = INT_MAX;
+
+	if (adapter->hw.mac.type < e1000_82571) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long) adapter;
+		}
+		e1000_setup_led(&adapter->hw);
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+	} else if (adapter->hw.phy.type == e1000_phy_ife) {
+		if (!adapter->blink_timer.function) {
+			init_timer(&adapter->blink_timer);
+			adapter->blink_timer.function = e1000_led_blink_callback;
+			adapter->blink_timer.data = (unsigned long) adapter;
+		}
+		mod_timer(&adapter->blink_timer, jiffies);
+		msleep_interruptible(data * 1000);
+		del_timer_sync(&adapter->blink_timer);
+		e1000_write_phy_reg(&(adapter->hw), IFE_PHY_SPECIAL_CONTROL_LED, 0);
+	} else {
+		e1000_blink_led(&adapter->hw);
+		msleep_interruptible(data * 1000);
+	}
+
+	e1000_led_off(&adapter->hw);
+	clear_bit(E1000_LED_ON, &adapter->led_status);
+	e1000_cleanup_led(&adapter->hw);
+
+	return 0;
+}
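+
+/*
+ * Note (illustrative): "data" is the requested blink time in seconds;
+ * ethtool passes 0 for "blink until interrupted", which is mapped to
+ * INT_MAX above.  data * 1000 is evaluated in 32-bit arithmetic, so
+ * very large values wrap; in practice a signal interrupting
+ * msleep_interruptible() is what ends an "infinite" blink.
+ */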
+
+static int e1000_get_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (adapter->itr_setting <= 3)
+		ec->rx_coalesce_usecs = adapter->itr_setting;
+	else
+		ec->rx_coalesce_usecs = 1000000 / adapter->itr_setting;
+
+	return 0;
+}
+
+static int e1000_set_coalesce(struct net_device *netdev,
+			      struct ethtool_coalesce *ec)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if ((ec->rx_coalesce_usecs > E1000_MAX_ITR_USECS) ||
+	    ((ec->rx_coalesce_usecs > 3) &&
+	     (ec->rx_coalesce_usecs < E1000_MIN_ITR_USECS)) ||
+	    (ec->rx_coalesce_usecs == 2))
+		return -EINVAL;
+
+	if (!(adapter->flags & E1000_FLAG_HAS_INTR_MODERATION))
+		return -ENOTSUPP;
+
+	if (ec->rx_coalesce_usecs <= 3) {
+		adapter->itr = 20000;
+		adapter->itr_setting = ec->rx_coalesce_usecs;
+	} else {
+		adapter->itr = (1000000 / ec->rx_coalesce_usecs);
+		adapter->itr_setting = adapter->itr & ~3;
+	}
+
+	if (adapter->itr_setting != 0)
+		E1000_WRITE_REG(&adapter->hw, E1000_ITR,
+			1000000000 / (adapter->itr * 256));
+	else
+		E1000_WRITE_REG(&adapter->hw, E1000_ITR, 0);
+
+	return 0;
+}
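+
+/*
+ * Worked example (illustrative): "ethtool -C ethX rx-usecs 125" gives
+ * adapter->itr = 1000000 / 125 = 8000 interrupts/sec.  The ITR
+ * register counts in 256 ns units, so it is programmed with
+ * 1000000000 / (8000 * 256) = 488, i.e. 488 * 256 ns ~= 125 us between
+ * interrupt assertions.  Values 0, 1 and 3 select the driver's fixed
+ * or adaptive moderation modes rather than a literal interval.
+ */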
+
+static int e1000_nway_reset(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	if (netif_running(netdev))
+		e1000_reinit_locked(adapter);
+	return 0;
+}
+
+static int e1000_get_stats_count(struct net_device *netdev)
+{
+	return E1000_STATS_LEN;
+}
+
+static void e1000_get_ethtool_stats(struct net_device *netdev,
+                                    struct ethtool_stats *stats, u64 *data)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+#ifdef CONFIG_E1000_MQ
+	u64 *queue_stat;
+	int stat_count = sizeof(struct e1000_queue_stats) / sizeof(u64);
+	int j, k;
+#endif
+	int i;
+
+	e1000_update_stats(adapter);
+	for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+		char *p = (char *)adapter+e1000_gstrings_stats[i].stat_offset;
+		data[i] = (e1000_gstrings_stats[i].sizeof_stat ==
+			sizeof(u64)) ? *(u64 *)p : *(u32 *)p;
+	}
+#ifdef CONFIG_E1000_MQ
+	if (adapter->num_tx_queues > 1) {
+		for (j = 0; j < adapter->num_tx_queues; j++) {
+			queue_stat = (u64 *)&adapter->tx_ring[j].tx_stats;
+			for (k = 0; k < stat_count; k++)
+				data[i + k] = queue_stat[k];
+			i += k;
+		}
+	}
+	if (adapter->num_rx_queues > 1) {
+		for (j = 0; j < adapter->num_rx_queues; j++) {
+			queue_stat = (u64 *)&adapter->rx_ring[j].rx_stats;
+			for (k = 0; k < stat_count; k++)
+				data[i + k] = queue_stat[k];
+			i += k;
+		}
+	}
+#endif
+/*	BUG_ON(i != E1000_STATS_LEN); */
+}
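+
+/*
+ * Note (illustrative sketch): each e1000_gstrings_stats[] entry is
+ * assumed to pair an ethtool string with the size and byte offset of
+ * a field in struct e1000_adapter, roughly
+ *   { "rx_packets", sizeof(u64), offsetof(struct e1000_adapter, ...) }
+ * which lets the loop above fetch every counter generically via
+ * (char *)adapter + stat_offset.
+ */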
+
+static void e1000_get_strings(struct net_device *netdev, u32 stringset,
+                              u8 *data)
+{
+#ifdef CONFIG_E1000_MQ
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+#endif
+	u8 *p = data;
+	int i;
+
+	switch (stringset) {
+	case ETH_SS_TEST:
+		memcpy(data, *e1000_gstrings_test,
+			E1000_TEST_LEN*ETH_GSTRING_LEN);
+		break;
+	case ETH_SS_STATS:
+		for (i = 0; i < E1000_GLOBAL_STATS_LEN; i++) {
+			memcpy(p, e1000_gstrings_stats[i].stat_string,
+			       ETH_GSTRING_LEN);
+			p += ETH_GSTRING_LEN;
+		}
+#ifdef CONFIG_E1000_MQ
+		if (adapter->num_tx_queues > 1) {
+			for (i = 0; i < adapter->num_tx_queues; i++) {
+				sprintf(p, "tx_queue_%u_packets", i);
+				p += ETH_GSTRING_LEN;
+				sprintf(p, "tx_queue_%u_bytes", i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+		if (adapter->num_rx_queues > 1) {
+			for (i = 0; i < adapter->num_rx_queues; i++) {
+				sprintf(p, "rx_queue_%u_packets", i);
+				p += ETH_GSTRING_LEN;
+				sprintf(p, "rx_queue_%u_bytes", i);
+				p += ETH_GSTRING_LEN;
+			}
+		}
+#endif
+/*		BUG_ON(p - data != E1000_STATS_LEN * ETH_GSTRING_LEN); */
+		break;
+	}
+}
+
+static struct ethtool_ops e1000_ethtool_ops = {
+	.get_settings           = e1000_get_settings,
+	.set_settings           = e1000_set_settings,
+	.get_drvinfo            = e1000_get_drvinfo,
+	.get_regs_len           = e1000_get_regs_len,
+	.get_regs               = e1000_get_regs,
+	.get_wol                = e1000_get_wol,
+	.set_wol                = e1000_set_wol,
+	.get_msglevel           = e1000_get_msglevel,
+	.set_msglevel           = e1000_set_msglevel,
+	.nway_reset             = e1000_nway_reset,
+	.get_link               = ethtool_op_get_link,
+	.get_eeprom_len         = e1000_get_eeprom_len,
+	.get_eeprom             = e1000_get_eeprom,
+	.set_eeprom             = e1000_set_eeprom,
+	.get_ringparam          = e1000_get_ringparam,
+	.set_ringparam          = e1000_set_ringparam,
+	.get_pauseparam         = e1000_get_pauseparam,
+	.set_pauseparam         = e1000_set_pauseparam,
+	.get_rx_csum            = e1000_get_rx_csum,
+	.set_rx_csum            = e1000_set_rx_csum,
+	.get_tx_csum            = e1000_get_tx_csum,
+	.set_tx_csum            = e1000_set_tx_csum,
+	.get_sg                 = ethtool_op_get_sg,
+	.set_sg                 = ethtool_op_set_sg,
+#ifdef NETIF_F_TSO
+	.get_tso                = ethtool_op_get_tso,
+	.set_tso                = e1000_set_tso,
+#endif
+	.self_test_count        = e1000_diag_test_count,
+	.self_test              = e1000_diag_test,
+	.get_strings            = e1000_get_strings,
+	.phys_id                = e1000_phys_id,
+	.get_stats_count        = e1000_get_stats_count,
+	.get_ethtool_stats      = e1000_get_ethtool_stats,
+#ifdef ETHTOOL_GPERMADDR
+	.get_perm_addr          = ethtool_op_get_perm_addr,
+#endif
+	.get_coalesce           = e1000_get_coalesce,
+	.set_coalesce           = e1000_set_coalesce,
+};
+
+void e1000_set_ethtool_ops(struct net_device *netdev)
+{
+	SET_ETHTOOL_OPS(netdev, &e1000_ethtool_ops);
+}
+#endif	/* SIOCETHTOOL */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h
new file mode 100644
index 0000000..9a94200
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_hw.h
@@ -0,0 +1,711 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include "e1000_osdep.h"
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82542                    0x1000
+#define E1000_DEV_ID_82543GC_FIBER            0x1001
+#define E1000_DEV_ID_82543GC_COPPER           0x1004
+#define E1000_DEV_ID_82544EI_COPPER           0x1008
+#define E1000_DEV_ID_82544EI_FIBER            0x1009
+#define E1000_DEV_ID_82544GC_COPPER           0x100C
+#define E1000_DEV_ID_82544GC_LOM              0x100D
+#define E1000_DEV_ID_82540EM                  0x100E
+#define E1000_DEV_ID_82540EM_LOM              0x1015
+#define E1000_DEV_ID_82540EP_LOM              0x1016
+#define E1000_DEV_ID_82540EP                  0x1017
+#define E1000_DEV_ID_82540EP_LP               0x101E
+#define E1000_DEV_ID_82545EM_COPPER           0x100F
+#define E1000_DEV_ID_82545EM_FIBER            0x1011
+#define E1000_DEV_ID_82545GM_COPPER           0x1026
+#define E1000_DEV_ID_82545GM_FIBER            0x1027
+#define E1000_DEV_ID_82545GM_SERDES           0x1028
+#define E1000_DEV_ID_82546EB_COPPER           0x1010
+#define E1000_DEV_ID_82546EB_FIBER            0x1012
+#define E1000_DEV_ID_82546EB_QUAD_COPPER      0x101D
+#define E1000_DEV_ID_82546GB_COPPER           0x1079
+#define E1000_DEV_ID_82546GB_FIBER            0x107A
+#define E1000_DEV_ID_82546GB_SERDES           0x107B
+#define E1000_DEV_ID_82546GB_PCIE             0x108A
+#define E1000_DEV_ID_82546GB_QUAD_COPPER      0x1099
+#define E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3 0x10B5
+#define E1000_DEV_ID_82541EI                  0x1013
+#define E1000_DEV_ID_82541EI_MOBILE           0x1018
+#define E1000_DEV_ID_82541ER_LOM              0x1014
+#define E1000_DEV_ID_82541ER                  0x1078
+#define E1000_DEV_ID_82541GI                  0x1076
+#define E1000_DEV_ID_82541GI_LF               0x107C
+#define E1000_DEV_ID_82541GI_MOBILE           0x1077
+#define E1000_DEV_ID_82547EI                  0x1019
+#define E1000_DEV_ID_82547EI_MOBILE           0x101A
+#define E1000_DEV_ID_82547GI                  0x1075
+#define E1000_DEV_ID_82571EB_COPPER           0x105E
+#define E1000_DEV_ID_82571EB_FIBER            0x105F
+#define E1000_DEV_ID_82571EB_SERDES           0x1060
+#define E1000_DEV_ID_82571EB_SERDES_DUAL      0x10D9
+#define E1000_DEV_ID_82571EB_SERDES_QUAD      0x10DA
+#define E1000_DEV_ID_82571EB_QUAD_COPPER      0x10A4
+#define E1000_DEV_ID_82571PT_QUAD_COPPER      0x10D5
+#define E1000_DEV_ID_82571EB_QUAD_FIBER       0x10A5
+#define E1000_DEV_ID_82571EB_QUAD_COPPER_LP   0x10BC
+#define E1000_DEV_ID_82572EI_COPPER           0x107D
+#define E1000_DEV_ID_82572EI_FIBER            0x107E
+#define E1000_DEV_ID_82572EI_SERDES           0x107F
+#define E1000_DEV_ID_82572EI                  0x10B9
+#define E1000_DEV_ID_82573E                   0x108B
+#define E1000_DEV_ID_82573E_IAMT              0x108C
+#define E1000_DEV_ID_82573L                   0x109A
+#define E1000_DEV_ID_80003ES2LAN_COPPER_DPT   0x1096
+#define E1000_DEV_ID_80003ES2LAN_SERDES_DPT   0x1098
+#define E1000_DEV_ID_80003ES2LAN_COPPER_SPT   0x10BA
+#define E1000_DEV_ID_80003ES2LAN_SERDES_SPT   0x10BB
+#define E1000_DEV_ID_ICH8_IGP_M_AMT           0x1049
+#define E1000_DEV_ID_ICH8_IGP_AMT             0x104A
+#define E1000_DEV_ID_ICH8_IGP_C               0x104B
+#define E1000_DEV_ID_ICH8_IFE                 0x104C
+#define E1000_DEV_ID_ICH8_IFE_GT              0x10C4
+#define E1000_DEV_ID_ICH8_IFE_G               0x10C5
+#define E1000_DEV_ID_ICH8_IGP_M               0x104D
+#define E1000_DEV_ID_ICH9_IGP_AMT             0x10BD
+#define E1000_DEV_ID_ICH9_IGP_C               0x294C
+#define E1000_DEV_ID_ICH9_IFE                 0x10C0
+#define E1000_DEV_ID_ICH9_IFE_GT              0x10C3
+#define E1000_DEV_ID_ICH9_IFE_G               0x10C2
+
+#define E1000_REVISION_0 0
+#define E1000_REVISION_1 1
+#define E1000_REVISION_2 2
+#define E1000_REVISION_3 3
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0     0
+#define E1000_FUNC_1     1
+
+typedef enum {
+	e1000_undefined = 0,
+	e1000_82542,
+	e1000_82543,
+	e1000_82544,
+	e1000_82540,
+	e1000_82545,
+	e1000_82545_rev_3,
+	e1000_82546,
+	e1000_82546_rev_3,
+	e1000_82541,
+	e1000_82541_rev_2,
+	e1000_82547,
+	e1000_82547_rev_2,
+	e1000_82571,
+	e1000_82572,
+	e1000_82573,
+	e1000_80003es2lan,
+	e1000_ich8lan,
+	e1000_ich9lan,
+	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
+} e1000_mac_type;
+
+typedef enum {
+	e1000_media_type_unknown = 0,
+	e1000_media_type_copper = 1,
+	e1000_media_type_fiber = 2,
+	e1000_media_type_internal_serdes = 3,
+	e1000_num_media_types
+} e1000_media_type;
+
+typedef enum {
+	e1000_nvm_unknown = 0,
+	e1000_nvm_none,
+	e1000_nvm_eeprom_spi,
+	e1000_nvm_eeprom_microwire,
+	e1000_nvm_flash_hw,
+	e1000_nvm_flash_sw
+} e1000_nvm_type;
+
+typedef enum {
+	e1000_nvm_override_none = 0,
+	e1000_nvm_override_spi_small,
+	e1000_nvm_override_spi_large,
+	e1000_nvm_override_microwire_small,
+	e1000_nvm_override_microwire_large
+} e1000_nvm_override;
+
+typedef enum {
+	e1000_phy_unknown = 0,
+	e1000_phy_none,
+	e1000_phy_m88,
+	e1000_phy_igp,
+	e1000_phy_igp_2,
+	e1000_phy_gg82563,
+	e1000_phy_igp_3,
+	e1000_phy_ife,
+} e1000_phy_type;
+
+typedef enum {
+	e1000_bus_type_unknown = 0,
+	e1000_bus_type_pci,
+	e1000_bus_type_pcix,
+	e1000_bus_type_pci_express,
+	e1000_bus_type_reserved
+} e1000_bus_type;
+
+typedef enum {
+	e1000_bus_speed_unknown = 0,
+	e1000_bus_speed_33,
+	e1000_bus_speed_66,
+	e1000_bus_speed_100,
+	e1000_bus_speed_120,
+	e1000_bus_speed_133,
+	e1000_bus_speed_2500,
+	e1000_bus_speed_5000,
+	e1000_bus_speed_reserved
+} e1000_bus_speed;
+
+typedef enum {
+	e1000_bus_width_unknown = 0,
+	e1000_bus_width_pcie_x1,
+	e1000_bus_width_pcie_x2,
+	e1000_bus_width_pcie_x4 = 4,
+	e1000_bus_width_pcie_x8 = 8,
+	e1000_bus_width_32,
+	e1000_bus_width_64,
+	e1000_bus_width_reserved
+} e1000_bus_width;
+
+typedef enum {
+	e1000_1000t_rx_status_not_ok = 0,
+	e1000_1000t_rx_status_ok,
+	e1000_1000t_rx_status_undefined = 0xFF
+} e1000_1000t_rx_status;
+
+typedef enum {
+	e1000_rev_polarity_normal = 0,
+	e1000_rev_polarity_reversed,
+	e1000_rev_polarity_undefined = 0xFF
+} e1000_rev_polarity;
+
+typedef enum {
+	e1000_fc_none = 0,
+	e1000_fc_rx_pause,
+	e1000_fc_tx_pause,
+	e1000_fc_full,
+	e1000_fc_default = 0xFF
+} e1000_fc_type;
+
+typedef enum {
+	e1000_ffe_config_enabled = 0,
+	e1000_ffe_config_active,
+	e1000_ffe_config_blocked
+} e1000_ffe_config;
+
+typedef enum {
+	e1000_dsp_config_disabled = 0,
+	e1000_dsp_config_enabled,
+	e1000_dsp_config_activated,
+	e1000_dsp_config_undefined = 0xFF
+} e1000_dsp_config;
+
+/* Receive Descriptor */
+struct e1000_rx_desc {
+	u64 buffer_addr; /* Address of the descriptor's data buffer */
+	u16 length;      /* Length of data DMAed into data buffer */
+	u16 csum;        /* Packet checksum */
+	u8  status;      /* Descriptor status */
+	u8  errors;      /* Descriptor Errors */
+	u16 special;
+};
+
+/* Receive Descriptor - Extended */
+union e1000_rx_desc_extended {
+	struct {
+		u64 buffer_addr;
+		u64 reserved;
+	} read;
+	struct {
+		struct {
+			u32 mrq;              /* Multiple Rx Queues */
+			union {
+				u32 rss;            /* RSS Hash */
+				struct {
+					u16 ip_id;  /* IP id */
+					u16 csum;   /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length;
+			u16 vlan;             /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
+
+#define MAX_PS_BUFFERS 4
+/* Receive Descriptor - Packet Split */
+union e1000_rx_desc_packet_split {
+	struct {
+		/* one buffer for protocol header(s), three data buffers */
+		u64 buffer_addr[MAX_PS_BUFFERS];
+	} read;
+	struct {
+		struct {
+			u32 mrq;              /* Multiple Rx Queues */
+			union {
+				u32 rss;              /* RSS Hash */
+				struct {
+					u16 ip_id;    /* IP id */
+					u16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			u32 status_error;     /* ext status/error */
+			u16 length0;          /* length of buffer 0 */
+			u16 vlan;             /* VLAN tag */
+		} middle;
+		struct {
+			u16 header_status;
+			u16 length[3];        /* length of buffers 1-3 */
+		} upper;
+		u64 reserved;
+	} wb; /* writeback */
+};
+
+/* Transmit Descriptor */
+struct e1000_tx_desc {
+	u64 buffer_addr;      /* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u16 length;    /* Data buffer length */
+			u8 cso;        /* Checksum offset */
+			u8 cmd;        /* Descriptor control */
+		} flags;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 css;        /* Checksum start */
+			u16 special;
+		} fields;
+	} upper;
+};
+
+/* Offload Context Descriptor */
+struct e1000_context_desc {
+	union {
+		u32 ip_config;
+		struct {
+			u8 ipcss;      /* IP checksum start */
+			u8 ipcso;      /* IP checksum offset */
+			u16 ipcse;     /* IP checksum end */
+		} ip_fields;
+	} lower_setup;
+	union {
+		u32 tcp_config;
+		struct {
+			u8 tucss;      /* TCP checksum start */
+			u8 tucso;      /* TCP checksum offset */
+			u16 tucse;     /* TCP checksum end */
+		} tcp_fields;
+	} upper_setup;
+	u32 cmd_and_length;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 hdr_len;    /* Header length */
+			u16 mss;       /* Maximum segment size */
+		} fields;
+	} tcp_seg_setup;
+};
+
+/* Offload data descriptor */
+struct e1000_data_desc {
+	u64 buffer_addr;   /* Address of the descriptor's data buffer */
+	union {
+		u32 data;
+		struct {
+			u16 length;    /* Data buffer length */
+			u8 typ_len_ext;
+			u8 cmd;
+		} flags;
+	} lower;
+	union {
+		u32 data;
+		struct {
+			u8 status;     /* Descriptor status */
+			u8 popts;      /* Packet Options */
+			u16 special;
+		} fields;
+	} upper;
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 tor;
+	u64 tot;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+	u64 cbtmpc;
+	u64 htdpmc;
+	u64 cbrdpc;
+	u64 cbrmpc;
+	u64 rpthc;
+	u64 hgptc;
+	u64 htcbdpc;
+	u64 hgorc;
+	u64 hgotc;
+	u64 lenerrs;
+	u64 scvpc;
+	u64 hrmpc;
+};
+
+struct e1000_phy_stats {
+	u32 idle_errors;
+	u32 receive_errors;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8  status;
+	u8  reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8  reserved3;
+	u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8  command_id;
+	u8  checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_manage.h"
+
+struct e1000_functions {
+	/* Function pointers for the MAC. */
+	s32  (*init_mac_params)(struct e1000_hw *);
+	s32  (*blink_led)(struct e1000_hw *);
+	s32  (*check_for_link)(struct e1000_hw *);
+	bool (*check_mng_mode)(struct e1000_hw *hw);
+	s32  (*cleanup_led)(struct e1000_hw *);
+	void (*clear_hw_cntrs)(struct e1000_hw *);
+	void (*clear_vfta)(struct e1000_hw *);
+	s32  (*get_bus_info)(struct e1000_hw *);
+	s32  (*get_link_up_info)(struct e1000_hw *, u16 *, u16 *);
+	s32  (*led_on)(struct e1000_hw *);
+	s32  (*led_off)(struct e1000_hw *);
+	void (*update_mc_addr_list)(struct e1000_hw *, u8 *, u32, u32,
+	                            u32);
+	void (*remove_device)(struct e1000_hw *);
+	s32  (*reset_hw)(struct e1000_hw *);
+	s32  (*init_hw)(struct e1000_hw *);
+	s32  (*setup_link)(struct e1000_hw *);
+	s32  (*setup_physical_interface)(struct e1000_hw *);
+	s32  (*setup_led)(struct e1000_hw *);
+	void (*write_vfta)(struct e1000_hw *, u32, u32);
+	void (*mta_set)(struct e1000_hw *, u32);
+	void (*config_collision_dist)(struct e1000_hw*);
+	void (*rar_set)(struct e1000_hw*, u8*, u32);
+	s32  (*read_mac_addr)(struct e1000_hw*);
+	s32  (*validate_mdi_setting)(struct e1000_hw*);
+	s32  (*mng_host_if_write)(struct e1000_hw*, u8*, u16, u16, u8*);
+	s32  (*mng_write_cmd_header)(struct e1000_hw *hw,
+                      struct e1000_host_mng_command_header*);
+	s32  (*mng_enable_host_if)(struct e1000_hw*);
+	s32  (*wait_autoneg)(struct e1000_hw*);
+
+	/* Function pointers for the PHY. */
+	s32  (*init_phy_params)(struct e1000_hw *);
+	s32  (*acquire_phy)(struct e1000_hw *);
+	s32  (*check_polarity)(struct e1000_hw *);
+	s32  (*check_reset_block)(struct e1000_hw *);
+	s32  (*commit_phy)(struct e1000_hw *);
+	s32  (*force_speed_duplex)(struct e1000_hw *);
+	s32  (*get_cfg_done)(struct e1000_hw *hw);
+	s32  (*get_cable_length)(struct e1000_hw *);
+	s32  (*get_phy_info)(struct e1000_hw *);
+	s32  (*read_phy_reg)(struct e1000_hw *, u32, u16 *);
+	void (*release_phy)(struct e1000_hw *);
+	s32  (*reset_phy)(struct e1000_hw *);
+	s32  (*set_d0_lplu_state)(struct e1000_hw *, bool);
+	s32  (*set_d3_lplu_state)(struct e1000_hw *, bool);
+	s32  (*write_phy_reg)(struct e1000_hw *, u32, u16);
+	void (*power_up_phy)(struct e1000_hw *);
+	void (*power_down_phy)(struct e1000_hw *);
+
+	/* Function pointers for the NVM. */
+	s32  (*init_nvm_params)(struct e1000_hw *);
+	s32  (*acquire_nvm)(struct e1000_hw *);
+	s32  (*read_nvm)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release_nvm)(struct e1000_hw *);
+	void (*reload_nvm)(struct e1000_hw *);
+	s32  (*update_nvm)(struct e1000_hw *);
+	s32  (*valid_led_default)(struct e1000_hw *, u16 *);
+	s32  (*validate_nvm)(struct e1000_hw *);
+	s32  (*write_nvm)(struct e1000_hw *, u16, u16, u16 *);
+};
+
+struct e1000_mac_info {
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	e1000_mac_type type;
+
+	u32 collision_delta;
+	u32 ledctl_default;
+	u32 ledctl_mode1;
+	u32 ledctl_mode2;
+	u32 mc_filter_type;
+	u32 tx_packet_delta;
+	u32 txcw;
+
+	u16 current_ifs_val;
+	u16 ifs_max_val;
+	u16 ifs_min_val;
+	u16 ifs_ratio;
+	u16 ifs_step_size;
+	u16 mta_reg_count;
+	u16 rar_entry_count;
+
+	u8  forced_speed_duplex;
+
+	bool adaptive_ifs;
+	bool arc_subsystem_valid;
+	bool asf_firmware_present;
+	bool autoneg;
+	bool autoneg_failed;
+	bool disable_av;
+	bool disable_hw_init_bits;
+	bool get_link_status;
+	bool ifs_params_forced;
+	bool in_ifs_mode;
+	bool report_tx_early;
+	bool serdes_has_link;
+	bool tx_pkt_filtering;
+};
+
+struct e1000_phy_info {
+	e1000_phy_type type;
+
+	e1000_1000t_rx_status local_rx;
+	e1000_1000t_rx_status remote_rx;
+	e1000_ms_type ms_type;
+	e1000_ms_type original_ms_type;
+	e1000_rev_polarity cable_polarity;
+	e1000_smart_speed smart_speed;
+
+	u32 addr;
+	u32 id;
+	u32 reset_delay_us; /* in usec */
+	u32 revision;
+
+	e1000_media_type media_type;
+
+	u16 autoneg_advertised;
+	u16 autoneg_mask;
+	u16 cable_length;
+	u16 max_cable_length;
+	u16 min_cable_length;
+
+	u8 mdix;
+
+	bool disable_polarity_correction;
+	bool is_mdix;
+	bool polarity_correction;
+	bool reset_disable;
+	bool speed_downgraded;
+	bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+	e1000_nvm_type type;
+	e1000_nvm_override override;
+
+	u32 flash_bank_size;
+	u32 flash_base_addr;
+
+	u16 word_size;
+	u16 delay_usec;
+	u16 address_bits;
+	u16 opcode_bits;
+	u16 page_size;
+};
+
+struct e1000_bus_info {
+	e1000_bus_type type;
+	e1000_bus_speed speed;
+	e1000_bus_width width;
+
+	u32 snoop;
+
+	u16 func;
+	u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+	u32 high_water;     /* Flow control high-water mark */
+	u32 low_water;      /* Flow control low-water mark */
+	u16 pause_time;     /* Flow control pause timer */
+	bool send_xon;      /* Flow control send XON */
+	bool strict_ieee;   /* Strict IEEE mode */
+	e1000_fc_type type; /* Type of flow control */
+	e1000_fc_type original_type;
+};
+
+struct e1000_hw {
+	void *back;
+	void *dev_spec;
+
+	u8 __iomem *hw_addr;
+	u8 __iomem *flash_address;
+	unsigned long io_base;
+
+	struct e1000_functions func;
+	struct e1000_mac_info  mac;
+	struct e1000_fc_info   fc;
+	struct e1000_phy_info  phy;
+	struct e1000_nvm_info  nvm;
+	struct e1000_bus_info  bus;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+	u32 dev_spec_size;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8  revision_id;
+};
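+
+/*
+ * Note (illustrative): shared code dispatches through hw->func rather
+ * than calling family code directly; a PHY register read, for example,
+ * is expected to go through
+ *   hw->func.read_phy_reg(hw, offset, &data);
+ * so each MAC family (ich8lan, 82571, ...) can install its own
+ * implementations from its init_mac/phy/nvm_params hooks.
+ */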
+
+/* These functions must be implemented by drivers */
+void e1000_pci_clear_mwi(struct e1000_hw *hw);
+void e1000_pci_set_mwi(struct e1000_hw *hw);
+s32  e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size);
+s32  e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_free_dev_spec_struct(struct e1000_hw *hw);
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c
new file mode 100644
index 0000000..c341584
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.c
@@ -0,0 +1,2582 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+/* e1000_ich8lan
+ * e1000_ich9lan
+ */
+
+#include "e1000_api.h"
+#include "e1000_ich8lan.h"
+
+static s32  e1000_init_phy_params_ich8lan(struct e1000_hw *hw);
+static s32  e1000_init_nvm_params_ich8lan(struct e1000_hw *hw);
+static s32  e1000_init_mac_params_ich8lan(struct e1000_hw *hw);
+static s32  e1000_acquire_swflag_ich8lan(struct e1000_hw *hw);
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw);
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw);
+static s32  e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw);
+static s32  e1000_check_reset_block_ich8lan(struct e1000_hw *hw);
+static s32  e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw);
+static s32  e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw);
+static s32  e1000_get_phy_info_ich8lan(struct e1000_hw *hw);
+static s32  e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
+                                            bool active);
+static s32  e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
+                                            bool active);
+static s32  e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+                                   u16 words, u16 *data);
+static s32  e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset,
+                                    u16 words, u16 *data);
+static s32  e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw);
+static s32  e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw);
+static s32  e1000_valid_led_default_ich8lan(struct e1000_hw *hw,
+                                            u16 *data);
+static s32  e1000_get_bus_info_ich8lan(struct e1000_hw *hw);
+static s32  e1000_reset_hw_ich8lan(struct e1000_hw *hw);
+static s32  e1000_init_hw_ich8lan(struct e1000_hw *hw);
+static s32  e1000_setup_link_ich8lan(struct e1000_hw *hw);
+static s32  e1000_setup_copper_link_ich8lan(struct e1000_hw *hw);
+static s32  e1000_get_link_up_info_ich8lan(struct e1000_hw *hw,
+                                           u16 *speed, u16 *duplex);
+static s32  e1000_cleanup_led_ich8lan(struct e1000_hw *hw);
+static s32  e1000_led_on_ich8lan(struct e1000_hw *hw);
+static s32  e1000_led_off_ich8lan(struct e1000_hw *hw);
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw);
+static s32  e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank);
+static s32  e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout);
+static s32  e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw);
+static s32  e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw);
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw);
+static s32  e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw);
+static s32  e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u8 size, u16* data);
+static s32  e1000_read_flash_word_ich8lan(struct e1000_hw *hw,
+                                          u32 offset, u16 *data);
+static s32  e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+                                                 u32 offset, u8 byte);
+static s32  e1000_write_flash_byte_ich8lan(struct e1000_hw *hw,
+                                           u32 offset, u8 data);
+static s32  e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                           u8 size, u16 data);
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw);
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw);
+
+/* ICH GbE Flash Hardware Sequencing Flash Status Register bit breakdown */
+/* Offset 04h HSFSTS */
+union ich8_hws_flash_status {
+	struct ich8_hsfsts {
+		u16 flcdone    :1; /* bit 0 Flash Cycle Done */
+		u16 flcerr     :1; /* bit 1 Flash Cycle Error */
+		u16 dael       :1; /* bit 2 Direct Access error Log */
+		u16 berasesz   :2; /* bit 4:3 Sector Erase Size */
+		u16 flcinprog  :1; /* bit 5 flash cycle in Progress */
+		u16 reserved1  :2; /* bit 7:6 Reserved */
+		u16 reserved2  :6; /* bit 13:8 Reserved */
+		u16 fldesvalid :1; /* bit 14 Flash Descriptor Valid */
+		u16 flockdn    :1; /* bit 15 Flash Config Lock-Down */
+	} hsf_status;
+	u16 regval;
+};
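+
+/*
+ * Note (illustrative): the union lets the flash helpers read the
+ * 16-bit HSFSTS register once into .regval and then test or set
+ * individual fields through .hsf_status, e.g. (sketch, assuming the
+ * 16-bit flash accessor from e1000_osdep.h):
+ *   hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+ *   if (hsfsts.hsf_status.flcinprog) ...
+ */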
+
+/* ICH GbE Flash Hardware Sequencing Flash control Register bit breakdown */
+/* Offset 06h FLCTL */
+union ich8_hws_flash_ctrl {
+	struct ich8_hsflctl {
+		u16 flcgo      :1;   /* 0 Flash Cycle Go */
+		u16 flcycle    :2;   /* 2:1 Flash Cycle */
+		u16 reserved   :5;   /* 7:3 Reserved  */
+		u16 fldbcount  :2;   /* 9:8 Flash Data Byte Count */
+		u16 flockdn    :6;   /* 15:10 Reserved */
+	} hsf_ctrl;
+	u16 regval;
+};
+
+/* ICH Flash Region Access Permissions */
+union ich8_hws_flash_regacc {
+	struct ich8_flracc {
+		u32 grra      :8; /* 0:7 GbE region Read Access */
+		u32 grwa      :8; /* 8:15 GbE region Write Access */
+		u32 gmrag     :8; /* 23:16 GbE Master Read Access Grant */
+		u32 gmwag     :8; /* 31:24 GbE Master Write Access Grant */
+	} hsf_flregacc;
+	u32 regval; /* fields above total 32 bits */
+};
+
+struct e1000_shadow_ram {
+	u16  value;
+	bool modified;
+};
+
+struct e1000_dev_spec_ich8lan {
+	bool kmrn_lock_loss_workaround_enabled;
+	struct e1000_shadow_ram shadow_ram[E1000_SHADOW_RAM_WORDS];
+};
+
+/**
+ *  e1000_init_phy_params_ich8lan - Initialize PHY function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific PHY parameters and function pointers.
+ **/
+static s32 e1000_init_phy_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i = 0;
+
+	DEBUGFUNC("e1000_init_phy_params_ich8lan");
+
+	phy->addr                       = 1;
+	phy->reset_delay_us             = 100;
+
+	func->acquire_phy               = e1000_acquire_swflag_ich8lan;
+	func->check_polarity            = e1000_check_polarity_ife_ich8lan;
+	func->check_reset_block         = e1000_check_reset_block_ich8lan;
+	func->force_speed_duplex        = e1000_phy_force_speed_duplex_ich8lan;
+	func->get_cable_length          = e1000_get_cable_length_igp_2;
+	func->get_cfg_done              = e1000_get_cfg_done_ich8lan;
+	func->get_phy_info              = e1000_get_phy_info_ich8lan;
+	func->read_phy_reg              = e1000_read_phy_reg_igp;
+	func->release_phy               = e1000_release_swflag_ich8lan;
+	func->reset_phy                 = e1000_phy_hw_reset_ich8lan;
+	func->set_d0_lplu_state         = e1000_set_d0_lplu_state_ich8lan;
+	func->set_d3_lplu_state         = e1000_set_d3_lplu_state_ich8lan;
+	func->write_phy_reg             = e1000_write_phy_reg_igp;
+	func->power_up_phy              = e1000_power_up_phy_copper;
+	func->power_down_phy            = e1000_power_down_phy_copper_ich8lan;
+
+
+	phy->id = 0;
+	while ((e1000_phy_unknown == e1000_get_phy_type_from_id(phy->id)) &&
+	       (i++ < 100)) {
+		msec_delay(1);
+		ret_val = e1000_get_phy_id(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Verify phy id */
+	switch (phy->id) {
+	case IGP03E1000_E_PHY_ID:
+		phy->type = e1000_phy_igp_3;
+		phy->autoneg_mask = AUTONEG_ADVERTISE_SPEED_DEFAULT;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy->type = e1000_phy_ife;
+		phy->autoneg_mask = E1000_ALL_NOT_GIG;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_nvm_params_ich8lan - Initialize NVM function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific NVM parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_nvm_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_functions *func = &hw->func;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 gfpreg, sector_base_addr, sector_end_addr;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_nvm_params_ich8lan");
+
+	/* Can't read flash registers if the register set isn't mapped. */
+	if (!hw->flash_address) {
+		DEBUGOUT("ERROR: Flash registers not mapped\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	nvm->type               = e1000_nvm_flash_sw;
+
+	gfpreg = E1000_READ_FLASH_REG(hw, ICH_FLASH_GFPREG);
+
+	/*
+	 * sector_X_addr is a "sector"-aligned address (4096 bytes)
+	 * Add 1 to sector_end_addr since this sector is included in
+	 * the overall size.
+	 */
+	sector_base_addr = gfpreg & FLASH_GFPREG_BASE_MASK;
+	sector_end_addr = ((gfpreg >> 16) & FLASH_GFPREG_BASE_MASK) + 1;
+
+	/* flash_base_addr is byte-aligned */
+	nvm->flash_base_addr    = sector_base_addr << FLASH_SECTOR_ADDR_SHIFT;
+
+	/*
+	 * find total size of the NVM, then cut in half since the total
+	 * size represents two separate NVM banks.
+	 */
+	nvm->flash_bank_size    = (sector_end_addr - sector_base_addr)
+	                          << FLASH_SECTOR_ADDR_SHIFT;
+	nvm->flash_bank_size    /= 2;
+	/* Adjust to word count */
+	nvm->flash_bank_size    /= sizeof(u16);
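+
+	/*
+	 * Worked example (illustrative): with gfpreg = 0x001F0000 the
+	 * base sector is 0 and the end-sector field is 0x1F, so
+	 * sector_end_addr = 0x20 and the region spans 0x20 << 12 =
+	 * 128 KiB; halving for the two banks and dividing by sizeof(u16)
+	 * leaves a 32768-word flash bank.
+	 */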
+
+	nvm->word_size          = E1000_SHADOW_RAM_WORDS;
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Clear shadow ram */
+	for (i = 0; i < nvm->word_size; i++) {
+		dev_spec->shadow_ram[i].modified = FALSE;
+		dev_spec->shadow_ram[i].value    = 0xFFFF;
+	}
+
+	/* Function Pointers */
+	func->acquire_nvm       = e1000_acquire_swflag_ich8lan;
+	func->read_nvm          = e1000_read_nvm_ich8lan;
+	func->release_nvm       = e1000_release_swflag_ich8lan;
+	func->update_nvm        = e1000_update_nvm_checksum_ich8lan;
+	func->valid_led_default = e1000_valid_led_default_ich8lan;
+	func->validate_nvm      = e1000_validate_nvm_checksum_ich8lan;
+	func->write_nvm         = e1000_write_nvm_ich8lan;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_mac_params_ich8lan - Initialize MAC function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific MAC parameters and function
+ *  pointers.
+ **/
+static s32 e1000_init_mac_params_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_init_mac_params_ich8lan");
+
+	/* Set media type function pointer */
+	hw->phy.media_type = e1000_media_type_copper;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 32;
+	/* Set rar entry count */
+	mac->rar_entry_count = E1000_ICH_RAR_ENTRIES;
+	if (mac->type == e1000_ich8lan)
+		mac->rar_entry_count--;
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = TRUE;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid = TRUE;
+
+	/* Function pointers */
+
+	/* bus type/speed/width */
+	func->get_bus_info = e1000_get_bus_info_ich8lan;
+	/* reset */
+	func->reset_hw = e1000_reset_hw_ich8lan;
+	/* hw initialization */
+	func->init_hw = e1000_init_hw_ich8lan;
+	/* link setup */
+	func->setup_link = e1000_setup_link_ich8lan;
+	/* physical interface setup */
+	func->setup_physical_interface = e1000_setup_copper_link_ich8lan;
+	/* check for link */
+	func->check_for_link = e1000_check_for_copper_link_generic;
+	/* check management mode */
+	func->check_mng_mode = e1000_check_mng_mode_ich8lan;
+	/* link info */
+	func->get_link_up_info = e1000_get_link_up_info_ich8lan;
+	/* multicast address update */
+	func->update_mc_addr_list = e1000_update_mc_addr_list_generic;
+	/* setting MTA */
+	func->mta_set = e1000_mta_set_generic;
+	/* blink LED */
+	func->blink_led = e1000_blink_led_generic;
+	/* setup LED */
+	func->setup_led = e1000_setup_led_generic;
+	/* cleanup LED */
+	func->cleanup_led = e1000_cleanup_led_ich8lan;
+	/* turn on/off LED */
+	func->led_on = e1000_led_on_ich8lan;
+	func->led_off = e1000_led_off_ich8lan;
+	/* remove device */
+	func->remove_device = e1000_remove_device_generic;
+	/* clear hardware counters */
+	func->clear_hw_cntrs = e1000_clear_hw_cntrs_ich8lan;
+
+	hw->dev_spec_size = sizeof(struct e1000_dev_spec_ich8lan);
+
+	/* Device-specific structure allocation */
+	ret_val = e1000_alloc_zeroed_dev_spec_struct(hw, hw->dev_spec_size);
+	if (ret_val)
+		goto out;
+
+	/* Enable PCS Lock-loss workaround for ICH8 */
+	if (mac->type == e1000_ich8lan)
+		e1000_set_kmrn_lock_loss_workaround_ich8lan(hw, TRUE);
+
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_init_function_pointers_ich8lan - Initialize ICH8 function pointers
+ *  @hw: pointer to the HW structure
+ *
+ *  Initialize family-specific function pointers for PHY, MAC, and NVM.
+ **/
+void e1000_init_function_pointers_ich8lan(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_init_function_pointers_ich8lan");
+
+	hw->func.init_mac_params = e1000_init_mac_params_ich8lan;
+	hw->func.init_nvm_params = e1000_init_nvm_params_ich8lan;
+	hw->func.init_phy_params = e1000_init_phy_params_ich8lan;
+}
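+
+/*
+ * Note (illustrative): only the three init_*_params hooks are
+ * installed here; the shared setup code presumably calls them later
+ * (e.g. from e1000_setup_init_funcs() in e1000_api.c), MAC first,
+ * which matters because init_mac_params allocates hw->dev_spec and
+ * init_nvm_params above refuses to run without it.
+ */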
+
+/**
+ *  e1000_acquire_swflag_ich8lan - Acquire software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquires the software control flag for performing NVM and PHY
+ *  operations.  This is a function pointer entry point only called by
+ *  read/write routines for the PHY and NVM parts.
+ **/
+static s32 e1000_acquire_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl, timeout = PHY_CFG_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_acquire_swflag_ich8lan");
+
+	while (timeout) {
+		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		extcnf_ctrl |= E1000_EXTCNF_CTRL_SWFLAG;
+		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+		extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		if (extcnf_ctrl & E1000_EXTCNF_CTRL_SWFLAG)
+			break;
+		msec_delay_irq(1);
+		timeout--;
+	}
+
+	if (!timeout) {
+		DEBUGOUT("FW or HW has locked the resource for too long.\n");
+		extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+		E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
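+
+/*
+ * Usage note (illustrative): callers are expected to bracket each NVM
+ * or PHY access with this acquire/release pair, e.g.
+ *
+ *   if (e1000_acquire_swflag_ich8lan(hw) == E1000_SUCCESS) {
+ *           ... access flash or PHY registers ...
+ *           e1000_release_swflag_ich8lan(hw);
+ *   }
+ *
+ * The acquire gives up after PHY_CFG_TIMEOUT ms if firmware or
+ * hardware holds the flag.
+ */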
+
+/**
+ *  e1000_release_swflag_ich8lan - Release software control flag
+ *  @hw: pointer to the HW structure
+ *
+ *  Releases the software control flag for performing NVM and PHY operations.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static void e1000_release_swflag_ich8lan(struct e1000_hw *hw)
+{
+	u32 extcnf_ctrl;
+
+	DEBUGFUNC("e1000_release_swflag_ich8lan");
+
+	extcnf_ctrl = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+	extcnf_ctrl &= ~E1000_EXTCNF_CTRL_SWFLAG;
+	E1000_WRITE_REG(hw, E1000_EXTCNF_CTRL, extcnf_ctrl);
+
+	return;
+}
+
+/**
+ *  e1000_check_mng_mode_ich8lan - Checks management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  This checks if the adapter has manageability enabled.
+ *  This is a function pointer entry point only called by read/write
+ *  routines for the PHY and NVM parts.
+ **/
+static bool e1000_check_mng_mode_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	DEBUGFUNC("e1000_check_mng_mode_ich8lan");
+
+	fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+	return ((fwsm & E1000_FWSM_MODE_MASK) ==
+	        (E1000_ICH_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ *  e1000_check_reset_block_ich8lan - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks if firmware is blocking the reset of the PHY.
+ *  This is a function pointer entry point only called by
+ *  reset routines.
+ **/
+static s32 e1000_check_reset_block_ich8lan(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	DEBUGFUNC("e1000_check_reset_block_ich8lan");
+
+	fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+	return (fwsm & E1000_ICH_FWSM_RSPCIPHY) ? E1000_SUCCESS
+	                                        : E1000_BLK_PHY_RESET;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_ich8lan - Force PHY speed & duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  Forces the speed and duplex settings of the PHY.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_phy_force_speed_duplex_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_ich8lan");
+
+	if (phy->type != e1000_phy_ife) {
+		ret_val = e1000_phy_force_speed_duplex_igp(hw);
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &data);
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	/* Disable MDI-X support for 10/100 */
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IFE_PMC_AUTO_MDIX;
+	data &= ~IFE_PMC_FORCE_MDIX;
+
+	ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("IFE PMC: %X\n", data);
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on IFE phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			DEBUGOUT("Link taking longer than expected.\n");
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_ich8lan - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY
+ *  This is a function pointer entry point called by drivers
+ *  or other shared routines.
+ **/
+static s32 e1000_phy_hw_reset_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, data, cnf_size, cnf_base_addr, sw_cfg_mask;
+	s32 ret_val;
+	u16 loop = E1000_ICH8_LAN_INIT_TIMEOUT;
+	u16 word_addr, reg_data, reg_addr, phy_page = 0;
+
+	DEBUGFUNC("e1000_phy_hw_reset_ich8lan");
+
+	ret_val = e1000_phy_hw_reset_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Initialize the PHY from the NVM on ICH platforms.  This
+	 * is needed due to an issue where the NVM configuration is
+	 * not properly autoloaded after power transitions.
+	 * Therefore, after each PHY reset, we will load the
+	 * configuration data out of the NVM manually.
+	 */
+	if (hw->mac.type == e1000_ich8lan && phy->type == e1000_phy_igp_3) {
+		/* Check if SW needs to configure the PHY */
+		if ((hw->device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (hw->device_id == E1000_DEV_ID_ICH8_IGP_M))
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG_ICH8M;
+		else
+			sw_cfg_mask = E1000_FEXTNVM_SW_CONFIG;
+
+		data = E1000_READ_REG(hw, E1000_FEXTNVM);
+		if (!(data & sw_cfg_mask))
+			goto out;
+
+		/* Wait for basic configuration to complete before proceeding */
+		do {
+			data = E1000_READ_REG(hw, E1000_STATUS);
+			data &= E1000_STATUS_LAN_INIT_DONE;
+			usec_delay(100);
+		} while ((!data) && --loop);
+
+		/*
+		 * If basic configuration is incomplete before the above loop
+		 * count reaches 0, loading the configuration from NVM will
+		 * leave the PHY in a bad state possibly resulting in no link.
+		 */
+		if (loop == 0) {
+			DEBUGOUT("LAN_INIT_DONE not set, increase timeout\n");
+		}
+
+		/* Clear the Init Done bit for the next init event */
+		data = E1000_READ_REG(hw, E1000_STATUS);
+		data &= ~E1000_STATUS_LAN_INIT_DONE;
+		E1000_WRITE_REG(hw, E1000_STATUS, data);
+
+		/*
+		 * Make sure HW does not configure LCD from PHY
+		 * extended configuration before SW configuration
+		 */
+		data = E1000_READ_REG(hw, E1000_EXTCNF_CTRL);
+		if (data & E1000_EXTCNF_CTRL_LCD_WRITE_ENABLE)
+			goto out;
+
+		cnf_size = E1000_READ_REG(hw, E1000_EXTCNF_SIZE);
+		cnf_size &= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_MASK;
+		cnf_size >>= E1000_EXTCNF_SIZE_EXT_PCIE_LENGTH_SHIFT;
+		if (!cnf_size)
+			goto out;
+
+		cnf_base_addr = data & E1000_EXTCNF_CTRL_EXT_CNF_POINTER_MASK;
+		cnf_base_addr >>= E1000_EXTCNF_CTRL_EXT_CNF_POINTER_SHIFT;
+
+		/*
+		 * Configure LCD from extended configuration
+		 * region.
+		 */
+
+		/* cnf_base_addr is in DWORDs; convert to a word offset */
+		word_addr = (u16)(cnf_base_addr << 1);
+
+		for (i = 0; i < cnf_size; i++) {
+			ret_val = e1000_read_nvm(hw,
+			                        (word_addr + i * 2),
+			                        1,
+			                        &reg_data);
+			if (ret_val)
+				goto out;
+
+			ret_val = e1000_read_nvm(hw,
+			                        (word_addr + i * 2 + 1),
+			                        1,
+			                        &reg_addr);
+			if (ret_val)
+				goto out;
+
+			/* Save off the PHY page for future writes. */
+			if (reg_addr == IGP01E1000_PHY_PAGE_SELECT) {
+				phy_page = reg_data;
+				continue;
+			}
+
+			reg_addr |= phy_page;
+
+			ret_val = e1000_write_phy_reg(hw,
+			                             (u32)reg_addr,
+			                             reg_data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ich8lan - Calls appropriate PHY type get_phy_info
+ *  @hw: pointer to the HW structure
+ *
+ *  Wrapper for calling the get_phy_info routines for the appropriate phy type.
+ *  This is a function pointer entry point called by drivers
+ *  or other shared routines.
+ **/
+static s32 e1000_get_phy_info_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_PHY_TYPE;
+
+	DEBUGFUNC("e1000_get_phy_info_ich8lan");
+
+	switch (hw->phy.type) {
+	case e1000_phy_ife:
+		ret_val = e1000_get_phy_info_ife_ich8lan(hw);
+		break;
+	case e1000_phy_igp_3:
+		ret_val = e1000_get_phy_info_igp(hw);
+		break;
+	default:
+		break;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_ife_ich8lan - Retrieves various IFE PHY states
+ *  @hw: pointer to the HW structure
+ *
+ *  Populates "phy" structure with various feature states.
+ *  This function is only called by other family-specific
+ *  routines.
+ **/
+static s32 e1000_get_phy_info_ife_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_ife_ich8lan");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_SPECIAL_CONTROL, &data);
+	if (ret_val)
+		goto out;
+	phy->polarity_correction = (data & IFE_PSC_AUTO_POLARITY_DISABLE)
+	                           ? FALSE : TRUE;
+
+	if (phy->polarity_correction) {
+		ret_val = e1000_check_polarity_ife_ich8lan(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* Polarity is forced */
+		phy->cable_polarity = (data & IFE_PSC_FORCE_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IFE_PMC_MDIX_STATUS) ? TRUE : FALSE;
+
+	/* The following parameters are undefined for 10/100 operation. */
+	phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+	phy->local_rx = e1000_1000t_rx_status_undefined;
+	phy->remote_rx = e1000_1000t_rx_status_undefined;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_ife_ich8lan - Check cable polarity for IFE PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Polarity is determined based on whether the polarity reversal feature is enabled.
+ *  This function is only called by other family-specific
+ *  routines.
+ **/
+static s32 e1000_check_polarity_ife_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_ife_ich8lan");
+
+	/*
+	 * Polarity is determined based on the reversal feature
+	 * being enabled.
+	 */
+	if (phy->polarity_correction) {
+		offset	= IFE_PHY_EXTENDED_STATUS_CONTROL;
+		mask	= IFE_PESC_POLARITY_REVERSED;
+	} else {
+		offset	= IFE_PHY_SPECIAL_CONTROL;
+		mask	= IFE_PSC_FORCE_POLARITY;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->cable_polarity = (phy_data & mask)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d0_lplu_state_ich8lan - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d0_lplu_state_ich8lan(struct e1000_hw *hw,
+                                           bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d0_lplu_state_ich8lan");
+
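+	/* The D0 LPLU control below does not apply to the IFE PHY. */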
+	if (phy->type == e1000_phy_ife)
+		goto out;
+
+	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+	if (active) {
+		phy_ctrl |= E1000_PHY_CTRL_D0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                            IGP01E1000_PHY_PORT_CONFIG,
+		                            &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             data);
+		if (ret_val)
+			goto out;
+	} else {
+		phy_ctrl &= ~E1000_PHY_CTRL_D0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_d3_lplu_state_ich8lan - Set Low Power Linkup D3 state
+ *  @hw: pointer to the HW structure
+ *  @active: TRUE to enable LPLU, FALSE to disable
+ *
+ *  Sets the LPLU D3 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 e1000_set_d3_lplu_state_ich8lan(struct e1000_hw *hw,
+                                           bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 phy_ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d3_lplu_state_ich8lan");
+
+	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+
+	if (!active) {
+		phy_ctrl &= ~E1000_PHY_CTRL_NOND0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		phy_ctrl |= E1000_PHY_CTRL_NOND0A_LPLU;
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+		/*
+		 * Call gig speed drop workaround on LPLU before accessing
+		 * any PHY registers
+		 */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                            IGP01E1000_PHY_PORT_CONFIG,
+		                            &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_nvm_bank_detect_ich8lan - Detect the valid NVM bank (0 or 1)
+ *  @hw: pointer to the HW structure
+ *  @bank: pointer to the variable that returns the active bank
+ *
+ *  Determines the valid NVM bank from the SEC1VAL bit in the EECD register.
+ **/
+static s32 e1000_valid_nvm_bank_detect_ich8lan(struct e1000_hw *hw, u32 *bank)
+{
+	s32 ret_val = E1000_SUCCESS;
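+
+	/* EECD.SEC1VAL indicates that the second bank (bank 1) is valid. */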
+	if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_SEC1VAL)
+		*bank = 1;
+	else
+		*bank = 0;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_ich8lan - Read word(s) from the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the word(s) to read.
+ *  @words: Size of data to read in words
+ *  @data: Pointer to the word(s) to read at offset.
+ *
+ *  Reads a word(s) from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+                                  u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 act_offset;
+	s32 ret_val = E1000_SUCCESS;
+	u32 bank = 0;
+	u16 i, word;
+
+	DEBUGFUNC("e1000_read_nvm_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val != E1000_SUCCESS)
+		goto out;
+
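+	/* Bank 1 begins one bank size (in words) into the NVM. */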
+	act_offset = (bank) ? nvm->flash_bank_size : 0;
+	act_offset += offset;
+
+	for (i = 0; i < words; i++) {
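+		/* Prefer a pending shadow RAM value over the flash contents. */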
+		if ((dev_spec->shadow_ram) &&
+		    (dev_spec->shadow_ram[offset+i].modified)) {
+			data[i] = dev_spec->shadow_ram[offset+i].value;
+		} else {
+			ret_val = e1000_read_flash_word_ich8lan(hw,
+			                                        act_offset + i,
+			                                        &word);
+			if (ret_val)
+				break;
+			data[i] = word;
+		}
+	}
+
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_init_ich8lan - Initialize flash
+ *  @hw: pointer to the HW structure
+ *
+ *  This function does initial flash setup so that a new read/write/erase cycle
+ *  can be started.
+ **/
+static s32 e1000_flash_cycle_init_ich8lan(struct e1000_hw *hw)
+{
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+	s32 i = 0;
+
+	DEBUGFUNC("e1000_flash_cycle_init_ich8lan");
+
+	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+	/* Check if the flash descriptor is valid */
+	if (hsfsts.hsf_status.fldesvalid == 0) {
+		DEBUGOUT("Flash descriptor invalid.  "
+		         "SW Sequencing must be used.");
+		goto out;
+	}
+
+	/* Clear FCERR and DAEL in hw status by writing 1 */
+	hsfsts.hsf_status.flcerr = 1;
+	hsfsts.hsf_status.dael = 1;
+
+	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+
+	/*
+	 * Either we should have a hardware SPI cycle in progress
+	 * bit to check against, in order to start a new cycle or
+	 * FDONE bit should be changed in the hardware so that it
+	 * is 1 after hardware reset, which can then be used as an
+	 * indication whether a cycle is in progress or has been
+	 * completed.
+	 */
+
+	if (hsfsts.hsf_status.flcinprog == 0) {
+		/*
+		 * There is no cycle running at present,
+		 * so we can start a cycle.
+		 * Begin by setting Flash Cycle Done.
+		 */
+		hsfsts.hsf_status.flcdone = 1;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFSTS, hsfsts.regval);
+		ret_val = E1000_SUCCESS;
+	} else {
+		/*
+		 * Otherwise poll for some time so the current
+		 * cycle has a chance to end before giving up.
+		 */
+		for (i = 0; i < ICH_FLASH_READ_COMMAND_TIMEOUT; i++) {
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcinprog == 0) {
+				ret_val = E1000_SUCCESS;
+				break;
+			}
+			usec_delay(1);
+		}
+		if (ret_val == E1000_SUCCESS) {
+			/*
+			 * The previous cycle completed within the timeout,
+			 * so now set the Flash Cycle Done.
+			 */
+			hsfsts.hsf_status.flcdone = 1;
+			E1000_WRITE_FLASH_REG16(hw,
+			                        ICH_FLASH_HSFSTS,
+			                        hsfsts.regval);
+		} else {
+			DEBUGOUT("Flash controller busy, cannot get access");
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_flash_cycle_ich8lan - Starts flash cycle (read/write/erase)
+ *  @hw: pointer to the HW structure
+ *  @timeout: maximum time to wait for completion
+ *
+ *  This function starts a flash cycle and waits for its completion.
+ **/
+static s32 e1000_flash_cycle_ich8lan(struct e1000_hw *hw, u32 timeout)
+{
+	union ich8_hws_flash_ctrl hsflctl;
+	union ich8_hws_flash_status hsfsts;
+	s32 ret_val = -E1000_ERR_NVM;
+	u32 i = 0;
+
+	DEBUGFUNC("e1000_flash_cycle_ich8lan");
+
+	/* Start a cycle by writing 1 in Flash Cycle Go in Hw Flash Control */
+	hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+	hsflctl.hsf_ctrl.flcgo = 1;
+	E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+	/* wait till FDONE bit is set to 1 */
+	do {
+		hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+		if (hsfsts.hsf_status.flcdone == 1)
+			break;
+		usec_delay(1);
+	} while (i++ < timeout);
+
+	if (hsfsts.hsf_status.flcdone == 1 && hsfsts.hsf_status.flcerr == 0)
+		ret_val = E1000_SUCCESS;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_read_flash_word_ich8lan - Read word from flash
+ *  @hw: pointer to the HW structure
+ *  @offset: offset to data location
+ *  @data: pointer to the location for storing the data
+ *
+ *  Reads the flash word at offset into data.  Offset is converted
+ *  to bytes before read.
+ **/
+static s32 e1000_read_flash_word_ich8lan(struct e1000_hw *hw, u32 offset,
+                                         u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_flash_word_ich8lan");
+
+	if (!data) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Must convert offset into bytes. */
+	offset <<= 1;
+
+	ret_val = e1000_read_flash_data_ich8lan(hw, offset, 2, data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_flash_data_ich8lan - Read byte or word from NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte or word to read.
+ *  @size: Size of data to read, 1=byte 2=word
+ *  @data: Pointer to the word to store the value read.
+ *
+ *  Reads a byte or word from the NVM using the flash access registers.
+ **/
+static s32 e1000_read_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                         u8 size, u16 *data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+
+	DEBUGFUNC("e1000_read_flash_data_ich8lan");
+
+	if (size < 1 || size > 2 || offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		goto out;
+
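+	/* Translate the byte offset into a linear flash address. */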
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+	                    hw->nvm.flash_base_addr;
+
+	do {
+		usec_delay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != E1000_SUCCESS)
+			break;
+
+		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_READ;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+		                                ICH_FLASH_READ_COMMAND_TIMEOUT);
+
+		/*
+		 * If FCERR is set, clear it and retry the whole sequence
+		 * a few more times; otherwise read the result from the
+		 * Flash Data0 register, least significant byte first.
+		 */
+		if (ret_val == E1000_SUCCESS) {
+			flash_data = E1000_READ_FLASH_REG(hw, ICH_FLASH_FDATA0);
+			if (size == 1) {
+				*data = (u8)(flash_data & 0x000000FF);
+			} else if (size == 2) {
+				*data = (u16)(flash_data & 0x0000FFFF);
+			}
+			break;
+		} else {
+			/*
+			 * If we've gotten here, then things are probably
+			 * completely hosed, but if the error condition is
+			 * detected, it won't hurt to give it another try...
+			 * ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				DEBUGOUT("Timeout error - flash cycle "
+				         "did not complete.");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_ich8lan - Write word(s) to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the word(s) to write.
+ *  @words: Size of data to write in words
+ *  @data: Pointer to the word(s) to write at offset.
+ *
+ *  Writes a byte or word to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_nvm_ich8lan(struct e1000_hw *hw, u16 offset, u16 words,
+                                   u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i;
+
+	DEBUGFUNC("e1000_write_nvm_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if ((offset >= nvm->word_size) || (words > nvm->word_size - offset) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
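+	/*
+	 * Writes are only staged in the shadow RAM here; they are
+	 * committed to the flash by e1000_update_nvm_checksum_ich8lan().
+	 */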
+	for (i = 0; i < words; i++) {
+		dev_spec->shadow_ram[offset+i].modified = TRUE;
+		dev_spec->shadow_ram[offset+i].value = data[i];
+	}
+
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_update_nvm_checksum_ich8lan - Update the checksum for NVM
+ *  @hw: pointer to the HW structure
+ *
+ *  The NVM checksum is updated by calling the generic update_nvm_checksum,
+ *  which writes the checksum to the shadow ram.  The changes in the shadow
+ *  ram are then committed to the EEPROM by processing each bank at a time
+ *  checking for the modified bit and writing only the pending changes.
+ *  After a successful commit, the shadow ram is cleared and is ready for
+ *  future writes.
+ **/
+static s32 e1000_update_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 i, act_offset, new_bank_offset, old_bank_offset, bank;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_update_nvm_checksum_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	ret_val = e1000_update_nvm_checksum_generic(hw);
+	if (ret_val)
+		goto out;
+
+	if (nvm->type != e1000_nvm_flash_sw)
+		goto out;
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * We're writing to the opposite bank so if we're on bank 1,
+	 * write to bank 0 etc.  We also need to erase the segment that
+	 * is going to be written
+	 */
+	ret_val =  e1000_valid_nvm_bank_detect_ich8lan(hw, &bank);
+	if (ret_val != E1000_SUCCESS)
+		goto out;
+
+	if (bank == 0) {
+		new_bank_offset = nvm->flash_bank_size;
+		old_bank_offset = 0;
+		e1000_erase_flash_bank_ich8lan(hw, 1);
+	} else {
+		old_bank_offset = nvm->flash_bank_size;
+		new_bank_offset = 0;
+		e1000_erase_flash_bank_ich8lan(hw, 0);
+	}
+
+	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+		/*
+		 * Determine whether to write the value stored
+		 * in the other NVM bank or a modified value stored
+		 * in the shadow RAM
+		 */
+		if (dev_spec->shadow_ram[i].modified) {
+			data = dev_spec->shadow_ram[i].value;
+		} else {
+			e1000_read_flash_word_ich8lan(hw,
+			                              i + old_bank_offset,
+			                              &data);
+		}
+
+		/*
+		 * If the word is 0x13, then make sure the signature bits
+		 * (15:14) are 11b until the commit has completed.
+		 * This will allow us to write 10b which indicates the
+		 * signature is valid.  We want to do this after the write
+		 * has completed so that we don't mark the segment valid
+		 * while the write is still in progress
+		 */
+		if (i == E1000_ICH_NVM_SIG_WORD)
+			data |= E1000_ICH_NVM_SIG_MASK;
+
+		/* Convert offset to bytes. */
+		act_offset = (i + new_bank_offset) << 1;
+
+		usec_delay(100);
+		/* Write the bytes to the new bank. */
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+		                                               act_offset,
+		                                               (u8)data);
+		if (ret_val)
+			break;
+
+		usec_delay(100);
+		ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+		                                          act_offset + 1,
+		                                          (u8)(data >> 8));
+		if (ret_val)
+			break;
+	}
+
+	/*
+	 * Don't bother writing the segment valid bits if sector
+	 * programming failed.
+	 */
+	if (ret_val) {
+		DEBUGOUT("Flash commit failed.\n");
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/*
+	 * Finally, validate the new segment by setting bits 15:14 of
+	 * word 0x13 to 10b.  This can be done without an erase because
+	 * the bits start out as 11b and we only need to clear bit 14.
+	 */
+	act_offset = new_bank_offset + E1000_ICH_NVM_SIG_WORD;
+	e1000_read_flash_word_ich8lan(hw, act_offset, &data);
+	data &= 0xBFFF;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw,
+	                                               act_offset * 2 + 1,
+	                                               (u8)(data >> 8));
+	if (ret_val) {
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/*
+	 * And invalidate the previously valid segment by setting
+	 * its signature word (0x13) high_byte to 0b. This can be
+	 * done without an erase because flash erase sets all bits
+	 * to 1's. We can write 1's to 0's without an erase
+	 */
+	act_offset = (old_bank_offset + E1000_ICH_NVM_SIG_WORD) * 2 + 1;
+	ret_val = e1000_retry_write_flash_byte_ich8lan(hw, act_offset, 0);
+	if (ret_val) {
+		e1000_release_nvm(hw);
+		goto out;
+	}
+
+	/* Great!  Everything worked, we can now clear the cached entries. */
+	for (i = 0; i < E1000_SHADOW_RAM_WORDS; i++) {
+		dev_spec->shadow_ram[i].modified = FALSE;
+		dev_spec->shadow_ram[i].value = 0xFFFF;
+	}
+
+	e1000_release_nvm(hw);
+
+	/*
+	 * Reload the EEPROM, or else modifications will not appear
+	 * until after the next adapter reset.
+	 */
+	e1000_reload_nvm(hw);
+	msec_delay(10);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_validate_nvm_checksum_ich8lan - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Check whether the checksum needs to be fixed by reading bit 6 in word
+ *  0x19.  If the bit is 0, the EEPROM was modified but the checksum was
+ *  not recalculated, in which case we calculate the checksum and set
+ *  bit 6.
+ **/
+static s32 e1000_validate_nvm_checksum_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 data;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_ich8lan");
+
+	/*
+	 * Read 0x19 and check bit 6.  If this bit is 0, the checksum
+	 * needs to be fixed.  This bit is an indication that the NVM
+	 * was prepared by OEM software and did not calculate the
+	 * checksum...a likely scenario.
+	 */
+	ret_val = e1000_read_nvm(hw, 0x19, 1, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & 0x40) == 0) {
+		data |= 0x40;
+		ret_val = e1000_write_nvm(hw, 0x19, 1, &data);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_update_nvm_checksum(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_validate_nvm_checksum_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_flash_data_ich8lan - Writes bytes to the NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset (in bytes) of the byte/word to write.
+ *  @size: Size of data to write, 1=byte 2=word
+ *  @data: The byte(s) to write to the NVM.
+ *
+ *  Writes one/two bytes to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_data_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u8 size, u16 data)
+{
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	u32 flash_data = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+	u8 count = 0;
+
+	DEBUGFUNC("e1000_write_ich8_data");
+
+	if (size < 1 || size > 2 || data > size * 0xff ||
+	    offset > ICH_FLASH_LINEAR_ADDR_MASK)
+		goto out;
+
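+	/* Translate the byte offset into a linear flash address. */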
+	flash_linear_addr = (ICH_FLASH_LINEAR_ADDR_MASK & offset) +
+	                    hw->nvm.flash_base_addr;
+
+	do {
+		usec_delay(1);
+		/* Steps */
+		ret_val = e1000_flash_cycle_init_ich8lan(hw);
+		if (ret_val != E1000_SUCCESS)
+			break;
+
+		hsflctl.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFCTL);
+		/* 0b/1b corresponds to 1 or 2 byte size, respectively. */
+		hsflctl.hsf_ctrl.fldbcount = size - 1;
+		hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_WRITE;
+		E1000_WRITE_FLASH_REG16(hw, ICH_FLASH_HSFCTL, hsflctl.regval);
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FADDR, flash_linear_addr);
+
+		if (size == 1)
+			flash_data = (u32)data & 0x00FF;
+		else
+			flash_data = (u32)data;
+
+		E1000_WRITE_FLASH_REG(hw, ICH_FLASH_FDATA0, flash_data);
+
+		/*
+		 * If FCERR is set, clear it and retry the whole sequence
+		 * a few more times; otherwise we are done.
+		 */
+		ret_val = e1000_flash_cycle_ich8lan(hw,
+		                               ICH_FLASH_WRITE_COMMAND_TIMEOUT);
+		if (ret_val == E1000_SUCCESS) {
+			break;
+		} else {
+			/*
+			 * If we're here, then things are most likely
+			 * completely hosed, but if the error condition
+			 * is detected, it won't hurt to give it another
+			 * try...ICH_FLASH_CYCLE_REPEAT_COUNT times.
+			 */
+			hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFSTS);
+			if (hsfsts.hsf_status.flcerr == 1) {
+				/* Repeat for some time before giving up. */
+				continue;
+			} else if (hsfsts.hsf_status.flcdone == 0) {
+				DEBUGOUT("Timeout error - flash cycle "
+				         "did not complete.");
+				break;
+			}
+		}
+	} while (count++ < ICH_FLASH_CYCLE_REPEAT_COUNT);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_flash_byte_ich8lan - Write a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to write.
+ *  @data: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ **/
+static s32 e1000_write_flash_byte_ich8lan(struct e1000_hw *hw, u32 offset,
+                                          u8 data)
+{
+	u16 word = (u16)data;
+
+	DEBUGFUNC("e1000_write_flash_byte_ich8lan");
+
+	return e1000_write_flash_data_ich8lan(hw, offset, 1, word);
+}
+
+/**
+ *  e1000_retry_write_flash_byte_ich8lan - Writes a single byte to NVM
+ *  @hw: pointer to the HW structure
+ *  @offset: The offset of the byte to write.
+ *  @byte: The byte to write to the NVM.
+ *
+ *  Writes a single byte to the NVM using the flash access registers.
+ *  Goes through a retry algorithm before giving up.
+ **/
+static s32 e1000_retry_write_flash_byte_ich8lan(struct e1000_hw *hw,
+                                                u32 offset, u8 byte)
+{
+	s32 ret_val;
+	u16 program_retries;
+
+	DEBUGFUNC("e1000_retry_write_flash_byte_ich8lan");
+
+	ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+	if (ret_val == E1000_SUCCESS)
+		goto out;
+
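+	/* Retry the write up to 100 times, waiting 100us between attempts. */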
+	for (program_retries = 0; program_retries < 100; program_retries++) {
+		DEBUGOUT2("Retrying Byte %2.2X at offset %u\n", byte, offset);
+		usec_delay(100);
+		ret_val = e1000_write_flash_byte_ich8lan(hw, offset, byte);
+		if (ret_val == E1000_SUCCESS)
+			break;
+	}
+	if (program_retries == 100) {
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_erase_flash_bank_ich8lan - Erase a bank (4k) from NVM
+ *  @hw: pointer to the HW structure
+ *  @bank: 0 for first bank, 1 for second bank, etc.
+ *
+ *  Erases the specified bank.  Each bank is a 4k block.  Banks are 0 based;
+ *  bank N starts at (4096 * N) + flash_base_addr.
+ **/
+static s32 e1000_erase_flash_bank_ich8lan(struct e1000_hw *hw, u32 bank)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	union ich8_hws_flash_status hsfsts;
+	union ich8_hws_flash_ctrl hsflctl;
+	u32 flash_linear_addr;
+	/* bank size is in 16bit words - adjust to bytes */
+	u32 flash_bank_size = nvm->flash_bank_size * 2;
+	s32  ret_val = E1000_SUCCESS;
+	s32  count = 0;
+	s32  j, iteration, sector_size;
+
+	DEBUGFUNC("e1000_erase_flash_bank_ich8lan");
+
+	hsfsts.regval = E1000_READ_FLASH_REG16(hw, ICH_FLASH_HSFSTS);
+
+	/*
+	 * Determine HW Sector size: Read BERASE bits of hw flash status
+	 * register
+	 * 00: The Hw sector is 256 bytes, hence we need to erase 16
+	 *     consecutive sectors.  The start index for the nth Hw sector
+	 *     can be calculated as = bank * 4096 + n * 256
+	 * 01: The Hw sector is 4K bytes, hence we need to erase 1 sector.
+	 *     The start index for the nth Hw sector can be calculated
+	 *     as = bank * 4096
+	 * 10: The Hw sector is 8K bytes, nth sector = bank * 8192
+	 *     (ich9 only, otherwise error condition)
+	 * 11: The Hw sector is 64K bytes, nth sector = bank * 65536
+	 */
+	switch (hsfsts.hsf_status.berasesz) {
+	case 0:
+		/* Hw sector size 256 */
+		sector_size = ICH_FLASH_SEG_SIZE_256;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_256;
+		break;
+	case 1:
+		sector_size = ICH_FLASH_SEG_SIZE_4K;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_4K;
+		break;
+	case 2:
+		if (hw->mac.type == e1000_ich9lan) {
+			sector_size = ICH_FLASH_SEG_SIZE_8K;
+			iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_8K;
+		} else {
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+		break;
+	case 3:
+		sector_size = ICH_FLASH_SEG_SIZE_64K;
+		iteration = flash_bank_size / ICH_FLASH_SEG_SIZE_64K;
+		break;
+	default:
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Start with the base address, then add the sector offset. */
+	flash_linear_addr = hw->nvm.flash_base_addr;
+	flash_linear_addr += (bank) ? (sector_size * iteration) : 0;
+
+	for (j = 0; j < iteration; j++) {
+		do {
+			/* Steps */
+			ret_val = e1000_flash_cycle_init_ich8lan(hw);
+			if (ret_val)
+				goto out;
+
+			/*
+			 * Write a value 11 (block Erase) in Flash
+			 * Cycle field in hw flash control
+			 */
+			hsflctl.regval = E1000_READ_FLASH_REG16(hw,
+			                                      ICH_FLASH_HSFCTL);
+			hsflctl.hsf_ctrl.flcycle = ICH_CYCLE_ERASE;
+			E1000_WRITE_FLASH_REG16(hw,
+			                        ICH_FLASH_HSFCTL,
+			                        hsflctl.regval);
+
+			/*
+			 * Write the last 24 bits of the sector's linear
+			 * address into the Flash Address register.  Compute
+			 * the address fresh each time so that retries and
+			 * later sectors do not accumulate the offset.
+			 */
+			E1000_WRITE_FLASH_REG(hw,
+			                      ICH_FLASH_FADDR,
+			                      flash_linear_addr +
+			                      (j * sector_size));
+
+			ret_val = e1000_flash_cycle_ich8lan(hw,
+			                       ICH_FLASH_ERASE_COMMAND_TIMEOUT);
+			if (ret_val == E1000_SUCCESS) {
+				break;
+			} else {
+				/*
+				 * If FCERR is set, clear it and retry
+				 * the whole sequence a few more times;
+				 * otherwise we are done.
+				 */
+				hsfsts.regval = E1000_READ_FLASH_REG16(hw,
+				                              ICH_FLASH_HSFSTS);
+				if (hsfsts.hsf_status.flcerr == 1) {
+					/*
+					 * repeat for some time before
+					 * giving up
+					 */
+					continue;
+				} else if (hsfsts.hsf_status.flcdone == 0)
+					goto out;
+			}
+		} while (++count < ICH_FLASH_CYCLE_REPEAT_COUNT);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_ich8lan - Set the default LED settings
+ *  @hw: pointer to the HW structure
+ *  @data: Pointer to the LED settings
+ *
+ *  Reads the LED default settings from the NVM to data.  If the NVM LED
+ *  settings is all 0's or F's, set the LED default to a valid LED default
+ *  setting.
+ **/
+static s32 e1000_valid_led_default_ich8lan(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_valid_led_default_ich8lan");
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 ||
+	    *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT_ICH8LAN;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_ich8lan - Get/Set the bus type and width
+ *  @hw: pointer to the HW structure
+ *
+ *  ICH8 uses the PCI Express bus, but does not contain a PCI Express
+ *  Capability register, so the bus width is hard-coded.
+ **/
+static s32 e1000_get_bus_info_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_bus_info_ich8lan");
+
+	ret_val = e1000_get_bus_info_pcie_generic(hw);
+
+	/*
+	 * ICH devices are "PCI Express"-ish.  They have
+	 * a configuration space, but do not contain
+	 * PCI Express Capability registers, so bus width
+	 * must be hardcoded.
+	 */
+	if (bus->width == e1000_bus_width_unknown)
+		bus->width = e1000_bus_width_pcie_x1;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_reset_hw_ich8lan - Reset the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a full reset of the hardware which includes a reset of the PHY and
+ *  MAC.
+ **/
+static s32 e1000_reset_hw_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl, icr, kab;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_reset_hw_ich8lan");
+
+	/*
+	 * Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = e1000_disable_pcie_master_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("PCI-E Master disable polling has failed.\n");
+	}
+
+	DEBUGOUT("Masking off all interrupts\n");
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
+
+	/*
+	 * Disable the Transmit and Receive units.  Then delay to allow
+	 * any pending transactions to complete before we hit the MAC
+	 * with the global reset.
+	 */
+	E1000_WRITE_REG(hw, E1000_RCTL, 0);
+	E1000_WRITE_REG(hw, E1000_TCTL, E1000_TCTL_PSP);
+	E1000_WRITE_FLUSH(hw);
+
+	msec_delay(10);
+
+	/* Workaround for ICH8 bit corruption issue in FIFO memory */
+	if (hw->mac.type == e1000_ich8lan) {
+		/* Set Tx and Rx buffer allocation to 8k apiece. */
+		E1000_WRITE_REG(hw, E1000_PBA, E1000_PBA_8K);
+		/* Set Packet Buffer Size to 16k. */
+		E1000_WRITE_REG(hw, E1000_PBS, E1000_PBS_16K);
+	}
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	if (!e1000_check_reset_block(hw) && !hw->phy.reset_disable) {
+		/*
+		 * PHY HW reset requires MAC CORE reset at the same
+		 * time to make sure the interface between MAC and the
+		 * external PHY is reset.
+		 */
+		ctrl |= E1000_CTRL_PHY_RST;
+	}
+	ret_val = e1000_acquire_swflag_ich8lan(hw);
+	DEBUGOUT("Issuing a global reset to ich8lan");
+	E1000_WRITE_REG(hw, E1000_CTRL, (ctrl | E1000_CTRL_RST));
+	msec_delay(20);
+
+	ret_val = e1000_get_auto_rd_done_generic(hw);
+	if (ret_val) {
+		/*
+		 * When the auto config read does not complete, do not
+		 * return an error.  This can happen when there is no
+		 * EEPROM; returning an error would prevent getting link.
+		 */
+		DEBUGOUT("Auto Read Done did not complete\n");
+	}
+
+	E1000_WRITE_REG(hw, E1000_IMC, 0xffffffff);
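+	/* ICR is clear-on-read; this read discharges any pending events. */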
+	icr = E1000_READ_REG(hw, E1000_ICR);
+
+	kab = E1000_READ_REG(hw, E1000_KABGTXD);
+	kab |= E1000_KABGTXD_BGSQLBIAS;
+	E1000_WRITE_REG(hw, E1000_KABGTXD, kab);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_init_hw_ich8lan - Initialize the hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  Prepares the hardware for transmit and receive by doing the following:
+ *   - initialize hardware bits
+ *   - initialize LED identification
+ *   - setup receive address registers
+ *   - setup flow control
+ *   - setup transmit descriptors
+ *   - clear statistics
+ **/
+static s32 e1000_init_hw_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl_ext, txdctl, snoop;
+	s32 ret_val;
+	u16 i;
+
+	DEBUGFUNC("e1000_init_hw_ich8lan");
+
+	e1000_initialize_hw_bits_ich8lan(hw);
+
+	/* Initialize identification LED */
+	ret_val = e1000_id_led_init_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Setup the receive address. */
+	e1000_init_rx_addrs_generic(hw, mac->rar_entry_count);
+
+	/* Zero out the Multicast HASH table */
+	DEBUGOUT("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = e1000_setup_link(hw);
+
+	/* Set the transmit descriptor write-back policy for both queues */
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), txdctl);
+	txdctl = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	txdctl = (txdctl & ~E1000_TXDCTL_WTHRESH) |
+		 E1000_TXDCTL_FULL_TX_DESC_WB;
+	txdctl = (txdctl & ~E1000_TXDCTL_PTHRESH) |
+	         E1000_TXDCTL_MAX_TX_DESC_PREFETCH;
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), txdctl);
+
+	/*
+	 * ICH8 has opposite polarity of no_snoop bits.
+	 * By default, we should use snoop behavior.
+	 */
+	if (mac->type == e1000_ich8lan)
+		snoop = PCIE_ICH8_SNOOP_ALL;
+	else
+		snoop = (u32)~(PCIE_NO_SNOOP_ALL);
+	e1000_set_pcie_no_snoop_generic(hw, snoop);
+
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_RO_DIS;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+
+	/*
+	 * Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	e1000_clear_hw_cntrs_ich8lan(hw);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_initialize_hw_bits_ich8lan - Initialize required hardware bits
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets/Clears required hardware bits necessary for correctly setting up the
+ *  hardware for transmit and receive.
+ **/
+static void e1000_initialize_hw_bits_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	DEBUGFUNC("e1000_initialize_hw_bits_ich8lan");
+
+	if (hw->mac.disable_hw_init_bits)
+		goto out;
+
+	/* Extended Device Control */
+	reg = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, reg);
+
+	/* Transmit Descriptor Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(0));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(0), reg);
+
+	/* Transmit Descriptor Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TXDCTL(1));
+	reg |= (1 << 22);
+	E1000_WRITE_REG(hw, E1000_TXDCTL(1), reg);
+
+	/* Transmit Arbitration Control 0 */
+	reg = E1000_READ_REG(hw, E1000_TARC(0));
+	if (hw->mac.type == e1000_ich8lan)
+		reg |= (1 << 28) | (1 << 29);
+	reg |= (1 << 23) | (1 << 24) | (1 << 26) | (1 << 27);
+	E1000_WRITE_REG(hw, E1000_TARC(0), reg);
+
+	/* Transmit Arbitration Control 1 */
+	reg = E1000_READ_REG(hw, E1000_TARC(1));
+	if (E1000_READ_REG(hw, E1000_TCTL) & E1000_TCTL_MULR)
+		reg &= ~(1 << 28);
+	else
+		reg |= (1 << 28);
+	reg |= (1 << 24) | (1 << 26) | (1 << 30);
+	E1000_WRITE_REG(hw, E1000_TARC(1), reg);
+
+	/* Device Status */
+	if (hw->mac.type == e1000_ich8lan) {
+		reg = E1000_READ_REG(hw, E1000_STATUS);
+		reg &= ~(1 << 31);
+		E1000_WRITE_REG(hw, E1000_STATUS, reg);
+	}
+
+out:
+	return;
+}
+
+/**
+ *  e1000_setup_link_ich8lan - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+static s32 e1000_setup_link_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_link_ich8lan");
+
+	if (e1000_check_reset_block(hw))
+		goto out;
+
+	/*
+	 * ICH parts do not have a word in the NVM to determine
+	 * the default flow control setting, so we explicitly
+	 * set it to full.
+	 */
+	if (hw->fc.type == e1000_fc_default)
+		hw->fc.type = e1000_fc_full;
+
+	hw->fc.original_type = hw->fc.type;
+
+	DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type);
+
+	/* Continue to configure the copper link. */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_copper_link_ich8lan - Configure MAC/PHY interface
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the Kumeran interface to the PHY to wait the appropriate time
+ *  when polling the PHY, then calls the generic setup_copper_link to finish
+ *  configuring the copper link.
+ **/
+static s32 e1000_setup_copper_link_ich8lan(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_setup_copper_link_ich8lan");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	/*
+	 * Set the mac to wait the maximum time between each iteration
+	 * and increase the max iterations when polling the phy;
+	 * this fixes erroneous timeouts at 10Mbps.
+	 */
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 4), 0xFFFF);
+	if (ret_val)
+		goto out;
+	ret_val = e1000_read_kmrn_reg(hw, GG82563_REG(0x34, 9), &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= 0x3F;
+	ret_val = e1000_write_kmrn_reg(hw, GG82563_REG(0x34, 9), reg_data);
+	if (ret_val)
+		goto out;
+
+	if (hw->phy.type == e1000_phy_igp_3) {
+		ret_val = e1000_copper_link_setup_igp(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	if (hw->phy.type == e1000_phy_ife) {
+		ret_val = e1000_read_phy_reg(hw, IFE_PHY_MDIX_CONTROL, &reg_data);
+		if (ret_val)
+			goto out;
+
+		reg_data &= ~IFE_PMC_AUTO_MDIX;
+
+		switch (hw->phy.mdix) {
+		case 1:
+			reg_data &= ~IFE_PMC_FORCE_MDIX;
+			break;
+		case 2:
+			reg_data |= IFE_PMC_FORCE_MDIX;
+			break;
+		case 0:
+		default:
+			reg_data |= IFE_PMC_AUTO_MDIX;
+			break;
+		}
+		ret_val = e1000_write_phy_reg(hw, IFE_PHY_MDIX_CONTROL, reg_data);
+		if (ret_val)
+			goto out;
+	}
+	ret_val = e1000_setup_copper_link_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_link_up_info_ich8lan - Get current link speed and duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: pointer to store current link speed
+ *  @duplex: pointer to store the current link duplex
+ *
+ *  Calls the generic get_speed_and_duplex to retrieve the current link
+ *  information and then calls the Kumeran lock loss workaround for links at
+ *  gigabit speeds.
+ **/
+static s32 e1000_get_link_up_info_ich8lan(struct e1000_hw *hw, u16 *speed,
+                                          u16 *duplex)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_get_link_up_info_ich8lan");
+
+	ret_val = e1000_get_speed_and_duplex_copper_generic(hw, speed, duplex);
+	if (ret_val)
+		goto out;
+
+	if ((hw->mac.type == e1000_ich8lan) &&
+	    (hw->phy.type == e1000_phy_igp_3) &&
+	    (*speed == SPEED_1000)) {
+		ret_val = e1000_kmrn_lock_loss_workaround_ich8lan(hw);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_kmrn_lock_loss_workaround_ich8lan - Kumeran workaround
+ *  @hw: pointer to the HW structure
+ *
+ *  Work-around for 82566 Kumeran PCS lock loss:
+ *  On link status change (i.e. PCI reset, speed change) and link is up and
+ *  speed is gigabit-
+ *    0) if workaround is optionally disabled do nothing
+ *    1) wait 1ms for Kumeran link to come up
+ *    2) check Kumeran Diagnostic register PCS lock loss bit
+ *    3) if not set the link is locked (all is good), otherwise...
+ *    4) reset the PHY
+ *    5) repeat up to 10 times
+ *  Note: this is only called for IGP3 copper when speed is 1gb.
+ **/
+static s32 e1000_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec;
+	u32 phy_ctrl;
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, data;
+	bool link;
+
+	DEBUGFUNC("e1000_kmrn_lock_loss_workaround_ich8lan");
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if (!(dev_spec->kmrn_lock_loss_workaround_enabled))
+		goto out;
+
+	/*
+	 * Make sure link is up before proceeding.  If not, just return.
+	 * Attempting this while link is negotiating fouls up link
+	 * stability.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (!link) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
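+	/* Try up to 10 times: check for PCS lock, resetting the PHY on a miss. */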
+	for (i = 0; i < 10; i++) {
+		/* read once to clear */
+		ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			goto out;
+		/* and again to get new status */
+		ret_val = e1000_read_phy_reg(hw, IGP3_KMRN_DIAG, &data);
+		if (ret_val)
+			goto out;
+
+		/* check for PCS lock */
+		if (!(data & IGP3_KMRN_DIAG_PCS_LOCK_LOSS)) {
+			ret_val = E1000_SUCCESS;
+			goto out;
+		}
+
+		/* Issue PHY reset */
+		e1000_phy_hw_reset(hw);
+		msec_delay_irq(5);
+	}
+	/* Disable GigE link negotiation */
+	phy_ctrl = E1000_READ_REG(hw, E1000_PHY_CTRL);
+	phy_ctrl |= (E1000_PHY_CTRL_GBE_DISABLE |
+	             E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+	E1000_WRITE_REG(hw, E1000_PHY_CTRL, phy_ctrl);
+
+	/*
+	 * Call gig speed drop workaround on Giga disable before accessing
+	 * any PHY registers
+	 */
+	e1000_gig_downshift_workaround_ich8lan(hw);
+
+	/* unable to acquire PCS lock */
+	ret_val = -E1000_ERR_PHY;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_set_kmrn_lock_loss_workaround_ich8lan - Set Kumeran workaround state
+ *  @hw: pointer to the HW structure
+ *  @state: boolean value used to set the current Kumeran workaround state
+ *
+ *  If ICH8, set the current Kumeran workaround state (enabled - TRUE
+ *  /disabled - FALSE).
+ **/
+void e1000_set_kmrn_lock_loss_workaround_ich8lan(struct e1000_hw *hw,
+                                                 bool state)
+{
+	struct e1000_dev_spec_ich8lan *dev_spec;
+
+	DEBUGFUNC("e1000_set_kmrn_lock_loss_workaround_ich8lan");
+
+	if (hw->mac.type != e1000_ich8lan) {
+		DEBUGOUT("Workaround applies to ICH8 only.\n");
+		goto out;
+	}
+
+	dev_spec = (struct e1000_dev_spec_ich8lan *)hw->dev_spec;
+
+	if (!dev_spec) {
+		DEBUGOUT("dev_spec pointer is set to NULL.\n");
+		goto out;
+	}
+
+	dev_spec->kmrn_lock_loss_workaround_enabled = state;
+
+out:
+	return;
+}
+
+/**
+ *  e1000_igp3_phy_powerdown_workaround_ich8lan - Power down workaround on D3
+ *  @hw: pointer to the HW structure
+ *
+ *  Workaround for 82566 power-down on D3 entry:
+ *    1) disable gigabit link
+ *    2) write VR power-down enable
+ *    3) read it back
+ *  Continue if successful, else issue LCD reset and repeat
+ **/
+void e1000_igp3_phy_powerdown_workaround_ich8lan(struct e1000_hw *hw)
+{
+	u32 reg;
+	u16 data;
+	u8  retry = 0;
+
+	DEBUGFUNC("e1000_igp3_phy_powerdown_workaround_ich8lan");
+
+	if (hw->phy.type != e1000_phy_igp_3)
+		goto out;
+
+	/* Try the workaround twice (if needed) */
+	do {
+		/* Disable link */
+		reg = E1000_READ_REG(hw, E1000_PHY_CTRL);
+		reg |= (E1000_PHY_CTRL_GBE_DISABLE |
+		        E1000_PHY_CTRL_NOND0A_GBE_DISABLE);
+		E1000_WRITE_REG(hw, E1000_PHY_CTRL, reg);
+
+		/*
+		 * Call gig speed drop workaround on Giga disable before
+		 * accessing any PHY registers
+		 */
+		if (hw->mac.type == e1000_ich8lan)
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* Write VR power-down enable */
+		e1000_read_phy_reg(hw, IGP3_VR_CTRL, &data);
+		data &= ~IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		e1000_write_phy_reg(hw,
+		                   IGP3_VR_CTRL,
+		                   data | IGP3_VR_CTRL_MODE_SHUTDOWN);
+
+		/* Read it back and test */
+		e1000_read_phy_reg(hw, IGP3_VR_CTRL, &data);
+		data &= IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK;
+		if ((data == IGP3_VR_CTRL_MODE_SHUTDOWN) || retry)
+			break;
+
+		/* Issue PHY reset and repeat at most one more time */
+		reg = E1000_READ_REG(hw, E1000_CTRL);
+		E1000_WRITE_REG(hw, E1000_CTRL, reg | E1000_CTRL_PHY_RST);
+		retry++;
+	} while (retry);
+
+out:
+	return;
+}
+
+/**
+ *  e1000_gig_downshift_workaround_ich8lan - WoL from S5 stops working
+ *  @hw: pointer to the HW structure
+ *
+ *  Steps to take when dropping from 1Gb/s (e.g. link cable removal (LSC),
+ *  LPLU, Giga disable, MDIC PHY reset):
+ *    1) Set Kumeran Near-end loopback
+ *    2) Clear Kumeran Near-end loopback
+ *  Should only be called for ICH8[m] devices with IGP_3 Phy.
+ **/
+void e1000_gig_downshift_workaround_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 reg_data;
+
+	DEBUGFUNC("e1000_gig_downshift_workaround_ich8lan");
+
+	if ((hw->mac.type != e1000_ich8lan) ||
+	    (hw->phy.type != e1000_phy_igp_3))
+		goto out;
+
+	ret_val = e1000_read_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+	                              &reg_data);
+	if (ret_val)
+		goto out;
+	reg_data |= E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+	                               reg_data);
+	if (ret_val)
+		goto out;
+	reg_data &= ~E1000_KMRNCTRLSTA_DIAG_NELPBK;
+	ret_val = e1000_write_kmrn_reg(hw, E1000_KMRNCTRLSTA_DIAG_OFFSET,
+	                               reg_data);
+out:
+	return;
+}
+
+/**
+ *  e1000_cleanup_led_ich8lan - Restore the default LED operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the LED back to the default configuration.
+ **/
+static s32 e1000_cleanup_led_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_cleanup_led_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+		                              IFE_PHY_SPECIAL_CONTROL_LED,
+		                              0);
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_led_on_ich8lan - Turn LED's on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn on the LED's.
+ **/
+static s32 e1000_led_on_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_led_on_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+		                IFE_PHY_SPECIAL_CONTROL_LED,
+		                (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_ON));
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_led_off_ich8lan - Turn LED's off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn off the LED's.
+ **/
+static s32 e1000_led_off_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_led_off_ich8lan");
+
+	if (hw->phy.type == e1000_phy_ife)
+		ret_val = e1000_write_phy_reg(hw,
+		               IFE_PHY_SPECIAL_CONTROL_LED,
+		               (IFE_PSCL_PROBE_MODE | IFE_PSCL_PROBE_LEDS_OFF));
+	else
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_ich8lan - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: silicon which is EEPROM-less will fail trying
+ *  to read the config done bit, so the error is *ONLY* logged and the
+ *  function returns E1000_SUCCESS.  If we returned an error, EEPROM-less
+ *  silicon would not be able to be reset or change link.
+ **/
+static s32 e1000_get_cfg_done_ich8lan(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	e1000_get_cfg_done_generic(hw);
+
+	/* If EEPROM is not marked present, init the IGP 3 PHY manually */
+	if (((E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_PRES) == 0) &&
+	    (hw->phy.type == e1000_phy_igp_3)) {
+		e1000_phy_init_script_igp3(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ * e1000_power_down_phy_copper_ich8lan - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, to turn off link during a
+ * driver unload, or when wake-on-LAN is not enabled, remove the link.
+ **/
+static void e1000_power_down_phy_copper_ich8lan(struct e1000_hw *hw)
+{
+	/* Power down unless management mode or a PHY reset block is active. */
+	if (!(e1000_check_mng_mode(hw) || e1000_check_reset_block(hw)))
+		e1000_power_down_phy_copper(hw);
+
+	return;
+}
+
+/**
+ *  e1000_clear_hw_cntrs_ich8lan - Clear statistical counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears hardware counters specific to the silicon family and calls
+ *  clear_hw_cntrs_generic to clear all general purpose counters.
+ **/
+static void e1000_clear_hw_cntrs_ich8lan(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_ich8lan");
+
+	e1000_clear_hw_cntrs_base_generic(hw);
+
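+	/* These counters are clear-on-read; reading them zeroes the values. */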
+	temp = E1000_READ_REG(hw, E1000_ALGNERRC);
+	temp = E1000_READ_REG(hw, E1000_RXERRC);
+	temp = E1000_READ_REG(hw, E1000_TNCRS);
+	temp = E1000_READ_REG(hw, E1000_CEXTERR);
+	temp = E1000_READ_REG(hw, E1000_TSCTC);
+	temp = E1000_READ_REG(hw, E1000_TSCTFC);
+
+	temp = E1000_READ_REG(hw, E1000_MGTPRC);
+	temp = E1000_READ_REG(hw, E1000_MGTPDC);
+	temp = E1000_READ_REG(hw, E1000_MGTPTC);
+
+	temp = E1000_READ_REG(hw, E1000_IAC);
+	temp = E1000_READ_REG(hw, E1000_ICRXOC);
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h
new file mode 100644
index 0000000..65e95c9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_ich8lan.h
@@ -0,0 +1,110 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_ICH8LAN_H_
+#define _E1000_ICH8LAN_H_
+
+#define ICH_FLASH_GFPREG                 0x0000
+#define ICH_FLASH_HSFSTS                 0x0004
+#define ICH_FLASH_HSFCTL                 0x0006
+#define ICH_FLASH_FADDR                  0x0008
+#define ICH_FLASH_FDATA0                 0x0010
+
+#define ICH_FLASH_READ_COMMAND_TIMEOUT   500
+#define ICH_FLASH_WRITE_COMMAND_TIMEOUT  500
+#define ICH_FLASH_ERASE_COMMAND_TIMEOUT  3000000
+#define ICH_FLASH_LINEAR_ADDR_MASK       0x00FFFFFF
+#define ICH_FLASH_CYCLE_REPEAT_COUNT     10
+
+#define ICH_CYCLE_READ                   0
+#define ICH_CYCLE_WRITE                  2
+#define ICH_CYCLE_ERASE                  3
+
+#define FLASH_GFPREG_BASE_MASK           0x1FFF
+#define FLASH_SECTOR_ADDR_SHIFT          12
+
+#define E1000_SHADOW_RAM_WORDS           2048
+
+#define ICH_FLASH_SEG_SIZE_256           256
+#define ICH_FLASH_SEG_SIZE_4K            4096
+#define ICH_FLASH_SEG_SIZE_8K            8192
+#define ICH_FLASH_SEG_SIZE_64K           65536
+#define ICH_FLASH_SECTOR_SIZE            4096
+
+#define ICH_FLASH_REG_MAPSIZE            0x00A0
+
+#define E1000_ICH_FWSM_RSPCIPHY          0x00000040 /* Reset PHY on PCI Reset */
+#define E1000_ICH_FWSM_DISSW             0x10000000 /* FW Disables SW Writes */
+/* FW established a valid mode */
+#define E1000_ICH_FWSM_FW_VALID          0x00008000
+
+#define E1000_ICH_MNG_IAMT_MODE          0x2
+
+#define ID_LED_DEFAULT_ICH8LAN  ((ID_LED_DEF1_DEF2 << 12) | \
+                                 (ID_LED_DEF1_OFF2 <<  8) | \
+                                 (ID_LED_DEF1_ON2  <<  4) | \
+                                 (ID_LED_DEF1_DEF2))
+
+#define E1000_ICH_NVM_SIG_WORD           0x13
+#define E1000_ICH_NVM_SIG_MASK           0xC000
+
+#define E1000_ICH8_LAN_INIT_TIMEOUT      1500
+
+#define E1000_FEXTNVM_SW_CONFIG        1
+#define E1000_FEXTNVM_SW_CONFIG_ICH8M (1 << 27) /* Bit redefined for ICH8M */
+
+#define PCIE_ICH8_SNOOP_ALL   PCIE_NO_SNOOP_ALL
+
+#define E1000_ICH_RAR_ENTRIES            7
+
+#define PHY_PAGE_SHIFT 5
+#define PHY_REG(page, reg) (((page) << PHY_PAGE_SHIFT) | \
+                           ((reg) & MAX_PHY_REG_ADDRESS))
+#define IGP3_KMRN_DIAG  PHY_REG(770, 19) /* KMRN Diagnostic */
+#define IGP3_VR_CTRL    PHY_REG(776, 18) /* Voltage Regulator Control */
+#define IGP3_CAPABILITY PHY_REG(776, 19) /* Capability */
+#define IGP3_PM_CTRL    PHY_REG(769, 20) /* Power Management Control */
+
+#define IGP3_KMRN_DIAG_PCS_LOCK_LOSS         0x0002
+#define IGP3_VR_CTRL_DEV_POWERDOWN_MODE_MASK 0x0300
+#define IGP3_VR_CTRL_MODE_SHUTDOWN           0x0200
+#define IGP3_PM_CTRL_FORCE_PWR_DOWN          0x0020
+
+/*
+ * Additional interrupts need to be handled for ICH family:
+ *  DSW = The FW changed the status of the DISSW bit in FWSM
+ *  PHYINT = The LAN connected device generates an interrupt
+ *  EPRST = Manageability reset event
+ */
+#define IMS_ICH_ENABLE_MASK (\
+    E1000_IMS_DSW   | \
+    E1000_IMS_PHYINT | \
+    E1000_IMS_EPRST)
+
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c
new file mode 100644
index 0000000..c60b402
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.c
@@ -0,0 +1,2039 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_mac.h"
+
+/**
+ *  e1000_remove_device_generic - Free device specific structure
+ *  @hw: pointer to the HW structure
+ *
+ *  If a device specific structure was allocated, this function will
+ *  free it.
+ **/
+void e1000_remove_device_generic(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_remove_device_generic");
+
+	/* Freeing the dev_spec member of e1000_hw structure */
+	e1000_free_dev_spec_struct(hw);
+}
+
+/**
+ *  e1000_get_bus_info_pci_generic - Get PCI(x) bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCI/PCIx), and PCI(-x) function.
+ **/
+s32 e1000_get_bus_info_pci_generic(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	u32 status = E1000_READ_REG(hw, E1000_STATUS);
+	s32 ret_val = E1000_SUCCESS;
+	u16 pci_header_type;
+
+	DEBUGFUNC("e1000_get_bus_info_pci_generic");
+
+	/* PCI or PCI-X? */
+	bus->type = (status & E1000_STATUS_PCIX_MODE)
+			? e1000_bus_type_pcix
+			: e1000_bus_type_pci;
+
+	/* Bus speed */
+	if (bus->type == e1000_bus_type_pci) {
+		bus->speed = (status & E1000_STATUS_PCI66)
+		             ? e1000_bus_speed_66
+		             : e1000_bus_speed_33;
+	} else {
+		switch (status & E1000_STATUS_PCIX_SPEED) {
+		case E1000_STATUS_PCIX_SPEED_66:
+			bus->speed = e1000_bus_speed_66;
+			break;
+		case E1000_STATUS_PCIX_SPEED_100:
+			bus->speed = e1000_bus_speed_100;
+			break;
+		case E1000_STATUS_PCIX_SPEED_133:
+			bus->speed = e1000_bus_speed_133;
+			break;
+		default:
+			bus->speed = e1000_bus_speed_reserved;
+			break;
+		}
+	}
+
+	/* Bus width */
+	bus->width = (status & E1000_STATUS_BUS64)
+	             ? e1000_bus_width_64
+	             : e1000_bus_width_32;
+
+	/* Which PCI(-X) function? */
+	e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC)
+		bus->func = (status & E1000_STATUS_FUNC_MASK)
+		            >> E1000_STATUS_FUNC_SHIFT;
+	else
+		bus->func = 0;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_get_bus_info_pcie_generic - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 e1000_get_bus_info_pcie_generic(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 status;
+	u16 pcie_link_status, pci_header_type;
+
+	DEBUGFUNC("e1000_get_bus_info_pcie_generic");
+
+	bus->type = e1000_bus_type_pci_express;
+	bus->speed = e1000_bus_speed_2500;
+
+	ret_val = e1000_read_pcie_cap_reg(hw,
+	                                  PCIE_LINK_STATUS,
+	                                  &pcie_link_status);
+	if (ret_val)
+		bus->width = e1000_bus_width_unknown;
+	else
+		bus->width = (e1000_bus_width)((pcie_link_status &
+		                                PCIE_LINK_WIDTH_MASK) >>
+		                               PCIE_LINK_WIDTH_SHIFT);
+
+	e1000_read_pci_cfg(hw, PCI_HEADER_TYPE_REGISTER, &pci_header_type);
+	if (pci_header_type & PCI_HEADER_TYPE_MULTIFUNC) {
+		status = E1000_READ_REG(hw, E1000_STATUS);
+		bus->func = (status & E1000_STATUS_FUNC_MASK)
+		            >> E1000_STATUS_FUNC_SHIFT;
+	} else {
+		bus->func = 0;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_clear_vfta_generic - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void e1000_clear_vfta_generic(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	DEBUGFUNC("e1000_clear_vfta_generic");
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_write_vfta_generic - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	DEBUGFUNC("e1000_write_vfta_generic");
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_VFTA, offset, value);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_init_rx_addrs_generic - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: receive address registers
+ *
+ *  Sets up the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+
+	DEBUGFUNC("e1000_init_rx_addrs_generic");
+
+	/* Setup the receive address */
+	DEBUGOUT("Programming MAC Address into RAR[0]\n");
+
+	e1000_rar_set_generic(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	DEBUGOUT1("Clearing RAR[1-%u]\n", rar_count-1);
+	for (i = 1; i < rar_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1), 0);
+		E1000_WRITE_FLUSH(hw);
+		E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((i << 1) + 1), 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+}
+
+/**
+ *  e1000_check_alt_mac_addr_generic - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the NVM for an alternate MAC address.  An alternate MAC address
+ *  can be set up by pre-boot software and must be treated like a permanent
+ *  address, overriding the actual permanent MAC address.  If an
+ *  alternate MAC address is found, it is saved in the hw struct,
+ *  programmed into RAR0 and the function returns success; otherwise the
+ *  function returns an error.
+ **/
+s32 e1000_check_alt_mac_addr_generic(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = E1000_SUCCESS;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ADDR_LEN];
+
+	DEBUGFUNC("e1000_check_alt_mac_addr_generic");
+
+	ret_val = e1000_read_nvm(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+	                         &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (nvm_alt_mac_addr_offset == 0xFFFF) {
+		ret_val = -(E1000_NOT_IMPLEMENTED);
+		goto out;
+	}
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += ETH_ADDR_LEN/sizeof(u16);
+
+	for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (alt_mac_addr[0] & 0x01) {
+		ret_val = -(E1000_NOT_IMPLEMENTED);
+		goto out;
+	}
+
+	for (i = 0; i < ETH_ADDR_LEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i] = alt_mac_addr[i];
+
+	e1000_rar_set(hw, hw->mac.perm_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_rar_set_generic - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+	DEBUGFUNC("e1000_rar_set_generic");
+
+	/*
+	 * HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] |
+	           ((u32) addr[1] << 8) |
+	           ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high) {
+		if (!hw->mac.disable_av)
+			rar_high |= E1000_RAH_AV;
+	}
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_RA, (index << 1), rar_low);
+	E1000_WRITE_REG_ARRAY(hw, E1000_RA, ((index << 1) + 1), rar_high);
+}
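+
+/*
+ * Worked example (editor's note): for the address 00:11:22:33:44:55 the
+ * packing above yields rar_low = 0x33221100 and rar_high = 0x5544; with
+ * the address-valid bit set, RAH becomes 0x5544 | E1000_RAH_AV.
+ */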
+
+/**
+ *  e1000_mta_set_generic - Set multicast filter table address
+ *  @hw: pointer to the HW structure
+ *  @hash_value: determines the MTA register and bit to set
+ *
+ *  The multicast table address is a register array of 32-bit registers.
+ *  The hash_value is used to determine what register the bit is in, the
+ *  current value is read, the new bit is OR'd in and the new value is
+ *  written back into the register.
+ **/
+void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg, mta;
+
+	DEBUGFUNC("e1000_mta_set_generic");
+	/*
+	 * The MTA is a register array of 32-bit registers. It is
+	 * treated like an array of (32*mta_reg_count) bits.  We want to
+	 * set bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
+	 * mask to bits 31:5 of the hash value which gives us the
+	 * register we're modifying.  The hash bit within that register
+	 * is determined by the lower 5 bits of the hash value.
+	 */
+	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+	hash_bit = hash_value & 0x1F;
+
+	mta = E1000_READ_REG_ARRAY(hw, E1000_MTA, hash_reg);
+
+	mta |= (1 << hash_bit);
+
+	E1000_WRITE_REG_ARRAY(hw, E1000_MTA, hash_reg, mta);
+	E1000_WRITE_FLUSH(hw);
+}
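+
+/*
+ * Worked example (editor's note): with mta_reg_count = 128 and
+ * hash_value = 0x563, hash_reg = (0x563 >> 5) & 0x7F = 0x2B and
+ * hash_bit = 0x563 & 0x1F = 0x03, so bit 3 of MTA[43] gets set.
+ */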
+
+/**
+ *  e1000_update_mc_addr_list_generic - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *  @rar_used_count: the first RAR register free to program
+ *  @rar_count: total number of supported Receive Address Registers
+ *
+ *  Updates the Receive Address Registers and Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ *  The parameter rar_count will usually be hw->mac.rar_entry_count
+ *  unless there are workarounds that change this.
+ **/
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+                                       u8 *mc_addr_list, u32 mc_addr_count,
+                                       u32 rar_used_count, u32 rar_count)
+{
+	u32 hash_value;
+	u32 i;
+
+	DEBUGFUNC("e1000_update_mc_addr_list_generic");
+
+	/*
+	 * Load the first set of multicast addresses into the exact
+	 * filters (RAR).  If there are not enough to fill the RAR
+	 * array, clear the filters.
+	 */
+	for (i = rar_used_count; i < rar_count; i++) {
+		if (mc_addr_count) {
+			e1000_rar_set(hw, mc_addr_list, i);
+			mc_addr_count--;
+			mc_addr_list += ETH_ADDR_LEN;
+		} else {
+			E1000_WRITE_REG_ARRAY(hw, E1000_RA, i << 1, 0);
+			E1000_WRITE_FLUSH(hw);
+			E1000_WRITE_REG_ARRAY(hw, E1000_RA, (i << 1) + 1, 0);
+			E1000_WRITE_FLUSH(hw);
+		}
+	}
+
+	/* Clear the old settings from the MTA */
+	DEBUGOUT("Clearing MTA\n");
+	for (i = 0; i < hw->mac.mta_reg_count; i++) {
+		E1000_WRITE_REG_ARRAY(hw, E1000_MTA, i, 0);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Load any remaining multicast addresses into the hash table. */
+	for (; mc_addr_count > 0; mc_addr_count--) {
+		hash_value = e1000_hash_mc_addr(hw, mc_addr_list);
+		DEBUGOUT1("Hash value = 0x%03X\n", hash_value);
+		e1000_mta_set(hw, hash_value);
+		mc_addr_list += ETH_ADDR_LEN;
+	}
+}
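+
+/*
+ * Worked example (editor's note): with rar_used_count = 1, rar_count = 7
+ * and mc_addr_count = 10, the first loop programs six addresses into
+ * RAR[1]..RAR[6]; the remaining four are hashed into the MTA by the
+ * final loop.
+ */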
+
+/**
+ *  e1000_hash_mc_addr_generic - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  See
+ *  e1000_mta_set_generic()
+ **/
+u32 e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr)
+{
+	u32 hash_value, hash_mask;
+	u8 bit_shift = 0;
+
+	DEBUGFUNC("e1000_hash_mc_addr_generic");
+
+	/* Register count multiplied by bits per register */
+	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+	/*
+	 * For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask.
+	 */
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;
+
+	/*
+	 * The portion of the address that is used for the hash table
+	 * is determined by the mc_filter_type setting.
+	 * The algorithm is such that there is a total of 8 bits of shifting.
+	 * The bit_shift for a mc_filter_type of 0 represents the number of
+	 * left-shifts where the MSB of mc_addr[5] would still fall within
+	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
+	 * of 8 bits of shifting, then mc_addr[4] will shift right the
+	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+	 * cases are a variation of this algorithm...essentially raising the
+	 * number of bits to shift mc_addr[5] left, while still keeping the
+	 * 8-bit shifting total.
+	 *
+	 * For example, given the following Destination MAC Address and an
+	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+	 * we can see that the bit_shift for case 0 is 4.  These are the hash
+	 * values resulting from each mc_filter_type...
+	 * [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB                 MSB
+	 *
+	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+	 */
+	switch (hw->mac.mc_filter_type) {
+	default:
+	case 0:
+		break;
+	case 1:
+		bit_shift += 1;
+		break;
+	case 2:
+		bit_shift += 2;
+		break;
+	case 3:
+		bit_shift += 4;
+		break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+	                          (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
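+
+/*
+ * Worked check of the example above (editor's note): a 128-register MTA
+ * gives hash_mask = 0xFFF and bit_shift = 4 for case 0, so
+ * (0x34 >> 4) | (0x56 << 4) = 0x003 | 0x560 = 0x563, matching the
+ * documented result; case 1 (bit_shift = 5) gives
+ * (0x34 >> 3) | (0x56 << 5) = 0x006 | 0xAC0 = 0xAC6.
+ */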
+
+/**
+ *  e1000_pcix_mmrbc_workaround_generic - Fix incorrect MMRBC value
+ *  @hw: pointer to the HW structure
+ *
+ *  In certain situations, a system BIOS may report that the PCIx maximum
+ *  memory read byte count (MMRBC) value is higher than the actual
+ *  value.  We check the PCIx command register against the current PCIx
+ *  status register.
+ **/
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw)
+{
+	u16 cmd_mmrbc;
+	u16 pcix_cmd;
+	u16 pcix_stat_hi_word;
+	u16 stat_mmrbc;
+
+	DEBUGFUNC("e1000_pcix_mmrbc_workaround_generic");
+
+	/* Workaround for PCI-X issue when BIOS sets MMRBC incorrectly */
+	if (hw->bus.type != e1000_bus_type_pcix)
+		return;
+
+	e1000_read_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+	e1000_read_pci_cfg(hw, PCIX_STATUS_REGISTER_HI, &pcix_stat_hi_word);
+	cmd_mmrbc = (pcix_cmd & PCIX_COMMAND_MMRBC_MASK) >>
+	             PCIX_COMMAND_MMRBC_SHIFT;
+	stat_mmrbc = (pcix_stat_hi_word & PCIX_STATUS_HI_MMRBC_MASK) >>
+	              PCIX_STATUS_HI_MMRBC_SHIFT;
+	if (stat_mmrbc == PCIX_STATUS_HI_MMRBC_4K)
+		stat_mmrbc = PCIX_STATUS_HI_MMRBC_2K;
+	if (cmd_mmrbc > stat_mmrbc) {
+		pcix_cmd &= ~PCIX_COMMAND_MMRBC_MASK;
+		pcix_cmd |= stat_mmrbc << PCIX_COMMAND_MMRBC_SHIFT;
+		e1000_write_pci_cfg(hw, PCIX_COMMAND_REGISTER, &pcix_cmd);
+	}
+}
+
+/**
+ *  e1000_clear_hw_cntrs_base_generic - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw)
+{
+	volatile u32 temp;
+
+	DEBUGFUNC("e1000_clear_hw_cntrs_base_generic");
+
+	temp = E1000_READ_REG(hw, E1000_CRCERRS);
+	temp = E1000_READ_REG(hw, E1000_SYMERRS);
+	temp = E1000_READ_REG(hw, E1000_MPC);
+	temp = E1000_READ_REG(hw, E1000_SCC);
+	temp = E1000_READ_REG(hw, E1000_ECOL);
+	temp = E1000_READ_REG(hw, E1000_MCC);
+	temp = E1000_READ_REG(hw, E1000_LATECOL);
+	temp = E1000_READ_REG(hw, E1000_COLC);
+	temp = E1000_READ_REG(hw, E1000_DC);
+	temp = E1000_READ_REG(hw, E1000_SEC);
+	temp = E1000_READ_REG(hw, E1000_RLEC);
+	temp = E1000_READ_REG(hw, E1000_XONRXC);
+	temp = E1000_READ_REG(hw, E1000_XONTXC);
+	temp = E1000_READ_REG(hw, E1000_XOFFRXC);
+	temp = E1000_READ_REG(hw, E1000_XOFFTXC);
+	temp = E1000_READ_REG(hw, E1000_FCRUC);
+	temp = E1000_READ_REG(hw, E1000_GPRC);
+	temp = E1000_READ_REG(hw, E1000_BPRC);
+	temp = E1000_READ_REG(hw, E1000_MPRC);
+	temp = E1000_READ_REG(hw, E1000_GPTC);
+	temp = E1000_READ_REG(hw, E1000_GORCL);
+	temp = E1000_READ_REG(hw, E1000_GORCH);
+	temp = E1000_READ_REG(hw, E1000_GOTCL);
+	temp = E1000_READ_REG(hw, E1000_GOTCH);
+	temp = E1000_READ_REG(hw, E1000_RNBC);
+	temp = E1000_READ_REG(hw, E1000_RUC);
+	temp = E1000_READ_REG(hw, E1000_RFC);
+	temp = E1000_READ_REG(hw, E1000_ROC);
+	temp = E1000_READ_REG(hw, E1000_RJC);
+	temp = E1000_READ_REG(hw, E1000_TORL);
+	temp = E1000_READ_REG(hw, E1000_TORH);
+	temp = E1000_READ_REG(hw, E1000_TOTL);
+	temp = E1000_READ_REG(hw, E1000_TOTH);
+	temp = E1000_READ_REG(hw, E1000_TPR);
+	temp = E1000_READ_REG(hw, E1000_TPT);
+	temp = E1000_READ_REG(hw, E1000_MPTC);
+	temp = E1000_READ_REG(hw, E1000_BPTC);
+}
+
+/**
+ *  e1000_check_for_copper_link_generic - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 e1000_check_for_copper_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_check_for_copper_link");
+
+	/*
+	 * We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/*
+	 * First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = FALSE;
+
+	/*
+	 * Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	e1000_check_downshift_generic(hw);
+
+	/*
+	 * If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/*
+	 * Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	e1000_config_collision_dist_generic(hw);
+
+	/*
+	 * Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = e1000_config_fc_after_link_up_generic(hw);
+	if (ret_val) {
+		DEBUGOUT("Error configuring flow control\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_fiber_link_generic - Check for link (Fiber)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_fiber_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_check_for_fiber_link_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), the cable is plugged in (we have signal),
+	 * and our link partner is not trying to auto-negotiate with us (we
+	 * are receiving idles or data), we need to force link up. We also
+	 * need to give auto-negotiation time to complete, in case the cable
+	 * was just plugged in. The autoneg_failed flag does this.
+	 */
+	/* (ctrl & E1000_CTRL_SWDPIN1) == 1 == have signal */
+	if ((ctrl & E1000_CTRL_SWDPIN1) && (!(status & E1000_STATUS_LU)) &&
+	    (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			goto out;
+		}
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring flow control\n");
+			goto out;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = TRUE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_for_serdes_link_generic - Check for link (Serdes)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks for link up on the hardware.  If link is not up and we have
+ *  a signal, then we need to force link up.
+ **/
+s32 e1000_check_for_serdes_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 rxcw;
+	u32 ctrl;
+	u32 status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_check_for_serdes_link_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	rxcw = E1000_READ_REG(hw, E1000_RXCW);
+
+	/*
+	 * If we don't have link (auto-negotiation failed or link partner
+	 * cannot auto-negotiate), and our link partner is not trying to
+	 * auto-negotiate with us (we are receiving idles or data),
+	 * we need to force link up. We also need to give auto-negotiation
+	 * time to complete.
+	 */
+	if ((!(status & E1000_STATUS_LU)) && (!(rxcw & E1000_RXCW_C))) {
+		if (mac->autoneg_failed == 0) {
+			mac->autoneg_failed = 1;
+			goto out;
+		}
+		DEBUGOUT("NOT RXing /C/, disable AutoNeg and force link.\n");
+
+		/* Disable auto-negotiation in the TXCW register */
+		E1000_WRITE_REG(hw, E1000_TXCW, (mac->txcw & ~E1000_TXCW_ANE));
+
+		/* Force link-up and also force full-duplex. */
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= (E1000_CTRL_SLU | E1000_CTRL_FD);
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+		/* Configure Flow Control after forcing link up. */
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error configuring flow control\n");
+			goto out;
+		}
+	} else if ((ctrl & E1000_CTRL_SLU) && (rxcw & E1000_RXCW_C)) {
+		/*
+		 * If we are forcing link and we are receiving /C/ ordered
+		 * sets, re-enable auto-negotiation in the TXCW register
+		 * and disable forced link in the Device Control register
+		 * in an attempt to auto-negotiate with our link partner.
+		 */
+		DEBUGOUT("RXing /C/, enable AutoNeg and stop forcing link.\n");
+		E1000_WRITE_REG(hw, E1000_TXCW, mac->txcw);
+		E1000_WRITE_REG(hw, E1000_CTRL, (ctrl & ~E1000_CTRL_SLU));
+
+		mac->serdes_has_link = TRUE;
+	} else if (!(E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW))) {
+		/*
+		 * If we force link for non-auto-negotiation switch, check
+		 * link status based on MAC synchronization for internal
+		 * serdes media type.
+		 */
+		/* SYNCH bit and IV bit are sticky. */
+		usec_delay(10);
+		if (E1000_RXCW_SYNCH & E1000_READ_REG(hw, E1000_RXCW)) {
+			if (!(rxcw & E1000_RXCW_IV)) {
+				mac->serdes_has_link = TRUE;
+				DEBUGOUT("SERDES: Link is up.\n");
+			}
+		} else {
+			mac->serdes_has_link = FALSE;
+			DEBUGOUT("SERDES: Link is down.\n");
+		}
+	}
+
+	if (E1000_TXCW_ANE & E1000_READ_REG(hw, E1000_TXCW)) {
+		status = E1000_READ_REG(hw, E1000_STATUS);
+		mac->serdes_has_link = (status & E1000_STATUS_LU)
+					? TRUE
+					: FALSE;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_link_generic - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 e1000_setup_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_functions *func = &hw->func;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_link_generic");
+
+	/*
+	 * In the case of the phy reset being blocked, we already have a link.
+	 * We do not need to set it up again.
+	 */
+	if (e1000_check_reset_block(hw))
+		goto out;
+
+	/*
+	 * If flow control is set to default, set flow control based on
+	 * the EEPROM flow control settings.
+	 */
+	if (hw->fc.type == e1000_fc_default) {
+		ret_val = e1000_set_default_fc_generic(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	hw->fc.original_type = hw->fc.type;
+
+	DEBUGOUT1("After fix-ups FlowControl is now = %x\n", hw->fc.type);
+
+	/* Call the necessary media_type subroutine to configure the link. */
+	ret_val = func->setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	DEBUGOUT("Initializing the Flow Control address, type and timer regs\n");
+	E1000_WRITE_REG(hw, E1000_FCT, FLOW_CONTROL_TYPE);
+	E1000_WRITE_REG(hw, E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	E1000_WRITE_REG(hw, E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	E1000_WRITE_REG(hw, E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = e1000_set_fc_watermarks_generic(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_setup_fiber_serdes_link_generic - Setup link for fiber/serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures collision distance and flow control for fiber and serdes
+ *  links.  Upon successful setup, poll for link.
+ **/
+s32 e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_fiber_serdes_link_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/* Take the link out of reset */
+	ctrl &= ~E1000_CTRL_LRST;
+
+	e1000_config_collision_dist_generic(hw);
+
+	ret_val = e1000_commit_fc_settings_generic(hw);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Since auto-negotiation is enabled, take the link out of reset (the
+	 * link will be in reset, because we previously reset the chip). This
+	 * will restart auto-negotiation.  If auto-negotiation is successful
+	 * then the link-up status bit will be set and the flow control enable
+	 * bits (RFCE and TFCE) will be set according to their negotiated value.
+	 */
+	DEBUGOUT("Auto-negotiation enabled\n");
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+	msec_delay(1);
+
+	/*
+	 * For these adapters, the SW definable pin 1 is set when the optics
+	 * detect a signal.  If we have a signal, then poll for a "Link-Up"
+	 * indication.
+	 */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+	    (E1000_READ_REG(hw, E1000_CTRL) & E1000_CTRL_SWDPIN1)) {
+		ret_val = e1000_poll_fiber_serdes_link_generic(hw);
+	} else {
+		DEBUGOUT("No signal detected\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_collision_dist_generic - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void e1000_config_collision_dist_generic(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	DEBUGFUNC("e1000_config_collision_dist_generic");
+
+	tctl = E1000_READ_REG(hw, E1000_TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/**
+ *  e1000_poll_fiber_serdes_link_generic - Poll for link up
+ *  @hw: pointer to the HW structure
+ *
+ *  Polls for link up by reading the status register, if link fails to come
+ *  up with auto-negotiation, then the link is forced if a signal is detected.
+ **/
+s32 e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 i, status;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_poll_fiber_serdes_link_generic");
+
+	/*
+	 * If we have a signal (the cable is plugged in, or assumed true for
+	 * serdes media) then poll for a "Link-Up" indication in the Device
+	 * Status Register.  Time-out if a link isn't seen in 500 milliseconds
+	 * (Auto-negotiation should complete in less than 500
+	 * milliseconds even if the other end is doing it in SW).
+	 */
+	for (i = 0; i < FIBER_LINK_UP_LIMIT; i++) {
+		msec_delay(10);
+		status = E1000_READ_REG(hw, E1000_STATUS);
+		if (status & E1000_STATUS_LU)
+			break;
+	}
+	if (i == FIBER_LINK_UP_LIMIT) {
+		DEBUGOUT("Never got a valid link from auto-neg!!!\n");
+		mac->autoneg_failed = 1;
+		/*
+		 * AutoNeg failed to achieve a link, so we'll call
+		 * mac->check_for_link. This routine will force the
+		 * link up if we detect a signal. This will allow us to
+		 * communicate with non-autonegotiating link partners.
+		 */
+		ret_val = e1000_check_for_link(hw);
+		if (ret_val) {
+			DEBUGOUT("Error while checking for link\n");
+			goto out;
+		}
+		mac->autoneg_failed = 0;
+	} else {
+		mac->autoneg_failed = 0;
+		DEBUGOUT("Valid Link Found\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_commit_fc_settings_generic - Configure flow control
+ *  @hw: pointer to the HW structure
+ *
+ *  Write the flow control settings to the Transmit Config Word Register (TXCW)
+ *  based on the flow control settings in e1000_mac_info.
+ **/
+s32 e1000_commit_fc_settings_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 txcw;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_commit_fc_settings_generic");
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the device accordingly.  If auto-negotiation is enabled, then
+	 * software will have to set the "PAUSE" bits to the correct value in
+	 * the Transmit Config Word Register (TXCW) and re-start auto-
+	 * negotiation.  However, if auto-negotiation is disabled, then
+	 * software will have to manually configure the two flow control enable
+	 * bits in the CTRL register.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames,
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames but we
+	 *          do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 */
+	switch (hw->fc.type) {
+	case e1000_fc_none:
+		/* Flow control completely disabled by a software over-ride. */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled and Tx Flow control is disabled
+		 * by a software over-ride. Since there really isn't a way to
+		 * advertise that we are capable of Rx Pause ONLY, we will
+		 * advertise that we support both symmetric and asymmetric RX
+		 * PAUSE.  Later, we will disable the adapter's ability to send
+		 * PAUSE frames.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is disabled,
+		 * by a software over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_ASM_DIR);
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		txcw = (E1000_TXCW_ANE | E1000_TXCW_FD | E1000_TXCW_PAUSE_MASK);
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	E1000_WRITE_REG(hw, E1000_TXCW, txcw);
+	mac->txcw = txcw;
+
+out:
+	return ret_val;
+}
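+
+/*
+ * Summary of the TXCW encoding above (editor's note):
+ *   e1000_fc_none     -> ANE | FD
+ *   e1000_fc_rx_pause -> ANE | FD | PAUSE_MASK
+ *   e1000_fc_tx_pause -> ANE | FD | ASM_DIR
+ *   e1000_fc_full     -> ANE | FD | PAUSE_MASK
+ * rx_pause advertises the same bits as full because Rx-only cannot be
+ * advertised; e1000_config_fc_after_link_up_generic() later disables
+ * PAUSE transmission for the rx_pause case.
+ */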
+
+/**
+ *  e1000_set_fc_watermarks_generic - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then set XON frame
+ *  transmission as well.
+ **/
+s32 e1000_set_fc_watermarks_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u32 fcrtl = 0, fcrth = 0;
+
+	DEBUGFUNC("e1000_set_fc_watermarks_generic");
+
+	/*
+	 * Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.type & e1000_fc_tx_pause) {
+		/*
+		 * We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		if (hw->fc.send_xon)
+			fcrtl |= E1000_FCRTL_XONE;
+
+		fcrth = hw->fc.high_water;
+	}
+	E1000_WRITE_REG(hw, E1000_FCRTL, fcrtl);
+	E1000_WRITE_REG(hw, E1000_FCRTH, fcrth);
+
+	return ret_val;
+}
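+
+/*
+ * Illustrative values (editor's note, not taken from the driver): with
+ * hw->fc.high_water = 0x8000, hw->fc.low_water = 0x7000 and send_xon set,
+ * the code above writes FCRTH = 0x8000 and
+ * FCRTL = 0x7000 | E1000_FCRTL_XONE, so an XON frame is sent once the
+ * receive FIFO drains below the low water mark.
+ */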
+
+/**
+ *  e1000_set_default_fc_generic - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+s32 e1000_set_default_fc_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_set_default_fc_generic");
+
+	/*
+	 * Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	ret_val = e1000_read_nvm(hw, NVM_INIT_CONTROL2_REG, 1, &nvm_data);
+
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.type = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.type = e1000_fc_tx_pause;
+	else
+		hw->fc.type = e1000_fc_full;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_force_mac_fc_generic - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 e1000_force_mac_fc_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_force_mac_fc_generic");
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+
+	/*
+	 * Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disable flow control
+	 * according to the "hw->fc.type" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) is enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	DEBUGOUT1("hw->fc.type = %u\n", hw->fc.type);
+
+	switch (hw->fc.type) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_config_fc_after_link_up_generic - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 e1000_config_fc_after_link_up_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = E1000_SUCCESS;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+	DEBUGFUNC("e1000_config_fc_after_link_up_generic");
+
+	/*
+	 * Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->phy.media_type == e1000_media_type_fiber ||
+		    hw->phy.media_type == e1000_media_type_internal_serdes)
+			ret_val = e1000_force_mac_fc_generic(hw);
+	} else {
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ret_val = e1000_force_mac_fc_generic(hw);
+	}
+
+	if (ret_val) {
+		DEBUGOUT("Error forcing flow control settings\n");
+		goto out;
+	}
+
+	/*
+	 * Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/*
+		 * Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+			DEBUGOUT("Copper PHY and Auto Neg "
+			         "has not completed.\n");
+			goto out;
+		}
+
+		/*
+		 * The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto_Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+		                             &mii_nway_adv_reg);
+		if (ret_val)
+			goto out;
+		ret_val = e1000_read_phy_reg(hw, PHY_LP_ABILITY,
+		                             &mii_nway_lp_ability_reg);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * Two bits in the Auto Negotiation Advertisement Register
+		 * (Address 4) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (Address 5) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *
+		 */
+		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/*
+			 * Now we need to check if the user selected Rx ONLY
+			 * of pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise RX
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.original_type == e1000_fc_full) {
+				hw->fc.type = e1000_fc_full;
+				DEBUGOUT("Flow Control = FULL.\r\n");
+			} else {
+				hw->fc.type = e1000_fc_rx_pause;
+				DEBUGOUT("Flow Control = "
+				         "RX PAUSE frames only.\r\n");
+			}
+		}
+		/*
+		 * For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		          (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+		          (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+		          (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.type = e1000_fc_tx_pause;
+			DEBUGOUT("Flow Control = TX PAUSE frames only.\r\n");
+		}
+		/*
+		 * For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		         (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+		         !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+		         (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.type = e1000_fc_rx_pause;
+			DEBUGOUT("Flow Control = RX PAUSE frames only.\r\n");
+		} else {
+			/*
+			 * Per the IEEE spec, at this point flow control
+			 * should be disabled.
+			 */
+			hw->fc.type = e1000_fc_none;
+			DEBUGOUT("Flow Control = NONE.\r\n");
+		}
+
+		/*
+		 * Now we need to do one last check...  If we auto-
+		 * negotiated to HALF DUPLEX, flow control should not be
+		 * enabled per IEEE 802.3 spec.
+		 */
+		ret_val = e1000_get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			DEBUGOUT("Error getting link speed and duplex\n");
+			goto out;
+		}
+
+		if (duplex == HALF_DUPLEX)
+			hw->fc.type = e1000_fc_none;
+
+		/*
+		 * Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		ret_val = e1000_force_mac_fc_generic(hw);
+		if (ret_val) {
+			DEBUGOUT("Error forcing flow control settings\n");
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
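+
+/*
+ * Editor's sketch (not upstream code): the PAUSE resolution table above,
+ * expressed as a stand-alone helper.  The function name and the
+ * 'original_full' flag are hypothetical; the bit tests mirror the
+ * if/else chain in e1000_config_fc_after_link_up_generic().
+ */
+#if 0 /* illustrative only */
+static int e1000_resolve_fc_example(u16 adv, u16 lp, int original_full)
+{
+	/* Both PAUSE bits set: symmetric flow control, ASM_DIR irrelevant */
+	if ((adv & NWAY_AR_PAUSE) && (lp & NWAY_LPAR_PAUSE))
+		return original_full ? e1000_fc_full : e1000_fc_rx_pause;
+	/* Local asymmetric only, partner PAUSE + ASM_DIR: we may send only */
+	if (!(adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+	    (lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+		return e1000_fc_tx_pause;
+	/* Local PAUSE + ASM_DIR, partner asymmetric only: we may receive only */
+	if ((adv & NWAY_AR_PAUSE) && (adv & NWAY_AR_ASM_DIR) &&
+	    !(lp & NWAY_LPAR_PAUSE) && (lp & NWAY_LPAR_ASM_DIR))
+		return e1000_fc_rx_pause;
+	/* Every other row of the table resolves to no flow control */
+	return e1000_fc_none;
+}
+#endif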
+
+/**
+ *  e1000_get_speed_and_duplex_copper_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                              u16 *duplex)
+{
+	u32 status;
+
+	DEBUGFUNC("e1000_get_speed_and_duplex_copper_generic");
+
+	status = E1000_READ_REG(hw, E1000_STATUS);
+	if (status & E1000_STATUS_SPEED_1000) {
+		*speed = SPEED_1000;
+		DEBUGOUT("1000 Mbs, ");
+	} else if (status & E1000_STATUS_SPEED_100) {
+		*speed = SPEED_100;
+		DEBUGOUT("100 Mbs, ");
+	} else {
+		*speed = SPEED_10;
+		DEBUGOUT("10 Mbs, ");
+	}
+
+	if (status & E1000_STATUS_FD) {
+		*duplex = FULL_DUPLEX;
+		DEBUGOUT("Full Duplex\n");
+	} else {
+		*duplex = HALF_DUPLEX;
+		DEBUGOUT("Half Duplex\n");
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_speed_and_duplex_fiber_serdes_generic - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Sets the speed and duplex to gigabit full duplex (the only possible option)
+ *  for fiber/serdes links.
+ **/
+s32 e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                    u16 *speed, u16 *duplex)
+{
+	DEBUGFUNC("e1000_get_speed_and_duplex_fiber_serdes_generic");
+
+	*speed = SPEED_1000;
+	*duplex = FULL_DUPLEX;
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_hw_semaphore_generic - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 e1000_get_hw_semaphore_generic(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = E1000_SUCCESS;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	DEBUGFUNC("e1000_get_hw_semaphore_generic");
+
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		usec_delay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		DEBUGOUT("Driver can't access device - SMBI bit is set.\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = E1000_READ_REG(hw, E1000_SWSM);
+		E1000_WRITE_REG(hw, E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (E1000_READ_REG(hw, E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		usec_delay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		e1000_put_hw_semaphore_generic(hw);
+		DEBUGOUT("Driver can't access the NVM\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
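+
+/*
+ * Editor's note: callers are expected to pair acquire and release around
+ * the protected access, e.g. (illustrative only):
+ */
+#if 0
+	if (e1000_get_hw_semaphore_generic(hw) == E1000_SUCCESS) {
+		/* ... protected NVM or PHY access ... */
+		e1000_put_hw_semaphore_generic(hw);
+	}
+#endif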
+
+/**
+ *  e1000_put_hw_semaphore_generic - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	DEBUGFUNC("e1000_put_hw_semaphore_generic");
+
+	swsm = E1000_READ_REG(hw, E1000_SWSM);
+
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+	E1000_WRITE_REG(hw, E1000_SWSM, swsm);
+}
+
+/**
+ *  e1000_get_auto_rd_done_generic - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 e1000_get_auto_rd_done_generic(struct e1000_hw *hw)
+{
+	s32 i = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_get_auto_rd_done_generic");
+
+	while (i < AUTO_READ_DONE_TIMEOUT) {
+		if (E1000_READ_REG(hw, E1000_EECD) & E1000_EECD_AUTO_RD)
+			break;
+		msec_delay(1);
+		i++;
+	}
+
+	if (i == AUTO_READ_DONE_TIMEOUT) {
+		DEBUGOUT("Auto read by HW from NVM has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_valid_led_default_generic - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_valid_led_default_generic");
+
+	ret_val = e1000_read_nvm(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF)
+		*data = ID_LED_DEFAULT;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_id_led_init_generic - Initialize LED identification settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the default LED configuration from the NVM and derives the LEDCTL
+ *  values for LED "mode 1" and "mode 2", saving the current LEDCTL register
+ *  value as the default.
+ **/
+s32 e1000_id_led_init_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 data, i, temp;
+	const u16 led_mask = 0x0F;
+
+	DEBUGFUNC("e1000_id_led_init_generic");
+
+	ret_val = hw->func.valid_led_default(hw, &data);
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = E1000_READ_REG(hw, E1000_LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
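+
+/*
+ * Worked example (editor's note): each 4-bit field of 'data' controls one
+ * LED; field i is (data >> (i * 4)) & 0xF and maps to byte i of LEDCTL via
+ * the (i << 3) shifts above.  E.g. if field 0 decodes to ID_LED_ON1_OFF2,
+ * LED0 is forced on in ledctl_mode1 and forced off in ledctl_mode2.
+ */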
+
+/**
+ *  e1000_setup_led_generic - Configures SW controllable LED
+ *  @hw: pointer to the HW structure
+ *
+ *  This prepares the SW controllable LED for use and saves the current state
+ *  of the LED so it can be later restored.
+ **/
+s32 e1000_setup_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_setup_led_generic");
+
+	if (hw->func.setup_led != e1000_setup_led_generic) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		ledctl = E1000_READ_REG(hw, E1000_LEDCTL);
+		hw->mac.ledctl_default = ledctl;
+		/* Turn off LED0 */
+		ledctl &= ~(E1000_LEDCTL_LED0_IVRT |
+		            E1000_LEDCTL_LED0_BLINK |
+		            E1000_LEDCTL_LED0_MODE_MASK);
+		ledctl |= (E1000_LEDCTL_MODE_LED_OFF <<
+		           E1000_LEDCTL_LED0_MODE_SHIFT);
+		E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl);
+	} else if (hw->phy.media_type == e1000_media_type_copper) {
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_cleanup_led_generic - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 e1000_cleanup_led_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_cleanup_led_generic");
+
+	if (hw->func.cleanup_led != e1000_cleanup_led_generic) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_default);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_blink_led_generic - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the led's which are set to be on.
+ **/
+s32 e1000_blink_led_generic(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+	DEBUGFUNC("e1000_blink_led_generic");
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		/* always blink LED0 for PCI-E fiber */
+		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+	} else {
+		/*
+		 * set the blink bit for each LED that's "on" (0x0E)
+		 * in ledctl_mode2
+		 */
+		ledctl_blink = hw->mac.ledctl_mode2;
+		for (i = 0; i < 4; i++)
+			if (((hw->mac.ledctl_mode2 >> (i * 8)) & 0xFF) ==
+			    E1000_LEDCTL_MODE_LED_ON)
+				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK <<
+				                 (i * 8));
+	}
+
+	E1000_WRITE_REG(hw, E1000_LEDCTL, ledctl_blink);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_on_generic - Turn LED on
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED on.
+ **/
+s32 e1000_led_on_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_led_on_generic");
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl &= ~E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode2);
+		break;
+	default:
+		break;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_led_off_generic - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 e1000_led_off_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_led_off_generic");
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+		ctrl = E1000_READ_REG(hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_SWDPIN0;
+		ctrl |= E1000_CTRL_SWDPIO0;
+		E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+		break;
+	case e1000_media_type_copper:
+		E1000_WRITE_REG(hw, E1000_LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_set_pcie_no_snoop_generic - Set PCI-express capabilities
+ *  @hw: pointer to the HW structure
+ *  @no_snoop: bitmap of snoop events
+ *
+ *  Set the PCI-express no-snoop bits for the events enabled in 'no_snoop'.
+ **/
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop)
+{
+	u32 gcr;
+
+	DEBUGFUNC("e1000_set_pcie_no_snoop_generic");
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
+	if (no_snoop) {
+		gcr = E1000_READ_REG(hw, E1000_GCR);
+		gcr &= ~(PCIE_NO_SNOOP_ALL);
+		gcr |= no_snoop;
+		E1000_WRITE_REG(hw, E1000_GCR, gcr);
+	}
+out:
+	return;
+}
+
+/**
+ *  e1000_disable_pcie_master_generic - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 (E1000_SUCCESS) if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if the master disable bit has not
+ *  caused the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 e1000_disable_pcie_master_generic(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_disable_pcie_master_generic");
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+
+	while (timeout) {
+		if (!(E1000_READ_REG(hw, E1000_STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		usec_delay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		DEBUGOUT("Master requests are pending.\n");
+		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
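+/*
+ * Usage sketch (hypothetical caller, not compiled into the driver): a
+ * reset path would typically quiesce bus mastering before touching the
+ * device.  The poll loop above waits MASTER_DISABLE_TIMEOUT iterations
+ * of 100 us each (800 in Intel's shared code, i.e. an 80 ms ceiling --
+ * check e1000_defines.h in this tree), so a failure here means master
+ * requests were still pending after that window.
+ */
+#if 0
+static void e1000_example_quiesce(struct e1000_hw *hw)
+{
+	if (e1000_disable_pcie_master_generic(hw) ==
+	    -E1000_ERR_MASTER_REQUESTS_PENDING)
+		DEBUGOUT("continuing reset with master requests pending\n");
+}
+#endif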
+
+/**
+ *  e1000_reset_adaptive_generic - Reset Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the Adaptive Interframe Spacing throttle to default values.
+ **/
+void e1000_reset_adaptive_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	DEBUGFUNC("e1000_reset_adaptive_generic");
+
+	if (!mac->adaptive_ifs) {
+		DEBUGOUT("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	if (!mac->ifs_params_forced) {
+		mac->current_ifs_val = 0;
+		mac->ifs_min_val = IFS_MIN;
+		mac->ifs_max_val = IFS_MAX;
+		mac->ifs_step_size = IFS_STEP;
+		mac->ifs_ratio = IFS_RATIO;
+	}
+
+	mac->in_ifs_mode = FALSE;
+	E1000_WRITE_REG(hw, E1000_AIT, 0);
+out:
+	return;
+}
+
+/**
+ *  e1000_update_adaptive_generic - Update Adaptive Interframe Spacing
+ *  @hw: pointer to the HW structure
+ *
+ *  Update the Adaptive Interframe Spacing Throttle value based on the
+ *  time between transmitted packets and time between collisions.
+ **/
+void e1000_update_adaptive_generic(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+
+	DEBUGFUNC("e1000_update_adaptive_generic");
+
+	if (!mac->adaptive_ifs) {
+		DEBUGOUT("Not in Adaptive IFS mode!\n");
+		goto out;
+	}
+
+	if ((mac->collision_delta * mac->ifs_ratio) > mac->tx_packet_delta) {
+		if (mac->tx_packet_delta > MIN_NUM_XMITS) {
+			mac->in_ifs_mode = TRUE;
+			if (mac->current_ifs_val < mac->ifs_max_val) {
+				if (!mac->current_ifs_val)
+					mac->current_ifs_val = mac->ifs_min_val;
+				else
+					mac->current_ifs_val +=
+						mac->ifs_step_size;
+				E1000_WRITE_REG(hw, E1000_AIT, mac->current_ifs_val);
+			}
+		}
+	} else {
+		if (mac->in_ifs_mode &&
+		    (mac->tx_packet_delta <= MIN_NUM_XMITS)) {
+			mac->current_ifs_val = 0;
+			mac->in_ifs_mode = FALSE;
+			E1000_WRITE_REG(hw, E1000_AIT, 0);
+		}
+	}
+out:
+	return;
+}
+
+/**
+ *  e1000_validate_mdi_setting_generic - Verify MDI/MDIx settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify that when auto-negotiation is not used, MDI/MDI-X is correctly
+ *  set; on an invalid setting the PHY is forced back to MDI mode.
+ **/
+s32 e1000_validate_mdi_setting_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_validate_mdi_setting_generic");
+
+	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+		DEBUGOUT("Invalid MDI setting detected\n");
+		hw->phy.mdix = 1;
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_8bit_ctrl_reg_generic - Write an 8-bit CTRL register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset such as E1000_SCTL
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes an address/data control type register.  There are several of these,
+ *  they all share the format (address << 8) | data, and bit 31 is polled for
+ *  completion.
+ **/
+s32 e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                      u32 offset, u8 data)
+{
+	u32 i, regvalue = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_8bit_ctrl_reg_generic");
+
+	/* Set up the address and data */
+	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+	E1000_WRITE_REG(hw, reg, regvalue);
+
+	/* Poll the ready bit to see if the write completed */
+	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+		usec_delay(5);
+		regvalue = E1000_READ_REG(hw, reg);
+		if (regvalue & E1000_GEN_CTL_READY)
+			break;
+	}
+	if (!(regvalue & E1000_GEN_CTL_READY)) {
+		DEBUGOUT1("Reg %08x did not indicate ready\n", reg);
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
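+/*
+ * Packing sketch (illustrative only, not compiled in): per the format
+ * noted above, these address/data control registers take
+ * (address << 8) | data, with E1000_GEN_CTL_READY (bit 31) polled for
+ * completion.  The offset and data values below are hypothetical;
+ * E1000_SCTL is one register of this type, as mentioned in the
+ * kernel-doc above.
+ */
+#if 0
+static void e1000_example_8bit_write(struct e1000_hw *hw)
+{
+	/* offset 0x1F, data 0xA4 -> regvalue 0x00001FA4 before polling */
+	e1000_write_8bit_ctrl_reg_generic(hw, E1000_SCTL, 0x1F, 0xA4);
+}
+#endif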
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h
new file mode 100644
index 0000000..2a7d39c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_mac.h
@@ -0,0 +1,86 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+/*
+ * Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+s32  e1000_blink_led_generic(struct e1000_hw *hw);
+s32  e1000_check_for_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_fiber_link_generic(struct e1000_hw *hw);
+s32  e1000_check_for_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_cleanup_led_generic(struct e1000_hw *hw);
+s32  e1000_commit_fc_settings_generic(struct e1000_hw *hw);
+s32  e1000_config_fc_after_link_up_generic(struct e1000_hw *hw);
+s32  e1000_disable_pcie_master_generic(struct e1000_hw *hw);
+s32  e1000_force_mac_fc_generic(struct e1000_hw *hw);
+s32  e1000_get_auto_rd_done_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pci_generic(struct e1000_hw *hw);
+s32  e1000_get_bus_info_pcie_generic(struct e1000_hw *hw);
+s32  e1000_get_hw_semaphore_generic(struct e1000_hw *hw);
+s32  e1000_get_speed_and_duplex_copper_generic(struct e1000_hw *hw, u16 *speed,
+                                               u16 *duplex);
+s32  e1000_get_speed_and_duplex_fiber_serdes_generic(struct e1000_hw *hw,
+                                                     u16 *speed, u16 *duplex);
+s32  e1000_id_led_init_generic(struct e1000_hw *hw);
+s32  e1000_led_on_generic(struct e1000_hw *hw);
+s32  e1000_led_off_generic(struct e1000_hw *hw);
+void e1000_update_mc_addr_list_generic(struct e1000_hw *hw,
+	                               u8 *mc_addr_list, u32 mc_addr_count,
+	                               u32 rar_used_count, u32 rar_count);
+s32  e1000_poll_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_set_default_fc_generic(struct e1000_hw *hw);
+s32  e1000_set_fc_watermarks_generic(struct e1000_hw *hw);
+s32  e1000_setup_fiber_serdes_link_generic(struct e1000_hw *hw);
+s32  e1000_setup_led_generic(struct e1000_hw *hw);
+s32  e1000_setup_link_generic(struct e1000_hw *hw);
+s32  e1000_validate_mdi_setting_generic(struct e1000_hw *hw);
+s32  e1000_write_8bit_ctrl_reg_generic(struct e1000_hw *hw, u32 reg,
+                                       u32 offset, u8 data);
+
+u32  e1000_hash_mc_addr_generic(struct e1000_hw *hw, u8 *mc_addr);
+
+void e1000_clear_hw_cntrs_base_generic(struct e1000_hw *hw);
+void e1000_clear_vfta_generic(struct e1000_hw *hw);
+void e1000_config_collision_dist_generic(struct e1000_hw *hw);
+void e1000_init_rx_addrs_generic(struct e1000_hw *hw, u16 rar_count);
+void e1000_mta_set_generic(struct e1000_hw *hw, u32 hash_value);
+void e1000_pcix_mmrbc_workaround_generic(struct e1000_hw *hw);
+void e1000_put_hw_semaphore_generic(struct e1000_hw *hw);
+void e1000_rar_set_generic(struct e1000_hw *hw, u8 *addr, u32 index);
+s32  e1000_check_alt_mac_addr_generic(struct e1000_hw *hw);
+void e1000_remove_device_generic(struct e1000_hw *hw);
+void e1000_reset_adaptive_generic(struct e1000_hw *hw);
+void e1000_set_pcie_no_snoop_generic(struct e1000_hw *hw, u32 no_snoop);
+void e1000_update_adaptive_generic(struct e1000_hw *hw);
+void e1000_write_vfta_generic(struct e1000_hw *hw, u32 offset, u32 value);
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c
new file mode 100644
index 0000000..3bd82cf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_main.c
@@ -0,0 +1,5987 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/netdevice.h>
+#include <linux/tcp.h>
+#include <linux/ipv6.h>
+
+
+// RTNET defines...
+#ifdef NETIF_F_TSO
+#undef NETIF_F_TSO
+#endif
+
+#ifdef NETIF_F_TSO6
+#undef NETIF_F_TSO6
+#endif
+
+#ifdef NETIF_F_HW_VLAN_TX
+#undef NETIF_F_HW_VLAN_TX
+#endif
+
+#ifdef CONFIG_E1000_NAPI
+#undef CONFIG_E1000_NAPI
+#endif
+
+#ifdef MAX_SKB_FRAGS
+#undef MAX_SKB_FRAGS
+#endif
+
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT
+#endif
+
+#ifdef CONFIG_E1000_MQ
+#undef CONFIG_E1000_MQ
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifdef CONFIG_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef HAVE_PCI_ERS
+#error "STOP it here"
+#undef HAVE_PCI_ERS
+#endif
+
+#ifdef USE_REBOOT_NOTIFIER
+#undef USE_REBOOT_NOTIFIER
+#endif
+
+#ifdef HAVE_TX_TIMEOUT
+#undef HAVE_TX_TIMEOUT
+#endif
+
+
+#ifdef NETIF_F_TSO
+#include <net/checksum.h>
+#ifdef NETIF_F_TSO6
+#include <net/ip6_checksum.h>
+#endif
+#endif
+#ifdef SIOCGMIIPHY
+#include <linux/mii.h>
+#endif
+#ifdef SIOCETHTOOL
+#include <linux/ethtool.h>
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+#include <linux/if_vlan.h>
+#endif
+#ifdef CONFIG_E1000_MQ
+#include <linux/cpu.h>
+#include <linux/smp.h>
+#endif
+
+#include "e1000.h"
+
+#ifdef HAVE_PCI_ERS
+#error "STOP it here"
+#endif
+
+
+
+char e1000_driver_name[MODULE_NAME_LEN] = "rt_e1000";
+static char e1000_driver_string[] = "Intel(R) PRO/1000 Network Driver";
+
+#ifdef CONFIG_E1000_NAPI
+#define DRV_NAPI "-NAPI"
+#else
+#define DRV_NAPI
+#endif
+
+
+#define DRV_DEBUG
+
+#define DRV_HW_PERF
+
+/*
+ * Port to RTnet, based on e1000 driver version 7.6.15.5
+ * (22-Sep-2008, Mathias Koehrer)
+ */
+
+#define DRV_VERSION "7.6.15.5" DRV_NAPI DRV_DEBUG DRV_HW_PERF " ported to RTnet"
+const char e1000_driver_version[] = DRV_VERSION;
+static const char e1000_copyright[] = "Copyright (c) 1999-2008 Intel Corporation.";
+
+// RTNET wrappers
+#define kmalloc(a,b) rtdm_malloc(a)
+#define vmalloc(a) rtdm_malloc(a)
+#define kfree(a) rtdm_free(a)
+#define vfree(a) rtdm_free(a)
+#define skb_reserve(a,b) rtskb_reserve(a,b)
+#define net_device rtnet_device
+#define sk_buff rtskb
+#define netdev_priv(a) a->priv
+// ----------------------
+
+
+
+/* e1000_pci_tbl - PCI Device ID Table
+ *
+ * Last entry must be all 0s
+ *
+ * Macro expands to...
+ *   {PCI_DEVICE(PCI_VENDOR_ID_INTEL, device_id)}
+ */
+
+#define PCI_ID_LIST_PCI  \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82542), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82543GC_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82543GC_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544EI_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544EI_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544GC_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82544GC_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545EM_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545EM_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541EI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541ER_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EM_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP_LOM), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541EI_MOBILE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547EI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547EI_MOBILE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546EB_QUAD_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82540EP_LP), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82545GM_SERDES), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82547GI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI_MOBILE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541ER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_FIBER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_SERDES), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82541GI_LF), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_PCIE), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_QUAD_COPPER), \
+	  INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3)
+
+#define PCI_ID_LIST_PCIE  \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_M_AMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_AMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_C), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IGP_M), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_FIBER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_FIBER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI_SERDES), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573E), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573E_IAMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_COPPER_DPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_SERDES_DPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82573L), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_FIBER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES_DUAL), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_SERDES_QUAD), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82572EI), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_COPPER_SPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_80003ES2LAN_SERDES_SPT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571EB_QUAD_COPPER_LP), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_82571PT_QUAD_COPPER), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE_GT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH8_IFE_G), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IGP_AMT), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IGP_C), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE_G), \
+	 INTEL_E1000_ETHERNET_DEVICE(E1000_DEV_ID_ICH9_IFE_GT)
+
+
+
+
+static struct pci_device_id e1000_pci_tbl[] = {
+	PCI_ID_LIST_PCI,
+	PCI_ID_LIST_PCIE,
+	/* required last entry */
+	{0,}
+};
+MODULE_DEVICE_TABLE(pci, e1000_pci_tbl);
+
+static struct pci_device_id e1000_pcipure_tbl[] = {
+	PCI_ID_LIST_PCI,
+	/* required last entry */
+	{0,}
+};
+
+static struct pci_device_id e1000_pcie_tbl[] = {
+	PCI_ID_LIST_PCIE,
+	/* required last entry */
+	{0,}
+};
+
+
+
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring);
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring);
+
+static int e1000_init_module(void);
+static void e1000_exit_module(void);
+static int e1000_probe(struct pci_dev *pdev, const struct pci_device_id *ent);
+static void e1000_remove(struct pci_dev *pdev);
+static int e1000_alloc_queues(struct e1000_adapter *adapter);
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter);
+#endif
+static int e1000_sw_init(struct e1000_adapter *adapter);
+static int e1000_open(struct net_device *netdev);
+static int e1000_close(struct net_device *netdev);
+static void e1000_configure(struct e1000_adapter *adapter);
+static void e1000_configure_tx(struct e1000_adapter *adapter);
+static void e1000_configure_rx(struct e1000_adapter *adapter);
+static void e1000_setup_rctl(struct e1000_adapter *adapter);
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter);
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring);
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring);
+static void e1000_set_multi(struct net_device *netdev);
+static void e1000_update_phy_info_task(struct work_struct *work);
+static void e1000_watchdog_task(struct work_struct *work);
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work);
+static int e1000_xmit_frame_ring(struct sk_buff *skb, struct net_device *netdev,
+				 struct e1000_tx_ring *tx_ring);
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev);
+#ifdef CONFIG_E1000_MQ
+static int e1000_subqueue_xmit_frame(struct sk_buff *skb,
+				     struct net_device *netdev, int queue);
+#endif
+static void e1000_phy_read_status(struct e1000_adapter *adapter);
+#if 0
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev);
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu);
+static int e1000_set_mac(struct net_device *netdev, void *p);
+#endif
+static int  e1000_intr(rtdm_irq_t *irq_handle);
+static int e1000_intr_msi(rtdm_irq_t *irq_handle);
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring);
+#ifdef CONFIG_E1000_NAPI
+static int e1000_poll(struct napi_struct *napi, int budget);
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    int *work_done, int work_to_do);
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+				       int *work_done, int work_to_do);
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+					  struct e1000_rx_ring *rx_ring,
+					  int *work_done, int work_to_do);
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+					 struct e1000_rx_ring *rx_ring,
+					 int cleaned_count);
+#else
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+					 nanosecs_abs_t *time_stamp);
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+					 nanosecs_abs_t *time_stamp);
+#endif
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count);
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+				      struct e1000_rx_ring *rx_ring,
+				      int cleaned_count);
+#if 0
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd);
+#ifdef SIOCGMIIPHY
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd);
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter);
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter);
+static void e1000_tx_timeout(struct net_device *dev);
+#endif
+#endif
+static void e1000_reset_task(struct work_struct *work);
+static void e1000_smartspeed(struct e1000_adapter *adapter);
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct sk_buff *skb);
+
+#ifdef NETIF_F_HW_VLAN_TX
+static void e1000_vlan_rx_register(struct net_device *netdev,
+				   struct vlan_group *grp);
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid);
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid);
+static void e1000_restore_vlan(struct e1000_adapter *adapter);
+#endif
+
+// static int e1000_suspend(struct pci_dev *pdev, pm_message_t state);
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev);
+#endif
+#ifndef USE_REBOOT_NOTIFIER
+// static void e1000_shutdown(struct pci_dev *pdev);
+#else
+static int e1000_notify_reboot(struct notifier_block *, unsigned long event,
+			       void *ptr);
+static struct notifier_block e1000_notifier_reboot = {
+	.notifier_call	= e1000_notify_reboot,
+	.next		= NULL,
+	.priority	= 0
+};
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void e1000_netpoll (struct net_device *netdev);
+#endif
+
+#define COPYBREAK_DEFAULT 256
+static unsigned int copybreak __read_mostly = COPYBREAK_DEFAULT;
+module_param(copybreak, uint, 0644);
+MODULE_PARM_DESC(copybreak,
+	"Maximum size of packet that is copied to a new buffer on receive");
+
+
+#ifdef HAVE_PCI_ERS
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+		     pci_channel_state_t state);
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev);
+static void e1000_io_resume(struct pci_dev *pdev);
+
+static struct pci_error_handlers e1000_err_handler = {
+	.error_detected = e1000_io_error_detected,
+	.slot_reset = e1000_io_slot_reset,
+	.resume = e1000_io_resume,
+};
+#endif
+
+static struct pci_driver e1000_driver = {
+	.name     = e1000_driver_name,
+	.id_table = e1000_pci_tbl,
+	.probe    = e1000_probe,
+	.remove   = e1000_remove,
+#ifdef HAVE_PCI_ERS
+	.err_handler = &e1000_err_handler
+#endif
+};
+
+MODULE_AUTHOR("Intel Corporation, <linux.nics@intel.com>");
+MODULE_DESCRIPTION("Intel(R) PRO/1000 Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define MAX_UNITS 8
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+
+
+static int local_debug = NETIF_MSG_DRV | NETIF_MSG_PROBE;
+module_param(local_debug, int, 0);
+MODULE_PARM_DESC(local_debug, "Debug level (0=none,...,16=all)");
+
+/* The parameter 'pciif' can be used to restrict this driver to
+ * PCI-only or PCIe-only NICs.
+ * This reflects the fact that newer Linux kernels ship two
+ * different (non-real-time) drivers for the e1000 hardware:
+ * e1000 for PCI only
+ * e1000e for PCIe only
+ *
+ * With the 'pciif' parameter,
+ *  modprobe rt_e1000 pciif=pci
+ * loads the driver for PCI NICs only,
+ * and
+ *  modprobe rt_e1000 -o rt_e1000e pciif=pcie
+ * loads a second instance of this driver, named 'rt_e1000e',
+ * for PCIe NICs only.
+ *
+ * If the 'pciif' parameter is not specified, all (PCI and PCIe) e1000
+ * NICs will be used.
+ */
+static char *pciif = "all";
+module_param(pciif, charp, 0);
+MODULE_PARM_DESC(pciif, "PCI Interface: 'all' (default), 'pci', 'pcie'");
+
+
+//#define register_netdev(a) rt_register_rtnetdev(a)
+//#define unregister_netdev(a) rt_unregister_rtnetdev(a)
+//#define free_netdev(a) rtdev_free(a)
+//#define netif_stop_queue(a) rtnetif_stop_queue(a)
+
+/**
+ * e1000_init_module - Driver Registration Routine
+ *
+ * e1000_init_module is the first routine called when the driver is
+ * loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init e1000_init_module(void)
+{
+	int ret;
+	strcpy(e1000_driver_name, THIS_MODULE->name);
+	printk(KERN_INFO "%s - %s version %s (pciif: %s)\n",
+	       e1000_driver_string, e1000_driver_name, e1000_driver_version, pciif);
+
+	printk(KERN_INFO "%s\n", e1000_copyright);
+
+
+	if (strcmp(pciif, "pcie") == 0) {
+		/* PCIe only */
+		e1000_driver.id_table = e1000_pcie_tbl;
+	} else if (strcmp(pciif, "pci") == 0) {
+		/* PCI only */
+		e1000_driver.id_table = e1000_pcipure_tbl;
+	}
+
+	ret = pci_register_driver(&e1000_driver);
+#ifdef USE_REBOOT_NOTIFIER
+	if (ret >= 0) {
+		register_reboot_notifier(&e1000_notifier_reboot);
+	}
+#endif
+	if (copybreak != COPYBREAK_DEFAULT) {
+		if (copybreak == 0)
+			printk(KERN_INFO "e1000: copybreak disabled\n");
+		else
+			printk(KERN_INFO "e1000: copybreak enabled for "
+			       "packets <= %u bytes\n", copybreak);
+	}
+	return ret;
+}
+
+module_init(e1000_init_module);
+
+/**
+ * e1000_exit_module - Driver Exit Cleanup Routine
+ *
+ * e1000_exit_module is called just before the driver is removed
+ * from memory.
+ **/
+static void __exit e1000_exit_module(void)
+{
+#ifdef USE_REBOOT_NOTIFIER
+	unregister_reboot_notifier(&e1000_notifier_reboot);
+#endif
+	pci_unregister_driver(&e1000_driver);
+}
+
+module_exit(e1000_exit_module);
+
+static int e1000_request_irq(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err = 0;
+
+	if (adapter->flags & E1000_FLAG_HAS_MSI) {
+		err = pci_enable_msi(adapter->pdev);
+		if (!err)
+			adapter->flags |= E1000_FLAG_MSI_ENABLED;
+	}
+	rt_stack_connect(netdev, &STACK_manager);
+	if (adapter->flags & E1000_FLAG_MSI_ENABLED) {
+		err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq, e1000_intr_msi,
+				  0, netdev->name, netdev);
+		if (!err) {
+			return err;
+		} else {
+			adapter->flags &= ~E1000_FLAG_MSI_ENABLED;
+			pci_disable_msi(adapter->pdev);
+		}
+	}
+	err = rtdm_irq_request(&adapter->irq_handle, adapter->pdev->irq,
+			       e1000_intr, RTDM_IRQTYPE_SHARED, netdev->name,
+			       netdev);
+	if (err)
+		DPRINTK(PROBE, ERR, "Unable to allocate interrupt Error: %d\n",
+			err);
+
+	return err;
+}
+
+static void e1000_free_irq(struct e1000_adapter *adapter)
+{
+	// struct net_device *netdev = adapter->netdev;
+
+	rtdm_irq_free(&adapter->irq_handle);
+
+	if (adapter->flags & E1000_FLAG_MSI_ENABLED) {
+		pci_disable_msi(adapter->pdev);
+		adapter->flags &= ~E1000_FLAG_MSI_ENABLED;
+	}
+}
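+/*
+ * Note on the fallback order in e1000_request_irq() above: MSI gives
+ * this adapter an exclusive vector, so its rtdm_irq_request() passes no
+ * sharing flag; only the legacy pin-based fallback requests
+ * RTDM_IRQTYPE_SHARED.  A minimal sketch of the same pattern for a
+ * hypothetical RTDM driver (names illustrative, not compiled in):
+ */
+#if 0
+static int example_request_irq(rtdm_irq_t *h, struct pci_dev *pdev,
+			       rtdm_irq_handler_t isr, void *ctx)
+{
+	if (pci_enable_msi(pdev) == 0) {
+		if (rtdm_irq_request(h, pdev->irq, isr, 0,
+				     "example", ctx) == 0)
+			return 0;	/* exclusive MSI vector */
+		pci_disable_msi(pdev);	/* fall back to legacy INTx */
+	}
+	return rtdm_irq_request(h, pdev->irq, isr, RTDM_IRQTYPE_SHARED,
+				"example", ctx);
+}
+#endif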
+
+/**
+ * e1000_irq_disable - Mask off interrupt generation on the NIC
+ * @adapter: board private structure
+ **/
+static void e1000_irq_disable(struct e1000_adapter *adapter)
+{
+	atomic_inc(&adapter->irq_sem);
+	E1000_WRITE_REG(&adapter->hw, E1000_IMC, ~0);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	synchronize_irq(adapter->pdev->irq);
+}
+
+/**
+ * e1000_irq_enable - Enable default interrupt generation settings
+ * @adapter: board private structure
+ **/
+
+static void e1000_irq_enable(struct e1000_adapter *adapter)
+{
+	if (likely(atomic_dec_and_test(&adapter->irq_sem))) {
+		E1000_WRITE_REG(&adapter->hw, E1000_IMS, IMS_ENABLE_MASK);
+		E1000_WRITE_FLUSH(&adapter->hw);
+	}
+}
+#ifdef NETIF_F_HW_VLAN_TX
+
+static void e1000_update_mng_vlan(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+	if (adapter->vlgrp) {
+		if (!vlan_group_get_device(adapter->vlgrp, vid)) {
+			if (adapter->hw.mng_cookie.status &
+				E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+				e1000_vlan_rx_add_vid(netdev, vid);
+				adapter->mng_vlan_id = vid;
+			} else {
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
+
+			if ((old_vid != (u16)E1000_MNG_VLAN_NONE) &&
+					(vid != old_vid) &&
+			    !vlan_group_get_device(adapter->vlgrp, old_vid))
+				e1000_vlan_rx_kill_vid(netdev, old_vid);
+		} else {
+			adapter->mng_vlan_id = vid;
+		}
+	}
+}
+#endif
+
+/**
+ * e1000_release_hw_control - release control of the h/w to f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_release_hw_control resets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that the
+ * driver is no longer loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is closed.
+ *
+ **/
+static void e1000_release_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware take over control of h/w */
+	switch (adapter->hw.mac.type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+				swsm & ~E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+				ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+/**
+ * e1000_get_hw_control - get control of the h/w from f/w
+ * @adapter: address of board private structure
+ *
+ * e1000_get_hw_control sets {CTRL_EXT|SWSM}:DRV_LOAD bit.
+ * For ASF and Pass Through versions of f/w this means that
+ * the driver is loaded. For AMT version (only with 82573)
+ * of the f/w this means that the network i/f is open.
+ *
+ **/
+static void e1000_get_hw_control(struct e1000_adapter *adapter)
+{
+	u32 ctrl_ext;
+	u32 swsm;
+
+	/* Let firmware know the driver has taken over */
+	switch (adapter->hw.mac.type) {
+	case e1000_82573:
+		swsm = E1000_READ_REG(&adapter->hw, E1000_SWSM);
+		E1000_WRITE_REG(&adapter->hw, E1000_SWSM,
+				swsm | E1000_SWSM_DRV_LOAD);
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		ctrl_ext = E1000_READ_REG(&adapter->hw, E1000_CTRL_EXT);
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL_EXT,
+				ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+		break;
+	default:
+		break;
+	}
+}
+
+static void e1000_init_manageability(struct e1000_adapter *adapter)
+{
+}
+
+static void e1000_release_manageability(struct e1000_adapter *adapter)
+{
+}
+
+/**
+ * e1000_configure - configure the hardware for RX and TX
+ * @adapter: private board structure
+ **/
+static void e1000_configure(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int i;
+
+	e1000_set_multi(netdev);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	e1000_restore_vlan(adapter);
+#endif
+	e1000_init_manageability(adapter);
+
+	e1000_configure_tx(adapter);
+	e1000_setup_rctl(adapter);
+	e1000_configure_rx(adapter);
+	/* call E1000_DESC_UNUSED which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *ring = &adapter->rx_ring[i];
+		adapter->alloc_rx_buf(adapter, ring,
+				      E1000_DESC_UNUSED(ring));
+	}
+
+#ifdef CONFIG_E1000_MQ
+	e1000_setup_queue_mapping(adapter);
+#endif
+
+	// adapter->tx_queue_len = netdev->tx_queue_len;
+}
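+/*
+ * Ring-accounting sketch (illustrative, not compiled in): the
+ * E1000_DESC_UNUSED value used above conventionally counts the free
+ * descriptors while always leaving one slot empty, so even a fully
+ * stocked ring keeps next_to_use != next_to_clean.  A minimal model of
+ * that invariant, assuming the usual definition of the macro:
+ */
+#if 0
+static unsigned int example_desc_unused(unsigned int next_to_clean,
+					unsigned int next_to_use,
+					unsigned int count)
+{
+	/* e.g. count = 256, clean = use = 10 (empty ring) -> 255 free */
+	return next_to_clean > next_to_use ?
+		next_to_clean - next_to_use - 1 :
+		count + next_to_clean - next_to_use - 1;
+}
+#endif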
+
+static void e1000_napi_enable_all(struct e1000_adapter *adapter)
+{
+#ifdef CONFIG_E1000_NAPI
+	int i;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_enable(&adapter->rx_ring[i].napi);
+#endif
+}
+
+static void e1000_napi_disable_all(struct e1000_adapter *adapter)
+{
+#ifdef CONFIG_E1000_NAPI
+	int i;
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		napi_disable(&adapter->rx_ring[i].napi);
+#endif
+}
+
+int e1000_up(struct e1000_adapter *adapter)
+{
+	/* hardware has been reset, we need to reload some things */
+	e1000_configure(adapter);
+
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	e1000_napi_enable_all(adapter);
+
+	e1000_irq_enable(adapter);
+
+	/* fire a link change interrupt to start the watchdog */
+	// E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+	return 0;
+}
+
+static void e1000_down_and_stop(struct e1000_adapter *adapter)
+{
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer */
+	set_bit(__E1000_DOWN, &adapter->state);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_delayed_work_sync(&adapter->watchdog_task);
+	cancel_delayed_work_sync(&adapter->phy_info_task);
+	cancel_delayed_work_sync(&adapter->fifo_stall_task);
+}
+
+void e1000_down(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 tctl, rctl;
+
+	e1000_down_and_stop(adapter);
+
+	/* disable receives in the hardware */
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+#ifdef NETIF_F_LLTX
+	rtnetif_stop_queue(netdev);
+#else
+	rtnetif_tx_disable(netdev);
+#endif
+
+	/* disable transmits in the hardware */
+	tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+	/* flush both disables and wait for them to finish */
+	E1000_WRITE_FLUSH(&adapter->hw);
+	msleep(10);
+
+	e1000_napi_disable_all(adapter);
+
+	e1000_irq_disable(adapter);
+
+	// netdev->tx_queue_len = adapter->tx_queue_len;
+	rtnetif_carrier_off(netdev);
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+
+	e1000_reset(adapter);
+	e1000_clean_all_tx_rings(adapter);
+	e1000_clean_all_rx_rings(adapter);
+}
+
+void e1000_reinit_locked(struct e1000_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+	e1000_down(adapter);
+	e1000_up(adapter);
+	clear_bit(__E1000_RESETTING, &adapter->state);
+}
+
+void e1000_reset(struct e1000_adapter *adapter)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_fc_info *fc = &adapter->hw.fc;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space;
+	bool legacy_pba_adjust = FALSE;
+	u16 hwm;
+
+	/* Repartition Pba for greater than 9k mtu
+	 * To take effect CTRL.RST is required.
+	 */
+
+	switch (mac->type) {
+	case e1000_82542:
+	case e1000_82543:
+	case e1000_82544:
+	case e1000_82540:
+	case e1000_82541:
+	case e1000_82541_rev_2:
+		legacy_pba_adjust = TRUE;
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82545:
+	case e1000_82545_rev_3:
+	case e1000_82546:
+	case e1000_82546_rev_3:
+		pba = E1000_PBA_48K;
+		break;
+	case e1000_82547:
+	case e1000_82547_rev_2:
+		legacy_pba_adjust = TRUE;
+		pba = E1000_PBA_30K;
+		break;
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+		pba = E1000_PBA_38K;
+		break;
+	case e1000_82573:
+		pba = E1000_PBA_20K;
+		break;
+	case e1000_ich8lan:
+		pba = E1000_PBA_8K;
+		break;
+	case e1000_ich9lan:
+#define E1000_PBA_10K 0x000A
+		pba = E1000_PBA_10K;
+		break;
+	case e1000_undefined:
+	case e1000_num_macs:
+		break;
+	}
+
+	if (legacy_pba_adjust == TRUE) {
+		if (adapter->max_frame_size > E1000_RXBUFFER_8192)
+			pba -= 8; /* allocate more FIFO for Tx */
+
+		if (mac->type == e1000_82547) {
+			adapter->tx_fifo_head = 0;
+			adapter->tx_head_addr = pba << E1000_TX_HEAD_ADDR_SHIFT;
+			adapter->tx_fifo_size =
+				(E1000_PBA_40K - pba) << E1000_PBA_BYTES_SHIFT;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+		}
+	} else if (adapter->max_frame_size > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) {
+		/* adjust PBA for jumbo frames */
+		E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB. */
+		pba = E1000_READ_REG(&adapter->hw, E1000_PBA);
+		/* upper 16 bits has Tx packet buffer allocation size in KB */
+		tx_space = pba >> 16;
+		/* lower 16 bits has Rx packet buffer allocation size in KB */
+		pba &= 0xffff;
+		/* the tx fifo also stores 16 bytes of information about the tx
+		 * but don't include ethernet FCS because hardware appends it */
+		min_tx_space = (adapter->max_frame_size +
+				sizeof(struct e1000_tx_desc) -
+				ETHERNET_FCS_SIZE) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = adapter->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* PCI/PCIx hardware has PBA alignment constraints */
+			switch (mac->type) {
+			case e1000_82545 ... e1000_82546_rev_3:
+				pba &= ~(E1000_PBA_8K - 1);
+				break;
+			default:
+				break;
+			}
+
+			/* if short on rx space, rx wins and must trump tx
+			 * adjustment or use Early Receive if available */
+			if (pba < min_rx_space) {
+				switch (mac->type) {
+				case e1000_82573:
+				case e1000_ich9lan:
+					/* ERT enabled in e1000_configure_rx */
+					break;
+				default:
+					pba = min_rx_space;
+					break;
+				}
+			}
+		}
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_PBA, pba);
+
+	/* flow control settings */
+	/* The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, and
+	 * - the full Rx FIFO size minus the early receive size (for parts
+	 *   with ERT support assuming ERT set to E1000_ERT_2048), or
+	 * - the full Rx FIFO size minus one full frame */
+	hwm = min(((pba << 10) * 9 / 10),
+		  ((mac->type == e1000_82573 || mac->type == e1000_ich9lan) ?
+		      (u16)((pba << 10) - (E1000_ERT_2048 << 3)) :
+		      ((pba << 10) - adapter->max_frame_size)));
+
+	fc->high_water = hwm & 0xFFF8;	/* 8-byte granularity */
+	fc->low_water = fc->high_water - 8;
+
+	if (mac->type == e1000_80003es2lan)
+		fc->pause_time = 0xFFFF;
+	else
+		fc->pause_time = E1000_FC_PAUSE_TIME;
+	fc->send_xon = 1;
+	fc->type = fc->original_type;
+
+	/* Allow time for pending master requests to run */
+	e1000_reset_hw(&adapter->hw);
+
+	/* For 82573 and ICHx if AMT is enabled, let the firmware know
+	 * that the network interface is in control */
+	if (((adapter->hw.mac.type == e1000_82573) ||
+	     (adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	if (mac->type >= e1000_82544)
+		E1000_WRITE_REG(&adapter->hw, E1000_WUC, 0);
+
+	if (e1000_init_hw(&adapter->hw))
+		DPRINTK(PROBE, ERR, "Hardware Error\n");
+#ifdef NETIF_F_HW_VLAN_TX
+	e1000_update_mng_vlan(adapter);
+#endif
+	/* if (adapter->hwflags & HWFLAGS_PHY_PWR_BIT) { */
+	if (mac->type >= e1000_82544 &&
+	    mac->type <= e1000_82547_rev_2 &&
+	    mac->autoneg == 1 &&
+	    adapter->hw.phy.autoneg_advertised == ADVERTISE_1000_FULL) {
+		u32 ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		/* clear phy power management bit if we are in gig only mode,
+		 * which if enabled will attempt negotiation to 100Mb, which
+		 * can cause a loss of link at power off or driver unload */
+		ctrl &= ~E1000_CTRL_SWDPIN3;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+	}
+
+#if defined(CONFIG_PPC64) || defined(CONFIG_PPC)
+#define E1000_GCR_DISABLE_TIMEOUT_MECHANISM 0x80000000
+	if (adapter->hw.mac.type == e1000_82571) {
+		/* work around pSeries hardware by disabling timeouts */
+		u32 gcr = E1000_READ_REG(&adapter->hw, E1000_GCR);
+		gcr |= E1000_GCR_DISABLE_TIMEOUT_MECHANISM;
+		E1000_WRITE_REG(&adapter->hw, E1000_GCR, gcr);
+	}
+#endif
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	E1000_WRITE_REG(&adapter->hw, E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	e1000_reset_adaptive(&adapter->hw);
+	e1000_get_phy_info(&adapter->hw);
+
+	if (!(adapter->flags & E1000_FLAG_SMART_POWER_DOWN) &&
+	    (mac->type == e1000_82571 || mac->type == e1000_82572)) {
+		u16 phy_data = 0;
+		/* speed up time to link by disabling smart power down, ignore
+		 * the return value of this function because there is nothing
+		 * different we would do if it failed */
+		e1000_read_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				   &phy_data);
+		phy_data &= ~IGP02E1000_PM_SPD;
+		e1000_write_phy_reg(&adapter->hw, IGP02E1000_PHY_POWER_MGMT,
+				    phy_data);
+	}
+
+	e1000_release_manageability(adapter);
+}
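+/*
+ * Worked example for the flow-control watermark math above (values
+ * hypothetical): with a 48 KB Rx PBA and a 1522-byte max frame on a
+ * part without early receive,
+ * hwm = min(49152 * 9 / 10, 49152 - 1522) = min(44236, 47630) = 44236;
+ * masking to 8-byte granularity gives high_water = 44232 and
+ * low_water = 44224.  The same computation as a standalone helper,
+ * not compiled into the driver:
+ */
+#if 0
+static u16 e1000_example_hwm(u32 pba_kb, u32 max_frame)
+{
+	u32 fifo = pba_kb << 10;		/* 48 KB -> 49152 bytes */
+	u32 hwm = min(fifo * 9 / 10, fifo - max_frame);
+
+	/* 48 KB, 1522-byte frames: min(44236, 47630) = 44236 */
+	return hwm & 0xFFF8;			/* -> 44232 */
+}
+#endif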
+
+/**
+ * e1000_probe - Device Initialization Routine
+ * @pdev: PCI device information struct
+ * @ent: entry in e1000_pci_tbl
+ *
+ * Returns 0 on success, negative on failure
+ *
+ * e1000_probe initializes an adapter identified by a pci_dev structure.
+ * The OS initialization, configuring of the adapter private structure,
+ * and a hardware reset occur.
+ **/
+static int e1000_probe(struct pci_dev *pdev,
+				 const struct pci_device_id *ent)
+{
+	struct net_device *netdev;
+	struct e1000_adapter *adapter;
+
+	static int cards_found = 0;
+	static int global_quad_port_a = 0; /* global ksp3 port a indication */
+	int i, err, pci_using_dac;
+	u16 eeprom_data = 0;
+	u16 eeprom_apme_mask = E1000_EEPROM_APME;
+
+	if (cards[cards_found++] == 0)
+		return -ENODEV;
+
+	if ((err = pci_enable_device(pdev)))
+		return err;
+
+	if (!(err = dma_set_mask(&pdev->dev, DMA_64BIT_MASK)) &&
+	    !(err = dma_set_coherent_mask(&pdev->dev, DMA_64BIT_MASK))) {
+		pci_using_dac = 1;
+	} else {
+		if ((err = dma_set_mask(&pdev->dev, DMA_32BIT_MASK)) &&
+		    (err = dma_set_coherent_mask(&pdev->dev, DMA_32BIT_MASK))) {
+			E1000_ERR("No usable DMA configuration, aborting\n");
+			goto err_dma;
+		}
+		pci_using_dac = 0;
+	}
+
+	if ((err = pci_request_regions(pdev, e1000_driver_name)))
+		goto err_pci_reg;
+
+	pci_set_master(pdev);
+
+	err = -ENOMEM;
+#ifdef CONFIG_E1000_MQ
+	netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter) +
+							(sizeof(struct net_device_subqueue) *
+								E1000_MAX_TX_QUEUES), 16);
+#else
+	netdev = rt_alloc_etherdev(sizeof(struct e1000_adapter),
+				2 * E1000_DEFAULT_RXD + E1000_DEFAULT_TXD);
+#endif
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	memset(netdev->priv, 0, sizeof(struct e1000_adapter));
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+
+	// SET_NETDEV_DEV(netdev, &pdev->dev);
+	netdev->vers = RTDEV_VERS_2_0;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = netdev->priv;
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	adapter->hw.back = adapter;
+	adapter->msg_enable = (1 << local_debug) - 1;
+
+	err = -EIO;
+	adapter->hw.hw_addr = ioremap(pci_resource_start(pdev, BAR_0),
+				      pci_resource_len(pdev, BAR_0));
+	if (!adapter->hw.hw_addr)
+		goto err_ioremap;
+
+	for (i = BAR_1; i <= BAR_5; i++) {
+		if (pci_resource_len(pdev, i) == 0)
+			continue;
+		if (pci_resource_flags(pdev, i) & IORESOURCE_IO) {
+			adapter->hw.io_base = pci_resource_start(pdev, i);
+			break;
+		}
+	}
+
+	netdev->open = &e1000_open;
+	netdev->stop = &e1000_close;
+	netdev->hard_start_xmit = &e1000_xmit_frame;
+#ifdef CONFIG_E1000_MQ
+	netdev->hard_start_subqueue_xmit = &e1000_subqueue_xmit_frame;
+#endif
+#ifdef HAVE_TX_TIMEOUT
+	netdev->tx_timeout = &e1000_tx_timeout;
+	netdev->watchdog_timeo = 5 * HZ;
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+	netdev->vlan_rx_register = e1000_vlan_rx_register;
+	netdev->vlan_rx_add_vid = e1000_vlan_rx_add_vid;
+	netdev->vlan_rx_kill_vid = e1000_vlan_rx_kill_vid;
+#endif
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	netdev->poll_controller = e1000_netpoll;
+#endif
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	adapter->bd_number = cards_found;
+
+	/* setup the private structure */
+	if ((err = e1000_sw_init(adapter)))
+		goto err_sw_init;
+
+	err = -EIO;
+	/* Flash BAR mapping must happen after e1000_sw_init
+	 * because it depends on mac.type */
+	if (((adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	   (pci_resource_flags(pdev, 1) & IORESOURCE_MEM)) {
+		adapter->hw.flash_address = ioremap(pci_resource_start(pdev, 1),
+						    pci_resource_len(pdev, 1));
+		if (!adapter->hw.flash_address)
+			goto err_flashmap;
+	}
+
+	if ((err = e1000_init_mac_params(&adapter->hw)))
+		goto err_hw_init;
+
+	if ((err = e1000_init_nvm_params(&adapter->hw)))
+		goto err_hw_init;
+
+	if ((err = e1000_init_phy_params(&adapter->hw)))
+		goto err_hw_init;
+
+	e1000_get_bus_info(&adapter->hw);
+
+	e1000_init_script_state_82541(&adapter->hw, TRUE);
+	e1000_set_tbi_compatibility_82543(&adapter->hw, TRUE);
+
+	adapter->hw.phy.autoneg_wait_to_complete = FALSE;
+	adapter->hw.mac.adaptive_ifs = FALSE;
+
+	/* Copper options */
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		adapter->hw.phy.mdix = AUTO_ALL_MODES;
+		adapter->hw.phy.disable_polarity_correction = FALSE;
+		adapter->hw.phy.ms_type = E1000_MASTER_SLAVE;
+	}
+
+	if (e1000_check_reset_block(&adapter->hw))
+		DPRINTK(PROBE, INFO, "PHY reset is blocked due to SOL/IDER session.\n");
+
+#ifdef MAX_SKB_FRAGS
+	if (adapter->hw.mac.type >= e1000_82543) {
+#ifdef NETIF_F_HW_VLAN_TX
+		netdev->features = NETIF_F_SG |
+				   NETIF_F_HW_CSUM |
+				   NETIF_F_HW_VLAN_TX |
+				   NETIF_F_HW_VLAN_RX |
+				   NETIF_F_HW_VLAN_FILTER;
+		if ((adapter->hw.mac.type == e1000_ich8lan) ||
+		    (adapter->hw.mac.type == e1000_ich9lan))
+			netdev->features &= ~NETIF_F_HW_VLAN_FILTER;
+#else
+		netdev->features = NETIF_F_SG | NETIF_F_HW_CSUM;
+#endif
+	}
+
+#ifdef NETIF_F_TSO
+	if ((adapter->hw.mac.type >= e1000_82544) &&
+	   (adapter->hw.mac.type != e1000_82547)) {
+		adapter->flags |= E1000_FLAG_HAS_TSO;
+		netdev->features |= NETIF_F_TSO;
+	}
+
+#ifdef NETIF_F_TSO6
+	if (adapter->hw.mac.type > e1000_82547_rev_2) {
+		adapter->flags |= E1000_FLAG_HAS_TSO6;
+		netdev->features |= NETIF_F_TSO6;
+	}
+#endif
+#endif
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+#endif
+#ifdef NETIF_F_LLTX
+	netdev->features |= NETIF_F_LLTX;
+#endif
+
+	/* Hardware features, flags and workarounds */
+	if (adapter->hw.mac.type >= e1000_82571) {
+		adapter->flags |= E1000_FLAG_INT_ASSERT_AUTO_MASK;
+		adapter->flags |= E1000_FLAG_HAS_MSI;
+		adapter->flags |= E1000_FLAG_HAS_MANC2H;
+	}
+
+	if (adapter->hw.mac.type >= e1000_82540) {
+		adapter->flags |= E1000_FLAG_HAS_SMBUS;
+		adapter->flags |= E1000_FLAG_HAS_INTR_MODERATION;
+	}
+
+	if (adapter->hw.mac.type == e1000_82543)
+		adapter->flags |= E1000_FLAG_BAD_TX_CARRIER_STATS_FD;
+
+	/* On rare occasions, ESB2 systems would end up started without
+	 * the RX unit being turned on. */
+	if (adapter->hw.mac.type == e1000_80003es2lan)
+		adapter->flags |= E1000_FLAG_RX_NEEDS_RESTART;
+
+	adapter->en_mng_pt = e1000_enable_mng_pass_thru(&adapter->hw);
+
+	/* before reading the NVM, reset the controller to
+	 * put the device in a known good starting state */
+
+	e1000_reset_hw(&adapter->hw);
+
+	/* make sure we don't intercept ARP packets until we're up */
+	e1000_release_manageability(adapter);
+
+	/* make sure the NVM is good */
+
+	if (e1000_validate_nvm_checksum(&adapter->hw) < 0) {
+		DPRINTK(PROBE, ERR, "The NVM Checksum Is Not Valid\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* copy the MAC address out of the NVM */
+
+	if (e1000_read_mac_addr(&adapter->hw))
+		DPRINTK(PROBE, ERR, "NVM Read Error\n");
+	memcpy(netdev->dev_addr, adapter->hw.mac.addr, netdev->addr_len);
+#ifdef ETHTOOL_GPERMADDR
+	memcpy(netdev->perm_addr, adapter->hw.mac.addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->perm_addr)) {
+#else
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+#endif
+		DPRINTK(PROBE, ERR, "Invalid MAC Address\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	INIT_DELAYED_WORK(&adapter->watchdog_task, e1000_watchdog_task);
+	INIT_DELAYED_WORK(&adapter->fifo_stall_task,
+			  e1000_82547_tx_fifo_stall_task);
+	INIT_DELAYED_WORK(&adapter->phy_info_task, e1000_update_phy_info_task);
+	INIT_WORK(&adapter->reset_task, e1000_reset_task);
+
+	e1000_check_options(adapter);
+
+	/* Initial Wake on LAN setting
+	 * If APM wake is enabled in the EEPROM,
+	 * enable the ACPI Magic Packet filter
+	 */
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82542:
+	case e1000_82543:
+		break;
+	case e1000_82544:
+		e1000_read_nvm(&adapter->hw,
+			NVM_INIT_CONTROL2_REG, 1, &eeprom_data);
+		eeprom_apme_mask = E1000_EEPROM_82544_APM;
+		break;
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		/* APME bit in EEPROM is mapped to WUC.APME */
+		eeprom_data = E1000_READ_REG(&adapter->hw, E1000_WUC);
+		eeprom_apme_mask = E1000_WUC_APME;
+		break;
+	case e1000_82546:
+	case e1000_82546_rev_3:
+	case e1000_82571:
+	case e1000_80003es2lan:
+		if (adapter->hw.bus.func == 1) {
+			e1000_read_nvm(&adapter->hw,
+				NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+			break;
+		}
+		fallthrough;
+	default:
+		e1000_read_nvm(&adapter->hw,
+			NVM_INIT_CONTROL3_PORT_A, 1, &eeprom_data);
+		break;
+	}
+	if (eeprom_data & eeprom_apme_mask)
+		adapter->eeprom_wol |= E1000_WUFC_MAG;
+
+	/* now that we have the eeprom settings, apply the special cases
+	 * where the eeprom may be wrong or the board simply won't support
+	 * wake on lan on a particular port */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82546GB_PCIE:
+	case E1000_DEV_ID_82571EB_SERDES_QUAD:
+		adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546EB_FIBER:
+	case E1000_DEV_ID_82546GB_FIBER:
+	case E1000_DEV_ID_82571EB_FIBER:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting */
+		if (E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+		    E1000_STATUS_FUNC_1)
+			adapter->eeprom_wol = 0;
+		break;
+	case E1000_DEV_ID_82546GB_QUAD_COPPER_KSP3:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER:
+	case E1000_DEV_ID_82571EB_QUAD_FIBER:
+	case E1000_DEV_ID_82571EB_QUAD_COPPER_LP:
+	case E1000_DEV_ID_82571PT_QUAD_COPPER:
+		/* if quad port adapter, disable WoL on all but port A */
+		if (global_quad_port_a != 0)
+			adapter->eeprom_wol = 0;
+		else
+			adapter->flags |= E1000_FLAG_QUAD_PORT_A;
+		/* Reset for multiple quad port adapters */
+		if (++global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	adapter->wol = adapter->eeprom_wol;
+
+	/* print bus type/speed/width info */
+	{
+	struct e1000_hw *hw = &adapter->hw;
+	DPRINTK(PROBE, INFO, "(PCI%s:%s:%s) ",
+		((hw->bus.type == e1000_bus_type_pcix) ? "-X" :
+		 (hw->bus.type == e1000_bus_type_pci_express ? " Express":"")),
+		((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+		 (hw->bus.speed == e1000_bus_speed_133) ? "133MHz" :
+		 (hw->bus.speed == e1000_bus_speed_120) ? "120MHz" :
+		 (hw->bus.speed == e1000_bus_speed_100) ? "100MHz" :
+		 (hw->bus.speed == e1000_bus_speed_66) ? "66MHz" : "33MHz"),
+		((hw->bus.width == e1000_bus_width_64) ? "64-bit" :
+		 (hw->bus.width == e1000_bus_width_pcie_x4) ? "Width x4" :
+		 (hw->bus.width == e1000_bus_width_pcie_x1) ? "Width x1" :
+		 "32-bit"));
+	}
+
+	for (i = 0; i < 6; i++)
+		printk("%2.2x%c", netdev->dev_addr[i], i == 5 ? '\n' : ':');
+
+	/* reset the hardware with the new settings */
+	e1000_reset(adapter);
+
+	/* If the controller is 82573 or ICH and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	/* tell the stack to leave us alone until e1000_open() is called */
+	rtnetif_carrier_off(netdev);
+	rtnetif_stop_queue(netdev);
+
+	strcpy(netdev->name, "rteth%d");
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_register;
+
+	DPRINTK(PROBE, INFO, "Intel(R) PRO/1000 Network Connection\n");
+
+	cards_found++;
+	return 0;
+
+err_register:
+err_hw_init:
+	e1000_release_hw_control(adapter);
+err_eeprom:
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+
+	e1000_remove_device(&adapter->hw);
+err_flashmap:
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+err_sw_init:
+	iounmap(adapter->hw.hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_regions(pdev);
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ * e1000_remove - Device Removal Routine
+ * @pdev: PCI device information struct
+ *
+ * e1000_remove is called by the PCI subsystem to alert the driver
+ * that it should release a PCI device.  This could be caused by a
+ * Hot-Plug event, or because the driver is going to be removed from
+ * memory.
+ **/
+static void e1000_remove(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000_down_and_stop(adapter);
+
+	e1000_release_manageability(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant. */
+	e1000_release_hw_control(adapter);
+
+	rt_unregister_rtnetdev(netdev);
+
+	if (!e1000_check_reset_block(&adapter->hw))
+		e1000_phy_hw_reset(&adapter->hw);
+
+	e1000_remove_device(&adapter->hw);
+
+	kfree(adapter->tx_ring);
+	kfree(adapter->rx_ring);
+
+	iounmap(adapter->hw.hw_addr);
+	if (adapter->hw.flash_address)
+		iounmap(adapter->hw.flash_address);
+	pci_release_regions(pdev);
+
+	rtdev_free(netdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ * e1000_sw_init - Initialize general software structures (struct e1000_adapter)
+ * @adapter: board private structure to initialize
+ *
+ * e1000_sw_init initializes the Adapter private data structure.
+ * Fields are initialized based on PCI device information and
+ * OS network device settings (MTU size).
+ **/
+static int e1000_sw_init(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+#ifdef CONFIG_E1000_NAPI
+	int i;
+#endif
+
+	/* PCI config space info */
+
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	pci_read_config_byte(pdev, PCI_REVISION_ID, &hw->revision_id);
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+	adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+	adapter->rx_ps_bsize0 = E1000_RXBUFFER_128;
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETHERNET_FCS_SIZE;
+	adapter->min_frame_size = ETH_ZLEN + ETHERNET_FCS_SIZE;
+
+	/* Initialize the hardware-specific values */
+	if (e1000_setup_init_funcs(hw, FALSE)) {
+		DPRINTK(PROBE, ERR, "Hardware Initialization Failure\n");
+		return -EIO;
+	}
+
+#ifdef CONFIG_E1000_MQ
+	/* Number of supported queues.
+	 * TODO: It's assumed num_rx_queues >= num_tx_queues, since multi-rx
+	 * queues are much more interesting.  Is it worth coding for the
+	 * possibility (however improbable) of num_tx_queues > num_rx_queues?
+	 */
+	switch (hw->mac.type) {
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_82573:
+	case e1000_80003es2lan:
+		adapter->num_tx_queues = 2;
+		adapter->num_rx_queues = 2;
+		break;
+	case e1000_ich8lan:
+	case e1000_ich9lan:
+		if ((adapter->hw.device_id == E1000_DEV_ID_ICH8_IGP_AMT) ||
+		    (adapter->hw.device_id == E1000_DEV_ID_ICH8_IGP_M_AMT) ||
+		    (adapter->hw.device_id == E1000_DEV_ID_ICH9_IGP_AMT)) {
+			adapter->num_tx_queues = 2;
+			adapter->num_rx_queues = 2;
+			break;
+		}
+		fallthrough; /* remaining ICH SKUs do not support MQ */
+	default:
+		/* All hardware before 82571 only have 1 queue each for Rx/Tx.
+		 * However, the 82571 family does not have MSI-X, so multi-
+		 * queue isn't enabled.
+		 * It'd be wise not to mess with this default case. :) */
+		adapter->num_tx_queues = 1;
+		adapter->num_rx_queues = 1;
+		netdev->egress_subqueue_count = 0;
+		break;
+	}
+	adapter->num_rx_queues = min(adapter->num_rx_queues, num_online_cpus());
+	adapter->num_tx_queues = min(adapter->num_tx_queues, num_online_cpus());
+
+	if ((adapter->num_tx_queues > 1) || (adapter->num_rx_queues > 1)) {
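+		/* The subqueue array is laid out directly after the adapter
+		 * private area in the netdev allocation, hence the pointer
+		 * arithmetic below. */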
+		netdev->egress_subqueue = (struct net_device_subqueue *)
+					   ((void *)adapter +
+					    sizeof(struct e1000_adapter));
+		netdev->egress_subqueue_count = adapter->num_tx_queues;
+		DPRINTK(DRV, INFO, "Multiqueue Enabled: RX queues = %u, "
+			"TX queues = %u\n", adapter->num_rx_queues,
+			adapter->num_tx_queues);
+	}
+#else
+	adapter->num_tx_queues = 1;
+	adapter->num_rx_queues = 1;
+#endif
+
+	if (e1000_alloc_queues(adapter)) {
+		DPRINTK(PROBE, ERR, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct e1000_rx_ring *rx_ring = &adapter->rx_ring[i];
+		netif_napi_add(adapter->netdev, &rx_ring->napi, e1000_poll, 64);
+	}
+	rtdm_lock_init(&adapter->tx_queue_lock);
+#ifdef CONFIG_E1000_MQ
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		rtdm_lock_init(&adapter->tx_ring[i].tx_queue_lock);
+#endif
+#endif
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	atomic_set(&adapter->irq_sem, 0);
+	e1000_irq_disable(adapter);
+
+	rtdm_lock_init(&adapter->stats_lock);
+
+	set_bit(__E1000_DOWN, &adapter->state);
+	return 0;
+}
+
+/**
+ * e1000_alloc_queues - Allocate memory for all rings
+ * @adapter: board private structure to initialize
+ **/
+static int e1000_alloc_queues(struct e1000_adapter *adapter)
+{
+	adapter->tx_ring = kcalloc(adapter->num_tx_queues,
+				   sizeof(struct e1000_tx_ring), GFP_KERNEL);
+	if (!adapter->tx_ring)
+		return -ENOMEM;
+
+	adapter->rx_ring = kcalloc(adapter->num_rx_queues,
+				   sizeof(struct e1000_rx_ring), GFP_KERNEL);
+	if (!adapter->rx_ring) {
+		kfree(adapter->tx_ring);
+		return -ENOMEM;
+	}
+
+#ifdef CONFIG_E1000_MQ
+	adapter->cpu_tx_ring = alloc_percpu(struct e1000_tx_ring *);
+#endif
+
+	return E1000_SUCCESS;
+}
+
+#ifdef CONFIG_E1000_MQ
+static void e1000_setup_queue_mapping(struct e1000_adapter *adapter)
+{
+	int i, cpu;
+
+	lock_cpu_hotplug();
+	i = 0;
+	for_each_online_cpu(cpu) {
+		*per_cpu_ptr(adapter->cpu_tx_ring, cpu) =
+			     &adapter->tx_ring[i % adapter->num_tx_queues];
+		i++;
+	}
+	unlock_cpu_hotplug();
+}
+#endif
+
+/**
+ * e1000_intr_msi_test - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static irqreturn_t e1000_intr_msi_test(int irq, void *data)
+{
+	struct net_device *netdev = data;
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	u32 icr = E1000_READ_REG(&adapter->hw, E1000_ICR);
+	DPRINTK(HW, INFO, "icr is %08X\n", icr);
+	if (icr & E1000_ICR_RXSEQ) {
+		adapter->flags |= E1000_FLAG_HAS_MSI;
+		wmb();
+	}
+
+	return IRQ_HANDLED;
+}
+
+/**
+ * e1000_test_msi_interrupt - Returns 0 for successful test
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c
+ **/
+static int e1000_test_msi_interrupt(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	int err;
+
+	/* poll_enable hasn't been called yet, so don't need disable */
+	/* clear any pending events */
+	E1000_READ_REG(&adapter->hw, E1000_ICR);
+
+	/* free the real vector and request a test handler */
+	e1000_free_irq(adapter);
+
+	err = pci_enable_msi(adapter->pdev);
+	if (err)
+		goto msi_test_failed;
+
+	err = request_irq(adapter->pdev->irq, &e1000_intr_msi_test, 0,
+			  netdev->name, netdev);
+	if (err) {
+		pci_disable_msi(adapter->pdev);
+		goto msi_test_failed;
+	}
+
+	/* our temporary test variable */
+	adapter->flags &= ~E1000_FLAG_HAS_MSI;
+	wmb();
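+	/* The wmb() publishes the cleared flag before the test interrupt
+	 * fires; it pairs with the rmb() before the flag is re-read below. */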
+
+	e1000_irq_enable(adapter);
+
+	/* fire an unusual interrupt on the test handler */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXSEQ);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	msleep(50);
+
+	e1000_irq_disable(adapter);
+
+	rmb();
+	if (!(adapter->flags & E1000_FLAG_HAS_MSI)) {
+		adapter->flags |= E1000_FLAG_HAS_MSI;
+		err = -EIO;
+		DPRINTK(HW, INFO, "MSI interrupt test failed!\n");
+	}
+
+	free_irq(adapter->pdev->irq, netdev);
+	pci_disable_msi(adapter->pdev);
+
+	if (err == -EIO)
+		goto msi_test_failed;
+
+	/* okay so the test worked, restore settings */
+	DPRINTK(HW, INFO, "MSI interrupt test succeeded!\n");
+msi_test_failed:
+	/* restore the original vector, even if it failed */
+	e1000_request_irq(adapter);
+	return err;
+}
+
+/**
+ * e1000_test_msi - Returns 0 if MSI test succeeds and INTx mode is restored
+ * @adapter: board private struct
+ *
+ * code flow taken from tg3.c, called with e1000 interrupts disabled.
+ **/
+static int e1000_test_msi(struct e1000_adapter *adapter)
+{
+	int err;
+	u16 pci_cmd;
+
+	if (!(adapter->flags & E1000_FLAG_MSI_ENABLED) ||
+	    !(adapter->flags & E1000_FLAG_HAS_MSI))
+		return 0;
+
+	/* disable SERR in case the MSI write causes a master abort */
+	pci_read_config_word(adapter->pdev, PCI_COMMAND, &pci_cmd);
+	pci_write_config_word(adapter->pdev, PCI_COMMAND,
+			      pci_cmd & ~PCI_COMMAND_SERR);
+
+	err = e1000_test_msi_interrupt(adapter);
+
+	/* restore previous setting of command word */
+	pci_write_config_word(adapter->pdev, PCI_COMMAND, pci_cmd);
+
+	/* success! */
+	if (!err)
+		return 0;
+
+	/* EIO means MSI test failed */
+	if (err != -EIO)
+		return err;
+
+	/* back to INTx mode */
+	DPRINTK(PROBE, WARNING, "MSI interrupt test failed, using legacy "
+		"interrupt.\n");
+
+	e1000_free_irq(adapter);
+	adapter->flags &= ~E1000_FLAG_HAS_MSI;
+
+	err = e1000_request_irq(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_open - Called when a network interface is made active
+ * @netdev: network interface device structure
+ *
+ * Returns 0 on success, negative value on failure
+ *
+ * The open entry point is called when a network interface is made
+ * active by the system (IFF_UP).  At this point all resources needed
+ * for transmit and receive operations are allocated, the interrupt
+ * handler is registered with the OS, the watchdog timer is started,
+ * and the stack is notified that the interface is ready.
+ **/
+static int e1000_open(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int err;
+	/* disallow open during test */
+	if (test_bit(__E1000_TESTING, &adapter->state))
+		return -EBUSY;
+
+	/* allocate transmit descriptors */
+	err = e1000_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = e1000_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		e1000_power_up_phy(&adapter->hw);
+		e1000_setup_link(&adapter->hw);
+	}
+
+#ifdef NETIF_F_HW_VLAN_TX
+	adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN)) {
+		e1000_update_mng_vlan(adapter);
+	}
+#endif
+
+	/* For 82573 and ICHx if AMT is enabled, let the firmware know
+	 * that the network interface is now open */
+	if (((adapter->hw.mac.type == e1000_82573) ||
+	     (adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	/* before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to set up our
+	 * clean_rx handler before we do so.  */
+	e1000_configure(adapter);
+
+
+	err = e1000_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* work around PCIe errata with MSI interrupts causing some chipsets to
+	 * ignore e1000 MSI messages, which means we need to test our MSI
+	 * interrupt now */
+	err = e1000_test_msi(adapter);
+	if (err) {
+		DPRINTK(PROBE, ERR, "Interrupt allocation failed\n");
+		goto err_req_irq;
+	}
+
+	/* From here on the code is the same as e1000_up() */
+	clear_bit(__E1000_DOWN, &adapter->state);
+
+	e1000_napi_enable_all(adapter);
+
+	schedule_delayed_work(&adapter->watchdog_task, 1);
+	e1000_irq_enable(adapter);
+
+	/* fire a link status change interrupt to start the watchdog */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_LSC);
+
+	return E1000_SUCCESS;
+
+err_req_irq:
+	e1000_release_hw_control(adapter);
+	/* Power down the PHY so no link is implied when interface is down.
+	 * The PHY cannot be powered down if any of the following is TRUE:
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && adapter->hw.mac.type >= e1000_82540 &&
+	   adapter->hw.phy.media_type == e1000_media_type_copper)
+		e1000_power_down_phy(&adapter->hw);
+	e1000_free_all_rx_resources(adapter);
+err_setup_rx:
+	e1000_free_all_tx_resources(adapter);
+err_setup_tx:
+	e1000_reset(adapter);
+
+	return err;
+}
+
+/**
+ * e1000_close - Disables a network interface
+ * @netdev: network interface device structure
+ *
+ * Returns 0, this is not allowed to fail
+ *
+ * The close entry point is called when an interface is de-activated
+ * by the OS.  The hardware is still under the drivers control, but
+ * needs to be disabled.  A global MAC reset is issued to stop the
+ * hardware, and all transmit and receive resources are freed.
+ **/
+static int e1000_close(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	WARN_ON(test_bit(__E1000_RESETTING, &adapter->state));
+	e1000_down(adapter);
+	/* Power down the PHY so no link is implied when interface is down.
+	 * The PHY cannot be powered down if any of the following is TRUE:
+	 * (a) WoL is enabled
+	 * (b) AMT is active
+	 * (c) SoL/IDER session is active */
+	if (!adapter->wol && adapter->hw.mac.type >= e1000_82540 &&
+	   adapter->hw.phy.media_type == e1000_media_type_copper)
+		e1000_power_down_phy(&adapter->hw);
+	e1000_free_irq(adapter);
+
+	e1000_free_all_tx_resources(adapter);
+	e1000_free_all_rx_resources(adapter);
+
+#ifdef NETIF_F_HW_VLAN_TX
+	/* kill manageability vlan ID if supported, but not if a vlan with
+	 * the same ID is registered on the host OS (let 8021q kill it) */
+	if ((adapter->hw.mng_cookie.status &
+			  E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	     !(adapter->vlgrp &&
+	       vlan_group_get_device(adapter->vlgrp, adapter->mng_vlan_id))) {
+		e1000_vlan_rx_kill_vid(netdev, adapter->mng_vlan_id);
+	}
+#endif
+
+	/* For 82573 and ICHx if AMT is enabled, let the firmware know
+	 * that the network interface is now closed */
+	if (((adapter->hw.mac.type == e1000_82573) ||
+	     (adapter->hw.mac.type == e1000_ich8lan) ||
+	     (adapter->hw.mac.type == e1000_ich9lan)) &&
+	    e1000_check_mng_mode(&adapter->hw))
+		e1000_release_hw_control(adapter);
+
+	return 0;
+}
+
+/**
+ * e1000_check_64k_bound - check that memory doesn't cross 64kB boundary
+ * @adapter: address of board private structure
+ * @start: address of beginning of memory
+ * @len: length of memory
+ **/
+static bool e1000_check_64k_bound(struct e1000_adapter *adapter,
+				       void *start, unsigned long len)
+{
+	unsigned long begin = (unsigned long) start;
+	unsigned long end = begin + len;
+
+	/* First-rev 82545 and 82546 must not let any memory write
+	 * location cross a 64 KiB boundary due to errata 23. */
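+	/* ((begin ^ (end - 1)) >> 16) is non-zero exactly when begin and
+	 * end - 1 differ at or above bit 16, i.e. when the buffer spans two
+	 * distinct 64 KiB regions. */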
+	if (adapter->hw.mac.type == e1000_82545 ||
+	    adapter->hw.mac.type == e1000_82546) {
+		return ((begin ^ (end - 1)) >> 16) != 0 ? FALSE : TRUE;
+	}
+
+	return TRUE;
+}
+
+/**
+ * e1000_setup_tx_resources - allocate Tx resources (Descriptors)
+ * @adapter: board private structure
+ * @tx_ring:    tx descriptor ring (for a specific queue) to setup
+ *
+ * Return 0 on success, negative on failure
+ **/
+static int e1000_setup_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size;
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	tx_ring->buffer_info = vmalloc(size);
+	if (!tx_ring->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* round up to nearest 4K */
+
+	tx_ring->size = tx_ring->count * sizeof(struct e1000_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
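+	/* e.g. with 16-byte legacy descriptors, 256 entries = 4096 bytes and
+	 * stays 4096, while 300 entries = 4800 rounds up to 8192. */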
+
+	tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+					   &tx_ring->dma, GFP_ATOMIC);
+	if (!tx_ring->desc) {
+setup_tx_desc_die:
+		vfree(tx_ring->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the transmit descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, tx_ring->desc, tx_ring->size)) {
+		void *olddesc = tx_ring->desc;
+		dma_addr_t olddma = tx_ring->dma;
+		DPRINTK(TX_ERR, ERR, "tx_ring align check failed: %u bytes "
+				     "at %p\n", tx_ring->size, tx_ring->desc);
+		/* Try again, without freeing the previous */
+		tx_ring->desc = dma_alloc_coherent(&pdev->dev, tx_ring->size,
+						   &tx_ring->dma, GFP_ATOMIC);
+		/* Failed allocation, critical failure */
+		if (!tx_ring->desc) {
+			dma_free_coherent(&pdev->dev, tx_ring->size, olddesc,
+					  olddma);
+			goto setup_tx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, tx_ring->desc,
+					   tx_ring->size)) {
+			/* give up */
+			dma_free_coherent(&pdev->dev, tx_ring->size,
+					  tx_ring->desc, tx_ring->dma);
+			dma_free_coherent(&pdev->dev, tx_ring->size, olddesc,
+					  olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the transmit descriptor ring\n");
+			vfree(tx_ring->buffer_info);
+			return -ENOMEM;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			dma_free_coherent(&pdev->dev, tx_ring->size, olddesc,
+					  olddma);
+		}
+	}
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	rtdm_lock_init(&tx_ring->tx_lock);
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_tx_resources - wrapper to allocate Tx resources
+ * @adapter: board private structure
+ *
+ * This allocates Tx resources for all queues. Returns 0 on success, negative
+ * on failure.
+ **/
+int e1000_setup_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = e1000_setup_tx_resources(adapter, &adapter->tx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				e1000_free_tx_resources(adapter,
+							&adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ * e1000_configure_tx - Configure 8254x Transmit Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Tx unit of the MAC after a reset.
+ **/
+static void e1000_configure_tx(struct e1000_adapter *adapter)
+{
+	u64 tdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tdlen, tctl, tipg, tarc;
+	u32 ipgr1, ipgr2;
+	int i;
+
+	/* Setup the HW Tx Head and Tail descriptor pointers */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		tdba = adapter->tx_ring[i].dma;
+		tdlen = adapter->tx_ring[i].count * sizeof(struct e1000_tx_desc);
+		E1000_WRITE_REG(hw, E1000_TDBAL(i), (tdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, E1000_TDBAH(i), (tdba >> 32));
+		E1000_WRITE_REG(hw, E1000_TDLEN(i), tdlen);
+		E1000_WRITE_REG(hw, E1000_TDH(i), 0);
+		E1000_WRITE_REG(hw, E1000_TDT(i), 0);
+		adapter->tx_ring[i].tdh = E1000_REGISTER(hw, E1000_TDH(i));
+		adapter->tx_ring[i].tdt = E1000_REGISTER(hw, E1000_TDT(i));
+	}
+
+
+	/* Set the default values for the Tx Inter Packet Gap timer */
+	if (adapter->hw.mac.type <= e1000_82547_rev_2 &&
+	    (hw->phy.media_type == e1000_media_type_fiber ||
+	     hw->phy.media_type == e1000_media_type_internal_serdes))
+		tipg = DEFAULT_82543_TIPG_IPGT_FIBER;
+	else
+		tipg = DEFAULT_82543_TIPG_IPGT_COPPER;
+
+	switch (hw->mac.type) {
+	case e1000_82542:
+		tipg = DEFAULT_82542_TIPG_IPGT;
+		ipgr1 = DEFAULT_82542_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82542_TIPG_IPGR2;
+		break;
+	case e1000_80003es2lan:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_80003ES2LAN_TIPG_IPGR2;
+		break;
+	default:
+		ipgr1 = DEFAULT_82543_TIPG_IPGR1;
+		ipgr2 = DEFAULT_82543_TIPG_IPGR2;
+		break;
+	}
+	tipg |= ipgr1 << E1000_TIPG_IPGR1_SHIFT;
+	tipg |= ipgr2 << E1000_TIPG_IPGR2_SHIFT;
+	E1000_WRITE_REG(hw, E1000_TIPG, tipg);
+
+	/* Set the Tx Interrupt Delay register */
+
+	E1000_WRITE_REG(hw, E1000_TIDV, adapter->tx_int_delay);
+	if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION)
+		E1000_WRITE_REG(hw, E1000_TADV, adapter->tx_abs_int_delay);
+
+	/* Program the Transmit Control Register */
+
+	tctl = E1000_READ_REG(hw, E1000_TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	if (hw->mac.type == e1000_82571 || hw->mac.type == e1000_82572) {
+		tarc = E1000_READ_REG(hw, E1000_TARC(0));
+		/* set the speed mode bit, we'll clear it if we're not at
+		 * gigabit link later */
+#define SPEED_MODE_BIT (1 << 21)
+		tarc |= SPEED_MODE_BIT;
+		E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
+	} else if (hw->mac.type == e1000_80003es2lan) {
+		tarc = E1000_READ_REG(hw, E1000_TARC(0));
+		tarc |= 1;
+		E1000_WRITE_REG(hw, E1000_TARC(0), tarc);
+		tarc = E1000_READ_REG(hw, E1000_TARC(1));
+		tarc |= 1;
+		E1000_WRITE_REG(hw, E1000_TARC(1), tarc);
+	}
+
+	e1000_config_collision_dist(hw);
+
+	/* Setup Transmit Descriptor Settings for eop descriptor */
+	adapter->txd_cmd = E1000_TXD_CMD_EOP | E1000_TXD_CMD_IFCS;
+
+	/* only set IDE if we are delaying interrupts using the timers */
+	if (adapter->tx_int_delay)
+		adapter->txd_cmd |= E1000_TXD_CMD_IDE;
+
+	if (hw->mac.type < e1000_82543)
+		adapter->txd_cmd |= E1000_TXD_CMD_RPS;
+	else
+		adapter->txd_cmd |= E1000_TXD_CMD_RS;
+
+	/* Cache if we're 82544 running in PCI-X because we'll
+	 * need this to apply a workaround later in the send path. */
+	if (hw->mac.type == e1000_82544 &&
+	    hw->bus.type == e1000_bus_type_pcix)
+		adapter->pcix_82544 = 1;
+
+	E1000_WRITE_REG(hw, E1000_TCTL, tctl);
+
+}
+
+/**
+ * e1000_setup_rx_resources - allocate Rx resources (Descriptors)
+ * @adapter: board private structure
+ * @rx_ring:    rx descriptor ring (for a specific queue) to setup
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_setup_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int size, desc_len;
+
+	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
+	rx_ring->buffer_info = vmalloc(size);
+	if (!rx_ring->buffer_info) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+	memset(rx_ring->buffer_info, 0, size);
+
+	rx_ring->ps_page = kcalloc(rx_ring->count, sizeof(struct e1000_ps_page),
+				   GFP_KERNEL);
+	if (!rx_ring->ps_page) {
+		vfree(rx_ring->buffer_info);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	rx_ring->ps_page_dma = kcalloc(rx_ring->count,
+				       sizeof(struct e1000_ps_page_dma),
+				       GFP_KERNEL);
+	if (!rx_ring->ps_page_dma) {
+		vfree(rx_ring->buffer_info);
+		kfree(rx_ring->ps_page);
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+		return -ENOMEM;
+	}
+
+	if (adapter->hw.mac.type <= e1000_82547_rev_2)
+		desc_len = sizeof(struct e1000_rx_desc);
+	else
+		desc_len = sizeof(union e1000_rx_desc_packet_split);
+
+	/* Round up to nearest 4K */
+
+	rx_ring->size = rx_ring->count * desc_len;
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+					   &rx_ring->dma, GFP_ATOMIC);
+
+	if (!rx_ring->desc) {
+		DPRINTK(PROBE, ERR,
+		"Unable to allocate memory for the receive descriptor ring\n");
+setup_rx_desc_die:
+		vfree(rx_ring->buffer_info);
+		kfree(rx_ring->ps_page);
+		kfree(rx_ring->ps_page_dma);
+		return -ENOMEM;
+	}
+
+	/* Fix for errata 23, can't cross 64kB boundary */
+	if (!e1000_check_64k_bound(adapter, rx_ring->desc, rx_ring->size)) {
+		void *olddesc = rx_ring->desc;
+		dma_addr_t olddma = rx_ring->dma;
+		DPRINTK(RX_ERR, ERR, "rx_ring align check failed: %u bytes "
+				     "at %p\n", rx_ring->size, rx_ring->desc);
+		/* Try again, without freeing the previous */
+		rx_ring->desc = dma_alloc_coherent(&pdev->dev, rx_ring->size,
+						   &rx_ring->dma, GFP_ATOMIC);
+		/* Failed allocation, critical failure */
+		if (!rx_ring->desc) {
+			dma_free_coherent(&pdev->dev, rx_ring->size, olddesc,
+					  olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		}
+
+		if (!e1000_check_64k_bound(adapter, rx_ring->desc,
+					   rx_ring->size)) {
+			/* give up */
+			dma_free_coherent(&pdev->dev, rx_ring->size,
+					  rx_ring->desc, rx_ring->dma);
+			dma_free_coherent(&pdev->dev, rx_ring->size, olddesc,
+					  olddma);
+			DPRINTK(PROBE, ERR,
+				"Unable to allocate aligned memory "
+				"for the receive descriptor ring\n");
+			goto setup_rx_desc_die;
+		} else {
+			/* Free old allocation, new allocation was successful */
+			dma_free_coherent(&pdev->dev, rx_ring->size, olddesc,
+					  olddma);
+		}
+	}
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	/* set up ring defaults */
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+	rx_ring->rx_skb_top = NULL;
+	rx_ring->adapter = adapter;
+
+	return 0;
+}
+
+/**
+ * e1000_setup_all_rx_resources - wrapper to allocate Rx resources
+ * @adapter: board private structure
+ *
+ * This allocates Rx resources for all queues. Returns 0 on success, negative
+ * on failure.
+ **/
+int e1000_setup_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = e1000_setup_rx_resources(adapter, &adapter->rx_ring[i]);
+		if (err) {
+			DPRINTK(PROBE, ERR,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				e1000_free_rx_resources(adapter,
+							&adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+#define PAGE_USE_COUNT(S) (((S) >> PAGE_SHIFT) + \
+			(((S) & (PAGE_SIZE - 1)) ? 1 : 0))
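+/* Worked example, assuming 4 KiB pages: PAGE_USE_COUNT(9000) = 2 + 1 = 3,
+ * i.e. two full pages plus one partial page for a 9000-byte jumbo MTU. */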
+/**
+ * e1000_setup_rctl - configure the receive control registers
+ * @adapter: Board private structure
+ **/
+static void e1000_setup_rctl(struct e1000_adapter *adapter)
+{
+	u32 rctl, rfctl;
+	u32 psrctl = 0;
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	u32 pages = 0;
+#endif
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM |
+		E1000_RCTL_LBM_NO | E1000_RCTL_RDMTS_HALF |
+		(adapter->hw.mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* disable the stripping of CRC because it breaks
+	 * BMC firmware connected over SMBUS
+	if (adapter->hw.mac.type > e1000_82543)
+		rctl |= E1000_RCTL_SECRC;
+	*/
+
+	if (e1000_tbi_sbp_enabled_82543(&adapter->hw))
+		rctl |= E1000_RCTL_SBP;
+	else
+		rctl &= ~E1000_RCTL_SBP;
+
+	if (adapter->netdev->mtu <= ETH_DATA_LEN)
+		rctl &= ~E1000_RCTL_LPE;
+	else
+		rctl |= E1000_RCTL_LPE;
+
+	/* Setup buffer sizes */
+	rctl &= ~E1000_RCTL_SZ_4096;
+	rctl |= E1000_RCTL_BSEX;
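+	/* With BSEX set, the buffer-size encoding is scaled by 16: the
+	 * SZ_256/512/1024 bit patterns then select 4096/8192/16384 bytes.
+	 * The small-buffer cases below clear BSEX again. */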
+	switch (adapter->rx_buffer_len) {
+		case E1000_RXBUFFER_256:
+			rctl |= E1000_RCTL_SZ_256;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_512:
+			rctl |= E1000_RCTL_SZ_512;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_1024:
+			rctl |= E1000_RCTL_SZ_1024;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_2048:
+		default:
+			rctl |= E1000_RCTL_SZ_2048;
+			rctl &= ~E1000_RCTL_BSEX;
+			break;
+		case E1000_RXBUFFER_4096:
+			rctl |= E1000_RCTL_SZ_4096;
+			break;
+		case E1000_RXBUFFER_8192:
+			rctl |= E1000_RCTL_SZ_8192;
+			break;
+		case E1000_RXBUFFER_16384:
+			rctl |= E1000_RCTL_SZ_16384;
+			break;
+	}
+
+#ifndef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	/* 82571 and greater support packet-split where the protocol
+	 * header is placed in skb->data and the packet data is
+	 * placed in pages hanging off of skb_shinfo(skb)->nr_frags.
+	 * In the case of a non-split, skb->data is linearly filled,
+	 * followed by the page buffers.  Therefore, skb->data is
+	 * sized to hold the largest protocol header.
+	 */
+	/* allocations using alloc_page take too long for regular MTU
+	 * so only enable packet split for jumbo frames */
+	pages = PAGE_USE_COUNT(adapter->netdev->mtu);
+	if ((adapter->hw.mac.type >= e1000_82571) && (pages <= 3) &&
+	    PAGE_SIZE <= 16384 && (rctl & E1000_RCTL_LPE))
+		adapter->rx_ps_pages = pages;
+	else
+		adapter->rx_ps_pages = 0;
+#endif
+
+	if (adapter->rx_ps_pages) {
+		/* Configure extra packet-split registers */
+		rfctl = E1000_READ_REG(&adapter->hw, E1000_RFCTL);
+		rfctl |= E1000_RFCTL_EXTEN;
+		/* disable packet split support for IPv6 extension headers,
+		 * because some malformed IPv6 headers can hang the RX */
+		rfctl |= (E1000_RFCTL_IPV6_EX_DIS |
+			  E1000_RFCTL_NEW_IPV6_EXT_DIS);
+
+		E1000_WRITE_REG(&adapter->hw, E1000_RFCTL, rfctl);
+
+		/* disable the stripping of CRC because it breaks
+		 * BMC firmware connected over SMBUS */
+		rctl |= E1000_RCTL_DTYP_PS /* | E1000_RCTL_SECRC */;
+
+		psrctl |= adapter->rx_ps_bsize0 >>
+			E1000_PSRCTL_BSIZE0_SHIFT;
+
+		switch (adapter->rx_ps_pages) {
+		case 3:
+			psrctl |= PAGE_SIZE <<
+				E1000_PSRCTL_BSIZE3_SHIFT;
+			fallthrough;
+		case 2:
+			psrctl |= PAGE_SIZE <<
+				E1000_PSRCTL_BSIZE2_SHIFT;
+			fallthrough;
+		case 1:
+			psrctl |= PAGE_SIZE >>
+				E1000_PSRCTL_BSIZE1_SHIFT;
+			break;
+		}
+
+		E1000_WRITE_REG(&adapter->hw, E1000_PSRCTL, psrctl);
+	}
+
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	adapter->flags &= ~E1000_FLAG_RX_RESTART_NOW;
+}
+
+/**
+ * e1000_configure_rx - Configure 8254x Receive Unit after Reset
+ * @adapter: board private structure
+ *
+ * Configure the Rx unit of the MAC after a reset.
+ **/
+static void e1000_configure_rx(struct e1000_adapter *adapter)
+{
+	u64 rdba;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rdlen, rctl, rxcsum, ctrl_ext;
+	int i;
+
+	if (adapter->rx_ps_pages) {
+		/* this is a 32 byte descriptor */
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(union e1000_rx_desc_packet_split);
+		adapter->clean_rx = e1000_clean_rx_irq_ps;
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers_ps;
+#ifdef CONFIG_E1000_NAPI
+	} else if (adapter->netdev->mtu > MAXIMUM_ETHERNET_VLAN_SIZE) {
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_jumbo_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_jumbo_rx_buffers;
+#endif
+	} else {
+		rdlen = adapter->rx_ring[0].count *
+			sizeof(struct e1000_rx_desc);
+		adapter->clean_rx = e1000_clean_rx_irq;
+		adapter->alloc_rx_buf = e1000_alloc_rx_buffers;
+	}
+
+	/* disable receives while setting up the descriptors */
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	E1000_WRITE_FLUSH(hw);
+	mdelay(10);
+
+	/* set the Receive Delay Timer Register */
+	E1000_WRITE_REG(hw, E1000_RDTR, adapter->rx_int_delay);
+
+	if (adapter->flags & E1000_FLAG_HAS_INTR_MODERATION) {
+		E1000_WRITE_REG(hw, E1000_RADV, adapter->rx_abs_int_delay);
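+		/* The ITR register counts in 256 ns units, so writing
+		 * 10^9 / (itr * 256) yields roughly 'itr' interrupts/s. */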
+		if (adapter->itr_setting != 0)
+			E1000_WRITE_REG(hw, E1000_ITR,
+				1000000000 / (adapter->itr * 256));
+	}
+
+	if (hw->mac.type >= e1000_82571) {
+		ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+		/* Reset delay timers after every interrupt */
+		ctrl_ext |= E1000_CTRL_EXT_INT_TIMER_CLR;
+#ifdef CONFIG_E1000_NAPI
+		/* Auto-Mask interrupts upon ICR access */
+		ctrl_ext |= E1000_CTRL_EXT_IAME;
+		E1000_WRITE_REG(hw, E1000_IAM, 0xffffffff);
+#endif
+		E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		rdba = adapter->rx_ring[i].dma;
+		E1000_WRITE_REG(hw, E1000_RDBAL(i), (rdba & 0x00000000ffffffffULL));
+		E1000_WRITE_REG(hw, E1000_RDBAH(i), (rdba >> 32));
+		E1000_WRITE_REG(hw, E1000_RDLEN(i), rdlen);
+		E1000_WRITE_REG(hw, E1000_RDH(i), 0);
+		E1000_WRITE_REG(hw, E1000_RDT(i), 0);
+		adapter->rx_ring[i].rdh = E1000_REGISTER(hw, E1000_RDH(i));
+		adapter->rx_ring[i].rdt = E1000_REGISTER(hw, E1000_RDT(i));
+	}
+
+#ifdef CONFIG_E1000_MQ
+	if (adapter->num_rx_queues > 1) {
+		u32 random[10];
+		u32 reta, mrqc;
+		int i;
+
+		get_random_bytes(&random[0], 40);
+
+		switch (adapter->num_rx_queues) {
+		default:
+			reta = 0x00800080;
+			mrqc = E1000_MRQC_ENABLE_RSS_2Q;
+			break;
+		}
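+		/* Presumably each byte of 'reta' is one redirection entry,
+		 * so 0x00800080 alternates entries between the two queues;
+		 * 32 array writes of 4 entries each cover the table. */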
+
+		/* Fill out redirection table */
+		for (i = 0; i < 32; i++)
+			E1000_WRITE_REG_ARRAY(hw, E1000_RETA, i, reta);
+		/* Fill out hash function seeds */
+		for (i = 0; i < 10; i++)
+			E1000_WRITE_REG_ARRAY(hw, E1000_RSSRK, i, random[i]);
+
+		mrqc |= (E1000_MRQC_RSS_FIELD_IPV4 |
+			 E1000_MRQC_RSS_FIELD_IPV4_TCP);
+
+		E1000_WRITE_REG(hw, E1000_MRQC, mrqc);
+
+		/* Multiqueue and packet checksumming are mutually exclusive. */
+		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+		rxcsum |= E1000_RXCSUM_PCSD;
+		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+	} else if (hw->mac.type >= e1000_82543) {
+#else
+	if (hw->mac.type >= e1000_82543) {
+#endif /* CONFIG_E1000_MQ */
+		/* Enable 82543 Receive Checksum Offload for TCP and UDP */
+		rxcsum = E1000_READ_REG(hw, E1000_RXCSUM);
+		if (adapter->rx_csum == TRUE) {
+			rxcsum |= E1000_RXCSUM_TUOFL;
+
+			/* Enable 82571 IPv4 payload checksum for UDP fragments.
+			 * Must be used in conjunction with packet-split. */
+			if ((hw->mac.type >= e1000_82571) &&
+			    (adapter->rx_ps_pages)) {
+				rxcsum |= E1000_RXCSUM_IPPCSE;
+			}
+		} else {
+			rxcsum &= ~E1000_RXCSUM_TUOFL;
+			/* don't need to clear IPPCSE as it defaults to 0 */
+		}
+		E1000_WRITE_REG(hw, E1000_RXCSUM, rxcsum);
+	}
+
+	/* Enable early receives on supported devices; this only takes effect
+	 * when the packet size is equal to or larger than the specified value
+	 * (in 8-byte units), e.g. with jumbo frames when set to E1000_ERT_2048 */
+	if ((hw->mac.type == e1000_82573 || hw->mac.type == e1000_ich9lan) &&
+	    (adapter->netdev->mtu > ETH_DATA_LEN))
+		E1000_WRITE_REG(hw, E1000_ERT, E1000_ERT_2048);
+
+	/* Enable Receives */
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+/**
+ * e1000_free_tx_resources - Free Tx Resources per Queue
+ * @adapter: board private structure
+ * @tx_ring: Tx descriptor ring for a specific queue
+ *
+ * Free all transmit software resources
+ **/
+static void e1000_free_tx_resources(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_tx_ring(adapter, tx_ring);
+
+	vfree(tx_ring->buffer_info);
+	tx_ring->buffer_info = NULL;
+
+	dma_free_coherent(&pdev->dev, tx_ring->size, tx_ring->desc,
+			  tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_tx_resources - Free Tx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all transmit software resources
+ **/
+void e1000_free_all_tx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_free_tx_resources(adapter, &adapter->tx_ring[i]);
+}
+
+static void e1000_unmap_and_free_tx_resource(struct e1000_adapter *adapter,
+					     struct e1000_buffer *buffer_info)
+{
+	if (buffer_info->dma) {
+		dma_unmap_page(&adapter->pdev->dev,
+			       buffer_info->dma,
+			       buffer_info->length,
+			       DMA_TO_DEVICE);
+		buffer_info->dma = 0;
+	}
+	if (buffer_info->skb) {
+		kfree_rtskb(buffer_info->skb);
+		buffer_info->skb = NULL;
+	}
+	/* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ * e1000_clean_tx_ring - Free Tx Buffers
+ * @adapter: board private structure
+ * @tx_ring: ring to be cleaned
+ **/
+static void e1000_clean_tx_ring(struct e1000_adapter *adapter,
+				struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned long size;
+	unsigned int i;
+
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->buffer_info[i];
+		e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+	}
+
+	size = sizeof(struct e1000_buffer) * tx_ring->count;
+	memset(tx_ring->buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+	tx_ring->last_tx_tso = 0;
+
+	writel(0, adapter->hw.hw_addr + tx_ring->tdh);
+	writel(0, adapter->hw.hw_addr + tx_ring->tdt);
+}
+
+/**
+ * e1000_clean_all_tx_rings - Free Tx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void e1000_clean_all_tx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		e1000_clean_tx_ring(adapter, &adapter->tx_ring[i]);
+}
+
+/**
+ * e1000_free_rx_resources - Free Rx Resources
+ * @adapter: board private structure
+ * @rx_ring: ring to clean the resources from
+ *
+ * Free all receive software resources
+ **/
+static void e1000_free_rx_resources(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring)
+{
+	struct pci_dev *pdev = adapter->pdev;
+
+	e1000_clean_rx_ring(adapter, rx_ring);
+
+	vfree(rx_ring->buffer_info);
+	rx_ring->buffer_info = NULL;
+	kfree(rx_ring->ps_page);
+	rx_ring->ps_page = NULL;
+	kfree(rx_ring->ps_page_dma);
+	rx_ring->ps_page_dma = NULL;
+
+	dma_free_coherent(&pdev->dev, rx_ring->size, rx_ring->desc,
+			  rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ * e1000_free_all_rx_resources - Free Rx Resources for All Queues
+ * @adapter: board private structure
+ *
+ * Free all receive software resources
+ **/
+void e1000_free_all_rx_resources(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_free_rx_resources(adapter, &adapter->rx_ring[i]);
+}
+
+/**
+ * e1000_clean_rx_ring - Free Rx Buffers per Queue
+ * @adapter: board private structure
+ * @rx_ring: ring to free buffers from
+ **/
+static void e1000_clean_rx_ring(struct e1000_adapter *adapter,
+				struct e1000_rx_ring *rx_ring)
+{
+	struct e1000_rx_buffer *buffer_info;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct pci_dev *pdev = adapter->pdev;
+	unsigned long size;
+	unsigned int i, j;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		buffer_info = &rx_ring->buffer_info[i];
+		if (buffer_info->dma &&
+		    adapter->clean_rx == e1000_clean_rx_irq) {
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 DMA_FROM_DEVICE);
+#ifdef CONFIG_E1000_NAPI
+		} else if (buffer_info->dma &&
+			   adapter->clean_rx == e1000_clean_jumbo_rx_irq) {
+			dma_unmap_page(&pdev->dev, buffer_info->dma, PAGE_SIZE,
+				       DMA_FROM_DEVICE);
+#endif
+		} else if (buffer_info->dma &&
+			   adapter->clean_rx == e1000_clean_rx_irq_ps) {
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
+					 adapter->rx_ps_bsize0,
+					 DMA_FROM_DEVICE);
+		}
+		buffer_info->dma = 0;
+		if (buffer_info->page) {
+			put_page(buffer_info->page);
+			buffer_info->page = NULL;
+		}
+		if (buffer_info->skb) {
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+		for (j = 0; j < adapter->rx_ps_pages; j++) {
+			if (!ps_page->ps_page[j])
+				break;
+			dma_unmap_page(&pdev->dev,
+				       ps_page_dma->ps_page_dma[j],
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+			ps_page_dma->ps_page_dma[j] = 0;
+			put_page(ps_page->ps_page[j]);
+			ps_page->ps_page[j] = NULL;
+		}
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	/* there also may be some cached data from a chained receive */
+	if (rx_ring->rx_skb_top) {
+		kfree_rtskb(rx_ring->rx_skb_top);
+		rx_ring->rx_skb_top = NULL;
+	}
+#endif
+
+	size = sizeof(struct e1000_rx_buffer) * rx_ring->count;
+	memset(rx_ring->buffer_info, 0, size);
+	size = sizeof(struct e1000_ps_page) * rx_ring->count;
+	memset(rx_ring->ps_page, 0, size);
+	size = sizeof(struct e1000_ps_page_dma) * rx_ring->count;
+	memset(rx_ring->ps_page_dma, 0, size);
+
+	/* Zero out the descriptor ring */
+
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	writel(0, adapter->hw.hw_addr + rx_ring->rdh);
+	writel(0, adapter->hw.hw_addr + rx_ring->rdt);
+}
+
+/**
+ * e1000_clean_all_rx_rings - Free Rx Buffers for all queues
+ * @adapter: board private structure
+ **/
+static void e1000_clean_all_rx_rings(struct e1000_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		e1000_clean_rx_ring(adapter, &adapter->rx_ring[i]);
+}
+
+/* The 82542 2.0 (revision 2) needs to have the receive unit in reset
+ * and memory write and invalidate disabled for certain operations
+ */
+#if 0
+static void e1000_enter_82542_rst(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	if (adapter->hw.mac.type != e1000_82542)
+		return;
+	if (adapter->hw.revision_id != E1000_REVISION_2)
+		return;
+
+	e1000_pci_clear_mwi(&adapter->hw);
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	rctl |= E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (rtnetif_running(netdev))
+		e1000_clean_all_rx_rings(adapter);
+}
+
+static void e1000_leave_82542_rst(struct e1000_adapter *adapter)
+{
+	struct net_device *netdev = adapter->netdev;
+	u32 rctl;
+
+	if (adapter->hw.mac.type != e1000_82542)
+		return;
+	if (adapter->hw.revision_id != E1000_REVISION_2)
+		return;
+
+	rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+	rctl &= ~E1000_RCTL_RST;
+	E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+	E1000_WRITE_FLUSH(&adapter->hw);
+	mdelay(5);
+
+	if (adapter->hw.bus.pci_cmd_word & PCI_COMMAND_INVALIDATE)
+		e1000_pci_set_mwi(&adapter->hw);
+
+	if (rtnetif_running(netdev)) {
+		/* No need to loop, because 82542 supports only 1 queue */
+		struct e1000_rx_ring *ring = &adapter->rx_ring[0];
+		e1000_configure_rx(adapter);
+		adapter->alloc_rx_buf(adapter, ring, E1000_DESC_UNUSED(ring));
+	}
+}
+
+/**
+ * e1000_set_mac - Change the Ethernet Address of the NIC
+ * @netdev: network interface device structure
+ * @p: pointer to an address structure
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_set_mac(struct net_device *netdev, void *p)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct sockaddr *addr = p;
+
+	if (!is_valid_ether_addr(addr->sa_data))
+		return -EADDRNOTAVAIL;
+
+	/* 82542 2.0 needs to be in reset to write receive address registers */
+
+	if (adapter->hw.mac.type == e1000_82542)
+		e1000_enter_82542_rst(adapter);
+
+	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
+	memcpy(adapter->hw.mac.addr, addr->sa_data, netdev->addr_len);
+
+	e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+	/* With 82571 controllers, LAA may be overwritten (with the default)
+	 * due to controller reset from the other port. */
+	if (adapter->hw.mac.type == e1000_82571) {
+		/* activate the work around */
+		e1000_set_laa_state_82571(&adapter->hw, TRUE);
+
+		/* Hold a copy of the LAA in RAR[14] This is done so that
+		 * between the time RAR[0] gets clobbered  and the time it
+		 * gets fixed (in e1000_watchdog), the actual LAA is in one
+		 * of the RARs and no incoming packets directed to this port
+		 * are dropped. Eventually the LAA will be in RAR[0] and
+		 * RAR[14] */
+		e1000_rar_set(&adapter->hw,
+			      adapter->hw.mac.addr,
+			      adapter->hw.mac.rar_entry_count - 1);
+	}
+
+	if (adapter->hw.mac.type == e1000_82542)
+		e1000_leave_82542_rst(adapter);
+
+	return 0;
+}
+#endif
+
+/**
+ * e1000_set_multi - Multicast and Promiscuous mode set
+ * @netdev: network interface device structure
+ *
+ * The set_multi entry point is called whenever the multicast address
+ * list or the network interface flags are updated.  This routine is
+ * responsible for configuring the hardware for proper multicast,
+ * promiscuous mode, and all-multi behavior.
+ **/
+static void e1000_set_multi(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	/* Check for Promiscuous and All Multicast modes */
+
+	rctl = E1000_READ_REG(hw, E1000_RCTL);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+	} else if (netdev->flags & IFF_ALLMULTI) {
+		rctl |= E1000_RCTL_MPE;
+		rctl &= ~E1000_RCTL_UPE;
+	} else {
+		rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE);
+	}
+
+	E1000_WRITE_REG(hw, E1000_RCTL, rctl);
+}
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy */
+static void e1000_update_phy_info_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     phy_info_task.work);
+	e1000_get_phy_info(&adapter->hw);
+}
+
+/**
+ * e1000_82547_tx_fifo_stall_task - task to complete work
+ * @work: work struct contained inside adapter struct
+ **/
+static void e1000_82547_tx_fifo_stall_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     fifo_stall_task.work);
+	struct net_device *netdev = adapter->netdev;
+	u32 tctl;
+
+	if (atomic_read(&adapter->tx_fifo_stall)) {
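+		/* Only restart once both the descriptor ring and the on-chip
+		 * Tx FIFO have drained: ring head == tail, and the FIFO
+		 * head/tail and saved head/tail register pairs match. */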
+		if ((E1000_READ_REG(&adapter->hw, E1000_TDT(0)) ==
+		    E1000_READ_REG(&adapter->hw, E1000_TDH(0))) &&
+		   (E1000_READ_REG(&adapter->hw, E1000_TDFT) ==
+		    E1000_READ_REG(&adapter->hw, E1000_TDFH)) &&
+		   (E1000_READ_REG(&adapter->hw, E1000_TDFTS) ==
+		    E1000_READ_REG(&adapter->hw, E1000_TDFHS))) {
+			tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+			E1000_WRITE_REG(&adapter->hw, E1000_TCTL,
+					tctl & ~E1000_TCTL_EN);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFT,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFH,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFTS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TDFHS,
+					adapter->tx_head_addr);
+			E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+			E1000_WRITE_FLUSH(&adapter->hw);
+
+			adapter->tx_fifo_head = 0;
+			atomic_set(&adapter->tx_fifo_stall, 0);
+			rtnetif_wake_queue(netdev);
+		} else if (!test_bit(__E1000_DOWN, &adapter->state))
+			schedule_delayed_work(&adapter->fifo_stall_task, 1);
+	}
+}
+
+static bool e1000_has_link(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = FALSE;
+	s32 ret_val = 0;
+
+	/* get_link_status is set on LSC (link status) interrupt or
+	 * rx sequence error interrupt.  get_link_status will stay
+	 * false until the e1000_check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (hw->mac.get_link_status) {
+			ret_val = e1000_check_for_link(hw);
+			link_active = !hw->mac.get_link_status;
+		} else {
+			link_active = TRUE;
+		}
+		break;
+	case e1000_media_type_fiber:
+		ret_val = e1000_check_for_link(hw);
+		link_active = !!(E1000_READ_REG(hw, E1000_STATUS) &
+				 E1000_STATUS_LU);
+		break;
+	case e1000_media_type_internal_serdes:
+		ret_val = e1000_check_for_link(hw);
+		link_active = adapter->hw.mac.serdes_has_link;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
+	if ((ret_val == E1000_ERR_PHY) && (hw->phy.type == e1000_phy_igp_3) &&
+	    (E1000_READ_REG(&adapter->hw, E1000_CTRL) & E1000_PHY_CTRL_GBE_DISABLE)) {
+		/* See e1000_kmrn_lock_loss_workaround_ich8lan() */
+		DPRINTK(LINK, INFO,
+			"Gigabit has been disabled, downgrading speed\n");
+	}
+
+	return link_active;
+}
+
+static void e1000_enable_receives(struct e1000_adapter *adapter)
+{
+	/* make sure the receive unit is started */
+	if ((adapter->flags & E1000_FLAG_RX_NEEDS_RESTART) &&
+	    (adapter->flags & E1000_FLAG_RX_RESTART_NOW)) {
+		struct e1000_hw *hw = &adapter->hw;
+		u32 rctl = E1000_READ_REG(hw, E1000_RCTL);
+		E1000_WRITE_REG(hw, E1000_RCTL, rctl | E1000_RCTL_EN);
+		adapter->flags &= ~E1000_FLAG_RX_RESTART_NOW;
+	}
+}
+
+static void e1000_watchdog_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter = container_of(work,
+						     struct e1000_adapter,
+						     watchdog_task.work);
+
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_tx_ring *tx_ring;
+	u32 link, tctl;
+	int i, tx_pending = 0;
+
+	link = e1000_has_link(adapter);
+	if ((rtnetif_carrier_ok(netdev)) && link) {
+		e1000_enable_receives(adapter);
+		goto link_up;
+	}
+
+	if (mac->type == e1000_82573) {
+		e1000_enable_tx_pkt_filtering(&adapter->hw);
+#ifdef NETIF_F_HW_VLAN_TX
+		if (adapter->mng_vlan_id != adapter->hw.mng_cookie.vlan_id)
+			e1000_update_mng_vlan(adapter);
+#endif
+	}
+
+	if (link) {
+		if (!rtnetif_carrier_ok(netdev)) {
+			u32 ctrl;
+			bool txb2b = 1;
+#ifdef SIOCGMIIPHY
+			/* update snapshot of PHY registers on LSC */
+			e1000_phy_read_status(adapter);
+#endif
+			e1000_get_speed_and_duplex(&adapter->hw,
+						   &adapter->link_speed,
+						   &adapter->link_duplex);
+
+			ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+			DPRINTK(LINK, INFO, "NIC Link is Up %d Mbps %s, "
+				"Flow Control: %s\n",
+				adapter->link_speed,
+				adapter->link_duplex == FULL_DUPLEX ?
+				"Full Duplex" : "Half Duplex",
+				((ctrl & E1000_CTRL_TFCE) && (ctrl &
+				E1000_CTRL_RFCE)) ? "RX/TX" : ((ctrl &
+				E1000_CTRL_RFCE) ? "RX" : ((ctrl &
+				E1000_CTRL_TFCE) ? "TX" : "None" )));
+
+			/* tweak tx_queue_len according to speed/duplex
+			 * and adjust the timeout factor */
+			//netdev->tx_queue_len = adapter->tx_queue_len;
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				txb2b = 0;
+				//netdev->tx_queue_len = 10;
+				adapter->tx_timeout_factor = 16;
+				break;
+			case SPEED_100:
+				txb2b = 0;
+				//netdev->tx_queue_len = 100;
+				/* maybe add some timeout factor? */
+				break;
+			}
+
+			if ((mac->type == e1000_82571 ||
+			     mac->type == e1000_82572) &&
+			    txb2b == 0) {
+				u32 tarc0;
+				tarc0 = E1000_READ_REG(&adapter->hw, E1000_TARC(0));
+				tarc0 &= ~SPEED_MODE_BIT;
+				E1000_WRITE_REG(&adapter->hw, E1000_TARC(0), tarc0);
+			}
+
+#ifdef NETIF_F_TSO
+			/* disable TSO for pcie and 10/100 speeds, to avoid
+			 * some hardware issues */
+			if (!(adapter->flags & E1000_FLAG_TSO_FORCE) &&
+			    adapter->hw.bus.type == e1000_bus_type_pci_express){
+				switch (adapter->link_speed) {
+				case SPEED_10:
+				case SPEED_100:
+					DPRINTK(PROBE, INFO,
+						"10/100 speed: disabling TSO\n");
+					netdev->features &= ~NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+					netdev->features &= ~NETIF_F_TSO6;
+#endif
+					break;
+				case SPEED_1000:
+					netdev->features |= NETIF_F_TSO;
+#ifdef NETIF_F_TSO6
+					netdev->features |= NETIF_F_TSO6;
+#endif
+					break;
+				default:
+					/* oops */
+					break;
+				}
+			}
+#endif
+
+			/* enable transmits in the hardware, need to do this
+			 * after setting TARC0 */
+			tctl = E1000_READ_REG(&adapter->hw, E1000_TCTL);
+			tctl |= E1000_TCTL_EN;
+			E1000_WRITE_REG(&adapter->hw, E1000_TCTL, tctl);
+
+			rtnetif_carrier_on(netdev);
+			rtnetif_wake_queue(netdev);
+#ifdef CONFIG_E1000_MQ
+			if (netif_is_multiqueue(netdev))
+				for (i = 0; i < adapter->num_tx_queues; i++)
+					netif_wake_subqueue(netdev, i);
+#endif
+
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				schedule_delayed_work(&adapter->phy_info_task,
+						      2 * HZ);
+			adapter->smartspeed = 0;
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+			DPRINTK(LINK, INFO, "NIC Link is Down\n");
+			rtnetif_carrier_off(netdev);
+			rtnetif_stop_queue(netdev);
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				schedule_delayed_work(&adapter->phy_info_task,
+						      2 * HZ);
+
+			/* 80003ES2LAN packet-buffer workaround --
+			 * on a link-down event, disable receives in the ISR
+			 * and reset the device here in the watchdog.
+			 */
+			if (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)
+				/* reset device */
+				schedule_work(&adapter->reset_task);
+		}
+
+		e1000_smartspeed(adapter);
+	}
+
+link_up:
+	e1000_update_stats(adapter);
+
+	mac->tx_packet_delta = adapter->stats.tpt - adapter->tpt_old;
+	adapter->tpt_old = adapter->stats.tpt;
+	mac->collision_delta = adapter->stats.colc - adapter->colc_old;
+	adapter->colc_old = adapter->stats.colc;
+
+	adapter->gorc = adapter->stats.gorc - adapter->gorc_old;
+	adapter->gorc_old = adapter->stats.gorc;
+	adapter->gotc = adapter->stats.gotc - adapter->gotc_old;
+	adapter->gotc_old = adapter->stats.gotc;
+
+	e1000_update_adaptive(&adapter->hw);
+
+	if (!rtnetif_carrier_ok(netdev)) {
+		for (i = 0 ; i < adapter->num_tx_queues ; i++) {
+			tx_ring = &adapter->tx_ring[i];
+			tx_pending |= (E1000_DESC_UNUSED(tx_ring) + 1 <
+							       tx_ring->count);
+		}
+		if (tx_pending) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context). */
+			adapter->tx_timeout_count++;
+			schedule_work(&adapter->reset_task);
+		}
+	}
+
+	/* Cause software interrupt to ensure rx ring is cleaned */
+	E1000_WRITE_REG(&adapter->hw, E1000_ICS, E1000_ICS_RXDMT0);
+
+	/* Force detection of hung controller every watchdog period */
+	adapter->detect_tx_hung = TRUE;
+
+	/* With 82571 controllers, LAA may be overwritten due to controller
+	 * reset from the other port. Set the appropriate LAA in RAR[0] */
+	if (e1000_get_laa_state_82571(&adapter->hw) == TRUE)
+		e1000_rar_set(&adapter->hw, adapter->hw.mac.addr, 0);
+
+	/* Reschedule the task */
+	if (!test_bit(__E1000_DOWN, &adapter->state))
+		schedule_delayed_work(&adapter->watchdog_task, 2 * HZ);
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ * e1000_update_itr - update the dynamic ITR value based on statistics
+ * @adapter: pointer to adapter
+ * @itr_setting: current adapter->itr
+ * @packets: the number of packets during this measurement interval
+ * @bytes: the number of bytes during this measurement interval
+ *
+ *      Stores a new ITR value based on packets and byte
+ *      counts during the last interrupt.  The advantage of per interrupt
+ *      computation is faster updates and more accurate ITR for the current
+ *      traffic pattern.  Constants in this function were computed
+ *      based on theoretical maximum wire speed and thresholds were set based
+ *      on testing data as well as attempting to minimize response time
+ *      while increasing bulk throughput.
+ *      This functionality is controlled by the InterruptThrottleRate module
+ *      parameter (see e1000_param.c)
+ **/
+#if 0
+static unsigned int e1000_update_itr(struct e1000_adapter *adapter,
+				     u16 itr_setting, int packets,
+				     int bytes)
+{
+	unsigned int retval = itr_setting;
+
+	if (unlikely(!(adapter->flags & E1000_FLAG_HAS_INTR_MODERATION)))
+		goto update_itr_done;
+
+	if (packets == 0)
+		goto update_itr_done;
+
+	switch (itr_setting) {
+	case lowest_latency:
+		/* handle TSO and jumbo frames */
+		if (bytes/packets > 8000)
+			retval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512)) {
+			retval = low_latency;
+		}
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			/* this if handles the TSO accounting */
+			if (bytes/packets > 8000) {
+				retval = bulk_latency;
+			} else if ((packets < 10) || ((bytes/packets) > 1200)) {
+				retval = bulk_latency;
+			} else if ((packets > 35)) {
+				retval = lowest_latency;
+			}
+		} else if (bytes/packets > 2000) {
+			retval = bulk_latency;
+		} else if (packets <= 2 && bytes < 512) {
+			retval = lowest_latency;
+		}
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35) {
+				retval = low_latency;
+			}
+		} else if (bytes < 6000) {
+			retval = low_latency;
+		}
+		break;
+	}
+
+update_itr_done:
+	return retval;
+}
+#endif
+
+static void e1000_set_itr(struct e1000_adapter *adapter)
+{
+}
+
+#define E1000_TX_FLAGS_CSUM		0x00000001
+#define E1000_TX_FLAGS_VLAN		0x00000002
+#define E1000_TX_FLAGS_TSO		0x00000004
+#define E1000_TX_FLAGS_IPV4		0x00000008
+#define E1000_TX_FLAGS_VLAN_MASK	0xffff0000
+#define E1000_TX_FLAGS_VLAN_SHIFT	16
+
+static int e1000_tso(struct e1000_adapter *adapter,
+		     struct e1000_tx_ring *tx_ring, struct sk_buff *skb)
+{
+#ifdef NETIF_F_TSO
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	u32 cmd_length = 0;
+	u16 ipcse = 0, tucse, mss;
+	u8 ipcss, ipcso, tucss, tucso, hdr_len;
+	int err;
+
+	if (skb_is_gso(skb)) {
+		if (skb_header_cloned(skb)) {
+			err = pskb_expand_head(skb, 0, 0, GFP_ATOMIC);
+			if (err)
+				return err;
+		}
+
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		mss = skb_shinfo(skb)->gso_size;
+		if (skb->protocol == htons(ETH_P_IP)) {
+			struct iphdr *iph = ip_hdr(skb);
+			iph->tot_len = 0;
+			iph->check = 0;
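+			/* Seed the TCP checksum with the pseudo-header sum
+			 * computed over a zero length; the TSO engine adds
+			 * the real payload length for each segment. */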
+			tcp_hdr(skb)->check = ~csum_tcpudp_magic(iph->saddr,
+								 iph->daddr, 0,
+								 IPPROTO_TCP,
+								 0);
+			cmd_length = E1000_TXD_CMD_IP;
+			ipcse = skb_transport_offset(skb) - 1;
+#ifdef NETIF_F_TSO6
+		} else if (skb_shinfo(skb)->gso_type == SKB_GSO_TCPV6) {
+			ipv6_hdr(skb)->payload_len = 0;
+			tcp_hdr(skb)->check =
+				~csum_ipv6_magic(&ipv6_hdr(skb)->saddr,
+						 &ipv6_hdr(skb)->daddr,
+						 0, IPPROTO_TCP, 0);
+			ipcse = 0;
+#endif
+		}
+		ipcss = skb_network_offset(skb);
+		ipcso = (void *)&(ip_hdr(skb)->check) - (void *)skb->data;
+		tucss = skb_transport_offset(skb);
+		tucso = (void *)&(tcp_hdr(skb)->check) - (void *)skb->data;
+		tucse = 0;
+
+		cmd_length |= (E1000_TXD_CMD_DEXT | E1000_TXD_CMD_TSE |
+			       E1000_TXD_CMD_TCP | (skb->len - (hdr_len)));
+
+		i = tx_ring->next_to_use;
+		context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+		buffer_info = &tx_ring->buffer_info[i];
+
+		context_desc->lower_setup.ip_fields.ipcss  = ipcss;
+		context_desc->lower_setup.ip_fields.ipcso  = ipcso;
+		context_desc->lower_setup.ip_fields.ipcse  = cpu_to_le16(ipcse);
+		context_desc->upper_setup.tcp_fields.tucss = tucss;
+		context_desc->upper_setup.tcp_fields.tucso = tucso;
+		context_desc->upper_setup.tcp_fields.tucse = cpu_to_le16(tucse);
+		context_desc->tcp_seg_setup.fields.mss     = cpu_to_le16(mss);
+		context_desc->tcp_seg_setup.fields.hdr_len = hdr_len;
+		context_desc->cmd_and_length = cpu_to_le32(cmd_length);
+
+		buffer_info->time_stamp = jiffies;
+		buffer_info->next_to_watch = i;
+
+		if (++i == tx_ring->count) i = 0;
+		tx_ring->next_to_use = i;
+
+		return TRUE;
+	}
+#endif
+
+	return FALSE;
+}
+
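+/**
+ * e1000_tx_csum - set up a checksum offload context descriptor
+ * @adapter: board private structure
+ * @tx_ring: transmit ring the context descriptor is queued on
+ * @skb: packet requesting CHECKSUM_PARTIAL offload
+ *
+ * Returns TRUE if a context descriptor was queued, FALSE if the packet
+ * does not request checksum offload.
+ **/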
+static bool e1000_tx_csum(struct e1000_adapter *adapter,
+			       struct e1000_tx_ring *tx_ring,
+			       struct sk_buff *skb)
+{
+	struct e1000_context_desc *context_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i;
+	// u8 css;
+	u32 cmd_len = E1000_TXD_CMD_DEXT;
+
+	if (unlikely(skb->ip_summed != CHECKSUM_PARTIAL))
+		return FALSE;
+
+	switch (skb->protocol) {
+	case __constant_htons(ETH_P_IP):
+		break;
+	default:
+		if (unlikely(net_ratelimit())) {
+			DPRINTK(PROBE, WARNING, "checksum_partial proto=%x!\n",
+				skb->protocol);
+		}
+		break;
+	}
+
+	// css = skb_transport_offset(skb);
+
+	i = tx_ring->next_to_use;
+	buffer_info = &tx_ring->buffer_info[i];
+	context_desc = E1000_CONTEXT_DESC(*tx_ring, i);
+
+	context_desc->lower_setup.ip_config = 0;
+	context_desc->cmd_and_length = cpu_to_le32(cmd_len);
+
+	buffer_info->time_stamp = jiffies;
+	buffer_info->next_to_watch = i;
+
+	if (unlikely(++i == tx_ring->count)) i = 0;
+	tx_ring->next_to_use = i;
+
+	return TRUE;
+}
+
+#define E1000_MAX_TXD_PWR	12
+#define E1000_MAX_DATA_PER_TXD	(1<<E1000_MAX_TXD_PWR)
+
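+/**
+ * e1000_tx_map - DMA-map packet data and fill the ring's buffer_info
+ * @adapter: board private structure
+ * @tx_ring: ring whose buffer_info entries are populated
+ * @skb: packet being transmitted
+ * @first: index of the first descriptor used by this packet
+ * @max_per_txd: upper bound on bytes per data descriptor
+ * @nr_frags: number of page fragments attached to the skb
+ * @mss: TSO segment size, 0 when not segmenting
+ *
+ * Splits the linear data and any fragments into DMA-mapped chunks,
+ * applying the controller errata workarounds below, and returns the
+ * number of descriptors consumed.
+ **/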
+static int e1000_tx_map(struct e1000_adapter *adapter,
+			struct e1000_tx_ring *tx_ring,
+			struct sk_buff *skb, unsigned int first,
+			unsigned int max_per_txd, unsigned int nr_frags,
+			unsigned int mss)
+{
+	struct e1000_buffer *buffer_info;
+	unsigned int len = skb->len;
+	unsigned int offset = 0, size, count = 0, i;
+#ifdef MAX_SKB_FRAGS
+	unsigned int f;
+	len -= skb->data_len;
+#endif
+
+	i = tx_ring->next_to_use;
+
+	while (len) {
+		buffer_info = &tx_ring->buffer_info[i];
+		size = min(len, max_per_txd);
+#ifdef NETIF_F_TSO
+		/* Workaround for Controller erratum --
+		 * descriptor for non-tso packet in a linear SKB that follows a
+		 * tso gets written back prematurely before the data is fully
+		 * DMA'd to the controller */
+		if (tx_ring->last_tx_tso && !skb_is_gso(skb)) {
+			tx_ring->last_tx_tso = 0;
+			if (!skb->data_len)
+				size -= 4;
+		}
+
+		/* Workaround for premature desc write-backs
+		 * in TSO mode.  Append 4-byte sentinel desc */
+		if (unlikely(mss && !nr_frags && size == len && size > 8))
+			size -= 4;
+#endif
+		/* work-around for errata 10 and it applies
+		 * to all controllers in PCI-X mode
+		 * The fix is to make sure that the first descriptor of a
+		 * packet is smaller than 2048 - 16 - 16 (or 2016) bytes
+		 */
+		if (unlikely((adapter->hw.bus.type == e1000_bus_type_pcix) &&
+				(size > 2015) && count == 0))
+			size = 2015;
+
+		/* Workaround for potential 82544 hang in PCI-X.  Avoid
+		 * terminating buffers within evenly-aligned dwords. */
+		if (unlikely(adapter->pcix_82544 &&
+		   !((unsigned long)(skb->data + offset + size - 1) & 4) &&
+		   size > 4))
+			size -= 4;
+
+		buffer_info->length = size;
+		/* set time_stamp *before* dma to help avoid a possible race */
+		buffer_info->time_stamp = jiffies;
+		buffer_info->dma =
+			dma_map_single(&adapter->pdev->dev,
+				skb->data + offset,
+				size,
+				DMA_TO_DEVICE);
+		buffer_info->next_to_watch = i;
+
+		len -= size;
+		offset += size;
+		count++;
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+#ifdef MAX_SKB_FRAGS
+	for (f = 0; f < nr_frags; f++) {
+		struct skb_frag_struct *frag;
+
+		frag = &skb_shinfo(skb)->frags[f];
+		len = frag->size;
+		offset = frag->page_offset;
+
+		while (len) {
+			buffer_info = &tx_ring->buffer_info[i];
+			size = min(len, max_per_txd);
+#ifdef NETIF_F_TSO
+			/* Workaround for premature desc write-backs
+			 * in TSO mode.  Append 4-byte sentinel desc */
+			if (unlikely(mss && f == (nr_frags-1) && size == len && size > 8))
+				size -= 4;
+#endif
+			/* Workaround for potential 82544 hang in PCI-X.
+			 * Avoid terminating buffers within evenly-aligned
+			 * dwords. */
+			if (unlikely(adapter->pcix_82544 &&
+			   !((unsigned long)(frag->page+offset+size-1) & 4) &&
+			   size > 4))
+				size -= 4;
+
+			buffer_info->length = size;
+			buffer_info->time_stamp = jiffies;
+			buffer_info->dma =
+				dma_map_page(&adapter->pdev->dev,
+					frag->page,
+					offset,
+					size,
+					DMA_TO_DEVICE);
+			buffer_info->next_to_watch = i;
+
+			len -= size;
+			offset += size;
+			count++;
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+	}
+#endif
+
+	i = (i == 0) ? tx_ring->count - 1 : i - 1;
+	tx_ring->buffer_info[i].skb = skb;
+	tx_ring->buffer_info[first].next_to_watch = i;
+
+	return count;
+}
+
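+/**
+ * e1000_tx_queue - write the descriptors and hand them to the hardware
+ * @adapter: board private structure
+ * @tx_ring: ring receiving the descriptors
+ * @tx_flags: E1000_TX_FLAGS_* bits describing the offloads in use
+ * @count: number of buffer_info entries prepared by e1000_tx_map()
+ * @xmit_stamp: optional RTnet transmit timestamp to fill in, may be NULL
+ **/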
+static void e1000_tx_queue(struct e1000_adapter *adapter,
+			   struct e1000_tx_ring *tx_ring,
+			   int tx_flags, int count, nanosecs_abs_t *xmit_stamp)
+{
+	struct e1000_tx_desc *tx_desc = NULL;
+	struct e1000_buffer *buffer_info;
+	u32 txd_upper = 0, txd_lower = E1000_TXD_CMD_IFCS;
+	unsigned int i;
+	rtdm_lockctx_t context;
+
+	if (likely(tx_flags & E1000_TX_FLAGS_TSO)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D |
+			     E1000_TXD_CMD_TSE;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+
+		if (likely(tx_flags & E1000_TX_FLAGS_IPV4))
+			txd_upper |= E1000_TXD_POPTS_IXSM << 8;
+	}
+
+	if (likely(tx_flags & E1000_TX_FLAGS_CSUM)) {
+		txd_lower |= E1000_TXD_CMD_DEXT | E1000_TXD_DTYP_D;
+		txd_upper |= E1000_TXD_POPTS_TXSM << 8;
+	}
+
+	if (unlikely(tx_flags & E1000_TX_FLAGS_VLAN)) {
+		txd_lower |= E1000_TXD_CMD_VLE;
+		txd_upper |= (tx_flags & E1000_TX_FLAGS_VLAN_MASK);
+	}
+
+	i = tx_ring->next_to_use;
+
+	while (count--) {
+		buffer_info = &tx_ring->buffer_info[i];
+		tx_desc = E1000_TX_DESC(*tx_ring, i);
+		tx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+		tx_desc->lower.data =
+			cpu_to_le32(txd_lower | buffer_info->length);
+		tx_desc->upper.data = cpu_to_le32(txd_upper);
+		if (unlikely(++i == tx_ring->count)) i = 0;
+	}
+
+	tx_desc->lower.data |= cpu_to_le32(adapter->txd_cmd);
+
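+	/* Take the transmit timestamp and ring the doorbell inside one lock
+	 * section so the stamp handed to RTnet matches the actual kick-off. */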
+	rtdm_lock_irqsave(context);
+
+	if (xmit_stamp)
+		*xmit_stamp = cpu_to_be64(rtdm_clock_read() + *xmit_stamp);
+
+	/* Force memory writes to complete before letting h/w
+	 * know there are new descriptors to fetch.  (Only
+	 * applicable for weak-ordered memory model archs,
+	 * such as IA-64). */
+	wmb();
+
+	tx_ring->next_to_use = i;
+	writel(i, adapter->hw.hw_addr + tx_ring->tdt);
+
+	rtdm_lock_irqrestore(context);
+	/* we need this if more than one processor can write to our tail
+	 * at a time; it synchronizes IO on IA64/Altix systems */
+	mmiowb();
+}
+
+#define E1000_FIFO_HDR			0x10
+#define E1000_82547_PAD_LEN		0x3E0
+
+/**
+ * e1000_82547_fifo_workaround - avoid a Tx FIFO wrap hang on 82547
+ * @adapter: board private structure
+ * @skb: packet about to be queued
+ *
+ * 82547 workaround to avoid controller hang in half-duplex environment.
+ * The workaround is to avoid queuing a large packet that would span
+ * the internal Tx FIFO ring boundary by notifying the stack to resend
+ * the packet at a later time.  This gives the Tx FIFO an opportunity to
+ * flush all packets.  When that occurs, we reset the Tx FIFO pointers
+ * to the beginning of the Tx FIFO.
+ **/
+static int e1000_82547_fifo_workaround(struct e1000_adapter *adapter,
+				       struct sk_buff *skb)
+{
+	u32 fifo_space = adapter->tx_fifo_size - adapter->tx_fifo_head;
+	u32 skb_fifo_len = skb->len + E1000_FIFO_HDR;
+
+	skb_fifo_len = ALIGN(skb_fifo_len, E1000_FIFO_HDR);
+
+	if (adapter->link_duplex != HALF_DUPLEX)
+		goto no_fifo_stall_required;
+
+	if (atomic_read(&adapter->tx_fifo_stall))
+		return 1;
+
+	if (skb_fifo_len >= (E1000_82547_PAD_LEN + fifo_space)) {
+		atomic_set(&adapter->tx_fifo_stall, 1);
+		return 1;
+	}
+
+no_fifo_stall_required:
+	adapter->tx_fifo_head += skb_fifo_len;
+	if (adapter->tx_fifo_head >= adapter->tx_fifo_size)
+		adapter->tx_fifo_head -= adapter->tx_fifo_size;
+	return 0;
+}
+
+#define MINIMUM_DHCP_PACKET_SIZE 282
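+/**
+ * e1000_transfer_dhcp_info - hand outgoing DHCP frames to the firmware
+ * @adapter: board private structure
+ * @skb: packet about to be transmitted
+ *
+ * Passes the payload of UDP frames addressed to the DHCP server port (67)
+ * to the manageability firmware via e1000_mng_write_dhcp_info().
+ **/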
+static int e1000_transfer_dhcp_info(struct e1000_adapter *adapter,
+				    struct sk_buff *skb)
+{
+	struct e1000_hw *hw =  &adapter->hw;
+	u16 length, offset;
+#ifdef NETIF_F_HW_VLAN_TX
+	if (vlan_tx_tag_present(skb)) {
+		if (!((vlan_tx_tag_get(skb) == adapter->hw.mng_cookie.vlan_id)
+		    && (adapter->hw.mng_cookie.status &
+			E1000_MNG_DHCP_COOKIE_STATUS_VLAN)))
+			return 0;
+	}
+#endif
+	if (skb->len > MINIMUM_DHCP_PACKET_SIZE) {
+		struct ethhdr *eth = (struct ethhdr *) skb->data;
+		if ((htons(ETH_P_IP) == eth->h_proto)) {
+			const struct iphdr *ip =
+				(struct iphdr *)((u8 *)skb->data+14);
+			if (IPPROTO_UDP == ip->protocol) {
+				struct udphdr *udp =
+					(struct udphdr *)((u8 *)ip +
+						(ip->ihl << 2));
+				if (ntohs(udp->dest) == 67) {
+					offset = (u8 *)udp + 8 - skb->data;
+					length = skb->len - offset;
+
+					return e1000_mng_write_dhcp_info(hw,
+							(u8 *)udp + 8,
+							length);
+				}
+			}
+		}
+	}
+	return 0;
+}
+
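+/* Slow path of e1000_maybe_stop_tx(): stop the queue, then re-check under
+ * the memory barrier in case another CPU has freed descriptors meanwhile. */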
+static int __e1000_maybe_stop_tx(struct net_device *netdev,
+				 struct e1000_tx_ring *tx_ring, int size)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	rtnetif_stop_queue(netdev);
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it. */
+	smp_mb();
+
+	/* We need to check again in case another CPU has just
+	 * made room available. */
+	if (likely(E1000_DESC_UNUSED(tx_ring) < size))
+		return -EBUSY;
+
+	/* A reprieve! */
+	rtnetif_start_queue(netdev);
+	++adapter->restart_queue;
+	return 0;
+}
+
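+/* Fast path: take the stop/re-check detour above only when fewer than
+ * size descriptors are currently unused. */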
+static int e1000_maybe_stop_tx(struct net_device *netdev,
+			       struct e1000_tx_ring *tx_ring, int size)
+{
+	if (likely(E1000_DESC_UNUSED(tx_ring) >= size))
+		return 0;
+	return __e1000_maybe_stop_tx(netdev, tx_ring, size);
+}
+
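+/* Conservative count of data descriptors needed for S bytes when one
+ * descriptor carries at most 2^X bytes: floor(S / 2^X) + 1. */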
+#define TXD_USE_COUNT(S, X) (((S) >> (X)) + 1)
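+/**
+ * e1000_xmit_frame_ring - transmit path for one ring
+ * @skb: packet handed down by the RTnet stack
+ * @netdev: network interface device structure
+ * @tx_ring: ring to transmit on
+ *
+ * Counts the descriptors the packet will need (including errata
+ * workarounds), sets up the TSO/checksum context descriptor, maps the
+ * buffers and queues them to the hardware.
+ **/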
+static int e1000_xmit_frame_ring(struct sk_buff *skb,
+				 struct net_device *netdev,
+				 struct e1000_tx_ring *tx_ring)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	unsigned int first, max_per_txd = E1000_MAX_DATA_PER_TXD;
+	unsigned int max_txd_pwr = E1000_MAX_TXD_PWR;
+	unsigned int tx_flags = 0;
+	unsigned int len = skb->len;
+	unsigned long irq_flags;
+	unsigned int nr_frags = 0;
+	unsigned int mss = 0;
+	int count = 0;
+	int tso;
+#ifdef MAX_SKB_FRAGS
+	unsigned int f;
+	len -= skb->data_len;
+#endif
+
+	if (test_bit(__E1000_DOWN, &adapter->state)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (unlikely(skb->len <= 0)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* 82571 and newer don't need the workaround that limited descriptor
+	 * length to 4kB */
+	if (adapter->hw.mac.type >= e1000_82571)
+		max_per_txd = 8192;
+
+#ifdef NETIF_F_TSO
+	mss = skb_shinfo(skb)->gso_size;
+	/* The controller does a simple calculation to
+	 * make sure there is enough room in the FIFO before
+	 * initiating the DMA for each buffer.  The calc is:
+	 * 4 = ceil(buffer len/mss).  To make sure we don't
+	 * overrun the FIFO, adjust the max buffer len if mss
+	 * drops. */
+	if (mss) {
+		u8 hdr_len;
+		max_per_txd = min(mss << 2, max_per_txd);
+		max_txd_pwr = fls(max_per_txd) - 1;
+
+		/* TSO Workaround for 82571/2/3 Controllers -- if skb->data
+		* points to just header, pull a few bytes of payload from
+		* frags into skb->data */
+		hdr_len = skb_transport_offset(skb) + tcp_hdrlen(skb);
+		if (skb->data_len && (hdr_len == (skb->len - skb->data_len))) {
+			switch (adapter->hw.mac.type) {
+				unsigned int pull_size;
+			case e1000_82544:
+				/* Make sure we have room to chop off 4 bytes,
+				 * and that the end alignment will work out to
+				 * this hardware's requirements
+				 * NOTE: this is a TSO only workaround
+				 * if end byte alignment not correct move us
+				 * into the next dword */
+				if ((unsigned long)(skb_tail_pointer(skb) - 1) & 4)
+					break;
+				fallthrough;
+			case e1000_82571:
+			case e1000_82572:
+			case e1000_82573:
+			case e1000_ich8lan:
+			case e1000_ich9lan:
+				pull_size = min((unsigned int)4, skb->data_len);
+				if (!__pskb_pull_tail(skb, pull_size)) {
+					DPRINTK(DRV, ERR,
+						"__pskb_pull_tail failed.\n");
+					kfree_rtskb(skb);
+					return NETDEV_TX_OK;
+				}
+				len = skb->len - skb->data_len;
+				break;
+			default:
+				/* do nothing */
+				break;
+			}
+		}
+	}
+
+	/* reserve a descriptor for the offload context */
+	if ((mss) || (skb->ip_summed == CHECKSUM_PARTIAL))
+		count++;
+	count++;
+#else
+	if (skb->ip_summed == CHECKSUM_PARTIAL)
+		count++;
+#endif
+
+#ifdef NETIF_F_TSO
+	/* Controller Erratum workaround */
+	if (!skb->data_len && tx_ring->last_tx_tso && !skb_is_gso(skb))
+		count++;
+#endif
+
+	count += TXD_USE_COUNT(len, max_txd_pwr);
+
+	if (adapter->pcix_82544)
+		count++;
+
+	/* work-around for errata 10 and it applies to all controllers
+	 * in PCI-X mode, so add one more descriptor to the count
+	 */
+	if (unlikely((adapter->hw.bus.type == e1000_bus_type_pcix) &&
+			(len > 2015)))
+		count++;
+
+#ifdef MAX_SKB_FRAGS
+	nr_frags = skb_shinfo(skb)->nr_frags;
+	for (f = 0; f < nr_frags; f++)
+		count += TXD_USE_COUNT(skb_shinfo(skb)->frags[f].size,
+				       max_txd_pwr);
+	if (adapter->pcix_82544)
+		count += nr_frags;
+
+#endif
+
+	if (adapter->hw.mac.tx_pkt_filtering &&
+	    (adapter->hw.mac.type == e1000_82573))
+		e1000_transfer_dhcp_info(adapter, skb);
+
+	rtdm_lock_get_irqsave(&tx_ring->tx_lock, irq_flags);
+
+	/* need: count + 2 desc gap to keep tail from touching
+	 * head, otherwise try next time */
+	if (unlikely(e1000_maybe_stop_tx(netdev, tx_ring, count + 2))) {
+		rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+		rtdm_printk("FATAL: rt_e1000 ran into tail close to head situation!\n");
+		return NETDEV_TX_BUSY;
+	}
+
+	if (unlikely(adapter->hw.mac.type == e1000_82547)) {
+		if (unlikely(e1000_82547_fifo_workaround(adapter, skb))) {
+			rtnetif_stop_queue(netdev);
+			rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+			if (!test_bit(__E1000_DOWN, &adapter->state))
+				schedule_delayed_work(&adapter->fifo_stall_task,
+						      1);
+			rtdm_printk("FATAL: rt_e1000 ran into the 82547 controller bug!\n");
+			return NETDEV_TX_BUSY;
+		}
+	}
+
+#ifndef NETIF_F_LLTX
+	rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+
+#endif
+#ifdef NETIF_F_HW_VLAN_TX
+	if (unlikely(adapter->vlgrp && vlan_tx_tag_present(skb))) {
+		tx_flags |= E1000_TX_FLAGS_VLAN;
+		tx_flags |= (vlan_tx_tag_get(skb) << E1000_TX_FLAGS_VLAN_SHIFT);
+	}
+#endif
+
+	first = tx_ring->next_to_use;
+
+	tso = e1000_tso(adapter, tx_ring, skb);
+	if (tso < 0) {
+		kfree_rtskb(skb);
+#ifdef NETIF_F_LLTX
+		rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+#endif
+		return NETDEV_TX_OK;
+	}
+
+	if (likely(tso)) {
+		tx_ring->last_tx_tso = 1;
+		tx_flags |= E1000_TX_FLAGS_TSO;
+	} else if (likely(e1000_tx_csum(adapter, tx_ring, skb)))
+		tx_flags |= E1000_TX_FLAGS_CSUM;
+
+	/* The old method was to assume an IPv4 packet by default if TSO was
+	 * enabled.  82571 hardware supports TSO for IPv6 as well, so we can
+	 * no longer assume and must check the protocol explicitly. */
+	if (likely(skb->protocol == htons(ETH_P_IP)))
+		tx_flags |= E1000_TX_FLAGS_IPV4;
+
+	e1000_tx_queue(adapter, tx_ring, tx_flags,
+		       e1000_tx_map(adapter, tx_ring, skb, first,
+				    max_per_txd, nr_frags, mss),
+		       skb->xmit_stamp);
+
+	// netdev->trans_start = jiffies;
+
+	/* Make sure there is space in the ring for the next send. */
+	// e1000_maybe_stop_tx(netdev, tx_ring, MAX_SKB_FRAGS + 2);
+
+#ifdef NETIF_F_LLTX
+	rtdm_lock_put_irqrestore(&tx_ring->tx_lock, irq_flags);
+#endif
+	return NETDEV_TX_OK;
+}
+
+static int e1000_xmit_frame(struct sk_buff *skb, struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+
+	/* This goes back to the question of how to logically map a tx queue
+	 * to a flow.  Right now, performance is impacted slightly negatively
+	 * if using multiple tx queues.  If the stack breaks away from a
+	 * single qdisc implementation, we can look at this again. */
+	return (e1000_xmit_frame_ring(skb, netdev, tx_ring));
+}
+
+#ifdef CONFIG_E1000_MQ
+static int e1000_subqueue_xmit_frame(struct sk_buff *skb,
+				     struct net_device *netdev, int queue)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_tx_ring *tx_ring = &adapter->tx_ring[queue];
+
+	return (e1000_xmit_frame_ring(skb, netdev, tx_ring));
+}
+#endif
+
+/**
+ * e1000_tx_timeout - Respond to a Tx Hang
+ * @netdev: network interface device structure
+ **/
+#if 0
+static void e1000_tx_timeout(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* Do the reset outside of interrupt context */
+	adapter->tx_timeout_count++;
+	schedule_work(&adapter->reset_task);
+}
+#endif
+
+static void e1000_reset_task(struct work_struct *work)
+{
+	struct e1000_adapter *adapter;
+	adapter = container_of(work, struct e1000_adapter, reset_task);
+
+	e1000_reinit_locked(adapter);
+}
+
+#if 0
+/**
+ * e1000_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats * e1000_get_stats(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ * e1000_change_mtu - Change the Maximum Transfer Unit
+ * @netdev: network interface device structure
+ * @new_mtu: new value for maximum frame size
+ *
+ * Returns 0 on success, negative on failure
+ **/
+static int e1000_change_mtu(struct net_device *netdev, int new_mtu)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int max_frame = new_mtu + ETH_HLEN + ETHERNET_FCS_SIZE;
+	u16 eeprom_data = 0;
+
+	if ((max_frame < ETH_ZLEN + ETHERNET_FCS_SIZE) ||
+	    (max_frame > MAX_JUMBO_FRAME_SIZE)) {
+		DPRINTK(PROBE, ERR, "Invalid MTU setting\n");
+		return -EINVAL;
+	}
+
+	/* Adapter-specific max frame size limits. */
+	switch (adapter->hw.mac.type) {
+	case e1000_undefined:
+	case e1000_82542:
+	case e1000_ich8lan:
+		if (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	case e1000_82573:
+		/* Jumbo Frames not supported if:
+		 * - this is not an 82573L device
+		 * - ASPM is enabled in any way (0x1A bits 3:2) */
+		e1000_read_nvm(&adapter->hw, NVM_INIT_3GIO_3, 1, &eeprom_data);
+		if ((adapter->hw.device_id != E1000_DEV_ID_82573L) ||
+		    (eeprom_data & NVM_WORD1A_ASPM_MASK)) {
+			if (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE) {
+				DPRINTK(PROBE, ERR,
+					"Jumbo Frames not supported.\n");
+				return -EINVAL;
+			}
+			break;
+		}
+		/* ERT will be enabled later to enable wire speed receives */
+
+		/* fall through to get support */
+	case e1000_ich9lan:
+		if ((adapter->hw.phy.type == e1000_phy_ife) &&
+		    (max_frame > ETH_FRAME_LEN + ETHERNET_FCS_SIZE)) {
+			DPRINTK(PROBE, ERR, "Jumbo Frames not supported.\n");
+			return -EINVAL;
+		}
+		/* fall through to get support */
+	case e1000_82571:
+	case e1000_82572:
+	case e1000_80003es2lan:
+#define MAX_STD_JUMBO_FRAME_SIZE 9234
+		if (max_frame > MAX_STD_JUMBO_FRAME_SIZE) {
+			DPRINTK(PROBE, ERR, "MTU > 9216 not supported.\n");
+			return -EINVAL;
+		}
+		break;
+	default:
+		/* Capable of supporting up to MAX_JUMBO_FRAME_SIZE limit. */
+		break;
+	}
+
+	while (test_and_set_bit(__E1000_RESETTING, &adapter->state))
+		msleep(1);
+	/* e1000_down has a dependency on max_frame_size */
+	adapter->max_frame_size = max_frame;
+	if (rtnetif_running(netdev))
+		e1000_down(adapter);
+
+	/* NOTE: netdev_alloc_skb reserves 16 bytes, and typically NET_IP_ALIGN
+	 * means we reserve 2 more, this pushes us to allocate from the next
+	 * larger slab size.
+	 * i.e. RXBUFFER_2048 --> size-4096 slab
+	 *  however with the new *_jumbo_rx* routines, jumbo receives will use
+	 *  fragmented skbs */
+
+	if (max_frame <= E1000_RXBUFFER_256)
+		adapter->rx_buffer_len = E1000_RXBUFFER_256;
+	else if (max_frame <= E1000_RXBUFFER_512)
+		adapter->rx_buffer_len = E1000_RXBUFFER_512;
+	else if (max_frame <= E1000_RXBUFFER_1024)
+		adapter->rx_buffer_len = E1000_RXBUFFER_1024;
+	else if (max_frame <= E1000_RXBUFFER_2048)
+		adapter->rx_buffer_len = E1000_RXBUFFER_2048;
+#ifdef CONFIG_E1000_NAPI
+	else
+		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+#else
+	else if (max_frame <= E1000_RXBUFFER_4096)
+		adapter->rx_buffer_len = E1000_RXBUFFER_4096;
+	else if (max_frame <= E1000_RXBUFFER_8192)
+		adapter->rx_buffer_len = E1000_RXBUFFER_8192;
+	else if (max_frame <= E1000_RXBUFFER_16384)
+		adapter->rx_buffer_len = E1000_RXBUFFER_16384;
+#endif
+
+	/* adjust allocation if LPE protects us, and we aren't using SBP */
+	if (!e1000_tbi_sbp_enabled_82543(&adapter->hw) &&
+	    ((max_frame == ETH_FRAME_LEN + ETHERNET_FCS_SIZE) ||
+	     (max_frame == MAXIMUM_ETHERNET_VLAN_SIZE)))
+		adapter->rx_buffer_len = MAXIMUM_ETHERNET_VLAN_SIZE;
+
+	DPRINTK(PROBE, INFO, "changing MTU from %d to %d\n",
+		netdev->mtu, new_mtu);
+	netdev->mtu = new_mtu;
+
+	if (rtnetif_running(netdev))
+		e1000_up(adapter);
+	else
+		e1000_reset(adapter);
+
+	clear_bit(__E1000_RESETTING, &adapter->state);
+
+	return 0;
+}
+#endif
+
+/**
+ * e1000_update_stats - Update the board statistics counters
+ * @adapter: board private structure
+ **/
+void e1000_update_stats(struct e1000_adapter *adapter)
+{
+}
+#ifdef SIOCGMIIPHY
+
+/**
+ * e1000_phy_read_status - Update the PHY register status snapshot
+ * @adapter: board private structure
+ **/
+static void e1000_phy_read_status(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_phy_regs *phy = &adapter->phy_regs;
+	int ret_val = E1000_SUCCESS;
+	unsigned long irq_flags;
+
+	rtdm_lock_get_irqsave(&adapter->stats_lock, irq_flags);
+
+	if (E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU) {
+		ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy->bmcr);
+		ret_val |= e1000_read_phy_reg(hw, PHY_STATUS, &phy->bmsr);
+		ret_val |= e1000_read_phy_reg(hw, PHY_AUTONEG_ADV,
+					      &phy->advertise);
+		ret_val |= e1000_read_phy_reg(hw, PHY_LP_ABILITY, &phy->lpa);
+		ret_val |= e1000_read_phy_reg(hw, PHY_AUTONEG_EXP,
+					      &phy->expansion);
+		ret_val |= e1000_read_phy_reg(hw, PHY_1000T_CTRL,
+					      &phy->ctrl1000);
+		ret_val |= e1000_read_phy_reg(hw, PHY_1000T_STATUS,
+					      &phy->stat1000);
+		ret_val |= e1000_read_phy_reg(hw, PHY_EXT_STATUS,
+					      &phy->estatus);
+		if (ret_val)
+			DPRINTK(DRV, WARNING, "Error reading PHY register\n");
+	} else {
+		/* Do not read PHY registers if link is not up
+		 * Set values to typical power-on defaults */
+		phy->bmcr = (BMCR_SPEED1000 | BMCR_ANENABLE | BMCR_FULLDPLX);
+		phy->bmsr = (BMSR_100FULL | BMSR_100HALF | BMSR_10FULL |
+			     BMSR_10HALF | BMSR_ESTATEN | BMSR_ANEGCAPABLE |
+			     BMSR_ERCAP);
+		phy->advertise = (ADVERTISE_PAUSE_ASYM | ADVERTISE_PAUSE_CAP |
+				  ADVERTISE_ALL | ADVERTISE_CSMA);
+		phy->lpa = 0;
+		phy->expansion = EXPANSION_ENABLENPAGE;
+		phy->ctrl1000 = ADVERTISE_1000FULL;
+		phy->stat1000 = 0;
+		phy->estatus = (ESTATUS_1000_TFULL | ESTATUS_1000_THALF);
+	}
+
+	rtdm_lock_put_irqrestore(&adapter->stats_lock, irq_flags);
+}
+#endif
+
+/**
+ * e1000_intr_msi - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static int e1000_intr_msi(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+#ifndef CONFIG_E1000_NAPI
+	int i, j;
+	int rx_cleaned, tx_cleaned;
+#endif
+	u32 icr = E1000_READ_REG(hw, E1000_ICR);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+#ifdef CONFIG_E1000_NAPI
+	/* read ICR disables interrupts using IAM, so keep up with our
+	 * enable/disable accounting */
+	atomic_inc(&adapter->irq_sem);
+#endif
+	if (icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC)) {
+		hw->mac.get_link_status = 1;
+		/* ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3) &&
+		    (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* 80003ES2LAN workaround-- For packet buffer work-around on
+		 * link down event; disable receives here in the ISR and reset
+		 * adapter in watchdog */
+		if (rtnetif_carrier_ok(netdev) &&
+		    (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)) {
+			/* disable receives */
+			u32 rctl = E1000_READ_REG(hw, E1000_RCTL);
+			E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= E1000_FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		//if (!test_bit(__E1000_DOWN, &adapter->state))
+		//	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	/* XXX only using ring 0 for napi */
+	if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+	} else {
+		atomic_dec(&adapter->irq_sem);
+	}
+#else
+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+	adapter->data_received = 0;
+
+	for (i = 0; i < E1000_MAX_INTR; i++) {
+		rx_cleaned = 0;
+		for (j = 0; j < adapter->num_rx_queues; j++)
+			rx_cleaned |= adapter->clean_rx(adapter,
+							&adapter->rx_ring[j], &time_stamp);
+
+		tx_cleaned = 0;
+		for (j = 0 ; j < adapter->num_tx_queues ; j++)
+			tx_cleaned |= e1000_clean_tx_irq(adapter,
+							 &adapter->tx_ring[j]);
+
+		if (!rx_cleaned && !tx_cleaned)
+			break;
+	}
+
+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+#endif
+
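+	/* data_received was set by the Rx cleanup above; wake the RTnet
+	 * stack manager only if at least one frame was queued. */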
+	if (adapter->data_received)
+		rt_mark_stack_mgr(netdev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ * e1000_intr - Interrupt Handler
+ * @irq: interrupt number
+ * @data: pointer to a network interface device structure
+ **/
+static int e1000_intr(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *netdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl, icr = E1000_READ_REG(hw, E1000_ICR);
+#ifndef CONFIG_E1000_NAPI
+	int i, j;
+	int rx_cleaned, tx_cleaned;
+#endif
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	if (unlikely(!icr))
+		return RTDM_IRQ_NONE;  /* Not our interrupt */
+
+#ifdef CONFIG_E1000_NAPI
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt */
+	if ((adapter->flags & E1000_FLAG_INT_ASSERT_AUTO_MASK) &&
+	    !(icr & E1000_ICR_INT_ASSERTED))
+		return RTDM_IRQ_NONE;
+
+	/* Interrupt Auto-Mask...upon reading ICR,
+	 * interrupts are masked.  No need for the
+	 * IMC write, but it does mean we should
+	 * account for it ASAP. */
+	if (likely(hw->mac.type >= e1000_82571))
+		atomic_inc(&adapter->irq_sem);
+#endif
+
+	if (unlikely(icr & (E1000_ICR_RXSEQ | E1000_ICR_LSC))) {
+		hw->mac.get_link_status = 1;
+		/* ICH8 workaround-- Call gig speed drop workaround on cable
+		 * disconnect (LSC) before accessing any PHY registers */
+		if ((hw->mac.type == e1000_ich8lan) &&
+		    (hw->phy.type == e1000_phy_igp_3) &&
+		    (!(E1000_READ_REG(hw, E1000_STATUS) & E1000_STATUS_LU)))
+			e1000_gig_downshift_workaround_ich8lan(hw);
+
+		/* 80003ES2LAN workaround--
+		 * For packet buffer work-around on link down event;
+		 * disable receives here in the ISR and
+		 * reset adapter in watchdog
+		 */
+		if (rtnetif_carrier_ok(netdev) &&
+		    (adapter->flags & E1000_FLAG_RX_NEEDS_RESTART)) {
+			/* disable receives */
+			rctl = E1000_READ_REG(hw, E1000_RCTL);
+			E1000_WRITE_REG(hw, E1000_RCTL, rctl & ~E1000_RCTL_EN);
+			adapter->flags |= E1000_FLAG_RX_RESTART_NOW;
+		}
+		/* guard against interrupt when we're going down */
+		//if (!test_bit(__E1000_DOWN, &adapter->state))
+		//	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+	}
+
+#ifdef CONFIG_E1000_NAPI
+	if (hw->mac.type < e1000_82571) {
+		/* disable interrupts, without the synchronize_irq bit */
+		atomic_inc(&adapter->irq_sem);
+		E1000_WRITE_REG(hw, E1000_IMC, ~0);
+		E1000_WRITE_FLUSH(hw);
+	}
+	/* XXX only using ring 0 for napi */
+	if (likely(netif_rx_schedule_prep(netdev, &adapter->rx_ring[0].napi))) {
+		adapter->total_tx_bytes = 0;
+		adapter->total_tx_packets = 0;
+		adapter->total_rx_bytes = 0;
+		adapter->total_rx_packets = 0;
+		__netif_rx_schedule(netdev, &adapter->rx_ring[0].napi);
+	} else {
+		atomic_dec(&adapter->irq_sem);
+	}
+#else
+	/* Writing IMC and IMS is needed for 82547.
+	 * Due to Hub Link bus being occupied, an interrupt
+	 * de-assertion message is not able to be sent.
+	 * When an interrupt assertion message is generated later,
+	 * two messages are re-ordered and sent out.
+	 * That causes APIC to think 82547 is in de-assertion
+	 * state, while 82547 is in assertion state, resulting
+	 * in dead lock. Writing IMC forces 82547 into
+	 * de-assertion state.
+	 */
+	if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2) {
+		atomic_inc(&adapter->irq_sem);
+		E1000_WRITE_REG(hw, E1000_IMC, ~0);
+	}
+
+	adapter->data_received = 0;
+	adapter->total_tx_bytes = 0;
+	adapter->total_rx_bytes = 0;
+	adapter->total_tx_packets = 0;
+	adapter->total_rx_packets = 0;
+
+	for (i = 0; i < E1000_MAX_INTR; i++) {
+		rx_cleaned = 0;
+		for (j = 0; j < adapter->num_rx_queues; j++)
+			rx_cleaned |= adapter->clean_rx(adapter,
+							&adapter->rx_ring[j], &time_stamp);
+
+		tx_cleaned = 0;
+		for (j = 0 ; j < adapter->num_tx_queues ; j++)
+			tx_cleaned |= e1000_clean_tx_irq(adapter,
+							 &adapter->tx_ring[j]);
+
+		if (!rx_cleaned && !tx_cleaned)
+			break;
+	}
+
+	if (likely(adapter->itr_setting & 3))
+		e1000_set_itr(adapter);
+
+	if (hw->mac.type == e1000_82547 || hw->mac.type == e1000_82547_rev_2)
+		e1000_irq_enable(adapter);
+
+#endif
+
+	if (adapter->data_received)
+		rt_mark_stack_mgr(netdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+#ifdef CONFIG_E1000_NAPI
+/**
+ * e1000_poll - NAPI Rx polling callback
+ * @napi: struct associated with this polling callback
+ * @budget: number of packets the driver is allowed to process this poll
+ **/
+static int e1000_poll(struct napi_struct *napi, int budget)
+{
+	struct e1000_rx_ring *rx_ring = container_of(napi, struct e1000_rx_ring,
+						     napi);
+	struct e1000_adapter *adapter = rx_ring->adapter;
+	struct net_device *netdev = adapter->netdev;
+	int tx_clean_complete = 1, work_done = 0;
+	int i;
+
+	/* FIXME: I think this code is unnecessary when using the base netdev */
+	/* Keep link state information with original netdev */
+	if (!rtnetif_carrier_ok(netdev))
+		goto quit_polling;
+
+	/* e1000_poll is called per-cpu.  This lock protects
+	 * tx_ring[i] from being cleaned by multiple cpus
+	 * simultaneously.  A failure obtaining the lock means
+	 * tx_ring[i] is currently being cleaned anyway. */
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+#ifdef CONFIG_E1000_MQ
+		if (spin_trylock(&adapter->tx_ring[i].tx_queue_lock)) {
+			tx_clean_complete &= e1000_clean_tx_irq(adapter,
+							&adapter->tx_ring[i]);
+			spin_unlock(&adapter->tx_ring[i].tx_queue_lock);
+		}
+#else
+		if (spin_trylock(&adapter->tx_queue_lock)) {
+			tx_clean_complete &= e1000_clean_tx_irq(adapter,
+							&adapter->tx_ring[i]);
+			spin_unlock(&adapter->tx_queue_lock);
+		}
+#endif
+	}
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		adapter->clean_rx(adapter, &adapter->rx_ring[i],
+				  &work_done, budget);
+	}
+
+	/* If no Tx and not enough Rx work done, exit the polling mode */
+	if ((tx_clean_complete && (work_done == 0)) ||
+	   !rtnetif_running(netdev)) {
+quit_polling:
+		if (likely(adapter->itr_setting & 3))
+			e1000_set_itr(adapter);
+		netif_rx_complete(netdev, napi);
+		if (test_bit(__E1000_DOWN, &adapter->state))
+			atomic_dec(&adapter->irq_sem);
+		else
+			e1000_irq_enable(adapter);
+		return 0;
+	}
+
+	/* need to make sure the stack is aware of a tx-only poll loop */
+	if (!tx_clean_complete)
+		work_done = budget;
+
+	return work_done;
+}
+
+#endif
+/**
+ * e1000_clean_tx_irq - Reclaim resources after transmit completes
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+static bool e1000_clean_tx_irq(struct e1000_adapter *adapter,
+				    struct e1000_tx_ring *tx_ring)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct e1000_tx_desc *tx_desc, *eop_desc;
+	struct e1000_buffer *buffer_info;
+	unsigned int i, eop;
+#ifdef CONFIG_E1000_NAPI
+	unsigned int count = 0;
+#endif
+	bool cleaned = FALSE;
+	bool retval = TRUE;
+	unsigned int total_tx_bytes = 0, total_tx_packets = 0;
+
+	i = tx_ring->next_to_clean;
+	eop = tx_ring->buffer_info[i].next_to_watch;
+	eop_desc = E1000_TX_DESC(*tx_ring, eop);
+
+	while (eop_desc->upper.data & cpu_to_le32(E1000_TXD_STAT_DD)) {
+		for (cleaned = FALSE; !cleaned; ) {
+			tx_desc = E1000_TX_DESC(*tx_ring, i);
+			buffer_info = &tx_ring->buffer_info[i];
+			cleaned = (i == eop);
+
+#ifdef CONFIG_E1000_MQ
+			tx_ring->tx_stats.bytes += buffer_info->length;
+#endif
+			if (cleaned) {
+				struct sk_buff *skb = buffer_info->skb;
+#ifdef NETIF_F_TSO
+				unsigned int segs, bytecount;
+				segs = skb_shinfo(skb)->gso_segs ?: 1;
+				/* multiply data chunks by size of headers */
+				bytecount = ((segs - 1) * skb_headlen(skb)) +
+					    skb->len;
+				total_tx_packets += segs;
+				total_tx_bytes += bytecount;
+#else
+				total_tx_packets++;
+				total_tx_bytes += skb->len;
+#endif
+			}
+			e1000_unmap_and_free_tx_resource(adapter, buffer_info);
+			tx_desc->upper.data = 0;
+
+			if (unlikely(++i == tx_ring->count)) i = 0;
+		}
+
+#ifdef CONFIG_E1000_MQ
+		tx_ring->tx_stats.packets++;
+#endif
+		eop = tx_ring->buffer_info[i].next_to_watch;
+		eop_desc = E1000_TX_DESC(*tx_ring, eop);
+#ifdef CONFIG_E1000_NAPI
+#define E1000_TX_WEIGHT 64
+		/* weight of a sort for tx, to avoid endless transmit cleanup */
+		if (count++ == E1000_TX_WEIGHT) {
+			retval = FALSE;
+			break;
+		}
+#endif
+	}
+
+	tx_ring->next_to_clean = i;
+
+#define TX_WAKE_THRESHOLD 32
+	if (unlikely(cleaned && rtnetif_carrier_ok(netdev) &&
+		     E1000_DESC_UNUSED(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+
+		if (rtnetif_queue_stopped(netdev) &&
+		    !(test_bit(__E1000_DOWN, &adapter->state))) {
+			rtnetif_wake_queue(netdev);
+			++adapter->restart_queue;
+		}
+	}
+
+	if (adapter->detect_tx_hung) {
+		/* Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i */
+		adapter->detect_tx_hung = FALSE;
+		if (tx_ring->buffer_info[eop].dma &&
+		    time_after(jiffies, tx_ring->buffer_info[eop].time_stamp +
+			       (adapter->tx_timeout_factor * HZ))
+		    && !(E1000_READ_REG(&adapter->hw, E1000_STATUS) &
+			 E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			DPRINTK(DRV, ERR, "Detected Tx Unit Hang\n"
+					"  Tx Queue             <%lu>\n"
+					"  TDH                  <%x>\n"
+					"  TDT                  <%x>\n"
+					"  next_to_use          <%x>\n"
+					"  next_to_clean        <%x>\n"
+					"buffer_info[next_to_clean]\n"
+					"  time_stamp           <%lx>\n"
+					"  next_to_watch        <%x>\n"
+					"  jiffies              <%lx>\n"
+					"  next_to_watch.status <%x>\n",
+				(unsigned long)(tx_ring - adapter->tx_ring),
+				readl(adapter->hw.hw_addr + tx_ring->tdh),
+				readl(adapter->hw.hw_addr + tx_ring->tdt),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_ring->buffer_info[eop].time_stamp,
+				eop,
+				jiffies,
+				eop_desc->upper.fields.status);
+			rtnetif_stop_queue(netdev);
+		}
+	}
+	adapter->total_tx_bytes += total_tx_bytes;
+	adapter->total_tx_packets += total_tx_packets;
+	adapter->net_stats.tx_bytes += total_tx_bytes;
+	adapter->net_stats.tx_packets += total_tx_packets;
+	return retval;
+}
+
+/**
+ * e1000_rx_checksum - Receive Checksum Offload for 82543
+ * @adapter:     board private structure
+ * @status_err:  receive descriptor status and error fields
+ * @csum:        receive descriptor csum field
+ * @sk_buff:     socket buffer with received data
+ **/
+static void e1000_rx_checksum(struct e1000_adapter *adapter, u32 status_err,
+			      u32 csum, struct sk_buff *skb)
+{
+	u16 status = (u16)status_err;
+	u8 errors = (u8)(status_err >> 24);
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* 82543 or newer only */
+	if (unlikely(adapter->hw.mac.type < e1000_82543)) return;
+	/* Ignore Checksum bit is set */
+	if (unlikely(status & E1000_RXD_STAT_IXSM)) return;
+	/* TCP/UDP checksum error bit is set */
+	if (unlikely(errors & E1000_RXD_ERR_TCPE)) {
+		/* let the stack verify checksum errors */
+		adapter->hw_csum_err++;
+		return;
+	}
+	/* TCP/UDP Checksum has not been calculated */
+	if (adapter->hw.mac.type <= e1000_82547_rev_2) {
+		if (!(status & E1000_RXD_STAT_TCPCS))
+			return;
+	} else {
+		if (!(status & (E1000_RXD_STAT_TCPCS | E1000_RXD_STAT_UDPCS)))
+			return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (likely(status & E1000_RXD_STAT_TCPCS)) {
+		/* TCP checksum is good */
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	} else if (adapter->hw.mac.type > e1000_82547_rev_2) {
+		/* IP fragment with UDP payload */
+		/* Hardware complements the payload checksum, so we undo it
+		 * and then put the value in host order for further stack use.
+		 */
+		csum = ntohl(csum ^ 0xFFFF);
+		skb->csum = csum;
+		skb->ip_summed = CHECKSUM_COMPLETE;
+	}
+	adapter->hw_csum_good++;
+}
+
+/**
+ * e1000_receive_skb - helper function to handle rx indications
+ * @adapter: board private structure
+ * @status: descriptor status field as written by hardware
+ * @vlan: descriptor vlan field as written by hardware (no le/be conversion)
+ * @skb: pointer to sk_buff to be indicated to stack
+ **/
+static void e1000_receive_skb(struct e1000_adapter *adapter, u8 status,
+			      u16 vlan, struct sk_buff *skb)
+{
+#ifdef CONFIG_E1000_NAPI
+#ifdef NETIF_F_HW_VLAN_TX
+	if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
+		vlan_hwaccel_receive_skb(skb, adapter->vlgrp,
+					 le16_to_cpu(vlan) &
+					 E1000_RXD_SPC_VLAN_MASK);
+	} else {
+		netif_receive_skb(skb);
+	}
+#else
+	netif_receive_skb(skb);
+#endif
+#else /* CONFIG_E1000_NAPI */
+#ifdef NETIF_F_HW_VLAN_TX
+	if (unlikely(adapter->vlgrp && (status & E1000_RXD_STAT_VP))) {
+		vlan_hwaccel_rx(skb, adapter->vlgrp,
+				le16_to_cpu(vlan) & E1000_RXD_SPC_VLAN_MASK);
+	} else {
+		netif_rx(skb);
+	}
+#else
+	rtnetif_rx(skb);
+#endif
+#endif /* CONFIG_E1000_NAPI */
+}
+
+#ifdef CONFIG_E1000_NAPI
+/* NOTE: these new jumbo frame routines rely on NAPI because of the
+ * pskb_may_pull call, which eventually has to call kmap_atomic, and that
+ * cannot be called from hard-irq context */
+
+/**
+ * e1000_consume_page - account a page to the skb and take it from the buffer
+ **/
+static void e1000_consume_page(struct e1000_rx_buffer *bi, struct sk_buff *skb,
+			       u16 length)
+{
+	bi->page = NULL;
+	skb->len += length;
+	skb->data_len += length;
+	skb->truesize += length;
+}
+
+/**
+ * e1000_clean_jumbo_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+static bool e1000_clean_jumbo_rx_irq(struct e1000_adapter *adapter,
+					  struct e1000_rx_ring *rx_ring,
+					  int *work_done, int work_to_do)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_rx_buffer *buffer_info, *next_buffer;
+	unsigned long irq_flags;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = FALSE;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = TRUE;
+		cleaned_count++;
+		dma_unmap_page(&pdev->dev,
+			       buffer_info->dma,
+			       PAGE_SIZE,
+			       DMA_FROM_DEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* errors is only valid for DD + EOP descriptors */
+		if (unlikely((status & E1000_RXD_STAT_EOP) &&
+		    (rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK))) {
+			u8 last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(&adapter->hw, status,
+				      rx_desc->errors, length, last_byte,
+				      adapter->min_frame_size,
+				      adapter->max_frame_size)) {
+				rtdm_lock_get_irqsave(&adapter->stats_lock,
+						  irq_flags);
+				e1000_tbi_adjust_stats_82543(&adapter->hw,
+						      &adapter->stats,
+						      length, skb->data,
+						      adapter->max_frame_size);
+				rtdm_lock_put_irqrestore(&adapter->stats_lock,
+						       irq_flags);
+				length--;
+			} else {
+				/* recycle both page and skb */
+				buffer_info->skb = skb;
+				/* an error means any chain goes out the window
+				 * too */
+				if (rx_ring->rx_skb_top)
+					kfree_rtskb(rx_ring->rx_skb_top);
+				rx_ring->rx_skb_top = NULL;
+				goto next_desc;
+			}
+		}
+
+#define rxtop rx_ring->rx_skb_top
+		if (!(status & E1000_RXD_STAT_EOP)) {
+			/* this descriptor is only the beginning (or middle) */
+			if (!rxtop) {
+				/* this is the beginning of a chain */
+				rxtop = skb;
+				skb_fill_page_desc(rxtop, 0, buffer_info->page,
+						   0, length);
+			} else {
+				/* this is the middle of a chain */
+				skb_fill_page_desc(rxtop,
+				    skb_shinfo(rxtop)->nr_frags,
+				    buffer_info->page, 0, length);
+				/* re-use the skb, only consumed the page */
+				buffer_info->skb = skb;
+			}
+			e1000_consume_page(buffer_info, rxtop, length);
+			goto next_desc;
+		} else {
+			if (rxtop) {
+				/* end of the chain */
+				skb_fill_page_desc(rxtop,
+				    skb_shinfo(rxtop)->nr_frags,
+				    buffer_info->page, 0, length);
+				/* re-use the current skb, we only consumed the
+				 * page */
+				buffer_info->skb = skb;
+				skb = rxtop;
+				rxtop = NULL;
+				e1000_consume_page(buffer_info, skb, length);
+			} else {
+				/* no chain, got EOP, this buf is the packet;
+				 * copybreak to save the put_page/alloc_page */
+				if (length <= copybreak &&
+				    skb_tailroom(skb) >= length) {
+					u8 *vaddr;
+					vaddr = kmap_atomic(buffer_info->page,
+							   KM_SKB_DATA_SOFTIRQ);
+					memcpy(skb_tail_pointer(skb), vaddr, length);
+					kunmap_atomic(vaddr,
+						      KM_SKB_DATA_SOFTIRQ);
+					/* re-use the page, so don't erase
+					 * buffer_info->page */
+					rtskb_put(skb, length);
+				} else {
+					skb_fill_page_desc(skb, 0,
+							   buffer_info->page, 0,
+							   length);
+					e1000_consume_page(buffer_info, skb,
+							   length);
+				}
+			}
+		}
+
+		/* Receive Checksum Offload XXX recompute due to CRC strip? */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		pskb_trim(skb, skb->len - 4);
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		/* eth type trans needs skb->data to point to something */
+		if (!pskb_may_pull(skb, ETH_HLEN)) {
+			DPRINTK(DRV, ERR, "__pskb_pull_tail failed.\n");
+			kfree_rtskb(skb);
+			goto next_desc;
+		}
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
+		adapter->data_received = 1; /* flag for the main interrupt routine */
+
+		netdev->last_rx = jiffies;
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += length;
+#endif
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+#endif /* NAPI */
+
+/**
+ * e1000_clean_rx_irq - Send received data up the network stack; legacy
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+#ifdef CONFIG_E1000_NAPI
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    int *work_done, int work_to_do)
+#else
+static bool e1000_clean_rx_irq(struct e1000_adapter *adapter,
+				    struct e1000_rx_ring *rx_ring,
+				    nanosecs_abs_t *time_stamp)
+#endif
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc, *next_rxd;
+	struct e1000_rx_buffer *buffer_info, *next_buffer;
+	u32 length;
+	unsigned int i;
+	int cleaned_count = 0;
+	bool cleaned = FALSE;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	// rtdm_printk("<2> e1000_clean_rx_irq %i\n", __LINE__);
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC(*rx_ring, i);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (rx_desc->status & E1000_RXD_STAT_DD) {
+		struct sk_buff *skb;
+		u8 status;
+
+#ifdef CONFIG_E1000_NAPI
+		if (*work_done >= work_to_do)
+			break;
+		(*work_done)++;
+#endif
+		status = rx_desc->status;
+		skb = buffer_info->skb;
+		buffer_info->skb = NULL;
+
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = TRUE;
+		cleaned_count++;
+		dma_unmap_single(&pdev->dev,
+				 buffer_info->dma,
+				 adapter->rx_buffer_len,
+				 DMA_FROM_DEVICE);
+		buffer_info->dma = 0;
+
+		length = le16_to_cpu(rx_desc->length);
+
+		/* !EOP means multiple descriptors were used to store a single
+		 * packet, also make sure the frame isn't just CRC only */
+		if (unlikely(!(status & E1000_RXD_STAT_EOP) || (length <= 4))) {
+			/* All receives must fit into a single buffer */
+			E1000_DBG("%s: Receive packet consumed multiple"
+				  " buffers\n", netdev->name);
+			/* recycle */
+			buffer_info->skb = skb;
+			goto next_desc;
+		}
+
+		if (unlikely(rx_desc->errors & E1000_RXD_ERR_FRAME_ERR_MASK)) {
+			u8 last_byte = *(skb->data + length - 1);
+			if (TBI_ACCEPT(&adapter->hw, status,
+				      rx_desc->errors, length, last_byte,
+				      adapter->min_frame_size,
+				      adapter->max_frame_size)) {
+				length--;
+			} else {
+				/* recycle */
+				buffer_info->skb = skb;
+				goto next_desc;
+			}
+		}
+
+		/* adjust length to remove Ethernet CRC, this must be
+		 * done after the TBI_ACCEPT workaround above */
+		length -= 4;
+
+		/* probably a little skewed due to removing CRC */
+		total_rx_bytes += length;
+		total_rx_packets++;
+
+		rtskb_put(skb, length);
+
+		/* Receive Checksum Offload */
+		e1000_rx_checksum(adapter,
+				  (u32)(status) |
+				  ((u32)(rx_desc->errors) << 24),
+				  le16_to_cpu(rx_desc->csum), skb);
+
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+		skb->time_stamp = *time_stamp;
+
+		e1000_receive_skb(adapter, status, rx_desc->special, skb);
+		adapter->data_received = 1; /* flag for the main interrupt routine */
+
+		// netdev->last_rx = jiffies;
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += length;
+#endif
+
+next_desc:
+		rx_desc->status = 0;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+}
+
+/**
+ * e1000_clean_rx_irq_ps - Send received data up the network stack; packet split
+ * @adapter: board private structure
+ *
+ * the return value indicates whether actual cleaning was done; there
+ * is no guarantee that everything was cleaned
+ **/
+#ifdef CONFIG_E1000_NAPI
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+				       int *work_done, int work_to_do)
+#else
+static bool e1000_clean_rx_irq_ps(struct e1000_adapter *adapter,
+				       struct e1000_rx_ring *rx_ring,
+				       nanosecs_abs_t *time_stamp)
+#endif
+{
+#ifdef CONFIG_E1000_DISABLE_PACKET_SPLIT
+	return TRUE;
+
+#else
+
+	union e1000_rx_desc_packet_split *rx_desc, *next_rxd;
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_buffer *buffer_info, *next_buffer;
+	struct e1000_ps_page *ps_page;
+	struct e1000_ps_page_dma *ps_page_dma;
+	struct sk_buff *skb;
+	unsigned int i, j;
+	u32 length, staterr;
+	int cleaned_count = 0;
+	bool cleaned = FALSE;
+	unsigned int total_rx_bytes = 0, total_rx_packets = 0;
+
+	i = rx_ring->next_to_clean;
+	rx_desc = E1000_RX_DESC_PS(*rx_ring, i);
+	staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (staterr & E1000_RXD_STAT_DD) {
+		ps_page = &rx_ring->ps_page[i];
+		ps_page_dma = &rx_ring->ps_page_dma[i];
+#ifdef CONFIG_E1000_NAPI
+		if (unlikely(*work_done >= work_to_do))
+			break;
+		(*work_done)++;
+#endif
+		skb = buffer_info->skb;
+
+		/* in the packet split case this is header only */
+		prefetch(skb->data - NET_IP_ALIGN);
+
+		if (++i == rx_ring->count) i = 0;
+		next_rxd = E1000_RX_DESC_PS(*rx_ring, i);
+		prefetch(next_rxd);
+
+		next_buffer = &rx_ring->buffer_info[i];
+
+		cleaned = TRUE;
+		cleaned_count++;
+		dma_unmap_single(&pdev->dev, buffer_info->dma,
+				 adapter->rx_ps_bsize0,
+				 DMA_FROM_DEVICE);
+		buffer_info->dma = 0;
+
+		if (unlikely(!(staterr & E1000_RXD_STAT_EOP))) {
+			E1000_DBG("%s: Packet Split buffers didn't pick up"
+				  " the full packet\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		if (unlikely(staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)) {
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		length = le16_to_cpu(rx_desc->wb.middle.length0);
+
+		if (unlikely(!length)) {
+			E1000_DBG("%s: Last part of the packet spanning"
+				  " multiple descriptors\n", netdev->name);
+			dev_kfree_skb_irq(skb);
+			goto next_desc;
+		}
+
+		/* Good Receive */
+		rtskb_put(skb, length);
+#ifdef CONFIG_E1000_MQ
+		rx_ring->rx_stats.packets++;
+		rx_ring->rx_stats.bytes += skb->len;
+#endif
+
+#ifdef CONFIG_E1000_NAPI
+		{
+		/* this looks ugly, but it seems compiler issues make it
+		   more efficient than reusing j */
+		int l1 = le16_to_cpu(rx_desc->wb.upper.length[0]);
+
+		/* page alloc/put takes too long and affects small packet
+		 * throughput, so unsplit small packets and save the alloc/put;
+		 * only valid in softirq (napi) context to call kmap_* */
+		if (l1 && (l1 <= copybreak) &&
+		    ((length + l1) <= adapter->rx_ps_bsize0)) {
+			u8 *vaddr;
+			/* there is no documentation about how to call
+			 * kmap_atomic, so we can't hold the mapping
+			 * very long */
+			pci_dma_sync_single_for_cpu(pdev,
+				ps_page_dma->ps_page_dma[0],
+				PAGE_SIZE,
+				PCI_DMA_FROMDEVICE);
+			vaddr = kmap_atomic(ps_page->ps_page[0],
+					    KM_SKB_DATA_SOFTIRQ);
+			memcpy(skb_tail_pointer(skb), vaddr, l1);
+			kunmap_atomic(vaddr, KM_SKB_DATA_SOFTIRQ);
+			pci_dma_sync_single_for_device(pdev,
+				ps_page_dma->ps_page_dma[0],
+				PAGE_SIZE, PCI_DMA_FROMDEVICE);
+			/* remove the CRC */
+			l1 -= 4;
+			rtskb_put(skb, l1);
+			goto copydone;
+		} /* if */
+		}
+#endif
+
+		for (j = 0; j < adapter->rx_ps_pages; j++) {
+			if (!(length = le16_to_cpu(rx_desc->wb.upper.length[j])))
+				break;
+			dma_unmap_page(&pdev->dev, ps_page_dma->ps_page_dma[j],
+				       PAGE_SIZE, DMA_FROM_DEVICE);
+			ps_page_dma->ps_page_dma[j] = 0;
+			skb_fill_page_desc(skb, j, ps_page->ps_page[j], 0,
+					   length);
+			ps_page->ps_page[j] = NULL;
+			skb->len += length;
+			skb->data_len += length;
+			skb->truesize += length;
+		}
+
+		/* strip the ethernet crc, problem is we're using pages now so
+		 * this whole operation can get a little cpu intensive */
+		pskb_trim(skb, skb->len - 4);
+
+#ifdef CONFIG_E1000_NAPI
+copydone:
+#endif
+		total_rx_bytes += skb->len;
+		total_rx_packets++;
+
+		e1000_rx_checksum(adapter, staterr,
+				  le16_to_cpu(rx_desc->wb.lower.hi_dword.csum_ip.csum), skb);
+		skb->protocol = rt_eth_type_trans(skb, netdev);
+
+		if (likely(rx_desc->wb.upper.header_status &
+			   cpu_to_le16(E1000_RXDPS_HDRSTAT_HDRSP)))
+			adapter->rx_hdr_split++;
+
+		e1000_receive_skb(adapter, staterr, rx_desc->wb.middle.vlan,
+				  skb);
+		netdev->last_rx = jiffies;
+
+next_desc:
+		rx_desc->wb.middle.status_error &= cpu_to_le32(~0xFF);
+		buffer_info->skb = NULL;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (unlikely(cleaned_count >= E1000_RX_BUFFER_WRITE)) {
+			adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		/* use prefetched values */
+		rx_desc = next_rxd;
+		buffer_info = next_buffer;
+
+		staterr = le32_to_cpu(rx_desc->wb.middle.status_error);
+	}
+	rx_ring->next_to_clean = i;
+
+	cleaned_count = E1000_DESC_UNUSED(rx_ring);
+	if (cleaned_count)
+		adapter->alloc_rx_buf(adapter, rx_ring, cleaned_count);
+
+	adapter->total_rx_packets += total_rx_packets;
+	adapter->total_rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_bytes += total_rx_bytes;
+	adapter->net_stats.rx_packets += total_rx_packets;
+	return cleaned;
+#endif
+}
+
+#ifdef CONFIG_E1000_NAPI
+/**
+ * e1000_alloc_jumbo_rx_buffers - Replace used jumbo receive buffers
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+static void e1000_alloc_jumbo_rx_buffers(struct e1000_adapter *adapter,
+					 struct e1000_rx_ring *rx_ring,
+					 int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_rx_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = 256 -
+			     16 /* for skb_reserve */ -
+			     NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			skb_trim(skb, 0);
+			goto check_page;
+		}
+
+		skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
+			DPRINTK(PROBE, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				kfree_rtskb(skb);
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break; /* while !buffer_info->skb */
+			}
+
+			/* Use new allocation */
+			kfree_rtskb(oldskb);
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary;
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+check_page:
+		/* allocate a new page if necessary */
+		if (!buffer_info->page) {
+			buffer_info->page = alloc_page(GFP_ATOMIC);
+			if (unlikely(!buffer_info->page)) {
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+		}
+
+		if (!buffer_info->dma)
+			buffer_info->dma = dma_map_page(&pdev->dev,
+							buffer_info->page, 0,
+							PAGE_SIZE,
+							DMA_FROM_DEVICE);
+
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
+}
+#endif /* NAPI */
+
+/**
+ * e1000_alloc_rx_buffers - Replace used receive buffers; legacy & extended
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass
+ **/
+static void e1000_alloc_rx_buffers(struct e1000_adapter *adapter,
+				   struct e1000_rx_ring *rx_ring,
+				   int cleaned_count)
+{
+	struct net_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_rx_desc *rx_desc;
+	struct e1000_rx_buffer *buffer_info;
+	struct sk_buff *skb;
+	unsigned int i;
+	unsigned int bufsz = adapter->rx_buffer_len + NET_IP_ALIGN;
+
+	i = rx_ring->next_to_use;
+	buffer_info = &rx_ring->buffer_info[i];
+
+	while (cleaned_count--) {
+		skb = buffer_info->skb;
+		if (skb) {
+			rtskb_trim(skb, 0);
+			goto map_skb;
+		}
+
+		skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+		if (unlikely(!skb)) {
+			/* Better luck next round */
+			adapter->alloc_rx_buff_failed++;
+			break;
+		}
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+			struct sk_buff *oldskb = skb;
+			DPRINTK(RX_ERR, ERR, "skb align check failed: %u bytes "
+					     "at %p\n", bufsz, skb->data);
+			/* Try again, without freeing the previous */
+			skb = rtnetdev_alloc_rtskb(netdev, bufsz);
+			/* Failed allocation, critical failure */
+			if (!skb) {
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break;
+			}
+
+			if (!e1000_check_64k_bound(adapter, skb->data, bufsz)) {
+				/* give up */
+				kfree_rtskb(skb);
+				kfree_rtskb(oldskb);
+				adapter->alloc_rx_buff_failed++;
+				break; /* while !buffer_info->skb */
+			}
+
+			/* Use new allocation */
+			kfree_rtskb(oldskb);
+		}
+		/* Make buffer alignment 2 beyond a 16 byte boundary;
+		 * this will result in a 16 byte aligned IP header after
+		 * the 14 byte MAC header is removed
+		 */
+		skb_reserve(skb, NET_IP_ALIGN);
+
+		buffer_info->skb = skb;
+map_skb:
+		buffer_info->dma = dma_map_single(&pdev->dev,
+						  skb->data,
+						  adapter->rx_buffer_len,
+						  DMA_FROM_DEVICE);
+
+		/* Fix for errata 23, can't cross 64kB boundary */
+		if (!e1000_check_64k_bound(adapter,
+					(void *)(unsigned long)buffer_info->dma,
+					adapter->rx_buffer_len)) {
+			DPRINTK(RX_ERR, ERR,
+				"dma align check failed: %u bytes at %p\n",
+				adapter->rx_buffer_len,
+				(void *)(unsigned long)buffer_info->dma);
+			kfree_rtskb(skb);
+			buffer_info->skb = NULL;
+
+			dma_unmap_single(&pdev->dev, buffer_info->dma,
+					 adapter->rx_buffer_len,
+					 DMA_FROM_DEVICE);
+			buffer_info->dma = 0;
+
+			adapter->alloc_rx_buff_failed++;
+			break; /* while !buffer_info->skb */
+		}
+		rx_desc = E1000_RX_DESC(*rx_ring, i);
+		rx_desc->buffer_addr = cpu_to_le64(buffer_info->dma);
+
+		if (unlikely(++i == rx_ring->count))
+			i = 0;
+		buffer_info = &rx_ring->buffer_info[i];
+	}
+
+	if (likely(rx_ring->next_to_use != i)) {
+		rx_ring->next_to_use = i;
+		if (unlikely(i-- == 0))
+			i = (rx_ring->count - 1);
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64). */
+		wmb();
+		writel(i, adapter->hw.hw_addr + rx_ring->rdt);
+	}
+}
+
+/**
+ * e1000_alloc_rx_buffers_ps - Replace used receive buffers; packet split
+ * @adapter: address of board private structure
+ * @rx_ring: pointer to receive ring structure
+ * @cleaned_count: number of buffers to allocate this pass (unused)
+ **/
+static void e1000_alloc_rx_buffers_ps(struct e1000_adapter *adapter,
+				      struct e1000_rx_ring *rx_ring,
+				      int cleaned_count)
+{
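+	/*
+	 * Intentionally left empty: this port does not replenish
+	 * packet-split receive buffers.  The stub presumably remains so
+	 * that the adapter->alloc_rx_buf hook always has a valid target.
+	 */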
+}
+
+/**
+ * e1000_smartspeed - Workaround for SmartSpeed on 82541 and 82547 controllers.
+ * @adapter: address of board private structure
+ **/
+static void e1000_smartspeed(struct e1000_adapter *adapter)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+	struct e1000_phy_info *phy = &adapter->hw.phy;
+	u16 phy_status;
+	u16 phy_ctrl;
+
+	if ((phy->type != e1000_phy_igp) || !mac->autoneg ||
+	    !(phy->autoneg_advertised & ADVERTISE_1000_FULL))
+		return;
+
+	if (adapter->smartspeed == 0) {
+		/* If Master/Slave config fault is asserted twice,
+		 * we assume back-to-back */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
+			return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_STATUS, &phy_status);
+		if (!(phy_status & SR_1000T_MS_CONFIG_FAULT))
+			return;
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		if (phy_ctrl & CR_1000T_MS_ENABLE) {
+			phy_ctrl &= ~CR_1000T_MS_ENABLE;
+			e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL,
+					    phy_ctrl);
+			adapter->smartspeed++;
+			if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+			   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL,
+					       &phy_ctrl)) {
+				phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+					     MII_CR_RESTART_AUTO_NEG);
+				e1000_write_phy_reg(&adapter->hw, PHY_CONTROL,
+						    phy_ctrl);
+			}
+		}
+		return;
+	} else if (adapter->smartspeed == E1000_SMARTSPEED_DOWNSHIFT) {
+		/* If still no link, perhaps using 2/3 pair cable */
+		e1000_read_phy_reg(&adapter->hw, PHY_1000T_CTRL, &phy_ctrl);
+		phy_ctrl |= CR_1000T_MS_ENABLE;
+		e1000_write_phy_reg(&adapter->hw, PHY_1000T_CTRL, phy_ctrl);
+		if (!e1000_phy_setup_autoneg(&adapter->hw) &&
+		   !e1000_read_phy_reg(&adapter->hw, PHY_CONTROL, &phy_ctrl)) {
+			phy_ctrl |= (MII_CR_AUTO_NEG_EN |
+				     MII_CR_RESTART_AUTO_NEG);
+			e1000_write_phy_reg(&adapter->hw, PHY_CONTROL, phy_ctrl);
+		}
+	}
+	/* Restart process after E1000_SMARTSPEED_MAX iterations */
+	if (adapter->smartspeed++ == E1000_SMARTSPEED_MAX)
+		adapter->smartspeed = 0;
+}
+
+/**
+ * e1000_ioctl - handle device-specific ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ioctl request structure
+ * @cmd: ioctl command number
+ **/
+#if 0
+static int e1000_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
+{
+	switch (cmd) {
+#ifdef SIOCGMIIPHY
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return e1000_mii_ioctl(netdev, ifr, cmd);
+#endif
+#ifdef ETHTOOL_OPS_COMPAT
+	case SIOCETHTOOL:
+		return ethtool_ioctl(ifr);
+#endif
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+#ifdef SIOCGMIIPHY
+/**
+ * e1000_mii_ioctl - handle MII register ioctl requests
+ * @netdev: network interface device structure
+ * @ifr: pointer to the ioctl request structure
+ * @cmd: ioctl command number
+ **/
+static int e1000_mii_ioctl(struct net_device *netdev, struct ifreq *ifr,
+			   int cmd)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	if (adapter->hw.phy.media_type != e1000_media_type_copper)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->hw.phy.addr;
+		break;
+	case SIOCGMIIREG:
+		if (!capable(CAP_NET_ADMIN))
+			return -EPERM;
+		switch (data->reg_num & 0x1F) {
+		case MII_BMCR:
+			data->val_out = adapter->phy_regs.bmcr;
+			break;
+		case MII_BMSR:
+			data->val_out = adapter->phy_regs.bmsr;
+			break;
+		case MII_PHYSID1:
+			data->val_out = (adapter->hw.phy.id >> 16);
+			break;
+		case MII_PHYSID2:
+			data->val_out = (adapter->hw.phy.id & 0xFFFF);
+			break;
+		case MII_ADVERTISE:
+			data->val_out = adapter->phy_regs.advertise;
+			break;
+		case MII_LPA:
+			data->val_out = adapter->phy_regs.lpa;
+			break;
+		case MII_EXPANSION:
+			data->val_out = adapter->phy_regs.expansion;
+			break;
+		case MII_CTRL1000:
+			data->val_out = adapter->phy_regs.ctrl1000;
+			break;
+		case MII_STAT1000:
+			data->val_out = adapter->phy_regs.stat1000;
+			break;
+		case MII_ESTATUS:
+			data->val_out = adapter->phy_regs.estatus;
+			break;
+		default:
+			return -EIO;
+		}
+		break;
+	case SIOCSMIIREG:
+	default:
+		return -EOPNOTSUPP;
+	}
+	return E1000_SUCCESS;
+}
+#endif
+#endif
+
+void e1000_pci_set_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+	int ret_val = pci_set_mwi(adapter->pdev);
+
+	if (ret_val)
+		DPRINTK(PROBE, ERR, "Error in setting MWI\n");
+}
+
+void e1000_pci_clear_mwi(struct e1000_hw *hw)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_clear_mwi(adapter->pdev);
+}
+
+void e1000_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void e1000_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 e1000_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct e1000_adapter *adapter = hw->back;
+	u16 cap_offset;
+
+	cap_offset = pci_find_capability(adapter->pdev, PCI_CAP_ID_EXP);
+	if (!cap_offset)
+		return -E1000_ERR_CONFIG;
+
+	pci_read_config_word(adapter->pdev, cap_offset + reg, value);
+
+	return E1000_SUCCESS;
+}
+
+#ifdef NETIF_F_HW_VLAN_TX
+static void e1000_vlan_rx_register(struct net_device *netdev,
+				   struct vlan_group *grp)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 ctrl, rctl;
+
+	e1000_irq_disable(adapter);
+	adapter->vlgrp = grp;
+
+	if (grp) {
+		/* enable VLAN tag insert/strip */
+		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl |= E1000_CTRL_VME;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+
+		if ((adapter->hw.mac.type != e1000_ich8lan) &&
+		    (adapter->hw.mac.type != e1000_ich9lan)) {
+			/* enable VLAN receive filtering */
+			rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+			rctl |= E1000_RCTL_VFE;
+			rctl &= ~E1000_RCTL_CFIEN;
+			E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+			e1000_update_mng_vlan(adapter);
+		}
+	} else {
+		/* disable VLAN tag insert/strip */
+		ctrl = E1000_READ_REG(&adapter->hw, E1000_CTRL);
+		ctrl &= ~E1000_CTRL_VME;
+		E1000_WRITE_REG(&adapter->hw, E1000_CTRL, ctrl);
+
+		if ((adapter->hw.mac.type != e1000_ich8lan) &&
+		    (adapter->hw.mac.type != e1000_ich9lan)) {
+			/* disable VLAN filtering */
+			rctl = E1000_READ_REG(&adapter->hw, E1000_RCTL);
+			rctl &= ~E1000_RCTL_VFE;
+			E1000_WRITE_REG(&adapter->hw, E1000_RCTL, rctl);
+			if (adapter->mng_vlan_id !=
+			    (u16)E1000_MNG_VLAN_NONE) {
+				e1000_vlan_rx_kill_vid(netdev,
+						       adapter->mng_vlan_id);
+				adapter->mng_vlan_id = E1000_MNG_VLAN_NONE;
+			}
+		}
+	}
+
+	e1000_irq_enable(adapter);
+}
+
+static void e1000_vlan_rx_add_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 vfta, index;
+	struct net_device *v_netdev;
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id))
+		return;
+	/* add VID to filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta |= (1 << (vid & 0x1F));
+	e1000_write_vfta(&adapter->hw, index, vfta);
+	/* Copy feature flags from netdev to the vlan netdev for this vid.
+	 * This allows things like TSO to bubble down to our vlan device.
+	 */
+	v_netdev = vlan_group_get_device(adapter->vlgrp, vid);
+	v_netdev->features |= adapter->netdev->features;
+	vlan_group_set_device(adapter->vlgrp, vid, v_netdev);
+}
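+
+/*
+ * Worked example of the VFTA indexing above (illustration only, not
+ * driver code): the 4096-entry VLAN filter table is an array of 128
+ * 32-bit registers, so a VID selects register (vid >> 5) & 0x7F and
+ * bit (vid & 0x1F) within it.  For vid = 100:
+ *
+ *	index = (100 >> 5) & 0x7F;	// = 3
+ *	bit   = 100 & 0x1F;		// = 4
+ *	vfta |= (1 << bit);		// sets bit 4 of VFTA[3]
+ */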
+
+static void e1000_vlan_rx_kill_vid(struct net_device *netdev, u16 vid)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 vfta, index;
+
+	e1000_irq_disable(adapter);
+	vlan_group_set_device(adapter->vlgrp, vid, NULL);
+	e1000_irq_enable(adapter);
+
+	if ((adapter->hw.mng_cookie.status &
+	     E1000_MNG_DHCP_COOKIE_STATUS_VLAN) &&
+	    (vid == adapter->mng_vlan_id)) {
+		/* release control to f/w */
+		e1000_release_hw_control(adapter);
+		return;
+	}
+
+	/* remove VID from filter table */
+	index = (vid >> 5) & 0x7F;
+	vfta = E1000_READ_REG_ARRAY(&adapter->hw, E1000_VFTA, index);
+	vfta &= ~(1 << (vid & 0x1F));
+	e1000_write_vfta(&adapter->hw, index, vfta);
+}
+
+static void e1000_restore_vlan(struct e1000_adapter *adapter)
+{
+	e1000_vlan_rx_register(adapter->netdev, adapter->vlgrp);
+
+	if (adapter->vlgrp) {
+		u16 vid;
+		for (vid = 0; vid < VLAN_N_VID; vid++) {
+			if (!vlan_group_get_device(adapter->vlgrp, vid))
+				continue;
+			e1000_vlan_rx_add_vid(adapter->netdev, vid);
+		}
+	}
+}
+#endif
+
+int e1000_set_spd_dplx(struct e1000_adapter *adapter, u16 spddplx)
+{
+	struct e1000_mac_info *mac = &adapter->hw.mac;
+
+	mac->autoneg = 0;
+
+	/* Fiber NICs only allow 1000 Mbps full duplex */
+	if ((adapter->hw.phy.media_type == e1000_media_type_fiber) &&
+		spddplx != (SPEED_1000 + DUPLEX_FULL)) {
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+
+	switch (spddplx) {
+	case SPEED_10 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_10_HALF;
+		break;
+	case SPEED_10 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_10_FULL;
+		break;
+	case SPEED_100 + DUPLEX_HALF:
+		mac->forced_speed_duplex = ADVERTISE_100_HALF;
+		break;
+	case SPEED_100 + DUPLEX_FULL:
+		mac->forced_speed_duplex = ADVERTISE_100_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_FULL:
+		mac->autoneg = 1;
+		adapter->hw.phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	case SPEED_1000 + DUPLEX_HALF: /* not supported */
+	default:
+		DPRINTK(PROBE, ERR, "Unsupported Speed/Duplex configuration\n");
+		return -EINVAL;
+	}
+	return 0;
+}
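+
+/*
+ * Usage sketch (illustration only, assuming an ethtool-style caller):
+ * speed and duplex are passed pre-combined, e.g.
+ *
+ *	err = e1000_set_spd_dplx(adapter, SPEED_100 + DUPLEX_FULL);
+ *	if (err)
+ *		return err;	// unsupported combination
+ *
+ * 1000 Mbps half duplex is rejected because the hardware does not
+ * support it; gigabit is only reached through autonegotiation.
+ */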
+
+#ifdef USE_REBOOT_NOTIFIER
+/* only want to do this for 2.4 kernels? */
+static int e1000_notify_reboot(struct notifier_block *nb,
+			       unsigned long event, void *p)
+{
+	struct pci_dev *pdev = NULL;
+
+	switch (event) {
+	case SYS_DOWN:
+	case SYS_HALT:
+	case SYS_POWER_OFF:
+		while ((pdev = pci_find_device(PCI_ANY_ID, PCI_ANY_ID, pdev))) {
+			if (pci_dev_driver(pdev) == &e1000_driver)
+				e1000_suspend(pdev, PMSG_SUSPEND);
+		}
+	}
+	return NOTIFY_DONE;
+}
+#endif
+
+#ifdef CONFIG_PM
+static int e1000_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	u32 err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	if ((err = pci_enable_device(pdev))) {
+		printk(KERN_ERR "e1000: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (rtnetif_running(netdev) && (err = e1000_request_irq(adapter)))
+		return err;
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper) {
+		e1000_power_up_phy(&adapter->hw);
+		e1000_setup_link(&adapter->hw);
+	}
+	e1000_reset(adapter);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+
+	e1000_init_manageability(adapter);
+
+	if (rtnetif_running(netdev))
+		e1000_up(adapter);
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 or ICHx and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+	return 0;
+}
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/*
+ * Polling 'interrupt' - used by things like netconsole to send skbs
+ * without having to re-enable interrupts. It's not called while
+ * the interrupt routine is executing.
+ */
+static void e1000_netpoll(struct net_device *netdev)
+{
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+	int i;
+
+	disable_irq(adapter->pdev->irq);
+	e1000_intr(adapter->pdev->irq, netdev);
+
+	for (i = 0; i < adapter->num_tx_queues ; i++ )
+		e1000_clean_tx_irq(adapter, &adapter->tx_ring[i]);
+#ifndef CONFIG_E1000_NAPI
+	for (i = 0; i < adapter->num_rx_queues ; i++ )
+		adapter->clean_rx(adapter, &adapter->rx_ring[i], NULL);
+#endif
+	enable_irq(adapter->pdev->irq);
+}
+#endif
+
+#ifdef HAVE_PCI_ERS
+/**
+ * e1000_io_error_detected - called when PCI error is detected
+ * @pdev: Pointer to PCI device
+ * @state: The current pci connection state
+ *
+ * This function is called after a PCI bus error affecting
+ * this device has been detected.
+ */
+static pci_ers_result_t e1000_io_error_detected(struct pci_dev *pdev,
+						pci_channel_state_t state)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	netif_device_detach(netdev);
+
+	if (rtnetif_running(netdev))
+		e1000_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ * e1000_io_slot_reset - called after the pci bus has been reset.
+ * @pdev: Pointer to PCI device
+ *
+ * Restart the card from scratch, as if from a cold-boot. Implementation
+ * resembles the first-half of the e1000_resume routine.
+ */
+static pci_ers_result_t e1000_io_slot_reset(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	if (pci_enable_device(pdev)) {
+		printk(KERN_ERR "e1000: Cannot re-enable PCI device after reset.\n");
+		return PCI_ERS_RESULT_DISCONNECT;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	e1000_reset(adapter);
+	E1000_WRITE_REG(&adapter->hw, E1000_WUS, ~0);
+
+	return PCI_ERS_RESULT_RECOVERED;
+}
+
+/**
+ * e1000_io_resume - called when traffic can start flowing again.
+ * @pdev: Pointer to PCI device
+ *
+ * This callback is called when the error recovery driver tells us that
+ * its OK to resume normal operation. Implementation resembles the
+ * second-half of the e1000_resume routine.
+ */
+static void e1000_io_resume(struct pci_dev *pdev)
+{
+	struct net_device *netdev = pci_get_drvdata(pdev);
+	struct e1000_adapter *adapter = netdev_priv(netdev);
+
+	e1000_init_manageability(adapter);
+
+	if (rtnetif_running(netdev)) {
+		if (e1000_up(adapter)) {
+			printk(KERN_ERR "e1000: can't bring device back up after reset\n");
+			return;
+		}
+	}
+
+	netif_device_attach(netdev);
+
+	/* If the controller is 82573 or ICHx and f/w is AMT, do not set
+	 * DRV_LOAD until the interface is up.  For all other cases,
+	 * let the f/w know that the h/w is now under the control
+	 * of the driver. */
+	if (((adapter->hw.mac.type != e1000_82573) &&
+	     (adapter->hw.mac.type != e1000_ich8lan) &&
+	     (adapter->hw.mac.type != e1000_ich9lan)) ||
+	    !e1000_check_mng_mode(&adapter->hw))
+		e1000_get_hw_control(adapter);
+
+}
+#endif /* HAVE_PCI_ERS */
+
+s32 e1000_alloc_zeroed_dev_spec_struct(struct e1000_hw *hw, u32 size)
+{
+	hw->dev_spec = kzalloc(size, GFP_KERNEL);
+
+	if (!hw->dev_spec)
+		return -ENOMEM;
+
+	return E1000_SUCCESS;
+}
+
+void e1000_free_dev_spec_struct(struct e1000_hw *hw)
+{
+	if (!hw->dev_spec)
+		return;
+
+	kfree(hw->dev_spec);
+}
+
+/* vim: set ts=4: */
+/* e1000_main.c */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c
new file mode 100644
index 0000000..4145dbd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.c
@@ -0,0 +1,384 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_manage.h"
+
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length);
+
+/**
+ *  e1000_calculate_checksum - Calculate checksum for buffer
+ *  @buffer: pointer to the buffer holding the data to sum
+ *  @length: length of the buffer, in bytes
+ *
+ *  Calculates the checksum of the buffer over the specified length and
+ *  returns it; the result is the two's complement of the byte sum.
+ **/
+static u8 e1000_calculate_checksum(u8 *buffer, u32 length)
+{
+	u32 i;
+	u8  sum = 0;
+
+	DEBUGFUNC("e1000_calculate_checksum");
+
+	if (!buffer)
+		return 0;
+
+	for (i = 0; i < length; i++)
+		sum += buffer[i];
+
+	return (u8) (0 - sum);
+}
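+
+/*
+ * Illustration (not driver code): because the returned value is the
+ * two's complement of the byte sum, appending it makes the whole
+ * buffer sum to zero, so verification reduces to:
+ *
+ *	u8 sum = 0;
+ *	for (i = 0; i < length; i++)
+ *		sum += buffer[i];	// includes the stored checksum
+ *	valid = (sum == 0);
+ */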
+
+/**
+ *  e1000_mng_enable_host_if_generic - Checks that the host interface is enabled
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns E1000_SUCCESS upon success, else E1000_ERR_HOST_INTERFACE_COMMAND
+ *
+ *  This function checks whether the host interface is enabled for command
+ *  operation and whether the previous command has completed.  It busy-waits
+ *  if the previous command has not yet completed.
+ **/
+s32 e1000_mng_enable_host_if_generic(struct e1000_hw * hw)
+{
+	u32 hicr;
+	s32 ret_val = E1000_SUCCESS;
+	u8  i;
+
+	DEBUGFUNC("e1000_mng_enable_host_if_generic");
+
+	/* Check that the host interface is enabled. */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	if ((hicr & E1000_HICR_EN) == 0) {
+		DEBUGOUT("E1000_HOST_EN bit disabled.\n");
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+	/* Check that the previous command has completed */
+	for (i = 0; i < E1000_MNG_DHCP_COMMAND_TIMEOUT; i++) {
+		hicr = E1000_READ_REG(hw, E1000_HICR);
+		if (!(hicr & E1000_HICR_C))
+			break;
+		msec_delay_irq(1);
+	}
+
+	if (i == E1000_MNG_DHCP_COMMAND_TIMEOUT) {
+		DEBUGOUT("Previous command timed out.\n");
+		ret_val = -E1000_ERR_HOST_INTERFACE_COMMAND;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_mng_mode_generic - Generic check for management mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the firmware semaphore register and returns true (>0) if
+ *  manageability is enabled, else false (0).
+ **/
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw)
+{
+	u32 fwsm;
+
+	DEBUGFUNC("e1000_check_mng_mode_generic");
+
+	fwsm = E1000_READ_REG(hw, E1000_FWSM);
+
+	return ((fwsm & E1000_FWSM_MODE_MASK) ==
+	        (E1000_MNG_IAMT_MODE << E1000_FWSM_MODE_SHIFT));
+}
+
+/**
+ *  e1000_enable_tx_pkt_filtering_generic - Enable packet filtering on TX
+ *  @hw: pointer to the HW structure
+ *
+ *  Enables packet filtering on transmit packets if manageability is enabled
+ *  and host interface is enabled.
+ **/
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw)
+{
+	struct e1000_host_mng_dhcp_cookie *hdr = &hw->mng_cookie;
+	u32 *buffer = (u32 *)&hw->mng_cookie;
+	u32 offset;
+	s32 ret_val, hdr_csum, csum;
+	u8 i, len;
+	bool tx_filter = TRUE;
+
+	DEBUGFUNC("e1000_enable_tx_pkt_filtering_generic");
+
+	/* No manageability, no filtering */
+	if (!e1000_check_mng_mode(hw)) {
+		tx_filter = FALSE;
+		goto out;
+	}
+
+	/*
+	 * If we can't read from the host interface for whatever
+	 * reason, disable filtering.
+	 */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val != E1000_SUCCESS) {
+		tx_filter = FALSE;
+		goto out;
+	}
+
+	/* Read in the header.  Length and offset are in dwords. */
+	len    = E1000_MNG_DHCP_COOKIE_LENGTH >> 2;
+	offset = E1000_MNG_DHCP_COOKIE_OFFSET >> 2;
+	for (i = 0; i < len; i++) {
+		*(buffer + i) = E1000_READ_REG_ARRAY_DWORD(hw,
+		                                           E1000_HOST_IF,
+		                                           offset + i);
+	}
+	hdr_csum = hdr->checksum;
+	hdr->checksum = 0;
+	csum = e1000_calculate_checksum((u8 *)hdr,
+	                                E1000_MNG_DHCP_COOKIE_LENGTH);
+	/*
+	 * If either the checksums or signature don't match, then
+	 * the cookie area isn't considered valid, in which case we
+	 * take the safe route of assuming Tx filtering is enabled.
+	 */
+	if (hdr_csum != csum)
+		goto out;
+	if (hdr->signature != E1000_IAMT_SIGNATURE)
+		goto out;
+
+	/* Cookie area is valid, make the final check for filtering. */
+	if (!(hdr->status & E1000_MNG_DHCP_COOKIE_STATUS_PARSING))
+		tx_filter = FALSE;
+
+out:
+	hw->mac.tx_pkt_filtering = tx_filter;
+	return tx_filter;
+}
+
+/**
+ *  e1000_mng_write_dhcp_info_generic - Writes DHCP info to host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the DHCP information to write
+ *  @length: size of the buffer
+ *
+ *  Writes the DHCP information to the host interface.
+ **/
+s32 e1000_mng_write_dhcp_info_generic(struct e1000_hw * hw, u8 *buffer,
+                                      u16 length)
+{
+	struct e1000_host_mng_command_header hdr;
+	s32 ret_val;
+	u32 hicr;
+
+	DEBUGFUNC("e1000_mng_write_dhcp_info_generic");
+
+	hdr.command_id = E1000_MNG_DHCP_TX_PAYLOAD_CMD;
+	hdr.command_length = length;
+	hdr.reserved1 = 0;
+	hdr.reserved2 = 0;
+	hdr.checksum = 0;
+
+	/* Enable the host interface */
+	ret_val = e1000_mng_enable_host_if(hw);
+	if (ret_val)
+		goto out;
+
+	/* Populate the host interface with the contents of "buffer". */
+	ret_val = e1000_mng_host_if_write(hw, buffer, length,
+	                                  sizeof(hdr), &(hdr.checksum));
+	if (ret_val)
+		goto out;
+
+	/* Write the manageability command header */
+	ret_val = e1000_mng_write_cmd_header(hw, &hdr);
+	if (ret_val)
+		goto out;
+
+	/* Tell the ARC a new command is pending. */
+	hicr = E1000_READ_REG(hw, E1000_HICR);
+	E1000_WRITE_REG(hw, E1000_HICR, hicr | E1000_HICR_C);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_mng_write_cmd_header_generic - Writes manageability command header
+ *  @hw: pointer to the HW structure
+ *  @hdr: pointer to the host interface command header
+ *
+ *  Writes the command header after calculating its checksum.
+ **/
+s32 e1000_mng_write_cmd_header_generic(struct e1000_hw * hw,
+                                    struct e1000_host_mng_command_header * hdr)
+{
+	u16 i, length = sizeof(struct e1000_host_mng_command_header);
+
+	DEBUGFUNC("e1000_mng_write_cmd_header_generic");
+
+	/* Write the whole command header structure with new checksum. */
+
+	hdr->checksum = e1000_calculate_checksum((u8 *)hdr, length);
+
+	length >>= 2;
+	/* Write the relevant command block into the ram area. */
+	for (i = 0; i < length; i++) {
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, i,
+		                            *((u32 *) hdr + i));
+		E1000_WRITE_FLUSH(hw);
+	}
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_mng_host_if_write_generic - Write to the manageability host interface
+ *  @hw: pointer to the HW structure
+ *  @buffer: pointer to the host interface buffer
+ *  @length: size of the buffer
+ *  @offset: location in the buffer to write to
+ *  @sum: sum of the data (not checksum)
+ *
+ *  This function writes the buffer content at the given offset on the host
+ *  interface.  It handles alignment so the writes are done in the most
+ *  efficient way, and it accumulates the byte sum of the data into the
+ *  *sum parameter.
+ **/
+s32 e1000_mng_host_if_write_generic(struct e1000_hw * hw, u8 *buffer,
+                                    u16 length, u16 offset, u8 *sum)
+{
+	u8 *tmp;
+	u8 *bufptr = buffer;
+	u32 data = 0;
+	s32 ret_val = E1000_SUCCESS;
+	u16 remaining, i, j, prev_bytes;
+
+	DEBUGFUNC("e1000_mng_host_if_write_generic");
+
+	/* sum is the running byte sum of the data, not a checksum */
+
+	if (length == 0 || offset + length > E1000_HI_MAX_MNG_DATA_LENGTH) {
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	tmp = (u8 *)&data;
+	prev_bytes = offset & 0x3;
+	offset >>= 2;
+
+	if (prev_bytes) {
+		data = E1000_READ_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset);
+		for (j = prev_bytes; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset, data);
+		length -= j - prev_bytes;
+		offset++;
+	}
+
+	remaining = length & 0x3;
+	length -= remaining;
+
+	/* Calculate length in DWORDs */
+	length >>= 2;
+
+	/*
+	 * The device driver writes the relevant command block into the
+	 * ram area.
+	 */
+	for (i = 0; i < length; i++) {
+		for (j = 0; j < sizeof(u32); j++) {
+			*(tmp + j) = *bufptr++;
+			*sum += *(tmp + j);
+		}
+
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+	}
+	if (remaining) {
+		for (j = 0; j < sizeof(u32); j++) {
+			if (j < remaining)
+				*(tmp + j) = *bufptr++;
+			else
+				*(tmp + j) = 0;
+
+			*sum += *(tmp + j);
+		}
+		E1000_WRITE_REG_ARRAY_DWORD(hw, E1000_HOST_IF, offset + i, data);
+	}
+
+out:
+	return ret_val;
+}
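+
+/*
+ * Worked example of the alignment handling above (illustration only):
+ * for offset = 6 and length = 9, prev_bytes = 6 & 0x3 = 2, so the
+ * first pass read-modify-writes dword 1 with the two leading bytes,
+ * leaving 7 bytes at dword offset 2; one full dword is then streamed,
+ * and the remaining 3 bytes are zero-padded into a final dword.
+ */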
+
+/**
+ *  e1000_enable_mng_pass_thru - Enable processing of ARPs
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies that the hardware needs to allow ARPs to be processed by the host.
+ **/
+bool e1000_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = FALSE;
+
+	DEBUGFUNC("e1000_enable_mng_pass_thru");
+
+	if (!hw->mac.asf_firmware_present)
+		goto out;
+
+	manc = E1000_READ_REG(hw, E1000_MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN) ||
+	    !(manc & E1000_MANC_EN_MAC_ADDR_FILTER))
+		goto out;
+
+	if (hw->mac.arc_subsystem_valid) {
+		fwsm = E1000_READ_REG(hw, E1000_FWSM);
+		factps = E1000_READ_REG(hw, E1000_FACTPS);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = TRUE;
+			goto out;
+		}
+	} else {
+		if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = TRUE;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h
new file mode 100644
index 0000000..b11b865
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_manage.h
@@ -0,0 +1,81 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_MANAGE_H_
+#define _E1000_MANAGE_H_
+
+bool e1000_check_mng_mode_generic(struct e1000_hw *hw);
+bool e1000_enable_tx_pkt_filtering_generic(struct e1000_hw *hw);
+s32  e1000_mng_enable_host_if_generic(struct e1000_hw *hw);
+s32  e1000_mng_host_if_write_generic(struct e1000_hw *hw, u8 *buffer,
+                                     u16 length, u16 offset, u8 *sum);
+s32  e1000_mng_write_cmd_header_generic(struct e1000_hw *hw,
+                                    struct e1000_host_mng_command_header *hdr);
+s32  e1000_mng_write_dhcp_info_generic(struct e1000_hw *hw,
+                                       u8 *buffer, u16 length);
+
+typedef enum {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+} e1000_mng_mode;
+
+#define E1000_FACTPS_MNGCG    0x20000000
+
+#define E1000_FWSM_MODE_MASK  0xE
+#define E1000_FWSM_MODE_SHIFT 1
+
+#define E1000_MNG_IAMT_MODE                  0x3
+#define E1000_MNG_DHCP_COOKIE_LENGTH         0x10
+#define E1000_MNG_DHCP_COOKIE_OFFSET         0x6F0
+#define E1000_MNG_DHCP_COMMAND_TIMEOUT       10
+#define E1000_MNG_DHCP_TX_PAYLOAD_CMD        64
+#define E1000_MNG_DHCP_COOKIE_STATUS_PARSING 0x1
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN    0x2
+
+#define E1000_VFTA_ENTRY_SHIFT               5
+#define E1000_VFTA_ENTRY_MASK                0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
+
+#define E1000_HI_MAX_BLOCK_BYTE_LENGTH       1792 /* Num of bytes in range */
+#define E1000_HI_MAX_BLOCK_DWORD_LENGTH      448 /* Num of dwords in range */
+#define E1000_HI_COMMAND_TIMEOUT             500 /* Process HI command limit */
+
+#define E1000_HICR_EN              0x01  /* Enable bit - RO */
+/* Driver sets this bit when done to put command in RAM */
+#define E1000_HICR_C               0x02
+#define E1000_HICR_SV              0x04  /* Status Validity */
+#define E1000_HICR_FW_RESET_ENABLE 0x40
+#define E1000_HICR_FW_RESET        0x80
+
+/* Intel(R) Active Management Technology signature */
+#define E1000_IAMT_SIGNATURE  0x544D4149
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c
new file mode 100644
index 0000000..f89d490
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.c
@@ -0,0 +1,893 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_nvm.h"
+
+/**
+ *  e1000_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void e1000_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void e1000_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	E1000_WRITE_REG(hw, E1000_EECD, *eecd);
+	E1000_WRITE_FLUSH(hw);
+	usec_delay(hw->nvm.delay_usec);
+}
+
+/**
+ *  e1000_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void e1000_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	u32 mask;
+
+	DEBUGFUNC("e1000_shift_out_eec_bits");
+
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_microwire)
+		eecd &= ~E1000_EECD_DO;
+	else if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+
+		usec_delay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
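+
+/*
+ * Illustration (not driver code): each data bit is presented on DI and
+ * latched by a raise/lower of SK, most significant bit first.  Shifting
+ * out the 3-bit Microwire READ opcode (0b110) therefore walks:
+ *
+ *	mask:	0x4	0x2	0x1
+ *	DI:	1	1	0
+ *
+ * with one e1000_raise_eec_clk()/e1000_lower_eec_clk() pair per column.
+ */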
+
+/**
+ *  e1000_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 e1000_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+	DEBUGFUNC("e1000_shift_in_eec_bits");
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		e1000_raise_eec_clk(hw, &eecd);
+
+		eecd = E1000_READ_REG(hw, E1000_EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
+
+/**
+ *  e1000_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+s32 e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+	u32 attempts = 100000;
+	u32 i, reg = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+
+	DEBUGFUNC("e1000_poll_eerd_eewr_done");
+
+	for (i = 0; i < attempts; i++) {
+		if (ee_reg == E1000_NVM_POLL_READ)
+			reg = E1000_READ_REG(hw, E1000_EERD);
+		else
+			reg = E1000_READ_REG(hw, E1000_EEWR);
+
+		if (reg & E1000_NVM_RW_REG_DONE) {
+			ret_val = E1000_SUCCESS;
+			break;
+		}
+
+		usec_delay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_acquire_nvm_generic - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 e1000_acquire_nvm_generic(struct e1000_hw *hw)
+{
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_acquire_nvm_generic");
+
+	E1000_WRITE_REG(hw, E1000_EECD, eecd | E1000_EECD_REQ);
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	while (timeout) {
+		if (eecd & E1000_EECD_GNT)
+			break;
+		usec_delay(5);
+		eecd = E1000_READ_REG(hw, E1000_EECD);
+		timeout--;
+	}
+
+	if (!timeout) {
+		eecd &= ~E1000_EECD_REQ;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		DEBUGOUT("Could not acquire NVM grant\n");
+		ret_val = -E1000_ERR_NVM;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  e1000_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void e1000_standby_nvm(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+
+	DEBUGFUNC("e1000_standby_nvm");
+
+	if (nvm->type == e1000_nvm_eeprom_microwire) {
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+
+		e1000_raise_eec_clk(hw, &eecd);
+
+		/* Select EEPROM */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+
+		e1000_lower_eec_clk(hw, &eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Toggle CS to flush commands */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+		eecd &= ~E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		E1000_WRITE_FLUSH(hw);
+		usec_delay(nvm->delay_usec);
+	}
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	DEBUGFUNC("e1000_stop_nvm");
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		e1000_lower_eec_clk(hw, &eecd);
+	} else if (hw->nvm.type == e1000_nvm_eeprom_microwire) {
+		/* CS on Microwire is active-high */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_DI);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		e1000_raise_eec_clk(hw, &eecd);
+		e1000_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ *  e1000_release_nvm_generic - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void e1000_release_nvm_generic(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	DEBUGFUNC("e1000_release_nvm_generic");
+
+	e1000_stop_nvm(hw);
+
+	eecd = E1000_READ_REG(hw, E1000_EECD);
+	eecd &= ~E1000_EECD_REQ;
+	E1000_WRITE_REG(hw, E1000_EECD, eecd);
+}
+
+/**
+ *  e1000_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up the EEPROM for reading and writing.
+ **/
+static s32 e1000_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = E1000_READ_REG(hw, E1000_EECD);
+	s32 ret_val = E1000_SUCCESS;
+	u16 timeout = 0;
+	u8 spi_stat_reg;
+
+	DEBUGFUNC("e1000_ready_nvm_eeprom");
+
+	if (nvm->type == e1000_nvm_eeprom_microwire) {
+		/* Clear SK and DI */
+		eecd &= ~(E1000_EECD_DI | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		/* Set CS */
+		eecd |= E1000_EECD_CS;
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+	} else if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		E1000_WRITE_REG(hw, E1000_EECD, eecd);
+		usec_delay(1);
+		timeout = NVM_MAX_RETRY_SPI;
+
+		/*
+		 * Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			e1000_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+			                         hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)e1000_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			usec_delay(5);
+			e1000_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			DEBUGOUT("SPI NVM Status error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_spi - Read EEPROM words using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u16 word_in;
+	u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+	DEBUGFUNC("e1000_read_nvm_spi");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	e1000_standby_nvm(hw);
+
+	if ((nvm->address_bits == 8) && (offset >= 128))
+		read_opcode |= NVM_A8_OPCODE_SPI;
+
+	/* Send the READ command (opcode + addr) */
+	e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+	e1000_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+	/*
+	 * Read the data.  SPI NVMs increment the address with each byte
+	 * read and will roll over if reading beyond the end.  This allows
+	 * us to read the whole NVM from any offset
+	 */
+	for (i = 0; i < words; i++) {
+		word_in = e1000_shift_in_eec_bits(hw, 16);
+		data[i] = (word_in >> 8) | (word_in << 8);
+	}
+
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
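+
+/*
+ * Note on the byte swap above (illustration only): the two bytes of
+ * each 16-bit word arrive over SPI in the opposite order from how the
+ * driver stores words, so for a raw word_in = 0x3412:
+ *
+ *	data[i] = (word_in >> 8) | (word_in << 8);	// -> 0x1234
+ */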
+
+/**
+ *  e1000_read_nvm_microwire - Read EEPROM words using Microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                             u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u8 read_opcode = NVM_READ_OPCODE_MICROWIRE;
+
+	DEBUGFUNC("e1000_read_nvm_microwire");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	for (i = 0; i < words; i++) {
+		/* Send the READ command (opcode + addr) */
+		e1000_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)(offset + i),
+					nvm->address_bits);
+
+		/*
+		 * Read the data.  For microwire, each word requires the
+		 * overhead of setup and tear-down.
+		 */
+		data[i] = e1000_shift_in_eec_bits(hw, 16);
+		e1000_standby_nvm(hw);
+	}
+
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_nvm_eerd");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+		       E1000_NVM_RW_REG_START;
+
+		E1000_WRITE_REG(hw, E1000_EERD, eerd);
+		ret_val = e1000_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (E1000_READ_REG(hw, E1000_EERD) >>
+		           E1000_NVM_RW_REG_DATA);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_spi");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	msec_delay(10);
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = e1000_ready_nvm_eeprom(hw);
+		if (ret_val)
+			goto release;
+
+		e1000_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		e1000_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+		                         nvm->opcode_bits);
+
+		e1000_standby_nvm(hw);
+
+		/*
+		 * Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		e1000_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		e1000_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+		                         nvm->address_bits);
+
+		/* Loop to write up to a whole EEPROM page at a time */
+		while (widx < words) {
+			u16 word_out = data[widx];
+			word_out = (word_out >> 8) | (word_out << 8);
+			e1000_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				e1000_standby_nvm(hw);
+				break;
+			}
+		}
+	}
+
+	msec_delay(10);
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
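+
+/*
+ * Worked example of the page-wrap test above (illustration only):
+ * assuming a 32-byte page size, a write starting at word offset 14
+ * streams words until ((offset + widx) * 2) % 32 == 0, i.e. after
+ * widx reaches 2 (byte address 32); the EEPROM is then put in standby
+ * and a fresh WRITE command is issued for the next page.
+ */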
+
+/**
+ *  e1000_write_nvm_microwire - Writes EEPROM using microwire
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using microwire interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset, u16 words,
+                              u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32  ret_val;
+	u32 eecd;
+	u16 words_written = 0;
+	u16 widx = 0;
+
+	DEBUGFUNC("e1000_write_nvm_microwire");
+
+	/*
+	 * A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		DEBUGOUT("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_nvm(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	e1000_shift_out_eec_bits(hw, NVM_EWEN_OPCODE_MICROWIRE,
+	                         (u16)(nvm->opcode_bits + 2));
+
+	e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+	e1000_standby_nvm(hw);
+
+	while (words_written < words) {
+		e1000_shift_out_eec_bits(hw, NVM_WRITE_OPCODE_MICROWIRE,
+		                         nvm->opcode_bits);
+
+		e1000_shift_out_eec_bits(hw, (u16)(offset + words_written),
+		                         nvm->address_bits);
+
+		e1000_shift_out_eec_bits(hw, data[words_written], 16);
+
+		e1000_standby_nvm(hw);
+
+		for (widx = 0; widx < 200; widx++) {
+			eecd = E1000_READ_REG(hw, E1000_EECD);
+			if (eecd & E1000_EECD_DO)
+				break;
+			usec_delay(50);
+		}
+
+		if (widx == 200) {
+			DEBUGOUT("NVM Write did not complete\n");
+			ret_val = -E1000_ERR_NVM;
+			goto release;
+		}
+
+		e1000_standby_nvm(hw);
+
+		words_written++;
+	}
+
+	e1000_shift_out_eec_bits(hw, NVM_EWDS_OPCODE_MICROWIRE,
+	                         (u16)(nvm->opcode_bits + 2));
+
+	e1000_shift_out_eec_bits(hw, 0, (u16)(nvm->address_bits - 2));
+
+release:
+	e1000_release_nvm(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_pba_num_generic - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @pba_num: pointer to device part number
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in pba_num.
+ **/
+s32 e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num)
+{
+	s32  ret_val;
+	u16 nvm_data;
+
+	DEBUGFUNC("e1000_read_pba_num_generic");
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+	*pba_num = (u32)(nvm_data << 16);
+
+	ret_val = e1000_read_nvm(hw, NVM_PBA_OFFSET_1, 1, &nvm_data);
+	if (ret_val) {
+		DEBUGOUT("NVM Read Error\n");
+		goto out;
+	}
+	*pba_num |= nvm_data;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_mac_addr_generic - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from the EEPROM and stores the value.
+ *  Since devices with two ports use the same EEPROM, we increment the
+ *  last bit in the MAC address for the second port.
+ **/
+s32 e1000_read_mac_addr_generic(struct e1000_hw *hw)
+{
+	s32  ret_val = E1000_SUCCESS;
+	u16 offset, nvm_data, i;
+
+	DEBUGFUNC("e1000_read_mac_addr");
+
+	for (i = 0; i < ETH_ADDR_LEN; i += 2) {
+		offset = i >> 1;
+		ret_val = e1000_read_nvm(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			goto out;
+		}
+		hw->mac.perm_addr[i] = (u8)(nvm_data & 0xFF);
+		hw->mac.perm_addr[i+1] = (u8)(nvm_data >> 8);
+	}
+
+	/* Flip last bit of mac address if we're on second port */
+	if (hw->bus.func == E1000_FUNC_1)
+		hw->mac.perm_addr[5] ^= 1;
+
+	for (i = 0; i < ETH_ADDR_LEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+out:
+	return ret_val;
+}
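+
+/*
+ * Illustration (not driver code, hypothetical address): on a dual-port
+ * device sharing one EEPROM, port 0 keeps the stored address while
+ * port 1 flips the last bit, e.g. 00:1B:21:12:34:56 becomes
+ * 00:1B:21:12:34:57.
+ */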
+
+/**
+ *  e1000_validate_nvm_checksum_generic - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 e1000_validate_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	DEBUGFUNC("e1000_validate_nvm_checksum_generic");
+
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		DEBUGOUT("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
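+
+/*
+ * Illustration (not driver code): e1000_update_nvm_checksum_generic
+ * below writes the checksum word so that the NVM words up to and
+ * including NVM_CHECKSUM_REG sum to NVM_SUM (0xBABA), so validation
+ * reduces to:
+ *
+ *	u16 sum = 0;
+ *	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
+ *		sum += word[i];
+ *	valid = (sum == 0xBABA);
+ */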
+
+/**
+ *  e1000_update_nvm_checksum_generic - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 e1000_update_nvm_checksum_generic(struct e1000_hw *hw)
+{
+	s32  ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	DEBUGFUNC("e1000_update_nvm_checksum");
+
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = e1000_read_nvm(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			DEBUGOUT("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = e1000_write_nvm(hw, NVM_CHECKSUM_REG, 1, &checksum);
+	if (ret_val) {
+		DEBUGOUT("NVM Write Error while updating checksum.\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_reload_nvm_generic - Reloads EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Reloads the EEPROM by setting the "Reinitialize from EEPROM" bit in the
+ *  extended control register.
+ **/
+void e1000_reload_nvm_generic(struct e1000_hw *hw)
+{
+	u32 ctrl_ext;
+
+	DEBUGFUNC("e1000_reload_nvm_generic");
+
+	usec_delay(10);
+	ctrl_ext = E1000_READ_REG(hw, E1000_CTRL_EXT);
+	ctrl_ext |= E1000_CTRL_EXT_EE_RST;
+	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+	E1000_WRITE_FLUSH(hw);
+}
+
+/* Function pointers local to this file and not intended for public use */
+
+/**
+ *  e1000_acquire_nvm - Acquire exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  For those silicon families which have implemented an NVM acquire
+ *  function, run the defined function; otherwise return success.
+ **/
+s32 e1000_acquire_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.acquire_nvm)
+		return hw->func.acquire_nvm(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_nvm - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  For those silicon families which have implemented an NVM release
+ *  function, run the defined function; otherwise return success.
+ **/
+void e1000_release_nvm(struct e1000_hw *hw)
+{
+	if (hw->func.release_nvm)
+		hw->func.release_nvm(hw);
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h
new file mode 100644
index 0000000..1803600
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_nvm.h
@@ -0,0 +1,61 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32  e1000_acquire_nvm_generic(struct e1000_hw *hw);
+
+s32  e1000_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg);
+s32  e1000_read_mac_addr_generic(struct e1000_hw *hw);
+s32  e1000_read_pba_num_generic(struct e1000_hw *hw, u32 *pba_num);
+s32  e1000_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  e1000_read_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                              u16 words, u16 *data);
+s32  e1000_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_valid_led_default_generic(struct e1000_hw *hw, u16 *data);
+s32  e1000_validate_nvm_checksum_generic(struct e1000_hw *hw);
+s32  e1000_write_nvm_eewr(struct e1000_hw *hw, u16 offset,
+                          u16 words, u16 *data);
+s32  e1000_write_nvm_microwire(struct e1000_hw *hw, u16 offset,
+                               u16 words, u16 *data);
+s32  e1000_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words,
+                         u16 *data);
+s32  e1000_update_nvm_checksum_generic(struct e1000_hw *hw);
+void e1000_stop_nvm(struct e1000_hw *hw);
+void e1000_release_nvm_generic(struct e1000_hw *hw);
+void e1000_reload_nvm_generic(struct e1000_hw *hw);
+
+/* Function pointers */
+s32  e1000_acquire_nvm(struct e1000_hw *hw);
+void e1000_release_nvm(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE  0xDB00
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h
new file mode 100644
index 0000000..3a09cc2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_osdep.h
@@ -0,0 +1,124 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+/* glue for the OS-dependent part of e1000
+ * includes register access macros
+ */
+
+#ifndef _E1000_OSDEP_H_
+#define _E1000_OSDEP_H_
+
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/if_ether.h>
+
+#include "kcompat.h"
+
+#define usec_delay(x) udelay(x)
+#ifndef msec_delay
+#define msec_delay(x)	do { if(in_interrupt()) { \
+				/* Don't sleep in interrupt context! */ \
+				BUG(); \
+			} else { \
+				msleep(x); \
+			} } while (0)
+
+/* Some workarounds require millisecond delays and are run during interrupt
+ * context.  Most notably, when establishing link, the phy may need tweaking
+ * but cannot process phy register reads/writes faster than millisecond
+ * intervals...and we establish link due to a "link status change" interrupt.
+ */
+#define msec_delay_irq(x) mdelay(x)
+#endif
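+
+/*
+ * Usage sketch (added comment): usec_delay() and msec_delay_irq() busy-wait
+ * and are safe in any context, while msec_delay() sleeps and BUG()s when
+ * called from interrupt context, e.g.:
+ *
+ *	usec_delay(50);		poll interval, any context
+ *	msec_delay(10);		process context only
+ *	msec_delay_irq(5);	busy-wait where sleeping is not allowed
+ */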
+
+#define PCI_COMMAND_REGISTER   PCI_COMMAND
+#define CMD_MEM_WRT_INVALIDATE PCI_COMMAND_INVALIDATE
+#define ETH_ADDR_LEN           ETH_ALEN
+
+#ifdef __BIG_ENDIAN
+#define E1000_BIG_ENDIAN __BIG_ENDIAN
+#endif
+
+
+#define DEBUGOUT(S)
+#define DEBUGOUT1(S, A...)
+
+#define DEBUGFUNC(F) DEBUGOUT(F "\n")
+#define DEBUGOUT2 DEBUGOUT1
+#define DEBUGOUT3 DEBUGOUT2
+#define DEBUGOUT7 DEBUGOUT3
+
+#define E1000_REGISTER(a, reg) (((a)->mac.type >= e1000_82543) \
+			       ? reg                           \
+			       : e1000_translate_register_82542(reg))
+
+#define E1000_WRITE_REG(a, reg, value) ( \
+    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg))))
+
+#define E1000_READ_REG(a, reg) (readl((a)->hw_addr + E1000_REGISTER(a, reg)))
+
+#define E1000_WRITE_REG_ARRAY(a, reg, offset, value) ( \
+    writel((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2))))
+
+#define E1000_READ_REG_ARRAY(a, reg, offset) ( \
+    readl((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 2)))
+
+#define E1000_READ_REG_ARRAY_DWORD E1000_READ_REG_ARRAY
+#define E1000_WRITE_REG_ARRAY_DWORD E1000_WRITE_REG_ARRAY
+
+#define E1000_WRITE_REG_ARRAY_WORD(a, reg, offset, value) ( \
+    writew((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1))))
+
+#define E1000_READ_REG_ARRAY_WORD(a, reg, offset) ( \
+    readw((a)->hw_addr + E1000_REGISTER(a, reg) + ((offset) << 1)))
+
+#define E1000_WRITE_REG_ARRAY_BYTE(a, reg, offset, value) ( \
+    writeb((value), ((a)->hw_addr + E1000_REGISTER(a, reg) + (offset))))
+
+#define E1000_READ_REG_ARRAY_BYTE(a, reg, offset) ( \
+    readb((a)->hw_addr + E1000_REGISTER(a, reg) + (offset)))
+
+#define E1000_WRITE_REG_IO(a, reg, offset) do { \
+    outl(reg, ((a)->io_base));                  \
+    outl(offset, ((a)->io_base + 4));      } while(0)
+
+#define E1000_WRITE_FLUSH(a) E1000_READ_REG(a, E1000_STATUS)
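+
+/*
+ * Illustrative idiom (added comment): MMIO writes are posted, so reading any
+ * register -- E1000_STATUS here -- forces them to complete.  A typical
+ * sequence, as in e1000_reload_nvm_generic():
+ *
+ *	E1000_WRITE_REG(hw, E1000_CTRL_EXT, ctrl_ext);
+ *	E1000_WRITE_FLUSH(hw);
+ */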
+
+#define E1000_WRITE_FLASH_REG(a, reg, value) ( \
+    writel((value), ((a)->flash_address + reg)))
+
+#define E1000_WRITE_FLASH_REG16(a, reg, value) ( \
+    writew((value), ((a)->flash_address + reg)))
+
+#define E1000_READ_FLASH_REG(a, reg) (readl((a)->flash_address + reg))
+
+#define E1000_READ_FLASH_REG16(a, reg) (readw((a)->flash_address + reg))
+
+#endif /* _E1000_OSDEP_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c
new file mode 100644
index 0000000..c5db2cb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_param.c
@@ -0,0 +1,894 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+
+#include <linux/netdevice.h>
+
+#include "e1000.h"
+
+/* This is the only thing that needs to be changed to adjust the
+ * maximum number of ports that the driver can manage.
+ */
+
+#define E1000_MAX_NIC 32
+
+#define OPTION_UNSET   -1
+#define OPTION_DISABLED 0
+#define OPTION_ENABLED  1
+
+/* All parameters are treated the same, as an integer array of values.
+ * This macro just reduces the need to repeat the same declaration code
+ * over and over (plus this helps to avoid typo bugs).
+ */
+
+#define E1000_PARAM_INIT { [0 ... E1000_MAX_NIC] = OPTION_UNSET }
+#ifndef module_param_array
+/* Module Parameters are always initialized to -1, so that the driver
+ * can tell the difference between no user-specified value and the
+ * user asking for the default value.
+ * The true default values are loaded in when e1000_check_options is called.
+ *
+ * This is a GCC extension to ANSI C.
+ * See the item "Labeled Elements in Initializers" in the section
+ * "Extensions to the C Language Family" of the GCC documentation.
+ */
+
+#define E1000_PARAM(X, desc) \
+	static const int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	MODULE_PARM(X, "1-" __MODULE_STRING(E1000_MAX_NIC) "i"); \
+	MODULE_PARM_DESC(X, desc);
+#else
+#define E1000_PARAM(X, desc) \
+	static int X[E1000_MAX_NIC+1] = E1000_PARAM_INIT; \
+	static unsigned int num_##X = 0; \
+	module_param_array_named(X, X, int, &num_##X, 0); \
+	MODULE_PARM_DESC(X, desc);
+#endif
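+
+/*
+ * Usage sketch (added comment; the module name below is hypothetical): each
+ * parameter is a per-adapter array indexed by board number, so a two-port
+ * load might look like
+ *
+ *	modprobe rt_e1000 TxDescriptors=4096,256 Speed=1000,0
+ *
+ * Entries left unset stay at OPTION_UNSET (-1) until e1000_check_options()
+ * replaces them with the defaults.
+ */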
+
+/* Transmit Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(TxDescriptors, "Number of transmit descriptors");
+
+/* Receive Descriptor Count
+ *
+ * Valid Range: 80-256 for 82542 and 82543 gigabit ethernet controllers
+ * Valid Range: 80-4096 for 82544 and newer
+ *
+ * Default Value: 256
+ */
+E1000_PARAM(RxDescriptors, "Number of receive descriptors");
+
+/* User Specified Speed Override
+ *
+ * Valid Range: 0, 10, 100, 1000
+ *  - 0    - auto-negotiate at all supported speeds
+ *  - 10   - only link at 10 Mbps
+ *  - 100  - only link at 100 Mbps
+ *  - 1000 - only link at 1000 Mbps
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Speed, "Speed setting");
+
+/* User Specified Duplex Override
+ *
+ * Valid Range: 0-2
+ *  - 0 - auto-negotiate for duplex
+ *  - 1 - only link at half duplex
+ *  - 2 - only link at full duplex
+ *
+ * Default Value: 0
+ */
+E1000_PARAM(Duplex, "Duplex setting");
+
+/* Auto-negotiation Advertisement Override
+ *
+ * Valid Range: 0x01-0x0F, 0x20-0x2F (copper); 0x20 (fiber)
+ *
+ * The AutoNeg value is a bit mask describing which speed and duplex
+ * combinations should be advertised during auto-negotiation.
+ * The supported speed and duplex modes are listed below
+ *
+ * Bit           7     6     5      4      3     2     1      0
+ * Speed (Mbps)  N/A   N/A   1000   N/A    100   100   10     10
+ * Duplex                    Full          Full  Half  Full   Half
+ *
+ * Default Value: 0x2F (copper); 0x20 (fiber)
+ */
+E1000_PARAM(AutoNeg, "Advertised auto-negotiation setting");
+#define AUTONEG_ADV_DEFAULT  0x2F
+#define AUTONEG_ADV_MASK     0x2F
+
+/* User Specified Flow Control Override
+ *
+ * Valid Range: 0-3
+ *  - 0 - No Flow Control
+ *  - 1 - Rx only, respond to PAUSE frames but do not generate them
+ *  - 2 - Tx only, generate PAUSE frames but ignore them on receive
+ *  - 3 - Full Flow Control Support
+ *
+ * Default Value: Read flow control settings from the EEPROM
+ */
+E1000_PARAM(FlowControl, "Flow Control setting");
+#define FLOW_CONTROL_DEFAULT FLOW_CONTROL_FULL
+
+/* XsumRX - Receive Checksum Offload Enable/Disable
+ *
+ * Valid Range: 0, 1
+ *  - 0 - disables all checksum offload
+ *  - 1 - enables receive IP/TCP/UDP checksum offload
+ *        on 82543 and newer NICs
+ *
+ * Default Value: 1
+ */
+E1000_PARAM(XsumRX, "Disable or enable Receive Checksum offload");
+
+/* Transmit Interrupt Delay in units of 1.024 microseconds
+ *  Tx interrupt delay typically needs to be set to something non-zero
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxIntDelay, "Transmit Interrupt Delay");
+#define DEFAULT_TIDV                   0
+#define MAX_TXDELAY               0xFFFF
+#define MIN_TXDELAY                    0
+
+/* Transmit Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(TxAbsIntDelay, "Transmit Absolute Interrupt Delay");
+#define DEFAULT_TADV                   0
+#define MAX_TXABSDELAY            0xFFFF
+#define MIN_TXABSDELAY                 0
+
+/* Receive Interrupt Delay in units of 1.024 microseconds
+ *   hardware will likely hang if you set this to anything but zero.
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxIntDelay, "Receive Interrupt Delay");
+#define DEFAULT_RDTR                   0
+#define MAX_RXDELAY               0xFFFF
+#define MIN_RXDELAY                    0
+
+/* Receive Absolute Interrupt Delay in units of 1.024 microseconds
+ *
+ * Valid Range: 0-65535
+ */
+E1000_PARAM(RxAbsIntDelay, "Receive Absolute Interrupt Delay");
+#define DEFAULT_RADV                   0
+#define MAX_RXABSDELAY            0xFFFF
+#define MIN_RXABSDELAY                 0
+
+/* Interrupt Throttle Rate (interrupts/sec)
+ *
+ * Valid Range: 100-100000 (0=off, 1=dynamic, 3=dynamic conservative)
+ */
+E1000_PARAM(InterruptThrottleRate, "Interrupt Throttling Rate");
+#define DEFAULT_ITR                    0
+#define MAX_ITR                   100000
+#define MIN_ITR                      100
+
+/* Enable Smart Power Down of the PHY
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 0 (disabled)
+ */
+E1000_PARAM(SmartPowerDownEnable, "Enable PHY smart power down");
+
+/* Enable Kumeran Lock Loss workaround
+ *
+ * Valid Range: 0, 1
+ *
+ * Default Value: 1 (enabled)
+ */
+E1000_PARAM(KumeranLockLoss, "Enable Kumeran lock loss workaround");
+
+
+struct e1000_option {
+	enum { enable_option, range_option, list_option } type;
+	const char *name;
+	const char *err;
+	int def;
+	union {
+		struct { /* range_option info */
+			int min;
+			int max;
+		} r;
+		struct { /* list_option info */
+			int nr;
+			struct e1000_opt_list { int i; char *str; } *p;
+		} l;
+	} arg;
+};
+
+static int e1000_validate_option(unsigned int *value,
+                                 const struct e1000_option *opt,
+                                 struct e1000_adapter *adapter)
+{
+	if (*value == OPTION_UNSET) {
+		*value = opt->def;
+		return 0;
+	}
+
+	switch (opt->type) {
+	case enable_option:
+		switch (*value) {
+		case OPTION_ENABLED:
+			DPRINTK(PROBE, INFO, "%s Enabled\n", opt->name);
+			return 0;
+		case OPTION_DISABLED:
+			DPRINTK(PROBE, INFO, "%s Disabled\n", opt->name);
+			return 0;
+		}
+		break;
+	case range_option:
+		if (*value >= opt->arg.r.min && *value <= opt->arg.r.max) {
+			DPRINTK(PROBE, INFO,
+					"%s set to %i\n", opt->name, *value);
+			return 0;
+		}
+		break;
+	case list_option: {
+		int i;
+		struct e1000_opt_list *ent;
+
+		for (i = 0; i < opt->arg.l.nr; i++) {
+			ent = &opt->arg.l.p[i];
+			if (*value == ent->i) {
+				if (ent->str[0] != '\0')
+					DPRINTK(PROBE, INFO, "%s\n", ent->str);
+				return 0;
+			}
+		}
+	}
+		break;
+	default:
+		BUG();
+	}
+
+	DPRINTK(PROBE, INFO, "Invalid %s value specified (%i) %s\n",
+	       opt->name, *value, opt->err);
+	*value = opt->def;
+	return -1;
+}
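+
+/*
+ * Added note: e1000_validate_option() returns 0 when the value is valid or
+ * was OPTION_UNSET (in which case *value is set to opt->def), and -1 when an
+ * out-of-range value had to be replaced with the default.
+ */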
+
+static void e1000_check_fiber_options(struct e1000_adapter *adapter);
+static void e1000_check_copper_options(struct e1000_adapter *adapter);
+
+/**
+ * e1000_check_options - Range Checking for Command Line Parameters
+ * @adapter: board private structure
+ *
+ * This routine checks all command line parameters for valid user
+ * input.  If an invalid value is given, or if no user specified
+ * value exists, a default value is used.  The final value is stored
+ * in a variable in the adapter structure.
+ **/
+void e1000_check_options(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	int bd = adapter->bd_number;
+	if (bd >= E1000_MAX_NIC) {
+		DPRINTK(PROBE, NOTICE,
+		       "Warning: no configuration for board #%i\n", bd);
+		DPRINTK(PROBE, NOTICE, "Using defaults for all values\n");
+#ifndef module_param_array
+		bd = E1000_MAX_NIC;
+#endif
+	}
+
+	{ /* Transmit Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_TXD),
+			.def  = E1000_DEFAULT_TXD,
+			.arg  = { .r = { .min = E1000_MIN_TXD }}
+		};
+		struct e1000_tx_ring *tx_ring = adapter->tx_ring;
+		int i;
+		opt.arg.r.max = hw->mac.type < e1000_82544 ?
+			E1000_MAX_TXD : E1000_MAX_82544_TXD;
+
+#ifdef module_param_array
+		if (num_TxDescriptors > bd) {
+#endif
+			tx_ring->count = TxDescriptors[bd];
+			e1000_validate_option(&tx_ring->count, &opt, adapter);
+			tx_ring->count = ALIGN(tx_ring->count,
+			                       REQ_TX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			tx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_tx_queues; i++)
+			tx_ring[i].count = tx_ring->count;
+	}
+	{ /* Receive Descriptor Count */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Descriptors",
+			.err  = "using default of "
+				__MODULE_STRING(E1000_DEFAULT_RXD),
+			.def  = E1000_DEFAULT_RXD,
+			.arg  = { .r = { .min = E1000_MIN_RXD }}
+		};
+		struct e1000_rx_ring *rx_ring = adapter->rx_ring;
+		int i;
+		opt.arg.r.max = hw->mac.type < e1000_82544 ? E1000_MAX_RXD :
+			E1000_MAX_82544_RXD;
+
+#ifdef module_param_array
+		if (num_RxDescriptors > bd) {
+#endif
+			rx_ring->count = RxDescriptors[bd];
+			e1000_validate_option(&rx_ring->count, &opt, adapter);
+			rx_ring->count = ALIGN(rx_ring->count,
+			                       REQ_RX_DESCRIPTOR_MULTIPLE);
+#ifdef module_param_array
+		} else {
+			rx_ring->count = opt.def;
+		}
+#endif
+		for (i = 0; i < adapter->num_rx_queues; i++)
+			rx_ring[i].count = rx_ring->count;
+	}
+	{ /* Checksum Offload Enable/Disable */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Checksum Offload",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_XsumRX > bd) {
+#endif
+			unsigned int rx_csum = XsumRX[bd];
+			e1000_validate_option(&rx_csum, &opt, adapter);
+			adapter->rx_csum = rx_csum;
+#ifdef module_param_array
+		} else {
+			adapter->rx_csum = opt.def;
+		}
+#endif
+	}
+	{ /* Flow Control */
+
+		struct e1000_opt_list fc_list[] =
+			{{ e1000_fc_none,    "Flow Control Disabled" },
+			 { e1000_fc_rx_pause,"Flow Control Receive Only" },
+			 { e1000_fc_tx_pause,"Flow Control Transmit Only" },
+			 { e1000_fc_full,    "Flow Control Enabled" },
+			 { e1000_fc_default, "Flow Control Hardware Default" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Flow Control",
+			.err  = "reading default settings from EEPROM",
+			.def  = e1000_fc_default,
+			.arg  = { .l = { .nr = ARRAY_SIZE(fc_list),
+					 .p = fc_list }}
+		};
+
+#ifdef module_param_array
+		if (num_FlowControl > bd) {
+#endif
+			unsigned int fc = FlowControl[bd];
+			e1000_validate_option(&fc, &opt, adapter);
+			hw->fc.original_type = fc;
+			hw->fc.type = fc;
+#ifdef module_param_array
+		} else {
+			hw->fc.original_type = opt.def;
+			hw->fc.type = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TIDV),
+			.def  = DEFAULT_TIDV,
+			.arg  = { .r = { .min = MIN_TXDELAY,
+					 .max = MAX_TXDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxIntDelay > bd) {
+#endif
+			adapter->tx_int_delay = TxIntDelay[bd];
+			e1000_validate_option(&adapter->tx_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Transmit Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Transmit Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_TADV),
+			.def  = DEFAULT_TADV,
+			.arg  = { .r = { .min = MIN_TXABSDELAY,
+					 .max = MAX_TXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_TxAbsIntDelay > bd) {
+#endif
+			adapter->tx_abs_int_delay = TxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->tx_abs_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->tx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RDTR),
+			.def  = DEFAULT_RDTR,
+			.arg  = { .r = { .min = MIN_RXDELAY,
+					 .max = MAX_RXDELAY }}
+		};
+
+		/* For 82573, modify the min and default values for the slow
+		 * ping workaround; RDTR must be set to a value greater than 8 */
+
+#ifdef module_param_array
+		if (num_RxIntDelay > bd) {
+#endif
+			adapter->rx_int_delay = RxIntDelay[bd];
+			e1000_validate_option(&adapter->rx_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Receive Absolute Interrupt Delay */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Receive Absolute Interrupt Delay",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_RADV),
+			.def  = DEFAULT_RADV,
+			.arg  = { .r = { .min = MIN_RXABSDELAY,
+					 .max = MAX_RXABSDELAY }}
+		};
+
+#ifdef module_param_array
+		if (num_RxAbsIntDelay > bd) {
+#endif
+			adapter->rx_abs_int_delay = RxAbsIntDelay[bd];
+			e1000_validate_option(&adapter->rx_abs_int_delay, &opt,
+			                      adapter);
+#ifdef module_param_array
+		} else {
+			adapter->rx_abs_int_delay = opt.def;
+		}
+#endif
+	}
+	{ /* Interrupt Throttling Rate */
+		struct e1000_option opt = {
+			.type = range_option,
+			.name = "Interrupt Throttling Rate (ints/sec)",
+			.err  = "using default of " __MODULE_STRING(DEFAULT_ITR),
+			.def  = DEFAULT_ITR,
+			.arg  = { .r = { .min = MIN_ITR,
+					 .max = MAX_ITR }}
+		};
+
+#ifdef module_param_array
+		if (num_InterruptThrottleRate > bd) {
+#endif
+			adapter->itr = InterruptThrottleRate[bd];
+			switch (adapter->itr) {
+			case 0:
+				DPRINTK(PROBE, INFO, "%s turned off\n",
+				        opt.name);
+				break;
+			case 1:
+				DPRINTK(PROBE, INFO, "%s set to dynamic mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			case 3:
+				DPRINTK(PROBE, INFO,
+				        "%s set to dynamic conservative mode\n",
+					opt.name);
+				adapter->itr_setting = adapter->itr;
+				adapter->itr = 20000;
+				break;
+			default:
+				e1000_validate_option(&adapter->itr, &opt,
+				        adapter);
+				/* save the setting, because the dynamic bits change itr */
+				/* clear the lower two bits because they are
+				 * used as control */
+				adapter->itr_setting = adapter->itr & ~3;
+				break;
+			}
+#ifdef module_param_array
+		} else {
+			adapter->itr_setting = opt.def;
+			adapter->itr = 20000;
+		}
+#endif
+	}
+	{ /* Smart Power Down */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "PHY Smart Power Down",
+			.err  = "defaulting to Disabled",
+			.def  = OPTION_DISABLED
+		};
+
+#ifdef module_param_array
+		if (num_SmartPowerDownEnable > bd) {
+#endif
+			unsigned int spd = SmartPowerDownEnable[bd];
+			e1000_validate_option(&spd, &opt, adapter);
+			adapter->flags |= spd ? E1000_FLAG_SMART_POWER_DOWN : 0;
+#ifdef module_param_array
+		} else {
+			adapter->flags &= ~E1000_FLAG_SMART_POWER_DOWN;
+		}
+#endif
+	}
+	{ /* Kumeran Lock Loss Workaround */
+		struct e1000_option opt = {
+			.type = enable_option,
+			.name = "Kumeran Lock Loss Workaround",
+			.err  = "defaulting to Enabled",
+			.def  = OPTION_ENABLED
+		};
+
+#ifdef module_param_array
+		if (num_KumeranLockLoss > bd) {
+#endif
+			unsigned int kmrn_lock_loss = KumeranLockLoss[bd];
+			e1000_validate_option(&kmrn_lock_loss, &opt, adapter);
+			if (hw->mac.type == e1000_ich8lan)
+				e1000_set_kmrn_lock_loss_workaround_ich8lan(hw,
+				                                kmrn_lock_loss);
+#ifdef module_param_array
+		} else {
+			if (hw->mac.type == e1000_ich8lan)
+				e1000_set_kmrn_lock_loss_workaround_ich8lan(hw,
+				                                       opt.def);
+		}
+#endif
+	}
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_fiber:
+	case e1000_media_type_internal_serdes:
+		e1000_check_fiber_options(adapter);
+		break;
+	case e1000_media_type_copper:
+		e1000_check_copper_options(adapter);
+		break;
+	default:
+		BUG();
+	}
+
+}
+
+/**
+ * e1000_check_fiber_options - Range Checking for Link Options, Fiber Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on fiber adapters
+ **/
+static void e1000_check_fiber_options(struct e1000_adapter *adapter)
+{
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+	if ((Speed[bd] != OPTION_UNSET)) {
+#else
+	if (num_Speed > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Speed not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((Duplex[bd] != OPTION_UNSET)) {
+#else
+	if (num_Duplex > bd) {
+#endif
+		DPRINTK(PROBE, INFO, "Duplex not valid for fiber adapters, "
+		       "parameter ignored\n");
+	}
+
+#ifndef module_param_array
+	if ((AutoNeg[bd] != OPTION_UNSET) && (AutoNeg[bd] != 0x20)) {
+#else
+	if ((num_AutoNeg > bd) && (AutoNeg[bd] != 0x20)) {
+#endif
+		DPRINTK(PROBE, INFO, "AutoNeg other than 1000/Full is "
+				 "not valid for fiber adapters, "
+				 "parameter ignored\n");
+	}
+}
+
+/**
+ * e1000_check_copper_options - Range Checking for Link Options, Copper Version
+ * @adapter: board private structure
+ *
+ * Handles speed and duplex options on copper adapters
+ **/
+static void e1000_check_copper_options(struct e1000_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int speed, dplx, an;
+	int bd = adapter->bd_number;
+#ifndef module_param_array
+	bd = bd > E1000_MAX_NIC ? E1000_MAX_NIC : bd;
+#endif
+
+	{ /* Speed */
+		struct e1000_opt_list speed_list[] = {{          0, "" },
+						      {   SPEED_10, "" },
+						      {  SPEED_100, "" },
+						      { SPEED_1000, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Speed",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(speed_list),
+					 .p = speed_list }}
+		};
+
+#ifdef module_param_array
+		if (num_Speed > bd) {
+#endif
+			speed = Speed[bd];
+			e1000_validate_option(&speed, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			speed = opt.def;
+		}
+#endif
+	}
+	{ /* Duplex */
+		struct e1000_opt_list dplx_list[] = {{           0, "" },
+						     { HALF_DUPLEX, "" },
+						     { FULL_DUPLEX, "" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "Duplex",
+			.err  = "parameter ignored",
+			.def  = 0,
+			.arg  = { .l = { .nr = ARRAY_SIZE(dplx_list),
+					 .p = dplx_list }}
+		};
+
+		if (e1000_check_reset_block(hw)) {
+			DPRINTK(PROBE, INFO,
+				"Link active due to SoL/IDER Session. "
+			        "Speed/Duplex/AutoNeg parameter ignored.\n");
+			return;
+		}
+#ifdef module_param_array
+		if (num_Duplex > bd) {
+#endif
+			dplx = Duplex[bd];
+			e1000_validate_option(&dplx, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			dplx = opt.def;
+		}
+#endif
+	}
+
+#ifdef module_param_array
+	if ((num_AutoNeg > bd) && (speed != 0 || dplx != 0)) {
+#else
+	if (AutoNeg[bd] != OPTION_UNSET && (speed != 0 || dplx != 0)) {
+#endif
+		DPRINTK(PROBE, INFO,
+		       "AutoNeg specified along with Speed or Duplex, "
+		       "parameter ignored\n");
+		hw->phy.autoneg_advertised = AUTONEG_ADV_DEFAULT;
+	} else { /* Autoneg */
+		struct e1000_opt_list an_list[] =
+			#define AA "AutoNeg advertising "
+			{{ 0x01, AA "10/HD" },
+			 { 0x02, AA "10/FD" },
+			 { 0x03, AA "10/FD, 10/HD" },
+			 { 0x04, AA "100/HD" },
+			 { 0x05, AA "100/HD, 10/HD" },
+			 { 0x06, AA "100/HD, 10/FD" },
+			 { 0x07, AA "100/HD, 10/FD, 10/HD" },
+			 { 0x08, AA "100/FD" },
+			 { 0x09, AA "100/FD, 10/HD" },
+			 { 0x0a, AA "100/FD, 10/FD" },
+			 { 0x0b, AA "100/FD, 10/FD, 10/HD" },
+			 { 0x0c, AA "100/FD, 100/HD" },
+			 { 0x0d, AA "100/FD, 100/HD, 10/HD" },
+			 { 0x0e, AA "100/FD, 100/HD, 10/FD" },
+			 { 0x0f, AA "100/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x20, AA "1000/FD" },
+			 { 0x21, AA "1000/FD, 10/HD" },
+			 { 0x22, AA "1000/FD, 10/FD" },
+			 { 0x23, AA "1000/FD, 10/FD, 10/HD" },
+			 { 0x24, AA "1000/FD, 100/HD" },
+			 { 0x25, AA "1000/FD, 100/HD, 10/HD" },
+			 { 0x26, AA "1000/FD, 100/HD, 10/FD" },
+			 { 0x27, AA "1000/FD, 100/HD, 10/FD, 10/HD" },
+			 { 0x28, AA "1000/FD, 100/FD" },
+			 { 0x29, AA "1000/FD, 100/FD, 10/HD" },
+			 { 0x2a, AA "1000/FD, 100/FD, 10/FD" },
+			 { 0x2b, AA "1000/FD, 100/FD, 10/FD, 10/HD" },
+			 { 0x2c, AA "1000/FD, 100/FD, 100/HD" },
+			 { 0x2d, AA "1000/FD, 100/FD, 100/HD, 10/HD" },
+			 { 0x2e, AA "1000/FD, 100/FD, 100/HD, 10/FD" },
+			 { 0x2f, AA "1000/FD, 100/FD, 100/HD, 10/FD, 10/HD" }};
+
+		struct e1000_option opt = {
+			.type = list_option,
+			.name = "AutoNeg",
+			.err  = "parameter ignored",
+			.def  = AUTONEG_ADV_DEFAULT,
+			.arg  = { .l = { .nr = ARRAY_SIZE(an_list),
+					 .p = an_list }}
+		};
+
+#ifdef module_param_array
+		if (num_AutoNeg > bd) {
+#endif
+			an = AutoNeg[bd];
+			e1000_validate_option(&an, &opt, adapter);
+#ifdef module_param_array
+		} else {
+			an = opt.def;
+		}
+#endif
+		hw->phy.autoneg_advertised = an;
+	}
+
+	switch (speed + dplx) {
+	case 0:
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+#ifdef module_param_array
+		if ((num_Speed > bd) && (speed != 0 || dplx != 0))
+#else
+		if (Speed[bd] != OPTION_UNSET || Duplex[bd] != OPTION_UNSET)
+#endif
+			DPRINTK(PROBE, INFO,
+			       "Speed and duplex autonegotiation enabled\n");
+		break;
+	case HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Half Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Half Duplex only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_10_HALF |
+		                             ADVERTISE_100_HALF;
+		break;
+	case FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Full Duplex specified without Speed\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"Full Duplex only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_10_FULL |
+		                             ADVERTISE_100_FULL |
+		                             ADVERTISE_1000_FULL;
+		break;
+	case SPEED_10:
+		DPRINTK(PROBE, INFO, "10 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at 10 Mbps only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_10_HALF |
+		                             ADVERTISE_10_FULL;
+		break;
+	case SPEED_10 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Half Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_10_HALF;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_10 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 10 Mbps Full Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_10_FULL;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_100:
+		DPRINTK(PROBE, INFO, "100 Mbps Speed specified "
+			"without Duplex\n");
+		DPRINTK(PROBE, INFO, "Using Autonegotiation at "
+			"100 Mbps only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_100_HALF |
+		                             ADVERTISE_100_FULL;
+		break;
+	case SPEED_100 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Half Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_100_HALF;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_100 + FULL_DUPLEX:
+		DPRINTK(PROBE, INFO, "Forcing to 100 Mbps Full Duplex\n");
+		hw->mac.autoneg = adapter->fc_autoneg = FALSE;
+		hw->mac.forced_speed_duplex = ADVERTISE_100_FULL;
+		hw->phy.autoneg_advertised = 0;
+		break;
+	case SPEED_1000:
+		DPRINTK(PROBE, INFO, "1000 Mbps Speed specified without "
+			"Duplex\n");
+		goto full_duplex_only;
+	case SPEED_1000 + HALF_DUPLEX:
+		DPRINTK(PROBE, INFO,
+			"Half Duplex is not supported at 1000 Mbps\n");
+		fallthrough;
+	case SPEED_1000 + FULL_DUPLEX:
+full_duplex_only:
+		DPRINTK(PROBE, INFO,
+		       "Using Autonegotiation at 1000 Mbps Full Duplex only\n");
+		hw->mac.autoneg = adapter->fc_autoneg = TRUE;
+		hw->phy.autoneg_advertised = ADVERTISE_1000_FULL;
+		break;
+	default:
+		BUG();
+	}
+
+	/* Speed, AutoNeg and MDI/MDI-X must all play nice */
+	if (e1000_validate_mdi_setting(&(adapter->hw)) < 0) {
+		DPRINTK(PROBE, INFO,
+			"Speed, AutoNeg and MDI-X specifications are "
+			"incompatible. Setting MDI-X to a compatible value.\n");
+	}
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c
new file mode 100644
index 0000000..cec2ba3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.c
@@ -0,0 +1,2106 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#include "e1000_api.h"
+#include "e1000_phy.h"
+
+static s32  e1000_get_phy_cfg_done(struct e1000_hw *hw);
+static void e1000_release_phy(struct e1000_hw *hw);
+static s32  e1000_acquire_phy(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] =
+	{ 0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_m88_cable_length_table)
+
+static const u16 e1000_igp_2_cable_length_table[] =
+    { 0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+      0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+      6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+      21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+      40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+      60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+      83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+      104, 109, 114, 118, 121, 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+		ARRAY_SIZE(e1000_igp_2_cable_length_table)
+
+/**
+ *  e1000_check_reset_block_generic - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return E1000_SUCCESS, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 e1000_check_reset_block_generic(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	DEBUGFUNC("e1000_check_reset_block");
+
+	manc = E1000_READ_REG(hw, E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ?
+	       E1000_BLK_PHY_RESET : E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 e1000_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 phy_id;
+
+	DEBUGFUNC("e1000_get_phy_id");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_ID1, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id = (u32)(phy_id << 16);
+	usec_delay(20);
+	ret_val = e1000_read_phy_reg(hw, PHY_ID2, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+	return ret_val;
+}
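+
+/*
+ * Worked example (added comment): with PHY_ID1 = 0x0141 and PHY_ID2 = 0x0CC2,
+ * the code above yields phy->id = 0x01410CC0 (M88E1111_I_PHY_ID) and
+ * phy->revision = 2, since PHY_REVISION_MASK keeps the low four revision
+ * bits of ID2 out of the id.
+ */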
+
+/**
+ *  e1000_phy_reset_dsp_generic - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+s32 e1000_phy_reset_dsp_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_phy_reset_dsp_generic");
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_read_phy_reg_mdic");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+	        (phy->addr << E1000_MDIC_PHY_SHIFT) |
+	        (E1000_MDIC_OP_READ));
+
+	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI read completed.
+	 * The timeout was increased because testing showed failures
+	 * with the shorter timeout.
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		usec_delay(50);
+		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		DEBUGOUT("MDI Read did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		DEBUGOUT("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	*data = (u16) mdic;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = E1000_SUCCESS;
+
+	DEBUGFUNC("e1000_write_phy_reg_mdic");
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		DEBUGOUT1("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/*
+	 * Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	mdic = (((u32)data) |
+	        (offset << E1000_MDIC_REG_SHIFT) |
+	        (phy->addr << E1000_MDIC_PHY_SHIFT) |
+	        (E1000_MDIC_OP_WRITE));
+
+	E1000_WRITE_REG(hw, E1000_MDIC, mdic);
+
+	/*
+	 * Poll the ready bit to see if the MDI write completed.
+	 * The timeout was increased because testing showed failures
+	 * with the shorter timeout.
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		usec_delay(50);
+		mdic = E1000_READ_REG(hw, E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		DEBUGOUT("MDI Write did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		DEBUGOUT("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
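+
+/*
+ * Added note on the MDIC handshake: both accessors build a single MDIC word
+ * from the op-code, PHY address and register offset, then poll
+ * E1000_MDIC_READY at 50 usec intervals for up to 3 * E1000_GEN_POLL_TIMEOUT
+ * iterations, reporting -E1000_ERR_PHY on a timeout or when
+ * E1000_MDIC_ERROR is set.
+ */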
+
+/**
+ *  e1000_read_phy_reg_m88 - Read m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_phy_reg_m88");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg_mdic(hw,
+	                                  MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_m88 - Write m88 PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_write_phy_reg_m88");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_write_phy_reg_mdic(hw,
+	                                   MAX_PHY_REG_ADDRESS & offset,
+	                                   data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary, then reads the PHY register at offset
+ *  and stores the retrieved information in data.  Release any acquired
+ *  semaphores before exiting.
+ **/
+s32 e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_phy_reg_igp");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000_write_phy_reg_mdic(hw,
+		                                   IGP01E1000_PHY_PAGE_SELECT,
+		                                   (u16)offset);
+		if (ret_val) {
+			e1000_release_phy(hw);
+			goto out;
+		}
+	}
+
+	ret_val = e1000_read_phy_reg_mdic(hw,
+	                                  MAX_PHY_REG_ADDRESS & offset,
+	                                  data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_write_phy_reg_igp");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = e1000_write_phy_reg_mdic(hw,
+		                                   IGP01E1000_PHY_PAGE_SELECT,
+		                                   (u16)offset);
+		if (ret_val) {
+			e1000_release_phy(hw);
+			goto out;
+		}
+	}
+
+	ret_val = e1000_write_phy_reg_mdic(hw,
+	                                   MAX_PHY_REG_ADDRESS & offset,
+	                                   data);
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
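+
+/*
+ * Added note: for IGP PHYs, register offsets above MAX_PHY_MULTI_PAGE_REG
+ * live on another page, so both igp accessors first write the full offset to
+ * IGP01E1000_PHY_PAGE_SELECT and then address the register with only its low
+ * bits (MAX_PHY_REG_ADDRESS & offset).
+ */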
+
+/**
+ *  e1000_read_kmrn_reg_generic - Read kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires semaphore, if necessary.  Then reads the PHY register at offset
+ *  using the kumeran interface.  The information retrieved is stored in data.
+ *  Release any acquired semaphores before exiting.
+ **/
+s32 e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_read_kmrn_reg_generic");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | E1000_KMRNCTRLSTA_REN;
+	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+	usec_delay(2);
+
+	kmrnctrlsta = E1000_READ_REG(hw, E1000_KMRNCTRLSTA);
+	*data = (u16)kmrnctrlsta;
+
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_write_kmrn_reg_generic - Write kumeran register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary.  Then write the data to PHY register
+ *  at the offset using the kumeran interface.  Release any acquired semaphores
+ *  before exiting.
+ **/
+s32 e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	u32 kmrnctrlsta;
+	s32 ret_val;
+
+	DEBUGFUNC("e1000_write_kmrn_reg_generic");
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	kmrnctrlsta = ((offset << E1000_KMRNCTRLSTA_OFFSET_SHIFT) &
+	               E1000_KMRNCTRLSTA_OFFSET) | data;
+	E1000_WRITE_REG(hw, E1000_KMRNCTRLSTA, kmrnctrlsta);
+
+	usec_delay(2);
+	e1000_release_phy(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_m88 - Setup m88 PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit
+ *  clock and downshift values are also set.
+ **/
+s32 e1000_copper_link_setup_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	DEBUGFUNC("e1000_copper_link_setup_m88");
+
+	if (phy->reset_disable) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+	/*
+	 * Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+		case 1:
+			phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+			break;
+		case 2:
+			phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+			break;
+		case 3:
+			phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+			break;
+		case 0:
+		default:
+			phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+			break;
+	}
+
+	/*
+	 * Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	if (phy->revision < E1000_REVISION_4) {
+		/*
+		 * Force TX_CLK in the Extended PHY Specific Control Register
+		 * to 25MHz clock.
+		 */
+		ret_val = e1000_read_phy_reg(hw,
+		                             M88E1000_EXT_PHY_SPEC_CTRL,
+		                             &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+		if ((phy->revision == E1000_REVISION_2) &&
+		    (phy->id == M88E1111_I_PHY_ID)) {
+			/* 82573L PHY - set the downshift counter to 5x. */
+			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+		} else {
+			/* Configure Master and Slave downshift values */
+			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+			             M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+			             M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+		}
+		ret_val = e1000_write_phy_reg(hw,
+		                             M88E1000_EXT_PHY_SPEC_CTRL,
+		                             phy_data);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Commit the changes. */
+	ret_val = e1000_phy_commit(hw);
+	if (ret_val) {
+		DEBUGOUT("Error committing the PHY changes\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_setup_igp - Setup igp PHY's for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config
+ *  for igp PHYs.
+ **/
+s32 e1000_copper_link_setup_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_copper_link_setup_igp");
+
+	if (phy->reset_disable) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_phy_hw_reset(hw);
+	if (ret_val) {
+		DEBUGOUT("Error resetting the PHY.\n");
+		goto out;
+	}
+
+	/* Wait 15ms for MAC to configure PHY from NVM settings. */
+	msec_delay(15);
+
+	/*
+	 * The NVM settings will configure LPLU in D3 for
+	 * non-IGP1 PHYs.
+	 */
+	if (phy->type == e1000_phy_igp) {
+		/* disable lplu d3 during driver init */
+		ret_val = e1000_set_d3_lplu_state(hw, FALSE);
+		if (ret_val) {
+			DEBUGOUT("Error Disabling LPLU D3\n");
+			goto out;
+		}
+	}
+
+	/* disable lplu d0 during driver init */
+	ret_val = e1000_set_d0_lplu_state(hw, FALSE);
+	if (ret_val) {
+		DEBUGOUT("Error Disabling LPLU D0\n");
+		goto out;
+	}
+	/* Configure mdi-mdix settings */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+	switch (phy->mdix) {
+	case 1:
+		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 2:
+		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 0:
+	default:
+		data |= IGP01E1000_PSCR_AUTO_MDIX;
+		break;
+	}
+	ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+	if (ret_val)
+		goto out;
+
+	/* set auto-master slave resolution settings */
+	if (hw->mac.autoneg) {
+		/*
+		 * When the autonegotiation advertisement is 1000Mbps only,
+		 * disable SmartSpeed and enable Auto Master/Slave
+		 * resolution as the hardware default.
+		 */
+		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+			/* Disable SmartSpeed */
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+
+			/* Set auto Master/Slave resolution process */
+			ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~CR_1000T_MS_ENABLE;
+			ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data);
+			if (ret_val)
+				goto out;
+		}
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_CTRL, &data);
+		if (ret_val)
+			goto out;
+
+		/* load defaults for future use */
+		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+			((data & CR_1000T_MS_VALUE) ?
+			e1000_ms_force_master :
+			e1000_ms_force_slave) :
+			e1000_ms_auto;
+
+		switch (phy->ms_type) {
+		case e1000_ms_force_master:
+			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_force_slave:
+			data |= CR_1000T_MS_ENABLE;
+			data &= ~(CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_auto:
+			data &= ~CR_1000T_MS_ENABLE;
+			fallthrough;
+		default:
+			break;
+		}
+		ret_val = e1000_write_phy_reg(hw, PHY_1000T_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on the autoneg advertisement parameter,
+ *  then configures the PHY to advertise the full capability.  Sets up the
+ *  PHY for autoneg and restarts the negotiation process with the link
+ *  partner.  If autoneg_wait_to_complete is set, waits for autoneg to
+ *  complete before exiting.
+ **/
+s32 e1000_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	DEBUGFUNC("e1000_copper_link_autoneg");
+
+	/*
+	 * Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/*
+	 * If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	DEBUGOUT("Reconfiguring auto-neg advertisement params\n");
+	ret_val = e1000_phy_setup_autoneg(hw);
+	if (ret_val) {
+		DEBUGOUT("Error Setting up Auto-Negotiation\n");
+		goto out;
+	}
+	DEBUGOUT("Restarting Auto-Neg\n");
+
+	/*
+	 * Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, from a callback routine)?
+	 */
+	if (phy->autoneg_wait_to_complete) {
+		ret_val = e1000_wait_autoneg(hw);
+		if (ret_val) {
+			DEBUGOUT("Error while waiting for "
+			         "autoneg to complete\n");
+			goto out;
+		}
+	}
+
+	hw->mac.get_link_status = TRUE;
+
+out:
+	return ret_val;
+}
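+
+/*
+ * Editorial sketch, not part of the original driver: a caller would
+ * typically prime the advertisement fields before starting autoneg.
+ * The field and constant names below are the ones used above; the
+ * surrounding initialization is assumed.
+ *
+ *	hw->phy.autoneg_advertised = ADVERTISE_100_FULL | ADVERTISE_1000_FULL;
+ *	hw->phy.autoneg_wait_to_complete = TRUE;
+ *	ret_val = e1000_copper_link_autoneg(hw);
+ */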
+
+/**
+ *  e1000_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register.  If the PHY is already set up for auto-negotiation, returns
+ *  success.  Otherwise, sets up advertisement and flow control to the
+ *  appropriate values for the desired auto-negotiation.
+ **/
+s32 e1000_phy_setup_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg = 0;
+
+	DEBUGFUNC("e1000_phy_setup_autoneg");
+
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	ret_val = e1000_read_phy_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		/* Read the MII 1000Base-T Control Register (Address 9). */
+		ret_val = e1000_read_phy_reg(hw,
+		                            PHY_1000T_CTRL,
+		                            &mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+	/*
+	 * Need to parse both autoneg_advertised and fc and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/*
+	 * First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the  1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+	                         NWAY_AR_100TX_HD_CAPS |
+	                         NWAY_AR_10T_FD_CAPS   |
+	                         NWAY_AR_10T_HD_CAPS);
+	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+	DEBUGOUT1("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+	/* Do we want to advertise 10 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+		DEBUGOUT("Advertise 10mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+	}
+
+	/* Do we want to advertise 10 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+		DEBUGOUT("Advertise 10mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+		DEBUGOUT("Advertise 100mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+		DEBUGOUT("Advertise 100mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+	}
+
+	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+	if (phy->autoneg_advertised & ADVERTISE_1000_HALF) {
+		DEBUGOUT("Advertise 1000mb Half duplex request denied!\n");
+	}
+
+	/* Do we want to advertise 1000 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+		DEBUGOUT("Advertise 1000mb Full duplex\n");
+		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+	}
+
+	/*
+	 * Check for a software override of the flow control settings, and
+	 * setup the PHY advertisement registers accordingly.  If
+	 * auto-negotiation is enabled, then software will have to set the
+	 * "PAUSE" bits to the correct value in the Auto-Negotiation
+	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+	 * negotiation.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not support receiving pause frames).
+	 *      3:  Both Rx and Tx flow control (symmetric) are enabled.
+	 *  other:  No software override.  The flow control configuration
+	 *          in the EEPROM is used.
+	 */
+	switch (hw->fc.type) {
+	case e1000_fc_none:
+		/*
+		 * Flow control (Rx & Tx) is completely disabled by a
+		 * software over-ride.
+		 */
+		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_rx_pause:
+		/*
+		 * Rx Flow control is enabled, and Tx Flow control is
+		 * disabled, by a software over-ride.
+		 *
+		 * Since there really isn't a way to advertise that we are
+		 * capable of Rx Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric Rx PAUSE.  Later
+		 * (in e1000_config_fc_after_link_up) we will disable the
+		 * hw's ability to send PAUSE frames.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/*
+		 * Tx Flow control is enabled, and Rx Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/*
+		 * Flow control (both Rx and Tx) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		DEBUGOUT("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_write_phy_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		ret_val = e1000_write_phy_reg(hw,
+		                              PHY_1000T_CTRL,
+		                              mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
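+
+/*
+ * Editorial summary of the fc.type switch above: the two NWAY bits map
+ * flow control modes as follows (Rx-only pause cannot be advertised by
+ * itself, so it advertises both and relies on
+ * e1000_config_fc_after_link_up to mask Tx pause):
+ *
+ *	e1000_fc_none:     ASM_DIR=0  PAUSE=0
+ *	e1000_fc_rx_pause: ASM_DIR=1  PAUSE=1
+ *	e1000_fc_tx_pause: ASM_DIR=1  PAUSE=0
+ *	e1000_fc_full:     ASM_DIR=1  PAUSE=1
+ */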
+
+/**
+ *  e1000_setup_copper_link_generic - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established,
+ *  collision distance and flow control are configured.  If link is not
+ *  established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_setup_copper_link_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+	DEBUGFUNC("e1000_setup_copper_link_generic");
+
+	if (hw->mac.autoneg) {
+		/*
+		 * Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = e1000_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/*
+		 * PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		DEBUGOUT("Forcing Speed and Duplex\n");
+		ret_val = e1000_phy_force_speed_duplex(hw);
+		if (ret_val) {
+			DEBUGOUT("Error Forcing Speed and Duplex\n");
+			goto out;
+		}
+	}
+
+	/*
+	 * Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = e1000_phy_has_link_generic(hw,
+	                                     COPPER_LINK_UP_LIMIT,
+	                                     10,
+	                                     &link);
+	if (ret_val)
+		goto out;
+
+	if (link) {
+		DEBUGOUT("Valid link established!!!\n");
+		e1000_config_collision_dist_generic(hw);
+		ret_val = e1000_config_fc_after_link_up_generic(hw);
+	} else {
+		DEBUGOUT("Unable to establish link!!!\n");
+	}
+
+out:
+	return ret_val;
+}
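+
+/*
+ * Editorial sketch, not part of the original driver: hw->mac.autoneg
+ * selects between the two paths above.
+ *
+ *	hw->mac.autoneg = 1;	(or 0, with forced_speed_duplex set)
+ *	ret_val = e1000_setup_copper_link_generic(hw);
+ */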
+
+/**
+ *  e1000_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  successful if link up is successful, else -E1000_ERR_PHY (-2).
+ **/
+s32 e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_igp");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = e1000_write_phy_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("IGP PSCR: %X\n", phy_data);
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on IGP phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			DEBUGOUT("Link taking longer than expected.\n");
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on Tx must be set.  Returns success upon
+ *  completion, else the corresponding error code.
+ **/
+s32 e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_m88");
+
+	/*
+	 * Clear Auto-Crossover to force MDI manually.  M88E1000 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	DEBUGOUT1("M88E1000 PSCR: %X\n", phy_data);
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	e1000_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	/* Reset the phy to commit changes. */
+	phy_data |= MII_CR_RESET;
+
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		DEBUGOUT("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			/*
+			 * We didn't get link.
+			 * Reset the DSP and cross our fingers.
+			 */
+			ret_val = e1000_write_phy_reg(hw,
+			                              M88E1000_PHY_PAGE_SELECT,
+			                              0x001d);
+			if (ret_val)
+				goto out;
+			ret_val = e1000_phy_reset_dsp_generic(hw);
+			if (ret_val)
+				goto out;
+		}
+
+		/* Try once more */
+		ret_val = e1000_phy_has_link_generic(hw,
+		                                     PHY_FORCE_LIMIT,
+		                                     100000,
+		                                     &link);
+		if (ret_val)
+			goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/*
+	 * In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = e1000_write_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_force_speed_duplex_setup");
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.type = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		DEBUGOUT("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+		DEBUGOUT("Forcing 100mb\n");
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl |= MII_CR_SPEED_10;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		DEBUGOUT("Forcing 10mb\n");
+	}
+
+	e1000_config_collision_dist_generic(hw);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+}
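+
+/*
+ * Worked example (editorial): forcing 100 Mb/s full duplex through the
+ * logic above yields
+ *
+ *	ctrl      |= E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX |
+ *	             E1000_CTRL_SPD_100 | E1000_CTRL_FD    (ASDE cleared)
+ *	*phy_ctrl |= MII_CR_SPEED_100 | MII_CR_FULL_DUPLEX
+ *
+ * and the caller must still write *phy_ctrl to PHY_CONTROL, as noted in
+ * the function header.
+ */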
+
+/**
+ *  e1000_set_d3_lplu_state_generic - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_set_d3_lplu_state_generic");
+
+	ret_val = e1000_read_phy_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                             IGP02E1000_PHY_POWER_MGMT,
+		                             data);
+		if (ret_val)
+			goto out;
+		/*
+		 * LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = e1000_read_phy_reg(hw,
+			                            IGP01E1000_PHY_PORT_CONFIG,
+			                            &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = e1000_read_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = e1000_write_phy_reg(hw,
+			                             IGP01E1000_PHY_PORT_CONFIG,
+			                             data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+	           (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+	           (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP02E1000_PHY_POWER_MGMT,
+		                              data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = e1000_read_phy_reg(hw,
+		                             IGP01E1000_PHY_PORT_CONFIG,
+		                             &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = e1000_write_phy_reg(hw,
+		                              IGP01E1000_PHY_PORT_CONFIG,
+		                              data);
+	}
+
+out:
+	return ret_val;
+}
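+
+/*
+ * Editorial summary of the branches above: when active is false, D3 LPLU
+ * is cleared and SmartSpeed is restored according to phy->smart_speed;
+ * when active is true and the advertisement is all speeds, all non-gig,
+ * or 10 Mb/s only, D3 LPLU is set and SmartSpeed is disabled, since the
+ * two features are mutually exclusive.
+ */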
+
+/**
+ *  e1000_check_downshift_generic - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 e1000_check_downshift_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	DEBUGFUNC("e1000_check_downshift_generic");
+
+	switch (phy->type) {
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+		offset	= M88E1000_PHY_SPEC_STATUS;
+		mask	= M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp:
+	case e1000_phy_igp_3:
+		offset	= IGP01E1000_PHY_LINK_HEALTH;
+		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = FALSE;
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask) ? TRUE : FALSE;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 e1000_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	DEBUGFUNC("e1000_check_polarity_m88");
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  e1000_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+s32 e1000_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	DEBUGFUNC("e1000_check_polarity_igp");
+
+	/*
+	 * Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset	= IGP01E1000_PHY_PCS_INIT_REG;
+		mask	= IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/*
+		 * This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset	= IGP01E1000_PHY_PORT_STATUS;
+		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+		                      ? e1000_rev_polarity_reversed
+		                      : e1000_rev_polarity_normal;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_wait_autoneg_generic - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+s32 e1000_wait_autoneg_generic(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+	DEBUGFUNC("e1000_wait_autoneg_generic");
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
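+		/*
+		 * Read PHY_STATUS twice: some status bits are latched, so
+		 * the second read reflects the current state (see the note
+		 * in e1000_phy_has_link_generic below).
+		 */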
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msec_delay(100);
+	}
+
+	/*
+	 * PHY_AUTO_NEG_LIMIT expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_has_link_generic - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                               u32 usec_interval, bool *success)
+{
+	s32 ret_val = E1000_SUCCESS;
+	u16 i, phy_status;
+
+	DEBUGFUNC("e1000_phy_has_link_generic");
+
+	for (i = 0; i < iterations; i++) {
+		/*
+		 * Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky.  No harm doing
+		 * it across the board.
+		 */
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		ret_val = e1000_read_phy_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			msec_delay_irq(usec_interval/1000);
+		else
+			usec_delay(usec_interval);
+	}
+
+	*success = (i < iterations) ? TRUE : FALSE;
+
+	return ret_val;
+}
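+
+/*
+ * Editorial usage sketch, mirroring the call in
+ * e1000_setup_copper_link_generic() above:
+ *
+ *	bool link;
+ *	s32 err = e1000_phy_has_link_generic(hw, COPPER_LINK_UP_LIMIT,
+ *	                                     10, &link);
+ *
+ * On success, link reports whether the PHY saw link within
+ * COPPER_LINK_UP_LIMIT polls spaced 10 microseconds apart.
+ */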
+
+/**
+ *  e1000_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has five
+ *  possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 e1000_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	DEBUGFUNC("e1000_get_cable_length_m88");
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+	        M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index+1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
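+
+/*
+ * Worked example (editorial): a register index of 1 selects
+ * e1000_m88_cable_length_table[1] and [2] as min/max, i.e. the 50 and 80
+ * meter boundaries from the table in the function header, so the reported
+ * "average" cable length is their midpoint, 65 meters.
+ */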
+
+/**
+ *  e1000_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain values, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.
+ **/
+s32 e1000_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = E1000_SUCCESS;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] =
+	                                                 {IGP02E1000_PHY_AGC_A,
+	                                                  IGP02E1000_PHY_AGC_B,
+	                                                  IGP02E1000_PHY_AGC_C,
+	                                                  IGP02E1000_PHY_AGC_D};
+
+	DEBUGFUNC("e1000_get_cable_length_igp_2");
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = e1000_read_phy_reg(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			goto out;
+
+		/*
+		 * Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values.  The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+		                IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+	              e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
+
+	/*
+	 * Calculate cable length with an error range of
+	 * +/- IGP02E1000_AGC_RANGE (15) meters.
+	 */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+	                         (agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
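+
+/*
+ * Worked example (editorial, with made-up channel readings): if the four
+ * AGC lookups return 40, 50, 60 and 110 meters, the sum is 260; dropping
+ * the min (40) and max (110) and dividing by IGP02E1000_PHY_CHANNEL_NUM - 2
+ * leaves agc_value = 55, so with IGP02E1000_AGC_RANGE = 15 the reported
+ * range is 40..70 meters and cable_length = 55.
+ */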
+
+/**
+ *  e1000_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u16 phy_data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_m88");
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		DEBUGOUT("Phy info is only valid for copper media\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+	                           ? TRUE
+	                           : FALSE;
+
+	ret_val = e1000_check_polarity_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 e1000_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	DEBUGFUNC("e1000_get_phy_info_igp");
+
+	ret_val = e1000_phy_has_link_generic(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		DEBUGOUT("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = TRUE;
+
+	ret_val = e1000_check_polarity_igp(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = e1000_read_phy_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? TRUE : FALSE;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = e1000_get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = e1000_read_phy_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+		                ? e1000_1000t_rx_status_ok
+		                : e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+		                 ? e1000_1000t_rx_status_ok
+		                 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_sw_reset_generic - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register,
+ *  setting the reset bit, and writing the register back to the PHY.
+ **/
+s32 e1000_phy_sw_reset_generic(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	DEBUGFUNC("e1000_phy_sw_reset_generic");
+
+	ret_val = e1000_read_phy_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = e1000_write_phy_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	usec_delay(1);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_phy_hw_reset_generic - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire the
+ *  semaphore (if necessary) and read/set/write the PHY reset bit in the
+ *  device control register.  Wait the appropriate delay time for the
+ *  device to reset and release the semaphore (if necessary).
+ **/
+s32 e1000_phy_hw_reset_generic(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u32 ctrl;
+
+	DEBUGFUNC("e1000_phy_hw_reset_generic");
+
+	ret_val = e1000_check_reset_block(hw);
+	if (ret_val) {
+		ret_val = E1000_SUCCESS;
+		goto out;
+	}
+
+	ret_val = e1000_acquire_phy(hw);
+	if (ret_val)
+		goto out;
+
+	ctrl = E1000_READ_REG(hw, E1000_CTRL);
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(phy->reset_delay_us);
+
+	E1000_WRITE_REG(hw, E1000_CTRL, ctrl);
+	E1000_WRITE_FLUSH(hw);
+
+	usec_delay(150);
+
+	e1000_release_phy(hw);
+
+	ret_val = e1000_get_phy_cfg_done(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  e1000_get_cfg_done_generic - Generic configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Generic function to wait 10 milliseconds for configuration to complete
+ *  and return success.
+ **/
+s32 e1000_get_cfg_done_generic(struct e1000_hw *hw)
+{
+	DEBUGFUNC("e1000_get_cfg_done_generic");
+
+	msec_delay_irq(10);
+
+	return E1000_SUCCESS;
+}
+
+/* Internal function pointers */
+
+/**
+ *  e1000_get_phy_cfg_done - Generic PHY configuration done
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if the silicon family did not implement a family-specific
+ *  get_cfg_done function.
+ **/
+static s32 e1000_get_phy_cfg_done(struct e1000_hw *hw)
+{
+	if (hw->func.get_cfg_done)
+		return hw->func.get_cfg_done(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_release_phy - Generic release PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return if silicon family does not require a semaphore when accessing the
+ *  PHY.
+ **/
+static void e1000_release_phy(struct e1000_hw *hw)
+{
+	if (hw->func.release_phy)
+		hw->func.release_phy(hw);
+}
+
+/**
+ *  e1000_acquire_phy - Generic acquire PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Return success if silicon family does not require a semaphore when
+ *  accessing the PHY.
+ **/
+static s32 e1000_acquire_phy(struct e1000_hw *hw)
+{
+	if (hw->func.acquire_phy)
+		return hw->func.acquire_phy(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_force_speed_duplex - Generic force PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *
+ *  When the silicon family has not implemented a forced speed/duplex
+ *  function for the PHY, simply return E1000_SUCCESS.
+ **/
+s32 e1000_phy_force_speed_duplex(struct e1000_hw *hw)
+{
+	if (hw->func.force_speed_duplex)
+		return hw->func.force_speed_duplex(hw);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit PHY3 when an EEPROM is not present.
+ **/
+s32 e1000_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	DEBUGOUT("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	e1000_write_phy_reg(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	e1000_write_phy_reg(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	e1000_write_phy_reg(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	e1000_write_phy_reg(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to Tx amplitude in Giga mode */
+	e1000_write_phy_reg(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	e1000_write_phy_reg(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	e1000_write_phy_reg(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	e1000_write_phy_reg(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	e1000_write_phy_reg(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	e1000_write_phy_reg(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	e1000_write_phy_reg(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	e1000_write_phy_reg(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	e1000_write_phy_reg(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	e1000_write_phy_reg(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	e1000_write_phy_reg(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	e1000_write_phy_reg(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	e1000_write_phy_reg(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	e1000_write_phy_reg(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	e1000_write_phy_reg(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	e1000_write_phy_reg(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	e1000_write_phy_reg(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	e1000_write_phy_reg(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	e1000_write_phy_reg(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	e1000_write_phy_reg(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	e1000_write_phy_reg(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	e1000_write_phy_reg(hw, 0x1798, 0xD008);
+	/*
+	 * Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	e1000_write_phy_reg(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	e1000_write_phy_reg(hw, 0x187A, 0x0800);
+	/*
+	 * Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	e1000_write_phy_reg(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	e1000_write_phy_reg(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	e1000_write_phy_reg(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	e1000_write_phy_reg(hw, 0x0000, 0x1340);
+
+	return E1000_SUCCESS;
+}
+
+/**
+ *  e1000_get_phy_type_from_id - Get PHY type from id
+ *  @phy_id: phy_id read from the phy
+ *
+ *  Returns the phy type from the id.
+ **/
+e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id)
+{
+	e1000_phy_type phy_type = e1000_phy_unknown;
+
+	switch (phy_id)	{
+	case M88E1000_I_PHY_ID:
+	case M88E1000_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+	case M88E1011_I_PHY_ID:
+		phy_type = e1000_phy_m88;
+		break;
+	case IGP01E1000_I_PHY_ID: /* IGP 1 & 2 share this */
+		phy_type = e1000_phy_igp_2;
+		break;
+	case GG82563_E_PHY_ID:
+		phy_type = e1000_phy_gg82563;
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy_type = e1000_phy_igp_3;
+		break;
+	case IFE_E_PHY_ID:
+	case IFE_PLUS_E_PHY_ID:
+	case IFE_C_E_PHY_ID:
+		phy_type = e1000_phy_ife;
+		break;
+	default:
+		phy_type = e1000_phy_unknown;
+		break;
+	}
+	return phy_type;
+}
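+
+/*
+ * Editorial sketch, not part of the original driver: probe code would
+ * typically classify the id it read back, e.g.
+ *
+ *	hw->phy.type = e1000_get_phy_type_from_id(hw->phy.id);
+ *	if (hw->phy.type == e1000_phy_unknown)
+ *		return -E1000_ERR_PHY;
+ */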
+
+/**
+ * e1000_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down (to save power, to turn off link during
+ * a driver unload, or because wake on lan is not enabled), restore the
+ * link to its previous settings.
+ **/
+void e1000_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * e1000_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Powers down the PHY, for example to save power or to turn off link
+ * during a driver unload when wake on lan is not enabled.
+ **/
+void e1000_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	e1000_read_phy_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	e1000_write_phy_reg(hw, PHY_CONTROL, mii_reg);
+	msec_delay(1);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h
new file mode 100644
index 0000000..111e61e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_phy.h
@@ -0,0 +1,168 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+typedef enum {
+	e1000_ms_hw_default = 0,
+	e1000_ms_force_master,
+	e1000_ms_force_slave,
+	e1000_ms_auto
+} e1000_ms_type;
+
+typedef enum {
+	e1000_smart_speed_default = 0,
+	e1000_smart_speed_on,
+	e1000_smart_speed_off
+} e1000_smart_speed;
+
+s32  e1000_check_downshift_generic(struct e1000_hw *hw);
+s32  e1000_check_polarity_m88(struct e1000_hw *hw);
+s32  e1000_check_polarity_igp(struct e1000_hw *hw);
+s32  e1000_check_reset_block_generic(struct e1000_hw *hw);
+s32  e1000_copper_link_autoneg(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_igp(struct e1000_hw *hw);
+s32  e1000_copper_link_setup_m88(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  e1000_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_m88(struct e1000_hw *hw);
+s32  e1000_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  e1000_get_cfg_done_generic(struct e1000_hw *hw);
+s32  e1000_get_phy_id(struct e1000_hw *hw);
+s32  e1000_get_phy_info_igp(struct e1000_hw *hw);
+s32  e1000_get_phy_info_m88(struct e1000_hw *hw);
+s32  e1000_phy_sw_reset_generic(struct e1000_hw *hw);
+void e1000_phy_force_speed_duplex_setup(struct e1000_hw *hw, u16 *phy_ctrl);
+s32  e1000_phy_hw_reset_generic(struct e1000_hw *hw);
+s32  e1000_phy_reset_dsp_generic(struct e1000_hw *hw);
+s32  e1000_phy_setup_autoneg(struct e1000_hw *hw);
+s32  e1000_read_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_read_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  e1000_set_d3_lplu_state_generic(struct e1000_hw *hw, bool active);
+s32  e1000_setup_copper_link_generic(struct e1000_hw *hw);
+s32  e1000_wait_autoneg_generic(struct e1000_hw *hw);
+s32  e1000_write_kmrn_reg_generic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_write_phy_reg_m88(struct e1000_hw *hw, u32 offset, u16 data);
+s32  e1000_phy_reset_dsp(struct e1000_hw *hw);
+s32  e1000_phy_has_link_generic(struct e1000_hw *hw, u32 iterations,
+                                u32 usec_interval, bool *success);
+s32  e1000_phy_init_script_igp3(struct e1000_hw *hw);
+e1000_phy_type e1000_get_phy_type_from_id(u32 phy_id);
+void e1000_power_up_phy_copper(struct e1000_hw *hw);
+void e1000_power_down_phy_copper(struct e1000_hw *hw);
+s32 e1000_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32 e1000_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+
+#define E1000_MAX_PHY_ADDR                4
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
+#define IGP01E1000_GMII_FIFO              0x14 /* GMII FIFO */
+#define IGP01E1000_PHY_CHANNEL_QUALITY    0x15 /* PHY Channel Quality */
+#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
+#define BM_PHY_PAGE_SELECT                22   /* Page Select for BM */
+#define IGP_PAGE_SHIFT                    5
+#define PHY_REG_MASK                      0x1F
+
+
+#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK      0x0078
+
+#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
+
+#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
+
+/* Enable flexible speed on link-up */
+#define IGP01E1000_GMII_FLEX_SPD          0x0010
+#define IGP01E1000_GMII_SPD               0x0020 /* Enable SPD */
+
+#define IGP02E1000_PM_SPD                 0x0001 /* Smart Power Down */
+#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
+
+#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
+
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX              0x0008
+#define IGP01E1000_PSSR_SPEED_MASK        0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
+
+#define IGP02E1000_PHY_CHANNEL_NUM        4
+#define IGP02E1000_PHY_AGC_A              0x11B1
+#define IGP02E1000_PHY_AGC_B              0x12B1
+#define IGP02E1000_PHY_AGC_C              0x14B1
+#define IGP02E1000_PHY_AGC_D              0x18B1
+
+#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK        0x7F
+#define IGP02E1000_AGC_RANGE              15
+
+#define IGP03E1000_PHY_MISC_CTRL          0x1B
+#define IGP03E1000_PHY_MISC_DUPLEX_MANUAL_SET  0x1000 /* Manually Set Duplex */
+
+#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
+
+#define E1000_KMRNCTRLSTA_OFFSET          0x001F0000
+#define E1000_KMRNCTRLSTA_OFFSET_SHIFT    16
+#define E1000_KMRNCTRLSTA_REN             0x00200000
+#define E1000_KMRNCTRLSTA_DIAG_OFFSET     0x3    /* Kumeran Diagnostic */
+#define E1000_KMRNCTRLSTA_DIAG_NELPBK     0x1000 /* Nearend Loopback mode */
+
+#define IFE_PHY_EXTENDED_STATUS_CONTROL 0x10
+#define IFE_PHY_SPECIAL_CONTROL     0x11 /* 100BaseTx PHY Special Control */
+#define IFE_PHY_SPECIAL_CONTROL_LED 0x1B /* PHY Special and LED Control */
+#define IFE_PHY_MDIX_CONTROL        0x1C /* MDI/MDI-X Control */
+
+/* IFE PHY Extended Status Control */
+#define IFE_PESC_POLARITY_REVERSED    0x0100
+
+/* IFE PHY Special Control */
+#define IFE_PSC_AUTO_POLARITY_DISABLE      0x0010
+#define IFE_PSC_FORCE_POLARITY             0x0020
+#define IFE_PSC_DISABLE_DYNAMIC_POWER_DOWN 0x0100
+
+/* IFE PHY Special Control and LED Control */
+#define IFE_PSCL_PROBE_MODE            0x0020
+#define IFE_PSCL_PROBE_LEDS_OFF        0x0006 /* Force LEDs 0 and 2 off */
+#define IFE_PSCL_PROBE_LEDS_ON         0x0007 /* Force LEDs 0 and 2 on */
+
+/* IFE PHY MDIX Control */
+#define IFE_PMC_MDIX_STATUS      0x0020 /* 1=MDI-X, 0=MDI */
+#define IFE_PMC_FORCE_MDIX       0x0040 /* 1=force MDI-X, 0=force MDI */
+#define IFE_PMC_AUTO_MDIX        0x0080 /* 1=enable auto MDI/MDI-X, 0=disable */
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h
new file mode 100644
index 0000000..72b9f9c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/e1000_regs.h
@@ -0,0 +1,307 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_CTRL_DUP 0x00004  /* Device Control Duplicate (Shadow) - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_FLA      0x0001C  /* Flash Access - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FEXTNVM  0x00028  /* Future Extended NVM - RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* Rx Control - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* Tx Configuration Word - RW */
+#define E1000_RXCW     0x00180  /* Rx Configuration Word - RO */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_TCTL     0x00400  /* Tx Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended Tx Control - RW */
+#define E1000_TIPG     0x00410  /* Tx Inter-packet gap -RW */
+#define E1000_TBT      0x00448  /* Tx Burst Timer - RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_EXTCNF_CTRL  0x00F00  /* Extended Configuration Control */
+#define E1000_EXTCNF_SIZE  0x00F08  /* Extended Configuration Size */
+#define E1000_PHY_CTRL     0x00F10  /* PHY Control Register in CSR */
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC   0x01024  /* EEPROM Auto Read Bus Control */
+#define E1000_FLASHT   0x01028  /* FLASH Timer Register */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_FLSWCTL  0x01030  /* FLASH control register */
+#define E1000_FLSWDATA 0x01034  /* FLASH data register */
+#define E1000_FLSWCNT  0x01038  /* FLASH Access Counter */
+#define E1000_FLOP     0x0103C  /* FLASH Opcode Register */
+#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+#define E1000_I2CPARAMS 0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_WDSTP    0x01040  /* Watchdog Setup - RW */
+#define E1000_SWDSTS   0x01044  /* SW Device Status - RW */
+#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
+#define E1000_ERT      0x02008  /* Early Rx Threshold - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_PSRCTL   0x02170  /* Packet Split Receive Control - RW */
+#define E1000_RDFPCQ(_n)  (0x02430 + (0x4 * (_n)))
+#define E1000_PBRTH    0x02458  /* PB Rx Arbitration Threshold - RW */
+#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
+/* Split and Replication Rx Control - RW */
+#define E1000_RDPUMB   0x025CC  /* DMA Rx Descriptor uC Mailbox - RW */
+#define E1000_RDPUAD   0x025D0  /* DMA Rx Descriptor uC Addr Command - RW */
+#define E1000_RDPUWD   0x025D4  /* DMA Rx Descriptor uC Data Write - RW */
+#define E1000_RDPURD   0x025D8  /* DMA Rx Descriptor uC Data Read - RW */
+#define E1000_RDPUCTL  0x025DC  /* DMA Rx Descriptor uC Control - RW */
+#define E1000_RDTR     0x02820  /* Rx Delay Timer - RW */
+#define E1000_RADV     0x0282C  /* Rx Interrupt Absolute Delay Timer - RW */
+/*
+ * Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)   ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) : (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)   ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) : (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)   ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) : (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)  ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) : (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)     ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) : (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)     ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) : (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)  ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) : (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)   ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) : (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)   ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) : (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)   ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) : (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)     ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) : (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)     ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) : (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) : (0x0E028 + ((_n) * 0x40)))
+#define E1000_TARC(_n)    (0x03840 + ((_n) << 8))
+#define E1000_DCA_TXCTRL(_n) (0x03814 + ((_n) << 8))
+#define E1000_DCA_RXCTRL(_n) (0x02814 + ((_n) << 8))
+#define E1000_TDWBAL(_n)  ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) : (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) : (0x0E03C + ((_n) * 0x40)))
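+/*
+ * Editorial note: the (_n) < 4 ternary above splits each queue-indexed
+ * register between the legacy block (0x100 stride) and the extended block
+ * (0x40 stride); for example E1000_RDBAL(0) == 0x02800 while
+ * E1000_RDBAL(5) == 0x0C000 + 5 * 0x40 == 0x0C140.
+ */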
+#define E1000_RSRPD    0x02C00  /* Rx Small Packet Detect - RW */
+#define E1000_RAID     0x02C08  /* Receive Ack Interrupt Delay - RW */
+#define E1000_TXDMAC   0x03000  /* Tx DMA Control - RW */
+#define E1000_KABGTXD  0x03004  /* AFE Band Gap Transmit Ref Data */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i)           (0x05400 + ((_i) * 8))
+#define E1000_RAH(_i)           (0x05404 + ((_i) * 8))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
+#define E1000_TDFH     0x03410  /* Tx Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* Tx Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* Tx Data FIFO Head Saved - RW */
+#define E1000_TDFTS    0x03428  /* Tx Data FIFO Tail Saved - RW */
+#define E1000_TDFPC    0x03430  /* Tx Data FIFO Packet Count - RW */
+#define E1000_TDPUMB   0x0357C  /* DMA Tx Descriptor uC Mail Box - RW */
+#define E1000_TDPUAD   0x03580  /* DMA Tx Descriptor uC Addr Command - RW */
+#define E1000_TDPUWD   0x03584  /* DMA Tx Descriptor uC Data Write - RW */
+#define E1000_TDPURD   0x03588  /* DMA Tx Descriptor uC Data Read - RW */
+#define E1000_TDPUCTL  0x0358C  /* DMA Tx Descriptor uC Control - RW */
+#define E1000_DTXCTL   0x03590  /* DMA Tx Control - RW */
+#define E1000_TIDV     0x03820  /* Tx Interrupt Delay Value - RW */
+#define E1000_TADV     0x0382C  /* Tx Interrupt Absolute Delay Val - RW */
+#define E1000_TSPMT    0x03830  /* TCP Segmentation PAD & Min Threshold - RW */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* Tx-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON Rx Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON Tx Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF Rx Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF Tx Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control Rx Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets Rx (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets Rx (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets Rx (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets Rx (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets Rx (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets Rx (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets Rx Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets Rx Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets Rx Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets Tx Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets Rx Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets Rx Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets Tx Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets Tx Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* Rx No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* Rx Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* Rx Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* Rx Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* Rx Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets Rx Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets Tx Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets Rx Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets Rx High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets Tx Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets Tx High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets Rx - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets Tx - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets Tx (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets Tx (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets Tx (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets Tx (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets Tx (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets Tx (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets Tx Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets Tx Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context Tx - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context Tx Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+#define E1000_ICRXPTC  0x04104  /* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXATC  0x04108  /* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C  /* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXATC  0x04110  /* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXQEC  0x04118  /* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQMTC 0x0411C  /* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120  /* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+
+#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
+#define E1000_CBTMPC      0x0402C  /* Circuit Breaker Tx Packet Count */
+#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
+#define E1000_CBRDPC      0x04044  /* Circuit Breaker Rx Dropped Count */
+#define E1000_CBRMPC      0x040FC  /* Circuit Breaker Rx Packet Count */
+#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
+#define E1000_HGPTC       0x04118  /* Host Good Packets Tx Count */
+#define E1000_HTCBDPC     0x04124  /* Host Tx Circuit Breaker Dropped Count */
+#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
+#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
+#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS     0x04138  /* Length Errors Count */
+#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_HRMPC       0x0A018  /* Header Redirection Missed Packet Count */
+#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
+#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
+#define E1000_1GSTAT_RCV  0x04228  /* 1GSTAT Code Violation Packet Count - RW */
+#define E1000_RXCSUM   0x05000  /* Rx Checksum Control - RW */
+#define E1000_RLPML    0x05004  /* Rx Long Packet Max Length */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control */
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_VMD_CTL  0x0581C  /* VMDq Control - RW */
+#define E1000_VFQA0    0x0B000  /* VLAN Filter Queue Array 0 - RW Array */
+#define E1000_VFQA1    0x0B200  /* VLAN Filter Queue Array 1 - RW Array */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_IP4AT    0x05840  /* IPv4 Address Table - RW Array */
+#define E1000_IP6AT    0x05880  /* IPv6 Address Table - RW Array */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+#define E1000_WUPM     0x05A00  /* Wakeup Packet Memory - RO A */
+#define E1000_PBACL    0x05B68  /* MSIx PBA Clear - Read/Write 1's to clear */
+#define E1000_FFLT     0x05F00  /* Flexible Filter Length Table - RW Array */
+#define E1000_HOST_IF  0x08800  /* Host Interface */
+#define E1000_FFMT     0x09000  /* Flexible Filter Mask Table - RW Array */
+#define E1000_FFVT     0x09800  /* Flexible Filter Value Table - RW Array */
+
+#define E1000_KMRNCTRLSTA 0x00034 /* MAC-PHY interface - RW */
+#define E1000_MDPHYA      0x0003C /* PHY address - RW */
+#define E1000_MANC2H      0x05860 /* Management Control To Host - RW */
+#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR         0x05B00 /* PCI-Ex Control */
+#define E1000_GSCL_1    0x05B10 /* PCI-Ex Statistic Control #1 */
+#define E1000_GSCL_2    0x05B14 /* PCI-Ex Statistic Control #2 */
+#define E1000_GSCL_3    0x05B18 /* PCI-Ex Statistic Control #3 */
+#define E1000_GSCL_4    0x05B1C /* PCI-Ex Statistic Control #4 */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_DCA_ID    0x05B70 /* DCA Requester ID Information - RO */
+#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
+#define E1000_FFLT_DBG  0x05F04 /* Debug Register */
+#define E1000_HICR      0x08F00 /* Host Interface Control */
+
+/* RSS registers */
+#define E1000_CPUVEC    0x02C10 /* CPU Vector Register - RW */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
+#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt Rx VLAN Priority - RW */
+#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4)) /* MSI-X Allocation Register (_i) - RW */
+#define E1000_MSIXTADD(_i)  (0x0C000 + ((_i) * 0x10)) /* MSI-X Table entry addr low reg 0 - RW */
+#define E1000_MSIXTUADD(_i) (0x0C004 + ((_i) * 0x10)) /* MSI-X Table entry addr upper reg 0 - RW */
+#define E1000_MSIXTMSG(_i)  (0x0C008 + ((_i) * 0x10)) /* MSI-X Table entry message reg 0 - RW */
+#define E1000_MSIXVCTRL(_i) (0x0C00C + ((_i) * 0x10)) /* MSI-X Table entry vector ctrl reg 0 - RW */
+#define E1000_MSIXPBA    0x0E000 /* MSI-X Pending bit array */
+#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4)) /* Redirection Table - RW Array */
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+#define E1000_RSSIM     0x05864 /* RSS Interrupt Mask */
+#define E1000_RSSIR     0x05868 /* RSS Interrupt Request */
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h
new file mode 100644
index 0000000..48906b7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/e1000/kcompat.h
@@ -0,0 +1,603 @@
+/*******************************************************************************
+
+  Intel PRO/1000 Linux driver
+  Copyright(c) 1999 - 2008 Intel Corporation.
+
+  This program is free software; you can redistribute it and/or modify it
+  under the terms and conditions of the GNU General Public License,
+  version 2, as published by the Free Software Foundation.
+
+  This program is distributed in the hope it will be useful, but WITHOUT
+  ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+  FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+  more details.
+
+  You should have received a copy of the GNU General Public License along with
+  this program; if not, write to the Free Software Foundation, Inc.,
+  51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
+
+  The full GNU General Public License is included in this distribution in
+  the file called "COPYING".
+
+  Contact Information:
+  Linux NICS <linux.nics@intel.com>
+  e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+  Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+
+*******************************************************************************/
+
+#ifndef _KCOMPAT_H_
+#define _KCOMPAT_H_
+
+#include <linux/version.h>
+#include <linux/init.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/list.h>
+#include <linux/delay.h>
+#include <linux/sched.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/udp.h>
+#include <linux/mii.h>
+#include <asm/io.h>
+
+#include <rtnet_port.h>
+/* NAPI enable/disable flags here */
+
+
+#ifdef _E1000_H_
+#ifdef CONFIG_E1000_NAPI
+#define NAPI
+#endif
+#ifdef E1000_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef E1000_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+#ifdef _IGB_H_
+#define NAPI
+#endif
+
+#ifdef _IXGB_H_
+#ifdef CONFIG_IXGB_NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NAPI
+#undef NAPI
+#define NAPI
+#endif
+#ifdef IXGB_NO_NAPI
+#undef NAPI
+#endif
+#endif
+
+
+#ifdef DRIVER_E1000
+#define adapter_struct e1000_adapter
+#endif
+
+
+// RTNET settings
+#ifdef NAPI
+#undef NAPI
+#endif
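+/*
+ * RTnet hands received packets to the stack directly from the IRQ handler
+ * in the Xenomai real-time domain; NAPI's deferred softirq polling does
+ * not fit that model, so NAPI is forced off here regardless of the driver
+ * defaults selected above.
+ */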
+
+#undef NETIF_F_TSO
+#undef NETIF_F_HW_VLAN_TX
+#undef CONFIG_NET_POLL_CONTROLLER
+#ifdef ETHTOOL_GPERMADDR
+#undef ETHTOOL_GPERMADDR
+#endif
+
+
+/* and finally set defines so that the code sees the changes */
+#ifdef NAPI
+#ifndef CONFIG_E1000_NAPI
+#define CONFIG_E1000_NAPI
+#endif
+#ifndef CONFIG_IXGB_NAPI
+#define CONFIG_IXGB_NAPI
+#endif
+#else
+#undef CONFIG_E1000_NAPI
+#undef CONFIG_IXGB_NAPI
+#endif
+
+/* packet split disable/enable */
+#ifdef DISABLE_PACKET_SPLIT
+#undef CONFIG_E1000_DISABLE_PACKET_SPLIT
+#define CONFIG_E1000_DISABLE_PACKET_SPLIT
+#endif
+
+/* MSI compatibility code for all kernels and drivers */
+#ifdef DISABLE_PCI_MSI
+#undef CONFIG_PCI_MSI
+#endif
+
+#ifdef DISABLE_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef DISABLE_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef PMSG_SUSPEND
+#define PMSG_SUSPEND 3
+#endif
+
+/* generic boolean compatibility */
+#undef TRUE
+#undef FALSE
+#define TRUE true
+#define FALSE false
+#ifdef GCC_VERSION
+#if ( GCC_VERSION < 3000 )
+#define _Bool char
+#endif
+#endif
+#ifndef bool
+#define bool _Bool
+#define true 1
+#define false 0
+#endif
+
+
+#ifndef module_param
+#define module_param(v,t,p) MODULE_PARM(v, "i");
+#endif
+
+#ifndef DMA_64BIT_MASK
+#define DMA_64BIT_MASK  0xffffffffffffffffULL
+#endif
+
+#ifndef DMA_32BIT_MASK
+#define DMA_32BIT_MASK  0x00000000ffffffffULL
+#endif
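+/* These fallbacks match DMA_BIT_MASK(64) and DMA_BIT_MASK(32) on newer kernels. */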
+
+#ifndef PCI_CAP_ID_EXP
+#define PCI_CAP_ID_EXP 0x10
+#endif
+
+#ifndef mmiowb
+#ifdef CONFIG_IA64
+#define mmiowb() asm volatile ("mf.a" ::: "memory")
+#else
+#define mmiowb()
+#endif
+#endif
+
+#ifndef SET_NETDEV_DEV
+#define SET_NETDEV_DEV(net, pdev)
+#endif
+
+#ifndef HAVE_FREE_NETDEV
+#define free_netdev(x)	kfree(x)
+#endif
+
+#ifdef HAVE_POLL_CONTROLLER
+#define CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifndef NETDEV_TX_OK
+#define NETDEV_TX_OK 0
+#endif
+
+#ifndef NETDEV_TX_BUSY
+#define NETDEV_TX_BUSY 1
+#endif
+
+#ifndef NETDEV_TX_LOCKED
+#define NETDEV_TX_LOCKED -1
+#endif
+
+#ifndef SKB_DATAREF_SHIFT
+/* If we do not have the infrastructure to detect whether skb_header is
+   cloned, just return false in all cases. */
+#define skb_header_cloned(x) 0
+#endif
+
+#ifndef NETIF_F_GSO
+#define gso_size tso_size
+#define gso_segs tso_segs
+#endif
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#define CHECKSUM_COMPLETE CHECKSUM_HW
+#endif
+
+#ifndef __read_mostly
+#define __read_mostly
+#endif
+
+#ifndef MII_RESV1
+#define MII_RESV1		0x17		/* Reserved...		*/
+#endif
+
+#ifndef unlikely
+#define unlikely(_x) _x
+#define likely(_x) _x
+#endif
+
+#ifndef WARN_ON
+#define WARN_ON(x)
+#endif
+
+#ifndef PCI_DEVICE
+#define PCI_DEVICE(vend,dev) \
+	.vendor = (vend), .device = (dev), \
+	.subvendor = PCI_ANY_ID, .subdevice = PCI_ANY_ID
+#endif
+
+#ifndef num_online_cpus
+#define num_online_cpus() smp_num_cpus
+#endif
+
+#ifndef _LINUX_RANDOM_H
+#include <linux/random.h>
+#endif
+
+#ifndef DECLARE_BITMAP
+#ifndef BITS_TO_LONGS
+#define BITS_TO_LONGS(bits) (((bits)+BITS_PER_LONG-1)/BITS_PER_LONG)
+#endif
+#define DECLARE_BITMAP(name,bits) long name[BITS_TO_LONGS(bits)]
+#endif
+
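+/*
+ * Fallback 802.1Q constants: a VLAN tag adds 4 bytes, so a tagged Ethernet
+ * header is 14 + 4 = 18 bytes and a maximum tagged frame is
+ * 1514 + 4 = 1518 bytes.
+ */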
+#ifndef VLAN_HLEN
+#define VLAN_HLEN 4
+#endif
+
+#ifndef VLAN_ETH_HLEN
+#define VLAN_ETH_HLEN 18
+#endif
+
+#ifndef VLAN_ETH_FRAME_LEN
+#define VLAN_ETH_FRAME_LEN 1518
+#endif
+
+
+/*****************************************************************************/
+/* Compatibility definitions for installations whose ethtool version lacks
+ * EEPROM, adapter-ID, or statistics support. */
+
+#ifndef ETH_GSTRING_LEN
+#define ETH_GSTRING_LEN 32
+#endif
+
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS 0x1d
+#undef ethtool_drvinfo
+#define ethtool_drvinfo k_ethtool_drvinfo
+struct k_ethtool_drvinfo {
+	u32 cmd;
+	char driver[32];
+	char version[32];
+	char fw_version[32];
+	char bus_info[32];
+	char reserved1[32];
+	char reserved2[16];
+	u32 n_stats;
+	u32 testinfo_len;
+	u32 eedump_len;
+	u32 regdump_len;
+};
+
+struct ethtool_stats {
+	u32 cmd;
+	u32 n_stats;
+	u64 data[0];
+};
+#endif /* ETHTOOL_GSTATS */
+
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID 0x1c
+#endif /* ETHTOOL_PHYS_ID */
+
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS 0x1b
+enum ethtool_stringset {
+	ETH_SS_TEST             = 0,
+	ETH_SS_STATS,
+};
+struct ethtool_gstrings {
+	u32 cmd;            /* ETHTOOL_GSTRINGS */
+	u32 string_set;     /* string set id, e.g. ETH_SS_TEST, etc. */
+	u32 len;            /* number of strings in the string set */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GSTRINGS */
+
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST 0x1a
+enum ethtool_test_flags {
+	ETH_TEST_FL_OFFLINE	= (1 << 0),
+	ETH_TEST_FL_FAILED	= (1 << 1),
+};
+struct ethtool_test {
+	u32 cmd;
+	u32 flags;
+	u32 reserved;
+	u32 len;
+	u64 data[0];
+};
+#endif /* ETHTOOL_TEST */
+
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM 0xb
+#undef ETHTOOL_GREGS
+struct ethtool_eeprom {
+	u32 cmd;
+	u32 magic;
+	u32 offset;
+	u32 len;
+	u8 data[0];
+};
+
+struct ethtool_value {
+	u32 cmd;
+	u32 data;
+};
+#endif /* ETHTOOL_GEEPROM */
+
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK 0xa
+#endif /* ETHTOOL_GLINK */
+
+#ifndef ETHTOOL_GREGS
+#define ETHTOOL_GREGS		0x00000004 /* Get NIC registers */
+#define ethtool_regs _kc_ethtool_regs
+/* for passing big chunks of data */
+struct _kc_ethtool_regs {
+	u32 cmd;
+	u32 version; /* driver-specific, indicates different chips/revs */
+	u32 len; /* bytes */
+	u8 data[0];
+};
+#endif /* ETHTOOL_GREGS */
+
+#ifndef ETHTOOL_GMSGLVL
+#define ETHTOOL_GMSGLVL		0x00000007 /* Get driver message level */
+#endif
+#ifndef ETHTOOL_SMSGLVL
+#define ETHTOOL_SMSGLVL		0x00000008 /* Set driver msg level, priv. */
+#endif
+#ifndef ETHTOOL_NWAY_RST
+#define ETHTOOL_NWAY_RST	0x00000009 /* Restart autonegotiation, priv */
+#endif
+#ifndef ETHTOOL_GLINK
+#define ETHTOOL_GLINK		0x0000000a /* Get link status */
+#endif
+#ifndef ETHTOOL_GEEPROM
+#define ETHTOOL_GEEPROM		0x0000000b /* Get EEPROM data */
+#endif
+#ifndef ETHTOOL_SEEPROM
+#define ETHTOOL_SEEPROM		0x0000000c /* Set EEPROM data */
+#endif
+#ifndef ETHTOOL_GCOALESCE
+#define ETHTOOL_GCOALESCE	0x0000000e /* Get coalesce config */
+/* for configuring coalescing parameters of chip */
+#define ethtool_coalesce _kc_ethtool_coalesce
+struct _kc_ethtool_coalesce {
+	u32	cmd;	/* ETHTOOL_{G,S}COALESCE */
+
+	/* How many usecs to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	rx_coalesce_usecs;
+
+	/* How many packets to delay an RX interrupt after
+	 * a packet arrives.  If 0, only rx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause RX interrupts to never be
+	 * generated.
+	 */
+	u32	rx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	rx_coalesce_usecs_irq;
+	u32	rx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_max_coalesced_frames
+	 * is used.
+	 */
+	u32	tx_coalesce_usecs;
+
+	/* How many packets to delay a TX interrupt after
+	 * a packet is sent.  If 0, only tx_coalesce_usecs is
+	 * used.  It is illegal to set both usecs and max frames
+	 * to zero as this would cause TX interrupts to never be
+	 * generated.
+	 */
+	u32	tx_max_coalesced_frames;
+
+	/* Same as above two parameters, except that these values
+	 * apply while an IRQ is being serviced by the host.  Not
+	 * all cards support this feature and the values are ignored
+	 * in that case.
+	 */
+	u32	tx_coalesce_usecs_irq;
+	u32	tx_max_coalesced_frames_irq;
+
+	/* How many usecs to delay in-memory statistics
+	 * block updates.  Some drivers do not have an in-memory
+	 * statistic block, and in such cases this value is ignored.
+	 * This value must not be zero.
+	 */
+	u32	stats_block_coalesce_usecs;
+
+	/* Adaptive RX/TX coalescing is an algorithm implemented by
+	 * some drivers to improve latency under low packet rates and
+	 * improve throughput under high packet rates.  Some drivers
+	 * only implement one of RX or TX adaptive coalescing.  Anything
+	 * not implemented by the driver causes these values to be
+	 * silently ignored.
+	 */
+	u32	use_adaptive_rx_coalesce;
+	u32	use_adaptive_tx_coalesce;
+
+	/* When the packet rate (measured in packets per second)
+	 * is below pkt_rate_low, the {rx,tx}_*_low parameters are
+	 * used.
+	 */
+	u32	pkt_rate_low;
+	u32	rx_coalesce_usecs_low;
+	u32	rx_max_coalesced_frames_low;
+	u32	tx_coalesce_usecs_low;
+	u32	tx_max_coalesced_frames_low;
+
+	/* When the packet rate is below pkt_rate_high but above
+	 * pkt_rate_low (both measured in packets per second) the
+	 * normal {rx,tx}_* coalescing parameters are used.
+	 */
+
+	/* When the packet rate (measured in packets per second) is
+	 * above pkt_rate_high, the {rx,tx}_*_high parameters are
+	 * used.
+	 */
+	u32	pkt_rate_high;
+	u32	rx_coalesce_usecs_high;
+	u32	rx_max_coalesced_frames_high;
+	u32	tx_coalesce_usecs_high;
+	u32	tx_max_coalesced_frames_high;
+
+	/* How often to do adaptive coalescing packet rate sampling,
+	 * measured in seconds.  Must not be zero.
+	 */
+	u32	rate_sample_interval;
+};
+#endif /* ETHTOOL_GCOALESCE */
+
+#ifndef ETHTOOL_SCOALESCE
+#define ETHTOOL_SCOALESCE	0x0000000f /* Set coalesce config. */
+#endif
+#ifndef ETHTOOL_GRINGPARAM
+#define ETHTOOL_GRINGPARAM	0x00000010 /* Get ring parameters */
+/* for configuring RX/TX ring parameters */
+#define ethtool_ringparam _kc_ethtool_ringparam
+struct _kc_ethtool_ringparam {
+	u32	cmd;	/* ETHTOOL_{G,S}RINGPARAM */
+
+	/* Read only attributes.  These indicate the maximum number
+	 * of pending RX/TX ring entries the driver will allow the
+	 * user to set.
+	 */
+	u32	rx_max_pending;
+	u32	rx_mini_max_pending;
+	u32	rx_jumbo_max_pending;
+	u32	tx_max_pending;
+
+	/* Values changeable by the user.  The valid values are
+	 * in the range 1 to the "*_max_pending" counterpart above.
+	 */
+	u32	rx_pending;
+	u32	rx_mini_pending;
+	u32	rx_jumbo_pending;
+	u32	tx_pending;
+};
+#endif /* ETHTOOL_GRINGPARAM */
+
+#ifndef ETHTOOL_SRINGPARAM
+#define ETHTOOL_SRINGPARAM	0x00000011 /* Set ring parameters, priv. */
+#endif
+#ifndef ETHTOOL_GPAUSEPARAM
+#define ETHTOOL_GPAUSEPARAM	0x00000012 /* Get pause parameters */
+/* for configuring link flow control parameters */
+#define ethtool_pauseparam _kc_ethtool_pauseparam
+struct _kc_ethtool_pauseparam {
+	u32	cmd;	/* ETHTOOL_{G,S}PAUSEPARAM */
+
+	/* If the link is being auto-negotiated (via ethtool_cmd.autoneg
+	 * being true) the user may set 'autoneg' here non-zero to have the
+	 * pause parameters be auto-negotiated too.  In such a case, the
+	 * {rx,tx}_pause values below determine what capabilities are
+	 * advertised.
+	 *
+	 * If 'autoneg' is zero or the link is not being auto-negotiated,
+	 * then {rx,tx}_pause force the driver to use/not-use pause
+	 * flow control.
+	 */
+	u32	autoneg;
+	u32	rx_pause;
+	u32	tx_pause;
+};
+#endif /* ETHTOOL_GPAUSEPARAM */
+
+#ifndef ETHTOOL_SPAUSEPARAM
+#define ETHTOOL_SPAUSEPARAM	0x00000013 /* Set pause parameters. */
+#endif
+#ifndef ETHTOOL_GRXCSUM
+#define ETHTOOL_GRXCSUM		0x00000014 /* Get RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SRXCSUM
+#define ETHTOOL_SRXCSUM		0x00000015 /* Set RX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GTXCSUM
+#define ETHTOOL_GTXCSUM		0x00000016 /* Get TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STXCSUM
+#define ETHTOOL_STXCSUM		0x00000017 /* Set TX hw csum enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_GSG
+#define ETHTOOL_GSG		0x00000018 /* Get scatter-gather enable
+					    * (ethtool_value) */
+#endif
+#ifndef ETHTOOL_SSG
+#define ETHTOOL_SSG		0x00000019 /* Set scatter-gather enable
+					    * (ethtool_value). */
+#endif
+#ifndef ETHTOOL_TEST
+#define ETHTOOL_TEST		0x0000001a /* execute NIC self-test, priv. */
+#endif
+#ifndef ETHTOOL_GSTRINGS
+#define ETHTOOL_GSTRINGS	0x0000001b /* get specified string set */
+#endif
+#ifndef ETHTOOL_PHYS_ID
+#define ETHTOOL_PHYS_ID		0x0000001c /* identify the NIC */
+#endif
+#ifndef ETHTOOL_GSTATS
+#define ETHTOOL_GSTATS		0x0000001d /* get NIC-specific statistics */
+#endif
+#ifndef ETHTOOL_GTSO
+#define ETHTOOL_GTSO		0x0000001e /* Get TSO enable (ethtool_value) */
+#endif
+#ifndef ETHTOOL_STSO
+#define ETHTOOL_STSO		0x0000001f /* Set TSO enable (ethtool_value) */
+#endif
+
+#ifndef ETHTOOL_BUSINFO_LEN
+#define ETHTOOL_BUSINFO_LEN	32
+#endif
+
+#ifndef HAVE_PCI_SET_MWI
+#define pci_set_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word | \
+			       PCI_COMMAND_INVALIDATE);
+#define pci_clear_mwi(X) pci_write_config_word(X, \
+			       PCI_COMMAND, adapter->hw.bus.pci_cmd_word & \
+			       ~PCI_COMMAND_INVALIDATE);
+#endif
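+/*
+ * Note: the fallbacks above expand to pci_write_config_word() and reference
+ * a local "adapter" variable, so they are only usable inside functions where
+ * an adapter with a cached PCI command word is in scope.
+ */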
+
+
+#undef HAVE_PCI_ERS
+
+#endif /* _KCOMPAT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig
new file mode 100644
index 0000000..0544128
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Kconfig
@@ -0,0 +1,4 @@
+config XENO_DRIVERS_NET_DRV_RT2500
+    depends on XENO_DRIVERS_NET && PCI
+    tristate "Ralink 2500 WLAN"
+    select XENO_DRIVERS_NET_RTWLAN
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile
new file mode 100644
index 0000000..d5e2643
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/Makefile
@@ -0,0 +1,6 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_RT2500) += rt_rt2x00core.o rt_rt2500pci.o
+
+rt_rt2x00core-y := rt2x00core.o
+rt_rt2500pci-y := rt2500pci.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README
new file mode 100644
index 0000000..f47fccd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/README
@@ -0,0 +1,58 @@
+RTnet rt2500 WLAN README
+========================
+
+See http://www.fsf.org/resources/hw/net/wireless/cards.html
+for a list of cards that use the rt2500 chipset.
+
+After the modules rt_rt2x00core and rt_rt2500pci have been loaded, the driver
+can be configured with rtiwconfig. The following features are currently
+implemented (see also rtiwconfig --help):
+
+* bitrate: The supplied value is multiplied by 0.5 Mbit/s.
+    Valid values are 2, 4, 11 and 22 for 802.11b and
+    12, 18, 24, 36, 48, 72, 96 and 108 for 802.11g.
+
+* channel: Sets the frequency/channel. Valid channel IDs are 1 to 13.
+
+* txpower: Sets the transmission power. Zero means minimum TX power.
+
+* retry: The hardware can be configured to retry transmissions.
+    This sets the maximum number of retries.
+
+* tx mode: The RTnet driver can be used in three different modes:
+
+    "raw": No acknowledgement of transmitted frames is expected.
+           -> No retries possible.
+           Target address of WLAN frame is set as passed from higher layers.
+
+    "ack": Acknowledgement of every transmitted frames is expected.
+           -> Retries are possible.
+           Target address of WLAN frame is set as passed from higher layers.
+           WARNING: This is currently only useful for unicast transmission.
+
+    "mcast": No acknowledgement of the frame is expected.
+           Receiver address of the WLAN frame is the own MAC-Address with group
+           bit set.
+
+* drop broadcast: Configures whether the hardware drops received
+    broadcast frames.
+
+* drop multicast: Configures whether the hardware drops received
+    multicast frames.
+
+* bbp sensibility: Sets the receive sensitivity of the baseband processor.
+    Values around 70 seem to be useful.
+
+* autoresponder: Determines whether the hardware automatically responds
+    to received unicast frames with an ACK frame.
+
+* regread/regwrite: Gives direct access to the chipset registers.
+    Only useful if you know what you are doing :)
+
+The driver has been tested with an ASUS WL-107g PCMCIA and a MSI PC54G2 PCI
+card.
+
+KNOWN BUGS:
+After the bitrate has been configured via rtiwconfig, the driver activates
+the hardware autoresponder. If this is not desired, the autoresponder has to
+be disabled again manually.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c
new file mode 100644
index 0000000..9bbdce7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.c
@@ -0,0 +1,1274 @@
+/* rt2500pci.c
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *			     <http://rt2x00.serialmonkey.com>
+ *               2006        rtnet adaption by Daniel Gregorek
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Module: rt_rt2500pci
+ * Abstract: rt2500pci device specific routines.
+ * Supported chipsets: RT2560.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+#include "rt2x00.h"
+#include "rt2500pci.h"
+
+#include <rtnet_port.h>
+
+#ifdef DRV_NAME
+#undef DRV_NAME
+#define DRV_NAME "rt_rt2500pci"
+#endif /* DRV_NAME */
+
+/* handler for direct register access from core module */
+static int rt2x00_dev_register_access(struct _rt2x00_core *core, int request,
+				      u32 address, u32 *value)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	u8 u8_value;
+
+	switch (request) {
+	case IOC_RTWLAN_REGREAD:
+		rt2x00_register_read(rt2x00pci, address, value);
+		break;
+	case IOC_RTWLAN_REGWRITE:
+		rt2x00_register_write(rt2x00pci, address, *value);
+		break;
+	case IOC_RTWLAN_BBPREAD:
+		rt2x00_bbp_regread(rt2x00pci, address, &u8_value);
+		*value = u8_value;
+		break;
+	case IOC_RTWLAN_BBPWRITE:
+		rt2x00_bbp_regwrite(rt2x00pci, address, *value);
+		break;
+	default:
+		return -1;
+	}
+
+	return 0;
+}
+
+/*
+ * Interrupt routines.
+ * rt2x00_interrupt_txdone processes the results of all transmitted packets.
+ * rt2x00_interrupt_rxdone processes all received packets.
+ */
+static void rt2x00_interrupt_txdone(struct _data_ring *ring)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(ring->core->rtnet_dev);
+	struct _txd *txd = NULL;
+	u8 tx_result = 0x00;
+	/*    u8			retry_count = 0x00; */
+
+	do {
+		txd = DESC_ADDR_DONE(ring);
+
+		if (rt2x00_get_field32(txd->word0, TXD_W0_OWNER_NIC) ||
+		    !rt2x00_get_field32(txd->word0, TXD_W0_VALID))
+			break;
+
+		if (ring->ring_type == RING_TX) {
+			tx_result =
+				rt2x00_get_field32(txd->word0, TXD_W0_RESULT);
+			/*	    retry_count = rt2x00_get_field32(txd->word0, TXD_W0_RETRY_COUNT); */
+
+			switch (tx_result) {
+			case TX_SUCCESS:
+				rtwlan_dev->stats.tx_packets++;
+				break;
+			case TX_SUCCESS_RETRY:
+				rtwlan_dev->stats.tx_retry++;
+				break;
+			case TX_FAIL_RETRY:
+				DEBUG("TX_FAIL_RETRY.\n");
+				break;
+			case TX_FAIL_INVALID:
+				DEBUG("TX_FAIL_INVALID.\n");
+				break;
+			case TX_FAIL_OTHER:
+				DEBUG("TX_FAIL_OTHER.\n");
+				break;
+			default:
+				DEBUG("Unknown tx result.\n");
+			}
+		}
+
+		rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 0);
+
+		rt2x00_ring_index_done_inc(ring);
+	} while (!rt2x00_ring_empty(ring));
+}
+
+static void rt2x00_interrupt_rxdone(struct _data_ring *ring,
+				    nanosecs_abs_t *time_stamp)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(ring->core);
+	struct rtnet_device *rtnet_dev = ring->core->rtnet_dev;
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rxd *rxd = NULL;
+	struct rtskb *rtskb;
+	void *data = NULL;
+	u16 size = 0x0000;
+	/*    u16                    rssi = 0x0000; */
+
+	while (1) {
+		rxd = DESC_ADDR(ring);
+		data = DATA_ADDR(ring);
+
+		if (rt2x00_get_field32(rxd->word0, RXD_W0_OWNER_NIC))
+			break;
+
+		size = rt2x00_get_field32(rxd->word0, RXD_W0_DATABYTE_COUNT);
+		/*	rssi = rt2x00_get_field32(rxd->word2, RXD_W2_RSSI); */
+
+		/* prepare rtskb */
+		rtskb = rtnetdev_alloc_rtskb(rtnet_dev, size + NET_IP_ALIGN);
+		if (!rtskb) {
+			ERROR("Couldn't allocate rtskb, packet dropped.\n");
+			break;
+		}
+		rtskb->time_stamp = *time_stamp;
+		rtskb_reserve(rtskb, NET_IP_ALIGN);
+
+		memcpy(rtskb->data, data, size);
+		rtskb_put(rtskb, size);
+
+		/* give incoming frame to rtwlan stack */
+		rtwlan_rx(rtskb, rtnet_dev);
+
+		rtwlan_dev->stats.rx_packets++;
+
+		rt2x00_set_field32(&rxd->word0, RXD_W0_OWNER_NIC, 1);
+		rt2x00_ring_index_inc(&rt2x00pci->rx);
+	}
+}
+
+int rt2x00_interrupt(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	struct rtnet_device *rtnet_dev =
+		rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	unsigned int old_packet_cnt = rtwlan_dev->stats.rx_packets;
+	u32 reg = 0x00000000;
+
+	rtdm_lock_get(&rt2x00pci->lock);
+
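+	/* Reading CSR7 returns the pending interrupt sources; writing the
+	 * same value back acknowledges (clears) them. */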
+	rt2x00_register_read(rt2x00pci, CSR7, &reg);
+	rt2x00_register_write(rt2x00pci, CSR7, reg);
+
+	if (!reg) {
+		rtdm_lock_put(&rt2x00pci->lock);
+		return RTDM_IRQ_NONE;
+	}
+
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TBCN_EXPIRE)) /* Beacon timer expired interrupt. */
+		DEBUG("Beacon timer expired.\n");
+	if (rt2x00_get_field32(reg, CSR7_RXDONE)) /* Rx ring done interrupt. */
+		rt2x00_interrupt_rxdone(&rt2x00pci->rx, &time_stamp);
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TXDONE_ATIMRING)) /* Atim ring transmit done interrupt. */
+		DEBUG("AtimTxDone.\n");
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TXDONE_PRIORING)) /* Priority ring transmit done interrupt. */
+		DEBUG("PrioTxDone.\n");
+	if (rt2x00_get_field32(
+		    reg,
+		    CSR7_TXDONE_TXRING)) /* Tx ring transmit done interrupt. */
+		rt2x00_interrupt_txdone(&rt2x00pci->tx);
+
+	rtdm_lock_put(&rt2x00pci->lock);
+
+	if (old_packet_cnt != rtwlan_dev->stats.rx_packets)
+		rt_mark_stack_mgr(rtnet_dev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+void rt2x00_init_eeprom(struct _rt2x00_pci *rt2x00pci,
+			struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+	u16 eeprom = 0x0000;
+
+	/*
+     * 1 - Detect EEPROM width.
+     */
+	rt2x00_register_read(rt2x00pci, CSR21, &reg);
+	rt2x00pci->eeprom_width = rt2x00_get_field32(reg, CSR21_TYPE_93C46) ?
+					  EEPROM_WIDTH_93c46 :
+					  EEPROM_WIDTH_93c66;
+
+	/*
+     * 2 - Identify rf chipset.
+     */
+	eeprom = rt2x00_eeprom_read_word(rt2x00pci, EEPROM_ANTENNA);
+	set_chip(&rt2x00pci->chip, RT2560,
+		 rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RF_TYPE));
+
+	/*
+     * 3 - Identify default antenna configuration.
+     */
+	config->antenna_tx =
+		rt2x00_get_field16(eeprom, EEPROM_ANTENNA_TX_DEFAULT);
+	config->antenna_rx =
+		rt2x00_get_field16(eeprom, EEPROM_ANTENNA_RX_DEFAULT);
+
+	DEBUG("antenna_tx=%d antenna_rx=%d\n", config->antenna_tx,
+	      config->antenna_rx);
+
+	/*
+     * 4 - Read BBP data from EEPROM and store in private structure.
+     */
+	memset(&rt2x00pci->eeprom, 0x00, sizeof(rt2x00pci->eeprom));
+	for (eeprom = 0; eeprom < EEPROM_BBP_SIZE; eeprom++)
+		rt2x00pci->eeprom[eeprom] = rt2x00_eeprom_read_word(
+			rt2x00pci, EEPROM_BBP_START + eeprom);
+}
+
+void rt2x00_dev_read_mac(struct _rt2x00_pci *rt2x00pci,
+			 struct rtnet_device *rtnet_dev)
+{
+	u32 reg[2];
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	rt2x00_register_multiread(rt2x00pci, CSR3, &reg[0], sizeof(reg));
+
+	rtnet_dev->dev_addr[0] = rt2x00_get_field32(reg[0], CSR3_BYTE0);
+	rtnet_dev->dev_addr[1] = rt2x00_get_field32(reg[0], CSR3_BYTE1);
+	rtnet_dev->dev_addr[2] = rt2x00_get_field32(reg[0], CSR3_BYTE2);
+	rtnet_dev->dev_addr[3] = rt2x00_get_field32(reg[0], CSR3_BYTE3);
+	rtnet_dev->dev_addr[4] = rt2x00_get_field32(reg[1], CSR4_BYTE4);
+	rtnet_dev->dev_addr[5] = rt2x00_get_field32(reg[1], CSR4_BYTE5);
+
+	rtnet_dev->addr_len = 6;
+}
+
+int rt2x00_dev_probe(struct _rt2x00_core *core, void *priv)
+{
+	struct pci_dev *pci_dev = (struct pci_dev *)priv;
+	struct _rt2x00_pci *rt2x00pci = core->priv;
+
+	memset(rt2x00pci, 0x00, sizeof(*rt2x00pci));
+
+	if (unlikely(!pci_dev)) {
+		ERROR("invalid priv pointer.\n");
+		return -ENODEV;
+	}
+	rt2x00pci->pci_dev = pci_dev;
+
+	rt2x00pci->rx.data_addr = NULL;
+	rt2x00pci->tx.data_addr = NULL;
+
+	rt2x00pci->csr_addr = ioremap(pci_resource_start(pci_dev, 0),
+				      pci_resource_len(pci_dev, 0));
+	if (!rt2x00pci->csr_addr) {
+		ERROR("ioremap failed.\n");
+		return -ENOMEM;
+	}
+
+	rt2x00_init_eeprom(rt2x00pci, &core->config);
+	rt2x00_dev_read_mac(rt2x00pci, core->rtnet_dev);
+
+	return 0;
+}
+
+int rt2x00_dev_remove(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	if (rt2x00pci->csr_addr) {
+		iounmap(rt2x00pci->csr_addr);
+		rt2x00pci->csr_addr = NULL;
+	}
+
+	return 0;
+}
+
+/*
+ * rt2x00_clear_ring
+ * During initialization some of the descriptor fields are filled in.
+ * The default owner differs between the descriptor types: RX ring entries
+ * are owned by the device until a packet has been received into them,
+ * while TX ring entries are owned by the module until the device is told
+ * to transmit the packet in that particular entry; for TX rings the valid
+ * bit is cleared to indicate the entry is ready for use.
+ * The BUFFER_ADDRESS field links each descriptor to its packet data block,
+ * which follows the descriptor array within the same DMA allocation.
+ */
+static void rt2x00_clear_ring(struct _rt2x00_pci *rt2x00pci,
+			      struct _data_ring *ring)
+{
+	struct _rxd *rxd = NULL;
+	struct _txd *txd = NULL;
+	dma_addr_t data_dma =
+		ring->data_dma + (ring->max_entries * ring->desc_size);
+	u8 counter = 0x00;
+
+	memset(ring->data_addr, 0x00, ring->mem_size);
+
+	for (; counter < ring->max_entries; counter++) {
+		if (ring->ring_type == RING_RX) {
+			rxd = (struct _rxd *)__DESC_ADDR(ring, counter);
+
+			rt2x00_set_field32(&rxd->word1, RXD_W1_BUFFER_ADDRESS,
+					   data_dma);
+			rt2x00_set_field32(&rxd->word0, RXD_W0_OWNER_NIC, 1);
+		} else {
+			txd = (struct _txd *)__DESC_ADDR(ring, counter);
+
+			rt2x00_set_field32(&txd->word1, TXD_W1_BUFFER_ADDRESS,
+					   data_dma);
+			rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 0);
+			rt2x00_set_field32(&txd->word0, TXD_W0_OWNER_NIC, 0);
+		}
+
+		data_dma += ring->entry_size;
+	}
+
+	rt2x00_ring_clear_index(ring);
+}
+
+/*
+ * rt2x00_init_ring_register
+ * The registers should be updated with the descriptor size and the
+ * number of entries of each ring.
+ * The address of the first entry of the descriptor ring is written to the register
+ * corresponding to the ring.
+ */
+static void rt2x00_init_ring_register(struct _rt2x00_pci *rt2x00pci)
+{
+	u32 reg = 0x00000000;
+
+	/* Initialize ring register for RX/TX */
+
+	rt2x00_set_field32(&reg, TXCSR2_TXD_SIZE, rt2x00pci->tx.desc_size);
+	rt2x00_set_field32(&reg, TXCSR2_NUM_TXD, rt2x00pci->tx.max_entries);
+	rt2x00_register_write(rt2x00pci, TXCSR2, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, TXCSR3_TX_RING_REGISTER,
+			   rt2x00pci->tx.data_dma);
+	rt2x00_register_write(rt2x00pci, TXCSR3, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, RXCSR1_RXD_SIZE, rt2x00pci->rx.desc_size);
+	rt2x00_set_field32(&reg, RXCSR1_NUM_RXD, rt2x00pci->rx.max_entries);
+	rt2x00_register_write(rt2x00pci, RXCSR1, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, RXCSR2_RX_RING_REGISTER,
+			   rt2x00pci->rx.data_dma);
+	rt2x00_register_write(rt2x00pci, RXCSR2, reg);
+}
+
+static int rt2x00_init_registers(struct _rt2x00_pci *rt2x00pci)
+{
+	u32 reg = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_write(rt2x00pci, PWRCSR0, cpu_to_le32(0x3f3b3100));
+
+	rt2x00_register_write(rt2x00pci, PSCSR0, cpu_to_le32(0x00020002));
+	rt2x00_register_write(rt2x00pci, PSCSR1, cpu_to_le32(0x00000002));
+	rt2x00_register_write(rt2x00pci, PSCSR2, cpu_to_le32(0x00020002));
+	rt2x00_register_write(rt2x00pci, PSCSR3, cpu_to_le32(0x00000002));
+
+	rt2x00_register_read(rt2x00pci, TIMECSR, &reg);
+	rt2x00_set_field32(&reg, TIMECSR_US_COUNT, 33);
+	rt2x00_set_field32(&reg, TIMECSR_US_64_COUNT, 63);
+	rt2x00_set_field32(&reg, TIMECSR_BEACON_EXPECT, 0);
+	rt2x00_register_write(rt2x00pci, TIMECSR, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR9, &reg);
+	rt2x00_set_field32(&reg, CSR9_MAX_FRAME_UNIT,
+			   (rt2x00pci->rx.entry_size / 128));
+	rt2x00_register_write(rt2x00pci, CSR9, reg);
+
+	rt2x00_register_write(rt2x00pci, CNT3, cpu_to_le32(0x3f080000));
+
+	rt2x00_register_read(rt2x00pci, RXCSR0, &reg);
+	rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 0);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, 0);
+	rt2x00_register_write(rt2x00pci, RXCSR0, reg);
+
+	rt2x00_register_write(rt2x00pci, MACCSR0, cpu_to_le32(0x00213223));
+
+	rt2x00_register_read(rt2x00pci, MACCSR1, &reg);
+	rt2x00_set_field32(&reg, MACCSR1_AUTO_TXBBP, 1);
+	rt2x00_set_field32(&reg, MACCSR1_AUTO_RXBBP, 1);
+	rt2x00_register_write(rt2x00pci, MACCSR1, reg);
+
+	rt2x00_register_read(rt2x00pci, MACCSR2, &reg);
+	rt2x00_set_field32(&reg, MACCSR2_DELAY, 64);
+	rt2x00_register_write(rt2x00pci, MACCSR2, reg);
+
+	rt2x00_register_read(rt2x00pci, RXCSR3, &reg);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID0, 47); /* Signal. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID0_VALID, 1);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID1, 51); /* Rssi. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID1_VALID, 1);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID2, 42); /* OFDM Rate. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID2_VALID, 1);
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID3, 51); /* OFDM. */
+	rt2x00_set_field32(&reg, RXCSR3_BBP_ID3_VALID, 1);
+	rt2x00_register_write(rt2x00pci, RXCSR3, reg);
+
+	rt2x00_register_read(rt2x00pci, RALINKCSR, &reg);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA0, 17);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID0, 26);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID0, 1);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_DATA1, 0);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_ID1, 26);
+	rt2x00_set_field32(&reg, RALINKCSR_AR_BBP_VALID1, 1);
+	rt2x00_register_write(rt2x00pci, RALINKCSR, reg);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR1, cpu_to_le32(0x82188200));
+
+	rt2x00_register_write(rt2x00pci, TXACKCSR0, cpu_to_le32(0x00000020));
+
+	rt2x00_register_write(rt2x00pci, ARTCSR0, cpu_to_le32(0x7038140a));
+	rt2x00_register_write(rt2x00pci, ARTCSR1, cpu_to_le32(0x1d21252d));
+	rt2x00_register_write(rt2x00pci, ARTCSR2, cpu_to_le32(0x1919191d));
+
+	/* disable Beacon timer */
+	rt2x00_register_write(rt2x00pci, CSR14, 0x0);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, LEDCSR_ON_PERIOD, 30);
+	rt2x00_set_field32(&reg, LEDCSR_OFF_PERIOD, 70);
+	rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
+	rt2x00_set_field32(&reg, LEDCSR_ACTIVITY, 0);
+	rt2x00_register_write(rt2x00pci, LEDCSR, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, CSR1_SOFT_RESET, 1);
+	rt2x00_register_write(rt2x00pci, CSR1, reg);
+
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, CSR1_HOST_READY, 1);
+	rt2x00_register_write(rt2x00pci, CSR1, reg);
+
+	/*
+     * We must clear the FCS and FIFO error counters.
+     * These registers are cleared on read, so we can read them into a
+     * scratch variable and discard the value.
+     */
+	rt2x00_register_read(rt2x00pci, CNT0, &reg);
+	rt2x00_register_read(rt2x00pci, CNT4, &reg);
+
+	return 0;
+}
+
+static void rt2x00_init_write_mac(struct _rt2x00_pci *rt2x00pci,
+				  struct rtnet_device *rtnet_dev)
+{
+	u32 reg[2];
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	rt2x00_set_field32(&reg[0], CSR3_BYTE0, rtnet_dev->dev_addr[0]);
+	rt2x00_set_field32(&reg[0], CSR3_BYTE1, rtnet_dev->dev_addr[1]);
+	rt2x00_set_field32(&reg[0], CSR3_BYTE2, rtnet_dev->dev_addr[2]);
+	rt2x00_set_field32(&reg[0], CSR3_BYTE3, rtnet_dev->dev_addr[3]);
+	rt2x00_set_field32(&reg[1], CSR4_BYTE4, rtnet_dev->dev_addr[4]);
+	rt2x00_set_field32(&reg[1], CSR4_BYTE5, rtnet_dev->dev_addr[5]);
+
+	rt2x00_register_multiwrite(rt2x00pci, CSR3, &reg[0], sizeof(reg));
+}
+
+static int rt2x00_init_bbp(struct _rt2x00_pci *rt2x00pci)
+{
+	u8 reg_id = 0x00;
+	u8 value = 0x00;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_bbp_regread(rt2x00pci, 0x00, &value);
+		if ((value != 0xff) && (value != 0x00))
+			goto continue_csr_init;
+		NOTICE("Waiting for BBP register.\n");
+	}
+
+	ERROR("hardware problem, BBP register access failed, aborting.\n");
+	return -EACCES;
+
+continue_csr_init:
+	rt2x00_bbp_regwrite(rt2x00pci, 3, 0x02);
+	rt2x00_bbp_regwrite(rt2x00pci, 4, 0x19);
+	rt2x00_bbp_regwrite(rt2x00pci, 14, 0x1c);
+	rt2x00_bbp_regwrite(rt2x00pci, 15, 0x30);
+	rt2x00_bbp_regwrite(rt2x00pci, 16, 0xac);
+	rt2x00_bbp_regwrite(rt2x00pci, 17, 0x48);
+	rt2x00_bbp_regwrite(rt2x00pci, 18, 0x18);
+	rt2x00_bbp_regwrite(rt2x00pci, 19, 0xff);
+	rt2x00_bbp_regwrite(rt2x00pci, 20, 0x1e);
+	rt2x00_bbp_regwrite(rt2x00pci, 21, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 22, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 23, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 24, 0x70);
+	rt2x00_bbp_regwrite(rt2x00pci, 25, 0x40);
+	rt2x00_bbp_regwrite(rt2x00pci, 26, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 27, 0x23);
+	rt2x00_bbp_regwrite(rt2x00pci, 30, 0x10);
+	rt2x00_bbp_regwrite(rt2x00pci, 31, 0x2b);
+	rt2x00_bbp_regwrite(rt2x00pci, 32, 0xb9);
+	rt2x00_bbp_regwrite(rt2x00pci, 34, 0x12);
+	rt2x00_bbp_regwrite(rt2x00pci, 35, 0x50);
+	rt2x00_bbp_regwrite(rt2x00pci, 39, 0xc4);
+	rt2x00_bbp_regwrite(rt2x00pci, 40, 0x02);
+	rt2x00_bbp_regwrite(rt2x00pci, 41, 0x60);
+	rt2x00_bbp_regwrite(rt2x00pci, 53, 0x10);
+	rt2x00_bbp_regwrite(rt2x00pci, 54, 0x18);
+	rt2x00_bbp_regwrite(rt2x00pci, 56, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 57, 0x10);
+	rt2x00_bbp_regwrite(rt2x00pci, 58, 0x08);
+	rt2x00_bbp_regwrite(rt2x00pci, 61, 0x6d);
+	rt2x00_bbp_regwrite(rt2x00pci, 62, 0x10);
+
+	DEBUG("Start reading EEPROM contents...\n");
+	for (counter = 0; counter < EEPROM_BBP_SIZE; counter++) {
+		if (rt2x00pci->eeprom[counter] != 0xffff &&
+		    rt2x00pci->eeprom[counter] != 0x0000) {
+			reg_id = rt2x00_get_field16(rt2x00pci->eeprom[counter],
+						    EEPROM_BBP_REG_ID);
+			value = rt2x00_get_field16(rt2x00pci->eeprom[counter],
+						   EEPROM_BBP_VALUE);
+			DEBUG("BBP reg_id: 0x%02x, value: 0x%02x.\n", reg_id,
+			      value);
+			rt2x00_bbp_regwrite(rt2x00pci, reg_id, value);
+		}
+	}
+	DEBUG("...End of EEPROM contents.\n");
+
+	return 0;
+}
+
+/*
+ * Device radio routines.
+ * When the radio is switched on or off, the TX and RX
+ * should always be reset using the TXCSR0 and RXCSR0 registers.
+ * The radio itself is switched on and off using the PWRCSR0 register.
+ */
+
+static int rt2x00_dev_radio_on(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	u32 reg = 0x00000000;
+	int retval;
+
+	if (rt2x00_pci_alloc_rings(core))
+		goto exit_fail;
+
+	rt2x00_clear_ring(rt2x00pci, &rt2x00pci->rx);
+	rt2x00_clear_ring(rt2x00pci, &rt2x00pci->tx);
+
+	rt2x00_init_ring_register(rt2x00pci);
+
+	if (rt2x00_init_registers(rt2x00pci))
+		goto exit_fail;
+
+	rt2x00_init_write_mac(rt2x00pci, core->rtnet_dev);
+
+	if (rt2x00_init_bbp(rt2x00pci))
+		goto exit_fail;
+
+	/*
+     * Acknowledge any pending interrupts by writing the latched
+     * sources in CSR7 back to the register.
+     */
+	rt2x00_register_read(rt2x00pci, CSR7, &reg);
+	rt2x00_register_write(rt2x00pci, CSR7, reg);
+
+	/* Register the RTDM interrupt handler. */
+	retval = rtdm_irq_request(&rt2x00pci->irq_handle, core->rtnet_dev->irq,
+				  rt2x00_interrupt, 0, core->rtnet_dev->name,
+				  core->rtnet_dev);
+	if (retval) {
+		ERROR("rtdm_irq_request failed (err=%d).\n", retval);
+		rt2x00_pci_free_rings(core);
+		return retval;
+	}
+
+	/*
+     * Enable interrupts: CSR8 is the interrupt mask register, so
+     * clearing a bit unmasks the corresponding interrupt source.
+     */
+	rt2x00_register_read(rt2x00pci, CSR8, &reg);
+	rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, 0);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 0);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 0);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 0);
+	rt2x00_set_field32(&reg, CSR8_RXDONE, 0);
+	rt2x00_register_write(rt2x00pci, CSR8, reg);
+
+	return 0;
+
+exit_fail:
+	rt2x00_pci_free_rings(core);
+
+	return -ENOMEM;
+}
+
+static int rt2x00_dev_radio_off(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	u32 reg = 0x00000000;
+	int retval = 0;
+
+	rt2x00_register_write(rt2x00pci, PWRCSR0, cpu_to_le32(0x00000000));
+
+	rt2x00_register_read(rt2x00pci, TXCSR0, &reg);
+	rt2x00_set_field32(&reg, TXCSR0_ABORT, 1);
+	rt2x00_register_write(rt2x00pci, TXCSR0, reg);
+
+	rt2x00_register_read(rt2x00pci, RXCSR0, &reg);
+	rt2x00_set_field32(&reg, RXCSR0_DISABLE_RX, 1);
+	rt2x00_register_write(rt2x00pci, RXCSR0, reg);
+
+	rt2x00_register_read(rt2x00pci, LEDCSR, &reg);
+	rt2x00_set_field32(&reg, LEDCSR_LINK, 0);
+	rt2x00_register_write(rt2x00pci, LEDCSR, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR8, &reg);
+	rt2x00_set_field32(&reg, CSR8_TBCN_EXPIRE, 1);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_TXRING, 1);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_ATIMRING, 1);
+	rt2x00_set_field32(&reg, CSR8_TXDONE_PRIORING, 1);
+	rt2x00_set_field32(&reg, CSR8_RXDONE, 1);
+	rt2x00_register_write(rt2x00pci, CSR8, reg);
+
+	rt2x00_pci_free_rings(core);
+
+	if ((retval = rtdm_irq_free(&rt2x00pci->irq_handle)) != 0)
+		ERROR("rtdm_irq_free=%d\n", retval);
+
+	rt_stack_disconnect(core->rtnet_dev);
+
+	return retval;
+}
+
+/*
+ * Configuration handlers.
+ */
+
+static void rt2x00_dev_update_autoresp(struct _rt2x00_pci *rt2x00pci,
+				       struct _rt2x00_config *config)
+{
+	u32 reg = 0;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, TXCSR1, &reg);
+
+	if (config->config_flags & CONFIG_AUTORESP)
+		rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
+	else
+		rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 0);
+
+	rt2x00_register_write(rt2x00pci, TXCSR1, reg);
+}
+
+static void rt2x00_dev_update_bbpsens(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	rt2x00_bbp_regwrite(rt2x00pci, 0x11, config->bbpsens);
+}
+
+static void rt2x00_dev_update_bssid(struct _rt2x00_pci *rt2x00pci,
+				    struct _rt2x00_config *config)
+{
+	u32 reg[2];
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	rt2x00_set_field32(&reg[0], CSR5_BYTE0, config->bssid[0]);
+	rt2x00_set_field32(&reg[0], CSR5_BYTE1, config->bssid[1]);
+	rt2x00_set_field32(&reg[0], CSR5_BYTE2, config->bssid[2]);
+	rt2x00_set_field32(&reg[0], CSR5_BYTE3, config->bssid[3]);
+	rt2x00_set_field32(&reg[1], CSR6_BYTE4, config->bssid[4]);
+	rt2x00_set_field32(&reg[1], CSR6_BYTE5, config->bssid[5]);
+
+	rt2x00_register_multiwrite(rt2x00pci, CSR5, &reg[0], sizeof(reg));
+}
+
+static void rt2x00_dev_update_packet_filter(struct _rt2x00_pci *rt2x00pci,
+					    struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, RXCSR0, &reg);
+
+	rt2x00_set_field32(&reg, RXCSR0_DROP_TODS, 0);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_NOT_TO_ME, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CRC, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_PHYSICAL, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_CONTROL, 1);
+	rt2x00_set_field32(&reg, RXCSR0_DROP_VERSION_ERROR, 1);
+
+	/*
+     * Apparently the hardware swaps these two bits: to drop broadcast
+     * frames the multicast bit must be written, and vice versa.
+     */
+	if (config->config_flags & CONFIG_DROP_BCAST)
+		rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, 1);
+	else
+		rt2x00_set_field32(&reg, RXCSR0_DROP_MCAST, 0);
+
+	if (config->config_flags & CONFIG_DROP_MCAST)
+		rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 1);
+	else
+		rt2x00_set_field32(&reg, RXCSR0_DROP_BCAST, 0);
+
+	rt2x00_register_write(rt2x00pci, RXCSR0, reg);
+}
+
+static void rt2x00_dev_update_channel(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	u8 txpower = rt2x00_get_txpower(&rt2x00pci->chip, config->txpower);
+	u32 reg = 0x00000000;
+
+	if (rt2x00_get_rf_value(&rt2x00pci->chip, config->channel,
+				&rt2x00pci->channel)) {
+		ERROR("RF values for chip %04x and channel %d not found.\n",
+		      rt2x00_get_rf(&rt2x00pci->chip), config->channel);
+		return;
+	}
+
+	/*
+     * Set TXpower.
+     */
+	rt2x00_set_field32(&rt2x00pci->channel.rf3, RF3_TXPOWER, txpower);
+
+	/*
+     * For RT2525 we should first set the channel to half band higher.
+     */
+	if (rt2x00_rf(&rt2x00pci->chip, RF2525)) {
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf1);
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf2 +
+						      cpu_to_le32(0x00000020));
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3);
+		if (rt2x00pci->channel.rf4)
+			rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf4);
+	}
+
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf1);
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf2);
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3);
+	if (rt2x00pci->channel.rf4)
+		rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf4);
+
+	/*
+     * Channel 14 requires the Japan filter bit to be set.
+     */
+	rt2x00_bbp_regwrite(rt2x00pci, 70,
+			    (config->channel == 14) ? 0x4e : 0x46);
+
+	msleep(1);
+
+	/*
+     * Clear false CRC during channel switch.
+     */
+	rt2x00_register_read(rt2x00pci, CNT0, &reg);
+
+	DEBUG("Switching to channel %d. RF1: 0x%08x, RF2: 0x%08x, RF3: 0x%08x, RF4: 0x%08x.\n",
+	      config->channel, rt2x00pci->channel.rf1, rt2x00pci->channel.rf2,
+	      rt2x00pci->channel.rf3, rt2x00pci->channel.rf4);
+}
+
+static void rt2x00_dev_update_rate(struct _rt2x00_pci *rt2x00pci,
+				   struct _rt2x00_config *config)
+{
+	u32 value = 0x00000000;
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, TXCSR1, &reg);
+
+	value = config->sifs + (2 * config->slot_time) + config->plcp +
+		get_preamble(config) +
+		get_duration(ACK_SIZE, capabilities.bitrate[0]);
+	rt2x00_set_field32(&reg, TXCSR1_ACK_TIMEOUT, value);
+
+	value = config->sifs + config->plcp + get_preamble(config) +
+		get_duration(ACK_SIZE, capabilities.bitrate[0]);
+	rt2x00_set_field32(&reg, TXCSR1_ACK_CONSUME_TIME, value);
+
+	rt2x00_set_field32(&reg, TXCSR1_TSF_OFFSET, 0x18);
+	rt2x00_set_field32(&reg, TXCSR1_AUTORESPONDER, 1);
+
+	rt2x00_register_write(rt2x00pci, TXCSR1, reg);
+
+	reg = 0x00000000;
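+	/* Build the ARCSR1 basic-rate mask: one bit per supported rate,
+	 * up to and including the configured bitrate. */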
+	for (counter = 0; counter < 12; counter++) {
+		reg |= cpu_to_le32(0x00000001 << counter);
+		if (capabilities.bitrate[counter] == config->bitrate)
+			break;
+	}
+
+	rt2x00_register_write(rt2x00pci, ARCSR1, reg);
+}
+
+static void rt2x00_dev_update_txpower(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	u8 txpower = rt2x00_get_txpower(&rt2x00pci->chip, config->txpower);
+
+	DEBUG("Start.\n");
+
+	rt2x00_set_field32(&rt2x00pci->channel.rf3, RF3_TXPOWER, txpower);
+	rt2x00_rf_regwrite(rt2x00pci, rt2x00pci->channel.rf3);
+}
+
+static void rt2x00_dev_update_antenna(struct _rt2x00_pci *rt2x00pci,
+				      struct _rt2x00_config *config)
+{
+	u32 reg;
+	u8 reg_rx;
+	u8 reg_tx;
+
+	rt2x00_register_read(rt2x00pci, BBPCSR1, &reg);
+	rt2x00_bbp_regread(rt2x00pci, 14, &reg_rx);
+	rt2x00_bbp_regread(rt2x00pci, 2, &reg_tx);
+
+	/* TX antenna select */
+	if (config->antenna_tx == 1) {
+		/* Antenna A */
+		reg_tx = (reg_tx & 0xfc) | 0x00;
+		reg = (reg & 0xfffcfffc) | 0x00;
+	} else if (config->antenna_tx == 2) {
+		/* Antenna B */
+		reg_tx = (reg_tx & 0xfc) | 0x02;
+		reg = (reg & 0xfffcfffc) | 0x00020002;
+	} else {
+		/* Diversity */
+		reg_tx = (reg_tx & 0xfc) | 0x02;
+		reg = (reg & 0xfffcfffc) | 0x00020002;
+	}
+
+	/* RX antenna select */
+	if (config->antenna_rx == 1)
+		reg_rx = (reg_rx & 0xfc) | 0x00;
+	else if (config->antenna_rx == 2)
+		reg_rx = (reg_rx & 0xfc) | 0x02;
+	else
+		reg_rx = (reg_rx & 0xfc) | 0x02;
+
+	/*
+     * RF2525E and RF5222 need to flip the I/Q signals.
+     */
+	if (rt2x00_rf(&rt2x00pci->chip, RF5222)) {
+		reg |= 0x00040004;
+		reg_tx |= 0x04;
+	} else if (rt2x00_rf(&rt2x00pci->chip, RF2525E)) {
+		reg |= 0x00040004;
+		reg_tx |= 0x04;
+		reg_rx &= 0xfb; /* clear the 0x04 I/Q bit (0xfb is its inverse mask). */
+	}
+
+	rt2x00_register_write(rt2x00pci, BBPCSR1, reg);
+	rt2x00_bbp_regwrite(rt2x00pci, 14, reg_rx);
+	rt2x00_bbp_regwrite(rt2x00pci, 2, reg_tx);
+}
+
+static void rt2x00_dev_update_duration(struct _rt2x00_pci *rt2x00pci,
+				       struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	rt2x00_register_read(rt2x00pci, CSR11, &reg);
+	rt2x00_set_field32(&reg, CSR11_CWMIN, 5); /* 2^5 = 32. */
+	rt2x00_set_field32(&reg, CSR11_CWMAX, 10); /* 2^10 = 1024. */
+	rt2x00_set_field32(&reg, CSR11_SLOT_TIME, config->slot_time);
+	rt2x00_set_field32(&reg, CSR11_CW_SELECT, 1);
+	rt2x00_register_write(rt2x00pci, CSR11, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR18, &reg);
+	rt2x00_set_field32(&reg, CSR18_SIFS, config->sifs);
+	rt2x00_set_field32(&reg, CSR18_PIFS, config->sifs + config->slot_time);
+	rt2x00_register_write(rt2x00pci, CSR18, reg);
+
+	rt2x00_register_read(rt2x00pci, CSR19, &reg);
+	rt2x00_set_field32(&reg, CSR19_DIFS,
+			   config->sifs + (2 * config->slot_time));
+	rt2x00_set_field32(&reg, CSR19_EIFS,
+			   config->sifs +
+				   get_duration((IEEE80211_HEADER + ACK_SIZE),
+						capabilities.bitrate[0]));
+	rt2x00_register_write(rt2x00pci, CSR19, reg);
+}
+
+static void rt2x00_dev_update_retry(struct _rt2x00_pci *rt2x00pci,
+				    struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	rt2x00_register_read(rt2x00pci, CSR11, &reg);
+	rt2x00_set_field32(&reg, CSR11_LONG_RETRY, config->long_retry);
+	rt2x00_set_field32(&reg, CSR11_SHORT_RETRY, config->short_retry);
+	rt2x00_register_write(rt2x00pci, CSR11, reg);
+}
+
+static void rt2x00_dev_update_preamble(struct _rt2x00_pci *rt2x00pci,
+				       struct _rt2x00_config *config)
+{
+	u32 reg[4];
+	u32 preamble = 0x00000000;
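+	/*
+	 * Note: preamble stays zero (long preamble) here; judging by the
+	 * XMIT_SHORT_PREAMBLE handling in the TX path, a short-preamble
+	 * setup would presumably OR 0x00000008 into these PLCP words.
+	 */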
+
+	memset(&reg, 0x00, sizeof(reg));
+
+	reg[0] = cpu_to_le32(0x00700400 | preamble); /* ARCSR2 */
+	reg[1] = cpu_to_le32(0x00380401 | preamble); /* ARCSR3 */
+	reg[2] = cpu_to_le32(0x00150402 | preamble); /* ARCSR4 */
+	reg[3] = cpu_to_le32(0x000b8403 | preamble); /* ARCSR5 */
+
+	rt2x00_register_multiwrite(rt2x00pci, ARCSR2, &reg[0], sizeof(reg));
+}
+
+static void rt2x00_dev_update_led(struct _rt2x00_pci *rt2x00pci,
+				  struct _rt2x00_config *config)
+{
+	u32 reg = 0x00000000;
+
+	rt2x00_register_read(rt2x00pci, LEDCSR, &reg);
+	rt2x00_set_field32(&reg, LEDCSR_LINK, config->led_status ? 1 : 0);
+	rt2x00_register_write(rt2x00pci, LEDCSR, reg);
+}
+
+static int rt2x00_dev_update_config(struct _rt2x00_core *core, u16 update_flags)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	DEBUG("Start.\n");
+
+	if (update_flags & UPDATE_BSSID)
+		rt2x00_dev_update_bssid(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_PACKET_FILTER)
+		rt2x00_dev_update_packet_filter(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_CHANNEL)
+		rt2x00_dev_update_channel(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_BITRATE)
+		rt2x00_dev_update_rate(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_TXPOWER)
+		rt2x00_dev_update_txpower(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_ANTENNA)
+		rt2x00_dev_update_antenna(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_DURATION)
+		rt2x00_dev_update_duration(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_RETRY)
+		rt2x00_dev_update_retry(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_PREAMBLE)
+		rt2x00_dev_update_preamble(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_LED_STATUS)
+		rt2x00_dev_update_led(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_AUTORESP)
+		rt2x00_dev_update_autoresp(rt2x00pci, &core->config);
+
+	if (update_flags & UPDATE_BBPSENS)
+		rt2x00_dev_update_bbpsens(rt2x00pci, &core->config);
+
+	DEBUG("Exit.\n");
+
+	return 0;
+}
+
+/*
+ * Transmission routines.
+ * rt2x00_write_tx_desc will write the txd descriptor.
+ * rt2x00_dev_xmit_packet will copy the packets to the appropriate DMA ring.
+ */
+
+/*
+ * PLCP_SIGNAL, PLCP_SERVICE, PLCP_LENGTH_LOW and PLCP_LENGTH_HIGH are BBP registers.
+ * For RT2460 devices we must set not only the value we want to write,
+ * but also the busy bit (0x8000) and the register number (0x0f00).
+ * The value itself is stored in the low byte (0x00ff).
+ * For PLCP_SIGNAL we can optionally enable SHORT_PREAMBLE.
+ * For PLCP_SERVICE we can set the length extension bit according to
+ * 802.11b standard 18.2.3.5.
+ */
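+/*
+ * Illustration (hypothetical value, not taken from the driver): writing
+ * value 0x46 to BBP register 5 encodes as 0x8000 | (5 << 8) | 0x46 ==
+ * 0x8546, which matches the 0x8500-based constant used for the signal
+ * field below.
+ */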
+static void rt2x00_write_tx_desc(struct _rt2x00_pci *rt2x00pci,
+				 struct _txd *txd, u32 packet_size, u16 rate,
+				 u16 xmit_flags)
+{
+	u32 residual = 0x00000000;
+	u32 duration = 0x00000000;
+	u16 signal = 0x0000;
+	u16 service = 0x0000;
+	u16 length_low = 0x0000;
+	u16 length_high = 0x0000;
+
+	rt2x00_set_field32(&txd->word0, TXD_W0_VALID, 1);
+	rt2x00_set_field32(&txd->word0, TXD_W0_DATABYTE_COUNT, packet_size);
+	rt2x00_set_field32(&txd->word0, TXD_W0_ACK,
+			   (xmit_flags & XMIT_ACK) ? 1 : 0);
+	rt2x00_set_field32(&txd->word0, TXD_W0_RETRY_MODE,
+			   (xmit_flags & XMIT_LONG_RETRY) ? 1 : 0);
+	rt2x00_set_field32(&txd->word0, TXD_W0_TIMESTAMP,
+			   (xmit_flags & XMIT_TIMESTAMP) ? 1 : 0);
+	/*
+	 * MORE_FRAG is set both for fragmented frames and when an RTS is
+	 * requested; fold the two flags into one write so the second test
+	 * cannot clobber the first.
+	 */
+	rt2x00_set_field32(&txd->word0, TXD_W0_MORE_FRAG,
+			   (xmit_flags & (XMIT_MORE_FRAGS | XMIT_RTS)) ? 1 : 0);
+	rt2x00_set_field32(&txd->word10, TXD_W10_RTS,
+			   (xmit_flags & XMIT_RTS) ? 1 : 0);
+	rt2x00_set_field32(&txd->word0, TXD_W0_OFDM,
+			   (xmit_flags & XMIT_OFDM) ? 1 : 0);
+
+	packet_size += 4; /* include the 4-byte FCS in the PLCP length. */
+
+	if (xmit_flags & XMIT_OFDM) {
+		/*
+		 * For OFDM the PLCP length field is the frame length in
+		 * bytes, split across the low and high 6-bit fields.
+		 */
+		length_high = (packet_size >> 6) & 0x3f;
+		length_low = (packet_size & 0x3f);
+	} else {
+		/*
+		 * For CCK convert the frame length to a duration in
+		 * microseconds, rounding up when a residual remains.
+		 */
+		residual = get_duration_res(packet_size, rate);
+		duration = get_duration(packet_size, rate);
+
+		if (residual != 0)
+			duration++;
+
+		length_high = duration >> 8;
+		length_low = duration & 0xff;
+	}
+
+	signal |= 0x8500 | rt2x00_get_plcp(rate);
+	if (xmit_flags & XMIT_SHORT_PREAMBLE)
+		signal |= 0x0008;
+
+	service |= 0x0600 | 0x0004;
+	if (residual <= (8 % 11)) /* (8 % 11) evaluates to 8. */
+		service |= 0x0080; /* 802.11b length extension bit, 18.2.3.5. */
+
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_SIGNAL, signal);
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_SERVICE, service);
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_LENGTH_LOW, length_low);
+	rt2x00_set_field32(&txd->word3, TXD_W3_PLCP_LENGTH_HIGH, length_high);
+
+	/* set XMIT_IFS to XMIT_IFS_NONE */
+	rt2x00_set_field32(&txd->word0, TXD_W0_IFS, XMIT_IFS_NONE);
+
+	/* highest priority */
+	rt2x00_set_field32(&txd->word2, TXD_W2_CWMIN, 1);
+	rt2x00_set_field32(&txd->word2, TXD_W2_CWMAX, 2);
+	rt2x00_set_field32(&txd->word2, TXD_W2_AIFS, 1);
+
+	/*
+     * set this last, after this the device can start transmitting the packet.
+     */
+	rt2x00_set_field32(&txd->word0, TXD_W0_OWNER_NIC, 1);
+}
+
+static int rt2x00_dev_xmit_packet(struct _rt2x00_core *core,
+				  struct rtskb *rtskb, u16 rate, u16 xmit_flags)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+	struct _data_ring *ring = NULL;
+	struct _txd *txd = NULL;
+	void *data = NULL;
+	u32 reg = 0x00000000;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rt2x00pci->lock, context);
+
+	/* load tx-control register */
+	rt2x00_register_read(rt2x00pci, TXCSR0, &reg);
+
+	/* select tx-descriptor ring and prepare xmit */
+	ring = &rt2x00pci->tx;
+	rt2x00_set_field32(&reg, TXCSR0_KICK_TX, 1);
+
+	txd = DESC_ADDR(ring);
+	data = DATA_ADDR(ring);
+
+	if (rt2x00_get_field32(txd->word0, TXD_W0_OWNER_NIC) ||
+	    rt2x00_get_field32(txd->word0, TXD_W0_VALID)) {
+		rtdm_lock_put_irqrestore(&rt2x00pci->lock, context);
+		return -ENOMEM;
+	}
+
+	/* get and patch time stamp just before the transmission */
+	if (rtskb->xmit_stamp)
+		*rtskb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *rtskb->xmit_stamp);
+
+	/* copy rtskb to dma */
+	memcpy(data, rtskb->data, rtskb->len);
+
+	rt2x00_write_tx_desc(rt2x00pci, txd, rtskb->len, rate, xmit_flags);
+	rt2x00_ring_index_inc(ring);
+
+	/* let the device do the rest ... */
+	rt2x00_register_write(rt2x00pci, TXCSR0, reg);
+
+	rtdm_lock_put_irqrestore(&rt2x00pci->lock, context);
+
+	return 0;
+}
+
+/*
+ * PCI device handlers for usage by core module.
+ */
+static struct _rt2x00_dev_handler rt2x00_pci_handler = {
+
+	.dev_module = THIS_MODULE,
+	.dev_probe = rt2x00_dev_probe,
+	.dev_remove = rt2x00_dev_remove,
+	.dev_radio_on = rt2x00_dev_radio_on,
+	.dev_radio_off = rt2x00_dev_radio_off,
+	.dev_update_config = rt2x00_dev_update_config,
+	.dev_register_access = rt2x00_dev_register_access,
+	.dev_xmit_packet = rt2x00_dev_xmit_packet,
+};
+
+int rt2x00_pci_probe(struct pci_dev *pci_dev, const struct pci_device_id *id)
+{
+	struct rtnet_device *rtnet_dev = NULL;
+	int status = 0x00000000;
+
+	DEBUG("start.\n");
+
+	if (id->driver_data != RT2560) {
+		ERROR("detected device not supported.\n");
+		status = -ENODEV;
+		goto exit;
+	}
+
+	if (pci_enable_device(pci_dev)) {
+		ERROR("enable device failed.\n");
+		status = -EIO;
+		goto exit;
+	}
+
+	pci_set_master(pci_dev);
+
+	if (dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(64)) &&
+	    dma_set_mask(&pci_dev->dev, DMA_BIT_MASK(32))) {
+		ERROR("PCI DMA not supported\n");
+		status = -EIO;
+		goto exit_disable_device;
+	}
+
+	if (pci_request_regions(pci_dev, pci_name(pci_dev))) {
+		ERROR("PCI request regions failed.\n");
+		status = -EBUSY;
+		goto exit_disable_device;
+	}
+	INFO("pci_dev->irq=%d\n", pci_dev->irq);
+
+	rtnet_dev = rt2x00_core_probe(&rt2x00_pci_handler, pci_dev,
+				      sizeof(struct _rt2x00_pci));
+
+	if (!rtnet_dev) {
+		ERROR("rtnet_device allocation failed.\n");
+		status = -ENOMEM;
+		goto exit_release_regions;
+	}
+
+	rtnet_dev->irq = pci_dev->irq;
+
+	pci_set_drvdata(pci_dev, rtnet_dev);
+
+	return 0;
+
+exit_release_regions:
+	pci_release_regions(pci_dev);
+
+exit_disable_device:
+	if (status != -EBUSY)
+		pci_disable_device(pci_dev);
+
+exit:
+	return status;
+}
+
+static void rt2x00_pci_remove(struct pci_dev *pci_dev)
+{
+	struct rtnet_device *rtnet_dev = pci_get_drvdata(pci_dev);
+
+	rt2x00_core_remove(rtnet_dev);
+	pci_set_drvdata(pci_dev, NULL);
+	pci_release_regions(pci_dev);
+	pci_disable_device(pci_dev);
+}
+
+/*
+ * RT2500 PCI module information.
+ */
+char version[] = DRV_NAME " - " DRV_VERSION;
+
+struct pci_device_id rt2x00_device_pci_tbl[] = {
+	{ PCI_DEVICE(0x1814, 0x0201),
+	  .driver_data = RT2560 }, /* Ralink 802.11g */
+	{
+		0,
+	}
+};
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("RTnet rt2500 PCI WLAN driver (PCI Module)");
+MODULE_LICENSE("GPL");
+
+struct pci_driver rt2x00_pci_driver = {
+	.name = DRV_NAME,
+	.id_table = rt2x00_device_pci_tbl,
+	.probe = rt2x00_pci_probe,
+	.remove = rt2x00_pci_remove,
+};
+
+static int __init rt2x00_pci_init(void)
+{
+	rtdm_printk(KERN_INFO "Loading module: %s\n", version);
+	return pci_register_driver(&rt2x00_pci_driver);
+}
+
+static void __exit rt2x00_pci_exit(void)
+{
+	rtdm_printk(KERN_INFO "Unloading module: %s\n", version);
+	pci_unregister_driver(&rt2x00_pci_driver);
+}
+
+module_init(rt2x00_pci_init);
+module_exit(rt2x00_pci_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h
new file mode 100644
index 0000000..60e744b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2500pci.h
@@ -0,0 +1,1498 @@
+/* rt2500pci.h
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *	                     <http://rt2x00.serialmonkey.com>
+ *               2006        RTnet adaptation by Daniel Gregorek
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ *	Module: rt2500pci
+ * Abstract: Data structures and registers for the rt2500pci module.
+ * Supported chipsets: RT2560.
+ */
+
+#ifndef RT2500PCI_H
+#define RT2500PCI_H
+
+/*
+ * RT chip defines
+ */
+#define RT2560 0x0201
+
+/*
+ * RF chip defines
+ */
+#define RF2522 0x0200
+#define RF2523 0x0201
+#define RF2524 0x0202
+#define RF2525 0x0203
+#define RF2525E 0x0204
+#define RF5222 0x0210
+
+/*
+ * Control/Status Registers(CSR).
+ */
+#define CSR0 0x0000 /* ASIC revision number. */
+#define CSR1 0x0004 /* System control register. */
+#define CSR2 0x0008 /* System admin status register (invalid). */
+#define CSR3 0x000c /* STA MAC address register 0. */
+#define CSR4 0x0010 /* STA MAC address register 1. */
+#define CSR5 0x0014 /* BSSID register 0. */
+#define CSR6 0x0018 /* BSSID register 1. */
+#define CSR7 0x001c /* Interrupt source register. */
+#define CSR8 0x0020 /* Interrupt mask register. */
+#define CSR9 0x0024 /* Maximum frame length register. */
+#define SECCSR0 0x0028 /* WEP control register. */
+#define CSR11 0x002c /* Back-off control register. */
+#define CSR12 0x0030 /* Synchronization configuration register 0. */
+#define CSR13 0x0034 /* Synchronization configuration register 1. */
+#define CSR14 0x0038 /* Synchronization control register. */
+#define CSR15 0x003c /* Synchronization status register. */
+#define CSR16 0x0040 /* TSF timer register 0. */
+#define CSR17 0x0044 /* TSF timer register 1. */
+#define CSR18 0x0048 /* IFS timer register 0. */
+#define CSR19 0x004c /* IFS timer register 1. */
+#define CSR20 0x0050 /* WakeUp register. */
+#define CSR21 0x0054 /* EEPROM control register. */
+#define CSR22 0x0058 /* CFP Control Register. */
+
+/*
+ * Transmit related CSRs.
+ */
+#define TXCSR0 0x0060 /* TX control register. */
+#define TXCSR1 0x0064 /* TX configuration register. */
+#define TXCSR2 0x0068 /* TX descriptor configuration register. */
+#define TXCSR3 0x006c /* TX Ring Base address register. */
+#define TXCSR4 0x0070 /* TX Atim Ring Base address register. */
+#define TXCSR5 0x0074 /* TX Prio Ring Base address register. */
+#define TXCSR6 0x0078 /* Beacon base address. */
+#define TXCSR7 0x007c /* AutoResponder Control Register. */
+#define TXCSR8 0x0098 /* CCK TX BBP registers. */
+#define TXCSR9 0x0094 /* OFDM TX BBP registers. */
+
+/*
+ * Receive related CSRs.
+ */
+#define RXCSR0 0x0080 /* RX control register. */
+#define RXCSR1 0x0084 /* RX descriptor configuration register. */
+#define RXCSR2 0x0088 /* RX Ring base address register. */
+#define RXCSR3 0x0090 /* BBP ID register 0 */
+#define ARCSR1 0x009c /* Auto Responder PLCP config register 1. */
+
+/*
+ * PCI control CSRs.
+ */
+#define PCICSR 0x008c /* PCI control register. */
+
+/*
+ * Statistic Register.
+ */
+#define CNT0 0x00a0 /* FCS error count. */
+#define TIMECSR2 0x00a8
+#define CNT1 0x00ac /* PLCP error count. */
+#define CNT2 0x00b0 /* long error count. */
+#define TIMECSR3 0x00b4
+#define CNT3 0x00b8 /* CCA false alarm count. */
+#define CNT4 0x00bc /* Rx FIFO overflow count. */
+#define CNT5 0x00c0 /* Tx FIFO underrun count. */
+
+/*
+ * Baseband Control Register.
+ */
+#define PWRCSR0 0x00c4 /* Power mode configuration. */
+#define PSCSR0 0x00c8 /* Power state transition time. */
+#define PSCSR1 0x00cc /* Power state transition time. */
+#define PSCSR2 0x00d0 /* Power state transition time. */
+#define PSCSR3 0x00d4 /* Power state transition time. */
+#define PWRCSR1 0x00d8 /* Manual power control / status. */
+#define TIMECSR 0x00dc /* Timer control. */
+#define MACCSR0 0x00e0 /* MAC configuration. */
+#define MACCSR1 0x00e4 /* MAC configuration. */
+#define RALINKCSR 0x00e8 /* Ralink Auto-reset register. */
+#define BCNCSR 0x00ec /* Beacon interval control register. */
+
+/*
+ * BBP / RF / IF Control Register.
+ */
+#define BBPCSR 0x00f0 /* BBP serial control. */
+#define RFCSR 0x00f4 /* RF serial control. */
+#define LEDCSR 0x00f8 /* LED control register */
+
+#define SECCSR3 0x00fc /* AES control register. */
+
+/*
+ * ASIC pointer information.
+ */
+#define RXPTR 0x0100 /* Current RX ring address. */
+#define TXPTR 0x0104 /* Current Tx ring address. */
+#define PRIPTR 0x0108 /* Current Priority ring address. */
+#define ATIMPTR 0x010c /* Current ATIM ring address. */
+
+#define TXACKCSR0 0x0110 /* TX ACK timeout. */
+#define ACKCNT0 0x0114 /* TX ACK timeout count. */
+#define ACKCNT1 0x0118 /* RX ACK timeout count. */
+
+/*
+ * GPIO and others.
+ */
+#define GPIOCSR 0x0120 /* GPIO. */
+#define FIFOCSR0 0x0128 /* TX FIFO pointer. */
+#define FIFOCSR1 0x012c /* RX FIFO pointer. */
+#define BCNCSR1 0x0130 /* Tx BEACON offset time, unit: 1 usec. */
+#define MACCSR2 0x0134 /* TX_PE to RX_PE delay time, unit: 1 PCI clock cycle. */
+#define TESTCSR 0x0138 /* TEST mode selection register. */
+#define ARCSR2 0x013c /* 1 Mbps ACK/CTS PLCP. */
+#define ARCSR3 0x0140 /* 2 Mbps ACK/CTS PLCP. */
+#define ARCSR4 0x0144 /* 5.5 Mbps ACK/CTS PLCP. */
+#define ARCSR5 0x0148 /* 11 Mbps ACK/CTS PLCP. */
+#define ARTCSR0 0x014c /* ACK/CTS payload consumed time for 1/2/5.5/11 mbps. */
+#define ARTCSR1                                                                \
+	0x0150 /* OFDM ACK/CTS payload consumed time for 6/9/12/18 mbps. */
+#define ARTCSR2                                                                \
+	0x0154 /* OFDM ACK/CTS payload consumed time for 24/36/48/54 mbps. */
+#define SECCSR1 0x0158 /* WEP control register. */
+#define BBPCSR1 0x015c /* BBP TX configuration. */
+#define DBANDCSR0 0x0160 /* Dual band configuration register 0. */
+#define DBANDCSR1 0x0164 /* Dual band configuration register 1. */
+#define BBPPCSR 0x0168 /* BBP Pin control register. */
+#define DBGSEL0 0x016c /* MAC special debug mode selection register 0. */
+#define DBGSEL1 0x0170 /* MAC special debug mode selection register 1. */
+#define BISTCSR 0x0174 /* BBP BIST register. */
+#define MCAST0 0x0178 /* multicast filter register 0. */
+#define MCAST1 0x017c /* multicast filter register 1. */
+#define UARTCSR0 0x0180 /* UART1 TX register. */
+#define UARTCSR1 0x0184 /* UART1 RX register. */
+#define UARTCSR3 0x0188 /* UART1 frame control register. */
+#define UARTCSR4 0x018c /* UART1 buffer control register. */
+#define UART2CSR0 0x0190 /* UART2 TX register. */
+#define UART2CSR1 0x0194 /* UART2 RX register. */
+#define UART2CSR3 0x0198 /* UART2 frame control register. */
+#define UART2CSR4 0x019c /* UART2 buffer control register. */
+
+/*
+ * EEPROM addresses
+ */
+#define EEPROM_ANTENNA 0x10
+#define EEPROM_GEOGRAPHY 0x12
+#define EEPROM_BBP_START 0x13
+#define EEPROM_BBP_END 0x22
+
+#define EEPROM_BBP_SIZE 16
+
+/*
+ * CSR Registers.
+ * Some values are given in TU, where 1 TU == 1024 us.
+ */
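+
+/*
+ * Example: the default beacon interval of 100 TU (see CSR12) corresponds
+ * to 100 * 1024 us = 102.4 ms.
+ */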
+
+/*
+ * CSR1: System control register.
+ */
+#define CSR1_SOFT_RESET                                                        \
+	FIELD32(0, 0x00000001) /* Software reset, 1: reset, 0: normal. */
+#define CSR1_BBP_RESET                                                         \
+	FIELD32(1, 0x00000002) /* Hardware reset, 1: reset, 0, release. */
+#define CSR1_HOST_READY                                                        \
+	FIELD32(2, 0x00000004) /* Host ready after initialization. */
+
+/*
+ * CSR3: STA MAC address register 0.
+ */
+#define CSR3_BYTE0 FIELD32(0, 0x000000ff) /* MAC address byte 0. */
+#define CSR3_BYTE1 FIELD32(8, 0x0000ff00) /* MAC address byte 1. */
+#define CSR3_BYTE2 FIELD32(16, 0x00ff0000) /* MAC address byte 2. */
+#define CSR3_BYTE3 FIELD32(24, 0xff000000) /* MAC address byte 3. */
+
+/*
+ * CSR4: STA MAC address register 1.
+ */
+#define CSR4_BYTE4 FIELD32(0, 0x000000ff) /* MAC address byte 4. */
+#define CSR4_BYTE5 FIELD32(8, 0x0000ff00) /* MAC address byte 5. */
+
+/*
+ * CSR5: BSSID register 0.
+ */
+#define CSR5_BYTE0 FIELD32(0, 0x000000ff) /* BSSID address byte 0. */
+#define CSR5_BYTE1 FIELD32(8, 0x0000ff00) /* BSSID address byte 1. */
+#define CSR5_BYTE2 FIELD32(16, 0x00ff0000) /* BSSID address byte 2. */
+#define CSR5_BYTE3 FIELD32(24, 0xff000000) /* BSSID address byte 3. */
+
+/*
+ * CSR6: BSSID register 1.
+ */
+#define CSR6_BYTE4 FIELD32(0, 0x000000ff) /* BSSID address byte 4. */
+#define CSR6_BYTE5 FIELD32(8, 0x0000ff00) /* BSSID address byte 5. */
+
+/*
+ * CSR7: Interrupt source register.
+ * Write 1 to clear.
+ */
+#define CSR7_TBCN_EXPIRE                                                       \
+	FIELD32(0, 0x00000001) /* beacon timer expired interrupt. */
+#define CSR7_TWAKE_EXPIRE                                                      \
+	FIELD32(1, 0x00000002) /* wakeup timer expired interrupt. */
+#define CSR7_TATIMW_EXPIRE                                                     \
+	FIELD32(2, 0x00000004) /* timer of atim window expired interrupt. */
+#define CSR7_TXDONE_TXRING                                                     \
+	FIELD32(3, 0x00000008) /* tx ring transmit done interrupt. */
+#define CSR7_TXDONE_ATIMRING                                                   \
+	FIELD32(4, 0x00000010) /* atim ring transmit done interrupt. */
+#define CSR7_TXDONE_PRIORING                                                   \
+	FIELD32(5, 0x00000020) /* priority ring transmit done interrupt. */
+#define CSR7_RXDONE FIELD32(6, 0x00000040) /* receive done interrupt. */
+#define CSR7_DECRYPTION_DONE                                                   \
+	FIELD32(7, 0x00000080) /* Decryption done interrupt. */
+#define CSR7_ENCRYPTION_DONE                                                   \
+	FIELD32(8, 0x00000100) /* Encryption done interrupt. */
+#define CSR7_UART1_TX_TRESHOLD                                                 \
+	FIELD32(9, 0x00000200) /* UART1 TX reaches threshold. */
+#define CSR7_UART1_RX_TRESHOLD                                                 \
+	FIELD32(10, 0x00000400) /* UART1 RX reaches threshold. */
+#define CSR7_UART1_IDLE_TRESHOLD                                               \
+	FIELD32(11, 0x00000800) /* UART1 IDLE over threshold. */
+#define CSR7_UART1_TX_BUFF_ERROR                                               \
+	FIELD32(12, 0x00001000) /* UART1 TX buffer error. */
+#define CSR7_UART1_RX_BUFF_ERROR                                               \
+	FIELD32(13, 0x00002000) /* UART1 RX buffer error. */
+#define CSR7_UART2_TX_TRESHOLD                                                 \
+	FIELD32(14, 0x00004000) /* UART2 TX reaches threshold. */
+#define CSR7_UART2_RX_TRESHOLD                                                 \
+	FIELD32(15, 0x00008000) /* UART2 RX reaches threshold. */
+#define CSR7_UART2_IDLE_TRESHOLD                                               \
+	FIELD32(16, 0x00010000) /* UART2 IDLE over threshold. */
+#define CSR7_UART2_TX_BUFF_ERROR                                               \
+	FIELD32(17, 0x00020000) /* UART2 TX buffer error. */
+#define CSR7_UART2_RX_BUFF_ERROR                                               \
+	FIELD32(18, 0x00040000) /* UART2 RX buffer error. */
+#define CSR7_TIMER_CSR3_EXPIRE                                                 \
+	FIELD32(19,                                                            \
+		0x00080000) /* TIMECSR3 timer expired (802.1H quiet period). */
+
+/*
+ * CSR8: Interrupt mask register.
+ * Write 1 to mask interrupt.
+ */
+#define CSR8_TBCN_EXPIRE                                                       \
+	FIELD32(0, 0x00000001) /* beacon timer expired interrupt. */
+#define CSR8_TWAKE_EXPIRE                                                      \
+	FIELD32(1, 0x00000002) /* wakeup timer expired interrupt. */
+#define CSR8_TATIMW_EXPIRE                                                     \
+	FIELD32(2, 0x00000004) /* timer of atim window expired interrupt. */
+#define CSR8_TXDONE_TXRING                                                     \
+	FIELD32(3, 0x00000008) /* tx ring transmit done interrupt. */
+#define CSR8_TXDONE_ATIMRING                                                   \
+	FIELD32(4, 0x00000010) /* atim ring transmit done interrupt. */
+#define CSR8_TXDONE_PRIORING                                                   \
+	FIELD32(5, 0x00000020) /* priority ring transmit done interrupt. */
+#define CSR8_RXDONE FIELD32(6, 0x00000040) /* receive done interrupt. */
+#define CSR8_DECRYPTION_DONE                                                   \
+	FIELD32(7, 0x00000080) /* Decryption done interrupt. */
+#define CSR8_ENCRYPTION_DONE                                                   \
+	FIELD32(8, 0x00000100) /* Encryption done interrupt. */
+#define CSR8_UART1_TX_TRESHOLD                                                 \
+	FIELD32(9, 0x00000200) /* UART1 TX reaches threshold. */
+#define CSR8_UART1_RX_TRESHOLD                                                 \
+	FIELD32(10, 0x00000400) /* UART1 RX reaches threshold. */
+#define CSR8_UART1_IDLE_TRESHOLD                                               \
+	FIELD32(11, 0x00000800) /* UART1 IDLE over threshold. */
+#define CSR8_UART1_TX_BUFF_ERROR                                               \
+	FIELD32(12, 0x00001000) /* UART1 TX buffer error. */
+#define CSR8_UART1_RX_BUFF_ERROR                                               \
+	FIELD32(13, 0x00002000) /* UART1 RX buffer error. */
+#define CSR8_UART2_TX_TRESHOLD                                                 \
+	FIELD32(14, 0x00004000) /* UART2 TX reaches threshold. */
+#define CSR8_UART2_RX_TRESHOLD                                                 \
+	FIELD32(15, 0x00008000) /* UART2 RX reaches threshold. */
+#define CSR8_UART2_IDLE_TRESHOLD                                               \
+	FIELD32(16, 0x00010000) /* UART2 IDLE over threshold. */
+#define CSR8_UART2_TX_BUFF_ERROR                                               \
+	FIELD32(17, 0x00020000) /* UART2 TX buffer error. */
+#define CSR8_UART2_RX_BUFF_ERROR                                               \
+	FIELD32(18, 0x00040000) /* UART2 RX buffer error. */
+#define CSR8_TIMER_CSR3_EXPIRE                                                 \
+	FIELD32(19,                                                            \
+		0x00080000) /* TIMECSR3 timer expired (802.1H quiet period). */
+
+/*
+ * CSR9: Maximum frame length register.
+ */
+#define CSR9_MAX_FRAME_UNIT                                                    \
+	FIELD32(7,                                                             \
+		0x00000f80) /* maximum frame length in 128b unit, default: 12. */
+
+/*
+ * SECCSR0: WEP control register.
+ */
+#define SECCSR0_KICK_DECRYPT                                                   \
+	FIELD32(0, 0x00000001) /* Kick decryption engine, self-clear. */
+#define SECCSR0_ONE_SHOT                                                       \
+	FIELD32(1, 0x00000002) /* 0: ring mode, 1: One shot only mode. */
+#define SECCSR0_DESC_ADDRESS                                                   \
+	FIELD32(2, 0xfffffffc) /* Descriptor physical address of frame. */
+
+/*
+ * CSR11: Back-off control register.
+ */
+#define CSR11_CWMIN                                                            \
+	FIELD32(0, 0x0000000f) /* CWmin. Default cwmin is 31 (2^5 - 1). */
+#define CSR11_CWMAX                                                            \
+	FIELD32(4, 0x000000f0) /* CWmax. Default cwmax is 1023 (2^10 - 1). */
+#define CSR11_SLOT_TIME                                                        \
+	FIELD32(8, 0x00001f00) /* slot time, default is 20us for 802.11b */
+#define CSR11_CW_SELECT                                                        \
+	FIELD32(13,                                                            \
+		0x00002000) /* CWmin/CWmax selection, 1: Register, 0: TXD. */
+#define CSR11_LONG_RETRY FIELD32(16, 0x00ff0000) /* long retry count. */
+#define CSR11_SHORT_RETRY FIELD32(24, 0xff000000) /* short retry count. */
+
+/*
+ * CSR12: Synchronization configuration register 0.
+ * All units in 1/16 TU.
+ */
+#define CSR12_BEACON_INTERVAL                                                  \
+	FIELD32(0, 0x0000ffff) /* beacon interval, default is 100 TU. */
+#define CSR12_CFPMAX_DURATION                                                  \
+	FIELD32(16, 0xffff0000) /* cfp maximum duration, default is 100 TU. */
+
+/*
+ * CSR13: Synchronization configuration register 1.
+ * All units in 1/16 TU.
+ */
+#define CSR13_ATIMW_DURATION FIELD32(0, 0x0000ffff) /* atim window duration. */
+#define CSR13_CFP_PERIOD                                                       \
+	FIELD32(16, 0x00ff0000) /* cfp period, default is 0 TU. */
+
+/*
+ * CSR14: Synchronization control register.
+ */
+#define CSR14_TSF_COUNT FIELD32(0, 0x00000001) /* enable tsf auto counting. */
+#define CSR14_TSF_SYNC                                                         \
+	FIELD32(1,                                                             \
+		0x00000006) /* tsf sync, 0: disable, 1: infra, 2: ad-hoc mode. */
+#define CSR14_TBCN FIELD32(3, 0x00000008) /* enable tbcn with reload value. */
+#define CSR14_TCFP                                                             \
+	FIELD32(4, 0x00000010) /* enable tcfp & cfp / cp switching. */
+#define CSR14_TATIMW                                                           \
+	FIELD32(5, 0x00000020) /* enable tatimw & atim window switching. */
+#define CSR14_BEACON_GEN FIELD32(6, 0x00000040) /* enable beacon generator. */
+#define CSR14_CFP_COUNT_PRELOAD                                                \
+	FIELD32(8, 0x0000ff00) /* cfp count preload value. */
+#define CSR14_TBCM_PRELOAD                                                     \
+	FIELD32(16, 0xffff0000) /* tbcn preload value in units of 64us. */
+
+/*
+ * CSR15: Synchronization status register.
+ */
+#define CSR15_CFP                                                              \
+	FIELD32(0, 0x00000001) /* ASIC is in contention-free period. */
+#define CSR15_ATIMW FIELD32(1, 0x00000002) /* ASIC is in ATIM window. */
+#define CSR15_BEACON_SENT FIELD32(2, 0x00000004) /* Beacon has been sent. */
+
+/*
+ * CSR16: TSF timer register 0.
+ */
+#define CSR16_LOW_TSFTIMER FIELD32(0, 0xffffffff)
+
+/*
+ * CSR17: TSF timer register 1.
+ */
+#define CSR17_HIGH_TSFTIMER FIELD32(0, 0xffffffff)
+
+/*
+ * CSR18: IFS timer register 0.
+ */
+#define CSR18_SIFS FIELD32(0, 0x000001ff) /* sifs, default is 10 us. */
+#define CSR18_PIFS FIELD32(16, 0x01f00000) /* pifs, default is 30 us. */
+
+/*
+ * CSR19: IFS timer register 1.
+ */
+#define CSR19_DIFS FIELD32(0, 0x0000ffff) /* difs, default is 50 us. */
+#define CSR19_EIFS FIELD32(16, 0xffff0000) /* eifs, default is 364 us. */
+
+/*
+ * CSR20: Wakeup timer register.
+ */
+#define CSR20_DELAY_AFTER_TBCN                                                 \
+	FIELD32(0,                                                             \
+		0x0000ffff) /* delay after tbcn expired in units of 1/16 TU. */
+#define CSR20_TBCN_BEFORE_WAKEUP                                               \
+	FIELD32(16, 0x00ff0000) /* number of beacon before wakeup. */
+#define CSR20_AUTOWAKE                                                         \
+	FIELD32(24, 0x01000000) /* enable auto wakeup / sleep mechanism. */
+
+/*
+ * CSR21: EEPROM control register.
+ */
+#define CSR21_RELOAD                                                           \
+	FIELD32(0, 0x00000001) /* Write 1 to reload eeprom content. */
+#define CSR21_EEPROM_DATA_CLOCK FIELD32(1, 0x00000002)
+#define CSR21_EEPROM_CHIP_SELECT FIELD32(2, 0x00000004)
+#define CSR21_EEPROM_DATA_IN FIELD32(3, 0x00000008)
+#define CSR21_EEPROM_DATA_OUT FIELD32(4, 0x00000010)
+#define CSR21_TYPE_93C46 FIELD32(5, 0x00000020) /* 1: 93c46, 0:93c66. */
+
+/*
+ * CSR22: CFP control register.
+ */
+#define CSR22_CFP_DURATION_REMAIN                                              \
+	FIELD32(0, 0x0000ffff) /* cfp duration remain, in units of TU. */
+#define CSR22_RELOAD_CFP_DURATION                                              \
+	FIELD32(16, 0x00010000) /* Write 1 to reload cfp duration remain. */
+
+/*
+ * TX / RX Registers.
+ * Some values are given in TU, where 1 TU == 1024 us.
+ */
+
+/*
+ * TXCSR0: TX Control Register.
+ */
+#define TXCSR0_KICK_TX FIELD32(0, 0x00000001) /* kick tx ring. */
+#define TXCSR0_KICK_ATIM FIELD32(1, 0x00000002) /* kick atim ring. */
+#define TXCSR0_KICK_PRIO FIELD32(2, 0x00000004) /* kick priority ring. */
+#define TXCSR0_ABORT                                                           \
+	FIELD32(3, 0x00000008) /* abort all transmit related ring operation. */
+
+/*
+ * TXCSR1: TX Configuration Register.
+ */
+#define TXCSR1_ACK_TIMEOUT                                                     \
+	FIELD32(0,                                                             \
+		0x000001ff) /* ack timeout, default = sifs + 2*slottime + acktime @ 1mbps. */
+#define TXCSR1_ACK_CONSUME_TIME                                                \
+	FIELD32(9,                                                             \
+		0x0003fe00) /* ack consume time, default = sifs + acktime @ 1mbps. */
+#define TXCSR1_TSF_OFFSET FIELD32(18, 0x00fc0000) /* insert tsf offset. */
+#define TXCSR1_AUTORESPONDER                                                   \
+	FIELD32(24,                                                            \
+		0x01000000) /* enable auto responder which include ack & cts. */
+
+/*
+ * TXCSR2: Tx descriptor configuration register.
+ */
+#define TXCSR2_TXD_SIZE                                                        \
+	FIELD32(0, 0x000000ff) /* tx descriptor size, default is 48. */
+#define TXCSR2_NUM_TXD FIELD32(8, 0x0000ff00) /* number of txd in ring. */
+#define TXCSR2_NUM_ATIM FIELD32(16, 0x00ff0000) /* number of atim in ring. */
+#define TXCSR2_NUM_PRIO                                                        \
+	FIELD32(24, 0xff000000) /* number of priority in ring. */
+
+/*
+ * TXCSR3: TX Ring Base address register.
+ */
+#define TXCSR3_TX_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR4: TX Atim Ring Base address register.
+ */
+#define TXCSR4_ATIM_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR5: TX Prio Ring Base address register.
+ */
+#define TXCSR5_PRIO_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR6: Beacon Base address register.
+ */
+#define TXCSR6_BEACON_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * TXCSR7: Auto responder control register.
+ */
+#define TXCSR7_AR_POWERMANAGEMENT                                              \
+	FIELD32(0, 0x00000001) /* auto responder power management bit. */
+
+/*
+ * TXCSR8: CCK Tx BBP register.
+ */
+#define TXCSR8_CCK_SIGNAL                                                      \
+	FIELD32(0, 0x000000ff) /* BBP rate field address for CCK. */
+#define TXCSR8_CCK_SERVICE                                                     \
+	FIELD32(8, 0x0000ff00) /* BBP service field address for CCK. */
+#define TXCSR8_CCK_LENGTH_LOW                                                  \
+	FIELD32(16, 0x00ff0000) /* BBP length low byte address for CCK. */
+#define TXCSR8_CCK_LENGTH_HIGH                                                 \
+	FIELD32(24, 0xff000000) /* BBP length high byte address for CCK. */
+
+/* 
+ * TXCSR9: OFDM TX BBP registers
+ */
+#define TXCSR9_OFDM_RATE                                                       \
+	FIELD32(0, 0x000000ff) /* BBP rate field address for OFDM. */
+#define TXCSR9_OFDM_SERVICE                                                    \
+	FIELD32(8, 0x0000ff00) /* BBP service field address for OFDM. */
+#define TXCSR9_OFDM_LENGTH_LOW                                                 \
+	FIELD32(16, 0x00ff0000) /* BBP length low byte address for OFDM. */
+#define TXCSR9_OFDM_LENGTH_HIGH                                                \
+	FIELD32(24, 0xff000000) /* BBP length high byte address for OFDM. */
+
+/*
+ * RXCSR0: RX Control Register.
+ */
+#define RXCSR0_DISABLE_RX FIELD32(0, 0x00000001) /* disable rx engine. */
+#define RXCSR0_DROP_CRC FIELD32(1, 0x00000002) /* drop crc error. */
+#define RXCSR0_DROP_PHYSICAL FIELD32(2, 0x00000004) /* drop physical error. */
+#define RXCSR0_DROP_CONTROL FIELD32(3, 0x00000008) /* drop control frame. */
+#define RXCSR0_DROP_NOT_TO_ME                                                  \
+	FIELD32(4, 0x00000010) /* drop not to me unicast frame. */
+#define RXCSR0_DROP_TODS                                                       \
+	FIELD32(5, 0x00000020) /* drop frame tods bit is true. */
+#define RXCSR0_DROP_VERSION_ERROR                                              \
+	FIELD32(6, 0x00000040) /* drop version error frame. */
+#define RXCSR0_PASS_CRC                                                        \
+	FIELD32(7, 0x00000080) /* pass all packets with crc attached. */
+#define RXCSR0_PASS_PLCP                                                       \
+	FIELD32(8,                                                             \
+		0x00000100) /* Pass all packets with 4 bytes PLCP attached. */
+#define RXCSR0_DROP_MCAST FIELD32(9, 0x00000200) /* Drop multicast frames. */
+#define RXCSR0_DROP_BCAST FIELD32(10, 0x00000400) /* Drop broadcast frames. */
+#define RXCSR0_ENABLE_QOS                                                      \
+	FIELD32(11, 0x00000800) /* Accept QOS data frame and parse QOS field. */
+
+/*
+ * RXCSR1: RX descriptor configuration register.
+ */
+#define RXCSR1_RXD_SIZE                                                        \
+	FIELD32(0, 0x000000ff) /* rx descriptor size, default is 32b. */
+#define RXCSR1_NUM_RXD FIELD32(8, 0x0000ff00) /* number of rxd in ring. */
+
+/*
+ * RXCSR2: RX Ring base address register.
+ */
+#define RXCSR2_RX_RING_REGISTER FIELD32(0, 0xffffffff)
+
+/*
+ * RXCSR3: BBP ID register for Rx operation.
+ */
+#define RXCSR3_BBP_ID0 FIELD32(0, 0x0000007f) /* bbp register 0 id. */
+#define RXCSR3_BBP_ID0_VALID                                                   \
+	FIELD32(7, 0x00000080) /* bbp register 0 id is valid or not. */
+#define RXCSR3_BBP_ID1 FIELD32(8, 0x00007f00) /* bbp register 1 id. */
+#define RXCSR3_BBP_ID1_VALID                                                   \
+	FIELD32(15, 0x00008000) /* bbp register 1 id is valid or not. */
+#define RXCSR3_BBP_ID2 FIELD32(16, 0x007f0000) /* bbp register 2 id. */
+#define RXCSR3_BBP_ID2_VALID                                                   \
+	FIELD32(23, 0x00800000) /* bbp register 2 id is valid or not. */
+#define RXCSR3_BBP_ID3 FIELD32(24, 0x7f000000) /* bbp register 3 id. */
+#define RXCSR3_BBP_ID3_VALID                                                   \
+	FIELD32(31, 0x80000000) /* bbp register 3 id is valid or not. */
+
+/*
+ * ARCSR1: Auto Responder PLCP config register 1.
+ */
+#define ARCSR1_AR_BBP_DATA2                                                    \
+	FIELD32(0, 0x000000ff) /* Auto responder BBP register 2 data. */
+#define ARCSR1_AR_BBP_ID2                                                      \
+	FIELD32(8, 0x0000ff00) /* Auto responder BBP register 2 Id. */
+#define ARCSR1_AR_BBP_DATA3                                                    \
+	FIELD32(16, 0x00ff0000) /* Auto responder BBP register 3 data. */
+#define ARCSR1_AR_BBP_ID3                                                      \
+	FIELD32(24, 0xff000000) /* Auto responder BBP register 3 Id. */
+
+/*
+ * Miscellaneous Registers.
+ * Some values are given in TU, where 1 TU == 1024 us.
+ */
+
+/*
+ * PCISR: PCI control register.
+ */
+#define PCICSR_BIG_ENDIAN                                                      \
+	FIELD32(0, 0x00000001) /* 1: big endian, 0: little endian. */
+#define PCICSR_RX_TRESHOLD                                                     \
+	FIELD32(1, 0x00000006) /* rx threshold in dw to start pci access */
+/* 0: 16dw (default), 1: 8dw, 2: 4dw, 3: 32dw. */
+#define PCICSR_TX_TRESHOLD                                                     \
+	FIELD32(3, 0x00000018) /* tx threshold in dw to start pci access */
+/* 0: 0dw (default), 1: 1dw, 2: 4dw, 3: forward. */
+#define PCICSR_BURST_LENTH FIELD32(5, 0x00000060) /* pci burst length */
+/* 0: 4dw (default), 1: 8dw, 2: 16dw, 3: 32dw. */
+#define PCICSR_ENABLE_CLK FIELD32(7, 0x00000080) /* enable clk_run, */
+/* pci clock cannot drop to a non-operational state. */
+#define PCICSR_READ_MULTIPLE                                                   \
+	FIELD32(8, 0x00000100) /* Enable memory read multiple. */
+#define PCICSR_WRITE_INVALID                                                   \
+	FIELD32(9, 0x00000200) /* Enable memory write & invalid. */
+
+/*
+ * PWRCSR1: Manual power control / status register.
+ * state: 0 deep_sleep, 1: sleep, 2: standby, 3: awake.
+ */
+#define PWRCSR1_SET_STATE                                                      \
+	FIELD32(0,                                                             \
+		0x00000001) /* set state. Write 1 to trigger, self cleared. */
+#define PWRCSR1_BBP_DESIRE_STATE FIELD32(1, 0x00000006) /* BBP desired state. */
+#define PWRCSR1_RF_DESIRE_STATE FIELD32(3, 0x00000018) /* RF desired state. */
+#define PWRCSR1_BBP_CURR_STATE FIELD32(5, 0x00000060) /* BBP current state. */
+#define PWRCSR1_RF_CURR_STATE FIELD32(7, 0x00000180) /* RF current state. */
+#define PWRCSR1_PUT_TO_SLEEP                                                   \
+	FIELD32(9,                                                             \
+		0x00000200) /* put to sleep. Write 1 to trigger, self cleared. */
+
+/*
+ * TIMECSR: Timer control register.
+ */
+#define TIMECSR_US_COUNT                                                       \
+	FIELD32(0, 0x000000ff) /* 1 us timer count in units of clock cycles. */
+#define TIMECSR_US_64_COUNT                                                    \
+	FIELD32(8, 0x0000ff00) /* 64 us timer count in units of 1 us timer. */
+#define TIMECSR_BEACON_EXPECT                                                  \
+	FIELD32(16, 0x00070000) /* Beacon expect window. */
+
+/*
+ * MACCSR1: MAC configuration register 1.
+ */
+#define MACCSR1_KICK_RX                                                        \
+	FIELD32(0, 0x00000001) /* kick one-shot rx in one-shot rx mode. */
+#define MACCSR1_ONESHOT_RXMODE                                                 \
+	FIELD32(1, 0x00000002) /* enable one-shot rx mode for debugging. */
+#define MACCSR1_BBPRX_RESET_MODE                                               \
+	FIELD32(2, 0x00000004) /* ralink bbp rx reset mode. */
+#define MACCSR1_AUTO_TXBBP                                                     \
+	FIELD32(3, 0x00000008) /* auto tx logic access bbp control register. */
+#define MACCSR1_AUTO_RXBBP                                                     \
+	FIELD32(4, 0x00000010) /* auto rx logic access bbp control register. */
+#define MACCSR1_LOOPBACK FIELD32(5, 0x00000060) /* loopback mode. */
+/* 0: normal, 1: internal, 2: external, 3:rsvd. */
+#define MACCSR1_INTERSIL_IF                                                    \
+	FIELD32(7, 0x00000080) /* intersil if calibration pin. */
+
+/*
+ * RALINKCSR: Ralink Rx auto-reset BBCR.
+ */
+#define RALINKCSR_AR_BBP_DATA0                                                 \
+	FIELD32(0, 0x000000ff) /* auto reset bbp register 0 data. */
+#define RALINKCSR_AR_BBP_ID0                                                   \
+	FIELD32(8, 0x00007f00) /* auto reset bbp register 0 id. */
+#define RALINKCSR_AR_BBP_VALID0                                                \
+	FIELD32(15, 0x00008000) /* auto reset bbp register 0 valid. */
+#define RALINKCSR_AR_BBP_DATA1                                                 \
+	FIELD32(16, 0x00ff0000) /* auto reset bbp register 1 data. */
+#define RALINKCSR_AR_BBP_ID1                                                   \
+	FIELD32(24, 0x7f000000) /* auto reset bbp register 1 id. */
+#define RALINKCSR_AR_BBP_VALID1                                                \
+	FIELD32(31, 0x80000000) /* auto reset bbp register 1 valid. */
+
+/*
+ * BCNCSR: Beacon interval control register.
+ */
+#define BCNCSR_CHANGE                                                          \
+	FIELD32(0, 0x00000001) /* write one to change beacon interval. */
+#define BCNCSR_DELTATIME FIELD32(1, 0x0000001e) /* the delta time value. */
+#define BCNCSR_NUM_BEACON                                                      \
+	FIELD32(5, 0x00001fe0) /* number of beacon according to mode. */
+#define BCNCSR_MODE FIELD32(13, 0x00006000) /* please refer to asic specs. */
+#define BCNCSR_PLUS                                                            \
+	FIELD32(15, 0x00008000) /* plus or minus delta time value. */
+
+/*
+ * BBPCSR: BBP serial control register.
+ */
+#define BBPCSR_VALUE                                                           \
+	FIELD32(0, 0x000000ff) /* register value to program into bbp. */
+#define BBPCSR_REGNUM FIELD32(8, 0x00007f00) /* selected bbp register. */
+#define BBPCSR_BUSY                                                            \
+	FIELD32(15, 0x00008000) /* 1: asic is busy executing bbp programming. */
+#define BBPCSR_WRITE_CONTROL                                                   \
+	FIELD32(16, 0x00010000) /* 1: write bbp, 0: read bbp. */
+
+/*
+ * RFCSR: RF serial control register.
+ */
+#define RFCSR_VALUE                                                            \
+	FIELD32(0, 0x00ffffff) /* register value + id to program into rf/if. */
+#define RFCSR_NUMBER_OF_BITS                                                   \
+	FIELD32(24,                                                            \
+		0x1f000000) /* number of bits used in value (i:20, rfmd:22). */
+#define RFCSR_IF_SELECT                                                        \
+	FIELD32(29, 0x20000000) /* chip to program: 0: rf, 1: if. */
+#define RFCSR_PLL_LD FIELD32(30, 0x40000000) /* rf pll_ld status. */
+#define RFCSR_BUSY                                                             \
+	FIELD32(31, 0x80000000) /* 1: asic is busy executing rf programming. */
+
+/*
+ * LEDCSR: LED control register.
+ */
+#define LEDCSR_ON_PERIOD FIELD32(0, 0x000000ff) /* on period, default 70ms. */
+#define LEDCSR_OFF_PERIOD FIELD32(8, 0x0000ff00) /* off period, default 30ms. */
+#define LEDCSR_LINK FIELD32(16, 0x00010000) /* 0: linkoff, 1: linkup. */
+#define LEDCSR_ACTIVITY FIELD32(17, 0x00020000) /* 0: idle, 1: active. */
+#define LEDCSR_LINK_POLARITY                                                   \
+	FIELD32(18, 0x00040000) /* 0: active low, 1: active high. */
+#define LEDCSR_ACTIVITY_POLARITY                                               \
+	FIELD32(19, 0x00080000) /* 0: active low, 1: active high. */
+#define LEDCSR_LED_DEFAULT                                                     \
+	FIELD32(20, 0x00100000) /* LED state for "enable" 0: ON, 1: OFF. */
+
+/*
+ * GPIOCSR: GPIO control register.
+ */
+#define GPIOCSR_BIT0 FIELD32(0, 0x00000001)
+#define GPIOCSR_BIT1 FIELD32(1, 0x00000002)
+#define GPIOCSR_BIT2 FIELD32(2, 0x00000004)
+#define GPIOCSR_BIT3 FIELD32(3, 0x00000008)
+#define GPIOCSR_BIT4 FIELD32(4, 0x00000010)
+#define GPIOCSR_BIT5 FIELD32(5, 0x00000020)
+#define GPIOCSR_BIT6 FIELD32(6, 0x00000040)
+#define GPIOCSR_BIT7 FIELD32(7, 0x00000080)
+#define GPIOCSR_DIR0 FIELD32(8, 0x00000100)
+#define GPIOCSR_DIR1 FIELD32(9, 0x00000200)
+#define GPIOCSR_DIR2 FIELD32(10, 0x00000400)
+#define GPIOCSR_DIR3 FIELD32(11, 0x00000800)
+#define GPIOCSR_DIR4 FIELD32(12, 0x00001000)
+#define GPIOCSR_DIR5 FIELD32(13, 0x00002000)
+#define GPIOCSR_DIR6 FIELD32(14, 0x00004000)
+#define GPIOCSR_DIR7 FIELD32(15, 0x00008000)
+
+/*
+ * BCNCSR1: Tx BEACON offset time control register.
+ */
+#define BCNCSR1_PRELOAD                                                        \
+	FIELD32(0, 0x0000ffff) /* beacon timer offset in units of usec. */
+#define BCNCSR1_BEACON_CWMIN FIELD32(16, 0x000f0000) /* 2^CwMin. */
+
+/*
+ * MACCSR2: TX_PE to RX_PE turn-around time control register
+ */
+#define MACCSR2_DELAY                                                          \
+	FIELD32(0,                                                             \
+		0x000000ff) /* RX_PE low width, in units of pci clock cycle. */
+
+/*
+ * SECCSR1_RT2509: WEP control register 
+ */
+#define SECCSR1_KICK_ENCRYPT                                                   \
+	FIELD32(0, 0x00000001) /* Kick encryption engine, self-clear. */
+#define SECCSR1_ONE_SHOT                                                       \
+	FIELD32(1, 0x00000002) /* 0: ring mode, 1: One shot only mode. */
+#define SECCSR1_DESC_ADDRESS                                                   \
+	FIELD32(2, 0xfffffffc) /* Descriptor physical address of frame. */
+
+/*
+ * RF registers
+ */
+#define RF1_TUNER FIELD32(17, 0x00020000)
+#define RF3_TUNER FIELD32(8, 0x00000100)
+#define RF3_TXPOWER FIELD32(9, 0x00003e00)
+
+/*
+ * EEPROM content format.
+ * The wordsize of the EEPROM is 16 bits.
+ */
+
+/*
+ * EEPROM operation defines.
+ */
+#define EEPROM_WIDTH_93c46 6
+#define EEPROM_WIDTH_93c66 8
+#define EEPROM_WRITE_OPCODE 0x05
+#define EEPROM_READ_OPCODE 0x06
+
+/*
+ * EEPROM antenna.
+ */
+#define EEPROM_ANTENNA_NUM FIELD16(0, 0x0003) /* Number of antennas. */
+#define EEPROM_ANTENNA_TX_DEFAULT                                              \
+	FIELD16(2, 0x000c) /* Default antenna 0: diversity, 1: A, 2: B. */
+#define EEPROM_ANTENNA_RX_DEFAULT                                              \
+	FIELD16(4, 0x0030) /* Default antenna 0: diversity, 1: A, 2: B. */
+#define EEPROM_ANTENNA_LED_MODE                                                \
+	FIELD16(6, 0x01c0) /* 0: default, 1: TX/RX activity, */
+/* 2: Single LED (ignore link), 3: reserved. */
+#define EEPROM_ANTENNA_DYN_TXAGC                                               \
+	FIELD16(9, 0x0200) /* Dynamic TX AGC control. */
+#define EEPROM_ANTENNA_HARDWARE_RADIO                                          \
+	FIELD16(10, 0x0400) /* 1: Hardware controlled radio. Read GPIO0. */
+#define EEPROM_ANTENNA_RF_TYPE                                                 \
+	FIELD16(11, 0xf800) /* rf_type of this adapter. */
+
+/*
+ * EEPROM geography.
+ */
+#define EEPROM_GEOGRAPHY_GEO                                                   \
+	FIELD16(8, 0x0f00) /* Default geography setting for device. */
+
+/*
+ * EEPROM NIC config.
+ */
+#define EEPROM_NIC_CARDBUS_ACCEL FIELD16(0, 0x0001) /* 0: enable, 1: disable. */
+#define EEPROM_NIC_DYN_BBP_TUNE FIELD16(1, 0x0002) /* 0: enable, 1: disable. */
+#define EEPROM_NIC_CCK_TX_POWER                                                \
+	FIELD16(2, 0x000c) /* CCK TX power compensation. */
+
+/*
+ * EEPROM TX power.
+ */
+#define EEPROM_TX_POWER1 FIELD16(0, 0x00ff)
+#define EEPROM_TX_POWER2 FIELD16(8, 0xff00)
+
+/*
+ * EEPROM BBP.
+ */
+#define EEPROM_BBP_VALUE FIELD16(0, 0x00ff)
+#define EEPROM_BBP_REG_ID FIELD16(8, 0xff00)
+
+/*
+ * EEPROM VERSION.
+ */
+#define EEPROM_VERSION_FAE FIELD16(0, 0x00ff) /* FAE release number. */
+#define EEPROM_VERSION FIELD16(8, 0xff00)
+
+/*
+ * DMA ring defines and data structures.
+ */
+
+/*
+ * Size of a single descriptor.
+ */
+#define SIZE_DESCRIPTOR 48
+
+/*
+ * TX descriptor format for TX, PRIO, ATIM and Beacon Ring.
+ */
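+/*
+ * Each descriptor word below is followed by the FIELD32 definitions for
+ * the bit fields it contains; rt2x00_set_field32() and
+ * rt2x00_get_field32() combine these masks with the word, as seen in the
+ * TX path above.
+ */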
+struct _txd {
+	u32 word0;
+#define TXD_W0_OWNER_NIC FIELD32(0, 0x00000001)
+#define TXD_W0_VALID FIELD32(1, 0x00000002)
+#define TXD_W0_RESULT FIELD32(2, 0x0000001c) /* Set by device. */
+#define TXD_W0_RETRY_COUNT FIELD32(5, 0x000000e0) /* Set by device. */
+#define TXD_W0_MORE_FRAG FIELD32(8, 0x00000100) /* Set by device. */
+#define TXD_W0_ACK FIELD32(9, 0x00000200)
+#define TXD_W0_TIMESTAMP FIELD32(10, 0x00000400)
+#define TXD_W0_OFDM FIELD32(11, 0x00000800)
+#define TXD_W0_CIPHER_OWNER FIELD32(12, 0x00001000)
+#define TXD_W0_IFS FIELD32(13, 0x00006000)
+#define TXD_W0_RETRY_MODE FIELD32(15, 0x00008000)
+#define TXD_W0_DATABYTE_COUNT FIELD32(16, 0x0fff0000)
+#define TXD_W0_CIPHER_ALG FIELD32(29, 0xe0000000)
+
+	u32 word1;
+#define TXD_W1_BUFFER_ADDRESS FIELD32(0, 0xffffffff)
+
+	u32 word2;
+#define TXD_W2_IV_OFFSET FIELD32(0, 0x0000003f)
+#define TXD_W2_AIFS FIELD32(6, 0x000000c0)
+#define TXD_W2_CWMIN FIELD32(8, 0x00000f00)
+#define TXD_W2_CWMAX FIELD32(12, 0x0000f000)
+
+	u32 word3;
+#define TXD_W3_PLCP_SIGNAL FIELD32(0, 0x000000ff)
+#define TXD_W3_PLCP_SERVICE FIELD32(8, 0x0000ff00)
+#define TXD_W3_PLCP_LENGTH_LOW FIELD32(16, 0x00ff0000)
+#define TXD_W3_PLCP_LENGTH_HIGH FIELD32(24, 0xff000000)
+
+	u32 word4;
+#define TXD_W4_IV FIELD32(0, 0xffffffff)
+
+	u32 word5;
+#define TXD_W5_EIV FIELD32(0, 0xffffffff)
+
+	u32 word6;
+#define TXD_W6_KEY FIELD32(0, 0xffffffff)
+
+	u32 word7;
+#define TXD_W7_KEY FIELD32(0, 0xffffffff)
+
+	u32 word8;
+#define TXD_W8_KEY FIELD32(0, 0xffffffff)
+
+	u32 word9;
+#define TXD_W9_KEY FIELD32(0, 0xffffffff)
+
+	u32 word10;
+#define TXD_W10_RTS FIELD32(0, 0x00000001)
+#define TXD_W10_TX_RATE FIELD32(0, 0x000000fe) /* For module only. */
+} __attribute__((packed));
+
+/*
+ * RX descriptor format for RX Ring.
+ */
+struct _rxd {
+	u32 word0;
+#define RXD_W0_OWNER_NIC FIELD32(0, 0x00000001)
+#define RXD_W0_UNICAST_TO_ME FIELD32(1, 0x00000002)
+#define RXD_W0_MULTICAST FIELD32(2, 0x00000004)
+#define RXD_W0_BROADCAST FIELD32(3, 0x00000008)
+#define RXD_W0_MY_BSS FIELD32(4, 0x00000010)
+#define RXD_W0_CRC FIELD32(5, 0x00000020)
+#define RXD_W0_OFDM FIELD32(6, 0x00000040)
+#define RXD_W0_PHYSICAL_ERROR FIELD32(7, 0x00000080)
+#define RXD_W0_CIPHER_OWNER FIELD32(8, 0x00000100)
+#define RXD_W0_ICV_ERROR FIELD32(9, 0x00000200)
+#define RXD_W0_IV_OFFSET FIELD32(10, 0x0000fc00)
+#define RXD_W0_DATABYTE_COUNT FIELD32(16, 0x0fff0000)
+#define RXD_W0_CIPHER_ALG FIELD32(29, 0xe0000000)
+
+	u32 word1;
+#define RXD_W1_BUFFER_ADDRESS FIELD32(0, 0xffffffff)
+
+	u32 word2;
+#define RXD_W2_BBR0 FIELD32(0, 0x000000ff)
+#define RXD_W2_RSSI FIELD32(8, 0x0000ff00)
+#define RXD_W2_TA FIELD32(16, 0xffff0000)
+
+	u32 word3;
+#define RXD_W3_TA FIELD32(0, 0xffffffff)
+
+	u32 word4;
+#define RXD_W4_IV FIELD32(0, 0xffffffff)
+
+	u32 word5;
+#define RXD_W5_EIV FIELD32(0, 0xffffffff)
+
+	u32 word6;
+#define RXD_W6_KEY FIELD32(0, 0xffffffff)
+
+	u32 word7;
+#define RXD_W7_KEY FIELD32(0, 0xffffffff)
+
+	u32 word8;
+#define RXD_W8_KEY FIELD32(0, 0xffffffff)
+
+	u32 word9;
+#define RXD_W9_KEY FIELD32(0, 0xffffffff)
+
+	u32 word10;
+#define RXD_W10_DROP FIELD32(0, 0x00000001)
+} __attribute__((packed));
+
+/*
+ * _rt2x00_pci
+ * The main structure; it holds all state required to communicate with the PCI device.
+ */
+struct _rt2x00_pci {
+	/*
+     * PCI device structure.
+     */
+	struct pci_dev *pci_dev;
+
+	/*
+     * Chipset identification.
+     */
+	struct _rt2x00_chip chip;
+
+	/*
+     * csr_addr
+     * Base address of device registers, all exact register addresses are calculated from this address.
+     */
+	void __iomem *csr_addr;
+
+	/*
+     * RF register values for current channel.
+     */
+	struct _rf_channel channel;
+
+	/*
+     * EEPROM bus width.
+     */
+	u8 eeprom_width;
+
+	u8 __pad; /* For alignment only. */
+
+	/*
+     * EEPROM BBP data.
+     */
+	u16 eeprom[EEPROM_BBP_SIZE];
+
+	/*
+     * DMA packet ring.
+     */
+	struct _data_ring rx;
+	struct _data_ring tx;
+
+	rtdm_irq_t irq_handle;
+	rtdm_lock_t lock;
+
+} __attribute__((packed));
+
+static int rt2x00_get_rf_value(const struct _rt2x00_chip *chip,
+			       const u8 channel, struct _rf_channel *rf_reg)
+{
+	int index = 0x00;
+
+	index = rt2x00_get_channel_index(channel);
+	if (index < 0)
+		return -EINVAL;
+
+	memset(rf_reg, 0x00, sizeof(*rf_reg));
+
+	if (rt2x00_rf(chip, RF2522)) {
+		rf_reg->rf1 = 0x00002050;
+		rf_reg->rf3 = 0x00000101;
+		goto update_rf2_1;
+	}
+	if (rt2x00_rf(chip, RF2523)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf3 = 0x000e0111;
+		rf_reg->rf4 = 0x00000a1b;
+		goto update_rf2_2;
+	}
+	if (rt2x00_rf(chip, RF2524)) {
+		rf_reg->rf1 = 0x00032020;
+		rf_reg->rf3 = 0x00000101;
+		rf_reg->rf4 = 0x00000a1b;
+		goto update_rf2_2;
+	}
+	if (rt2x00_rf(chip, RF2525)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 = 0x00080000;
+		rf_reg->rf3 = 0x00060111;
+		rf_reg->rf4 = 0x00000a1b;
+		goto update_rf2_2;
+	}
+	if (rt2x00_rf(chip, RF2525E)) {
+		rf_reg->rf2 = 0x00080000;
+		rf_reg->rf3 = 0x00060111;
+		goto update_rf2_3;
+	}
+	if (rt2x00_rf(chip, RF5222)) {
+		rf_reg->rf3 = 0x00000101;
+		goto update_rf2_3;
+	}
+
+	return -EINVAL;
+
+update_rf2_1: /* RF2522. */
+	rf_reg->rf2 = 0x000c1fda + (index * 0x14);
+	if (channel == 14)
+		rf_reg->rf2 += 0x0000001c;
+	goto exit;
+
+update_rf2_2: /* RF2523, RF2524, RF2525. */
+	rf_reg->rf2 |= 0x00000c9e + (index * 0x04);
+	if (rf_reg->rf2 & 0x00000040)
+		rf_reg->rf2 += 0x00000040;
+	if (channel == 14) {
+		rf_reg->rf2 += 0x08;
+		rf_reg->rf4 &= ~0x00000018;
+	}
+	goto exit;
+
+update_rf2_3: /* RF2525E, RF5222. */
+	if (OFDM_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 |= 0x00001136 + (index * 0x04);
+		if (rf_reg->rf2 & 0x00000040)
+			rf_reg->rf2 += 0x00000040;
+		if (channel == 14) {
+			rf_reg->rf2 += 0x04;
+			rf_reg->rf4 = 0x00000a1b;
+		} else {
+			rf_reg->rf4 = 0x00000a0b;
+		}
+	} else if (UNII_LOW_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf2 = 0x00018896 + (index * 0x04);
+		rf_reg->rf4 = 0x00000a1f;
+	} else if (HIPERLAN2_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022010;
+		rf_reg->rf2 = 0x00008802 + (index * 0x04);
+		rf_reg->rf4 = 0x00000a0f;
+	} else if (UNII_HIGH_CHANNEL(channel)) {
+		rf_reg->rf1 = 0x00022020;
+		rf_reg->rf2 = 0x000090a6 + (index * 0x08);
+		rf_reg->rf4 = 0x00000a07;
+	}
+
+exit:
+	rf_reg->rf1 = cpu_to_le32(rf_reg->rf1);
+	rf_reg->rf2 = cpu_to_le32(rf_reg->rf2);
+	rf_reg->rf3 = cpu_to_le32(rf_reg->rf3);
+	rf_reg->rf4 = cpu_to_le32(rf_reg->rf4);
+
+	return 0;
+}
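+
+/*
+ * Worked example (illustrative only): for an RF2522 on channel 1,
+ * rt2x00_get_channel_index(1) yields index 0, so the code above produces
+ * rf1 = 0x00002050, rf2 = 0x000c1fda + (0 * 0x14) = 0x000c1fda,
+ * rf3 = 0x00000101 and rf4 = 0, each stored little-endian by the exit path.
+ */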
+
+/*
+ * Get the txpower hardware value (0-31) matching the requested percentage.
+ */
+static inline u8 rt2x00_get_txpower(const struct _rt2x00_chip *chip,
+				    const u8 tx_power)
+{
+	/* Multiply before dividing so values below 100% do not truncate to 0. */
+	return tx_power * 31 / 100;
+
+	/*
+      if(tx_power <= 3)
+      return 19;
+      else if(tx_power <= 12)
+      return 22;
+      else if(tx_power <= 25)
+      return 25;
+      else if(tx_power <= 50)
+      return 28;
+      else if(tx_power <= 75)
+      return 30;
+      else if(tx_power <= 100)
+      return 31;
+    
+      ERROR("Invalid tx_power.\n");
+      return 31;
+    */
+}
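+
+/*
+ * Scaling sketch (illustrative only): with the linear mapping above,
+ * a request of 50% yields 50 * 31 / 100 = 15 and 100% yields 31,
+ * i.e. the full 5-bit hardware range 0..31.
+ */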
+
+/*
+ * Ring handlers.
+ */
+static inline int
+rt2x00_pci_alloc_ring(struct _rt2x00_core *core, struct _data_ring *ring,
+		      const u8 ring_type, const u16 max_entries,
+		      const u16 entry_size, const u16 desc_size)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	rt2x00_init_ring(core, ring, ring_type, max_entries, entry_size,
+			 desc_size);
+
+	ring->data_addr =
+		dma_alloc_coherent(&rt2x00pci->pci_dev->dev, ring->mem_size,
+				   &ring->data_dma, GFP_KERNEL);
+	if (!ring->data_addr)
+		return -ENOMEM;
+
+	memset(ring->data_addr, 0x00, ring->mem_size);
+
+	return 0;
+}
+
+static int rt2x00_pci_alloc_rings(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	if (rt2x00_pci_alloc_ring(core, &rt2x00pci->rx, RING_RX, RX_ENTRIES,
+				  DATA_FRAME_SIZE, SIZE_DESCRIPTOR) ||
+	    rt2x00_pci_alloc_ring(core, &rt2x00pci->tx, RING_TX, TX_ENTRIES,
+				  DATA_FRAME_SIZE, SIZE_DESCRIPTOR)) {
+		ERROR("DMA allocation failed.\n");
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static inline void rt2x00_pci_free_ring(struct _data_ring *ring)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(ring->core);
+
+	if (ring->data_addr)
+		dma_free_coherent(&rt2x00pci->pci_dev->dev, ring->mem_size,
+				  ring->data_addr, ring->data_dma);
+	ring->data_addr = NULL;
+
+	rt2x00_deinit_ring(ring);
+}
+
+static void rt2x00_pci_free_rings(struct _rt2x00_core *core)
+{
+	struct _rt2x00_pci *rt2x00pci = rt2x00_priv(core);
+
+	rt2x00_pci_free_ring(&rt2x00pci->rx);
+	rt2x00_pci_free_ring(&rt2x00pci->tx);
+}
+
+/*
+ * Macros for calculating the exact position in the data ring.
+ */
+#define DESC_BASE(__ring) ((void *)((__ring)->data_addr))
+#define DATA_BASE(__ring)                                                      \
+	((void *)(DESC_BASE(__ring) +                                          \
+		  ((__ring)->max_entries * (__ring)->desc_size)))
+
+#define __DESC_ADDR(__ring, __index)                                           \
+	((void *)(DESC_BASE(__ring) + ((__index) * (__ring)->desc_size)))
+#define __DATA_ADDR(__ring, __index)                                           \
+	((void *)(DATA_BASE(__ring) + ((__index) * (__ring)->entry_size)))
+
+#define DESC_ADDR(__ring) (__DESC_ADDR(__ring, (__ring)->index))
+#define DESC_ADDR_DONE(__ring) (__DESC_ADDR(__ring, (__ring)->index_done))
+
+#define DATA_ADDR(__ring) (__DATA_ADDR(__ring, (__ring)->index))
+#define DATA_ADDR_DONE(__ring) (__DATA_ADDR(__ring, (__ring)->index_done))
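+
+/*
+ * Layout sketch (hypothetical sizes, illustrative only): the DMA block
+ * holds all descriptors first, then all data buffers. For a ring with
+ * max_entries = 8, desc_size = 32 and entry_size = 2432:
+ *   __DESC_ADDR(ring, 2) == data_addr + 2 * 32
+ *   DATA_BASE(ring)      == data_addr + 8 * 32
+ *   __DATA_ADDR(ring, 2) == DATA_BASE(ring) + 2 * 2432
+ */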
+
+/*
+ * Register access.
+ * All register accesses go through rt2x00_register_read and rt2x00_register_write.
+ * BBP and RF registers require indirect access through the BBPCSR and RFCSR registers.
+ * The indirect access works with busy bits, so a read or write call can fail.
+ * Specific fields within a register can be accessed with the set and get field
+ * routines, which handle the little-endian/big-endian conversion.
+ */
+#define REGISTER_BUSY_COUNT 10 /* Retries before failing a BBP/RF indirect access. */
+#define REGISTER_BUSY_DELAY 100 /* Delay between register access retries (us). */
+
+static void rt2x00_register_read(const struct _rt2x00_pci *rt2x00pci,
+				 const unsigned long offset, u32 *value)
+{
+	*value = readl((void *)(rt2x00pci->csr_addr + offset));
+}
+
+static void rt2x00_register_multiread(const struct _rt2x00_pci *rt2x00pci,
+				      const unsigned long offset, u32 *value,
+				      const u16 length)
+{
+	memcpy_fromio((void *)value, (void *)(rt2x00pci->csr_addr + offset),
+		      length);
+}
+
+static void rt2x00_register_write(const struct _rt2x00_pci *rt2x00pci,
+				  const unsigned long offset, const u32 value)
+{
+	writel(value, (void *)(rt2x00pci->csr_addr + offset));
+}
+
+static void rt2x00_register_multiwrite(const struct _rt2x00_pci *rt2x00pci,
+				       const unsigned long offset, u32 *value,
+				       const u16 length)
+{
+	memcpy_toio((void *)(rt2x00pci->csr_addr + offset), (void *)value,
+		    length);
+}
+
+static void rt2x00_bbp_regwrite(const struct _rt2x00_pci *rt2x00pci,
+				const u8 reg_id, const u8 value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, BBPCSR, &reg);
+		if (!rt2x00_get_field32(reg, BBPCSR_BUSY))
+			goto bbp_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("BBPCSR register busy. Write failed\n");
+	return;
+
+bbp_write:
+	reg = 0x00000000;
+	rt2x00_set_field32(&reg, BBPCSR_VALUE, value);
+	rt2x00_set_field32(&reg, BBPCSR_REGNUM, reg_id);
+	rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
+	rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 1);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR, reg);
+}
+
+static void rt2x00_bbp_regread(const struct _rt2x00_pci *rt2x00pci,
+			       const u8 reg_id, u8 *value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	/*
+     * To read the requested BBP register we first write its
+     * register id into BBPCSR.
+     */
+	rt2x00_set_field32(&reg, BBPCSR_REGNUM, reg_id);
+	rt2x00_set_field32(&reg, BBPCSR_BUSY, 1);
+	rt2x00_set_field32(&reg, BBPCSR_WRITE_CONTROL, 0);
+
+	rt2x00_register_write(rt2x00pci, BBPCSR, reg);
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, BBPCSR, &reg);
+		if (!rt2x00_get_field32(reg, BBPCSR_BUSY)) {
+			*value = rt2x00_get_field32(reg, BBPCSR_VALUE);
+			return;
+		}
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("BBPCSR register busy. Read failed\n");
+	*value = 0xff;
+}
+
+static void rt2x00_rf_regwrite(const struct _rt2x00_pci *rt2x00pci,
+			       const u32 value)
+{
+	u32 reg = 0x00000000;
+	u8 counter = 0x00;
+
+	for (counter = 0x00; counter < REGISTER_BUSY_COUNT; counter++) {
+		rt2x00_register_read(rt2x00pci, RFCSR, &reg);
+		if (!rt2x00_get_field32(reg, RFCSR_BUSY))
+			goto rf_write;
+		udelay(REGISTER_BUSY_DELAY);
+	}
+
+	ERROR("RFCSR register busy. Write failed\n");
+	return;
+
+rf_write:
+	reg = value;
+	rt2x00_set_field32(&reg, RFCSR_NUMBER_OF_BITS, 20);
+	rt2x00_set_field32(&reg, RFCSR_IF_SELECT, 0);
+	rt2x00_set_field32(&reg, RFCSR_BUSY, 1);
+
+	//  printk(KERN_INFO "DEBUG: %s:%d: reg=%x\n", __FILE__, __LINE__, reg);
+
+	rt2x00_register_write(rt2x00pci, RFCSR, reg);
+}
+
+/*
+ * EEPROM access.
+ * The EEPROM is accessed by word index.
+ * rt2x00_eeprom_read_word is the main access function for the rest of
+ * the module: it takes the index of the EEPROM word, while the bus
+ * width is taken from the device structure.
+ */
+static inline void rt2x00_eeprom_pulse_high(const struct _rt2x00_pci *rt2x00pci,
+					    u32 *flags)
+{
+	rt2x00_set_field32(flags, CSR21_EEPROM_DATA_CLOCK, 1);
+	rt2x00_register_write(rt2x00pci, CSR21, *flags);
+	udelay(1);
+}
+
+static inline void rt2x00_eeprom_pulse_low(const struct _rt2x00_pci *rt2x00pci,
+					   u32 *flags)
+{
+	rt2x00_set_field32(flags, CSR21_EEPROM_DATA_CLOCK, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, *flags);
+	udelay(1);
+}
+
+static void rt2x00_eeprom_shift_out_bits(const struct _rt2x00_pci *rt2x00pci,
+					 const u16 data, const u16 count)
+{
+	u32 flags = 0x00000000;
+	u32 mask = 0x0001 << (count - 1);
+
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+	/*
+     * Clear data flags.
+     */
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+
+	/*
+     * Start writing all bits. 
+     */
+	do {
+		/*
+         * Only set the data_in flag when we are at the correct bit.
+         */
+		rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN,
+				   (data & mask) ? 1 : 0);
+
+		rt2x00_register_write(rt2x00pci, CSR21, flags);
+
+		rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+		rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+
+		/*
+         * Shift to next bit.
+         */
+		mask >>= 1;
+	} while (mask);
+
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, flags);
+}
+
+static void rt2x00_eeprom_shift_in_bits(const struct _rt2x00_pci *rt2x00pci,
+					u16 *data)
+{
+	u32 flags = 0x00000000;
+	u8 counter = 0x00;
+
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+	/*
+     * Clear data flags.
+     */
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+
+	/*
+     * Start reading all 16 bits.
+     */
+	for (counter = 0; counter < 16; counter++) {
+		/*
+         * Shift to the next bit.
+         */
+		*data <<= 1;
+
+		rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+
+		rt2x00_register_read(rt2x00pci, CSR21, &flags);
+
+		/*
+         * Clear data_in flag and set the data bit to 1 when the data_out flag is set.
+         */
+		rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+		if (rt2x00_get_field32(flags, CSR21_EEPROM_DATA_OUT))
+			*data |= 1;
+
+		rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+	}
+}
+
+static u16 rt2x00_eeprom_read_word(const struct _rt2x00_pci *rt2x00pci,
+				   const u8 word)
+{
+	u32 flags = 0x00000000;
+	u16 data = 0x0000;
+
+	/*
+     * Clear all flags, and enable chip select.
+     */
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_OUT, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_CLOCK, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_CHIP_SELECT, 1);
+	rt2x00_register_write(rt2x00pci, CSR21, flags);
+
+	/*
+     * kick a pulse.
+     */
+	rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+	rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+
+	/*
+     * Select the read opcode and bus_width.
+     */
+	rt2x00_eeprom_shift_out_bits(rt2x00pci, EEPROM_READ_OPCODE, 3);
+	rt2x00_eeprom_shift_out_bits(rt2x00pci, word, rt2x00pci->eeprom_width);
+
+	rt2x00_eeprom_shift_in_bits(rt2x00pci, &data);
+
+	/*
+     * Clear chip_select and data_in flags.
+     */
+	rt2x00_register_read(rt2x00pci, CSR21, &flags);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_DATA_IN, 0);
+	rt2x00_set_field32(&flags, CSR21_EEPROM_CHIP_SELECT, 0);
+	rt2x00_register_write(rt2x00pci, CSR21, flags);
+
+	/*
+     * kick a pulse.
+     */
+	rt2x00_eeprom_pulse_high(rt2x00pci, &flags);
+	rt2x00_eeprom_pulse_low(rt2x00pci, &flags);
+
+	return data;
+}
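+
+/*
+ * Usage sketch (illustrative only; the meaning of each word index is
+ * device specific):
+ *
+ *	u16 word = rt2x00_eeprom_read_word(rt2x00pci, 0x01);
+ */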
+
+#endif /* RT2500PCI_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h
new file mode 100644
index 0000000..ec8ca90
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00.h
@@ -0,0 +1,649 @@
+/* rt2x00.h
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *	                     <http://rt2x00.serialmonkey.com>
+ *               2006        rtnet adaption by Daniel Gregorek 
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+  Module: rt2x00
+  Abstract: rt2x00 global information.
+  Supported chipsets: RT2560
+*/
+
+#ifndef RT2X00_H
+#define RT2X00_H
+
+#include <linux/netdevice.h>
+#include <linux/wireless.h>
+
+#include <rtnet_port.h>
+#include <rtwlan.h>
+
+#define MAX_UNITS 2
+
+/*
+ * Module information.
+ */
+#define DRV_NAME "rt2x00"
+#define DRV_VERSION "0.1"
+#define DRV_AUTHOR "Daniel Gregorek <dxg@gmx.de>"
+//#define CONFIG_RT2X00_DEBUG
+
+/*
+ * Debug defines.
+ * The debug variable is exported by the device specific module.
+ * It must therefore be declared extern here to make it accessible
+ * to the core module as well.
+ */
+#ifdef CONFIG_RT2X00_DEBUG
+extern int rt2x00_debug_level;
+#define DEBUG_PRINTK(__message...)                                             \
+	do {                                                                   \
+		rtdm_printk(__message);                                        \
+	} while (0)
+#else /* CONFIG_RT2X00_DEBUG */
+#define DEBUG_PRINTK(__message...)                                             \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_RT2X00_DEBUG */
+
+/*
+ * Various debug levels.
+ * PANIC and ERROR indicate serious problems within the module;
+ * these should never be ignored, so we always print the message.
+ */
+#define PANIC(__message, __args...)                                            \
+	rtdm_printk(KERN_EMERG DRV_NAME "->%s: Panic - " __message,           \
+		    __FUNCTION__, ##__args);
+#define ERROR(__message, __args...)                                            \
+	rtdm_printk(KERN_ERR DRV_NAME "->%s: Error - " __message,              \
+		    __FUNCTION__, ##__args);
+#define WARNING(__message, __args...)                                          \
+	rtdm_printk(KERN_WARNING DRV_NAME "->%s: Warning - " __message,        \
+		    __FUNCTION__, ##__args);
+#define NOTICE(__message, __args...)                                           \
+	rtdm_printk(KERN_NOTICE DRV_NAME "->%s: Notice - " __message,          \
+		    __FUNCTION__, ##__args);
+#define INFO(__message, __args...)                                             \
+	rtdm_printk(KERN_INFO DRV_NAME "->%s: Info - " __message,              \
+		    __FUNCTION__, ##__args);
+#define DEBUG(__message, __args...)                                            \
+	DEBUG_PRINTK(KERN_DEBUG DRV_NAME "->%s: Debug - " __message,           \
+		     __FUNCTION__, ##__args);
+
+/*
+ * RT2x00 ring types.
+ */
+
+/*
+ * Ring names.
+ */
+#define RING_RX 0x01 /* Ring used for receiving packets. */
+#define RING_TX 0x02 /* Ring used for transmitting normal packets. */
+
+/*
+ * Ring sizes.
+ */
+#define DATA_FRAME_SIZE 2432
+#define MGMT_FRAME_SIZE 256
+
+/*
+ * RT2x00 xmit flags.
+ */
+#define XMIT_IFS_SIFS 0x0001
+#define XMIT_IFS_BACKOFF 0x0002
+#define XMIT_IFS_NEW_BACKOFF 0x0004
+#define XMIT_IFS_NONE 0x0008
+#define XMIT_NEW_SEQUENCE 0x0010
+#define XMIT_ACK 0x0020
+#define XMIT_TIMESTAMP 0x0040
+#define XMIT_RTS 0x0080
+#define XMIT_OFDM 0x0100
+#define XMIT_LONG_RETRY 0x0200
+#define XMIT_MORE_FRAGS 0x0400
+#define XMIT_SHORT_PREAMBLE 0x0800
+#define XMIT_START 0x1000
+
+/*
+ * RT2x00 Statistics flags.
+ */
+#define STATS_TX_RESULT 0x01
+#define STATS_TX_RETRY_COUNT 0x02
+#define STATS_RX_CRC 0x10
+#define STATS_RX_PHYSICAL 0x20
+#define STATS_RX_QUALITY 0x40
+#define STATS_RX_DROP 0x80
+
+/*
+ * TX result flags.
+ */
+#define TX_SUCCESS 0
+#define TX_SUCCESS_RETRY 1
+#define TX_FAIL_RETRY 2
+#define TX_FAIL_INVALID 3
+#define TX_FAIL_OTHER 4
+
+/*
+ * Channel type defines.
+ */
+#define CHANNEL_OFDM 0x01
+#define CHANNEL_UNII_LOW 0x02
+#define CHANNEL_HIPERLAN2 0x04
+#define CHANNEL_UNII_HIGH 0x08
+
+#define CHANNEL_OFDM_MIN 1
+#define CHANNEL_OFDM_MAX 14
+#define CHANNEL_UNII_LOW_MIN 36
+#define CHANNEL_UNII_LOW_MAX 64
+#define CHANNEL_HIPERLAN2_MIN 100
+#define CHANNEL_HIPERLAN2_MAX 140
+#define CHANNEL_UNII_HIGH_MIN 149
+#define CHANNEL_UNII_HIGH_MAX 161
+
+/*
+ * Device 802.11abg capabilities.
+ */
+static struct _rt2x00_capabilities {
+	u8 txpower[6];
+	u8 bitrate[12];
+} __attribute__ ((packed)) capabilities = {
+    /*
+     * tx-power.
+     */
+    .txpower = {
+          3, 12, 25, 50, 75, 100,
+      },
+
+    /*
+     * Bitrates
+     */
+    .bitrate = {
+         2, 4, 11, 22,						/* CCK. */
+         12, 18, 24, 36, 48, 72, 96, 108,			/* OFDM. */
+     },
+};
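+
+/*
+ * Note: bitrates are encoded in 0.5 Mbit/s units (see _rt2x00_config),
+ * e.g. 2 == 1 Mbit/s, 22 == 11 Mbit/s and 108 == 54 Mbit/s.
+ */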
+
+struct _rt2x00_config {
+	u8 config_flags;
+#define CONFIG_DROP_BCAST 0x0001
+#define CONFIG_DROP_MCAST 0x0002
+#define CONFIG_AUTORESP 0x0004
+
+	u8 antenna_tx;
+	u8 antenna_rx;
+
+	u8 bssid[ETH_ALEN];
+	u8 short_retry;
+	u8 long_retry;
+
+	u8 channel;
+	u8 bitrate; /* 0.5Mbit/sec */
+	u8 txpower; /* % */
+
+	u8 bbpsens;
+
+	/*
+     * LED status
+     */
+	u8 led_status;
+
+	u16 __pad2; /* For alignment only. */
+
+	/*
+     * Duration values in us.
+     */
+	u8 plcp;
+	u8 sifs;
+	u8 slot_time;
+
+	/*
+     * Configuration values that have to be updated to device.
+     */
+	u16 update_flags;
+#define UPDATE_ALL_CONFIG 0xffff
+#define UPDATE_BSSID 0x0001
+#define UPDATE_PACKET_FILTER 0x0002
+#define UPDATE_CHANNEL 0x0004
+#define UPDATE_BITRATE 0x0008
+#define UPDATE_RETRY 0x0010
+#define UPDATE_TXPOWER 0x0020
+#define UPDATE_ANTENNA 0x0040
+#define UPDATE_DURATION 0x0080
+#define UPDATE_PREAMBLE 0x0100
+#define UPDATE_AUTORESP 0x0200
+#define UPDATE_LED_STATUS 0x0400
+#define UPDATE_BBPSENS 0x0800
+
+} __attribute__((packed));
+
+struct _rt2x00_core {
+	/*
+     * RT2x00 device status flags (atomic read/write access).
+     */
+	unsigned long flags;
+
+#define DEVICE_ENABLED 0 /* Device has been opened. */
+#define DEVICE_AWAKE 1 /* Device is not suspended. */
+#define DEVICE_RADIO_ON 2 /* Device antenna is enabled. */
+#define DEVICE_CONFIG_UPDATE 3 /* Device is updating configuration. */
+
+	/*
+     * Device handler.
+     */
+	struct _rt2x00_dev_handler *handler;
+
+	/*
+     * RTnet device we belong to.
+     */
+	struct rtnet_device *rtnet_dev;
+
+	/*
+     * RTwlan stack structure.
+     */
+	struct rtwlan_device *rtwlan_dev;
+
+	/*
+     * Device configuration.
+     */
+	struct _rt2x00_config config;
+
+	void *priv;
+
+} __attribute__((packed));
+
+/*
+ * Device specific handlers.
+ */
+struct _rt2x00_dev_handler {
+	/*
+     * Device specific module.
+     */
+	struct module *dev_module;
+
+	/*
+     * Initialization handlers.
+     */
+	int (*dev_probe)(struct _rt2x00_core *core, void *priv);
+	int (*dev_remove)(struct _rt2x00_core *core);
+
+	/*
+     * Radio control.
+     */
+	int (*dev_radio_on)(struct _rt2x00_core *core);
+	int (*dev_radio_off)(struct _rt2x00_core *core);
+
+	/*
+     * Configuration handlers.
+     */
+	int (*dev_update_config)(struct _rt2x00_core *core, u16 update_flags);
+
+	/*
+     * xmit handler.
+     */
+	int (*dev_xmit_packet)(struct _rt2x00_core *core, struct rtskb *rtskb,
+			       u16 rate, u16 xmit_flags);
+
+	/*
+     * Handler for direct access to register from core.
+     */
+	int (*dev_register_access)(struct _rt2x00_core *core, int request,
+				   u32 address, u32 *value);
+
+} __attribute__((packed));
+
+static inline void *rt2x00_priv(const struct _rt2x00_core *core)
+{
+	return core->priv;
+}
+
+/*
+ * Duration calculations
+ * The rate variable passed in is 2 * real_rate (in Mb/s).
+ * The length is therefore multiplied by 8 to convert bytes to bits, and by 2
+ * to compensate for the difference between real_rate and the rate variable.
+ */
+#define ACK_SIZE 14
+#define IEEE80211_HEADER 24
+
+static inline u16 get_duration(const unsigned int size, const u8 rate)
+{
+	return ((size * 8 * 2) / rate);
+}
+
+static inline u16 get_duration_res(const unsigned int size, const u8 rate)
+{
+	return ((size * 8 * 2) % rate);
+}
+
+static inline u16 get_preamble(const struct _rt2x00_config *config)
+{
+	return 144;
+}
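+
+/*
+ * Worked example (illustrative only): a 14 byte ACK at 11 Mbit/s
+ * (rate == 22) gives get_duration(14, 22) == (14 * 8 * 2) / 22 == 10 us,
+ * with a residual of get_duration_res(14, 22) == 224 % 22 == 4.
+ */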
+
+/*
+ * Register handlers.
+ * We store the position of a register field inside a field structure.
+ * This simplifies setting and reading a given field inside a register.
+ */
+struct _rt2x00_field16 {
+	u16 bit_offset;
+	u16 bit_mask;
+} __attribute__((packed));
+
+struct _rt2x00_field32 {
+	u32 bit_offset;
+	u32 bit_mask;
+} __attribute__((packed));
+
+#define FIELD16(__offset, __mask)                                              \
+	((struct _rt2x00_field16){ (__offset), (__mask) })
+#define FIELD32(__offset, __mask)                                              \
+	((struct _rt2x00_field32){ (__offset), (__mask) })
+
+static inline void rt2x00_set_field32(u32 *reg,
+				      const struct _rt2x00_field32 field,
+				      const u32 value)
+{
+	*reg &= cpu_to_le32(~(field.bit_mask));
+	*reg |= cpu_to_le32((value << field.bit_offset) & field.bit_mask);
+}
+
+static inline void rt2x00_set_field32_nb(u32 *reg,
+					 const struct _rt2x00_field32 field,
+					 const u32 value)
+{
+	*reg &= ~(field.bit_mask);
+	*reg |= (value << field.bit_offset) & field.bit_mask;
+}
+
+static inline u32 rt2x00_get_field32(const u32 reg,
+				     const struct _rt2x00_field32 field)
+{
+	return (le32_to_cpu(reg) & field.bit_mask) >> field.bit_offset;
+}
+
+static inline u32 rt2x00_get_field32_nb(const u32 reg,
+					const struct _rt2x00_field32 field)
+{
+	return (reg & field.bit_mask) >> field.bit_offset;
+}
+
+static inline void rt2x00_set_field16(u16 *reg,
+				      const struct _rt2x00_field16 field,
+				      const u16 value)
+{
+	*reg &= cpu_to_le16(~(field.bit_mask));
+	*reg |= cpu_to_le16((value << field.bit_offset) & field.bit_mask);
+}
+
+static inline void rt2x00_set_field16_nb(u16 *reg,
+					 const struct _rt2x00_field16 field,
+					 const u16 value)
+{
+	*reg &= ~(field.bit_mask);
+	*reg |= (value << field.bit_offset) & field.bit_mask;
+}
+
+static inline u16 rt2x00_get_field16(const u16 reg,
+				     const struct _rt2x00_field16 field)
+{
+	return (le16_to_cpu(reg) & field.bit_mask) >> field.bit_offset;
+}
+
+static inline u16 rt2x00_get_field16_nb(const u16 reg,
+					const struct _rt2x00_field16 field)
+{
+	return (reg & field.bit_mask) >> field.bit_offset;
+}
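+
+/*
+ * Usage sketch (hypothetical field, illustrative only):
+ *
+ *	const struct _rt2x00_field32 f = FIELD32(13, 0x00006000);
+ *	u32 reg = 0;
+ *	rt2x00_set_field32(&reg, f, 2);	reg is now cpu_to_le32(0x00004000)
+ *	rt2x00_get_field32(reg, f);	returns 2
+ */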
+
+/*
+ * RF register structure for channel selection.
+ */
+struct _rf_channel {
+	u32 rf1;
+	u32 rf2;
+	u32 rf3;
+	u32 rf4;
+} __attribute__((packed));
+
+/*
+ * Chipset identification
+ * The chipset on the device is composed of an RT chip and an RF chip.
+ * The chipset combination is important for determining device capabilities.
+ */
+struct _rt2x00_chip {
+	u16 rt;
+	u16 rf;
+} __attribute__((packed));
+
+/*
+ * Set chipset data.
+ * Some rf values for RT2400 devices are equal to rf values for RT2500 devices.
+ * To prevent problems, all rf values are masked to clearly separate the chipsets.
+ */
+static inline void set_chip(struct _rt2x00_chip *chipset, const u16 rt,
+			    const u16 rf)
+{
+	INFO("Chipset detected - rt: %04x, rf: %04x.\n", rt, rf);
+
+	chipset->rt = rt;
+	chipset->rf = rf | (chipset->rt & 0xff00);
+}
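+
+/*
+ * Masking sketch (illustrative values): set_chip(&chip, 0x2560, 0x0002)
+ * stores chip.rf == 0x0002 | (0x2560 & 0xff00) == 0x2502, so equal raw
+ * rf values from different RT generations can no longer collide.
+ */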
+
+static inline char rt2x00_rt(const struct _rt2x00_chip *chipset, const u16 chip)
+{
+	return (chipset->rt == chip);
+}
+
+static inline char rt2x00_rf(const struct _rt2x00_chip *chipset, const u16 chip)
+{
+	return (chipset->rf == chip);
+}
+
+static inline u16 rt2x00_get_rf(const struct _rt2x00_chip *chipset)
+{
+	return chipset->rf;
+}
+
+/*
+ * _data_ring
+ * Data rings are used by the device to send and receive packets.
+ * The data_addr is the base address of the data memory.
+ * Device specific information is pointed to by the priv pointer.
+ * The index values may only be changed with the functions ring_index_inc()
+ * and ring_index_done_inc().
+ */
+struct _data_ring {
+	/*
+     * Base address of packet ring.
+     */
+	dma_addr_t data_dma;
+	void *data_addr;
+
+	/*
+     * Private device specific data.
+     */
+	void *priv;
+	struct _rt2x00_core *core;
+
+	/*
+     * Current index values.
+     */
+	u8 index;
+	u8 index_done;
+
+	/*
+     * Ring type set with RING_* define.
+     */
+	u8 ring_type;
+
+	/*
+     * Number of entries in this ring.
+     */
+	u8 max_entries;
+
+	/*
+     * Size of packet and descriptor in bytes.
+     */
+	u16 entry_size;
+	u16 desc_size;
+
+	/*
+     * Total allocated memory size.
+     */
+	u32 mem_size;
+} __attribute__((packed));
+
+/*
+ * Number of entries in a packet ring.
+ */
+#define RX_ENTRIES 8
+#define TX_ENTRIES 8
+#define ATIM_ENTRIES 1
+#define PRIO_ENTRIES 2
+#define BEACON_ENTRIES 1
+
+/*
+ * Initialization and cleanup routines.
+ */
+static inline void rt2x00_init_ring(struct _rt2x00_core *core,
+				    struct _data_ring *ring, const u8 ring_type,
+				    const u16 max_entries, const u16 entry_size,
+				    const u16 desc_size)
+{
+	ring->core = core;
+	ring->index = 0;
+	ring->index_done = 0;
+	ring->ring_type = ring_type;
+	ring->max_entries = max_entries;
+	ring->entry_size = entry_size;
+	ring->desc_size = desc_size;
+	ring->mem_size =
+		ring->max_entries * (ring->desc_size + ring->entry_size);
+}
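+
+/*
+ * Sizing sketch (descriptor size hypothetical): with max_entries = 8,
+ * entry_size = DATA_FRAME_SIZE (2432) and desc_size = 48, mem_size
+ * becomes 8 * (48 + 2432) = 19840 bytes for the whole DMA block.
+ */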
+
+static inline void rt2x00_deinit_ring(struct _data_ring *ring)
+{
+	ring->core = NULL;
+	ring->index = 0;
+	ring->index_done = 0;
+	ring->ring_type = 0;
+	ring->max_entries = 0;
+	ring->entry_size = 0;
+	ring->desc_size = 0;
+	ring->mem_size = 0;
+}
+
+/*
+ * Ring index manipulation functions.
+ */
+static inline void rt2x00_ring_index_inc(struct _data_ring *ring)
+{
+	ring->index++;
+	if (ring->index >= ring->max_entries)
+		ring->index = 0;
+}
+
+static inline void rt2x00_ring_index_done_inc(struct _data_ring *ring)
+{
+	ring->index_done++;
+	if (ring->index_done >= ring->max_entries)
+		ring->index_done = 0;
+}
+
+static inline void rt2x00_ring_clear_index(struct _data_ring *ring)
+{
+	ring->index = 0;
+	ring->index_done = 0;
+}
+
+static inline u8 rt2x00_ring_empty(struct _data_ring *ring)
+{
+	return ring->index_done == ring->index;
+}
+
+static inline u8 rt2x00_ring_free_entries(struct _data_ring *ring)
+{
+	if (ring->index >= ring->index_done)
+		return ring->max_entries - (ring->index - ring->index_done);
+	else
+		return ring->index_done - ring->index;
+}
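+
+/*
+ * Worked example (illustrative only): with max_entries = 8, index = 2 and
+ * index_done = 6 there are 6 - 2 = 4 free entries; with index = 6 and
+ * index_done = 2 there are 8 - (6 - 2) = 4 as well.
+ */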
+
+/*
+ * Return PLCP value matching the rate.
+ * PLCP values according to ieee802.11a-1999 p.14.
+ */
+static inline u8 rt2x00_get_plcp(const u8 rate)
+{
+	u8 counter = 0x00;
+	u8 plcp[12] = {
+		0x00, 0x01, 0x02, 0x03, /* CCK. */
+		0x0b, 0x0f, 0x0a, 0x0e, 0x09, 0x0d, 0x08, 0x0c, /* OFDM. */
+	};
+
+	for (; counter < 12; counter++) {
+		if (capabilities.bitrate[counter] == rate)
+			return plcp[counter];
+	}
+
+	return 0xff;
+}
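+
+/*
+ * Lookup sketch (illustrative only): rate 22 (11 Mbit/s CCK) matches
+ * bitrate[3] and returns PLCP value 0x03; rate 12 (6 Mbit/s OFDM)
+ * matches bitrate[4] and returns 0x0b.
+ */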
+
+#define OFDM_CHANNEL(__channel)                                                \
+	((__channel) >= CHANNEL_OFDM_MIN && (__channel) <= CHANNEL_OFDM_MAX)
+#define UNII_LOW_CHANNEL(__channel)                                            \
+	((__channel) >= CHANNEL_UNII_LOW_MIN &&                                \
+	 (__channel) <= CHANNEL_UNII_LOW_MAX)
+#define HIPERLAN2_CHANNEL(__channel)                                           \
+	((__channel) >= CHANNEL_HIPERLAN2_MIN &&                               \
+	 (__channel) <= CHANNEL_HIPERLAN2_MAX)
+#define UNII_HIGH_CHANNEL(__channel)                                           \
+	((__channel) >= CHANNEL_UNII_HIGH_MIN &&                               \
+	 (__channel) <= CHANNEL_UNII_HIGH_MAX)
+
+/*
+ * Return the index of the channel, counted from the first channel of its
+ * range, where the range is OFDM, UNII (low), HiperLAN2 or UNII (high).
+ */
+static inline int rt2x00_get_channel_index(const u8 channel)
+{
+	if (OFDM_CHANNEL(channel))
+		return (channel - 1);
+
+	if (channel % 4)
+		return -EINVAL;
+
+	if (UNII_LOW_CHANNEL(channel))
+		return ((channel - CHANNEL_UNII_LOW_MIN) / 4);
+	else if (HIPERLAN2_CHANNEL(channel))
+		return ((channel - CHANNEL_HIPERLAN2_MIN) / 4);
+	else if (UNII_HIGH_CHANNEL(channel))
+		return ((channel - CHANNEL_UNII_HIGH_MIN) / 4);
+	return -EINVAL;
+}
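+
+/*
+ * Worked examples (illustrative only): channel 1 is OFDM and yields
+ * index 0; channel 64 is UNII (low) and yields (64 - 36) / 4 == 7;
+ * channel 38 falls on no 4-channel boundary and is rejected with -EINVAL.
+ */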
+
+/*
+ * RT2x00 core module functions that can be used in the device specific modules.
+ */
+extern struct rtnet_device *
+rt2x00_core_probe(struct _rt2x00_dev_handler *handler, void *priv,
+		  u32 sizeof_dev);
+extern void rt2x00_core_remove(struct rtnet_device *rtnet_dev);
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c
new file mode 100644
index 0000000..fac5c3e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/experimental/rt2500/rt2x00core.c
@@ -0,0 +1,444 @@
+/* rt2x00core.c
+ *
+ * Copyright (C) 2004 - 2005 rt2x00-2.0.0-b3 SourceForge Project
+ *			     <http://rt2x00.serialmonkey.com>
+ *               2006        rtnet adaption by Daniel Gregorek
+ *                           <dxg@gmx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the
+ * Free Software Foundation, Inc.,
+ * 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/*
+ * Module: rt2x00core
+ * Abstract: rt2x00 core routines.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/version.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <asm/io.h>
+
+#include <rtnet_port.h>
+
+#include "rt2x00.h"
+
+#ifdef DRV_NAME
+#undef DRV_NAME
+#define DRV_NAME "rt_rt2x00core"
+#endif /* DRV_NAME */
+
+static int rt2x00_radio_on(struct _rt2x00_core *core);
+static int rt2x00_radio_off(struct _rt2x00_core *core);
+
+static int cards[MAX_UNITS] = { [0 ...(MAX_UNITS - 1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+
+/*
+ * Writes the pending configuration to the device
+ */
+static void rt2x00_update_config(struct _rt2x00_core *core)
+{
+	u16 update_flags = 0x0000;
+
+	if (!test_bit(DEVICE_ENABLED, &core->flags) &&
+	    !test_bit(DEVICE_RADIO_ON, &core->flags))
+		return;
+
+	if (test_and_set_bit(DEVICE_CONFIG_UPDATE, &core->flags))
+		return;
+
+	update_flags = core->config.update_flags;
+	core->config.update_flags = 0;
+
+	if (likely(update_flags))
+		core->handler->dev_update_config(core, update_flags);
+
+	clear_bit(DEVICE_CONFIG_UPDATE, &core->flags);
+}
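+
+/*
+ * Usage sketch (mirrors the ioctl handler below): mark the changed
+ * setting, then push it to the device in one call.
+ *
+ *	core->config.channel = 11;
+ *	core->config.update_flags |= UPDATE_CHANNEL;
+ *	rt2x00_update_config(core);
+ */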
+
+/*
+ * Radio control.
+ */
+static int rt2x00_radio_on(struct _rt2x00_core *core)
+{
+	int status = 0x00000000;
+
+	if (test_bit(DEVICE_RADIO_ON, &core->flags)) {
+		WARNING("Radio already on.\n");
+		return -ENOTCONN;
+	}
+
+	status = core->handler->dev_radio_on(core);
+	if (status)
+		return status;
+
+	set_bit(DEVICE_RADIO_ON, &core->flags);
+
+	return 0;
+}
+
+static int rt2x00_radio_off(struct _rt2x00_core *core)
+{
+	if (!test_and_clear_bit(DEVICE_RADIO_ON, &core->flags)) {
+		WARNING("Radio already off.\n");
+		return -ENOTCONN;
+	}
+
+	core->handler->dev_radio_off(core);
+
+	return 0;
+}
+
+/*
+ * User space ioctl handler.
+ */
+static int rt2x00_ioctl(struct rtnet_device *rtnet_dev, struct ifreq *ifr,
+			int request)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	struct rtwlan_cmd *cmd;
+	u8 rate, dsss_rate, ofdm_rate;
+	u32 address, value;
+
+	cmd = (struct rtwlan_cmd *)ifr->ifr_data;
+
+	switch (request) {
+	case IOC_RTWLAN_IFINFO:
+		cmd->args.info.bitrate = core->config.bitrate;
+		cmd->args.info.channel = core->config.channel;
+		cmd->args.info.retry = core->config.short_retry;
+		cmd->args.info.txpower = core->config.txpower;
+		cmd->args.info.bbpsens = core->config.bbpsens;
+		cmd->args.info.mode = core->rtwlan_dev->mode;
+		cmd->args.info.rx_packets = core->rtwlan_dev->stats.rx_packets;
+		cmd->args.info.tx_packets = core->rtwlan_dev->stats.tx_packets;
+		cmd->args.info.tx_retry = core->rtwlan_dev->stats.tx_retry;
+		cmd->args.info.autoresponder =
+			core->config.config_flags & CONFIG_AUTORESP ? 1 : 0;
+		cmd->args.info.dropbcast =
+			core->config.config_flags & CONFIG_DROP_BCAST ? 1 : 0;
+		cmd->args.info.dropmcast =
+			core->config.config_flags & CONFIG_DROP_MCAST ? 1 : 0;
+		DEBUG("rtwlan_dev->mode=%d\n", rtwlan_dev->mode);
+		break;
+	case IOC_RTWLAN_BITRATE:
+		rate = cmd->args.set.bitrate;
+		ofdm_rate = ieee80211_is_ofdm_rate(rate);
+		dsss_rate = ieee80211_is_dsss_rate(rate);
+		DEBUG("bitrate=%d\n", rate);
+		if (!(dsss_rate ^ ofdm_rate))
+			NOTICE("Rate %d is not DSSS and not OFDM.\n", rate);
+		core->config.bitrate = rate;
+		core->config.update_flags |= UPDATE_BITRATE;
+		break;
+	case IOC_RTWLAN_CHANNEL:
+		DEBUG("channel=%d\n", cmd->args.set.channel);
+		core->config.channel = cmd->args.set.channel;
+		core->config.update_flags |= UPDATE_CHANNEL;
+		break;
+	case IOC_RTWLAN_RETRY:
+		core->config.short_retry = cmd->args.set.retry;
+		core->config.update_flags |= UPDATE_RETRY;
+		break;
+	case IOC_RTWLAN_TXPOWER:
+		core->config.txpower = cmd->args.set.txpower;
+		core->config.update_flags |= UPDATE_TXPOWER;
+		break;
+	case IOC_RTWLAN_AUTORESP:
+		if (cmd->args.set.autoresponder)
+			core->config.config_flags |= CONFIG_AUTORESP;
+		else
+			core->config.config_flags &= ~CONFIG_AUTORESP;
+		core->config.update_flags |= UPDATE_AUTORESP;
+		break;
+	case IOC_RTWLAN_DROPBCAST:
+		if (cmd->args.set.dropbcast)
+			core->config.config_flags |= CONFIG_DROP_BCAST;
+		else
+			core->config.config_flags &= ~CONFIG_DROP_BCAST;
+		core->config.update_flags |= UPDATE_PACKET_FILTER;
+		break;
+	case IOC_RTWLAN_DROPMCAST:
+		if (cmd->args.set.dropmcast)
+			core->config.config_flags |= CONFIG_DROP_MCAST;
+		else
+			core->config.config_flags &= ~CONFIG_DROP_MCAST;
+		core->config.update_flags |= UPDATE_PACKET_FILTER;
+		break;
+	case IOC_RTWLAN_TXMODE:
+		core->rtwlan_dev->mode = cmd->args.set.mode;
+		break;
+	case IOC_RTWLAN_BBPSENS:
+		value = cmd->args.set.bbpsens;
+		if (value < 0)
+			value = 0;
+		if (value > 127)
+			value = 127;
+		core->config.bbpsens = value;
+		core->config.update_flags |= UPDATE_BBPSENS;
+		break;
+	case IOC_RTWLAN_REGREAD:
+	case IOC_RTWLAN_BBPREAD:
+		address = cmd->args.reg.address;
+		core->handler->dev_register_access(core, request, address,
+						   &value);
+		cmd->args.reg.value = value;
+		break;
+	case IOC_RTWLAN_REGWRITE:
+	case IOC_RTWLAN_BBPWRITE:
+		address = cmd->args.reg.address;
+		value = cmd->args.reg.value;
+		core->handler->dev_register_access(core, request, address,
+						   &value);
+		break;
+	default:
+		ERROR("Unknown request!\n");
+		return -1;
+	}
+
+	if (request != IOC_RTWLAN_IFINFO)
+		rt2x00_update_config(core);
+
+	return 0;
+}
+
+/*
+ * TX/RX related routines.
+ */
+static int rt2x00_start_xmit(struct rtskb *rtskb,
+			     struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	u16 xmit_flags = 0x0000;
+	u8 rate = 0x00;
+
+	if (likely(rtskb)) {
+		rate = core->config.bitrate;
+		if (ieee80211_is_ofdm_rate(rate))
+			xmit_flags |= XMIT_OFDM;
+
+		/* Check if the packet should be acknowledged */
+		if (core->rtwlan_dev->mode == RTWLAN_TXMODE_ACK)
+			xmit_flags |= XMIT_ACK;
+
+		if (core->handler->dev_xmit_packet(core, rtskb, rate,
+						   xmit_flags))
+			ERROR("Packet dropped!\n");
+
+		dev_kfree_rtskb(rtskb);
+	}
+
+	return 0;
+}
+
+/***
+ *  rt2x00_open
+ *  @rtnet_dev: RTnet device to be enabled.
+ */
+static int rt2x00_open(struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+	int status = 0x00000000;
+
+	DEBUG("Start.\n");
+
+	if (test_and_set_bit(DEVICE_ENABLED, &core->flags)) {
+		ERROR("device already enabled.\n");
+		return -EBUSY;
+	}
+
+	/*
+     * Start rtnet interface.
+     */
+	rt_stack_connect(rtnet_dev, &STACK_manager);
+
+	status = rt2x00_radio_on(core);
+	if (status) {
+		clear_bit(DEVICE_ENABLED, &core->flags);
+		ERROR("Couldn't activate radio.\n");
+		return status;
+	}
+
+	core->config.led_status = 1;
+	core->config.update_flags |= UPDATE_LED_STATUS;
+	rt2x00_update_config(core);
+
+	rtnetif_start_queue(rtnet_dev);
+
+	DEBUG("Exit success.\n");
+
+	return 0;
+}
+
+/***
+ *  rt2x00_close
+ *  @rtnet_dev: RTnet device to be disabled.
+ */
+static int rt2x00_close(struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct _rt2x00_core *core = rtwlan_priv(rtwlan_dev);
+
+	DEBUG("Start.\n");
+
+	if (!test_and_clear_bit(DEVICE_ENABLED, &core->flags)) {
+		ERROR("device already disabled.\n");
+		return -EBUSY;
+	}
+
+	rt2x00_radio_off(core);
+
+	rtnetif_stop_queue(rtnet_dev);
+	rt_stack_disconnect(rtnet_dev);
+
+	return 0;
+}
+
+/*
+ * Initialization handlers.
+ */
+static void rt2x00_init_config(struct _rt2x00_core *core)
+{
+	DEBUG("Start.\n");
+
+	memset(&core->config.bssid, '\0', sizeof(core->config.bssid));
+
+	core->config.channel = 1;
+	core->config.bitrate = capabilities.bitrate[0];
+	core->config.bbpsens = 50;
+	core->config.config_flags = 0;
+	core->config.config_flags |=
+		CONFIG_DROP_BCAST | CONFIG_DROP_MCAST | CONFIG_AUTORESP;
+	core->config.short_retry = 4;
+	core->config.long_retry = 7;
+	core->config.txpower = 100;
+	core->config.plcp = 48;
+	core->config.sifs = 10;
+	core->config.slot_time = 20;
+	core->rtwlan_dev->mode = RTWLAN_TXMODE_RAW;
+	core->config.update_flags = UPDATE_ALL_CONFIG;
+}
+
+struct rtnet_device *rt2x00_core_probe(struct _rt2x00_dev_handler *handler,
+				       void *priv, u32 sizeof_dev)
+{
+	struct rtnet_device *rtnet_dev = NULL;
+	struct _rt2x00_core *core = NULL;
+	struct rtwlan_device *rtwlan_dev = NULL;
+	static int cards_found = -1;
+	int err;
+
+	DEBUG("Start.\n");
+
+	cards_found++;
+	if (cards[cards_found] == 0)
+		goto exit;
+
+	rtnet_dev =
+		rtwlan_alloc_dev(sizeof_dev + sizeof(*core), RX_ENTRIES * 2);
+	if (!rtnet_dev)
+		goto exit;
+
+	rt_rtdev_connect(rtnet_dev, &RTDEV_manager);
+	rtnet_dev->vers = RTDEV_VERS_2_0;
+
+	rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	memset(rtwlan_dev, 0x00, sizeof(*rtwlan_dev));
+
+	core = rtwlan_priv(rtwlan_dev);
+	memset(core, 0x00, sizeof(*core));
+
+	core->rtwlan_dev = rtwlan_dev;
+	core->handler = handler;
+	core->priv = (void *)core + sizeof(*core);
+	core->rtnet_dev = rtnet_dev;
+
+	/* Set configuration default values. */
+	rt2x00_init_config(core);
+
+	if (core->handler->dev_probe && core->handler->dev_probe(core, priv)) {
+		ERROR("device probe failed.\n");
+		rtdev_free(rtnet_dev);
+		goto exit;
+	}
+	INFO("Device " MAC_FMT " detected.\n", MAC_ARG(rtnet_dev->dev_addr));
+
+	rtwlan_dev->hard_start_xmit = rt2x00_start_xmit;
+
+	rtnet_dev->open = &rt2x00_open;
+	rtnet_dev->stop = &rt2x00_close;
+	rtnet_dev->do_ioctl = &rt2x00_ioctl;
+	rtnet_dev->hard_header = &rt_eth_header;
+
+	if ((err = rt_register_rtnetdev(rtnet_dev)) != 0) {
+		ERROR("rtnet_device registration failed (err=%d).\n", err);
+		goto exit_dev_remove;
+	}
+
+	set_bit(DEVICE_AWAKE, &core->flags);
+
+	return rtnet_dev;
+
+exit_dev_remove:
+	if (core->handler->dev_remove)
+		core->handler->dev_remove(core);
+	/* Free only after dev_remove: core lives inside this allocation. */
+	rtdev_free(rtnet_dev);
+
+exit:
+	return NULL;
+}
+EXPORT_SYMBOL_GPL(rt2x00_core_probe);
+
+void rt2x00_core_remove(struct rtnet_device *rtnet_dev)
+{
+	rt_unregister_rtnetdev(rtnet_dev);
+	rt_rtdev_disconnect(rtnet_dev);
+
+	rtdev_free(rtnet_dev);
+}
+EXPORT_SYMBOL_GPL(rt2x00_core_remove);
+
+/*
+ * RT2x00 core module information.
+ */
+static char version[] = DRV_NAME " - " DRV_VERSION;
+
+MODULE_AUTHOR(DRV_AUTHOR);
+MODULE_DESCRIPTION("RTnet rt2500 PCI WLAN driver (Core Module)");
+MODULE_LICENSE("GPL");
+
+static int __init rt2x00_core_init(void)
+{
+	printk(KERN_INFO "Loading module: %s\n", version);
+	return 0;
+}
+
+static void __exit rt2x00_core_exit(void)
+{
+	printk(KERN_INFO "Unloading module: %s\n", version);
+}
+
+module_init(rt2x00_core_init);
+module_exit(rt2x00_core_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile
new file mode 100644
index 0000000..316d8c3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_FEC) += rtnet_fec.o
+
+rtnet_fec-y := fec_main.o fec_ptp.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h
new file mode 100644
index 0000000..0e25662
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec.h
@@ -0,0 +1,625 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/****************************************************************************/
+
+/*
+ *	fec.h  --  Fast Ethernet Controller for Motorola ColdFire SoC
+ *		   processors.
+ *
+ *	(C) Copyright 2000-2005, Greg Ungerer (gerg@snapgear.com)
+ *	(C) Copyright 2000-2001, Lineo (www.lineo.com)
+ */
+
+/****************************************************************************/
+#ifndef FEC_H
+#define	FEC_H
+/****************************************************************************/
+
+#include <linux/clocksource.h>
+#include <linux/net_tstamp.h>
+#include <linux/ptp_clock_kernel.h>
+#include <linux/timecounter.h>
+#include <rtnet_port.h>
+
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+    defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+/*
+ *	Just figures, Motorola would have to change the offsets for
+ *	registers in the same peripheral device on different models
+ *	of the ColdFire!
+ */
+#define FEC_IEVENT		0x004 /* Interrupt event reg */
+#define FEC_IMASK		0x008 /* Interrupt mask reg */
+#define FEC_R_DES_ACTIVE_0	0x010 /* Receive descriptor reg */
+#define FEC_X_DES_ACTIVE_0	0x014 /* Transmit descriptor reg */
+#define FEC_ECNTRL		0x024 /* Ethernet control reg */
+#define FEC_MII_DATA		0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED		0x044 /* MII speed control reg */
+#define FEC_MIB_CTRLSTAT	0x064 /* MIB control/status reg */
+#define FEC_R_CNTRL		0x084 /* Receive control reg */
+#define FEC_X_CNTRL		0x0c4 /* Transmit Control reg */
+#define FEC_ADDR_LOW		0x0e4 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH		0x0e8 /* High 16bits MAC address */
+#define FEC_OPD			0x0ec /* Opcode + Pause duration */
+#define FEC_TXIC0		0x0f0 /* Tx Interrupt Coalescing for ring 0 */
+#define FEC_TXIC1		0x0f4 /* Tx Interrupt Coalescing for ring 1 */
+#define FEC_TXIC2		0x0f8 /* Tx Interrupt Coalescing for ring 2 */
+#define FEC_RXIC0		0x100 /* Rx Interrupt Coalescing for ring 0 */
+#define FEC_RXIC1		0x104 /* Rx Interrupt Coalescing for ring 1 */
+#define FEC_RXIC2		0x108 /* Rx Interrupt Coalescing for ring 2 */
+#define FEC_HASH_TABLE_HIGH	0x118 /* High 32bits hash table */
+#define FEC_HASH_TABLE_LOW	0x11c /* Low 32bits hash table */
+#define FEC_GRP_HASH_TABLE_HIGH	0x120 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW	0x124 /* Low 32bits hash table */
+#define FEC_X_WMRK		0x144 /* FIFO transmit water mark */
+#define FEC_R_BOUND		0x14c /* FIFO receive bound reg */
+#define FEC_R_FSTART		0x150 /* FIFO receive start reg */
+#define FEC_R_DES_START_1	0x160 /* Receive descriptor ring 1 */
+#define FEC_X_DES_START_1	0x164 /* Transmit descriptor ring 1 */
+#define FEC_R_BUFF_SIZE_1	0x168 /* Maximum receive buff ring1 size */
+#define FEC_R_DES_START_2	0x16c /* Receive descriptor ring 2 */
+#define FEC_X_DES_START_2	0x170 /* Transmit descriptor ring 2 */
+#define FEC_R_BUFF_SIZE_2	0x174 /* Maximum receive buff ring2 size */
+#define FEC_R_DES_START_0	0x180 /* Receive descriptor ring */
+#define FEC_X_DES_START_0	0x184 /* Transmit descriptor ring */
+#define FEC_R_BUFF_SIZE_0	0x188 /* Maximum receive buff size */
+#define FEC_R_FIFO_RSFL		0x190 /* Receive FIFO section full threshold */
+#define FEC_R_FIFO_RSEM		0x194 /* Receive FIFO section empty threshold */
+#define FEC_R_FIFO_RAEM		0x198 /* Receive FIFO almost empty threshold */
+#define FEC_R_FIFO_RAFL		0x19c /* Receive FIFO almost full threshold */
+#define FEC_FTRL		0x1b0 /* Frame truncation receive length*/
+#define FEC_RACC		0x1c4 /* Receive Accelerator function */
+#define FEC_RCMR_1		0x1c8 /* Receive classification match ring 1 */
+#define FEC_RCMR_2		0x1cc /* Receive classification match ring 2 */
+#define FEC_DMA_CFG_1		0x1d8 /* DMA class configuration for ring 1 */
+#define FEC_DMA_CFG_2		0x1dc /* DMA class Configuration for ring 2 */
+#define FEC_R_DES_ACTIVE_1	0x1e0 /* Rx descriptor active for ring 1 */
+#define FEC_X_DES_ACTIVE_1	0x1e4 /* Tx descriptor active for ring 1 */
+#define FEC_R_DES_ACTIVE_2	0x1e8 /* Rx descriptor active for ring 2 */
+#define FEC_X_DES_ACTIVE_2	0x1ec /* Tx descriptor active for ring 2 */
+#define FEC_QOS_SCHEME		0x1f0 /* Set multi queues Qos scheme */
+#define FEC_MIIGSK_CFGR		0x300 /* MIIGSK Configuration reg */
+#define FEC_MIIGSK_ENR		0x308 /* MIIGSK Enable reg */
+
+#define BM_MIIGSK_CFGR_MII		0x00
+#define BM_MIIGSK_CFGR_RMII		0x01
+#define BM_MIIGSK_CFGR_FRCONT_10M	0x40
+
+#define RMON_T_DROP		0x200 /* Count of frames not counted correctly */
+#define RMON_T_PACKETS		0x204 /* RMON TX packet count */
+#define RMON_T_BC_PKT		0x208 /* RMON TX broadcast pkts */
+#define RMON_T_MC_PKT		0x20c /* RMON TX multicast pkts */
+#define RMON_T_CRC_ALIGN	0x210 /* RMON TX pkts with CRC align err */
+#define RMON_T_UNDERSIZE	0x214 /* RMON TX pkts < 64 bytes, good CRC */
+#define RMON_T_OVERSIZE		0x218 /* RMON TX pkts > MAX_FL bytes good CRC */
+#define RMON_T_FRAG		0x21c /* RMON TX pkts < 64 bytes, bad CRC */
+#define RMON_T_JAB		0x220 /* RMON TX pkts > MAX_FL bytes, bad CRC */
+#define RMON_T_COL		0x224 /* RMON TX collision count */
+#define RMON_T_P64		0x228 /* RMON TX 64 byte pkts */
+#define RMON_T_P65TO127		0x22c /* RMON TX 65 to 127 byte pkts */
+#define RMON_T_P128TO255	0x230 /* RMON TX 128 to 255 byte pkts */
+#define RMON_T_P256TO511	0x234 /* RMON TX 256 to 511 byte pkts */
+#define RMON_T_P512TO1023	0x238 /* RMON TX 512 to 1023 byte pkts */
+#define RMON_T_P1024TO2047	0x23c /* RMON TX 1024 to 2047 byte pkts */
+#define RMON_T_P_GTE2048	0x240 /* RMON TX pkts > 2048 bytes */
+#define RMON_T_OCTETS		0x244 /* RMON TX octets */
+#define IEEE_T_DROP		0x248 /* Count of frames not counted correctly */
+#define IEEE_T_FRAME_OK		0x24c /* Frames tx'd OK */
+#define IEEE_T_1COL		0x250 /* Frames tx'd with single collision */
+#define IEEE_T_MCOL		0x254 /* Frames tx'd with multiple collision */
+#define IEEE_T_DEF		0x258 /* Frames tx'd after deferral delay */
+#define IEEE_T_LCOL		0x25c /* Frames tx'd with late collision */
+#define IEEE_T_EXCOL		0x260 /* Frames tx'd with excesv collisions */
+#define IEEE_T_MACERR		0x264 /* Frames tx'd with TX FIFO underrun */
+#define IEEE_T_CSERR		0x268 /* Frames tx'd with carrier sense err */
+#define IEEE_T_SQE		0x26c /* Frames tx'd with SQE err */
+#define IEEE_T_FDXFC		0x270 /* Flow control pause frames tx'd */
+#define IEEE_T_OCTETS_OK	0x274 /* Octet count for frames tx'd w/o err */
+#define RMON_R_PACKETS		0x284 /* RMON RX packet count */
+#define RMON_R_BC_PKT		0x288 /* RMON RX broadcast pkts */
+#define RMON_R_MC_PKT		0x28c /* RMON RX multicast pkts */
+#define RMON_R_CRC_ALIGN	0x290 /* RMON RX pkts with CRC alignment err */
+#define RMON_R_UNDERSIZE	0x294 /* RMON RX pkts < 64 bytes, good CRC */
+#define RMON_R_OVERSIZE		0x298 /* RMON RX pkts > MAX_FL bytes good CRC */
+#define RMON_R_FRAG		0x29c /* RMON RX pkts < 64 bytes, bad CRC */
+#define RMON_R_JAB		0x2a0 /* RMON RX pkts > MAX_FL bytes, bad CRC */
+#define RMON_R_RESVD_O		0x2a4 /* Reserved */
+#define RMON_R_P64		0x2a8 /* RMON RX 64 byte pkts */
+#define RMON_R_P65TO127		0x2ac /* RMON RX 65 to 127 byte pkts */
+#define RMON_R_P128TO255	0x2b0 /* RMON RX 128 to 255 byte pkts */
+#define RMON_R_P256TO511	0x2b4 /* RMON RX 256 to 511 byte pkts */
+#define RMON_R_P512TO1023	0x2b8 /* RMON RX 512 to 1023 byte pkts */
+#define RMON_R_P1024TO2047	0x2bc /* RMON RX 1024 to 2047 byte pkts */
+#define RMON_R_P_GTE2048	0x2c0 /* RMON RX pkts > 2048 bytes */
+#define RMON_R_OCTETS		0x2c4 /* RMON RX octets */
+#define IEEE_R_DROP		0x2c8 /* Count frames not counted correctly */
+#define IEEE_R_FRAME_OK		0x2cc /* Frames rx'd OK */
+#define IEEE_R_CRC		0x2d0 /* Frames rx'd with CRC err */
+#define IEEE_R_ALIGN		0x2d4 /* Frames rx'd with alignment err */
+#define IEEE_R_MACERR		0x2d8 /* Receive FIFO overflow count */
+#define IEEE_R_FDXFC		0x2dc /* Flow control pause frames rx'd */
+#define IEEE_R_OCTETS_OK	0x2e0 /* Octet cnt for frames rx'd w/o err */
+
+#else
+
+#define FEC_ECNTRL		0x000 /* Ethernet control reg */
+#define FEC_IEVENT		0x004 /* Interrupt event reg */
+#define FEC_IMASK		0x008 /* Interrupt mask reg */
+#define FEC_IVEC		0x00c /* Interrupt vec status reg */
+#define FEC_R_DES_ACTIVE_0	0x010 /* Receive descriptor reg */
+#define FEC_R_DES_ACTIVE_1	FEC_R_DES_ACTIVE_0
+#define FEC_R_DES_ACTIVE_2	FEC_R_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_0	0x014 /* Transmit descriptor reg */
+#define FEC_X_DES_ACTIVE_1	FEC_X_DES_ACTIVE_0
+#define FEC_X_DES_ACTIVE_2	FEC_X_DES_ACTIVE_0
+#define FEC_MII_DATA		0x040 /* MII manage frame reg */
+#define FEC_MII_SPEED		0x044 /* MII speed control reg */
+#define FEC_R_BOUND		0x08c /* FIFO receive bound reg */
+#define FEC_R_FSTART		0x090 /* FIFO receive start reg */
+#define FEC_X_WMRK		0x0a4 /* FIFO transmit water mark */
+#define FEC_X_FSTART		0x0ac /* FIFO transmit start reg */
+#define FEC_R_CNTRL		0x104 /* Receive control reg */
+#define FEC_MAX_FRM_LEN		0x108 /* Maximum frame length reg */
+#define FEC_X_CNTRL		0x144 /* Transmit Control reg */
+#define FEC_ADDR_LOW		0x3c0 /* Low 32bits MAC address */
+#define FEC_ADDR_HIGH		0x3c4 /* High 16bits MAC address */
+#define FEC_GRP_HASH_TABLE_HIGH	0x3c8 /* High 32bits hash table */
+#define FEC_GRP_HASH_TABLE_LOW	0x3cc /* Low 32bits hash table */
+#define FEC_R_DES_START_0	0x3d0 /* Receive descriptor ring */
+#define FEC_R_DES_START_1	FEC_R_DES_START_0
+#define FEC_R_DES_START_2	FEC_R_DES_START_0
+#define FEC_X_DES_START_0	0x3d4 /* Transmit descriptor ring */
+#define FEC_X_DES_START_1	FEC_X_DES_START_0
+#define FEC_X_DES_START_2	FEC_X_DES_START_0
+#define FEC_R_BUFF_SIZE_0	0x3d8 /* Maximum receive buff size */
+#define FEC_R_BUFF_SIZE_1	FEC_R_BUFF_SIZE_0
+#define FEC_R_BUFF_SIZE_2	FEC_R_BUFF_SIZE_0
+#define FEC_FIFO_RAM		0x400 /* FIFO RAM buffer */
+/* These registers do not exist on the real chip.
+ * They are defined only to keep the build working.
+ */
+#define FEC_RCMR_1		0xfff
+#define FEC_RCMR_2		0xfff
+#define FEC_DMA_CFG_1		0xfff
+#define FEC_DMA_CFG_2		0xfff
+#define FEC_TXIC0		0xfff
+#define FEC_TXIC1		0xfff
+#define FEC_TXIC2		0xfff
+#define FEC_RXIC0		0xfff
+#define FEC_RXIC1		0xfff
+#define FEC_RXIC2		0xfff
+#endif /* CONFIG_M523x || ... || CONFIG_COMPILE_TEST */
+
+
+/*
+ *	Define the buffer descriptor structure.
+ *
+ *	Evidently, ARM SoCs have the FEC block generated in a
+ *	little endian mode so adjust endianness accordingly.
+ */
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+#define fec32_to_cpu le32_to_cpu
+#define fec16_to_cpu le16_to_cpu
+#define cpu_to_fec32 cpu_to_le32
+#define cpu_to_fec16 cpu_to_le16
+#define __fec32 __le32
+#define __fec16 __le16
+
+struct bufdesc {
+	__fec16 cbd_datlen;	/* Data length */
+	__fec16 cbd_sc;		/* Control and status info */
+	__fec32 cbd_bufaddr;	/* Buffer address */
+};
+#else
+#define fec32_to_cpu be32_to_cpu
+#define fec16_to_cpu be16_to_cpu
+#define cpu_to_fec32 cpu_to_be32
+#define cpu_to_fec16 cpu_to_be16
+#define __fec32 __be32
+#define __fec16 __be16
+
+struct bufdesc {
+	__fec16	cbd_sc;		/* Control and status info */
+	__fec16	cbd_datlen;	/* Data length */
+	__fec32	cbd_bufaddr;	/* Buffer address */
+};
+#endif
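+
+/*
+ * Accessor sketch (illustrative only): descriptor fields must always go
+ * through the fec helpers so the same code works on both layouts, e.g.
+ *
+ *	len = fec16_to_cpu(bdp->cbd_datlen);
+ *	bdp->cbd_bufaddr = cpu_to_fec32(dma_addr);
+ */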
+
+struct bufdesc_ex {
+	struct bufdesc desc;
+	__fec32 cbd_esc;
+	__fec32 cbd_prot;
+	__fec32 cbd_bdu;
+	__fec32 ts;
+	__fec16 res0[4];
+};
+
+/*
+ *	The following definitions courtesy of commproc.h, which were
+ *	Copyright (c) 1997 Dan Malek (dmalek@jlc.net).
+ */
+#define BD_SC_EMPTY	((ushort)0x8000)	/* Receive is empty */
+#define BD_SC_READY	((ushort)0x8000)	/* Transmit is ready */
+#define BD_SC_WRAP	((ushort)0x2000)	/* Last buffer descriptor */
+#define BD_SC_INTRPT	((ushort)0x1000)	/* Interrupt on change */
+#define BD_SC_CM	((ushort)0x0200)	/* Continuous mode */
+#define BD_SC_ID	((ushort)0x0100)	/* Rec'd too many idles */
+#define BD_SC_P		((ushort)0x0100)	/* xmt preamble */
+#define BD_SC_BR	((ushort)0x0020)	/* Break received */
+#define BD_SC_FR	((ushort)0x0010)	/* Framing error */
+#define BD_SC_PR	((ushort)0x0008)	/* Parity error */
+#define BD_SC_OV	((ushort)0x0002)	/* Overrun */
+#define BD_SC_CD	((ushort)0x0001)	/* ?? */
+
+/* Buffer descriptor control/status used by Ethernet receive.
+ */
+#define BD_ENET_RX_EMPTY	((ushort)0x8000)
+#define BD_ENET_RX_WRAP		((ushort)0x2000)
+#define BD_ENET_RX_INTR		((ushort)0x1000)
+#define BD_ENET_RX_LAST		((ushort)0x0800)
+#define BD_ENET_RX_FIRST	((ushort)0x0400)
+#define BD_ENET_RX_MISS		((ushort)0x0100)
+#define BD_ENET_RX_LG		((ushort)0x0020)
+#define BD_ENET_RX_NO		((ushort)0x0010)
+#define BD_ENET_RX_SH		((ushort)0x0008)
+#define BD_ENET_RX_CR		((ushort)0x0004)
+#define BD_ENET_RX_OV		((ushort)0x0002)
+#define BD_ENET_RX_CL		((ushort)0x0001)
+#define BD_ENET_RX_STATS	((ushort)0x013f)	/* All status bits */
+
+/* Enhanced buffer descriptor control/status used by Ethernet receive */
+#define BD_ENET_RX_VLAN		0x00000004
+
+/* Buffer descriptor control/status used by Ethernet transmit.
+ */
+#define BD_ENET_TX_READY	((ushort)0x8000)
+#define BD_ENET_TX_PAD		((ushort)0x4000)
+#define BD_ENET_TX_WRAP		((ushort)0x2000)
+#define BD_ENET_TX_INTR		((ushort)0x1000)
+#define BD_ENET_TX_LAST		((ushort)0x0800)
+#define BD_ENET_TX_TC		((ushort)0x0400)
+#define BD_ENET_TX_DEF		((ushort)0x0200)
+#define BD_ENET_TX_HB		((ushort)0x0100)
+#define BD_ENET_TX_LC		((ushort)0x0080)
+#define BD_ENET_TX_RL		((ushort)0x0040)
+#define BD_ENET_TX_RCMASK	((ushort)0x003c)
+#define BD_ENET_TX_UN		((ushort)0x0002)
+#define BD_ENET_TX_CSL		((ushort)0x0001)
+#define BD_ENET_TX_STATS	((ushort)0x0fff)	/* All status bits */
+
+/* enhanced buffer descriptor control/status used by Ethernet transmit */
+#define BD_ENET_TX_INT		0x40000000
+#define BD_ENET_TX_TS		0x20000000
+#define BD_ENET_TX_PINS		0x10000000
+#define BD_ENET_TX_IINS		0x08000000
+
+
+/* This device has up to three irqs on some platforms */
+#define FEC_IRQ_NUM		3
+
+/* Maximum number of queues supported.
+ * ENET with the AVB IP can support up to 3 independent tx queues and rx queues.
+ * Users may select any queue count less than or equal to 3.
+ */
+#define FEC_ENET_MAX_TX_QS	3
+#define FEC_ENET_MAX_RX_QS	3
+
+#define FEC_R_DES_START(X)	(((X) == 1) ? FEC_R_DES_START_1 : \
+				(((X) == 2) ? \
+					FEC_R_DES_START_2 : FEC_R_DES_START_0))
+#define FEC_X_DES_START(X)	(((X) == 1) ? FEC_X_DES_START_1 : \
+				(((X) == 2) ? \
+					FEC_X_DES_START_2 : FEC_X_DES_START_0))
+#define FEC_R_BUFF_SIZE(X)	(((X) == 1) ? FEC_R_BUFF_SIZE_1 : \
+				(((X) == 2) ? \
+					FEC_R_BUFF_SIZE_2 : FEC_R_BUFF_SIZE_0))
+
+#define FEC_DMA_CFG(X)		(((X) == 2) ? FEC_DMA_CFG_2 : FEC_DMA_CFG_1)
+
+#define DMA_CLASS_EN		(1 << 16)
+#define FEC_RCMR(X)		(((X) == 2) ? FEC_RCMR_2 : FEC_RCMR_1)
+#define IDLE_SLOPE_MASK		0xffff
+#define IDLE_SLOPE_1		0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE_2		0x200 /* BW fraction: 0.5 */
+#define IDLE_SLOPE(X)		(((X) == 1) ?				\
+				(IDLE_SLOPE_1 & IDLE_SLOPE_MASK) :	\
+				(IDLE_SLOPE_2 & IDLE_SLOPE_MASK))
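+/* Reading of the idle slope values (an assumption, not stated in the
+ * original source): if the hardware encodes the bandwidth fraction as
+ * value / 1024, then 0x200 = 512 yields 512 / 1024 = 0.5, which matches
+ * the "BW fraction: 0.5" comments above.
+ */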
+#define RCMR_MATCHEN		(0x1 << 16)
+#define RCMR_CMP_CFG(v, n)	(((v) & 0x7) <<  (n << 2))
+#define RCMR_CMP_1		(RCMR_CMP_CFG(0, 0) | RCMR_CMP_CFG(1, 1) | \
+				RCMR_CMP_CFG(2, 2) | RCMR_CMP_CFG(3, 3))
+#define RCMR_CMP_2		(RCMR_CMP_CFG(4, 0) | RCMR_CMP_CFG(5, 1) | \
+				RCMR_CMP_CFG(6, 2) | RCMR_CMP_CFG(7, 3))
+#define RCMR_CMP(X)		(((X) == 1) ? RCMR_CMP_1 : RCMR_CMP_2)
+#define FEC_TX_BD_FTYPE(X)	(((X) & 0xf) << 20)
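+
+/* Worked example for the receive-classifier compare fields (illustrative):
+ * RCMR_CMP_CFG(v, n) places the 3-bit compare value v into 4-bit field n
+ * (shift n << 2), so
+ *
+ *   RCMR_CMP_1 = 0x3210	ring 1 matches VLAN priorities 0..3
+ *   RCMR_CMP_2 = 0x7654	ring 2 matches VLAN priorities 4..7
+ *
+ * The transmit-side priority mapping is kept separately in the
+ * fec_enet_vlan_pri_to_queue[] table defined in fec_main.c.
+ */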
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are powers of two, so it is best
+ * to keep them that way.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuff directly.
+ */
+
+#define FEC_ENET_RX_PAGES	256
+#define FEC_ENET_RX_FRSIZE	2048
+#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define FEC_ENET_TX_FRSIZE	2048
+#define FEC_ENET_TX_FRPPG	(PAGE_SIZE / FEC_ENET_TX_FRSIZE)
+#define TX_RING_SIZE		512	/* Must be power of two */
+#define TX_RING_MOD_MASK	511	/*   for this to work */
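+
+/* Worked arithmetic (illustrative, assuming 4 KiB pages):
+ *   FEC_ENET_RX_FRPPG = 4096 / 2048 = 2 frames per page
+ *   RX_RING_SIZE      = 2 * 256    = 512 descriptors
+ * Keeping TX_RING_SIZE a power of two lets ring indices wrap with a simple
+ * mask instead of a modulo, e.g. next = (index + 1) & TX_RING_MOD_MASK,
+ * which wraps 511 back to 0.
+ */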
+
+#define BD_ENET_RX_INT		0x00800000
+#define BD_ENET_RX_PTP		((ushort)0x0400)
+#define BD_ENET_RX_ICE		0x00000020
+#define BD_ENET_RX_PCR		0x00000010
+#define FLAG_RX_CSUM_ENABLED	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+#define FLAG_RX_CSUM_ERROR	(BD_ENET_RX_ICE | BD_ENET_RX_PCR)
+
+/* Interrupt events/masks. */
+#define FEC_ENET_HBERR  ((uint)0x80000000)      /* Heartbeat error */
+#define FEC_ENET_BABR   ((uint)0x40000000)      /* Babbling receiver */
+#define FEC_ENET_BABT   ((uint)0x20000000)      /* Babbling transmitter */
+#define FEC_ENET_GRA    ((uint)0x10000000)      /* Graceful stop complete */
+#define FEC_ENET_TXF_0	((uint)0x08000000)	/* Full frame transmitted */
+#define FEC_ENET_TXF_1	((uint)0x00000008)	/* Full frame transmitted */
+#define FEC_ENET_TXF_2	((uint)0x00000080)	/* Full frame transmitted */
+#define FEC_ENET_TXB    ((uint)0x04000000)      /* A buffer was transmitted */
+#define FEC_ENET_RXF_0	((uint)0x02000000)	/* Full frame received */
+#define FEC_ENET_RXF_1	((uint)0x00000002)	/* Full frame received */
+#define FEC_ENET_RXF_2	((uint)0x00000020)	/* Full frame received */
+#define FEC_ENET_RXB    ((uint)0x01000000)      /* A buffer was received */
+#define FEC_ENET_MII    ((uint)0x00800000)      /* MII interrupt */
+#define FEC_ENET_EBERR  ((uint)0x00400000)      /* SDMA bus error */
+#define FEC_ENET_WAKEUP	((uint)0x00020000)	/* Wakeup request */
+#define FEC_ENET_TXF	(FEC_ENET_TXF_0 | FEC_ENET_TXF_1 | FEC_ENET_TXF_2)
+#define FEC_ENET_RXF	(FEC_ENET_RXF_0 | FEC_ENET_RXF_1 | FEC_ENET_RXF_2)
+#define FEC_ENET_TS_AVAIL       ((uint)0x00010000)
+#define FEC_ENET_TS_TIMER       ((uint)0x00008000)
+
+#define FEC_DEFAULT_IMASK (FEC_ENET_TXF | FEC_ENET_RXF)
+#define FEC_RX_DISABLED_IMASK (FEC_DEFAULT_IMASK & (~FEC_ENET_RXF))
+
+/* ENET interrupt coalescing macro define */
+#define FEC_ITR_CLK_SEL		(0x1 << 30)
+#define FEC_ITR_EN		(0x1 << 31)
+#define FEC_ITR_ICFT(X)		(((X) & 0xff) << 20)
+#define FEC_ITR_ICTT(X)		((X) & 0xffff)
+#define FEC_ITR_ICFT_DEFAULT	200  /* Set 200 frame count threshold */
+#define FEC_ITR_ICTT_DEFAULT	10   /* Set 10 us timer threshold */
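+
+/* Sketch of how a coalescing register value is presumably composed from the
+ * macros above (illustrative, not a verbatim excerpt from this driver):
+ *
+ *   u32 itr = FEC_ITR_EN | FEC_ITR_CLK_SEL |
+ *             FEC_ITR_ICFT(FEC_ITR_ICFT_DEFAULT) |
+ *             FEC_ITR_ICTT(FEC_ITR_ICTT_DEFAULT);
+ *   writel(itr, fep->hwp + FEC_TXIC0);
+ *
+ * i.e. raise the interrupt after 200 frames or 10 us, whichever comes first.
+ */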
+
+#define FEC_VLAN_TAG_LEN	0x04
+#define FEC_ETHTYPE_LEN		0x02
+
+/* Controller is ENET-MAC */
+#define FEC_QUIRK_ENET_MAC		(1 << 0)
+/* Controller needs driver to swap frame */
+#define FEC_QUIRK_SWAP_FRAME		(1 << 1)
+/* Controller uses gasket */
+#define FEC_QUIRK_USE_GASKET		(1 << 2)
+/* Controller has GBIT support */
+#define FEC_QUIRK_HAS_GBIT		(1 << 3)
+/* Controller has extend desc buffer */
+#define FEC_QUIRK_HAS_BUFDESC_EX	(1 << 4)
+/* Controller has hardware checksum support */
+#define FEC_QUIRK_HAS_CSUM		(1 << 5)
+/* Controller has hardware vlan support */
+#define FEC_QUIRK_HAS_VLAN		(1 << 6)
+/* ENET IP errata ERR006358
+ *
+ * If the ready bit in the transmit buffer descriptor (TxBD[R]) is previously
+ * detected as not set during a prior frame transmission, then the
+ * ENET_TDAR[TDAR] bit is cleared at a later time, even if additional TxBDs
+ * were added to the ring and the ENET_TDAR[TDAR] bit is set. This results in
+ * frames not being transmitted until there is a 0-to-1 transition on
+ * ENET_TDAR[TDAR].
+ */
+#define FEC_QUIRK_ERR006358		(1 << 7)
+/* ENET IP hw AVB
+ *
+ * i.MX6SX ENET IP add Audio Video Bridging (AVB) feature support.
+ * - Two class indicators on receive with configurable priority
+ * - Two class indicators and line speed timer on transmit allowing
+ *   implementation class credit based shapers externally
+ * - Additional DMA registers provisioned to allow managing up to 3
+ *   independent rings
+ */
+#define FEC_QUIRK_HAS_AVB		(1 << 8)
+/* There is a TDAR race condition for multi-queue when the software sets TDAR
+ * and the UDMA clears TDAR simultaneously or in a small window (2-4 cycles).
+ * This will cause the udma_tx and udma_tx_arbiter state machines to hang.
+ * The issue exists in the i.MX6SX ENET IP.
+ */
+#define FEC_QUIRK_ERR007885		(1 << 9)
+/* The ENET Block Guide chapter for the i.MX6SX (PELE) describes one issue:
+ * after setting ENET_ATCR[Capture], some cycles are needed before the
+ * counter value is captured in the register clock domain.
+ * The wait time is at least 6 clock cycles of the slower of the register
+ * clock and the 1588 clock. The 1588 ts_clk is fixed at 25 MHz (a 40 ns
+ * period) and the register clock is 66 MHz, so the wait time must be
+ * greater than 240 ns (40 ns * 6).
+ */
+#define FEC_QUIRK_BUG_CAPTURE		(1 << 10)
+/* Controller has only one MDIO bus */
+#define FEC_QUIRK_SINGLE_MDIO		(1 << 11)
+/* Controller supports RACC register */
+#define FEC_QUIRK_HAS_RACC		(1 << 12)
+/* Controller supports interrupt coalescing */
+#define FEC_QUIRK_HAS_COALESCE		(1 << 13)
+/* Interrupt doesn't wake CPU from deep idle */
+#define FEC_QUIRK_ERR006687		(1 << 14)
+/* The MIB counters should be cleared and enabled during
+ * initialisation.
+ */
+#define FEC_QUIRK_MIB_CLEAR		(1 << 15)
+/* Only the i.MX25/i.MX27/i.MX28 controllers support the FRBR and FRSR
+ * registers; those FIFO receive registers are reserved on other platforms.
+ */
+#define FEC_QUIRK_HAS_FRREG		(1 << 16)
+
+/* Some FEC hardware blocks need the MMFR cleared at setup time to avoid
+ * the generation of an MII event. This must be avoided in the older
+ * FEC blocks where it will stop MII events being generated.
+ */
+#define FEC_QUIRK_CLEAR_SETUP_MII	(1 << 17)
+/* The i.MX8QM ENET IP version adds a new feature to generate delayed TXC/RXC
+ * as an alternative option to make sure it works well with various PHYs.
+ * To implement the delayed clock, ENET takes synchronized 250 MHz clocks
+ * to generate the 2 ns delay.
+ */
+#define FEC_QUIRK_DELAYED_CLKS_SUPPORT	(1 << 18)
+
+struct bufdesc_prop {
+	int qid;
+	/* Address of Rx and Tx buffers */
+	struct bufdesc	*base;
+	struct bufdesc	*last;
+	struct bufdesc	*cur;
+	void __iomem	*reg_desc_active;
+	dma_addr_t	dma;
+	unsigned short ring_size;
+	unsigned char dsize;
+	unsigned char dsize_log2;
+};
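+
+/* dsize is the per-descriptor stride (sizeof(struct bufdesc) or
+ * sizeof(struct bufdesc_ex)) and dsize_log2 its base-2 logarithm, so a
+ * descriptor index can be recovered without a division, exactly as
+ * fec_enet_get_bd_index() does in fec_main.c:
+ *
+ *   index = ((char *)bdp - (char *)bd->base) >> bd->dsize_log2;
+ */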
+
+struct fec_enet_priv_tx_q {
+	struct bufdesc_prop bd;
+	unsigned char *tx_bounce[TX_RING_SIZE];
+	union {	/* CAUTION: both arrays must have the same element count. */
+		struct  sk_buff *tx_skbuff[TX_RING_SIZE];
+		struct rtskb *tx_rtbuff[TX_RING_SIZE];
+	};
+
+	unsigned short tx_stop_threshold;
+	unsigned short tx_wake_threshold;
+
+	struct bufdesc	*dirty_tx;
+	char *tso_hdrs;
+	dma_addr_t tso_hdrs_dma;
+};
+
+struct fec_enet_priv_rx_q {
+	struct bufdesc_prop bd;
+	union {	/* CAUTION: both arrays must have the same element count. */
+		struct  sk_buff *rx_skbuff[RX_RING_SIZE];
+		struct rtskb *rx_rtbuff[RX_RING_SIZE];
+	};
+};
+
+struct fec_stop_mode_gpr {
+	struct regmap *gpr;
+	u8 reg;
+	u8 bit;
+};
+
+/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
+struct fec_enet_private {
+	/* Hardware registers of the FEC device */
+	void __iomem *hwp;
+
+	struct net_device *netdev;
+
+	struct fec_rt_data {
+		rtdm_irq_t irq_handle[3];
+		rtdm_lock_t lock;
+		rtdm_nrtsig_t mdio_sig;
+		struct rtnet_device dev;
+	} rtnet;
+
+	struct clk *clk_ipg;
+	struct clk *clk_ahb;
+	struct clk *clk_ref;
+	struct clk *clk_enet_out;
+	struct clk *clk_ptp;
+
+	bool ptp_clk_on;
+	struct mutex ptp_clk_mutex;
+	unsigned int num_tx_queues;
+	unsigned int num_rx_queues;
+
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	struct fec_enet_priv_tx_q *tx_queue[FEC_ENET_MAX_TX_QS];
+	struct fec_enet_priv_rx_q *rx_queue[FEC_ENET_MAX_RX_QS];
+
+	unsigned int total_tx_ring_size;
+	unsigned int total_rx_ring_size;
+
+	struct	platform_device *pdev;
+
+	int	dev_id;
+
+	/* Phylib and MDIO interface */
+	struct	mii_bus *mii_bus;
+	uint	phy_speed;
+	phy_interface_t	phy_interface;
+	struct device_node *phy_node;
+	int	link;
+	int	full_duplex;
+	int	speed;
+	struct	completion mdio_done;
+	int	irq[FEC_IRQ_NUM];
+	int	irqnr;
+	bool	bufdesc_ex;
+	int	pause_flag;
+	int	wol_flag;
+	u32	quirks;
+
+	int	csum_flags;
+
+	struct work_struct tx_timeout_work;
+
+	struct ptp_clock *ptp_clock;
+	struct ptp_clock_info ptp_caps;
+	unsigned long last_overflow_check;
+	spinlock_t tmreg_lock;
+	struct cyclecounter cc;
+	struct timecounter tc;
+	int rx_hwtstamp_filter;
+	u32 base_incval;
+	u32 cycle_speed;
+	int hwts_rx_en;
+	int hwts_tx_en;
+	struct delayed_work time_keep;
+	struct regulator *reg_phy;
+	struct fec_stop_mode_gpr stop_gpr;
+
+	unsigned int tx_align;
+	unsigned int rx_align;
+
+	/* hw interrupt coalesce */
+	unsigned int rx_pkts_itr;
+	unsigned int rx_time_itr;
+	unsigned int tx_pkts_itr;
+	unsigned int tx_time_itr;
+	unsigned int itr_clk_rate;
+
+	u32 rx_copybreak;
+
+	/* ptp clock period in ns*/
+	unsigned int ptp_inc;
+
+	/* pps  */
+	int pps_channel;
+	unsigned int reload_period;
+	int pps_enable;
+	unsigned int next_counter;
+
+	u64 ethtool_stats[];
+};
+
+void fec_ptp_init(struct platform_device *pdev, int irq_idx);
+void fec_ptp_stop(struct platform_device *pdev);
+void fec_ptp_start_cyclecounter(struct net_device *ndev);
+void fec_ptp_disable_hwts(struct net_device *ndev);
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr);
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr);
+
+/****************************************************************************/
+#endif /* FEC_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c
new file mode 100644
index 0000000..99c5ec6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_main.c
@@ -0,0 +1,3708 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Support for FEC controller of ColdFire processors.
+ * Copyright (c) 2001-2005 Greg Ungerer (gerg@snapgear.com)
+ *
+ * Bug fixes and cleanup by Philippe De Muyter (phdm@macqel.be)
+ * Copyright (c) 2004-2006 Macq Electronique SA.
+ *
+ * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/pm_runtime.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <net/ip.h>
+#include <net/tso.h>
+#include <linux/tcp.h>
+#include <linux/udp.h>
+#include <linux/icmp.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/crc32.h>
+#include <linux/platform_device.h>
+#include <linux/mdio.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/regulator/consumer.h>
+#include <linux/if_vlan.h>
+#include <linux/pinctrl/consumer.h>
+#include <linux/prefetch.h>
+#include <linux/mfd/syscon.h>
+#include <linux/regmap.h>
+#include <linux/iopoll.h>
+#include <soc/imx/cpuidle.h>
+#include <asm/cacheflush.h>
+
+#include "fec.h"
+
+static void set_multicast_list(struct net_device *ndev);
+static void fec_enet_itr_coal_init(struct net_device *ndev);
+
+#define DRIVER_NAME	"rt_fec"
+
+static const u16 fec_enet_vlan_pri_to_queue[8] = {0, 0, 1, 1, 1, 2, 2, 2};
+
+/* Pause frame field and FIFO threshold */
+#define FEC_ENET_FCE	(1 << 5)
+#define FEC_ENET_RSEM_V	0x84
+#define FEC_ENET_RSFL_V	16
+#define FEC_ENET_RAEM_V	0x8
+#define FEC_ENET_RAFL_V	0x8
+#define FEC_ENET_OPD_V	0xFFF0
+#define FEC_MDIO_PM_TIMEOUT  100 /* ms */
+
+struct fec_devinfo {
+	u32 quirks;
+};
+
+static const struct fec_devinfo fec_imx25_info = {
+	.quirks = FEC_QUIRK_USE_GASKET | FEC_QUIRK_MIB_CLEAR |
+		  FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx27_info = {
+	.quirks = FEC_QUIRK_MIB_CLEAR | FEC_QUIRK_HAS_FRREG,
+};
+
+static const struct fec_devinfo fec_imx28_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_SWAP_FRAME |
+		  FEC_QUIRK_SINGLE_MDIO | FEC_QUIRK_HAS_RACC |
+		  FEC_QUIRK_HAS_FRREG | FEC_QUIRK_CLEAR_SETUP_MII,
+};
+
+static const struct fec_devinfo fec_imx6q_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR006358 |
+		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_CLEAR_SETUP_MII,
+};
+
+static const struct fec_devinfo fec_mvf600_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_RACC,
+};
+
+static const struct fec_devinfo fec_imx6x_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+		  FEC_QUIRK_CLEAR_SETUP_MII,
+};
+
+static const struct fec_devinfo fec_imx6ul_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_ERR007885 |
+		  FEC_QUIRK_BUG_CAPTURE | FEC_QUIRK_HAS_RACC |
+		  FEC_QUIRK_HAS_COALESCE | FEC_QUIRK_CLEAR_SETUP_MII,
+};
+
+static const struct fec_devinfo fec_imx8mq_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+		  FEC_QUIRK_CLEAR_SETUP_MII,
+};
+
+static const struct fec_devinfo fec_imx8qm_info = {
+	.quirks = FEC_QUIRK_ENET_MAC | FEC_QUIRK_HAS_GBIT |
+		  FEC_QUIRK_HAS_BUFDESC_EX | FEC_QUIRK_HAS_CSUM |
+		  FEC_QUIRK_HAS_VLAN | FEC_QUIRK_HAS_AVB |
+		  FEC_QUIRK_ERR007885 | FEC_QUIRK_BUG_CAPTURE |
+		  FEC_QUIRK_HAS_RACC | FEC_QUIRK_HAS_COALESCE |
+		  FEC_QUIRK_DELAYED_CLKS_SUPPORT,
+};
+
+static struct platform_device_id fec_devtype[] = {
+	{
+		/* keep it for coldfire */
+		.name = DRIVER_NAME,
+		.driver_data = 0,
+	}, {
+		.name = "imx25-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx25_info,
+	}, {
+		.name = "imx27-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx27_info,
+	}, {
+		.name = "imx28-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx28_info,
+	}, {
+		.name = "imx6q-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx6q_info,
+	}, {
+		.name = "mvf600-fec",
+		.driver_data = (kernel_ulong_t)&fec_mvf600_info,
+	}, {
+		.name = "imx6sx-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx6x_info,
+	}, {
+		.name = "imx6ul-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx6ul_info,
+	}, {
+		.name = "imx8mq-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx8mq_info,
+	}, {
+		.name = "imx8qm-fec",
+		.driver_data = (kernel_ulong_t)&fec_imx8qm_info,
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, fec_devtype);
+
+enum imx_fec_type {
+	IMX25_FEC = 1,	/* runs on i.mx25/50/53 */
+	IMX27_FEC,	/* runs on i.mx27/35/51 */
+	IMX28_FEC,
+	IMX6Q_FEC,
+	MVF600_FEC,
+	IMX6SX_FEC,
+	IMX6UL_FEC,
+	IMX8MQ_FEC,
+	IMX8QM_FEC,
+};
+
+static const struct of_device_id fec_dt_ids[] = {
+	{ .compatible = "fsl,imx25-fec", .data = &fec_devtype[IMX25_FEC], },
+	{ .compatible = "fsl,imx27-fec", .data = &fec_devtype[IMX27_FEC], },
+	{ .compatible = "fsl,imx28-fec", .data = &fec_devtype[IMX28_FEC], },
+	{ .compatible = "fsl,imx6q-fec", .data = &fec_devtype[IMX6Q_FEC], },
+	{ .compatible = "fsl,mvf600-fec", .data = &fec_devtype[MVF600_FEC], },
+	{ .compatible = "fsl,imx6sx-fec", .data = &fec_devtype[IMX6SX_FEC], },
+	{ .compatible = "fsl,imx6ul-fec", .data = &fec_devtype[IMX6UL_FEC], },
+	{ .compatible = "fsl,imx8mq-fec", .data = &fec_devtype[IMX8MQ_FEC], },
+	{ .compatible = "fsl,imx8qm-fec", .data = &fec_devtype[IMX8QM_FEC], },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, fec_dt_ids);
+
+static unsigned char macaddr[ETH_ALEN];
+module_param_array(macaddr, byte, NULL, 0);
+MODULE_PARM_DESC(macaddr, "FEC Ethernet MAC address");
+
+#if defined(CONFIG_M5272)
+/*
+ * Some hardware gets its MAC address out of local flash memory.
+ * If this is non-zero then assume it is the address to get the MAC from.
+ */
+#if defined(CONFIG_NETtel)
+#define	FEC_FLASHMAC	0xf0006006
+#elif defined(CONFIG_GILBARCONAP) || defined(CONFIG_SCALES)
+#define	FEC_FLASHMAC	0xf0006000
+#elif defined(CONFIG_CANCam)
+#define	FEC_FLASHMAC	0xf0020000
+#elif defined (CONFIG_M5272C3)
+#define	FEC_FLASHMAC	(0xffe04000 + 4)
+#elif defined(CONFIG_MOD5272)
+#define FEC_FLASHMAC	0xffc0406b
+#else
+#define	FEC_FLASHMAC	0
+#endif
+#endif /* CONFIG_M5272 */
+
+/* The FEC stores dest/src/type/vlan, data, and checksum for receive packets.
+ *
+ * 2048 byte skbufs are allocated. However, alignment requirements
+ * vary between FEC variants. Worst case is 64, so round down by 64.
+ */
+#define PKT_MAXBUF_SIZE		(round_down(2048 - 64, 64))
+#define PKT_MINBUF_SIZE		64
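+
+/* Worked arithmetic (illustrative): round_down(2048 - 64, 64) =
+ * round_down(1984, 64) = 1984, because 1984 is already a multiple of 64
+ * (31 * 64); PKT_MAXBUF_SIZE is therefore 1984 bytes.
+ */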
+
+/* FEC receive acceleration */
+#define FEC_RACC_IPDIS		(1 << 1)
+#define FEC_RACC_PRODIS		(1 << 2)
+#define FEC_RACC_SHIFT16	BIT(7)
+#define FEC_RACC_OPTIONS	(FEC_RACC_IPDIS | FEC_RACC_PRODIS)
+
+/* MIB Control Register */
+#define FEC_MIB_CTRLSTAT_DISABLE	BIT(31)
+
+/*
+ * The 5270/5271/5280/5282/532x RX control register also contains maximum frame
+ * size bits. Other FEC hardware does not, so we need to take that into
+ * account when setting it.
+ */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+    defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+    defined(CONFIG_ARM64)
+#define	OPT_FRAME_SIZE	(PKT_MAXBUF_SIZE << 16)
+#else
+#define	OPT_FRAME_SIZE	0
+#endif
+
+/* FEC MII MMFR bits definition */
+#define FEC_MMFR_ST		(1 << 30)
+#define FEC_MMFR_ST_C45		(0)
+#define FEC_MMFR_OP_READ	(2 << 28)
+#define FEC_MMFR_OP_READ_C45	(3 << 28)
+#define FEC_MMFR_OP_WRITE	(1 << 28)
+#define FEC_MMFR_OP_ADDR_WRITE	(0)
+#define FEC_MMFR_PA(v)		((v & 0x1f) << 23)
+#define FEC_MMFR_RA(v)		((v & 0x1f) << 18)
+#define FEC_MMFR_TA		(2 << 16)
+#define FEC_MMFR_DATA(v)	(v & 0xffff)
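+
+/* Sketch of a composed C22 read frame built from the fields above; this is
+ * the same pattern fec_enet_mdio_read() uses later in this file:
+ *
+ *   writel(FEC_MMFR_ST | FEC_MMFR_OP_READ |
+ *          FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(regnum) |
+ *          FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+ */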
+/* FEC ECR bits definition */
+#define FEC_ECR_MAGICEN		(1 << 2)
+#define FEC_ECR_SLEEP		(1 << 3)
+
+#define FEC_MII_TIMEOUT		30000 /* us */
+
+/* Transmitter timeout */
+#define TX_TIMEOUT (2 * HZ)
+
+#define FEC_PAUSE_FLAG_AUTONEG	0x1
+#define FEC_PAUSE_FLAG_ENABLE	0x2
+#define FEC_WOL_HAS_MAGIC_PACKET	(0x1 << 0)
+#define FEC_WOL_FLAG_ENABLE		(0x1 << 1)
+#define FEC_WOL_FLAG_SLEEP_ON		(0x1 << 2)
+
+#define COPYBREAK_DEFAULT	256
+
+/* Max number of allowed TCP segments for software TSO */
+#define FEC_MAX_TSO_SEGS	100
+#define FEC_MAX_SKB_DESCS	(FEC_MAX_TSO_SEGS * 2 + MAX_SKB_FRAGS)
+
+#define IS_TSO_HEADER(txq, addr) \
+	((addr >= txq->tso_hdrs_dma) && \
+	(addr < txq->tso_hdrs_dma + txq->bd.ring_size * TSO_HEADER_SIZE))
+
+static int mii_cnt;
+
+static struct bufdesc *fec_enet_get_nextdesc(struct bufdesc *bdp,
+					     struct bufdesc_prop *bd)
+{
+	return (bdp >= bd->last) ? bd->base
+			: (struct bufdesc *)(((void *)bdp) + bd->dsize);
+}
+
+static struct bufdesc *fec_enet_get_prevdesc(struct bufdesc *bdp,
+					     struct bufdesc_prop *bd)
+{
+	return (bdp <= bd->base) ? bd->last
+			: (struct bufdesc *)(((void *)bdp) - bd->dsize);
+}
+
+static int fec_enet_get_bd_index(struct bufdesc *bdp,
+				 struct bufdesc_prop *bd)
+{
+	return ((const char *)bdp - (const char *)bd->base) >> bd->dsize_log2;
+}
+
+static int fec_enet_get_free_txdesc_num(struct fec_enet_priv_tx_q *txq)
+{
+	int entries;
+
+	entries = (((const char *)txq->dirty_tx -
+			(const char *)txq->bd.cur) >> txq->bd.dsize_log2) - 1;
+
+	return entries >= 0 ? entries : entries + txq->bd.ring_size;
+}
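+
+/* Worked example (illustrative): with ring_size = 512 and bd.cur sitting 10
+ * descriptors ahead of dirty_tx, the pointer difference is -10 descriptors,
+ * so entries = -10 - 1 = -11 and the function returns -11 + 512 = 501.  One
+ * slot is deliberately kept unused so that a completely full ring remains
+ * distinguishable from a completely empty one.
+ */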
+
+static void swap_buffer(void *bufaddr, int len)
+{
+	int i;
+	unsigned int *buf = bufaddr;
+
+	for (i = 0; i < len; i += 4, buf++)
+		swab32s(buf);
+}
+
+static void fec_dump(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct bufdesc *bdp;
+	struct fec_enet_priv_tx_q *txq;
+	int index = 0;
+
+	netdev_info(ndev, "TX ring dump\n");
+	pr_info("Nr     SC     addr       len  SKB\n");
+
+	txq = fep->tx_queue[0];
+	bdp = txq->bd.base;
+
+	do {
+		pr_info("%3u %c%c 0x%04x 0x%08x %4u %p\n",
+			index,
+			bdp == txq->bd.cur ? 'S' : ' ',
+			bdp == txq->dirty_tx ? 'H' : ' ',
+			fec16_to_cpu(bdp->cbd_sc),
+			fec32_to_cpu(bdp->cbd_bufaddr),
+			fec16_to_cpu(bdp->cbd_datlen),
+			txq->tx_skbuff[index]);
+		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+		index++;
+	} while (bdp != txq->bd.base);
+}
+
+static inline bool is_ipv4_pkt(struct sk_buff *skb)
+{
+	return skb->protocol == htons(ETH_P_IP) && ip_hdr(skb)->version == 4;
+}
+
+static int fec_rt_txq_submit_skb(struct fec_enet_priv_tx_q *txq,
+				 struct rtskb *skb, struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct bufdesc *bdp, *last_bdp;
+	void *bufaddr;
+	dma_addr_t addr;
+	unsigned short status;
+	unsigned short buflen;
+	unsigned int estatus = 0;
+	unsigned int index;
+	int entries_free;
+	rtdm_lockctx_t c;
+
+	entries_free = fec_enet_get_free_txdesc_num(txq);
+	if (entries_free < MAX_SKB_FRAGS + 1) {
+		rtdm_printk_ratelimited("%s: NOT enough BD for SG!\n",
+					dev_name(&fep->pdev->dev));
+		return NETDEV_TX_BUSY;
+	}
+
+	rtdm_lock_get_irqsave(&frt->lock, c);
+
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read_monotonic() +
+				    *skb->xmit_stamp);
+
+	/* Fill in a Tx ring entry */
+	bdp = txq->bd.cur;
+	last_bdp = bdp;
+	status = fec16_to_cpu(bdp->cbd_sc);
+	status &= ~BD_ENET_TX_STATS;
+
+	/* Set buffer length and buffer pointer */
+	bufaddr = skb->data;
+	buflen = rtskb_headlen(skb);
+
+	index = fec_enet_get_bd_index(bdp, &txq->bd);
+	if (((unsigned long) bufaddr) & fep->tx_align ||
+		fep->quirks & FEC_QUIRK_SWAP_FRAME) {
+		memcpy(txq->tx_bounce[index], skb->data, buflen);
+		bufaddr = txq->tx_bounce[index];
+
+		if (fep->quirks & FEC_QUIRK_SWAP_FRAME)
+			swap_buffer(bufaddr, buflen);
+	}
+
+	addr = dma_map_single(&fep->pdev->dev, bufaddr, buflen, DMA_TO_DEVICE);
+	if (dma_mapping_error(&fep->pdev->dev, addr)) {
+		rtdm_lock_put_irqrestore(&frt->lock, c);
+		dev_kfree_rtskb(skb);
+		rtdm_printk_ratelimited("%s: Tx DMA memory map failed\n",
+					dev_name(&fep->pdev->dev));
+		return NETDEV_TX_BUSY;
+	}
+	status |= (BD_ENET_TX_INTR | BD_ENET_TX_LAST);
+
+	bdp->cbd_bufaddr = cpu_to_fec32(addr);
+	bdp->cbd_datlen = cpu_to_fec16(buflen);
+
+	if (fep->bufdesc_ex) {
+		struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+		estatus = BD_ENET_TX_INT;
+		if (fep->quirks & FEC_QUIRK_HAS_AVB)
+			estatus |= FEC_TX_BD_FTYPE(txq->bd.qid);
+		ebdp->cbd_bdu = 0;
+		ebdp->cbd_esc = cpu_to_fec32(estatus);
+	}
+
+	index = fec_enet_get_bd_index(last_bdp, &txq->bd);
+	txq->tx_rtbuff[index] = skb;
+
+	/* Make sure the updates to rest of the descriptor are performed before
+	 * transferring ownership.
+	 */
+	wmb();
+
+	/* Send it on its way.  Tell FEC it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+	status |= (BD_ENET_TX_READY | BD_ENET_TX_TC);
+	bdp->cbd_sc = cpu_to_fec16(status);
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	bdp = fec_enet_get_nextdesc(last_bdp, &txq->bd);
+
+	/* Make sure the update to bdp and tx_rtbuff are performed
+	 * before txq->bd.cur.
+	 */
+	wmb();
+	txq->bd.cur = bdp;
+
+	/* Trigger transmission start */
+	writel(0, txq->bd.reg_desc_active);
+
+	rtdm_lock_put_irqrestore(&frt->lock, c);
+
+	return 0;
+}
+
+static netdev_tx_t
+fec_enet_start_xmit(struct sk_buff *skb, struct net_device *ndev)
+{
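+	/* The regular Linux stack is not expected to transmit on this
+	 * adapter; real-time traffic is sent through fec_rt_start_xmit()
+	 * below instead.
+	 */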
+	return -EBUSY;
+}
+
+static netdev_tx_t
+fec_rt_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_private *fep;
+
+	fep = container_of(rtdev, struct fec_enet_private, rtnet.dev);
+	txq = fep->tx_queue[0];
+
+	return fec_rt_txq_submit_skb(txq, skb, fep->netdev);
+}
+
+static struct net_device_stats *fec_rt_stats(struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep;
+
+	fep = container_of(rtdev, struct fec_enet_private, rtnet.dev);
+
+	return &fep->netdev->stats;
+}
+
+/* Init RX & TX buffer descriptors
+ */
+static void fec_enet_bd_init(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;
+	struct bufdesc *bdp;
+	unsigned int i;
+	unsigned int q;
+
+	for (q = 0; q < fep->num_rx_queues; q++) {
+		/* Initialize the receive buffer descriptors. */
+		rxq = fep->rx_queue[q];
+		bdp = rxq->bd.base;
+
+		for (i = 0; i < rxq->bd.ring_size; i++) {
+
+			/* Initialize the BD for every fragment in the page. */
+			if (bdp->cbd_bufaddr)
+				bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
+			else
+				bdp->cbd_sc = cpu_to_fec16(0);
+			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+		}
+
+		/* Set the last buffer to wrap */
+		bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
+		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+
+		rxq->bd.cur = rxq->bd.base;
+	}
+
+	for (q = 0; q < fep->num_tx_queues; q++) {
+		/* ...and the same for transmit */
+		txq = fep->tx_queue[q];
+		bdp = txq->bd.base;
+		txq->bd.cur = bdp;
+
+		for (i = 0; i < txq->bd.ring_size; i++) {
+			/* Initialize the BD for every fragment in the page. */
+			bdp->cbd_sc = cpu_to_fec16(0);
+			if (bdp->cbd_bufaddr &&
+			    !IS_TSO_HEADER(txq, fec32_to_cpu(bdp->cbd_bufaddr)))
+				dma_unmap_single(&fep->pdev->dev,
+						 fec32_to_cpu(bdp->cbd_bufaddr),
+						 fec16_to_cpu(bdp->cbd_datlen),
+						 DMA_TO_DEVICE);
+			if (txq->tx_skbuff[i]) {
+				dev_kfree_rtskb(txq->tx_rtbuff[i]);
+				txq->tx_skbuff[i] = NULL;
+			}
+			bdp->cbd_bufaddr = cpu_to_fec32(0);
+			bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+		}
+
+		/* Set the last buffer to wrap */
+		bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
+		bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+		txq->dirty_tx = bdp;
+	}
+}
+
+static void fec_enet_active_rxring(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int i;
+
+	for (i = 0; i < fep->num_rx_queues; i++)
+		writel(0, fep->rx_queue[i]->bd.reg_desc_active);
+}
+
+static void fec_enet_enable_ring(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;
+	int i;
+
+	for (i = 0; i < fep->num_rx_queues; i++) {
+		rxq = fep->rx_queue[i];
+		writel(rxq->bd.dma, fep->hwp + FEC_R_DES_START(i));
+		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_R_BUFF_SIZE(i));
+
+		/* enable DMA1/2 */
+		if (i)
+			writel(RCMR_MATCHEN | RCMR_CMP(i),
+			       fep->hwp + FEC_RCMR(i));
+	}
+
+	for (i = 0; i < fep->num_tx_queues; i++) {
+		txq = fep->tx_queue[i];
+		writel(txq->bd.dma, fep->hwp + FEC_X_DES_START(i));
+
+		/* enable DMA1/2 */
+		if (i)
+			writel(DMA_CLASS_EN | IDLE_SLOPE(i),
+			       fep->hwp + FEC_DMA_CFG(i));
+	}
+}
+
+static void fec_enet_reset_skb(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_enet_priv_tx_q *txq;
+	int i, j;
+
+	for (i = 0; i < fep->num_tx_queues; i++) {
+		txq = fep->tx_queue[i];
+
+		for (j = 0; j < txq->bd.ring_size; j++) {
+			if (txq->tx_skbuff[j]) {
+				dev_kfree_rtskb(txq->tx_rtbuff[j]);
+				txq->tx_skbuff[j] = NULL;
+			}
+		}
+	}
+}
+
+/*
+ * This function is called to start or restart the FEC during a link
+ * change, transmit timeout, or to reconfigure the FEC.  The network
+ * packet processing for this device must be stopped before this call.
+ */
+static void
+fec_restart(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	u32 val;
+	u32 temp_mac[2];
+	u32 rcntl = OPT_FRAME_SIZE | 0x04;
+	u32 ecntl = 0x2; /* ETHEREN */
+
+	/* Whack a reset.  We should wait for this.
+	 * The i.MX6SX SoC ENET block uses the AXI bus, so we disable
+	 * the MAC instead of resetting it.
+	 */
+	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+		writel(0, fep->hwp + FEC_ECNTRL);
+	} else {
+		writel(1, fep->hwp + FEC_ECNTRL);
+		udelay(10);
+	}
+
+	/*
+	 * An enet-mac reset resets the MAC address registers too,
+	 * so we need to reconfigure them.
+	 */
+	memcpy(&temp_mac, ndev->dev_addr, ETH_ALEN);
+	writel((__force u32)cpu_to_be32(temp_mac[0]),
+	       fep->hwp + FEC_ADDR_LOW);
+	writel((__force u32)cpu_to_be32(temp_mac[1]),
+	       fep->hwp + FEC_ADDR_HIGH);
+
+	/* Clear any outstanding interrupt, except MDIO. */
+	writel((0xffffffff & ~FEC_ENET_MII), fep->hwp + FEC_IEVENT);
+
+	fec_enet_bd_init(ndev);
+
+	fec_enet_enable_ring(ndev);
+
+	/* Reset tx SKB buffers. */
+	fec_enet_reset_skb(ndev);
+
+	/* Enable MII mode */
+	if (fep->full_duplex == DUPLEX_FULL) {
+		/* FD enable */
+		writel(0x04, fep->hwp + FEC_X_CNTRL);
+	} else {
+		/* No Rcv on Xmit */
+		rcntl |= 0x02;
+		writel(0x0, fep->hwp + FEC_X_CNTRL);
+	}
+
+	/* Set MII speed */
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+#if !defined(CONFIG_M5272)
+	if (fep->quirks & FEC_QUIRK_HAS_RACC) {
+		val = readl(fep->hwp + FEC_RACC);
+		/* align IP header */
+		val |= FEC_RACC_SHIFT16;
+		if (fep->csum_flags & FLAG_RX_CSUM_ENABLED)
+			/* set RX checksum */
+			val |= FEC_RACC_OPTIONS;
+		else
+			val &= ~FEC_RACC_OPTIONS;
+		writel(val, fep->hwp + FEC_RACC);
+		writel(PKT_MAXBUF_SIZE, fep->hwp + FEC_FTRL);
+	}
+#endif
+
+	/*
+	 * The PHY interface and speed need to be configured
+	 * differently on enet-mac.
+	 */
+	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+		/* Enable flow control and length check */
+		rcntl |= 0x40000000 | 0x00000020;
+
+		/* RGMII, RMII or MII */
+		if (fep->phy_interface == PHY_INTERFACE_MODE_RGMII ||
+		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_ID ||
+		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_RXID ||
+		    fep->phy_interface == PHY_INTERFACE_MODE_RGMII_TXID)
+			rcntl |= (1 << 6);
+		else if (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+			rcntl |= (1 << 8);
+		else
+			rcntl &= ~(1 << 8);
+
+		/* 1G, 100M or 10M */
+		if (ndev->phydev) {
+			if (ndev->phydev->speed == SPEED_1000)
+				ecntl |= (1 << 5);
+			else if (ndev->phydev->speed == SPEED_100)
+				rcntl &= ~(1 << 9);
+			else
+				rcntl |= (1 << 9);
+		}
+	} else {
+#ifdef FEC_MIIGSK_ENR
+		if (fep->quirks & FEC_QUIRK_USE_GASKET) {
+			u32 cfgr;
+			/* disable the gasket and wait */
+			writel(0, fep->hwp + FEC_MIIGSK_ENR);
+			while (readl(fep->hwp + FEC_MIIGSK_ENR) & 4)
+				udelay(1);
+
+			/*
+			 * configure the gasket:
+			 *   RMII, 50 MHz, no loopback, no echo
+			 *   MII, 25 MHz, no loopback, no echo
+			 */
+			cfgr = (fep->phy_interface == PHY_INTERFACE_MODE_RMII)
+				? BM_MIIGSK_CFGR_RMII : BM_MIIGSK_CFGR_MII;
+			if (ndev->phydev && ndev->phydev->speed == SPEED_10)
+				cfgr |= BM_MIIGSK_CFGR_FRCONT_10M;
+			writel(cfgr, fep->hwp + FEC_MIIGSK_CFGR);
+
+			/* re-enable the gasket */
+			writel(2, fep->hwp + FEC_MIIGSK_ENR);
+		}
+#endif
+	}
+
+#if !defined(CONFIG_M5272)
+	/* enable pause frame*/
+	if ((fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) ||
+	    ((fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) &&
+	     ndev->phydev && ndev->phydev->pause)) {
+		rcntl |= FEC_ENET_FCE;
+
+		/* set FIFO threshold parameter to reduce overrun */
+		writel(FEC_ENET_RSEM_V, fep->hwp + FEC_R_FIFO_RSEM);
+		writel(FEC_ENET_RSFL_V, fep->hwp + FEC_R_FIFO_RSFL);
+		writel(FEC_ENET_RAEM_V, fep->hwp + FEC_R_FIFO_RAEM);
+		writel(FEC_ENET_RAFL_V, fep->hwp + FEC_R_FIFO_RAFL);
+
+		/* OPD */
+		writel(FEC_ENET_OPD_V, fep->hwp + FEC_OPD);
+	} else {
+		rcntl &= ~FEC_ENET_FCE;
+	}
+#endif /* !defined(CONFIG_M5272) */
+
+	writel(rcntl, fep->hwp + FEC_R_CNTRL);
+
+	/* Setup multicast filter. */
+	set_multicast_list(ndev);
+#ifndef CONFIG_M5272
+	writel(0, fep->hwp + FEC_HASH_TABLE_HIGH);
+	writel(0, fep->hwp + FEC_HASH_TABLE_LOW);
+#endif
+
+	if (fep->quirks & FEC_QUIRK_ENET_MAC) {
+		/* enable ENET endian swap */
+		ecntl |= (1 << 8);
+		/* enable ENET store and forward mode */
+		writel(1 << 8, fep->hwp + FEC_X_WMRK);
+	}
+
+	if (fep->bufdesc_ex)
+		ecntl |= (1 << 4);
+
+#ifndef CONFIG_M5272
+	/* Enable the MIB statistic event counters */
+	writel(0 << 31, fep->hwp + FEC_MIB_CTRLSTAT);
+#endif
+
+	/* And last, enable the transmit and receive processing */
+	writel(ecntl, fep->hwp + FEC_ECNTRL);
+	fec_enet_active_rxring(ndev);
+
+	if (fep->bufdesc_ex)
+		fec_ptp_start_cyclecounter(ndev);
+
+	/* Enable interrupts we wish to service */
+	if (fep->link)
+		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+	else
+		writel(0, fep->hwp + FEC_IMASK);
+
+	/* Init the interrupt coalescing */
+	fec_enet_itr_coal_init(ndev);
+
+}
+
+static void fec_enet_stop_mode(struct fec_enet_private *fep, bool enabled)
+{
+	struct fec_platform_data *pdata = fep->pdev->dev.platform_data;
+	struct fec_stop_mode_gpr *stop_gpr = &fep->stop_gpr;
+
+	if (stop_gpr->gpr) {
+		if (enabled)
+			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+					   BIT(stop_gpr->bit),
+					   BIT(stop_gpr->bit));
+		else
+			regmap_update_bits(stop_gpr->gpr, stop_gpr->reg,
+					   BIT(stop_gpr->bit), 0);
+	} else if (pdata && pdata->sleep_mode_enable) {
+		pdata->sleep_mode_enable(enabled);
+	}
+}
+
+static void
+fec_stop(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	u32 rmii_mode = readl(fep->hwp + FEC_R_CNTRL) & (1 << 8);
+	u32 val;
+
+	/* We cannot expect a graceful transmit stop without link !!! */
+	if (fep->link) {
+		writel(1, fep->hwp + FEC_X_CNTRL); /* Graceful transmit stop */
+		udelay(10);
+		if (!(readl(fep->hwp + FEC_IEVENT) & FEC_ENET_GRA))
+			netdev_err(ndev, "Graceful transmit stop did not complete!\n");
+	}
+
+	/* Whack a reset.  We should wait for this.
+	 * The i.MX6SX SoC ENET block uses the AXI bus, so we disable
+	 * the MAC instead of resetting it.
+	 */
+	if (!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+		if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+			writel(0, fep->hwp + FEC_ECNTRL);
+		} else {
+			writel(1, fep->hwp + FEC_ECNTRL);
+			udelay(10);
+		}
+		writel(FEC_DEFAULT_IMASK, fep->hwp + FEC_IMASK);
+	} else {
+		writel(FEC_DEFAULT_IMASK | FEC_ENET_WAKEUP, fep->hwp + FEC_IMASK);
+		val = readl(fep->hwp + FEC_ECNTRL);
+		val |= (FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+		writel(val, fep->hwp + FEC_ECNTRL);
+		fec_enet_stop_mode(fep, true);
+	}
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+	/* We have to keep ENET enabled to have MII interrupt stay working */
+	if (fep->quirks & FEC_QUIRK_ENET_MAC &&
+		!(fep->wol_flag & FEC_WOL_FLAG_SLEEP_ON)) {
+		writel(2, fep->hwp + FEC_ECNTRL);
+		writel(rmii_mode, fep->hwp + FEC_R_CNTRL);
+	}
+}
+
+static void
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5,6,0)
+fec_timeout(struct net_device *ndev, unsigned int txqueue)
+#else
+fec_timeout(struct net_device *ndev)
+#endif
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	fec_dump(ndev);
+
+	ndev->stats.tx_errors++;
+
+	schedule_work(&fep->tx_timeout_work);
+}
+
+static void fec_enet_timeout_work(struct work_struct *work)
+{
+	struct fec_enet_private *fep =
+		container_of(work, struct fec_enet_private, tx_timeout_work);
+	struct net_device *ndev = fep->netdev;
+	struct fec_rt_data *frt = &fep->rtnet;
+
+	rtnl_lock();
+	if (netif_device_present(ndev) || rtnetif_running(&frt->dev)) {
+		rtnetif_stop_queue(&frt->dev);
+		fec_restart(ndev);
+		rtnetif_wake_queue(&frt->dev);
+	}
+	rtnl_unlock();
+}
+
+static void
+fec_rt_tx_queue(struct net_device *ndev, u16 queue_id)
+{
+	struct	fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct bufdesc *bdp;
+	unsigned short status;
+	struct	rtskb	*skb;
+	struct fec_enet_priv_tx_q *txq;
+	int	index;
+
+	txq = fep->tx_queue[queue_id];
+
+	rtdm_lock_get(&frt->lock);
+
+	/* start from dirty_tx, the last descriptor already reclaimed */
+	bdp = txq->dirty_tx;
+
+	/* get next bdp of dirty_tx */
+	bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+
+	while (bdp != READ_ONCE(txq->bd.cur)) {
+		/* Order the load of bd.cur and cbd_sc */
+		rmb();
+		status = fec16_to_cpu(READ_ONCE(bdp->cbd_sc));
+		if (status & BD_ENET_TX_READY)
+			break;
+
+		index = fec_enet_get_bd_index(bdp, &txq->bd);
+
+		skb = txq->tx_rtbuff[index];
+		txq->tx_rtbuff[index] = NULL;
+		dma_unmap_single(&fep->pdev->dev,
+					 fec32_to_cpu(bdp->cbd_bufaddr),
+					 fec16_to_cpu(bdp->cbd_datlen),
+					 DMA_TO_DEVICE);
+		bdp->cbd_bufaddr = cpu_to_fec32(0);
+		if (!skb)
+			goto skb_done;
+
+		/* Check for errors. */
+		if (status & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+				   BD_ENET_TX_RL | BD_ENET_TX_UN |
+				   BD_ENET_TX_CSL)) {
+			ndev->stats.tx_errors++;
+			if (status & BD_ENET_TX_HB)  /* No heartbeat */
+				ndev->stats.tx_heartbeat_errors++;
+			if (status & BD_ENET_TX_LC)  /* Late collision */
+				ndev->stats.tx_window_errors++;
+			if (status & BD_ENET_TX_RL)  /* Retrans limit */
+				ndev->stats.tx_aborted_errors++;
+			if (status & BD_ENET_TX_UN)  /* Underrun */
+				ndev->stats.tx_fifo_errors++;
+			if (status & BD_ENET_TX_CSL) /* Carrier lost */
+				ndev->stats.tx_carrier_errors++;
+		} else {
+			ndev->stats.tx_packets++;
+			ndev->stats.tx_bytes += skb->len;
+		}
+
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (status & BD_ENET_TX_DEF)
+			ndev->stats.collisions++;
+
+		dev_kfree_rtskb(skb);
+skb_done:
+		/* Make sure the update to bdp and tx_rtbuff are performed
+		 * before dirty_tx
+		 */
+		wmb();
+		txq->dirty_tx = bdp;
+
+		/* Update pointer to next buffer descriptor to be transmitted */
+		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+
+		/* Since we have freed up a buffer, the ring is no longer full
+		 */
+		if (rtnetif_queue_stopped(&frt->dev))
+			rtnetif_wake_queue(&frt->dev);
+	}
+
+	/* ERR006358: Keep the transmitter going */
+	if (bdp != txq->bd.cur &&
+	    readl(txq->bd.reg_desc_active) == 0)
+		writel(0, txq->bd.reg_desc_active);
+
+	rtdm_lock_put(&frt->lock);
+}
+
+static void fec_enet_tx(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int i;
+
+	/* Make sure that AVB queues are processed first. */
+	for (i = fep->num_tx_queues - 1; i >= 0; i--)
+		fec_rt_tx_queue(ndev, i);
+}
+
+static int
+fec_rt_new_rxbdp(struct net_device *ndev, struct bufdesc *bdp, struct rtskb *skb)
+{
+	struct  fec_enet_private *fep = netdev_priv(ndev);
+	int off;
+
+	off = ((unsigned long)skb->data) & fep->rx_align;
+	if (off)
+		rtskb_reserve(skb, fep->rx_align + 1 - off);
+
+	bdp->cbd_bufaddr = cpu_to_fec32(dma_map_single(&fep->pdev->dev, skb->data, RTSKB_SIZE - fep->rx_align, DMA_FROM_DEVICE));
+	if (dma_mapping_error(&fep->pdev->dev, fec32_to_cpu(bdp->cbd_bufaddr))) {
+		rtdm_printk_ratelimited("%s: Rx DMA memory map failed\n",
+					dev_name(&fep->pdev->dev));
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+
+static int
+fec_rt_rx_queue(struct net_device *ndev, int budget, u16 queue_id)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct fec_enet_priv_rx_q *rxq;
+	struct bufdesc *bdp;
+	unsigned short status;
+	struct  rtskb *skb_new, *skb;
+	ushort	pkt_len;
+	__u8 *data;
+	int	pkt_received = 0;
+	struct	bufdesc_ex *ebdp = NULL;
+	int	index;
+	bool	need_swap = fep->quirks & FEC_QUIRK_SWAP_FRAME;
+
+#ifdef CONFIG_M532x
+	flush_cache_all();
+#endif
+	rxq = fep->rx_queue[queue_id];
+
+	rtdm_lock_get(&frt->lock);
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = rxq->bd.cur;
+
+	while (!((status = fec16_to_cpu(bdp->cbd_sc)) & BD_ENET_RX_EMPTY)) {
+
+		if (pkt_received >= budget)
+			break;
+		pkt_received++;
+
+		writel(FEC_ENET_RXF, fep->hwp + FEC_IEVENT);
+
+		/* Check for errors. */
+		status ^= BD_ENET_RX_LAST;
+		if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+			   BD_ENET_RX_CR | BD_ENET_RX_OV | BD_ENET_RX_LAST |
+			   BD_ENET_RX_CL)) {
+			ndev->stats.rx_errors++;
+			if (status & BD_ENET_RX_OV) {
+				/* FIFO overrun */
+				ndev->stats.rx_fifo_errors++;
+				goto rx_processing_done;
+			}
+			if (status & (BD_ENET_RX_LG | BD_ENET_RX_SH
+						| BD_ENET_RX_LAST)) {
+				/* Frame too long or too short. */
+				ndev->stats.rx_length_errors++;
+				if (status & BD_ENET_RX_LAST)
+					netdev_err(ndev, "rcv is not +last\n");
+			}
+			if (status & BD_ENET_RX_CR)	/* CRC Error */
+				ndev->stats.rx_crc_errors++;
+			/* Report late collisions as a frame error. */
+			if (status & (BD_ENET_RX_NO | BD_ENET_RX_CL))
+				ndev->stats.rx_frame_errors++;
+			goto rx_processing_done;
+		}
+
+		/* Process the incoming frame. */
+		ndev->stats.rx_packets++;
+		pkt_len = fec16_to_cpu(bdp->cbd_datlen);
+		ndev->stats.rx_bytes += pkt_len;
+
+		index = fec_enet_get_bd_index(bdp, &rxq->bd);
+		skb = rxq->rx_rtbuff[index];
+		if (skb == NULL)
+			goto rx_processing_done;
+
+		dma_unmap_single(&fep->pdev->dev,
+					 fec32_to_cpu(bdp->cbd_bufaddr),
+					 RTSKB_SIZE - fep->rx_align,
+					 DMA_FROM_DEVICE);
+
+		prefetch(skb->data - NET_IP_ALIGN);
+		rtskb_put(skb, pkt_len - 4);
+		data = skb->data;
+
+		if (need_swap)
+			swap_buffer(data, pkt_len);
+
+#if !defined(CONFIG_M5272)
+		if (fep->quirks & FEC_QUIRK_HAS_RACC)
+			data = rtskb_pull(skb, 2);
+#endif
+
+		skb->protocol = rt_eth_type_trans(skb, &frt->dev);
+
+		/* Extract the enhanced buffer descriptor */
+		if (fep->bufdesc_ex) {
+			ebdp = (struct bufdesc_ex *)bdp;
+			if (fep->csum_flags & FLAG_RX_CSUM_ENABLED) {
+				if (!(ebdp->cbd_esc & cpu_to_fec32(FLAG_RX_CSUM_ERROR)))
+					skb->ip_summed = CHECKSUM_UNNECESSARY;
+				else
+					WARN_ON_ONCE(skb->ip_summed != CHECKSUM_NONE);
+			}
+		}
+
+		skb_new = rtnetdev_alloc_rtskb(&frt->dev, RTSKB_SIZE);
+		if (unlikely(skb_new == NULL))
+			ndev->stats.rx_dropped++;
+		else {
+			rtnetif_rx(skb);
+			rxq->rx_rtbuff[index] = skb_new;
+			fec_rt_new_rxbdp(ndev, bdp, skb_new);
+		}
+
+rx_processing_done:
+		/* Clear the status flags for this buffer */
+		status &= ~BD_ENET_RX_STATS;
+
+		/* Mark the buffer empty */
+		status |= BD_ENET_RX_EMPTY;
+
+		if (fep->bufdesc_ex) {
+			ebdp = (struct bufdesc_ex *)bdp;
+			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+			ebdp->cbd_prot = 0;
+			ebdp->cbd_bdu = 0;
+		}
+		/* Make sure the updates to rest of the descriptor are
+		 * performed before transferring ownership.
+		 */
+		wmb();
+		bdp->cbd_sc = cpu_to_fec16(status);
+
+		/* Update BD pointer to next entry */
+		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+
+		/* Doing this here will keep the FEC running while we process
+		 * incoming frames.  On a heavily loaded network, we should be
+		 * able to keep up at the expense of system resources.
+		 */
+		writel(0, rxq->bd.reg_desc_active);
+	}
+	rxq->bd.cur = bdp;
+
+	rtdm_lock_put(&frt->lock);
+
+	if (pkt_received)
+		rt_mark_stack_mgr(&frt->dev);
+
+	return pkt_received;
+}
+
+static int fec_enet_rx(struct net_device *ndev, int budget)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int i, done = 0;
+
+	/* Make sure that AVB queues are processed first. */
+	for (i = fep->num_rx_queues - 1; i >= 0; i--)
+		done += fec_rt_rx_queue(ndev, budget - done, i);
+
+	return done;
+}
+
+static bool fec_enet_collect_events(struct fec_enet_private *fep)
+{
+	uint int_events;
+
+	int_events = readl(fep->hwp + FEC_IEVENT);
+
+	/* Don't clear MDIO events, we poll for those */
+	int_events &= ~FEC_ENET_MII;
+
+	writel(int_events, fep->hwp + FEC_IEVENT);
+
+	return int_events != 0;
+}
+
+static int
+fec_rt_interrupt(rtdm_irq_t *irqh)
+{
+	struct net_device *ndev = rtdm_irq_get_arg(irqh, struct net_device);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	irqreturn_t ret = RTDM_IRQ_NONE;
+	uint int_events = fec_enet_collect_events(fep);
+
+	if (int_events && fep->link) {
+		if (int_events & FEC_ENET_RXF)
+			fec_enet_rx(ndev, RX_RING_SIZE);
+		if (int_events & FEC_ENET_TXF)
+			fec_enet_tx(ndev);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (int_events & FEC_ENET_MII) {
+		rtdm_nrtsig_pend(&fep->rtnet.mdio_sig);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	return ret;
+}
+
+/* ------------------------------------------------------------------------- */
+static void fec_get_mac(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_platform_data *pdata = dev_get_platdata(&fep->pdev->dev);
+	unsigned char *iap, tmpaddr[ETH_ALEN];
+
+	/*
+	 * Try to get the MAC address in the following order:
+	 *
+	 * 1) module parameter via kernel command line in form
+	 *    fec.macaddr=0x00,0x04,0x9f,0x01,0x30,0xe0
+	 */
+	iap = macaddr;
+
+	/*
+	 * 2) from device tree data
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		struct device_node *np = fep->pdev->dev.of_node;
+		if (np) {
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
+			int err = of_get_mac_address(np, tmpaddr);
+			if (!err)
+				iap = tmpaddr;
+#else
+			const char *mac = of_get_mac_address(np);
+			if (!IS_ERR(mac))
+				iap = (unsigned char *) mac;
+#endif
+		}
+	}
+
+	/*
+	 * 3) from flash or fuse (via platform data)
+	 */
+	if (!is_valid_ether_addr(iap)) {
+#ifdef CONFIG_M5272
+		if (FEC_FLASHMAC)
+			iap = (unsigned char *)FEC_FLASHMAC;
+#else
+		if (pdata)
+			iap = (unsigned char *)&pdata->mac;
+#endif
+	}
+
+	/*
+	 * 4) FEC mac registers set by bootloader
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		*((__be32 *) &tmpaddr[0]) =
+			cpu_to_be32(readl(fep->hwp + FEC_ADDR_LOW));
+		*((__be16 *) &tmpaddr[4]) =
+			cpu_to_be16(readl(fep->hwp + FEC_ADDR_HIGH) >> 16);
+		iap = &tmpaddr[0];
+	}
+
+	/*
+	 * 5) random mac address
+	 */
+	if (!is_valid_ether_addr(iap)) {
+		/* Report it and use a random ethernet address instead */
+		dev_err(&fep->pdev->dev, "Invalid MAC address: %pM\n", iap);
+		eth_hw_addr_random(ndev);
+		dev_info(&fep->pdev->dev, "Using random MAC address: %pM\n",
+			 ndev->dev_addr);
+		return;
+	}
+
+	/* Adjust MAC if using macaddr */
+	if (iap == macaddr) {
+		memcpy(tmpaddr, macaddr, ETH_ALEN);
+		tmpaddr[ETH_ALEN-1] += fep->dev_id;
+		eth_hw_addr_set(ndev, tmpaddr);
+	} else {
+		eth_hw_addr_set(ndev, iap);
+	}
+}
+
+/* ------------------------------------------------------------------------- */
+
+/*
+ * Phy section
+ */
+static void do_adjust_link(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct phy_device *phy_dev = ndev->phydev;
+	int status_change = 0;
+
+	/*
+	 * If the netdev is down, or is going down, we're not interested
+	 * in link state events, so just mark our idea of the link as down
+	 * and ignore the event.
+	 */
+	if (!rtnetif_running(&frt->dev) || !netif_device_present(ndev)) {
+		fep->link = 0;
+	} else if (phy_dev->link) {
+		if (!fep->link) {
+			fep->link = phy_dev->link;
+			status_change = 1;
+		}
+
+		if (fep->full_duplex != phy_dev->duplex) {
+			fep->full_duplex = phy_dev->duplex;
+			status_change = 1;
+		}
+
+		if (phy_dev->speed != fep->speed) {
+			fep->speed = phy_dev->speed;
+			status_change = 1;
+		}
+
+		/* if any of the above changed restart the FEC */
+		if (status_change) {
+			rtnetif_stop_queue(&frt->dev);
+			fec_restart(ndev);
+			rtnetif_wake_queue(&frt->dev);
+		}
+	} else {
+		if (fep->link) {
+			rtnetif_stop_queue(&frt->dev);
+			fec_stop(ndev);
+			rtnetif_wake_queue(&frt->dev);
+			fep->link = phy_dev->link;
+			status_change = 1;
+		}
+	}
+
+	if (status_change)
+		phy_print_status(phy_dev);
+}
+
+static void fec_enet_adjust_link(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	do_adjust_link(ndev);
+
+	/*
+	 * PHYLIB sets netif_carrier_on() when the link is up,
+	 * propagate state change to RTnet.
+	 */
+	if (netif_carrier_ok(ndev)) {
+		netdev_info(ndev, "carrier detected\n");
+		rtnetif_carrier_on(&fep->rtnet.dev);
+	} else {
+		netdev_info(ndev, "carrier lost\n");
+		rtnetif_carrier_off(&fep->rtnet.dev);
+	}
+}
+
+static int fec_enet_mdio_wait(struct fec_enet_private *fep)
+{
+	uint ievent;
+	int ret;
+
+	ret = readl_poll_timeout_atomic(fep->hwp + FEC_IEVENT, ievent,
+					ievent & FEC_ENET_MII, 2, 30000);
+
+	if (!ret)
+		writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+	return ret;
+}
+
+static int fec_enet_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct fec_enet_private *fep = bus->priv;
+	struct device *dev = &fep->pdev->dev;
+	int ret = 0, frame_start, frame_addr, frame_op;
+	bool is_c45 = !!(regnum & MII_ADDR_C45);
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
+		return ret;
+
+	if (is_c45) {
+		frame_start = FEC_MMFR_ST_C45;
+
+		/* write address */
+		frame_addr = (regnum >> 16);
+		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+		       FEC_MMFR_TA | (regnum & 0xFFFF),
+		       fep->hwp + FEC_MII_DATA);
+
+		/* wait for end of transfer */
+		ret = fec_enet_mdio_wait(fep);
+		if (ret) {
+			netdev_err(fep->netdev, "MDIO address write timeout\n");
+			goto out;
+		}
+
+		frame_op = FEC_MMFR_OP_READ_C45;
+
+	} else {
+		/* C22 read */
+		frame_op = FEC_MMFR_OP_READ;
+		frame_start = FEC_MMFR_ST;
+		frame_addr = regnum;
+	}
+
+	/* start a read op */
+	writel(frame_start | frame_op |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+		FEC_MMFR_TA, fep->hwp + FEC_MII_DATA);
+
+	/* wait for end of transfer */
+	ret = fec_enet_mdio_wait(fep);
+	if (ret) {
+		netdev_err(fep->netdev, "MDIO read timeout\n");
+		goto out;
+	}
+
+	ret = FEC_MMFR_DATA(readl(fep->hwp + FEC_MII_DATA));
+
+out:
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	return ret;
+}
+
+static int fec_enet_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
+{
+	struct fec_enet_private *fep = bus->priv;
+	struct device *dev = &fep->pdev->dev;
+	int ret, frame_start, frame_addr;
+	bool is_c45 = !!(regnum & MII_ADDR_C45);
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
+		return ret;
+
+	if (is_c45) {
+		frame_start = FEC_MMFR_ST_C45;
+
+		/* write address */
+		frame_addr = (regnum >> 16);
+		writel(frame_start | FEC_MMFR_OP_ADDR_WRITE |
+		       FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+		       FEC_MMFR_TA | (regnum & 0xFFFF),
+		       fep->hwp + FEC_MII_DATA);
+
+		/* wait for end of transfer */
+		ret = fec_enet_mdio_wait(fep);
+		if (ret) {
+			netdev_err(fep->netdev, "MDIO address write timeout\n");
+			goto out;
+		}
+	} else {
+		/* C22 write */
+		frame_start = FEC_MMFR_ST;
+		frame_addr = regnum;
+	}
+
+	/* start a write op */
+	writel(frame_start | FEC_MMFR_OP_WRITE |
+		FEC_MMFR_PA(mii_id) | FEC_MMFR_RA(frame_addr) |
+		FEC_MMFR_TA | FEC_MMFR_DATA(value),
+		fep->hwp + FEC_MII_DATA);
+
+	/* wait for end of transfer */
+	ret = fec_enet_mdio_wait(fep);
+	if (ret)
+		netdev_err(fep->netdev, "MDIO write timeout\n");
+
+out:
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+
+	return ret;
+}
+
+static void fec_enet_phy_reset_after_clk_enable(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct phy_device *phy_dev = ndev->phydev;
+
+	if (phy_dev) {
+		phy_reset_after_clk_enable(phy_dev);
+	} else if (fep->phy_node) {
+		/*
+		 * If the PHY still is not bound to the MAC, but there is
+		 * OF PHY node and a matching PHY device instance already,
+		 * use the OF PHY node to obtain the PHY device instance,
+		 * and then use that PHY device instance when triggering
+		 * the PHY reset.
+		 */
+		phy_dev = of_phy_find_device(fep->phy_node);
+		phy_reset_after_clk_enable(phy_dev);
+		put_device(&phy_dev->mdio.dev);
+	}
+}
+
+static int fec_enet_clk_enable(struct net_device *ndev, bool enable)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
+
+	if (enable) {
+		ret = clk_prepare_enable(fep->clk_enet_out);
+		if (ret)
+			return ret;
+
+		if (fep->clk_ptp) {
+			mutex_lock(&fep->ptp_clk_mutex);
+			ret = clk_prepare_enable(fep->clk_ptp);
+			if (ret) {
+				mutex_unlock(&fep->ptp_clk_mutex);
+				goto failed_clk_ptp;
+			} else {
+				fep->ptp_clk_on = true;
+			}
+			mutex_unlock(&fep->ptp_clk_mutex);
+		}
+
+		ret = clk_prepare_enable(fep->clk_ref);
+		if (ret)
+			goto failed_clk_ref;
+
+		fec_enet_phy_reset_after_clk_enable(ndev);
+	} else {
+		clk_disable_unprepare(fep->clk_enet_out);
+		if (fep->clk_ptp) {
+			mutex_lock(&fep->ptp_clk_mutex);
+			clk_disable_unprepare(fep->clk_ptp);
+			fep->ptp_clk_on = false;
+			mutex_unlock(&fep->ptp_clk_mutex);
+		}
+		clk_disable_unprepare(fep->clk_ref);
+	}
+
+	return 0;
+
+failed_clk_ref:
+	if (fep->clk_ptp) {
+		mutex_lock(&fep->ptp_clk_mutex);
+		clk_disable_unprepare(fep->clk_ptp);
+		fep->ptp_clk_on = false;
+		mutex_unlock(&fep->ptp_clk_mutex);
+	}
+failed_clk_ptp:
+	clk_disable_unprepare(fep->clk_enet_out);
+
+	return ret;
+}
+
+static int fec_enet_mii_probe(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct phy_device *phy_dev = NULL;
+	char mdio_bus_id[MII_BUS_ID_SIZE];
+	char phy_name[MII_BUS_ID_SIZE + 3];
+	int phy_id;
+	int dev_id = fep->dev_id;
+
+	if (fep->phy_node) {
+		phy_dev = of_phy_connect(ndev, fep->phy_node,
+					 &fec_enet_adjust_link, 0,
+					 fep->phy_interface);
+		if (!phy_dev) {
+			netdev_err(ndev, "Unable to connect to phy\n");
+			return -ENODEV;
+		}
+	} else {
+		/* check for attached phy */
+		for (phy_id = 0; (phy_id < PHY_MAX_ADDR); phy_id++) {
+			if (!mdiobus_is_registered_device(fep->mii_bus, phy_id))
+				continue;
+			if (dev_id--)
+				continue;
+			strlcpy(mdio_bus_id, fep->mii_bus->id, MII_BUS_ID_SIZE);
+			break;
+		}
+
+		if (phy_id >= PHY_MAX_ADDR) {
+			netdev_info(ndev, "no PHY, assuming direct connection to switch\n");
+			strlcpy(mdio_bus_id, "fixed-0", MII_BUS_ID_SIZE);
+			phy_id = 0;
+		}
+
+		snprintf(phy_name, sizeof(phy_name),
+			 PHY_ID_FMT, mdio_bus_id, phy_id);
+		phy_dev = phy_connect(ndev, phy_name, &fec_enet_adjust_link,
+				      fep->phy_interface);
+	}
+
+	if (IS_ERR(phy_dev)) {
+		netdev_err(ndev, "could not attach to PHY\n");
+		return PTR_ERR(phy_dev);
+	}
+
+	/* mask with MAC supported features */
+	if (fep->quirks & FEC_QUIRK_HAS_GBIT) {
+		phy_set_max_speed(phy_dev, 1000);
+		phy_remove_link_mode(phy_dev,
+				     ETHTOOL_LINK_MODE_1000baseT_Half_BIT);
+#if !defined(CONFIG_M5272)
+		phy_support_sym_pause(phy_dev);
+#endif
+	} else {
+		phy_set_max_speed(phy_dev, 100);
+	}
+
+	fep->link = 0;
+	fep->full_duplex = 0;
+
+	phy_attached_info(phy_dev);
+
+	return 0;
+}
+
+static int fec_enet_mii_init(struct platform_device *pdev)
+{
+	static struct mii_bus *fec0_mii_bus;
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	bool suppress_preamble = false;
+	struct device_node *node;
+	int err = -ENXIO;
+	u32 mii_speed, holdtime;
+	u32 bus_freq;
+
+	/*
+	 * The i.MX28 dual fec interfaces are not equal.
+	 * Here are the differences:
+	 *
+	 *  - fec0 supports MII & RMII modes while fec1 only supports RMII
+	 *  - fec0 acts as the 1588 time master while fec1 is the slave
+	 *  - external PHYs can only be configured by fec0
+	 *
+	 * That is to say, fec1 cannot work independently; it only works
+	 * when fec0 is working. The reason behind this design is that the
+	 * second interface is added primarily for switch mode.
+	 *
+	 * Because of the last point above, both PHYs are attached to the
+	 * fec0 MDIO interface in the board design and need to be configured
+	 * through the fec0 mii_bus.
+	 */
+	if ((fep->quirks & FEC_QUIRK_SINGLE_MDIO) && fep->dev_id > 0) {
+		/* fec1 uses fec0 mii_bus */
+		if (mii_cnt && fec0_mii_bus) {
+			fep->mii_bus = fec0_mii_bus;
+			mii_cnt++;
+			return 0;
+		}
+		return -ENOENT;
+	}
+
+	bus_freq = 2500000; /* 2.5MHz by default */
+	node = of_get_child_by_name(pdev->dev.of_node, "mdio");
+	if (node) {
+		of_property_read_u32(node, "clock-frequency", &bus_freq);
+		suppress_preamble = of_property_read_bool(node,
+							  "suppress-preamble");
+	}
+
+	/*
+	 * Set MII speed (= clk_get_rate() / 2 * phy_speed)
+	 *
+	 * The formula for the FEC MDC is 'ref_freq / (MII_SPEED x 2)', while
+	 * for the ENET-MAC it is 'ref_freq / ((MII_SPEED + 1) x 2)'.  The
+	 * i.MX28 Reference Manual has an error here, which was corrected in
+	 * the i.MX6Q documentation.
+	 */
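+	/*
+	 * Worked example (assuming a 66 MHz ipg clock and the default
+	 * 2.5 MHz bus): DIV_ROUND_UP(66000000, 5000000) = 14; the ENET-MAC
+	 * quirk then yields MII_SPEED = 13, i.e. MDC = 66 MHz / ((13 + 1) * 2)
+	 * which is roughly 2.36 MHz, just below the requested 2.5 MHz.
+	 */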
+	mii_speed = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), bus_freq * 2);
+	if (fep->quirks & FEC_QUIRK_ENET_MAC)
+		mii_speed--;
+	if (mii_speed > 63) {
+		dev_err(&pdev->dev,
+			"fec clock (%lu) too fast to get right mii speed\n",
+			clk_get_rate(fep->clk_ipg));
+		err = -EINVAL;
+		goto err_out;
+	}
+
+	/*
+	 * The i.MX28 and i.MX6 types have another field in the MSCR (aka
+	 * MII_SPEED) register that defines the MDIO output hold time. Earlier
+	 * versions are RAZ there, so just ignore the difference and always
+	 * write the register.
+	 * The minimal hold time according to IEEE 802.3 (clause 22) is 10 ns.
+	 * HOLDTIME + 1 is the number of clk cycles the fec is holding the
+	 * output.
+	 * The HOLDTIME bitfield takes values between 0 and 7 (inclusive).
+	 * Given that ceil(clkrate / 5000000) <= 64, the calculation for
+	 * holdtime cannot result in a value greater than 3.
+	 */
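+	/*
+	 * Worked example (assuming a 132 MHz ipg clock):
+	 * DIV_ROUND_UP(132000000, 100000000) - 1 = 1, so the output is held
+	 * for HOLDTIME + 1 = 2 clk cycles, about 15 ns, above the 10 ns
+	 * minimum.
+	 */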
+	holdtime = DIV_ROUND_UP(clk_get_rate(fep->clk_ipg), 100000000) - 1;
+
+	fep->phy_speed = mii_speed << 1 | holdtime << 8;
+
+	if (suppress_preamble)
+		fep->phy_speed |= BIT(7);
+
+	if (fep->quirks & FEC_QUIRK_CLEAR_SETUP_MII) {
+		/* Clear MMFR to avoid generating an MII event when writing MSCR.
+		 * MII event generation condition:
+		 * - writing MSCR:
+		 *	- mmfr[31:0]_not_zero & mscr[7:0]_is_zero &
+		 *	  mscr_reg_data_in[7:0] != 0
+		 * - writing MMFR:
+		 *	- mscr[7:0]_not_zero
+		 */
+		writel(0, fep->hwp + FEC_MII_DATA);
+	}
+
+	writel(fep->phy_speed, fep->hwp + FEC_MII_SPEED);
+
+	/* Clear any pending transaction complete indication */
+	writel(FEC_ENET_MII, fep->hwp + FEC_IEVENT);
+
+	fep->mii_bus = mdiobus_alloc();
+	if (fep->mii_bus == NULL) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	fep->mii_bus->name = "fec_enet_mii_bus";
+	fep->mii_bus->read = fec_enet_mdio_read;
+	fep->mii_bus->write = fec_enet_mdio_write;
+	snprintf(fep->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		pdev->name, fep->dev_id + 1);
+	fep->mii_bus->priv = fep;
+	fep->mii_bus->parent = &pdev->dev;
+
+	err = of_mdiobus_register(fep->mii_bus, node);
+	if (err)
+		goto err_out_free_mdiobus;
+	of_node_put(node);
+
+	mii_cnt++;
+
+	/* save fec0 mii_bus */
+	if (fep->quirks & FEC_QUIRK_SINGLE_MDIO)
+		fec0_mii_bus = fep->mii_bus;
+
+	return 0;
+
+err_out_free_mdiobus:
+	mdiobus_free(fep->mii_bus);
+err_out:
+	of_node_put(node);
+	return err;
+}
+
+static void fec_enet_mii_remove(struct fec_enet_private *fep)
+{
+	if (--mii_cnt == 0) {
+		mdiobus_unregister(fep->mii_bus);
+		mdiobus_free(fep->mii_bus);
+	}
+}
+
+static void fec_enet_get_drvinfo(struct net_device *ndev,
+				 struct ethtool_drvinfo *info)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	strlcpy(info->driver, fep->pdev->dev.driver->name,
+		sizeof(info->driver));
+	strlcpy(info->bus_info, dev_name(&ndev->dev), sizeof(info->bus_info));
+}
+
+static int fec_enet_get_regs_len(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct resource *r;
+	int s = 0;
+
+	r = platform_get_resource(fep->pdev, IORESOURCE_MEM, 0);
+	if (r)
+		s = resource_size(r);
+
+	return s;
+}
+
+/* List of registers that can safely be read to dump them with ethtool */
+#if defined(CONFIG_M523x) || defined(CONFIG_M527x) || defined(CONFIG_M528x) || \
+	defined(CONFIG_M520x) || defined(CONFIG_M532x) || defined(CONFIG_ARM) || \
+	defined(CONFIG_ARM64) || defined(CONFIG_COMPILE_TEST)
+static __u32 fec_enet_register_version = 2;
+static u32 fec_enet_register_offset[] = {
+	FEC_IEVENT, FEC_IMASK, FEC_R_DES_ACTIVE_0, FEC_X_DES_ACTIVE_0,
+	FEC_ECNTRL, FEC_MII_DATA, FEC_MII_SPEED, FEC_MIB_CTRLSTAT, FEC_R_CNTRL,
+	FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH, FEC_OPD, FEC_TXIC0, FEC_TXIC1,
+	FEC_TXIC2, FEC_RXIC0, FEC_RXIC1, FEC_RXIC2, FEC_HASH_TABLE_HIGH,
+	FEC_HASH_TABLE_LOW, FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW,
+	FEC_X_WMRK, FEC_R_BOUND, FEC_R_FSTART, FEC_R_DES_START_1,
+	FEC_X_DES_START_1, FEC_R_BUFF_SIZE_1, FEC_R_DES_START_2,
+	FEC_X_DES_START_2, FEC_R_BUFF_SIZE_2, FEC_R_DES_START_0,
+	FEC_X_DES_START_0, FEC_R_BUFF_SIZE_0, FEC_R_FIFO_RSFL, FEC_R_FIFO_RSEM,
+	FEC_R_FIFO_RAEM, FEC_R_FIFO_RAFL, FEC_RACC, FEC_RCMR_1, FEC_RCMR_2,
+	FEC_DMA_CFG_1, FEC_DMA_CFG_2, FEC_R_DES_ACTIVE_1, FEC_X_DES_ACTIVE_1,
+	FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_2, FEC_QOS_SCHEME,
+	RMON_T_DROP, RMON_T_PACKETS, RMON_T_BC_PKT, RMON_T_MC_PKT,
+	RMON_T_CRC_ALIGN, RMON_T_UNDERSIZE, RMON_T_OVERSIZE, RMON_T_FRAG,
+	RMON_T_JAB, RMON_T_COL, RMON_T_P64, RMON_T_P65TO127, RMON_T_P128TO255,
+	RMON_T_P256TO511, RMON_T_P512TO1023, RMON_T_P1024TO2047,
+	RMON_T_P_GTE2048, RMON_T_OCTETS,
+	IEEE_T_DROP, IEEE_T_FRAME_OK, IEEE_T_1COL, IEEE_T_MCOL, IEEE_T_DEF,
+	IEEE_T_LCOL, IEEE_T_EXCOL, IEEE_T_MACERR, IEEE_T_CSERR, IEEE_T_SQE,
+	IEEE_T_FDXFC, IEEE_T_OCTETS_OK,
+	RMON_R_PACKETS, RMON_R_BC_PKT, RMON_R_MC_PKT, RMON_R_CRC_ALIGN,
+	RMON_R_UNDERSIZE, RMON_R_OVERSIZE, RMON_R_FRAG, RMON_R_JAB,
+	RMON_R_RESVD_O, RMON_R_P64, RMON_R_P65TO127, RMON_R_P128TO255,
+	RMON_R_P256TO511, RMON_R_P512TO1023, RMON_R_P1024TO2047,
+	RMON_R_P_GTE2048, RMON_R_OCTETS,
+	IEEE_R_DROP, IEEE_R_FRAME_OK, IEEE_R_CRC, IEEE_R_ALIGN, IEEE_R_MACERR,
+	IEEE_R_FDXFC, IEEE_R_OCTETS_OK
+};
+#else
+static __u32 fec_enet_register_version = 1;
+static u32 fec_enet_register_offset[] = {
+	FEC_ECNTRL, FEC_IEVENT, FEC_IMASK, FEC_IVEC, FEC_R_DES_ACTIVE_0,
+	FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2, FEC_X_DES_ACTIVE_0,
+	FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2, FEC_MII_DATA, FEC_MII_SPEED,
+	FEC_R_BOUND, FEC_R_FSTART, FEC_X_WMRK, FEC_X_FSTART, FEC_R_CNTRL,
+	FEC_MAX_FRM_LEN, FEC_X_CNTRL, FEC_ADDR_LOW, FEC_ADDR_HIGH,
+	FEC_GRP_HASH_TABLE_HIGH, FEC_GRP_HASH_TABLE_LOW, FEC_R_DES_START_0,
+	FEC_R_DES_START_1, FEC_R_DES_START_2, FEC_X_DES_START_0,
+	FEC_X_DES_START_1, FEC_X_DES_START_2, FEC_R_BUFF_SIZE_0,
+	FEC_R_BUFF_SIZE_1, FEC_R_BUFF_SIZE_2
+};
+#endif
+
+static void fec_enet_get_regs(struct net_device *ndev,
+			      struct ethtool_regs *regs, void *regbuf)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	u32 __iomem *theregs = (u32 __iomem *)fep->hwp;
+	struct device *dev = &fep->pdev->dev;
+	u32 *buf = (u32 *)regbuf;
+	u32 i, off;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(dev);
+	if (ret < 0)
+		return;
+
+	regs->version = fec_enet_register_version;
+
+	memset(buf, 0, regs->len);
+
+	for (i = 0; i < ARRAY_SIZE(fec_enet_register_offset); i++) {
+		off = fec_enet_register_offset[i];
+
+		if ((off == FEC_R_BOUND || off == FEC_R_FSTART) &&
+		    !(fep->quirks & FEC_QUIRK_HAS_FRREG))
+			continue;
+
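+		/* the register dump buffer is indexed by 32-bit words, so
+		 * convert the byte offset before storing
+		 */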
+		off >>= 2;
+		buf[off] = readl(&theregs[off]);
+	}
+
+	pm_runtime_mark_last_busy(dev);
+	pm_runtime_put_autosuspend(dev);
+}
+
+static int fec_enet_get_ts_info(struct net_device *ndev,
+				struct ethtool_ts_info *info)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	if (fep->bufdesc_ex) {
+		info->so_timestamping = SOF_TIMESTAMPING_TX_SOFTWARE |
+					SOF_TIMESTAMPING_RX_SOFTWARE |
+					SOF_TIMESTAMPING_SOFTWARE |
+					SOF_TIMESTAMPING_TX_HARDWARE |
+					SOF_TIMESTAMPING_RX_HARDWARE |
+					SOF_TIMESTAMPING_RAW_HARDWARE;
+		if (fep->ptp_clock)
+			info->phc_index = ptp_clock_index(fep->ptp_clock);
+		else
+			info->phc_index = -1;
+
+		info->tx_types = (1 << HWTSTAMP_TX_OFF) |
+				 (1 << HWTSTAMP_TX_ON);
+
+		info->rx_filters = (1 << HWTSTAMP_FILTER_NONE) |
+				   (1 << HWTSTAMP_FILTER_ALL);
+		return 0;
+	} else {
+		return ethtool_op_get_ts_info(ndev, info);
+	}
+}
+
+#if !defined(CONFIG_M5272)
+
+static void fec_enet_get_pauseparam(struct net_device *ndev,
+				    struct ethtool_pauseparam *pause)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	pause->autoneg = (fep->pause_flag & FEC_PAUSE_FLAG_AUTONEG) != 0;
+	pause->tx_pause = (fep->pause_flag & FEC_PAUSE_FLAG_ENABLE) != 0;
+	pause->rx_pause = pause->tx_pause;
+}
+
+static int fec_enet_set_pauseparam(struct net_device *ndev,
+				   struct ethtool_pauseparam *pause)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+
+	if (!ndev->phydev)
+		return -ENODEV;
+
+	if (pause->tx_pause != pause->rx_pause) {
+		netdev_info(ndev,
+			"hardware only supports enabling/disabling both tx and rx\n");
+		return -EINVAL;
+	}
+
+	fep->pause_flag = 0;
+
+	/* tx pause must be same as rx pause */
+	fep->pause_flag |= pause->rx_pause ? FEC_PAUSE_FLAG_ENABLE : 0;
+	fep->pause_flag |= pause->autoneg ? FEC_PAUSE_FLAG_AUTONEG : 0;
+
+	phy_set_sym_pause(ndev->phydev, pause->rx_pause, pause->tx_pause,
+			  pause->autoneg);
+
+	if (pause->autoneg) {
+		if (rtnetif_running(&frt->dev))
+			fec_stop(ndev);
+		phy_start_aneg(ndev->phydev);
+	}
+	if (rtnetif_running(&frt->dev)) {
+		rtnetif_stop_queue(&frt->dev);
+		fec_restart(ndev);
+		rtnetif_wake_queue(&frt->dev);
+	}
+
+	return 0;
+}
+
+static const struct fec_stat {
+	char name[ETH_GSTRING_LEN];
+	u16 offset;
+} fec_stats[] = {
+	/* RMON TX */
+	{ "tx_dropped", RMON_T_DROP },
+	{ "tx_packets", RMON_T_PACKETS },
+	{ "tx_broadcast", RMON_T_BC_PKT },
+	{ "tx_multicast", RMON_T_MC_PKT },
+	{ "tx_crc_errors", RMON_T_CRC_ALIGN },
+	{ "tx_undersize", RMON_T_UNDERSIZE },
+	{ "tx_oversize", RMON_T_OVERSIZE },
+	{ "tx_fragment", RMON_T_FRAG },
+	{ "tx_jabber", RMON_T_JAB },
+	{ "tx_collision", RMON_T_COL },
+	{ "tx_64byte", RMON_T_P64 },
+	{ "tx_65to127byte", RMON_T_P65TO127 },
+	{ "tx_128to255byte", RMON_T_P128TO255 },
+	{ "tx_256to511byte", RMON_T_P256TO511 },
+	{ "tx_512to1023byte", RMON_T_P512TO1023 },
+	{ "tx_1024to2047byte", RMON_T_P1024TO2047 },
+	{ "tx_GTE2048byte", RMON_T_P_GTE2048 },
+	{ "tx_octets", RMON_T_OCTETS },
+
+	/* IEEE TX */
+	{ "IEEE_tx_drop", IEEE_T_DROP },
+	{ "IEEE_tx_frame_ok", IEEE_T_FRAME_OK },
+	{ "IEEE_tx_1col", IEEE_T_1COL },
+	{ "IEEE_tx_mcol", IEEE_T_MCOL },
+	{ "IEEE_tx_def", IEEE_T_DEF },
+	{ "IEEE_tx_lcol", IEEE_T_LCOL },
+	{ "IEEE_tx_excol", IEEE_T_EXCOL },
+	{ "IEEE_tx_macerr", IEEE_T_MACERR },
+	{ "IEEE_tx_cserr", IEEE_T_CSERR },
+	{ "IEEE_tx_sqe", IEEE_T_SQE },
+	{ "IEEE_tx_fdxfc", IEEE_T_FDXFC },
+	{ "IEEE_tx_octets_ok", IEEE_T_OCTETS_OK },
+
+	/* RMON RX */
+	{ "rx_packets", RMON_R_PACKETS },
+	{ "rx_broadcast", RMON_R_BC_PKT },
+	{ "rx_multicast", RMON_R_MC_PKT },
+	{ "rx_crc_errors", RMON_R_CRC_ALIGN },
+	{ "rx_undersize", RMON_R_UNDERSIZE },
+	{ "rx_oversize", RMON_R_OVERSIZE },
+	{ "rx_fragment", RMON_R_FRAG },
+	{ "rx_jabber", RMON_R_JAB },
+	{ "rx_64byte", RMON_R_P64 },
+	{ "rx_65to127byte", RMON_R_P65TO127 },
+	{ "rx_128to255byte", RMON_R_P128TO255 },
+	{ "rx_256to511byte", RMON_R_P256TO511 },
+	{ "rx_512to1023byte", RMON_R_P512TO1023 },
+	{ "rx_1024to2047byte", RMON_R_P1024TO2047 },
+	{ "rx_GTE2048byte", RMON_R_P_GTE2048 },
+	{ "rx_octets", RMON_R_OCTETS },
+
+	/* IEEE RX */
+	{ "IEEE_rx_drop", IEEE_R_DROP },
+	{ "IEEE_rx_frame_ok", IEEE_R_FRAME_OK },
+	{ "IEEE_rx_crc", IEEE_R_CRC },
+	{ "IEEE_rx_align", IEEE_R_ALIGN },
+	{ "IEEE_rx_macerr", IEEE_R_MACERR },
+	{ "IEEE_rx_fdxfc", IEEE_R_FDXFC },
+	{ "IEEE_rx_octets_ok", IEEE_R_OCTETS_OK },
+};
+
+#define FEC_STATS_SIZE		(ARRAY_SIZE(fec_stats) * sizeof(u64))
+
+static void fec_enet_update_ethtool_stats(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+		fep->ethtool_stats[i] = readl(fep->hwp + fec_stats[i].offset);
+}
+
+static void fec_enet_get_ethtool_stats(struct net_device *dev,
+				       struct ethtool_stats *stats, u64 *data)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	struct fec_rt_data *frt = &fep->rtnet;
+
+	if (rtnetif_running(&frt->dev))
+		fec_enet_update_ethtool_stats(dev);
+
+	memcpy(data, fep->ethtool_stats, FEC_STATS_SIZE);
+}
+
+static void fec_enet_get_strings(struct net_device *netdev,
+	u32 stringset, u8 *data)
+{
+	int i;
+	switch (stringset) {
+	case ETH_SS_STATS:
+		for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+			memcpy(data + i * ETH_GSTRING_LEN,
+				fec_stats[i].name, ETH_GSTRING_LEN);
+		break;
+	}
+}
+
+static int fec_enet_get_sset_count(struct net_device *dev, int sset)
+{
+	switch (sset) {
+	case ETH_SS_STATS:
+		return ARRAY_SIZE(fec_stats);
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+static void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+	struct fec_enet_private *fep = netdev_priv(dev);
+	int i;
+
+	/* Disable MIB statistics counters */
+	writel(FEC_MIB_CTRLSTAT_DISABLE, fep->hwp + FEC_MIB_CTRLSTAT);
+
+	for (i = 0; i < ARRAY_SIZE(fec_stats); i++)
+		writel(0, fep->hwp + fec_stats[i].offset);
+
+	/* Don't disable MIB statistics counters */
+	writel(0, fep->hwp + FEC_MIB_CTRLSTAT);
+}
+
+#else	/* !defined(CONFIG_M5272) */
+#define FEC_STATS_SIZE	0
+static inline void fec_enet_update_ethtool_stats(struct net_device *dev)
+{
+}
+
+static inline void fec_enet_clear_ethtool_stats(struct net_device *dev)
+{
+}
+#endif /* !defined(CONFIG_M5272) */
+
+/* The ITR clock source is the enet system clock (clk_ahb).
+ * The TCTT unit is cycle_ns * 64 cycles.
+ * So, the ICTT value = X us / (cycle_ns * 64)
+ */
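+/*
+ * Worked example (assuming a 66 MHz clk_ahb): 66000000 / 64000 = 1031, so
+ * 1000 us converts to 1031 * 1000 / 1000 = 1031 ITR ticks of ~970 ns each.
+ */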
+static int fec_enet_us_to_itr_clock(struct net_device *ndev, int us)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	return us * (fep->itr_clk_rate / 64000) / 1000;
+}
+
+/* Set threshold for interrupt coalescing */
+static void fec_enet_itr_coal_set(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int rx_itr, tx_itr;
+
+	/* Must be greater than zero to avoid unpredictable behavior */
+	if (!fep->rx_time_itr || !fep->rx_pkts_itr ||
+	    !fep->tx_time_itr || !fep->tx_pkts_itr)
+		return;
+
+	/* Select enet system clock as Interrupt Coalescing
+	 * timer Clock Source
+	 */
+	rx_itr = FEC_ITR_CLK_SEL;
+	tx_itr = FEC_ITR_CLK_SEL;
+
+	/* set ICFT and ICTT */
+	rx_itr |= FEC_ITR_ICFT(fep->rx_pkts_itr);
+	rx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->rx_time_itr));
+	tx_itr |= FEC_ITR_ICFT(fep->tx_pkts_itr);
+	tx_itr |= FEC_ITR_ICTT(fec_enet_us_to_itr_clock(ndev, fep->tx_time_itr));
+
+	rx_itr |= FEC_ITR_EN;
+	tx_itr |= FEC_ITR_EN;
+
+	writel(tx_itr, fep->hwp + FEC_TXIC0);
+	writel(rx_itr, fep->hwp + FEC_RXIC0);
+	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+		writel(tx_itr, fep->hwp + FEC_TXIC1);
+		writel(rx_itr, fep->hwp + FEC_RXIC1);
+		writel(tx_itr, fep->hwp + FEC_TXIC2);
+		writel(rx_itr, fep->hwp + FEC_RXIC2);
+	}
+}
+
+static int
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec,
+		      struct kernel_ethtool_coalesce *kernel_coal,
+		      struct netlink_ext_ack *extack)
+#else
+fec_enet_get_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+#endif
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
+		return -EOPNOTSUPP;
+
+	ec->rx_coalesce_usecs = fep->rx_time_itr;
+	ec->rx_max_coalesced_frames = fep->rx_pkts_itr;
+
+	ec->tx_coalesce_usecs = fep->tx_time_itr;
+	ec->tx_max_coalesced_frames = fep->tx_pkts_itr;
+
+	return 0;
+}
+
+static int
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec,
+		      struct kernel_ethtool_coalesce *kernel_coal,
+		      struct netlink_ext_ack *extack)
+#else
+fec_enet_set_coalesce(struct net_device *ndev, struct ethtool_coalesce *ec)
+#endif
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct device *dev = &fep->pdev->dev;
+	unsigned int cycle;
+
+	if (!(fep->quirks & FEC_QUIRK_HAS_COALESCE))
+		return -EOPNOTSUPP;
+
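+	/* The frame-count (ICFT) field is 8 bits wide and the time (ICTT)
+	 * field 16 bits wide, hence the 255-frame and 0xFFFF-cycle bounds
+	 * checked below.
+	 */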
+	if (ec->rx_max_coalesced_frames > 255) {
+		dev_err(dev, "Rx coalesced frames exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	if (ec->tx_max_coalesced_frames > 255) {
+		dev_err(dev, "Tx coalesced frame exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->rx_coalesce_usecs);
+	if (cycle > 0xFFFF) {
+		dev_err(dev, "Rx coalesced usec exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	cycle = fec_enet_us_to_itr_clock(ndev, ec->tx_coalesce_usecs);
+	if (cycle > 0xFFFF) {
+		dev_err(dev, "Tx coalesced usec exceed hardware limitation\n");
+		return -EINVAL;
+	}
+
+	fep->rx_time_itr = ec->rx_coalesce_usecs;
+	fep->rx_pkts_itr = ec->rx_max_coalesced_frames;
+
+	fep->tx_time_itr = ec->tx_coalesce_usecs;
+	fep->tx_pkts_itr = ec->tx_max_coalesced_frames;
+
+	fec_enet_itr_coal_set(ndev);
+
+	return 0;
+}
+
+static void fec_enet_itr_coal_init(struct net_device *ndev)
+{
+	struct ethtool_coalesce ec;
+
+	ec.rx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+	ec.rx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+	ec.tx_coalesce_usecs = FEC_ITR_ICTT_DEFAULT;
+	ec.tx_max_coalesced_frames = FEC_ITR_ICFT_DEFAULT;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 15, 0)
+	fec_enet_set_coalesce(ndev, &ec, NULL, NULL);
+#else
+	fec_enet_set_coalesce(ndev, &ec);
+#endif
+}
+
+static int fec_enet_get_tunable(struct net_device *netdev,
+				const struct ethtool_tunable *tuna,
+				void *data)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		*(u32 *)data = fep->rx_copybreak;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static int fec_enet_set_tunable(struct net_device *netdev,
+				const struct ethtool_tunable *tuna,
+				const void *data)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	int ret = 0;
+
+	switch (tuna->id) {
+	case ETHTOOL_RX_COPYBREAK:
+		fep->rx_copybreak = *(u32 *)data;
+		break;
+	default:
+		ret = -EINVAL;
+		break;
+	}
+
+	return ret;
+}
+
+static void
+fec_enet_get_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	if (fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET) {
+		wol->supported = WAKE_MAGIC;
+		wol->wolopts = fep->wol_flag & FEC_WOL_FLAG_ENABLE ? WAKE_MAGIC : 0;
+	} else {
+		wol->supported = wol->wolopts = 0;
+	}
+}
+
+static int
+fec_enet_set_wol(struct net_device *ndev, struct ethtool_wolinfo *wol)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	if (!(fep->wol_flag & FEC_WOL_HAS_MAGIC_PACKET))
+		return -EINVAL;
+
+	if (wol->wolopts & ~WAKE_MAGIC)
+		return -EINVAL;
+
+	device_set_wakeup_enable(&ndev->dev, wol->wolopts & WAKE_MAGIC);
+	if (device_may_wakeup(&ndev->dev)) {
+		fep->wol_flag |= FEC_WOL_FLAG_ENABLE;
+		if (fep->irq[0] > 0)
+			enable_irq_wake(fep->irq[0]);
+	} else {
+		fep->wol_flag &= (~FEC_WOL_FLAG_ENABLE);
+		if (fep->irq[0] > 0)
+			disable_irq_wake(fep->irq[0]);
+	}
+
+	return 0;
+}
+
+static const struct ethtool_ops fec_enet_ethtool_ops = {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5,7,0)
+	.supported_coalesce_params = ETHTOOL_COALESCE_USECS |
+				     ETHTOOL_COALESCE_MAX_FRAMES,
+#endif
+	.get_drvinfo		= fec_enet_get_drvinfo,
+	.get_regs_len		= fec_enet_get_regs_len,
+	.get_regs		= fec_enet_get_regs,
+	.nway_reset		= phy_ethtool_nway_reset,
+	.get_link		= ethtool_op_get_link,
+	.get_coalesce		= fec_enet_get_coalesce,
+	.set_coalesce		= fec_enet_set_coalesce,
+#ifndef CONFIG_M5272
+	.get_pauseparam		= fec_enet_get_pauseparam,
+	.set_pauseparam		= fec_enet_set_pauseparam,
+	.get_strings		= fec_enet_get_strings,
+	.get_ethtool_stats	= fec_enet_get_ethtool_stats,
+	.get_sset_count		= fec_enet_get_sset_count,
+#endif
+	.get_ts_info		= fec_enet_get_ts_info,
+	.get_tunable		= fec_enet_get_tunable,
+	.set_tunable		= fec_enet_set_tunable,
+	.get_wol		= fec_enet_get_wol,
+	.set_wol		= fec_enet_set_wol,
+	.get_link_ksettings	= phy_ethtool_get_link_ksettings,
+	.set_link_ksettings	= phy_ethtool_set_link_ksettings,
+};
+
+static int fec_enet_ioctl(struct net_device *ndev, struct ifreq *rq, int cmd)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct phy_device *phydev = ndev->phydev;
+
+	if (!rtnetif_running(&frt->dev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	if (fep->bufdesc_ex) {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5,9,0)
+		bool use_fec_hwts = !phy_has_hwtstamp(phydev);
+#else
+		bool use_fec_hwts = true;
+#endif
+		if (cmd == SIOCSHWTSTAMP) {
+			if (use_fec_hwts)
+				return fec_ptp_set(ndev, rq);
+			fec_ptp_disable_hwts(ndev);
+		} else if (cmd == SIOCGHWTSTAMP) {
+			if (use_fec_hwts)
+				return fec_ptp_get(ndev, rq);
+		}
+	}
+
+	return phy_mii_ioctl(phydev, rq, cmd);
+}
+
+static int fec_rt_ioctl(struct rtnet_device *rtdev, struct ifreq *rq, int cmd)
+{
+	struct fec_enet_private *fep;
+
+	fep = container_of(rtdev, struct fec_enet_private, rtnet.dev);
+
+	return fec_enet_ioctl(fep->netdev, rq, cmd);
+}
+
+static void fec_enet_free_buffers(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	unsigned int i;
+	void *skb;
+	struct bufdesc	*bdp;
+	struct fec_enet_priv_tx_q *txq;
+	struct fec_enet_priv_rx_q *rxq;
+	unsigned int q, size;
+
+	for (q = 0; q < fep->num_rx_queues; q++) {
+		rxq = fep->rx_queue[q];
+		bdp = rxq->bd.base;
+		for (i = 0; i < rxq->bd.ring_size; i++) {
+			skb = rxq->rx_skbuff[i];
+			if (!skb)
+				goto skip;
+			rxq->rx_skbuff[i] = NULL;
+			dev_kfree_rtskb(skb);
+			size = RTSKB_SIZE;
+
+			dma_unmap_single(&fep->pdev->dev,
+					 fec32_to_cpu(bdp->cbd_bufaddr),
+					 size - fep->rx_align,
+					 DMA_FROM_DEVICE);
+		skip:
+			bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+		}
+	}
+
+	for (q = 0; q < fep->num_tx_queues; q++) {
+		txq = fep->tx_queue[q];
+		for (i = 0; i < txq->bd.ring_size; i++) {
+			kfree(txq->tx_bounce[i]);
+			txq->tx_bounce[i] = NULL;
+			skb = txq->tx_skbuff[i];
+			if (!skb)
+				continue;
+			txq->tx_skbuff[i] = NULL;
+			dev_kfree_rtskb(skb);
+		}
+	}
+}
+
+static void fec_enet_free_queue(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int i;
+	struct fec_enet_priv_tx_q *txq;
+
+	for (i = 0; i < fep->num_tx_queues; i++)
+		if (fep->tx_queue[i] && fep->tx_queue[i]->tso_hdrs) {
+			txq = fep->tx_queue[i];
+			dma_free_coherent(&fep->pdev->dev,
+					  txq->bd.ring_size * TSO_HEADER_SIZE,
+					  txq->tso_hdrs,
+					  txq->tso_hdrs_dma);
+		}
+
+	for (i = 0; i < fep->num_rx_queues; i++)
+		kfree(fep->rx_queue[i]);
+	for (i = 0; i < fep->num_tx_queues; i++)
+		kfree(fep->tx_queue[i]);
+}
+
+static int fec_enet_alloc_queue(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int i;
+	int ret = 0;
+	struct fec_enet_priv_tx_q *txq;
+
+	for (i = 0; i < fep->num_tx_queues; i++) {
+		txq = kzalloc(sizeof(*txq), GFP_KERNEL);
+		if (!txq) {
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+
+		fep->tx_queue[i] = txq;
+		txq->bd.ring_size = TX_RING_SIZE;
+		fep->total_tx_ring_size += fep->tx_queue[i]->bd.ring_size;
+
+		txq->tx_stop_threshold = FEC_MAX_SKB_DESCS;
+		txq->tx_wake_threshold =
+			(txq->bd.ring_size - txq->tx_stop_threshold) / 2;
+
+		txq->tso_hdrs = dma_alloc_coherent(&fep->pdev->dev,
+					txq->bd.ring_size * TSO_HEADER_SIZE,
+					&txq->tso_hdrs_dma,
+					GFP_KERNEL);
+		if (!txq->tso_hdrs) {
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+	}
+
+	for (i = 0; i < fep->num_rx_queues; i++) {
+		fep->rx_queue[i] = kzalloc(sizeof(*fep->rx_queue[i]),
+					   GFP_KERNEL);
+		if (!fep->rx_queue[i]) {
+			ret = -ENOMEM;
+			goto alloc_failed;
+		}
+
+		fep->rx_queue[i]->bd.ring_size = RX_RING_SIZE;
+		fep->total_rx_ring_size += fep->rx_queue[i]->bd.ring_size;
+	}
+	return ret;
+
+alloc_failed:
+	fec_enet_free_queue(ndev);
+	return ret;
+}
+
+static int
+fec_enet_alloc_rxq_buffers(struct net_device *ndev, unsigned int queue)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	unsigned int i;
+	struct rtskb *rtskb;
+	struct bufdesc	*bdp;
+	struct fec_enet_priv_rx_q *rxq;
+
+	rxq = fep->rx_queue[queue];
+	bdp = rxq->bd.base;
+	for (i = 0; i < rxq->bd.ring_size; i++) {
+		rtskb = rtnetdev_alloc_rtskb(&frt->dev, RTSKB_SIZE);
+		if (!rtskb)
+			goto err_alloc;
+
+		if (fec_rt_new_rxbdp(ndev, bdp, rtskb)) {
+			dev_kfree_rtskb(rtskb);
+			goto err_alloc;
+		}
+		rxq->rx_rtbuff[i] = rtskb;
+		bdp->cbd_sc = cpu_to_fec16(BD_ENET_RX_EMPTY);
+
+		if (fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_RX_INT);
+		}
+
+		bdp = fec_enet_get_nextdesc(bdp, &rxq->bd);
+	}
+
+	/* Set the last buffer to wrap. */
+	bdp = fec_enet_get_prevdesc(bdp, &rxq->bd);
+	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+	return 0;
+
+ err_alloc:
+	fec_enet_free_buffers(ndev);
+	return -ENOMEM;
+}
+
+static int
+fec_enet_alloc_txq_buffers(struct net_device *ndev, unsigned int queue)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	unsigned int i;
+	struct bufdesc  *bdp;
+	struct fec_enet_priv_tx_q *txq;
+
+	txq = fep->tx_queue[queue];
+	bdp = txq->bd.base;
+	for (i = 0; i < txq->bd.ring_size; i++) {
+		txq->tx_bounce[i] = kmalloc(FEC_ENET_TX_FRSIZE, GFP_KERNEL);
+		if (!txq->tx_bounce[i])
+			goto err_alloc;
+
+		bdp->cbd_sc = cpu_to_fec16(0);
+		bdp->cbd_bufaddr = cpu_to_fec32(0);
+
+		if (fep->bufdesc_ex) {
+			struct bufdesc_ex *ebdp = (struct bufdesc_ex *)bdp;
+			ebdp->cbd_esc = cpu_to_fec32(BD_ENET_TX_INT);
+		}
+
+		bdp = fec_enet_get_nextdesc(bdp, &txq->bd);
+	}
+
+	/* Set the last buffer to wrap. */
+	bdp = fec_enet_get_prevdesc(bdp, &txq->bd);
+	bdp->cbd_sc |= cpu_to_fec16(BD_SC_WRAP);
+
+	return 0;
+
+ err_alloc:
+	fec_enet_free_buffers(ndev);
+	return -ENOMEM;
+}
+
+static int fec_enet_alloc_buffers(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	unsigned int i;
+
+	for (i = 0; i < fep->num_rx_queues; i++)
+		if (fec_enet_alloc_rxq_buffers(ndev, i))
+			return -ENOMEM;
+
+	for (i = 0; i < fep->num_tx_queues; i++)
+		if (fec_enet_alloc_txq_buffers(ndev, i))
+			return -ENOMEM;
+	return 0;
+}
+
+static int
+__fec_enet_open(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
+	bool reset_again;
+
+	ret = pm_runtime_resume_and_get(&fep->pdev->dev);
+	if (ret < 0)
+		return ret;
+
+	pinctrl_pm_select_default_state(&fep->pdev->dev);
+	ret = fec_enet_clk_enable(ndev, true);
+	if (ret)
+		goto clk_enable;
+
+	/* During the first fec_enet_open call the PHY is not probed yet.
+	 * Therefore the phy_reset_after_clk_enable() call within
+	 * fec_enet_clk_enable() fails. As we need this reset to be sure the
+	 * PHY is working correctly, check whether we need to reset again
+	 * later, once the PHY has been probed.
+	 */
+	if (ndev->phydev && ndev->phydev->drv)
+		reset_again = false;
+	else
+		reset_again = true;
+
+	/* I should reset the ring buffers here, but I don't yet know
+	 * a simple way to do that.
+	 */
+
+	ret = fec_enet_alloc_buffers(ndev);
+	if (ret)
+		goto err_enet_alloc;
+
+	/* Init MAC prior to mii bus probe */
+	fec_restart(ndev);
+
+	/* Call phy_reset_after_clk_enable() again if the earlier call from
+	 * fec_enet_clk_enable() was ineffective because the PHY wasn't probed.
+	 */
+	if (reset_again)
+		fec_enet_phy_reset_after_clk_enable(ndev);
+
+	/* Probe and connect to PHY when open the interface */
+	ret = fec_enet_mii_probe(ndev);
+	if (ret)
+		goto err_enet_mii_probe;
+
+	if (fep->quirks & FEC_QUIRK_ERR006687)
+		imx6q_cpuidle_fec_irqs_used();
+
+	phy_start(ndev->phydev);
+	netif_tx_start_all_queues(ndev);
+
+	device_set_wakeup_enable(&ndev->dev, fep->wol_flag &
+				 FEC_WOL_FLAG_ENABLE);
+
+	return 0;
+
+err_enet_mii_probe:
+	fec_enet_free_buffers(ndev);
+err_enet_alloc:
+	fec_enet_clk_enable(ndev, false);
+clk_enable:
+	pm_runtime_mark_last_busy(&fep->pdev->dev);
+	pm_runtime_put_autosuspend(&fep->pdev->dev);
+	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+	return ret;
+}
+
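+/*
+ * In this RTnet port the interface is brought up via fec_rt_open(); the
+ * regular netdev open path is intentionally disabled.
+ */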
+static int
+fec_enet_open(struct net_device *ndev)
+{
+	return -EBUSY;
+}
+
+static int
+fec_rt_open(struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep;
+	int ret;
+
+	fep = container_of(rtdev, struct fec_enet_private, rtnet.dev);
+	ret = __fec_enet_open(fep->netdev);
+	if (ret)
+		return ret;
+
+	rt_stack_connect(rtdev, &STACK_manager);
+	rtnetif_start_queue(rtdev);
+
+	return 0;
+}
+
+static int
+fec_enet_close(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	phy_stop(ndev->phydev);
+
+	if (netif_device_present(ndev)) {
+		netif_tx_disable(ndev);
+		fec_stop(ndev);
+	}
+
+	phy_disconnect(ndev->phydev);
+
+	if (fep->quirks & FEC_QUIRK_ERR006687)
+		imx6q_cpuidle_fec_irqs_unused();
+
+	fec_enet_update_ethtool_stats(ndev);
+
+	fec_enet_clk_enable(ndev, false);
+	pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+	pm_runtime_mark_last_busy(&fep->pdev->dev);
+	pm_runtime_put_autosuspend(&fep->pdev->dev);
+
+	fec_enet_free_buffers(ndev);
+
+	return 0;
+}
+
+static int
+fec_rt_close(struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep;
+
+	fep = container_of(rtdev, struct fec_enet_private, rtnet.dev);
+	rtnetif_stop_queue(rtdev);
+	rtnetif_carrier_off(rtdev);
+	rt_stack_disconnect(rtdev);
+
+	return fec_enet_close(fep->netdev);
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+#define FEC_HASH_BITS	6		/* #bits in hash */
+
+static void set_multicast_list(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct netdev_hw_addr *ha;
+	unsigned int crc, tmp;
+	unsigned char hash;
+	unsigned int hash_high = 0, hash_low = 0;
+
+	if (ndev->flags & IFF_PROMISC) {
+		tmp = readl(fep->hwp + FEC_R_CNTRL);
+		tmp |= 0x8;
+		writel(tmp, fep->hwp + FEC_R_CNTRL);
+		return;
+	}
+
+	tmp = readl(fep->hwp + FEC_R_CNTRL);
+	tmp &= ~0x8;
+	writel(tmp, fep->hwp + FEC_R_CNTRL);
+
+	if (ndev->flags & IFF_ALLMULTI) {
+		/* Catch all multicast addresses, so set the
+		 * filter to all 1's
+		 */
+		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+		writel(0xffffffff, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+
+		return;
+	}
+
+	/* Add the addresses in hash register */
+	netdev_for_each_mc_addr(ha, ndev) {
+		/* calculate crc32 value of mac address */
+		crc = ether_crc_le(ndev->addr_len, ha->addr);
+
+		/* only upper 6 bits (FEC_HASH_BITS) are used
+		 * which point to specific bit in the hash registers
+		 */
+		hash = (crc >> (32 - FEC_HASH_BITS)) & 0x3f;
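+		/* e.g. crc = 0xfc000000 yields hash = 63, which sets bit 31
+		 * of GRP_HASH_TABLE_HIGH below
+		 */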
+
+		if (hash > 31)
+			hash_high |= 1 << (hash - 32);
+		else
+			hash_low |= 1 << hash;
+	}
+
+	writel(hash_high, fep->hwp + FEC_GRP_HASH_TABLE_HIGH);
+	writel(hash_low, fep->hwp + FEC_GRP_HASH_TABLE_LOW);
+}
+
+/* Set a MAC change in hardware. */
+static int
+fec_set_mac_address(struct net_device *ndev, void *p)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct sockaddr *addr = p;
+
+	if (addr) {
+		if (!is_valid_ether_addr(addr->sa_data))
+			return -EADDRNOTAVAIL;
+		eth_hw_addr_set(ndev, addr->sa_data);
+	}
+
+	/* Add a netif status check here to avoid a system hang in the
+	 * following case:
+	 * ifconfig ethx down; ifconfig ethx hw ether xx:xx:xx:xx:xx:xx;
+	 * After ethx is down, all FEC clocks are gated off and any register
+	 * access then hangs the system.
+	 */
+	if (!rtnetif_running(&frt->dev))
+		return 0;
+
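+	/* Byte layout example: for MAC 00:11:22:33:44:55, ADDR_LOW is
+	 * 0x00112233 (byte 0 in the MSB) and ADDR_HIGH is 0x44550000.
+	 */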
+	writel(ndev->dev_addr[3] | (ndev->dev_addr[2] << 8) |
+		(ndev->dev_addr[1] << 16) | (ndev->dev_addr[0] << 24),
+		fep->hwp + FEC_ADDR_LOW);
+	writel((ndev->dev_addr[5] << 16) | (ndev->dev_addr[4] << 24),
+		fep->hwp + FEC_ADDR_HIGH);
+	return 0;
+}
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/**
+ * fec_poll_controller - FEC Poll controller function
+ * @dev: The FEC network adapter
+ *
+ * Polled functionality used by netconsole and others in non-interrupt mode.
+ */
+static void fec_poll_controller(struct net_device *dev)
+{
+	int i;
+	struct fec_enet_private *fep = netdev_priv(dev);
+
+	for (i = 0; i < FEC_IRQ_NUM; i++) {
+		if (fep->irq[i] > 0) {
+			disable_irq(fep->irq[i]);
+			fec_enet_interrupt(fep->irq[i], dev);
+			enable_irq(fep->irq[i]);
+		}
+	}
+}
+#endif
+
+static inline void fec_enet_set_netdev_features(struct net_device *netdev,
+	netdev_features_t features)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	netdev_features_t changed = features ^ netdev->features;
+
+	netdev->features = features;
+
+	/* Receive checksum has been changed */
+	if (changed & NETIF_F_RXCSUM) {
+		if (features & NETIF_F_RXCSUM)
+			fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+		else
+			fep->csum_flags &= ~FLAG_RX_CSUM_ENABLED;
+	}
+}
+
+static int fec_set_features(struct net_device *netdev,
+	netdev_features_t features)
+{
+	struct fec_enet_private *fep = netdev_priv(netdev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	netdev_features_t changed = features ^ netdev->features;
+
+	if (rtnetif_running(&frt->dev) && changed & NETIF_F_RXCSUM) {
+		rtnetif_stop_queue(&frt->dev);
+		fec_stop(netdev);
+		fec_enet_set_netdev_features(netdev, features);
+		fec_restart(netdev);
+		rtnetif_wake_queue(&frt->dev);
+	} else {
+		fec_enet_set_netdev_features(netdev, features);
+	}
+
+	return 0;
+}
+
+static u16 fec_enet_get_raw_vlan_tci(struct sk_buff *skb)
+{
+	struct vlan_ethhdr *vhdr;
+	unsigned short vlan_TCI = 0;
+
+	if (skb->protocol == htons(ETH_P_ALL)) {
+		vhdr = (struct vlan_ethhdr *)(skb->data);
+		vlan_TCI = ntohs(vhdr->h_vlan_TCI);
+	}
+
+	return vlan_TCI;
+}
+
+static u16 fec_enet_select_queue(struct net_device *ndev, struct sk_buff *skb,
+				 struct net_device *sb_dev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	u16 vlan_tag;
+
+	if (!(fep->quirks & FEC_QUIRK_HAS_AVB))
+		return netdev_pick_tx(ndev, skb, NULL);
+
+	vlan_tag = fec_enet_get_raw_vlan_tci(skb);
+	if (!vlan_tag)
+		return vlan_tag;
+
+	return fec_enet_vlan_pri_to_queue[vlan_tag >> 13];
+}
+
+static const struct net_device_ops fec_netdev_ops = {
+	.ndo_open		= fec_enet_open,
+	.ndo_stop		= fec_enet_close,
+	.ndo_start_xmit		= fec_enet_start_xmit,
+	.ndo_select_queue       = fec_enet_select_queue,
+	.ndo_set_rx_mode	= set_multicast_list,
+	.ndo_validate_addr	= eth_validate_addr,
+	.ndo_tx_timeout		= fec_timeout,
+	.ndo_set_mac_address	= fec_set_mac_address,
+	.ndo_do_ioctl		= fec_enet_ioctl,
+#ifdef CONFIG_NET_POLL_CONTROLLER
+	.ndo_poll_controller	= fec_poll_controller,
+#endif
+	.ndo_set_features	= fec_set_features,
+};
+
+static const unsigned short offset_des_active_rxq[] = {
+	FEC_R_DES_ACTIVE_0, FEC_R_DES_ACTIVE_1, FEC_R_DES_ACTIVE_2
+};
+
+static const unsigned short offset_des_active_txq[] = {
+	FEC_X_DES_ACTIVE_0, FEC_X_DES_ACTIVE_1, FEC_X_DES_ACTIVE_2
+};
+
+ /*
+  * XXX:  We need to clean up on failure exits here.
+  *
+  */
+static int fec_enet_init(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct bufdesc *cbd_base;
+	dma_addr_t bd_dma;
+	int bd_size;
+	unsigned int i;
+	unsigned dsize = fep->bufdesc_ex ? sizeof(struct bufdesc_ex) :
+			sizeof(struct bufdesc);
+	unsigned dsize_log2 = __fls(dsize);
+	int ret;
+
+	WARN_ON(dsize != (1 << dsize_log2));
+#if defined(CONFIG_ARM) || defined(CONFIG_ARM64)
+	fep->rx_align = 0xf;
+	fep->tx_align = 0xf;
+#else
+	fep->rx_align = 0x3;
+	fep->tx_align = 0x3;
+#endif
+
+	/* Check mask of the streaming and coherent API */
+	ret = dma_set_mask_and_coherent(&fep->pdev->dev, DMA_BIT_MASK(32));
+	if (ret < 0) {
+		dev_warn(&fep->pdev->dev, "No suitable DMA available\n");
+		return ret;
+	}
+
+	ret = fec_enet_alloc_queue(ndev);
+	if (ret)
+		return ret;
+
+	bd_size = (fep->total_tx_ring_size + fep->total_rx_ring_size) * dsize;
+
+	/* Allocate memory for buffer descriptors. */
+	cbd_base = dmam_alloc_coherent(&fep->pdev->dev, bd_size, &bd_dma,
+				       GFP_KERNEL);
+	if (!cbd_base) {
+		ret = -ENOMEM;
+		goto free_queue_mem;
+	}
+
+	/* Get the Ethernet address */
+	fec_get_mac(ndev);
+	/* make sure MAC we just acquired is programmed into the hw */
+	fec_set_mac_address(ndev, NULL);
+
+	memcpy(&frt->dev.dev_addr, ndev->dev_addr, ETH_ALEN);
+
+	/* Set receive and transmit descriptor base. */
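+	/* All RX rings are carved out of the single coherent allocation
+	 * first, followed by the TX rings; each ring's bd.last points at
+	 * its final descriptor.
+	 */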
+	for (i = 0; i < fep->num_rx_queues; i++) {
+		struct fec_enet_priv_rx_q *rxq = fep->rx_queue[i];
+		unsigned size = dsize * rxq->bd.ring_size;
+
+		rxq->bd.qid = i;
+		rxq->bd.base = cbd_base;
+		rxq->bd.cur = cbd_base;
+		rxq->bd.dma = bd_dma;
+		rxq->bd.dsize = dsize;
+		rxq->bd.dsize_log2 = dsize_log2;
+		rxq->bd.reg_desc_active = fep->hwp + offset_des_active_rxq[i];
+		bd_dma += size;
+		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+		rxq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
+	}
+
+	for (i = 0; i < fep->num_tx_queues; i++) {
+		struct fec_enet_priv_tx_q *txq = fep->tx_queue[i];
+		unsigned size = dsize * txq->bd.ring_size;
+
+		txq->bd.qid = i;
+		txq->bd.base = cbd_base;
+		txq->bd.cur = cbd_base;
+		txq->bd.dma = bd_dma;
+		txq->bd.dsize = dsize;
+		txq->bd.dsize_log2 = dsize_log2;
+		txq->bd.reg_desc_active = fep->hwp + offset_des_active_txq[i];
+		bd_dma += size;
+		cbd_base = (struct bufdesc *)(((void *)cbd_base) + size);
+		txq->bd.last = (struct bufdesc *)(((void *)cbd_base) - dsize);
+	}
+
+
+	/* The FEC Ethernet specific entries in the device structure */
+	ndev->watchdog_timeo = TX_TIMEOUT;
+	ndev->netdev_ops = &fec_netdev_ops;
+	ndev->ethtool_ops = &fec_enet_ethtool_ops;
+
+	writel(FEC_RX_DISABLED_IMASK, fep->hwp + FEC_IMASK);
+
+	if (fep->quirks & FEC_QUIRK_HAS_VLAN)
+		/* enable hw VLAN support */
+		ndev->features |= NETIF_F_HW_VLAN_CTAG_RX;
+
+	if (fep->quirks & FEC_QUIRK_HAS_CSUM) {
+		ndev->gso_max_segs = FEC_MAX_TSO_SEGS;
+
+		/* enable hw accelerator */
+		ndev->features |= (NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM
+				| NETIF_F_RXCSUM | NETIF_F_SG | NETIF_F_TSO);
+		fep->csum_flags |= FLAG_RX_CSUM_ENABLED;
+	}
+
+	if (fep->quirks & FEC_QUIRK_HAS_AVB) {
+		fep->tx_align = 0;
+		fep->rx_align = 0x3f;
+	}
+
+	ndev->hw_features = ndev->features;
+
+	fec_restart(ndev);
+
+	if (fep->quirks & FEC_QUIRK_MIB_CLEAR)
+		fec_enet_clear_ethtool_stats(ndev);
+	else
+		fec_enet_update_ethtool_stats(ndev);
+
+	return 0;
+
+free_queue_mem:
+	fec_enet_free_queue(ndev);
+	return ret;
+}
+
+#ifdef CONFIG_OF
+static int fec_reset_phy(struct platform_device *pdev)
+{
+	int err, phy_reset;
+	bool active_high = false;
+	int msec = 1, phy_post_delay = 0;
+	struct device_node *np = pdev->dev.of_node;
+
+	if (!np)
+		return 0;
+
+	err = of_property_read_u32(np, "phy-reset-duration", &msec);
+	/* A sane reset duration should not be longer than 1s */
+	if (!err && msec > 1000)
+		msec = 1;
+
+	phy_reset = of_get_named_gpio(np, "phy-reset-gpios", 0);
+	if (phy_reset == -EPROBE_DEFER)
+		return phy_reset;
+	else if (!gpio_is_valid(phy_reset))
+		return 0;
+
+	err = of_property_read_u32(np, "phy-reset-post-delay", &phy_post_delay);
+	/* valid reset duration should be less than 1s */
+	if (!err && phy_post_delay > 1000)
+		return -EINVAL;
+
+	active_high = of_property_read_bool(np, "phy-reset-active-high");
+
+	err = devm_gpio_request_one(&pdev->dev, phy_reset,
+			active_high ? GPIOF_OUT_INIT_HIGH : GPIOF_OUT_INIT_LOW,
+			"phy-reset");
+	if (err) {
+		dev_err(&pdev->dev, "failed to get phy-reset-gpios: %d\n", err);
+		return err;
+	}
+
+	if (msec > 20)
+		msleep(msec);
+	else
+		usleep_range(msec * 1000, msec * 1000 + 1000);
+
+	gpio_set_value_cansleep(phy_reset, !active_high);
+
+	if (!phy_post_delay)
+		return 0;
+
+	if (phy_post_delay > 20)
+		msleep(phy_post_delay);
+	else
+		usleep_range(phy_post_delay * 1000,
+			     phy_post_delay * 1000 + 1000);
+
+	return 0;
+}
+#else /* CONFIG_OF */
+static int fec_reset_phy(struct platform_device *pdev)
+{
+	/*
+	 * In case of platform probe, the reset has been done
+	 * by machine code.
+	 */
+	return 0;
+}
+#endif /* CONFIG_OF */
+
+static void
+fec_enet_get_queue_num(struct platform_device *pdev, int *num_tx, int *num_rx)
+{
+	struct device_node *np = pdev->dev.of_node;
+
+	*num_tx = *num_rx = 1;
+
+	if (!np || !of_device_is_available(np))
+		return;
+
+	/* parse the num of tx and rx queues */
+	of_property_read_u32(np, "fsl,num-tx-queues", num_tx);
+
+	of_property_read_u32(np, "fsl,num-rx-queues", num_rx);
+
+	if (*num_tx < 1 || *num_tx > FEC_ENET_MAX_TX_QS) {
+		dev_warn(&pdev->dev, "Invalid num_tx(=%d), fall back to 1\n",
+			 *num_tx);
+		*num_tx = 1;
+		return;
+	}
+
+	if (*num_rx < 1 || *num_rx > FEC_ENET_MAX_RX_QS) {
+		dev_warn(&pdev->dev, "Invalid num_rx(=%d), fall back to 1\n",
+			 *num_rx);
+		*num_rx = 1;
+		return;
+	}
+
+}
+
+static int fec_rt_init(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct rtnet_device *rtdev = &frt->dev;
+	int ret;
+
+	rtdev->open = fec_rt_open;
+	rtdev->stop = fec_rt_close;
+	rtdev->do_ioctl = fec_rt_ioctl;
+	rtdev->hard_start_xmit = fec_rt_start_xmit;
+	rtdev->get_stats = fec_rt_stats;
+	rtdev->sysbind = &fep->pdev->dev;
+
+	ret = rt_init_etherdev(rtdev, (RX_RING_SIZE + TX_RING_SIZE) * 2);
+	if (ret)
+		return ret;
+
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdm_lock_init(&frt->lock);
+
+	ret = rt_register_rtnetdev(rtdev);
+	if (ret) {
+		rt_rtdev_disconnect(rtdev);
+		return ret;
+	}
+
+	rtnetif_carrier_off(rtdev);
+
+	return 0;
+}
+
+static void fec_rt_destroy(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	struct rtnet_device *rtdev = &frt->dev;
+	int i;
+
+	for (i = 0; i < fep->irqnr; i++)
+		rtdm_irq_free(&frt->irq_handle[i]);
+
+	rtdm_nrtsig_destroy(&frt->mdio_sig);
+	rt_rtdev_disconnect(rtdev);
+	rt_unregister_rtnetdev(rtdev);
+	rtdev_destroy(rtdev);
+}
+
+static int fec_enet_get_irq_cnt(struct platform_device *pdev)
+{
+	int irq_cnt = platform_irq_count(pdev);
+
+	if (irq_cnt > FEC_IRQ_NUM)
+		irq_cnt = FEC_IRQ_NUM;	/* last for pps */
+	else if (irq_cnt == 2)
+		irq_cnt = 1;	/* last for pps */
+	else if (irq_cnt <= 0)
+		irq_cnt = 1;	/* At least 1 irq is needed */
+	return irq_cnt;
+}
+
+static int fec_enet_init_stop_mode(struct fec_enet_private *fep,
+				   struct device_node *np)
+{
+	struct device_node *gpr_np;
+	u32 out_val[3];
+	int ret = 0;
+
+	gpr_np = of_parse_phandle(np, "fsl,stop-mode", 0);
+	if (!gpr_np)
+		return 0;
+
+	ret = of_property_read_u32_array(np, "fsl,stop-mode", out_val,
+					 ARRAY_SIZE(out_val));
+	if (ret) {
+		dev_dbg(&fep->pdev->dev, "no stop mode property\n");
+		return ret;
+	}
+
+	fep->stop_gpr.gpr = syscon_node_to_regmap(gpr_np);
+	if (IS_ERR(fep->stop_gpr.gpr)) {
+		dev_err(&fep->pdev->dev, "could not find gpr regmap\n");
+		ret = PTR_ERR(fep->stop_gpr.gpr);
+		fep->stop_gpr.gpr = NULL;
+		goto out;
+	}
+
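+	/* "fsl,stop-mode" is a <&gpr reg bit> triplet: out_val[0] holds the
+	 * syscon phandle, out_val[1] the register offset and out_val[2] the
+	 * bit index.
+	 */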
+	fep->stop_gpr.reg = out_val[1];
+	fep->stop_gpr.bit = out_val[2];
+
+out:
+	of_node_put(gpr_np);
+
+	return ret;
+}
+
+static int
+fec_probe(struct platform_device *pdev)
+{
+	struct fec_enet_private *fep;
+	struct fec_platform_data *pdata;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5,5,0)
+	phy_interface_t interface;
+#endif
+	struct net_device *ndev;
+	int i, irq, ret = 0, eth_id;
+	const struct of_device_id *of_id;
+	static int dev_id;
+	struct device_node *np = pdev->dev.of_node, *phy_node;
+	int num_tx_qs;
+	int num_rx_qs;
+	char irq_name[8];
+	int irq_cnt;
+	struct fec_devinfo *dev_info;
+
+	fec_enet_get_queue_num(pdev, &num_tx_qs, &num_rx_qs);
+
+	/* Init network device */
+	ndev = alloc_etherdev_mqs(sizeof(struct fec_enet_private) +
+				  FEC_STATS_SIZE, num_tx_qs, num_rx_qs);
+	if (!ndev)
+		return -ENOMEM;
+
+	SET_NETDEV_DEV(ndev, &pdev->dev);
+
+	/* setup board info structure */
+	fep = netdev_priv(ndev);
+	fep->pdev = pdev; /* must be set before fec_rt_init() dereferences it */
+
+	ret = fec_rt_init(ndev);
+	if (ret)
+		goto failed_rt_init;
+
+	of_id = of_match_device(fec_dt_ids, &pdev->dev);
+	if (of_id)
+		pdev->id_entry = of_id->data;
+	dev_info = (struct fec_devinfo *)pdev->id_entry->driver_data;
+	if (dev_info)
+		fep->quirks = dev_info->quirks;
+
+	fep->netdev = ndev;
+	fep->num_rx_queues = num_rx_qs;
+	fep->num_tx_queues = num_tx_qs;
+
+#if !defined(CONFIG_M5272)
+	/* default enable pause frame auto negotiation */
+	if (fep->quirks & FEC_QUIRK_HAS_GBIT)
+		fep->pause_flag |= FEC_PAUSE_FLAG_AUTONEG;
+#endif
+
+	/* Select default pin state */
+	pinctrl_pm_select_default_state(&pdev->dev);
+
+	fep->hwp = devm_platform_ioremap_resource(pdev, 0);
+	if (IS_ERR(fep->hwp)) {
+		ret = PTR_ERR(fep->hwp);
+		goto failed_ioremap;
+	}
+
+	fep->dev_id = dev_id++;
+
+	platform_set_drvdata(pdev, ndev);
+
+	if ((of_machine_is_compatible("fsl,imx6q") ||
+	     of_machine_is_compatible("fsl,imx6dl")) &&
+	    !of_property_read_bool(np, "fsl,err006687-workaround-present"))
+		fep->quirks |= FEC_QUIRK_ERR006687;
+
+	if (of_get_property(np, "fsl,magic-packet", NULL))
+		fep->wol_flag |= FEC_WOL_HAS_MAGIC_PACKET;
+
+	ret = fec_enet_init_stop_mode(fep, np);
+	if (ret)
+		goto failed_stop_mode;
+
+	phy_node = of_parse_phandle(np, "phy-handle", 0);
+	if (!phy_node && of_phy_is_fixed_link(np)) {
+		ret = of_phy_register_fixed_link(np);
+		if (ret < 0) {
+			dev_err(&pdev->dev,
+				"broken fixed-link specification\n");
+			goto failed_phy;
+		}
+		phy_node = of_node_get(np);
+	}
+	fep->phy_node = phy_node;
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5,5,0)
+	ret = of_get_phy_mode(pdev->dev.of_node, &interface);
+	if (ret) {
+#else
+	ret = of_get_phy_mode(pdev->dev.of_node);
+	if (ret < 0) {
+#endif
+		pdata = dev_get_platdata(&pdev->dev);
+		if (pdata)
+			fep->phy_interface = pdata->phy;
+		else
+			fep->phy_interface = PHY_INTERFACE_MODE_MII;
+	} else {
+#if LINUX_VERSION_CODE > KERNEL_VERSION(5,5,0)
+		fep->phy_interface = interface;
+#else
+		fep->phy_interface = ret;
+#endif
+	}
+
+	fep->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(fep->clk_ipg)) {
+		ret = PTR_ERR(fep->clk_ipg);
+		goto failed_clk;
+	}
+
+	fep->clk_ahb = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(fep->clk_ahb)) {
+		ret = PTR_ERR(fep->clk_ahb);
+		goto failed_clk;
+	}
+
+	fep->itr_clk_rate = clk_get_rate(fep->clk_ahb);
+
+	/* enet_out is optional, depends on board */
+	fep->clk_enet_out = devm_clk_get(&pdev->dev, "enet_out");
+	if (IS_ERR(fep->clk_enet_out))
+		fep->clk_enet_out = NULL;
+
+	/*
+	 * We keep the companion PTP driver enabled even when
+	 * operating the NIC in rt mode. The PHC is still available,
+	 * although not providing rt guarantees.
+	 */
+	fep->ptp_clk_on = false;
+	mutex_init(&fep->ptp_clk_mutex);
+
+	/* clk_ref is optional, depends on board */
+	fep->clk_ref = devm_clk_get(&pdev->dev, "enet_clk_ref");
+	if (IS_ERR(fep->clk_ref))
+		fep->clk_ref = NULL;
+
+	fep->bufdesc_ex = fep->quirks & FEC_QUIRK_HAS_BUFDESC_EX;
+	fep->clk_ptp = devm_clk_get(&pdev->dev, "ptp");
+	if (IS_ERR(fep->clk_ptp)) {
+		fep->clk_ptp = NULL;
+		fep->bufdesc_ex = false;
+	}
+
+	ret = fec_enet_clk_enable(ndev, true);
+	if (ret)
+		goto failed_clk;
+
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		goto failed_clk_ahb;
+
+	fep->reg_phy = devm_regulator_get_optional(&pdev->dev, "phy");
+	if (!IS_ERR(fep->reg_phy)) {
+		ret = regulator_enable(fep->reg_phy);
+		if (ret) {
+			dev_err(&pdev->dev,
+				"Failed to enable phy regulator: %d\n", ret);
+			goto failed_regulator;
+		}
+	} else {
+		if (PTR_ERR(fep->reg_phy) == -EPROBE_DEFER) {
+			ret = -EPROBE_DEFER;
+			goto failed_regulator;
+		}
+		fep->reg_phy = NULL;
+	}
+
+	pm_runtime_set_autosuspend_delay(&pdev->dev, FEC_MDIO_PM_TIMEOUT);
+	pm_runtime_use_autosuspend(&pdev->dev);
+	pm_runtime_get_noresume(&pdev->dev);
+	pm_runtime_set_active(&pdev->dev);
+	pm_runtime_enable(&pdev->dev);
+
+	ret = fec_reset_phy(pdev);
+	if (ret)
+		goto failed_reset;
+
+	irq_cnt = fec_enet_get_irq_cnt(pdev);
+	if (fep->bufdesc_ex)
+		fec_ptp_init(pdev, irq_cnt);
+
+	ret = fec_enet_init(ndev);
+	if (ret)
+		goto failed_init;
+
+	for (i = 0; i < irq_cnt; i++) {
+		snprintf(irq_name, sizeof(irq_name), "int%d", i);
+		irq = platform_get_irq_byname_optional(pdev, irq_name);
+		if (irq < 0)
+			irq = platform_get_irq(pdev, i);
+		if (irq < 0) {
+			ret = irq;
+			goto failed_irq;
+		}
+		ret = rtdm_irq_request(&fep->rtnet.irq_handle[i], irq,
+					       fec_rt_interrupt, 0, ndev->name, ndev);
+		if (ret)
+			goto failed_irq;
+
+		fep->irq[i] = irq;
+		fep->irqnr++;
+	}
+
+	ret = fec_enet_mii_init(pdev);
+	if (ret)
+		goto failed_mii_init;
+
+	/* Carrier starts down, phylib will bring it up */
+	netif_carrier_off(ndev);
+	fec_enet_clk_enable(ndev, false);
+	pinctrl_pm_select_sleep_state(&pdev->dev);
+
+	eth_id = of_alias_get_id(pdev->dev.of_node, "ethernet");
+	if (eth_id >= 0)
+		sprintf(ndev->name, "rteth%d", eth_id);
+
+	ndev->max_mtu = PKT_MAXBUF_SIZE - ETH_HLEN - ETH_FCS_LEN;
+
+	ret = register_netdev(ndev);
+	if (ret)
+		goto failed_register;
+
+	device_init_wakeup(&ndev->dev, fep->wol_flag &
+			   FEC_WOL_HAS_MAGIC_PACKET);
+
+	if (fep->bufdesc_ex && fep->ptp_clock)
+		netdev_info(ndev, "registered PHC device %d\n", fep->dev_id);
+
+	fep->rx_copybreak = COPYBREAK_DEFAULT;
+	INIT_WORK(&fep->tx_timeout_work, fec_enet_timeout_work);
+
+	pm_runtime_mark_last_busy(&pdev->dev);
+	pm_runtime_put_autosuspend(&pdev->dev);
+
+	return 0;
+
+failed_register:
+	fec_enet_mii_remove(fep);
+failed_mii_init:
+failed_irq:
+failed_init:
+	fec_ptp_stop(pdev);
+failed_reset:
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+	if (fep->reg_phy)
+		regulator_disable(fep->reg_phy);
+failed_regulator:
+	clk_disable_unprepare(fep->clk_ahb);
+failed_clk_ahb:
+	clk_disable_unprepare(fep->clk_ipg);
+failed_clk_ipg:
+	fec_enet_clk_enable(ndev, false);
+failed_clk:
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	of_node_put(phy_node);
+failed_stop_mode:
+failed_phy:
+	dev_id--;
+failed_ioremap:
+	fec_rt_destroy(ndev);
+failed_rt_init:
+	free_netdev(ndev);
+
+	return ret;
+}
+
+static int
+fec_drv_remove(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct device_node *np = pdev->dev.of_node;
+	int ret;
+
+	ret = pm_runtime_resume_and_get(&pdev->dev);
+	if (ret < 0)
+		return ret;
+
+	cancel_work_sync(&fep->tx_timeout_work);
+	fec_ptp_stop(pdev);
+
+	fec_rt_destroy(ndev);
+	unregister_netdev(ndev);
+	fec_enet_mii_remove(fep);
+	if (fep->reg_phy)
+		regulator_disable(fep->reg_phy);
+
+	if (of_phy_is_fixed_link(np))
+		of_phy_deregister_fixed_link(np);
+	of_node_put(fep->phy_node);
+
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+	pm_runtime_put_noidle(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	free_netdev(ndev);
+	return 0;
+}
+
+static int __maybe_unused fec_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+
+	rtnl_lock();
+	if (rtnetif_running(&frt->dev)) {
+		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE)
+			fep->wol_flag |= FEC_WOL_FLAG_SLEEP_ON;
+		phy_stop(ndev->phydev);
+		rtnetif_stop_queue(&frt->dev);
+		netif_device_detach(ndev);
+		rtnetif_wake_queue(&frt->dev);
+		fec_stop(ndev);
+		fec_enet_clk_enable(ndev, false);
+		if (!(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+			pinctrl_pm_select_sleep_state(&fep->pdev->dev);
+	}
+	rtnl_unlock();
+
+	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE))
+		regulator_disable(fep->reg_phy);
+
+	/* The SoC supplies the clock to the PHY: when the clock is disabled,
+	 * the PHY link goes down. The SoC also controls the PHY regulator:
+	 * when the regulator is disabled, the PHY link goes down as well.
+	 */
+	if (fep->clk_enet_out || fep->reg_phy)
+		fep->link = 0;
+
+	return 0;
+}
+
+static int __maybe_unused fec_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct fec_rt_data *frt = &fep->rtnet;
+	int ret;
+	int val;
+
+	if (fep->reg_phy && !(fep->wol_flag & FEC_WOL_FLAG_ENABLE)) {
+		ret = regulator_enable(fep->reg_phy);
+		if (ret)
+			return ret;
+	}
+
+	rtnl_lock();
+	if (rtnetif_running(&frt->dev)) {
+		ret = fec_enet_clk_enable(ndev, true);
+		if (ret) {
+			rtnl_unlock();
+			goto failed_clk;
+		}
+		if (fep->wol_flag & FEC_WOL_FLAG_ENABLE) {
+			fec_enet_stop_mode(fep, false);
+
+			val = readl(fep->hwp + FEC_ECNTRL);
+			val &= ~(FEC_ECR_MAGICEN | FEC_ECR_SLEEP);
+			writel(val, fep->hwp + FEC_ECNTRL);
+			fep->wol_flag &= ~FEC_WOL_FLAG_SLEEP_ON;
+		} else {
+			pinctrl_pm_select_default_state(&fep->pdev->dev);
+		}
+		fec_restart(ndev);
+		rtnetif_stop_queue(&frt->dev);
+		netif_device_attach(ndev);
+		rtnetif_wake_queue(&frt->dev);
+		phy_start(ndev->phydev);
+	}
+	rtnl_unlock();
+
+	return 0;
+
+failed_clk:
+	if (fep->reg_phy)
+		regulator_disable(fep->reg_phy);
+	return ret;
+}
+
+static int __maybe_unused fec_runtime_suspend(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	clk_disable_unprepare(fep->clk_ahb);
+	clk_disable_unprepare(fep->clk_ipg);
+
+	return 0;
+}
+
+static int __maybe_unused fec_runtime_resume(struct device *dev)
+{
+	struct net_device *ndev = dev_get_drvdata(dev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int ret;
+
+	ret = clk_prepare_enable(fep->clk_ahb);
+	if (ret)
+		return ret;
+	ret = clk_prepare_enable(fep->clk_ipg);
+	if (ret)
+		goto failed_clk_ipg;
+
+	return 0;
+
+failed_clk_ipg:
+	clk_disable_unprepare(fep->clk_ahb);
+	return ret;
+}
+
+static const struct dev_pm_ops fec_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(fec_suspend, fec_resume)
+	SET_RUNTIME_PM_OPS(fec_runtime_suspend, fec_runtime_resume, NULL)
+};
+
+static struct platform_driver fec_driver = {
+	.driver	= {
+		.name	= DRIVER_NAME,
+		.pm	= &fec_pm_ops,
+		.of_match_table = fec_dt_ids,
+	},
+	.id_table = fec_devtype,
+	.probe	= fec_probe,
+	.remove	= fec_drv_remove,
+};
+
+module_platform_driver(fec_driver);
+
+MODULE_ALIAS("platform:"DRIVER_NAME);
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c
new file mode 100644
index 0000000..d71eac7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/freescale/fec_ptp.c
@@ -0,0 +1,648 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Fast Ethernet Controller (ENET) PTP driver for MX6x.
+ *
+ * Copyright (C) 2012 Freescale Semiconductor, Inc.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/workqueue.h>
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/irq.h>
+#include <linux/clk.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/fec.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_gpio.h>
+#include <linux/of_net.h>
+
+#include "fec.h"
+
+/* FEC 1588 register bits */
+#define FEC_T_CTRL_SLAVE                0x00002000
+#define FEC_T_CTRL_CAPTURE              0x00000800
+#define FEC_T_CTRL_RESTART              0x00000200
+#define FEC_T_CTRL_PERIOD_RST           0x00000030
+#define FEC_T_CTRL_PERIOD_EN		0x00000010
+#define FEC_T_CTRL_ENABLE               0x00000001
+
+#define FEC_T_INC_MASK                  0x0000007f
+#define FEC_T_INC_OFFSET                0
+#define FEC_T_INC_CORR_MASK             0x00007f00
+#define FEC_T_INC_CORR_OFFSET           8
+
+#define FEC_T_CTRL_PINPER		0x00000080
+#define FEC_T_TF0_MASK			0x00000001
+#define FEC_T_TF0_OFFSET		0
+#define FEC_T_TF1_MASK			0x00000002
+#define FEC_T_TF1_OFFSET		1
+#define FEC_T_TF2_MASK			0x00000004
+#define FEC_T_TF2_OFFSET		2
+#define FEC_T_TF3_MASK			0x00000008
+#define FEC_T_TF3_OFFSET		3
+#define FEC_T_TDRE_MASK			0x00000001
+#define FEC_T_TDRE_OFFSET		0
+#define FEC_T_TMODE_MASK		0x0000003C
+#define FEC_T_TMODE_OFFSET		2
+#define FEC_T_TIE_MASK			0x00000040
+#define FEC_T_TIE_OFFSET		6
+#define FEC_T_TF_MASK			0x00000080
+#define FEC_T_TF_OFFSET			7
+
+#define FEC_ATIME_CTRL		0x400
+#define FEC_ATIME		0x404
+#define FEC_ATIME_EVT_OFFSET	0x408
+#define FEC_ATIME_EVT_PERIOD	0x40c
+#define FEC_ATIME_CORR		0x410
+#define FEC_ATIME_INC		0x414
+#define FEC_TS_TIMESTAMP	0x418
+
+#define FEC_TGSR		0x604
+#define FEC_TCSR(n)		(0x608 + n * 0x08)
+#define FEC_TCCR(n)		(0x60C + n * 0x08)
+#define MAX_TIMER_CHANNEL	3
+#define FEC_TMODE_TOGGLE	0x05
+#define FEC_HIGH_PULSE		0x0F
+
+#define FEC_CC_MULT	(1 << 31)
+#define FEC_COUNTER_PERIOD	(1 << 31)
+#define PPS_OUPUT_RELOAD_PERIOD	NSEC_PER_SEC
+#define FEC_CHANNLE_0		0
+#define DEFAULT_PPS_CHANNEL	FEC_CHANNLE_0
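+
+/* With the 1 ns tick programmed in fec_ptp_start_cyclecounter(), the 2^31
+ * counter period means the hardware timer wraps roughly every 2.1 s;
+ * fec_time_keep() re-reads the timecounter every second to stay well
+ * inside that window.
+ */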
+
+/**
+ * fec_ptp_enable_pps
+ * @fep: the fec_enet_private structure handle
+ * @enable: enable the channel pps output
+ *
+ * This function enables the PPS output on the timer channel.
+ */
+static int fec_ptp_enable_pps(struct fec_enet_private *fep, uint enable)
+{
+	unsigned long flags;
+	u32 val, tempval;
+	struct timespec64 ts;
+	u64 ns;
+
+	val = 0;
+
+	if (fep->pps_enable == enable)
+		return 0;
+
+	fep->pps_channel = DEFAULT_PPS_CHANNEL;
+	fep->reload_period = PPS_OUPUT_RELOAD_PERIOD;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+	if (enable) {
+		/* Clear any pending capture or output compare interrupt
+		 * status.
+		 */
+		writel(FEC_T_TF_MASK, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+		/* It is recommended to double check the TMODE field in the
+		 * TCSR register to be cleared before the first compare counter
+		 * is written into TCCR register. Just add a double check.
+		 */
+		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+		do {
+			val &= ~(FEC_T_TMODE_MASK);
+			writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+			val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+		} while (val & FEC_T_TMODE_MASK);
+
+		/* Dummy read counter to update the counter */
+		timecounter_read(&fep->tc);
+		/* We want the first compare event to fire at the next
+		 * second boundary, so we need to know the current ptp time
+		 * and how many nanoseconds remain until the next second.
+		 * That remainder is NSEC_PER_SEC - ts.tv_nsec; adding it
+		 * to the current timer value lands on the next second.
+		 */
+		tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+		tempval |= FEC_T_CTRL_CAPTURE;
+		writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
+		tempval = readl(fep->hwp + FEC_ATIME);
+		/* Convert the ptp local counter to 1588 timestamp */
+		ns = timecounter_cyc2time(&fep->tc, tempval);
+		ts = ns_to_timespec64(ns);
+
+		/* tempval is less than 3 seconds and so val is less than
+		 * 4 seconds; the 32-bit calculation cannot overflow.
+		 */
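+		/* Illustrative numbers: if ts.tv_nsec were 300000000, the
+		 * next second boundary is 700000000 ns away, so the first
+		 * compare value is the raw counter plus 700000000 (before
+		 * the extra-second safety margin added below).
+		 */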
+		val = NSEC_PER_SEC - (u32)ts.tv_nsec + tempval;
+
+		/* Consider the case where the current time is very close
+		 * to the second boundary, i.e. NSEC_PER_SEC - ts.tv_nsec
+		 * is nearly zero (for example 20 ns). The timer keeps
+		 * running while we calculate the first compare event, so
+		 * the remaining nanoseconds may run out before the compare
+		 * counter is computed and written into the TCCR register.
+		 * To avoid this, aim the compare event at the second after
+		 * next. The timer is 31 bits wide and wraps after more
+		 * than 2 seconds, so targeting the second after next is
+		 * safe.
+		 */
+		val += NSEC_PER_SEC;
+
+		/* We add (2 * NSEC_PER_SEC - (u32)ts.tv_nsec) to the
+		 * current ptp counter, which may cause a 32-bit wrap.
+		 * Since that offset is less than 2 seconds, the wrap
+		 * cannot cause a problem; an offset bigger than
+		 * fep->cc.mask would be an error.
+		 */
+		val &= fep->cc.mask;
+		writel(val, fep->hwp + FEC_TCCR(fep->pps_channel));
+
+		/* Calculate the timestamp of the second compare event */
+		fep->next_counter = (val + fep->reload_period) & fep->cc.mask;
+
+		/* Enable compare event when overflow */
+		val = readl(fep->hwp + FEC_ATIME_CTRL);
+		val |= FEC_T_CTRL_PINPER;
+		writel(val, fep->hwp + FEC_ATIME_CTRL);
+
+		/* Compare channel setting. */
+		val = readl(fep->hwp + FEC_TCSR(fep->pps_channel));
+		val |= (1 << FEC_T_TF_OFFSET | 1 << FEC_T_TIE_OFFSET);
+		val &= ~(1 << FEC_T_TDRE_OFFSET);
+		val &= ~(FEC_T_TMODE_MASK);
+		val |= (FEC_HIGH_PULSE << FEC_T_TMODE_OFFSET);
+		writel(val, fep->hwp + FEC_TCSR(fep->pps_channel));
+
+		/* Write the second compare event timestamp and calculate
+		 * the third timestamp. Refer to the TCCR register details in the spec.
+		 */
+		writel(fep->next_counter, fep->hwp + FEC_TCCR(fep->pps_channel));
+		fep->next_counter = (fep->next_counter + fep->reload_period) & fep->cc.mask;
+	} else {
+		writel(0, fep->hwp + FEC_TCSR(fep->pps_channel));
+	}
+
+	fep->pps_enable = enable;
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * fec_ptp_read - read raw cycle counter (to be used by time counter)
+ * @cc: the cyclecounter structure
+ *
+ * this function reads the cyclecounter registers and is called by the
+ * cyclecounter structure used to construct a ns counter from the
+ * arbitrary fixed point registers
+ */
+static u64 fec_ptp_read(const struct cyclecounter *cc)
+{
+	struct fec_enet_private *fep =
+		container_of(cc, struct fec_enet_private, cc);
+	u32 tempval;
+
+	tempval = readl(fep->hwp + FEC_ATIME_CTRL);
+	tempval |= FEC_T_CTRL_CAPTURE;
+	writel(tempval, fep->hwp + FEC_ATIME_CTRL);
+
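+	/* Some parts have a capture erratum (FEC_QUIRK_BUG_CAPTURE) and need
+	 * a short settling delay before the captured value is read back
+	 * (assumed rationale for the quirk, inferred from its name).
+	 */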
+	if (fep->quirks & FEC_QUIRK_BUG_CAPTURE)
+		udelay(1);
+
+	return readl(fep->hwp + FEC_ATIME);
+}
+
+/**
+ * fec_ptp_start_cyclecounter - create the cycle counter from hw
+ * @ndev: network device
+ *
+ * This function initializes the timecounter and cyclecounter
+ * structures for use in generating a ns counter from the arbitrary
+ * fixed point cycle registers in the hardware.
+ */
+void fec_ptp_start_cyclecounter(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	unsigned long flags;
+	int inc;
+
+	inc = 1000000000 / fep->cycle_speed;
+
+	/* grab the ptp lock */
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+	/* 1ns counter */
+	writel(inc << FEC_T_INC_OFFSET, fep->hwp + FEC_ATIME_INC);
+
+	/* use 31-bit timer counter */
+	writel(FEC_COUNTER_PERIOD, fep->hwp + FEC_ATIME_EVT_PERIOD);
+
+	writel(FEC_T_CTRL_ENABLE | FEC_T_CTRL_PERIOD_RST,
+		fep->hwp + FEC_ATIME_CTRL);
+
+	memset(&fep->cc, 0, sizeof(fep->cc));
+	fep->cc.read = fec_ptp_read;
+	fep->cc.mask = CLOCKSOURCE_MASK(31);
+	fep->cc.shift = 31;
+	fep->cc.mult = FEC_CC_MULT;
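+	/* With mult = 2^31 and shift = 31, the conversion is
+	 * ns = (cycles * 2^31) >> 31, i.e. one hardware cycle equals 1 ns.
+	 */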
+
+	/* reset the ns time counter */
+	timecounter_init(&fep->tc, &fep->cc, 0);
+
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+}
+
+/**
+ * fec_ptp_adjfreq - adjust ptp cycle frequency
+ * @ptp: the ptp clock structure
+ * @ppb: parts per billion adjustment from base
+ *
+ * Adjust the frequency of the ptp cycle counter by the
+ * indicated ppb from the base frequency.
+ *
+ * Because ENET hardware frequency adjustment is complex,
+ * a software method is used instead.
+ */
+static int fec_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
+{
+	unsigned long flags;
+	int neg_adj = 0;
+	u32 i, tmp;
+	u32 corr_inc, corr_period;
+	u32 corr_ns;
+	u64 lhs, rhs;
+
+	struct fec_enet_private *fep =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+
+	if (ppb == 0)
+		return 0;
+
+	if (ppb < 0) {
+		ppb = -ppb;
+		neg_adj = 1;
+	}
+
+	/* In theory, corr_inc / corr_period = ppb / NSEC_PER_SEC.
+	 * Try to find a corr_inc between 1 and fep->ptp_inc that
+	 * meets the adjustment requirement.
+	 */
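+	/* Worked example (illustrative values): with ppb = 100 and
+	 * fep->ptp_inc = 8 ns, rhs = 800 and lhs starts at 10^9, so the
+	 * first iteration already satisfies lhs >= rhs: corr_inc = 1,
+	 * corr_period = 1000000000 / 800 = 1250000. One extra ns every
+	 * 1250000 ticks of 8 ns each is 1 ns per 10 ms, i.e. 100 ppb.
+	 */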
+	lhs = NSEC_PER_SEC;
+	rhs = (u64)ppb * (u64)fep->ptp_inc;
+	for (i = 1; i <= fep->ptp_inc; i++) {
+		if (lhs >= rhs) {
+			corr_inc = i;
+			corr_period = div_u64(lhs, rhs);
+			break;
+		}
+		lhs += NSEC_PER_SEC;
+	}
+	/* Not found? Fall back to the maximum: double-speed
+	 * correction on every clock step.
+	 */
+	if (i > fep->ptp_inc) {
+		corr_inc = fep->ptp_inc;
+		corr_period = 1;
+	}
+
+	if (neg_adj)
+		corr_ns = fep->ptp_inc - corr_inc;
+	else
+		corr_ns = fep->ptp_inc + corr_inc;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+
+	tmp = readl(fep->hwp + FEC_ATIME_INC) & FEC_T_INC_MASK;
+	tmp |= corr_ns << FEC_T_INC_CORR_OFFSET;
+	writel(tmp, fep->hwp + FEC_ATIME_INC);
+	corr_period = corr_period > 1 ? corr_period - 1 : corr_period;
+	writel(corr_period, fep->hwp + FEC_ATIME_CORR);
+	/* dummy read to update the timer. */
+	timecounter_read(&fep->tc);
+
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * fec_ptp_adjtime
+ * @ptp: the ptp clock structure
+ * @delta: offset to adjust the cycle counter by
+ *
+ * adjust the timer by resetting the timecounter structure.
+ */
+static int fec_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
+{
+	struct fec_enet_private *fep =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+	unsigned long flags;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	timecounter_adjtime(&fep->tc, delta);
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+
+	return 0;
+}
+
+/**
+ * fec_ptp_gettime
+ * @ptp: the ptp clock structure
+ * @ts: timespec structure to hold the current time value
+ *
+ * read the timecounter and return the correct value in ns,
+ * after converting it into a struct timespec64.
+ */
+static int fec_ptp_gettime(struct ptp_clock_info *ptp, struct timespec64 *ts)
+{
+	struct fec_enet_private *adapter =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+	u64 ns;
+	unsigned long flags;
+
+	mutex_lock(&adapter->ptp_clk_mutex);
+	/* Check the ptp clock */
+	if (!adapter->ptp_clk_on) {
+		mutex_unlock(&adapter->ptp_clk_mutex);
+		return -EINVAL;
+	}
+	spin_lock_irqsave(&adapter->tmreg_lock, flags);
+	ns = timecounter_read(&adapter->tc);
+	spin_unlock_irqrestore(&adapter->tmreg_lock, flags);
+	mutex_unlock(&adapter->ptp_clk_mutex);
+
+	*ts = ns_to_timespec64(ns);
+
+	return 0;
+}
+
+/**
+ * fec_ptp_settime
+ * @ptp: the ptp clock structure
+ * @ts: the timespec containing the new time for the cycle counter
+ *
+ * reset the timecounter to use a new base value instead of the kernel
+ * wall timer value.
+ */
+static int fec_ptp_settime(struct ptp_clock_info *ptp,
+			   const struct timespec64 *ts)
+{
+	struct fec_enet_private *fep =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+	u64 ns;
+	unsigned long flags;
+	u32 counter;
+
+	mutex_lock(&fep->ptp_clk_mutex);
+	/* Check the ptp clock */
+	if (!fep->ptp_clk_on) {
+		mutex_unlock(&fep->ptp_clk_mutex);
+		return -EINVAL;
+	}
+
+	ns = timespec64_to_ns(ts);
+	/* Get the timer value based on timestamp.
+	 * Update the counter with the masked value.
+	 */
+	counter = ns & fep->cc.mask;
+
+	spin_lock_irqsave(&fep->tmreg_lock, flags);
+	writel(counter, fep->hwp + FEC_ATIME);
+	timecounter_init(&fep->tc, &fep->cc, ns);
+	spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+	mutex_unlock(&fep->ptp_clk_mutex);
+	return 0;
+}
+
+/**
+ * fec_ptp_enable
+ * @ptp: the ptp clock structure
+ * @rq: the requested feature to change
+ * @on: whether to enable or disable the feature
+ *
+ */
+static int fec_ptp_enable(struct ptp_clock_info *ptp,
+			  struct ptp_clock_request *rq, int on)
+{
+	struct fec_enet_private *fep =
+	    container_of(ptp, struct fec_enet_private, ptp_caps);
+	int ret = 0;
+
+	if (rq->type == PTP_CLK_REQ_PPS) {
+		ret = fec_ptp_enable_pps(fep, on);
+
+		return ret;
+	}
+	return -EOPNOTSUPP;
+}
+
+/**
+ * fec_ptp_disable_hwts - disable hardware time stamping
+ * @ndev: pointer to net_device
+ */
+void fec_ptp_disable_hwts(struct net_device *ndev)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	fep->hwts_tx_en = 0;
+	fep->hwts_rx_en = 0;
+}
+
+int fec_ptp_set(struct net_device *ndev, struct ifreq *ifr)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct hwtstamp_config config;
+
+	if (copy_from_user(&config, ifr->ifr_data, sizeof(config)))
+		return -EFAULT;
+
+	/* reserved for future extensions */
+	if (config.flags)
+		return -EINVAL;
+
+	switch (config.tx_type) {
+	case HWTSTAMP_TX_OFF:
+		fep->hwts_tx_en = 0;
+		break;
+	case HWTSTAMP_TX_ON:
+		fep->hwts_tx_en = 1;
+		break;
+	default:
+		return -ERANGE;
+	}
+
+	switch (config.rx_filter) {
+	case HWTSTAMP_FILTER_NONE:
+		fep->hwts_rx_en = 0;
+		break;
+
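+	/* The FEC timestamps all received frames, so any request other than
+	 * NONE is upgraded to HWTSTAMP_FILTER_ALL.
+	 */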
+	default:
+		fep->hwts_rx_en = 1;
+		config.rx_filter = HWTSTAMP_FILTER_ALL;
+		break;
+	}
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+	    -EFAULT : 0;
+}
+
+int fec_ptp_get(struct net_device *ndev, struct ifreq *ifr)
+{
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	struct hwtstamp_config config;
+
+	config.flags = 0;
+	config.tx_type = fep->hwts_tx_en ? HWTSTAMP_TX_ON : HWTSTAMP_TX_OFF;
+	config.rx_filter = (fep->hwts_rx_en ?
+			    HWTSTAMP_FILTER_ALL : HWTSTAMP_FILTER_NONE);
+
+	return copy_to_user(ifr->ifr_data, &config, sizeof(config)) ?
+		-EFAULT : 0;
+}
+
+/*
+ * fec_time_keep - call timecounter_read every second to avoid timer overrun,
+ *                 because the ENET hardware only supports a 32-bit counter
+ *                 that would overflow in about 4s
+ */
+static void fec_time_keep(struct work_struct *work)
+{
+	struct delayed_work *dwork = to_delayed_work(work);
+	struct fec_enet_private *fep = container_of(dwork, struct fec_enet_private, time_keep);
+	unsigned long flags;
+
+	mutex_lock(&fep->ptp_clk_mutex);
+	if (fep->ptp_clk_on) {
+		spin_lock_irqsave(&fep->tmreg_lock, flags);
+		timecounter_read(&fep->tc);
+		spin_unlock_irqrestore(&fep->tmreg_lock, flags);
+	}
+	mutex_unlock(&fep->ptp_clk_mutex);
+
+	schedule_delayed_work(&fep->time_keep, HZ);
+}
+
+/* This function checks the pps event and reloads the timer compare counter. */
+static irqreturn_t fec_pps_interrupt(int irq, void *dev_id)
+{
+	struct net_device *ndev = dev_id;
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	u32 val;
+	u8 channel = fep->pps_channel;
+	struct ptp_clock_event event;
+
+	val = readl(fep->hwp + FEC_TCSR(channel));
+	if (val & FEC_T_TF_MASK) {
+		/* Write the next-next compare value (not the next one,
+		 * according to the spec) to the register.
+		 */
+		writel(fep->next_counter, fep->hwp + FEC_TCCR(channel));
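+		/* TF is a write-1-to-clear status bit and val still has it
+		 * set, so keep writing until it reads back cleared.
+		 */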
+		do {
+			writel(val, fep->hwp + FEC_TCSR(channel));
+		} while (readl(fep->hwp + FEC_TCSR(channel)) & FEC_T_TF_MASK);
+
+		/* Update the counter */
+		fep->next_counter = (fep->next_counter + fep->reload_period) &
+				fep->cc.mask;
+
+		event.type = PTP_CLOCK_PPS;
+		ptp_clock_event(fep->ptp_clock, &event);
+		return IRQ_HANDLED;
+	}
+
+	return IRQ_NONE;
+}
+
+/**
+ * fec_ptp_init
+ * @pdev: The FEC network adapter
+ * @irq_idx: the interrupt index
+ *
+ * This function performs the required steps for enabling ptp
+ * support. If ptp support has already been loaded it simply calls the
+ * cyclecounter init routine and exits.
+ */
+void fec_ptp_init(struct platform_device *pdev, int irq_idx)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+	int irq;
+	int ret;
+
+	fep->ptp_caps.owner = THIS_MODULE;
+	strlcpy(fep->ptp_caps.name, "fec ptp", sizeof(fep->ptp_caps.name));
+
+	fep->ptp_caps.max_adj = 250000000;
+	fep->ptp_caps.n_alarm = 0;
+	fep->ptp_caps.n_ext_ts = 0;
+	fep->ptp_caps.n_per_out = 0;
+	fep->ptp_caps.n_pins = 0;
+	fep->ptp_caps.pps = 1;
+	fep->ptp_caps.adjfreq = fec_ptp_adjfreq;
+	fep->ptp_caps.adjtime = fec_ptp_adjtime;
+	fep->ptp_caps.gettime64 = fec_ptp_gettime;
+	fep->ptp_caps.settime64 = fec_ptp_settime;
+	fep->ptp_caps.enable = fec_ptp_enable;
+
+	fep->cycle_speed = clk_get_rate(fep->clk_ptp);
+	if (!fep->cycle_speed) {
+		fep->cycle_speed = NSEC_PER_SEC;
+		dev_err(&fep->pdev->dev, "clk_ptp clock rate is zero\n");
+	}
+	fep->ptp_inc = NSEC_PER_SEC / fep->cycle_speed;
+
+	spin_lock_init(&fep->tmreg_lock);
+
+	fec_ptp_start_cyclecounter(ndev);
+
+	INIT_DELAYED_WORK(&fep->time_keep, fec_time_keep);
+
+	irq = platform_get_irq_byname_optional(pdev, "pps");
+	if (irq < 0)
+		irq = platform_get_irq_optional(pdev, irq_idx);
+	/* Failure to get an IRQ is not fatal; only the
+	 * PTP_CLOCK_PPS clock events will be unavailable.
+	 */
+	if (irq >= 0) {
+		ret = devm_request_irq(&pdev->dev, irq, fec_pps_interrupt,
+				       0, pdev->name, ndev);
+		if (ret < 0)
+			dev_warn(&pdev->dev, "request for pps irq failed (%d)\n",
+				 ret);
+	}
+
+	fep->ptp_clock = ptp_clock_register(&fep->ptp_caps, &pdev->dev);
+	if (IS_ERR(fep->ptp_clock)) {
+		fep->ptp_clock = NULL;
+		dev_err(&pdev->dev, "ptp_clock_register failed\n");
+	}
+
+	schedule_delayed_work(&fep->time_keep, HZ);
+}
+
+void fec_ptp_stop(struct platform_device *pdev)
+{
+	struct net_device *ndev = platform_get_drvdata(pdev);
+	struct fec_enet_private *fep = netdev_priv(ndev);
+
+	cancel_delayed_work_sync(&fep->time_keep);
+	if (fep->ptp_clock)
+		ptp_clock_unregister(fep->ptp_clock);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile
new file mode 100644
index 0000000..49cdf50
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/Makefile
@@ -0,0 +1,13 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_IGB) += rt_igb.o
+
+rt_igb-y :=  					\
+	e1000_82575.o				\
+	e1000_i210.o				\
+	e1000_mac.o				\
+	e1000_mbx.o				\
+	e1000_nvm.o				\
+	e1000_phy.o				\
+	igb_hwmon.o				\
+	igb_main.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c
new file mode 100644
index 0000000..fff9e85
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.c
@@ -0,0 +1,2891 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2015 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* e1000_82575
+ * e1000_82576
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+#include <linux/i2c.h>
+
+#include "e1000_mac.h"
+#include "e1000_82575.h"
+#include "e1000_i210.h"
+
+static s32  igb_get_invariants_82575(struct e1000_hw *);
+static s32  igb_acquire_phy_82575(struct e1000_hw *);
+static void igb_release_phy_82575(struct e1000_hw *);
+static s32  igb_acquire_nvm_82575(struct e1000_hw *);
+static void igb_release_nvm_82575(struct e1000_hw *);
+static s32  igb_check_for_link_82575(struct e1000_hw *);
+static s32  igb_get_cfg_done_82575(struct e1000_hw *);
+static s32  igb_init_hw_82575(struct e1000_hw *);
+static s32  igb_phy_hw_reset_sgmii_82575(struct e1000_hw *);
+static s32  igb_read_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16 *);
+static s32  igb_read_phy_reg_82580(struct e1000_hw *, u32, u16 *);
+static s32  igb_write_phy_reg_82580(struct e1000_hw *, u32, u16);
+static s32  igb_reset_hw_82575(struct e1000_hw *);
+static s32  igb_reset_hw_82580(struct e1000_hw *);
+static s32  igb_set_d0_lplu_state_82575(struct e1000_hw *, bool);
+static s32  igb_set_d0_lplu_state_82580(struct e1000_hw *, bool);
+static s32  igb_set_d3_lplu_state_82580(struct e1000_hw *, bool);
+static s32  igb_setup_copper_link_82575(struct e1000_hw *);
+static s32  igb_setup_serdes_link_82575(struct e1000_hw *);
+static s32  igb_write_phy_reg_sgmii_82575(struct e1000_hw *, u32, u16);
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *);
+static s32  igb_acquire_swfw_sync_82575(struct e1000_hw *, u16);
+static s32  igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *, u16 *,
+						 u16 *);
+static s32  igb_get_phy_id_82575(struct e1000_hw *);
+static void igb_release_swfw_sync_82575(struct e1000_hw *, u16);
+static bool igb_sgmii_active_82575(struct e1000_hw *);
+static s32  igb_reset_init_script_82575(struct e1000_hw *);
+static s32  igb_read_mac_addr_82575(struct e1000_hw *);
+static s32  igb_set_pcie_completion_timeout(struct e1000_hw *hw);
+static s32  igb_reset_mdicnfg_82580(struct e1000_hw *hw);
+static s32  igb_validate_nvm_checksum_82580(struct e1000_hw *hw);
+static s32  igb_update_nvm_checksum_82580(struct e1000_hw *hw);
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw);
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw);
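+
+/* 82580 packet buffer size lookup table (in KB), indexed by the RXPBS
+ * register size field; the exact semantics are an assumption based on how
+ * this table is consumed elsewhere in the driver.
+ */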
+static const u16 e1000_82580_rxpbs_table[] = {
+	36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
+
+/**
+ *  igb_sgmii_uses_mdio_82575 - Determine if I2C pins are for external MDIO
+ *  @hw: pointer to the HW structure
+ *
+ *  Called to determine if the I2C pins are being used for I2C or as an
+ *  external MDIO interface since the two options are mutually exclusive.
+ **/
+static bool igb_sgmii_uses_mdio_82575(struct e1000_hw *hw)
+{
+	u32 reg = 0;
+	bool ext_mdio = false;
+
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_82576:
+		reg = rd32(E1000_MDIC);
+		ext_mdio = !!(reg & E1000_MDIC_DEST);
+		break;
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		reg = rd32(E1000_MDICNFG);
+		ext_mdio = !!(reg & E1000_MDICNFG_EXT_MDIO);
+		break;
+	default:
+		break;
+	}
+	return ext_mdio;
+}
+
+/**
+ *  igb_check_for_link_media_swap - Check which M88E1112 interface is linked
+ *  @hw: pointer to the HW structure
+ *
+ *  Poll the M88E1112 interfaces to see which interface achieved link.
+ */
+static s32 igb_check_for_link_media_swap(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	u8 port = 0;
+
+	/* Check the copper medium. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	if (data & E1000_M88E1112_STATUS_LINK)
+		port = E1000_MEDIA_PORT_COPPER;
+
+	/* Check the other medium. */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 1);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = phy->ops.read_reg(hw, E1000_M88E1112_STATUS, &data);
+	if (ret_val)
+		return ret_val;
+
+	/* reset page to 0 */
+	ret_val = phy->ops.write_reg(hw, E1000_M88E1112_PAGE_ADDR, 0);
+	if (ret_val)
+		return ret_val;
+
+	if (data & E1000_M88E1112_STATUS_LINK)
+		port = E1000_MEDIA_PORT_OTHER;
+
+	/* Determine if a swap needs to happen. */
+	if (port && (hw->dev_spec._82575.media_port != port)) {
+		hw->dev_spec._82575.media_port = port;
+		hw->dev_spec._82575.media_changed = true;
+	} else {
+		ret_val = igb_check_for_link_82575(hw);
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_init_phy_params_82575 - Init PHY func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_phy_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u32 ctrl_ext;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		phy->type = e1000_phy_none;
+		goto out;
+	}
+
+	phy->autoneg_mask	= AUTONEG_ADVERTISE_SPEED_DEFAULT;
+	phy->reset_delay_us	= 100;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+
+	if (igb_sgmii_active_82575(hw)) {
+		phy->ops.reset = igb_phy_hw_reset_sgmii_82575;
+		ctrl_ext |= E1000_CTRL_I2C_ENA;
+	} else {
+		phy->ops.reset = igb_phy_hw_reset;
+		ctrl_ext &= ~E1000_CTRL_I2C_ENA;
+	}
+
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+	igb_reset_mdicnfg_82580(hw);
+
+	if (igb_sgmii_active_82575(hw) && !igb_sgmii_uses_mdio_82575(hw)) {
+		phy->ops.read_reg = igb_read_phy_reg_sgmii_82575;
+		phy->ops.write_reg = igb_write_phy_reg_sgmii_82575;
+	} else {
+		switch (hw->mac.type) {
+		case e1000_82580:
+		case e1000_i350:
+		case e1000_i354:
+			phy->ops.read_reg = igb_read_phy_reg_82580;
+			phy->ops.write_reg = igb_write_phy_reg_82580;
+			break;
+		case e1000_i210:
+		case e1000_i211:
+			phy->ops.read_reg = igb_read_phy_reg_gs40g;
+			phy->ops.write_reg = igb_write_phy_reg_gs40g;
+			break;
+		default:
+			phy->ops.read_reg = igb_read_phy_reg_igp;
+			phy->ops.write_reg = igb_write_phy_reg_igp;
+		}
+	}
+
+	/* set lan id */
+	hw->bus.func = (rd32(E1000_STATUS) & E1000_STATUS_FUNC_MASK) >>
+			E1000_STATUS_FUNC_SHIFT;
+
+	/* Set phy->phy_addr and phy->id. */
+	ret_val = igb_get_phy_id_82575(hw);
+	if (ret_val)
+		return ret_val;
+
+	/* Verify phy id and set remaining function pointers */
+	switch (phy->id) {
+	case M88E1543_E_PHY_ID:
+	case I347AT4_E_PHY_ID:
+	case M88E1112_E_PHY_ID:
+	case M88E1111_I_PHY_ID:
+		phy->type		= e1000_phy_m88;
+		phy->ops.check_polarity	= igb_check_polarity_m88;
+		phy->ops.get_phy_info	= igb_get_phy_info_m88;
+		if (phy->id != M88E1111_I_PHY_ID)
+			phy->ops.get_cable_length =
+					 igb_get_cable_length_m88_gen2;
+		else
+			phy->ops.get_cable_length = igb_get_cable_length_m88;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+		/* Check if this PHY is configured for media swap. */
+		if (phy->id == M88E1112_E_PHY_ID) {
+			u16 data;
+
+			ret_val = phy->ops.write_reg(hw,
+						     E1000_M88E1112_PAGE_ADDR,
+						     2);
+			if (ret_val)
+				goto out;
+
+			ret_val = phy->ops.read_reg(hw,
+						    E1000_M88E1112_MAC_CTRL_1,
+						    &data);
+			if (ret_val)
+				goto out;
+
+			data = (data & E1000_M88E1112_MAC_CTRL_1_MODE_MASK) >>
+			       E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT;
+			if (data == E1000_M88E1112_AUTO_COPPER_SGMII ||
+			    data == E1000_M88E1112_AUTO_COPPER_BASEX)
+				hw->mac.ops.check_for_link =
+						igb_check_for_link_media_swap;
+		}
+		break;
+	case IGP03E1000_E_PHY_ID:
+		phy->type = e1000_phy_igp_3;
+		phy->ops.get_phy_info = igb_get_phy_info_igp;
+		phy->ops.get_cable_length = igb_get_cable_length_igp_2;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_igp;
+		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82575;
+		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state;
+		break;
+	case I82580_I_PHY_ID:
+	case I350_I_PHY_ID:
+		phy->type = e1000_phy_82580;
+		phy->ops.force_speed_duplex =
+					 igb_phy_force_speed_duplex_82580;
+		phy->ops.get_cable_length = igb_get_cable_length_82580;
+		phy->ops.get_phy_info = igb_get_phy_info_82580;
+		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+		break;
+	case I210_I_PHY_ID:
+		phy->type		= e1000_phy_i210;
+		phy->ops.check_polarity	= igb_check_polarity_m88;
+		phy->ops.get_phy_info	= igb_get_phy_info_m88;
+		phy->ops.get_cable_length = igb_get_cable_length_m88_gen2;
+		phy->ops.set_d0_lplu_state = igb_set_d0_lplu_state_82580;
+		phy->ops.set_d3_lplu_state = igb_set_d3_lplu_state_82580;
+		phy->ops.force_speed_duplex = igb_phy_force_speed_duplex_m88;
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_init_nvm_params_82575 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_nvm_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u16 size;
+
+	size = (u16)((eecd & E1000_EECD_SIZE_EX_MASK) >>
+		     E1000_EECD_SIZE_EX_SHIFT);
+
+	/* Added to a constant, "size" becomes the left-shift value
+	 * for setting word_size.
+	 */
+	size += NVM_WORD_SIZE_BASE_SHIFT;
+
+	/* Just in case size is out of range, cap it to the largest
+	 * EEPROM size supported
+	 */
+	if (size > 15)
+		size = 15;
+
+	nvm->word_size = 1 << size;
+	nvm->opcode_bits = 8;
+	nvm->delay_usec = 1;
+
+	switch (nvm->override) {
+	case e1000_nvm_override_spi_large:
+		nvm->page_size = 32;
+		nvm->address_bits = 16;
+		break;
+	case e1000_nvm_override_spi_small:
+		nvm->page_size = 8;
+		nvm->address_bits = 8;
+		break;
+	default:
+		nvm->page_size = eecd & E1000_EECD_ADDR_BITS ? 32 : 8;
+		nvm->address_bits = eecd & E1000_EECD_ADDR_BITS ?
+				    16 : 8;
+		break;
+	}
+	if (nvm->word_size == (1 << 15))
+		nvm->page_size = 128;
+
+	nvm->type = e1000_nvm_eeprom_spi;
+
+	/* NVM Function Pointers */
+	nvm->ops.acquire = igb_acquire_nvm_82575;
+	nvm->ops.release = igb_release_nvm_82575;
+	nvm->ops.write = igb_write_nvm_spi;
+	nvm->ops.validate = igb_validate_nvm_checksum;
+	nvm->ops.update = igb_update_nvm_checksum;
+	if (nvm->word_size < (1 << 15))
+		nvm->ops.read = igb_read_nvm_eerd;
+	else
+		nvm->ops.read = igb_read_nvm_spi;
+
+	/* override generic family function pointers for specific descendants */
+	switch (hw->mac.type) {
+	case e1000_82580:
+		nvm->ops.validate = igb_validate_nvm_checksum_82580;
+		nvm->ops.update = igb_update_nvm_checksum_82580;
+		break;
+	case e1000_i354:
+	case e1000_i350:
+		nvm->ops.validate = igb_validate_nvm_checksum_i350;
+		nvm->ops.update = igb_update_nvm_checksum_i350;
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_init_mac_params_82575 - Init MAC func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_init_mac_params_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+	/* Set mta register count */
+	mac->mta_reg_count = 128;
+	/* Set rar entry count */
+	switch (mac->type) {
+	case e1000_82576:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82576;
+		break;
+	case e1000_82580:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82580;
+		break;
+	case e1000_i350:
+	case e1000_i354:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_I350;
+		break;
+	default:
+		mac->rar_entry_count = E1000_RAR_ENTRIES_82575;
+		break;
+	}
+	/* reset */
+	if (mac->type >= e1000_82580)
+		mac->ops.reset_hw = igb_reset_hw_82580;
+	else
+		mac->ops.reset_hw = igb_reset_hw_82575;
+
+	if (mac->type >= e1000_i210) {
+		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_i210;
+		mac->ops.release_swfw_sync = igb_release_swfw_sync_i210;
+
+	} else {
+		mac->ops.acquire_swfw_sync = igb_acquire_swfw_sync_82575;
+		mac->ops.release_swfw_sync = igb_release_swfw_sync_82575;
+	}
+
+	/* Set if part includes ASF firmware */
+	mac->asf_firmware_present = true;
+	/* Set if manageability features are enabled. */
+	mac->arc_subsystem_valid =
+		(rd32(E1000_FWSM) & E1000_FWSM_MODE_MASK)
+			? true : false;
+	/* enable EEE on i350 parts and later parts */
+	if (mac->type >= e1000_i350)
+		dev_spec->eee_disable = false;
+	else
+		dev_spec->eee_disable = true;
+	/* Allow a single clear of the SW semaphore on I210 and newer */
+	if (mac->type >= e1000_i210)
+		dev_spec->clear_semaphore_once = true;
+	/* physical interface link setup */
+	mac->ops.setup_physical_interface =
+		(hw->phy.media_type == e1000_media_type_copper)
+			? igb_setup_copper_link_82575
+			: igb_setup_serdes_link_82575;
+
+	if (mac->type == e1000_82580) {
+		switch (hw->device_id) {
+		/* feature not supported on these id's */
+		case E1000_DEV_ID_DH89XXCC_SGMII:
+		case E1000_DEV_ID_DH89XXCC_SERDES:
+		case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+		case E1000_DEV_ID_DH89XXCC_SFP:
+			break;
+		default:
+			hw->dev_spec._82575.mas_capable = true;
+			break;
+		}
+	}
+	return 0;
+}
+
+/**
+ *  igb_set_sfp_media_type_82575 - derives SFP module media type.
+ *  @hw: pointer to the HW structure
+ *
+ *  The media type is chosen based on SFP module
+ *  compatibility flags retrieved from the SFP ID EEPROM.
+ **/
+static s32 igb_set_sfp_media_type_82575(struct e1000_hw *hw)
+{
+	s32 ret_val = E1000_ERR_CONFIG;
+	u32 ctrl_ext = 0;
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+	struct e1000_sfp_flags *eth_flags = &dev_spec->eth_flags;
+	u8 tranceiver_type = 0;
+	s32 timeout = 3;
+
+	/* Turn I2C interface ON and power on sfp cage */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+	wr32(E1000_CTRL_EXT, ctrl_ext | E1000_CTRL_I2C_ENA);
+
+	wrfl();
+
+	/* Read SFP module data */
+	while (timeout) {
+		ret_val = igb_read_sfp_data_byte(hw,
+			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_IDENTIFIER_OFFSET),
+			&tranceiver_type);
+		if (ret_val == 0)
+			break;
+		msleep(100);
+		timeout--;
+	}
+	if (ret_val != 0)
+		goto out;
+
+	ret_val = igb_read_sfp_data_byte(hw,
+			E1000_I2CCMD_SFP_DATA_ADDR(E1000_SFF_ETH_FLAGS_OFFSET),
+			(u8 *)eth_flags);
+	if (ret_val != 0)
+		goto out;
+
+	/* Check if an SFP module is plugged in and powered */
+	if ((tranceiver_type == E1000_SFF_IDENTIFIER_SFP) ||
+	    (tranceiver_type == E1000_SFF_IDENTIFIER_SFF)) {
+		dev_spec->module_plugged = true;
+		if (eth_flags->e1000_base_lx || eth_flags->e1000_base_sx) {
+			hw->phy.media_type = e1000_media_type_internal_serdes;
+		} else if (eth_flags->e100_base_fx) {
+			dev_spec->sgmii_active = true;
+			hw->phy.media_type = e1000_media_type_internal_serdes;
+		} else if (eth_flags->e1000_base_t) {
+			dev_spec->sgmii_active = true;
+			hw->phy.media_type = e1000_media_type_copper;
+		} else {
+			hw->phy.media_type = e1000_media_type_unknown;
+			hw_dbg("PHY module has not been recognized\n");
+			goto out;
+		}
+	} else {
+		hw->phy.media_type = e1000_media_type_unknown;
+	}
+	ret_val = 0;
+out:
+	/* Restore I2C interface setting */
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+	return ret_val;
+}
+
+static s32 igb_get_invariants_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+	s32 ret_val;
+	u32 ctrl_ext = 0;
+	u32 link_mode = 0;
+
+	switch (hw->device_id) {
+	case E1000_DEV_ID_82575EB_COPPER:
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		mac->type = e1000_82575;
+		break;
+	case E1000_DEV_ID_82576:
+	case E1000_DEV_ID_82576_NS:
+	case E1000_DEV_ID_82576_NS_SERDES:
+	case E1000_DEV_ID_82576_FIBER:
+	case E1000_DEV_ID_82576_SERDES:
+	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+	case E1000_DEV_ID_82576_SERDES_QUAD:
+		mac->type = e1000_82576;
+		break;
+	case E1000_DEV_ID_82580_COPPER:
+	case E1000_DEV_ID_82580_FIBER:
+	case E1000_DEV_ID_82580_QUAD_FIBER:
+	case E1000_DEV_ID_82580_SERDES:
+	case E1000_DEV_ID_82580_SGMII:
+	case E1000_DEV_ID_82580_COPPER_DUAL:
+	case E1000_DEV_ID_DH89XXCC_SGMII:
+	case E1000_DEV_ID_DH89XXCC_SERDES:
+	case E1000_DEV_ID_DH89XXCC_BACKPLANE:
+	case E1000_DEV_ID_DH89XXCC_SFP:
+		mac->type = e1000_82580;
+		break;
+	case E1000_DEV_ID_I350_COPPER:
+	case E1000_DEV_ID_I350_FIBER:
+	case E1000_DEV_ID_I350_SERDES:
+	case E1000_DEV_ID_I350_SGMII:
+		mac->type = e1000_i350;
+		break;
+	case E1000_DEV_ID_I210_COPPER:
+	case E1000_DEV_ID_I210_FIBER:
+	case E1000_DEV_ID_I210_SERDES:
+	case E1000_DEV_ID_I210_SGMII:
+	case E1000_DEV_ID_I210_COPPER_FLASHLESS:
+	case E1000_DEV_ID_I210_SERDES_FLASHLESS:
+		mac->type = e1000_i210;
+		break;
+	case E1000_DEV_ID_I211_COPPER:
+		mac->type = e1000_i211;
+		break;
+	case E1000_DEV_ID_I354_BACKPLANE_1GBPS:
+	case E1000_DEV_ID_I354_SGMII:
+	case E1000_DEV_ID_I354_BACKPLANE_2_5GBPS:
+		mac->type = e1000_i354;
+		break;
+	default:
+		return -E1000_ERR_MAC_INIT;
+	}
+
+	/* Set media type */
+	/* The 82575 uses bits 22:23 for link mode. The mode can be changed
+	 * based on the EEPROM. We cannot rely upon device ID. There
+	 * is no distinguishable difference between fiber and internal
+	 * SerDes mode on the 82575. There can be an external PHY attached
+	 * on the SGMII interface. For this, we'll set sgmii_active to true.
+	 */
+	hw->phy.media_type = e1000_media_type_copper;
+	dev_spec->sgmii_active = false;
+	dev_spec->module_plugged = false;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+
+	link_mode = ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK;
+	switch (link_mode) {
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+		hw->phy.media_type = e1000_media_type_internal_serdes;
+		break;
+	case E1000_CTRL_EXT_LINK_MODE_SGMII:
+		/* Get phy control interface type set (MDIO vs. I2C)*/
+		if (igb_sgmii_uses_mdio_82575(hw)) {
+			hw->phy.media_type = e1000_media_type_copper;
+			dev_spec->sgmii_active = true;
+			break;
+		}
+		/* for I2C based SGMII: */
+		fallthrough;
+	case E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES:
+		/* read media type from SFP EEPROM */
+		ret_val = igb_set_sfp_media_type_82575(hw);
+		if ((ret_val != 0) ||
+		    (hw->phy.media_type == e1000_media_type_unknown)) {
+			/* If media type was not identified then return media
+			 * type defined by the CTRL_EXT settings.
+			 */
+			hw->phy.media_type = e1000_media_type_internal_serdes;
+
+			if (link_mode == E1000_CTRL_EXT_LINK_MODE_SGMII) {
+				hw->phy.media_type = e1000_media_type_copper;
+				dev_spec->sgmii_active = true;
+			}
+
+			break;
+		}
+
+		/* do not change link mode for 100BaseFX */
+		if (dev_spec->eth_flags.e100_base_fx)
+			break;
+
+		/* change current link mode setting */
+		ctrl_ext &= ~E1000_CTRL_EXT_LINK_MODE_MASK;
+
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_SGMII;
+		else
+			ctrl_ext |= E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+
+		wr32(E1000_CTRL_EXT, ctrl_ext);
+
+		break;
+	default:
+		break;
+	}
+
+	/* mac initialization and operations */
+	ret_val = igb_init_mac_params_82575(hw);
+	if (ret_val)
+		goto out;
+
+	/* NVM initialization */
+	ret_val = igb_init_nvm_params_82575(hw);
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		ret_val = igb_init_nvm_params_i210(hw);
+		break;
+	default:
+		break;
+	}
+
+	if (ret_val)
+		goto out;
+
+	/* if part supports SR-IOV then initialize mailbox parameters */
+	switch (mac->type) {
+	case e1000_82576:
+	case e1000_i350:
+		igb_init_mbx_params_pf(hw);
+		break;
+	default:
+		break;
+	}
+
+	/* setup PHY parameters */
+	ret_val = igb_init_phy_params_82575(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_acquire_phy_82575 - Acquire rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 igb_acquire_phy_82575(struct e1000_hw *hw)
+{
+	u16 mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_SWFW_PHY2_SM;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_SWFW_PHY3_SM;
+
+	return hw->mac.ops.acquire_swfw_sync(hw, mask);
+}
+
+/**
+ *  igb_release_phy_82575 - Release rights to access PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  A wrapper to release access rights to the correct PHY.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static void igb_release_phy_82575(struct e1000_hw *hw)
+{
+	u16 mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		mask = E1000_SWFW_PHY1_SM;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_SWFW_PHY2_SM;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_SWFW_PHY3_SM;
+
+	hw->mac.ops.release_swfw_sync(hw, mask);
+}
+
+/**
+ *  igb_read_phy_reg_sgmii_82575 - Read PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the serial gigabit media independent
+ *  interface and stores the retrieved information in data.
+ **/
+static s32 igb_read_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+					  u16 *data)
+{
+	s32 ret_val = -E1000_ERR_PARAM;
+
+	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+		hw_dbg("PHY Address %u is out of range\n", offset);
+		goto out;
+	}
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_phy_reg_i2c(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_sgmii_82575 - Write PHY register using sgmii
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the serial gigabit
+ *  media independent interface.
+ **/
+static s32 igb_write_phy_reg_sgmii_82575(struct e1000_hw *hw, u32 offset,
+					   u16 data)
+{
+	s32 ret_val = -E1000_ERR_PARAM;
+
+	if (offset > E1000_MAX_SGMII_PHY_REG_ADDR) {
+		hw_dbg("PHY Address %d is out of range\n", offset);
+		goto out;
+	}
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_write_phy_reg_i2c(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_id_82575 - Retrieve PHY addr and id
+ *  @hw: pointer to the HW structure
+ *
+ *  Retrieves the PHY address and ID for PHYs that do and do not use
+ *  the sgmii interface.
+ **/
+static s32 igb_get_phy_id_82575(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val = 0;
+	u16 phy_id;
+	u32 ctrl_ext;
+	u32 mdic;
+
+	/* Extra read required for some PHY's on i354 */
+	if (hw->mac.type == e1000_i354)
+		igb_get_phy_id(hw);
+
+	/* For SGMII PHYs, we try the list of possible addresses until
+	 * we find one that works.  For non-SGMII PHYs
+	 * (e.g. integrated copper PHYs), an address of 1 should
+	 * work.  The result of this function should mean phy->phy_addr
+	 * and phy->id are set correctly.
+	 */
+	if (!(igb_sgmii_active_82575(hw))) {
+		phy->addr = 1;
+		ret_val = igb_get_phy_id(hw);
+		goto out;
+	}
+
+	if (igb_sgmii_uses_mdio_82575(hw)) {
+		switch (hw->mac.type) {
+		case e1000_82575:
+		case e1000_82576:
+			mdic = rd32(E1000_MDIC);
+			mdic &= E1000_MDIC_PHY_MASK;
+			phy->addr = mdic >> E1000_MDIC_PHY_SHIFT;
+			break;
+		case e1000_82580:
+		case e1000_i350:
+		case e1000_i354:
+		case e1000_i210:
+		case e1000_i211:
+			mdic = rd32(E1000_MDICNFG);
+			mdic &= E1000_MDICNFG_PHY_MASK;
+			phy->addr = mdic >> E1000_MDICNFG_PHY_SHIFT;
+			break;
+		default:
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+		ret_val = igb_get_phy_id(hw);
+		goto out;
+	}
+
+	/* Power on sgmii phy if it is disabled */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT, ctrl_ext & ~E1000_CTRL_EXT_SDP3_DATA);
+	wrfl();
+	msleep(300);
+
+	/* The address field in the I2CCMD register is 3 bits and 0 is invalid.
+	 * Therefore, we need to test 1-7
+	 */
+	for (phy->addr = 1; phy->addr < 8; phy->addr++) {
+		ret_val = igb_read_phy_reg_sgmii_82575(hw, PHY_ID1, &phy_id);
+		if (ret_val == 0) {
+			hw_dbg("Vendor ID 0x%08X read at address %u\n",
+			       phy_id, phy->addr);
+			/* At the time of this writing, the M88 part is
+			 * the only supported SGMII PHY product.
+			 */
+			if (phy_id == M88_VENDOR)
+				break;
+		} else {
+			hw_dbg("PHY address %u was unreadable\n", phy->addr);
+		}
+	}
+
+	/* A valid PHY type couldn't be found. */
+	if (phy->addr == 8) {
+		phy->addr = 0;
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	} else {
+		ret_val = igb_get_phy_id(hw);
+	}
+
+	/* restore previous sfp cage power state */
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_hw_reset_sgmii_82575 - Performs a PHY reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Resets the PHY using the serial gigabit media independent interface.
+ **/
+static s32 igb_phy_hw_reset_sgmii_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	/* This isn't a true "hard" reset, but is the only reset
+	 * available to us at this time.
+	 */
+
+	hw_dbg("Soft resetting SGMII attached PHY...\n");
+
+	/* SFP documentation requires the following to configure the SFP module
+	 * to work on SGMII.  No further documentation is given.
+	 */
+	ret_val = hw->phy.ops.write_reg(hw, 0x1B, 0x8084);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_phy_sw_reset(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_set_d0_lplu_state_82575 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 igb_set_d0_lplu_state_82575(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (active) {
+		data |= IGP02E1000_PM_D0_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+						 data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+						&data);
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+						 data);
+		if (ret_val)
+			goto out;
+	} else {
+		data &= ~IGP02E1000_PM_D0_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+						 data);
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = phy->ops.read_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = phy->ops.read_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+					IGP01E1000_PHY_PORT_CONFIG, data);
+			if (ret_val)
+				goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_set_d0_lplu_state_82580 - Set Low Power Linkup D0 state
+ *  @hw: pointer to the HW structure
+ *  @active: true to enable LPLU, false to disable
+ *
+ *  Sets the LPLU D0 state according to the active flag.  When
+ *  activating LPLU this function also disables smart speed
+ *  and vice versa.  LPLU will not be activated unless the
+ *  device autonegotiation advertisement meets standards of
+ *  either 10 or 10/100 or 10/100/1000 at all duplexes.
+ *  This is a function pointer entry point only called by
+ *  PHY setup routines.
+ **/
+static s32 igb_set_d0_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u16 data;
+
+	data = rd32(E1000_82580_PHY_POWER_MGMT);
+
+	if (active) {
+		data |= E1000_82580_PM_D0_LPLU;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		data &= ~E1000_82580_PM_SPD;
+	} else {
+		data &= ~E1000_82580_PM_D0_LPLU;
+
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on)
+			data |= E1000_82580_PM_SPD;
+		else if (phy->smart_speed == e1000_smart_speed_off)
+			data &= ~E1000_82580_PM_SPD;
+	}
+
+	wr32(E1000_82580_PHY_POWER_MGMT, data);
+	return 0;
+}
+
+/**
+ *  igb_set_d3_lplu_state_82580 - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+static s32 igb_set_d3_lplu_state_82580(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u16 data;
+
+	data = rd32(E1000_82580_PHY_POWER_MGMT);
+
+	if (!active) {
+		data &= ~E1000_82580_PM_D3_LPLU;
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on)
+			data |= E1000_82580_PM_SPD;
+		else if (phy->smart_speed == e1000_smart_speed_off)
+			data &= ~E1000_82580_PM_SPD;
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= E1000_82580_PM_D3_LPLU;
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		data &= ~E1000_82580_PM_SPD;
+	}
+
+	wr32(E1000_82580_PHY_POWER_MGMT, data);
+	return 0;
+}
+
+/**
+ *  igb_acquire_nvm_82575 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 igb_acquire_nvm_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+
+	ret_val = hw->mac.ops.acquire_swfw_sync(hw, E1000_SWFW_EEP_SM);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_acquire_nvm(hw);
+
+	if (ret_val)
+		hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_release_nvm_82575 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+static void igb_release_nvm_82575(struct e1000_hw *hw)
+{
+	igb_release_nvm(hw);
+	hw->mac.ops.release_swfw_sync(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_acquire_swfw_sync_82575 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+static s32 igb_acquire_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = 0;
+	s32 i = 0, timeout = 200;
+
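+	/* Two-level protocol: the HW semaphore only serializes access to the
+	 * SW_FW_SYNC register itself; the mask bit set there is the actual
+	 * lock on the PHY/NVM resource, held until the matching release.
+	 */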
+	while (i < timeout) {
+		if (igb_get_hw_semaphore(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = rd32(E1000_SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/* Firmware currently using resource (fwmask)
+		 * or other software thread using resource (swmask)
+		 */
+		igb_put_hw_semaphore(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_release_swfw_sync_82575 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+static void igb_release_swfw_sync_82575(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	while (igb_get_hw_semaphore(hw) != 0)
+		; /* Empty */
+
+	swfw_sync = rd32(E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+}
+
+/**
+ *  igb_get_cfg_done_82575 - Read config done bit
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the management control register for the config done bit for
+ *  completion status.  NOTE: EEPROM-less silicon will fail trying to read
+ *  the config done bit, so the error is *ONLY* logged and 0 is returned.
+ *  If we returned an error, EEPROM-less silicon could not be reset or
+ *  change link.
+ **/
+static s32 igb_get_cfg_done_82575(struct e1000_hw *hw)
+{
+	s32 timeout = PHY_CFG_TIMEOUT;
+	u32 mask = E1000_NVM_CFG_DONE_PORT_0;
+
+	if (hw->bus.func == 1)
+		mask = E1000_NVM_CFG_DONE_PORT_1;
+	else if (hw->bus.func == E1000_FUNC_2)
+		mask = E1000_NVM_CFG_DONE_PORT_2;
+	else if (hw->bus.func == E1000_FUNC_3)
+		mask = E1000_NVM_CFG_DONE_PORT_3;
+
+	while (timeout) {
+		if (rd32(E1000_EEMNGCTL) & mask)
+			break;
+		usleep_range(1000, 2000);
+		timeout--;
+	}
+	if (!timeout)
+		hw_dbg("MNG configuration cycle has not completed.\n");
+
+	/* If EEPROM is not marked present, init the PHY manually */
+	if (((rd32(E1000_EECD) & E1000_EECD_PRES) == 0) &&
+	    (hw->phy.type == e1000_phy_igp_3))
+		igb_phy_init_script_igp3(hw);
+
+	return 0;
+}
+
+/**
+ *  igb_get_link_up_info_82575 - Get link speed/duplex info
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  This is a wrapper function, if using the serial gigabit media independent
+ *  interface, use PCS to retrieve the link speed and duplex information.
+ *  Otherwise, use the generic function to get the link speed and duplex info.
+ **/
+static s32 igb_get_link_up_info_82575(struct e1000_hw *hw, u16 *speed,
+					u16 *duplex)
+{
+	s32 ret_val;
+
+	if (hw->phy.media_type != e1000_media_type_copper)
+		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, speed,
+							       duplex);
+	else
+		ret_val = igb_get_speed_and_duplex_copper(hw, speed,
+								    duplex);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_link_82575 - Check for link
+ *  @hw: pointer to the HW structure
+ *
+ *  If sgmii is enabled, then use the pcs register to determine link, otherwise
+ *  use the generic interface for determining link.
+ **/
+static s32 igb_check_for_link_82575(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 speed, duplex;
+
+	if (hw->phy.media_type != e1000_media_type_copper) {
+		ret_val = igb_get_pcs_speed_and_duplex_82575(hw, &speed,
+							     &duplex);
+		/* Use this flag to determine if link needs to be checked or
+		 * not.  If we have link, clear the flag so that we do not
+		 * continue to check for link.
+		 */
+		hw->mac.get_link_status = !hw->mac.serdes_has_link;
+
+		/* Configure Flow Control now that Auto-Neg has completed.
+		 * First, we need to restore the desired flow control
+		 * settings because we may have had to re-autoneg with a
+		 * different link partner.
+		 */
+		ret_val = igb_config_fc_after_link_up(hw);
+		if (ret_val)
+			hw_dbg("Error configuring flow control\n");
+	} else {
+		ret_val = igb_check_for_copper_link(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_power_up_serdes_link_82575 - Power up the serdes link after shutdown
+ *  @hw: pointer to the HW structure
+ **/
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+	    !igb_sgmii_active_82575(hw))
+		return;
+
+	/* Enable PCS to turn on link */
+	reg = rd32(E1000_PCS_CFG0);
+	reg |= E1000_PCS_CFG_PCS_EN;
+	wr32(E1000_PCS_CFG0, reg);
+
+	/* Power up the laser */
+	reg = rd32(E1000_CTRL_EXT);
+	reg &= ~E1000_CTRL_EXT_SDP3_DATA;
+	wr32(E1000_CTRL_EXT, reg);
+
+	/* flush the write to verify completion */
+	wrfl();
+	usleep_range(1000, 2000);
+}
+
+/**
+ *  igb_get_pcs_speed_and_duplex_82575 - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Using the physical coding sub-layer (PCS), retrieve the current speed and
+ *  duplex, then store the values in the pointers provided.
+ **/
+static s32 igb_get_pcs_speed_and_duplex_82575(struct e1000_hw *hw, u16 *speed,
+						u16 *duplex)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 pcs, status;
+
+	/* Set up defaults for the return values of this function */
+	mac->serdes_has_link = false;
+	*speed = 0;
+	*duplex = 0;
+
+	/* Read the PCS Status register for link state. For non-copper mode,
+	 * the status register is not accurate. The PCS status register is
+	 * used instead.
+	 */
+	pcs = rd32(E1000_PCS_LSTAT);
+
+	/* The link up bit determines when link is up on autoneg. The sync ok
+	 * gets set once both sides sync up and agree upon link. Stable link
+	 * can be determined by checking for both link up and link sync ok
+	 */
+	if ((pcs & E1000_PCS_LSTS_LINK_OK) && (pcs & E1000_PCS_LSTS_SYNK_OK)) {
+		mac->serdes_has_link = true;
+
+		/* Detect and store PCS speed */
+		if (pcs & E1000_PCS_LSTS_SPEED_1000)
+			*speed = SPEED_1000;
+		else if (pcs & E1000_PCS_LSTS_SPEED_100)
+			*speed = SPEED_100;
+		else
+			*speed = SPEED_10;
+
+		/* Detect and store PCS duplex */
+		if (pcs & E1000_PCS_LSTS_DUPLEX_FULL)
+			*duplex = FULL_DUPLEX;
+		else
+			*duplex = HALF_DUPLEX;
+
+		/* Check if it is an I354 2.5Gb backplane connection. */
+		if (mac->type == e1000_i354) {
+			status = rd32(E1000_STATUS);
+			if ((status & E1000_STATUS_2P5_SKU) &&
+			    !(status & E1000_STATUS_2P5_SKU_OVER)) {
+				*speed = SPEED_2500;
+				*duplex = FULL_DUPLEX;
+				hw_dbg("2500 Mbs, ");
+				hw_dbg("Full Duplex\n");
+			}
+		}
+
+	}
+
+	return 0;
+}
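+
+/* Illustrative sketch, not part of the driver: a caller consumes the
+ * decode above through the speed/duplex out-parameters and the
+ * mac.serdes_has_link flag.  The function name is hypothetical;
+ * SPEED_1000 and FULL_DUPLEX come from the kernel's ethtool headers:
+ *
+ *	static void example_log_serdes_link(struct e1000_hw *hw)
+ *	{
+ *		u16 speed, duplex;
+ *
+ *		igb_get_pcs_speed_and_duplex_82575(hw, &speed, &duplex);
+ *		if (hw->mac.serdes_has_link)
+ *			hw_dbg("PCS link: %u Mbps, %s duplex\n", speed,
+ *			       duplex == FULL_DUPLEX ? "full" : "half");
+ *	}
+ */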
+
+/**
+ *  igb_shutdown_serdes_link_82575 - Remove link during power down
+ *  @hw: pointer to the HW structure
+ *
+ *  In the case of fiber serdes, shut down optics and PCS on driver unload
+ *  when management pass thru is not enabled.
+ **/
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 reg;
+
+	if (hw->phy.media_type != e1000_media_type_internal_serdes &&
+	    igb_sgmii_active_82575(hw))
+		return;
+
+	if (!igb_enable_mng_pass_thru(hw)) {
+		/* Disable PCS to turn off link */
+		reg = rd32(E1000_PCS_CFG0);
+		reg &= ~E1000_PCS_CFG_PCS_EN;
+		wr32(E1000_PCS_CFG0, reg);
+
+		/* shutdown the laser */
+		reg = rd32(E1000_CTRL_EXT);
+		reg |= E1000_CTRL_EXT_SDP3_DATA;
+		wr32(E1000_CTRL_EXT, reg);
+
+		/* flush the write to verify completion */
+		wrfl();
+		usleep_range(1000, 2000);
+	}
+}
+
+/**
+ *  igb_reset_hw_82575 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the hardware into a known state.  This is a
+ *  function pointer entry point called by the api module.
+ **/
+static s32 igb_reset_hw_82575(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val;
+
+	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = igb_disable_pcie_master(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Master disable polling has failed.\n");
+
+	/* set the completion timeout for interface */
+	ret_val = igb_set_pcie_completion_timeout(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Set completion timeout has failed.\n");
+
+	hw_dbg("Masking off all interrupts\n");
+	wr32(E1000_IMC, 0xffffffff);
+
+	wr32(E1000_RCTL, 0);
+	wr32(E1000_TCTL, E1000_TCTL_PSP);
+	wrfl();
+
+	usleep_range(10000, 20000);
+
+	ctrl = rd32(E1000_CTRL);
+
+	hw_dbg("Issuing a global reset to MAC\n");
+	wr32(E1000_CTRL, ctrl | E1000_CTRL_RST);
+
+	ret_val = igb_get_auto_rd_done(hw);
+	if (ret_val) {
+		/* When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		hw_dbg("Auto Read Done did not complete\n");
+	}
+
+	/* If EEPROM is not present, run manual init scripts */
+	if ((rd32(E1000_EECD) & E1000_EECD_PRES) == 0)
+		igb_reset_init_script_82575(hw);
+
+	/* Clear any pending interrupt events. */
+	wr32(E1000_IMC, 0xffffffff);
+	rd32(E1000_ICR);
+
+	/* Install any alternate MAC address into RAR0 */
+	ret_val = igb_check_alt_mac_addr(hw);
+
+	return ret_val;
+}
+
+/**
+ *  igb_init_hw_82575 - Initialize hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This inits the hardware readying it for operation.
+ **/
+static s32 igb_init_hw_82575(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	u16 i, rar_count = mac->rar_entry_count;
+
+	if ((hw->mac.type >= e1000_i210) &&
+	    !(igb_get_flash_presence_i210(hw))) {
+		ret_val = igb_pll_workaround_i210(hw);
+		if (ret_val)
+			return ret_val;
+	}
+
+	/* Initialize identification LED */
+	ret_val = igb_id_led_init(hw);
+	if (ret_val) {
+		hw_dbg("Error initializing identification LED\n");
+		/* This is not fatal and we should not stop init due to this */
+	}
+
+	/* Disabling VLAN filtering */
+	hw_dbg("Initializing the IEEE VLAN\n");
+	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
+		igb_clear_vfta_i350(hw);
+	else
+		igb_clear_vfta(hw);
+
+	/* Setup the receive address */
+	igb_init_rx_addrs(hw, rar_count);
+
+	/* Zero out the Multicast HASH table */
+	hw_dbg("Zeroing the MTA\n");
+	for (i = 0; i < mac->mta_reg_count; i++)
+		array_wr32(E1000_MTA, i, 0);
+
+	/* Zero out the Unicast HASH table */
+	hw_dbg("Zeroing the UTA\n");
+	for (i = 0; i < mac->uta_reg_count; i++)
+		array_wr32(E1000_UTA, i, 0);
+
+	/* Setup link and flow control */
+	ret_val = igb_setup_link(hw);
+
+	/* Clear all of the statistics registers (clear on read).  It is
+	 * important that we do this after we have tried to establish link
+	 * because the symbol error count will increment wildly if there
+	 * is no link.
+	 */
+	igb_clear_hw_cntrs_82575(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_setup_copper_link_82575 - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the link for auto-neg or forced speed and duplex.  Then we
+ *  check for link; once link is established, collision distance and flow
+ *  control are configured.
+ **/
+static s32 igb_setup_copper_link_82575(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32  ret_val;
+	u32 phpm_reg;
+
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= E1000_CTRL_SLU;
+	ctrl &= ~(E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	wr32(E1000_CTRL, ctrl);
+
+	/* Clear Go Link Disconnect bit on supported devices */
+	switch (hw->mac.type) {
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i210:
+	case e1000_i211:
+		phpm_reg = rd32(E1000_82580_PHY_POWER_MGMT);
+		phpm_reg &= ~E1000_82580_PM_GO_LINKD;
+		wr32(E1000_82580_PHY_POWER_MGMT, phpm_reg);
+		break;
+	default:
+		break;
+	}
+
+	ret_val = igb_setup_serdes_link_82575(hw);
+	if (ret_val)
+		goto out;
+
+	if (igb_sgmii_active_82575(hw) && !hw->phy.reset_disable) {
+		/* allow time for SFP cage to power up phy */
+		msleep(300);
+
+		ret_val = hw->phy.ops.reset(hw);
+		if (ret_val) {
+			hw_dbg("Error resetting the PHY.\n");
+			goto out;
+		}
+	}
+	switch (hw->phy.type) {
+	case e1000_phy_i210:
+	case e1000_phy_m88:
+		switch (hw->phy.id) {
+		case I347AT4_E_PHY_ID:
+		case M88E1112_E_PHY_ID:
+		case M88E1543_E_PHY_ID:
+		case I210_I_PHY_ID:
+			ret_val = igb_copper_link_setup_m88_gen2(hw);
+			break;
+		default:
+			ret_val = igb_copper_link_setup_m88(hw);
+			break;
+		}
+		break;
+	case e1000_phy_igp_3:
+		ret_val = igb_copper_link_setup_igp(hw);
+		break;
+	case e1000_phy_82580:
+		ret_val = igb_copper_link_setup_82580(hw);
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		break;
+	}
+
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_setup_copper_link(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_setup_serdes_link_82575 - Setup link for serdes
+ *  @hw: pointer to the HW structure
+ *
+ *  Configure the physical coding sub-layer (PCS) link.  The PCS link is
+ *  used on copper connections where the serialized gigabit media independent
+ *  interface (sgmii) or serdes fiber is being used.  Configures the link
+ *  for auto-negotiation or forces speed/duplex.
+ **/
+static s32 igb_setup_serdes_link_82575(struct e1000_hw *hw)
+{
+	u32 ctrl_ext, ctrl_reg, reg, anadv_reg;
+	bool pcs_autoneg;
+	s32 ret_val = 0;
+	u16 data;
+
+	if ((hw->phy.media_type != e1000_media_type_internal_serdes) &&
+	    !igb_sgmii_active_82575(hw))
+		return ret_val;
+
+	/* On the 82575, SerDes loopback mode persists until it is
+	 * explicitly turned off or a power cycle is performed.  A read to
+	 * the register does not indicate its status.  Therefore, we ensure
+	 * loopback mode is disabled during initialization.
+	 */
+	wr32(E1000_SCTL, E1000_SCTL_DISABLE_SERDES_LOOPBACK);
+
+	/* power on the sfp cage if present and turn on I2C */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	ctrl_ext &= ~E1000_CTRL_EXT_SDP3_DATA;
+	ctrl_ext |= E1000_CTRL_I2C_ENA;
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+
+	ctrl_reg = rd32(E1000_CTRL);
+	ctrl_reg |= E1000_CTRL_SLU;
+
+	if (hw->mac.type == e1000_82575 || hw->mac.type == e1000_82576) {
+		/* set both sw defined pins */
+		ctrl_reg |= E1000_CTRL_SWDPIN0 | E1000_CTRL_SWDPIN1;
+
+		/* Set switch control to serdes energy detect */
+		reg = rd32(E1000_CONNSW);
+		reg |= E1000_CONNSW_ENRGSRC;
+		wr32(E1000_CONNSW, reg);
+	}
+
+	reg = rd32(E1000_PCS_LCTL);
+
+	/* default pcs_autoneg to the same setting as mac autoneg */
+	pcs_autoneg = hw->mac.autoneg;
+
+	switch (ctrl_ext & E1000_CTRL_EXT_LINK_MODE_MASK) {
+	case E1000_CTRL_EXT_LINK_MODE_SGMII:
+		/* sgmii mode lets the phy handle forcing speed/duplex */
+		pcs_autoneg = true;
+		/* autoneg timeout should be disabled for SGMII mode */
+		reg &= ~(E1000_PCS_LCTL_AN_TIMEOUT);
+		break;
+	case E1000_CTRL_EXT_LINK_MODE_1000BASE_KX:
+		/* disable PCS autoneg and support parallel detect only */
+		pcs_autoneg = false;
+		fallthrough;
+	default:
+		if (hw->mac.type == e1000_82575 ||
+		    hw->mac.type == e1000_82576) {
+			ret_val = hw->nvm.ops.read(hw, NVM_COMPAT, 1, &data);
+			if (ret_val) {
+				hw_dbg(KERN_DEBUG "NVM Read Error\n");
+				return ret_val;
+			}
+
+			if (data & E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT)
+				pcs_autoneg = false;
+		}
+
+		/* non-SGMII modes only support a speed of 1000/Full for the
+		 * link so it is best to just force the MAC and let the pcs
+		 * link either autoneg or be forced to 1000/Full
+		 */
+		ctrl_reg |= E1000_CTRL_SPD_1000 | E1000_CTRL_FRCSPD |
+				E1000_CTRL_FD | E1000_CTRL_FRCDPX;
+
+		/* set speed of 1000/Full if speed/duplex is forced */
+		reg |= E1000_PCS_LCTL_FSV_1000 | E1000_PCS_LCTL_FDV_FULL;
+		break;
+	}
+
+	wr32(E1000_CTRL, ctrl_reg);
+
+	/* New SerDes mode allows for forcing speed or autonegotiating speed
+	 * at 1gb. Autoneg should be the default set by most drivers. This is the
+	 * mode that will be compatible with older link partners and switches.
+	 * However, both are supported by the hardware and some drivers/tools.
+	 */
+	reg &= ~(E1000_PCS_LCTL_AN_ENABLE | E1000_PCS_LCTL_FLV_LINK_UP |
+		E1000_PCS_LCTL_FSD | E1000_PCS_LCTL_FORCE_LINK);
+
+	if (pcs_autoneg) {
+		/* Set PCS register for autoneg */
+		reg |= E1000_PCS_LCTL_AN_ENABLE | /* Enable Autoneg */
+		       E1000_PCS_LCTL_AN_RESTART; /* Restart autoneg */
+
+		/* Disable force flow control for autoneg */
+		reg &= ~E1000_PCS_LCTL_FORCE_FCTRL;
+
+		/* Configure flow control advertisement for autoneg */
+		anadv_reg = rd32(E1000_PCS_ANADV);
+		anadv_reg &= ~(E1000_TXCW_ASM_DIR | E1000_TXCW_PAUSE);
+		switch (hw->fc.requested_mode) {
+		case e1000_fc_full:
+		case e1000_fc_rx_pause:
+			anadv_reg |= E1000_TXCW_ASM_DIR;
+			anadv_reg |= E1000_TXCW_PAUSE;
+			break;
+		case e1000_fc_tx_pause:
+			anadv_reg |= E1000_TXCW_ASM_DIR;
+			break;
+		default:
+			break;
+		}
+		wr32(E1000_PCS_ANADV, anadv_reg);
+
+		hw_dbg("Configuring Autoneg:PCS_LCTL=0x%08X\n", reg);
+	} else {
+		/* Set PCS register for forced link */
+		reg |= E1000_PCS_LCTL_FSD;        /* Force Speed */
+
+		/* Force flow control for forced link */
+		reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+
+		hw_dbg("Configuring Forced Link:PCS_LCTL=0x%08X\n", reg);
+	}
+
+	wr32(E1000_PCS_LCTL, reg);
+
+	if (!pcs_autoneg && !igb_sgmii_active_82575(hw))
+		igb_force_mac_fc(hw);
+
+	return ret_val;
+}
+
+/**
+ *  igb_sgmii_active_82575 - Return sgmii state
+ *  @hw: pointer to the HW structure
+ *
+ *  82575 silicon has a serialized gigabit media independent interface (sgmii)
+ *  which can be enabled for use in the embedded applications.  Simply
+ *  return the current state of the sgmii interface.
+ **/
+static bool igb_sgmii_active_82575(struct e1000_hw *hw)
+{
+	struct e1000_dev_spec_82575 *dev_spec = &hw->dev_spec._82575;
+
+	return dev_spec->sgmii_active;
+}
+
+/**
+ *  igb_reset_init_script_82575 - Inits HW defaults after reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Inits recommended HW defaults after a reset when there is no EEPROM
+ *  detected. This is only for the 82575.
+ **/
+static s32 igb_reset_init_script_82575(struct e1000_hw *hw)
+{
+	if (hw->mac.type == e1000_82575) {
+		hw_dbg("Running reset init script for 82575\n");
+		/* SerDes configuration via SERDESCTRL */
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x00, 0x0C);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x01, 0x78);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x1B, 0x23);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCTL, 0x23, 0x15);
+
+		/* CCM configuration via CCMCTL register */
+		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x14, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_CCMCTL, 0x10, 0x00);
+
+		/* PCIe lanes configuration */
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x00, 0xEC);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x61, 0xDF);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x34, 0x05);
+		igb_write_8bit_ctrl_reg(hw, E1000_GIOCTL, 0x2F, 0x81);
+
+		/* PCIe PLL Configuration */
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x02, 0x47);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x14, 0x00);
+		igb_write_8bit_ctrl_reg(hw, E1000_SCCTL, 0x10, 0x00);
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_read_mac_addr_82575 - Read device MAC address
+ *  @hw: pointer to the HW structure
+ **/
+static s32 igb_read_mac_addr_82575(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* If there's an alternate MAC address place it in RAR0
+	 * so that it will override the Si installed default perm
+	 * address.
+	 */
+	ret_val = igb_check_alt_mac_addr(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_mac_addr(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ * igb_power_down_phy_copper_82575 - Remove link during PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, remove the link unless wake on lan is enabled.
+ **/
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw)
+{
+	/* If the management interface is not enabled, then power down */
+	if (!(igb_enable_mng_pass_thru(hw) || igb_check_reset_block(hw)))
+		igb_power_down_phy_copper(hw);
+}
+
+/**
+ *  igb_clear_hw_cntrs_82575 - Clear device specific hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the hardware counters by reading the counter registers.
+ **/
+static void igb_clear_hw_cntrs_82575(struct e1000_hw *hw)
+{
+	igb_clear_hw_cntrs_base(hw);
+
+	rd32(E1000_PRC64);
+	rd32(E1000_PRC127);
+	rd32(E1000_PRC255);
+	rd32(E1000_PRC511);
+	rd32(E1000_PRC1023);
+	rd32(E1000_PRC1522);
+	rd32(E1000_PTC64);
+	rd32(E1000_PTC127);
+	rd32(E1000_PTC255);
+	rd32(E1000_PTC511);
+	rd32(E1000_PTC1023);
+	rd32(E1000_PTC1522);
+
+	rd32(E1000_ALGNERRC);
+	rd32(E1000_RXERRC);
+	rd32(E1000_TNCRS);
+	rd32(E1000_CEXTERR);
+	rd32(E1000_TSCTC);
+	rd32(E1000_TSCTFC);
+
+	rd32(E1000_MGTPRC);
+	rd32(E1000_MGTPDC);
+	rd32(E1000_MGTPTC);
+
+	rd32(E1000_IAC);
+	rd32(E1000_ICRXOC);
+
+	rd32(E1000_ICRXPTC);
+	rd32(E1000_ICRXATC);
+	rd32(E1000_ICTXPTC);
+	rd32(E1000_ICTXATC);
+	rd32(E1000_ICTXQEC);
+	rd32(E1000_ICTXQMTC);
+	rd32(E1000_ICRXDMTC);
+
+	rd32(E1000_CBTMPC);
+	rd32(E1000_HTDPMC);
+	rd32(E1000_CBRMPC);
+	rd32(E1000_RPTHC);
+	rd32(E1000_HGPTC);
+	rd32(E1000_HTCBDPC);
+	rd32(E1000_HGORCL);
+	rd32(E1000_HGORCH);
+	rd32(E1000_HGOTCL);
+	rd32(E1000_HGOTCH);
+	rd32(E1000_LENERRS);
+
+	/* This register should not be read in copper configurations */
+	if (hw->phy.media_type == e1000_media_type_internal_serdes ||
+	    igb_sgmii_active_82575(hw))
+		rd32(E1000_SCVPC);
+}
+
+/**
+ *  igb_rx_fifo_flush_82575 - Clean rx fifo after RX enable
+ *  @hw: pointer to the HW structure
+ *
+ *  After rx enable, if manageability is enabled, then there is likely some
+ *  bad data at the start of the fifo and possibly in the DMA fifo. This
+ *  function clears the fifos and flushes any packets that came in as rx was
+ *  being enabled.
+ **/
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw)
+{
+	u32 rctl, rlpml, rxdctl[4], rfctl, temp_rctl, rx_enabled;
+	int i, ms_wait;
+
+	/* disable IPv6 options as per hardware errata */
+	rfctl = rd32(E1000_RFCTL);
+	rfctl |= E1000_RFCTL_IPV6_EX_DIS;
+	wr32(E1000_RFCTL, rfctl);
+
+	if (hw->mac.type != e1000_82575 ||
+	    !(rd32(E1000_MANC) & E1000_MANC_RCV_TCO_EN))
+		return;
+
+	/* Disable all RX queues */
+	for (i = 0; i < 4; i++) {
+		rxdctl[i] = rd32(E1000_RXDCTL(i));
+		wr32(E1000_RXDCTL(i),
+		     rxdctl[i] & ~E1000_RXDCTL_QUEUE_ENABLE);
+	}
+	/* Poll all queues to verify they have shut down */
+	for (ms_wait = 0; ms_wait < 10; ms_wait++) {
+		usleep_range(1000, 2000);
+		rx_enabled = 0;
+		for (i = 0; i < 4; i++)
+			rx_enabled |= rd32(E1000_RXDCTL(i));
+		if (!(rx_enabled & E1000_RXDCTL_QUEUE_ENABLE))
+			break;
+	}
+
+	if (ms_wait == 10)
+		hw_dbg("Queue disable timed out after 10ms\n");
+
+	/* Clear RLPML, RCTL.SBP, RFCTL.LEF, and set RCTL.LPE so that all
+	 * incoming packets are rejected.  Set RCTL.EN and wait 2ms so that
+	 * any packets that were in flight while RCTL.EN was being set are
+	 * flushed
+	 */
+	wr32(E1000_RFCTL, rfctl & ~E1000_RFCTL_LEF);
+
+	rlpml = rd32(E1000_RLPML);
+	wr32(E1000_RLPML, 0);
+
+	rctl = rd32(E1000_RCTL);
+	temp_rctl = rctl & ~(E1000_RCTL_EN | E1000_RCTL_SBP);
+	temp_rctl |= E1000_RCTL_LPE;
+
+	wr32(E1000_RCTL, temp_rctl);
+	wr32(E1000_RCTL, temp_rctl | E1000_RCTL_EN);
+	wrfl();
+	usleep_range(2000, 3000);
+
+	/* Enable RX queues that were previously enabled and restore our
+	 * previous state
+	 */
+	for (i = 0; i < 4; i++)
+		wr32(E1000_RXDCTL(i), rxdctl[i]);
+	wr32(E1000_RCTL, rctl);
+	wrfl();
+
+	wr32(E1000_RLPML, rlpml);
+	wr32(E1000_RFCTL, rfctl);
+
+	/* Flush receive errors generated by workaround */
+	rd32(E1000_ROC);
+	rd32(E1000_RNBC);
+	rd32(E1000_MPC);
+}
+
+/**
+ *  igb_set_pcie_completion_timeout - set pci-e completion timeout
+ *  @hw: pointer to the HW structure
+ *
+ *  The defaults for 82575 and 82576 should be in the range of 50us to 50ms,
+ *  however the hardware default for these parts is 500us to 1ms, which is
+ *  less than the 10ms recommended by the pci-e spec.  To address this we
+ *  need to raise the value to somewhere between 10ms and 200ms for a
+ *  capability version 1 config, or between 16ms and 55ms for version 2.
+ **/
+static s32 igb_set_pcie_completion_timeout(struct e1000_hw *hw)
+{
+	u32 gcr = rd32(E1000_GCR);
+	s32 ret_val = 0;
+	u16 pcie_devctl2;
+
+	/* only take action if timeout value is defaulted to 0 */
+	if (gcr & E1000_GCR_CMPL_TMOUT_MASK)
+		goto out;
+
+	/* if capabilities version is type 1 we can write the
+	 * timeout of 10ms to 200ms through the GCR register
+	 */
+	if (!(gcr & E1000_GCR_CAP_VER2)) {
+		gcr |= E1000_GCR_CMPL_TMOUT_10ms;
+		goto out;
+	}
+
+	/* for version 2 capabilities we need to write the config space
+	 * directly in order to set the completion timeout value for
+	 * 16ms to 55ms
+	 */
+	ret_val = igb_read_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+					&pcie_devctl2);
+	if (ret_val)
+		goto out;
+
+	pcie_devctl2 |= PCIE_DEVICE_CONTROL2_16ms;
+
+	ret_val = igb_write_pcie_cap_reg(hw, PCIE_DEVICE_CONTROL2,
+					 &pcie_devctl2);
+out:
+	/* disable completion timeout resend */
+	gcr &= ~E1000_GCR_CMPL_TMOUT_RESEND;
+
+	wr32(E1000_GCR, gcr);
+	return ret_val;
+}
+
+/**
+ *  igb_vmdq_set_anti_spoofing_pf - enable or disable anti-spoofing
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *  @pf: Physical Function pool - do not set anti-spoofing for the PF
+ *
+ *  enables/disables L2 switch anti-spoofing functionality.
+ **/
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *hw, bool enable, int pf)
+{
+	u32 reg_val, reg_offset;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		reg_offset = E1000_DTXSWC;
+		break;
+	case e1000_i350:
+	case e1000_i354:
+		reg_offset = E1000_TXSWC;
+		break;
+	default:
+		return;
+	}
+
+	reg_val = rd32(reg_offset);
+	if (enable) {
+		reg_val |= (E1000_DTXSWC_MAC_SPOOF_MASK |
+			     E1000_DTXSWC_VLAN_SPOOF_MASK);
+		/* The PF can spoof - it has to in order to
+		 * support emulation mode NICs
+		 */
+		reg_val ^= (1 << pf | 1 << (pf + MAX_NUM_VFS));
+	} else {
+		reg_val &= ~(E1000_DTXSWC_MAC_SPOOF_MASK |
+			     E1000_DTXSWC_VLAN_SPOOF_MASK);
+	}
+	wr32(reg_offset, reg_val);
+}
+
+/**
+ *  igb_vmdq_set_loopback_pf - enable or disable vmdq loopback
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables L2 switch loopback functionality.
+ **/
+void igb_vmdq_set_loopback_pf(struct e1000_hw *hw, bool enable)
+{
+	u32 dtxswc;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		dtxswc = rd32(E1000_DTXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_DTXSWC, dtxswc);
+		break;
+	case e1000_i354:
+	case e1000_i350:
+		dtxswc = rd32(E1000_TXSWC);
+		if (enable)
+			dtxswc |= E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		else
+			dtxswc &= ~E1000_DTXSWC_VMDQ_LOOPBACK_EN;
+		wr32(E1000_TXSWC, dtxswc);
+		break;
+	default:
+		/* Currently no other hardware supports loopback */
+		break;
+	}
+}
+
+/**
+ *  igb_vmdq_set_replication_pf - enable or disable vmdq replication
+ *  @hw: pointer to the hardware struct
+ *  @enable: state to enter, either enabled or disabled
+ *
+ *  enables/disables replication of packets across multiple pools.
+ **/
+void igb_vmdq_set_replication_pf(struct e1000_hw *hw, bool enable)
+{
+	u32 vt_ctl = rd32(E1000_VT_CTL);
+
+	if (enable)
+		vt_ctl |= E1000_VT_CTL_VM_REPL_EN;
+	else
+		vt_ctl &= ~E1000_VT_CTL_VM_REPL_EN;
+
+	wr32(E1000_VT_CTL, vt_ctl);
+}
+
+/**
+ *  igb_read_phy_reg_82580 - Read 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+static s32 igb_read_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_82580 - Write 82580 MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+static s32 igb_write_phy_reg_82580(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_reset_mdicnfg_82580 - Reset MDICNFG destination and com_mdio bits
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the MDICNFG.Destination and MDICNFG.Com_MDIO bits based on
+ *  the values found in the EEPROM.  This addresses an issue in which these
+ *  bits are not restored from EEPROM after reset.
+ **/
+static s32 igb_reset_mdicnfg_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 mdicnfg;
+	u16 nvm_data = 0;
+
+	if (hw->mac.type != e1000_82580)
+		goto out;
+	if (!igb_sgmii_active_82575(hw))
+		goto out;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+				   NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+				   &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	mdicnfg = rd32(E1000_MDICNFG);
+	if (nvm_data & NVM_WORD24_EXT_MDIO)
+		mdicnfg |= E1000_MDICNFG_EXT_MDIO;
+	if (nvm_data & NVM_WORD24_COM_MDIO)
+		mdicnfg |= E1000_MDICNFG_COM_MDIO;
+	wr32(E1000_MDICNFG, mdicnfg);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_reset_hw_82580 - Reset hardware
+ *  @hw: pointer to the HW structure
+ *
+ *  This resets the function or the entire device (all ports, etc.)
+ *  to a known state.
+ **/
+static s32 igb_reset_hw_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	/* BH SW mailbox bit in SW_FW_SYNC */
+	u16 swmbsw_mask = E1000_SW_SYNCH_MB;
+	u32 ctrl;
+	bool global_device_reset = hw->dev_spec._82575.global_device_reset;
+
+	hw->dev_spec._82575.global_device_reset = false;
+
+	/* due to hw errata, global device reset doesn't always
+	 * work on 82580
+	 */
+	if (hw->mac.type == e1000_82580)
+		global_device_reset = false;
+
+	/* Get current control state. */
+	ctrl = rd32(E1000_CTRL);
+
+	/* Prevent the PCI-E bus from sticking if there is no TLP connection
+	 * on the last TLP read/write transaction when MAC is reset.
+	 */
+	ret_val = igb_disable_pcie_master(hw);
+	if (ret_val)
+		hw_dbg("PCI-E Master disable polling has failed.\n");
+
+	hw_dbg("Masking off all interrupts\n");
+	wr32(E1000_IMC, 0xffffffff);
+	wr32(E1000_RCTL, 0);
+	wr32(E1000_TCTL, E1000_TCTL_PSP);
+	wrfl();
+
+	usleep_range(10000, 11000);
+
+	/* Determine whether or not a global dev reset is requested */
+	if (global_device_reset &&
+		hw->mac.ops.acquire_swfw_sync(hw, swmbsw_mask))
+			global_device_reset = false;
+
+	if (global_device_reset &&
+		!(rd32(E1000_STATUS) & E1000_STAT_DEV_RST_SET))
+		ctrl |= E1000_CTRL_DEV_RST;
+	else
+		ctrl |= E1000_CTRL_RST;
+
+	wr32(E1000_CTRL, ctrl);
+	wrfl();
+
+	/* Add delay to ensure DEV_RST has time to complete */
+	if (global_device_reset)
+		usleep_range(5000, 6000);
+
+	ret_val = igb_get_auto_rd_done(hw);
+	if (ret_val) {
+		/* When auto config read does not complete, do not
+		 * return with an error. This can happen in situations
+		 * where there is no eeprom and prevents getting link.
+		 */
+		hw_dbg("Auto Read Done did not complete\n");
+	}
+
+	/* clear global device reset status bit */
+	wr32(E1000_STATUS, E1000_STAT_DEV_RST_SET);
+
+	/* Clear any pending interrupt events. */
+	wr32(E1000_IMC, 0xffffffff);
+	rd32(E1000_ICR);
+
+	ret_val = igb_reset_mdicnfg_82580(hw);
+	if (ret_val)
+		hw_dbg("Could not reset MDICNFG based on EEPROM\n");
+
+	/* Install any alternate MAC address into RAR0 */
+	ret_val = igb_check_alt_mac_addr(hw);
+
+	/* Release semaphore */
+	if (global_device_reset)
+		hw->mac.ops.release_swfw_sync(hw, swmbsw_mask);
+
+	return ret_val;
+}
+
+/**
+ *  igb_rxpbs_adjust_82580 - adjust RXPBS value to reflect actual RX PBA size
+ *  @data: data received by reading RXPBS register
+ *
+ *  The 82580 uses a table based approach for packet buffer allocation sizes.
+ *  This function converts the retrieved value into the correct table value
+ *     0x0 0x1 0x2 0x3 0x4 0x5 0x6 0x7
+ *  0x0 36  72 144   1   2   4   8  16
+ *  0x8 35  70 140 rsv rsv rsv rsv rsv
+ **/
+u16 igb_rxpbs_adjust_82580(u32 data)
+{
+	u16 ret_val = 0;
+
+	if (data < ARRAY_SIZE(e1000_82580_rxpbs_table))
+		ret_val = e1000_82580_rxpbs_table[data];
+
+	return ret_val;
+}
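+
+/* For reference only: a table definition consistent with the kernel-doc
+ * above would look like the sketch below (sizes in KB).  The actual
+ * array is defined elsewhere in the driver; these values are
+ * transcribed from the comment, not from that definition:
+ *
+ *	static const u16 e1000_82580_rxpbs_table[] = {
+ *		36, 72, 144, 1, 2, 4, 8, 16, 35, 70, 140 };
+ */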
+
+/**
+ *  igb_validate_nvm_checksum_with_offset - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_with_offset(struct e1000_hw *hw,
+						 u16 offset)
+{
+	s32 ret_val = 0;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = offset; i < ((NVM_CHECKSUM_REG + offset) + 1); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		hw_dbg("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
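+
+/* Illustrative sketch, not part of the driver: the invariant verified
+ * above is that the 16-bit words of the protected region, including the
+ * checksum word itself, sum to NVM_SUM (0xBABA) modulo 2^16.  Checking
+ * an already-read buffer (the "words" array is hypothetical) would be:
+ *
+ *	u16 sum = 0, i;
+ *
+ *	for (i = 0; i <= NVM_CHECKSUM_REG; i++)
+ *		sum += words[i];
+ *	valid = (sum == (u16)NVM_SUM);
+ */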
+
+/**
+ *  igb_update_nvm_checksum_with_offset - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *  @offset: offset in words of the checksum protected region
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_with_offset(struct e1000_hw *hw, u16 offset)
+{
+	s32 ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	for (i = offset; i < (NVM_CHECKSUM_REG + offset); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = hw->nvm.ops.write(hw, (NVM_CHECKSUM_REG + offset), 1,
+				&checksum);
+	if (ret_val)
+		hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_validate_nvm_checksum_82580 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_82580(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 eeprom_regions_count = 1;
+	u16 j, nvm_data;
+	u16 nvm_offset;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (nvm_data & NVM_COMPATIBILITY_BIT_MASK) {
+		/* if checksums compatibility bit is set validate checksums
+		 * for all 4 ports.
+		 */
+		eeprom_regions_count = 4;
+	}
+
+	for (j = 0; j < eeprom_regions_count; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_validate_nvm_checksum_with_offset(hw,
+								nvm_offset);
+		if (ret_val != 0)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_82580 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_82580(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 j, nvm_data;
+	u16 nvm_offset;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_COMPATIBILITY_REG_3, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error while updating checksum compatibility bit.\n");
+		goto out;
+	}
+
+	if ((nvm_data & NVM_COMPATIBILITY_BIT_MASK) == 0) {
+		/* set compatibility bit to validate checksums appropriately */
+		nvm_data = nvm_data | NVM_COMPATIBILITY_BIT_MASK;
+		ret_val = hw->nvm.ops.write(hw, NVM_COMPATIBILITY_REG_3, 1,
+					&nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Write Error while updating checksum compatibility bit.\n");
+			goto out;
+		}
+	}
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_validate_nvm_checksum_i350 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM section checksum by reading/adding each word of
+ *  the EEPROM and then verifies that the sum of the EEPROM is
+ *  equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_i350(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 j;
+	u16 nvm_offset;
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_validate_nvm_checksum_with_offset(hw,
+								nvm_offset);
+		if (ret_val != 0)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum_i350 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM section checksums for all 4 ports by reading/adding
+ *  each word of the EEPROM up to the checksum.  Then calculates the EEPROM
+ *  checksum and writes the value to the EEPROM.
+ **/
+static s32 igb_update_nvm_checksum_i350(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 j;
+	u16 nvm_offset;
+
+	for (j = 0; j < 4; j++) {
+		nvm_offset = NVM_82580_LAN_FUNC_OFFSET(j);
+		ret_val = igb_update_nvm_checksum_with_offset(hw, nvm_offset);
+		if (ret_val != 0)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  __igb_access_emi_reg - Read/write EMI register
+ *  @hw: pointer to the HW structure
+ *  @address: EMI address to program
+ *  @data: pointer to value to read/write from/to the EMI address
+ *  @read: boolean flag to indicate read or write
+ **/
+static s32 __igb_access_emi_reg(struct e1000_hw *hw, u16 address,
+				  u16 *data, bool read)
+{
+	s32 ret_val = 0;
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_EMIADD, address);
+	if (ret_val)
+		return ret_val;
+
+	if (read)
+		ret_val = hw->phy.ops.read_reg(hw, E1000_EMIDATA, data);
+	else
+		ret_val = hw->phy.ops.write_reg(hw, E1000_EMIDATA, *data);
+
+	return ret_val;
+}
+
+/**
+ *  igb_read_emi_reg - Read Extended Management Interface register
+ *  @hw: pointer to the HW structure
+ *  @addr: EMI address to program
+ *  @data: pointer to storage for the value read from the EMI address
+ **/
+s32 igb_read_emi_reg(struct e1000_hw *hw, u16 addr, u16 *data)
+{
+	return __igb_access_emi_reg(hw, addr, data, true);
+}
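+
+/* Illustrative sketch, not part of this driver: a write-side wrapper
+ * would mirror the read path through __igb_access_emi_reg(), which
+ * programs E1000_EMIADD and then accesses E1000_EMIDATA:
+ *
+ *	s32 igb_write_emi_reg(struct e1000_hw *hw, u16 addr, u16 data)
+ *	{
+ *		return __igb_access_emi_reg(hw, addr, &data, false);
+ *	}
+ */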
+
+/**
+ *  igb_set_eee_i350 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *  @adv1G: boolean flag enabling 1G EEE advertisement
+ *  @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ *  Enable/disable EEE based on setting in dev_spec structure.
+ *
+ **/
+s32 igb_set_eee_i350(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+	u32 ipcnfg, eeer;
+
+	if ((hw->mac.type < e1000_i350) ||
+	    (hw->phy.media_type != e1000_media_type_copper))
+		goto out;
+	ipcnfg = rd32(E1000_IPCNFG);
+	eeer = rd32(E1000_EEER);
+
+	/* enable or disable per user setting */
+	if (!(hw->dev_spec._82575.eee_disable)) {
+		u32 eee_su = rd32(E1000_EEE_SU);
+
+		if (adv100M)
+			ipcnfg |= E1000_IPCNFG_EEE_100M_AN;
+		else
+			ipcnfg &= ~E1000_IPCNFG_EEE_100M_AN;
+
+		if (adv1G)
+			ipcnfg |= E1000_IPCNFG_EEE_1G_AN;
+		else
+			ipcnfg &= ~E1000_IPCNFG_EEE_1G_AN;
+
+		eeer |= (E1000_EEER_TX_LPI_EN | E1000_EEER_RX_LPI_EN |
+			E1000_EEER_LPI_FC);
+
+		/* This bit should not be set in normal operation. */
+		if (eee_su & E1000_EEE_SU_LPI_CLK_STP)
+			hw_dbg("LPI Clock Stop Bit should not be set!\n");
+
+	} else {
+		ipcnfg &= ~(E1000_IPCNFG_EEE_1G_AN |
+			E1000_IPCNFG_EEE_100M_AN);
+		eeer &= ~(E1000_EEER_TX_LPI_EN |
+			E1000_EEER_RX_LPI_EN |
+			E1000_EEER_LPI_FC);
+	}
+	wr32(E1000_IPCNFG, ipcnfg);
+	wr32(E1000_EEER, eeer);
+	rd32(E1000_IPCNFG);
+	rd32(E1000_EEER);
+out:
+
+	return 0;
+}
+
+/**
+ *  igb_set_eee_i354 - Enable/disable EEE support
+ *  @hw: pointer to the HW structure
+ *  @adv1G: boolean flag enabling 1G EEE advertisement
+ *  @adv100M: boolean flag enabling 100M EEE advertisement
+ *
+ *  Enable/disable EEE legacy mode based on setting in dev_spec structure.
+ *
+ **/
+s32 igb_set_eee_i354(struct e1000_hw *hw, bool adv1G, bool adv100M)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data;
+
+	if ((hw->phy.media_type != e1000_media_type_copper) ||
+	    (phy->id != M88E1543_E_PHY_ID))
+		goto out;
+
+	if (!hw->dev_spec._82575.eee_disable) {
+		/* Switch to PHY page 18. */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 18);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= E1000_M88E1543_EEE_CTRL_1_MS;
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_EEE_CTRL_1,
+					     phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Return the PHY to page 0. */
+		ret_val = phy->ops.write_reg(hw, E1000_M88E1543_PAGE_ADDR, 0);
+		if (ret_val)
+			goto out;
+
+		/* Turn on EEE advertisement. */
+		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					     E1000_EEE_ADV_DEV_I354,
+					     &phy_data);
+		if (ret_val)
+			goto out;
+
+		if (adv100M)
+			phy_data |= E1000_EEE_ADV_100_SUPPORTED;
+		else
+			phy_data &= ~E1000_EEE_ADV_100_SUPPORTED;
+
+		if (adv1G)
+			phy_data |= E1000_EEE_ADV_1000_SUPPORTED;
+		else
+			phy_data &= ~E1000_EEE_ADV_1000_SUPPORTED;
+
+		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+						E1000_EEE_ADV_DEV_I354,
+						phy_data);
+	} else {
+		/* Turn off EEE advertisement. */
+		ret_val = igb_read_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					     E1000_EEE_ADV_DEV_I354,
+					     &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data &= ~(E1000_EEE_ADV_100_SUPPORTED |
+			      E1000_EEE_ADV_1000_SUPPORTED);
+		ret_val = igb_write_xmdio_reg(hw, E1000_EEE_ADV_ADDR_I354,
+					      E1000_EEE_ADV_DEV_I354,
+					      phy_data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_eee_status_i354 - Get EEE status
+ *  @hw: pointer to the HW structure
+ *  @status: EEE status
+ *
+ *  Get EEE status by guessing based on whether Tx or Rx LPI indications have
+ *  been received.
+ **/
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data;
+
+	/* Check if EEE is supported on this device. */
+	if ((hw->phy.media_type != e1000_media_type_copper) ||
+	    (phy->id != M88E1543_E_PHY_ID))
+		goto out;
+
+	ret_val = igb_read_xmdio_reg(hw, E1000_PCS_STATUS_ADDR_I354,
+				     E1000_PCS_STATUS_DEV_I354,
+				     &phy_data);
+	if (ret_val)
+		goto out;
+
+	*status = phy_data & (E1000_PCS_STATUS_TX_LPI_RCVD |
+			      E1000_PCS_STATUS_RX_LPI_RCVD) ? true : false;
+
+out:
+	return ret_val;
+}
+
+static const u8 e1000_emc_temp_data[4] = {
+	E1000_EMC_INTERNAL_DATA,
+	E1000_EMC_DIODE1_DATA,
+	E1000_EMC_DIODE2_DATA,
+	E1000_EMC_DIODE3_DATA
+};
+static const u8 e1000_emc_therm_limit[4] = {
+	E1000_EMC_INTERNAL_THERM_LIMIT,
+	E1000_EMC_DIODE1_THERM_LIMIT,
+	E1000_EMC_DIODE2_THERM_LIMIT,
+	E1000_EMC_DIODE3_THERM_LIMIT
+};
+
+#ifdef CONFIG_IGB_HWMON
+/**
+ *  igb_get_thermal_sensor_data_generic - Gathers thermal sensor data
+ *  @hw: pointer to hardware structure
+ *
+ *  Updates the temperatures in mac.thermal_sensor_data
+ **/
+static s32 igb_get_thermal_sensor_data_generic(struct e1000_hw *hw)
+{
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  num_sensors;
+	u8  sensor_index;
+	u8  sensor_location;
+	u8  i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	data->sensor[0].temp = (rd32(E1000_THMJT) & 0xFF);
+
+	/* Return the internal sensor only if ETS is unsupported */
+	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+		return 0;
+
+	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+	    != NVM_ETS_TYPE_EMC)
+		return E1000_NOT_IMPLEMENTED;
+
+	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+	if (num_sensors > E1000_MAX_SENSORS)
+		num_sensors = E1000_MAX_SENSORS;
+
+	for (i = 1; i < num_sensors; i++) {
+		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+				NVM_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+				   NVM_ETS_DATA_LOC_SHIFT);
+
+		if (sensor_location != 0)
+			hw->phy.ops.read_i2c_byte(hw,
+					e1000_emc_temp_data[sensor_index],
+					E1000_I2C_THERMAL_SENSOR_ADDR,
+					&data->sensor[i].temp);
+	}
+	return 0;
+}
+
+/**
+ *  igb_init_thermal_sensor_thresh_generic - Sets thermal sensor thresholds
+ *  @hw: pointer to hardware structure
+ *
+ *  Sets the thermal sensor thresholds according to the NVM map
+ *  and saves the threshold and location values into mac.thermal_sensor_data
+ **/
+static s32 igb_init_thermal_sensor_thresh_generic(struct e1000_hw *hw)
+{
+	u16 ets_offset;
+	u16 ets_cfg;
+	u16 ets_sensor;
+	u8  low_thresh_delta;
+	u8  num_sensors;
+	u8  sensor_index;
+	u8  sensor_location;
+	u8  therm_limit;
+	u8  i;
+	struct e1000_thermal_sensor_data *data = &hw->mac.thermal_sensor_data;
+
+	if ((hw->mac.type != e1000_i350) || (hw->bus.func != 0))
+		return E1000_NOT_IMPLEMENTED;
+
+	memset(data, 0, sizeof(struct e1000_thermal_sensor_data));
+
+	data->sensor[0].location = 0x1;
+	data->sensor[0].caution_thresh =
+		(rd32(E1000_THHIGHTC) & 0xFF);
+	data->sensor[0].max_op_thresh =
+		(rd32(E1000_THLOWTC) & 0xFF);
+
+	/* Return the internal sensor only if ETS is unsupported */
+	hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_offset);
+	if ((ets_offset == 0x0000) || (ets_offset == 0xFFFF))
+		return 0;
+
+	hw->nvm.ops.read(hw, ets_offset, 1, &ets_cfg);
+	if (((ets_cfg & NVM_ETS_TYPE_MASK) >> NVM_ETS_TYPE_SHIFT)
+	    != NVM_ETS_TYPE_EMC)
+		return E1000_NOT_IMPLEMENTED;
+
+	low_thresh_delta = ((ets_cfg & NVM_ETS_LTHRES_DELTA_MASK) >>
+			    NVM_ETS_LTHRES_DELTA_SHIFT);
+	num_sensors = (ets_cfg & NVM_ETS_NUM_SENSORS_MASK);
+
+	for (i = 1; i <= num_sensors; i++) {
+		hw->nvm.ops.read(hw, (ets_offset + i), 1, &ets_sensor);
+		sensor_index = ((ets_sensor & NVM_ETS_DATA_INDEX_MASK) >>
+				NVM_ETS_DATA_INDEX_SHIFT);
+		sensor_location = ((ets_sensor & NVM_ETS_DATA_LOC_MASK) >>
+				   NVM_ETS_DATA_LOC_SHIFT);
+		therm_limit = ets_sensor & NVM_ETS_DATA_HTHRESH_MASK;
+
+		hw->phy.ops.write_i2c_byte(hw,
+			e1000_emc_therm_limit[sensor_index],
+			E1000_I2C_THERMAL_SENSOR_ADDR,
+			therm_limit);
+
+		if ((i < E1000_MAX_SENSORS) && (sensor_location != 0)) {
+			data->sensor[i].location = sensor_location;
+			data->sensor[i].caution_thresh = therm_limit;
+			data->sensor[i].max_op_thresh = therm_limit -
+							low_thresh_delta;
+		}
+	}
+	return 0;
+}
+
+#endif
+static struct e1000_mac_operations e1000_mac_ops_82575 = {
+	.init_hw              = igb_init_hw_82575,
+	.check_for_link       = igb_check_for_link_82575,
+	.rar_set              = igb_rar_set,
+	.read_mac_addr        = igb_read_mac_addr_82575,
+	.get_speed_and_duplex = igb_get_link_up_info_82575,
+#ifdef CONFIG_IGB_HWMON
+	.get_thermal_sensor_data = igb_get_thermal_sensor_data_generic,
+	.init_thermal_sensor_thresh = igb_init_thermal_sensor_thresh_generic,
+#endif
+};
+
+static struct e1000_phy_operations e1000_phy_ops_82575 = {
+	.acquire              = igb_acquire_phy_82575,
+	.get_cfg_done         = igb_get_cfg_done_82575,
+	.release              = igb_release_phy_82575,
+	.write_i2c_byte       = igb_write_i2c_byte,
+	.read_i2c_byte        = igb_read_i2c_byte,
+};
+
+static struct e1000_nvm_operations e1000_nvm_ops_82575 = {
+	.acquire              = igb_acquire_nvm_82575,
+	.read                 = igb_read_nvm_eerd,
+	.release              = igb_release_nvm_82575,
+	.write                = igb_write_nvm_spi,
+};
+
+const struct e1000_info e1000_82575_info = {
+	.get_invariants = igb_get_invariants_82575,
+	.mac_ops = &e1000_mac_ops_82575,
+	.phy_ops = &e1000_phy_ops_82575,
+	.nvm_ops = &e1000_nvm_ops_82575,
+};
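+
+/* Illustrative sketch, not part of the driver: the core driver reaches
+ * this family-specific code through the ops tables rather than by name.
+ * Assuming get_invariants has copied the tables into struct e1000_hw,
+ * initialization dispatches as:
+ *
+ *	ret_val = hw->mac.ops.init_hw(hw);   (calls igb_init_hw_82575)
+ *	if (!ret_val)
+ *		ret_val = hw->mac.ops.check_for_link(hw);
+ */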
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h
new file mode 100644
index 0000000..db4e9f4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_82575.h
@@ -0,0 +1,280 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_82575_H_
+#define _E1000_82575_H_
+
+void igb_shutdown_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_up_serdes_link_82575(struct e1000_hw *hw);
+void igb_power_down_phy_copper_82575(struct e1000_hw *hw);
+void igb_rx_fifo_flush_82575(struct e1000_hw *hw);
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+		      u8 *data);
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset, u8 dev_addr,
+		       u8 data);
+
+#define ID_LED_DEFAULT_82575_SERDES ((ID_LED_DEF1_DEF2 << 12) | \
+				     (ID_LED_DEF1_DEF2 <<  8) | \
+				     (ID_LED_DEF1_DEF2 <<  4) | \
+				     (ID_LED_OFF1_ON2))
+
+#define E1000_RAR_ENTRIES_82575        16
+#define E1000_RAR_ENTRIES_82576        24
+#define E1000_RAR_ENTRIES_82580        24
+#define E1000_RAR_ENTRIES_I350         32
+
+#define E1000_SW_SYNCH_MB              0x00000100
+#define E1000_STAT_DEV_RST_SET         0x00100000
+#define E1000_CTRL_DEV_RST             0x20000000
+
+/* SRRCTL bit definitions */
+#define E1000_SRRCTL_BSIZEPKT_SHIFT                     10 /* Shift _right_ */
+#define E1000_SRRCTL_BSIZEHDRSIZE_SHIFT                 2  /* Shift _left_ */
+#define E1000_SRRCTL_DESCTYPE_ADV_ONEBUF                0x02000000
+#define E1000_SRRCTL_DESCTYPE_HDR_SPLIT_ALWAYS          0x0A000000
+#define E1000_SRRCTL_DROP_EN                            0x80000000
+#define E1000_SRRCTL_TIMESTAMP                          0x40000000
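+
+/* Illustrative sketch, not part of the driver: BSIZEPKT is programmed
+ * in 1 KB units (hence the right shift by 10 noted above).  With an
+ * assumed rx_buf_len in bytes, reg_idx queue index, and the wr32()
+ * accessor used elsewhere in this driver, one-buffer setup would be:
+ *
+ *	u32 srrctl = ALIGN(rx_buf_len, 1024) >> E1000_SRRCTL_BSIZEPKT_SHIFT;
+ *
+ *	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+ *	wr32(E1000_SRRCTL(reg_idx), srrctl);
+ */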
+
+#define E1000_MRQC_ENABLE_RSS_4Q            0x00000002
+#define E1000_MRQC_ENABLE_VMDQ              0x00000003
+#define E1000_MRQC_RSS_FIELD_IPV4_UDP       0x00400000
+#define E1000_MRQC_ENABLE_VMDQ_RSS_2Q       0x00000005
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP       0x00800000
+#define E1000_MRQC_RSS_FIELD_IPV6_UDP_EX    0x01000000
+
+#define E1000_EICR_TX_QUEUE ( \
+	E1000_EICR_TX_QUEUE0 |    \
+	E1000_EICR_TX_QUEUE1 |    \
+	E1000_EICR_TX_QUEUE2 |    \
+	E1000_EICR_TX_QUEUE3)
+
+#define E1000_EICR_RX_QUEUE ( \
+	E1000_EICR_RX_QUEUE0 |    \
+	E1000_EICR_RX_QUEUE1 |    \
+	E1000_EICR_RX_QUEUE2 |    \
+	E1000_EICR_RX_QUEUE3)
+
+/* Immediate Interrupt Rx (A.K.A. Low Latency Interrupt) */
+#define E1000_IMIREXT_SIZE_BP     0x00001000  /* Packet size bypass */
+#define E1000_IMIREXT_CTRL_BP     0x00080000  /* Bypass check of ctrl bits */
+
+/* Receive Descriptor - Advanced */
+union e1000_adv_rx_desc {
+	struct {
+		__le64 pkt_addr;             /* Packet buffer address */
+		__le64 hdr_addr;             /* Header buffer address */
+	} read;
+	struct {
+		struct {
+			struct {
+				__le16 pkt_info;   /* RSS type, Packet type */
+				__le16 hdr_info;   /* Split Head, buf len */
+			} lo_dword;
+			union {
+				__le32 rss;          /* RSS Hash */
+				struct {
+					__le16 ip_id;    /* IP id */
+					__le16 csum;     /* Packet Checksum */
+				} csum_ip;
+			} hi_dword;
+		} lower;
+		struct {
+			__le32 status_error;     /* ext status/error */
+			__le16 length;           /* Packet length */
+			__le16 vlan;             /* VLAN tag */
+		} upper;
+	} wb;  /* writeback */
+};
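+
+/* Illustrative sketch, not part of the driver: on writeback the device
+ * overwrites the same 16 bytes, so completion is detected by testing
+ * the descriptor-done bit in wb.upper.status_error.  E1000_RXD_STAT_DD
+ * is the standard DD bit from e1000_defines.h; process() is
+ * hypothetical:
+ *
+ *	u32 staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+ *
+ *	if (staterr & E1000_RXD_STAT_DD)
+ *		process(rx_desc, le16_to_cpu(rx_desc->wb.upper.length));
+ */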
+
+#define E1000_RXDADV_HDRBUFLEN_MASK      0x7FE0
+#define E1000_RXDADV_HDRBUFLEN_SHIFT     5
+#define E1000_RXDADV_STAT_TS             0x10000 /* Pkt was time stamped */
+#define E1000_RXDADV_STAT_TSIP           0x08000 /* timestamp in packet */
+
+/* Transmit Descriptor - Advanced */
+union e1000_adv_tx_desc {
+	struct {
+		__le64 buffer_addr;    /* Address of descriptor's data buf */
+		__le32 cmd_type_len;
+		__le32 olinfo_status;
+	} read;
+	struct {
+		__le64 rsvd;       /* Reserved */
+		__le32 nxtseq_seed;
+		__le32 status;
+	} wb;
+};
+
+/* Adv Transmit Descriptor Config Masks */
+#define E1000_ADVTXD_MAC_TSTAMP   0x00080000 /* IEEE1588 Timestamp packet */
+#define E1000_ADVTXD_DTYP_CTXT    0x00200000 /* Advanced Context Descriptor */
+#define E1000_ADVTXD_DTYP_DATA    0x00300000 /* Advanced Data Descriptor */
+#define E1000_ADVTXD_DCMD_EOP     0x01000000 /* End of Packet */
+#define E1000_ADVTXD_DCMD_IFCS    0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_ADVTXD_DCMD_RS      0x08000000 /* Report Status */
+#define E1000_ADVTXD_DCMD_DEXT    0x20000000 /* Descriptor extension (1=Adv) */
+#define E1000_ADVTXD_DCMD_VLE     0x40000000 /* VLAN pkt enable */
+#define E1000_ADVTXD_DCMD_TSE     0x80000000 /* TCP Seg enable */
+#define E1000_ADVTXD_PAYLEN_SHIFT    14 /* Adv desc PAYLEN shift */
+
+/* Context descriptors */
+struct e1000_adv_tx_context_desc {
+	__le32 vlan_macip_lens;
+	__le32 seqnum_seed;
+	__le32 type_tucmd_mlhl;
+	__le32 mss_l4len_idx;
+};
+
+#define E1000_ADVTXD_MACLEN_SHIFT    9  /* Adv ctxt desc mac len shift */
+#define E1000_ADVTXD_TUCMD_IPV4    0x00000400  /* IP Packet Type: 1=IPv4 */
+#define E1000_ADVTXD_TUCMD_L4T_TCP 0x00000800  /* L4 Packet TYPE of TCP */
+#define E1000_ADVTXD_TUCMD_L4T_SCTP 0x00001000 /* L4 packet TYPE of SCTP */
+/* IPSec Encrypt Enable for ESP */
+#define E1000_ADVTXD_L4LEN_SHIFT     8  /* Adv ctxt L4LEN shift */
+#define E1000_ADVTXD_MSS_SHIFT      16  /* Adv ctxt MSS shift */
+/* Adv ctxt IPSec SA IDX mask */
+/* Adv ctxt IPSec ESP len mask */
+
+/* Additional Transmit Descriptor Control definitions */
+#define E1000_TXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Tx Queue */
+/* Tx Queue Arbitration Priority 0=low, 1=high */
+
+/* Additional Receive Descriptor Control definitions */
+#define E1000_RXDCTL_QUEUE_ENABLE  0x02000000 /* Enable specific Rx Queue */
+
+/* Direct Cache Access (DCA) definitions */
+#define E1000_DCA_CTRL_DCA_MODE_DISABLE 0x01 /* DCA Disable */
+#define E1000_DCA_CTRL_DCA_MODE_CB2     0x02 /* DCA Mode CB2 */
+
+#define E1000_DCA_RXCTRL_CPUID_MASK 0x0000001F /* Rx CPUID Mask */
+#define E1000_DCA_RXCTRL_DESC_DCA_EN (1 << 5) /* DCA Rx Desc enable */
+#define E1000_DCA_RXCTRL_HEAD_DCA_EN (1 << 6) /* DCA Rx Desc header enable */
+#define E1000_DCA_RXCTRL_DATA_DCA_EN (1 << 7) /* DCA Rx Desc payload enable */
+#define E1000_DCA_RXCTRL_DESC_RRO_EN (1 << 9) /* DCA Rx rd Desc Relax Order */
+
+#define E1000_DCA_TXCTRL_CPUID_MASK 0x0000001F /* Tx CPUID Mask */
+#define E1000_DCA_TXCTRL_DESC_DCA_EN (1 << 5) /* DCA Tx Desc enable */
+#define E1000_DCA_TXCTRL_DESC_RRO_EN (1 << 9) /* Tx rd Desc Relax Order */
+#define E1000_DCA_TXCTRL_TX_WB_RO_EN (1 << 11) /* Tx Desc writeback RO bit */
+#define E1000_DCA_TXCTRL_DATA_RRO_EN (1 << 13) /* Tx rd data Relax Order */
+
+/* Additional DCA related definitions, note change in position of CPUID */
+#define E1000_DCA_TXCTRL_CPUID_MASK_82576 0xFF000000 /* Tx CPUID Mask */
+#define E1000_DCA_RXCTRL_CPUID_MASK_82576 0xFF000000 /* Rx CPUID Mask */
+#define E1000_DCA_TXCTRL_CPUID_SHIFT 24 /* Tx CPUID now in the last byte */
+#define E1000_DCA_RXCTRL_CPUID_SHIFT 24 /* Rx CPUID now in the last byte */
+
+/* ETQF register bit definitions */
+#define E1000_ETQF_FILTER_ENABLE   (1 << 26)
+#define E1000_ETQF_1588            (1 << 30)
+
+/* FTQF register bit definitions */
+#define E1000_FTQF_VF_BP               0x00008000
+#define E1000_FTQF_1588_TIME_STAMP     0x08000000
+#define E1000_FTQF_MASK                0xF0000000
+#define E1000_FTQF_MASK_PROTO_BP       0x10000000
+#define E1000_FTQF_MASK_SOURCE_PORT_BP 0x80000000
+
+#define E1000_NVM_APME_82575          0x0400
+#define MAX_NUM_VFS                   8
+
+#define E1000_DTXSWC_MAC_SPOOF_MASK   0x000000FF /* Per VF MAC spoof control */
+#define E1000_DTXSWC_VLAN_SPOOF_MASK  0x0000FF00 /* Per VF VLAN spoof control */
+#define E1000_DTXSWC_LLE_MASK         0x00FF0000 /* Per VF Local LB enables */
+#define E1000_DTXSWC_VLAN_SPOOF_SHIFT 8
+#define E1000_DTXSWC_VMDQ_LOOPBACK_EN (1 << 31)  /* global VF LB enable */
+
+/* Easy defines for setting default pool; would normally be left at zero */
+#define E1000_VT_CTL_DEFAULT_POOL_SHIFT 7
+#define E1000_VT_CTL_DEFAULT_POOL_MASK  (0x7 << E1000_VT_CTL_DEFAULT_POOL_SHIFT)
+
+/* Other useful VMD_CTL register defines */
+#define E1000_VT_CTL_IGNORE_MAC         (1 << 28)
+#define E1000_VT_CTL_DISABLE_DEF_POOL   (1 << 29)
+#define E1000_VT_CTL_VM_REPL_EN         (1 << 30)
+
+/* Per VM Offload register setup */
+#define E1000_VMOLR_RLPML_MASK 0x00003FFF /* Long Packet Maximum Length mask */
+#define E1000_VMOLR_LPE        0x00010000 /* Accept Long packet */
+#define E1000_VMOLR_RSSE       0x00020000 /* Enable RSS */
+#define E1000_VMOLR_AUPE       0x01000000 /* Accept untagged packets */
+#define E1000_VMOLR_ROMPE      0x02000000 /* Accept overflow multicast */
+#define E1000_VMOLR_ROPE       0x04000000 /* Accept overflow unicast */
+#define E1000_VMOLR_BAM        0x08000000 /* Accept Broadcast packets */
+#define E1000_VMOLR_MPME       0x10000000 /* Multicast promiscuous mode */
+#define E1000_VMOLR_STRVLAN    0x40000000 /* Vlan stripping enable */
+#define E1000_VMOLR_STRCRC     0x80000000 /* CRC stripping enable */
+
+#define E1000_DVMOLR_HIDEVLAN  0x20000000 /* Hide vlan enable */
+#define E1000_DVMOLR_STRVLAN   0x40000000 /* Vlan stripping enable */
+#define E1000_DVMOLR_STRCRC    0x80000000 /* CRC stripping enable */
+
+#define E1000_VLVF_ARRAY_SIZE     32
+#define E1000_VLVF_VLANID_MASK    0x00000FFF
+#define E1000_VLVF_POOLSEL_SHIFT  12
+#define E1000_VLVF_POOLSEL_MASK   (0xFF << E1000_VLVF_POOLSEL_SHIFT)
+#define E1000_VLVF_LVLAN          0x00100000
+#define E1000_VLVF_VLANID_ENABLE  0x80000000
+
+#define E1000_VMVIR_VLANA_DEFAULT      0x40000000 /* Always use default VLAN */
+#define E1000_VMVIR_VLANA_NEVER        0x80000000 /* Never insert VLAN tag */
+
+#define E1000_IOVCTL 0x05BBC
+#define E1000_IOVCTL_REUSE_VFQ 0x00000001
+
+#define E1000_RPLOLR_STRVLAN   0x40000000
+#define E1000_RPLOLR_STRCRC    0x80000000
+
+#define E1000_DTXCTL_8023LL     0x0004
+#define E1000_DTXCTL_VLAN_ADDED 0x0008
+#define E1000_DTXCTL_OOS_ENABLE 0x0010
+#define E1000_DTXCTL_MDP_EN     0x0020
+#define E1000_DTXCTL_SPOOF_INT  0x0040
+
+#define E1000_EEPROM_PCS_AUTONEG_DISABLE_BIT	(1 << 14)
+
+#define ALL_QUEUES   0xFFFF
+
+/* RX packet buffer size defines */
+#define E1000_RXPBS_SIZE_MASK_82576  0x0000007F
+void igb_vmdq_set_anti_spoofing_pf(struct e1000_hw *, bool, int);
+void igb_vmdq_set_loopback_pf(struct e1000_hw *, bool);
+void igb_vmdq_set_replication_pf(struct e1000_hw *, bool);
+u16 igb_rxpbs_adjust_82580(u32 data);
+s32 igb_read_emi_reg(struct e1000_hw *, u16 addr, u16 *data);
+s32 igb_set_eee_i350(struct e1000_hw *, bool adv1G, bool adv100M);
+s32 igb_set_eee_i354(struct e1000_hw *, bool adv1G, bool adv100M);
+s32 igb_get_eee_status_i354(struct e1000_hw *hw, bool *status);
+
+#define E1000_I2C_THERMAL_SENSOR_ADDR	0xF8
+#define E1000_EMC_INTERNAL_DATA		0x00
+#define E1000_EMC_INTERNAL_THERM_LIMIT	0x20
+#define E1000_EMC_DIODE1_DATA		0x01
+#define E1000_EMC_DIODE1_THERM_LIMIT	0x19
+#define E1000_EMC_DIODE2_DATA		0x23
+#define E1000_EMC_DIODE2_THERM_LIMIT	0x1A
+#define E1000_EMC_DIODE3_DATA		0x2A
+#define E1000_EMC_DIODE3_THERM_LIMIT	0x30
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h
new file mode 100644
index 0000000..1002cbc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_defines.h
@@ -0,0 +1,1018 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_DEFINES_H_
+#define _E1000_DEFINES_H_
+
+/* Number of Transmit and Receive Descriptors must be a multiple of 8 */
+#define REQ_TX_DESCRIPTOR_MULTIPLE  8
+#define REQ_RX_DESCRIPTOR_MULTIPLE  8
+
+/* Definitions for power management and wakeup registers */
+/* Wake Up Control */
+#define E1000_WUC_PME_EN     0x00000002 /* PME Enable */
+
+/* Wake Up Filter Control */
+#define E1000_WUFC_LNKC 0x00000001 /* Link Status Change Wakeup Enable */
+#define E1000_WUFC_MAG  0x00000002 /* Magic Packet Wakeup Enable */
+#define E1000_WUFC_EX   0x00000004 /* Directed Exact Wakeup Enable */
+#define E1000_WUFC_MC   0x00000008 /* Directed Multicast Wakeup Enable */
+#define E1000_WUFC_BC   0x00000010 /* Broadcast Wakeup Enable */
+
+/* Extended Device Control */
+#define E1000_CTRL_EXT_SDP2_DATA 0x00000040 /* Value of SW Definable Pin 2 */
+#define E1000_CTRL_EXT_SDP3_DATA 0x00000080 /* Value of SW Definable Pin 3 */
+#define E1000_CTRL_EXT_SDP2_DIR  0x00000400 /* SDP2 Data direction */
+#define E1000_CTRL_EXT_SDP3_DIR  0x00000800 /* SDP3 Data direction */
+
+/* Physical Func Reset Done Indication */
+#define E1000_CTRL_EXT_PFRSTD	0x00004000
+#define E1000_CTRL_EXT_SDLPE	0x00040000  /* SerDes Low Power Enable */
+#define E1000_CTRL_EXT_LINK_MODE_MASK	0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES	0x00C00000
+#define E1000_CTRL_EXT_LINK_MODE_1000BASE_KX	0x00400000
+#define E1000_CTRL_EXT_LINK_MODE_SGMII	0x00800000
+#define E1000_CTRL_EXT_LINK_MODE_GMII	0x00000000
+#define E1000_CTRL_EXT_EIAME	0x01000000
+#define E1000_CTRL_EXT_IRCA		0x00000001
+/* Interrupt delay cancellation */
+/* Driver loaded bit for FW */
+#define E1000_CTRL_EXT_DRV_LOAD       0x10000000
+/* Interrupt acknowledge Auto-mask */
+/* Clear Interrupt timers after IMS clear */
+/* packet buffer parity error detection enabled */
+/* descriptor FIFO parity error detection enable */
+#define E1000_CTRL_EXT_PBA_CLR		0x80000000 /* PBA Clear */
+#define E1000_CTRL_EXT_PHYPDEN		0x00100000
+#define E1000_I2CCMD_REG_ADDR_SHIFT	16
+#define E1000_I2CCMD_PHY_ADDR_SHIFT	24
+#define E1000_I2CCMD_OPCODE_READ	0x08000000
+#define E1000_I2CCMD_OPCODE_WRITE	0x00000000
+#define E1000_I2CCMD_READY		0x20000000
+#define E1000_I2CCMD_ERROR		0x80000000
+#define E1000_I2CCMD_SFP_DATA_ADDR(a)	(0x0000 + (a))
+#define E1000_I2CCMD_SFP_DIAG_ADDR(a)	(0x0100 + (a))
+#define E1000_MAX_SGMII_PHY_REG_ADDR	255
+#define E1000_I2CCMD_PHY_TIMEOUT	200
+#define E1000_IVAR_VALID		0x80
+#define E1000_GPIE_NSICR		0x00000001
+#define E1000_GPIE_MSIX_MODE		0x00000010
+#define E1000_GPIE_EIAME		0x40000000
+#define E1000_GPIE_PBA			0x80000000
+
+/* Receive Descriptor bit definitions */
+#define E1000_RXD_STAT_DD       0x01    /* Descriptor Done */
+#define E1000_RXD_STAT_EOP      0x02    /* End of Packet */
+#define E1000_RXD_STAT_IXSM     0x04    /* Ignore checksum */
+#define E1000_RXD_STAT_VP       0x08    /* IEEE VLAN Packet */
+#define E1000_RXD_STAT_UDPCS    0x10    /* UDP xsum calculated */
+#define E1000_RXD_STAT_TCPCS    0x20    /* TCP xsum calculated */
+#define E1000_RXD_STAT_TS       0x10000 /* Pkt was time stamped */
+
+#define E1000_RXDEXT_STATERR_LB    0x00040000
+#define E1000_RXDEXT_STATERR_CE    0x01000000
+#define E1000_RXDEXT_STATERR_SE    0x02000000
+#define E1000_RXDEXT_STATERR_SEQ   0x04000000
+#define E1000_RXDEXT_STATERR_CXE   0x10000000
+#define E1000_RXDEXT_STATERR_TCPE  0x20000000
+#define E1000_RXDEXT_STATERR_IPE   0x40000000
+#define E1000_RXDEXT_STATERR_RXE   0x80000000
+
+/* Same mask, but for extended and packet split descriptors */
+#define E1000_RXDEXT_ERR_FRAME_ERR_MASK ( \
+	E1000_RXDEXT_STATERR_CE  |            \
+	E1000_RXDEXT_STATERR_SE  |            \
+	E1000_RXDEXT_STATERR_SEQ |            \
+	E1000_RXDEXT_STATERR_CXE |            \
+	E1000_RXDEXT_STATERR_RXE)
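+
+/* Usage sketch (illustrative, not part of the original sources): assuming
+ * 'staterr' holds a descriptor's extended status/error word, any frame
+ * matching the mask above is damaged and should be discarded:
+ *
+ *	if (staterr & E1000_RXDEXT_ERR_FRAME_ERR_MASK)
+ *		goto drop;
+ */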
+
+#define E1000_MRQC_RSS_FIELD_IPV4_TCP          0x00010000
+#define E1000_MRQC_RSS_FIELD_IPV4              0x00020000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP_EX       0x00040000
+#define E1000_MRQC_RSS_FIELD_IPV6              0x00100000
+#define E1000_MRQC_RSS_FIELD_IPV6_TCP          0x00200000
+
+
+/* Management Control */
+#define E1000_MANC_SMBUS_EN      0x00000001 /* SMBus Enabled - RO */
+#define E1000_MANC_ASF_EN        0x00000002 /* ASF Enabled - RO */
+#define E1000_MANC_EN_BMC2OS     0x10000000 /* BMC-to-OS traffic enable */
+/* Enable Neighbor Discovery Filtering */
+#define E1000_MANC_RCV_TCO_EN    0x00020000 /* Receive TCO Packets Enabled */
+#define E1000_MANC_BLK_PHY_RST_ON_IDE   0x00040000 /* Block phy resets */
+/* Enable MAC address filtering */
+#define E1000_MANC_EN_MAC_ADDR_FILTER   0x00100000
+
+/* Receive Control */
+#define E1000_RCTL_EN             0x00000002    /* enable */
+#define E1000_RCTL_SBP            0x00000004    /* store bad packet */
+#define E1000_RCTL_UPE            0x00000008    /* unicast promiscuous enable */
+#define E1000_RCTL_MPE            0x00000010    /* multicast promiscuous enable */
+#define E1000_RCTL_LPE            0x00000020    /* long packet enable */
+#define E1000_RCTL_LBM_MAC        0x00000040    /* MAC loopback mode */
+#define E1000_RCTL_LBM_TCVR       0x000000C0    /* tcvr loopback mode */
+#define E1000_RCTL_RDMTS_HALF     0x00000000    /* rx desc min threshold size */
+#define E1000_RCTL_MO_SHIFT       12            /* multicast offset shift */
+#define E1000_RCTL_BAM            0x00008000    /* broadcast enable */
+#define E1000_RCTL_SZ_512         0x00020000    /* rx buffer size 512 */
+#define E1000_RCTL_SZ_256         0x00030000    /* rx buffer size 256 */
+#define E1000_RCTL_VFE            0x00040000    /* vlan filter enable */
+#define E1000_RCTL_CFIEN          0x00080000    /* canonical form enable */
+#define E1000_RCTL_DPF            0x00400000    /* Discard Pause Frames */
+#define E1000_RCTL_PMCF           0x00800000    /* pass MAC control frames */
+#define E1000_RCTL_SECRC          0x04000000    /* Strip Ethernet CRC */
+
+/* Use byte values for the following shift parameters
+ * Usage:
+ *     psrctl |= (((ROUNDUP(value0, 128) >> E1000_PSRCTL_BSIZE0_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE0_MASK) |
+ *                ((ROUNDUP(value1, 1024) >> E1000_PSRCTL_BSIZE1_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE1_MASK) |
+ *                ((ROUNDUP(value2, 1024) << E1000_PSRCTL_BSIZE2_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE2_MASK) |
+ *                ((ROUNDUP(value3, 1024) << E1000_PSRCTL_BSIZE3_SHIFT) &
+ *                  E1000_PSRCTL_BSIZE3_MASK))
+ * where value0 = [128..16256],  default=256
+ *       value1 = [1024..64512], default=4096
+ *       value2 = [0..64512],    default=4096
+ *       value3 = [0..64512],    default=0
+ */
+
+#define E1000_PSRCTL_BSIZE0_MASK   0x0000007F
+#define E1000_PSRCTL_BSIZE1_MASK   0x00003F00
+#define E1000_PSRCTL_BSIZE2_MASK   0x003F0000
+#define E1000_PSRCTL_BSIZE3_MASK   0x3F000000
+
+#define E1000_PSRCTL_BSIZE0_SHIFT  7            /* Shift _right_ 7 */
+#define E1000_PSRCTL_BSIZE1_SHIFT  2            /* Shift _right_ 2 */
+#define E1000_PSRCTL_BSIZE2_SHIFT  6            /* Shift _left_ 6 */
+#define E1000_PSRCTL_BSIZE3_SHIFT 14            /* Shift _left_ 14 */
+
+/* SWFW_SYNC Definitions */
+#define E1000_SWFW_EEP_SM   0x1
+#define E1000_SWFW_PHY0_SM  0x2
+#define E1000_SWFW_PHY1_SM  0x4
+#define E1000_SWFW_PHY2_SM  0x20
+#define E1000_SWFW_PHY3_SM  0x40
+
+/* FACTPS Definitions */
+/* Device Control */
+#define E1000_CTRL_FD       0x00000001  /* Full duplex.0=half; 1=full */
+#define E1000_CTRL_GIO_MASTER_DISABLE 0x00000004 /* Blocks new Master requests */
+#define E1000_CTRL_LRST     0x00000008  /* Link reset. 0=normal,1=reset */
+#define E1000_CTRL_ASDE     0x00000020  /* Auto-speed detect enable */
+#define E1000_CTRL_SLU      0x00000040  /* Set link up (Force Link) */
+#define E1000_CTRL_ILOS     0x00000080  /* Invert Loss-Of Signal */
+#define E1000_CTRL_SPD_SEL  0x00000300  /* Speed Select Mask */
+#define E1000_CTRL_SPD_100  0x00000100  /* Force 100Mb */
+#define E1000_CTRL_SPD_1000 0x00000200  /* Force 1Gb */
+#define E1000_CTRL_FRCSPD   0x00000800  /* Force Speed */
+#define E1000_CTRL_FRCDPX   0x00001000  /* Force Duplex */
+/* Defined polarity of Dock/Undock indication in SDP[0] */
+/* Reset both PHY ports, through PHYRST_N pin */
+/* enable link status from external LINK_0 and LINK_1 pins */
+#define E1000_CTRL_SWDPIN0  0x00040000  /* SWDPIN 0 value */
+#define E1000_CTRL_SWDPIN1  0x00080000  /* SWDPIN 1 value */
+#define E1000_CTRL_SDP0_DIR 0x00400000  /* SDP0 Data direction */
+#define E1000_CTRL_SDP1_DIR 0x00800000  /* SDP1 Data direction */
+#define E1000_CTRL_RST      0x04000000  /* Global reset */
+#define E1000_CTRL_RFCE     0x08000000  /* Receive Flow Control enable */
+#define E1000_CTRL_TFCE     0x10000000  /* Transmit flow control enable */
+#define E1000_CTRL_VME      0x40000000  /* IEEE VLAN mode enable */
+#define E1000_CTRL_PHY_RST  0x80000000  /* PHY Reset */
+/* Initiate an interrupt to manageability engine */
+#define E1000_CTRL_I2C_ENA  0x02000000  /* I2C enable */
+
+/* Bit definitions for the Management Data IO (MDIO) and Management Data
+ * Clock (MDC) pins in the Device Control Register.
+ */
+
+#define E1000_CONNSW_ENRGSRC             0x4
+#define E1000_CONNSW_PHYSD		0x400
+#define E1000_CONNSW_PHY_PDN		0x800
+#define E1000_CONNSW_SERDESD		0x200
+#define E1000_CONNSW_AUTOSENSE_CONF	0x2
+#define E1000_CONNSW_AUTOSENSE_EN	0x1
+#define E1000_PCS_CFG_PCS_EN             8
+#define E1000_PCS_LCTL_FLV_LINK_UP       1
+#define E1000_PCS_LCTL_FSV_100           2
+#define E1000_PCS_LCTL_FSV_1000          4
+#define E1000_PCS_LCTL_FDV_FULL          8
+#define E1000_PCS_LCTL_FSD               0x10
+#define E1000_PCS_LCTL_FORCE_LINK        0x20
+#define E1000_PCS_LCTL_FORCE_FCTRL       0x80
+#define E1000_PCS_LCTL_AN_ENABLE         0x10000
+#define E1000_PCS_LCTL_AN_RESTART        0x20000
+#define E1000_PCS_LCTL_AN_TIMEOUT        0x40000
+#define E1000_ENABLE_SERDES_LOOPBACK     0x0410
+
+#define E1000_PCS_LSTS_LINK_OK           1
+#define E1000_PCS_LSTS_SPEED_100         2
+#define E1000_PCS_LSTS_SPEED_1000        4
+#define E1000_PCS_LSTS_DUPLEX_FULL       8
+#define E1000_PCS_LSTS_SYNK_OK           0x10
+
+/* Device Status */
+#define E1000_STATUS_FD         0x00000001      /* Full duplex.0=half,1=full */
+#define E1000_STATUS_LU         0x00000002      /* Link up.0=no,1=link */
+#define E1000_STATUS_FUNC_MASK  0x0000000C      /* PCI Function Mask */
+#define E1000_STATUS_FUNC_SHIFT 2
+#define E1000_STATUS_FUNC_1     0x00000004      /* Function 1 */
+#define E1000_STATUS_TXOFF      0x00000010      /* transmission paused */
+#define E1000_STATUS_SPEED_100  0x00000040      /* Speed 100Mb/s */
+#define E1000_STATUS_SPEED_1000 0x00000080      /* Speed 1000Mb/s */
+/* Change in Dock/Undock state. Clear on write '0'. */
+/* Status of Master requests. */
+#define E1000_STATUS_GIO_MASTER_ENABLE 0x00080000
+/* BMC external code execution disabled */
+
+#define E1000_STATUS_2P5_SKU		0x00001000 /* Val of 2.5GBE SKU strap */
+#define E1000_STATUS_2P5_SKU_OVER	0x00002000 /* Val of 2.5GBE SKU Over */
+/* Constants used to interpret the masked PCI-X bus speed. */
+
+#define SPEED_10    10
+#define SPEED_100   100
+#define SPEED_1000  1000
+#define SPEED_2500  2500
+#define HALF_DUPLEX 1
+#define FULL_DUPLEX 2
+
+
+#define ADVERTISE_10_HALF                 0x0001
+#define ADVERTISE_10_FULL                 0x0002
+#define ADVERTISE_100_HALF                0x0004
+#define ADVERTISE_100_FULL                0x0008
+#define ADVERTISE_1000_HALF               0x0010 /* Not used, just FYI */
+#define ADVERTISE_1000_FULL               0x0020
+
+/* 1000/H is not supported, nor spec-compliant. */
+#define E1000_ALL_SPEED_DUPLEX (ADVERTISE_10_HALF  |  ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF |  ADVERTISE_100_FULL | \
+						      ADVERTISE_1000_FULL)
+#define E1000_ALL_NOT_GIG      (ADVERTISE_10_HALF  |  ADVERTISE_10_FULL | \
+				ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_100_SPEED    (ADVERTISE_100_HALF |  ADVERTISE_100_FULL)
+#define E1000_ALL_10_SPEED     (ADVERTISE_10_HALF  |  ADVERTISE_10_FULL)
+#define E1000_ALL_FULL_DUPLEX  (ADVERTISE_10_FULL  |  ADVERTISE_100_FULL | \
+						      ADVERTISE_1000_FULL)
+#define E1000_ALL_HALF_DUPLEX  (ADVERTISE_10_HALF  |  ADVERTISE_100_HALF)
+
+#define AUTONEG_ADVERTISE_SPEED_DEFAULT   E1000_ALL_SPEED_DUPLEX
+
+/* LED Control */
+#define E1000_LEDCTL_LED0_MODE_SHIFT	0
+#define E1000_LEDCTL_LED0_BLINK		0x00000080
+#define E1000_LEDCTL_LED0_MODE_MASK	0x0000000F
+#define E1000_LEDCTL_LED0_IVRT		0x00000040
+
+#define E1000_LEDCTL_MODE_LED_ON        0xE
+#define E1000_LEDCTL_MODE_LED_OFF       0xF
+
+/* Transmit Descriptor bit definitions */
+#define E1000_TXD_POPTS_IXSM 0x01       /* Insert IP checksum */
+#define E1000_TXD_POPTS_TXSM 0x02       /* Insert TCP/UDP checksum */
+#define E1000_TXD_CMD_EOP    0x01000000 /* End of Packet */
+#define E1000_TXD_CMD_IFCS   0x02000000 /* Insert FCS (Ethernet CRC) */
+#define E1000_TXD_CMD_RS     0x08000000 /* Report Status */
+#define E1000_TXD_CMD_DEXT   0x20000000 /* Descriptor extension (0 = legacy) */
+#define E1000_TXD_STAT_DD    0x00000001 /* Descriptor Done */
+/* Extended desc bits for Linksec and timesync */
+
+/* Transmit Control */
+#define E1000_TCTL_EN     0x00000002    /* enable tx */
+#define E1000_TCTL_PSP    0x00000008    /* pad short packets */
+#define E1000_TCTL_CT     0x00000ff0    /* collision threshold */
+#define E1000_TCTL_COLD   0x003ff000    /* collision distance */
+#define E1000_TCTL_RTLC   0x01000000    /* Re-transmit on late collision */
+
+/* DMA Coalescing register fields */
+#define E1000_DMACR_DMACWT_MASK         0x00003FFF /* DMA Coal Watchdog Timer */
+#define E1000_DMACR_DMACTHR_MASK        0x00FF0000 /* DMA Coal Rx Threshold */
+#define E1000_DMACR_DMACTHR_SHIFT       16
+#define E1000_DMACR_DMAC_LX_MASK        0x30000000 /* Lx when no PCIe trans */
+#define E1000_DMACR_DMAC_LX_SHIFT       28
+#define E1000_DMACR_DMAC_EN             0x80000000 /* Enable DMA Coalescing */
+/* DMA Coalescing BMC-to-OS Watchdog Enable */
+#define E1000_DMACR_DC_BMC2OSW_EN	0x00008000
+
+#define E1000_DMCTXTH_DMCTTHR_MASK      0x00000FFF /* DMA Coal Tx Threshold */
+
+#define E1000_DMCTLX_TTLX_MASK          0x00000FFF /* Time to LX request */
+
+#define E1000_DMCRTRH_UTRESH_MASK       0x0007FFFF /* Rx Traffic Rate Thresh */
+#define E1000_DMCRTRH_LRPRCW            0x80000000 /* Rx pkt rate curr window */
+
+#define E1000_DMCCNT_CCOUNT_MASK        0x01FFFFFF /* DMA Coal Rx Current Cnt */
+
+#define E1000_FCRTC_RTH_COAL_MASK       0x0003FFF0 /* FC Rx Thresh High val */
+#define E1000_FCRTC_RTH_COAL_SHIFT      4
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power decision */
+
+/* Timestamp in Rx buffer */
+#define E1000_RXPBS_CFG_TS_EN           0x80000000
+
+#define I210_RXPBSIZE_DEFAULT		0x000000A2 /* RXPBSIZE default */
+#define I210_TXPBSIZE_DEFAULT		0x04000014 /* TXPBSIZE default */
+
+/* SerDes Control */
+#define E1000_SCTL_DISABLE_SERDES_LOOPBACK 0x0400
+
+/* Receive Checksum Control */
+#define E1000_RXCSUM_IPOFL     0x00000100   /* IPv4 checksum offload */
+#define E1000_RXCSUM_TUOFL     0x00000200   /* TCP / UDP checksum offload */
+#define E1000_RXCSUM_CRCOFL    0x00000800   /* CRC32 offload enable */
+#define E1000_RXCSUM_PCSD      0x00002000   /* packet checksum disabled */
+
+/* Header split receive */
+#define E1000_RFCTL_IPV6_EX_DIS         0x00010000
+#define E1000_RFCTL_LEF                 0x00040000
+
+/* Collision related configuration parameters */
+#define E1000_COLLISION_THRESHOLD       15
+#define E1000_CT_SHIFT                  4
+#define E1000_COLLISION_DISTANCE        63
+#define E1000_COLD_SHIFT                12
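+
+/* Usage sketch (illustrative, not part of the original sources): folding the
+ * default collision parameters into a TCTL value using the field masks and
+ * shifts defined above:
+ *
+ *	tctl &= ~(E1000_TCTL_CT | E1000_TCTL_COLD);
+ *	tctl |= (E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT) |
+ *		(E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT);
+ */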
+
+/* Ethertype field values */
+#define ETHERNET_IEEE_VLAN_TYPE 0x8100  /* 802.3ac packet */
+
+#define MAX_JUMBO_FRAME_SIZE    0x3F00
+
+/* PBA constants */
+#define E1000_PBA_34K 0x0022
+#define E1000_PBA_64K 0x0040    /* 64KB */
+
+/* SW Semaphore Register */
+#define E1000_SWSM_SMBI         0x00000001 /* Driver Semaphore bit */
+#define E1000_SWSM_SWESMBI      0x00000002 /* FW Semaphore bit */
+
+/* Interrupt Cause Read */
+#define E1000_ICR_TXDW          0x00000001 /* Transmit desc written back */
+#define E1000_ICR_LSC           0x00000004 /* Link Status Change */
+#define E1000_ICR_RXSEQ         0x00000008 /* rx sequence error */
+#define E1000_ICR_RXDMT0        0x00000010 /* rx desc min. threshold (0) */
+#define E1000_ICR_RXT0          0x00000080 /* rx timer intr (ring 0) */
+#define E1000_ICR_VMMB          0x00000100 /* VM MB event */
+#define E1000_ICR_TS            0x00080000 /* Time Sync Interrupt */
+#define E1000_ICR_DRSTA         0x40000000 /* Device Reset Asserted */
+/* If this bit is asserted, the driver should claim the interrupt */
+#define E1000_ICR_INT_ASSERTED  0x80000000
+/* LAN connected device generates an interrupt */
+#define E1000_ICR_DOUTSYNC      0x10000000 /* NIC DMA out of sync */
+
+/* Extended Interrupt Cause Read */
+#define E1000_EICR_RX_QUEUE0    0x00000001 /* Rx Queue 0 Interrupt */
+#define E1000_EICR_RX_QUEUE1    0x00000002 /* Rx Queue 1 Interrupt */
+#define E1000_EICR_RX_QUEUE2    0x00000004 /* Rx Queue 2 Interrupt */
+#define E1000_EICR_RX_QUEUE3    0x00000008 /* Rx Queue 3 Interrupt */
+#define E1000_EICR_TX_QUEUE0    0x00000100 /* Tx Queue 0 Interrupt */
+#define E1000_EICR_TX_QUEUE1    0x00000200 /* Tx Queue 1 Interrupt */
+#define E1000_EICR_TX_QUEUE2    0x00000400 /* Tx Queue 2 Interrupt */
+#define E1000_EICR_TX_QUEUE3    0x00000800 /* Tx Queue 3 Interrupt */
+#define E1000_EICR_OTHER        0x80000000 /* Interrupt Cause Active */
+/* TCP Timer */
+
+/* This defines the bits that are set in the Interrupt Mask
+ * Set/Read Register.  Each bit is documented below:
+ *   o RXT0   = Receiver Timer Interrupt (ring 0)
+ *   o TXDW   = Transmit Descriptor Written Back
+ *   o RXDMT0 = Receive Descriptor Minimum Threshold hit (ring 0)
+ *   o RXSEQ  = Receive Sequence Error
+ *   o LSC    = Link Status Change
+ */
+#define IMS_ENABLE_MASK ( \
+	E1000_IMS_RXT0   |    \
+	E1000_IMS_TXDW   |    \
+	E1000_IMS_RXDMT0 |    \
+	E1000_IMS_RXSEQ  |    \
+	E1000_IMS_LSC    |    \
+	E1000_IMS_DOUTSYNC)
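+
+/* Usage sketch (illustrative, not part of the original sources; 'wr32'
+ * stands in for the driver's register-write helper and E1000_IMS for the
+ * mask register offset from e1000_regs.h, both assumptions here):
+ *
+ *	wr32(E1000_IMS, IMS_ENABLE_MASK);
+ */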
+
+/* Interrupt Mask Set */
+#define E1000_IMS_TXDW      E1000_ICR_TXDW      /* Transmit desc written back */
+#define E1000_IMS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_IMS_VMMB      E1000_ICR_VMMB      /* Mail box activity */
+#define E1000_IMS_TS        E1000_ICR_TS        /* Time Sync Interrupt */
+#define E1000_IMS_RXSEQ     E1000_ICR_RXSEQ     /* rx sequence error */
+#define E1000_IMS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_IMS_RXT0      E1000_ICR_RXT0      /* rx timer intr */
+#define E1000_IMS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+#define E1000_IMS_DOUTSYNC  E1000_ICR_DOUTSYNC /* NIC DMA out of sync */
+
+/* Extended Interrupt Mask Set */
+#define E1000_EIMS_OTHER        E1000_EICR_OTHER   /* Interrupt Cause Active */
+
+/* Interrupt Cause Set */
+#define E1000_ICS_LSC       E1000_ICR_LSC       /* Link Status Change */
+#define E1000_ICS_RXDMT0    E1000_ICR_RXDMT0    /* rx desc min. threshold */
+#define E1000_ICS_DRSTA     E1000_ICR_DRSTA     /* Device Reset Asserted */
+
+/* Extended Interrupt Cause Set */
+/* E1000_EITR_CNT_IGNR is only for 82576 and newer */
+#define E1000_EITR_CNT_IGNR     0x80000000 /* Don't reset counters on write */
+
+
+/* Transmit Descriptor Control */
+/* Enable the counting of descriptors still to be processed. */
+
+/* Flow Control Constants */
+#define FLOW_CONTROL_ADDRESS_LOW  0x00C28001
+#define FLOW_CONTROL_ADDRESS_HIGH 0x00000100
+#define FLOW_CONTROL_TYPE         0x8808
+
+/* Transmit Config Word */
+#define E1000_TXCW_ASM_DIR	0x00000100 /* TXCW astm pause direction */
+#define E1000_TXCW_PAUSE	0x00000080 /* TXCW sym pause request */
+
+/* 802.1q VLAN Packet Size */
+#define VLAN_TAG_SIZE              4    /* 802.3ac tag (not DMA'd) */
+#define E1000_VLAN_FILTER_TBL_SIZE 128  /* VLAN Filter Table (4096 bits) */
+
+/* Receive Address */
+/* Number of high/low register pairs in the RAR. The RAR (Receive Address
+ * Registers) holds the directed and multicast addresses that we monitor.
+ * Technically, we have 16 spots.  However, we reserve one of these spots
+ * (RAR[15]) for our directed address used by controllers with
+ * manageability enabled, allowing us room for 15 multicast addresses.
+ */
+#define E1000_RAH_AV  0x80000000        /* Receive descriptor valid */
+#define E1000_RAL_MAC_ADDR_LEN 4
+#define E1000_RAH_MAC_ADDR_LEN 2
+#define E1000_RAH_POOL_MASK 0x03FC0000
+#define E1000_RAH_POOL_1 0x00040000
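+
+/* Usage sketch (illustrative, not part of the original sources): a RAR entry
+ * only takes part in address filtering once its high word is marked valid,
+ * e.g. when assigning it to pool 1:
+ *
+ *	rah = (rah & ~E1000_RAH_POOL_MASK) | E1000_RAH_POOL_1 | E1000_RAH_AV;
+ */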
+
+/* Error Codes */
+#define E1000_ERR_NVM      1
+#define E1000_ERR_PHY      2
+#define E1000_ERR_CONFIG   3
+#define E1000_ERR_PARAM    4
+#define E1000_ERR_MAC_INIT 5
+#define E1000_ERR_RESET   9
+#define E1000_ERR_MASTER_REQUESTS_PENDING 10
+#define E1000_BLK_PHY_RESET   12
+#define E1000_ERR_SWFW_SYNC 13
+#define E1000_NOT_IMPLEMENTED 14
+#define E1000_ERR_MBX      15
+#define E1000_ERR_INVALID_ARGUMENT  16
+#define E1000_ERR_NO_SPACE          17
+#define E1000_ERR_NVM_PBA_SECTION   18
+#define E1000_ERR_INVM_VALUE_NOT_FOUND	19
+#define E1000_ERR_I2C               20
+
+/* Loop limit on how long we wait for auto-negotiation to complete */
+#define COPPER_LINK_UP_LIMIT              10
+#define PHY_AUTO_NEG_LIMIT                45
+#define PHY_FORCE_LIMIT                   20
+/* Number of 100-microsecond intervals we wait for PCI Express master disable */
+#define MASTER_DISABLE_TIMEOUT      800
+/* Number of milliseconds we wait for PHY configuration done after MAC reset */
+#define PHY_CFG_TIMEOUT             100
+/* Number of 2-millisecond intervals we wait when acquiring MDIO ownership. */
+/* Number of milliseconds for NVM auto read done after MAC reset. */
+#define AUTO_READ_DONE_TIMEOUT      10
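+
+/* Usage sketch (illustrative, not part of the original sources): these limits
+ * bound polling loops such as waiting for auto-negotiation to finish, where
+ * 'read_phy_reg' stands in for the driver's PHY read helper (an assumption):
+ *
+ *	for (i = 0; i < PHY_AUTO_NEG_LIMIT; i++) {
+ *		read_phy_reg(hw, PHY_STATUS, &phy_status);
+ *		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+ *			break;
+ *		msleep(100);
+ *	}
+ */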
+
+/* Flow Control */
+#define E1000_FCRTL_XONE 0x80000000     /* Enable XON frame transmission */
+
+#define E1000_TSYNCTXCTL_VALID    0x00000001 /* tx timestamp valid */
+#define E1000_TSYNCTXCTL_ENABLED  0x00000010 /* enable tx timestamping */
+
+#define E1000_TSYNCRXCTL_VALID      0x00000001 /* rx timestamp valid */
+#define E1000_TSYNCRXCTL_TYPE_MASK  0x0000000E /* rx type mask */
+#define E1000_TSYNCRXCTL_TYPE_L2_V2       0x00
+#define E1000_TSYNCRXCTL_TYPE_L4_V1       0x02
+#define E1000_TSYNCRXCTL_TYPE_L2_L4_V2    0x04
+#define E1000_TSYNCRXCTL_TYPE_ALL         0x08
+#define E1000_TSYNCRXCTL_TYPE_EVENT_V2    0x0A
+#define E1000_TSYNCRXCTL_ENABLED    0x00000010 /* enable rx timestamping */
+
+#define E1000_TSYNCRXCFG_PTP_V1_CTRLT_MASK   0x000000FF
+#define E1000_TSYNCRXCFG_PTP_V1_SYNC_MESSAGE       0x00
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_REQ_MESSAGE  0x01
+#define E1000_TSYNCRXCFG_PTP_V1_FOLLOWUP_MESSAGE   0x02
+#define E1000_TSYNCRXCFG_PTP_V1_DELAY_RESP_MESSAGE 0x03
+#define E1000_TSYNCRXCFG_PTP_V1_MANAGEMENT_MESSAGE 0x04
+
+#define E1000_TSYNCRXCFG_PTP_V2_MSGID_MASK               0x00000F00
+#define E1000_TSYNCRXCFG_PTP_V2_SYNC_MESSAGE                 0x0000
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_REQ_MESSAGE            0x0100
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_REQ_MESSAGE       0x0200
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_RESP_MESSAGE      0x0300
+#define E1000_TSYNCRXCFG_PTP_V2_FOLLOWUP_MESSAGE             0x0800
+#define E1000_TSYNCRXCFG_PTP_V2_DELAY_RESP_MESSAGE           0x0900
+#define E1000_TSYNCRXCFG_PTP_V2_PATH_DELAY_FOLLOWUP_MESSAGE  0x0A00
+#define E1000_TSYNCRXCFG_PTP_V2_ANNOUNCE_MESSAGE             0x0B00
+#define E1000_TSYNCRXCFG_PTP_V2_SIGNALLING_MESSAGE           0x0C00
+#define E1000_TSYNCRXCFG_PTP_V2_MANAGEMENT_MESSAGE           0x0D00
+
+#define E1000_TIMINCA_16NS_SHIFT 24
+
+/* Time Sync Interrupt Cause/Mask Register Bits */
+
+#define TSINTR_SYS_WRAP  (1 << 0) /* SYSTIM Wrap around. */
+#define TSINTR_TXTS      (1 << 1) /* Transmit Timestamp. */
+#define TSINTR_RXTS      (1 << 2) /* Receive Timestamp. */
+#define TSINTR_TT0       (1 << 3) /* Target Time 0 Trigger. */
+#define TSINTR_TT1       (1 << 4) /* Target Time 1 Trigger. */
+#define TSINTR_AUTT0     (1 << 5) /* Auxiliary Timestamp 0 Taken. */
+#define TSINTR_AUTT1     (1 << 6) /* Auxiliary Timestamp 1 Taken. */
+#define TSINTR_TADJ      (1 << 7) /* Time Adjust Done. */
+
+#define TSYNC_INTERRUPTS TSINTR_TXTS
+#define E1000_TSICR_TXTS TSINTR_TXTS
+
+/* TSAUXC Configuration Bits */
+#define TSAUXC_EN_TT0    (1 << 0)  /* Enable target time 0. */
+#define TSAUXC_EN_TT1    (1 << 1)  /* Enable target time 1. */
+#define TSAUXC_EN_CLK0   (1 << 2)  /* Enable Configurable Frequency Clock 0. */
+#define TSAUXC_SAMP_AUT0 (1 << 3)  /* Latch SYSTIML/H into AUXSTMPL/0. */
+#define TSAUXC_ST0       (1 << 4)  /* Start Clock 0 Toggle on Target Time 0. */
+#define TSAUXC_EN_CLK1   (1 << 5)  /* Enable Configurable Frequency Clock 1. */
+#define TSAUXC_SAMP_AUT1 (1 << 6)  /* Latch SYSTIML/H into AUXSTMPL/1. */
+#define TSAUXC_ST1       (1 << 7)  /* Start Clock 1 Toggle on Target Time 1. */
+#define TSAUXC_EN_TS0    (1 << 8)  /* Enable hardware timestamp 0. */
+#define TSAUXC_AUTT0     (1 << 9)  /* Auxiliary Timestamp Taken. */
+#define TSAUXC_EN_TS1    (1 << 10) /* Enable hardware timestamp 1. */
+#define TSAUXC_AUTT1     (1 << 11) /* Auxiliary Timestamp Taken. */
+#define TSAUXC_PLSG      (1 << 17) /* Generate a pulse. */
+#define TSAUXC_DISABLE   (1 << 31) /* Disable SYSTIM Count Operation. */
+
+/* SDP Configuration Bits */
+#define AUX0_SEL_SDP0    (0 << 0)  /* Assign SDP0 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP1    (1 << 0)  /* Assign SDP1 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP2    (2 << 0)  /* Assign SDP2 to auxiliary time stamp 0. */
+#define AUX0_SEL_SDP3    (3 << 0)  /* Assign SDP3 to auxiliary time stamp 0. */
+#define AUX0_TS_SDP_EN   (1 << 2)  /* Enable auxiliary time stamp trigger 0. */
+#define AUX1_SEL_SDP0    (0 << 3)  /* Assign SDP0 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP1    (1 << 3)  /* Assign SDP1 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP2    (2 << 3)  /* Assign SDP2 to auxiliary time stamp 1. */
+#define AUX1_SEL_SDP3    (3 << 3)  /* Assign SDP3 to auxiliary time stamp 1. */
+#define AUX1_TS_SDP_EN   (1 << 5)  /* Enable auxiliary time stamp trigger 1. */
+#define TS_SDP0_SEL_TT0  (0 << 6)  /* Target time 0 is output on SDP0. */
+#define TS_SDP0_SEL_TT1  (1 << 6)  /* Target time 1 is output on SDP0. */
+#define TS_SDP0_SEL_FC0  (2 << 6)  /* Freq clock  0 is output on SDP0. */
+#define TS_SDP0_SEL_FC1  (3 << 6)  /* Freq clock  1 is output on SDP0. */
+#define TS_SDP0_EN       (1 << 8)  /* SDP0 is assigned to Tsync. */
+#define TS_SDP1_SEL_TT0  (0 << 9)  /* Target time 0 is output on SDP1. */
+#define TS_SDP1_SEL_TT1  (1 << 9)  /* Target time 1 is output on SDP1. */
+#define TS_SDP1_SEL_FC0  (2 << 9)  /* Freq clock  0 is output on SDP1. */
+#define TS_SDP1_SEL_FC1  (3 << 9)  /* Freq clock  1 is output on SDP1. */
+#define TS_SDP1_EN       (1 << 11) /* SDP1 is assigned to Tsync. */
+#define TS_SDP2_SEL_TT0  (0 << 12) /* Target time 0 is output on SDP2. */
+#define TS_SDP2_SEL_TT1  (1 << 12) /* Target time 1 is output on SDP2. */
+#define TS_SDP2_SEL_FC0  (2 << 12) /* Freq clock  0 is output on SDP2. */
+#define TS_SDP2_SEL_FC1  (3 << 12) /* Freq clock  1 is output on SDP2. */
+#define TS_SDP2_EN       (1 << 14) /* SDP2 is assigned to Tsync. */
+#define TS_SDP3_SEL_TT0  (0 << 15) /* Target time 0 is output on SDP3. */
+#define TS_SDP3_SEL_TT1  (1 << 15) /* Target time 1 is output on SDP3. */
+#define TS_SDP3_SEL_FC0  (2 << 15) /* Freq clock  0 is output on SDP3. */
+#define TS_SDP3_SEL_FC1  (3 << 15) /* Freq clock  1 is output on SDP3. */
+#define TS_SDP3_EN       (1 << 17) /* SDP3 is assigned to Tsync. */
+
+#define E1000_MDICNFG_EXT_MDIO    0x80000000      /* MDI ext/int destination */
+#define E1000_MDICNFG_COM_MDIO    0x40000000      /* MDI shared w/ lan 0 */
+#define E1000_MDICNFG_PHY_MASK    0x03E00000
+#define E1000_MDICNFG_PHY_SHIFT   21
+
+#define E1000_MEDIA_PORT_COPPER			1
+#define E1000_MEDIA_PORT_OTHER			2
+#define E1000_M88E1112_AUTO_COPPER_SGMII	0x2
+#define E1000_M88E1112_AUTO_COPPER_BASEX	0x3
+#define E1000_M88E1112_STATUS_LINK		0x0004 /* Interface Link Bit */
+#define E1000_M88E1112_MAC_CTRL_1		0x10
+#define E1000_M88E1112_MAC_CTRL_1_MODE_MASK	0x0380 /* Mode Select */
+#define E1000_M88E1112_MAC_CTRL_1_MODE_SHIFT	7
+#define E1000_M88E1112_PAGE_ADDR		0x16
+#define E1000_M88E1112_STATUS			0x01
+
+/* PCI Express Control */
+#define E1000_GCR_CMPL_TMOUT_MASK       0x0000F000
+#define E1000_GCR_CMPL_TMOUT_10ms       0x00001000
+#define E1000_GCR_CMPL_TMOUT_RESEND     0x00010000
+#define E1000_GCR_CAP_VER2              0x00040000
+
+/* mPHY Address Control and Data Registers */
+#define E1000_MPHY_ADDR_CTL          0x0024 /* mPHY Address Control Register */
+#define E1000_MPHY_ADDR_CTL_OFFSET_MASK 0xFFFF0000
+#define E1000_MPHY_DATA                 0x0E10 /* mPHY Data Register */
+
+/* mPHY PCS CLK Register */
+#define E1000_MPHY_PCS_CLK_REG_OFFSET  0x0004 /* mPHY PCS CLK AFE CSR Offset */
+/* mPHY Near End Digital Loopback Override Bit */
+#define E1000_MPHY_PCS_CLK_REG_DIGINELBEN 0x10
+
+#define E1000_PCS_LCTL_FORCE_FCTRL	0x80
+#define E1000_PCS_LSTS_AN_COMPLETE	0x10000
+
+/* PHY Control Register */
+#define MII_CR_FULL_DUPLEX      0x0100  /* FDX =1, half duplex =0 */
+#define MII_CR_RESTART_AUTO_NEG 0x0200  /* Restart auto negotiation */
+#define MII_CR_POWER_DOWN       0x0800  /* Power down */
+#define MII_CR_AUTO_NEG_EN      0x1000  /* Auto Neg Enable */
+#define MII_CR_LOOPBACK         0x4000  /* 0 = normal, 1 = loopback */
+#define MII_CR_RESET            0x8000  /* 0 = normal, 1 = PHY reset */
+#define MII_CR_SPEED_1000       0x0040
+#define MII_CR_SPEED_100        0x2000
+#define MII_CR_SPEED_10         0x0000
+
+/* PHY Status Register */
+#define MII_SR_LINK_STATUS       0x0004 /* Link Status 1 = link */
+#define MII_SR_AUTONEG_COMPLETE  0x0020 /* Auto Neg Complete */
+
+/* Autoneg Advertisement Register */
+#define NWAY_AR_10T_HD_CAPS      0x0020   /* 10T   Half Duplex Capable */
+#define NWAY_AR_10T_FD_CAPS      0x0040   /* 10T   Full Duplex Capable */
+#define NWAY_AR_100TX_HD_CAPS    0x0080   /* 100TX Half Duplex Capable */
+#define NWAY_AR_100TX_FD_CAPS    0x0100   /* 100TX Full Duplex Capable */
+#define NWAY_AR_PAUSE            0x0400   /* Pause operation desired */
+#define NWAY_AR_ASM_DIR          0x0800   /* Asymmetric Pause Direction bit */
+
+/* Link Partner Ability Register (Base Page) */
+#define NWAY_LPAR_PAUSE          0x0400 /* LP Pause operation desired */
+#define NWAY_LPAR_ASM_DIR        0x0800 /* LP Asymmetric Pause Direction bit */
+
+/* Autoneg Expansion Register */
+
+/* 1000BASE-T Control Register */
+#define CR_1000T_HD_CAPS         0x0100 /* Advertise 1000T HD capability */
+#define CR_1000T_FD_CAPS         0x0200 /* Advertise 1000T FD capability  */
+#define CR_1000T_MS_VALUE        0x0800 /* 1=Configure PHY as Master */
+					/* 0=Configure PHY as Slave */
+#define CR_1000T_MS_ENABLE       0x1000 /* 1=Master/Slave manual config value */
+					/* 0=Automatic Master/Slave config */
+
+/* 1000BASE-T Status Register */
+#define SR_1000T_REMOTE_RX_STATUS 0x1000 /* Remote receiver OK */
+#define SR_1000T_LOCAL_RX_STATUS  0x2000 /* Local receiver OK */
+
+
+/* PHY 1000 MII Register/Bit Definitions */
+/* PHY Registers defined by IEEE */
+#define PHY_CONTROL      0x00 /* Control Register */
+#define PHY_STATUS       0x01 /* Status Register */
+#define PHY_ID1          0x02 /* Phy Id Reg (word 1) */
+#define PHY_ID2          0x03 /* Phy Id Reg (word 2) */
+#define PHY_AUTONEG_ADV  0x04 /* Autoneg Advertisement */
+#define PHY_LP_ABILITY   0x05 /* Link Partner Ability (Base Page) */
+#define PHY_1000T_CTRL   0x09 /* 1000Base-T Control Reg */
+#define PHY_1000T_STATUS 0x0A /* 1000Base-T Status Reg */
+
+/* NVM Control */
+#define E1000_EECD_SK        0x00000001 /* NVM Clock */
+#define E1000_EECD_CS        0x00000002 /* NVM Chip Select */
+#define E1000_EECD_DI        0x00000004 /* NVM Data In */
+#define E1000_EECD_DO        0x00000008 /* NVM Data Out */
+#define E1000_EECD_REQ       0x00000040 /* NVM Access Request */
+#define E1000_EECD_GNT       0x00000080 /* NVM Access Grant */
+#define E1000_EECD_PRES      0x00000100 /* NVM Present */
+/* NVM Addressing bits based on type 0=small, 1=large */
+#define E1000_EECD_ADDR_BITS 0x00000400
+#define E1000_NVM_GRANT_ATTEMPTS   1000 /* NVM # attempts to gain grant */
+#define E1000_EECD_AUTO_RD          0x00000200  /* NVM Auto Read done */
+#define E1000_EECD_SIZE_EX_MASK     0x00007800  /* NVM Size */
+#define E1000_EECD_SIZE_EX_SHIFT     11
+#define E1000_EECD_FLUPD_I210		0x00800000 /* Update FLASH */
+#define E1000_EECD_FLUDONE_I210		0x04000000 /* Update FLASH done */
+#define E1000_EECD_FLASH_DETECTED_I210	0x00080000 /* FLASH detected */
+#define E1000_FLUDONE_ATTEMPTS		20000
+#define E1000_EERD_EEWR_MAX_COUNT	512 /* buffered EEPROM words rw */
+#define E1000_I210_FIFO_SEL_RX		0x00
+#define E1000_I210_FIFO_SEL_TX_QAV(_i)	(0x02 + (_i))
+#define E1000_I210_FIFO_SEL_TX_LEGACY	E1000_I210_FIFO_SEL_TX_QAV(0)
+#define E1000_I210_FIFO_SEL_BMC2OS_TX	0x06
+#define E1000_I210_FIFO_SEL_BMC2OS_RX	0x01
+#define E1000_I210_FLASH_SECTOR_SIZE	0x1000 /* 4KB FLASH sector unit size */
+/* Secure FLASH mode requires removing MSb */
+#define E1000_I210_FW_PTR_MASK		0x7FFF
+/* Firmware code revision field word offset */
+#define E1000_I210_FW_VER_OFFSET	328
+
+/* Offset to data in NVM read/write registers */
+#define E1000_NVM_RW_REG_DATA   16
+#define E1000_NVM_RW_REG_DONE   2    /* Offset to READ/WRITE done bit */
+#define E1000_NVM_RW_REG_START  1    /* Start operation */
+#define E1000_NVM_RW_ADDR_SHIFT 2    /* Shift to the address bits */
+#define E1000_NVM_POLL_READ     0    /* Flag for polling for read complete */
+
+/* NVM Word Offsets */
+#define NVM_COMPAT                 0x0003
+#define NVM_ID_LED_SETTINGS        0x0004 /* SERDES output amplitude */
+#define NVM_VERSION                0x0005
+#define NVM_INIT_CONTROL2_REG      0x000F
+#define NVM_INIT_CONTROL3_PORT_B   0x0014
+#define NVM_INIT_CONTROL3_PORT_A   0x0024
+#define NVM_ALT_MAC_ADDR_PTR       0x0037
+#define NVM_CHECKSUM_REG           0x003F
+#define NVM_COMPATIBILITY_REG_3    0x0003
+#define NVM_COMPATIBILITY_BIT_MASK 0x8000
+#define NVM_MAC_ADDR               0x0000
+#define NVM_SUB_DEV_ID             0x000B
+#define NVM_SUB_VEN_ID             0x000C
+#define NVM_DEV_ID                 0x000D
+#define NVM_VEN_ID                 0x000E
+#define NVM_INIT_CTRL_2            0x000F
+#define NVM_INIT_CTRL_4            0x0013
+#define NVM_LED_1_CFG              0x001C
+#define NVM_LED_0_2_CFG            0x001F
+#define NVM_ETRACK_WORD            0x0042
+#define NVM_ETRACK_HIWORD          0x0043
+#define NVM_COMB_VER_OFF           0x0083
+#define NVM_COMB_VER_PTR           0x003D
+
+/* NVM version defines */
+#define NVM_MAJOR_MASK			0xF000
+#define NVM_MINOR_MASK			0x0FF0
+#define NVM_IMAGE_ID_MASK		0x000F
+#define NVM_COMB_VER_MASK		0x00FF
+#define NVM_MAJOR_SHIFT			12
+#define NVM_MINOR_SHIFT			4
+#define NVM_COMB_VER_SHFT		8
+#define NVM_VER_INVALID			0xFFFF
+#define NVM_ETRACK_SHIFT		16
+#define NVM_ETRACK_VALID		0x8000
+#define NVM_NEW_DEC_MASK		0x0F00
+#define NVM_HEX_CONV			16
+#define NVM_HEX_TENS			10
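+
+/* Usage sketch (illustrative, not part of the original sources): decoding a
+ * version word read from the NVM_VERSION offset with the masks above:
+ *
+ *	major = (fw_ver & NVM_MAJOR_MASK) >> NVM_MAJOR_SHIFT;
+ *	minor = (fw_ver & NVM_MINOR_MASK) >> NVM_MINOR_SHIFT;
+ *	image = fw_ver & NVM_IMAGE_ID_MASK;
+ */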
+
+#define NVM_ETS_CFG			0x003E
+#define NVM_ETS_LTHRES_DELTA_MASK	0x07C0
+#define NVM_ETS_LTHRES_DELTA_SHIFT	6
+#define NVM_ETS_TYPE_MASK		0x0038
+#define NVM_ETS_TYPE_SHIFT		3
+#define NVM_ETS_TYPE_EMC		0x000
+#define NVM_ETS_NUM_SENSORS_MASK	0x0007
+#define NVM_ETS_DATA_LOC_MASK		0x3C00
+#define NVM_ETS_DATA_LOC_SHIFT		10
+#define NVM_ETS_DATA_INDEX_MASK		0x0300
+#define NVM_ETS_DATA_INDEX_SHIFT	8
+#define NVM_ETS_DATA_HTHRESH_MASK	0x00FF
+
+#define E1000_NVM_CFG_DONE_PORT_0  0x040000 /* MNG config cycle done */
+#define E1000_NVM_CFG_DONE_PORT_1  0x080000 /* ...for second port */
+#define E1000_NVM_CFG_DONE_PORT_2  0x100000 /* ...for third port */
+#define E1000_NVM_CFG_DONE_PORT_3  0x200000 /* ...for fourth port */
+
+#define NVM_82580_LAN_FUNC_OFFSET(a) ((a) ? (0x40 + (0x40 * (a))) : 0)
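+
+/* Usage sketch (illustrative, not part of the original sources): per-port NVM
+ * words are read at a base offset plus this per-function displacement,
+ * assuming the hw handle exposes the PCI function as 'hw->bus.func':
+ *
+ *	word = NVM_LED_1_CFG + NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+ */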
+
+/* Mask bits for fields in Word 0x24 of the NVM */
+#define NVM_WORD24_COM_MDIO         0x0008 /* MDIO interface shared */
+#define NVM_WORD24_EXT_MDIO         0x0004 /* MDIO accesses routed external */
+
+/* Mask bits for fields in Word 0x0f of the NVM */
+#define NVM_WORD0F_PAUSE_MASK       0x3000
+#define NVM_WORD0F_ASM_DIR          0x2000
+
+/* Mask bits for fields in Word 0x1a of the NVM */
+
+/* length of string needed to store part num */
+#define E1000_PBANUM_LENGTH         11
+
+/* For checksumming, the sum of all words in the NVM should equal 0xBABA. */
+#define NVM_SUM                    0xBABA
+
+#define NVM_PBA_OFFSET_0           8
+#define NVM_PBA_OFFSET_1           9
+#define NVM_RESERVED_WORD		0xFFFF
+#define NVM_PBA_PTR_GUARD          0xFAFA
+#define NVM_WORD_SIZE_BASE_SHIFT   6
+
+/* NVM Commands - Microwire */
+
+/* NVM Commands - SPI */
+#define NVM_MAX_RETRY_SPI          5000 /* Max wait of 5ms, for RDY signal */
+#define NVM_WRITE_OPCODE_SPI       0x02 /* NVM write opcode */
+#define NVM_READ_OPCODE_SPI        0x03 /* NVM read opcode */
+#define NVM_A8_OPCODE_SPI          0x08 /* opcode bit-3 = address bit-8 */
+#define NVM_WREN_OPCODE_SPI        0x06 /* NVM set Write Enable latch */
+#define NVM_RDSR_OPCODE_SPI        0x05 /* NVM read Status register */
+
+/* SPI NVM Status Register */
+#define NVM_STATUS_RDY_SPI         0x01
+
+/* Word definitions for ID LED Settings */
+#define ID_LED_RESERVED_0000 0x0000
+#define ID_LED_RESERVED_FFFF 0xFFFF
+#define ID_LED_DEFAULT       ((ID_LED_OFF1_ON2  << 12) | \
+			      (ID_LED_OFF1_OFF2 <<  8) | \
+			      (ID_LED_DEF1_DEF2 <<  4) | \
+			      (ID_LED_DEF1_DEF2))
+#define ID_LED_DEF1_DEF2     0x1
+#define ID_LED_DEF1_ON2      0x2
+#define ID_LED_DEF1_OFF2     0x3
+#define ID_LED_ON1_DEF2      0x4
+#define ID_LED_ON1_ON2       0x5
+#define ID_LED_ON1_OFF2      0x6
+#define ID_LED_OFF1_DEF2     0x7
+#define ID_LED_OFF1_ON2      0x8
+#define ID_LED_OFF1_OFF2     0x9
+
+#define IGP_ACTIVITY_LED_MASK   0xFFFFF0FF
+#define IGP_ACTIVITY_LED_ENABLE 0x0300
+#define IGP_LED3_MODE           0x07000000
+
+/* PCI/PCI-X/PCI-EX Config space */
+#define PCIE_DEVICE_CONTROL2         0x28
+#define PCIE_DEVICE_CONTROL2_16ms    0x0005
+
+#define PHY_REVISION_MASK      0xFFFFFFF0
+#define MAX_PHY_REG_ADDRESS    0x1F  /* 5 bit address bus (0-0x1F) */
+#define MAX_PHY_MULTI_PAGE_REG 0xF
+
+/* Bit definitions for valid PHY IDs. */
+/* I = Integrated
+ * E = External
+ */
+#define M88E1111_I_PHY_ID    0x01410CC0
+#define M88E1112_E_PHY_ID    0x01410C90
+#define I347AT4_E_PHY_ID     0x01410DC0
+#define IGP03E1000_E_PHY_ID  0x02A80390
+#define I82580_I_PHY_ID      0x015403A0
+#define I350_I_PHY_ID        0x015403B0
+#define M88_VENDOR           0x0141
+#define I210_I_PHY_ID        0x01410C00
+#define M88E1543_E_PHY_ID    0x01410EA0
+
+/* M88E1000 Specific Registers */
+#define M88E1000_PHY_SPEC_CTRL     0x10  /* PHY Specific Control Register */
+#define M88E1000_PHY_SPEC_STATUS   0x11  /* PHY Specific Status Register */
+#define M88E1000_EXT_PHY_SPEC_CTRL 0x14  /* Extended PHY Specific Control */
+
+#define M88E1000_PHY_PAGE_SELECT   0x1D  /* Reg 29 for page number setting */
+#define M88E1000_PHY_GEN_CONTROL   0x1E  /* Its meaning depends on reg 29 */
+
+/* M88E1000 PHY Specific Control Register */
+#define M88E1000_PSCR_POLARITY_REVERSAL 0x0002 /* 1=Polarity Reversal enabled */
+/* 1=CLK125 low, 0=CLK125 toggling */
+#define M88E1000_PSCR_MDI_MANUAL_MODE  0x0000  /* MDI Crossover Mode bits 6:5 */
+					       /* Manual MDI configuration */
+#define M88E1000_PSCR_MDIX_MANUAL_MODE 0x0020  /* Manual MDIX configuration */
+/* 1000BASE-T: Auto crossover, 100BASE-TX/10BASE-T: MDI Mode */
+#define M88E1000_PSCR_AUTO_X_1000T     0x0040
+/* Auto crossover enabled all speeds */
+#define M88E1000_PSCR_AUTO_X_MODE      0x0060
+/* 1=Enable Extended 10BASE-T distance (Lower 10BASE-T Rx Threshold)
+ * 0=Normal 10BASE-T Rx Threshold
+ */
+/* 1=5-bit interface in 100BASE-TX, 0=MII interface in 100BASE-TX */
+#define M88E1000_PSCR_ASSERT_CRS_ON_TX     0x0800 /* 1=Assert CRS on Transmit */
+
+/* M88E1000 PHY Specific Status Register */
+#define M88E1000_PSSR_REV_POLARITY       0x0002 /* 1=Polarity reversed */
+#define M88E1000_PSSR_DOWNSHIFT          0x0020 /* 1=Downshifted */
+#define M88E1000_PSSR_MDIX               0x0040 /* 1=MDIX; 0=MDI */
+/* 0 = <50M
+ * 1 = 50-80M
+ * 2 = 80-110M
+ * 3 = 110-140M
+ * 4 = >140M
+ */
+#define M88E1000_PSSR_CABLE_LENGTH       0x0380
+#define M88E1000_PSSR_SPEED              0xC000 /* Speed, bits 14:15 */
+#define M88E1000_PSSR_1000MBS            0x8000 /* 10=1000Mbs */
+
+#define M88E1000_PSSR_CABLE_LENGTH_SHIFT 7
+
+/* M88E1000 Extended PHY Specific Control Register */
+/* 1 = Lost lock detect enabled.
+ * Will assert lost lock and bring
+ * link down if idle not seen
+ * within 1ms in 1000BASE-T
+ */
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the master
+ */
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK 0x0C00
+#define M88E1000_EPSCR_MASTER_DOWNSHIFT_1X   0x0000
+/* Number of times we will attempt to autonegotiate before downshifting if we
+ * are the slave
+ */
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK  0x0300
+#define M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X    0x0100
+#define M88E1000_EPSCR_TX_CLK_25      0x0070 /* 25  MHz TX_CLK */
+
+/* Intel i347-AT4 Registers */
+
+#define I347AT4_PCDL                   0x10 /* PHY Cable Diagnostics Length */
+#define I347AT4_PCDC                   0x15 /* PHY Cable Diagnostics Control */
+#define I347AT4_PAGE_SELECT            0x16
+
+/* i347-AT4 Extended PHY Specific Control Register */
+
+/*  Number of times we will attempt to autonegotiate before downshifting if we
+ *  are the master
+ */
+#define I347AT4_PSCR_DOWNSHIFT_ENABLE 0x0800
+#define I347AT4_PSCR_DOWNSHIFT_MASK   0x7000
+#define I347AT4_PSCR_DOWNSHIFT_1X     0x0000
+#define I347AT4_PSCR_DOWNSHIFT_2X     0x1000
+#define I347AT4_PSCR_DOWNSHIFT_3X     0x2000
+#define I347AT4_PSCR_DOWNSHIFT_4X     0x3000
+#define I347AT4_PSCR_DOWNSHIFT_5X     0x4000
+#define I347AT4_PSCR_DOWNSHIFT_6X     0x5000
+#define I347AT4_PSCR_DOWNSHIFT_7X     0x6000
+#define I347AT4_PSCR_DOWNSHIFT_8X     0x7000
+
+/* i347-AT4 PHY Cable Diagnostics Control */
+#define I347AT4_PCDC_CABLE_LENGTH_UNIT 0x0400 /* 0=cm 1=meters */
+
+/* Marvell 1112 only registers */
+#define M88E1112_VCT_DSP_DISTANCE       0x001A
+
+/* M88EC018 Rev 2 specific DownShift settings */
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK  0x0E00
+#define M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X    0x0800
+
+/* MDI Control */
+#define E1000_MDIC_DATA_MASK 0x0000FFFF
+#define E1000_MDIC_REG_MASK  0x001F0000
+#define E1000_MDIC_REG_SHIFT 16
+#define E1000_MDIC_PHY_MASK  0x03E00000
+#define E1000_MDIC_PHY_SHIFT 21
+#define E1000_MDIC_OP_WRITE  0x04000000
+#define E1000_MDIC_OP_READ   0x08000000
+#define E1000_MDIC_READY     0x10000000
+#define E1000_MDIC_INT_EN    0x20000000
+#define E1000_MDIC_ERROR     0x40000000
+#define E1000_MDIC_DEST      0x80000000
+
+/* Thermal Sensor */
+#define E1000_THSTAT_PWR_DOWN       0x00000001 /* Power Down Event */
+#define E1000_THSTAT_LINK_THROTTLE  0x00000002 /* Link Speed Throttle Event */
+
+/* Energy Efficient Ethernet */
+#define E1000_IPCNFG_EEE_1G_AN       0x00000008  /* EEE Enable 1G AN */
+#define E1000_IPCNFG_EEE_100M_AN     0x00000004  /* EEE Enable 100M AN */
+#define E1000_EEER_TX_LPI_EN         0x00010000  /* EEE Tx LPI Enable */
+#define E1000_EEER_RX_LPI_EN         0x00020000  /* EEE Rx LPI Enable */
+#define E1000_EEER_FRC_AN            0x10000000  /* Enable EEE in loopback */
+#define E1000_EEER_LPI_FC            0x00040000  /* EEE Enable on FC */
+#define E1000_EEE_SU_LPI_CLK_STP     0x00800000  /* EEE LPI Clock Stop */
+#define E1000_EEER_EEE_NEG           0x20000000  /* EEE capability nego */
+#define E1000_EEE_LP_ADV_ADDR_I350   0x040F      /* EEE LP Advertisement */
+#define E1000_EEE_LP_ADV_DEV_I210    7           /* EEE LP Adv Device */
+#define E1000_EEE_LP_ADV_ADDR_I210   61          /* EEE LP Adv Register */
+#define E1000_MMDAC_FUNC_DATA        0x4000      /* Data, no post increment */
+#define E1000_M88E1543_PAGE_ADDR	0x16       /* Page Offset Register */
+#define E1000_M88E1543_EEE_CTRL_1	0x0
+#define E1000_M88E1543_EEE_CTRL_1_MS	0x0001     /* EEE Master/Slave */
+#define E1000_EEE_ADV_DEV_I354		7
+#define E1000_EEE_ADV_ADDR_I354		60
+#define E1000_EEE_ADV_100_SUPPORTED	(1 << 1)   /* 100BaseTx EEE Supported */
+#define E1000_EEE_ADV_1000_SUPPORTED	(1 << 2)   /* 1000BaseT EEE Supported */
+#define E1000_PCS_STATUS_DEV_I354	3
+#define E1000_PCS_STATUS_ADDR_I354	1
+#define E1000_PCS_STATUS_TX_LPI_IND	0x0200     /* Tx in LPI state */
+#define E1000_PCS_STATUS_RX_LPI_RCVD	0x0400
+#define E1000_PCS_STATUS_TX_LPI_RCVD	0x0800
+
+/* SerDes Control */
+#define E1000_GEN_CTL_READY             0x80000000
+#define E1000_GEN_CTL_ADDRESS_SHIFT     8
+#define E1000_GEN_POLL_TIMEOUT          640
+
+#define E1000_VFTA_ENTRY_SHIFT               5
+#define E1000_VFTA_ENTRY_MASK                0x7F
+#define E1000_VFTA_ENTRY_BIT_SHIFT_MASK      0x1F
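+
+/* Usage sketch (illustrative, not part of the original sources): locating the
+ * VLAN filter-table word and bit for VLAN id 'vid':
+ *
+ *	index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+ *	mask  = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+ */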
+
+/* DMA Coalescing register fields */
+#define E1000_PCIEMISC_LX_DECISION      0x00000080 /* Lx power on DMA coal */
+
+/* Tx Rate-Scheduler Config fields */
+#define E1000_RTTBCNRC_RS_ENA		0x80000000
+#define E1000_RTTBCNRC_RF_DEC_MASK	0x00003FFF
+#define E1000_RTTBCNRC_RF_INT_SHIFT	14
+#define E1000_RTTBCNRC_RF_INT_MASK	\
+	(E1000_RTTBCNRC_RF_DEC_MASK << E1000_RTTBCNRC_RF_INT_SHIFT)
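+
+/* Usage sketch (illustrative, not part of the original sources): the rate
+ * factor is a binary fixed-point value, integer part above a 14-bit fraction:
+ *
+ *	rttbcnrc = E1000_RTTBCNRC_RS_ENA |
+ *		   ((rf_int << E1000_RTTBCNRC_RF_INT_SHIFT) &
+ *		    E1000_RTTBCNRC_RF_INT_MASK) |
+ *		   (rf_dec & E1000_RTTBCNRC_RF_DEC_MASK);
+ */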
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h
new file mode 100644
index 0000000..7bb117d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_hw.h
@@ -0,0 +1,570 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_HW_H_
+#define _E1000_HW_H_
+
+#include <linux/types.h>
+#include <linux/delay.h>
+#include <linux/io.h>
+#include <linux/netdevice.h>
+#include <rtnet_port.h>
+
+#include "e1000_regs.h"
+#include "e1000_defines.h"
+
+struct e1000_hw;
+
+#define E1000_DEV_ID_82576			0x10C9
+#define E1000_DEV_ID_82576_FIBER		0x10E6
+#define E1000_DEV_ID_82576_SERDES		0x10E7
+#define E1000_DEV_ID_82576_QUAD_COPPER		0x10E8
+#define E1000_DEV_ID_82576_QUAD_COPPER_ET2	0x1526
+#define E1000_DEV_ID_82576_NS			0x150A
+#define E1000_DEV_ID_82576_NS_SERDES		0x1518
+#define E1000_DEV_ID_82576_SERDES_QUAD		0x150D
+#define E1000_DEV_ID_82575EB_COPPER		0x10A7
+#define E1000_DEV_ID_82575EB_FIBER_SERDES	0x10A9
+#define E1000_DEV_ID_82575GB_QUAD_COPPER	0x10D6
+#define E1000_DEV_ID_82580_COPPER		0x150E
+#define E1000_DEV_ID_82580_FIBER		0x150F
+#define E1000_DEV_ID_82580_SERDES		0x1510
+#define E1000_DEV_ID_82580_SGMII		0x1511
+#define E1000_DEV_ID_82580_COPPER_DUAL		0x1516
+#define E1000_DEV_ID_82580_QUAD_FIBER		0x1527
+#define E1000_DEV_ID_DH89XXCC_SGMII		0x0438
+#define E1000_DEV_ID_DH89XXCC_SERDES		0x043A
+#define E1000_DEV_ID_DH89XXCC_BACKPLANE		0x043C
+#define E1000_DEV_ID_DH89XXCC_SFP		0x0440
+#define E1000_DEV_ID_I350_COPPER		0x1521
+#define E1000_DEV_ID_I350_FIBER			0x1522
+#define E1000_DEV_ID_I350_SERDES		0x1523
+#define E1000_DEV_ID_I350_SGMII			0x1524
+#define E1000_DEV_ID_I210_COPPER		0x1533
+#define E1000_DEV_ID_I210_FIBER			0x1536
+#define E1000_DEV_ID_I210_SERDES		0x1537
+#define E1000_DEV_ID_I210_SGMII			0x1538
+#define E1000_DEV_ID_I210_COPPER_FLASHLESS	0x157B
+#define E1000_DEV_ID_I210_SERDES_FLASHLESS	0x157C
+#define E1000_DEV_ID_I211_COPPER		0x1539
+#define E1000_DEV_ID_I354_BACKPLANE_1GBPS	0x1F40
+#define E1000_DEV_ID_I354_SGMII			0x1F41
+#define E1000_DEV_ID_I354_BACKPLANE_2_5GBPS	0x1F45
+
+#define E1000_REVISION_2 2
+#define E1000_REVISION_4 4
+
+#define E1000_FUNC_0     0
+#define E1000_FUNC_1     1
+#define E1000_FUNC_2     2
+#define E1000_FUNC_3     3
+
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN0   0
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN1   3
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN2   6
+#define E1000_ALT_MAC_ADDRESS_OFFSET_LAN3   9
+
+enum e1000_mac_type {
+	e1000_undefined = 0,
+	e1000_82575,
+	e1000_82576,
+	e1000_82580,
+	e1000_i350,
+	e1000_i354,
+	e1000_i210,
+	e1000_i211,
+	e1000_num_macs  /* List is 1-based, so subtract 1 for true count. */
+};
+
+enum e1000_media_type {
+	e1000_media_type_unknown = 0,
+	e1000_media_type_copper = 1,
+	e1000_media_type_fiber = 2,
+	e1000_media_type_internal_serdes = 3,
+	e1000_num_media_types
+};
+
+enum e1000_nvm_type {
+	e1000_nvm_unknown = 0,
+	e1000_nvm_none,
+	e1000_nvm_eeprom_spi,
+	e1000_nvm_flash_hw,
+	e1000_nvm_invm,
+	e1000_nvm_flash_sw
+};
+
+enum e1000_nvm_override {
+	e1000_nvm_override_none = 0,
+	e1000_nvm_override_spi_small,
+	e1000_nvm_override_spi_large,
+};
+
+enum e1000_phy_type {
+	e1000_phy_unknown = 0,
+	e1000_phy_none,
+	e1000_phy_m88,
+	e1000_phy_igp,
+	e1000_phy_igp_2,
+	e1000_phy_gg82563,
+	e1000_phy_igp_3,
+	e1000_phy_ife,
+	e1000_phy_82580,
+	e1000_phy_i210,
+};
+
+enum e1000_bus_type {
+	e1000_bus_type_unknown = 0,
+	e1000_bus_type_pci,
+	e1000_bus_type_pcix,
+	e1000_bus_type_pci_express,
+	e1000_bus_type_reserved
+};
+
+enum e1000_bus_speed {
+	e1000_bus_speed_unknown = 0,
+	e1000_bus_speed_33,
+	e1000_bus_speed_66,
+	e1000_bus_speed_100,
+	e1000_bus_speed_120,
+	e1000_bus_speed_133,
+	e1000_bus_speed_2500,
+	e1000_bus_speed_5000,
+	e1000_bus_speed_reserved
+};
+
+enum e1000_bus_width {
+	e1000_bus_width_unknown = 0,
+	e1000_bus_width_pcie_x1,
+	e1000_bus_width_pcie_x2,
+	e1000_bus_width_pcie_x4 = 4,
+	e1000_bus_width_pcie_x8 = 8,
+	e1000_bus_width_32,
+	e1000_bus_width_64,
+	e1000_bus_width_reserved
+};
+
+enum e1000_1000t_rx_status {
+	e1000_1000t_rx_status_not_ok = 0,
+	e1000_1000t_rx_status_ok,
+	e1000_1000t_rx_status_undefined = 0xFF
+};
+
+enum e1000_rev_polarity {
+	e1000_rev_polarity_normal = 0,
+	e1000_rev_polarity_reversed,
+	e1000_rev_polarity_undefined = 0xFF
+};
+
+enum e1000_fc_mode {
+	e1000_fc_none = 0,
+	e1000_fc_rx_pause,
+	e1000_fc_tx_pause,
+	e1000_fc_full,
+	e1000_fc_default = 0xFF
+};
+
+/* Statistics counters collected by the MAC */
+struct e1000_hw_stats {
+	u64 crcerrs;
+	u64 algnerrc;
+	u64 symerrs;
+	u64 rxerrc;
+	u64 mpc;
+	u64 scc;
+	u64 ecol;
+	u64 mcc;
+	u64 latecol;
+	u64 colc;
+	u64 dc;
+	u64 tncrs;
+	u64 sec;
+	u64 cexterr;
+	u64 rlec;
+	u64 xonrxc;
+	u64 xontxc;
+	u64 xoffrxc;
+	u64 xofftxc;
+	u64 fcruc;
+	u64 prc64;
+	u64 prc127;
+	u64 prc255;
+	u64 prc511;
+	u64 prc1023;
+	u64 prc1522;
+	u64 gprc;
+	u64 bprc;
+	u64 mprc;
+	u64 gptc;
+	u64 gorc;
+	u64 gotc;
+	u64 rnbc;
+	u64 ruc;
+	u64 rfc;
+	u64 roc;
+	u64 rjc;
+	u64 mgprc;
+	u64 mgpdc;
+	u64 mgptc;
+	u64 tor;
+	u64 tot;
+	u64 tpr;
+	u64 tpt;
+	u64 ptc64;
+	u64 ptc127;
+	u64 ptc255;
+	u64 ptc511;
+	u64 ptc1023;
+	u64 ptc1522;
+	u64 mptc;
+	u64 bptc;
+	u64 tsctc;
+	u64 tsctfc;
+	u64 iac;
+	u64 icrxptc;
+	u64 icrxatc;
+	u64 ictxptc;
+	u64 ictxatc;
+	u64 ictxqec;
+	u64 ictxqmtc;
+	u64 icrxdmtc;
+	u64 icrxoc;
+	u64 cbtmpc;
+	u64 htdpmc;
+	u64 cbrdpc;
+	u64 cbrmpc;
+	u64 rpthc;
+	u64 hgptc;
+	u64 htcbdpc;
+	u64 hgorc;
+	u64 hgotc;
+	u64 lenerrs;
+	u64 scvpc;
+	u64 hrmpc;
+	u64 doosync;
+	u64 o2bgptc;
+	u64 o2bspc;
+	u64 b2ospc;
+	u64 b2ogprc;
+};
+
+struct e1000_host_mng_dhcp_cookie {
+	u32 signature;
+	u8  status;
+	u8  reserved0;
+	u16 vlan_id;
+	u32 reserved1;
+	u16 reserved2;
+	u8  reserved3;
+	u8  checksum;
+};
+
+/* Host Interface "Rev 1" */
+struct e1000_host_command_header {
+	u8 command_id;
+	u8 command_length;
+	u8 command_options;
+	u8 checksum;
+};
+
+#define E1000_HI_MAX_DATA_LENGTH     252
+struct e1000_host_command_info {
+	struct e1000_host_command_header command_header;
+	u8 command_data[E1000_HI_MAX_DATA_LENGTH];
+};
+
+/* Host Interface "Rev 2" */
+struct e1000_host_mng_command_header {
+	u8  command_id;
+	u8  checksum;
+	u16 reserved1;
+	u16 reserved2;
+	u16 command_length;
+};
+
+#define E1000_HI_MAX_MNG_DATA_LENGTH 0x6F8
+struct e1000_host_mng_command_info {
+	struct e1000_host_mng_command_header command_header;
+	u8 command_data[E1000_HI_MAX_MNG_DATA_LENGTH];
+};
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_mbx.h"
+
+struct e1000_mac_operations {
+	s32 (*check_for_link)(struct e1000_hw *);
+	s32 (*reset_hw)(struct e1000_hw *);
+	s32 (*init_hw)(struct e1000_hw *);
+	bool (*check_mng_mode)(struct e1000_hw *);
+	s32 (*setup_physical_interface)(struct e1000_hw *);
+	void (*rar_set)(struct e1000_hw *, u8 *, u32);
+	s32 (*read_mac_addr)(struct e1000_hw *);
+	s32 (*get_speed_and_duplex)(struct e1000_hw *, u16 *, u16 *);
+	s32 (*acquire_swfw_sync)(struct e1000_hw *, u16);
+	void (*release_swfw_sync)(struct e1000_hw *, u16);
+#ifdef CONFIG_IGB_HWMON
+	s32 (*get_thermal_sensor_data)(struct e1000_hw *);
+	s32 (*init_thermal_sensor_thresh)(struct e1000_hw *);
+#endif
+};
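+
+/* Usage sketch (illustrative, not part of the original sources): each MAC
+ * family installs its own table, and callers dispatch through the hw handle,
+ * assuming 'struct e1000_hw' embeds this table as 'mac.ops':
+ *
+ *	if (hw->mac.ops.reset_hw)
+ *		ret_val = hw->mac.ops.reset_hw(hw);
+ */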
+
+struct e1000_phy_operations {
+	s32 (*acquire)(struct e1000_hw *);
+	s32 (*check_polarity)(struct e1000_hw *);
+	s32 (*check_reset_block)(struct e1000_hw *);
+	s32 (*force_speed_duplex)(struct e1000_hw *);
+	s32 (*get_cfg_done)(struct e1000_hw *hw);
+	s32 (*get_cable_length)(struct e1000_hw *);
+	s32 (*get_phy_info)(struct e1000_hw *);
+	s32 (*read_reg)(struct e1000_hw *, u32, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32 (*reset)(struct e1000_hw *);
+	s32 (*set_d0_lplu_state)(struct e1000_hw *, bool);
+	s32 (*set_d3_lplu_state)(struct e1000_hw *, bool);
+	s32 (*write_reg)(struct e1000_hw *, u32, u16);
+	s32 (*read_i2c_byte)(struct e1000_hw *, u8, u8, u8 *);
+	s32 (*write_i2c_byte)(struct e1000_hw *, u8, u8, u8);
+};
+
+struct e1000_nvm_operations {
+	s32 (*acquire)(struct e1000_hw *);
+	s32 (*read)(struct e1000_hw *, u16, u16, u16 *);
+	void (*release)(struct e1000_hw *);
+	s32 (*write)(struct e1000_hw *, u16, u16, u16 *);
+	s32 (*update)(struct e1000_hw *);
+	s32 (*validate)(struct e1000_hw *);
+	s32 (*valid_led_default)(struct e1000_hw *, u16 *);
+};
+
+#define E1000_MAX_SENSORS		3
+
+struct e1000_thermal_diode_data {
+	u8 location;
+	u8 temp;
+	u8 caution_thresh;
+	u8 max_op_thresh;
+};
+
+struct e1000_thermal_sensor_data {
+	struct e1000_thermal_diode_data sensor[E1000_MAX_SENSORS];
+};
+
+struct e1000_info {
+	s32 (*get_invariants)(struct e1000_hw *);
+	struct e1000_mac_operations *mac_ops;
+	struct e1000_phy_operations *phy_ops;
+	struct e1000_nvm_operations *nvm_ops;
+};
+
+extern const struct e1000_info e1000_82575_info;
+
+struct e1000_mac_info {
+	struct e1000_mac_operations ops;
+
+	u8 addr[6];
+	u8 perm_addr[6];
+
+	enum e1000_mac_type type;
+
+	u32 ledctl_default;
+	u32 ledctl_mode1;
+	u32 ledctl_mode2;
+	u32 mc_filter_type;
+	u32 txcw;
+
+	u16 mta_reg_count;
+	u16 uta_reg_count;
+
+	/* Maximum size of the MTA register table in all supported adapters */
+	#define MAX_MTA_REG 128
+	u32 mta_shadow[MAX_MTA_REG];
+	u16 rar_entry_count;
+
+	u8  forced_speed_duplex;
+
+	bool adaptive_ifs;
+	bool arc_subsystem_valid;
+	bool asf_firmware_present;
+	bool autoneg;
+	bool autoneg_failed;
+	bool disable_hw_init_bits;
+	bool get_link_status;
+	bool ifs_params_forced;
+	bool in_ifs_mode;
+	bool report_tx_early;
+	bool serdes_has_link;
+	bool tx_pkt_filtering;
+	struct e1000_thermal_sensor_data thermal_sensor_data;
+};
+
+struct e1000_phy_info {
+	struct e1000_phy_operations ops;
+
+	enum e1000_phy_type type;
+
+	enum e1000_1000t_rx_status local_rx;
+	enum e1000_1000t_rx_status remote_rx;
+	enum e1000_ms_type ms_type;
+	enum e1000_ms_type original_ms_type;
+	enum e1000_rev_polarity cable_polarity;
+	enum e1000_smart_speed smart_speed;
+
+	u32 addr;
+	u32 id;
+	u32 reset_delay_us; /* in usec */
+	u32 revision;
+
+	enum e1000_media_type media_type;
+
+	u16 autoneg_advertised;
+	u16 autoneg_mask;
+	u16 cable_length;
+	u16 max_cable_length;
+	u16 min_cable_length;
+
+	u8 mdix;
+
+	bool disable_polarity_correction;
+	bool is_mdix;
+	bool polarity_correction;
+	bool reset_disable;
+	bool speed_downgraded;
+	bool autoneg_wait_to_complete;
+};
+
+struct e1000_nvm_info {
+	struct e1000_nvm_operations ops;
+	enum e1000_nvm_type type;
+	enum e1000_nvm_override override;
+
+	u32 flash_bank_size;
+	u32 flash_base_addr;
+
+	u16 word_size;
+	u16 delay_usec;
+	u16 address_bits;
+	u16 opcode_bits;
+	u16 page_size;
+};
+
+struct e1000_bus_info {
+	enum e1000_bus_type type;
+	enum e1000_bus_speed speed;
+	enum e1000_bus_width width;
+
+	u32 snoop;
+
+	u16 func;
+	u16 pci_cmd_word;
+};
+
+struct e1000_fc_info {
+	u32 high_water;     /* Flow control high-water mark */
+	u32 low_water;      /* Flow control low-water mark */
+	u16 pause_time;     /* Flow control pause timer */
+	bool send_xon;      /* Flow control send XON */
+	bool strict_ieee;   /* Strict IEEE mode */
+	enum e1000_fc_mode current_mode; /* Type of flow control */
+	enum e1000_fc_mode requested_mode;
+};
+
+struct e1000_mbx_operations {
+	s32 (*init_params)(struct e1000_hw *hw);
+	s32 (*read)(struct e1000_hw *, u32 *, u16,  u16);
+	s32 (*write)(struct e1000_hw *, u32 *, u16, u16);
+	s32 (*read_posted)(struct e1000_hw *, u32 *, u16,  u16);
+	s32 (*write_posted)(struct e1000_hw *, u32 *, u16, u16);
+	s32 (*check_for_msg)(struct e1000_hw *, u16);
+	s32 (*check_for_ack)(struct e1000_hw *, u16);
+	s32 (*check_for_rst)(struct e1000_hw *, u16);
+};
+
+struct e1000_mbx_stats {
+	u32 msgs_tx;
+	u32 msgs_rx;
+
+	u32 acks;
+	u32 reqs;
+	u32 rsts;
+};
+
+struct e1000_mbx_info {
+	struct e1000_mbx_operations ops;
+	struct e1000_mbx_stats stats;
+	u32 timeout;
+	u32 usec_delay;
+	u16 size;
+};
+
+struct e1000_dev_spec_82575 {
+	bool sgmii_active;
+	bool global_device_reset;
+	bool eee_disable;
+	bool clear_semaphore_once;
+	struct e1000_sfp_flags eth_flags;
+	bool module_plugged;
+	u8 media_port;
+	bool media_changed;
+	bool mas_capable;
+};
+
+struct e1000_hw {
+	void *back;
+
+	u8 __iomem *hw_addr;
+	u8 __iomem *flash_address;
+	unsigned long io_base;
+
+	struct e1000_mac_info  mac;
+	struct e1000_fc_info   fc;
+	struct e1000_phy_info  phy;
+	struct e1000_nvm_info  nvm;
+	struct e1000_bus_info  bus;
+	struct e1000_mbx_info mbx;
+	struct e1000_host_mng_dhcp_cookie mng_cookie;
+
+	union {
+		struct e1000_dev_spec_82575	_82575;
+	} dev_spec;
+
+	u16 device_id;
+	u16 subsystem_vendor_id;
+	u16 subsystem_device_id;
+	u16 vendor_id;
+
+	u8  revision_id;
+};
+
+struct rtnet_device *igb_get_hw_dev(struct e1000_hw *hw);
+#define hw_dbg(format, arg...) \
+	rtdev_dbg(igb_get_hw_dev(hw), format, ##arg)
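+/* Note: hw_dbg() relies on a local variable named 'hw' being in scope
+ * at the call site.
+ */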
+
+/* These functions must be implemented by drivers */
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value);
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value);
+#endif /* _E1000_HW_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c
new file mode 100644
index 0000000..65d9316
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.c
@@ -0,0 +1,902 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* e1000_i210
+ * e1000_i211
+ */
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "e1000_hw.h"
+#include "e1000_i210.h"
+
+static s32 igb_update_flash_i210(struct e1000_hw *hw);
+
+/**
+ *  igb_get_hw_semaphore_i210 - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+static s32 igb_get_hw_semaphore_i210(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = rd32(E1000_SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		/* In rare circumstances, the SW semaphore may already be held
+		 * unintentionally. Clear the semaphore once before giving up.
+		 */
+		if (hw->dev_spec._82575.clear_semaphore_once) {
+			hw->dev_spec._82575.clear_semaphore_once = false;
+			igb_put_hw_semaphore(hw);
+			for (i = 0; i < timeout; i++) {
+				swsm = rd32(E1000_SWSM);
+				if (!(swsm & E1000_SWSM_SMBI))
+					break;
+
+				udelay(50);
+			}
+		}
+
+		/* If we do not have the semaphore here, we have to give up. */
+		if (i == timeout) {
+			hw_dbg("Driver can't access device - SMBI bit is set.\n");
+			return -E1000_ERR_NVM;
+		}
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = rd32(E1000_SWSM);
+		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		igb_put_hw_semaphore(hw);
+		hw_dbg("Driver can't access the NVM\n");
+		return -E1000_ERR_NVM;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_acquire_nvm_i210 - Request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the necessary semaphores for exclusive access to the EEPROM.
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+static s32 igb_acquire_nvm_i210(struct e1000_hw *hw)
+{
+	return igb_acquire_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_release_nvm_i210 - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit,
+ *  then release the semaphores acquired.
+ **/
+static void igb_release_nvm_i210(struct e1000_hw *hw)
+{
+	igb_release_swfw_sync_i210(hw, E1000_SWFW_EEP_SM);
+}
+
+/**
+ *  igb_acquire_swfw_sync_i210 - Acquire SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Acquire the SW/FW semaphore to access the PHY or NVM.  The mask
+ *  will also specify which port we're acquiring the lock for.
+ **/
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+	u32 swmask = mask;
+	u32 fwmask = mask << 16;
+	s32 ret_val = 0;
+	s32 i = 0, timeout = 200; /* FIXME: find real value to use here */
+
+	while (i < timeout) {
+		if (igb_get_hw_semaphore_i210(hw)) {
+			ret_val = -E1000_ERR_SWFW_SYNC;
+			goto out;
+		}
+
+		swfw_sync = rd32(E1000_SW_FW_SYNC);
+		if (!(swfw_sync & (fwmask | swmask)))
+			break;
+
+		/* Firmware currently using resource (fwmask) */
+		igb_put_hw_semaphore(hw);
+		mdelay(5);
+		i++;
+	}
+
+	if (i == timeout) {
+		hw_dbg("Driver can't access resource, SW_FW_SYNC timeout.\n");
+		ret_val = -E1000_ERR_SWFW_SYNC;
+		goto out;
+	}
+
+	swfw_sync |= swmask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_release_swfw_sync_i210 - Release SW/FW semaphore
+ *  @hw: pointer to the HW structure
+ *  @mask: specifies which semaphore to acquire
+ *
+ *  Release the SW/FW semaphore used to access the PHY or NVM.  The mask
+ *  will also specify which port we're releasing the lock for.
+ **/
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask)
+{
+	u32 swfw_sync;
+
+	while (igb_get_hw_semaphore_i210(hw))
+		; /* Empty */
+
+	swfw_sync = rd32(E1000_SW_FW_SYNC);
+	swfw_sync &= ~mask;
+	wr32(E1000_SW_FW_SYNC, swfw_sync);
+
+	igb_put_hw_semaphore(hw);
+}
+
+/**
+ *  igb_read_nvm_srrd_i210 - Reads Shadow Ram using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the Shadow Ram to read
+ *  @words: number of words to read
+ *  @data: word read from the Shadow Ram
+ *
+ *  Reads a 16 bit word from the Shadow Ram using the EERD register.
+ *  Uses necessary synchronization semaphores.
+ **/
+static s32 igb_read_nvm_srrd_i210(struct e1000_hw *hw, u16 offset, u16 words,
+				  u16 *data)
+{
+	s32 status = 0;
+	u16 i, count;
+
+	/* We cannot hold the synchronization semaphores for too long,
+	 * because of the forceful takeover procedure. However, it is more
+	 * efficient to read in bursts than to synchronize access for each
+	 * word.
+	 */
+	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
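+		/* count = min(words - i, E1000_EERD_EEWR_MAX_COUNT), the
+		 * size of this burst
+		 */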
+		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+			E1000_EERD_EEWR_MAX_COUNT : (words - i);
+		if (!(hw->nvm.ops.acquire(hw))) {
+			status = igb_read_nvm_eerd(hw, offset, count,
+						     data + i);
+			hw->nvm.ops.release(hw);
+		} else {
+			status = E1000_ERR_SWFW_SYNC;
+		}
+
+		if (status)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_write_nvm_srwr - Write to Shadow Ram using EEWR
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the Shadow Ram to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the Shadow Ram
+ *
+ *  Writes data to Shadow Ram at offset using EEWR register.
+ *
+ *  If igb_update_nvm_checksum is not called after this function, the
+ *  Shadow Ram will most likely contain an invalid checksum.
+ **/
+static s32 igb_write_nvm_srwr(struct e1000_hw *hw, u16 offset, u16 words,
+				u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, k, eewr = 0;
+	u32 attempts = 100000;
+	s32 ret_val = 0;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * too many words for the offset, and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eewr = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) |
+			(data[i] << E1000_NVM_RW_REG_DATA) |
+			E1000_NVM_RW_REG_START;
+
+		wr32(E1000_SRWR, eewr);
+
+		/* Assume timeout until the DONE bit is seen */
+		ret_val = -E1000_ERR_NVM;
+		for (k = 0; k < attempts; k++) {
+			if (E1000_NVM_RW_REG_DONE &
+			    rd32(E1000_SRWR)) {
+				ret_val = 0;
+				break;
+			}
+			udelay(5);
+		}
+
+		if (ret_val) {
+			hw_dbg("Shadow RAM write EEWR timed out\n");
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_nvm_srwr_i210 - Write to Shadow RAM using EEWR
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the Shadow RAM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the Shadow RAM
+ *
+ *  Writes data to Shadow RAM at offset using EEWR register.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  data will not be committed to FLASH and also Shadow RAM will most likely
+ *  contain an invalid checksum.
+ *
+ *  If error code is returned, data and Shadow RAM may be inconsistent - buffer
+ *  partially written.
+ **/
+static s32 igb_write_nvm_srwr_i210(struct e1000_hw *hw, u16 offset, u16 words,
+				   u16 *data)
+{
+	s32 status = 0;
+	u16 i, count;
+
+	/* We cannot hold the synchronization semaphores for too long,
+	 * because of the forceful takeover procedure. However, it is more
+	 * efficient to write in bursts than to synchronize access for each
+	 * word.
+	 */
+	for (i = 0; i < words; i += E1000_EERD_EEWR_MAX_COUNT) {
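+		/* count = min(words - i, E1000_EERD_EEWR_MAX_COUNT), the
+		 * size of this burst
+		 */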
+		count = (words - i) / E1000_EERD_EEWR_MAX_COUNT > 0 ?
+			E1000_EERD_EEWR_MAX_COUNT : (words - i);
+		if (!(hw->nvm.ops.acquire(hw))) {
+			status = igb_write_nvm_srwr(hw, offset, count,
+						      data + i);
+			hw->nvm.ops.release(hw);
+		} else {
+			status = E1000_ERR_SWFW_SYNC;
+		}
+
+		if (status)
+			break;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_read_invm_word_i210 - Reads OTP
+ *  @hw: pointer to the HW structure
+ *  @address: the word address (aka eeprom offset) to read
+ *  @data: pointer to the data read
+ *
+ *  Reads 16-bit words from the OTP. Return error when the word is not
+ *  stored in OTP.
+ **/
+static s32 igb_read_invm_word_i210(struct e1000_hw *hw, u8 address, u16 *data)
+{
+	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	u32 invm_dword;
+	u16 i;
+	u8 record_type, word_address;
+
+	for (i = 0; i < E1000_INVM_SIZE; i++) {
+		invm_dword = rd32(E1000_INVM_DATA_REG(i));
+		/* Get record type */
+		record_type = INVM_DWORD_TO_RECORD_TYPE(invm_dword);
+		if (record_type == E1000_INVM_UNINITIALIZED_STRUCTURE)
+			break;
+		if (record_type == E1000_INVM_CSR_AUTOLOAD_STRUCTURE)
+			i += E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS;
+		if (record_type == E1000_INVM_RSA_KEY_SHA256_STRUCTURE)
+			i += E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS;
+		if (record_type == E1000_INVM_WORD_AUTOLOAD_STRUCTURE) {
+			word_address = INVM_DWORD_TO_WORD_ADDRESS(invm_dword);
+			if (word_address == address) {
+				*data = INVM_DWORD_TO_WORD_DATA(invm_dword);
+				hw_dbg("Read INVM Word 0x%02x = %x\n",
+					  address, *data);
+				status = 0;
+				break;
+			}
+		}
+	}
+	if (status)
+		hw_dbg("Requested word 0x%02x not found in OTP\n", address);
+	return status;
+}
+
+/**
+ *  igb_read_invm_i210 - Read invm wrapper function for I210/I211
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of the word to read
+ *  @words: number of words to read (unused)
+ *  @data: pointer to the data read
+ *
+ *  Wrapper function to return data formerly found in the NVM.
+ **/
+static s32 igb_read_invm_i210(struct e1000_hw *hw, u16 offset,
+				u16 words __always_unused, u16 *data)
+{
+	s32 ret_val = 0;
+
+	/* Only the MAC addr is required to be present in the iNVM */
+	switch (offset) {
+	case NVM_MAC_ADDR:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, &data[0]);
+		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+1,
+						     &data[1]);
+		ret_val |= igb_read_invm_word_i210(hw, (u8)offset+2,
+						     &data[2]);
+		if (ret_val)
+			hw_dbg("MAC Addr not found in iNVM\n");
+		break;
+	case NVM_INIT_CTRL_2:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_INIT_CTRL_2_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_INIT_CTRL_4:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_INIT_CTRL_4_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_LED_1_CFG:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_LED_1_CFG_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_LED_0_2_CFG:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = NVM_LED_0_2_CFG_DEFAULT_I211;
+			ret_val = 0;
+		}
+		break;
+	case NVM_ID_LED_SETTINGS:
+		ret_val = igb_read_invm_word_i210(hw, (u8)offset, data);
+		if (ret_val) {
+			*data = ID_LED_RESERVED_FFFF;
+			ret_val = 0;
+		}
+		break;
+	case NVM_SUB_DEV_ID:
+		*data = hw->subsystem_device_id;
+		break;
+	case NVM_SUB_VEN_ID:
+		*data = hw->subsystem_vendor_id;
+		break;
+	case NVM_DEV_ID:
+		*data = hw->device_id;
+		break;
+	case NVM_VEN_ID:
+		*data = hw->vendor_id;
+		break;
+	default:
+		hw_dbg("NVM word 0x%02x is not mapped.\n", offset);
+		*data = NVM_RESERVED_WORD;
+		break;
+	}
+	return ret_val;
+}
+
+/**
+ *  igb_read_invm_version - Reads iNVM version and image type
+ *  @hw: pointer to the HW structure
+ *  @invm_ver: version structure for the version read
+ *
+ *  Reads iNVM version and image type.
+ **/
+s32 igb_read_invm_version(struct e1000_hw *hw,
+			  struct e1000_fw_version *invm_ver)
+{
+	u32 *record = NULL;
+	u32 *next_record = NULL;
+	u32 i = 0;
+	u32 invm_dword = 0;
+	u32 invm_blocks = E1000_INVM_SIZE - (E1000_INVM_ULT_BYTES_SIZE /
+					     E1000_INVM_RECORD_SIZE_IN_BYTES);
+	u32 buffer[E1000_INVM_SIZE];
+	s32 status = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	u16 version = 0;
+
+	/* Read iNVM memory */
+	for (i = 0; i < E1000_INVM_SIZE; i++) {
+		invm_dword = rd32(E1000_INVM_DATA_REG(i));
+		buffer[i] = invm_dword;
+	}
+
+	/* Read version number */
+	for (i = 1; i < invm_blocks; i++) {
+		record = &buffer[invm_blocks - i];
+		next_record = &buffer[invm_blocks - i + 1];
+
+		/* Check if we have first version location used */
+		if ((i == 1) && ((*record & E1000_INVM_VER_FIELD_ONE) == 0)) {
+			version = 0;
+			status = 0;
+			break;
+		}
+		/* Check if we have second version location used */
+		else if ((i == 1) &&
+			 ((*record & E1000_INVM_VER_FIELD_TWO) == 0)) {
+			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+			status = 0;
+			break;
+		}
+		/* Check if we have odd version location
+		 * used and it is the last one used
+		 */
+		else if ((((*record & E1000_INVM_VER_FIELD_ONE) == 0) &&
+			 ((*record & 0x3) == 0)) || (((*record & 0x3) != 0) &&
+			 (i != 1))) {
+			version = (*next_record & E1000_INVM_VER_FIELD_TWO)
+				  >> 13;
+			status = 0;
+			break;
+		}
+		/* Check if we have even version location
+		 * used and it is the last one used
+		 */
+		else if (((*record & E1000_INVM_VER_FIELD_TWO) == 0) &&
+			 ((*record & 0x3) == 0)) {
+			version = (*record & E1000_INVM_VER_FIELD_ONE) >> 3;
+			status = 0;
+			break;
+		}
+	}
+
+	if (!status) {
+		invm_ver->invm_major = (version & E1000_INVM_MAJOR_MASK)
+					>> E1000_INVM_MAJOR_SHIFT;
+		invm_ver->invm_minor = version & E1000_INVM_MINOR_MASK;
+	}
+	/* Read Image Type */
+	for (i = 1; i < invm_blocks; i++) {
+		record = &buffer[invm_blocks - i];
+		next_record = &buffer[invm_blocks - i + 1];
+
+		/* Check if we have image type in first location used */
+		if ((i == 1) && ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) {
+			invm_ver->invm_img_type = 0;
+			status = 0;
+			break;
+		}
+		/* Check if we have image type in the last location used */
+		else if ((((*record & 0x3) == 0) &&
+			 ((*record & E1000_INVM_IMGTYPE_FIELD) == 0)) ||
+			 ((((*record & 0x3) != 0) && (i != 1)))) {
+			invm_ver->invm_img_type =
+				(*next_record & E1000_INVM_IMGTYPE_FIELD) >> 23;
+			status = 0;
+			break;
+		}
+	}
+	return status;
+}
+
+/**
+ *  igb_validate_nvm_checksum_i210 - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+static s32 igb_validate_nvm_checksum_i210(struct e1000_hw *hw)
+{
+	s32 status = 0;
+	s32 (*read_op_ptr)(struct e1000_hw *, u16, u16, u16 *);
+
+	if (!(hw->nvm.ops.acquire(hw))) {
+
+		/* Replace the read function, which would take the
+		 * semaphore again, with one that skips it; we already
+		 * hold the semaphore here.
+		 */
+		read_op_ptr = hw->nvm.ops.read;
+		hw->nvm.ops.read = igb_read_nvm_eerd;
+
+		status = igb_validate_nvm_checksum(hw);
+
+		/* Revert original read operation. */
+		hw->nvm.ops.read = read_op_ptr;
+
+		hw->nvm.ops.release(hw);
+	} else {
+		status = E1000_ERR_SWFW_SYNC;
+	}
+
+	return status;
+}
+
+/**
+ *  igb_update_nvm_checksum_i210 - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.  Finally, the EEPROM data is committed to the flash.
+ **/
+static s32 igb_update_nvm_checksum_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
+	/* Read the first word from the EEPROM. If this times out or fails, do
+	 * not continue or we could be in for a very long wait while every
+	 * EEPROM read fails
+	 */
+	ret_val = igb_read_nvm_eerd(hw, 0, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("EEPROM read failed\n");
+		goto out;
+	}
+
+	if (!(hw->nvm.ops.acquire(hw))) {
+		/* Do not use hw->nvm.ops.write, hw->nvm.ops.read
+		 * because we do not want to take the synchronization
+		 * semaphores twice here.
+		 */
+
+		for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+			ret_val = igb_read_nvm_eerd(hw, i, 1, &nvm_data);
+			if (ret_val) {
+				hw->nvm.ops.release(hw);
+				hw_dbg("NVM Read Error while updating checksum.\n");
+				goto out;
+			}
+			checksum += nvm_data;
+		}
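+		/* A valid image sums (mod 2^16) to NVM_SUM (0xBABA) over
+		 * words 0x00 up to and including the checksum word, so
+		 * store the difference in the checksum word.
+		 */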
+		checksum = (u16) NVM_SUM - checksum;
+		ret_val = igb_write_nvm_srwr(hw, NVM_CHECKSUM_REG, 1,
+						&checksum);
+		if (ret_val) {
+			hw->nvm.ops.release(hw);
+			hw_dbg("NVM Write Error while updating checksum.\n");
+			goto out;
+		}
+
+		hw->nvm.ops.release(hw);
+
+		ret_val = igb_update_flash_i210(hw);
+	} else {
+		ret_val = -E1000_ERR_SWFW_SYNC;
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_pool_flash_update_done_i210 - Poll FLUDONE status.
+ *  @hw: pointer to the HW structure
+ *
+ **/
+static s32 igb_pool_flash_update_done_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = -E1000_ERR_NVM;
+	u32 i, reg;
+
+	for (i = 0; i < E1000_FLUDONE_ATTEMPTS; i++) {
+		reg = rd32(E1000_EECD);
+		if (reg & E1000_EECD_FLUDONE_I210) {
+			ret_val = 0;
+			break;
+		}
+		udelay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_get_flash_presence_i210 - Check if flash device is detected.
+ *  @hw: pointer to the HW structure
+ *
+ **/
+bool igb_get_flash_presence_i210(struct e1000_hw *hw)
+{
+	u32 eec = 0;
+	bool ret_val = false;
+
+	eec = rd32(E1000_EECD);
+	if (eec & E1000_EECD_FLASH_DETECTED_I210)
+		ret_val = true;
+
+	return ret_val;
+}
+
+/**
+ *  igb_update_flash_i210 - Commit EEPROM to the flash
+ *  @hw: pointer to the HW structure
+ *
+ **/
+static s32 igb_update_flash_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 flup;
+
+	ret_val = igb_pool_flash_update_done_i210(hw);
+	if (ret_val == -E1000_ERR_NVM) {
+		hw_dbg("Flash update time out\n");
+		goto out;
+	}
+
+	flup = rd32(E1000_EECD) | E1000_EECD_FLUPD_I210;
+	wr32(E1000_EECD, flup);
+
+	ret_val = igb_pool_flash_update_done_i210(hw);
+	if (ret_val)
+		hw_dbg("Flash update time out\n");
+	else
+		hw_dbg("Flash update complete\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_valid_led_default_i210 - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+		switch (hw->phy.media_type) {
+		case e1000_media_type_internal_serdes:
+			*data = ID_LED_DEFAULT_I210_SERDES;
+			break;
+		case e1000_media_type_copper:
+		default:
+			*data = ID_LED_DEFAULT_I210;
+			break;
+		}
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  __igb_access_xmdio_reg - Read/write XMDIO register
+ *  @hw: pointer to the HW structure
+ *  @address: XMDIO address to program
+ *  @dev_addr: device address to program
+ *  @data: pointer to value to read/write from/to the XMDIO address
+ *  @read: boolean flag to indicate read or write
+ **/
+static s32 __igb_access_xmdio_reg(struct e1000_hw *hw, u16 address,
+				  u8 dev_addr, u16 *data, bool read)
+{
+	s32 ret_val = 0;
+
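+	/* This follows the usual MMD indirect access sequence: select the
+	 * MMD device with the function field set to "address", program the
+	 * register address, switch the function field to "data", then read
+	 * or write the data register.
+	 */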
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, dev_addr);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, address);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, E1000_MMDAC_FUNC_DATA |
+							 dev_addr);
+	if (ret_val)
+		return ret_val;
+
+	if (read)
+		ret_val = hw->phy.ops.read_reg(hw, E1000_MMDAAD, data);
+	else
+		ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAAD, *data);
+	if (ret_val)
+		return ret_val;
+
+	/* Recalibrate the device back to 0 */
+	ret_val = hw->phy.ops.write_reg(hw, E1000_MMDAC, 0);
+	if (ret_val)
+		return ret_val;
+
+	return ret_val;
+}
+
+/**
+ *  igb_read_xmdio_reg - Read XMDIO register
+ *  @hw: pointer to the HW structure
+ *  @addr: XMDIO address to program
+ *  @dev_addr: device address to program
+ *  @data: value to be read from the EMI address
+ **/
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data)
+{
+	return __igb_access_xmdio_reg(hw, addr, dev_addr, data, true);
+}
+
+/**
+ *  igb_write_xmdio_reg - Write XMDIO register
+ *  @hw: pointer to the HW structure
+ *  @addr: XMDIO address to program
+ *  @dev_addr: device address to program
+ *  @data: value to be written to the XMDIO address
+ **/
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data)
+{
+	return __igb_access_xmdio_reg(hw, addr, dev_addr, &data, false);
+}
+
+/**
+ *  igb_init_nvm_params_i210 - Init NVM func ptrs.
+ *  @hw: pointer to the HW structure
+ **/
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	struct e1000_nvm_info *nvm = &hw->nvm;
+
+	nvm->ops.acquire = igb_acquire_nvm_i210;
+	nvm->ops.release = igb_release_nvm_i210;
+	nvm->ops.valid_led_default = igb_valid_led_default_i210;
+
+	/* NVM Function Pointers */
+	if (igb_get_flash_presence_i210(hw)) {
+		hw->nvm.type = e1000_nvm_flash_hw;
+		nvm->ops.read    = igb_read_nvm_srrd_i210;
+		nvm->ops.write   = igb_write_nvm_srwr_i210;
+		nvm->ops.validate = igb_validate_nvm_checksum_i210;
+		nvm->ops.update   = igb_update_nvm_checksum_i210;
+	} else {
+		hw->nvm.type = e1000_nvm_invm;
+		nvm->ops.read     = igb_read_invm_i210;
+		nvm->ops.write    = NULL;
+		nvm->ops.validate = NULL;
+		nvm->ops.update   = NULL;
+	}
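+	/* Without a flash (e.g. i211), the NVM lives in read-only iNVM
+	 * (OTP), so the write/validate/update operations stay NULL.
+	 */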
+	return ret_val;
+}
+
+/**
+ * igb_pll_workaround_i210
+ * @hw: pointer to the HW structure
+ *
+ * Works around an erratum in the PLL circuit where it occasionally
+ * provides the wrong clock frequency after power up.
+ **/
+s32 igb_pll_workaround_i210(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u32 wuc, mdicnfg, ctrl, ctrl_ext, reg_val;
+	u16 nvm_word, phy_word, pci_word, tmp_nvm;
+	int i;
+
+	/* Get and set needed register values */
+	wuc = rd32(E1000_WUC);
+	mdicnfg = rd32(E1000_MDICNFG);
+	reg_val = mdicnfg & ~E1000_MDICNFG_EXT_MDIO;
+	wr32(E1000_MDICNFG, reg_val);
+
+	/* Get data from NVM, or set default */
+	ret_val = igb_read_invm_word_i210(hw, E1000_INVM_AUTOLOAD,
+					  &nvm_word);
+	if (ret_val)
+		nvm_word = E1000_INVM_DEFAULT_AL;
+	tmp_nvm = nvm_word | E1000_INVM_PLL_WO_VAL;
+	for (i = 0; i < E1000_MAX_PLL_TRIES; i++) {
+		/* check current state directly from internal PHY */
+		igb_read_phy_reg_gs40g(hw, (E1000_PHY_PLL_FREQ_PAGE |
+					 E1000_PHY_PLL_FREQ_REG), &phy_word);
+		if ((phy_word & E1000_PHY_PLL_UNCONF)
+		    != E1000_PHY_PLL_UNCONF) {
+			ret_val = 0;
+			break;
+		} else {
+			ret_val = -E1000_ERR_PHY;
+		}
+		/* directly reset the internal PHY */
+		ctrl = rd32(E1000_CTRL);
+		wr32(E1000_CTRL, ctrl|E1000_CTRL_PHY_RST);
+
+		ctrl_ext = rd32(E1000_CTRL_EXT);
+		ctrl_ext |= (E1000_CTRL_EXT_PHYPDEN | E1000_CTRL_EXT_SDLPE);
+		wr32(E1000_CTRL_EXT, ctrl_ext);
+
+		wr32(E1000_WUC, 0);
+		reg_val = (E1000_INVM_AUTOLOAD << 4) | (tmp_nvm << 16);
+		wr32(E1000_EEARBC_I210, reg_val);
+
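+		/* Bounce the function through D3hot and back to D0 so the
+		 * PLL restarts with the override autoload value in place.
+		 */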
+		igb_read_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+		pci_word |= E1000_PCI_PMCSR_D3;
+		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+		usleep_range(1000, 2000);
+		pci_word &= ~E1000_PCI_PMCSR_D3;
+		igb_write_pci_cfg(hw, E1000_PCI_PMCSR, &pci_word);
+		reg_val = (E1000_INVM_AUTOLOAD << 4) | (nvm_word << 16);
+		wr32(E1000_EEARBC_I210, reg_val);
+
+		/* restore WUC register */
+		wr32(E1000_WUC, wuc);
+	}
+	/* restore MDICNFG setting */
+	wr32(E1000_MDICNFG, mdicnfg);
+	return ret_val;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h
new file mode 100644
index 0000000..3442b63
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_i210.h
@@ -0,0 +1,93 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_I210_H_
+#define _E1000_I210_H_
+
+s32 igb_acquire_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+void igb_release_swfw_sync_i210(struct e1000_hw *hw, u16 mask);
+s32 igb_valid_led_default_i210(struct e1000_hw *hw, u16 *data);
+s32 igb_read_invm_version(struct e1000_hw *hw,
+			  struct e1000_fw_version *invm_ver);
+s32 igb_read_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 *data);
+s32 igb_write_xmdio_reg(struct e1000_hw *hw, u16 addr, u8 dev_addr, u16 data);
+s32 igb_init_nvm_params_i210(struct e1000_hw *hw);
+bool igb_get_flash_presence_i210(struct e1000_hw *hw);
+s32 igb_pll_workaround_i210(struct e1000_hw *hw);
+
+#define E1000_STM_OPCODE		0xDB00
+#define E1000_EEPROM_FLASH_SIZE_WORD	0x11
+
+#define INVM_DWORD_TO_RECORD_TYPE(invm_dword) \
+	(u8)((invm_dword) & 0x7)
+#define INVM_DWORD_TO_WORD_ADDRESS(invm_dword) \
+	(u8)(((invm_dword) & 0x0000FE00) >> 9)
+#define INVM_DWORD_TO_WORD_DATA(invm_dword) \
+	(u16)(((invm_dword) & 0xFFFF0000) >> 16)
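+
+/* Example (hypothetical dword): 0x1234A601 decodes to record type 0x1
+ * (word autoload), word address 0x53 and word data 0x1234.
+ */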
+
+enum E1000_INVM_STRUCTURE_TYPE {
+	E1000_INVM_UNINITIALIZED_STRUCTURE		= 0x00,
+	E1000_INVM_WORD_AUTOLOAD_STRUCTURE		= 0x01,
+	E1000_INVM_CSR_AUTOLOAD_STRUCTURE		= 0x02,
+	E1000_INVM_PHY_REGISTER_AUTOLOAD_STRUCTURE	= 0x03,
+	E1000_INVM_RSA_KEY_SHA256_STRUCTURE		= 0x04,
+	E1000_INVM_INVALIDATED_STRUCTURE		= 0x0F,
+};
+
+#define E1000_INVM_RSA_KEY_SHA256_DATA_SIZE_IN_DWORDS	8
+#define E1000_INVM_CSR_AUTOLOAD_DATA_SIZE_IN_DWORDS	1
+#define E1000_INVM_ULT_BYTES_SIZE			8
+#define E1000_INVM_RECORD_SIZE_IN_BYTES			4
+#define E1000_INVM_VER_FIELD_ONE			0x1FF8
+#define E1000_INVM_VER_FIELD_TWO			0x7FE000
+#define E1000_INVM_IMGTYPE_FIELD			0x1F800000
+
+#define E1000_INVM_MAJOR_MASK		0x3F0
+#define E1000_INVM_MINOR_MASK		0xF
+#define E1000_INVM_MAJOR_SHIFT		4
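+
+/* e.g. a (hypothetical) version word of 0x125 decodes to major 0x12,
+ * minor 0x5.
+ */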
+
+#define ID_LED_DEFAULT_I210		((ID_LED_OFF1_ON2  << 8) | \
+					 (ID_LED_DEF1_DEF2 <<  4) | \
+					 (ID_LED_OFF1_OFF2))
+#define ID_LED_DEFAULT_I210_SERDES	((ID_LED_DEF1_DEF2 << 8) | \
+					 (ID_LED_DEF1_DEF2 <<  4) | \
+					 (ID_LED_OFF1_ON2))
+
+/* NVM offset defaults for i211 device */
+#define NVM_INIT_CTRL_2_DEFAULT_I211	0x7243
+#define NVM_INIT_CTRL_4_DEFAULT_I211	0x00C1
+#define NVM_LED_1_CFG_DEFAULT_I211	0x0184
+#define NVM_LED_0_2_CFG_DEFAULT_I211	0x200C
+
+/* PLL Defines */
+#define E1000_PCI_PMCSR			0x44
+#define E1000_PCI_PMCSR_D3		0x03
+#define E1000_MAX_PLL_TRIES		5
+#define E1000_PHY_PLL_UNCONF		0xFF
+#define E1000_PHY_PLL_FREQ_PAGE		0xFC0000
+#define E1000_PHY_PLL_FREQ_REG		0x000E
+#define E1000_INVM_DEFAULT_AL		0x202F
+#define E1000_INVM_AUTOLOAD		0x0A
+#define E1000_INVM_PLL_WO_VAL		0x0010
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c
new file mode 100644
index 0000000..500c928
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.c
@@ -0,0 +1,1607 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+
+#include "e1000_mac.h"
+
+#include "igb.h"
+
+static s32 igb_set_default_fc(struct e1000_hw *hw);
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw);
+
+/**
+ *  igb_get_bus_info_pcie - Get PCIe bus information
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines and stores the system bus information for a particular
+ *  network interface.  The following bus information is determined and stored:
+ *  bus speed, bus width, type (PCIe), and PCIe function.
+ **/
+s32 igb_get_bus_info_pcie(struct e1000_hw *hw)
+{
+	struct e1000_bus_info *bus = &hw->bus;
+	s32 ret_val;
+	u32 reg;
+	u16 pcie_link_status;
+
+	bus->type = e1000_bus_type_pci_express;
+
+	ret_val = igb_read_pcie_cap_reg(hw,
+					PCI_EXP_LNKSTA,
+					&pcie_link_status);
+	if (ret_val) {
+		bus->width = e1000_bus_width_unknown;
+		bus->speed = e1000_bus_speed_unknown;
+	} else {
+		switch (pcie_link_status & PCI_EXP_LNKSTA_CLS) {
+		case PCI_EXP_LNKSTA_CLS_2_5GB:
+			bus->speed = e1000_bus_speed_2500;
+			break;
+		case PCI_EXP_LNKSTA_CLS_5_0GB:
+			bus->speed = e1000_bus_speed_5000;
+			break;
+		default:
+			bus->speed = e1000_bus_speed_unknown;
+			break;
+		}
+
+		bus->width = (enum e1000_bus_width)((pcie_link_status &
+						     PCI_EXP_LNKSTA_NLW) >>
+						     PCI_EXP_LNKSTA_NLW_SHIFT);
+	}
+
+	reg = rd32(E1000_STATUS);
+	bus->func = (reg & E1000_STATUS_FUNC_MASK) >> E1000_STATUS_FUNC_SHIFT;
+
+	return 0;
+}
+
+/**
+ *  igb_clear_vfta - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void igb_clear_vfta(struct e1000_hw *hw)
+{
+	u32 offset;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		array_wr32(E1000_VFTA, offset, 0);
+		wrfl();
+	}
+}
+
+/**
+ *  igb_write_vfta - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+static void igb_write_vfta(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	array_wr32(E1000_VFTA, offset, value);
+	wrfl();
+}
+
+/* Due to a hw errata, if the host tries to  configure the VFTA register
+ * while performing queries from the BMC or DMA, then the VFTA in some
+ * cases won't be written.
+ */
+
+/**
+ *  igb_clear_vfta_i350 - Clear VLAN filter table
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the register array which contains the VLAN filter table by
+ *  setting all the values to 0.
+ **/
+void igb_clear_vfta_i350(struct e1000_hw *hw)
+{
+	u32 offset;
+	int i;
+
+	for (offset = 0; offset < E1000_VLAN_FILTER_TBL_SIZE; offset++) {
+		for (i = 0; i < 10; i++)
+			array_wr32(E1000_VFTA, offset, 0);
+
+		wrfl();
+	}
+}
+
+/**
+ *  igb_write_vfta_i350 - Write value to VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset in VLAN filter table
+ *  @value: register value written to VLAN filter table
+ *
+ *  Writes value at the given offset in the register array which stores
+ *  the VLAN filter table.
+ **/
+static void igb_write_vfta_i350(struct e1000_hw *hw, u32 offset, u32 value)
+{
+	int i;
+
+	for (i = 0; i < 10; i++)
+		array_wr32(E1000_VFTA, offset, value);
+
+	wrfl();
+}
+
+/**
+ *  igb_init_rx_addrs - Initialize receive addresses
+ *  @hw: pointer to the HW structure
+ *  @rar_count: number of receive address registers
+ *
+ *  Sets up the receive address registers by setting the base receive address
+ *  register to the device's MAC address and clearing all the other receive
+ *  address registers to 0.
+ **/
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count)
+{
+	u32 i;
+	u8 mac_addr[ETH_ALEN] = {0};
+
+	/* Setup the receive address */
+	hw_dbg("Programming MAC Address into RAR[0]\n");
+
+	hw->mac.ops.rar_set(hw, hw->mac.addr, 0);
+
+	/* Zero out the other (rar_entry_count - 1) receive addresses */
+	hw_dbg("Clearing RAR[1-%u]\n", rar_count-1);
+	for (i = 1; i < rar_count; i++)
+		hw->mac.ops.rar_set(hw, mac_addr, i);
+}
+
+/**
+ *  igb_vfta_set - enable or disable vlan in VLAN filter table
+ *  @hw: pointer to the HW structure
+ *  @vid: VLAN id to add or remove
+ *  @add: if true add filter, if false remove
+ *
+ *  Sets or clears a bit in the VLAN filter table array based on VLAN id
+ *  and if we are adding or removing the filter
+ **/
+s32 igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add)
+{
+	u32 index = (vid >> E1000_VFTA_ENTRY_SHIFT) & E1000_VFTA_ENTRY_MASK;
+	u32 mask = 1 << (vid & E1000_VFTA_ENTRY_BIT_SHIFT_MASK);
+	u32 vfta;
+	struct igb_adapter *adapter = hw->back;
+	s32 ret_val = 0;
+
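+	/* e.g. vid 100 selects VFTA dword 3 (100 >> 5) and bit 4 (100 & 0x1F) */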
+	vfta = adapter->shadow_vfta[index];
+
+	/* bit was set/cleared before we started */
+	if ((!!(vfta & mask)) == add) {
+		ret_val = -E1000_ERR_CONFIG;
+	} else {
+		if (add)
+			vfta |= mask;
+		else
+			vfta &= ~mask;
+	}
+	if ((hw->mac.type == e1000_i350) || (hw->mac.type == e1000_i354))
+		igb_write_vfta_i350(hw, index, vfta);
+	else
+		igb_write_vfta(hw, index, vfta);
+	adapter->shadow_vfta[index] = vfta;
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_alt_mac_addr - Check for alternate MAC addr
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the nvm for an alternate MAC address.  An alternate MAC address
+ *  can be set up by pre-boot software and must be treated like a permanent
+ *  address and must override the actual permanent MAC address.  If an
+ *  alternate MAC address is found it is saved in the hw struct and
+ *  programmed into RAR0 and the function returns success, otherwise the
+ *  function returns an error.
+ **/
+s32 igb_check_alt_mac_addr(struct e1000_hw *hw)
+{
+	u32 i;
+	s32 ret_val = 0;
+	u16 offset, nvm_alt_mac_addr_offset, nvm_data;
+	u8 alt_mac_addr[ETH_ALEN];
+
+	/* Alternate MAC address is handled by the option ROM for 82580
+	 * and newer. SW support not required.
+	 */
+	if (hw->mac.type >= e1000_82580)
+		goto out;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ALT_MAC_ADDR_PTR, 1,
+				 &nvm_alt_mac_addr_offset);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if ((nvm_alt_mac_addr_offset == 0xFFFF) ||
+	    (nvm_alt_mac_addr_offset == 0x0000))
+		/* There is no Alternate MAC Address */
+		goto out;
+
+	if (hw->bus.func == E1000_FUNC_1)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN1;
+	if (hw->bus.func == E1000_FUNC_2)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN2;
+
+	if (hw->bus.func == E1000_FUNC_3)
+		nvm_alt_mac_addr_offset += E1000_ALT_MAC_ADDRESS_OFFSET_LAN3;
+	for (i = 0; i < ETH_ALEN; i += 2) {
+		offset = nvm_alt_mac_addr_offset + (i >> 1);
+		ret_val = hw->nvm.ops.read(hw, offset, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+
+		alt_mac_addr[i] = (u8)(nvm_data & 0xFF);
+		alt_mac_addr[i + 1] = (u8)(nvm_data >> 8);
+	}
+
+	/* if multicast bit is set, the alternate address will not be used */
+	if (is_multicast_ether_addr(alt_mac_addr)) {
+		hw_dbg("Ignoring Alternate Mac Address with MC bit set\n");
+		goto out;
+	}
+
+	/* We have a valid alternate MAC address, and we want to treat it the
+	 * same as the normal permanent MAC address stored by the HW into the
+	 * RAR. Do this by mapping this address into RAR0.
+	 */
+	hw->mac.ops.rar_set(hw, alt_mac_addr, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_rar_set - Set receive address register
+ *  @hw: pointer to the HW structure
+ *  @addr: pointer to the receive address
+ *  @index: receive address array register
+ *
+ *  Sets the receive address array register at index to the address passed
+ *  in by addr.
+ **/
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index)
+{
+	u32 rar_low, rar_high;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
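+	/* e.g. addr 00:11:22:33:44:55 yields rar_low 0x33221100 and
+	 * rar_high 0x5544
+	 */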
+	rar_low = ((u32) addr[0] |
+		   ((u32) addr[1] << 8) |
+		    ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* If MAC address zero, no need to set the AV bit */
+	if (rar_low || rar_high)
+		rar_high |= E1000_RAH_AV;
+
+	/* Some bridges will combine consecutive 32-bit writes into
+	 * a single burst write, which will malfunction on some parts.
+	 * The flushes avoid this.
+	 */
+	wr32(E1000_RAL(index), rar_low);
+	wrfl();
+	wr32(E1000_RAH(index), rar_high);
+	wrfl();
+}
+
+/**
+ *  igb_mta_set - Set multicast filter table address
+ *  @hw: pointer to the HW structure
+ *  @hash_value: determines the MTA register and bit to set
+ *
+ *  The multicast table address is a register array of 32-bit registers.
+ *  The hash_value is used to determine what register the bit is in, the
+ *  current value is read, the new bit is OR'd in and the new value is
+ *  written back into the register.
+ **/
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value)
+{
+	u32 hash_bit, hash_reg, mta;
+
+	/* The MTA is a register array of 32-bit registers. It is
+	 * treated like an array of (32*mta_reg_count) bits.  We want to
+	 * set bit BitArray[hash_value]. So we figure out what register
+	 * the bit is in, read it, OR in the new bit, then write
+	 * back the new value.  The (hw->mac.mta_reg_count - 1) serves as a
+	 * mask to bits 31:5 of the hash value which gives us the
+	 * register we're modifying.  The hash bit within that register
+	 * is determined by the lower 5 bits of the hash value.
+	 */
+	hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+	hash_bit = hash_value & 0x1F;
+
+	mta = array_rd32(E1000_MTA, hash_reg);
+
+	mta |= (1 << hash_bit);
+
+	array_wr32(E1000_MTA, hash_reg, mta);
+	wrfl();
+}
+
+/**
+ *  igb_hash_mc_addr - Generate a multicast hash value
+ *  @hw: pointer to the HW structure
+ *  @mc_addr: pointer to a multicast address
+ *
+ *  Generates a multicast address hash value which is used to determine
+ *  the multicast filter table array address and new table value.  See
+ *  igb_mta_set()
+ **/
+static u32 igb_hash_mc_addr(struct e1000_hw *hw, u8 *mc_addr)
+{
+	u32 hash_value, hash_mask;
+	u8 bit_shift = 0;
+
+	/* Register count multiplied by bits per register */
+	hash_mask = (hw->mac.mta_reg_count * 32) - 1;
+
+	/* For a mc_filter_type of 0, bit_shift is the number of left-shifts
+	 * where 0xFF would still fall within the hash mask.
+	 */
+	while (hash_mask >> bit_shift != 0xFF)
+		bit_shift++;
+
+	/* The portion of the address that is used for the hash table
+	 * is determined by the mc_filter_type setting.
+	 * The algorithm is such that there is a total of 8 bits of shifting.
+	 * The bit_shift for a mc_filter_type of 0 represents the number of
+	 * left-shifts where the MSB of mc_addr[5] would still fall within
+	 * the hash_mask.  Case 0 does this exactly.  Since there are a total
+	 * of 8 bits of shifting, then mc_addr[4] will shift right the
+	 * remaining number of bits. Thus 8 - bit_shift.  The rest of the
+	 * cases are a variation of this algorithm...essentially raising the
+	 * number of bits to shift mc_addr[5] left, while still keeping the
+	 * 8-bit shifting total.
+	 *
+	 * For example, given the following Destination MAC Address and an
+	 * mta register count of 128 (thus a 4096-bit vector and 0xFFF mask),
+	 * we can see that the bit_shift for case 0 is 4.  These are the hash
+	 * values resulting from each mc_filter_type...
+	 * [0] [1] [2] [3] [4] [5]
+	 * 01  AA  00  12  34  56
+	 * LSB                 MSB
+	 *
+	 * case 0: hash_value = ((0x34 >> 4) | (0x56 << 4)) & 0xFFF = 0x563
+	 * case 1: hash_value = ((0x34 >> 3) | (0x56 << 5)) & 0xFFF = 0xAC6
+	 * case 2: hash_value = ((0x34 >> 2) | (0x56 << 6)) & 0xFFF = 0x163
+	 * case 3: hash_value = ((0x34 >> 0) | (0x56 << 8)) & 0xFFF = 0x634
+	 */
+	switch (hw->mac.mc_filter_type) {
+	default:
+	case 0:
+		break;
+	case 1:
+		bit_shift += 1;
+		break;
+	case 2:
+		bit_shift += 2;
+		break;
+	case 3:
+		bit_shift += 4;
+		break;
+	}
+
+	hash_value = hash_mask & (((mc_addr[4] >> (8 - bit_shift)) |
+				  (((u16) mc_addr[5]) << bit_shift)));
+
+	return hash_value;
+}
+
+/**
+ *  igb_update_mc_addr_list - Update Multicast addresses
+ *  @hw: pointer to the HW structure
+ *  @mc_addr_list: array of multicast addresses to program
+ *  @mc_addr_count: number of multicast addresses to program
+ *
+ *  Updates entire Multicast Table Array.
+ *  The caller must have a packed mc_addr_list of multicast addresses.
+ **/
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+			     u8 *mc_addr_list, u32 mc_addr_count)
+{
+	u32 hash_value, hash_bit, hash_reg;
+	int i;
+
+	/* clear mta_shadow */
+	memset(&hw->mac.mta_shadow, 0, sizeof(hw->mac.mta_shadow));
+
+	/* update mta_shadow from mc_addr_list */
+	for (i = 0; (u32) i < mc_addr_count; i++) {
+		hash_value = igb_hash_mc_addr(hw, mc_addr_list);
+
+		hash_reg = (hash_value >> 5) & (hw->mac.mta_reg_count - 1);
+		hash_bit = hash_value & 0x1F;
+
+		hw->mac.mta_shadow[hash_reg] |= (1 << hash_bit);
+		mc_addr_list += (ETH_ALEN);
+	}
+
+	/* replace the entire MTA table */
+	for (i = hw->mac.mta_reg_count - 1; i >= 0; i--)
+		array_wr32(E1000_MTA, i, hw->mac.mta_shadow[i]);
+	wrfl();
+}
+
+/**
+ *  igb_clear_hw_cntrs_base - Clear base hardware counters
+ *  @hw: pointer to the HW structure
+ *
+ *  Clears the base hardware counters by reading the counter registers.
+ **/
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw)
+{
+	rd32(E1000_CRCERRS);
+	rd32(E1000_SYMERRS);
+	rd32(E1000_MPC);
+	rd32(E1000_SCC);
+	rd32(E1000_ECOL);
+	rd32(E1000_MCC);
+	rd32(E1000_LATECOL);
+	rd32(E1000_COLC);
+	rd32(E1000_DC);
+	rd32(E1000_SEC);
+	rd32(E1000_RLEC);
+	rd32(E1000_XONRXC);
+	rd32(E1000_XONTXC);
+	rd32(E1000_XOFFRXC);
+	rd32(E1000_XOFFTXC);
+	rd32(E1000_FCRUC);
+	rd32(E1000_GPRC);
+	rd32(E1000_BPRC);
+	rd32(E1000_MPRC);
+	rd32(E1000_GPTC);
+	rd32(E1000_GORCL);
+	rd32(E1000_GORCH);
+	rd32(E1000_GOTCL);
+	rd32(E1000_GOTCH);
+	rd32(E1000_RNBC);
+	rd32(E1000_RUC);
+	rd32(E1000_RFC);
+	rd32(E1000_ROC);
+	rd32(E1000_RJC);
+	rd32(E1000_TORL);
+	rd32(E1000_TORH);
+	rd32(E1000_TOTL);
+	rd32(E1000_TOTH);
+	rd32(E1000_TPR);
+	rd32(E1000_TPT);
+	rd32(E1000_MPTC);
+	rd32(E1000_BPTC);
+}
+
+/**
+ *  igb_check_for_copper_link - Check for link (Copper)
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks to see if the link status of the hardware has changed.  If a
+ *  change in link status has been detected, then we read the PHY registers
+ *  to get the current speed/duplex if link exists.
+ **/
+s32 igb_check_for_copper_link(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	bool link;
+
+	/* We only want to go out to the PHY registers to see if Auto-Neg
+	 * has completed and/or if our link status has changed.  The
+	 * get_link_status flag is set upon receiving a Link Status
+	 * Change or Rx Sequence Error interrupt.
+	 */
+	if (!mac->get_link_status) {
+		ret_val = 0;
+		goto out;
+	}
+
+	/* First we want to see if the MII Status Register reports
+	 * link.  If so, then we want to get the current speed/duplex
+	 * of the PHY.
+	 */
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link)
+		goto out; /* No link detected */
+
+	mac->get_link_status = false;
+
+	/* Check if there was DownShift, must be checked
+	 * immediately after link-up
+	 */
+	igb_check_downshift(hw);
+
+	/* If we are forcing speed/duplex, then we simply return since
+	 * we have already determined whether we have link or not.
+	 */
+	if (!mac->autoneg) {
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	/* Auto-Neg is enabled.  Auto Speed Detection takes care
+	 * of MAC speed/duplex configuration.  So we only need to
+	 * configure Collision Distance in the MAC.
+	 */
+	igb_config_collision_dist(hw);
+
+	/* Configure Flow Control now that Auto-Neg has completed.
+	 * First, we need to restore the desired flow control
+	 * settings because we may have had to re-autoneg with a
+	 * different link partner.
+	 */
+	ret_val = igb_config_fc_after_link_up(hw);
+	if (ret_val)
+		hw_dbg("Error configuring flow control\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_setup_link - Setup flow control and link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Determines which flow control settings to use, then configures flow
+ *  control.  Calls the appropriate media-specific link configuration
+ *  function.  Assuming the adapter has a valid link partner, a valid link
+ *  should be established.  Assumes the hardware has previously been reset
+ *  and the transmitter and receiver are not enabled.
+ **/
+s32 igb_setup_link(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* In the case of the phy reset being blocked, we already have a link.
+	 * We do not need to set it up again.
+	 */
+	if (igb_check_reset_block(hw))
+		goto out;
+
+	/* If requested flow control is set to default, set flow control
+	 * based on the EEPROM flow control settings.
+	 */
+	if (hw->fc.requested_mode == e1000_fc_default) {
+		ret_val = igb_set_default_fc(hw);
+		if (ret_val)
+			goto out;
+	}
+
+	/* We want to save off the original Flow Control configuration just
+	 * in case we get disconnected and then reconnected into a different
+	 * hub or switch with different Flow Control capabilities.
+	 */
+	hw->fc.current_mode = hw->fc.requested_mode;
+
+	hw_dbg("After fix-ups FlowControl is now = %x\n", hw->fc.current_mode);
+
+	/* Call the necessary media_type subroutine to configure the link. */
+	ret_val = hw->mac.ops.setup_physical_interface(hw);
+	if (ret_val)
+		goto out;
+
+	/* Initialize the flow control address, type, and PAUSE timer
+	 * registers to their default values.  This is done even if flow
+	 * control is disabled, because it does not hurt anything to
+	 * initialize these registers.
+	 */
+	hw_dbg("Initializing the Flow Control address, type and timer regs\n");
+	wr32(E1000_FCT, FLOW_CONTROL_TYPE);
+	wr32(E1000_FCAH, FLOW_CONTROL_ADDRESS_HIGH);
+	wr32(E1000_FCAL, FLOW_CONTROL_ADDRESS_LOW);
+
+	wr32(E1000_FCTTV, hw->fc.pause_time);
+
+	ret_val = igb_set_fc_watermarks(hw);
+
+out:
+
+	return ret_val;
+}
+
+/**
+ *  igb_config_collision_dist - Configure collision distance
+ *  @hw: pointer to the HW structure
+ *
+ *  Configures the collision distance to the default value and is used
+ *  during link setup. Currently no func pointer exists and all
+ *  implementations are handled in the generic version of this function.
+ **/
+void igb_config_collision_dist(struct e1000_hw *hw)
+{
+	u32 tctl;
+
+	tctl = rd32(E1000_TCTL);
+
+	tctl &= ~E1000_TCTL_COLD;
+	tctl |= E1000_COLLISION_DISTANCE << E1000_COLD_SHIFT;
+
+	wr32(E1000_TCTL, tctl);
+	wrfl();
+}
+
+/**
+ *  igb_set_fc_watermarks - Set flow control high/low watermarks
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets the flow control high/low threshold (watermark) registers.  If
+ *  flow control XON frame transmission is enabled, then set XON frame
+ *  transmission as well.
+ **/
+static s32 igb_set_fc_watermarks(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u32 fcrtl = 0, fcrth = 0;
+
+	/* Set the flow control receive threshold registers.  Normally,
+	 * these registers will be set to a default threshold that may be
+	 * adjusted later by the driver's runtime code.  However, if the
+	 * ability to transmit pause frames is not enabled, then these
+	 * registers will be set to 0.
+	 */
+	if (hw->fc.current_mode & e1000_fc_tx_pause) {
+		/* We need to set up the Receive Threshold high and low water
+		 * marks as well as (optionally) enabling the transmission of
+		 * XON frames.
+		 */
+		fcrtl = hw->fc.low_water;
+		if (hw->fc.send_xon)
+			fcrtl |= E1000_FCRTL_XONE;
+
+		fcrth = hw->fc.high_water;
+	}
+	wr32(E1000_FCRTL, fcrtl);
+	wr32(E1000_FCRTH, fcrth);
+
+	return ret_val;
+}
+
+/**
+ *  igb_set_default_fc - Set flow control default values
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the EEPROM for the default values for flow control and store the
+ *  values.
+ **/
+static s32 igb_set_default_fc(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 lan_offset;
+	u16 nvm_data;
+
+	/* Read and store word 0x0F of the EEPROM. This word contains bits
+	 * that determine the hardware's default PAUSE (flow control) mode,
+	 * a bit that determines whether the HW defaults to enabling or
+	 * disabling auto-negotiation, and the direction of the
+	 * SW defined pins. If there is no SW over-ride of the flow
+	 * control setting, then the variable hw->fc will
+	 * be initialized based on a value in the EEPROM.
+	 */
+	if (hw->mac.type == e1000_i350) {
+		lan_offset = NVM_82580_LAN_FUNC_OFFSET(hw->bus.func);
+		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG
+					   + lan_offset, 1, &nvm_data);
+	} else {
+		ret_val = hw->nvm.ops.read(hw, NVM_INIT_CONTROL2_REG,
+					   1, &nvm_data);
+	}
+
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
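+	/* Decode the PAUSE bits of word 0x0F: both clear means no flow
+	 * control, ASM_DIR alone means Tx-only (asymmetric) pause, and any
+	 * other combination requests full flow control.
+	 */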
+	if ((nvm_data & NVM_WORD0F_PAUSE_MASK) == 0)
+		hw->fc.requested_mode = e1000_fc_none;
+	else if ((nvm_data & NVM_WORD0F_PAUSE_MASK) ==
+		 NVM_WORD0F_ASM_DIR)
+		hw->fc.requested_mode = e1000_fc_tx_pause;
+	else
+		hw->fc.requested_mode = e1000_fc_full;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_force_mac_fc - Force the MAC's flow control settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Force the MAC's flow control settings.  Sets the TFCE and RFCE bits in the
+ *  device control register to reflect the adapter settings.  TFCE and RFCE
+ *  need to be explicitly set by software when a copper PHY is used because
+ *  autonegotiation is managed by the PHY rather than the MAC.  Software must
+ *  also configure these bits when link is forced on a fiber connection.
+ **/
+s32 igb_force_mac_fc(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 ret_val = 0;
+
+	ctrl = rd32(E1000_CTRL);
+
+	/* Because we didn't get link via the internal auto-negotiation
+	 * mechanism (we either forced link or we got link via PHY
+	 * auto-neg), we have to manually enable/disable transmit and
+	 * receive flow control.
+	 *
+	 * The "Case" statement below enables/disable flow control
+	 * according to the "hw->fc.current_mode" parameter.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause
+	 *          frames but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not receive pause frames).
+	 *      3:  Both Rx and TX flow control (symmetric) is enabled.
+	 *  other:  No other values should be possible at this point.
+	 */
+	hw_dbg("hw->fc.current_mode = %u\n", hw->fc.current_mode);
+
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		ctrl &= (~(E1000_CTRL_TFCE | E1000_CTRL_RFCE));
+		break;
+	case e1000_fc_rx_pause:
+		ctrl &= (~E1000_CTRL_TFCE);
+		ctrl |= E1000_CTRL_RFCE;
+		break;
+	case e1000_fc_tx_pause:
+		ctrl &= (~E1000_CTRL_RFCE);
+		ctrl |= E1000_CTRL_TFCE;
+		break;
+	case e1000_fc_full:
+		ctrl |= (E1000_CTRL_TFCE | E1000_CTRL_RFCE);
+		break;
+	default:
+		hw_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	wr32(E1000_CTRL, ctrl);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_config_fc_after_link_up - Configures flow control after link
+ *  @hw: pointer to the HW structure
+ *
+ *  Checks the status of auto-negotiation after link up to ensure that the
+ *  speed and duplex were not forced.  If the link needed to be forced, then
+ *  flow control needs to be forced also.  If auto-negotiation is enabled
+ *  and did not fail, then we configure flow control based on our link
+ *  partner.
+ **/
+s32 igb_config_fc_after_link_up(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val = 0;
+	u32 pcs_status_reg, pcs_adv_reg, pcs_lp_ability_reg, pcs_ctrl_reg;
+	u16 mii_status_reg, mii_nway_adv_reg, mii_nway_lp_ability_reg;
+	u16 speed, duplex;
+
+	/* Check for the case where we have fiber media and auto-neg failed
+	 * so we had to force link.  In this case, we need to force the
+	 * configuration of the MAC to match the "fc" parameter.
+	 */
+	if (mac->autoneg_failed) {
+		if (hw->phy.media_type == e1000_media_type_internal_serdes)
+			ret_val = igb_force_mac_fc(hw);
+	} else {
+		if (hw->phy.media_type == e1000_media_type_copper)
+			ret_val = igb_force_mac_fc(hw);
+	}
+
+	if (ret_val) {
+		hw_dbg("Error forcing flow control settings\n");
+		goto out;
+	}
+
+	/* Check for the case where we have copper media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) && mac->autoneg) {
+		/* Read the MII Status Register and check to see if AutoNeg
+		 * has completed.  We read this twice because this reg has
+		 * some "sticky" (latched) bits.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+						   &mii_status_reg);
+		if (ret_val)
+			goto out;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS,
+						   &mii_status_reg);
+		if (ret_val)
+			goto out;
+
+		if (!(mii_status_reg & MII_SR_AUTONEG_COMPLETE)) {
+			hw_dbg("Copper PHY and Auto Neg has not completed.\n");
+			goto out;
+		}
+
+		/* The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (Address 4) and the Auto_Negotiation Base
+		 * Page Ability Register (Address 5) to determine how
+		 * flow control was negotiated.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_AUTONEG_ADV,
+					    &mii_nway_adv_reg);
+		if (ret_val)
+			goto out;
+		ret_val = hw->phy.ops.read_reg(hw, PHY_LP_ABILITY,
+					    &mii_nway_lp_ability_reg);
+		if (ret_val)
+			goto out;
+
+		/* Two bits in the Auto Negotiation Advertisement Register
+		 * (Address 4) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (Address 5) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *
+		 */
+		if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+		    (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE)) {
+			/* Now we need to check if the user selected Rx ONLY
+			 * for pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise Rx
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == e1000_fc_full) {
+				hw->fc.current_mode = e1000_fc_full;
+				hw_dbg("Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = e1000_fc_rx_pause;
+				hw_dbg("Flow Control = RX PAUSE frames only.\n");
+			}
+		}
+		/* For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			  (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			  (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_tx_pause;
+			hw_dbg("Flow Control = TX PAUSE frames only.\n");
+		}
+		/* For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((mii_nway_adv_reg & NWAY_AR_PAUSE) &&
+			 (mii_nway_adv_reg & NWAY_AR_ASM_DIR) &&
+			 !(mii_nway_lp_ability_reg & NWAY_LPAR_PAUSE) &&
+			 (mii_nway_lp_ability_reg & NWAY_LPAR_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
+		}
+		/* Per the IEEE spec, at this point flow control should be
+		 * disabled.  However, we want to consider that we could
+		 * be connected to a legacy switch that doesn't advertise
+		 * desired flow control, but can be forced on the link
+		 * partner.  So if we advertised no flow control, that is
+		 * what we will resolve to.  If we advertised some kind of
+		 * receive capability (Rx Pause Only or Full Flow Control)
+		 * and the link partner advertised none, we will configure
+		 * ourselves to enable Rx Flow Control only.  We can do
+		 * this safely for two reasons:  If the link partner really
+		 * didn't want flow control enabled, and we enable Rx, no
+		 * harm done since we won't be receiving any PAUSE frames
+		 * anyway.  If the intent on the link partner was to have
+		 * flow control enabled, then by us enabling RX only, we
+		 * can at least receive pause frames and process them.
+		 * This is a good idea because in most cases, since we are
+		 * predominantly a server NIC, more times than not we will
+		 * be asked to delay transmission of packets than asking
+		 * our link partner to pause transmission of frames.
+		 */
+		else if ((hw->fc.requested_mode == e1000_fc_none) ||
+			 (hw->fc.requested_mode == e1000_fc_tx_pause) ||
+			 (hw->fc.strict_ieee)) {
+			hw->fc.current_mode = e1000_fc_none;
+			hw_dbg("Flow Control = NONE.\n");
+		} else {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			hw_dbg("Flow Control = RX PAUSE frames only.\n");
+		}
+
+		/* Now we need to do one last check...  If we auto-
+		 * negotiated to HALF DUPLEX, flow control should not be
+		 * enabled per IEEE 802.3 spec.
+		 */
+		ret_val = hw->mac.ops.get_speed_and_duplex(hw, &speed, &duplex);
+		if (ret_val) {
+			hw_dbg("Error getting link speed and duplex\n");
+			goto out;
+		}
+
+		if (duplex == HALF_DUPLEX)
+			hw->fc.current_mode = e1000_fc_none;
+
+		/* Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		ret_val = igb_force_mac_fc(hw);
+		if (ret_val) {
+			hw_dbg("Error forcing flow control settings\n");
+			goto out;
+		}
+	}
+	/* Check for the case where we have SerDes media and auto-neg is
+	 * enabled.  In this case, we need to check and see if Auto-Neg
+	 * has completed, and if so, how the PHY and link partner has
+	 * flow control configured.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_internal_serdes)
+		&& mac->autoneg) {
+		/* Read the PCS_LSTS and check to see if AutoNeg
+		 * has completed.
+		 */
+		pcs_status_reg = rd32(E1000_PCS_LSTAT);
+
+		if (!(pcs_status_reg & E1000_PCS_LSTS_AN_COMPLETE)) {
+			hw_dbg("PCS Auto Neg has not completed.\n");
+			return ret_val;
+		}
+
+		/* The AutoNeg process has completed, so we now need to
+		 * read both the Auto Negotiation Advertisement
+		 * Register (PCS_ANADV) and the Auto_Negotiation Base
+		 * Page Ability Register (PCS_LPAB) to determine how
+		 * flow control was negotiated.
+		 */
+		pcs_adv_reg = rd32(E1000_PCS_ANADV);
+		pcs_lp_ability_reg = rd32(E1000_PCS_LPAB);
+
+		/* Two bits in the Auto Negotiation Advertisement Register
+		 * (PCS_ANADV) and two bits in the Auto Negotiation Base
+		 * Page Ability Register (PCS_LPAB) determine flow control
+		 * for both the PHY and the link partner.  The following
+		 * table, taken out of the IEEE 802.3ab/D6.0 dated March 25,
+		 * 1999, describes these PAUSE resolution bits and how flow
+		 * control is determined based upon these settings.
+		 * NOTE:  DC = Don't Care
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | NIC Resolution
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    0    |  DC   |   DC    | e1000_fc_none
+		 *   0   |    1    |   0   |   DC    | e1000_fc_none
+		 *   0   |    1    |   1   |    0    | e1000_fc_none
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 *   1   |    0    |   0   |   DC    | e1000_fc_none
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *   1   |    1    |   0   |    0    | e1000_fc_none
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 *
+		 * Are both PAUSE bits set to 1?  If so, this implies
+		 * Symmetric Flow Control is enabled at both ends.  The
+		 * ASM_DIR bits are irrelevant per the spec.
+		 *
+		 * For Symmetric Flow Control:
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |   DC    |   1   |   DC    | e1000_fc_full
+		 *
+		 */
+		if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+		    (pcs_lp_ability_reg & E1000_TXCW_PAUSE)) {
+			/* Now we need to check if the user selected Rx ONLY
+			 * for pause frames.  In this case, we had to advertise
+			 * FULL flow control because we could not advertise Rx
+			 * ONLY. Hence, we must now check to see if we need to
+			 * turn OFF the TRANSMISSION of PAUSE frames.
+			 */
+			if (hw->fc.requested_mode == e1000_fc_full) {
+				hw->fc.current_mode = e1000_fc_full;
+				hw_dbg("Flow Control = FULL.\n");
+			} else {
+				hw->fc.current_mode = e1000_fc_rx_pause;
+				hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+			}
+		}
+		/* For receiving PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   0   |    1    |   1   |    1    | e1000_fc_tx_pause
+		 */
+		else if (!(pcs_adv_reg & E1000_TXCW_PAUSE) &&
+			  (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+			  (pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+			  (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_tx_pause;
+			hw_dbg("Flow Control = Tx PAUSE frames only.\n");
+		}
+		/* For transmitting PAUSE frames ONLY.
+		 *
+		 *   LOCAL DEVICE  |   LINK PARTNER
+		 * PAUSE | ASM_DIR | PAUSE | ASM_DIR | Result
+		 *-------|---------|-------|---------|--------------------
+		 *   1   |    1    |   0   |    1    | e1000_fc_rx_pause
+		 */
+		else if ((pcs_adv_reg & E1000_TXCW_PAUSE) &&
+			 (pcs_adv_reg & E1000_TXCW_ASM_DIR) &&
+			 !(pcs_lp_ability_reg & E1000_TXCW_PAUSE) &&
+			 (pcs_lp_ability_reg & E1000_TXCW_ASM_DIR)) {
+			hw->fc.current_mode = e1000_fc_rx_pause;
+			hw_dbg("Flow Control = Rx PAUSE frames only.\n");
+		} else {
+			/* Per the IEEE spec, at this point flow control
+			 * should be disabled.
+			 */
+			hw->fc.current_mode = e1000_fc_none;
+			hw_dbg("Flow Control = NONE.\n");
+		}
+
+		/* Now we call a subroutine to actually force the MAC
+		 * controller to use the correct flow control settings.
+		 */
+		pcs_ctrl_reg = rd32(E1000_PCS_LCTL);
+		pcs_ctrl_reg |= E1000_PCS_LCTL_FORCE_FCTRL;
+		wr32(E1000_PCS_LCTL, pcs_ctrl_reg);
+
+		ret_val = igb_force_mac_fc(hw);
+		if (ret_val) {
+			hw_dbg("Error forcing flow control settings\n");
+			return ret_val;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_speed_and_duplex_copper - Retrieve current speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @speed: stores the current speed
+ *  @duplex: stores the current duplex
+ *
+ *  Read the status register for the current speed/duplex and store the current
+ *  speed and duplex for copper connections.
+ **/
+s32 igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+				      u16 *duplex)
+{
+	u32 status;
+
+	status = rd32(E1000_STATUS);
+	if (status & E1000_STATUS_SPEED_1000) {
+		*speed = SPEED_1000;
+		hw_dbg("1000 Mbs, ");
+	} else if (status & E1000_STATUS_SPEED_100) {
+		*speed = SPEED_100;
+		hw_dbg("100 Mbs, ");
+	} else {
+		*speed = SPEED_10;
+		hw_dbg("10 Mbs, ");
+	}
+
+	if (status & E1000_STATUS_FD) {
+		*duplex = FULL_DUPLEX;
+		hw_dbg("Full Duplex\n");
+	} else {
+		*duplex = HALF_DUPLEX;
+		hw_dbg("Half Duplex\n");
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_get_hw_semaphore - Acquire hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Acquire the HW semaphore to access the PHY or NVM
+ **/
+s32 igb_get_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+	s32 ret_val = 0;
+	s32 timeout = hw->nvm.word_size + 1;
+	s32 i = 0;
+
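+	/* Acquisition is two-staged: first wait for the hardware-owned SMBI
+	 * bit to clear, then set SWESMBI and read it back to arbitrate with
+	 * firmware for access to the shared resource.
+	 */
+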
+	/* Get the SW semaphore */
+	while (i < timeout) {
+		swsm = rd32(E1000_SWSM);
+		if (!(swsm & E1000_SWSM_SMBI))
+			break;
+
+		udelay(50);
+		i++;
+	}
+
+	if (i == timeout) {
+		hw_dbg("Driver can't access device - SMBI bit is set.\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	/* Get the FW semaphore. */
+	for (i = 0; i < timeout; i++) {
+		swsm = rd32(E1000_SWSM);
+		wr32(E1000_SWSM, swsm | E1000_SWSM_SWESMBI);
+
+		/* Semaphore acquired if bit latched */
+		if (rd32(E1000_SWSM) & E1000_SWSM_SWESMBI)
+			break;
+
+		udelay(50);
+	}
+
+	if (i == timeout) {
+		/* Release semaphores */
+		igb_put_hw_semaphore(hw);
+		hw_dbg("Driver can't access the NVM\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_put_hw_semaphore - Release hardware semaphore
+ *  @hw: pointer to the HW structure
+ *
+ *  Release hardware semaphore used to access the PHY or NVM
+ **/
+void igb_put_hw_semaphore(struct e1000_hw *hw)
+{
+	u32 swsm;
+
+	swsm = rd32(E1000_SWSM);
+
+	swsm &= ~(E1000_SWSM_SMBI | E1000_SWSM_SWESMBI);
+
+	wr32(E1000_SWSM, swsm);
+}
+
+/**
+ *  igb_get_auto_rd_done - Check for auto read completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Check EEPROM for Auto Read done bit.
+ **/
+s32 igb_get_auto_rd_done(struct e1000_hw *hw)
+{
+	s32 i = 0;
+	s32 ret_val = 0;
+
+	while (i < AUTO_READ_DONE_TIMEOUT) {
+		if (rd32(E1000_EECD) & E1000_EECD_AUTO_RD)
+			break;
+		usleep_range(1000, 2000);
+		i++;
+	}
+
+	if (i == AUTO_READ_DONE_TIMEOUT) {
+		hw_dbg("Auto read by HW from NVM has not completed.\n");
+		ret_val = -E1000_ERR_RESET;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_valid_led_default - Verify a valid default LED config
+ *  @hw: pointer to the HW structure
+ *  @data: pointer to the NVM (EEPROM)
+ *
+ *  Read the EEPROM for the current default LED configuration.  If the
+ *  LED configuration is not valid, set to a valid LED configuration.
+ **/
+static s32 igb_valid_led_default(struct e1000_hw *hw, u16 *data)
+{
+	s32 ret_val;
+
+	ret_val = hw->nvm.ops.read(hw, NVM_ID_LED_SETTINGS, 1, data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (*data == ID_LED_RESERVED_0000 || *data == ID_LED_RESERVED_FFFF) {
+		switch (hw->phy.media_type) {
+		case e1000_media_type_internal_serdes:
+			*data = ID_LED_DEFAULT_82575_SERDES;
+			break;
+		case e1000_media_type_copper:
+		default:
+			*data = ID_LED_DEFAULT;
+			break;
+		}
+	}
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_id_led_init - Initialize LED configuration
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the default LED configuration from the NVM and precomputes the
+ *  LEDCTL register values for the two alternate LED modes.
+ **/
+s32 igb_id_led_init(struct e1000_hw *hw)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	s32 ret_val;
+	const u32 ledctl_mask = 0x000000FF;
+	const u32 ledctl_on = E1000_LEDCTL_MODE_LED_ON;
+	const u32 ledctl_off = E1000_LEDCTL_MODE_LED_OFF;
+	u16 data, i, temp;
+	const u16 led_mask = 0x0F;
+
+	/* i210 and i211 devices have a different LED mechanism */
+	if ((hw->mac.type == e1000_i210) ||
+	    (hw->mac.type == e1000_i211))
+		ret_val = igb_valid_led_default_i210(hw, &data);
+	else
+		ret_val = igb_valid_led_default(hw, &data);
+
+	if (ret_val)
+		goto out;
+
+	mac->ledctl_default = rd32(E1000_LEDCTL);
+	mac->ledctl_mode1 = mac->ledctl_default;
+	mac->ledctl_mode2 = mac->ledctl_default;
+
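+	/* Each of the four LEDs has a 4-bit config field in the NVM word
+	 * (hence the i << 2 shift) and an 8-bit mode field in LEDCTL (hence
+	 * the i << 3 shift).
+	 */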
+	for (i = 0; i < 4; i++) {
+		temp = (data >> (i << 2)) & led_mask;
+		switch (temp) {
+		case ID_LED_ON1_DEF2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_ON1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_OFF1_DEF2:
+		case ID_LED_OFF1_ON2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode1 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode1 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+		switch (temp) {
+		case ID_LED_DEF1_ON2:
+		case ID_LED_ON1_ON2:
+		case ID_LED_OFF1_ON2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_on << (i << 3);
+			break;
+		case ID_LED_DEF1_OFF2:
+		case ID_LED_ON1_OFF2:
+		case ID_LED_OFF1_OFF2:
+			mac->ledctl_mode2 &= ~(ledctl_mask << (i << 3));
+			mac->ledctl_mode2 |= ledctl_off << (i << 3);
+			break;
+		default:
+			/* Do nothing */
+			break;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_cleanup_led - Set LED config to default operation
+ *  @hw: pointer to the HW structure
+ *
+ *  Remove the current LED configuration and set the LED configuration
+ *  to the default value, saved from the EEPROM.
+ **/
+s32 igb_cleanup_led(struct e1000_hw *hw)
+{
+	wr32(E1000_LEDCTL, hw->mac.ledctl_default);
+	return 0;
+}
+
+/**
+ *  igb_blink_led - Blink LED
+ *  @hw: pointer to the HW structure
+ *
+ *  Blink the LEDs which are set to be on.
+ **/
+s32 igb_blink_led(struct e1000_hw *hw)
+{
+	u32 ledctl_blink = 0;
+	u32 i;
+
+	if (hw->phy.media_type == e1000_media_type_fiber) {
+		/* always blink LED0 for PCI-E fiber */
+		ledctl_blink = E1000_LEDCTL_LED0_BLINK |
+		     (E1000_LEDCTL_MODE_LED_ON << E1000_LEDCTL_LED0_MODE_SHIFT);
+	} else {
+		/* Set the blink bit for each LED that's "on" (0x0E)
+		 * (or "off" if inverted) in ledctl_mode2.  The blink
+		 * logic in hardware only works when mode is set to "on"
+		 * so it must be changed accordingly when the mode is
+		 * "off" and inverted.
+		 */
+		ledctl_blink = hw->mac.ledctl_mode2;
+		for (i = 0; i < 32; i += 8) {
+			u32 mode = (hw->mac.ledctl_mode2 >> i) &
+			    E1000_LEDCTL_LED0_MODE_MASK;
+			u32 led_default = hw->mac.ledctl_default >> i;
+
+			if ((!(led_default & E1000_LEDCTL_LED0_IVRT) &&
+			     (mode == E1000_LEDCTL_MODE_LED_ON)) ||
+			    ((led_default & E1000_LEDCTL_LED0_IVRT) &&
+			     (mode == E1000_LEDCTL_MODE_LED_OFF))) {
+				ledctl_blink &=
+				    ~(E1000_LEDCTL_LED0_MODE_MASK << i);
+				ledctl_blink |= (E1000_LEDCTL_LED0_BLINK |
+						 E1000_LEDCTL_MODE_LED_ON) << i;
+			}
+		}
+	}
+
+	wr32(E1000_LEDCTL, ledctl_blink);
+
+	return 0;
+}
+
+/**
+ *  igb_led_off - Turn LED off
+ *  @hw: pointer to the HW structure
+ *
+ *  Turn LED off.
+ **/
+s32 igb_led_off(struct e1000_hw *hw)
+{
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		wr32(E1000_LEDCTL, hw->mac.ledctl_mode1);
+		break;
+	default:
+		break;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_disable_pcie_master - Disables PCI-express master access
+ *  @hw: pointer to the HW structure
+ *
+ *  Returns 0 if successful, else returns -10
+ *  (-E1000_ERR_MASTER_REQUESTS_PENDING) if master disable bit has not caused
+ *  the master requests to be disabled.
+ *
+ *  Disables PCI-Express master access and verifies there are no pending
+ *  requests.
+ **/
+s32 igb_disable_pcie_master(struct e1000_hw *hw)
+{
+	u32 ctrl;
+	s32 timeout = MASTER_DISABLE_TIMEOUT;
+	s32 ret_val = 0;
+
+	if (hw->bus.type != e1000_bus_type_pci_express)
+		goto out;
+
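+	/* Ask the device to stop issuing new master requests, then poll the
+	 * status register until any in-flight requests have drained.
+	 */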
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= E1000_CTRL_GIO_MASTER_DISABLE;
+	wr32(E1000_CTRL, ctrl);
+
+	while (timeout) {
+		if (!(rd32(E1000_STATUS) &
+		      E1000_STATUS_GIO_MASTER_ENABLE))
+			break;
+		udelay(100);
+		timeout--;
+	}
+
+	if (!timeout) {
+		hw_dbg("Master requests are pending.\n");
+		ret_val = -E1000_ERR_MASTER_REQUESTS_PENDING;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_validate_mdi_setting - Verify MDI/MDIx settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify that when auto-negotiation is not used, MDI/MDIx is set to a
+ *  forced mode; if not, fall back to forced MDI mode.
+ **/
+s32 igb_validate_mdi_setting(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	/* All MDI settings are supported on 82580 and newer. */
+	if (hw->mac.type >= e1000_82580)
+		goto out;
+
+	if (!hw->mac.autoneg && (hw->phy.mdix == 0 || hw->phy.mdix == 3)) {
+		hw_dbg("Invalid MDI setting detected\n");
+		hw->phy.mdix = 1;
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_8bit_ctrl_reg - Write an 8-bit CTRL register
+ *  @hw: pointer to the HW structure
+ *  @reg: 32bit register offset such as E1000_SCTL
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes an address/data control type register.  There are several of these
+ *  and they all have the format address << 8 | data and bit 31 is polled for
+ *  completion.
+ **/
+s32 igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+			      u32 offset, u8 data)
+{
+	u32 i, regvalue = 0;
+	s32 ret_val = 0;
+
+	/* Set up the address and data */
+	regvalue = ((u32)data) | (offset << E1000_GEN_CTL_ADDRESS_SHIFT);
+	wr32(reg, regvalue);
+
+	/* Poll the ready bit to see if the write completed */
+	for (i = 0; i < E1000_GEN_POLL_TIMEOUT; i++) {
+		udelay(5);
+		regvalue = rd32(reg);
+		if (regvalue & E1000_GEN_CTL_READY)
+			break;
+	}
+	if (!(regvalue & E1000_GEN_CTL_READY)) {
+		hw_dbg("Reg %08x did not indicate ready\n", reg);
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_enable_mng_pass_thru - Enable processing of ARPs
+ *  @hw: pointer to the HW structure
+ *
+ *  Verifies that the hardware needs to leave the interface enabled so that
+ *  frames can be directed to and from the management interface.
+ **/
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw)
+{
+	u32 manc;
+	u32 fwsm, factps;
+	bool ret_val = false;
+
+	if (!hw->mac.asf_firmware_present)
+		goto out;
+
+	manc = rd32(E1000_MANC);
+
+	if (!(manc & E1000_MANC_RCV_TCO_EN))
+		goto out;
+
+	if (hw->mac.arc_subsystem_valid) {
+		fwsm = rd32(E1000_FWSM);
+		factps = rd32(E1000_FACTPS);
+
+		if (!(factps & E1000_FACTPS_MNGCG) &&
+		    ((fwsm & E1000_FWSM_MODE_MASK) ==
+		     (e1000_mng_mode_pt << E1000_FWSM_MODE_SHIFT))) {
+			ret_val = true;
+			goto out;
+		}
+	} else {
+		if ((manc & E1000_MANC_SMBUS_EN) &&
+		    !(manc & E1000_MANC_ASF_EN)) {
+			ret_val = true;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h
new file mode 100644
index 0000000..b50d57c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mac.h
@@ -0,0 +1,88 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_MAC_H_
+#define _E1000_MAC_H_
+
+#include "e1000_hw.h"
+
+#include "e1000_phy.h"
+#include "e1000_nvm.h"
+#include "e1000_defines.h"
+#include "e1000_i210.h"
+
+/* Functions that should not be called directly from drivers but can be used
+ * by other files in this 'shared code'
+ */
+s32  igb_blink_led(struct e1000_hw *hw);
+s32  igb_check_for_copper_link(struct e1000_hw *hw);
+s32  igb_cleanup_led(struct e1000_hw *hw);
+s32  igb_config_fc_after_link_up(struct e1000_hw *hw);
+s32  igb_disable_pcie_master(struct e1000_hw *hw);
+s32  igb_force_mac_fc(struct e1000_hw *hw);
+s32  igb_get_auto_rd_done(struct e1000_hw *hw);
+s32  igb_get_bus_info_pcie(struct e1000_hw *hw);
+s32  igb_get_hw_semaphore(struct e1000_hw *hw);
+s32  igb_get_speed_and_duplex_copper(struct e1000_hw *hw, u16 *speed,
+				     u16 *duplex);
+s32  igb_id_led_init(struct e1000_hw *hw);
+s32  igb_led_off(struct e1000_hw *hw);
+void igb_update_mc_addr_list(struct e1000_hw *hw,
+			     u8 *mc_addr_list, u32 mc_addr_count);
+s32  igb_setup_link(struct e1000_hw *hw);
+s32  igb_validate_mdi_setting(struct e1000_hw *hw);
+s32  igb_write_8bit_ctrl_reg(struct e1000_hw *hw, u32 reg,
+			     u32 offset, u8 data);
+
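+/* Typical usage within this shared code: igb_setup_link() is called from
+ * the MAC reset path, while igb_config_fc_after_link_up() runs after each
+ * link-up event to resolve the negotiated flow control settings.
+ */
+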
+void igb_clear_hw_cntrs_base(struct e1000_hw *hw);
+void igb_clear_vfta(struct e1000_hw *hw);
+void igb_clear_vfta_i350(struct e1000_hw *hw);
+s32  igb_vfta_set(struct e1000_hw *hw, u32 vid, bool add);
+void igb_config_collision_dist(struct e1000_hw *hw);
+void igb_init_rx_addrs(struct e1000_hw *hw, u16 rar_count);
+void igb_mta_set(struct e1000_hw *hw, u32 hash_value);
+void igb_put_hw_semaphore(struct e1000_hw *hw);
+void igb_rar_set(struct e1000_hw *hw, u8 *addr, u32 index);
+s32  igb_check_alt_mac_addr(struct e1000_hw *hw);
+
+bool igb_enable_mng_pass_thru(struct e1000_hw *hw);
+
+enum e1000_mng_mode {
+	e1000_mng_mode_none = 0,
+	e1000_mng_mode_asf,
+	e1000_mng_mode_pt,
+	e1000_mng_mode_ipmi,
+	e1000_mng_mode_host_if_only
+};
+
+#define E1000_FACTPS_MNGCG	0x20000000
+
+#define E1000_FWSM_MODE_MASK	0xE
+#define E1000_FWSM_MODE_SHIFT	1
+
+#define E1000_MNG_DHCP_COOKIE_STATUS_VLAN	0x2
+
+void e1000_init_function_pointers_82575(struct e1000_hw *hw);
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c
new file mode 100644
index 0000000..162cc49
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.c
@@ -0,0 +1,443 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "e1000_mbx.h"
+
+/**
+ *  igb_read_mbx - Reads a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to read
+ *
+ *  returns SUCCESS if it successfully read message from buffer
+ **/
+s32 igb_read_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	/* limit read to size of mailbox */
+	if (size > mbx->size)
+		size = mbx->size;
+
+	if (mbx->ops.read)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_write_mbx - Write a message to the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+s32 igb_write_mbx(struct e1000_hw *hw, u32 *msg, u16 size, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = 0;
+
+	if (size > mbx->size)
+		ret_val = -E1000_ERR_MBX;
+
+	else if (mbx->ops.write)
+		ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_msg - checks to see if someone sent us mail
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbx->ops.check_for_msg)
+		ret_val = mbx->ops.check_for_msg(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_ack - checks to see if someone sent us ACK
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbx->ops.check_for_ack)
+		ret_val = mbx->ops.check_for_ack(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_rst - checks to see if other side has reset
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to check
+ *
+ *  returns SUCCESS if the Status bit was found or else ERR_MBX
+ **/
+s32 igb_check_for_rst(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbx->ops.check_for_rst)
+		ret_val = mbx->ops.check_for_rst(hw, mbx_id);
+
+	return ret_val;
+}
+
+/**
+ *  igb_poll_for_msg - Wait for message notification
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification
+ **/
+static s32 igb_poll_for_msg(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_msg)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_msg(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+out:
+	return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ *  igb_poll_for_ack - Wait for message acknowledgement
+ *  @hw: pointer to the HW structure
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message acknowledgement
+ **/
+static s32 igb_poll_for_ack(struct e1000_hw *hw, u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	int countdown = mbx->timeout;
+
+	if (!countdown || !mbx->ops.check_for_ack)
+		goto out;
+
+	while (countdown && mbx->ops.check_for_ack(hw, mbx_id)) {
+		countdown--;
+		if (!countdown)
+			break;
+		udelay(mbx->usec_delay);
+	}
+
+	/* if we failed, all future posted messages fail until reset */
+	if (!countdown)
+		mbx->timeout = 0;
+out:
+	return countdown ? 0 : -E1000_ERR_MBX;
+}
+
+/**
+ *  igb_read_posted_mbx - Wait for message notification and receive message
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully received a message notification and
+ *  copied it into the receive buffer.
+ **/
+static s32 igb_read_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+			       u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (!mbx->ops.read)
+		goto out;
+
+	ret_val = igb_poll_for_msg(hw, mbx_id);
+
+	if (!ret_val)
+		ret_val = mbx->ops.read(hw, msg, size, mbx_id);
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_posted_mbx - Write a message to the mailbox, wait for ack
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @mbx_id: id of mailbox to write
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer and
+ *  received an ack to that message within delay * timeout period
+ **/
+static s32 igb_write_posted_mbx(struct e1000_hw *hw, u32 *msg, u16 size,
+				u16 mbx_id)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+	s32 ret_val = -E1000_ERR_MBX;
+
+	/* exit if either we can't write or there isn't a defined timeout */
+	if (!mbx->ops.write || !mbx->timeout)
+		goto out;
+
+	/* send msg */
+	ret_val = mbx->ops.write(hw, msg, size, mbx_id);
+
+	/* if msg sent wait until we receive an ack */
+	if (!ret_val)
+		ret_val = igb_poll_for_ack(hw, mbx_id);
+out:
+	return ret_val;
+}
+
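+/* Check for, and clear, a latched status bit in the MBVFICR register.
+ * The bits are write-1-to-clear, so writing the mask back acknowledges the
+ * event.  Returns 0 if the bit was set.
+ */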
+static s32 igb_check_for_bit_pf(struct e1000_hw *hw, u32 mask)
+{
+	u32 mbvficr = rd32(E1000_MBVFICR);
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (mbvficr & mask) {
+		ret_val = 0;
+		wr32(E1000_MBVFICR, mask);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_msg_pf - checks to see if the VF has sent mail
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_msg_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFREQ_VF1 << vf_number)) {
+		ret_val = 0;
+		hw->mbx.stats.reqs++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_ack_pf - checks to see if the VF has ACKed
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_ack_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (!igb_check_for_bit_pf(hw, E1000_MBVFICR_VFACK_VF1 << vf_number)) {
+		ret_val = 0;
+		hw->mbx.stats.acks++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_for_rst_pf - checks to see if the VF has reset
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if the VF has set the Status bit or else ERR_MBX
+ **/
+static s32 igb_check_for_rst_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	u32 vflre = rd32(E1000_VFLRE);
+	s32 ret_val = -E1000_ERR_MBX;
+
+	if (vflre & (1 << vf_number)) {
+		ret_val = 0;
+		wr32(E1000_VFLRE, (1 << vf_number));
+		hw->mbx.stats.rsts++;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_obtain_mbx_lock_pf - obtain mailbox lock
+ *  @hw: pointer to the HW structure
+ *  @vf_number: the VF index
+ *
+ *  return SUCCESS if we obtained the mailbox lock
+ **/
+static s32 igb_obtain_mbx_lock_pf(struct e1000_hw *hw, u16 vf_number)
+{
+	s32 ret_val = -E1000_ERR_MBX;
+	u32 p2v_mailbox;
+
+	/* Take ownership of the buffer */
+	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_PFU);
+
+	/* reserve mailbox for vf use */
+	p2v_mailbox = rd32(E1000_P2VMAILBOX(vf_number));
+	if (p2v_mailbox & E1000_P2VMAILBOX_PFU)
+		ret_val = 0;
+
+	return ret_val;
+}
+
+/**
+ *  igb_write_mbx_pf - Places a message in the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  returns SUCCESS if it successfully copied message into the buffer
+ **/
+static s32 igb_write_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+			    u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_write;
+
+	/* flush msg and acks as we are overwriting the message buffer */
+	igb_check_for_msg_pf(hw, vf_number);
+	igb_check_for_ack_pf(hw, vf_number);
+
+	/* copy the caller specified message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		array_wr32(E1000_VMBMEM(vf_number), i, msg[i]);
+
+	/* Interrupt VF to tell it a message has been sent and release buffer*/
+	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_STS);
+
+	/* update stats */
+	hw->mbx.stats.msgs_tx++;
+
+out_no_write:
+	return ret_val;
+}
+
+/**
+ *  igb_read_mbx_pf - Read a message from the mailbox
+ *  @hw: pointer to the HW structure
+ *  @msg: The message buffer
+ *  @size: Length of buffer
+ *  @vf_number: the VF index
+ *
+ *  This function copies a message from the mailbox buffer to the caller's
+ *  memory buffer.  The presumption is that the caller knows that there was
+ *  a message due to a VF request so no polling for message is needed.
+ **/
+static s32 igb_read_mbx_pf(struct e1000_hw *hw, u32 *msg, u16 size,
+			   u16 vf_number)
+{
+	s32 ret_val;
+	u16 i;
+
+	/* lock the mailbox to prevent pf/vf race condition */
+	ret_val = igb_obtain_mbx_lock_pf(hw, vf_number);
+	if (ret_val)
+		goto out_no_read;
+
+	/* copy the message to the mailbox memory buffer */
+	for (i = 0; i < size; i++)
+		msg[i] = array_rd32(E1000_VMBMEM(vf_number), i);
+
+	/* Acknowledge the message and release buffer */
+	wr32(E1000_P2VMAILBOX(vf_number), E1000_P2VMAILBOX_ACK);
+
+	/* update stats */
+	hw->mbx.stats.msgs_rx++;
+
+out_no_read:
+	return ret_val;
+}
+
+/**
+ *  igb_init_mbx_params_pf - set initial values for pf mailbox
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes the hw->mbx struct to correct values for pf mailbox
+ **/
+s32 igb_init_mbx_params_pf(struct e1000_hw *hw)
+{
+	struct e1000_mbx_info *mbx = &hw->mbx;
+
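+	/* the PF side does not block by default: with a zero timeout the
+	 * posted read/write helpers return immediately with an error
+	 */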
+	mbx->timeout = 0;
+	mbx->usec_delay = 0;
+
+	mbx->size = E1000_VFMAILBOX_SIZE;
+
+	mbx->ops.read = igb_read_mbx_pf;
+	mbx->ops.write = igb_write_mbx_pf;
+	mbx->ops.read_posted = igb_read_posted_mbx;
+	mbx->ops.write_posted = igb_write_posted_mbx;
+	mbx->ops.check_for_msg = igb_check_for_msg_pf;
+	mbx->ops.check_for_ack = igb_check_for_ack_pf;
+	mbx->ops.check_for_rst = igb_check_for_rst_pf;
+
+	mbx->stats.msgs_tx = 0;
+	mbx->stats.msgs_rx = 0;
+	mbx->stats.reqs = 0;
+	mbx->stats.acks = 0;
+	mbx->stats.rsts = 0;
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h
new file mode 100644
index 0000000..d20af6b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_mbx.h
@@ -0,0 +1,73 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_MBX_H_
+#define _E1000_MBX_H_
+
+#include "e1000_hw.h"
+
+#define E1000_P2VMAILBOX_STS	0x00000001 /* Initiate message send to VF */
+#define E1000_P2VMAILBOX_ACK	0x00000002 /* Ack message recv'd from VF */
+#define E1000_P2VMAILBOX_VFU	0x00000004 /* VF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_PFU	0x00000008 /* PF owns the mailbox buffer */
+#define E1000_P2VMAILBOX_RVFU	0x00000010 /* Reset VFU - used when VF stuck */
+
+#define E1000_MBVFICR_VFREQ_MASK	0x000000FF /* bits for VF messages */
+#define E1000_MBVFICR_VFREQ_VF1		0x00000001 /* bit for VF 1 message */
+#define E1000_MBVFICR_VFACK_MASK	0x00FF0000 /* bits for VF acks */
+#define E1000_MBVFICR_VFACK_VF1		0x00010000 /* bit for VF 1 ack */
+
+#define E1000_VFMAILBOX_SIZE	16 /* 16 32 bit words - 64 bytes */
+
+/* If it's an E1000_VF_* msg then it originates in the VF and is sent to the
+ * PF.  The reverse is true if it is E1000_PF_*.
+ * Message ACKs are the original value OR'd with one of the flags below.
+ */
+/* Messages below or'd with this are the ACK */
+#define E1000_VT_MSGTYPE_ACK	0x80000000
+/* Messages below or'd with this are the NACK */
+#define E1000_VT_MSGTYPE_NACK	0x40000000
+/* Indicates that VF is still clear to send requests */
+#define E1000_VT_MSGTYPE_CTS	0x20000000
+#define E1000_VT_MSGINFO_SHIFT	16
+/* bits 23:16 are used for extra info for certain messages */
+#define E1000_VT_MSGINFO_MASK	(0xFF << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_VF_RESET		0x01 /* VF requests reset */
+#define E1000_VF_SET_MAC_ADDR	0x02 /* VF requests to set MAC addr */
+#define E1000_VF_SET_MULTICAST	0x03 /* VF requests to set MC addr */
+#define E1000_VF_SET_VLAN	0x04 /* VF requests to set VLAN */
+#define E1000_VF_SET_LPE	0x05 /* VF requests to set VMOLR.LPE */
+#define E1000_VF_SET_PROMISC	0x06 /*VF requests to clear VMOLR.ROPE/MPME*/
+#define E1000_VF_SET_PROMISC_MULTICAST	(0x02 << E1000_VT_MSGINFO_SHIFT)
+
+#define E1000_PF_CONTROL_MSG	0x0100 /* PF control message */
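+
+/* For example, a PF acknowledging a VF E1000_VF_SET_MAC_ADDR request sends
+ * (E1000_VF_SET_MAC_ADDR | E1000_VT_MSGTYPE_ACK) back as the first word of
+ * its reply.
+ */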
+
+s32 igb_read_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_write_mbx(struct e1000_hw *, u32 *, u16, u16);
+s32 igb_check_for_msg(struct e1000_hw *, u16);
+s32 igb_check_for_ack(struct e1000_hw *, u16);
+s32 igb_check_for_rst(struct e1000_hw *, u16);
+s32 igb_init_mbx_params_pf(struct e1000_hw *);
+
+#endif /* _E1000_MBX_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c
new file mode 100644
index 0000000..a8d0207
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.c
@@ -0,0 +1,803 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_nvm.h"
+
+/**
+ *  igb_raise_eec_clk - Raise EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Enable/Raise the EEPROM clock bit.
+ **/
+static void igb_raise_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd | E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  igb_lower_eec_clk - Lower EEPROM clock
+ *  @hw: pointer to the HW structure
+ *  @eecd: pointer to the EEPROM
+ *
+ *  Clear/Lower the EEPROM clock bit.
+ **/
+static void igb_lower_eec_clk(struct e1000_hw *hw, u32 *eecd)
+{
+	*eecd = *eecd & ~E1000_EECD_SK;
+	wr32(E1000_EECD, *eecd);
+	wrfl();
+	udelay(hw->nvm.delay_usec);
+}
+
+/**
+ *  igb_shift_out_eec_bits - Shift data bits out to the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @data: data to send to the EEPROM
+ *  @count: number of bits to shift out
+ *
+ *  We need to shift 'count' bits out to the EEPROM.  So, the value in the
+ *  "data" parameter will be shifted out to the EEPROM one bit at a time.
+ *  In order to do this, "data" must be broken down into bits.
+ **/
+static void igb_shift_out_eec_bits(struct e1000_hw *hw, u16 data, u16 count)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	u32 mask;
+
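+	/* start at the most-significant bit of the value so the data is
+	 * clocked out to the EEPROM MSB first
+	 */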
+	mask = 0x01 << (count - 1);
+	if (nvm->type == e1000_nvm_eeprom_spi)
+		eecd |= E1000_EECD_DO;
+
+	do {
+		eecd &= ~E1000_EECD_DI;
+
+		if (data & mask)
+			eecd |= E1000_EECD_DI;
+
+		wr32(E1000_EECD, eecd);
+		wrfl();
+
+		udelay(nvm->delay_usec);
+
+		igb_raise_eec_clk(hw, &eecd);
+		igb_lower_eec_clk(hw, &eecd);
+
+		mask >>= 1;
+	} while (mask);
+
+	eecd &= ~E1000_EECD_DI;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ *  igb_shift_in_eec_bits - Shift data bits in from the EEPROM
+ *  @hw: pointer to the HW structure
+ *  @count: number of bits to shift in
+ *
+ *  In order to read a register from the EEPROM, we need to shift 'count' bits
+ *  in from the EEPROM.  Bits are "shifted in" by raising the clock input to
+ *  the EEPROM (setting the SK bit), and then reading the value of the data out
+ *  "DO" bit.  During this "shifting in" process the data in "DI" bit should
+ *  always be clear.
+ **/
+static u16 igb_shift_in_eec_bits(struct e1000_hw *hw, u16 count)
+{
+	u32 eecd;
+	u32 i;
+	u16 data;
+
+	eecd = rd32(E1000_EECD);
+
+	eecd &= ~(E1000_EECD_DO | E1000_EECD_DI);
+	data = 0;
+
+	for (i = 0; i < count; i++) {
+		data <<= 1;
+		igb_raise_eec_clk(hw, &eecd);
+
+		eecd = rd32(E1000_EECD);
+
+		eecd &= ~E1000_EECD_DI;
+		if (eecd & E1000_EECD_DO)
+			data |= 1;
+
+		igb_lower_eec_clk(hw, &eecd);
+	}
+
+	return data;
+}
+
+/**
+ *  igb_poll_eerd_eewr_done - Poll for EEPROM read/write completion
+ *  @hw: pointer to the HW structure
+ *  @ee_reg: EEPROM flag for polling
+ *
+ *  Polls the EEPROM status bit for either read or write completion based
+ *  upon the value of 'ee_reg'.
+ **/
+static s32 igb_poll_eerd_eewr_done(struct e1000_hw *hw, int ee_reg)
+{
+	u32 attempts = 100000;
+	u32 i, reg = 0;
+	s32 ret_val = -E1000_ERR_NVM;
+
+	for (i = 0; i < attempts; i++) {
+		if (ee_reg == E1000_NVM_POLL_READ)
+			reg = rd32(E1000_EERD);
+		else
+			reg = rd32(E1000_EEWR);
+
+		if (reg & E1000_NVM_RW_REG_DONE) {
+			ret_val = 0;
+			break;
+		}
+
+		udelay(5);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_acquire_nvm - Generic request for access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Set the EEPROM access request bit and wait for EEPROM access grant bit.
+ *  Return successful if access grant bit set, else clear the request for
+ *  EEPROM access and return -E1000_ERR_NVM (-1).
+ **/
+s32 igb_acquire_nvm(struct e1000_hw *hw)
+{
+	u32 eecd = rd32(E1000_EECD);
+	s32 timeout = E1000_NVM_GRANT_ATTEMPTS;
+	s32 ret_val = 0;
+
+	wr32(E1000_EECD, eecd | E1000_EECD_REQ);
+	eecd = rd32(E1000_EECD);
+
+	while (timeout) {
+		if (eecd & E1000_EECD_GNT)
+			break;
+		udelay(5);
+		eecd = rd32(E1000_EECD);
+		timeout--;
+	}
+
+	if (!timeout) {
+		eecd &= ~E1000_EECD_REQ;
+		wr32(E1000_EECD, eecd);
+		hw_dbg("Could not acquire NVM grant\n");
+		ret_val = -E1000_ERR_NVM;
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_standby_nvm - Return EEPROM to standby state
+ *  @hw: pointer to the HW structure
+ *
+ *  Return the EEPROM to a standby state.
+ **/
+static void igb_standby_nvm(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Toggle CS to flush commands */
+		eecd |= E1000_EECD_CS;
+		wr32(E1000_EECD, eecd);
+		wrfl();
+		udelay(nvm->delay_usec);
+		eecd &= ~E1000_EECD_CS;
+		wr32(E1000_EECD, eecd);
+		wrfl();
+		udelay(nvm->delay_usec);
+	}
+}
+
+/**
+ *  e1000_stop_nvm - Terminate EEPROM command
+ *  @hw: pointer to the HW structure
+ *
+ *  Terminates the current command by inverting the EEPROM's chip select pin.
+ **/
+static void e1000_stop_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	eecd = rd32(E1000_EECD);
+	if (hw->nvm.type == e1000_nvm_eeprom_spi) {
+		/* Pull CS high */
+		eecd |= E1000_EECD_CS;
+		igb_lower_eec_clk(hw, &eecd);
+	}
+}
+
+/**
+ *  igb_release_nvm - Release exclusive access to EEPROM
+ *  @hw: pointer to the HW structure
+ *
+ *  Stop any current commands to the EEPROM and clear the EEPROM request bit.
+ **/
+void igb_release_nvm(struct e1000_hw *hw)
+{
+	u32 eecd;
+
+	e1000_stop_nvm(hw);
+
+	eecd = rd32(E1000_EECD);
+	eecd &= ~E1000_EECD_REQ;
+	wr32(E1000_EECD, eecd);
+}
+
+/**
+ *  igb_ready_nvm_eeprom - Prepares EEPROM for read/write
+ *  @hw: pointer to the HW structure
+ *
+ *  Setups the EEPROM for reading and writing.
+ **/
+static s32 igb_ready_nvm_eeprom(struct e1000_hw *hw)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 eecd = rd32(E1000_EECD);
+	s32 ret_val = 0;
+	u16 timeout = 0;
+	u8 spi_stat_reg;
+
+	if (nvm->type == e1000_nvm_eeprom_spi) {
+		/* Clear SK and CS */
+		eecd &= ~(E1000_EECD_CS | E1000_EECD_SK);
+		wr32(E1000_EECD, eecd);
+		wrfl();
+		udelay(1);
+		timeout = NVM_MAX_RETRY_SPI;
+
+		/* Read "Status Register" repeatedly until the LSB is cleared.
+		 * The EEPROM will signal that the command has been completed
+		 * by clearing bit 0 of the internal status register.  If it's
+		 * not cleared within 'timeout', then error out.
+		 */
+		while (timeout) {
+			igb_shift_out_eec_bits(hw, NVM_RDSR_OPCODE_SPI,
+					       hw->nvm.opcode_bits);
+			spi_stat_reg = (u8)igb_shift_in_eec_bits(hw, 8);
+			if (!(spi_stat_reg & NVM_STATUS_RDY_SPI))
+				break;
+
+			udelay(5);
+			igb_standby_nvm(hw);
+			timeout--;
+		}
+
+		if (!timeout) {
+			hw_dbg("SPI NVM Status error\n");
+			ret_val = -E1000_ERR_NVM;
+			goto out;
+		}
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_nvm_spi - Read EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM.
+ **/
+s32 igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i = 0;
+	s32 ret_val;
+	u16 word_in;
+	u8 read_opcode = NVM_READ_OPCODE_SPI;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	ret_val = nvm->ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = igb_ready_nvm_eeprom(hw);
+	if (ret_val)
+		goto release;
+
+	igb_standby_nvm(hw);
+
+	if ((nvm->address_bits == 8) && (offset >= 128))
+		read_opcode |= NVM_A8_OPCODE_SPI;
+
+	/* Send the READ command (opcode + addr) */
+	igb_shift_out_eec_bits(hw, read_opcode, nvm->opcode_bits);
+	igb_shift_out_eec_bits(hw, (u16)(offset*2), nvm->address_bits);
+
+	/* Read the data.  SPI NVMs increment the address with each byte
+	 * read and will roll over if reading beyond the end.  This allows
+	 * us to read the whole NVM from any offset
+	 */
+	for (i = 0; i < words; i++) {
+		word_in = igb_shift_in_eec_bits(hw, 16);
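+		/* the two bytes of each word are shifted in high byte first,
+		 * so swap them into host word order
+		 */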
+		data[i] = (word_in >> 8) | (word_in << 8);
+	}
+
+release:
+	nvm->ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_nvm_eerd - Reads EEPROM using EERD register
+ *  @hw: pointer to the HW structure
+ *  @offset: offset of word in the EEPROM to read
+ *  @words: number of words to read
+ *  @data: word read from the EEPROM
+ *
+ *  Reads a 16 bit word from the EEPROM using the EERD register.
+ **/
+s32 igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	u32 i, eerd = 0;
+	s32 ret_val = 0;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+	for (i = 0; i < words; i++) {
+		eerd = ((offset+i) << E1000_NVM_RW_ADDR_SHIFT) +
+			E1000_NVM_RW_REG_START;
+
+		wr32(E1000_EERD, eerd);
+		ret_val = igb_poll_eerd_eewr_done(hw, E1000_NVM_POLL_READ);
+		if (ret_val)
+			break;
+
+		data[i] = (rd32(E1000_EERD) >>
+			E1000_NVM_RW_REG_DATA);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_nvm_spi - Write to EEPROM using SPI
+ *  @hw: pointer to the HW structure
+ *  @offset: offset within the EEPROM to be written to
+ *  @words: number of words to write
+ *  @data: 16 bit word(s) to be written to the EEPROM
+ *
+ *  Writes data to EEPROM at offset using SPI interface.
+ *
+ *  If e1000_update_nvm_checksum is not called after this function, the
+ *  EEPROM will most likely contain an invalid checksum.
+ **/
+s32 igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data)
+{
+	struct e1000_nvm_info *nvm = &hw->nvm;
+	s32 ret_val = -E1000_ERR_NVM;
+	u16 widx = 0;
+
+	/* A check for invalid values:  offset too large, too many words,
+	 * and not enough words.
+	 */
+	if ((offset >= nvm->word_size) || (words > (nvm->word_size - offset)) ||
+	    (words == 0)) {
+		hw_dbg("nvm parameter(s) out of bounds\n");
+		return ret_val;
+	}
+
+	while (widx < words) {
+		u8 write_opcode = NVM_WRITE_OPCODE_SPI;
+
+		ret_val = nvm->ops.acquire(hw);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = igb_ready_nvm_eeprom(hw);
+		if (ret_val) {
+			nvm->ops.release(hw);
+			return ret_val;
+		}
+
+		igb_standby_nvm(hw);
+
+		/* Send the WRITE ENABLE command (8 bit opcode) */
+		igb_shift_out_eec_bits(hw, NVM_WREN_OPCODE_SPI,
+					 nvm->opcode_bits);
+
+		igb_standby_nvm(hw);
+
+		/* Some SPI eeproms use the 8th address bit embedded in the
+		 * opcode
+		 */
+		if ((nvm->address_bits == 8) && (offset >= 128))
+			write_opcode |= NVM_A8_OPCODE_SPI;
+
+		/* Send the Write command (8-bit opcode + addr) */
+		igb_shift_out_eec_bits(hw, write_opcode, nvm->opcode_bits);
+		igb_shift_out_eec_bits(hw, (u16)((offset + widx) * 2),
+					 nvm->address_bits);
+
+		/* Loop to allow for up to whole page write of eeprom */
+		while (widx < words) {
+			u16 word_out = data[widx];
+
+			word_out = (word_out >> 8) | (word_out << 8);
+			igb_shift_out_eec_bits(hw, word_out, 16);
+			widx++;
+
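+			/* a write may not cross an EEPROM page boundary, so
+			 * end the burst at the page edge and let the part
+			 * commit it
+			 */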
+			if ((((offset + widx) * 2) % nvm->page_size) == 0) {
+				igb_standby_nvm(hw);
+				break;
+			}
+		}
+		usleep_range(1000, 2000);
+		nvm->ops.release(hw);
+	}
+
+	return ret_val;
+}
+
+/**
+ *  igb_read_part_string - Read device part number
+ *  @hw: pointer to the HW structure
+ *  @part_num: pointer to device part number
+ *  @part_num_size: size of part number buffer
+ *
+ *  Reads the product board assembly (PBA) number from the EEPROM and stores
+ *  the value in part_num.
+ **/
+s32 igb_read_part_string(struct e1000_hw *hw, u8 *part_num, u32 part_num_size)
+{
+	s32 ret_val;
+	u16 nvm_data;
+	u16 pointer;
+	u16 offset;
+	u16 length;
+
+	if (part_num == NULL) {
+		hw_dbg("PBA string buffer was null\n");
+		ret_val = E1000_ERR_INVALID_ARGUMENT;
+		goto out;
+	}
+
+	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_0, 1, &nvm_data);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	ret_val = hw->nvm.ops.read(hw, NVM_PBA_OFFSET_1, 1, &pointer);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	/* If nvm_data is not the pointer guard, the PBA must be in the legacy
+	 * format, which means pointer is actually our second data word for
+	 * the PBA number and we can decode it into an ASCII string.
+	 */
+	if (nvm_data != NVM_PBA_PTR_GUARD) {
+		hw_dbg("NVM PBA number is not stored as string\n");
+
+		/* we will need 11 characters to store the PBA */
+		if (part_num_size < 11) {
+			hw_dbg("PBA string buffer too small\n");
+			return E1000_ERR_NO_SPACE;
+		}
+
+		/* extract hex string from data and pointer */
+		part_num[0] = (nvm_data >> 12) & 0xF;
+		part_num[1] = (nvm_data >> 8) & 0xF;
+		part_num[2] = (nvm_data >> 4) & 0xF;
+		part_num[3] = nvm_data & 0xF;
+		part_num[4] = (pointer >> 12) & 0xF;
+		part_num[5] = (pointer >> 8) & 0xF;
+		part_num[6] = '-';
+		part_num[7] = 0;
+		part_num[8] = (pointer >> 4) & 0xF;
+		part_num[9] = pointer & 0xF;
+
+		/* put a null character on the end of our string */
+		part_num[10] = '\0';
+
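+		/* For example, the (hypothetical) words nvm_data = 0xA123 and
+		 * pointer = 0x45B7 decode to "A12345-0B7" once the nibbles
+		 * are converted to hex characters below.
+		 */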
+		/* switch all the data but the '-' to hex char */
+		for (offset = 0; offset < 10; offset++) {
+			if (part_num[offset] < 0xA)
+				part_num[offset] += '0';
+			else if (part_num[offset] < 0x10)
+				part_num[offset] += 'A' - 0xA;
+		}
+
+		goto out;
+	}
+
+	ret_val = hw->nvm.ops.read(hw, pointer, 1, &length);
+	if (ret_val) {
+		hw_dbg("NVM Read Error\n");
+		goto out;
+	}
+
+	if (length == 0xFFFF || length == 0) {
+		hw_dbg("NVM PBA number section invalid length\n");
+		ret_val = E1000_ERR_NVM_PBA_SECTION;
+		goto out;
+	}
+	/* check if part_num buffer is big enough */
+	if (part_num_size < (((u32)length * 2) - 1)) {
+		hw_dbg("PBA string buffer too small\n");
+		ret_val = E1000_ERR_NO_SPACE;
+		goto out;
+	}
+
+	/* trim pba length from start of string */
+	pointer++;
+	length--;
+
+	for (offset = 0; offset < length; offset++) {
+		ret_val = hw->nvm.ops.read(hw, pointer + offset, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		part_num[offset * 2] = (u8)(nvm_data >> 8);
+		part_num[(offset * 2) + 1] = (u8)(nvm_data & 0xFF);
+	}
+	part_num[offset * 2] = '\0';
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_mac_addr - Read device MAC address
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the device MAC address from receive address register 0 (which
+ *  the hardware loads from the EEPROM) and stores the value.  Since
+ *  devices with two ports share one EEPROM, the last bit of the MAC
+ *  address is incremented for the second port.
+ **/
+s32 igb_read_mac_addr(struct e1000_hw *hw)
+{
+	u32 rar_high;
+	u32 rar_low;
+	u16 i;
+
+	rar_high = rd32(E1000_RAH(0));
+	rar_low = rd32(E1000_RAL(0));
+
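+	/* RAL(0) carries MAC bytes 0-3; the low 16 bits of RAH(0) carry
+	 * bytes 4 and 5.
+	 */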
+	for (i = 0; i < E1000_RAL_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i] = (u8)(rar_low >> (i*8));
+
+	for (i = 0; i < E1000_RAH_MAC_ADDR_LEN; i++)
+		hw->mac.perm_addr[i+4] = (u8)(rar_high >> (i*8));
+
+	for (i = 0; i < ETH_ALEN; i++)
+		hw->mac.addr[i] = hw->mac.perm_addr[i];
+
+	return 0;
+}
+
+/**
+ *  igb_validate_nvm_checksum - Validate EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Calculates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  and then verifies that the sum of the EEPROM is equal to 0xBABA.
+ **/
+s32 igb_validate_nvm_checksum(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
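+	/* Sum every word up to and including the checksum word; a valid
+	 * image sums to NVM_SUM (0xBABA in the upstream driver).
+	 */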
+	for (i = 0; i < (NVM_CHECKSUM_REG + 1); i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+
+	if (checksum != (u16) NVM_SUM) {
+		hw_dbg("NVM Checksum Invalid\n");
+		ret_val = -E1000_ERR_NVM;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_update_nvm_checksum - Update EEPROM checksum
+ *  @hw: pointer to the HW structure
+ *
+ *  Updates the EEPROM checksum by reading/adding each word of the EEPROM
+ *  up to the checksum.  Then calculates the EEPROM checksum and writes the
+ *  value to the EEPROM.
+ **/
+s32 igb_update_nvm_checksum(struct e1000_hw *hw)
+{
+	s32  ret_val;
+	u16 checksum = 0;
+	u16 i, nvm_data;
+
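+	/* Sum all words below the checksum word, then write the value that
+	 * brings the full-image sum to NVM_SUM.
+	 */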
+	for (i = 0; i < NVM_CHECKSUM_REG; i++) {
+		ret_val = hw->nvm.ops.read(hw, i, 1, &nvm_data);
+		if (ret_val) {
+			hw_dbg("NVM Read Error while updating checksum.\n");
+			goto out;
+		}
+		checksum += nvm_data;
+	}
+	checksum = (u16) NVM_SUM - checksum;
+	ret_val = hw->nvm.ops.write(hw, NVM_CHECKSUM_REG, 1, &checksum);
+	if (ret_val)
+		hw_dbg("NVM Write Error while updating checksum.\n");
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_fw_version - Get firmware version information
+ *  @hw: pointer to the HW structure
+ *  @fw_vers: pointer to output structure
+ *
+ *  Unsupported MAC types return an all-zero version structure.
+ **/
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers)
+{
+	u16 eeprom_verh, eeprom_verl, etrack_test, fw_version;
+	u8 q, hval, rem, result;
+	u16 comb_verh, comb_verl, comb_offset;
+
+	memset(fw_vers, 0, sizeof(struct e1000_fw_version));
+
+	/* basic eeprom version numbers and bits used vary by part and by tool
+	 * used to create the nvm images. Check which data format we have.
+	 */
+	hw->nvm.ops.read(hw, NVM_ETRACK_HIWORD, 1, &etrack_test);
+	switch (hw->mac.type) {
+	case e1000_i211:
+		igb_read_invm_version(hw, fw_vers);
+		return;
+	case e1000_82575:
+	case e1000_82576:
+	case e1000_82580:
+		/* Use this format, unless EETRACK ID exists,
+		 * then use alternate format
+		 */
+		if ((etrack_test & NVM_MAJOR_MASK) != NVM_ETRACK_VALID) {
+			hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+			fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+					      >> NVM_MAJOR_SHIFT;
+			fw_vers->eep_minor = (fw_version & NVM_MINOR_MASK)
+					      >> NVM_MINOR_SHIFT;
+			fw_vers->eep_build = (fw_version & NVM_IMAGE_ID_MASK);
+			goto etrack_id;
+		}
+		break;
+	case e1000_i210:
+		if (!(igb_get_flash_presence_i210(hw))) {
+			igb_read_invm_version(hw, fw_vers);
+			return;
+		}
+		fallthrough;
+	case e1000_i350:
+		/* find combo image version */
+		hw->nvm.ops.read(hw, NVM_COMB_VER_PTR, 1, &comb_offset);
+		if ((comb_offset != 0x0) &&
+		    (comb_offset != NVM_VER_INVALID)) {
+
+			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset
+					 + 1), 1, &comb_verh);
+			hw->nvm.ops.read(hw, (NVM_COMB_VER_OFF + comb_offset),
+					 1, &comb_verl);
+
+			/* get Option Rom version if it exists and is valid */
+			if ((comb_verh && comb_verl) &&
+			    ((comb_verh != NVM_VER_INVALID) &&
+			     (comb_verl != NVM_VER_INVALID))) {
+
+				fw_vers->or_valid = true;
+				fw_vers->or_major =
+					comb_verl >> NVM_COMB_VER_SHFT;
+				fw_vers->or_build =
+					(comb_verl << NVM_COMB_VER_SHFT)
+					| (comb_verh >> NVM_COMB_VER_SHFT);
+				fw_vers->or_patch =
+					comb_verh & NVM_COMB_VER_MASK;
+			}
+		}
+		break;
+	default:
+		return;
+	}
+	hw->nvm.ops.read(hw, NVM_VERSION, 1, &fw_version);
+	fw_vers->eep_major = (fw_version & NVM_MAJOR_MASK)
+			      >> NVM_MAJOR_SHIFT;
+
+	/* check for old style version format in newer images */
+	if ((fw_version & NVM_NEW_DEC_MASK) == 0x0) {
+		eeprom_verl = (fw_version & NVM_COMB_VER_MASK);
+	} else {
+		eeprom_verl = (fw_version & NVM_MINOR_MASK)
+				>> NVM_MINOR_SHIFT;
+	}
+	/* Convert the minor value to hex before assigning it to the output
+	 * struct.  The value to be converted will not be higher than 99,
+	 * per the tool output.
+	 */
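+	/* (Assuming the upstream constants NVM_HEX_CONV = 16 and
+	 * NVM_HEX_TENS = 10: eeprom_verl = 0x26 gives q = 2, hval = 20,
+	 * rem = 6, so eep_minor = 26.)
+	 */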
+	q = eeprom_verl / NVM_HEX_CONV;
+	hval = q * NVM_HEX_TENS;
+	rem = eeprom_verl % NVM_HEX_CONV;
+	result = hval + rem;
+	fw_vers->eep_minor = result;
+
+etrack_id:
+	if ((etrack_test & NVM_MAJOR_MASK) == NVM_ETRACK_VALID) {
+		hw->nvm.ops.read(hw, NVM_ETRACK_WORD, 1, &eeprom_verl);
+		hw->nvm.ops.read(hw, (NVM_ETRACK_WORD + 1), 1, &eeprom_verh);
+		fw_vers->etrack_id = (eeprom_verh << NVM_ETRACK_SHIFT)
+			| eeprom_verl;
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h
new file mode 100644
index 0000000..cb675be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_nvm.h
@@ -0,0 +1,57 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_NVM_H_
+#define _E1000_NVM_H_
+
+s32  igb_acquire_nvm(struct e1000_hw *hw);
+void igb_release_nvm(struct e1000_hw *hw);
+s32  igb_read_mac_addr(struct e1000_hw *hw);
+s32  igb_read_part_num(struct e1000_hw *hw, u32 *part_num);
+s32  igb_read_part_string(struct e1000_hw *hw, u8 *part_num,
+			  u32 part_num_size);
+s32  igb_read_nvm_eerd(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_read_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_write_nvm_spi(struct e1000_hw *hw, u16 offset, u16 words, u16 *data);
+s32  igb_validate_nvm_checksum(struct e1000_hw *hw);
+s32  igb_update_nvm_checksum(struct e1000_hw *hw);
+
+struct e1000_fw_version {
+	u32 etrack_id;
+	u16 eep_major;
+	u16 eep_minor;
+	u16 eep_build;
+
+	u8 invm_major;
+	u8 invm_minor;
+	u8 invm_img_type;
+
+	bool or_valid;
+	u16 or_major;
+	u16 or_build;
+	u16 or_patch;
+};
+void igb_get_fw_version(struct e1000_hw *hw, struct e1000_fw_version *fw_vers);
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c
new file mode 100644
index 0000000..8d74089
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.c
@@ -0,0 +1,2513 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include <linux/if_ether.h>
+#include <linux/delay.h>
+
+#include "e1000_mac.h"
+#include "e1000_phy.h"
+
+static s32  igb_phy_setup_autoneg(struct e1000_hw *hw);
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl);
+static s32  igb_wait_autoneg(struct e1000_hw *hw);
+static s32  igb_set_master_slave_mode(struct e1000_hw *hw);
+
+/* Cable length tables */
+static const u16 e1000_m88_cable_length_table[] = {
+	0, 50, 80, 110, 140, 140, E1000_CABLE_LENGTH_UNDEFINED };
+#define M88E1000_CABLE_LENGTH_TABLE_SIZE \
+	(sizeof(e1000_m88_cable_length_table) / \
+	sizeof(e1000_m88_cable_length_table[0]))
+
+static const u16 e1000_igp_2_cable_length_table[] = {
+	0, 0, 0, 0, 0, 0, 0, 0, 3, 5, 8, 11, 13, 16, 18, 21,
+	0, 0, 0, 3, 6, 10, 13, 16, 19, 23, 26, 29, 32, 35, 38, 41,
+	6, 10, 14, 18, 22, 26, 30, 33, 37, 41, 44, 48, 51, 54, 58, 61,
+	21, 26, 31, 35, 40, 44, 49, 53, 57, 61, 65, 68, 72, 75, 79, 82,
+	40, 45, 51, 56, 61, 66, 70, 75, 79, 83, 87, 91, 94, 98, 101, 104,
+	60, 66, 72, 77, 82, 87, 92, 96, 100, 104, 108, 111, 114, 117, 119, 121,
+	83, 89, 95, 100, 105, 109, 113, 116, 119, 122, 124,
+	104, 109, 114, 118, 121, 124};
+#define IGP02E1000_CABLE_LENGTH_TABLE_SIZE \
+	(sizeof(e1000_igp_2_cable_length_table) / \
+	 sizeof(e1000_igp_2_cable_length_table[0]))
+
+/**
+ *  igb_check_reset_block - Check if PHY reset is blocked
+ *  @hw: pointer to the HW structure
+ *
+ *  Read the PHY management control register and check whether a PHY reset
+ *  is blocked.  If a reset is not blocked return 0, otherwise
+ *  return E1000_BLK_PHY_RESET (12).
+ **/
+s32 igb_check_reset_block(struct e1000_hw *hw)
+{
+	u32 manc;
+
+	manc = rd32(E1000_MANC);
+
+	return (manc & E1000_MANC_BLK_PHY_RST_ON_IDE) ? E1000_BLK_PHY_RESET : 0;
+}
+
+/**
+ *  igb_get_phy_id - Retrieve the PHY ID and revision
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY registers and stores the PHY ID and possibly the PHY
+ *  revision in the hardware structure.
+ **/
+s32 igb_get_phy_id(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_id;
+
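+	/* The PHY ID spans two registers: ID1 supplies the upper 16 bits,
+	 * ID2 the lower bits, with the bottom nibble holding the revision.
+	 */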
+	ret_val = phy->ops.read_reg(hw, PHY_ID1, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id = (u32)(phy_id << 16);
+	udelay(20);
+	ret_val = phy->ops.read_reg(hw, PHY_ID2, &phy_id);
+	if (ret_val)
+		goto out;
+
+	phy->id |= (u32)(phy_id & PHY_REVISION_MASK);
+	phy->revision = (u32)(phy_id & ~PHY_REVISION_MASK);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_reset_dsp - Reset PHY DSP
+ *  @hw: pointer to the HW structure
+ *
+ *  Reset the digital signal processor.
+ **/
+static s32 igb_phy_reset_dsp(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.write_reg))
+		goto out;
+
+	ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0xC1);
+	if (ret_val)
+		goto out;
+
+	ret_val = hw->phy.ops.write_reg(hw, M88E1000_PHY_GEN_CONTROL, 0);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_phy_reg_mdic - Read MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the MDI control register in the PHY at offset and stores the
+ *  information read to data.
+ **/
+s32 igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		hw_dbg("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/* Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
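+	/* MDIC layout (per the e1000 definition): data in bits 15:0,
+	 * register offset in 20:16, PHY address in 25:21, opcode in 27:26.
+	 */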
+	mdic = ((offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_READ));
+
+	wr32(E1000_MDIC, mdic);
+
+	/* Poll the ready bit to see if the MDI read completed
+	 * Increasing the time out as testing showed failures with
+	 * the lower time out
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = rd32(E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		hw_dbg("MDI Read did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		hw_dbg("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	*data = (u16) mdic;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_mdic - Write MDI control register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write to register at offset
+ *
+ *  Writes data to MDI control register in the PHY at offset.
+ **/
+s32 igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, mdic = 0;
+	s32 ret_val = 0;
+
+	if (offset > MAX_PHY_REG_ADDRESS) {
+		hw_dbg("PHY Address %d is out of range\n", offset);
+		ret_val = -E1000_ERR_PARAM;
+		goto out;
+	}
+
+	/* Set up Op-code, Phy Address, and register offset in the MDI
+	 * Control register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	mdic = (((u32)data) |
+		(offset << E1000_MDIC_REG_SHIFT) |
+		(phy->addr << E1000_MDIC_PHY_SHIFT) |
+		(E1000_MDIC_OP_WRITE));
+
+	wr32(E1000_MDIC, mdic);
+
+	/* Poll the ready bit to see if the MDI write completed
+	 * Increasing the time out as testing showed failures with
+	 * the lower time out
+	 */
+	for (i = 0; i < (E1000_GEN_POLL_TIMEOUT * 3); i++) {
+		udelay(50);
+		mdic = rd32(E1000_MDIC);
+		if (mdic & E1000_MDIC_READY)
+			break;
+	}
+	if (!(mdic & E1000_MDIC_READY)) {
+		hw_dbg("MDI Write did not complete\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+	if (mdic & E1000_MDIC_ERROR) {
+		hw_dbg("MDI Error\n");
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_read_phy_reg_i2c - Read PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Reads the PHY register at offset using the i2c interface and stores the
+ *  retrieved information in data.
+ **/
+s32 igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+
+	/* Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to retrieve the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+		  (E1000_I2CCMD_OPCODE_READ));
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C read completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = rd32(E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Read did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/* Need to byte-swap the 16-bit value. */
+	*data = ((i2ccmd >> 8) & 0x00FF) | ((i2ccmd << 8) & 0xFF00);
+
+	return 0;
+}
+
+/**
+ *  igb_write_phy_reg_i2c - Write PHY register using i2c
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Writes the data to PHY register at the offset using the i2c interface.
+ **/
+s32 igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	u32 i, i2ccmd = 0;
+	u16 phy_data_swapped;
+
+	/* Prevent overwriting the SFP I2C EEPROM, which is at the A0 address. */
+	if ((hw->phy.addr == 0) || (hw->phy.addr > 7)) {
+		hw_dbg("PHY I2C Address %d is out of range.\n",
+			  hw->phy.addr);
+		return -E1000_ERR_CONFIG;
+	}
+
+	/* Swap the data bytes for the I2C interface */
+	phy_data_swapped = ((data >> 8) & 0x00FF) | ((data << 8) & 0xFF00);
+
+	/* Set up Op-code, Phy Address, and register address in the I2CCMD
+	 * register.  The MAC will take care of interfacing with the
+	 * PHY to write the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+		  (phy->addr << E1000_I2CCMD_PHY_ADDR_SHIFT) |
+		  E1000_I2CCMD_OPCODE_WRITE |
+		  phy_data_swapped);
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C write completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		i2ccmd = rd32(E1000_I2CCMD);
+		if (i2ccmd & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(i2ccmd & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Write did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (i2ccmd & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_read_sfp_data_byte - Reads SFP module data.
+ *  @hw: pointer to the HW structure
+ *  @offset: byte location offset to be read
+ *  @data: read data buffer pointer
+ *
+ *  Reads one byte of SFP module data stored in the SFP's resident
+ *  EEPROM memory or in the SFP diagnostic area.
+ *  The function should be called with
+ *  E1000_I2CCMD_SFP_DATA_ADDR(<byte offset>) for SFP module database
+ *  access, or E1000_I2CCMD_SFP_DIAG_ADDR(<byte offset>) for SFP
+ *  diagnostic parameters access.
+ **/
+s32 igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data)
+{
+	u32 i = 0;
+	u32 i2ccmd = 0;
+	u32 data_local = 0;
+
+	if (offset > E1000_I2CCMD_SFP_DIAG_ADDR(255)) {
+		hw_dbg("I2CCMD command address exceeds upper limit\n");
+		return -E1000_ERR_PHY;
+	}
+
+	/* Set up Op-code and EEPROM address in the I2CCMD
+	 * register. The MAC will take care of interfacing with the
+	 * EEPROM to retrieve the desired data.
+	 */
+	i2ccmd = ((offset << E1000_I2CCMD_REG_ADDR_SHIFT) |
+		  E1000_I2CCMD_OPCODE_READ);
+
+	wr32(E1000_I2CCMD, i2ccmd);
+
+	/* Poll the ready bit to see if the I2C read completed */
+	for (i = 0; i < E1000_I2CCMD_PHY_TIMEOUT; i++) {
+		udelay(50);
+		data_local = rd32(E1000_I2CCMD);
+		if (data_local & E1000_I2CCMD_READY)
+			break;
+	}
+	if (!(data_local & E1000_I2CCMD_READY)) {
+		hw_dbg("I2CCMD Read did not complete\n");
+		return -E1000_ERR_PHY;
+	}
+	if (data_local & E1000_I2CCMD_ERROR) {
+		hw_dbg("I2CCMD Error bit set\n");
+		return -E1000_ERR_PHY;
+	}
+	*data = (u8) data_local & 0xFF;
+
+	return 0;
+}
+
+/**
+ *  igb_read_phy_reg_igp - Read igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to be read
+ *  @data: pointer to the read data
+ *
+ *  Acquires the semaphore, if necessary, then reads the PHY register at
+ *  offset and stores the retrieved information in data.  Releases any
+ *  acquired semaphores before exiting.
+ **/
+s32 igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.acquire))
+		goto out;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
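+	/* Offsets beyond the base page require an IGP page-select write
+	 * before the actual register access.
+	 */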
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = igb_write_phy_reg_mdic(hw,
+						 IGP01E1000_PHY_PAGE_SELECT,
+						 (u16)offset);
+		if (ret_val) {
+			hw->phy.ops.release(hw);
+			goto out;
+		}
+	}
+
+	ret_val = igb_read_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_igp - Write igp PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: register offset to write to
+ *  @data: data to write at register offset
+ *
+ *  Acquires the semaphore, if necessary, then writes the data to the PHY
+ *  register at the offset.  Releases any acquired semaphores before exiting.
+ **/
+s32 igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val = 0;
+
+	if (!(hw->phy.ops.acquire))
+		goto out;
+
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	if (offset > MAX_PHY_MULTI_PAGE_REG) {
+		ret_val = igb_write_phy_reg_mdic(hw,
+						 IGP01E1000_PHY_PAGE_SELECT,
+						 (u16)offset);
+		if (ret_val) {
+			hw->phy.ops.release(hw);
+			goto out;
+		}
+	}
+
+	ret_val = igb_write_phy_reg_mdic(hw, MAX_PHY_REG_ADDRESS & offset,
+					 data);
+
+	hw->phy.ops.release(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_setup_82580 - Setup 82580 PHY for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Carrier-sense on Transmit and downshift values.
+ **/
+s32 igb_copper_link_setup_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	if (phy->type == e1000_phy_82580) {
+		ret_val = hw->phy.ops.reset(hw);
+		if (ret_val) {
+			hw_dbg("Error resetting the PHY.\n");
+			goto out;
+		}
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, I82580_CFG_REG, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= I82580_CFG_ASSERT_CRS_ON_TX;
+
+	/* Enable downshift */
+	phy_data |= I82580_CFG_ENABLE_DOWNSHIFT;
+
+	ret_val = phy->ops.write_reg(hw, I82580_CFG_REG, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Set MDI/MDIX mode */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+	/* Options:
+	 *   0 - Auto (default)
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 */
+	switch (hw->phy.mdix) {
+	case 1:
+		break;
+	case 2:
+		phy_data |= I82580_PHY_CTRL2_MANUAL_MDIX;
+		break;
+	case 0:
+	default:
+		phy_data |= I82580_PHY_CTRL2_AUTO_MDI_MDIX;
+		break;
+	}
+	ret_val = hw->phy.ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_setup_m88 - Setup m88 PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for m88 PHYs.  If necessary, transmit clock
+ *  and downshift values are set also.
+ **/
+s32 igb_copper_link_setup_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	/* Enable CRS on TX. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+
+	/* Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+		break;
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/* Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	if (phy->revision < E1000_REVISION_4) {
+		/* Force TX_CLK in the Extended PHY Specific Control Register
+		 * to 25MHz clock.
+		 */
+		ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data |= M88E1000_EPSCR_TX_CLK_25;
+
+		if ((phy->revision == E1000_REVISION_2) &&
+		    (phy->id == M88E1111_I_PHY_ID)) {
+			/* 82573L PHY - set the downshift counter to 5x. */
+			phy_data &= ~M88EC018_EPSCR_DOWNSHIFT_COUNTER_MASK;
+			phy_data |= M88EC018_EPSCR_DOWNSHIFT_COUNTER_5X;
+		} else {
+			/* Configure Master and Slave downshift values */
+			phy_data &= ~(M88E1000_EPSCR_MASTER_DOWNSHIFT_MASK |
+				      M88E1000_EPSCR_SLAVE_DOWNSHIFT_MASK);
+			phy_data |= (M88E1000_EPSCR_MASTER_DOWNSHIFT_1X |
+				     M88E1000_EPSCR_SLAVE_DOWNSHIFT_1X);
+		}
+		ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL,
+					     phy_data);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Commit the changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val) {
+		hw_dbg("Error committing the PHY changes\n");
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_setup_m88_gen2 - Setup m88 PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up MDI/MDI-X and polarity for i347-AT4, m88e1322 and m88e1112 PHYs.
+ *  Also enables and sets the downshift parameters.
+ **/
+s32 igb_copper_link_setup_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+
+	if (phy->reset_disable)
+		return 0;
+
+	/* Enable CRS on Tx. This must be set for half-duplex operation. */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Options:
+	 *   MDI/MDI-X = 0 (default)
+	 *   0 - Auto for all speeds
+	 *   1 - MDI mode
+	 *   2 - MDI-X mode
+	 *   3 - Auto for 1000Base-T only (MDI-X for 10/100Base-T modes)
+	 */
+	phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+
+	switch (phy->mdix) {
+	case 1:
+		phy_data |= M88E1000_PSCR_MDI_MANUAL_MODE;
+		break;
+	case 2:
+		phy_data |= M88E1000_PSCR_MDIX_MANUAL_MODE;
+		break;
+	case 3:
+		/* M88E1112 does not support this mode */
+		if (phy->id != M88E1112_E_PHY_ID) {
+			phy_data |= M88E1000_PSCR_AUTO_X_1000T;
+			break;
+		}
+		fallthrough;
+	case 0:
+	default:
+		phy_data |= M88E1000_PSCR_AUTO_X_MODE;
+		break;
+	}
+
+	/* Options:
+	 *   disable_polarity_correction = 0 (default)
+	 *       Automatic Correction for Reversed Cable Polarity
+	 *   0 - Disabled
+	 *   1 - Enabled
+	 */
+	phy_data &= ~M88E1000_PSCR_POLARITY_REVERSAL;
+	if (phy->disable_polarity_correction == 1)
+		phy_data |= M88E1000_PSCR_POLARITY_REVERSAL;
+
+	/* Enable downshift and set it to 6X */
+	if (phy->id == M88E1543_E_PHY_ID) {
+		phy_data &= ~I347AT4_PSCR_DOWNSHIFT_ENABLE;
+		ret_val =
+		    phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+		if (ret_val)
+			return ret_val;
+
+		ret_val = igb_phy_sw_reset(hw);
+		if (ret_val) {
+			hw_dbg("Error committing the PHY changes\n");
+			return ret_val;
+		}
+	}
+
+	phy_data &= ~I347AT4_PSCR_DOWNSHIFT_MASK;
+	phy_data |= I347AT4_PSCR_DOWNSHIFT_6X;
+	phy_data |= I347AT4_PSCR_DOWNSHIFT_ENABLE;
+
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* Commit the changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val) {
+		hw_dbg("Error committing the PHY changes\n");
+		return ret_val;
+	}
+	ret_val = igb_set_master_slave_mode(hw);
+	if (ret_val)
+		return ret_val;
+
+	return 0;
+}
+
+/**
+ *  igb_copper_link_setup_igp - Setup igp PHYs for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up LPLU, MDI/MDI-X, polarity, Smartspeed and Master/Slave config for
+ *  igp PHYs.
+ **/
+s32 igb_copper_link_setup_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	if (phy->reset_disable) {
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.reset(hw);
+	if (ret_val) {
+		hw_dbg("Error resetting the PHY.\n");
+		goto out;
+	}
+
+	/* Wait 100ms for MAC to configure PHY from NVM settings, to avoid
+	 * timeout issues when LFS is enabled.
+	 */
+	msleep(100);
+
+	/* The NVM settings will configure LPLU in D3 for
+	 * non-IGP1 PHYs.
+	 */
+	if (phy->type == e1000_phy_igp) {
+		/* disable lplu d3 during driver init */
+		if (phy->ops.set_d3_lplu_state)
+			ret_val = phy->ops.set_d3_lplu_state(hw, false);
+		if (ret_val) {
+			hw_dbg("Error Disabling LPLU D3\n");
+			goto out;
+		}
+	}
+
+	/* disable lplu d0 during driver init */
+	ret_val = phy->ops.set_d0_lplu_state(hw, false);
+	if (ret_val) {
+		hw_dbg("Error Disabling LPLU D0\n");
+		goto out;
+	}
+	/* Configure mdi-mdix settings */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &data);
+	if (ret_val)
+		goto out;
+
+	data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+
+	switch (phy->mdix) {
+	case 1:
+		data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 2:
+		data |= IGP01E1000_PSCR_FORCE_MDI_MDIX;
+		break;
+	case 0:
+	default:
+		data |= IGP01E1000_PSCR_AUTO_MDIX;
+		break;
+	}
+	ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, data);
+	if (ret_val)
+		goto out;
+
+	/* set auto-master slave resolution settings */
+	if (hw->mac.autoneg) {
+		/* When the autonegotiation advertisement is 1000Mbps only,
+		 * we should disable SmartSpeed and enable Auto Master/Slave
+		 * resolution as the hardware default.
+		 */
+		if (phy->autoneg_advertised == ADVERTISE_1000_FULL) {
+			/* Disable SmartSpeed */
+			ret_val = phy->ops.read_reg(hw,
+						    IGP01E1000_PHY_PORT_CONFIG,
+						    &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				goto out;
+
+			/* Set auto Master/Slave resolution process */
+			ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~CR_1000T_MS_ENABLE;
+			ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+			if (ret_val)
+				goto out;
+		}
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL, &data);
+		if (ret_val)
+			goto out;
+
+		/* load defaults for future use */
+		phy->original_ms_type = (data & CR_1000T_MS_ENABLE) ?
+			((data & CR_1000T_MS_VALUE) ?
+			e1000_ms_force_master :
+			e1000_ms_force_slave) :
+			e1000_ms_auto;
+
+		switch (phy->ms_type) {
+		case e1000_ms_force_master:
+			data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_force_slave:
+			data |= CR_1000T_MS_ENABLE;
+			data &= ~(CR_1000T_MS_VALUE);
+			break;
+		case e1000_ms_auto:
+			data &= ~CR_1000T_MS_ENABLE;
+			fallthrough;
+		default:
+			break;
+		}
+		ret_val = phy->ops.write_reg(hw, PHY_1000T_CTRL, data);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_copper_link_autoneg - Setup/Enable autoneg for copper link
+ *  @hw: pointer to the HW structure
+ *
+ *  Performs initial bounds checking on the autoneg advertisement parameter,
+ *  then configures the PHY to advertise the full capability.  Sets up the
+ *  PHY for autoneg and restarts negotiation with the link partner.  If
+ *  autoneg_wait_to_complete is set, waits for autoneg to complete before
+ *  exiting.
+ **/
+static s32 igb_copper_link_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_ctrl;
+
+	/* Perform some bounds checking on the autoneg advertisement
+	 * parameter.
+	 */
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* If autoneg_advertised is zero, we assume it was not defaulted
+	 * by the calling code so we set to advertise full capability.
+	 */
+	if (phy->autoneg_advertised == 0)
+		phy->autoneg_advertised = phy->autoneg_mask;
+
+	hw_dbg("Reconfiguring auto-neg advertisement params\n");
+	ret_val = igb_phy_setup_autoneg(hw);
+	if (ret_val) {
+		hw_dbg("Error Setting up Auto-Negotiation\n");
+		goto out;
+	}
+	hw_dbg("Restarting Auto-Neg\n");
+
+	/* Restart auto-negotiation by setting the Auto Neg Enable bit and
+	 * the Auto Neg Restart bit in the PHY control register.
+	 */
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= (MII_CR_AUTO_NEG_EN | MII_CR_RESTART_AUTO_NEG);
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	/* Does the user want to wait for Auto-Neg to complete here, or
+	 * check at a later time (for example, in a callback routine)?
+	 */
+	if (phy->autoneg_wait_to_complete) {
+		ret_val = igb_wait_autoneg(hw);
+		if (ret_val) {
+			hw_dbg("Error while waiting for autoneg to complete\n");
+			goto out;
+		}
+	}
+
+	hw->mac.get_link_status = true;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_setup_autoneg - Configure PHY for auto-negotiation
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the MII auto-neg advertisement register and/or the 1000T control
+ *  register and, if the PHY is already set up for auto-negotiation, returns
+ *  successfully.  Otherwise, sets up the advertisement and flow control to
+ *  the appropriate values for the desired auto-negotiation.
+ **/
+static s32 igb_phy_setup_autoneg(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 mii_autoneg_adv_reg;
+	u16 mii_1000t_ctrl_reg = 0;
+
+	phy->autoneg_advertised &= phy->autoneg_mask;
+
+	/* Read the MII Auto-Neg Advertisement Register (Address 4). */
+	ret_val = phy->ops.read_reg(hw, PHY_AUTONEG_ADV, &mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		/* Read the MII 1000Base-T Control Register (Address 9). */
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_CTRL,
+					    &mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+	/* Need to parse both autoneg_advertised and fc and set up
+	 * the appropriate PHY registers.  First we will parse for
+	 * autoneg_advertised software override.  Since we can advertise
+	 * a plethora of combinations, we need to check each bit
+	 * individually.
+	 */
+
+	/* First we clear all the 10/100 mb speed bits in the Auto-Neg
+	 * Advertisement Register (Address 4) and the 1000 mb speed bits in
+	 * the 1000Base-T Control Register (Address 9).
+	 */
+	mii_autoneg_adv_reg &= ~(NWAY_AR_100TX_FD_CAPS |
+				 NWAY_AR_100TX_HD_CAPS |
+				 NWAY_AR_10T_FD_CAPS   |
+				 NWAY_AR_10T_HD_CAPS);
+	mii_1000t_ctrl_reg &= ~(CR_1000T_HD_CAPS | CR_1000T_FD_CAPS);
+
+	hw_dbg("autoneg_advertised %x\n", phy->autoneg_advertised);
+
+	/* Do we want to advertise 10 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_HALF) {
+		hw_dbg("Advertise 10mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_HD_CAPS;
+	}
+
+	/* Do we want to advertise 10 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_10_FULL) {
+		hw_dbg("Advertise 10mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_10T_FD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Half Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_HALF) {
+		hw_dbg("Advertise 100mb Half duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_HD_CAPS;
+	}
+
+	/* Do we want to advertise 100 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_100_FULL) {
+		hw_dbg("Advertise 100mb Full duplex\n");
+		mii_autoneg_adv_reg |= NWAY_AR_100TX_FD_CAPS;
+	}
+
+	/* We do not allow the Phy to advertise 1000 Mb Half Duplex */
+	if (phy->autoneg_advertised & ADVERTISE_1000_HALF)
+		hw_dbg("Advertise 1000mb Half duplex request denied!\n");
+
+	/* Do we want to advertise 1000 Mb Full Duplex? */
+	if (phy->autoneg_advertised & ADVERTISE_1000_FULL) {
+		hw_dbg("Advertise 1000mb Full duplex\n");
+		mii_1000t_ctrl_reg |= CR_1000T_FD_CAPS;
+	}
+
+	/* Check for a software override of the flow control settings, and
+	 * setup the PHY advertisement registers accordingly.  If
+	 * auto-negotiation is enabled, then software will have to set the
+	 * "PAUSE" bits to the correct value in the Auto-Negotiation
+	 * Advertisement Register (PHY_AUTONEG_ADV) and re-start auto-
+	 * negotiation.
+	 *
+	 * The possible values of the "fc" parameter are:
+	 *      0:  Flow control is completely disabled
+	 *      1:  Rx flow control is enabled (we can receive pause frames
+	 *          but not send pause frames).
+	 *      2:  Tx flow control is enabled (we can send pause frames
+	 *          but we do not support receiving pause frames).
+	 *      3:  Both Rx and TX flow control (symmetric) are enabled.
+	 *  other:  No software override.  The flow control configuration
+	 *          in the EEPROM is used.
+	 */
+	switch (hw->fc.current_mode) {
+	case e1000_fc_none:
+		/* Flow control (RX & TX) is completely disabled by a
+		 * software over-ride.
+		 */
+		mii_autoneg_adv_reg &= ~(NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_rx_pause:
+		/* RX Flow control is enabled, and TX Flow control is
+		 * disabled, by a software over-ride.
+		 *
+		 * Since there really isn't a way to advertise that we are
+		 * capable of RX Pause ONLY, we will advertise that we
+		 * support both symmetric and asymmetric RX PAUSE.  Later
+	 * (in igb_config_fc_after_link_up) we will disable the
+		 * hw's ability to send PAUSE frames.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	case e1000_fc_tx_pause:
+		/* TX Flow control is enabled, and RX Flow control is
+		 * disabled, by a software over-ride.
+		 */
+		mii_autoneg_adv_reg |= NWAY_AR_ASM_DIR;
+		mii_autoneg_adv_reg &= ~NWAY_AR_PAUSE;
+		break;
+	case e1000_fc_full:
+		/* Flow control (both RX and TX) is enabled by a software
+		 * over-ride.
+		 */
+		mii_autoneg_adv_reg |= (NWAY_AR_ASM_DIR | NWAY_AR_PAUSE);
+		break;
+	default:
+		hw_dbg("Flow control param set incorrectly\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = phy->ops.write_reg(hw, PHY_AUTONEG_ADV, mii_autoneg_adv_reg);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("Auto-Neg Advertising %x\n", mii_autoneg_adv_reg);
+
+	if (phy->autoneg_mask & ADVERTISE_1000_FULL) {
+		ret_val = phy->ops.write_reg(hw,
+					     PHY_1000T_CTRL,
+					     mii_1000t_ctrl_reg);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_setup_copper_link - Configure copper link settings
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the appropriate function to configure the link for auto-neg or forced
+ *  speed and duplex.  Then we check for link; once link is established, the
+ *  functions that configure collision distance and flow control are called.
+ *  If link is not established, we return -E1000_ERR_PHY (-2).
+ **/
+s32 igb_setup_copper_link(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	bool link;
+
+	if (hw->mac.autoneg) {
+		/* Setup autoneg and flow control advertisement and perform
+		 * autonegotiation.
+		 */
+		ret_val = igb_copper_link_autoneg(hw);
+		if (ret_val)
+			goto out;
+	} else {
+		/* PHY will be set to 10H, 10F, 100H or 100F
+		 * depending on user settings.
+		 */
+		hw_dbg("Forcing Speed and Duplex\n");
+		ret_val = hw->phy.ops.force_speed_duplex(hw);
+		if (ret_val) {
+			hw_dbg("Error Forcing Speed and Duplex\n");
+			goto out;
+		}
+	}
+
+	/* Check link status. Wait up to 100 microseconds for link to become
+	 * valid.
+	 */
+	ret_val = igb_phy_has_link(hw, COPPER_LINK_UP_LIMIT, 10, &link);
+	if (ret_val)
+		goto out;
+
+	if (link) {
+		hw_dbg("Valid link established!!!\n");
+		igb_config_collision_dist(hw);
+		ret_val = igb_config_fc_after_link_up(hw);
+	} else {
+		hw_dbg("Unable to establish link!!!\n");
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_igp - Force speed/duplex for igp PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  successfully if link comes up, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Clear Auto-Crossover to force MDI manually.  IGP requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~IGP01E1000_PSCR_AUTO_MDIX;
+	phy_data &= ~IGP01E1000_PSCR_FORCE_MDI_MDIX;
+
+	ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("IGP PSCR: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on IGP phy.\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			hw_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 10000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_m88 - Force speed/duplex for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Resets the PHY to commit the
+ *  changes.  If time expires while waiting for link up, we reset the DSP.
+ *  After reset, TX_CLK and CRS on TX must be set.  Returns 0 upon
+ *  successful completion, else the corresponding error code.
+ **/
+s32 igb_phy_force_speed_duplex_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	/* I210 and I211 devices support Auto-Crossover in forced operation. */
+	if (phy->type != e1000_phy_i210) {
+		/* Clear Auto-Crossover to force MDI manually.  M88E1000
+		 * requires MDI forced whenever speed and duplex are forced.
+		 */
+		ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy_data &= ~M88E1000_PSCR_AUTO_X_MODE;
+		ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL,
+					     phy_data);
+		if (ret_val)
+			goto out;
+
+		hw_dbg("M88E1000 PSCR: %X\n", phy_data);
+	}
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Reset the phy to commit changes. */
+	ret_val = igb_phy_sw_reset(hw);
+	if (ret_val)
+		goto out;
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on M88 phy.\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link) {
+			bool reset_dsp = true;
+
+			switch (hw->phy.id) {
+			case I347AT4_E_PHY_ID:
+			case M88E1112_E_PHY_ID:
+			case I210_I_PHY_ID:
+				reset_dsp = false;
+				break;
+			default:
+				if (hw->phy.type != e1000_phy_m88)
+					reset_dsp = false;
+				break;
+			}
+			if (!reset_dsp)
+				hw_dbg("Link taking longer than expected.\n");
+			else {
+				/* We didn't get link.
+				 * Reset the DSP and cross our fingers.
+				 */
+				ret_val = phy->ops.write_reg(hw,
+						M88E1000_PHY_PAGE_SELECT,
+						0x001d);
+				if (ret_val)
+					goto out;
+				ret_val = igb_phy_reset_dsp(hw);
+				if (ret_val)
+					goto out;
+			}
+		}
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT,
+					   100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+	if (hw->phy.type != e1000_phy_m88 ||
+	    hw->phy.id == I347AT4_E_PHY_ID ||
+	    hw->phy.id == M88E1112_E_PHY_ID ||
+	    hw->phy.id == I210_I_PHY_ID)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Resetting the phy means we need to re-force TX_CLK in the
+	 * Extended PHY Specific Control Register to 25MHz clock from
+	 * the reset value of 2.5MHz.
+	 */
+	phy_data |= M88E1000_EPSCR_TX_CLK_25;
+	ret_val = phy->ops.write_reg(hw, M88E1000_EXT_PHY_SPEC_CTRL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* In addition, we must re-enable CRS on Tx for both half and full
+	 * duplex.
+	 */
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data |= M88E1000_PSCR_ASSERT_CRS_ON_TX;
+	ret_val = phy->ops.write_reg(hw, M88E1000_PHY_SPEC_CTRL, phy_data);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_setup - Configure forced PHY speed/duplex
+ *  @hw: pointer to the HW structure
+ *  @phy_ctrl: pointer to current value of PHY_CONTROL
+ *
+ *  Forces speed and duplex on the PHY by doing the following: disable flow
+ *  control, force speed/duplex on the MAC, disable auto speed detection,
+ *  disable auto-negotiation, configure duplex, configure speed, configure
+ *  the collision distance, write configuration to CTRL register.  The
+ *  caller must write to the PHY_CONTROL register for these settings to
+ *  take effect.
+ **/
+static void igb_phy_force_speed_duplex_setup(struct e1000_hw *hw,
+					     u16 *phy_ctrl)
+{
+	struct e1000_mac_info *mac = &hw->mac;
+	u32 ctrl;
+
+	/* Turn off flow control when forcing speed/duplex */
+	hw->fc.current_mode = e1000_fc_none;
+
+	/* Force speed/duplex on the mac */
+	ctrl = rd32(E1000_CTRL);
+	ctrl |= (E1000_CTRL_FRCSPD | E1000_CTRL_FRCDPX);
+	ctrl &= ~E1000_CTRL_SPD_SEL;
+
+	/* Disable Auto Speed Detection */
+	ctrl &= ~E1000_CTRL_ASDE;
+
+	/* Disable autoneg on the phy */
+	*phy_ctrl &= ~MII_CR_AUTO_NEG_EN;
+
+	/* Forcing Full or Half Duplex? */
+	if (mac->forced_speed_duplex & E1000_ALL_HALF_DUPLEX) {
+		ctrl &= ~E1000_CTRL_FD;
+		*phy_ctrl &= ~MII_CR_FULL_DUPLEX;
+		hw_dbg("Half Duplex\n");
+	} else {
+		ctrl |= E1000_CTRL_FD;
+		*phy_ctrl |= MII_CR_FULL_DUPLEX;
+		hw_dbg("Full Duplex\n");
+	}
+
+	/* Forcing 10mb or 100mb? */
+	if (mac->forced_speed_duplex & E1000_ALL_100_SPEED) {
+		ctrl |= E1000_CTRL_SPD_100;
+		*phy_ctrl |= MII_CR_SPEED_100;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_10);
+		hw_dbg("Forcing 100mb\n");
+	} else {
+		ctrl &= ~(E1000_CTRL_SPD_1000 | E1000_CTRL_SPD_100);
+		*phy_ctrl |= MII_CR_SPEED_10;
+		*phy_ctrl &= ~(MII_CR_SPEED_1000 | MII_CR_SPEED_100);
+		hw_dbg("Forcing 10mb\n");
+	}
+
+	igb_config_collision_dist(hw);
+
+	wr32(E1000_CTRL, ctrl);
+}
+
+/**
+ *  igb_set_d3_lplu_state - Sets low power link up state for D3
+ *  @hw: pointer to the HW structure
+ *  @active: boolean used to enable/disable lplu
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  The low power link up (lplu) state is set to the power management level D3
+ *  and SmartSpeed is disabled when active is true, else clear lplu for D3
+ *  and enable Smartspeed.  LPLU and Smartspeed are mutually exclusive.  LPLU
+ *  is used during Dx states where the power conservation is most important.
+ *  During driver activity, SmartSpeed should be enabled so performance is
+ *  maintained.
+ **/
+s32 igb_set_d3_lplu_state(struct e1000_hw *hw, bool active)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 data;
+
+	if (!(hw->phy.ops.read_reg))
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, IGP02E1000_PHY_POWER_MGMT, &data);
+	if (ret_val)
+		goto out;
+
+	if (!active) {
+		data &= ~IGP02E1000_PM_D3_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+					     data);
+		if (ret_val)
+			goto out;
+		/* LPLU and SmartSpeed are mutually exclusive.  LPLU is used
+		 * during Dx states where the power conservation is most
+		 * important.  During driver activity we should enable
+		 * SmartSpeed, so performance is maintained.
+		 */
+		if (phy->smart_speed == e1000_smart_speed_on) {
+			ret_val = phy->ops.read_reg(hw,
+						    IGP01E1000_PHY_PORT_CONFIG,
+						    &data);
+			if (ret_val)
+				goto out;
+
+			data |= IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				goto out;
+		} else if (phy->smart_speed == e1000_smart_speed_off) {
+			ret_val = phy->ops.read_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     &data);
+			if (ret_val)
+				goto out;
+
+			data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+			ret_val = phy->ops.write_reg(hw,
+						     IGP01E1000_PHY_PORT_CONFIG,
+						     data);
+			if (ret_val)
+				goto out;
+		}
+	} else if ((phy->autoneg_advertised == E1000_ALL_SPEED_DUPLEX) ||
+		   (phy->autoneg_advertised == E1000_ALL_NOT_GIG) ||
+		   (phy->autoneg_advertised == E1000_ALL_10_SPEED)) {
+		data |= IGP02E1000_PM_D3_LPLU;
+		ret_val = phy->ops.write_reg(hw, IGP02E1000_PHY_POWER_MGMT,
+					      data);
+		if (ret_val)
+			goto out;
+
+		/* When LPLU is enabled, we should disable SmartSpeed */
+		ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+					    &data);
+		if (ret_val)
+			goto out;
+
+		data &= ~IGP01E1000_PSCFR_SMART_SPEED;
+		ret_val = phy->ops.write_reg(hw, IGP01E1000_PHY_PORT_CONFIG,
+					     data);
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_check_downshift - Checks whether a downshift in speed occurred
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns 1
+ *
+ *  A downshift is detected by querying the PHY link health.
+ **/
+s32 igb_check_downshift(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, offset, mask;
+
+	switch (phy->type) {
+	case e1000_phy_i210:
+	case e1000_phy_m88:
+	case e1000_phy_gg82563:
+		offset	= M88E1000_PHY_SPEC_STATUS;
+		mask	= M88E1000_PSSR_DOWNSHIFT;
+		break;
+	case e1000_phy_igp_2:
+	case e1000_phy_igp:
+	case e1000_phy_igp_3:
+		offset	= IGP01E1000_PHY_LINK_HEALTH;
+		mask	= IGP01E1000_PLHR_SS_DOWNGRADE;
+		break;
+	default:
+		/* speed downshift not supported */
+		phy->speed_downgraded = false;
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &phy_data);
+
+	if (!ret_val)
+		phy->speed_downgraded = (phy_data & mask) ? true : false;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_check_polarity_m88 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+s32 igb_check_polarity_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & M88E1000_PSSR_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  igb_check_polarity_igp - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY port status register, and the
+ *  current speed (since there is no polarity at 100Mbps).
+ **/
+static s32 igb_check_polarity_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data, offset, mask;
+
+	/* Polarity is determined based on the speed of
+	 * our connection.
+	 */
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		offset	= IGP01E1000_PHY_PCS_INIT_REG;
+		mask	= IGP01E1000_PHY_POLARITY_MASK;
+	} else {
+		/* This really only applies to 10Mbps since
+		 * there is no polarity for 100Mbps (always 0).
+		 */
+		offset	= IGP01E1000_PHY_PORT_STATUS;
+		mask	= IGP01E1000_PSSR_POLARITY_REVERSED;
+	}
+
+	ret_val = phy->ops.read_reg(hw, offset, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & mask)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_wait_autoneg - Wait for auto-neg completion
+ *  @hw: pointer to the HW structure
+ *
+ *  Waits for auto-negotiation to complete or for the auto-negotiation time
+ *  limit to expire, whichever happens first.
+ **/
+static s32 igb_wait_autoneg(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	/* Break after autoneg completes or PHY_AUTO_NEG_LIMIT expires. */
+	for (i = PHY_AUTO_NEG_LIMIT; i > 0; i--) {
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
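+		/* Status bits can be latched, so read the register twice;
+		 * the second read reflects the current state.
+		 */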
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_AUTONEG_COMPLETE)
+			break;
+		msleep(100);
+	}
+
+	/* PHY_AUTO_NEG_LIMIT expiration doesn't guarantee auto-negotiation
+	 * has completed.
+	 */
+	return ret_val;
+}
+
+/**
+ *  igb_phy_has_link - Polls PHY for link
+ *  @hw: pointer to the HW structure
+ *  @iterations: number of times to poll for link
+ *  @usec_interval: delay between polling attempts
+ *  @success: pointer to whether polling was successful or not
+ *
+ *  Polls the PHY status register for link, 'iterations' number of times.
+ **/
+s32 igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+		     u32 usec_interval, bool *success)
+{
+	s32 ret_val = 0;
+	u16 i, phy_status;
+
+	for (i = 0; i < iterations; i++) {
+		/* Some PHYs require the PHY_STATUS register to be read
+		 * twice due to the link bit being sticky.  No harm doing
+		 * it across the board.
+		 */
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val && usec_interval > 0) {
+			/* If the first read fails, another entity may have
+			 * ownership of the resources, wait and try again to
+			 * see if they have relinquished the resources yet.
+			 */
+			if (usec_interval >= 1000)
+				mdelay(usec_interval/1000);
+			else
+				udelay(usec_interval);
+		}
+		ret_val = hw->phy.ops.read_reg(hw, PHY_STATUS, &phy_status);
+		if (ret_val)
+			break;
+		if (phy_status & MII_SR_LINK_STATUS)
+			break;
+		if (usec_interval >= 1000)
+			mdelay(usec_interval/1000);
+		else
+			udelay(usec_interval);
+	}
+
+	*success = (i < iterations) ? true : false;
+
+	return ret_val;
+}
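+
+/* Typical use (as in the forced speed/duplex paths later in this file):
+ * poll for link with a bounded wait, e.g.
+ *
+ *	bool link;
+ *	ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+ *
+ * which retries PHY_FORCE_LIMIT times with a 100 ms gap; *success only
+ * reports whether link came up within the allotted iterations.
+ */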
+
+/**
+ *  igb_get_cable_length_m88 - Determine cable length for m88 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the PHY specific status register to retrieve the cable length
+ *  information.  The cable length is determined by averaging the minimum and
+ *  maximum values to get the "average" cable length.  The m88 PHY has five
+ *  possible cable length values, which are:
+ *	Register Value		Cable Length
+ *	0			< 50 meters
+ *	1			50 - 80 meters
+ *	2			80 - 110 meters
+ *	3			110 - 140 meters
+ *	4			> 140 meters
+ **/
+s32 igb_get_cable_length_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, index;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+		M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+	if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+	phy->min_cable_length = e1000_m88_cable_length_table[index];
+	phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
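+/**
+ *  igb_get_cable_length_m88_gen2 - Determine cable length for m88 gen2 PHYs
+ *  @hw: pointer to the HW structure
+ *
+ *  Reads the cable length from the PHY cable diagnostics registers.  For
+ *  I210 and I347AT4-class PHYs the raw value is reported in meters or
+ *  centimeters, per the length-unit bit in the diagnostics control
+ *  register; for M88E1112 the m88 lookup table is used, as in
+ *  igb_get_cable_length_m88().
+ **/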
+s32 igb_get_cable_length_m88_gen2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, phy_data2, index, default_page, is_cm;
+
+	switch (hw->phy.id) {
+	case I210_I_PHY_ID:
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			return ret_val;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, (0x7 << GS40G_PAGE_SHIFT) +
+					    I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			return ret_val;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+		break;
+	case M88E1543_E_PHY_ID:
+	case I347AT4_E_PHY_ID:
+		/* Remember the original page select and set it to 7 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x07);
+		if (ret_val)
+			goto out;
+
+		/* Get cable length from PHY Cable Diagnostics Control Reg */
+		ret_val = phy->ops.read_reg(hw, (I347AT4_PCDL + phy->addr),
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Check if the unit of cable length is meters or cm */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PCDC, &phy_data2);
+		if (ret_val)
+			goto out;
+
+		is_cm = !(phy_data2 & I347AT4_PCDC_CABLE_LENGTH_UNIT);
+
+		/* Populate the phy structure with cable length in meters */
+		phy->min_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->max_cable_length = phy_data / (is_cm ? 100 : 1);
+		phy->cable_length = phy_data / (is_cm ? 100 : 1);
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+		break;
+	case M88E1112_E_PHY_ID:
+		/* Remember the original page select and set it to 5 */
+		ret_val = phy->ops.read_reg(hw, I347AT4_PAGE_SELECT,
+					    &default_page);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT, 0x05);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, M88E1112_VCT_DSP_DISTANCE,
+					    &phy_data);
+		if (ret_val)
+			goto out;
+
+		index = (phy_data & M88E1000_PSSR_CABLE_LENGTH) >>
+			M88E1000_PSSR_CABLE_LENGTH_SHIFT;
+		if (index >= M88E1000_CABLE_LENGTH_TABLE_SIZE - 1) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		phy->min_cable_length = e1000_m88_cable_length_table[index];
+		phy->max_cable_length = e1000_m88_cable_length_table[index + 1];
+
+		phy->cable_length = (phy->min_cable_length +
+				     phy->max_cable_length) / 2;
+
+		/* Reset the page select to its original value */
+		ret_val = phy->ops.write_reg(hw, I347AT4_PAGE_SELECT,
+					     default_page);
+		if (ret_val)
+			goto out;
+
+		break;
+	default:
+		ret_val = -E1000_ERR_PHY;
+		goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_cable_length_igp_2 - Determine cable length for igp2 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  The automatic gain control (agc) normalizes the amplitude of the
+ *  received signal, adjusting for the attenuation produced by the
+ *  cable.  By reading the AGC registers, which represent the
+ *  combination of coarse and fine gain value, the value can be put
+ *  into a lookup table to obtain the approximate cable length
+ *  for each channel.
+ **/
+s32 igb_get_cable_length_igp_2(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val = 0;
+	u16 phy_data, i, agc_value = 0;
+	u16 cur_agc_index, max_agc_index = 0;
+	u16 min_agc_index = IGP02E1000_CABLE_LENGTH_TABLE_SIZE - 1;
+	static const u16 agc_reg_array[IGP02E1000_PHY_CHANNEL_NUM] = {
+		IGP02E1000_PHY_AGC_A,
+		IGP02E1000_PHY_AGC_B,
+		IGP02E1000_PHY_AGC_C,
+		IGP02E1000_PHY_AGC_D
+	};
+
+	/* Read the AGC registers for all channels */
+	for (i = 0; i < IGP02E1000_PHY_CHANNEL_NUM; i++) {
+		ret_val = phy->ops.read_reg(hw, agc_reg_array[i], &phy_data);
+		if (ret_val)
+			goto out;
+
+		/* Getting bits 15:9, which represent the combination of
+		 * coarse and fine gain values.  The result is a number
+		 * that can be put into the lookup table to obtain the
+		 * approximate cable length.
+		 */
+		cur_agc_index = (phy_data >> IGP02E1000_AGC_LENGTH_SHIFT) &
+				IGP02E1000_AGC_LENGTH_MASK;
+
+		/* Array index bound check. */
+		if ((cur_agc_index >= IGP02E1000_CABLE_LENGTH_TABLE_SIZE) ||
+		    (cur_agc_index == 0)) {
+			ret_val = -E1000_ERR_PHY;
+			goto out;
+		}
+
+		/* Remove min & max AGC values from calculation. */
+		if (e1000_igp_2_cable_length_table[min_agc_index] >
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			min_agc_index = cur_agc_index;
+		if (e1000_igp_2_cable_length_table[max_agc_index] <
+		    e1000_igp_2_cable_length_table[cur_agc_index])
+			max_agc_index = cur_agc_index;
+
+		agc_value += e1000_igp_2_cable_length_table[cur_agc_index];
+	}
+
+	agc_value -= (e1000_igp_2_cable_length_table[min_agc_index] +
+		      e1000_igp_2_cable_length_table[max_agc_index]);
+	agc_value /= (IGP02E1000_PHY_CHANNEL_NUM - 2);
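+	/* agc_value is now the mean of the two middle channel readings. */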
+
+	/* Calculate cable length with the error range of +/- 10 meters. */
+	phy->min_cable_length = ((agc_value - IGP02E1000_AGC_RANGE) > 0) ?
+				 (agc_value - IGP02E1000_AGC_RANGE) : 0;
+	phy->max_cable_length = agc_value + IGP02E1000_AGC_RANGE;
+
+	phy->cable_length = (phy->min_cable_length + phy->max_cable_length) / 2;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_info_m88 - Retrieve PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Valid for only copper links.  Read the PHY status register (sticky read)
+ *  to verify that link is up.  Read the PHY special control register to
+ *  determine the polarity and 10base-T extended distance.  Read the PHY
+ *  special status register to determine MDI/MDIx and current speed.  If
+ *  speed is 1000, then determine cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_m88(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u16 phy_data;
+	bool link;
+
+	if (phy->media_type != e1000_media_type_copper) {
+		hw_dbg("Phy info is only valid for copper media\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		hw_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_CTRL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->polarity_correction = (phy_data & M88E1000_PSCR_POLARITY_REVERSAL)
+				   ? true : false;
+
+	ret_val = igb_check_polarity_m88(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, M88E1000_PHY_SPEC_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (phy_data & M88E1000_PSSR_MDIX) ? true : false;
+
+	if ((phy_data & M88E1000_PSSR_SPEED) == M88E1000_PSSR_1000MBS) {
+		ret_val = phy->ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &phy_data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (phy_data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (phy_data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		/* Set values to "undefined" */
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_info_igp - Retrieve igp PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_igp(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		hw_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = igb_check_polarity_igp(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, IGP01E1000_PHY_PORT_STATUS, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & IGP01E1000_PSSR_MDIX) ? true : false;
+
+	if ((data & IGP01E1000_PSSR_SPEED_MASK) ==
+	    IGP01E1000_PSSR_SPEED_1000MBPS) {
+		ret_val = phy->ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_sw_reset - PHY software reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Does a software reset of the PHY by reading the PHY control register and
+ *  setting/write the control register reset bit to the PHY.
+ **/
+s32 igb_phy_sw_reset(struct e1000_hw *hw)
+{
+	s32 ret_val = 0;
+	u16 phy_ctrl;
+
+	if (!(hw->phy.ops.read_reg))
+		goto out;
+
+	ret_val = hw->phy.ops.read_reg(hw, PHY_CONTROL, &phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	phy_ctrl |= MII_CR_RESET;
+	ret_val = hw->phy.ops.write_reg(hw, PHY_CONTROL, phy_ctrl);
+	if (ret_val)
+		goto out;
+
+	udelay(1);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_hw_reset - PHY hardware reset
+ *  @hw: pointer to the HW structure
+ *
+ *  Verify the reset block is not blocking us from resetting.  Acquire
+ *  semaphore (if necessary) and read/set/write the device control reset
+ *  bit in the PHY.  Wait the appropriate delay time for the device to
+ *  reset and release the semaphore (if necessary).
+ **/
+s32 igb_phy_hw_reset(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32  ret_val;
+	u32 ctrl;
+
+	ret_val = igb_check_reset_block(hw);
+	if (ret_val) {
+		ret_val = 0;
+		goto out;
+	}
+
+	ret_val = phy->ops.acquire(hw);
+	if (ret_val)
+		goto out;
+
+	ctrl = rd32(E1000_CTRL);
+	wr32(E1000_CTRL, ctrl | E1000_CTRL_PHY_RST);
+	wrfl();
+
+	udelay(phy->reset_delay_us);
+
+	wr32(E1000_CTRL, ctrl);
+	wrfl();
+
+	udelay(150);
+
+	phy->ops.release(hw);
+
+	ret_val = phy->ops.get_cfg_done(hw);
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_phy_init_script_igp3 - Inits the IGP3 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Initializes an Intel Gigabit IGP3 PHY when an EEPROM is not present.
+ **/
+s32 igb_phy_init_script_igp3(struct e1000_hw *hw)
+{
+	hw_dbg("Running IGP 3 PHY init script\n");
+
+	/* PHY init IGP 3 */
+	/* Enable rise/fall, 10-mode work in class-A */
+	hw->phy.ops.write_reg(hw, 0x2F5B, 0x9018);
+	/* Remove all caps from Replica path filter */
+	hw->phy.ops.write_reg(hw, 0x2F52, 0x0000);
+	/* Bias trimming for ADC, AFE and Driver (Default) */
+	hw->phy.ops.write_reg(hw, 0x2FB1, 0x8B24);
+	/* Increase Hybrid poly bias */
+	hw->phy.ops.write_reg(hw, 0x2FB2, 0xF8F0);
+	/* Add 4% to TX amplitude in Giga mode */
+	hw->phy.ops.write_reg(hw, 0x2010, 0x10B0);
+	/* Disable trimming (TTT) */
+	hw->phy.ops.write_reg(hw, 0x2011, 0x0000);
+	/* Poly DC correction to 94.6% + 2% for all channels */
+	hw->phy.ops.write_reg(hw, 0x20DD, 0x249A);
+	/* ABS DC correction to 95.9% */
+	hw->phy.ops.write_reg(hw, 0x20DE, 0x00D3);
+	/* BG temp curve trim */
+	hw->phy.ops.write_reg(hw, 0x28B4, 0x04CE);
+	/* Increasing ADC OPAMP stage 1 currents to max */
+	hw->phy.ops.write_reg(hw, 0x2F70, 0x29E4);
+	/* Force 1000 (required for enabling PHY regs configuration) */
+	hw->phy.ops.write_reg(hw, 0x0000, 0x0140);
+	/* Set upd_freq to 6 */
+	hw->phy.ops.write_reg(hw, 0x1F30, 0x1606);
+	/* Disable NPDFE */
+	hw->phy.ops.write_reg(hw, 0x1F31, 0xB814);
+	/* Disable adaptive fixed FFE (Default) */
+	hw->phy.ops.write_reg(hw, 0x1F35, 0x002A);
+	/* Enable FFE hysteresis */
+	hw->phy.ops.write_reg(hw, 0x1F3E, 0x0067);
+	/* Fixed FFE for short cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F54, 0x0065);
+	/* Fixed FFE for medium cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F55, 0x002A);
+	/* Fixed FFE for long cable lengths */
+	hw->phy.ops.write_reg(hw, 0x1F56, 0x002A);
+	/* Enable Adaptive Clip Threshold */
+	hw->phy.ops.write_reg(hw, 0x1F72, 0x3FB0);
+	/* AHT reset limit to 1 */
+	hw->phy.ops.write_reg(hw, 0x1F76, 0xC0FF);
+	/* Set AHT master delay to 127 msec */
+	hw->phy.ops.write_reg(hw, 0x1F77, 0x1DEC);
+	/* Set scan bits for AHT */
+	hw->phy.ops.write_reg(hw, 0x1F78, 0xF9EF);
+	/* Set AHT Preset bits */
+	hw->phy.ops.write_reg(hw, 0x1F79, 0x0210);
+	/* Change integ_factor of channel A to 3 */
+	hw->phy.ops.write_reg(hw, 0x1895, 0x0003);
+	/* Change prop_factor of channels BCD to 8 */
+	hw->phy.ops.write_reg(hw, 0x1796, 0x0008);
+	/* Change cg_icount + enable integbp for channels BCD */
+	hw->phy.ops.write_reg(hw, 0x1798, 0xD008);
+	/* Change cg_icount + enable integbp + change prop_factor_master
+	 * to 8 for channel A
+	 */
+	hw->phy.ops.write_reg(hw, 0x1898, 0xD918);
+	/* Disable AHT in Slave mode on channel A */
+	hw->phy.ops.write_reg(hw, 0x187A, 0x0800);
+	/* Enable LPLU and disable AN to 1000 in non-D0a states,
+	 * Enable SPD+B2B
+	 */
+	hw->phy.ops.write_reg(hw, 0x0019, 0x008D);
+	/* Enable restart AN on an1000_dis change */
+	hw->phy.ops.write_reg(hw, 0x001B, 0x2080);
+	/* Enable wh_fifo read clock in 10/100 modes */
+	hw->phy.ops.write_reg(hw, 0x0014, 0x0045);
+	/* Restart AN, Speed selection is 1000 */
+	hw->phy.ops.write_reg(hw, 0x0000, 0x1340);
+
+	return 0;
+}
+
+/**
+ * igb_power_up_phy_copper - Restore copper link in case of PHY power down
+ * @hw: pointer to the HW structure
+ *
+ * In the case of a PHY power down to save power, or to turn off link during a
+ * driver unload, restore the link to previous settings.
+ **/
+void igb_power_up_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg &= ~MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+}
+
+/**
+ * igb_power_down_phy_copper - Power down copper PHY
+ * @hw: pointer to the HW structure
+ *
+ * Power down PHY to save power when interface is down and wake on lan
+ * is not enabled.
+ **/
+void igb_power_down_phy_copper(struct e1000_hw *hw)
+{
+	u16 mii_reg = 0;
+
+	/* The PHY will retain its settings across a power down/up cycle */
+	hw->phy.ops.read_reg(hw, PHY_CONTROL, &mii_reg);
+	mii_reg |= MII_CR_POWER_DOWN;
+	hw->phy.ops.write_reg(hw, PHY_CONTROL, mii_reg);
+	usleep_range(1000, 2000);
+}
+
+/**
+ *  igb_check_polarity_82580 - Checks the polarity.
+ *  @hw: pointer to the HW structure
+ *
+ *  Success returns 0, Failure returns -E1000_ERR_PHY (-2)
+ *
+ *  Polarity is determined based on the PHY specific status register.
+ **/
+static s32 igb_check_polarity_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+
+	if (!ret_val)
+		phy->cable_polarity = (data & I82580_PHY_STATUS2_REV_POLARITY)
+				      ? e1000_rev_polarity_reversed
+				      : e1000_rev_polarity_normal;
+
+	return ret_val;
+}
+
+/**
+ *  igb_phy_force_speed_duplex_82580 - Force speed/duplex for I82580 PHY
+ *  @hw: pointer to the HW structure
+ *
+ *  Calls the PHY setup function to force speed and duplex.  Clears the
+ *  auto-crossover to force MDI manually.  Waits for link and returns
+ *  successful if link up is successful, else -E1000_ERR_PHY (-2).
+ **/
+s32 igb_phy_force_speed_duplex_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data;
+	bool link;
+
+	ret_val = phy->ops.read_reg(hw, PHY_CONTROL, &phy_data);
+	if (ret_val)
+		goto out;
+
+	igb_phy_force_speed_duplex_setup(hw, &phy_data);
+
+	ret_val = phy->ops.write_reg(hw, PHY_CONTROL, phy_data);
+	if (ret_val)
+		goto out;
+
+	/* Clear Auto-Crossover to force MDI manually.  82580 requires MDI
+	 * forced whenever speed and duplex are forced.
+	 */
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_CTRL_2, &phy_data);
+	if (ret_val)
+		goto out;
+
+	phy_data &= ~I82580_PHY_CTRL2_MDIX_CFG_MASK;
+
+	ret_val = phy->ops.write_reg(hw, I82580_PHY_CTRL_2, phy_data);
+	if (ret_val)
+		goto out;
+
+	hw_dbg("I82580_PHY_CTRL_2: %X\n", phy_data);
+
+	udelay(1);
+
+	if (phy->autoneg_wait_to_complete) {
+		hw_dbg("Waiting for forced speed/duplex link on 82580 phy\n");
+
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+
+		if (!link)
+			hw_dbg("Link taking longer than expected.\n");
+
+		/* Try once more */
+		ret_val = igb_phy_has_link(hw, PHY_FORCE_LIMIT, 100000, &link);
+		if (ret_val)
+			goto out;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_phy_info_82580 - Retrieve I82580 PHY information
+ *  @hw: pointer to the HW structure
+ *
+ *  Read PHY status to determine if link is up.  If link is up, then
+ *  set/determine 10base-T extended distance and polarity correction.  Read
+ *  PHY port status to determine MDI/MDIx and speed.  Based on the speed,
+ *  determine the cable length, local and remote receiver.
+ **/
+s32 igb_get_phy_info_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 data;
+	bool link;
+
+	ret_val = igb_phy_has_link(hw, 1, 0, &link);
+	if (ret_val)
+		goto out;
+
+	if (!link) {
+		hw_dbg("Phy info is only valid if link is up\n");
+		ret_val = -E1000_ERR_CONFIG;
+		goto out;
+	}
+
+	phy->polarity_correction = true;
+
+	ret_val = igb_check_polarity_82580(hw);
+	if (ret_val)
+		goto out;
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_STATUS_2, &data);
+	if (ret_val)
+		goto out;
+
+	phy->is_mdix = (data & I82580_PHY_STATUS2_MDIX) ? true : false;
+
+	if ((data & I82580_PHY_STATUS2_SPEED_MASK) ==
+	    I82580_PHY_STATUS2_SPEED_1000MBPS) {
+		ret_val = hw->phy.ops.get_cable_length(hw);
+		if (ret_val)
+			goto out;
+
+		ret_val = phy->ops.read_reg(hw, PHY_1000T_STATUS, &data);
+		if (ret_val)
+			goto out;
+
+		phy->local_rx = (data & SR_1000T_LOCAL_RX_STATUS)
+				? e1000_1000t_rx_status_ok
+				: e1000_1000t_rx_status_not_ok;
+
+		phy->remote_rx = (data & SR_1000T_REMOTE_RX_STATUS)
+				 ? e1000_1000t_rx_status_ok
+				 : e1000_1000t_rx_status_not_ok;
+	} else {
+		phy->cable_length = E1000_CABLE_LENGTH_UNDEFINED;
+		phy->local_rx = e1000_1000t_rx_status_undefined;
+		phy->remote_rx = e1000_1000t_rx_status_undefined;
+	}
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_get_cable_length_82580 - Determine cable length for 82580 PHY
+ *  @hw: pointer to the HW structure
+ *
+ * Reads the diagnostic status register and verifies result is valid before
+ * placing it in the phy_cable_length field.
+ **/
+s32 igb_get_cable_length_82580(struct e1000_hw *hw)
+{
+	struct e1000_phy_info *phy = &hw->phy;
+	s32 ret_val;
+	u16 phy_data, length;
+
+	ret_val = phy->ops.read_reg(hw, I82580_PHY_DIAG_STATUS, &phy_data);
+	if (ret_val)
+		goto out;
+
+	length = (phy_data & I82580_DSTATUS_CABLE_LENGTH) >>
+		 I82580_DSTATUS_CABLE_LENGTH_SHIFT;
+
+	if (length == E1000_CABLE_LENGTH_UNDEFINED)
+		ret_val = -E1000_ERR_PHY;
+
+	phy->cable_length = length;
+
+out:
+	return ret_val;
+}
+
+/**
+ *  igb_write_phy_reg_gs40g - Write GS40G PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: lower half is register offset to write to
+ *     upper half is page to use.
+ *  @data: data to write at register offset
+ *
+ *  Acquires semaphore, if necessary, then writes the data to PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = igb_write_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_read_phy_reg_gs40g - Read GS40G PHY register
+ *  @hw: pointer to the HW structure
+ *  @offset: lower half is register offset to read from
+ *     upper half is page to use.
+ *  @data: buffer for the data read at the register offset
+ *
+ *  Acquires semaphore, if necessary, then reads the data in the PHY register
+ *  at the offset.  Release any acquired semaphores before exiting.
+ **/
+s32 igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	s32 ret_val;
+	u16 page = offset >> GS40G_PAGE_SHIFT;
+
+	offset = offset & GS40G_OFFSET_MASK;
+	ret_val = hw->phy.ops.acquire(hw);
+	if (ret_val)
+		return ret_val;
+
+	ret_val = igb_write_phy_reg_mdic(hw, GS40G_PAGE_SELECT, page);
+	if (ret_val)
+		goto release;
+	ret_val = igb_read_phy_reg_mdic(hw, offset, data);
+
+release:
+	hw->phy.ops.release(hw);
+	return ret_val;
+}
+
+/**
+ *  igb_set_master_slave_mode - Setup PHY for Master/slave mode
+ *  @hw: pointer to the HW structure
+ *
+ *  Sets up Master/slave mode
+ **/
+static s32 igb_set_master_slave_mode(struct e1000_hw *hw)
+{
+	s32 ret_val;
+	u16 phy_data;
+
+	/* Resolve Master/Slave mode */
+	ret_val = hw->phy.ops.read_reg(hw, PHY_1000T_CTRL, &phy_data);
+	if (ret_val)
+		return ret_val;
+
+	/* load defaults for future use */
+	hw->phy.original_ms_type = (phy_data & CR_1000T_MS_ENABLE) ?
+				   ((phy_data & CR_1000T_MS_VALUE) ?
+				    e1000_ms_force_master :
+				    e1000_ms_force_slave) : e1000_ms_auto;
+
+	switch (hw->phy.ms_type) {
+	case e1000_ms_force_master:
+		phy_data |= (CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_force_slave:
+		phy_data |= CR_1000T_MS_ENABLE;
+		phy_data &= ~(CR_1000T_MS_VALUE);
+		break;
+	case e1000_ms_auto:
+		phy_data &= ~CR_1000T_MS_ENABLE;
+		fallthrough;
+	default:
+		break;
+	}
+
+	return hw->phy.ops.write_reg(hw, PHY_1000T_CTRL, phy_data);
+}
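+
+/* Illustrative sketch (not a verbatim path in this driver): a caller that
+ * needs a deterministic 1000BASE-T role would set
+ *
+ *	hw->phy.ms_type = e1000_ms_force_master;
+ *
+ * before link setup; the switch above then ORs
+ * CR_1000T_MS_ENABLE | CR_1000T_MS_VALUE into PHY_1000T_CTRL.
+ */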
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h
new file mode 100644
index 0000000..6e7ac2f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_phy.h
@@ -0,0 +1,175 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_PHY_H_
+#define _E1000_PHY_H_
+
+enum e1000_ms_type {
+	e1000_ms_hw_default = 0,
+	e1000_ms_force_master,
+	e1000_ms_force_slave,
+	e1000_ms_auto
+};
+
+enum e1000_smart_speed {
+	e1000_smart_speed_default = 0,
+	e1000_smart_speed_on,
+	e1000_smart_speed_off
+};
+
+s32  igb_check_downshift(struct e1000_hw *hw);
+s32  igb_check_reset_block(struct e1000_hw *hw);
+s32  igb_copper_link_setup_igp(struct e1000_hw *hw);
+s32  igb_copper_link_setup_m88(struct e1000_hw *hw);
+s32  igb_copper_link_setup_m88_gen2(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_igp(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_m88(struct e1000_hw *hw);
+s32  igb_get_cable_length_m88(struct e1000_hw *hw);
+s32  igb_get_cable_length_m88_gen2(struct e1000_hw *hw);
+s32  igb_get_cable_length_igp_2(struct e1000_hw *hw);
+s32  igb_get_phy_id(struct e1000_hw *hw);
+s32  igb_get_phy_info_igp(struct e1000_hw *hw);
+s32  igb_get_phy_info_m88(struct e1000_hw *hw);
+s32  igb_phy_sw_reset(struct e1000_hw *hw);
+s32  igb_phy_hw_reset(struct e1000_hw *hw);
+s32  igb_read_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_set_d3_lplu_state(struct e1000_hw *hw, bool active);
+s32  igb_setup_copper_link(struct e1000_hw *hw);
+s32  igb_write_phy_reg_igp(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_phy_has_link(struct e1000_hw *hw, u32 iterations,
+				u32 usec_interval, bool *success);
+void igb_power_up_phy_copper(struct e1000_hw *hw);
+void igb_power_down_phy_copper(struct e1000_hw *hw);
+s32  igb_phy_init_script_igp3(struct e1000_hw *hw);
+s32  igb_read_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_mdic(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_read_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_i2c(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_read_sfp_data_byte(struct e1000_hw *hw, u16 offset, u8 *data);
+s32  igb_copper_link_setup_82580(struct e1000_hw *hw);
+s32  igb_get_phy_info_82580(struct e1000_hw *hw);
+s32  igb_phy_force_speed_duplex_82580(struct e1000_hw *hw);
+s32  igb_get_cable_length_82580(struct e1000_hw *hw);
+s32  igb_read_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 *data);
+s32  igb_write_phy_reg_gs40g(struct e1000_hw *hw, u32 offset, u16 data);
+s32  igb_check_polarity_m88(struct e1000_hw *hw);
+
+/* IGP01E1000 Specific Registers */
+#define IGP01E1000_PHY_PORT_CONFIG        0x10 /* Port Config */
+#define IGP01E1000_PHY_PORT_STATUS        0x11 /* Status */
+#define IGP01E1000_PHY_PORT_CTRL          0x12 /* Control */
+#define IGP01E1000_PHY_LINK_HEALTH        0x13 /* PHY Link Health */
+#define IGP02E1000_PHY_POWER_MGMT         0x19 /* Power Management */
+#define IGP01E1000_PHY_PAGE_SELECT        0x1F /* Page Select */
+#define IGP01E1000_PHY_PCS_INIT_REG       0x00B4
+#define IGP01E1000_PHY_POLARITY_MASK      0x0078
+#define IGP01E1000_PSCR_AUTO_MDIX         0x1000
+#define IGP01E1000_PSCR_FORCE_MDI_MDIX    0x2000 /* 0=MDI, 1=MDIX */
+#define IGP01E1000_PSCFR_SMART_SPEED      0x0080
+
+#define I82580_ADDR_REG                   16
+#define I82580_CFG_REG                    22
+#define I82580_CFG_ASSERT_CRS_ON_TX       (1 << 15)
+#define I82580_CFG_ENABLE_DOWNSHIFT       (3 << 10) /* auto downshift 100/10 */
+#define I82580_CTRL_REG                   23
+#define I82580_CTRL_DOWNSHIFT_MASK        (7 << 10)
+
+/* 82580 specific PHY registers */
+#define I82580_PHY_CTRL_2            18
+#define I82580_PHY_LBK_CTRL          19
+#define I82580_PHY_STATUS_2          26
+#define I82580_PHY_DIAG_STATUS       31
+
+/* I82580 PHY Status 2 */
+#define I82580_PHY_STATUS2_REV_POLARITY   0x0400
+#define I82580_PHY_STATUS2_MDIX           0x0800
+#define I82580_PHY_STATUS2_SPEED_MASK     0x0300
+#define I82580_PHY_STATUS2_SPEED_1000MBPS 0x0200
+#define I82580_PHY_STATUS2_SPEED_100MBPS  0x0100
+
+/* I82580 PHY Control 2 */
+#define I82580_PHY_CTRL2_MANUAL_MDIX      0x0200
+#define I82580_PHY_CTRL2_AUTO_MDI_MDIX    0x0400
+#define I82580_PHY_CTRL2_MDIX_CFG_MASK    0x0600
+
+/* I82580 PHY Diagnostics Status */
+#define I82580_DSTATUS_CABLE_LENGTH       0x03FC
+#define I82580_DSTATUS_CABLE_LENGTH_SHIFT 2
+
+/* 82580 PHY Power Management */
+#define E1000_82580_PHY_POWER_MGMT	0xE14
+#define E1000_82580_PM_SPD		0x0001 /* Smart Power Down */
+#define E1000_82580_PM_D0_LPLU		0x0002 /* For D0a states */
+#define E1000_82580_PM_D3_LPLU		0x0004 /* For all other states */
+#define E1000_82580_PM_GO_LINKD		0x0020 /* Go Link Disconnect */
+
+/* Enable flexible speed on link-up */
+#define IGP02E1000_PM_D0_LPLU             0x0002 /* For D0a states */
+#define IGP02E1000_PM_D3_LPLU             0x0004 /* For all other states */
+#define IGP01E1000_PLHR_SS_DOWNGRADE      0x8000
+#define IGP01E1000_PSSR_POLARITY_REVERSED 0x0002
+#define IGP01E1000_PSSR_MDIX              0x0800
+#define IGP01E1000_PSSR_SPEED_MASK        0xC000
+#define IGP01E1000_PSSR_SPEED_1000MBPS    0xC000
+#define IGP02E1000_PHY_CHANNEL_NUM        4
+#define IGP02E1000_PHY_AGC_A              0x11B1
+#define IGP02E1000_PHY_AGC_B              0x12B1
+#define IGP02E1000_PHY_AGC_C              0x14B1
+#define IGP02E1000_PHY_AGC_D              0x18B1
+#define IGP02E1000_AGC_LENGTH_SHIFT       9   /* Coarse - 15:13, Fine - 12:9 */
+#define IGP02E1000_AGC_LENGTH_MASK        0x7F
+#define IGP02E1000_AGC_RANGE              15
+
+#define E1000_CABLE_LENGTH_UNDEFINED      0xFF
+
+/* GS40G - I210 PHY defines */
+#define GS40G_PAGE_SELECT		0x16
+#define GS40G_PAGE_SHIFT		16
+#define GS40G_OFFSET_MASK		0xFFFF
+#define GS40G_PAGE_2			0x20000
+#define GS40G_MAC_REG2			0x15
+#define GS40G_MAC_LB			0x4140
+#define GS40G_MAC_SPEED_1G		0x0006
+#define GS40G_COPPER_SPEC		0x0010
+#define GS40G_LINE_LB			0x4000
+
+/* SFP modules ID memory locations */
+#define E1000_SFF_IDENTIFIER_OFFSET	0x00
+#define E1000_SFF_IDENTIFIER_SFF	0x02
+#define E1000_SFF_IDENTIFIER_SFP	0x03
+
+#define E1000_SFF_ETH_FLAGS_OFFSET	0x06
+/* Flags for SFP modules compatible with ETH up to 1Gb */
+struct e1000_sfp_flags {
+	u8 e1000_base_sx:1;
+	u8 e1000_base_lx:1;
+	u8 e1000_base_cx:1;
+	u8 e1000_base_t:1;
+	u8 e100_base_lx:1;
+	u8 e100_base_fx:1;
+	u8 e10_base_bx10:1;
+	u8 e10_base_px:1;
+};
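+
+/* Hedged usage sketch: the flags byte is fetched over the SFP I2C
+ * interface via igb_read_sfp_data_byte() and the bitfields tested,
+ * roughly:
+ *
+ *	struct e1000_sfp_flags eth_flags;
+ *	igb_read_sfp_data_byte(hw, E1000_SFF_ETH_FLAGS_OFFSET,
+ *			       (u8 *)&eth_flags);
+ *	if (eth_flags.e1000_base_sx || eth_flags.e1000_base_lx)
+ *		-> treat as a 1000BASE-SX/LX module
+ *
+ * (in the driver proper the offset is additionally wrapped in an I2C
+ * command macro before being passed down).
+ */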
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h
new file mode 100644
index 0000000..0d1d140
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/e1000_regs.h
@@ -0,0 +1,427 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#ifndef _E1000_REGS_H_
+#define _E1000_REGS_H_
+
+#define E1000_CTRL     0x00000  /* Device Control - RW */
+#define E1000_STATUS   0x00008  /* Device Status - RO */
+#define E1000_EECD     0x00010  /* EEPROM/Flash Control - RW */
+#define E1000_EERD     0x00014  /* EEPROM Read - RW */
+#define E1000_CTRL_EXT 0x00018  /* Extended Device Control - RW */
+#define E1000_MDIC     0x00020  /* MDI Control - RW */
+#define E1000_MDICNFG  0x00E04  /* MDI Config - RW */
+#define E1000_SCTL     0x00024  /* SerDes Control - RW */
+#define E1000_FCAL     0x00028  /* Flow Control Address Low - RW */
+#define E1000_FCAH     0x0002C  /* Flow Control Address High -RW */
+#define E1000_FCT      0x00030  /* Flow Control Type - RW */
+#define E1000_CONNSW   0x00034  /* Copper/Fiber switch control - RW */
+#define E1000_VET      0x00038  /* VLAN Ether Type - RW */
+#define E1000_TSSDP    0x0003C  /* Time Sync SDP Configuration Register - RW */
+#define E1000_ICR      0x000C0  /* Interrupt Cause Read - R/clr */
+#define E1000_ITR      0x000C4  /* Interrupt Throttling Rate - RW */
+#define E1000_ICS      0x000C8  /* Interrupt Cause Set - WO */
+#define E1000_IMS      0x000D0  /* Interrupt Mask Set - RW */
+#define E1000_IMC      0x000D8  /* Interrupt Mask Clear - WO */
+#define E1000_IAM      0x000E0  /* Interrupt Acknowledge Auto Mask */
+#define E1000_RCTL     0x00100  /* RX Control - RW */
+#define E1000_FCTTV    0x00170  /* Flow Control Transmit Timer Value - RW */
+#define E1000_TXCW     0x00178  /* TX Configuration Word - RW */
+#define E1000_EICR     0x01580  /* Ext. Interrupt Cause Read - R/clr */
+#define E1000_EITR(_n) (0x01680 + (0x4 * (_n)))
+#define E1000_EICS     0x01520  /* Ext. Interrupt Cause Set - WO */
+#define E1000_EIMS     0x01524  /* Ext. Interrupt Mask Set/Read - RW */
+#define E1000_EIMC     0x01528  /* Ext. Interrupt Mask Clear - WO */
+#define E1000_EIAC     0x0152C  /* Ext. Interrupt Auto Clear - RW */
+#define E1000_EIAM     0x01530  /* Ext. Interrupt Ack Auto Clear Mask - RW */
+#define E1000_GPIE     0x01514  /* General Purpose Interrupt Enable - RW */
+#define E1000_IVAR0    0x01700  /* Interrupt Vector Allocation (array) - RW */
+#define E1000_IVAR_MISC 0x01740 /* IVAR for "other" causes - RW */
+#define E1000_TCTL     0x00400  /* TX Control - RW */
+#define E1000_TCTL_EXT 0x00404  /* Extended TX Control - RW */
+#define E1000_TIPG     0x00410  /* TX Inter-packet gap -RW */
+#define E1000_AIT      0x00458  /* Adaptive Interframe Spacing Throttle - RW */
+#define E1000_LEDCTL   0x00E00  /* LED Control - RW */
+#define E1000_LEDMUX   0x08130  /* LED MUX Control */
+#define E1000_PBA      0x01000  /* Packet Buffer Allocation - RW */
+#define E1000_PBS      0x01008  /* Packet Buffer Size */
+#define E1000_EEMNGCTL 0x01010  /* MNG EEprom Control */
+#define E1000_EEARBC_I210 0x12024  /* EEPROM Auto Read Bus Control */
+#define E1000_EEWR     0x0102C  /* EEPROM Write Register - RW */
+#define E1000_I2CCMD   0x01028  /* SFPI2C Command Register - RW */
+#define E1000_FRTIMER  0x01048  /* Free Running Timer - RW */
+#define E1000_TCPTIMER 0x0104C  /* TCP Timer - RW */
+#define E1000_FCRTL    0x02160  /* Flow Control Receive Threshold Low - RW */
+#define E1000_FCRTH    0x02168  /* Flow Control Receive Threshold High - RW */
+#define E1000_FCRTV    0x02460  /* Flow Control Refresh Timer Value - RW */
+#define E1000_I2CPARAMS        0x0102C /* SFPI2C Parameters Register - RW */
+#define E1000_I2CBB_EN      0x00000100  /* I2C - Bit Bang Enable */
+#define E1000_I2C_CLK_OUT   0x00000200  /* I2C- Clock */
+#define E1000_I2C_DATA_OUT  0x00000400  /* I2C- Data Out */
+#define E1000_I2C_DATA_OE_N 0x00000800  /* I2C- Data Output Enable */
+#define E1000_I2C_DATA_IN   0x00001000  /* I2C- Data In */
+#define E1000_I2C_CLK_OE_N  0x00002000  /* I2C- Clock Output Enable */
+#define E1000_I2C_CLK_IN    0x00004000  /* I2C- Clock In */
+#define E1000_MPHY_ADDR_CTRL	0x0024 /* GbE MPHY Address Control */
+#define E1000_MPHY_DATA		0x0E10 /* GBE MPHY Data */
+#define E1000_MPHY_STAT		0x0E0C /* GBE MPHY Statistics */
+
+/* IEEE 1588 TIMESYNCH */
+#define E1000_TSYNCRXCTL 0x0B620 /* Rx Time Sync Control register - RW */
+#define E1000_TSYNCTXCTL 0x0B614 /* Tx Time Sync Control register - RW */
+#define E1000_TSYNCRXCFG 0x05F50 /* Time Sync Rx Configuration - RW */
+#define E1000_RXSTMPL    0x0B624 /* Rx timestamp Low - RO */
+#define E1000_RXSTMPH    0x0B628 /* Rx timestamp High - RO */
+#define E1000_RXSATRL    0x0B62C /* Rx timestamp attribute low - RO */
+#define E1000_RXSATRH    0x0B630 /* Rx timestamp attribute high - RO */
+#define E1000_TXSTMPL    0x0B618 /* Tx timestamp value Low - RO */
+#define E1000_TXSTMPH    0x0B61C /* Tx timestamp value High - RO */
+#define E1000_SYSTIML    0x0B600 /* System time register Low - RO */
+#define E1000_SYSTIMH    0x0B604 /* System time register High - RO */
+#define E1000_TIMINCA    0x0B608 /* Increment attributes register - RW */
+#define E1000_TSAUXC     0x0B640 /* Timesync Auxiliary Control register */
+#define E1000_TRGTTIML0  0x0B644 /* Target Time Register 0 Low  - RW */
+#define E1000_TRGTTIMH0  0x0B648 /* Target Time Register 0 High - RW */
+#define E1000_TRGTTIML1  0x0B64C /* Target Time Register 1 Low  - RW */
+#define E1000_TRGTTIMH1  0x0B650 /* Target Time Register 1 High - RW */
+#define E1000_AUXSTMPL0  0x0B65C /* Auxiliary Time Stamp 0 Register Low  - RO */
+#define E1000_AUXSTMPH0  0x0B660 /* Auxiliary Time Stamp 0 Register High - RO */
+#define E1000_AUXSTMPL1  0x0B664 /* Auxiliary Time Stamp 1 Register Low  - RO */
+#define E1000_AUXSTMPH1  0x0B668 /* Auxiliary Time Stamp 1 Register High - RO */
+#define E1000_SYSTIMR    0x0B6F8 /* System time register Residue */
+#define E1000_TSICR      0x0B66C /* Interrupt Cause Register */
+#define E1000_TSIM       0x0B674 /* Interrupt Mask Register */
+
+/* Filtering Registers */
+#define E1000_SAQF(_n) (0x5980 + 4 * (_n))
+#define E1000_DAQF(_n) (0x59A0 + 4 * (_n))
+#define E1000_SPQF(_n) (0x59C0 + 4 * (_n))
+#define E1000_FTQF(_n) (0x59E0 + 4 * (_n))
+#define E1000_SAQF0 E1000_SAQF(0)
+#define E1000_DAQF0 E1000_DAQF(0)
+#define E1000_SPQF0 E1000_SPQF(0)
+#define E1000_FTQF0 E1000_FTQF(0)
+#define E1000_SYNQF(_n) (0x055FC + (4 * (_n))) /* SYN Packet Queue Fltr */
+#define E1000_ETQF(_n)  (0x05CB0 + (4 * (_n))) /* EType Queue Fltr */
+
+#define E1000_RQDPC(_n) (0x0C030 + ((_n) * 0x40))
+
+/* DMA Coalescing registers */
+#define E1000_DMACR	0x02508 /* Control Register */
+#define E1000_DMCTXTH	0x03550 /* Transmit Threshold */
+#define E1000_DMCTLX	0x02514 /* Time to Lx Request */
+#define E1000_DMCRTRH	0x05DD0 /* Receive Packet Rate Threshold */
+#define E1000_DMCCNT	0x05DD4 /* Current Rx Count */
+#define E1000_FCRTC	0x02170 /* Flow Control Rx high watermark */
+#define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */
+
+/* TX Rate Limit Registers */
+#define E1000_RTTDQSEL	0x3604 /* Tx Desc Plane Queue Select - WO */
+#define E1000_RTTBCNRM	0x3690 /* Tx BCN Rate-scheduler MMW */
+#define E1000_RTTBCNRC	0x36B0 /* Tx BCN Rate-Scheduler Config - WO */
+
+/* Split and Replication RX Control - RW */
+#define E1000_RXPBS	0x02404 /* Rx Packet Buffer Size - RW */
+
+/* Thermal sensor configuration and status registers */
+#define E1000_THMJT	0x08100 /* Junction Temperature */
+#define E1000_THLOWTC	0x08104 /* Low Threshold Control */
+#define E1000_THMIDTC	0x08108 /* Mid Threshold Control */
+#define E1000_THHIGHTC	0x0810C /* High Threshold Control */
+#define E1000_THSTAT	0x08110 /* Thermal Sensor Status */
+
+/* Convenience macros
+ *
+ * Note: "_n" is the queue number of the register to be written to.
+ *
+ * Example usage:
+ * E1000_RDBAL(current_rx_queue)
+ */
+#define E1000_RDBAL(_n)   ((_n) < 4 ? (0x02800 + ((_n) * 0x100)) \
+				    : (0x0C000 + ((_n) * 0x40)))
+#define E1000_RDBAH(_n)   ((_n) < 4 ? (0x02804 + ((_n) * 0x100)) \
+				    : (0x0C004 + ((_n) * 0x40)))
+#define E1000_RDLEN(_n)   ((_n) < 4 ? (0x02808 + ((_n) * 0x100)) \
+				    : (0x0C008 + ((_n) * 0x40)))
+#define E1000_SRRCTL(_n)  ((_n) < 4 ? (0x0280C + ((_n) * 0x100)) \
+				    : (0x0C00C + ((_n) * 0x40)))
+#define E1000_RDH(_n)     ((_n) < 4 ? (0x02810 + ((_n) * 0x100)) \
+				    : (0x0C010 + ((_n) * 0x40)))
+#define E1000_RDT(_n)     ((_n) < 4 ? (0x02818 + ((_n) * 0x100)) \
+				    : (0x0C018 + ((_n) * 0x40)))
+#define E1000_RXDCTL(_n)  ((_n) < 4 ? (0x02828 + ((_n) * 0x100)) \
+				    : (0x0C028 + ((_n) * 0x40)))
+#define E1000_TDBAL(_n)   ((_n) < 4 ? (0x03800 + ((_n) * 0x100)) \
+				    : (0x0E000 + ((_n) * 0x40)))
+#define E1000_TDBAH(_n)   ((_n) < 4 ? (0x03804 + ((_n) * 0x100)) \
+				    : (0x0E004 + ((_n) * 0x40)))
+#define E1000_TDLEN(_n)   ((_n) < 4 ? (0x03808 + ((_n) * 0x100)) \
+				    : (0x0E008 + ((_n) * 0x40)))
+#define E1000_TDH(_n)     ((_n) < 4 ? (0x03810 + ((_n) * 0x100)) \
+				    : (0x0E010 + ((_n) * 0x40)))
+#define E1000_TDT(_n)     ((_n) < 4 ? (0x03818 + ((_n) * 0x100)) \
+				    : (0x0E018 + ((_n) * 0x40)))
+#define E1000_TXDCTL(_n)  ((_n) < 4 ? (0x03828 + ((_n) * 0x100)) \
+				    : (0x0E028 + ((_n) * 0x40)))
+#define E1000_RXCTL(_n)	  ((_n) < 4 ? (0x02814 + ((_n) * 0x100)) : \
+				      (0x0C014 + ((_n) * 0x40)))
+#define E1000_DCA_RXCTRL(_n)	E1000_RXCTL(_n)
+#define E1000_TXCTL(_n)   ((_n) < 4 ? (0x03814 + ((_n) * 0x100)) : \
+				      (0x0E014 + ((_n) * 0x40)))
+#define E1000_DCA_TXCTRL(_n) E1000_TXCTL(_n)
+#define E1000_TDWBAL(_n)  ((_n) < 4 ? (0x03838 + ((_n) * 0x100)) \
+				    : (0x0E038 + ((_n) * 0x40)))
+#define E1000_TDWBAH(_n)  ((_n) < 4 ? (0x0383C + ((_n) * 0x100)) \
+				    : (0x0E03C + ((_n) * 0x40)))
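+
+/* Example: E1000_RDBAL(2) = 0x02800 + 2 * 0x100 = 0x02A00, while
+ * E1000_RDBAL(5) = 0x0C000 + 5 * 0x40 = 0x0C140 -- queues 0-3 live in
+ * the legacy 0x100-stride block, queues 4 and up in the 0x40-stride
+ * block.
+ */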
+
+#define E1000_RXPBS	0x02404  /* Rx Packet Buffer Size - RW */
+#define E1000_TXPBS	0x03404  /* Tx Packet Buffer Size - RW */
+
+#define E1000_TDFH     0x03410  /* TX Data FIFO Head - RW */
+#define E1000_TDFT     0x03418  /* TX Data FIFO Tail - RW */
+#define E1000_TDFHS    0x03420  /* TX Data FIFO Head Saved - RW */
+#define E1000_TDFPC    0x03430  /* TX Data FIFO Packet Count - RW */
+#define E1000_DTXCTL   0x03590  /* DMA TX Control - RW */
+#define E1000_CRCERRS  0x04000  /* CRC Error Count - R/clr */
+#define E1000_ALGNERRC 0x04004  /* Alignment Error Count - R/clr */
+#define E1000_SYMERRS  0x04008  /* Symbol Error Count - R/clr */
+#define E1000_RXERRC   0x0400C  /* Receive Error Count - R/clr */
+#define E1000_MPC      0x04010  /* Missed Packet Count - R/clr */
+#define E1000_SCC      0x04014  /* Single Collision Count - R/clr */
+#define E1000_ECOL     0x04018  /* Excessive Collision Count - R/clr */
+#define E1000_MCC      0x0401C  /* Multiple Collision Count - R/clr */
+#define E1000_LATECOL  0x04020  /* Late Collision Count - R/clr */
+#define E1000_COLC     0x04028  /* Collision Count - R/clr */
+#define E1000_DC       0x04030  /* Defer Count - R/clr */
+#define E1000_TNCRS    0x04034  /* TX-No CRS - R/clr */
+#define E1000_SEC      0x04038  /* Sequence Error Count - R/clr */
+#define E1000_CEXTERR  0x0403C  /* Carrier Extension Error Count - R/clr */
+#define E1000_RLEC     0x04040  /* Receive Length Error Count - R/clr */
+#define E1000_XONRXC   0x04048  /* XON RX Count - R/clr */
+#define E1000_XONTXC   0x0404C  /* XON TX Count - R/clr */
+#define E1000_XOFFRXC  0x04050  /* XOFF RX Count - R/clr */
+#define E1000_XOFFTXC  0x04054  /* XOFF TX Count - R/clr */
+#define E1000_FCRUC    0x04058  /* Flow Control RX Unsupported Count- R/clr */
+#define E1000_PRC64    0x0405C  /* Packets RX (64 bytes) - R/clr */
+#define E1000_PRC127   0x04060  /* Packets RX (65-127 bytes) - R/clr */
+#define E1000_PRC255   0x04064  /* Packets RX (128-255 bytes) - R/clr */
+#define E1000_PRC511   0x04068  /* Packets RX (256-511 bytes) - R/clr */
+#define E1000_PRC1023  0x0406C  /* Packets RX (512-1023 bytes) - R/clr */
+#define E1000_PRC1522  0x04070  /* Packets RX (1024-1522 bytes) - R/clr */
+#define E1000_GPRC     0x04074  /* Good Packets RX Count - R/clr */
+#define E1000_BPRC     0x04078  /* Broadcast Packets RX Count - R/clr */
+#define E1000_MPRC     0x0407C  /* Multicast Packets RX Count - R/clr */
+#define E1000_GPTC     0x04080  /* Good Packets TX Count - R/clr */
+#define E1000_GORCL    0x04088  /* Good Octets RX Count Low - R/clr */
+#define E1000_GORCH    0x0408C  /* Good Octets RX Count High - R/clr */
+#define E1000_GOTCL    0x04090  /* Good Octets TX Count Low - R/clr */
+#define E1000_GOTCH    0x04094  /* Good Octets TX Count High - R/clr */
+#define E1000_RNBC     0x040A0  /* RX No Buffers Count - R/clr */
+#define E1000_RUC      0x040A4  /* RX Undersize Count - R/clr */
+#define E1000_RFC      0x040A8  /* RX Fragment Count - R/clr */
+#define E1000_ROC      0x040AC  /* RX Oversize Count - R/clr */
+#define E1000_RJC      0x040B0  /* RX Jabber Count - R/clr */
+#define E1000_MGTPRC   0x040B4  /* Management Packets RX Count - R/clr */
+#define E1000_MGTPDC   0x040B8  /* Management Packets Dropped Count - R/clr */
+#define E1000_MGTPTC   0x040BC  /* Management Packets TX Count - R/clr */
+#define E1000_TORL     0x040C0  /* Total Octets RX Low - R/clr */
+#define E1000_TORH     0x040C4  /* Total Octets RX High - R/clr */
+#define E1000_TOTL     0x040C8  /* Total Octets TX Low - R/clr */
+#define E1000_TOTH     0x040CC  /* Total Octets TX High - R/clr */
+#define E1000_TPR      0x040D0  /* Total Packets RX - R/clr */
+#define E1000_TPT      0x040D4  /* Total Packets TX - R/clr */
+#define E1000_PTC64    0x040D8  /* Packets TX (64 bytes) - R/clr */
+#define E1000_PTC127   0x040DC  /* Packets TX (65-127 bytes) - R/clr */
+#define E1000_PTC255   0x040E0  /* Packets TX (128-255 bytes) - R/clr */
+#define E1000_PTC511   0x040E4  /* Packets TX (256-511 bytes) - R/clr */
+#define E1000_PTC1023  0x040E8  /* Packets TX (512-1023 bytes) - R/clr */
+#define E1000_PTC1522  0x040EC  /* Packets TX (1024-1522 Bytes) - R/clr */
+#define E1000_MPTC     0x040F0  /* Multicast Packets TX Count - R/clr */
+#define E1000_BPTC     0x040F4  /* Broadcast Packets TX Count - R/clr */
+#define E1000_TSCTC    0x040F8  /* TCP Segmentation Context TX - R/clr */
+#define E1000_TSCTFC   0x040FC  /* TCP Segmentation Context TX Fail - R/clr */
+#define E1000_IAC      0x04100  /* Interrupt Assertion Count */
+/* Interrupt Cause Rx Packet Timer Expire Count */
+#define E1000_ICRXPTC  0x04104
+/* Interrupt Cause Rx Absolute Timer Expire Count */
+#define E1000_ICRXATC  0x04108
+/* Interrupt Cause Tx Packet Timer Expire Count */
+#define E1000_ICTXPTC  0x0410C
+/* Interrupt Cause Tx Absolute Timer Expire Count */
+#define E1000_ICTXATC  0x04110
+/* Interrupt Cause Tx Queue Empty Count */
+#define E1000_ICTXQEC  0x04118
+/* Interrupt Cause Tx Queue Minimum Threshold Count */
+#define E1000_ICTXQMTC 0x0411C
+/* Interrupt Cause Rx Descriptor Minimum Threshold Count */
+#define E1000_ICRXDMTC 0x04120
+#define E1000_ICRXOC   0x04124  /* Interrupt Cause Receiver Overrun Count */
+#define E1000_PCS_CFG0    0x04200  /* PCS Configuration 0 - RW */
+#define E1000_PCS_LCTL    0x04208  /* PCS Link Control - RW */
+#define E1000_PCS_LSTAT   0x0420C  /* PCS Link Status - RO */
+#define E1000_CBTMPC      0x0402C  /* Circuit Breaker TX Packet Count */
+#define E1000_HTDPMC      0x0403C  /* Host Transmit Discarded Packets */
+#define E1000_CBRMPC      0x040FC  /* Circuit Breaker RX Packet Count */
+#define E1000_RPTHC       0x04104  /* Rx Packets To Host */
+#define E1000_HGPTC       0x04118  /* Host Good Packets TX Count */
+#define E1000_HTCBDPC     0x04124  /* Host TX Circuit Breaker Dropped Count */
+#define E1000_HGORCL      0x04128  /* Host Good Octets Received Count Low */
+#define E1000_HGORCH      0x0412C  /* Host Good Octets Received Count High */
+#define E1000_HGOTCL      0x04130  /* Host Good Octets Transmit Count Low */
+#define E1000_HGOTCH      0x04134  /* Host Good Octets Transmit Count High */
+#define E1000_LENERRS     0x04138  /* Length Errors Count */
+#define E1000_SCVPC       0x04228  /* SerDes/SGMII Code Violation Pkt Count */
+#define E1000_PCS_ANADV   0x04218  /* AN advertisement - RW */
+#define E1000_PCS_LPAB    0x0421C  /* Link Partner Ability - RW */
+#define E1000_PCS_NPTX    0x04220  /* AN Next Page Transmit - RW */
+#define E1000_PCS_LPABNP  0x04224  /* Link Partner Ability Next Page - RW */
+#define E1000_RXCSUM   0x05000  /* RX Checksum Control - RW */
+#define E1000_RLPML    0x05004  /* RX Long Packet Max Length */
+#define E1000_RFCTL    0x05008  /* Receive Filter Control*/
+#define E1000_MTA      0x05200  /* Multicast Table Array - RW Array */
+#define E1000_RA       0x05400  /* Receive Address - RW Array */
+#define E1000_RA2      0x054E0  /* 2nd half of Rx address array - RW Array */
+#define E1000_PSRTYPE(_i)       (0x05480 + ((_i) * 4))
+#define E1000_RAL(_i)  (((_i) <= 15) ? (0x05400 + ((_i) * 8)) : \
+					(0x054E0 + (((_i) - 16) * 8)))
+#define E1000_RAH(_i)  (((_i) <= 15) ? (0x05404 + ((_i) * 8)) : \
+					(0x054E4 + (((_i) - 16) * 8)))
+#define E1000_IP4AT_REG(_i)     (0x05840 + ((_i) * 8))
+#define E1000_IP6AT_REG(_i)     (0x05880 + ((_i) * 4))
+#define E1000_WUPM_REG(_i)      (0x05A00 + ((_i) * 4))
+#define E1000_FFMT_REG(_i)      (0x09000 + ((_i) * 8))
+#define E1000_FFVT_REG(_i)      (0x09800 + ((_i) * 8))
+#define E1000_FFLT_REG(_i)      (0x05F00 + ((_i) * 8))
+#define E1000_VFTA     0x05600  /* VLAN Filter Table Array - RW Array */
+#define E1000_VT_CTL   0x0581C  /* VMDq Control - RW */
+#define E1000_WUC      0x05800  /* Wakeup Control - RW */
+#define E1000_WUFC     0x05808  /* Wakeup Filter Control - RW */
+#define E1000_WUS      0x05810  /* Wakeup Status - RO */
+#define E1000_MANC     0x05820  /* Management Control - RW */
+#define E1000_IPAV     0x05838  /* IP Address Valid - RW */
+#define E1000_WUPL     0x05900  /* Wakeup Packet Length - RW */
+
+#define E1000_SW_FW_SYNC  0x05B5C /* Software-Firmware Synchronization - RW */
+#define E1000_CCMCTL      0x05B48 /* CCM Control Register */
+#define E1000_GIOCTL      0x05B44 /* GIO Analog Control Register */
+#define E1000_SCCTL       0x05B4C /* PCIc PLL Configuration Register */
+#define E1000_GCR         0x05B00 /* PCI-Ex Control */
+#define E1000_FACTPS    0x05B30 /* Function Active and Power State to MNG */
+#define E1000_SWSM      0x05B50 /* SW Semaphore */
+#define E1000_FWSM      0x05B54 /* FW Semaphore */
+#define E1000_DCA_CTRL  0x05B74 /* DCA Control - RW */
+
+/* RSS registers */
+#define E1000_MRQC      0x05818 /* Multiple Receive Control - RW */
+#define E1000_IMIR(_i)      (0x05A80 + ((_i) * 4))  /* Immediate Interrupt */
+#define E1000_IMIREXT(_i)   (0x05AA0 + ((_i) * 4))  /* Immediate Interrupt Ext*/
+#define E1000_IMIRVP    0x05AC0 /* Immediate Interrupt RX VLAN Priority - RW */
+/* MSI-X Allocation Register (_i) - RW */
+#define E1000_MSIXBM(_i)    (0x01600 + ((_i) * 4))
+/* Redirection Table - RW Array */
+#define E1000_RETA(_i)  (0x05C00 + ((_i) * 4))
+#define E1000_RSSRK(_i) (0x05C80 + ((_i) * 4)) /* RSS Random Key - RW Array */
+
+/* VT Registers */
+#define E1000_MBVFICR   0x00C80 /* Mailbox VF Cause - RWC */
+#define E1000_MBVFIMR   0x00C84 /* Mailbox VF int Mask - RW */
+#define E1000_VFLRE     0x00C88 /* VF Register Events - RWC */
+#define E1000_VFRE      0x00C8C /* VF Receive Enables */
+#define E1000_VFTE      0x00C90 /* VF Transmit Enables */
+#define E1000_QDE       0x02408 /* Queue Drop Enable - RW */
+#define E1000_DTXSWC    0x03500 /* DMA Tx Switch Control - RW */
+#define E1000_WVBR      0x03554 /* VM Wrong Behavior - RWS */
+#define E1000_RPLOLR    0x05AF0 /* Replication Offload - RW */
+#define E1000_UTA       0x0A000 /* Unicast Table Array - RW */
+#define E1000_IOVTCL    0x05BBC /* IOV Control Register */
+#define E1000_TXSWC     0x05ACC /* Tx Switch Control */
+#define E1000_LVMMC	0x03548 /* Last VM Misbehavior cause */
+/* These act per VF so an array friendly macro is used */
+#define E1000_P2VMAILBOX(_n)   (0x00C00 + (4 * (_n)))
+#define E1000_VMBMEM(_n)       (0x00800 + (64 * (_n)))
+#define E1000_VMOLR(_n)        (0x05AD0 + (4 * (_n)))
+#define E1000_DVMOLR(_n)       (0x0C038 + (64 * (_n)))
+#define E1000_VLVF(_n)         (0x05D00 + (4 * (_n))) /* VLAN VM Filter */
+#define E1000_VMVIR(_n)        (0x03700 + (4 * (_n)))
+
+struct e1000_hw;
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg);
+
+/* write operations, indexed using DWORDS */
+#define wr32(reg, val) \
+do { \
+	u8 __iomem *hw_addr = READ_ONCE((hw)->hw_addr); \
+	if (!E1000_REMOVED(hw_addr)) \
+		writel((val), &hw_addr[(reg)]); \
+} while (0)
+
+#define rd32(reg) (igb_rd32(hw, reg))
+
+#define wrfl() ((void)rd32(E1000_STATUS))
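+/* wrfl() flushes posted PCI writes by reading a harmless register; e.g.
+ * igb_phy_hw_reset() in e1000_phy.c issues wr32(E1000_CTRL, ...); wrfl();
+ * before its delay so the reset bit is guaranteed to have reached the
+ * device first.
+ */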
+
+#define array_wr32(reg, offset, value) \
+	wr32((reg) + ((offset) << 2), (value))
+
+#define array_rd32(reg, offset) \
+	(readl(hw->hw_addr + reg + ((offset) << 2)))
+
+/* DMA Coalescing registers */
+#define E1000_PCIEMISC	0x05BB8 /* PCIE misc config register */
+
+/* Energy Efficient Ethernet "EEE" register */
+#define E1000_IPCNFG	0x0E38 /* Internal PHY Configuration */
+#define E1000_EEER	0x0E30 /* Energy Efficient Ethernet */
+#define E1000_EEE_SU	0x0E34 /* EEE Setup */
+#define E1000_EMIADD	0x10   /* Extended Memory Indirect Address */
+#define E1000_EMIDATA	0x11   /* Extended Memory Indirect Data */
+#define E1000_MMDAC	13     /* MMD Access Control */
+#define E1000_MMDAAD	14     /* MMD Access Address/Data */
+
+/* Thermal Sensor Register */
+#define E1000_THSTAT	0x08110 /* Thermal Sensor Status */
+
+/* OS2BMC Registers */
+#define E1000_B2OSPC	0x08FE0 /* BMC2OS packets sent by BMC */
+#define E1000_B2OGPRC	0x04158 /* BMC2OS packets received by host */
+#define E1000_O2BGPTC	0x08FE4 /* OS2BMC packets received by BMC */
+#define E1000_O2BSPC	0x0415C /* OS2BMC packets transmitted by host */
+
+#define E1000_SRWR		0x12018  /* Shadow Ram Write Register - RW */
+#define E1000_I210_FLMNGCTL	0x12038
+#define E1000_I210_FLMNGDATA	0x1203C
+#define E1000_I210_FLMNGCNT	0x12040
+
+#define E1000_I210_FLSWCTL	0x12048
+#define E1000_I210_FLSWDATA	0x1204C
+#define E1000_I210_FLSWCNT	0x12050
+
+#define E1000_I210_FLA		0x1201C
+
+#define E1000_INVM_DATA_REG(_n)	(0x12120 + 4*(_n))
+#define E1000_INVM_SIZE		64 /* Number of INVM Data Registers */
+
+#define E1000_REMOVED(h) unlikely(!(h))
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h
new file mode 100644
index 0000000..4c65e31
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb.h
@@ -0,0 +1,559 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+/* Linux PRO/1000 Ethernet Driver main header file */
+
+#ifndef _IGB_H_
+#define _IGB_H_
+
+#include "e1000_mac.h"
+#include "e1000_82575.h"
+
+#include <linux/bitops.h>
+#include <linux/if_vlan.h>
+#include <linux/i2c.h>
+#include <linux/i2c-algo-bit.h>
+#include <linux/pci.h>
+#include <linux/mdio.h>
+
+#include <rtdev.h>
+
+#undef CONFIG_IGB_HWMON
+
+struct igb_adapter;
+
+#define E1000_PCS_CFG_IGN_SD	1
+
+/* Interrupt defines */
+#define IGB_START_ITR		648 /* ~6000 ints/sec */
+#define IGB_4K_ITR		980
+#define IGB_20K_ITR		196
+#define IGB_70K_ITR		56
+
+/* TX/RX descriptor defines */
+#define IGB_DEFAULT_TXD		256
+#define IGB_DEFAULT_TX_WORK	128
+#define IGB_MIN_TXD		80
+#define IGB_MAX_TXD		4096
+
+#define IGB_DEFAULT_RXD		256
+#define IGB_MIN_RXD		80
+#define IGB_MAX_RXD		4096
+
+#define IGB_DEFAULT_ITR		3 /* dynamic */
+#define IGB_MAX_ITR_USECS	10000
+#define IGB_MIN_ITR_USECS	10
+#define NON_Q_VECTORS		1
+#define MAX_Q_VECTORS		8
+#define MAX_MSIX_ENTRIES	10
+
+/* Transmit and receive queues */
+#define IGB_MAX_RX_QUEUES	8
+#define IGB_MAX_RX_QUEUES_82575	4
+#define IGB_MAX_RX_QUEUES_I211	2
+#define IGB_MAX_TX_QUEUES	8
+#define IGB_MAX_VF_MC_ENTRIES	30
+#define IGB_MAX_VF_FUNCTIONS	8
+#define IGB_MAX_VFTA_ENTRIES	128
+#define IGB_82576_VF_DEV_ID	0x10CA
+#define IGB_I350_VF_DEV_ID	0x1520
+
+/* NVM version defines */
+#define IGB_MAJOR_MASK		0xF000
+#define IGB_MINOR_MASK		0x0FF0
+#define IGB_BUILD_MASK		0x000F
+#define IGB_COMB_VER_MASK	0x00FF
+#define IGB_MAJOR_SHIFT		12
+#define IGB_MINOR_SHIFT		4
+#define IGB_COMB_VER_SHFT	8
+#define IGB_NVM_VER_INVALID	0xFFFF
+#define IGB_ETRACK_SHIFT	16
+#define NVM_ETRACK_WORD		0x0042
+#define NVM_COMB_VER_OFF	0x0083
+#define NVM_COMB_VER_PTR	0x003d
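+/* Illustrative decode (assumed NVM word value, not read from hardware):
+ * a version word of 0x5A23 yields major = (0x5A23 & IGB_MAJOR_MASK) >>
+ * IGB_MAJOR_SHIFT = 5, minor = (0x5A23 & IGB_MINOR_MASK) >>
+ * IGB_MINOR_SHIFT = 0xA2 = 162, and build = 0x5A23 & IGB_BUILD_MASK = 3,
+ * i.e. firmware version "5.162.3".
+ */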
+
+struct vf_data_storage {
+	unsigned char vf_mac_addresses[ETH_ALEN];
+	u16 vf_mc_hashes[IGB_MAX_VF_MC_ENTRIES];
+	u16 num_vf_mc_hashes;
+	u16 vlans_enabled;
+	u32 flags;
+	unsigned long last_nack;
+	u16 pf_vlan; /* When set, guest VLAN config not allowed. */
+	u16 pf_qos;
+	u16 tx_rate;
+	bool spoofchk_enabled;
+};
+
+#define IGB_VF_FLAG_CTS            0x00000001 /* VF is clear to send data */
+#define IGB_VF_FLAG_UNI_PROMISC    0x00000002 /* VF has unicast promisc */
+#define IGB_VF_FLAG_MULTI_PROMISC  0x00000004 /* VF has multicast promisc */
+#define IGB_VF_FLAG_PF_SET_MAC     0x00000008 /* PF has set MAC address */
+
+/* RX descriptor control thresholds.
+ * PTHRESH - MAC will consider prefetch if it has fewer than this number of
+ *           descriptors available in its onboard memory.
+ *           Setting this to 0 disables RX descriptor prefetch.
+ * HTHRESH - MAC will only prefetch if there are at least this many descriptors
+ *           available in host memory.
+ *           If PTHRESH is 0, this should also be 0.
+ * WTHRESH - RX descriptor writeback threshold - MAC will delay writing back
+ *           descriptors until either it has this many to write back, or the
+ *           ITR timer expires.
+ */
+#define IGB_RX_PTHRESH	((hw->mac.type == e1000_i354) ? 12 : 8)
+#define IGB_RX_HTHRESH	8
+#define IGB_TX_PTHRESH	((hw->mac.type == e1000_i354) ? 20 : 8)
+#define IGB_TX_HTHRESH	1
+#define IGB_RX_WTHRESH	((hw->mac.type == e1000_82576 && \
+			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 4)
+#define IGB_TX_WTHRESH	((hw->mac.type == e1000_82576 && \
+			  (adapter->flags & IGB_FLAG_HAS_MSIX)) ? 1 : 16)
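+/* Example resolution (illustrative): on an 82576 running with MSI-X
+ * enabled, an Rx ring is programmed with PTHRESH=8, HTHRESH=8 and
+ * WTHRESH=1, i.e. descriptors are written back one at a time and the
+ * ITR timer, not batching, paces completions.
+ */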
+
+/* this is the size past which hardware will drop packets when setting LPE=0 */
+#define MAXIMUM_ETHERNET_VLAN_SIZE 1522
+
+/* Supported Rx Buffer Sizes */
+#define IGB_RXBUFFER_256	256
+#define IGB_RXBUFFER_2048	2048
+#define IGB_RX_HDR_LEN		IGB_RXBUFFER_256
+#define IGB_RX_BUFSZ		IGB_RXBUFFER_2048
+
+/* How many Rx Buffers do we bundle into one write to the hardware? */
+#define IGB_RX_BUFFER_WRITE	16 /* Must be power of 2 */
+
+#define AUTO_ALL_MODES		0
+#define IGB_EEPROM_APME		0x0400
+
+#ifndef IGB_MASTER_SLAVE
+/* Switch to override PHY master/slave setting */
+#define IGB_MASTER_SLAVE	e1000_ms_hw_default
+#endif
+
+#define IGB_MNG_VLAN_NONE	-1
+
+enum igb_tx_flags {
+	/* cmd_type flags */
+	IGB_TX_FLAGS_VLAN	= 0x01,
+	IGB_TX_FLAGS_TSO	= 0x02,
+	IGB_TX_FLAGS_TSTAMP	= 0x04,
+
+	/* olinfo flags */
+	IGB_TX_FLAGS_IPV4	= 0x10,
+	IGB_TX_FLAGS_CSUM	= 0x20,
+};
+
+/* VLAN info */
+#define IGB_TX_FLAGS_VLAN_MASK	0xffff0000
+#define IGB_TX_FLAGS_VLAN_SHIFT	16
+
+/* The largest size we can write to the descriptor is 65535.  In order to
+ * maintain a power of two alignment we have to limit ourselves to 32K.
+ */
+#define IGB_MAX_TXD_PWR	15
+#define IGB_MAX_DATA_PER_TXD	(1 << IGB_MAX_TXD_PWR)
+
+/* Tx Descriptors needed, worst case */
+#define TXD_USE_COUNT(S) DIV_ROUND_UP((S), IGB_MAX_DATA_PER_TXD)
+#define DESC_NEEDED (MAX_SKB_FRAGS + 4)
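+/* Worked example (illustrative sizes): a 48 KiB payload costs
+ * TXD_USE_COUNT(49152) = DIV_ROUND_UP(49152, 32768) = 2 data descriptors;
+ * DESC_NEEDED (MAX_SKB_FRAGS + 4) is the worst-case headroom typically
+ * checked before stopping a Tx queue.
+ */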
+
+/* EEPROM byte offsets */
+#define IGB_SFF_8472_SWAP		0x5C
+#define IGB_SFF_8472_COMP		0x5E
+
+/* Bitmasks */
+#define IGB_SFF_ADDRESSING_MODE		0x4
+#define IGB_SFF_8472_UNSUP		0x00
+
+/* wrapper around a pointer to a socket buffer,
+ * so a DMA handle can be stored along with the buffer
+ */
+struct igb_tx_buffer {
+	union e1000_adv_tx_desc *next_to_watch;
+	unsigned long time_stamp;
+	struct rtskb *skb;
+	unsigned int bytecount;
+	u16 gso_segs;
+	__be16 protocol;
+
+	u32 tx_flags;
+};
+
+struct igb_rx_buffer {
+	dma_addr_t dma;
+	struct rtskb *skb;
+};
+
+struct igb_tx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 restart_queue;
+	u64 restart_queue2;
+};
+
+struct igb_rx_queue_stats {
+	u64 packets;
+	u64 bytes;
+	u64 drops;
+	u64 csum_err;
+	u64 alloc_failed;
+};
+
+struct igb_ring_container {
+	struct igb_ring *ring;		/* pointer to linked list of rings */
+	unsigned int total_bytes;	/* total bytes processed this int */
+	unsigned int total_packets;	/* total packets processed this int */
+	u16 work_limit;			/* total work allowed per interrupt */
+	u8 count;			/* total number of rings in vector */
+	u8 itr;				/* current ITR setting for ring */
+};
+
+struct igb_ring {
+	struct igb_q_vector *q_vector;	/* backlink to q_vector */
+	struct rtnet_device *netdev;	/* back pointer to net_device */
+	struct device *dev;		/* device pointer for dma mapping */
+	union {				/* array of buffer info structs */
+		struct igb_tx_buffer *tx_buffer_info;
+		struct igb_rx_buffer *rx_buffer_info;
+	};
+	void *desc;			/* descriptor ring memory */
+	unsigned long flags;		/* ring specific flags */
+	void __iomem *tail;		/* pointer to ring tail register */
+	dma_addr_t dma;			/* phys address of the ring */
+	unsigned int  size;		/* length of desc. ring in bytes */
+
+	u16 count;			/* number of desc. in the ring */
+	u8 queue_index;			/* logical index of the ring*/
+	u8 reg_idx;			/* physical index of the ring */
+
+	/* everything past this point are written often */
+	u16 next_to_clean;
+	u16 next_to_use;
+	u16 next_to_alloc;
+
+	union {
+		/* TX */
+		struct {
+			struct igb_tx_queue_stats tx_stats;
+		};
+		/* RX */
+		struct {
+			struct igb_rx_queue_stats rx_stats;
+			u16 rx_buffer_len;
+		};
+	};
+} ____cacheline_internodealigned_in_smp;
+
+struct igb_q_vector {
+	struct igb_adapter *adapter;	/* backlink */
+	int cpu;			/* CPU for DCA */
+	u32 eims_value;			/* EIMS mask value */
+
+	u16 itr_val;
+	u8 set_itr;
+	void __iomem *itr_register;
+
+	struct igb_ring_container rx, tx;
+
+	struct rcu_head rcu;	/* to avoid race with update stats on free */
+	char name[IFNAMSIZ + 9];
+
+	/* for dynamic allocation of rings associated with this q_vector */
+	struct igb_ring ring[0] ____cacheline_internodealigned_in_smp;
+};
+
+enum e1000_ring_flags_t {
+	IGB_RING_FLAG_RX_SCTP_CSUM,
+	IGB_RING_FLAG_RX_LB_VLAN_BSWAP,
+	IGB_RING_FLAG_TX_CTX_IDX,
+	IGB_RING_FLAG_TX_DETECT_HANG
+};
+
+#define IGB_TXD_DCMD (E1000_ADVTXD_DCMD_EOP | E1000_ADVTXD_DCMD_RS)
+
+#define IGB_RX_DESC(R, i)	\
+	(&(((union e1000_adv_rx_desc *)((R)->desc))[i]))
+#define IGB_TX_DESC(R, i)	\
+	(&(((union e1000_adv_tx_desc *)((R)->desc))[i]))
+#define IGB_TX_CTXTDESC(R, i)	\
+	(&(((struct e1000_adv_tx_context_desc *)((R)->desc))[i]))
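+/* The accessors above cast raw ring memory to a descriptor layout;
+ * illustrative usage, mirroring igb_dump() in igb_main.c:
+ *
+ *   union e1000_adv_rx_desc *rxd = IGB_RX_DESC(rx_ring, i);
+ *   u32 staterr = le32_to_cpu(rxd->wb.upper.status_error);
+ */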
+
+/* igb_test_staterr - tests bits within Rx descriptor status and error fields */
+static inline __le32 igb_test_staterr(union e1000_adv_rx_desc *rx_desc,
+				      const u32 stat_err_bits)
+{
+	return rx_desc->wb.upper.status_error & cpu_to_le32(stat_err_bits);
+}
+
+/* igb_desc_unused - calculate if we have unused descriptors */
+static inline int igb_desc_unused(struct igb_ring *ring)
+{
+	if (ring->next_to_clean > ring->next_to_use)
+		return ring->next_to_clean - ring->next_to_use - 1;
+
+	return ring->count + ring->next_to_clean - ring->next_to_use - 1;
+}
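+/* Worked example (illustrative ring state): with count = 256,
+ * next_to_use = 10 and next_to_clean = 5, the second branch yields
+ * 256 + 5 - 10 - 1 = 250 free slots; one slot is always left unused so
+ * a full ring can be told apart from an empty one.
+ */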
+
+#ifdef CONFIG_IGB_HWMON
+
+#define IGB_HWMON_TYPE_LOC	0
+#define IGB_HWMON_TYPE_TEMP	1
+#define IGB_HWMON_TYPE_CAUTION	2
+#define IGB_HWMON_TYPE_MAX	3
+
+struct hwmon_attr {
+	struct device_attribute dev_attr;
+	struct e1000_hw *hw;
+	struct e1000_thermal_diode_data *sensor;
+	char name[12];
+};
+
+struct hwmon_buff {
+	struct attribute_group group;
+	const struct attribute_group *groups[2];
+	struct attribute *attrs[E1000_MAX_SENSORS * 4 + 1];
+	struct hwmon_attr hwmon_list[E1000_MAX_SENSORS * 4];
+	unsigned int n_hwmon;
+};
+#endif
+
+#define IGB_N_EXTTS	2
+#define IGB_N_PEROUT	2
+#define IGB_N_SDP	4
+#define IGB_RETA_SIZE	128
+
+/* board specific private data structure */
+struct igb_adapter {
+	unsigned long active_vlans[BITS_TO_LONGS(VLAN_N_VID)];
+
+	struct rtnet_device *netdev;
+
+	unsigned long state;
+	unsigned int flags;
+
+	unsigned int num_q_vectors;
+	struct msix_entry msix_entries[MAX_MSIX_ENTRIES];
+	rtdm_irq_t msix_irq_handle[MAX_MSIX_ENTRIES];
+	rtdm_irq_t irq_handle;
+	rtdm_nrtsig_t watchdog_nrtsig;
+	spinlock_t stats64_lock;
+
+	/* Interrupt Throttle Rate */
+	u32 rx_itr_setting;
+	u32 tx_itr_setting;
+	u16 tx_itr;
+	u16 rx_itr;
+
+	/* TX */
+	u16 tx_work_limit;
+	u32 tx_timeout_count;
+	int num_tx_queues;
+	struct igb_ring *tx_ring[16];
+
+	/* RX */
+	int num_rx_queues;
+	struct igb_ring *rx_ring[16];
+
+	u32 max_frame_size;
+	u32 min_frame_size;
+
+	struct timer_list watchdog_timer;
+	struct timer_list phy_info_timer;
+
+	u16 mng_vlan_id;
+	u32 bd_number;
+	u32 wol;
+	u32 en_mng_pt;
+	u16 link_speed;
+	u16 link_duplex;
+
+	struct work_struct reset_task;
+	struct work_struct watchdog_task;
+	bool fc_autoneg;
+	u8  tx_timeout_factor;
+	struct timer_list blink_timer;
+	unsigned long led_status;
+
+	/* OS defined structs */
+	struct pci_dev *pdev;
+
+	struct net_device_stats net_stats;
+
+	/* structs defined in e1000_hw.h */
+	struct e1000_hw hw;
+	struct e1000_hw_stats stats;
+	struct e1000_phy_info phy_info;
+
+	u32 test_icr;
+	struct igb_ring test_tx_ring;
+	struct igb_ring test_rx_ring;
+
+	struct igb_q_vector *q_vector[MAX_Q_VECTORS];
+	u32 eims_enable_mask;
+	u32 eims_other;
+
+	/* to not mess up cache alignment, always add to the bottom */
+	u16 tx_ring_count;
+	u16 rx_ring_count;
+	int vf_rate_link_speed;
+	u32 rss_queues;
+	u32 wvbr;
+	u32 *shadow_vfta;
+
+	unsigned long last_rx_timestamp;
+
+	char fw_version[32];
+#ifdef CONFIG_IGB_HWMON
+	struct hwmon_buff *igb_hwmon_buff;
+	bool ets;
+#endif
+	struct i2c_algo_bit_data i2c_algo;
+	struct i2c_adapter i2c_adap;
+	struct i2c_client *i2c_client;
+	u32 rss_indir_tbl_init;
+	u8 rss_indir_tbl[IGB_RETA_SIZE];
+
+	unsigned long link_check_timeout;
+	int copper_tries;
+	struct e1000_info ei;
+	u16 eee_advert;
+};
+
+#define IGB_FLAG_HAS_MSI		(1 << 0)
+#define IGB_FLAG_DCA_ENABLED		(1 << 1)
+#define IGB_FLAG_QUAD_PORT_A		(1 << 2)
+#define IGB_FLAG_QUEUE_PAIRS		(1 << 3)
+#define IGB_FLAG_DMAC			(1 << 4)
+#define IGB_FLAG_PTP			(1 << 5)
+#define IGB_FLAG_RSS_FIELD_IPV4_UDP	(1 << 6)
+#define IGB_FLAG_RSS_FIELD_IPV6_UDP	(1 << 7)
+#define IGB_FLAG_WOL_SUPPORTED		(1 << 8)
+#define IGB_FLAG_NEED_LINK_UPDATE	(1 << 9)
+#define IGB_FLAG_MEDIA_RESET		(1 << 10)
+#define IGB_FLAG_MAS_CAPABLE		(1 << 11)
+#define IGB_FLAG_MAS_ENABLE		(1 << 12)
+#define IGB_FLAG_HAS_MSIX		(1 << 13)
+#define IGB_FLAG_EEE			(1 << 14)
+
+/* Media Auto Sense */
+#define IGB_MAS_ENABLE_0		0x0001
+#define IGB_MAS_ENABLE_1		0x0002
+#define IGB_MAS_ENABLE_2		0x0004
+#define IGB_MAS_ENABLE_3		0x0008
+
+/* DMA Coalescing defines */
+#define IGB_MIN_TXPBSIZE	20408
+#define IGB_TX_BUF_4096		4096
+#define IGB_DMCTLX_DCFLUSH_DIS	0x80000000  /* Disable DMA Coal Flush */
+
+#define IGB_82576_TSYNC_SHIFT	19
+#define IGB_TS_HDR_LEN		16
+enum e1000_state_t {
+	__IGB_TESTING,
+	__IGB_RESETTING,
+	__IGB_DOWN,
+	__IGB_PTP_TX_IN_PROGRESS,
+};
+
+enum igb_boards {
+	board_82575,
+};
+
+extern char igb_driver_name[];
+extern char igb_driver_version[];
+
+int igb_up(struct igb_adapter *);
+void igb_down(struct igb_adapter *);
+void igb_reinit_locked(struct igb_adapter *);
+void igb_reset(struct igb_adapter *);
+int igb_reinit_queues(struct igb_adapter *);
+void igb_write_rss_indir_tbl(struct igb_adapter *);
+int igb_set_spd_dplx(struct igb_adapter *, u32, u8);
+int igb_setup_tx_resources(struct igb_ring *);
+int igb_setup_rx_resources(struct igb_ring *);
+void igb_free_tx_resources(struct igb_ring *);
+void igb_free_rx_resources(struct igb_ring *);
+void igb_configure_tx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_configure_rx_ring(struct igb_adapter *, struct igb_ring *);
+void igb_setup_tctl(struct igb_adapter *);
+void igb_setup_rctl(struct igb_adapter *);
+netdev_tx_t igb_xmit_frame_ring(struct rtskb *, struct igb_ring *);
+void igb_unmap_and_free_tx_resource(struct igb_ring *, struct igb_tx_buffer *);
+void igb_alloc_rx_buffers(struct igb_ring *, u16);
+void igb_update_stats(struct igb_adapter *);
+bool igb_has_link(struct igb_adapter *adapter);
+void igb_set_ethtool_ops(struct rtnet_device *);
+void igb_power_up_link(struct igb_adapter *);
+void igb_set_fw_version(struct igb_adapter *);
+void igb_ptp_init(struct igb_adapter *adapter);
+void igb_ptp_stop(struct igb_adapter *adapter);
+void igb_ptp_reset(struct igb_adapter *adapter);
+void igb_ptp_rx_hang(struct igb_adapter *adapter);
+void igb_ptp_rx_rgtstamp(struct igb_q_vector *q_vector, struct rtskb *skb);
+void igb_ptp_rx_pktstamp(struct igb_q_vector *q_vector, unsigned char *va,
+			 struct rtskb *skb);
+int igb_ptp_set_ts_config(struct rtnet_device *netdev, struct ifreq *ifr);
+int igb_ptp_get_ts_config(struct rtnet_device *netdev, struct ifreq *ifr);
+#ifdef CONFIG_IGB_HWMON
+void igb_sysfs_exit(struct igb_adapter *adapter);
+int igb_sysfs_init(struct igb_adapter *adapter);
+#endif
+static inline s32 igb_reset_phy(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.reset)
+		return hw->phy.ops.reset(hw);
+
+	return 0;
+}
+
+static inline s32 igb_read_phy_reg(struct e1000_hw *hw, u32 offset, u16 *data)
+{
+	if (hw->phy.ops.read_reg)
+		return hw->phy.ops.read_reg(hw, offset, data);
+
+	return 0;
+}
+
+static inline s32 igb_write_phy_reg(struct e1000_hw *hw, u32 offset, u16 data)
+{
+	if (hw->phy.ops.write_reg)
+		return hw->phy.ops.write_reg(hw, offset, data);
+
+	return 0;
+}
+
+static inline s32 igb_get_phy_info(struct e1000_hw *hw)
+{
+	if (hw->phy.ops.get_phy_info)
+		return hw->phy.ops.get_phy_info(hw);
+
+	return 0;
+}
+
+static inline struct rtnet_device *txring_txq(const struct igb_ring *tx_ring)
+{
+	return tx_ring->netdev;
+}
+
+#endif /* _IGB_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c
new file mode 100644
index 0000000..44b6a68
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_hwmon.c
@@ -0,0 +1,249 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2014 Intel Corporation.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#include "igb.h"
+#include "e1000_82575.h"
+#include "e1000_hw.h"
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/sysfs.h>
+#include <linux/kobject.h>
+#include <linux/device.h>
+#include <linux/netdevice.h>
+#include <linux/hwmon.h>
+#include <linux/pci.h>
+
+#ifdef CONFIG_IGB_HWMON
+static struct i2c_board_info i350_sensor_info = {
+	I2C_BOARD_INFO("i350bb", (0xf8 >> 1)),
+};
+
+/* hwmon callback functions */
+static ssize_t igb_hwmon_show_location(struct device *dev,
+				       struct device_attribute *attr,
+				       char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	return sprintf(buf, "loc%u\n",
+		       igb_attr->sensor->location);
+}
+
+static ssize_t igb_hwmon_show_temp(struct device *dev,
+				   struct device_attribute *attr,
+				   char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value;
+
+	/* reset the temp field */
+	igb_attr->hw->mac.ops.get_thermal_sensor_data(igb_attr->hw);
+
+	value = igb_attr->sensor->temp;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_cautionthresh(struct device *dev,
+					    struct device_attribute *attr,
+					    char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value = igb_attr->sensor->caution_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+static ssize_t igb_hwmon_show_maxopthresh(struct device *dev,
+					  struct device_attribute *attr,
+					  char *buf)
+{
+	struct hwmon_attr *igb_attr = container_of(attr, struct hwmon_attr,
+						   dev_attr);
+	unsigned int value = igb_attr->sensor->max_op_thresh;
+
+	/* display millidegree */
+	value *= 1000;
+
+	return sprintf(buf, "%u\n", value);
+}
+
+/* igb_add_hwmon_attr - Create hwmon attr table for a hwmon sysfs file.
+ * @adapter: pointer to the adapter structure
+ * @offset: offset in the eeprom sensor data table
+ * @type: type of sensor data to display
+ *
+ * For each file we want in hwmon's sysfs interface we need a device_attribute.
+ * This is included in our hwmon_attr struct that contains the references to
+ * the data structures we need to get the data to display.
+ */
+static int igb_add_hwmon_attr(struct igb_adapter *adapter,
+			      unsigned int offset, int type)
+{
+	int rc;
+	unsigned int n_attr;
+	struct hwmon_attr *igb_attr;
+
+	n_attr = adapter->igb_hwmon_buff->n_hwmon;
+	igb_attr = &adapter->igb_hwmon_buff->hwmon_list[n_attr];
+
+	switch (type) {
+	case IGB_HWMON_TYPE_LOC:
+		igb_attr->dev_attr.show = igb_hwmon_show_location;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_label", offset + 1);
+		break;
+	case IGB_HWMON_TYPE_TEMP:
+		igb_attr->dev_attr.show = igb_hwmon_show_temp;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_input", offset + 1);
+		break;
+	case IGB_HWMON_TYPE_CAUTION:
+		igb_attr->dev_attr.show = igb_hwmon_show_cautionthresh;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_max", offset + 1);
+		break;
+	case IGB_HWMON_TYPE_MAX:
+		igb_attr->dev_attr.show = igb_hwmon_show_maxopthresh;
+		snprintf(igb_attr->name, sizeof(igb_attr->name),
+			 "temp%u_crit", offset + 1);
+		break;
+	default:
+		rc = -EPERM;
+		return rc;
+	}
+
+	/* These are always the same regardless of type */
+	igb_attr->sensor =
+		&adapter->hw.mac.thermal_sensor_data.sensor[offset];
+	igb_attr->hw = &adapter->hw;
+	igb_attr->dev_attr.store = NULL;
+	igb_attr->dev_attr.attr.mode = S_IRUGO;
+	igb_attr->dev_attr.attr.name = igb_attr->name;
+	sysfs_attr_init(&igb_attr->dev_attr.attr);
+
+	adapter->igb_hwmon_buff->attrs[n_attr] = &igb_attr->dev_attr.attr;
+
+	++adapter->igb_hwmon_buff->n_hwmon;
+
+	return 0;
+}
+
+static void igb_sysfs_del_adapter(struct igb_adapter *adapter)
+{
+}
+
+/* called from igb_main.c */
+void igb_sysfs_exit(struct igb_adapter *adapter)
+{
+	igb_sysfs_del_adapter(adapter);
+}
+
+/* called from igb_main.c */
+int igb_sysfs_init(struct igb_adapter *adapter)
+{
+	struct hwmon_buff *igb_hwmon;
+	struct i2c_client *client;
+	struct device *hwmon_dev;
+	unsigned int i;
+	int rc = 0;
+
+	/* If this method isn't defined we don't support thermals */
+	if (adapter->hw.mac.ops.init_thermal_sensor_thresh == NULL)
+		goto exit;
+
+	/* Don't create thermal hwmon interface if no sensors present */
+	rc = adapter->hw.mac.ops.init_thermal_sensor_thresh(&adapter->hw);
+	if (rc)
+		goto exit;
+
+	igb_hwmon = devm_kzalloc(&adapter->pdev->dev, sizeof(*igb_hwmon),
+				 GFP_KERNEL);
+	if (!igb_hwmon) {
+		rc = -ENOMEM;
+		goto exit;
+	}
+	adapter->igb_hwmon_buff = igb_hwmon;
+
+	for (i = 0; i < E1000_MAX_SENSORS; i++) {
+
+		/* Only create hwmon sysfs entries for sensors that have
+		 * meaningful data.
+		 */
+		if (adapter->hw.mac.thermal_sensor_data.sensor[i].location == 0)
+			continue;
+
+		/* Bail if any hwmon attr struct fails to initialize */
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_CAUTION);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_LOC);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_TEMP);
+		if (rc)
+			goto exit;
+		rc = igb_add_hwmon_attr(adapter, i, IGB_HWMON_TYPE_MAX);
+		if (rc)
+			goto exit;
+	}
+
+	/* init i2c_client */
+	client = i2c_new_device(&adapter->i2c_adap, &i350_sensor_info);
+	if (client == NULL) {
+		dev_info(&adapter->pdev->dev,
+			 "Failed to create new i2c device.\n");
+		rc = -ENODEV;
+		goto exit;
+	}
+	adapter->i2c_client = client;
+
+	igb_hwmon->groups[0] = &igb_hwmon->group;
+	igb_hwmon->group.attrs = igb_hwmon->attrs;
+
+	hwmon_dev = devm_hwmon_device_register_with_groups(&adapter->pdev->dev,
+							   client->name,
+							   igb_hwmon,
+							   igb_hwmon->groups);
+	if (IS_ERR(hwmon_dev)) {
+		rc = PTR_ERR(hwmon_dev);
+		goto err;
+	}
+
+	goto exit;
+
+err:
+	igb_sysfs_del_adapter(adapter);
+exit:
+	return rc;
+}
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c
new file mode 100644
index 0000000..d47c0bb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/igb/igb_main.c
@@ -0,0 +1,5676 @@
+/* Intel(R) Gigabit Ethernet Linux driver
+ * Copyright(c) 2007-2015 Intel Corporation.
+ * RTnet port   2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ * Copyright(c) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program; if not, see <http://www.gnu.org/licenses/>.
+ *
+ * The full GNU General Public License is included in this distribution in
+ * the file called "COPYING".
+ *
+ * Contact Information:
+ * e1000-devel Mailing List <e1000-devel@lists.sourceforge.net>
+ * Intel Corporation, 5200 N.E. Elam Young Parkway, Hillsboro, OR 97124-6497
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/init.h>
+#include <linux/bitops.h>
+#include <linux/vmalloc.h>
+#include <linux/pagemap.h>
+#include <linux/netdevice.h>
+#include <linux/ipv6.h>
+#include <linux/slab.h>
+#include <net/checksum.h>
+#include <net/ip6_checksum.h>
+#include <linux/net_tstamp.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/if.h>
+#include <linux/if_vlan.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/interrupt.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <linux/sctp.h>
+#include <linux/if_ether.h>
+#include <linux/aer.h>
+#include <linux/prefetch.h>
+#include <linux/pm_runtime.h>
+#include <linux/i2c.h>
+#include "igb.h"
+
+#include <rtnet_port.h>
+
+/* RTnet redefines */
+#ifdef  NETIF_F_TSO
+#undef  NETIF_F_TSO
+#define NETIF_F_TSO 0
+#endif
+
+#ifdef  NETIF_F_TSO6
+#undef  NETIF_F_TSO6
+#define NETIF_F_TSO6 0
+#endif
+
+#ifdef  NETIF_F_HW_VLAN_TX
+#undef  NETIF_F_HW_VLAN_TX
+#define NETIF_F_HW_VLAN_TX 0
+#endif
+
+#ifdef  NETIF_F_HW_VLAN_RX
+#undef  NETIF_F_HW_VLAN_RX
+#define NETIF_F_HW_VLAN_RX 0
+#endif
+
+#ifdef  NETIF_F_HW_VLAN_FILTER
+#undef  NETIF_F_HW_VLAN_FILTER
+#define NETIF_F_HW_VLAN_FILTER 0
+#endif
+
+#ifdef  IGB_MAX_TX_QUEUES
+#undef  IGB_MAX_TX_QUEUES
+#define IGB_MAX_TX_QUEUES 1
+#endif
+
+#ifdef  IGB_MAX_RX_QUEUES
+#undef  IGB_MAX_RX_QUEUES
+#define IGB_MAX_RX_QUEUES 1
+#endif
+
+#ifdef CONFIG_IGB_NAPI
+#undef CONFIG_IGB_NAPI
+#endif
+
+#ifdef IGB_HAVE_TX_TIMEOUT
+#undef IGB_HAVE_TX_TIMEOUT
+#endif
+
+#ifdef ETHTOOL_GPERMADDR
+#undef ETHTOOL_GPERMADDR
+#endif
+
+#ifdef CONFIG_PM
+#undef CONFIG_PM
+#endif
+
+#ifdef CONFIG_NET_POLL_CONTROLLER
+#undef CONFIG_NET_POLL_CONTROLLER
+#endif
+
+#ifdef MAX_SKB_FRAGS
+#undef MAX_SKB_FRAGS
+#define MAX_SKB_FRAGS 1
+#endif
+
+#ifdef IGB_FRAMES_SUPPORT
+#undef IGB_FRAMES_SUPPORT
+#endif
+
+#define MAJ 5
+#define MIN 2
+#define BUILD 18
+#define DRV_VERSION \
+	__stringify(MAJ) "." __stringify(MIN) "." __stringify(BUILD) "-k"
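+/* With the values above, DRV_VERSION expands to the string "5.2.18-k". */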
+char igb_driver_name[] = "rt_igb";
+char igb_driver_version[] = DRV_VERSION;
+static const char igb_driver_string[] =
+				"Intel(R) Gigabit Ethernet Network Driver";
+static const char igb_copyright[] =
+				"Copyright (c) 2007-2014 Intel Corporation.";
+
+static const struct e1000_info *igb_info_tbl[] = {
+	[board_82575] = &e1000_82575_info,
+};
+
+#define MAX_UNITS 8
+static int InterruptThrottle = 0;
+module_param(InterruptThrottle, int, 0);
+MODULE_PARM_DESC(InterruptThrottle, "Throttle interrupts (boolean, false by default)");
+
+static const struct pci_device_id igb_pci_tbl[] = {
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_1GBPS) },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_SGMII) },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I354_BACKPLANE_2_5GBPS) },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I211_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_COPPER_FLASHLESS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I210_SERDES_FLASHLESS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_I350_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_QUAD_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82580_COPPER_DUAL), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SGMII), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_BACKPLANE), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_DH89XXCC_SFP), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_NS_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_FIBER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_SERDES_QUAD), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER_ET2), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82576_QUAD_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_COPPER), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575EB_FIBER_SERDES), board_82575 },
+	{ PCI_VDEVICE(INTEL, E1000_DEV_ID_82575GB_QUAD_COPPER), board_82575 },
+	/* required last entry */
+	{0, }
+};
+
+MODULE_DEVICE_TABLE(pci, igb_pci_tbl);
+
+static int igb_setup_all_tx_resources(struct igb_adapter *);
+static int igb_setup_all_rx_resources(struct igb_adapter *);
+static void igb_free_all_tx_resources(struct igb_adapter *);
+static void igb_free_all_rx_resources(struct igb_adapter *);
+static void igb_setup_mrqc(struct igb_adapter *);
+static int igb_probe(struct pci_dev *, const struct pci_device_id *);
+static void igb_remove(struct pci_dev *pdev);
+static int igb_sw_init(struct igb_adapter *);
+static int igb_open(struct rtnet_device *);
+static int igb_close(struct rtnet_device *);
+static void igb_configure(struct igb_adapter *);
+static void igb_configure_tx(struct igb_adapter *);
+static void igb_configure_rx(struct igb_adapter *);
+static void igb_clean_all_tx_rings(struct igb_adapter *);
+static void igb_clean_all_rx_rings(struct igb_adapter *);
+static void igb_clean_tx_ring(struct igb_ring *);
+static void igb_clean_rx_ring(struct igb_ring *);
+static void igb_set_rx_mode(struct rtnet_device *);
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void igb_update_phy_info(struct timer_list *);
+static void igb_watchdog(struct timer_list *);
+#else
+static void igb_update_phy_info(unsigned long);
+static void igb_watchdog(unsigned long);
+#endif
+static void igb_watchdog_task(struct work_struct *);
+static netdev_tx_t igb_xmit_frame(struct rtskb *skb, struct rtnet_device *);
+static struct net_device_stats *igb_get_stats(struct rtnet_device *);
+static int igb_intr(rtdm_irq_t *irq_handle);
+static int igb_intr_msi(rtdm_irq_t *irq_handle);
+static void igb_nrtsig_watchdog(rtdm_nrtsig_t *sig, void *data);
+static irqreturn_t igb_msix_other(int irq, void *);
+static int igb_msix_ring(rtdm_irq_t *irq_handle);
+static void igb_poll(struct igb_q_vector *);
+static bool igb_clean_tx_irq(struct igb_q_vector *);
+static bool igb_clean_rx_irq(struct igb_q_vector *, int);
+static int igb_ioctl(struct rtnet_device *, struct ifreq *ifr, int cmd);
+static void igb_reset_task(struct work_struct *);
+static void igb_vlan_mode(struct rtnet_device *netdev,
+			  netdev_features_t features);
+static int igb_vlan_rx_add_vid(struct rtnet_device *, __be16, u16);
+static void igb_restore_vlan(struct igb_adapter *);
+static void igb_rar_set_qsel(struct igb_adapter *, u8 *, u32, u8);
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int igb_suspend(struct device *);
+#endif
+static int igb_resume(struct device *);
+static int igb_runtime_suspend(struct device *dev);
+static int igb_runtime_resume(struct device *dev);
+static int igb_runtime_idle(struct device *dev);
+static const struct dev_pm_ops igb_pm_ops = {
+	SET_SYSTEM_SLEEP_PM_OPS(igb_suspend, igb_resume)
+	SET_RUNTIME_PM_OPS(igb_runtime_suspend, igb_runtime_resume,
+			igb_runtime_idle)
+};
+#endif
+static void igb_shutdown(struct pci_dev *);
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs);
+#ifdef CONFIG_NET_POLL_CONTROLLER
+/* for netdump / net console */
+static void igb_netpoll(struct rtnet_device *);
+#endif
+
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *,
+		     pci_channel_state_t);
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *);
+static void igb_io_resume(struct pci_dev *);
+
+static const struct pci_error_handlers igb_err_handler = {
+	.error_detected = igb_io_error_detected,
+	.slot_reset = igb_io_slot_reset,
+	.resume = igb_io_resume,
+};
+
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba);
+
+static struct pci_driver igb_driver = {
+	.name     = igb_driver_name,
+	.id_table = igb_pci_tbl,
+	.probe    = igb_probe,
+	.remove   = igb_remove,
+#ifdef CONFIG_PM
+	.driver.pm = &igb_pm_ops,
+#endif
+	.shutdown = igb_shutdown,
+	.sriov_configure = igb_pci_sriov_configure,
+	.err_handler = &igb_err_handler
+};
+
+MODULE_AUTHOR("Intel Corporation, <e1000-devel@lists.sourceforge.net>");
+MODULE_DESCRIPTION("Intel(R) Gigabit Ethernet Network Driver");
+MODULE_LICENSE("GPL");
+MODULE_VERSION(DRV_VERSION);
+
+#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV|NETIF_MSG_PROBE|NETIF_MSG_LINK)
+static int local_debug = -1;
+module_param_named(debug, local_debug, int, 0);
+MODULE_PARM_DESC(debug, "debug level (0=none,...,16=all)");
+
+struct igb_reg_info {
+	u32 ofs;
+	char *name;
+};
+
+static const struct igb_reg_info igb_reg_info_tbl[] = {
+
+	/* General Registers */
+	{E1000_CTRL, "CTRL"},
+	{E1000_STATUS, "STATUS"},
+	{E1000_CTRL_EXT, "CTRL_EXT"},
+
+	/* Interrupt Registers */
+	{E1000_ICR, "ICR"},
+
+	/* RX Registers */
+	{E1000_RCTL, "RCTL"},
+	{E1000_RDLEN(0), "RDLEN"},
+	{E1000_RDH(0), "RDH"},
+	{E1000_RDT(0), "RDT"},
+	{E1000_RXDCTL(0), "RXDCTL"},
+	{E1000_RDBAL(0), "RDBAL"},
+	{E1000_RDBAH(0), "RDBAH"},
+
+	/* TX Registers */
+	{E1000_TCTL, "TCTL"},
+	{E1000_TDBAL(0), "TDBAL"},
+	{E1000_TDBAH(0), "TDBAH"},
+	{E1000_TDLEN(0), "TDLEN"},
+	{E1000_TDH(0), "TDH"},
+	{E1000_TDT(0), "TDT"},
+	{E1000_TXDCTL(0), "TXDCTL"},
+	{E1000_TDFH, "TDFH"},
+	{E1000_TDFT, "TDFT"},
+	{E1000_TDFHS, "TDFHS"},
+	{E1000_TDFPC, "TDFPC"},
+
+	/* List Terminator */
+	{}
+};
+
+/* igb_regdump - register printout routine */
+static void igb_regdump(struct e1000_hw *hw, struct igb_reg_info *reginfo)
+{
+	int n = 0;
+	char rname[16];
+	u32 regs[8];
+
+	switch (reginfo->ofs) {
+	case E1000_RDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDLEN(n));
+		break;
+	case E1000_RDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDH(n));
+		break;
+	case E1000_RDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDT(n));
+		break;
+	case E1000_RXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RXDCTL(n));
+		break;
+	case E1000_RDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAL(n));
+		break;
+	case E1000_RDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_RDBAH(n));
+		break;
+	case E1000_TDBAL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAL(n));
+		break;
+	case E1000_TDBAH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDBAH(n));
+		break;
+	case E1000_TDLEN(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDLEN(n));
+		break;
+	case E1000_TDH(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDH(n));
+		break;
+	case E1000_TDT(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TDT(n));
+		break;
+	case E1000_TXDCTL(0):
+		for (n = 0; n < 4; n++)
+			regs[n] = rd32(E1000_TXDCTL(n));
+		break;
+	default:
+		pr_info("%-15s %08x\n", reginfo->name, rd32(reginfo->ofs));
+		return;
+	}
+
+	snprintf(rname, 16, "%s%s", reginfo->name, "[0-3]");
+	pr_info("%-15s %08x %08x %08x %08x\n", rname, regs[0], regs[1],
+		regs[2], regs[3]);
+}
+
+/* igb_dump - Print registers, Tx-rings and Rx-rings */
+static void igb_dump(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct igb_reg_info *reginfo;
+	struct igb_ring *tx_ring;
+	union e1000_adv_tx_desc *tx_desc;
+	struct my_u0 { u64 a; u64 b; } *u0;
+	struct igb_ring *rx_ring;
+	union e1000_adv_rx_desc *rx_desc;
+	u32 staterr;
+	u16 i, n;
+
+	/* Print netdevice Info */
+	if (netdev) {
+		dev_info(&adapter->pdev->dev, "Net device Info\n");
+		pr_info("Device Name: %s\n", netdev->name);
+	}
+
+	/* Print Registers */
+	dev_info(&adapter->pdev->dev, "Register Dump\n");
+	pr_info(" Register Name   Value\n");
+	for (reginfo = (struct igb_reg_info *)igb_reg_info_tbl;
+	     reginfo->name; reginfo++) {
+		igb_regdump(hw, reginfo);
+	}
+
+	/* Print TX Ring Summary */
+	if (!netdev || !rtnetif_running(netdev))
+		goto exit;
+
+	dev_info(&adapter->pdev->dev, "TX Rings Summary\n");
+	pr_info("Queue [NTU] [NTC] [bi(ntc)->dma  ] leng ntw timestamp\n");
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		struct igb_tx_buffer *buffer_info;
+		tx_ring = adapter->tx_ring[n];
+		buffer_info = &tx_ring->tx_buffer_info[tx_ring->next_to_clean];
+		pr_info(" %5d %5X %5X %p %016llX\n",
+			n, tx_ring->next_to_use, tx_ring->next_to_clean,
+			buffer_info->next_to_watch,
+			(u64)buffer_info->time_stamp);
+	}
+
+	dev_info(&adapter->pdev->dev, "TX Rings Dump\n");
+
+	/* Transmit Descriptor Formats
+	 *
+	 * Advanced Transmit Descriptor
+	 *   +--------------------------------------------------------------+
+	 * 0 |         Buffer Address [63:0]                                |
+	 *   +--------------------------------------------------------------+
+	 * 8 | PAYLEN  | PORTS  |CC|IDX | STA | DCMD  |DTYP|MAC|RSV| DTALEN |
+	 *   +--------------------------------------------------------------+
+	 *   63      46 45    40 39 38 36 35 32 31   24             15       0
+	 */
+
+	for (n = 0; n < adapter->num_tx_queues; n++) {
+		tx_ring = adapter->tx_ring[n];
+		pr_info("------------------------------------\n");
+		pr_info("TX QUEUE INDEX = %d\n", tx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("T [desc]     [address 63:0  ] [PlPOCIStDDM Ln] "
+			"[bi->dma       ] leng  ntw timestamp        "
+			"bi->skb\n");
+
+		for (i = 0; tx_ring->desc && (i < tx_ring->count); i++) {
+			const char *next_desc;
+			struct igb_tx_buffer *buffer_info;
+			tx_desc = IGB_TX_DESC(tx_ring, i);
+			buffer_info = &tx_ring->tx_buffer_info[i];
+			u0 = (struct my_u0 *)tx_desc;
+			if (i == tx_ring->next_to_use &&
+			    i == tx_ring->next_to_clean)
+				next_desc = " NTC/U";
+			else if (i == tx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == tx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
+			pr_info("T [0x%03X]    %016llX %016llX"
+				"  %p %016llX %p%s\n", i,
+				le64_to_cpu(u0->a),
+				le64_to_cpu(u0->b),
+				buffer_info->next_to_watch,
+				(u64)buffer_info->time_stamp,
+				buffer_info->skb, next_desc);
+
+			if (buffer_info->skb)
+				print_hex_dump(KERN_INFO, "",
+					DUMP_PREFIX_ADDRESS,
+					16, 1, buffer_info->skb->data,
+					14,
+					true);
+		}
+	}
+
+	/* Print RX Rings Summary */
+	dev_info(&adapter->pdev->dev, "RX Rings Summary\n");
+	pr_info("Queue [NTU] [NTC]\n");
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		pr_info(" %5d %5X %5X\n",
+			n, rx_ring->next_to_use, rx_ring->next_to_clean);
+	}
+
+	/* Print RX Rings */
+	dev_info(&adapter->pdev->dev, "RX Rings Dump\n");
+
+	/* Advanced Receive Descriptor (Read) Format
+	 *    63                                           1        0
+	 *    +-----------------------------------------------------+
+	 *  0 |       Packet Buffer Address [63:1]           |A0/NSE|
+	 *    +----------------------------------------------+------+
+	 *  8 |       Header Buffer Address [63:1]           |  DD  |
+	 *    +-----------------------------------------------------+
+	 *
+	 *
+	 * Advanced Receive Descriptor (Write-Back) Format
+	 *
+	 *   63       48 47    32 31  30      21 20 17 16   4 3     0
+	 *   +------------------------------------------------------+
+	 * 0 | Packet     IP     |SPH| HDR_LEN   | RSV|Packet|  RSS |
+	 *   | Checksum   Ident  |   |           |    | Type | Type |
+	 *   +------------------------------------------------------+
+	 * 8 | VLAN Tag | Length | Extended Error | Extended Status |
+	 *   +------------------------------------------------------+
+	 *   63       48 47    32 31            20 19               0
+	 */
+
+	for (n = 0; n < adapter->num_rx_queues; n++) {
+		rx_ring = adapter->rx_ring[n];
+		pr_info("------------------------------------\n");
+		pr_info("RX QUEUE INDEX = %d\n", rx_ring->queue_index);
+		pr_info("------------------------------------\n");
+		pr_info("R  [desc]      [ PktBuf     A0] [  HeadBuf   DD] "
+			"[bi->dma       ] [bi->skb] <-- Adv Rx Read format\n");
+		pr_info("RWB[desc]      [PcsmIpSHl PtRs] [vl er S cks ln] -----"
+			"----------- [bi->skb] <-- Adv Rx Write-Back format\n");
+
+		for (i = 0; i < rx_ring->count; i++) {
+			const char *next_desc;
+			struct igb_rx_buffer *buffer_info;
+			buffer_info = &rx_ring->rx_buffer_info[i];
+			rx_desc = IGB_RX_DESC(rx_ring, i);
+			u0 = (struct my_u0 *)rx_desc;
+			staterr = le32_to_cpu(rx_desc->wb.upper.status_error);
+
+			if (i == rx_ring->next_to_use)
+				next_desc = " NTU";
+			else if (i == rx_ring->next_to_clean)
+				next_desc = " NTC";
+			else
+				next_desc = "";
+
+			if (staterr & E1000_RXD_STAT_DD) {
+				/* Descriptor Done */
+				pr_info("%s[0x%03X]     %016llX %016llX ---------------- %s\n",
+					"RWB", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					next_desc);
+			} else {
+				pr_info("%s[0x%03X]     %016llX %016llX %016llX %s\n",
+					"R  ", i,
+					le64_to_cpu(u0->a),
+					le64_to_cpu(u0->b),
+					(u64)buffer_info->dma,
+					next_desc);
+
+			}
+		}
+	}
+
+exit:
+	return;
+}
+
+/**
+ *  igb_get_hw_dev - return device
+ *  @hw: pointer to hardware structure
+ *
+ *  used by hardware layer to print debugging information
+ **/
+struct rtnet_device *igb_get_hw_dev(struct e1000_hw *hw)
+{
+	struct igb_adapter *adapter = hw->back;
+	return adapter->netdev;
+}
+
+/**
+ *  igb_init_module - Driver Registration Routine
+ *
+ *  igb_init_module is the first routine called when the driver is
+ *  loaded. All it does is register with the PCI subsystem.
+ **/
+static int __init igb_init_module(void)
+{
+	int ret;
+
+	pr_info("%s - version %s\n",
+	       igb_driver_string, igb_driver_version);
+	pr_info("%s\n", igb_copyright);
+
+	ret = pci_register_driver(&igb_driver);
+	return ret;
+}
+
+module_init(igb_init_module);
+
+/**
+ *  igb_exit_module - Driver Exit Cleanup Routine
+ *
+ *  igb_exit_module is called just before the driver is removed
+ *  from memory.
+ **/
+static void __exit igb_exit_module(void)
+{
+	pci_unregister_driver(&igb_driver);
+}
+
+module_exit(igb_exit_module);
+
+#define Q_IDX_82576(i) (((i & 0x1) << 3) + (i >> 1))
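+/* Illustrative mapping: Q_IDX_82576(i) gives 0->0, 1->8, 2->1, 3->9, ...,
+ * matching the 82576 layout described below where VF 0 owns queues 0 and
+ * 8, VF 1 owns queues 1 and 9, and so on.
+ */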
+/**
+ *  igb_cache_ring_register - Descriptor ring to register mapping
+ *  @adapter: board private structure to initialize
+ *
+ *  Once we know the feature-set enabled for the device, we'll cache
+ *  the register offset the descriptor ring is assigned to.
+ **/
+static void igb_cache_ring_register(struct igb_adapter *adapter)
+{
+	int i = 0, j = 0;
+	u32 rbase_offset = 0;
+
+	switch (adapter->hw.mac.type) {
+	case e1000_82576:
+		/* The queues are allocated for virtualization such that VF 0
+		 * is allocated queues 0 and 8, VF 1 queues 1 and 9, etc.
+		 * In order to avoid collision we start at the first free queue
+		 * and continue consuming queues in the same sequence
+		 */
+		fallthrough;
+	case e1000_82575:
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		fallthrough;
+	default:
+		for (; i < adapter->num_rx_queues; i++)
+			adapter->rx_ring[i]->reg_idx = rbase_offset + i;
+		for (; j < adapter->num_tx_queues; j++)
+			adapter->tx_ring[j]->reg_idx = rbase_offset + j;
+		break;
+	}
+}
+
+u32 igb_rd32(struct e1000_hw *hw, u32 reg)
+{
+	struct igb_adapter *igb = container_of(hw, struct igb_adapter, hw);
+	u8 __iomem *hw_addr = READ_ONCE(hw->hw_addr);
+	u32 value = 0;
+
+	if (E1000_REMOVED(hw_addr))
+		return ~value;
+
+	value = readl(&hw_addr[reg]);
+
+	/* reads should not return all F's */
+	if (!(~value) && (!reg || !(~readl(hw_addr)))) {
+		struct rtnet_device *netdev = igb->netdev;
+		hw->hw_addr = NULL;
+		rtnetif_device_detach(netdev);
+		rtdev_err(netdev, "PCIe link lost, device now detached\n");
+	}
+
+	return value;
+}
+
+/**
+ *  igb_write_ivar - configure ivar for given MSI-X vector
+ *  @hw: pointer to the HW structure
+ *  @msix_vector: vector number we are allocating to a given ring
+ *  @index: row index of IVAR register to write within IVAR table
+ *  @offset: column offset of in IVAR, should be multiple of 8
+ *
+ *  This function is intended to handle the writing of the IVAR register
+ *  for adapters 82576 and newer.  The IVAR table consists of 2 columns,
+ *  each containing a cause allocation for an Rx and Tx ring, and a
+ *  variable number of rows depending on the number of queues supported.
+ **/
+static void igb_write_ivar(struct e1000_hw *hw, int msix_vector,
+			   int index, int offset)
+{
+	u32 ivar = array_rd32(E1000_IVAR0, index);
+
+	/* clear any bits that are currently set */
+	ivar &= ~((u32)0xFF << offset);
+
+	/* write vector and valid bit */
+	ivar |= (msix_vector | E1000_IVAR_VALID) << offset;
+
+	array_wr32(E1000_IVAR0, index, ivar);
+}
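+/* Worked example (illustrative, 82580-style row-major layout, see
+ * igb_assign_vector() below): mapping rx_queue = 3 onto msix_vector = 2
+ * calls igb_write_ivar(hw, 2, 3 >> 1, (3 & 0x1) << 4), i.e. row 1,
+ * bit offset 16, so byte 2 of that IVAR entry gets (2 | E1000_IVAR_VALID).
+ */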
+
+#define IGB_N0_QUEUE -1
+static void igb_assign_vector(struct igb_q_vector *q_vector, int msix_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+	int rx_queue = IGB_N0_QUEUE;
+	int tx_queue = IGB_N0_QUEUE;
+	u32 msixbm = 0;
+
+	if (q_vector->rx.ring)
+		rx_queue = q_vector->rx.ring->reg_idx;
+	if (q_vector->tx.ring)
+		tx_queue = q_vector->tx.ring->reg_idx;
+
+	switch (hw->mac.type) {
+	case e1000_82575:
+		/* The 82575 assigns vectors using a bitmask, which matches the
+		 * bitmask for the EICR/EIMS/EIMC registers.  To assign one
+		 * or more queues to a vector, we write the appropriate bits
+		 * into the MSIXBM register for that vector.
+		 */
+		if (rx_queue > IGB_N0_QUEUE)
+			msixbm = E1000_EICR_RX_QUEUE0 << rx_queue;
+		if (tx_queue > IGB_N0_QUEUE)
+			msixbm |= E1000_EICR_TX_QUEUE0 << tx_queue;
+		if (!(adapter->flags & IGB_FLAG_HAS_MSIX) && msix_vector == 0)
+			msixbm |= E1000_EIMS_OTHER;
+		array_wr32(E1000_MSIXBM(0), msix_vector, msixbm);
+		q_vector->eims_value = msixbm;
+		break;
+	case e1000_82576:
+		/* 82576 uses a table that essentially consists of 2 columns
+		 * with 8 rows.  The ordering is column-major so we use the
+		 * lower 3 bits as the row index, and the 4th bit as the
+		 * column offset.
+		 */
+		if (rx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       rx_queue & 0x7,
+				       (rx_queue & 0x8) << 1);
+		if (tx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       tx_queue & 0x7,
+				       ((tx_queue & 0x8) << 1) + 8);
+		q_vector->eims_value = 1 << msix_vector;
+		break;
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		/* On 82580 and newer adapters the scheme is similar to 82576
+		 * however instead of ordering column-major we have things
+		 * ordered row-major.  So we traverse the table by using
+		 * bit 0 as the column offset, and the remaining bits as the
+		 * row index.
+		 */
+		if (rx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       rx_queue >> 1,
+				       (rx_queue & 0x1) << 4);
+		if (tx_queue > IGB_N0_QUEUE)
+			igb_write_ivar(hw, msix_vector,
+				       tx_queue >> 1,
+				       ((tx_queue & 0x1) << 4) + 8);
+		q_vector->eims_value = 1 << msix_vector;
+		break;
+	default:
+		BUG();
+		break;
+	}
+
+	/* add q_vector eims value to global eims_enable_mask */
+	adapter->eims_enable_mask |= q_vector->eims_value;
+
+	/* configure q_vector to set itr on first interrupt */
+	q_vector->set_itr = 1;
+}
+
+/**
+ *  igb_configure_msix - Configure MSI-X hardware
+ *  @adapter: board private structure to initialize
+ *
+ *  igb_configure_msix sets up the hardware to properly
+ *  generate MSI-X interrupts.
+ **/
+static void igb_configure_msix(struct igb_adapter *adapter)
+{
+	u32 tmp;
+	int i, vector = 0;
+	struct e1000_hw *hw = &adapter->hw;
+
+	adapter->eims_enable_mask = 0;
+
+	/* set vector for other causes, i.e. link changes */
+	switch (hw->mac.type) {
+	case e1000_82575:
+		tmp = rd32(E1000_CTRL_EXT);
+		/* enable MSI-X PBA support*/
+		tmp |= E1000_CTRL_EXT_PBA_CLR;
+
+		/* Auto-Mask interrupts upon ICR read. */
+		tmp |= E1000_CTRL_EXT_EIAME;
+		tmp |= E1000_CTRL_EXT_IRCA;
+
+		wr32(E1000_CTRL_EXT, tmp);
+
+		/* enable msix_other interrupt */
+		array_wr32(E1000_MSIXBM(0), vector++, E1000_EIMS_OTHER);
+		adapter->eims_other = E1000_EIMS_OTHER;
+
+		break;
+
+	case e1000_82576:
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	case e1000_i211:
+		/* Turn on MSI-X capability first, or our settings
+		 * won't stick.  And it will take days to debug.
+		 */
+		wr32(E1000_GPIE, E1000_GPIE_MSIX_MODE |
+		     E1000_GPIE_PBA | E1000_GPIE_EIAME |
+		     E1000_GPIE_NSICR);
+
+		/* enable msix_other interrupt */
+		adapter->eims_other = 1 << vector;
+		tmp = (vector++ | E1000_IVAR_VALID) << 8;
+
+		wr32(E1000_IVAR_MISC, tmp);
+		break;
+	default:
+		/* do nothing, since nothing else supports MSI-X */
+		break;
+	} /* switch (hw->mac.type) */
+
+	adapter->eims_enable_mask |= adapter->eims_other;
+
+	for (i = 0; i < adapter->num_q_vectors; i++)
+		igb_assign_vector(adapter->q_vector[i], vector++);
+
+	wrfl();
+}
+
+/**
+ *  igb_request_msix - Initialize MSI-X interrupts
+ *  @adapter: board private structure to initialize
+ *
+ *  igb_request_msix allocates MSI-X vectors and requests interrupts from the
+ *  kernel.
+ **/
+static int igb_request_msix(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	int i, err = 0, vector = 0, free_vector = 0;
+
+	err = request_irq(adapter->msix_entries[vector].vector,
+			  igb_msix_other, 0, netdev->name, adapter);
+	if (err)
+		goto err_out;
+
+	for (i = 0; i < adapter->num_q_vectors; i++) {
+		struct igb_q_vector *q_vector = adapter->q_vector[i];
+
+		vector++;
+
+		q_vector->itr_register = hw->hw_addr + E1000_EITR(vector);
+
+		if (q_vector->rx.ring && q_vector->tx.ring)
+			sprintf(q_vector->name, "%s-TxRx-%u", netdev->name,
+				q_vector->rx.ring->queue_index);
+		else if (q_vector->tx.ring)
+			sprintf(q_vector->name, "%s-tx-%u", netdev->name,
+				q_vector->tx.ring->queue_index);
+		else if (q_vector->rx.ring)
+			sprintf(q_vector->name, "%s-rx-%u", netdev->name,
+				q_vector->rx.ring->queue_index);
+		else
+			sprintf(q_vector->name, "%s-unused", netdev->name);
+
+		err = rtdm_irq_request(&adapter->msix_irq_handle[vector],
+				adapter->msix_entries[vector].vector,
+				igb_msix_ring, 0, q_vector->name, q_vector);
+		if (err)
+			goto err_free;
+	}
+
+	igb_configure_msix(adapter);
+	return 0;
+
+err_free:
+	/* free already assigned IRQs */
+	free_irq(adapter->msix_entries[free_vector++].vector, adapter);
+
+	vector--;
+	for (i = 0; i < vector; i++)
+		rtdm_irq_free(&adapter->msix_irq_handle[free_vector++]);
+err_out:
+	return err;
+}
+
+/**
+ *  igb_free_q_vector - Free memory allocated for specific interrupt vector
+ *  @adapter: board private structure to initialize
+ *  @v_idx: Index of vector to be freed
+ *
+ *  This function frees the memory allocated to the q_vector.
+ **/
+static void igb_free_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+	adapter->q_vector[v_idx] = NULL;
+
+	/* igb_get_stats64() might access the rings on this vector,
+	 * we must wait a grace period before freeing it.
+	 */
+	if (q_vector)
+		kfree_rcu(q_vector, rcu);
+}
+
+/**
+ *  igb_reset_q_vector - Reset config for interrupt vector
+ *  @adapter: board private structure to initialize
+ *  @v_idx: Index of vector to be reset
+ *
+ *  If NAPI is enabled it will delete any references to the
+ *  NAPI struct. This is preparation for igb_free_q_vector.
+ **/
+static void igb_reset_q_vector(struct igb_adapter *adapter, int v_idx)
+{
+	struct igb_q_vector *q_vector = adapter->q_vector[v_idx];
+
+	/* Coming from igb_set_interrupt_capability, the vectors are not yet
+	 * allocated, so q_vector may still be NULL; stop here in that case.
+	 */
+	if (!q_vector)
+		return;
+
+	if (q_vector->tx.ring)
+		adapter->tx_ring[q_vector->tx.ring->queue_index] = NULL;
+
+	if (q_vector->rx.ring)
+		adapter->rx_ring[q_vector->rx.ring->queue_index] = NULL;
+}
+
+static void igb_reset_interrupt_capability(struct igb_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX)
+		pci_disable_msix(adapter->pdev);
+	else if (adapter->flags & IGB_FLAG_HAS_MSI)
+		pci_disable_msi(adapter->pdev);
+
+	while (v_idx--)
+		igb_reset_q_vector(adapter, v_idx);
+}
+
+/**
+ *  igb_free_q_vectors - Free memory allocated for interrupt vectors
+ *  @adapter: board private structure to initialize
+ *
+ *  This function frees the memory allocated to the q_vectors.  In addition if
+ *  NAPI is enabled it will delete any references to the NAPI struct prior
+ *  to freeing the q_vector.
+ **/
+static void igb_free_q_vectors(struct igb_adapter *adapter)
+{
+	int v_idx = adapter->num_q_vectors;
+
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--) {
+		igb_reset_q_vector(adapter, v_idx);
+		igb_free_q_vector(adapter, v_idx);
+	}
+}
+
+/**
+ *  igb_clear_interrupt_scheme - reset the device to a state of no interrupts
+ *  @adapter: board private structure to initialize
+ *
+ *  This function resets the device so that it has 0 Rx queues, Tx queues, and
+ *  MSI-X interrupts allocated.
+ **/
+static void igb_clear_interrupt_scheme(struct igb_adapter *adapter)
+{
+	igb_free_q_vectors(adapter);
+	igb_reset_interrupt_capability(adapter);
+}
+
+/**
+ *  igb_set_interrupt_capability - set MSI or MSI-X if supported
+ *  @adapter: board private structure to initialize
+ *  @msix: boolean value of MSIX capability
+ *
+ *  Attempt to configure interrupts using the best available
+ *  capabilities of the hardware and kernel.
+ **/
+static void igb_set_interrupt_capability(struct igb_adapter *adapter, bool msix)
+{
+	int err;
+	int numvecs, i;
+
+	if (!msix)
+		goto msi_only;
+	adapter->flags |= IGB_FLAG_HAS_MSIX;
+
+	/* Number of supported queues. */
+	adapter->num_rx_queues = adapter->rss_queues;
+	adapter->num_tx_queues = adapter->rss_queues;
+
+	/* start with one vector for every Rx queue */
+	numvecs = adapter->num_rx_queues;
+
+	/* if Tx handler is separate add 1 for every Tx queue */
+	if (!(adapter->flags & IGB_FLAG_QUEUE_PAIRS))
+		numvecs += adapter->num_tx_queues;
+
+	/* store the number of vectors reserved for queues */
+	adapter->num_q_vectors = numvecs;
+
+	/* add 1 vector for link status interrupts */
+	numvecs++;
+	for (i = 0; i < numvecs; i++)
+		adapter->msix_entries[i].entry = i;
+
+	err = pci_enable_msix_range(adapter->pdev,
+				    adapter->msix_entries,
+				    numvecs,
+				    numvecs);
+	if (err > 0)
+		return;
+
+	igb_reset_interrupt_capability(adapter);
+
+	/* If we can't do MSI-X, try MSI */
+msi_only:
+	adapter->flags &= ~IGB_FLAG_HAS_MSIX;
+	adapter->rss_queues = 1;
+	adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+	adapter->num_rx_queues = 1;
+	adapter->num_tx_queues = 1;
+	adapter->num_q_vectors = 1;
+	if (!pci_enable_msi(adapter->pdev))
+		adapter->flags |= IGB_FLAG_HAS_MSI;
+}
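+/* Example vector budget (illustrative): rss_queues = 4 with queue
+ * pairing disabled gives numvecs = 4 Rx + 4 Tx + 1 link-status = 9,
+ * within the MAX_MSIX_ENTRIES (10) table sized in igb.h; with pairing
+ * enabled the same setup needs only 4 + 1 = 5 vectors.
+ */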
+
+static void igb_add_ring(struct igb_ring *ring,
+			 struct igb_ring_container *head)
+{
+	head->ring = ring;
+	head->count++;
+}
+
+/**
+ *  igb_alloc_q_vector - Allocate memory for a single interrupt vector
+ *  @adapter: board private structure to initialize
+ *  @v_count: q_vectors allocated on adapter, used for ring interleaving
+ *  @v_idx: index of vector in adapter struct
+ *  @txr_count: total number of Tx rings to allocate
+ *  @txr_idx: index of first Tx ring to allocate
+ *  @rxr_count: total number of Rx rings to allocate
+ *  @rxr_idx: index of first Rx ring to allocate
+ *
+ *  We allocate one q_vector.  If allocation fails we return -ENOMEM.
+ **/
+static int igb_alloc_q_vector(struct igb_adapter *adapter,
+			      int v_count, int v_idx,
+			      int txr_count, int txr_idx,
+			      int rxr_count, int rxr_idx)
+{
+	struct igb_q_vector *q_vector;
+	struct igb_ring *ring;
+	int ring_count, size;
+
+	/* igb only supports 1 Tx and/or 1 Rx queue per vector */
+	if (txr_count > 1 || rxr_count > 1)
+		return -ENOMEM;
+
+	ring_count = txr_count + rxr_count;
+	size = sizeof(struct igb_q_vector) +
+	       (sizeof(struct igb_ring) * ring_count);
+
+	/* allocate q_vector and rings */
+	q_vector = adapter->q_vector[v_idx];
+	if (!q_vector)
+		q_vector = kzalloc(size, GFP_KERNEL);
+	else
+		memset(q_vector, 0, size);
+	if (!q_vector)
+		return -ENOMEM;
+
+	/* tie q_vector and adapter together */
+	adapter->q_vector[v_idx] = q_vector;
+	q_vector->adapter = adapter;
+
+	/* initialize work limits */
+	q_vector->tx.work_limit = adapter->tx_work_limit;
+
+	/* initialize ITR configuration */
+	q_vector->itr_register = adapter->hw.hw_addr + E1000_EITR(0);
+	q_vector->itr_val = IGB_START_ITR;
+
+	/* initialize pointer to rings */
+	ring = q_vector->ring;
+
+	/* initialize ITR */
+	if (rxr_count) {
+		/* rx or rx/tx vector */
+		if (!adapter->rx_itr_setting || adapter->rx_itr_setting > 3)
+			q_vector->itr_val = adapter->rx_itr_setting;
+	} else {
+		/* tx only vector */
+		if (!adapter->tx_itr_setting || adapter->tx_itr_setting > 3)
+			q_vector->itr_val = adapter->tx_itr_setting;
+	}
+
+	if (txr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Tx values */
+		igb_add_ring(ring, &q_vector->tx);
+
+		/* For 82575, context index must be unique per ring. */
+		if (adapter->hw.mac.type == e1000_82575)
+			set_bit(IGB_RING_FLAG_TX_CTX_IDX, &ring->flags);
+
+		/* apply Tx specific ring traits */
+		ring->count = adapter->tx_ring_count;
+		ring->queue_index = txr_idx;
+
+		/* assign ring to adapter */
+		adapter->tx_ring[txr_idx] = ring;
+
+		/* push pointer to next ring */
+		ring++;
+	}
+
+	if (rxr_count) {
+		/* assign generic ring traits */
+		ring->dev = &adapter->pdev->dev;
+		ring->netdev = adapter->netdev;
+
+		/* configure backlink on ring */
+		ring->q_vector = q_vector;
+
+		/* update q_vector Rx values */
+		igb_add_ring(ring, &q_vector->rx);
+
+		/* set flag indicating ring supports SCTP checksum offload */
+		if (adapter->hw.mac.type >= e1000_82576)
+			set_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags);
+
+		/* On i350, i354, i210, and i211, loopback VLAN packets
+		 * have the tag byte-swapped.
+		 */
+		if (adapter->hw.mac.type >= e1000_i350)
+			set_bit(IGB_RING_FLAG_RX_LB_VLAN_BSWAP, &ring->flags);
+
+		/* apply Rx specific ring traits */
+		ring->count = adapter->rx_ring_count;
+		ring->queue_index = rxr_idx;
+
+		/* assign ring to adapter */
+		adapter->rx_ring[rxr_idx] = ring;
+	}
+
+	return 0;
+}
+
+/**
+ *  igb_alloc_q_vectors - Allocate memory for interrupt vectors
+ *  @adapter: board private structure to initialize
+ *
+ *  We allocate one q_vector per queue interrupt.  If allocation fails we
+ *  return -ENOMEM.
+ **/
+static int igb_alloc_q_vectors(struct igb_adapter *adapter)
+{
+	int q_vectors = adapter->num_q_vectors;
+	int rxr_remaining = adapter->num_rx_queues;
+	int txr_remaining = adapter->num_tx_queues;
+	int rxr_idx = 0, txr_idx = 0, v_idx = 0;
+	int err;
+
+	if (q_vectors >= (rxr_remaining + txr_remaining)) {
+		for (; rxr_remaining; v_idx++) {
+			err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+						 0, 0, 1, rxr_idx);
+
+			if (err)
+				goto err_out;
+
+			/* update counts and index */
+			rxr_remaining--;
+			rxr_idx++;
+		}
+	}
+
+	for (; v_idx < q_vectors; v_idx++) {
+		int rqpv = DIV_ROUND_UP(rxr_remaining, q_vectors - v_idx);
+		int tqpv = DIV_ROUND_UP(txr_remaining, q_vectors - v_idx);
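+		/* Example (typical for this port): one vector left for one
+		 * Rx and one Tx queue gives rqpv = tqpv = 1, i.e. a single
+		 * q_vector handles the Tx/Rx pair; igb_alloc_q_vector()
+		 * rejects any split with more than one ring of a kind per
+		 * vector.
+		 */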
+
+		err = igb_alloc_q_vector(adapter, q_vectors, v_idx,
+					 tqpv, txr_idx, rqpv, rxr_idx);
+
+		if (err)
+			goto err_out;
+
+		/* update counts and index */
+		rxr_remaining -= rqpv;
+		txr_remaining -= tqpv;
+		rxr_idx++;
+		txr_idx++;
+	}
+
+	return 0;
+
+err_out:
+	adapter->num_tx_queues = 0;
+	adapter->num_rx_queues = 0;
+	adapter->num_q_vectors = 0;
+
+	while (v_idx--)
+		igb_free_q_vector(adapter, v_idx);
+
+	return -ENOMEM;
+}
+
+/**
+ *  igb_init_interrupt_scheme - initialize interrupts, allocate queues/vectors
+ *  @adapter: board private structure to initialize
+ *  @msix: boolean value of MSIX capability
+ *
+ *  This function initializes the interrupts and allocates all of the queues.
+ **/
+static int igb_init_interrupt_scheme(struct igb_adapter *adapter, bool msix)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	igb_set_interrupt_capability(adapter, msix);
+
+	err = igb_alloc_q_vectors(adapter);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to allocate memory for vectors\n");
+		goto err_alloc_q_vectors;
+	}
+
+	igb_cache_ring_register(adapter);
+
+	return 0;
+
+err_alloc_q_vectors:
+	igb_reset_interrupt_capability(adapter);
+	return err;
+}
+
+/**
+ *  igb_request_irq - initialize interrupts
+ *  @adapter: board private structure to initialize
+ *
+ *  Attempts to configure interrupts using the best available
+ *  capabilities of the hardware and kernel.
+ **/
+static int igb_request_irq(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	rt_stack_connect(netdev, &STACK_manager);
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		err = igb_request_msix(adapter);
+		if (!err)
+			goto request_done;
+		/* fall back to MSI */
+		igb_free_all_tx_resources(adapter);
+		igb_free_all_rx_resources(adapter);
+
+		igb_clear_interrupt_scheme(adapter);
+		err = igb_init_interrupt_scheme(adapter, false);
+		if (err)
+			goto request_done;
+
+		igb_setup_all_tx_resources(adapter);
+		igb_setup_all_rx_resources(adapter);
+		igb_configure(adapter);
+	}
+
+	igb_assign_vector(adapter->q_vector[0], 0);
+
+	if (adapter->flags & IGB_FLAG_HAS_MSI) {
+		err = rtdm_irq_request(&adapter->irq_handle,
+				pdev->irq, igb_intr_msi, 0,
+				netdev->name, adapter);
+		if (!err)
+			goto request_done;
+
+		/* fall back to legacy interrupts */
+		igb_reset_interrupt_capability(adapter);
+		adapter->flags &= ~IGB_FLAG_HAS_MSI;
+	}
+
+	err = rtdm_irq_request(&adapter->irq_handle,
+			pdev->irq, igb_intr, IRQF_SHARED,
+			netdev->name, adapter);
+
+	if (err)
+		dev_err(&pdev->dev, "Error %d getting interrupt\n",
+			err);
+
+request_done:
+	return err;
+}
+
+static void igb_free_irq(struct igb_adapter *adapter)
+{
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		int vector = 0, i;
+
+		free_irq(adapter->msix_entries[vector++].vector, adapter);
+
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			rtdm_irq_free(&adapter->msix_irq_handle[vector++]);
+	} else {
+		rtdm_irq_free(&adapter->irq_handle);
+	}
+}
+
+/**
+ *  igb_irq_disable - Mask off interrupt generation on the NIC
+ *  @adapter: board private structure
+ **/
+static void igb_irq_disable(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* We need to be careful when disabling interrupts: the VFs are also
+	 * mapped into these registers, and clearing the bits can cause
+	 * issues for the VF drivers, so we clear only what we set.
+	 */
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 regval = rd32(E1000_EIAM);
+
+		wr32(E1000_EIAM, regval & ~adapter->eims_enable_mask);
+		wr32(E1000_EIMC, adapter->eims_enable_mask);
+		regval = rd32(E1000_EIAC);
+		wr32(E1000_EIAC, regval & ~adapter->eims_enable_mask);
+	}
+
+	wr32(E1000_IAM, 0);
+	wr32(E1000_IMC, ~0);
+	wrfl();
+
+	msleep(10);
+}
+
+/**
+ *  igb_irq_enable - Enable default interrupt generation settings
+ *  @adapter: board private structure
+ **/
+static void igb_irq_enable(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 ims = E1000_IMS_LSC | E1000_IMS_DOUTSYNC | E1000_IMS_DRSTA;
+		u32 regval = rd32(E1000_EIAC);
+
+		wr32(E1000_EIAC, regval | adapter->eims_enable_mask);
+		regval = rd32(E1000_EIAM);
+		wr32(E1000_EIAM, regval | adapter->eims_enable_mask);
+		wr32(E1000_EIMS, adapter->eims_enable_mask);
+		wr32(E1000_IMS, ims);
+	} else {
+		wr32(E1000_IMS, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
+		wr32(E1000_IAM, IMS_ENABLE_MASK |
+				E1000_IMS_DRSTA);
+	}
+}
+
+static void igb_update_mng_vlan(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 vid = adapter->hw.mng_cookie.vlan_id;
+	u16 old_vid = adapter->mng_vlan_id;
+
+	if (hw->mng_cookie.status & E1000_MNG_DHCP_COOKIE_STATUS_VLAN) {
+		/* add VID to filter table */
+		igb_vfta_set(hw, vid, true);
+		adapter->mng_vlan_id = vid;
+	} else {
+		adapter->mng_vlan_id = IGB_MNG_VLAN_NONE;
+	}
+
+	if ((old_vid != (u16)IGB_MNG_VLAN_NONE) &&
+	    (vid != old_vid) &&
+	    !test_bit(old_vid, adapter->active_vlans)) {
+		/* remove VID from filter table */
+		igb_vfta_set(hw, old_vid, false);
+	}
+}
+
+/**
+ *  igb_release_hw_control - release control of the h/w to f/w
+ *  @adapter: address of board private structure
+ *
+ *  igb_release_hw_control resets CTRL_EXT:DRV_LOAD bit.
+ *  For ASF and Pass Through versions of f/w this means that the
+ *  driver is no longer loaded.
+ **/
+static void igb_release_hw_control(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+
+	/* Let firmware take over control of h/w */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT,
+			ctrl_ext & ~E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ *  igb_get_hw_control - get control of the h/w from f/w
+ *  @adapter: address of board private structure
+ *
+ *  igb_get_hw_control sets CTRL_EXT:DRV_LOAD bit.
+ *  For ASF and Pass Through versions of f/w this means that
+ *  the driver is loaded.
+ **/
+static void igb_get_hw_control(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext;
+
+	/* Let firmware know the driver has taken over */
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	wr32(E1000_CTRL_EXT,
+			ctrl_ext | E1000_CTRL_EXT_DRV_LOAD);
+}
+
+/**
+ *  igb_configure - configure the hardware for RX and TX
+ *  @adapter: private board structure
+ **/
+static void igb_configure(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	int i;
+
+	igb_get_hw_control(adapter);
+	igb_set_rx_mode(netdev);
+
+	igb_restore_vlan(adapter);
+
+	igb_setup_tctl(adapter);
+	igb_setup_mrqc(adapter);
+	igb_setup_rctl(adapter);
+
+	igb_configure_tx(adapter);
+	igb_configure_rx(adapter);
+
+	igb_rx_fifo_flush_82575(&adapter->hw);
+
+	/* call igb_desc_unused which always leaves
+	 * at least 1 descriptor unused to make sure
+	 * next_to_use != next_to_clean
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = adapter->rx_ring[i];
+		igb_alloc_rx_buffers(ring, igb_desc_unused(ring));
+	}
+}
+
+/**
+ *  igb_power_up_link - Power up the phy/serdes link
+ *  @adapter: address of board private structure
+ **/
+void igb_power_up_link(struct igb_adapter *adapter)
+{
+	igb_reset_phy(&adapter->hw);
+
+	if (adapter->hw.phy.media_type == e1000_media_type_copper)
+		igb_power_up_phy_copper(&adapter->hw);
+	else
+		igb_power_up_serdes_link_82575(&adapter->hw);
+
+	igb_setup_link(&adapter->hw);
+}
+
+/**
+ *  igb_power_down_link - Power down the phy/serdes link
+ *  @adapter: address of board private structure
+ **/
+static void igb_power_down_link(struct igb_adapter *adapter)
+{
+	if (adapter->hw.phy.media_type == e1000_media_type_copper)
+		igb_power_down_phy_copper_82575(&adapter->hw);
+	else
+		igb_shutdown_serdes_link_82575(&adapter->hw);
+}
+
+/**
+ *  igb_check_swap_media - detect and switch function for Media Auto Sense
+ *  @adapter: address of the board private structure
+ **/
+static void igb_check_swap_media(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl_ext, connsw;
+	bool swap_now = false;
+
+	ctrl_ext = rd32(E1000_CTRL_EXT);
+	connsw = rd32(E1000_CONNSW);
+
+	/* We need to perform a live swap if the current media is copper and
+	 * we have a fiber/serdes link to switch to.
+	 */
+	if ((hw->phy.media_type == e1000_media_type_copper) &&
+	    (!(connsw & E1000_CONNSW_AUTOSENSE_EN))) {
+		swap_now = true;
+	} else if (!(connsw & E1000_CONNSW_SERDESD)) {
+		/* copper signal takes time to appear */
+		if (adapter->copper_tries < 4) {
+			adapter->copper_tries++;
+			connsw |= E1000_CONNSW_AUTOSENSE_CONF;
+			wr32(E1000_CONNSW, connsw);
+			return;
+		} else {
+			adapter->copper_tries = 0;
+			if ((connsw & E1000_CONNSW_PHYSD) &&
+			    (!(connsw & E1000_CONNSW_PHY_PDN))) {
+				swap_now = true;
+				connsw &= ~E1000_CONNSW_AUTOSENSE_CONF;
+				wr32(E1000_CONNSW, connsw);
+			}
+		}
+	}
+
+	if (!swap_now)
+		return;
+
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		rtdev_info(adapter->netdev,
+			"MAS: changing media to fiber/serdes\n");
+		ctrl_ext |=
+			E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+		adapter->flags |= IGB_FLAG_MEDIA_RESET;
+		adapter->copper_tries = 0;
+		break;
+	case e1000_media_type_internal_serdes:
+	case e1000_media_type_fiber:
+		rtdev_info(adapter->netdev,
+			"MAS: changing media to copper\n");
+		ctrl_ext &=
+			~E1000_CTRL_EXT_LINK_MODE_PCIE_SERDES;
+		adapter->flags |= IGB_FLAG_MEDIA_RESET;
+		break;
+	default:
+		/* shouldn't get here during regular operation */
+		rtdev_err(adapter->netdev,
+			"AMS: Invalid media type found, returning\n");
+		break;
+	}
+	wr32(E1000_CTRL_EXT, ctrl_ext);
+}
+
+/**
+ *  igb_up - Open the interface and prepare it to handle traffic
+ *  @adapter: board private structure
+ **/
+int igb_up(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* hardware has been reset, we need to reload some things */
+	igb_configure(adapter);
+
+	clear_bit(__IGB_DOWN, &adapter->state);
+
+	if (adapter->flags & IGB_FLAG_HAS_MSIX)
+		igb_configure_msix(adapter);
+	else
+		igb_assign_vector(adapter->q_vector[0], 0);
+
+	/* Clear any pending interrupts. */
+	rd32(E1000_ICR);
+	igb_irq_enable(adapter);
+
+	rtnetif_start_queue(adapter->netdev);
+
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
+
+	if ((adapter->flags & IGB_FLAG_EEE) &&
+	    (!hw->dev_spec._82575.eee_disable))
+		adapter->eee_advert = MDIO_EEE_100TX | MDIO_EEE_1000T;
+
+	return 0;
+}
+
+void igb_down(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl, rctl;
+
+	/* signal that we're down so the interrupt handler does not
+	 * reschedule our watchdog timer
+	 */
+	set_bit(__IGB_DOWN, &adapter->state);
+
+	/* disable receives in the hardware */
+	rctl = rd32(E1000_RCTL);
+	wr32(E1000_RCTL, rctl & ~E1000_RCTL_EN);
+	/* flush and sleep below */
+
+	rtnetif_stop_queue(netdev);
+
+	/* disable transmits in the hardware */
+	tctl = rd32(E1000_TCTL);
+	tctl &= ~E1000_TCTL_EN;
+	wr32(E1000_TCTL, tctl);
+	/* flush both disables and wait for them to finish */
+	wrfl();
+	usleep_range(10000, 11000);
+
+	igb_irq_disable(adapter);
+
+	adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	/* record the stats before reset*/
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter);
+	spin_unlock(&adapter->stats64_lock);
+
+	rtnetif_carrier_off(netdev);
+	adapter->link_speed = 0;
+	adapter->link_duplex = 0;
+
+	if (!pci_channel_offline(adapter->pdev))
+		igb_reset(adapter);
+	igb_clean_all_tx_rings(adapter);
+	igb_clean_all_rx_rings(adapter);
+}
+
+void igb_reinit_locked(struct igb_adapter *adapter)
+{
+	WARN_ON(in_interrupt());
+	while (test_and_set_bit(__IGB_RESETTING, &adapter->state))
+		usleep_range(1000, 2000);
+	igb_down(adapter);
+	igb_up(adapter);
+	clear_bit(__IGB_RESETTING, &adapter->state);
+}
+
+/**
+ *  igb_enable_mas - Media Autosense re-enable after swap
+ *  @adapter: adapter struct
+ **/
+static void igb_enable_mas(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 connsw = rd32(E1000_CONNSW);
+
+	/* configure for SerDes media detect */
+	if ((hw->phy.media_type == e1000_media_type_copper) &&
+	    (!(connsw & E1000_CONNSW_SERDESD))) {
+		connsw |= E1000_CONNSW_ENRGSRC;
+		connsw |= E1000_CONNSW_AUTOSENSE_EN;
+		wr32(E1000_CONNSW, connsw);
+		wrfl();
+	}
+}
+
+void igb_reset(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_mac_info *mac = &hw->mac;
+	struct e1000_fc_info *fc = &hw->fc;
+	u32 pba = 0, tx_space, min_tx_space, min_rx_space, hwm;
+
+	/* Repartition the PBA for MTUs greater than 9k.
+	 * CTRL.RST is required for the change to take effect.
+	 */
+	switch (mac->type) {
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_82580:
+		pba = rd32(E1000_RXPBS);
+		pba = igb_rxpbs_adjust_82580(pba);
+		break;
+	case e1000_82576:
+		pba = rd32(E1000_RXPBS);
+		pba &= E1000_RXPBS_SIZE_MASK_82576;
+		break;
+	case e1000_82575:
+	case e1000_i210:
+	case e1000_i211:
+	default:
+		pba = E1000_PBA_34K;
+		break;
+	}
+
+	if ((adapter->max_frame_size > ETH_FRAME_LEN + ETH_FCS_LEN) &&
+	    (mac->type < e1000_82576)) {
+		/* adjust PBA for jumbo frames */
+		wr32(E1000_PBA, pba);
+
+		/* To maintain wire speed transmits, the Tx FIFO should be
+		 * large enough to accommodate two full transmit packets,
+		 * rounded up to the next 1KB and expressed in KB.  Likewise,
+		 * the Rx FIFO should be large enough to accommodate at least
+		 * one full receive packet and is similarly rounded up and
+		 * expressed in KB.
+		 */
+		pba = rd32(E1000_PBA);
+		/* the upper 16 bits hold the Tx packet buffer size in KB */
+		tx_space = pba >> 16;
+		/* the lower 16 bits hold the Rx packet buffer size in KB */
+		pba &= 0xffff;
+		/* the Tx FIFO also stores 16 bytes of information about the
+		 * Tx packet; don't include the Ethernet FCS because the
+		 * hardware appends it
+		 */
+		min_tx_space = (adapter->max_frame_size +
+				sizeof(union e1000_adv_tx_desc) -
+				ETH_FCS_LEN) * 2;
+		min_tx_space = ALIGN(min_tx_space, 1024);
+		min_tx_space >>= 10;
+		/* software strips receive CRC, so leave room for it */
+		min_rx_space = adapter->max_frame_size;
+		min_rx_space = ALIGN(min_rx_space, 1024);
+		min_rx_space >>= 10;
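+
+		/* Worked example (hypothetical 9018-byte jumbo frame,
+		 * 16-byte advanced Tx descriptor):
+		 * min_tx_space = ALIGN((9018 + 16 - 4) * 2, 1024) >> 10
+		 *              = 18432 >> 10 = 18 KB,
+		 * min_rx_space = ALIGN(9018, 1024) >> 10 = 9 KB.
+		 */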
+
+		/* If current Tx allocation is less than the min Tx FIFO size,
+		 * and the min Tx FIFO size is less than the current Rx FIFO
+		 * allocation, take space away from current Rx allocation
+		 */
+		if (tx_space < min_tx_space &&
+		    ((min_tx_space - tx_space) < pba)) {
+			pba = pba - (min_tx_space - tx_space);
+
+			/* if short on Rx space, Rx wins and must trump Tx
+			 * adjustment
+			 */
+			if (pba < min_rx_space)
+				pba = min_rx_space;
+		}
+		wr32(E1000_PBA, pba);
+	}
+
+	/* flow control settings */
+	/* The high water mark must be low enough to fit one full frame
+	 * (or the size used for early receive) above it in the Rx FIFO.
+	 * Set it to the lower of:
+	 * - 90% of the Rx FIFO size, or
+	 * - the full Rx FIFO size minus two full frames (as computed below)
+	 */
+	hwm = min(((pba << 10) * 9 / 10),
+			((pba << 10) - 2 * adapter->max_frame_size));
+
+	fc->high_water = hwm & 0xFFFFFFF0;	/* 16-byte granularity */
+	fc->low_water = fc->high_water - 16;
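+	/* Worked example (hypothetical): pba = 34 KB and a 1522-byte max
+	 * frame give hwm = min(34816 * 9 / 10, 34816 - 2 * 1522)
+	 *               = min(31334, 31772) = 31334,
+	 * so fc->high_water = 31328 and fc->low_water = 31312.
+	 */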
+	fc->pause_time = 0xFFFF;
+	fc->send_xon = 1;
+	fc->current_mode = fc->requested_mode;
+
+	/* Allow time for pending master requests to run */
+	hw->mac.ops.reset_hw(hw);
+	wr32(E1000_WUC, 0);
+
+	if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+		/* need to resetup here after media swap */
+		adapter->ei.get_invariants(hw);
+		adapter->flags &= ~IGB_FLAG_MEDIA_RESET;
+	}
+	if ((mac->type == e1000_82575) &&
+	    (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+		igb_enable_mas(adapter);
+	}
+	if (hw->mac.ops.init_hw(hw))
+		dev_err(&pdev->dev, "Hardware Error\n");
+
+	/* Flow control settings reset on hardware reset, so guarantee flow
+	 * control is off when forcing speed.
+	 */
+	if (!hw->mac.autoneg)
+		igb_force_mac_fc(hw);
+
+	igb_init_dmac(adapter, pba);
+#ifdef CONFIG_IGB_HWMON
+	/* Re-initialize the thermal sensor on i350 devices. */
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (mac->type == e1000_i350 && hw->bus.func == 0) {
+			/* If present, re-initialize the external thermal sensor
+			 * interface.
+			 */
+			if (adapter->ets)
+				mac->ops.init_thermal_sensor_thresh(hw);
+		}
+	}
+#endif
+	/* Re-establish EEE setting */
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		switch (mac->type) {
+		case e1000_i350:
+		case e1000_i210:
+		case e1000_i211:
+			igb_set_eee_i350(hw, true, true);
+			break;
+		case e1000_i354:
+			igb_set_eee_i354(hw, true, true);
+			break;
+		default:
+			break;
+		}
+	}
+	if (!rtnetif_running(adapter->netdev))
+		igb_power_down_link(adapter);
+
+	igb_update_mng_vlan(adapter);
+
+	/* Enable h/w to recognize an 802.1Q VLAN Ethernet packet */
+	wr32(E1000_VET, ETHERNET_IEEE_VLAN_TYPE);
+
+	igb_get_phy_info(hw);
+}
+
+
+/**
+ * igb_set_fw_version - Configure version string for ethtool
+ * @adapter: adapter struct
+ **/
+void igb_set_fw_version(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_fw_version fw;
+
+	igb_get_fw_version(hw, &fw);
+
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		if (!(igb_get_flash_presence_i210(hw))) {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%2d.%2d-%d",
+				 fw.invm_major, fw.invm_minor,
+				 fw.invm_img_type);
+			break;
+		}
+		fallthrough;
+	default:
+		/* if option is rom valid, display its version too */
+		if (fw.or_valid) {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%d.%d, 0x%08x, %d.%d.%d",
+				 fw.eep_major, fw.eep_minor, fw.etrack_id,
+				 fw.or_major, fw.or_build, fw.or_patch);
+		/* no option rom */
+		} else if (fw.etrack_id != 0x0000) {
+			snprintf(adapter->fw_version,
+			    sizeof(adapter->fw_version),
+			    "%d.%d, 0x%08x",
+			    fw.eep_major, fw.eep_minor, fw.etrack_id);
+		} else {
+			snprintf(adapter->fw_version,
+				 sizeof(adapter->fw_version),
+				 "%d.%d.%d",
+				 fw.eep_major, fw.eep_minor, fw.eep_build);
+		}
+		break;
+	}
+}
+
+/**
+ * igb_init_mas - init Media Autosense feature if enabled in the NVM
+ *
+ * @adapter: adapter struct
+ **/
+static void igb_init_mas(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u16 eeprom_data;
+
+	hw->nvm.ops.read(hw, NVM_COMPAT, 1, &eeprom_data);
+	switch (hw->bus.func) {
+	case E1000_FUNC_0:
+		if (eeprom_data & IGB_MAS_ENABLE_0) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_1:
+		if (eeprom_data & IGB_MAS_ENABLE_1) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_2:
+		if (eeprom_data & IGB_MAS_ENABLE_2) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	case E1000_FUNC_3:
+		if (eeprom_data & IGB_MAS_ENABLE_3) {
+			adapter->flags |= IGB_FLAG_MAS_ENABLE;
+			rtdev_info(adapter->netdev,
+				"MAS: Enabling Media Autosense for port %d\n",
+				hw->bus.func);
+		}
+		break;
+	default:
+		/* Shouldn't get here */
+		rtdev_err(adapter->netdev,
+			"MAS: Invalid port configuration, returning\n");
+		break;
+	}
+}
+
+static dma_addr_t igb_map_rtskb(struct rtnet_device *netdev,
+				struct rtskb *skb)
+{
+	struct igb_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+	dma_addr_t addr;
+
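+	/* The rtskb buffer is mapped once up front and stays mapped for its
+	 * lifetime (see igb_unmap_rtskb() below); DMA_BIDIRECTIONAL covers
+	 * both the Rx and Tx paths the buffer may take.
+	 */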
+	addr = dma_map_single(dev, skb->buf_start, RTSKB_SIZE,
+			      DMA_BIDIRECTIONAL);
+	if (dma_mapping_error(dev, addr)) {
+		dev_err(dev, "DMA map failed\n");
+		return RTSKB_UNMAPPED;
+	}
+	return addr;
+}
+
+static void igb_unmap_rtskb(struct rtnet_device *netdev,
+			      struct rtskb *skb)
+{
+	struct igb_adapter *adapter = netdev->priv;
+	struct device *dev = &adapter->pdev->dev;
+
+	dma_unmap_single(dev, skb->buf_dma_addr, RTSKB_SIZE,
+			 DMA_BIDIRECTIONAL);
+}
+
+/**
+ *  igb_probe - Device Initialization Routine
+ *  @pdev: PCI device information struct
+ *  @ent: entry in igb_pci_tbl
+ *
+ *  Returns 0 on success, negative on failure
+ *
+ *  igb_probe initializes an adapter identified by a pci_dev structure.
+ *  The OS initialization, configuring of the adapter private structure,
+ *  and a hardware reset occur.
+ **/
+static int igb_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct rtnet_device *netdev;
+	struct igb_adapter *adapter;
+	struct e1000_hw *hw;
+	u16 eeprom_data = 0;
+	s32 ret_val;
+	static int global_quad_port_a; /* global quad port a indication */
+	const struct e1000_info *ei = igb_info_tbl[ent->driver_data];
+	int err, pci_using_dac;
+	u8 part_str[E1000_PBANUM_LENGTH];
+
+	/* Catch broken hardware that put the wrong VF device ID in
+	 * the PCIe SR-IOV capability.
+	 */
+	if (pdev->is_virtfn) {
+		WARN(1, KERN_ERR "%s (%hx:%hx) should not be a VF!\n",
+			pci_name(pdev), pdev->vendor, pdev->device);
+		return -EINVAL;
+	}
+
+	err = pci_enable_device_mem(pdev);
+	if (err)
+		return err;
+
+	pci_using_dac = 0;
+	err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
+	if (!err) {
+		pci_using_dac = 1;
+	} else {
+		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
+		if (err) {
+			dev_err(&pdev->dev,
+				"No usable DMA configuration, aborting\n");
+			goto err_dma;
+		}
+	}
+
+	err = pci_request_selected_regions(pdev, pci_select_bars(pdev,
+					   IORESOURCE_MEM),
+					   igb_driver_name);
+	if (err)
+		goto err_pci_reg;
+
+	pci_enable_pcie_error_reporting(pdev);
+
+	pci_set_master(pdev);
+	pci_save_state(pdev);
+
+	err = -ENOMEM;
+	netdev = rt_alloc_etherdev(sizeof(*adapter),
+				2 * IGB_DEFAULT_RXD + IGB_DEFAULT_TXD);
+	if (!netdev)
+		goto err_alloc_etherdev;
+
+	rtdev_alloc_name(netdev, "rteth%d");
+	rt_rtdev_connect(netdev, &RTDEV_manager);
+
+	netdev->vers = RTDEV_VERS_2_0;
+	netdev->sysbind = &pdev->dev;
+
+	pci_set_drvdata(pdev, netdev);
+	adapter = rtnetdev_priv(netdev);
+	adapter->netdev = netdev;
+	adapter->pdev = pdev;
+	hw = &adapter->hw;
+	hw->back = adapter;
+
+	err = -EIO;
+	hw->hw_addr = pci_iomap(pdev, 0, 0);
+	if (!hw->hw_addr)
+		goto err_ioremap;
+
+	netdev->open = igb_open;
+	netdev->stop = igb_close;
+	netdev->hard_start_xmit = igb_xmit_frame;
+	netdev->get_stats = igb_get_stats;
+	netdev->map_rtskb = igb_map_rtskb;
+	netdev->unmap_rtskb = igb_unmap_rtskb;
+	netdev->do_ioctl = igb_ioctl;
+#if 0
+	netdev->set_multicast_list = igb_set_multi;
+	netdev->set_mac_address = igb_set_mac;
+	netdev->change_mtu = igb_change_mtu;
+
+	// No ethtool support for now
+	igb_set_ethtool_ops(netdev);
+	netdev->watchdog_timeo = 5 * HZ;
+#endif
+
+	strncpy(netdev->name, pci_name(pdev), sizeof(netdev->name) - 1);
+
+	netdev->mem_start = pci_resource_start(pdev, 0);
+	netdev->mem_end = pci_resource_end(pdev, 0);
+
+	/* PCI config space info */
+	hw->vendor_id = pdev->vendor;
+	hw->device_id = pdev->device;
+	hw->revision_id = pdev->revision;
+	hw->subsystem_vendor_id = pdev->subsystem_vendor;
+	hw->subsystem_device_id = pdev->subsystem_device;
+
+	/* Copy the default MAC, PHY and NVM function pointers */
+	memcpy(&hw->mac.ops, ei->mac_ops, sizeof(hw->mac.ops));
+	memcpy(&hw->phy.ops, ei->phy_ops, sizeof(hw->phy.ops));
+	memcpy(&hw->nvm.ops, ei->nvm_ops, sizeof(hw->nvm.ops));
+	/* Initialize skew-specific constants */
+	err = ei->get_invariants(hw);
+	if (err)
+		goto err_sw_init;
+
+	/* setup the private structure */
+	err = igb_sw_init(adapter);
+	if (err)
+		goto err_sw_init;
+
+	igb_get_bus_info_pcie(hw);
+
+	hw->phy.autoneg_wait_to_complete = false;
+
+	/* Copper options */
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		hw->phy.mdix = AUTO_ALL_MODES;
+		hw->phy.disable_polarity_correction = false;
+		hw->phy.ms_type = e1000_ms_hw_default;
+	}
+
+	if (igb_check_reset_block(hw))
+		dev_info(&pdev->dev,
+			"PHY reset is blocked due to SOL/IDER session.\n");
+
+	/* features is initialized to 0 in the allocation; it might have
+	 * bits set by igb_sw_init, so we should OR them in rather than
+	 * assign.
+	 */
+	netdev->features |= NETIF_F_SG |
+			    NETIF_F_IP_CSUM |
+			    NETIF_F_IPV6_CSUM |
+			    NETIF_F_TSO |
+			    NETIF_F_TSO6 |
+			    NETIF_F_RXHASH |
+			    NETIF_F_RXCSUM |
+			    NETIF_F_HW_VLAN_CTAG_RX |
+			    NETIF_F_HW_VLAN_CTAG_TX;
+
+#if 0
+	/* set this bit last since it cannot be part of hw_features */
+	netdev->features |= NETIF_F_HW_VLAN_CTAG_FILTER;
+#endif
+
+	netdev->priv_flags |= IFF_SUPP_NOFCS;
+
+	if (pci_using_dac)
+		netdev->features |= NETIF_F_HIGHDMA;
+
+	netdev->priv_flags |= IFF_UNICAST_FLT;
+
+	adapter->en_mng_pt = igb_enable_mng_pass_thru(hw);
+
+	/* before reading the NVM, reset the controller to put the device in a
+	 * known good starting state
+	 */
+	hw->mac.ops.reset_hw(hw);
+
+	/* make sure the NVM is good; i211/i210 parts can have special NVM
+	 * that doesn't contain a checksum
+	 */
+	switch (hw->mac.type) {
+	case e1000_i210:
+	case e1000_i211:
+		if (igb_get_flash_presence_i210(hw)) {
+			if (hw->nvm.ops.validate(hw) < 0) {
+				dev_err(&pdev->dev,
+					"The NVM Checksum Is Not Valid\n");
+				err = -EIO;
+				goto err_eeprom;
+			}
+		}
+		break;
+	default:
+		if (hw->nvm.ops.validate(hw) < 0) {
+			dev_err(&pdev->dev, "The NVM Checksum Is Not Valid\n");
+			err = -EIO;
+			goto err_eeprom;
+		}
+		break;
+	}
+
+	/* copy the MAC address out of the NVM */
+	if (hw->mac.ops.read_mac_addr(hw))
+		dev_err(&pdev->dev, "NVM Read Error\n");
+
+	memcpy(netdev->dev_addr, hw->mac.addr, netdev->addr_len);
+
+	if (!is_valid_ether_addr(netdev->dev_addr)) {
+		dev_err(&pdev->dev, "Invalid MAC Address\n");
+		err = -EIO;
+		goto err_eeprom;
+	}
+
+	/* get firmware version for ethtool -i */
+	igb_set_fw_version(adapter);
+
+	/* configure RXPBSIZE and TXPBSIZE */
+	if (hw->mac.type == e1000_i210) {
+		wr32(E1000_RXPBS, I210_RXPBSIZE_DEFAULT);
+		wr32(E1000_TXPBS, I210_TXPBSIZE_DEFAULT);
+	}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+	timer_setup(&adapter->watchdog_timer, igb_watchdog, 0);
+	timer_setup(&adapter->phy_info_timer, igb_update_phy_info, 0);
+#else /* < 4.14 */
+	setup_timer(&adapter->watchdog_timer, igb_watchdog,
+		    (unsigned long) adapter);
+	setup_timer(&adapter->phy_info_timer, igb_update_phy_info,
+		    (unsigned long) adapter);
+#endif /* < 4.14 */
+
+	INIT_WORK(&adapter->reset_task, igb_reset_task);
+	INIT_WORK(&adapter->watchdog_task, igb_watchdog_task);
+	rtdm_nrtsig_init(&adapter->watchdog_nrtsig,
+			igb_nrtsig_watchdog, adapter);
+
+	/* Initialize link properties that are user-changeable */
+	adapter->fc_autoneg = true;
+	hw->mac.autoneg = true;
+	hw->phy.autoneg_advertised = 0x2f;
+
+	hw->fc.requested_mode = e1000_fc_default;
+	hw->fc.current_mode = e1000_fc_default;
+
+	igb_validate_mdi_setting(hw);
+
+	/* By default, support wake on port A */
+	if (hw->bus.func == 0)
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+	/* Check the NVM for wake support on non-port A ports */
+	if (hw->mac.type >= e1000_82580)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_A +
+				 NVM_82580_LAN_FUNC_OFFSET(hw->bus.func), 1,
+				 &eeprom_data);
+	else if (hw->bus.func == 1)
+		hw->nvm.ops.read(hw, NVM_INIT_CONTROL3_PORT_B, 1, &eeprom_data);
+
+	if (eeprom_data & IGB_EEPROM_APME)
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+
+	/* now that we have the eeprom settings, apply the special cases where
+	 * the eeprom may be wrong or the board simply won't support wake on
+	 * lan on a particular port
+	 */
+	switch (pdev->device) {
+	case E1000_DEV_ID_82575GB_QUAD_COPPER:
+		adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+		break;
+	case E1000_DEV_ID_82575EB_FIBER_SERDES:
+	case E1000_DEV_ID_82576_FIBER:
+	case E1000_DEV_ID_82576_SERDES:
+		/* Wake events only supported on port A for dual fiber
+		 * regardless of eeprom setting
+		 */
+		if (rd32(E1000_STATUS) & E1000_STATUS_FUNC_1)
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+		break;
+	case E1000_DEV_ID_82576_QUAD_COPPER:
+	case E1000_DEV_ID_82576_QUAD_COPPER_ET2:
+		/* if quad port adapter, disable WoL on all but port A */
+		if (global_quad_port_a != 0)
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+		else
+			adapter->flags |= IGB_FLAG_QUAD_PORT_A;
+		/* Reset for multiple quad port adapters */
+		if (++global_quad_port_a == 4)
+			global_quad_port_a = 0;
+		break;
+	default:
+		/* If the device can't wake, don't set software support */
+		if (!device_can_wakeup(&adapter->pdev->dev))
+			adapter->flags &= ~IGB_FLAG_WOL_SUPPORTED;
+	}
+
+	/* initialize the wol settings based on the eeprom settings */
+	if (adapter->flags & IGB_FLAG_WOL_SUPPORTED)
+		adapter->wol |= E1000_WUFC_MAG;
+
+	/* Some vendors want WoL disabled by default, but still supported */
+	if ((hw->mac.type == e1000_i350) &&
+	    (pdev->subsystem_vendor == PCI_VENDOR_ID_HP)) {
+		adapter->flags |= IGB_FLAG_WOL_SUPPORTED;
+		adapter->wol = 0;
+	}
+
+	device_set_wakeup_enable(&adapter->pdev->dev,
+				 adapter->flags & IGB_FLAG_WOL_SUPPORTED);
+
+	/* reset the hardware with the new settings */
+	igb_reset(adapter);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver.
+	 */
+	igb_get_hw_control(adapter);
+
+	strcpy(netdev->name, "rteth%d");
+	err = rt_register_rtnetdev(netdev);
+	if (err)
+		goto err_release_hw_control;
+
+	/* carrier off reporting is important to ethtool even BEFORE open */
+	rtnetif_carrier_off(netdev);
+
+#ifdef CONFIG_IGB_HWMON
+	/* Initialize the thermal sensor on i350 devices. */
+	if (hw->mac.type == e1000_i350 && hw->bus.func == 0) {
+		u16 ets_word;
+
+		/* Read the NVM to determine if this i350 device supports an
+		 * external thermal sensor.
+		 */
+		hw->nvm.ops.read(hw, NVM_ETS_CFG, 1, &ets_word);
+		if (ets_word != 0x0000 && ets_word != 0xFFFF)
+			adapter->ets = true;
+		else
+			adapter->ets = false;
+		if (igb_sysfs_init(adapter))
+			dev_err(&pdev->dev,
+				"failed to allocate sysfs resources\n");
+	} else {
+		adapter->ets = false;
+	}
+#endif
+	/* Check if Media Autosense is enabled */
+	adapter->ei = *ei;
+	if (hw->dev_spec._82575.mas_capable)
+		igb_init_mas(adapter);
+
+	dev_info(&pdev->dev, "Intel(R) Gigabit Ethernet Network Connection\n");
+	/* print bus type/speed/width info, not applicable to i354 */
+	if (hw->mac.type != e1000_i354) {
+		dev_info(&pdev->dev, "%s: (PCIe:%s:%s) %pM\n",
+			 netdev->name,
+			 ((hw->bus.speed == e1000_bus_speed_2500) ? "2.5Gb/s" :
+			  (hw->bus.speed == e1000_bus_speed_5000) ? "5.0Gb/s" :
+			   "unknown"),
+			 ((hw->bus.width == e1000_bus_width_pcie_x4) ?
+			  "Width x4" :
+			  (hw->bus.width == e1000_bus_width_pcie_x2) ?
+			  "Width x2" :
+			  (hw->bus.width == e1000_bus_width_pcie_x1) ?
+			  "Width x1" : "unknown"), netdev->dev_addr);
+	}
+
+	if ((hw->mac.type >= e1000_i210 ||
+	     igb_get_flash_presence_i210(hw))) {
+		ret_val = igb_read_part_string(hw, part_str,
+					       E1000_PBANUM_LENGTH);
+	} else {
+		ret_val = -E1000_ERR_INVM_VALUE_NOT_FOUND;
+	}
+
+	if (ret_val)
+		strcpy(part_str, "Unknown");
+	dev_info(&pdev->dev, "%s: PBA No: %s\n", netdev->name, part_str);
+	dev_info(&pdev->dev,
+		"Using %s interrupts. %d rx queue(s), %d tx queue(s)\n",
+		(adapter->flags & IGB_FLAG_HAS_MSIX) ? "MSI-X" :
+		(adapter->flags & IGB_FLAG_HAS_MSI) ? "MSI" : "legacy",
+		adapter->num_rx_queues, adapter->num_tx_queues);
+	if (hw->phy.media_type == e1000_media_type_copper) {
+		switch (hw->mac.type) {
+		case e1000_i350:
+		case e1000_i210:
+		case e1000_i211:
+			/* Enable EEE for internal copper PHY devices */
+			err = igb_set_eee_i350(hw, true, true);
+			if ((!err) &&
+			    (!hw->dev_spec._82575.eee_disable)) {
+				adapter->eee_advert =
+					MDIO_EEE_100TX | MDIO_EEE_1000T;
+				adapter->flags |= IGB_FLAG_EEE;
+			}
+			break;
+		case e1000_i354:
+			if ((rd32(E1000_CTRL_EXT) &
+			    E1000_CTRL_EXT_LINK_MODE_SGMII)) {
+				err = igb_set_eee_i354(hw, true, true);
+				if ((!err) &&
+					(!hw->dev_spec._82575.eee_disable)) {
+					adapter->eee_advert =
+					   MDIO_EEE_100TX | MDIO_EEE_1000T;
+					adapter->flags |= IGB_FLAG_EEE;
+				}
+			}
+			break;
+		default:
+			break;
+		}
+	}
+	pm_runtime_put_noidle(&pdev->dev);
+	return 0;
+
+err_release_hw_control:
+	igb_release_hw_control(adapter);
+	memset(&adapter->i2c_adap, 0, sizeof(adapter->i2c_adap));
+err_eeprom:
+	if (!igb_check_reset_block(hw))
+		igb_reset_phy(hw);
+
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+err_sw_init:
+	igb_clear_interrupt_scheme(adapter);
+	pci_iounmap(pdev, hw->hw_addr);
+err_ioremap:
+	rtdev_free(netdev);
+err_alloc_etherdev:
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+err_pci_reg:
+err_dma:
+	pci_disable_device(pdev);
+	return err;
+}
+
+/**
+ *  igb_remove_i2c - Cleanup  I2C interface
+ *  @adapter: pointer to adapter structure
+ **/
+static void igb_remove_i2c(struct igb_adapter *adapter)
+{
+	/* free the adapter bus structure */
+	i2c_del_adapter(&adapter->i2c_adap);
+}
+
+/**
+ *  igb_remove - Device Removal Routine
+ *  @pdev: PCI device information struct
+ *
+ *  igb_remove is called by the PCI subsystem to alert the driver
+ *  that it should release a PCI device.  This could be caused by a
+ *  Hot-Plug event, or because the driver is going to be removed from
+ *  memory.
+ **/
+static void igb_remove(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	rtdev_down(netdev);
+	igb_down(adapter);
+
+	pm_runtime_get_noresume(&pdev->dev);
+#ifdef CONFIG_IGB_HWMON
+	igb_sysfs_exit(adapter);
+#endif
+	igb_remove_i2c(adapter);
+	/* The watchdog timer may be rescheduled, so explicitly
+	 * prevent it from being rescheduled.
+	 */
+	del_timer_sync(&adapter->watchdog_timer);
+	del_timer_sync(&adapter->phy_info_timer);
+
+	cancel_work_sync(&adapter->reset_task);
+	cancel_work_sync(&adapter->watchdog_task);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igb_release_hw_control(adapter);
+
+	rt_rtdev_disconnect(netdev);
+	rt_unregister_rtnetdev(netdev);
+
+	igb_clear_interrupt_scheme(adapter);
+
+	pci_iounmap(pdev, hw->hw_addr);
+	if (hw->flash_address)
+		iounmap(hw->flash_address);
+	pci_release_selected_regions(pdev,
+				     pci_select_bars(pdev, IORESOURCE_MEM));
+
+	kfree(adapter->shadow_vfta);
+	rtdev_free(netdev);
+
+	pci_disable_pcie_error_reporting(pdev);
+
+	pci_disable_device(pdev);
+}
+
+/**
+ *  igb_probe_vfs - Initialize vf data storage and add VFs to pci config space
+ *  @adapter: board private structure to initialize
+ *
+ *  This function initializes the VF specific data storage and then attempts to
+ *  allocate the VFs.  The reason for ordering it this way is that it is much
+ *  more expensive time-wise to disable SR-IOV than it is to allocate and free
+ *  the memory for the VFs.
+ **/
+static void igb_probe_vfs(struct igb_adapter *adapter)
+{
+}
+
+static void igb_init_queue_configuration(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 max_rss_queues;
+
+	max_rss_queues = 1;
+	adapter->rss_queues = max_rss_queues;
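+
+	/* This port fixes the queue count at one, so the pairing check
+	 * below (rss_queues > max_rss_queues / 2) holds and, for most MAC
+	 * types, IGB_FLAG_QUEUE_PAIRS gets set: a single MSI-X vector then
+	 * serves the Tx/Rx pair, plus one vector for link status.
+	 */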
+
+	/* Determine if we need to pair queues. */
+	switch (hw->mac.type) {
+	case e1000_82575:
+	case e1000_i211:
+		/* Device supports enough interrupts without queue pairing. */
+		break;
+	case e1000_82576:
+		/* If VFs are going to be allocated with RSS queues then we
+		 * should pair the queues in order to conserve interrupts due
+		 * to limited supply.
+		 */
+		fallthrough;
+	case e1000_82580:
+	case e1000_i350:
+	case e1000_i354:
+	case e1000_i210:
+	default:
+		/* If rss_queues > half of max_rss_queues, pair the queues in
+		 * order to conserve interrupts due to limited supply.
+		 */
+		if (adapter->rss_queues > (max_rss_queues / 2))
+			adapter->flags |= IGB_FLAG_QUEUE_PAIRS;
+		break;
+	}
+}
+
+/**
+ *  igb_sw_init - Initialize general software structures (struct igb_adapter)
+ *  @adapter: board private structure to initialize
+ *
+ *  igb_sw_init initializes the Adapter private data structure.
+ *  Fields are initialized based on PCI device information and
+ *  OS network device settings (MTU size).
+ **/
+static int igb_sw_init(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &hw->bus.pci_cmd_word);
+
+	/* set default ring sizes */
+	adapter->tx_ring_count = IGB_DEFAULT_TXD;
+	adapter->rx_ring_count = IGB_DEFAULT_RXD;
+
+	/* set default ITR values */
+	if (InterruptThrottle) {
+		adapter->rx_itr_setting = IGB_DEFAULT_ITR;
+		adapter->tx_itr_setting = IGB_DEFAULT_ITR;
+	} else {
+		adapter->rx_itr_setting = IGB_MIN_ITR_USECS;
+		adapter->tx_itr_setting = IGB_MIN_ITR_USECS;
+	}
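+	/* With InterruptThrottle clear, the minimum ITR favors low
+	 * interrupt latency over moderation, which suits the real-time
+	 * use of this driver.
+	 */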
+
+	/* set default work limits */
+	adapter->tx_work_limit = IGB_DEFAULT_TX_WORK;
+
+	adapter->max_frame_size = netdev->mtu + ETH_HLEN + ETH_FCS_LEN +
+				  VLAN_HLEN;
+	adapter->min_frame_size = ETH_ZLEN + ETH_FCS_LEN;
+
+	spin_lock_init(&adapter->stats64_lock);
+
+	igb_init_queue_configuration(adapter);
+
+	/* Setup and initialize a copy of the hw vlan table array */
+	adapter->shadow_vfta = kcalloc(E1000_VLAN_FILTER_TBL_SIZE, sizeof(u32),
+				       GFP_ATOMIC);
+
+	/* This call may decrease the number of queues */
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	igb_probe_vfs(adapter);
+
+	/* Explicitly disable IRQ since the NIC can be in any state. */
+	igb_irq_disable(adapter);
+
+	if (hw->mac.type >= e1000_i350)
+		adapter->flags &= ~IGB_FLAG_DMAC;
+
+	set_bit(__IGB_DOWN, &adapter->state);
+	return 0;
+}
+
+/**
+ *  igb_open - Called when a network interface is made active
+ *  @netdev: network interface device structure
+ *
+ *  Returns 0 on success, negative value on failure
+ *
+ *  The open entry point is called when a network interface is made
+ *  active by the system (IFF_UP).  At this point all resources needed
+ *  for transmit and receive operations are allocated, the interrupt
+ *  handler is registered with the OS, the watchdog timer is started,
+ *  and the stack is notified that the interface is ready.
+ **/
+static int __igb_open(struct rtnet_device *netdev, bool resuming)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	int err;
+
+	/* disallow open during test */
+	if (test_bit(__IGB_TESTING, &adapter->state)) {
+		WARN_ON(resuming);
+		return -EBUSY;
+	}
+
+	if (!resuming)
+		pm_runtime_get_sync(&pdev->dev);
+
+	rtnetif_carrier_off(netdev);
+
+	/* allocate transmit descriptors */
+	err = igb_setup_all_tx_resources(adapter);
+	if (err)
+		goto err_setup_tx;
+
+	/* allocate receive descriptors */
+	err = igb_setup_all_rx_resources(adapter);
+	if (err)
+		goto err_setup_rx;
+
+	igb_power_up_link(adapter);
+
+	/* before we allocate an interrupt, we must be ready to handle it.
+	 * Setting DEBUG_SHIRQ in the kernel makes it fire an interrupt
+	 * as soon as we call pci_request_irq, so we have to setup our
+	 * clean_rx handler before we do so.
+	 */
+	igb_configure(adapter);
+
+	err = igb_request_irq(adapter);
+	if (err)
+		goto err_req_irq;
+
+	/* From here on the code is the same as igb_up() */
+	clear_bit(__IGB_DOWN, &adapter->state);
+
+	/* Clear any pending interrupts. */
+	rd32(E1000_ICR);
+
+	igb_irq_enable(adapter);
+
+	rtnetif_start_queue(netdev);
+
+	if (!resuming)
+		pm_runtime_put(&pdev->dev);
+
+	/* start the watchdog. */
+	hw->mac.get_link_status = 1;
+	schedule_work(&adapter->watchdog_task);
+
+	return 0;
+
+err_req_irq:
+	igb_release_hw_control(adapter);
+	igb_power_down_link(adapter);
+	igb_free_all_rx_resources(adapter);
+err_setup_rx:
+	igb_free_all_tx_resources(adapter);
+err_setup_tx:
+	igb_reset(adapter);
+	if (!resuming)
+		pm_runtime_put(&pdev->dev);
+
+	return err;
+}
+
+static int igb_open(struct rtnet_device *netdev)
+{
+	return __igb_open(netdev, false);
+}
+
+/**
+ *  igb_close - Disables a network interface
+ *  @netdev: network interface device structure
+ *
+ *  Returns 0, this is not allowed to fail
+ *
+ *  The close entry point is called when an interface is de-activated
+ *  by the OS.  The hardware is still under the driver's control, but
+ *  needs to be disabled.  A global MAC reset is issued to stop the
+ *  hardware, and all transmit and receive resources are freed.
+ **/
+static int __igb_close(struct rtnet_device *netdev, bool suspending)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct pci_dev *pdev = adapter->pdev;
+
+	WARN_ON(test_bit(__IGB_RESETTING, &adapter->state));
+
+	if (!suspending)
+		pm_runtime_get_sync(&pdev->dev);
+
+	igb_down(adapter);
+	igb_free_irq(adapter);
+
+	rt_stack_disconnect(netdev);
+
+	igb_free_all_tx_resources(adapter);
+	igb_free_all_rx_resources(adapter);
+
+	if (!suspending)
+		pm_runtime_put_sync(&pdev->dev);
+	return 0;
+}
+
+static int igb_close(struct rtnet_device *netdev)
+{
+	return __igb_close(netdev, false);
+}
+
+/**
+ *  igb_setup_tx_resources - allocate Tx resources (Descriptors)
+ *  @tx_ring: tx descriptor ring (for a specific queue) to setup
+ *
+ *  Return 0 on success, negative on failure
+ **/
+int igb_setup_tx_resources(struct igb_ring *tx_ring)
+{
+	struct device *dev = tx_ring->dev;
+	int size;
+
+	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+
+	tx_ring->tx_buffer_info = vzalloc(size);
+	if (!tx_ring->tx_buffer_info)
+		goto err;
+
+	/* round up to nearest 4K */
+	tx_ring->size = tx_ring->count * sizeof(union e1000_adv_tx_desc);
+	tx_ring->size = ALIGN(tx_ring->size, 4096);
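+	/* e.g. a default-sized ring of 256 descriptors, at 16 bytes per
+	 * descriptor, comes to exactly 4096 bytes, i.e. one page
+	 */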
+
+	tx_ring->desc = dma_alloc_coherent(dev, tx_ring->size,
+					   &tx_ring->dma, GFP_KERNEL);
+	if (!tx_ring->desc)
+		goto err;
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+
+	return 0;
+
+err:
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Tx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ *  igb_setup_all_tx_resources - wrapper to allocate Tx resources
+ *				 (Descriptors) for all queues
+ *  @adapter: board private structure
+ *
+ *  Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_tx_resources(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		err = igb_setup_tx_resources(adapter->tx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Tx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_tx_resources(adapter->tx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ *  igb_setup_tctl - configure the transmit control registers
+ *  @adapter: Board private structure
+ **/
+void igb_setup_tctl(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 tctl;
+
+	/* disable queue 0 which is enabled by default on 82575 and 82576 */
+	wr32(E1000_TXDCTL(0), 0);
+
+	/* Program the Transmit Control Register */
+	tctl = rd32(E1000_TCTL);
+	tctl &= ~E1000_TCTL_CT;
+	tctl |= E1000_TCTL_PSP | E1000_TCTL_RTLC |
+		(E1000_COLLISION_THRESHOLD << E1000_CT_SHIFT);
+
+	igb_config_collision_dist(hw);
+
+	/* Enable transmits */
+	tctl |= E1000_TCTL_EN;
+
+	wr32(E1000_TCTL, tctl);
+}
+
+/**
+ *  igb_configure_tx_ring - Configure transmit ring after Reset
+ *  @adapter: board private structure
+ *  @ring: tx ring to configure
+ *
+ *  Configure a transmit ring after a reset.
+ **/
+void igb_configure_tx_ring(struct igb_adapter *adapter,
+			   struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 txdctl = 0;
+	u64 tdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+
+	/* disable the queue */
+	wr32(E1000_TXDCTL(reg_idx), 0);
+	wrfl();
+	mdelay(10);
+
+	wr32(E1000_TDLEN(reg_idx),
+	     ring->count * sizeof(union e1000_adv_tx_desc));
+	wr32(E1000_TDBAL(reg_idx),
+	     tdba & 0x00000000ffffffffULL);
+	wr32(E1000_TDBAH(reg_idx), tdba >> 32);
+
+	ring->tail = hw->hw_addr + E1000_TDT(reg_idx);
+	wr32(E1000_TDH(reg_idx), 0);
+	writel(0, ring->tail);
+
+	txdctl |= IGB_TX_PTHRESH;
+	txdctl |= IGB_TX_HTHRESH << 8;
+	txdctl |= IGB_TX_WTHRESH << 16;
+
+	txdctl |= E1000_TXDCTL_QUEUE_ENABLE;
+	wr32(E1000_TXDCTL(reg_idx), txdctl);
+}
+
+/**
+ *  igb_configure_tx - Configure transmit Unit after Reset
+ *  @adapter: board private structure
+ *
+ *  Configure the Tx unit of the MAC after a reset.
+ **/
+static void igb_configure_tx(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		igb_configure_tx_ring(adapter, adapter->tx_ring[i]);
+}
+
+/**
+ *  igb_setup_rx_resources - allocate Rx resources (Descriptors)
+ *  @rx_ring: Rx descriptor ring (for a specific queue) to setup
+ *
+ *  Returns 0 on success, negative on failure
+ **/
+int igb_setup_rx_resources(struct igb_ring *rx_ring)
+{
+	struct device *dev = rx_ring->dev;
+	int size;
+
+	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+
+	rx_ring->rx_buffer_info = vzalloc(size);
+	if (!rx_ring->rx_buffer_info)
+		goto err;
+
+	/* Round up to nearest 4K */
+	rx_ring->size = rx_ring->count * sizeof(union e1000_adv_rx_desc);
+	rx_ring->size = ALIGN(rx_ring->size, 4096);
+
+	rx_ring->desc = dma_alloc_coherent(dev, rx_ring->size,
+					   &rx_ring->dma, GFP_KERNEL);
+	if (!rx_ring->desc)
+		goto err;
+
+	rx_ring->next_to_alloc = 0;
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+
+	return 0;
+
+err:
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+	dev_err(dev, "Unable to allocate memory for the Rx descriptor ring\n");
+	return -ENOMEM;
+}
+
+/**
+ *  igb_setup_all_rx_resources - wrapper to allocate Rx resources
+ *				 (Descriptors) for all queues
+ *  @adapter: board private structure
+ *
+ *  Return 0 on success, negative on failure
+ **/
+static int igb_setup_all_rx_resources(struct igb_adapter *adapter)
+{
+	struct pci_dev *pdev = adapter->pdev;
+	int i, err = 0;
+
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		err = igb_setup_rx_resources(adapter->rx_ring[i]);
+		if (err) {
+			dev_err(&pdev->dev,
+				"Allocation for Rx Queue %u failed\n", i);
+			for (i--; i >= 0; i--)
+				igb_free_rx_resources(adapter->rx_ring[i]);
+			break;
+		}
+	}
+
+	return err;
+}
+
+/**
+ *  igb_setup_mrqc - configure the multiple receive queue control registers
+ *  @adapter: Board private structure
+ **/
+static void igb_setup_mrqc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 mrqc, rxcsum;
+	u32 j, num_rx_queues;
+	u32 rss_key[10];
+
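+	/* program a 40-byte random RSS hash key into the ten 32-bit
+	 * RSSRK registers
+	 */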
+	get_random_bytes(rss_key, sizeof(rss_key));
+	for (j = 0; j < 10; j++)
+		wr32(E1000_RSSRK(j), rss_key[j]);
+
+	num_rx_queues = adapter->rss_queues;
+
+	switch (hw->mac.type) {
+	case e1000_82576:
+		/* 82576 supports 2 RSS queues for SR-IOV */
+		break;
+	default:
+		break;
+	}
+
+	if (adapter->rss_indir_tbl_init != num_rx_queues) {
+		for (j = 0; j < IGB_RETA_SIZE; j++)
+			adapter->rss_indir_tbl[j] =
+			(j * num_rx_queues) / IGB_RETA_SIZE;
+		adapter->rss_indir_tbl_init = num_rx_queues;
+	}
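+	/* Example: with num_rx_queues = 1 (this port's only configuration),
+	 * every one of the IGB_RETA_SIZE indirection entries maps to
+	 * queue 0.
+	 */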
+
+	/* Disable raw packet checksumming so that RSS hash is placed in
+	 * descriptor on writeback.  No need to enable TCP/UDP/IP checksum
+	 * offloads as they are enabled by default
+	 */
+	rxcsum = rd32(E1000_RXCSUM);
+	rxcsum |= E1000_RXCSUM_PCSD;
+
+	if (adapter->hw.mac.type >= e1000_82576)
+		/* Enable Receive Checksum Offload for SCTP */
+		rxcsum |= E1000_RXCSUM_CRCOFL;
+
+	/* Don't need to set TUOFL or IPOFL, they default to 1 */
+	wr32(E1000_RXCSUM, rxcsum);
+
+	/* Generate RSS hash based on packet types, TCP/UDP
+	 * port numbers and/or IPv4/v6 src and dst addresses
+	 */
+	mrqc = E1000_MRQC_RSS_FIELD_IPV4 |
+	       E1000_MRQC_RSS_FIELD_IPV4_TCP |
+	       E1000_MRQC_RSS_FIELD_IPV6 |
+	       E1000_MRQC_RSS_FIELD_IPV6_TCP |
+	       E1000_MRQC_RSS_FIELD_IPV6_TCP_EX;
+
+	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV4_UDP)
+		mrqc |= E1000_MRQC_RSS_FIELD_IPV4_UDP;
+	if (adapter->flags & IGB_FLAG_RSS_FIELD_IPV6_UDP)
+		mrqc |= E1000_MRQC_RSS_FIELD_IPV6_UDP;
+
+	/* If VMDq is enabled then we set the appropriate mode for that, else
+	 * we default to RSS so that an RSS hash is calculated per packet even
+	 * if we are only using one queue
+	 */
+	if (hw->mac.type != e1000_i211)
+		mrqc |= E1000_MRQC_ENABLE_RSS_4Q;
+
+	wr32(E1000_MRQC, mrqc);
+}
+
+/**
+ *  igb_setup_rctl - configure the receive control registers
+ *  @adapter: Board private structure
+ **/
+void igb_setup_rctl(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 rctl;
+
+	rctl = rd32(E1000_RCTL);
+
+	rctl &= ~(3 << E1000_RCTL_MO_SHIFT);
+	rctl &= ~(E1000_RCTL_LBM_TCVR | E1000_RCTL_LBM_MAC);
+
+	rctl |= E1000_RCTL_EN | E1000_RCTL_BAM | E1000_RCTL_RDMTS_HALF |
+		(hw->mac.mc_filter_type << E1000_RCTL_MO_SHIFT);
+
+	/* enable stripping of CRC. It's unlikely this will break BMC
+	 * redirection as it did with e1000. Newer features require
+	 * that the HW strips the CRC.
+	 */
+	rctl |= E1000_RCTL_SECRC;
+
+	/* disable store bad packets and clear size bits. */
+	rctl &= ~(E1000_RCTL_SBP | E1000_RCTL_SZ_256);
+
+	/* enable LPE to prevent packets larger than max_frame_size */
+	rctl |= E1000_RCTL_LPE;
+
+	/* disable queue 0 to prevent tail write w/o re-config */
+	wr32(E1000_RXDCTL(0), 0);
+
+	/* This is useful for sniffing bad packets. */
+	if (adapter->netdev->features & NETIF_F_RXALL) {
+		/* UPE and MPE will be handled by normal PROMISC logic
+		 * in e1000e_set_rx_mode
+		 */
+		rctl |= (E1000_RCTL_SBP | /* Receive bad packets */
+			 E1000_RCTL_BAM | /* RX All Bcast Pkts */
+			 E1000_RCTL_PMCF); /* RX All MAC Ctrl Pkts */
+
+		rctl &= ~(E1000_RCTL_VFE | /* Disable VLAN filter */
+			  E1000_RCTL_DPF | /* Allow filtered pause */
+			  E1000_RCTL_CFIEN); /* Dis VLAN CFIEN Filter */
+		/* Do not mess with E1000_CTRL_VME, it affects transmit as well,
+		 * and that breaks VLANs.
+		 */
+	}
+
+	wr32(E1000_RCTL, rctl);
+}
+
+/**
+ *  igb_rlpml_set - set maximum receive packet size
+ *  @adapter: board private structure
+ *
+ *  Configure maximum receivable packet size.
+ **/
+static void igb_rlpml_set(struct igb_adapter *adapter)
+{
+	u32 max_frame_size = adapter->max_frame_size;
+	struct e1000_hw *hw = &adapter->hw;
+
+	wr32(E1000_RLPML, max_frame_size);
+}
+
+static inline void igb_set_vmolr(struct igb_adapter *adapter,
+				 int vfn, bool aupe)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 vmolr;
+
+	/* This register exists only on 82576 and newer, so on older
+	 * hardware we exit and do nothing
+	 */
+	if (hw->mac.type < e1000_82576)
+		return;
+
+	vmolr = rd32(E1000_VMOLR(vfn));
+	vmolr |= E1000_VMOLR_STRVLAN; /* Strip vlan tags */
+	if (hw->mac.type == e1000_i350) {
+		u32 dvmolr;
+
+		dvmolr = rd32(E1000_DVMOLR(vfn));
+		dvmolr |= E1000_DVMOLR_STRVLAN;
+		wr32(E1000_DVMOLR(vfn), dvmolr);
+	}
+	if (aupe)
+		vmolr |= E1000_VMOLR_AUPE; /* Accept untagged packets */
+	else
+		vmolr &= ~(E1000_VMOLR_AUPE); /* Tagged packets ONLY */
+
+	/* clear all bits that might not be set */
+	vmolr &= ~(E1000_VMOLR_BAM | E1000_VMOLR_RSSE);
+
+	if (adapter->rss_queues > 1)
+		vmolr |= E1000_VMOLR_RSSE; /* enable RSS */
+	/* for VMDq only allow the VFs and pool 0 to accept broadcast and
+	 * multicast packets
+	 */
+	vmolr |= E1000_VMOLR_BAM; /* Accept broadcast */
+
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
+/**
+ *  igb_configure_rx_ring - Configure a receive ring after Reset
+ *  @adapter: board private structure
+ *  @ring: receive ring to be configured
+ *
+ *  Configure the Rx unit of the MAC after a reset.
+ **/
+void igb_configure_rx_ring(struct igb_adapter *adapter,
+			   struct igb_ring *ring)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u64 rdba = ring->dma;
+	int reg_idx = ring->reg_idx;
+	u32 srrctl = 0, rxdctl = 0;
+
+	ring->rx_buffer_len = max_t(u32, adapter->max_frame_size,
+				MAXIMUM_ETHERNET_VLAN_SIZE);
+
+	/* disable the queue */
+	wr32(E1000_RXDCTL(reg_idx), 0);
+
+	/* Set DMA base address registers */
+	wr32(E1000_RDBAL(reg_idx),
+	     rdba & 0x00000000ffffffffULL);
+	wr32(E1000_RDBAH(reg_idx), rdba >> 32);
+	wr32(E1000_RDLEN(reg_idx),
+	     ring->count * sizeof(union e1000_adv_rx_desc));
+
+	/* initialize head and tail */
+	ring->tail = hw->hw_addr + E1000_RDT(reg_idx);
+	wr32(E1000_RDH(reg_idx), 0);
+	writel(0, ring->tail);
+
+	/* set descriptor configuration */
+	srrctl = IGB_RX_HDR_LEN << E1000_SRRCTL_BSIZEHDRSIZE_SHIFT;
+	srrctl |= IGB_RX_BUFSZ >> E1000_SRRCTL_BSIZEPKT_SHIFT;
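+	/* SRRCTL encodes the header buffer size in 64-byte units and the
+	 * packet buffer size in 1 KB units; the shifts above convert the
+	 * byte counts into those field encodings.
+	 */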
+	srrctl |= E1000_SRRCTL_DESCTYPE_ADV_ONEBUF;
+	if (hw->mac.type >= e1000_82580)
+		srrctl |= E1000_SRRCTL_TIMESTAMP;
+	/* Only set Drop Enable if we are supporting multiple queues */
+	if (adapter->num_rx_queues > 1)
+		srrctl |= E1000_SRRCTL_DROP_EN;
+
+	wr32(E1000_SRRCTL(reg_idx), srrctl);
+
+	/* set filtering for VMDQ pools */
+	igb_set_vmolr(adapter, reg_idx & 0x7, true);
+
+	rxdctl |= IGB_RX_PTHRESH;
+	rxdctl |= IGB_RX_HTHRESH << 8;
+	rxdctl |= IGB_RX_WTHRESH << 16;
+
+	/* enable receive descriptor fetching */
+	rxdctl |= E1000_RXDCTL_QUEUE_ENABLE;
+	wr32(E1000_RXDCTL(reg_idx), rxdctl);
+}
+
+/**
+ *  igb_configure_rx - Configure receive Unit after Reset
+ *  @adapter: board private structure
+ *
+ *  Configure the Rx unit of the MAC after a reset.
+ **/
+static void igb_configure_rx(struct igb_adapter *adapter)
+{
+	int i;
+
+	/* set the correct pool for the PF default MAC address in entry 0 */
+	igb_rar_set_qsel(adapter, adapter->hw.mac.addr, 0, 0);
+
+	/* Setup the HW Rx Head and Tail Descriptor Pointers and
+	 * the Base and Length of the Rx Descriptor Ring
+	 */
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		igb_configure_rx_ring(adapter, adapter->rx_ring[i]);
+}
+
+/**
+ *  igb_free_tx_resources - Free Tx Resources per Queue
+ *  @tx_ring: Tx descriptor ring for a specific queue
+ *
+ *  Free all transmit software resources
+ **/
+void igb_free_tx_resources(struct igb_ring *tx_ring)
+{
+	igb_clean_tx_ring(tx_ring);
+
+	vfree(tx_ring->tx_buffer_info);
+	tx_ring->tx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!tx_ring->desc)
+		return;
+
+	dma_free_coherent(tx_ring->dev, tx_ring->size,
+			  tx_ring->desc, tx_ring->dma);
+
+	tx_ring->desc = NULL;
+}
+
+/**
+ *  igb_free_all_tx_resources - Free Tx Resources for All Queues
+ *  @adapter: board private structure
+ *
+ *  Free all transmit software resources
+ **/
+static void igb_free_all_tx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		if (adapter->tx_ring[i])
+			igb_free_tx_resources(adapter->tx_ring[i]);
+}
+
+void igb_unmap_and_free_tx_resource(struct igb_ring *ring,
+				    struct igb_tx_buffer *tx_buffer)
+{
+	if (tx_buffer->skb) {
+		kfree_rtskb(tx_buffer->skb);
+		tx_buffer->skb = NULL;
+	}
+	tx_buffer->next_to_watch = NULL;
+	/* buffer_info must be completely set up in the transmit path */
+}
+
+/**
+ *  igb_clean_tx_ring - Free Tx Buffers
+ *  @tx_ring: ring to be cleaned
+ **/
+static void igb_clean_tx_ring(struct igb_ring *tx_ring)
+{
+	struct igb_tx_buffer *buffer_info;
+	unsigned long size;
+	u16 i;
+
+	if (!tx_ring->tx_buffer_info)
+		return;
+	/* Free all the Tx ring sk_buffs */
+
+	for (i = 0; i < tx_ring->count; i++) {
+		buffer_info = &tx_ring->tx_buffer_info[i];
+		igb_unmap_and_free_tx_resource(tx_ring, buffer_info);
+	}
+
+	size = sizeof(struct igb_tx_buffer) * tx_ring->count;
+	memset(tx_ring->tx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(tx_ring->desc, 0, tx_ring->size);
+
+	tx_ring->next_to_use = 0;
+	tx_ring->next_to_clean = 0;
+}
+
+/**
+ *  igb_clean_all_tx_rings - Free Tx Buffers for all queues
+ *  @adapter: board private structure
+ **/
+static void igb_clean_all_tx_rings(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_tx_queues; i++)
+		if (adapter->tx_ring[i])
+			igb_clean_tx_ring(adapter->tx_ring[i]);
+}
+
+/**
+ *  igb_free_rx_resources - Free Rx Resources
+ *  @rx_ring: ring to clean the resources from
+ *
+ *  Free all receive software resources
+ **/
+void igb_free_rx_resources(struct igb_ring *rx_ring)
+{
+	igb_clean_rx_ring(rx_ring);
+
+	vfree(rx_ring->rx_buffer_info);
+	rx_ring->rx_buffer_info = NULL;
+
+	/* if not set, then don't free */
+	if (!rx_ring->desc)
+		return;
+
+	dma_free_coherent(rx_ring->dev, rx_ring->size,
+			  rx_ring->desc, rx_ring->dma);
+
+	rx_ring->desc = NULL;
+}
+
+/**
+ *  igb_free_all_rx_resources - Free Rx Resources for All Queues
+ *  @adapter: board private structure
+ *
+ *  Free all receive software resources
+ **/
+static void igb_free_all_rx_resources(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		if (adapter->rx_ring[i])
+			igb_free_rx_resources(adapter->rx_ring[i]);
+}
+
+/**
+ *  igb_clean_rx_ring - Free Rx Buffers per Queue
+ *  @rx_ring: ring to free buffers from
+ **/
+static void igb_clean_rx_ring(struct igb_ring *rx_ring)
+{
+	unsigned long size;
+	u16 i;
+
+	if (!rx_ring->rx_buffer_info)
+		return;
+
+	/* Free all the Rx ring sk_buffs */
+	for (i = 0; i < rx_ring->count; i++) {
+		struct igb_rx_buffer *buffer_info = &rx_ring->rx_buffer_info[i];
+
+		if (buffer_info->dma)
+			buffer_info->dma = 0;
+
+		if (buffer_info->skb) {
+			kfree_rtskb(buffer_info->skb);
+			buffer_info->skb = NULL;
+		}
+	}
+
+	size = sizeof(struct igb_rx_buffer) * rx_ring->count;
+	memset(rx_ring->rx_buffer_info, 0, size);
+
+	/* Zero out the descriptor ring */
+	memset(rx_ring->desc, 0, rx_ring->size);
+
+	rx_ring->next_to_clean = 0;
+	rx_ring->next_to_use = 0;
+}
+
+/**
+ *  igb_clean_all_rx_rings - Free Rx Buffers for all queues
+ *  @adapter: board private structure
+ **/
+static void igb_clean_all_rx_rings(struct igb_adapter *adapter)
+{
+	int i;
+
+	for (i = 0; i < adapter->num_rx_queues; i++)
+		if (adapter->rx_ring[i])
+			igb_clean_rx_ring(adapter->rx_ring[i]);
+}
+
+/**
+ *  igb_write_mc_addr_list - write multicast addresses to MTA
+ *  @netdev: network interface device structure
+ *
+ *  Writes multicast address list to the MTA hash table.
+ *  Returns: -ENOMEM on failure
+ *           0 on no addresses written
+ *           X on writing X addresses to MTA
+ **/
+static int igb_write_mc_addr_list(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
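+	/* This RTnet port does not track a kernel multicast address list,
+	 * so the upstream path below is compiled out and the MTA is simply
+	 * cleared.
+	 */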
+#if 0
+	struct netdev_hw_addr *ha;
+	u8  *mta_list;
+	int i;
+	if (netdev_mc_empty(netdev)) {
+		/* nothing to program, so clear mc list */
+		igb_update_mc_addr_list(hw, NULL, 0);
+		igb_restore_vf_multicasts(adapter);
+		return 0;
+	}
+
+	mta_list = kzalloc(netdev_mc_count(netdev) * 6, GFP_ATOMIC);
+	if (!mta_list)
+		return -ENOMEM;
+
+	/* The shared function expects a packed array of only addresses. */
+	i = 0;
+	netdev_for_each_mc_addr(ha, netdev)
+		memcpy(mta_list + (i++ * ETH_ALEN), ha->addr, ETH_ALEN);
+
+	igb_update_mc_addr_list(hw, mta_list, i);
+	kfree(mta_list);
+
+	return netdev_mc_count(netdev);
+#else
+	igb_update_mc_addr_list(hw, NULL, 0);
+	return 0;
+#endif
+}
+
+/**
+ *  igb_write_uc_addr_list - clear secondary unicast RAR entries
+ *  @netdev: network interface device structure
+ *
+ *  Secondary unicast addresses are not supported by this port, so the
+ *  spare RAR entries are simply cleared.
+ *  Returns: the number of addresses written, which is always 0
+ **/
+static int igb_write_uc_addr_list(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = 0;
+	unsigned int rar_entries = hw->mac.rar_entry_count - (vfn + 1);
+	int count = 0;
+
+	/* no additional unicast addresses are supported, so clear the spare
+	 * RAR entries, in reverse order to avoid write combining
+	 */
+	for (; rar_entries > 0 ; rar_entries--) {
+		wr32(E1000_RAH(rar_entries), 0);
+		wr32(E1000_RAL(rar_entries), 0);
+	}
+	wrfl();
+
+	return count;
+}
+
+/**
+ *  igb_set_rx_mode - Secondary Unicast, Multicast and Promiscuous mode set
+ *  @netdev: network interface device structure
+ *
+ *  The set_rx_mode entry point is called whenever the unicast or multicast
+ *  address lists or the network interface flags are updated.  This routine is
+ *  responsible for configuring the hardware for proper unicast, multicast,
+ *  promiscuous mode, and all-multi behavior.
+ **/
+static void igb_set_rx_mode(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	unsigned int vfn = 0;
+	u32 rctl, vmolr = 0;
+	int count;
+
+	/* Check for Promiscuous and All Multicast modes */
+	rctl = rd32(E1000_RCTL);
+
+	/* clear the effected bits */
+	rctl &= ~(E1000_RCTL_UPE | E1000_RCTL_MPE | E1000_RCTL_VFE);
+
+	if (netdev->flags & IFF_PROMISC) {
+		rctl |= (E1000_RCTL_UPE | E1000_RCTL_MPE);
+		vmolr |= (E1000_VMOLR_ROPE | E1000_VMOLR_MPME);
+	} else {
+		if (netdev->flags & IFF_ALLMULTI) {
+			rctl |= E1000_RCTL_MPE;
+			vmolr |= E1000_VMOLR_MPME;
+		} else {
+			/* Write addresses to the MTA, if the attempt fails
+			 * then we should just turn on promiscuous mode so
+			 * that we can at least receive multicast traffic
+			 */
+			count = igb_write_mc_addr_list(netdev);
+			if (count < 0) {
+				rctl |= E1000_RCTL_MPE;
+				vmolr |= E1000_VMOLR_MPME;
+			} else if (count) {
+				vmolr |= E1000_VMOLR_ROMPE;
+			}
+		}
+		/* Write addresses to available RAR registers, if there is not
+		 * sufficient space to store all the addresses then enable
+		 * unicast promiscuous mode
+		 */
+		count = igb_write_uc_addr_list(netdev);
+		if (count < 0) {
+			rctl |= E1000_RCTL_UPE;
+			vmolr |= E1000_VMOLR_ROPE;
+		}
+		rctl |= E1000_RCTL_VFE;
+	}
+	wr32(E1000_RCTL, rctl);
+
+	/* In order to support SR-IOV and eventually VMDq it is necessary to set
+	 * the VMOLR to enable the appropriate modes.  Without this workaround
+	 * we will have issues with VLAN tag stripping not being done for frames
+	 * that are only arriving because we are the default pool
+	 */
+	if ((hw->mac.type < e1000_82576) || (hw->mac.type > e1000_i350))
+		return;
+
+	vmolr |= rd32(E1000_VMOLR(vfn)) &
+		 ~(E1000_VMOLR_ROPE | E1000_VMOLR_MPME | E1000_VMOLR_ROMPE);
+	wr32(E1000_VMOLR(vfn), vmolr);
+}
+
+static void igb_check_wvbr(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 wvbr = 0;
+
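+	/* the Wrong VM Behavior register latches spoof events and exists
+	 * only on 82576 and i350 class hardware
+	 */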
+	switch (hw->mac.type) {
+	case e1000_82576:
+	case e1000_i350:
+		wvbr = rd32(E1000_WVBR);
+		if (!wvbr)
+			return;
+		break;
+	default:
+		break;
+	}
+
+	adapter->wvbr |= wvbr;
+}
+
+#define IGB_STAGGERED_QUEUE_OFFSET 8
+
+/* Need to wait a few seconds after link up to get diagnostic information from
+ * the phy
+ */
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void igb_update_phy_info(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, phy_info_timer);
+#else /* < 4.14 */
+static void igb_update_phy_info(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *) data;
+#endif /* < 4.14 */
+	igb_get_phy_info(&adapter->hw);
+}
+
+/**
+ *  igb_has_link - check shared code for link and determine up/down
+ *  @adapter: pointer to driver private info
+ **/
+bool igb_has_link(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	bool link_active = false;
+
+	/* get_link_status is set on LSC (link status) interrupt or
+	 * rx sequence error interrupt.  get_link_status will stay
+	 * false until the e1000_check_for_link establishes link
+	 * for copper adapters ONLY
+	 */
+	switch (hw->phy.media_type) {
+	case e1000_media_type_copper:
+		if (!hw->mac.get_link_status)
+			return true;
+		fallthrough;
+	case e1000_media_type_internal_serdes:
+		hw->mac.ops.check_for_link(hw);
+		link_active = !hw->mac.get_link_status;
+		break;
+	default:
+	case e1000_media_type_unknown:
+		break;
+	}
+
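+	/* I210/I211 internal PHY workaround: flag a fresh carrier-up so that
+	 * igb_watchdog_task re-validates the link for one second
+	 * (IGB_FLAG_NEED_LINK_UPDATE) before trusting it
+	 */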
+	if (((hw->mac.type == e1000_i210) ||
+	     (hw->mac.type == e1000_i211)) &&
+	     (hw->phy.id == I210_I_PHY_ID)) {
+		if (!rtnetif_carrier_ok(adapter->netdev)) {
+			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+		} else if (!(adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)) {
+			adapter->flags |= IGB_FLAG_NEED_LINK_UPDATE;
+			adapter->link_check_timeout = jiffies;
+		}
+	}
+
+	return link_active;
+}
+
+static bool igb_thermal_sensor_event(struct e1000_hw *hw, u32 event)
+{
+	bool ret = false;
+	u32 ctrl_ext, thstat;
+
+	/* check for thermal sensor event on i350 copper only */
+	if (hw->mac.type == e1000_i350) {
+		thstat = rd32(E1000_THSTAT);
+		ctrl_ext = rd32(E1000_CTRL_EXT);
+
+		if ((hw->phy.media_type == e1000_media_type_copper) &&
+		    !(ctrl_ext & E1000_CTRL_EXT_LINK_MODE_SGMII))
+			ret = !!(thstat & event);
+	}
+
+	return ret;
+}
+
+/**
+ *  igb_check_lvmmc - check for malformed packets received
+ *  and indicated in LVMMC register
+ *  @adapter: pointer to adapter
+ **/
+static void igb_check_lvmmc(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 lvmmc;
+
+	lvmmc = rd32(E1000_LVMMC);
+	if (lvmmc) {
+		if (unlikely(net_ratelimit())) {
+			rtdev_warn(adapter->netdev,
+				    "malformed Tx packet detected and dropped, LVMMC:0x%08x\n",
+				    lvmmc);
+		}
+	}
+}
+
+/**
+ *  igb_watchdog - Timer Call-back
+ *  @t: watchdog timer; kernels before 4.14 instead pass the adapter cast
+ *      into an unsigned long
+ **/
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(4,14,0)
+static void igb_watchdog(struct timer_list *t)
+{
+	struct igb_adapter *adapter = from_timer(adapter, t, watchdog_timer);
+#else /* < 4.14 */
+static void igb_watchdog(unsigned long data)
+{
+	struct igb_adapter *adapter = (struct igb_adapter *)data;
+#endif /* < 4.14 */
+	/* Do the rest outside of interrupt context */
+	schedule_work(&adapter->watchdog_task);
+}
+
+static void igb_watchdog_task(struct work_struct *work)
+{
+	struct igb_adapter *adapter = container_of(work,
+						   struct igb_adapter,
+						   watchdog_task);
+	struct e1000_hw *hw = &adapter->hw;
+	struct e1000_phy_info *phy = &hw->phy;
+	struct rtnet_device *netdev = adapter->netdev;
+	u32 link;
+	int i;
+	u32 connsw;
+
+	link = igb_has_link(adapter);
+
+	if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE) {
+		if (time_after(jiffies, (adapter->link_check_timeout + HZ)))
+			adapter->flags &= ~IGB_FLAG_NEED_LINK_UPDATE;
+		else
+			link = false;
+	}
+
+	/* Force link down if we have fiber to swap to */
+	if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+		if (hw->phy.media_type == e1000_media_type_copper) {
+			connsw = rd32(E1000_CONNSW);
+			if (!(connsw & E1000_CONNSW_AUTOSENSE_EN))
+				link = 0;
+		}
+	}
+	if (link) {
+		/* Perform a reset if the media type changed. */
+		if (hw->dev_spec._82575.media_changed) {
+			hw->dev_spec._82575.media_changed = false;
+			adapter->flags |= IGB_FLAG_MEDIA_RESET;
+			igb_reset(adapter);
+		}
+		/* Cancel scheduled suspend requests. */
+		pm_runtime_resume(adapter->pdev->dev.parent);
+
+		if (!rtnetif_carrier_ok(netdev)) {
+			u32 ctrl;
+
+			hw->mac.ops.get_speed_and_duplex(hw,
+							 &adapter->link_speed,
+							 &adapter->link_duplex);
+
+			ctrl = rd32(E1000_CTRL);
+			/* Link status message must follow this format */
+			rtdev_info(netdev,
+			       "igb: %s NIC Link is Up %d Mbps %s Duplex, Flow Control: %s\n",
+			       netdev->name,
+			       adapter->link_speed,
+			       adapter->link_duplex == FULL_DUPLEX ?
+			       "Full" : "Half",
+			       (ctrl & E1000_CTRL_TFCE) &&
+			       (ctrl & E1000_CTRL_RFCE) ? "RX/TX" :
+			       (ctrl & E1000_CTRL_RFCE) ?  "RX" :
+			       (ctrl & E1000_CTRL_TFCE) ?  "TX" : "None");
+
+			/* disable EEE if enabled */
+			if ((adapter->flags & IGB_FLAG_EEE) &&
+				(adapter->link_duplex == HALF_DUPLEX)) {
+				dev_info(&adapter->pdev->dev,
+				"EEE Disabled: unsupported at half duplex. Re-enable using ethtool when at full duplex.\n");
+				adapter->hw.dev_spec._82575.eee_disable = true;
+				adapter->flags &= ~IGB_FLAG_EEE;
+			}
+
+			/* check if SmartSpeed worked */
+			igb_check_downshift(hw);
+			if (phy->speed_downgraded)
+				rtdev_warn(netdev, "Link Speed was downgraded by SmartSpeed\n");
+
+			/* check for thermal sensor event */
+			if (igb_thermal_sensor_event(hw,
+			    E1000_THSTAT_LINK_THROTTLE))
+				rtdev_info(netdev, "The network adapter link speed was downshifted because it overheated\n");
+
+			/* adjust timeout factor according to speed/duplex */
+			adapter->tx_timeout_factor = 1;
+			switch (adapter->link_speed) {
+			case SPEED_10:
+				adapter->tx_timeout_factor = 14;
+				break;
+			case SPEED_100:
+				/* maybe add some timeout factor ? */
+				break;
+			}
+
+			rtnetif_carrier_on(netdev);
+
+			/* link state has changed, schedule phy info update */
+			if (!test_bit(__IGB_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+		}
+	} else {
+		if (rtnetif_carrier_ok(netdev)) {
+			adapter->link_speed = 0;
+			adapter->link_duplex = 0;
+
+			/* check for thermal sensor event */
+			if (igb_thermal_sensor_event(hw,
+			    E1000_THSTAT_PWR_DOWN)) {
+				rtdev_err(netdev, "The network adapter was stopped because it overheated\n");
+			}
+
+			/* Link status message must follow this format */
+			rtdev_info(netdev, "igb: %s NIC Link is Down\n",
+			       netdev->name);
+			rtnetif_carrier_off(netdev);
+
+			/* link state has changed, schedule phy info update */
+			if (!test_bit(__IGB_DOWN, &adapter->state))
+				mod_timer(&adapter->phy_info_timer,
+					  round_jiffies(jiffies + 2 * HZ));
+
+			/* link is down, time to check for alternate media */
+			if (adapter->flags & IGB_FLAG_MAS_ENABLE) {
+				igb_check_swap_media(adapter);
+				if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+					schedule_work(&adapter->reset_task);
+					/* return immediately */
+					return;
+				}
+			}
+			pm_schedule_suspend(adapter->pdev->dev.parent,
+					    MSEC_PER_SEC * 5);
+
+		/* also check for alternate media here */
+		} else if (!rtnetif_carrier_ok(netdev) &&
+			   (adapter->flags & IGB_FLAG_MAS_ENABLE)) {
+			igb_check_swap_media(adapter);
+			if (adapter->flags & IGB_FLAG_MEDIA_RESET) {
+				schedule_work(&adapter->reset_task);
+				/* return immediately */
+				return;
+			}
+		}
+	}
+
+	spin_lock(&adapter->stats64_lock);
+	igb_update_stats(adapter);
+	spin_unlock(&adapter->stats64_lock);
+
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *tx_ring = adapter->tx_ring[i];
+		if (!rtnetif_carrier_ok(netdev)) {
+			/* We've lost link, so the controller stops DMA,
+			 * but we've got queued Tx work that's never going
+			 * to get done, so reset controller to flush Tx.
+			 * (Do the reset outside of interrupt context).
+			 */
+			if (igb_desc_unused(tx_ring) + 1 < tx_ring->count) {
+				adapter->tx_timeout_count++;
+				schedule_work(&adapter->reset_task);
+				/* return immediately since reset is imminent */
+				return;
+			}
+		}
+
+		/* Force detection of hung controller every watchdog period */
+		set_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+	}
+
+	/* Cause software interrupt to ensure Rx ring is cleaned */
+	if (adapter->flags & IGB_FLAG_HAS_MSIX) {
+		u32 eics = 0;
+
+		for (i = 0; i < adapter->num_q_vectors; i++)
+			eics |= adapter->q_vector[i]->eims_value;
+		wr32(E1000_EICS, eics);
+	} else {
+		wr32(E1000_ICS, E1000_ICS_RXDMT0);
+	}
+
+	/* Check LVMMC register on i350/i354 only */
+	if ((adapter->hw.mac.type == e1000_i350) ||
+	    (adapter->hw.mac.type == e1000_i354))
+		igb_check_lvmmc(adapter);
+
+	/* Reset the timer */
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_NEED_LINK_UPDATE)
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies +  HZ));
+		else
+			mod_timer(&adapter->watchdog_timer,
+				  round_jiffies(jiffies + 2 * HZ));
+	}
+}
+
+enum latency_range {
+	lowest_latency = 0,
+	low_latency = 1,
+	bulk_latency = 2,
+	latency_invalid = 255
+};
+
+/**
+ *  igb_update_ring_itr - update the dynamic ITR value based on packet size
+ *  @q_vector: pointer to q_vector
+ *
+ *  Stores a new ITR value based strictly on packet size.  This
+ *  algorithm is less sophisticated than that used in igb_update_itr,
+ *  due to the difficulty of synchronizing statistics across multiple
+ *  receive rings.  The divisors and thresholds used by this function
+ *  were determined based on theoretical maximum wire speed and testing
+ *  data, in order to minimize response time while increasing bulk
+ *  throughput.
+ *  This functionality is controlled by ethtool's coalescing settings.
+ *  NOTE:  This function is called only when operating in a multiqueue
+ *         receive environment.
+ **/
+static void igb_update_ring_itr(struct igb_q_vector *q_vector)
+{
+	int new_val = q_vector->itr_val;
+	int avg_wire_size = 0;
+	struct igb_adapter *adapter = q_vector->adapter;
+	unsigned int packets;
+
+	if (!InterruptThrottle)
+		return;
+
+	/* For non-gigabit speeds, just fix the interrupt rate at 4000
+	 * ints/sec - ITR timer value of 120 ticks.
+	 */
+	if (adapter->link_speed != SPEED_1000) {
+		new_val = IGB_4K_ITR;
+		goto set_itr_val;
+	}
+
+	packets = q_vector->rx.total_packets;
+	if (packets)
+		avg_wire_size = q_vector->rx.total_bytes / packets;
+
+	packets = q_vector->tx.total_packets;
+	if (packets)
+		avg_wire_size = max_t(u32, avg_wire_size,
+				      q_vector->tx.total_bytes / packets);
+
+	/* if avg_wire_size isn't set no work was done */
+	if (!avg_wire_size)
+		goto clear_counts;
+
+	/* Add 24 bytes to size to account for CRC, preamble, and gap */
+	avg_wire_size += 24;
+
+	/* Don't starve jumbo frames */
+	avg_wire_size = min(avg_wire_size, 3000);
+
+	/* Give a little boost to mid-size frames */
+	if ((avg_wire_size > 300) && (avg_wire_size < 1200))
+		new_val = avg_wire_size / 3;
+	else
+		new_val = avg_wire_size / 2;
+
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (new_val < IGB_20K_ITR &&
+	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+		new_val = IGB_20K_ITR;
+
+set_itr_val:
+	if (new_val != q_vector->itr_val) {
+		q_vector->itr_val = new_val;
+		q_vector->set_itr = 1;
+	}
+clear_counts:
+	q_vector->rx.total_bytes = 0;
+	q_vector->rx.total_packets = 0;
+	q_vector->tx.total_bytes = 0;
+	q_vector->tx.total_packets = 0;
+}
+
+/**
+ *  igb_update_itr - update the dynamic ITR value based on statistics
+ *  @q_vector: pointer to q_vector
+ *  @ring_container: ring info to update the itr for
+ *
+ *  Stores a new ITR value based on packets and byte
+ *  counts during the last interrupt.  The advantage of per interrupt
+ *  computation is faster updates and more accurate ITR for the current
+ *  traffic pattern.  Constants in this function were computed
+ *  based on theoretical maximum wire speed and thresholds were set based
+ *  on testing data as well as attempting to minimize response time
+ *  while increasing bulk throughput.
+ *  This functionality is controlled by ethtool's coalescing settings.
+ *  NOTE:  These calculations are only valid when operating in a single-
+ *         queue environment.
+ **/
+static void igb_update_itr(struct igb_q_vector *q_vector,
+			   struct igb_ring_container *ring_container)
+{
+	unsigned int packets = ring_container->total_packets;
+	unsigned int bytes = ring_container->total_bytes;
+	u8 itrval = ring_container->itr;
+
+	/* no packets, exit with status unchanged */
+	if (packets == 0)
+		return;
+
+	switch (itrval) {
+	case lowest_latency:
+		/* handle TSO and jumbo frames */
+		if (bytes/packets > 8000)
+			itrval = bulk_latency;
+		else if ((packets < 5) && (bytes > 512))
+			itrval = low_latency;
+		break;
+	case low_latency:  /* 50 usec aka 20000 ints/s */
+		if (bytes > 10000) {
+			/* this if handles the TSO accounting */
+			if (bytes/packets > 8000)
+				itrval = bulk_latency;
+			else if ((packets < 10) || ((bytes/packets) > 1200))
+				itrval = bulk_latency;
+			else if (packets > 35)
+				itrval = lowest_latency;
+		} else if (bytes/packets > 2000) {
+			itrval = bulk_latency;
+		} else if (packets <= 2 && bytes < 512) {
+			itrval = lowest_latency;
+		}
+		break;
+	case bulk_latency: /* 250 usec aka 4000 ints/s */
+		if (bytes > 25000) {
+			if (packets > 35)
+				itrval = low_latency;
+		} else if (bytes < 1500) {
+			itrval = low_latency;
+		}
+		break;
+	}
+
+	/* clear work counters since we have the values we need */
+	ring_container->total_bytes = 0;
+	ring_container->total_packets = 0;
+
+	/* write updated itr to ring container */
+	ring_container->itr = itrval;
+}
+
+static void igb_set_itr(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	u32 new_itr = q_vector->itr_val;
+	u8 current_itr = 0;
+
+	if (!InterruptThrottle)
+		return;
+
+	/* for non-gigabit speeds, just fix the interrupt rate at 4000 */
+	if (adapter->link_speed != SPEED_1000) {
+		current_itr = 0;
+		new_itr = IGB_4K_ITR;
+		goto set_itr_now;
+	}
+
+	igb_update_itr(q_vector, &q_vector->tx);
+	igb_update_itr(q_vector, &q_vector->rx);
+
+	current_itr = max(q_vector->rx.itr, q_vector->tx.itr);
+
+	/* conservative mode (itr 3) eliminates the lowest_latency setting */
+	if (current_itr == lowest_latency &&
+	    ((q_vector->rx.ring && adapter->rx_itr_setting == 3) ||
+	     (!q_vector->rx.ring && adapter->tx_itr_setting == 3)))
+		current_itr = low_latency;
+
+	switch (current_itr) {
+	/* counts and packets in update_itr are dependent on these numbers */
+	case lowest_latency:
+		new_itr = IGB_70K_ITR; /* 70,000 ints/sec */
+		break;
+	case low_latency:
+		new_itr = IGB_20K_ITR; /* 20,000 ints/sec */
+		break;
+	case bulk_latency:
+		new_itr = IGB_4K_ITR;  /* 4,000 ints/sec */
+		break;
+	default:
+		break;
+	}
+
+set_itr_now:
+	if (new_itr != q_vector->itr_val) {
+		/* this attempts to bias the interrupt rate towards Bulk
+		 * by adding intermediate steps when interrupt rate is
+		 * increasing
+		 */
+		new_itr = new_itr > q_vector->itr_val ?
+			  max((new_itr * q_vector->itr_val) /
+			  (new_itr + (q_vector->itr_val >> 2)),
+			  new_itr) : new_itr;
+		/* Don't write the value here; it resets the adapter's
+		 * internal timer, and causes us to delay far longer than
+		 * we should between interrupts.  Instead, we write the ITR
+		 * value at the beginning of the next interrupt so the timing
+		 * ends up being correct.
+		 */
+		q_vector->itr_val = new_itr;
+		q_vector->set_itr = 1;
+	}
+}
+
+
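+/* Translate the bit named by _flag in _input into the bit named by _result,
+ * scaling by a compile-time multiply or divide depending on which bit
+ * position is higher.
+ */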
+#define IGB_SET_FLAG(_input, _flag, _result) \
+	((_flag <= _result) ? \
+	 ((u32)(_input & _flag) * (_result / _flag)) : \
+	 ((u32)(_input & _flag) / (_flag / _result)))
+
+static u32 igb_tx_cmd_type(struct rtskb *skb, u32 tx_flags)
+{
+	/* set type for advanced descriptor with frame checksum insertion */
+	u32 cmd_type = E1000_ADVTXD_DTYP_DATA |
+		       E1000_ADVTXD_DCMD_DEXT |
+		       E1000_ADVTXD_DCMD_IFCS;
+
+	return cmd_type;
+}
+
+static void igb_tx_olinfo_status(struct igb_ring *tx_ring,
+				 union e1000_adv_tx_desc *tx_desc,
+				 u32 tx_flags, unsigned int paylen)
+{
+	u32 olinfo_status = paylen << E1000_ADVTXD_PAYLEN_SHIFT;
+
+	/* 82575 requires a unique index per ring */
+	if (test_bit(IGB_RING_FLAG_TX_CTX_IDX, &tx_ring->flags))
+		olinfo_status |= tx_ring->reg_idx << 4;
+
+	tx_desc->read.olinfo_status = cpu_to_le32(olinfo_status);
+}
+
+static int __igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	struct rtnet_device *netdev = tx_ring->netdev;
+
+	rtnetif_stop_queue(netdev);
+
+	/* Herbert's original patch had:
+	 *  smp_mb__after_netif_stop_queue();
+	 * but since that doesn't exist yet, just open code it.
+	 */
+	smp_mb();
+
+	/* We need to check again in a case another CPU has just
+	 * made room available.
+	 */
+	if (igb_desc_unused(tx_ring) < size)
+		return -EBUSY;
+
+	/* A reprieve! */
+	rtnetif_wake_queue(netdev);
+
+	tx_ring->tx_stats.restart_queue2++;
+
+	return 0;
+}
+
+static inline int igb_maybe_stop_tx(struct igb_ring *tx_ring, const u16 size)
+{
+	if (igb_desc_unused(tx_ring) >= size)
+		return 0;
+	return __igb_maybe_stop_tx(tx_ring, size);
+}
+
+static void igb_tx_map(struct igb_ring *tx_ring,
+		       struct igb_tx_buffer *first,
+		       const u8 hdr_len)
+{
+	struct rtskb *skb = first->skb;
+	struct igb_tx_buffer *tx_buffer;
+	union e1000_adv_tx_desc *tx_desc;
+	dma_addr_t dma;
+	unsigned int size;
+	u32 tx_flags = first->tx_flags;
+	u32 cmd_type = igb_tx_cmd_type(skb, tx_flags);
+	u16 i = tx_ring->next_to_use;
+
+	/* first descriptor is also last, set RS and EOP bits */
+	cmd_type |= IGB_TXD_DCMD;
+	tx_desc = IGB_TX_DESC(tx_ring, i);
+
+	igb_tx_olinfo_status(tx_ring, tx_desc, tx_flags, skb->len - hdr_len);
+
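+	/* this port transmits each rtskb as a single contiguous buffer, so
+	 * one descriptor covers the whole frame
+	 */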
+	size = skb->len;
+
+	dma = rtskb_data_dma_addr(skb, 0);
+
+	tx_buffer = first;
+
+	tx_desc->read.buffer_addr = cpu_to_le64(dma);
+	tx_desc->read.cmd_type_len = cpu_to_le32(cmd_type ^ size);
+
+	/* set the timestamp */
+	first->time_stamp = jiffies;
+	first->next_to_watch = tx_desc;
+
+	i++;
+	tx_desc++;
+	if (i == tx_ring->count) {
+		tx_desc = IGB_TX_DESC(tx_ring, 0);
+		i = 0;
+	}
+
+	/* Force memory writes to complete before letting h/w know there
+	 * are new descriptors to fetch.  (Only applicable for weak-ordered
+	 * memory model archs, such as IA-64).
+	 *
+	 * We also need this memory barrier to make certain all of the
+	 * status bits have been updated before next_to_watch is written.
+	 */
+	wmb();
+
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+	/* set next_to_watch value indicating a packet is present */
+	tx_ring->next_to_use = i;
+
+	/* Make sure there is space in the ring for the next send. */
+	igb_maybe_stop_tx(tx_ring, DESC_NEEDED);
+
+	writel(i, tx_ring->tail);
+
+	/* we need this if more than one processor can write to our tail
+	 * at a time, it synchronizes IO on IA64/Altix systems
+	 */
+	mmiowb();
+}
+
+netdev_tx_t igb_xmit_frame_ring(struct rtskb *skb,
+				struct igb_ring *tx_ring)
+{
+	struct igb_tx_buffer *first;
+	u32 tx_flags = 0;
+	u16 count = 2;
+	u8 hdr_len = 0;
+
+	/* need: 1 descriptor per page * PAGE_SIZE/IGB_MAX_DATA_PER_TXD,
+	 *       + 1 desc for skb_headlen/IGB_MAX_DATA_PER_TXD,
+	 *       + 2 desc gap to keep tail from touching head,
+	 *       + 1 desc for context descriptor,
+	 * otherwise try next time
+	 */
+	if (igb_maybe_stop_tx(tx_ring, count + 3)) {
+		/* this is a hard error */
+		return NETDEV_TX_BUSY;
+	}
+
+	if (skb->protocol == htons(ETH_P_IP))
+		tx_flags |= IGB_TX_FLAGS_IPV4;
+
+	/* record the location of the first descriptor for this packet */
+	first = &tx_ring->tx_buffer_info[tx_ring->next_to_use];
+	first->skb = skb;
+	first->bytecount = skb->len;
+	first->gso_segs = 1;
+
+	/* record initial flags and protocol */
+	first->tx_flags = tx_flags;
+	first->protocol = skb->protocol;
+
+	igb_tx_map(tx_ring, first, hdr_len);
+
+	return NETDEV_TX_OK;
+}
+
+static inline struct igb_ring *igb_tx_queue_mapping(struct igb_adapter *adapter,
+						    struct rtskb *skb)
+{
+	return adapter->tx_ring[0];
+}
+
+static netdev_tx_t igb_xmit_frame(struct rtskb *skb,
+				  struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	if (test_bit(__IGB_DOWN, &adapter->state)) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	if (skb->len <= 0) {
+		kfree_rtskb(skb);
+		return NETDEV_TX_OK;
+	}
+
+	/* The minimum packet size with TCTL.PSP set is 17 so pad the skb
+	 * in order to meet this minimum size requirement.
+	 */
+	if (skb->len < 17) {
+		skb = rtskb_padto(skb, 17);
+		if (!skb)
+			return NETDEV_TX_OK;
+	}
+
+	return igb_xmit_frame_ring(skb, igb_tx_queue_mapping(adapter, skb));
+}
+
+static void igb_reset_task(struct work_struct *work)
+{
+	struct igb_adapter *adapter;
+	adapter = container_of(work, struct igb_adapter, reset_task);
+
+	igb_dump(adapter);
+	rtdev_err(adapter->netdev, "Reset adapter\n");
+	igb_reinit_locked(adapter);
+}
+
+/**
+ * igb_get_stats - Get System Network Statistics
+ * @netdev: network interface device structure
+ *
+ * Returns the address of the device statistics structure.
+ * The statistics are actually updated from the timer callback.
+ **/
+static struct net_device_stats *
+igb_get_stats(struct rtnet_device *netdev)
+{
+	struct igb_adapter *adapter = netdev->priv;
+
+	/* only return the current stats */
+	return &adapter->net_stats;
+}
+
+/**
+ *  igb_update_stats - Update the board statistics counters
+ *  @adapter: board private structure
+ **/
+void igb_update_stats(struct igb_adapter *adapter)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	struct pci_dev *pdev = adapter->pdev;
+	struct net_device_stats *net_stats;
+	u32 reg, mpc;
+	int i;
+	u64 bytes, packets;
+
+	/* Prevent stats update while adapter is being reset, or if the pci
+	 * connection is down.
+	 */
+	if (adapter->link_speed == 0)
+		return;
+	if (pci_channel_offline(pdev))
+		return;
+
+	net_stats = &adapter->net_stats;
+	bytes = 0;
+	packets = 0;
+
+	rcu_read_lock();
+	for (i = 0; i < adapter->num_rx_queues; i++) {
+		struct igb_ring *ring = adapter->rx_ring[i];
+		u32 rqdpc = rd32(E1000_RQDPC(i));
+		if (hw->mac.type >= e1000_i210)
+			wr32(E1000_RQDPC(i), 0);
+
+		if (rqdpc) {
+			ring->rx_stats.drops += rqdpc;
+			net_stats->rx_fifo_errors += rqdpc;
+		}
+
+		bytes += ring->rx_stats.bytes;
+		packets += ring->rx_stats.packets;
+	}
+
+	net_stats->rx_bytes = bytes;
+	net_stats->rx_packets = packets;
+
+	bytes = 0;
+	packets = 0;
+	for (i = 0; i < adapter->num_tx_queues; i++) {
+		struct igb_ring *ring = adapter->tx_ring[i];
+		bytes += ring->tx_stats.bytes;
+		packets += ring->tx_stats.packets;
+	}
+	net_stats->tx_bytes = bytes;
+	net_stats->tx_packets = packets;
+	rcu_read_unlock();
+
+	/* read stats registers */
+	adapter->stats.crcerrs += rd32(E1000_CRCERRS);
+	adapter->stats.gprc += rd32(E1000_GPRC);
+	adapter->stats.gorc += rd32(E1000_GORCL);
+	rd32(E1000_GORCH); /* clear GORCL */
+	adapter->stats.bprc += rd32(E1000_BPRC);
+	adapter->stats.mprc += rd32(E1000_MPRC);
+	adapter->stats.roc += rd32(E1000_ROC);
+
+	adapter->stats.prc64 += rd32(E1000_PRC64);
+	adapter->stats.prc127 += rd32(E1000_PRC127);
+	adapter->stats.prc255 += rd32(E1000_PRC255);
+	adapter->stats.prc511 += rd32(E1000_PRC511);
+	adapter->stats.prc1023 += rd32(E1000_PRC1023);
+	adapter->stats.prc1522 += rd32(E1000_PRC1522);
+	adapter->stats.symerrs += rd32(E1000_SYMERRS);
+	adapter->stats.sec += rd32(E1000_SEC);
+
+	mpc = rd32(E1000_MPC);
+	adapter->stats.mpc += mpc;
+	net_stats->rx_fifo_errors += mpc;
+	adapter->stats.scc += rd32(E1000_SCC);
+	adapter->stats.ecol += rd32(E1000_ECOL);
+	adapter->stats.mcc += rd32(E1000_MCC);
+	adapter->stats.latecol += rd32(E1000_LATECOL);
+	adapter->stats.dc += rd32(E1000_DC);
+	adapter->stats.rlec += rd32(E1000_RLEC);
+	adapter->stats.xonrxc += rd32(E1000_XONRXC);
+	adapter->stats.xontxc += rd32(E1000_XONTXC);
+	adapter->stats.xoffrxc += rd32(E1000_XOFFRXC);
+	adapter->stats.xofftxc += rd32(E1000_XOFFTXC);
+	adapter->stats.fcruc += rd32(E1000_FCRUC);
+	adapter->stats.gptc += rd32(E1000_GPTC);
+	adapter->stats.gotc += rd32(E1000_GOTCL);
+	rd32(E1000_GOTCH); /* clear GOTCL */
+	adapter->stats.rnbc += rd32(E1000_RNBC);
+	adapter->stats.ruc += rd32(E1000_RUC);
+	adapter->stats.rfc += rd32(E1000_RFC);
+	adapter->stats.rjc += rd32(E1000_RJC);
+	adapter->stats.tor += rd32(E1000_TORH);
+	adapter->stats.tot += rd32(E1000_TOTH);
+	adapter->stats.tpr += rd32(E1000_TPR);
+
+	adapter->stats.ptc64 += rd32(E1000_PTC64);
+	adapter->stats.ptc127 += rd32(E1000_PTC127);
+	adapter->stats.ptc255 += rd32(E1000_PTC255);
+	adapter->stats.ptc511 += rd32(E1000_PTC511);
+	adapter->stats.ptc1023 += rd32(E1000_PTC1023);
+	adapter->stats.ptc1522 += rd32(E1000_PTC1522);
+
+	adapter->stats.mptc += rd32(E1000_MPTC);
+	adapter->stats.bptc += rd32(E1000_BPTC);
+
+	adapter->stats.tpt += rd32(E1000_TPT);
+	adapter->stats.colc += rd32(E1000_COLC);
+
+	adapter->stats.algnerrc += rd32(E1000_ALGNERRC);
+	/* read internal phy specific stats */
+	reg = rd32(E1000_CTRL_EXT);
+	if (!(reg & E1000_CTRL_EXT_LINK_MODE_MASK)) {
+		adapter->stats.rxerrc += rd32(E1000_RXERRC);
+
+		/* this stat has invalid values on i210/i211 */
+		if ((hw->mac.type != e1000_i210) &&
+		    (hw->mac.type != e1000_i211))
+			adapter->stats.tncrs += rd32(E1000_TNCRS);
+	}
+
+	adapter->stats.tsctc += rd32(E1000_TSCTC);
+	adapter->stats.tsctfc += rd32(E1000_TSCTFC);
+
+	adapter->stats.iac += rd32(E1000_IAC);
+	adapter->stats.icrxoc += rd32(E1000_ICRXOC);
+	adapter->stats.icrxptc += rd32(E1000_ICRXPTC);
+	adapter->stats.icrxatc += rd32(E1000_ICRXATC);
+	adapter->stats.ictxptc += rd32(E1000_ICTXPTC);
+	adapter->stats.ictxatc += rd32(E1000_ICTXATC);
+	adapter->stats.ictxqec += rd32(E1000_ICTXQEC);
+	adapter->stats.ictxqmtc += rd32(E1000_ICTXQMTC);
+	adapter->stats.icrxdmtc += rd32(E1000_ICRXDMTC);
+
+	/* Fill out the OS statistics structure */
+	net_stats->multicast = adapter->stats.mprc;
+	net_stats->collisions = adapter->stats.colc;
+
+	/* Rx Errors */
+
+	/* RLEC on some newer hardware can be incorrect so build
+	 * our own version based on RUC and ROC
+	 */
+	net_stats->rx_errors = adapter->stats.rxerrc +
+		adapter->stats.crcerrs + adapter->stats.algnerrc +
+		adapter->stats.ruc + adapter->stats.roc +
+		adapter->stats.cexterr;
+	net_stats->rx_length_errors = adapter->stats.ruc +
+				      adapter->stats.roc;
+	net_stats->rx_crc_errors = adapter->stats.crcerrs;
+	net_stats->rx_frame_errors = adapter->stats.algnerrc;
+	net_stats->rx_missed_errors = adapter->stats.mpc;
+
+	/* Tx Errors */
+	net_stats->tx_errors = adapter->stats.ecol +
+			       adapter->stats.latecol;
+	net_stats->tx_aborted_errors = adapter->stats.ecol;
+	net_stats->tx_window_errors = adapter->stats.latecol;
+	net_stats->tx_carrier_errors = adapter->stats.tncrs;
+
+	/* Tx Dropped needs to be maintained elsewhere */
+
+	/* Management Stats */
+	adapter->stats.mgptc += rd32(E1000_MGTPTC);
+	adapter->stats.mgprc += rd32(E1000_MGTPRC);
+	adapter->stats.mgpdc += rd32(E1000_MGTPDC);
+
+	/* OS2BMC Stats */
+	reg = rd32(E1000_MANC);
+	if (reg & E1000_MANC_EN_BMC2OS) {
+		adapter->stats.o2bgptc += rd32(E1000_O2BGPTC);
+		adapter->stats.o2bspc += rd32(E1000_O2BSPC);
+		adapter->stats.b2ospc += rd32(E1000_B2OSPC);
+		adapter->stats.b2ogprc += rd32(E1000_B2OGPRC);
+	}
+}
+
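+/* Runs in Linux (non-RT) context when pended from the RTDM interrupt
+ * handlers: the watchdog timer must not be re-armed from real-time context.
+ */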
+static void igb_nrtsig_watchdog(rtdm_nrtsig_t *sig, void *data)
+{
+	struct igb_adapter *adapter = data;
+	mod_timer(&adapter->watchdog_timer, jiffies + 1);
+}
+
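+/* @root is true when invoked from a regular Linux interrupt (the MSI-X
+ * "other" vector), where the watchdog timer may be re-armed directly;
+ * RTDM handlers pass false and defer the re-arm through the nrtsig.
+ */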
+static void igb_other_handler(struct igb_adapter *adapter, u32 icr, bool root)
+{
+	struct e1000_hw *hw = &adapter->hw;
+
+	if (icr & E1000_ICR_DRSTA)
+		rtdm_schedule_nrt_work(&adapter->reset_task);
+
+	if (icr & E1000_ICR_DOUTSYNC) {
+		/* HW is reporting DMA is out of sync */
+		adapter->stats.doosync++;
+		/* The DMA Out of Sync is also indication of a spoof event
+		 * in IOV mode. Check the Wrong VM Behavior register to
+		 * see if it is really a spoof event.
+		 */
+		igb_check_wvbr(adapter);
+	}
+
+	if (icr & E1000_ICR_LSC) {
+		hw->mac.get_link_status = 1;
+		/* guard against interrupt when we're going down */
+		if (!test_bit(__IGB_DOWN, &adapter->state)) {
+			if (root)
+				mod_timer(&adapter->watchdog_timer,
+					jiffies + 1);
+			else
+				rtdm_nrtsig_pend(&adapter->watchdog_nrtsig);
+		}
+	}
+}
+
+static irqreturn_t igb_msix_other(int irq, void *data)
+{
+	struct igb_adapter *adapter = data;
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = rd32(E1000_ICR);
+	/* reading ICR causes bit 31 of EICR to be cleared */
+
+	igb_other_handler(adapter, icr, true);
+
+	wr32(E1000_EIMS, adapter->eims_other);
+
+	return IRQ_HANDLED;
+}
+
+static void igb_write_itr(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	u32 itr_val = (q_vector->itr_val + 0x3) & 0x7FFC;
+
+	if (!q_vector->set_itr)
+		return;
+
+	if (!itr_val)
+		itr_val = 0x4;
+
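+	/* 82575 expects the interval replicated in the upper half word,
+	 * later MACs take the E1000_EITR_CNT_IGNR flag instead
+	 */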
+	if (adapter->hw.mac.type == e1000_82575)
+		itr_val |= itr_val << 16;
+	else
+		itr_val |= E1000_EITR_CNT_IGNR;
+
+	writel(itr_val, q_vector->itr_register);
+	q_vector->set_itr = 0;
+}
+
+static int igb_msix_ring(rtdm_irq_t *ih)
+{
+	struct igb_q_vector *q_vector =
+		rtdm_irq_get_arg(ih, struct igb_q_vector);
+
+	/* Write the ITR value calculated from the previous interrupt. */
+	igb_write_itr(q_vector);
+
+	igb_poll(q_vector);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+
+/**
+ *  igb_intr_msi - MSI Interrupt Handler
+ *  @ih: RTDM interrupt handle for this adapter
+ **/
+static int igb_intr_msi(rtdm_irq_t *ih)
+{
+	struct igb_adapter *adapter =
+		rtdm_irq_get_arg(ih, struct igb_adapter);
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
+	struct e1000_hw *hw = &adapter->hw;
+	u32 icr = rd32(E1000_ICR);
+
+	igb_write_itr(q_vector);
+
+	igb_other_handler(adapter, icr, false);
+
+	igb_poll(q_vector);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+/**
+ *  igb_intr - Legacy Interrupt Handler
+ *  @ih: RTDM interrupt handle for this adapter
+ **/
+static int igb_intr(rtdm_irq_t *ih)
+{
+	struct igb_adapter *adapter =
+		rtdm_irq_get_arg(ih, struct igb_adapter);
+	struct igb_q_vector *q_vector = adapter->q_vector[0];
+	struct e1000_hw *hw = &adapter->hw;
+	/* Interrupt Auto-Mask...upon reading ICR, interrupts are masked.  No
+	 * need for the IMC write
+	 */
+	u32 icr = rd32(E1000_ICR);
+
+	/* IMS will not auto-mask if INT_ASSERTED is not set, and if it is
+	 * not set, then the adapter didn't send an interrupt
+	 */
+	if (!(icr & E1000_ICR_INT_ASSERTED))
+		return IRQ_NONE;
+
+	igb_write_itr(q_vector);
+
+	igb_other_handler(adapter, icr, false);
+
+	igb_poll(q_vector);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static void igb_ring_irq_enable(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct e1000_hw *hw = &adapter->hw;
+
+	if ((q_vector->rx.ring && (adapter->rx_itr_setting & 3)) ||
+	    (!q_vector->rx.ring && (adapter->tx_itr_setting & 3))) {
+		if (adapter->num_q_vectors == 1)
+			igb_set_itr(q_vector);
+		else
+			igb_update_ring_itr(q_vector);
+	}
+
+	if (!test_bit(__IGB_DOWN, &adapter->state)) {
+		if (adapter->flags & IGB_FLAG_HAS_MSIX)
+			wr32(E1000_EIMS, q_vector->eims_value);
+		else
+			igb_irq_enable(adapter);
+	}
+}
+
+/**
+ *  igb_poll - service the Tx and Rx rings of a queue vector
+ *  @q_vector: pointer to the q_vector to service
+ **/
+static void igb_poll(struct igb_q_vector *q_vector)
+{
+	if (q_vector->tx.ring)
+		igb_clean_tx_irq(q_vector);
+
+	if (q_vector->rx.ring)
+		igb_clean_rx_irq(q_vector, 64);
+
+	igb_ring_irq_enable(q_vector);
+}
+
+/**
+ *  igb_clean_tx_irq - Reclaim resources after transmit completes
+ *  @q_vector: pointer to q_vector containing needed info
+ *
+ *  returns true if ring is completely cleaned
+ **/
+static bool igb_clean_tx_irq(struct igb_q_vector *q_vector)
+{
+	struct igb_adapter *adapter = q_vector->adapter;
+	struct igb_ring *tx_ring = q_vector->tx.ring;
+	struct igb_tx_buffer *tx_buffer;
+	union e1000_adv_tx_desc *tx_desc;
+	unsigned int total_bytes = 0, total_packets = 0;
+	unsigned int budget = q_vector->tx.work_limit;
+	unsigned int i = tx_ring->next_to_clean;
+
+	if (test_bit(__IGB_DOWN, &adapter->state))
+		return true;
+
+	tx_buffer = &tx_ring->tx_buffer_info[i];
+	tx_desc = IGB_TX_DESC(tx_ring, i);
+	i -= tx_ring->count;
+
+	do {
+		union e1000_adv_tx_desc *eop_desc = tx_buffer->next_to_watch;
+
+		/* if next_to_watch is not set then there is no work pending */
+		if (!eop_desc)
+			break;
+
+		/* prevent any other reads prior to eop_desc */
+		smp_rmb();
+
+		/* if DD is not set pending work has not been completed */
+		if (!(eop_desc->wb.status & cpu_to_le32(E1000_TXD_STAT_DD)))
+			break;
+
+		/* clear next_to_watch to prevent false hangs */
+		tx_buffer->next_to_watch = NULL;
+
+		/* update the statistics for this packet */
+		total_bytes += tx_buffer->bytecount;
+		total_packets += tx_buffer->gso_segs;
+
+		/* free the skb */
+		kfree_rtskb(tx_buffer->skb);
+
+		/* clear tx_buffer data */
+		tx_buffer->skb = NULL;
+
+		/* clear last DMA location and unmap remaining buffers */
+		while (tx_desc != eop_desc) {
+			tx_buffer++;
+			tx_desc++;
+			i++;
+			if (unlikely(!i)) {
+				i -= tx_ring->count;
+				tx_buffer = tx_ring->tx_buffer_info;
+				tx_desc = IGB_TX_DESC(tx_ring, 0);
+			}
+		}
+
+		/* move us one more past the eop_desc for start of next pkt */
+		tx_buffer++;
+		tx_desc++;
+		i++;
+		if (unlikely(!i)) {
+			i -= tx_ring->count;
+			tx_buffer = tx_ring->tx_buffer_info;
+			tx_desc = IGB_TX_DESC(tx_ring, 0);
+		}
+
+		/* issue prefetch for next Tx descriptor */
+		prefetch(tx_desc);
+
+		/* update budget accounting */
+		budget--;
+	} while (likely(budget));
+
+	i += tx_ring->count;
+	tx_ring->next_to_clean = i;
+	tx_ring->tx_stats.bytes += total_bytes;
+	tx_ring->tx_stats.packets += total_packets;
+	q_vector->tx.total_bytes += total_bytes;
+	q_vector->tx.total_packets += total_packets;
+
+	if (test_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags)) {
+		struct e1000_hw *hw = &adapter->hw;
+
+		/* Detect a transmit hang in hardware, this serializes the
+		 * check with the clearing of time_stamp and movement of i
+		 */
+		clear_bit(IGB_RING_FLAG_TX_DETECT_HANG, &tx_ring->flags);
+		if (tx_buffer->next_to_watch &&
+		    time_after(jiffies, tx_buffer->time_stamp +
+			       (adapter->tx_timeout_factor * HZ)) &&
+		    !(rd32(E1000_STATUS) & E1000_STATUS_TXOFF)) {
+
+			/* detected Tx unit hang */
+			dev_err(tx_ring->dev,
+				"Detected Tx Unit Hang\n"
+				"  Tx Queue             <%d>\n"
+				"  TDH                  <%x>\n"
+				"  TDT                  <%x>\n"
+				"  next_to_use          <%x>\n"
+				"  next_to_clean        <%x>\n"
+				"buffer_info[next_to_clean]\n"
+				"  time_stamp           <%lx>\n"
+				"  next_to_watch        <%p>\n"
+				"  jiffies              <%lx>\n"
+				"  desc.status          <%x>\n",
+				tx_ring->queue_index,
+				rd32(E1000_TDH(tx_ring->reg_idx)),
+				readl(tx_ring->tail),
+				tx_ring->next_to_use,
+				tx_ring->next_to_clean,
+				tx_buffer->time_stamp,
+				tx_buffer->next_to_watch,
+				jiffies,
+				tx_buffer->next_to_watch->wb.status);
+			rtnetif_stop_queue(tx_ring->netdev);
+
+			/* we are about to reset, no point in enabling stuff */
+			return true;
+		}
+	}
+
+#define TX_WAKE_THRESHOLD (DESC_NEEDED * 2)
+	if (unlikely(total_packets &&
+	    rtnetif_carrier_ok(tx_ring->netdev) &&
+	    igb_desc_unused(tx_ring) >= TX_WAKE_THRESHOLD)) {
+		/* Make sure that anybody stopping the queue after this
+		 * sees the new next_to_clean.
+		 */
+		smp_mb();
+		if (rtnetif_queue_stopped(tx_ring->netdev) &&
+		    !(test_bit(__IGB_DOWN, &adapter->state))) {
+			rtnetif_wake_queue(tx_ring->netdev);
+
+			tx_ring->tx_stats.restart_queue++;
+		}
+	}
+
+	return !!budget;
+}
+
+static struct rtskb *igb_fetch_rx_buffer(struct igb_ring *rx_ring,
+					   union e1000_adv_rx_desc *rx_desc)
+{
+	struct igb_rx_buffer *rx_buffer;
+	struct rtskb *skb;
+
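+	/* each frame is received into its own pre-allocated rtskb, so
+	 * fetching a buffer just detaches it from the ring
+	 */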
+	rx_buffer = &rx_ring->rx_buffer_info[rx_ring->next_to_clean];
+	skb = rx_buffer->skb;
+	prefetchw(skb->data);
+
+	/* pull the header of the skb in */
+	rtskb_put(skb, le16_to_cpu(rx_desc->wb.upper.length));
+	rx_buffer->skb = NULL;
+	rx_buffer->dma = 0;
+
+	return skb;
+}
+
+static inline void igb_rx_checksum(struct igb_ring *ring,
+				   union e1000_adv_rx_desc *rx_desc,
+				   struct rtskb *skb)
+{
+	skb->ip_summed = CHECKSUM_NONE;
+
+	/* Ignore Checksum bit is set */
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_IXSM))
+		return;
+
+	/* Rx checksum disabled via ethtool */
+	if (!(ring->netdev->features & NETIF_F_RXCSUM))
+		return;
+
+	/* TCP/UDP checksum error bit is set */
+	if (igb_test_staterr(rx_desc,
+			     E1000_RXDEXT_STATERR_TCPE |
+			     E1000_RXDEXT_STATERR_IPE)) {
+		/* work around errata with sctp packets where the TCPE aka
+		 * L4E bit is set incorrectly on 64 byte (60 byte w/o crc)
+		 * packets, (aka let the stack check the crc32c)
+		 */
+		if (!((skb->len == 60) &&
+		      test_bit(IGB_RING_FLAG_RX_SCTP_CSUM, &ring->flags))) {
+			ring->rx_stats.csum_err++;
+		}
+		/* let the stack verify checksum errors */
+		return;
+	}
+	/* It must be a TCP or UDP packet with a valid checksum */
+	if (igb_test_staterr(rx_desc, E1000_RXD_STAT_TCPCS |
+				      E1000_RXD_STAT_UDPCS))
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+	dev_dbg(ring->dev, "cksum success: bits %08X\n",
+		le32_to_cpu(rx_desc->wb.upper.status_error));
+}
+
+/**
+ *  igb_is_non_eop - process handling of non-EOP buffers
+ *  @rx_ring: Rx ring being processed
+ *  @rx_desc: Rx descriptor for current buffer
+ *  @skb: current socket buffer containing buffer in progress
+ *
+ *  This function updates next to clean.  If the buffer is an EOP buffer
+ *  this function exits returning false, otherwise it returns true to
+ *  indicate that more buffers belong to this frame; the caller drops such
+ *  fragments, since multi-buffer frames are not supported by this port.
+ **/
+static bool igb_is_non_eop(struct igb_ring *rx_ring,
+			   union e1000_adv_rx_desc *rx_desc)
+{
+	u32 ntc = rx_ring->next_to_clean + 1;
+
+	/* fetch, update, and store next to clean */
+	ntc = (ntc < rx_ring->count) ? ntc : 0;
+	rx_ring->next_to_clean = ntc;
+
+	prefetch(IGB_RX_DESC(rx_ring, ntc));
+
+	if (likely(igb_test_staterr(rx_desc, E1000_RXD_STAT_EOP)))
+		return false;
+
+	return true;
+}
+
+/**
+ *  igb_cleanup_headers - Check a completed frame for errors
+ *  @rx_ring: rx descriptor ring packet is being transacted on
+ *  @rx_desc: pointer to the EOP Rx descriptor
+ *  @skb: pointer to current skb being checked
+ *
+ *  Drops the frame if its descriptor reports a frame error, unless the
+ *  interface is configured to receive all traffic (NETIF_F_RXALL).
+ *
+ *  Returns true if an error was encountered and skb was freed.
+ **/
+static bool igb_cleanup_headers(struct igb_ring *rx_ring,
+				union e1000_adv_rx_desc *rx_desc,
+				struct rtskb *skb)
+{
+	if (unlikely((igb_test_staterr(rx_desc,
+				       E1000_RXDEXT_ERR_FRAME_ERR_MASK)))) {
+		struct rtnet_device *netdev = rx_ring->netdev;
+		if (!(netdev->features & NETIF_F_RXALL)) {
+			kfree_rtskb(skb);
+			return true;
+		}
+	}
+
+	return false;
+}
+
+/**
+ *  igb_process_skb_fields - Populate skb header fields from Rx descriptor
+ *  @rx_ring: rx descriptor ring packet is being transacted on
+ *  @rx_desc: pointer to the EOP Rx descriptor
+ *  @skb: pointer to current skb being populated
+ *
+ *  This function checks the ring, descriptor, and packet information in
+ *  order to populate the hash, checksum, VLAN, timestamp, protocol, and
+ *  other fields within the skb.
+ **/
+static void igb_process_skb_fields(struct igb_ring *rx_ring,
+				   union e1000_adv_rx_desc *rx_desc,
+				   struct rtskb *skb)
+{
+	igb_rx_checksum(rx_ring, rx_desc, skb);
+
+	skb->protocol = rt_eth_type_trans(skb, rx_ring->netdev);
+}
+
+static bool igb_clean_rx_irq(struct igb_q_vector *q_vector, const int budget)
+{
+	struct igb_ring *rx_ring = q_vector->rx.ring;
+	unsigned int total_bytes = 0, total_packets = 0;
+	u16 cleaned_count = igb_desc_unused(rx_ring);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtskb *skb;
+
+	while (likely(total_packets < budget)) {
+		union e1000_adv_rx_desc *rx_desc;
+
+		/* return some buffers to hardware, one at a time is too slow */
+		if (cleaned_count >= IGB_RX_BUFFER_WRITE) {
+			igb_alloc_rx_buffers(rx_ring, cleaned_count);
+			cleaned_count = 0;
+		}
+
+		rx_desc = IGB_RX_DESC(rx_ring, rx_ring->next_to_clean);
+
+		if (!rx_desc->wb.upper.status_error)
+			break;
+
+		/* This memory barrier is needed to keep us from reading
+		 * any other fields out of the rx_desc until we know the
+		 * descriptor has been written back
+		 */
+		rmb();
+
+		/* retrieve a buffer from the ring */
+		skb = igb_fetch_rx_buffer(rx_ring, rx_desc);
+		skb->time_stamp = time_stamp;
+
+		cleaned_count++;
+
+		/* fetch next buffer in frame if non-eop */
+		if (igb_is_non_eop(rx_ring, rx_desc)) {
+			kfree_rtskb(skb);
+			continue;
+		}
+
+		/* verify the packet layout is correct */
+		if (igb_cleanup_headers(rx_ring, rx_desc, skb))
+			continue;
+
+		/* probably a little skewed due to removing CRC */
+		total_bytes += skb->len;
+
+		/* populate checksum, timestamp, VLAN, and protocol */
+		igb_process_skb_fields(rx_ring, rx_desc, skb);
+
+		rtnetif_rx(skb);
+
+		/* reset skb pointer */
+		skb = NULL;
+
+		/* update budget accounting */
+		total_packets++;
+	}
+
+	rx_ring->rx_stats.packets += total_packets;
+	rx_ring->rx_stats.bytes += total_bytes;
+	q_vector->rx.total_packets += total_packets;
+	q_vector->rx.total_bytes += total_bytes;
+
+	if (cleaned_count)
+		igb_alloc_rx_buffers(rx_ring, cleaned_count);
+
+	if (total_packets)
+		rt_mark_stack_mgr(q_vector->adapter->netdev);
+
+	return total_packets < budget;
+}
+
+static bool igb_alloc_mapped_skb(struct igb_ring *rx_ring,
+				 struct igb_rx_buffer *bi)
+{
+	struct igb_adapter *adapter = rx_ring->q_vector->adapter;
+	struct rtskb *skb = bi->skb;
+	dma_addr_t dma = bi->dma;
+
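+	/* a non-zero DMA address means this slot still holds a mapped
+	 * buffer that can be reused as-is
+	 */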
+	if (dma)
+		return true;
+
+	if (likely(!skb)) {
+		skb = rtnetdev_alloc_rtskb(adapter->netdev,
+					rx_ring->rx_buffer_len + NET_IP_ALIGN);
+		if (!skb) {
+			rx_ring->rx_stats.alloc_failed++;
+			return false;
+		}
+
+		rtskb_reserve(skb, NET_IP_ALIGN);
+		skb->rtdev = adapter->netdev;
+
+		bi->skb = skb;
+		bi->dma = rtskb_data_dma_addr(skb, 0);
+	}
+
+	return true;
+}
+
+/**
+ *  igb_alloc_rx_buffers - Replace used receive buffers
+ *  @rx_ring: rx descriptor ring to refill
+ *  @cleaned_count: number of buffers to replace
+ **/
+void igb_alloc_rx_buffers(struct igb_ring *rx_ring, u16 cleaned_count)
+{
+	union e1000_adv_rx_desc *rx_desc;
+	struct igb_rx_buffer *bi;
+	u16 i = rx_ring->next_to_use;
+
+	/* nothing to do */
+	if (!cleaned_count)
+		return;
+
+	rx_desc = IGB_RX_DESC(rx_ring, i);
+	bi = &rx_ring->rx_buffer_info[i];
+	i -= rx_ring->count;
+
+	do {
+		if (!igb_alloc_mapped_skb(rx_ring, bi))
+			break;
+
+		/* Refresh the desc even if buffer_addrs didn't change
+		 * because each write-back erases this info.
+		 */
+		rx_desc->read.pkt_addr = cpu_to_le64(bi->dma);
+
+		rx_desc++;
+		bi++;
+		i++;
+		if (unlikely(!i)) {
+			rx_desc = IGB_RX_DESC(rx_ring, 0);
+			bi = rx_ring->rx_buffer_info;
+			i -= rx_ring->count;
+		}
+
+		/* clear the status bits for the next_to_use descriptor */
+		rx_desc->wb.upper.status_error = 0;
+
+		cleaned_count--;
+	} while (cleaned_count);
+
+	i += rx_ring->count;
+
+	if (rx_ring->next_to_use != i) {
+		/* record the next descriptor to use */
+		rx_ring->next_to_use = i;
+
+		/* Force memory writes to complete before letting h/w
+		 * know there are new descriptors to fetch.  (Only
+		 * applicable for weak-ordered memory model archs,
+		 * such as IA-64).
+		 */
+		wmb();
+		writel(i, rx_ring->tail);
+	}
+}
+
+/**
+ * igb_mii_ioctl - handle MII ioctls
+ * @netdev: network interface device structure
+ * @ifr: pointer to interface request structure
+ * @cmd: MII ioctl command
+ **/
+static int igb_mii_ioctl(struct rtnet_device *netdev, struct ifreq *ifr, int cmd)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct mii_ioctl_data *data = if_mii(ifr);
+
+	if (adapter->hw.phy.media_type != e1000_media_type_copper)
+		return -EOPNOTSUPP;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+		data->phy_id = adapter->hw.phy.addr;
+		break;
+	case SIOCGMIIREG:
+		if (igb_read_phy_reg(&adapter->hw, data->reg_num & 0x1F,
+				     &data->val_out))
+			return -EIO;
+		break;
+	case SIOCSMIIREG:
+	default:
+		return -EOPNOTSUPP;
+	}
+	return 0;
+}
+
+/**
+ * igb_ioctl - entry point for device ioctls
+ * @netdev: network interface device structure
+ * @ifr: pointer to interface request structure
+ * @cmd: ioctl command
+ **/
+static int igb_ioctl(struct rtnet_device *netdev, struct ifreq *ifr, int cmd)
+{
+	if (rtdm_in_rt_context())
+		return -ENOSYS;
+
+	switch (cmd) {
+	case SIOCGMIIPHY:
+	case SIOCGMIIREG:
+	case SIOCSMIIREG:
+		return igb_mii_ioctl(netdev, ifr, cmd);
+
+	default:
+		return -EOPNOTSUPP;
+	}
+}
+
+void igb_read_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	pci_read_config_word(adapter->pdev, reg, value);
+}
+
+void igb_write_pci_cfg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	pci_write_config_word(adapter->pdev, reg, *value);
+}
+
+s32 igb_read_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	if (pcie_capability_read_word(adapter->pdev, reg, value))
+		return -E1000_ERR_CONFIG;
+
+	return 0;
+}
+
+s32 igb_write_pcie_cap_reg(struct e1000_hw *hw, u32 reg, u16 *value)
+{
+	struct igb_adapter *adapter = hw->back;
+
+	if (pcie_capability_write_word(adapter->pdev, reg, *value))
+		return -E1000_ERR_CONFIG;
+
+	return 0;
+}
+
+static void igb_vlan_mode(struct rtnet_device *netdev, netdev_features_t features)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl;
+
+	/* VLAN tag insert/strip is always disabled; this port does not
+	 * support VLAN offload regardless of the requested features
+	 */
+	ctrl = rd32(E1000_CTRL);
+	ctrl &= ~E1000_CTRL_VME;
+	wr32(E1000_CTRL, ctrl);
+
+	igb_rlpml_set(adapter);
+}
+
+static int igb_vlan_rx_add_vid(struct rtnet_device *netdev,
+			       __be16 proto, u16 vid)
+{
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* add the filter since PF can receive vlans w/o entry in vlvf */
+	igb_vfta_set(hw, vid, true);
+
+	set_bit(vid, adapter->active_vlans);
+
+	return 0;
+}
+
+static void igb_restore_vlan(struct igb_adapter *adapter)
+{
+	u16 vid;
+
+	igb_vlan_mode(adapter->netdev, adapter->netdev->features);
+
+	for_each_set_bit(vid, adapter->active_vlans, VLAN_N_VID)
+		igb_vlan_rx_add_vid(adapter->netdev, htons(ETH_P_8021Q), vid);
+}
+
+static int __igb_shutdown(struct pci_dev *pdev, bool *enable_wake,
+			  bool runtime)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	u32 ctrl, rctl, status;
+	u32 wufc = runtime ? E1000_WUFC_LNKC : adapter->wol;
+#ifdef CONFIG_PM
+	int retval = 0;
+#endif
+
+	rtnetif_device_detach(netdev);
+
+	if (rtnetif_running(netdev))
+		__igb_close(netdev, true);
+
+	igb_clear_interrupt_scheme(adapter);
+
+#ifdef CONFIG_PM
+	retval = pci_save_state(pdev);
+	if (retval)
+		return retval;
+#endif
+
+	status = rd32(E1000_STATUS);
+	if (status & E1000_STATUS_LU)
+		wufc &= ~E1000_WUFC_LNKC;
+
+	if (wufc) {
+		igb_setup_rctl(adapter);
+		igb_set_rx_mode(netdev);
+
+		/* turn on all-multi mode if wake on multicast is enabled */
+		if (wufc & E1000_WUFC_MC) {
+			rctl = rd32(E1000_RCTL);
+			rctl |= E1000_RCTL_MPE;
+			wr32(E1000_RCTL, rctl);
+		}
+
+		ctrl = rd32(E1000_CTRL);
+		/* advertise wake from D3Cold */
+		#define E1000_CTRL_ADVD3WUC 0x00100000
+		/* phy power management enable */
+		#define E1000_CTRL_EN_PHY_PWR_MGMT 0x00200000
+		ctrl |= E1000_CTRL_ADVD3WUC;
+		wr32(E1000_CTRL, ctrl);
+
+		/* Allow time for pending master requests to run */
+		igb_disable_pcie_master(hw);
+
+		wr32(E1000_WUC, E1000_WUC_PME_EN);
+		wr32(E1000_WUFC, wufc);
+	} else {
+		wr32(E1000_WUC, 0);
+		wr32(E1000_WUFC, 0);
+	}
+
+	*enable_wake = wufc || adapter->en_mng_pt;
+	if (!*enable_wake)
+		igb_power_down_link(adapter);
+	else
+		igb_power_up_link(adapter);
+
+	/* Release control of h/w to f/w.  If f/w is AMT enabled, this
+	 * would have already happened in close and is redundant.
+	 */
+	igb_release_hw_control(adapter);
+
+	pci_disable_device(pdev);
+
+	return 0;
+}
+
+#ifdef CONFIG_PM
+#ifdef CONFIG_PM_SLEEP
+static int igb_suspend(struct device *dev)
+{
+	int retval;
+	bool wake;
+	struct pci_dev *pdev = to_pci_dev(dev);
+
+	retval = __igb_shutdown(pdev, &wake, false);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_PM_SLEEP */
+
+static int igb_resume(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	int err;
+
+	pci_set_power_state(pdev, PCI_D0);
+	pci_restore_state(pdev);
+	pci_save_state(pdev);
+
+	if (!pci_device_is_present(pdev))
+		return -ENODEV;
+	err = pci_enable_device_mem(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"igb: Cannot enable PCI device from suspend\n");
+		return err;
+	}
+	pci_set_master(pdev);
+
+	pci_enable_wake(pdev, PCI_D3hot, 0);
+	pci_enable_wake(pdev, PCI_D3cold, 0);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	igb_reset(adapter);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver.
+	 */
+	igb_get_hw_control(adapter);
+
+	wr32(E1000_WUS, ~0);
+
+	if (netdev->flags & IFF_UP) {
+		rtnl_lock();
+		err = __igb_open(netdev, true);
+		rtnl_unlock();
+		if (err)
+			return err;
+	}
+
+	rtnetif_device_attach(netdev);
+	return 0;
+}
+
+static int igb_runtime_idle(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	if (!igb_has_link(adapter))
+		pm_schedule_suspend(dev, MSEC_PER_SEC * 5);
+
+	return -EBUSY;
+}
+
+static int igb_runtime_suspend(struct device *dev)
+{
+	struct pci_dev *pdev = to_pci_dev(dev);
+	int retval;
+	bool wake;
+
+	retval = __igb_shutdown(pdev, &wake, true);
+	if (retval)
+		return retval;
+
+	if (wake) {
+		pci_prepare_to_sleep(pdev);
+	} else {
+		pci_wake_from_d3(pdev, false);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+
+	return 0;
+}
+
+static int igb_runtime_resume(struct device *dev)
+{
+	return igb_resume(dev);
+}
+#endif /* CONFIG_PM */
+
+static void igb_shutdown(struct pci_dev *pdev)
+{
+	bool wake;
+
+	__igb_shutdown(pdev, &wake, false);
+
+	if (system_state == SYSTEM_POWER_OFF) {
+		pci_wake_from_d3(pdev, wake);
+		pci_set_power_state(pdev, PCI_D3hot);
+	}
+}
+
+static int igb_pci_sriov_configure(struct pci_dev *dev, int num_vfs)
+{
+	return 0;
+}
+
+/**
+ *  igb_io_error_detected - called when PCI error is detected
+ *  @pdev: Pointer to PCI device
+ *  @state: The current pci connection state
+ *
+ *  This function is called after a PCI bus error affecting
+ *  this device has been detected.
+ **/
+static pci_ers_result_t igb_io_error_detected(struct pci_dev *pdev,
+					      pci_channel_state_t state)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	rtnetif_device_detach(netdev);
+
+	if (state == pci_channel_io_perm_failure)
+		return PCI_ERS_RESULT_DISCONNECT;
+
+	if (rtnetif_running(netdev))
+		igb_down(adapter);
+	pci_disable_device(pdev);
+
+	/* Request a slot reset. */
+	return PCI_ERS_RESULT_NEED_RESET;
+}
+
+/**
+ *  igb_io_slot_reset - called after the pci bus has been reset.
+ *  @pdev: Pointer to PCI device
+ *
+ *  Restart the card from scratch, as if from a cold boot. Implementation
+ *  resembles the first half of the igb_resume routine.
+ **/
+static pci_ers_result_t igb_io_slot_reset(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+	struct e1000_hw *hw = &adapter->hw;
+	pci_ers_result_t result;
+	int err;
+
+	if (pci_enable_device_mem(pdev)) {
+		dev_err(&pdev->dev,
+			"Cannot re-enable PCI device after reset.\n");
+		result = PCI_ERS_RESULT_DISCONNECT;
+	} else {
+		pci_set_master(pdev);
+		pci_restore_state(pdev);
+		pci_save_state(pdev);
+
+		pci_enable_wake(pdev, PCI_D3hot, 0);
+		pci_enable_wake(pdev, PCI_D3cold, 0);
+
+		igb_reset(adapter);
+		wr32(E1000_WUS, ~0);
+		result = PCI_ERS_RESULT_RECOVERED;
+	}
+
+	err = pci_aer_clear_nonfatal_status(pdev);
+	if (err) {
+		dev_err(&pdev->dev,
+			"pci_aer_clear_nonfatal_status failed 0x%x\n",
+			err);
+		/* non-fatal, continue */
+	}
+
+	return result;
+}
+
+/**
+ *  igb_io_resume - called when traffic can start flowing again.
+ *  @pdev: Pointer to PCI device
+ *
+ *  This callback is called when the error recovery driver tells us that
+ *  it's OK to resume normal operation. Implementation resembles the
+ *  second half of the igb_resume routine.
+ */
+static void igb_io_resume(struct pci_dev *pdev)
+{
+	struct rtnet_device *netdev = pci_get_drvdata(pdev);
+	struct igb_adapter *adapter = rtnetdev_priv(netdev);
+
+	if (rtnetif_running(netdev)) {
+		if (igb_up(adapter)) {
+			dev_err(&pdev->dev, "igb_up failed after reset\n");
+			return;
+		}
+	}
+
+	rtnetif_device_attach(netdev);
+
+	/* let the f/w know that the h/w is now under the control of the
+	 * driver.
+	 */
+	igb_get_hw_control(adapter);
+}
+
+static void igb_rar_set_qsel(struct igb_adapter *adapter, u8 *addr, u32 index,
+			     u8 qsel)
+{
+	u32 rar_low, rar_high;
+	struct e1000_hw *hw = &adapter->hw;
+
+	/* HW expects these in little endian so we reverse the byte order
+	 * from network order (big endian) to little endian
+	 */
+	rar_low = ((u32) addr[0] | ((u32) addr[1] << 8) |
+		   ((u32) addr[2] << 16) | ((u32) addr[3] << 24));
+	rar_high = ((u32) addr[4] | ((u32) addr[5] << 8));
+
+	/* Indicate to hardware the Address is Valid. */
+	rar_high |= E1000_RAH_AV;
+
+	if (hw->mac.type == e1000_82575)
+		rar_high |= E1000_RAH_POOL_1 * qsel;
+	else
+		rar_high |= E1000_RAH_POOL_1 << qsel;
+
+	wr32(E1000_RAL(index), rar_low);
+	wrfl();
+	wr32(E1000_RAH(index), rar_high);
+	wrfl();
+}
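+
+/*
+ * Worked example for the packing above (address made up for illustration):
+ * for the MAC address 00:1b:21:aa:bb:cc,
+ *   rar_low  = 0x00 | 0x1b<<8 | 0x21<<16 | 0xaa<<24 = 0xaa211b00,
+ *   rar_high = 0xbb | 0xcc<<8                       = 0x0000ccbb,
+ * and setting E1000_RAH_AV (bit 31) yields rar_high = 0x8000ccbb.
+ */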
+
+static void igb_init_dmac(struct igb_adapter *adapter, u32 pba)
+{
+	struct e1000_hw *hw = &adapter->hw;
+	u32 dmac_thr;
+	u16 hwm;
+
+	if (hw->mac.type > e1000_82580) {
+		if (adapter->flags & IGB_FLAG_DMAC) {
+			u32 reg;
+
+			/* force threshold to 0. */
+			wr32(E1000_DMCTXTH, 0);
+
+			/* DMA Coalescing high water mark needs to be greater
+			 * than the Rx threshold. Set hwm to PBA - max frame
+			 * size in 16B units, but no lower than PBA - 6KB.
+			 */
+			hwm = 64 * pba - adapter->max_frame_size / 16;
+			if (hwm < 64 * (pba - 6))
+				hwm = 64 * (pba - 6);
+			reg = rd32(E1000_FCRTC);
+			reg &= ~E1000_FCRTC_RTH_COAL_MASK;
+			reg |= ((hwm << E1000_FCRTC_RTH_COAL_SHIFT)
+				& E1000_FCRTC_RTH_COAL_MASK);
+			wr32(E1000_FCRTC, reg);
+
+			/* Set the DMA Coalescing Rx threshold to PBA - 2 * max
+			 * frame size, but no lower than PBA - 10KB.
+			 */
+			dmac_thr = pba - adapter->max_frame_size / 512;
+			if (dmac_thr < pba - 10)
+				dmac_thr = pba - 10;
+			reg = rd32(E1000_DMACR);
+			reg &= ~E1000_DMACR_DMACTHR_MASK;
+			reg |= ((dmac_thr << E1000_DMACR_DMACTHR_SHIFT)
+				& E1000_DMACR_DMACTHR_MASK);
+
+			/* transition to L0s or L1 if available */
+			reg |= (E1000_DMACR_DMAC_EN | E1000_DMACR_DMAC_LX_MASK);
+
+			/* watchdog timer = +/- 1000 usec, in 32 usec intervals */
+			reg |= (1000 >> 5);
+
+			/* Disable BMC-to-OS Watchdog Enable */
+			if (hw->mac.type != e1000_i354)
+				reg &= ~E1000_DMACR_DC_BMC2OSW_EN;
+
+			wr32(E1000_DMACR, reg);
+
+			/* no lower threshold to disable
+			 * coalescing (smart FIFO) - UTRESH=0
+			 */
+			wr32(E1000_DMCRTRH, 0);
+
+			reg = (IGB_DMCTLX_DCFLUSH_DIS | 0x4);
+
+			wr32(E1000_DMCTLX, reg);
+
+			/* free space in tx packet buffer to wake from
+			 * DMA coal
+			 */
+			wr32(E1000_DMCTXTH, (IGB_MIN_TXPBSIZE -
+			     (IGB_TX_BUF_4096 + adapter->max_frame_size)) >> 6);
+
+			/* make low power state decision controlled
+			 * by DMA coal
+			 */
+			reg = rd32(E1000_PCIEMISC);
+			reg &= ~E1000_PCIEMISC_LX_DECISION;
+			wr32(E1000_PCIEMISC, reg);
+		} /* endif adapter->dmac is not disabled */
+	} else if (hw->mac.type == e1000_82580) {
+		u32 reg = rd32(E1000_PCIEMISC);
+
+		wr32(E1000_PCIEMISC, reg & ~E1000_PCIEMISC_LX_DECISION);
+		wr32(E1000_DMACR, 0);
+	}
+}
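+
+/*
+ * Worked example for the thresholds above (values assumed): with a packet
+ * buffer of pba = 34 KB and max_frame_size = 1522 bytes,
+ *   hwm      = 64 * 34 - 1522 / 16 = 2176 - 95 = 2081 (16-byte units),
+ *              above the floor 64 * (34 - 6) = 1792, so it is kept;
+ *   dmac_thr = 34 - 1522 / 512    = 34 - 2    = 32   (KB),
+ *              above the floor 34 - 10 = 24, so it is kept.
+ */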
+
+/**
+ *  igb_read_i2c_byte - Reads a byte over the I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to read
+ *  @dev_addr: device address
+ *  @data: value read
+ *
+ *  Performs byte read operation over I2C interface at
+ *  a specified device address.
+ **/
+s32 igb_read_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		      u8 dev_addr, u8 *data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = 0;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		return E1000_ERR_SWFW_SYNC;
+
+	status = i2c_smbus_read_byte_data(this_client, byte_offset);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status < 0)
+		return E1000_ERR_I2C;
+
+	*data = status;
+	return 0;
+}
+
+/**
+ *  igb_write_i2c_byte - Writes a byte over the I2C interface
+ *  @hw: pointer to hardware structure
+ *  @byte_offset: byte offset to write
+ *  @dev_addr: device address
+ *  @data: value to write
+ *
+ *  Performs byte write operation over I2C interface at
+ *  a specified device address.
+ **/
+s32 igb_write_i2c_byte(struct e1000_hw *hw, u8 byte_offset,
+		       u8 dev_addr, u8 data)
+{
+	struct igb_adapter *adapter = container_of(hw, struct igb_adapter, hw);
+	struct i2c_client *this_client = adapter->i2c_client;
+	s32 status;
+	u16 swfw_mask = E1000_SWFW_PHY0_SM;
+
+	if (!this_client)
+		return E1000_ERR_I2C;
+
+	if (hw->mac.ops.acquire_swfw_sync(hw, swfw_mask))
+		return E1000_ERR_SWFW_SYNC;
+	status = i2c_smbus_write_byte_data(this_client, byte_offset, data);
+	hw->mac.ops.release_swfw_sync(hw, swfw_mask);
+
+	if (status)
+		return E1000_ERR_I2C;
+
+	return 0;
+}
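+
+/*
+ * Illustrative sketch, not part of the driver: the two helpers above can
+ * be combined into a read-modify-write of a byte-wide register behind the
+ * I2C interface.  The register offset and device address below are
+ * made-up placeholders, not i350 facts.
+ */
+#if 0
+static s32 example_i2c_set_bits(struct e1000_hw *hw, u8 offset, u8 mask)
+{
+	u8 val;
+	s32 ret;
+
+	ret = igb_read_i2c_byte(hw, offset, 0x50 /* assumed address */, &val);
+	if (ret)
+		return ret;
+
+	return igb_write_i2c_byte(hw, offset, 0x50, val | mask);
+}
+#endif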
+
+int igb_reinit_queues(struct igb_adapter *adapter)
+{
+	struct rtnet_device *netdev = adapter->netdev;
+	struct pci_dev *pdev = adapter->pdev;
+	int err = 0;
+
+	if (rtnetif_running(netdev))
+		igb_close(netdev);
+
+	igb_reset_interrupt_capability(adapter);
+
+	if (igb_init_interrupt_scheme(adapter, true)) {
+		dev_err(&pdev->dev, "Unable to allocate memory for queues\n");
+		return -ENOMEM;
+	}
+
+	if (rtnetif_running(netdev))
+		err = igb_open(netdev);
+
+	return err;
+}
+/* igb_main.c */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c
new file mode 100644
index 0000000..26aa66e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/loopback.c
@@ -0,0 +1,139 @@
+/* loopback.c
+ *
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ * extended by Jose Carlos Billalabeitia and Jan Kiszka
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+
+#include <linux/printk.h>
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <linux/netdevice.h>
+
+#include <rtnet_port.h>
+#include <stack_mgr.h>
+
+MODULE_AUTHOR("Maintainer: Jan Kiszka <Jan.Kiszka@web.de>");
+MODULE_DESCRIPTION("RTnet loopback driver");
+MODULE_LICENSE("GPL");
+
+static struct rtnet_device *rt_loopback_dev;
+
+/***
+ *  rt_loopback_open - enable the loopback interface
+ *  @rtdev: RTnet device to be opened
+ */
+static int rt_loopback_open(struct rtnet_device *rtdev)
+{
+	rt_stack_connect(rtdev, &STACK_manager);
+	rtnetif_start_queue(rtdev);
+
+	return 0;
+}
+
+/***
+ *  rt_loopback_close - disable the loopback interface
+ *  @rtdev: RTnet device to be closed
+ */
+static int rt_loopback_close(struct rtnet_device *rtdev)
+{
+	rtnetif_stop_queue(rtdev);
+	rt_stack_disconnect(rtdev);
+
+	return 0;
+}
+
+/***
+ *  rt_loopback_xmit - begin packet transmission
+ *  @rtskb: packet to be sent
+ *  @rtdev: device the packet is sent through
+ */
+static int rt_loopback_xmit(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	/* write transmission stamp - in case any protocol ever gets the idea
+	 * to ask the loopback device for this service... */
+	if (rtskb->xmit_stamp)
+		*rtskb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *rtskb->xmit_stamp);
+
+	/* make sure that critical fields are re-initialised */
+	rtskb->chain_end = rtskb;
+
+	/* parse the Ethernet header as usual */
+	rtskb->protocol = rt_eth_type_trans(rtskb, rtdev);
+
+	rt_stack_deliver(rtskb);
+
+	return 0;
+}
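+
+/*
+ * Note on the xmit_stamp handling above (illustrative reading of the
+ * code): a protocol that wants the send time of a packet points
+ * rtskb->xmit_stamp at a spot inside the frame and pre-loads it with a
+ * relative offset (usually 0); the driver then patches in
+ * offset + rtdm_clock_read(), in network byte order, at transmit time.
+ */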
+
+/***
+ *  loopback_init
+ */
+static int __init loopback_init(void)
+{
+	int err;
+	struct rtnet_device *rtdev;
+
+	pr_info("initializing loopback interface...\n");
+
+	if ((rtdev = rt_alloc_etherdev(0, 1)) == NULL)
+		return -ENODEV;
+
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+
+	strcpy(rtdev->name, "rtlo");
+
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->open = &rt_loopback_open;
+	rtdev->stop = &rt_loopback_close;
+	rtdev->hard_start_xmit = &rt_loopback_xmit;
+	rtdev->flags |= IFF_LOOPBACK;
+	rtdev->flags &= ~IFF_BROADCAST;
+	rtdev->features |= NETIF_F_LLTX;
+
+	if ((err = rt_register_rtnetdev(rtdev)) != 0) {
+		rtdev_free(rtdev);
+		return err;
+	}
+
+	rt_loopback_dev = rtdev;
+
+	return 0;
+}
+
+/***
+ *  loopback_cleanup
+ */
+static void __exit loopback_cleanup(void)
+{
+	struct rtnet_device *rtdev = rt_loopback_dev;
+
+	pr_info("removing loopback interface...\n");
+
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+
+	rtdev_free(rtdev);
+}
+
+module_init(loopback_init);
+module_exit(loopback_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c
new file mode 100644
index 0000000..77957d9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/macb.c
@@ -0,0 +1,1828 @@
+/*
+ * Cadence MACB/GEM Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * RTnet porting by Cristiano Mantovani & Stefano Banzi (Marposs SpA).
+ * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org>
+ */
+
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
+#include <linux/clk.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/circ_buf.h>
+#include <linux/slab.h>
+#include <linux/init.h>
+#include <linux/io.h>
+#include <linux/gpio.h>
+#include <linux/interrupt.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/dma-mapping.h>
+#include <linux/platform_data/macb.h>
+#include <linux/platform_device.h>
+#include <linux/phy.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+#include <linux/of_mdio.h>
+#include <linux/of_net.h>
+#include <linux/pinctrl/consumer.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtnet_port.h>
+#include <rtskb.h>
+
+#include "rt_macb.h"
+
+#define MACB_RX_BUFFER_SIZE	128
+#define RX_BUFFER_MULTIPLE	64  /* bytes */
+#define RX_RING_SIZE		512 /* must be power of 2 */
+#define RX_RING_BYTES		(sizeof(struct macb_dma_desc) * RX_RING_SIZE)
+
+#define TX_RING_SIZE		128 /* must be power of 2 */
+#define TX_RING_BYTES		(sizeof(struct macb_dma_desc) * TX_RING_SIZE)
+
+/* level of occupied TX descriptors under which we wake up TX process */
+#define MACB_TX_WAKEUP_THRESH	(3 * TX_RING_SIZE / 4)
+
+#define MACB_RX_INT_FLAGS	(MACB_BIT(RCOMP) | MACB_BIT(RXUBR)	\
+				 | MACB_BIT(ISR_ROVR))
+#define MACB_TX_ERR_FLAGS	(MACB_BIT(ISR_TUND)			\
+					| MACB_BIT(ISR_RLE)		\
+					| MACB_BIT(TXERR))
+#define MACB_TX_INT_FLAGS	(MACB_TX_ERR_FLAGS | MACB_BIT(TCOMP))
+
+/*
+ * Graceful stop timeouts in us. We should allow up to
+ * 1 frame time (10 Mbits/s, full-duplex, ignoring collisions)
+ */
+#define MACB_HALT_TIMEOUT	1230
+
+/* Ring buffer accessors */
+static unsigned int macb_tx_ring_wrap(unsigned int index)
+{
+	return index & (TX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_tx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_ring[macb_tx_ring_wrap(index)];
+}
+
+static struct macb_tx_skb *macb_tx_skb(struct macb *bp, unsigned int index)
+{
+	return &bp->tx_skb[macb_tx_ring_wrap(index)];
+}
+
+static unsigned int macb_rx_ring_wrap(unsigned int index)
+{
+	return index & (RX_RING_SIZE - 1);
+}
+
+static struct macb_dma_desc *macb_rx_desc(struct macb *bp, unsigned int index)
+{
+	return &bp->rx_ring[macb_rx_ring_wrap(index)];
+}
+
+static void *macb_rx_buffer(struct macb *bp, unsigned int index)
+{
+	return bp->rx_buffers + bp->rx_buffer_size * macb_rx_ring_wrap(index);
+}
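+
+/*
+ * Example for the accessors above: the ring sizes are powers of two, so
+ * "index & (SIZE - 1)" is a cheap "index % SIZE"; with TX_RING_SIZE = 128,
+ * macb_tx_ring_wrap(130) = 130 & 127 = 2.  Head and tail can therefore be
+ * free-running counters that are only masked on access.
+ */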
+
+void rtmacb_set_hwaddr(struct macb *bp)
+{
+	u32 bottom;
+	u16 top;
+
+	bottom = cpu_to_le32(*((u32 *)bp->dev->dev_addr));
+	macb_or_gem_writel(bp, SA1B, bottom);
+	top = cpu_to_le16(*((u16 *)(bp->dev->dev_addr + 4)));
+	macb_or_gem_writel(bp, SA1T, top);
+
+	/* Clear unused address register sets */
+	macb_or_gem_writel(bp, SA2B, 0);
+	macb_or_gem_writel(bp, SA2T, 0);
+	macb_or_gem_writel(bp, SA3B, 0);
+	macb_or_gem_writel(bp, SA3T, 0);
+	macb_or_gem_writel(bp, SA4B, 0);
+	macb_or_gem_writel(bp, SA4T, 0);
+}
+EXPORT_SYMBOL_GPL(rtmacb_set_hwaddr);
+
+void rtmacb_get_hwaddr(struct macb *bp)
+{
+	struct macb_platform_data *pdata;
+	u32 bottom;
+	u16 top;
+	u8 addr[6];
+	int i;
+
+	pdata = dev_get_platdata(&bp->pdev->dev);
+
+	/* Check all 4 address registers for a valid address */
+	for (i = 0; i < 4; i++) {
+		bottom = macb_or_gem_readl(bp, SA1B + i * 8);
+		top = macb_or_gem_readl(bp, SA1T + i * 8);
+
+		if (pdata && pdata->rev_eth_addr) {
+			addr[5] = bottom & 0xff;
+			addr[4] = (bottom >> 8) & 0xff;
+			addr[3] = (bottom >> 16) & 0xff;
+			addr[2] = (bottom >> 24) & 0xff;
+			addr[1] = top & 0xff;
+			addr[0] = (top & 0xff00) >> 8;
+		} else {
+			addr[0] = bottom & 0xff;
+			addr[1] = (bottom >> 8) & 0xff;
+			addr[2] = (bottom >> 16) & 0xff;
+			addr[3] = (bottom >> 24) & 0xff;
+			addr[4] = top & 0xff;
+			addr[5] = (top >> 8) & 0xff;
+		}
+
+		if (is_valid_ether_addr(addr)) {
+			memcpy(bp->dev->dev_addr, addr, sizeof(addr));
+			return;
+		}
+	}
+}
+EXPORT_SYMBOL_GPL(rtmacb_get_hwaddr);
+
+static int macb_mdio_read(struct mii_bus *bus, int mii_id, int regnum)
+{
+	struct macb *bp = bus->priv;
+	int value;
+
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_READ)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
+			      | MACB_BF(CODE, MACB_MAN_CODE)));
+
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
+
+	value = MACB_BFEXT(DATA, macb_readl(bp, MAN));
+
+	return value;
+}
+
+static int macb_mdio_write(struct mii_bus *bus, int mii_id, int regnum,
+			   u16 value)
+{
+	struct macb *bp = bus->priv;
+
+	macb_writel(bp, MAN, (MACB_BF(SOF, MACB_MAN_SOF)
+			      | MACB_BF(RW, MACB_MAN_WRITE)
+			      | MACB_BF(PHYA, mii_id)
+			      | MACB_BF(REGA, regnum)
+			      | MACB_BF(CODE, MACB_MAN_CODE)
+			      | MACB_BF(DATA, value)));
+
+	/* wait for end of transfer */
+	while (!MACB_BFEXT(IDLE, macb_readl(bp, NSR)))
+		cpu_relax();
+
+	return 0;
+}
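+
+/*
+ * The two helpers above compose an IEEE 802.3 Clause 22 management frame
+ * in the MAN register - start-of-frame, read/write opcode, 5-bit PHY
+ * address, 5-bit register address, turnaround code and 16 data bits -
+ * then busy-wait on the NSR IDLE flag until the frame has been shifted
+ * out on the MDIO wire.
+ */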
+
+/**
+ * macb_set_tx_clk() - Set the TX clock to match the link speed
+ * @clk: pointer to the clock to change
+ * @speed: link speed (SPEED_10/100/1000) that selects the target rate in Hz
+ * @dev: pointer to the struct rtnet_device
+ */
+static void macb_set_tx_clk(struct clk *clk, int speed, struct rtnet_device *dev)
+{
+	long ferr, rate, rate_rounded;
+
+	switch (speed) {
+	case SPEED_10:
+		rate = 2500000;
+		break;
+	case SPEED_100:
+		rate = 25000000;
+		break;
+	case SPEED_1000:
+		rate = 125000000;
+		break;
+	default:
+		return;
+	}
+
+	rate_rounded = clk_round_rate(clk, rate);
+	if (rate_rounded < 0)
+		return;
+
+	/* RGMII allows 50 ppm frequency error. Test and warn if this limit
+	 * is not satisfied.
+	 */
+	ferr = abs(rate_rounded - rate);
+	ferr = DIV_ROUND_UP(ferr, rate / 100000);
+	if (ferr > 5)
+		rtdev_warn(dev, "unable to generate target frequency: %ld Hz\n",
+				rate);
+
+	if (clk_set_rate(clk, rate_rounded))
+		rtdev_err(dev, "adjusting tx_clk failed.\n");
+}
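+
+/*
+ * Worked example for the 50 ppm check above (numbers assumed): ferr is
+ * the frequency error in units of 10 ppm, so ferr > 5 means more than
+ * 50 ppm.  For rate = 125000000 Hz and rate_rounded = 124987500 Hz,
+ * ferr = DIV_ROUND_UP(12500, 1250) = 10, i.e. 100 ppm, so the driver
+ * warns but still programs the rounded rate.
+ */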
+
+struct macb_dummy_netdev_priv {
+	struct rtnet_device *rtdev;
+};
+
+static void macb_handle_link_change(struct net_device *nrt_dev)
+{
+	struct macb_dummy_netdev_priv *p = netdev_priv(nrt_dev);
+	struct rtnet_device *dev = p->rtdev;
+	struct macb *bp = rtnetdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
+	unsigned long flags;
+
+	int status_change = 0;
+
+	rtdm_lock_get_irqsave(&bp->lock, flags);
+
+	if (phydev->link) {
+		if ((bp->speed != phydev->speed) ||
+		    (bp->duplex != phydev->duplex)) {
+			u32 reg;
+
+			reg = macb_readl(bp, NCFGR);
+			reg &= ~(MACB_BIT(SPD) | MACB_BIT(FD));
+			if (macb_is_gem(bp))
+				reg &= ~GEM_BIT(GBE);
+
+			if (phydev->duplex)
+				reg |= MACB_BIT(FD);
+			if (phydev->speed == SPEED_100)
+				reg |= MACB_BIT(SPD);
+			if (phydev->speed == SPEED_1000)
+				reg |= GEM_BIT(GBE);
+
+			macb_or_gem_writel(bp, NCFGR, reg);
+
+			bp->speed = phydev->speed;
+			bp->duplex = phydev->duplex;
+			status_change = 1;
+		}
+	}
+
+	if (phydev->link != bp->link) {
+		if (!phydev->link) {
+			bp->speed = 0;
+			bp->duplex = -1;
+		}
+		bp->link = phydev->link;
+
+		status_change = 1;
+	}
+
+	rtdm_lock_put_irqrestore(&bp->lock, flags);
+
+	if (!IS_ERR(bp->tx_clk))
+		macb_set_tx_clk(bp->tx_clk, phydev->speed, dev);
+
+	if (status_change) {
+		if (phydev->link) {
+			rtnetif_carrier_on(dev);
+			rtdev_info(dev, "link up (%d/%s)\n",
+				    phydev->speed,
+				    phydev->duplex == DUPLEX_FULL ?
+				    "Full" : "Half");
+		} else {
+			rtnetif_carrier_off(dev);
+			rtdev_info(dev, "link down\n");
+		}
+	}
+}
+
+/* based on au1000_eth.c */
+static int macb_mii_probe(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	struct macb_dummy_netdev_priv *p;
+	struct macb_platform_data *pdata;
+	struct phy_device *phydev;
+	struct net_device *dummy;
+	int phy_irq;
+	int ret;
+
+	phydev = phy_find_first(bp->mii_bus);
+	if (!phydev) {
+		rtdev_err(dev, "no PHY found\n");
+		return -ENXIO;
+	}
+
+	pdata = dev_get_platdata(&bp->pdev->dev);
+	if (pdata && gpio_is_valid(pdata->phy_irq_pin)) {
+		ret = devm_gpio_request(&bp->pdev->dev, pdata->phy_irq_pin, "phy int");
+		if (!ret) {
+			phy_irq = gpio_to_irq(pdata->phy_irq_pin);
+			phydev->irq = (phy_irq < 0) ? PHY_POLL : phy_irq;
+		}
+	}
+
+	dummy = alloc_etherdev(sizeof(*p));
+	if (!dummy)
+		return -ENOMEM;
+	p = netdev_priv(dummy);
+	p->rtdev = dev;
+	bp->phy_phony_net_device = dummy;
+
+	/* attach the mac to the phy */
+	ret = phy_connect_direct(dummy, phydev, &macb_handle_link_change,
+				 bp->phy_interface);
+	if (ret) {
+		rtdev_err(dev, "Could not attach to PHY\n");
+		return ret;
+	}
+
+	/* mask with MAC supported features */
+	if (macb_is_gem(bp))
+		phydev->supported &= PHY_GBIT_FEATURES;
+	else
+		phydev->supported &= PHY_BASIC_FEATURES;
+
+	phydev->advertising = phydev->supported;
+
+	bp->link = 0;
+	bp->speed = 0;
+	bp->duplex = -1;
+	bp->phy_dev = phydev;
+
+	return 0;
+}
+
+int rtmacb_mii_init(struct macb *bp)
+{
+	struct macb_platform_data *pdata;
+	struct device_node *np;
+	int err = -ENXIO, i;
+
+	/* Enable management port */
+	macb_writel(bp, NCR, MACB_BIT(MPE));
+
+	bp->mii_bus = mdiobus_alloc();
+	if (bp->mii_bus == NULL) {
+		err = -ENOMEM;
+		goto err_out;
+	}
+
+	bp->mii_bus->name = "MACB_mii_bus";
+	bp->mii_bus->read = &macb_mdio_read;
+	bp->mii_bus->write = &macb_mdio_write;
+	snprintf(bp->mii_bus->id, MII_BUS_ID_SIZE, "%s-%x",
+		bp->pdev->name, bp->pdev->id);
+	bp->mii_bus->priv = bp;
+	bp->mii_bus->parent = &bp->pdev->dev;
+	pdata = dev_get_platdata(&bp->pdev->dev);
+
+	bp->mii_bus->irq = kmalloc(sizeof(int)*PHY_MAX_ADDR, GFP_KERNEL);
+	if (!bp->mii_bus->irq) {
+		err = -ENOMEM;
+		goto err_out_free_mdiobus;
+	}
+
+	np = bp->pdev->dev.of_node;
+	if (np) {
+		/* try dt phy registration */
+		err = of_mdiobus_register(bp->mii_bus, np);
+
+		/* fall back to standard PHY registration if no PHY was
+		 * found during DT PHY registration */
+		if (!err && !phy_find_first(bp->mii_bus)) {
+			for (i = 0; i < PHY_MAX_ADDR; i++) {
+				struct phy_device *phydev;
+
+				phydev = mdiobus_scan(bp->mii_bus, i);
+				if (IS_ERR(phydev)) {
+					err = PTR_ERR(phydev);
+					break;
+				}
+			}
+
+			if (err)
+				goto err_out_unregister_bus;
+		}
+	} else {
+		for (i = 0; i < PHY_MAX_ADDR; i++)
+			bp->mii_bus->irq[i] = PHY_POLL;
+
+		if (pdata)
+			bp->mii_bus->phy_mask = pdata->phy_mask;
+
+		err = mdiobus_register(bp->mii_bus);
+	}
+
+	if (err)
+		goto err_out_free_mdio_irq;
+
+	err = macb_mii_probe(bp->dev);
+	if (err)
+		goto err_out_unregister_bus;
+
+	return 0;
+
+err_out_unregister_bus:
+	mdiobus_unregister(bp->mii_bus);
+err_out_free_mdio_irq:
+	kfree(bp->mii_bus->irq);
+err_out_free_mdiobus:
+	mdiobus_free(bp->mii_bus);
+err_out:
+	return err;
+}
+EXPORT_SYMBOL_GPL(rtmacb_mii_init);
+
+static void macb_update_stats(struct macb *bp)
+{
+	u32 __iomem *reg = bp->regs + MACB_PFR;
+	u32 *p = &bp->hw_stats.macb.rx_pause_frames;
+	u32 *end = &bp->hw_stats.macb.tx_pause_frames + 1;
+
+	WARN_ON((unsigned long)(end - p - 1) != (MACB_TPF - MACB_PFR) / 4);
+
+	for (; p < end; p++, reg++)
+		*p += __raw_readl(reg);
+}
+
+static int macb_halt_tx(struct macb *bp)
+{
+	unsigned long	halt_time, timeout;
+	u32		status;
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(THALT));
+
+	timeout = jiffies + usecs_to_jiffies(MACB_HALT_TIMEOUT);
+	do {
+		halt_time = jiffies;
+		status = macb_readl(bp, TSR);
+		if (!(status & MACB_BIT(TGO)))
+			return 0;
+
+		usleep_range(10, 250);
+	} while (time_before(halt_time, timeout));
+
+	return -ETIMEDOUT;
+}
+
+static void macb_tx_error_task(struct work_struct *work)
+{
+	struct macb	*bp = container_of(work, struct macb, tx_error_task);
+	struct macb_tx_skb	*tx_skb;
+	struct rtskb		*skb;
+	unsigned int		tail;
+
+	rtdev_vdbg(bp->dev, "macb_tx_error_task: t = %u, h = %u\n",
+		    bp->tx_tail, bp->tx_head);
+
+	/* Make sure nobody is trying to queue up new packets */
+	rtnetif_stop_queue(bp->dev);
+
+	/*
+	 * Stop transmission now
+	 * (in case we have just queued new packets)
+	 */
+	if (macb_halt_tx(bp))
+		/* Just complain for now, reinitializing TX path can be good */
+		rtdev_err(bp->dev, "BUG: halt tx timed out\n");
+
+	/* No need for the lock here as nobody will interrupt us anymore */
+
+	/*
+	 * Treat frames in TX queue including the ones that caused the error.
+	 * Free transmit buffers in upper layer.
+	 */
+	for (tail = bp->tx_tail; tail != bp->tx_head; tail++) {
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
+
+		desc = macb_tx_desc(bp, tail);
+		ctrl = desc->ctrl;
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		if (ctrl & MACB_BIT(TX_USED)) {
+			rtdev_vdbg(bp->dev, "txerr skb %u (data %p) TX complete\n",
+				    macb_tx_ring_wrap(tail), skb->data);
+			bp->stats.tx_packets++;
+			bp->stats.tx_bytes += skb->len;
+		} else {
+			/*
+			 * "Buffers exhausted mid-frame" errors may only happen
+			 * if the driver is buggy, so complain loudly about those.
+			 * Statistics are updated by hardware.
+			 */
+			if (ctrl & MACB_BIT(TX_BUF_EXHAUSTED))
+				rtdev_err(bp->dev,
+					   "BUG: TX buffers exhausted mid-frame\n");
+
+			desc->ctrl = ctrl | MACB_BIT(TX_USED);
+		}
+
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+				 DMA_TO_DEVICE);
+		tx_skb->skb = NULL;
+		dev_kfree_rtskb(skb);
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	/* Reinitialize the TX desc queue */
+	macb_writel(bp, TBQP, bp->tx_ring_dma);
+	/* Make TX ring reflect state of hardware */
+	bp->tx_head = bp->tx_tail = 0;
+
+	/* Now we are ready to start transmission again */
+	rtnetif_wake_queue(bp->dev);
+
+	/* Housework before enabling TX IRQ */
+	macb_writel(bp, TSR, macb_readl(bp, TSR));
+	macb_writel(bp, IER, MACB_TX_INT_FLAGS);
+}
+
+static void macb_tx_interrupt(struct macb *bp)
+{
+	unsigned int tail;
+	unsigned int head;
+	u32 status;
+
+	status = macb_readl(bp, TSR);
+	macb_writel(bp, TSR, status);
+
+	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+		macb_writel(bp, ISR, MACB_BIT(TCOMP));
+
+	rtdev_vdbg(bp->dev, "macb_tx_interrupt status = 0x%03lx\n",
+		(unsigned long)status);
+
+	head = bp->tx_head;
+	for (tail = bp->tx_tail; tail != head; tail++) {
+		struct macb_tx_skb	*tx_skb;
+		struct rtskb		*skb;
+		struct macb_dma_desc	*desc;
+		u32			ctrl;
+
+		desc = macb_tx_desc(bp, tail);
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		ctrl = desc->ctrl;
+
+		if (!(ctrl & MACB_BIT(TX_USED)))
+			break;
+
+		tx_skb = macb_tx_skb(bp, tail);
+		skb = tx_skb->skb;
+
+		rtdev_vdbg(bp->dev, "skb %u (data %p) TX complete\n",
+			macb_tx_ring_wrap(tail), skb->data);
+		dma_unmap_single(&bp->pdev->dev, tx_skb->mapping, skb->len,
+				 DMA_TO_DEVICE);
+		bp->stats.tx_packets++;
+		bp->stats.tx_bytes += skb->len;
+		tx_skb->skb = NULL;
+		dev_kfree_rtskb(skb);
+	}
+
+	bp->tx_tail = tail;
+	if (rtnetif_queue_stopped(bp->dev)
+			&& CIRC_CNT(bp->tx_head, bp->tx_tail,
+				    TX_RING_SIZE) <= MACB_TX_WAKEUP_THRESH)
+		rtnetif_wake_queue(bp->dev);
+}
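+
+/*
+ * Note on the wake-up condition above: CIRC_CNT() counts the descriptors
+ * still pending between tx_tail and tx_head.  With TX_RING_SIZE = 128 and
+ * MACB_TX_WAKEUP_THRESH = 96, a queue stopped on ring-full is only
+ * rewoken once at least a quarter of the ring has been reclaimed, which
+ * gives the producer some hysteresis.
+ */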
+
+static void gem_rx_refill(struct macb *bp)
+{
+	unsigned int		entry;
+	struct rtskb		*skb;
+	dma_addr_t		paddr;
+
+	while (CIRC_SPACE(bp->rx_prepared_head, bp->rx_tail, RX_RING_SIZE) > 0) {
+		entry = macb_rx_ring_wrap(bp->rx_prepared_head);
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		bp->rx_prepared_head++;
+
+		if (bp->rx_skbuff[entry] == NULL) {
+			/* allocate rtskb for this free entry in ring */
+			skb = rtnetdev_alloc_rtskb(bp->dev, bp->rx_buffer_size);
+			if (unlikely(skb == NULL)) {
+				rtdev_err(bp->dev,
+					   "Unable to allocate sk_buff\n");
+				break;
+			}
+
+			/* now fill corresponding descriptor entry */
+			paddr = dma_map_single(&bp->pdev->dev, skb->data,
+					       bp->rx_buffer_size, DMA_FROM_DEVICE);
+			if (dma_mapping_error(&bp->pdev->dev, paddr)) {
+				dev_kfree_rtskb(skb);
+				break;
+			}
+
+			bp->rx_skbuff[entry] = skb;
+
+			if (entry == RX_RING_SIZE - 1)
+				paddr |= MACB_BIT(RX_WRAP);
+			bp->rx_ring[entry].addr = paddr;
+			bp->rx_ring[entry].ctrl = 0;
+
+			/* properly align Ethernet header */
+			rtskb_reserve(skb, NET_IP_ALIGN);
+		}
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	rtdev_vdbg(bp->dev, "rx ring: prepared head %d, tail %d\n",
+		   bp->rx_prepared_head, bp->rx_tail);
+}
+
+/* Mark DMA descriptors from begin up to and not including end as unused */
+static void discard_partial_frame(struct macb *bp, unsigned int begin,
+				  unsigned int end)
+{
+	unsigned int frag;
+
+	for (frag = begin; frag != end; frag++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	/*
+	 * When this happens, the hardware stats registers for
+	 * whatever caused this is updated, so we don't have to record
+	 * anything.
+	 */
+}
+
+static int gem_rx(struct macb *bp, int budget, nanosecs_abs_t *time_stamp)
+{
+	unsigned int		len;
+	unsigned int		entry;
+	struct rtskb		*skb;
+	struct macb_dma_desc	*desc;
+	int			count = 0, status;
+
+	status = macb_readl(bp, RSR);
+	macb_writel(bp, RSR, status);
+
+	if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+		macb_writel(bp, ISR, MACB_BIT(RCOMP));
+
+	while (count < budget) {
+		u32 addr, ctrl;
+
+		entry = macb_rx_ring_wrap(bp->rx_tail);
+		desc = &bp->rx_ring[entry];
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
+
+		if (!(addr & MACB_BIT(RX_USED)))
+			break;
+
+		bp->rx_tail++;
+		count++;
+
+		if (!(ctrl & MACB_BIT(RX_SOF) && ctrl & MACB_BIT(RX_EOF))) {
+			rtdev_err(bp->dev,
+				   "not whole frame pointed by descriptor\n");
+			bp->stats.rx_dropped++;
+			break;
+		}
+		skb = bp->rx_skbuff[entry];
+		if (unlikely(!skb)) {
+			rtdev_err(bp->dev,
+				   "inconsistent Rx descriptor chain\n");
+			bp->stats.rx_dropped++;
+			break;
+		}
+		skb->time_stamp = *time_stamp;
+		/* now everything is ready for receiving packet */
+		bp->rx_skbuff[entry] = NULL;
+		len = MACB_BFEXT(RX_FRMLEN, ctrl);
+
+		rtdev_vdbg(bp->dev, "gem_rx %u (len %u)\n", entry, len);
+
+		rtskb_put(skb, len);
+		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, addr));
+		dma_unmap_single(&bp->pdev->dev, addr,
+				 bp->rx_buffer_size, DMA_FROM_DEVICE);
+
+		skb->protocol = rt_eth_type_trans(skb, bp->dev);
+
+		bp->stats.rx_packets++;
+		bp->stats.rx_bytes += skb->len;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+		rtdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+			    skb->len, skb->csum);
+		print_hex_dump(KERN_DEBUG, " mac: ", DUMP_PREFIX_ADDRESS, 16, 1,
+			       skb->mac_header, 16, true);
+		print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_ADDRESS, 16, 1,
+			       skb->data, 32, true);
+#endif
+
+		rtnetif_rx(skb);
+	}
+
+	gem_rx_refill(bp);
+
+	return count;
+}
+
+static int macb_rx_frame(struct macb *bp, unsigned int first_frag,
+			unsigned int last_frag, nanosecs_abs_t *time_stamp)
+{
+	unsigned int len;
+	unsigned int frag;
+	unsigned int offset;
+	struct rtskb *skb;
+	struct macb_dma_desc *desc;
+
+	desc = macb_rx_desc(bp, last_frag);
+	len = MACB_BFEXT(RX_FRMLEN, desc->ctrl);
+
+	rtdev_vdbg(bp->dev, "macb_rx_frame frags %u - %u (len %u)\n",
+		macb_rx_ring_wrap(first_frag),
+		macb_rx_ring_wrap(last_frag), len);
+
+	/*
+	 * The ethernet header starts NET_IP_ALIGN bytes into the
+	 * first buffer. Since the header is 14 bytes, this makes the
+	 * payload word-aligned.
+	 *
+	 * Instead of calling skb_reserve(NET_IP_ALIGN), we just copy
+	 * the two padding bytes into the skb so that we avoid hitting
+	 * the slowpath in memcpy(), and pull them off afterwards.
+	 */
+	skb = rtnetdev_alloc_rtskb(bp->dev, len + NET_IP_ALIGN);
+	if (!skb) {
+		rtdev_notice(bp->dev, "Low memory, packet dropped.\n");
+		bp->stats.rx_dropped++;
+		for (frag = first_frag; ; frag++) {
+			desc = macb_rx_desc(bp, frag);
+			desc->addr &= ~MACB_BIT(RX_USED);
+			if (frag == last_frag)
+				break;
+		}
+
+		/* Make descriptor updates visible to hardware */
+		wmb();
+
+		return 1;
+	}
+
+	offset = 0;
+	len += NET_IP_ALIGN;
+	skb->time_stamp = *time_stamp;
+	rtskb_put(skb, len);
+
+	for (frag = first_frag; ; frag++) {
+		unsigned int frag_len = bp->rx_buffer_size;
+
+		if (offset + frag_len > len) {
+			BUG_ON(frag != last_frag);
+			frag_len = len - offset;
+		}
+		memcpy(skb->data + offset, macb_rx_buffer(bp, frag), frag_len);
+		offset += bp->rx_buffer_size;
+		desc = macb_rx_desc(bp, frag);
+		desc->addr &= ~MACB_BIT(RX_USED);
+
+		if (frag == last_frag)
+			break;
+	}
+
+	/* Make descriptor updates visible to hardware */
+	wmb();
+
+	__rtskb_pull(skb, NET_IP_ALIGN);
+	skb->protocol = rt_eth_type_trans(skb, bp->dev);
+
+	bp->stats.rx_packets++;
+	bp->stats.rx_bytes += skb->len;
+	rtdev_vdbg(bp->dev, "received skb of length %u, csum: %08x\n",
+		   skb->len, skb->csum);
+	rtnetif_rx(skb);
+
+	return 0;
+}
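+
+/*
+ * Worked example for the copy loop above (sizes assumed): on non-GEM
+ * hardware rx_buffer_size is 128, so a 1514-byte frame plus the
+ * NET_IP_ALIGN pad (len = 1516) spans 12 fragments: eleven full 128-byte
+ * copies followed by a final 108-byte copy, after which the two pad
+ * bytes are pulled back off the skb.
+ */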
+
+static int macb_rx(struct macb *bp, int budget, nanosecs_abs_t *time_stamp)
+{
+	int received = 0;
+	unsigned int tail;
+	int first_frag = -1;
+
+	for (tail = bp->rx_tail; budget > 0; tail++) {
+		struct macb_dma_desc *desc = macb_rx_desc(bp, tail);
+		u32 addr, ctrl;
+
+		/* Make hw descriptor updates visible to CPU */
+		rmb();
+
+		addr = desc->addr;
+		ctrl = desc->ctrl;
+
+		if (!(addr & MACB_BIT(RX_USED)))
+			break;
+
+		if (ctrl & MACB_BIT(RX_SOF)) {
+			if (first_frag != -1)
+				discard_partial_frame(bp, first_frag, tail);
+			first_frag = tail;
+		}
+
+		if (ctrl & MACB_BIT(RX_EOF)) {
+			int dropped;
+			BUG_ON(first_frag == -1);
+
+			dropped = macb_rx_frame(bp, first_frag, tail, time_stamp);
+			first_frag = -1;
+			if (!dropped) {
+				received++;
+				budget--;
+			}
+		}
+	}
+
+	if (first_frag != -1)
+		bp->rx_tail = first_frag;
+	else
+		bp->rx_tail = tail;
+
+	return received;
+}
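+
+/*
+ * Reading the loop above: descriptors are walked until one still owned
+ * by hardware is found.  RX_SOF opens a candidate frame and RX_EOF
+ * closes it; an RX_SOF seen while a frame is already open means the
+ * previous frame was truncated, so its fragments are recycled via
+ * discard_partial_frame().  If the walk ends mid-frame, rx_tail is wound
+ * back to first_frag so the partial frame is revisited on the next run.
+ */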
+
+static int macb_interrupt(rtdm_irq_t *irq_handle)
+{
+	void *dev_id = rtdm_irq_get_arg(irq_handle, void);
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+	struct rtnet_device *dev = dev_id;
+	struct macb *bp = rtnetdev_priv(dev);
+	unsigned received = 0;
+	u32 status, ctrl;
+
+	status = macb_readl(bp, ISR);
+
+	if (unlikely(!status))
+		return RTDM_IRQ_NONE;
+
+	rtdm_lock_get(&bp->lock);
+
+	while (status) {
+		/* close possible race with dev_close */
+		if (unlikely(!rtnetif_running(dev))) {
+			macb_writel(bp, IDR, -1);
+			break;
+		}
+
+		rtdev_vdbg(bp->dev, "isr = 0x%08lx\n", (unsigned long)status);
+
+		if (status & MACB_BIT(RCOMP)) {
+			received += bp->macbgem_ops.mog_rx(bp, 100 - received,
+							&time_stamp);
+		}
+
+		if (unlikely(status & (MACB_TX_ERR_FLAGS))) {
+			macb_writel(bp, IDR, MACB_TX_INT_FLAGS);
+			rtdm_schedule_nrt_work(&bp->tx_error_task);
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_TX_ERR_FLAGS);
+
+			break;
+		}
+
+		if (status & MACB_BIT(TCOMP))
+			macb_tx_interrupt(bp);
+
+		/*
+		 * Link change detection isn't possible with RMII, so we'll
+		 * add that if/when we get our hands on a full-blown MII PHY.
+		 */
+
+		if (status & MACB_BIT(RXUBR)) {
+			ctrl = macb_readl(bp, NCR);
+			macb_writel(bp, NCR, ctrl & ~MACB_BIT(RE));
+			macb_writel(bp, NCR, ctrl | MACB_BIT(RE));
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(RXUBR));
+		}
+
+		if (status & MACB_BIT(ISR_ROVR)) {
+			/* We missed at least one packet */
+			if (macb_is_gem(bp))
+				bp->hw_stats.gem.rx_overruns++;
+			else
+				bp->hw_stats.macb.rx_overruns++;
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(ISR_ROVR));
+		}
+
+		if (status & MACB_BIT(HRESP)) {
+			/*
+			 * TODO: Reset the hardware, and maybe move the
+			 * rtdev_err to a lower-priority context as well
+			 * (work queue?)
+			 */
+			rtdev_err(dev, "DMA bus error: HRESP not OK\n");
+
+			if (bp->caps & MACB_CAPS_ISR_CLEAR_ON_WRITE)
+				macb_writel(bp, ISR, MACB_BIT(HRESP));
+		}
+
+		status = macb_readl(bp, ISR);
+	}
+
+	rtdm_lock_put(&bp->lock);
+
+	if (received)
+		rt_mark_stack_mgr(dev);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int macb_start_xmit(struct rtskb *skb, struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	dma_addr_t mapping;
+	unsigned int len, entry;
+	struct macb_dma_desc *desc;
+	struct macb_tx_skb *tx_skb;
+	u32 ctrl;
+	unsigned long flags;
+
+#if defined(DEBUG) && defined(VERBOSE_DEBUG)
+	rtdev_vdbg(bp->dev,
+		   "start_xmit: len %u head %p data %p tail %p end %p\n",
+		   skb->len, skb->head, skb->data,
+		   rtskb_tail_pointer(skb), rtskb_end_pointer(skb));
+	print_hex_dump(KERN_DEBUG, "data: ", DUMP_PREFIX_OFFSET, 16, 1,
+		       skb->data, 16, true);
+#endif
+
+	len = skb->len;
+	rtdm_lock_get_irqsave(&bp->lock, flags);
+
+	/* This is a hard error, log it. */
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1) {
+		rtnetif_stop_queue(dev);
+		rtdm_lock_put_irqrestore(&bp->lock, flags);
+		rtdev_err(bp->dev, "BUG! Tx Ring full when queue awake!\n");
+		rtdev_dbg(bp->dev, "tx_head = %u, tx_tail = %u\n",
+			   bp->tx_head, bp->tx_tail);
+		return RTDEV_TX_BUSY;
+	}
+
+	entry = macb_tx_ring_wrap(bp->tx_head);
+	rtdev_vdbg(bp->dev, "Allocated ring entry %u\n", entry);
+	mapping = dma_map_single(&bp->pdev->dev, skb->data,
+				 len, DMA_TO_DEVICE);
+	if (dma_mapping_error(&bp->pdev->dev, mapping)) {
+		dev_kfree_rtskb(skb);
+		goto unlock;
+	}
+
+	bp->tx_head++;
+	tx_skb = &bp->tx_skb[entry];
+	tx_skb->skb = skb;
+	tx_skb->mapping = mapping;
+	rtdev_vdbg(bp->dev, "Mapped skb data %p to DMA addr %08lx\n",
+		   skb->data, (unsigned long)mapping);
+
+	ctrl = MACB_BF(TX_FRMLEN, len);
+	ctrl |= MACB_BIT(TX_LAST);
+	if (entry == (TX_RING_SIZE - 1))
+		ctrl |= MACB_BIT(TX_WRAP);
+
+	desc = &bp->tx_ring[entry];
+	desc->addr = mapping;
+	desc->ctrl = ctrl;
+
+	/* Make newly initialized descriptor visible to hardware */
+	wmb();
+
+	rtskb_tx_timestamp(skb);
+
+	macb_writel(bp, NCR, macb_readl(bp, NCR) | MACB_BIT(TSTART));
+
+	if (CIRC_SPACE(bp->tx_head, bp->tx_tail, TX_RING_SIZE) < 1)
+		rtnetif_stop_queue(dev);
+
+unlock:
+	rtdm_lock_put_irqrestore(&bp->lock, flags);
+
+	return RTDEV_TX_OK;
+}
+
+static void macb_init_rx_buffer_size(struct macb *bp, size_t size)
+{
+	if (!macb_is_gem(bp)) {
+		bp->rx_buffer_size = MACB_RX_BUFFER_SIZE;
+	} else {
+		bp->rx_buffer_size = size;
+
+		if (bp->rx_buffer_size % RX_BUFFER_MULTIPLE) {
+			rtdev_dbg(bp->dev,
+				    "RX buffer must be multiple of %d bytes, expanding\n",
+				    RX_BUFFER_MULTIPLE);
+			bp->rx_buffer_size =
+				roundup(bp->rx_buffer_size, RX_BUFFER_MULTIPLE);
+		}
+	}
+
+	rtdev_dbg(bp->dev, "mtu [%u] rx_buffer_size [%zu]\n",
+		   bp->dev->mtu, bp->rx_buffer_size);
+}
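+
+/*
+ * Worked example for the rounding above: macb_open() below requests
+ * bufsz = mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN, so for a GEM with
+ * the default MTU of 1500 that is 1500 + 14 + 4 + 2 = 1520 bytes, which
+ * is rounded up to the next RX_BUFFER_MULTIPLE to give 1536.
+ */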
+
+static void gem_free_rx_buffers(struct macb *bp)
+{
+	struct rtskb		*skb;
+	struct macb_dma_desc	*desc;
+	dma_addr_t		addr;
+	int i;
+
+	if (!bp->rx_skbuff)
+		return;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		skb = bp->rx_skbuff[i];
+
+		if (skb == NULL)
+			continue;
+
+		desc = &bp->rx_ring[i];
+		addr = MACB_BF(RX_WADDR, MACB_BFEXT(RX_WADDR, desc->addr));
+		dma_unmap_single(&bp->pdev->dev, addr, bp->rx_buffer_size,
+				 DMA_FROM_DEVICE);
+		dev_kfree_rtskb(skb);
+		skb = NULL;
+	}
+
+	kfree(bp->rx_skbuff);
+	bp->rx_skbuff = NULL;
+}
+
+static void macb_free_rx_buffers(struct macb *bp)
+{
+	if (bp->rx_buffers) {
+		dma_free_coherent(&bp->pdev->dev,
+				  RX_RING_SIZE * bp->rx_buffer_size,
+				  bp->rx_buffers, bp->rx_buffers_dma);
+		bp->rx_buffers = NULL;
+	}
+}
+
+static void macb_free_consistent(struct macb *bp)
+{
+	if (bp->tx_skb) {
+		kfree(bp->tx_skb);
+		bp->tx_skb = NULL;
+	}
+	bp->macbgem_ops.mog_free_rx_buffers(bp);
+	if (bp->rx_ring) {
+		dma_free_coherent(&bp->pdev->dev, RX_RING_BYTES,
+				  bp->rx_ring, bp->rx_ring_dma);
+		bp->rx_ring = NULL;
+	}
+	if (bp->tx_ring) {
+		dma_free_coherent(&bp->pdev->dev, TX_RING_BYTES,
+				  bp->tx_ring, bp->tx_ring_dma);
+		bp->tx_ring = NULL;
+	}
+}
+
+static int gem_alloc_rx_buffers(struct macb *bp)
+{
+	int size;
+
+	size = RX_RING_SIZE * sizeof(struct rtskb *);
+	bp->rx_skbuff = kzalloc(size, GFP_KERNEL);
+	if (!bp->rx_skbuff)
+		return -ENOMEM;
+	else
+		rtdev_dbg(bp->dev,
+			   "Allocated %d RX struct rtskb entries at %p\n",
+			   RX_RING_SIZE, bp->rx_skbuff);
+	return 0;
+}
+
+static int macb_alloc_rx_buffers(struct macb *bp)
+{
+	int size;
+
+	size = RX_RING_SIZE * bp->rx_buffer_size;
+	bp->rx_buffers = dma_alloc_coherent(&bp->pdev->dev, size,
+					    &bp->rx_buffers_dma, GFP_KERNEL);
+	if (!bp->rx_buffers)
+		return -ENOMEM;
+	else
+		rtdev_dbg(bp->dev,
+			   "Allocated RX buffers of %d bytes at %08lx (mapped %p)\n",
+			   size, (unsigned long)bp->rx_buffers_dma, bp->rx_buffers);
+	return 0;
+}
+
+static int macb_alloc_consistent(struct macb *bp)
+{
+	int size;
+
+	size = TX_RING_SIZE * sizeof(struct macb_tx_skb);
+	bp->tx_skb = kmalloc(size, GFP_KERNEL);
+	if (!bp->tx_skb)
+		goto out_err;
+
+	size = RX_RING_BYTES;
+	bp->rx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+					 &bp->rx_ring_dma, GFP_KERNEL);
+	if (!bp->rx_ring)
+		goto out_err;
+	rtdev_dbg(bp->dev,
+		   "Allocated RX ring of %d bytes at %08lx (mapped %p)\n",
+		   size, (unsigned long)bp->rx_ring_dma, bp->rx_ring);
+
+	size = TX_RING_BYTES;
+	bp->tx_ring = dma_alloc_coherent(&bp->pdev->dev, size,
+					 &bp->tx_ring_dma, GFP_KERNEL);
+	if (!bp->tx_ring)
+		goto out_err;
+	rtdev_dbg(bp->dev,
+		   "Allocated TX ring of %d bytes at %08lx (mapped %p)\n",
+		   size, (unsigned long)bp->tx_ring_dma, bp->tx_ring);
+
+	if (bp->macbgem_ops.mog_alloc_rx_buffers(bp))
+		goto out_err;
+
+	return 0;
+
+out_err:
+	macb_free_consistent(bp);
+	return -ENOMEM;
+}
+
+static void gem_init_rings(struct macb *bp)
+{
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		bp->tx_ring[i].addr = 0;
+		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	}
+	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+	bp->rx_tail = bp->rx_prepared_head = bp->tx_head = bp->tx_tail = 0;
+
+	gem_rx_refill(bp);
+}
+
+static void macb_init_rings(struct macb *bp)
+{
+	int i;
+	dma_addr_t addr;
+
+	addr = bp->rx_buffers_dma;
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		bp->rx_ring[i].addr = addr;
+		bp->rx_ring[i].ctrl = 0;
+		addr += bp->rx_buffer_size;
+	}
+	bp->rx_ring[RX_RING_SIZE - 1].addr |= MACB_BIT(RX_WRAP);
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		bp->tx_ring[i].addr = 0;
+		bp->tx_ring[i].ctrl = MACB_BIT(TX_USED);
+	}
+	bp->tx_ring[TX_RING_SIZE - 1].ctrl |= MACB_BIT(TX_WRAP);
+
+	bp->rx_tail = bp->tx_head = bp->tx_tail = 0;
+}
+
+static void macb_reset_hw(struct macb *bp)
+{
+	/*
+	 * Disable RX and TX (XXX: Should we halt the transmission
+	 * more gracefully?)
+	 */
+	macb_writel(bp, NCR, 0);
+
+	/* Clear the stats registers (XXX: Update stats first?) */
+	macb_writel(bp, NCR, MACB_BIT(CLRSTAT));
+
+	/* Clear all status flags */
+	macb_writel(bp, TSR, -1);
+	macb_writel(bp, RSR, -1);
+
+	/* Disable all interrupts */
+	macb_writel(bp, IDR, -1);
+	macb_readl(bp, ISR);
+}
+
+static u32 gem_mdc_clk_div(struct macb *bp)
+{
+	u32 config;
+	unsigned long pclk_hz = clk_get_rate(bp->pclk);
+
+	if (pclk_hz <= 20000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV8);
+	else if (pclk_hz <= 40000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV16);
+	else if (pclk_hz <= 80000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV32);
+	else if (pclk_hz <= 120000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV48);
+	else if (pclk_hz <= 160000000)
+		config = GEM_BF(CLK, GEM_CLK_DIV64);
+	else
+		config = GEM_BF(CLK, GEM_CLK_DIV96);
+
+	return config;
+}
+
+static u32 macb_mdc_clk_div(struct macb *bp)
+{
+	u32 config;
+	unsigned long pclk_hz;
+
+	if (macb_is_gem(bp))
+		return gem_mdc_clk_div(bp);
+
+	pclk_hz = clk_get_rate(bp->pclk);
+	if (pclk_hz <= 20000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV8);
+	else if (pclk_hz <= 40000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV16);
+	else if (pclk_hz <= 80000000)
+		config = MACB_BF(CLK, MACB_CLK_DIV32);
+	else
+		config = MACB_BF(CLK, MACB_CLK_DIV64);
+
+	return config;
+}
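+
+/*
+ * Example for the divider selection above (pclk value assumed): the goal
+ * is an MDC below the 2.5 MHz ceiling of IEEE 802.3 Clause 22.  With
+ * pclk = 133 MHz, both the GEM and MACB paths select a divide-by-64,
+ * giving an MDC of roughly 2.08 MHz.
+ */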
+
+/*
+ * Get the DMA bus width field of the network configuration register that we
+ * should program.  We find the width from decoding the design configuration
+ * register to find the maximum supported data bus width.
+ */
+static u32 macb_dbw(struct macb *bp)
+{
+	if (!macb_is_gem(bp))
+		return 0;
+
+	switch (GEM_BFEXT(DBWDEF, gem_readl(bp, DCFG1))) {
+	case 4:
+		return GEM_BF(DBW, GEM_DBW128);
+	case 2:
+		return GEM_BF(DBW, GEM_DBW64);
+	case 1:
+	default:
+		return GEM_BF(DBW, GEM_DBW32);
+	}
+}
+
+/*
+ * Configure the receive DMA engine
+ * - use the correct receive buffer size
+ * - allow the use of INCR16 bursts
+ *   (if not supported by the FIFO, it falls back to the default)
+ * - set both rx/tx packet buffers to full memory size
+ * These are configurable parameters for GEM.
+ */
+static void macb_configure_dma(struct macb *bp)
+{
+	u32 dmacfg;
+
+	if (macb_is_gem(bp)) {
+		dmacfg = gem_readl(bp, DMACFG) & ~GEM_BF(RXBS, -1L);
+		dmacfg |= GEM_BF(RXBS, bp->rx_buffer_size / RX_BUFFER_MULTIPLE);
+		dmacfg |= GEM_BF(FBLDO, 16);
+		dmacfg |= GEM_BIT(TXPBMS) | GEM_BF(RXBMS, -1L);
+		dmacfg &= ~GEM_BIT(ENDIA);
+		gem_writel(bp, DMACFG, dmacfg);
+	}
+}
+
+/*
+ * Configure peripheral capabilities according to the integration options used
+ */
+static void macb_configure_caps(struct macb *bp)
+{
+	if (macb_is_gem(bp)) {
+		if (GEM_BFEXT(IRQCOR, gem_readl(bp, DCFG1)) == 0)
+			bp->caps |= MACB_CAPS_ISR_CLEAR_ON_WRITE;
+	}
+	rtdev_vdbg(bp->dev, "Capabilities : %X\n", bp->caps);
+}
+
+static void macb_init_hw(struct macb *bp)
+{
+	u32 config;
+
+	macb_reset_hw(bp);
+	rtmacb_set_hwaddr(bp);
+
+	config = macb_mdc_clk_div(bp);
+	config |= MACB_BF(RBOF, NET_IP_ALIGN);	/* Make eth data aligned */
+	config |= MACB_BIT(PAE);		/* PAuse Enable */
+	config |= MACB_BIT(DRFCS);		/* Discard Rx FCS */
+	if (bp->dev->flags & IFF_PROMISC)
+		config |= MACB_BIT(CAF);	/* Copy All Frames */
+	if (!(bp->dev->flags & IFF_BROADCAST))
+		config |= MACB_BIT(NBC);	/* No BroadCast */
+	config |= macb_dbw(bp);
+	macb_writel(bp, NCFGR, config);
+	bp->speed = SPEED_10;
+	bp->duplex = DUPLEX_HALF;
+
+	macb_configure_dma(bp);
+	macb_configure_caps(bp);
+
+	/* Initialize TX and RX buffers */
+	macb_writel(bp, RBQP, bp->rx_ring_dma);
+	macb_writel(bp, TBQP, bp->tx_ring_dma);
+
+	/* Enable TX and RX */
+	macb_writel(bp, NCR, MACB_BIT(RE) | MACB_BIT(TE) | MACB_BIT(MPE));
+
+	/* Enable interrupts */
+	macb_writel(bp, IER, (MACB_RX_INT_FLAGS
+			      | MACB_TX_INT_FLAGS
+			      | MACB_BIT(HRESP)));
+}
+
+static int macb_open(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	size_t bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + NET_IP_ALIGN;
+	int err;
+
+	rt_stack_connect(dev, &STACK_manager);
+
+	rtdev_dbg(bp->dev, "open\n");
+
+	/* carrier starts down */
+	rtnetif_carrier_off(dev);
+
+	/* if the PHY is not yet registered, retry later */
+	if (!bp->phy_dev)
+		return -EAGAIN;
+
+	/* RX buffers initialization */
+	macb_init_rx_buffer_size(bp, bufsz);
+
+	err = macb_alloc_consistent(bp);
+	if (err) {
+		rtdev_err(dev, "Unable to allocate DMA memory (error %d)\n",
+			   err);
+		return err;
+	}
+
+	bp->macbgem_ops.mog_init_rings(bp);
+	macb_init_hw(bp);
+
+	/* schedule a link state check */
+	phy_start(bp->phy_dev);
+
+	rtnetif_start_queue(dev);
+
+	return 0;
+}
+
+static int macb_close(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	unsigned long flags;
+
+	rtnetif_stop_queue(dev);
+
+	if (bp->phy_dev)
+		phy_stop(bp->phy_dev);
+
+	rtdm_lock_get_irqsave(&bp->lock, flags);
+	macb_reset_hw(bp);
+	rtnetif_carrier_off(dev);
+	rtdm_lock_put_irqrestore(&bp->lock, flags);
+
+	macb_free_consistent(bp);
+
+	rt_stack_disconnect(dev);
+
+	return 0;
+}
+
+static void gem_update_stats(struct macb *bp)
+{
+	u32 __iomem *reg = bp->regs + GEM_OTX;
+	u32 *p = &bp->hw_stats.gem.tx_octets_31_0;
+	u32 *end = &bp->hw_stats.gem.rx_udp_checksum_errors + 1;
+
+	for (; p < end; p++, reg++)
+		*p += __raw_readl(reg);
+}
+
+static struct net_device_stats *gem_get_stats(struct macb *bp)
+{
+	struct gem_stats *hwstat = &bp->hw_stats.gem;
+	struct net_device_stats *nstat = &bp->stats;
+
+	gem_update_stats(bp);
+
+	nstat->rx_errors = (hwstat->rx_frame_check_sequence_errors +
+			    hwstat->rx_alignment_errors +
+			    hwstat->rx_resource_errors +
+			    hwstat->rx_overruns +
+			    hwstat->rx_oversize_frames +
+			    hwstat->rx_jabbers +
+			    hwstat->rx_undersized_frames +
+			    hwstat->rx_length_field_frame_errors);
+	nstat->tx_errors = (hwstat->tx_late_collisions +
+			    hwstat->tx_excessive_collisions +
+			    hwstat->tx_underrun +
+			    hwstat->tx_carrier_sense_errors);
+	nstat->multicast = hwstat->rx_multicast_frames;
+	nstat->collisions = (hwstat->tx_single_collision_frames +
+			     hwstat->tx_multiple_collision_frames +
+			     hwstat->tx_excessive_collisions);
+	nstat->rx_length_errors = (hwstat->rx_oversize_frames +
+				   hwstat->rx_jabbers +
+				   hwstat->rx_undersized_frames +
+				   hwstat->rx_length_field_frame_errors);
+	nstat->rx_over_errors = hwstat->rx_resource_errors;
+	nstat->rx_crc_errors = hwstat->rx_frame_check_sequence_errors;
+	nstat->rx_frame_errors = hwstat->rx_alignment_errors;
+	nstat->rx_fifo_errors = hwstat->rx_overruns;
+	nstat->tx_aborted_errors = hwstat->tx_excessive_collisions;
+	nstat->tx_carrier_errors = hwstat->tx_carrier_sense_errors;
+	nstat->tx_fifo_errors = hwstat->tx_underrun;
+
+	return nstat;
+}
+
+struct net_device_stats *rtmacb_get_stats(struct rtnet_device *dev)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	struct net_device_stats *nstat = &bp->stats;
+	struct macb_stats *hwstat = &bp->hw_stats.macb;
+
+	if (macb_is_gem(bp))
+		return gem_get_stats(bp);
+
+	/* read stats from hardware */
+	macb_update_stats(bp);
+
+	/* Convert HW stats into netdevice stats */
+	nstat->rx_errors = (hwstat->rx_fcs_errors +
+			    hwstat->rx_align_errors +
+			    hwstat->rx_resource_errors +
+			    hwstat->rx_overruns +
+			    hwstat->rx_oversize_pkts +
+			    hwstat->rx_jabbers +
+			    hwstat->rx_undersize_pkts +
+			    hwstat->sqe_test_errors +
+			    hwstat->rx_length_mismatch);
+	nstat->tx_errors = (hwstat->tx_late_cols +
+			    hwstat->tx_excessive_cols +
+			    hwstat->tx_underruns +
+			    hwstat->tx_carrier_errors);
+	nstat->collisions = (hwstat->tx_single_cols +
+			     hwstat->tx_multiple_cols +
+			     hwstat->tx_excessive_cols);
+	nstat->rx_length_errors = (hwstat->rx_oversize_pkts +
+				   hwstat->rx_jabbers +
+				   hwstat->rx_undersize_pkts +
+				   hwstat->rx_length_mismatch);
+	nstat->rx_over_errors = hwstat->rx_resource_errors +
+				   hwstat->rx_overruns;
+	nstat->rx_crc_errors = hwstat->rx_fcs_errors;
+	nstat->rx_frame_errors = hwstat->rx_align_errors;
+	nstat->rx_fifo_errors = hwstat->rx_overruns;
+	/* XXX: What does "missed" mean? */
+	nstat->tx_aborted_errors = hwstat->tx_excessive_cols;
+	nstat->tx_carrier_errors = hwstat->tx_carrier_errors;
+	nstat->tx_fifo_errors = hwstat->tx_underruns;
+	/* Don't know about heartbeat or window errors... */
+
+	return nstat;
+}
+EXPORT_SYMBOL_GPL(rtmacb_get_stats);
+
+int rtmacb_ioctl(struct rtnet_device *dev, unsigned cmd, void *rq)
+{
+	struct macb *bp = rtnetdev_priv(dev);
+	struct phy_device *phydev = bp->phy_dev;
+
+	if (!rtnetif_running(dev))
+		return -EINVAL;
+
+	if (!phydev)
+		return -ENODEV;
+
+	return phy_mii_ioctl(phydev, rq, cmd);
+}
+EXPORT_SYMBOL_GPL(rtmacb_ioctl);
+
+#if defined(CONFIG_OF)
+static const struct of_device_id macb_dt_ids[] = {
+	{ .compatible = "cdns,at32ap7000-macb" },
+	{ .compatible = "cdns,at91sam9260-macb" },
+	{ .compatible = "cdns,macb" },
+	{ .compatible = "cdns,pc302-gem" },
+	{ .compatible = "cdns,gem" },
+	{ .compatible = "atmel,sama5d3-gem" },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, macb_dt_ids);
+#endif
+
+static int __init macb_probe(struct platform_device *pdev)
+{
+	struct macb_platform_data *pdata;
+	struct resource *regs;
+	struct rtnet_device *dev;
+	struct macb *bp;
+	struct phy_device *phydev;
+	u32 config;
+	int err = -ENXIO;
+	struct pinctrl *pinctrl;
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5, 13, 0)
+	const char *mac;
+#endif
+
+	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!regs) {
+		dev_err(&pdev->dev, "no mmio resource defined\n");
+		goto err_out;
+	}
+
+	pinctrl = devm_pinctrl_get_select_default(&pdev->dev);
+	if (IS_ERR(pinctrl)) {
+		err = PTR_ERR(pinctrl);
+		if (err == -EPROBE_DEFER)
+			goto err_out;
+
+		dev_warn(&pdev->dev, "No pinctrl provided\n");
+	}
+
+	err = -ENOMEM;
+	dev = rt_alloc_etherdev(sizeof(*bp), RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (!dev)
+		goto err_out;
+
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+
+	/* TODO: Actually, we have some interesting features... */
+	dev->features |= 0;
+
+	bp = rtnetdev_priv(dev);
+	bp->pdev = pdev;
+	bp->dev = dev;
+
+	rtdm_lock_init(&bp->lock);
+	INIT_WORK(&bp->tx_error_task, macb_tx_error_task);
+
+	bp->pclk = devm_clk_get(&pdev->dev, "pclk");
+	if (IS_ERR(bp->pclk)) {
+		err = PTR_ERR(bp->pclk);
+		dev_err(&pdev->dev, "failed to get macb_clk (%u)\n", err);
+		goto err_out_free_dev;
+	}
+
+	bp->hclk = devm_clk_get(&pdev->dev, "hclk");
+	if (IS_ERR(bp->hclk)) {
+		err = PTR_ERR(bp->hclk);
+		dev_err(&pdev->dev, "failed to get hclk (%d)\n", err);
+		goto err_out_free_dev;
+	}
+
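+	/* tx_clk is optional on some SoCs; the IS_ERR() checks below simply
+	 * skip it when it is absent */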
+	bp->tx_clk = devm_clk_get(&pdev->dev, "tx_clk");
+
+	err = clk_prepare_enable(bp->pclk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable pclk (%d)\n", err);
+		goto err_out_free_dev;
+	}
+
+	err = clk_prepare_enable(bp->hclk);
+	if (err) {
+		dev_err(&pdev->dev, "failed to enable hclk (%d)\n", err);
+		goto err_out_disable_pclk;
+	}
+
+	if (!IS_ERR(bp->tx_clk)) {
+		err = clk_prepare_enable(bp->tx_clk);
+		if (err) {
+			dev_err(&pdev->dev, "failed to enable tx_clk (%d)\n",
+					err);
+			goto err_out_disable_hclk;
+		}
+	}
+
+	bp->regs = devm_ioremap(&pdev->dev, regs->start, resource_size(regs));
+	if (!bp->regs) {
+		dev_err(&pdev->dev, "failed to map registers, aborting.\n");
+		err = -ENOMEM;
+		goto err_out_disable_clocks;
+	}
+
+	dev->irq = platform_get_irq(pdev, 0);
+	rt_stack_connect(dev, &STACK_manager);
+
+	err = rtdm_irq_request(&bp->irq_handle, dev->irq, macb_interrupt, 0,
+			dev->name, dev);
+	if (err) {
+		dev_err(&pdev->dev, "Unable to request IRQ %d (error %d)\n",
+			dev->irq, err);
+		goto err_out_disable_clocks;
+	}
+
+	dev->open = macb_open;
+	dev->stop = macb_close;
+	dev->hard_start_xmit = macb_start_xmit;
+	dev->do_ioctl = rtmacb_ioctl;
+	dev->get_stats = rtmacb_get_stats;
+
+	dev->base_addr = regs->start;
+
+	/* set up the appropriate routines for the adapter type */
+	if (macb_is_gem(bp)) {
+		bp->macbgem_ops.mog_alloc_rx_buffers = gem_alloc_rx_buffers;
+		bp->macbgem_ops.mog_free_rx_buffers = gem_free_rx_buffers;
+		bp->macbgem_ops.mog_init_rings = gem_init_rings;
+		bp->macbgem_ops.mog_rx = gem_rx;
+	} else {
+		bp->macbgem_ops.mog_alloc_rx_buffers = macb_alloc_rx_buffers;
+		bp->macbgem_ops.mog_free_rx_buffers = macb_free_rx_buffers;
+		bp->macbgem_ops.mog_init_rings = macb_init_rings;
+		bp->macbgem_ops.mog_rx = macb_rx;
+	}
+
+	/* Set MII management clock divider */
+	config = macb_mdc_clk_div(bp);
+	config |= macb_dbw(bp);
+	macb_writel(bp, NCFGR, config);
+
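+	/*
+	 * Fetch the MAC address from the device tree. The helper's signature
+	 * changed in Linux 5.13: it now fills a caller-provided buffer and
+	 * returns an error code instead of returning a pointer. If the DT
+	 * carries no address, fall back to reading the controller's address
+	 * registers via rtmacb_get_hwaddr().
+	 */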
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5, 13, 0)
+	err = of_get_mac_address(pdev->dev.of_node, bp->dev->dev_addr);
+	if (err)
+#else
+	mac = of_get_mac_address(pdev->dev.of_node);
+	if (mac)
+		memcpy(bp->dev->dev_addr, mac, ETH_ALEN);
+	else
+#endif
+		rtmacb_get_hwaddr(bp);
+
+	err = of_get_phy_mode(pdev->dev.of_node);
+	if (err < 0) {
+		pdata = dev_get_platdata(&pdev->dev);
+		if (pdata && pdata->is_rmii)
+			bp->phy_interface = PHY_INTERFACE_MODE_RMII;
+		else
+			bp->phy_interface = PHY_INTERFACE_MODE_MII;
+	} else {
+		bp->phy_interface = err;
+	}
+
+	if (bp->phy_interface == PHY_INTERFACE_MODE_RGMII)
+		macb_or_gem_writel(bp, USRIO, GEM_BIT(RGMII));
+	else if (bp->phy_interface == PHY_INTERFACE_MODE_RMII)
+#if defined(CONFIG_ARCH_AT91)
+		macb_or_gem_writel(bp, USRIO, (MACB_BIT(RMII) |
+					       MACB_BIT(CLKEN)));
+#else
+		macb_or_gem_writel(bp, USRIO, 0);
+#endif
+	else
+#if defined(CONFIG_ARCH_AT91)
+		macb_or_gem_writel(bp, USRIO, MACB_BIT(CLKEN));
+#else
+		macb_or_gem_writel(bp, USRIO, MACB_BIT(MII));
+#endif
+
+	err = rt_register_rtnetdev(dev);
+	if (err) {
+		dev_err(&pdev->dev, "Cannot register net device, aborting.\n");
+		goto err_out_irq_free;
+	}
+
+	err = rtmacb_mii_init(bp);
+	if (err)
+		goto err_out_unregister_netdev;
+
+	platform_set_drvdata(pdev, dev);
+
+	rtnetif_carrier_off(dev);
+
+	rtdev_info(dev, "Cadence %s at 0x%08lx irq %d (%pM)\n",
+		    macb_is_gem(bp) ? "GEM" : "MACB", dev->base_addr,
+		    dev->irq, dev->dev_addr);
+
+	phydev = bp->phy_dev;
+	rtdev_info(dev, "attached PHY driver [%s] (mii_bus:phy_addr=%s, irq=%d)\n",
+		    phydev->drv->name, dev_name(&phydev->dev), phydev->irq);
+
+	return 0;
+
+err_out_unregister_netdev:
+	rt_unregister_rtnetdev(dev);
+err_out_irq_free:
+	rtdm_irq_free(&bp->irq_handle);
+err_out_disable_clocks:
+	if (!IS_ERR(bp->tx_clk))
+		clk_disable_unprepare(bp->tx_clk);
+err_out_disable_hclk:
+	clk_disable_unprepare(bp->hclk);
+err_out_disable_pclk:
+	clk_disable_unprepare(bp->pclk);
+err_out_free_dev:
+	rtdev_free(dev);
+err_out:
+	return err;
+}
+
+static int __exit macb_remove(struct platform_device *pdev)
+{
+	struct rtnet_device *dev;
+	struct macb *bp;
+
+	dev = platform_get_drvdata(pdev);
+
+	if (dev) {
+		bp = rtnetdev_priv(dev);
+		if (bp->phy_dev)
+			phy_disconnect(bp->phy_dev);
+		mdiobus_unregister(bp->mii_bus);
+		if (bp->phy_phony_net_device)
+			free_netdev(bp->phy_phony_net_device);
+		kfree(bp->mii_bus->irq);
+		rt_rtdev_disconnect(dev);
+		rtdm_irq_free(&bp->irq_handle);
+		mdiobus_free(bp->mii_bus);
+		rt_unregister_rtnetdev(dev);
+		if (!IS_ERR(bp->tx_clk))
+			clk_disable_unprepare(bp->tx_clk);
+		clk_disable_unprepare(bp->hclk);
+		clk_disable_unprepare(bp->pclk);
+		rtdev_free(dev);
+	}
+
+	return 0;
+}
+
+static struct platform_driver macb_driver = {
+	.remove		= __exit_p(macb_remove),
+	.driver		= {
+		.name		= "macb",
+		.owner	= THIS_MODULE,
+		.of_match_table	= of_match_ptr(macb_dt_ids),
+	},
+};
+
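+/*
+ * platform_driver_probe() leaves the driver unregistered when probing
+ * fails or no device binds, so track whether registration succeeded and
+ * only unregister on module exit in that case.
+ */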
+static bool found;
+static int __init macb_driver_init(void)
+{
+	found = platform_driver_probe(&macb_driver, macb_probe) == 0;
+	return 0;
+}
+module_init(macb_driver_init);
+
+static void __exit macb_driver_exit(void)
+{
+	if (found)
+		platform_driver_unregister(&macb_driver);
+}
+module_exit(macb_driver_exit);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("Cadence MACB/GEM Ethernet driver");
+MODULE_AUTHOR("Haavard Skinnemoen (Atmel)");
+MODULE_ALIAS("platform:macb");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile
new file mode 100644
index 0000000..4edf7ad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_MPC52XX_FEC) += rt_mpc52xx_fec.o
+
+rt_mpc52xx_fec-y := mpc52xx_fec.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README
new file mode 100644
index 0000000..17bc72d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/README
@@ -0,0 +1,67 @@
+This is the RTnet driver for the MPC 52xx FEC for the Linux kernel
+2.4.25 (DENX linuxppc_2_4_devel tree). Unfortunately, the kernel
+requires a small patch to permit kernel modules to access BESTCOMM
+functions (the FEC driver is not supported as a module). The patch
+linuxppc_2_4_devel-fec.patch is available in this directory.
+
+Apply the FEC module patch mentioned above and then prepare and
+build the Linux kernel and Xenomai as usual:
+
+  $ export CROSS_COMPILE=ppc_6xx-
+  $ export QUILT_PATCHES=quilt_patches
+  $ export DESTDIR=/opt/eldk/ppc_6xx
+
+  $ cd linuxppc_2_4_devel
+  $ export KERNELDIR=$PWD
+  $ patch -p1 < <path>/linuxppc_2_4_devel-fec.patch
+  $ make TQM5200_config
+
+  $ cd ../xenomai
+  $ export XENODIR=$PWD
+  $ ./scripts/prepare-kernel.sh --linux=../linuxppc_2_4_devel --arch=ppc
+
+  $ cd $KERNELDIR
+  $ make menuconfig
+    ... check loadable module support ...
+    ... exit and save default configuration ...
+  $ make dep
+  $ make uImage
+  $ cp -pv arch/ppc/boot/images/uImage /tftpboot
+
+  $ cd $XENODIR
+  $ ./configure --host=ppc-linux --prefix=/usr/xenomai
+  $ make
+  $ make install
+
+This is the build process for the TQM5200 board using ELDK 3.1.1.
+
+The file rt_mpc52xx_fec.h contains a few configuration options.
+Please customize them to your needs, in line with your standard
+Linux kernel configuration:
+
+  $ cat rt_mpc52xx_fec.h
+  ...
+  /* Define board specific options */
+  #define CONFIG_RTNET_USE_MDIO
+  #define CONFIG_RTNET_FEC_GENERIC_PHY
+  #define CONFIG_RTNET_FEC_LXT971
+  #undef CONFIG_RTNET_FEC_DP83847
+
+Then configure and cross-compile RTnet as shown:
+
+  $ ./configure --host=ppc-linux --with-linux=$KERNELDIR \
+    --with-rtext-config=$DESTDIR/usr/xenomai/bin/xeno-config \
+    --disable-e1000 --disable-8139  --disable-8139too \
+    --enable-mpc52xx-fec --enable-eepro100 \
+    --prefix=/usr/xenomai --enable-proxy
+  $ make
+  $ make install
+
+Note that RTnet gets installed into $DESTDIR/usr/xenomai, including
+the kernel modules.
+
+Also be aware that the MPC52xx has only one on-chip Ethernet port,
+so driver development on a RAM-disk based system can be cumbersome.
+It is convenient to use a supported PCI Ethernet card, e.g. an
+EEPRO100, and to mount the root file system via NFS.
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c
new file mode 100644
index 0000000..5e8f16d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/mpc52xx_fec.c
@@ -0,0 +1,1985 @@
+/*
+ * arch/ppc/5xxx_io/fec.c
+ *
+ * Driver for the MPC5200 Fast Ethernet Controller
+ * Support for MPC5100 FEC has been removed; contact the author if you need it.
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * 2003 (c) MontaVista Software, Inc.  This file is licensed under the terms
+ * of the GNU General Public License version 2.  This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ *
+ * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/5xxx_io/fec.c".
+ * Copyright (c) 2008 Wolfgang Grandegger <wg@denx.de>
+ */
+
+/* #define PARANOID_CHECKS*/
+/* #define MUST_ALIGN_TRANSMIT_DATA*/
+#define MUST_UNALIGN_RECEIVE_DATA
+/* #define EXIT_ISR_AT_MEMORY_SQUEEZE*/
+/* #define DISPLAY_WARNINGS*/
+
+#ifdef ORIGINAL_CODE
+static const char *version = "fec.c v0.2\n";
+#endif /* ORIGINAL_CODE */
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/types.h>
+#include <linux/fcntl.h>
+#include <linux/interrupt.h>
+#include <linux/ptrace.h>
+#include <linux/ioport.h>
+#include <linux/in.h>
+#include <linux/slab.h>
+#include <linux/string.h>
+#include <asm/system.h>
+#include <asm/bitops.h>
+#include <linux/spinlock.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/crc32.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/skbuff.h>
+#include <asm/delay.h>
+#include <rtnet_port.h>
+#include "rt_mpc52xx_fec.h"
+#ifdef CONFIG_UBOOT
+#include <asm/ppcboot.h>
+#endif
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FASTROUTE
+#error "Fast Routing on MPC5200 ethernet not supported"
+#endif
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for MPC52xx FEC");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size = 0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
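+/* Route this file's printk() calls through the RT-safe RTDM logger. */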
+#define printk(fmt, args...)	rtdm_printk(fmt, ##args)
+
+static struct rtnet_device *mpc5xxx_fec_dev;
+static int mpc5xxx_fec_interrupt(rtdm_irq_t *irq_handle);
+static int mpc5xxx_fec_receive_interrupt(rtdm_irq_t *irq_handle);
+static int mpc5xxx_fec_transmit_interrupt(rtdm_irq_t *irq_handle);
+static struct net_device_stats *mpc5xxx_fec_get_stats(struct rtnet_device *dev);
+#ifdef ORIGINAL_CODE
+static void mpc5xxx_fec_set_multicast_list(struct rtnet_device *dev);
+#endif /* ORIGINAL_CODE */
+static void mpc5xxx_fec_reinit(struct rtnet_device* dev);
+static int mpc5xxx_fec_setup(struct rtnet_device *dev, int reinit);
+static int mpc5xxx_fec_cleanup(struct rtnet_device *dev, int reinit);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void mpc5xxx_fec_mii(struct rtnet_device *dev);
+#ifdef ORIGINAL_CODE
+static int mpc5xxx_fec_ioctl(struct rtnet_device *, struct ifreq *rq, int cmd);
+static int mpc5xxx_netdev_ethtool_ioctl(struct rtnet_device *dev, void *useraddr);
+#endif /* ORIGINAL_CODE */
+static void mdio_timer_callback(unsigned long data);
+static void mii_display_status(struct rtnet_device *dev);
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET
+static void mpc5xxx_mdio_callback(uint regval, struct rtnet_device *dev, uint data);
+static int mpc5xxx_mdio_read(struct rtnet_device *dev, int phy_id, int location);
+#endif
+
+static void mpc5xxx_fec_update_stat(struct rtnet_device *);
+
+/* MII processing.  We keep this as simple as possible.  Requests are
+ * placed on the list (if there is room).  When the request is finished
+ * by the MII, an optional function may be called.
+ */
+typedef struct mii_list {
+	uint    mii_regval;
+	void    (*mii_func)(uint val, struct rtnet_device *dev, uint data);
+	struct  mii_list *mii_next;
+	uint    mii_data;
+} mii_list_t;
+
+#define		NMII	20
+mii_list_t      mii_cmds[NMII];
+mii_list_t      *mii_free;
+mii_list_t      *mii_head;
+mii_list_t      *mii_tail;
+
+typedef struct mdio_read_data {
+	u16 regval;
+	struct task_struct *sleeping_task;
+} mdio_read_data_t;
+
+static int mii_queue(struct rtnet_device *dev, int request,
+		void (*func)(uint, struct rtnet_device *, uint), uint data);
+
+/* Make MII read/write commands for the FEC.
+ */
+#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
+							(VAL & 0xffff))
+#define mk_mii_end	0
+
+/* Register definitions for the PHY.
+*/
+
+#define MII_REG_CR	 0	/* Control Register */
+#define MII_REG_SR	 1	/* Status Register */
+#define MII_REG_PHYIR1	 2	/* PHY Identification Register 1 */
+#define MII_REG_PHYIR2	 3	/* PHY Identification Register 2 */
+#define MII_REG_ANAR	 4	/* A-N Advertisement Register */
+#define MII_REG_ANLPAR	 5	/* A-N Link Partner Ability Register */
+#define MII_REG_ANER	 6	/* A-N Expansion Register */
+#define MII_REG_ANNPTR	 7	/* A-N Next Page Transmit Register */
+#define MII_REG_ANLPRNPR 8	/* A-N Link Partner Received Next Page Reg. */
+
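+/*
+ * Illustration (values follow from the macros and register numbers above):
+ * mk_mii_read(MII_REG_SR) expands to 0x60020000 | (1 << 18) == 0x60060000;
+ * mii_queue() then ORs the PHY address into bits 23 and up before writing
+ * the word to the mii_data register.  A typical request with a completion
+ * callback looks like:
+ *
+ *	mii_queue(dev, mk_mii_read(MII_REG_SR), mii_parse_sr, 0);
+ */
+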
+/* values for phy_status */
+
+#define PHY_CONF_ANE	0x0001	/* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP	0x0002	/* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK	0x00f0	/* mask for speed */
+#define PHY_CONF_10HDX	0x0010	/* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX	0x0020	/* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX	0x0040	/* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX	0x0080	/* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK	0x0100	/* 1 up - 0 down */
+#define PHY_STAT_FAULT	0x0200	/* 1 remote fault */
+#define PHY_STAT_ANC	0x0400	/* 1 auto-negotiation complete	*/
+#define PHY_STAT_SPMASK	0xf000	/* mask for speed */
+#define PHY_STAT_10HDX	0x1000	/* 10 Mbit half duplex selected	*/
+#define PHY_STAT_10FDX	0x2000	/* 10 Mbit full duplex selected	*/
+#define PHY_STAT_100HDX	0x4000	/* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX	0x8000	/* 100 Mbit full duplex selected */
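+
+/*
+ * Example decode (masks above): a phy_status of 0x8500 is
+ * PHY_STAT_LINK | PHY_STAT_ANC | PHY_STAT_100FDX, i.e. link up,
+ * auto-negotiation complete, 100 Mbit full duplex selected.
+ */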
+
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+u8 mpc5xxx_fec_mac_addr[6];
+u8 null_mac[6];
+
+#ifdef ORIGINAL_CODE
+static void mpc5xxx_fec_tx_timeout(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+
+	priv->stats.tx_errors++;
+
+	if (!priv->tx_full)
+		rtnetif_wake_queue(dev);
+}
+#endif /* ORIGINAL_CODE */
+
+static void
+mpc5xxx_fec_set_paddr(struct rtnet_device *dev, u8 *mac)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+
+	out_be32(&fec->paddr1, (mac[0]<<24) | (mac[1]<<16)
+			| (mac[2]<<8) | (mac[3]<<0));
+	out_be32(&fec->paddr2, (mac[4]<<24) | (mac[5]<<16) | 0x8808);
+}
+
+#ifdef ORIGINAL_CODE
+static int
+mpc5xxx_fec_set_mac_address(struct rtnet_device *dev, void *addr)
+{
+	struct sockaddr *sock = (struct sockaddr *)addr;
+
+	mpc5xxx_fec_set_paddr(dev, sock->sa_data);
+	return 0;
+}
+#endif /* ORIGINAL_CODE */
+
+/* This function is called to start or restart the FEC during a link
+ * change.  This happens on fifo errors or when switching between half
+ * and full duplex.
+ */
+static void
+mpc5xxx_fec_restart(struct rtnet_device *dev, int duplex)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 rcntrl;
+	u32 tcntrl;
+	int i;
+
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("mpc5xxx_fec_restart\n");
+#endif
+	out_be32(&fec->rfifo_status, in_be32(&fec->rfifo_status) & 0x700000);
+	out_be32(&fec->tfifo_status, in_be32(&fec->tfifo_status) & 0x700000);
+	out_be32(&fec->reset_cntrl, 0x1000000);
+
+	/* Whack a reset.  We should wait for this. */
+	out_be32(&fec->ecntrl, MPC5xxx_FEC_ECNTRL_RESET);
+	for (i = 0; i < MPC5xxx_FEC_RESET_DELAY; ++i) {
+		if ((in_be32(&fec->ecntrl) & MPC5xxx_FEC_ECNTRL_RESET) == 0)
+			break;
+		udelay(1);
+	}
+	if (i == MPC5xxx_FEC_RESET_DELAY)
+		printk ("FEC Reset timeout!\n");
+
+	/* Set station address. */
+	out_be32(&fec->paddr1, *(u32 *)&dev->dev_addr[0]);
+	out_be32(&fec->paddr2,
+		((*(u16 *)&dev->dev_addr[4]) << 16) | 0x8808);
+
+#ifdef ORIGINAL_CODE
+	mpc5xxx_fec_set_multicast_list(dev);
+#endif /* ORIGINAL_CODE */
+
+	rcntrl = MPC5xxx_FEC_RECV_BUFFER_SIZE << 16;	/* max frame length */
+	rcntrl |= MPC5xxx_FEC_RCNTRL_FCE;
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	rcntrl |= MPC5xxx_FEC_RCNTRL_MII_MODE;
+#endif
+	if (duplex)
+		tcntrl = MPC5xxx_FEC_TCNTRL_FDEN;		/* FD enable */
+	else {
+		rcntrl |= MPC5xxx_FEC_RCNTRL_DRT;
+		tcntrl = 0;
+	}
+	out_be32(&fec->r_cntrl, rcntrl);
+	out_be32(&fec->x_cntrl, tcntrl);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed. */
+	out_be32(&fec->mii_speed, priv->phy_speed);
+#endif
+
+	priv->full_duplex = duplex;
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	priv->duplex_change = 0;
+#endif
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("%s: duplex set to %d\n", dev->name, priv->full_duplex);
+#endif
+
+	/* Clear any outstanding interrupt. */
+	out_be32(&fec->ievent, 0xffffffff);	/* clear intr events */
+
+	/* Enable interrupts we wish to service.
+	*/
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	out_be32(&fec->imask, 0xf0fe0000);	/* enable all intr but tfint */
+#else
+	out_be32(&fec->imask, 0xf07e0000);	/* enable all intr but tfint */
+#endif
+
+	/* And last, enable the transmit and receive processing.
+	*/
+	out_be32(&fec->ecntrl, MPC5xxx_FEC_ECNTRL_ETHER_EN);
+	out_be32(&fec->r_des_active, 0x01000000);
+
+	/* The tx ring is no longer full. */
+	if (priv->tx_full)
+	{
+		priv->tx_full = 0;
+		rtnetif_wake_queue(dev);
+	}
+}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void
+mpc5xxx_fec_mii(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	mii_list_t	*mip;
+	uint		mii_reg;
+
+	mii_reg = in_be32(&fec->mii_data);
+
+	if ((mip = mii_head) == NULL) {
+		printk("MII and no head!\n");
+		return;
+	}
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mpc5xxx_fec_mii %08x %08x %08x\n",
+		mii_reg, (u32)mip->mii_func, mip->mii_data);
+#endif
+
+	if (mip->mii_func != NULL)
+		(*(mip->mii_func))(mii_reg, dev, mip->mii_data);
+
+	mii_head = mip->mii_next;
+	mip->mii_next = mii_free;
+	mii_free = mip;
+
+	if ((mip = mii_head) != NULL)
+		out_be32(&fec->mii_data, mip->mii_regval);
+}
+
+static int
+mii_queue(struct rtnet_device *dev, int regval, void (*func)(uint, struct rtnet_device *, uint), uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	rtdm_lockctx_t	context;
+	mii_list_t	*mip;
+	int		retval;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mii_queue: %08x %08x %08x\n", regval, (u32)func, data);
+#endif
+
+	/* Add PHY address to register command.
+	*/
+	regval |= priv->phy_addr << 23;
+
+	retval = 0;
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+
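+	/*
+	 * Take a request block from the free list; if the pending queue is
+	 * empty the MII engine is idle, so issue the command immediately,
+	 * otherwise append it for mpc5xxx_fec_mii() to issue on completion.
+	 */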
+	if ((mip = mii_free) != NULL) {
+		mii_free = mip->mii_next;
+		mip->mii_regval = regval;
+		mip->mii_func = func;
+		mip->mii_next = NULL;
+		mip->mii_data = data;
+		if (mii_head) {
+			mii_tail->mii_next = mip;
+			mii_tail = mip;
+		} else {
+			mii_head = mii_tail = mip;
+			out_be32(&fec->mii_data, regval);
+		}
+	} else
+		retval = 1;
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	return retval;
+}
+
+static void mii_do_cmd(struct rtnet_device *dev, const phy_cmd_t *c)
+{
+	int k;
+
+	if (!c)
+		return;
+
+	for (k = 0; (c+k)->mii_data != mk_mii_end; k++)
+		mii_queue(dev, (c+k)->mii_data, (c+k)->funct, 0);
+}
+
+static void mii_parse_sr(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
+
+	if (mii_reg & 0x0004)
+		s |= PHY_STAT_LINK;
+	if (mii_reg & 0x0010)
+		s |= PHY_STAT_FAULT;
+	if (mii_reg & 0x0020)
+		s |= PHY_STAT_ANC;
+
+	priv->phy_status = s;
+	priv->link = (s & PHY_STAT_LINK) ? 1 : 0;
+}
+
+static void mii_parse_cr(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
+
+	if (mii_reg & 0x1000)
+		s |= PHY_CONF_ANE;
+	if (mii_reg & 0x4000)
+		s |= PHY_CONF_LOOP;
+
+	priv->phy_status = s;
+}
+
+static void mii_parse_anar(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_CONF_SPMASK);
+
+	if (mii_reg & 0x0020)
+		s |= PHY_CONF_10HDX;
+	if (mii_reg & 0x0040)
+		s |= PHY_CONF_10FDX;
+	if (mii_reg & 0x0080)
+		s |= PHY_CONF_100HDX;
+	if (mii_reg & 0x0100)
+		s |= PHY_CONF_100FDX;
+
+	priv->phy_status = s;
+}
+
+/* ------------------------------------------------------------------------- */
+/* Generic PHY support.  Should work for all PHYs, but does not support link
+ * change interrupts.
+ */
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY
+
+static phy_info_t phy_info_generic = {
+	0x00000000, /* 0-->match any PHY */
+	"GENERIC",
+
+	(const phy_cmd_t []) {  /* config */
+		/* advertise only half-duplex capabilities */
+		{ mk_mii_write(MII_ADVERTISE, MII_ADVERTISE_HALF),
+			mii_parse_anar },
+
+		/* enable auto-negotiation */
+		{ mk_mii_write(MII_BMCR, BMCR_ANENABLE), mii_parse_cr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		/* restart auto-negotiation */
+		{ mk_mii_write(MII_BMCR, (BMCR_ANENABLE | BMCR_ANRESTART)),
+			NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* We don't actually use the ack_int table with a generic
+		 * PHY, but putting a reference to mii_parse_sr here keeps
+		 * us from getting a compiler warning about unused static
+		 * functions in the case where we only compile in generic
+		 * PHY support.
+		 */
+		{ mk_mii_read(MII_BMSR), mii_parse_sr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown */
+		{ mk_mii_end, }
+	},
+};
+#endif	/* CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY */
+
+/* ------------------------------------------------------------------------- */
+/* The Level One LXT971 is used on some of my custom boards		     */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_LXT971
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR	16	/* Port Control Register	*/
+#define MII_LXT971_SR2	17	/* Status Register 2		*/
+#define MII_LXT971_IER	18	/* Interrupt Enable Register	*/
+#define MII_LXT971_ISR	19	/* Interrupt Status Register	*/
+#define MII_LXT971_LCR	20	/* LED Control Register		*/
+#define MII_LXT971_TCR	30	/* Transmit Control Register	*/
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x4000) {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	else {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	if (mii_reg & 0x0008)
+		s |= PHY_STAT_FAULT;
+
+	/* Record the new full_duplex value only if the link is up
+	 * (so we don't bother restarting the driver on duplex
+	 * changes when the link is down).
+	 */
+	if (priv->link) {
+		int prev_duplex = priv->full_duplex;
+		priv->full_duplex = ((mii_reg & 0x0200) != 0);
+		if (priv->full_duplex != prev_duplex) {
+			/* trigger a restart with changed duplex */
+			priv->duplex_change = 1;
+#if MPC5xxx_FEC_DEBUG > 1
+			printk("%s: duplex change: %s\n",
+			       dev->name, priv->full_duplex ? "full" : "half");
+#endif
+		}
+	}
+	priv->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt971 = {
+	0x0001378e,
+	"LXT971",
+
+	(const phy_cmd_t []) {	/* config */
+#ifdef MPC5100_FIX10HDX
+		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10 Mbps, HD */
+#else
+/*		{ mk_mii_write(MII_REG_ANAR, 0x0A1), NULL }, *//*  10/100, HD */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL }, /* 10/100, FD */
+#endif
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {	/* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+
+		/* For some reason the 971 reports link down on the
+		 * first read after power-up.
+		 * Read here to get a valid value in ack_int. */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+#if defined(CONFIG_UC101)
+		{ mk_mii_write(MII_LXT971_LCR, 0x4122), NULL }, /* LED settings */
+#endif
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+
+		/* we only need to read ISR to acknowledge */
+
+		{ mk_mii_read(MII_LXT971_ISR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {	/* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_LXT971 */
+
+/* ----------------------------------------------------------------- */
+/* The National Semiconductor DP83847 is used on an INKA 4X0 board   */
+/* ----------------------------------------------------------------- */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_DP83847
+
+/* Register definitions */
+#define MII_DP83847_PHYSTS 0x10  /* PHY Status Register */
+
+static void mii_parse_dp83847_physts(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x2) {
+		if (mii_reg & 0x4)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	else {
+		if (mii_reg & 0x4)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	if (mii_reg & 0x40)
+		s |= PHY_STAT_FAULT;
+
+	priv->full_duplex = ((mii_reg & 0x4) != 0);
+
+	priv->phy_status = s;
+}
+
+static phy_info_t phy_info_dp83847 = {
+	0x020005c3,
+	"DP83847",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL  }, /* Auto-Negotiation Advertisement set to        */
+							       /* auto-negotiate 10/100 Mbps, Half/Full duplex */
+		{ mk_mii_read(MII_REG_CR),   mii_parse_cr   },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* Enable and Restart Auto-Negotiation */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83847_PHYSTS), mii_parse_dp83847_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83847_PHYSTS), mii_parse_dp83847_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_end, }
+	}
+};
+
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_DP83847 */
+
+static phy_info_t *phy_info[] = {
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_LXT971
+	&phy_info_lxt971,
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_LXT971 */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_DP83847
+	&phy_info_dp83847,
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_DP83847 */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY
+	/* Generic PHY support.  This must be the last PHY in the table.
+	 * It will be used to support any PHY that doesn't match a previous
+	 * entry in the table.
+	 */
+	&phy_info_generic,
+#endif /* CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY */
+
+	NULL
+};
+
+static void mii_display_config(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint s = priv->phy_status;
+
+	printk("%s: config: auto-negotiation ", dev->name);
+
+	if (s & PHY_CONF_ANE)
+		printk("on");
+	else
+		printk("off");
+
+	if (s & PHY_CONF_100FDX)
+		printk(", 100FDX");
+	if (s & PHY_CONF_100HDX)
+		printk(", 100HDX");
+	if (s & PHY_CONF_10FDX)
+		printk(", 10FDX");
+	if (s & PHY_CONF_10HDX)
+		printk(", 10HDX");
+	if (!(s & PHY_CONF_SPMASK))
+		printk(", No speed/duplex selected?");
+
+	if (s & PHY_CONF_LOOP)
+		printk(", loopback enabled");
+
+	printk(".\n");
+
+	priv->sequence_done = 1;
+}
+
+static void mii_queue_config(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+
+	priv->phy_task.routine = (void *)mii_display_config;
+	priv->phy_task.data = dev;
+	schedule_task(&priv->phy_task);
+}
+
+
+phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
+			       { mk_mii_end, } };
+
+
+/* Read remainder of PHY ID.
+*/
+static void
+mii_discover_phy3(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	int	i;
+
+	priv->phy_id |= (mii_reg & 0xffff);
+
+	for (i = 0; phy_info[i]; i++) {
+		/* match this PHY's id, or stop at the generic entry (id 0)
+		 * that terminates the table */
+		if (phy_info[i]->id == (priv->phy_id >> 4) || !phy_info[i]->id)
+			break;
+	}
+
+	if (!phy_info[i])
+		panic("%s: PHY id 0x%08x is not supported!\n",
+			dev->name, priv->phy_id);
+
+	priv->phy = phy_info[i];
+	priv->phy_id_done = 1;
+
+	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
+		dev->name, priv->phy_addr, priv->phy->name, priv->phy_id);
+#if defined(CONFIG_UC101)
+	mii_do_cmd(dev, priv->phy->startup);
+#endif
+}
+
+/* Scan all of the MII PHY addresses looking for someone to respond
+ * with a valid ID.  This usually happens quickly.
+ */
+static void
+mii_discover_phy(uint mii_reg, struct rtnet_device *dev, uint data)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	uint	phytype;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mii_discover_phy\n");
+#endif
+
+	if ((phytype = (mii_reg & 0xffff)) != 0xffff) {
+		/* Got first part of ID, now get remainder.
+		*/
+		priv->phy_id = phytype << 16;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3, 0);
+	} else {
+		priv->phy_addr++;
+		if (priv->phy_addr < 32)
+			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
+							mii_discover_phy, 0);
+		else
+			printk("fec: No PHY device found.\n");
+	}
+}
+
+static void
+mpc5xxx_fec_link_up(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)(dev->priv);
+
+	printk("mpc5xxx_fec_link_up: link_up=%d\n", priv->link_up);
+#ifdef ORIGINAL_CODE
+	priv->link_up = 0;
+#endif /* ORIGINAL_CODE */
+	mii_display_status(dev);
+	if (priv->duplex_change) {
+#if MPC5xxx_FEC_DEBUG > 1
+		printk("%s: restarting with %s duplex...\n",
+		       dev->name, priv->full_duplex ? "full" : "half");
+#endif
+		mpc5xxx_fec_restart(dev, priv->full_duplex);
+	}
+}
+
+/*
+ * Execute the ack_int command set and schedule the next timer callback.
+ */
+static void mdio_timer_callback(unsigned long data)
+{
+	struct rtnet_device *dev = (struct rtnet_device *)data;
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)(dev->priv);
+	mii_do_cmd(dev, priv->phy->ack_int);
+
+	if (priv->link_up) {
+#ifdef ORIGINAL_CODE
+		priv->link_up_task.routine = (void *)mpc5xxx_fec_link_up;
+		priv->link_up_task.data = dev;
+		schedule_task(&priv->link_up_task);
+#else
+		mpc5xxx_fec_link_up(dev);
+		return;
+#endif /* ORIGINAL_CODE */
+	}
+	/* Reschedule in 1 second */
+	priv->phy_timer_list.expires = jiffies + HZ;
+	add_timer(&priv->phy_timer_list);
+}
+
+/*
+ * Displays the current status of the PHY.
+ */
+static void mii_display_status(struct rtnet_device *dev)
+{
+    struct mpc5xxx_fec_priv *priv = dev->priv;
+    uint s = priv->phy_status;
+
+    printk("%s: status: ", dev->name);
+
+    if (!priv->link) {
+	printk("link down");
+    } else {
+	printk("link up");
+
+	switch(s & PHY_STAT_SPMASK) {
+	case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
+	case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
+	case PHY_STAT_10FDX:  printk(", 10 Mbps Full Duplex");  break;
+	case PHY_STAT_10HDX:  printk(", 10 Mbps Half Duplex");  break;
+	default:
+	    printk(", Unknown speed/duplex");
+	}
+
+	if (s & PHY_STAT_ANC)
+	    printk(", auto-negotiation complete");
+    }
+
+    if (s & PHY_STAT_FAULT)
+	printk(", remote fault");
+
+    printk(".\n");
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+#define RFIFO_DATA	0xf0003184
+#define TFIFO_DATA	0xf00031a4
+
+/*
+ * Initialize FEC receive task.
+ * Returns task number of FEC receive task.
+ * Returns -1 on failure
+ */
+int
+mpc5xxx_fec_rx_task_setup(int num_bufs, int maxbufsize)
+{
+	static TaskSetupParamSet_t params;
+	int tasknum;
+
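+	/*
+	 * The source is the fixed RX FIFO data register (RFIFO_DATA), read
+	 * in 32-bit units without incrementing, while the destination
+	 * buffer pointer advances in 32-bit steps.
+	 */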
+	params.NumBD = num_bufs;
+	params.Size.MaxBuf = maxbufsize;
+	params.StartAddrSrc = RFIFO_DATA;
+	params.IncrSrc = 0;
+	params.SzSrc = 4;
+	params.IncrDst = 4;
+	params.SzDst = 4;
+
+	tasknum = TaskSetup(TASK_FEC_RX, &params);
+
+	/* clear pending interrupt bits */
+	TaskIntClear(tasknum);
+
+	return tasknum;
+}
+
+/*
+ * Initialize FEC transmit task.
+ * Returns task number of FEC transmit task.
+ * Returns -1 on failure
+ */
+int
+mpc5xxx_fec_tx_task_setup(int num_bufs)
+{
+	static TaskSetupParamSet_t params;
+	int tasknum;
+
+	params.NumBD = num_bufs;
+	params.IncrSrc = 4;
+	params.SzSrc = 4;
+	params.StartAddrDst = TFIFO_DATA;
+	params.IncrDst = 0;
+	params.SzDst = 4;
+
+	tasknum = TaskSetup(TASK_FEC_TX, &params);
+
+	/* clear pending interrupt bits */
+	TaskIntClear(tasknum);
+
+	return tasknum;
+}
+
+
+
+#ifdef PARANOID_CHECKS
+static volatile int tx_fifo_cnt, tx_fifo_ipos, tx_fifo_opos;
+static volatile int rx_fifo_opos;
+#endif
+
+static struct rtskb *tx_fifo_skb[MPC5xxx_FEC_TBD_NUM];
+static struct rtskb *rx_fifo_skb[MPC5xxx_FEC_RBD_NUM];
+static BDIdx mpc5xxx_bdi_tx = 0;
+
+
+static int
+mpc5xxx_fec_setup(struct rtnet_device *dev, int reinit)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_xlb *xlb = (struct mpc5xxx_xlb *)MPC5xxx_XLB;
+	struct rtskb *skb;
+	int i;
+	struct mpc5xxx_rbuf *rbuf;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 u32_value;
+	u16 u16_value;
+
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("mpc5xxx_fec_setup\n");
+#endif
+
+	mpc5xxx_fec_set_paddr(dev, dev->dev_addr);
+
+	/*
+	 * Initialize receive queue
+	 */
+	priv->r_tasknum = mpc5xxx_fec_rx_task_setup(MPC5xxx_FEC_RBD_NUM,
+						    MPC5xxx_FEC_RECV_BUFFER_SIZE_BC);
+	TaskBDReset(priv->r_tasknum);
+	for(i=0;i<MPC5xxx_FEC_RBD_NUM;i++) {
+		BDIdx bdi_a;
+		if(!reinit) {
+			skb = dev_alloc_rtskb(sizeof *rbuf, dev);
+			if (skb == 0)
+				goto eagain;
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			rtskb_reserve(skb,2);
+#endif
+			rbuf = (struct mpc5xxx_rbuf *)rtskb_put(skb, sizeof *rbuf);
+			rx_fifo_skb[i]=skb;
+		}
+		else {
+			skb=rx_fifo_skb[i];
+			rbuf = (struct mpc5xxx_rbuf *)skb->data;
+		}
+		bdi_a = TaskBDAssign(priv->r_tasknum,
+					(void*)virt_to_phys((void *)&rbuf->data),
+					0, sizeof *rbuf, MPC5xxx_FEC_RBD_INIT);
+		if(bdi_a<0)
+			panic("mpc5xxx_fec_setup: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+	}
+#ifdef PARANOID_CHECKS
+	rx_fifo_opos = 0;
+#endif
+
+	/*
+	 * Initialize transmit queue
+	 */
+	if(!reinit) {
+		priv->t_tasknum = mpc5xxx_fec_tx_task_setup(MPC5xxx_FEC_TBD_NUM);
+		TaskBDReset(priv->t_tasknum);
+		mpc5xxx_bdi_tx = 0;
+		for(i=0;i<MPC5xxx_FEC_TBD_NUM;i++) tx_fifo_skb[i]=0;
+#ifdef PARANOID_CHECKS
+		tx_fifo_cnt = tx_fifo_ipos = tx_fifo_opos = 0;
+#endif
+
+		dev->irq = MPC5xxx_FEC_IRQ;
+		priv->r_irq = MPC5xxx_SDMA_IRQ_BASE + priv->r_tasknum;
+		priv->t_irq = MPC5xxx_SDMA_IRQ_BASE + priv->t_tasknum;
+
+		if ((i = rtdm_irq_request(&priv->irq_handle, dev->irq,
+					  mpc5xxx_fec_interrupt, 0,
+					  "rteth_err", dev))) {
+			printk(KERN_ERR "FEC interrupt allocation failed\n");
+			return i;
+		}
+
+		if ((i = rtdm_irq_request(&priv->r_irq_handle, priv->r_irq,
+					  mpc5xxx_fec_receive_interrupt, 0,
+					  "rteth_recv", dev))) {
+			printk(KERN_ERR "FEC receive task interrupt allocation failed\n");
+			return i;
+		}
+
+		if ((i = rtdm_irq_request(&priv->t_irq_handle, priv->t_irq,
+					  mpc5xxx_fec_transmit_interrupt, 0,
+					  "rteth_xmit", dev))) {
+			printk(KERN_ERR "FEC transmit task interrupt allocation failed\n");
+			return i;
+		}
+
+		rt_stack_connect(dev, &STACK_manager);
+
+		u32_value = in_be32(&priv->gpio->port_config);
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		u32_value |= 0x00050000;	/* 100MBit with MD	*/
+#else
+		u32_value |= 0x00020000;	/* 10MBit with 7-wire	*/
+#endif
+		out_be32(&priv->gpio->port_config, u32_value);
+
+	}
+
+	out_be32(&fec->op_pause, 0x00010020);	/* change to 0xffff0020 ??? */
+	out_be32(&fec->rfifo_cntrl, 0x0f240000);
+	out_be32(&fec->rfifo_alarm, 0x0000030c);
+	out_be32(&fec->tfifo_cntrl, 0x0f240000);
+	out_be32(&fec->tfifo_alarm, 0x00000100);
+	out_be32(&fec->x_wmrk, 0x3);		/* xmit fifo watermark = 256 */
+	out_be32(&fec->xmit_fsm, 0x03000000);	/* enable crc generation */
+	out_be32(&fec->iaddr1, 0x00000000);	/* No individual filter */
+	out_be32(&fec->iaddr2, 0x00000000);	/* No individual filter */
+
+#ifdef CONFIG_MPC5200
+	/* Disable COMM Bus Prefetch */
+	u16_value = in_be16(&priv->sdma->PtdCntrl);
+	u16_value |= 1;
+	out_be16(&priv->sdma->PtdCntrl, u16_value);
+
+	/* Disable (or enable?) BestComm XLB address snooping */
+	out_be32(&xlb->config, in_be32(&xlb->config) | MPC5200B_XLB_CONF_BSDIS);
+#endif
+
+	if(!reinit) {
+#if !defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO)
+		mpc5xxx_fec_restart (dev, 0);	/* always use half duplex mode only */
+#else
+#ifdef CONFIG_UBOOT
+		extern unsigned char __res[];
+		bd_t *bd = (bd_t *)__res;
+#define MPC5xxx_IPBFREQ bd->bi_ipbfreq
+#else
+#define MPC5xxx_IPBFREQ CONFIG_PPC_5xxx_IPBFREQ
+#endif
+
+		for (i=0; i<NMII-1; i++)
+			mii_cmds[i].mii_next = &mii_cmds[i+1];
+		mii_free = mii_cmds;
+
+		priv->phy_speed = (((MPC5xxx_IPBFREQ >> 20) / 5) << 1);
+
+		/*mpc5xxx_fec_restart (dev, 0);*/ /* half duplex, negotiate speed */
+		mpc5xxx_fec_restart (dev, 1);	/* full duplex, negotiate speed */
+
+		/* Queue up command to detect the PHY and initialize the
+		 * remainder of the interface.
+		 */
+		priv->phy_id_done = 0;
+		priv->phy_addr = 0;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy, 0);
+
+		priv->old_status = 0;
+
+		/*
+		 * Read MIB counters in order to reset them,
+		 * then zero all the stats fields in memory
+		 */
+		mpc5xxx_fec_update_stat(dev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		if (reinit) {
+			if (!priv->sequence_done) {
+				if (!priv->phy) {
+					printk("mpc5xxx_fec_open: PHY not configured\n");
+					return -ENODEV;		/* No PHY we understand */
+				}
+
+				mii_do_cmd(dev, priv->phy->config);
+				mii_do_cmd(dev, phy_cmd_config);  /* display configuration */
+				while(!priv->sequence_done)
+					schedule();
+
+				mii_do_cmd(dev, priv->phy->startup);
+
+				/*
+				 * Currently, MII link interrupts are not supported,
+				 * so start the 100 msec timer to monitor the link up event.
+				 */
+				init_timer(&priv->phy_timer_list);
+
+				priv->phy_timer_list.expires = jiffies + (100 * HZ / 1000);
+				priv->phy_timer_list.data = (unsigned long)dev;
+				priv->phy_timer_list.function = mdio_timer_callback;
+				add_timer(&priv->phy_timer_list);
+
+				printk("%s: Waiting for the link to be up...\n", dev->name);
+				while (priv->link == 0) {
+					schedule();
+				}
+				mii_display_status(dev);
+				if (priv->full_duplex == 0) { /* FD is not negotiated, restart the fec in HD */
+					mpc5xxx_fec_restart(dev, 0);
+				}
+			}
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+#endif
+	}
+	else {
+		mpc5xxx_fec_restart (dev, 0);
+	}
+
+	rtnetif_start_queue(dev);
+
+	TaskStart(priv->r_tasknum, TASK_AUTOSTART_ENABLE,
+		  priv->r_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if(reinit) {
+		TaskStart(priv->t_tasknum, TASK_AUTOSTART_ENABLE,
+			  priv->t_tasknum, TASK_INTERRUPT_ENABLE);
+	}
+
+	return 0;
+
+eagain:
+	printk("mpc5xxx_fec_setup: failed\n");
+	for (i=0; i<MPC5xxx_FEC_RBD_NUM; i++) {
+		skb = rx_fifo_skb[i];
+		if (skb == 0)
+			break;
+		dev_kfree_rtskb(skb);
+	}
+	TaskBDReset(priv->r_tasknum);
+
+	return -EAGAIN;
+}
+
+static int
+mpc5xxx_fec_open(struct rtnet_device *dev)
+{
+	return mpc5xxx_fec_setup(dev,0);
+}
+
+/* This will only be invoked if your driver is _not_ in XOFF state.
+ * What this means is that you need not check it, and that this
+ * invariant will hold if you make sure that the netif_*_queue()
+ * calls are done at the proper times.
+ */
+static int
+mpc5xxx_fec_hard_start_xmit(struct rtskb *skb, struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	rtdm_lockctx_t context;
+	int pad;
+	short length;
+	BDIdx bdi_a;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mpc5xxx_fec_hard_start_xmit:\n");
+	printk("dev %08x, priv %08x, skb %08x\n",
+			(u32)dev, (u32)priv, (u32)skb);
+#endif
+#if MPC5xxx_FEC_DEBUG > 0
+	if (fec_start_status(&priv->t_queue) & MPC5xxx_FEC_TBD_TFD)
+		panic("MPC5xxx transmit queue overrun\n");
+#endif
+
+	length = skb->len;
+#ifdef	MUST_ALIGN_TRANSMIT_DATA
+	pad = (int)skb->data & 3;
+	if (pad) {
+		void *old_data = skb->data;
+		rtskb_push(skb, pad);
+		memcpy(skb->data, old_data, length);
+		rtskb_trim(skb, length);
+	}
+#endif
+	/* Zero out up to the minimum length ethernet packet size,
+	 * so we don't inadvertently expose sensitive data
+	 */
+	pad = ETH_ZLEN - skb->len;
+	if (pad > 0) {
+		skb = rtskb_padto(skb, ETH_ZLEN);
+		if (skb == 0) {
+			printk("rtskb_padto failed\n");
+			return 0;
+		}
+		length += pad;
+	}
+
+	flush_dcache_range((u32)skb->data, (u32)skb->data + length);
+
+	rtdm_lock_get_irqsave(&priv->lock, context);
+
+	bdi_a = TaskBDAssign(priv->t_tasknum,(void*)virt_to_phys((void *)skb->data),
+			     NULL,length,MPC5xxx_FEC_TBD_INIT);
+
+#ifdef PARANOID_CHECKS
+	/* check for other errors during assignment*/
+	if((bdi_a<0)||(bdi_a>=MPC5xxx_FEC_TBD_NUM))
+		panic("mpc5xxx_fec_hard_start_xmit: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+
+	/* sanity check: bdi must always equal tx_fifo_ipos*/
+	if(bdi_a!=tx_fifo_ipos)
+		panic("bdi_a!=tx_fifo_ipos: %i, %i\n",(int)bdi_a,tx_fifo_ipos);
+
+	tx_fifo_cnt++;
+	tx_fifo_ipos++;
+	if(tx_fifo_ipos==MPC5xxx_FEC_TBD_NUM) tx_fifo_ipos=0;
+
+	/* check number of BDs in use*/
+	if(TaskBDInUse(priv->t_tasknum)!=tx_fifo_cnt)
+		panic("TaskBDInUse != tx_fifo_cnt: %i %i\n",TaskBDInUse(priv->t_tasknum),tx_fifo_cnt);
+#endif
+
+	tx_fifo_skb[bdi_a]=skb;
+
+#ifdef ORIGINAL_CODE
+	dev->trans_start = jiffies;
+#endif /* ORIGINAL_CODE */
+
+	/* Get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	TaskStart(priv->t_tasknum, TASK_AUTOSTART_ENABLE, priv->t_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if(TaskBDInUse(priv->t_tasknum)==MPC5xxx_FEC_TBD_NUM) {
+		priv->tx_full = 1;
+		rtnetif_stop_queue(dev);
+	}
+	rtdm_lock_put_irqrestore(&priv->lock, context);
+
+	return 0;
+}
+
+/* This handles SDMA transmit task interrupts
+ */
+static int
+mpc5xxx_fec_transmit_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	BDIdx bdi_r;
+
+	rtdm_lock_get(&priv->lock);
+
+	while(TaskBDInUse(priv->t_tasknum)) {
+
+		/* release BD */
+		bdi_r = TaskBDRelease(priv->t_tasknum);
+
+		/* we are done if we can't release any more BDs*/
+		if(bdi_r==TASK_ERR_BD_BUSY) break;
+		/* if(bdi_r<0) break;*/
+
+#ifdef PARANOID_CHECKS
+		/* check for other errors during release*/
+		if((bdi_r<0)||(bdi_r>=MPC5xxx_FEC_TBD_NUM))
+			panic("mpc5xxx_fec_transmit_interrupt: error while TaskBDRelease, err=%i\n",(int)bdi_r);
+
+		tx_fifo_cnt--;
+		tx_fifo_opos++;
+		if(tx_fifo_opos==MPC5xxx_FEC_TBD_NUM) tx_fifo_opos=0;
+
+		/* sanity check: bdi_r must always equal tx_fifo_opos*/
+		if(bdi_r!=tx_fifo_opos) {
+			panic("bdi_r!=tx_fifo_opos: %i, %i\n",(int)bdi_r,tx_fifo_opos);
+		}
+
+		/* check number of BDs in use*/
+		if(TaskBDInUse(priv->t_tasknum)!=tx_fifo_cnt)
+			panic("TaskBDInUse != tx_fifo_cnt: %i %i\n",TaskBDInUse(priv->t_tasknum),tx_fifo_cnt);
+#endif
+
+		if((tx_fifo_skb[mpc5xxx_bdi_tx])==0)
+			panic("skb confusion in tx\n");
+
+		dev_kfree_rtskb(tx_fifo_skb[mpc5xxx_bdi_tx]);
+		tx_fifo_skb[mpc5xxx_bdi_tx]=0;
+
+		mpc5xxx_bdi_tx = bdi_r;
+
+		if(TaskBDInUse(priv->t_tasknum)<MPC5xxx_FEC_TBD_NUM/2)
+			priv->tx_full = 0;
+
+	}
+
+	if (rtnetif_queue_stopped(dev) && !priv->tx_full)
+		rtnetif_wake_queue(dev);
+
+	rtdm_lock_put(&priv->lock);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static BDIdx mpc5xxx_bdi_rx = 0;
+
+static int
+mpc5xxx_fec_receive_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct rtskb *skb;
+	struct rtskb *nskb;
+	struct mpc5xxx_rbuf *rbuf;
+	struct mpc5xxx_rbuf *nrbuf;
+	u32 status;
+	int length;
+	BDIdx bdi_a, bdi_r;
+	int discard = 0;
+	int dropped = 0;
+	int packets = 0;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	while(1) {
+
+		/* release BD*/
+		bdi_r = TaskBDRelease(priv->r_tasknum);
+
+		/* we are done if we can't release any more BDs*/
+		if(bdi_r==TASK_ERR_BD_BUSY) break;
+
+#ifdef PARANOID_CHECKS
+		/* check for other errors during release*/
+		if((bdi_r<0)||(bdi_r>=MPC5xxx_FEC_RBD_NUM))
+			panic("mpc5xxx_fec_receive_interrupt: error while TaskBDRelease, err=%i\n",(int)bdi_r);
+
+		rx_fifo_opos++;
+		if(rx_fifo_opos==MPC5xxx_FEC_RBD_NUM) rx_fifo_opos=0;
+
+		if(bdi_r != rx_fifo_opos)
+			panic("bdi_r != rx_fifo_opos: %i, %i\n",bdi_r, rx_fifo_opos);
+#endif
+
+		/* get BD status in order to determine length*/
+		status = TaskGetBD(priv->r_tasknum,mpc5xxx_bdi_rx)->Status;
+
+		/* determine packet length and pointer to socket buffer / actual data*/
+		skb = rx_fifo_skb[mpc5xxx_bdi_rx];
+		length = (status & 0xffff) - 4;
+		rbuf = (struct mpc5xxx_rbuf *)skb->data;
+
+#ifndef EXIT_ISR_AT_MEMORY_SQUEEZE
+		/* in case of a memory squeeze, we just drop all packets, because*/
+		/* subsequent allocations will also fail.*/
+		if(discard!=3) {
+#endif
+
+			/* check for frame errors*/
+			if(status&0x00370000) {
+				/* frame error, drop */
+#ifdef DISPLAY_WARNINGS
+				if(status&MPC5xxx_FEC_FRAME_LG)
+					printk("%s: Frame length error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_NO)
+					printk("%s: Non-octet aligned frame error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_CR)
+					printk("%s: Frame CRC error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_OV)
+					printk("%s: FIFO overrun error, dropping packet (status=0x%08x)\n",dev->name,status);
+				if(status&MPC5xxx_FEC_FRAME_TR)
+					printk("%s: Frame truncated error, dropping packet (status=0x%08x)\n",dev->name,status);
+#endif
+				discard=1;
+			}
+			else if (length>(MPC5xxx_FEC_RECV_BUFFER_SIZE-4)) {
+				/* packet too big, drop */
+#ifdef DISPLAY_WARNINGS
+				printk("%s: Frame too big, dropping packet (length=%i)\n",dev->name,length);
+#endif
+				discard=2;
+			}
+			else {
+				/* allocate replacement skb */
+				nskb = dev_alloc_rtskb(sizeof *nrbuf, dev);
+				if (nskb == NULL) {
+					/* memory squeeze, drop */
+					discard=3;
+					dropped++;
+				}
+				else {
+					discard=0;
+				}
+			}
+
+#ifndef EXIT_ISR_AT_MEMORY_SQUEEZE
+		}
+		else {
+			dropped++;
+		}
+#endif
+
+		if (discard) {
+			priv->stats.rx_dropped++;
+			nrbuf = (struct mpc5xxx_rbuf *)skb->data;
+		}
+		else {
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			rtskb_reserve(nskb,2);
+#endif
+			nrbuf = (struct mpc5xxx_rbuf *)rtskb_put(nskb, sizeof *nrbuf);
+
+			/* invalidate only the bytes actually received in the dcache */
+#ifdef MUST_UNALIGN_RECEIVE_DATA
+			invalidate_dcache_range((u32)rbuf - 2, (u32)rbuf + length);
+#else
+			invalidate_dcache_range((u32)rbuf, (u32)rbuf + length);
+#endif
+			rtskb_trim(skb, length);
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			skb->time_stamp = time_stamp;
+			rtnetif_rx(skb);
+			packets++;
+#ifdef ORIGINAL_CODE
+			dev->last_rx = jiffies;
+#endif /* ORIGINAL_CODE */
+			rx_fifo_skb[mpc5xxx_bdi_rx] = nskb;
+		}
+
+		/* Assign new socket buffer to BD*/
+		bdi_a = TaskBDAssign(priv->r_tasknum, (void*)virt_to_phys((void *)&nrbuf->data),
+				     0, sizeof *nrbuf, MPC5xxx_FEC_RBD_INIT);
+
+#ifdef PARANOID_CHECKS
+		/* check for errors during assignment*/
+		if((bdi_a<0)||(bdi_a>=MPC5xxx_FEC_RBD_NUM))
+			panic("mpc5xxx_fec_receive_interrupt: error while TaskBDAssign, err=%i\n",(int)bdi_a);
+
+		/* check if Assign/Release sequence numbers are ok*/
+		if(((bdi_a+1)%MPC5xxx_FEC_RBD_NUM) != bdi_r)
+			panic("bdi_a+1 != bdi_r: %i %i\n",(int)((bdi_a+1)%MPC5xxx_FEC_RBD_NUM),(int)bdi_r);
+#endif
+
+		mpc5xxx_bdi_rx = bdi_r;
+
+#ifdef EXIT_ISR_AT_MEMORY_SQUEEZE
+		/* if we couldn't get memory for a new socket buffer, then it doesn't*/
+		/* make sense to proceed.*/
+		if (discard==3)
+			break;
+#endif
+
+	}
+
+#ifdef DISPLAY_WARNINGS
+	if(dropped) {
+		printk("%s: Memory squeeze, dropped %i packets\n",dev->name,dropped);
+	}
+#endif
+	TaskStart(priv->r_tasknum, TASK_AUTOSTART_ENABLE, priv->r_tasknum, TASK_INTERRUPT_ENABLE);
+
+	if (packets > 0)
+		rt_mark_stack_mgr(dev);
+	return RTDM_IRQ_HANDLED;
+}
+
+
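+/*
+ * Recovery path: invoked from the error interrupt when the controller
+ * reports an RX or TX FIFO error.  Tears the FEC down and sets it up
+ * again with reinit=1 so buffers and IRQ registrations are reused.
+ */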
+static void
+mpc5xxx_fec_reinit(struct rtnet_device *dev)
+{
+	int retval;
+	printk("mpc5xxx_fec_reinit\n");
+	mpc5xxx_fec_cleanup(dev,1);
+	retval=mpc5xxx_fec_setup(dev,1);
+	if(retval) panic("reinit failed\n");
+}
+
+
+static int
+mpc5xxx_fec_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *dev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	int ievent;
+
+#if MPC5xxx_FEC_DEBUG > 4
+	printk("mpc5xxx_fec_interrupt:\n");
+#endif
+
+	ievent = in_be32(&fec->ievent);
+	out_be32(&fec->ievent, ievent);		/* clear pending events */
+
+	if (ievent & (MPC5xxx_FEC_IEVENT_RFIFO_ERROR |
+		      MPC5xxx_FEC_IEVENT_XFIFO_ERROR)) {
+		if (ievent & MPC5xxx_FEC_IEVENT_RFIFO_ERROR)
+			printk(KERN_WARNING "MPC5xxx_FEC_IEVENT_RFIFO_ERROR\n");
+		if (ievent & MPC5xxx_FEC_IEVENT_XFIFO_ERROR)
+			printk(KERN_WARNING "MPC5xxx_FEC_IEVENT_XFIFO_ERROR\n");
+		mpc5xxx_fec_reinit(dev);
+	}
+	else if (ievent & MPC5xxx_FEC_IEVENT_MII) {
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		mpc5xxx_fec_mii(dev);
+#else
+		printk("%s[%d] %s: unexpected MPC5xxx_FEC_IEVENT_MII\n",
+			__FILE__, __LINE__, __FUNCTION__);
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+	}
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int
+mpc5xxx_fec_cleanup(struct rtnet_device *dev, int reinit)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	unsigned long timeout;
+	int i;
+
+	priv->open_time = 0;
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	priv->sequence_done = 0;
+#endif
+
+	rtnetif_stop_queue(dev);
+
+	/* Wait for the tx queue to drain */
+	if(!reinit) {
+		timeout = jiffies + 2*HZ;
+		while (TaskBDInUse(priv->t_tasknum) && (jiffies < timeout)) {
+			set_current_state(TASK_INTERRUPTIBLE);
+			schedule_timeout(HZ/10);
+		}
+	}
+
+	/* Disable FEC interrupts */
+	out_be32(&fec->imask, 0x0);
+
+	/* Stop FEC */
+	out_be32(&fec->ecntrl, in_be32(&fec->ecntrl) & ~0x2);
+
+	/* Disable the rx and tx queues. */
+	TaskStop(priv->r_tasknum);
+	TaskStop(priv->t_tasknum);
+
+	/* Release irqs */
+	if(!reinit) {
+		rtdm_irq_disable(&priv->irq_handle);
+		rtdm_irq_disable(&priv->r_irq_handle);
+		rtdm_irq_disable(&priv->t_irq_handle);
+		rtdm_irq_free(&priv->irq_handle);
+		rtdm_irq_free(&priv->r_irq_handle);
+		rtdm_irq_free(&priv->t_irq_handle);
+		rt_stack_disconnect(dev);
+	}
+
+	/* Free rx Buffers */
+	if(!reinit) {
+		for (i=0; i<MPC5xxx_FEC_RBD_NUM; i++) {
+			dev_kfree_rtskb(rx_fifo_skb[i]);
+		}
+	}
+
+	mpc5xxx_fec_get_stats(dev);
+
+	return 0;
+}
+
+static int
+mpc5xxx_fec_close(struct rtnet_device *dev)
+{
+	int ret = mpc5xxx_fec_cleanup(dev,0);
+	return ret;
+}
+
+/*
+ * Get the current statistics.
+ * This may be called with the card open or closed.
+ */
+static struct net_device_stats *mpc5xxx_fec_get_stats(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct net_device_stats *stats = &priv->stats;
+	struct mpc5xxx_fec *fec = priv->fec;
+
+	stats->rx_bytes = in_be32(&fec->rmon_r_octets);
+	stats->rx_packets = in_be32(&fec->rmon_r_packets);
+	stats->rx_errors = stats->rx_packets - (
+					in_be32(&fec->ieee_r_frame_ok) +
+					in_be32(&fec->rmon_r_mc_pkt));
+	stats->tx_bytes = in_be32(&fec->rmon_t_octets);
+	stats->tx_packets = in_be32(&fec->rmon_t_packets);
+	stats->tx_errors = stats->tx_packets - (
+					in_be32(&fec->ieee_t_frame_ok) +
+					in_be32(&fec->rmon_t_col) +
+					in_be32(&fec->ieee_t_1col) +
+					in_be32(&fec->ieee_t_mcol) +
+					in_be32(&fec->ieee_t_def));
+	stats->multicast = in_be32(&fec->rmon_r_mc_pkt);
+	stats->collisions = in_be32(&fec->rmon_t_col);
+
+	/* detailed rx_errors: */
+	stats->rx_length_errors = in_be32(&fec->rmon_r_undersize)
+			+ in_be32(&fec->rmon_r_oversize)
+			+ in_be32(&fec->rmon_r_frag)
+			+ in_be32(&fec->rmon_r_jab);
+	stats->rx_over_errors = in_be32(&fec->r_macerr);
+	stats->rx_crc_errors = in_be32(&fec->ieee_r_crc);
+	stats->rx_frame_errors = in_be32(&fec->ieee_r_align);
+	stats->rx_fifo_errors = in_be32(&fec->rmon_r_drop);
+	stats->rx_missed_errors = in_be32(&fec->rmon_r_drop);
+
+	/* detailed tx_errors: */
+	stats->tx_aborted_errors = 0;
+	stats->tx_carrier_errors = in_be32(&fec->ieee_t_cserr);
+	stats->tx_fifo_errors = in_be32(&fec->rmon_t_drop) +
+				in_be32(&fec->ieee_t_macerr);
+	stats->tx_heartbeat_errors = in_be32(&fec->ieee_t_sqe);
+	stats->tx_window_errors = in_be32(&fec->ieee_t_lcol);
+
+	return stats;
+}
+
+static void
+mpc5xxx_fec_update_stat(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct net_device_stats *stats = &priv->stats;
+	struct mpc5xxx_fec *fec = priv->fec;
+
+	out_be32(&fec->mib_control, MPC5xxx_FEC_MIB_DISABLE);
+	memset_io(&fec->rmon_t_drop, 0,
+			(u32)&fec->reserved10 - (u32)&fec->rmon_t_drop);
+	out_be32(&fec->mib_control, 0);
+	memset(stats, 0, sizeof *stats);
+	mpc5xxx_fec_get_stats(dev);
+}
+
+#ifdef ORIGINAL_CODE
+/*
+ * Set or clear the multicast filter for this adaptor.
+ */
+static void
+mpc5xxx_fec_set_multicast_list(struct rtnet_device *dev)
+{
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+	struct mpc5xxx_fec *fec = priv->fec;
+	u32 u32_value;
+
+	if (dev->flags & IFF_PROMISC) {
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		u32_value = in_be32(&fec->r_cntrl);
+		u32_value |= MPC5xxx_FEC_RCNTRL_PROM;
+		out_be32(&fec->r_cntrl, u32_value);
+	}
+	else if (dev->flags & IFF_ALLMULTI) {
+		u32_value = in_be32(&fec->r_cntrl);
+		u32_value &= ~MPC5xxx_FEC_RCNTRL_PROM;
+		out_be32(&fec->r_cntrl, u32_value);
+		out_be32(&fec->gaddr1, 0xffffffff);
+		out_be32(&fec->gaddr2, 0xffffffff);
+	}
+	else {
+		u32 crc;
+		int i;
+		struct dev_mc_list *dmi;
+		u32 gaddr1 = 0x00000000;
+		u32 gaddr2 = 0x00000000;
+
+		dmi = dev->mc_list;
+		for (i=0; i<dev->mc_count; i++) {
+			crc = ether_crc_le(6, dmi->dmi_addr) >> 26;
+			if (crc >= 32)
+				gaddr1 |= 1 << (crc-32);
+			else
+				gaddr2 |= 1 << crc;
+			dmi = dmi->next;
+		}
+		out_be32(&fec->gaddr1, gaddr1);
+		out_be32(&fec->gaddr2, gaddr2);
+	}
+}
+#endif /* ORIGINAL_CODE */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET
+static void mpc5xxx_mdio_callback(uint regval, struct rtnet_device *dev, uint data)
+{
+	mdio_read_data_t* mrd = (mdio_read_data_t *)data;
+	mrd->regval = 0xFFFF & regval;
+	wake_up_process(mrd->sleeping_task);
+}
+
+static int mpc5xxx_mdio_read(struct rtnet_device *dev, int phy_id, int location)
+{
+	uint retval;
+	mdio_read_data_t* mrd = (mdio_read_data_t *)kmalloc(sizeof(*mrd),
+			GFP_KERNEL);
+
+	if (!mrd)
+		return -ENOMEM;
+
+	mrd->sleeping_task = current;
+	set_current_state(TASK_INTERRUPTIBLE);
+	mii_queue(dev, mk_mii_read(location),
+		mpc5xxx_mdio_callback, (unsigned int) mrd);
+	schedule();
+
+	retval = mrd->regval;
+
+	kfree(mrd);
+
+	return retval;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+static void mpc5xxx_mdio_write(struct rtnet_device *dev, int phy_id, int location, int value)
+{
+	mii_queue(dev, mk_mii_write(location, value), NULL, 0);
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifdef ORIGINAL_CODE
+static int
+mpc5xxx_netdev_ethtool_ioctl(struct rtnet_device *dev, void *useraddr)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	struct mpc5xxx_fec_priv *private = (struct mpc5xxx_fec_priv *)dev->priv;
+#endif
+	u32 ethcmd;
+
+	if (copy_from_user(&ethcmd, useraddr, sizeof ethcmd))
+		return -EFAULT;
+
+	switch (ethcmd) {
+
+		/* Get driver info */
+	case ETHTOOL_GDRVINFO:{
+			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+			strncpy(info.driver, "gt64260",
+				sizeof info.driver - 1);
+			strncpy(info.version, version,
+				sizeof info.version - 1);
+			if (copy_to_user(useraddr, &info, sizeof info))
+				return -EFAULT;
+			return 0;
+		}
+		/* get settings */
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	case ETHTOOL_GSET:{
+			struct ethtool_cmd ecmd = { ETHTOOL_GSET };
+			spin_lock_irq(&private->lock);
+			mii_ethtool_gset(&private->mii_if, &ecmd);
+			spin_unlock_irq(&private->lock);
+			if (copy_to_user(useraddr, &ecmd, sizeof ecmd))
+				return -EFAULT;
+			return 0;
+		}
+		/* set settings */
+	case ETHTOOL_SSET:{
+			int r;
+			struct ethtool_cmd ecmd;
+			if (copy_from_user(&ecmd, useraddr, sizeof ecmd))
+				return -EFAULT;
+			spin_lock_irq(&private->lock);
+			r = mii_ethtool_sset(&private->mii_if, &ecmd);
+			spin_unlock_irq(&private->lock);
+			return r;
+		}
+		/* restart autonegotiation */
+	case ETHTOOL_NWAY_RST:{
+			return mii_nway_restart(&private->mii_if);
+		}
+		/* get link status */
+	case ETHTOOL_GLINK:{
+			struct ethtool_value edata = { ETHTOOL_GLINK };
+			edata.data = mii_link_ok(&private->mii_if);
+			if (copy_to_user(useraddr, &edata, sizeof edata))
+				return -EFAULT;
+			return 0;
+		}
+#endif
+		/* get message-level */
+	case ETHTOOL_GMSGLVL:{
+			struct ethtool_value edata = { ETHTOOL_GMSGLVL };
+			edata.data = 0;	/* XXX */
+			if (copy_to_user(useraddr, &edata, sizeof edata))
+				return -EFAULT;
+			return 0;
+		}
+		/* set message-level */
+	case ETHTOOL_SMSGLVL:{
+			struct ethtool_value edata;
+			if (copy_from_user(&edata, useraddr, sizeof edata))
+				return -EFAULT;
+/* debug = edata.data; *//* XXX */
+			return 0;
+		}
+	}
+	return -EOPNOTSUPP;
+}
+
+static int
+mpc5xxx_fec_ioctl(struct rtnet_device *dev, struct ifreq *rq, int cmd)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *) &rq->ifr_data;
+	int phy = dev->base_addr & 0x1f;
+#endif
+	int retval;
+
+	switch (cmd) {
+	case SIOCETHTOOL:
+		retval = mpc5xxx_netdev_ethtool_ioctl(
+					dev, (void *) rq->ifr_data);
+		break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO_NOT_YET_XXX
+	case SIOCGMIIPHY:	/* Get address of MII PHY in use. */
+	case SIOCDEVPRIVATE:	/* for binary compat, remove in 2.5 */
+		data->phy_id = phy;
+		/* Fall through */
+
+	case SIOCGMIIREG:	/* Read MII PHY register. */
+	case SIOCDEVPRIVATE + 1:	/* for binary compat, remove in 2.5 */
+		data->val_out =
+			mpc5xxx_mdio_read(dev, data->phy_id&0x1f,
+				data->reg_num&0x1f);
+		retval = 0;
+		break;
+
+	case SIOCSMIIREG:	/* Write MII PHY register. */
+	case SIOCDEVPRIVATE + 2:	/* for binary compat, remove in 2.5 */
+		if (!capable(CAP_NET_ADMIN)) {
+			retval = -EPERM;
+		} else {
+			mpc5xxx_mdio_write(dev, data->phy_id & 0x1f,
+				data->reg_num & 0x1f, data->val_in);
+			retval = 0;
+		}
+		break;
+#endif
+
+	default:
+		retval = -EOPNOTSUPP;
+		break;
+	}
+	return retval;
+}
+
+static void __init
+mpc5xxx_fec_str2mac(char *str, unsigned char *mac)
+{
+	int i;
+	u64 val64;
+
+	val64 = simple_strtoull(str, NULL, 16);
+
+	for (i = 0; i < 6; i++)
+		mac[5-i] = val64 >> (i*8);
+}
+
+static int __init
+mpc5xxx_fec_mac_setup(char *mac_address)
+{
+	mpc5xxx_fec_str2mac(mac_address, mpc5xxx_fec_mac_addr);
+	return 0;
+}
+
+__setup("mpc5xxx_mac=", mpc5xxx_fec_mac_setup);
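+/*
+ * For illustration: since mpc5xxx_fec_str2mac() feeds the whole string to
+ * simple_strtoull() with base 16, booting with e.g. "mpc5xxx_mac=00049f001122"
+ * would yield the station address 00:04:9f:00:11:22 (the address here is
+ * made up; any 12-digit hex string works).
+ */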
+#endif /* ORIGINAL_CODE */
+
+static int __init
+mpc5xxx_fec_init(void)
+{
+	struct mpc5xxx_fec *fec;
+	struct rtnet_device *dev;
+	struct mpc5xxx_fec_priv *priv;
+	int err = 0;
+
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("mpc5xxx_fec_init\n");
+#endif
+
+	if (!rx_pool_size)
+		rx_pool_size = MPC5xxx_FEC_RBD_NUM * 2;
+
+	dev = rt_alloc_etherdev(sizeof(*priv), rx_pool_size + MPC5xxx_FEC_TBD_NUM);
+	if (!dev)
+		return -EIO;
+	rtdev_alloc_name(dev, "rteth%d");
+	memset(dev->priv, 0, sizeof(*priv));
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+
+	mpc5xxx_fec_dev = dev;
+	priv = (struct mpc5xxx_fec_priv *)dev->priv;
+#if MPC5xxx_FEC_DEBUG > 1
+	printk("fec_priv %08x\n", (u32)priv);
+#endif
+	priv->fec = fec = (struct mpc5xxx_fec *)MPC5xxx_FEC;
+	priv->gpio = (struct mpc5xxx_gpio *)MPC5xxx_GPIO;
+	priv->sdma = (struct mpc5xxx_sdma *)MPC5xxx_SDMA;
+
+	rtdm_lock_init(&priv->lock);
+	dev->open		= mpc5xxx_fec_open;
+	dev->stop		= mpc5xxx_fec_close;
+	dev->hard_start_xmit	= mpc5xxx_fec_hard_start_xmit;
+	//FIXME dev->hard_header	= &rt_eth_header;
+	dev->get_stats		= mpc5xxx_fec_get_stats;
+#ifdef ORIGINAL_CODE
+	dev->do_ioctl		= mpc5xxx_fec_ioctl;
+	dev->set_mac_address	= mpc5xxx_fec_set_mac_address;
+	dev->set_multicast_list = mpc5xxx_fec_set_multicast_list;
+
+	dev->tx_timeout		= mpc5xxx_fec_tx_timeout;
+	dev->watchdog_timeo	= MPC5xxx_FEC_WATCHDOG_TIMEOUT;
+#endif /* ORIGINAL_CODE */
+	dev->flags &= ~IFF_RUNNING;
+
+	if ((err = rt_register_rtnetdev(dev)))
+		goto abort;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_FASTROUTE
+	dev->accept_fastpath = mpc5xxx_fec_accept_fastpath;
+#endif
+	if (memcmp(mpc5xxx_fec_mac_addr, null_mac, 6) != 0)
+		memcpy(dev->dev_addr, mpc5xxx_fec_mac_addr, 6);
+	else {
+		*(u32 *)&dev->dev_addr[0] = in_be32(&fec->paddr1);
+		*(u16 *)&dev->dev_addr[4] = in_be16((u16*)&fec->paddr2);
+	}
+
+	/*
+	 * Read MIB counters in order to reset them,
+	 * then zero all the stats fields in memory
+	 */
+	mpc5xxx_fec_update_stat(dev);
+
+	return 0;
+
+abort:
+	rtdev_free(dev);
+
+	return err;
+}
+
+static void __exit
+mpc5xxx_fec_uninit(void)
+{
+	struct rtnet_device *dev = mpc5xxx_fec_dev;
+	struct mpc5xxx_fec_priv *priv = (struct mpc5xxx_fec_priv *)dev->priv;
+
+	rt_stack_disconnect(dev);
+	rt_unregister_rtnetdev(dev);
+	rt_rtdev_disconnect(dev);
+	printk("%s: unloaded\n", dev->name);
+	dev->priv = NULL;
+	rtdev_free(dev);
+}
+
+static int __init
+mpc5xxx_fec_module_init(void)
+{
+	return mpc5xxx_fec_init();
+}
+
+static void __exit
+mpc5xxx_fec_module_exit(void)
+{
+	mpc5xxx_fec_uninit();
+}
+
+module_init(mpc5xxx_fec_module_init);
+module_exit(mpc5xxx_fec_module_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
new file mode 100644
index 0000000..db21607
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc52xx_fec/rt_mpc52xx_fec.h
@@ -0,0 +1,428 @@
+/*
+ * arch/ppc/5xxx_io/fec.h
+ *
+ * Header file for the MPC5xxx Fast Ethernet Controller driver
+ *
+ * Author: Dale Farnsworth <dfarnsworth@mvista.com>
+ *
+ * Copyright 2003 MontaVista Software
+ *
+ * 2003 (c) MontaVista Software, Inc.  This file is licensed under the terms
+ * of the GNU General Public License version 2.  This program is licensed
+ * "as is" without any warranty of any kind, whether express or implied.
+ */
+
+#ifndef __RT_MPC52XX_FEC_H_
+#define __RT_MPC52XX_FEC_H_
+
+#include <linux/types.h>
+#include <linux/kernel.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/skbuff.h>
+#include <asm/mpc5xxx.h>
+#include <bestcomm_api.h>
+
+/* Define board specific options */
+#define CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#define CONFIG_XENO_DRIVERS_NET_FEC_GENERIC_PHY
+#define CONFIG_XENO_DRIVERS_NET_FEC_LXT971
+#undef CONFIG_XENO_DRIVERS_NET_FEC_DP83847
+
+/* Tunable constants */
+#define MPC5xxx_FEC_RECV_BUFFER_SIZE	1518	/* max receive packet size */
+#define MPC5xxx_FEC_RECV_BUFFER_SIZE_BC 2048	/* max receive packet size */
+#define MPC5xxx_FEC_TBD_NUM		256	/* max transmit packets */
+#define MPC5xxx_FEC_RBD_NUM		256	/* max receive packets */
+
+struct mpc5xxx_fec {
+	volatile u32 fec_id;			/* FEC + 0x000 */
+	volatile u32 ievent;			/* FEC + 0x004 */
+	volatile u32 imask;			/* FEC + 0x008 */
+
+	volatile u32 reserved0[1];		/* FEC + 0x00C */
+	volatile u32 r_des_active;		/* FEC + 0x010 */
+	volatile u32 x_des_active;		/* FEC + 0x014 */
+	volatile u32 r_des_active_cl;		/* FEC + 0x018 */
+	volatile u32 x_des_active_cl;		/* FEC + 0x01C */
+	volatile u32 ivent_set;			/* FEC + 0x020 */
+	volatile u32 ecntrl;			/* FEC + 0x024 */
+
+	volatile u32 reserved1[6];		/* FEC + 0x028-03C */
+	volatile u32 mii_data;			/* FEC + 0x040 */
+	volatile u32 mii_speed;			/* FEC + 0x044 */
+	volatile u32 mii_status;		/* FEC + 0x048 */
+
+	volatile u32 reserved2[5];		/* FEC + 0x04C-05C */
+	volatile u32 mib_data;			/* FEC + 0x060 */
+	volatile u32 mib_control;		/* FEC + 0x064 */
+
+	volatile u32 reserved3[6];		/* FEC + 0x068-7C */
+	volatile u32 r_activate;		/* FEC + 0x080 */
+	volatile u32 r_cntrl;			/* FEC + 0x084 */
+	volatile u32 r_hash;			/* FEC + 0x088 */
+	volatile u32 r_data;			/* FEC + 0x08C */
+	volatile u32 ar_done;			/* FEC + 0x090 */
+	volatile u32 r_test;			/* FEC + 0x094 */
+	volatile u32 r_mib;			/* FEC + 0x098 */
+	volatile u32 r_da_low;			/* FEC + 0x09C */
+	volatile u32 r_da_high;			/* FEC + 0x0A0 */
+
+	volatile u32 reserved4[7];		/* FEC + 0x0A4-0BC */
+	volatile u32 x_activate;		/* FEC + 0x0C0 */
+	volatile u32 x_cntrl;			/* FEC + 0x0C4 */
+	volatile u32 backoff;			/* FEC + 0x0C8 */
+	volatile u32 x_data;			/* FEC + 0x0CC */
+	volatile u32 x_status;			/* FEC + 0x0D0 */
+	volatile u32 x_mib;			/* FEC + 0x0D4 */
+	volatile u32 x_test;			/* FEC + 0x0D8 */
+	volatile u32 fdxfc_da1;			/* FEC + 0x0DC */
+	volatile u32 fdxfc_da2;			/* FEC + 0x0E0 */
+	volatile u32 paddr1;			/* FEC + 0x0E4 */
+	volatile u32 paddr2;			/* FEC + 0x0E8 */
+	volatile u32 op_pause;			/* FEC + 0x0EC */
+
+	volatile u32 reserved5[4];		/* FEC + 0x0F0-0FC */
+	volatile u32 instr_reg;			/* FEC + 0x100 */
+	volatile u32 context_reg;		/* FEC + 0x104 */
+	volatile u32 test_cntrl;		/* FEC + 0x108 */
+	volatile u32 acc_reg;			/* FEC + 0x10C */
+	volatile u32 ones;			/* FEC + 0x110 */
+	volatile u32 zeros;			/* FEC + 0x114 */
+	volatile u32 iaddr1;			/* FEC + 0x118 */
+	volatile u32 iaddr2;			/* FEC + 0x11C */
+	volatile u32 gaddr1;			/* FEC + 0x120 */
+	volatile u32 gaddr2;			/* FEC + 0x124 */
+	volatile u32 random;			/* FEC + 0x128 */
+	volatile u32 rand1;			/* FEC + 0x12C */
+	volatile u32 tmp;			/* FEC + 0x130 */
+
+	volatile u32 reserved6[3];		/* FEC + 0x134-13C */
+	volatile u32 fifo_id;			/* FEC + 0x140 */
+	volatile u32 x_wmrk;			/* FEC + 0x144 */
+	volatile u32 fcntrl;			/* FEC + 0x148 */
+	volatile u32 r_bound;			/* FEC + 0x14C */
+	volatile u32 r_fstart;			/* FEC + 0x150 */
+	volatile u32 r_count;			/* FEC + 0x154 */
+	volatile u32 r_lag;			/* FEC + 0x158 */
+	volatile u32 r_read;			/* FEC + 0x15C */
+	volatile u32 r_write;			/* FEC + 0x160 */
+	volatile u32 x_count;			/* FEC + 0x164 */
+	volatile u32 x_lag;			/* FEC + 0x168 */
+	volatile u32 x_retry;			/* FEC + 0x16C */
+	volatile u32 x_write;			/* FEC + 0x170 */
+	volatile u32 x_read;			/* FEC + 0x174 */
+
+	volatile u32 reserved7[2];		/* FEC + 0x178-17C */
+	volatile u32 fm_cntrl;			/* FEC + 0x180 */
+	volatile u32 rfifo_data;		/* FEC + 0x184 */
+	volatile u32 rfifo_status;		/* FEC + 0x188 */
+	volatile u32 rfifo_cntrl;		/* FEC + 0x18C */
+	volatile u32 rfifo_lrf_ptr;		/* FEC + 0x190 */
+	volatile u32 rfifo_lwf_ptr;		/* FEC + 0x194 */
+	volatile u32 rfifo_alarm;		/* FEC + 0x198 */
+	volatile u32 rfifo_rdptr;		/* FEC + 0x19C */
+	volatile u32 rfifo_wrptr;		/* FEC + 0x1A0 */
+	volatile u32 tfifo_data;		/* FEC + 0x1A4 */
+	volatile u32 tfifo_status;		/* FEC + 0x1A8 */
+	volatile u32 tfifo_cntrl;		/* FEC + 0x1AC */
+	volatile u32 tfifo_lrf_ptr;		/* FEC + 0x1B0 */
+	volatile u32 tfifo_lwf_ptr;		/* FEC + 0x1B4 */
+	volatile u32 tfifo_alarm;		/* FEC + 0x1B8 */
+	volatile u32 tfifo_rdptr;		/* FEC + 0x1BC */
+	volatile u32 tfifo_wrptr;		/* FEC + 0x1C0 */
+
+	volatile u32 reset_cntrl;		/* FEC + 0x1C4 */
+	volatile u32 xmit_fsm;			/* FEC + 0x1C8 */
+
+	volatile u32 reserved8[3];		/* FEC + 0x1CC-1D4 */
+	volatile u32 rdes_data0;		/* FEC + 0x1D8 */
+	volatile u32 rdes_data1;		/* FEC + 0x1DC */
+	volatile u32 r_length;			/* FEC + 0x1E0 */
+	volatile u32 x_length;			/* FEC + 0x1E4 */
+	volatile u32 x_addr;			/* FEC + 0x1E8 */
+	volatile u32 cdes_data;			/* FEC + 0x1EC */
+	volatile u32 status;			/* FEC + 0x1F0 */
+	volatile u32 dma_control;		/* FEC + 0x1F4 */
+	volatile u32 des_cmnd;			/* FEC + 0x1F8 */
+	volatile u32 data;			/* FEC + 0x1FC */
+
+	volatile u32 rmon_t_drop;		/* FEC + 0x200 */
+	volatile u32 rmon_t_packets;		/* FEC + 0x204 */
+	volatile u32 rmon_t_bc_pkt;		/* FEC + 0x208 */
+	volatile u32 rmon_t_mc_pkt;		/* FEC + 0x20C */
+	volatile u32 rmon_t_crc_align;		/* FEC + 0x210 */
+	volatile u32 rmon_t_undersize;		/* FEC + 0x214 */
+	volatile u32 rmon_t_oversize;		/* FEC + 0x218 */
+	volatile u32 rmon_t_frag;		/* FEC + 0x21C */
+	volatile u32 rmon_t_jab;		/* FEC + 0x220 */
+	volatile u32 rmon_t_col;		/* FEC + 0x224 */
+	volatile u32 rmon_t_p64;		/* FEC + 0x228 */
+	volatile u32 rmon_t_p65to127;		/* FEC + 0x22C */
+	volatile u32 rmon_t_p128to255;		/* FEC + 0x230 */
+	volatile u32 rmon_t_p256to511;		/* FEC + 0x234 */
+	volatile u32 rmon_t_p512to1023;		/* FEC + 0x238 */
+	volatile u32 rmon_t_p1024to2047;	/* FEC + 0x23C */
+	volatile u32 rmon_t_p_gte2048;		/* FEC + 0x240 */
+	volatile u32 rmon_t_octets;		/* FEC + 0x244 */
+	volatile u32 ieee_t_drop;		/* FEC + 0x248 */
+	volatile u32 ieee_t_frame_ok;		/* FEC + 0x24C */
+	volatile u32 ieee_t_1col;		/* FEC + 0x250 */
+	volatile u32 ieee_t_mcol;		/* FEC + 0x254 */
+	volatile u32 ieee_t_def;		/* FEC + 0x258 */
+	volatile u32 ieee_t_lcol;		/* FEC + 0x25C */
+	volatile u32 ieee_t_excol;		/* FEC + 0x260 */
+	volatile u32 ieee_t_macerr;		/* FEC + 0x264 */
+	volatile u32 ieee_t_cserr;		/* FEC + 0x268 */
+	volatile u32 ieee_t_sqe;		/* FEC + 0x26C */
+	volatile u32 t_fdxfc;			/* FEC + 0x270 */
+	volatile u32 ieee_t_octets_ok;		/* FEC + 0x274 */
+
+	volatile u32 reserved9[2];		/* FEC + 0x278-27C */
+	volatile u32 rmon_r_drop;		/* FEC + 0x280 */
+	volatile u32 rmon_r_packets;		/* FEC + 0x284 */
+	volatile u32 rmon_r_bc_pkt;		/* FEC + 0x288 */
+	volatile u32 rmon_r_mc_pkt;		/* FEC + 0x28C */
+	volatile u32 rmon_r_crc_align;		/* FEC + 0x290 */
+	volatile u32 rmon_r_undersize;		/* FEC + 0x294 */
+	volatile u32 rmon_r_oversize;		/* FEC + 0x298 */
+	volatile u32 rmon_r_frag;		/* FEC + 0x29C */
+	volatile u32 rmon_r_jab;		/* FEC + 0x2A0 */
+
+	volatile u32 rmon_r_resvd_0;		/* FEC + 0x2A4 */
+
+	volatile u32 rmon_r_p64;		/* FEC + 0x2A8 */
+	volatile u32 rmon_r_p65to127;		/* FEC + 0x2AC */
+	volatile u32 rmon_r_p128to255;		/* FEC + 0x2B0 */
+	volatile u32 rmon_r_p256to511;		/* FEC + 0x2B4 */
+	volatile u32 rmon_r_p512to1023;		/* FEC + 0x2B8 */
+	volatile u32 rmon_r_p1024to2047;	/* FEC + 0x2BC */
+	volatile u32 rmon_r_p_gte2048;		/* FEC + 0x2C0 */
+	volatile u32 rmon_r_octets;		/* FEC + 0x2C4 */
+	volatile u32 ieee_r_drop;		/* FEC + 0x2C8 */
+	volatile u32 ieee_r_frame_ok;		/* FEC + 0x2CC */
+	volatile u32 ieee_r_crc;		/* FEC + 0x2D0 */
+	volatile u32 ieee_r_align;		/* FEC + 0x2D4 */
+	volatile u32 r_macerr;			/* FEC + 0x2D8 */
+	volatile u32 r_fdxfc;			/* FEC + 0x2DC */
+	volatile u32 ieee_r_octets_ok;		/* FEC + 0x2E0 */
+
+	volatile u32 reserved10[6];		/* FEC + 0x2E4-2FC */
+
+	volatile u32 reserved11[64];		/* FEC + 0x300-3FF */
+};
+
+#define MPC5xxx_FEC_MIB_DISABLE			0x80000000
+
+#define MPC5xxx_FEC_IEVENT_HBERR		0x80000000
+#define MPC5xxx_FEC_IEVENT_BABR			0x40000000
+#define MPC5xxx_FEC_IEVENT_BABT			0x20000000
+#define MPC5xxx_FEC_IEVENT_GRA			0x10000000
+#define MPC5xxx_FEC_IEVENT_TFINT		0x08000000
+#define MPC5xxx_FEC_IEVENT_MII			0x00800000
+#define MPC5xxx_FEC_IEVENT_LATE_COL		0x00200000
+#define MPC5xxx_FEC_IEVENT_COL_RETRY_LIM	0x00100000
+#define MPC5xxx_FEC_IEVENT_XFIFO_UN		0x00080000
+#define MPC5xxx_FEC_IEVENT_XFIFO_ERROR		0x00040000
+#define MPC5xxx_FEC_IEVENT_RFIFO_ERROR		0x00020000
+
+#define MPC5xxx_FEC_IMASK_HBERR			0x80000000
+#define MPC5xxx_FEC_IMASK_BABR			0x40000000
+#define MPC5xxx_FEC_IMASK_BABT			0x20000000
+#define MPC5xxx_FEC_IMASK_GRA			0x10000000
+#define MPC5xxx_FEC_IMASK_MII			0x00800000
+#define MPC5xxx_FEC_IMASK_LATE_COL		0x00200000
+#define MPC5xxx_FEC_IMASK_COL_RETRY_LIM		0x00100000
+#define MPC5xxx_FEC_IMASK_XFIFO_UN		0x00080000
+#define MPC5xxx_FEC_IMASK_XFIFO_ERROR		0x00040000
+#define MPC5xxx_FEC_IMASK_RFIFO_ERROR		0x00020000
+
+#define MPC5xxx_FEC_RCNTRL_MAX_FL_SHIFT		16
+#define MPC5xxx_FEC_RCNTRL_LOOP			0x01
+#define MPC5xxx_FEC_RCNTRL_DRT			0x02
+#define MPC5xxx_FEC_RCNTRL_MII_MODE		0x04
+#define MPC5xxx_FEC_RCNTRL_PROM			0x08
+#define MPC5xxx_FEC_RCNTRL_BC_REJ		0x10
+#define MPC5xxx_FEC_RCNTRL_FCE			0x20
+
+#define MPC5xxx_FEC_TCNTRL_GTS			0x00000001
+#define MPC5xxx_FEC_TCNTRL_HBC			0x00000002
+#define MPC5xxx_FEC_TCNTRL_FDEN			0x00000004
+#define MPC5xxx_FEC_TCNTRL_TFC_PAUSE		0x00000008
+#define MPC5xxx_FEC_TCNTRL_RFC_PAUSE		0x00000010
+
+#define MPC5xxx_FEC_ECNTRL_RESET		0x00000001
+#define MPC5xxx_FEC_ECNTRL_ETHER_EN		0x00000002
+
+#define MPC5xxx_FEC_RESET_DELAY			50 /* uS */
+
+
+/* Receive & Transmit Buffer Descriptor definitions */
+struct mpc5xxx_fec_bd {
+	volatile u32 status;
+	volatile u32 data;
+};
+
+/* Receive data buffer format */
+struct mpc5xxx_rbuf {
+	u8 data[MPC5xxx_FEC_RECV_BUFFER_SIZE_BC];
+};
+
+struct fec_queue {
+	volatile struct mpc5xxx_fec_bd *bd_base;
+	struct rtskb **skb_base;
+	u16 last_index;
+	u16 start_index;
+	u16 finish_index;
+};
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#define MII_ADVERTISE_HALF	(ADVERTISE_100HALF | ADVERTISE_10HALF | \
+				 ADVERTISE_CSMA)
+
+#define MII_ADVERTISE_ALL	(ADVERTISE_100FULL | ADVERTISE_10FULL | \
+				 MII_ADVERTISE_HALF)
+#ifdef PHY_INTERRUPT
+#define MII_ADVERTISE_DEFAULT   MII_ADVERTISE_ALL
+#else
+#define MII_ADVERTISE_DEFAULT   MII_ADVERTISE_HALF
+#endif
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct rtnet_device *dev, uint data);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+struct mpc5xxx_fec_priv {
+	int full_duplex;
+	int tx_full;
+	int r_tasknum;
+	int t_tasknum;
+	int r_irq;
+	int t_irq;
+	rtdm_irq_t irq_handle;
+	rtdm_irq_t r_irq_handle;
+	rtdm_irq_t t_irq_handle;
+	u32 last_transmit_time;
+	u32 last_receive_time;
+	struct mpc5xxx_fec *fec;
+	struct mpc5xxx_sram_fec *sram;
+	struct mpc5xxx_gpio *gpio;
+	struct mpc5xxx_sdma *sdma;
+	struct fec_queue r_queue;
+	struct rtskb *rskb[MPC5xxx_FEC_RBD_NUM];
+	struct fec_queue t_queue;
+	struct rtskb *tskb[MPC5xxx_FEC_TBD_NUM];
+	rtdm_lock_t lock;
+	unsigned long open_time;
+	struct net_device_stats stats;
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	uint phy_id;
+	uint phy_id_done;
+	uint phy_status;
+	uint phy_speed;
+	phy_info_t *phy;
+	struct tq_struct phy_task;
+	volatile uint sequence_done;
+	uint link;
+	uint phy_addr;
+
+	struct tq_struct link_up_task;
+	int duplex_change;
+	int link_up;
+
+	struct timer_list phy_timer_list;
+	u16 old_status;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+};
+
+struct mpc5xxx_sram_fec {
+	volatile struct mpc5xxx_fec_bd tbd[MPC5xxx_FEC_TBD_NUM];
+	volatile struct mpc5xxx_fec_bd rbd[MPC5xxx_FEC_RBD_NUM];
+};
+
+#define MPC5xxx_FEC_RBD_READY	0x40000000
+#define MPC5xxx_FEC_RBD_RFD	0x08000000	/* receive frame done */
+
+#define MPC5xxx_FEC_RBD_INIT	MPC5xxx_FEC_RBD_READY
+
+#define MPC5xxx_FEC_TBD_READY	0x40000000
+#define MPC5xxx_FEC_TBD_TFD	0x08000000	/* transmit frame done */
+#define MPC5xxx_FEC_TBD_INT	0x04000000	/* Interrupt */
+
+#define MPC5xxx_FEC_TBD_INIT	(MPC5xxx_FEC_TBD_INT | MPC5xxx_FEC_TBD_TFD | \
+				 MPC5xxx_FEC_TBD_READY)
+
+
+
+/* MII-related definitions */
+#define MPC5xxx_FEC_MII_DATA_ST		0x40000000	/* Start frame */
+#define MPC5xxx_FEC_MII_DATA_OP_RD	0x20000000	/* Perform read */
+#define MPC5xxx_FEC_MII_DATA_OP_WR	0x10000000	/* Perform write */
+#define MPC5xxx_FEC_MII_DATA_PA_MSK	0x0f800000	/* PHY Address mask */
+#define MPC5xxx_FEC_MII_DATA_RA_MSK	0x007c0000	/* PHY Register mask */
+#define MPC5xxx_FEC_MII_DATA_TA		0x00020000	/* Turnaround */
+#define MPC5xxx_FEC_MII_DATA_DATAMSK	0x00000fff	/* PHY data mask */
+
+#define MPC5xxx_FEC_MII_DATA_RA_SHIFT	0x12		/* MII reg addr bits */
+#define MPC5xxx_FEC_MII_DATA_PA_SHIFT	0x17		/* MII PHY addr bits */
+
+#define MPC5xxx_FEC_MII_SPEED		(5 * 2)
+
+const char mpc5xxx_fec_name[] = "eth0";
+
+struct mibCounters {
+	unsigned int byteReceived;
+	unsigned int byteSent;
+	unsigned int framesReceived;
+	unsigned int framesSent;
+	unsigned int totalByteReceived;
+	unsigned int totalFramesReceived;
+	unsigned int broadcastFramesReceived;
+	unsigned int multicastFramesReceived;
+	unsigned int cRCError;
+	unsigned int oversizeFrames;
+	unsigned int fragments;
+	unsigned int jabber;
+	unsigned int collision;
+	unsigned int lateCollision;
+	unsigned int frames64;
+	unsigned int frames65_127;
+	unsigned int frames128_255;
+	unsigned int frames256_511;
+	unsigned int frames512_1023;
+	unsigned int frames1024_MaxSize;
+	unsigned int macRxError;
+	unsigned int droppedFrames;
+	unsigned int outMulticastFrames;
+	unsigned int outBroadcastFrames;
+	unsigned int undersizeFrames;
+};
+
+#define MPC5xxx_FEC_WATCHDOG_TIMEOUT  ((400*HZ)/1000)
+
+
+#define MPC5xxx_FEC_FRAME_LAST		0x08000000	/* Last */
+#define MPC5xxx_FEC_FRAME_M		0x01000000	/* M? */
+#define MPC5xxx_FEC_FRAME_BC		0x00800000	/* Broadcast */
+#define MPC5xxx_FEC_FRAME_MC		0x00400000	/* Multicast */
+#define MPC5xxx_FEC_FRAME_LG		0x00200000	/* Length error */
+#define MPC5xxx_FEC_FRAME_NO		0x00100000	/* Non-octet aligned frame error */
+#define MPC5xxx_FEC_FRAME_CR		0x00040000	/* CRC frame error */
+#define MPC5xxx_FEC_FRAME_OV		0x00020000	/* Overrun error */
+#define MPC5xxx_FEC_FRAME_TR		0x00010000	/* Truncated error */
+
+
+
+#endif	/* __RT_MPC52XX_FEC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c
new file mode 100644
index 0000000..5167dd7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8260_fcc_enet.c
@@ -0,0 +1,2235 @@
+/*
+ * Fast Ethernet Controller (FCC) driver for Motorola MPC8260.
+ * Copyright (c) 2000 MontaVista Software, Inc.   Dan Malek (dmalek@jlc.net)
+ *
+ * This version of the driver is a combination of the 8xx fec and
+ * 8260 SCC Ethernet drivers.  This version has some additional
+ * configuration options, which should probably be moved out of
+ * here.  This driver currently works for the EST SBC8260,
+ * SBS Diablo/BCM, Embedded Planet RPX6, TQM8260, and others.
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.  Since this is a cache coherent processor and CPM,
+ * I could also preallocate SKB's and use them directly on the interface.
+ *
+ * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/8260_io/fcc_enet.c".
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/immap_8260.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8260.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/cpm_8260.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#error "MDIO for PHY configuration is not yet supported!"
+#endif
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for the MPC8260 FCC Ethernet");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size =  0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+static unsigned int rtnet_fcc = 1;
+MODULE_PARM(rtnet_fcc, "i");
+MODULE_PARM_DESC(rtnet_fcc, "FCCx port for RTnet (default=1)");
+
+#define RT_DEBUG(fmt,args...)
+
+/* The transmitter timeout
+ */
+#define TX_TIMEOUT	(2*HZ)
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Forward declarations of some structures to support different PHYs */
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct net_device *dev);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+
+/* Register definitions for the PHY. */
+
+#define MII_REG_CR          0  /* Control Register                         */
+#define MII_REG_SR          1  /* Status Register                          */
+#define MII_REG_PHYIR1      2  /* PHY Identification Register 1            */
+#define MII_REG_PHYIR2      3  /* PHY Identification Register 2            */
+#define MII_REG_ANAR        4  /* A-N Advertisement Register               */
+#define MII_REG_ANLPAR      5  /* A-N Link Partner Ability Register        */
+#define MII_REG_ANER        6  /* A-N Expansion Register                   */
+#define MII_REG_ANNPTR      7  /* A-N Next Page Transmit Register          */
+#define MII_REG_ANLPRNPR    8  /* A-N Link Partner Received Next Page Reg. */
+
+/* values for phy_status */
+
+#define PHY_CONF_ANE	0x0001  /* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP	0x0002  /* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK	0x00f0  /* mask for speed */
+#define PHY_CONF_10HDX	0x0010  /* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX	0x0020  /* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX	0x0040  /* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX	0x0080  /* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK	0x0100  /* 1 up - 0 down */
+#define PHY_STAT_FAULT	0x0200  /* 1 remote fault */
+#define PHY_STAT_ANC	0x0400  /* 1 auto-negotiation complete	*/
+#define PHY_STAT_SPMASK	0xf000  /* mask for speed */
+#define PHY_STAT_10HDX	0x1000  /* 10 Mbit half duplex selected	*/
+#define PHY_STAT_10FDX	0x2000  /* 10 Mbit full duplex selected	*/
+#define PHY_STAT_100HDX	0x4000  /* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX	0x8000  /* 100 Mbit full duplex selected */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are power of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+#define FCC_ENET_RX_PAGES	16
+#define FCC_ENET_RX_FRSIZE	2048
+#define FCC_ENET_RX_FRPPG	(PAGE_SIZE / FCC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FCC_ENET_RX_FRPPG * FCC_ENET_RX_PAGES)
+#define TX_RING_SIZE		16	/* Must be power of two */
+#define TX_RING_MOD_MASK	15	/*   for this to work */
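+
+/*
+ * A minimal sketch of why the power-of-two constraint matters: ring
+ * indices can then wrap with a cheap mask instead of a modulo, as the
+ * code below does, e.g.
+ *
+ *	next = (cur + 1) & TX_RING_MOD_MASK;
+ *
+ * which wraps 15 back to 0, assuming TX_RING_MOD_MASK stays
+ * TX_RING_SIZE - 1.
+ */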
+
+/* The FCC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+
+/* Maximum input DMA size.  Should be a multiple of 4.
+*/
+#define PKT_MAXDMA_SIZE		1520
+
+/* Maximum input buffer size.  Must be a multiple of 32.
+*/
+#define PKT_MAXBLR_SIZE		1536
+
+static int  fcc_enet_open(struct rtnet_device *rtdev);
+static int  fcc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int  fcc_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int fcc_enet_interrupt(rtdm_irq_t *irq_handle);
+static int  fcc_enet_close(struct rtnet_device *dev);
+
+static struct net_device_stats *fcc_enet_get_stats(struct rtnet_device *rtdev);
+#ifdef ORIGINAL_VERSION
+static void set_multicast_list(struct net_device *dev);
+static int fcc_enet_set_mac_address(struct net_device *dev, void *addr);
+#endif /* ORIGINAL_VERSION */
+
+static void fcc_restart(struct rtnet_device *rtdev, int duplex);
+
+/* These will be configurable for the FCC choice.
+ * Multiple ports can be configured.  There is little choice among the
+ * I/O pins to the PHY, except the clocks.  We will need some board
+ * dependent clock selection.
+ * Why in the hell did I put these inside #ifdef's?  I dunno, maybe to
+ * help show what pins are used for each device.
+ */
+
+/* I/O Pin assignment for FCC1.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PA1_COL		((uint)0x00000001)
+#define PA1_CRS		((uint)0x00000002)
+#define PA1_TXER	((uint)0x00000004)
+#define PA1_TXEN	((uint)0x00000008)
+#define PA1_RXDV	((uint)0x00000010)
+#define PA1_RXER	((uint)0x00000020)
+#define PA1_TXDAT	((uint)0x00003c00)
+#define PA1_RXDAT	((uint)0x0003c000)
+#define PA1_PSORA0	(PA1_RXDAT | PA1_TXDAT)
+#define PA1_PSORA1	(PA1_COL | PA1_CRS | PA1_TXER | PA1_TXEN | \
+				PA1_RXDV | PA1_RXER)
+#define PA1_DIRA0	(PA1_RXDAT | PA1_CRS | PA1_COL | PA1_RXER | PA1_RXDV)
+#define PA1_DIRA1	(PA1_TXDAT | PA1_TXEN | PA1_TXER)
+
+/* CLK12 is receive, CLK11 is transmit.  These are board specific.
+*/
+#define PC_F1RXCLK	((uint)0x00000800)
+#define PC_F1TXCLK	((uint)0x00000400)
+#if defined(CONFIG_PM826)
+#ifndef CONFIG_RTAI_RTNET_DB_CR826_J30x_ON
+#define CMX1_CLK_ROUTE  ((uint)0x35000000)
+#define CMX1_CLK_MASK   ((uint)0x7f000000)
+#else
+#define CMX1_CLK_ROUTE	((uint)0x37000000)
+#define CMX1_CLK_MASK	((uint)0x7f000000)
+#endif
+#elif defined(CONFIG_CPU86)
+#define CMX1_CLK_ROUTE  ((uint)0x37000000)
+#define CMX1_CLK_MASK   ((uint)0x7f000000)
+#else
+#define CMX1_CLK_ROUTE	((uint)0x3e000000)
+#define CMX1_CLK_MASK	((uint)0xff000000)
+#endif	/* CONFIG_PM826 */
+
+/* I/O Pin assignment for FCC2.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB2_TXER	((uint)0x00000001)
+#define PB2_RXDV	((uint)0x00000002)
+#define PB2_TXEN	((uint)0x00000004)
+#define PB2_RXER	((uint)0x00000008)
+#define PB2_COL		((uint)0x00000010)
+#define PB2_CRS		((uint)0x00000020)
+#define PB2_TXDAT	((uint)0x000003c0)
+#define PB2_RXDAT	((uint)0x00003c00)
+#define PB2_PSORB0	(PB2_RXDAT | PB2_TXDAT | PB2_CRS | PB2_COL | \
+				PB2_RXER | PB2_RXDV | PB2_TXER)
+#define PB2_PSORB1	(PB2_TXEN)
+#define PB2_DIRB0	(PB2_RXDAT | PB2_CRS | PB2_COL | PB2_RXER | PB2_RXDV)
+#define PB2_DIRB1	(PB2_TXDAT | PB2_TXEN | PB2_TXER)
+
+/* CLK13 is receive, CLK14 is transmit.  These are board dependent.
+*/
+#define PC_F2RXCLK	((uint)0x00001000)
+#define PC_F2TXCLK	((uint)0x00002000)
+#define CMX2_CLK_ROUTE	((uint)0x00250000)
+#define CMX2_CLK_MASK	((uint)0x00ff0000)
+
+/* I/O Pin assignment for FCC3.  I don't yet know the best way to do this,
+ * but there is little variation among the choices.
+ */
+#define PB3_RXDV	((uint)0x00004000)
+#define PB3_RXER	((uint)0x00008000)
+#define PB3_TXER	((uint)0x00010000)
+#define PB3_TXEN	((uint)0x00020000)
+#define PB3_COL		((uint)0x00040000)
+#define PB3_CRS		((uint)0x00080000)
+#define PB3_TXDAT	((uint)0x0f000000)
+#define PB3_RXDAT	((uint)0x00f00000)
+#define PB3_PSORB0	(PB3_RXDAT | PB3_TXDAT | PB3_CRS | PB3_COL | \
+				PB3_RXER | PB3_RXDV | PB3_TXER | PB3_TXEN)
+#define PB3_PSORB1	(0)
+#define PB3_DIRB0	(PB3_RXDAT | PB3_CRS | PB3_COL | PB3_RXER | PB3_RXDV)
+#define PB3_DIRB1	(PB3_TXDAT | PB3_TXEN | PB3_TXER)
+
+/* CLK15 is receive, CLK16 is transmit.  These are board dependent.
+*/
+#ifdef CONFIG_IPHASE4539
+#define PC_F3RXCLK	((uint)0x00002000) /* CLK 14 is receive  */
+#define PC_F3TXCLK	((uint)0x00008000) /* CLK 16 is transmit */
+#define CMX3_CLK_ROUTE	((uint)0x00002f00)
+#define CMX3_CLK_MASK	((uint)0x00007f00)
+#else
+#define PC_F3RXCLK	((uint)0x00004000)
+#define PC_F3TXCLK	((uint)0x00008000)
+#define CMX3_CLK_ROUTE	((uint)0x00003700)
+#define CMX3_CLK_MASK	((uint)0x0000ff00)
+#endif
+
+/* MII status/control serial interface.
+*/
+#define IOP_PORT_OFF(f)	((uint)(&((iop8260_t *)0)->iop_p##f))
+#define IOP_PORT(x)	IOP_PORT_OFF(dir##x)
+
+#define IOP_DIR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(dira)-IOP_PORT_OFF(dira))))
+#define IOP_PAR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(para)-IOP_PORT_OFF(dira))))
+#define IOP_SOR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(sora)-IOP_PORT_OFF(dira))))
+#define IOP_ODR(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(odra)-IOP_PORT_OFF(dira))))
+#define IOP_DAT(b,p)	*((uint*)((void*)(b)+(p)+(IOP_PORT_OFF(data)-IOP_PORT_OFF(dira))))
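+
+/*
+ * The macros above select a port by the offset of its direction register;
+ * each accessor then adds the fixed distance between register banks in
+ * iop8260_t.  Assuming the usual immap layout, something like
+ *
+ *	IOP_DAT(io, IOP_PORT(c)) |= MII_MDIO;
+ *
+ * should resolve to the port C data register (io->iop_pdatc).
+ */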
+
+#if defined(CONFIG_TQM8260)
+/* TQM8260 has MDIO and MDCK on PC30 and PC31 respectively */
+#define MII_MDIO		((uint)0x00000002)
+#define MII_MDCK		((uint)0x00000001)
+#elif defined (CONFIG_PM826)
+#ifndef CONFIG_RTAI_RTNET_DB_CR826_J30x_ON
+#define MII_MDIO		((uint)0x00000080) /* MDIO on PC24 */
+#define MII_MDCK		((uint)0x00000100) /* MDCK on PC23 */
+#else
+#define MII_MDIO		((uint)0x00000100) /* MDIO on PA23 */
+#define MII_MDCK		((uint)0x00000200) /* MDCK on PA22 */
+#define MII_PORT		IOP_PORT(a)
+#endif	/* CONFIG_RTAI_RTNET_DB_CR826_J30x_ON */
+#elif defined (CONFIG_IPHASE4539)
+#define MII_MDIO		((uint)0x00000080) /* MDIO on PC24 */
+#define MII_MDCK		((uint)0x00000100) /* MDCK on PC23 */
+#else
+#define MII_MDIO		((uint)0x00000004)
+#define MII_MDCK		((uint)0x00000100)
+#endif
+
+# if defined(CONFIG_TQM8260)
+#define MII_MDIO2		MII_MDIO
+#define MII_MDCK2		MII_MDCK
+#elif defined(CONFIG_EST8260) || defined(CONFIG_ADS8260)
+#define MII_MDIO2		((uint)0x00400000)
+#define MII_MDCK2		((uint)0x00200000)
+#elif defined(CONFIG_PM826)
+#define MII_MDIO2		((uint)0x00000040) /* MDIO on PA25 */
+#define MII_MDCK2		((uint)0x00000080) /* MDCK on PA24 */
+#define MII_PORT2		IOP_PORT(a)
+#else
+#define MII_MDIO2		((uint)0x00000002)
+#define MII_MDCK2		((uint)0x00000080)
+#endif
+
+# if defined(CONFIG_TQM8260)
+#define MII_MDIO3		MII_MDIO
+#define MII_MDCK3		MII_MDCK
+#else
+#define MII_MDIO3		((uint)0x00000001)
+#define MII_MDCK3		((uint)0x00000040)
+#endif
+
+#ifndef MII_PORT
+#define MII_PORT		IOP_PORT(c)
+#endif
+
+#ifndef MII_PORT2
+#define MII_PORT2		IOP_PORT(c)
+#endif
+
+#ifndef MII_PORT3
+#define MII_PORT3		IOP_PORT(c)
+#endif
+
+/* A table of information for supporting FCCs.  This does two things.
+ * First, we know how many FCCs we have and they are always externally
+ * numbered from zero.  Second, it holds control register and I/O
+ * information that could be different among board designs.
+ */
+typedef struct fcc_info {
+	uint	fc_fccnum;
+	uint	fc_cpmblock;
+	uint	fc_cpmpage;
+	uint	fc_proff;
+	uint	fc_interrupt;
+	uint	fc_trxclocks;
+	uint	fc_clockroute;
+	uint	fc_clockmask;
+	uint	fc_mdio;
+	uint	fc_mdck;
+	uint	fc_port;
+	struct rtnet_device *rtdev;
+} fcc_info_t;
+
+static fcc_info_t fcc_ports[] = {
+	{ 0, CPM_CR_FCC1_SBLOCK, CPM_CR_FCC1_PAGE, PROFF_FCC1, SIU_INT_FCC1,
+		(PC_F1RXCLK | PC_F1TXCLK), CMX1_CLK_ROUTE, CMX1_CLK_MASK,
+		MII_MDIO, MII_MDCK, MII_PORT },
+	{ 1, CPM_CR_FCC2_SBLOCK, CPM_CR_FCC2_PAGE, PROFF_FCC2, SIU_INT_FCC2,
+		(PC_F2RXCLK | PC_F2TXCLK), CMX2_CLK_ROUTE, CMX2_CLK_MASK,
+		MII_MDIO2, MII_MDCK2, MII_PORT2 },
+	{ 2, CPM_CR_FCC3_SBLOCK, CPM_CR_FCC3_PAGE, PROFF_FCC3, SIU_INT_FCC3,
+		(PC_F3RXCLK | PC_F3TXCLK), CMX3_CLK_ROUTE, CMX3_CLK_MASK,
+		MII_MDIO3, MII_MDCK3, MII_PORT3 },
+};
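+
+/*
+ * Presumably this table is indexed by the 1-based rtnet_fcc module
+ * parameter; a sketch of the lookup (the variable name is illustrative):
+ *
+ *	fcc_info_t *fip = &fcc_ports[rtnet_fcc - 1];
+ */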
+
+/* The FCC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
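+
+/*
+ * A short sketch of the rule above (not driver code): with
+ * cur_tx == dirty_tx alone, an empty ring and a full ring look the same,
+ * so the READY bit of the descriptor decides:
+ *
+ *	ring empty: cur_tx == dirty_tx && !(cur_tx->cbd_sc & BD_ENET_TX_READY)
+ *	ring full:  cur_tx == dirty_tx &&  (cur_tx->cbd_sc & BD_ENET_TX_READY)
+ */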
+struct fcc_enet_private {
+	/* The addresses of a Tx/Rx-in-place packets/buffers. */
+	struct	rtskb *tx_skbuff[TX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses.
+	*/
+	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
+	cbd_t	*tx_bd_base;
+	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
+	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
+	volatile fcc_t	*fccp;
+	volatile fcc_enet_t	*ep;
+	struct	net_device_stats stats;
+	uint	tx_full;
+	rtdm_lock_t lock;
+	rtdm_irq_t irq_handle;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	uint	phy_id;
+	uint	phy_id_done;
+	uint	phy_status;
+	phy_info_t	*phy;
+	struct tq_struct phy_task;
+
+	uint	sequence_done;
+
+	uint	phy_addr;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	int	link;
+	int	old_link;
+	int	full_duplex;
+
+	fcc_info_t	*fip;
+};
+
+static void init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
+	volatile immap_t *immap);
+static void init_fcc_startup(fcc_info_t *fip, struct rtnet_device *rtdev);
+static void init_fcc_ioports(fcc_info_t *fip, volatile iop8260_t *io,
+	volatile immap_t *immap);
+static void init_fcc_param(fcc_info_t *fip, struct rtnet_device *rtdev,
+	volatile immap_t *immap);
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static int	mii_queue(struct net_device *dev, int request, void (*func)(uint, struct net_device *));
+static uint	mii_send_receive(fcc_info_t *fip, uint cmd);
+
+static void	fcc_stop(struct net_device *dev);
+
+/* Make MII read/write commands for the FCC.
+*/
+#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
+						(VAL & 0xffff))
+#define mk_mii_end	0
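+
+/*
+ * For example, reading the status register of the PHY at address 3 would
+ * combine the macros above with the address shifted into the PA field,
+ * as mii_queue() does further down:
+ *
+ *	regval = mk_mii_read(MII_REG_SR) | (3 << 23);
+ */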
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+static int
+fcc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct fcc_enet_private *cep = (struct fcc_enet_private *)rtdev->priv;
+	volatile cbd_t	*bdp;
+	rtdm_lockctx_t	context;
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	if (!cep->link) {
+		/* Link is down or autonegotiation is in progress. */
+		return 1;
+	}
+
+	/* Fill in a Tx ring entry */
+	bdp = cep->cur_tx;
+
+#ifndef final_version
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		/* Oops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since cep->tx_full should be set.
+		 */
+		rtdm_printk("%s: tx queue full!.\n", rtdev->name);
+		return 1;
+	}
+#endif
+
+	/* Clear all of the status flags. */
+	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+	/* If the frame is short, tell CPM to pad it. */
+	if (skb->len <= ETH_ZLEN)
+		bdp->cbd_sc |= BD_ENET_TX_PAD;
+	else
+		bdp->cbd_sc &= ~BD_ENET_TX_PAD;
+
+	/* Set buffer length and buffer pointer. */
+	bdp->cbd_datlen = skb->len;
+	bdp->cbd_bufaddr = __pa(skb->data);
+
+	/* Save skb pointer. */
+	cep->tx_skbuff[cep->skb_cur] = skb;
+
+	cep->stats.tx_bytes += skb->len;
+	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
+
+	rtdm_lock_get_irqsave(&cep->lock, context);
+
+	/* Get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	/* Send it on its way.  Tell CPM it's ready, interrupt when done,
+	 * it's the last BD of the frame, and to put the CRC on the end.
+	 */
+	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+#ifdef ORIGINAL_VERSION
+	dev->trans_start = jiffies;
+#endif
+
+	/* If this was the last BD in the ring, start at the beginning again. */
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		bdp = cep->tx_bd_base;
+	else
+		bdp++;
+
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		rtnetif_stop_queue(rtdev);
+		cep->tx_full = 1;
+	}
+
+	cep->cur_tx = (cbd_t *)bdp;
+
+	rtdm_lock_put_irqrestore(&cep->lock, context);
+
+	return 0;
+}
+
+
+#ifdef ORIGINAL_VERSION
+static void
+fcc_enet_timeout(struct net_device *dev)
+{
+	struct fcc_enet_private *cep = (struct fcc_enet_private *)dev->priv;
+
+	printk("%s: transmit timed out.\n", dev->name);
+	cep->stats.tx_errors++;
+#ifndef final_version
+	{
+		int	i;
+		cbd_t	*bdp;
+		printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
+		       cep->cur_tx, cep->tx_full ? " (full)" : "",
+		       cep->cur_rx);
+		bdp = cep->tx_bd_base;
+		printk(" Tx @base %p :\n", bdp);
+		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+		bdp = cep->rx_bd_base;
+		printk(" Rx @base %p :\n", bdp);
+		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+	}
+#endif
+	if (!cep->tx_full)
+		netif_wake_queue(dev);
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler. */
+static int fcc_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	struct	fcc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	ushort	int_events;
+	int	must_restart;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+
+	/* Get the interrupt events that caused us to be here.
+	*/
+	int_events = cep->fccp->fcc_fcce;
+	cep->fccp->fcc_fcce = int_events;
+	must_restart = 0;
+
+	/* Handle receive event in its own function.
+	*/
+	if (int_events & FCC_ENET_RXF) {
+		fcc_enet_rx(rtdev, &packets, &time_stamp);
+	}
+
+	/* Check for a transmit error.  The manual is a little unclear
+	 * about this, so the debug code stays until I get it figured out.  It
+	 * appears that if TXE is set, then TXB is not set.  However,
+	 * if carrier sense is lost during frame transmission, the TXE
+	 * bit is set, "and continues the buffer transmission normally."
+	 * I don't know if "normally" implies TXB is set when the buffer
+	 * descriptor is closed.....trial and error :-).
+	 */
+
+	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
+	*/
+	if (int_events & (FCC_ENET_TXE | FCC_ENET_TXB)) {
+	    rtdm_lock_get(&cep->lock);
+	    bdp = cep->dirty_tx;
+	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
+		if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
+		    break;
+
+		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
+			cep->stats.tx_heartbeat_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
+			cep->stats.tx_window_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
+			cep->stats.tx_aborted_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
+			cep->stats.tx_fifo_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
+			cep->stats.tx_carrier_errors++;
+
+		/* No heartbeat or Lost carrier are not really bad errors.
+		 * The others require a restart transmit command.
+		 */
+		if (bdp->cbd_sc &
+		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+			must_restart = 1;
+			cep->stats.tx_errors++;
+		}
+
+		cep->stats.tx_packets++;
+
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_DEF)
+			cep->stats.collisions++;
+
+		/* Free the sk buffer associated with this last transmit. */
+		dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]);
+		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+		/* Update pointer to next buffer descriptor to be transmitted. */
+		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+			bdp = cep->tx_bd_base;
+		else
+			bdp++;
+
+		/* I don't know if we can be held off from processing these
+		 * interrupts for more than one frame time.  I really hope
+		 * not.  In such a case, we would now want to check the
+		 * currently available BD (cur_tx) and determine if any
+		 * buffers between the dirty_tx and cur_tx have also been
+		 * sent.  We would want to process anything in between that
+		 * does not have BD_ENET_TX_READY set.
+		 */
+
+		/* Since we have freed up a buffer, the ring is no longer
+		 * full.
+		 */
+		if (cep->tx_full) {
+			cep->tx_full = 0;
+			if (rtnetif_queue_stopped(rtdev))
+				rtnetif_wake_queue(rtdev);
+		}
+
+		cep->dirty_tx = (cbd_t *)bdp;
+	    }
+
+	    if (must_restart) {
+		volatile cpm8260_t *cp;
+
+		/* Some transmit errors cause the transmitter to shut
+		 * down.  We now issue a restart transmit.  Since the
+		 * errors close the BD and update the pointers, the restart
+		 * _should_ pick up without having to reset any of our
+		 * pointers either.  Also, To workaround 8260 device erratum
+		 * CPM37, we must disable and then re-enable the transmitter
+		 * following a Late Collision, Underrun, or Retry Limit error.
+		 */
+		cep->fccp->fcc_gfmr &= ~FCC_GFMR_ENT;
+#ifdef ORIGINAL_VERSION
+		udelay(10); /* wait a few microseconds just on principle */
+#endif
+		cep->fccp->fcc_gfmr |=  FCC_GFMR_ENT;
+
+		cp = cpmp;
+		cp->cp_cpcr =
+		    mk_cr_cmd(cep->fip->fc_cpmpage, cep->fip->fc_cpmblock,
+				0x0c, CPM_CR_RESTART_TX) | CPM_CR_FLG;
+		while (cp->cp_cpcr & CPM_CR_FLG); /* looks suspicious - how long may it take? */
+	    }
+	    rtdm_lock_put(&cep->lock);
+	}
+
+	/* Check for receive busy, i.e. packets coming but no place to
+	 * put them.
+	 */
+	if (int_events & FCC_ENET_BSY) {
+		cep->stats.rx_dropped++;
+	}
+
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static int
+fcc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp)
+{
+	struct	fcc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	struct	rtskb *skb;
+	ushort	pkt_len;
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = cep->cur_rx;
+
+for (;;) {
+	if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
+		break;
+
+#ifndef final_version
+	/* Since we have allocated space to hold a complete frame, both
+	 * the first and last indicators should be set.
+	 */
+	if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
+		(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
+			rtdm_printk("CPM ENET: rcv is not first+last\n");
+#endif
+
+	/* Frame too long or too short. */
+	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+		cep->stats.rx_length_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
+		cep->stats.rx_frame_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
+		cep->stats.rx_crc_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
+		cep->stats.rx_crc_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_CL)	/* Late Collision */
+		cep->stats.rx_frame_errors++;
+
+	if (!(bdp->cbd_sc &
+	      (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO | BD_ENET_RX_CR
+	       | BD_ENET_RX_OV | BD_ENET_RX_CL)))
+	{
+		/* Process the incoming frame. */
+		cep->stats.rx_packets++;
+
+		/* Remove the FCS from the packet length. */
+		pkt_len = bdp->cbd_datlen - 4;
+		cep->stats.rx_bytes += pkt_len;
+
+		/* This does 16 byte alignment, much more than we need. */
+		skb = rtnetdev_alloc_rtskb(rtdev, pkt_len);
+
+		if (skb == NULL) {
+			rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name);
+			cep->stats.rx_dropped++;
+		}
+		else {
+			rtskb_put(skb,pkt_len); /* Make room */
+			memcpy(skb->data,
+			       (unsigned char *)__va(bdp->cbd_bufaddr),
+			       pkt_len);
+			skb->protocol=rt_eth_type_trans(skb,rtdev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			(*packets)++;
+		}
+	}
+
+	/* Clear the status flags for this buffer. */
+	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+
+	/* Mark the buffer empty. */
+	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+
+	/* Update BD pointer to next entry. */
+	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+		bdp = cep->rx_bd_base;
+	else
+		bdp++;
+
+   }
+	cep->cur_rx = (cbd_t *)bdp;
+
+	return 0;
+}
+
+static int
+fcc_enet_close(struct rtnet_device *rtdev)
+{
+	/* Don't know what to do yet. */
+	rtnetif_stop_queue(rtdev);
+
+	return 0;
+}
+
+static struct net_device_stats *fcc_enet_get_stats(struct rtnet_device *rtdev)
+{
+	struct fcc_enet_private *cep = (struct fcc_enet_private *)rtdev->priv;
+
+	return &cep->stats;
+}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+
+/* NOTE: Most of the following comes from the FEC driver for 860. The
+ * overall structure of MII code has been retained (as it's proved stable
+ * and well-tested), but actual transfer requests are processed "at once"
+ * instead of being queued (there's no interrupt-driven MII transfer
+ * mechanism, one has to toggle the data/clock bits manually).
+ */
+static int
+mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *))
+{
+	struct fcc_enet_private *fep;
+	int		retval, tmp;
+
+	/* Add PHY address to register command. */
+	fep = dev->priv;
+	regval |= fep->phy_addr << 23;
+
+	retval = 0;
+
+	tmp = mii_send_receive(fep->fip, regval);
+	if (func)
+		func(tmp, dev);
+
+	return retval;
+}
+
+static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
+{
+	int k;
+
+	if(!c)
+		return;
+
+	for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
+		mii_queue(dev, (c+k)->mii_data, (c+k)->funct);
+}
+
+static void mii_parse_sr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
+
+	if (mii_reg & 0x0004)
+		s |= PHY_STAT_LINK;
+	if (mii_reg & 0x0010)
+		s |= PHY_STAT_FAULT;
+	if (mii_reg & 0x0020)
+		s |= PHY_STAT_ANC;
+
+	fep->phy_status = s;
+	fep->link = (s & PHY_STAT_LINK) ? 1 : 0;
+}
+
+static void mii_parse_cr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
+
+	if (mii_reg & 0x1000)
+		s |= PHY_CONF_ANE;
+	if (mii_reg & 0x4000)
+		s |= PHY_CONF_LOOP;
+
+	fep->phy_status = s;
+}
+
+static void mii_parse_anar(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_SPMASK);
+
+	if (mii_reg & 0x0020)
+		s |= PHY_CONF_10HDX;
+	if (mii_reg & 0x0040)
+		s |= PHY_CONF_10FDX;
+	if (mii_reg & 0x0080)
+		s |= PHY_CONF_100HDX;
+	if (mii_reg & 0x0100)
+		s |= PHY_CONF_100FDX;
+
+	fep->phy_status = s;
+}
+
+/* Some boards don't have the MDIRQ line connected (PM826 is such a board) */
+
+static void mii_waitfor_anc(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep;
+	int regval;
+	int i;
+
+	fep = dev->priv;
+	regval = mk_mii_read(MII_REG_SR) | (fep->phy_addr << 23);
+
+	for (i = 0; i < 1000; i++)
+	{
+		if (mii_send_receive(fep->fip, regval) & 0x20)
+			return;
+		mdelay(10);
+	}
+
+	printk("%s: autonegotiation timeout\n", dev->name);
+}
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT970 is used by many boards				     */
+
+#ifdef CONFIG_FCC_LXT970
+
+#define MII_LXT970_MIRROR    16  /* Mirror register           */
+#define MII_LXT970_IER       17  /* Interrupt Enable Register */
+#define MII_LXT970_ISR       18  /* Interrupt Status Register */
+#define MII_LXT970_CONFIG    19  /* Configuration Register    */
+#define MII_LXT970_CSR       20  /* Chip Status Register      */
+
+static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0800) {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	} else {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt970 = {
+	0x07810000,
+	"LXT970",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* read SR and ISR to acknowledge */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT970_ISR), NULL },
+
+		/* find out the current status */
+
+		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FCC_LXT970 */
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT971 is used on some of my custom boards                  */
+
+#ifdef CONFIG_FCC_LXT971
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR       16  /* Port Control Register     */
+#define MII_LXT971_SR2       17  /* Status Register 2         */
+#define MII_LXT971_IER       18  /* Interrupt Enable Register */
+#define MII_LXT971_ISR       19  /* Interrupt Status Register */
+#define MII_LXT971_LCR       20  /* LED Control Register      */
+#define MII_LXT971_TCR       30  /* Transmit Control Register */
+
+/*
+ * I had some nice ideas of running the MDIO faster...
+ * The 971 should support 8MHz and I tried it, but things acted really
+ * weird, so 2.5 MHz ought to be enough for anyone...
+ */
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x4000) {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	} else {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	if (mii_reg & 0x0008)
+		s |= PHY_STAT_FAULT;
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt971 = {
+	0x0001378e,
+	"LXT971",
+
+	(const phy_cmd_t []) {  /* config */
+//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+
+		/* Somehow the 971 tells me that the link is down on the
+		 * first read after power-up.
+		 * Read here to get a valid value in ack_int. */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+#ifdef	CONFIG_PM826
+		{ mk_mii_read(MII_REG_SR), mii_waitfor_anc },
+#endif
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+
+		/* we only need to read ISR to acknowledge */
+
+		{ mk_mii_read(MII_LXT971_ISR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FCC_LXT971 */
+
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */
+
+#ifdef CONFIG_FCC_QS6612
+
+/* register definitions */
+
+#define MII_QS6612_MCR       17  /* Mode Control Register      */
+#define MII_QS6612_FTR       27  /* Factory Test Register      */
+#define MII_QS6612_MCO       28  /* Misc. Control Register     */
+#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
+#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
+#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */
+
+static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	switch((mii_reg >> 2) & 7) {
+	case 1: s |= PHY_STAT_10HDX;  break;
+	case 2: s |= PHY_STAT_100HDX; break;
+	case 5: s |= PHY_STAT_10FDX;  break;
+	case 6: s |= PHY_STAT_100FDX; break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_qs6612 = {
+	0x00181440,
+	"QS6612",
+
+	(const phy_cmd_t []) {  /* config */
+//	{ mk_mii_write(MII_REG_ANAR, 0x061), NULL }, /* 10  Mbps */
+
+		/* The PHY powers up isolated on the RPX,
+		 * so send a command to allow operation.
+		 */
+
+		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
+
+		/* parse cr and anar to get some info */
+
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+
+		/* we need to read ISR, SR and ANER to acknowledge */
+
+		{ mk_mii_read(MII_QS6612_ISR), NULL },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_ANER), NULL },
+
+		/* read pcr to get info */
+
+		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+
+#endif /* CONFIG_FCC_QS6612 */
+
+/* ------------------------------------------------------------------------- */
+/* The AMD Am79C873 PHY is on PM826				*/
+
+#ifdef CONFIG_FCC_AMD79C873
+
+#define MII_79C873_IER       17  /* Interrupt Enable Register */
+#define MII_79C873_DR        18  /* Diagnostic Register       */
+
+static void mii_parse_79c873_cr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x2000) {
+		if (mii_reg & 0x0100)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	} else {
+		if (mii_reg & 0x0100)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_79c873 = {
+	0x00181b80,
+	"AMD79C873",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+#ifdef	CONFIG_PM826
+		{ mk_mii_read(MII_REG_SR), mii_waitfor_anc },
+#endif
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* read SR twice: to acknowledge and to get link status */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+
+		/* find out the current link parameters */
+
+		{ mk_mii_read(MII_REG_CR), mii_parse_79c873_cr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_79C873_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FCC_AMD79C873 */
+
+
+/* ------------------------------------------------------------------------- */
+/* The Davicom DM9131 is used on the HYMOD board			     */
+
+#ifdef CONFIG_FCC_DM9131
+
+/* register definitions */
+
+#define MII_DM9131_ACR		16	/* Aux. Config Register		*/
+#define MII_DM9131_ACSR		17	/* Aux. Config/Status Register	*/
+#define MII_DM9131_10TCSR	18	/* 10BaseT Config/Status Reg.	*/
+#define MII_DM9131_INTR		21	/* Interrupt Register		*/
+#define MII_DM9131_RECR		22	/* Receive Error Counter Reg.	*/
+#define MII_DM9131_DISCR	23	/* Disconnect Counter Register	*/
+
+static void mii_parse_dm9131_acsr(uint mii_reg, struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	switch ((mii_reg >> 12) & 0xf) {
+	case 1: s |= PHY_STAT_10HDX;  break;
+	case 2: s |= PHY_STAT_10FDX;  break;
+	case 4: s |= PHY_STAT_100HDX; break;
+	case 8: s |= PHY_STAT_100FDX; break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_dm9131 = {
+	0x00181b80,
+	"DM9131",
+
+	(const phy_cmd_t []) {  /* config */
+		/* parse cr and anar to get some info */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_DM9131_INTR, 0x0002), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+
+		/* we need to read INTR, SR and ANER to acknowledge */
+
+		{ mk_mii_read(MII_DM9131_INTR), NULL },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_ANER), NULL },
+
+		/* read acsr to get info */
+
+		{ mk_mii_read(MII_DM9131_ACSR), mii_parse_dm9131_acsr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_DM9131_INTR, 0x0f00), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+
+#endif /* CONFIG_FCC_DM9131 */
+
+
+static phy_info_t *phy_info[] = {
+
+#ifdef CONFIG_FCC_LXT970
+	&phy_info_lxt970,
+#endif /* CONFIG_FCC_LXT970 */
+
+#ifdef CONFIG_FCC_LXT971
+	&phy_info_lxt971,
+#endif /* CONFIG_FCC_LXT971 */
+
+#ifdef CONFIG_FCC_QS6612
+	&phy_info_qs6612,
+#endif /* CONFIG_FCC_QS6612 */
+
+#ifdef CONFIG_FCC_DM9131
+	&phy_info_dm9131,
+#endif /* CONFIG_FCC_DM9131 */
+
+#ifdef CONFIG_FCC_AMD79C873
+	&phy_info_79c873,
+#endif /* CONFIG_FCC_AMD79C873 */
+
+	NULL
+};
+
+static void mii_display_status(struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	if (!fep->link && !fep->old_link) {
+		/* Link is still down - don't print anything */
+		return;
+	}
+
+	printk("%s: status: ", dev->name);
+
+	if (!fep->link) {
+		printk("link down");
+	} else {
+		printk("link up");
+
+		switch(s & PHY_STAT_SPMASK) {
+		case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
+		case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
+		case PHY_STAT_10FDX:  printk(", 10 Mbps Full Duplex");  break;
+		case PHY_STAT_10HDX:  printk(", 10 Mbps Half Duplex");  break;
+		default:
+			printk(", Unknown speed/duplex");
+		}
+
+		if (s & PHY_STAT_ANC)
+			printk(", auto-negotiation complete");
+	}
+
+	if (s & PHY_STAT_FAULT)
+		printk(", remote fault");
+
+	printk(".\n");
+}
+
+static void mii_display_config(struct net_device *dev)
+{
+	volatile struct fcc_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	printk("%s: config: auto-negotiation ", dev->name);
+
+	if (s & PHY_CONF_ANE)
+		printk("on");
+	else
+		printk("off");
+
+	if (s & PHY_CONF_100FDX)
+		printk(", 100FDX");
+	if (s & PHY_CONF_100HDX)
+		printk(", 100HDX");
+	if (s & PHY_CONF_10FDX)
+		printk(", 10FDX");
+	if (s & PHY_CONF_10HDX)
+		printk(", 10HDX");
+	if (!(s & PHY_CONF_SPMASK))
+		printk(", No speed/duplex selected?");
+
+	if (s & PHY_CONF_LOOP)
+		printk(", loopback enabled");
+
+	printk(".\n");
+
+	fep->sequence_done = 1;
+}
+
+static void mii_relink(struct net_device *dev)
+{
+	struct fcc_enet_private *fep = dev->priv;
+	int duplex;
+
+	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
+	mii_display_status(dev);
+	fep->old_link = fep->link;
+
+	if (fep->link) {
+		duplex = 0;
+		if (fep->phy_status
+		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
+			duplex = 1;
+		fcc_restart(dev, duplex);
+	} else {
+		fcc_stop(dev);
+	}
+}
+
+static void mii_queue_relink(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_relink;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+static void mii_queue_config(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_display_config;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+
+
+phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink },
+			       { mk_mii_end, } };
+phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
+			       { mk_mii_end, } };
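+
+/* (Each phy_cmd_t entry pairs one MII read or write with an optional
+ * callback that is run on the result; a mk_mii_end entry terminates
+ * the list, as in the per-PHY tables above.)
+ */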
+
+
+/* Read remainder of PHY ID.
+*/
+static void
+mii_discover_phy3(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep;
+	int	i;
+
+	fep = dev->priv;
+	fep->phy_id |= (mii_reg & 0xffff);
+
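+	/* The low 4 bits of the composite ID are the silicon revision,
+	 * which is why they are shifted off when matching the table.
+	 */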
+	for(i = 0; phy_info[i]; i++)
+		if(phy_info[i]->id == (fep->phy_id >> 4))
+			break;
+
+	if(!phy_info[i])
+		panic("%s: PHY id 0x%08x is not supported!\n",
+		      dev->name, fep->phy_id);
+
+	fep->phy = phy_info[i];
+
+	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
+		dev->name, fep->phy_addr, fep->phy->name, fep->phy_id);
+}
+
+/* Scan all of the MII PHY addresses looking for someone to respond
+ * with a valid ID.  This usually happens quickly.
+ */
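+/* (An absent PHY address typically reads back as all ones, thanks to
+ * the MDIO pull-up, or occasionally as all zeros -- hence the test on
+ * the ID value below.)
+ */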
+static void
+mii_discover_phy(uint mii_reg, struct net_device *dev)
+{
+	struct fcc_enet_private *fep;
+	uint	phytype;
+
+	fep = dev->priv;
+
+	if ((phytype = (mii_reg & 0xfff)) != 0xfff && phytype != 0) {
+
+		/* Got first part of ID, now get remainder. */
+		fep->phy_id = phytype << 16;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3);
+	} else {
+		fep->phy_addr++;
+		if (fep->phy_addr < 32) {
+			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
+							mii_discover_phy);
+		} else {
+			printk("FCC: No PHY device found.\n");
+		}
+	}
+}
+
+/* This interrupt occurs when the PHY detects a link change. */
+#if !defined (CONFIG_PM826)
+static void
+mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+{
+	struct	net_device *dev = dev_id;
+	struct fcc_enet_private *fep = dev->priv;
+
+	mii_do_cmd(dev, fep->phy->ack_int);
+	mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */
+}
+#endif	/* !CONFIG_PM826 */
+
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifdef ORIGINAL_VERSION
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+static void
+set_multicast_list(struct net_device *dev)
+{
+	struct	fcc_enet_private *cep;
+	struct	dev_mc_list *dmi;
+	u_char	*mcptr, *tdptr;
+	volatile fcc_enet_t *ep;
+	int	i, j;
+
+	cep = (struct fcc_enet_private *)dev->priv;
+
+return;
+	/* Get pointer to FCC area in parameter RAM.
+	*/
+	ep = (fcc_enet_t *)dev->base_addr;
+
+	if (dev->flags&IFF_PROMISC) {
+
+		/* Log any net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		cep->fccp->fcc_fpsmr |= FCC_PSMR_PRO;
+	} else {
+
+		cep->fccp->fcc_fpsmr &= ~FCC_PSMR_PRO;
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Catch all multicast addresses, so set the
+			 * filter to all 1's.
+			 */
+			ep->fen_gaddrh = 0xffffffff;
+			ep->fen_gaddrl = 0xffffffff;
+		}
+		else {
+			/* Clear filter and add the addresses in the list.
+			*/
+			ep->fen_gaddrh = 0;
+			ep->fen_gaddrl = 0;
+
+			dmi = dev->mc_list;
+
+			for (i=0; i<dev->mc_count; i++) {
+
+				/* Only support group multicast for now.
+				*/
+				if (!(dmi->dmi_addr[0] & 1))
+					continue;
+
+				/* The address in dmi_addr is LSB first,
+				 * and taddr is MSB first.  We have to
+				 * copy bytes MSB first from dmi_addr.
+				 */
+				mcptr = (u_char *)dmi->dmi_addr + 5;
+				tdptr = (u_char *)&ep->fen_taddrh;
+				for (j=0; j<6; j++)
+					*tdptr++ = *mcptr--;
+
+				/* Ask CPM to run CRC and set bit in
+				 * filter mask.
+				 */
+				cpmp->cp_cpcr = mk_cr_cmd(cep->fip->fc_cpmpage,
+						cep->fip->fc_cpmblock, 0x0c,
+						CPM_CR_SET_GADDR) | CPM_CR_FLG;
+				udelay(10);
+				while (cpmp->cp_cpcr & CPM_CR_FLG);
+			}
+		}
+	}
+}
+
+
+/* Set the individual MAC address.
+ */
+int fcc_enet_set_mac_address(struct net_device *dev, void *p)
+{
+	struct sockaddr *addr= (struct sockaddr *) p;
+	struct fcc_enet_private *cep;
+	volatile fcc_enet_t *ep;
+	unsigned char *eap;
+	int i;
+
+	cep = (struct fcc_enet_private *)(dev->priv);
+	ep = cep->ep;
+
+	if (netif_running(dev))
+		return -EBUSY;
+
+	memcpy(dev->dev_addr, addr->sa_data, dev->addr_len);
+
+	eap = (unsigned char *) &(ep->fen_paddrh);
+	for (i=5; i>=0; i--)
+		*eap++ = addr->sa_data[i];
+
+	return 0;
+}
+#endif /* ORIGINAL_VERSION */
+
+
+/* Initialize the CPM Ethernet on FCC.
+ */
+int __init fec_enet_init(void)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct fcc_enet_private *cep;
+	fcc_info_t	*fip;
+	int		i, np;
+	volatile	immap_t		*immap;
+	volatile	iop8260_t	*io;
+
+	immap = (immap_t *)IMAP_ADDR;	/* and to internal registers */
+	io = &immap->im_ioport;
+
+	for (np = 0, fip = fcc_ports;
+	     np < sizeof(fcc_ports) / sizeof(fcc_info_t);
+	     np++, fip++) {
+
+		/* Skip FCC ports not used for RTnet.
+		 */
+		if (np != rtnet_fcc - 1) continue;
+
+		/* Allocate some private information and create an Ethernet device instance.
+		*/
+		if (!rx_pool_size)
+			rx_pool_size = RX_RING_SIZE * 2;
+
+		rtdev = rt_alloc_etherdev(sizeof(struct fcc_enet_private),
+					rx_pool_size + TX_RING_SIZE);
+		if (rtdev == NULL) {
+			printk(KERN_ERR "fcc_enet: Could not allocate ethernet device.\n");
+			return -1;
+		}
+		rtdev_alloc_name(rtdev, "rteth%d");
+		rt_rtdev_connect(rtdev, &RTDEV_manager);
+		rtdev->vers = RTDEV_VERS_2_0;
+
+		cep = (struct fcc_enet_private *)rtdev->priv;
+		rtdm_lock_init(&cep->lock);
+		cep->fip = fip;
+		fip->rtdev = rtdev; /* need for cleanup */
+
+		init_fcc_shutdown(fip, cep, immap);
+		init_fcc_ioports(fip, io, immap);
+		init_fcc_param(fip, rtdev, immap);
+
+		rtdev->base_addr = (unsigned long)(cep->ep);
+
+		/* The CPM Ethernet specific entries in the device
+		 * structure.
+		 */
+		rtdev->open = fcc_enet_open;
+		rtdev->hard_start_xmit = fcc_enet_start_xmit;
+		rtdev->stop = fcc_enet_close;
+		rtdev->hard_header = &rt_eth_header;
+		rtdev->get_stats = fcc_enet_get_stats;
+
+		if ((i = rt_register_rtnetdev(rtdev))) {
+			rtdm_irq_disable(&cep->irq_handle);
+			rtdm_irq_free(&cep->irq_handle);
+			rtdev_free(rtdev);
+			return i;
+		}
+		init_fcc_startup(fip, rtdev);
+
+		printk("%s: FCC%d ENET Version 0.4, %02x:%02x:%02x:%02x:%02x:%02x\n",
+		       rtdev->name, fip->fc_fccnum + 1,
+		       rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2],
+		       rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]);
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		/* Queue up command to detect the PHY and initialize the
+		 * remainder of the interface.
+		 */
+		cep->phy_addr = 0;
+		mii_queue(rtdev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+	}
+
+	return 0;
+}
+
+/* Make sure the device is shut down during initialization.
+*/
+static void __init
+init_fcc_shutdown(fcc_info_t *fip, struct fcc_enet_private *cep,
+						volatile immap_t *immap)
+{
+	volatile	fcc_enet_t	*ep;
+	volatile	fcc_t		*fccp;
+
+	/* Get pointer to FCC area in parameter RAM.
+	*/
+	ep = (fcc_enet_t *)(&immap->im_dprambase[fip->fc_proff]);
+
+	/* And another to the FCC register area.
+	*/
+	fccp = (volatile fcc_t *)(&immap->im_fcc[fip->fc_fccnum]);
+	cep->fccp = fccp;		/* Keep the pointers handy */
+	cep->ep = ep;
+
+	/* Disable receive and transmit in case someone left it running.
+	*/
+	fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+
+/* Initialize the I/O pins for the FCC Ethernet.
+*/
+static void __init
+init_fcc_ioports(fcc_info_t *fip, volatile iop8260_t *io,
+						volatile immap_t *immap)
+{
+
+	/* FCC1 pins are on port A/C.  FCC2/3 are port B/C.
+	*/
+	if (fip->fc_proff == PROFF_FCC1) {
+		/* Configure port A and C pins for FCC1 Ethernet.
+		 */
+		io->iop_pdira &= ~PA1_DIRA0;
+		io->iop_pdira |= PA1_DIRA1;
+		io->iop_psora &= ~PA1_PSORA0;
+		io->iop_psora |= PA1_PSORA1;
+		io->iop_ppara |= (PA1_DIRA0 | PA1_DIRA1);
+	}
+	if (fip->fc_proff == PROFF_FCC2) {
+		/* Configure port B and C pins for FCC Ethernet.
+		 */
+		io->iop_pdirb &= ~PB2_DIRB0;
+		io->iop_pdirb |= PB2_DIRB1;
+		io->iop_psorb &= ~PB2_PSORB0;
+		io->iop_psorb |= PB2_PSORB1;
+		io->iop_pparb |= (PB2_DIRB0 | PB2_DIRB1);
+	}
+	if (fip->fc_proff == PROFF_FCC3) {
+		/* Configure port B and C pins for FCC Ethernet.
+		 */
+		io->iop_pdirb &= ~PB3_DIRB0;
+		io->iop_pdirb |= PB3_DIRB1;
+		io->iop_psorb &= ~PB3_PSORB0;
+		io->iop_psorb |= PB3_PSORB1;
+		io->iop_pparb |= (PB3_DIRB0 | PB3_DIRB1);
+	}
+
+	/* Port C has clocks......
+	*/
+	io->iop_psorc &= ~(fip->fc_trxclocks);
+	io->iop_pdirc &= ~(fip->fc_trxclocks);
+	io->iop_pparc |= fip->fc_trxclocks;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* ....and the MII serial clock/data.
+	*/
+#ifndef	CONFIG_PM826
+	IOP_DAT(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+	IOP_ODR(io,fip->fc_port) &= ~(fip->fc_mdio | fip->fc_mdck);
+#endif	/* CONFIG_PM826 */
+	IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+	IOP_PAR(io,fip->fc_port) &= ~(fip->fc_mdio | fip->fc_mdck);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Configure Serial Interface clock routing.
+	 * First, clear all FCC bits to zero,
+	 * then set the ones we want.
+	 */
+	immap->im_cpmux.cmx_fcr &= ~(fip->fc_clockmask);
+	immap->im_cpmux.cmx_fcr |= fip->fc_clockroute;
+}
+
+static void __init
+init_fcc_param(fcc_info_t *fip, struct rtnet_device *rtdev,
+						volatile immap_t *immap)
+{
+	unsigned char	*eap;
+	unsigned long	mem_addr;
+	bd_t		*bd;
+	int		i, j;
+	struct		fcc_enet_private *cep;
+	volatile	fcc_enet_t	*ep;
+	volatile	cbd_t		*bdp;
+	volatile	cpm8260_t	*cp;
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+	ep = cep->ep;
+	cp = cpmp;
+
+	bd = (bd_t *)__res;
+
+	/* Zero the whole thing.....I must have missed some individually.
+	 * It works when I do this.
+	 */
+	memset((char *)ep, 0, sizeof(fcc_enet_t));
+
+	/* Allocate space for the buffer descriptors in the DP ram.
+	 * These are relative offsets in the DP ram address space.
+	 * Initialize base addresses for the buffer descriptors.
+	 */
+	cep->rx_bd_base = (cbd_t *)m8260_cpm_hostalloc(sizeof(cbd_t) * RX_RING_SIZE, 8);
+	ep->fen_genfcc.fcc_rbase = __pa(cep->rx_bd_base);
+	cep->tx_bd_base = (cbd_t *)m8260_cpm_hostalloc(sizeof(cbd_t) * TX_RING_SIZE, 8);
+	ep->fen_genfcc.fcc_tbase = __pa(cep->tx_bd_base);
+
+	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
+	cep->cur_rx = cep->rx_bd_base;
+
+	ep->fen_genfcc.fcc_rstate = (CPMFCR_GBL | CPMFCR_EB) << 24;
+	ep->fen_genfcc.fcc_tstate = (CPMFCR_GBL | CPMFCR_EB) << 24;
+
+	/* Set maximum bytes per receive buffer.
+	 * It must be a multiple of 32.
+	 */
+	ep->fen_genfcc.fcc_mrblr = PKT_MAXBLR_SIZE;
+
+	/* Allocate space in the reserved FCC area of DPRAM for the
+	 * internal buffers.  No one uses this space (yet), so we
+	 * can do this.  Later, we will add resource management for
+	 * this area.
+	 */
+	mem_addr = CPM_FCC_SPECIAL_BASE + (fip->fc_fccnum * 128);
+	ep->fen_genfcc.fcc_riptr = mem_addr;
+	ep->fen_genfcc.fcc_tiptr = mem_addr+32;
+	ep->fen_padptr = mem_addr+64;
+	memset((char *)(&(immap->im_dprambase[(mem_addr+64)])), 0x88, 32);
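+	/* (Layout of this 128-byte per-FCC scratch area: internal Rx
+	 * pointer at +0, internal Tx pointer at +32, and 32 bytes of
+	 * 0x88 pad characters at +64 used for short-frame padding.) */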
+
+	ep->fen_genfcc.fcc_rbptr = 0;
+	ep->fen_genfcc.fcc_tbptr = 0;
+	ep->fen_genfcc.fcc_rcrc = 0;
+	ep->fen_genfcc.fcc_tcrc = 0;
+	ep->fen_genfcc.fcc_res1 = 0;
+	ep->fen_genfcc.fcc_res2 = 0;
+
+	ep->fen_camptr = 0;	/* CAM isn't used in this driver */
+
+	/* Set CRC preset and mask.
+	*/
+	ep->fen_cmask = 0xdebb20e3;
+	ep->fen_cpres = 0xffffffff;
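+	/* (0xffffffff is the standard IEEE 802.3 CRC32 preset and
+	 * 0xdebb20e3 the residue the receiver checks against.) */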
+
+	ep->fen_crcec = 0;	/* CRC Error counter */
+	ep->fen_alec = 0;	/* alignment error counter */
+	ep->fen_disfc = 0;	/* discard frame counter */
+	ep->fen_retlim = 15;	/* Retry limit threshold */
+	ep->fen_pper = 0;	/* Normal persistence */
+
+	/* Clear hash filter tables.
+	*/
+	ep->fen_gaddrh = 0;
+	ep->fen_gaddrl = 0;
+	ep->fen_iaddrh = 0;
+	ep->fen_iaddrl = 0;
+
+	/* Clear the Out-of-sequence TxBD.
+	*/
+	ep->fen_tfcstat = 0;
+	ep->fen_tfclen = 0;
+	ep->fen_tfcptr = 0;
+
+	ep->fen_mflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
+	ep->fen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */
+
+	/* Set Ethernet station address.
+	 *
+	 * This is supplied in the board information structure, so we
+	 * copy that into the controller.
+	 */
+	eap = (unsigned char *)&(ep->fen_paddrh);
+#if defined(CONFIG_CPU86) || defined(CONFIG_TQM8260)
+	/*
+	 * TQM8260 and CPU86 use sequential MAC addresses
+	 */
+	*eap++ = rtdev->dev_addr[5] = bd->bi_enetaddr[5] + fip->fc_fccnum;
+	for (i=4; i>=0; i--) {
+		*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+	}
+#elif defined(CONFIG_PM826)
+	*eap++ = rtdev->dev_addr[5] = bd->bi_enetaddr[5] + fip->fc_fccnum + 1;
+	for (i=4; i>=0; i--) {
+		*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+	}
+#else
+	/*
+	 * So far, we have only been given one Ethernet address.  We make
+	 * it unique by toggling selected bits in the upper byte of the
+	 * non-static part of the address (for the second and third ports;
+	 * the first port uses the address supplied as-is).
+	 */
+	for (i=5; i>=0; i--) {
+		if (i == 3 && fip->fc_fccnum != 0) {
+			rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+			rtdev->dev_addr[i] ^= (1 << (7 - fip->fc_fccnum));
+			*eap++ = rtdev->dev_addr[i];
+		}
+		else {
+			*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+		}
+	}
+#endif
+
+	ep->fen_taddrh = 0;
+	ep->fen_taddrm = 0;
+	ep->fen_taddrl = 0;
+
+	ep->fen_maxd1 = PKT_MAXDMA_SIZE;	/* maximum DMA1 length */
+	ep->fen_maxd2 = PKT_MAXDMA_SIZE;	/* maximum DMA2 length */
+
+	/* Clear stat counters, in case we ever enable RMON.
+	*/
+	ep->fen_octc = 0;
+	ep->fen_colc = 0;
+	ep->fen_broc = 0;
+	ep->fen_mulc = 0;
+	ep->fen_uspc = 0;
+	ep->fen_frgc = 0;
+	ep->fen_ospc = 0;
+	ep->fen_jbrc = 0;
+	ep->fen_p64c = 0;
+	ep->fen_p65c = 0;
+	ep->fen_p128c = 0;
+	ep->fen_p256c = 0;
+	ep->fen_p512c = 0;
+	ep->fen_p1024c = 0;
+
+	ep->fen_rfthr = 0;	/* Suggested by manual */
+	ep->fen_rfcnt = 0;
+	ep->fen_cftype = 0;
+
+	/* Now allocate the host memory pages and initialize the
+	 * buffer descriptors.
+	 */
+	bdp = cep->tx_bd_base;
+	for (i=0; i<TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = 0;
+		bdp->cbd_datlen = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	bdp = cep->rx_bd_base;
+	for (i=0; i<FCC_ENET_RX_PAGES; i++) {
+
+		/* Allocate a page.
+		*/
+		mem_addr = __get_free_page(GFP_KERNEL);
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		for (j=0; j<FCC_ENET_RX_FRPPG; j++) {
+			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
+			bdp->cbd_datlen = 0;
+			bdp->cbd_bufaddr = __pa(mem_addr);
+			mem_addr += FCC_ENET_RX_FRSIZE;
+			bdp++;
+		}
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Let's re-initialize the channel now.  We have to do it later
+	 * than the manual describes because we have just now finished
+	 * the BD initialization.
+	 */
+	cp->cp_cpcr = mk_cr_cmd(fip->fc_cpmpage, fip->fc_cpmblock, 0x0c,
+			CPM_CR_INIT_TRX) | CPM_CR_FLG;
+	while (cp->cp_cpcr & CPM_CR_FLG);
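+	/* (CPM_CR_FLG is set by the host when issuing a command and
+	 * cleared by the communication processor once the command has
+	 * completed, hence the busy-wait above.) */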
+
+	cep->skb_cur = cep->skb_dirty = 0;
+}
+
+/* Let 'er rip.
+*/
+static void __init
+init_fcc_startup(fcc_info_t *fip, struct rtnet_device *rtdev)
+{
+	volatile fcc_t	*fccp;
+	struct fcc_enet_private *cep;
+
+	cep = (struct fcc_enet_private *)rtdev->priv;
+	fccp = cep->fccp;
+
+	fccp->fcc_fcce = 0xffff;	/* Clear any pending events */
+
+	/* Enable interrupts for transmit error, complete frame
+	 * received, and any transmit buffer for which we have also set
+	 * the interrupt flag.
+	 */
+	fccp->fcc_fccm = (FCC_ENET_TXE | FCC_ENET_RXF | FCC_ENET_TXB);
+
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	/* Install our interrupt handler.
+	*/
+	if (rtdm_irq_request(&cep->irq_handle, fip->fc_interrupt,
+			     fcc_enet_interrupt, 0, "rt_mpc8260_fcc_enet", rtdev))  {
+		printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq);
+		rtdev_free(rtdev);
+		return;
+	}
+
+
+#if defined (CONFIG_XENO_DRIVERS_NET_USE_MDIO) && !defined (CONFIG_PM826)
+# ifndef PHY_INTERRUPT
+#  error Want to use MDIO, but PHY_INTERRUPT not defined!
+# endif
+	if (request_8xxirq(PHY_INTERRUPT, mii_link_interrupt, 0,
+							"mii", rtdev) < 0)
+		printk("Can't get MII IRQ %d\n", PHY_INTERRUPT);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO, CONFIG_PM826 */
+
+	/* Set GFMR to enable Ethernet operating mode.
+	 */
+#ifndef CONFIG_EST8260
+	fccp->fcc_gfmr = (FCC_GFMR_TCI | FCC_GFMR_MODE_ENET);
+#else
+	fccp->fcc_gfmr = FCC_GFMR_MODE_ENET;
+#endif
+
+	/* Set sync/delimiters.
+	*/
+	fccp->fcc_fdsr = 0xd555;
+
+	/* Set protocol specific processing mode for Ethernet.
+	 * This has to be adjusted for Full Duplex operation after we can
+	 * determine how to detect that.
+	 */
+	fccp->fcc_fpsmr = FCC_PSMR_ENCRC;
+
+#ifdef CONFIG_ADS8260
+	/* Enable the PHY.
+	*/
+	ads_csr_addr[1] |= BCSR1_FETH_RST;	/* Remove reset */
+	ads_csr_addr[1] &= ~BCSR1_FETHIEN;	/* Enable */
+#endif
+
+#if defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO) || defined(CONFIG_TQM8260)
+	/* start in full duplex mode, and negotiate speed */
+	fcc_restart (rtdev, 1);
+#else
+	/* start in half duplex mode */
+	fcc_restart (rtdev, 0);
+#endif
+}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* MII command/status interface.
+ * I'm not going to describe all of the details.  You can find the
+ * protocol definition in many other places, including the data sheet
+ * of most PHY parts.
+ * I wonder what "they" were thinking (maybe they weren't) when they
+ * left the I2C in the CPM while I have to toggle these bits......
+ *
+ * Timing is critical, especially on faster CPUs ...
+ */
+#define MDIO_DELAY	5
+
+#define FCC_MDIO(bit) do {					\
+	udelay(MDIO_DELAY);					\
+	if (bit)						\
+		IOP_DAT(io,fip->fc_port) |= fip->fc_mdio;	\
+	else							\
+		IOP_DAT(io,fip->fc_port) &= ~fip->fc_mdio;	\
+} while(0)
+
+#define FCC_MDC(bit) do {					\
+	udelay(MDIO_DELAY);					\
+	if (bit)						\
+		IOP_DAT(io,fip->fc_port) |= fip->fc_mdck;	\
+	else							\
+		IOP_DAT(io,fip->fc_port) &= ~fip->fc_mdck;	\
+} while(0)
+
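+/* A clause-22 MII management frame consists of a 32-bit preamble,
+ * 2 start bits (01), 2 opcode bits (10 = read, 01 = write), 5 PHY
+ * address bits, 5 register address bits, a 2-bit turnaround and
+ * 16 data bits.  For a read we drive only the first 14 command bits
+ * and then release MDIO for the turnaround, which is what the
+ * 14-versus-32 loop bound below implements.
+ */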
+static uint
+mii_send_receive(fcc_info_t *fip, uint cmd)
+{
+	uint		retval;
+	int		read_op, i, off;
+	volatile	immap_t		*immap;
+	volatile	iop8260_t	*io;
+
+	immap = (immap_t *)IMAP_ADDR;
+	io = &immap->im_ioport;
+
+	IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+
+	read_op = ((cmd & 0xf0000000) == 0x60000000);
+
+	/* Write preamble
+	 */
+	for (i = 0; i < 32; i++)
+	{
+		FCC_MDC(0);
+		FCC_MDIO(1);
+		FCC_MDC(1);
+	}
+
+	/* Write data
+	 */
+	for (i = 0, off = 31; i < (read_op ? 14 : 32); i++, --off)
+	{
+		FCC_MDC(0);
+		FCC_MDIO((cmd >> off) & 0x00000001);
+		FCC_MDC(1);
+	}
+
+	retval = cmd;
+
+	if (read_op)
+	{
+		retval >>= 16;
+
+		FCC_MDC(0);
+		IOP_DIR(io,fip->fc_port) &= ~fip->fc_mdio;
+		FCC_MDC(1);
+		FCC_MDC(0);
+
+		for (i = 0, off = 15; i < 16; i++, off--)
+		{
+			FCC_MDC(1);
+			udelay(MDIO_DELAY);
+			retval <<= 1;
+			if (IOP_DAT(io,fip->fc_port) & fip->fc_mdio)
+				retval++;
+			FCC_MDC(0);
+		}
+	}
+
+	IOP_DIR(io,fip->fc_port) |= (fip->fc_mdio | fip->fc_mdck);
+
+	for (i = 0; i < 32; i++)
+	{
+		FCC_MDC(0);
+		FCC_MDIO(1);
+		FCC_MDC(1);
+	}
+
+	return retval;
+}
+
+static void
+fcc_stop(struct net_device *dev)
+{
+	volatile fcc_t	*fccp;
+	struct fcc_enet_private	*fcp;
+
+	fcp = (struct fcc_enet_private *)(dev->priv);
+	fccp = fcp->fccp;
+
+	/* Disable transmit/receive */
+	fccp->fcc_gfmr &= ~(FCC_GFMR_ENR | FCC_GFMR_ENT);
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+static void
+fcc_restart(struct rtnet_device *rtdev, int duplex)
+{
+	volatile fcc_t	*fccp;
+	struct fcc_enet_private	*fcp;
+
+	fcp = (struct fcc_enet_private *)rtdev->priv;
+	fccp = fcp->fccp;
+
+	if (duplex)
+		fccp->fcc_fpsmr |= (FCC_PSMR_FDE | FCC_PSMR_LPB);
+	else
+		fccp->fcc_fpsmr &= ~(FCC_PSMR_FDE | FCC_PSMR_LPB);
+
+	/* Enable transmit/receive */
+	fccp->fcc_gfmr |= FCC_GFMR_ENR | FCC_GFMR_ENT;
+}
+
+static int
+fcc_enet_open(struct rtnet_device *rtdev)
+{
+	struct fcc_enet_private *fep = rtdev->priv;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	fep->sequence_done = 0;
+	fep->link = 0;
+
+	if (fep->phy) {
+		mii_do_cmd(rtdev, fep->phy->ack_int);
+		mii_do_cmd(rtdev, fep->phy->config);
+		mii_do_cmd(rtdev, phy_cmd_config);  /* display configuration */
+		while(!fep->sequence_done)
+			schedule();
+
+		mii_do_cmd(rtdev, fep->phy->startup);
+#ifdef	CONFIG_PM826
+		/* Read the autonegotiation results */
+		mii_do_cmd(rtdev, fep->phy->ack_int);
+		mii_do_cmd(rtdev, phy_cmd_relink);
+#endif	/* CONFIG_PM826 */
+		rtnetif_start_queue(rtdev);
+		return 0;		/* Success */
+	}
+	return -ENODEV;		/* No PHY we understand */
+#else
+	fep->link = 1;
+	rtnetif_start_queue(rtdev);
+	return 0;					/* Always succeed */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+}
+
+static void __exit fcc_enet_cleanup(void)
+{
+	struct rtnet_device *rtdev;
+	volatile immap_t *immap = (immap_t *)IMAP_ADDR;
+	struct fcc_enet_private *cep;
+	fcc_info_t *fip;
+	int np;
+
+	for (np = 0, fip = fcc_ports;
+	     np < sizeof(fcc_ports) / sizeof(fcc_info_t);
+	     np++, fip++) {
+
+		/* Skip FCC ports not used for RTnet. */
+		if (np != rtnet_fcc - 1) continue;
+
+		rtdev = fip->rtdev;
+		cep = (struct fcc_enet_private *)rtdev->priv;
+
+		rtdm_irq_disable(&cep->irq_handle);
+		rtdm_irq_free(&cep->irq_handle);
+
+		init_fcc_shutdown(fip, cep, immap);
+		printk("%s: cleanup incomplete (m8260_cpm_dpfree does not exit)!\n",
+		       rtdev->name);
+		rt_stack_disconnect(rtdev);
+		rt_unregister_rtnetdev(rtdev);
+		rt_rtdev_disconnect(rtdev);
+
+		printk("%s: unloaded\n", rtdev->name);
+		rtdev_free(rtdev);
+	}
+}
+
+module_init(fec_enet_init);
+module_exit(fcc_enet_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c
new file mode 100644
index 0000000..7fb0fcf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_enet.c
@@ -0,0 +1,1073 @@
+/*
+ * BK Id: SCCS/s.enet.c 1.24 01/19/02 03:07:14 dan
+ */
+/*
+ * Ethernet driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * I copied the basic skeleton from the lance driver, because I did not
+ * know how to write the Linux driver, but I did know how the LANCE worked.
+ *
+ * This version of the driver is somewhat selectable for the different
+ * processor/board combinations.  It works for the boards I know about
+ * now, and should be easily modified to include others.  Some of the
+ * configuration information is contained in <asm/commproc.h> and the
+ * remainder is here.
+ *
+ * Buffer descriptors are kept in the CPM dual port RAM, and the frame
+ * buffers are in the host memory.
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Ported to RTnet.
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/uaccess.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/bitops.h>
+#include <asm/irq.h>
+#include <asm/commproc.h>
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet MPC8xx SCC Ethernet driver");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size =  0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+static unsigned int rtnet_scc = 1; /* SCC1 */
+MODULE_PARM(rtnet_scc, "i");
+MODULE_PARM_DESC(rtnet_scc, "SCCx port for RTnet, x=1..3 (default=1)");
+
+#define RT_DEBUG(fmt,args...)
+
+/*
+ *				Theory of Operation
+ *
+ * The MPC8xx CPM performs the Ethernet processing on SCC1.  It can use
+ * an arbitrary number of buffers on byte boundaries, but must have at
+ * least two receive buffers to prevent constant overrun conditions.
+ *
+ * The buffer descriptors are allocated from the CPM dual port memory
+ * with the data buffers allocated from host memory, just like all other
+ * serial communication protocols.  The host memory buffers are allocated
+ * from the free page pool, and then divided into smaller receive and
+ * transmit buffers.  The size of the buffers should be a power of two,
+ * since that nicely divides the page.  This creates a ring buffer
+ * structure similar to the LANCE and other controllers.
+ *
+ * Like the LANCE driver:
+ * The driver runs as two independent, single-threaded flows of control.  One
+ * is the send-packet routine, which enforces single-threaded use by the
+ * cep->tx_busy flag.  The other thread is the interrupt handler, which is
+ * single threaded by the hardware and other software.
+ *
+ * The send packet thread has partial control over the Tx ring and the
+ * 'cep->tx_busy' flag.  It sets the tx_busy flag whenever it's queuing a Tx
+ * packet. If the next queue slot is empty, it clears the tx_busy flag when
+ * finished otherwise it sets the 'lp->tx_full' flag.
+ *
+ * The MBX has a control register external to the MPC8xx that has some
+ * control of the Ethernet interface.  Information is in the manual for
+ * your board.
+ *
+ * The RPX boards have an external control/status register.  Consult the
+ * programming documents for details unique to your board.
+ *
+ * For the TQM8xx(L) modules, there is no control register interface.
+ * All functions are directly controlled using I/O pins.  See <asm/commproc.h>.
+ */
+
+/* The transmitter timeout
+ */
+#define TX_TIMEOUT	(2*HZ)
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are power of two, so it is best
+ * to keep them that size.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+#define CPM_ENET_RX_PAGES	4
+#define CPM_ENET_RX_FRSIZE	2048
+#define CPM_ENET_RX_FRPPG	(PAGE_SIZE / CPM_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(CPM_ENET_RX_FRPPG * CPM_ENET_RX_PAGES)
+#define TX_RING_SIZE		8	/* Must be power of two */
+#define TX_RING_MOD_MASK	7	/*   for this to work */
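+
+/* Keeping the ring size a power of two lets an index wrap with a
+ * single mask, e.g. next = (cur + 1) & TX_RING_MOD_MASK, which is how
+ * skb_cur and skb_dirty are advanced below.
+ */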
+
+/* The CPM stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+#define PKT_MAXBLR_SIZE		1520
+
+/* The CPM buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
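+/* (So when cur_tx == dirty_tx, the READY bit of the descriptor at
+ * cur_tx decides the state: still set means the ring is full, clear
+ * means it is empty.)
+ */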
+struct scc_enet_private {
+	/* The addresses of a Tx/Rx-in-place packets/buffers. */
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses.
+	*/
+	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
+	cbd_t	*tx_bd_base;
+	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
+	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
+	scc_t	*sccp;
+
+	/* Virtual addresses for the receive buffers because we can't
+	 * do a __va() on them anymore.
+	 */
+	unsigned char *rx_vaddr[RX_RING_SIZE];
+	struct	net_device_stats stats;
+	uint	tx_full;
+	rtdm_lock_t lock;
+	rtdm_irq_t irq_handle;
+};
+
+static int scc_enet_open(struct rtnet_device *rtdev);
+static int scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static int scc_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int scc_enet_interrupt(rtdm_irq_t *irq_handle);
+static int scc_enet_close(struct rtnet_device *rtdev);
+
+static struct net_device_stats *scc_enet_get_stats(struct rtnet_device *rtdev);
+#ifdef ORIGINAL_VERSION
+static void set_multicast_list(struct net_device *dev);
+#endif
+
+#ifndef ORIGINAL_VERSION
+static struct rtnet_device *rtdev_root = NULL;
+#endif
+
+/* Typically, 860(T) boards use SCC1 for Ethernet, and other 8xx boards
+ * use SCC2. Some even may use SCC3.
+ * This is easily extended if necessary.
+ * These values are set when the driver is initialized.
+ */
+static int CPM_CR_ENET;
+static int PROFF_ENET;
+static int SCC_ENET;
+static int CPMVEC_ENET;
+
+static int
+scc_enet_open(struct rtnet_device *rtdev)
+{
+	/* I should reset the ring buffers here, but I don't yet know
+	 * a simple way to do that.
+	 */
+	rtnetif_start_queue(rtdev);
+
+	return 0;					/* Always succeed */
+}
+
+static int
+scc_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
+	volatile cbd_t	*bdp;
+	rtdm_lockctx_t context;
+
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	/* Fill in a Tx ring entry */
+	bdp = cep->cur_tx;
+
+#ifndef final_version
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		/* Ooops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since cep->tx_busy should be set.
+		 */
+		rtdm_printk("%s: tx queue full!.\n", rtdev->name);
+		return 1;
+	}
+#endif
+
+	/* Clear all of the status flags.
+	 */
+	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+	/* If the frame is short, tell CPM to pad it.
+	*/
+	if (skb->len <= ETH_ZLEN)
+		bdp->cbd_sc |= BD_ENET_TX_PAD;
+	else
+		bdp->cbd_sc &= ~BD_ENET_TX_PAD;
+
+	/* Set buffer length and buffer pointer.
+	*/
+	bdp->cbd_datlen = skb->len;
+	bdp->cbd_bufaddr = __pa(skb->data);
+
+	/* Save skb pointer.
+	*/
+	cep->tx_skbuff[cep->skb_cur] = skb;
+
+	cep->stats.tx_bytes += skb->len;
+	cep->skb_cur = (cep->skb_cur+1) & TX_RING_MOD_MASK;
+
+	/* Prevent interrupts from changing the Tx ring from underneath us. */
+	// *** RTnet ***
+	rtdm_lock_get_irqsave(&cep->lock, context);
+
+	/* Get and patch time stamp just before the transmission */
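+	/* (When set, xmit_stamp points into the frame data and initially
+	 * holds a sender-supplied offset; the offset plus the current
+	 * clock value is patched in, in big-endian byte order.) */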
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	/* Push the data cache so the CPM does not get stale memory
+	 * data.
+	 */
+	flush_dcache_range((unsigned long)(skb->data),
+			   (unsigned long)(skb->data + skb->len));
+
+
+	/* Send it on its way.  Tell the CPM it's ready, to interrupt when
+	 * done, that it's the last BD of the frame, and to put the CRC on
+	 * the end.
+	 */
+	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR | BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+	/* If this was the last BD in the ring, start at the beginning again.
+	*/
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+		bdp = cep->tx_bd_base;
+	else
+		bdp++;
+
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		rtnetif_stop_queue(rtdev);
+		cep->tx_full = 1;
+	}
+
+	cep->cur_tx = (cbd_t *)bdp;
+
+	// *** RTnet ***
+	rtdm_lock_put_irqrestore(&cep->lock, context);
+
+	return 0;
+}
+
+#ifdef ORIGINAL_VERSION
+static void
+scc_enet_timeout(struct net_device *dev)
+{
+	struct scc_enet_private *cep = (struct scc_enet_private *)dev->priv;
+
+	printk("%s: transmit timed out.\n", dev->name);
+	cep->stats.tx_errors++;
+#ifndef final_version
+	{
+		int	i;
+		cbd_t	*bdp;
+		printk(" Ring data dump: cur_tx %p%s cur_rx %p.\n",
+		       cep->cur_tx, cep->tx_full ? " (full)" : "",
+		       cep->cur_rx);
+		bdp = cep->tx_bd_base;
+		for (i = 0 ; i < TX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+		bdp = cep->rx_bd_base;
+		for (i = 0 ; i < RX_RING_SIZE; i++, bdp++)
+			printk("%04x %04x %08x\n",
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+	}
+#endif
+	if (!cep->tx_full)
+		netif_wake_queue(dev);
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler.
+ * This is called from the CPM handler, not the MPC core interrupt.
+ */
+static int scc_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	struct	scc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	ushort	int_events;
+	int	must_restart;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+
+	cep = (struct scc_enet_private *)rtdev->priv;
+
+	/* Get the interrupt events that caused us to be here.
+	*/
+	int_events = cep->sccp->scc_scce;
+	cep->sccp->scc_scce = int_events;
+	must_restart = 0;
+
+	/* Handle receive event in its own function.
+	*/
+	if (int_events & SCCE_ENET_RXF) {
+		scc_enet_rx(rtdev, &packets, &time_stamp);
+	}
+
+	/* Check for a transmit error.  The manual is a little unclear
+	 * about this, so the debug code stays until I get it figured out.
+	 * It appears that if TXE is set, then TXB is not set.  However,
+	 * if carrier sense is lost during frame transmission, the TXE
+	 * bit is set, "and continues the buffer transmission normally."
+	 * I don't know if "normally" implies TXB is set when the buffer
+	 * descriptor is closed.....trial and error :-).
+	 */
+
+	/* Transmit OK, or non-fatal error.  Update the buffer descriptors.
+	*/
+	if (int_events & (SCCE_ENET_TXE | SCCE_ENET_TXB)) {
+	    rtdm_lock_get(&cep->lock);
+	    bdp = cep->dirty_tx;
+	    while ((bdp->cbd_sc&BD_ENET_TX_READY)==0) {
+		RT_DEBUG(__FUNCTION__": Tx ok\n");
+		if ((bdp==cep->cur_tx) && (cep->tx_full == 0))
+		    break;
+
+		if (bdp->cbd_sc & BD_ENET_TX_HB)	/* No heartbeat */
+			cep->stats.tx_heartbeat_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_LC)	/* Late collision */
+			cep->stats.tx_window_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_RL)	/* Retrans limit */
+			cep->stats.tx_aborted_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_UN)	/* Underrun */
+			cep->stats.tx_fifo_errors++;
+		if (bdp->cbd_sc & BD_ENET_TX_CSL)	/* Carrier lost */
+			cep->stats.tx_carrier_errors++;
+
+
+		/* No heartbeat or Lost carrier are not really bad errors.
+		 * The others require a restart transmit command.
+		 */
+		if (bdp->cbd_sc &
+		    (BD_ENET_TX_LC | BD_ENET_TX_RL | BD_ENET_TX_UN)) {
+			must_restart = 1;
+			cep->stats.tx_errors++;
+		}
+
+		cep->stats.tx_packets++;
+
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_DEF)
+			cep->stats.collisions++;
+
+		/* Free the sk buffer associated with this last transmit.
+		*/
+		dev_kfree_rtskb(cep->tx_skbuff[cep->skb_dirty]);
+		cep->skb_dirty = (cep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+		/* Update pointer to next buffer descriptor to be transmitted.
+		*/
+		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+			bdp = cep->tx_bd_base;
+		else
+			bdp++;
+
+		/* I don't know if we can be held off from processing these
+		 * interrupts for more than one frame time.  I really hope
+		 * not.  In such a case, we would now want to check the
+		 * currently available BD (cur_tx) and determine if any
+		 * buffers between the dirty_tx and cur_tx have also been
+		 * sent.  We would want to process anything in between that
+		 * does not have BD_ENET_TX_READY set.
+		 */
+
+		/* Since we have freed up a buffer, the ring is no longer
+		 * full.
+		 */
+		if (cep->tx_full) {
+			cep->tx_full = 0;
+			if (rtnetif_queue_stopped(rtdev))
+				rtnetif_wake_queue(rtdev);
+		}
+
+		cep->dirty_tx = (cbd_t *)bdp;
+	    }
+
+	    if (must_restart) {
+		volatile cpm8xx_t *cp;
+
+		/* Some transmit errors cause the transmitter to shut
+		 * down.  We now issue a restart transmit.  Since the
+		 * errors close the BD and update the pointers, the restart
+		 * _should_ pick up without having to reset any of our
+		 * pointers either.
+		 */
+		cp = cpmp;
+		cp->cp_cpcr =
+		    mk_cr_cmd(CPM_CR_ENET, CPM_CR_RESTART_TX) | CPM_CR_FLG;
+		while (cp->cp_cpcr & CPM_CR_FLG);
+	    }
+	    rtdm_lock_put(&cep->lock);
+	}
+
+	/* Check for receive busy, i.e. packets coming but no place to
+	 * put them.  This "can't happen" because the receive interrupt
+	 * is tossing previous frames.
+	 */
+	if (int_events & SCCE_ENET_BSY) {
+		cep->stats.rx_dropped++;
+		rtdm_printk("CPM ENET: BSY can't happen.\n");
+	}
+
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static int
+scc_enet_rx(struct rtnet_device *rtdev, int* packets, nanosecs_abs_t *time_stamp)
+{
+	struct	scc_enet_private *cep;
+	volatile cbd_t	*bdp;
+	ushort	pkt_len;
+	struct	rtskb *skb;
+
+	RT_DEBUG(__FUNCTION__": ...\n");
+
+	cep = (struct scc_enet_private *)rtdev->priv;
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = cep->cur_rx;
+
+    for (;;) {
+
+	if (bdp->cbd_sc & BD_ENET_RX_EMPTY)
+		break;
+
+#ifndef final_version
+	/* Since we have allocated space to hold a complete frame, both
+	 * the first and last indicators should be set.
+	 */
+	if ((bdp->cbd_sc & (BD_ENET_RX_FIRST | BD_ENET_RX_LAST)) !=
+		(BD_ENET_RX_FIRST | BD_ENET_RX_LAST))
+			rtdm_printk("CPM ENET: rcv is not first+last\n");
+#endif
+
+	/* Frame too long or too short.
+	*/
+	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH))
+		cep->stats.rx_length_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
+		cep->stats.rx_frame_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
+		cep->stats.rx_crc_errors++;
+	if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
+		cep->stats.rx_crc_errors++;
+
+	/* Report late collisions as a frame error.
+	 * On this error, the BD is closed, but we don't know what we
+	 * have in the buffer.  So, just drop this frame on the floor.
+	 */
+	if (bdp->cbd_sc & BD_ENET_RX_CL) {
+		cep->stats.rx_frame_errors++;
+	}
+	else {
+
+		/* Process the incoming frame.
+		*/
+		cep->stats.rx_packets++;
+		pkt_len = bdp->cbd_datlen;
+		cep->stats.rx_bytes += pkt_len;
+
+		/* This does 16 byte alignment, much more than we need.
+		 * The packet length includes FCS, but we don't want to
+		 * include that when passing upstream as it messes up
+		 * bridging applications.
+		 */
+		skb = rtnetdev_alloc_rtskb(rtdev, pkt_len-4);
+		if (skb == NULL) {
+			rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name);
+			cep->stats.rx_dropped++;
+		}
+		else {
+			rtskb_put(skb,pkt_len-4); /* Make room */
+			memcpy(skb->data,
+			       cep->rx_vaddr[bdp - cep->rx_bd_base],
+			       pkt_len-4);
+			skb->protocol=rt_eth_type_trans(skb,rtdev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			(*packets)++;
+		}
+	}
+
+	/* Clear the status flags for this buffer.
+	*/
+	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+
+	/* Mark the buffer empty.
+	*/
+	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+
+	/* Update BD pointer to next entry.
+	*/
+	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+		bdp = cep->rx_bd_base;
+	else
+		bdp++;
+
+    }
+	cep->cur_rx = (cbd_t *)bdp;
+
+	return 0;
+}
+
+static int
+scc_enet_close(struct rtnet_device *rtdev)
+{
+	/* Don't know what to do yet.
+	*/
+	rtnetif_stop_queue(rtdev);
+
+	return 0;
+}
+
+static struct net_device_stats *scc_enet_get_stats(struct rtnet_device *rtdev)
+{
+	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
+
+	return &cep->stats;
+}
+
+#ifdef ORIGINAL_VERSION
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
+
+static void set_multicast_list(struct net_device *dev)
+{
+	struct	scc_enet_private *cep;
+	struct	dev_mc_list *dmi;
+	u_char	*mcptr, *tdptr;
+	volatile scc_enet_t *ep;
+	int	i, j;
+	cep = (struct scc_enet_private *)dev->priv;
+
+	/* Get pointer to SCC area in parameter RAM.
+	*/
+	ep = (scc_enet_t *)dev->base_addr;
+
+	if (dev->flags&IFF_PROMISC) {
+
+		/* Log any net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		cep->sccp->scc_pmsr |= SCC_PMSR_PRO;
+	} else {
+
+		cep->sccp->scc_pmsr &= ~SCC_PMSR_PRO;
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Catch all multicast addresses, so set the
+			 * filter to all 1's.
+			 */
+			ep->sen_gaddr1 = 0xffff;
+			ep->sen_gaddr2 = 0xffff;
+			ep->sen_gaddr3 = 0xffff;
+			ep->sen_gaddr4 = 0xffff;
+		}
+		else {
+			/* Clear filter and add the addresses in the list.
+			*/
+			ep->sen_gaddr1 = 0;
+			ep->sen_gaddr2 = 0;
+			ep->sen_gaddr3 = 0;
+			ep->sen_gaddr4 = 0;
+
+			dmi = dev->mc_list;
+
+			for (i=0; i<dev->mc_count; i++) {
+
+				/* Only support group multicast for now.
+				*/
+				if (!(dmi->dmi_addr[0] & 1))
+					continue;
+
+				/* The address in dmi_addr is LSB first,
+				 * and taddr is MSB first.  We have to
+				 * copy bytes MSB first from dmi_addr.
+				 */
+				mcptr = (u_char *)dmi->dmi_addr + 5;
+				tdptr = (u_char *)&ep->sen_taddrh;
+				for (j=0; j<6; j++)
+					*tdptr++ = *mcptr--;
+
+				/* Ask CPM to run CRC and set bit in
+				 * filter mask.
+				 */
+				cpmp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_SET_GADDR) | CPM_CR_FLG;
+				/* this delay is necessary here -- Cort */
+				udelay(10);
+				while (cpmp->cp_cpcr & CPM_CR_FLG);
+			}
+		}
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* Initialize the CPM Ethernet on SCC.  If EPPC-Bug loaded us, or performed
+ * some other network I/O, a whole bunch of this has already been set up.
+ * It is no big deal if we do it again, we just have to disable the
+ * transmit and receive to make sure we don't catch the CPM with some
+ * inconsistent control information.
+ */
+int __init scc_enet_init(void)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct scc_enet_private *cep;
+	int i, j, k;
+	unsigned char	*eap, *ba;
+	dma_addr_t	mem_addr;
+	bd_t		*bd;
+	volatile	cbd_t		*bdp;
+	volatile	cpm8xx_t	*cp;
+	volatile	scc_t		*sccp;
+	volatile	scc_enet_t	*ep;
+	volatile	immap_t		*immap;
+
+	cp = cpmp;	/* Get pointer to Communication Processor */
+
+	immap = (immap_t *)(mfspr(IMMR) & 0xFFFF0000);	/* and to internal registers */
+
+	bd = (bd_t *)__res;
+
+	/* Configure the SCC parameters (this was formerly done
+	 * by macro definitions).
+	 */
+	switch (rtnet_scc) {
+	case 3:
+		CPM_CR_ENET = CPM_CR_CH_SCC3;
+		PROFF_ENET  = PROFF_SCC3;
+		SCC_ENET    = 2;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC3;
+		break;
+	case 2:
+		CPM_CR_ENET = CPM_CR_CH_SCC2;
+		PROFF_ENET  = PROFF_SCC2;
+		SCC_ENET    = 1;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC2;
+		break;
+	case 1:
+		CPM_CR_ENET = CPM_CR_CH_SCC1;
+		PROFF_ENET  = PROFF_SCC1;
+		SCC_ENET    = 0;		/* Index, not number! */
+		CPMVEC_ENET = CPMVEC_SCC1;
+		break;
+	default:
+		printk(KERN_ERR "enet: SCC%d doesn't exit (check rtnet_scc)\n", rtnet_scc);
+		return -1;
+	}
+
+	/* Allocate some private information and create an Ethernet device instance.
+	*/
+	if (!rx_pool_size)
+		rx_pool_size = RX_RING_SIZE * 2;
+	rtdev = rtdev_root = rt_alloc_etherdev(sizeof(struct scc_enet_private),
+					rx_pool_size + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "enet: Could not allocate ethernet device.\n");
+		return -1;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+
+	cep = (struct scc_enet_private *)rtdev->priv;
+	rtdm_lock_init(&cep->lock);
+
+	/* Get pointer to SCC area in parameter RAM.
+	*/
+	ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);
+
+	/* And another to the SCC register area.
+	*/
+	sccp = (volatile scc_t *)(&cp->cp_scc[SCC_ENET]);
+	cep->sccp = (scc_t *)sccp;		/* Keep the pointer handy */
+
+	/* Disable receive and transmit in case EPPC-Bug started it.
+	*/
+	sccp->scc_gsmrl &= ~(SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+	/* Cookbook style from the MPC860 manual.....
+	 * Not all of this is necessary if EPPC-Bug has initialized
+	 * the network.
+	 * So far we are lucky, all board configurations use the same
+	 * pins, or at least the same I/O Port for these functions.....
+	 * It can't last though......
+	 */
+
+#if (defined(PA_ENET_RXD) && defined(PA_ENET_TXD))
+	/* Configure port A pins for Txd and Rxd.
+	*/
+	immap->im_ioport.iop_papar |=  (PA_ENET_RXD | PA_ENET_TXD);
+	immap->im_ioport.iop_padir &= ~(PA_ENET_RXD | PA_ENET_TXD);
+	immap->im_ioport.iop_paodr &=                ~PA_ENET_TXD;
+#elif (defined(PB_ENET_RXD) && defined(PB_ENET_TXD))
+	/* Configure port B pins for Txd and Rxd.
+	*/
+	immap->im_cpm.cp_pbpar |=  (PB_ENET_RXD | PB_ENET_TXD);
+	immap->im_cpm.cp_pbdir &= ~(PB_ENET_RXD | PB_ENET_TXD);
+	immap->im_cpm.cp_pbodr &=		 ~PB_ENET_TXD;
+#else
+#error Exactly ONE pair of PA_ENET_[RT]XD, PB_ENET_[RT]XD must be defined
+#endif
+
+#if defined(PC_ENET_LBK)
+	/* Configure port C pins to disable External Loopback
+	 */
+	immap->im_ioport.iop_pcpar &= ~PC_ENET_LBK;
+	immap->im_ioport.iop_pcdir |=  PC_ENET_LBK;
+	immap->im_ioport.iop_pcso  &= ~PC_ENET_LBK;
+	immap->im_ioport.iop_pcdat &= ~PC_ENET_LBK;	/* Disable Loopback */
+#endif	/* PC_ENET_LBK */
+
+	/* Configure port C pins to enable CLSN and RENA.
+	*/
+	immap->im_ioport.iop_pcpar &= ~(PC_ENET_CLSN | PC_ENET_RENA);
+	immap->im_ioport.iop_pcdir &= ~(PC_ENET_CLSN | PC_ENET_RENA);
+	immap->im_ioport.iop_pcso  |=  (PC_ENET_CLSN | PC_ENET_RENA);
+
+	/* Configure port A for TCLK and RCLK.
+	*/
+	immap->im_ioport.iop_papar |=  (PA_ENET_TCLK | PA_ENET_RCLK);
+	immap->im_ioport.iop_padir &= ~(PA_ENET_TCLK | PA_ENET_RCLK);
+
+	/* Configure Serial Interface clock routing.
+	 * First, clear all SCC bits to zero, then set the ones we want.
+	 */
+	cp->cp_sicr &= ~SICR_ENET_MASK;
+	cp->cp_sicr |=  SICR_ENET_CLKRT;
+
+	/* Manual says set SDDR, but I can't find anything with that
+	 * name.  I think it is a misprint, and should be SDCR.  This
+	 * has already been set by the communication processor initialization.
+	 */
+
+	/* Allocate space for the buffer descriptors in the DP ram.
+	 * These are relative offsets in the DP ram address space.
+	 * Initialize base addresses for the buffer descriptors.
+	 */
+	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * RX_RING_SIZE);
+	ep->sen_genscc.scc_rbase = i;
+	cep->rx_bd_base = (cbd_t *)&cp->cp_dpmem[i];
+
+	i = m8xx_cpm_dpalloc(sizeof(cbd_t) * TX_RING_SIZE);
+	ep->sen_genscc.scc_tbase = i;
+	cep->tx_bd_base = (cbd_t *)&cp->cp_dpmem[i];
+
+	cep->dirty_tx = cep->cur_tx = cep->tx_bd_base;
+	cep->cur_rx = cep->rx_bd_base;
+
+	/* Issue init Rx BD command for SCC.
+	 * Manual says to perform an Init Rx parameters here.  We have
+	 * to perform both Rx and Tx because the SCC may have been
+	 * already running.
+	 * In addition, we have to do it later because we don't yet have
+	 * all of the BD control/status set properly.
+	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_RX) | CPM_CR_FLG;
+	while (cp->cp_cpcr & CPM_CR_FLG);
+	 */
+
+	/* Initialize function code registers for big-endian.
+	*/
+	ep->sen_genscc.scc_rfcr = SCC_EB;
+	ep->sen_genscc.scc_tfcr = SCC_EB;
+
+	/* Set maximum bytes per receive buffer.
+	 * This appears to be an Ethernet frame size, not the buffer
+	 * fragment size.  It must be a multiple of four.
+	 */
+	ep->sen_genscc.scc_mrblr = PKT_MAXBLR_SIZE;
+
+	/* Set CRC preset and mask.
+	*/
+	ep->sen_cpres = 0xffffffff;
+	ep->sen_cmask = 0xdebb20e3;
+
+	ep->sen_crcec = 0;	/* CRC Error counter */
+	ep->sen_alec = 0;	/* alignment error counter */
+	ep->sen_disfc = 0;	/* discard frame counter */
+
+	ep->sen_pads = 0x8888;	/* Tx short frame pad character */
+	ep->sen_retlim = 15;	/* Retry limit threshold */
+
+	ep->sen_maxflr = PKT_MAXBUF_SIZE;   /* maximum frame length register */
+	ep->sen_minflr = PKT_MINBUF_SIZE;  /* minimum frame length register */
+
+	ep->sen_maxd1 = PKT_MAXBLR_SIZE;	/* maximum DMA1 length */
+	ep->sen_maxd2 = PKT_MAXBLR_SIZE;	/* maximum DMA2 length */
+
+	/* Clear hash tables.
+	*/
+	ep->sen_gaddr1 = 0;
+	ep->sen_gaddr2 = 0;
+	ep->sen_gaddr3 = 0;
+	ep->sen_gaddr4 = 0;
+	ep->sen_iaddr1 = 0;
+	ep->sen_iaddr2 = 0;
+	ep->sen_iaddr3 = 0;
+	ep->sen_iaddr4 = 0;
+
+	/* Set Ethernet station address.
+	 */
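+	/* The descending loops below store the bytes in reverse order,
+	 * which appears to be how the parameter RAM expects them.
+	 */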
+	eap = (unsigned char *)&(ep->sen_paddrh);
+#ifdef CONFIG_FEC_ENET
+	/* We need a second MAC address if FEC is used by Linux */
+	for (i=5; i>=0; i--)
+		*eap++ = rtdev->dev_addr[i] = (bd->bi_enetaddr[i] |
+					     (i==3 ? 0x80 : 0));
+#else
+	for (i=5; i>=0; i--)
+		*eap++ = rtdev->dev_addr[i] = bd->bi_enetaddr[i];
+#endif
+
+	ep->sen_pper = 0;	/* 'cause the book says so */
+	ep->sen_taddrl = 0;	/* temp address (LSB) */
+	ep->sen_taddrm = 0;
+	ep->sen_taddrh = 0;	/* temp address (MSB) */
+
+	/* Now allocate the host memory pages and initialize the
+	 * buffer descriptors.
+	 */
+	bdp = cep->tx_bd_base;
+	for (i=0; i<TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	bdp = cep->rx_bd_base;
+	k = 0;
+	for (i=0; i<CPM_ENET_RX_PAGES; i++) {
+
+		/* Allocate a page.
+		*/
+		ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, &mem_addr);
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		for (j=0; j<CPM_ENET_RX_FRPPG; j++) {
+			bdp->cbd_sc = BD_ENET_RX_EMPTY | BD_ENET_RX_INTR;
+			bdp->cbd_bufaddr = mem_addr;
+			cep->rx_vaddr[k++] = ba;
+			mem_addr += CPM_ENET_RX_FRSIZE;
+			ba += CPM_ENET_RX_FRSIZE;
+			bdp++;
+		}
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Let's re-initialize the channel now.  We have to do it later
+	 * than the manual describes because we have just now finished
+	 * the BD initialization.
+	 */
+	cp->cp_cpcr = mk_cr_cmd(CPM_CR_ENET, CPM_CR_INIT_TRX) | CPM_CR_FLG;
+	while (cp->cp_cpcr & CPM_CR_FLG);
+
+	cep->skb_cur = cep->skb_dirty = 0;
+
+	sccp->scc_scce = 0xffff;	/* Clear any pending events */
+
+	/* Enable interrupts for transmit error, complete frame
+	 * received, and any transmit buffer for which we have set
+	 * the interrupt flag.
+	 */
+	sccp->scc_sccm = (SCCE_ENET_TXE | SCCE_ENET_RXF | SCCE_ENET_TXB);
+
+	/* Install our interrupt handler.
+	*/
+	rtdev->irq = CPM_IRQ_OFFSET + CPMVEC_ENET;
+	rt_stack_connect(rtdev, &STACK_manager);
+	if ((i = rtdm_irq_request(&cep->irq_handle, rtdev->irq,
+				  scc_enet_interrupt, 0, "rt_mpc8xx_enet", rtdev))) {
+		printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+
+	/* Set GSMR_H to enable all normal operating modes.
+	 * Set GSMR_L to enable Ethernet to MC68160.
+	 */
+	sccp->scc_gsmrh = 0;
+	sccp->scc_gsmrl = (SCC_GSMRL_TCI | SCC_GSMRL_TPL_48 | SCC_GSMRL_TPP_10 | SCC_GSMRL_MODE_ENET);
+
+	/* Set sync/delimiters.
+	*/
+	sccp->scc_dsr = 0xd555;
+
+	/* Set processing mode.  Use Ethernet CRC, catch broadcast, and
+	 * start frame search 22 bit times after RENA.
+	 */
+	sccp->scc_pmsr = (SCC_PMSR_ENCRC | SCC_PMSR_NIB22);
+
+	/* It is now OK to enable the Ethernet transmitter.
+	 * Unfortunately, there are board implementation differences here.
+	 */
+#if   (!defined (PB_ENET_TENA) &&  defined (PC_ENET_TENA))
+	immap->im_ioport.iop_pcpar |=  PC_ENET_TENA;
+	immap->im_ioport.iop_pcdir &= ~PC_ENET_TENA;
+#elif ( defined (PB_ENET_TENA) && !defined (PC_ENET_TENA))
+	cp->cp_pbpar |= PB_ENET_TENA;
+	cp->cp_pbdir |= PB_ENET_TENA;
+#else
+#error Configuration Error: define exactly ONE of PB_ENET_TENA, PC_ENET_TENA
+#endif
+
+#if defined(CONFIG_RPXLITE) || defined(CONFIG_RPXCLASSIC)
+	/* And while we are here, set the configuration to enable ethernet.
+	*/
+	*((volatile uint *)RPX_CSR_ADDR) &= ~BCSR0_ETHLPBK;
+	*((volatile uint *)RPX_CSR_ADDR) |=
+			(BCSR0_ETHEN | BCSR0_COLTESTDIS | BCSR0_FULLDPLXDIS);
+#endif
+
+#ifdef CONFIG_BSEIP
+	/* BSE uses port B and C for PHY control.
+	*/
+	cp->cp_pbpar &= ~(PB_BSE_POWERUP | PB_BSE_FDXDIS);
+	cp->cp_pbdir |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);
+	cp->cp_pbdat |= (PB_BSE_POWERUP | PB_BSE_FDXDIS);
+
+	immap->im_ioport.iop_pcpar &= ~PC_BSE_LOOPBACK;
+	immap->im_ioport.iop_pcdir |= PC_BSE_LOOPBACK;
+	immap->im_ioport.iop_pcso &= ~PC_BSE_LOOPBACK;
+	immap->im_ioport.iop_pcdat &= ~PC_BSE_LOOPBACK;
+#endif
+
+#ifdef CONFIG_FADS
+	cp->cp_pbpar |= PB_ENET_TENA;
+	cp->cp_pbdir |= PB_ENET_TENA;
+
+	/* Enable the EEST PHY.
+	*/
+	*((volatile uint *)BCSR1) &= ~BCSR1_ETHEN;
+#endif
+
+	rtdev->base_addr = (unsigned long)ep;
+
+	/* The CPM Ethernet specific entries in the device structure. */
+	rtdev->open = scc_enet_open;
+	rtdev->hard_start_xmit = scc_enet_start_xmit;
+	rtdev->stop = scc_enet_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->get_stats = scc_enet_get_stats;
+
+	if (!rx_pool_size)
+		rx_pool_size = RX_RING_SIZE * 2;
+
+	if ((i = rt_register_rtnetdev(rtdev))) {
+		printk(KERN_ERR "Couldn't register rtdev\n");
+		rtdm_irq_disable(&cep->irq_handle);
+		rtdm_irq_free(&cep->irq_handle);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+	/* And last, enable the transmit and receive processing.
+	*/
+	sccp->scc_gsmrl |= (SCC_GSMRL_ENR | SCC_GSMRL_ENT);
+
+	printk("%s: CPM ENET Version 0.2 on SCC%d, irq %d, addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       rtdev->name, SCC_ENET+1, rtdev->irq,
+	       rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2],
+	       rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]);
+
+	return 0;
+}
+
+static void __exit scc_enet_cleanup(void)
+{
+	struct rtnet_device *rtdev = rtdev_root;
+	struct scc_enet_private *cep = (struct scc_enet_private *)rtdev->priv;
+	volatile cpm8xx_t *cp = cpmp;
+	volatile scc_enet_t *ep;
+
+	if (rtdev) {
+		rtdm_irq_disable(&cep->irq_handle);
+		rtdm_irq_free(&cep->irq_handle);
+
+		ep = (scc_enet_t *)(&cp->cp_dparam[PROFF_ENET]);
+		m8xx_cpm_dpfree(ep->sen_genscc.scc_rbase);
+		m8xx_cpm_dpfree(ep->sen_genscc.scc_tbase);
+
+		rt_stack_disconnect(rtdev);
+		rt_unregister_rtnetdev(rtdev);
+		rt_rtdev_disconnect(rtdev);
+
+		printk("%s: unloaded\n", rtdev->name);
+		rtdev_free(rtdev);
+		rtdev_root = NULL;
+	}
+}
+
+module_init(scc_enet_init);
+module_exit(scc_enet_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c
new file mode 100644
index 0000000..e57f85a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/mpc8xx_fec.c
@@ -0,0 +1,2341 @@
+/*
+ * BK Id: SCCS/s.fec.c 1.30 09/11/02 14:55:08 paulus
+ */
+/*
+ * Fast Ethernet Controller (FEC) driver for Motorola MPC8xx.
+ * Copyright (c) 1997 Dan Malek (dmalek@jlc.net)
+ *
+ * This version of the driver is specific to the FADS implementation,
+ * since the board contains control registers external to the processor
+ * for the control of the LevelOne LXT970 transceiver.  The MPC860T manual
+ * describes connections using the internal parallel port I/O, which
+ * is basically all of Port D.
+ *
+ * Includes support for the following PHYs: QS6612, LXT970, LXT971/2.
+ *
+ * Right now, I am very wasteful with the buffers.  I allocate memory
+ * pages and then divide them into 2K frame buffers.  This way I know I
+ * have buffers large enough to hold one frame within one buffer descriptor.
+ * Once I get this working, I will use 64 or 128 byte CPM buffers, which
+ * will be much more memory efficient and will easily handle lots of
+ * small packets.
+ *
+ * Much better multiple PHY support by Magnus Damm.
+ * Copyright (c) 2000 Ericsson Radio Systems AB.
+ *
+ * Make use of MII for PHY control configurable.
+ * Some fixes.
+ * Copyright (c) 2000-2002 Wolfgang Denk, DENX Software Engineering.
+ *
+ * Fixes for tx_full condition and relink when using MII.
+ * Support for AMD AM79C874 added.
+ * Thomas Lange, thomas@corelatus.com
+ *
+ * Added code for Multicast support, Frederic Goddeeris, Paul Geerinckx
+ * Copyright (c) 2002 Siemens Atea
+ *
+ * Ported to RTnet from "linuxppc_2_4_devel/arch/ppc/8xx_io/fec.c".
+ * Copyright (c) 2003 Wolfgang Grandegger (wg@denx.de)
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/uaccess.h>
+
+#include <asm/8xx_immap.h>
+#include <asm/pgtable.h>
+#include <asm/mpc8xx.h>
+#include <asm/irq.h>
+#include <asm/bitops.h>
+#include <asm/commproc.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+#error "MDIO for PHY configuration is not yet supported!"
+#endif
+
+#include <rtnet_port.h>
+
+MODULE_AUTHOR("Maintainer: Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTnet driver for the MPC8xx FEC Ethernet");
+MODULE_LICENSE("GPL");
+
+static unsigned int rx_pool_size =  0;
+MODULE_PARM(rx_pool_size, "i");
+MODULE_PARM_DESC(rx_pool_size, "Receive buffer pool size");
+
+#define RT_DEBUG(fmt,args...)
+
+/* multicast support
+ */
+/* #define DEBUG_MULTICAST */
+
+/* CRC polynomial used by the FEC for multicast group filtering
+ */
+#define FEC_CRC_POLY   0x04C11DB7
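+/* (This is the standard IEEE 802.3 CRC-32 generator polynomial.) */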
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Forward declarations of some structures to support different PHYs
+*/
+
+typedef struct {
+	uint mii_data;
+	void (*funct)(uint mii_reg, struct net_device *dev, uint data);
+} phy_cmd_t;
+
+typedef struct {
+	uint id;
+	char *name;
+
+	const phy_cmd_t *config;
+	const phy_cmd_t *startup;
+	const phy_cmd_t *ack_int;
+	const phy_cmd_t *shutdown;
+} phy_info_t;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* The number of Tx and Rx buffers.  These are allocated from the page
+ * pool.  The code may assume these are powers of two, so it is best
+ * to keep them that way.
+ * We don't need to allocate pages for the transmitter.  We just use
+ * the skbuffer directly.
+ */
+#define FEC_ENET_RX_PAGES	4
+#define FEC_ENET_RX_FRSIZE	2048
+#define FEC_ENET_RX_FRPPG	(PAGE_SIZE / FEC_ENET_RX_FRSIZE)
+#define RX_RING_SIZE		(FEC_ENET_RX_FRPPG * FEC_ENET_RX_PAGES)
+#define TX_RING_SIZE		8	/* Must be power of two */
+#define TX_RING_MOD_MASK	7	/*   for this to work */
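+
+/* With the usual 4 KiB pages this works out to RX_RING_SIZE =
+ * (4096 / 2048) * 4 = 8 receive buffers.  Ring indices wrap via
+ * "index & TX_RING_MOD_MASK", which only works because TX_RING_SIZE
+ * is a power of two: (7 + 1) & 7 == 0.
+ */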
+
+/* Interrupt events/masks.
+*/
+#define FEC_ENET_HBERR	((uint)0x80000000)	/* Heartbeat error */
+#define FEC_ENET_BABR	((uint)0x40000000)	/* Babbling receiver */
+#define FEC_ENET_BABT	((uint)0x20000000)	/* Babbling transmitter */
+#define FEC_ENET_GRA	((uint)0x10000000)	/* Graceful stop complete */
+#define FEC_ENET_TXF	((uint)0x08000000)	/* Full frame transmitted */
+#define FEC_ENET_TXB	((uint)0x04000000)	/* A buffer was transmitted */
+#define FEC_ENET_RXF	((uint)0x02000000)	/* Full frame received */
+#define FEC_ENET_RXB	((uint)0x01000000)	/* A buffer was received */
+#define FEC_ENET_MII	((uint)0x00800000)	/* MII interrupt */
+#define FEC_ENET_EBERR	((uint)0x00400000)	/* SDMA bus error */
+
+/* FEC control register bit definitions.
+*/
+#define FEC_ECNTRL_PINMUX	0x00000004
+#define FEC_ECNTRL_ETHER_EN	0x00000002
+#define FEC_ECNTRL_RESET	0x00000001
+
+#define FEC_RCNTRL_BC_REJ	0x00000010
+#define FEC_RCNTRL_PROM		0x00000008
+#define FEC_RCNTRL_MII_MODE	0x00000004
+#define FEC_RCNTRL_DRT		0x00000002
+#define FEC_RCNTRL_LOOP		0x00000001
+
+#define FEC_TCNTRL_FDEN		0x00000004
+#define FEC_TCNTRL_HBC		0x00000002
+#define FEC_TCNTRL_GTS		0x00000001
+
+/* Delay to wait for FEC reset command to complete (in us)
+*/
+#define FEC_RESET_DELAY		50
+
+/* The FEC stores dest/src/type, data, and checksum for receive packets.
+ */
+#define PKT_MAXBUF_SIZE		1518
+#define PKT_MINBUF_SIZE		64
+#define PKT_MAXBLR_SIZE		1520
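+
+/* Derivation of the values above: 1518 = 6 (dest) + 6 (src) + 2 (type)
+ * + 1500 (payload) + 4 (FCS); 1520 is 1518 rounded up to a multiple of
+ * four, as required for the maximum receive buffer length.
+ */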
+
+/* The FEC buffer descriptors track the ring buffers.  The rx_bd_base and
+ * tx_bd_base always point to the base of the buffer descriptors.  The
+ * cur_rx and cur_tx point to the currently available buffer.
+ * The dirty_tx tracks the current buffer that is being sent by the
+ * controller.  The cur_tx and dirty_tx are equal under both completely
+ * empty and completely full conditions.  The empty/ready indicator in
+ * the buffer descriptor determines the actual condition.
+ */
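+/*
+ * Illustrative sketch (not used by the driver itself): with
+ * cur_tx == dirty_tx, the ring is full when the descriptor at
+ * dirty_tx still has BD_ENET_TX_READY set (queued but not yet
+ * sent), and empty when it does not:
+ *
+ *	full  = (cur_tx == dirty_tx) && (dirty_tx->cbd_sc & BD_ENET_TX_READY);
+ *	empty = (cur_tx == dirty_tx) && !full;
+ */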
+struct fec_enet_private {
+	/* The addresses of Tx/Rx-in-place packets/buffers. */
+	struct	rtskb *tx_skbuff[TX_RING_SIZE];
+	ushort	skb_cur;
+	ushort	skb_dirty;
+
+	/* CPM dual port RAM relative addresses.
+	*/
+	cbd_t	*rx_bd_base;		/* Address of Rx and Tx buffers. */
+	cbd_t	*tx_bd_base;
+	cbd_t	*cur_rx, *cur_tx;		/* The next free ring entry */
+	cbd_t	*dirty_tx;	/* The ring entries to be free()ed. */
+
+	/* Virtual addresses for the receive buffers because we can't
+	 * do a __va() on them anymore.
+	 */
+	unsigned char *rx_vaddr[RX_RING_SIZE];
+
+	struct	net_device_stats stats;
+	uint	tx_full;
+	rtdm_lock_t lock;
+	rtdm_irq_t irq_handle;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	uint	phy_id;
+	uint	phy_id_done;
+	uint	phy_status;
+	uint	phy_speed;
+	phy_info_t	*phy;
+	struct tq_struct phy_task;
+
+	uint	sequence_done;
+
+	uint	phy_addr;
+
+	struct timer_list phy_timer_list;
+	u16 old_status;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	int	link;
+	int	old_link;
+	int	full_duplex;
+
+};
+
+static int  fec_enet_open(struct rtnet_device *rtdev);
+static int  fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+static void fec_enet_tx(struct rtnet_device *rtdev);
+static void fec_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp);
+static int fec_enet_interrupt(rtdm_irq_t *irq_handle);
+static int  fec_enet_close(struct rtnet_device *dev);
+static void fec_restart(struct rtnet_device *rtdev, int duplex);
+static void fec_stop(struct rtnet_device *rtdev);
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void fec_enet_mii(struct net_device *dev);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+static struct net_device_stats *fec_enet_get_stats(struct rtnet_device *rtdev);
+#ifdef ORIGINAL_VERSION
+static void set_multicast_list(struct net_device *dev);
+#endif /* ORIGINAL_VERSION */
+
+static struct rtnet_device *rtdev_root = NULL; /* for cleanup */
+
+static	ushort	my_enet_addr[3];
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_ethtool_ioctl(struct net_device *dev, void *useraddr);
+
+static void mdio_callback(uint regval, struct net_device *dev, uint data);
+static int mdio_read(struct net_device *dev, int phy_id, int location);
+
+#if defined(CONFIG_FEC_DP83846A)
+static void mdio_timer_callback(unsigned long data);
+#endif /* CONFIG_FEC_DP83846A */
+
+/* MII processing.  We keep this as simple as possible.  Requests are
+ * placed on the list (if there is room).  When the request is finished
+ * by the MII, an optional function may be called.
+ */
+typedef struct mii_list {
+	uint	mii_regval;
+	void	(*mii_func)(uint val, struct net_device *dev, uint data);
+	struct	mii_list *mii_next;
+	uint	mii_data;
+} mii_list_t;
+
+#define		NMII	20
+mii_list_t	mii_cmds[NMII];
+mii_list_t	*mii_free;
+mii_list_t	*mii_head;
+mii_list_t	*mii_tail;
+
+typedef struct mdio_read_data {
+	u16 regval;
+	struct task_struct *sleeping_task;
+} mdio_read_data_t;
+
+static int	mii_queue(struct net_device *dev, int request,
+				void (*func)(uint, struct net_device *, uint), uint data);
+static void mii_queue_relink(uint mii_reg, struct net_device *dev, uint data);
+
+/* Make MII read/write commands for the FEC.
+*/
+#define mk_mii_read(REG)	(0x60020000 | ((REG & 0x1f) << 18))
+#define mk_mii_write(REG, VAL)	(0x50020000 | ((REG & 0x1f) << 18) | \
+						(VAL & 0xffff))
+#define mk_mii_end	0
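+
+/* Example, derived from the macros above: mk_mii_read(MII_REG_SR), with
+ * MII_REG_SR == 1, yields 0x60020000 | (1 << 18) == 0x60060000, i.e. an
+ * MII management frame with ST=01, OP=10 (read) and TA=10 preset.  The
+ * PHY address is OR'ed into bits 27:23 later by mii_queue().
+ */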
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+/* Transmitter timeout.
+*/
+#define TX_TIMEOUT (2*HZ)
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* Register definitions for the PHY.
+*/
+
+#define MII_REG_CR          0  /* Control Register                         */
+#define MII_REG_SR          1  /* Status Register                          */
+#define MII_REG_PHYIR1      2  /* PHY Identification Register 1            */
+#define MII_REG_PHYIR2      3  /* PHY Identification Register 2            */
+#define MII_REG_ANAR        4  /* A-N Advertisement Register               */
+#define MII_REG_ANLPAR      5  /* A-N Link Partner Ability Register        */
+#define MII_REG_ANER        6  /* A-N Expansion Register                   */
+#define MII_REG_ANNPTR      7  /* A-N Next Page Transmit Register          */
+#define MII_REG_ANLPRNPR    8  /* A-N Link Partner Received Next Page Reg. */
+
+/* values for phy_status */
+
+#define PHY_CONF_ANE	0x0001  /* 1 auto-negotiation enabled */
+#define PHY_CONF_LOOP	0x0002  /* 1 loopback mode enabled */
+#define PHY_CONF_SPMASK	0x00f0  /* mask for speed */
+#define PHY_CONF_10HDX	0x0010  /* 10 Mbit half duplex supported */
+#define PHY_CONF_10FDX	0x0020  /* 10 Mbit full duplex supported */
+#define PHY_CONF_100HDX	0x0040  /* 100 Mbit half duplex supported */
+#define PHY_CONF_100FDX	0x0080  /* 100 Mbit full duplex supported */
+
+#define PHY_STAT_LINK	0x0100  /* 1 up - 0 down */
+#define PHY_STAT_FAULT	0x0200  /* 1 remote fault */
+#define PHY_STAT_ANC	0x0400  /* 1 auto-negotiation complete	*/
+#define PHY_STAT_SPMASK	0xf000  /* mask for speed */
+#define PHY_STAT_10HDX	0x1000  /* 10 Mbit half duplex selected	*/
+#define PHY_STAT_10FDX	0x2000  /* 10 Mbit full duplex selected	*/
+#define PHY_STAT_100HDX	0x4000  /* 100 Mbit half duplex selected */
+#define PHY_STAT_100FDX	0x8000  /* 100 Mbit full duplex selected */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+static int
+fec_enet_start_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep;
+	volatile fec_t	*fecp;
+	volatile cbd_t	*bdp;
+	rtdm_lockctx_t	context;
+
+
+	RT_DEBUG("%s: ...\n", __FUNCTION__);
+
+	fep = rtdev->priv;
+	fecp = (volatile fec_t*)rtdev->base_addr;
+
+	if (!fep->link) {
+		/* Link is down or autonegotiation is in progress. */
+		return 1;
+	}
+
+	/* Fill in a Tx ring entry */
+	bdp = fep->cur_tx;
+
+#ifndef final_version
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		/* Oops.  All transmit buffers are full.  Bail out.
+		 * This should not happen, since dev->tbusy should be set.
+		 */
+		rtdm_printk("%s: tx queue full!.\n", rtdev->name);
+		return 1;
+	}
+#endif
+
+	/* Clear all of the status flags.
+	 */
+	bdp->cbd_sc &= ~BD_ENET_TX_STATS;
+
+	/* Set buffer length and buffer pointer.
+	*/
+	bdp->cbd_bufaddr = __pa(skb->data);
+	bdp->cbd_datlen = skb->len;
+
+	/* Save skb pointer.
+	*/
+	fep->tx_skbuff[fep->skb_cur] = skb;
+
+	fep->stats.tx_bytes += skb->len;
+	fep->skb_cur = (fep->skb_cur+1) & TX_RING_MOD_MASK;
+
+	rtdm_lock_get_irqsave(&fep->lock, context);
+
+	/* Get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+
+	/* Push the data cache so the CPM does not get stale memory
+	 * data.
+	 */
+	flush_dcache_range((unsigned long)skb->data,
+			   (unsigned long)skb->data + skb->len);
+
+	/* Send it on its way.  Tell the FEC it's ready, interrupt when done,
+	 * that it's the last BD of the frame, and to put the CRC on the end.
+	 */
+
+	bdp->cbd_sc |= (BD_ENET_TX_READY | BD_ENET_TX_INTR
+			| BD_ENET_TX_LAST | BD_ENET_TX_TC);
+
+	//rtdev->trans_start = jiffies;
+
+	/* Trigger transmission start */
+	fecp->fec_x_des_active = 0x01000000;
+
+	/* If this was the last BD in the ring, start at the beginning again.
+	*/
+	if (bdp->cbd_sc & BD_ENET_TX_WRAP) {
+		bdp = fep->tx_bd_base;
+	} else {
+		bdp++;
+	}
+
+	if (bdp->cbd_sc & BD_ENET_TX_READY) {
+		rtnetif_stop_queue(rtdev);
+		fep->tx_full = 1;
+	}
+
+	fep->cur_tx = (cbd_t *)bdp;
+
+	rtdm_lock_put_irqrestore(&fep->lock, context);
+
+	return 0;
+}
+
+#ifdef ORIGINAL_VERSION
+static void
+fec_timeout(struct net_device *dev)
+{
+	struct fec_enet_private *fep = dev->priv;
+
+	if (fep->link || fep->old_link) {
+		/* Link status changed - print timeout message */
+		printk("%s: transmit timed out.\n", rtdev->name);
+	}
+
+	fep->stats.tx_errors++;
+#ifndef final_version
+	if (fep->link) {
+		int	i;
+		cbd_t	*bdp;
+
+		printk ("Ring data dump: "
+			"cur_tx %p%s dirty_tx %p cur_rx %p\n",
+		       fep->cur_tx,
+		       fep->tx_full ? " (full)" : "",
+		       fep->dirty_tx,
+		       fep->cur_rx);
+
+		bdp = fep->tx_bd_base;
+		printk(" tx: %u buffers\n",  TX_RING_SIZE);
+		for (i = 0 ; i < TX_RING_SIZE; i++) {
+			printk("  %08x: %04x %04x %08x\n",
+			       (uint) bdp,
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+			bdp++;
+		}
+
+		bdp = fep->rx_bd_base;
+		printk(" rx: %lu buffers\n",  RX_RING_SIZE);
+		for (i = 0 ; i < RX_RING_SIZE; i++) {
+			printk("  %08x: %04x %04x %08x\n",
+			       (uint) bdp,
+			       bdp->cbd_sc,
+			       bdp->cbd_datlen,
+			       bdp->cbd_bufaddr);
+			bdp++;
+		}
+	}
+#endif
+	if (!fep->tx_full) {
+		netif_wake_queue(dev);
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* The interrupt handler.
+ * This is called from the MPC core interrupt.
+ */
+static int fec_enet_interrupt(rtdm_irq_t *irq_handle)
+{
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device);
+	int packets = 0;
+	volatile fec_t	*fecp;
+	uint	int_events;
+	nanosecs_abs_t time_stamp = rtdm_clock_read();
+
+
+	fecp = (volatile fec_t*)rtdev->base_addr;
+
+	/* Get the interrupt events that caused us to be here.
+	*/
+	while ((int_events = fecp->fec_ievent) != 0) {
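+		/* Writing the event bits back clears them (write-one-to-clear). */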
+		fecp->fec_ievent = int_events;
+		if ((int_events & (FEC_ENET_HBERR | FEC_ENET_BABR |
+				   FEC_ENET_BABT | FEC_ENET_EBERR)) != 0) {
+			rtdm_printk("FEC ERROR %x\n", int_events);
+		}
+
+		/* Handle receive event in its own function.
+		 */
+		if (int_events & FEC_ENET_RXF) {
+			fec_enet_rx(rtdev, &packets, &time_stamp);
+		}
+
+		/* Transmit OK, or non-fatal error. Update the buffer
+		   descriptors. FEC handles all errors, we just discover
+		   them as part of the transmit process.
+		*/
+		if (int_events & FEC_ENET_TXF) {
+			fec_enet_tx(rtdev);
+		}
+
+		if (int_events & FEC_ENET_MII) {
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+			fec_enet_mii(dev);
+#else
+		rtdm_printk("%s[%d] %s: unexpected FEC_ENET_MII event\n",
+			__FILE__,__LINE__,__FUNCTION__);
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+		}
+
+	}
+
+	if (packets > 0)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
+
+
+static void
+fec_enet_tx(struct rtnet_device *rtdev)
+{
+	struct rtskb *skb;
+	struct	fec_enet_private *fep = rtdev->priv;
+	volatile cbd_t	*bdp;
+	rtdm_lock_get(&fep->lock);
+	bdp = fep->dirty_tx;
+
+	while ((bdp->cbd_sc&BD_ENET_TX_READY) == 0) {
+		if (bdp == fep->cur_tx && fep->tx_full == 0) break;
+
+		skb = fep->tx_skbuff[fep->skb_dirty];
+		/* Check for errors. */
+		if (bdp->cbd_sc & (BD_ENET_TX_HB | BD_ENET_TX_LC |
+				   BD_ENET_TX_RL | BD_ENET_TX_UN |
+				   BD_ENET_TX_CSL)) {
+			fep->stats.tx_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_HB)  /* No heartbeat */
+				fep->stats.tx_heartbeat_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_LC)  /* Late collision */
+				fep->stats.tx_window_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_RL)  /* Retrans limit */
+				fep->stats.tx_aborted_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_UN)  /* Underrun */
+				fep->stats.tx_fifo_errors++;
+			if (bdp->cbd_sc & BD_ENET_TX_CSL) /* Carrier lost */
+				fep->stats.tx_carrier_errors++;
+		} else {
+			fep->stats.tx_packets++;
+		}
+
+#ifndef final_version
+		if (bdp->cbd_sc & BD_ENET_TX_READY)
+			rtdm_printk("HEY! Enet xmit interrupt and TX_READY.\n");
+#endif
+		/* Deferred means some collisions occurred during transmit,
+		 * but we eventually sent the packet OK.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_DEF)
+			fep->stats.collisions++;
+
+		/* Free the sk buffer associated with this last transmit.
+		 */
+		dev_kfree_rtskb(skb);
+		fep->tx_skbuff[fep->skb_dirty] = NULL;
+		fep->skb_dirty = (fep->skb_dirty + 1) & TX_RING_MOD_MASK;
+
+		/* Update pointer to next buffer descriptor to be transmitted.
+		 */
+		if (bdp->cbd_sc & BD_ENET_TX_WRAP)
+			bdp = fep->tx_bd_base;
+		else
+			bdp++;
+
+		/* Since we have freed up a buffer, the ring is no longer
+		 * full.
+		 */
+		if (fep->tx_full) {
+			fep->tx_full = 0;
+			if (rtnetif_queue_stopped(rtdev))
+				rtnetif_wake_queue(rtdev);
+		}
+	}
+	fep->dirty_tx = (cbd_t *)bdp;
+	rtdm_lock_put(&fep->lock);
+}
+
+
+/* During a receive, the cur_rx points to the current incoming buffer.
+ * When we update through the ring, if the next incoming buffer has
+ * not been given to the system, we just set the empty indicator,
+ * effectively tossing the packet.
+ */
+static void
+fec_enet_rx(struct rtnet_device *rtdev, int *packets, nanosecs_abs_t *time_stamp)
+{
+	struct	fec_enet_private *fep;
+	volatile fec_t	*fecp;
+	volatile cbd_t *bdp;
+	struct	rtskb *skb;
+	ushort	pkt_len;
+	__u8 *data;
+
+	fep = rtdev->priv;
+	fecp = (volatile fec_t*)rtdev->base_addr;
+
+	/* First, grab all of the stats for the incoming packet.
+	 * These get messed up if we get called due to a busy condition.
+	 */
+	bdp = fep->cur_rx;
+
+while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) {
+
+#ifndef final_version
+	/* Since we have allocated space to hold a complete frame,
+	 * the last indicator should be set.
+	 */
+	if ((bdp->cbd_sc & BD_ENET_RX_LAST) == 0)
+		rtdm_printk("FEC ENET: rcv is not +last\n");
+#endif
+
+	/* Check for errors. */
+	if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH | BD_ENET_RX_NO |
+			   BD_ENET_RX_CR | BD_ENET_RX_OV)) {
+		fep->stats.rx_errors++;
+		if (bdp->cbd_sc & (BD_ENET_RX_LG | BD_ENET_RX_SH)) {
+		/* Frame too long or too short. */
+			fep->stats.rx_length_errors++;
+		}
+		if (bdp->cbd_sc & BD_ENET_RX_NO)	/* Frame alignment */
+			fep->stats.rx_frame_errors++;
+		if (bdp->cbd_sc & BD_ENET_RX_CR)	/* CRC Error */
+			fep->stats.rx_crc_errors++;
+		if (bdp->cbd_sc & BD_ENET_RX_OV)	/* FIFO overrun */
+			fep->stats.rx_crc_errors++;
+	}
+
+	/* Report late collisions as a frame error.
+	 * On this error, the BD is closed, but we don't know what we
+	 * have in the buffer.  So, just drop this frame on the floor.
+	 */
+	if (bdp->cbd_sc & BD_ENET_RX_CL) {
+		fep->stats.rx_errors++;
+		fep->stats.rx_frame_errors++;
+		goto rx_processing_done;
+	}
+
+	/* Process the incoming frame.
+	 */
+	fep->stats.rx_packets++;
+	pkt_len = bdp->cbd_datlen;
+	fep->stats.rx_bytes += pkt_len;
+	data = fep->rx_vaddr[bdp - fep->rx_bd_base];
+
+	/* This does 16 byte alignment, exactly what we need.
+	 * The packet length includes FCS, but we don't want to
+	 * include that when passing upstream as it messes up
+	 * bridging applications.
+	 */
+	skb = rtnetdev_alloc_rtskb(rtdev, pkt_len-4);
+
+	if (skb == NULL) {
+		rtdm_printk("%s: Memory squeeze, dropping packet.\n", rtdev->name);
+		fep->stats.rx_dropped++;
+	} else {
+		rtskb_put(skb,pkt_len-4); /* Make room */
+		memcpy(skb->data, data, pkt_len-4);
+		skb->protocol=rt_eth_type_trans(skb,rtdev);
+		skb->time_stamp = *time_stamp;
+		rtnetif_rx(skb);
+		(*packets)++;
+	}
+rx_processing_done:
+
+	/* Clear the status flags for this buffer.
+	*/
+	bdp->cbd_sc &= ~BD_ENET_RX_STATS;
+
+	/* Mark the buffer empty.
+	*/
+	bdp->cbd_sc |= BD_ENET_RX_EMPTY;
+
+	/* Update BD pointer to next entry.
+	*/
+	if (bdp->cbd_sc & BD_ENET_RX_WRAP)
+		bdp = fep->rx_bd_base;
+	else
+		bdp++;
+
+	/* Doing this here will keep the FEC running while we process
+	 * incoming frames.  On a heavily loaded network, we should be
+	 * able to keep up at the expense of system resources.
+	 */
+	fecp->fec_r_des_active = 0x01000000;
+   } /* while (!(bdp->cbd_sc & BD_ENET_RX_EMPTY)) */
+	fep->cur_rx = (cbd_t *)bdp;
+
+}
+
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+static void
+fec_enet_mii(struct net_device *dev)
+{
+	struct	fec_enet_private *fep;
+	volatile fec_t	*ep;
+	mii_list_t	*mip;
+	uint		mii_reg;
+
+	fep = (struct fec_enet_private *)dev->priv;
+	ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
+	mii_reg = ep->fec_mii_data;
+
+	if ((mip = mii_head) == NULL) {
+		printk("MII and no head!\n");
+		return;
+	}
+
+	if (mip->mii_func != NULL)
+		(*(mip->mii_func))(mii_reg, dev, mip->mii_data);
+
+	mii_head = mip->mii_next;
+	mip->mii_next = mii_free;
+	mii_free = mip;
+
+	if ((mip = mii_head) != NULL) {
+		ep->fec_mii_data = mip->mii_regval;
+	}
+}
+
+static int
+mii_queue(struct net_device *dev, int regval, void (*func)(uint, struct net_device *, uint), uint data)
+{
+	struct fec_enet_private *fep;
+	unsigned long	flags;
+	mii_list_t	*mip;
+	int		retval;
+
+	/* Add PHY address to register command.
+	*/
+	fep = dev->priv;
+	regval |= fep->phy_addr << 23;
+
+	retval = 0;
+
+	save_flags(flags);
+	cli();
+
+	if ((mip = mii_free) != NULL) {
+		mii_free = mip->mii_next;
+		mip->mii_regval = regval;
+		mip->mii_func = func;
+		mip->mii_next = NULL;
+		mip->mii_data = data;
+		if (mii_head) {
+			mii_tail->mii_next = mip;
+			mii_tail = mip;
+		} else {
+			mii_head = mii_tail = mip;
+			(&(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec))->fec_mii_data = regval;
+		}
+	} else {
+		retval = 1;
+	}
+
+	restore_flags(flags);
+
+	return(retval);
+}
+
+static void mii_do_cmd(struct net_device *dev, const phy_cmd_t *c)
+{
+	int k;
+
+	if(!c)
+		return;
+
+	for(k = 0; (c+k)->mii_data != mk_mii_end; k++)
+		mii_queue(dev, (c+k)->mii_data, (c+k)->funct, 0);
+}
+
+static void mii_parse_sr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_LINK | PHY_STAT_FAULT | PHY_STAT_ANC);
+
+	if (mii_reg & 0x0004)
+		s |= PHY_STAT_LINK;
+	if (mii_reg & 0x0010)
+		s |= PHY_STAT_FAULT;
+	if (mii_reg & 0x0020)
+		s |= PHY_STAT_ANC;
+
+	fep->phy_status = s;
+	fep->link = (s & PHY_STAT_LINK) ? 1 : 0;
+}
+
+static void mii_parse_cr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_ANE | PHY_CONF_LOOP);
+
+	if (mii_reg & 0x1000)
+		s |= PHY_CONF_ANE;
+	if (mii_reg & 0x4000)
+		s |= PHY_CONF_LOOP;
+
+	fep->phy_status = s;
+}
+
+static void mii_parse_anar(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_CONF_SPMASK);
+
+	if (mii_reg & 0x0020)
+		s |= PHY_CONF_10HDX;
+	if (mii_reg & 0x0040)
+		s |= PHY_CONF_10FDX;
+	if (mii_reg & 0x0080)
+		s |= PHY_CONF_100HDX;
+	if (mii_reg & 0x0100)
+		s |= PHY_CONF_100FDX;
+
+	fep->phy_status = s;
+}
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT970 is used by many boards				     */
+
+#ifdef CONFIG_FEC_LXT970
+
+#define MII_LXT970_MIRROR    16  /* Mirror register           */
+#define MII_LXT970_IER       17  /* Interrupt Enable Register */
+#define MII_LXT970_ISR       18  /* Interrupt Status Register */
+#define MII_LXT970_CONFIG    19  /* Configuration Register    */
+#define MII_LXT970_CSR       20  /* Chip Status Register      */
+
+static void mii_parse_lxt970_csr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0800) {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	else {
+		if (mii_reg & 0x1000)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt970 = {
+	0x07810000,
+	"LXT970",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0002), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* read SR and ISR to acknowledge */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT970_ISR), NULL },
+
+		/* find out the current status */
+
+		{ mk_mii_read(MII_LXT970_CSR), mii_parse_lxt970_csr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT970_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_LXT970 */
+
+/* ------------------------------------------------------------------------- */
+/* The Level one LXT971 is used on some of my custom boards                  */
+
+#ifdef CONFIG_FEC_LXT971
+
+/* register definitions for the 971 */
+
+#define MII_LXT971_PCR       16  /* Port Control Register     */
+#define MII_LXT971_SR2       17  /* Status Register 2         */
+#define MII_LXT971_IER       18  /* Interrupt Enable Register */
+#define MII_LXT971_ISR       19  /* Interrupt Status Register */
+#define MII_LXT971_LCR       20  /* LED Control Register      */
+#define MII_LXT971_TCR       30  /* Transmit Control Register */
+
+/*
+ * I had some nice ideas of running the MDIO faster...
+ * The 971 should support 8MHz and I tried it, but things acted really
+ * weird, so 2.5 MHz ought to be enough for anyone...
+ */
+
+static void mii_parse_lxt971_sr2(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x4000) {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+	else {
+		if (mii_reg & 0x0200)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	if (mii_reg & 0x0008)
+		s |= PHY_STAT_FAULT;
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_lxt971 = {
+	0x0001378e,
+	"LXT971",
+
+	(const phy_cmd_t []) {  /* config */
+//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x00f2), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+
+		/* Somehow the 971 reports the link as down on the first
+		 * read after power-up.
+		 * Read here to get a valid value in ack_int. */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_LXT971_SR2), mii_parse_lxt971_sr2 },
+
+		/* we only need to read ISR to acknowledge */
+
+		{ mk_mii_read(MII_LXT971_ISR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_LXT971_IER, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_LXT971 */
+
+
+/* ------------------------------------------------------------------------- */
+/* The Quality Semiconductor QS6612 is used on the RPX CLLF                  */
+
+#ifdef CONFIG_FEC_QS6612
+
+/* register definitions */
+
+#define MII_QS6612_MCR       17  /* Mode Control Register      */
+#define MII_QS6612_FTR       27  /* Factory Test Register      */
+#define MII_QS6612_MCO       28  /* Misc. Control Register     */
+#define MII_QS6612_ISR       29  /* Interrupt Source Register  */
+#define MII_QS6612_IMR       30  /* Interrupt Mask Register    */
+#define MII_QS6612_PCR       31  /* 100BaseTx PHY Control Reg. */
+
+static void mii_parse_qs6612_pcr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	switch((mii_reg >> 2) & 7) {
+	case 1: s |= PHY_STAT_10HDX;  break;
+	case 2: s |= PHY_STAT_100HDX; break;
+	case 5: s |= PHY_STAT_10FDX;  break;
+	case 6: s |= PHY_STAT_100FDX; break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_qs6612 = {
+	0x00181440,
+	"QS6612",
+
+	(const phy_cmd_t []) {  /* config */
+//	{ mk_mii_write(MII_REG_ANAR, 0x061), NULL }, /* 10  Mbps */
+
+		/* The PHY powers up isolated on the RPX,
+		 * so send a command to allow operation.
+		 */
+
+		{ mk_mii_write(MII_QS6612_PCR, 0x0dc0), NULL },
+
+		/* parse cr and anar to get some info */
+
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x003a), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+
+		/* we need to read ISR, SR and ANER to acknowledge */
+
+		{ mk_mii_read(MII_QS6612_ISR), NULL },
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_ANER), NULL },
+
+		/* read pcr to get info */
+
+		{ mk_mii_read(MII_QS6612_PCR), mii_parse_qs6612_pcr },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_QS6612_IMR, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_QS6612 */
+
+/* ------------------------------------------------------------------------- */
+/* The Advanced Micro Devices AM79C874 is used on the ICU862		     */
+
+#ifdef CONFIG_FEC_AM79C874
+
+/* register definitions for the 79C874 */
+
+#define MII_AM79C874_MFR	16  /* Miscellaneous Features Register      */
+#define MII_AM79C874_ICSR	17  /* Interrupt Control/Status Register    */
+#define MII_AM79C874_DR		18  /* Diagnostic Register		    */
+#define MII_AM79C874_PMLR	19  /* Power Management & Loopback Register */
+#define MII_AM79C874_MCR	21  /* Mode Control Register		    */
+#define MII_AM79C874_DC		23  /* Disconnect Counter		    */
+#define MII_AM79C874_REC	24  /* Receiver Error Counter		    */
+
+static void mii_parse_amd79c874_dr(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	/* Register 18: Bit 10 is data rate, 11 is Duplex */
+	switch ((mii_reg >> 10) & 3) {
+	case 0:	s |= PHY_STAT_10HDX;	break;
+	case 1:	s |= PHY_STAT_100HDX;	break;
+	case 2:	s |= PHY_STAT_10FDX;	break;
+	case 3:	s |= PHY_STAT_100FDX;	break;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_amd79c874 = {
+	0x00022561,
+	"AM79C874",
+
+	(const phy_cmd_t []) {  /* config */
+//		{ mk_mii_write(MII_REG_ANAR, 0x021), NULL }, /* 10  Mbps, HD */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup - enable interrupts */
+		{ mk_mii_write(MII_AM79C874_ICSR, 0xff00), NULL },
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* autonegotiate */
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		/* find out the current status */
+
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_AM79C874_DR), mii_parse_amd79c874_dr },
+
+		/* we only need to read ICSR to acknowledge */
+
+		{ mk_mii_read(MII_AM79C874_ICSR), NULL },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_write(MII_AM79C874_ICSR, 0x0000), NULL },
+		{ mk_mii_end, }
+	},
+};
+
+#endif /* CONFIG_FEC_AM79C874 */
+
+/* -------------------------------------------------------------------- */
+/* The National Semiconductor DP83843BVJE is used on a Mediatrix board  */
+/* -------------------------------------------------------------------- */
+
+#ifdef CONFIG_FEC_DP83843
+
+/* Register definitions */
+#define MII_DP83843_PHYSTS 0x10  /* PHY Status Register */
+#define MII_DP83843_MIPSCR 0x11  /* Specific Status Register */
+#define MII_DP83843_MIPGSR 0x12  /* Generic Status Register */
+
+static void mii_parse_dp83843_physts(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0002)
+	{
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	else
+	{
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+
+	fep->phy_status = s;
+}
+
+static phy_info_t phy_info_dp83843 = {
+	0x020005c1,
+	"DP83843BVJE",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL  }, /* Auto-Negotiation Advertisement set to       */
+							       /* auto-negotiate 10/100 Mbps, Half/Full duplex */
+		{ mk_mii_read(MII_REG_CR),   mii_parse_cr   },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_DP83843_MIPSCR, 0x0002), NULL }, /* Enable interrupts */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL         }, /* Enable and Restart Auto-Negotiation */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr		 },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_DP83843_PHYSTS), mii_parse_dp83843_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		{ mk_mii_read(MII_DP83843_MIPGSR), NULL },  /* Acknowledge interrupts */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },  /* Find out the current status */
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr },
+		{ mk_mii_read(MII_DP83843_PHYSTS), mii_parse_dp83843_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_end, }
+	}
+};
+
+#endif /* CONFIG_FEC_DP83843 */
+
+
+/* ----------------------------------------------------------------- */
+/* The National Semiconductor DP83846A is used on a Mediatrix board  */
+/* ----------------------------------------------------------------- */
+
+#ifdef CONFIG_FEC_DP83846A
+
+/* Register definitions */
+#define MII_DP83846A_PHYSTS 0x10  /* PHY Status Register */
+
+static void mii_parse_dp83846a_physts(uint mii_reg, struct net_device *dev, uint data)
+{
+	volatile struct fec_enet_private *fep = (struct fec_enet_private *)dev->priv;
+	uint s = fep->phy_status;
+	int link_change_mask;
+
+	s &= ~(PHY_STAT_SPMASK);
+
+	if (mii_reg & 0x0002) {
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_10FDX;
+		else
+			s |= PHY_STAT_10HDX;
+	}
+	else {
+		if (mii_reg & 0x0004)
+			s |= PHY_STAT_100FDX;
+		else
+			s |= PHY_STAT_100HDX;
+	}
+
+	fep->phy_status = s;
+
+	link_change_mask = PHY_STAT_LINK | PHY_STAT_10FDX | PHY_STAT_10HDX | PHY_STAT_100FDX | PHY_STAT_100HDX;
+	if(fep->old_status != (link_change_mask & s))
+	{
+		fep->old_status = (link_change_mask & s);
+		mii_queue_relink(mii_reg, dev, 0);
+	}
+}
+
+static phy_info_t phy_info_dp83846a = {
+	0x020005c2,
+	"DP83846A",
+
+	(const phy_cmd_t []) {  /* config */
+		{ mk_mii_write(MII_REG_ANAR, 0x01E1), NULL  }, /* Auto-Negotiation Advertisement set to       */
+							       /* auto-negotiate 10/100 Mbps, Half/Full duplex */
+		{ mk_mii_read(MII_REG_CR),   mii_parse_cr   },
+		{ mk_mii_read(MII_REG_ANAR), mii_parse_anar },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* startup */
+		{ mk_mii_write(MII_REG_CR, 0x1200), NULL }, /* Enable and Restart Auto-Negotiation */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83846A_PHYSTS), mii_parse_dp83846a_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) { /* ack_int */
+		{ mk_mii_read(MII_REG_SR), mii_parse_sr },
+		{ mk_mii_read(MII_REG_CR), mii_parse_cr   },
+		{ mk_mii_read(MII_DP83846A_PHYSTS), mii_parse_dp83846a_physts },
+		{ mk_mii_end, }
+	},
+	(const phy_cmd_t []) {  /* shutdown - disable interrupts */
+		{ mk_mii_end, }
+	}
+};
+
+#endif /* CONFIG_FEC_DP83846A */
+
+
+static phy_info_t *phy_info[] = {
+
+#ifdef CONFIG_FEC_LXT970
+	&phy_info_lxt970,
+#endif /* CONFIG_FEC_LXT970 */
+
+#ifdef CONFIG_FEC_LXT971
+	&phy_info_lxt971,
+#endif /* CONFIG_FEC_LXT971 */
+
+#ifdef CONFIG_FEC_QS6612
+	&phy_info_qs6612,
+#endif /* CONFIG_FEC_QS6612 */
+
+#ifdef CONFIG_FEC_AM79C874
+	&phy_info_amd79c874,
+#endif /* CONFIG_FEC_AM79C874 */
+
+#ifdef CONFIG_FEC_DP83843
+	&phy_info_dp83843,
+#endif /* CONFIG_FEC_DP83843 */
+
+#ifdef CONFIG_FEC_DP83846A
+	&phy_info_dp83846a,
+#endif /* CONFIG_FEC_DP83846A */
+
+	NULL
+};
+
+static void mii_display_status(struct net_device *dev)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	if (!fep->link && !fep->old_link) {
+		/* Link is still down - don't print anything */
+		return;
+	}
+
+	printk("%s: status: ", dev->name);
+
+	if (!fep->link) {
+		printk("link down");
+	} else {
+		printk("link up");
+
+		switch(s & PHY_STAT_SPMASK) {
+		case PHY_STAT_100FDX: printk(", 100 Mbps Full Duplex"); break;
+		case PHY_STAT_100HDX: printk(", 100 Mbps Half Duplex"); break;
+		case PHY_STAT_10FDX:  printk(", 10 Mbps Full Duplex");  break;
+		case PHY_STAT_10HDX:  printk(", 10 Mbps Half Duplex");  break;
+		default:
+			printk(", Unknown speed/duplex");
+		}
+
+		if (s & PHY_STAT_ANC)
+			printk(", auto-negotiation complete");
+	}
+
+	if (s & PHY_STAT_FAULT)
+		printk(", remote fault");
+
+	printk(".\n");
+}
+
+static void mii_display_config(struct net_device *dev)
+{
+	volatile struct fec_enet_private *fep = dev->priv;
+	uint s = fep->phy_status;
+
+	printk("%s: config: auto-negotiation ", dev->name);
+
+	if (s & PHY_CONF_ANE)
+		printk("on");
+	else
+		printk("off");
+
+	if (s & PHY_CONF_100FDX)
+		printk(", 100FDX");
+	if (s & PHY_CONF_100HDX)
+		printk(", 100HDX");
+	if (s & PHY_CONF_10FDX)
+		printk(", 10FDX");
+	if (s & PHY_CONF_10HDX)
+		printk(", 10HDX");
+	if (!(s & PHY_CONF_SPMASK))
+		printk(", No speed/duplex selected?");
+
+	if (s & PHY_CONF_LOOP)
+		printk(", loopback enabled");
+
+	printk(".\n");
+
+	fep->sequence_done = 1;
+}
+
+static void mii_relink(struct net_device *dev)
+{
+	struct fec_enet_private *fep = dev->priv;
+	int duplex;
+
+	fep->link = (fep->phy_status & PHY_STAT_LINK) ? 1 : 0;
+	mii_display_status(dev);
+	fep->old_link = fep->link;
+
+	if (fep->link) {
+		duplex = 0;
+		if (fep->phy_status
+		    & (PHY_STAT_100FDX | PHY_STAT_10FDX))
+			duplex = 1;
+		fec_restart(dev, duplex);
+
+		if (netif_queue_stopped(dev)) {
+			netif_wake_queue(dev);
+		}
+	} else {
+		netif_stop_queue(dev);
+		fec_stop(dev);
+	}
+}
+
+static void mii_queue_relink(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_relink;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+static void mii_queue_config(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep = dev->priv;
+
+	fep->phy_task.routine = (void *)mii_display_config;
+	fep->phy_task.data = dev;
+	schedule_task(&fep->phy_task);
+}
+
+
+
+phy_cmd_t phy_cmd_relink[] = { { mk_mii_read(MII_REG_CR), mii_queue_relink },
+			       { mk_mii_end, } };
+phy_cmd_t phy_cmd_config[] = { { mk_mii_read(MII_REG_CR), mii_queue_config },
+			       { mk_mii_end, } };
+
+
+
+/* Read remainder of PHY ID.
+*/
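+/* The ids in phy_info are matched against (phy_id >> 4) below because
+ * the low four bits of PHY ID register 2 hold the silicon revision.
+ */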
+static void
+mii_discover_phy3(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep;
+	int	i;
+
+	fep = dev->priv;
+	fep->phy_id |= (mii_reg & 0xffff);
+
+	for(i = 0; phy_info[i]; i++)
+		if(phy_info[i]->id == (fep->phy_id >> 4))
+			break;
+
+	if(!phy_info[i])
+		panic("%s: PHY id 0x%08x is not supported!\n",
+		      dev->name, fep->phy_id);
+
+	fep->phy = phy_info[i];
+	fep->phy_id_done = 1;
+
+	printk("%s: Phy @ 0x%x, type %s (0x%08x)\n",
+		dev->name, fep->phy_addr, fep->phy->name, fep->phy_id);
+}
+
+/* Scan all of the MII PHY addresses looking for someone to respond
+ * with a valid ID.  This usually happens quickly.
+ */
+static void
+mii_discover_phy(uint mii_reg, struct net_device *dev, uint data)
+{
+	struct fec_enet_private *fep;
+	uint	phytype;
+
+	fep = dev->priv;
+
+	if ((phytype = (mii_reg & 0xffff)) != 0xffff) {
+
+		/* Got first part of ID, now get remainder.
+		*/
+		fep->phy_id = phytype << 16;
+		mii_queue(dev, mk_mii_read(MII_REG_PHYIR2), mii_discover_phy3, 0);
+	} else {
+		fep->phy_addr++;
+		if (fep->phy_addr < 32) {
+			mii_queue(dev, mk_mii_read(MII_REG_PHYIR1),
+							mii_discover_phy, 0);
+		} else {
+			printk("fec: No PHY device found.\n");
+		}
+	}
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+/* This interrupt occurs when the PHY detects a link change.
+*/
+static void
+#ifdef CONFIG_RPXCLASSIC
+mii_link_interrupt(void *dev_id)
+#else
+mii_link_interrupt(int irq, void * dev_id, struct pt_regs * regs)
+#endif
+{
+	struct	net_device *dev = dev_id;
+	struct fec_enet_private *fep = dev->priv;
+	volatile immap_t *immap = (immap_t *)IMAP_ADDR;
+	volatile fec_t *fecp = &(immap->im_cpm.cp_fec);
+	unsigned int ecntrl = fecp->fec_ecntrl;
+
+	/*
+	 * Acknowledge the interrupt if possible. If we have not
+	 * found the PHY yet we can't process or acknowledge the
+	 * interrupt now. Instead we ignore this interrupt for now,
+	 * which we can do since it is edge triggered. It will be
+	 * acknowledged later by fec_enet_open().
+	 */
+	if (fep->phy) {
+		/*
+		 * We need the FEC enabled to access the MII
+		 */
+		if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) {
+			fecp->fec_ecntrl |= FEC_ECNTRL_ETHER_EN;
+		}
+
+		mii_do_cmd(dev, fep->phy->ack_int);
+		mii_do_cmd(dev, phy_cmd_relink);  /* restart and display status */
+
+		if ((ecntrl & FEC_ECNTRL_ETHER_EN) == 0) {
+			fecp->fec_ecntrl = ecntrl;	/* restore old settings */
+		}
+	}
+
+}
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+static int
+fec_enet_open(struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep = rtdev->priv;
+
+	/* I should reset the ring buffers here, but I don't yet know
+	 * a simple way to do that.
+	 */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	fep->sequence_done = 0;
+	fep->link = 0;
+
+	if (fep->phy) {
+		mii_do_cmd(dev, fep->phy->config);
+		mii_do_cmd(dev, phy_cmd_config);  /* display configuration */
+		while(!fep->sequence_done)
+			schedule();
+
+		mii_do_cmd(dev, fep->phy->startup);
+
+#if defined(CONFIG_XENO_DRIVERS_NET_USE_MDIO) && defined(CONFIG_FEC_DP83846A)
+		if(fep->phy == &phy_info_dp83846a)
+		{
+			/* Initializing timers
+			 */
+			init_timer( &fep->phy_timer_list );
+
+			/* Start timer for periodic link status check.
+			 * After 100 milliseconds, the mdio_timer_callback function is called.
+			 */
+			fep->phy_timer_list.expires  = jiffies + (100 * HZ / 1000);
+			fep->phy_timer_list.data     = (unsigned long)dev;
+			fep->phy_timer_list.function = mdio_timer_callback;
+			add_timer( &fep->phy_timer_list );
+		}
+
+#if defined(CONFIG_IP_PNP)
+	rtdm_printk("%s: Waiting for the link to be up...\n", rtdev->name);
+
+	while(fep->link == 0 || ((((volatile fec_t*)rtdev->base_addr)->fec_ecntrl & FEC_ECNTRL_ETHER_EN) == 0))
+	{
+	    schedule();
+	}
+#endif /* CONFIG_IP_PNP */
+
+#endif /* CONFIG_XENO_DRIVERS_NET_USE_MDIO && CONFIG_FEC_DP83846A */
+
+		netif_start_queue(dev);
+		return 0;		/* Success */
+	}
+	return -ENODEV;		/* No PHY we understand */
+#else	/* !CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+	fep->link = 1;
+	rtnetif_start_queue(rtdev);
+
+	return 0;	/* Success */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+}
+
+static int
+fec_enet_close(struct rtnet_device *rtdev)
+{
+	/* Don't know what to do yet.
+	*/
+	rtnetif_stop_queue(rtdev);
+
+	fec_stop(rtdev);
+
+	return 0;
+}
+
+static struct net_device_stats *fec_enet_get_stats(struct rtnet_device *rtdev)
+{
+	struct fec_enet_private *fep = (struct fec_enet_private *)rtdev->priv;
+
+	return &fep->stats;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+
+#if defined(CONFIG_FEC_DP83846A)
+/* Execute the ack_int command set and schedule the next timer callback.  */
+static void mdio_timer_callback(unsigned long data)
+{
+	struct net_device *dev = (struct net_device *)data;
+	struct fec_enet_private *fep = (struct fec_enet_private *)(dev->priv);
+	mii_do_cmd(dev, fep->phy->ack_int);
+
+	if(fep->link == 0)
+	{
+		fep->phy_timer_list.expires  = jiffies + (100 * HZ / 1000); /* Sleep for 100ms */
+	}
+	else
+	{
+		fep->phy_timer_list.expires  = jiffies + (1 * HZ); /* Sleep for 1 sec. */
+	}
+	add_timer( &fep->phy_timer_list );
+}
+#endif /* CONFIG_FEC_DP83846A */
+
+static void mdio_callback(uint regval, struct net_device *dev, uint data)
+{
+	mdio_read_data_t* mrd = (mdio_read_data_t *)data;
+	mrd->regval = 0xFFFF & regval;
+	wake_up_process(mrd->sleeping_task);
+}
+
+static int mdio_read(struct net_device *dev, int phy_id, int location)
+{
+	uint retval;
+	mdio_read_data_t* mrd = (mdio_read_data_t *)kmalloc(sizeof(*mrd), GFP_KERNEL);
+
+	mrd->sleeping_task = current;
+	set_current_state(TASK_INTERRUPTIBLE);
+	mii_queue(dev, mk_mii_read(location), mdio_callback, (unsigned int) mrd);
+	schedule();
+
+	retval = mrd->regval;
+
+	kfree(mrd);
+
+	return retval;
+}
+
+void mdio_write(struct net_device *dev, int phy_id, int location, int value)
+{
+	mii_queue(dev, mk_mii_write(location, value), NULL, 0);
+}
+
+static int fec_enet_ioctl(struct net_device *dev, struct ifreq *rq, int cmd)
+{
+	struct fec_enet_private *cep = (struct fec_enet_private *)dev->priv;
+	struct mii_ioctl_data *data = (struct mii_ioctl_data *)&rq->ifr_data;
+
+	int phy = cep->phy_addr & 0x1f;
+	int retval;
+
+	if (data == NULL)
+	{
+		retval = -EINVAL;
+	}
+	else
+	{
+		switch(cmd)
+		{
+		case SIOCETHTOOL:
+			return netdev_ethtool_ioctl(dev, (void*)rq->ifr_data);
+			break;
+
+		case SIOCGMIIPHY:		/* Get address of MII PHY in use. */
+		case SIOCDEVPRIVATE:		/* for binary compat, remove in 2.5 */
+			data->phy_id = phy;
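+			/* fall through: also read the requested register */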
+
+		case SIOCGMIIREG:		/* Read MII PHY register.	*/
+		case SIOCDEVPRIVATE+1:		/* for binary compat, remove in 2.5 */
+			data->val_out = mdio_read(dev, data->phy_id & 0x1f, data->reg_num & 0x1f);
+			retval = 0;
+			break;
+
+		case SIOCSMIIREG:		/* Write MII PHY register.	*/
+		case SIOCDEVPRIVATE+2:		/* for binary compat, remove in 2.5 */
+			if (!capable(CAP_NET_ADMIN))
+			{
+				retval = -EPERM;
+			}
+			else
+			{
+				mdio_write(dev, data->phy_id & 0x1f, data->reg_num & 0x1f, data->val_in);
+				retval = 0;
+			}
+			break;
+
+		default:
+			retval = -EOPNOTSUPP;
+			break;
+		}
+	}
+	return retval;
+}
+
+
+static int netdev_ethtool_ioctl (struct net_device *dev, void *useraddr)
+{
+	u32 ethcmd;
+
+	/* dev_ioctl() in ../../net/core/dev.c has already checked
+	   capable(CAP_NET_ADMIN), so don't bother with that here.  */
+
+	if (copy_from_user (&ethcmd, useraddr, sizeof (ethcmd)))
+		return -EFAULT;
+
+	switch (ethcmd) {
+	case ETHTOOL_GDRVINFO:
+		{
+			struct ethtool_drvinfo info = { ETHTOOL_GDRVINFO };
+			strcpy (info.driver, dev->name);
+			strcpy (info.version, "0.3");
+			strcpy (info.bus_info, "");
+			if (copy_to_user (useraddr, &info, sizeof (info)))
+				return -EFAULT;
+			return 0;
+		}
+	default:
+		break;
+	}
+
+	return -EOPNOTSUPP;
+}
+
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+
+#ifdef ORIGINAL_VERSION
+
+/* Returns the CRC needed when filling in the hash table for
+ * multicast group filtering.
+ * pAddr must point to a MAC address (6 bytes).
+ */
+static u32 fec_mulicast_calc_crc(char *pAddr)
+{
+	u8	byte;
+	int	byte_count;
+	int	bit_count;
+	u32	crc = 0xffffffff;
+	u8	msb;
+
+	for (byte_count=0; byte_count<6; byte_count++) {
+		byte = pAddr[byte_count];
+		for (bit_count=0; bit_count<8; bit_count++) {
+			msb = crc >> 31;
+			crc <<= 1;
+			if (msb ^ (byte & 0x1)) {
+				crc ^= FEC_CRC_POLY;
+			}
+			byte >>= 1;
+		}
+	}
+	return (crc);
+}
+
+/* Set or clear the multicast filter for this adaptor.
+ * Skeleton taken from sunlance driver.
+ * The CPM Ethernet implementation allows Multicast as well as individual
+ * MAC address filtering.  Some of the drivers check to make sure it is
+ * a group multicast address, and discard those that are not.  I guess I
+ * will do the same for now, but just remove the test if you want
+ * individual filtering as well (do the upper net layers want or support
+ * this kind of feature?).
+ */
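+/*
+ * Worked example of the hash mapping below: if fec_mulicast_calc_crc()
+ * returns a CRC whose low six bits are 0b001101, then crc & 1 == 1
+ * selects fec_hash_table_high, temp = (0x0d >> 1) == 6, and the 5-bit
+ * reversal gives hash_index == 0b01100 == 12, so csrVal sets bit 12.
+ */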
+
+static void set_multicast_list(struct net_device *dev)
+{
+	struct	fec_enet_private *fep;
+	volatile fec_t *ep;
+
+	fep = (struct fec_enet_private *)dev->priv;
+	ep = &(((immap_t *)IMAP_ADDR)->im_cpm.cp_fec);
+
+	if (dev->flags&IFF_PROMISC) {
+
+		/* Log any net taps. */
+		printk("%s: Promiscuous mode enabled.\n", dev->name);
+		ep->fec_r_cntrl |= FEC_RCNTRL_PROM;
+	} else {
+
+		ep->fec_r_cntrl &= ~FEC_RCNTRL_PROM;
+
+		if (dev->flags & IFF_ALLMULTI) {
+			/* Catch all multicast addresses, so set the
+			 * filter to all 1's.
+			 */
+			ep->fec_hash_table_high = 0xffffffff;
+			ep->fec_hash_table_low = 0xffffffff;
+		} else {
+			struct dev_mc_list *pmc = dev->mc_list;
+
+			/* Clear Hash-Table
+			*/
+			ep->fec_hash_table_high = 0;
+			ep->fec_hash_table_low = 0;
+
+			/* Now populate the hash table
+			*/
+#ifdef DEBUG_MULTICAST
+			if (pmc) {
+				printk ("%s: Recalculating hash-table:\n",
+					dev->name);
+				printk (" MAC Address         high     low\n");
+			}
+#endif
+
+			while (pmc) {
+				u32	crc;
+				int	temp;
+				u32	csrVal;
+				int	hash_index;
+
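+				/* crc bit 0 selects the high or low hash
+				 * register; crc bits 1-5, bit-reversed,
+				 * give the bit position within it.
+				 */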
+				crc = fec_multicast_calc_crc(pmc->dmi_addr);
+				temp = (crc & 0x3f) >> 1;
+				hash_index = ((temp & 0x01) << 4) |
+					     ((temp & 0x02) << 2) |
+					     ((temp & 0x04))      |
+					     ((temp & 0x08) >> 2) |
+					     ((temp & 0x10) >> 4);
+				csrVal = (1 << hash_index);
+				if (crc & 1) {
+					ep->fec_hash_table_high	|= csrVal;
+				}
+				else {
+					ep->fec_hash_table_low	|= csrVal;
+				}
+#ifdef DEBUG_MULTICAST
+				printk (" %02x:%02x:%02x:%02x:%02x:%02x   %08x %08x\n",
+					(int)pmc->dmi_addr[0],
+					(int)pmc->dmi_addr[1],
+					(int)pmc->dmi_addr[2],
+					(int)pmc->dmi_addr[3],
+					(int)pmc->dmi_addr[4],
+					(int)pmc->dmi_addr[5],
+					ep->fec_hash_table_high,
+					ep->fec_hash_table_low
+				);
+#endif
+				pmc = pmc->next;
+			}
+		}
+	}
+}
+#endif /* ORIGINAL_VERSION */
+
+/* Initialize the FEC Ethernet on 860T.
+ */
+int __init fec_enet_init(void)
+{
+	struct rtnet_device *rtdev = NULL;
+	struct fec_enet_private *fep;
+	int i, j, k;
+	unsigned char	*eap, *iap, *ba;
+	unsigned long	mem_addr;
+	volatile	cbd_t	*bdp;
+	cbd_t		*cbd_base;
+	volatile	immap_t	*immap;
+	volatile	fec_t	*fecp;
+	bd_t		*bd;
+
+	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */
+
+	bd = (bd_t *)__res;
+
+	if (!rx_pool_size)
+		rx_pool_size = RX_RING_SIZE * 2;
+
+	rtdev = rtdev_root = rt_alloc_etherdev(sizeof(struct fec_enet_private),
+					rx_pool_size + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "enet: Could not allocate ethernet device.\n");
+		return -ENOMEM;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+
+	fep = (struct fec_enet_private *)rtdev->priv;
+	fecp = &(immap->im_cpm.cp_fec);
+
+	/* Whack a reset.  We should wait for this.
+	*/
+	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET;
+	for (i = 0;
+	     (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY);
+	     ++i) {
+		udelay(1);
+	}
+	if (i == FEC_RESET_DELAY) {
+		printk ("FEC Reset timeout!\n");
+	}
+
+	/* Set the Ethernet address.  If using multiple Enets on the 8xx,
+	 * this needs some work to get unique addresses.
+	 */
+	eap = (unsigned char *)my_enet_addr;
+	iap = bd->bi_enetaddr;
+
+#if defined(CONFIG_SCC_ENET) && !defined(ORIGINAL_VERSION)
+	/*
+	 * If a board has Ethernet configured both on a SCC and the
+	 * FEC, it needs (at least) 2 MAC addresses (we know that Sun
+	 * disagrees, but anyway). For the FEC port, we create
+	 * another address by setting one of the address bits above
+	 * something that would have (up to now) been allocated.
+	 */
+	{
+		unsigned char	tmpaddr[6];
+		for (i=0; i<6; i++)
+			tmpaddr[i] = *iap++;
+		tmpaddr[3] |= 0x80;
+		iap = tmpaddr;
+	}
+#endif
+
+	for (i=0; i<6; i++) {
+		rtdev->dev_addr[i] = *eap++ = *iap++;
+	}
+
+	/* Allocate memory for buffer descriptors.
+	*/
+	if (((RX_RING_SIZE + TX_RING_SIZE) * sizeof(cbd_t)) > PAGE_SIZE) {
+		printk("FEC init error.  Need more space.\n");
+		printk("FEC initialization failed.\n");
+		return 1;
+	}
+	cbd_base = (cbd_t *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, (void *)&mem_addr);
+	if (cbd_base == NULL) {
+		printk(KERN_ERR "FEC init error: could not allocate buffer descriptors.\n");
+		rtdev_free(rtdev);
+		return -ENOMEM;
+	}
+
+	/* Set receive and transmit descriptor base.
+	*/
+	fep->rx_bd_base = cbd_base;
+	fep->tx_bd_base = cbd_base + RX_RING_SIZE;
+
+	fep->skb_cur = fep->skb_dirty = 0;
+
+	/* Initialize the receive buffer descriptors.
+	*/
+	bdp = fep->rx_bd_base;
+	k = 0;
+	for (i=0; i<FEC_ENET_RX_PAGES; i++) {
+
+		/* Allocate a page.
+		*/
+		ba = (unsigned char *)consistent_alloc(GFP_KERNEL, PAGE_SIZE, (void *)&mem_addr);
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		for (j=0; j<FEC_ENET_RX_FRPPG; j++) {
+			bdp->cbd_sc = BD_ENET_RX_EMPTY;
+			bdp->cbd_bufaddr = mem_addr;
+			fep->rx_vaddr[k++] = ba;
+			mem_addr += FEC_ENET_RX_FRSIZE;
+			ba += FEC_ENET_RX_FRSIZE;
+			bdp++;
+		}
+	}
+
+	rtdm_lock_init(&fep->lock);
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Install our interrupt handler.
+	*/
+	rt_stack_connect(rtdev, &STACK_manager);
+	if ((i = rtdm_irq_request(&fep->irq_handle, FEC_INTERRUPT,
+				  fec_enet_interrupt, 0, "rt_mpc8xx_fec", rtdev))) {
+		printk(KERN_ERR "Couldn't request IRQ %d\n", rtdev->irq);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+	rtdev->base_addr = (unsigned long)fecp;
+
+#ifdef CONFIG_RPXCLASSIC
+/* If MDIO is disabled the PHY should not be allowed to
+ * generate interrupts telling us to read the PHY.
+ */
+# ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Make Port C, bit 15 an input that causes interrupts.
+	*/
+	immap->im_ioport.iop_pcpar &= ~0x0001;
+	immap->im_ioport.iop_pcdir &= ~0x0001;
+	immap->im_ioport.iop_pcso  &= ~0x0001;
+	immap->im_ioport.iop_pcint |=  0x0001;
+	cpm_install_handler(CPMVEC_PIO_PC15, mii_link_interrupt, rtdev);
+# endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Make LEDS reflect Link status.
+	*/
+	*((uint *) RPX_CSR_ADDR) &= ~BCSR2_FETHLEDMODE;
+#endif	/* CONFIG_RPXCLASSIC */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_USE_MDIO
+# ifndef PHY_INTERRUPT
+#  error Want to use MII, but PHY_INTERRUPT not defined!
+# endif
+	((immap_t *)IMAP_ADDR)->im_siu_conf.sc_siel |=
+		(0x80000000 >> PHY_INTERRUPT);
+
+	if (request_8xxirq(PHY_INTERRUPT, mii_link_interrupt, 0, "mii", rtdev) != 0)
+		panic("Could not allocate MII IRQ!");
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* The FEC Ethernet specific entries in the device structure. */
+	rtdev->open = fec_enet_open;
+	rtdev->hard_start_xmit = fec_enet_start_xmit;
+	rtdev->stop = fec_enet_close;
+	rtdev->hard_header = &rt_eth_header;
+	rtdev->get_stats = fec_enet_get_stats;
+
+	if ((i = rt_register_rtnetdev(rtdev))) {
+		rtdm_irq_disable(&fep->irq_handle);
+		rtdm_irq_free(&fep->irq_handle);
+		rtdev_free(rtdev);
+		return i;
+	}
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	rtdev->do_ioctl = fec_enet_ioctl;
+
+	for (i=0; i<NMII-1; i++)
+		mii_cmds[i].mii_next = &mii_cmds[i+1];
+	mii_free = mii_cmds;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifndef CONFIG_ICU862
+	/* Configure all of port D for MII.
+	*/
+	immap->im_ioport.iop_pdpar = 0x1fff;
+
+#else	/* CONFIG_ICU862 */
+	/* Configure port A for MII.
+	*/
+
+	/* Has Utopia been configured? */
+	if (immap->im_ioport.iop_pdpar & (0x8000 >> 1)) {
+		/*
+		 * YES - Use MUXED mode for UTOPIA bus.
+		 * This frees Port A for use by MII (see 862UM table 41-6).
+		 */
+		immap->im_ioport.utmode &= ~0x80;
+	} else {
+		/*
+		 * NO - set SPLIT mode for UTOPIA bus.
+		 *
+		 * This doesn't really affect UTOPIA (which isn't
+		 * enabled anyway) but just tells the 862
+		 * to use port A for MII (see 862UM table 41-6).
+		 */
+		immap->im_ioport.utmode |= 0x80;
+	}
+
+# ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Now configure MII_MDC pin */
+	immap->im_ioport.iop_pdpar |= (0x8000 >> 8);
+# endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+#endif  /* CONFIG_ICU862 */
+
+	/* Bits moved from Rev. D onward.
+	*/
+	if ((mfspr(IMMR) & 0xffff) < 0x0501)
+		immap->im_ioport.iop_pddir = 0x1c58;	/* Pre rev. D */
+	else
+		immap->im_ioport.iop_pddir = 0x1fff;	/* Rev. D and later */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed to 2.5 MHz
+	*/
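+	/* E.g. a 50 MHz bus clock gives (50000000+4999999)/2500000 = 21,
+	 * 21/2 = 10, register value 10<<1 = 0x14; MDC then presumably runs
+	 * at 50 MHz / (2 * 10) = 2.5 MHz.
+	 */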
+	fecp->fec_mii_speed = fep->phy_speed =
+	    ((((bd->bi_intfreq + 4999999) / 2500000) / 2 ) & 0x3F ) << 1;
+#else
+	fecp->fec_mii_speed = 0;	/* turn off MDIO */
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+#ifndef ORIGINAL_VERSION
+	printk("%s: FEC ENET Version 0.3, irq %d, addr %02x:%02x:%02x:%02x:%02x:%02x\n",
+	       rtdev->name, FEC_INTERRUPT,
+	       rtdev->dev_addr[0], rtdev->dev_addr[1], rtdev->dev_addr[2],
+	       rtdev->dev_addr[3], rtdev->dev_addr[4], rtdev->dev_addr[5]);
+#else
+	printk ("%s: FEC ENET Version 0.3, FEC irq %d"
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+		", with MDIO"
+#endif
+#ifdef PHY_INTERRUPT
+		", MII irq %d"
+#endif
+		", addr ",
+		dev->name, FEC_INTERRUPT
+#ifdef PHY_INTERRUPT
+		, PHY_INTERRUPT
+#endif
+	);
+	for (i=0; i<6; i++)
+		printk("%02x%c", rtdev->dev_addr[i], (i==5) ? '\n' : ':');
+#endif /* ORIGINAL_VERSION */
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO	/* start in full duplex mode, and negotiate speed */
+	fec_restart (rtdev, 1);
+#else			/* always use half duplex mode only */
+	fec_restart (rtdev, 0);
+#endif
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Queue up command to detect the PHY and initialize the
+	 * remainder of the interface.
+	 */
+	fep->phy_id_done = 0;
+	fep->phy_addr = 0;
+	mii_queue(rtdev, mk_mii_read(MII_REG_PHYIR1), mii_discover_phy, 0);
+
+	fep->old_status = 0;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	return 0;
+}
+
+/* This function is called to start or restart the FEC during a link
+ * change.  This only happens when switching between half and full
+ * duplex.
+ */
+static void
+fec_restart(struct rtnet_device *rtdev, int duplex)
+{
+	struct fec_enet_private *fep;
+	int i;
+	volatile	cbd_t	*bdp;
+	volatile	immap_t	*immap;
+	volatile	fec_t	*fecp;
+
+	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */
+
+	fecp = &(immap->im_cpm.cp_fec);
+
+	fep = rtdev->priv;
+
+	/* Whack a reset.  We should wait for this.
+	*/
+	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_RESET;
+	for (i = 0;
+	     (fecp->fec_ecntrl & FEC_ECNTRL_RESET) && (i < FEC_RESET_DELAY);
+	     ++i) {
+		udelay(1);
+	}
+	if (i == FEC_RESET_DELAY) {
+		printk ("FEC Reset timeout!\n");
+	}
+
+	/* Set station address.
+	*/
+	fecp->fec_addr_low  = (my_enet_addr[0] << 16) | my_enet_addr[1];
+	fecp->fec_addr_high =  my_enet_addr[2];
+
+	/* Reset all multicast.
+	*/
+	fecp->fec_hash_table_high = 0;
+	fecp->fec_hash_table_low  = 0;
+
+	/* Set maximum receive buffer size.
+	*/
+	fecp->fec_r_buff_size = PKT_MAXBLR_SIZE;
+	fecp->fec_r_hash = PKT_MAXBUF_SIZE;
+
+	/* Set receive and transmit descriptor base.
+	*/
+	fecp->fec_r_des_start = iopa((uint)(fep->rx_bd_base));
+	fecp->fec_x_des_start = iopa((uint)(fep->tx_bd_base));
+
+	fep->dirty_tx = fep->cur_tx = fep->tx_bd_base;
+	fep->cur_rx = fep->rx_bd_base;
+
+	/* Reset SKB transmit buffers.
+	*/
+	fep->skb_cur = fep->skb_dirty = 0;
+	for (i=0; i<=TX_RING_MOD_MASK; i++) {
+		if (fep->tx_skbuff[i] != NULL) {
+			dev_kfree_rtskb(fep->tx_skbuff[i]);
+			fep->tx_skbuff[i] = NULL;
+		}
+	}
+
+	/* Initialize the receive buffer descriptors.
+	*/
+	bdp = fep->rx_bd_base;
+	for (i=0; i<RX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = BD_ENET_RX_EMPTY;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* ...and the same for transmit.
+	*/
+	bdp = fep->tx_bd_base;
+	for (i=0; i<TX_RING_SIZE; i++) {
+
+		/* Initialize the BD for every fragment in the page.
+		*/
+		bdp->cbd_sc = 0;
+		bdp->cbd_bufaddr = 0;
+		bdp++;
+	}
+
+	/* Set the last buffer to wrap.
+	*/
+	bdp--;
+	bdp->cbd_sc |= BD_SC_WRAP;
+
+	/* Enable MII mode.
+	*/
+	if (duplex) {
+		fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE;	/* MII enable */
+		fecp->fec_x_cntrl = FEC_TCNTRL_FDEN;		/* FD enable */
+	}
+	else {
+		fecp->fec_r_cntrl = FEC_RCNTRL_MII_MODE | FEC_RCNTRL_DRT;
+		fecp->fec_x_cntrl = 0;
+	}
+
+	fep->full_duplex = duplex;
+
+	/* Enable big endian and don't care about SDMA FC.
+	*/
+	fecp->fec_fun_code = 0x78000000;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed.
+	*/
+	fecp->fec_mii_speed = fep->phy_speed;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Clear any outstanding interrupt.
+	*/
+	fecp->fec_ievent = 0xffc0;
+
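+	/* FEC_IVEC holds a 3-bit interrupt level in its top bits;
+	 * FEC_INTERRUPT/2 is presumably the SIU level assigned to the FEC.
+	 */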
+	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
+
+	/* Enable interrupts we wish to service.
+	*/
+	fecp->fec_imask = ( FEC_ENET_TXF | FEC_ENET_TXB |
+			    FEC_ENET_RXF | FEC_ENET_RXB | FEC_ENET_MII );
+
+	/* And last, enable the transmit and receive processing.
+	*/
+	fecp->fec_ecntrl = FEC_ECNTRL_PINMUX | FEC_ECNTRL_ETHER_EN;
+	fecp->fec_r_des_active = 0x01000000;
+
+	/* The tx ring is no longer full. */
+	if (fep->tx_full) {
+		fep->tx_full = 0;
+		rtnetif_wake_queue(rtdev);
+	}
+}
+
+static void
+fec_stop(struct rtnet_device *rtdev)
+{
+	volatile	immap_t	*immap;
+	volatile	fec_t	*fecp;
+	int i;
+	struct fec_enet_private *fep;
+
+	immap = (immap_t *)IMAP_ADDR;	/* pointer to internal registers */
+
+	fecp = &(immap->im_cpm.cp_fec);
+
+	if ((fecp->fec_ecntrl & FEC_ECNTRL_ETHER_EN) == 0)
+		return;	/* already down */
+
+	fep = rtdev->priv;
+
+	fecp->fec_x_cntrl = 0x01;	/* Graceful transmit stop */
+
+	for (i = 0;
+	     ((fecp->fec_ievent & 0x10000000) == 0) && (i < FEC_RESET_DELAY);
+	     ++i) {
+		udelay(1);
+	}
+	if (i == FEC_RESET_DELAY) {
+		printk ("FEC timeout on graceful transmit stop\n");
+	}
+
+	/* Clear outstanding MII command interrupts.
+	*/
+	fecp->fec_ievent = FEC_ENET_MII;
+
+	/* Enable MII command finished interrupt
+	*/
+	fecp->fec_ivec = (FEC_INTERRUPT/2) << 29;
+	fecp->fec_imask = FEC_ENET_MII;
+
+#ifdef	CONFIG_XENO_DRIVERS_NET_USE_MDIO
+	/* Set MII speed.
+	*/
+	fecp->fec_mii_speed = fep->phy_speed;
+#endif	/* CONFIG_XENO_DRIVERS_NET_USE_MDIO */
+
+	/* Disable FEC
+	*/
+	fecp->fec_ecntrl &= ~(FEC_ECNTRL_ETHER_EN);
+}
+
+static void __exit fec_enet_cleanup(void)
+{
+	struct rtnet_device *rtdev = rtdev_root;
+
+	if (rtdev) {
+		struct fec_enet_private *fep = rtdev->priv;
+
+		rtdm_irq_disable(&fep->irq_handle);
+		rtdm_irq_free(&fep->irq_handle);
+
+		consistent_free(fep->rx_bd_base);
+
+		rt_stack_disconnect(rtdev);
+		rt_unregister_rtnetdev(rtdev);
+		rt_rtdev_disconnect(rtdev);
+
+		printk("%s: unloaded\n", rtdev->name);
+		rtdev_free(rtdev);
+		rtdev_root = NULL;
+	}
+}
+
+module_init(fec_enet_init);
+module_exit(fec_enet_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c
new file mode 100644
index 0000000..82d1c33
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/natsemi.c
@@ -0,0 +1,2095 @@
+/* natsemi.c: A Linux PCI Ethernet driver for the NatSemi DP8381x series. */
+/*
+	Written/copyright 1999-2001 by Donald Becker.
+	Portions copyright (c) 2001,2002 Sun Microsystems (thockin@sun.com)
+	Portions copyright 2001,2002 Manfred Spraul (manfred@colorfullife.com)
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.  License under other terms may be
+	available.  Contact the original author for details.
+
+	The original author may be reached as becker@scyld.com, or at
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+	Support information and updates available at
+	http://www.scyld.com/network/netsemi.html
+
+
+	Linux kernel modifications:
+
+	Version 1.0.1:
+		- Spinlock fixes
+		- Bug fixes and better intr performance (Tjeerd)
+	Version 1.0.2:
+		- Now reads correct MAC address from eeprom
+	Version 1.0.3:
+		- Eliminate redundant priv->tx_full flag
+		- Call netif_start_queue from dev->tx_timeout
+		- wmb() in start_tx() to flush data
+		- Update Tx locking
+		- Clean up PCI enable (davej)
+	Version 1.0.4:
+		- Merge Donald Becker's natsemi.c version 1.07
+	Version 1.0.5:
+		- { fill me in }
+	Version 1.0.6:
+		* ethtool support (jgarzik)
+		* Proper initialization of the card (which sometimes
+		fails to occur and leaves the card in a non-functional
+		state). (uzi)
+
+		* Some documented register settings to optimize some
+		of the 100Mbit autodetection circuitry in rev C cards. (uzi)
+
+		* Polling of the PHY intr for stuff like link state
+		change and auto- negotiation to finally work properly. (uzi)
+
+		* One-liner removal of a duplicate declaration of
+		netdev_error(). (uzi)
+
+	Version 1.0.7: (Manfred Spraul)
+		* pci dma
+		* SMP locking update
+		* full reset added into tx_timeout
+		* correct multicast hash generation (both big and little endian)
+			[copied from a natsemi driver version
+			 from Myrio Corporation, Greg Smith]
+		* suspend/resume
+
+	version 1.0.8 (Tim Hockin <thockin@sun.com>)
+		* ETHTOOL_* support
+		* Wake on lan support (Erik Gilling)
+		* MXDMA fixes for serverworks
+		* EEPROM reload
+
+	version 1.0.9 (Manfred Spraul)
+		* Main change: fix the lack of synchronization of
+		netif_close/netif_suspend against a last interrupt
+		or packet.
+		* do not enable superfluous interrupts (e.g. the
+		driver relies on TxDone - TxIntr not needed)
+		* wait until the hardware has really stopped in close
+		and suspend.
+		* workaround for the (at least) gcc-2.95.1 compiler
+		problem. Also simplifies the code a bit.
+		* disable_irq() in tx_timeout - needed to protect
+		against rx interrupts.
+		* stop the nic before switching into silent rx mode
+		for wol (required according to docu).
+
+	version 1.0.10:
+		* use long for ee_addr (various)
+		* print pointers properly (DaveM)
+		* include asm/irq.h (?)
+
+	version 1.0.11:
+		* check and reset if PHY errors appear (Adrian Sun)
+		* WoL cleanup (Tim Hockin)
+		* Magic number cleanup (Tim Hockin)
+		* Don't reload EEPROM on every reset (Tim Hockin)
+		* Save and restore EEPROM state across reset (Tim Hockin)
+		* MDIO Cleanup (Tim Hockin)
+		* Reformat register offsets/bits (jgarzik)
+
+	version 1.0.12:
+		* ETHTOOL_* further support (Tim Hockin)
+
+	version 1.0.13:
+		* ETHTOOL_[G]EEPROM support (Tim Hockin)
+
+	version 1.0.13:
+		* crc cleanup (Matt Domsch <Matt_Domsch@dell.com>)
+
+	version 1.0.14:
+		* Cleanup some messages and autoneg in ethtool (Tim Hockin)
+
+	version 1.0.15:
+		* Get rid of cable_magic flag
+		* use new (National provided) solution for cable magic issue
+
+	version 1.0.16:
+		* call netdev_rx() for RxErrors (Manfred Spraul)
+		* formatting and cleanups
+		* change options and full_duplex arrays to be zero
+		  initialized
+		* enable only the WoL and PHY interrupts in wol mode
+
+	version 1.0.17:
+		* only do cable_magic on 83815 and early 83816 (Tim Hockin)
+		* create a function for rx refill (Manfred Spraul)
+		* combine drain_ring and init_ring (Manfred Spraul)
+		* oom handling (Manfred Spraul)
+		* hands_off instead of playing with netif_device_{de,a}ttach
+		  (Manfred Spraul)
+		* be sure to write the MAC back to the chip (Manfred Spraul)
+		* lengthen EEPROM timeout, and always warn about timeouts
+		  (Manfred Spraul)
+		* comments update (Manfred)
+		* do the right thing on a phy-reset (Manfred and Tim)
+
+	TODO:
+	* big endian support with CFG:BEM instead of cpu_to_le32
+	* support for an external PHY
+	* NAPI
+
+	Ported to RTNET: December 2003, Erik Buit <e.buit@student.utwente.nl>
+*/
+
+#if !defined(__OPTIMIZE__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error You must compile this driver with "-O".
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/spinlock.h>
+#include <linux/ethtool.h>
+#include <linux/delay.h>
+#include <linux/rtnetlink.h>
+#include <linux/mii.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>	/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/*** RTnet ***/
+#include <rtnet_port.h>
+
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+#define DEFAULT_RX_POOL_SIZE    16
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** RTnet ***/
+
+#define DRV_NAME	"natsemi-rt"
+#define DRV_VERSION	"1.07+LK1.0.17-RTnet-0.2"
+#define DRV_RELDATE	"Dec 16, 2003"
+
+/* Updated to recommendations in pci-skeleton v2.03. */
+
+/* The user-configurable values.
+   These may be modified when a driver module is loaded.*/
+
+#define NATSEMI_DEF_MSG		(NETIF_MSG_DRV		| \
+				 NETIF_MSG_LINK		| \
+				 NETIF_MSG_WOL		| \
+				 NETIF_MSG_RX_ERR	| \
+				 NETIF_MSG_TX_ERR)
+static int local_debug = -1;
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+static int mtu;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+/*** RTnet ***
+static int rx_copybreak;
+ *** RTnet ***/
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability.
+   The media type is usually passed in 'options[]'.
+*/
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10 /* Limit ring entries actually used, min 4. */
+#define RX_RING_SIZE	8 /*** RTnet ***/
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (2*HZ)
+
+#define NATSEMI_HW_TIMEOUT	400
+#define NATSEMI_TIMER_FREQ	(3*HZ)
+#define NATSEMI_PG0_NREGS	64
+#define NATSEMI_RFDR_NREGS	8
+#define NATSEMI_PG1_NREGS	4
+#define NATSEMI_NREGS		(NATSEMI_PG0_NREGS + NATSEMI_RFDR_NREGS + \
+				 NATSEMI_PG1_NREGS)
+#define NATSEMI_REGS_VER	1 /* v1 added RFDR registers */
+#define NATSEMI_REGS_SIZE	(NATSEMI_NREGS * sizeof(u32))
+#define NATSEMI_EEPROM_SIZE	24 /* 12 16-bit values */
+
+#define PKT_BUF_SZ		1536 /* Size of each temporary Rx buffer. */
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+  KERN_INFO DRV_NAME " dp8381x driver, version "
+      DRV_VERSION ", " DRV_RELDATE "\n"
+  KERN_INFO "  originally by Donald Becker <becker@scyld.com>\n"
+  KERN_INFO "  http://www.scyld.com/network/natsemi.html\n"
+  KERN_INFO "  2.4.x kernel port by Jeff Garzik, Tjeerd Mulder\n"
+  KERN_INFO "  RTnet port by Erik Buit\n";
+
+MODULE_AUTHOR("Erik Buit");
+MODULE_DESCRIPTION("RTnet National Semiconductor DP8381x series PCI Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0444);
+module_param(mtu, int, 0444);
+module_param_named(debug, local_debug, int, 0444);
+/*** RTnet ***
+MODULE_PARM(rx_copybreak, "i");
+ *** RTnet ***/
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+MODULE_PARM_DESC(max_interrupt_work,
+	"DP8381x maximum events handled per interrupt");
+MODULE_PARM_DESC(mtu, "DP8381x MTU (all boards)");
+MODULE_PARM_DESC(debug, "DP8381x default debug level");
+/*** RTnet ***
+MODULE_PARM_DESC(rx_copybreak,
+	"DP8381x copy breakpoint for copy-only-tiny-frames");
+ *** RTnet ***/
+MODULE_PARM_DESC(options, "DP8381x: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "DP8381x full duplex setting(s) (1)");
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for National Semiconductor DP83815 PCI Ethernet NIC.
+It also works with other chips in the DP83810 series.
+
+II. Board-specific settings
+
+This driver requires the PCI interrupt line to be valid.
+It honors the EEPROM-set values.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
+The NatSemi design uses a 'next descriptor' pointer that the driver forms
+into a list.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver uses a zero-copy receive and transmit scheme.
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in a later phase of receives.
+
+The RX_COPYBREAK value is chosen to trade off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+A subtle aspect of the operation is that unaligned buffers are not permitted
+by the hardware.  Thus the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing.  On copies, frames are put into the
+skbuff at an offset of "+2", 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+Most operations are synchronized on the np->lock irq spinlock, except the
+performance critical codepaths:
+
+The rx process only runs in the interrupt handler. Access from outside
+the interrupt handler is only permitted after disable_irq().
+
+The tx process usually runs under the dev->xmit_lock. If np->intr_tx_reap
+is set, then access is permitted under spin_lock_irq(&np->lock).
+
+Thus configuration functions that want to access everything must call
+	disable_irq(dev->irq);
+	spin_lock_bh(dev->xmit_lock);
+	spin_lock_irq(&np->lock);
+
+IV. Notes
+
+NatSemi PCI network controllers are very uncommon.
+
+IVb. References
+
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+Datasheet is available from:
+http://www.national.com/pf/DP/DP83815.html
+
+IVc. Errata
+
+None characterised.
+*/
+
+
+
+enum pcistuff {
+	PCI_USES_IO = 0x01,
+	PCI_USES_MEM = 0x02,
+	PCI_USES_MASTER = 0x04,
+	PCI_ADDR0 = 0x08,
+	PCI_ADDR1 = 0x10,
+};
+
+/* MMIO operations required */
+#define PCI_IOTYPE (PCI_USES_MASTER | PCI_USES_MEM | PCI_ADDR1)
+
+
+/* array of board data directly indexed by pci_tbl[x].driver_data */
+static struct {
+	const char *name;
+	unsigned long flags;
+} natsemi_pci_info[] = {
+	{ "NatSemi DP8381[56]", PCI_IOTYPE },
+};
+
+static struct pci_device_id natsemi_pci_tbl[] = {
+	{ PCI_VENDOR_ID_NS, PCI_DEVICE_ID_NS_83815, PCI_ANY_ID, PCI_ANY_ID, },
+	{ 0, },
+};
+MODULE_DEVICE_TABLE(pci, natsemi_pci_tbl);
+
+/* Offsets to the device registers.
+   Unlike software-only systems, device drivers interact with complex hardware.
+   It's not useful to define symbolic names for every register bit in the
+   device.
+*/
+enum register_offsets {
+	ChipCmd			= 0x00,
+	ChipConfig		= 0x04,
+	EECtrl			= 0x08,
+	PCIBusCfg		= 0x0C,
+	IntrStatus		= 0x10,
+	IntrMask		= 0x14,
+	IntrEnable		= 0x18,
+	IntrHoldoff		= 0x16, /* DP83816 only */
+	TxRingPtr		= 0x20,
+	TxConfig		= 0x24,
+	RxRingPtr		= 0x30,
+	RxConfig		= 0x34,
+	ClkRun			= 0x3C,
+	WOLCmd			= 0x40,
+	PauseCmd		= 0x44,
+	RxFilterAddr		= 0x48,
+	RxFilterData		= 0x4C,
+	BootRomAddr		= 0x50,
+	BootRomData		= 0x54,
+	SiliconRev		= 0x58,
+	StatsCtrl		= 0x5C,
+	StatsData		= 0x60,
+	RxPktErrs		= 0x60,
+	RxMissed		= 0x68,
+	RxCRCErrs		= 0x64,
+	BasicControl		= 0x80,
+	BasicStatus		= 0x84,
+	AnegAdv			= 0x90,
+	AnegPeer		= 0x94,
+	PhyStatus		= 0xC0,
+	MIntrCtrl		= 0xC4,
+	MIntrStatus		= 0xC8,
+	PhyCtrl			= 0xE4,
+
+	/* These are from the spec, around page 78... on a separate table.
+	 * The meaning of these registers depends on the value of PGSEL. */
+	PGSEL			= 0xCC,
+	PMDCSR			= 0xE4,
+	TSTDAT			= 0xFC,
+	DSPCFG			= 0xF4,
+	SDCFG			= 0xF8
+};
+/* the values for the 'magic' registers above (PGSEL=1) */
+#define PMDCSR_VAL	0x189c	/* enable preferred adaptation circuitry */
+#define TSTDAT_VAL	0x0
+#define DSPCFG_VAL	0x5040
+#define SDCFG_VAL	0x008c	/* set voltage thresholds for Signal Detect */
+#define DSPCFG_LOCK	0x20	/* coefficient lock bit in DSPCFG */
+#define TSTDAT_FIXED	0xe8	/* magic number for bad coefficients */
+
+/* misc PCI space registers */
+enum pci_register_offsets {
+	PCIPM			= 0x44,
+};
+
+enum ChipCmd_bits {
+	ChipReset		= 0x100,
+	RxReset			= 0x20,
+	TxReset			= 0x10,
+	RxOff			= 0x08,
+	RxOn			= 0x04,
+	TxOff			= 0x02,
+	TxOn			= 0x01,
+};
+
+enum ChipConfig_bits {
+	CfgPhyDis		= 0x200,
+	CfgPhyRst		= 0x400,
+	CfgExtPhy		= 0x1000,
+	CfgAnegEnable		= 0x2000,
+	CfgAneg100		= 0x4000,
+	CfgAnegFull		= 0x8000,
+	CfgAnegDone		= 0x8000000,
+	CfgFullDuplex		= 0x20000000,
+	CfgSpeed100		= 0x40000000,
+	CfgLink			= 0x80000000,
+};
+
+enum EECtrl_bits {
+	EE_ShiftClk		= 0x04,
+	EE_DataIn		= 0x01,
+	EE_ChipSelect		= 0x08,
+	EE_DataOut		= 0x02,
+};
+
+enum PCIBusCfg_bits {
+	EepromReload		= 0x4,
+};
+
+/* Bits in the interrupt status/mask registers. */
+enum IntrStatus_bits {
+	IntrRxDone		= 0x0001,
+	IntrRxIntr		= 0x0002,
+	IntrRxErr		= 0x0004,
+	IntrRxEarly		= 0x0008,
+	IntrRxIdle		= 0x0010,
+	IntrRxOverrun		= 0x0020,
+	IntrTxDone		= 0x0040,
+	IntrTxIntr		= 0x0080,
+	IntrTxErr		= 0x0100,
+	IntrTxIdle		= 0x0200,
+	IntrTxUnderrun		= 0x0400,
+	StatsMax		= 0x0800,
+	SWInt			= 0x1000,
+	WOLPkt			= 0x2000,
+	LinkChange		= 0x4000,
+	IntrHighBits		= 0x8000,
+	RxStatusFIFOOver	= 0x10000,
+	IntrPCIErr		= 0xf00000,
+	RxResetDone		= 0x1000000,
+	TxResetDone		= 0x2000000,
+	IntrAbnormalSummary	= 0xCD20,
+};
+
+/*
+ * Default Interrupts:
+ * Rx OK, Rx Packet Error, Rx Overrun,
+ * Tx OK, Tx Packet Error, Tx Underrun,
+ * MIB Service, Phy Interrupt, High Bits,
+ * Rx Status FIFO overrun,
+ * Received Target Abort, Received Master Abort,
+ * Signalled System Error, Received Parity Error
+ */
+#define DEFAULT_INTR 0x00f1cd65
+
+enum TxConfig_bits {
+	TxDrthMask		= 0x3f,
+	TxFlthMask		= 0x3f00,
+	TxMxdmaMask		= 0x700000,
+	TxMxdma_512		= 0x0,
+	TxMxdma_4		= 0x100000,
+	TxMxdma_8		= 0x200000,
+	TxMxdma_16		= 0x300000,
+	TxMxdma_32		= 0x400000,
+	TxMxdma_64		= 0x500000,
+	TxMxdma_128		= 0x600000,
+	TxMxdma_256		= 0x700000,
+	TxCollRetry		= 0x800000,
+	TxAutoPad		= 0x10000000,
+	TxMacLoop		= 0x20000000,
+	TxHeartIgn		= 0x40000000,
+	TxCarrierIgn		= 0x80000000
+};
+
+enum RxConfig_bits {
+	RxDrthMask		= 0x3e,
+	RxMxdmaMask		= 0x700000,
+	RxMxdma_512		= 0x0,
+	RxMxdma_4		= 0x100000,
+	RxMxdma_8		= 0x200000,
+	RxMxdma_16		= 0x300000,
+	RxMxdma_32		= 0x400000,
+	RxMxdma_64		= 0x500000,
+	RxMxdma_128		= 0x600000,
+	RxMxdma_256		= 0x700000,
+	RxAcceptLong		= 0x8000000,
+	RxAcceptTx		= 0x10000000,
+	RxAcceptRunt		= 0x40000000,
+	RxAcceptErr		= 0x80000000
+};
+
+enum ClkRun_bits {
+	PMEEnable		= 0x100,
+	PMEStatus		= 0x8000,
+};
+
+enum WolCmd_bits {
+	WakePhy			= 0x1,
+	WakeUnicast		= 0x2,
+	WakeMulticast		= 0x4,
+	WakeBroadcast		= 0x8,
+	WakeArp			= 0x10,
+	WakePMatch0		= 0x20,
+	WakePMatch1		= 0x40,
+	WakePMatch2		= 0x80,
+	WakePMatch3		= 0x100,
+	WakeMagic		= 0x200,
+	WakeMagicSecure		= 0x400,
+	SecureHack		= 0x100000,
+	WokePhy			= 0x400000,
+	WokeUnicast		= 0x800000,
+	WokeMulticast		= 0x1000000,
+	WokeBroadcast		= 0x2000000,
+	WokeArp			= 0x4000000,
+	WokePMatch0		= 0x8000000,
+	WokePMatch1		= 0x10000000,
+	WokePMatch2		= 0x20000000,
+	WokePMatch3		= 0x40000000,
+	WokeMagic		= 0x80000000,
+	WakeOptsSummary		= 0x7ff
+};
+
+enum RxFilterAddr_bits {
+	RFCRAddressMask		= 0x3ff,
+	AcceptMulticast		= 0x00200000,
+	AcceptMyPhys		= 0x08000000,
+	AcceptAllPhys		= 0x10000000,
+	AcceptAllMulticast	= 0x20000000,
+	AcceptBroadcast		= 0x40000000,
+	RxFilterEnable		= 0x80000000
+};
+
+enum StatsCtrl_bits {
+	StatsWarn		= 0x1,
+	StatsFreeze		= 0x2,
+	StatsClear		= 0x4,
+	StatsStrobe		= 0x8,
+};
+
+enum MIntrCtrl_bits {
+	MICRIntEn		= 0x2,
+};
+
+enum PhyCtrl_bits {
+	PhyAddrMask		= 0xf,
+};
+
+/* values we might find in the silicon revision register */
+#define SRR_DP83815_C	0x0302
+#define SRR_DP83815_D	0x0403
+#define SRR_DP83816_A4	0x0504
+#define SRR_DP83816_A5	0x0505
+
+/* The Rx and Tx buffer descriptors. */
+/* Note that using only 32 bit fields simplifies conversion to big-endian
+   architectures. */
+struct netdev_desc {
+	u32 next_desc;
+	s32 cmd_status;
+	u32 addr;
+	u32 software_use;
+};
+
+/* Bits in network_desc.status */
+enum desc_status_bits {
+	DescOwn=0x80000000, DescMore=0x40000000, DescIntr=0x20000000,
+	DescNoCRC=0x10000000, DescPktOK=0x08000000,
+	DescSizeMask=0xfff,
+
+	DescTxAbort=0x04000000, DescTxFIFO=0x02000000,
+	DescTxCarrier=0x01000000, DescTxDefer=0x00800000,
+	DescTxExcDefer=0x00400000, DescTxOOWCol=0x00200000,
+	DescTxExcColl=0x00100000, DescTxCollCount=0x000f0000,
+
+	DescRxAbort=0x04000000, DescRxOver=0x02000000,
+	DescRxDest=0x01800000, DescRxLong=0x00400000,
+	DescRxRunt=0x00200000, DescRxInvalid=0x00100000,
+	DescRxCRC=0x00080000, DescRxAlign=0x00040000,
+	DescRxLoop=0x00020000, DesRxColl=0x00010000,
+};
+
+struct netdev_private {
+	/* Descriptor rings first for alignment */
+	dma_addr_t ring_dma;
+	struct netdev_desc *rx_ring;
+	struct netdev_desc *tx_ring;
+	/* The addresses of receive-in-place skbuffs */
+	struct rtskb *rx_skbuff[RX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t rx_dma[RX_RING_SIZE];
+	/* address of a sent-in-place packet/buffer, for later free() */
+	struct rtskb *tx_skbuff[TX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t tx_dma[TX_RING_SIZE];
+	struct net_device_stats stats;
+	/* Media monitoring timer */
+	struct timer_list timer;
+	/* Frequently used values: keep some adjacent for cache effect */
+	struct pci_dev *pci_dev;
+	struct netdev_desc *rx_head_desc;
+	/* Producer/consumer ring indices */
+	unsigned int cur_rx, dirty_rx;
+	unsigned int cur_tx, dirty_tx;
+	/* Based on MTU+slack. */
+	unsigned int rx_buf_sz;
+	int oom;
+	/* Do not touch the nic registers */
+	int hands_off;
+	/* These values keep track of the transceiver/media in use */
+	unsigned int full_duplex;
+	/* Rx filter */
+	u32 cur_rx_mode;
+	u32 rx_filter[16];
+	/* FIFO and PCI burst thresholds */
+	u32 tx_config, rx_config;
+	/* original contents of ClkRun register */
+	u32 SavedClkRun;
+	/* silicon revision */
+	u32 srr;
+	/* expected DSPCFG value */
+	u16 dspcfg;
+	/* MII transceiver section */
+	u16 advertising;
+	unsigned int iosize;
+	rtdm_lock_t lock;
+	u32 msg_enable;
+
+	rtdm_irq_t irq_handle;
+};
+
+static int eeprom_read(long ioaddr, int location);
+static int mdio_read(struct rtnet_device *dev, int phy_id, int reg);
+/*static void mdio_write(struct rtnet_device *dev, int phy_id, int reg, u16 data);*/
+static void natsemi_reset(struct rtnet_device *dev);
+static void natsemi_reload_eeprom(struct rtnet_device *dev);
+static void natsemi_stop_rxtx(struct rtnet_device *dev);
+static int netdev_open(struct rtnet_device *dev);
+static void do_cable_magic(struct rtnet_device *dev);
+static void undo_cable_magic(struct rtnet_device *dev);
+static void check_link(struct rtnet_device *dev);
+/*static void netdev_timer(unsigned long data);*/
+static void dump_ring(struct rtnet_device *dev);
+/*static void tx_timeout(struct rtnet_device *dev);*/
+static int alloc_ring(struct rtnet_device *dev);
+static void refill_rx(struct rtnet_device *dev);
+static void init_ring(struct rtnet_device *dev);
+static void drain_tx(struct rtnet_device *dev);
+static void drain_ring(struct rtnet_device *dev);
+static void free_ring(struct rtnet_device *dev);
+/*static void reinit_ring(struct rtnet_device *dev);*/
+static void init_registers(struct rtnet_device *dev);
+static int start_tx(struct rtskb *skb, struct rtnet_device *dev);
+static int intr_handler(rtdm_irq_t *irq_handle);
+static void netdev_error(struct rtnet_device *dev, int intr_status);
+static void netdev_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp);
+static void netdev_tx_done(struct rtnet_device *dev);
+static void __set_rx_mode(struct rtnet_device *dev);
+/*static void set_rx_mode(struct rtnet_device *dev);*/
+static void __get_stats(struct rtnet_device *rtdev);
+static struct net_device_stats *get_stats(struct rtnet_device *dev);
+/*static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);
+static int netdev_set_wol(struct rtnet_device *dev, u32 newval);
+static int netdev_get_wol(struct rtnet_device *dev, u32 *supported, u32 *cur);
+static int netdev_set_sopass(struct rtnet_device *dev, u8 *newval);
+static int netdev_get_sopass(struct rtnet_device *dev, u8 *data);
+static int netdev_get_ecmd(struct rtnet_device *dev, struct ethtool_cmd *ecmd);
+static int netdev_set_ecmd(struct rtnet_device *dev, struct ethtool_cmd *ecmd);
+static void enable_wol_mode(struct rtnet_device *dev, int enable_intr);*/
+static int netdev_close(struct rtnet_device *dev);
+/*static int netdev_get_regs(struct rtnet_device *dev, u8 *buf);
+static int netdev_get_eeprom(struct rtnet_device *dev, u8 *buf);*/
+
+
+static int natsemi_probe1 (struct pci_dev *pdev,
+	const struct pci_device_id *ent)
+{
+	struct rtnet_device *dev; /*** RTnet ***/
+	struct netdev_private *np;
+	int i, option, irq, chip_idx = ent->driver_data;
+	static int find_cnt = -1;
+	unsigned long ioaddr, iosize;
+	const int pcibar = 1; /* PCI base address register */
+	int prev_eedata;
+	u32 tmp;
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	static int printed_version;
+	if (!printed_version++)
+		rtdm_printk(version);
+#endif
+
+	i = pci_enable_device(pdev);
+	if (i) return i;
+
+	/* natsemi has a non-standard PM control register
+	 * in PCI config space.  Some boards apparently need
+	 * to be brought to D0 in this manner.
+	 */
+	pci_read_config_dword(pdev, PCIPM, &tmp);
+	if (tmp & PCI_PM_CTRL_STATE_MASK) {
+		/* D0 state, disable PME assertion */
+		u32 newtmp = tmp & ~PCI_PM_CTRL_STATE_MASK;
+		pci_write_config_dword(pdev, PCIPM, newtmp);
+	}
+
+	find_cnt++;
+	ioaddr = pci_resource_start(pdev, pcibar);
+	iosize = pci_resource_len(pdev, pcibar);
+	irq = pdev->irq;
+
+/*** RTnet ***/
+	if (cards[find_cnt] == 0)
+		goto err_out;
+/*** RTnet ***/
+
+	if (natsemi_pci_info[chip_idx].flags & PCI_USES_MASTER)
+		pci_set_master(pdev);
+
+/*** RTnet ***/
+	dev = rt_alloc_etherdev(sizeof(struct netdev_private),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL) {
+		rtdm_printk(KERN_ERR "init_ethernet failed for card #%d\n", find_cnt);
+		goto err_out;
+	}
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+/*** RTnet ***/
+
+	i = pci_request_regions(pdev, dev->name);
+	if (i) {
+/*** RTnet ***/
+		rt_rtdev_disconnect(dev);
+		rtdev_free(dev);
+/*** RTnet ***/
+		return i;
+	}
+
+	{
+		void *mmio = ioremap (ioaddr, iosize);
+		if (!mmio) {
+			pci_release_regions(pdev);
+/*** RTnet ***/
+			rt_rtdev_disconnect(dev);
+			rtdev_free(dev);
+/*** RTnet ***/
+			return -ENOMEM;
+		}
+		ioaddr = (unsigned long) mmio;
+	}
+
+	/* Work around the dropped serial bit. */
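+	/* Each MAC address byte straddles two 16-bit EEPROM words, offset
+	 * by one bit, so every byte is reassembled from the current word
+	 * and the top bit of the previous one. */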
+	prev_eedata = eeprom_read(ioaddr, 6);
+	for (i = 0; i < 3; i++) {
+		int eedata = eeprom_read(ioaddr, i + 7);
+		dev->dev_addr[i*2] = (eedata << 1) + (prev_eedata >> 15);
+		dev->dev_addr[i*2+1] = eedata >> 7;
+		prev_eedata = eedata;
+	}
+
+	dev->base_addr = ioaddr;
+	dev->irq = irq;
+
+	np = dev->priv;
+
+	np->pci_dev = pdev;
+	pci_set_drvdata(pdev, dev);
+	np->iosize = iosize;
+	rtdm_lock_init(&np->lock);
+	np->msg_enable = (local_debug >= 0) ? (1<<local_debug)-1 : NATSEMI_DEF_MSG;
+	np->hands_off = 0;
+
+	/* Reset the chip to erase previous misconfiguration. */
+	natsemi_reload_eeprom(dev);
+	natsemi_reset(dev);
+
+	option = find_cnt < MAX_UNITS ? options[find_cnt] : 0;
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	/* The lower four bits are the media type. */
+	if (option) {
+		if (option & 0x200)
+			np->full_duplex = 1;
+		if (option & 15)
+			rtdm_printk(KERN_INFO
+				"%s: ignoring user supplied media type %d",
+				dev->name, option & 15);
+	}
+	if (find_cnt < MAX_UNITS  &&  full_duplex[find_cnt])
+		np->full_duplex = 1;
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = &netdev_open;
+	dev->hard_start_xmit = &start_tx;
+	dev->stop = &netdev_close;
+	dev->get_stats = &get_stats;
+/*** RTnet ***
+	dev->set_multicast_list = &set_rx_mode;
+	dev->do_ioctl = &netdev_ioctl;
+	dev->tx_timeout = &tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+  *** RTnet ***/
+
+	if (mtu)
+		dev->mtu = mtu;
+
+/*** RTnet ***/
+	i = rt_register_rtnetdev(dev);
+	if (i) {
+		goto err_out_unmap;
+	}
+/*** RTnet ***/
+
+	rtnetif_carrier_off(dev);
+
+	if (netif_msg_drv(np)) {
+		rtdm_printk(KERN_INFO "%s: %s at %#08lx, ",
+			dev->name, natsemi_pci_info[chip_idx].name, ioaddr);
+		for (i = 0; i < ETH_ALEN-1; i++)
+				rtdm_printk("%02x:", dev->dev_addr[i]);
+		rtdm_printk("%02x, IRQ %d.\n", dev->dev_addr[i], irq);
+	}
+
+	np->advertising = mdio_read(dev, 1, MII_ADVERTISE);
+	if ((readl((void *)(ioaddr + ChipConfig)) & 0xe000) != 0xe000
+	 && netif_msg_probe(np)) {
+		u32 chip_config = readl((void *)(ioaddr + ChipConfig));
+		rtdm_printk(KERN_INFO "%s: Transceiver default autonegotiation %s "
+			"10%s %s duplex.\n",
+			dev->name,
+			chip_config & CfgAnegEnable ?
+			  "enabled, advertise" : "disabled, force",
+			chip_config & CfgAneg100 ? "0" : "",
+			chip_config & CfgAnegFull ? "full" : "half");
+	}
+	if (netif_msg_probe(np))
+		rtdm_printk(KERN_INFO
+			"%s: Transceiver status %#04x advertising %#04x.\n",
+			dev->name, mdio_read(dev, 1, MII_BMSR),
+			np->advertising);
+
+	/* save the silicon revision for later querying */
+	np->srr = readl((void *)(ioaddr + SiliconRev));
+	if (netif_msg_hw(np))
+		rtdm_printk(KERN_INFO "%s: silicon revision %#04x.\n",
+				dev->name, np->srr);
+
+
+	return 0;
+
+err_out_unmap:
+	iounmap((void *)ioaddr);
+	pci_release_regions(pdev);
+/*err_out_free_netdev:*/
+/*** RTnet ***/
+	rt_rtdev_disconnect(dev);
+	rtdev_free(dev);
+/*** RTnet ***/
+err_out:
+	return -ENODEV;
+
+}
+
+
+/* Read the EEPROM and MII Management Data I/O (MDIO) interfaces.
+   The EEPROM code is for the common 93c06/46 EEPROMs with 6 bit addresses. */
+
+/* Delay between EEPROM clock transitions.
+   No extra delay is needed with 33 MHz PCI, but future 66 MHz access may need
+   a delay.  Note that pre-2.0.34 kernels had a cache-alignment bug that
+   made udelay() unreliable.
+   The old method of using an ISA access as a delay, __SLOW_DOWN_IO__, is
+   deprecated.
+*/
+#define eeprom_delay(ee_addr)	readl((void *)(ee_addr))
+
+#define EE_Write0 (EE_ChipSelect)
+#define EE_Write1 (EE_ChipSelect | EE_DataIn)
+
+/* The EEPROM commands include the always-set leading bit. */
+enum EEPROM_Cmds {
+	EE_WriteCmd=(5 << 6), EE_ReadCmd=(6 << 6), EE_EraseCmd=(7 << 6),
+};
+
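+/* Bit-bang one 93c06/46 read cycle: assert chip select, shift the 11-bit
+ * read command out MSB first, then clock in 16 data bits (the first bit
+ * read lands in bit 0 of the return value), and deselect the chip.
+ */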
+static int eeprom_read(long addr, int location)
+{
+	int i;
+	int retval = 0;
+	long ee_addr = addr + EECtrl;
+	int read_cmd = location | EE_ReadCmd;
+	writel(EE_Write0, (void *)ee_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 10; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_Write1 : EE_Write0;
+		writel(dataval, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+		writel(dataval | EE_ShiftClk, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+	}
+	writel(EE_ChipSelect, (void *)ee_addr);
+	eeprom_delay(ee_addr);
+
+	for (i = 0; i < 16; i++) {
+		writel(EE_ChipSelect | EE_ShiftClk, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+		retval |= (readl((void *)ee_addr) & EE_DataOut) ? 1 << i : 0;
+		writel(EE_ChipSelect, (void *)ee_addr);
+		eeprom_delay(ee_addr);
+	}
+
+	/* Terminate the EEPROM access. */
+	writel(EE_Write0, (void *)ee_addr);
+	writel(0, (void *)ee_addr);
+	return retval;
+}
+
+/* MII transceiver control section.
+ * The 83815 series has an internal transceiver, and we present the
+ * management registers as if they were MII connected. */
+
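+/* The internal PHY's registers appear in chip register space at
+ * BasicControl + 4*reg, hence the (reg<<2) below; requests for any
+ * other phy_id read as 0xffff.
+ */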
+static int mdio_read(struct rtnet_device *dev, int phy_id, int reg)
+{
+	if (phy_id == 1 && reg < 32)
+		return readl((void *)(dev->base_addr+BasicControl+(reg<<2)))&0xffff;
+	else
+		return 0xffff;
+}
+/*** RTnet
+static void mdio_write(struct rtnet_device *dev, int phy_id, int reg, u16 data)
+{
+	struct netdev_private *np = dev->priv;
+	if (phy_id == 1 && reg < 32) {
+		writew(data, dev->base_addr+BasicControl+(reg<<2));
+		switch (reg) {
+			case MII_ADVERTISE: np->advertising = data; break;
+		}
+	}
+}
+RTnet ***/
+/* CFG bits [13:16] [18:23] */
+#define CFG_RESET_SAVE 0xfde000
+/* WCSR bits [0:4] [9:10] */
+#define WCSR_RESET_SAVE 0x61f
+/* RFCR bits [20] [22] [27:31] */
+#define RFCR_RESET_SAVE 0xf8500000
+
+static void natsemi_reset(struct rtnet_device *dev)
+{
+	int i;
+	u32 cfg;
+	u32 wcsr;
+	u32 rfcr;
+	u16 pmatch[3];
+	u16 sopass[3];
+	struct netdev_private *np = dev->priv;
+
+	/*
+	 * Resetting the chip causes some registers to be lost.
+	 * Natsemi suggests NOT reloading the EEPROM while live, so instead
+	 * we save the state that would have been loaded from EEPROM
+	 * on a normal power-up (see the spec EEPROM map).  This assumes
+	 * whoever calls this will follow up with init_registers() eventually.
+	 */
+
+	/* CFG */
+	cfg = readl((void *)(dev->base_addr + ChipConfig)) & CFG_RESET_SAVE;
+	/* WCSR */
+	wcsr = readl((void *)(dev->base_addr + WOLCmd)) & WCSR_RESET_SAVE;
+	/* RFCR */
+	rfcr = readl((void *)(dev->base_addr + RxFilterAddr)) & RFCR_RESET_SAVE;
+	/* PMATCH */
+	for (i = 0; i < 3; i++) {
+		writel(i*2, (void *)(dev->base_addr + RxFilterAddr));
+		pmatch[i] = readw((void *)(dev->base_addr + RxFilterData));
+	}
+	/* SOPAS */
+	for (i = 0; i < 3; i++) {
+		writel(0xa+(i*2), (void *)(dev->base_addr + RxFilterAddr));
+		sopass[i] = readw((void *)(dev->base_addr + RxFilterData));
+	}
+
+	/* now whack the chip */
+	writel(ChipReset, (void *)(dev->base_addr + ChipCmd));
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		if (!(readl((void *)(dev->base_addr + ChipCmd)) & ChipReset))
+			break;
+		udelay(5);
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		rtdm_printk(KERN_WARNING "%s: reset did not complete in %d usec.\n",
+			dev->name, i*5);
+	} else if (netif_msg_hw(np)) {
+		rtdm_printk(KERN_DEBUG "%s: reset completed in %d usec.\n",
+			dev->name, i*5);
+	}
+
+	/* restore CFG */
+	cfg |= readl((void *)(dev->base_addr + ChipConfig)) & ~CFG_RESET_SAVE;
+	writel(cfg, (void *)(dev->base_addr + ChipConfig));
+	/* restore WCSR */
+	wcsr |= readl((void *)(dev->base_addr + WOLCmd)) & ~WCSR_RESET_SAVE;
+	writel(wcsr, (void *)(dev->base_addr + WOLCmd));
+	/* read RFCR */
+	rfcr |= readl((void *)(dev->base_addr + RxFilterAddr)) & ~RFCR_RESET_SAVE;
+	/* restore PMATCH */
+	for (i = 0; i < 3; i++) {
+		writel(i*2, (void *)(dev->base_addr + RxFilterAddr));
+		writew(pmatch[i], (void *)(dev->base_addr + RxFilterData));
+	}
+	for (i = 0; i < 3; i++) {
+		writel(0xa+(i*2), (void *)(dev->base_addr + RxFilterAddr));
+		writew(sopass[i], (void *)(dev->base_addr + RxFilterData));
+	}
+	/* restore RFCR */
+	writel(rfcr, (void *)(dev->base_addr + RxFilterAddr));
+}
+
+static void natsemi_reload_eeprom(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	writel(EepromReload, (void *)(dev->base_addr + PCIBusCfg));
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		udelay(50);
+		if (!(readl((void *)(dev->base_addr + PCIBusCfg)) & EepromReload))
+			break;
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		rtdm_printk(KERN_WARNING "%s: EEPROM did not reload in %d usec.\n",
+			dev->name, i*50);
+	} else if (netif_msg_hw(np)) {
+		rtdm_printk(KERN_DEBUG "%s: EEPROM reloaded in %d usec.\n",
+			dev->name, i*50);
+	}
+}
+
+static void natsemi_stop_rxtx(struct rtnet_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	writel(RxOff | TxOff, (void *)(ioaddr + ChipCmd));
+	for(i=0;i< NATSEMI_HW_TIMEOUT;i++) {
+		if ((readl((void *)(ioaddr + ChipCmd)) & (TxOn|RxOn)) == 0)
+			break;
+		udelay(5);
+	}
+	if (i==NATSEMI_HW_TIMEOUT) {
+		rtdm_printk(KERN_WARNING "%s: Tx/Rx process did not stop in %d usec.\n",
+			dev->name, i*5);
+	} else if (netif_msg_hw(np)) {
+		rtdm_printk(KERN_DEBUG "%s: Tx/Rx process stopped in %d usec.\n",
+			dev->name, i*5);
+	}
+}
+
+static int netdev_open(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	/* Reset the chip, just in case. */
+	natsemi_reset(dev);
+
+/*** RTnet ***/
+	rt_stack_connect(dev, &STACK_manager);
+	i = rtdm_irq_request(&np->irq_handle, dev->irq, intr_handler,
+			     RTDM_IRQTYPE_SHARED, "rt_natsemi", dev);
+/*** RTnet ***/
+/*	i = request_irq(dev->irq, &intr_handler, SA_SHIRQ, dev->name, dev);*/
+	if (i) {
+		return i;
+	}
+
+	if (netif_msg_ifup(np))
+		rtdm_printk(KERN_DEBUG "%s: netdev_open() irq %d.\n",
+			dev->name, dev->irq);
+	i = alloc_ring(dev);
+	if (i < 0) {
+		rtdm_irq_free(&np->irq_handle);
+		return i;
+	}
+	init_ring(dev);
+	init_registers(dev);
+	/* now set the MAC address according to dev->dev_addr */
+	for (i = 0; i < 3; i++) {
+		u16 mac = (dev->dev_addr[2*i+1]<<8) + dev->dev_addr[2*i];
+
+		writel(i*2, (void *)(ioaddr + RxFilterAddr));
+		writew(mac, (void *)(ioaddr + RxFilterData));
+	}
+	writel(np->cur_rx_mode, (void *)(ioaddr + RxFilterAddr));
+
+	rtnetif_start_queue(dev); /*** RTnet ***/
+
+	if (netif_msg_ifup(np))
+		rtdm_printk(KERN_DEBUG "%s: Done netdev_open(), status: %#08x.\n",
+			dev->name, (int)readl((void *)(ioaddr + ChipCmd)));
+
+/*** RTnet ***/
+	/* Set the timer to check for link beat. */
+/*** RTnet ***/
+
+	return 0;
+}
+
+static void do_cable_magic(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	if (np->srr >= SRR_DP83816_A5)
+		return;
+
+	/*
+	 * 100 MBit links with short cables can trip an issue with the chip.
+	 * The problem manifests as lots of CRC errors and/or flickering
+	 * activity LED while idle.  This process is based on instructions
+	 * from engineers at National.
+	 */
+	if (readl((void *)(dev->base_addr + ChipConfig)) & CfgSpeed100) {
+		u16 data;
+
+		writew(1, (void *)(dev->base_addr + PGSEL));
+		/*
+		 * coefficient visibility should already be enabled via
+		 * DSPCFG | 0x1000
+		 */
+		data = readw((void *)(dev->base_addr + TSTDAT)) & 0xff;
+		/*
+		 * the value must be negative, and within certain values
+		 * (these values all come from National)
+		 */
+		if (!(data & 0x80) || ((data >= 0xd8) && (data <= 0xff))) {
+			struct netdev_private *np = dev->priv;
+
+			/* the bug has been triggered - fix the coefficient */
+			writew(TSTDAT_FIXED, (void *)(dev->base_addr + TSTDAT));
+			/* lock the value */
+			data = readw((void *)(dev->base_addr + DSPCFG));
+			np->dspcfg = data | DSPCFG_LOCK;
+			writew(np->dspcfg, (void *)(dev->base_addr + DSPCFG));
+		}
+		writew(0, (void *)(dev->base_addr + PGSEL));
+	}
+}
+
+static void undo_cable_magic(struct rtnet_device *dev)
+{
+	u16 data;
+	struct netdev_private *np = dev->priv;
+
+	if (np->srr >= SRR_DP83816_A5)
+		return;
+
+	writew(1, (void *)(dev->base_addr + PGSEL));
+	/* make sure the lock bit is clear */
+	data = readw((void *)(dev->base_addr + DSPCFG));
+	np->dspcfg = data & ~DSPCFG_LOCK;
+	writew(np->dspcfg, (void *)(dev->base_addr + DSPCFG));
+	writew(0, (void *)(dev->base_addr + PGSEL));
+}
+
+static void check_link(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+	int duplex;
+	int chipcfg = readl((void *)(ioaddr + ChipConfig));
+
+	if (!(chipcfg & CfgLink)) {
+		if (rtnetif_carrier_ok(dev)) {
+			if (netif_msg_link(np))
+				rtdm_printk(KERN_NOTICE "%s: link down.\n",
+					dev->name);
+			rtnetif_carrier_off(dev);
+			undo_cable_magic(dev);
+		}
+		return;
+	}
+	if (!rtnetif_carrier_ok(dev)) {
+		if (netif_msg_link(np))
+			rtdm_printk(KERN_NOTICE "%s: link up.\n", dev->name);
+		rtnetif_carrier_on(dev);
+		do_cable_magic(dev);
+	}
+
+	duplex = np->full_duplex || (chipcfg & CfgFullDuplex ? 1 : 0);
+
+	/* if duplex is set then bit 28 must be set, too */
+	if (duplex ^ !!(np->rx_config & RxAcceptTx)) {
+		if (netif_msg_link(np))
+			rtdm_printk(KERN_INFO
+				"%s: Setting %s-duplex based on negotiated "
+				"link capability.\n", dev->name,
+				duplex ? "full" : "half");
+		if (duplex) {
+			np->rx_config |= RxAcceptTx;
+			np->tx_config |= TxCarrierIgn | TxHeartIgn;
+		} else {
+			np->rx_config &= ~RxAcceptTx;
+			np->tx_config &= ~(TxCarrierIgn | TxHeartIgn);
+		}
+		writel(np->tx_config, (void *)(ioaddr + TxConfig));
+		writel(np->rx_config, (void *)(ioaddr + RxConfig));
+	}
+}
+
+static void init_registers(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+	int i;
+
+	for (i=0;i<NATSEMI_HW_TIMEOUT;i++) {
+		if (readl((void *)(dev->base_addr + ChipConfig)) & CfgAnegDone)
+			break;
+		udelay(10);
+	}
+	if (i==NATSEMI_HW_TIMEOUT && netif_msg_link(np)) {
+		rtdm_printk(KERN_INFO
+			"%s: autonegotiation did not complete in %d usec.\n",
+			dev->name, i*10);
+	}
+
+	/* On page 78 of the spec, they recommend some settings for "optimum
+	   performance" to be done in sequence.  These settings optimize some
+	   of the 100Mbit autodetection circuitry.  They say we only want to
+	   do this for rev C of the chip, but engineers at NSC (Bradley
+	   Kennedy) recommend always setting them.  If you don't, you get
+	   errors on some autonegotiations that make the device unusable.
+	*/
+	writew(1, (void *)(ioaddr + PGSEL));
+	writew(PMDCSR_VAL, (void *)(ioaddr + PMDCSR));
+	writew(TSTDAT_VAL, (void *)(ioaddr + TSTDAT));
+	writew(DSPCFG_VAL, (void *)(ioaddr + DSPCFG));
+	writew(SDCFG_VAL, (void *)(ioaddr + SDCFG));
+	writew(0, (void *)(ioaddr + PGSEL));
+	np->dspcfg = DSPCFG_VAL;
+
+	/* Enable PHY Specific event based interrupts.  Link state change
+	   and Auto-Negotiation Completion are among the affected.
+	   Read the intr status to clear it (needed for wake events).
+	*/
+	readw((void *)(ioaddr + MIntrStatus));
+	writew(MICRIntEn, (void *)(ioaddr + MIntrCtrl));
+
+	/* clear any interrupts that are pending, such as wake events */
+	readl((void *)(ioaddr + IntrStatus));
+
+	writel(np->ring_dma, (void *)(ioaddr + RxRingPtr));
+	writel(np->ring_dma + RX_RING_SIZE * sizeof(struct netdev_desc),
+		(void *)(ioaddr + TxRingPtr));
+
+	/* Initialize other registers.
+	 * Configure the PCI bus bursts and FIFO thresholds.
+	 * Configure for standard, in-spec Ethernet.
+	 * Start with half-duplex. check_link will update
+	 * to the correct settings.
+	 */
+
+	/* DRTH: 2: start tx if 64 bytes are in the fifo
+	 * FLTH: 0x10: refill with next packet if 512 bytes are free
+	 * MXDMA: 0: up to 256 byte bursts.
+	 *	MXDMA must be <= FLTH
+	 * ECRETRY=1
+	 * ATP=1
+	 */
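+	/* With TxFlthMask = 0x3f00 and TxDrthMask = 0x3f, the literal 0x1002
+	 * encodes FLTH = 0x10 (bits 8-13) and DRTH = 2 (bits 0-5).
+	 */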
+	np->tx_config = TxAutoPad | TxCollRetry | TxMxdma_256 | (0x1002);
+	writel(np->tx_config, (void *)(ioaddr + TxConfig));
+
+	/* DRTH 0x10: start copying to memory if 128 bytes are in the fifo
+	 * MXDMA 0: up to 256 byte bursts
+	 */
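+	/* RxDrthMask is 0x3e, so the DRTH value 0x10 sits in bits 1-5:
+	 * 0x10 << 1 == 0x20.
+	 */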
+	np->rx_config = RxMxdma_256 | 0x20;
+	writel(np->rx_config, (void *)(ioaddr + RxConfig));
+
+	/* Disable PME:
+	 * The PME bit is initialized from the EEPROM contents.
+	 * PCI cards probably have PME disabled, but motherboard
+	 * implementations may have PME set to enable WakeOnLan.
+	 * With PME set the chip will scan incoming packets but
+	 * nothing will be written to memory. */
+	np->SavedClkRun = readl((void *)(ioaddr + ClkRun));
+	writel(np->SavedClkRun & ~PMEEnable, (void *)(ioaddr + ClkRun));
+	if (np->SavedClkRun & PMEStatus && netif_msg_wol(np)) {
+		rtdm_printk(KERN_NOTICE "%s: Wake-up event %#08x\n",
+			dev->name, readl((void *)(ioaddr + WOLCmd)));
+	}
+
+	check_link(dev);
+	__set_rx_mode(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	writel(DEFAULT_INTR, (void *)(ioaddr + IntrMask));
+	writel(1, (void *)(ioaddr + IntrEnable));
+
+	writel(RxOn | TxOn, (void *)(ioaddr + ChipCmd));
+	writel(StatsClear, (void *)(ioaddr + StatsCtrl)); /* Clear Stats */
+}
+
+/*
+ * netdev_timer:
+ * Purpose:
+ * 1) check for link changes. Usually they are handled by the MII interrupt
+ *    but it doesn't hurt to check twice.
+ * 2) check for sudden death of the NIC:
+ *    It seems that a reference set for this chip went out with incorrect info,
+ *    and there exist boards that aren't quite right.  An unexpected voltage
+ *    drop can cause the PHY to get itself in a weird state (basically reset).
+ *    NOTE: this only seems to affect revC chips.
+ * 3) check for death of the RX path due to OOM
+ */
+/*** RTnet ***/
+/*** RTnet ***/
+
+static void dump_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	if (netif_msg_pktdata(np)) {
+		int i;
+		rtdm_printk(KERN_DEBUG "  Tx ring at %p:\n", np->tx_ring);
+		for (i = 0; i < TX_RING_SIZE; i++) {
+			rtdm_printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+				i, np->tx_ring[i].next_desc,
+				np->tx_ring[i].cmd_status,
+				np->tx_ring[i].addr);
+		}
+		rtdm_printk(KERN_DEBUG "  Rx ring %p:\n", np->rx_ring);
+		for (i = 0; i < RX_RING_SIZE; i++) {
+			rtdm_printk(KERN_DEBUG " #%d desc. %#08x %#08x %#08x.\n",
+				i, np->rx_ring[i].next_desc,
+				np->rx_ring[i].cmd_status,
+				np->rx_ring[i].addr);
+		}
+	}
+}
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int alloc_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
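+
+	/* Both descriptor rings live in one coherent DMA block: RX_RING_SIZE
+	 * RX descriptors followed by TX_RING_SIZE TX descriptors, which is
+	 * why init_registers() programs TxRingPtr at ring_dma plus the size
+	 * of the RX ring. */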
+	np->rx_ring = dma_alloc_coherent(&np->pci_dev->dev,
+		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+		&np->ring_dma, GFP_ATOMIC);
+	if (!np->rx_ring)
+		return -ENOMEM;
+	np->tx_ring = &np->rx_ring[RX_RING_SIZE];
+	return 0;
+}
+
+static void refill_rx(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	/* Refill the Rx ring buffers. */
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		struct rtskb *skb;
+		int entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			skb = rtnetdev_alloc_rtskb(dev, np->rx_buf_sz);
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				break; /* Better luck next round. */
+			np->rx_dma[entry] = dma_map_single(&np->pci_dev->dev,
+				skb->data, np->rx_buf_sz, DMA_FROM_DEVICE);
+			np->rx_ring[entry].addr = cpu_to_le32(np->rx_dma[entry]);
+		}
+		np->rx_ring[entry].cmd_status = cpu_to_le32(np->rx_buf_sz);
+	}
+	if (np->cur_rx - np->dirty_rx == RX_RING_SIZE) {
+		if (netif_msg_rx_err(np))
+			rtdm_printk(KERN_WARNING "%s: going OOM.\n", dev->name);
+		np->oom = 1;
+	}
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void init_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	/* 1) TX ring */
+	np->dirty_tx = np->cur_tx = 0;
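+	/* next_desc chains each descriptor to the bus address of its
+	 * successor; the TX ring starts RX_RING_SIZE entries into the shared
+	 * DMA block, and the last entry wraps back to the first. */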
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = NULL;
+		np->tx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+			+sizeof(struct netdev_desc)
+			*((i+1)%TX_RING_SIZE+RX_RING_SIZE));
+		np->tx_ring[i].cmd_status = 0;
+	}
+
+	/* 2) RX ring */
+	np->dirty_rx = 0;
+	np->cur_rx = RX_RING_SIZE;
+	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+	np->oom = 0;
+	np->rx_head_desc = &np->rx_ring[0];
+
+	/* Please be careful before changing this loop - at least gcc-2.95.1
+	 * miscompiles it otherwise.
+	 */
+	/* Initialize all Rx descriptors. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].next_desc = cpu_to_le32(np->ring_dma
+				+sizeof(struct netdev_desc)
+				*((i+1)%RX_RING_SIZE));
+		np->rx_ring[i].cmd_status = cpu_to_le32(DescOwn);
+		np->rx_skbuff[i] = NULL;
+	}
+	refill_rx(dev);
+	dump_ring(dev);
+}
+
+static void drain_tx(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (np->tx_skbuff[i]) {
+			dma_unmap_single(&np->pci_dev->dev,
+				np->tx_dma[i], np->tx_skbuff[i]->len,
+				DMA_TO_DEVICE);
+			dev_kfree_rtskb(np->tx_skbuff[i]);
+			np->stats.tx_dropped++;
+		}
+		np->tx_skbuff[i] = NULL;
+	}
+}
+
+static void drain_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].cmd_status = 0;
+		np->rx_ring[i].addr = 0xBADF00D0; /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+			dma_unmap_single(&np->pci_dev->dev,
+				np->rx_dma[i], np->rx_skbuff[i]->len,
+				DMA_FROM_DEVICE);
+			dev_kfree_rtskb(np->rx_skbuff[i]);
+		}
+		np->rx_skbuff[i] = NULL;
+	}
+	drain_tx(dev);
+}
+
+static void free_ring(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	dma_free_coherent(&np->pci_dev->dev,
+		sizeof(struct netdev_desc) * (RX_RING_SIZE+TX_RING_SIZE),
+		np->rx_ring, np->ring_dma);
+}
+
+static int start_tx(struct rtskb *skb, struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	unsigned entry;
+/*** RTnet ***/
+	rtdm_lockctx_t context;
+/*** RTnet ***/
+
+	/* Note: Ordering is important here, set the field with the
+	   "ownership" bit last, and only then increment cur_tx. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+
+	np->tx_skbuff[entry] = skb;
+	np->tx_dma[entry] = dma_map_single(&np->pci_dev->dev,
+				skb->data,skb->len, DMA_TO_DEVICE);
+
+	np->tx_ring[entry].addr = cpu_to_le32(np->tx_dma[entry]);
+
+/*	spin_lock_irq(&np->lock);*/
+/*** RTnet ***/
+	rtdm_lock_get_irqsave(&np->lock, context);
+/*** RTnet ***/
+
+	if (!np->hands_off) {
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp)
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+				*skb->xmit_stamp);
+		np->tx_ring[entry].cmd_status = cpu_to_le32(DescOwn | skb->len);
+		/* StrongARM: Explicitly cache flush np->tx_ring and
+		 * skb->data,skb->len. */
+		wmb();
+		np->cur_tx++;
+		if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1) {
+			netdev_tx_done(dev);
+			if (np->cur_tx - np->dirty_tx >= TX_QUEUE_LEN - 1)
+				rtnetif_stop_queue(dev);
+		}
+		/* Wake the potentially-idle transmit channel. */
+		writel(TxOn, (void *)(dev->base_addr + ChipCmd));
+	} else {
+		dev_kfree_rtskb(skb); /*** RTnet ***/
+		np->stats.tx_dropped++;
+	}
+
+/*	spin_unlock_irq(&np->lock);*/
+/*** RTnet ***/
+	rtdm_lock_put_irqrestore(&np->lock, context);
+/*** RTnet ***/
+
+/*	dev->trans_start = jiffies;*/
+
+	if (netif_msg_tx_queued(np)) {
+		rtdm_printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n",
+			dev->name, np->cur_tx, entry);
+	}
+	return 0;
+}
+
+static void netdev_tx_done(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+
+	for (; np->cur_tx - np->dirty_tx > 0; np->dirty_tx++) {
+		int entry = np->dirty_tx % TX_RING_SIZE;
+		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescOwn))
+			break;
+		if (netif_msg_tx_done(np))
+			rtdm_printk(KERN_DEBUG
+				"%s: tx frame #%d finished, status %#08x.\n",
+					dev->name, np->dirty_tx,
+					le32_to_cpu(np->tx_ring[entry].cmd_status));
+		if (np->tx_ring[entry].cmd_status & cpu_to_le32(DescPktOK)) {
+			np->stats.tx_packets++;
+			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+		} else { /* Various Tx errors */
+			int tx_status =
+				le32_to_cpu(np->tx_ring[entry].cmd_status);
+			if (tx_status & (DescTxAbort|DescTxExcColl))
+				np->stats.tx_aborted_errors++;
+			if (tx_status & DescTxFIFO)
+				np->stats.tx_fifo_errors++;
+			if (tx_status & DescTxCarrier)
+				np->stats.tx_carrier_errors++;
+			if (tx_status & DescTxOOWCol)
+				np->stats.tx_window_errors++;
+			np->stats.tx_errors++;
+		}
+		dma_unmap_single(&np->pci_dev->dev,np->tx_dma[entry],
+				np->tx_skbuff[entry]->len,
+				DMA_TO_DEVICE);
+		/* Free the original skb. */
+		dev_kfree_rtskb(np->tx_skbuff[entry]); /*** RTnet ***/
+/*		dev_kfree_skb_irq(np->tx_skbuff[entry]);*/
+		np->tx_skbuff[entry] = NULL;
+	}
+	if (rtnetif_queue_stopped(dev)
+		&& np->cur_tx - np->dirty_tx < TX_QUEUE_LEN - 4) {
+		/* The ring is no longer full, wake queue. */
+		rtnetif_wake_queue(dev);
+	}
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int intr_handler(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+	struct rtnet_device *dev =
+	    rtdm_irq_get_arg(irq_handle, struct rtnet_device); /*** RTnet ***/
+	struct netdev_private *np = dev->priv;
+	unsigned int old_packet_cnt = np->stats.rx_packets; /*** RTnet ***/
+	long ioaddr = dev->base_addr;
+	int boguscnt = max_interrupt_work;
+	int ret = RTDM_IRQ_NONE;
+
+	if (np->hands_off)
+		return ret;
+	do {
+		/* Reading automatically acknowledges all int sources. */
+		u32 intr_status = readl((void *)(ioaddr + IntrStatus));
+
+		if (netif_msg_intr(np))
+			rtdm_printk(KERN_DEBUG
+				"%s: Interrupt, status %#08x, mask %#08x.\n",
+				dev->name, intr_status,
+				readl((void *)(ioaddr + IntrMask)));
+
+		if (intr_status == 0)
+			break;
+
+		ret = RTDM_IRQ_HANDLED;
+
+		if (intr_status &
+		   (IntrRxDone | IntrRxIntr | RxStatusFIFOOver |
+		    IntrRxErr | IntrRxOverrun)) {
+			netdev_rx(dev, &time_stamp);
+		}
+
+		if (intr_status &
+		   (IntrTxDone | IntrTxIntr | IntrTxIdle | IntrTxErr)) {
+			rtdm_lock_get(&np->lock);
+			netdev_tx_done(dev);
+			rtdm_lock_put(&np->lock);
+		}
+
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & IntrAbnormalSummary)
+			netdev_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			if (netif_msg_intr(np))
+				rtdm_printk(KERN_WARNING
+					"%s: Too much work at interrupt, "
+					"status=%#08x.\n",
+					dev->name, intr_status);
+			break;
+		}
+	} while (1);
+
+	if (netif_msg_intr(np))
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt.\n", dev->name);
+
+/*** RTnet ***/
+	if (old_packet_cnt != np->stats.rx_packets)
+		rt_mark_stack_mgr(dev);
+	return ret;
+}
+
+/* This routine is logically part of the interrupt handler, but separated
+   for clarity and better register allocation. */
+static void netdev_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp)
+{
+	struct netdev_private *np = dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+	s32 desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+
+	/* If the driver owns the next entry it's a new packet. Send it up. */
+	while (desc_status < 0) { /* e.g. & DescOwn */
+		if (netif_msg_rx_status(np))
+			rtdm_printk(KERN_DEBUG
+				"  netdev_rx() entry %d status was %#08x.\n",
+				entry, desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ((desc_status&(DescMore|DescPktOK|DescRxLong)) != DescPktOK){
+			if (desc_status & DescMore) {
+				if (netif_msg_rx_err(np))
+					rtdm_printk(KERN_WARNING
+						"%s: Oversized(?) Ethernet "
+						"frame spanned multiple "
+						"buffers, entry %#08x "
+						"status %#08x.\n", dev->name,
+						np->cur_rx, desc_status);
+				np->stats.rx_length_errors++;
+			} else {
+				/* There was an error. */
+				np->stats.rx_errors++;
+				if (desc_status & (DescRxAbort|DescRxOver))
+					np->stats.rx_over_errors++;
+				if (desc_status & (DescRxLong|DescRxRunt))
+					np->stats.rx_length_errors++;
+				if (desc_status & (DescRxInvalid|DescRxAlign))
+					np->stats.rx_frame_errors++;
+				if (desc_status & DescRxCRC)
+					np->stats.rx_crc_errors++;
+			}
+		} else {
+			struct rtskb *skb;
+			/* Omit CRC size. */
+			int pkt_len = (desc_status & DescSizeMask) - 4;
+			/* Check if the packet is long enough to accept
+			 * without copying to a minimally-sized skbuff. */
+/*** RTnet ***/
+			{
+				skb = np->rx_skbuff[entry];
+				dma_unmap_single(&np->pci_dev->dev,
+						 np->rx_dma[entry],
+						 np->rx_skbuff[entry]->len,
+						 DMA_FROM_DEVICE);
+				rtskb_put(skb, pkt_len);
+				np->rx_skbuff[entry] = NULL;
+			}
+/*** RTnet ***/
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			/*dev->last_rx = jiffies;*/
+/*** RTnet ***/
+			np->stats.rx_packets++;
+			np->stats.rx_bytes += pkt_len;
+		}
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
+		desc_status = le32_to_cpu(np->rx_head_desc->cmd_status);
+	}
+	refill_rx(dev);
+
+	/* Restart Rx engine if stopped. */
+	if (np->oom)
+		;
+/*		mod_timer(&np->timer, jiffies + 1);*/
+	else
+		writel(RxOn, (void *)(dev->base_addr + ChipCmd));
+}
+
+static void netdev_error(struct rtnet_device *dev, int intr_status)
+{
+	struct netdev_private *np = dev->priv;
+	long ioaddr = dev->base_addr;
+
+	rtdm_lock_get(&np->lock);
+	if (intr_status & LinkChange) {
+		u16 adv = mdio_read(dev, 1, MII_ADVERTISE);
+		u16 lpa = mdio_read(dev, 1, MII_LPA);
+		if (mdio_read(dev, 1, MII_BMCR) & BMCR_ANENABLE
+		 && netif_msg_link(np)) {
+			rtdm_printk(KERN_INFO
+				"%s: Autonegotiation advertising"
+				" %#04x  partner %#04x.\n", dev->name,
+				adv, lpa);
+		}
+
+		/* read MII int status to clear the flag */
+		readw((void *)(ioaddr + MIntrStatus));
+		check_link(dev);
+	}
+	if (intr_status & StatsMax) {
+		__get_stats(dev);
+	}
+	if (intr_status & IntrTxUnderrun) {
+		if ((np->tx_config & TxDrthMask) < 62)
+			np->tx_config += 2;
+		if (netif_msg_tx_err(np))
+			rtdm_printk(KERN_NOTICE
+				"%s: increased Tx threshold, txcfg %#08x.\n",
+				dev->name, np->tx_config);
+		writel(np->tx_config, (void *)(ioaddr + TxConfig));
+	}
+	if (intr_status & WOLPkt && netif_msg_wol(np)) {
+		int wol_status = readl((void *)(ioaddr + WOLCmd));
+		rtdm_printk(KERN_NOTICE "%s: Link wake-up event %#08x\n",
+			dev->name, wol_status);
+	}
+	if (intr_status & RxStatusFIFOOver) {
+		if (netif_msg_rx_err(np) && netif_msg_intr(np)) {
+			rtdm_printk(KERN_NOTICE "%s: Rx status FIFO overrun\n",
+				dev->name);
+		}
+		np->stats.rx_fifo_errors++;
+	}
+	/* Hmmmmm, it's not clear how to recover from PCI faults. */
+	if (intr_status & IntrPCIErr) {
+		rtdm_printk(KERN_NOTICE "%s: PCI error %#08x\n", dev->name,
+			intr_status & IntrPCIErr);
+		np->stats.tx_fifo_errors++;
+		np->stats.rx_fifo_errors++;
+	}
+	rtdm_lock_put(&np->lock);
+}
+
+static void __get_stats(struct rtnet_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+
+	/* The chip only needs to report frames it silently dropped. */
+	np->stats.rx_crc_errors	+= readl((void *)(ioaddr + RxCRCErrs));
+	np->stats.rx_missed_errors += readl((void *)(ioaddr + RxMissed));
+}
+
+static struct net_device_stats *get_stats(struct rtnet_device *rtdev)
+{
+	struct netdev_private *np = rtdev->priv;
+	rtdm_lockctx_t context;
+
+	/* The chip only needs to report frames it silently dropped. */
+	rtdm_lock_get_irqsave(&np->lock, context);
+	if (rtnetif_running(rtdev) && !np->hands_off)
+		__get_stats(rtdev);
+	rtdm_lock_put_irqrestore(&np->lock, context);
+
+	return &np->stats;
+}
+
+#define HASH_TABLE	0x200
+static void __set_rx_mode(struct rtnet_device *dev)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	u8 mc_filter[64]; /* Multicast hash filter */
+	u32 rx_mode;
+
+	if (dev->flags & IFF_PROMISC) { /* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		rtdm_printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n",
+			dev->name);
+		rx_mode = RxFilterEnable | AcceptBroadcast
+			| AcceptAllMulticast | AcceptAllPhys | AcceptMyPhys;
+	} else if (dev->flags & IFF_ALLMULTI) {
+		rx_mode = RxFilterEnable | AcceptBroadcast
+			| AcceptAllMulticast | AcceptMyPhys;
+	} else {
+		int i;
+
+		memset(mc_filter, 0, sizeof(mc_filter));
+		rx_mode = RxFilterEnable | AcceptBroadcast
+			| AcceptMulticast | AcceptMyPhys;
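+		/* The 512-bit multicast hash table sits behind the indirect
+		 * RxFilterAddr/RxFilterData register pair at offset
+		 * HASH_TABLE.  This RTnet port manages no multicast list, so
+		 * mc_filter stays all-zero and an empty table is written. */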
+		for (i = 0; i < 64; i += 2) {
+			writew(HASH_TABLE + i, (void *)(ioaddr + RxFilterAddr));
+			writew((mc_filter[i+1]<<8) + mc_filter[i],
+				(void *)(ioaddr + RxFilterData));
+		}
+	}
+	writel(rx_mode, (void *)(ioaddr + RxFilterAddr));
+	np->cur_rx_mode = rx_mode;
+}
+/*** RTnet
+static void set_rx_mode(struct rtnet_device *dev)
+{
+	struct netdev_private *np = dev->priv;
+	spin_lock_irq(&np->lock);
+	if (!np->hands_off)
+		__set_rx_mode(dev);
+	spin_unlock_irq(&np->lock);
+}
+RTnet ***/
+/*** RTnet ***/
+/*** RTnet ***/
+
+static void enable_wol_mode(struct rtnet_device *dev, int enable_intr)
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+
+	if (netif_msg_wol(np))
+		rtdm_printk(KERN_INFO "%s: remaining active for wake-on-lan\n",
+			dev->name);
+
+	/* For WOL we must restart the rx process in silent mode.
+	 * Write NULL to the RxRingPtr. Only possible if
+	 * rx process is stopped
+	 */
+	writel(0, (void *)(ioaddr + RxRingPtr));
+
+	/* read WoL status to clear */
+	readl((void *)(ioaddr + WOLCmd));
+
+	/* PME on, clear status */
+	writel(np->SavedClkRun | PMEEnable | PMEStatus, (void *)(ioaddr + ClkRun));
+
+	/* and restart the rx process */
+	writel(RxOn, (void *)(ioaddr + ChipCmd));
+
+	if (enable_intr) {
+		/* enable the WOL interrupt.
+		 * Could be used to send a netlink message.
+		 */
+		writel(WOLPkt | LinkChange, (void *)(ioaddr + IntrMask));
+		writel(1, (void *)(ioaddr + IntrEnable));
+	}
+}
+
+static int netdev_close(struct rtnet_device *dev)
+{
+	int i;
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+
+	if (netif_msg_ifdown(np))
+		rtdm_printk(KERN_DEBUG
+			"%s: Shutting down ethercard, status was %#04x.\n",
+			dev->name, (int)readl((void *)(ioaddr + ChipCmd)));
+	if (netif_msg_pktdata(np))
+		rtdm_printk(KERN_DEBUG
+			"%s: Queue pointers were Tx %d / %d,  Rx %d / %d.\n",
+			dev->name, np->cur_tx, np->dirty_tx,
+			np->cur_rx, np->dirty_rx);
+
+	/*
+	 * FIXME: what if someone tries to close a device
+	 * that is suspended?
+	 * Should we reenable the nic to switch to
+	 * the final WOL settings?
+	 */
+/*** RTnet ***
+	del_timer_sync(&np->timer);
+ *** RTnet ***/
+/*	disable_irq(dev->irq);*/
+	rtdm_irq_disable(&np->irq_handle);
+	rtdm_lock_get(&np->lock);
+	/* Disable interrupts, and flush posted writes */
+	writel(0, (void *)(ioaddr + IntrEnable));
+	readl((void *)(ioaddr + IntrEnable));
+	np->hands_off = 1;
+	rtdm_lock_put(&np->lock);
+
+/*** RTnet ***/
+	if ( (i=rtdm_irq_free(&np->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(dev);
+/*** RTnet ***/
+
+/*	enable_irq(dev->irq);*/
+
+/*	free_irq(dev->irq, dev);*/
+
+	/* Interrupt disabled, interrupt handler released,
+	 * queue stopped, timer deleted, rtnl_lock held
+	 * All async codepaths that access the driver are disabled.
+	 */
+	rtdm_lock_get(&np->lock);
+	np->hands_off = 0;
+	readl((void *)(ioaddr + IntrMask));
+	readw((void *)(ioaddr + MIntrStatus));
+
+	/* Freeze Stats */
+	writel(StatsFreeze, (void *)(ioaddr + StatsCtrl));
+
+	/* Stop the chip's Tx and Rx processes. */
+	natsemi_stop_rxtx(dev);
+
+	__get_stats(dev);
+	rtdm_lock_put(&np->lock);
+
+	/* clear the carrier last - an interrupt could reenable it otherwise */
+	rtnetif_carrier_off(dev);
+	rtnetif_stop_queue(dev);
+
+	dump_ring(dev);
+	drain_ring(dev);
+	free_ring(dev);
+
+	{
+		u32 wol = readl((void *)(ioaddr + WOLCmd)) & WakeOptsSummary;
+		if (wol) {
+			/* restart the NIC in WOL mode.
+			 * The nic must be stopped for this.
+			 */
+			enable_wol_mode(dev, 0);
+		} else {
+			/* Restore PME enable bit unmolested */
+			writel(np->SavedClkRun, (void *)(ioaddr + ClkRun));
+		}
+	}
+
+	return 0;
+}
+
+
+static void natsemi_remove1 (struct pci_dev *pdev)
+{
+
+ /*** RTnet ***/
+	struct rtnet_device *dev = pci_get_drvdata(pdev);
+
+	rt_unregister_rtnetdev(dev);
+	rt_rtdev_disconnect(dev);
+/*** RTnet ***/
+
+	pci_release_regions (pdev);
+	iounmap ((char *) dev->base_addr);
+	rtdev_free(dev); /*** RTnet ***/
+	pci_set_drvdata(pdev, NULL);
+}
+
+#ifdef CONFIG_PM
+
+/*
+ * The ns83815 chip doesn't have explicit RxStop bits.
+ * Kicking the Rx or Tx process for a new packet reenables the Rx process
+ * of the nic, thus this function must be very careful:
+ *
+ * suspend/resume synchronization:
+ * entry points:
+ *   netdev_open, netdev_close, netdev_ioctl, set_rx_mode, intr_handler,
+ *   start_tx, tx_timeout
+ *
+ * No function accesses the hardware without checking np->hands_off.
+ *	the check occurs under spin_lock_irq(&np->lock);
+ * exceptions:
+ *	* netdev_ioctl: noncritical access.
+ *	* netdev_open: cannot happen due to the device_detach
+ *	* netdev_close: doesn't hurt.
+ *	* netdev_timer: timer stopped by natsemi_suspend.
+ *	* intr_handler: doesn't acquire the spinlock. suspend calls
+ *		disable_irq() to enforce synchronization.
+ *
+ * Interrupts must be disabled, otherwise hands_off can cause irq storms.
+ */
+
+#endif /* CONFIG_PM */
+
+static struct pci_driver natsemi_driver = {
+	.name		= DRV_NAME,
+	.id_table	= natsemi_pci_tbl,
+	.probe		= natsemi_probe1,
+	.remove		= natsemi_remove1,
+/*#ifdef CONFIG_PM*/
+};
+
+static int __init natsemi_init_mod (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+	rtdm_printk(version);
+#endif
+
+	return pci_register_driver (&natsemi_driver);
+}
+
+static void __exit natsemi_exit_mod (void)
+{
+	pci_unregister_driver (&natsemi_driver);
+}
+
+module_init(natsemi_init_mod);
+module_exit(natsemi_exit_mod);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c
new file mode 100644
index 0000000..eebb66e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/pcnet32.c
@@ -0,0 +1,1657 @@
+/* pcnet32.c: An AMD PCnet32 ethernet driver for linux. */
+/*
+ *	Copyright 1996-1999 Thomas Bogendoerfer
+ *
+ *	Derived from the lance driver written 1993,1994,1995 by Donald Becker.
+ *
+ *	Copyright 1993 United States Government as represented by the
+ *	Director, National Security Agency.
+ *
+ *	This software may be used and distributed according to the terms
+ *	of the GNU General Public License, incorporated herein by reference.
+ *
+ *	This driver is for PCnet32 and PCnetPCI based ethercards
+ */
+/**************************************************************************
+ *  23 Oct, 2000.
+ *  Fixed a few bugs, related to running the controller in 32bit mode.
+ *
+ *  Carsten Langgaard, carstenl@mips.com
+ *  Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ *
+ *  Ported to RTnet: September 2003, Jan Kiszka <Jan.Kiszka@web.de>
+ *************************************************************************/
+
+#define DRV_NAME "pcnet32-rt"
+#define DRV_VERSION "1.27a-RTnet-0.2"
+#define DRV_RELDATE "2003-09-24"
+#define PFX DRV_NAME ": "
+
+static const char *version =
+	DRV_NAME ".c:v" DRV_VERSION " " DRV_RELDATE " Jan.Kiszka@web.de\n";
+
+#include <linux/module.h>
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/string.h>
+#include <linux/ptrace.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/ethtool.h>
+#include <linux/mii.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/dma.h>
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/spinlock.h>
+
+/*** RTnet ***/
+#include <rtnet_port.h>
+
+#define MAX_UNITS 8 /* More are supported, limit only on options */
+#define DEFAULT_RX_POOL_SIZE 16
+
+static int cards[MAX_UNITS] = { [0 ...(MAX_UNITS - 1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** RTnet ***/
+
+/*
+ * PCI device identifiers for "new style" Linux PCI Device Drivers
+ */
+static struct pci_device_id pcnet32_pci_tbl[] = {
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE_HOME, PCI_ANY_ID,
+	  PCI_ANY_ID, 0, 0, 0 },
+	{ PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_LANCE, PCI_ANY_ID, PCI_ANY_ID, 0,
+	  0, 0 },
+	{
+		0,
+	}
+};
+
+MODULE_DEVICE_TABLE(pci, pcnet32_pci_tbl);
+
+static int cards_found = -1;
+static int pcnet32_have_pci;
+
+/*
+ * VLB I/O addresses
+ */
+static unsigned int pcnet32_portlist[] = { 0x300, 0x320, 0x340, 0x360, 0 };
+
+static int pcnet32_debug = 1;
+static int tx_start =
+	1; /* Mapping -- 0:20, 1:64, 2:128, 3:~220 (depends on chip vers) */
+static int pcnet32vlb; /* check for VLB cards ? */
+
+static struct rtnet_device *pcnet32_dev; /*** RTnet ***/
+
+static int max_interrupt_work = 80;
+/*** RTnet ***
+static int rx_copybreak = 200;
+ *** RTnet ***/
+
+#define PCNET32_PORT_AUI 0x00
+#define PCNET32_PORT_10BT 0x01
+#define PCNET32_PORT_GPSI 0x02
+#define PCNET32_PORT_MII 0x03
+
+#define PCNET32_PORT_PORTSEL 0x03
+#define PCNET32_PORT_ASEL 0x04
+#define PCNET32_PORT_100 0x40
+#define PCNET32_PORT_FD 0x80
+
+#define PCNET32_DMA_MASK 0xffffffff
+
+/*
+ * table to translate option values from tulip
+ * to internal options
+ */
+static unsigned char options_mapping[] = {
+	PCNET32_PORT_ASEL, /*  0 Auto-select	  */
+	PCNET32_PORT_AUI, /*  1 BNC/AUI	  */
+	PCNET32_PORT_AUI, /*  2 AUI/BNC	  */
+	PCNET32_PORT_ASEL, /*  3 not supported	  */
+	PCNET32_PORT_10BT | PCNET32_PORT_FD, /*  4 10baseT-FD	  */
+	PCNET32_PORT_ASEL, /*  5 not supported	  */
+	PCNET32_PORT_ASEL, /*  6 not supported	  */
+	PCNET32_PORT_ASEL, /*  7 not supported	  */
+	PCNET32_PORT_ASEL, /*  8 not supported	  */
+	PCNET32_PORT_MII, /*  9 MII 10baseT	  */
+	PCNET32_PORT_MII | PCNET32_PORT_FD, /* 10 MII 10baseT-FD	  */
+	PCNET32_PORT_MII, /* 11 MII (autosel)	  */
+	PCNET32_PORT_10BT, /* 12 10BaseT	  */
+	PCNET32_PORT_MII | PCNET32_PORT_100, /* 13 MII 100BaseTx	  */
+	PCNET32_PORT_MII | PCNET32_PORT_100 |
+		PCNET32_PORT_FD, /* 14 MII 100BaseTx-FD */
+	PCNET32_PORT_ASEL /* 15 not supported	  */
+};
+
+static int options[MAX_UNITS];
+static int full_duplex[MAX_UNITS];
+
+/*
+ *				Theory of Operation
+ *
+ * This driver uses the same software structure as the normal lance
+ * driver. So look for a verbose description in lance.c. The difference
+ * from the normal lance driver is the use of the 32bit mode of PCnet32
+ * and PCnetPCI chips. Because these chips are 32bit chips, there is no
+ * 16MB limitation and we don't need bounce buffers.
+ */
+
+/*
+ * History:
+ * v0.01:  Initial version
+ *	   only tested on Alpha Noname Board
+ * v0.02:  changed IRQ handling for new interrupt scheme (dev_id)
+ *	   tested on a ASUS SP3G
+ * v0.10:  fixed an odd problem with the 79C974 in a Compaq Deskpro XL
+ *	   looks like the 974 doesn't like stopping and restarting in a
+ *	   short period of time; now we do a reinit of the lance; the
+ *	   bug was triggered by doing ifconfig eth0 <ip> broadcast <addr>
+ *	   and hangs the machine (thanks to Klaus Liedl for debugging)
+ * v0.12:  by suggestion from Donald Becker: Renamed driver to pcnet32,
+ *	   made it standalone (no need for lance.c)
+ * v0.13:  added additional PCI detecting for special PCI devices (Compaq)
+ * v0.14:  stripped down additional PCI probe (thanks to David C Niemi
+ *	   and sveneric@xs4all.nl for testing this on their Compaq boxes)
+ * v0.15:  added 79C965 (VLB) probe
+ *	   added interrupt sharing for PCI chips
+ * v0.16:  fixed set_multicast_list on Alpha machines
+ * v0.17:  removed hack from dev.c; now pcnet32 uses ethif_probe in Space.c
+ * v0.19:  changed setting of autoselect bit
+ * v0.20:  removed additional Compaq PCI probe; there is now a working one
+ *	   in arch/i386/bios32.c
+ * v0.21:  added endian conversion for ppc, from work by cort@cs.nmt.edu
+ * v0.22:  added printing of status to ring dump
+ * v0.23:  changed enet_statistics to net_device_stats
+ * v0.90:  added multicast filter
+ *	   added module support
+ *	   changed irq probe to new style
+ *	   added PCnetFast chip id
+ *	   added fix for receive stalls with Intel saturn chipsets
+ *	   added in-place rx skbs like in the tulip driver
+ *	   minor cleanups
+ * v0.91:  added PCnetFast+ chip id
+ *	   back port to 2.0.x
+ * v1.00:  added some stuff from Donald Becker's 2.0.34 version
+ *	   added support for byte counters in net_dev_stats
+ * v1.01:  do ring dumps, only when debugging the driver
+ *	   increased the transmit timeout
+ * v1.02:  fixed memory leak in pcnet32_init_ring()
+ * v1.10:  workaround for stopped transmitter
+ *	   added port selection for modules
+ *	   detect special T1/E1 WAN card and setup port selection
+ * v1.11:  fixed wrong checking of Tx errors
+ * v1.20:  added check of return value kmalloc (cpeterso@cs.washington.edu)
+ *	   added save original kmalloc addr for freeing (mcr@solidum.com)
+ *	   added support for PCnetHome chip (joe@MIT.EDU)
+ *	   rewritten PCI card detection
+ *	   added dwio mode to get driver working on some PPC machines
+ * v1.21:  added mii selection and mii ioctl
+ * v1.22:  changed pci scanning code to make PPC people happy
+ *	   fixed switching to 32bit mode in pcnet32_open() (thanks
+ *	   to Michael Richard <mcr@solidum.com> for noticing this one)
+ *	   added sub vendor/device id matching (thanks again to
+ *	   Michael Richard <mcr@solidum.com>)
+ *	   added chip id for 79c973/975 (thanks to Zach Brown <zab@zabbo.net>)
+ * v1.23   fixed small bug, when manual selecting MII speed/duplex
+ * v1.24   Applied Thomas' patch to use TxStartPoint and thus decrease TxFIFO
+ *	   underflows.	Added tx_start_pt module parameter. Increased
+ *	   TX_RING_SIZE from 16 to 32.	Added #ifdef'd code to use DXSUFLO
+ *	   for FAST[+] chipsets. <kaf@fc.hp.com>
+ * v1.24ac Added SMP spinlocking - Alan Cox <alan@redhat.com>
+ * v1.25kf Added No Interrupt on successful Tx for some Tx's <kaf@fc.hp.com>
+ * v1.26   Converted to pci_alloc_consistent, Jamey Hicks / George France
+ *                                           <jamey@crl.dec.com>
+ * -	   Fixed a few bugs, related to running the controller in 32bit mode.
+ *	   23 Oct, 2000.  Carsten Langgaard, carstenl@mips.com
+ *	   Copyright (C) 2000 MIPS Technologies, Inc.  All rights reserved.
+ * v1.26p  Fix oops on rmmod+insmod; plug i/o resource leak - Paul Gortmaker
+ * v1.27   improved CSR/PROM address detection, lots of cleanups,
+ *	   new pcnet32vlb module option, HP-PARISC support,
+ *	   added module parameter descriptions,
+ *	   initial ethtool support - Helge Deller <deller@gmx.de>
+ * v1.27a  Sun Feb 10 2002 Go Taniguchi <go@turbolinux.co.jp>
+ *	   use alloc_etherdev and register_netdev
+ *	   fix pci probe not increment cards_found
+ *	   FD auto negotiate error workaround for xSeries250
+ *	   clean up and using new mii module
+ */
+
+/*
+ * Set the number of Tx and Rx buffers, using Log_2(# buffers).
+ * Reasonable default values are 4 Tx buffers, and 16 Rx buffers.
+ * That translates to 2 (4 == 2^2) and 4 (16 == 2^4).
+ */
+#ifndef PCNET32_LOG_TX_BUFFERS
+#define PCNET32_LOG_TX_BUFFERS 4
+#define PCNET32_LOG_RX_BUFFERS 3 /*** RTnet ***/
+#endif
+
+#define TX_RING_SIZE (1 << (PCNET32_LOG_TX_BUFFERS))
+#define TX_RING_MOD_MASK (TX_RING_SIZE - 1)
+#define TX_RING_LEN_BITS ((PCNET32_LOG_TX_BUFFERS) << 12)
+
+#define RX_RING_SIZE (1 << (PCNET32_LOG_RX_BUFFERS))
+#define RX_RING_MOD_MASK (RX_RING_SIZE - 1)
+#define RX_RING_LEN_BITS ((PCNET32_LOG_RX_BUFFERS) << 4)
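+
+/* With the defaults above, TX_RING_SIZE is 16 and RX_RING_SIZE is 8 (the
+ * RTnet port shrinks the RX ring).  The *_LEN_BITS values place the log2
+ * ring sizes into the TLEN (bits 15:12) and RLEN (bits 7:4) fields of the
+ * init block's tlen_rlen word. */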
+
+#define PKT_BUF_SZ 1544
+
+/* Offsets from base I/O address. */
+#define PCNET32_WIO_RDP 0x10
+#define PCNET32_WIO_RAP 0x12
+#define PCNET32_WIO_RESET 0x14
+#define PCNET32_WIO_BDP 0x16
+
+#define PCNET32_DWIO_RDP 0x10
+#define PCNET32_DWIO_RAP 0x14
+#define PCNET32_DWIO_RESET 0x18
+#define PCNET32_DWIO_BDP 0x1C
+
+#define PCNET32_TOTAL_SIZE 0x20
+
+/* The PCNET32 Rx and Tx ring descriptors. */
+struct pcnet32_rx_head {
+	u32 base;
+	s16 buf_length;
+	s16 status;
+	u32 msg_length;
+	u32 reserved;
+};
+
+struct pcnet32_tx_head {
+	u32 base;
+	s16 length;
+	s16 status;
+	u32 misc;
+	u32 reserved;
+};
+
+/* The PCNET32 32-Bit initialization block, described in databook. */
+struct pcnet32_init_block {
+	u16 mode;
+	u16 tlen_rlen;
+	u8 phys_addr[6];
+	u16 reserved;
+	u32 filter[2];
+	/* Receive and transmit ring base, along with extra bits. */
+	u32 rx_ring;
+	u32 tx_ring;
+};
+
+/* PCnet32 access functions */
+struct pcnet32_access {
+	u16 (*read_csr)(unsigned long, int);
+	void (*write_csr)(unsigned long, int, u16);
+	u16 (*read_bcr)(unsigned long, int);
+	void (*write_bcr)(unsigned long, int, u16);
+	u16 (*read_rap)(unsigned long);
+	void (*write_rap)(unsigned long, u16);
+	void (*reset)(unsigned long);
+};
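+
+/* Register access is indirect: the register index goes to the RAP, then
+ * data moves through RDP (for CSRs) or BDP (for BCRs).  Two flavors exist,
+ * 16-bit word I/O (WIO) and 32-bit double-word I/O (DWIO); pcnet32_probe1()
+ * below picks whichever mode the chip answers in. */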
+
+/*
+ * The first three fields of pcnet32_private are read by the ethernet device,
+ * so the structure should be allocated by pci_alloc_consistent().
+ */
+struct pcnet32_private {
+	/* The Tx and Rx ring entries must be aligned on 16-byte boundaries in 32bit mode. */
+	struct pcnet32_rx_head rx_ring[RX_RING_SIZE];
+	struct pcnet32_tx_head tx_ring[TX_RING_SIZE];
+	struct pcnet32_init_block init_block;
+	dma_addr_t dma_addr; /* DMA address of beginning of this object,
+					   returned by pci_alloc_consistent */
+	struct pci_dev
+		*pci_dev; /* Pointer to the associated pci device structure */
+	const char *name;
+	/* The saved address of a sent-in-place packet/buffer, for skfree(). */
+	/*** RTnet ***/
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	/*** RTnet ***/
+	dma_addr_t tx_dma_addr[TX_RING_SIZE];
+	dma_addr_t rx_dma_addr[RX_RING_SIZE];
+	struct pcnet32_access a;
+	rtdm_lock_t lock; /* Guard lock */
+	unsigned int cur_rx, cur_tx; /* The next free ring entry */
+	unsigned int dirty_rx, dirty_tx; /* The ring entries to be free()ed. */
+	struct net_device_stats stats;
+	char tx_full;
+	int options;
+	int shared_irq : 1, /* shared irq possible */
+		ltint : 1, /* enable TxDone-intr inhibitor */
+		dxsuflo : 1, /* disable transmit stop on uflo */
+		mii : 1; /* mii port available */
+	struct rtnet_device *next; /*** RTnet ***/
+	struct mii_if_info mii_if;
+	rtdm_irq_t irq_handle;
+};
+
+static void pcnet32_probe_vlbus(void);
+static int pcnet32_probe_pci(struct pci_dev *, const struct pci_device_id *);
+static int pcnet32_probe1(unsigned long, unsigned int, int, struct pci_dev *);
+/*** RTnet ***/
+static int pcnet32_open(struct rtnet_device *);
+static int pcnet32_init_ring(struct rtnet_device *);
+static int pcnet32_start_xmit(struct rtskb *, struct rtnet_device *);
+static int pcnet32_rx(struct rtnet_device *, nanosecs_abs_t *time_stamp);
+//static void pcnet32_tx_timeout (struct net_device *dev);
+static int pcnet32_interrupt(rtdm_irq_t *irq_handle);
+static int pcnet32_close(struct rtnet_device *);
+static struct net_device_stats *pcnet32_get_stats(struct rtnet_device *);
+//static void pcnet32_set_multicast_list(struct net_device *);
+//static int  pcnet32_ioctl(struct net_device *, struct ifreq *, int);
+//static int mdio_read(struct net_device *dev, int phy_id, int reg_num);
+//static void mdio_write(struct net_device *dev, int phy_id, int reg_num, int val);
+/*** RTnet ***/
+
+enum pci_flags_bit {
+	PCI_USES_IO = 1,
+	PCI_USES_MEM = 2,
+	PCI_USES_MASTER = 4,
+	PCI_ADDR0 = 0x10 << 0,
+	PCI_ADDR1 = 0x10 << 1,
+	PCI_ADDR2 = 0x10 << 2,
+	PCI_ADDR3 = 0x10 << 3,
+};
+
+static u16 pcnet32_wio_read_csr(unsigned long addr, int index)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	return inw(addr + PCNET32_WIO_RDP);
+}
+
+static void pcnet32_wio_write_csr(unsigned long addr, int index, u16 val)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	outw(val, addr + PCNET32_WIO_RDP);
+}
+
+static u16 pcnet32_wio_read_bcr(unsigned long addr, int index)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	return inw(addr + PCNET32_WIO_BDP);
+}
+
+static void pcnet32_wio_write_bcr(unsigned long addr, int index, u16 val)
+{
+	outw(index, addr + PCNET32_WIO_RAP);
+	outw(val, addr + PCNET32_WIO_BDP);
+}
+
+static u16 pcnet32_wio_read_rap(unsigned long addr)
+{
+	return inw(addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_write_rap(unsigned long addr, u16 val)
+{
+	outw(val, addr + PCNET32_WIO_RAP);
+}
+
+static void pcnet32_wio_reset(unsigned long addr)
+{
+	inw(addr + PCNET32_WIO_RESET);
+}
+
+static int pcnet32_wio_check(unsigned long addr)
+{
+	outw(88, addr + PCNET32_WIO_RAP);
+	return (inw(addr + PCNET32_WIO_RAP) == 88);
+}
+
+static struct pcnet32_access pcnet32_wio = {
+	.read_csr = pcnet32_wio_read_csr,
+	.write_csr = pcnet32_wio_write_csr,
+	.read_bcr = pcnet32_wio_read_bcr,
+	.write_bcr = pcnet32_wio_write_bcr,
+	.read_rap = pcnet32_wio_read_rap,
+	.write_rap = pcnet32_wio_write_rap,
+	.reset = pcnet32_wio_reset
+};
+
+static u16 pcnet32_dwio_read_csr(unsigned long addr, int index)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	return (inl(addr + PCNET32_DWIO_RDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_csr(unsigned long addr, int index, u16 val)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	outl(val, addr + PCNET32_DWIO_RDP);
+}
+
+static u16 pcnet32_dwio_read_bcr(unsigned long addr, int index)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	return (inl(addr + PCNET32_DWIO_BDP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_bcr(unsigned long addr, int index, u16 val)
+{
+	outl(index, addr + PCNET32_DWIO_RAP);
+	outl(val, addr + PCNET32_DWIO_BDP);
+}
+
+static u16 pcnet32_dwio_read_rap(unsigned long addr)
+{
+	return (inl(addr + PCNET32_DWIO_RAP) & 0xffff);
+}
+
+static void pcnet32_dwio_write_rap(unsigned long addr, u16 val)
+{
+	outl(val, addr + PCNET32_DWIO_RAP);
+}
+
+static void pcnet32_dwio_reset(unsigned long addr)
+{
+	inl(addr + PCNET32_DWIO_RESET);
+}
+
+static int pcnet32_dwio_check(unsigned long addr)
+{
+	outl(88, addr + PCNET32_DWIO_RAP);
+	return ((inl(addr + PCNET32_DWIO_RAP) & 0xffff) == 88);
+}
+
+static struct pcnet32_access pcnet32_dwio = {
+	.read_csr = pcnet32_dwio_read_csr,
+	.write_csr = pcnet32_dwio_write_csr,
+	.read_bcr = pcnet32_dwio_read_bcr,
+	.write_bcr = pcnet32_dwio_write_bcr,
+	.read_rap = pcnet32_dwio_read_rap,
+	.write_rap = pcnet32_dwio_write_rap,
+	.reset = pcnet32_dwio_reset
+};
+
+/* only probes for non-PCI devices; the rest are handled by
+ * pci_register_driver via pcnet32_probe_pci */
+
+static void pcnet32_probe_vlbus(void)
+{
+	unsigned int *port, ioaddr;
+
+	/* search for PCnet32 VLB cards at known addresses */
+	for (port = pcnet32_portlist; (ioaddr = *port); port++) {
+		if (request_region(ioaddr, PCNET32_TOTAL_SIZE,
+				   "pcnet32_probe_vlbus")) {
+			/* check if there is really a pcnet chip on that ioaddr */
+			if ((inb(ioaddr + 14) == 0x57) &&
+			    (inb(ioaddr + 15) == 0x57)) {
+				pcnet32_probe1(ioaddr, 0, 0, NULL);
+			} else {
+				release_region(ioaddr, PCNET32_TOTAL_SIZE);
+			}
+		}
+	}
+}
+
+static int pcnet32_probe_pci(struct pci_dev *pdev,
+			     const struct pci_device_id *ent)
+{
+	unsigned long ioaddr;
+	int err;
+
+	err = pci_enable_device(pdev);
+	if (err < 0) {
+		printk(KERN_ERR PFX "failed to enable device -- err=%d\n", err);
+		return err;
+	}
+	pci_set_master(pdev);
+
+	ioaddr = pci_resource_start(pdev, 0);
+	if (!ioaddr) {
+		printk(KERN_ERR PFX "card has no PCI IO resources, aborting\n");
+		return -ENODEV;
+	}
+
+	err = dma_set_mask(&pdev->dev, PCNET32_DMA_MASK);
+	if (err) {
+		printk(KERN_ERR PFX
+		       "architecture does not support 32bit PCI busmaster DMA\n");
+		return err;
+	}
+
+	return pcnet32_probe1(ioaddr, pdev->irq, 1, pdev);
+}
+
+/* pcnet32_probe1
+ *  Called from both pcnet32_probe_vlbus and pcnet_probe_pci.
+ *  pdev will be NULL when called from pcnet32_probe_vlbus.
+ */
+static int pcnet32_probe1(unsigned long ioaddr, unsigned int irq_line,
+			  int shared, struct pci_dev *pdev)
+{
+	struct pcnet32_private *lp;
+	dma_addr_t lp_dma_addr;
+	int i, media;
+	int fdx, mii, fset, dxsuflo, ltint;
+	int chip_version;
+	char *chipname;
+	struct rtnet_device *dev; /*** RTnet ***/
+	struct pcnet32_access *a = NULL;
+	u8 promaddr[6];
+
+	// *** RTnet ***
+	cards_found++;
+	if (cards[cards_found] == 0)
+		return -ENODEV;
+	// *** RTnet ***
+
+	/* reset the chip */
+	pcnet32_wio_reset(ioaddr);
+
+	/* NOTE: 16-bit check is first, otherwise some older PCnet chips fail */
+	if (pcnet32_wio_read_csr(ioaddr, 0) == 4 && pcnet32_wio_check(ioaddr)) {
+		a = &pcnet32_wio;
+	} else {
+		pcnet32_dwio_reset(ioaddr);
+		if (pcnet32_dwio_read_csr(ioaddr, 0) == 4 &&
+		    pcnet32_dwio_check(ioaddr)) {
+			a = &pcnet32_dwio;
+		} else
+			return -ENODEV;
+	}
+
+	chip_version =
+		a->read_csr(ioaddr, 88) | (a->read_csr(ioaddr, 89) << 16);
+	if (pcnet32_debug > 2)
+		printk(KERN_INFO "  PCnet chip version is %#x.\n",
+		       chip_version);
+	if ((chip_version & 0xfff) != 0x003)
+		return -ENODEV;
+
+	/* initialize variables */
+	fdx = mii = fset = dxsuflo = ltint = 0;
+	chip_version = (chip_version >> 12) & 0xffff;
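+	/* CSR88/89 together form the chip ID: bits 11:0 are the part ID
+	 * (0x003 for PCnet parts, checked above) and bits 27:12 the version
+	 * number decoded by the switch below. */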
+
+	switch (chip_version) {
+	case 0x2420:
+		chipname = "PCnet/PCI 79C970"; /* PCI */
+		break;
+	case 0x2430:
+		if (shared)
+			chipname =
+				"PCnet/PCI 79C970"; /* 970 gives the wrong chip id back */
+		else
+			chipname = "PCnet/32 79C965"; /* 486/VL bus */
+		break;
+	case 0x2621:
+		chipname = "PCnet/PCI II 79C970A"; /* PCI */
+		fdx = 1;
+		break;
+	case 0x2623:
+		chipname = "PCnet/FAST 79C971"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		fset = 1;
+		ltint = 1;
+		break;
+	case 0x2624:
+		chipname = "PCnet/FAST+ 79C972"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		fset = 1;
+		break;
+	case 0x2625:
+		chipname = "PCnet/FAST III 79C973"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		break;
+	case 0x2626:
+		chipname = "PCnet/Home 79C978"; /* PCI */
+		fdx = 1;
+		/*
+		 * This is based on specs published at www.amd.com.  This section
+		 * assumes that a card with a 79C978 wants to go into 1Mb HomePNA
+		 * mode.  The 79C978 can also go into standard ethernet, and there
+		 * probably should be some sort of module option to select the
+		 * mode by which the card should operate.
+		 */
+		/* switch to home wiring mode */
+		media = a->read_bcr(ioaddr, 49);
+		if (pcnet32_debug > 2)
+			printk(KERN_DEBUG PFX "media reset to %#x.\n", media);
+		a->write_bcr(ioaddr, 49, media);
+		break;
+	case 0x2627:
+		chipname = "PCnet/FAST III 79C975"; /* PCI */
+		fdx = 1;
+		mii = 1;
+		break;
+	default:
+		printk(KERN_INFO PFX "PCnet version %#x, no PCnet32 chip.\n",
+		       chip_version);
+		return -ENODEV;
+	}
+
+	/*
+     *	On selected chips turn on the BCR18:NOUFLO bit. This stops transmit
+     *	starting until the packet is loaded. Strike one for reliability, lose
+     *	one for latency - although on PCI this isn't a big loss. Older chips
+     *	have FIFOs smaller than a packet, so you can't do this.
+     */
+
+	if (fset) {
+		a->write_bcr(ioaddr, 18, (a->read_bcr(ioaddr, 18) | 0x0800));
+		a->write_csr(ioaddr, 80,
+			     (a->read_csr(ioaddr, 80) & 0x0C00) | 0x0c00);
+		dxsuflo = 1;
+		ltint = 1;
+	}
+
+	/*** RTnet ***/
+	dev = rt_alloc_etherdev(0, RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL)
+		return -ENOMEM;
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+	/*** RTnet ***/
+
+	printk(KERN_INFO PFX "%s at %#3lx,", chipname, ioaddr);
+
+	/* In most chips, after a chip reset, the ethernet address is read from the
+     * station address PROM at the base address and programmed into the
+     * "Physical Address Registers" CSR12-14.
+     * As a precautionary measure, we read the PROM values and complain if
+     * they disagree with the CSRs.  Either way, we use the CSR values, and
+     * double check that they are valid.
+     */
+	for (i = 0; i < 3; i++) {
+		unsigned int val;
+		val = a->read_csr(ioaddr, i + 12) & 0x0ffff;
+		/* There may be endianness issues here. */
+		dev->dev_addr[2 * i] = val & 0x0ff;
+		dev->dev_addr[2 * i + 1] = (val >> 8) & 0x0ff;
+	}
+
+	/* read PROM address and compare with CSR address */
+	for (i = 0; i < 6; i++)
+		promaddr[i] = inb(ioaddr + i);
+
+	if (memcmp(promaddr, dev->dev_addr, 6) ||
+	    !is_valid_ether_addr(dev->dev_addr)) {
+#ifndef __powerpc__
+		if (is_valid_ether_addr(promaddr)) {
+#else
+		if (!is_valid_ether_addr(dev->dev_addr) &&
+		    is_valid_ether_addr(promaddr)) {
+#endif
+			printk(" warning: CSR address invalid,\n");
+			printk(KERN_INFO "    using instead PROM address of");
+			memcpy(dev->dev_addr, promaddr, 6);
+		}
+	}
+
+	/* if the ethernet address is not valid, force to 00:00:00:00:00:00 */
+	if (!is_valid_ether_addr(dev->dev_addr))
+		memset(dev->dev_addr, 0, sizeof(dev->dev_addr));
+
+	for (i = 0; i < 6; i++)
+		printk(" %2.2x", dev->dev_addr[i]);
+
+	if (((chip_version + 1) & 0xfffe) ==
+	    0x2624) { /* Version 0x2623 or 0x2624 */
+		i = a->read_csr(ioaddr, 80) & 0x0C00; /* Check tx_start_pt */
+		printk("\n" KERN_INFO "    tx_start_pt(0x%04x):", i);
+		switch (i >> 10) {
+		case 0:
+			printk("  20 bytes,");
+			break;
+		case 1:
+			printk("  64 bytes,");
+			break;
+		case 2:
+			printk(" 128 bytes,");
+			break;
+		case 3:
+			printk("~220 bytes,");
+			break;
+		}
+		i = a->read_bcr(ioaddr, 18); /* Check Burst/Bus control */
+		printk(" BCR18(%x):", i & 0xffff);
+		if (i & (1 << 5))
+			printk("BurstWrEn ");
+		if (i & (1 << 6))
+			printk("BurstRdEn ");
+		if (i & (1 << 7))
+			printk("DWordIO ");
+		if (i & (1 << 11))
+			printk("NoUFlow ");
+		i = a->read_bcr(ioaddr, 25);
+		printk("\n" KERN_INFO "    SRAMSIZE=0x%04x,", i << 8);
+		i = a->read_bcr(ioaddr, 26);
+		printk(" SRAM_BND=0x%04x,", i << 8);
+		i = a->read_bcr(ioaddr, 27);
+		if (i & (1 << 14))
+			printk("LowLatRx");
+	}
+
+	dev->base_addr = ioaddr;
+	if (request_region(ioaddr, PCNET32_TOTAL_SIZE, chipname) == NULL)
+		return -EBUSY;
+
+	/* dma_alloc_coherent returns page-aligned memory, so we do not have to check the alignment */
+	if ((lp = dma_alloc_coherent(&pdev->dev, sizeof(*lp), &lp_dma_addr,
+				     GFP_ATOMIC)) ==
+	    NULL) {
+		release_region(ioaddr, PCNET32_TOTAL_SIZE);
+		return -ENOMEM;
+	}
+
+	memset(lp, 0, sizeof(*lp));
+	lp->dma_addr = lp_dma_addr;
+	lp->pci_dev = pdev;
+
+	rtdm_lock_init(&lp->lock);
+
+	dev->priv = lp;
+	lp->name = chipname;
+	lp->shared_irq = shared;
+	lp->mii_if.full_duplex = fdx;
+	lp->dxsuflo = dxsuflo;
+	lp->ltint = ltint;
+	lp->mii = mii;
+	if ((cards_found >= MAX_UNITS) ||
+	    (options[cards_found] > (int)sizeof(options_mapping)))
+		lp->options = PCNET32_PORT_ASEL;
+	else
+		lp->options = options_mapping[options[cards_found]];
+	/*** RTnet ***
+    lp->mii_if.dev = dev;
+    lp->mii_if.mdio_read = mdio_read;
+    lp->mii_if.mdio_write = mdio_write;
+ *** RTnet ***/
+
+	if (fdx && !(lp->options & PCNET32_PORT_ASEL) &&
+	    ((cards_found >= MAX_UNITS) || full_duplex[cards_found]))
+		lp->options |= PCNET32_PORT_FD;
+
+	if (!a) {
+		printk(KERN_ERR PFX "No access methods\n");
+		dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp,
+				  lp->dma_addr);
+		release_region(ioaddr, PCNET32_TOTAL_SIZE);
+		return -ENODEV;
+	}
+	lp->a = *a;
+
+	/* detect special T1/E1 WAN card by checking for MAC address */
+	if (dev->dev_addr[0] == 0x00 && dev->dev_addr[1] == 0xe0 &&
+	    dev->dev_addr[2] == 0x75)
+		lp->options = PCNET32_PORT_FD | PCNET32_PORT_GPSI;
+
+	lp->init_block.mode = le16_to_cpu(0x0003); /* Disable Rx and Tx. */
+	lp->init_block.tlen_rlen =
+		le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+	for (i = 0; i < 6; i++)
+		lp->init_block.phys_addr[i] = dev->dev_addr[i];
+	lp->init_block.filter[0] = 0x00000000;
+	lp->init_block.filter[1] = 0x00000000;
+	lp->init_block.rx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, rx_ring));
+	lp->init_block.tx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, tx_ring));
+
+	/* switch pcnet32 to 32bit mode */
+	a->write_bcr(ioaddr, 20, 2);
+
+	a->write_csr(
+		ioaddr, 1,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) &
+			0xffff);
+	a->write_csr(
+		ioaddr, 2,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >>
+			16);
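+	/* CSR1 and CSR2 hold the low and high 16 bits of the init block's
+	 * bus address; the chip fetches mode, MAC address, filter and ring
+	 * pointers from there when an INIT command is issued via CSR0. */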
+
+	if (irq_line) {
+		dev->irq = irq_line;
+	}
+
+	if (dev->irq >= 2)
+		printk(" assigned IRQ %d.\n", dev->irq);
+	else {
+		unsigned long irq_mask = probe_irq_on();
+
+		/*
+	 * To auto-IRQ we enable the initialization-done and DMA error
+	 * interrupts. For ISA boards we get a DMA error, but VLB and PCI
+	 * boards will work.
+	 */
+		/* Trigger an initialization just for the interrupt. */
+		a->write_csr(ioaddr, 0, 0x41);
+		mdelay(1);
+
+		dev->irq = probe_irq_off(irq_mask);
+		if (dev->irq)
+			printk(", probed IRQ %d.\n", dev->irq);
+		else {
+			printk(", failed to detect IRQ line.\n");
+			dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp,
+					  lp->dma_addr);
+			release_region(ioaddr, PCNET32_TOTAL_SIZE);
+			return -ENODEV;
+		}
+	}
+
+	/* The PCNET32-specific entries in the device structure. */
+	dev->open = &pcnet32_open;
+	dev->hard_start_xmit = &pcnet32_start_xmit;
+	dev->stop = &pcnet32_close;
+	dev->get_stats = &pcnet32_get_stats;
+	/*** RTnet ***
+    dev->set_multicast_list = &pcnet32_set_multicast_list;
+    dev->do_ioctl = &pcnet32_ioctl;
+    dev->tx_timeout = pcnet32_tx_timeout;
+    dev->watchdog_timeo = (5*HZ);
+ *** RTnet ***/
+
+	lp->next = pcnet32_dev;
+	pcnet32_dev = dev;
+
+	/* Fill in the generic fields of the device structure. */
+	/*** RTnet ***/
+	if ((i = rt_register_rtnetdev(dev))) {
+		dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp,
+				   lp->dma_addr);
+		release_region(ioaddr, PCNET32_TOTAL_SIZE);
+		rtdev_free(dev);
+		return i;
+	}
+	/*** RTnet ***/
+
+	printk(KERN_INFO "%s: registered as %s\n", dev->name, lp->name);
+	return 0;
+}
+
+static int pcnet32_open(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	unsigned long ioaddr = dev->base_addr;
+	u16 val;
+	int i;
+
+	/*** RTnet ***/
+	if (dev->irq == 0)
+		return -EAGAIN;
+
+	rt_stack_connect(dev, &STACK_manager);
+
+	i = rtdm_irq_request(&lp->irq_handle, dev->irq, pcnet32_interrupt,
+			     RTDM_IRQTYPE_SHARED, "rt_pcnet32", dev);
+	if (i)
+		return i;
+	/*** RTnet ***/
+
+	/* Check for a valid station address */
+	if (!is_valid_ether_addr(dev->dev_addr))
+		return -EINVAL;
+
+	/* Reset the PCNET32 */
+	lp->a.reset(ioaddr);
+
+	/* switch pcnet32 to 32bit mode */
+	lp->a.write_bcr(ioaddr, 20, 2);
+
+	if (pcnet32_debug > 1)
+		printk(KERN_DEBUG
+		       "%s: pcnet32_open() irq %d tx/rx rings %#x/%#x init %#x.\n",
+		       dev->name, dev->irq,
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, tx_ring)),
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, rx_ring)),
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, init_block)));
+
+	/* set/reset autoselect bit */
+	val = lp->a.read_bcr(ioaddr, 2) & ~2;
+	if (lp->options & PCNET32_PORT_ASEL)
+		val |= 2;
+	lp->a.write_bcr(ioaddr, 2, val);
+
+	/* handle full duplex setting */
+	if (lp->mii_if.full_duplex) {
+		val = lp->a.read_bcr(ioaddr, 9) & ~3;
+		if (lp->options & PCNET32_PORT_FD) {
+			val |= 1;
+			if (lp->options == (PCNET32_PORT_FD | PCNET32_PORT_AUI))
+				val |= 2;
+		} else if (lp->options & PCNET32_PORT_ASEL) {
+			/* workaround of xSeries250, turn on for 79C975 only */
+			i = ((lp->a.read_csr(ioaddr, 88) |
+			      (lp->a.read_csr(ioaddr, 89) << 16)) >>
+			     12) &
+			    0xffff;
+			if (i == 0x2627)
+				val |= 3;
+		}
+		lp->a.write_bcr(ioaddr, 9, val);
+	}
+
+	/* set/reset GPSI bit in test register */
+	val = lp->a.read_csr(ioaddr, 124) & ~0x10;
+	if ((lp->options & PCNET32_PORT_PORTSEL) == PCNET32_PORT_GPSI)
+		val |= 0x10;
+	lp->a.write_csr(ioaddr, 124, val);
+
+	if (lp->mii && !(lp->options & PCNET32_PORT_ASEL)) {
+		val = lp->a.read_bcr(ioaddr, 32) &
+		      ~0x38; /* disable Auto Negotiation, set 10Mbps, HD */
+		if (lp->options & PCNET32_PORT_FD)
+			val |= 0x10;
+		if (lp->options & PCNET32_PORT_100)
+			val |= 0x08;
+		lp->a.write_bcr(ioaddr, 32, val);
+	} else {
+		if (lp->options &
+		    PCNET32_PORT_ASEL) { /* enable auto negotiate, setup, disable fd */
+			val = lp->a.read_bcr(ioaddr, 32) & ~0x98;
+			val |= 0x20;
+			lp->a.write_bcr(ioaddr, 32, val);
+		}
+	}
+
+#ifdef DO_DXSUFLO
+	if (lp->dxsuflo) { /* Disable transmit stop on underflow */
+		val = lp->a.read_csr(ioaddr, 3);
+		val |= 0x40;
+		lp->a.write_csr(ioaddr, 3, val);
+	}
+#endif
+
+	if (lp->ltint) { /* Enable TxDone-intr inhibitor */
+		val = lp->a.read_csr(ioaddr, 5);
+		val |= (1 << 14);
+		lp->a.write_csr(ioaddr, 5, val);
+	}
+
+	lp->init_block.mode =
+		le16_to_cpu((lp->options & PCNET32_PORT_PORTSEL) << 7);
+	lp->init_block.filter[0] = 0x00000000;
+	lp->init_block.filter[1] = 0x00000000;
+	if (pcnet32_init_ring(dev))
+		return -ENOMEM;
+
+	/* Re-initialize the PCNET32, and start it when done. */
+	lp->a.write_csr(
+		ioaddr, 1,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) &
+			0xffff);
+	lp->a.write_csr(
+		ioaddr, 2,
+		(lp->dma_addr + offsetof(struct pcnet32_private, init_block)) >>
+			16);
+
+	lp->a.write_csr(ioaddr, 4, 0x0915);
+	lp->a.write_csr(ioaddr, 0, 0x0001);
+
+	rtnetif_start_queue(dev); /*** RTnet ***/
+
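+	/* CSR0 INIT (0x0001) was issued above; poll for IDON (0x0100), which
+	 * signals that the chip has fetched the init block.  The later write
+	 * of 0x0042 is IENA | STRT: enable interrupts and start the chip. */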
+	i = 0;
+	while (i++ < 100)
+		if (lp->a.read_csr(ioaddr, 0) & 0x0100)
+			break;
+	/*
+     * We used to clear the InitDone bit, 0x0100, here but Mark Stockton
+     * reports that doing so triggers a bug in the '974.
+     */
+	lp->a.write_csr(ioaddr, 0, 0x0042);
+
+	if (pcnet32_debug > 2)
+		printk(KERN_DEBUG
+		       "%s: pcnet32 open after %d ticks, init block %#x csr0 %4.4x.\n",
+		       dev->name, i,
+		       (u32)(lp->dma_addr +
+			     offsetof(struct pcnet32_private, init_block)),
+		       lp->a.read_csr(ioaddr, 0));
+
+	return 0; /* Always succeed */
+}
+
+/*
+ * The LANCE has been halted for one reason or another (busmaster memory
+ * arbitration error, Tx FIFO underflow, driver stopped it to reconfigure,
+ * etc.).  Modern LANCE variants always reload their ring-buffer
+ * configuration when restarted, so we must reinitialize our ring
+ * context before restarting.  As part of this reinitialization,
+ * find all packets still on the Tx ring and pretend that they had been
+ * sent (in effect, drop the packets on the floor) - the higher-level
+ * protocols will time out and retransmit.  It'd be better to shuffle
+ * these skbs to a temp list and then actually re-Tx them after
+ * restarting the chip, but I'm too lazy to do so right now.  dplatt@3do.com
+ */
+
+/*** RTnet ***
+static void
+pcnet32_purge_tx_ring(struct net_device *dev)
+{
+    struct pcnet32_private *lp = dev->priv;
+    int i;
+
+    for (i = 0; i < TX_RING_SIZE; i++) {
+	if (lp->tx_skbuff[i]) {
+	    pci_unmap_single(lp->pci_dev, lp->tx_dma_addr[i], lp->tx_skbuff[i]->len, PCI_DMA_TODEVICE);
+	    dev_kfree_skb(lp->tx_skbuff[i]);
+	    lp->tx_skbuff[i] = NULL;
+	    lp->tx_dma_addr[i] = 0;
+	}
+    }
+}
+ *** RTnet ***/
+
+/* Initialize the PCNET32 Rx and Tx rings. */
+static int pcnet32_init_ring(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	int i;
+
+	lp->tx_full = 0;
+	lp->cur_rx = lp->cur_tx = 0;
+	lp->dirty_rx = lp->dirty_tx = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *rx_skbuff = lp->rx_skbuff[i]; /*** RTnet ***/
+		if (rx_skbuff == NULL) {
+			if (!(rx_skbuff = lp->rx_skbuff[i] =
+				      rtnetdev_alloc_rtskb(
+					      dev,
+					      PKT_BUF_SZ))) { /*** RTnet ***/
+				/* there is not much we can do at this point */
+				printk(KERN_ERR
+				       "%s: pcnet32_init_ring rtnetdev_alloc_rtskb failed.\n",
+				       dev->name);
+				return -1;
+			}
+			rtskb_reserve(rx_skbuff, 2); /*** RTnet ***/
+		}
+		lp->rx_dma_addr[i] =
+			dma_map_single(&lp->pci_dev->dev, rx_skbuff->tail,
+				       rx_skbuff->len, DMA_FROM_DEVICE);
+		lp->rx_ring[i].base = (u32)le32_to_cpu(lp->rx_dma_addr[i]);
+		lp->rx_ring[i].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+		lp->rx_ring[i].status = le16_to_cpu(0x8000);
+	}
+	/* The Tx buffer address is filled in as needed, but we do need to clear
+       the upper ownership bit. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		lp->tx_ring[i].base = 0;
+		lp->tx_ring[i].status = 0;
+		lp->tx_dma_addr[i] = 0;
+	}
+
+	lp->init_block.tlen_rlen =
+		le16_to_cpu(TX_RING_LEN_BITS | RX_RING_LEN_BITS);
+	for (i = 0; i < 6; i++)
+		lp->init_block.phys_addr[i] = dev->dev_addr[i];
+	lp->init_block.rx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, rx_ring));
+	lp->init_block.tx_ring = (u32)le32_to_cpu(
+		lp->dma_addr + offsetof(struct pcnet32_private, tx_ring));
+	return 0;
+}
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int pcnet32_start_xmit(struct rtskb *skb,
+			      struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	unsigned long ioaddr = dev->base_addr;
+	u16 status;
+	int entry;
+	rtdm_lockctx_t context;
+
+	if (pcnet32_debug > 3) {
+		rtdm_printk(KERN_DEBUG
+			    "%s: pcnet32_start_xmit() called, csr0 %4.4x.\n",
+			    dev->name, lp->a.read_csr(ioaddr, 0));
+	}
+
+	/*** RTnet ***/
+	rtdm_lock_get_irqsave(&lp->lock, context);
+	/*** RTnet ***/
+
+	/* Default status -- will not enable Successful-TxDone
+     * interrupt when that option is available to us.
+     */
+	status = 0x8300;
+	if ((lp->ltint) && ((lp->cur_tx - lp->dirty_tx == TX_RING_SIZE / 2) ||
+			    (lp->cur_tx - lp->dirty_tx >= TX_RING_SIZE - 2))) {
+		/* Enable Successful-TxDone interrupt if we have
+	 * 1/2 of, or nearly all of, our ring buffer Tx'd
+	 * but not yet cleaned up.  Thus, most of the time,
+	 * we will not enable Successful-TxDone interrupts.
+	 */
+		status = 0x9300;
+	}
+
+	/* Fill in a Tx ring entry */
+
+	/* Mask to ring buffer boundary. */
+	entry = lp->cur_tx & TX_RING_MOD_MASK;
+
+	/* Caution: the write order is important here, set the base address
+       with the "ownership" bits last. */
+
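+	/* LANCE-style descriptors store the buffer length as a two's
+	 * complement negative value, hence -skb->len below. */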
+	lp->tx_ring[entry].length = le16_to_cpu(-skb->len);
+
+	lp->tx_ring[entry].misc = 0x00000000;
+
+	lp->tx_skbuff[entry] = skb;
+	lp->tx_dma_addr[entry] = dma_map_single(&lp->pci_dev->dev, skb->data,
+						skb->len, DMA_TO_DEVICE);
+	lp->tx_ring[entry].base = (u32)le32_to_cpu(lp->tx_dma_addr[entry]);
+
+	/*** RTnet ***/
+	/* get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp =
+			cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+	/*** RTnet ***/
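+	/* (The patching above follows the RTnet convention: the caller
+	 *  pre-loads *skb->xmit_stamp with an offset, and the driver
+	 *  replaces it in place with the absolute transmission time in
+	 *  big-endian nanoseconds right before the descriptor is handed
+	 *  to the hardware.) */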
+
+	wmb();
+	lp->tx_ring[entry].status = le16_to_cpu(status);
+
+	lp->cur_tx++;
+	lp->stats.tx_bytes += skb->len;
+
+	/* Trigger an immediate send poll. */
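+	/* (0x0048 = IENA|TDMD: keep interrupts enabled and set transmit
+	 *  demand so the chip polls the Tx ring immediately.) */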
+	lp->a.write_csr(ioaddr, 0, 0x0048);
+
+	//dev->trans_start = jiffies; /*** RTnet ***/
+
+	if (lp->tx_ring[(entry + 1) & TX_RING_MOD_MASK].base == 0)
+		rtnetif_start_queue(dev); /*** RTnet ***/
+	else {
+		lp->tx_full = 1;
+		rtnetif_stop_queue(dev); /*** RTnet ***/
+	}
+	/*** RTnet ***/
+	rtdm_lock_put_irqrestore(&lp->lock, context);
+	/*** RTnet ***/
+	return 0;
+}
+
+/* The PCNET32 interrupt handler. */
+static int pcnet32_interrupt(rtdm_irq_t *irq_handle) /*** RTnet ***/
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+	struct rtnet_device *dev = rtdm_irq_get_arg(
+		irq_handle, struct rtnet_device); /*** RTnet ***/
+	struct pcnet32_private *lp;
+	unsigned long ioaddr;
+	u16 csr0, rap;
+	int boguscnt = max_interrupt_work;
+	int must_restart;
+	unsigned int old_packet_cnt; /*** RTnet ***/
+	int ret = RTDM_IRQ_NONE;
+
+	/*** RTnet ***
+    if (!dev) {
+	rtdm_printk (KERN_DEBUG "%s(): irq %d for unknown device\n",
+		__FUNCTION__, irq);
+	return;
+    }
+ *** RTnet ***/
+
+	ioaddr = dev->base_addr;
+	lp = dev->priv;
+	old_packet_cnt = lp->stats.rx_packets; /*** RTnet ***/
+
+	rtdm_lock_get(&lp->lock); /*** RTnet ***/
+
+	rap = lp->a.read_rap(ioaddr);
+	while ((csr0 = lp->a.read_csr(ioaddr, 0)) & 0x8600 && --boguscnt >= 0) {
+		/* Acknowledge all of the current interrupt sources ASAP. */
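+		/* (CSR0 interrupt flags are write-one-to-clear; masking out
+		 *  0x004f -- IENA|TDMD|STOP|STRT|INIT -- keeps this write
+		 *  from side-effecting the controller state.) */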
+		lp->a.write_csr(ioaddr, 0, csr0 & ~0x004f);
+
+		ret = RTDM_IRQ_HANDLED;
+
+		must_restart = 0;
+
+		if (pcnet32_debug > 5)
+			rtdm_printk(
+				KERN_DEBUG
+				"%s: interrupt  csr0=%#2.2x new csr=%#2.2x.\n",
+				dev->name, csr0, lp->a.read_csr(ioaddr, 0));
+
+		if (csr0 & 0x0400) /* Rx interrupt */
+			pcnet32_rx(dev, &time_stamp);
+
+		if (csr0 & 0x0200) { /* Tx-done interrupt */
+			unsigned int dirty_tx = lp->dirty_tx;
+
+			while (dirty_tx < lp->cur_tx) {
+				int entry = dirty_tx & TX_RING_MOD_MASK;
+				int status = (short)le16_to_cpu(
+					lp->tx_ring[entry].status);
+
+				if (status < 0)
+					break; /* It still hasn't been Txed */
+
+				lp->tx_ring[entry].base = 0;
+
+				if (status & 0x4000) {
+					/* There was a major error, log it. */
+					int err_status = le32_to_cpu(
+						lp->tx_ring[entry].misc);
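+					/* (TMD2 error bits, mapping as in
+					 *  the stock pcnet32 driver:
+					 *  RTRY=0x04000000, LCAR=0x08000000,
+					 *  LCOL=0x10000000, UFLO=0x40000000.) */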
+					lp->stats.tx_errors++;
+					if (err_status & 0x04000000)
+						lp->stats.tx_aborted_errors++;
+					if (err_status & 0x08000000)
+						lp->stats.tx_carrier_errors++;
+					if (err_status & 0x10000000)
+						lp->stats.tx_window_errors++;
+#ifndef DO_DXSUFLO
+					if (err_status & 0x40000000) {
+						lp->stats.tx_fifo_errors++;
+						/* Ackk!  On FIFO errors the Tx unit is turned off! */
+						/* Remove this verbosity later! */
+						rtdm_printk(
+							KERN_ERR
+							"%s: Tx FIFO error! CSR0=%4.4x\n",
+							dev->name, csr0);
+						must_restart = 1;
+					}
+#else
+					if (err_status & 0x40000000) {
+						lp->stats.tx_fifo_errors++;
+						if (!lp->dxsuflo) { /* If controller doesn't recover ... */
+							/* Ackk!  On FIFO errors the Tx unit is turned off! */
+							/* Remove this verbosity later! */
+							rtdm_printk(
+								KERN_ERR
+								"%s: Tx FIFO error! CSR0=%4.4x\n",
+								dev->name,
+								csr0);
+							must_restart = 1;
+						}
+					}
+#endif
+				} else {
+					if (status & 0x1800)
+						lp->stats.collisions++;
+					lp->stats.tx_packets++;
+				}
+
+				/* We must free the original skb */
+				if (lp->tx_skbuff[entry]) {
+					dma_unmap_single(
+						&lp->pci_dev->dev,
+						lp->tx_dma_addr[entry],
+						lp->tx_skbuff[entry]->len,
+						DMA_TO_DEVICE);
+					dev_kfree_rtskb(
+						lp->tx_skbuff[entry]); /*** RTnet ***/
+					lp->tx_skbuff[entry] = NULL;
+					lp->tx_dma_addr[entry] = 0;
+				}
+				dirty_tx++;
+			}
+
+			if (lp->cur_tx - dirty_tx >= TX_RING_SIZE) {
+				rtdm_printk(
+					KERN_ERR
+					"%s: out-of-sync dirty pointer, %d vs. %d, full=%d.\n",
+					dev->name, dirty_tx, lp->cur_tx,
+					lp->tx_full);
+				dirty_tx += TX_RING_SIZE;
+			}
+
+			if (lp->tx_full &&
+			    rtnetif_queue_stopped(dev) && /*** RTnet ***/
+			    dirty_tx > lp->cur_tx - TX_RING_SIZE + 2) {
+				/* The ring is no longer full, clear tbusy. */
+				lp->tx_full = 0;
+				rtnetif_wake_queue(dev); /*** RTnet ***/
+			}
+			lp->dirty_tx = dirty_tx;
+		}
+
+		/* Log misc errors. */
+		if (csr0 & 0x4000)
+			lp->stats.tx_errors++; /* Tx babble. */
+		if (csr0 & 0x1000) {
+			/*
+	     * this happens when our receive ring is full. This shouldn't
+	     * be a problem as we will see normal rx interrupts for the frames
+	     * in the receive ring. But there are some PCI chipsets (I can reproduce
+	     * this on SP3G with Intel saturn chipset) which sometimes have problems
+	     * and will fill up the receive ring with error descriptors. In this
+	     * situation we don't get a rx interrupt, but a missed frame interrupt sooner
+	     * or later. So we try to clean up our receive ring here.
+	     */
+			pcnet32_rx(dev, &time_stamp);
+			lp->stats.rx_errors++; /* Missed a Rx frame. */
+		}
+		if (csr0 & 0x0800) {
+			rtdm_printk(
+				KERN_ERR
+				"%s: Bus master arbitration failure, status %4.4x.\n",
+				dev->name, csr0);
+			/* unlike for the lance, there is no restart needed */
+		}
+
+		/*** RTnet ***/
+		/*** RTnet ***/
+	}
+
+	/* Clear any other interrupt, and set interrupt enable. */
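+	/* (0x7940 acknowledges BABL|CERR|MISS|MERR|IDON and sets IENA.) */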
+	lp->a.write_csr(ioaddr, 0, 0x7940);
+	lp->a.write_rap(ioaddr, rap);
+
+	if (pcnet32_debug > 4)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, csr0=%#4.4x.\n",
+			    dev->name, lp->a.read_csr(ioaddr, 0));
+
+	/*** RTnet ***/
+	rtdm_lock_put(&lp->lock);
+
+	if (old_packet_cnt != lp->stats.rx_packets)
+		rt_mark_stack_mgr(dev);
+
+	return ret;
+	/*** RTnet ***/
+}
+
+static int pcnet32_rx(struct rtnet_device *dev,
+		      nanosecs_abs_t *time_stamp) /*** RTnet ***/
+{
+	struct pcnet32_private *lp = dev->priv;
+	int entry = lp->cur_rx & RX_RING_MOD_MASK;
+
+	/* If we own the next entry, it's a new packet. Send it up. */
+	while ((short)le16_to_cpu(lp->rx_ring[entry].status) >= 0) {
+		int status = (short)le16_to_cpu(lp->rx_ring[entry].status) >> 8;
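+		/* (High byte of RMD1: 0x03 = STP|ENP is a complete
+		 *  single-buffer frame; FRAM=0x20, OFLO=0x10, CRC=0x08 and
+		 *  BUFF=0x04 are the error bits counted below.) */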
+
+		if (status != 0x03) { /* There was an error. */
+			/*
+	     * There is a tricky error noted by John Murphy,
+	     * <murf@perftech.com> to Russ Nelson: Even with full-sized
+	     * buffers it's possible for a jabber packet to use two
+	     * buffers, with only the last correctly noting the error.
+	     */
+			if (status &
+			    0x01) /* Only count a general error at the */
+				lp->stats.rx_errors++; /* end of a packet.*/
+			if (status & 0x20)
+				lp->stats.rx_frame_errors++;
+			if (status & 0x10)
+				lp->stats.rx_over_errors++;
+			if (status & 0x08)
+				lp->stats.rx_crc_errors++;
+			if (status & 0x04)
+				lp->stats.rx_fifo_errors++;
+			lp->rx_ring[entry].status &= le16_to_cpu(0x03ff);
+		} else {
+			/* Malloc up new buffer, compatible with net-2e. */
+			short pkt_len =
+				(le32_to_cpu(lp->rx_ring[entry].msg_length) &
+				 0xfff) -
+				4;
+			struct rtskb *skb; /*** RTnet ***/
+
+			if (pkt_len < 60) {
+				rtdm_printk(KERN_ERR "%s: Runt packet!\n",
+					    dev->name);
+				lp->stats.rx_errors++;
+			} else {
+				/*** RTnet ***/
+				/*int rx_in_place = 0;*/
+
+				/*if (pkt_len > rx_copybreak)*/ {
+					struct rtskb *newskb;
+
+					if ((newskb = rtnetdev_alloc_rtskb(
+						     dev, PKT_BUF_SZ))) {
+						rtskb_reserve(newskb, 2);
+						skb = lp->rx_skbuff[entry];
+						dma_unmap_single(
+							&lp->pci_dev->dev,
+							lp->rx_dma_addr[entry],
+							skb->len,
+							DMA_FROM_DEVICE);
+						rtskb_put(skb, pkt_len);
+						lp->rx_skbuff[entry] = newskb;
+						lp->rx_dma_addr
+							[entry] = dma_map_single(
+							&lp->pci_dev->dev,
+							newskb->tail,
+							newskb->len,
+							DMA_FROM_DEVICE);
+						lp->rx_ring[entry]
+							.base = le32_to_cpu(
+							lp->rx_dma_addr[entry]);
+						/*rx_in_place = 1;*/
+					} else
+						skb = NULL;
+				} /*else {
+		    skb = dev_alloc_skb(pkt_len+2);
+		}*/
+				/*** RTnet ***/
+
+				if (skb == NULL) {
+					int i;
+					rtdm_printk(
+						KERN_ERR
+						"%s: Memory squeeze, deferring packet.\n",
+						dev->name);
+					for (i = 0; i < RX_RING_SIZE; i++)
+						if ((short)le16_to_cpu(
+							    lp->rx_ring[(entry +
+									 i) &
+									RX_RING_MOD_MASK]
+								    .status) <
+						    0)
+							break;
+
+					if (i > RX_RING_SIZE - 2) {
+						lp->stats.rx_dropped++;
+						lp->rx_ring[entry].status |=
+							le16_to_cpu(0x8000);
+						lp->cur_rx++;
+					}
+					break;
+				}
+				/*** RTnet ***/
+				lp->stats.rx_bytes += skb->len;
+				skb->protocol = rt_eth_type_trans(skb, dev);
+				skb->time_stamp = *time_stamp;
+				rtnetif_rx(skb);
+				///dev->last_rx = jiffies;
+				/*** RTnet ***/
+				lp->stats.rx_packets++;
+			}
+		}
+		/*
+	 * The docs say that the buffer length isn't touched, but Andrew Boyd
+	 * of QNX reports that some revs of the 79C965 clear it.
+	 */
+		lp->rx_ring[entry].buf_length = le16_to_cpu(-PKT_BUF_SZ);
+		lp->rx_ring[entry].status |= le16_to_cpu(0x8000);
+		entry = (++lp->cur_rx) & RX_RING_MOD_MASK;
+	}
+
+	return 0;
+}
+
+static int pcnet32_close(struct rtnet_device *dev) /*** RTnet ***/
+{
+	unsigned long ioaddr = dev->base_addr;
+	struct pcnet32_private *lp = dev->priv;
+	int i;
+
+	rtnetif_stop_queue(dev); /*** RTnet ***/
+
+	lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+
+	if (pcnet32_debug > 1)
+		printk(KERN_DEBUG
+		       "%s: Shutting down ethercard, status was %2.2x.\n",
+		       dev->name, lp->a.read_csr(ioaddr, 0));
+
+	/* We stop the PCNET32 here -- it occasionally polls memory if we don't. */
+	lp->a.write_csr(ioaddr, 0, 0x0004);
+
+	/*
+     * Switch back to 16bit mode to avoid problems with dumb
+     * DOS packet driver after a warm reboot
+     */
+	lp->a.write_bcr(ioaddr, 20, 4);
+
+	/*** RTnet ***/
+	if ((i = rtdm_irq_free(&lp->irq_handle)) < 0)
+		return i;
+
+	rt_stack_disconnect(dev);
+	/*** RTnet ***/
+
+	/* free all allocated skbuffs */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		lp->rx_ring[i].status = 0;
+		if (lp->rx_skbuff[i]) {
+			dma_unmap_single(&lp->pci_dev->dev, lp->rx_dma_addr[i],
+					 lp->rx_skbuff[i]->len,
+					 DMA_FROM_DEVICE);
+			dev_kfree_rtskb(lp->rx_skbuff[i]); /*** RTnet ***/
+		}
+		lp->rx_skbuff[i] = NULL;
+		lp->rx_dma_addr[i] = 0;
+	}
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		if (lp->tx_skbuff[i]) {
+			dma_unmap_single(&lp->pci_dev->dev, lp->tx_dma_addr[i],
+					 lp->tx_skbuff[i]->len,
+					 DMA_TO_DEVICE);
+			dev_kfree_rtskb(lp->tx_skbuff[i]); /*** RTnet ***/
+		}
+		lp->tx_skbuff[i] = NULL;
+		lp->tx_dma_addr[i] = 0;
+	}
+
+	return 0;
+}
+
+/*** RTnet ***/
+static struct net_device_stats *pcnet32_get_stats(struct rtnet_device *rtdev)
+{
+	struct pcnet32_private *lp = rtdev->priv;
+	unsigned long ioaddr = rtdev->base_addr;
+	rtdm_lockctx_t context;
+	u16 saved_addr;
+
+	rtdm_lock_get_irqsave(&lp->lock, context);
+	saved_addr = lp->a.read_rap(ioaddr);
+	lp->stats.rx_missed_errors = lp->a.read_csr(ioaddr, 112);
+	lp->a.write_rap(ioaddr, saved_addr);
+	rtdm_lock_put_irqrestore(&lp->lock, context);
+
+	return &lp->stats;
+}
+
+/*** RTnet ***/
+
+static struct pci_driver pcnet32_driver = {
+	.name = DRV_NAME,
+	.probe = pcnet32_probe_pci,
+	.id_table = pcnet32_pci_tbl,
+};
+
+/* An additional parameter that may be passed in... */
+static int local_debug = -1;
+static int tx_start_pt = -1;
+
+module_param_named(debug, local_debug, int, 0444);
+MODULE_PARM_DESC(debug, DRV_NAME " debug level (0-6)");
+module_param(max_interrupt_work, int, 0444);
+MODULE_PARM_DESC(max_interrupt_work,
+		 DRV_NAME " maximum events handled per interrupt");
+/*** RTnet ***
+MODULE_PARM(rx_copybreak, "i");
+MODULE_PARM_DESC(rx_copybreak, DRV_NAME " copy breakpoint for copy-only-tiny-frames");
+ *** RTnet ***/
+module_param(tx_start_pt, int, 0444);
+MODULE_PARM_DESC(tx_start_pt, DRV_NAME " transmit start point (0-3)");
+module_param(pcnet32vlb, int, 0444);
+MODULE_PARM_DESC(pcnet32vlb, DRV_NAME " Vesa local bus (VLB) support (0/1)");
+module_param_array(options, int, NULL, 0444);
+MODULE_PARM_DESC(options, DRV_NAME " initial option setting(s) (0-15)");
+module_param_array(full_duplex, int, NULL, 0444);
+MODULE_PARM_DESC(full_duplex, DRV_NAME " full duplex setting(s) (1)");
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_DESCRIPTION("RTnet Driver for PCnet32 and PCnetPCI based ethercards");
+MODULE_LICENSE("GPL");
+
+static int __init pcnet32_init_module(void)
+{
+	printk(KERN_INFO "%s", version);
+
+	if (local_debug > 0)
+		pcnet32_debug = local_debug;
+
+	if ((tx_start_pt >= 0) && (tx_start_pt <= 3))
+		tx_start = tx_start_pt;
+
+	/* find the PCI devices */
+	if (!pci_register_driver(&pcnet32_driver))
+		pcnet32_have_pci = 1;
+
+	/* should we find any remaining VLbus devices? */
+	if (pcnet32vlb)
+		pcnet32_probe_vlbus();
+
+	if (cards_found)
+		printk(KERN_INFO PFX "%d cards_found.\n", cards_found);
+
+	return (pcnet32_have_pci + cards_found) ? 0 : -ENODEV;
+}
+
+static void __exit pcnet32_cleanup_module(void)
+{
+	struct rtnet_device *next_dev; /*** RTnet ***/
+
+	/* No need to check MOD_IN_USE, as sys_delete_module() checks. */
+	while (pcnet32_dev) {
+		struct pcnet32_private *lp = pcnet32_dev->priv;
+		next_dev = lp->next;
+		/*** RTnet ***/
+		rt_unregister_rtnetdev(pcnet32_dev);
+		rt_rtdev_disconnect(pcnet32_dev);
+		/*** RTnet ***/
+		release_region(pcnet32_dev->base_addr, PCNET32_TOTAL_SIZE);
+		dma_free_coherent(&lp->pci_dev->dev, sizeof(*lp), lp,
+				  lp->dma_addr);
+		/*** RTnet ***/
+		rtdev_free(pcnet32_dev);
+		/*** RTnet ***/
+		pcnet32_dev = next_dev;
+	}
+
+	if (pcnet32_have_pci)
+		pci_unregister_driver(&pcnet32_driver);
+}
+
+module_init(pcnet32_init_module);
+module_exit(pcnet32_cleanup_module);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c
new file mode 100644
index 0000000..9b2ac74
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/r8169.c
@@ -0,0 +1,2027 @@
+/*
+=========================================================================
+ r8169.c: A RealTek RTL8169s/8110s Gigabit Ethernet driver for Linux kernel 2.4.x.
+ --------------------------------------------------------------------
+
+ History:
+ Feb  4 2002	- created initially by ShuChen <shuchen@realtek.com.tw>.
+ May 20 2002	- Add link status force-mode and TBI mode support.
+=========================================================================
+
+RTL8169_VERSION "1.1"	<2002/10/4>
+
+	Bits 4:0 of MII register 4 are called the "selector field", and have to
+	be 00001b to indicate support of IEEE std 802.3 during the NWay process
+	of exchanging Link Code Words (FLP).
+
+RTL8169_VERSION "1.2"	<2003/6/17>
+	Update driver module name.
+	Modify ISR.
+	Add chip mcfg.
+
+RTL8169_VERSION "1.3"	<2003/6/20>
+	Add chip pcfg.
+	Add priv->phy_timer_t, rtl8169_phy_timer_t_handler()
+	Add rtl8169_hw_PHY_config()
+	Add rtl8169_hw_PHY_reset()
+
+RTL8169_VERSION "1.4"	<2003/7/14>
+	Add tx_bytes, rx_bytes.
+
+RTL8169_VERSION "1.5"	<2003/7/18>
+	Set 0x0000 to PHY at offset 0x0b.
+	Modify chip mcfg, pcfg
+	Force media for multiple card.
+RTL8169_VERSION "1.6"	<2003/8/25>
+	Modify receive data buffer.
+
+RTL8169_VERSION "1.7"	<2003/9/18>
+	Add Jumbo Frame support.
+
+RTL8169_VERSION "1.8"	<2003/10/21>
+	Performance and CPU Utilization Enhancement.
+
+RTL8169_VERSION "1.9"	<2003/12/29>
+	Enable Tx/Rx flow control.
+
+RTL8169_VERSION "2.0"	<2004/03/26>
+	Beta version.
+	Support for linux 2.6.x
+
+RTL8169_VERSION "2.1"	<2004/07/05>
+	Modify parameters.
+
+RTL8169_VERSION "2.2"	<2004/08/09>
+	Add pci_dma_sync_single().
+	Add pci_alloc_consistent()/pci_free_consistent().
+	Revise parameters.
+	Recognize our interrupt for linux 2.6.x.
+*/
+
+/*
+ * Ported to RTnet by Klaus Keppler <klaus.keppler@gmx.de>
+ * All RTnet porting stuff may be used and distributed according to the
+ * terms of the GNU General Public License (GPL).
+ *
+ * Version 2.2-04 <2005/08/22>
+ *    Initial release of this driver, based on RTL8169 driver v2.2
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/version.h>
+
+#include <linux/timer.h>
+#include <linux/init.h>
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,4,0)
+#include <linux/pci-aspm.h>
+#endif
+
+#include <rtnet_port.h>	/*** RTnet ***/
+
+#define RTL8169_VERSION "2.2-04"
+#define MODULENAME "rt_r8169"
+#define RTL8169_DRIVER_NAME   MODULENAME " RTnet Gigabit Ethernet driver " RTL8169_VERSION
+#define PFX MODULENAME ": "
+
+//#define RTL8169_DEBUG
+#undef RTL8169_JUMBO_FRAME_SUPPORT	/*** RTnet: do not enable! ***/
+#undef	RTL8169_HW_FLOW_CONTROL_SUPPORT
+
+
+#undef RTL8169_IOCTL_SUPPORT	/*** RTnet: do not enable! ***/
+#undef RTL8169_DYNAMIC_CONTROL
+#undef RTL8169_USE_IO
+
+
+#ifdef RTL8169_DEBUG
+	#define assert(expr) \
+		do { if(!(expr)) { printk( "Assertion failed! %s,%s,%s,line=%d\n", #expr,__FILE__,__FUNCTION__,__LINE__); } } while (0)
+	/*** RTnet / <kk>: rt_assert must be used instead of assert() within interrupt context! ***/
+	#define rt_assert(expr) \
+		do { if(!(expr)) { rtdm_printk( "Assertion failed! %s,%s,%s,line=%d\n", #expr,__FILE__,__FUNCTION__,__LINE__); } } while (0)
+	/*** RTnet / <kk>: RT_DBG_PRINT must be used instead of DBG_PRINT() within interrupt context! ***/
+	#define DBG_PRINT( fmt, args...)   printk("r8169: " fmt, ## args)
+	#define RT_DBG_PRINT( fmt, args...)   rtdm_printk("r8169: " fmt, ## args)
+#else
+	#define assert(expr) do {} while (0)
+	#define rt_assert(expr) do {} while (0)
+	#define DBG_PRINT( fmt, args...)   do {} while (0)
+	#define RT_DBG_PRINT( fmt, args...)   do {} while (0)
+#endif	// end of #ifdef RTL8169_DEBUG
+
+/* media options */
+#define MAX_UNITS 8
+static int media[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+
+/*** RTnet ***/
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** /RTnet ***/
+
+/* <kk> Enable debugging output */
+#define DEBUG_RX_SYNC 1
+#define DEBUG_RX_OTHER 2
+#define DEBUG_TX_SYNC 4
+#define DEBUG_TX_OTHER 8
+#define DEBUG_RUN 16
+static int local_debug = -1;
+static int r8169_debug = -1;
+module_param_named(debug, local_debug, int, 0444);
+MODULE_PARM_DESC(debug, MODULENAME " debug level (bit mask, see docs!)");
+
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static int max_interrupt_work = 20;
+
+/* MAC address length*/
+#define MAC_ADDR_LEN        6
+
+#define RX_FIFO_THRESH      7       /* 7 means NO threshold, Rx buffer level before first PCI xfer.  */
+#define RX_DMA_BURST        7       /* Maximum PCI burst, '6' is 1024 */
+#define TX_DMA_BURST        7       /* Maximum PCI burst, '6' is 1024 */
+#define ETTh                0x3F    /* 0x3F means NO threshold */
+
+#define ETH_HDR_LEN         14
+#define DEFAULT_MTU         1500
+#define DEFAULT_RX_BUF_LEN  1536
+
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+#define MAX_JUMBO_FRAME_MTU	( 10000 )
+#define MAX_RX_SKBDATA_SIZE	( MAX_JUMBO_FRAME_MTU + ETH_HDR_LEN )
+#else
+#define MAX_RX_SKBDATA_SIZE 1600
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+
+#define InterFrameGap       0x03    /* 3 means InterFrameGap = the shortest one */
+
+//#define NUM_TX_DESC         64	/* Number of Tx descriptor registers*/
+//#define NUM_RX_DESC         64	/* Number of Rx descriptor registers*/
+
+#define TX_RING_SIZE          16	/*** RTnet ***/
+#define NUM_TX_DESC TX_RING_SIZE	/* Number of Tx descriptor registers*/	/*** RTnet ***/
+#define RX_RING_SIZE           8	/*** RTnet ***/
+#define NUM_RX_DESC RX_RING_SIZE	/* Number of Rx descriptor registers*/	/*** RTnet ***/
+
+#define RTL_MIN_IO_SIZE     0x80
+#define TX_TIMEOUT          (6*HZ)
+//#define RTL8169_TIMER_EXPIRE_TIME 100 //100	/*** RTnet ***/
+
+
+#ifdef RTL8169_USE_IO
+#define RTL_W8(reg, val8)   outb ((val8), ioaddr + (reg))
+#define RTL_W16(reg, val16) outw ((val16), ioaddr + (reg))
+#define RTL_W32(reg, val32) outl ((val32), ioaddr + (reg))
+#define RTL_R8(reg)         inb (ioaddr + (reg))
+#define RTL_R16(reg)        inw (ioaddr + (reg))
+#define RTL_R32(reg)        ((unsigned long) inl (ioaddr + (reg)))
+#else
+/* write/read MMIO register */
+#define RTL_W8(reg, val8)   writeb ((val8), (void *)ioaddr + (reg))
+#define RTL_W16(reg, val16) writew ((val16), (void *)ioaddr + (reg))
+#define RTL_W32(reg, val32) writel ((val32), (void *)ioaddr + (reg))
+#define RTL_R8(reg)         readb ((void *)ioaddr + (reg))
+#define RTL_R16(reg)        readw ((void *)ioaddr + (reg))
+#define RTL_R32(reg)        ((unsigned long) readl ((void *)ioaddr + (reg)))
+#endif
+
+#define MCFG_METHOD_1		0x01
+#define MCFG_METHOD_2		0x02
+#define MCFG_METHOD_3		0x03
+#define MCFG_METHOD_4		0x04
+
+#define PCFG_METHOD_1		0x01	//PHY Reg 0x03 bit0-3 == 0x0000
+#define PCFG_METHOD_2		0x02	//PHY Reg 0x03 bit0-3 == 0x0001
+#define PCFG_METHOD_3		0x03	//PHY Reg 0x03 bit0-3 == 0x0002
+
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+#include "r8169_callback.h"
+#endif  //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+
+static const struct {
+	const char *name;
+	u8 mcfg;                 /* depends on RTL8169 docs */
+	u32 RxConfigMask;       /* should clear the bits supported by this chip */
+} rtl_chip_info[] = {
+	{ "RTL8169",  MCFG_METHOD_1,  0xff7e1880 },
+	{ "RTL8169s/8110s",  MCFG_METHOD_2,  0xff7e1880 },
+	{ "RTL8169s/8110s",  MCFG_METHOD_3,  0xff7e1880 },
+};
+
+
+static struct pci_device_id rtl8169_pci_tbl[] = {
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8136), 0, 0, 2 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8167), 0, 0, 1 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_REALTEK,	0x8169), 0, 0, 1 },
+	{ PCI_DEVICE(PCI_VENDOR_ID_DLINK,	0x4300), 0, 0, 1 },	/* <kk> D-Link DGE-528T */
+	{0,},
+};
+
+
+MODULE_DEVICE_TABLE (pci, rtl8169_pci_tbl);
+
+
+enum RTL8169_registers {
+	MAC0 = 0x0,
+	MAR0 = 0x8,
+	TxDescStartAddr	= 0x20,
+	TxHDescStartAddr= 0x28,
+	FLASH	= 0x30,
+	ERSR	= 0x36,
+	ChipCmd	= 0x37,
+	TxPoll	= 0x38,
+	IntrMask = 0x3C,
+	IntrStatus = 0x3E,
+	TxConfig = 0x40,
+	RxConfig = 0x44,
+	RxMissed = 0x4C,
+	Cfg9346 = 0x50,
+	Config0	= 0x51,
+	Config1	= 0x52,
+	Config2	= 0x53,
+	Config3	= 0x54,
+	Config4	= 0x55,
+	Config5	= 0x56,
+	MultiIntr = 0x5C,
+	PHYAR	= 0x60,
+	TBICSR	= 0x64,
+	TBI_ANAR = 0x68,
+	TBI_LPAR = 0x6A,
+	PHYstatus = 0x6C,
+	RxMaxSize = 0xDA,
+	CPlusCmd = 0xE0,
+	RxDescStartAddr	= 0xE4,
+	ETThReg	= 0xEC,
+	FuncEvent	= 0xF0,
+	FuncEventMask	= 0xF4,
+	FuncPresetState	= 0xF8,
+	FuncForceEvent	= 0xFC,
+};
+
+enum RTL8169_register_content {
+	/*InterruptStatusBits*/
+	SYSErr          = 0x8000,
+	PCSTimeout	= 0x4000,
+	SWInt		= 0x0100,
+	TxDescUnavail	= 0x80,
+	RxFIFOOver      = 0x40,
+	LinkChg         = 0x20,
+	RxOverflow      = 0x10,
+	TxErr   = 0x08,
+	TxOK    = 0x04,
+	RxErr   = 0x02,
+	RxOK    = 0x01,
+
+	/*RxStatusDesc*/
+	RxRES = 0x00200000,
+	RxCRC = 0x00080000,
+	RxRUNT= 0x00100000,
+	RxRWT = 0x00400000,
+
+	/*ChipCmdBits*/
+	CmdReset = 0x10,
+	CmdRxEnb = 0x08,
+	CmdTxEnb = 0x04,
+	RxBufEmpty = 0x01,
+
+	/*Cfg9346Bits*/
+	Cfg9346_Lock = 0x00,
+	Cfg9346_Unlock = 0xC0,
+
+	/*rx_mode_bits*/
+	AcceptErr = 0x20,
+	AcceptRunt = 0x10,
+	AcceptBroadcast = 0x08,
+	AcceptMulticast = 0x04,
+	AcceptMyPhys = 0x02,
+	AcceptAllPhys = 0x01,
+
+	/*RxConfigBits*/
+	RxCfgFIFOShift = 13,
+	RxCfgDMAShift = 8,
+
+	/*TxConfigBits*/
+	TxInterFrameGapShift = 24,
+	TxDMAShift = 8,
+
+	/* Config2 register */
+	MSIEnable	= (1 << 5),
+
+	/*rtl8169_PHYstatus*/
+	TBI_Enable	= 0x80,
+	TxFlowCtrl	= 0x40,
+	RxFlowCtrl	= 0x20,
+	_1000bpsF	= 0x10,
+	_100bps		= 0x08,
+	_10bps		= 0x04,
+	LinkStatus	= 0x02,
+	FullDup		= 0x01,
+
+	/*GIGABIT_PHY_registers*/
+	PHY_CTRL_REG = 0,
+	PHY_STAT_REG = 1,
+	PHY_AUTO_NEGO_REG = 4,
+	PHY_1000_CTRL_REG = 9,
+
+	/*GIGABIT_PHY_REG_BIT*/
+	PHY_Restart_Auto_Nego	= 0x0200,
+	PHY_Enable_Auto_Nego	= 0x1000,
+
+	//PHY_STAT_REG = 1;
+	PHY_Auto_Neco_Comp	= 0x0020,
+
+	//PHY_AUTO_NEGO_REG = 4;
+	PHY_Cap_10_Half		= 0x0020,
+	PHY_Cap_10_Full		= 0x0040,
+	PHY_Cap_100_Half	= 0x0080,
+	PHY_Cap_100_Full	= 0x0100,
+
+	//PHY_1000_CTRL_REG = 9;
+	PHY_Cap_1000_Full	= 0x0200,
+	PHY_Cap_1000_Half	= 0x0100,
+
+	PHY_Cap_PAUSE		= 0x0400,
+	PHY_Cap_ASYM_PAUSE	= 0x0800,
+
+	PHY_Cap_Null		= 0x0,
+
+	/*_MediaType*/
+	_10_Half	= 0x01,
+	_10_Full	= 0x02,
+	_100_Half	= 0x04,
+	_100_Full	= 0x08,
+	_1000_Full	= 0x10,
+
+	/*_TBICSRBit*/
+	TBILinkOK       = 0x02000000,
+};
+
+
+
+enum _DescStatusBit {
+	OWNbit	= 0x80000000,
+	EORbit	= 0x40000000,
+	FSbit	= 0x20000000,
+	LSbit	= 0x10000000,
+};
+
+
+struct TxDesc {
+	u32		status;
+	u32		vlan_tag;
+	u32		buf_addr;
+	u32		buf_Haddr;
+};
+
+struct RxDesc {
+	u32		status;
+	u32		vlan_tag;
+	u32		buf_addr;
+	u32		buf_Haddr;
+};
+
+
+typedef struct timer_list rt_timer_t;
+
+enum rtl8169_features {
+	RTL_FEATURE_WOL		= (1 << 0),
+	RTL_FEATURE_MSI		= (1 << 1),
+	RTL_FEATURE_GMII	= (1 << 2),
+};
+
+
+struct rtl8169_private {
+	unsigned long ioaddr;                /* memory map physical address*/
+	struct pci_dev *pci_dev;                /* Index of PCI device  */
+	struct net_device_stats stats;          /* statistics of net device */
+	rtdm_lock_t lock;                       /* spin lock flag */	/*** RTnet ***/
+	int chipset;
+	int mcfg;
+	int pcfg;
+/*	rt_timer_t r8169_timer; */	/*** RTnet ***/
+/*	unsigned long expire_time;	*/	/*** RTnet ***/
+
+	unsigned long phy_link_down_cnt;
+	unsigned long cur_rx;                   /* Index into the Rx descriptor buffer of next Rx pkt. */
+	unsigned long cur_tx;                   /* Index into the Tx descriptor buffer of next Tx pkt. */
+	unsigned long dirty_tx;
+	struct	TxDesc	*TxDescArray;           /* Index of 256-alignment Tx Descriptor buffer */
+	struct	RxDesc	*RxDescArray;           /* Index of 256-alignment Rx Descriptor buffer */
+	struct	rtskb	*Tx_skbuff[NUM_TX_DESC];/* Index of Transmit data buffer */	/*** RTnet ***/
+	struct	rtskb	*Rx_skbuff[NUM_RX_DESC];/* Receive data buffer */			/*** RTnet ***/
+	unsigned char   drvinit_fail;
+
+	dma_addr_t txdesc_array_dma_addr[NUM_TX_DESC];
+	dma_addr_t rxdesc_array_dma_addr[NUM_RX_DESC];
+	dma_addr_t rx_skbuff_dma_addr[NUM_RX_DESC];
+
+	void *txdesc_space;
+	dma_addr_t txdesc_phy_dma_addr;
+	int sizeof_txdesc_space;
+
+	void *rxdesc_space;
+	dma_addr_t rxdesc_phy_dma_addr;
+	int sizeof_rxdesc_space;
+
+	int curr_mtu_size;
+	int tx_pkt_len;
+	int rx_pkt_len;
+
+	int hw_rx_pkt_len;
+
+	int rx_buf_size;	/*** RTnet / <kk> ***/
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+	struct r8169_cb_t rt;
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+	unsigned char   linkstatus;
+	rtdm_irq_t irq_handle;			/*** RTnet ***/
+
+	unsigned features;
+};
+
+
+MODULE_AUTHOR ("Realtek, modified for RTnet by Klaus.Keppler@gmx.de");
+MODULE_DESCRIPTION ("RealTek RTL-8169 Gigabit Ethernet driver");
+module_param_array(media, int, NULL, 0444);
+MODULE_LICENSE("GPL");
+
+
+static int rtl8169_open (struct rtnet_device *rtdev);
+static int rtl8169_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev);
+
+static int rtl8169_interrupt(rtdm_irq_t *irq_handle);
+
+static void rtl8169_init_ring (struct rtnet_device *rtdev);
+static void rtl8169_hw_start (struct rtnet_device *rtdev);
+static int rtl8169_close (struct rtnet_device *rtdev);
+static void rtl8169_set_rx_mode (struct rtnet_device *rtdev);
+/* static void rtl8169_tx_timeout (struct net_device *dev); */	/*** RTnet ***/
+static struct net_device_stats *rtl8169_get_stats(struct rtnet_device *netdev);
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu);
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+static void rtl8169_hw_PHY_config (struct rtnet_device *rtdev);
+/* static void rtl8169_hw_PHY_reset(struct net_device *dev); */	/*** RTnet ***/
+static const u16 rtl8169_intr_mask = LinkChg | RxOverflow | RxFIFOOver | TxErr | TxOK | RxErr | RxOK | SYSErr;	/*** <kk> added SYSErr ***/
+static const unsigned int rtl8169_rx_config = (RX_FIFO_THRESH << RxCfgFIFOShift) | (RX_DMA_BURST << RxCfgDMAShift) | 0x0000000E;
+
+/*** <kk> these functions are backported from Linux-2.6.12's r8169.c driver ***/
+static void rtl8169_irq_mask_and_ack(unsigned long ioaddr);
+/* static void rtl8169_asic_down(unsigned long ioaddr); */ /*** RTnet ***/
+static void rtl8169_pcierr_interrupt(struct rtnet_device *rtdev);
+
+#define RTL8169_WRITE_GMII_REG_BIT( ioaddr, reg, bitnum, bitval )\
+{ \
+	int val; \
+	if( bitval == 1 ){ val = ( RTL8169_READ_GMII_REG( ioaddr, reg ) | (bitval<<bitnum) ) & 0xffff ; } \
+	else{ val = ( RTL8169_READ_GMII_REG( ioaddr, reg ) & (~(0x0001<<bitnum)) ) & 0xffff ; } \
+	RTL8169_WRITE_GMII_REG( ioaddr, reg, val ); \
+}
+
+
+
+#ifdef RTL8169_DEBUG
+unsigned alloc_rxskb_cnt = 0;
+#define RTL8169_ALLOC_RXSKB(bufsize)    dev_alloc_skb(bufsize); alloc_rxskb_cnt ++ ;
+#define RTL8169_FREE_RXSKB(skb)         kfree_skb(skb); alloc_rxskb_cnt -- ;
+#define RTL8169_NETIF_RX(skb)           netif_rx(skb); alloc_rxskb_cnt -- ;
+#else
+#define RTL8169_ALLOC_RXSKB(bufsize)    dev_alloc_skb(bufsize);
+#define RTL8169_FREE_RXSKB(skb)         kfree_skb(skb);
+#define RTL8169_NETIF_RX(skb)           netif_rx(skb);
+#endif //end #ifdef RTL8169_DEBUG
+
+
+//=================================================================
+//	PHYAR
+//	bit		Symbol
+//	31		Flag
+//	30-21	reserved
+//	20-16	5-bit GMII/MII register address
+//	15-0	16-bit GMII/MII register data
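+//	(Flag polarity as used by the helpers below: a write cycle starts
+//	with Flag=1 and the chip clears it on completion; a read cycle
+//	starts with Flag=0 and the chip sets it once the data in bits
+//	15-0 is valid.)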
+//=================================================================
+void RTL8169_WRITE_GMII_REG( unsigned long ioaddr, int RegAddr, int value )
+{
+	int	i;
+
+	RTL_W32 ( PHYAR, 0x80000000 | (RegAddr&0xFF)<<16 | value);
+	udelay(1000);
+
+	for( i = 2000; i > 0 ; i -- ){
+		// Check if the RTL8169 has completed writing to the specified MII register
+		if( ! (RTL_R32(PHYAR)&0x80000000) ){
+			break;
+		}
+		else{
+			udelay(100);
+		}// end of if( ! (RTL_R32(PHYAR)&0x80000000) )
+	}// end of for() loop
+}
+//=================================================================
+int RTL8169_READ_GMII_REG( unsigned long ioaddr, int RegAddr )
+{
+	int i, value = -1;
+
+	RTL_W32 ( PHYAR, 0x0 | (RegAddr&0xFF)<<16 );
+	udelay(1000);
+
+	for( i = 2000; i > 0 ; i -- ){
+		// Check if the RTL8169 has completed retrieving data from the specified MII register
+		if( RTL_R32(PHYAR) & 0x80000000 ){
+			value = (int)( RTL_R32(PHYAR)&0xFFFF );
+			break;
+		}
+		else{
+			udelay(100);
+		}// end of if( RTL_R32(PHYAR) & 0x80000000 )
+	}// end of for() loop
+	return value;
+}
+
+
+#ifdef RTL8169_IOCTL_SUPPORT
+#include "r8169_ioctl.c"
+#endif //end #ifdef RTL8169_IOCTL_SUPPORT
+
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+#include "r8169_callback.c"
+#endif
+
+
+
+//======================================================================================================
+//======================================================================================================
+static int rtl8169_init_board ( struct pci_dev *pdev, struct rtnet_device **dev_out, unsigned long *ioaddr_out, int region)
+{
+	unsigned long ioaddr = 0;
+	struct rtnet_device *rtdev;
+	struct rtl8169_private *priv;
+	int rc, i;
+	unsigned long mmio_start, mmio_end, mmio_flags, mmio_len;
+
+
+	assert (pdev != NULL);
+	assert (ioaddr_out != NULL);
+
+	*ioaddr_out = 0;
+	*dev_out = NULL;
+
+	/*** RTnet ***/
+	rtdev = rt_alloc_etherdev(sizeof(struct rtl8169_private),
+				RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (rtdev == NULL) {
+		printk (KERN_ERR PFX "unable to alloc new ethernet\n");
+		return -ENOMEM;
+	}
+	rtdev_alloc_name(rtdev, "rteth%d");
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+	/*** /RTnet ***/
+
+	priv = rtdev->priv;
+
+	/* disable ASPM completely as it can cause devices to randomly stop
+	 * working as well as full system hangs for some PCIe users */
+	pci_disable_link_state(pdev, PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1 |
+				     PCIE_LINK_STATE_CLKPM);
+
+	// enable device (incl. PCI PM wakeup and hotplug setup)
+	rc = pci_enable_device (pdev);
+	if (rc)
+		goto err_out;
+
+	if (pci_set_mwi(pdev) < 0)
+		printk("R8169: Mem-Wr-Inval unavailable\n");
+
+	mmio_start = pci_resource_start (pdev, region);
+	mmio_end = pci_resource_end (pdev, region);
+	mmio_flags = pci_resource_flags (pdev, region);
+	mmio_len = pci_resource_len (pdev, region);
+
+	// make sure PCI base addr 1 is MMIO
+	if (!(mmio_flags & IORESOURCE_MEM)) {
+		printk (KERN_ERR PFX "region #%d not an MMIO resource, aborting\n", region);
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+	// check for weird/broken PCI region reporting
+	if ( mmio_len < RTL_MIN_IO_SIZE ) {
+		printk (KERN_ERR PFX "Invalid PCI region size(s), aborting\n");
+		rc = -ENODEV;
+		goto err_out;
+	}
+
+
+	rc = pci_request_regions (pdev, rtdev->name);
+	if (rc)
+		goto err_out;
+
+	// enable PCI bus-mastering
+	pci_set_master (pdev);
+
+#ifdef RTL8169_USE_IO
+	ioaddr = pci_resource_start(pdev, 0);
+#else
+	// ioremap MMIO region
+	ioaddr = (unsigned long)ioremap (mmio_start, mmio_len);
+	if (ioaddr == 0) {
+		printk (KERN_ERR PFX "cannot remap MMIO, aborting\n");
+		rc = -EIO;
+		goto err_out_free_res;
+	}
+#endif
+
+	// Soft reset the chip.
+	RTL_W8 ( ChipCmd, CmdReset);
+
+	// Check that the chip has finished the reset.
+	for (i = 1000; i > 0; i--){
+		if ( (RTL_R8(ChipCmd) & CmdReset) == 0){
+			break;
+		}
+		else{
+			udelay (10);
+		}
+	}
+
+	{
+		u8 cfg2 = RTL_R8(Config2) & ~MSIEnable;
+		if (region) {
+			if (pci_enable_msi(pdev))
+				printk("R8169: no MSI, falling back to INTx.\n");
+			else {
+				cfg2 |= MSIEnable;
+				priv->features |= RTL_FEATURE_MSI;
+			}
+		}
+		RTL_W8(Config2, cfg2);
+	}
+
+	// identify config method
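+	// (the hardware version is encoded in the TxConfig bits selected by
+	//  mask 0x7c800000; anything unrecognized falls back to MCFG_METHOD_1)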
+	{
+		unsigned long val32 = (RTL_R32(TxConfig)&0x7c800000);
+
+		if( val32 == (0x1<<28) ){
+			priv->mcfg = MCFG_METHOD_4;
+		}
+		else if( val32 == (0x1<<26) ){
+			priv->mcfg = MCFG_METHOD_3;
+		}
+		else if( val32 == (0x1<<23) ){
+			priv->mcfg = MCFG_METHOD_2;
+		}
+		else if( val32 == 0x00000000 ){
+			priv->mcfg = MCFG_METHOD_1;
+		}
+		else{
+			priv->mcfg = MCFG_METHOD_1;
+		}
+	}
+
+	{
+		unsigned char val8 = (unsigned char)(RTL8169_READ_GMII_REG(ioaddr,3)&0x000f);
+		if( val8 == 0x00 ){
+			priv->pcfg = PCFG_METHOD_1;
+		}
+		else if( val8 == 0x01 ){
+			priv->pcfg = PCFG_METHOD_2;
+		}
+		else if( val8 == 0x02 ){
+			priv->pcfg = PCFG_METHOD_3;
+		}
+		else{
+			priv->pcfg = PCFG_METHOD_3;
+		}
+	}
+
+
+	for (i = ARRAY_SIZE (rtl_chip_info) - 1; i >= 0; i--){
+		if (priv->mcfg == rtl_chip_info[i].mcfg) {
+			priv->chipset = i;
+			goto match;
+		}
+	}
+
+	//if unknown chip, assume array element #0, original RTL-8169 in this case
+	printk (KERN_DEBUG PFX "PCI device %s: unknown chip version, assuming RTL-8169\n", pci_name(pdev));
+	priv->chipset = 0;
+
+match:
+	*ioaddr_out = ioaddr;
+	*dev_out = rtdev;
+	return 0;
+
+#ifndef RTL8169_USE_IO
+err_out_free_res:
+#endif
+	pci_release_regions (pdev);	/*** <kk> moved outside of #ifdev ***/
+
+err_out:
+	/*** RTnet ***/
+	rt_rtdev_disconnect(rtdev);
+	rtdev_free(rtdev);
+	/*** /RTnet ***/
+	return rc;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_init_one (struct pci_dev *pdev, const struct pci_device_id *ent)
+{
+	struct rtnet_device *rtdev = NULL;	/*** RTnet ***/
+	struct rtl8169_private *priv = NULL;
+	unsigned long ioaddr = 0;
+	static int board_idx = -1;
+	int region = ent->driver_data;
+	int i;
+	int option = -1, Cap10_100 = 0, Cap1000 = 0;
+
+
+	assert (pdev != NULL);
+	assert (ent != NULL);
+
+	board_idx++;
+
+	/*** RTnet ***/
+	if (board_idx >= MAX_UNITS) {
+		return -ENODEV;
+	}
+	if (cards[board_idx] == 0)
+		return -ENODEV;
+	/*** RTnet ***/
+
+	i = rtl8169_init_board (pdev, &rtdev, &ioaddr, region);
+	if (i < 0) {
+		return i;
+	}
+
+	priv = rtdev->priv;
+
+	assert (ioaddr != 0);
+	assert (rtdev != NULL);
+	assert (priv != NULL);
+
+	// Get MAC address //
+	for (i = 0; i < MAC_ADDR_LEN ; i++){
+		rtdev->dev_addr[i] = RTL_R8( MAC0 + i );
+	}
+
+	rtdev->open		= rtl8169_open;
+	rtdev->hard_start_xmit  = rtl8169_start_xmit;
+	rtdev->get_stats        = rtl8169_get_stats;
+	rtdev->stop             = rtl8169_close;
+	/* dev->tx_timeout      = rtl8169_tx_timeout; */			/*** RTnet ***/
+	/* dev->set_multicast_list = rtl8169_set_rx_mode; */	/*** RTnet ***/
+	/* dev->watchdog_timeo  = TX_TIMEOUT; */				/*** RTnet ***/
+	rtdev->irq              = pdev->irq;
+	rtdev->base_addr                = (unsigned long) ioaddr;
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+	rtdev->change_mtu		= rtl8169_change_mtu;
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+#ifdef RTL8169_IOCTL_SUPPORT
+	rtdev->do_ioctl                 = rtl8169_ioctl;
+#endif //end #ifdef RTL8169_IOCTL_SUPPORT
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+	priv->rt.dev = rtdev;
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+	priv = rtdev->priv;				// private data //
+	priv->pci_dev   = pdev;
+	priv->ioaddr    = ioaddr;
+
+//#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+	priv->curr_mtu_size = rtdev->mtu;
+	priv->tx_pkt_len = rtdev->mtu + ETH_HDR_LEN;
+	priv->rx_pkt_len = rtdev->mtu + ETH_HDR_LEN;
+	priv->hw_rx_pkt_len = priv->rx_pkt_len + 8;
+//#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+	DBG_PRINT("-------------------------- \n");
+	DBG_PRINT("dev->mtu = %d \n", rtdev->mtu);
+	DBG_PRINT("priv->curr_mtu_size = %d \n", priv->curr_mtu_size);
+	DBG_PRINT("priv->tx_pkt_len = %d \n", priv->tx_pkt_len);
+	DBG_PRINT("priv->rx_pkt_len = %d \n", priv->rx_pkt_len);
+	DBG_PRINT("priv->hw_rx_pkt_len = %d \n", priv->hw_rx_pkt_len);
+	DBG_PRINT("-------------------------- \n");
+
+	rtdm_lock_init(&priv->lock);	/*** RTnet ***/
+
+	/*** RTnet ***/
+	if (rt_register_rtnetdev(rtdev) < 0) {
+		/* clean up... */
+		pci_release_regions (pdev);
+		rt_rtdev_disconnect(rtdev);
+		rtdev_free(rtdev);
+		return -ENODEV;
+	}
+	/*** /RTnet ***/
+
+	pci_set_drvdata(pdev, rtdev);     //      pdev->driver_data = data;
+
+	printk (KERN_DEBUG "%s: Identified chip type is '%s'.\n", rtdev->name, rtl_chip_info[priv->chipset].name);
+	printk (KERN_INFO "%s: %s at 0x%lx, "
+				"%2.2x:%2.2x:%2.2x:%2.2x:%2.2x:%2.2x, "
+				"IRQ %d\n",
+				rtdev->name,
+				RTL8169_DRIVER_NAME,
+				rtdev->base_addr,
+				rtdev->dev_addr[0], rtdev->dev_addr[1],
+				rtdev->dev_addr[2], rtdev->dev_addr[3],
+				rtdev->dev_addr[4], rtdev->dev_addr[5],
+				rtdev->irq);
+
+	// Config PHY
+	rtl8169_hw_PHY_config(rtdev);
+
+	DBG_PRINT("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+	RTL_W8( 0x82, 0x01 );
+
+	if( priv->mcfg < MCFG_METHOD_3 ){
+		DBG_PRINT("Set PCI Latency=0x40\n");
+		pci_write_config_byte(pdev, PCI_LATENCY_TIMER, 0x40);
+	}
+
+	if( priv->mcfg == MCFG_METHOD_2 ){
+		DBG_PRINT("Set MAC Reg C+CR Offset 0x82h = 0x01h\n");
+		RTL_W8( 0x82, 0x01 );
+		DBG_PRINT("Set PHY Reg 0x0bh = 0x00h\n");
+		RTL8169_WRITE_GMII_REG( ioaddr, 0x0b, 0x0000 );	//w 0x0b 15 0 0
+	}
+
+	// if TBI is not enabled
+	if( !(RTL_R8(PHYstatus) & TBI_Enable) ){
+		int	val = RTL8169_READ_GMII_REG( ioaddr, PHY_AUTO_NEGO_REG );
+
+#ifdef RTL8169_HW_FLOW_CONTROL_SUPPORT
+		val |= PHY_Cap_PAUSE | PHY_Cap_ASYM_PAUSE ;
+#endif //end #define RTL8169_HW_FLOW_CONTROL_SUPPORT
+
+		option = (board_idx >= MAX_UNITS) ? 0 : media[board_idx];
+		// Force RTL8169 in 10/100/1000 Full/Half mode.
+		if( option > 0 ){
+			printk(KERN_INFO "%s: Force-mode Enabled. \n", rtdev->name);
+			Cap10_100 = 0;
+			Cap1000 = 0;
+			switch( option ){
+				case _10_Half:
+						Cap10_100 = PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _10_Full:
+						Cap10_100 = PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _100_Half:
+						Cap10_100 = PHY_Cap_100_Half | PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _100_Full:
+						Cap10_100 = PHY_Cap_100_Full | PHY_Cap_100_Half | PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_Null;
+						break;
+				case _1000_Full:
+						Cap10_100 = PHY_Cap_100_Full | PHY_Cap_100_Half | PHY_Cap_10_Full | PHY_Cap_10_Half;
+						Cap1000 = PHY_Cap_1000_Full;
+						break;
+				default:
+						break;
+			}
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_AUTO_NEGO_REG, Cap10_100 | ( val&0xC1F ) );	//leave PHY_AUTO_NEGO_REG bit4:0 unchanged
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_1000_CTRL_REG, Cap1000 );
+		}
+		else{
+			printk(KERN_INFO "%s: Auto-negotiation Enabled.\n", rtdev->name);
+
+			// enable 10/100 Full/Half Mode, leave PHY_AUTO_NEGO_REG bit4:0 unchanged
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_AUTO_NEGO_REG,
+				PHY_Cap_10_Half | PHY_Cap_10_Full | PHY_Cap_100_Half | PHY_Cap_100_Full | ( val&0xC1F ) );
+
+			// enable 1000 Full Mode
+//			RTL8169_WRITE_GMII_REG( ioaddr, PHY_1000_CTRL_REG, PHY_Cap_1000_Full );
+			RTL8169_WRITE_GMII_REG( ioaddr, PHY_1000_CTRL_REG, PHY_Cap_1000_Full | PHY_Cap_1000_Half);	//rtl8168
+
+		}// end of if( option > 0 )
+
+		// Enable auto-negotiation and restart auto-negotiation
+		RTL8169_WRITE_GMII_REG( ioaddr, PHY_CTRL_REG, PHY_Enable_Auto_Nego | PHY_Restart_Auto_Nego );
+		udelay(100);
+
+		// wait for auto-negotiation process
+		for( i = 10000; i > 0; i-- ){
+			//check if auto-negotiation complete
+			if( RTL8169_READ_GMII_REG(ioaddr, PHY_STAT_REG) & PHY_Auto_Neco_Comp ){
+				udelay(100);
+				option = RTL_R8(PHYstatus);
+				if( option & _1000bpsF ){
+					printk(KERN_INFO "%s: 1000Mbps Full-duplex operation.\n", rtdev->name);
+				}
+				else{
+					printk(KERN_INFO "%s: %sMbps %s-duplex operation.\n", rtdev->name,
+							(option & _100bps) ? "100" : "10", (option & FullDup) ? "Full" : "Half" );
+				}
+				break;
+			}
+			else{
+				udelay(100);
+			}// end of if( RTL8169_READ_GMII_REG(ioaddr, 1) & 0x20 )
+		}// end for-loop to wait for auto-negotiation process
+
+		option = RTL_R8(PHYstatus);
+		if( option & _1000bpsF ){
+			priv->linkstatus = _1000_Full;
+		}
+		else{
+			if(option & _100bps){
+				priv->linkstatus = (option & FullDup) ? _100_Full : _100_Half;
+			}
+			else{
+				priv->linkstatus = (option & FullDup) ? _10_Full : _10_Half;
+			}
+		}
+		DBG_PRINT("priv->linkstatus = 0x%02x\n", priv->linkstatus);
+
+	}// end of TBI is not enabled
+	else{
+		udelay(100);
+		DBG_PRINT("1000Mbps Full-duplex operation, TBI Link %s!\n",(RTL_R32(TBICSR) & TBILinkOK) ? "OK" : "Failed" );
+
+	}// end of TBI is enabled
+
+	return 0;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_remove_one (struct pci_dev *pdev)
+{
+	struct rtnet_device *rtdev = pci_get_drvdata(pdev);
+	struct rtl8169_private *priv = rtdev->priv;
+
+	assert (rtdev != NULL);
+
+	/*** RTnet ***/
+	rt_unregister_rtnetdev(rtdev);
+	rt_rtdev_disconnect(rtdev);
+	/*** /RTnet ***/
+
+	if (priv->features & RTL_FEATURE_MSI)
+		pci_disable_msi(pdev);
+
+#ifdef RTL8169_USE_IO
+#else
+	iounmap ((void *)(rtdev->base_addr));
+#endif
+	pci_release_regions(pdev);
+
+	rtdev_free(rtdev);	/*** RTnet ***/
+
+	pci_disable_device(pdev);	/*** <kk> Disable device now :-) ***/
+
+	pci_set_drvdata(pdev, NULL);
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_open (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	struct pci_dev *pdev = priv->pci_dev;
+	int retval;
+//	u8 diff;
+//	u32 TxPhyAddr, RxPhyAddr;
+
+	if( priv->drvinit_fail == 1 ){
+		printk("%s: Gigabit driver open failed.\n", rtdev->name );
+		return -ENOMEM;
+	}
+
+	/*** RTnet ***/
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	retval = rtdm_irq_request(&priv->irq_handle, rtdev->irq, rtl8169_interrupt, 0, "rt_r8169", rtdev);
+	/*** /RTnet ***/
+
+	// retval = request_irq (dev->irq, rtl8169_interrupt, SA_SHIRQ, dev->name, dev);
+	if (retval) {
+		return retval;
+	}
+
+
+	//2004-05-11
+	// Allocate tx/rx descriptor space
+	priv->sizeof_txdesc_space = NUM_TX_DESC * sizeof(struct TxDesc)+256;
+	priv->txdesc_space = dma_alloc_coherent(&pdev->dev,
+		priv->sizeof_txdesc_space, &priv->txdesc_phy_dma_addr, GFP_ATOMIC);
+	if( priv->txdesc_space == NULL ){
+		printk("%s: Gigabit driver alloc txdesc_space failed.\n", rtdev->name );
+		return -ENOMEM;
+	}
+	priv->sizeof_rxdesc_space = NUM_RX_DESC * sizeof(struct RxDesc)+256;
+	priv->rxdesc_space = dma_alloc_coherent(&pdev->dev,
+		priv->sizeof_rxdesc_space, &priv->rxdesc_phy_dma_addr, GFP_ATOMIC);
+	if( priv->rxdesc_space == NULL ){
+		printk("%s: Gigabit driver alloc rxdesc_space failed.\n", rtdev->name );
+		return -ENOMEM;
+	}
+
+	if(priv->txdesc_phy_dma_addr & 0xff){
+		printk("%s: Gigabit driver txdesc_phy_dma_addr is not 256-bytes-aligned.\n", rtdev->name );
+	}
+	if(priv->rxdesc_phy_dma_addr & 0xff){
+		printk("%s: Gigabit driver rxdesc_phy_dma_addr is not 256-bytes-aligned.\n", rtdev->name );
+	}
+	// Set tx/rx descriptor space
+	priv->TxDescArray = (struct TxDesc *)priv->txdesc_space;
+	priv->RxDescArray = (struct RxDesc *)priv->rxdesc_space;
+
+	{
+		int i;
+		struct rtskb *skb = NULL;	/*** RTnet ***/
+		priv->rx_buf_size = (rtdev->mtu <= 1500 ? DEFAULT_RX_BUF_LEN : rtdev->mtu + 32);	/*** RTnet / <kk> ***/
+
+		for(i=0;i<NUM_RX_DESC;i++){
+			//skb = RTL8169_ALLOC_RXSKB(MAX_RX_SKBDATA_SIZE);	/*** <kk> ***/
+			skb = rtnetdev_alloc_rtskb(rtdev, priv->rx_buf_size); /*** RTnet ***/
+			if( skb != NULL ) {
+				rtskb_reserve (skb, 2);	// 16 byte align the IP fields. //
+				priv->Rx_skbuff[i] = skb;
+			}
+			else{
+				printk("%s: Gigabit driver failed to allocate skbuff.\n", rtdev->name);
+				priv->drvinit_fail = 1;
+			}
+		}
+	}
+
+
+	//////////////////////////////////////////////////////////////////////////////
+	rtl8169_init_ring(rtdev);
+	rtl8169_hw_start(rtdev);
+
+	// ------------------------------------------------------
+
+	//DBG_PRINT("%s: %s() alloc_rxskb_cnt = %d\n", dev->name, __FUNCTION__, alloc_rxskb_cnt );	/*** <kk> won't work anymore... ***/
+
+	return 0;
+
+}//end of rtl8169_open (struct rtnet_device *rtdev)
+
+
+
+
+
+
+
+
+//======================================================================================================
+
+
+
+//======================================================================================================
+static void rtl8169_hw_PHY_config (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	void *ioaddr = (void*)priv->ioaddr;
+
+	DBG_PRINT("priv->mcfg=%d, priv->pcfg=%d\n",priv->mcfg,priv->pcfg);
+
+	if( priv->mcfg == MCFG_METHOD_4 ){
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0001 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1b, 0x841e );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x0e, 0x7bfb );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x09, 0x273a );
+
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0002 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x90D0 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0000 );
+	}else if((priv->mcfg == MCFG_METHOD_2)||(priv->mcfg == MCFG_METHOD_3)){
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0001 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x15, 0x1000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x18, 0x65C7 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0x00A1 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0x0008 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x1020 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x1000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xFF41 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDE60 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0140 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x0077 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x7000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xDF01 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDF20 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0xFF95 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xFA00 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xA000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xFF41 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDE20 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0x0140 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0x00BB );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xB000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x03, 0xDF01 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x02, 0xDF20 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x01, 0xFF95 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x00, 0xBF00 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF800 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0xF000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x04, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x1F, 0x0000 );
+		RTL8169_WRITE_GMII_REG( (unsigned long)ioaddr, 0x0B, 0x0000 );
+	}
+	else{
+		DBG_PRINT("priv->mcfg=%d. Discard hw PHY config.\n",priv->mcfg);
+	}
+}
+
+
+
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_hw_start (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	u32 i;
+
+
+	/* Soft reset the chip. */
+	RTL_W8 ( ChipCmd, CmdReset);
+
+	/* Check that the chip has finished the reset. */
+	for (i = 1000; i > 0; i--){
+		if ((RTL_R8( ChipCmd ) & CmdReset) == 0) break;
+		else udelay (10);
+	}
+
+	RTL_W8 ( Cfg9346, Cfg9346_Unlock);
+	RTL_W8 ( ChipCmd, CmdTxEnb | CmdRxEnb);
+	RTL_W8 ( ETThReg, ETTh);
+
+	// For gigabit rtl8169
+	RTL_W16	( RxMaxSize, (unsigned short)priv->hw_rx_pkt_len );
+
+	// Set Rx Config register
+	i = rtl8169_rx_config | ( RTL_R32( RxConfig ) & rtl_chip_info[priv->chipset].RxConfigMask);
+	RTL_W32 ( RxConfig, i);
+
+
+	/* Set DMA burst size and Interframe Gap Time */
+	RTL_W32 ( TxConfig, (TX_DMA_BURST << TxDMAShift) | (InterFrameGap << TxInterFrameGapShift) );
+
+
+
+	RTL_W16( CPlusCmd, RTL_R16(CPlusCmd) );
+
+	if(	priv->mcfg == MCFG_METHOD_2 ||
+		priv->mcfg == MCFG_METHOD_3)
+	{
+		RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<14)|(1<<3)) );
+		DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3 and bit-14\n");
+	}
+	else
+	{
+		RTL_W16( CPlusCmd, (RTL_R16(CPlusCmd)|(1<<3)) );
+		DBG_PRINT("Set MAC Reg C+CR Offset 0xE0: bit-3.\n");
+	}
+
+	{
+		//RTL_W16(0xE2, 0x1517);
+		//RTL_W16(0xE2, 0x152a);
+		//RTL_W16(0xE2, 0x282a);
+		RTL_W16(0xE2, 0x0000);		/* 0xE2 = IntrMitigate */
+	}
+
+	priv->cur_rx = 0;
+
+	RTL_W32 ( TxDescStartAddr, priv->txdesc_phy_dma_addr);
+	RTL_W32 ( TxDescStartAddr + 4, 0x00);
+	RTL_W32 ( RxDescStartAddr, priv->rxdesc_phy_dma_addr);
+	RTL_W32 ( RxDescStartAddr + 4, 0x00);
+
+	RTL_W8 ( Cfg9346, Cfg9346_Lock );
+	udelay (10);
+
+	RTL_W32 ( RxMissed, 0 );
+
+	rtl8169_set_rx_mode (rtdev);
+
+	/* no early-rx interrupts */
+	RTL_W16 ( MultiIntr, RTL_R16(MultiIntr) & 0xF000);
+
+	/* enable all known interrupts by setting the interrupt mask */
+	RTL_W16 ( IntrMask, rtl8169_intr_mask);
+
+	rtnetif_start_queue (rtdev);	/*** RTnet ***/
+
+}//end of rtl8169_hw_start (struct rtnet_device *rtdev)
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_init_ring (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	struct pci_dev *pdev = priv->pci_dev;
+	int i;
+	struct rtskb	*skb;
+
+
+	priv->cur_rx = 0;
+	priv->cur_tx = 0;
+	priv->dirty_tx = 0;
+	memset(priv->TxDescArray, 0x0, NUM_TX_DESC*sizeof(struct TxDesc));
+	memset(priv->RxDescArray, 0x0, NUM_RX_DESC*sizeof(struct RxDesc));
+
+
+	for (i=0 ; i<NUM_TX_DESC ; i++){
+		priv->Tx_skbuff[i]=NULL;
+		priv->txdesc_array_dma_addr[i] = dma_map_single(&pdev->dev, &priv->TxDescArray[i], sizeof(struct TxDesc), DMA_TO_DEVICE);
+	}
+
+	for (i=0; i <NUM_RX_DESC; i++) {
+		if(i==(NUM_RX_DESC-1)){
+			priv->RxDescArray[i].status = cpu_to_le32((OWNbit | EORbit) | (unsigned long)priv->hw_rx_pkt_len);
+		}
+		else{
+			priv->RxDescArray[i].status = cpu_to_le32(OWNbit | (unsigned long)priv->hw_rx_pkt_len);
+		}
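+		/* (Every Rx descriptor starts out hardware-owned with the
+		 *  buffer size in its low status bits; the last one also
+		 *  carries EORbit so the NIC wraps back to descriptor 0.) */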
+
+		{//-----------------------------------------------------------------------
+			skb = priv->Rx_skbuff[i];
+			priv->rx_skbuff_dma_addr[i] = dma_map_single(&pdev->dev, skb->data, priv->rx_buf_size /* MAX_RX_SKBDATA_SIZE */, DMA_FROM_DEVICE);	/*** <kk> ***/
+
+			if( skb != NULL ){
+				priv->RxDescArray[i].buf_addr = cpu_to_le32(priv->rx_skbuff_dma_addr[i]);
+				priv->RxDescArray[i].buf_Haddr = 0;
+			}
+			else{
+				DBG_PRINT("%s: %s() Rx_skbuff == NULL\n", rtdev->name, __FUNCTION__);
+				priv->drvinit_fail = 1;
+			}
+		}//-----------------------------------------------------------------------
+		priv->rxdesc_array_dma_addr[i] = dma_map_single(&pdev->dev, &priv->RxDescArray[i], sizeof(struct RxDesc), DMA_TO_DEVICE);
+		dma_sync_single_for_device(&pdev->dev, priv->rxdesc_array_dma_addr[i], sizeof(struct RxDesc), DMA_TO_DEVICE);
+	}
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_tx_clear (struct rtl8169_private *priv)
+{
+	int i;
+
+	priv->cur_tx = 0;
+	for ( i = 0 ; i < NUM_TX_DESC ; i++ ){
+		if ( priv->Tx_skbuff[i] != NULL ) {
+			dev_kfree_rtskb ( priv->Tx_skbuff[i] );
+			priv->Tx_skbuff[i] = NULL;
+			priv->stats.tx_dropped++;
+		}
+	}
+}
+
+
+
+
+
+
+
+//======================================================================================================
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_start_xmit (struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	struct pci_dev *pdev = priv->pci_dev;
+	int entry = priv->cur_tx % NUM_TX_DESC;
+	// int buf_len = 60;
+	dma_addr_t txbuf_dma_addr;
+	rtdm_lockctx_t context;	/*** RTnet ***/
+	u32 status, len;		/* <kk> */
+
+	rtdm_lock_get_irqsave(&priv->lock, context);	/*** RTnet ***/
+
+	status = le32_to_cpu(priv->TxDescArray[entry].status);
+
+	if( (status & OWNbit)==0 ){
+
+		priv->Tx_skbuff[entry] = skb;
+
+		len = skb->len;
+		if (len < ETH_ZLEN) {
+			skb = rtskb_padto(skb, ETH_ZLEN);
+			if (skb == NULL) {
+				/* Defensive fix: the original fell through and
+				   would dereference a NULL rtskb below.  Drop
+				   the frame instead (assuming rtskb_padto does
+				   not free the rtskb on failure). */
+				rtdm_printk("%s: Error -- rtskb_padto returned NULL; out of memory?\n", rtdev->name);
+				dev_kfree_rtskb(priv->Tx_skbuff[entry]);
+				priv->Tx_skbuff[entry] = NULL;
+				rtdm_lock_put_irqrestore(&priv->lock, context);
+				return 0;
+			}
+			len = ETH_ZLEN;
+		}
+
+		txbuf_dma_addr = dma_map_single(&pdev->dev, skb->data, len, DMA_TO_DEVICE);
+
+		priv->TxDescArray[entry].buf_addr = cpu_to_le32(txbuf_dma_addr);
+
+		/* <kk> print TX frame debug informations? */
+		while (r8169_debug & (DEBUG_TX_SYNC | DEBUG_TX_OTHER)) {
+			unsigned short proto = 0;
+
+			/* get ethernet protocol id */
+			if (skb->len < 14) break;	/* packet too small! */
+			if (skb->len > 12) proto = be16_to_cpu(*((unsigned short *)(skb->data + 12)));
+
+			if (proto == 0x9021 && !(r8169_debug & DEBUG_TX_SYNC)) {
+				/* don't show TDMA Sync frames for better debugging, so look at RTmac frame type... */
+				unsigned short type;
+
+				if (skb->len < 16) break;	/* packet too small! */
+				type = be16_to_cpu(*((unsigned short *)(skb->data + 14)));
+
+				if (type == 0x0001) {
+					/* TDMA-Frame; get Message ID */
+					unsigned short tdma_version;
+
+					if (skb->len < 20) break;	/* packet too small! */
+					tdma_version = be16_to_cpu(*((unsigned short *)(skb->data + 18)));
+
+					if (tdma_version == 0x0201) {
+						unsigned short tdma_id;
+
+						if (skb->len < 22) break;	/* packet too small! */
+						tdma_id = be16_to_cpu(*((unsigned short *)(skb->data + 20)));
+
+						if (tdma_id == 0x0000 && !(r8169_debug & DEBUG_TX_SYNC)) {
+							/* TDMA sync frame found, but not allowed to print it */
+							break;
+						}
+					}
+				}
+
+			}
+
+			/* print frame informations */
+			RT_DBG_PRINT("%s: TX len = %d, skb->len = %d, eth_proto=%04x\n", __FUNCTION__, len, skb->len, proto);
+
+			break;	/* leave loop */
+		}
+
+		if( len > priv->tx_pkt_len ){
+			rtdm_printk("%s: Error -- Tx packet size(%d) > mtu(%d)+14\n", rtdev->name, len, rtdev->mtu);
+			len = priv->tx_pkt_len;
+		}
+
+		/*** RTnet ***/
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp)
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+		/*** /RTnet ***/
+
+		if( entry != (NUM_TX_DESC-1) ){
+			status = (OWNbit | FSbit | LSbit) | len;
+		}
+		else{
+			status = (OWNbit | EORbit | FSbit | LSbit) | len;
+		}
+		priv->TxDescArray[entry].status = cpu_to_le32(status);
+
+		dma_sync_single_for_device(&pdev->dev, priv->txdesc_array_dma_addr[entry], sizeof(struct TxDesc), DMA_TO_DEVICE);
+
+		RTL_W8 ( TxPoll, 0x40);		//set polling bit
+
+		//rtdev->trans_start = jiffies;
+
+		priv->stats.tx_bytes += len;
+		priv->cur_tx++;
+	}	/* end of if ((status & OWNbit) == 0) */
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);	/*** RTnet ***/
+
+	if ( (priv->cur_tx - NUM_TX_DESC) == priv->dirty_tx ){
+		if (r8169_debug & DEBUG_RUN) rtdm_printk(KERN_DEBUG "%s: stopping rtnetif queue\n", __FUNCTION__);
+		rtnetif_stop_queue (rtdev);
+	}
+	else{
+		if (rtnetif_queue_stopped (rtdev)){
+			if (r8169_debug & DEBUG_RUN) rtdm_printk(KERN_DEBUG "%s: waking rtnetif queue\n", __FUNCTION__);
+			rtnetif_wake_queue (rtdev);
+		}
+	}
+
+	return 0;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity. */
+static void rtl8169_tx_interrupt (struct rtnet_device *rtdev, struct rtl8169_private *priv, unsigned long ioaddr)
+{
+	unsigned long dirty_tx, tx_left=0;
+	//int entry = priv->cur_tx % NUM_TX_DESC;	/* <kk> */
+	int txloop_cnt = 0;
+
+	rt_assert (rtdev != NULL);
+	rt_assert (priv != NULL);
+	rt_assert (ioaddr != 0);
+
+	rtdm_lock_get(&priv->lock); /*** RTnet ***/
+
+	dirty_tx = priv->dirty_tx;
+	smp_rmb();	/*** <kk> ***/
+	tx_left = priv->cur_tx - dirty_tx;
+
+	while( (tx_left > 0) && (txloop_cnt < max_interrupt_work) ){
+		unsigned int entry = dirty_tx % NUM_TX_DESC;	/* <kk> */
+		if( (le32_to_cpu(priv->TxDescArray[entry].status) & OWNbit) == 0 ){
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+			r8169_callback_tx(&(priv->rt), 1, priv->Tx_skbuff[dirty_tx % NUM_TX_DESC]->len);
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+			if (priv->txdesc_array_dma_addr[entry])
+				dma_unmap_single(&priv->pci_dev->dev, priv->txdesc_array_dma_addr[entry], priv->Tx_skbuff[entry]->len, DMA_TO_DEVICE);	/*** ##KK## ***/
+			dev_kfree_rtskb( priv->Tx_skbuff[entry] );	/*** RTnet; previously: dev_kfree_skb_irq() - luckily we're within an IRQ ***/
+			priv->Tx_skbuff[entry] = NULL;
+			priv->stats.tx_packets++;
+			dirty_tx++;
+			tx_left--;
+		}
+		txloop_cnt ++;
+	}
+
+	if (priv->dirty_tx != dirty_tx) {
+		priv->dirty_tx = dirty_tx;
+		smp_wmb();	/*** <kk> ***/
+		if (rtnetif_queue_stopped (rtdev))
+			rtnetif_wake_queue (rtdev);
+	}
+
+	rtdm_lock_put(&priv->lock); /*** RTnet ***/
+
+}
+
+
+
+
+
+
+//======================================================================================================
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity. */
+static void rtl8169_rx_interrupt (struct rtnet_device *rtdev, struct rtl8169_private *priv, unsigned long ioaddr, nanosecs_abs_t *time_stamp)
+{
+	struct pci_dev *pdev = priv->pci_dev;
+	int cur_rx;
+	int pkt_size = 0 ;
+	int rxdesc_cnt = 0;
+	/* int ret; */	/*** RTnet ***/
+	struct rtskb *n_skb = NULL;
+	struct rtskb *cur_skb;
+	struct rtskb *rx_skb;
+	struct	RxDesc	*rxdesc;
+
+	rt_assert (rtdev != NULL);
+	rt_assert (priv != NULL);
+	rt_assert (ioaddr != 0);
+
+
+	cur_rx = priv->cur_rx;
+
+	rxdesc = &priv->RxDescArray[cur_rx];
+	dma_sync_single_for_cpu(&pdev->dev, priv->rxdesc_array_dma_addr[cur_rx], sizeof(struct RxDesc), DMA_FROM_DEVICE);
+
+	while ( ((le32_to_cpu(rxdesc->status) & OWNbit)== 0) && (rxdesc_cnt < max_interrupt_work) ){
+
+	    rxdesc_cnt++;
+
+	    if( le32_to_cpu(rxdesc->status) & RxRES ){
+			rtdm_printk(KERN_INFO "%s: Rx ERROR!!!\n", rtdev->name);
+			priv->stats.rx_errors++;
+			if ( le32_to_cpu(rxdesc->status) & (RxRWT|RxRUNT) )
+				priv->stats.rx_length_errors++;
+			if ( le32_to_cpu(rxdesc->status) & RxCRC) {
+				/* rt_via-rhine.c takes the lock around this increment, so we do the same here <kk> */
+				rtdm_lock_get(&priv->lock); /*** RTnet ***/
+				priv->stats.rx_crc_errors++;
+				rtdm_lock_put(&priv->lock); /*** RTnet ***/
+			}
+	    }
+	    else{
+			pkt_size=(int)(le32_to_cpu(rxdesc->status) & 0x00001FFF)-4;
+
+			if( pkt_size > priv->rx_pkt_len ){
+				rtdm_printk("%s: Error -- Rx packet size(%d) > mtu(%d)+14\n", rtdev->name, pkt_size, rtdev->mtu);
+				pkt_size = priv->rx_pkt_len;
+			}
+
+			{// -----------------------------------------------------
+				rx_skb = priv->Rx_skbuff[cur_rx];
+				// n_skb = RTL8169_ALLOC_RXSKB(MAX_RX_SKBDATA_SIZE);	/*** <kk> ***/
+				n_skb = rtnetdev_alloc_rtskb(rtdev, priv->rx_buf_size);	/*** RTnet ***/
+				if( n_skb != NULL ) {
+					rtskb_reserve (n_skb, 2);	/* 16-byte align the IP fields */
+
+					// Indicate rx_skb
+					if( rx_skb != NULL ){
+						/* sync the whole received frame for the CPU, not just sizeof(struct RxDesc) bytes of it */
+						dma_sync_single_for_cpu(&pdev->dev, priv->rx_skbuff_dma_addr[cur_rx], pkt_size, DMA_FROM_DEVICE);
+
+						rtskb_put ( rx_skb, pkt_size );
+						rx_skb->protocol = rt_eth_type_trans ( rx_skb, rtdev );
+						rx_skb->time_stamp = *time_stamp;	/*** RTnet ***/
+						//ret = RTL8169_NETIF_RX (rx_skb);
+						rtnetif_rx(rx_skb);	/*** RTnet ***/
+
+//						dev->last_rx = jiffies;
+						priv->stats.rx_bytes += pkt_size;
+						priv->stats.rx_packets++;
+
+#ifdef RTL8169_DYNAMIC_CONTROL
+						r8169_callback_rx( &(priv->rt), 1, pkt_size);
+#endif //end #ifdef RTL8169_DYNAMIC_CONTROL
+
+					}//end if( rx_skb != NULL )
+
+					priv->Rx_skbuff[cur_rx] = n_skb;
+				}
+				else{
+					RT_DBG_PRINT("%s: allocation of n_skb failed! (priv->rx_buf_size = %d)\n", __FUNCTION__, priv->rx_buf_size);
+					priv->Rx_skbuff[cur_rx] = rx_skb;
+				}
+
+
+				// Update rx descriptor
+				if( cur_rx == (NUM_RX_DESC-1) ){
+					priv->RxDescArray[cur_rx].status  = cpu_to_le32((OWNbit | EORbit) | (unsigned long)priv->hw_rx_pkt_len);
+				}
+				else{
+					priv->RxDescArray[cur_rx].status  = cpu_to_le32(OWNbit | (unsigned long)priv->hw_rx_pkt_len);
+				}
+
+				cur_skb = priv->Rx_skbuff[cur_rx];
+
+				if( cur_skb != NULL ){
+					priv->rx_skbuff_dma_addr[cur_rx] = dma_map_single(&pdev->dev, cur_skb->data, priv->rx_buf_size /* <kk> MAX_RX_SKBDATA_SIZE */, DMA_FROM_DEVICE);
+					rxdesc->buf_addr = cpu_to_le32(priv->rx_skbuff_dma_addr[cur_rx]);
+				}
+				else{
+					RT_DBG_PRINT("%s: %s() cur_skb == NULL\n", rtdev->name, __FUNCTION__);
+				}
+
+			}//------------------------------------------------------------
+
+	    }// end of if( priv->RxDescArray[cur_rx].status & RxRES )
+
+	    cur_rx = (cur_rx +1) % NUM_RX_DESC;
+	    rxdesc = &priv->RxDescArray[cur_rx];
+	    dma_sync_single_for_cpu(&pdev->dev, priv->rxdesc_array_dma_addr[cur_rx], sizeof(struct RxDesc), DMA_FROM_DEVICE);
+
+	}	/* end of while ((rxdesc->status & OWNbit) == 0) */
+
+	if( rxdesc_cnt >= max_interrupt_work ){
+		RT_DBG_PRINT("%s: Too much work at Rx interrupt.\n", rtdev->name);
+	}
+
+	priv->cur_rx = cur_rx;
+}
+
+
+
+
+
+
+
+
+//======================================================================================================
+/* The interrupt handler does all of the Rx thread work and cleans up after the Tx thread. */
+static int rtl8169_interrupt(rtdm_irq_t *irq_handle)
+{
+	/* struct net_device *dev = (struct net_device *) dev_instance; */	/*** RTnet ***/
+	struct rtnet_device *rtdev = rtdm_irq_get_arg(irq_handle, struct rtnet_device); /*** RTnet ***/
+	struct rtl8169_private *priv = rtdev->priv;
+	int boguscnt = max_interrupt_work;
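+	/* boguscnt bounds the loop below so a stuck interrupt source cannot
+	 * keep us inside the handler indefinitely. */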
+	unsigned long ioaddr = priv->ioaddr;
+	int status = 0;
+	unsigned int old_packet_cnt = priv->stats.rx_packets; /*** RTnet ***/
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+
+	int interrupt_handled = RTDM_IRQ_NONE; /*** <kk> ***/
+
+	do {
+		status = RTL_R16(IntrStatus);	/* read interrupt status */
+
+		if ((status == 0xFFFF) || (!status)) {
+			break;						/* hotplug/major error/no more work/shared irq */
+		}
+
+
+		interrupt_handled = RTDM_IRQ_HANDLED;
+
+/*		if (unlikely(!rtnetif_running(rtdev))) {
+			rtl8169_asic_down(ioaddr);
+			goto out;
+		}
+*/
+
+		/* Acknowledge interrupts */
+		RTL_W16(IntrStatus, 0xffff);
+
+		if (!(status & rtl8169_intr_mask)) {
+			break;
+		}
+
+		if (unlikely(status & SYSErr)) {
+			RT_DBG_PRINT("PCI error...!? %i\n", __LINE__);
+			rtl8169_pcierr_interrupt(rtdev);
+			break;
+		}
+
+		/*** RTnet / <kk> (Linux-2.6.12-Backport) ***/
+		if (unlikely(status & LinkChg)) {
+			rtdm_lock_get(&priv->lock);
+			if (RTL_R8(PHYstatus) & LinkStatus)	/*** <kk> only supporting XMII, not yet TBI ***/
+				rtnetif_carrier_on(rtdev);
+			else
+				rtnetif_carrier_off(rtdev);
+			rtdm_lock_put(&priv->lock);
+		}
+
+		// Rx interrupt
+		if (status & (RxOK | RxOverflow | RxFIFOOver)) {
+			rtl8169_rx_interrupt (rtdev, priv, ioaddr, &time_stamp);
+		}
+
+		// Tx interrupt
+		if (status & (TxOK | TxErr)) {
+			rtl8169_tx_interrupt (rtdev, priv, ioaddr);
+		}
+
+		boguscnt--;
+	} while (boguscnt > 0);
+
+	if (boguscnt <= 0) {
+		rtdm_printk(KERN_WARNING "%s: Too much work at interrupt!\n", rtdev->name);
+		RTL_W16( IntrStatus, 0xffff);	/* Clear all interrupt sources */
+	}
+
+//out:
+
+	if (old_packet_cnt != priv->stats.rx_packets)
+		rt_mark_stack_mgr(rtdev);
+	return interrupt_handled;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static int rtl8169_close (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	int i;
+	rtdm_lockctx_t context;	/*** RTnet, for rtdm_lock_get_irqsave ***/
+
+	// -----------------------------------------
+	/* rtl8169_delete_timer( &(priv->r8169_timer) ); */	/*** RTnet ***/
+
+
+	rtdm_lock_get_irqsave (&priv->lock, context);	/*** RTnet ***/
+
+	rtnetif_stop_queue (rtdev);		/*** RTnet / <kk>: moved behind spin_lock! ***/
+
+	/* Stop the chip's Tx and Rx processes. */
+	RTL_W8 ( ChipCmd, 0x00);
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	RTL_W16 ( IntrMask, 0x0000);
+
+	/* Update the error counts. */
+	priv->stats.rx_missed_errors += RTL_R32(RxMissed);
+	RTL_W32( RxMissed, 0);
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);	/*** RTnet ***/
+
+	/*** RTnet ***/
+	if ( (i=rtdm_irq_free(&priv->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(rtdev);
+	/*** /RTnet ***/
+
+	rtl8169_tx_clear (priv);
+
+	//2004-05-11
+	if(priv->txdesc_space != NULL){
+		dma_free_coherent(
+				&priv->pci_dev->dev,
+				priv->sizeof_txdesc_space,
+				priv->txdesc_space,
+				priv->txdesc_phy_dma_addr
+		);
+		priv->txdesc_space = NULL;
+	}
+
+	if(priv->rxdesc_space != NULL){
+		dma_free_coherent(
+				&priv->pci_dev->dev,
+				priv->sizeof_rxdesc_space,
+				priv->rxdesc_space,
+				priv->rxdesc_phy_dma_addr
+		);
+		priv->rxdesc_space = NULL;
+	}
+
+	priv->TxDescArray = NULL;
+	priv->RxDescArray = NULL;
+
+	{//-----------------------------------------------------------------------------
+		for(i=0;i<NUM_RX_DESC;i++){
+			if( priv->Rx_skbuff[i] != NULL ) {
+				//RTL8169_FREE_RXSKB ( priv->Rx_skbuff[i] );	/*** <kk> ***/
+				dev_kfree_rtskb(priv->Rx_skbuff[i]);	/*** RTnet ***/
+			}
+		}
+	}//-----------------------------------------------------------------------------
+
+	//DBG_PRINT("%s: %s() alloc_rxskb_cnt = %d\n", dev->name, __FUNCTION__, alloc_rxskb_cnt );	/*** <kk> won't work anymore ***/
+
+	return 0;
+}
+
+
+
+
+
+
+
+//======================================================================================================
+static void rtl8169_set_rx_mode (struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+	rtdm_lockctx_t context;
+	u32 mc_filter[2];	/* Multicast hash filter */
+	int rx_mode;
+	u32 tmp=0;
+
+
+	if (rtdev->flags & IFF_PROMISC) {
+		/* Unconditionally log net taps. */
+		printk (KERN_NOTICE "%s: Promiscuous mode enabled.\n", rtdev->name);
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys | AcceptAllPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else if (rtdev->flags & IFF_ALLMULTI) {
+		/* Too many to filter perfectly -- accept all multicasts. */
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0xffffffff;
+	} else {
+		rx_mode = AcceptBroadcast | AcceptMulticast | AcceptMyPhys;
+		mc_filter[1] = mc_filter[0] = 0;
+	}
+
+	rtdm_lock_get_irqsave(&priv->lock, context);			/*** RTnet ***/
+
+	tmp = rtl8169_rx_config | rx_mode | (RTL_R32(RxConfig) & rtl_chip_info[priv->chipset].RxConfigMask);
+
+	RTL_W32 ( RxConfig, tmp);
+	RTL_W32 ( MAR0 + 0, mc_filter[0]);
+	RTL_W32 ( MAR0 + 4, mc_filter[1]);
+
+	rtdm_lock_put_irqrestore(&priv->lock, context);	/*** RTnet ***/
+
+}	/* end of rtl8169_set_rx_mode(struct rtnet_device *rtdev) */
+
+
+
+
+
+
+
+//================================================================================
+static struct net_device_stats *rtl8169_get_stats(struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+
+	return &priv->stats;
+}
+
+
+
+
+
+
+
+//================================================================================
+static struct pci_driver rtl8169_pci_driver = {
+	.name		= MODULENAME,
+	.id_table	= rtl8169_pci_tbl,
+	.probe		= rtl8169_init_one,
+	.remove		= rtl8169_remove_one,
+	.suspend	= NULL,
+	.resume		= NULL,
+};
+
+
+
+
+
+//======================================================================================================
+static int __init rtl8169_init_module (void)
+{
+	/* <kk> Enable debugging output... */
+	if (local_debug > 0) {
+		r8169_debug = local_debug;
+	}
+	if (r8169_debug & DEBUG_RUN) printk(KERN_INFO "Initializing " MODULENAME " driver\n");
+	return pci_register_driver (&rtl8169_pci_driver);
+}
+
+
+
+
+//======================================================================================================
+static void __exit rtl8169_cleanup_module (void)
+{
+	pci_unregister_driver (&rtl8169_pci_driver);
+}
+
+
+#ifdef RTL8169_JUMBO_FRAME_SUPPORT
+static int rtl8169_change_mtu(struct net_device *dev, int new_mtu)
+{
+	struct rtl8169_private *priv = dev->priv;
+	unsigned long ioaddr = priv->ioaddr;
+
+	if( new_mtu > MAX_JUMBO_FRAME_MTU ){
+		printk("%s: Error -- new_mtu(%d) > MAX_JUMBO_FRAME_MTU(%d).\n", dev->name, new_mtu, MAX_JUMBO_FRAME_MTU);
+		return -1;
+	}
+
+	dev->mtu = new_mtu;
+
+	priv->curr_mtu_size = new_mtu;
+	priv->tx_pkt_len = new_mtu + ETH_HDR_LEN;
+	priv->rx_pkt_len = new_mtu + ETH_HDR_LEN;
+	priv->hw_rx_pkt_len = priv->rx_pkt_len + 8;
+
+	RTL_W8 ( Cfg9346, Cfg9346_Unlock);
+	RTL_W16	( RxMaxSize, (unsigned short)priv->hw_rx_pkt_len );
+	RTL_W8 ( Cfg9346, Cfg9346_Lock);
+
+	DBG_PRINT("-------------------------- \n");
+	DBG_PRINT("dev->mtu = %d \n", dev->mtu);
+	DBG_PRINT("priv->curr_mtu_size = %d \n", priv->curr_mtu_size);
+	DBG_PRINT("priv->rx_pkt_len = %d \n", priv->rx_pkt_len);
+	DBG_PRINT("priv->tx_pkt_len = %d \n", priv->tx_pkt_len);
+	DBG_PRINT("RTL_W16( RxMaxSize, %d )\n", priv->hw_rx_pkt_len);
+	DBG_PRINT("-------------------------- \n");
+
+	rtl8169_close (dev);
+	rtl8169_open (dev);
+
+	return 0;
+}
+#endif //end #ifdef RTL8169_JUMBO_FRAME_SUPPORT
+
+
+
+/*** <kk> these functions are backported from Linux-2.6.12's r8169.c driver ***/
+static void rtl8169_irq_mask_and_ack(unsigned long ioaddr)
+{
+	RTL_W16(IntrMask, 0x0000);
+
+	RTL_W16(IntrStatus, 0xffff);
+}
+
+static void rtl8169_pcierr_interrupt(struct rtnet_device *rtdev)
+{
+	struct rtl8169_private *priv = rtdev->priv;
+	struct pci_dev *pdev = priv->pci_dev;
+	unsigned long ioaddr = priv->ioaddr;
+	u16 pci_status, pci_cmd;
+
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_cmd);
+	pci_read_config_word(pdev, PCI_STATUS, &pci_status);
+
+	rtdm_printk(KERN_ERR PFX "%s: PCI error (cmd = 0x%04x, status = 0x%04x).\n",
+	       rtdev->name, pci_cmd, pci_status);
+
+	/*
+	 * The recovery sequence below admits a very elaborated explanation:
+	 * - it seems to work;
+	 * - I did not see what else could be done.
+	 *
+	 * Feel free to adjust to your needs.
+	 */
+	pci_write_config_word(pdev, PCI_COMMAND,
+			      pci_cmd | PCI_COMMAND_SERR | PCI_COMMAND_PARITY);
+
+	pci_write_config_word(pdev, PCI_STATUS,
+		pci_status & (PCI_STATUS_DETECTED_PARITY |
+		PCI_STATUS_SIG_SYSTEM_ERROR | PCI_STATUS_REC_MASTER_ABORT |
+		PCI_STATUS_REC_TARGET_ABORT | PCI_STATUS_SIG_TARGET_ABORT));
+
+	/* The infamous DAC f*ckup only happens at boot time */
+	/*** <kk> ***
+	if ((priv->cp_cmd & PCIDAC) && !priv->dirty_rx && !priv->cur_rx) {
+		rtdm_printk(KERN_INFO PFX "%s: disabling PCI DAC.\n", rtdev->name);
+		priv->cp_cmd &= ~PCIDAC;
+		RTL_W16(CPlusCmd, priv->cp_cmd);
+		rtdev->features &= ~NETIF_F_HIGHDMA;
+		rtl8169_schedule_work(rtdev, rtl8169_reinit_task);
+	}
+	 *** /RTnet ***/
+
+	/* Disable interrupts */
+	rtl8169_irq_mask_and_ack(ioaddr);
+
+	/* Reset the chipset */
+	RTL_W8(ChipCmd, CmdReset);
+
+	/* PCI commit */
+	RTL_R8(ChipCmd);
+
+}
+
+
+
+
+
+
+//======================================================================================================
+module_init(rtl8169_init_module);
+module_exit(rtl8169_cleanup_module);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_at91_ether.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_at91_ether.h
new file mode 100644
index 0000000..445ddc3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_at91_ether.h
@@ -0,0 +1,109 @@
+/*
+ * Ethernet driver for the Atmel AT91RM9200 (Thunder)
+ *
+ *  Copyright (C) SAN People (Pty) Ltd
+ *
+ * Based on an earlier Atmel EMAC macrocell driver by Atmel and Lineo Inc.
+ * Initial version by Rick Bronson.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef AT91_ETHERNET
+#define AT91_ETHERNET
+
+#include <rtdm/driver.h>
+#include <rtskb.h>
+
+/* Davicom 9161 PHY */
+#define MII_DM9161_ID	0x0181b880
+#define MII_DM9161A_ID	0x0181b8a0
+
+/* Davicom specific registers */
+#define MII_DSCR_REG	16
+#define MII_DSCSR_REG	17
+#define MII_DSINTR_REG	21
+
+/* Intel LXT971A PHY */
+#define MII_LXT971A_ID	0x001378E0
+
+/* Intel specific registers */
+#define MII_ISINTE_REG	18
+#define MII_ISINTS_REG	19
+#define MII_LEDCTRL_REG	20
+
+/* Realtek RTL8201 PHY */
+#define MII_RTL8201_ID	0x00008200
+
+/* Broadcom BCM5221 PHY */
+#define MII_BCM5221_ID	0x004061e0
+
+/* Broadcom specific registers */
+#define MII_BCMINTR_REG	26
+
+/* National Semiconductor DP83847 */
+#define MII_DP83847_ID	0x20005c30
+
+/* Altima AC101L PHY */
+#define MII_AC101L_ID	0x00225520
+
+/* Micrel KS8721 PHY */
+#define MII_KS8721_ID	0x00221610
+
+/* ........................................................................ */
+
+#define MAX_RBUFF_SZ	0x600		/* 1518 rounded up */
+#define MAX_RX_DESCR	9		/* max number of receive buffers */
+
+#define EMAC_DESC_DONE	0x00000001	/* bit for if DMA is done */
+#define EMAC_DESC_WRAP	0x00000002	/* bit for wrap */
+
+#define EMAC_BROADCAST	0x80000000	/* broadcast address */
+#define EMAC_MULTICAST	0x40000000	/* multicast address */
+#define EMAC_UNICAST	0x20000000	/* unicast address */
+
+struct rbf_t
+{
+	unsigned int addr;
+	unsigned long size;
+};
+
+struct recv_desc_bufs
+{
+	struct rbf_t descriptors[MAX_RX_DESCR];		/* must be on sizeof (rbf_t) boundary */
+	char recv_buf[MAX_RX_DESCR][MAX_RBUFF_SZ];	/* must be on long boundary */
+};
+
+struct at91_private
+{
+	struct net_device_stats stats;
+	struct mii_if_info mii;			/* ethtool support */
+	struct at91_eth_data board_data;	/* board-specific configuration */
+	struct clk *ether_clk;			/* clock */
+
+	/* PHY */
+	unsigned long phy_type;			/* type of PHY (PHY_ID) */
+	rtdm_lock_t lock;			/* lock for MDI interface */
+	short phy_media;			/* media interface type */
+	unsigned short phy_address;		/* 5-bit MDI address of PHY (0..31) */
+	struct timer_list check_timer;		/* Poll link status */
+
+	/* Transmit */
+	struct rtskb *skb;			/* holds skb until xmit interrupt completes */
+	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
+	int skb_length;				/* saved skb length for pci_unmap_single */
+
+	/* Receive */
+	int rxBuffIndex;			/* index into receive descriptor list */
+	struct recv_desc_bufs *dlist;		/* descriptor list address */
+	struct recv_desc_bufs *dlist_phys;	/* descriptor list physical address */
+
+	/* RT Net */
+	rtdm_irq_t irq_handle;
+	rtdm_irq_t phy_irq_handle;
+};
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_eth1394.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_eth1394.h
new file mode 100644
index 0000000..f9cddff
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_eth1394.h
@@ -0,0 +1,240 @@
+/*
+ * eth1394.h -- Driver for Ethernet emulation over FireWire, (adapted from Linux1394)
+ *		working under RTnet.
+ *
+ * Copyright (C) 2005	Zhang Yuchen <yuchen623@gmail.com>
+ *
+ * Mainly based on work by Emanuel Pirker and Andreas E. Bombe
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __ETH1394_H
+#define __ETH1394_H
+
+#include <ieee1394.h>
+#include <rtskb.h>
+#include <linux/netdevice.h>
+#include <rtnet_port.h>
+
+
+/* Register for incoming packets. This is 4096 bytes, which supports up to
+ * S3200 (per Table 16-3 of IEEE 1394b-2002). */
+#define ETHER1394_REGION_ADDR_LEN	4096
+#define ETHER1394_REGION_ADDR		0xfffff0200000ULL
+#define ETHER1394_REGION_ADDR_END	(ETHER1394_REGION_ADDR + ETHER1394_REGION_ADDR_LEN)
+
+/* GASP identifier numbers for IPv4 over IEEE 1394 */
+#define ETHER1394_GASP_SPECIFIER_ID	0x00005E
+#define ETHER1394_GASP_SPECIFIER_ID_HI	((ETHER1394_GASP_SPECIFIER_ID >> 8) & 0xffff)
+#define ETHER1394_GASP_SPECIFIER_ID_LO	(ETHER1394_GASP_SPECIFIER_ID & 0xff)
+#define ETHER1394_GASP_VERSION		1
+
+#define ETHER1394_GASP_OVERHEAD (2 * sizeof(quadlet_t))  /* GASP header overhead */
+
+#define ETHER1394_GASP_BUFFERS 16
+
+#define ETH1394_BC_CHANNEL 31
+
+#define ALL_NODES	0x003f	/* from ieee1394_types.h */
+/* Node set == 64 */
+#define NODE_SET			(ALL_NODES + 1)
+
+enum eth1394_bc_states { ETHER1394_BC_CLOSED, ETHER1394_BC_OPENED,
+			 ETHER1394_BC_CHECK, ETHER1394_BC_ERROR,
+			 ETHER1394_BC_RUNNING,
+			 ETHER1394_BC_STOPPED  };
+
+#define TX_RING_SIZE	32
+#define RX_RING_SIZE	8 /* RX_RING_SIZE*2 rtskbs will be preallocated */
+
+struct pdg_list {
+	struct list_head list;		/* partial datagram list per node */
+	unsigned int sz;		/* partial datagram list size per node	*/
+	rtdm_lock_t lock;		/* partial datagram lock		*/
+};
+
+/* IP1394 headers */
+#include <asm/byteorder.h>
+
+/* Unfragmented */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_uf_hdr {
+	u16 lf:2;
+	u16 res:14;
+	u16 ether_type;		/* Ethernet packet type */
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_uf_hdr {
+	u16 res:14;
+	u16 lf:2;
+	u16 ether_type;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+/* End of IP1394 headers */
+
+/* Fragment types */
+#define ETH1394_HDR_LF_UF	0	/* unfragmented		*/
+#define ETH1394_HDR_LF_FF	1	/* first fragment	*/
+#define ETH1394_HDR_LF_LF	2	/* last fragment	*/
+#define ETH1394_HDR_LF_IF	3	/* interior fragment	*/
+
+#define IP1394_HW_ADDR_LEN	2	/* the RFC specifies 16; we use the value from the modified spec	*/
+
+/* Our arp packet (ARPHRD_IEEE1394) */
+struct eth1394_arp {
+	u16 hw_type;		/* 0x0018	*/
+	u16 proto_type;		/* 0x0080	*/
+	u8 hw_addr_len;		/* 2		*/
+	u8 ip_addr_len;		/* 4		*/
+	u16 opcode;		/* ARP Opcode: 1 for req, 2 for resp	*/
+	/* Above is exactly the same format as struct arphdr */
+
+	unsigned char s_uniq_id[ETH_ALEN];	/* Sender's node id padded with zeros	*/
+	u8 max_rec;		/* Sender's max packet size		*/
+	u8 sspd;		/* Sender's max speed			*/
+	u32 sip;		/* Sender's IP Address			*/
+	u32 tip;		/* IP Address of requested hw addr	*/
+};
+
+
+/* Network timeout */
+#define ETHER1394_TIMEOUT	100000
+
+/* First fragment */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_ff_hdr {
+	u16 lf:2;
+	u16 res1:2;
+	u16 dg_size:12;		/* Datagram size */
+	u16 ether_type;		/* Ethernet packet type */
+	u16 dgl;		/* Datagram label */
+	u16 res2;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_ff_hdr {
+	u16 dg_size:12;
+	u16 res1:2;
+	u16 lf:2;
+	u16 ether_type;
+	u16 dgl;
+	u16 res2;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+/* XXX: Subsequent fragments, including last */
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_sf_hdr {
+	u16 lf:2;
+	u16 res1:2;
+	u16 dg_size:12;		/* Datagram size */
+	u16 res2:4;
+	u16 fg_off:12;		/* Fragment offset */
+	u16 dgl;		/* Datagram label */
+	u16 res3;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_sf_hdr {
+	u16 dg_size:12;
+	u16 res1:2;
+	u16 lf:2;
+	u16 fg_off:12;
+	u16 res2:4;
+	u16 dgl;
+	u16 res3;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+#if defined __BIG_ENDIAN_BITFIELD
+struct eth1394_common_hdr {
+	u16 lf:2;
+	u16 pad1:14;
+} __attribute__((packed));
+#elif defined __LITTLE_ENDIAN_BITFIELD
+struct eth1394_common_hdr {
+	u16 pad1:14;
+	u16 lf:2;
+} __attribute__((packed));
+#else
+#error Unknown bit field type
+#endif
+
+struct eth1394_hdr_words {
+	u16 word1;
+	u16 word2;
+	u16 word3;
+	u16 word4;
+};
+
+union eth1394_hdr {
+	struct eth1394_common_hdr common;
+	struct eth1394_uf_hdr uf;
+	struct eth1394_ff_hdr ff;
+	struct eth1394_sf_hdr sf;
+	struct eth1394_hdr_words words;
+};
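+
+/*
+ * A receiver can inspect the common view first and then pick the matching
+ * variant of the union, e.g. (a sketch, not code from this driver):
+ *
+ *	union eth1394_hdr *hdr = (union eth1394_hdr *)buf;
+ *
+ *	if (hdr->common.lf == ETH1394_HDR_LF_UF)
+ *		ether_type = hdr->uf.ether_type;	// unfragmented datagram
+ *	else if (hdr->common.lf == ETH1394_HDR_LF_FF)
+ *		dgl = hdr->ff.dgl;			// first fragment
+ */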
+
+typedef enum {ETH1394_GASP, ETH1394_WRREQ} eth1394_tx_type;
+
+/* This is our task struct. It's used for the packet complete callback.  */
+struct packet_task {
+	struct list_head lh;
+	struct rtskb *skb;
+	int outstanding_pkts;
+	eth1394_tx_type tx_type;
+	int max_payload;
+	struct hpsb_packet *packet;
+	struct eth1394_priv *priv;
+	union eth1394_hdr hdr;
+	u64 addr;
+	u16 dest_node;
+	unsigned int priority; /* mapped onto the priority of the 1394 transaction */
+};
+
+/* Private structure for our ethernet driver */
+struct eth1394_priv {
+	struct net_device_stats stats;	/* Device stats			 */
+	struct hpsb_host *host;		/* The card for this dev	 */
+	u16 maxpayload[NODE_SET];	/* Max payload per node		 */
+	unsigned char sspd[NODE_SET];	/* Max speed per node		 */
+	rtdm_lock_t lock;		/* Private lock			 */
+	int broadcast_channel;		/* Async stream Broadcast Channel */
+	enum eth1394_bc_states bc_state; /* broadcast channel state	 */
+	struct hpsb_iso	*iso;
+	struct pdg_list pdg[ALL_NODES]; /* partial RX datagram lists     */
+	int dgl[NODE_SET];              /* Outgoing datagram label per node */
+
+	/* The addresses of a Tx/Rx-in-place packets/buffers. */
+	struct rtskb *tx_skbuff[TX_RING_SIZE];
+	struct rtskb *rx_skbuff[RX_RING_SIZE];
+	struct packet_task ptask_list[20]; /* list of pre-allocated packet_task structures */
+};
+
+
+
+struct host_info {
+	struct hpsb_host *host;
+	struct rtnet_device *dev;
+};
+
+
+#endif /* __ETH1394_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_macb.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_macb.h
new file mode 100644
index 0000000..53b9607
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/rt_macb.h
@@ -0,0 +1,624 @@
+/*
+ * Atmel MACB Ethernet Controller driver
+ *
+ * Copyright (C) 2004-2006 Atmel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#ifndef _MACB_H
+#define _MACB_H
+
+#define MACB_GREGS_NBR 16
+#define MACB_GREGS_VERSION 1
+
+/* MACB register offsets */
+#define MACB_NCR				0x0000
+#define MACB_NCFGR				0x0004
+#define MACB_NSR				0x0008
+#define MACB_TAR				0x000c /* AT91RM9200 only */
+#define MACB_TCR				0x0010 /* AT91RM9200 only */
+#define MACB_TSR				0x0014
+#define MACB_RBQP				0x0018
+#define MACB_TBQP				0x001c
+#define MACB_RSR				0x0020
+#define MACB_ISR				0x0024
+#define MACB_IER				0x0028
+#define MACB_IDR				0x002c
+#define MACB_IMR				0x0030
+#define MACB_MAN				0x0034
+#define MACB_PTR				0x0038
+#define MACB_PFR				0x003c
+#define MACB_FTO				0x0040
+#define MACB_SCF				0x0044
+#define MACB_MCF				0x0048
+#define MACB_FRO				0x004c
+#define MACB_FCSE				0x0050
+#define MACB_ALE				0x0054
+#define MACB_DTF				0x0058
+#define MACB_LCOL				0x005c
+#define MACB_EXCOL				0x0060
+#define MACB_TUND				0x0064
+#define MACB_CSE				0x0068
+#define MACB_RRE				0x006c
+#define MACB_ROVR				0x0070
+#define MACB_RSE				0x0074
+#define MACB_ELE				0x0078
+#define MACB_RJA				0x007c
+#define MACB_USF				0x0080
+#define MACB_STE				0x0084
+#define MACB_RLE				0x0088
+#define MACB_TPF				0x008c
+#define MACB_HRB				0x0090
+#define MACB_HRT				0x0094
+#define MACB_SA1B				0x0098
+#define MACB_SA1T				0x009c
+#define MACB_SA2B				0x00a0
+#define MACB_SA2T				0x00a4
+#define MACB_SA3B				0x00a8
+#define MACB_SA3T				0x00ac
+#define MACB_SA4B				0x00b0
+#define MACB_SA4T				0x00b4
+#define MACB_TID				0x00b8
+#define MACB_TPQ				0x00bc
+#define MACB_USRIO				0x00c0
+#define MACB_WOL				0x00c4
+#define MACB_MID				0x00fc
+
+/* GEM register offsets. */
+#define GEM_NCFGR				0x0004
+#define GEM_USRIO				0x000c
+#define GEM_DMACFG				0x0010
+#define GEM_HRB					0x0080
+#define GEM_HRT					0x0084
+#define GEM_SA1B				0x0088
+#define GEM_SA1T				0x008C
+#define GEM_SA2B				0x0090
+#define GEM_SA2T				0x0094
+#define GEM_SA3B				0x0098
+#define GEM_SA3T				0x009C
+#define GEM_SA4B				0x00A0
+#define GEM_SA4T				0x00A4
+#define GEM_OTX					0x0100
+#define GEM_DCFG1				0x0280
+#define GEM_DCFG2				0x0284
+#define GEM_DCFG3				0x0288
+#define GEM_DCFG4				0x028c
+#define GEM_DCFG5				0x0290
+#define GEM_DCFG6				0x0294
+#define GEM_DCFG7				0x0298
+
+/* Bitfields in NCR */
+#define MACB_LB_OFFSET				0
+#define MACB_LB_SIZE				1
+#define MACB_LLB_OFFSET				1
+#define MACB_LLB_SIZE				1
+#define MACB_RE_OFFSET				2
+#define MACB_RE_SIZE				1
+#define MACB_TE_OFFSET				3
+#define MACB_TE_SIZE				1
+#define MACB_MPE_OFFSET				4
+#define MACB_MPE_SIZE				1
+#define MACB_CLRSTAT_OFFSET			5
+#define MACB_CLRSTAT_SIZE			1
+#define MACB_INCSTAT_OFFSET			6
+#define MACB_INCSTAT_SIZE			1
+#define MACB_WESTAT_OFFSET			7
+#define MACB_WESTAT_SIZE			1
+#define MACB_BP_OFFSET				8
+#define MACB_BP_SIZE				1
+#define MACB_TSTART_OFFSET			9
+#define MACB_TSTART_SIZE			1
+#define MACB_THALT_OFFSET			10
+#define MACB_THALT_SIZE				1
+#define MACB_NCR_TPF_OFFSET			11
+#define MACB_NCR_TPF_SIZE			1
+#define MACB_TZQ_OFFSET				12
+#define MACB_TZQ_SIZE				1
+
+/* Bitfields in NCFGR */
+#define MACB_SPD_OFFSET				0
+#define MACB_SPD_SIZE				1
+#define MACB_FD_OFFSET				1
+#define MACB_FD_SIZE				1
+#define MACB_BIT_RATE_OFFSET			2
+#define MACB_BIT_RATE_SIZE			1
+#define MACB_JFRAME_OFFSET			3
+#define MACB_JFRAME_SIZE			1
+#define MACB_CAF_OFFSET				4
+#define MACB_CAF_SIZE				1
+#define MACB_NBC_OFFSET				5
+#define MACB_NBC_SIZE				1
+#define MACB_NCFGR_MTI_OFFSET			6
+#define MACB_NCFGR_MTI_SIZE			1
+#define MACB_UNI_OFFSET				7
+#define MACB_UNI_SIZE				1
+#define MACB_BIG_OFFSET				8
+#define MACB_BIG_SIZE				1
+#define MACB_EAE_OFFSET				9
+#define MACB_EAE_SIZE				1
+#define MACB_CLK_OFFSET				10
+#define MACB_CLK_SIZE				2
+#define MACB_RTY_OFFSET				12
+#define MACB_RTY_SIZE				1
+#define MACB_PAE_OFFSET				13
+#define MACB_PAE_SIZE				1
+#define MACB_RM9200_RMII_OFFSET			13 /* AT91RM9200 only */
+#define MACB_RM9200_RMII_SIZE			1  /* AT91RM9200 only */
+#define MACB_RBOF_OFFSET			14
+#define MACB_RBOF_SIZE				2
+#define MACB_RLCE_OFFSET			16
+#define MACB_RLCE_SIZE				1
+#define MACB_DRFCS_OFFSET			17
+#define MACB_DRFCS_SIZE				1
+#define MACB_EFRHD_OFFSET			18
+#define MACB_EFRHD_SIZE				1
+#define MACB_IRXFCS_OFFSET			19
+#define MACB_IRXFCS_SIZE			1
+
+/* GEM specific NCFGR bitfields. */
+#define GEM_GBE_OFFSET				10
+#define GEM_GBE_SIZE				1
+#define GEM_CLK_OFFSET				18
+#define GEM_CLK_SIZE				3
+#define GEM_DBW_OFFSET				21
+#define GEM_DBW_SIZE				2
+
+/* Constants for data bus width. */
+#define GEM_DBW32				0
+#define GEM_DBW64				1
+#define GEM_DBW128				2
+
+/* Bitfields in DMACFG. */
+#define GEM_FBLDO_OFFSET			0
+#define GEM_FBLDO_SIZE				5
+#define GEM_ENDIA_OFFSET			7
+#define GEM_ENDIA_SIZE				1
+#define GEM_RXBMS_OFFSET			8
+#define GEM_RXBMS_SIZE				2
+#define GEM_TXPBMS_OFFSET			10
+#define GEM_TXPBMS_SIZE				1
+#define GEM_TXCOEN_OFFSET			11
+#define GEM_TXCOEN_SIZE				1
+#define GEM_RXBS_OFFSET				16
+#define GEM_RXBS_SIZE				8
+#define GEM_DDRP_OFFSET				24
+#define GEM_DDRP_SIZE				1
+
+
+/* Bitfields in NSR */
+#define MACB_NSR_LINK_OFFSET			0
+#define MACB_NSR_LINK_SIZE			1
+#define MACB_MDIO_OFFSET			1
+#define MACB_MDIO_SIZE				1
+#define MACB_IDLE_OFFSET			2
+#define MACB_IDLE_SIZE				1
+
+/* Bitfields in TSR */
+#define MACB_UBR_OFFSET				0
+#define MACB_UBR_SIZE				1
+#define MACB_COL_OFFSET				1
+#define MACB_COL_SIZE				1
+#define MACB_TSR_RLE_OFFSET			2
+#define MACB_TSR_RLE_SIZE			1
+#define MACB_TGO_OFFSET				3
+#define MACB_TGO_SIZE				1
+#define MACB_BEX_OFFSET				4
+#define MACB_BEX_SIZE				1
+#define MACB_RM9200_BNQ_OFFSET			4 /* AT91RM9200 only */
+#define MACB_RM9200_BNQ_SIZE			1 /* AT91RM9200 only */
+#define MACB_COMP_OFFSET			5
+#define MACB_COMP_SIZE				1
+#define MACB_UND_OFFSET				6
+#define MACB_UND_SIZE				1
+
+/* Bitfields in RSR */
+#define MACB_BNA_OFFSET				0
+#define MACB_BNA_SIZE				1
+#define MACB_REC_OFFSET				1
+#define MACB_REC_SIZE				1
+#define MACB_OVR_OFFSET				2
+#define MACB_OVR_SIZE				1
+
+/* Bitfields in ISR/IER/IDR/IMR */
+#define MACB_MFD_OFFSET				0
+#define MACB_MFD_SIZE				1
+#define MACB_RCOMP_OFFSET			1
+#define MACB_RCOMP_SIZE				1
+#define MACB_RXUBR_OFFSET			2
+#define MACB_RXUBR_SIZE				1
+#define MACB_TXUBR_OFFSET			3
+#define MACB_TXUBR_SIZE				1
+#define MACB_ISR_TUND_OFFSET			4
+#define MACB_ISR_TUND_SIZE			1
+#define MACB_ISR_RLE_OFFSET			5
+#define MACB_ISR_RLE_SIZE			1
+#define MACB_TXERR_OFFSET			6
+#define MACB_TXERR_SIZE				1
+#define MACB_TCOMP_OFFSET			7
+#define MACB_TCOMP_SIZE				1
+#define MACB_ISR_LINK_OFFSET			9
+#define MACB_ISR_LINK_SIZE			1
+#define MACB_ISR_ROVR_OFFSET			10
+#define MACB_ISR_ROVR_SIZE			1
+#define MACB_HRESP_OFFSET			11
+#define MACB_HRESP_SIZE				1
+#define MACB_PFR_OFFSET				12
+#define MACB_PFR_SIZE				1
+#define MACB_PTZ_OFFSET				13
+#define MACB_PTZ_SIZE				1
+
+/* Bitfields in MAN */
+#define MACB_DATA_OFFSET			0
+#define MACB_DATA_SIZE				16
+#define MACB_CODE_OFFSET			16
+#define MACB_CODE_SIZE				2
+#define MACB_REGA_OFFSET			18
+#define MACB_REGA_SIZE				5
+#define MACB_PHYA_OFFSET			23
+#define MACB_PHYA_SIZE				5
+#define MACB_RW_OFFSET				28
+#define MACB_RW_SIZE				2
+#define MACB_SOF_OFFSET				30
+#define MACB_SOF_SIZE				2
+
+/* Bitfields in USRIO (AVR32) */
+#define MACB_MII_OFFSET				0
+#define MACB_MII_SIZE				1
+#define MACB_EAM_OFFSET				1
+#define MACB_EAM_SIZE				1
+#define MACB_TX_PAUSE_OFFSET			2
+#define MACB_TX_PAUSE_SIZE			1
+#define MACB_TX_PAUSE_ZERO_OFFSET		3
+#define MACB_TX_PAUSE_ZERO_SIZE			1
+
+/* Bitfields in USRIO (AT91) */
+#define MACB_RMII_OFFSET			0
+#define MACB_RMII_SIZE				1
+#define GEM_RGMII_OFFSET			0	/* GEM gigabit mode */
+#define GEM_RGMII_SIZE				1
+#define MACB_CLKEN_OFFSET			1
+#define MACB_CLKEN_SIZE				1
+
+/* Bitfields in WOL */
+#define MACB_IP_OFFSET				0
+#define MACB_IP_SIZE				16
+#define MACB_MAG_OFFSET				16
+#define MACB_MAG_SIZE				1
+#define MACB_ARP_OFFSET				17
+#define MACB_ARP_SIZE				1
+#define MACB_SA1_OFFSET				18
+#define MACB_SA1_SIZE				1
+#define MACB_WOL_MTI_OFFSET			19
+#define MACB_WOL_MTI_SIZE			1
+
+/* Bitfields in MID */
+#define MACB_IDNUM_OFFSET			16
+#define MACB_IDNUM_SIZE				16
+#define MACB_REV_OFFSET				0
+#define MACB_REV_SIZE				16
+
+/* Bitfields in DCFG1. */
+#define GEM_IRQCOR_OFFSET			23
+#define GEM_IRQCOR_SIZE				1
+#define GEM_DBWDEF_OFFSET			25
+#define GEM_DBWDEF_SIZE				3
+
+/* Constants for CLK */
+#define MACB_CLK_DIV8				0
+#define MACB_CLK_DIV16				1
+#define MACB_CLK_DIV32				2
+#define MACB_CLK_DIV64				3
+
+/* GEM specific constants for CLK. */
+#define GEM_CLK_DIV8				0
+#define GEM_CLK_DIV16				1
+#define GEM_CLK_DIV32				2
+#define GEM_CLK_DIV48				3
+#define GEM_CLK_DIV64				4
+#define GEM_CLK_DIV96				5
+
+/* Constants for MAN register */
+#define MACB_MAN_SOF				1
+#define MACB_MAN_WRITE				1
+#define MACB_MAN_READ				2
+#define MACB_MAN_CODE				2
+
+/* Capability mask bits */
+#define MACB_CAPS_ISR_CLEAR_ON_WRITE		0x1
+
+/* Bit manipulation macros */
+#define MACB_BIT(name)					\
+	(1 << MACB_##name##_OFFSET)
+#define MACB_BF(name,value)				\
+	(((value) & ((1 << MACB_##name##_SIZE) - 1))	\
+	 << MACB_##name##_OFFSET)
+#define MACB_BFEXT(name,value)\
+	(((value) >> MACB_##name##_OFFSET)		\
+	 & ((1 << MACB_##name##_SIZE) - 1))
+#define MACB_BFINS(name,value,old)			\
+	(((old) & ~(((1 << MACB_##name##_SIZE) - 1)	\
+		    << MACB_##name##_OFFSET))		\
+	 | MACB_BF(name,value))
+
+#define GEM_BIT(name)					\
+	(1 << GEM_##name##_OFFSET)
+#define GEM_BF(name, value)				\
+	(((value) & ((1 << GEM_##name##_SIZE) - 1))	\
+	 << GEM_##name##_OFFSET)
+#define GEM_BFEXT(name, value)\
+	(((value) >> GEM_##name##_OFFSET)		\
+	 & ((1 << GEM_##name##_SIZE) - 1))
+#define GEM_BFINS(name, value, old)			\
+	(((old) & ~(((1 << GEM_##name##_SIZE) - 1)	\
+		    << GEM_##name##_OFFSET))		\
+	 | GEM_BF(name, value))
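+
+/*
+ * Example (a sketch with hypothetical values): change the CLK divider
+ * field of NCFGR without disturbing the other bits:
+ *
+ *	u32 cfg = macb_readl(bp, NCFGR);
+ *	cfg = MACB_BFINS(CLK, MACB_CLK_DIV32, cfg);
+ *	macb_writel(bp, NCFGR, cfg);
+ */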
+
+/* Register access macros */
+#define macb_readl(port,reg)				\
+	__raw_readl((port)->regs + MACB_##reg)
+#define macb_writel(port,reg,value)			\
+	__raw_writel((value), (port)->regs + MACB_##reg)
+#define gem_readl(port, reg)				\
+	__raw_readl((port)->regs + GEM_##reg)
+#define gem_writel(port, reg, value)			\
+	__raw_writel((value), (port)->regs + GEM_##reg)
+
+/*
+ * Conditional GEM/MACB macros.  These perform the operation to the correct
+ * register dependent on whether the device is a GEM or a MACB.  For registers
+ * and bitfields that are common across both devices, use macb_{read,write}l
+ * to avoid the cost of the conditional.
+ */
+#define macb_or_gem_writel(__bp, __reg, __value) \
+	({ \
+		if (macb_is_gem((__bp))) \
+			gem_writel((__bp), __reg, __value); \
+		else \
+			macb_writel((__bp), __reg, __value); \
+	})
+
+#define macb_or_gem_readl(__bp, __reg) \
+	({ \
+		u32 __v; \
+		if (macb_is_gem((__bp))) \
+			__v = gem_readl((__bp), __reg); \
+		else \
+			__v = macb_readl((__bp), __reg); \
+		__v; \
+	})
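+
+/*
+ * For instance, the station address registers exist on both MACB and GEM
+ * but at different offsets, so a caller would go through the conditional
+ * accessors (a sketch, not taken from this driver):
+ *
+ *	macb_or_gem_writel(bp, SA1B, bottom);
+ *	macb_or_gem_writel(bp, SA1T, top);
+ */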
+
+/**
+ * @brief Hardware DMA descriptor
+ * @anchor macb_dma_desc
+ */
+struct macb_dma_desc {
+	/** DMA address of data buffer */
+	u32	addr;
+	/** Control and status bits */
+	u32	ctrl;
+};
+
+/* DMA descriptor bitfields */
+#define MACB_RX_USED_OFFSET			0
+#define MACB_RX_USED_SIZE			1
+#define MACB_RX_WRAP_OFFSET			1
+#define MACB_RX_WRAP_SIZE			1
+#define MACB_RX_WADDR_OFFSET			2
+#define MACB_RX_WADDR_SIZE			30
+
+#define MACB_RX_FRMLEN_OFFSET			0
+#define MACB_RX_FRMLEN_SIZE			12
+#define MACB_RX_OFFSET_OFFSET			12
+#define MACB_RX_OFFSET_SIZE			2
+#define MACB_RX_SOF_OFFSET			14
+#define MACB_RX_SOF_SIZE			1
+#define MACB_RX_EOF_OFFSET			15
+#define MACB_RX_EOF_SIZE			1
+#define MACB_RX_CFI_OFFSET			16
+#define MACB_RX_CFI_SIZE			1
+#define MACB_RX_VLAN_PRI_OFFSET			17
+#define MACB_RX_VLAN_PRI_SIZE			3
+#define MACB_RX_PRI_TAG_OFFSET			20
+#define MACB_RX_PRI_TAG_SIZE			1
+#define MACB_RX_VLAN_TAG_OFFSET			21
+#define MACB_RX_VLAN_TAG_SIZE			1
+#define MACB_RX_TYPEID_MATCH_OFFSET		22
+#define MACB_RX_TYPEID_MATCH_SIZE		1
+#define MACB_RX_SA4_MATCH_OFFSET		23
+#define MACB_RX_SA4_MATCH_SIZE			1
+#define MACB_RX_SA3_MATCH_OFFSET		24
+#define MACB_RX_SA3_MATCH_SIZE			1
+#define MACB_RX_SA2_MATCH_OFFSET		25
+#define MACB_RX_SA2_MATCH_SIZE			1
+#define MACB_RX_SA1_MATCH_OFFSET		26
+#define MACB_RX_SA1_MATCH_SIZE			1
+#define MACB_RX_EXT_MATCH_OFFSET		28
+#define MACB_RX_EXT_MATCH_SIZE			1
+#define MACB_RX_UHASH_MATCH_OFFSET		29
+#define MACB_RX_UHASH_MATCH_SIZE		1
+#define MACB_RX_MHASH_MATCH_OFFSET		30
+#define MACB_RX_MHASH_MATCH_SIZE		1
+#define MACB_RX_BROADCAST_OFFSET		31
+#define MACB_RX_BROADCAST_SIZE			1
+
+#define MACB_TX_FRMLEN_OFFSET			0
+#define MACB_TX_FRMLEN_SIZE			11
+#define MACB_TX_LAST_OFFSET			15
+#define MACB_TX_LAST_SIZE			1
+#define MACB_TX_NOCRC_OFFSET			16
+#define MACB_TX_NOCRC_SIZE			1
+#define MACB_TX_BUF_EXHAUSTED_OFFSET		27
+#define MACB_TX_BUF_EXHAUSTED_SIZE		1
+#define MACB_TX_UNDERRUN_OFFSET			28
+#define MACB_TX_UNDERRUN_SIZE			1
+#define MACB_TX_ERROR_OFFSET			29
+#define MACB_TX_ERROR_SIZE			1
+#define MACB_TX_WRAP_OFFSET			30
+#define MACB_TX_WRAP_SIZE			1
+#define MACB_TX_USED_OFFSET			31
+#define MACB_TX_USED_SIZE			1
+
+/**
+ * @brief Data about an skb which is being transmitted
+ * @anchor macb_tx_skb
+ */
+struct macb_tx_skb {
+	/** skb currently being transmitted */
+	struct rtskb		*skb;
+	/** DMA address of the skb's data buffer */
+	dma_addr_t		mapping;
+};
+
+/*
+ * Hardware-collected statistics. Used when updating the network
+ * device stats by a periodic timer.
+ */
+struct macb_stats {
+	u32	rx_pause_frames;
+	u32	tx_ok;
+	u32	tx_single_cols;
+	u32	tx_multiple_cols;
+	u32	rx_ok;
+	u32	rx_fcs_errors;
+	u32	rx_align_errors;
+	u32	tx_deferred;
+	u32	tx_late_cols;
+	u32	tx_excessive_cols;
+	u32	tx_underruns;
+	u32	tx_carrier_errors;
+	u32	rx_resource_errors;
+	u32	rx_overruns;
+	u32	rx_symbol_errors;
+	u32	rx_oversize_pkts;
+	u32	rx_jabbers;
+	u32	rx_undersize_pkts;
+	u32	sqe_test_errors;
+	u32	rx_length_mismatch;
+	u32	tx_pause_frames;
+};
+
+struct gem_stats {
+	u32	tx_octets_31_0;
+	u32	tx_octets_47_32;
+	u32	tx_frames;
+	u32	tx_broadcast_frames;
+	u32	tx_multicast_frames;
+	u32	tx_pause_frames;
+	u32	tx_64_byte_frames;
+	u32	tx_65_127_byte_frames;
+	u32	tx_128_255_byte_frames;
+	u32	tx_256_511_byte_frames;
+	u32	tx_512_1023_byte_frames;
+	u32	tx_1024_1518_byte_frames;
+	u32	tx_greater_than_1518_byte_frames;
+	u32	tx_underrun;
+	u32	tx_single_collision_frames;
+	u32	tx_multiple_collision_frames;
+	u32	tx_excessive_collisions;
+	u32	tx_late_collisions;
+	u32	tx_deferred_frames;
+	u32	tx_carrier_sense_errors;
+	u32	rx_octets_31_0;
+	u32	rx_octets_47_32;
+	u32	rx_frames;
+	u32	rx_broadcast_frames;
+	u32	rx_multicast_frames;
+	u32	rx_pause_frames;
+	u32	rx_64_byte_frames;
+	u32	rx_65_127_byte_frames;
+	u32	rx_128_255_byte_frames;
+	u32	rx_256_511_byte_frames;
+	u32	rx_512_1023_byte_frames;
+	u32	rx_1024_1518_byte_frames;
+	u32	rx_greater_than_1518_byte_frames;
+	u32	rx_undersized_frames;
+	u32	rx_oversize_frames;
+	u32	rx_jabbers;
+	u32	rx_frame_check_sequence_errors;
+	u32	rx_length_field_frame_errors;
+	u32	rx_symbol_errors;
+	u32	rx_alignment_errors;
+	u32	rx_resource_errors;
+	u32	rx_overruns;
+	u32	rx_ip_header_checksum_errors;
+	u32	rx_tcp_checksum_errors;
+	u32	rx_udp_checksum_errors;
+};
+
+struct macb;
+
+struct macb_or_gem_ops {
+	int	(*mog_alloc_rx_buffers)(struct macb *bp);
+	void	(*mog_free_rx_buffers)(struct macb *bp);
+	void	(*mog_init_rings)(struct macb *bp);
+	int	(*mog_rx)(struct macb *bp, int budget, nanosecs_abs_t *ts);
+};
+
+struct macb {
+	void __iomem		*regs;
+
+	unsigned int		rx_tail;
+	unsigned int		rx_prepared_head;
+	struct macb_dma_desc	*rx_ring;
+	struct rtskb		**rx_skbuff;
+	void			*rx_buffers;
+	size_t			rx_buffer_size;
+
+	unsigned int		tx_head, tx_tail;
+	struct macb_dma_desc	*tx_ring;
+	struct macb_tx_skb	*tx_skb;
+
+	rtdm_lock_t		lock;
+	struct platform_device	*pdev;
+	struct clk		*pclk;
+	struct clk		*hclk;
+	struct clk		*tx_clk;
+	struct rtnet_device	*dev;
+	struct work_struct	tx_error_task;
+	struct net_device_stats	stats;
+	union {
+		struct macb_stats	macb;
+		struct gem_stats	gem;
+	}			hw_stats;
+
+	dma_addr_t		rx_ring_dma;
+	dma_addr_t		tx_ring_dma;
+	dma_addr_t		rx_buffers_dma;
+
+	struct macb_or_gem_ops	macbgem_ops;
+
+	struct mii_bus		*mii_bus;
+	struct phy_device	*phy_dev;
+	unsigned int		link;
+	unsigned int		speed;
+	unsigned int		duplex;
+
+	u32			caps;
+
+	phy_interface_t		phy_interface;
+
+	struct net_device	*phy_phony_net_device;
+	rtdm_irq_t		irq_handle;
+
+	/* AT91RM9200 transmit */
+	struct rtskb *skb;			/* holds skb until xmit interrupt completes */
+	dma_addr_t skb_physaddr;		/* phys addr from pci_map_single */
+	int skb_length;				/* saved skb length for pci_unmap_single */
+};
+
+extern const struct ethtool_ops macb_ethtool_ops;
+
+int rtmacb_mii_init(struct macb *bp);
+int rtmacb_ioctl(struct rtnet_device *dev, unsigned cmd, void *arg);
+struct net_device_stats *rtmacb_get_stats(struct rtnet_device *dev);
+void rtmacb_set_hwaddr(struct macb *bp);
+void rtmacb_get_hwaddr(struct macb *bp);
+
+static inline bool macb_is_gem(struct macb *bp)
+{
+	return MACB_BFEXT(IDNUM, macb_readl(bp, MID)) == 0x2;
+}
+
+#endif /* _MACB_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/21142.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/21142.c
new file mode 100644
index 0000000..a36cce8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/21142.c
@@ -0,0 +1,51 @@
+/*
+	drivers/net/tulip/21142.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+u16 t21142_csr14[] = { 0xFFFF, 0x0705, 0x0705, 0x0000, 0x7F3D, };
+
+
+void t21142_start_nway(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int csr14 = ((tp->sym_advertise & 0x0780) << 9)  |
+		((tp->sym_advertise & 0x0020) << 1) | 0xffbf;
+
+	rtdev->if_port = 0;
+	tp->nway = tp->mediasense = 1;
+	tp->nwayset = tp->lpar = 0;
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: Restarting 21143 autonegotiation, csr14=%8.8x.\n",
+			   rtdev->name, csr14);
+	outl(0x0001, ioaddr + CSR13);
+	udelay(100);
+	outl(csr14, ioaddr + CSR14);
+	tp->csr6 = 0x82420000 | (tp->sym_advertise & 0x0040 ? FullDuplex : 0);
+	outl(tp->csr6, ioaddr + CSR6);
+	if (tp->mtable  &&  tp->mtable->csr15dir) {
+		outl(tp->mtable->csr15dir, ioaddr + CSR15);
+		outl(tp->mtable->csr15val, ioaddr + CSR15);
+	} else
+		outw(0x0008, ioaddr + CSR15);
+	outl(0x1301, ioaddr + CSR12); 		/* Trigger NWAY. */
+}
+
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/Makefile
new file mode 100644
index 0000000..3063110
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/Makefile
@@ -0,0 +1,12 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_DRV_TULIP) += rt_tulip.o
+
+rt_tulip-y := \
+	tulip_core.o \
+	eeprom.o \
+	interrupt.o \
+	media.o \
+	21142.o \
+	pnic.o \
+	pnic2.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/eeprom.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/eeprom.c
new file mode 100644
index 0000000..5f3b16c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/eeprom.c
@@ -0,0 +1,321 @@
+/*
+	drivers/net/tulip/eeprom.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include "tulip.h"
+#include <linux/init.h>
+#include <asm/unaligned.h>
+
+
+
+/* Serial EEPROM section. */
+/* The main routine to parse the very complicated SROM structure.
+   Search www.digital.com for "21X4 SROM" to get details.
+   This code is very complex, and will require changes to support
+   additional cards, so I'll be verbose about what is going on.
+   */
+
+/* Known cards that have old-style EEPROMs. */
+static struct eeprom_fixup eeprom_fixups[] = {
+  {"Asante", 0, 0, 0x94, {0x1e00, 0x0000, 0x0800, 0x0100, 0x018c,
+			  0x0000, 0x0000, 0xe078, 0x0001, 0x0050, 0x0018 }},
+  {"SMC9332DST", 0, 0, 0xC0, { 0x1e00, 0x0000, 0x0800, 0x041f,
+			   0x0000, 0x009E, /* 10baseT */
+			   0x0004, 0x009E, /* 10baseT-FD */
+			   0x0903, 0x006D, /* 100baseTx */
+			   0x0905, 0x006D, /* 100baseTx-FD */ }},
+  {"Cogent EM100", 0, 0, 0x92, { 0x1e00, 0x0000, 0x0800, 0x063f,
+				 0x0107, 0x8021, /* 100baseFx */
+				 0x0108, 0x8021, /* 100baseFx-FD */
+				 0x0100, 0x009E, /* 10baseT */
+				 0x0104, 0x009E, /* 10baseT-FD */
+				 0x0103, 0x006D, /* 100baseTx */
+				 0x0105, 0x006D, /* 100baseTx-FD */ }},
+  {"Maxtech NX-110", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x0513,
+				   0x1001, 0x009E, /* 10base2, CSR12 0x10*/
+				   0x0000, 0x009E, /* 10baseT */
+				   0x0004, 0x009E, /* 10baseT-FD */
+				   0x0303, 0x006D, /* 100baseTx, CSR12 0x03 */
+				   0x0305, 0x006D, /* 100baseTx-FD CSR12 0x03 */}},
+  {"Accton EN1207", 0, 0, 0xE8, { 0x1e00, 0x0000, 0x0800, 0x051F,
+				  0x1B01, 0x0000, /* 10base2,   CSR12 0x1B */
+				  0x0B00, 0x009E, /* 10baseT,   CSR12 0x0B */
+				  0x0B04, 0x009E, /* 10baseT-FD,CSR12 0x0B */
+				  0x1B03, 0x006D, /* 100baseTx, CSR12 0x1B */
+				  0x1B05, 0x006D, /* 100baseTx-FD CSR12 0x1B */
+   }},
+  {"NetWinder", 0x00, 0x10, 0x57,
+	/* Default media = MII
+	 * MII block, reset sequence (3) = 0x0821 0x0000 0x0001, capabilities 0x01e1
+	 */
+	{ 0x1e00, 0x0000, 0x000b, 0x8f01, 0x0103, 0x0300, 0x0821, 0x000, 0x0001, 0x0000, 0x01e1 }
+  },
+  {0, 0, 0, 0, {}}};
+
+
+static const char *block_name[] = {
+	"21140 non-MII",
+	"21140 MII PHY",
+	"21142 Serial PHY",
+	"21142 MII PHY",
+	"21143 SYM PHY",
+	"21143 reset method"
+};
+
+
+void tulip_parse_eeprom(/*RTnet*/struct rtnet_device *rtdev)
+{
+	/* The last media info list parsed, for multiport boards.  */
+	static struct mediatable *last_mediatable;
+	static unsigned char *last_ee_data;
+	static int controller_index;
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	unsigned char *ee_data = tp->eeprom;
+	int i;
+
+	tp->mtable = 0;
+	/* Detect an old-style (SA only) EEPROM layout:
+	   memcmp(eedata, eedata+16, 8). */
+	for (i = 0; i < 8; i ++)
+		if (ee_data[i] != ee_data[16+i])
+			break;
+	if (i >= 8) {
+		if (ee_data[0] == 0xff) {
+			if (last_mediatable) {
+				controller_index++;
+				/*RTnet*/rtdm_printk(KERN_INFO "%s:  Controller %d of multiport board.\n",
+					   rtdev->name, controller_index);
+				tp->mtable = last_mediatable;
+				ee_data = last_ee_data;
+				goto subsequent_board;
+			} else
+				/*RTnet*/rtdm_printk(KERN_INFO "%s:  Missing EEPROM, this interface may "
+					   "not work correctly!\n",
+			   rtdev->name);
+			return;
+		}
+	  /* Do a fix-up based on the vendor half of the station address prefix. */
+	  for (i = 0; eeprom_fixups[i].name; i++) {
+		if (rtdev->dev_addr[0] == eeprom_fixups[i].addr0
+			&&  rtdev->dev_addr[1] == eeprom_fixups[i].addr1
+			&&  rtdev->dev_addr[2] == eeprom_fixups[i].addr2) {
+		  if (rtdev->dev_addr[2] == 0xE8  &&  ee_data[0x1a] == 0x55)
+			  i++;			/* An Accton EN1207, not an outlaw Maxtech. */
+		  memcpy(ee_data + 26, eeprom_fixups[i].newtable,
+				 sizeof(eeprom_fixups[i].newtable));
+		  /*RTnet*/rtdm_printk(KERN_INFO "%s: Old format EEPROM on '%s' board.  Using"
+				 " substitute media control info.\n",
+				 rtdev->name, eeprom_fixups[i].name);
+		  break;
+		}
+	  }
+	  if (eeprom_fixups[i].name == NULL) { /* No fixup found. */
+		  /*RTnet*/rtdm_printk(KERN_INFO "%s: Old style EEPROM with no media selection "
+				 "information.\n",
+			   rtdev->name);
+		return;
+	  }
+	}
+
+	controller_index = 0;
+	if (ee_data[19] > 1) {		/* Multiport board. */
+		last_ee_data = ee_data;
+	}
+subsequent_board:
+
+	if (ee_data[27] == 0) {		/* No valid media table. */
+	} else if (tp->chip_id == DC21041) {
+		unsigned char *p = (void *)ee_data + ee_data[27 + controller_index*3];
+		int media = get_u16(p);
+		int count = p[2];
+		p += 3;
+
+		/*RTnet*/rtdm_printk(KERN_INFO "%s: 21041 Media table, default media %4.4x (%s).\n",
+			   rtdev->name, media,
+			   media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+		for (i = 0; i < count; i++) {
+			unsigned char media_block = *p++;
+			int media_code = media_block & MEDIA_MASK;
+			if (media_block & 0x40)
+				p += 6;
+			/*RTnet*/rtdm_printk(KERN_INFO "%s:  21041 media #%d, %s.\n",
+				   rtdev->name, media_code, medianame[media_code]);
+		}
+	} else {
+		unsigned char *p = (void *)ee_data + ee_data[27];
+		unsigned char csr12dir = 0;
+		int count, new_advertise = 0;
+		struct mediatable *mtable;
+		u16 media = get_u16(p);
+
+		p += 2;
+		if (tp->flags & CSR12_IN_SROM)
+			csr12dir = *p++;
+		count = *p++;
+
+	        /* there is no phy information, don't even try to build mtable */
+	        if (count == 0) {
+			if (tulip_debug > 0)
+				/*RTnet*/rtdm_printk(KERN_WARNING "%s: no phy info, aborting mtable build\n", rtdev->name);
+		        return;
+		}
+
+		mtable = (struct mediatable *)
+		    kmalloc(sizeof(struct mediatable) + count*sizeof(struct medialeaf), GFP_KERNEL);
+
+		if (mtable == NULL)
+			return;				/* Horrible, impossible failure. */
+		last_mediatable = tp->mtable = mtable;
+		mtable->defaultmedia = media;
+		mtable->leafcount = count;
+		mtable->csr12dir = csr12dir;
+		mtable->has_nonmii = mtable->has_mii = mtable->has_reset = 0;
+		mtable->csr15dir = mtable->csr15val = 0;
+
+		/*RTnet*/rtdm_printk(KERN_INFO "%s:  EEPROM default media type %s.\n", rtdev->name,
+			   media & 0x0800 ? "Autosense" : medianame[media & MEDIA_MASK]);
+		for (i = 0; i < count; i++) {
+			struct medialeaf *leaf = &mtable->mleaf[i];
+
+			if ((p[0] & 0x80) == 0) { /* 21140 Compact block. */
+				leaf->type = 0;
+				leaf->media = p[0] & 0x3f;
+				leaf->leafdata = p;
+				if ((p[2] & 0x61) == 0x01)	/* Bogus, but Znyx boards do it. */
+					mtable->has_mii = 1;
+				p += 4;
+			} else {
+				leaf->type = p[1];
+				if (p[1] == 0x05) {
+					mtable->has_reset = i;
+					leaf->media = p[2] & 0x0f;
+				} else if (tp->chip_id == DM910X && p[1] == 0x80) {
+					/* Hack to ignore Davicom delay period block */
+					mtable->leafcount--;
+					count--;
+					i--;
+					leaf->leafdata = p + 2;
+					p += (p[0] & 0x3f) + 1;
+					continue;
+				} else if (p[1] & 1) {
+					int gpr_len, reset_len;
+
+					mtable->has_mii = 1;
+					leaf->media = 11;
+					gpr_len=p[3]*2;
+					reset_len=p[4+gpr_len]*2;
+					new_advertise |= get_u16(&p[7+gpr_len+reset_len]);
+				} else {
+					mtable->has_nonmii = 1;
+					leaf->media = p[2] & MEDIA_MASK;
+					/* Davicom's media number for 100BaseTX is strange */
+					if (tp->chip_id == DM910X && leaf->media == 1)
+						leaf->media = 3;
+					switch (leaf->media) {
+					case 0: new_advertise |= 0x0020; break;
+					case 4: new_advertise |= 0x0040; break;
+					case 3: new_advertise |= 0x0080; break;
+					case 5: new_advertise |= 0x0100; break;
+					case 6: new_advertise |= 0x0200; break;
+					}
+					if (p[1] == 2  &&  leaf->media == 0) {
+						if (p[2] & 0x40) {
+							u32 base15 = get_unaligned((u16*)&p[7]);
+							mtable->csr15dir =
+								(get_unaligned((u16*)&p[9])<<16) + base15;
+							mtable->csr15val =
+								(get_unaligned((u16*)&p[11])<<16) + base15;
+						} else {
+							mtable->csr15dir = get_unaligned((u16*)&p[3])<<16;
+							mtable->csr15val = get_unaligned((u16*)&p[5])<<16;
+						}
+					}
+				}
+				leaf->leafdata = p + 2;
+				p += (p[0] & 0x3f) + 1;
+			}
+			if (tulip_debug > 1  &&  leaf->media == 11) {
+				unsigned char *bp = leaf->leafdata;
+				/*RTnet*/rtdm_printk(KERN_INFO "%s:  MII interface PHY %d, setup/reset "
+					   "sequences %d/%d long, capabilities %2.2x %2.2x.\n",
+					   rtdev->name, bp[0], bp[1], bp[2 + bp[1]*2],
+					   bp[5 + bp[2 + bp[1]*2]*2], bp[4 + bp[2 + bp[1]*2]*2]);
+			}
+			/*RTnet*/rtdm_printk(KERN_INFO "%s:  Index #%d - Media %s (#%d) described "
+				   "by a %s (%d) block.\n",
+				   rtdev->name, i, medianame[leaf->media & 15], leaf->media,
+				   leaf->type < ARRAY_SIZE(block_name) ? block_name[leaf->type] : "<unknown>",
+				   leaf->type);
+		}
+		if (new_advertise)
+			tp->sym_advertise = new_advertise;
+	}
+}
+/* Reading a serial EEPROM is a "bit" grungy, but we work our way through it :-> */
+
+/*  EEPROM_Ctrl bits. */
+#define EE_SHIFT_CLK	0x02	/* EEPROM shift clock. */
+#define EE_CS			0x01	/* EEPROM chip select. */
+#define EE_DATA_WRITE	0x04	/* Data from the Tulip to EEPROM. */
+#define EE_WRITE_0		0x01
+#define EE_WRITE_1		0x05
+#define EE_DATA_READ	0x08	/* Data from the EEPROM chip. */
+#define EE_ENB			(0x4800 | EE_CS)
+
+/* Delay between EEPROM clock transitions.
+   Even at 33 MHz, current PCI implementations don't overrun the EEPROM clock.
+   We add a bus turn-around to ensure that this remains true. */
+#define eeprom_delay()	inl(ee_addr)
+
+/* The EEPROM commands include the always-set leading bit. */
+#define EE_READ_CMD		(6)
+
+/* Note: this routine returns extra data bits for size detection. */
+int tulip_read_eeprom(long ioaddr, int location, int addr_len)
+{
+	int i;
+	unsigned retval = 0;
+	long ee_addr = ioaddr + CSR9;
+	int read_cmd = location | (EE_READ_CMD << addr_len);
+
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	outl(EE_ENB, ee_addr);
+
+	/* Shift the read command bits out. */
+	for (i = 4 + addr_len; i >= 0; i--) {
+		short dataval = (read_cmd & (1 << i)) ? EE_DATA_WRITE : 0;
+		outl(EE_ENB | dataval, ee_addr);
+		eeprom_delay();
+		outl(EE_ENB | dataval | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+	}
+	outl(EE_ENB, ee_addr);
+	eeprom_delay();
+
+	for (i = 16; i > 0; i--) {
+		outl(EE_ENB | EE_SHIFT_CLK, ee_addr);
+		eeprom_delay();
+		retval = (retval << 1) | ((inl(ee_addr) & EE_DATA_READ) ? 1 : 0);
+		outl(EE_ENB, ee_addr);
+		eeprom_delay();
+	}
+
+	/* Terminate the EEPROM access. */
+	outl(EE_ENB & ~EE_CS, ee_addr);
+	return retval;
+}
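+
+/* Editor's sketch, illustration only (not used by the driver): dumping the
+ * first EEPROM words with tulip_read_eeprom() during probe.  The 6-bit
+ * address length is an assumption; real probe code derives it from the
+ * extra size-detection bits this routine returns.
+ */
+#if 0
+static void tulip_dump_eeprom_head(long ioaddr)
+{
+	int n;
+
+	for (n = 0; n < 8; n++)
+		rtdm_printk(KERN_DEBUG "EEPROM word %d = %4.4x\n",
+			    n, tulip_read_eeprom(ioaddr, n, 6));
+}
+#endif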
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/interrupt.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/interrupt.c
new file mode 100644
index 0000000..d65bb3f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/interrupt.c
@@ -0,0 +1,393 @@
+/*
+	drivers/net/tulip/interrupt.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include "tulip.h"
+#include <linux/etherdevice.h>
+#include <linux/pci.h>
+
+
+int tulip_rx_copybreak;
+unsigned int tulip_max_interrupt_work;
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+
+#define MIT_SIZE 15
+unsigned int mit_table[MIT_SIZE+1] =
+{
+	/*  CSR11 21143 hardware Mitigation Control Interrupt
+	    We use only RX mitigation; TX interrupt mitigation is
+	    handled by other techniques.
+
+	   31    Cycle Size (timer control)
+	   30:27 TX timer in 16 * Cycle size
+	   26:24 TX number of packets before interrupt
+	   23:20 RX timer in Cycle size
+	   19:17 RX number of packets before interrupt
+	   16    Continuous Mode (CM)
+	*/
+
+	0x0,             /* IM disabled */
+	0x80150000,      /* RX time = 1, RX pkts = 2, CM = 1 */
+	0x80150000,
+	0x80270000,
+	0x80370000,
+	0x80490000,
+	0x80590000,
+	0x80690000,
+	0x807B0000,
+	0x808B0000,
+	0x809D0000,
+	0x80AD0000,
+	0x80BD0000,
+	0x80CF0000,
+	0x80DF0000,
+//       0x80FF0000      /* RX time = 16, RX pkts = 7, CM = 1 */
+	0x80F10000      /* RX time = 16, RX pkts = 0, CM = 1 */
+};
+#endif
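+
+/* Editor's sketch (assumed helper, not driver code): how a mit_table entry
+ * above decomposes into the CSR11 fields just documented.  For example,
+ * MIT_CSR11(1, 1, 2, 1) == 0x80150000, the "RX time = 1, RX pkts = 2,
+ * CM = 1" entry.
+ */
+#if 0
+#define MIT_CSR11(cycle, rx_timer, rx_pkts, cm) \
+	(((u32)(cycle) << 31) | ((u32)(rx_timer) << 20) | \
+	 ((u32)(rx_pkts) << 17) | ((u32)(cm) << 16))
+#endif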
+
+
+int tulip_refill_rx(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int entry;
+	int refilled = 0;
+
+	/* Refill the Rx ring buffers. */
+	for (; tp->cur_rx - tp->dirty_rx > 0; tp->dirty_rx++) {
+		entry = tp->dirty_rx % RX_RING_SIZE;
+		if (tp->rx_buffers[entry].skb == NULL) {
+			struct /*RTnet*/rtskb *skb;
+			dma_addr_t mapping;
+
+			skb = tp->rx_buffers[entry].skb = /*RTnet*/rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+			if (skb == NULL)
+				break;
+
+			mapping = dma_map_single(&tp->pdev->dev, skb->tail,
+						 PKT_BUF_SZ, DMA_FROM_DEVICE);
+			tp->rx_buffers[entry].mapping = mapping;
+
+			tp->rx_ring[entry].buffer1 = cpu_to_le32(mapping);
+			refilled++;
+		}
+		tp->rx_ring[entry].status = cpu_to_le32(DescOwned);
+	}
+	if(tp->chip_id == LC82C168) {
+		if(((inl(rtdev->base_addr + CSR5)>>17)&0x07) == 4) {
+			/* Rx stopped due to out of buffers,
+			 * restart it
+			 */
+			outl(0x01, rtdev->base_addr + CSR2);
+		}
+	}
+	return refilled;
+}
+
+
+static int tulip_rx(/*RTnet*/struct rtnet_device *rtdev, nanosecs_abs_t *time_stamp)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int entry = tp->cur_rx % RX_RING_SIZE;
+	int rx_work_limit = tp->dirty_rx + RX_RING_SIZE - tp->cur_rx;
+	int received = 0;
+
+	if (tulip_debug > 4)
+		/*RTnet*/rtdm_printk(KERN_DEBUG " In tulip_rx(), entry %d %8.8x.\n", entry,
+			   tp->rx_ring[entry].status);
+	/* If we own the next entry, it is a new packet. Send it up. */
+	while ( ! (tp->rx_ring[entry].status & cpu_to_le32(DescOwned))) {
+		s32 status = le32_to_cpu(tp->rx_ring[entry].status);
+
+		if (tulip_debug > 5)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: In tulip_rx(), entry %d %8.8x.\n",
+				   rtdev->name, entry, status);
+		if (--rx_work_limit < 0)
+			break;
+		if ((status & 0x38008300) != 0x0300) {
+			if ((status & 0x38000300) != 0x0300) {
+				/* Ignore earlier buffers. */
+				if ((status & 0xffff) != 0x7fff) {
+					if (tulip_debug > 1)
+						/*RTnet*/rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame "
+							   "spanned multiple buffers, status %8.8x!\n",
+							   rtdev->name, status);
+					tp->stats.rx_length_errors++;
+				}
+			} else if (status & RxDescFatalErr) {
+				/* There was a fatal error. */
+				if (tulip_debug > 2)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Receive error, Rx status %8.8x.\n",
+						   rtdev->name, status);
+				tp->stats.rx_errors++; /* end of a packet.*/
+				if (status & 0x0890) tp->stats.rx_length_errors++;
+				if (status & 0x0004) tp->stats.rx_frame_errors++;
+				if (status & 0x0002) tp->stats.rx_crc_errors++;
+				if (status & 0x0001) tp->stats.rx_fifo_errors++;
+			}
+		} else {
+			/* Omit the four octet CRC from the length. */
+			short pkt_len = ((status >> 16) & 0x7ff) - 4;
+			struct /*RTnet*/rtskb *skb;
+
+#ifndef final_version
+			if (pkt_len > 1518) {
+				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Bogus packet size of %d (%#x).\n",
+					   rtdev->name, pkt_len, pkt_len);
+				pkt_len = 1518;
+				tp->stats.rx_length_errors++;
+			}
+#endif
+
+			{
+				unsigned char *temp = /*RTnet*/rtskb_put(skb = tp->rx_buffers[entry].skb, pkt_len);
+
+#ifndef final_version
+				if (tp->rx_buffers[entry].mapping !=
+				    le32_to_cpu(tp->rx_ring[entry].buffer1)) {
+					/*RTnet*/rtdm_printk(KERN_ERR "%s: Internal fault: The skbuff addresses "
+					       "do not match in tulip_rx: %08x vs. %08llx ? / %p.\n",
+					       rtdev->name,
+					       le32_to_cpu(tp->rx_ring[entry].buffer1),
+					       (unsigned long long)tp->rx_buffers[entry].mapping,
+					       temp);/*RTnet*/
+				}
+#endif
+
+				dma_unmap_single(&tp->pdev->dev,
+						 tp->rx_buffers[entry].mapping,
+						 PKT_BUF_SZ, DMA_FROM_DEVICE);
+
+				tp->rx_buffers[entry].skb = NULL;
+				tp->rx_buffers[entry].mapping = 0;
+			}
+			skb->protocol = /*RTnet*/rt_eth_type_trans(skb, rtdev);
+			skb->time_stamp = *time_stamp;
+			/*RTnet*/rtnetif_rx(skb);
+
+			tp->stats.rx_packets++;
+			tp->stats.rx_bytes += pkt_len;
+		}
+		received++;
+		entry = (++tp->cur_rx) % RX_RING_SIZE;
+	}
+	return received;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+int tulip_interrupt(rtdm_irq_t *irq_handle)
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read();/*RTnet*/
+	struct rtnet_device *rtdev =
+	    rtdm_irq_get_arg(irq_handle, struct rtnet_device);/*RTnet*/
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	unsigned int csr5;
+	int entry;
+	int missed;
+	int rx = 0;
+	int tx = 0;
+	int oi = 0;
+	int maxrx = RX_RING_SIZE;
+	int maxtx = TX_RING_SIZE;
+	int maxoi = TX_RING_SIZE;
+	unsigned int work_count = tulip_max_interrupt_work;
+
+	/* Let's see whether the interrupt really is for us */
+	csr5 = inl(ioaddr + CSR5);
+
+	if ((csr5 & (NormalIntr|AbnormalIntr)) == 0) {
+		rtdm_printk("%s: unexpected IRQ!\n",rtdev->name);
+		return RTDM_IRQ_NONE;
+	}
+
+	tp->nir++;
+
+	do {
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		outl(csr5 & 0x0001ffff, ioaddr + CSR5);
+
+		if (tulip_debug > 4)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: interrupt  csr5=%#8.8x new csr5=%#8.8x.\n",
+				   rtdev->name, csr5, inl(rtdev->base_addr + CSR5));
+
+		if (csr5 & (RxIntr | RxNoBuf)) {
+			rx += tulip_rx(rtdev, &time_stamp);
+			tulip_refill_rx(rtdev);
+		}
+
+		if (csr5 & (TxNoBuf | TxDied | TxIntr | TimerInt)) {
+			unsigned int dirty_tx;
+
+			rtdm_lock_get(&tp->lock);
+
+			for (dirty_tx = tp->dirty_tx; tp->cur_tx - dirty_tx > 0;
+				 dirty_tx++) {
+				int entry = dirty_tx % TX_RING_SIZE;
+				int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+				if (status < 0)
+					break;			/* It still has not been Txed */
+
+				/* Check for Rx filter setup frames. */
+				if (tp->tx_buffers[entry].skb == NULL) {
+					/* test needed because dummy frames are not mapped */
+					if (tp->tx_buffers[entry].mapping)
+						dma_unmap_single(&tp->pdev->dev,
+							 tp->tx_buffers[entry].mapping,
+							 sizeof(tp->setup_frame),
+							 DMA_TO_DEVICE);
+					continue;
+				}
+
+				if (status & 0x8000) {
+					/* There was a major error; log it. */
+#ifndef final_version
+					if (tulip_debug > 1)
+						/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n",
+							   rtdev->name, status);
+#endif
+					tp->stats.tx_errors++;
+					if (status & 0x4104) tp->stats.tx_aborted_errors++;
+					if (status & 0x0C00) tp->stats.tx_carrier_errors++;
+					if (status & 0x0200) tp->stats.tx_window_errors++;
+					if (status & 0x0002) tp->stats.tx_fifo_errors++;
+					if ((status & 0x0080) && tp->full_duplex == 0)
+						tp->stats.tx_heartbeat_errors++;
+				} else {
+					tp->stats.tx_bytes +=
+						tp->tx_buffers[entry].skb->len;
+					tp->stats.collisions += (status >> 3) & 15;
+					tp->stats.tx_packets++;
+				}
+
+				dma_unmap_single(&tp->pdev->dev,
+						 tp->tx_buffers[entry].mapping,
+						 tp->tx_buffers[entry].skb->len,
+						 DMA_TO_DEVICE);
+
+				/* Free the original skb. */
+				/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
+				tp->tx_buffers[entry].skb = NULL;
+				tp->tx_buffers[entry].mapping = 0;
+				tx++;
+				rtnetif_tx(rtdev);
+			}
+
+#ifndef final_version
+			if (tp->cur_tx - dirty_tx > TX_RING_SIZE) {
+				/*RTnet*/rtdm_printk(KERN_ERR "%s: Out-of-sync dirty pointer, %d vs. %d.\n",
+					   rtdev->name, dirty_tx, tp->cur_tx);
+				dirty_tx += TX_RING_SIZE;
+			}
+#endif
+
+			if (tp->cur_tx - dirty_tx < TX_RING_SIZE - 2)
+				/*RTnet*/rtnetif_wake_queue(rtdev);
+
+			tp->dirty_tx = dirty_tx;
+			if (csr5 & TxDied) {
+				if (tulip_debug > 2)
+					/*RTnet*/rtdm_printk(KERN_WARNING "%s: The transmitter stopped."
+						   "  CSR5 is %x, CSR6 %x, new CSR6 %x.\n",
+						   rtdev->name, csr5, inl(ioaddr + CSR6), tp->csr6);
+				tulip_restart_rxtx(tp);
+			}
+			rtdm_lock_put(&tp->lock);
+		}
+
+		/* Log errors. */
+		if (csr5 & AbnormalIntr) {	/* Abnormal error summary bit. */
+			if (csr5 == 0xffffffff)
+				break;
+			/*RTnet*/rtdm_printk(KERN_ERR "%s: Error detected, "
+			    "device may not work any more (csr5=%08x)!\n", rtdev->name, csr5);
+			/* Clear all error sources, included undocumented ones! */
+			outl(0x0800f7ba, ioaddr + CSR5);
+			oi++;
+		}
+		if (csr5 & TimerInt) {
+
+			if (tulip_debug > 2)
+				/*RTnet*/rtdm_printk(KERN_ERR "%s: Re-enabling interrupts, %8.8x.\n",
+					   rtdev->name, csr5);
+			outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+			tp->ttimer = 0;
+			oi++;
+		}
+		if (tx > maxtx || rx > maxrx || oi > maxoi) {
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_WARNING "%s: Too much work during an interrupt, "
+					   "csr5=0x%8.8x. (%lu) (%d,%d,%d)\n", rtdev->name, csr5, tp->nir, tx, rx, oi);
+
+		       /* Acknowledge all interrupt sources. */
+			outl(0x8001ffff, ioaddr + CSR5);
+			if (tp->flags & HAS_INTR_MITIGATION) {
+		     /* Josip Loncaric at ICASE did extensive experimentation
+			to develop a good interrupt mitigation setting.*/
+				outl(0x8b240000, ioaddr + CSR11);
+			} else if (tp->chip_id == LC82C168) {
+				/* the LC82C168 doesn't have a hw timer.*/
+				outl(0x00, ioaddr + CSR7);
+			} else {
+			  /* Mask all interrupting sources, set timer to
+				re-enable. */
+			}
+			break;
+		}
+
+		work_count--;
+		if (work_count == 0)
+			break;
+
+		csr5 = inl(ioaddr + CSR5);
+	} while ((csr5 & (NormalIntr|AbnormalIntr)) != 0);
+
+	tulip_refill_rx(rtdev);
+
+	/* check if the card is in suspend mode */
+	entry = tp->dirty_rx % RX_RING_SIZE;
+	if (tp->rx_buffers[entry].skb == NULL) {
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_WARNING "%s: in rx suspend mode: (%lu) (tp->cur_rx = %u, ttimer = %d, rx = %d) go/stay in suspend mode\n", rtdev->name, tp->nir, tp->cur_rx, tp->ttimer, rx);
+		if (tp->chip_id == LC82C168)
+			outl(0x00, ioaddr + CSR7);
+		else {
+			if (tp->ttimer == 0 || (inl(ioaddr + CSR11) & 0xffff) == 0) {
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_WARNING "%s: in rx suspend mode: (%lu) set timer\n", rtdev->name, tp->nir);
+				outl(tulip_tbl[tp->chip_id].valid_intrs | TimerInt,
+					ioaddr + CSR7);
+				outl(TimerInt, ioaddr + CSR5);
+				outl(12, ioaddr + CSR11);
+				tp->ttimer = 1;
+			}
+		}
+	}
+
+	if ((missed = inl(ioaddr + CSR8) & 0x1ffff)) {
+		tp->stats.rx_dropped += missed & 0x10000 ? 0x10000 : missed;
+	}
+
+	if (tulip_debug > 4)
+		/*RTnet*/rtdm_printk(KERN_DEBUG "%s: exiting interrupt, csr5=%#4.4x.\n",
+			   rtdev->name, inl(ioaddr + CSR5));
+	if (rx)
+		rt_mark_stack_mgr(rtdev);
+	return RTDM_IRQ_HANDLED;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/media.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/media.c
new file mode 100644
index 0000000..679a74e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/media.c
@@ -0,0 +1,567 @@
+/*
+	drivers/net/tulip/media.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include <linux/kernel.h>
+#include <linux/mii.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include "tulip.h"
+
+
+/* This is a mysterious value that can be written to CSR11 in the 21040 (only)
+   to support a pre-NWay full-duplex signaling mechanism using short frames.
+   No one knows what it should be, but if left at its default value some
+   10base2(!) packets trigger a full-duplex-request interrupt. */
+#define FULL_DUPLEX_MAGIC	0x6969
+
+/* The maximum data clock rate is 2.5 MHz.  The minimum timing is usually
+   met by back-to-back PCI I/O cycles, but we insert a delay to avoid
+   "overclocking" issues or future 66 MHz PCI. */
+#define mdio_delay() inl(mdio_addr)
+
+/* Read and write the MII registers using software-generated serial
+   MDIO protocol.  It is just different enough from the EEPROM protocol
+   to not share code.  The maximum data clock rate is 2.5 MHz. */
+#define MDIO_SHIFT_CLK		0x10000
+#define MDIO_DATA_WRITE0	0x00000
+#define MDIO_DATA_WRITE1	0x20000
+#define MDIO_ENB		0x00000 /* Ignore the 0x02000 databook setting. */
+#define MDIO_ENB_IN		0x40000
+#define MDIO_DATA_READ		0x80000
+
+static const unsigned char comet_miireg2offset[32] = {
+	0xB4, 0xB8, 0xBC, 0xC0,  0xC4, 0xC8, 0xCC, 0,  0,0,0,0,  0,0,0,0,
+	0,0xD0,0,0,  0,0,0,0,  0,0,0,0, 0, 0xD4, 0xD8, 0xDC, };
+
+
+/* MII transceiver control section.
+   Read and write the MII registers using software-generated serial
+   MDIO protocol.  See the MII specifications or DP83840A data sheet
+   for details. */
+
+int tulip_mdio_read(struct rtnet_device *rtdev, int phy_id, int location)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int i;
+	int read_cmd = (0xf6 << 10) | ((phy_id & 0x1f) << 5) | location;
+	int retval = 0;
+	long ioaddr = rtdev->base_addr;
+	long mdio_addr = ioaddr + CSR9;
+	unsigned long flags;
+
+	if (location & ~0x1f)
+		return 0xffff;
+
+	if (tp->chip_id == COMET  &&  phy_id == 30) {
+		if (comet_miireg2offset[location])
+			return inl(ioaddr + comet_miireg2offset[location]);
+		return 0xffff;
+	}
+
+	spin_lock_irqsave(&tp->mii_lock, flags);
+	if (tp->chip_id == LC82C168) {
+		int i = 1000;
+		outl(0x60020000 + (phy_id<<23) + (location<<18), ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		inl(ioaddr + 0xA0);
+		while (--i > 0) {
+			barrier();
+			if ( ! ((retval = inl(ioaddr + 0xA0)) & 0x80000000))
+				break;
+		}
+		spin_unlock_irqrestore(&tp->mii_lock, flags);
+		return retval & 0xffff;
+	}
+
+	/* Establish sync by sending at least 32 logic ones. */
+	for (i = 32; i >= 0; i--) {
+		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Shift the read command bits out. */
+	for (i = 15; i >= 0; i--) {
+		int dataval = (read_cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+
+		outl(MDIO_ENB | dataval, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Read the two transition, 16 data, and wire-idle bits. */
+	for (i = 19; i > 0; i--) {
+		outl(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		retval = (retval << 1) | ((inl(mdio_addr) & MDIO_DATA_READ) ? 1 : 0);
+		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+
+	spin_unlock_irqrestore(&tp->mii_lock, flags);
+	return (retval>>1) & 0xffff;
+}
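+
+/* Editor's sketch, illustration only: composing the 32-bit PHY identifier
+ * from two reads through the routine above.  MII_PHYSID1/MII_PHYSID2 come
+ * from <linux/mii.h>; the driver itself does comparable reads in
+ * tulip_find_mii().
+ */
+#if 0
+static u32 tulip_read_phy_id(struct rtnet_device *rtdev, int phy)
+{
+	u32 id = tulip_mdio_read(rtdev, phy, MII_PHYSID1) << 16;
+
+	return id | tulip_mdio_read(rtdev, phy, MII_PHYSID2);
+}
+#endif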
+
+void tulip_mdio_write(struct rtnet_device *rtdev, int phy_id, int location, int val)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int i;
+	int cmd = (0x5002 << 16) | ((phy_id & 0x1f) << 23) | (location<<18) | (val & 0xffff);
+	long ioaddr = rtdev->base_addr;
+	long mdio_addr = ioaddr + CSR9;
+	unsigned long flags;
+
+	if (location & ~0x1f)
+		return;
+
+	if (tp->chip_id == COMET && phy_id == 30) {
+		if (comet_miireg2offset[location])
+			outl(val, ioaddr + comet_miireg2offset[location]);
+		return;
+	}
+
+	spin_lock_irqsave(&tp->mii_lock, flags);
+	if (tp->chip_id == LC82C168) {
+		int i = 1000;
+		outl(cmd, ioaddr + 0xA0);
+		do {
+			barrier();
+			if ( ! (inl(ioaddr + 0xA0) & 0x80000000))
+				break;
+		} while (--i > 0);
+		spin_unlock_irqrestore(&tp->mii_lock, flags);
+		return;
+	}
+
+	/* Establish sync by sending 32 logic ones. */
+	for (i = 32; i >= 0; i--) {
+		outl(MDIO_ENB | MDIO_DATA_WRITE1, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | MDIO_DATA_WRITE1 | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Shift the command bits out. */
+	for (i = 31; i >= 0; i--) {
+		int dataval = (cmd & (1 << i)) ? MDIO_DATA_WRITE1 : 0;
+		outl(MDIO_ENB | dataval, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB | dataval | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+	/* Clear out extra bits. */
+	for (i = 2; i > 0; i--) {
+		outl(MDIO_ENB_IN, mdio_addr);
+		mdio_delay();
+		outl(MDIO_ENB_IN | MDIO_SHIFT_CLK, mdio_addr);
+		mdio_delay();
+	}
+
+	spin_unlock_irqrestore(&tp->mii_lock, flags);
+}
+
+
+/* Set up the transceiver control registers for the selected media type. */
+void tulip_select_media(struct rtnet_device *rtdev, int startup)
+{
+	long ioaddr = rtdev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	struct mediatable *mtable = tp->mtable;
+	u32 new_csr6;
+	int i;
+
+	if (mtable) {
+		struct medialeaf *mleaf = &mtable->mleaf[tp->cur_index];
+		unsigned char *p = mleaf->leafdata;
+		switch (mleaf->type) {
+		case 0:					/* 21140 non-MII xcvr. */
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Using a 21140 non-MII transceiver"
+					   " with control setting %2.2x.\n",
+					   rtdev->name, p[1]);
+			rtdev->if_port = p[0];
+			if (startup)
+				outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+			outl(p[1], ioaddr + CSR12);
+			new_csr6 = 0x02000000 | ((p[2] & 0x71) << 18);
+			break;
+		case 2: case 4: {
+			u16 setup[5];
+			u32 csr13val, csr14val, csr15dir, csr15val;
+			for (i = 0; i < 5; i++)
+				setup[i] = get_u16(&p[i*2 + 1]);
+
+			rtdev->if_port = p[0] & MEDIA_MASK;
+			if (tulip_media_cap[rtdev->if_port] & MediaAlwaysFD)
+				tp->full_duplex = 1;
+
+			if (startup && mtable->has_reset) {
+				struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+				unsigned char *rst = rleaf->leafdata;
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+						   rtdev->name);
+				for (i = 0; i < rst[0]; i++)
+					outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+			}
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_DEBUG "%s: 21143 non-MII %s transceiver control "
+					   "%4.4x/%4.4x.\n",
+					   rtdev->name, medianame[rtdev->if_port], setup[0], setup[1]);
+			if (p[0] & 0x40) {	/* SIA (CSR13-15) setup values are provided. */
+				csr13val = setup[0];
+				csr14val = setup[1];
+				csr15dir = (setup[3]<<16) | setup[2];
+				csr15val = (setup[4]<<16) | setup[2];
+				outl(0, ioaddr + CSR13);
+				outl(csr14val, ioaddr + CSR14);
+				outl(csr15dir, ioaddr + CSR15);	/* Direction */
+				outl(csr15val, ioaddr + CSR15);	/* Data */
+				outl(csr13val, ioaddr + CSR13);
+			} else {
+				csr13val = 1;
+				csr14val = 0;
+				csr15dir = (setup[0]<<16) | 0x0008;
+				csr15val = (setup[1]<<16) | 0x0008;
+				if (rtdev->if_port <= 4)
+					csr14val = t21142_csr14[rtdev->if_port];
+				if (startup) {
+					outl(0, ioaddr + CSR13);
+					outl(csr14val, ioaddr + CSR14);
+				}
+				outl(csr15dir, ioaddr + CSR15);	/* Direction */
+				outl(csr15val, ioaddr + CSR15);	/* Data */
+				if (startup) outl(csr13val, ioaddr + CSR13);
+			}
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_DEBUG "%s:  Setting CSR15 to %8.8x/%8.8x.\n",
+					   rtdev->name, csr15dir, csr15val);
+			if (mleaf->type == 4)
+				new_csr6 = 0x82020000 | ((setup[2] & 0x71) << 18);
+			else
+				new_csr6 = 0x82420000;
+			break;
+		}
+		case 1: case 3: {
+			int phy_num = p[0];
+			int init_length = p[1];
+			u16 *misc_info, tmp_info;
+
+			rtdev->if_port = 11;
+			new_csr6 = 0x020E0000;
+			if (mleaf->type == 3) {	/* 21142 */
+				u16 *init_sequence = (u16*)(p+2);
+				u16 *reset_sequence = &((u16*)(p+3))[init_length];
+				int reset_length = p[2 + init_length*2];
+				misc_info = reset_sequence + reset_length;
+				if (startup)
+					for (i = 0; i < reset_length; i++)
+						outl(get_u16(&reset_sequence[i]) << 16, ioaddr + CSR15);
+				for (i = 0; i < init_length; i++)
+					outl(get_u16(&init_sequence[i]) << 16, ioaddr + CSR15);
+			} else {
+				u8 *init_sequence = p + 2;
+				u8 *reset_sequence = p + 3 + init_length;
+				int reset_length = p[2 + init_length];
+				misc_info = (u16*)(reset_sequence + reset_length);
+				if (startup) {
+					outl(mtable->csr12dir | 0x100, ioaddr + CSR12);
+					for (i = 0; i < reset_length; i++)
+						outl(reset_sequence[i], ioaddr + CSR12);
+				}
+				for (i = 0; i < init_length; i++)
+					outl(init_sequence[i], ioaddr + CSR12);
+			}
+			tmp_info = get_u16(&misc_info[1]);
+			if (tmp_info)
+				tp->advertising[phy_num] = tmp_info | 1;
+			if (tmp_info && startup < 2) {
+				if (tp->mii_advertise == 0)
+					tp->mii_advertise = tp->advertising[phy_num];
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s:  Advertising %4.4x on MII %d.\n",
+					       rtdev->name, tp->mii_advertise, tp->phys[phy_num]);
+				tulip_mdio_write(rtdev, tp->phys[phy_num], 4, tp->mii_advertise);
+			}
+			break;
+		}
+		case 5: case 6: {
+			u16 setup[5];
+
+			new_csr6 = 0; /* FIXME */
+
+			for (i = 0; i < 5; i++)
+				setup[i] = get_u16(&p[i*2 + 1]);
+
+			if (startup && mtable->has_reset) {
+				struct medialeaf *rleaf = &mtable->mleaf[mtable->has_reset];
+				unsigned char *rst = rleaf->leafdata;
+				if (tulip_debug > 1)
+					/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Resetting the transceiver.\n",
+						   rtdev->name);
+				for (i = 0; i < rst[0]; i++)
+					outl(get_u16(rst + 1 + (i<<1)) << 16, ioaddr + CSR15);
+			}
+
+			break;
+		}
+		default:
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s:  Invalid media table selection %d.\n",
+					   rtdev->name, mleaf->type);
+			new_csr6 = 0x020E0000;
+		}
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: Using media type %s, CSR12 is %2.2x.\n",
+				   rtdev->name, medianame[rtdev->if_port],
+				   inl(ioaddr + CSR12) & 0xff);
+	} else if (tp->chip_id == DC21041) {
+		int port = rtdev->if_port <= 4 ? rtdev->if_port : 0;
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: 21041 using media %s, CSR12 is %4.4x.\n",
+				   rtdev->name, medianame[port == 3 ? 12: port],
+				   inl(ioaddr + CSR12));
+		outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+		outl(t21041_csr14[port], ioaddr + CSR14);
+		outl(t21041_csr15[port], ioaddr + CSR15);
+		outl(t21041_csr13[port], ioaddr + CSR13);
+		new_csr6 = 0x80020000;
+	} else if (tp->chip_id == LC82C168) {
+		if (startup && ! tp->medialock)
+			rtdev->if_port = tp->mii_cnt ? 11 : 0;
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: PNIC PHY status is %3.3x, media %s.\n",
+				   rtdev->name, inl(ioaddr + 0xB8), medianame[rtdev->if_port]);
+		if (tp->mii_cnt) {
+			new_csr6 = 0x810C0000;
+			outl(0x0001, ioaddr + CSR15);
+			outl(0x0201B07A, ioaddr + 0xB8);
+		} else if (startup) {
+			/* Start with 10 Mbps to do autonegotiation. */
+			outl(0x32, ioaddr + CSR12);
+			new_csr6 = 0x00420000;
+			outl(0x0001B078, ioaddr + 0xB8);
+			outl(0x0201B078, ioaddr + 0xB8);
+		} else if (rtdev->if_port == 3  ||  rtdev->if_port == 5) {
+			outl(0x33, ioaddr + CSR12);
+			new_csr6 = 0x01860000;
+			/* Trigger autonegotiation. */
+			outl(startup ? 0x0201F868 : 0x0001F868, ioaddr + 0xB8);
+		} else {
+			outl(0x32, ioaddr + CSR12);
+			new_csr6 = 0x00420000;
+			outl(0x1F078, ioaddr + 0xB8);
+		}
+	} else if (tp->chip_id == DC21040) {					/* 21040 */
+		/* Turn on the xcvr interface. */
+		int csr12 = inl(ioaddr + CSR12);
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: 21040 media type is %s, CSR12 is %2.2x.\n",
+				   rtdev->name, medianame[rtdev->if_port], csr12);
+		if (tulip_media_cap[rtdev->if_port] & MediaAlwaysFD)
+			tp->full_duplex = 1;
+		new_csr6 = 0x20000;
+		/* Set the full-duplex match frame. */
+		outl(FULL_DUPLEX_MAGIC, ioaddr + CSR11);
+		outl(0x00000000, ioaddr + CSR13); /* Reset the serial interface */
+		if (t21040_csr13[rtdev->if_port] & 8) {
+			outl(0x0705, ioaddr + CSR14);
+			outl(0x0006, ioaddr + CSR15);
+		} else {
+			outl(0xffff, ioaddr + CSR14);
+			outl(0x0000, ioaddr + CSR15);
+		}
+		outl(0x8f01 | t21040_csr13[rtdev->if_port], ioaddr + CSR13);
+	} else {					/* Unknown chip type with no media table. */
+		if (tp->default_port == 0)
+			rtdev->if_port = tp->mii_cnt ? 11 : 3;
+		if (tulip_media_cap[rtdev->if_port] & MediaIsMII) {
+			new_csr6 = 0x020E0000;
+		} else if (tulip_media_cap[rtdev->if_port] & MediaIsFx) {
+			new_csr6 = 0x02860000;
+		} else
+			new_csr6 = 0x03860000;
+		if (tulip_debug > 1)
+			/*RTnet*/rtdm_printk(KERN_DEBUG "%s: No media description table, assuming "
+				   "%s transceiver, CSR12 %2.2x.\n",
+				   rtdev->name, medianame[rtdev->if_port],
+				   inl(ioaddr + CSR12));
+	}
+
+	tp->csr6 = new_csr6 | (tp->csr6 & 0xfdff) | (tp->full_duplex ? 0x0200 : 0);
+
+	mdelay(1);
+
+	return;
+}
+
+/*
+  Check the MII negotiated duplex and change the CSR6 setting if
+  required.
+  Return 0 if everything is OK.
+  Return < 0 if the transceiver is missing or has no link beat.
+  */
+int tulip_check_duplex(struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = rtdev->priv;
+	unsigned int bmsr, lpa, negotiated, new_csr6;
+
+	bmsr = tulip_mdio_read(rtdev, tp->phys[0], MII_BMSR);
+	lpa = tulip_mdio_read(rtdev, tp->phys[0], MII_LPA);
+	if (tulip_debug > 1)
+		/*RTnet*/rtdm_printk(KERN_INFO "%s: MII status %4.4x, Link partner report "
+			   "%4.4x.\n", rtdev->name, bmsr, lpa);
+	if (bmsr == 0xffff)
+		return -2;
+	if ((bmsr & BMSR_LSTATUS) == 0) {
+		int new_bmsr = tulip_mdio_read(rtdev, tp->phys[0], MII_BMSR);
+		if ((new_bmsr & BMSR_LSTATUS) == 0) {
+			if (tulip_debug > 1)
+				/*RTnet*/rtdm_printk(KERN_INFO "%s: No link beat on the MII interface,"
+					   " status %4.4x.\n", rtdev->name, new_bmsr);
+			return -1;
+		}
+	}
+	negotiated = lpa & tp->advertising[0];
+	tp->full_duplex = mii_duplex(tp->full_duplex_lock, negotiated);
+
+	new_csr6 = tp->csr6;
+
+	if (negotiated & LPA_100) new_csr6 &= ~TxThreshold;
+	else			  new_csr6 |= TxThreshold;
+	if (tp->full_duplex) new_csr6 |= FullDuplex;
+	else		     new_csr6 &= ~FullDuplex;
+
+	if (new_csr6 != tp->csr6) {
+		tp->csr6 = new_csr6;
+		tulip_restart_rxtx(tp);
+
+		if (tulip_debug > 0)
+			/*RTnet*/rtdm_printk(KERN_INFO "%s: Setting %s-duplex based on MII"
+				   "#%d link partner capability of %4.4x.\n",
+				   rtdev->name, tp->full_duplex ? "full" : "half",
+				   tp->phys[0], lpa);
+		return 1;
+	}
+
+	return 0;
+}
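+
+/* Editor's sketch (assumed caller, not part of this file): the intended
+ * calling pattern for tulip_check_duplex() -- poll it periodically and
+ * treat only negative returns as a missing transceiver or lost link.
+ */
+#if 0
+static void example_media_poll(struct rtnet_device *rtdev)
+{
+	if (tulip_check_duplex(rtdev) < 0)
+		rtdm_printk(KERN_INFO "%s: no MII transceiver or no link\n",
+			    rtdev->name);
+}
+#endif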
+
+void tulip_find_mii (struct rtnet_device *rtdev, int board_idx)
+{
+	struct tulip_private *tp = rtdev->priv;
+	int phyn, phy_idx = 0;
+	int mii_reg0;
+	int mii_advert;
+	unsigned int to_advert, new_bmcr, ane_switch;
+
+	/* Find the connected MII xcvrs.
+	   Doing this in open() would allow detecting external xcvrs later,
+	   but takes much time. */
+	for (phyn = 1; phyn <= 32 && phy_idx < sizeof (tp->phys); phyn++) {
+		int phy = phyn & 0x1f;
+		int mii_status = tulip_mdio_read (rtdev, phy, MII_BMSR);
+		if ((mii_status & 0x8301) == 0x8001 ||
+		    ((mii_status & BMSR_100BASE4) == 0
+		     && (mii_status & 0x7800) != 0)) {
+			/* preserve Becker logic, gain indentation level */
+		} else {
+			continue;
+		}
+
+		mii_reg0 = tulip_mdio_read (rtdev, phy, MII_BMCR);
+		mii_advert = tulip_mdio_read (rtdev, phy, MII_ADVERTISE);
+		ane_switch = 0;
+
+		/* if not advertising at all, gen an
+		 * advertising value from the capability
+		 * bits in BMSR
+		 */
+		if ((mii_advert & ADVERTISE_ALL) == 0) {
+			unsigned int tmpadv = tulip_mdio_read (rtdev, phy, MII_BMSR);
+			mii_advert = ((tmpadv >> 6) & 0x3e0) | 1;
+		}
+
+		if (tp->mii_advertise) {
+			tp->advertising[phy_idx] =
+			to_advert = tp->mii_advertise;
+		} else if (tp->advertising[phy_idx]) {
+			to_advert = tp->advertising[phy_idx];
+		} else {
+			tp->advertising[phy_idx] =
+			tp->mii_advertise =
+			to_advert = mii_advert;
+		}
+
+		tp->phys[phy_idx++] = phy;
+
+		/*RTnet*/rtdm_printk(KERN_INFO "tulip%d:  MII transceiver #%d "
+			"config %4.4x status %4.4x advertising %4.4x.\n",
+			board_idx, phy, mii_reg0, mii_status, mii_advert);
+
+		/* Fixup for DLink with miswired PHY. */
+		if (mii_advert != to_advert) {
+			/*RTnet*/rtdm_printk(KERN_DEBUG "tulip%d:  Advertising %4.4x on PHY %d,"
+				" previously advertising %4.4x.\n",
+				board_idx, to_advert, phy, mii_advert);
+			tulip_mdio_write (rtdev, phy, 4, to_advert);
+		}
+
+		/* Enable autonegotiation: some boards default to off. */
+		if (tp->default_port == 0) {
+			new_bmcr = mii_reg0 | BMCR_ANENABLE;
+			if (new_bmcr != mii_reg0) {
+				new_bmcr |= BMCR_ANRESTART;
+				ane_switch = 1;
+			}
+		}
+		/* ...or disable nway, if forcing media */
+		else {
+			new_bmcr = mii_reg0 & ~BMCR_ANENABLE;
+			if (new_bmcr != mii_reg0)
+				ane_switch = 1;
+		}
+
+		/* clear out bits we never want at this point */
+		new_bmcr &= ~(BMCR_CTST | BMCR_FULLDPLX | BMCR_ISOLATE |
+			      BMCR_PDOWN | BMCR_SPEED100 | BMCR_LOOPBACK |
+			      BMCR_RESET);
+
+		if (tp->full_duplex)
+			new_bmcr |= BMCR_FULLDPLX;
+		if (tulip_media_cap[tp->default_port] & MediaIs100)
+			new_bmcr |= BMCR_SPEED100;
+
+		if (new_bmcr != mii_reg0) {
+			/* some phys need the ANE switch to
+			 * happen before forced media settings
+			 * will "take."  However, we write the
+			 * same value twice in order not to
+			 * confuse the sane phys.
+			 */
+			if (ane_switch) {
+				tulip_mdio_write (rtdev, phy, MII_BMCR, new_bmcr);
+				udelay (10);
+			}
+			tulip_mdio_write (rtdev, phy, MII_BMCR, new_bmcr);
+		}
+	}
+	tp->mii_cnt = phy_idx;
+	if (tp->mtable && tp->mtable->has_mii && phy_idx == 0) {
+		/*RTnet*/rtdm_printk(KERN_INFO "tulip%d: ***WARNING***: No MII transceiver found!\n",
+			board_idx);
+		tp->phys[0] = 1;
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic.c
new file mode 100644
index 0000000..7e57d75
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic.c
@@ -0,0 +1,53 @@
+/*
+	drivers/net/tulip/pnic.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#include <linux/kernel.h>
+#include "tulip.h"
+
+
+void pnic_do_nway(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	u32 phy_reg = inl(ioaddr + 0xB8);
+	u32 new_csr6 = tp->csr6 & ~0x40C40200;
+
+	if (phy_reg & 0x78000000) { /* Ignore baseT4 */
+		if (phy_reg & 0x20000000)		rtdev->if_port = 5;
+		else if (phy_reg & 0x40000000)	rtdev->if_port = 3;
+		else if (phy_reg & 0x10000000)	rtdev->if_port = 4;
+		else if (phy_reg & 0x08000000)	rtdev->if_port = 0;
+		tp->nwayset = 1;
+		new_csr6 = (rtdev->if_port & 1) ? 0x01860000 : 0x00420000;
+		outl(0x32 | (rtdev->if_port & 1), ioaddr + CSR12);
+		if (rtdev->if_port & 1)
+			outl(0x1F868, ioaddr + 0xB8);
+		if (phy_reg & 0x30000000) {
+			tp->full_duplex = 1;
+			new_csr6 |= 0x00000200;
+		}
+		if (tulip_debug > 1)
+			/*RTnet*/printk(KERN_DEBUG "%s: PNIC autonegotiated status %8.8x, %s.\n",
+				   rtdev->name, phy_reg, medianame[rtdev->if_port]);
+		if (tp->csr6 != new_csr6) {
+			tp->csr6 = new_csr6;
+			/* Restart Tx */
+			tulip_restart_rxtx(tp);
+		}
+	}
+}
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic2.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic2.c
new file mode 100644
index 0000000..e08f1ad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/pnic2.c
@@ -0,0 +1,158 @@
+/*
+	drivers/net/tulip/pnic2.c
+
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000,2001  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+        Modified to help support PNIC_II by Kevin B. Hendricks
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+
+/* Understanding the PNIC_II - everything in this file is based
+ * on the PNIC_II_PDF datasheet, which is sorely lacking in detail
+ *
+ * As I understand things, here are the registers and bits that
+ * explain the masks and constants used in this file that are
+ * either different from the 21142/3 or important for basic operation.
+ *
+ *
+ * CSR 6  (mask = 0xfe3bd1fd of bits not to change)
+ * -----
+ * Bit 24    - SCR
+ * Bit 23    - PCS
+ * Bit 22    - TTM (Transmit Threshold Mode)
+ * Bit 18    - Port Select
+ * Bit 13    - Start - 1, Stop - 0 Transmissions
+ * Bit 11:10 - Loop Back Operation Mode
+ * Bit 9     - Full Duplex mode (Advertise 10BaseT-FD if CSR14<7> is set)
+ * Bit 1     - Start - 1, Stop - 0 Receive
+ *
+ *
+ * CSR 14  (mask = 0xfff0ee39 of bits not to change)
+ * ------
+ * Bit 19    - PAUSE-Pause
+ * Bit 18    - Advertise T4
+ * Bit 17    - Advertise 100baseTx-FD
+ * Bit 16    - Advertise 100baseTx-HD
+ * Bit 12    - LTE - Link Test Enable
+ * Bit 7     - ANE - Auto Negotiate Enable
+ * Bit 6     - HDE - Advertise 10baseT-HD
+ * Bit 2     - Reset to Power down - kept as 1 for normal operation
+ * Bit 1     -  Loop Back enable for 10baseT MCC
+ *
+ *
+ * CSR 12
+ * ------
+ * Bit 25    - Partner can do T4
+ * Bit 24    - Partner can do 100baseTx-FD
+ * Bit 23    - Partner can do 100baseTx-HD
+ * Bit 22    - Partner can do 10baseT-FD
+ * Bit 21    - Partner can do 10baseT-HD
+ * Bit 15    - LPN is 1 if all above bits are valid, otherwise 0
+ * Bit 14:12 - autonegotiation state (write 001 to start autonegotiation)
+ * Bit 3     - Autopolarity state
+ * Bit 2     - LS10B - link state of 10baseT: 0 - good, 1 - failed
+ * Bit 1     - LS100B - link state of 100baseT: 0 - good, 1 - failed
+ *
+ *
+ * Data Port Selection Info
+ *-------------------------
+ *
+ * CSR14<7>   CSR6<18>    CSR6<22>    CSR6<23>    CSR6<24>   MODE/PORT
+ *   1           0           0 (X)       0 (X)       1        NWAY
+ *   0           0           1           0 (X)       0        10baseT
+ *   0           1           0           1           1 (X)    100baseT
+ *
+ *
+ */
+
+
+
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/delay.h>
+
+
+void pnic2_start_nway(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+        int csr14;
+        int csr12;
+
+        /* set up what to advertise during the negotiation */
+
+        /* load in csr14  and mask off bits not to touch
+         * comment at top of file explains mask value
+         */
+	csr14 = (inl(ioaddr + CSR14) & 0xfff0ee39);
+
+        /* bit 17 - advertise 100baseTx-FD */
+        if (tp->sym_advertise & 0x0100) csr14 |= 0x00020000;
+
+        /* bit 16 - advertise 100baseTx-HD */
+        if (tp->sym_advertise & 0x0080) csr14 |= 0x00010000;
+
+        /* bit 6 - advertise 10baseT-HD */
+        if (tp->sym_advertise & 0x0020) csr14 |= 0x00000040;
+
+        /* Now set bit 12 Link Test Enable, bit 7 Autonegotiation Enable,
+         * and bit 2 to stay out of 10baseT power-down
+         * (the 0x00001184 constant below also sets bit 8)
+         */
+        csr14 |= 0x00001184;
+
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: Restarting PNIC2 autonegotiation, "
+                      "csr14=%8.8x.\n", rtdev->name, csr14);
+
+        /* tell pnic2_lnk_change we are doing an nway negotiation */
+	rtdev->if_port = 0;
+	tp->nway = tp->mediasense = 1;
+	tp->nwayset = tp->lpar = 0;
+
+        /* now we have to set up csr6 for NWAY state */
+
+	tp->csr6 = inl(ioaddr + CSR6);
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: On Entry to Nway, "
+                      "csr6=%8.8x.\n", rtdev->name, tp->csr6);
+
+        /* mask off any bits not to touch
+         * comment at top of file explains mask value
+         */
+	tp->csr6 = tp->csr6 & 0xfe3bd1fd;
+
+        /* don't forget that bit 9 is also used for advertising */
+        /* advertise 10baseT-FD for the negotiation (bit 9) */
+        if (tp->sym_advertise & 0x0040) tp->csr6 |= 0x00000200;
+
+        /* set bit 24 for nway negotiation mode ...
+         * see Data Port Selection comment at top of file
+         * and "Stop" - reset both Transmit (bit 13) and Receive (bit 1)
+         */
+        tp->csr6 |= 0x01000000;
+	outl(csr14, ioaddr + CSR14);
+	outl(tp->csr6, ioaddr + CSR6);
+        udelay(100);
+
+        /* all set up so now force the negotiation to begin */
+
+        /* read in current values and mask off all but the
+	 * Autonegotiation bits 14:12.  Writing a 001 to those bits
+         * should start the autonegotiation
+         */
+        csr12 = (inl(ioaddr + CSR12) & 0xffff8fff);
+        csr12 |= 0x1000;
+	outl(csr12, ioaddr + CSR12);
+}
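+
+/* Editor's sketch (assumption): decoding the CSR12 autonegotiation state
+ * documented at the top of this file (bits 14:12) once pnic2_start_nway()
+ * has kicked off negotiation.  The "101 == complete" encoding follows the
+ * 21143-style FSM; the PNIC_II datasheet is vague here.
+ */
+#if 0
+static int pnic2_nway_state(long ioaddr)
+{
+	return (inl(ioaddr + CSR12) >> 12) & 7;	/* 5 == nway complete */
+}
+#endif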
+
+
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip.h
new file mode 100644
index 0000000..2bca6fe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip.h
@@ -0,0 +1,490 @@
+/*
+        drivers/net/tulip/tulip.h
+
+        Copyright 2000,2001  The Linux Kernel Team
+        Written/copyright 1994-2001 by Donald Becker.
+
+        This software may be used and distributed according to the terms
+        of the GNU General Public License, incorporated herein by reference.
+
+        Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+        for more information on this driver, or visit the project
+        Web page at http://sourceforge.net/projects/tulip/
+
+*/
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#ifndef __NET_TULIP_H__
+#define __NET_TULIP_H__
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/spinlock.h>
+#include <linux/netdevice.h>
+#include <linux/timer.h>
+#include <linux/delay.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+#include <rtnet_port.h>
+
+
+
+/* undefine, or define to various debugging levels (>4 == obscene levels) */
+#define TULIP_DEBUG 1
+
+/* undefine USE_IO_OPS for MMIO, define for PIO */
+#ifdef CONFIG_TULIP_MMIO
+# undef USE_IO_OPS
+#else
+# define USE_IO_OPS 1
+#endif
+
+
+
+struct tulip_chip_table {
+        char *chip_name;
+        unsigned int io_size;
+        int valid_intrs;	/* CSR7 interrupt enable settings */
+        int flags;
+};
+
+
+enum tbl_flag {
+        HAS_MII			= 0x0001,
+        HAS_MEDIA_TABLE		= 0x0002,
+        CSR12_IN_SROM		= 0x0004,
+        ALWAYS_CHECK_MII	= 0x0008,
+        HAS_ACPI		= 0x0010,
+        MC_HASH_ONLY		= 0x0020, /* Hash-only multicast filter. */
+        HAS_PNICNWAY		= 0x0080,
+        HAS_NWAY		= 0x0040, /* Uses internal NWay xcvr. */
+        HAS_INTR_MITIGATION	= 0x0100,
+        IS_ASIX			= 0x0200,
+        HAS_8023X		= 0x0400,
+        COMET_MAC_ADDR		= 0x0800,
+        HAS_PCI_MWI		= 0x1000,
+};
+
+
+/* chip types.  careful!  order is VERY IMPORTANT here, as these
+ * are used throughout the driver as indices into arrays */
+/* Note 21142 == 21143. */
+enum chips {
+        DC21040 = 0,
+        DC21041 = 1,
+        DC21140 = 2,
+        DC21142 = 3, DC21143 = 3,
+        LC82C168,
+        MX98713,
+        MX98715,
+        MX98725,
+        AX88140,
+        PNIC2,
+        COMET,
+        COMPEX9881,
+        I21145,
+        DM910X,
+};
+
+
+enum MediaIs {
+        MediaIsFD = 1,
+        MediaAlwaysFD = 2,
+        MediaIsMII = 4,
+        MediaIsFx = 8,
+        MediaIs100 = 16
+};
+
+
+/* Offsets to the Command and Status Registers, "CSRs".  All accesses
+   must be longword instructions and quadword aligned. */
+enum tulip_offsets {
+        CSR0 = 0,
+        CSR1 = 0x08,
+        CSR2 = 0x10,
+        CSR3 = 0x18,
+        CSR4 = 0x20,
+        CSR5 = 0x28,
+        CSR6 = 0x30,
+        CSR7 = 0x38,
+        CSR8 = 0x40,
+        CSR9 = 0x48,
+        CSR10 = 0x50,
+        CSR11 = 0x58,
+        CSR12 = 0x60,
+        CSR13 = 0x68,
+        CSR14 = 0x70,
+        CSR15 = 0x78,
+};
+
+/* register offset and bits for CFDD PCI config reg */
+enum pci_cfg_driver_reg {
+        CFDD = 0x40,
+        CFDD_Sleep = (1 << 31),
+        CFDD_Snooze = (1 << 30),
+};
+
+
+/* The bits in the CSR5 status registers, mostly interrupt sources. */
+enum status_bits {
+        TimerInt = 0x800,
+        SytemError = 0x2000,
+        TPLnkFail = 0x1000,
+        TPLnkPass = 0x10,
+        NormalIntr = 0x10000,
+        AbnormalIntr = 0x8000,
+        RxJabber = 0x200,
+        RxDied = 0x100,
+        RxNoBuf = 0x80,
+        RxIntr = 0x40,
+        TxFIFOUnderflow = 0x20,
+        TxJabber = 0x08,
+        TxNoBuf = 0x04,
+        TxDied = 0x02,
+        TxIntr = 0x01,
+};
+
+
+enum tulip_mode_bits {
+        TxThreshold		= (1 << 22),
+        FullDuplex		= (1 << 9),
+        TxOn			= 0x2000,
+        AcceptBroadcast		= 0x0100,
+        AcceptAllMulticast	= 0x0080,
+        AcceptAllPhys		= 0x0040,
+        AcceptRunt		= 0x0008,
+        RxOn			= 0x0002,
+        RxTx			= (TxOn | RxOn),
+};
+
+
+enum tulip_busconfig_bits {
+        MWI			= (1 << 24),
+        MRL			= (1 << 23),
+        MRM			= (1 << 21),
+        CALShift		= 14,
+        BurstLenShift		= 8,
+};
+
+
+/* The Tulip Rx and Tx buffer descriptors. */
+struct tulip_rx_desc {
+        s32 status;
+        s32 length;
+        u32 buffer1;
+        u32 buffer2;
+};
+
+
+struct tulip_tx_desc {
+        s32 status;
+        s32 length;
+        u32 buffer1;
+        u32 buffer2;		/* We use only buffer 1.  */
+};
+
+
+enum desc_status_bits {
+        DescOwned = 0x80000000,
+        RxDescFatalErr = 0x8000,
+        RxWholePkt = 0x0300,
+};
+
+
+enum t21041_csr13_bits {
+        csr13_eng = (0xEF0<<4), /* for eng. purposes only, hardcode at EF0h */
+        csr13_aui = (1<<3), /* clear to force 10bT, set to force AUI/BNC */
+        csr13_cac = (1<<2), /* CSR13/14/15 autoconfiguration */
+        csr13_srl = (1<<0), /* When reset, resets all SIA functions, machines */
+
+        csr13_mask_auibnc = (csr13_eng | csr13_aui | csr13_srl),
+        csr13_mask_10bt = (csr13_eng | csr13_srl),
+};
+
+enum t21143_csr6_bits {
+        csr6_sc = (1<<31),
+        csr6_ra = (1<<30),
+        csr6_ign_dest_msb = (1<<26),
+        csr6_mbo = (1<<25),
+        csr6_scr = (1<<24),  /* scramble mode flag: can't be set */
+        csr6_pcs = (1<<23),  /* Enables PCS functions (symbol mode requires csr6_ps be set) default is set */
+        csr6_ttm = (1<<22),  /* Transmit Threshold Mode, set for 10baseT, 0 for 100BaseTX */
+        csr6_sf = (1<<21),   /* Store and forward. If set ignores TR bits */
+        csr6_hbd = (1<<19),  /* Heartbeat disable. Disables SQE function in 10baseT */
+        csr6_ps = (1<<18),   /* Port Select. 0 (default) = 10baseT, 1 = 100baseTX: can't be set */
+        csr6_ca = (1<<17),   /* Collision Offset Enable. If set uses special algorithm in low collision situations */
+        csr6_trh = (1<<15),  /* Transmit Threshold high bit */
+        csr6_trl = (1<<14),  /* Transmit Threshold low bit */
+
+        /***************************************************************
+         * This table shows transmit threshold values based on media   *
+         * and these two registers (from PNIC1 & 2 docs) Note: this is *
+         * all meaningless if sf is set.                               *
+         ***************************************************************/
+
+        /***********************************
+         * (trh,trl) * 100BaseTX * 10BaseT *
+         ***********************************
+         *   (0,0)   *     128   *    72   *
+         *   (0,1)   *     256   *    96   *
+         *   (1,0)   *     512   *   128   *
+         *   (1,1)   *    1024   *   160   *
+         ***********************************/
+
+        csr6_fc = (1<<12),   /* Forces a collision in next transmission (for testing in loopback mode) */
+        csr6_om_int_loop = (1<<10), /* internal (FIFO) loopback flag */
+        csr6_om_ext_loop = (1<<11), /* external (PMD) loopback flag */
+        /* set both and you get (PHY) loopback */
+        csr6_fd = (1<<9),    /* Full duplex mode, disables heartbeat, no loopback */
+        csr6_pm = (1<<7),    /* Pass All Multicast */
+        csr6_pr = (1<<6),    /* Promiscuous mode */
+        csr6_sb = (1<<5),    /* Start(1)/Stop(0) backoff counter */
+        csr6_if = (1<<4),    /* Inverse Filtering, rejects only addresses in address table: can't be set */
+        csr6_pb = (1<<3),    /* Pass Bad Frames, (1) causes even bad frames to be passed on */
+        csr6_ho = (1<<2),    /* Hash-only filtering mode: can't be set */
+        csr6_hp = (1<<0),    /* Hash/Perfect Receive Filtering Mode: can't be set */
+
+        csr6_mask_capture = (csr6_sc | csr6_ca),
+        csr6_mask_defstate = (csr6_mask_capture | csr6_mbo),
+        csr6_mask_hdcap = (csr6_mask_defstate | csr6_hbd | csr6_ps),
+        csr6_mask_hdcaptt = (csr6_mask_hdcap  | csr6_trh | csr6_trl),
+        csr6_mask_fullcap = (csr6_mask_hdcaptt | csr6_fd),
+        csr6_mask_fullpromisc = (csr6_pr | csr6_pm),
+        csr6_mask_filters = (csr6_hp | csr6_ho | csr6_if),
+        csr6_mask_100bt = (csr6_scr | csr6_pcs | csr6_hbd),
+};
+
+
+/* Keep the ring sizes a power of two for efficiency.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define RX_RING_SIZE	8 /* RTnet: RX_RING_SIZE*2 rtskbs will be preallocated */
+
+#define MEDIA_MASK     31
+
+#define PKT_BUF_SZ		1536	/* Size of each temporary Rx buffer. */
+
+#define TULIP_MIN_CACHE_LINE	8	/* in units of 32-bit words */
+
+#if defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+#define TULIP_MAX_CACHE_LINE	16	/* in units of 32-bit words */
+#else
+#define TULIP_MAX_CACHE_LINE	32	/* in units of 32-bit words */
+#endif
+
+
+/* Ring-wrap flag in length field, use for last ring entry.
+        0x01000000 means chain on buffer2 address,
+        0x02000000 means use the ring start address in CSR2/3.
+   Note: Some work-alike chips do not function correctly in chained mode.
+   The ASIX chip works only in chained mode.
+   Thus we indicate ring mode, but always write the 'next' field for
+   chained mode as well.
+*/
+#define DESC_RING_WRAP 0x02000000
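+
+/* Editor's sketch, illustration only: marking the final descriptor of a
+ * ring with DESC_RING_WRAP, as the comment above prescribes.  The init
+ * loop that would surround this is assumed.
+ */
+#if 0
+static inline void example_mark_ring_end(struct tulip_rx_desc *ring, int size)
+{
+	ring[size - 1].length |= cpu_to_le32(DESC_RING_WRAP);
+}
+#endif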
+
+
+#define EEPROM_SIZE 128         /* 2 << EEPROM_ADDRLEN */
+
+
+#define RUN_AT(x) (jiffies + (x))
+
+#if defined(__i386__)			/* AKA get_unaligned() */
+#define get_u16(ptr) (*(u16 *)(ptr))
+#else
+#define get_u16(ptr) (((u8*)(ptr))[0] + (((u8*)(ptr))[1]<<8))
+#endif
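+
+/* Editor's note (sketch): get_u16() is an alignment-safe little-endian
+ * 16-bit load.  For u8 buf[2] = { 0x34, 0x12 }, get_u16(buf) yields 0x1234
+ * on every architecture; the __i386__ fast path just relies on x86
+ * tolerating unaligned loads.
+ */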
+
+struct medialeaf {
+        u8 type;
+        u8 media;
+        unsigned char *leafdata;
+};
+
+
+struct mediatable {
+        u16 defaultmedia;
+        u8 leafcount;
+        u8 csr12dir;		/* General purpose pin directions. */
+        unsigned has_mii:1;
+        unsigned has_nonmii:1;
+        unsigned has_reset:6;
+        u32 csr15dir;
+        u32 csr15val;		/* 21143 NWay setting. */
+        struct medialeaf mleaf[0];
+};
+
+
+struct mediainfo {
+        struct mediainfo *next;
+        int info_type;
+        int index;
+        unsigned char *info;
+};
+
+struct ring_info {
+        struct /*RTnet*/rtskb	*skb;
+        dma_addr_t	mapping;
+};
+
+
+struct tulip_private {
+        const char *product_name;
+        /*RTnet*/struct rtnet_device *next_module;
+        struct tulip_rx_desc *rx_ring;
+        struct tulip_tx_desc *tx_ring;
+        dma_addr_t rx_ring_dma;
+        dma_addr_t tx_ring_dma;
+        /* The saved address of a sent-in-place packet/buffer, for skfree(). */
+        struct ring_info tx_buffers[TX_RING_SIZE];
+        /* The addresses of receive-in-place skbuffs. */
+        struct ring_info rx_buffers[RX_RING_SIZE];
+        u16 setup_frame[96];	/* Pseudo-Tx frame to init address table. */
+        int chip_id;
+        int revision;
+        int flags;
+        struct net_device_stats stats;
+        u32 mc_filter[2];
+        /*RTnet*/rtdm_lock_t lock;
+        spinlock_t mii_lock;
+        unsigned int cur_rx, cur_tx;	/* The next free ring entry */
+        unsigned int dirty_rx, dirty_tx;	/* The ring entries to be free()ed. */
+
+#ifdef CONFIG_NET_HW_FLOWCONTROL
+#define RX_A_NBF_STOP 0xffffff3f /* To disable RX and RX-NOBUF ints. */
+        int fc_bit;
+        int mit_sel;
+        int mit_change; /* Signal for Interrupt Mitigation */
+#endif
+        unsigned int full_duplex:1;	/* Full-duplex operation requested. */
+        unsigned int full_duplex_lock:1;
+        unsigned int fake_addr:1;	/* Multiport board faked address. */
+        unsigned int default_port:4;	/* Last dev->if_port value. */
+        unsigned int media2:4;	/* Secondary monitored media port. */
+        unsigned int medialock:1;	/* Don't sense media type. */
+        unsigned int mediasense:1;	/* Media sensing in progress. */
+        unsigned int nway:1, nwayset:1;		/* 21143 internal NWay. */
+        unsigned int csr0;	/* CSR0 setting. */
+        unsigned int csr6;	/* Current CSR6 control settings. */
+        unsigned char eeprom[EEPROM_SIZE];	/* Serial EEPROM contents. */
+        void (*link_change) (/*RTnet*/struct rtnet_device *rtdev, int csr5);
+        u16 sym_advertise, mii_advertise; /* NWay capabilities advertised.  */
+        u16 lpar;		/* 21143 Link partner ability. */
+        u16 advertising[4];
+        signed char phys[4], mii_cnt;	/* MII device addresses. */
+        struct mediatable *mtable;
+        int cur_index;		/* Current media index. */
+        int saved_if_port;
+        struct pci_dev *pdev;
+        int ttimer;
+        int susp_rx;
+        unsigned long nir;
+        unsigned long base_addr;
+        int pad0, pad1;		/* Used for 8-byte alignment */
+        rtdm_irq_t irq_handle;
+};
+
+
+struct eeprom_fixup {
+        char *name;
+        unsigned char addr0;
+        unsigned char addr1;
+        unsigned char addr2;
+        u16 newtable[32];	/* Max length below. */
+};
+
+
+/* 21142.c */
+extern u16 t21142_csr14[];
+void t21142_start_nway(/*RTnet*/struct rtnet_device *rtdev);
+void t21142_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+
+
+/* PNIC2.c */
+void pnic2_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+void pnic2_start_nway(/*RTnet*/struct rtnet_device *rtdev);
+void pnic2_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+
+/* eeprom.c */
+void tulip_parse_eeprom(struct rtnet_device *rtdev);
+int tulip_read_eeprom(long ioaddr, int location, int addr_len);
+
+/* interrupt.c */
+extern unsigned int tulip_max_interrupt_work;
+extern int tulip_rx_copybreak;
+int tulip_interrupt(rtdm_irq_t *irq_handle);
+int tulip_refill_rx(/*RTnet*/struct rtnet_device *rtdev);
+
+/* media.c */
+int tulip_mdio_read(struct rtnet_device *dev, int phy_id, int location);
+void tulip_mdio_write(struct rtnet_device *dev, int phy_id, int location, int value);
+void tulip_select_media(struct rtnet_device *dev, int startup);
+int tulip_check_duplex(struct rtnet_device *dev);
+void tulip_find_mii (struct rtnet_device *dev, int board_idx);
+
+/* pnic.c */
+void pnic_do_nway(/*RTnet*/struct rtnet_device *rtdev);
+void pnic_lnk_change(/*RTnet*/struct rtnet_device *rtdev, int csr5);
+
+/* tulip_core.c */
+extern int tulip_debug;
+extern const char * const medianame[];
+extern const char tulip_media_cap[];
+extern struct tulip_chip_table tulip_tbl[];
+extern u8 t21040_csr13[];
+extern u16 t21041_csr13[];
+extern u16 t21041_csr14[];
+extern u16 t21041_csr15[];
+
+#ifndef USE_IO_OPS
+#undef inb
+#undef inw
+#undef inl
+#undef outb
+#undef outw
+#undef outl
+#define inb(addr) readb((void*)(addr))
+#define inw(addr) readw((void*)(addr))
+#define inl(addr) readl((void*)(addr))
+#define outb(val,addr) writeb((val), (void*)(addr))
+#define outw(val,addr) writew((val), (void*)(addr))
+#define outl(val,addr) writel((val), (void*)(addr))
+#endif /* !USE_IO_OPS */
+
+
+
+static inline void tulip_start_rxtx(struct tulip_private *tp)
+{
+        long ioaddr = tp->base_addr;
+        outl(tp->csr6 | RxTx, ioaddr + CSR6);
+        barrier();
+        (void) inl(ioaddr + CSR6); /* mmio sync */
+}
+
+static inline void tulip_stop_rxtx(struct tulip_private *tp)
+{
+        long ioaddr = tp->base_addr;
+        u32 csr6 = inl(ioaddr + CSR6);
+
+        if (csr6 & RxTx) {
+                outl(csr6 & ~RxTx, ioaddr + CSR6);
+                barrier();
+                (void) inl(ioaddr + CSR6); /* mmio sync */
+        }
+}
+
+static inline void tulip_restart_rxtx(struct tulip_private *tp)
+{
+        tulip_stop_rxtx(tp);
+        rtdm_task_busy_sleep(5000); /* ~5 us; rtdm_task_busy_sleep() takes nanoseconds */
+        tulip_start_rxtx(tp);
+}
+
+#endif /* __NET_TULIP_H__ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip_core.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip_core.c
new file mode 100644
index 0000000..3d0b3db
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/tulip/tulip_core.c
@@ -0,0 +1,1406 @@
+/* tulip_core.c: A DEC 21x4x-family ethernet driver for Linux. */
+
+/*
+	Maintained by Jeff Garzik <jgarzik@mandrakesoft.com>
+	Copyright 2000-2002  The Linux Kernel Team
+	Written/copyright 1994-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms
+	of the GNU General Public License, incorporated herein by reference.
+
+	Please refer to Documentation/DocBook/tulip.{pdf,ps,html}
+	for more information on this driver, or visit the project
+	Web page at http://sourceforge.net/projects/tulip/
+
+*/
+
+/* Ported to RTnet by Wittawat Yamwong <wittawat@web.de> */
+
+#define DRV_NAME	"tulip-rt"
+#define DRV_VERSION	"0.9.15-pre11-rt"
+#define DRV_RELDATE	"May 11, 2002"
+
+#include <linux/module.h>
+#include "tulip.h"
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/etherdevice.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/unaligned.h>
+
+#ifdef __sparc__
+#include <asm/pbm.h>
+#endif
+
+#include <rtnet_port.h>
+
+static char version[] =
+	"Linux Tulip driver version " DRV_VERSION " (" DRV_RELDATE ")\n";
+
+
+/* A few user-configurable values. */
+
+/* Maximum events (Rx packets, etc.) to handle at each interrupt. */
+static unsigned int max_interrupt_work = 25;
+
+#define MAX_UNITS 8
+/* Used to pass the full-duplex flag, etc. */
+static int full_duplex[MAX_UNITS];
+static int options[MAX_UNITS];
+static int mtu[MAX_UNITS];			/* Jumbo MTU for interfaces. */
+
+/*  The possible media types that can be set in options[] are: */
+const char * const medianame[32] = {
+	"10baseT", "10base2", "AUI", "100baseTx",
+	"10baseT-FDX", "100baseTx-FDX", "100baseT4", "100baseFx",
+	"100baseFx-FDX", "MII 10baseT", "MII 10baseT-FDX", "MII",
+	"10baseT(forced)", "MII 100baseTx", "MII 100baseTx-FDX", "MII 100baseT4",
+	"MII 100baseFx-HDX", "MII 100baseFx-FDX", "Home-PNA 1Mbps", "Invalid-19",
+	"","","","", "","","","",  "","","","Transceiver reset",
+};
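+
+/* Editor's note: the index into this table doubles as the media code kept
+ * in dev->if_port and passed through options[] -- e.g. options=11 selects
+ * "MII" and options=5 selects "100baseTx-FDX". */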
+
+/* Set the copy breakpoint for the copy-only-tiny-buffer Rx structure. */
+#if defined(__alpha__) || defined(__arm__) || defined(__hppa__) \
+	|| defined(__sparc__) || defined(__ia64__) \
+	|| defined(__sh__) || defined(__mips__)
+static int rx_copybreak = 1518;
+#else
+static int rx_copybreak = 100;
+#endif
+
+/*
+  Set the bus performance register.
+	Typical: set 16 longword cache alignment, no burst limit.
+
+	Cache alignment (bits 15:14)	Burst length (bits 13:8)
+	  0x0000  no alignment		  0x0000  unlimited
+	  0x4000  8 longwords		  0x0100  1 longword
+	  0x8000  16 longwords		  0x0200  2 longwords
+	  0xC000  32 longwords		  0x0400  4 longwords
+					  0x0800  8 longwords
+					  0x1000  16 longwords
+					  0x2000  32 longwords
+
+	Warning: many older 486 systems are broken and require setting
+	0x00A04800 (8 longword cache alignment, 8 longword burst).
+	ToDo: Non-Intel setting could be better.
+*/
+
+#if defined(__alpha__) || defined(__ia64__) || defined(__x86_64__)
+static int csr0 = 0x01A00000 | 0xE000;
+#elif defined(__i386__) || defined(__powerpc__)
+static int csr0 = 0x01A00000 | 0x8000;
+#elif defined(__sparc__) || defined(__hppa__)
+/* The UltraSparc PCI controllers will disconnect at every 64-byte
+ * crossing anyways so it makes no sense to tell Tulip to burst
+ * any more than that.
+ */
+static int csr0 = 0x01A00000 | 0x9000;
+#elif defined(__arm__) || defined(__sh__)
+static int csr0 = 0x01A00000 | 0x4800;
+#elif defined(__mips__)
+static int csr0 = 0x00200000 | 0x4000;
+#else
+#warning Processor architecture undefined!
+static int csr0 = 0x00A00000 | 0x4800;
+#endif
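+
+/*
+ * Editor's note -- an illustrative helper (not part of the original
+ * driver) that composes the two CSR0 arbitration fields from the table
+ * above: 'cal' selects the cache alignment (1=8, 2=16, 3=32 longwords)
+ * and 'burst' the burst length in longwords (0 = unlimited).  CALShift
+ * and BurstLenShift are assumed from tulip.h, where the probe code below
+ * also uses them.  The remaining CSR0 bits (e.g. 0x01A00000) still come
+ * from the per-architecture defaults above.
+ */
+static inline int tulip_csr0_arb_bits(int cal, int burst)
+{
+	return (cal << CALShift) | (burst << BurstLenShift);
+}
+/* e.g. tulip_csr0_arb_bits(1, 8) == 0x4800, the safe old-486 setting. */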
+
+/* Operational parameters that usually are not changed. */
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (4*HZ)
+
+
+MODULE_AUTHOR("The Linux Kernel Team");
+MODULE_DESCRIPTION("Digital 21*4* Tulip ethernet driver");
+MODULE_LICENSE("GPL");
+module_param(tulip_debug, int, 0444);
+module_param(max_interrupt_work, int, 0444);
+/*MODULE_PARM(rx_copybreak, "i");*/
+module_param(csr0, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+
+#define PFX DRV_NAME ": "
+
+#ifdef TULIP_DEBUG
+int tulip_debug = TULIP_DEBUG;
+#else
+int tulip_debug = 1;
+#endif
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+
+
+
+/*
+ * This table is used during operation for capabilities and the media
+ * timer.
+ *
+ * It is indexed via the values in 'enum chips'.
+ */
+
+struct tulip_chip_table tulip_tbl[] = {
+  /* DC21040 */
+  { "Digital DC21040 Tulip", 128, 0x0001ebef, 0 },
+
+  /* DC21041 */
+  { "Digital DC21041 Tulip", 128, 0x0001ebef,
+	HAS_MEDIA_TABLE | HAS_NWAY },
+
+  /* DC21140 */
+  { "Digital DS21140 Tulip", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_PCI_MWI },
+
+  /* DC21142, DC21143 */
+  { "Digital DS21143 Tulip", 128, 0x0801fbff,
+	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI | HAS_NWAY
+	| HAS_INTR_MITIGATION | HAS_PCI_MWI },
+
+  /* LC82C168 */
+  { "Lite-On 82c168 PNIC", 256, 0x0001fbef,
+	HAS_MII | HAS_PNICNWAY },
+
+  /* MX98713 */
+  { "Macronix 98713 PMAC", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM },
+
+  /* MX98715 */
+  { "Macronix 98715 PMAC", 256, 0x0001ebef,
+	HAS_MEDIA_TABLE },
+
+  /* MX98725 */
+  { "Macronix 98725 PMAC", 256, 0x0001ebef,
+	HAS_MEDIA_TABLE },
+
+  /* AX88140 */
+  { "ASIX AX88140", 128, 0x0001fbff,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | MC_HASH_ONLY | IS_ASIX },
+
+  /* PNIC2 */
+  { "Lite-On PNIC-II", 256, 0x0801fbff,
+	HAS_MII | HAS_NWAY | HAS_8023X | HAS_PCI_MWI },
+
+  /* COMET */
+  { "ADMtek Comet", 256, 0x0001abef,
+	MC_HASH_ONLY | COMET_MAC_ADDR },
+
+  /* COMPEX9881 */
+  { "Compex 9881 PMAC", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM },
+
+  /* I21145 */
+  { "Intel DS21145 Tulip", 128, 0x0801fbff,
+	HAS_MII | HAS_MEDIA_TABLE | ALWAYS_CHECK_MII | HAS_ACPI
+	| HAS_NWAY | HAS_PCI_MWI },
+
+  /* DM910X */
+  { "Davicom DM9102/DM9102A", 128, 0x0001ebef,
+	HAS_MII | HAS_MEDIA_TABLE | CSR12_IN_SROM | HAS_ACPI },
+};
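+
+/* Editor's note: the probe path indexes this table with the enum value
+ * stashed in tulip_pci_tbl[].driver_data below -- e.g.
+ * tulip_tbl[chip_idx].io_size in tulip_init_one(). */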
+
+
+static struct pci_device_id tulip_pci_tbl[] = {
+	{ 0x1011, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21040 },
+	{ 0x1011, 0x0014, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21041 },
+	{ 0x1011, 0x0009, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21140 },
+	{ 0x1011, 0x0019, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DC21143 },
+	{ 0x11AD, 0x0002, PCI_ANY_ID, PCI_ANY_ID, 0, 0, LC82C168 },
+	{ 0x10d9, 0x0512, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98713 },
+	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+/*	{ 0x10d9, 0x0531, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98725 },*/
+	{ 0x125B, 0x1400, PCI_ANY_ID, PCI_ANY_ID, 0, 0, AX88140 },
+	{ 0x11AD, 0xc115, PCI_ANY_ID, PCI_ANY_ID, 0, 0, PNIC2 },
+	{ 0x1317, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1317, 0x0985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1317, 0x1985, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1317, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x13D1, 0xAB02, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x13D1, 0xAB03, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x13D1, 0xAB08, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x104A, 0x0981, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x104A, 0x2774, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x11F6, 0x9881, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMPEX9881 },
+	{ 0x8086, 0x0039, PCI_ANY_ID, PCI_ANY_ID, 0, 0, I21145 },
+	{ 0x1282, 0x9100, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+	{ 0x1282, 0x9102, PCI_ANY_ID, PCI_ANY_ID, 0, 0, DM910X },
+	{ 0x1113, 0x1216, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ 0x1113, 0x1217, PCI_ANY_ID, PCI_ANY_ID, 0, 0, MX98715 },
+	{ 0x1113, 0x9511, PCI_ANY_ID, PCI_ANY_ID, 0, 0, COMET },
+	{ } /* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, tulip_pci_tbl);
+
+
+/* A media-capability map (duplex, MII, 100 Mbit flags) for media types. */
+const char tulip_media_cap[32] =
+{0,0,0,16,  3,19,16,24,  27,4,7,5, 0,20,23,20,  28,31,0,0, };
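+
+/* Editor's note -- a small hedged helper (not in the original source)
+ * showing how the capability map above is queried; MediaIsMII is one of
+ * the flag bits from tulip.h, alongside MediaAlwaysFD used for the
+ * duplex decision in tulip_up(). */
+static inline int tulip_media_is_mii(int media)
+{
+	return tulip_media_cap[media & 31] & MediaIsMII;
+}
+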
+u8 t21040_csr13[] = {2,0x0C,8,4,  4,0,0,0, 0,0,0,0, 4,0,0,0};
+
+/* 21041 transceiver register settings: 10-T, 10-2, AUI, 10-T, 10T-FD*/
+u16 t21041_csr13[] = {
+	csr13_mask_10bt,		/* 10-T */
+	csr13_mask_auibnc,		/* 10-2 */
+	csr13_mask_auibnc,		/* AUI */
+	csr13_mask_10bt,		/* 10-T */
+	csr13_mask_10bt,		/* 10T-FD */
+};
+u16 t21041_csr14[] = { 0xFFFF, 0xF7FD, 0xF7FD, 0x7F3F, 0x7F3D, };
+u16 t21041_csr15[] = { 0x0008, 0x0006, 0x000E, 0x0008, 0x0008, };
+
+
+static void tulip_init_ring(/*RTnet*/struct rtnet_device *rtdev);
+static int tulip_start_xmit(struct /*RTnet*/rtskb *skb, /*RTnet*/struct rtnet_device *rtdev);
+static int tulip_open(/*RTnet*/struct rtnet_device *rtdev);
+static int tulip_close(/*RTnet*/struct rtnet_device *rtdev);
+static void tulip_up(/*RTnet*/struct rtnet_device *rtdev);
+static void tulip_down(/*RTnet*/struct rtnet_device *rtdev);
+static struct net_device_stats *tulip_get_stats(struct rtnet_device *rtdev);
+//static void set_rx_mode(struct net_device *dev);
+
+
+static void tulip_set_power_state (struct tulip_private *tp,
+				   int sleep, int snooze)
+{
+	if (tp->flags & HAS_ACPI) {
+		u32 tmp, newtmp;
+		pci_read_config_dword (tp->pdev, CFDD, &tmp);
+		newtmp = tmp & ~(CFDD_Sleep | CFDD_Snooze);
+		if (sleep)
+			newtmp |= CFDD_Sleep;
+		else if (snooze)
+			newtmp |= CFDD_Snooze;
+		if (tmp != newtmp)
+			pci_write_config_dword (tp->pdev, CFDD, newtmp);
+	}
+
+}
+
+static void tulip_up(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	int i;
+
+	/* Wake the chip from sleep/snooze mode. */
+	tulip_set_power_state (tp, 0, 0);
+
+	/* On some chip revs we must set the MII/SYM port before the reset!? */
+	if (tp->mii_cnt  ||  (tp->mtable  &&  tp->mtable->has_mii))
+		outl(0x00040000, ioaddr + CSR6);
+
+	/* Reset the chip, holding bit 0 set at least 50 PCI cycles. */
+	outl(0x00000001, ioaddr + CSR0);
+	udelay(100);
+
+	/* Deassert reset.
+	   Wait the specified 50 PCI cycles after a reset by initializing
+	   Tx and Rx queues and the address filter list. */
+	outl(tp->csr0, ioaddr + CSR0);
+	udelay(100);
+
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: tulip_up(), irq==%d.\n", rtdev->name, rtdev->irq);
+
+	outl(tp->rx_ring_dma, ioaddr + CSR3);
+	outl(tp->tx_ring_dma, ioaddr + CSR4);
+	tp->cur_rx = tp->cur_tx = 0;
+	tp->dirty_rx = tp->dirty_tx = 0;
+
+	if (tp->flags & MC_HASH_ONLY) {
+		u32 addr_low = cpu_to_le32(get_unaligned((u32 *)rtdev->dev_addr));
+		u32 addr_high = cpu_to_le32(get_unaligned((u16 *)(rtdev->dev_addr+4)));
+		if (tp->chip_id == AX88140) {
+			outl(0, ioaddr + CSR13);
+			outl(addr_low,  ioaddr + CSR14);
+			outl(1, ioaddr + CSR13);
+			outl(addr_high, ioaddr + CSR14);
+		} else if (tp->flags & COMET_MAC_ADDR) {
+			outl(addr_low,  ioaddr + 0xA4);
+			outl(addr_high, ioaddr + 0xA8);
+			outl(0, ioaddr + 0xAC);
+			outl(0, ioaddr + 0xB0);
+		}
+	} else {
+		/* This is set_rx_mode(), but without starting the transmitter. */
+		u16 *eaddrs = (u16 *)rtdev->dev_addr;
+		u16 *setup_frm = &tp->setup_frame[15*6];
+		dma_addr_t mapping;
+
+		/* 21140 bug: you must add the broadcast address. */
+		memset(tp->setup_frame, 0xff, sizeof(tp->setup_frame));
+		/* Fill the final entry of the table with our physical address. */
+		*setup_frm++ = eaddrs[0]; *setup_frm++ = eaddrs[0];
+		*setup_frm++ = eaddrs[1]; *setup_frm++ = eaddrs[1];
+		*setup_frm++ = eaddrs[2]; *setup_frm++ = eaddrs[2];
+
+		mapping = dma_map_single(&tp->pdev->dev, tp->setup_frame,
+					 sizeof(tp->setup_frame),
+					 DMA_TO_DEVICE);
+		tp->tx_buffers[tp->cur_tx].skb = NULL;
+		tp->tx_buffers[tp->cur_tx].mapping = mapping;
+
+		/* Put the setup frame on the Tx list. */
+		tp->tx_ring[tp->cur_tx].length = cpu_to_le32(0x08000000 | 192);
+		tp->tx_ring[tp->cur_tx].buffer1 = cpu_to_le32(mapping);
+		tp->tx_ring[tp->cur_tx].status = cpu_to_le32(DescOwned);
+
+		tp->cur_tx++;
+	}
+
+	tp->saved_if_port = rtdev->if_port;
+	if (rtdev->if_port == 0)
+		rtdev->if_port = tp->default_port;
+
+	/* Allow selecting a default media. */
+	i = 0;
+	if (tp->mtable == NULL)
+		goto media_picked;
+	if (rtdev->if_port) {
+		int looking_for = tulip_media_cap[rtdev->if_port] & MediaIsMII ? 11 :
+			(rtdev->if_port == 12 ? 0 : rtdev->if_port);
+		for (i = 0; i < tp->mtable->leafcount; i++)
+			if (tp->mtable->mleaf[i].media == looking_for) {
+				printk(KERN_INFO "%s: Using user-specified media %s.\n",
+					   rtdev->name, medianame[rtdev->if_port]);
+				goto media_picked;
+			}
+	}
+	if ((tp->mtable->defaultmedia & 0x0800) == 0) {
+		int looking_for = tp->mtable->defaultmedia & MEDIA_MASK;
+		for (i = 0; i < tp->mtable->leafcount; i++)
+			if (tp->mtable->mleaf[i].media == looking_for) {
+				printk(KERN_INFO "%s: Using EEPROM-set media %s.\n",
+					   rtdev->name, medianame[looking_for]);
+				goto media_picked;
+			}
+	}
+	/* Start sensing first non-full-duplex media. */
+	for (i = tp->mtable->leafcount - 1;
+		 (tulip_media_cap[tp->mtable->mleaf[i].media] & MediaAlwaysFD) && i > 0; i--)
+		;
+media_picked:
+
+	tp->csr6 = 0;
+	tp->cur_index = i;
+	tp->nwayset = 0;
+
+	if (rtdev->if_port) {
+		if (tp->chip_id == DC21143  &&
+		    (tulip_media_cap[rtdev->if_port] & MediaIsMII)) {
+			/* We must reset the media CSRs when we force-select MII mode. */
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+			outl(0x0008, ioaddr + CSR15);
+		}
+		tulip_select_media(rtdev, 1);
+	} else if (tp->chip_id == DC21041) {
+		rtdev->if_port = 0;
+		tp->nway = tp->mediasense = 1;
+		tp->nwayset = tp->lpar = 0;
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+		tp->csr6 = 0x80020000;
+		if (tp->sym_advertise & 0x0040)
+			tp->csr6 |= FullDuplex;
+		outl(tp->csr6, ioaddr + CSR6);
+		outl(0x0000EF01, ioaddr + CSR13);
+
+	} else if (tp->chip_id == DC21142) {
+		if (tp->mii_cnt) {
+			tulip_select_media(rtdev, 1);
+			if (tulip_debug > 1)
+				printk(KERN_INFO "%s: Using MII transceiver %d, status %4.4x.\n",
+					   rtdev->name, tp->phys[0], tulip_mdio_read(rtdev, tp->phys[0], 1));
+			outl(csr6_mask_defstate, ioaddr + CSR6);
+			tp->csr6 = csr6_mask_hdcap;
+			rtdev->if_port = 11;
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+		} else
+			t21142_start_nway(rtdev);
+	} else if (tp->chip_id == PNIC2) {
+		/* for initial startup advertise 10/100 Full and Half */
+		tp->sym_advertise = 0x01E0;
+		/* enable autonegotiate end interrupt */
+		outl(inl(ioaddr+CSR5)| 0x00008010, ioaddr + CSR5);
+		outl(inl(ioaddr+CSR7)| 0x00008010, ioaddr + CSR7);
+		pnic2_start_nway(rtdev);
+	} else if (tp->chip_id == LC82C168  &&  ! tp->medialock) {
+		if (tp->mii_cnt) {
+			rtdev->if_port = 11;
+			tp->csr6 = 0x814C0000 | (tp->full_duplex ? 0x0200 : 0);
+			outl(0x0001, ioaddr + CSR15);
+		} else if (inl(ioaddr + CSR5) & TPLnkPass)
+			pnic_do_nway(rtdev);
+		else {
+			/* Start with 10mbps to do autonegotiation. */
+			outl(0x32, ioaddr + CSR12);
+			tp->csr6 = 0x00420000;
+			outl(0x0001B078, ioaddr + 0xB8);
+			outl(0x0201B078, ioaddr + 0xB8);
+		}
+	} else if ((tp->chip_id == MX98713 || tp->chip_id == COMPEX9881)
+			   && ! tp->medialock) {
+		rtdev->if_port = 0;
+		tp->csr6 = 0x01880000 | (tp->full_duplex ? 0x0200 : 0);
+		outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+	} else if (tp->chip_id == MX98715 || tp->chip_id == MX98725) {
+		/* Provided by BOLO, Macronix - 12/10/1998. */
+		rtdev->if_port = 0;
+		tp->csr6 = 0x01a80200;
+		outl(0x0f370000 | inw(ioaddr + 0x80), ioaddr + 0x80);
+		outl(0x11000 | inw(ioaddr + 0xa0), ioaddr + 0xa0);
+	} else if (tp->chip_id == COMET) {
+		/* Enable automatic Tx underrun recovery. */
+		outl(inl(ioaddr + 0x88) | 1, ioaddr + 0x88);
+		rtdev->if_port = tp->mii_cnt ? 11 : 0;
+		tp->csr6 = 0x00040000;
+	} else if (tp->chip_id == AX88140) {
+		tp->csr6 = tp->mii_cnt ? 0x00040100 : 0x00000100;
+	} else
+		tulip_select_media(rtdev, 1);
+
+	/* Start the chip's Tx to process setup frame. */
+	tulip_stop_rxtx(tp);
+	barrier();
+	udelay(5);
+	outl(tp->csr6 | TxOn, ioaddr + CSR6);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR5);
+	outl(tulip_tbl[tp->chip_id].valid_intrs, ioaddr + CSR7);
+	tulip_start_rxtx(tp);
+	outl(0, ioaddr + CSR2);		/* Rx poll demand */
+
+	if (tulip_debug > 2) {
+		printk(KERN_DEBUG "%s: Done tulip_up(), CSR0 %8.8x, CSR5 %8.8x CSR6 %8.8x.\n",
+			   rtdev->name, inl(ioaddr + CSR0), inl(ioaddr + CSR5),
+			   inl(ioaddr + CSR6));
+	}
+}
+
+
+static int
+tulip_open(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int retval;
+
+	if ((retval = /*RTnet*/rtdm_irq_request(&tp->irq_handle, rtdev->irq,
+						tulip_interrupt, 0, "rt_tulip",
+						rtdev))) {
+		printk(KERN_ERR "%s: Unable to install ISR for IRQ %d\n",
+			  rtdev->name, rtdev->irq);
+		return retval;
+	}
+
+	rt_stack_connect(rtdev, &STACK_manager);
+
+	tulip_init_ring (rtdev);
+
+	tulip_up (rtdev);
+
+	rtnetif_start_queue (rtdev);
+
+	return 0;
+}
+
+/* Initialize the Rx and Tx rings, along with various 'dev' bits. */
+static void tulip_init_ring(/*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int i;
+
+	tp->susp_rx = 0;
+	tp->ttimer = 0;
+	tp->nir = 0;
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		tp->rx_ring[i].status = 0x00000000;
+		tp->rx_ring[i].length = cpu_to_le32(PKT_BUF_SZ);
+		tp->rx_ring[i].buffer2 = cpu_to_le32(tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * (i + 1));
+		tp->rx_buffers[i].skb = NULL;
+		tp->rx_buffers[i].mapping = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	tp->rx_ring[i-1].length = cpu_to_le32(PKT_BUF_SZ | DESC_RING_WRAP);
+	tp->rx_ring[i-1].buffer2 = cpu_to_le32(tp->rx_ring_dma);
+
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		dma_addr_t mapping;
+
+		/* Note the receive buffer must be longword aligned.
+		   rtnetdev_alloc_rtskb() (like dev_alloc_skb()) provides 16 byte
+		   alignment.  But do *not* use skb_reserve() to align the IP header! */
+		struct /*RTnet*/rtskb *skb = /*RTnet*/rtnetdev_alloc_rtskb(rtdev, PKT_BUF_SZ);
+		tp->rx_buffers[i].skb = skb;
+		if (skb == NULL)
+			break;
+		mapping = dma_map_single(&tp->pdev->dev, skb->tail, PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+		tp->rx_buffers[i].mapping = mapping;
+		tp->rx_ring[i].status = cpu_to_le32(DescOwned);	/* Owned by Tulip chip */
+		tp->rx_ring[i].buffer1 = cpu_to_le32(mapping);
+	}
+	tp->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+
+	/* The Tx buffer descriptor is filled in as needed, but we
+	   do need to clear the ownership bit. */
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		tp->tx_buffers[i].skb = NULL;
+		tp->tx_buffers[i].mapping = 0;
+		tp->tx_ring[i].status = 0x00000000;
+		tp->tx_ring[i].buffer2 = cpu_to_le32(tp->tx_ring_dma + sizeof(struct tulip_tx_desc) * (i + 1));
+	}
+	tp->tx_ring[i-1].buffer2 = cpu_to_le32(tp->tx_ring_dma);
+}
+
+static int
+tulip_start_xmit(struct /*RTnet*/rtskb *skb, /*RTnet*/struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *)rtdev->priv;
+	int entry;
+	u32 flag;
+	dma_addr_t mapping;
+	/*RTnet*/
+	rtdm_lockctx_t context;
+
+
+	rtdm_lock_get_irqsave(&tp->lock, context);
+
+	/* TODO: move to rtdev_xmit, use queue */
+	if (rtnetif_queue_stopped(rtdev)) {
+		dev_kfree_rtskb(skb);
+		tp->stats.tx_dropped++;
+
+		rtdm_lock_put_irqrestore(&tp->lock, context);
+		return 0;
+	}
+	/*RTnet*/
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = tp->cur_tx % TX_RING_SIZE;
+
+	tp->tx_buffers[entry].skb = skb;
+	mapping = dma_map_single(&tp->pdev->dev, skb->data, skb->len,
+				 DMA_TO_DEVICE);
+	tp->tx_buffers[entry].mapping = mapping;
+	tp->tx_ring[entry].buffer1 = cpu_to_le32(mapping);
+
+	if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE/2) {/* Typical path */
+		flag = 0x60000000; /* No interrupt */
+	} else if (tp->cur_tx - tp->dirty_tx == TX_RING_SIZE/2) {
+		flag = 0xe0000000; /* Tx-done intr. */
+	} else if (tp->cur_tx - tp->dirty_tx < TX_RING_SIZE - 2) {
+		flag = 0x60000000; /* No Tx-done intr. */
+	} else {		/* Leave room for set_rx_mode() to fill entries. */
+		flag = 0xe0000000; /* Tx-done intr. */
+		rtnetif_stop_queue(rtdev);
+	}
+	if (entry == TX_RING_SIZE-1)
+		flag = 0xe0000000 | DESC_RING_WRAP;
+
+	tp->tx_ring[entry].length = cpu_to_le32(skb->len | flag);
+	/* if we were using Transmit Automatic Polling, we would need a
+	 * wmb() here. */
+	tp->tx_ring[entry].status = cpu_to_le32(DescOwned);
+
+	/*RTnet*/
+	/* get and patch time stamp just before the transmission */
+	if (skb->xmit_stamp)
+		*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() + *skb->xmit_stamp);
+	/*RTnet*/
+
+	wmb();
+
+	tp->cur_tx++;
+
+	/* Trigger an immediate transmit demand. */
+	outl(0, rtdev->base_addr + CSR1);
+
+	/*RTnet*/
+	rtdm_lock_put_irqrestore(&tp->lock, context);
+	/*RTnet*/
+
+	return 0;
+}
+
+static void tulip_clean_tx_ring(struct tulip_private *tp)
+{
+	unsigned int dirty_tx;
+
+	for (dirty_tx = tp->dirty_tx ; tp->cur_tx - dirty_tx > 0;
+		dirty_tx++) {
+		int entry = dirty_tx % TX_RING_SIZE;
+		int status = le32_to_cpu(tp->tx_ring[entry].status);
+
+		if (status < 0) {
+			tp->stats.tx_errors++;	/* It wasn't Txed */
+			tp->tx_ring[entry].status = 0;
+		}
+
+		/* Check for Tx filter setup frames. */
+		if (tp->tx_buffers[entry].skb == NULL) {
+			/* test because dummy frames not mapped */
+			if (tp->tx_buffers[entry].mapping)
+				dma_unmap_single(&tp->pdev->dev,
+					tp->tx_buffers[entry].mapping,
+					sizeof(tp->setup_frame),
+					DMA_TO_DEVICE);
+			continue;
+		}
+
+		dma_unmap_single(&tp->pdev->dev, tp->tx_buffers[entry].mapping,
+				tp->tx_buffers[entry].skb->len,
+				DMA_TO_DEVICE);
+
+		/* Free the original skb. */
+		/*RTnet*/dev_kfree_rtskb(tp->tx_buffers[entry].skb);
+		tp->tx_buffers[entry].skb = NULL;
+		tp->tx_buffers[entry].mapping = 0;
+	}
+}
+
+static struct net_device_stats *tulip_get_stats(struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;
+	return &tp->stats;
+}
+
+static void tulip_down (/*RTnet*/struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;
+
+	rtdm_irq_disable(&tp->irq_handle);
+	rtdm_lock_get(&tp->lock); /* sync with IRQ handler on other cpu -JK- */
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	outl (0x00000000, ioaddr + CSR7);
+
+	/* Stop the Tx and Rx processes. */
+	tulip_stop_rxtx(tp);
+
+	/* prepare receive buffers */
+	tulip_refill_rx(rtdev);
+
+	/* release any unconsumed transmit buffers */
+	tulip_clean_tx_ring(tp);
+
+	/* 21040 -- Leave the card in 10baseT state. */
+	if (tp->chip_id == DC21040)
+		outl (0x00000004, ioaddr + CSR13);
+
+	if (inl (ioaddr + CSR6) != 0xffffffff)
+		tp->stats.rx_missed_errors += inl (ioaddr + CSR8) & 0xffff;
+
+	rtdm_lock_put(&tp->lock);
+	rtdm_irq_enable(&tp->irq_handle);
+
+	rtdev->if_port = tp->saved_if_port;
+
+	/* Leave the driver in snooze, not sleep, mode. */
+	tulip_set_power_state (tp, 0, 1);
+}
+
+
+static int tulip_close (/*RTnet*/struct rtnet_device *rtdev)
+{
+	long ioaddr = rtdev->base_addr;
+	struct tulip_private *tp = (struct tulip_private *) rtdev->priv;
+	int i;
+
+	rtnetif_stop_queue (rtdev);
+
+	tulip_down (rtdev);
+
+	if (tulip_debug > 1)
+		printk(KERN_DEBUG "%s: Shutting down ethercard, status was %2.2x.\n",
+			rtdev->name, inl (ioaddr + CSR5));
+
+	rtdm_irq_free(&tp->irq_handle);
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct /*RTnet*/rtskb *skb = tp->rx_buffers[i].skb;
+		dma_addr_t mapping = tp->rx_buffers[i].mapping;
+
+		tp->rx_buffers[i].skb = NULL;
+		tp->rx_buffers[i].mapping = 0;
+
+		tp->rx_ring[i].status = 0;	/* Not owned by Tulip chip. */
+		tp->rx_ring[i].length = 0;
+		tp->rx_ring[i].buffer1 = 0xBADF00D0;	/* An invalid address. */
+		if (skb) {
+			dma_unmap_single(&tp->pdev->dev, mapping, PKT_BUF_SZ,
+					 DMA_FROM_DEVICE);
+			/*RTnet*/dev_kfree_rtskb (skb);
+		}
+	}
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		struct /*RTnet*/rtskb *skb = tp->tx_buffers[i].skb;
+
+		if (skb != NULL) {
+			dma_unmap_single(&tp->pdev->dev,
+					 tp->tx_buffers[i].mapping,
+					 skb->len, DMA_TO_DEVICE);
+			/*RTnet*/dev_kfree_rtskb (skb);
+		}
+		tp->tx_buffers[i].skb = NULL;
+		tp->tx_buffers[i].mapping = 0;
+	}
+
+	rt_stack_disconnect(rtdev);
+
+	return 0;
+}
+
+#ifdef XXX_CONFIG_TULIP_MWI
+static void tulip_mwi_config (struct pci_dev *pdev,
+					struct rtnet_device *rtdev)
+{
+	struct tulip_private *tp = rtdev->priv;
+	u8 cache;
+	u16 pci_command;
+	u32 csr0;
+
+	if (tulip_debug > 3)
+		printk(KERN_DEBUG "%s: tulip_mwi_config()\n", pci_name(pdev));
+
+	tp->csr0 = csr0 = 0;
+
+	/* if we have any cache line size at all, we can do MRM */
+	csr0 |= MRM;
+
+	/* ...and barring hardware bugs, MWI */
+	if (!(tp->chip_id == DC21143 && tp->revision == 65))
+		csr0 |= MWI;
+
+	/* set or disable MWI in the standard PCI command bit.
+	 * Check for the case where  mwi is desired but not available
+	 */
+	if (csr0 & MWI)	pci_set_mwi(pdev);
+	else		pci_clear_mwi(pdev);
+
+	/* read result from hardware (in case bit refused to enable) */
+	pci_read_config_word(pdev, PCI_COMMAND, &pci_command);
+	if ((csr0 & MWI) && (!(pci_command & PCI_COMMAND_INVALIDATE)))
+		csr0 &= ~MWI;
+
+	/* if cache line size hardwired to zero, no MWI */
+	pci_read_config_byte(pdev, PCI_CACHE_LINE_SIZE, &cache);
+	if ((csr0 & MWI) && (cache == 0)) {
+		csr0 &= ~MWI;
+		pci_clear_mwi(pdev);
+	}
+
+	/* assign per-cacheline-size cache alignment and
+	 * burst length values
+	 */
+	switch (cache) {
+	case 8:
+		csr0 |= MRL | (1 << CALShift) | (16 << BurstLenShift);
+		break;
+	case 16:
+		csr0 |= MRL | (2 << CALShift) | (16 << BurstLenShift);
+		break;
+	case 32:
+		csr0 |= MRL | (3 << CALShift) | (32 << BurstLenShift);
+		break;
+	default:
+		cache = 0;
+		break;
+	}
+
+	/* if we have a good cache line size, we by now have a good
+	 * csr0, so save it and exit
+	 */
+	if (cache)
+		goto out;
+
+	/* we don't have a good csr0 or cache line size, disable MWI */
+	if (csr0 & MWI) {
+		pci_clear_mwi(pdev);
+		csr0 &= ~MWI;
+	}
+
+	/* sane defaults for burst length and cache alignment
+	 * originally from de4x5 driver
+	 */
+	csr0 |= (8 << BurstLenShift) | (1 << CALShift);
+
+out:
+	tp->csr0 = csr0;
+	if (tulip_debug > 2)
+		printk(KERN_DEBUG "%s: MWI config cacheline=%d, csr0=%08x\n",
+		       pci_name(pdev), cache, csr0);
+}
+#endif
+
+
+static int tulip_init_one (struct pci_dev *pdev,
+				     const struct pci_device_id *ent)
+{
+	struct tulip_private *tp;
+	/* See note below on the multiport cards. */
+	static unsigned char last_phys_addr[6] = {0x00, 'L', 'i', 'n', 'u', 'x'};
+	static struct pci_device_id early_486_chipsets[] = {
+		{ PCI_DEVICE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82424) },
+		{ PCI_DEVICE(PCI_VENDOR_ID_SI, PCI_DEVICE_ID_SI_496) },
+		{ },
+	};
+#if defined(__i386__)
+	static int last_irq;
+#endif
+	u8 chip_rev;
+	unsigned int i, irq;
+	unsigned short sum;
+	u8 ee_data[EEPROM_SIZE];
+	/*RTnet*/struct rtnet_device *rtdev;
+	long ioaddr;
+	static int board_idx = -1;
+	int chip_idx = ent->driver_data;
+	unsigned int t2104x_mode = 0;
+	unsigned int eeprom_missing = 0;
+
+#ifndef MODULE
+	static int did_version;		/* Already printed version info. */
+	if (tulip_debug > 0  &&  did_version++ == 0)
+		printk(KERN_INFO "%s", version);
+#endif
+
+	board_idx++;
+
+	if (cards[board_idx] == 0)
+		return -ENODEV;
+
+	/*
+	 *	LMC cards wire a tulip chip to a WAN interface; they need a
+	 *	very different driver (the lmc driver).
+	 */
+
+	if (pdev->subsystem_vendor == PCI_VENDOR_ID_LMC) {
+		printk(KERN_ERR PFX "skipping LMC card.\n");
+		return -ENODEV;
+	}
+
+	/*
+	 *	Early DM9100's need software CRC and the DMFE driver
+	 */
+
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9100)
+	{
+		u32 dev_rev;
+		/* Read Chip revision */
+		pci_read_config_dword(pdev, PCI_REVISION_ID, &dev_rev);
+		if(dev_rev < 0x02000030)
+		{
+			printk(KERN_ERR PFX "skipping early DM9100 with Crc bug (use dmfe)\n");
+			return -ENODEV;
+		}
+	}
+
+	/*
+	 *	Looks for early PCI chipsets where people report hangs
+	 *	without the workarounds being on.
+	 */
+
+	/* 1. Intel Saturn. Switch to 8 long words burst, 8 long word cache
+	      aligned.  Aries might need this too. The Saturn errata are not
+	      pretty reading but thankfully it's an old 486 chipset.
+
+	   2. The dreaded SiS496 486 chipset. Same workaround as Intel
+	      Saturn.
+	 */
+
+	if (pci_dev_present(early_486_chipsets))
+		csr0 = MRL | MRM | (8 << BurstLenShift) | (1 << CALShift);
+
+	/* bugfix: the ASIX must have a burst limit or horrible things happen. */
+	if (chip_idx == AX88140) {
+		if ((csr0 & 0x3f00) == 0)
+			csr0 |= 0x2000;
+	}
+
+	/* PNIC doesn't have MWI/MRL/MRM... */
+	if (chip_idx == LC82C168)
+		csr0 &= ~0xfff10000; /* zero reserved bits 31:20, 16 */
+
+	/* DM9102A has troubles with MRM & clear reserved bits 24:22, 20, 16, 7:1 */
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+		csr0 &= ~0x01f100ff;
+
+#if defined(__sparc__)
+	/* DM9102A needs 32-dword alignment/burst length on sparc - chip bug? */
+	if (pdev->vendor == 0x1282 && pdev->device == 0x9102)
+		csr0 = (csr0 & ~0xff00) | 0xe000;
+#endif
+
+	/*
+	 *	And back to business
+	 */
+
+	i = pci_enable_device(pdev);
+	if (i) {
+		printk(KERN_ERR PFX
+			"Cannot enable tulip board #%d, aborting\n",
+			board_idx);
+		return i;
+	}
+
+	ioaddr = pci_resource_start (pdev, 0);
+	irq = pdev->irq;
+
+	/* alloc_etherdev ensures aligned and zeroed private structures */
+	rtdev = /*RTnet*/rt_alloc_etherdev (sizeof (*tp),
+					RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (!rtdev) {
+		printk(KERN_ERR PFX "ether device alloc failed, aborting\n");
+		return -ENOMEM;
+	}
+	//rtdev_alloc_name(rtdev, "eth%d");//Done by register_rtdev()
+	rt_rtdev_connect(rtdev, &RTDEV_manager);
+	rtdev->vers = RTDEV_VERS_2_0;
+	rtdev->sysbind = &pdev->dev;
+
+	if (pci_resource_len (pdev, 0) < tulip_tbl[chip_idx].io_size) {
+		printk(KERN_ERR PFX "%s: I/O region (0x%llx@0x%llx) too small, "
+			"aborting\n", pci_name(pdev),
+			(unsigned long long)pci_resource_len (pdev, 0),
+			(unsigned long long)pci_resource_start (pdev, 0));
+		goto err_out_free_netdev;
+	}
+
+	/* grab all resources from both PIO and MMIO regions, as we
+	 * don't want anyone else messing around with our hardware */
+	if (pci_request_regions (pdev, "tulip"))
+		goto err_out_free_netdev;
+
+#ifndef USE_IO_OPS
+	ioaddr = (unsigned long) ioremap (pci_resource_start (pdev, 1),
+					  tulip_tbl[chip_idx].io_size);
+	if (!ioaddr)
+		goto err_out_free_res;
+#endif
+
+	pci_read_config_byte (pdev, PCI_REVISION_ID, &chip_rev);
+
+	/*
+	 * initialize private data structure 'tp'
+	 * it is zeroed and aligned in alloc_etherdev
+	 */
+	tp = rtdev->priv;
+
+	tp->rx_ring = dma_alloc_coherent(&pdev->dev,
+					 sizeof(struct tulip_rx_desc) * RX_RING_SIZE +
+					 sizeof(struct tulip_tx_desc) * TX_RING_SIZE,
+					 &tp->rx_ring_dma, GFP_ATOMIC);
+	if (!tp->rx_ring)
+		goto err_out_mtable;
+	tp->tx_ring = (struct tulip_tx_desc *)(tp->rx_ring + RX_RING_SIZE);
+	tp->tx_ring_dma = tp->rx_ring_dma + sizeof(struct tulip_rx_desc) * RX_RING_SIZE;
+
+	tp->chip_id = chip_idx;
+	tp->flags = tulip_tbl[chip_idx].flags;
+	tp->pdev = pdev;
+	tp->base_addr = ioaddr;
+	tp->revision = chip_rev;
+	tp->csr0 = csr0;
+	rtdm_lock_init(&tp->lock);
+	spin_lock_init(&tp->mii_lock);
+
+	rtdev->base_addr = ioaddr;
+	rtdev->irq = irq;
+
+#ifdef XXX_CONFIG_TULIP_MWI
+	if (!force_csr0 && (tp->flags & HAS_PCI_MWI))
+		tulip_mwi_config (pdev, rtdev);
+#else
+	/* MWI is broken for DC21143 rev 65... */
+	if (chip_idx == DC21143 && chip_rev == 65)
+		tp->csr0 &= ~MWI;
+#endif
+
+	/* Stop the chip's Tx and Rx processes. */
+	tulip_stop_rxtx(tp);
+
+	pci_set_master(pdev);
+
+	/* Clear the missed-packet counter. */
+	inl(ioaddr + CSR8);
+
+	if (chip_idx == DC21041) {
+		if (inl(ioaddr + CSR9) & 0x8000) {
+			chip_idx = DC21040;
+			t2104x_mode = 1;
+		} else {
+			t2104x_mode = 2;
+		}
+	}
+
+	/* The station address ROM is read byte serially.  The register must
+	   be polled, waiting for the value to be read bit serially from the
+	   EEPROM.
+	   */
+	sum = 0;
+	if (chip_idx == DC21040) {
+		outl(0, ioaddr + CSR9);		/* Reset the pointer with a dummy write. */
+		for (i = 0; i < 6; i++) {
+			int value, boguscnt = 100000;
+			do
+				value = inl(ioaddr + CSR9);
+			while (value < 0  && --boguscnt > 0);
+			rtdev->dev_addr[i] = value;
+			sum += value & 0xff;
+		}
+	} else if (chip_idx == LC82C168) {
+		for (i = 0; i < 3; i++) {
+			int value, boguscnt = 100000;
+			outl(0x600 | i, ioaddr + 0x98);
+			do
+				value = inl(ioaddr + CSR9);
+			while (value < 0  && --boguscnt > 0);
+			put_unaligned(le16_to_cpu(value), ((u16*)rtdev->dev_addr) + i);
+			sum += value & 0xffff;
+		}
+	} else if (chip_idx == COMET) {
+		/* No need to read the EEPROM. */
+		put_unaligned(inl(ioaddr + 0xA4), (u32 *)rtdev->dev_addr);
+		put_unaligned(inl(ioaddr + 0xA8), (u16 *)(rtdev->dev_addr + 4));
+		for (i = 0; i < 6; i ++)
+			sum += rtdev->dev_addr[i];
+	} else {
+		/* A serial EEPROM interface, we read now and sort it out later. */
+		int sa_offset = 0;
+		int ee_addr_size = tulip_read_eeprom(ioaddr, 0xff, 8) & 0x40000 ? 8 : 6;
+
+		for (i = 0; i < sizeof(ee_data)/2; i++)
+			((u16 *)ee_data)[i] =
+				le16_to_cpu(tulip_read_eeprom(ioaddr, i, ee_addr_size));
+
+		/* DEC now has a specification (see Notes) but early board makers
+		   just put the address in the first EEPROM locations. */
+		/* This does  memcmp(eedata, eedata+16, 8) */
+		for (i = 0; i < 8; i ++)
+			if (ee_data[i] != ee_data[16+i])
+				sa_offset = 20;
+		if (ee_data[0] == 0xff  &&  ee_data[1] == 0xff &&  ee_data[2] == 0)
+			sa_offset = 2;		/* Grrr, damn Matrox boards. */
+#ifdef CONFIG_DDB5476
+		if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 6)) {
+			/* DDB5476 MAC address in first EEPROM locations. */
+		       sa_offset = 0;
+		       /* No media table either */
+		       tp->flags &= ~HAS_MEDIA_TABLE;
+	       }
+#endif
+#ifdef CONFIG_DDB5477
+	       if ((pdev->bus->number == 0) && (PCI_SLOT(pdev->devfn) == 4)) {
+		       /* DDB5477 MAC address in first EEPROM locations. */
+		       sa_offset = 0;
+		       /* No media table either */
+		       tp->flags &= ~HAS_MEDIA_TABLE;
+	       }
+#endif
+#ifdef CONFIG_MIPS_COBALT
+	       if ((pdev->bus->number == 0) &&
+		   ((PCI_SLOT(pdev->devfn) == 7) ||
+		    (PCI_SLOT(pdev->devfn) == 12))) {
+		       /* Cobalt MAC address in first EEPROM locations. */
+		       sa_offset = 0;
+		       /* No media table either */
+		       tp->flags &= ~HAS_MEDIA_TABLE;
+	       }
+#endif
+		for (i = 0; i < 6; i ++) {
+			rtdev->dev_addr[i] = ee_data[i + sa_offset];
+			sum += ee_data[i + sa_offset];
+		}
+	}
+	/* Lite-On boards have the address byte-swapped. */
+	if ((rtdev->dev_addr[0] == 0xA0  ||  rtdev->dev_addr[0] == 0xC0)
+		&&  rtdev->dev_addr[1] == 0x00)
+		for (i = 0; i < 6; i+=2) {
+			char tmp = rtdev->dev_addr[i];
+			rtdev->dev_addr[i] = rtdev->dev_addr[i+1];
+			rtdev->dev_addr[i+1] = tmp;
+		}
+	/* On the Zynx 315 Etherarray and other multiport boards only the
+	   first Tulip has an EEPROM.
+	   On Sparc systems the mac address is held in the OBP property
+	   "local-mac-address".
+	   The addresses of the subsequent ports are derived from the first.
+	   Many PCI BIOSes also incorrectly report the IRQ line, so we correct
+	   that here as well. */
+	if (sum == 0  || sum == 6*0xff) {
+#if defined(__sparc__)
+		struct pcidev_cookie *pcp = pdev->sysdata;
+#endif
+		eeprom_missing = 1;
+		for (i = 0; i < 5; i++)
+			rtdev->dev_addr[i] = last_phys_addr[i];
+		rtdev->dev_addr[i] = last_phys_addr[i] + 1;
+#if defined(__sparc__)
+		if ((pcp != NULL) && prom_getproplen(pcp->prom_node,
+			"local-mac-address") == 6) {
+			prom_getproperty(pcp->prom_node, "local-mac-address",
+			    rtdev->dev_addr, 6);
+		}
+#endif
+#if defined(__i386__)		/* Patch up x86 BIOS bug. */
+		if (last_irq)
+			irq = last_irq;
+#endif
+	}
+
+	for (i = 0; i < 6; i++)
+		last_phys_addr[i] = rtdev->dev_addr[i];
+#if defined(__i386__)
+	last_irq = irq;
+#endif
+
+	/* The lower four bits are the media type. */
+	if (board_idx >= 0  &&  board_idx < MAX_UNITS) {
+		/* Somehow required for this RTnet version, don't ask me why... */
+		if (!options[board_idx])
+			tp->default_port = 11; /*MII*/
+		/*RTnet*/
+
+		if (options[board_idx] & MEDIA_MASK)
+			tp->default_port = options[board_idx] & MEDIA_MASK;
+		if ((options[board_idx] & FullDuplex) || full_duplex[board_idx] > 0)
+			tp->full_duplex = 1;
+		if (mtu[board_idx] > 0)
+			rtdev->mtu = mtu[board_idx];
+	}
+	if (rtdev->mem_start & MEDIA_MASK)
+		tp->default_port = rtdev->mem_start & MEDIA_MASK;
+	if (tp->default_port) {
+		printk(KERN_INFO "tulip%d: Transceiver selection forced to %s.\n",
+		       board_idx, medianame[tp->default_port & MEDIA_MASK]);
+		tp->medialock = 1;
+		if (tulip_media_cap[tp->default_port] & MediaAlwaysFD)
+			tp->full_duplex = 1;
+	}
+	if (tp->full_duplex)
+		tp->full_duplex_lock = 1;
+
+	if (tulip_media_cap[tp->default_port] & MediaIsMII) {
+		u16 media2advert[] = { 0x20, 0x40, 0x03e0, 0x60, 0x80, 0x100, 0x200 };
+		tp->mii_advertise = media2advert[tp->default_port - 9];
+		tp->mii_advertise |= (tp->flags & HAS_8023X); /* Matching bits! */
+	}
+
+	if (tp->flags & HAS_MEDIA_TABLE) {
+		memcpy(tp->eeprom, ee_data, sizeof(tp->eeprom));
+
+		sprintf(rtdev->name, "tulip%d", board_idx);	/* hack */
+		tulip_parse_eeprom(rtdev);
+		strcpy(rtdev->name, "rteth%d");			/* un-hack */
+	}
+
+	if ((tp->flags & ALWAYS_CHECK_MII) ||
+		(tp->mtable  &&  tp->mtable->has_mii) ||
+		( ! tp->mtable  &&  (tp->flags & HAS_MII))) {
+		if (tp->mtable  &&  tp->mtable->has_mii) {
+			for (i = 0; i < tp->mtable->leafcount; i++)
+				if (tp->mtable->mleaf[i].media == 11) {
+					tp->cur_index = i;
+					tp->saved_if_port = rtdev->if_port;
+					tulip_select_media(rtdev, 2);
+					rtdev->if_port = tp->saved_if_port;
+					break;
+				}
+		}
+
+		/* Find the connected MII xcvrs.
+		   Doing this in open() would allow detecting external xcvrs
+		   later, but takes much time. */
+		tulip_find_mii (rtdev, board_idx);
+	}
+
+	rtdev->open = tulip_open;
+	rtdev->stop = tulip_close;
+	rtdev->hard_header = rt_eth_header;
+	rtdev->hard_start_xmit = tulip_start_xmit;
+	rtdev->get_stats = tulip_get_stats;
+
+	if (/*RTnet*/rt_register_rtnetdev(rtdev)) {
+		goto err_out_free_ring;
+	}
+
+	printk(KERN_INFO "%s: %s rev %d at %#3lx,",
+	       rtdev->name, tulip_tbl[chip_idx].chip_name, chip_rev, ioaddr);
+	pci_set_drvdata(pdev, rtdev);
+
+	if (t2104x_mode == 1)
+		printk(" 21040 compatible mode,");
+	else if (t2104x_mode == 2)
+		printk(" 21041 mode,");
+	if (eeprom_missing)
+		printk(" EEPROM not present,");
+	for (i = 0; i < 6; i++)
+		printk("%c%2.2X", i ? ':' : ' ', rtdev->dev_addr[i]);
+	printk(", IRQ %d.\n", irq);
+
+/*RTnet
+	if (tp->chip_id == PNIC2)
+		tp->link_change = pnic2_lnk_change;
+	else if ((tp->flags & HAS_NWAY)  || tp->chip_id == DC21041)
+		tp->link_change = t21142_lnk_change;
+	else if (tp->flags & HAS_PNICNWAY)
+		tp->link_change = pnic_lnk_change;
+ *RTnet*/
+	tp->link_change = NULL;
+
+	/* Reset the xcvr interface and turn on heartbeat. */
+	switch (chip_idx) {
+	case DC21041:
+		if (tp->sym_advertise == 0)
+			tp->sym_advertise = 0x0061;
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00000008, ioaddr + CSR15); /* Listen on AUI also. */
+		outl(inl(ioaddr + CSR6) | csr6_fd, ioaddr + CSR6);
+		outl(0x0000EF01, ioaddr + CSR13);
+		break;
+	case DC21040:
+		outl(0x00000000, ioaddr + CSR13);
+		outl(0x00000004, ioaddr + CSR13);
+		break;
+	case DC21140:
+	case DM910X:
+	default:
+		if (tp->mtable)
+			outl(tp->mtable->csr12dir | 0x100, ioaddr + CSR12);
+		break;
+	case DC21142:
+		if (tp->mii_cnt  ||  tulip_media_cap[rtdev->if_port] & MediaIsMII) {
+			outl(csr6_mask_defstate, ioaddr + CSR6);
+			outl(0x0000, ioaddr + CSR13);
+			outl(0x0000, ioaddr + CSR14);
+			outl(csr6_mask_hdcap, ioaddr + CSR6);
+		} else
+			t21142_start_nway(rtdev);
+		break;
+	case PNIC2:
+		/* just do a reset for sanity sake */
+		outl(0x0000, ioaddr + CSR13);
+		outl(0x0000, ioaddr + CSR14);
+		break;
+	case LC82C168:
+		if ( ! tp->mii_cnt) {
+			tp->nway = 1;
+			tp->nwayset = 0;
+			outl(csr6_ttm | csr6_ca, ioaddr + CSR6);
+			outl(0x30, ioaddr + CSR12);
+			outl(0x0001F078, ioaddr + CSR6);
+			outl(0x0201F078, ioaddr + CSR6); /* Turn on autonegotiation. */
+		}
+		break;
+	case MX98713:
+	case COMPEX9881:
+		outl(0x00000000, ioaddr + CSR6);
+		outl(0x000711C0, ioaddr + CSR14); /* Turn on NWay. */
+		outl(0x00000001, ioaddr + CSR13);
+		break;
+	case MX98715:
+	case MX98725:
+		outl(0x01a80000, ioaddr + CSR6);
+		outl(0xFFFFFFFF, ioaddr + CSR14);
+		outl(0x00001000, ioaddr + CSR12);
+		break;
+	case COMET:
+		/* No initialization necessary. */
+		break;
+	}
+
+	/* put the chip in snooze mode until opened */
+	tulip_set_power_state (tp, 0, 1);
+
+	return 0;
+
+err_out_free_ring:
+	dma_free_coherent(&pdev->dev,
+			  sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+			  sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+			  tp->rx_ring, tp->rx_ring_dma);
+
+err_out_mtable:
+	if (tp->mtable)
+		kfree (tp->mtable);
+#ifndef USE_IO_OPS
+	iounmap((void *)ioaddr);
+
+err_out_free_res:
+#endif
+	pci_release_regions (pdev);
+
+err_out_free_netdev:
+	/*RTnet*/rtdev_free (rtdev);
+	return -ENODEV;
+}
+
+
+static void tulip_remove_one (struct pci_dev *pdev)
+{
+	struct rtnet_device *rtdev = (struct rtnet_device *) pci_get_drvdata (pdev);
+	struct tulip_private *tp;
+
+	if (!rtdev || !rtdev->priv)
+		return;
+
+	tp = rtdev->priv;
+	dma_free_coherent(&pdev->dev,
+			  sizeof (struct tulip_rx_desc) * RX_RING_SIZE +
+			  sizeof (struct tulip_tx_desc) * TX_RING_SIZE,
+			  tp->rx_ring, tp->rx_ring_dma);
+	rt_unregister_rtnetdev (rtdev);
+	if (tp->mtable)
+		kfree (tp->mtable);
+#ifndef USE_IO_OPS
+	iounmap((void *)rtdev->base_addr);
+#endif
+	/*RTnet*/
+	rt_rtdev_disconnect(rtdev);
+	rtdev_free (rtdev);
+	/*RTnet*/
+	pci_release_regions (pdev);
+	pci_set_drvdata (pdev, NULL);
+
+	/* pci_power_off (pdev, -1); */
+}
+
+
+static struct pci_driver tulip_driver = {
+	.name		= DRV_NAME,
+	.id_table	= tulip_pci_tbl,
+	.probe		= tulip_init_one,
+	.remove		= tulip_remove_one,
+};
+
+
+static int __init tulip_init (void)
+{
+#ifdef MODULE
+	printk(KERN_INFO "%s", version);
+#endif
+
+	/* copy module parms into globals */
+	tulip_rx_copybreak = rx_copybreak;
+	tulip_max_interrupt_work = max_interrupt_work;
+
+	/* probe for and init boards */
+	return pci_register_driver (&tulip_driver);
+}
+
+
+static void __exit tulip_cleanup (void)
+{
+	pci_unregister_driver (&tulip_driver);
+}
+
+
+module_init(tulip_init);
+module_exit(tulip_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/via-rhine.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/via-rhine.c
new file mode 100644
index 0000000..192f02e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/drivers/via-rhine.c
@@ -0,0 +1,1823 @@
+/* via-rhine.c: A Linux Ethernet device driver for VIA Rhine family chips. */
+/*
+	Written 1998-2001 by Donald Becker.
+
+	This software may be used and distributed according to the terms of
+	the GNU General Public License (GPL), incorporated herein by reference.
+	Drivers based on or derived from this code fall under the GPL and must
+	retain the authorship, copyright and license notice.  This file is not
+	a complete program and may only be used when the entire operating
+	system is licensed under the GPL.
+
+	This driver is designed for the VIA VT86C100A Rhine-I.
+	It also works with the 6102 Rhine-II, and 6105/6105M Rhine-III.
+
+	The author may be reached as becker@scyld.com, or C/O
+	Scyld Computing Corporation
+	410 Severn Ave., Suite 210
+	Annapolis MD 21403
+
+
+	This driver contains some changes from the original Donald Becker
+	version. He may or may not be interested in bug reports on this
+	code. You can find his versions at:
+	http://www.scyld.com/network/via-rhine.html
+
+
+	Linux kernel version history:
+
+	LK1.1.0:
+	- Jeff Garzik: softnet 'n stuff
+
+	LK1.1.1:
+	- Justin Guyett: softnet and locking fixes
+	- Jeff Garzik: use PCI interface
+
+	LK1.1.2:
+	- Urban Widmark: minor cleanups, merges from Becker 1.03a/1.04 versions
+
+	LK1.1.3:
+	- Urban Widmark: use PCI DMA interface (with thanks to the eepro100.c
+			 code) update "Theory of Operation" with
+			 softnet/locking changes
+	- Dave Miller: PCI DMA and endian fixups
+	- Jeff Garzik: MOD_xxx race fixes, updated PCI resource allocation
+
+	LK1.1.4:
+	- Urban Widmark: fix gcc 2.95.2 problem and
+			 remove writel's to fixed address 0x7c
+
+	LK1.1.5:
+	- Urban Widmark: mdio locking, bounce buffer changes
+			 merges from Beckers 1.05 version
+			 added netif_running_on/off support
+
+	LK1.1.6:
+	- Urban Widmark: merges from Beckers 1.08b version (VT6102 + mdio)
+			 set netif_running_on/off on startup, del_timer_sync
+
+	LK1.1.7:
+	- Manfred Spraul: added reset into tx_timeout
+
+	LK1.1.9:
+	- Urban Widmark: merges from Beckers 1.10 version
+			 (media selection + eeprom reload)
+	- David Vrabel:  merges from D-Link "1.11" version
+			 (disable WOL and PME on startup)
+
+	LK1.1.10:
+	- Manfred Spraul: use "singlecopy" for unaligned buffers
+			  don't allocate bounce buffers for !ReqTxAlign cards
+
+	LK1.1.11:
+	- David Woodhouse: Set dev->base_addr before the first time we call
+					   wait_for_reset(). It's a lot happier that way.
+					   Free np->tx_bufs only if we actually allocated it.
+
+	LK1.1.12:
+	- Martin Eriksson: Allow Memory-Mapped IO to be enabled.
+
+	LK1.1.13 (jgarzik):
+	- Add ethtool support
+	- Replace some MII-related magic numbers with constants
+
+	LK1.1.14 (Ivan G.):
+	- fixes comments for Rhine-III
+	- removes W_MAX_TIMEOUT (unused)
+	- adds HasDavicomPhy for Rhine-I (basis: linuxfet driver; my card
+	  is R-I and has Davicom chip, flag is referenced in kernel driver)
+	- sends chip_id as a parameter to wait_for_reset since np is not
+	  initialized on first call
+	- changes mmio "else if (chip_id==VT6102)" to "else" so it will work
+	  for Rhine-III's (documentation says same bit is correct)
+	- transmit frame queue message is off by one - fixed
+	- adds IntrNormalSummary to "Something Wicked" exclusion list
+	  so normal interrupts will not trigger the message (src: Donald Becker)
+	(Roger Luethi)
+	- show confused chip where to continue after Tx error
+	- location of collision counter is chip specific
+	- allow selecting backoff algorithm (module parameter)
+
+	LK1.1.15 (jgarzik):
+	- Use new MII lib helper generic_mii_ioctl
+
+	LK1.1.16 (Roger Luethi)
+	- Etherleak fix
+	- Handle Tx buffer underrun
+	- Fix bugs in full duplex handling
+	- New reset code uses "force reset" cmd on Rhine-II
+	- Various clean ups
+
+	LK1.1.17 (Roger Luethi)
+	- Fix race in via_rhine_start_tx()
+	- On errors, wait for Tx engine to turn off before scavenging
+	- Handle Tx descriptor write-back race on Rhine-II
+	- Force flushing for PCI posted writes
+	- More reset code changes
+
+	Ported to RTnet: October 2003, Jan Kiszka <Jan.Kiszka@web.de>
+*/
+
+#define DRV_NAME	"via-rhine-rt"
+#define DRV_VERSION	"1.1.17-RTnet-0.1"
+#define DRV_RELDATE	"2003-10-05"
+
+
+/* A few user-configurable values.
+   These may be modified when a driver module is loaded. */
+
+static int local_debug = 1;			/* 0 quiet, 1 normal messages, .. 7 verbose. */
+static int max_interrupt_work = 20;
+
+/* Set the copy breakpoint for the copy-only-tiny-frames scheme.
+   Setting to > 1518 effectively disables this feature. */
+/*** RTnet ***
+static int rx_copybreak;
+ *** RTnet ***/
+
+/* Select a backoff algorithm (Ethernet capture effect) */
+static int backoff;
+
+/* Used to pass the media type, etc.
+   Both 'options[]' and 'full_duplex[]' should exist for driver
+   interoperability.
+   The media type is usually passed in 'options[]'.
+   The default is autonegotiation for speed and duplex.
+     This should rarely be overridden.
+   Use option values 0x10/0x20 for 10Mbps, 0x100,0x200 for 100Mbps.
+   Use option values 0x10 and 0x100 for forcing half duplex fixed speed.
+   Use option values 0x20 and 0x200 for forcing full duplex operation.
+*/
+#define MAX_UNITS 8		/* More are supported, limit only on options */
+static int options[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
+static int full_duplex[MAX_UNITS] = {-1, -1, -1, -1, -1, -1, -1, -1};
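+
+/*
+ * Editor's note -- a minimal illustrative decoder (the helper is
+ * hypothetical and unused by the driver) for the option bits documented
+ * above: 0x10/0x20 force 10Mbps, 0x100/0x200 force 100Mbps, and the
+ * 0x20/0x200 variants additionally force full duplex.
+ */
+static inline int via_rhine_option_forces_fdx(int option)
+{
+	/* -1 means "not set"; only positive options carry media bits */
+	return option > 0 && (option & 0x220) != 0;
+}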
+
+/* Maximum number of multicast addresses to filter (vs. rx-all-multicast).
+   The Rhine has a 64 element 8390-like hash table.  */
+static const int multicast_filter_limit = 32;
+
+
+/* Operational parameters that are set at compile time. */
+
+/* Keep the ring sizes a power of two for compile efficiency.
+   The compiler will convert <unsigned>'%'<2^N> into a bit mask.
+   Making the Tx ring too large decreases the effectiveness of channel
+   bonding and packet priority.
+   There are no ill effects from too-large receive rings. */
+#define TX_RING_SIZE	16
+#define TX_QUEUE_LEN	10		/* Limit ring entries actually used.  */
+#define RX_RING_SIZE	8 /*** RTnet ***/
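+/* Editor's note: e.g. with TX_RING_SIZE == 16 the compiler can reduce
+ * "entry = cur_tx % TX_RING_SIZE" to "entry = cur_tx & 15". */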
+
+
+/* Operational parameters that usually are not changed. */
+
+/* Time in jiffies before concluding the transmitter is hung. */
+#define TX_TIMEOUT  (2*HZ)
+
+#define PKT_BUF_SZ		1536			/* Size of each temporary Rx buffer.*/
+
+#if !defined(__OPTIMIZE__)  ||  !defined(__KERNEL__)
+#warning  You must compile this file with the correct options!
+#warning  See the last lines of the source file.
+#error  You must compile this driver with "-O".
+#endif
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/string.h>
+#include <linux/timer.h>
+#include <linux/errno.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <linux/interrupt.h>
+#include <linux/pci.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+#include <linux/mii.h>
+#include <linux/ethtool.h>
+#include <linux/crc32.h>
+#include <linux/uaccess.h>
+#include <asm/processor.h>		/* Processor type for cache alignment. */
+#include <asm/bitops.h>
+#include <asm/io.h>
+#include <asm/irq.h>
+
+/*** RTnet ***/
+#include <rtnet_port.h>
+
+#define DEFAULT_RX_POOL_SIZE    16
+
+static int cards[MAX_UNITS] = { [0 ... (MAX_UNITS-1)] = 1 };
+module_param_array(cards, int, NULL, 0444);
+MODULE_PARM_DESC(cards, "array of cards to be supported (e.g. 1,0,1)");
+/*** RTnet ***/
+
+/* These identify the driver base version and may not be removed. */
+static char version[] =
+KERN_INFO DRV_NAME ".c:" DRV_VERSION "  " DRV_RELDATE "  Jan.Kiszka@web.de\n";
+
+static char shortname[] = DRV_NAME;
+
+
+/* This driver was written to use PCI memory space, however most versions
+   of the Rhine only work correctly with I/O space accesses. */
+/*#ifdef CONFIG_VIA_RHINE_MMIO
+#define USE_MEM
+#else*/
+#define USE_IO
+#undef readb
+#undef readw
+#undef readl
+#undef writeb
+#undef writew
+#undef writel
+#define readb(addr) inb((unsigned long)(addr))
+#define readw(addr) inw((unsigned long)(addr))
+#define readl(addr) inl((unsigned long)(addr))
+#define writeb(val,addr) outb((val),(unsigned long)(addr))
+#define writew(val,addr) outw((val),(unsigned long)(addr))
+#define writel(val,addr) outl((val),(unsigned long)(addr))
+/*#endif*/
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_DESCRIPTION("RTnet VIA Rhine PCI Fast Ethernet driver");
+MODULE_LICENSE("GPL");
+
+module_param(max_interrupt_work, int, 0444);
+module_param_named(debug, local_debug, int, 0444);
+/*** RTnet ***
+MODULE_PARM(rx_copybreak, "i");
+ *** RTnet ***/
+module_param(backoff, int, 0444);
+module_param_array(options, int, NULL, 0444);
+module_param_array(full_duplex, int, NULL, 0444);
+MODULE_PARM_DESC(max_interrupt_work, "VIA Rhine maximum events handled per interrupt");
+MODULE_PARM_DESC(debug, "VIA Rhine debug level (0-7)");
+/*** RTnet ***
+MODULE_PARM_DESC(rx_copybreak, "VIA Rhine copy breakpoint for copy-only-tiny-frames");
+ *** RTnet ***/
+MODULE_PARM_DESC(backoff, "VIA Rhine: Bits 0-3: backoff algorithm");
+MODULE_PARM_DESC(options, "VIA Rhine: Bits 0-3: media type, bit 17: full duplex");
+MODULE_PARM_DESC(full_duplex, "VIA Rhine full duplex setting(s) (1)");
+
+/*
+				Theory of Operation
+
+I. Board Compatibility
+
+This driver is designed for the VIA VT86C100A Rhine-I PCI Fast Ethernet
+controller; it also supports the VT6102 Rhine-II and VT6105/6105M Rhine-III.
+
+II. Board-specific settings
+
+Boards with this chip are functional only in a bus-master PCI slot.
+
+Many operational settings are loaded from the EEPROM to the Config word at
+offset 0x78. For most of these settings, this driver assumes that they are
+correct.
+If this driver is compiled to use PCI memory space operations the EEPROM
+must be configured to enable memory ops.
+
+III. Driver operation
+
+IIIa. Ring buffers
+
+This driver uses two statically allocated fixed-size descriptor lists
+formed into rings by a branch from the final descriptor to the beginning of
+the list.  The ring sizes are set at compile time by RX/TX_RING_SIZE.
+
+IIIb/c. Transmit/Receive Structure
+
+This driver attempts to use a zero-copy receive and transmit scheme.
+
+Alas, all data buffers are required to start on a 32 bit boundary, so
+the driver must often copy transmit packets into bounce buffers.
+
+The driver allocates full frame size skbuffs for the Rx ring buffers at
+open() time and passes the skb->data field to the chip as receive data
+buffers.  When an incoming frame is less than RX_COPYBREAK bytes long,
+a fresh skbuff is allocated and the frame is copied to the new skbuff.
+When the incoming frame is larger, the skbuff is passed directly up the
+protocol stack.  Buffers consumed this way are replaced by newly allocated
+skbuffs in the last phase of via_rhine_rx().
+
+The RX_COPYBREAK value is chosen to trade-off the memory wasted by
+using a full-sized skbuff for small frames vs. the copying costs of larger
+frames.  New boards are typically used in generously configured machines
+and the underfilled buffers have negligible impact compared to the benefit of
+a single allocation size, so the default value of zero results in never
+copying packets.  When copying is done, the cost is usually mitigated by using
+a combined copy/checksum routine.  Copying also preloads the cache, which is
+most useful with small frames.
+
+Since the VIA chips are only able to transfer data to buffers on 32 bit
+boundaries, the IP header at offset 14 in an ethernet frame isn't
+longword aligned for further processing.  Copying these unaligned buffers
+has the beneficial effect of 16-byte aligning the IP header.
+
+IIId. Synchronization
+
+The driver runs as two independent, single-threaded flows of control.  One
+is the send-packet routine, which enforces single-threaded use by the
+dev->priv->lock spinlock. The other thread is the interrupt handler, which
+is single threaded by the hardware and interrupt handling software.
+
+The send packet thread has partial control over the Tx ring. It locks the
+dev->priv->lock whenever it's queuing a Tx packet. If the next slot in the ring
+is not available it stops the transmit queue by calling netif_stop_queue.
+
+The interrupt handler has exclusive control over the Rx ring and records stats
+from the Tx ring.  After reaping the stats, it marks the Tx queue entry as
+empty by incrementing the dirty_tx mark. If at least half of the entries in
+the Rx ring are available the transmit queue is woken up if it was stopped.
+
+IV. Notes
+
+IVb. References
+
+Preliminary VT86C100A manual from http://www.via.com.tw/
+http://www.scyld.com/expert/100mbps.html
+http://www.scyld.com/expert/NWay.html
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT86C100A/Datasheet/VT86C100A03.pdf
+ftp://ftp.via.com.tw/public/lan/Products/NIC/VT6102/Datasheet/VT6102_021.PDF
+
+
+IVc. Errata
+
+The VT86C100A manual is not a reliable source of information.
+The 3043 chip does not handle unaligned transmit or receive buffers, resulting
+in significant performance degradation for bounce buffer copies on transmit
+and unaligned IP headers on receive.
+The chip does not pad to minimum transmit length.
+
+*/
+
+
+/* This table drives the PCI probe routines.  It's mostly boilerplate in all
+   of the drivers, and will likely be provided by some future kernel.
+   Note the matching code -- the first table entry matches all 56** cards but
+   the second matches only the 1234 card.
+*/
+
+enum pci_flags_bit {
+	PCI_USES_IO=1, PCI_USES_MEM=2, PCI_USES_MASTER=4,
+	PCI_ADDR0=0x10<<0, PCI_ADDR1=0x10<<1, PCI_ADDR2=0x10<<2, PCI_ADDR3=0x10<<3,
+};
+
+enum via_rhine_chips {
+	VT86C100A = 0,
+	VT6102,
+	VT6105,
+	VT6105M
+};
+
+struct via_rhine_chip_info {
+	const char *name;
+	u16 pci_flags;
+	int io_size;
+	int drv_flags;
+};
+
+
+enum chip_capability_flags {
+	CanHaveMII=1, HasESIPhy=2, HasDavicomPhy=4,
+	ReqTxAlign=0x10, HasWOL=0x20, };
+
+#ifdef USE_MEM
+#define RHINE_IOTYPE (PCI_USES_MEM | PCI_USES_MASTER | PCI_ADDR1)
+#else
+#define RHINE_IOTYPE (PCI_USES_IO  | PCI_USES_MASTER | PCI_ADDR0)
+#endif
+/* Beware of PCI posted writes: reading any device register back forces
+   preceding posted writes to actually reach the chip. */
+#define IOSYNC	do { readb((void *)dev->base_addr + StationAddr); } while (0)
+
+/* directly indexed by enum via_rhine_chips, above */
+static struct via_rhine_chip_info via_rhine_chip_info[] =
+{
+	{ "VIA VT86C100A Rhine", RHINE_IOTYPE, 128,
+	  CanHaveMII | ReqTxAlign | HasDavicomPhy },
+	{ "VIA VT6102 Rhine-II", RHINE_IOTYPE, 256,
+	  CanHaveMII | HasWOL },
+	{ "VIA VT6105 Rhine-III", RHINE_IOTYPE, 256,
+	  CanHaveMII | HasWOL },
+	{ "VIA VT6105M Rhine-III", RHINE_IOTYPE, 256,
+	  CanHaveMII | HasWOL },
+};
+
+static struct pci_device_id via_rhine_pci_tbl[] =
+{
+	{0x1106, 0x3043, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT86C100A},
+	{0x1106, 0x3065, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6102},
+	{0x1106, 0x3106, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105},
+	{0x1106, 0x3053, PCI_ANY_ID, PCI_ANY_ID, 0, 0, VT6105M},
+	{0,}			/* terminate list */
+};
+MODULE_DEVICE_TABLE(pci, via_rhine_pci_tbl);
+
+
+/* Offsets to the device registers. */
+enum register_offsets {
+	StationAddr=0x00, RxConfig=0x06, TxConfig=0x07, ChipCmd=0x08,
+	IntrStatus=0x0C, IntrEnable=0x0E,
+	MulticastFilter0=0x10, MulticastFilter1=0x14,
+	RxRingPtr=0x18, TxRingPtr=0x1C, GFIFOTest=0x54,
+	MIIPhyAddr=0x6C, MIIStatus=0x6D, PCIBusConfig=0x6E,
+	MIICmd=0x70, MIIRegAddr=0x71, MIIData=0x72, MACRegEEcsr=0x74,
+	ConfigA=0x78, ConfigB=0x79, ConfigC=0x7A, ConfigD=0x7B,
+	RxMissed=0x7C, RxCRCErrs=0x7E, MiscCmd=0x81,
+	StickyHW=0x83, IntrStatus2=0x84, WOLcrClr=0xA4, WOLcgClr=0xA7,
+	PwrcsrClr=0xAC,
+};
+
+/* Bits in ConfigD */
+enum backoff_bits {
+	BackOptional=0x01, BackModify=0x02,
+	BackCaptureEffect=0x04, BackRandom=0x08
+};
+
+#ifdef USE_MEM
+/* Registers we check to verify that MMIO and PIO accesses yield the same values. */
+int mmio_verify_registers[] = {
+	RxConfig, TxConfig, IntrEnable, ConfigA, ConfigB, ConfigC, ConfigD,
+	0
+};
+#endif
+
+/* Bits in the interrupt status/mask registers. */
+enum intr_status_bits {
+	IntrRxDone=0x0001, IntrRxErr=0x0004, IntrRxEmpty=0x0020,
+	IntrTxDone=0x0002, IntrTxError=0x0008, IntrTxUnderrun=0x0210,
+	IntrPCIErr=0x0040,
+	IntrStatsMax=0x0080, IntrRxEarly=0x0100,
+	IntrRxOverflow=0x0400, IntrRxDropped=0x0800, IntrRxNoBuf=0x1000,
+	IntrTxAborted=0x2000, IntrLinkChange=0x4000,
+	IntrRxWakeUp=0x8000,
+	IntrNormalSummary=0x0003, IntrAbnormalSummary=0xC260,
+	IntrTxDescRace=0x080000,	/* mapped from IntrStatus2 */
+	IntrTxErrSummary=0x082218,
+};
+
+/* The Rx and Tx buffer descriptors. */
+struct rx_desc {
+	s32 rx_status;
+	u32 desc_length; /* Chain flag, Buffer/frame length */
+	u32 addr;
+	u32 next_desc;
+};
+struct tx_desc {
+	s32 tx_status;
+	u32 desc_length; /* Chain flag, Tx Config, Frame length */
+	u32 addr;
+	u32 next_desc;
+};
+
+/* Initial value for tx_desc.desc_length, Buffer size goes to bits 0-10 */
+#define TXDESC 0x00e08000
+
+enum rx_status_bits {
+	RxOK=0x8000, RxWholePkt=0x0300, RxErr=0x008F
+};
+
+/* Bits in *_desc.*_status */
+enum desc_status_bits {
+	DescOwn=0x80000000
+};
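+
+/* Ownership handshake (sketch): the driver fills in a descriptor and then
+   hands it to the NIC by setting DescOwn; the chip clears DescOwn once it
+   is done, returning the descriptor to the driver.  See the wmb()-guarded
+   stores in via_rhine_start_tx() and the DescOwn polling in via_rhine_rx(). */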
+
+/* Bits in ChipCmd. */
+enum chip_cmd_bits {
+	CmdInit=0x0001, CmdStart=0x0002, CmdStop=0x0004, CmdRxOn=0x0008,
+	CmdTxOn=0x0010, CmdTxDemand=0x0020, CmdRxDemand=0x0040,
+	CmdEarlyRx=0x0100, CmdEarlyTx=0x0200, CmdFDuplex=0x0400,
+	CmdNoTxPoll=0x0800, CmdReset=0x8000,
+};
+
+#define MAX_MII_CNT	4
+struct netdev_private {
+	/* Descriptor rings */
+	struct rx_desc *rx_ring;
+	struct tx_desc *tx_ring;
+	dma_addr_t rx_ring_dma;
+	dma_addr_t tx_ring_dma;
+
+	/* The addresses of receive-in-place skbuffs. */
+	struct rtskb *rx_skbuff[RX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t rx_skbuff_dma[RX_RING_SIZE];
+
+	/* The saved address of a sent-in-place packet/buffer, for later free(). */
+	struct rtskb *tx_skbuff[TX_RING_SIZE]; /*** RTnet ***/
+	dma_addr_t tx_skbuff_dma[TX_RING_SIZE];
+
+	/* Tx bounce buffers */
+	unsigned char *tx_buf[TX_RING_SIZE];
+	unsigned char *tx_bufs;
+	dma_addr_t tx_bufs_dma;
+
+	struct pci_dev *pdev;
+	struct net_device_stats stats;
+	struct timer_list timer;	/* Media monitoring timer. */
+	rtdm_lock_t lock;
+
+	/* Frequently used values: keep some adjacent for cache effect. */
+	int chip_id, drv_flags;
+	struct rx_desc *rx_head_desc;
+	unsigned int cur_rx, dirty_rx;		/* Producer/consumer ring indices */
+	unsigned int cur_tx, dirty_tx;
+	unsigned int rx_buf_sz;				/* Based on MTU+slack. */
+	u16 chip_cmd;						/* Current setting for ChipCmd */
+
+	/* These values keep track of the transceiver/media in use. */
+	unsigned int default_port:4;		/* Last dev->if_port value. */
+	u8 tx_thresh, rx_thresh;
+
+	/* MII transceiver section. */
+	unsigned char phys[MAX_MII_CNT];			/* MII device addresses. */
+	unsigned int mii_cnt;			/* number of MIIs found, but only the first one is used */
+	u16 mii_status;						/* last read MII status */
+	struct mii_if_info mii_if;
+	unsigned int mii_if_force_media; /*** RTnet, support for older kernels (e.g. 2.4.19) ***/
+
+	rtdm_irq_t irq_handle;
+};
+
+/*** RTnet ***/
+static int  mdio_read(struct rtnet_device *dev, int phy_id, int location);
+static void mdio_write(struct rtnet_device *dev, int phy_id, int location, int value);
+static int  via_rhine_open(struct rtnet_device *dev);
+static void via_rhine_check_duplex(struct rtnet_device *dev);
+/*static void via_rhine_timer(unsigned long data);
+static void via_rhine_tx_timeout(struct net_device *dev);*/
+static int  via_rhine_start_tx(struct rtskb *skb, struct rtnet_device *dev);
+static int via_rhine_interrupt(rtdm_irq_t *irq_handle);
+static void via_rhine_tx(struct rtnet_device *dev);
+static void via_rhine_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp);
+static void via_rhine_error(struct rtnet_device *dev, int intr_status);
+static void via_rhine_set_rx_mode(struct rtnet_device *dev);
+static struct net_device_stats *via_rhine_get_stats(struct rtnet_device *rtdev);
+/*static int netdev_ioctl(struct net_device *dev, struct ifreq *rq, int cmd);*/
+static int  via_rhine_close(struct rtnet_device *dev);
+/*** RTnet ***/
+
+static inline u32 get_intr_status(struct rtnet_device *dev) /*** RTnet ***/
+{
+	void *ioaddr = (void *)dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	u32 intr_status;
+
+	intr_status = readw(ioaddr + IntrStatus);
+	/* On Rhine-II, Bit 3 indicates Tx descriptor write-back race. */
+	if (np->chip_id == VT6102)
+		intr_status |= readb(ioaddr + IntrStatus2) << 16;
+	return intr_status;
+}
+
+static void wait_for_reset(struct rtnet_device *dev, int chip_id, char *name) /*** RTnet ***/
+{
+	void *ioaddr = (void *)dev->base_addr;
+	int boguscnt = 20;
+
+	IOSYNC;
+
+	if (readw(ioaddr + ChipCmd) & CmdReset) {
+		printk(KERN_INFO "%s: Reset not complete yet. "
+			"Trying harder.\n", name);
+
+		/* Rhine-II needs to be forced sometimes */
+		if (chip_id == VT6102)
+			writeb(0x40, ioaddr + MiscCmd);
+
+		/* VT86C100A may need long delay after reset (dlink) */
+		/* Seen on Rhine-II as well (rl) */
+		while ((readw(ioaddr + ChipCmd) & CmdReset) && --boguscnt)
+			udelay(5);
+
+	}
+
+	if (local_debug > 1)
+		printk(KERN_INFO "%s: Reset %s.\n", name,
+			boguscnt ? "succeeded" : "failed");
+}
+
+#ifdef USE_MEM
+static void enable_mmio(long ioaddr, int chip_id)
+{
+	int n;
+	if (chip_id == VT86C100A) {
+		/* More recent docs say that this bit is reserved ... */
+		n = inb(ioaddr + ConfigA) | 0x20;
+		outb(n, ioaddr + ConfigA);
+	} else {
+		n = inb(ioaddr + ConfigD) | 0x80;
+		outb(n, ioaddr + ConfigD);
+	}
+}
+#endif
+
+static void reload_eeprom(long ioaddr)
+{
+	int i;
+	outb(0x20, ioaddr + MACRegEEcsr);
+	/* Typically 2 cycles to reload. */
+	for (i = 0; i < 150; i++)
+		if (! (inb(ioaddr + MACRegEEcsr) & 0x20))
+			break;
+}
+
+static int via_rhine_init_one (struct pci_dev *pdev,
+					 const struct pci_device_id *ent)
+{
+	struct rtnet_device *dev; /*** RTnet ***/
+	struct netdev_private *np;
+	int i, option;
+	int chip_id = (int) ent->driver_data;
+	static int card_idx = -1;
+	void *ioaddr;
+	long memaddr;
+	unsigned int io_size;
+	int pci_flags;
+#ifdef USE_MEM
+	long ioaddr0;
+#endif
+
+/* when built into the kernel, we only print version if device is found */
+#ifndef MODULE
+	static int printed_version;
+	if (!printed_version++)
+		printk(version);
+#endif
+
+	card_idx++;
+	option = card_idx < MAX_UNITS ? options[card_idx] : 0;
+	io_size = via_rhine_chip_info[chip_id].io_size;
+	pci_flags = via_rhine_chip_info[chip_id].pci_flags;
+
+/*** RTnet ***/
+	if (cards[card_idx] == 0)
+		goto err_out;
+/*** RTnet ***/
+
+	if (pci_enable_device (pdev))
+		goto err_out;
+
+	/* this should always be supported */
+	if (dma_set_mask(&pdev->dev, DMA_BIT_MASK(32))) {
+		printk(KERN_ERR "32-bit PCI DMA addresses not supported by the card!?\n");
+		goto err_out;
+	}
+
+	/* sanity check */
+	if ((pci_resource_len (pdev, 0) < io_size) ||
+	    (pci_resource_len (pdev, 1) < io_size)) {
+		printk (KERN_ERR "Insufficient PCI resources, aborting\n");
+		goto err_out;
+	}
+
+	ioaddr = (void *)pci_resource_start (pdev, 0);
+	memaddr = pci_resource_start (pdev, 1);
+
+	if (pci_flags & PCI_USES_MASTER)
+		pci_set_master (pdev);
+
+/*** RTnet ***/
+	dev = rt_alloc_etherdev(sizeof(struct netdev_private),
+							RX_RING_SIZE * 2 + TX_RING_SIZE);
+	if (dev == NULL) {
+		printk (KERN_ERR "init_ethernet failed for card #%d\n", card_idx);
+		goto err_out;
+	}
+	rtdev_alloc_name(dev, "rteth%d");
+	rt_rtdev_connect(dev, &RTDEV_manager);
+	dev->vers = RTDEV_VERS_2_0;
+	dev->sysbind = &pdev->dev;
+/*** RTnet ***/
+
+	if (pci_request_regions(pdev, shortname))
+		goto err_out_free_netdev;
+
+#ifdef USE_MEM
+	ioaddr0 = (long)ioaddr;
+	enable_mmio(ioaddr0, chip_id);
+
+	ioaddr = ioremap (memaddr, io_size);
+	if (!ioaddr) {
+		printk (KERN_ERR "ioremap failed for device %s, region 0x%X @ 0x%lX\n",
+				pci_name(pdev), io_size, memaddr);
+		goto err_out_free_res;
+	}
+
+	/* Check that selected MMIO registers match the PIO ones */
+	i = 0;
+	while (mmio_verify_registers[i]) {
+		int reg = mmio_verify_registers[i++];
+		unsigned char a = inb(ioaddr0+reg);
+		unsigned char b = readb(ioaddr+reg);
+		if (a != b) {
+			printk (KERN_ERR "MMIO do not match PIO [%02x] (%02x != %02x)\n",
+					reg, a, b);
+			goto err_out_unmap;
+		}
+	}
+#endif
+
+	/* D-Link provided reset code (with comment additions) */
+	if (via_rhine_chip_info[chip_id].drv_flags & HasWOL) {
+		unsigned char byOrgValue;
+
+		/* clear sticky bit before reset & read ethernet address */
+		byOrgValue = readb(ioaddr + StickyHW);
+		byOrgValue = byOrgValue & 0xFC;
+		writeb(byOrgValue, ioaddr + StickyHW);
+
+		/* (bits written are cleared?) */
+		/* disable force PME-enable */
+		writeb(0x80, ioaddr + WOLcgClr);
+		/* disable power-event config bit */
+		writeb(0xFF, ioaddr + WOLcrClr);
+		/* clear power status (undocumented in vt6102 docs?) */
+		writeb(0xFF, ioaddr + PwrcsrClr);
+	}
+
+	/* Reset the chip to erase previous misconfiguration. */
+	writew(CmdReset, ioaddr + ChipCmd);
+
+	dev->base_addr = (long)ioaddr;
+	wait_for_reset(dev, chip_id, shortname);
+
+	/* Reload the station address from the EEPROM. */
+#ifdef USE_IO
+	reload_eeprom((long)ioaddr);
+#else
+	reload_eeprom(ioaddr0);
+	/* Reloading from eeprom overwrites cfgA-D, so we must re-enable MMIO.
+	   If reload_eeprom() was done first this could be avoided, but it is
+	   not known if that still works with the "win98-reboot" problem. */
+	enable_mmio(ioaddr0, chip_id);
+#endif
+
+	for (i = 0; i < 6; i++)
+		dev->dev_addr[i] = readb(ioaddr + StationAddr + i);
+
+	if (!is_valid_ether_addr(dev->dev_addr)) {
+		printk(KERN_ERR "Invalid MAC address for card #%d\n", card_idx);
+		goto err_out_unmap;
+	}
+
+	if (chip_id == VT6102) {
+		/*
+		 * On the 3065D, an EEPROM reload turns on bit 0 in MAC_REG_CFGA,
+		 * which makes the MAC receive magic packets automatically.
+		 * So we turn it off. (D-Link)
+		 */
+		writeb(readb(ioaddr + ConfigA) & 0xFE, ioaddr + ConfigA);
+	}
+
+	/* Select backoff algorithm */
+	if (backoff)
+		writeb(readb(ioaddr + ConfigD) & (0xF0 | backoff),
+			ioaddr + ConfigD);
+
+	dev->irq = pdev->irq;
+
+	np = dev->priv;
+	rtdm_lock_init (&np->lock);
+	np->chip_id = chip_id;
+	np->drv_flags = via_rhine_chip_info[chip_id].drv_flags;
+	np->pdev = pdev;
+/*** RTnet ***
+	np->mii_if.dev = dev;
+	np->mii_if.mdio_read = mdio_read;
+	np->mii_if.mdio_write = mdio_write;
+	np->mii_if.phy_id_mask = 0x1f;
+	np->mii_if.reg_num_mask = 0x1f;
+ *** RTnet ***/
+
+	if (dev->mem_start)
+		option = dev->mem_start;
+
+	/* The chip-specific entries in the device structure. */
+	dev->open = via_rhine_open;
+	dev->hard_start_xmit = via_rhine_start_tx;
+	dev->stop = via_rhine_close;
+	dev->get_stats = via_rhine_get_stats;
+/*** RTnet ***
+	dev->set_multicast_list = via_rhine_set_rx_mode;
+	dev->do_ioctl = netdev_ioctl;
+	dev->tx_timeout = via_rhine_tx_timeout;
+	dev->watchdog_timeo = TX_TIMEOUT;
+ *** RTnet ***/
+	if (np->drv_flags & ReqTxAlign)
+		dev->features |= NETIF_F_SG|NETIF_F_HW_CSUM;
+
+	/* dev->name not defined before register_netdev()! */
+/*** RTnet ***/
+	i = rt_register_rtnetdev(dev);
+	if (i) {
+		goto err_out_unmap;
+	}
+/*** RTnet ***/
+
+	/* The lower four bits are the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->mii_if.full_duplex = 1;
+		np->default_port = option & 15;
+	}
+	if (card_idx < MAX_UNITS  &&  full_duplex[card_idx] > 0)
+		np->mii_if.full_duplex = 1;
+
+	if (np->mii_if.full_duplex) {
+		printk(KERN_INFO "%s: Set to forced full duplex, autonegotiation"
+			   " disabled.\n", dev->name);
+		np->mii_if_force_media = 1; /*** RTnet ***/
+	}
+
+	printk(KERN_INFO "%s: %s at 0x%lx, ",
+		   dev->name, via_rhine_chip_info[chip_id].name,
+		   (pci_flags & PCI_USES_IO) ? (long)ioaddr : memaddr);
+
+	for (i = 0; i < 5; i++)
+			printk("%2.2x:", dev->dev_addr[i]);
+	printk("%2.2x, IRQ %d.\n", dev->dev_addr[i], pdev->irq);
+
+	pci_set_drvdata(pdev, dev);
+
+	if (np->drv_flags & CanHaveMII) {
+		int phy, phy_idx = 0;
+		np->phys[0] = 1;		/* Standard for this chip. */
+		for (phy = 1; phy < 32 && phy_idx < MAX_MII_CNT; phy++) {
+			int mii_status = mdio_read(dev, phy, 1);
+			if (mii_status != 0xffff  &&  mii_status != 0x0000) {
+				np->phys[phy_idx++] = phy;
+				np->mii_if.advertising = mdio_read(dev, phy, 4);
+				printk(KERN_INFO "%s: MII PHY found at address %d, status "
+					   "0x%4.4x advertising %4.4x Link %4.4x.\n",
+					   dev->name, phy, mii_status, np->mii_if.advertising,
+					   mdio_read(dev, phy, 5));
+
+				/* set IFF_RUNNING */
+				if (mii_status & BMSR_LSTATUS)
+					rtnetif_carrier_on(dev); /*** RTnet ***/
+				else
+					rtnetif_carrier_off(dev); /*** RTnet ***/
+			}
+		}
+		np->mii_cnt = phy_idx;
+		np->mii_if.phy_id = np->phys[0];
+	}
+
+	/* Allow forcing the media type. */
+	if (option > 0) {
+		if (option & 0x220)
+			np->mii_if.full_duplex = 1;
+		np->default_port = option & 0x3ff;
+		if (np->default_port & 0x330) {
+			/* FIXME: shouldn't someone check this variable? */
+			/* np->medialock = 1; */
+			printk(KERN_INFO "  Forcing %dMbs %s-duplex operation.\n",
+				   (option & 0x300 ? 100 : 10),
+				   (option & 0x220 ? "full" : "half"));
+			if (np->mii_cnt)
+				mdio_write(dev, np->phys[0], MII_BMCR,
+						   ((option & 0x300) ? 0x2000 : 0) |  /* 100mbps? */
+						   ((option & 0x220) ? 0x0100 : 0));  /* Full duplex? */
+		}
+	}
+
+	return 0;
+
+err_out_unmap:
+#ifdef USE_MEM
+	iounmap((void *)ioaddr);
+err_out_free_res:
+#endif
+	pci_release_regions(pdev);
+err_out_free_netdev:
+/*** RTnet ***/
+	rt_rtdev_disconnect(dev);
+	rtdev_free(dev);
+/*** RTnet ***/
+err_out:
+	return -ENODEV;
+}
+
+static int alloc_ring(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ring;
+	dma_addr_t ring_dma;
+
+	ring = dma_alloc_coherent(&np->pdev->dev,
+				  RX_RING_SIZE * sizeof(struct rx_desc) +
+				  TX_RING_SIZE * sizeof(struct tx_desc),
+				  &ring_dma, GFP_ATOMIC);
+	if (!ring) {
+		printk(KERN_ERR "Could not allocate DMA memory.\n");
+		return -ENOMEM;
+	}
+	if (np->drv_flags & ReqTxAlign) {
+		np->tx_bufs = dma_alloc_coherent(&np->pdev->dev,
+						 PKT_BUF_SZ * TX_RING_SIZE,
+						 &np->tx_bufs_dma, GFP_ATOMIC);
+		if (np->tx_bufs == NULL) {
+			dma_free_coherent(&np->pdev->dev,
+					  RX_RING_SIZE * sizeof(struct rx_desc) +
+					  TX_RING_SIZE * sizeof(struct tx_desc),
+					  ring, ring_dma);
+			return -ENOMEM;
+		}
+	}
+
+	np->rx_ring = ring;
+	np->tx_ring = ring + RX_RING_SIZE * sizeof(struct rx_desc);
+	np->rx_ring_dma = ring_dma;
+	np->tx_ring_dma = ring_dma + RX_RING_SIZE * sizeof(struct rx_desc);
+
+	return 0;
+}
+
+void free_ring(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+
+	dma_free_coherent(&np->pdev->dev,
+			  RX_RING_SIZE * sizeof(struct rx_desc) +
+			  TX_RING_SIZE * sizeof(struct tx_desc),
+			  np->rx_ring, np->rx_ring_dma);
+	np->tx_ring = NULL;
+
+	if (np->tx_bufs)
+		dma_free_coherent(&np->pdev->dev, PKT_BUF_SZ * TX_RING_SIZE,
+				  np->tx_bufs, np->tx_bufs_dma);
+
+	np->tx_bufs = NULL;
+
+}
+
+static void alloc_rbufs(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	dma_addr_t next;
+	int i;
+
+	np->dirty_rx = np->cur_rx = 0;
+
+	np->rx_buf_sz = (dev->mtu <= 1500 ? PKT_BUF_SZ : dev->mtu + 32);
+	np->rx_head_desc = &np->rx_ring[0];
+	next = np->rx_ring_dma;
+
+	/* Init the ring entries */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].rx_status = 0;
+		np->rx_ring[i].desc_length = cpu_to_le32(np->rx_buf_sz);
+		next += sizeof(struct rx_desc);
+		np->rx_ring[i].next_desc = cpu_to_le32(next);
+		np->rx_skbuff[i] = 0;
+	}
+	/* Mark the last entry as wrapping the ring. */
+	np->rx_ring[i-1].next_desc = cpu_to_le32(np->rx_ring_dma);
+
+	/* Fill in the Rx buffers.  Handle allocation failure gracefully. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		struct rtskb *skb = rtnetdev_alloc_rtskb(dev, np->rx_buf_sz); /*** RTnet ***/
+		np->rx_skbuff[i] = skb;
+		if (skb == NULL)
+			break;
+		np->rx_skbuff_dma[i] =
+			dma_map_single(&np->pdev->dev, skb->tail, np->rx_buf_sz,
+				       DMA_FROM_DEVICE);
+
+		np->rx_ring[i].addr = cpu_to_le32(np->rx_skbuff_dma[i]);
+		np->rx_ring[i].rx_status = cpu_to_le32(DescOwn);
+	}
+	np->dirty_rx = (unsigned int)(i - RX_RING_SIZE);
+}
+
+static void free_rbufs(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	/* Free all the skbuffs in the Rx queue. */
+	for (i = 0; i < RX_RING_SIZE; i++) {
+		np->rx_ring[i].rx_status = 0;
+		np->rx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+		if (np->rx_skbuff[i]) {
+			dma_unmap_single(&np->pdev->dev, np->rx_skbuff_dma[i],
+					 np->rx_buf_sz, DMA_FROM_DEVICE);
+			dev_kfree_rtskb(np->rx_skbuff[i]); /*** RTnet ***/
+		}
+		np->rx_skbuff[i] = 0;
+	}
+}
+
+static void alloc_tbufs(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	dma_addr_t next;
+	int i;
+
+	np->dirty_tx = np->cur_tx = 0;
+	next = np->tx_ring_dma;
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_skbuff[i] = 0;
+		np->tx_ring[i].tx_status = 0;
+		np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+		next += sizeof(struct tx_desc);
+		np->tx_ring[i].next_desc = cpu_to_le32(next);
+		np->tx_buf[i] = &np->tx_bufs[i * PKT_BUF_SZ];
+	}
+	np->tx_ring[i-1].next_desc = cpu_to_le32(np->tx_ring_dma);
+
+}
+
+static void free_tbufs(struct rtnet_device* dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int i;
+
+	for (i = 0; i < TX_RING_SIZE; i++) {
+		np->tx_ring[i].tx_status = 0;
+		np->tx_ring[i].desc_length = cpu_to_le32(TXDESC);
+		np->tx_ring[i].addr = cpu_to_le32(0xBADF00D0); /* An invalid address. */
+		if (np->tx_skbuff[i]) {
+			if (np->tx_skbuff_dma[i]) {
+				dma_unmap_single(&np->pdev->dev,
+						 np->tx_skbuff_dma[i],
+						 np->tx_skbuff[i]->len,
+						 DMA_TO_DEVICE);
+			}
+			dev_kfree_rtskb(np->tx_skbuff[i]); /*** RTnet ***/
+		}
+		np->tx_skbuff[i] = 0;
+		np->tx_buf[i] = 0;
+	}
+}
+
+static void init_registers(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int i;
+
+	for (i = 0; i < 6; i++)
+		writeb(dev->dev_addr[i], ioaddr + StationAddr + i);
+
+	/* Initialize other registers. */
+	writew(0x0006, ioaddr + PCIBusConfig);	/* Tune configuration??? */
+	/* Configure initial FIFO thresholds. */
+	writeb(0x20, ioaddr + TxConfig);
+	np->tx_thresh = 0x20;
+	np->rx_thresh = 0x60;			/* Written in via_rhine_set_rx_mode(). */
+	np->mii_if.full_duplex = 0;
+
+	if (dev->if_port == 0)
+		dev->if_port = np->default_port;
+
+	writel(np->rx_ring_dma, ioaddr + RxRingPtr);
+	writel(np->tx_ring_dma, ioaddr + TxRingPtr);
+
+	via_rhine_set_rx_mode(dev);
+
+	/* Enable interrupts by setting the interrupt mask. */
+	writew(IntrRxDone | IntrRxErr | IntrRxEmpty| IntrRxOverflow |
+		   IntrRxDropped | IntrRxNoBuf | IntrTxAborted |
+		   IntrTxDone | IntrTxError | IntrTxUnderrun |
+		   IntrPCIErr | IntrStatsMax | IntrLinkChange,
+		   ioaddr + IntrEnable);
+
+	np->chip_cmd = CmdStart|CmdTxOn|CmdRxOn|CmdNoTxPoll;
+	if (np->mii_if_force_media) /*** RTnet ***/
+		np->chip_cmd |= CmdFDuplex;
+	writew(np->chip_cmd, ioaddr + ChipCmd);
+
+	via_rhine_check_duplex(dev);
+
+	/* The LED outputs of various MII xcvrs should be configured.  */
+	/* For NS or Mison phys, turn on bit 1 in register 0x17 */
+	/* For ESI phys, turn on bit 7 in register 0x17. */
+	/* Note: the conditional needs its own parentheses; plain '|' binds
+	   tighter than '?:'. */
+	mdio_write(dev, np->phys[0], 0x17, mdio_read(dev, np->phys[0], 0x17) |
+			   ((np->drv_flags & HasESIPhy) ? 0x0080 : 0x0001));
+}
+/* Read and write over the MII Management Data I/O (MDIO) interface. */
+
+static int mdio_read(struct rtnet_device *dev, int phy_id, int regnum) /*** RTnet ***/
+{
+	void *ioaddr = (void *)dev->base_addr;
+	int boguscnt = 1024;
+
+	/* Wait for a previous command to complete. */
+	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+		;
+	writeb(0x00, ioaddr + MIICmd);
+	writeb(phy_id, ioaddr + MIIPhyAddr);
+	writeb(regnum, ioaddr + MIIRegAddr);
+	writeb(0x40, ioaddr + MIICmd);			/* Trigger read */
+	boguscnt = 1024;
+	while ((readb(ioaddr + MIICmd) & 0x40) && --boguscnt > 0)
+		;
+	return readw(ioaddr + MIIData);
+}
+
+static void mdio_write(struct rtnet_device *dev, int phy_id, int regnum, int value) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int boguscnt = 1024;
+
+	if (phy_id == np->phys[0]) {
+		switch (regnum) {
+		case MII_BMCR:					/* Is user forcing speed/duplex? */
+			if (value & 0x9000)			/* Autonegotiation. */
+				np->mii_if_force_media = 0; /*** RTnet ***/
+			else
+				np->mii_if.full_duplex = (value & 0x0100) ? 1 : 0;
+			break;
+		case MII_ADVERTISE:
+			np->mii_if.advertising = value;
+			break;
+		}
+	}
+
+	/* Wait for a previous command to complete. */
+	while ((readb(ioaddr + MIICmd) & 0x60) && --boguscnt > 0)
+		;
+	writeb(0x00, ioaddr + MIICmd);
+	writeb(phy_id, ioaddr + MIIPhyAddr);
+	writeb(regnum, ioaddr + MIIRegAddr);
+	writew(value, ioaddr + MIIData);
+	writeb(0x20, ioaddr + MIICmd);			/* Trigger write. */
+}
+
+
+static int via_rhine_open(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int i;
+
+	/* Reset the chip. */
+	writew(CmdReset, ioaddr + ChipCmd);
+
+/*** RTnet ***/
+	rt_stack_connect(dev, &STACK_manager);
+	i = rtdm_irq_request(&np->irq_handle, dev->irq, via_rhine_interrupt,
+			     RTDM_IRQTYPE_SHARED, "rt_via-rhine", dev);
+/*** RTnet ***/
+	if (i) {
+		return i;
+	}
+
+	if (local_debug > 1)
+		printk(KERN_DEBUG "%s: via_rhine_open() irq %d.\n",
+			   dev->name, np->pdev->irq);
+
+	i = alloc_ring(dev);
+	if (i) {
+		return i;
+	}
+	alloc_rbufs(dev);
+	alloc_tbufs(dev);
+	wait_for_reset(dev, np->chip_id, dev->name);
+	init_registers(dev);
+	if (local_debug > 2)
+		printk(KERN_DEBUG "%s: Done via_rhine_open(), status %4.4x "
+			   "MII status: %4.4x.\n",
+			   dev->name, readw(ioaddr + ChipCmd),
+			   mdio_read(dev, np->phys[0], MII_BMSR));
+
+	rtnetif_start_queue(dev); /*** RTnet ***/
+
+/*** RTnet ***/
+	/* Set the timer to check for link beat. */
+/*** RTnet ***/
+
+	return 0;
+}
+
+static void via_rhine_check_duplex(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int mii_lpa = mdio_read(dev, np->phys[0], MII_LPA);
+	int negotiated = mii_lpa & np->mii_if.advertising;
+	int duplex;
+
+	if (np->mii_if_force_media  ||  mii_lpa == 0xffff) /*** RTnet ***/
+		return;
+	duplex = (negotiated & 0x0100) || (negotiated & 0x01C0) == 0x0040;
+	if (np->mii_if.full_duplex != duplex) {
+		np->mii_if.full_duplex = duplex;
+		if (local_debug)
+			printk(KERN_INFO "%s: Setting %s-duplex based on MII #%d link"
+				   " partner capability of %4.4x.\n", dev->name,
+				   duplex ? "full" : "half", np->phys[0], mii_lpa);
+		if (duplex)
+			np->chip_cmd |= CmdFDuplex;
+		else
+			np->chip_cmd &= ~CmdFDuplex;
+		writew(np->chip_cmd, ioaddr + ChipCmd);
+	}
+}
+
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int via_rhine_start_tx(struct rtskb *skb, struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	unsigned entry;
+	u32 intr_status;
+/*** RTnet ***/
+	rtdm_lockctx_t context;
+/*** RTnet ***/
+
+	/* Caution: the write order is important here, set the field
+	   with the "ownership" bits last. */
+
+	/* Calculate the next Tx descriptor entry. */
+	entry = np->cur_tx % TX_RING_SIZE;
+
+	if (skb->len < ETH_ZLEN) {
+		skb = rtskb_padto(skb, ETH_ZLEN);
+		if(skb == NULL)
+			return 0;
+	}
+
+	np->tx_skbuff[entry] = skb;
+
+	if ((np->drv_flags & ReqTxAlign) &&
+		(((long)skb->data & 3) || /*** RTnet skb_shinfo(skb)->nr_frags != 0 || RTnet ***/ skb->ip_summed == CHECKSUM_PARTIAL)
+		) {
+		/* Must use alignment buffer. */
+		if (skb->len > PKT_BUF_SZ) {
+			/* packet too long, drop it */
+			dev_kfree_rtskb(skb); /*** RTnet ***/
+			np->tx_skbuff[entry] = NULL;
+			np->stats.tx_dropped++;
+			return 0;
+		}
+
+/*** RTnet ***/
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp) {
+			rtdm_lock_get_irqsave(&np->lock, context);
+
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+				*skb->xmit_stamp);
+
+			rtskb_copy_and_csum_dev(skb, np->tx_buf[entry]);
+		} else {
+			 /* no need to block the interrupts during copy */
+			rtskb_copy_and_csum_dev(skb, np->tx_buf[entry]);
+
+			rtdm_lock_get_irqsave(&np->lock, context);
+		}
+/*** RTnet ***/
+
+		np->tx_skbuff_dma[entry] = 0;
+		np->tx_ring[entry].addr = cpu_to_le32(np->tx_bufs_dma +
+										  (np->tx_buf[entry] - np->tx_bufs));
+	} else {
+		np->tx_skbuff_dma[entry] =
+			dma_map_single(&np->pdev->dev, skb->data, skb->len,
+				       DMA_TO_DEVICE);
+		np->tx_ring[entry].addr = cpu_to_le32(np->tx_skbuff_dma[entry]);
+
+/*** RTnet ***/
+		rtdm_lock_get_irqsave(&np->lock, context);
+
+		/* get and patch time stamp just before the transmission */
+		if (skb->xmit_stamp)
+			*skb->xmit_stamp = cpu_to_be64(rtdm_clock_read() +
+				*skb->xmit_stamp);
+/*** RTnet ***/
+	}
+
+	np->tx_ring[entry].desc_length =
+		cpu_to_le32(TXDESC | (skb->len >= ETH_ZLEN ? skb->len : ETH_ZLEN));
+
+	wmb();
+	np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+	wmb();
+
+	np->cur_tx++;
+
+	/* Non-x86 Todo: explicitly flush cache lines here. */
+
+	/*
+	 * Wake the potentially-idle transmit channel unless errors are
+	 * pending (the ISR must sort them out first).
+	 */
+	intr_status = get_intr_status(dev);
+	if ((intr_status & IntrTxErrSummary) == 0) {
+		writew(CmdTxDemand | np->chip_cmd, (void *)dev->base_addr + ChipCmd);
+	}
+	IOSYNC;
+
+	if (np->cur_tx == np->dirty_tx + TX_QUEUE_LEN)
+		rtnetif_stop_queue(dev); /*** RTnet ***/
+
+	/*dev->trans_start = jiffies; *** RTnet ***/
+
+/*** RTnet ***/
+	rtdm_lock_put_irqrestore(&np->lock, context);
+/*** RTnet ***/
+
+	if (local_debug > 4) {
+		rtdm_printk(KERN_DEBUG "%s: Transmit frame #%d queued in slot %d.\n", /*** RTnet ***/
+			   dev->name, np->cur_tx-1, entry);
+	}
+	return 0;
+}
+
+/* The interrupt handler does all of the Rx thread work and cleans up
+   after the Tx thread. */
+static int via_rhine_interrupt(rtdm_irq_t *irq_handle) /*** RTnet ***/
+{
+	nanosecs_abs_t time_stamp = rtdm_clock_read(); /*** RTnet ***/
+	struct rtnet_device *dev =
+	    rtdm_irq_get_arg(irq_handle, struct rtnet_device); /*** RTnet ***/
+	long ioaddr;
+	u32 intr_status;
+	int boguscnt = max_interrupt_work;
+	struct netdev_private *np = dev->priv; /*** RTnet ***/
+	unsigned int old_packet_cnt = np->stats.rx_packets; /*** RTnet ***/
+	int ret = RTDM_IRQ_NONE;
+
+	ioaddr = dev->base_addr;
+
+	while ((intr_status = get_intr_status(dev))) {
+		/* Acknowledge all of the current interrupt sources ASAP. */
+		if (intr_status & IntrTxDescRace)
+			writeb(0x08, (void *)ioaddr + IntrStatus2);
+		writew(intr_status & 0xffff, (void *)ioaddr + IntrStatus);
+		IOSYNC;
+
+		ret = RTDM_IRQ_HANDLED;
+
+		if (local_debug > 4)
+			rtdm_printk(KERN_DEBUG "%s: Interrupt, status %8.8x.\n", /*** RTnet ***/
+				   dev->name, intr_status);
+
+		if (intr_status & (IntrRxDone | IntrRxErr | IntrRxDropped |
+						   IntrRxWakeUp | IntrRxEmpty | IntrRxNoBuf))
+			via_rhine_rx(dev, &time_stamp);
+
+		if (intr_status & (IntrTxErrSummary | IntrTxDone)) {
+			if (intr_status & IntrTxErrSummary) {
+/*** RTnet ***/
+				rtdm_printk(KERN_ERR "%s: via_rhine_interrupt(), Transmission error\n", dev->name);
+/*** RTnet ***/
+			}
+			via_rhine_tx(dev);
+		}
+
+		/* Abnormal error summary/uncommon events handlers. */
+		if (intr_status & (IntrPCIErr | IntrLinkChange |
+				   IntrStatsMax | IntrTxError | IntrTxAborted |
+				   IntrTxUnderrun | IntrTxDescRace))
+			via_rhine_error(dev, intr_status);
+
+		if (--boguscnt < 0) {
+			rtdm_printk(KERN_WARNING "%s: Too much work at interrupt, " /*** RTnet ***/
+				   "status=%#8.8x.\n",
+				   dev->name, intr_status);
+			break;
+		}
+	}
+
+	if (local_debug > 3)
+		rtdm_printk(KERN_DEBUG "%s: exiting interrupt, status=%8.8x.\n", /*** RTnet ***/
+			   dev->name, readw((void *)ioaddr + IntrStatus));
+
+/*** RTnet ***/
+	if (old_packet_cnt != np->stats.rx_packets)
+		rt_mark_stack_mgr(dev);
+	return ret;
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity. */
+static void via_rhine_tx(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int txstatus = 0, entry = np->dirty_tx % TX_RING_SIZE;
+
+	rtdm_lock_get(&np->lock); /*** RTnet ***/
+
+	/* find and cleanup dirty tx descriptors */
+	while (np->dirty_tx != np->cur_tx) {
+		txstatus = le32_to_cpu(np->tx_ring[entry].tx_status);
+		if (local_debug > 6)
+			rtdm_printk(KERN_DEBUG " Tx scavenge %d status %8.8x.\n", /*** RTnet ***/
+				   entry, txstatus);
+		if (txstatus & DescOwn)
+			break;
+		if (txstatus & 0x8000) {
+			if (local_debug > 1)
+				rtdm_printk(KERN_DEBUG "%s: Transmit error, Tx status %8.8x.\n", /*** RTnet ***/
+					   dev->name, txstatus);
+			np->stats.tx_errors++;
+			if (txstatus & 0x0400) np->stats.tx_carrier_errors++;
+			if (txstatus & 0x0200) np->stats.tx_window_errors++;
+			if (txstatus & 0x0100) np->stats.tx_aborted_errors++;
+			if (txstatus & 0x0080) np->stats.tx_heartbeat_errors++;
+			if (((np->chip_id == VT86C100A) && txstatus & 0x0002) ||
+				(txstatus & 0x0800) || (txstatus & 0x1000)) {
+				np->stats.tx_fifo_errors++;
+				np->tx_ring[entry].tx_status = cpu_to_le32(DescOwn);
+				break; /* Keep the skb - we try again */
+			}
+			/* Transmitter restarted in 'abnormal' handler. */
+		} else {
+			if (np->chip_id == VT86C100A)
+				np->stats.collisions += (txstatus >> 3) & 0x0F;
+			else
+				np->stats.collisions += txstatus & 0x0F;
+			if (local_debug > 6)
+				rtdm_printk(KERN_DEBUG "collisions: %1.1x:%1.1x\n", /*** RTnet ***/
+					(txstatus >> 3) & 0xF,
+					txstatus & 0xF);
+			np->stats.tx_bytes += np->tx_skbuff[entry]->len;
+			np->stats.tx_packets++;
+		}
+		/* Free the original skb. */
+		if (np->tx_skbuff_dma[entry]) {
+			dma_unmap_single(&np->pdev->dev,
+					 np->tx_skbuff_dma[entry],
+					 np->tx_skbuff[entry]->len,
+					 DMA_TO_DEVICE);
+		}
+		dev_kfree_rtskb(np->tx_skbuff[entry]); /*** RTnet ***/
+		np->tx_skbuff[entry] = NULL;
+		entry = (++np->dirty_tx) % TX_RING_SIZE;
+	}
+	if ((np->cur_tx - np->dirty_tx) < TX_QUEUE_LEN - 4)
+		rtnetif_wake_queue (dev); /*** RTnet ***/
+
+	rtdm_lock_put(&np->lock); /*** RTnet ***/
+}
+
+/* This routine is logically part of the interrupt handler, but isolated
+   for clarity and better register allocation. */
+static void via_rhine_rx(struct rtnet_device *dev, nanosecs_abs_t *time_stamp) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	int entry = np->cur_rx % RX_RING_SIZE;
+	int boguscnt = np->dirty_rx + RX_RING_SIZE - np->cur_rx;
+
+	if (local_debug > 4) {
+		rtdm_printk(KERN_DEBUG "%s: via_rhine_rx(), entry %d status %8.8x.\n", /*** RTnet ***/
+			   dev->name, entry, le32_to_cpu(np->rx_head_desc->rx_status));
+	}
+
+	/* If EOP is set on the next entry, it's a new packet. Send it up. */
+	while ( ! (np->rx_head_desc->rx_status & cpu_to_le32(DescOwn))) {
+		struct rx_desc *desc = np->rx_head_desc;
+		u32 desc_status = le32_to_cpu(desc->rx_status);
+		int data_size = desc_status >> 16;
+
+		if (local_debug > 4)
+			rtdm_printk(KERN_DEBUG "  via_rhine_rx() status is %8.8x.\n", /*** RTnet ***/
+				   desc_status);
+		if (--boguscnt < 0)
+			break;
+		if ( (desc_status & (RxWholePkt | RxErr)) !=  RxWholePkt) {
+			if ((desc_status & RxWholePkt) !=  RxWholePkt) {
+				rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame spanned " /*** RTnet ***/
+					   "multiple buffers, entry %#x length %d status %8.8x!\n",
+					   dev->name, entry, data_size, desc_status);
+				rtdm_printk(KERN_WARNING "%s: Oversized Ethernet frame %p vs %p.\n", /*** RTnet ***/
+					   dev->name, np->rx_head_desc, &np->rx_ring[entry]);
+				np->stats.rx_length_errors++;
+			} else if (desc_status & RxErr) {
+				/* There was a error. */
+				if (local_debug > 2)
+					rtdm_printk(KERN_DEBUG "  via_rhine_rx() Rx error was %8.8x.\n", /*** RTnet ***/
+						   desc_status);
+				np->stats.rx_errors++;
+				if (desc_status & 0x0030) np->stats.rx_length_errors++;
+				if (desc_status & 0x0048) np->stats.rx_fifo_errors++;
+				if (desc_status & 0x0004) np->stats.rx_frame_errors++;
+				if (desc_status & 0x0002)
+					/* RTnet: this is only updated in the interrupt handler */
+					np->stats.rx_crc_errors++;
+			}
+		} else {
+			struct rtskb *skb; /*** RTnet ***/
+			/* Length should omit the CRC */
+			int pkt_len = data_size - 4;
+
+			/* Check if the packet is long enough to accept without copying
+			   to a minimally-sized skbuff. */
+/*** RTnet ***/
+			{
+/*** RTnet ***/
+				skb = np->rx_skbuff[entry];
+				if (skb == NULL) {
+					rtdm_printk(KERN_ERR "%s: Inconsistent Rx descriptor chain.\n", /*** RTnet ***/
+						   dev->name);
+					break;
+				}
+				np->rx_skbuff[entry] = NULL;
+				rtskb_put(skb, pkt_len); /*** RTnet ***/
+				dma_unmap_single(&np->pdev->dev,
+						 np->rx_skbuff_dma[entry],
+						 np->rx_buf_sz,
+						 DMA_FROM_DEVICE);
+			}
+/*** RTnet ***/
+			skb->protocol = rt_eth_type_trans(skb, dev);
+			skb->time_stamp = *time_stamp;
+			rtnetif_rx(skb);
+			/*dev->last_rx = jiffies;*/
+/*** RTnet ***/
+			np->stats.rx_bytes += pkt_len;
+			np->stats.rx_packets++;
+		}
+		entry = (++np->cur_rx) % RX_RING_SIZE;
+		np->rx_head_desc = &np->rx_ring[entry];
+	}
+
+	/* Refill the Rx ring buffers. */
+	for (; np->cur_rx - np->dirty_rx > 0; np->dirty_rx++) {
+		struct rtskb *skb; /*** RTnet ***/
+		entry = np->dirty_rx % RX_RING_SIZE;
+		if (np->rx_skbuff[entry] == NULL) {
+			skb = rtnetdev_alloc_rtskb(dev, np->rx_buf_sz); /*** RTnet ***/
+			np->rx_skbuff[entry] = skb;
+			if (skb == NULL)
+				break;			/* Better luck next round. */
+			np->rx_skbuff_dma[entry] =
+				dma_map_single(&np->pdev->dev, skb->tail,
+					       np->rx_buf_sz, DMA_FROM_DEVICE);
+			np->rx_ring[entry].addr = cpu_to_le32(np->rx_skbuff_dma[entry]);
+		}
+		np->rx_ring[entry].rx_status = cpu_to_le32(DescOwn);
+	}
+
+	/* Pre-emptively restart Rx engine. */
+	writew(readw((void *)dev->base_addr + ChipCmd) | CmdRxOn | CmdRxDemand,
+		   (void *)dev->base_addr + ChipCmd);
+}
+
+/* Clears the "tally counters" for CRC errors and missed frames(?).
+   It has been reported that some chips need a write of 0 to clear
+   these, for others the counters are set to 1 when written to and
+   instead cleared when read. So we clear them both ways ... */
+static inline void clear_tally_counters(void *ioaddr)
+{
+	writel(0, ioaddr + RxMissed);
+	readw(ioaddr + RxCRCErrs);
+	readw(ioaddr + RxMissed);
+}
+
+static void via_rhine_restart_tx(struct rtnet_device *dev) { /*** RTnet ***/
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	int entry = np->dirty_tx % TX_RING_SIZE;
+	u32 intr_status;
+
+	/*
+	 * If new errors occurred, we need to sort them out before doing Tx.
+	 * In that case the ISR will be back here RSN anyway.
+	 */
+	intr_status = get_intr_status(dev);
+
+	if ((intr_status & IntrTxErrSummary) == 0) {
+
+		/* We know better than the chip where it should continue. */
+		writel(np->tx_ring_dma + entry * sizeof(struct tx_desc),
+			   ioaddr + TxRingPtr);
+
+		writew(CmdTxDemand | np->chip_cmd, ioaddr + ChipCmd);
+		IOSYNC;
+	}
+	else {
+		/* This should never happen */
+		if (local_debug > 1)
+			rtdm_printk(KERN_WARNING "%s: via_rhine_restart_tx() " /*** RTnet ***/
+				   "Another error occured %8.8x.\n",
+				   dev->name, intr_status);
+	}
+
+}
+
+static void via_rhine_error(struct rtnet_device *dev, int intr_status) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+
+	rtdm_lock_get(&np->lock); /*** RTnet ***/
+
+	if (intr_status & (IntrLinkChange)) {
+		if (readb(ioaddr + MIIStatus) & 0x02) {
+			/* Link failed, restart autonegotiation. */
+			if (np->drv_flags & HasDavicomPhy)
+				mdio_write(dev, np->phys[0], MII_BMCR, 0x3300);
+		} else
+			via_rhine_check_duplex(dev);
+		if (local_debug)
+			rtdm_printk(KERN_ERR "%s: MII status changed: Autonegotiation " /*** RTnet ***/
+				   "advertising %4.4x  partner %4.4x.\n", dev->name,
+			   mdio_read(dev, np->phys[0], MII_ADVERTISE),
+			   mdio_read(dev, np->phys[0], MII_LPA));
+	}
+	if (intr_status & IntrStatsMax) {
+		np->stats.rx_crc_errors	+= readw(ioaddr + RxCRCErrs);
+		np->stats.rx_missed_errors	+= readw(ioaddr + RxMissed);
+		clear_tally_counters(ioaddr);
+	}
+	if (intr_status & IntrTxAborted) {
+		if (local_debug > 1)
+			rtdm_printk(KERN_INFO "%s: Abort %8.8x, frame dropped.\n", /*** RTnet ***/
+				   dev->name, intr_status);
+	}
+	if (intr_status & IntrTxUnderrun) {
+		if (np->tx_thresh < 0xE0)
+			writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+		if (local_debug > 1)
+			rtdm_printk(KERN_INFO "%s: Transmitter underrun, Tx " /*** RTnet ***/
+				   "threshold now %2.2x.\n",
+				   dev->name, np->tx_thresh);
+	}
+	if (intr_status & IntrTxDescRace) {
+		if (local_debug > 2)
+			rtdm_printk(KERN_INFO "%s: Tx descriptor write-back race.\n", /*** RTnet ***/
+				   dev->name);
+	}
+	if ((intr_status & IntrTxError) && !(intr_status & (IntrTxAborted |
+		IntrTxUnderrun | IntrTxDescRace))) {
+		if (np->tx_thresh < 0xE0) {
+			writeb(np->tx_thresh += 0x20, ioaddr + TxConfig);
+		}
+		if (local_debug > 1)
+			rtdm_printk(KERN_INFO "%s: Unspecified error. Tx " /*** RTnet ***/
+				"threshold now %2.2x.\n",
+				dev->name, np->tx_thresh);
+	}
+	if (intr_status & ( IntrTxAborted | IntrTxUnderrun | IntrTxDescRace |
+		IntrTxError ))
+		via_rhine_restart_tx(dev);
+
+	if (intr_status & ~( IntrLinkChange | IntrStatsMax | IntrTxUnderrun |
+						 IntrTxError | IntrTxAborted | IntrNormalSummary |
+						 IntrTxDescRace )) {
+		if (local_debug > 1)
+			rtdm_printk(KERN_ERR "%s: Something Wicked happened! %8.8x.\n", /*** RTnet ***/
+				   dev->name, intr_status);
+	}
+
+	rtdm_lock_put(&np->lock); /*** RTnet ***/
+}
+
+static struct net_device_stats *via_rhine_get_stats(struct rtnet_device *rtdev)
+{
+	struct netdev_private *np = rtdev->priv;
+	long ioaddr = rtdev->base_addr;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&np->lock, context);
+	np->stats.rx_crc_errors	+= readw(ioaddr + RxCRCErrs);
+	np->stats.rx_missed_errors	+= readw(ioaddr + RxMissed);
+	clear_tally_counters((void *)ioaddr);
+	rtdm_lock_put_irqrestore(&np->lock, context);
+
+	return &np->stats;
+}
+
+static void via_rhine_set_rx_mode(struct rtnet_device *dev) /*** RTnet ***/
+{
+	struct netdev_private *np = dev->priv;
+	void *ioaddr = (void *)dev->base_addr;
+	u32 mc_filter[2];			/* Multicast hash filter */
+	u8 rx_mode;					/* Note: 0x02=accept runt, 0x01=accept errs */
+
+	if (dev->flags & IFF_PROMISC) {			/* Set promiscuous. */
+		/* Unconditionally log net taps. */
+		printk(KERN_NOTICE "%s: Promiscuous mode enabled.\n", dev->name);
+		rx_mode = 0x1C;
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter0);
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter1);
+	} else if (dev->flags & IFF_ALLMULTI) {
+		/* Too many to match, or accept all multicasts. */
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter0);
+		writel(0xffffffff, (void *)ioaddr + MulticastFilter1);
+		rx_mode = 0x0C;
+	} else {
+		memset(mc_filter, 0, sizeof(mc_filter));
+		writel(mc_filter[0], (void *)ioaddr + MulticastFilter0);
+		writel(mc_filter[1], (void *)ioaddr + MulticastFilter1);
+		rx_mode = 0x0C;
+	}
+	writeb(np->rx_thresh | rx_mode, (void *)ioaddr + RxConfig);
+}
+
+/*** RTnet ***/
+/*** RTnet ***/
+
+static int via_rhine_close(struct rtnet_device *dev) /*** RTnet ***/
+{
+	long ioaddr = dev->base_addr;
+	struct netdev_private *np = dev->priv;
+	int i; /*** RTnet ***/
+	rtdm_lockctx_t context;
+
+/*** RTnet ***
+	del_timer_sync(&np->timer);
+ *** RTnet ***/
+
+	rtdm_lock_get_irqsave(&np->lock, context); /*** RTnet ***/
+
+	rtnetif_stop_queue(dev); /*** RTnet ***/
+
+	if (local_debug > 1)
+		rtdm_printk(KERN_DEBUG "%s: Shutting down ethercard, status was %4.4x.\n", /*** RTnet ***/
+			   dev->name, readw((void *)ioaddr + ChipCmd));
+
+	/* Switch to loopback mode to avoid hardware races. */
+	writeb(np->tx_thresh | 0x02, (void *)ioaddr + TxConfig);
+
+	/* Disable interrupts by clearing the interrupt mask. */
+	writew(0x0000, (void *)ioaddr + IntrEnable);
+
+	/* Stop the chip's Tx and Rx processes. */
+	writew(CmdStop, (void *)ioaddr + ChipCmd);
+
+	rtdm_lock_put_irqrestore(&np->lock, context); /*** RTnet ***/
+
+/*** RTnet ***/
+	if ( (i=rtdm_irq_free(&np->irq_handle))<0 )
+		return i;
+
+	rt_stack_disconnect(dev);
+/*** RTnet ***/
+
+	free_rbufs(dev);
+	free_tbufs(dev);
+	free_ring(dev);
+
+	return 0;
+}
+
+
+static void via_rhine_remove_one (struct pci_dev *pdev)
+{
+ /*** RTnet ***/
+	struct rtnet_device *dev = pci_get_drvdata(pdev);
+
+	rt_unregister_rtnetdev(dev);
+	rt_rtdev_disconnect(dev);
+/*** RTnet ***/
+
+	pci_release_regions(pdev);
+
+#ifdef USE_MEM
+	iounmap((char *)(dev->base_addr));
+#endif
+
+	rtdev_free(dev); /*** RTnet ***/
+	pci_disable_device(pdev);
+	pci_set_drvdata(pdev, NULL);
+}
+
+
+static struct pci_driver via_rhine_driver = {
+	.name		= DRV_NAME,
+	.id_table	= via_rhine_pci_tbl,
+	.probe		= via_rhine_init_one,
+	.remove		= via_rhine_remove_one,
+};
+
+
+static int __init via_rhine_init (void)
+{
+/* when a module, this is printed whether or not devices are found in probe */
+#ifdef MODULE
+	printk(version);
+#endif
+	return pci_register_driver (&via_rhine_driver);
+}
+
+
+static void __exit via_rhine_cleanup (void)
+{
+	pci_unregister_driver (&via_rhine_driver);
+}
+
+
+module_init(via_rhine_init);
+module_exit(via_rhine_cleanup);
+
+
+/*
+ * Local variables:
+ *  compile-command: "gcc -DMODULE -D__KERNEL__ -I/usr/src/linux/net/inet -Wall -Wstrict-prototypes -O6 -c via-rhine.c `[ -f /usr/include/linux/modversions.h ] && echo -DMODVERSIONS`"
+ *  c-indent-level: 4
+ *  c-basic-offset: 4
+ *  tab-width: 4
+ * End:
+ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Kconfig
new file mode 100644
index 0000000..996536c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Kconfig
@@ -0,0 +1,41 @@
+menu "Protocol Stack"
+    depends on XENO_DRIVERS_NET
+
+comment "Stack parameters"
+
+config XENO_DRIVERS_NET_RX_FIFO_SIZE
+    int "Size of central RX-FIFO"
+    depends on XENO_DRIVERS_NET
+    default 32
+    help
+    Size of the FIFO between the NICs and the stack manager task. Must
+    be a power of two! Effectively, only CONFIG_RTNET_RX_FIFO_SIZE-1
+    slots will be usable, since one slot stays empty to tell a full
+    FIFO apart from an empty one.
+
+config XENO_DRIVERS_NET_ETH_P_ALL
+    depends on XENO_DRIVERS_NET
+    bool "Support for ETH_P_ALL"
+    help
+    Enables core support for registering listeners on all layer 3
+    protocols (ETH_P_ALL). Internally this is currently realised by
+    clone-copying incoming frames for those listeners; future versions
+    will implement buffer sharing for efficiency reasons. Use with
+    care: every ETH_P_ALL listener adds noticeable overhead to the
+    reception path.
+
+config XENO_DRIVERS_NET_RTWLAN
+    depends on XENO_DRIVERS_NET
+    bool "Real-Time WLAN"
+    help
+    Enables core support for real-time wireless LAN. RT-WLAN is based
+    on low-level access to 802.11-compliant adapters and is currently
+    in an experimental stage.
+
+comment "Protocols"
+
+source "drivers/xenomai/net/stack/ipv4/Kconfig"
+source "drivers/xenomai/net/stack/packet/Kconfig"
+source "drivers/xenomai/net/stack/rtmac/Kconfig"
+source "drivers/xenomai/net/stack/rtcfg/Kconfig"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Makefile
new file mode 100644
index 0000000..f75483e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/Makefile
@@ -0,0 +1,26 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include -I$(srctree)/kernel/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4) += ipv4/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTPACKET) += packet/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTMAC) += rtmac/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTCFG) += rtcfg/
+
+obj-$(CONFIG_XENO_DRIVERS_NET) += rtnet.o
+
+rtnet-y :=  \
+	corectl.o \
+	iovec.o \
+	rtdev.o \
+	rtdev_mgr.o \
+	rtnet_chrdev.o \
+	rtnet_module.o \
+	rtnet_rtpc.o \
+	rtskb.o \
+	socket.o \
+	stack_mgr.o \
+	eth.o
+
+rtnet-$(CONFIG_XENO_DRIVERS_NET_RTWLAN) += rtwlan.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/corectl.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/corectl.c
new file mode 100644
index 0000000..9301def
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/corectl.c
@@ -0,0 +1,77 @@
+/*
+ * Copyright (C) 2016 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <asm/xenomai/syscall.h>
+#include <xenomai/posix/corectl.h>
+
+static int rtnet_corectl_call(struct notifier_block *self, unsigned long arg,
+			      void *cookie)
+{
+	struct cobalt_config_vector *vec = cookie;
+	int ret = 0;
+
+	if (arg != _CC_COBALT_GET_NET_CONFIG)
+		return NOTIFY_DONE;
+
+	if (vec->u_bufsz < sizeof(ret))
+		return notifier_from_errno(-EINVAL);
+
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET))
+		ret |= _CC_COBALT_NET;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ETH_P_ALL))
+		ret |= _CC_COBALT_NET_ETH_P_ALL;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4))
+		ret |= _CC_COBALT_NET_IPV4;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP))
+		ret |= _CC_COBALT_NET_ICMP;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING))
+		ret |= _CC_COBALT_NET_NETROUTING;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTE))
+		ret |= _CC_COBALT_NET_ROUTER;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_UDP))
+		ret |= _CC_COBALT_NET_UDP;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTPACKET))
+		ret |= _CC_COBALT_NET_AF_PACKET;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_TDMA))
+		ret |= _CC_COBALT_NET_TDMA;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_NOMAC))
+		ret |= _CC_COBALT_NET_NOMAC;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTCFG))
+		ret |= _CC_COBALT_NET_CFG;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP))
+		ret |= _CC_COBALT_NET_CAP;
+	if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY))
+		ret |= _CC_COBALT_NET_PROXY;
+
+	ret = cobalt_copy_to_user(vec->u_buf, &ret, sizeof(ret));
+
+	return ret ? notifier_from_errno(-EFAULT) : NOTIFY_STOP;
+}
+
+static struct notifier_block rtnet_corectl_notifier = {
+	.notifier_call = rtnet_corectl_call,
+};
+
+void rtnet_corectl_register(void)
+{
+	cobalt_add_config_chain(&rtnet_corectl_notifier);
+}
+
+void rtnet_corectl_unregister(void)
+{
+	cobalt_remove_config_chain(&rtnet_corectl_notifier);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/eth.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/eth.c
new file mode 100644
index 0000000..427687f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/eth.c
@@ -0,0 +1,131 @@
+/***
+ *
+ *  stack/eth.c - Ethernet-specific functions
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/skbuff.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+
+/*
+ *  Create the Ethernet MAC header for an arbitrary protocol layer
+ *
+ *  saddr=NULL  means use device source address
+ *  daddr=NULL  means leave destination address (eg unresolved arp)
+ */
+int rt_eth_header(struct rtskb *skb, struct rtnet_device *rtdev,
+		  unsigned short type, void *daddr, void *saddr, unsigned len)
+{
+	struct ethhdr *eth = (struct ethhdr *)rtskb_push(skb, ETH_HLEN);
+
+	/*
+     *  Set rtskb mac field
+     */
+
+	skb->mac.ethernet = eth;
+
+	/*
+     *  Set the protocol type. For a packet of type ETH_P_802_3 we put the length
+     *  in here instead. It is up to the 802.2 layer to carry protocol information.
+     */
+
+	if (type != ETH_P_802_3)
+		eth->h_proto = htons(type);
+	else
+		eth->h_proto = htons(len);
+
+	/*
+     *  Set the source hardware address.
+     */
+
+	if (saddr)
+		memcpy(eth->h_source, saddr, rtdev->addr_len);
+	else
+		memcpy(eth->h_source, rtdev->dev_addr, rtdev->addr_len);
+
+	if (rtdev->flags & (IFF_LOOPBACK | IFF_NOARP)) {
+		memset(eth->h_dest, 0, rtdev->addr_len);
+		return rtdev->hard_header_len;
+	}
+
+	if (daddr) {
+		memcpy(eth->h_dest, daddr, rtdev->addr_len);
+		return rtdev->hard_header_len;
+	}
+
+	return -rtdev->hard_header_len;
+}
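+
+/* Usage sketch (hypothetical caller): once the peer's MAC address has been
+ * resolved, e.g. via ARP, a protocol layer would emit the header like
+ *
+ *	rt_eth_header(skb, rtdev, ETH_P_IP, dest_mac, NULL, skb->len);
+ *
+ * with saddr == NULL so the device's own address is filled in. */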
+
+unsigned short rt_eth_type_trans(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	struct ethhdr *eth;
+	unsigned char *rawp;
+
+	rtcap_mark_incoming(skb);
+
+	skb->mac.raw = skb->data;
+	rtskb_pull(skb, rtdev->hard_header_len);
+	eth = skb->mac.ethernet;
+
+	if (*eth->h_dest & 1) {
+		if (memcmp(eth->h_dest, rtdev->broadcast, ETH_ALEN) == 0)
+			skb->pkt_type = PACKET_BROADCAST;
+		else
+			skb->pkt_type = PACKET_MULTICAST;
+	}
+
+	/*
+     *  This ALLMULTI check should be redundant by 1.4
+     *  so don't forget to remove it.
+     *
+     *  Seems, you forgot to remove it. All silly devices
+     *  seem to set IFF_PROMISC.
+     */
+
+	else if (1 /*rtdev->flags&IFF_PROMISC*/) {
+		if (memcmp(eth->h_dest, rtdev->dev_addr, ETH_ALEN))
+			skb->pkt_type = PACKET_OTHERHOST;
+	}
+
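+	/* h_proto values >= 1536 (0x0600) are EtherTypes; smaller values are
+	   802.3 length fields and are classified below. */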
+	if (ntohs(eth->h_proto) >= 1536)
+		return eth->h_proto;
+
+	rawp = skb->data;
+
+	/*
+     *  This is a magic hack to spot IPX packets. Older Novell breaks
+     *  the protocol design and runs IPX over 802.3 without an 802.2 LLC
+     *  layer. We look for FFFF which isn't a used 802.2 SSAP/DSAP. This
+     *  won't work for fault tolerant netware but does for the rest.
+     */
+	if (*(unsigned short *)rawp == 0xFFFF)
+		return htons(ETH_P_802_3);
+
+	/*
+     *  Real 802.2 LLC
+     */
+	return htons(ETH_P_802_2);
+}
+
+EXPORT_SYMBOL_GPL(rt_eth_header);
+EXPORT_SYMBOL_GPL(rt_eth_type_trans);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/INCLUDE.policy b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/INCLUDE.policy
new file mode 100644
index 0000000..e1ba93f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/INCLUDE.policy
@@ -0,0 +1,15 @@
+RTnet Include Policy
+
+1. Every source file (/<module>/<source>.c) shall have an associated
+   header file (/include/<module>/<source>.h). This header shall contain
+   all required #defines, types, and function prototypes (except those
+   that are API-related).
+
+2. API functions, types, etc. shall be placed in header files located in
+   the main include directory (/include/<module>.h). The header files
+   shall be named after the associated module.
+
+3. The main include directory shall only contain API header files.
+
+4. All header files shall be includable without requiring further header
+   files to be included beforehand.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ethernet/eth.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ethernet/eth.h
new file mode 100644
index 0000000..6c474f0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ethernet/eth.h
@@ -0,0 +1,32 @@
+/* ethernet/eth.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_ETH_H_
+#define __RTNET_ETH_H_
+
+#include <rtskb.h>
+#include <rtdev.h>
+
+extern int rt_eth_header(struct rtskb *skb, struct rtnet_device *rtdev,
+			 unsigned short type, void *daddr, void *saddr,
+			 unsigned int len);
+extern unsigned short rt_eth_type_trans(struct rtskb *skb,
+					struct rtnet_device *dev);
+
+#endif /* __RTNET_ETH_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/af_inet.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/af_inet.h
new file mode 100644
index 0000000..c798224
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/af_inet.h
@@ -0,0 +1,35 @@
+/***
+ *
+ *  include/ipv4/af_inet.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_AF_INET_H_
+#define __RTNET_AF_INET_H_
+
+#include <rtnet_internal.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_directory ipv4_proc_root;
+#endif
+
+#endif /* __RTNET_AF_INET_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/arp.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/arp.h
new file mode 100644
index 0000000..cf037a1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/arp.h
@@ -0,0 +1,51 @@
+/***
+ *
+ *  include/ipv4/arp.h - Address Resolution Protocol for RTnet
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *                2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_ARP_H_
+#define __RTNET_ARP_H_
+
+#include <linux/if_arp.h>
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <ipv4/route.h>
+
+#define RT_ARP_SKB_PRIO                                                        \
+	RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
+void rt_arp_send(int type, int ptype, u32 dest_ip, struct rtnet_device *rtdev,
+		 u32 src_ip, unsigned char *dest_hw, unsigned char *src_hw,
+		 unsigned char *target_hw);
+
+static inline void rt_arp_solicit(struct rtnet_device *rtdev, u32 target)
+{
+	rt_arp_send(ARPOP_REQUEST, ETH_P_ARP, target, rtdev, rtdev->local_ip,
+		    NULL, NULL, NULL);
+}
+
+void __init rt_arp_init(void);
+void rt_arp_release(void);
+
+#endif /* __RTNET_ARP_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/icmp.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/icmp.h
new file mode 100644
index 0000000..374f1ed
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/icmp.h
@@ -0,0 +1,56 @@
+/***
+ *
+ *  ipv4/icmp.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_ICMP_H_
+#define __RTNET_ICMP_H_
+
+#include <linux/init.h>
+
+#include <rtskb.h>
+#include <rtnet_rtpc.h>
+#include <ipv4/protocol.h>
+
+#define RT_ICMP_PRIO RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
+#define ICMP_REPLY_POOL_SIZE 8
+
+void rt_icmp_queue_echo_request(struct rt_proc_call *call);
+void rt_icmp_dequeue_echo_request(struct rt_proc_call *call);
+void rt_icmp_cleanup_echo_requests(void);
+int rt_icmp_send_echo(u32 daddr, u16 id, u16 sequence, size_t msg_size);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP
+void __init rt_icmp_init(void);
+void rt_icmp_release(void);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+#define rt_icmp_init()                                                         \
+	do {                                                                   \
+	} while (0)
+#define rt_icmp_release()                                                      \
+	do {                                                                   \
+	} while (0)
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+
+#endif /* __RTNET_ICMP_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_fragment.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_fragment.h
new file mode 100644
index 0000000..b079e96
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_fragment.h
@@ -0,0 +1,37 @@
+/* ipv4/ip_fragment.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *               2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_IP_FRAGMENT_H_
+#define __RTNET_IP_FRAGMENT_H_
+
+#include <linux/init.h>
+
+#include <rtskb.h>
+#include <ipv4/protocol.h>
+
+extern struct rtskb *rt_ip_defrag(struct rtskb *skb,
+				  struct rtinet_protocol *ipprot);
+
+extern void rt_ip_frag_invalidate_socket(struct rtsocket *sock);
+
+extern int __init rt_ip_fragment_init(void);
+extern void rt_ip_fragment_cleanup(void);
+
+#endif /* __RTNET_IP_FRAGMENT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_input.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_input.h
new file mode 100644
index 0000000..565ecc8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_input.h
@@ -0,0 +1,45 @@
+/* ipv4/ip_input.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *               2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_IP_INPUT_H_
+#define __RTNET_IP_INPUT_H_
+
+#include <rtskb.h>
+#include <stack_mgr.h>
+
+extern int rt_ip_rcv(struct rtskb *skb, struct rtpacket_type *pt);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+typedef void (*rt_ip_fallback_handler_t)(struct rtskb *skb);
+
+/*
+ * This hook can be used to register a fallback handler for incoming
+ * IP packets. Typically this is done to move over to the standard Linux
+ * IP protocol (e.g. for handling TCP).
+ * Manipulating the fallback handler is expected to happen only when the
+ * RTnet interfaces are shut down (avoiding race conditions).
+ *
+ * Note that merging RT and non-RT traffic this way most likely breaks hard
+ * real-time constraints!
+ */
+extern rt_ip_fallback_handler_t rt_ip_fallback_handler;
+#endif
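+
+/*
+ * Illustrative sketch (not part of the original patch): how an add-on
+ * module might install the fallback hook while all RTnet interfaces are
+ * down, as noted above. demo_fallback is hypothetical.
+ */
+#if 0 /* example only */
+static void demo_fallback(struct rtskb *skb)
+{
+	/* hand the frame over to the Linux IP stack here */
+}
+
+/* during init, with interfaces down: rt_ip_fallback_handler = demo_fallback; */
+#endif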
+
+#endif /* __RTNET_IP_INPUT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_output.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_output.h
new file mode 100644
index 0000000..d5f33fc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_output.h
@@ -0,0 +1,42 @@
+/***
+ *
+ *  include/ipv4/ip_output.h - prepare outgoing IP packets
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *                2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_IP_OUTPUT_H_
+#define __RTNET_IP_OUTPUT_H_
+
+#include <linux/init.h>
+
+#include <rtdev.h>
+#include <ipv4/route.h>
+
+extern int rt_ip_build_xmit(struct rtsocket *sk,
+			    int getfrag(const void *, unsigned char *,
+					unsigned int, unsigned int),
+			    const void *frag, unsigned length,
+			    struct dest_route *rt, int flags);
+
+extern void __init rt_ip_init(void);
+extern void rt_ip_release(void);
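+
+/*
+ * Illustrative sketch (not part of the original patch): a minimal getfrag()
+ * callback for rt_ip_build_xmit(), copying fraglen bytes at offset from a
+ * flat payload buffer passed as frag. Real users such as UDP also fold
+ * checksumming into this step.
+ */
+#if 0 /* example only */
+static int demo_getfrag(const void *buf, unsigned char *to,
+			unsigned int offset, unsigned int fraglen)
+{
+	memcpy(to, (const unsigned char *)buf + offset, fraglen);
+	return 0;
+}
+#endif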
+
+#endif /* __RTNET_IP_OUTPUT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_sock.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_sock.h
new file mode 100644
index 0000000..1969ab6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/ip_sock.h
@@ -0,0 +1,31 @@
+/***
+ *
+ *  include/ipv4/ip_sock.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_IP_SOCK_H_
+#define __RTNET_IP_SOCK_H_
+
+#include <rtnet_socket.h>
+
+extern int rt_ip_ioctl(struct rtdm_fd *fd, int request, void *arg);
+
+#endif /* __RTNET_IP_SOCK_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/protocol.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/protocol.h
new file mode 100644
index 0000000..1df42db
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/protocol.h
@@ -0,0 +1,54 @@
+/***
+ *
+ *  include/ipv4/protocol.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_PROTOCOL_H_
+#define __RTNET_PROTOCOL_H_
+
+#include <rtnet_socket.h>
+#include <rtskb.h>
+
+#define MAX_RT_INET_PROTOCOLS 32
+
+/***
+ * transport layer protocol
+ */
+struct rtinet_protocol {
+	char *name;
+	unsigned short protocol;
+
+	struct rtsocket *(*dest_socket)(struct rtskb *);
+	void (*rcv_handler)(struct rtskb *);
+	void (*err_handler)(struct rtskb *);
+	int (*init_socket)(struct rtdm_fd *);
+};
+
+extern struct rtinet_protocol *rt_inet_protocols[];
+
+#define rt_inet_hashkey(id) (id & (MAX_RT_INET_PROTOCOLS - 1))
+extern void rt_inet_add_protocol(struct rtinet_protocol *prot);
+extern void rt_inet_del_protocol(struct rtinet_protocol *prot);
+extern int rt_inet_socket(struct rtdm_fd *fd, int protocol);
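+
+/*
+ * Illustrative sketch (not part of the original patch): registering a
+ * transport protocol with the RT IPv4 layer. The demo_* handlers are
+ * hypothetical; only the structure layout comes from this header.
+ */
+#if 0 /* example only */
+static struct rtsocket *demo_dest_socket(struct rtskb *skb);
+static void demo_rcv(struct rtskb *skb);
+static void demo_err(struct rtskb *skb);
+static int demo_init_socket(struct rtdm_fd *fd);
+
+static struct rtinet_protocol demo_proto = {
+	.name = "DEMO",
+	.protocol = 253, /* experimental protocol number (RFC 3692) */
+	.dest_socket = demo_dest_socket,
+	.rcv_handler = demo_rcv,
+	.err_handler = demo_err,
+	.init_socket = demo_init_socket,
+};
+
+/* registration: rt_inet_add_protocol(&demo_proto);
+ * the table slot is selected via rt_inet_hashkey(253) */
+#endif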
+
+#endif /* __RTNET_PROTOCOL_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/route.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/route.h
new file mode 100644
index 0000000..286ea23
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/route.h
@@ -0,0 +1,60 @@
+/***
+ *
+ *  include/ipv4/route.h - real-time routing
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  Rewritten version of the original route by David Schleef and Ulrich Marx
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_ROUTE_H_
+#define __RTNET_ROUTE_H_
+
+#include <linux/init.h>
+#include <linux/types.h>
+
+#include <rtdev.h>
+
+struct dest_route {
+	u32 ip;
+	unsigned char dev_addr[MAX_ADDR_LEN];
+	struct rtnet_device *rtdev;
+};
+
+int rt_ip_route_add_host(u32 addr, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev);
+void rt_ip_route_del_all(struct rtnet_device *rtdev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+int rt_ip_route_add_net(u32 addr, u32 mask, u32 gw_addr);
+int rt_ip_route_del_net(u32 addr, u32 mask);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+int rt_ip_route_forward(struct rtskb *rtskb, u32 daddr);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER */
+
+int rt_ip_route_del_host(u32 addr, struct rtnet_device *rtdev);
+int rt_ip_route_get_host(u32 addr, char *if_name, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev);
+int rt_ip_route_output(struct dest_route *rt_buf, u32 daddr, u32 saddr);
+
+int __init rt_ip_routing_init(void);
+void rt_ip_routing_release(void);
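+
+/*
+ * Illustrative sketch (not part of the original patch): resolving the
+ * output route for a destination. It assumes the lookup takes a reference
+ * on the resolved device that the caller drops via rtdev_dereference()
+ * (see rtdev.h).
+ */
+#if 0 /* example only */
+static int demo_resolve(u32 daddr)
+{
+	struct dest_route rt;
+	int err = rt_ip_route_output(&rt, daddr, 0 /* any source */);
+
+	if (err == 0)
+		/* rt.rtdev and rt.dev_addr now describe the next hop */
+		rtdev_dereference(rt.rtdev);
+	return err;
+}
+#endif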
+
+#endif /* __RTNET_ROUTE_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/tcp.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/tcp.h
new file mode 100644
index 0000000..3282c3e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/tcp.h
@@ -0,0 +1,50 @@
+/***
+ *
+ *  include/ipv4/tcp.h
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_TCP_H_
+#define __RTNET_TCP_H_
+
+#include <rtskb.h>
+#include <ipv4/protocol.h>
+
+/* Maximum number of active tcp sockets, must be power of 2 */
+#define RT_TCP_SOCKETS 32
+
+/* Maximum number of active tcp connections, must be power of 2 */
+#define RT_TCP_CONNECTIONS 64
+
+/* Maximum size of TCP input window */
+#define RT_TCP_WINDOW 4096
+
+/* Maximum number of retransmissions of invalid segments */
+#define RT_TCP_RETRANSMIT 3
+
+/* Number of milliseconds to wait for ACK */
+#define RT_TCP_WAIT_TIME 10
+
+/* Priority of RST|ACK replies (error condition => non-RT prio) */
+#define RT_TCP_RST_PRIO                                                        \
+	RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
+/* rtskb pool for sending socket-less RST|ACK */
+#define RT_TCP_RST_POOL_SIZE 8
+
+#endif /* __RTNET_TCP_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/udp.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/udp.h
new file mode 100644
index 0000000..fa4d323
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4/udp.h
@@ -0,0 +1,33 @@
+/***
+ *
+ *  include/ipv4/udp.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_UDP_H_
+#define __RTNET_UDP_H_
+
+/* Maximum number of active udp sockets
+   Only increase with care (look-up delays!), must be power of 2 */
+#define RT_UDP_SOCKETS 64
+
+#endif /* __RTNET_UDP_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4_chrdev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4_chrdev.h
new file mode 100644
index 0000000..70fd691
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/ipv4_chrdev.h
@@ -0,0 +1,94 @@
+/***
+ *
+ *  include/ipv4_chrdev.h
+ *
+ *  Real-Time IP/UDP/ICMP stack
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __IPV4_H_
+#define __IPV4_H_
+
+#include <rtnet_chrdev.h>
+
+struct ipv4_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		/*** rtroute ***/
+		struct {
+			__u32 ip_addr;
+		} solicit;
+
+		struct {
+			__u8 dev_addr[DEV_ADDR_LEN];
+			__u32 ip_addr;
+		} gethost;
+
+		struct {
+			__u8 dev_addr[DEV_ADDR_LEN];
+			__u32 ip_addr;
+		} addhost;
+
+		struct {
+			__u32 ip_addr;
+		} delhost;
+
+		struct {
+			__u32 net_addr;
+			__u32 net_mask;
+			__u32 gw_addr;
+		} addnet;
+
+		struct {
+			__u32 net_addr;
+			__u32 net_mask;
+		} delnet;
+
+		/*** rtping ***/
+		struct {
+			__u32 ip_addr;
+			__u16 id;
+			__u16 sequence;
+			__u32 msg_size;
+			__u32 timeout;
+			__s64 rtt;
+		} ping;
+
+		__u64 __padding[8];
+	} args;
+};
+
+#define IOC_RT_HOST_ROUTE_ADD _IOW(RTNET_IOC_TYPE_IPV4, 0, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_SOLICIT _IOW(RTNET_IOC_TYPE_IPV4, 1, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_DELETE                                               \
+	_IOW(RTNET_IOC_TYPE_IPV4, 2 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_NET_ROUTE_ADD                                                   \
+	_IOW(RTNET_IOC_TYPE_IPV4, 3 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_NET_ROUTE_DELETE                                                \
+	_IOW(RTNET_IOC_TYPE_IPV4, 4 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_PING                                                            \
+	_IOWR(RTNET_IOC_TYPE_IPV4, 5 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_DELETE_DEV                                           \
+	_IOW(RTNET_IOC_TYPE_IPV4, 6, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_GET                                                  \
+	_IOWR(RTNET_IOC_TYPE_IPV4, 7 | RTNET_IOC_NODEV_PARAM, struct ipv4_cmd)
+#define IOC_RT_HOST_ROUTE_GET_DEV _IOWR(RTNET_IOC_TYPE_IPV4, 8, struct ipv4_cmd)
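+
+/*
+ * Illustrative user-space sketch (not part of the original patch), in the
+ * spirit of the rtping tool. It assumes the RTnet control device is
+ * exposed as /dev/rtnet and that head.if_name names the interface; both
+ * follow rtnet_chrdev.h conventions, not this header.
+ */
+#if 0 /* example only, user space */
+	struct ipv4_cmd cmd = { 0 };
+	int fd = open("/dev/rtnet", O_RDWR);
+
+	strncpy(cmd.head.if_name, "rteth0", sizeof(cmd.head.if_name));
+	cmd.args.ping.ip_addr = inet_addr("10.0.0.2");
+	cmd.args.ping.id = getpid() & 0xFFFF;
+	cmd.args.ping.sequence = 1;
+	cmd.args.ping.msg_size = 56;
+	cmd.args.ping.timeout = 500;
+	if (ioctl(fd, IOC_RT_PING, &cmd) == 0)
+		/* cmd.args.ping.rtt now holds the measured round trip */;
+	close(fd);
+#endif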
+
+#endif /* __IPV4_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/nomac_chrdev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/nomac_chrdev.h
new file mode 100644
index 0000000..52800f0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/nomac_chrdev.h
@@ -0,0 +1,39 @@
+/***
+ *
+ *  include/nomac_chrdev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_CHRDEV_H_
+#define __NOMAC_CHRDEV_H_
+
+#include <rtnet_chrdev.h>
+
+struct nomac_config {
+	struct rtnet_ioctl_head head;
+};
+
+#define NOMAC_IOC_ATTACH                                                       \
+	_IOW(RTNET_IOC_TYPE_RTMAC_NOMAC, 0, struct nomac_config)
+#define NOMAC_IOC_DETACH                                                       \
+	_IOW(RTNET_IOC_TYPE_RTMAC_NOMAC, 1, struct nomac_config)
+
+#endif /* __NOMAC_CHRDEV_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg.h
new file mode 100644
index 0000000..0b7cb53
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg.h
@@ -0,0 +1,47 @@
+/***
+ *
+ *  include/rtcfg/rtcfg.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_H_INTERNAL_
+#define __RTCFG_H_INTERNAL_
+
+#include <rtdm/driver.h>
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+/***
+ * RTcfg debugging
+ */
+#ifdef CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG
+
+extern int rtcfg_debug;
+
+/* use 0 for production, 1 for verification, >2 for debug */
+#define RTCFG_DEFAULT_DEBUG_LEVEL 10
+
+#define RTCFG_DEBUG(n, args...) (rtcfg_debug >= (n)) ? (rtdm_printk(args)) : 0
+#else
+#define RTCFG_DEBUG(n, args...)
+#endif /* CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG */
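+
+/*
+ * Illustrative usage (not part of the original patch): the message is
+ * emitted only when rtcfg_debug is at least the given level.
+ */
+#if 0 /* example only */
+	RTCFG_DEBUG(2, "RTcfg: %s() reached stage 1\n", __func__);
+#endif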
+
+#endif /* __RTCFG_H_INTERNAL_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_client_event.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_client_event.h
new file mode 100644
index 0000000..79b635d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_client_event.h
@@ -0,0 +1,45 @@
+/***
+ *
+ *  include/rtcfg/rtcfg_client_event.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_CLIENT_EVENT_H_
+#define __RTCFG_CLIENT_EVENT_H_
+
+#include <rtcfg_chrdev.h>
+
+int rtcfg_main_state_client_0(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data);
+int rtcfg_main_state_client_1(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data);
+int rtcfg_main_state_client_announced(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data);
+int rtcfg_main_state_client_all_known(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data);
+int rtcfg_main_state_client_all_frames(int ifindex, RTCFG_EVENT event_id,
+				       void *event_data);
+int rtcfg_main_state_client_2(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data);
+int rtcfg_main_state_client_ready(int ifindex, RTCFG_EVENT event_id,
+				  void *event_data);
+
+#endif /* __RTCFG_CLIENT_EVENT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_conn_event.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_conn_event.h
new file mode 100644
index 0000000..0184b65
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_conn_event.h
@@ -0,0 +1,69 @@
+/***
+ *
+ *	include/rtcfg/rtcfg_conn_event.h
+ *
+ *	Real-Time Configuration Distribution Protocol
+ *
+ *	Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the Free Software
+ *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_CONN_EVENT_H_
+#define __RTCFG_CONN_EVENT_H_
+
+#include <linux/netdevice.h>
+
+#include <rtcfg_chrdev.h>
+#include <rtcfg/rtcfg_file.h>
+#include <rtnet_internal.h>
+
+typedef enum {
+	RTCFG_CONN_SEARCHING,
+	RTCFG_CONN_STAGE_1,
+	RTCFG_CONN_STAGE_2,
+	RTCFG_CONN_READY,
+	RTCFG_CONN_DEAD
+} RTCFG_CONN_STATE;
+
+struct rtcfg_connection {
+	struct list_head entry;
+	int ifindex;
+	RTCFG_CONN_STATE state;
+	u8 mac_addr[MAX_ADDR_LEN];
+	unsigned int addr_type;
+	union {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		u32 ip_addr;
+#endif
+	} addr;
+	void *stage1_data;
+	size_t stage1_size;
+	struct rtcfg_file *stage2_file;
+	u32 cfg_offs;
+	unsigned int flags;
+	unsigned int burstrate;
+	nanosecs_abs_t last_frame;
+	u64 cfg_timeout;
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_regular proc_entry;
+#endif
+};
+
+int rtcfg_do_conn_event(struct rtcfg_connection *conn, RTCFG_EVENT event_id,
+			void *event_data);
+
+#endif /* __RTCFG_CONN_EVENT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_event.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_event.h
new file mode 100644
index 0000000..e4c5928
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_event.h
@@ -0,0 +1,121 @@
+/***
+ *
+ *  include/rtcfg/rtcfg_event.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_EVENT_H_
+#define __RTCFG_EVENT_H_
+
+#include <linux/if_ether.h>
+#include <linux/netdevice.h>
+
+#include <rtcfg_chrdev.h>
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_rtpc.h>
+
+#define FLAG_TIMER_STARTED 16
+#define FLAG_TIMER_SHUTDOWN 17
+#define FLAG_TIMER_PENDING 18
+
+#define _FLAG_TIMER_STARTED (1 << FLAG_TIMER_STARTED)
+#define _FLAG_TIMER_SHUTDOWN (1 << FLAG_TIMER_SHUTDOWN)
+#define _FLAG_TIMER_PENDING (1 << FLAG_TIMER_PENDING)
+
+typedef enum {
+	RTCFG_MAIN_OFF,
+	RTCFG_MAIN_SERVER_RUNNING,
+	RTCFG_MAIN_CLIENT_0,
+	RTCFG_MAIN_CLIENT_1,
+	RTCFG_MAIN_CLIENT_ANNOUNCED,
+	RTCFG_MAIN_CLIENT_ALL_KNOWN,
+	RTCFG_MAIN_CLIENT_ALL_FRAMES,
+	RTCFG_MAIN_CLIENT_2,
+	RTCFG_MAIN_CLIENT_READY
+} RTCFG_MAIN_STATE;
+
+struct rtcfg_station {
+	u8 mac_addr[ETH_ALEN]; /* Ethernet-specific! */
+	u8 flags;
+};
+
+struct rtcfg_device {
+	RTCFG_MAIN_STATE state;
+	u32 other_stations;
+	u32 stations_found;
+	u32 stations_ready;
+	rtdm_mutex_t dev_mutex;
+	struct list_head event_calls;
+	rtdm_lock_t event_calls_lock;
+	rtdm_timer_t timer;
+	unsigned long flags;
+	unsigned int burstrate;
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct xnvfile_directory proc_entry;
+	struct xnvfile_regular proc_state_vfile;
+	struct xnvfile_regular proc_stations_vfile;
+#endif
+
+	union {
+		struct {
+			unsigned int addr_type;
+			union {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+				u32 ip_addr;
+#endif
+			} srv_addr;
+			u8 srv_mac_addr[MAX_ADDR_LEN];
+			u8 *stage2_buffer;
+			u32 cfg_len;
+			u32 cfg_offs;
+			unsigned int packet_counter;
+			u32 chain_len;
+			struct rtskb *stage2_chain;
+			u32 max_stations;
+			struct rtcfg_station *station_addr_list;
+		} clt;
+
+		struct {
+			u32 clients_configured;
+			struct list_head conn_list;
+			u16 heartbeat;
+			u64 heartbeat_timeout;
+		} srv;
+	} spec;
+};
+
+extern struct rtcfg_device device[MAX_RT_DEVICES];
+extern const char *rtcfg_event[];
+extern const char *rtcfg_main_state[];
+
+int rtcfg_do_main_event(int ifindex, RTCFG_EVENT event_id, void *event_data);
+void rtcfg_next_main_state(int ifindex, RTCFG_MAIN_STATE state);
+
+void rtcfg_queue_blocking_call(int ifindex, struct rt_proc_call *call);
+struct rt_proc_call *rtcfg_dequeue_blocking_call(int ifindex);
+void rtcfg_complete_cmd(int ifindex, RTCFG_EVENT event_id, int result);
+void rtcfg_reset_device(int ifindex);
+
+void rtcfg_init_state_machines(void);
+void rtcfg_cleanup_state_machines(void);
+
+#endif /* __RTCFG_EVENT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_file.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_file.h
new file mode 100644
index 0000000..dc1a9fc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_file.h
@@ -0,0 +1,43 @@
+/***
+ *
+ *  include/rtcfg/rtcfg_file.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_FILE_H_
+#define __RTCFG_FILE_H_
+
+#include <linux/list.h>
+#include <linux/types.h>
+
+struct rtcfg_file {
+	struct list_head entry;
+	int ref_count;
+	const char *name;
+	size_t size;
+	void *buffer;
+};
+
+struct rtcfg_file *rtcfg_get_file(const char *filename);
+void rtcfg_add_file(struct rtcfg_file *file);
+int rtcfg_release_file(struct rtcfg_file *file);
+
+#endif /* __RTCFG_FILE_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_frame.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_frame.h
new file mode 100644
index 0000000..bef859f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_frame.h
@@ -0,0 +1,139 @@
+/***
+ *
+ *  include/rtcfg/rtcfg_frame.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_FRAME_H_
+#define __RTCFG_FRAME_H_
+
+#include <linux/init.h>
+#include <linux/if_packet.h>
+#include <asm/byteorder.h>
+
+#include <rtcfg/rtcfg_event.h>
+
+#define ETH_RTCFG 0x9022
+
+#define RTCFG_SKB_PRIO                                                         \
+	RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO - 1, RTSKB_DEF_NRT_CHANNEL)
+
+#define RTCFG_ID_STAGE_1_CFG 0
+#define RTCFG_ID_ANNOUNCE_NEW 1
+#define RTCFG_ID_ANNOUNCE_REPLY 2
+#define RTCFG_ID_STAGE_2_CFG 3
+#define RTCFG_ID_STAGE_2_CFG_FRAG 4
+#define RTCFG_ID_ACK_CFG 5
+#define RTCFG_ID_READY 6
+#define RTCFG_ID_HEARTBEAT 7
+#define RTCFG_ID_DEAD_STATION 8
+
+#define RTCFG_ADDRSIZE_MAC 0
+#define RTCFG_ADDRSIZE_IP 4
+#define RTCFG_MAX_ADDRSIZE RTCFG_ADDRSIZE_IP
+
+#define RTCFG_FLAG_STAGE_2_DATA 0
+#define RTCFG_FLAG_READY 1
+
+#define _RTCFG_FLAG_STAGE_2_DATA (1 << RTCFG_FLAG_STAGE_2_DATA)
+#define _RTCFG_FLAG_READY (1 << RTCFG_FLAG_READY)
+
+struct rtcfg_frm_head {
+#if defined(__LITTLE_ENDIAN_BITFIELD)
+	u8 id : 5;
+	u8 version : 3;
+#elif defined(__BIG_ENDIAN_BITFIELD)
+	u8 version : 3;
+	u8 id : 5;
+#else
+#error unsupported byte order
+#endif
+} __attribute__((packed));
+
+struct rtcfg_frm_stage_1_cfg {
+	struct rtcfg_frm_head head;
+	u8 addr_type;
+	u8 client_addr[0];
+	u8 server_addr[0];
+	u8 burstrate;
+	u16 cfg_len;
+	u8 cfg_data[0];
+} __attribute__((packed));
+
+struct rtcfg_frm_announce {
+	struct rtcfg_frm_head head;
+	u8 addr_type;
+	u8 addr[0];
+	u8 flags;
+	u8 burstrate;
+} __attribute__((packed));
+
+struct rtcfg_frm_stage_2_cfg {
+	struct rtcfg_frm_head head;
+	u8 flags;
+	u32 stations;
+	u16 heartbeat_period;
+	u32 cfg_len;
+	u8 cfg_data[0];
+} __attribute__((packed));
+
+struct rtcfg_frm_stage_2_cfg_frag {
+	struct rtcfg_frm_head head;
+	u32 frag_offs;
+	u8 cfg_data[0];
+} __attribute__((packed));
+
+struct rtcfg_frm_ack_cfg {
+	struct rtcfg_frm_head head;
+	u32 ack_len;
+} __attribute__((packed));
+
+struct rtcfg_frm_simple {
+	struct rtcfg_frm_head head;
+} __attribute__((packed));
+
+struct rtcfg_frm_dead_station {
+	struct rtcfg_frm_head head;
+	u8 addr_type;
+	u8 logical_addr[0];
+	u8 physical_addr[32];
+} __attribute__((packed));
+
+int rtcfg_send_stage_1(struct rtcfg_connection *conn);
+int rtcfg_send_stage_2(struct rtcfg_connection *conn, int send_data);
+int rtcfg_send_stage_2_frag(struct rtcfg_connection *conn);
+int rtcfg_send_announce_new(int ifindex);
+int rtcfg_send_announce_reply(int ifindex, u8 *dest_mac_addr);
+int rtcfg_send_ack(int ifindex);
+int rtcfg_send_dead_station(struct rtcfg_connection *conn);
+
+int rtcfg_send_simple_frame(int ifindex, int frame_id, u8 *dest_addr);
+
+#define rtcfg_send_ready(ifindex)                                              \
+	rtcfg_send_simple_frame(ifindex, RTCFG_ID_READY, NULL)
+#define rtcfg_send_heartbeat(ifindex)                                          \
+	rtcfg_send_simple_frame(ifindex, RTCFG_ID_HEARTBEAT,                   \
+				device[ifindex].spec.clt.srv_mac_addr)
+
+int __init rtcfg_init_frames(void);
+void rtcfg_cleanup_frames(void);
+
+#endif /* __RTCFG_FRAME_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_ioctl.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_ioctl.h
new file mode 100644
index 0000000..37f0f95
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_ioctl.h
@@ -0,0 +1,33 @@
+/***
+ *
+ *  include/rtcfg/rtcfg_ioctl.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_IOCTL_H_
+#define __RTCFG_IOCTL_H_
+
+extern struct rtnet_ioctls rtcfg_ioctls;
+
+#define rtcfg_init_ioctls() rtnet_register_ioctls(&rtcfg_ioctls)
+#define rtcfg_cleanup_ioctls() rtnet_unregister_ioctls(&rtcfg_ioctls)
+
+#endif /* __RTCFG_IOCTL_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_proc.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_proc.h
new file mode 100644
index 0000000..fe6c7f6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_proc.h
@@ -0,0 +1,63 @@
+/***
+ *
+ *  include/rtcfg/rtcfg_proc.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_PROC_H_
+#define __RTCFG_PROC_H_
+
+#include <rtnet_internal.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+
+extern struct mutex nrt_proc_lock;
+
+void rtcfg_update_conn_proc_entries(int ifindex);
+void rtcfg_remove_conn_proc_entries(int ifindex);
+
+int rtcfg_init_proc(void);
+void rtcfg_cleanup_proc(void);
+
+static inline void rtcfg_lockwr_proc(int ifindex)
+{
+	mutex_lock(&nrt_proc_lock);
+	rtcfg_remove_conn_proc_entries(ifindex);
+}
+
+static inline void rtcfg_unlockwr_proc(int ifindex)
+{
+	rtcfg_update_conn_proc_entries(ifindex);
+	mutex_unlock(&nrt_proc_lock);
+}
+
+#else
+
+#define rtcfg_lockwr_proc(x)                                                   \
+	do {                                                                   \
+	} while (0)
+#define rtcfg_unlockwr_proc(x)                                                 \
+	do {                                                                   \
+	} while (0)
+
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+#endif /* __RTCFG_PROC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_timer.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_timer.h
new file mode 100644
index 0000000..2da6d50
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg/rtcfg_timer.h
@@ -0,0 +1,34 @@
+/***
+ *
+ *  include/rtcfg/rtcfg_timer.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_TIMER_H_
+#define __RTCFG_TIMER_H_
+
+void rtcfg_timer(rtdm_timer_t *t);
+
+void rtcfg_timer_run(void);
+
+void rtcfg_thread_signal(void);
+
+#endif /* __RTCFG_TIMER_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg_chrdev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg_chrdev.h
new file mode 100644
index 0000000..910a84c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtcfg_chrdev.h
@@ -0,0 +1,176 @@
+/***
+ *
+ *  include/rtcfg_chrdev.h
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTCFG_H_
+#define __RTCFG_H_
+
+#include <rtnet_chrdev.h>
+
+#define ERTCFG_START 0x0F00
+#define ESTAGE1SIZE ERTCFG_START
+
+#define FLAG_STAGE_2_DATA 0x0001
+#define FLAG_READY 0x0002
+#define FLAG_ASSIGN_ADDR_BY_MAC 0x0100
+
+#define RTCFG_ADDR_MAC 0x00
+#define RTCFG_ADDR_IP 0x01
+#define RTCFG_ADDR_MASK 0xFF
+
+typedef enum {
+	RTCFG_CMD_SERVER,
+	RTCFG_CMD_ADD,
+	RTCFG_CMD_DEL,
+	RTCFG_CMD_WAIT,
+	RTCFG_CMD_CLIENT,
+	RTCFG_CMD_ANNOUNCE,
+	RTCFG_CMD_READY,
+	RTCFG_CMD_DETACH,
+
+	/* internal usage only */
+	RTCFG_TIMER,
+	RTCFG_FRM_STAGE_1_CFG,
+	RTCFG_FRM_ANNOUNCE_NEW,
+	RTCFG_FRM_ANNOUNCE_REPLY,
+	RTCFG_FRM_STAGE_2_CFG,
+	RTCFG_FRM_STAGE_2_CFG_FRAG,
+	RTCFG_FRM_ACK_CFG,
+	RTCFG_FRM_READY,
+	RTCFG_FRM_HEARTBEAT,
+	RTCFG_FRM_DEAD_STATION
+} RTCFG_EVENT;
+
+struct rtskb;
+struct rtcfg_station;
+struct rtcfg_connection;
+struct rtcfg_file;
+
+struct rtcfg_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		struct {
+			__u32 period;
+			__u32 burstrate;
+			__u32 heartbeat;
+			__u32 threshold;
+			__u32 flags;
+		} server;
+
+		struct {
+			__u32 addr_type;
+			__u32 ip_addr;
+			__u8 mac_addr[DEV_ADDR_LEN];
+			__u32 timeout;
+			__u16 stage1_size;
+			__u16 __padding;
+			void *stage1_data;
+			const char *stage2_filename;
+
+			/* internal usage only */
+			struct rtcfg_connection *conn_buf;
+			struct rtcfg_file *stage2_file;
+		} add;
+
+		struct {
+			__u32 addr_type;
+			__u32 ip_addr;
+			__u8 mac_addr[DEV_ADDR_LEN];
+
+			/* internal usage only */
+			struct rtcfg_connection *conn_buf;
+			struct rtcfg_file *stage2_file;
+		} del;
+
+		struct {
+			__u32 timeout;
+		} wait;
+
+		struct {
+			__u32 timeout;
+			__u32 max_stations;
+			__u64 buffer_size;
+			void *buffer;
+
+			/* internal usage only */
+			struct rtcfg_station *station_buf;
+			struct rtskb *rtskb;
+		} client;
+
+		struct {
+			__u32 timeout;
+			__u32 flags;
+			__u32 burstrate;
+			__u32 __padding;
+			__u64 buffer_size;
+			void *buffer;
+
+			/* internal usage only */
+			struct rtskb *rtskb;
+		} announce;
+
+		struct {
+			__u32 timeout;
+		} ready;
+
+		struct {
+			/* internal usage only */
+			struct rtcfg_connection *conn_buf;
+			struct rtcfg_file *stage2_file;
+			struct rtcfg_station *station_addr_list;
+			struct rtskb *stage2_chain;
+		} detach;
+
+		__u64 __padding[16];
+	} args;
+
+	/* internal usage only */
+	union {
+		struct {
+			int ifindex;
+			RTCFG_EVENT event_id;
+		} data;
+
+		__u64 __padding[2];
+	} internal;
+};
+
+#define RTCFG_IOC_SERVER                                                       \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_SERVER, struct rtcfg_cmd)
+#define RTCFG_IOC_ADD                                                          \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_ADD, struct rtcfg_cmd)
+#define RTCFG_IOC_DEL                                                          \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_DEL, struct rtcfg_cmd)
+#define RTCFG_IOC_WAIT                                                         \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_WAIT, struct rtcfg_cmd)
+#define RTCFG_IOC_CLIENT                                                       \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_CLIENT, struct rtcfg_cmd)
+#define RTCFG_IOC_ANNOUNCE                                                     \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_ANNOUNCE, struct rtcfg_cmd)
+#define RTCFG_IOC_READY                                                        \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_READY, struct rtcfg_cmd)
+#define RTCFG_IOC_DETACH                                                       \
+	_IOW(RTNET_IOC_TYPE_RTCFG, RTCFG_CMD_DETACH, struct rtcfg_cmd)
+
+#endif /* __RTCFG_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev.h
new file mode 100644
index 0000000..a4d963f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev.h
@@ -0,0 +1,275 @@
+/***
+ *
+ *  rtdev.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTDEV_H_
+#define __RTDEV_H_
+
+#define MAX_RT_DEVICES 8
+
+#ifdef __KERNEL__
+
+#include <asm/atomic.h>
+#include <linux/netdevice.h>
+
+#include <rtskb.h>
+#include <rtnet_internal.h>
+
+#define RTDEV_VERS_2_0 0x0200
+
+#define PRIV_FLAG_UP 0
+#define PRIV_FLAG_ADDING_ROUTE 1
+
+#ifndef NETIF_F_LLTX
+#define NETIF_F_LLTX 4096
+#endif
+
+#define RTDEV_TX_OK 0
+#define RTDEV_TX_BUSY 1
+
+enum rtnet_link_state {
+	__RTNET_LINK_STATE_XOFF = 0,
+	__RTNET_LINK_STATE_START,
+	__RTNET_LINK_STATE_PRESENT,
+	__RTNET_LINK_STATE_NOCARRIER,
+};
+#define RTNET_LINK_STATE_XOFF (1 << __RTNET_LINK_STATE_XOFF)
+#define RTNET_LINK_STATE_START (1 << __RTNET_LINK_STATE_START)
+#define RTNET_LINK_STATE_PRESENT (1 << __RTNET_LINK_STATE_PRESENT)
+#define RTNET_LINK_STATE_NOCARRIER (1 << __RTNET_LINK_STATE_NOCARRIER)
+
+/***
+ *  rtnet_device
+ */
+struct rtnet_device {
+	/* Many fields are borrowed from struct net_device in
+     * <linux/netdevice.h> - WY
+     */
+	unsigned int vers;
+
+	char name[IFNAMSIZ];
+	struct device *sysbind; /* device bound in sysfs (optional) */
+
+	unsigned long rmem_end; /* shmem "recv" end     */
+	unsigned long rmem_start; /* shmem "recv" start   */
+	unsigned long mem_end; /* shared mem end       */
+	unsigned long mem_start; /* shared mem start     */
+	unsigned long base_addr; /* device I/O address   */
+	unsigned int irq; /* device IRQ number    */
+
+	/*
+     *  Some hardware also needs these fields, but they are not
+     *  part of the usual set specified in Space.c.
+     */
+	unsigned char if_port; /* Selectable AUI, TP,..*/
+	unsigned char dma; /* DMA channel          */
+	__u16 __padding;
+
+	unsigned long link_state;
+	int ifindex;
+	atomic_t refcount;
+
+	struct device *sysdev; /* node in driver model for sysfs */
+	struct module *rt_owner; /* like classic owner, but      *
+				     * forces correct macro usage   */
+
+	unsigned int flags; /* interface flags (a la BSD)   */
+	unsigned long priv_flags; /* internal flags               */
+	unsigned short type; /* interface hardware type      */
+	unsigned short hard_header_len; /* hardware hdr length  */
+	unsigned int mtu; /* eth = 1536, tr = 4...        */
+	void *priv; /* pointer to private data      */
+	netdev_features_t features; /* [RT]NETIF_F_*                */
+
+	/* Interface address info. */
+	unsigned char broadcast[MAX_ADDR_LEN]; /* hw bcast add */
+	unsigned char dev_addr[MAX_ADDR_LEN]; /* hw address   */
+	unsigned char addr_len; /* hardware address length      */
+
+	int promiscuity;
+	int allmulti;
+
+	__u32 local_ip; /* IP address in network order  */
+	__u32 broadcast_ip; /* broadcast IP in network order */
+
+	rtdm_event_t *stack_event;
+
+	rtdm_mutex_t xmit_mutex; /* protects xmit routine        */
+	rtdm_lock_t rtdev_lock; /* management lock              */
+	struct mutex nrt_lock; /* non-real-time locking        */
+
+	unsigned int add_rtskbs; /* additionally allocated global rtskbs */
+
+	struct rtskb_pool dev_pool;
+
+	/* RTmac related fields */
+	struct rtmac_disc *mac_disc;
+	struct rtmac_priv *mac_priv;
+	int (*mac_detach)(struct rtnet_device *rtdev);
+
+	/* Device operations */
+	int (*open)(struct rtnet_device *rtdev);
+	int (*stop)(struct rtnet_device *rtdev);
+	int (*hard_header)(struct rtskb *, struct rtnet_device *,
+			   unsigned short type, void *daddr, void *saddr,
+			   unsigned int len);
+	int (*rebuild_header)(struct rtskb *);
+	int (*hard_start_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+	int (*hw_reset)(struct rtnet_device *rtdev);
+
+	/* Transmission hook, managed by the stack core, RTcap, and RTmac
+     *
+     * If xmit_lock is used, start_xmit points either to rtdev_locked_xmit or
+     * the RTmac discipline handler. If xmit_lock is not required, start_xmit
+     * points to hard_start_xmit or the discipline handler.
+     */
+	int (*start_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+
+	/* MTU hook, managed by the stack core and RTmac */
+	unsigned int (*get_mtu)(struct rtnet_device *rtdev,
+				unsigned int priority);
+
+	int (*do_ioctl)(struct rtnet_device *rtdev, struct ifreq *ifr, int cmd);
+	struct net_device_stats *(*get_stats)(struct rtnet_device *rtdev);
+
+	/* DMA pre-mapping hooks */
+	dma_addr_t (*map_rtskb)(struct rtnet_device *rtdev, struct rtskb *skb);
+	void (*unmap_rtskb)(struct rtnet_device *rtdev, struct rtskb *skb);
+};
+
+struct rtnet_core_cmd;
+
+struct rtdev_event_hook {
+	struct list_head entry;
+	void (*register_device)(struct rtnet_device *rtdev);
+	void (*unregister_device)(struct rtnet_device *rtdev);
+	void (*ifup)(struct rtnet_device *rtdev, struct rtnet_core_cmd *up_cmd);
+	void (*ifdown)(struct rtnet_device *rtdev);
+};
+
+extern struct list_head event_hook_list;
+extern struct mutex rtnet_devices_nrt_lock;
+extern struct rtnet_device *rtnet_devices[];
+
+int __rt_init_etherdev(struct rtnet_device *rtdev, unsigned int dev_pool_size,
+		       struct module *module);
+
+#define rt_init_etherdev(__rtdev, __dev_pool_size)                             \
+	__rt_init_etherdev(__rtdev, __dev_pool_size, THIS_MODULE)
+
+struct rtnet_device *__rt_alloc_etherdev(unsigned sizeof_priv,
+					 unsigned dev_pool_size,
+					 struct module *module);
+#define rt_alloc_etherdev(priv_size, rx_size)                                  \
+	__rt_alloc_etherdev(priv_size, rx_size, THIS_MODULE)
+
+void rtdev_destroy(struct rtnet_device *rtdev);
+
+void rtdev_free(struct rtnet_device *rtdev);
+
+int rt_register_rtnetdev(struct rtnet_device *rtdev);
+int rt_unregister_rtnetdev(struct rtnet_device *rtdev);
+
+void rtdev_add_event_hook(struct rtdev_event_hook *hook);
+void rtdev_del_event_hook(struct rtdev_event_hook *hook);
+
+void rtdev_alloc_name(struct rtnet_device *rtdev, const char *name_mask);
+
+/**
+ *  __rtdev_get_by_index - find a rtnet_device by its ifindex
+ *  @ifindex: index of device
+ *  @note: caller must hold rtnet_devices_nrt_lock
+ */
+static inline struct rtnet_device *__rtdev_get_by_index(int ifindex)
+{
+	return rtnet_devices[ifindex - 1];
+}
+
+struct rtnet_device *rtdev_get_by_name(const char *if_name);
+struct rtnet_device *rtdev_get_by_index(int ifindex);
+struct rtnet_device *rtdev_get_by_hwaddr(unsigned short type, char *ha);
+struct rtnet_device *rtdev_get_loopback(void);
+
+int rtdev_reference(struct rtnet_device *rtdev);
+
+static inline void rtdev_dereference(struct rtnet_device *rtdev)
+{
+	smp_mb__before_atomic();
+	if (rtdev->rt_owner && atomic_dec_and_test(&rtdev->refcount))
+		module_put(rtdev->rt_owner);
+}
+
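+/*
+ * Example (illustrative only; "rteth0" is a placeholder name): looking up a
+ * device and balancing the reference count, assuming the rtdev_get_by_*()
+ * helpers return the device with a reference held.
+ *
+ *	struct rtnet_device *rtdev = rtdev_get_by_name("rteth0");
+ *	if (rtdev) {
+ *		... use rtdev ...
+ *		rtdev_dereference(rtdev);
+ *	}
+ */
+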
+int rtdev_xmit(struct rtskb *skb);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+int rtdev_xmit_proxy(struct rtskb *skb);
+#endif
+
+unsigned int rt_hard_mtu(struct rtnet_device *rtdev, unsigned int priority);
+
+int rtdev_open(struct rtnet_device *rtdev);
+int rtdev_close(struct rtnet_device *rtdev);
+
+int rtdev_up(struct rtnet_device *rtdev, struct rtnet_core_cmd *cmd);
+int rtdev_down(struct rtnet_device *rtdev);
+
+int rtdev_map_rtskb(struct rtskb *skb);
+void rtdev_unmap_rtskb(struct rtskb *skb);
+
+struct rtskb *rtnetdev_alloc_rtskb(struct rtnet_device *dev, unsigned int size);
+
+#define rtnetdev_priv(dev) ((dev)->priv)
+
+#define rtdev_emerg(__dev, format, args...)                                    \
+	pr_emerg("%s: " format, (__dev)->name, ##args)
+#define rtdev_alert(__dev, format, args...)                                    \
+	pr_alert("%s: " format, (__dev)->name, ##args)
+#define rtdev_crit(__dev, format, args...)                                     \
+	pr_crit("%s: " format, (__dev)->name, ##args)
+#define rtdev_err(__dev, format, args...)                                      \
+	pr_err("%s: " format, (__dev)->name, ##args)
+#define rtdev_warn(__dev, format, args...)                                     \
+	pr_warn("%s: " format, (__dev)->name, ##args)
+#define rtdev_notice(__dev, format, args...)                                   \
+	pr_notice("%s: " format, (__dev)->name, ##args)
+#define rtdev_info(__dev, format, args...)                                     \
+	pr_info("%s: " format, (__dev)->name, ##args)
+#define rtdev_dbg(__dev, format, args...)                                      \
+	pr_debug("%s: " format, (__dev)->name, ##args)
+
+#ifdef VERBOSE_DEBUG
+#define rtdev_vdbg rtdev_dbg
+#else
+#define rtdev_vdbg(__dev, format, args...)                                     \
+	({                                                                     \
+		if (0)                                                         \
+			pr_debug("%s: " format, (__dev)->name, ##args);        \
+                                                                               \
+		0;                                                             \
+	})
+#endif
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTDEV_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev_mgr.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev_mgr.h
new file mode 100644
index 0000000..190d172
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtdev_mgr.h
@@ -0,0 +1,39 @@
+/* rtdev_mgr.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTDEV_MGR_H_
+#define __RTDEV_MGR_H_
+
+#ifdef __KERNEL__
+
+#include <rtnet_internal.h>
+
+extern void rtnetif_err_rx(struct rtnet_device *rtdev);
+extern void rtnetif_err_tx(struct rtnet_device *rtdev);
+
+extern void rt_rtdev_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr);
+extern void rt_rtdev_disconnect(struct rtnet_device *rtdev);
+extern int rt_rtdev_mgr_init(struct rtnet_mgr *mgr);
+extern void rt_rtdev_mgr_delete(struct rtnet_mgr *mgr);
+extern int rt_rtdev_mgr_start(struct rtnet_mgr *mgr);
+extern int rt_rtdev_mgr_stop(struct rtnet_mgr *mgr);
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTDEV_MGR_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac.h
new file mode 100644
index 0000000..da17429
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac.h
@@ -0,0 +1,92 @@
+/***
+ *
+ *  include/rtmac.h
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2004-2006 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ *  As a special exception to the GNU General Public license, the RTnet
+ *  project allows you to use this header file in unmodified form to produce
+ *  application programs executing in user-space which use RTnet services by
+ *  normal system calls. The resulting executable will not be covered by the
+ *  GNU General Public License merely as a result of this header file use.
+ *  Instead, this header file use will be considered normal use of RTnet and
+ *  not a "derived work" in the sense of the GNU General Public License.
+ *
+ *  This exception does not apply when the application code is built as a
+ *  static or dynamically loadable portion of the Linux kernel nor does the
+ *  exception override other reasons justifying application of the GNU General
+ *  Public License.
+ *
+ *  This exception applies only to the code released by the RTnet project
+ *  under the name RTnet and bearing this exception notice. If you copy code
+ *  from other sources into a copy of RTnet, the exception does not apply to
+ *  the code that you add in this way.
+ *
+ */
+
+#ifndef __RTMAC_H_
+#define __RTMAC_H_
+
+#include <rtdm/rtdm.h>
+
+/* sub-classes: RTDM_CLASS_RTMAC */
+#define RTDM_SUBCLASS_TDMA 0
+#define RTDM_SUBCLASS_UNMANAGED 1
+
+#define RTIOC_TYPE_RTMAC RTDM_CLASS_RTMAC
+
+/* ** Common Cycle Event Types ** */
+/* standard event, wake up once per cycle */
+#define RTMAC_WAIT_ON_DEFAULT 0x00
+/* wake up on media access of the station, may trigger multiple times per
+   cycle */
+#define RTMAC_WAIT_ON_XMIT 0x01
+
+/* ** TDMA-specific Cycle Event Types ** */
+/* trigger on SYNC frame reception/transmission */
+#define TDMA_WAIT_ON_SYNC RTMAC_WAIT_ON_DEFAULT
+#define TDMA_WAIT_ON_SOF TDMA_WAIT_ON_SYNC /* legacy support */
+
+/* RTMAC_RTIOC_WAITONCYCLE_EX control and status data */
+struct rtmac_waitinfo {
+	/** Set to wait type before invoking the service */
+	unsigned int type;
+
+	/** Set to sizeof(struct rtmac_waitinfo) before invoking the service */
+	size_t size;
+
+	/** Counter of elementary cycles of the underlying RTmac discipline
+	 *  (if applicable) */
+	unsigned long cycle_no;
+
+	/** Date (in local time) of the last elementary cycle start of the RTmac
+	 *  discipline (if applicable) */
+	nanosecs_abs_t cycle_start;
+
+	/** Offset of the local clock to the global clock provided by the RTmac
+	 *  discipline (if applicable): t_global = t_local + clock_offset */
+	nanosecs_rel_t clock_offset;
+};
+
+/* RTmac Discipline IOCTLs */
+#define RTMAC_RTIOC_TIMEOFFSET _IOR(RTIOC_TYPE_RTMAC, 0x00, int64_t)
+#define RTMAC_RTIOC_WAITONCYCLE _IOW(RTIOC_TYPE_RTMAC, 0x01, unsigned int)
+#define RTMAC_RTIOC_WAITONCYCLE_EX                                             \
+	_IOWR(RTIOC_TYPE_RTMAC, 0x02, struct rtmac_waitinfo)
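+
+/*
+ * Example (illustrative only; the device path is a typical RTDM node name
+ * and may differ per setup): waiting for the next elementary cycle from a
+ * user-space application.
+ *
+ *	struct rtmac_waitinfo waitinfo = {
+ *		.type = TDMA_WAIT_ON_SYNC,
+ *		.size = sizeof(waitinfo),
+ *	};
+ *	int fd = open("/dev/rtdm/TDMA0", O_RDWR);
+ *	ioctl(fd, RTMAC_RTIOC_WAITONCYCLE_EX, &waitinfo);
+ *	(on return, cycle_no, cycle_start and clock_offset are filled in)
+ */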
+
+#endif /* __RTMAC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac.h
new file mode 100644
index 0000000..6e6a09f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac.h
@@ -0,0 +1,51 @@
+/***
+ *
+ *  include/rtmac/nomac/nomac.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_H_
+#define __NOMAC_H_
+
+#include <rtdm/driver.h>
+
+#include <rtmac/rtmac_disc.h>
+
+#define RTMAC_TYPE_NOMAC 0
+
+#define NOMAC_MAGIC 0x004D0A0C
+
+struct nomac_priv {
+	unsigned int magic;
+	struct rtnet_device *rtdev;
+	char device_name[32];
+	struct rtdm_driver api_driver;
+	struct rtdm_device api_device;
+	/* ... */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct list_head list_entry;
+#endif
+};
+
+extern struct rtmac_disc nomac_disc;
+
+#endif /* __NOMAC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_dev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_dev.h
new file mode 100644
index 0000000..bf73a69
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_dev.h
@@ -0,0 +1,37 @@
+/***
+ *
+ *  include/rtmac/nomac/nomac_dev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_DEV_H_
+#define __NOMAC_DEV_H_
+
+#include <rtmac/nomac/nomac.h>
+
+int nomac_dev_init(struct rtnet_device *rtdev, struct nomac_priv *nomac);
+
+static inline void nomac_dev_release(struct nomac_priv *nomac)
+{
+	rtdm_dev_unregister(&nomac->api_device);
+}
+
+#endif /* __NOMAC_DEV_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_ioctl.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_ioctl.h
new file mode 100644
index 0000000..df22c08
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_ioctl.h
@@ -0,0 +1,31 @@
+/***
+ *
+ *  include/rtmac/nomac/nomac_ioctl.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_IOCTL_H_
+#define __NOMAC_IOCTL_H_
+
+int nomac_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		unsigned long arg);
+
+#endif /* __NOMAC_IOCTL_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_proto.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_proto.h
new file mode 100644
index 0000000..375dfb0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/nomac/nomac_proto.h
@@ -0,0 +1,38 @@
+/***
+ *
+ *  include/rtmac/nomac/nomac_proto.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __NOMAC_PROTO_H_
+#define __NOMAC_PROTO_H_
+
+#include <rtdev.h>
+
+int nomac_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev);
+int nomac_nrt_packet_tx(struct rtskb *rtskb);
+
+int nomac_packet_rx(struct rtskb *rtskb);
+
+int nomac_proto_init(void);
+void nomac_proto_cleanup(void);
+
+#endif /* __NOMAC_PROTO_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_disc.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_disc.h
new file mode 100644
index 0000000..c366371
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_disc.h
@@ -0,0 +1,95 @@
+/***
+ *
+ *  include/rtmac/rtmac_disc.h
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTMAC_DISC_H_
+#define __RTMAC_DISC_H_
+
+#include <linux/list.h>
+#include <linux/netdevice.h>
+
+#include <rtdev.h>
+#include <rtnet_chrdev.h>
+
+#define RTMAC_NO_VNIC NULL
+#define RTMAC_DEFAULT_VNIC rtmac_vnic_xmit
+
+typedef int (*vnic_xmit_handler)(struct sk_buff *skb, struct net_device *dev);
+
+struct rtmac_priv {
+	int (*orig_start_xmit)(struct rtskb *skb, struct rtnet_device *dev);
+	struct net_device *vnic;
+	struct net_device_stats vnic_stats;
+	struct rtskb_pool vnic_skb_pool;
+	unsigned int vnic_max_mtu;
+
+	u8 disc_priv[0] __attribute__((aligned(16)));
+};
+
+struct rtmac_proc_entry {
+	const char *name;
+	int (*handler)(struct xnvfile_regular_iterator *it, void *data);
+	struct xnvfile_regular vfile;
+};
+
+struct rtmac_disc {
+	struct list_head list;
+
+	const char *name;
+	unsigned int priv_size; /* size of rtmac_priv.disc_priv */
+	u16 disc_type;
+
+	int (*packet_rx)(struct rtskb *skb);
+	/* rt_packet_tx prototype must be compatible with hard_start_xmit */
+	int (*rt_packet_tx)(struct rtskb *skb, struct rtnet_device *dev);
+	int (*nrt_packet_tx)(struct rtskb *skb);
+
+	unsigned int (*get_mtu)(struct rtnet_device *rtdev,
+				unsigned int priority);
+
+	vnic_xmit_handler vnic_xmit;
+
+	int (*attach)(struct rtnet_device *rtdev, void *disc_priv);
+	int (*detach)(struct rtnet_device *rtdev, void *disc_priv);
+
+	struct rtnet_ioctls ioctls;
+
+	struct rtmac_proc_entry *proc_entries;
+	unsigned nr_proc_entries;
+
+	struct module *owner;
+};
+
+int rtmac_disc_attach(struct rtnet_device *rtdev, struct rtmac_disc *disc);
+int rtmac_disc_detach(struct rtnet_device *rtdev);
+
+int __rtmac_disc_register(struct rtmac_disc *disc, struct module *module);
+#define rtmac_disc_register(disc) __rtmac_disc_register(disc, THIS_MODULE)
+
+void rtmac_disc_deregister(struct rtmac_disc *disc);
+
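+/*
+ * Example (illustrative only; my_disc and its handlers are hypothetical,
+ * tdma_disc and nomac_disc are the real disciplines): registering a
+ * discipline and attaching it to a device.
+ *
+ *	static struct rtmac_disc my_disc = {
+ *		.name          = "MYDISC",
+ *		.priv_size     = sizeof(struct my_priv),
+ *		.rt_packet_tx  = my_rt_packet_tx,
+ *		.nrt_packet_tx = my_nrt_packet_tx,
+ *		.vnic_xmit     = RTMAC_DEFAULT_VNIC,
+ *		.attach        = my_attach,
+ *		.detach        = my_detach,
+ *	};
+ *
+ *	ret = rtmac_disc_register(&my_disc);
+ *	ret = rtmac_disc_attach(rtdev, &my_disc);
+ */
+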
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_disciplines_show(struct xnvfile_regular_iterator *it, void *d);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+#endif /* __RTMAC_DISC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proc.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proc.h
new file mode 100644
index 0000000..b25b199
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proc.h
@@ -0,0 +1,34 @@
+/***
+ *
+ *  include/rtmac/rtmac_proc.h
+ *
+ *  rtmac - real-time networking medium access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTMAC_PROC_H_
+#define __RTMAC_PROC_H_
+
+int rtmac_disc_proc_register(struct rtmac_disc *disc);
+void rtmac_disc_proc_unregister(struct rtmac_disc *disc);
+
+int rtmac_proc_register(void);
+void rtmac_proc_release(void);
+
+#endif /* __RTMAC_PROC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proto.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proto.h
new file mode 100644
index 0000000..76ecc42
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_proto.h
@@ -0,0 +1,78 @@
+/***
+ *
+ *  include/rtmac/rtmac_proto.h
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTMAC_PROTO_H_
+#define __RTMAC_PROTO_H_
+
+#include <stack_mgr.h>
+
+#define RTMAC_VERSION 0x02
+#define ETH_RTMAC 0x9021
+
+#define RTMAC_FLAG_TUNNEL 0x01
+
+struct rtmac_hdr {
+	u16 type;
+	u8 ver;
+	u8 flags;
+} __attribute__((packed));
+
+static inline int rtmac_add_header(struct rtnet_device *rtdev, void *daddr,
+				   struct rtskb *skb, u16 type, u8 flags)
+{
+	struct rtmac_hdr *hdr =
+		(struct rtmac_hdr *)rtskb_push(skb, sizeof(struct rtmac_hdr));
+
+	hdr->type = htons(type);
+	hdr->ver = RTMAC_VERSION;
+	hdr->flags = flags;
+
+	skb->rtdev = rtdev;
+
+	if (rtdev->hard_header &&
+	    (rtdev->hard_header(skb, rtdev, ETH_RTMAC, daddr, rtdev->dev_addr,
+				skb->len) < 0))
+		return -1;
+
+	return 0;
+}
+
+static inline int rtmac_xmit(struct rtskb *skb)
+{
+	struct rtnet_device *rtdev = skb->rtdev;
+	int ret;
+
+	ret = rtdev->hard_start_xmit(skb, rtdev);
+	if (ret != 0)
+		kfree_rtskb(skb);
+
+	return ret;
+}
+
+extern struct rtpacket_type rtmac_packet_type;
+
+#define rtmac_proto_init() rtdev_add_pack(&rtmac_packet_type)
+void rtmac_proto_release(void);
+
+#endif /* __RTMAC_PROTO_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_vnic.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_vnic.h
new file mode 100644
index 0000000..17ec07b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/rtmac_vnic.h
@@ -0,0 +1,59 @@
+/* include/rtmac/rtmac_vnic.h
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *               2003 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#ifndef __RTMAC_VNIC_H_
+#define __RTMAC_VNIC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/init.h>
+#include <linux/netdevice.h>
+
+#include <rtmac/rtmac_disc.h>
+
+#define DEFAULT_VNIC_RTSKBS 32
+
+int rtmac_vnic_rx(struct rtskb *skb, u16 type);
+
+int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev);
+
+void rtmac_vnic_set_max_mtu(struct rtnet_device *rtdev, unsigned int max_mtu);
+
+int rtmac_vnic_add(struct rtnet_device *rtdev, vnic_xmit_handler vnic_xmit);
+int rtmac_vnic_unregister(struct rtnet_device *rtdev);
+
+static inline void rtmac_vnic_cleanup(struct rtnet_device *rtdev)
+{
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+
+	rtskb_pool_release(&mac_priv->vnic_skb_pool);
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_vnics_show(struct xnvfile_regular_iterator *it, void *data);
+#endif
+
+int __init rtmac_vnic_module_init(void);
+void rtmac_vnic_module_cleanup(void);
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTMAC_VNIC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma.h
new file mode 100644
index 0000000..9574c2d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma.h
@@ -0,0 +1,161 @@
+/***
+ *
+ *  include/rtmac/tdma/tdma.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_H_
+#define __TDMA_H_
+
+#include <rtdm/driver.h>
+
+#include <rtnet_rtpc.h>
+#include <rtmac/rtmac_disc.h>
+
+#define RTMAC_TYPE_TDMA 0x0001
+
+#define TDMA_MAGIC 0x3A0D4D0A
+
+#define TDMA_FLAG_CALIBRATED 1
+#define TDMA_FLAG_RECEIVED_SYNC 2
+#define TDMA_FLAG_MASTER 3 /* also set for backup masters */
+#define TDMA_FLAG_BACKUP_MASTER 4
+#define TDMA_FLAG_ATTACHED 5
+#define TDMA_FLAG_BACKUP_ACTIVE 6
+
+#define DEFAULT_SLOT 0
+#define DEFAULT_NRT_SLOT 1
+
+/* job IDs */
+#define WAIT_ON_SYNC -1
+#define XMIT_SYNC -2
+#define BACKUP_SYNC -3
+#define XMIT_REQ_CAL -4
+#define XMIT_RPL_CAL -5
+
+struct tdma_priv;
+
+struct tdma_job {
+	struct list_head entry;
+	int id;
+	unsigned int ref_count;
+};
+
+#define SLOT_JOB(job) ((struct tdma_slot *)(job))
+
+struct tdma_slot {
+	struct tdma_job head;
+
+	u64 offset;
+	unsigned int period;
+	unsigned int phasing;
+	unsigned int mtu;
+	unsigned int size;
+	struct rtskb_prio_queue *queue;
+	struct rtskb_prio_queue local_queue;
+};
+
+#define REQUEST_CAL_JOB(job) ((struct tdma_request_cal *)(job))
+
+struct tdma_request_cal {
+	struct tdma_job head;
+
+	struct tdma_priv *tdma;
+	u64 offset;
+	unsigned int period;
+	unsigned int phasing;
+	unsigned int cal_rounds;
+	u64 *cal_results;
+	u64 *result_buffer;
+};
+
+#define REPLY_CAL_JOB(job) ((struct tdma_reply_cal *)(job))
+
+struct tdma_reply_cal {
+	struct tdma_job head;
+
+	u32 reply_cycle;
+	u64 reply_offset;
+	struct rtskb *reply_rtskb;
+};
+
+struct tdma_priv {
+	unsigned int magic;
+	struct rtnet_device *rtdev;
+	char device_name[32];
+	struct rtdm_driver api_driver;
+	struct rtdm_device api_device;
+
+#ifdef ALIGN_RTOS_TASK
+	__u8 __align[(ALIGN_RTOS_TASK -
+		      ((sizeof(unsigned int) + sizeof(struct rtnet_device *) +
+			sizeof(struct rtdm_device)) &
+		       (ALIGN_RTOS_TASK - 1))) &
+		     (ALIGN_RTOS_TASK - 1)];
+#endif
+	rtdm_task_t worker_task;
+	rtdm_event_t worker_wakeup;
+	rtdm_event_t xmit_event;
+	rtdm_event_t sync_event;
+
+	unsigned long flags;
+	unsigned int cal_rounds;
+	u32 current_cycle;
+	u64 current_cycle_start;
+	u64 master_packet_delay_ns;
+	nanosecs_rel_t clock_offset;
+
+	struct tdma_job sync_job;
+	struct tdma_job *first_job;
+	struct tdma_job *current_job;
+	volatile unsigned int job_list_revision;
+
+	unsigned int max_slot_id;
+	struct tdma_slot **slot_table;
+
+	struct rt_proc_call *calibration_call;
+	unsigned char master_hw_addr[MAX_ADDR_LEN];
+
+	rtdm_lock_t lock;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	struct rtskb_pool cal_rtskb_pool;
+	u64 cycle_period;
+	u64 backup_sync_inc;
+#endif
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	struct list_head list_entry;
+#endif
+};
+
+extern struct rtmac_disc tdma_disc;
+
+#define print_jobs()                                                           \
+	do {                                                                   \
+		struct tdma_job *entry;                                        \
+		rtdm_printk("%s:%d - ", __FUNCTION__, __LINE__);               \
+		list_for_each_entry (entry, &tdma->first_job->entry, entry)    \
+			rtdm_printk("%d ", entry->id);                         \
+		rtdm_printk("\n");                                             \
+	} while (0)
+
+#endif /* __TDMA_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_dev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_dev.h
new file mode 100644
index 0000000..776102a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_dev.h
@@ -0,0 +1,37 @@
+/***
+ *
+ *  include/rtmac/tdma/tdma_dev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_DEV_H_
+#define __TDMA_DEV_H_
+
+#include <rtmac/tdma/tdma.h>
+
+int tdma_dev_init(struct rtnet_device *rtdev, struct tdma_priv *tdma);
+
+static inline void tdma_dev_release(struct tdma_priv *tdma)
+{
+	rtdm_dev_unregister(&tdma->api_device);
+}
+
+#endif /* __TDMA_DEV_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_ioctl.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_ioctl.h
new file mode 100644
index 0000000..241aa44
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_ioctl.h
@@ -0,0 +1,35 @@
+/***
+ *
+ *  include/rtmac/tdma/tdma_ioctl.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_IOCTL_H_
+#define __TDMA_IOCTL_H_
+
+#include <rtmac/tdma/tdma.h>
+
+int tdma_cleanup_slot(struct tdma_priv *tdma, struct tdma_slot *slot);
+
+int tdma_ioctl(struct rtnet_device *rtdev, unsigned int request,
+	       unsigned long arg);
+
+#endif /* __TDMA_IOCTL_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_proto.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_proto.h
new file mode 100644
index 0000000..7cddc49
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_proto.h
@@ -0,0 +1,81 @@
+/***
+ *
+ *  include/rtmac/tdma/tdma_proto.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_PROTO_H_
+#define __TDMA_PROTO_H_
+
+#include <rtdev.h>
+
+#include <rtmac/tdma/tdma.h>
+
+#define TDMA_FRM_VERSION 0x0201
+
+#define TDMA_FRM_SYNC 0x0000
+#define TDMA_FRM_REQ_CAL 0x0010
+#define TDMA_FRM_RPL_CAL 0x0011
+
+struct tdma_frm_head {
+	u16 version;
+	u16 id;
+} __attribute__((packed));
+
+#define SYNC_FRM(head) ((struct tdma_frm_sync *)(head))
+
+struct tdma_frm_sync {
+	struct tdma_frm_head head;
+	u32 cycle_no;
+	u64 xmit_stamp;
+	u64 sched_xmit_stamp;
+} __attribute__((packed));
+
+#define REQ_CAL_FRM(head) ((struct tdma_frm_req_cal *)(head))
+
+struct tdma_frm_req_cal {
+	struct tdma_frm_head head;
+	u64 xmit_stamp;
+	u32 reply_cycle;
+	u64 reply_slot_offset;
+} __attribute__((packed));
+
+#define RPL_CAL_FRM(head) ((struct tdma_frm_rpl_cal *)(head))
+
+struct tdma_frm_rpl_cal {
+	struct tdma_frm_head head;
+	u64 request_xmit_stamp;
+	u64 reception_stamp;
+	u64 xmit_stamp;
+} __attribute__((packed));
+
+void tdma_xmit_sync_frame(struct tdma_priv *tdma);
+int tdma_xmit_request_cal_frame(struct tdma_priv *tdma, u32 reply_cycle,
+				u64 reply_slot_offset);
+
+int tdma_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev);
+int tdma_nrt_packet_tx(struct rtskb *rtskb);
+
+int tdma_packet_rx(struct rtskb *rtskb);
+
+unsigned int tdma_get_mtu(struct rtnet_device *rtdev, unsigned int priority);
+
+#endif /* __TDMA_PROTO_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_worker.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_worker.h
new file mode 100644
index 0000000..35469d0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtmac/tdma/tdma_worker.h
@@ -0,0 +1,34 @@
+/***
+ *
+ *  include/rtmac/tdma/tdma_worker.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_WORKER_H_
+#define __TDMA_WORKER_H_
+
+#include <rtdm/driver.h>
+
+#define DEF_WORKER_PRIO RTDM_TASK_HIGHEST_PRIORITY
+
+void tdma_worker(void *arg);
+
+#endif /* __TDMA_WORKER_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_checksum.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_checksum.h
new file mode 100644
index 0000000..7c18413
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_checksum.h
@@ -0,0 +1,19 @@
+// SPDX-License-Identifier: GPL-2.0
+#ifndef __RTNET_CHECKSUM_H_
+#define __RTNET_CHECKSUM_H_
+
+#include <linux/string.h>
+#include <net/checksum.h>
+
+#define rtnet_csum(__buf, __len, __csum)				\
+	({								\
+		csum_partial(__buf, __len, (__force __wsum)__csum);	\
+	})
+
+#define rtnet_csum_copy(__src, __dst, __len, __csum)			\
+	({								\
+		memcpy(__dst, __src, __len);				\
+		csum_partial(__dst, __len, (__force __wsum)__csum);	\
+	})
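+
+/*
+ * Example (illustrative only): computing a partial checksum over a buffer
+ * and folding it to 16 bit; csum_fold() comes from <net/checksum.h>.
+ *
+ *	__wsum csum = rtnet_csum(buf, len, 0);
+ *	__sum16 check = csum_fold(csum);
+ */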
+
+#endif /* !__RTNET_CHECKSUM_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_chrdev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_chrdev.h
new file mode 100644
index 0000000..0fc10f6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_chrdev.h
@@ -0,0 +1,116 @@
+/***
+ *
+ *  include/rtnet_chrdev.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999    Lineo, Inc
+ *                1999,2002 David A. Schleef <ds@schleef.org>
+ *                2002 Ulrich Marx <marx@fet.uni-hannover.de>
+ *                2003,2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_CHRDEV_H_
+#define __RTNET_CHRDEV_H_
+
+#include <rtdev.h>
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+#include <linux/init.h>
+#include <linux/ioctl.h>
+#include <linux/netdevice.h>
+#include <linux/types.h>
+
+/* new extensible interface */
+struct rtnet_ioctls {
+	/* internal usage only */
+	struct list_head entry;
+	atomic_t ref_count;
+
+	/* provider specification */
+	const char *service_name;
+	unsigned int ioctl_type;
+	int (*handler)(struct rtnet_device *rtdev, unsigned int request,
+		       unsigned long arg);
+};
+
+extern int rtnet_register_ioctls(struct rtnet_ioctls *ioctls);
+extern void rtnet_unregister_ioctls(struct rtnet_ioctls *ioctls);
+
+extern int __init rtnet_chrdev_init(void);
+extern void rtnet_chrdev_release(void);
+
+#else /* ifndef __KERNEL__ */
+
+#include <net/if.h> /* IFNAMSIZ */
+#include <linux/types.h>
+
+#endif /* __KERNEL__ */
+
+#define RTNET_MINOR 240 /* user interface for /dev/rtnet */
+#define DEV_ADDR_LEN 32 /* avoids inconsistent MAX_ADDR_LEN */
+
+struct rtnet_ioctl_head {
+	char if_name[IFNAMSIZ];
+};
+
+struct rtnet_core_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		/*** rtifconfig **/
+		struct {
+			__u32 ip_addr;
+			__u32 broadcast_ip;
+			__u32 set_dev_flags;
+			__u32 clear_dev_flags;
+			__u32 dev_addr_type;
+			__u32 __padding;
+			__u8 dev_addr[DEV_ADDR_LEN];
+		} up;
+
+		struct {
+			__u32 ifindex;
+			__u32 type;
+			__u32 ip_addr;
+			__u32 broadcast_ip;
+			__u32 mtu;
+			__u32 flags;
+			__u8 dev_addr[DEV_ADDR_LEN];
+		} info;
+
+		__u64 __padding[8];
+	} args;
+};
+
+#define RTNET_IOC_NODEV_PARAM 0x80
+
+#define RTNET_IOC_TYPE_CORE 0
+#define RTNET_IOC_TYPE_RTCFG 1
+#define RTNET_IOC_TYPE_IPV4 2
+#define RTNET_IOC_TYPE_RTMAC_NOMAC 100
+#define RTNET_IOC_TYPE_RTMAC_TDMA 110
+
+#define IOC_RT_IFUP _IOW(RTNET_IOC_TYPE_CORE, 0, struct rtnet_core_cmd)
+#define IOC_RT_IFDOWN _IOW(RTNET_IOC_TYPE_CORE, 1, struct rtnet_core_cmd)
+#define IOC_RT_IFINFO                                                          \
+	_IOWR(RTNET_IOC_TYPE_CORE, 2 | RTNET_IOC_NODEV_PARAM,                  \
+	      struct rtnet_core_cmd)
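+
+/*
+ * Example (illustrative only; interface name and addresses are
+ * placeholders): bringing an interface up through the /dev/rtnet control
+ * device, the way the rtifconfig user interface does.
+ *
+ *	struct rtnet_core_cmd cmd = { 0 };
+ *
+ *	strncpy(cmd.head.if_name, "rteth0", IFNAMSIZ);
+ *	cmd.args.up.ip_addr = inet_addr("10.0.0.1");
+ *	cmd.args.up.broadcast_ip = inet_addr("10.0.0.255");
+ *
+ *	fd = open("/dev/rtnet", O_RDWR);
+ *	ioctl(fd, IOC_RT_IFUP, &cmd);
+ */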
+
+#endif /* __RTNET_CHRDEV_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_internal.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_internal.h
new file mode 100644
index 0000000..ec7e614
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_internal.h
@@ -0,0 +1,75 @@
+/***
+ *
+ *  rtnet_internal.h - internal declarations
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_INTERNAL_H_
+#define __RTNET_INTERNAL_H_
+
+#include <linux/module.h>
+#include <linux/mutex.h>
+#include <rtdm/driver.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+#define RTNET_ASSERT(expr, func)                                               \
+	if (!(expr)) {                                                         \
+		rtdm_printk("Assertion failed! %s:%s:%d %s\n", __FILE__,       \
+			    __FUNCTION__, __LINE__, (#expr));                  \
+		func                                                           \
+	}
+#else
+#define RTNET_ASSERT(expr, func)
+#endif /* CONFIG_XENO_DRIVERS_NET_CHECKED */
+
+/* some configurables */
+
+#define RTNET_DEF_STACK_PRIORITY                                               \
+	RTDM_TASK_HIGHEST_PRIORITY + RTDM_TASK_LOWER_PRIORITY
+/*#define RTNET_RTDEV_PRIORITY        5*/
+
+struct rtnet_device;
+
+/*struct rtnet_msg {
+    int                 msg_type;
+    struct rtnet_device *rtdev;
+};*/
+
+struct rtnet_mgr {
+	rtdm_task_t task;
+	/*    MBX     mbx;*/
+	rtdm_event_t event;
+};
+
+extern struct rtnet_mgr STACK_manager;
+extern struct rtnet_mgr RTDEV_manager;
+
+extern const char rtnet_rtdm_provider_name[];
+
+#ifdef CONFIG_XENO_OPT_VFILE
+extern struct xnvfile_directory rtnet_proc_root;
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+extern struct class *rtnet_class;
+
+#endif /* __RTNET_INTERNAL_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_iovec.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_iovec.h
new file mode 100644
index 0000000..45b1cf7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_iovec.h
@@ -0,0 +1,38 @@
+/* rtnet_iovec.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *               2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_IOVEC_H_
+#define __RTNET_IOVEC_H_
+
+#ifdef __KERNEL__
+
+#include <linux/uio.h>
+
+struct user_msghdr;
+struct rtdm_fd;
+
+ssize_t rtnet_write_to_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			   const void *data, size_t len);
+
+ssize_t rtnet_read_from_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			    void *data, size_t len);
+#endif /* __KERNEL__ */
+
+#endif /* __RTNET_IOVEC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_port.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_port.h
new file mode 100644
index 0000000..5342cc6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_port.h
@@ -0,0 +1,113 @@
+/* include/rtnet_port.h
+ *
+ * RTnet - real-time networking subsystem
+ * Copyright (C) 2003      Wittawat Yamwong
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+#ifndef __RTNET_PORT_H_
+#define __RTNET_PORT_H_
+
+#ifdef __KERNEL__
+
+#include <linux/bitops.h>
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/netdevice.h>
+#include <linux/vmalloc.h>
+#include <linux/bitops.h>
+
+#include <rtdev.h>
+#include <rtdev_mgr.h>
+#include <rtdm/driver.h>
+#include <stack_mgr.h>
+#include <ethernet/eth.h>
+
+static inline void rtnetif_start_queue(struct rtnet_device *rtdev)
+{
+	clear_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state);
+}
+
+static inline void rtnetif_wake_queue(struct rtnet_device *rtdev)
+{
+	if (test_and_clear_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state))
+		/*TODO __netif_schedule(dev); */;
+}
+
+static inline void rtnetif_stop_queue(struct rtnet_device *rtdev)
+{
+	set_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state);
+}
+
+static inline int rtnetif_queue_stopped(struct rtnet_device *rtdev)
+{
+	return test_bit(__RTNET_LINK_STATE_XOFF, &rtdev->link_state);
+}
+
+static inline int rtnetif_running(struct rtnet_device *rtdev)
+{
+	return test_bit(__RTNET_LINK_STATE_START, &rtdev->link_state);
+}
+
+static inline int rtnetif_device_present(struct rtnet_device *rtdev)
+{
+	return test_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state);
+}
+
+static inline void rtnetif_device_detach(struct rtnet_device *rtdev)
+{
+	if (test_and_clear_bit(__RTNET_LINK_STATE_PRESENT,
+			       &rtdev->link_state) &&
+	    rtnetif_running(rtdev)) {
+		rtnetif_stop_queue(rtdev);
+	}
+}
+
+static inline void rtnetif_device_attach(struct rtnet_device *rtdev)
+{
+	if (!test_and_set_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state) &&
+	    rtnetif_running(rtdev)) {
+		rtnetif_wake_queue(rtdev);
+		/* __netdev_watchdog_up(rtdev); */
+	}
+}
+
+static inline void rtnetif_carrier_on(struct rtnet_device *rtdev)
+{
+	clear_bit(__RTNET_LINK_STATE_NOCARRIER, &rtdev->link_state);
+	/*
+    if (netif_running(dev))
+	__netdev_watchdog_up(dev);
+    */
+}
+
+static inline void rtnetif_carrier_off(struct rtnet_device *rtdev)
+{
+	set_bit(__RTNET_LINK_STATE_NOCARRIER, &rtdev->link_state);
+}
+
+static inline int rtnetif_carrier_ok(struct rtnet_device *rtdev)
+{
+	return !test_bit(__RTNET_LINK_STATE_NOCARRIER, &rtdev->link_state);
+}
+
+#define NIPQUAD(addr)                                                          \
+	((unsigned char *)&addr)[0], ((unsigned char *)&addr)[1],              \
+		((unsigned char *)&addr)[2], ((unsigned char *)&addr)[3]
+#define NIPQUAD_FMT "%u.%u.%u.%u"
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTNET_PORT_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_rtpc.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_rtpc.h
new file mode 100644
index 0000000..01d7ab1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_rtpc.h
@@ -0,0 +1,71 @@
+/***
+ *
+ *  include/rtnet_rtpc.h
+ *
+ *  RTnet - real-time networking subsystem
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_RTPC_H_
+#define __RTNET_RTPC_H_
+
+#include <linux/init.h>
+
+#include <rtnet_internal.h>
+
+struct rt_proc_call;
+
+typedef int (*rtpc_proc)(struct rt_proc_call *call);
+typedef void (*rtpc_copy_back_proc)(struct rt_proc_call *call, void *priv_data);
+typedef void (*rtpc_cleanup_proc)(void *priv_data);
+
+struct rt_proc_call {
+	struct list_head list_entry;
+	int processed;
+	rtpc_proc proc;
+	int result;
+	atomic_t ref_count;
+	wait_queue_head_t call_wq;
+	rtpc_cleanup_proc cleanup_handler;
+	char priv_data[0] __attribute__((aligned(8)));
+};
+
+#define CALL_PENDING 1000 /* result value for blocked calls */
+
+int rtnet_rtpc_dispatch_call(rtpc_proc rt_proc, unsigned int timeout,
+			     void *priv_data, size_t priv_data_size,
+			     rtpc_copy_back_proc copy_back_handler,
+			     rtpc_cleanup_proc cleanup_handler);
+
+void rtnet_rtpc_complete_call(struct rt_proc_call *call, int result);
+void rtnet_rtpc_complete_call_nrt(struct rt_proc_call *call, int result);
+
+#define rtpc_dispatch_call rtnet_rtpc_dispatch_call
+#define rtpc_complete_call rtnet_rtpc_complete_call
+#define rtpc_complete_call_nrt rtnet_rtpc_complete_call_nrt
+
+#define rtpc_get_priv(call, type) (type *)(call->priv_data)
+#define rtpc_get_result(call) call->result
+#define rtpc_set_result(call, new_result) call->result = new_result
+#define rtpc_set_cleanup_handler(call, handler) call->cleanup_handler = handler;
+
+int __init rtpc_init(void);
+void rtpc_cleanup(void);
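+
+/*
+ * Example (illustrative only; my_cmd and my_handler are hypothetical):
+ * a non-RT caller dispatches work via RTPC; a handler that cannot finish
+ * synchronously returns CALL_PENDING and is completed later with
+ * rtpc_complete_call() from the real-time side.
+ *
+ *	static int my_handler(struct rt_proc_call *call)
+ *	{
+ *		struct my_cmd *cmd = rtpc_get_priv(call, struct my_cmd);
+ *
+ *		... trigger RT processing, keep 'call' so that the RT side
+ *		can invoke rtpc_complete_call(call, result) ...
+ *		return CALL_PENDING;
+ *	}
+ *
+ *	ret = rtpc_dispatch_call(my_handler, 0, &cmd, sizeof(cmd),
+ *				 NULL, NULL);
+ */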
+
+#endif /* __RTNET_RTPC_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_socket.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_socket.h
new file mode 100644
index 0000000..d2caab6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtnet_socket.h
@@ -0,0 +1,108 @@
+/***
+ *
+ *  include/rtnet_socket.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTNET_SOCKET_H_
+#define __RTNET_SOCKET_H_
+
+#include <asm/atomic.h>
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtdm/driver.h>
+#include <stack_mgr.h>
+
+struct rtsocket {
+	unsigned short protocol;
+
+	struct rtskb_pool skb_pool;
+	unsigned int pool_size;
+	struct mutex pool_nrt_lock;
+
+	struct rtskb_queue incoming;
+
+	rtdm_lock_t param_lock;
+
+	unsigned int priority;
+	nanosecs_rel_t timeout; /* receive timeout, 0 for infinite */
+
+	rtdm_sem_t pending_sem;
+
+	void (*callback_func)(struct rtdm_fd *, void *arg);
+	void *callback_arg;
+
+	unsigned long flags;
+
+	union {
+		/* IP specific */
+		struct {
+			u32 saddr; /* source ip-addr (bind) */
+			u32 daddr; /* destination ip-addr */
+			u16 sport; /* source port */
+			u16 dport; /* destination port */
+
+			int reg_index; /* index in port registry */
+			u8 tos;
+			u8 state;
+		} inet;
+
+		/* packet socket specific */
+		struct {
+			struct rtpacket_type packet_type;
+			int ifindex;
+		} packet;
+	} prot;
+};
+
+static inline struct rtdm_fd *rt_socket_fd(struct rtsocket *sock)
+{
+	return rtdm_private_to_fd(sock);
+}
+
+void *rtnet_get_arg(struct rtdm_fd *fd, void *tmp, const void *src, size_t len);
+
+int rtnet_put_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len);
+
+#define rt_socket_reference(sock) rtdm_fd_lock(rt_socket_fd(sock))
+#define rt_socket_dereference(sock) rtdm_fd_unlock(rt_socket_fd(sock))
+
+int rt_socket_init(struct rtdm_fd *fd, unsigned short protocol);
+
+void rt_socket_cleanup(struct rtdm_fd *fd);
+int rt_socket_common_ioctl(struct rtdm_fd *fd, int request, void __user *arg);
+int rt_socket_if_ioctl(struct rtdm_fd *fd, int request, void __user *arg);
+int rt_socket_select_bind(struct rtdm_fd *fd, rtdm_selector_t *selector,
+			  enum rtdm_selecttype type, unsigned fd_index);
+
+int rt_bare_socket_init(struct rtdm_fd *fd, unsigned short protocol,
+			unsigned int priority, unsigned int pool_size);
+
+static inline void rt_bare_socket_cleanup(struct rtsocket *sock)
+{
+	rtskb_pool_release(&sock->skb_pool);
+}
+
+#endif /* __RTNET_SOCKET_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb.h
new file mode 100644
index 0000000..66c4372
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb.h
@@ -0,0 +1,809 @@
+/***
+ *
+ *  include/rtskb.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>,
+ *                2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTSKB_H_
+#define __RTSKB_H_
+
+#ifdef __KERNEL__
+
+#include <linux/skbuff.h>
+
+#include <rtdm/net.h>
+#include <rtnet_internal.h>
+
+/***
+
+rtskb Management - A Short Introduction
+---------------------------------------
+
+1. rtskbs (Real-Time Socket Buffers)
+
+A rtskb consists of a management structure (struct rtskb) and a fixed-size
+(RTSKB_SIZE) data buffer. It is used to store network packets on their way from
+the API routines through the stack to the NICs or vice versa. rtskbs are
+allocated as one chunk of memory which contains both the management structure
+and the buffer memory itself.
+
+
+2. rtskb Queues
+
+A rtskb queue is described by struct rtskb_queue. A queue can contain an
+unlimited number of rtskbs in an ordered way. A rtskb can either be added to
+the head (rtskb_queue_head()) or the tail of a queue (rtskb_queue_tail()). When
+a rtskb is removed from a queue (rtskb_dequeue()), it is always taken from the
+head. Queues are normally spin-lock protected unless the __-prefixed variants
+of the queuing functions are used.
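+
+As a minimal sketch (my_queue and skb are placeholders; skb is an rtskb
+obtained elsewhere, e.g. from a pool):
+
+    struct rtskb_queue my_queue;
+
+    rtskb_queue_init(&my_queue);      /* prepare lock and pointers */
+    rtskb_queue_tail(&my_queue, skb); /* append under the queue lock */
+    skb = rtskb_dequeue(&my_queue);   /* always removes from the head */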
+
+
+3. Prioritized rtskb Queues
+
+A prioritized queue contains a number of normal rtskb queues within an array.
+The array index of a sub-queue corresponds to the priority of the rtskbs within
+this queue. For enqueuing a rtskb (rtskb_prio_queue_head()), its priority field
+is evaluated and the rtskb is then placed into the appropriate sub-queue. When
+dequeuing a rtskb, the first rtskb of the non-empty sub-queue with the highest
+priority is returned. The current implementation supports 32 different
+priority levels; the lowest is defined by QUEUE_MIN_PRIO, the highest by
+QUEUE_MAX_PRIO.
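+
+A short usage sketch (prio_queue and skb are placeholders; the sub-queue is
+selected from skb->priority & RTSKB_PRIO_MASK):
+
+    struct rtskb_prio_queue prio_queue;
+
+    rtskb_prio_queue_init(&prio_queue);
+    rtskb_prio_queue_head(&prio_queue, skb); /* sorted into its sub-queue */
+    skb = rtskb_prio_dequeue(&prio_queue);   /* highest priority comes first */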
+
+
+4. rtskb Pools
+
+As rtskbs must not be allocated by a normal memory manager during runtime,
+preallocated rtskbs are kept ready in several pools. Most packet producers
+(NICs, sockets, etc.) have their own pools in order to be independent of the
+load situation of other parts of the stack.
+
+When a pool is created (rtskb_pool_init()), the required rtskbs are allocated
+from a Linux slab cache. Pools can be extended (rtskb_pool_extend()) or
+shrunk (rtskb_pool_shrink()) during runtime. When shutting down the
+program/module, every pool has to be released (rtskb_pool_release()). All these
+operations must be executed within a non-real-time context.
+
+Pools are organized as normal rtskb queues (struct rtskb_queue). When a rtskb
+is allocated (alloc_rtskb()), it is actually dequeued from the pool's queue.
+When freeing a rtskb (kfree_rtskb()), the rtskb is enqueued to its owning pool.
+rtskbs can be exchanged between pools (rtskb_acquire()). In this case, the
+passed rtskb switches over from its owning pool to the given pool, but only if
+this pool can pass an empty rtskb from its own queue back.
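+
+A typical pool life cycle might look like this (sketch; my_pool and skb are
+placeholders, and the module-pool variant binds the pool to THIS_MODULE):
+
+    static struct rtskb_pool my_pool;
+
+    /* module init (non real-time context) */
+    rtskb_module_pool_init(&my_pool, 16);
+
+    /* runtime: take an rtskb from the pool, return it later */
+    skb = alloc_rtskb(RTSKB_SIZE, &my_pool);
+    if (skb)
+        kfree_rtskb(skb);
+
+    /* module exit (non real-time context) */
+    rtskb_pool_release(&my_pool);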
+
+
+5. rtskb Chains
+
+To ease the defragmentation of larger IP packets, several rtskbs can form a
+chain. For this purpose, the first rtskb (and only the first!) provides a
+pointer to the last rtskb in the chain. When enqueuing the first rtskb of a
+chain, the whole chain is automatically placed into the destined queue. But to
+dequeue a complete chain, specialized calls are required (suffix: _chain).
+While chains also get freed en bloc (kfree_rtskb()) when passing the first
+rtskb, it is not possible to allocate a chain from a pool (alloc_rtskb()); a
+newly allocated rtskb is always reset to a "single rtskb chain". Furthermore,
+the acquisition of complete chains is NOT supported (rtskb_acquire()).
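+
+For example, assuming a queue frag_queue that holds chained fragments, a
+complete chain is retrieved and released like this (sketch):
+
+    struct rtskb *first = rtskb_dequeue_chain(&frag_queue);
+
+    if (first)
+        kfree_rtskb(first); /* releases the whole chain en bloc */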
+
+
+6. Capturing Support (Optional)
+
+When incoming or outgoing packets are captured, the assigned rtskb needs to be
+shared between the stack, the driver, and the capturing service. In contrast to
+many other network stacks, RTnet does not create a new rtskb head and
+re-references the payload. Instead, additional fields at the end of the rtskb
+structure are used for sharing a rtskb with a capturing service. If the sharing
+bit (RTSKB_CAP_SHARED) in cap_flags is set, the rtskb will not be returned to
+the owning pool when kfree_rtskb is called. Instead, this bit will be reset,
+and a compensation rtskb stored in cap_comp_skb will be returned to the owning
+pool. cap_start and cap_len can be used to mirror the dimension of the full
+packet. This is required because the data and len fields will be modified while
+walking through the stack. cap_next allows adding a rtskb to a separate queue
+which is independent of any queue described in section 2.
+
+Certain setup tasks for capturing packets cannot become part of a capturing
+module; they have to be embedded into the stack. For this purpose, several
+inline functions are provided. rtcap_mark_incoming() is used to save the packet
+dimension right before it is modified by the stack. rtcap_report_incoming()
+calls the capturing handler, if present, in order to let it process the
+received rtskb (e.g. allocate a compensation rtskb, mark the original rtskb as
+shared, and enqueue it).
+
+Outgoing rtskbs have to be captured by adding a hook function to the chain of
+hard_start_xmit functions of a device. To measure the delay caused by RTmac
+between the request and the actual transmission, a time stamp can be taken using
+rtcap_mark_rtmac_enqueue(). This function is typically called by RTmac
+disciplines when they add a rtskb to their internal transmission queue. In such
+a case, the RTSKB_CAP_RTMAC_STAMP bit is set in cap_flags to indicate that the
+cap_rtmac_stamp field now contains valid data.
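+
+As an illustration, a hypothetical RTmac discipline would stamp the rtskb
+right before placing it on its internal transmission queue (sketch; disc and
+its xmit_queue member are placeholders):
+
+    rtcap_mark_rtmac_enqueue(skb);            /* sets RTSKB_CAP_RTMAC_STAMP */
+    rtskb_queue_tail(&disc->xmit_queue, skb);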
+
+ ***/
+
+#ifndef CHECKSUM_PARTIAL
+#define CHECKSUM_PARTIAL CHECKSUM_HW
+#endif
+
+#define RTSKB_CAP_SHARED 1 /* rtskb shared between stack and RTcap */
+#define RTSKB_CAP_RTMAC_STAMP 2 /* cap_rtmac_stamp is valid             */
+
+#define RTSKB_UNMAPPED 0
+
+struct rtskb_queue;
+struct rtsocket;
+struct rtnet_device;
+
+/***
+ *  rtskb - realtime socket buffer
+ */
+struct rtskb {
+	struct rtskb *next; /* used for queuing rtskbs */
+	struct rtskb *chain_end; /* marks the end of a rtskb chain starting
+				       with this very rtskb */
+
+	struct rtskb_pool *pool; /* owning pool */
+
+	unsigned int priority; /* bit 0..15: prio, 16..31: user-defined */
+
+	struct rtsocket *sk; /* assigned socket */
+	struct rtnet_device *rtdev; /* source or destination device */
+
+	nanosecs_abs_t time_stamp; /* arrival or transmission (RTcap) time */
+
+	/* patch address of the transmission time stamp, can be NULL;
+	 * calculation: *xmit_stamp = cpu_to_be64(time_in_ns + *xmit_stamp)
+	 */
+	nanosecs_abs_t *xmit_stamp;
+
+	/* transport layer */
+	union {
+		struct tcphdr *th;
+		struct udphdr *uh;
+		struct icmphdr *icmph;
+		struct iphdr *ipihdr;
+		unsigned char *raw;
+	} h;
+
+	/* network layer */
+	union {
+		struct iphdr *iph;
+		struct arphdr *arph;
+		unsigned char *raw;
+	} nh;
+
+	/* link layer */
+	union {
+		struct ethhdr *ethernet;
+		unsigned char *raw;
+	} mac;
+
+	unsigned short protocol;
+	unsigned char pkt_type;
+
+	unsigned char ip_summed;
+	unsigned int csum;
+
+	unsigned char *data;
+	unsigned char *tail;
+	unsigned char *end;
+	unsigned int len;
+
+	dma_addr_t buf_dma_addr;
+
+	unsigned char *buf_start;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+	unsigned char *buf_end;
+#endif
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	int cap_flags; /* see RTSKB_CAP_xxx                    */
+	struct rtskb *cap_comp_skb; /* compensation rtskb                */
+	struct rtskb *cap_next; /* used for capture queue               */
+	unsigned char *cap_start; /* start offset for capturing           */
+	unsigned int cap_len; /* capture length of this rtskb         */
+	nanosecs_abs_t cap_rtmac_stamp; /* RTmac enqueuing time            */
+#endif
+
+	struct list_head entry; /* for global rtskb list */
+};
+
+struct rtskb_queue {
+	struct rtskb *first;
+	struct rtskb *last;
+	rtdm_lock_t lock;
+};
+
+struct rtskb_pool_lock_ops {
+	int (*trylock)(void *cookie);
+	void (*unlock)(void *cookie);
+};
+
+struct rtskb_pool {
+	struct rtskb_queue queue;
+	const struct rtskb_pool_lock_ops *lock_ops;
+	void *lock_cookie;
+};
+
+#define QUEUE_MAX_PRIO 0
+#define QUEUE_MIN_PRIO 31
+
+struct rtskb_prio_queue {
+	rtdm_lock_t lock;
+	unsigned long usage; /* bit array encoding non-empty sub-queues */
+	struct rtskb_queue queue[QUEUE_MIN_PRIO + 1];
+};
+
+#define RTSKB_PRIO_MASK 0x0000FFFF /* bits  0..15: xmit prio    */
+#define RTSKB_CHANNEL_MASK 0xFFFF0000 /* bits 16..31: xmit channel */
+#define RTSKB_CHANNEL_SHIFT 16
+
+#define RTSKB_DEF_RT_CHANNEL SOCK_DEF_RT_CHANNEL
+#define RTSKB_DEF_NRT_CHANNEL SOCK_DEF_NRT_CHANNEL
+#define RTSKB_USER_CHANNEL SOCK_USER_CHANNEL
+
+/* Note: always keep SOCK_XMIT_PARAMS consistent with definitions above! */
+#define RTSKB_PRIO_VALUE SOCK_XMIT_PARAMS
+
+/* default values for the module parameter */
+#define DEFAULT_GLOBAL_RTSKBS 0 /* default number of rtskb's in global pool */
+#define DEFAULT_DEVICE_RTSKBS                                                  \
+	16 /* default additional rtskbs per network adapter */
+#define DEFAULT_SOCKET_RTSKBS 16 /* default number of rtskb's in socket pools */
+
+#define ALIGN_RTSKB_STRUCT_LEN SKB_DATA_ALIGN(sizeof(struct rtskb))
+#define RTSKB_SIZE (2048 + NET_IP_ALIGN) /* maximum needed by igb */
+
+extern unsigned int rtskb_pools; /* current number of rtskb pools      */
+extern unsigned int rtskb_pools_max; /* maximum number of rtskb pools      */
+extern unsigned int rtskb_amount; /* current number of allocated rtskbs */
+extern unsigned int rtskb_amount_max; /* maximum number of allocated rtskbs */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+extern void rtskb_over_panic(struct rtskb *skb, int len, void *here);
+extern void rtskb_under_panic(struct rtskb *skb, int len, void *here);
+#endif
+
+extern struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool);
+
+extern void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb);
+
+extern struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool);
+
+extern void kfree_rtskb(struct rtskb *skb);
+#define dev_kfree_rtskb(a) kfree_rtskb(a)
+
+static inline void rtskb_tx_timestamp(struct rtskb *skb)
+{
+	nanosecs_abs_t *ts = skb->xmit_stamp;
+
+	if (!ts)
+		return;
+
+	*ts = cpu_to_be64(rtdm_clock_read() + *ts);
+}
+
+/***
+ *  rtskb_queue_init - initialize the queue
+ *  @queue: queue to initialize
+ */
+static inline void rtskb_queue_init(struct rtskb_queue *queue)
+{
+	rtdm_lock_init(&queue->lock);
+	queue->first = NULL;
+	queue->last = NULL;
+}
+
+/***
+ *  rtskb_prio_queue_init - initialize the prioritized queue
+ *  @prioqueue: queue to initialize
+ */
+static inline void rtskb_prio_queue_init(struct rtskb_prio_queue *prioqueue)
+{
+	memset(prioqueue, 0, sizeof(struct rtskb_prio_queue));
+	rtdm_lock_init(&prioqueue->lock);
+}
+
+/***
+ *  rtskb_queue_empty
+ *  @queue: queue to check
+ */
+static inline int rtskb_queue_empty(struct rtskb_queue *queue)
+{
+	return (queue->first == NULL);
+}
+
+/***
+ *  rtskb_prio_queue_empty
+ *  @prioqueue: queue to check
+ */
+static inline int rtskb_prio_queue_empty(struct rtskb_prio_queue *prioqueue)
+{
+	return (prioqueue->usage == 0);
+}
+
+/***
+ *  __rtskb_queue_head - insert a buffer at the queue head (w/o locks)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_queue_head(struct rtskb_queue *queue,
+				      struct rtskb *skb)
+{
+	struct rtskb *chain_end = skb->chain_end;
+
+	chain_end->next = queue->first;
+
+	if (queue->first == NULL)
+		queue->last = chain_end;
+	queue->first = skb;
+}
+
+/***
+ *  rtskb_queue_head - insert a buffer at the queue head (lock protected)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_queue_head(struct rtskb_queue *queue,
+				    struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	__rtskb_queue_head(queue, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+}
+
+/***
+ *  __rtskb_prio_queue_head - insert a buffer at the prioritized queue head
+ *                            (w/o locks)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
+					   struct rtskb *skb)
+{
+	unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
+
+	RTNET_ASSERT(prio <= 31, prio = 31;);
+
+	__rtskb_queue_head(&prioqueue->queue[prio], skb);
+	__set_bit(prio, &prioqueue->usage);
+}
+
+/***
+ *  rtskb_prio_queue_head - insert a buffer at the prioritized queue head
+ *                          (lock protected)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_prio_queue_head(struct rtskb_prio_queue *prioqueue,
+					 struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	__rtskb_prio_queue_head(prioqueue, skb);
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+}
+
+/***
+ *  __rtskb_queue_tail - insert a buffer at the queue tail (w/o locks)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_queue_tail(struct rtskb_queue *queue,
+				      struct rtskb *skb)
+{
+	struct rtskb *chain_end = skb->chain_end;
+
+	chain_end->next = NULL;
+
+	if (queue->first == NULL)
+		queue->first = skb;
+	else
+		queue->last->next = skb;
+	queue->last = chain_end;
+}
+
+/***
+ *  rtskb_queue_tail - insert a buffer at the queue tail (lock protected)
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_queue_tail(struct rtskb_queue *queue,
+				    struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	__rtskb_queue_tail(queue, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+}
+
+/***
+ *  rtskb_queue_tail_check - variant of rtskb_queue_tail that returns
+ *          true on the empty to non-empty transition
+ *  @queue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline bool rtskb_queue_tail_check(struct rtskb_queue *queue,
+					  struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+	bool ret;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	ret = queue->first == NULL;
+	__rtskb_queue_tail(queue, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return ret;
+}
+
+/***
+ *  __rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
+ *                            (w/o locks)
+ *  @prioqueue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void __rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
+					   struct rtskb *skb)
+{
+	unsigned int prio = skb->priority & RTSKB_PRIO_MASK;
+
+	RTNET_ASSERT(prio <= 31, prio = 31;);
+
+	__rtskb_queue_tail(&prioqueue->queue[prio], skb);
+	__set_bit(prio, &prioqueue->usage);
+}
+
+/***
+ *  rtskb_prio_queue_tail - insert a buffer at the prioritized queue tail
+ *                          (lock protected)
+ *  @prioqueue: queue to use
+ *  @skb: buffer to queue
+ */
+static inline void rtskb_prio_queue_tail(struct rtskb_prio_queue *prioqueue,
+					 struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	__rtskb_prio_queue_tail(prioqueue, skb);
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+}
+
+/***
+ *  __rtskb_dequeue - remove from the head of the queue (w/o locks)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *__rtskb_dequeue(struct rtskb_queue *queue)
+{
+	struct rtskb *result;
+
+	if ((result = queue->first) != NULL) {
+		queue->first = result->next;
+		result->next = NULL;
+	}
+
+	return result;
+}
+
+/***
+ *  rtskb_dequeue - remove from the head of the queue (lock protected)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *rtskb_dequeue(struct rtskb_queue *queue)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	result = __rtskb_dequeue(queue);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return result;
+}
+
+/***
+ *  __rtskb_prio_dequeue - remove from the head of the prioritized queue
+ *                         (w/o locks)
+ *  @prioqueue: queue to remove from
+ */
+static inline struct rtskb *
+__rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
+{
+	int prio;
+	struct rtskb *result = NULL;
+	struct rtskb_queue *sub_queue;
+
+	if (prioqueue->usage) {
+		prio = ffz(~prioqueue->usage);
+		sub_queue = &prioqueue->queue[prio];
+		result = __rtskb_dequeue(sub_queue);
+		if (rtskb_queue_empty(sub_queue))
+			__change_bit(prio, &prioqueue->usage);
+	}
+
+	return result;
+}
+
+/***
+ *  rtskb_prio_dequeue - remove from the head of the prioritized queue
+ *                       (lock protected)
+ *  @prioqueue: queue to remove from
+ */
+static inline struct rtskb *
+rtskb_prio_dequeue(struct rtskb_prio_queue *prioqueue)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	result = __rtskb_prio_dequeue(prioqueue);
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+
+	return result;
+}
+
+/***
+ *  __rtskb_dequeue_chain - remove a chain from the head of the queue
+ *                          (w/o locks)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *__rtskb_dequeue_chain(struct rtskb_queue *queue)
+{
+	struct rtskb *result;
+	struct rtskb *chain_end;
+
+	if ((result = queue->first) != NULL) {
+		chain_end = result->chain_end;
+		queue->first = chain_end->next;
+		chain_end->next = NULL;
+	}
+
+	return result;
+}
+
+/***
+ *  rtskb_dequeue_chain - remove a chain from the head of the queue
+ *                        (lock protected)
+ *  @queue: queue to remove from
+ */
+static inline struct rtskb *rtskb_dequeue_chain(struct rtskb_queue *queue)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	result = __rtskb_dequeue_chain(queue);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return result;
+}
+
+/***
+ *  rtskb_prio_dequeue_chain - remove a chain from the head of the
+ *                             prioritized queue
+ *  @prioqueue: queue to remove from
+ */
+static inline struct rtskb *
+rtskb_prio_dequeue_chain(struct rtskb_prio_queue *prioqueue)
+{
+	rtdm_lockctx_t context;
+	int prio;
+	struct rtskb *result = NULL;
+	struct rtskb_queue *sub_queue;
+
+	rtdm_lock_get_irqsave(&prioqueue->lock, context);
+	if (prioqueue->usage) {
+		prio = ffz(~prioqueue->usage);
+		sub_queue = &prioqueue->queue[prio];
+		result = __rtskb_dequeue_chain(sub_queue);
+		if (rtskb_queue_empty(sub_queue))
+			__change_bit(prio, &prioqueue->usage);
+	}
+	rtdm_lock_put_irqrestore(&prioqueue->lock, context);
+
+	return result;
+}
+
+/***
+ *  rtskb_queue_purge - clean the queue
+ *  @queue: queue to purge
+ */
+static inline void rtskb_queue_purge(struct rtskb_queue *queue)
+{
+	struct rtskb *skb;
+	while ((skb = rtskb_dequeue(queue)) != NULL)
+		kfree_rtskb(skb);
+}
+
+static inline int rtskb_headlen(const struct rtskb *skb)
+{
+	return skb->len;
+}
+
+static inline void rtskb_reserve(struct rtskb *skb, unsigned int len)
+{
+	skb->data += len;
+	skb->tail += len;
+}
+
+static inline unsigned char *__rtskb_put(struct rtskb *skb, unsigned int len)
+{
+	unsigned char *tmp = skb->tail;
+
+	skb->tail += len;
+	skb->len += len;
+	return tmp;
+}
+
+#define rtskb_put(skb, length)                                                 \
+	({                                                                     \
+		struct rtskb *__rtskb = (skb);                                 \
+		unsigned int __len = (length);                                 \
+		unsigned char *tmp = __rtskb->tail;                            \
+                                                                               \
+		__rtskb->tail += __len;                                        \
+		__rtskb->len += __len;                                         \
+                                                                               \
+		RTNET_ASSERT(__rtskb->tail <= __rtskb->buf_end,                \
+			     rtskb_over_panic(__rtskb, __len,                  \
+					      current_text_addr()););          \
+                                                                               \
+		tmp;                                                           \
+	})
+
+static inline unsigned char *__rtskb_push(struct rtskb *skb, unsigned int len)
+{
+	skb->data -= len;
+	skb->len += len;
+	return skb->data;
+}
+
+#define rtskb_push(skb, length)                                                \
+	({                                                                     \
+		struct rtskb *__rtskb = (skb);                                 \
+		unsigned int __len = (length);                                 \
+                                                                               \
+		__rtskb->data -= __len;                                        \
+		__rtskb->len += __len;                                         \
+                                                                               \
+		RTNET_ASSERT(__rtskb->data >= __rtskb->buf_start,              \
+			     rtskb_under_panic(__rtskb, __len,                 \
+					       current_text_addr()););         \
+                                                                               \
+		__rtskb->data;                                                 \
+	})
+
+static inline unsigned char *__rtskb_pull(struct rtskb *skb, unsigned int len)
+{
+	RTNET_ASSERT(len <= skb->len, return NULL;);
+
+	skb->len -= len;
+
+	return skb->data += len;
+}
+
+static inline unsigned char *rtskb_pull(struct rtskb *skb, unsigned int len)
+{
+	if (len > skb->len)
+		return NULL;
+
+	skb->len -= len;
+
+	return skb->data += len;
+}
+
+static inline void rtskb_trim(struct rtskb *skb, unsigned int len)
+{
+	if (skb->len > len) {
+		skb->len = len;
+		skb->tail = skb->data + len;
+	}
+}
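+
+/*
+ * The helpers above mirror their Linux skbuff counterparts: data, tail and
+ * len describe the currently valid payload within the fixed buffer. A
+ * minimal sketch of composing a packet (my_pool, data and data_len are
+ * placeholders for illustration):
+ *
+ *    skb = alloc_rtskb(RTSKB_SIZE, &my_pool);
+ *    rtskb_reserve(skb, ETH_HLEN);                     // leave headroom
+ *    memcpy(rtskb_put(skb, data_len), data, data_len); // append payload
+ *    rtskb_push(skb, ETH_HLEN);                        // prepend the header
+ */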
+
+static inline struct rtskb *rtskb_padto(struct rtskb *rtskb, unsigned int len)
+{
+	RTNET_ASSERT(len <= (unsigned int)(rtskb->buf_end + 1 - rtskb->data),
+		     return NULL;);
+
+	memset(rtskb->data + rtskb->len, 0, len - rtskb->len);
+
+	return rtskb;
+}
+
+static inline dma_addr_t rtskb_data_dma_addr(struct rtskb *rtskb,
+					     unsigned int offset)
+{
+	return rtskb->buf_dma_addr + rtskb->data - rtskb->buf_start + offset;
+}
+
+extern struct rtskb_pool global_pool;
+
+extern unsigned int rtskb_pool_init(struct rtskb_pool *pool,
+				    unsigned int initial_size,
+				    const struct rtskb_pool_lock_ops *lock_ops,
+				    void *lock_cookie);
+
+extern unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
+					     unsigned int initial_size,
+					     struct module *module);
+
+#define rtskb_module_pool_init(pool, size)                                     \
+	__rtskb_module_pool_init(pool, size, THIS_MODULE)
+
+extern void rtskb_pool_release(struct rtskb_pool *pool);
+
+extern unsigned int rtskb_pool_extend(struct rtskb_pool *pool,
+				      unsigned int add_rtskbs);
+extern unsigned int rtskb_pool_shrink(struct rtskb_pool *pool,
+				      unsigned int rem_rtskbs);
+extern int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool);
+extern struct rtskb *rtskb_clone(struct rtskb *rtskb, struct rtskb_pool *pool);
+
+extern int rtskb_pools_init(void);
+extern void rtskb_pools_release(void);
+
+extern unsigned int rtskb_copy_and_csum_bits(const struct rtskb *skb,
+					     int offset, u8 *to, int len,
+					     unsigned int csum);
+extern void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+
+extern rtdm_lock_t rtcap_lock;
+extern void (*rtcap_handler)(struct rtskb *skb);
+
+static inline void rtcap_mark_incoming(struct rtskb *skb)
+{
+	skb->cap_start = skb->data;
+	skb->cap_len = skb->len;
+}
+
+static inline void rtcap_report_incoming(struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtcap_lock, context);
+	if (rtcap_handler != NULL)
+		rtcap_handler(skb);
+
+	rtdm_lock_put_irqrestore(&rtcap_lock, context);
+}
+
+static inline void rtcap_mark_rtmac_enqueue(struct rtskb *skb)
+{
+	/* rtskb start and length are probably not valid yet */
+	skb->cap_flags |= RTSKB_CAP_RTMAC_STAMP;
+	skb->cap_rtmac_stamp = rtdm_clock_read();
+}
+
+#else /* ifndef CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+
+#define rtcap_mark_incoming(skb)
+#define rtcap_report_incoming(skb)
+#define rtcap_mark_rtmac_enqueue(skb)
+
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+
+#endif /* __KERNEL__ */
+
+#endif /* __RTSKB_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb_fifo.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb_fifo.h
new file mode 100644
index 0000000..9fe76bc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtskb_fifo.h
@@ -0,0 +1,144 @@
+/***
+ *
+ *  include/rtskb_fifo.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __RTSKB_FIFO_H_
+#define __RTSKB_FIFO_H_
+
+#include <rtskb.h>
+
+struct rtskb_fifo {
+	unsigned long read_pos ____cacheline_aligned_in_smp;
+	rtdm_lock_t read_lock;
+	unsigned long size_mask;
+	unsigned long write_pos ____cacheline_aligned_in_smp;
+	rtdm_lock_t write_lock;
+	struct rtskb *buffer[0];
+};
+
+#define DECLARE_RTSKB_FIFO(name_prefix, size)                                  \
+	struct {                                                               \
+		struct rtskb_fifo fifo;                                        \
+		struct rtskb *__buffer[(size)];                                \
+	} name_prefix
+
+static inline int __rtskb_fifo_insert(struct rtskb_fifo *fifo,
+				      struct rtskb *rtskb)
+{
+	unsigned long pos = fifo->write_pos;
+	unsigned long new_pos = (pos + 1) & fifo->size_mask;
+
+	if (unlikely(new_pos == fifo->read_pos))
+		return -EAGAIN;
+
+	fifo->buffer[pos] = rtskb;
+
+	/* rtskb must have been written before write_pos update */
+	smp_wmb();
+
+	fifo->write_pos = new_pos;
+
+	return 0;
+}
+
+static inline int rtskb_fifo_insert(struct rtskb_fifo *fifo,
+				    struct rtskb *rtskb)
+{
+	rtdm_lockctx_t context;
+	int result;
+
+	rtdm_lock_get_irqsave(&fifo->write_lock, context);
+	result = __rtskb_fifo_insert(fifo, rtskb);
+	rtdm_lock_put_irqrestore(&fifo->write_lock, context);
+
+	return result;
+}
+
+static inline int rtskb_fifo_insert_inirq(struct rtskb_fifo *fifo,
+					  struct rtskb *rtskb)
+{
+	int result;
+
+	rtdm_lock_get(&fifo->write_lock);
+	result = __rtskb_fifo_insert(fifo, rtskb);
+	rtdm_lock_put(&fifo->write_lock);
+
+	return result;
+}
+
+static inline struct rtskb *__rtskb_fifo_remove(struct rtskb_fifo *fifo)
+{
+	unsigned long pos = fifo->read_pos;
+	struct rtskb *result;
+
+	/* check FIFO status first */
+	if (unlikely(pos == fifo->write_pos))
+		return NULL;
+
+	/* at least one rtskb is enqueued, so get the next one */
+	result = fifo->buffer[pos];
+
+	/* result must have been read before read_pos update */
+	smp_rmb();
+
+	fifo->read_pos = (pos + 1) & fifo->size_mask;
+
+	/* read_pos must have been written for a consistent fifo state on exit */
+	smp_wmb();
+
+	return result;
+}
+
+static inline struct rtskb *rtskb_fifo_remove(struct rtskb_fifo *fifo)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *result;
+
+	rtdm_lock_get_irqsave(&fifo->read_lock, context);
+	result = __rtskb_fifo_remove(fifo);
+	rtdm_lock_put_irqrestore(&fifo->read_lock, context);
+
+	return result;
+}
+
+static inline struct rtskb *rtskb_fifo_remove_inirq(struct rtskb_fifo *fifo)
+{
+	struct rtskb *result;
+
+	rtdm_lock_get(&fifo->read_lock);
+	result = __rtskb_fifo_remove(fifo);
+	rtdm_lock_put(&fifo->read_lock);
+
+	return result;
+}
+
+/* for now inlined... */
+static inline void rtskb_fifo_init(struct rtskb_fifo *fifo, unsigned long size)
+{
+	fifo->read_pos = 0;
+	fifo->write_pos = 0;
+	fifo->size_mask = size - 1;
+	rtdm_lock_init(&fifo->read_lock);
+	rtdm_lock_init(&fifo->write_lock);
+}
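+
+/*
+ * Setup sketch: the slot count must be a power of two, since only a mask is
+ * stored, and one slot always stays free to tell "full" from "empty".
+ * Hypothetical example with 16 slots (my_fifo and skb are placeholders):
+ *
+ *    static DECLARE_RTSKB_FIFO(my_fifo, 16);
+ *
+ *    rtskb_fifo_init(&my_fifo.fifo, 16);
+ *    rtskb_fifo_insert(&my_fifo.fifo, skb);  // -EAGAIN when full
+ *    skb = rtskb_fifo_remove(&my_fifo.fifo); // NULL when empty
+ */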
+
+#endif /* __RTSKB_FIFO_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan.h
new file mode 100644
index 0000000..85d6a99
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan.h
@@ -0,0 +1,263 @@
+/* rtwlan.h
+ *
+ * This file is a rtnet adaption from ieee80211/ieee80211.h used by the
+ * rt2x00-2.0.0-b3 sourceforge project
+ *
+ * Merged with mainline ieee80211.h in Aug 2004.  Original ieee802_11
+ * remains copyright by the original authors
+ *
+ * Portions of the merged code are based on Host AP (software wireless
+ * LAN access point) driver for Intersil Prism2/2.5/3.
+ *
+ * Copyright (c) 2001-2002, SSH Communications Security Corp and Jouni Malinen
+ * <jkmaline@cc.hut.fi>
+ * Copyright (c) 2002-2003, Jouni Malinen <jkmaline@cc.hut.fi>
+ *
+ * Adaption to a generic IEEE 802.11 stack by James Ketrenos
+ * <jketreno@linux.intel.com>
+ * Copyright (c) 2004-2005, Intel Corporation
+ *
+ * Adaption to rtnet
+ * Copyright (c) 2006, Daniel Gregorek <dxg@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef RTWLAN_H
+#define RTWLAN_H
+
+#include <linux/if_ether.h> /* ETH_ALEN */
+#include <linux/kernel.h> /* ARRAY_SIZE */
+
+#include <rtskb.h>
+#include <rtwlan_io.h>
+
+#define IEEE80211_1ADDR_LEN 10
+#define IEEE80211_2ADDR_LEN 16
+#define IEEE80211_3ADDR_LEN 24
+#define IEEE80211_4ADDR_LEN 30
+#define IEEE80211_FCS_LEN 4
+#define IEEE80211_HLEN (IEEE80211_4ADDR_LEN)
+#define IEEE80211_FRAME_LEN (IEEE80211_DATA_LEN + IEEE80211_HLEN)
+
+#define MIN_FRAG_THRESHOLD 256U
+#define MAX_FRAG_THRESHOLD 2346U
+
+/* Frame control field constants */
+#define IEEE80211_FCTL_VERS 0x0003
+#define IEEE80211_FCTL_FTYPE 0x000c
+#define IEEE80211_FCTL_STYPE 0x00f0
+#define IEEE80211_FCTL_TODS 0x0100
+#define IEEE80211_FCTL_FROMDS 0x0200
+#define IEEE80211_FCTL_MOREFRAGS 0x0400
+#define IEEE80211_FCTL_RETRY 0x0800
+#define IEEE80211_FCTL_PM 0x1000
+#define IEEE80211_FCTL_MOREDATA 0x2000
+#define IEEE80211_FCTL_PROTECTED 0x4000
+#define IEEE80211_FCTL_ORDER 0x8000
+
+#define IEEE80211_FTYPE_MGMT 0x0000
+#define IEEE80211_FTYPE_CTL 0x0004
+#define IEEE80211_FTYPE_DATA 0x0008
+
+/* management */
+#define IEEE80211_STYPE_ASSOC_REQ 0x0000
+#define IEEE80211_STYPE_ASSOC_RESP 0x0010
+#define IEEE80211_STYPE_REASSOC_REQ 0x0020
+#define IEEE80211_STYPE_REASSOC_RESP 0x0030
+#define IEEE80211_STYPE_PROBE_REQ 0x0040
+#define IEEE80211_STYPE_PROBE_RESP 0x0050
+#define IEEE80211_STYPE_BEACON 0x0080
+#define IEEE80211_STYPE_ATIM 0x0090
+#define IEEE80211_STYPE_DISASSOC 0x00A0
+#define IEEE80211_STYPE_AUTH 0x00B0
+#define IEEE80211_STYPE_DEAUTH 0x00C0
+#define IEEE80211_STYPE_ACTION 0x00D0
+
+/* control */
+#define IEEE80211_STYPE_PSPOLL 0x00A0
+#define IEEE80211_STYPE_RTS 0x00B0
+#define IEEE80211_STYPE_CTS 0x00C0
+#define IEEE80211_STYPE_ACK 0x00D0
+#define IEEE80211_STYPE_CFEND 0x00E0
+#define IEEE80211_STYPE_CFENDACK 0x00F0
+
+/* data */
+#define IEEE80211_STYPE_DATA 0x0000
+#define IEEE80211_STYPE_DATA_CFACK 0x0010
+#define IEEE80211_STYPE_DATA_CFPOLL 0x0020
+#define IEEE80211_STYPE_DATA_CFACKPOLL 0x0030
+#define IEEE80211_STYPE_NULLFUNC 0x0040
+#define IEEE80211_STYPE_CFACK 0x0050
+#define IEEE80211_STYPE_CFPOLL 0x0060
+#define IEEE80211_STYPE_CFACKPOLL 0x0070
+#define IEEE80211_STYPE_QOS_DATA 0x0080
+
+#define RTWLAN_SCTL_SEQ 0xFFF0
+
+#define WLAN_FC_GET_VERS(fc) ((fc)&IEEE80211_FCTL_VERS)
+#define WLAN_FC_GET_TYPE(fc) ((fc)&IEEE80211_FCTL_FTYPE)
+#define WLAN_FC_GET_STYPE(fc) ((fc)&IEEE80211_FCTL_STYPE)
+
+#define IEEE80211_DSSS_RATE_1MB 0x02
+#define IEEE80211_DSSS_RATE_2MB 0x04
+#define IEEE80211_DSSS_RATE_5MB 0x0B
+#define IEEE80211_DSSS_RATE_11MB 0x16
+#define IEEE80211_OFDM_RATE_6MB 0x0C
+#define IEEE80211_OFDM_RATE_9MB 0x12
+#define IEEE80211_OFDM_RATE_12MB 0x18
+#define IEEE80211_OFDM_RATE_18MB 0x24
+#define IEEE80211_OFDM_RATE_24MB 0x30
+#define IEEE80211_OFDM_RATE_36MB 0x48
+#define IEEE80211_OFDM_RATE_48MB 0x60
+#define IEEE80211_OFDM_RATE_54MB 0x6C
+#define IEEE80211_BASIC_RATE_MASK 0x80
+
+#define MAC_FMT "%02x:%02x:%02x:%02x:%02x:%02x"
+#define MAC_ARG(x)                                                             \
+	((u8 *)(x))[0], ((u8 *)(x))[1], ((u8 *)(x))[2], ((u8 *)(x))[3],        \
+		((u8 *)(x))[4], ((u8 *)(x))[5]
+
+#ifdef CONFIG_RTWLAN_DEBUG
+#define RTWLAN_DEBUG_PRINTK(__message...)                                      \
+	do {                                                                   \
+		rtdm_printk(__message);                                        \
+	} while (0)
+#define RTWLAN_DEBUG(__message, __args...)                                     \
+	RTWLAN_DEBUG_PRINTK(KERN_DEBUG "rtwlan->%s: Debug - " __message,       \
+			    __FUNCTION__, ##__args);
+#else
+#define RTWLAN_DEBUG(__message...)                                             \
+	do {                                                                   \
+	} while (0)
+#endif
+
+struct rtwlan_stats {
+	unsigned long rx_packets; /* total packets received	*/
+	unsigned long tx_packets; /* total packets transmitted	*/
+	unsigned long tx_retry; /* total packets transmitted with retry */
+};
+
+struct rtwlan_device {
+	struct rtwlan_stats stats;
+
+	struct rtskb_pool skb_pool;
+
+	int mode;
+
+	int (*hard_start_xmit)(struct rtskb *rtskb,
+			       struct rtnet_device *rtnet_dev);
+
+	/* This must be the last item */
+	u8 priv[0];
+};
+
+/* Minimal header; can be used for passing 802.11 frames with sufficient
+ * information to determine what type of underlying data is actually
+ * stored in the payload. */
+struct ieee80211_hdr {
+	u16 frame_ctl;
+	u16 duration_id;
+	u8 payload[0];
+} __attribute__((packed));
+
+struct ieee80211_hdr_3addr {
+	u16 frame_ctl;
+	u16 duration_id;
+	u8 addr1[ETH_ALEN];
+	u8 addr2[ETH_ALEN];
+	u8 addr3[ETH_ALEN];
+	u16 seq_ctl;
+	u8 payload[0];
+} __attribute__((packed));
+
+static inline int ieee80211_get_hdrlen(u16 fc)
+{
+	int hdrlen = IEEE80211_3ADDR_LEN;
+	u16 stype = WLAN_FC_GET_STYPE(fc);
+
+	switch (WLAN_FC_GET_TYPE(fc)) {
+	case IEEE80211_FTYPE_DATA:
+		if ((fc & IEEE80211_FCTL_FROMDS) && (fc & IEEE80211_FCTL_TODS))
+			hdrlen = IEEE80211_4ADDR_LEN;
+		if (stype & IEEE80211_STYPE_QOS_DATA)
+			hdrlen += 2;
+		break;
+
+	case IEEE80211_FTYPE_CTL:
+		switch (WLAN_FC_GET_STYPE(fc)) {
+		case IEEE80211_STYPE_CTS:
+		case IEEE80211_STYPE_ACK:
+			hdrlen = IEEE80211_1ADDR_LEN;
+			break;
+
+		default:
+			hdrlen = IEEE80211_2ADDR_LEN;
+			break;
+		}
+		break;
+	}
+
+	return hdrlen;
+}
+
+static inline int ieee80211_is_ofdm_rate(u8 rate)
+{
+	switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+	case IEEE80211_OFDM_RATE_6MB:
+	case IEEE80211_OFDM_RATE_9MB:
+	case IEEE80211_OFDM_RATE_12MB:
+	case IEEE80211_OFDM_RATE_18MB:
+	case IEEE80211_OFDM_RATE_24MB:
+	case IEEE80211_OFDM_RATE_36MB:
+	case IEEE80211_OFDM_RATE_48MB:
+	case IEEE80211_OFDM_RATE_54MB:
+		return 1;
+	}
+	return 0;
+}
+
+static inline int ieee80211_is_dsss_rate(u8 rate)
+{
+	switch (rate & ~IEEE80211_BASIC_RATE_MASK) {
+	case IEEE80211_DSSS_RATE_1MB:
+	case IEEE80211_DSSS_RATE_2MB:
+	case IEEE80211_DSSS_RATE_5MB:
+	case IEEE80211_DSSS_RATE_11MB:
+		return 1;
+	}
+	return 0;
+}
+
+static inline void *rtwlan_priv(struct rtwlan_device *rtwlan_dev)
+{
+	return (void *)rtwlan_dev + sizeof(struct rtwlan_device);
+}
+
+struct rtnet_device *rtwlan_alloc_dev(unsigned sizeof_priv,
+				      unsigned dev_pool_size);
+int rtwlan_rx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev);
+int rtwlan_tx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTWLAN
+int __init rtwlan_init(void);
+void rtwlan_exit(void);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTWLAN */
+#define rtwlan_init() 0
+#define rtwlan_exit()
+#endif /* CONFIG_XENO_DRIVERS_NET_RTWLAN */
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan_io.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan_io.h
new file mode 100644
index 0000000..e1952f5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/rtwlan_io.h
@@ -0,0 +1,104 @@
+/* rtwlan_io.h
+ *
+ * Copyright (C) 2006      Daniel Gregorek <dxg@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef RTWLAN_IO
+#define RTWLAN_IO
+
+#include <rtnet_chrdev.h>
+
+#define RTWLAN_TXMODE_RAW 0
+#define RTWLAN_TXMODE_ACK 1
+#define RTWLAN_TXMODE_MCAST 2
+
+#define ENORTWLANDEV 0xff08
+
+struct rtwlan_cmd {
+	struct rtnet_ioctl_head head;
+
+	union {
+		struct {
+			unsigned int bitrate;
+			unsigned int channel;
+			unsigned int retry;
+			unsigned int txpower;
+			unsigned int mode;
+			unsigned int autoresponder;
+			unsigned int dropbcast;
+			unsigned int dropmcast;
+			unsigned int bbpsens;
+		} set;
+
+		struct {
+			unsigned int address;
+			unsigned int value;
+		} reg;
+
+		struct {
+			int ifindex;
+			unsigned int flags;
+			unsigned int bitrate;
+			unsigned int channel;
+			unsigned int retry;
+			unsigned int txpower;
+			unsigned int bbpsens;
+			unsigned int mode;
+			unsigned int autoresponder;
+			unsigned int dropbcast;
+			unsigned int dropmcast;
+			unsigned int rx_packets;
+			unsigned int tx_packets;
+			unsigned int tx_retry;
+		} info;
+	} args;
+};
+
+#define RTNET_IOC_TYPE_RTWLAN 8
+
+#define IOC_RTWLAN_IFINFO                                                      \
+	_IOWR(RTNET_IOC_TYPE_RTWLAN, 0 | RTNET_IOC_NODEV_PARAM,                \
+	      struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BITRATE _IOWR(RTNET_IOC_TYPE_RTWLAN, 1, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_CHANNEL _IOWR(RTNET_IOC_TYPE_RTWLAN, 2, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_TXPOWER _IOWR(RTNET_IOC_TYPE_RTWLAN, 3, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_RETRY _IOWR(RTNET_IOC_TYPE_RTWLAN, 4, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_TXMODE _IOWR(RTNET_IOC_TYPE_RTWLAN, 5, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_DROPBCAST _IOWR(RTNET_IOC_TYPE_RTWLAN, 6, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_DROPMCAST _IOWR(RTNET_IOC_TYPE_RTWLAN, 7, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_REGREAD _IOWR(RTNET_IOC_TYPE_RTWLAN, 8, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_REGWRITE _IOWR(RTNET_IOC_TYPE_RTWLAN, 9, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BBPWRITE _IOWR(RTNET_IOC_TYPE_RTWLAN, 10, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BBPREAD _IOWR(RTNET_IOC_TYPE_RTWLAN, 11, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_BBPSENS _IOWR(RTNET_IOC_TYPE_RTWLAN, 12, struct rtwlan_cmd)
+
+#define IOC_RTWLAN_AUTORESP _IOWR(RTNET_IOC_TYPE_RTWLAN, 13, struct rtwlan_cmd)
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/stack_mgr.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/stack_mgr.h
new file mode 100644
index 0000000..e547a3a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/stack_mgr.h
@@ -0,0 +1,95 @@
+/***
+ *
+ *  stack_mgr.h
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2002      Ulrich Marx <marx@fet.uni-hannover.de>
+ *                2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __STACK_MGR_H_
+#define __STACK_MGR_H_
+
+#ifdef __KERNEL__
+
+#include <linux/list.h>
+
+#include <rtnet_internal.h>
+#include <rtdev.h>
+
+/***
+ * network layer protocol (layer 3)
+ */
+
+#define RTPACKET_HASH_TBL_SIZE 64
+#define RTPACKET_HASH_KEY_MASK (RTPACKET_HASH_TBL_SIZE - 1)
+
+struct rtpacket_type {
+	struct list_head list_entry;
+
+	unsigned short type;
+	short refcount;
+
+	int (*handler)(struct rtskb *, struct rtpacket_type *);
+	int (*err_handler)(struct rtskb *, struct rtnet_device *,
+			   struct rtpacket_type *);
+	bool (*trylock)(struct rtpacket_type *);
+	void (*unlock)(struct rtpacket_type *);
+
+	struct module *owner;
+};
+
+int __rtdev_add_pack(struct rtpacket_type *pt, struct module *module);
+#define rtdev_add_pack(pt) __rtdev_add_pack(pt, THIS_MODULE)
+
+void rtdev_remove_pack(struct rtpacket_type *pt);
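+
+/*
+ * Registration sketch for a layer-3 protocol handler (my_proto and
+ * my_proto_rx are hypothetical names; the handler follows the prototype
+ * declared in struct rtpacket_type above):
+ *
+ *    static struct rtpacket_type my_proto = {
+ *        .type    = __constant_htons(ETH_P_IP),
+ *        .handler = my_proto_rx,
+ *    };
+ *
+ *    rtdev_add_pack(&my_proto);    // register, owner = THIS_MODULE
+ *    rtdev_remove_pack(&my_proto); // unregister on cleanup
+ */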
+
+static inline bool rtdev_lock_pack(struct rtpacket_type *pt)
+{
+	return try_module_get(pt->owner);
+}
+
+static inline void rtdev_unlock_pack(struct rtpacket_type *pt)
+{
+	module_put(pt->owner);
+}
+
+void rt_stack_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr);
+void rt_stack_disconnect(struct rtnet_device *rtdev);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK)
+void rt_stack_deliver(struct rtskb *rtskb);
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+
+int rt_stack_mgr_init(struct rtnet_mgr *mgr);
+void rt_stack_mgr_delete(struct rtnet_mgr *mgr);
+
+void rtnetif_rx(struct rtskb *skb);
+
+static inline void rtnetif_tx(struct rtnet_device *rtdev)
+{
+}
+
+static inline void rt_mark_stack_mgr(struct rtnet_device *rtdev)
+{
+	rtdm_event_signal(rtdev->stack_event);
+}
+
+#endif /* __KERNEL__ */
+
+#endif /* __STACK_MGR_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/tdma_chrdev.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/tdma_chrdev.h
new file mode 100644
index 0000000..2f9fadc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/include/tdma_chrdev.h
@@ -0,0 +1,81 @@
+/***
+ *
+ *  include/tdma_chrdev.h
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TDMA_CHRDEV_H_
+#define __TDMA_CHRDEV_H_
+
+#ifndef __KERNEL__
+#include <inttypes.h>
+#endif
+
+#include <rtnet_chrdev.h>
+
+#define MIN_SLOT_SIZE 60
+
+struct tdma_config {
+	struct rtnet_ioctl_head head;
+
+	union {
+		struct {
+			__u64 cycle_period;
+			__u64 backup_sync_offset;
+			__u32 cal_rounds;
+			__u32 max_cal_requests;
+			__u32 max_slot_id;
+		} master;
+
+		struct {
+			__u32 cal_rounds;
+			__u32 max_slot_id;
+		} slave;
+
+		struct {
+			__s32 id;
+			__u32 period;
+			__u64 offset;
+			__u32 phasing;
+			__u32 size;
+			__s32 joint_slot;
+			__u32 cal_timeout;
+			__u64 *cal_results;
+		} set_slot;
+
+		struct {
+			__s32 id;
+		} remove_slot;
+
+		__u64 __padding[8];
+	} args;
+};
+
+#define TDMA_IOC_MASTER _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 0, struct tdma_config)
+#define TDMA_IOC_SLAVE _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 1, struct tdma_config)
+#define TDMA_IOC_CAL_RESULT_SIZE                                               \
+	_IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 2, struct tdma_config)
+#define TDMA_IOC_SET_SLOT _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 3, struct tdma_config)
+#define TDMA_IOC_REMOVE_SLOT                                                   \
+	_IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 4, struct tdma_config)
+#define TDMA_IOC_DETACH _IOW(RTNET_IOC_TYPE_RTMAC_TDMA, 5, struct tdma_config)
+
+#endif /* __TDMA_CHRDEV_H_ */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/iovec.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/iovec.c
new file mode 100644
index 0000000..d82aed0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/iovec.c
@@ -0,0 +1,103 @@
+/***
+ *
+ *  stack/iovec.c
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 1999,2000 Zentropic Computing, LLC
+ *                2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <rtdm/driver.h>
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+
+/* Scatter a contiguous buffer into the given iovec, advancing the vector. */
+ssize_t rtnet_write_to_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			   const void *data, size_t len)
+{
+	ssize_t ret = 0;
+	size_t nbytes;
+	int n, err;
+
+	for (n = 0; len > 0 && n < iovlen; n++, iov++) {
+		if (iov->iov_len == 0)
+			continue;
+
+		nbytes = iov->iov_len;
+		if (nbytes > len)
+			nbytes = len;
+
+		err = rtnet_put_arg(fd, iov->iov_base, data, nbytes);
+		if (err) {
+			ret = err;
+			break;
+		}
+
+		len -= nbytes;
+		data += nbytes;
+		iov->iov_len -= nbytes;
+		iov->iov_base += nbytes;
+
+		/* accumulate the total byte count; keep it separate from the
+		 * error code above so it is not overwritten per iteration */
+		ret += nbytes;
+		if (ret < 0) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtnet_write_to_iov);
+
+/* Gather data from the given iovec into a contiguous buffer. */
+ssize_t rtnet_read_from_iov(struct rtdm_fd *fd, struct iovec *iov, int iovlen,
+			    void *data, size_t len)
+{
+	ssize_t ret = 0;
+	size_t nbytes;
+	int n, err;
+
+	for (n = 0; len > 0 && n < iovlen; n++, iov++) {
+		if (iov->iov_len == 0)
+			continue;
+
+		nbytes = iov->iov_len;
+		if (nbytes > len)
+			nbytes = len;
+
+		if (!rtdm_fd_is_user(fd)) {
+			memcpy(data, iov->iov_base, nbytes);
+		} else {
+			err = rtdm_copy_from_user(fd, data, iov->iov_base,
+						  nbytes);
+			if (err) {
+				ret = err;
+				break;
+			}
+		}
+
+		len -= nbytes;
+		data += nbytes;
+		iov->iov_len -= nbytes;
+		iov->iov_base += nbytes;
+
+		/* accumulate the total number of bytes read */
+		ret += nbytes;
+		if (ret < 0) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtnet_read_from_iov);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Kconfig
new file mode 100644
index 0000000..d5a6cd6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Kconfig
@@ -0,0 +1,75 @@
+config XENO_DRIVERS_NET_RTIPV4
+    depends on XENO_DRIVERS_NET
+    tristate "Real-Time IPv4"
+    default y
+    help
+    Enables the real-time capable IPv4 support of RTnet. The protocol is
+    implemented as a separate module. Supplementary tools (rtroute,
+    rtping) and examples are provided as well. Moreover, RTcfg will
+    include IPv4 support when this option is switched on.
+
+    For further information see also Documentation/README.routing and
+    Documentation/README.ipfragmentation.
+
+config XENO_DRIVERS_NET_RTIPV4_ICMP
+    bool "ICMP support"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default y
+    help
+    Enables ICMP support of the RTnet Real-Time IPv4 protocol.
+
+    When the RTnet-Proxy is enabled while this feature is disabled, ICMP
+    will be forwarded to the Linux network stack.
+
+config XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES
+    int "Maximum host routing table entries"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default 32
+    help
+    Each IPv4-supporting interface and each remote host that is directly
+    reachable via some output interface requires a host routing table
+    entry. If you run larger networks with many hosts per subnet, you may
+    have to increase this limit. The value must be a power of two!
+
+config XENO_DRIVERS_NET_RTIPV4_NETROUTING
+    bool "IP Network Routing"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    help
+    Enables routing across IPv4 real-time networks. You will only require
+    this feature in complex networks; switching it off for flat,
+    single-segment networks reduces code size and the worst-case routing
+    decision delay.
+
+    See Documentation/README.routing for further information.
+
+config XENO_DRIVERS_NET_RTIPV4_NET_ROUTES
+    int "Maximum network routing table entries"
+    depends on XENO_DRIVERS_NET_RTIPV4_NETROUTING
+    default 16
+    help
+    Each route describing a target network reachable via a router
+    requires an entry in the network routing table. If you run very
+    complex real-time networks, you may have to increase this limit. The
+    value must be a power of two!
+
+config XENO_DRIVERS_NET_RTIPV4_ROUTER
+    bool "IP Router"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    help
+    When switched on, the RTnet station will be able to forward IPv4
+    packets that are not directed to the station itself. Typically used in
+    combination with XENO_DRIVERS_NET_RTIPV4_NETROUTING.
+
+    See Documentation/README.routing for further information.
+
+config XENO_DRIVERS_NET_RTIPV4_DEBUG
+    bool "RTipv4 Debugging"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default n
+    help
+    Enables debug message output of the RTipv4 layer. Typically, you
+    may want to turn this on for tracing issues in packet delivery.
+
+source "drivers/xenomai/net/stack/ipv4/udp/Kconfig"
+source "drivers/xenomai/net/stack/ipv4/tcp/Kconfig"
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Makefile
new file mode 100644
index 0000000..afdbeaf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/Makefile
@@ -0,0 +1,19 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4) += rtipv4.o
+
+rtipv4-y := \
+	route.o \
+	protocol.o \
+	arp.o \
+	af_inet.o \
+	ip_input.o \
+	ip_sock.o \
+	ip_output.o \
+	ip_fragment.o
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_UDP) += udp/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP) += tcp/
+
+rtipv4-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP) += icmp.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/af_inet.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/af_inet.c
new file mode 100644
index 0000000..8bc87d9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/af_inet.c
@@ -0,0 +1,340 @@
+/***
+ *
+ *  ipv4/af_inet.c
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <ipv4_chrdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_rtpc.h>
+#include <ipv4/arp.h>
+#include <ipv4/icmp.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+
+MODULE_LICENSE("GPL");
+
+struct route_solicit_params {
+	struct rtnet_device *rtdev;
+	__u32 ip_addr;
+};
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct xnvfile_directory ipv4_proc_root;
+EXPORT_SYMBOL_GPL(ipv4_proc_root);
+#endif
+
+static int route_solicit_handler(struct rt_proc_call *call)
+{
+	struct route_solicit_params *param;
+	struct rtnet_device *rtdev;
+
+	param = rtpc_get_priv(call, struct route_solicit_params);
+	rtdev = param->rtdev;
+
+	if ((rtdev->flags & IFF_UP) == 0)
+		return -ENODEV;
+
+	rt_arp_solicit(rtdev, param->ip_addr);
+
+	return 0;
+}
+
+static void cleanup_route_solicit(void *priv_data)
+{
+	struct route_solicit_params *param;
+
+	param = (struct route_solicit_params *)priv_data;
+	rtdev_dereference(param->rtdev);
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP
+static int ping_handler(struct rt_proc_call *call)
+{
+	struct ipv4_cmd *cmd;
+	int err;
+
+	cmd = rtpc_get_priv(call, struct ipv4_cmd);
+
+	rt_icmp_queue_echo_request(call);
+
+	err = rt_icmp_send_echo(cmd->args.ping.ip_addr, cmd->args.ping.id,
+				cmd->args.ping.sequence,
+				cmd->args.ping.msg_size);
+	if (err < 0) {
+		rt_icmp_dequeue_echo_request(call);
+		return err;
+	}
+
+	return -CALL_PENDING;
+}
+
+static void ping_complete_handler(struct rt_proc_call *call, void *priv_data)
+{
+	struct ipv4_cmd *cmd;
+	struct ipv4_cmd *usr_cmd = (struct ipv4_cmd *)priv_data;
+
+	if (rtpc_get_result(call) < 0)
+		return;
+
+	cmd = rtpc_get_priv(call, struct ipv4_cmd);
+	usr_cmd->args.ping.ip_addr = cmd->args.ping.ip_addr;
+	usr_cmd->args.ping.rtt = cmd->args.ping.rtt;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+
+static int ipv4_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		      unsigned long arg)
+{
+	struct ipv4_cmd cmd;
+	struct route_solicit_params params;
+	int ret;
+
+	ret = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+	if (ret != 0)
+		return -EFAULT;
+
+	switch (request) {
+	case IOC_RT_HOST_ROUTE_ADD:
+		if (mutex_lock_interruptible(&rtdev->nrt_lock))
+			return -ERESTARTSYS;
+
+		ret = rt_ip_route_add_host(cmd.args.addhost.ip_addr,
+					   cmd.args.addhost.dev_addr, rtdev);
+
+		mutex_unlock(&rtdev->nrt_lock);
+		break;
+
+	case IOC_RT_HOST_ROUTE_SOLICIT:
+		if (mutex_lock_interruptible(&rtdev->nrt_lock))
+			return -ERESTARTSYS;
+
+		if (!rtdev_reference(rtdev)) {
+			mutex_unlock(&rtdev->nrt_lock);
+			return -EIDRM;
+		}
+
+		params.rtdev = rtdev;
+		params.ip_addr = cmd.args.solicit.ip_addr;
+
+		/* We need the rtpc wrapping because rt_arp_solicit can block
+		 * on a real-time lock in the NIC's xmit routine. */
+		ret = rtpc_dispatch_call(route_solicit_handler, 0, &params,
+					 sizeof(params), NULL,
+					 cleanup_route_solicit);
+
+		mutex_unlock(&rtdev->nrt_lock);
+		break;
+
+	case IOC_RT_HOST_ROUTE_DELETE:
+	case IOC_RT_HOST_ROUTE_DELETE_DEV:
+		ret = rt_ip_route_del_host(cmd.args.delhost.ip_addr, rtdev);
+		break;
+
+	case IOC_RT_HOST_ROUTE_GET:
+	case IOC_RT_HOST_ROUTE_GET_DEV:
+		ret = rt_ip_route_get_host(cmd.args.gethost.ip_addr,
+					   cmd.head.if_name,
+					   cmd.args.gethost.dev_addr, rtdev);
+		if (ret >= 0) {
+			if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+				ret = -EFAULT;
+		}
+		break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	case IOC_RT_NET_ROUTE_ADD:
+		ret = rt_ip_route_add_net(cmd.args.addnet.net_addr,
+					  cmd.args.addnet.net_mask,
+					  cmd.args.addnet.gw_addr);
+		break;
+
+	case IOC_RT_NET_ROUTE_DELETE:
+		ret = rt_ip_route_del_net(cmd.args.delnet.net_addr,
+					  cmd.args.delnet.net_mask);
+		break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP
+	case IOC_RT_PING:
+		ret = rtpc_dispatch_call(ping_handler, cmd.args.ping.timeout,
+					 &cmd, sizeof(cmd),
+					 ping_complete_handler, NULL);
+		if (ret >= 0) {
+			if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+				ret = -EFAULT;
+		}
+		if (ret < 0)
+			rt_icmp_cleanup_echo_requests();
+		break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ICMP */
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+unsigned long rt_inet_aton(const char *ip)
+{
+	int p, n, c;
+	union {
+		unsigned long l;
+		char c[4];
+	} u;
+	p = n = 0;
+	while ((c = *ip++)) {
+		if (c != '.') {
+			n = n * 10 + c - '0';
+		} else {
+			if (n > 0xFF) {
+				return 0;
+			}
+			u.c[p++] = n;
+			n = 0;
+		}
+	}
+	u.c[3] = n;
+	return u.l;
+}
+
+static void rt_ip_ifup(struct rtnet_device *rtdev,
+		       struct rtnet_core_cmd *up_cmd)
+{
+	struct rtnet_device *tmp;
+	int i;
+
+	rt_ip_route_del_all(rtdev); /* cleanup routing table */
+
+	if (up_cmd->args.up.ip_addr != 0xFFFFFFFF) {
+		rtdev->local_ip = up_cmd->args.up.ip_addr;
+		rtdev->broadcast_ip = up_cmd->args.up.broadcast_ip;
+	}
+
+	if (rtdev->local_ip != 0) {
+		if (rtdev->flags & IFF_LOOPBACK) {
+			for (i = 0; i < MAX_RT_DEVICES; i++)
+				if ((tmp = rtdev_get_by_index(i)) != NULL) {
+					rt_ip_route_add_host(tmp->local_ip,
+							     rtdev->dev_addr,
+							     rtdev);
+					rtdev_dereference(tmp);
+				}
+		} else if ((tmp = rtdev_get_loopback()) != NULL) {
+			rt_ip_route_add_host(rtdev->local_ip, tmp->dev_addr,
+					     tmp);
+			rtdev_dereference(tmp);
+		}
+
+		if (rtdev->flags & IFF_BROADCAST)
+			rt_ip_route_add_host(up_cmd->args.up.broadcast_ip,
+					     rtdev->broadcast, rtdev);
+	}
+}
+
+static void rt_ip_ifdown(struct rtnet_device *rtdev)
+{
+	rt_ip_route_del_all(rtdev);
+}
+
+static struct rtdev_event_hook rtdev_hook = { .unregister_device = rt_ip_ifdown,
+					      .ifup = rt_ip_ifup,
+					      .ifdown = rt_ip_ifdown };
+
+static struct rtnet_ioctls ipv4_ioctls = { .service_name = "IPv4",
+					   .ioctl_type = RTNET_IOC_TYPE_IPV4,
+					   .handler = ipv4_ioctl };
+
+static int __init rt_ipv4_proto_init(void)
+{
+	int i;
+	int result;
+
+	/* Network-Layer */
+	rt_ip_init();
+	rt_arp_init();
+
+	/* Transport-Layer */
+	for (i = 0; i < MAX_RT_INET_PROTOCOLS; i++)
+		rt_inet_protocols[i] = NULL;
+
+	rt_icmp_init();
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	result = xnvfile_init_dir("ipv4", &ipv4_proc_root, &rtnet_proc_root);
+	if (result < 0)
+		goto err1;
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	if ((result = rt_ip_routing_init()) < 0)
+		goto err2;
+	if ((result = rtnet_register_ioctls(&ipv4_ioctls)) < 0)
+		goto err3;
+
+	rtdev_add_event_hook(&rtdev_hook);
+
+	return 0;
+
+err3:
+	rt_ip_routing_release();
+
+err2:
+#ifdef CONFIG_XENO_OPT_VFILE
+	xnvfile_destroy_dir(&ipv4_proc_root);
+err1:
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	rt_icmp_release();
+	rt_arp_release();
+	rt_ip_release();
+
+	return result;
+}
+
+static void __exit rt_ipv4_proto_release(void)
+{
+	rt_ip_release();
+
+	rtdev_del_event_hook(&rtdev_hook);
+	rtnet_unregister_ioctls(&ipv4_ioctls);
+	rt_ip_routing_release();
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	xnvfile_destroy_dir(&ipv4_proc_root);
+#endif
+
+	/* Transport-Layer */
+	rt_icmp_release();
+
+	/* Network-Layer */
+	rt_arp_release();
+}
+
+module_init(rt_ipv4_proto_init);
+module_exit(rt_ipv4_proto_release);
+
+EXPORT_SYMBOL_GPL(rt_inet_aton);
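rt_inet_aton() builds the address in network byte order simply by storing
the octets in textual order; beyond the per-octet overflow check it performs
no validation (and none at all on the final octet). A minimal host-side
sketch of the same parsing logic, illustrative only and not part of the
patch:

	#include <stdio.h>
	#include <stdint.h>

	static uint32_t aton_sketch(const char *ip)
	{
		int p = 0, n = 0, c;
		union {
			uint32_t l;
			unsigned char c[4];
		} u = { 0 };

		while ((c = *ip++)) {
			if (c != '.') {
				n = n * 10 + c - '0';	/* accumulate octet */
			} else {
				if (n > 0xFF)
					return 0;	/* octet out of range */
				u.c[p++] = n;
				n = 0;
			}
		}
		u.c[3] = n;	/* final octet ends at NUL, not '.' */
		return u.l;	/* textual order == network byte order */
	}

	int main(void)
	{
		printf("0x%08x\n", aton_sketch("192.168.0.1"));
		return 0;
	}

On a little-endian machine this prints 0x0100a8c0, i.e. the big-endian wire
representation of 192.168.0.1.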
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/arp.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/arp.c
new file mode 100644
index 0000000..91c151e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/arp.c
@@ -0,0 +1,212 @@
+/***
+ *
+ *  ipv4/arp.c - Address Resolution Protocol for RTnet
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtdev.h>
+#include <stack_mgr.h>
+#include <ipv4/arp.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+#include <ipv4/ip_input.h>
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+
+/***
+ *  arp_send:   Create and send an arp packet. If (dest_hw == NULL),
+ *              we create a broadcast message.
+ */
+void rt_arp_send(int type, int ptype, u32 dest_ip, struct rtnet_device *rtdev,
+		 u32 src_ip, unsigned char *dest_hw, unsigned char *src_hw,
+		 unsigned char *target_hw)
+{
+	struct rtskb *skb;
+	struct arphdr *arp;
+	unsigned char *arp_ptr;
+
+	if (rtdev->flags & IFF_NOARP)
+		return;
+
+	if (!(skb = alloc_rtskb(sizeof(struct arphdr) +
+					2 * (rtdev->addr_len + 4) +
+					rtdev->hard_header_len + 15,
+				&global_pool)))
+		return;
+
+	rtskb_reserve(skb, (rtdev->hard_header_len + 15) & ~15);
+
+	skb->nh.raw = skb->data;
+	arp = (struct arphdr *)rtskb_put(
+		skb, sizeof(struct arphdr) + 2 * (rtdev->addr_len + 4));
+
+	skb->rtdev = rtdev;
+	skb->protocol = __constant_htons(ETH_P_ARP);
+	skb->priority = RT_ARP_SKB_PRIO;
+	if (src_hw == NULL)
+		src_hw = rtdev->dev_addr;
+	if (dest_hw == NULL)
+		dest_hw = rtdev->broadcast;
+
+	/*
+     *  Fill the device header for the ARP frame
+     */
+	if (rtdev->hard_header &&
+	    (rtdev->hard_header(skb, rtdev, ptype, dest_hw, src_hw, skb->len) <
+	     0))
+		goto out;
+
+	arp->ar_hrd = htons(rtdev->type);
+	arp->ar_pro = __constant_htons(ETH_P_IP);
+	arp->ar_hln = rtdev->addr_len;
+	arp->ar_pln = 4;
+	arp->ar_op = htons(type);
+
+	arp_ptr = (unsigned char *)(arp + 1);
+
+	memcpy(arp_ptr, src_hw, rtdev->addr_len);
+	arp_ptr += rtdev->addr_len;
+
+	memcpy(arp_ptr, &src_ip, 4);
+	arp_ptr += 4;
+
+	if (target_hw != NULL)
+		memcpy(arp_ptr, target_hw, rtdev->addr_len);
+	else
+		memset(arp_ptr, 0, rtdev->addr_len);
+	arp_ptr += rtdev->addr_len;
+
+	memcpy(arp_ptr, &dest_ip, 4);
+
+	/* send the frame */
+	rtdev_xmit(skb);
+
+	return;
+
+out:
+	kfree_rtskb(skb);
+}
+
+/***
+ *  arp_rcv:    Receive an arp request by the device layer.
+ */
+int rt_arp_rcv(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct rtnet_device *rtdev = skb->rtdev;
+	struct arphdr *arp = skb->nh.arph;
+	unsigned char *arp_ptr = (unsigned char *)(arp + 1);
+	unsigned char *sha;
+	u32 sip, tip;
+	u16 dev_type = rtdev->type;
+
+	/*
+     *  The hardware length of the packet should match the hardware length
+     *  of the device.  Similarly, the hardware types should match.  The
+     *  device should be ARP-able.  Also, if pln is not 4, then the lookup
+     *  is not from an IP number.  We can't currently handle this, so toss
+     *  it.
+     */
+	if ((arp->ar_hln != rtdev->addr_len) || (rtdev->flags & IFF_NOARP) ||
+	    (skb->pkt_type == PACKET_OTHERHOST) ||
+	    (skb->pkt_type == PACKET_LOOPBACK) || (arp->ar_pln != 4))
+		goto out;
+
+	switch (dev_type) {
+	default:
+		if ((arp->ar_pro != __constant_htons(ETH_P_IP)) &&
+		    (htons(dev_type) != arp->ar_hrd))
+			goto out;
+		break;
+	case ARPHRD_ETHER:
+		/*
+	     * ETHERNET devices will accept ARP hardware types of either
+	     * 1 (Ethernet) or 6 (IEEE 802.2).
+	     */
+		if ((arp->ar_hrd != __constant_htons(ARPHRD_ETHER)) &&
+		    (arp->ar_hrd != __constant_htons(ARPHRD_IEEE802))) {
+			goto out;
+		}
+		if (arp->ar_pro != __constant_htons(ETH_P_IP)) {
+			goto out;
+		}
+		break;
+	}
+
+	/* Understand only these message types */
+	if ((arp->ar_op != __constant_htons(ARPOP_REPLY)) &&
+	    (arp->ar_op != __constant_htons(ARPOP_REQUEST)))
+		goto out;
+
+	/*
+     *  Extract fields
+     */
+	sha = arp_ptr;
+	arp_ptr += rtdev->addr_len;
+	memcpy(&sip, arp_ptr, 4);
+
+	arp_ptr += 4;
+	arp_ptr += rtdev->addr_len;
+	memcpy(&tip, arp_ptr, 4);
+
+	/* process only requests/replies directed to us */
+	if (tip == rtdev->local_ip) {
+		rt_ip_route_add_host(sip, sha, rtdev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+		if (!rt_ip_fallback_handler)
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+			if (arp->ar_op == __constant_htons(ARPOP_REQUEST)) {
+				rt_arp_send(ARPOP_REPLY, ETH_P_ARP, sip, rtdev,
+					    tip, sha, rtdev->dev_addr, sha);
+				goto out1;
+			}
+	}
+
+out:
+#ifdef CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP
+	if (rt_ip_fallback_handler) {
+		rt_ip_fallback_handler(skb);
+		return 0;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY_ARP */
+out1:
+	kfree_rtskb(skb);
+	return 0;
+}
+
+static struct rtpacket_type arp_packet_type = {
+	.type = __constant_htons(ETH_P_ARP),
+	.handler = &rt_arp_rcv
+};
+
+/***
+ *  rt_arp_init
+ */
+void __init rt_arp_init(void)
+{
+	rtdev_add_pack(&arp_packet_type);
+}
+
+/***
+ *  rt_arp_release
+ */
+void rt_arp_release(void)
+{
+	rtdev_remove_pack(&arp_packet_type);
+}
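For the common Ethernet/IPv4 case (addr_len == 6, ar_pln == 4), the payload
that rt_arp_send() assembles field by field corresponds to the packed layout
below. The struct is shown for illustration only; the code above deliberately
builds the frame with memcpy() so it works for any addr_len:

	#include <stdint.h>

	/* Illustrative only: the ARP payload behind struct arphdr as
	 * built by rt_arp_send(), specialized to Ethernet hardware and
	 * IPv4 protocol addresses. */
	struct arp_eth_ipv4 {
		uint8_t ar_sha[6];	/* sender hw address (src_hw)    */
		uint8_t ar_sip[4];	/* sender IP (src_ip), wire order */
		uint8_t ar_tha[6];	/* target hw address or zeroes   */
		uint8_t ar_tip[4];	/* target IP (dest_ip)           */
	} __attribute__((packed));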
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/icmp.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/icmp.c
new file mode 100644
index 0000000..8485614
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/icmp.c
@@ -0,0 +1,510 @@
+/***
+ *
+ *  ipv4/icmp.c
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2002       Vinay Sridhara <vinaysridhara@yahoo.com>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/types.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/icmp.h>
+#include <net/checksum.h>
+
+#include <rtskb.h>
+#include <rtnet_socket.h>
+#include <rtnet_checksum.h>
+#include <ipv4_chrdev.h>
+#include <ipv4/icmp.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+
+/***
+ * Structure for sending the icmp packets
+ */
+struct icmp_bxm {
+	unsigned int csum;
+	size_t head_len;
+	size_t data_len;
+	off_t offset;
+	struct {
+		struct icmphdr icmph;
+		nanosecs_abs_t timestamp;
+	} head;
+	union {
+		struct rtskb *skb;
+		void *buf;
+	} data;
+};
+
+struct rt_icmp_control {
+	void (*handler)(struct rtskb *skb);
+	short error; /* This ICMP is classed as an error message */
+};
+
+static DEFINE_RTDM_LOCK(echo_calls_lock);
+LIST_HEAD(echo_calls);
+
+static struct {
+	/*
+     * Scratch pad, provided so that rt_socket_dereference(&icmp_socket);
+     * remains legal.
+     */
+	struct rtdm_dev_context dummy;
+
+	/*
+     *  Socket for icmp replies
+     *  It is not part of the socket pool. It may furthermore be used
+     *  concurrently by multiple tasks because all fields are static excect
+     *  skb_pool, but that one is spinlock protected.
+     */
+	struct rtsocket socket;
+} icmp_socket_container;
+
+#define icmp_fd (&icmp_socket_container.dummy.fd)
+#define icmp_socket ((struct rtsocket *)rtdm_fd_to_private(icmp_fd))
+
+void rt_icmp_queue_echo_request(struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+	list_add_tail(&call->list_entry, &echo_calls);
+	rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+}
+
+void rt_icmp_dequeue_echo_request(struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+	list_del(&call->list_entry);
+	rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+}
+
+void rt_icmp_cleanup_echo_requests(void)
+{
+	rtdm_lockctx_t context;
+	struct list_head *entry;
+	struct list_head *next;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+	entry = echo_calls.next;
+	INIT_LIST_HEAD(&echo_calls);
+	rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+
+	while (entry != &echo_calls) {
+		next = entry->next;
+		rtpc_complete_call_nrt((struct rt_proc_call *)entry, -EINTR);
+		entry = next;
+	}
+
+	/* purge any pending ICMP fragments */
+	rt_ip_frag_invalidate_socket(icmp_socket);
+}
+
+/***
+ *  rt_icmp_discard - dummy function
+ */
+static void rt_icmp_discard(struct rtskb *skb)
+{
+}
+
+static int rt_icmp_glue_reply_bits(const void *p, unsigned char *to,
+				   unsigned int offset, unsigned int fraglen)
+{
+	struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
+	struct icmphdr *icmph;
+	unsigned long csum;
+
+	/* TODO: add support for fragmented ICMP packets */
+	if (offset != 0)
+		return -EMSGSIZE;
+
+	csum = rtnet_csum_copy((void *)&icmp_param->head, to,
+			       icmp_param->head_len,
+			       icmp_param->csum);
+
+	csum = rtskb_copy_and_csum_bits(icmp_param->data.skb,
+					icmp_param->offset,
+					to + icmp_param->head_len,
+					fraglen - icmp_param->head_len, csum);
+
+	icmph = (struct icmphdr *)to;
+
+	icmph->checksum = csum_fold(csum);
+
+	return 0;
+}
+
+/***
+ *  common reply function
+ */
+static void rt_icmp_send_reply(struct icmp_bxm *icmp_param, struct rtskb *skb)
+{
+	struct dest_route rt;
+	int err;
+
+	icmp_param->head.icmph.checksum = 0;
+	icmp_param->csum = 0;
+
+	/* route back to the source address via the incoming device */
+	if (rt_ip_route_output(&rt, skb->nh.iph->saddr, skb->rtdev->local_ip) !=
+	    0)
+		return;
+
+	rt_socket_reference(icmp_socket);
+	err = rt_ip_build_xmit(icmp_socket, rt_icmp_glue_reply_bits, icmp_param,
+			       sizeof(struct icmphdr) + icmp_param->data_len,
+			       &rt, MSG_DONTWAIT);
+	if (err)
+		rt_socket_dereference(icmp_socket);
+
+	rtdev_dereference(rt.rtdev);
+
+	RTNET_ASSERT(err == 0,
+		     rtdm_printk("RTnet: %s() error in xmit\n", __FUNCTION__););
+	(void)err;
+}
+
+/***
+ *  rt_icmp_echo_reply - handles echo replies to our previously sent requests
+ */
+static void rt_icmp_echo_reply(struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call;
+	struct ipv4_cmd *cmd;
+
+	rtdm_lock_get_irqsave(&echo_calls_lock, context);
+
+	if (!list_empty(&echo_calls)) {
+		call = (struct rt_proc_call *)echo_calls.next;
+		list_del(&call->list_entry);
+
+		rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+	} else {
+		rtdm_lock_put_irqrestore(&echo_calls_lock, context);
+		return;
+	}
+
+	cmd = rtpc_get_priv(call, struct ipv4_cmd);
+
+	cmd->args.ping.ip_addr = skb->nh.iph->saddr;
+	cmd->args.ping.rtt = 0;
+
+	if ((skb->h.icmph->un.echo.id == cmd->args.ping.id) &&
+	    (ntohs(skb->h.icmph->un.echo.sequence) ==
+	     cmd->args.ping.sequence) &&
+	    skb->len == cmd->args.ping.msg_size) {
+		if (skb->len >= sizeof(nanosecs_abs_t))
+			cmd->args.ping.rtt = rtdm_clock_read() -
+					     *((nanosecs_abs_t *)skb->data);
+		rtpc_complete_call(call, sizeof(struct icmphdr) + skb->len);
+	} else
+		rtpc_complete_call(call, 0);
+}
+
+/***
+ *  rt_icmp_echo_request - handles echo requests sent by other stations
+ */
+static void rt_icmp_echo_request(struct rtskb *skb)
+{
+	struct icmp_bxm icmp_param;
+
+	icmp_param.head.icmph = *skb->h.icmph;
+	icmp_param.head.icmph.type = ICMP_ECHOREPLY;
+	icmp_param.data.skb = skb;
+	icmp_param.offset = 0;
+	icmp_param.data_len = skb->len;
+	icmp_param.head_len = sizeof(struct icmphdr);
+
+	rt_icmp_send_reply(&icmp_param, skb);
+
+	return;
+}
+
+static int rt_icmp_glue_request_bits(const void *p, unsigned char *to,
+				     unsigned int offset, unsigned int fraglen)
+{
+	struct icmp_bxm *icmp_param = (struct icmp_bxm *)p;
+	struct icmphdr *icmph;
+	unsigned long csum;
+
+	/* TODO: add support for fragmented ICMP packets */
+	RTNET_ASSERT(
+		offset == 0,
+		rtdm_printk("RTnet: %s() does not support fragmentation.\n",
+			    __FUNCTION__);
+		return -1;);
+
+	csum = rtnet_csum_copy((void *)&icmp_param->head, to,
+			       icmp_param->head_len,
+			       icmp_param->csum);
+
+	csum = rtnet_csum_copy(icmp_param->data.buf,
+			       to + icmp_param->head_len,
+			       fraglen - icmp_param->head_len, csum);
+
+	icmph = (struct icmphdr *)to;
+
+	icmph->checksum = csum_fold(csum);
+
+	return 0;
+}
+
+/***
+ *  common request function
+ */
+static int rt_icmp_send_request(u32 daddr, struct icmp_bxm *icmp_param)
+{
+	struct dest_route rt;
+	unsigned int size;
+	int err;
+
+	icmp_param->head.icmph.checksum = 0;
+	icmp_param->csum = 0;
+
+	if ((err = rt_ip_route_output(&rt, daddr, INADDR_ANY)) < 0)
+		return err;
+
+	/* TODO: add support for fragmented ICMP packets */
+	size = icmp_param->head_len + icmp_param->data_len;
+	if (size + 20 /* ip header */ >
+	    rt.rtdev->get_mtu(rt.rtdev, RT_ICMP_PRIO))
+		err = -EMSGSIZE;
+	else {
+		rt_socket_reference(icmp_socket);
+		err = rt_ip_build_xmit(icmp_socket, rt_icmp_glue_request_bits,
+				       icmp_param, size, &rt, MSG_DONTWAIT);
+		if (err)
+			rt_socket_dereference(icmp_socket);
+	}
+
+	rtdev_dereference(rt.rtdev);
+
+	return err;
+}
+
+/***
+ *  rt_icmp_send_echo - sends an echo request to the specified address
+ */
+int rt_icmp_send_echo(u32 daddr, u16 id, u16 sequence, size_t msg_size)
+{
+	struct icmp_bxm icmp_param;
+	unsigned char *pattern_buf;
+	off_t pos;
+	int ret;
+
+	/*
+	 * This is just setup of a ping message, exec time is not critical, so
+	 * rtdm_malloc() is ok here.
+	 */
+	pattern_buf = rtdm_malloc(msg_size);
+	if (pattern_buf == NULL)
+		return -ENOMEM;
+
+	/* first purge any potentially pending ICMP fragments */
+	rt_ip_frag_invalidate_socket(icmp_socket);
+
+	icmp_param.head.icmph.type = ICMP_ECHO;
+	icmp_param.head.icmph.code = 0;
+	icmp_param.head.icmph.un.echo.id = id;
+	icmp_param.head.icmph.un.echo.sequence = htons(sequence);
+	icmp_param.offset = 0;
+
+	if (msg_size >= sizeof(nanosecs_abs_t)) {
+		icmp_param.head_len =
+			sizeof(struct icmphdr) + sizeof(nanosecs_abs_t);
+		icmp_param.data_len = msg_size - sizeof(nanosecs_abs_t);
+
+		for (pos = 0; pos < icmp_param.data_len; pos++)
+			pattern_buf[pos] = pos & 0xFF;
+
+		icmp_param.head.timestamp = rtdm_clock_read();
+	} else {
+		icmp_param.head_len = sizeof(struct icmphdr) + msg_size;
+		icmp_param.data_len = 0;
+
+		for (pos = 0; pos < msg_size; pos++)
+			pattern_buf[pos] = pos & 0xFF;
+	}
+	icmp_param.data.buf = pattern_buf;
+
+	ret = rt_icmp_send_request(daddr, &icmp_param);
+	rtdm_free(pattern_buf);
+
+	return ret;
+}
+
+/***
+ *  rt_icmp_socket
+ */
+int rt_icmp_socket(struct rtdm_fd *fd)
+{
+	/* we don't support user-created ICMP sockets */
+	return -ENOPROTOOPT;
+}
+
+static struct rt_icmp_control rt_icmp_pointers[NR_ICMP_TYPES + 1] = {
+	/* ECHO REPLY (0) */
+	{ rt_icmp_echo_reply, 0 },
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+
+	/* DEST UNREACH (3) */
+	{ rt_icmp_discard, 1 },
+
+	/* SOURCE QUENCH (4) */
+	{ rt_icmp_discard, 1 },
+
+	/* REDIRECT (5) */
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+
+	/* ECHO (8) */
+	{ rt_icmp_echo_request, 0 },
+	{ rt_icmp_discard, 1 },
+	{ rt_icmp_discard, 1 },
+
+	/* TIME EXCEEDED (11) */
+	{ rt_icmp_discard, 1 },
+
+	/* PARAMETER PROBLEM (12) */
+	{ rt_icmp_discard, 1 },
+
+	/* TIMESTAMP (13) */
+	{ rt_icmp_discard, 0 },
+
+	/* TIMESTAMP REPLY (14) */
+	{ rt_icmp_discard, 0 },
+
+	/* INFO (15) */
+	{ rt_icmp_discard, 0 },
+
+	/* INFO REPLY (16) */
+	{ rt_icmp_discard, 0 },
+
+	/* ADDR MASK (17) */
+	{ rt_icmp_discard, 0 },
+
+	/* ADDR MASK REPLY (18) */
+	{ rt_icmp_discard, 0 }
+};
+
+/***
+ *  rt_icmp_dest_socket
+ */
+struct rtsocket *rt_icmp_dest_socket(struct rtskb *skb)
+{
+	rt_socket_reference(icmp_socket);
+	return icmp_socket;
+}
+
+/***
+ *  rt_icmp_rcv
+ */
+void rt_icmp_rcv(struct rtskb *skb)
+{
+	struct icmphdr *icmpHdr = skb->h.icmph;
+	unsigned int length = skb->len;
+
+	/* check header sanity and don't accept fragmented packets */
+	if ((length < sizeof(struct icmphdr)) || (skb->next != NULL)) {
+		rtdm_printk("RTnet: improper length in icmp packet\n");
+		goto cleanup;
+	}
+
+	if (ip_compute_csum((unsigned char *)icmpHdr, length)) {
+		rtdm_printk("RTnet: invalid checksum in icmp packet %d\n",
+			    length);
+		goto cleanup;
+	}
+
+	if (!rtskb_pull(skb, sizeof(struct icmphdr))) {
+		rtdm_printk("RTnet: pull failed %p\n", (skb->sk));
+		goto cleanup;
+	}
+
+	if (icmpHdr->type > NR_ICMP_TYPES) {
+		rtdm_printk("RTnet: invalid icmp type\n");
+		goto cleanup;
+	}
+
+	/* sane packet, process it */
+	rt_icmp_pointers[icmpHdr->type].handler(skb);
+
+cleanup:
+	kfree_rtskb(skb);
+}
+
+/***
+ *  rt_icmp_rcv_err
+ */
+void rt_icmp_rcv_err(struct rtskb *skb)
+{
+	rtdm_printk("RTnet: rt_icmp_rcv err\n");
+}
+
+/***
+ *  ICMP-Initialisation
+ */
+static struct rtinet_protocol icmp_protocol = { .protocol = IPPROTO_ICMP,
+						.dest_socket =
+							&rt_icmp_dest_socket,
+						.rcv_handler = &rt_icmp_rcv,
+						.err_handler = &rt_icmp_rcv_err,
+						.init_socket =
+							&rt_icmp_socket };
+
+/***
+ *  rt_icmp_init
+ */
+void __init rt_icmp_init(void)
+{
+	int skbs;
+
+	skbs = rt_bare_socket_init(icmp_fd, IPPROTO_ICMP, RT_ICMP_PRIO,
+				   ICMP_REPLY_POOL_SIZE);
+	BUG_ON(skbs < 0);
+	if (skbs < ICMP_REPLY_POOL_SIZE)
+		printk("RTnet: allocated only %d icmp rtskbs\n", skbs);
+
+	icmp_socket->prot.inet.tos = 0;
+	icmp_fd->refs = 1;
+
+	rt_inet_add_protocol(&icmp_protocol);
+}
+
+/***
+ *  rt_icmp_release
+ */
+void rt_icmp_release(void)
+{
+	rt_icmp_cleanup_echo_requests();
+	rt_inet_del_protocol(&icmp_protocol);
+	rt_bare_socket_cleanup(icmp_socket);
+}
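When msg_size is at least sizeof(nanosecs_abs_t), rt_icmp_send_echo() embeds
the transmit timestamp directly behind the ICMP header, and
rt_icmp_echo_reply() recovers the round-trip time by subtracting it from the
current clock once the echoed payload comes back. A sketch of that
arithmetic, assuming the RTDM convention that nanosecs_abs_t is an absolute
64-bit nanosecond count:

	#include <stdint.h>
	#include <string.h>

	typedef uint64_t nanosecs_abs_t;  /* assumed: RTDM 64-bit ns */

	/* echo payload layout: [tx timestamp][pattern 0x00 0x01 ...] */
	static nanosecs_abs_t echo_rtt(const unsigned char *payload,
				       nanosecs_abs_t now)
	{
		nanosecs_abs_t tx_stamp;

		/* the reply carries our request payload back unchanged */
		memcpy(&tx_stamp, payload, sizeof(tx_stamp));
		return now - tx_stamp;	/* as in rt_icmp_echo_reply() */
	}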
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_fragment.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_fragment.c
new file mode 100644
index 0000000..113a712
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_fragment.c
@@ -0,0 +1,327 @@
+/* ip_fragment.c
+ *
+ * Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *               2003      Mathias Koehrer <mathias_koehrer@yahoo.de>
+ *               2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_socket.h>
+
+#include <linux/ip.h>
+#include <linux/in.h>
+
+#include <ipv4/ip_fragment.h>
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+#include <ipv4/ip_input.h>
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+
+/*
+ * This define sets the number of incoming fragmented IP messages that
+ * can be handled in parallel.
+ */
+#define COLLECTOR_COUNT 10
+
+struct ip_collector {
+	int in_use;
+	__u32 saddr;
+	__u32 daddr;
+	__u16 id;
+	__u8 protocol;
+
+	struct rtskb_queue frags;
+	struct rtsocket *sock;
+	unsigned int buf_size;
+};
+
+static struct ip_collector collector[COLLECTOR_COUNT];
+
+static void alloc_collector(struct rtskb *skb, struct rtsocket *sock)
+{
+	int i;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+	struct iphdr *iph = skb->nh.iph;
+
+	/*
+     * Find a free collector
+     *
+     * Note: We once used to clean up probably outdated chains, but the
+     * algorithm was not stable enough and could cause incorrect drops even
+     * under medium load. If we run in overload, we will lose data anyhow.
+     * What we should do in the future is to account collectors per socket or
+     * socket owner and set quotas.
+     * Garbage collection is now performed only on socket close.
+     */
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if (!p_coll->in_use) {
+			p_coll->in_use = 1;
+			p_coll->buf_size = skb->len;
+			p_coll->frags.first = skb;
+			p_coll->frags.last = skb;
+			p_coll->saddr = iph->saddr;
+			p_coll->daddr = iph->daddr;
+			p_coll->id = iph->id;
+			p_coll->protocol = iph->protocol;
+			p_coll->sock = sock;
+
+			rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+
+			return;
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+
+	rtdm_printk("RTnet: IP fragmentation - no collector available\n");
+	kfree_rtskb(skb);
+}
+
+/*
+ * Add the fragment to the collector whose key matches the iphdr of the
+ * passed rtskb. Returns the head of the complete fragment chain once the
+ * last fragment has arrived, NULL otherwise.
+ */
+static struct rtskb *add_to_collector(struct rtskb *skb, unsigned int offset,
+				      int more_frags)
+{
+	int i, err;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+	struct iphdr *iph = skb->nh.iph;
+	struct rtskb *first_skb;
+
+	/* Search in existing collectors */
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if (p_coll->in_use && (iph->saddr == p_coll->saddr) &&
+		    (iph->daddr == p_coll->daddr) && (iph->id == p_coll->id) &&
+		    (iph->protocol == p_coll->protocol)) {
+			first_skb = p_coll->frags.first;
+
+			/* Acquire the rtskb at the expense of the protocol pool */
+			if (rtskb_acquire(skb, &p_coll->sock->skb_pool) != 0) {
+				/* We have to drop this fragment => clean up the whole chain */
+				p_coll->in_use = 0;
+
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+
+#ifdef FRAG_DBG
+				rtdm_printk(
+					"RTnet: Compensation pool empty - IP fragments "
+					"dropped (saddr:%x, daddr:%x)\n",
+					iph->saddr, iph->daddr);
+#endif
+
+				kfree_rtskb(first_skb);
+				kfree_rtskb(skb);
+				return NULL;
+			}
+
+			/* Optimized version of __rtskb_queue_tail */
+			skb->next = NULL;
+			p_coll->frags.last->next = skb;
+			p_coll->frags.last = skb;
+
+			/* Extend the chain */
+			first_skb->chain_end = skb;
+
+			/* Sanity check: unordered fragments are not allowed! */
+			if (offset != p_coll->buf_size) {
+				/* We have to drop this fragment => clean up the whole chain */
+				p_coll->in_use = 0;
+				skb = first_skb;
+
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+				break; /* leave the for loop */
+			}
+
+			p_coll->buf_size += skb->len;
+
+			if (!more_frags) {
+				p_coll->in_use = 0;
+
+				err = rt_socket_reference(p_coll->sock);
+
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+
+				if (err < 0) {
+					kfree_rtskb(first_skb);
+					return NULL;
+				}
+
+				return first_skb;
+			} else {
+				rtdm_lock_put_irqrestore(&p_coll->frags.lock,
+							 context);
+				return NULL;
+			}
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+	if (rt_ip_fallback_handler) {
+		__rtskb_push(skb, iph->ihl * 4);
+		rt_ip_fallback_handler(skb);
+		return NULL;
+	}
+#endif
+
+#ifdef FRAG_DBG
+	rtdm_printk("RTnet: Unordered IP fragment (saddr:%x, daddr:%x)"
+		    " - dropped\n",
+		    iph->saddr, iph->daddr);
+#endif
+
+	kfree_rtskb(skb);
+	return NULL;
+}
+
+/*
+ * Cleans up all collectors referring to the specified socket.
+ * This is now the only kind of garbage collection we do.
+ */
+void rt_ip_frag_invalidate_socket(struct rtsocket *sock)
+{
+	int i;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if ((p_coll->in_use) && (p_coll->sock == sock)) {
+			p_coll->in_use = 0;
+			kfree_rtskb(p_coll->frags.first);
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+}
+EXPORT_SYMBOL_GPL(rt_ip_frag_invalidate_socket);
+
+/*
+ * Cleans up all existing collectors
+ */
+static void cleanup_all_collectors(void)
+{
+	int i;
+	rtdm_lockctx_t context;
+	struct ip_collector *p_coll;
+
+	for (i = 0; i < COLLECTOR_COUNT; i++) {
+		p_coll = &collector[i];
+		rtdm_lock_get_irqsave(&p_coll->frags.lock, context);
+
+		if (p_coll->in_use) {
+			p_coll->in_use = 0;
+			kfree_rtskb(p_coll->frags.first);
+		}
+
+		rtdm_lock_put_irqrestore(&p_coll->frags.lock, context);
+	}
+}
+
+/*
+ * This function returns an rtskb that contains the complete, accumulated
+ * IP message. If not all fragments of the IP message have been received
+ * yet, it returns NULL.
+ * Note: the IP header must have already been pulled from the rtskb!
+ */
+struct rtskb *rt_ip_defrag(struct rtskb *skb, struct rtinet_protocol *ipprot)
+{
+	unsigned int more_frags;
+	unsigned int offset;
+	struct rtsocket *sock;
+	struct iphdr *iph = skb->nh.iph;
+	int ret;
+
+	/* Parse the IP header */
+	offset = ntohs(iph->frag_off);
+	more_frags = offset & IP_MF;
+	offset &= IP_OFFSET;
+	offset <<= 3; /* offset is in 8-byte chunks */
+
+	/* First fragment? */
+	if (offset == 0) {
+		/* Get the destination socket */
+		if ((sock = ipprot->dest_socket(skb)) == NULL) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+			if (rt_ip_fallback_handler) {
+				__rtskb_push(skb, iph->ihl * 4);
+				rt_ip_fallback_handler(skb);
+				return NULL;
+			}
+#endif
+			/* Drop the rtskb */
+			kfree_rtskb(skb);
+			return NULL;
+		}
+
+		/* Acquire the rtskb, to unlock the device skb pool */
+		ret = rtskb_acquire(skb, &sock->skb_pool);
+
+		if (ret != 0) {
+			/* Drop the rtskb */
+			kfree_rtskb(skb);
+		} else {
+			/* Allocates a new collector */
+			alloc_collector(skb, sock);
+		}
+
+		/* Packet is queued or freed, socket can be released */
+		rt_socket_dereference(sock);
+
+		return NULL;
+	} else {
+		/* Add to an existing collector */
+		return add_to_collector(skb, offset, more_frags);
+	}
+}
+
+int __init rt_ip_fragment_init(void)
+{
+	int i;
+
+	/* Probably not needed (static variable...) */
+	memset(collector, 0, sizeof(collector));
+
+	for (i = 0; i < COLLECTOR_COUNT; i++)
+		rtdm_lock_init(&collector[i].frags.lock);
+
+	return 0;
+}
+
+void rt_ip_fragment_cleanup(void)
+{
+	cleanup_all_collectors();
+}
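rt_ip_defrag() derives everything it needs from the 16-bit frag_off field:
the top bits carry flags, the lower 13 bits the offset in 8-byte units. A
standalone sketch of that decoding (the SK_ constants mirror IP_MF and
IP_OFFSET from the kernel's <net/ip.h>, duplicated here only so the sketch
is self-contained):

	#include <stdint.h>
	#include <arpa/inet.h>

	#define SK_IP_MF     0x2000	/* "more fragments" flag        */
	#define SK_IP_OFFSET 0x1FFF	/* offset mask, 8-byte units    */

	static void decode_frag_off(uint16_t frag_off_be,
				    unsigned int *byte_offset,
				    int *more_frags)
	{
		unsigned int v = ntohs(frag_off_be);

		*more_frags = v & SK_IP_MF;
		*byte_offset = (v & SK_IP_OFFSET) << 3;	/* -> bytes */
	}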
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_input.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_input.c
new file mode 100644
index 0000000..4495b8d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_input.c
@@ -0,0 +1,159 @@
+/***
+ *
+ *  ipv4/ip_input.c - process incoming IP packets
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <rtskb.h>
+#include <rtnet_socket.h>
+#include <stack_mgr.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+#include <ipv4/ip_input.h>
+
+rt_ip_fallback_handler_t rt_ip_fallback_handler = NULL;
+EXPORT_SYMBOL_GPL(rt_ip_fallback_handler);
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+
+/***
+ *  rt_ip_local_deliver
+ */
+static inline void rt_ip_local_deliver(struct rtskb *skb)
+{
+	struct iphdr *iph = skb->nh.iph;
+	unsigned short protocol = iph->protocol;
+	struct rtinet_protocol *ipprot;
+	struct rtsocket *sock;
+	int err;
+
+	ipprot = rt_inet_protocols[rt_inet_hashkey(protocol)];
+
+	/* Check if we are supporting the protocol */
+	if ((ipprot != NULL) && (ipprot->protocol == protocol)) {
+		__rtskb_pull(skb, iph->ihl * 4);
+
+		/* Point into the IP datagram, just past the header. */
+		skb->h.raw = skb->data;
+
+		/* Reassemble IP fragments */
+		if (iph->frag_off & htons(IP_MF | IP_OFFSET)) {
+			skb = rt_ip_defrag(skb, ipprot);
+			if (!skb)
+				return;
+
+			sock = skb->sk;
+		} else {
+			/* Get the destination socket */
+			if ((sock = ipprot->dest_socket(skb)) == NULL) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+				if (rt_ip_fallback_handler) {
+					__rtskb_push(skb, iph->ihl * 4);
+					rt_ip_fallback_handler(skb);
+					return;
+				}
+#endif
+				kfree_rtskb(skb);
+				return;
+			}
+
+			/* Acquire the rtskb, to unlock the device skb pool */
+			err = rtskb_acquire(skb, &sock->skb_pool);
+
+			if (err) {
+				kfree_rtskb(skb);
+				rt_socket_dereference(sock);
+				return;
+			}
+		}
+
+		/* Deliver the packet to the next layer */
+		ipprot->rcv_handler(skb);
+
+		/* Packet is queued, socket can be released */
+		rt_socket_dereference(sock);
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+	} else if (rt_ip_fallback_handler) {
+		/* If a fallback handler for IP protocol has been installed,
+		 * call it. */
+		rt_ip_fallback_handler(skb);
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+	} else {
+		if (IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4_DEBUG))
+			rtdm_printk("RTnet: no protocol found\n");
+		kfree_rtskb(skb);
+	}
+}
+
+/***
+ *  rt_ip_rcv
+ */
+int rt_ip_rcv(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct iphdr *iph;
+	__u32 len;
+
+	/* When the interface is in promisc. mode, drop all the crap
+     * that it receives, do not try to analyse it.
+     */
+	if (skb->pkt_type == PACKET_OTHERHOST)
+		goto drop;
+
+	iph = skb->nh.iph;
+
+	/*
+     *  RFC1122: 3.1.2.2 MUST silently discard any IP frame that fails the checksum.
+     *
+     *  Is the datagram acceptable?
+     *
+     *  1.  Length at least the size of an ip header
+     *  2.  Version of 4
+     *  3.  Checksums correctly. [Speed optimisation for later, skip loopback checksums]
+     *  4.  Doesn't have a bogus length
+     */
+	if (iph->ihl < 5 || iph->version != 4)
+		goto drop;
+
+	if (ip_fast_csum((u8 *)iph, iph->ihl) != 0)
+		goto drop;
+
+	len = ntohs(iph->tot_len);
+	if ((skb->len < len) || (len < ((__u32)iph->ihl << 2)))
+		goto drop;
+
+	rtskb_trim(skb, len);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+	if (rt_ip_route_forward(skb, iph->daddr))
+		return 0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER */
+
+	rt_ip_local_deliver(skb);
+	return 0;
+
+drop:
+	kfree_rtskb(skb);
+	return 0;
+}
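The acceptance tests in rt_ip_rcv() follow the RFC 1122 rules quoted above.
A host-side sketch of the same version/length checks, operating on the raw
header bytes so no kernel types are needed (checksum verification omitted):

	#include <stddef.h>
	#include <stdint.h>
	#include <string.h>
	#include <arpa/inet.h>

	static int ip_header_acceptable(const unsigned char *hdr,
					size_t frame_len)
	{
		unsigned int version = hdr[0] >> 4;	/* high nibble */
		unsigned int ihl = hdr[0] & 0x0F;	/* 32-bit words */
		uint16_t tot_len_be;
		unsigned int tot_len;

		if (ihl < 5 || version != 4)
			return 0;		/* malformed header */

		memcpy(&tot_len_be, hdr + 2, sizeof(tot_len_be));
		tot_len = ntohs(tot_len_be);
		if (frame_len < tot_len || tot_len < ihl * 4)
			return 0;		/* bogus length */

		return 1;
	}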
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_output.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_output.c
new file mode 100644
index 0000000..664d0c7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_output.c
@@ -0,0 +1,267 @@
+/***
+ *
+ *  ipv4/ip_output.c - prepare outgoing IP packets
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/ip.h>
+#include <net/checksum.h>
+#include <net/ip.h>
+
+#include <rtnet_socket.h>
+#include <stack_mgr.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/ip_input.h>
+#include <ipv4/route.h>
+
+static DEFINE_RTDM_LOCK(rt_ip_id_lock);
+static u16 rt_ip_id_count = 0;
+
+/***
+ *  Slow path for fragmented packets
+ */
+int rt_ip_build_xmit_slow(struct rtsocket *sk,
+			  int getfrag(const void *, char *, unsigned int,
+				      unsigned int),
+			  const void *frag, unsigned length,
+			  struct dest_route *rt, int msg_flags,
+			  unsigned int mtu, unsigned int prio)
+{
+	int err, next_err;
+	struct rtskb *skb;
+	struct rtskb *next_skb;
+	struct iphdr *iph;
+	struct rtnet_device *rtdev = rt->rtdev;
+	unsigned int fragdatalen;
+	unsigned int offset = 0;
+	u16 msg_rt_ip_id;
+	rtdm_lockctx_t context;
+	unsigned int rtskb_size;
+	int hh_len = (rtdev->hard_header_len + 15) & ~15;
+
+#define FRAGHEADERLEN sizeof(struct iphdr)
+
+	fragdatalen = ((mtu - FRAGHEADERLEN) & ~7);
+
+	/* Store id in local variable */
+	rtdm_lock_get_irqsave(&rt_ip_id_lock, context);
+	msg_rt_ip_id = rt_ip_id_count++;
+	rtdm_lock_put_irqrestore(&rt_ip_id_lock, context);
+
+	rtskb_size = mtu + hh_len + 15;
+
+	/* TODO: delay previous skb until ALL errors that may occur during
+	 * next skb setup are caught */
+
+	/* Preallocate first rtskb */
+	skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
+	if (skb == NULL)
+		return -ENOBUFS;
+
+	for (offset = 0; offset < length; offset += fragdatalen) {
+		int fraglen; /* The length (IP, including ip-header) of this
+			very fragment */
+		__u16 frag_off = offset >> 3;
+
+		next_err = 0;
+		if (offset >= length - fragdatalen) {
+			/* last fragment */
+			fraglen = FRAGHEADERLEN + length - offset;
+			next_skb = NULL;
+		} else {
+			fraglen = FRAGHEADERLEN + fragdatalen;
+			frag_off |= IP_MF;
+
+			next_skb = alloc_rtskb(rtskb_size, &sk->skb_pool);
+			if (next_skb == NULL) {
+				frag_off &= ~IP_MF; /* cut the chain */
+				next_err = -ENOBUFS;
+			}
+		}
+
+		rtskb_reserve(skb, hh_len);
+
+		skb->rtdev = rtdev;
+		skb->nh.iph = iph = (struct iphdr *)rtskb_put(skb, fraglen);
+		skb->priority = prio;
+
+		iph->version = 4;
+		iph->ihl = 5; /* 20 byte header - no options */
+		iph->tos = sk->prot.inet.tos;
+		iph->tot_len = htons(fraglen);
+		iph->id = htons(msg_rt_ip_id);
+		iph->frag_off = htons(frag_off);
+		iph->ttl = 255;
+		iph->protocol = sk->protocol;
+		iph->saddr = rtdev->local_ip;
+		iph->daddr = rt->ip;
+		iph->check = 0; /* required! */
+		iph->check = ip_fast_csum((unsigned char *)iph, 5 /*iph->ihl*/);
+
+		if ((err = getfrag(frag, ((char *)iph) + 5 /*iph->ihl*/ * 4,
+				   offset, fraglen - FRAGHEADERLEN)))
+			goto error;
+
+		if (rtdev->hard_header) {
+			err = rtdev->hard_header(skb, rtdev, ETH_P_IP,
+						 rt->dev_addr, rtdev->dev_addr,
+						 skb->len);
+			if (err < 0)
+				goto error;
+		}
+
+		err = rtdev_xmit(skb);
+
+		skb = next_skb;
+
+		if (err != 0) {
+			err = -EAGAIN;
+			goto error;
+		}
+
+		if (next_err != 0)
+			return next_err;
+	}
+	return 0;
+
+error:
+	if (skb != NULL) {
+		kfree_rtskb(skb);
+
+		if (next_skb != NULL)
+			kfree_rtskb(next_skb);
+	}
+	return err;
+}
+
+/***
+ *  Fast path for unfragmented packets.
+ */
+int rt_ip_build_xmit(struct rtsocket *sk,
+		     int getfrag(const void *, char *, unsigned int,
+				 unsigned int),
+		     const void *frag, unsigned length, struct dest_route *rt,
+		     int msg_flags)
+{
+	int err = 0;
+	struct rtskb *skb;
+	struct iphdr *iph;
+	int hh_len;
+	u16 msg_rt_ip_id;
+	rtdm_lockctx_t context;
+	struct rtnet_device *rtdev = rt->rtdev;
+	unsigned int prio;
+	unsigned int mtu;
+
+	/* sk->priority may encode both priority and output channel. Make sure
+       we use a consistent value, also for the MTU which is derived from the
+       channel. */
+	prio = (volatile unsigned int)sk->priority;
+	mtu = rtdev->get_mtu(rtdev, prio);
+
+	/*
+     *  Try the simple case first. This leaves fragmented frames, and by choice
+     *  RAW frames within 20 bytes of maximum size (rare) to the long path.
+     */
+	length += sizeof(struct iphdr);
+
+	if (length > mtu)
+		return rt_ip_build_xmit_slow(sk, getfrag, frag,
+					     length - sizeof(struct iphdr), rt,
+					     msg_flags, mtu, prio);
+
+	/* Store id in local variable */
+	rtdm_lock_get_irqsave(&rt_ip_id_lock, context);
+	msg_rt_ip_id = rt_ip_id_count++;
+	rtdm_lock_put_irqrestore(&rt_ip_id_lock, context);
+
+	hh_len = (rtdev->hard_header_len + 15) & ~15;
+
+	skb = alloc_rtskb(length + hh_len + 15, &sk->skb_pool);
+	if (skb == NULL)
+		return -ENOBUFS;
+
+	rtskb_reserve(skb, hh_len);
+
+	skb->rtdev = rtdev;
+	skb->nh.iph = iph = (struct iphdr *)rtskb_put(skb, length);
+	skb->priority = prio;
+
+	iph->version = 4;
+	iph->ihl = 5;
+	iph->tos = sk->prot.inet.tos;
+	iph->tot_len = htons(length);
+	iph->id = htons(msg_rt_ip_id);
+	iph->frag_off = htons(IP_DF);
+	iph->ttl = 255;
+	iph->protocol = sk->protocol;
+	iph->saddr = rtdev->local_ip;
+	iph->daddr = rt->ip;
+	iph->check = 0; /* required! */
+	iph->check = ip_fast_csum((unsigned char *)iph, 5 /*iph->ihl*/);
+
+	if ((err = getfrag(frag, ((char *)iph) + 5 /*iph->ihl*/ * 4, 0,
+			   length - 5 /*iph->ihl*/ * 4)))
+		goto error;
+
+	if (rtdev->hard_header) {
+		err = rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->dev_addr,
+					 rtdev->dev_addr, skb->len);
+		if (err < 0)
+			goto error;
+	}
+
+	err = rtdev_xmit(skb);
+
+	if (err)
+		return -EAGAIN;
+	else
+		return 0;
+
+error:
+	kfree_rtskb(skb);
+	return err;
+}
+EXPORT_SYMBOL_GPL(rt_ip_build_xmit);
+
+/***
+ *  IP protocol layer initialiser
+ */
+static struct rtpacket_type ip_packet_type = { .type = __constant_htons(
+						       ETH_P_IP),
+					       .handler = &rt_ip_rcv };
+
+/***
+ *  ip_init
+ */
+void __init rt_ip_init(void)
+{
+	rtdev_add_pack(&ip_packet_type);
+	rt_ip_fragment_init();
+}
+
+/***
+ *  ip_release
+ */
+void rt_ip_release(void)
+{
+	rtdev_remove_pack(&ip_packet_type);
+	rt_ip_fragment_cleanup();
+}
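rt_ip_build_xmit_slow() sizes every fragment but the last to the largest
multiple of 8 payload bytes that fits the MTU, and encodes each offset in
8-byte units with IP_MF set on all but the final fragment. A standalone
walk-through of that sizing with example numbers:

	#include <stdio.h>

	#define FRAGHEADERLEN 20	/* IP header without options */

	int main(void)
	{
		unsigned int mtu = 1500, length = 4000;	/* examples */
		unsigned int fragdatalen = (mtu - FRAGHEADERLEN) & ~7u;
		unsigned int offset;

		for (offset = 0; offset < length; offset += fragdatalen) {
			int last = offset >= length - fragdatalen;

			printf("frag_off=%4u (8-byte units) payload=%u%s\n",
			       offset >> 3,
			       last ? length - offset : fragdatalen,
			       last ? "" : " [IP_MF]");
		}
		return 0;
	}

With an MTU of 1500 this yields payloads of 1480, 1480 and 1040 bytes at
offsets 0, 185 and 370 (in 8-byte units), matching the loop above.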
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_sock.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_sock.c
new file mode 100644
index 0000000..8ca6aeb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/ip_sock.c
@@ -0,0 +1,194 @@
+/***
+ *
+ *  ipv4/ip_sock.c
+ *
+ *  Copyright (C) 2003       Hans-Peter Bock <hpbock@avaapgh.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *                2019       Sebastian Smolorz <sebastian.smolorz@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/errno.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+
+#include <rtnet_socket.h>
+
+int rt_ip_setsockopt(struct rtdm_fd *fd, struct rtsocket *s, int level,
+		     int optname, const void __user *optval, socklen_t optlen)
+{
+	int err = 0;
+	unsigned int _tos, *tos;
+
+	if (level != SOL_IP)
+		return -ENOPROTOOPT;
+
+	if (optlen < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (optname) {
+	case IP_TOS:
+		tos = rtnet_get_arg(fd, &_tos, optval, sizeof(_tos));
+		if (IS_ERR(tos))
+			return PTR_ERR(tos);
+		else
+			s->prot.inet.tos = *tos;
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	return err;
+}
+
+int rt_ip_getsockopt(struct rtdm_fd *fd, struct rtsocket *s, int level,
+		     int optname, void __user *optval, socklen_t __user *optlen)
+{
+	int err = 0;
+	unsigned int tos;
+	socklen_t _len, *len;
+
+	len = rtnet_get_arg(fd, &_len, optlen, sizeof(_len));
+	if (IS_ERR(len))
+		return PTR_ERR(len);
+
+	if (*len < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (optname) {
+	case IP_TOS:
+		tos = s->prot.inet.tos;
+		err = rtnet_put_arg(fd, optval, &tos, sizeof(tos));
+		if (!err) {
+			*len = sizeof(unsigned int);
+			err = rtnet_put_arg(fd, optlen, len, sizeof(socklen_t));
+		}
+		break;
+
+	default:
+		err = -ENOPROTOOPT;
+		break;
+	}
+
+	return err;
+}
+
+int rt_ip_getsockname(struct rtdm_fd *fd, struct rtsocket *s,
+		      struct sockaddr __user *addr, socklen_t __user *addrlen)
+{
+	struct sockaddr_in _sin;
+	socklen_t *len, _len;
+	int ret;
+
+	len = rtnet_get_arg(fd, &_len, addrlen, sizeof(_len));
+	if (IS_ERR(len))
+		return PTR_ERR(len);
+
+	if (*len < sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	_sin.sin_family = AF_INET;
+	_sin.sin_addr.s_addr = s->prot.inet.saddr;
+	_sin.sin_port = s->prot.inet.sport;
+	memset(&_sin.sin_zero, 0, sizeof(_sin.sin_zero));
+	ret = rtnet_put_arg(fd, addr, &_sin, sizeof(_sin));
+	if (ret)
+		return ret;
+
+	*len = sizeof(struct sockaddr_in);
+	ret = rtnet_put_arg(fd, addrlen, len, sizeof(socklen_t));
+
+	return ret;
+}
+
+int rt_ip_getpeername(struct rtdm_fd *fd, struct rtsocket *s,
+		      struct sockaddr __user *addr, socklen_t __user *addrlen)
+{
+	struct sockaddr_in _sin;
+	socklen_t *len, _len;
+	int ret;
+
+	len = rtnet_get_arg(fd, &_len, addrlen, sizeof(_len));
+	if (IS_ERR(len))
+		return PTR_ERR(len);
+
+	if (*len < sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	_sin.sin_family = AF_INET;
+	_sin.sin_addr.s_addr = s->prot.inet.daddr;
+	_sin.sin_port = s->prot.inet.dport;
+	memset(&_sin.sin_zero, 0, sizeof(_sin.sin_zero));
+	ret = rtnet_put_arg(fd, addr, &_sin, sizeof(_sin));
+	if (ret)
+		return ret;
+
+	*len = sizeof(struct sockaddr_in);
+	ret = rtnet_put_arg(fd, addrlen, len, sizeof(socklen_t));
+
+	return ret;
+}
+
+int rt_ip_ioctl(struct rtdm_fd *fd, int request, void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	struct _rtdm_getsockaddr_args _getaddr, *getaddr;
+	struct _rtdm_getsockopt_args _getopt, *getopt;
+	struct _rtdm_setsockopt_args _setopt, *setopt;
+
+	switch (request) {
+	case _RTIOC_SETSOCKOPT:
+		setopt = rtnet_get_arg(fd, &_setopt, arg, sizeof(_setopt));
+		if (IS_ERR(setopt))
+			return PTR_ERR(setopt);
+
+		return rt_ip_setsockopt(fd, sock, setopt->level,
+					setopt->optname, setopt->optval,
+					setopt->optlen);
+
+	case _RTIOC_GETSOCKOPT:
+		getopt = rtnet_get_arg(fd, &_getopt, arg, sizeof(_getopt));
+		if (IS_ERR(getopt))
+			return PTR_ERR(getopt);
+
+		return rt_ip_getsockopt(fd, sock, getopt->level,
+					getopt->optname, getopt->optval,
+					getopt->optlen);
+
+	case _RTIOC_GETSOCKNAME:
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+
+		return rt_ip_getsockname(fd, sock, getaddr->addr,
+					 getaddr->addrlen);
+
+	case _RTIOC_GETPEERNAME:
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+
+		return rt_ip_getpeername(fd, sock, getaddr->addr,
+					 getaddr->addrlen);
+
+	default:
+		return rt_socket_if_ioctl(fd, request, arg);
+	}
+}
+EXPORT_SYMBOL_GPL(rt_ip_ioctl);
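These handlers back the standard socket calls, so from an application the
TOS byte is managed through the usual POSIX interface. A sketch, assuming fd
refers to an already created RTnet socket and that optval is an unsigned
int, as rt_ip_setsockopt() above requires:

	#include <sys/socket.h>
	#include <netinet/in.h>

	/* set the TOS byte and read it back for verification */
	static int set_and_verify_tos(int fd, unsigned int tos)
	{
		unsigned int readback = 0;
		socklen_t len = sizeof(readback);

		if (setsockopt(fd, SOL_IP, IP_TOS, &tos, sizeof(tos)) < 0)
			return -1;
		if (getsockopt(fd, SOL_IP, IP_TOS, &readback, &len) < 0)
			return -1;
		return readback == tos ? 0 : -1;
	}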
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/protocol.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/protocol.c
new file mode 100644
index 0000000..f56177d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/protocol.c
@@ -0,0 +1,88 @@
+/***
+ *
+ *  ipv4/protocol.c
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/socket.h>
+#include <linux/in.h>
+
+#include <rtnet_socket.h>
+#include <ipv4/protocol.h>
+
+struct rtinet_protocol *rt_inet_protocols[MAX_RT_INET_PROTOCOLS];
+
+/***
+ * rt_inet_add_protocol
+ */
+void rt_inet_add_protocol(struct rtinet_protocol *prot)
+{
+	unsigned char hash = rt_inet_hashkey(prot->protocol);
+
+	if (rt_inet_protocols[hash] == NULL)
+		rt_inet_protocols[hash] = prot;
+}
+EXPORT_SYMBOL_GPL(rt_inet_add_protocol);
+
+/***
+ * rt_inet_del_protocol
+ */
+void rt_inet_del_protocol(struct rtinet_protocol *prot)
+{
+	unsigned char hash = rt_inet_hashkey(prot->protocol);
+
+	if (prot == rt_inet_protocols[hash])
+		rt_inet_protocols[hash] = NULL;
+}
+EXPORT_SYMBOL_GPL(rt_inet_del_protocol);
+
+/***
+ * rt_inet_socket - initialize an Internet socket
+ * @fd: RTDM file descriptor of the socket
+ * @protocol: protocol id
+ */
+int rt_inet_socket(struct rtdm_fd *fd, int protocol)
+{
+	struct rtinet_protocol *prot;
+
+	if (protocol == 0)
+		switch (rtdm_fd_to_context(fd)->device->driver->socket_type) {
+		case SOCK_DGRAM:
+			protocol = IPPROTO_UDP;
+			break;
+		case SOCK_STREAM:
+			protocol = IPPROTO_TCP;
+			break;
+		}
+
+	prot = rt_inet_protocols[rt_inet_hashkey(protocol)];
+
+	/* create the socket (call the socket creator) */
+	if ((prot != NULL) && (prot->protocol == protocol))
+		return prot->init_socket(fd);
+	else {
+		rtdm_printk("RTnet: protocol with id %d not found\n", protocol);
+
+		return -ENOPROTOOPT;
+	}
+}
+EXPORT_SYMBOL_GPL(rt_inet_socket);
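rt_inet_protocols[] is a single-slot hash table: registration and lookup
both go through rt_inet_hashkey(), and the lookup re-checks ->protocol
because distinct protocol ids can map to the same slot. The real hash is
defined in ipv4/protocol.h; a masking hash and table size are assumed here
purely for illustration:

	#define MAX_RT_INET_PROTOCOLS 32	/* assumed table size */

	/* assumed stand-in for rt_inet_hashkey() from ipv4/protocol.h */
	static inline unsigned char hashkey_sketch(unsigned char protocol)
	{
		return protocol & (MAX_RT_INET_PROTOCOLS - 1);
	}

	/* IPPROTO_ICMP (1) and IPPROTO_UDP (17) land in distinct slots
	 * here, but ids differing only in the masked-out bits would
	 * collide and be rejected by the 'prot->protocol == protocol'
	 * re-check in rt_inet_socket() above. */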
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/route.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/route.c
new file mode 100644
index 0000000..d9a9c8b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/route.c
@@ -0,0 +1,1057 @@
+/***
+ *
+ *  ipv4/route.c - real-time routing
+ *
+ *  Copyright (C) 2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  Rewritten version of the original route by David Schleef and Ulrich Marx
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <net/ip.h>
+
+#include <rtnet_internal.h>
+#include <rtnet_port.h>
+#include <rtnet_chrdev.h>
+#include <ipv4/af_inet.h>
+#include <ipv4/route.h>
+
+/* FIXME: should also become some tunable parameter */
+#define ROUTER_FORWARD_PRIO                                                    \
+	RTSKB_PRIO_VALUE(QUEUE_MAX_PRIO +                                      \
+				 (QUEUE_MIN_PRIO - QUEUE_MAX_PRIO + 1) / 2,    \
+			 RTSKB_DEF_RT_CHANNEL)
+
+/* First-level routing: explicit host routes */
+struct host_route {
+	struct host_route *next;
+	struct dest_route dest_host;
+};
+
+/* Second-level routing: routes to other networks */
+struct net_route {
+	struct net_route *next;
+	u32 dest_net_ip;
+	u32 dest_net_mask;
+	u32 gw_ip;
+};
+
+#if (CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES &                              \
+     (CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES - 1))
+#error CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES must be a power of 2
+#endif
+#if CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES < 256
+#define HOST_HASH_TBL_SIZE 64
+#else
+#define HOST_HASH_TBL_SIZE                                                     \
+	((CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES / 256) * 64)
+#endif
+#define HOST_HASH_KEY_MASK (HOST_HASH_TBL_SIZE - 1)
+
+static struct host_route host_routes[CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES];
+static struct host_route *free_host_route;
+static int allocated_host_routes;
+static struct host_route *host_hash_tbl[HOST_HASH_TBL_SIZE];
+static DEFINE_RTDM_LOCK(host_table_lock);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+#if (CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES &                               \
+     (CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES - 1))
+#error CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES must be a power of 2
+#endif
+#if CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES < 256
+#define NET_HASH_TBL_SIZE 64
+#else
+#define NET_HASH_TBL_SIZE                                                      \
+	((CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES / 256) * 64)
+#endif
+#define NET_HASH_KEY_MASK (NET_HASH_TBL_SIZE - 1)
+#define NET_HASH_KEY_SHIFT 8
+
+static struct net_route net_routes[CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES];
+static struct net_route *free_net_route;
+static int allocated_net_routes;
+static struct net_route *net_hash_tbl[NET_HASH_TBL_SIZE + 1];
+static unsigned int net_hash_key_shift = NET_HASH_KEY_SHIFT;
+static DEFINE_RTDM_LOCK(net_table_lock);
+
+module_param(net_hash_key_shift, uint, 0444);
+MODULE_PARM_DESC(net_hash_key_shift, "destination right shift for "
+				     "network hash key (default: 8)");
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+/***
+ *  proc filesystem section
+ */
+#ifdef CONFIG_XENO_OPT_VFILE
+static int rtnet_ipv4_route_show(struct xnvfile_regular_iterator *it, void *d)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	u32 mask;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	xnvfile_printf(it,
+		       "Host routes allocated/total:\t%d/%d\n"
+		       "Host hash table size:\t\t%d\n",
+		       allocated_host_routes,
+		       CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES,
+		       HOST_HASH_TBL_SIZE);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	mask = NET_HASH_KEY_MASK << net_hash_key_shift;
+	xnvfile_printf(it,
+		       "Network routes allocated/total:\t%d/%d\n"
+		       "Network hash table size:\t%d\n"
+		       "Network hash key shift/mask:\t%d/%08X\n",
+		       allocated_net_routes,
+		       CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES,
+		       NET_HASH_TBL_SIZE, net_hash_key_shift, mask);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+	xnvfile_printf(it, "IP Router:\t\t\tyes\n");
+#else
+	xnvfile_printf(it, "IP Router:\t\t\tno\n");
+#endif
+
+	return 0;
+}
+
+static int rtnet_ipv4_module_lock(struct xnvfile *vfile)
+{
+	bool res = try_module_get(THIS_MODULE);
+	if (!res)
+		return -EIDRM;
+
+	return 0;
+}
+
+static void rtnet_ipv4_module_unlock(struct xnvfile *vfile)
+{
+	module_put(THIS_MODULE);
+}
+
+static struct xnvfile_lock_ops rtnet_ipv4_module_lock_ops = {
+	.get = rtnet_ipv4_module_lock,
+	.put = rtnet_ipv4_module_unlock,
+};
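+/*
+ * Taking a module reference while a vfile is read prevents the module
+ * from being unloaded mid-access; a failing try_module_get() means the
+ * module is already going away, which is reported as -EIDRM.
+ */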
+
+static struct xnvfile_regular_ops rtnet_ipv4_route_vfile_ops = {
+	.show = rtnet_ipv4_route_show,
+};
+
+static struct xnvfile_regular rtnet_ipv4_route_vfile = {
+    .entry = {
+	.lockops = &rtnet_ipv4_module_lock_ops,
+    },
+    .ops = &rtnet_ipv4_route_vfile_ops,
+};
+
+static rtdm_lockctx_t rtnet_ipv4_host_route_lock_ctx;
+
+static int rtnet_ipv4_host_route_lock(struct xnvfile *vfile)
+{
+	rtdm_lock_get_irqsave(&host_table_lock, rtnet_ipv4_host_route_lock_ctx);
+	return 0;
+}
+
+static void rtnet_ipv4_host_route_unlock(struct xnvfile *vfile)
+{
+	rtdm_lock_put_irqrestore(&host_table_lock,
+				 rtnet_ipv4_host_route_lock_ctx);
+}
+
+static struct xnvfile_lock_ops rtnet_ipv4_host_route_lock_ops = {
+	.get = rtnet_ipv4_host_route_lock,
+	.put = rtnet_ipv4_host_route_unlock,
+};
+
+struct rtnet_ipv4_host_route_priv {
+	unsigned key;
+	struct host_route *entry_ptr;
+};
+
+struct rtnet_ipv4_host_route_data {
+	int key;
+	char name[IFNAMSIZ];
+	struct dest_route dest_host;
+};
+
+struct xnvfile_rev_tag host_route_tag;
+
+static void *rtnet_ipv4_host_route_begin(struct xnvfile_snapshot_iterator *it)
+{
+	struct rtnet_ipv4_host_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_host_route_data *data;
+	unsigned routes;
+	int err;
+
+	routes = allocated_host_routes;
+	if (!routes)
+		return VFILE_SEQ_EMPTY;
+
+	data = kmalloc(sizeof(*data) * routes, GFP_KERNEL);
+	if (data == NULL)
+		return NULL;
+
+	err = rtnet_ipv4_module_lock(NULL);
+	if (err < 0) {
+		kfree(data);
+		return VFILE_SEQ_EMPTY;
+	}
+
+	priv->key = -1;
+	priv->entry_ptr = NULL;
+	return data;
+}
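+/*
+ * Note: priv->key is unsigned, so initializing it to -1 makes the first
+ * increment in the next() handler wrap to 0, starting the walk at hash
+ * bucket 0.
+ */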
+
+static void rtnet_ipv4_host_route_end(struct xnvfile_snapshot_iterator *it,
+				      void *buf)
+{
+	rtnet_ipv4_module_unlock(NULL);
+	kfree(buf);
+}
+
+static int rtnet_ipv4_host_route_next(struct xnvfile_snapshot_iterator *it,
+				      void *data)
+{
+	struct rtnet_ipv4_host_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_host_route_data *p = data;
+	struct rtnet_device *rtdev;
+
+	if (priv->entry_ptr == NULL) {
+		if (++priv->key >= HOST_HASH_TBL_SIZE)
+			return 0;
+
+		priv->entry_ptr = host_hash_tbl[priv->key];
+		if (priv->entry_ptr == NULL)
+			return VFILE_SEQ_SKIP;
+	}
+
+	rtdev = priv->entry_ptr->dest_host.rtdev;
+
+	if (!rtdev_reference(rtdev))
+		return -EIDRM;
+
+	memcpy(&p->name, rtdev->name, sizeof(p->name));
+
+	rtdev_dereference(rtdev);
+
+	p->key = priv->key;
+
+	memcpy(&p->dest_host, &priv->entry_ptr->dest_host,
+	       sizeof(p->dest_host));
+
+	priv->entry_ptr = priv->entry_ptr->next;
+
+	return 1;
+}
+
+static int rtnet_ipv4_host_route_show(struct xnvfile_snapshot_iterator *it,
+				      void *data)
+{
+	struct rtnet_ipv4_host_route_data *p = data;
+
+	if (p == NULL) {
+		xnvfile_printf(it, "Hash\tDestination\tHW Address\t\tDevice\n");
+		return 0;
+	}
+
+	xnvfile_printf(it,
+		       "%02X\t%u.%u.%u.%-3u\t"
+		       "%02X:%02X:%02X:%02X:%02X:%02X\t%s\n",
+		       p->key, NIPQUAD(p->dest_host.ip),
+		       p->dest_host.dev_addr[0], p->dest_host.dev_addr[1],
+		       p->dest_host.dev_addr[2], p->dest_host.dev_addr[3],
+		       p->dest_host.dev_addr[4], p->dest_host.dev_addr[5],
+		       p->name);
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops rtnet_ipv4_host_route_vfile_ops = {
+	.begin = rtnet_ipv4_host_route_begin,
+	.end = rtnet_ipv4_host_route_end,
+	.next = rtnet_ipv4_host_route_next,
+	.show = rtnet_ipv4_host_route_show,
+};
+
+static struct xnvfile_snapshot rtnet_ipv4_host_route_vfile = {
+    .entry = {
+	.lockops = &rtnet_ipv4_host_route_lock_ops,
+    },
+    .privsz = sizeof(struct rtnet_ipv4_host_route_priv),
+    .datasz = sizeof(struct rtnet_ipv4_host_route_data),
+    .tag = &host_route_tag,
+    .ops = &rtnet_ipv4_host_route_vfile_ops,
+};
+
+static struct xnvfile_link rtnet_ipv4_arp_vfile;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+static rtdm_lockctx_t rtnet_ipv4_net_route_lock_ctx;
+
+static int rtnet_ipv4_net_route_lock(struct xnvfile *vfile)
+{
+	rtdm_lock_get_irqsave(&net_table_lock, rtnet_ipv4_net_route_lock_ctx);
+	return 0;
+}
+
+static void rtnet_ipv4_net_route_unlock(struct xnvfile *vfile)
+{
+	rtdm_lock_put_irqrestore(&net_table_lock,
+				 rtnet_ipv4_net_route_lock_ctx);
+}
+
+static struct xnvfile_lock_ops rtnet_ipv4_net_route_lock_ops = {
+	.get = rtnet_ipv4_net_route_lock,
+	.put = rtnet_ipv4_net_route_unlock,
+};
+
+struct rtnet_ipv4_net_route_priv {
+	unsigned key;
+	struct net_route *entry_ptr;
+};
+
+struct rtnet_ipv4_net_route_data {
+	int key;
+	u32 dest_net_ip;
+	u32 dest_net_mask;
+	u32 gw_ip;
+};
+
+struct xnvfile_rev_tag net_route_tag;
+
+static void *rtnet_ipv4_net_route_begin(struct xnvfile_snapshot_iterator *it)
+{
+	struct rtnet_ipv4_net_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_net_route_data *data;
+	unsigned routes;
+	int err;
+
+	routes = allocated_net_routes;
+	if (!routes)
+		return VFILE_SEQ_EMPTY;
+
+	data = kmalloc(sizeof(*data) * routes, GFP_KERNEL);
+	if (data == NULL)
+		return NULL;
+
+	err = rtnet_ipv4_module_lock(NULL);
+	if (err < 0) {
+		kfree(data);
+		return VFILE_SEQ_EMPTY;
+	}
+
+	priv->key = -1;
+	priv->entry_ptr = NULL;
+	return data;
+}
+
+static void rtnet_ipv4_net_route_end(struct xnvfile_snapshot_iterator *it,
+				     void *buf)
+{
+	rtnet_ipv4_module_unlock(NULL);
+	kfree(buf);
+}
+
+static int rtnet_ipv4_net_route_next(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	struct rtnet_ipv4_net_route_priv *priv = xnvfile_iterator_priv(it);
+	struct rtnet_ipv4_net_route_data *p = data;
+
+	if (priv->entry_ptr == NULL) {
+		if (++priv->key >= NET_HASH_TBL_SIZE + 1)
+			return 0;
+
+		priv->entry_ptr = net_hash_tbl[priv->key];
+		if (priv->entry_ptr == NULL)
+			return VFILE_SEQ_SKIP;
+	}
+
+	p->key = priv->key;
+	p->dest_net_ip = priv->entry_ptr->dest_net_ip;
+	p->dest_net_mask = priv->entry_ptr->dest_net_mask;
+	p->gw_ip = priv->entry_ptr->gw_ip;
+
+	priv->entry_ptr = priv->entry_ptr->next;
+
+	return 1;
+}
+
+static int rtnet_ipv4_net_route_show(struct xnvfile_snapshot_iterator *it,
+				     void *data)
+{
+	struct rtnet_ipv4_net_route_data *p = data;
+
+	if (p == NULL) {
+		xnvfile_printf(it, "Hash\tDestination\tMask\t\t\tGateway\n");
+		return 0;
+	}
+
+	if (p->key < NET_HASH_TBL_SIZE)
+		xnvfile_printf(it,
+			       "%02X\t%u.%u.%u.%-3u\t%u.%u.%u.%-3u"
+			       "\t\t%u.%u.%u.%-3u\n",
+			       p->key, NIPQUAD(p->dest_net_ip),
+			       NIPQUAD(p->dest_net_mask), NIPQUAD(p->gw_ip));
+	else
+		xnvfile_printf(it,
+			       "*\t%u.%u.%u.%-3u\t%u.%u.%u.%-3u\t\t"
+			       "%u.%u.%u.%-3u\n",
+			       NIPQUAD(p->dest_net_ip),
+			       NIPQUAD(p->dest_net_mask), NIPQUAD(p->gw_ip));
+
+	return 0;
+}
+
+static struct xnvfile_snapshot_ops rtnet_ipv4_net_route_vfile_ops = {
+	.begin = rtnet_ipv4_net_route_begin,
+	.end = rtnet_ipv4_net_route_end,
+	.next = rtnet_ipv4_net_route_next,
+	.show = rtnet_ipv4_net_route_show,
+};
+
+static struct xnvfile_snapshot rtnet_ipv4_net_route_vfile = {
+    .entry = {
+	.lockops = &rtnet_ipv4_net_route_lock_ops,
+    },
+    .privsz = sizeof(struct rtnet_ipv4_net_route_priv),
+    .datasz = sizeof(struct rtnet_ipv4_net_route_data),
+    .tag = &net_route_tag,
+    .ops = &rtnet_ipv4_net_route_vfile_ops,
+};
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+static int __init rt_route_proc_register(void)
+{
+	int err;
+
+	err = xnvfile_init_regular("route", &rtnet_ipv4_route_vfile,
+				   &ipv4_proc_root);
+	if (err < 0)
+		goto err1;
+
+	err = xnvfile_init_snapshot("host_route", &rtnet_ipv4_host_route_vfile,
+				    &ipv4_proc_root);
+	if (err < 0)
+		goto err2;
+
+	/* create "arp" as an alias for "host_route" */
+	err = xnvfile_init_link("arp", "host_route", &rtnet_ipv4_arp_vfile,
+				&ipv4_proc_root);
+	if (err < 0)
+		goto err3;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	err = xnvfile_init_snapshot("net_route", &rtnet_ipv4_net_route_vfile,
+				    &ipv4_proc_root);
+	if (err < 0)
+		goto err4;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	return 0;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+err4:
+	xnvfile_destroy_link(&rtnet_ipv4_arp_vfile);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+err3:
+	xnvfile_destroy_snapshot(&rtnet_ipv4_host_route_vfile);
+
+err2:
+	xnvfile_destroy_regular(&rtnet_ipv4_route_vfile);
+
+err1:
+	printk(KERN_ERR "RTnet: unable to initialize /proc entries (route)\n");
+	return err;
+}
+
+static void rt_route_proc_unregister(void)
+{
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	xnvfile_destroy_snapshot(&rtnet_ipv4_net_route_vfile);
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+	xnvfile_destroy_link(&rtnet_ipv4_arp_vfile);
+	xnvfile_destroy_snapshot(&rtnet_ipv4_host_route_vfile);
+	xnvfile_destroy_regular(&rtnet_ipv4_route_vfile);
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/***
+ *  rt_alloc_host_route - allocates new host route
+ */
+static inline struct host_route *rt_alloc_host_route(void)
+{
+	rtdm_lockctx_t context;
+	struct host_route *rt;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	if ((rt = free_host_route) != NULL) {
+		free_host_route = rt->next;
+		allocated_host_routes++;
+	}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+	return rt;
+}
+
+/***
+ *  rt_free_host_route - releases host route
+ *
+ *  Note: must be called with host_table_lock held
+ */
+static inline void rt_free_host_route(struct host_route *rt)
+{
+	rt->next = free_host_route;
+	free_host_route = rt;
+	allocated_host_routes--;
+}
+
+/***
+ *  rt_ip_route_add_host: add or update host route
+ */
+int rt_ip_route_add_host(u32 addr, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *new_route;
+	struct host_route *rt;
+	unsigned int key;
+	int ret = 0;
+
+	rtdm_lock_get_irqsave(&rtdev->rtdev_lock, context);
+
+	if ((!test_bit(PRIV_FLAG_UP, &rtdev->priv_flags) ||
+	     test_and_set_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags))) {
+		rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+		return -EBUSY;
+	}
+
+	rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+
+	if ((new_route = rt_alloc_host_route()) != NULL) {
+		new_route->dest_host.ip = addr;
+		new_route->dest_host.rtdev = rtdev;
+		memcpy(new_route->dest_host.dev_addr, dev_addr,
+		       rtdev->addr_len);
+	}
+
+	key = ntohl(addr) & HOST_HASH_KEY_MASK;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	xnvfile_touch_tag(&host_route_tag);
+
+	rt = host_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_host.ip == addr) &&
+		    (rt->dest_host.rtdev->local_ip == rtdev->local_ip)) {
+			rt->dest_host.rtdev = rtdev;
+			memcpy(rt->dest_host.dev_addr, dev_addr,
+			       rtdev->addr_len);
+
+			if (new_route)
+				rt_free_host_route(new_route);
+
+			rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+			goto out;
+		}
+
+		rt = rt->next;
+	}
+
+	if (new_route) {
+		new_route->next = host_hash_tbl[key];
+		host_hash_tbl[key] = new_route;
+
+		rtdm_lock_put_irqrestore(&host_table_lock, context);
+	} else {
+		rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: no more host routes available\n");
+		ret = -ENOBUFS;
+	}
+
+out:
+	clear_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags);
+
+	return ret;
+}
+
+/***
+ *  rt_ip_route_del_host - deletes specified host route
+ */
+int rt_ip_route_del_host(u32 addr, struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *rt;
+	struct host_route **last_ptr;
+	unsigned int key;
+
+	key = ntohl(addr) & HOST_HASH_KEY_MASK;
+	last_ptr = &host_hash_tbl[key];
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	rt = host_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_host.ip == addr) &&
+		    (!rtdev ||
+		     (rt->dest_host.rtdev->local_ip == rtdev->local_ip))) {
+			*last_ptr = rt->next;
+
+			rt_free_host_route(rt);
+
+			xnvfile_touch_tag(&host_route_tag);
+
+			rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+			return 0;
+		}
+
+		last_ptr = &rt->next;
+		rt = rt->next;
+	}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+	return -ENOENT;
+}
+
+/***
+ *  rt_ip_route_del_all - deletes all routes associated with a specified device
+ */
+void rt_ip_route_del_all(struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *host_rt;
+	struct host_route **last_host_ptr;
+	unsigned int key;
+	u32 ip;
+
+	for (key = 0; key < HOST_HASH_TBL_SIZE; key++) {
+	host_start_over:
+		last_host_ptr = &host_hash_tbl[key];
+
+		rtdm_lock_get_irqsave(&host_table_lock, context);
+
+		host_rt = host_hash_tbl[key];
+		while (host_rt != NULL) {
+			if (host_rt->dest_host.rtdev == rtdev) {
+				*last_host_ptr = host_rt->next;
+
+				rt_free_host_route(host_rt);
+
+				rtdm_lock_put_irqrestore(&host_table_lock,
+							 context);
+
+				goto host_start_over;
+			}
+
+			last_host_ptr = &host_rt->next;
+			host_rt = host_rt->next;
+		}
+
+		rtdm_lock_put_irqrestore(&host_table_lock, context);
+	}
+
+	if ((ip = rtdev->local_ip) != 0)
+		rt_ip_route_del_host(ip, rtdev);
+}
+
+/***
+ *  rt_ip_route_get_host - check if specified host route is resolved
+ */
+int rt_ip_route_get_host(u32 addr, char *if_name, unsigned char *dev_addr,
+			 struct rtnet_device *rtdev)
+{
+	rtdm_lockctx_t context;
+	struct host_route *rt;
+	unsigned int key;
+
+	key = ntohl(addr) & HOST_HASH_KEY_MASK;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	rt = host_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_host.ip == addr) &&
+		    (!rtdev ||
+		     rt->dest_host.rtdev->local_ip == rtdev->local_ip)) {
+			memcpy(dev_addr, rt->dest_host.dev_addr,
+			       rt->dest_host.rtdev->addr_len);
+			strncpy(if_name, rt->dest_host.rtdev->name, IFNAMSIZ);
+
+			rtdm_lock_put_irqrestore(&host_table_lock, context);
+			return 0;
+		}
+
+		rt = rt->next;
+	}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+	return -ENOENT;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+/***
+ *  rt_alloc_net_route - allocates new network route
+ */
+static inline struct net_route *rt_alloc_net_route(void)
+{
+	rtdm_lockctx_t context;
+	struct net_route *rt;
+
+	rtdm_lock_get_irqsave(&net_table_lock, context);
+
+	if ((rt = free_net_route) != NULL) {
+		free_net_route = rt->next;
+		allocated_net_routes++;
+	}
+
+	rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+	return rt;
+}
+
+/***
+ *  rt_free_net_route - releases network route
+ *
+ *  Note: must be called with net_table_lock held
+ */
+static inline void rt_free_net_route(struct net_route *rt)
+{
+	rt->next = free_net_route;
+	free_net_route = rt;
+	allocated_net_routes--;
+}
+
+/***
+ *  rt_ip_route_add_net: add or update network route
+ */
+int rt_ip_route_add_net(u32 addr, u32 mask, u32 gw_addr)
+{
+	rtdm_lockctx_t context;
+	struct net_route *new_route;
+	struct net_route *rt;
+	struct net_route **last_ptr;
+	unsigned int key;
+	u32 shifted_mask;
+
+	addr &= mask;
+
+	if ((new_route = rt_alloc_net_route()) != NULL) {
+		new_route->dest_net_ip = addr;
+		new_route->dest_net_mask = mask;
+		new_route->gw_ip = gw_addr;
+	}
+
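+	/*
+	 * The route is only hashable if its netmask covers every bit that
+	 * feeds the hash key; otherwise it must go into the catch-all
+	 * bucket at index NET_HASH_TBL_SIZE, which lookups scan last.
+	 */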
+	shifted_mask = NET_HASH_KEY_MASK << net_hash_key_shift;
+	if ((mask & shifted_mask) == shifted_mask)
+		key = (ntohl(addr) >> net_hash_key_shift) & NET_HASH_KEY_MASK;
+	else
+		key = NET_HASH_TBL_SIZE;
+	last_ptr = &net_hash_tbl[key];
+
+	rtdm_lock_get_irqsave(&net_table_lock, context);
+
+	xnvfile_touch_tag(&net_route_tag);
+
+	rt = net_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_net_ip == addr) && (rt->dest_net_mask == mask)) {
+			rt->gw_ip = gw_addr;
+
+			if (new_route)
+				rt_free_net_route(new_route);
+
+			rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+			return 0;
+		}
+
+		last_ptr = &rt->next;
+		rt = rt->next;
+	}
+
+	if (new_route) {
+		new_route->next = *last_ptr;
+		*last_ptr = new_route;
+
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+		return 0;
+	} else {
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: no more network routes available\n");
+		return -ENOBUFS;
+	}
+}
+
+/***
+ *  rt_ip_route_del_net - deletes specified network route
+ */
+int rt_ip_route_del_net(u32 addr, u32 mask)
+{
+	rtdm_lockctx_t context;
+	struct net_route *rt;
+	struct net_route **last_ptr;
+	unsigned int key;
+	u32 shifted_mask;
+
+	addr &= mask;
+
+	shifted_mask = NET_HASH_KEY_MASK << net_hash_key_shift;
+	if ((mask & shifted_mask) == shifted_mask)
+		key = (ntohl(addr) >> net_hash_key_shift) & NET_HASH_KEY_MASK;
+	else
+		key = NET_HASH_TBL_SIZE;
+	last_ptr = &net_hash_tbl[key];
+
+	rtdm_lock_get_irqsave(&net_table_lock, context);
+
+	rt = net_hash_tbl[key];
+	while (rt != NULL) {
+		if ((rt->dest_net_ip == addr) && (rt->dest_net_mask == mask)) {
+			*last_ptr = rt->next;
+
+			rt_free_net_route(rt);
+
+			xnvfile_touch_tag(&net_route_tag);
+
+			rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+			return 0;
+		}
+
+		last_ptr = &rt->next;
+		rt = rt->next;
+	}
+
+	rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+	return -ENOENT;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+/***
+ *  rt_ip_route_output - looks up output route
+ *
+ *  Note: increments refcount on returned rtdev in rt_buf
+ */
+int rt_ip_route_output(struct dest_route *rt_buf, u32 daddr, u32 saddr)
+{
+	rtdm_lockctx_t context;
+	struct host_route *host_rt;
+	unsigned int key;
+
+#ifndef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+#define DADDR daddr
+#else
+#define DADDR real_daddr
+
+	struct net_route *net_rt;
+	int lookup_gw = 1;
+	u32 real_daddr = daddr;
+
+restart:
+#endif /* !CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	key = ntohl(daddr) & HOST_HASH_KEY_MASK;
+
+	rtdm_lock_get_irqsave(&host_table_lock, context);
+
+	host_rt = host_hash_tbl[key];
+	if (likely(saddr == INADDR_ANY))
+		while (host_rt != NULL) {
+			if (host_rt->dest_host.ip == daddr) {
+			host_route_found:
+				if (!rtdev_reference(
+					    host_rt->dest_host.rtdev)) {
+					rtdm_lock_put_irqrestore(
+						&host_table_lock, context);
+					goto next;
+				}
+
+				memcpy(rt_buf->dev_addr,
+				       &host_rt->dest_host.dev_addr,
+				       sizeof(rt_buf->dev_addr));
+				rt_buf->rtdev = host_rt->dest_host.rtdev;
+
+				rtdm_lock_put_irqrestore(&host_table_lock,
+							 context);
+
+				rt_buf->ip = DADDR;
+
+				return 0;
+			}
+		next:
+			host_rt = host_rt->next;
+		}
+	else
+		while (host_rt != NULL) {
+			if ((host_rt->dest_host.ip == daddr) &&
+			    (host_rt->dest_host.rtdev->local_ip == saddr))
+				goto host_route_found;
+			host_rt = host_rt->next;
+		}
+
+	rtdm_lock_put_irqrestore(&host_table_lock, context);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	if (lookup_gw) {
+		lookup_gw = 0;
+		key = (ntohl(daddr) >> net_hash_key_shift) & NET_HASH_KEY_MASK;
+
+		rtdm_lock_get_irqsave(&net_table_lock, context);
+
+		net_rt = net_hash_tbl[key];
+		while (net_rt != NULL) {
+			if (net_rt->dest_net_ip ==
+			    (daddr & net_rt->dest_net_mask)) {
+				daddr = net_rt->gw_ip;
+
+				rtdm_lock_put_irqrestore(&net_table_lock,
+							 context);
+
+				/* start over, now using the gateway ip as destination */
+				goto restart;
+			}
+
+			net_rt = net_rt->next;
+		}
+
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+
+		/* last try: no hash key */
+		rtdm_lock_get_irqsave(&net_table_lock, context);
+
+		net_rt = net_hash_tbl[NET_HASH_TBL_SIZE];
+		while (net_rt != NULL) {
+			if (net_rt->dest_net_ip ==
+			    (daddr & net_rt->dest_net_mask)) {
+				daddr = net_rt->gw_ip;
+
+				rtdm_lock_put_irqrestore(&net_table_lock,
+							 context);
+
+				/* start over, now using the gateway ip as destination */
+				goto restart;
+			}
+
+			net_rt = net_rt->next;
+		}
+
+		rtdm_lock_put_irqrestore(&net_table_lock, context);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+	/*ERRMSG*/ rtdm_printk("RTnet: host %u.%u.%u.%u unreachable\n",
+			       NIPQUAD(daddr));
+	return -EHOSTUNREACH;
+}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER
+int rt_ip_route_forward(struct rtskb *rtskb, u32 daddr)
+{
+	struct rtnet_device *rtdev = rtskb->rtdev;
+	struct dest_route dest;
+
+	if (likely((daddr == rtdev->local_ip) ||
+		   (daddr == rtdev->broadcast_ip) ||
+		   (rtdev->flags & IFF_LOOPBACK)))
+		return 0;
+
+	if (rtskb_acquire(rtskb, &global_pool) != 0) {
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: router overloaded, dropping packet\n");
+		goto error;
+	}
+
+	if (rt_ip_route_output(&dest, daddr, INADDR_ANY) < 0) {
+		/*ERRMSG*/ rtdm_printk(
+			"RTnet: unable to forward packet from %u.%u.%u.%u\n",
+			NIPQUAD(rtskb->nh.iph->saddr));
+		goto error;
+	}
+
+	rtskb->rtdev = dest.rtdev;
+	rtskb->priority = ROUTER_FORWARD_PRIO;
+
+	if ((dest.rtdev->hard_header) &&
+	    (dest.rtdev->hard_header(rtskb, dest.rtdev, ETH_P_IP, dest.dev_addr,
+				     dest.rtdev->dev_addr, rtskb->len) < 0))
+		goto error;
+
+	rtdev_xmit(rtskb);
+
+	return 1;
+
+error:
+	kfree_rtskb(rtskb);
+	return 1;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_ROUTER */
+
+/***
+ *  rt_ip_routing_init: initialize
+ */
+int __init rt_ip_routing_init(void)
+{
+	int i;
+
+	/* chain all entries; the last ->next stays NULL by static
+	   zero-initialization */
+	for (i = 0; i < CONFIG_XENO_DRIVERS_NET_RTIPV4_HOST_ROUTES - 1; i++)
+		host_routes[i].next = &host_routes[i + 1];
+	free_host_route = &host_routes[0];
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING
+	for (i = 0; i < CONFIG_XENO_DRIVERS_NET_RTIPV4_NET_ROUTES - 1; i++)
+		net_routes[i].next = &net_routes[i + 1];
+	free_net_route = &net_routes[0];
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_NETROUTING */
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	return rt_route_proc_register();
+#else /* !CONFIG_XENO_OPT_VFILE */
+	return 0;
+#endif /* CONFIG_XENO_OPT_VFILE */
+}
+
+/***
+ *  rt_ip_routing_release
+ */
+void rt_ip_routing_release(void)
+{
+#ifdef CONFIG_XENO_OPT_VFILE
+	rt_route_proc_unregister();
+#endif /* CONFIG_XENO_OPT_VFILE */
+}
+
+EXPORT_SYMBOL_GPL(rt_ip_route_add_host);
+EXPORT_SYMBOL_GPL(rt_ip_route_del_host);
+EXPORT_SYMBOL_GPL(rt_ip_route_del_all);
+EXPORT_SYMBOL_GPL(rt_ip_route_output);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Kconfig
new file mode 100644
index 0000000..a69d346
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Kconfig
@@ -0,0 +1,18 @@
+config XENO_DRIVERS_NET_RTIPV4_TCP
+    tristate "TCP support"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    help
+    Enables TCP support in the RTnet Real-Time IPv4 protocol.
+
+    When RTnet IPv4 is enabled but this feature is disabled, TCP traffic
+    is forwarded to the Linux network stack.
+
+config XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+    bool "TCP error injection"
+    depends on XENO_DRIVERS_NET_RTIPV4_TCP
+    help
+    Enables error injection for incoming TCP packets. This can be used
+    to test both protocol and application behavior under error
+    conditions. The per-socket error rate is 0 by default and can be
+    tuned during runtime via the error_rate and multi_error module
+    parameters.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Makefile
new file mode 100644
index 0000000..88a9acf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/Makefile
@@ -0,0 +1,7 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP) += rttcp.o
+
+rttcp-y := \
+	tcp.o \
+	timerwheel.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/tcp.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/tcp.c
new file mode 100644
index 0000000..71628ba
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/tcp.c
@@ -0,0 +1,2453 @@
+/***
+ *
+ *  ipv4/tcp/tcp.c - TCP implementation for RTnet
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/list.h>
+#include <linux/skbuff.h>
+#include <linux/err.h>
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/completion.h>
+#include <net/tcp_states.h>
+#include <net/tcp.h>
+
+#include <rtdm/driver.h>
+#include <rtnet_rtpc.h>
+#include <rtskb.h>
+#include <rtdev.h>
+#include <rtnet_port.h>
+#include <rtnet_checksum.h>
+#include <ipv4/tcp.h>
+#include <ipv4/ip_sock.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/route.h>
+#include <ipv4/af_inet.h>
+#include "timerwheel.h"
+
+static unsigned int close_timeout = 1000;
+module_param(close_timeout, uint, 0664);
+MODULE_PARM_DESC(close_timeout,
+		 "max time (ms) to wait during close for FIN-ACK handshake to complete, default 1000");
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+
+static unsigned int error_rate;
+module_param(error_rate, uint, 0664);
+MODULE_PARM_DESC(error_rate, "simulate packet loss after every n packets");
+
+static unsigned int multi_error = 1;
+module_param(multi_error, uint, 0664);
+MODULE_PARM_DESC(multi_error, "on simulated error, drop n packets in a row");
+
+static unsigned int counter_start = 1234;
+module_param(counter_start, uint, 0664);
+MODULE_PARM_DESC(counter_start, "start value of per-socket packet counter "
+				"(used for error injection)");
+
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+
+struct tcp_sync {
+	u32 seq;
+	u32 ack_seq;
+
+	/* Local window size sent to peer  */
+	u16 window;
+	/* Last received destination peer window size */
+	u16 dst_window;
+};
+
+/* connection timeout: 1 second (value in nanoseconds) */
+static const nanosecs_rel_t rt_tcp_connection_timeout = 1000000000ull;
+
+/* retransmission timerwheel timeout */
+static const u64 rt_tcp_retransmit_timeout = 100000000ull;
+
+/* keepalive constants */
+/* probe interval: 75 seconds */
+static const u64 rt_tcp_keepalive_intvl = 75000000000ull;
+/* 9 probes to send */
+static const u8 rt_tcp_keepalive_probes = 9;
+/* idle timeout: 2 hours */
+static const u64 rt_tcp_keepalive_timeout = 7200000000000ull;
+
+/* retransmission timeout: 50 milliseconds */
+static const nanosecs_rel_t rt_tcp_retransmission_timeout = 50000000ull;
+/* maximum allowed number of retransmissions */
+static const unsigned int max_retransmits = 3;
+
+struct tcp_keepalive {
+	u8 enabled;
+	u32 probes;
+	rtdm_timer_t timer;
+};
+
+/***
+ *  This structure is used to register a TCP socket for reception. All
+ *  structures are kept in the port_registry array to increase the cache
+ *  locality during the critical port lookup in rt_tcp_v4_lookup().
+ */
+
+/* if dport & daddr are zeroes, it means a listening socket */
+/* otherwise this is a data structure, which describes a connection */
+
+/* NB: sock->prot.inet.saddr & sock->prot.inet.sport values are not used */
+struct tcp_socket {
+	struct rtsocket sock; /* set up by rt_socket_init() implicitly */
+	u16 sport; /* local port */
+	u32 saddr; /* local ip-addr */
+	u16 dport; /* destination port */
+	u32 daddr; /* destination ip-addr */
+
+	u8 tcp_state; /* tcp connection state */
+
+	u8 is_binding; /* if set, tcp socket is in port binding progress */
+	u8 is_bound; /* if set, tcp socket is already port bound */
+	u8 is_valid; /* if set, read() and write() can process */
+	u8 is_accepting; /* if set, accept() is in progress */
+	u8 is_accepted; /* if set, accept() is already called */
+	u8 is_closed; /* close() call for resource deallocation follows */
+
+	rtdm_event_t send_evt; /* write request is permissible */
+	rtdm_event_t conn_evt; /* connection event */
+
+	struct dest_route rt;
+	struct tcp_sync sync;
+	struct tcp_keepalive keepalive;
+	rtdm_lock_t socket_lock;
+
+	struct hlist_node link;
+
+	nanosecs_rel_t sk_sndtimeo;
+
+	/* retransmission routine data */
+	u32 nacked_first;
+	unsigned int timer_state;
+	struct rtskb_queue retransmit_queue;
+	struct timerwheel_timer timer;
+
+	struct completion fin_handshake;
+	rtdm_nrtsig_t close_sig;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+	unsigned int packet_counter;
+	unsigned int error_rate;
+	unsigned int multi_error;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+};
+
+struct rt_tcp_dispatched_packet_send_cmd {
+	__be32 flags; /* packet flags value */
+	struct tcp_socket *ts;
+};
+
+/***
+ *  Automatic port number assignment
+ *
+ *  The automatic assignment of port numbers to unbound sockets is realised as
+ *  a simple addition of two values:
+ *   - the socket ID (lower 8 bits of file descriptor) which is set during
+ *     initialisation and left unchanged afterwards
+ *   - the start value tcp_auto_port_start which is a module parameter
+ *
+ *  tcp_auto_port_mask, also a module parameter, is used to define the range of
+ *  port numbers which are used for automatic assignment. Any number within
+ *  this range will be rejected when passed to bind_rt().
+ */
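+/*
+ * For instance, with the default tcp_auto_port_start of 1024, the socket
+ * that gets bitmap index 5 in rt_tcp_socket_create() is assigned local
+ * port 1024 + 5 = 1029.
+ */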
+
+MODULE_LICENSE("GPL");
+
+static struct {
+	struct rtdm_dev_context dummy;
+	struct tcp_socket rst_socket;
+} rst_socket_container;
+
+#define rst_fd (&rst_socket_container.dummy.fd)
+#define rst_socket (*(struct tcp_socket *)rtdm_fd_to_private(rst_fd))
+
+static u32 tcp_auto_port_start = 1024;
+static u32 tcp_auto_port_mask = ~(RT_TCP_SOCKETS - 1);
+static u32 free_ports = RT_TCP_SOCKETS;
+#define RT_PORT_BITMAP_WORDS                                                   \
+	((RT_TCP_SOCKETS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+static unsigned long port_bitmap[RT_PORT_BITMAP_WORDS];
+
+static struct tcp_socket *port_registry[RT_TCP_SOCKETS];
+static DEFINE_RTDM_LOCK(tcp_socket_base_lock);
+
+static struct hlist_head port_hash[RT_TCP_SOCKETS * 2];
+#define port_hash_mask (RT_TCP_SOCKETS * 2 - 1)
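+/*
+ * The bucket is selected by sport & port_hash_mask; sizing the table at
+ * twice RT_TCP_SOCKETS keeps the chains short. This assumes that
+ * RT_TCP_SOCKETS is a power of two, as tcp_auto_port_mask above does,
+ * too.
+ */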
+
+module_param(tcp_auto_port_start, uint, 0444);
+module_param(tcp_auto_port_mask, uint, 0444);
+MODULE_PARM_DESC(tcp_auto_port_start, "Start of automatically assigned "
+				      "port range for TCP");
+MODULE_PARM_DESC(tcp_auto_port_mask, "Mask that defines port range for TCP "
+				     "for automatic assignment");
+
+static inline struct tcp_socket *port_hash_search(u32 saddr, u16 sport)
+{
+	u32 bucket = sport & port_hash_mask;
+	struct tcp_socket *ts;
+
+	hlist_for_each_entry (ts, &port_hash[bucket], link)
+		if (ts->sport == sport &&
+		    (saddr == INADDR_ANY || ts->saddr == saddr ||
+		     ts->saddr == INADDR_ANY))
+			return ts;
+
+	return NULL;
+}
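+/*
+ * INADDR_ANY acts as a wildcard on both sides of the match: a socket
+ * bound to INADDR_ANY answers lookups for any local address, and a
+ * lookup with saddr == INADDR_ANY matches any socket bound to the port.
+ */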
+
+static int port_hash_insert(struct tcp_socket *ts, u32 saddr, u16 sport)
+{
+	u32 bucket;
+
+	if (port_hash_search(saddr, sport))
+		return -EADDRINUSE;
+
+	bucket = sport & port_hash_mask;
+	ts->saddr = saddr;
+	ts->sport = sport;
+	ts->daddr = 0;
+	ts->dport = 0;
+
+	hlist_add_head(&ts->link, &port_hash[bucket]);
+
+	return 0;
+}
+
+static inline void port_hash_del(struct tcp_socket *ts)
+{
+	hlist_del(&ts->link);
+}
+
+/***
+ *  rt_tcp_v4_lookup
+ */
+static struct rtsocket *rt_tcp_v4_lookup(u32 daddr, u16 dport)
+{
+	rtdm_lockctx_t context;
+	struct tcp_socket *ts;
+	int ret;
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+	ts = port_hash_search(daddr, dport);
+
+	if (ts != NULL) {
+		ret = rt_socket_reference(&ts->sock);
+		if (ret == 0 || (ret == -EIDRM && ts->is_closed)) {
+			rtdm_lock_put_irqrestore(&tcp_socket_base_lock,
+						 context);
+
+			return &ts->sock;
+		}
+	}
+
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	return NULL;
+}
+
+/* test seq1 <= seq2 */
+static inline int rt_tcp_before(__u32 seq1, __u32 seq2)
+{
+	return (__s32)(seq1 - seq2) <= 0;
+}
+
+/* test seq1 >= seq2 */
+static inline int rt_tcp_after(__u32 seq1, __u32 seq2)
+{
+	return (__s32)(seq2 - seq1) <= 0;
+}
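+/*
+ * Both helpers rely on wraparound-safe serial-number arithmetic: the
+ * difference is evaluated as a signed 32-bit value, so e.g.
+ * rt_tcp_before(0xfffffffe, 2) holds because (__s32)(0xfffffffe - 2) is
+ * negative, correctly ordering sequence numbers across the 2^32 wrap.
+ */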
+
+static inline u32 rt_tcp_compute_ack_seq(struct tcphdr *th, u32 len)
+{
+	u32 ack_seq = ntohl(th->seq) + len;
+
+	if (unlikely(th->syn || th->fin))
+		ack_seq++;
+
+	return ack_seq;
+}
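+/*
+ * SYN and FIN each consume one sequence number of their own, which is
+ * why the acknowledgment covers one octet more than the payload length
+ * for such segments.
+ */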
+
+static void rt_tcp_keepalive_start(struct tcp_socket *ts)
+{
+	if (ts->tcp_state == TCP_ESTABLISHED) {
+		rtdm_timer_start(&ts->keepalive.timer, rt_tcp_keepalive_timeout,
+				 0, RTDM_TIMERMODE_RELATIVE);
+	}
+}
+
+static void rt_tcp_keepalive_stop(struct tcp_socket *ts)
+{
+	if (ts->tcp_state == TCP_ESTABLISHED) {
+		rtdm_timer_stop(&ts->keepalive.timer);
+	}
+}
+
+#ifdef YET_UNUSED
+static void rt_tcp_keepalive_timer(rtdm_timer_t *timer);
+
+static void rt_tcp_keepalive_enable(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	struct tcp_keepalive *keepalive;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	keepalive = &ts->keepalive;
+
+	if (keepalive->enabled) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	keepalive->probes = rt_tcp_keepalive_probes;
+
+	rtdm_timer_init(&keepalive->timer, rt_tcp_keepalive_timer,
+			"RT TCP keepalive timer");
+
+	rt_tcp_keepalive_start(ts);
+
+	keepalive->enabled = 1;
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+}
+#endif
+
+static void rt_tcp_keepalive_disable(struct tcp_socket *ts)
+{
+	struct tcp_keepalive *keepalive;
+
+	keepalive = &ts->keepalive;
+
+	if (!keepalive->enabled) {
+		return;
+	}
+
+	rt_tcp_keepalive_stop(ts);
+	rtdm_timer_destroy(&keepalive->timer);
+
+	keepalive->enabled = 0;
+}
+
+static void rt_tcp_keepalive_feed(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	struct tcp_keepalive *keepalive;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	keepalive = &ts->keepalive;
+
+	if (ts->tcp_state == TCP_ESTABLISHED && ts->keepalive.enabled) {
+		keepalive->probes = rt_tcp_keepalive_probes;
+
+		/* Restart keepalive timer */
+		rtdm_timer_stop(&keepalive->timer);
+		rtdm_timer_start(&keepalive->timer, rt_tcp_keepalive_timeout, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	} else {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	}
+}
+
+static int rt_tcp_socket_invalidate(struct tcp_socket *ts, u8 to_state)
+{
+	int signal = ts->is_valid;
+
+	ts->tcp_state = to_state;
+
+	/*
+      multiple invalidation could happen without fuss,
+      see rt_tcp_close(), rt_tcp_rcv(), timeout expiration etc.
+    */
+	if (ts->is_valid) {
+		ts->is_valid = 0;
+
+		if (ts->keepalive.enabled) {
+			rt_tcp_keepalive_stop(ts);
+		}
+	}
+
+	return signal;
+}
+
+static void rt_tcp_socket_invalidate_signal(struct tcp_socket *ts)
+{
+	/* awake all readers and writers destroying events */
+	rtdm_sem_destroy(&ts->sock.pending_sem);
+	rtdm_event_destroy(&ts->send_evt);
+}
+
+static void rt_tcp_socket_validate(struct tcp_socket *ts)
+{
+	ts->tcp_state = TCP_ESTABLISHED;
+
+	ts->is_valid = 1;
+
+	if (ts->keepalive.enabled) {
+		rt_tcp_keepalive_start(ts);
+	}
+
+	rtdm_event_init(&ts->send_evt, 0);
+}
+
+/***
+ *  rt_tcp_retransmit_handler - timerwheel handler to process a retransmission
+ *  @data: pointer to a rttcp socket structure
+ */
+static void rt_tcp_retransmit_handler(void *data)
+{
+	struct tcp_socket *ts = (struct tcp_socket *)data;
+	struct rtskb *skb;
+	rtdm_lockctx_t context;
+	int signal;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (unlikely(rtskb_queue_empty(&ts->retransmit_queue))) {
+		/* handled, but retransmission queue is empty */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rtdm_printk("rttcp: bug in RT TCP retransmission routine\n");
+		return;
+	}
+
+	if (ts->tcp_state == TCP_CLOSE) {
+		/* socket is already closed */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if (ts->timer_state) {
+		/* more tries */
+		ts->timer_state--;
+		timerwheel_add_timer(&ts->timer, rt_tcp_retransmission_timeout);
+
+		/* warning, rtskb_clone is under lock */
+		skb = rtskb_clone(ts->retransmit_queue.first,
+				  &ts->sock.skb_pool);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		/* BUG, window changes are not respected */
+		if (unlikely(rtdev_xmit(skb) != 0)) {
+			kfree_rtskb(skb);
+			rtdm_printk(
+				"rttcp: packet retransmission from timer failed\n");
+		}
+	} else {
+		ts->timer_state = max_retransmits;
+
+		/* report about connection lost */
+		signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		if (signal)
+			rt_tcp_socket_invalidate_signal(ts);
+
+		/* retransmission queue will be cleaned up in rt_tcp_socket_destruct */
+		rtdm_printk("rttcp: connection is lost by NACK timeout\n");
+	}
+}
+
+/***
+ *  rt_tcp_retransmit_ack - remove skbs from retransmission queue on ACK
+ *  @ts: rttcp socket
+ *  @ack_seq: received ACK sequence value
+ */
+static void rt_tcp_retransmit_ack(struct tcp_socket *ts, u32 ack_seq)
+{
+	struct rtskb *skb;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	/*
+      ACK, but retransmission queue is empty
+      This could happen on repeated ACKs
+    */
+	if (rtskb_queue_empty(&ts->retransmit_queue)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	/*
+      Check ts->nacked_first value firstly to ensure that
+      skb for retransmission is present in the queue, otherwise
+      retransmission queue will be drained completely
+    */
+	if (!rt_tcp_before(ts->nacked_first, ack_seq)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if (timerwheel_remove_timer(&ts->timer) != 0) {
+		/* already timed out */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+dequeue_loop:
+	if (ts->tcp_state == TCP_CLOSE) {
+		/* warn about queue safety in race with anyone,
+	   who closes the socket */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if ((skb = __rtskb_dequeue(&ts->retransmit_queue)) == NULL) {
+		ts->timer_state = max_retransmits;
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return;
+	}
+
+	if (rt_tcp_before(ts->nacked_first, ack_seq)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		kfree_rtskb(skb);
+		rtdm_lock_get_irqsave(&ts->socket_lock, context);
+		goto dequeue_loop;
+	}
+
+	/* Put NACKed skb back to queue */
+	/* BUG, need to respect half-acknowledged packets */
+	ts->nacked_first = ntohl(skb->h.th->seq) + 1;
+
+	__rtskb_queue_head(&ts->retransmit_queue, skb);
+
+	/* Have more packages in retransmission queue, restart the timer */
+	timerwheel_add_timer(&ts->timer, rt_tcp_retransmission_timeout);
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+}
+
+/***
+ *  rt_tcp_retransmit_send - enqueue a skb to retransmission queue (not locked)
+ *  @ts: rttcp socket
+ *  @skb: a copied skb for enqueueing
+ */
+static void rt_tcp_retransmit_send(struct tcp_socket *ts, struct rtskb *skb)
+{
+	if (rtskb_queue_empty(&ts->retransmit_queue)) {
+		/* retransmission queue is empty */
+		ts->nacked_first = ntohl(skb->h.th->seq) + 1;
+
+		__rtskb_queue_tail(&ts->retransmit_queue, skb);
+
+		timerwheel_add_timer(&ts->timer, rt_tcp_retransmission_timeout);
+	} else {
+		/* retransmission queue is not empty */
+		__rtskb_queue_tail(&ts->retransmit_queue, skb);
+	}
+}
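+/*
+ * Invariant maintained here and in rt_tcp_retransmit_ack(): the
+ * retransmission timer is armed exactly while the queue is non-empty,
+ * and nacked_first tracks the sequence position of the oldest segment
+ * still awaiting acknowledgment.
+ */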
+
+static int rt_ip_build_frame(struct rtskb *skb, struct rtsocket *sk,
+			     struct dest_route *rt, struct iphdr *iph)
+{
+	int ret;
+	struct rtnet_device *rtdev = rt->rtdev;
+
+	RTNET_ASSERT(rtdev->hard_header, return -EBADF;);
+
+	if (!rtdev_reference(rt->rtdev))
+		return -EIDRM;
+
+	iph->ihl = 5; /* 20 byte header only - no TCP options */
+
+	skb->nh.iph = iph;
+
+	iph->version = 4;
+	iph->tos = sk->prot.inet.tos;
+	iph->tot_len = htons(skb->len); /* length of IP header and IP payload */
+	iph->id = htons(0x00); /* zero IP frame id */
+	iph->frag_off = htons(IP_DF); /* and no more frames */
+	iph->ttl = 255;
+	iph->protocol = sk->protocol;
+	iph->saddr = rtdev->local_ip;
+	iph->daddr = rt->ip;
+	iph->check = 0; /* required to compute correct checksum */
+	iph->check = ip_fast_csum((u8 *)iph, 5 /*iph->ihl*/);
+
+	ret = rtdev->hard_header(skb, rtdev, ETH_P_IP, rt->dev_addr,
+				 rtdev->dev_addr, skb->len);
+	rtdev_dereference(rt->rtdev);
+
+	if (ret != rtdev->hard_header_len) {
+		rtdm_printk("rttcp: rt_ip_build_frame: error on lower level\n");
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void rt_tcp_build_header(struct tcp_socket *ts, struct rtskb *skb,
+				__be32 flags, u8 is_keepalive)
+{
+	u32 wcheck;
+	u8 tcphdrlen = 20;
+	u8 iphdrlen = 20;
+	struct tcphdr *th;
+
+	th = skb->h.th;
+	th->source = ts->sport;
+	th->dest = ts->dport;
+
+	th->seq = htonl(ts->sync.seq);
+
+	if (unlikely(is_keepalive))
+		th->seq--;
+
+	tcp_flag_word(th) = flags;
+	th->ack_seq = htonl(ts->sync.ack_seq);
+	th->window = htons(ts->sync.window);
+
+	th->doff = tcphdrlen >> 2; /* No options for now */
+	th->res1 = 0;
+	th->check = 0;
+	th->urg_ptr = 0;
+
+	/* compute checksum */
+	wcheck = rtnet_csum(th, tcphdrlen, 0);
+
+	if (skb->len - tcphdrlen - iphdrlen) {
+		wcheck = rtnet_csum(skb->data + tcphdrlen + iphdrlen,
+				      skb->len - tcphdrlen - iphdrlen, wcheck);
+	}
+
+	th->check =
+		tcp_v4_check(skb->len - iphdrlen, ts->saddr, ts->daddr, wcheck);
+}
+
+static int rt_tcp_segment(struct dest_route *rt, struct tcp_socket *ts,
+			  __be32 flags, u32 data_len, u8 *data_ptr,
+			  u8 is_keepalive)
+{
+	struct tcphdr *th;
+	struct rtsocket *sk = &ts->sock;
+	struct rtnet_device *rtdev = rt->rtdev;
+	struct rtskb *skb;
+	struct iphdr *iph;
+	struct rtskb *cloned_skb;
+	rtdm_lockctx_t context;
+
+	int ret;
+
+	u32 hh_len = (rtdev->hard_header_len + 15) & ~15;
+	u32 prio = (volatile unsigned int)sk->priority;
+	u32 mtu = rtdev->get_mtu(rtdev, prio);
+
+	u8 *data = NULL;
+
+	if ((skb = alloc_rtskb(mtu + hh_len + 15, &sk->skb_pool)) == NULL) {
+		rtdm_printk(
+			"rttcp: no more elements in skb_pool for allocation\n");
+		return -ENOBUFS;
+	}
+
+	/* rtskb_reserve(skb, hh_len + 20); */
+	rtskb_reserve(skb, hh_len);
+
+	iph = (struct iphdr *)rtskb_put(skb, 20); /* length of IP header */
+	skb->nh.iph = iph;
+
+	th = (struct tcphdr *)rtskb_put(skb, 20); /* length of TCP header */
+	skb->h.th = th;
+
+	/* clamp the payload to the local phy MTU before reserving room
+	   for it in the skb */
+	if (data_len > mtu)
+		data_len = mtu;
+
+	if (data_len) {
+		data = (u8 *)rtskb_put(skb,
+				       data_len); /* length of TCP payload */
+		memcpy(data, (void *)data_ptr, data_len);
+	}
+
+	skb->rtdev = rtdev;
+	skb->priority = prio;
+
+	/* do not validate socket connection on xmit
+       this should be done at upper level */
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	rt_tcp_build_header(ts, skb, flags, is_keepalive);
+
+	if ((ret = rt_ip_build_frame(skb, sk, rt, iph)) != 0) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto error;
+	}
+
+	/* add rtskb entry to the socket retransmission queue */
+	if (ts->tcp_state != TCP_CLOSE &&
+	    ((flags & (TCP_FLAG_SYN | TCP_FLAG_FIN)) || data_len)) {
+		/* rtskb_clone below is called under lock, this is an admission,
+	   because for now there is no rtskb copy by reference */
+		cloned_skb = rtskb_clone(skb, &ts->sock.skb_pool);
+		if (!cloned_skb) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_printk("rttcp: cann't clone skb\n");
+			ret = -ENOMEM;
+			goto error;
+		}
+
+		rt_tcp_retransmit_send(ts, cloned_skb);
+	}
+
+	/* need to update sync here, because it is safe way in
+       comparison with races on fast ACK response */
+	if (flags & (TCP_FLAG_FIN | TCP_FLAG_SYN))
+		ts->sync.seq++;
+
+	ts->sync.seq += data_len;
+	ts->sync.dst_window -= data_len;
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	/* ignore return value from rtdev_xmit */
+	/* the packet was enqueued and on error will be retransmitted later */
+	/* on critical error after retransmission timeout the connection will
+       be closed by connection lost */
+	rtdev_xmit(skb);
+
+	return data_len;
+
+error:
+	kfree_rtskb(skb);
+	return ret;
+}
+
+static int rt_tcp_send(struct tcp_socket *ts, __be32 flags)
+{
+	struct dest_route rt;
+	int ret;
+
+	/*
+     * We may not have a route yet during setup. But once it is set, it stays
+     * until the socket died.
+     */
+	if (likely(ts->rt.rtdev)) {
+		ret = rt_tcp_segment(&ts->rt, ts, flags, 0, NULL, 0);
+	} else {
+		ret = rt_ip_route_output(&rt, ts->daddr, ts->saddr);
+		if (ret == 0) {
+			ret = rt_tcp_segment(&rt, ts, flags, 0, NULL, 0);
+			rtdev_dereference(rt.rtdev);
+		}
+	}
+	if (ret < 0)
+		rtdm_printk("rttcp: can't send a packet: err %d\n", -ret);
+	return ret;
+}
+
+#ifdef YET_UNUSED
+static void rt_tcp_keepalive_timer(rtdm_timer_t *timer)
+{
+	rtdm_lockctx_t context;
+	struct tcp_keepalive *keepalive =
+		container_of(timer, struct tcp_keepalive, timer);
+
+	struct tcp_socket *ts =
+		container_of(keepalive, struct tcp_socket, keepalive);
+	int signal = 0;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (keepalive->probes) {
+		/* Send a probe */
+		if (rt_tcp_segment(&ts->rt, ts, 0, 0, NULL, 1) < 0) {
+			/* data receiving and sending is not possible anymore */
+			signal = rt_tcp_socket_invalidate(ts, TCP_TIME_WAIT);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		}
+
+		keepalive->probes--;
+		rtdm_timer_start_in_handler(&keepalive->timer,
+					    rt_tcp_keepalive_intvl, 0,
+					    RTDM_TIMERMODE_RELATIVE);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	} else {
+		/* data receiving and sending is not possible anymore */
+
+		signal = rt_tcp_socket_invalidate(ts, TCP_TIME_WAIT);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	}
+
+	if (signal)
+		rt_tcp_socket_invalidate_signal(ts);
+}
+#endif
+
+static inline u32 rt_tcp_initial_seq(void)
+{
+	uint64_t clock_val = rtdm_clock_read_monotonic();
+	return (u32)(clock_val ^ (clock_val >> 32));
+}
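+/*
+ * The initial sequence number is derived by XOR-folding the 64-bit
+ * monotonic clock into 32 bits; this is a lightweight randomization
+ * rather than the hashed ISN selection recommended by RFC 6528 for
+ * general-purpose stacks.
+ */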
+
+/***
+ *  rt_tcp_dest_socket
+ */
+static struct rtsocket *rt_tcp_dest_socket(struct rtskb *skb)
+{
+	struct tcphdr *th = skb->h.th;
+
+	u32 saddr = skb->nh.iph->saddr;
+	u32 daddr = skb->nh.iph->daddr;
+	u32 sport = th->source;
+	u32 dport = th->dest;
+
+	u32 data_len;
+
+	if (tcp_v4_check(skb->len, saddr, daddr,
+			 rtnet_csum(skb->data, skb->len, 0))) {
+		rtdm_printk("rttcp: invalid TCP packet checksum, dropped\n");
+		return NULL; /* Invalid checksum, drop the packet */
+	}
+
+	/* find the destination socket */
+	if ((skb->sk = rt_tcp_v4_lookup(daddr, dport)) == NULL) {
+		/*
+	  rtdm_printk("Not found addr:0x%08x, port: 0x%04x\n", daddr, dport);
+	*/
+		if (!th->rst) {
+			/* No listening socket found, send RST|ACK */
+			rst_socket.saddr = daddr;
+			rst_socket.daddr = saddr;
+			rst_socket.sport = dport;
+			rst_socket.dport = sport;
+
+			data_len = skb->len - (th->doff << 2);
+
+			rst_socket.sync.seq = 0;
+			rst_socket.sync.ack_seq =
+				rt_tcp_compute_ack_seq(th, data_len);
+
+			if (rt_ip_route_output(&rst_socket.rt, daddr, saddr) ==
+			    0) {
+				rt_socket_reference(&rst_socket.sock);
+				rt_tcp_send(&rst_socket,
+					    TCP_FLAG_ACK | TCP_FLAG_RST);
+				rtdev_dereference(rst_socket.rt.rtdev);
+			}
+		}
+	}
+
+	return skb->sk;
+}
+
+static void rt_tcp_window_update(struct tcp_socket *ts, u16 window)
+{
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->sync.dst_window) {
+		ts->sync.dst_window = window;
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		if (!window) {
+			/* clear send event status */
+			rtdm_event_clear(&ts->send_evt);
+		}
+	} else {
+		if (window) {
+			ts->sync.dst_window = window;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			/* set send event status */
+			rtdm_event_signal(&ts->send_evt);
+		} else {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		}
+	}
+}
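+/*
+ * Zero-window handling: when the peer advertises a window of zero, the
+ * send event is cleared so that writers block until a later segment
+ * reopens the window and signals the event again.
+ */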
+
+/***
+ *  rt_tcp_rcv
+ */
+static void rt_tcp_rcv(struct rtskb *skb)
+{
+	rtdm_lockctx_t context;
+	struct tcp_socket *ts;
+	struct tcphdr *th = skb->h.th;
+	unsigned int data_len = skb->len - (th->doff << 2);
+	u32 seq = ntohl(th->seq);
+	int signal;
+
+	ts = container_of(skb->sk, struct tcp_socket, sock);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+	if (ts->error_rate > 0) {
+		if ((ts->packet_counter++ % ts->error_rate) < ts->multi_error) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		}
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+
+	/* Check for daddr/dport correspondence to values stored in
+       selected socket from hash */
+	if (ts->tcp_state != TCP_LISTEN && (ts->daddr != skb->nh.iph->saddr ||
+					    ts->dport != skb->h.th->source)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto drop;
+	}
+
+	/* Check if it is a keepalive probe */
+	if (ts->sync.ack_seq == (seq + 1) && ts->tcp_state == TCP_ESTABLISHED) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rt_tcp_send(ts, TCP_FLAG_ACK);
+		goto feed;
+	}
+
+	if (ts->tcp_state == TCP_SYN_SENT) {
+		ts->sync.ack_seq = rt_tcp_compute_ack_seq(th, data_len);
+
+		if (th->syn && th->ack) {
+			rt_tcp_socket_validate(ts);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_event_signal(&ts->conn_evt);
+			/* Send ACK */
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			goto feed;
+		}
+
+		ts->tcp_state = TCP_CLOSE;
+		ts->sync.seq = ntohl(th->ack_seq);
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		/* Send RST|ACK */
+		rtdm_event_signal(&ts->conn_evt);
+		rt_tcp_send(ts, TCP_FLAG_RST | TCP_FLAG_ACK);
+		goto drop;
+	}
+
+	/* Check for SEQ correspondence to determine the connection relevance */
+
+	/* OR-list of conditions to be satisfied:
+     *
+     * th->ack && rt_tcp_after(ts->nacked_first, ntohl(th->ack_seq))
+     * th->ack && th->rst && ...
+     * th->syn && (ts->tcp_state == TCP_LISTEN ||
+		   ts->tcp_state == TCP_SYN_SENT)
+     * rt_tcp_after(seq, ts->sync.ack_seq) &&
+	   rt_tcp_before(seq, ts->sync.ack_seq + ts->sync.window)
+     */
+
+	if ((rt_tcp_after(seq, ts->sync.ack_seq) &&
+	     rt_tcp_before(seq, ts->sync.ack_seq + ts->sync.window)) ||
+	    th->rst ||
+	    (th->syn &&
+	     (ts->tcp_state == TCP_LISTEN || ts->tcp_state == TCP_SYN_SENT))) {
+		/* everything is ok */
+	} else if (rt_tcp_after(seq, ts->sync.ack_seq - data_len)) {
+		/* retransmission of data we already acked */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rt_tcp_send(ts, TCP_FLAG_ACK);
+		goto drop;
+	} else {
+		/* drop forward ack */
+		if (th->ack &&
+		    /* but reset ack from old connection */
+		    ts->tcp_state == TCP_ESTABLISHED) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_printk(
+				"rttcp: dropped unappropriate ACK packet %u\n",
+				ts->sync.ack_seq);
+			goto drop;
+		}
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rtdm_printk("rttcp: sequence number is not in window, "
+			    "dropped (failed: %u <= %u <= %u)\n",
+			    ts->sync.ack_seq, seq,
+			    ts->sync.ack_seq + ts->sync.window);
+
+		/* That's a forced RST for a lost connection */
+		rst_socket.saddr = skb->nh.iph->daddr;
+		rst_socket.daddr = skb->nh.iph->saddr;
+		rst_socket.sport = th->dest;
+		rst_socket.dport = th->source;
+
+		rst_socket.sync.seq = ntohl(th->ack_seq);
+		rst_socket.sync.ack_seq = rt_tcp_compute_ack_seq(th, data_len);
+
+		if (rt_ip_route_output(&rst_socket.rt, rst_socket.daddr,
+				       rst_socket.saddr) == 0) {
+			rt_socket_reference(&rst_socket.sock);
+			rt_tcp_send(&rst_socket, TCP_FLAG_RST | TCP_FLAG_ACK);
+			rtdev_dereference(rst_socket.rt.rtdev);
+		}
+		goto drop;
+	}
+
+	if (th->rst) {
+		if (ts->tcp_state == TCP_SYN_RECV) {
+			ts->tcp_state = TCP_LISTEN;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		} else {
+			/* Drop our half-open connection, peer obviously went away. */
+			signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+			if (signal)
+				rt_tcp_socket_invalidate_signal(ts);
+
+			goto drop;
+		}
+	}
+
+	ts->sync.ack_seq = rt_tcp_compute_ack_seq(th, data_len);
+
+	if (th->fin) {
+		if (ts->tcp_state == TCP_ESTABLISHED) {
+			/* Send ACK */
+			signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE_WAIT);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+			if (signal)
+				rt_tcp_socket_invalidate_signal(ts);
+
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			goto feed;
+		} else if ((ts->tcp_state == TCP_FIN_WAIT1 && th->ack) ||
+			   ts->tcp_state == TCP_FIN_WAIT2) {
+			/* Send ACK */
+			ts->tcp_state = TCP_TIME_WAIT;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			/* data receiving is not possible anymore */
+			rtdm_sem_destroy(&ts->sock.pending_sem);
+			rtdm_nrtsig_pend(&ts->close_sig);
+			goto feed;
+		} else if (ts->tcp_state == TCP_FIN_WAIT1) {
+			/* Send ACK */
+			ts->tcp_state = TCP_CLOSING;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rt_tcp_send(ts, TCP_FLAG_ACK);
+			/* data receiving is not possible anymore */
+			rtdm_sem_destroy(&ts->sock.pending_sem);
+			goto feed;
+		} else {
+			/* just drop it */
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		}
+	}
+
+	if (th->syn) {
+		/* Need to differentiate LISTEN socket from ESTABLISHED one */
+		/* Both of them have the same sport/saddr, but different dport/daddr */
+		/* dport is unknown if this is the first of several connections */
+
+		if (ts->tcp_state == TCP_LISTEN) {
+			/* Need to store ts->seq while sending SYN earlier */
+			/* The socket shall be in TCP_LISTEN state */
+
+			/* safe to update ts->saddr here due to a single task for
+	       rt_tcp_rcv() and rt_tcp_dest_socket() callers */
+			ts->saddr = skb->nh.iph->daddr;
+
+			ts->daddr = skb->nh.iph->saddr;
+			ts->dport = th->source;
+			ts->sync.seq = rt_tcp_initial_seq();
+			ts->sync.window = 4096;
+			ts->tcp_state = TCP_SYN_RECV;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+			/* Send SYN|ACK */
+			rt_tcp_send(ts, TCP_FLAG_SYN | TCP_FLAG_ACK);
+			goto drop;
+		}
+
+		/* Send RST|ACK */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rt_tcp_send(ts, TCP_FLAG_RST | TCP_FLAG_ACK);
+		goto drop;
+	}
+
+	/* ACK received without SYN, FIN or RST flags */
+	if (th->ack) {
+		/* Check ack sequence */
+		if (rt_tcp_before(ts->sync.seq + 1, ntohl(th->ack_seq))) {
+			rtdm_printk("rttcp: unexpected ACK %u %u %u\n",
+				    ts->sync.seq, ts->nacked_first,
+				    ntohl(th->ack_seq));
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto drop;
+		}
+
+		if (ts->tcp_state == TCP_LAST_ACK) {
+			/* close connection and free socket data */
+			ts->tcp_state = TCP_CLOSE;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			/* socket destruction will be done on close() */
+			rtdm_nrtsig_pend(&ts->close_sig);
+			goto drop;
+		} else if (ts->tcp_state == TCP_FIN_WAIT1) {
+			ts->tcp_state = TCP_FIN_WAIT2;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			goto feed;
+		} else if (ts->tcp_state == TCP_SYN_RECV) {
+			rt_tcp_socket_validate(ts);
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rtdm_event_signal(&ts->conn_evt);
+			goto feed;
+		} else if (ts->tcp_state == TCP_CLOSING) {
+			ts->tcp_state = TCP_TIME_WAIT;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			/* socket destruction will be done on close() */
+			rtdm_nrtsig_pend(&ts->close_sig);
+			goto feed;
+		}
+	}
+
+	if (ts->tcp_state != TCP_ESTABLISHED) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto drop;
+	}
+
+	if (data_len == 0) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		goto feed;
+	}
+
+	/* Send ACK */
+	ts->sync.window -= data_len;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	rt_tcp_send(ts, TCP_FLAG_ACK);
+
+	rtskb_queue_tail(&skb->sk->incoming, skb);
+	rtdm_sem_up(&ts->sock.pending_sem);
+
+	/* inform retransmission subsystem about arrived ack */
+	if (th->ack) {
+		rt_tcp_retransmit_ack(ts, ntohl(th->ack_seq));
+	}
+
+	rt_tcp_keepalive_feed(ts);
+	rt_tcp_window_update(ts, ntohs(th->window));
+
+	return;
+
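+	/* "feed" still updates the retransmission, keepalive and window
+	   state from the segment before the skb is dropped */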
+feed:
+	/* inform retransmission subsystem about arrived ack */
+	if (th->ack) {
+		rt_tcp_retransmit_ack(ts, ntohl(th->ack_seq));
+	}
+
+	rt_tcp_keepalive_feed(ts);
+	rt_tcp_window_update(ts, ntohs(th->window));
+
+drop:
+	kfree_rtskb(skb);
+	return;
+}
+
+/***
+ *  rt_tcp_rcv_err
+ */
+static void rt_tcp_rcv_err(struct rtskb *skb)
+{
+	rtdm_printk("rttcp: rt_tcp_rcv err\n");
+}
+
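+/***
+ *  rt_tcp_window_send
+ *
+ *  Sends at most dst_window bytes of the supplied buffer as a single
+ *  ACK-flagged segment and returns the number of bytes handed over;
+ *  rt_tcp_write() loops over this until the whole buffer is sent.
+ */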
+static int rt_tcp_window_send(struct tcp_socket *ts, u32 data_len, u8 *data_ptr)
+{
+	u32 dst_window = ts->sync.dst_window;
+	int ret;
+
+	if (data_len > dst_window)
+		data_len = dst_window;
+
+	if ((ret = rt_tcp_segment(&ts->rt, ts, TCP_FLAG_ACK, data_len, data_ptr,
+				  0)) < 0) {
+		rtdm_printk("rttcp: cann't send a packet: err %d\n", -ret);
+		return ret;
+	}
+
+	return ret;
+}
+
+static void rt_tcp_close_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	complete_all((struct completion *)arg);
+}
+
+static int rt_tcp_socket_create(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	int i;
+	int index;
+	struct rtsocket *sock = &ts->sock;
+
+	sock->prot.inet.saddr = INADDR_ANY;
+	sock->prot.inet.state = TCP_CLOSE;
+	sock->prot.inet.tos = 0;
+	/*
+      rtdm_printk("rttcp: rt_tcp_socket_create 0x%p\n", ts);
+    */
+	rtdm_lock_init(&ts->socket_lock);
+
+	ts->rt.rtdev = NULL;
+
+	ts->tcp_state = TCP_CLOSE;
+
+	ts->is_accepting = 0;
+	ts->is_accepted = 0;
+	ts->is_binding = 0;
+	ts->is_bound = 0;
+	ts->is_valid = 0;
+	ts->is_closed = 0;
+
+	ts->sk_sndtimeo = RTDM_TIMEOUT_INFINITE;
+
+	rtdm_event_init(&ts->conn_evt, 0);
+
+	ts->keepalive.enabled = 0;
+
+	ts->timer_state = max_retransmits;
+	timerwheel_init_timer(&ts->timer, rt_tcp_retransmit_handler, ts);
+	rtskb_queue_init(&ts->retransmit_queue);
+
+	init_completion(&ts->fin_handshake);
+	rtdm_nrtsig_init(&ts->close_sig, rt_tcp_close_signal_handler,
+			 &ts->fin_handshake);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION
+	ts->packet_counter = counter_start;
+	ts->error_rate = error_rate;
+	ts->multi_error = multi_error;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4_TCP_ERROR_INJECTION */
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+
+	/* enforce maximum number of TCP sockets */
+	if (free_ports == 0) {
+		rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+		rtdm_nrtsig_destroy(&ts->close_sig);
+		return -EAGAIN;
+	}
+	free_ports--;
+
+	/* find free auto-port in bitmap */
+	for (i = 0; i < RT_PORT_BITMAP_WORDS; i++)
+		if (port_bitmap[i] != (unsigned long)-1)
+			break;
+	index = ffz(port_bitmap[i]);
+	set_bit(index, &port_bitmap[i]);
+	index += i * BITS_PER_LONG;
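+	/* the registry index doubles as the port offset, so the
+	   auto-assigned sport is unique per socket */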
+	sock->prot.inet.reg_index = index;
+	sock->prot.inet.sport = index + tcp_auto_port_start;
+
+	/* register TCP socket */
+	port_registry[index] = ts;
+	port_hash_insert(ts, INADDR_ANY, sock->prot.inet.sport);
+
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	return 0;
+}
+
+/***
+ *  rt_tcp_socket - create a new TCP-Socket
+ *  @s: socket
+ */
+static int rt_tcp_socket(struct rtdm_fd *fd)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	int ret;
+
+	if ((ret = rt_socket_init(fd, IPPROTO_TCP)) != 0)
+		return ret;
+
+	if ((ret = rt_tcp_socket_create(ts)) != 0)
+		rt_socket_cleanup(fd);
+
+	return ret;
+}
+
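+/***
+ *  rt_tcp_dispatched_packet_send
+ *
+ *  rtpc callback used by rt_tcp_close() to issue the FIN transmission
+ *  via rt_tcp_send().
+ */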
+static int rt_tcp_dispatched_packet_send(struct rt_proc_call *call)
+{
+	int ret;
+	struct rt_tcp_dispatched_packet_send_cmd *cmd;
+
+	cmd = rtpc_get_priv(call, struct rt_tcp_dispatched_packet_send_cmd);
+	ret = rt_tcp_send(cmd->ts, cmd->flags);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_socket_destruct
+ *  this function requires non-realtime context
+ */
+static void rt_tcp_socket_destruct(struct tcp_socket *ts)
+{
+	rtdm_lockctx_t context;
+	struct rtskb *skb;
+	int index;
+	int signal;
+	struct rtsocket *sock = &ts->sock;
+
+	/*
+      rtdm_printk("rttcp: rt_tcp_socket_destruct 0x%p\n", ts);
+    */
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+	if (sock->prot.inet.reg_index >= 0) {
+		index = sock->prot.inet.reg_index;
+
+		clear_bit(index % BITS_PER_LONG,
+			  &port_bitmap[index / BITS_PER_LONG]);
+		port_hash_del(port_registry[index]);
+		free_ports++;
+		sock->prot.inet.reg_index = -1;
+	}
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	signal = rt_tcp_socket_invalidate(ts, TCP_CLOSE);
+
+	rt_tcp_keepalive_disable(ts);
+
+	sock->prot.inet.state = TCP_CLOSE;
+
+	/* dereference rtdev */
+	if (ts->rt.rtdev != NULL) {
+		rtdev_dereference(ts->rt.rtdev);
+		ts->rt.rtdev = NULL;
+	}
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	if (signal)
+		rt_tcp_socket_invalidate_signal(ts);
+
+	rtdm_event_destroy(&ts->conn_evt);
+
+	rtdm_nrtsig_destroy(&ts->close_sig);
+
+	/* cleanup already collected fragments */
+	rt_ip_frag_invalidate_socket(sock);
+
+	/* free packets in incoming queue */
+	while ((skb = rtskb_dequeue(&sock->incoming)) != NULL)
+		kfree_rtskb(skb);
+
+	/* ensure that the timer is no longer running */
+	timerwheel_remove_timer_sync(&ts->timer);
+
+	/* free packets in retransmission queue */
+	while ((skb = __rtskb_dequeue(&ts->retransmit_queue)) != NULL)
+		kfree_rtskb(skb);
+}
+
+/***
+ *  rt_tcp_close
+ */
+static void rt_tcp_close(struct rtdm_fd *fd)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	struct rt_tcp_dispatched_packet_send_cmd send_cmd;
+	rtdm_lockctx_t context;
+	int signal = 0;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	ts->is_closed = 1;
+
+	if (ts->tcp_state == TCP_ESTABLISHED || ts->tcp_state == TCP_SYN_RECV) {
+		/* close() from ESTABLISHED */
+		send_cmd.ts = ts;
+		send_cmd.flags = TCP_FLAG_FIN | TCP_FLAG_ACK;
+		signal = rt_tcp_socket_invalidate(ts, TCP_FIN_WAIT1);
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		rtpc_dispatch_call(rt_tcp_dispatched_packet_send, 0, &send_cmd,
+				   sizeof(send_cmd), NULL, NULL);
+		/* result is ignored */
+
+		/* Give the peer some time to reply to our FIN.
+		   Since it is not relevant what exactly causes the wait
+		   function to return, its result is ignored. */
+		wait_for_completion_interruptible_timeout(&ts->fin_handshake,
+					      msecs_to_jiffies(close_timeout));
+	} else if (ts->tcp_state == TCP_CLOSE_WAIT) {
+		/* Send FIN in CLOSE_WAIT */
+		send_cmd.ts = ts;
+		send_cmd.flags = TCP_FLAG_FIN | TCP_FLAG_ACK;
+		signal = rt_tcp_socket_invalidate(ts, TCP_LAST_ACK);
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		rtpc_dispatch_call(rt_tcp_dispatched_packet_send, 0, &send_cmd,
+				   sizeof(send_cmd), NULL, NULL);
+		/* result is ignored */
+
+		/* Give the peer some time to reply to our FIN.
+		   Since it is not relevant what exactly causes the wait
+		   function to return, its result is ignored. */
+		wait_for_completion_interruptible_timeout(&ts->fin_handshake,
+					      msecs_to_jiffies(close_timeout));
+	} else {
+		/*
+		 * rt_tcp_socket_validate() has not been called at all, hence
+		 * the socket state is TCP_SYN_SENT or TCP_LISTEN; or the
+		 * socket is in one of the close states, hence
+		 * rt_tcp_socket_invalidate() was already called, but close()
+		 * is called for the first time.
+		 */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+	}
+
+	if (signal)
+		rt_tcp_socket_invalidate_signal(ts);
+
+	rt_tcp_socket_destruct(ts);
+
+	rt_socket_cleanup(fd);
+}
+
+/***
+ *  rt_tcp_bind - bind socket to local address
+ *  @s:     socket
+ *  @addr:  local address
+ */
+static int rt_tcp_bind(struct rtdm_fd *fd, struct tcp_socket *ts,
+		       const struct sockaddr __user *addr, socklen_t addrlen)
+{
+	struct sockaddr_in *usin, _usin;
+	rtdm_lockctx_t context;
+	int index;
+	int bound = 0;
+	int ret = 0;
+
+	usin = rtnet_get_arg(fd, &_usin, addr, sizeof(_usin));
+	if (IS_ERR(usin))
+		return PTR_ERR(usin);
+
+	if ((addrlen < (int)sizeof(struct sockaddr_in)) ||
+	    ((usin->sin_port & tcp_auto_port_mask) == tcp_auto_port_start))
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	if (ts->tcp_state != TCP_CLOSE || ts->is_bound || ts->is_binding) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+
+	ts->is_binding = 1;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+
+	if ((index = ts->sock.prot.inet.reg_index) < 0) {
+		/* socket is destroyed */
+		ret = -EBADF;
+		goto unlock_out;
+	}
+
+	port_hash_del(ts);
+	if (port_hash_insert(ts, usin->sin_addr.s_addr,
+			     usin->sin_port ?: index + tcp_auto_port_start)) {
+		port_hash_insert(ts, ts->saddr, ts->sport);
+
+		ret = -EADDRINUSE;
+		goto unlock_out;
+	}
+
+	bound = 1;
+
+unlock_out:
+	rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	ts->is_bound = bound;
+	ts->is_binding = 0;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_connect
+ */
+static int rt_tcp_connect(struct rtdm_fd *fd, struct tcp_socket *ts,
+			  const struct sockaddr __user *serv_addr,
+			  socklen_t addrlen)
+{
+	struct sockaddr_in *usin, _usin;
+	struct dest_route rt;
+	rtdm_lockctx_t context;
+	int ret;
+
+	if (addrlen < (int)sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	usin = rtnet_get_arg(fd, &_usin, serv_addr, sizeof(_usin));
+	if (IS_ERR(usin))
+		return PTR_ERR(usin);
+
+	if (usin->sin_family != AF_INET)
+		return -EAFNOSUPPORT;
+
+	ret = rt_ip_route_output(&rt, usin->sin_addr.s_addr, ts->saddr);
+	if (ret < 0) {
+		/* no route to host */
+		return -ENETUNREACH;
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->is_closed) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		ret = -EBADF;
+		goto err_deref;
+	}
+
+	if (ts->tcp_state != TCP_CLOSE || ts->is_binding) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		ret = -EINVAL;
+		goto err_deref;
+	}
+
+	if (ts->rt.rtdev == NULL)
+		memcpy(&ts->rt, &rt, sizeof(rt));
+	else
+		rtdev_dereference(rt.rtdev);
+
+	ts->saddr = rt.rtdev->local_ip;
+
+	ts->daddr = usin->sin_addr.s_addr;
+	ts->dport = usin->sin_port;
+
+	ts->sync.seq = rt_tcp_initial_seq();
+	ts->sync.ack_seq = 0;
+	ts->sync.window = 4096;
+	ts->sync.dst_window = 0;
+
+	ts->tcp_state = TCP_SYN_SENT;
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	/* Complete three-way handshake */
+	ret = rt_tcp_send(ts, TCP_FLAG_SYN);
+	if (ret < 0) {
+		rtdm_printk("rttcp: cann't send SYN\n");
+		return ret;
+	}
+
+	ret = rtdm_event_timedwait(&ts->conn_evt, rt_tcp_connection_timeout,
+				   NULL);
+	if (unlikely(ret < 0))
+		switch (ret) {
+		case -EWOULDBLOCK:
+		case -ETIMEDOUT:
+		case -EINTR:
+			return ret;
+
+		default:
+			return -EBADF;
+		}
+
+	if (ts->tcp_state == TCP_SYN_SENT) {
+		/* received conn_evt, but connection is not established */
+		return -ECONNREFUSED;
+	}
+
+	return ret;
+
+err_deref:
+	rtdev_dereference(rt.rtdev);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_listen
+ */
+static int rt_tcp_listen(struct tcp_socket *ts, unsigned long backlog)
+{
+	int ret;
+	rtdm_lockctx_t context;
+
+	/* Ignore backlog value, maximum number of queued connections is 1 */
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	if (ts->is_closed) {
+		ret = -EBADF;
+		goto unlock_out;
+	}
+
+	if (ts->tcp_state != TCP_CLOSE || ts->is_binding) {
+		ret = -EINVAL;
+		goto unlock_out;
+	}
+
+	ts->tcp_state = TCP_LISTEN;
+	ret = 0;
+
+unlock_out:
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_accept
+ */
+static int rt_tcp_accept(struct rtdm_fd *fd, struct tcp_socket *ts,
+			 struct sockaddr *addr, socklen_t __user *addrlen)
+{
+	/* Return the sockaddr, but bind it with rt_socket_init, so that it is
+	   possible to read from/write to it in the future; return a valid
+	   file descriptor */
+
+	int ret;
+	socklen_t *uaddrlen, _uaddrlen;
+	struct sockaddr_in sin;
+	nanosecs_rel_t timeout = ts->sock.timeout;
+	rtdm_lockctx_t context;
+	struct dest_route rt;
+
+	uaddrlen = rtnet_get_arg(fd, &_uaddrlen, addrlen, sizeof(_uaddrlen));
+	if (IS_ERR(uaddrlen))
+		return PTR_ERR(uaddrlen);
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+	if (ts->is_accepting || ts->is_accepted) {
+		/* socket is already accepted or is accepting a connection right now */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EALREADY;
+	}
+
+	if (ts->tcp_state != TCP_LISTEN ||
+	    *uaddrlen < sizeof(struct sockaddr_in)) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+
+	ts->is_accepting = 1;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	ret = rtdm_event_timedwait(&ts->conn_evt, timeout, NULL);
+
+	if (unlikely(ret < 0))
+		switch (ret) {
+		case -ETIMEDOUT:
+		case -EINTR:
+			goto err;
+
+		default:
+			ret = -EBADF;
+			goto err;
+		}
+
+	/* accept() was notified that the connection is established */
+	ret = rt_ip_route_output(&rt, ts->daddr, ts->saddr);
+	if (ret < 0) {
+		/* strange, no route to host, keep status quo */
+		ret = -EPROTO;
+		goto err;
+	}
+
+	if (addr) {
+		sin.sin_family = AF_INET;
+		sin.sin_port = ts->dport;
+		sin.sin_addr.s_addr = ts->daddr;
+		ret = rtnet_put_arg(fd, addr, &sin, sizeof(sin));
+		if (ret) {
+			rtdev_dereference(rt.rtdev);
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->tcp_state != TCP_ESTABLISHED) {
+		/* protocol error */
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		rtdev_dereference(rt.rtdev);
+		ret = -EPROTO;
+		goto err;
+	}
+
+	if (ts->rt.rtdev == NULL)
+		memcpy(&ts->rt, &rt, sizeof(rt));
+	else
+		rtdev_dereference(rt.rtdev);
+
+	ts->is_accepted = 1;
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	ret = rtdm_fd_ufd(rt_socket_fd(&ts->sock));
+
+err:
+	/* it is not critical to leave this unlocked
+	   due to the single-entry nature of accept() */
+	ts->is_accepting = 0;
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_shutdown
+ */
+static int rt_tcp_shutdown(struct tcp_socket *ts, unsigned long how)
+{
+	return -EOPNOTSUPP;
+}
+
+/***
+ *  rt_tcp_setsockopt
+ */
+static int rt_tcp_setsockopt(struct rtdm_fd *fd, struct tcp_socket *ts,
+			     int level, int optname, const void *optval,
+			     socklen_t optlen)
+{
+	/* uint64_t val; */
+	struct __kernel_old_timeval tv;
+	rtdm_lockctx_t context;
+
+	switch (optname) {
+	case SO_KEEPALIVE:
+		if (optlen < sizeof(unsigned int))
+			return -EINVAL;
+
+		/* commented out, because the current implementation transmits
+		   keepalive probes from interrupt context */
+		/*
+	    val = *(unsigned long*)optval;
+
+	    if (val)
+		rt_tcp_keepalive_enable(ts);
+	    else
+		rt_tcp_keepalive_disable(ts);
+	    */
+		return 0;
+
+	case SO_SNDTIMEO_OLD:
+		if (optlen < sizeof(tv))
+			return -EINVAL;
+		if (rtdm_copy_from_user(fd, &tv, optval, sizeof(tv)))
+			return -EFAULT;
+		if (tv.tv_usec < 0 || tv.tv_usec >= 1000000)
+			return -EDOM;
+
+		rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+		if (tv.tv_sec < 0) {
+			ts->sk_sndtimeo = RTDM_TIMEOUT_NONE;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			return 0;
+		}
+
+		ts->sk_sndtimeo = RTDM_TIMEOUT_INFINITE;
+		if (tv.tv_sec == 0 && tv.tv_usec == 0) {
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			return 0;
+		}
+
+		if (tv.tv_sec < (MAX_SCHEDULE_TIMEOUT / 1000000000ull - 1))
+			ts->sk_sndtimeo =
+				(tv.tv_sec * 1000000 + tv.tv_usec) * 1000;
+
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+		return 0;
+
+	case SO_REUSEADDR:
+		/* to be implemented */
+		return -EOPNOTSUPP;
+	}
+
+	return -ENOPROTOOPT;
+}
+
+/***
+ *  rt_tcp_getsockopt
+ */
+static int rt_tcp_getsockopt(struct rtdm_fd *fd, struct tcp_socket *ts,
+			     int level, int optname, void *optval,
+			     socklen_t *optlen)
+{
+	int ret = 0;
+
+	if (*optlen < sizeof(unsigned int))
+		return -EINVAL;
+
+	switch (optname) {
+	case SO_ERROR:
+		ret = 0; /* used in nonblocking connect(), extend later */
+		break;
+
+	default:
+		ret = -ENOPROTOOPT;
+		break;
+	}
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_ioctl
+ */
+static int rt_tcp_ioctl(struct rtdm_fd *fd, unsigned int request,
+			void __user *arg)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	const struct _rtdm_setsockaddr_args *setaddr;
+	struct _rtdm_setsockaddr_args _setaddr;
+	const struct _rtdm_getsockaddr_args *getaddr;
+	struct _rtdm_getsockaddr_args _getaddr;
+	const struct _rtdm_getsockopt_args *getopt;
+	struct _rtdm_getsockopt_args _getopt;
+	const struct _rtdm_setsockopt_args *setopt;
+	struct _rtdm_setsockopt_args _setopt;
+	int in_rt;
+
+	/* fast path for common socket IOCTLs */
+	if (_IOC_TYPE(request) == RTIOC_TYPE_NETWORK)
+		return rt_socket_common_ioctl(fd, request, arg);
+
+	in_rt = rtdm_in_rt_context();
+
+	switch (request) {
+	case _RTIOC_BIND:
+		setaddr = rtnet_get_arg(fd, &_setaddr, arg, sizeof(_setaddr));
+		if (IS_ERR(setaddr))
+			return PTR_ERR(setaddr);
+		return rt_tcp_bind(fd, ts, setaddr->addr, setaddr->addrlen);
+	case _RTIOC_CONNECT:
+		if (!in_rt)
+			return -ENOSYS;
+		setaddr = rtnet_get_arg(fd, &_setaddr, arg, sizeof(_setaddr));
+		if (IS_ERR(setaddr))
+			return PTR_ERR(setaddr);
+		return rt_tcp_connect(fd, ts, setaddr->addr, setaddr->addrlen);
+
+	case _RTIOC_LISTEN:
+		return rt_tcp_listen(ts, (unsigned long)arg);
+
+	case _RTIOC_ACCEPT:
+		if (!in_rt)
+			return -ENOSYS;
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+		return rt_tcp_accept(fd, ts, getaddr->addr, getaddr->addrlen);
+
+	case _RTIOC_SHUTDOWN:
+		return rt_tcp_shutdown(ts, (unsigned long)arg);
+
+	case _RTIOC_SETSOCKOPT:
+		setopt = rtnet_get_arg(fd, &_setopt, arg, sizeof(_setopt));
+		if (IS_ERR(setopt))
+			return PTR_ERR(setopt);
+
+		if (setopt->level != SOL_SOCKET)
+			break;
+
+		return rt_tcp_setsockopt(fd, ts, setopt->level, setopt->optname,
+					 setopt->optval, setopt->optlen);
+
+	case _RTIOC_GETSOCKOPT:
+		getopt = rtnet_get_arg(fd, &_getopt, arg, sizeof(_getopt));
+		if (IS_ERR(getopt))
+			return PTR_ERR(getopt);
+
+		if (getopt->level != SOL_SOCKET)
+			break;
+
+		return rt_tcp_getsockopt(fd, ts, getopt->level, getopt->optname,
+					 getopt->optval, getopt->optlen);
+	default:
+		break;
+	}
+
+	return rt_ip_ioctl(fd, request, arg);
+}
+
+/***
+ *  rt_tcp_read
+ */
+static ssize_t rt_tcp_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	struct rtsocket *sock = &ts->sock;
+
+	struct rtskb *skb;
+	struct rtskb *first_skb;
+	nanosecs_rel_t timeout = sock->timeout;
+	size_t data_len;
+	size_t th_len;
+	size_t copied = 0;
+	size_t block_size;
+	u8 *user_buf = buf;
+	int ret;
+	rtdm_lockctx_t context;
+
+	rtdm_toseq_t timeout_seq;
+
+	if (!rtdm_fd_is_user(fd)) {
+		return -EFAULT;
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	if (ts->is_closed) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EBADF;
+	}
+
+	if (!ts->is_valid) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return 0;
+	}
+
+	if (ts->tcp_state != TCP_ESTABLISHED &&
+	    ts->tcp_state != TCP_FIN_WAIT2) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	rtdm_toseq_init(&timeout_seq, timeout);
+
+	while (copied < nbyte) {
+		ret = rtdm_sem_timeddown(&ts->sock.pending_sem, timeout,
+					 &timeout_seq);
+
+		if (unlikely(ret < 0))
+			switch (ret) {
+			case -EWOULDBLOCK:
+			case -ETIMEDOUT:
+			case -EINTR:
+				return (copied ? copied : ret);
+
+			case -EIDRM: /* event is destroyed */
+				if (ts->is_closed)
+					return -EBADF;
+
+				return copied;
+
+			default:
+				if (ts->is_closed) {
+					return -EBADF;
+				}
+
+				return 0;
+			}
+
+		skb = rtskb_dequeue_chain(&sock->incoming);
+		RTNET_ASSERT(skb != NULL, return -EFAULT;);
+
+		th_len = (skb->h.th->doff) << 2;
+
+		data_len = skb->len - th_len;
+
+		__rtskb_pull(skb, th_len);
+
+		first_skb = skb;
+
+		/* iterate over all IP fragments */
+	iterate_fragments:
+		block_size = skb->len;
+		copied += block_size;
+		data_len -= block_size;
+
+		if (copied > nbyte) {
+			block_size -= copied - nbyte;
+			copied = nbyte;
+
+			if (rtdm_copy_to_user(fd, user_buf, skb->data,
+					      block_size)) {
+				kfree_rtskb(first_skb); /* or store the data? */
+				return -EFAULT;
+			}
+			rtdm_lock_get_irqsave(&ts->socket_lock, context);
+			if (ts->sync.window) {
+				ts->sync.window += block_size;
+				rtdm_lock_put_irqrestore(&ts->socket_lock,
+							 context);
+			} else {
+				ts->sync.window = block_size;
+				rtdm_lock_put_irqrestore(&ts->socket_lock,
+							 context);
+				rt_tcp_send(ts,
+					    TCP_FLAG_ACK); /* window update */
+			}
+
+			__rtskb_pull(skb, block_size);
+			__rtskb_push(first_skb, sizeof(struct tcphdr));
+			first_skb->h.th->doff = 5;
+			rtskb_queue_head(&sock->incoming, first_skb);
+			rtdm_sem_up(&ts->sock.pending_sem);
+
+			return copied;
+		}
+
+		if (rtdm_copy_to_user(fd, user_buf, skb->data, block_size)) {
+			kfree_rtskb(first_skb); /* or store the data? */
+			return -EFAULT;
+		}
+		rtdm_lock_get_irqsave(&ts->socket_lock, context);
+		if (ts->sync.window) {
+			ts->sync.window += block_size;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		} else {
+			ts->sync.window = block_size;
+			rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+			rt_tcp_send(ts, TCP_FLAG_ACK); /* window update */
+		}
+
+		if ((skb = skb->next) != NULL) {
+			user_buf += data_len;
+			goto iterate_fragments;
+		}
+
+		kfree_rtskb(first_skb);
+	}
+
+	return copied;
+}
+
+/***
+ *  rt_tcp_write
+ */
+static ssize_t rt_tcp_write(struct rtdm_fd *fd, const void __user *user_buf,
+			    size_t nbyte)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+	uint32_t sent_len = 0;
+	rtdm_lockctx_t context;
+	int ret = 0;
+	nanosecs_rel_t sk_sndtimeo;
+	void *buf;
+
+	if (!rtdm_fd_is_user(fd)) {
+		return -EFAULT;
+	}
+
+	rtdm_lock_get_irqsave(&ts->socket_lock, context);
+
+	sk_sndtimeo = ts->sk_sndtimeo;
+
+	if (!ts->is_valid) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EPIPE;
+	}
+
+	if ((ts->daddr | ts->dport) == 0 || ts->tcp_state != TCP_ESTABLISHED) {
+		rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+		return -EINVAL;
+	}
+
+	rtdm_lock_put_irqrestore(&ts->socket_lock, context);
+
+	buf = xnmalloc(nbyte);
+	if (buf == NULL)
+		return -ENOMEM;
+
+	ret = rtdm_copy_from_user(fd, buf, user_buf, nbyte);
+	if (ret) {
+		xnfree(buf);
+		return ret;
+	}
+
+	while (sent_len < nbyte) {
+		ret = rtdm_event_timedwait(&ts->send_evt, sk_sndtimeo, NULL);
+
+		if (unlikely(ret < 0))
+			switch (ret) {
+			case -EWOULDBLOCK:
+			case -ETIMEDOUT:
+			case -EINTR:
+				xnfree(buf);
+				return sent_len ?: ret;
+
+			case -EIDRM: /* event is destroyed */
+			default:
+				if (ts->is_closed) {
+					xnfree(buf);
+					return -EBADF;
+				}
+
+				xnfree(buf);
+				return sent_len ?: ret;
+			}
+
+		ret = rt_tcp_window_send(ts, nbyte - sent_len,
+					 ((u8 *)buf) + sent_len);
+
+		if (ret < 0) { /* check this branch correctness */
+			rtdm_event_signal(&ts->send_evt);
+			break;
+		}
+
+		sent_len += ret;
+		if (ts->sync.dst_window)
+			rtdm_event_signal(&ts->send_evt);
+	}
+
+	xnfree(buf);
+	return (ret < 0 ? ret : sent_len);
+}
+
+/***
+ *  rt_tcp_recvmsg
+ */
+static ssize_t rt_tcp_recvmsg(struct rtdm_fd *fd, struct user_msghdr *msg,
+			      int msg_flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	ssize_t ret;
+	size_t len;
+	void *buf;
+
+	if (msg_flags)
+		return -EOPNOTSUPP;
+
+	/* looping over all vectors is not implemented yet */
+	if (msg->msg_iovlen != 1)
+		return -EOPNOTSUPP;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	len = iov[0].iov_len;
+	if (len > 0) {
+		buf = iov[0].iov_base;
+		ret = rt_tcp_read(fd, buf, len);
+	}
+
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_sendmsg
+ */
+static ssize_t rt_tcp_sendmsg(struct rtdm_fd *fd, const struct user_msghdr *msg,
+			      int msg_flags)
+{
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+	ssize_t ret;
+	size_t len;
+
+	if (msg_flags)
+		return -EOPNOTSUPP;
+
+	/* looping over all vectors is not implemented yet */
+	if (msg->msg_iovlen != 1)
+		return -EOPNOTSUPP;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	len = iov[0].iov_len;
+	if (len > 0)
+		ret = rt_tcp_write(fd, iov[0].iov_base, len);
+
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_select
+ */
+static int rt_tcp_select(struct rtdm_fd *fd, rtdm_selector_t *selector,
+			 enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct tcp_socket *ts = rtdm_fd_to_private(fd);
+
+	switch (type) {
+	case XNSELECT_READ:
+		return rtdm_sem_select(&ts->sock.pending_sem, selector,
+				       XNSELECT_READ, fd_index);
+	case XNSELECT_WRITE:
+		return rtdm_event_select(&ts->send_evt, selector,
+					 XNSELECT_WRITE, fd_index);
+	default:
+		return -EBADF;
+	}
+
+	return -EINVAL;
+}
+
+/***
+ *  TCP-Initialisation
+ */
+static struct rtinet_protocol tcp_protocol = { .protocol = IPPROTO_TCP,
+					       .dest_socket =
+						       &rt_tcp_dest_socket,
+					       .rcv_handler = &rt_tcp_rcv,
+					       .err_handler = &rt_tcp_rcv_err,
+					       .init_socket = &rt_tcp_socket };
+
+static struct rtdm_driver tcp_driver = {
+    .profile_info =     RTDM_PROFILE_INFO(tcp,
+					RTDM_CLASS_NETWORK,
+					RTDM_SUBCLASS_RTNET,
+					RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =	1,
+    .context_size =     sizeof(struct tcp_socket),
+
+    .protocol_family =  PF_INET,
+    .socket_type =      SOCK_STREAM,
+
+    .ops = {
+	.socket     =   rt_inet_socket,
+	.close      =   rt_tcp_close,
+	.ioctl_rt   =   rt_tcp_ioctl,
+	.ioctl_nrt  =   rt_tcp_ioctl,
+	.read_rt    =   rt_tcp_read,
+	.write_rt   =   rt_tcp_write,
+	.recvmsg_rt =   rt_tcp_recvmsg,
+	.sendmsg_rt =   rt_tcp_sendmsg,
+	.select     =   rt_tcp_select,
+    },
+};
+
+static struct rtdm_device tcp_device = {
+	.driver = &tcp_driver,
+	.label = "tcp",
+};
+
+#ifdef CONFIG_XENO_OPT_VFILE
+/***
+ *  rt_tcp_proc_read
+ */
+static inline char *rt_tcp_string_of_state(u8 state)
+{
+	switch (state) {
+	case TCP_ESTABLISHED:
+		return "ESTABLISHED";
+	case TCP_SYN_SENT:
+		return "SYN_SENT";
+	case TCP_SYN_RECV:
+		return "SYN_RECV";
+	case TCP_FIN_WAIT1:
+		return "FIN_WAIT1";
+	case TCP_FIN_WAIT2:
+		return "FIN_WAIT2";
+	case TCP_TIME_WAIT:
+		return "TIME_WAIT";
+	case TCP_CLOSE:
+		return "CLOSE";
+	case TCP_CLOSE_WAIT:
+		return "CLOSE_WAIT";
+	case TCP_LAST_ACK:
+		return "LASK_ACK";
+	case TCP_LISTEN:
+		return "LISTEN";
+	case TCP_CLOSING:
+		return "CLOSING";
+	default:
+		return "UNKNOWN";
+	}
+}
+
+static int rtnet_ipv4_tcp_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	rtdm_lockctx_t context;
+	struct tcp_socket *ts;
+	u32 saddr, daddr;
+	u16 sport = 0, dport = 0; /* set to 0 to silence compiler */
+	char sbuffer[24];
+	char dbuffer[24];
+	int state;
+	int index;
+
+	xnvfile_printf(it, "Hash    Local Address           "
+			   "Foreign Address         State\n");
+
+	for (index = 0; index < RT_TCP_SOCKETS; index++) {
+		rtdm_lock_get_irqsave(&tcp_socket_base_lock, context);
+
+		ts = port_registry[index];
+		state = ts ? ts->tcp_state : TCP_CLOSE;
+
+		if (ts && ts->tcp_state != TCP_CLOSE) {
+			saddr = ts->saddr;
+			sport = ts->sport;
+			daddr = ts->daddr;
+			dport = ts->dport;
+		}
+
+		rtdm_lock_put_irqrestore(&tcp_socket_base_lock, context);
+
+		if (state != TCP_CLOSE) {
+			snprintf(sbuffer, sizeof(sbuffer), "%u.%u.%u.%u:%u",
+				 NIPQUAD(saddr), ntohs(sport));
+			snprintf(dbuffer, sizeof(dbuffer), "%u.%u.%u.%u:%u",
+				 NIPQUAD(daddr), ntohs(dport));
+
+			xnvfile_printf(it, "%04X    %-23s %-23s %s\n",
+				       sport & port_hash_mask, sbuffer, dbuffer,
+				       rt_tcp_string_of_state(state));
+		}
+	}
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_ipv4_tcp_vfile_ops = {
+	.show = rtnet_ipv4_tcp_show,
+};
+
+static struct xnvfile_regular rtnet_ipv4_tcp_vfile = {
+	.ops = &rtnet_ipv4_tcp_vfile_ops,
+};
+
+/***
+ *  rt_tcp_proc_register
+ */
+static int __init rt_tcp_proc_register(void)
+{
+	return xnvfile_init_regular("tcp", &rtnet_ipv4_tcp_vfile,
+				    &ipv4_proc_root);
+}
+
+/***
+ *  rt_tcp_proc_unregister
+ */
+
+static void rt_tcp_proc_unregister(void)
+{
+	xnvfile_destroy_regular(&rtnet_ipv4_tcp_vfile);
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/***
+ *  rt_tcp_init
+ */
+int __init rt_tcp_init(void)
+{
+	unsigned int skbs;
+	int i;
+	int ret;
+
+	if ((tcp_auto_port_start < 0) ||
+	    (tcp_auto_port_start >= 0x10000 - RT_TCP_SOCKETS))
+		tcp_auto_port_start = 1024;
+	tcp_auto_port_start =
+		htons(tcp_auto_port_start & (tcp_auto_port_mask & 0xFFFF));
+	tcp_auto_port_mask = htons(tcp_auto_port_mask | 0xFFFF0000);
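+	/* both values are kept in network byte order from here on, so they
+	   can be compared directly against the wire-format sin_port in
+	   rt_tcp_bind() */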
+
+	for (i = 0; i < ARRAY_SIZE(port_hash); i++)
+		INIT_HLIST_HEAD(&port_hash[i]);
+
+	/* Perform essential initialization of the RST|ACK socket */
+	skbs = rt_bare_socket_init(rst_fd, IPPROTO_TCP, RT_TCP_RST_PRIO,
+				   RT_TCP_RST_POOL_SIZE);
+	if (skbs < RT_TCP_RST_POOL_SIZE)
+		printk("rttcp: allocated only %d RST|ACK rtskbs\n", skbs);
+	rst_socket.sock.prot.inet.tos = 0;
+	rst_fd->refs = 1;
+	rtdm_lock_init(&rst_socket.socket_lock);
+
+	/*
+     * 100 ms forwarding timer with 8.38 ms slots
+     */
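+	/*
+	 * With granularity 23, one tick is 2^23 ns = 8.388608 ms and the
+	 * wheel gets (100000000 >> 23) + 1 = 12 slots, covering the full
+	 * 100 ms rotation period.
+	 */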
+	ret = timerwheel_init(100000000ull, 23);
+	if (ret < 0) {
+		rtdm_printk("rttcp: cann't initialize timerwheel task: %d\n",
+			    -ret);
+		goto out_1;
+	}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if ((ret = rt_tcp_proc_register()) < 0) {
+		rtdm_printk("rttcp: cann't initialize proc entry: %d\n", -ret);
+		goto out_2;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	rt_inet_add_protocol(&tcp_protocol);
+
+	ret = rtdm_dev_register(&tcp_device);
+	if (ret < 0) {
+		rtdm_printk("rttcp: cann't register RT TCP: %d\n", -ret);
+		goto out_3;
+	}
+
+	return ret;
+
+out_3:
+	rt_inet_del_protocol(&tcp_protocol);
+#ifdef CONFIG_XENO_OPT_VFILE
+	rt_tcp_proc_unregister();
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+out_2:
+	timerwheel_cleanup();
+
+out_1:
+	rt_bare_socket_cleanup(&rst_socket.sock);
+
+	return ret;
+}
+
+/***
+ *  rt_tcp_release
+ */
+void __exit rt_tcp_release(void)
+{
+	rt_inet_del_protocol(&tcp_protocol);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	rt_tcp_proc_unregister();
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	timerwheel_cleanup();
+
+	rt_bare_socket_cleanup(&rst_socket.sock);
+
+	rtdm_dev_unregister(&tcp_device);
+}
+
+module_init(rt_tcp_init);
+module_exit(rt_tcp_release);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.c
new file mode 100644
index 0000000..240837d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.c
@@ -0,0 +1,220 @@
+/***
+ *
+ *  ipv4/tcp/timerwheel.c - timerwheel implementation for RTnet
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/delay.h>
+#include <linux/slab.h>
+
+#include <rtdm/driver.h>
+#include "timerwheel.h"
+
+static struct {
+	/* timer pivot task */
+	rtdm_task_t pivot_task;
+
+	/* time length for one period of rotation of timerwheel */
+	nanosecs_rel_t timeout;
+
+	/* timer wheel slots for storing timers up to timerwheel_timeout */
+	unsigned int slots;
+
+	/* timer wheel interval timeout */
+	nanosecs_rel_t interval;
+
+	/* log2 of the interval in nanoseconds (wheel tick granularity) */
+	unsigned int interval_base;
+
+	/* timerwheel array */
+	struct list_head *ring;
+
+	/* timerwheel slot counter */
+	unsigned int current_slot;
+
+	/* timerwheel current slot lock */
+	rtdm_lock_t slot_lock;
+} wheel;
+
+static struct timerwheel_timer *timerwheel_get_from_current_slot(void)
+{
+	struct timerwheel_timer *timer = NULL;
+	struct list_head *slot_list;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&wheel.slot_lock, context);
+
+	slot_list = &wheel.ring[wheel.current_slot];
+
+	if (!list_empty(slot_list)) {
+		timer = list_first_entry(slot_list, struct timerwheel_timer,
+					 link);
+		list_del(&timer->link);
+		timer->slot = TIMERWHEEL_TIMER_UNUSED;
+		timer->refcount++;
+	}
+
+	rtdm_lock_put_irqrestore(&wheel.slot_lock, context);
+
+	return timer;
+}
+
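+/*
+ *  Arms @timer to fire in @expires nanoseconds: the delay is converted
+ *  to wheel ticks (expires >> interval_base) and the timer is queued
+ *  that many slots ahead of the current one, wrapping around the ring.
+ *  A still pending timer is re-armed; delays beyond one full rotation
+ *  are rejected with -EINVAL.
+ */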
+int timerwheel_add_timer(struct timerwheel_timer *timer, nanosecs_rel_t expires)
+{
+	rtdm_lockctx_t context;
+	int slot;
+
+	slot = expires >> wheel.interval_base;
+
+	if (slot >= wheel.slots)
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&wheel.slot_lock, context);
+
+	/* cancel timer if it's still running */
+	if (timer->slot >= 0)
+		list_del(&timer->link);
+
+	slot = slot + wheel.current_slot;
+	if (slot >= wheel.slots)
+		slot = slot - wheel.slots;
+
+	list_add_tail(&timer->link, &wheel.ring[slot]);
+	timer->slot = slot;
+
+	rtdm_lock_put_irqrestore(&wheel.slot_lock, context);
+
+	return 0;
+}
+
+static int timerwheel_sleep(void)
+{
+	int ret;
+
+	ret = rtdm_task_sleep(wheel.interval);
+	if (ret < 0)
+		return ret;
+
+	wheel.current_slot++;
+	if (wheel.current_slot == wheel.slots)
+		wheel.current_slot = 0;
+
+	return 0;
+}
+
+static void timerwheel_pivot(void *arg)
+{
+	struct timerwheel_timer *timer;
+	int ret;
+
+	while (1) {
+		ret = timerwheel_sleep();
+		if (ret < 0) {
+			rtdm_printk(
+				"timerwheel: timerwheel_pivot interrupted %d\n",
+				-ret);
+			break;
+		}
+
+		while ((timer = timerwheel_get_from_current_slot())) {
+			timer->handler(timer->data);
+
+			smp_mb();
+			timer->refcount--;
+		}
+	}
+}
+
+int timerwheel_remove_timer(struct timerwheel_timer *timer)
+{
+	rtdm_lockctx_t context;
+	int ret;
+
+	rtdm_lock_get_irqsave(&wheel.slot_lock, context);
+
+	if (timer->slot >= 0) {
+		list_del(&timer->link);
+		timer->slot = TIMERWHEEL_TIMER_UNUSED;
+		ret = 0;
+	} else
+		ret = -ENOENT;
+
+	rtdm_lock_put_irqrestore(&wheel.slot_lock, context);
+
+	return ret;
+}
+
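+/*
+ *  Removes @timer and waits until a handler invocation that may already
+ *  be in flight has finished (refcount drops back to zero). It sleeps
+ *  via msleep(), so it must only be called from non-RT Linux context.
+ */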
+void timerwheel_remove_timer_sync(struct timerwheel_timer *timer)
+{
+	u64 interval_ms = wheel.interval;
+
+	do_div(interval_ms, 1000000);
+
+	timerwheel_remove_timer(timer);
+
+	while (timer->refcount > 0)
+		msleep(interval_ms);
+}
+
+/*
+  timeout     - maximum expiration timeout for timers
+  granularity - exponent of 2 giving the length of one wheel tick in
+                nanoseconds
+*/
+int __init timerwheel_init(nanosecs_rel_t timeout, unsigned int granularity)
+{
+	int i;
+	int err;
+
+	/* the least possible slot timeout is set for 1ms */
+	if (granularity < 10)
+		return -EINVAL;
+
+	wheel.timeout = timeout;
+	wheel.interval_base = granularity;
+	wheel.slots = (timeout >> granularity) + 1;
+	wheel.interval = (1 << granularity);
+	wheel.current_slot = 0;
+
+	wheel.ring =
+		kmalloc(sizeof(struct list_head) * wheel.slots, GFP_KERNEL);
+	if (!wheel.ring)
+		return -ENOMEM;
+
+	for (i = 0; i < wheel.slots; i++)
+		INIT_LIST_HEAD(&wheel.ring[i]);
+
+	rtdm_lock_init(&wheel.slot_lock);
+
+	err = rtdm_task_init(&wheel.pivot_task, "rttcp timerwheel",
+			     timerwheel_pivot, NULL, 1, 0);
+	if (err) {
+		printk("timerwheel: error on pivot task initialization: %d\n",
+		       err);
+		kfree(wheel.ring);
+	}
+
+	return err;
+}
+
+void timerwheel_cleanup(void)
+{
+	rtdm_task_destroy(&wheel.pivot_task);
+	kfree(wheel.ring);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.h b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.h
new file mode 100644
index 0000000..b879c24
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/tcp/timerwheel.h
@@ -0,0 +1,62 @@
+/***
+ *
+ *  ipv4/tcp/timerwheel.h - timerwheel interface for RTnet
+ *
+ *  Copyright (C) 2009 Vladimir Zapolskiy <vladimir.zapolskiy@siemens.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License, version 2, as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#ifndef __TIMERWHEEL_H_
+#define __TIMERWHEEL_H_
+
+#include <linux/list.h>
+#include <rtdm/net.h>
+
+#define TIMERWHEEL_TIMER_UNUSED -1
+
+typedef void (*timerwheel_timer_handler)(void *);
+
+struct timerwheel_timer {
+	struct list_head link;
+	timerwheel_timer_handler handler;
+	void *data;
+	int slot;
+	volatile int refcount; /* only written by wheel task */
+};
+
+static inline void timerwheel_init_timer(struct timerwheel_timer *timer,
+					 timerwheel_timer_handler handler,
+					 void *data)
+{
+	timer->slot = TIMERWHEEL_TIMER_UNUSED;
+	timer->handler = handler;
+	timer->data = data;
+	timer->refcount = 0;
+}
+
+/* the data passed must remain valid until the timer fires */
+int timerwheel_add_timer(struct timerwheel_timer *timer,
+			 nanosecs_rel_t expires);
+
+int timerwheel_remove_timer(struct timerwheel_timer *timer);
+
+void timerwheel_remove_timer_sync(struct timerwheel_timer *timer);
+
+int timerwheel_init(nanosecs_rel_t timeout, unsigned int granularity);
+
+void timerwheel_cleanup(void);
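+
+/*
+ *  Typical usage (as in the RTnet TCP code): embed a timerwheel_timer
+ *  in the protocol state, initialise it once with
+ *  timerwheel_init_timer(), re-arm it with timerwheel_add_timer()
+ *  whenever a retransmission is scheduled, and tear it down with
+ *  timerwheel_remove_timer_sync().
+ */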
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Kconfig
new file mode 100644
index 0000000..a232794
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Kconfig
@@ -0,0 +1,6 @@
+config XENO_DRIVERS_NET_RTIPV4_UDP
+    tristate "UDP support"
+    depends on XENO_DRIVERS_NET_RTIPV4
+    default y
+    help
+      Enables UDP support of the RTnet Real-Time IPv4 protocol.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Makefile
new file mode 100644
index 0000000..b25a334
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTIPV4_UDP) += rtudp.o
+
+rtudp-y := udp.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/udp.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/udp.c
new file mode 100644
index 0000000..777c8a2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/ipv4/udp/udp.c
@@ -0,0 +1,831 @@
+/***
+ *
+ *  ipv4/udp.c - UDP implementation for RTnet
+ *
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/ip.h>
+#include <linux/err.h>
+#include <linux/udp.h>
+#include <linux/tcp.h>
+#include <linux/list.h>
+
+#include <rtdm/compat.h>
+#include <rtskb.h>
+#include <rtnet_internal.h>
+#include <rtnet_checksum.h>
+#include <rtnet_port.h>
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+#include <ipv4/ip_fragment.h>
+#include <ipv4/ip_output.h>
+#include <ipv4/ip_sock.h>
+#include <ipv4/protocol.h>
+#include <ipv4/route.h>
+#include <ipv4/udp.h>
+
+/***
+ *  This structure is used to register a UDP socket for reception. All
+ *  structures are kept in the port_registry array to increase the cache
+ *  locality during the critical port lookup in rt_udp_v4_lookup().
+ */
+struct udp_socket {
+	u16 sport; /* local port */
+	u32 saddr; /* local ip-addr */
+	struct rtsocket *sock;
+	struct hlist_node link;
+};
+
+/***
+ *  Automatic port number assignment
+ *
+ *  The automatic assignment of port numbers to unbound sockets is realised as
+ *  a simple addition of two values:
+ *   - the socket ID (lower 8 bits of file descriptor) which is set during
+ *     initialisation and left unchanged afterwards
+ *   - the start value auto_port_start which is a module parameter
+ *
+ *  auto_port_mask, also a module parameter, is used to define the range of
+ *  port numbers which are used for automatic assignment. Any number within
+ *  this range will be rejected when passed to bind_rt().
+ */
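+/*
+ *  Example (assuming RT_UDP_SOCKETS == 64): auto_port_mask then defaults
+ *  to ~63, so the defaults below reserve ports 1024..1087, and bind_rt()
+ *  rejects any port p with (p & auto_port_mask) == auto_port_start.
+ */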
+static unsigned int auto_port_start = 1024;
+static unsigned int auto_port_mask = ~(RT_UDP_SOCKETS - 1);
+static int free_ports = RT_UDP_SOCKETS;
+#define RT_PORT_BITMAP_WORDS                                                   \
+	((RT_UDP_SOCKETS + BITS_PER_LONG - 1) / BITS_PER_LONG)
+static unsigned long port_bitmap[RT_PORT_BITMAP_WORDS];
+static struct udp_socket port_registry[RT_UDP_SOCKETS];
+static DEFINE_RTDM_LOCK(udp_socket_base_lock);
+
+static struct hlist_head port_hash[RT_UDP_SOCKETS * 2];
+#define port_hash_mask (RT_UDP_SOCKETS * 2 - 1)
+
+MODULE_LICENSE("GPL");
+
+module_param(auto_port_start, uint, 0444);
+module_param(auto_port_mask, uint, 0444);
+MODULE_PARM_DESC(auto_port_start, "Start of automatically assigned port range");
+MODULE_PARM_DESC(auto_port_mask,
+		 "Mask that defines port range for automatic assignment");
+
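+/*
+ *  Looks up a registered socket by local port; INADDR_ANY on either
+ *  side acts as a wildcard in the address comparison.
+ */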
+static inline struct udp_socket *port_hash_search(u32 saddr, u16 sport)
+{
+	unsigned bucket = sport & port_hash_mask;
+	struct udp_socket *sock;
+
+	hlist_for_each_entry (sock, &port_hash[bucket], link)
+		if (sock->sport == sport &&
+		    (saddr == INADDR_ANY || sock->saddr == saddr ||
+		     sock->saddr == INADDR_ANY))
+			return sock;
+
+	return NULL;
+}
+
+static inline int port_hash_insert(struct udp_socket *sock, u32 saddr,
+				   u16 sport)
+{
+	unsigned bucket;
+
+	if (port_hash_search(saddr, sport))
+		return -EADDRINUSE;
+
+	bucket = sport & port_hash_mask;
+	sock->saddr = saddr;
+	sock->sport = sport;
+	hlist_add_head(&sock->link, &port_hash[bucket]);
+	return 0;
+}
+
+static inline void port_hash_del(struct udp_socket *sock)
+{
+	hlist_del(&sock->link);
+}
+
+/***
+ *  rt_udp_v4_lookup
+ */
+static inline struct rtsocket *rt_udp_v4_lookup(u32 daddr, u16 dport)
+{
+	rtdm_lockctx_t context;
+	struct udp_socket *sock;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+	sock = port_hash_search(daddr, dport);
+	if (sock && rt_socket_reference(sock->sock) == 0) {
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+		return sock->sock;
+	}
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	return NULL;
+}
+
+/***
+ *  rt_udp_bind - bind socket to local address
+ *  @s:     socket
+ *  @addr:  local address
+ */
+int rt_udp_bind(struct rtdm_fd *fd, struct rtsocket *sock,
+		const struct sockaddr __user *addr, socklen_t addrlen)
+{
+	struct sockaddr_in _sin, *sin;
+	rtdm_lockctx_t context;
+	int index;
+	int err = 0;
+
+	if (addrlen < sizeof(struct sockaddr_in))
+		return -EINVAL;
+
+	sin = rtnet_get_arg(fd, &_sin, addr, sizeof(_sin));
+	if (IS_ERR(sin))
+		return PTR_ERR(sin);
+
+	if ((sin->sin_port & auto_port_mask) == auto_port_start)
+		return -EINVAL;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+	if ((index = sock->prot.inet.reg_index) < 0) {
+		/* socket is being closed */
+		err = -EBADF;
+		goto unlock_out;
+	}
+	if (sock->prot.inet.state != TCP_CLOSE) {
+		err = -EINVAL;
+		goto unlock_out;
+	}
+
+	port_hash_del(&port_registry[index]);
+	if (port_hash_insert(&port_registry[index], sin->sin_addr.s_addr,
+			     sin->sin_port ?: index + auto_port_start)) {
+		port_hash_insert(&port_registry[index],
+				 port_registry[index].saddr,
+				 port_registry[index].sport);
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+		return -EADDRINUSE;
+	}
+
+	/* set the source-addr */
+	sock->prot.inet.saddr = port_registry[index].saddr;
+
+	/* set source port, if not set by user */
+	sock->prot.inet.sport = port_registry[index].sport;
+
+unlock_out:
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	return err;
+}
+
+/***
+ *  rt_udp_connect
+ */
+int rt_udp_connect(struct rtdm_fd *fd, struct rtsocket *sock,
+		   const struct sockaddr __user *serv_addr, socklen_t addrlen)
+{
+	struct sockaddr _sa, *sa;
+	struct sockaddr_in _sin, *sin;
+	rtdm_lockctx_t context;
+	int index;
+
+	if (addrlen < sizeof(struct sockaddr))
+		return -EINVAL;
+
+	sa = rtnet_get_arg(fd, &_sa, serv_addr, sizeof(_sa));
+	if (IS_ERR(sa))
+		return PTR_ERR(sa);
+
+	if (sa->sa_family == AF_UNSPEC) {
+		if ((index = sock->prot.inet.reg_index) < 0)
+			/* socket is being closed */
+			return -EBADF;
+
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+		sock->prot.inet.saddr = INADDR_ANY;
+		/* Note: The following line differs from standard
+		   stacks, and we also don't remove the socket from
+		   the port list. Might get fixed in the future... */
+		sock->prot.inet.sport = index + auto_port_start;
+		sock->prot.inet.daddr = INADDR_ANY;
+		sock->prot.inet.dport = 0;
+		sock->prot.inet.state = TCP_CLOSE;
+
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+	} else {
+		if (addrlen < sizeof(struct sockaddr_in))
+			return -EINVAL;
+
+		sin = rtnet_get_arg(fd, &_sin, serv_addr, sizeof(_sin));
+		if (IS_ERR(sin))
+			return PTR_ERR(sin);
+
+		if (sin->sin_family != AF_INET)
+			return -EINVAL;
+
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+		if (sock->prot.inet.state != TCP_CLOSE) {
+			rtdm_lock_put_irqrestore(&udp_socket_base_lock,
+						 context);
+			return -EINVAL;
+		}
+
+		sock->prot.inet.state = TCP_ESTABLISHED;
+		sock->prot.inet.daddr = sin->sin_addr.s_addr;
+		sock->prot.inet.dport = sin->sin_port;
+
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+	}
+
+	return 0;
+}
+
+/***
+ *  rt_udp_socket - create a new UDP-Socket
+ *  @s: socket
+ */
+int rt_udp_socket(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int ret;
+	int i;
+	int index;
+	rtdm_lockctx_t context;
+
+	if ((ret = rt_socket_init(fd, IPPROTO_UDP)) != 0)
+		return ret;
+
+	sock->prot.inet.saddr = INADDR_ANY;
+	sock->prot.inet.state = TCP_CLOSE;
+	sock->prot.inet.tos = 0;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+	/* enforce maximum number of UDP sockets */
+	if (free_ports == 0) {
+		rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+		rt_socket_cleanup(fd);
+		return -EAGAIN;
+	}
+	free_ports--;
+
+	/* find free auto-port in bitmap */
+	for (i = 0; i < RT_PORT_BITMAP_WORDS; i++)
+		if (port_bitmap[i] != (unsigned long)-1)
+			break;
+	index = ffz(port_bitmap[i]);
+	set_bit(index, &port_bitmap[i]);
+	index += i * BITS_PER_LONG;
+	sock->prot.inet.reg_index = index;
+	sock->prot.inet.sport = index + auto_port_start;
+
+	/* register UDP socket */
+	port_hash_insert(&port_registry[index], INADDR_ANY,
+			 sock->prot.inet.sport);
+	port_registry[index].sock = sock;
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	return 0;
+}
+
+/***
+ *  rt_udp_close
+ */
+void rt_udp_close(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	struct rtskb *del;
+	int port;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+	sock->prot.inet.state = TCP_CLOSE;
+
+	if (sock->prot.inet.reg_index >= 0) {
+		port = sock->prot.inet.reg_index;
+		clear_bit(port % BITS_PER_LONG,
+			  &port_bitmap[port / BITS_PER_LONG]);
+		port_hash_del(&port_registry[port]);
+
+		free_ports++;
+
+		sock->prot.inet.reg_index = -1;
+	}
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	/* cleanup already collected fragments */
+	rt_ip_frag_invalidate_socket(sock);
+
+	/* free packets in incoming queue */
+	while ((del = rtskb_dequeue(&sock->incoming)) != NULL)
+		kfree_rtskb(del);
+
+	rt_socket_cleanup(fd);
+}
+
+int rt_udp_ioctl(struct rtdm_fd *fd, unsigned int request, void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	struct _rtdm_setsockaddr_args args;
+	bool do_bind = false;
+	int ret;
+
+	/* fast path for common socket IOCTLs */
+	if (_IOC_TYPE(request) == RTIOC_TYPE_NETWORK)
+		return rt_socket_common_ioctl(fd, request, arg);
+
+	switch (request) {
+	COMPAT_CASE(_RTIOC_BIND):
+		do_bind = true;
+		fallthrough;
+	COMPAT_CASE(_RTIOC_CONNECT):
+		ret = rtdm_fd_get_setsockaddr_args(fd, &args, arg);
+		if (ret)
+			return ret;
+
+		if (do_bind)
+			return rt_udp_bind(fd, sock, args.addr, args.addrlen);
+
+		return rt_udp_connect(fd, sock, args.addr, args.addrlen);
+
+	default:
+		return rt_ip_ioctl(fd, request, arg);
+	}
+}
+
+/***
+ *  rt_udp_recvmsg
+ */
+ssize_t rt_udp_recvmsg(struct rtdm_fd *fd, struct user_msghdr *msg,
+		       int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	size_t len;
+	struct rtskb *skb;
+	struct rtskb *first_skb;
+	size_t copied = 0;
+	size_t block_size;
+	size_t data_len;
+	struct udphdr *uh;
+	struct sockaddr_in sin;
+	nanosecs_rel_t timeout = sock->timeout;
+	int ret, flags;
+	socklen_t namelen;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	/* non-blocking receive? */
+	if (msg_flags & MSG_DONTWAIT)
+		timeout = -1;
+
+	ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
+	if (unlikely(ret < 0))
+		switch (ret) {
+		default:
+			ret = -EBADF; /* socket has been closed */
+			fallthrough;
+		case -EWOULDBLOCK:
+		case -ETIMEDOUT:
+		case -EINTR:
+			rtdm_drop_iovec(iov, iov_fast);
+			return ret;
+		}
+
+	skb = rtskb_dequeue_chain(&sock->incoming);
+	RTNET_ASSERT(skb != NULL, return -EFAULT;);
+	uh = skb->h.uh;
+	first_skb = skb;
+
+	/* copy the address if required. */
+	if (msg->msg_name) {
+		memset(&sin, 0, sizeof(sin));
+		sin.sin_family = AF_INET;
+		sin.sin_port = uh->source;
+		sin.sin_addr.s_addr = skb->nh.iph->saddr;
+
+		if (msg->msg_namelen < 0) {
+			ret = -EINVAL;
+			goto fail;
+		}
+		namelen = min(sizeof(sin), (size_t)msg->msg_namelen);
+
+		ret = rtnet_put_arg(fd, msg->msg_name, &sin, namelen);
+		if (ret)
+			goto fail;
+
+		msg->msg_namelen = sizeof(sin);
+	}
+
+	data_len = ntohs(uh->len) - sizeof(struct udphdr);
+
+	/* remove the UDP header */
+	__rtskb_pull(skb, sizeof(struct udphdr));
+
+	flags = msg->msg_flags & ~MSG_TRUNC;
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+
+	/* iterate over all IP fragments */
+	do {
+		rtskb_trim(skb, data_len);
+
+		block_size = skb->len;
+		copied += block_size;
+		data_len -= block_size;
+
+		/* The data must not be longer than the available buffer size */
+		if (copied > len) {
+			block_size -= copied - len;
+			copied = len;
+			flags |= MSG_TRUNC;
+		}
+
+		/* copy the data */
+		ret = rtnet_write_to_iov(fd, iov, msg->msg_iovlen, skb->data,
+					 block_size);
+		if (ret)
+			goto fail;
+
+		/* next fragment */
+		skb = skb->next;
+	} while (skb && !(flags & MSG_TRUNC));
+
+	/* did we copy all bytes? */
+	if (data_len > 0)
+		flags |= MSG_TRUNC;
+
+	msg->msg_flags = flags;
+out:
+	if ((msg_flags & MSG_PEEK) == 0)
+		kfree_rtskb(first_skb);
+	else {
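+		/* MSG_PEEK: restore the UDP header and requeue the datagram
+		   so that the next receive sees it again */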
+		__rtskb_push(first_skb, sizeof(struct udphdr));
+		rtskb_queue_head(&sock->incoming, first_skb);
+		rtdm_sem_up(&sock->pending_sem);
+	}
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return copied;
+fail:
+	copied = ret;
+	goto out;
+}
+
+/***
+ *  struct udpfakehdr
+ */
+struct udpfakehdr {
+	struct udphdr uh;
+	u32 daddr;
+	u32 saddr;
+	struct rtdm_fd *fd;
+	struct iovec *iov;
+	int iovlen;
+	u32 wcheck;
+};
+
+/***
+ *  rt_udp_getfrag
+ */
+static int rt_udp_getfrag(const void *p, unsigned char *to, unsigned int offset,
+			  unsigned int fraglen)
+{
+	struct udpfakehdr *ufh = (struct udpfakehdr *)p;
+	int ret;
+
+	/* We should optimize this function a bit (copy+csum...)! */
+	if (offset) {
+		ret = rtnet_read_from_iov(ufh->fd, ufh->iov, ufh->iovlen, to,
+					  fraglen);
+		return ret < 0 ? ret : 0;
+	}
+
+	ret = rtnet_read_from_iov(ufh->fd, ufh->iov, ufh->iovlen,
+				  to + sizeof(struct udphdr),
+				  fraglen - sizeof(struct udphdr));
+	if (ret < 0)
+		return ret;
+
+	/* Checksum of the complete data part of the UDP message: */
+	ufh->wcheck =
+		rtnet_csum(to + sizeof(struct udphdr),
+			   fraglen - sizeof(struct udphdr), ufh->wcheck);
+
+	/* Checksum of the udp header: */
+	ufh->wcheck = rtnet_csum((unsigned char *)ufh, sizeof(struct udphdr),
+				 ufh->wcheck);
+
+	ufh->uh.check =
+		csum_tcpudp_magic(ufh->saddr, ufh->daddr, ntohs(ufh->uh.len),
+				  IPPROTO_UDP, ufh->wcheck);
+
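+	/* per RFC 768, a checksum that computes to zero is transmitted as
+	   all ones, since a zero checksum field means "no checksum" */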
+	if (ufh->uh.check == 0)
+		ufh->uh.check = -1;
+
+	memcpy(to, ufh, sizeof(struct udphdr));
+
+	return 0;
+}
+
+/***
+ *  rt_udp_sendmsg
+ */
+ssize_t rt_udp_sendmsg(struct rtdm_fd *fd, const struct user_msghdr *msg,
+		       int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	size_t len;
+	int ulen;
+	struct sockaddr_in _sin, *sin;
+	struct udpfakehdr ufh;
+	struct dest_route rt;
+	u32 saddr;
+	u32 daddr;
+	u16 dport;
+	int err;
+	rtdm_lockctx_t context;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
+		return -EOPNOTSUPP;
+
+	if (msg_flags & ~(MSG_DONTROUTE | MSG_DONTWAIT))
+		return -EINVAL;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	err = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (err)
+		return err;
+
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+	if ((len < 0) ||
+	    (len > 0xFFFF - sizeof(struct iphdr) - sizeof(struct udphdr))) {
+		err = -EMSGSIZE;
+		goto out;
+	}
+
+	ulen = len + sizeof(struct udphdr);
+
+	if (msg->msg_name && msg->msg_namelen == sizeof(*sin)) {
+		sin = rtnet_get_arg(fd, &_sin, msg->msg_name, sizeof(_sin));
+		if (IS_ERR(sin)) {
+			err = PTR_ERR(sin);
+			goto out;
+		}
+
+		if (sin->sin_family != AF_INET &&
+		    sin->sin_family != AF_UNSPEC) {
+			err = -EINVAL;
+			goto out;
+		}
+
+		daddr = sin->sin_addr.s_addr;
+		dport = sin->sin_port;
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+	} else {
+		rtdm_lock_get_irqsave(&udp_socket_base_lock, context);
+
+		if (sock->prot.inet.state != TCP_ESTABLISHED) {
+			rtdm_lock_put_irqrestore(&udp_socket_base_lock,
+						 context);
+			err = -ENOTCONN;
+			goto out;
+		}
+
+		daddr = sock->prot.inet.daddr;
+		dport = sock->prot.inet.dport;
+	}
+
+	saddr = sock->prot.inet.saddr;
+	ufh.uh.source = sock->prot.inet.sport;
+
+	rtdm_lock_put_irqrestore(&udp_socket_base_lock, context);
+
+	if ((daddr | dport) == 0) {
+		err = -EINVAL;
+		goto out;
+	}
+
+	/* get output route */
+	err = rt_ip_route_output(&rt, daddr, saddr);
+	if (err)
+		goto out;
+
+	/* we found a route; note that the routing dest-addr could be the netmask */
+	ufh.saddr = saddr != INADDR_ANY ? saddr : rt.rtdev->local_ip;
+	ufh.daddr = daddr;
+	ufh.uh.dest = dport;
+	ufh.uh.len = htons(ulen);
+	ufh.uh.check = 0;
+	ufh.fd = fd;
+	ufh.iov = iov;
+	ufh.iovlen = msg->msg_iovlen;
+	ufh.wcheck = 0;
+
+	err = rt_ip_build_xmit(sock, rt_udp_getfrag, &ufh, ulen, &rt,
+			       msg_flags);
+
+	/* Drop the reference obtained in rt_ip_route_output() */
+	rtdev_dereference(rt.rtdev);
+out:
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return err ?: len;
+}
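+
+/*
+ * User-space view (illustrative sketch, not part of this file): an
+ * application reaches rt_udp_sendmsg() through the ordinary socket API,
+ * which the RTDM "udp" protocol device registered below maps onto this
+ * handler, e.g.:
+ *
+ *   int s = socket(AF_INET, SOCK_DGRAM, 0);
+ *   struct sockaddr_in dst = {
+ *       .sin_family = AF_INET,
+ *       .sin_port = htons(5000),                   // example port
+ *       .sin_addr.s_addr = inet_addr("10.0.0.2"),  // example address
+ *   };
+ *   sendto(s, buf, len, 0, (struct sockaddr *)&dst, sizeof(dst));
+ */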
+
+/***
+ *  rt_udp_check
+ */
+static inline unsigned short rt_udp_check(struct udphdr *uh, int len,
+					  unsigned long saddr,
+					  unsigned long daddr,
+					  unsigned long base)
+{
+	return csum_tcpudp_magic(saddr, daddr, len, IPPROTO_UDP, base);
+}
+
+struct rtsocket *rt_udp_dest_socket(struct rtskb *skb)
+{
+	struct udphdr *uh = skb->h.uh;
+	unsigned short ulen = ntohs(uh->len);
+	u32 saddr = skb->nh.iph->saddr;
+	u32 daddr = skb->nh.iph->daddr;
+	struct rtnet_device *rtdev = skb->rtdev;
+
+	if (uh->check == 0)
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+	/* ip_summed (yet) never equals CHECKSUM_PARTIAL
+	else if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		skb->ip_summed = CHECKSUM_UNNECESSARY;
+
+		if (!rt_udp_check(uh, ulen, saddr, daddr, skb->csum))
+			return NULL;
+
+		skb->ip_summed = CHECKSUM_NONE;
+	} */
+
+	if (skb->ip_summed != CHECKSUM_UNNECESSARY)
+		skb->csum =
+			csum_tcpudp_nofold(saddr, daddr, ulen, IPPROTO_UDP, 0);
+
+	/* patch broadcast daddr */
+	if (daddr == rtdev->broadcast_ip)
+		daddr = rtdev->local_ip;
+
+	/* find the destination socket */
+	skb->sk = rt_udp_v4_lookup(daddr, uh->dest);
+
+	return skb->sk;
+}
+
+/***
+ *  rt_udp_rcv
+ */
+void rt_udp_rcv(struct rtskb *skb)
+{
+	struct rtsocket *sock = skb->sk;
+	void (*callback_func)(struct rtdm_fd *, void *);
+	void *callback_arg;
+	rtdm_lockctx_t context;
+
+	rtskb_queue_tail(&sock->incoming, skb);
+	rtdm_sem_up(&sock->pending_sem);
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+	callback_func = sock->callback_func;
+	callback_arg = sock->callback_arg;
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	if (callback_func)
+		callback_func(rt_socket_fd(sock), callback_arg);
+}
+
+/***
+ *  rt_udp_rcv_err
+ */
+void rt_udp_rcv_err(struct rtskb *skb)
+{
+	rtdm_printk("RTnet: rt_udp_rcv err\n");
+}
+
+/***
+ *  UDP-Initialisation
+ */
+static struct rtinet_protocol udp_protocol = { .protocol = IPPROTO_UDP,
+					       .dest_socket =
+						       &rt_udp_dest_socket,
+					       .rcv_handler = &rt_udp_rcv,
+					       .err_handler = &rt_udp_rcv_err,
+					       .init_socket = &rt_udp_socket };
+
+static struct rtdm_driver udp_driver = {
+    .profile_info =     RTDM_PROFILE_INFO(udp,
+                                        RTDM_CLASS_NETWORK,
+                                        RTDM_SUBCLASS_RTNET,
+                                        RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =     1,
+    .context_size =     sizeof(struct rtsocket),
+
+    .protocol_family =  PF_INET,
+    .socket_type =      SOCK_DGRAM,
+
+    /* PF_INET/SOCK_DGRAM defaults to UDP */
+    .ops = {
+        .socket =       rt_inet_socket,
+        .close =        rt_udp_close,
+        .ioctl_rt =     rt_udp_ioctl,
+        .ioctl_nrt =    rt_udp_ioctl,
+        .recvmsg_rt =   rt_udp_recvmsg,
+        .sendmsg_rt =   rt_udp_sendmsg,
+        .select =       rt_socket_select_bind,
+    },
+};
+
+static struct rtdm_device udp_device = {
+	.driver = &udp_driver,
+	.label = "udp",
+};
+
+/***
+ *  rt_udp_init
+ */
+static int __init rt_udp_init(void)
+{
+	int i, err;
+
+	if ((auto_port_start < 0) ||
+	    (auto_port_start >= 0x10000 - RT_UDP_SOCKETS))
+		auto_port_start = 1024;
+	auto_port_start = htons(auto_port_start & (auto_port_mask & 0xFFFF));
+	auto_port_mask = htons(auto_port_mask | 0xFFFF0000);
+
+	rt_inet_add_protocol(&udp_protocol);
+
+	for (i = 0; i < ARRAY_SIZE(port_hash); i++)
+		INIT_HLIST_HEAD(&port_hash[i]);
+
+	err = rtdm_dev_register(&udp_device);
+	if (err)
+		rt_inet_del_protocol(&udp_protocol);
+	return err;
+}
+
+/***
+ *  rt_udp_release
+ */
+static void __exit rt_udp_release(void)
+{
+	rtdm_dev_unregister(&udp_device);
+	rt_inet_del_protocol(&udp_protocol);
+}
+
+module_init(rt_udp_init);
+module_exit(rt_udp_release);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Kconfig
new file mode 100644
index 0000000..4c83b73
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Kconfig
@@ -0,0 +1,14 @@
+config XENO_DRIVERS_NET_RTPACKET
+    depends on XENO_DRIVERS_NET
+    tristate "Real-Time Packet Socket Support"
+    default y
+    help
+    Enables real-time packet sockets for RTnet. This support is
+    implemented in a separate module. When loaded, application programs
+    can send and receive so-called "cooked" packets directly at OSI layer
+    2 (device layer). This means that RTnet will still maintain the
+    device-dependent packet header but leave the full data segment to the
+    user.
+
+    Examples like raw-ethernet or netshm make use of this support. See
+    also Linux man page packet(7).
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Makefile
new file mode 100644
index 0000000..abee4f6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTPACKET) += rtpacket.o
+
+rtpacket-y := af_packet.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/af_packet.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/af_packet.c
new file mode 100644
index 0000000..6013ff9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/packet/af_packet.c
@@ -0,0 +1,659 @@
+/***
+ *
+ *  packet/af_packet.c
+ *
+ *  RTnet - real-time networking subsystem
+ *  Copyright (C) 2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *  Copyright (C) 2006 Jorge Almeida <j-almeida@criticalsoftware.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/err.h>
+
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+#include <stack_mgr.h>
+
+MODULE_LICENSE("GPL");
+
+/***
+ *  rt_packet_rcv
+ */
+static int rt_packet_rcv(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct rtsocket *sock =
+		container_of(pt, struct rtsocket, prot.packet.packet_type);
+	int ifindex = sock->prot.packet.ifindex;
+	void (*callback_func)(struct rtdm_fd *, void *);
+	void *callback_arg;
+	rtdm_lockctx_t context;
+
+	if (unlikely((ifindex != 0) && (ifindex != skb->rtdev->ifindex)))
+		return -EUNATCH;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+	if (pt->type == htons(ETH_P_ALL)) {
+		struct rtskb *clone_skb = rtskb_clone(skb, &sock->skb_pool);
+		if (clone_skb == NULL)
+			goto out;
+		skb = clone_skb;
+	} else
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+		if (unlikely(rtskb_acquire(skb, &sock->skb_pool) < 0)) {
+			kfree_rtskb(skb);
+			goto out;
+		}
+
+	rtskb_queue_tail(&sock->incoming, skb);
+	rtdm_sem_up(&sock->pending_sem);
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+	callback_func = sock->callback_func;
+	callback_arg = sock->callback_arg;
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	if (callback_func)
+		callback_func(rt_socket_fd(sock), callback_arg);
+
+out:
+	return 0;
+}
+
+static bool rt_packet_trylock(struct rtpacket_type *pt)
+{
+	struct rtsocket *sock =
+		container_of(pt, struct rtsocket, prot.packet.packet_type);
+	struct rtdm_fd *fd = rtdm_private_to_fd(sock);
+
+	if (rtdm_fd_lock(fd) < 0)
+		return false;
+
+	return true;
+}
+
+static void rt_packet_unlock(struct rtpacket_type *pt)
+{
+	struct rtsocket *sock =
+		container_of(pt, struct rtsocket, prot.packet.packet_type);
+	struct rtdm_fd *fd = rtdm_private_to_fd(sock);
+
+	rtdm_fd_unlock(fd);
+}
+
+/***
+ *  rt_packet_bind
+ */
+static int rt_packet_bind(struct rtdm_fd *fd, struct rtsocket *sock,
+			  const struct sockaddr *addr, socklen_t addrlen)
+{
+	struct sockaddr_ll _sll, *sll;
+	struct rtpacket_type *pt = &sock->prot.packet.packet_type;
+	int new_type;
+	int ret;
+	rtdm_lockctx_t context;
+
+	if (addrlen < sizeof(struct sockaddr_ll))
+		return -EINVAL;
+
+	sll = rtnet_get_arg(fd, &_sll, addr, sizeof(_sll));
+	if (IS_ERR(sll))
+		return PTR_ERR(sll);
+
+	if (sll->sll_family != AF_PACKET)
+		return -EINVAL;
+
+	new_type =
+		(sll->sll_protocol != 0) ? sll->sll_protocol : sock->protocol;
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+	/* release existing binding */
+	if (pt->type != 0)
+		rtdev_remove_pack(pt);
+
+	pt->type = new_type;
+	sock->prot.packet.ifindex = sll->sll_ifindex;
+
+	/* if protocol is non-zero, register the packet type */
+	if (new_type != 0) {
+		pt->handler = rt_packet_rcv;
+		pt->err_handler = NULL;
+		pt->trylock = rt_packet_trylock;
+		pt->unlock = rt_packet_unlock;
+
+		ret = rtdev_add_pack(pt);
+	} else
+		ret = 0;
+
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	return ret;
+}
+
+/***
+ *  rt_packet_getsockname
+ */
+static int rt_packet_getsockname(struct rtdm_fd *fd, struct rtsocket *sock,
+				 struct sockaddr *addr, socklen_t *addrlen)
+{
+	struct sockaddr_ll _sll, *sll;
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+	socklen_t _namelen, *namelen;
+	int ret;
+
+	namelen = rtnet_get_arg(fd, &_namelen, addrlen, sizeof(_namelen));
+	if (IS_ERR(namelen))
+		return PTR_ERR(namelen);
+
+	if (*namelen < sizeof(struct sockaddr_ll))
+		return -EINVAL;
+
+	sll = rtnet_get_arg(fd, &_sll, addr, sizeof(_sll));
+	if (IS_ERR(sll))
+		return PTR_ERR(sll);
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+	sll->sll_family = AF_PACKET;
+	sll->sll_ifindex = sock->prot.packet.ifindex;
+	sll->sll_protocol = sock->protocol;
+
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	rtdev = rtdev_get_by_index(sll->sll_ifindex);
+	if (rtdev != NULL) {
+		sll->sll_hatype = rtdev->type;
+		sll->sll_halen = rtdev->addr_len;
+		memcpy(sll->sll_addr, rtdev->dev_addr, rtdev->addr_len);
+		rtdev_dereference(rtdev);
+	} else {
+		sll->sll_hatype = 0;
+		sll->sll_halen = 0;
+	}
+
+	*namelen = sizeof(struct sockaddr_ll);
+
+	ret = rtnet_put_arg(fd, addr, sll, sizeof(*sll));
+	if (ret)
+		return ret;
+
+	return rtnet_put_arg(fd, addrlen, namelen, sizeof(*namelen));
+}
+
+/***
+ * rt_packet_socket - initialize a packet socket
+ */
+static int rt_packet_socket(struct rtdm_fd *fd, int protocol)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int ret;
+
+	if ((ret = rt_socket_init(fd, protocol)) != 0)
+		return ret;
+
+	sock->prot.packet.packet_type.type = protocol;
+	sock->prot.packet.ifindex = 0;
+	sock->prot.packet.packet_type.trylock = rt_packet_trylock;
+	sock->prot.packet.packet_type.unlock = rt_packet_unlock;
+
+	/* if protocol is non-zero, register the packet type */
+	if (protocol != 0) {
+		sock->prot.packet.packet_type.handler = rt_packet_rcv;
+		sock->prot.packet.packet_type.err_handler = NULL;
+
+		if ((ret = rtdev_add_pack(&sock->prot.packet.packet_type)) <
+		    0) {
+			rt_socket_cleanup(fd);
+			return ret;
+		}
+	}
+
+	return 0;
+}
+
+/***
+ *  rt_packet_close
+ */
+static void rt_packet_close(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	struct rtpacket_type *pt = &sock->prot.packet.packet_type;
+	struct rtskb *del;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+	if (pt->type != 0) {
+		rtdev_remove_pack(pt);
+		pt->type = 0;
+	}
+
+	rtdm_lock_put_irqrestore(&sock->param_lock, context);
+
+	/* free packets in incoming queue */
+	while ((del = rtskb_dequeue(&sock->incoming)) != NULL) {
+		kfree_rtskb(del);
+	}
+
+	rt_socket_cleanup(fd);
+}
+
+/***
+ *  rt_packet_ioctl
+ */
+static int rt_packet_ioctl(struct rtdm_fd *fd, unsigned int request,
+			   void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	const struct _rtdm_setsockaddr_args *setaddr;
+	struct _rtdm_setsockaddr_args _setaddr;
+	const struct _rtdm_getsockaddr_args *getaddr;
+	struct _rtdm_getsockaddr_args _getaddr;
+
+	/* fast path for common socket IOCTLs */
+	if (_IOC_TYPE(request) == RTIOC_TYPE_NETWORK)
+		return rt_socket_common_ioctl(fd, request, arg);
+
+	switch (request) {
+	case _RTIOC_BIND:
+		setaddr = rtnet_get_arg(fd, &_setaddr, arg, sizeof(_setaddr));
+		if (IS_ERR(setaddr))
+			return PTR_ERR(setaddr);
+		return rt_packet_bind(fd, sock, setaddr->addr,
+				      setaddr->addrlen);
+
+	case _RTIOC_GETSOCKNAME:
+		getaddr = rtnet_get_arg(fd, &_getaddr, arg, sizeof(_getaddr));
+		if (IS_ERR(getaddr))
+			return PTR_ERR(getaddr);
+		return rt_packet_getsockname(fd, sock, getaddr->addr,
+					     getaddr->addrlen);
+
+	default:
+		return rt_socket_if_ioctl(fd, request, arg);
+	}
+}
+
+/***
+ *  rt_packet_recvmsg
+ */
+static ssize_t rt_packet_recvmsg(struct rtdm_fd *fd, struct user_msghdr *msg,
+				 int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	ssize_t len;
+	size_t copy_len;
+	struct rtskb *rtskb;
+	struct sockaddr_ll sll;
+	int ret;
+	nanosecs_rel_t timeout = sock->timeout;
+	socklen_t namelen;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	/* non-blocking receive? */
+	if (msg_flags & MSG_DONTWAIT)
+		timeout = -1;
+
+	ret = rtdm_sem_timeddown(&sock->pending_sem, timeout, NULL);
+	if (unlikely(ret < 0))
+		switch (ret) {
+		default:
+			ret = -EBADF; /* socket has been closed */
+			fallthrough;
+		case -EWOULDBLOCK:
+		case -ETIMEDOUT:
+		case -EINTR:
+			rtdm_drop_iovec(iov, iov_fast);
+			return ret;
+		}
+
+	rtskb = rtskb_dequeue_chain(&sock->incoming);
+	RTNET_ASSERT(rtskb != NULL, return -EFAULT;);
+
+	/* copy the address if required. */
+	if (msg->msg_name) {
+		struct rtnet_device *rtdev = rtskb->rtdev;
+		memset(&sll, 0, sizeof(sll));
+		sll.sll_family = AF_PACKET;
+		sll.sll_hatype = rtdev->type;
+		sll.sll_protocol = rtskb->protocol;
+		sll.sll_pkttype = rtskb->pkt_type;
+		sll.sll_ifindex = rtdev->ifindex;
+
+		if (msg->msg_namelen < 0) {
+			ret = -EINVAL;
+			goto fail;
+		}
+		namelen = min(sizeof(sll), (size_t)msg->msg_namelen);
+
+		/* Ethernet-specific - a generic parse handler is what we really need here */
+		memcpy(sll.sll_addr, rtskb->mac.ethernet->h_source, ETH_ALEN);
+		sll.sll_halen = ETH_ALEN;
+		ret = rtnet_put_arg(fd, msg->msg_name, &sll, namelen);
+		if (ret)
+			goto fail;
+
+		msg->msg_namelen = sizeof(sll);
+	}
+
+	/* Include the header in raw delivery */
+	if (rtdm_fd_to_context(fd)->device->driver->socket_type != SOCK_DGRAM)
+		rtskb_push(rtskb, rtskb->data - rtskb->mac.raw);
+
+	/* The data must not be longer than the available buffer size */
+	copy_len = rtskb->len;
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+	if (len < 0) {
+		copy_len = len;
+		goto out;
+	}
+
+	if (copy_len > len) {
+		copy_len = len;
+		msg->msg_flags |= MSG_TRUNC;
+	}
+
+	copy_len = rtnet_write_to_iov(fd, iov, msg->msg_iovlen, rtskb->data,
+				      copy_len);
+out:
+	if ((msg_flags & MSG_PEEK) == 0) {
+		kfree_rtskb(rtskb);
+	} else {
+		rtskb_queue_head(&sock->incoming, rtskb);
+		rtdm_sem_up(&sock->pending_sem);
+	}
+
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return copy_len;
+fail:
+	copy_len = ret;
+	goto out;
+}
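+
+/*
+ * Behavioural summary of rt_packet_recvmsg() (descriptive comment):
+ * datagrams larger than the supplied iovec are truncated and MSG_TRUNC
+ * is set; with MSG_PEEK the rtskb is requeued and the semaphore raised
+ * again, so a subsequent call sees the same frame.
+ */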
+
+/***
+ *  rt_packet_sendmsg
+ */
+static ssize_t rt_packet_sendmsg(struct rtdm_fd *fd,
+				 const struct user_msghdr *msg, int msg_flags)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	size_t len;
+	struct sockaddr_ll _sll, *sll;
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned short proto;
+	unsigned char *addr;
+	int ifindex;
+	ssize_t ret;
+	struct iovec iov_fast[RTDM_IOV_FASTMAX], *iov;
+
+	if (msg_flags & MSG_OOB) /* Mirror BSD error message compatibility */
+		return -EOPNOTSUPP;
+	if (msg_flags & ~MSG_DONTWAIT)
+		return -EINVAL;
+
+	if (msg->msg_iovlen < 0)
+		return -EINVAL;
+
+	if (msg->msg_iovlen == 0)
+		return 0;
+
+	ret = rtdm_get_iovec(fd, &iov, msg, iov_fast);
+	if (ret)
+		return ret;
+
+	if (msg->msg_name == NULL) {
+		/* Note: we do not care about races with rt_packet_bind here;
+		   taking care of them is the user's responsibility. */
+		ifindex = sock->prot.packet.ifindex;
+		proto = sock->prot.packet.packet_type.type;
+		addr = NULL;
+		sll = NULL;
+	} else {
+		sll = rtnet_get_arg(fd, &_sll, msg->msg_name, sizeof(_sll));
+		if (IS_ERR(sll)) {
+			ret = PTR_ERR(sll);
+			goto abort;
+		}
+
+		if ((msg->msg_namelen < sizeof(struct sockaddr_ll)) ||
+		    (msg->msg_namelen <
+		     (sll->sll_halen +
+		      offsetof(struct sockaddr_ll, sll_addr))) ||
+		    ((sll->sll_family != AF_PACKET) &&
+		     (sll->sll_family != AF_UNSPEC))) {
+			ret = -EINVAL;
+			goto abort;
+		}
+
+		ifindex = sll->sll_ifindex;
+		proto = sll->sll_protocol;
+		addr = sll->sll_addr;
+	}
+
+	if ((rtdev = rtdev_get_by_index(ifindex)) == NULL) {
+		ret = -ENODEV;
+		goto abort;
+	}
+
+	len = rtdm_get_iov_flatlen(iov, msg->msg_iovlen);
+	rtskb = alloc_rtskb(rtdev->hard_header_len + len, &sock->skb_pool);
+	if (rtskb == NULL) {
+		ret = -ENOBUFS;
+		goto out;
+	}
+
+	/* If an RTmac discipline is active, this becomes a pure sanity check
+	   to avoid writing beyond rtskb boundaries. The hard check is then
+	   performed upon rtdev_xmit() by the discipline's xmit handler. */
+	if (len >
+	    rtdev->mtu +
+		    ((rtdm_fd_to_context(fd)->device->driver->socket_type ==
+		      SOCK_RAW) ?
+			     rtdev->hard_header_len :
+			     0)) {
+		ret = -EMSGSIZE;
+		goto err;
+	}
+
+	if ((sll != NULL) && (sll->sll_halen != rtdev->addr_len)) {
+		ret = -EINVAL;
+		goto err;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	rtskb->rtdev = rtdev;
+	rtskb->priority = sock->priority;
+
+	if (rtdev->hard_header) {
+		int hdr_len;
+
+		ret = -EINVAL;
+		hdr_len = rtdev->hard_header(rtskb, rtdev, ntohs(proto), addr,
+					     NULL, len);
+		if (rtdm_fd_to_context(fd)->device->driver->socket_type !=
+		    SOCK_DGRAM) {
+			rtskb->tail = rtskb->data;
+			rtskb->len = 0;
+		} else if (hdr_len < 0)
+			goto err;
+	}
+
+	ret = rtnet_read_from_iov(fd, iov, msg->msg_iovlen,
+				  rtskb_put(rtskb, len), len);
+
+	if ((rtdev->flags & IFF_UP) != 0) {
+		if ((ret = rtdev_xmit(rtskb)) == 0)
+			ret = len;
+	} else {
+		ret = -ENETDOWN;
+		goto err;
+	}
+
+out:
+	rtdev_dereference(rtdev);
+abort:
+	rtdm_drop_iovec(iov, iov_fast);
+
+	return ret;
+err:
+	kfree_rtskb(rtskb);
+	goto out;
+}
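+
+/*
+ * User-space view (illustrative sketch, not part of this file): with
+ * the SOCK_DGRAM ("cooked") device below, the caller only supplies the
+ * destination in a sockaddr_ll and RTnet builds the link-layer header:
+ *
+ *   unsigned char dst_mac[ETH_ALEN] = { 0x00, 0x0e, 0x8c, 0x12, 0x34, 0x56 };
+ *   struct sockaddr_ll sll = {
+ *       .sll_family = AF_PACKET,
+ *       .sll_protocol = htons(0x9021),  // hypothetical ethertype
+ *       .sll_ifindex = ifidx,           // target interface index
+ *       .sll_halen = ETH_ALEN,
+ *   };
+ *   memcpy(sll.sll_addr, dst_mac, ETH_ALEN);
+ *   int s = socket(PF_PACKET, SOCK_DGRAM, htons(0x9021));
+ *   sendto(s, data, len, 0, (struct sockaddr *)&sll, sizeof(sll));
+ */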
+
+static struct rtdm_driver packet_proto_drv = {
+    .profile_info =     RTDM_PROFILE_INFO(packet,
+					RTDM_CLASS_NETWORK,
+					RTDM_SUBCLASS_RTNET,
+					RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =     1,
+    .context_size =     sizeof(struct rtsocket),
+
+    .protocol_family =  PF_PACKET,
+    .socket_type =      SOCK_DGRAM,
+
+    .ops = {
+	.socket =       rt_packet_socket,
+	.close =        rt_packet_close,
+	.ioctl_rt =     rt_packet_ioctl,
+	.ioctl_nrt =    rt_packet_ioctl,
+	.recvmsg_rt =   rt_packet_recvmsg,
+	.sendmsg_rt =   rt_packet_sendmsg,
+	.select =       rt_socket_select_bind,
+    },
+};
+
+static struct rtdm_device packet_proto_dev = {
+	.driver = &packet_proto_drv,
+	.label = "packet",
+};
+
+static struct rtdm_driver raw_packet_proto_drv = {
+    .profile_info =     RTDM_PROFILE_INFO(raw_packet,
+					RTDM_CLASS_NETWORK,
+					RTDM_SUBCLASS_RTNET,
+					RTNET_RTDM_VER),
+    .device_flags =     RTDM_PROTOCOL_DEVICE,
+    .device_count =     1,
+    .context_size =     sizeof(struct rtsocket),
+
+    .protocol_family =  PF_PACKET,
+    .socket_type =      SOCK_RAW,
+
+    .ops = {
+	.socket =       rt_packet_socket,
+	.close =        rt_packet_close,
+	.ioctl_rt =     rt_packet_ioctl,
+	.ioctl_nrt =    rt_packet_ioctl,
+	.recvmsg_rt =   rt_packet_recvmsg,
+	.sendmsg_rt =   rt_packet_sendmsg,
+	.select =       rt_socket_select_bind,
+    },
+};
+
+static struct rtdm_device raw_packet_proto_dev = {
+	.driver = &raw_packet_proto_drv,
+	.label = "raw_packet",
+};
+
+static int __init rt_packet_proto_init(void)
+{
+	int err;
+
+	err = rtdm_dev_register(&packet_proto_dev);
+	if (err)
+		return err;
+
+	err = rtdm_dev_register(&raw_packet_proto_dev);
+	if (err)
+		rtdm_dev_unregister(&packet_proto_dev);
+
+	return err;
+}
+
+static void rt_packet_proto_release(void)
+{
+	rtdm_dev_unregister(&packet_proto_dev);
+	rtdm_dev_unregister(&raw_packet_proto_dev);
+}
+
+module_init(rt_packet_proto_init);
+module_exit(rt_packet_proto_release);
+
+/**********************************************************
+ * Utilities                                              *
+ **********************************************************/
+
+static int hex2int(unsigned char hex_char)
+{
+	if ((hex_char >= '0') && (hex_char <= '9'))
+		return hex_char - '0';
+	else if ((hex_char >= 'a') && (hex_char <= 'f'))
+		return hex_char - 'a' + 10;
+	else if ((hex_char >= 'A') && (hex_char <= 'F'))
+		return hex_char - 'A' + 10;
+	else
+		return -EINVAL;
+}
+
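+/*
+ * rt_eth_aton - parse a colon-separated MAC address string into a 6-byte
+ * binary buffer. Example (illustrative):
+ *
+ *   unsigned char mac[6];
+ *   if (rt_eth_aton(mac, "00:0e:8c:12:34:56") == 0)
+ *       ; // mac[] now holds { 0x00, 0x0e, 0x8c, 0x12, 0x34, 0x56 }
+ *
+ * Malformed or truncated input yields -EINVAL.
+ */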
+int rt_eth_aton(unsigned char *addr_buf, const char *mac)
+{
+	int i = 0;
+	int nibble;
+
+	while (1) {
+		if (*mac == 0)
+			return -EINVAL;
+
+		if ((nibble = hex2int(*mac++)) < 0)
+			return nibble;
+		*addr_buf = nibble << 4;
+
+		if (*mac == 0)
+			return -EINVAL;
+
+		if ((nibble = hex2int(*mac++)) < 0)
+			return nibble;
+		*addr_buf++ |= nibble;
+
+		if (++i == 6)
+			break;
+
+		if ((*mac == 0) || (*mac++ != ':'))
+			return -EINVAL;
+	}
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rt_eth_aton);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Kconfig
new file mode 100644
index 0000000..38c4840
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Kconfig
@@ -0,0 +1,23 @@
+config XENO_DRIVERS_NET_RTCFG
+    depends on XENO_DRIVERS_NET
+    tristate "RTcfg Service"
+    default y
+    help
+    The Real-Time Configuration service configures and monitors nodes in
+    an RTnet network. It works with plain MAC as well as with IPv4
+    addresses (in case CONFIG_RTNET_RTIPV4 has been switched on). RTcfg
+    consists of a configuration server, which can run, for example, on
+    the same station as the TDMA master, and of one or more clients.
+    Clients can join and leave the network at runtime without interfering
+    with other stations. Besides network configuration, the RTcfg server
+    can also distribute custom data.
+
+    See Documentation/README.rtcfg for further information.
+
+config XENO_DRIVERS_NET_RTCFG_DEBUG
+    bool "RTcfg Debugging"
+    depends on XENO_DRIVERS_NET_RTCFG
+    default n
+    help
+    Enables debug message output of the RTcfg state machines. Switch it
+    on if you need to trace a problem related to RTcfg.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Makefile
new file mode 100644
index 0000000..80a954d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/Makefile
@@ -0,0 +1,14 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTCFG) += rtcfg.o
+
+rtcfg-y := \
+	rtcfg_module.o \
+	rtcfg_event.o \
+	rtcfg_client_event.o \
+	rtcfg_conn_event.o \
+	rtcfg_ioctl.o \
+	rtcfg_frame.o \
+	rtcfg_timer.o \
+	rtcfg_file.o \
+	rtcfg_proc.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_client_event.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_client_event.c
new file mode 100644
index 0000000..1a69bf4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_client_event.c
@@ -0,0 +1,1175 @@
+/***
+ *
+ *  rtcfg/rtcfg_client_event.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <ipv4/route.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
+static int rtcfg_client_get_frag(int ifindex, struct rt_proc_call *call);
+static void rtcfg_client_detach(int ifindex, struct rt_proc_call *call);
+static void rtcfg_client_recv_stage_1(int ifindex, struct rtskb *rtskb);
+static int rtcfg_client_recv_announce(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_recv_stage_2_cfg(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_recv_stage_2_frag(int ifindex, struct rtskb *rtskb);
+static int rtcfg_client_recv_ready(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_recv_dead_station(int ifindex, struct rtskb *rtskb);
+static void rtcfg_client_update_server(int ifindex, struct rtskb *rtskb);
+
+/*** Client States ***/
+
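+/*
+ * Informal state sketch, reconstructed from the handlers below (comment
+ * only, not authoritative):
+ *
+ *   CLIENT_0 --stage 1 cfg--> CLIENT_1 --ANNOUNCE cmd--> ANNOUNCED
+ *   ANNOUNCED --all stations announced--> ALL_KNOWN
+ *   ANNOUNCED/ALL_KNOWN --stage 2 data pending--> ALL_FRAMES
+ *   any of the above --config + stations complete--> CLIENT_2 or READY
+ *   CLIENT_2 --READY cmd / all stations ready--> READY
+ */
+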
+int rtcfg_main_state_client_0(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+
+	switch (event_id) {
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		rtcfg_client_recv_stage_1(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_1(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_cmd *cmd_event;
+	int ret;
+
+	switch (event_id) {
+	case RTCFG_CMD_CLIENT:
+		/* second trial (buffer was probably too small) */
+		rtcfg_queue_blocking_call(ifindex,
+					  (struct rt_proc_call *)event_data);
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_0);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_ANNOUNCE:
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		if (cmd_event->args.announce.burstrate == 0) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return -EINVAL;
+		}
+
+		rtcfg_queue_blocking_call(ifindex,
+					  (struct rt_proc_call *)event_data);
+
+		if (cmd_event->args.announce.flags & _RTCFG_FLAG_STAGE_2_DATA)
+			set_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags);
+		if (cmd_event->args.announce.flags & _RTCFG_FLAG_READY)
+			set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags);
+		if (cmd_event->args.announce.burstrate < rtcfg_dev->burstrate)
+			rtcfg_dev->burstrate =
+				cmd_event->args.announce.burstrate;
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_ANNOUNCED);
+
+		ret = rtcfg_send_announce_new(ifindex);
+		if (ret < 0) {
+			rtcfg_dequeue_blocking_call(ifindex);
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return ret;
+		}
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_announced(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_ANNOUNCE:
+		return rtcfg_client_get_frag(ifindex, call);
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_STAGE_2_CFG:
+		rtcfg_client_recv_stage_2_cfg(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_2_CFG_FRAG:
+		rtcfg_client_recv_stage_2_frag(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations)
+				rtcfg_next_main_state(
+					ifindex, RTCFG_MAIN_CLIENT_ALL_KNOWN);
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations)
+				rtcfg_next_main_state(
+					ifindex, RTCFG_MAIN_CLIENT_ALL_KNOWN);
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+int rtcfg_main_state_client_all_known(int ifindex, RTCFG_EVENT event_id,
+				      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+
+	switch (event_id) {
+	case RTCFG_CMD_ANNOUNCE:
+		return rtcfg_client_get_frag(ifindex, call);
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_STAGE_2_CFG_FRAG:
+		rtcfg_client_recv_stage_2_frag(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_all_frames(int ifindex, RTCFG_EVENT event_id,
+				       void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations) {
+				rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE,
+						   0);
+
+				rtcfg_next_main_state(
+					ifindex,
+					test_bit(RTCFG_FLAG_READY,
+						 &rtcfg_dev->flags) ?
+						RTCFG_MAIN_CLIENT_READY :
+						RTCFG_MAIN_CLIENT_2);
+			}
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_found ==
+			    rtcfg_dev->other_stations) {
+				rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE,
+						   0);
+
+				rtcfg_next_main_state(
+					ifindex,
+					test_bit(RTCFG_FLAG_READY,
+						 &rtcfg_dev->flags) ?
+						RTCFG_MAIN_CLIENT_READY :
+						RTCFG_MAIN_CLIENT_2);
+			}
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_2(int ifindex, RTCFG_EVENT event_id,
+			      void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_READY:
+		rtcfg_dev = &device[ifindex];
+
+		if (rtcfg_dev->stations_ready == rtcfg_dev->other_stations)
+			rtpc_complete_call(call, 0);
+		else
+			rtcfg_queue_blocking_call(ifindex, call);
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_READY);
+
+		if (!test_and_set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags))
+			rtcfg_send_ready(ifindex);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0)
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		/* ignore */
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		kfree_rtskb(rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+int rtcfg_main_state_client_ready(int ifindex, RTCFG_EVENT event_id,
+				  void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_device *rtcfg_dev;
+
+	switch (event_id) {
+	case RTCFG_CMD_DETACH:
+		rtcfg_client_detach(ifindex, call);
+		break;
+
+	case RTCFG_FRM_READY:
+		if (rtcfg_client_recv_ready(ifindex, rtskb) == 0) {
+			rtcfg_dev = &device[ifindex];
+			if (rtcfg_dev->stations_ready ==
+			    rtcfg_dev->other_stations)
+				rtcfg_complete_cmd(ifindex, RTCFG_CMD_READY, 0);
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		}
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		if (rtcfg_client_recv_announce(ifindex, rtskb) == 0) {
+			rtcfg_send_announce_reply(
+				ifindex, rtskb->mac.ethernet->h_source);
+			rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		}
+		kfree_rtskb(rtskb);
+		break;
+
+	case RTCFG_FRM_DEAD_STATION:
+		rtcfg_client_recv_dead_station(ifindex, rtskb);
+		break;
+
+	case RTCFG_FRM_STAGE_1_CFG:
+		rtcfg_client_update_server(ifindex, rtskb);
+		break;
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*** Client Command Event Handlers ***/
+
+static int rtcfg_client_get_frag(int ifindex, struct rt_proc_call *call)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	if (test_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags) == 0) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		return -EINVAL;
+	}
+
+	rtcfg_send_ack(ifindex);
+
+	if (rtcfg_dev->spec.clt.cfg_offs >= rtcfg_dev->spec.clt.cfg_len) {
+		if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) {
+			rtpc_complete_call(call, 0);
+
+			rtcfg_next_main_state(ifindex,
+					      test_bit(RTCFG_FLAG_READY,
+						       &rtcfg_dev->flags) ?
+						      RTCFG_MAIN_CLIENT_READY :
+						      RTCFG_MAIN_CLIENT_2);
+		} else {
+			rtcfg_next_main_state(ifindex,
+					      RTCFG_MAIN_CLIENT_ALL_FRAMES);
+			rtcfg_queue_blocking_call(ifindex, call);
+		}
+	} else
+		rtcfg_queue_blocking_call(ifindex, call);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return -CALL_PENDING;
+}
+
+/* releases rtcfg_dev->dev_mutex on return */
+static void rtcfg_client_detach(int ifindex, struct rt_proc_call *call)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtcfg_cmd *cmd_event;
+
+	cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+	cmd_event->args.detach.station_addr_list =
+		rtcfg_dev->spec.clt.station_addr_list;
+	cmd_event->args.detach.stage2_chain = rtcfg_dev->spec.clt.stage2_chain;
+
+	while (1) {
+		call = rtcfg_dequeue_blocking_call(ifindex);
+		if (call == NULL)
+			break;
+
+		rtpc_complete_call(call, -ENODEV);
+	}
+
+	if (test_and_clear_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags))
+		rtdm_timer_destroy(&rtcfg_dev->timer);
+	rtcfg_reset_device(ifindex);
+
+	rtcfg_next_main_state(cmd_event->internal.data.ifindex, RTCFG_MAIN_OFF);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+}
+
+/*** Client Frame Event Handlers ***/
+
+static void rtcfg_client_recv_stage_1(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_1_cfg *stage_1_cfg;
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u8 addr_type;
+	int ret;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_1_cfg = (struct rtcfg_frm_stage_1_cfg *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));
+
+	addr_type = stage_1_cfg->addr_type;
+
+	switch (stage_1_cfg->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP: {
+		struct rtnet_device *rtdev, *tmp;
+		u32 daddr, saddr, mask, bcast;
+
+		if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg) +
+					 2 * RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg "
+				       "frame\n");
+			kfree_rtskb(rtskb);
+			return;
+		}
+
+		rtdev = rtskb->rtdev;
+
+		memcpy(&daddr, stage_1_cfg->client_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		memcpy(&saddr, stage_1_cfg->server_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		__rtskb_pull(rtskb, 2 * RTCFG_ADDRSIZE_IP);
+
+		/* Broadcast: IP is used to address client */
+		if (rtskb->pkt_type == PACKET_BROADCAST) {
+			/* directed to us? */
+			if (daddr != rtdev->local_ip) {
+				rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+				kfree_rtskb(rtskb);
+				return;
+			}
+
+		} else {
+			/* Unicast: IP address is assigned by the server */
+
+			/* default netmask */
+			if (ntohl(daddr) <= 0x7FFFFFFF) /* 127.255.255.255  */
+				mask = 0x000000FF; /* 255.0.0.0        */
+			else if (ntohl(daddr) <=
+				 0xBFFFFFFF) /* 191.255.255.255  */
+				mask = 0x0000FFFF; /* 255.255.0.0      */
+			else
+				mask = 0x00FFFFFF; /* 255.255.255.0    */
+			bcast = daddr | (~mask);
+
+			rt_ip_route_del_all(rtdev); /* cleanup routing table */
+
+			rtdev->local_ip = daddr;
+			rtdev->broadcast_ip = bcast;
+
+			if ((tmp = rtdev_get_loopback()) != NULL) {
+				rt_ip_route_add_host(daddr, tmp->dev_addr, tmp);
+				rtdev_dereference(tmp);
+			}
+
+			if (rtdev->flags & IFF_BROADCAST)
+				rt_ip_route_add_host(bcast, rtdev->broadcast,
+						     rtdev);
+		}
+
+		/* update routing table */
+		rt_ip_route_add_host(saddr, rtskb->mac.ethernet->h_source,
+				     rtdev);
+
+		rtcfg_dev->spec.clt.srv_addr.ip_addr = saddr;
+		break;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    stage_1_cfg->addr_type, __FUNCTION__);
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	rtcfg_dev->spec.clt.addr_type = addr_type;
+
+	/* Ethernet-specific */
+	memcpy(rtcfg_dev->spec.clt.srv_mac_addr, rtskb->mac.ethernet->h_source,
+	       ETH_ALEN);
+
+	rtcfg_dev->burstrate = stage_1_cfg->burstrate;
+
+	rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_1);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	while (1) {
+		call = rtcfg_dequeue_blocking_call(ifindex);
+		if (call == NULL)
+			break;
+
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		if (cmd_event->internal.data.event_id == RTCFG_CMD_CLIENT) {
+			ret = 0;
+
+			/* note: only the first pending call gets data */
+			if ((rtskb != NULL) &&
+			    (cmd_event->args.client.buffer_size > 0)) {
+				ret = ntohs(stage_1_cfg->cfg_len);
+
+				cmd_event->args.client.rtskb = rtskb;
+				rtskb = NULL;
+			}
+		} else
+			ret = -EINVAL;
+
+		rtpc_complete_call(call, ret);
+	}
+
+	if (rtskb)
+		kfree_rtskb(rtskb);
+}
+
+static int rtcfg_add_to_station_list(struct rtcfg_device *rtcfg_dev,
+				     u8 *mac_addr, u8 flags)
+{
+	if (rtcfg_dev->stations_found == rtcfg_dev->spec.clt.max_stations) {
+		RTCFG_DEBUG(
+			1, "RTcfg: insufficient memory for storing new station "
+			   "address\n");
+		return -ENOMEM;
+	}
+
+	/* Ethernet-specific! */
+	memcpy(&rtcfg_dev->spec.clt.station_addr_list[rtcfg_dev->stations_found]
+			.mac_addr,
+	       mac_addr, ETH_ALEN);
+
+	rtcfg_dev->spec.clt.station_addr_list[rtcfg_dev->stations_found].flags =
+		flags;
+
+	rtcfg_dev->stations_found++;
+	if ((flags & _RTCFG_FLAG_READY) != 0)
+		rtcfg_dev->stations_ready++;
+
+	return 0;
+}
+
+/* Notes:
+ *  o rtcfg_client_recv_announce does not release the passed rtskb.
+ *  o On success, rtcfg_client_recv_announce returns without releasing the
+ *    device lock.
+ */
+static int rtcfg_client_recv_announce(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_announce *announce_frm;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u32 i;
+	u32 announce_frm_addr;
+	int result;
+
+	announce_frm = (struct rtcfg_frm_announce *)rtskb->data;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_announce)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1,
+			    "RTcfg: received invalid announce frame (id: %d)\n",
+			    announce_frm->head.id);
+		return -EINVAL;
+	}
+
+	switch (announce_frm->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP:
+		if (rtskb->len <
+		    sizeof(struct rtcfg_frm_announce) + RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(1,
+				    "RTcfg: received invalid announce frame "
+				    "(id: %d)\n",
+				    announce_frm->head.id);
+			return -EINVAL;
+		}
+
+		memcpy(&announce_frm_addr, announce_frm->addr, 4);
+
+		/* update routing table */
+		rt_ip_route_add_host(announce_frm_addr,
+				     rtskb->mac.ethernet->h_source,
+				     rtskb->rtdev);
+
+		announce_frm =
+			(struct rtcfg_frm_announce *)(((u8 *)announce_frm) +
+						      RTCFG_ADDRSIZE_IP);
+
+		break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    announce_frm->addr_type, __FUNCTION__);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < rtcfg_dev->stations_found; i++)
+		/* Ethernet-specific! */
+		if (memcmp(rtcfg_dev->spec.clt.station_addr_list[i].mac_addr,
+			   rtskb->mac.ethernet->h_source, ETH_ALEN) == 0)
+			return 0;
+
+	result = rtcfg_add_to_station_list(
+		rtcfg_dev, rtskb->mac.ethernet->h_source, announce_frm->flags);
+	if (result < 0)
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return result;
+}
+
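+/*
+ * Queue one stage 2 configuration fragment (descriptive comment):
+ * fragments are chained into spec.clt.stage2_chain until either the
+ * complete configuration has arrived or one burst (burstrate fragments)
+ * is full; the chain is then handed to the first pending
+ * RTCFG_CMD_ANNOUNCE call.
+ */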
+static void rtcfg_client_queue_frag(int ifindex, struct rtskb *rtskb,
+				    size_t data_len)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+	int result;
+
+	rtskb_trim(rtskb, data_len);
+
+	if (rtcfg_dev->spec.clt.stage2_chain == NULL)
+		rtcfg_dev->spec.clt.stage2_chain = rtskb;
+	else {
+		rtcfg_dev->spec.clt.stage2_chain->chain_end->next = rtskb;
+		rtcfg_dev->spec.clt.stage2_chain->chain_end = rtskb;
+	}
+
+	rtcfg_dev->spec.clt.cfg_offs += data_len;
+	rtcfg_dev->spec.clt.chain_len += data_len;
+
+	if ((rtcfg_dev->spec.clt.cfg_offs >= rtcfg_dev->spec.clt.cfg_len) ||
+	    (++rtcfg_dev->spec.clt.packet_counter == rtcfg_dev->burstrate)) {
+		while (1) {
+			call = rtcfg_dequeue_blocking_call(ifindex);
+			if (call == NULL)
+				break;
+
+			cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+			result = 0;
+
+			/* note: only the first pending call gets data */
+			if (rtcfg_dev->spec.clt.stage2_chain != NULL) {
+				result = rtcfg_dev->spec.clt.chain_len;
+				cmd_event->args.announce.rtskb =
+					rtcfg_dev->spec.clt.stage2_chain;
+				rtcfg_dev->spec.clt.stage2_chain = NULL;
+			}
+
+			rtpc_complete_call(call,
+					   (cmd_event->internal.data.event_id ==
+					    RTCFG_CMD_ANNOUNCE) ?
+						   result :
+						   -EINVAL);
+		}
+
+		rtcfg_dev->spec.clt.packet_counter = 0;
+		rtcfg_dev->spec.clt.chain_len = 0;
+	}
+}
+
+static void rtcfg_client_recv_stage_2_cfg(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_2_cfg *stage_2_cfg;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	size_t data_len;
+	int ret;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid stage_2_cfg frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_2_cfg = (struct rtcfg_frm_stage_2_cfg *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg));
+
+	if (stage_2_cfg->heartbeat_period) {
+		ret = rtdm_timer_init(&rtcfg_dev->timer, rtcfg_timer,
+				      "rtcfg-timer");
+		if (ret == 0) {
+			ret = rtdm_timer_start(
+				&rtcfg_dev->timer, XN_INFINITE,
+				(nanosecs_rel_t)ntohs(
+					stage_2_cfg->heartbeat_period) *
+					1000000,
+				RTDM_TIMERMODE_RELATIVE);
+			if (ret < 0)
+				rtdm_timer_destroy(&rtcfg_dev->timer);
+		}
+
+		if (ret < 0)
+			/*ERRMSG*/ rtdm_printk(
+				"RTcfg: unable to create timer task\n");
+		else
+			set_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags);
+	}
+
+	/* add server to station list */
+	if (rtcfg_add_to_station_list(rtcfg_dev, rtskb->mac.ethernet->h_source,
+				      stage_2_cfg->flags) < 0) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unable to process stage_2_cfg frage\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	rtcfg_dev->other_stations = ntohl(stage_2_cfg->stations);
+	rtcfg_dev->spec.clt.cfg_len = ntohl(stage_2_cfg->cfg_len);
+	data_len = MIN(rtcfg_dev->spec.clt.cfg_len, rtskb->len);
+
+	if (test_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags) &&
+	    (data_len > 0)) {
+		rtcfg_client_queue_frag(ifindex, rtskb, data_len);
+		rtskb = NULL;
+
+		if (rtcfg_dev->stations_found == rtcfg_dev->other_stations)
+			rtcfg_next_main_state(ifindex,
+					      RTCFG_MAIN_CLIENT_ALL_KNOWN);
+	} else {
+		if (rtcfg_dev->stations_found == rtcfg_dev->other_stations) {
+			rtcfg_complete_cmd(ifindex, RTCFG_CMD_ANNOUNCE, 0);
+
+			rtcfg_next_main_state(ifindex,
+					      test_bit(RTCFG_FLAG_READY,
+						       &rtcfg_dev->flags) ?
+						      RTCFG_MAIN_CLIENT_READY :
+						      RTCFG_MAIN_CLIENT_2);
+		} else
+			rtcfg_next_main_state(ifindex,
+					      RTCFG_MAIN_CLIENT_ALL_FRAMES);
+
+		rtcfg_send_ack(ifindex);
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+static void rtcfg_client_recv_stage_2_frag(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_2_cfg_frag *stage_2_frag;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	size_t data_len;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_2_cfg_frag)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1,
+			    "RTcfg: received invalid stage_2_cfg_frag frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_2_frag = (struct rtcfg_frm_stage_2_cfg_frag *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_2_cfg_frag));
+
+	data_len =
+		MIN(rtcfg_dev->spec.clt.cfg_len - rtcfg_dev->spec.clt.cfg_offs,
+		    rtskb->len);
+
+	if (test_bit(RTCFG_FLAG_STAGE_2_DATA, &rtcfg_dev->flags) == 0) {
+		RTCFG_DEBUG(1, "RTcfg: unexpected stage 2 fragment, we did not "
+			       "request any data!\n");
+
+	} else if (rtcfg_dev->spec.clt.cfg_offs !=
+		   ntohl(stage_2_frag->frag_offs)) {
+		RTCFG_DEBUG(1,
+			    "RTcfg: unexpected stage 2 fragment (expected: %d, "
+			    "received: %d)\n",
+			    rtcfg_dev->spec.clt.cfg_offs,
+			    ntohl(stage_2_frag->frag_offs));
+
+		rtcfg_send_ack(ifindex);
+		rtcfg_dev->spec.clt.packet_counter = 0;
+	} else {
+		rtcfg_client_queue_frag(ifindex, rtskb, data_len);
+		rtskb = NULL;
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+/* Notes:
+ *  o On success, rtcfg_client_recv_ready returns without releasing the
+ *    device lock.
+ */
+static int rtcfg_client_recv_ready(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u32 i;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_simple)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid ready frame\n");
+		kfree_rtskb(rtskb);
+		return -EINVAL;
+	}
+
+	for (i = 0; i < rtcfg_dev->stations_found; i++)
+		/* Ethernet-specific! */
+		if (memcmp(rtcfg_dev->spec.clt.station_addr_list[i].mac_addr,
+			   rtskb->mac.ethernet->h_source, ETH_ALEN) == 0) {
+			if ((rtcfg_dev->spec.clt.station_addr_list[i].flags &
+			     _RTCFG_FLAG_READY) == 0) {
+				rtcfg_dev->spec.clt.station_addr_list[i].flags |=
+					_RTCFG_FLAG_READY;
+				rtcfg_dev->stations_ready++;
+			}
+			break;
+		}
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+static void rtcfg_client_recv_dead_station(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_dead_station *dead_station_frm;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	u32 i;
+
+	dead_station_frm = (struct rtcfg_frm_dead_station *)rtskb->data;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_dead_station)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid dead station frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	switch (dead_station_frm->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP: {
+		u32 ip;
+
+		if (rtskb->len <
+		    sizeof(struct rtcfg_frm_dead_station) + RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(
+				1,
+				"RTcfg: received invalid dead station frame\n");
+			kfree_rtskb(rtskb);
+			return;
+		}
+
+		memcpy(&ip, dead_station_frm->logical_addr, 4);
+
+		/* only delete remote IPs from routing table */
+		if (rtskb->rtdev->local_ip != ip)
+			rt_ip_route_del_host(ip, rtskb->rtdev);
+
+		dead_station_frm = (struct rtcfg_frm_dead_station
+					    *)(((u8 *)dead_station_frm) +
+					       RTCFG_ADDRSIZE_IP);
+
+		break;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    dead_station_frm->addr_type, __FUNCTION__);
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	for (i = 0; i < rtcfg_dev->stations_found; i++)
+		/* Ethernet-specific! */
+		if (memcmp(rtcfg_dev->spec.clt.station_addr_list[i].mac_addr,
+			   dead_station_frm->physical_addr, ETH_ALEN) == 0) {
+			if ((rtcfg_dev->spec.clt.station_addr_list[i].flags &
+			     _RTCFG_FLAG_READY) != 0)
+				rtcfg_dev->stations_ready--;
+
+			rtcfg_dev->stations_found--;
+			memmove(&rtcfg_dev->spec.clt.station_addr_list[i],
+				&rtcfg_dev->spec.clt.station_addr_list[i + 1],
+				sizeof(struct rtcfg_station) *
+					(rtcfg_dev->stations_found - i));
+
+			if (rtcfg_dev->state == RTCFG_MAIN_CLIENT_ALL_KNOWN)
+				rtcfg_next_main_state(
+					ifindex, RTCFG_MAIN_CLIENT_ANNOUNCED);
+			break;
+		}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+}
+
+static void rtcfg_client_update_server(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_frm_stage_1_cfg *stage_1_cfg;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg frame\n");
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	stage_1_cfg = (struct rtcfg_frm_stage_1_cfg *)rtskb->data;
+	__rtskb_pull(rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));
+
+	switch (stage_1_cfg->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	case RTCFG_ADDR_IP: {
+		struct rtnet_device *rtdev;
+		u32 daddr, saddr;
+
+		if (rtskb->len < sizeof(struct rtcfg_frm_stage_1_cfg) +
+					 2 * RTCFG_ADDRSIZE_IP) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			RTCFG_DEBUG(1, "RTcfg: received invalid stage_1_cfg "
+				       "frame\n");
+			kfree_rtskb(rtskb);
+			break;
+		}
+
+		rtdev = rtskb->rtdev;
+
+		memcpy(&daddr, stage_1_cfg->client_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		memcpy(&saddr, stage_1_cfg->server_addr, 4);
+		stage_1_cfg =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_cfg) +
+							 RTCFG_ADDRSIZE_IP);
+
+		__rtskb_pull(rtskb, 2 * RTCFG_ADDRSIZE_IP);
+
+		/* directed to us? */
+		if ((rtskb->pkt_type == PACKET_BROADCAST) &&
+		    (daddr != rtdev->local_ip)) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			kfree_rtskb(rtskb);
+			return;
+		}
+
+		/* update routing table */
+		rt_ip_route_add_host(saddr, rtskb->mac.ethernet->h_source,
+				     rtdev);
+
+		rtcfg_dev->spec.clt.srv_addr.ip_addr = saddr;
+		break;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	case RTCFG_ADDR_MAC:
+		/* nothing to do */
+		break;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: unknown addr_type %d in %s()\n",
+			    stage_1_cfg->addr_type, __FUNCTION__);
+		kfree_rtskb(rtskb);
+		return;
+	}
+
+	/* Ethernet-specific */
+	memcpy(rtcfg_dev->spec.clt.srv_mac_addr, rtskb->mac.ethernet->h_source,
+	       ETH_ALEN);
+
+	rtcfg_send_announce_reply(ifindex, rtskb->mac.ethernet->h_source);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_conn_event.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_conn_event.c
new file mode 100644
index 0000000..2c177fd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_conn_event.c
@@ -0,0 +1,364 @@
+/***
+ *
+ *  rtcfg/rtcfg_conn_event.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#include <ipv4/route.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+
+/****************************** states ***************************************/
+static int rtcfg_conn_state_searching(struct rtcfg_connection *conn,
+				      RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_stage_1(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_stage_2(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_ready(struct rtcfg_connection *conn,
+				  RTCFG_EVENT event_id, void *event_data);
+static int rtcfg_conn_state_dead(struct rtcfg_connection *conn,
+				 RTCFG_EVENT event_id, void *event_data);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG
+const char *rtcfg_conn_state[] = { "RTCFG_CONN_SEARCHING", "RTCFG_CONN_STAGE_1",
+				   "RTCFG_CONN_STAGE_2", "RTCFG_CONN_READY",
+				   "RTCFG_CONN_DEAD" };
+#endif /* CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG */
+
+static void rtcfg_conn_recv_announce_new(struct rtcfg_connection *conn,
+					 struct rtskb *rtskb);
+static void rtcfg_conn_check_cfg_timeout(struct rtcfg_connection *conn);
+static void rtcfg_conn_check_heartbeat(struct rtcfg_connection *conn);
+
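+/* Handler table indexed by connection state; the order must match the
+ * RTCFG_CONN_* enumeration used in conn->state. */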
+static int (*state[])(struct rtcfg_connection *conn, RTCFG_EVENT event_id,
+		      void *event_data) = {
+	rtcfg_conn_state_searching, rtcfg_conn_state_stage_1,
+	rtcfg_conn_state_stage_2, rtcfg_conn_state_ready, rtcfg_conn_state_dead
+};
+
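+/* Dispatch an event to the handler of the connection's current state. */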
+int rtcfg_do_conn_event(struct rtcfg_connection *conn, RTCFG_EVENT event_id,
+			void *event_data)
+{
+	int conn_state = conn->state;
+
+	RTCFG_DEBUG(3, "RTcfg: %s() conn=%p, event=%s, state=%s\n",
+		    __FUNCTION__, conn, rtcfg_event[event_id],
+		    rtcfg_conn_state[conn_state]);
+
+	return (*state[conn_state])(conn, event_id, event_data);
+}
+
+static void rtcfg_next_conn_state(struct rtcfg_connection *conn,
+				  RTCFG_CONN_STATE state)
+{
+	RTCFG_DEBUG(4, "RTcfg: next connection state=%s \n",
+		    rtcfg_conn_state[state]);
+
+	conn->state = state;
+}
+
+static int rtcfg_conn_state_searching(struct rtcfg_connection *conn,
+				      RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+
+	switch (event_id) {
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		rtcfg_conn_recv_announce_new(conn, rtskb);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		conn->last_frame = rtskb->time_stamp;
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_READY);
+
+		rtcfg_dev->stations_found++;
+		rtcfg_dev->stations_ready++;
+		rtcfg_dev->spec.srv.clients_configured++;
+		if (rtcfg_dev->spec.srv.clients_configured ==
+		    rtcfg_dev->other_stations)
+			rtcfg_complete_cmd(conn->ifindex, RTCFG_CMD_WAIT, 0);
+
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_stage_1(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtcfg_frm_ack_cfg *ack_cfg;
+	int packets;
+
+	switch (event_id) {
+	case RTCFG_FRM_ACK_CFG:
+		conn->last_frame = rtskb->time_stamp;
+
+		ack_cfg = (struct rtcfg_frm_ack_cfg *)rtskb->data;
+		conn->cfg_offs = ntohl(ack_cfg->ack_len);
+
+		if ((conn->flags & _RTCFG_FLAG_STAGE_2_DATA) != 0) {
+			if (conn->cfg_offs >= conn->stage2_file->size) {
+				rtcfg_dev->spec.srv.clients_configured++;
+				if (rtcfg_dev->spec.srv.clients_configured ==
+				    rtcfg_dev->other_stations)
+					rtcfg_complete_cmd(conn->ifindex,
+							   RTCFG_CMD_WAIT, 0);
+				rtcfg_next_conn_state(
+					conn, ((conn->flags &
+						_RTCFG_FLAG_READY) != 0) ?
+						      RTCFG_CONN_READY :
+						      RTCFG_CONN_STAGE_2);
+			} else {
+				packets = conn->burstrate;
+				while ((conn->cfg_offs <
+					conn->stage2_file->size) &&
+				       (packets > 0)) {
+					rtcfg_send_stage_2_frag(conn);
+					packets--;
+				}
+			}
+		} else {
+			rtcfg_dev->spec.srv.clients_configured++;
+			if (rtcfg_dev->spec.srv.clients_configured ==
+			    rtcfg_dev->other_stations)
+				rtcfg_complete_cmd(conn->ifindex,
+						   RTCFG_CMD_WAIT, 0);
+			rtcfg_next_conn_state(
+				conn, ((conn->flags & _RTCFG_FLAG_READY) != 0) ?
+					      RTCFG_CONN_READY :
+					      RTCFG_CONN_STAGE_2);
+		}
+
+		break;
+
+	case RTCFG_TIMER:
+		rtcfg_conn_check_cfg_timeout(conn);
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_stage_2(struct rtcfg_connection *conn,
+				    RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+
+	switch (event_id) {
+	case RTCFG_FRM_READY:
+		conn->last_frame = rtskb->time_stamp;
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_READY);
+
+		conn->flags |= _RTCFG_FLAG_READY;
+		rtcfg_dev->stations_ready++;
+
+		if (rtcfg_dev->stations_ready == rtcfg_dev->other_stations)
+			rtcfg_complete_cmd(conn->ifindex, RTCFG_CMD_READY, 0);
+
+		break;
+
+	case RTCFG_TIMER:
+		rtcfg_conn_check_cfg_timeout(conn);
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_ready(struct rtcfg_connection *conn,
+				  RTCFG_EVENT event_id, void *event_data)
+{
+	struct rtskb *rtskb = (struct rtskb *)event_data;
+
+	switch (event_id) {
+	case RTCFG_TIMER:
+		rtcfg_conn_check_heartbeat(conn);
+		break;
+
+	case RTCFG_FRM_HEARTBEAT:
+		conn->last_frame = rtskb->time_stamp;
+		break;
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+static int rtcfg_conn_state_dead(struct rtcfg_connection *conn,
+				 RTCFG_EVENT event_id, void *event_data)
+{
+	switch (event_id) {
+	case RTCFG_FRM_ANNOUNCE_NEW:
+		rtcfg_conn_recv_announce_new(conn, (struct rtskb *)event_data);
+		break;
+
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		/* Spec to-do: signal station that it is assumed to be dead
+		 * (=> reboot command?) */
+
+	default:
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for conn %p in %s()\n",
+			    rtcfg_event[event_id], conn, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
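+/*
+ * A new client announced itself: adopt its flags, negotiate the lower of
+ * the two burst rates, enter STAGE_1, and start streaming stage 2 data
+ * (if any) up to one burst window.
+ */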
+static void rtcfg_conn_recv_announce_new(struct rtcfg_connection *conn,
+					 struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtcfg_frm_announce *announce_new;
+	int packets;
+
+	conn->last_frame = rtskb->time_stamp;
+
+	announce_new = (struct rtcfg_frm_announce *)rtskb->data;
+
+	conn->flags = announce_new->flags;
+	if (announce_new->burstrate < conn->burstrate)
+		conn->burstrate = announce_new->burstrate;
+
+	rtcfg_next_conn_state(conn, RTCFG_CONN_STAGE_1);
+
+	rtcfg_dev->stations_found++;
+	if ((conn->flags & _RTCFG_FLAG_READY) != 0)
+		rtcfg_dev->stations_ready++;
+
+	if (((conn->flags & _RTCFG_FLAG_STAGE_2_DATA) != 0) &&
+	    (conn->stage2_file != NULL)) {
+		packets = conn->burstrate - 1;
+
+		rtcfg_send_stage_2(conn, 1);
+
+		while ((conn->cfg_offs < conn->stage2_file->size) &&
+		       (packets > 0)) {
+			rtcfg_send_stage_2_frag(conn);
+			packets--;
+		}
+	} else {
+		rtcfg_send_stage_2(conn, 0);
+		conn->flags &= ~_RTCFG_FLAG_STAGE_2_DATA;
+	}
+}
+
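+/*
+ * Revert a connection to SEARCHING if no frame arrived within the
+ * configured timeout, undoing its station accounting and falling back to
+ * the broadcast MAC for IP-addressed stations.
+ */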
+static void rtcfg_conn_check_cfg_timeout(struct rtcfg_connection *conn)
+{
+	struct rtcfg_device *rtcfg_dev;
+
+	if (!conn->cfg_timeout)
+		return;
+
+	if (rtdm_clock_read() >= conn->last_frame + conn->cfg_timeout) {
+		rtcfg_dev = &device[conn->ifindex];
+
+		rtcfg_dev->stations_found--;
+		if (conn->state == RTCFG_CONN_STAGE_2)
+			rtcfg_dev->spec.srv.clients_configured--;
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_SEARCHING);
+		conn->cfg_offs = 0;
+		conn->flags = 0;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		if (conn->addr_type == RTCFG_ADDR_IP) {
+			struct rtnet_device *rtdev;
+
+			/* MAC address yet unknown -> use broadcast address */
+			rtdev = rtdev_get_by_index(conn->ifindex);
+			if (rtdev == NULL)
+				return;
+			memcpy(conn->mac_addr, rtdev->broadcast, MAX_ADDR_LEN);
+			rtdev_dereference(rtdev);
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+	}
+}
+
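+/*
+ * Declare a station dead if its heartbeat stayed silent for too long:
+ * broadcast a dead-station frame, drop its route, and move the
+ * connection to the DEAD state.
+ */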
+static void rtcfg_conn_check_heartbeat(struct rtcfg_connection *conn)
+{
+	u64 timeout;
+	struct rtcfg_device *rtcfg_dev;
+
+	timeout = device[conn->ifindex].spec.srv.heartbeat_timeout;
+	if (!timeout)
+		return;
+
+	if (rtdm_clock_read() >= conn->last_frame + timeout) {
+		rtcfg_dev = &device[conn->ifindex];
+
+		rtcfg_dev->stations_found--;
+		rtcfg_dev->stations_ready--;
+		rtcfg_dev->spec.srv.clients_configured--;
+
+		rtcfg_send_dead_station(conn);
+
+		rtcfg_next_conn_state(conn, RTCFG_CONN_DEAD);
+		conn->cfg_offs = 0;
+		conn->flags = 0;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		if ((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) {
+			struct rtnet_device *rtdev =
+				rtdev_get_by_index(conn->ifindex);
+
+			rt_ip_route_del_host(conn->addr.ip_addr, rtdev);
+
+			if (rtdev == NULL)
+				return;
+
+			if (!(conn->addr_type & FLAG_ASSIGN_ADDR_BY_MAC))
+				/* MAC address yet unknown -> use broadcast address */
+				memcpy(conn->mac_addr, rtdev->broadcast,
+				       MAX_ADDR_LEN);
+
+			rtdev_dereference(rtdev);
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_event.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_event.c
new file mode 100644
index 0000000..5bde3c3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_event.c
@@ -0,0 +1,745 @@
+/***
+ *
+ *  rtcfg/rtcfg_event.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/vmalloc.h>
+
+#include <rtdev.h>
+#include <ipv4/route.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_client_event.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_file.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
+/*** Common and Server States ***/
+static int rtcfg_main_state_off(int ifindex, RTCFG_EVENT event_id,
+				void *event_data);
+static int rtcfg_main_state_server_running(int ifindex, RTCFG_EVENT event_id,
+					   void *event_data);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG
+const char *rtcfg_event[] = { "RTCFG_CMD_SERVER",
+			      "RTCFG_CMD_ADD",
+			      "RTCFG_CMD_DEL",
+			      "RTCFG_CMD_WAIT",
+			      "RTCFG_CMD_CLIENT",
+			      "RTCFG_CMD_ANNOUNCE",
+			      "RTCFG_CMD_READY",
+			      "RTCFG_CMD_DETACH",
+			      "RTCFG_TIMER",
+			      "RTCFG_FRM_STAGE_1_CFG",
+			      "RTCFG_FRM_ANNOUNCE_NEW",
+			      "RTCFG_FRM_ANNOUNCE_REPLY",
+			      "RTCFG_FRM_STAGE_2_CFG",
+			      "RTCFG_FRM_STAGE_2_CFG_FRAG",
+			      "RTCFG_FRM_ACK_CFG",
+			      "RTCFG_FRM_READY",
+			      "RTCFG_FRM_HEARTBEAT",
+			      "RTCFG_FRM_DEAD_STATION" };
+
+const char *rtcfg_main_state[] = { "RTCFG_MAIN_OFF",
+				   "RTCFG_MAIN_SERVER_RUNNING",
+				   "RTCFG_MAIN_CLIENT_0",
+				   "RTCFG_MAIN_CLIENT_1",
+				   "RTCFG_MAIN_CLIENT_ANNOUNCED",
+				   "RTCFG_MAIN_CLIENT_ALL_KNOWN",
+				   "RTCFG_MAIN_CLIENT_ALL_FRAMES",
+				   "RTCFG_MAIN_CLIENT_2",
+				   "RTCFG_MAIN_CLIENT_READY" };
+
+int rtcfg_debug = RTCFG_DEFAULT_DEBUG_LEVEL;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTCFG_DEBUG */
+
+struct rtcfg_device device[MAX_RT_DEVICES];
+
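+/* Handler table indexed by main state; the order must match the
+ * RTCFG_MAIN_* enumeration used in device[].state. */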
+static int (*state[])(int ifindex, RTCFG_EVENT event_id,
+		      void *event_data) = { rtcfg_main_state_off,
+					    rtcfg_main_state_server_running,
+					    rtcfg_main_state_client_0,
+					    rtcfg_main_state_client_1,
+					    rtcfg_main_state_client_announced,
+					    rtcfg_main_state_client_all_known,
+					    rtcfg_main_state_client_all_frames,
+					    rtcfg_main_state_client_2,
+					    rtcfg_main_state_client_ready };
+
+static int rtcfg_server_add(struct rtcfg_cmd *cmd_event);
+static int rtcfg_server_del(struct rtcfg_cmd *cmd_event);
+static int rtcfg_server_detach(int ifindex, struct rtcfg_cmd *cmd_event);
+static int rtcfg_server_recv_announce(int ifindex, RTCFG_EVENT event_id,
+				      struct rtskb *rtskb);
+static int rtcfg_server_recv_ack(int ifindex, struct rtskb *rtskb);
+static int rtcfg_server_recv_simple_frame(int ifindex, RTCFG_EVENT event_id,
+					  struct rtskb *rtskb);
+
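+/*
+ * Main event dispatcher. Takes dev_mutex before entering the state
+ * handler; by convention, every handler releases the mutex on each of
+ * its return paths.
+ */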
+int rtcfg_do_main_event(int ifindex, RTCFG_EVENT event_id, void *event_data)
+{
+	int main_state;
+
+	rtdm_mutex_lock(&device[ifindex].dev_mutex);
+
+	main_state = device[ifindex].state;
+
+	RTCFG_DEBUG(3, "RTcfg: %s() rtdev=%d, event=%s, state=%s\n",
+		    __FUNCTION__, ifindex, rtcfg_event[event_id],
+		    rtcfg_main_state[main_state]);
+
+	return (*state[main_state])(ifindex, event_id, event_data);
+}
+
+void rtcfg_next_main_state(int ifindex, RTCFG_MAIN_STATE state)
+{
+	RTCFG_DEBUG(4, "RTcfg: next main state=%s \n", rtcfg_main_state[state]);
+
+	device[ifindex].state = state;
+}
+
+static int rtcfg_main_state_off(int ifindex, RTCFG_EVENT event_id,
+				void *event_data)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rt_proc_call *call = (struct rt_proc_call *)event_data;
+	struct rtcfg_cmd *cmd_event;
+	int ret;
+
+	cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+	switch (event_id) {
+	case RTCFG_CMD_SERVER:
+		INIT_LIST_HEAD(&rtcfg_dev->spec.srv.conn_list);
+
+		ret = rtdm_timer_init(&rtcfg_dev->timer, rtcfg_timer,
+				      "rtcfg-timer");
+		if (ret == 0) {
+			ret = rtdm_timer_start(
+				&rtcfg_dev->timer, XN_INFINITE,
+				(nanosecs_rel_t)cmd_event->args.server.period *
+					1000000,
+				RTDM_TIMERMODE_RELATIVE);
+			if (ret < 0)
+				rtdm_timer_destroy(&rtcfg_dev->timer);
+		}
+		if (ret < 0) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return ret;
+		}
+
+		if (cmd_event->args.server.flags & _RTCFG_FLAG_READY)
+			set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags);
+		set_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags);
+
+		rtcfg_dev->burstrate = cmd_event->args.server.burstrate;
+
+		rtcfg_dev->spec.srv.heartbeat =
+			cmd_event->args.server.heartbeat;
+
+		rtcfg_dev->spec.srv.heartbeat_timeout =
+			((u64)cmd_event->args.server.heartbeat) * 1000000 *
+			cmd_event->args.server.threshold;
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_SERVER_RUNNING);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		break;
+
+	case RTCFG_CMD_CLIENT:
+		rtcfg_dev->spec.clt.station_addr_list =
+			cmd_event->args.client.station_buf;
+		cmd_event->args.client.station_buf = NULL;
+
+		rtcfg_dev->spec.clt.max_stations =
+			cmd_event->args.client.max_stations;
+		rtcfg_dev->other_stations = -1;
+
+		rtcfg_queue_blocking_call(ifindex, call);
+
+		rtcfg_next_main_state(ifindex, RTCFG_MAIN_CLIENT_0);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	default:
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*** Server States ***/
+
+static int rtcfg_main_state_server_running(int ifindex, RTCFG_EVENT event_id,
+					   void *event_data)
+{
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+	struct rtcfg_device *rtcfg_dev;
+	struct rtskb *rtskb;
+
+	switch (event_id) {
+	case RTCFG_CMD_ADD:
+		call = (struct rt_proc_call *)event_data;
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		return rtcfg_server_add(cmd_event);
+
+	case RTCFG_CMD_DEL:
+		call = (struct rt_proc_call *)event_data;
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		return rtcfg_server_del(cmd_event);
+
+	case RTCFG_CMD_WAIT:
+		call = (struct rt_proc_call *)event_data;
+
+		rtcfg_dev = &device[ifindex];
+
+		if (rtcfg_dev->spec.srv.clients_configured ==
+		    rtcfg_dev->other_stations)
+			rtpc_complete_call(call, 0);
+		else
+			rtcfg_queue_blocking_call(ifindex, call);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_READY:
+		call = (struct rt_proc_call *)event_data;
+
+		rtcfg_dev = &device[ifindex];
+
+		if (rtcfg_dev->stations_ready == rtcfg_dev->other_stations)
+			rtpc_complete_call(call, 0);
+		else
+			rtcfg_queue_blocking_call(ifindex, call);
+
+		if (!test_and_set_bit(RTCFG_FLAG_READY, &rtcfg_dev->flags))
+			rtcfg_send_ready(ifindex);
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		return -CALL_PENDING;
+
+	case RTCFG_CMD_DETACH:
+		call = (struct rt_proc_call *)event_data;
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		return rtcfg_server_detach(ifindex, cmd_event);
+
+	case RTCFG_FRM_ANNOUNCE_NEW:
+	case RTCFG_FRM_ANNOUNCE_REPLY:
+		rtskb = (struct rtskb *)event_data;
+		return rtcfg_server_recv_announce(ifindex, event_id, rtskb);
+
+	case RTCFG_FRM_ACK_CFG:
+		rtskb = (struct rtskb *)event_data;
+		return rtcfg_server_recv_ack(ifindex, rtskb);
+
+	case RTCFG_FRM_READY:
+	case RTCFG_FRM_HEARTBEAT:
+		rtskb = (struct rtskb *)event_data;
+		return rtcfg_server_recv_simple_frame(ifindex, event_id, rtskb);
+
+	default:
+		rtdm_mutex_unlock(&device[ifindex].dev_mutex);
+
+		RTCFG_DEBUG(1, "RTcfg: unknown event %s for rtdev %d in %s()\n",
+			    rtcfg_event[event_id], ifindex, __FUNCTION__);
+		return -EINVAL;
+	}
+	return 0;
+}
+
+/*** Server Command Event Handlers ***/
+
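+/*
+ * Register a new client connection on the server. Returns -EEXIST for a
+ * duplicate address, a positive value when the referenced stage 2 file
+ * still has to be loaded by the caller, and 0 on success.
+ */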
+static int rtcfg_server_add(struct rtcfg_cmd *cmd_event)
+{
+	struct rtcfg_device *rtcfg_dev;
+	struct rtcfg_connection *conn;
+	struct rtcfg_connection *new_conn;
+	struct list_head *entry;
+	unsigned int addr_type;
+
+	rtcfg_dev = &device[cmd_event->internal.data.ifindex];
+	addr_type = cmd_event->args.add.addr_type & RTCFG_ADDR_MASK;
+
+	new_conn = cmd_event->args.add.conn_buf;
+	memset(new_conn, 0, sizeof(struct rtcfg_connection));
+
+	new_conn->ifindex = cmd_event->internal.data.ifindex;
+	new_conn->state = RTCFG_CONN_SEARCHING;
+	new_conn->addr_type = cmd_event->args.add.addr_type;
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	new_conn->addr.ip_addr = cmd_event->args.add.ip_addr;
+#endif
+	new_conn->stage1_data = cmd_event->args.add.stage1_data;
+	new_conn->stage1_size = cmd_event->args.add.stage1_size;
+	new_conn->burstrate = rtcfg_dev->burstrate;
+	new_conn->cfg_timeout = ((u64)cmd_event->args.add.timeout) * 1000000;
+
+	if (cmd_event->args.add.addr_type == RTCFG_ADDR_IP) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		struct rtnet_device *rtdev;
+
+		/* MAC address yet unknown -> use broadcast address */
+		rtdev = rtdev_get_by_index(cmd_event->internal.data.ifindex);
+		if (rtdev == NULL) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+			return -ENODEV;
+		}
+		memcpy(new_conn->mac_addr, rtdev->broadcast, MAX_ADDR_LEN);
+		rtdev_dereference(rtdev);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		return -EPROTONOSUPPORT;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+	} else
+		memcpy(new_conn->mac_addr, cmd_event->args.add.mac_addr,
+		       MAX_ADDR_LEN);
+
+	/* get stage 2 file */
+	if (cmd_event->args.add.stage2_file != NULL) {
+		if (cmd_event->args.add.stage2_file->buffer != NULL) {
+			new_conn->stage2_file = cmd_event->args.add.stage2_file;
+			rtcfg_add_file(new_conn->stage2_file);
+
+			cmd_event->args.add.stage2_file = NULL;
+		} else {
+			new_conn->stage2_file = rtcfg_get_file(
+				cmd_event->args.add.stage2_file->name);
+			if (new_conn->stage2_file == NULL) {
+				rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+				return 1;
+			}
+		}
+	}
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		if (
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+			((addr_type == RTCFG_ADDR_IP) &&
+			 (conn->addr.ip_addr == cmd_event->args.add.ip_addr)) ||
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+			((addr_type == RTCFG_ADDR_MAC) &&
+			 (memcmp(conn->mac_addr, new_conn->mac_addr,
+				 MAX_ADDR_LEN) == 0))) {
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+			if ((new_conn->stage2_file) &&
+			    (rtcfg_release_file(new_conn->stage2_file) == 0)) {
+				/* Note: This assignment cannot overwrite a
+				 * valid file pointer. Effectively, it will
+				 * only be executed when new_conn->stage2_file
+				 * is the pointer originally passed by
+				 * rtcfg_ioctl. But checking this assumption
+				 * does not cause any harm :o)
+				 */
+				RTNET_ASSERT(cmd_event->args.add.stage2_file ==
+						     NULL,
+					     ;);
+
+				cmd_event->args.add.stage2_file =
+					new_conn->stage2_file;
+			}
+
+			return -EEXIST;
+		}
+	}
+
+	list_add_tail(&new_conn->entry, &rtcfg_dev->spec.srv.conn_list);
+	rtcfg_dev->other_stations++;
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	cmd_event->args.add.conn_buf = NULL;
+	cmd_event->args.add.stage1_data = NULL;
+
+	return 0;
+}
+
+static int rtcfg_server_del(struct rtcfg_cmd *cmd_event)
+{
+	struct rtcfg_connection *conn;
+	struct list_head *entry;
+	unsigned int addr_type;
+	struct rtcfg_device *rtcfg_dev;
+
+	rtcfg_dev = &device[cmd_event->internal.data.ifindex];
+	addr_type = cmd_event->args.add.addr_type & RTCFG_ADDR_MASK;
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		if ((addr_type == conn->addr_type) &&
+		    (
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+			    ((addr_type == RTCFG_ADDR_IP) &&
+			     (conn->addr.ip_addr ==
+			      cmd_event->args.add.ip_addr)) ||
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+			    ((addr_type == RTCFG_ADDR_MAC) &&
+			     (memcmp(conn->mac_addr,
+				     cmd_event->args.add.mac_addr,
+				     MAX_ADDR_LEN) == 0)))) {
+			list_del(&conn->entry);
+			rtcfg_dev->other_stations--;
+
+			if (conn->state > RTCFG_CONN_SEARCHING) {
+				rtcfg_dev->stations_found--;
+				if (conn->state >= RTCFG_CONN_STAGE_2)
+					rtcfg_dev->spec.srv.clients_configured--;
+				if (conn->flags & _RTCFG_FLAG_READY)
+					rtcfg_dev->stations_ready--;
+			}
+
+			if ((conn->stage2_file) &&
+			    (rtcfg_release_file(conn->stage2_file) == 0))
+				cmd_event->args.del.stage2_file =
+					conn->stage2_file;
+
+			rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+			cmd_event->args.del.conn_buf = conn;
+
+			return 0;
+		}
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return -ENOENT;
+}
+
+static int rtcfg_server_detach(int ifindex, struct rtcfg_cmd *cmd_event)
+{
+	struct rtcfg_connection *conn;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	if (!list_empty(&rtcfg_dev->spec.srv.conn_list)) {
+		conn = list_entry(rtcfg_dev->spec.srv.conn_list.next,
+				  struct rtcfg_connection, entry);
+
+		list_del(&conn->entry);
+		rtcfg_dev->other_stations--;
+
+		if (conn->state > RTCFG_CONN_SEARCHING) {
+			rtcfg_dev->stations_found--;
+			if (conn->state >= RTCFG_CONN_STAGE_2)
+				rtcfg_dev->spec.srv.clients_configured--;
+			if (conn->flags & _RTCFG_FLAG_READY)
+				rtcfg_dev->stations_ready--;
+		}
+
+		if ((conn->stage2_file) &&
+		    (rtcfg_release_file(conn->stage2_file) == 0))
+			cmd_event->args.detach.stage2_file = conn->stage2_file;
+
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+		cmd_event->args.detach.conn_buf = conn;
+
+		return -EAGAIN;
+	}
+
+	if (test_and_clear_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags))
+		rtdm_timer_destroy(&rtcfg_dev->timer);
+	rtcfg_reset_device(ifindex);
+
+	rtcfg_next_main_state(ifindex, RTCFG_MAIN_OFF);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	return 0;
+}
+
+/*** Server Frame Event Handlers ***/
+
+static int rtcfg_server_recv_announce(int ifindex, RTCFG_EVENT event_id,
+				      struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_frm_announce *announce;
+	struct rtcfg_connection *conn;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_announce)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid announce frame\n");
+		return -EINVAL;
+	}
+
+	announce = (struct rtcfg_frm_announce *)rtskb->data;
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		switch (announce->addr_type) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+			u32 announce_addr;
+		case RTCFG_ADDR_IP:
+			memcpy(&announce_addr, announce->addr, 4);
+
+			if (((conn->addr_type & RTCFG_ADDR_MASK) ==
+			     RTCFG_ADDR_IP) &&
+			    (announce_addr == conn->addr.ip_addr)) {
+				/* save MAC address - Ethernet-specific! */
+				memcpy(conn->mac_addr,
+				       rtskb->mac.ethernet->h_source, ETH_ALEN);
+
+				/* update routing table */
+				rt_ip_route_add_host(conn->addr.ip_addr,
+						     conn->mac_addr,
+						     rtskb->rtdev);
+
+				/* remove IP address */
+				__rtskb_pull(rtskb, RTCFG_ADDRSIZE_IP);
+
+				rtcfg_do_conn_event(conn, event_id, rtskb);
+
+				goto out;
+			}
+			break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+		case RTCFG_ADDR_MAC:
+			/* Ethernet-specific! */
+			if (memcmp(conn->mac_addr,
+				   rtskb->mac.ethernet->h_source,
+				   ETH_ALEN) == 0) {
+				rtcfg_do_conn_event(conn, event_id, rtskb);
+
+				goto out;
+			}
+			break;
+		}
+	}
+
+out:
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+static int rtcfg_server_recv_ack(int ifindex, struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_connection *conn;
+
+	if (rtskb->len < sizeof(struct rtcfg_frm_ack_cfg)) {
+		rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+		RTCFG_DEBUG(1, "RTcfg: received invalid ack_cfg frame\n");
+		return -EINVAL;
+	}
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		/* find the corresponding connection - Ethernet-specific! */
+		if (memcmp(conn->mac_addr, rtskb->mac.ethernet->h_source,
+			   ETH_ALEN) != 0)
+			continue;
+
+		rtcfg_do_conn_event(conn, RTCFG_FRM_ACK_CFG, rtskb);
+
+		break;
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+static int rtcfg_server_recv_simple_frame(int ifindex, RTCFG_EVENT event_id,
+					  struct rtskb *rtskb)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_connection *conn;
+
+	list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+		conn = list_entry(entry, struct rtcfg_connection, entry);
+
+		/* find the corresponding connection - Ethernet-specific! */
+		if (memcmp(conn->mac_addr, rtskb->mac.ethernet->h_source,
+			   ETH_ALEN) != 0)
+			continue;
+
+		rtcfg_do_conn_event(conn, event_id, rtskb);
+
+		break;
+	}
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+/*** Utility Functions ***/
+
+void rtcfg_queue_blocking_call(int ifindex, struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	rtdm_lock_get_irqsave(&rtcfg_dev->event_calls_lock, context);
+	list_add_tail(&call->list_entry, &rtcfg_dev->event_calls);
+	rtdm_lock_put_irqrestore(&rtcfg_dev->event_calls_lock, context);
+}
+
+struct rt_proc_call *rtcfg_dequeue_blocking_call(int ifindex)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call;
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	rtdm_lock_get_irqsave(&rtcfg_dev->event_calls_lock, context);
+	if (!list_empty(&rtcfg_dev->event_calls)) {
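+		/* the cast relies on list_entry being the first member
+		   of struct rt_proc_call */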
+		call = (struct rt_proc_call *)rtcfg_dev->event_calls.next;
+		list_del(&call->list_entry);
+	} else
+		call = NULL;
+	rtdm_lock_put_irqrestore(&rtcfg_dev->event_calls_lock, context);
+
+	return call;
+}
+
+void rtcfg_complete_cmd(int ifindex, RTCFG_EVENT event_id, int result)
+{
+	struct rt_proc_call *call;
+	struct rtcfg_cmd *cmd_event;
+
+	while (1) {
+		call = rtcfg_dequeue_blocking_call(ifindex);
+		if (call == NULL)
+			break;
+
+		cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+
+		rtpc_complete_call(call, (cmd_event->internal.data.event_id ==
+					  event_id) ?
+						 result :
+						 -EINVAL);
+	}
+}
+
+void rtcfg_reset_device(int ifindex)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+
+	rtcfg_dev->other_stations = 0;
+	rtcfg_dev->stations_found = 0;
+	rtcfg_dev->stations_ready = 0;
+	rtcfg_dev->flags = 0;
+	rtcfg_dev->burstrate = 0;
+
+	memset(&rtcfg_dev->spec, 0, sizeof(rtcfg_dev->spec));
+	INIT_LIST_HEAD(&rtcfg_dev->spec.srv.conn_list);
+}
+
+void rtcfg_init_state_machines(void)
+{
+	int i;
+	struct rtcfg_device *rtcfg_dev;
+
+	memset(device, 0, sizeof(device));
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtcfg_dev = &device[i];
+		rtcfg_dev->state = RTCFG_MAIN_OFF;
+
+		rtdm_mutex_init(&rtcfg_dev->dev_mutex);
+
+		INIT_LIST_HEAD(&rtcfg_dev->event_calls);
+		rtdm_lock_init(&rtcfg_dev->event_calls_lock);
+	}
+}
+
+void rtcfg_cleanup_state_machines(void)
+{
+	int i;
+	struct rtcfg_device *rtcfg_dev;
+	struct rtcfg_connection *conn;
+	struct list_head *entry;
+	struct list_head *tmp;
+	struct rt_proc_call *call;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtcfg_dev = &device[i];
+
+		if (test_and_clear_bit(FLAG_TIMER_STARTED, &rtcfg_dev->flags))
+			rtdm_timer_destroy(&rtcfg_dev->timer);
+
+		/*
+		 * No need to synchronize with rtcfg_timer here: the task
+		 * running rtcfg_timer is already dead.
+		 */
+
+		rtdm_mutex_destroy(&rtcfg_dev->dev_mutex);
+
+		if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+			list_for_each_safe (entry, tmp,
+					    &rtcfg_dev->spec.srv.conn_list) {
+				conn = list_entry(
+					entry, struct rtcfg_connection, entry);
+
+				if (conn->stage1_data != NULL)
+					kfree(conn->stage1_data);
+
+				if ((conn->stage2_file != NULL) &&
+				    (rtcfg_release_file(conn->stage2_file) ==
+				     0)) {
+					vfree(conn->stage2_file->buffer);
+					kfree(conn->stage2_file);
+				}
+
+				kfree(entry);
+			}
+		} else if (rtcfg_dev->state != RTCFG_MAIN_OFF) {
+			if (rtcfg_dev->spec.clt.station_addr_list != NULL)
+				kfree(rtcfg_dev->spec.clt.station_addr_list);
+
+			if (rtcfg_dev->spec.clt.stage2_chain != NULL)
+				kfree_rtskb(rtcfg_dev->spec.clt.stage2_chain);
+		}
+
+		while (1) {
+			call = rtcfg_dequeue_blocking_call(i);
+			if (call == NULL)
+				break;
+
+			rtpc_complete_call_nrt(call, -ENODEV);
+		}
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_file.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_file.c
new file mode 100644
index 0000000..331a4b9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_file.c
@@ -0,0 +1,81 @@
+/***
+ *
+ *  rtcfg/rtcfg_file.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+
+#include <rtdm/driver.h>
+#include <rtcfg_chrdev.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_file.h>
+
+/* Note:
+ * We don't need any special lock protection while manipulating the
+ * rtcfg_files list. The list is only accessed through valid connections, and
+ * connections are already lock-protected.
+ */
+LIST_HEAD(rtcfg_files);
+
+struct rtcfg_file *rtcfg_get_file(const char *filename)
+{
+	struct list_head *entry;
+	struct rtcfg_file *file;
+
+	RTCFG_DEBUG(4, "RTcfg: looking for file %s\n", filename);
+
+	list_for_each (entry, &rtcfg_files) {
+		file = list_entry(entry, struct rtcfg_file, entry);
+
+		if (strcmp(file->name, filename) == 0) {
+			file->ref_count++;
+
+			RTCFG_DEBUG(4,
+				    "RTcfg: reusing file entry, now %d users\n",
+				    file->ref_count);
+
+			return file;
+		}
+	}
+
+	return NULL;
+}
+
+void rtcfg_add_file(struct rtcfg_file *file)
+{
+	RTCFG_DEBUG(4, "RTcfg: adding file %s to list\n", file->name);
+
+	file->ref_count = 1;
+	list_add_tail(&file->entry, &rtcfg_files);
+}
+
+int rtcfg_release_file(struct rtcfg_file *file)
+{
+	if (--file->ref_count == 0) {
+		RTCFG_DEBUG(4, "RTcfg: removing file %s from list\n",
+			    file->name);
+
+		list_del(&file->entry);
+	}
+
+	return file->ref_count;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_frame.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_frame.c
new file mode 100644
index 0000000..ef8f0e8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_frame.c
@@ -0,0 +1,571 @@
+/***
+ *
+ *  rtcfg/rtcfg_frame.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/if_ether.h>
+
+#include <stack_mgr.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
+static unsigned int num_rtskbs = 32;
+module_param(num_rtskbs, uint, 0444);
+MODULE_PARM_DESC(num_rtskbs, "Number of realtime socket buffers used by RTcfg");
+
+static struct rtskb_pool rtcfg_pool;
+static rtdm_task_t rx_task;
+static rtdm_event_t rx_event;
+static struct rtskb_queue rx_queue;
+
+void rtcfg_thread_signal(void)
+{
+	rtdm_event_signal(&rx_event);
+}
+
+static int rtcfg_rx_handler(struct rtskb *rtskb, struct rtpacket_type *pt)
+{
+	if (rtskb_acquire(rtskb, &rtcfg_pool) == 0) {
+		rtskb_queue_tail(&rx_queue, rtskb);
+		rtcfg_thread_signal();
+	} else
+		kfree_rtskb(rtskb);
+
+	return 0;
+}
+
+static void rtcfg_rx_task(void *arg)
+{
+	struct rtskb *rtskb;
+	struct rtcfg_frm_head *frm_head;
+	struct rtnet_device *rtdev;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&rx_event) < 0)
+			break;
+
+		while ((rtskb = rtskb_dequeue(&rx_queue))) {
+			rtdev = rtskb->rtdev;
+
+			if (rtskb->pkt_type == PACKET_OTHERHOST) {
+				kfree_rtskb(rtskb);
+				continue;
+			}
+
+			if (rtskb->len < sizeof(struct rtcfg_frm_head)) {
+				RTCFG_DEBUG(
+					1,
+					"RTcfg: %s() received an invalid frame\n",
+					__FUNCTION__);
+				kfree_rtskb(rtskb);
+				continue;
+			}
+
+			frm_head = (struct rtcfg_frm_head *)rtskb->data;
+
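+			/* map the on-wire frame id onto the corresponding
+			   RTCFG_FRM_* event of the main state machine */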
+			if (rtcfg_do_main_event(rtskb->rtdev->ifindex,
+						frm_head->id +
+							RTCFG_FRM_STAGE_1_CFG,
+						rtskb) < 0)
+				kfree_rtskb(rtskb);
+		}
+
+		rtcfg_timer_run();
+	}
+}
+
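+/*
+ * Transmit an RTcfg frame. The caller never gets the buffer back: the
+ * rtskb is either handed to rtdev_xmit() or freed here. The device
+ * reference is dropped on every path.
+ */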
+int rtcfg_send_frame(struct rtskb *rtskb, struct rtnet_device *rtdev,
+		     u8 *dest_addr)
+{
+	int ret;
+
+	rtskb->rtdev = rtdev;
+	rtskb->priority = RTCFG_SKB_PRIO;
+
+	if (rtdev->hard_header) {
+		ret = rtdev->hard_header(rtskb, rtdev, ETH_RTCFG, dest_addr,
+					 rtdev->dev_addr, rtskb->len);
+		if (ret < 0)
+			goto err;
+	}
+
+	if ((rtdev->flags & IFF_UP) != 0) {
+		ret = 0;
+		if (rtdev_xmit(rtskb) != 0)
+			ret = -EAGAIN;
+	} else {
+		ret = -ENETDOWN;
+		goto err;
+	}
+
+	rtdev_dereference(rtdev);
+	return ret;
+
+err:
+	kfree_rtskb(rtskb);
+	rtdev_dereference(rtdev);
+	return ret;
+}
+
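+/*
+ * Build and send a stage 1 config frame. With IPv4 addressing, client
+ * and server addresses are inserted in the middle of the frame; the
+ * frame pointer is advanced past them so that the fixed trailing fields
+ * (burstrate, cfg_len) end up after the addresses.
+ */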
+int rtcfg_send_stage_1(struct rtcfg_connection *conn)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_stage_1_cfg *stage_1_frm;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_stage_1_cfg) + conn->stage1_size +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
+			      2 * RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	stage_1_frm = (struct rtcfg_frm_stage_1_cfg *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_stage_1_cfg));
+
+	stage_1_frm->head.id = RTCFG_ID_STAGE_1_CFG;
+	stage_1_frm->head.version = 0;
+	stage_1_frm->addr_type = conn->addr_type & RTCFG_ADDR_MASK;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (stage_1_frm->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, 2 * RTCFG_ADDRSIZE_IP);
+
+		memcpy(stage_1_frm->client_addr, &(conn->addr.ip_addr), 4);
+
+		stage_1_frm =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_frm) +
+							 RTCFG_ADDRSIZE_IP);
+
+		memcpy(stage_1_frm->server_addr, &(rtdev->local_ip), 4);
+
+		stage_1_frm =
+			(struct rtcfg_frm_stage_1_cfg *)(((u8 *)stage_1_frm) +
+							 RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	stage_1_frm->burstrate = device[conn->ifindex].burstrate;
+	stage_1_frm->cfg_len = htons(conn->stage1_size);
+
+	memcpy(rtskb_put(rtskb, conn->stage1_size), conn->stage1_data,
+	       conn->stage1_size);
+
+	return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
+}
+
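+/*
+ * Send the initial stage 2 frame, carrying the total config size plus as
+ * much payload as fits into one MTU; conn->cfg_offs records how far the
+ * transfer got so rtcfg_send_stage_2_frag() can continue from there.
+ */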
+int rtcfg_send_stage_2(struct rtcfg_connection *conn, int send_data)
+{
+	struct rtnet_device *rtdev;
+	struct rtcfg_device *rtcfg_dev = &device[conn->ifindex];
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_stage_2_cfg *stage_2_frm;
+	size_t total_size;
+	size_t frag_size;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	if (send_data) {
+		total_size = conn->stage2_file->size;
+		frag_size = MIN(rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO) -
+					sizeof(struct rtcfg_frm_stage_2_cfg),
+				total_size);
+	} else {
+		total_size = 0;
+		frag_size = 0;
+	}
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_stage_2_cfg) + frag_size;
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	stage_2_frm = (struct rtcfg_frm_stage_2_cfg *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_stage_2_cfg));
+
+	stage_2_frm->head.id = RTCFG_ID_STAGE_2_CFG;
+	stage_2_frm->head.version = 0;
+	stage_2_frm->flags = rtcfg_dev->flags;
+	stage_2_frm->stations = htonl(rtcfg_dev->other_stations);
+	stage_2_frm->heartbeat_period = htons(rtcfg_dev->spec.srv.heartbeat);
+	stage_2_frm->cfg_len = htonl(total_size);
+
+	if (send_data)
+		memcpy(rtskb_put(rtskb, frag_size), conn->stage2_file->buffer,
+		       frag_size);
+	conn->cfg_offs = frag_size;
+
+	return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
+}
+
+int rtcfg_send_stage_2_frag(struct rtcfg_connection *conn)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_stage_2_cfg_frag *stage_2_frm;
+	size_t frag_size;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	frag_size = MIN(rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO) -
+				sizeof(struct rtcfg_frm_stage_2_cfg_frag),
+			conn->stage2_file->size - conn->cfg_offs);
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_stage_2_cfg_frag) + frag_size;
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	stage_2_frm = (struct rtcfg_frm_stage_2_cfg_frag *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_stage_2_cfg_frag));
+
+	stage_2_frm->head.id = RTCFG_ID_STAGE_2_CFG_FRAG;
+	stage_2_frm->head.version = 0;
+	stage_2_frm->frag_offs = htonl(conn->cfg_offs);
+
+	memcpy(rtskb_put(rtskb, frag_size),
+	       conn->stage2_file->buffer + conn->cfg_offs, frag_size);
+	conn->cfg_offs += frag_size;
+
+	return rtcfg_send_frame(rtskb, rtdev, conn->mac_addr);
+}
+
+int rtcfg_send_announce_new(int ifindex)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_announce *announce_new;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_announce) +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     (((rtcfg_dev->spec.clt.addr_type & RTCFG_ADDR_MASK) ==
+		       RTCFG_ADDR_IP) ?
+			      RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	announce_new = (struct rtcfg_frm_announce *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_announce));
+
+	announce_new->head.id = RTCFG_ID_ANNOUNCE_NEW;
+	announce_new->head.version = 0;
+	announce_new->addr_type = rtcfg_dev->spec.clt.addr_type;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (announce_new->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
+
+		memcpy(announce_new->addr, &(rtdev->local_ip), 4);
+
+		announce_new =
+			(struct rtcfg_frm_announce *)(((u8 *)announce_new) +
+						      RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	announce_new->flags = rtcfg_dev->flags;
+	announce_new->burstrate = rtcfg_dev->burstrate;
+
+	return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
+}
+
+int rtcfg_send_announce_reply(int ifindex, u8 *dest_mac_addr)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_announce *announce_rpl;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_announce) +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     ((rtcfg_dev->spec.clt.addr_type == RTCFG_ADDR_IP) ?
+			      RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	announce_rpl = (struct rtcfg_frm_announce *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_announce));
+
+	announce_rpl->head.id = RTCFG_ID_ANNOUNCE_REPLY;
+	announce_rpl->head.version = 0;
+	announce_rpl->addr_type = rtcfg_dev->spec.clt.addr_type;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (announce_rpl->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
+
+		memcpy(announce_rpl->addr, &(rtdev->local_ip), 4);
+
+		announce_rpl =
+			(struct rtcfg_frm_announce *)(((u8 *)announce_rpl) +
+						      RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	announce_rpl->flags = rtcfg_dev->flags & _RTCFG_FLAG_READY;
+	announce_rpl->burstrate = 0; /* padding field */
+
+	return rtcfg_send_frame(rtskb, rtdev, dest_mac_addr);
+}
+
+int rtcfg_send_ack(int ifindex)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_ack_cfg *ack_frm;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_ack_cfg);
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	ack_frm = (struct rtcfg_frm_ack_cfg *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_ack_cfg));
+
+	ack_frm->head.id = RTCFG_ID_ACK_CFG;
+	ack_frm->head.version = 0;
+	ack_frm->ack_len = htonl(device[ifindex].spec.clt.cfg_offs);
+
+	return rtcfg_send_frame(rtskb, rtdev,
+				device[ifindex].spec.clt.srv_mac_addr);
+}
+
+int rtcfg_send_simple_frame(int ifindex, int frame_id, u8 *dest_addr)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_simple *simple_frm;
+
+	rtdev = rtdev_get_by_index(ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len + sizeof(struct rtcfg_frm_simple);
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	simple_frm = (struct rtcfg_frm_simple *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_simple));
+
+	simple_frm->head.id = frame_id;
+	simple_frm->head.version = 0;
+
+	return rtcfg_send_frame(rtskb, rtdev,
+				(dest_addr) ? dest_addr : rtdev->broadcast);
+}
+
+int rtcfg_send_dead_station(struct rtcfg_connection *conn)
+{
+	struct rtnet_device *rtdev;
+	struct rtskb *rtskb;
+	unsigned int rtskb_size;
+	struct rtcfg_frm_dead_station *dead_station_frm;
+
+	rtdev = rtdev_get_by_index(conn->ifindex);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	rtskb_size = rtdev->hard_header_len +
+		     sizeof(struct rtcfg_frm_dead_station) +
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		     (((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP) ?
+			      RTCFG_ADDRSIZE_IP :
+			      0);
+#else /* !CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+		     0;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	rtskb = alloc_rtskb(rtskb_size, &rtcfg_pool);
+	if (rtskb == NULL) {
+		rtdev_dereference(rtdev);
+		return -ENOBUFS;
+	}
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len);
+
+	dead_station_frm = (struct rtcfg_frm_dead_station *)rtskb_put(
+		rtskb, sizeof(struct rtcfg_frm_dead_station));
+
+	dead_station_frm->head.id = RTCFG_ID_DEAD_STATION;
+	dead_station_frm->head.version = 0;
+	dead_station_frm->addr_type = conn->addr_type & RTCFG_ADDR_MASK;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if (dead_station_frm->addr_type == RTCFG_ADDR_IP) {
+		rtskb_put(rtskb, RTCFG_ADDRSIZE_IP);
+
+		memcpy(dead_station_frm->logical_addr, &(conn->addr.ip_addr),
+		       4);
+
+		dead_station_frm = (struct rtcfg_frm_dead_station
+					    *)(((u8 *)dead_station_frm) +
+					       RTCFG_ADDRSIZE_IP);
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	/* Ethernet-specific! */
+	memcpy(dead_station_frm->physical_addr, conn->mac_addr, ETH_ALEN);
+	memset(&dead_station_frm->physical_addr[ETH_ALEN], 0,
+	       sizeof(dead_station_frm->physical_addr) - ETH_ALEN);
+
+	return rtcfg_send_frame(rtskb, rtdev, rtdev->broadcast);
+}
+
+static struct rtpacket_type rtcfg_packet_type = {
+	.type = __constant_htons(ETH_RTCFG),
+	.handler = rtcfg_rx_handler
+};
+
+int __init rtcfg_init_frames(void)
+{
+	int ret;
+
+	if (rtskb_module_pool_init(&rtcfg_pool, num_rtskbs) < num_rtskbs)
+		return -ENOMEM;
+
+	rtskb_queue_init(&rx_queue);
+	rtdm_event_init(&rx_event, 0);
+
+	ret = rtdm_task_init(&rx_task, "rtcfg-rx", rtcfg_rx_task, 0,
+			     RTDM_TASK_LOWEST_PRIORITY, 0);
+	if (ret < 0) {
+		rtdm_event_destroy(&rx_event);
+		goto error1;
+	}
+
+	ret = rtdev_add_pack(&rtcfg_packet_type);
+	if (ret < 0)
+		goto error2;
+
+	return 0;
+
+error2:
+	rtdm_event_destroy(&rx_event);
+	rtdm_task_destroy(&rx_task);
+
+error1:
+	rtskb_pool_release(&rtcfg_pool);
+
+	return ret;
+}
+
+void rtcfg_cleanup_frames(void)
+{
+	struct rtskb *rtskb;
+
+	rtdev_remove_pack(&rtcfg_packet_type);
+
+	rtdm_event_destroy(&rx_event);
+	rtdm_task_destroy(&rx_task);
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		kfree_rtskb(rtskb);
+	}
+
+	rtskb_pool_release(&rtcfg_pool);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_ioctl.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_ioctl.c
new file mode 100644
index 0000000..158d711
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_ioctl.c
@@ -0,0 +1,421 @@
+/***
+ *
+ *  rtcfg/rtcfg_ioctl.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/fs.h>
+#include <linux/file.h>
+#include <linux/vmalloc.h>
+
+#include <rtcfg_chrdev.h>
+#include <rtnet_rtpc.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_proc.h>
+
+int rtcfg_event_handler(struct rt_proc_call *call)
+{
+	struct rtcfg_cmd *cmd_event;
+
+	cmd_event = rtpc_get_priv(call, struct rtcfg_cmd);
+	return rtcfg_do_main_event(cmd_event->internal.data.ifindex,
+				   cmd_event->internal.data.event_id, call);
+}
+
+void keep_cmd_add(struct rt_proc_call *call, void *priv_data)
+{
+	/* do nothing on error (<0), or if file already present (=0) */
+	if (rtpc_get_result(call) <= 0)
+		return;
+
+	/* Don't cleanup any buffers, we are going to recycle them! */
+	rtpc_set_cleanup_handler(call, NULL);
+}
+
+void cleanup_cmd_add(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *buf;
+
+	/* unlock proc and update directory structure */
+	rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+
+	buf = cmd->args.add.conn_buf;
+	if (buf != NULL)
+		kfree(buf);
+
+	buf = cmd->args.add.stage1_data;
+	if (buf != NULL)
+		kfree(buf);
+
+	if (cmd->args.add.stage2_file != NULL) {
+		buf = cmd->args.add.stage2_file->buffer;
+		if (buf != NULL)
+			vfree(buf);
+		kfree(cmd->args.add.stage2_file);
+	}
+}
+
+void cleanup_cmd_del(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *buf;
+
+	/* unlock proc and update directory structure */
+	rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+
+	if (cmd->args.del.conn_buf != NULL) {
+		buf = cmd->args.del.conn_buf->stage1_data;
+		if (buf != NULL)
+			kfree(buf);
+		kfree(cmd->args.del.conn_buf);
+	}
+
+	if (cmd->args.del.stage2_file != NULL) {
+		buf = cmd->args.del.stage2_file->buffer;
+		if (buf != NULL)
+			vfree(buf);
+		kfree(cmd->args.del.stage2_file);
+	}
+}
+
+void copy_stage_1_data(struct rt_proc_call *call, void *priv_data)
+{
+	struct rtcfg_cmd *cmd;
+	int result = rtpc_get_result(call);
+
+	if (result <= 0)
+		return;
+
+	cmd = rtpc_get_priv(call, struct rtcfg_cmd);
+
+	if (cmd->args.client.buffer_size < (size_t)result)
+		rtpc_set_result(call, -ENOSPC);
+	else if (copy_to_user(cmd->args.client.buffer,
+			      cmd->args.client.rtskb->data, result) != 0)
+		rtpc_set_result(call, -EFAULT);
+}
+
+void cleanup_cmd_client(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *station_buf;
+	struct rtskb *rtskb;
+
+	station_buf = cmd->args.client.station_buf;
+	if (station_buf != NULL)
+		kfree(station_buf);
+
+	rtskb = cmd->args.client.rtskb;
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+void copy_stage_2_data(struct rt_proc_call *call, void *priv_data)
+{
+	struct rtcfg_cmd *cmd;
+	int result = rtpc_get_result(call);
+	struct rtskb *rtskb;
+
+	if (result <= 0)
+		return;
+
+	cmd = rtpc_get_priv(call, struct rtcfg_cmd);
+
+	if (cmd->args.announce.buffer_size < (size_t)result)
+		rtpc_set_result(call, -ENOSPC);
+	else {
+		rtskb = cmd->args.announce.rtskb;
+		do {
+			if (copy_to_user(cmd->args.announce.buffer, rtskb->data,
+					 rtskb->len) != 0) {
+				rtpc_set_result(call, -EFAULT);
+				break;
+			}
+			cmd->args.announce.buffer += rtskb->len;
+			rtskb = rtskb->next;
+		} while (rtskb != NULL);
+	}
+}
+
+void cleanup_cmd_announce(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	struct rtskb *rtskb;
+
+	rtskb = cmd->args.announce.rtskb;
+	if (rtskb != NULL)
+		kfree_rtskb(rtskb);
+}
+
+void cleanup_cmd_detach(void *priv_data)
+{
+	struct rtcfg_cmd *cmd = (struct rtcfg_cmd *)priv_data;
+	void *buf;
+
+	/* unlock proc and update directory structure */
+	rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+
+	if (cmd->args.detach.conn_buf) {
+		buf = cmd->args.detach.conn_buf->stage1_data;
+		if (buf != NULL)
+			kfree(buf);
+		kfree(cmd->args.detach.conn_buf);
+	}
+
+	if (cmd->args.detach.stage2_file != NULL) {
+		buf = cmd->args.detach.stage2_file->buffer;
+		if (buf)
+			vfree(buf);
+		kfree(cmd->args.detach.stage2_file);
+	}
+
+	if (cmd->args.detach.station_addr_list)
+		kfree(cmd->args.detach.station_addr_list);
+
+	if (cmd->args.detach.stage2_chain)
+		kfree_rtskb(cmd->args.detach.stage2_chain);
+}
+
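+/*
+ * Read the stage 2 config file into a kernel buffer, then re-dispatch
+ * the add command so the server picks up the now-attached file data.
+ */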
+static int load_cfg_file(struct rtcfg_file *cfgfile, struct rtcfg_cmd *cmd)
+{
+	size_t file_size = 0;
+	struct file *filp;
+	loff_t i_size;
+	int ret;
+
+	filp = filp_open(cfgfile->name, O_RDONLY, 0);
+	if (IS_ERR(filp))
+		return PTR_ERR(filp);
+
+	i_size = i_size_read(file_inode(filp));
+	if (i_size <= 0) {
+		/* allocate buffer even for empty files */
+		cfgfile->buffer = vmalloc(1);
+	} else {
+		cfgfile->buffer = NULL; /* Leave allocation to the kernel. */
+		ret = read_file_from_kernel(filp, &cfgfile->buffer,
+					i_size_read(file_inode(filp)),
+					&file_size, READING_UNKNOWN);
+		if (ret < 0) {
+			fput(filp);
+			return ret;
+		}
+	}
+
+	fput(filp);
+	cfgfile->size = file_size;
+
+	/* dispatch again, this time with new file attached */
+	return rtpc_dispatch_call(rtcfg_event_handler, 0, cmd,
+				sizeof(*cmd), NULL, cleanup_cmd_add);
+}
+
+int rtcfg_ioctl_add(struct rtnet_device *rtdev, struct rtcfg_cmd *cmd)
+{
+	struct rtcfg_connection *conn_buf;
+	struct rtcfg_file *file = NULL;
+	void *data_buf;
+	size_t size;
+	int ret;
+
+	conn_buf = kmalloc(sizeof(struct rtcfg_connection), GFP_KERNEL);
+	if (conn_buf == NULL)
+		return -ENOMEM;
+	cmd->args.add.conn_buf = conn_buf;
+
+	data_buf = NULL;
+	size = cmd->args.add.stage1_size;
+	if (size > 0) {
+		/* check stage 1 data size */
+		if (sizeof(struct rtcfg_frm_stage_1_cfg) +
+			    2 * RTCFG_ADDRSIZE_IP + size >
+		    rtdev->get_mtu(rtdev, RTCFG_SKB_PRIO)) {
+			ret = -ESTAGE1SIZE;
+			goto err;
+		}
+
+		data_buf = kmalloc(size, GFP_KERNEL);
+		if (data_buf == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		ret = copy_from_user(data_buf, cmd->args.add.stage1_data, size);
+		if (ret != 0) {
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+	cmd->args.add.stage1_data = data_buf;
+
+	if (cmd->args.add.stage2_filename != NULL) {
+		size = strnlen_user(cmd->args.add.stage2_filename, PATH_MAX);
+		if (size == 0) {
+			/* faulting user pointer */
+			ret = -EFAULT;
+			goto err;
+		}
+
+		file = kmalloc(sizeof(struct rtcfg_file) + size, GFP_KERNEL);
+		if (file == NULL) {
+			ret = -ENOMEM;
+			goto err;
+		}
+
+		file->name = (char *)file + sizeof(struct rtcfg_file);
+		file->buffer = NULL;
+
+		ret = copy_from_user(
+			(void *)file + sizeof(struct rtcfg_file),
+			(const void *)cmd->args.add.stage2_filename, size);
+		if (ret != 0) {
+			ret = -EFAULT;
+			goto err;
+		}
+	}
+	cmd->args.add.stage2_file = file;
+
+	/* lock proc structure for modification */
+	rtcfg_lockwr_proc(cmd->internal.data.ifindex);
+
+	ret = rtpc_dispatch_call(rtcfg_event_handler, 0, cmd, sizeof(*cmd),
+				 keep_cmd_add, cleanup_cmd_add);
+
+	/* load file if missing */
+	if (ret > 0) {
+		ret = load_cfg_file(file, cmd);
+		if (ret) {
+			rtcfg_unlockwr_proc(cmd->internal.data.ifindex);
+			goto err;
+		}
+	}
+
+	return ret;
+
+err:
+	kfree(conn_buf);
+	if (data_buf != NULL)
+		kfree(data_buf);
+	if (file != NULL) {
+		if (file->buffer != NULL)
+			vfree(file->buffer);
+		kfree(file);
+	}
+	return ret;
+}
+
+int rtcfg_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		unsigned long arg)
+{
+	struct rtcfg_cmd cmd;
+	struct rtcfg_station *station_buf;
+	int ret;
+
+	ret = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+	if (ret != 0)
+		return -EFAULT;
+
+	cmd.internal.data.ifindex = rtdev->ifindex;
+	cmd.internal.data.event_id = _IOC_NR(request);
+
+	switch (request) {
+	case RTCFG_IOC_SERVER:
+		ret = rtpc_dispatch_call(rtcfg_event_handler, 0, &cmd,
+					 sizeof(cmd), NULL, NULL);
+		break;
+
+	case RTCFG_IOC_ADD:
+		ret = rtcfg_ioctl_add(rtdev, &cmd);
+		break;
+
+	case RTCFG_IOC_DEL:
+		cmd.args.del.conn_buf = NULL;
+		cmd.args.del.stage2_file = NULL;
+
+		/* lock proc structure for modification
+		   (unlock in cleanup_cmd_del) */
+		rtcfg_lockwr_proc(cmd.internal.data.ifindex);
+
+		ret = rtpc_dispatch_call(rtcfg_event_handler, 0, &cmd,
+					 sizeof(cmd), NULL, cleanup_cmd_del);
+		break;
+
+	case RTCFG_IOC_WAIT:
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.wait.timeout, &cmd,
+					 sizeof(cmd), NULL, NULL);
+		break;
+
+	case RTCFG_IOC_CLIENT:
+		station_buf = kmalloc(sizeof(struct rtcfg_station) *
+					      cmd.args.client.max_stations,
+				      GFP_KERNEL);
+		if (station_buf == NULL)
+			return -ENOMEM;
+		cmd.args.client.station_buf = station_buf;
+		cmd.args.client.rtskb = NULL;
+
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.client.timeout, &cmd,
+					 sizeof(cmd), copy_stage_1_data,
+					 cleanup_cmd_client);
+		break;
+
+	case RTCFG_IOC_ANNOUNCE:
+		cmd.args.announce.rtskb = NULL;
+
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.announce.timeout, &cmd,
+					 sizeof(cmd), copy_stage_2_data,
+					 cleanup_cmd_announce);
+		break;
+
+	case RTCFG_IOC_READY:
+		ret = rtpc_dispatch_call(rtcfg_event_handler,
+					 cmd.args.ready.timeout, &cmd,
+					 sizeof(cmd), NULL, NULL);
+		break;
+
+	case RTCFG_IOC_DETACH:
+		do {
+			cmd.args.detach.conn_buf = NULL;
+			cmd.args.detach.stage2_file = NULL;
+			cmd.args.detach.station_addr_list = NULL;
+			cmd.args.detach.stage2_chain = NULL;
+
+			/* lock proc structure for modification
+			   (unlock in cleanup_cmd_detach) */
+			rtcfg_lockwr_proc(cmd.internal.data.ifindex);
+
+			ret = rtpc_dispatch_call(rtcfg_event_handler, 0, &cmd,
+						 sizeof(cmd), NULL,
+						 cleanup_cmd_detach);
+		} while (ret == -EAGAIN);
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+struct rtnet_ioctls rtcfg_ioctls = { .service_name = "RTcfg",
+				     .ioctl_type = RTNET_IOC_TYPE_RTCFG,
+				     .handler = rtcfg_ioctl };
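+
+/* This table hooks rtcfg_ioctl() into the RTnet character device layer; it
+   is expected to be registered during module init via rtcfg_init_ioctls()
+   (see rtcfg_module.c), which routes RTCFG_IOC_* requests here. */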
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_module.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_module.c
new file mode 100644
index 0000000..39e96b9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_module.c
@@ -0,0 +1,83 @@
+/***
+ *
+ *  rtcfg/rtcfg_module.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003, 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+#include <linux/kernel.h>
+
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_ioctl.h>
+#include <rtcfg/rtcfg_proc.h>
+
+MODULE_LICENSE("GPL");
+
+int __init rtcfg_init(void)
+{
+	int ret;
+
+	printk("RTcfg: init real-time configuration distribution protocol\n");
+
+	ret = rtcfg_init_ioctls();
+	if (ret != 0)
+		goto error1;
+
+	rtcfg_init_state_machines();
+
+	ret = rtcfg_init_frames();
+	if (ret != 0)
+		goto error2;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = rtcfg_init_proc();
+	if (ret != 0) {
+		rtcfg_cleanup_frames();
+		goto error2;
+	}
+#endif
+
+	return 0;
+
+error2:
+	rtcfg_cleanup_state_machines();
+	rtcfg_cleanup_ioctls();
+
+error1:
+	return ret;
+}
+
+void rtcfg_cleanup(void)
+{
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtcfg_cleanup_proc();
+#endif
+	rtcfg_cleanup_frames();
+	rtcfg_cleanup_state_machines();
+	rtcfg_cleanup_ioctls();
+
+	printk("RTcfg: unloaded\n");
+}
+
+module_init(rtcfg_init);
+module_exit(rtcfg_cleanup);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_proc.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_proc.c
new file mode 100644
index 0000000..3b39b74
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_proc.c
@@ -0,0 +1,347 @@
+/***
+ *
+ *	rtcfg/rtcfg_proc.c
+ *
+ *	Real-Time Configuration Distribution Protocol
+ *
+ *	Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *	This program is free software; you can redistribute it and/or modify
+ *	it under the terms of the GNU General Public License as published by
+ *	the Free Software Foundation; either version 2 of the License, or
+ *	(at your option) any later version.
+ *
+ *	This program is distributed in the hope that it will be useful,
+ *	but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *	MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *	GNU General Public License for more details.
+ *
+ *	You should have received a copy of the GNU General Public License
+ *	along with this program; if not, write to the Free Software
+ *	Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_port.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+DEFINE_MUTEX(nrt_proc_lock);
+static struct xnvfile_directory rtcfg_proc_root;
+
+static int rtnet_rtcfg_proc_lock_get(struct xnvfile *vfile)
+{
+	return mutex_lock_interruptible(&nrt_proc_lock);
+}
+
+static void rtnet_rtcfg_proc_lock_put(struct xnvfile *vfile)
+{
+	mutex_unlock(&nrt_proc_lock);
+}
+
+static struct xnvfile_lock_ops rtnet_rtcfg_proc_lock_ops = {
+	.get = rtnet_rtcfg_proc_lock_get,
+	.put = rtnet_rtcfg_proc_lock_put,
+};
+
+int rtnet_rtcfg_dev_state_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct rtcfg_device *rtcfg_dev = xnvfile_priv(it->vfile);
+	const char *state_name[] = { "OFF",
+				     "SERVER_RUNNING",
+				     "CLIENT_0",
+				     "CLIENT_1",
+				     "CLIENT_ANNOUNCED",
+				     "CLIENT_ALL_KNOWN",
+				     "CLIENT_ALL_FRAMES",
+				     "CLIENT_2",
+				     "CLIENT_READY" };
+
+	xnvfile_printf(it,
+		       "state:\t\t\t%d (%s)\n"
+		       "flags:\t\t\t%08lX\n"
+		       "other stations:\t\t%d\n"
+		       "stations found:\t\t%d\n"
+		       "stations ready:\t\t%d\n",
+		       rtcfg_dev->state, state_name[rtcfg_dev->state],
+		       rtcfg_dev->flags, rtcfg_dev->other_stations,
+		       rtcfg_dev->stations_found, rtcfg_dev->stations_ready);
+
+	if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+		xnvfile_printf(it,
+			       "configured clients:\t%d\n"
+			       "burstrate:\t\t%d\n"
+			       "heartbeat period:\t%d ms\n",
+			       rtcfg_dev->spec.srv.clients_configured,
+			       rtcfg_dev->burstrate,
+			       rtcfg_dev->spec.srv.heartbeat);
+	} else if (rtcfg_dev->state != RTCFG_MAIN_OFF) {
+		xnvfile_printf(
+			it,
+			"address type:\t\t%d\n"
+			"server address:\t\t%02X:%02X:%02X:%02X:%02X:%02X\n"
+			"stage 2 config:\t\t%d/%d\n",
+			rtcfg_dev->spec.clt.addr_type,
+			rtcfg_dev->spec.clt.srv_mac_addr[0],
+			rtcfg_dev->spec.clt.srv_mac_addr[1],
+			rtcfg_dev->spec.clt.srv_mac_addr[2],
+			rtcfg_dev->spec.clt.srv_mac_addr[3],
+			rtcfg_dev->spec.clt.srv_mac_addr[4],
+			rtcfg_dev->spec.clt.srv_mac_addr[5],
+			rtcfg_dev->spec.clt.cfg_offs,
+			rtcfg_dev->spec.clt.cfg_len);
+	}
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtcfg_dev_state_vfile_ops = {
+	.show = rtnet_rtcfg_dev_state_show,
+};
+
+int rtnet_rtcfg_dev_stations_show(struct xnvfile_regular_iterator *it, void *d)
+{
+	struct rtcfg_device *rtcfg_dev = xnvfile_priv(it->vfile);
+	struct rtcfg_connection *conn;
+	struct rtcfg_station *station;
+	int i;
+
+	if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+		list_for_each_entry (conn, &rtcfg_dev->spec.srv.conn_list,
+				     entry) {
+			if ((conn->state != RTCFG_CONN_SEARCHING) &&
+			    (conn->state != RTCFG_CONN_DEAD))
+				xnvfile_printf(
+					it,
+					"%02X:%02X:%02X:%02X:%02X:%02X\t%02X\n",
+					conn->mac_addr[0], conn->mac_addr[1],
+					conn->mac_addr[2], conn->mac_addr[3],
+					conn->mac_addr[4], conn->mac_addr[5],
+					conn->flags);
+		}
+	} else if (rtcfg_dev->spec.clt.station_addr_list) {
+		for (i = 0; i < rtcfg_dev->stations_found; i++) {
+			station = &rtcfg_dev->spec.clt.station_addr_list[i];
+
+			xnvfile_printf(
+				it, "%02X:%02X:%02X:%02X:%02X:%02X\t%02X\n",
+				station->mac_addr[0], station->mac_addr[1],
+				station->mac_addr[2], station->mac_addr[3],
+				station->mac_addr[4], station->mac_addr[5],
+				station->flags);
+		}
+	}
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtcfg_dev_stations_vfile_ops = {
+	.show = rtnet_rtcfg_dev_stations_show,
+};
+
+int rtnet_rtcfg_dev_conn_state_show(struct xnvfile_regular_iterator *it,
+				    void *d)
+{
+	struct rtcfg_connection *conn = xnvfile_priv(it->vfile);
+	char *state_name[] = { "SEARCHING", "STAGE_1", "STAGE_2", "READY",
+			       "DEAD" };
+
+	xnvfile_printf(it,
+		       "state:\t\t\t%d (%s)\n"
+		       "flags:\t\t\t%02X\n"
+		       "stage 1 size:\t\t%zd\n"
+		       "stage 2 filename:\t%s\n"
+		       "stage 2 size:\t\t%zd\n"
+		       "stage 2 offset:\t\t%d\n"
+		       "burstrate:\t\t%d\n"
+		       "mac address:\t\t%02X:%02X:%02X:%02X:%02X:%02X\n",
+		       conn->state, state_name[conn->state], conn->flags,
+		       conn->stage1_size,
+		       (conn->stage2_file) ? conn->stage2_file->name : "-",
+		       (conn->stage2_file) ? conn->stage2_file->size : 0,
+		       conn->cfg_offs, conn->burstrate, conn->mac_addr[0],
+		       conn->mac_addr[1], conn->mac_addr[2], conn->mac_addr[3],
+		       conn->mac_addr[4], conn->mac_addr[5]);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+	if ((conn->addr_type & RTCFG_ADDR_MASK) == RTCFG_ADDR_IP)
+		xnvfile_printf(it, "ip:\t\t\t%u.%u.%u.%u\n",
+			       NIPQUAD(conn->addr.ip_addr));
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtcfg_dev_conn_state_vfile_ops = {
+	.show = rtnet_rtcfg_dev_conn_state_show,
+};
+
+void rtcfg_update_conn_proc_entries(int ifindex)
+{
+	struct rtcfg_device *dev = &device[ifindex];
+	struct rtcfg_connection *conn;
+	char name_buf[64];
+
+	if (dev->state != RTCFG_MAIN_SERVER_RUNNING)
+		return;
+
+	list_for_each_entry (conn, &dev->spec.srv.conn_list, entry) {
+		switch (conn->addr_type & RTCFG_ADDR_MASK) {
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_RTIPV4)
+		case RTCFG_ADDR_IP:
+			snprintf(name_buf, 64, "CLIENT_%u.%u.%u.%u",
+				 NIPQUAD(conn->addr.ip_addr));
+			break;
+#endif /* CONFIG_XENO_DRIVERS_NET_RTIPV4 */
+
+		default: /* RTCFG_ADDR_MAC */
+			snprintf(name_buf, 64,
+				 "CLIENT_%02X%02X%02X%02X%02X%02X",
+				 conn->mac_addr[0], conn->mac_addr[1],
+				 conn->mac_addr[2], conn->mac_addr[3],
+				 conn->mac_addr[4], conn->mac_addr[5]);
+			break;
+		}
+		memset(&conn->proc_entry, '\0', sizeof(conn->proc_entry));
+		conn->proc_entry.entry.lockops = &rtnet_rtcfg_proc_lock_ops;
+		conn->proc_entry.ops = &rtnet_rtcfg_dev_conn_state_vfile_ops;
+		xnvfile_priv(&conn->proc_entry) = conn;
+
+		xnvfile_init_regular(name_buf, &conn->proc_entry,
+				     &dev->proc_entry);
+	}
+}
+
+void rtcfg_remove_conn_proc_entries(int ifindex)
+{
+	struct rtcfg_device *dev = &device[ifindex];
+	struct rtcfg_connection *conn;
+
+	if (dev->state != RTCFG_MAIN_SERVER_RUNNING)
+		return;
+
+	list_for_each_entry (conn, &dev->spec.srv.conn_list, entry)
+		xnvfile_destroy_regular(&conn->proc_entry);
+}
+
+void rtcfg_new_rtdev(struct rtnet_device *rtdev)
+{
+	struct rtcfg_device *dev = &device[rtdev->ifindex];
+	int err;
+
+	mutex_lock(&nrt_proc_lock);
+
+	memset(&dev->proc_entry, '\0', sizeof(dev->proc_entry));
+	err = xnvfile_init_dir(rtdev->name, &dev->proc_entry, &rtcfg_proc_root);
+	if (err < 0)
+		goto error1;
+
+	memset(&dev->proc_state_vfile, '\0', sizeof(dev->proc_state_vfile));
+	dev->proc_state_vfile.entry.lockops = &rtnet_rtcfg_proc_lock_ops;
+	dev->proc_state_vfile.ops = &rtnet_rtcfg_dev_state_vfile_ops;
+	xnvfile_priv(&dev->proc_state_vfile) = dev;
+
+	err = xnvfile_init_regular("state", &dev->proc_state_vfile,
+				   &dev->proc_entry);
+	if (err < 0)
+		goto error2;
+
+	memset(&dev->proc_stations_vfile, '\0',
+	       sizeof(dev->proc_stations_vfile));
+	dev->proc_stations_vfile.entry.lockops = &rtnet_rtcfg_proc_lock_ops;
+	dev->proc_stations_vfile.ops = &rtnet_rtcfg_dev_stations_vfile_ops;
+	xnvfile_priv(&dev->proc_stations_vfile) = dev;
+
+	err = xnvfile_init_regular("stations_list", &dev->proc_stations_vfile,
+				   &dev->proc_entry);
+	if (err < 0)
+		goto error3;
+
+	mutex_unlock(&nrt_proc_lock);
+
+	return;
+
+error3:
+	xnvfile_destroy_regular(&dev->proc_state_vfile);
+error2:
+	xnvfile_destroy_dir(&dev->proc_entry);
+error1:
+	dev->proc_entry.entry.pde = NULL;
+	mutex_unlock(&nrt_proc_lock);
+}
+
+void rtcfg_remove_rtdev(struct rtnet_device *rtdev)
+{
+	struct rtcfg_device *dev = &device[rtdev->ifindex];
+
+	/* TODO: issue down command */
+
+	mutex_lock(&nrt_proc_lock);
+
+	if (dev->proc_entry.entry.pde) {
+		rtcfg_remove_conn_proc_entries(rtdev->ifindex);
+
+		xnvfile_destroy_regular(&dev->proc_stations_vfile);
+		xnvfile_destroy_regular(&dev->proc_state_vfile);
+		xnvfile_destroy_dir(&dev->proc_entry);
+		dev->proc_entry.entry.pde = NULL;
+	}
+
+	mutex_unlock(&nrt_proc_lock);
+}
+
+static struct rtdev_event_hook rtdev_hook = { .register_device =
+						      rtcfg_new_rtdev,
+					      .unregister_device =
+						      rtcfg_remove_rtdev,
+					      .ifup = NULL,
+					      .ifdown = NULL };
+
+int rtcfg_init_proc(void)
+{
+	struct rtnet_device *rtdev;
+	int i, err;
+
+	err = xnvfile_init_dir("rtcfg", &rtcfg_proc_root, &rtnet_proc_root);
+	if (err < 0)
+		goto err1;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev) {
+			rtcfg_new_rtdev(rtdev);
+			rtdev_dereference(rtdev);
+		}
+	}
+
+	rtdev_add_event_hook(&rtdev_hook);
+	return 0;
+
+err1:
+	printk("RTcfg: unable to initialise /proc entries\n");
+	return err;
+}
+
+void rtcfg_cleanup_proc(void)
+{
+	struct rtnet_device *rtdev;
+	int i;
+
+	rtdev_del_event_hook(&rtdev_hook);
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev) {
+			rtcfg_remove_rtdev(rtdev);
+			rtdev_dereference(rtdev);
+		}
+	}
+
+	xnvfile_destroy_dir(&rtcfg_proc_root);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_timer.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_timer.c
new file mode 100644
index 0000000..99baf2c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtcfg/rtcfg_timer.c
@@ -0,0 +1,110 @@
+/***
+ *
+ *  rtcfg/rtcfg_timer.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtcfg/rtcfg.h>
+#include <rtcfg/rtcfg_conn_event.h>
+#include <rtcfg/rtcfg_event.h>
+#include <rtcfg/rtcfg_frame.h>
+#include <rtcfg/rtcfg_timer.h>
+
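+/* Timer expiry handler: it runs in RTDM timer context and therefore only
+ * marks the device as pending and signals the RTcfg worker thread; the
+ * actual per-device processing happens in rtcfg_timer_run_one() below. */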
+void rtcfg_timer(rtdm_timer_t *t)
+{
+	struct rtcfg_device *rtcfg_dev =
+		container_of(t, struct rtcfg_device, timer);
+
+	set_bit(FLAG_TIMER_PENDING, &rtcfg_dev->flags);
+	rtcfg_thread_signal();
+}
+
+void rtcfg_timer_run_one(int ifindex)
+{
+	struct rtcfg_device *rtcfg_dev = &device[ifindex];
+	struct list_head *entry;
+	struct rtcfg_connection *conn;
+	int last_stage_1 = -1;
+	int burst_credit;
+	int index;
+	int ret, shutdown;
+
+	shutdown = test_and_clear_bit(FLAG_TIMER_SHUTDOWN, &rtcfg_dev->flags);
+
+	if (!test_and_clear_bit(FLAG_TIMER_PENDING, &rtcfg_dev->flags) ||
+	    shutdown)
+		return;
+
+	rtdm_mutex_lock(&rtcfg_dev->dev_mutex);
+
+	if (rtcfg_dev->state == RTCFG_MAIN_SERVER_RUNNING) {
+		index = 0;
+		burst_credit = rtcfg_dev->burstrate;
+
+		list_for_each (entry, &rtcfg_dev->spec.srv.conn_list) {
+			conn = list_entry(entry, struct rtcfg_connection,
+					  entry);
+
+			if ((conn->state == RTCFG_CONN_SEARCHING) ||
+			    (conn->state == RTCFG_CONN_DEAD)) {
+				if ((burst_credit > 0) &&
+				    (index > last_stage_1)) {
+					if ((ret = rtcfg_send_stage_1(conn)) <
+					    0) {
+						RTCFG_DEBUG(
+							2,
+							"RTcfg: error %d while sending "
+							"stage 1 frame\n",
+							ret);
+					}
+					burst_credit--;
+					last_stage_1 = index;
+				}
+			} else {
+				/* skip connection in history */
+				if (last_stage_1 == (index - 1))
+					last_stage_1 = index;
+
+				rtcfg_do_conn_event(conn, RTCFG_TIMER, NULL);
+			}
+			index++;
+		}
+
+		/* handle pointer overrun of the last stage 1 transmission */
+		if (last_stage_1 == (index - 1))
+			last_stage_1 = -1;
+	} else if (rtcfg_dev->state == RTCFG_MAIN_CLIENT_READY)
+		rtcfg_send_heartbeat(ifindex);
+
+	rtdm_mutex_unlock(&rtcfg_dev->dev_mutex);
+}
+
+void rtcfg_timer_run(void)
+{
+	int ifindex;
+
+	for (ifindex = 0; ifindex < MAX_RT_DEVICES; ifindex++)
+		rtcfg_timer_run_one(ifindex);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev.c
new file mode 100644
index 0000000..13e9790
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev.c
@@ -0,0 +1,940 @@
+/***
+ *
+ *  stack/rtdev.c - NIC device driver layer
+ *
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/spinlock.h>
+#include <linux/if.h>
+#include <linux/if_arp.h> /* ARPHRD_ETHER */
+#include <linux/netdevice.h>
+#include <linux/moduleparam.h>
+
+#include <rtnet_internal.h>
+#include <rtskb.h>
+#include <ethernet/eth.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtnet_port.h>
+
+static unsigned int device_rtskbs = DEFAULT_DEVICE_RTSKBS;
+module_param(device_rtskbs, uint, 0444);
+MODULE_PARM_DESC(device_rtskbs, "Number of additional global realtime socket "
+				"buffers per network adapter");
+
+struct rtnet_device *rtnet_devices[MAX_RT_DEVICES];
+static struct rtnet_device *loopback_device;
+static DEFINE_RTDM_LOCK(rtnet_devices_rt_lock);
+static LIST_HEAD(rtskb_mapped_list);
+static LIST_HEAD(rtskb_mapwait_list);
+
+LIST_HEAD(event_hook_list);
+DEFINE_MUTEX(rtnet_devices_nrt_lock);
+
+static int rtdev_locked_xmit(struct rtskb *skb, struct rtnet_device *rtdev);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(4, 19, 0)
+#define atomic_fetch_add_unless __atomic_add_unless
+#endif
+
+int rtdev_reference(struct rtnet_device *rtdev)
+{
+	smp_mb__before_atomic();
+	if (rtdev->rt_owner &&
+	    atomic_fetch_add_unless(&rtdev->refcount, 1, 0) == 0) {
+		if (!try_module_get(rtdev->rt_owner))
+			return 0;
+		if (atomic_inc_return(&rtdev->refcount) != 1)
+			module_put(rtdev->rt_owner);
+	}
+	return 1;
+}
+EXPORT_SYMBOL_GPL(rtdev_reference);
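+
+/*
+ * Usage sketch (illustrative, device name made up): a device returned by
+ * one of the rtdev_get_*() helpers below already carries a reference that
+ * the caller must drop with rtdev_dereference() when done:
+ *
+ *	struct rtnet_device *rtdev = rtdev_get_by_name("rteth0");
+ *
+ *	if (rtdev) {
+ *		... use rtdev ...
+ *		rtdev_dereference(rtdev);
+ *	}
+ */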
+
+struct rtskb *rtnetdev_alloc_rtskb(struct rtnet_device *rtdev,
+				   unsigned int size)
+{
+	struct rtskb *rtskb = alloc_rtskb(size, &rtdev->dev_pool);
+	if (rtskb)
+		rtskb->rtdev = rtdev;
+	return rtskb;
+}
+EXPORT_SYMBOL_GPL(rtnetdev_alloc_rtskb);
+
+/***
+ *  __rtdev_get_by_name - find a rtnet_device by its name
+ *  @name: name to find
+ *  @note: caller must hold rtnet_devices_nrt_lock
+ */
+static struct rtnet_device *__rtdev_get_by_name(const char *name)
+{
+	int i;
+	struct rtnet_device *rtdev;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtnet_devices[i];
+		if ((rtdev != NULL) &&
+		    (strncmp(rtdev->name, name, IFNAMSIZ) == 0))
+			return rtdev;
+	}
+	return NULL;
+}
+
+/***
+ *  rtdev_get_by_name - find and lock a rtnet_device by its name
+ *  @name: name to find
+ */
+struct rtnet_device *rtdev_get_by_name(const char *name)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = __rtdev_get_by_name(name);
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
+
+/***
+ *  rtdev_get_by_index - find and lock a rtnet_device by its ifindex
+ *  @ifindex: index of device
+ */
+struct rtnet_device *rtdev_get_by_index(int ifindex)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	if ((ifindex <= 0) || (ifindex > MAX_RT_DEVICES))
+		return NULL;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = __rtdev_get_by_index(ifindex);
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
+
+/***
+ *  __rtdev_get_by_hwaddr - find a rtnetdevice by its mac-address
+ *  @type:          Type of the net_device (may be ARPHRD_ETHER)
+ *  @hw_addr:       MAC-Address
+ */
+static inline struct rtnet_device *__rtdev_get_by_hwaddr(unsigned short type,
+							 char *hw_addr)
+{
+	int i;
+	struct rtnet_device *rtdev;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtnet_devices[i];
+		if ((rtdev != NULL) && (rtdev->type == type) &&
+		    (!memcmp(rtdev->dev_addr, hw_addr, rtdev->addr_len))) {
+			return rtdev;
+		}
+	}
+	return NULL;
+}
+
+/***
+ *  rtdev_get_by_hwaddr - find and lock a rtnetdevice by its mac-address
+ *  @type:          Type of the net_device (may be ARPHRD_ETHER)
+ *  @hw_addr:       MAC-Address
+ */
+struct rtnet_device *rtdev_get_by_hwaddr(unsigned short type, char *hw_addr)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = __rtdev_get_by_hwaddr(type, hw_addr);
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
+
+/***
+ *  rtdev_get_loopback - find and lock the loopback device if available
+ */
+struct rtnet_device *rtdev_get_loopback(void)
+{
+	struct rtnet_device *rtdev;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	rtdev = loopback_device;
+	if (rtdev != NULL && !rtdev_reference(rtdev))
+		rtdev = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	return rtdev;
+}
+
+/***
+ *  rtdev_alloc_name - allocate a name for the rtnet_device
+ *  @rtdev:         the rtnet_device
+ *  @mask:          a name mask (e.g. "rteth%d" for ethernet)
+ *
+ *  This function has to be called from the driver's probe function.
+ */
+void rtdev_alloc_name(struct rtnet_device *rtdev, const char *mask)
+{
+	char buf[IFNAMSIZ];
+	int i;
+	struct rtnet_device *tmp;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		snprintf(buf, IFNAMSIZ, mask, i);
+		if ((tmp = rtdev_get_by_name(buf)) == NULL) {
+			strncpy(rtdev->name, buf, IFNAMSIZ);
+			break;
+		} else
+			rtdev_dereference(tmp);
+	}
+}
+
+static int rtdev_pool_trylock(void *cookie)
+{
+	return rtdev_reference(cookie);
+}
+
+static void rtdev_pool_unlock(void *cookie)
+{
+	rtdev_dereference(cookie);
+}
+
+static const struct rtskb_pool_lock_ops rtdev_ops = {
+	.trylock = rtdev_pool_trylock,
+	.unlock = rtdev_pool_unlock,
+};
+
+int rtdev_init(struct rtnet_device *rtdev, unsigned dev_pool_size)
+{
+	int ret;
+
+	ret = rtskb_pool_init(&rtdev->dev_pool, dev_pool_size, &rtdev_ops,
+			      rtdev);
+	if (ret < dev_pool_size) {
+		printk(KERN_ERR "RTnet: cannot allocate rtnet device pool\n");
+		rtskb_pool_release(&rtdev->dev_pool);
+		return -ENOMEM;
+	}
+
+	rtdm_mutex_init(&rtdev->xmit_mutex);
+	rtdm_lock_init(&rtdev->rtdev_lock);
+	mutex_init(&rtdev->nrt_lock);
+
+	atomic_set(&rtdev->refcount, 0);
+
+	/* scale global rtskb pool */
+	rtdev->add_rtskbs = rtskb_pool_extend(&global_pool, device_rtskbs);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rtdev_init);
+
+void rtdev_destroy(struct rtnet_device *rtdev)
+{
+	rtskb_pool_release(&rtdev->dev_pool);
+	rtskb_pool_shrink(&global_pool, rtdev->add_rtskbs);
+	rtdev->stack_event = NULL;
+	rtdm_mutex_destroy(&rtdev->xmit_mutex);
+}
+EXPORT_SYMBOL_GPL(rtdev_destroy);
+
+/***
+ *  rtdev_alloc
+ *  @sizeof_priv: size of the driver-private area
+ *
+ *  allocate memory for a new rt-network-adapter
+ */
+struct rtnet_device *rtdev_alloc(unsigned sizeof_priv, unsigned dev_pool_size)
+{
+	struct rtnet_device *rtdev;
+	unsigned alloc_size;
+	int ret;
+
+	/* ensure 32-byte alignment of the private area */
+	alloc_size = sizeof(*rtdev) + sizeof_priv + 31;
+
+	rtdev = kzalloc(alloc_size, GFP_KERNEL);
+	if (rtdev == NULL) {
+		printk(KERN_ERR "RTnet: cannot allocate rtnet device\n");
+		return NULL;
+	}
+
+	ret = rtdev_init(rtdev, dev_pool_size);
+	if (ret) {
+		kfree(rtdev);
+		return NULL;
+	}
+
+	if (sizeof_priv)
+		rtdev->priv = (void *)(((long)(rtdev + 1) + 31) & ~31);
+
+	return rtdev;
+}
+
+/***
+ *  rtdev_free
+ */
+void rtdev_free(struct rtnet_device *rtdev)
+{
+	if (rtdev != NULL) {
+		rtdev_destroy(rtdev);
+		kfree(rtdev);
+	}
+}
+EXPORT_SYMBOL_GPL(rtdev_free);
+
+static void init_etherdev(struct rtnet_device *rtdev, struct module *module)
+{
+	rtdev->hard_header = rt_eth_header;
+	rtdev->type = ARPHRD_ETHER;
+	rtdev->hard_header_len = ETH_HLEN;
+	rtdev->mtu = 1500; /* eth_mtu */
+	rtdev->addr_len = ETH_ALEN;
+	rtdev->flags = IFF_BROADCAST; /* TODO: IFF_MULTICAST; */
+	rtdev->get_mtu = rt_hard_mtu;
+	rtdev->rt_owner = module;
+
+	memset(rtdev->broadcast, 0xFF, ETH_ALEN);
+	strcpy(rtdev->name, "rteth%d");
+}
+
+/**
+ * rt_init_etherdev - sets up an ethernet device
+ * @module: module initializing the device
+ *
+ * Fill in the fields of the device structure with ethernet-generic
+ * values. This routine can be used to set up a pre-allocated device
+ * structure. The device still needs to be registered afterwards.
+ */
+int __rt_init_etherdev(struct rtnet_device *rtdev, unsigned dev_pool_size,
+		       struct module *module)
+{
+	int ret;
+
+	ret = rtdev_init(rtdev, dev_pool_size);
+	if (ret)
+		return ret;
+
+	init_etherdev(rtdev, module);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(__rt_init_etherdev);
+
+/**
+ * rt_alloc_etherdev - Allocates and sets up an ethernet device
+ * @sizeof_priv: size of additional driver-private structure to
+ *               be allocated for this ethernet device
+ * @dev_pool_size: size of the rx pool
+ * @module: module creating the device
+ *
+ * Allocates then fills in the fields of a new device structure with
+ * ethernet-generic values. Basically does everything except
+ * registering the device.
+ *
+ * A 32-byte alignment is enforced for the private data area.
+ */
+struct rtnet_device *__rt_alloc_etherdev(unsigned sizeof_priv,
+					 unsigned dev_pool_size,
+					 struct module *module)
+{
+	struct rtnet_device *rtdev;
+
+	rtdev = rtdev_alloc(sizeof_priv, dev_pool_size);
+	if (!rtdev)
+		return NULL;
+
+	init_etherdev(rtdev, module);
+
+	return rtdev;
+}
+EXPORT_SYMBOL_GPL(__rt_alloc_etherdev);
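+
+/*
+ * Sketch of a typical driver probe path (private struct, pool size and
+ * callback names are hypothetical; rt_alloc_etherdev() is assumed to be
+ * the convenience wrapper passing THIS_MODULE):
+ *
+ *	rtdev = rt_alloc_etherdev(sizeof(struct my_priv), 16);
+ *	if (rtdev == NULL)
+ *		return -ENOMEM;
+ *	rtdev->vers = RTDEV_VERS_2_0;
+ *	rtdev->open = my_open;
+ *	rtdev->stop = my_stop;
+ *	rtdev->hard_start_xmit = my_xmit;
+ *	err = rt_register_rtnetdev(rtdev);
+ *	if (err)
+ *		rtdev_free(rtdev);
+ */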
+
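+/* Note: ifindex values are 1-based; rtnet_devices[i] holds the device
+ * whose ifindex is i + 1 (see rt_register_rtnetdev() below). */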
+static inline int __rtdev_new_index(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_RT_DEVICES; i++)
+		if (rtnet_devices[i] == NULL)
+			return i + 1;
+
+	return -ENOMEM;
+}
+
+static int rtskb_map(struct rtnet_device *rtdev, struct rtskb *skb)
+{
+	dma_addr_t addr;
+
+	addr = rtdev->map_rtskb(rtdev, skb);
+
+	if (WARN_ON(addr == RTSKB_UNMAPPED))
+		return -ENOMEM;
+
+	if (skb->buf_dma_addr != RTSKB_UNMAPPED && addr != skb->buf_dma_addr) {
+		printk("RTnet: device %s maps skb differently than others. "
+		       "Different IOMMU domain?\nThis is not supported.\n",
+		       rtdev->name);
+		return -EACCES;
+	}
+
+	skb->buf_dma_addr = addr;
+
+	return 0;
+}
+
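+/* Map an rtskb on every registered device that implements map_rtskb(). If
+ * no such device exists yet, the buffer is parked on rtskb_mapwait_list
+ * and mapped later by rtdev_map_all_rtskbs() when a device registers. */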
+int rtdev_map_rtskb(struct rtskb *skb)
+{
+	struct rtnet_device *rtdev;
+	int err = 0;
+	int i;
+
+	skb->buf_dma_addr = RTSKB_UNMAPPED;
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+
+	for (i = 0; i < MAX_RT_DEVICES; i++) {
+		rtdev = rtnet_devices[i];
+		if (rtdev && rtdev->map_rtskb) {
+			err = rtskb_map(rtdev, skb);
+			if (err)
+				break;
+		}
+	}
+
+	if (!err) {
+		if (skb->buf_dma_addr != RTSKB_UNMAPPED)
+			list_add(&skb->entry, &rtskb_mapped_list);
+		else
+			list_add(&skb->entry, &rtskb_mapwait_list);
+	}
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	return err;
+}
+
+static int rtdev_map_all_rtskbs(struct rtnet_device *rtdev)
+{
+	struct rtskb *skb, *n;
+	int err = 0;
+
+	if (!rtdev->map_rtskb)
+		return 0;
+
+	list_for_each_entry (skb, &rtskb_mapped_list, entry) {
+		err = rtskb_map(rtdev, skb);
+		if (err)
+			break;
+	}
+
+	list_for_each_entry_safe (skb, n, &rtskb_mapwait_list, entry) {
+		err = rtskb_map(rtdev, skb);
+		if (err)
+			break;
+		list_del(&skb->entry);
+		list_add(&skb->entry, &rtskb_mapped_list);
+	}
+
+	return err;
+}
+
+void rtdev_unmap_rtskb(struct rtskb *skb)
+{
+	struct rtnet_device *rtdev;
+	int i;
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+
+	list_del(&skb->entry);
+
+	if (skb->buf_dma_addr != RTSKB_UNMAPPED) {
+		for (i = 0; i < MAX_RT_DEVICES; i++) {
+			rtdev = rtnet_devices[i];
+			if (rtdev && rtdev->unmap_rtskb) {
+				rtdev->unmap_rtskb(rtdev, skb);
+			}
+		}
+	}
+
+	skb->buf_dma_addr = RTSKB_UNMAPPED;
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
+
+static void rtdev_unmap_all_rtskbs(struct rtnet_device *rtdev)
+{
+	struct rtskb *skb;
+
+	if (!rtdev->unmap_rtskb)
+		return;
+
+	list_for_each_entry (skb, &rtskb_mapped_list, entry) {
+		rtdev->unmap_rtskb(rtdev, skb);
+	}
+}
+
+/***
+ * rt_register_rtnetdev: register a new rtnet_device (linux-like)
+ * @rtdev:               the device
+ */
+int rt_register_rtnetdev(struct rtnet_device *rtdev)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	rtdm_lockctx_t context;
+	int ifindex;
+	int err;
+
+	/* requires at least driver layer version 2.0 */
+	if (rtdev->vers < RTDEV_VERS_2_0)
+		return -EINVAL;
+
+	if (rtdev->features & NETIF_F_LLTX)
+		rtdev->start_xmit = rtdev->hard_start_xmit;
+	else
+		rtdev->start_xmit = rtdev_locked_xmit;
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+
+	ifindex = __rtdev_new_index();
+	if (ifindex < 0) {
+		err = ifindex;
+		goto fail;
+	}
+	rtdev->ifindex = ifindex;
+
+	if (strchr(rtdev->name, '%') != NULL)
+		rtdev_alloc_name(rtdev, rtdev->name);
+
+	if (__rtdev_get_by_name(rtdev->name) != NULL) {
+		err = -EEXIST;
+		goto fail;
+	}
+
+	rtdev->sysdev =
+		device_create(rtnet_class, NULL, MKDEV(0, rtdev->ifindex),
+			      rtdev, rtdev->name);
+	if (IS_ERR(rtdev->sysdev)) {
+		err = PTR_ERR(rtdev->sysdev);
+		goto fail;
+	}
+
+	if (rtdev->sysbind) {
+		err = sysfs_create_link(&rtdev->sysdev->kobj,
+					&rtdev->sysbind->kobj, "adapter");
+		if (err)
+			goto fail_link;
+	}
+
+	err = rtdev_map_all_rtskbs(rtdev);
+	if (err)
+		goto fail_map;
+
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	if (rtdev->flags & IFF_LOOPBACK) {
+		/* allow only one loopback device */
+		if (loopback_device) {
+			rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock,
+						 context);
+			err = -EEXIST;
+			goto fail_loopback;
+		}
+		loopback_device = rtdev;
+	}
+	rtnet_devices[rtdev->ifindex - 1] = rtdev;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	list_for_each (entry, &event_hook_list) {
+		hook = list_entry(entry, struct rtdev_event_hook, entry);
+		if (hook->register_device)
+			hook->register_device(rtdev);
+	}
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	/* Default state at registration is that the device is present. */
+	set_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state);
+
+	printk("RTnet: registered %s\n", rtdev->name);
+
+	return 0;
+
+fail_loopback:
+	rtdev_unmap_all_rtskbs(rtdev);
+fail_map:
+	if (rtdev->sysbind)
+		sysfs_remove_link(&rtdev->sysdev->kobj, "adapter");
+fail_link:
+	device_destroy(rtnet_class, MKDEV(0, rtdev->ifindex));
+fail:
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	return err;
+}
+
+/***
+ * rt_unregister_rtnetdev: unregister a rtnet_device
+ * @rtdev:                 the device
+ */
+int rt_unregister_rtnetdev(struct rtnet_device *rtdev)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	rtdm_lockctx_t context;
+
+	RTNET_ASSERT(rtdev->ifindex != 0,
+		     printk("RTnet: device %s/%p was not registered\n",
+			    rtdev->name, rtdev);
+		     return -ENODEV;);
+
+	if (rtdev->sysbind)
+		sysfs_remove_link(&rtdev->sysdev->kobj, "adapter");
+
+	device_destroy(rtnet_class, MKDEV(0, rtdev->ifindex));
+
+	mutex_lock(&rtnet_devices_nrt_lock);
+	rtdm_lock_get_irqsave(&rtnet_devices_rt_lock, context);
+
+	RTNET_ASSERT(atomic_read(&rtdev->refcount) == 0, BUG());
+	rtnet_devices[rtdev->ifindex - 1] = NULL;
+	if (rtdev->flags & IFF_LOOPBACK)
+		loopback_device = NULL;
+
+	rtdm_lock_put_irqrestore(&rtnet_devices_rt_lock, context);
+
+	list_for_each (entry, &event_hook_list) {
+		hook = list_entry(entry, struct rtdev_event_hook, entry);
+		if (hook->unregister_device)
+			hook->unregister_device(rtdev);
+	}
+
+	rtdev_unmap_all_rtskbs(rtdev);
+
+	mutex_unlock(&rtnet_devices_nrt_lock);
+
+	clear_bit(__RTNET_LINK_STATE_PRESENT, &rtdev->link_state);
+
+	RTNET_ASSERT(atomic_read(&rtdev->refcount) == 0,
+		     printk("RTnet: rtdev reference counter < 0!\n"););
+
+	printk("RTnet: unregistered %s\n", rtdev->name);
+
+	return 0;
+}
+
+void rtdev_add_event_hook(struct rtdev_event_hook *hook)
+{
+	mutex_lock(&rtnet_devices_nrt_lock);
+	list_add(&hook->entry, &event_hook_list);
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
+
+void rtdev_del_event_hook(struct rtdev_event_hook *hook)
+{
+	mutex_lock(&rtnet_devices_nrt_lock);
+	list_del(&hook->entry);
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
+
+int rtdev_up(struct rtnet_device *rtdev, struct rtnet_core_cmd *cmd)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	/* We cannot change the promisc flag or the hardware address if
+	   the device is already up. */
+	if ((rtdev->flags & IFF_UP) &&
+	    (((cmd->args.up.set_dev_flags | cmd->args.up.clear_dev_flags) &
+	      IFF_PROMISC) ||
+	     (cmd->args.up.dev_addr_type != ARPHRD_VOID))) {
+		ret = -EBUSY;
+		goto out;
+	}
+
+	if (cmd->args.up.dev_addr_type != ARPHRD_VOID &&
+	    cmd->args.up.dev_addr_type != rtdev->type) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	/* Skip upon extraneous call only after args have been checked. */
+	if (test_and_set_bit(PRIV_FLAG_UP, &rtdev->priv_flags))
+		goto out;
+
+	rtdev->flags |= cmd->args.up.set_dev_flags;
+	rtdev->flags &= ~cmd->args.up.clear_dev_flags;
+
+	if (cmd->args.up.dev_addr_type != ARPHRD_VOID)
+		memcpy(rtdev->dev_addr, cmd->args.up.dev_addr, MAX_ADDR_LEN);
+
+	ret = rtdev_open(rtdev); /* also == 0 if rtdev is already up */
+
+	if (ret == 0) {
+		mutex_lock(&rtnet_devices_nrt_lock);
+
+		list_for_each (entry, &event_hook_list) {
+			hook = list_entry(entry, struct rtdev_event_hook,
+					  entry);
+			if (hook->ifup)
+				hook->ifup(rtdev, cmd);
+		}
+
+		mutex_unlock(&rtnet_devices_nrt_lock);
+	} else
+		clear_bit(PRIV_FLAG_UP, &rtdev->priv_flags);
+out:
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdev_up);
+
+int rtdev_down(struct rtnet_device *rtdev)
+{
+	struct list_head *entry;
+	struct rtdev_event_hook *hook;
+	rtdm_lockctx_t context;
+	int ret = 0;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	/* spin lock required for sync with routing code */
+	rtdm_lock_get_irqsave(&rtdev->rtdev_lock, context);
+
+	if (test_bit(PRIV_FLAG_ADDING_ROUTE, &rtdev->priv_flags)) {
+		ret = -EBUSY;
+		goto fail;
+	}
+
+	if (!test_and_clear_bit(PRIV_FLAG_UP, &rtdev->priv_flags))
+		goto fail;
+
+	rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+
+	if (rtdev->mac_detach != NULL)
+		ret = rtdev->mac_detach(rtdev);
+
+	if (ret == 0) {
+		mutex_lock(&rtnet_devices_nrt_lock);
+
+		list_for_each (entry, &event_hook_list) {
+			hook = list_entry(entry, struct rtdev_event_hook,
+					  entry);
+			if (hook->ifdown)
+				hook->ifdown(rtdev);
+		}
+
+		mutex_unlock(&rtnet_devices_nrt_lock);
+
+		ret = rtdev_close(rtdev);
+	}
+out:
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+fail:
+	rtdm_lock_put_irqrestore(&rtdev->rtdev_lock, context);
+	goto out;
+}
+EXPORT_SYMBOL_GPL(rtdev_down);
+
+/***
+ *  rtdev_open
+ *
+ *  Prepare an interface for use.
+ */
+int rtdev_open(struct rtnet_device *rtdev)
+{
+	int ret = 0;
+
+	if (rtdev->flags & IFF_UP) /* Is it already up?                */
+		return 0;
+
+	if (!rtdev_reference(rtdev))
+		return -EIDRM;
+
+	if (rtdev->open) /* Call device private open method  */
+		ret = rtdev->open(rtdev);
+
+	if (!ret) {
+		rtdev->flags |= IFF_UP;
+		set_bit(__RTNET_LINK_STATE_START, &rtdev->link_state);
+	} else
+		rtdev_dereference(rtdev);
+
+	return ret;
+}
+
+/***
+ *  rtdev_close
+ */
+int rtdev_close(struct rtnet_device *rtdev)
+{
+	int ret = 0;
+
+	if (!(rtdev->flags & IFF_UP))
+		return 0;
+
+	if (rtdev->stop)
+		ret = rtdev->stop(rtdev);
+
+	rtdev->flags &= ~(IFF_UP | IFF_RUNNING);
+	clear_bit(__RTNET_LINK_STATE_START, &rtdev->link_state);
+
+	if (ret == 0)
+		rtdev_dereference(rtdev);
+
+	return ret;
+}
+
+static int rtdev_locked_xmit(struct rtskb *skb, struct rtnet_device *rtdev)
+{
+	int ret;
+
+	rtdm_mutex_lock(&rtdev->xmit_mutex);
+	ret = rtdev->hard_start_xmit(skb, rtdev);
+	rtdm_mutex_unlock(&rtdev->xmit_mutex);
+
+	return ret;
+}
+
+/***
+ *  rtdev_xmit - send real-time packet
+ */
+int rtdev_xmit(struct rtskb *rtskb)
+{
+	struct rtnet_device *rtdev;
+	int err;
+
+	RTNET_ASSERT(rtskb != NULL, return -EINVAL;);
+
+	rtdev = rtskb->rtdev;
+
+	if (!rtnetif_carrier_ok(rtdev)) {
+		err = -EAGAIN;
+		kfree_rtskb(rtskb);
+		return err;
+	}
+
+	if (rtskb_acquire(rtskb, &rtdev->dev_pool) != 0) {
+		err = -ENOBUFS;
+		kfree_rtskb(rtskb);
+		return err;
+	}
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+
+	err = rtdev->start_xmit(rtskb, rtdev);
+	if (err) {
+		/* on error we must free the rtskb here */
+		kfree_rtskb(rtskb);
+
+		rtdm_printk("hard_start_xmit returned %d\n", err);
+	}
+
+	return err;
+}
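+
+/*
+ * Transmit usage sketch (illustrative; frame construction and length are
+ * elided): the rtskb must be bound to the sending device, and rtdev_xmit()
+ * consumes the buffer in the error case as well:
+ *
+ *	struct rtskb *skb = rtnetdev_alloc_rtskb(rtdev, frame_len);
+ *
+ *	if (skb) {
+ *		... build frame in skb->data ...
+ *		rtdev_xmit(skb);
+ *	}
+ */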
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+/***
+ *      rtdev_xmit_proxy - send rtproxy packet
+ */
+int rtdev_xmit_proxy(struct rtskb *rtskb)
+{
+	struct rtnet_device *rtdev;
+	int err;
+
+	RTNET_ASSERT(rtskb != NULL, return -EINVAL;);
+
+	rtdev = rtskb->rtdev;
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+
+	/* TODO: make these lines race-condition-safe */
+	if (rtdev->mac_disc) {
+		RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL,
+			     return -EINVAL;);
+
+		err = rtdev->mac_disc->nrt_packet_tx(rtskb);
+	} else {
+		err = rtdev->start_xmit(rtskb, rtdev);
+		if (err) {
+			/* on error we must free the rtskb here */
+			kfree_rtskb(rtskb);
+
+			rtdm_printk("hard_start_xmit returned %d\n", err);
+		}
+	}
+
+	return err;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_PROXY */
+
+unsigned int rt_hard_mtu(struct rtnet_device *rtdev, unsigned int priority)
+{
+	return rtdev->mtu;
+}
+
+EXPORT_SYMBOL_GPL(rtdev_alloc_name);
+
+EXPORT_SYMBOL_GPL(rt_register_rtnetdev);
+EXPORT_SYMBOL_GPL(rt_unregister_rtnetdev);
+
+EXPORT_SYMBOL_GPL(rtdev_add_event_hook);
+EXPORT_SYMBOL_GPL(rtdev_del_event_hook);
+
+EXPORT_SYMBOL_GPL(rtdev_get_by_name);
+EXPORT_SYMBOL_GPL(rtdev_get_by_index);
+EXPORT_SYMBOL_GPL(rtdev_get_by_hwaddr);
+EXPORT_SYMBOL_GPL(rtdev_get_loopback);
+
+EXPORT_SYMBOL_GPL(rtdev_xmit);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+EXPORT_SYMBOL_GPL(rtdev_xmit_proxy);
+#endif
+
+EXPORT_SYMBOL_GPL(rt_hard_mtu);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev_mgr.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev_mgr.c
new file mode 100644
index 0000000..c6c78c2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtdev_mgr.c
@@ -0,0 +1,127 @@
+/***
+ *
+ *  stack/rtdev_mgr.c - device error manager
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/netdevice.h>
+
+#include <rtdev.h>
+#include <rtdm/net.h>
+#include <rtnet_internal.h>
+
+/***
+ *  rtnetif_err_rx: called by the driver on receive errors
+ *
+ *  @rtdev - the network device
+ */
+void rtnetif_err_rx(struct rtnet_device *rtdev)
+{
+}
+
+/***
+ *  rtnetif_err_tx: called by the driver on transmit errors
+ *
+ *  @rtdev - the network device
+ */
+void rtnetif_err_tx(struct rtnet_device *rtdev)
+{
+}
+
+/***
+ *  do_rtdev_task
+ */
+/*static void do_rtdev_task(int mgr_id)
+{
+    struct rtnet_msg msg;
+    struct rtnet_mgr *mgr = (struct rtnet_mgr *)mgr_id;
+
+    while (1) {
+        rt_mbx_receive(&(mgr->mbx), &msg, sizeof(struct rtnet_msg));
+        if (msg.rtdev) {
+            rt_printk("RTnet: error on rtdev %s\n", msg.rtdev->name);
+        }
+    }
+}*/
+
+/***
+ *  rt_rtdev_connect
+ */
+void rt_rtdev_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr)
+{
+	/*    rtdev->rtdev_mbx=&(mgr->mbx);*/
+}
+
+/***
+ *  rt_rtdev_disconnect
+ */
+void rt_rtdev_disconnect(struct rtnet_device *rtdev)
+{
+	/*    rtdev->rtdev_mbx=NULL;*/
+}
+
+/***
+ *  rt_rtdev_mgr_start
+ */
+int rt_rtdev_mgr_start(struct rtnet_mgr *mgr)
+{
+	return /*(rt_task_resume(&(mgr->task)))*/ 0;
+}
+
+/***
+ *  rt_rtdev_mgr_stop
+ */
+int rt_rtdev_mgr_stop(struct rtnet_mgr *mgr)
+{
+	return /*(rt_task_suspend(&(mgr->task)))*/ 0;
+}
+
+/***
+ *  rt_rtdev_mgr_init
+ */
+int rt_rtdev_mgr_init(struct rtnet_mgr *mgr)
+{
+	int ret = 0;
+
+	/*    if ( (ret=rt_mbx_init (&(mgr->mbx), sizeof(struct rtnet_msg))) )
+        return ret;
+    if ( (ret=rt_task_init(&(mgr->task), &do_rtdev_task, (int)mgr, 4096, RTNET_RTDEV_PRIORITY, 0, 0)) )
+        return ret;
+    if ( (ret=rt_task_resume(&(mgr->task))) )
+        return ret;*/
+
+	return (ret);
+}
+
+/***
+ *  rt_rtdev_mgr_delete
+ */
+void rt_rtdev_mgr_delete(struct rtnet_mgr *mgr)
+{
+	/*    rt_task_delete(&(mgr->task));
+    rt_mbx_delete(&(mgr->mbx));*/
+}
+
+EXPORT_SYMBOL_GPL(rtnetif_err_rx);
+EXPORT_SYMBOL_GPL(rtnetif_err_tx);
+
+EXPORT_SYMBOL_GPL(rt_rtdev_connect);
+EXPORT_SYMBOL_GPL(rt_rtdev_disconnect);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Kconfig
new file mode 100644
index 0000000..a97b316
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Kconfig
@@ -0,0 +1,16 @@
+menuconfig XENO_DRIVERS_NET_RTMAC
+    depends on XENO_DRIVERS_NET
+    tristate "RTmac Layer"
+    default y
+    help
+    The Real-Time Media Access Control layer allows the RTnet stack to be
+    extended with software-based access control mechanisms (also called
+    disciplines) for nondeterministic transport media. Disciplines can be
+    attached and detached per real-time device. RTmac also provides a
+    framework for tunnelling non-time-critical packets through real-time
+    networks by installing virtual NICs (VNICs) in the Linux domain.
+
+    See Documentation/README.rtmac for further information.
+
+source "drivers/xenomai/net/stack/rtmac/tdma/Kconfig"
+source "drivers/xenomai/net/stack/rtmac/nomac/Kconfig"
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Makefile
new file mode 100644
index 0000000..79d532f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/Makefile
@@ -0,0 +1,15 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_NOMAC) += nomac/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_TDMA) += tdma/
+
+obj-$(CONFIG_XENO_DRIVERS_NET_RTMAC) += rtmac.o
+
+rtmac-y := \
+	rtmac_disc.o \
+	rtmac_module.o \
+	rtmac_proc.o \
+	rtmac_proto.o \
+	rtmac_syms.o \
+	rtmac_vnic.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Kconfig
new file mode 100644
index 0000000..e706d4a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Kconfig
@@ -0,0 +1,9 @@
+config XENO_DRIVERS_NET_NOMAC
+    tristate "NoMAC discipline for RTmac"
+    depends on XENO_DRIVERS_NET_RTMAC
+    default n
+    help
+    This no-operation RTmac discipline is intended to act as a template
+    for new implementations. It can nevertheless be compiled and used (see
+    the nomaccfg management tool), but do not expect any improved
+    determinism on your network. ;)
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Makefile
new file mode 100644
index 0000000..50c29cc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/Makefile
@@ -0,0 +1,9 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_NOMAC) += nomac.o
+
+nomac-y := \
+	nomac_dev.o \
+	nomac_ioctl.o \
+	nomac_module.o \
+	nomac_proto.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_dev.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_dev.c
new file mode 100644
index 0000000..572d893
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_dev.c
@@ -0,0 +1,84 @@
+/***
+ *
+ *  rtmac/nomac/nomac_dev.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtmac.h>
+#include <rtmac/nomac/nomac.h>
+
+static int nomac_dev_openclose(void)
+{
+	return 0;
+}
+
+static int nomac_dev_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct nomac_priv *nomac;
+
+	nomac = container_of(rtdm_fd_to_context(fd)->device, struct nomac_priv,
+			     api_device);
+
+	switch (request) {
+	case RTMAC_RTIOC_TIMEOFFSET:
+
+	case RTMAC_RTIOC_WAITONCYCLE:
+
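+		/* neither request is implemented in this template,
+		 * fall through to -ENOTTY */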
+	default:
+		return -ENOTTY;
+	}
+}
+
+static struct rtdm_driver
+	nomac_driver = { .profile_info = RTDM_PROFILE_INFO(
+				 nomac, RTDM_CLASS_RTMAC,
+				 RTDM_SUBCLASS_UNMANAGED, RTNET_RTDM_VER),
+			 .device_flags = RTDM_NAMED_DEVICE,
+			 .device_count = 1,
+			 .context_size = 0,
+			 .ops = {
+				 .open = (typeof(nomac_driver.ops.open))
+					 (void (*)(void))nomac_dev_openclose,
+				 .ioctl_rt = nomac_dev_ioctl,
+				 .ioctl_nrt = nomac_dev_ioctl,
+				 .close = (typeof(nomac_driver.ops.close))
+					 (void (*)(void))nomac_dev_openclose,
+			 } };
+
+int nomac_dev_init(struct rtnet_device *rtdev, struct nomac_priv *nomac)
+{
+	char *pos;
+
+	strcpy(nomac->device_name, "NOMAC");
+	for (pos = rtdev->name + strlen(rtdev->name) - 1;
+	     (pos >= rtdev->name) && ((*pos) >= '0') && (*pos <= '9'); pos--)
+		;
+	strncat(nomac->device_name + 5, pos + 1, IFNAMSIZ - 5);
+
+	nomac->api_driver = nomac_driver;
+	nomac->api_device.driver = &nomac->api_driver;
+	nomac->api_device.label = nomac->device_name;
+
+	return rtdm_dev_register(&nomac->api_device);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_ioctl.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_ioctl.c
new file mode 100644
index 0000000..95c7bf7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_ioctl.c
@@ -0,0 +1,99 @@
+/***
+ *
+ *  rtmac/nomac/nomac_ioctl.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/uaccess.h>
+
+#include <nomac_chrdev.h>
+#include <rtmac/nomac/nomac.h>
+
+static int nomac_ioctl_attach(struct rtnet_device *rtdev)
+{
+	struct nomac_priv *nomac;
+	int ret;
+
+	if (rtdev->mac_priv == NULL) {
+		ret = rtmac_disc_attach(rtdev, &nomac_disc);
+		if (ret < 0)
+			return ret;
+	}
+
+	nomac = (struct nomac_priv *)rtdev->mac_priv->disc_priv;
+	if (nomac->magic != NOMAC_MAGIC)
+		return -ENOTTY;
+
+	/* ... */
+
+	return 0;
+}
+
+static int nomac_ioctl_detach(struct rtnet_device *rtdev)
+{
+	struct nomac_priv *nomac;
+	int ret;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	nomac = (struct nomac_priv *)rtdev->mac_priv->disc_priv;
+	if (nomac->magic != NOMAC_MAGIC)
+		return -ENOTTY;
+
+	ret = rtmac_disc_detach(rtdev);
+
+	/* ... */
+
+	return ret;
+}
+
+int nomac_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		unsigned long arg)
+{
+	struct nomac_config cfg;
+	int ret;
+
+	ret = copy_from_user(&cfg, (void *)arg, sizeof(cfg));
+	if (ret != 0)
+		return -EFAULT;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	switch (request) {
+	case NOMAC_IOC_ATTACH:
+		ret = nomac_ioctl_attach(rtdev);
+		break;
+
+	case NOMAC_IOC_DETACH:
+		ret = nomac_ioctl_detach(rtdev);
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_module.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_module.c
new file mode 100644
index 0000000..c26a6c4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_module.c
@@ -0,0 +1,161 @@
+/***
+ *
+ *  rtmac/nomac/nomac_module.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/module.h>
+
+#include <rtdm/driver.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/nomac/nomac.h>
+#include <rtmac/nomac/nomac_dev.h>
+#include <rtmac/nomac/nomac_ioctl.h>
+#include <rtmac/nomac/nomac_proto.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+LIST_HEAD(nomac_devices);
+DEFINE_MUTEX(nomac_nrt_lock);
+
+int nomac_proc_read(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct nomac_priv *entry;
+
+	mutex_lock(&nomac_nrt_lock);
+
+	xnvfile_printf(it, "Interface       API Device      State\n");
+
+	list_for_each_entry (entry, &nomac_devices, list_entry)
+		xnvfile_printf(it, "%-15s %-15s Attached\n", entry->rtdev->name,
+			       entry->api_device.name);
+
+	mutex_unlock(&nomac_nrt_lock);
+
+	return 0;
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+int nomac_attach(struct rtnet_device *rtdev, void *priv)
+{
+	struct nomac_priv *nomac = (struct nomac_priv *)priv;
+	int ret;
+
+	nomac->magic = NOMAC_MAGIC;
+	nomac->rtdev = rtdev;
+
+	/* ... */
+
+	ret = nomac_dev_init(rtdev, nomac);
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	mutex_lock(&nomac_nrt_lock);
+	list_add(&nomac->list_entry, &nomac_devices);
+	mutex_unlock(&nomac_nrt_lock);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	return 0;
+}
+
+int nomac_detach(struct rtnet_device *rtdev, void *priv)
+{
+	struct nomac_priv *nomac = (struct nomac_priv *)priv;
+
+	nomac_dev_release(nomac);
+
+	/* ... */
+#ifdef CONFIG_XENO_OPT_VFILE
+	mutex_lock(&nomac_nrt_lock);
+	list_del(&nomac->list_entry);
+	mutex_unlock(&nomac_nrt_lock);
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct rtmac_proc_entry nomac_proc_entries[] = {
+	{ name: "nomac", handler: nomac_proc_read },
+};
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct rtmac_disc nomac_disc = {
+	.name = "NoMAC",
+	.priv_size = sizeof(struct nomac_priv),
+	.disc_type = __constant_htons(RTMAC_TYPE_NOMAC),
+
+	.packet_rx = nomac_packet_rx,
+	.rt_packet_tx = nomac_rt_packet_tx,
+	.nrt_packet_tx = nomac_nrt_packet_tx,
+
+	.get_mtu = NULL,
+
+	.vnic_xmit = RTMAC_DEFAULT_VNIC,
+
+	.attach = nomac_attach,
+	.detach = nomac_detach,
+
+	.ioctls = {
+		.service_name = "RTmac/NoMAC",
+		.ioctl_type = RTNET_IOC_TYPE_RTMAC_NOMAC,
+		.handler = nomac_ioctl,
+	},
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	.proc_entries = nomac_proc_entries,
+	.nr_proc_entries = ARRAY_SIZE(nomac_proc_entries),
+#endif /* CONFIG_XENO_OPT_VFILE */
+};
+
+int __init nomac_init(void)
+{
+	int ret;
+
+	printk("RTmac/NoMAC: init void media access control mechanism\n");
+
+	ret = nomac_proto_init();
+	if (ret < 0)
+		return ret;
+
+	ret = rtmac_disc_register(&nomac_disc);
+	if (ret < 0) {
+		nomac_proto_cleanup();
+		return ret;
+	}
+
+	return 0;
+}
+
+void nomac_release(void)
+{
+	rtmac_disc_deregister(&nomac_disc);
+	nomac_proto_cleanup();
+
+	printk("RTmac/NoMAC: unloaded\n");
+}
+
+module_init(nomac_init);
+module_exit(nomac_release);
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_proto.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_proto.c
new file mode 100644
index 0000000..66cd9fc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/nomac/nomac_proto.c
@@ -0,0 +1,127 @@
+/***
+ *
+ *  rtmac/nomac/nomac_proto.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+
+#include <rtdev.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/nomac/nomac.h>
+
+static struct rtskb_queue nrt_rtskb_queue;
+static rtdm_task_t wrapper_task;
+static rtdm_event_t wakeup_sem;
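+
+/*
+ * Packets submitted from non-RT context are queued on nrt_rtskb_queue and
+ * handed over to wrapper_task, which performs the actual transmission from
+ * RT context under the device's xmit_mutex.
+ */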
+
+int nomac_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	/* unused here, just to demonstrate access to the discipline state
+    struct nomac_priv   *nomac =
+        (struct nomac_priv *)rtdev->mac_priv->disc_priv; */
+	int ret;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	/* no MAC: we simply transmit the packet under xmit_lock */
+	rtdm_mutex_lock(&rtdev->xmit_mutex);
+	ret = rtmac_xmit(rtskb);
+	rtdm_mutex_unlock(&rtdev->xmit_mutex);
+
+	return ret;
+}
+
+int nomac_nrt_packet_tx(struct rtskb *rtskb)
+{
+	struct rtnet_device *rtdev = rtskb->rtdev;
+	/* unused here, just to demonstrate access to the discipline state
+    struct nomac_priv   *nomac =
+        (struct nomac_priv *)rtdev->mac_priv->disc_priv; */
+	int ret;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	/* note: this routine may be called both in rt and non-rt context
+	 *       => detect and wrap the context if necessary */
+	if (!rtdm_in_rt_context()) {
+		rtskb_queue_tail(&nrt_rtskb_queue, rtskb);
+		rtdm_event_signal(&wakeup_sem);
+		return 0;
+	} else {
+		/* no MAC: we simply transmit the packet under xmit_lock */
+		rtdm_mutex_lock(&rtdev->xmit_mutex);
+		ret = rtmac_xmit(rtskb);
+		rtdm_mutex_unlock(&rtdev->xmit_mutex);
+
+		return ret;
+	}
+}
+
+void nrt_xmit_task(void *arg)
+{
+	struct rtskb *rtskb;
+	struct rtnet_device *rtdev;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&wakeup_sem) < 0)
+			break;
+
+		while ((rtskb = rtskb_dequeue(&nrt_rtskb_queue))) {
+			rtdev = rtskb->rtdev;
+
+			/* no MAC: we simply transmit the packet under xmit_lock */
+			rtdm_mutex_lock(&rtdev->xmit_mutex);
+			rtmac_xmit(rtskb);
+			rtdm_mutex_unlock(&rtdev->xmit_mutex);
+		}
+	}
+}
+
+int nomac_packet_rx(struct rtskb *rtskb)
+{
+	/* actually, NoMAC doesn't expect any control packet */
+	kfree_rtskb(rtskb);
+
+	return 0;
+}
+
+int __init nomac_proto_init(void)
+{
+	int ret;
+
+	rtskb_queue_init(&nrt_rtskb_queue);
+	rtdm_event_init(&wakeup_sem, 0);
+
+	ret = rtdm_task_init(&wrapper_task, "rtnet-nomac", nrt_xmit_task, 0,
+			     RTDM_TASK_LOWEST_PRIORITY, 0);
+	if (ret < 0) {
+		rtdm_event_destroy(&wakeup_sem);
+		return ret;
+	}
+
+	return 0;
+}
+
+void nomac_proto_cleanup(void)
+{
+	rtdm_event_destroy(&wakeup_sem);
+	rtdm_task_destroy(&wrapper_task);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_disc.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_disc.c
new file mode 100644
index 0000000..5806db1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_disc.c
@@ -0,0 +1,271 @@
+/***
+ *
+ *  rtmac_disc.c
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+#include <linux/netdevice.h>
+#include <linux/mutex.h>
+
+#include <rtnet_internal.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proc.h>
+#include <rtmac/rtmac_vnic.h>
+
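+/* registry of all registered RTmac disciplines, protected by disc_list_lock */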
+static DEFINE_MUTEX(disc_list_lock);
+static LIST_HEAD(disc_list);
+
+/***
+ *  rtmac_disc_attach - attach a discipline to a device
+ *
+ *  @rtdev       device to attach the discipline to
+ *  @disc        discipline to attach
+ *
+ *  0            success
+ *  -EBUSY       other discipline active
+ *  -ENOMEM      could not allocate memory
+ *
+ *  Note: must be called with rtdev->nrt_lock acquired
+ */
+int rtmac_disc_attach(struct rtnet_device *rtdev, struct rtmac_disc *disc)
+{
+	int ret;
+	struct rtmac_priv *priv;
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->attach != NULL, return -EINVAL;);
+
+	if (rtdev->mac_disc) {
+		printk("RTmac: another discipline for rtdev '%s' active.\n",
+		       rtdev->name);
+		return -EBUSY;
+	}
+
+	if (rtdev->flags & IFF_LOOPBACK)
+		return -EINVAL;
+
+	if (!try_module_get(disc->owner))
+		return -EIDRM;
+
+	if (!rtdev_reference(rtdev)) {
+		ret = -EIDRM;
+		goto err_module_put;
+	}
+
+	/* alloc memory */
+	priv = kmalloc(sizeof(struct rtmac_priv) + disc->priv_size, GFP_KERNEL);
+	if (!priv) {
+		printk("RTmac: kmalloc returned NULL for rtmac!\n");
+		ret = -ENOMEM;
+		goto err_dereference;
+	}
+	priv->orig_start_xmit = rtdev->start_xmit;
+
+	/* call attach function of discipline */
+	ret = disc->attach(rtdev, priv->disc_priv);
+	if (ret < 0)
+		goto err_kfree_priv;
+
+	/* now attach RTmac to device */
+	rtdev->mac_disc = disc;
+	rtdev->mac_priv = priv;
+	rtdev->start_xmit = disc->rt_packet_tx;
+	if (disc->get_mtu)
+		rtdev->get_mtu = disc->get_mtu;
+	rtdev->mac_detach = rtmac_disc_detach;
+
+	/* create the VNIC */
+	ret = rtmac_vnic_add(rtdev, disc->vnic_xmit);
+	if (ret < 0) {
+		printk("RTmac: Warning, VNIC creation failed for rtdev %s.\n",
+		       rtdev->name);
+		goto err_disc_detach;
+	}
+
+	return 0;
+
+err_disc_detach:
+	disc->detach(rtdev, priv->disc_priv);
+err_kfree_priv:
+	kfree(priv);
+err_dereference:
+	rtdev_dereference(rtdev);
+err_module_put:
+	module_put(disc->owner);
+	return ret;
+}
+
+/***
+ *  rtmac_disc_detach - detach the active discipline from a device
+ *
+ *  @rtdev       device to detach the discipline from
+ *
+ *  0            success
+ *  -EINVAL      called with rtdev=NULL
+ *  -ENODEV      no discipline active on dev
+ *  other        negative error codes propagated from the discipline
+ *
+ *  Note: must be called with rtdev->nrt_lock acquired
+ */
+int rtmac_disc_detach(struct rtnet_device *rtdev)
+{
+	int ret;
+	struct rtmac_disc *disc;
+	struct rtmac_priv *priv;
+
+	RTNET_ASSERT(rtdev != NULL, return -EINVAL;);
+
+	disc = rtdev->mac_disc;
+	if (!disc)
+		return -ENODEV;
+
+	RTNET_ASSERT(disc->detach != NULL, return -EINVAL;);
+
+	priv = rtdev->mac_priv;
+	RTNET_ASSERT(priv != NULL, return -EINVAL;);
+
+	ret = rtmac_vnic_unregister(rtdev);
+	if (ret < 0)
+		return ret;
+
+	/* call release function of discipline */
+	ret = disc->detach(rtdev, priv->disc_priv);
+	if (ret < 0)
+		return ret;
+
+	rtmac_vnic_cleanup(rtdev);
+
+	/* restore start_xmit and get_mtu */
+	rtdev->start_xmit = priv->orig_start_xmit;
+	rtdev->get_mtu = rt_hard_mtu;
+
+	/* remove pointers from rtdev */
+	rtdev->mac_disc = NULL;
+	rtdev->mac_priv = NULL;
+	rtdev->mac_detach = NULL;
+
+	rtdev_dereference(rtdev);
+
+	kfree(priv);
+
+	module_put(disc->owner);
+
+	return 0;
+}
+
+static struct rtmac_disc *rtmac_get_disc_by_name(const char *name)
+{
+	struct rtmac_disc *disc;
+
+	mutex_lock(&disc_list_lock);
+
+	list_for_each_entry (disc, &disc_list, list) {
+		if (strcmp(disc->name, name) == 0) {
+			mutex_unlock(&disc_list_lock);
+			return disc;
+		}
+	}
+
+	mutex_unlock(&disc_list_lock);
+
+	return NULL;
+}
+
+int __rtmac_disc_register(struct rtmac_disc *disc, struct module *module)
+{
+	int ret;
+
+	RTNET_ASSERT(disc != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->name != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->rt_packet_tx != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->nrt_packet_tx != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->attach != NULL, return -EINVAL;);
+	RTNET_ASSERT(disc->detach != NULL, return -EINVAL;);
+
+	disc->owner = module;
+
+	if (rtmac_get_disc_by_name(disc->name) != NULL) {
+		printk("RTmac: discipline '%s' already registered!\n",
+		       disc->name);
+		return -EBUSY;
+	}
+
+	ret = rtnet_register_ioctls(&disc->ioctls);
+	if (ret < 0)
+		return ret;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = rtmac_disc_proc_register(disc);
+	if (ret < 0) {
+		rtnet_unregister_ioctls(&disc->ioctls);
+		return ret;
+	}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+	mutex_lock(&disc_list_lock);
+
+	list_add(&disc->list, &disc_list);
+
+	mutex_unlock(&disc_list_lock);
+
+	return 0;
+}
+
+void rtmac_disc_deregister(struct rtmac_disc *disc)
+{
+	RTNET_ASSERT(disc != NULL, return;);
+
+	mutex_lock(&disc_list_lock);
+
+	list_del(&disc->list);
+
+	mutex_unlock(&disc_list_lock);
+
+	rtnet_unregister_ioctls(&disc->ioctls);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtmac_disc_proc_unregister(disc);
+#endif /* CONFIG_XENO_OPT_VFILE */
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_disciplines_show(struct xnvfile_regular_iterator *it, void *d)
+{
+	struct rtmac_disc *disc;
+	int err;
+
+	err = mutex_lock_interruptible(&disc_list_lock);
+	if (err < 0)
+		return err;
+
+	xnvfile_printf(it, "Name\t\tID\n");
+
+	list_for_each_entry (disc, &disc_list, list)
+		xnvfile_printf(it, "%-15s %04X\n", disc->name,
+			       ntohs(disc->disc_type));
+
+	mutex_unlock(&disc_list_lock);
+
+	return 0;
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_module.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_module.c
new file mode 100644
index 0000000..e4013e3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_module.c
@@ -0,0 +1,80 @@
+/* rtmac_module.c
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *               2003 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+
+#include <rtdm/driver.h>
+
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proc.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/rtmac_vnic.h>
+
+int __init rtmac_init(void)
+{
+	int ret = 0;
+
+	printk("RTmac: init realtime media access control\n");
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	ret = rtmac_proc_register();
+	if (ret < 0)
+		return ret;
+#endif
+
+	ret = rtmac_vnic_module_init();
+	if (ret < 0)
+		goto error1;
+
+	ret = rtmac_proto_init();
+	if (ret < 0)
+		goto error2;
+
+	return 0;
+
+error2:
+	rtmac_vnic_module_cleanup();
+
+error1:
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtmac_proc_release();
+#endif
+	return ret;
+}
+
+void rtmac_release(void)
+{
+	rtmac_proto_release();
+	rtmac_vnic_module_cleanup();
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtmac_proc_release();
+#endif
+
+	printk("RTmac: unloaded\n");
+}
+
+module_init(rtmac_init);
+module_exit(rtmac_release);
+
+MODULE_AUTHOR("Marc Kleine-Budde, Jan Kiszka");
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proc.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proc.c
new file mode 100644
index 0000000..3f0c3aa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proc.c
@@ -0,0 +1,132 @@
+/***
+ *
+ *  rtmac_proc.c
+ *
+ *  rtmac - real-time networking medium access control subsystem
+ *  Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <rtnet_internal.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/rtmac_proc.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct xnvfile_directory rtmac_proc_root;
+
+static struct xnvfile_regular_ops rtnet_rtmac_disciplines_vfile_ops = {
+	.show = rtnet_rtmac_disciplines_show,
+};
+
+static struct xnvfile_regular rtnet_rtmac_disciplines_vfile = {
+	.ops = &rtnet_rtmac_disciplines_vfile_ops,
+};
+
+static struct xnvfile_regular_ops rtnet_rtmac_vnics_vfile_ops = {
+	.show = rtnet_rtmac_vnics_show,
+};
+
+static struct xnvfile_regular rtnet_rtmac_vnics_vfile = {
+	.ops = &rtnet_rtmac_vnics_vfile_ops,
+};
+
+static int rtnet_rtmac_disc_show(struct xnvfile_regular_iterator *it,
+				 void *data)
+{
+	struct rtmac_proc_entry *entry;
+
+	entry = container_of(it->vfile, struct rtmac_proc_entry, vfile);
+	return entry->handler(it, data);
+}
+
+static struct xnvfile_regular_ops rtnet_rtmac_disc_vfile_ops = {
+	.show = rtnet_rtmac_disc_show,
+};
+
+int rtmac_disc_proc_register(struct rtmac_disc *disc)
+{
+	int i, err;
+	struct rtmac_proc_entry *entry;
+
+	for (i = 0; i < disc->nr_proc_entries; i++) {
+		entry = &disc->proc_entries[i];
+
+		entry->vfile.ops = &rtnet_rtmac_disc_vfile_ops;
+		err = xnvfile_init_regular(entry->name, &entry->vfile,
+					   &rtmac_proc_root);
+		if (err < 0) {
+			while (--i >= 0)
+				xnvfile_destroy_regular(
+					&disc->proc_entries[i].vfile);
+			return err;
+		}
+	}
+
+	return 0;
+}
+
+void rtmac_disc_proc_unregister(struct rtmac_disc *disc)
+{
+	int i;
+
+	for (i = 0; i < disc->nr_proc_entries; i++)
+		xnvfile_destroy_regular(&disc->proc_entries[i].vfile);
+}
+
+int rtmac_proc_register(void)
+{
+	int err;
+
+	err = xnvfile_init_dir("rtmac", &rtmac_proc_root, &rtnet_proc_root);
+	if (err < 0)
+		goto err1;
+
+	err = xnvfile_init_regular("disciplines",
+				   &rtnet_rtmac_disciplines_vfile,
+				   &rtmac_proc_root);
+	if (err < 0)
+		goto err2;
+
+	err = xnvfile_init_regular("vnics", &rtnet_rtmac_vnics_vfile,
+				   &rtmac_proc_root);
+	if (err < 0)
+		goto err3;
+
+	return 0;
+
+err3:
+	xnvfile_destroy_regular(&rtnet_rtmac_disciplines_vfile);
+
+err2:
+	xnvfile_destroy_dir(&rtmac_proc_root);
+
+err1:
+	printk("RTmac: unable to initialize /proc entries\n");
+	return err;
+}
+
+void rtmac_proc_release(void)
+{
+	xnvfile_destroy_regular(&rtnet_rtmac_vnics_vfile);
+	xnvfile_destroy_regular(&rtnet_rtmac_disciplines_vfile);
+	xnvfile_destroy_dir(&rtmac_proc_root);
+}
+
+#endif /* CONFIG_XENO_OPT_VFILE */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proto.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proto.c
new file mode 100644
index 0000000..55e1165
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_proto.c
@@ -0,0 +1,68 @@
+/***
+ *
+ *  rtmac/rtmac_proto.c
+ *
+ *  rtmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtdm/driver.h>
+#include <stack_mgr.h>
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/rtmac_vnic.h>
+
+int rtmac_proto_rx(struct rtskb *skb, struct rtpacket_type *pt)
+{
+	struct rtmac_disc *disc = skb->rtdev->mac_disc;
+	struct rtmac_hdr *hdr;
+
+	if (disc == NULL) {
+		goto error;
+	}
+
+	hdr = (struct rtmac_hdr *)skb->data;
+	rtskb_pull(skb, sizeof(struct rtmac_hdr));
+
+	if (hdr->ver != RTMAC_VERSION) {
+		rtdm_printk(
+			"RTmac: received unsupported RTmac protocol version on "
+			"device %s.  Got 0x%x but expected 0x%x\n",
+			skb->rtdev->name, hdr->ver, RTMAC_VERSION);
+		goto error;
+	}
+
+	if (hdr->flags & RTMAC_FLAG_TUNNEL)
+		rtmac_vnic_rx(skb, hdr->type);
+	else if (disc->disc_type == hdr->type)
+		disc->packet_rx(skb);
+	return 0;
+
+error:
+	kfree_rtskb(skb);
+	return 0;
+}
+
+struct rtpacket_type rtmac_packet_type = {
+	.type = __constant_htons(ETH_RTMAC),
+	.handler = rtmac_proto_rx,
+};
+
+void rtmac_proto_release(void)
+{
+	rtdev_remove_pack(&rtmac_packet_type);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_syms.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_syms.c
new file mode 100644
index 0000000..7abb772
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_syms.c
@@ -0,0 +1,36 @@
+/* rtmac_syms.c
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002 Marc Kleine-Budde <kleine-budde@gmx.de>
+ *               2003 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/kernel.h>
+#include <linux/module.h>
+
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_vnic.h>
+
+EXPORT_SYMBOL_GPL(__rtmac_disc_register);
+EXPORT_SYMBOL_GPL(rtmac_disc_deregister);
+
+EXPORT_SYMBOL_GPL(rtmac_disc_attach);
+EXPORT_SYMBOL_GPL(rtmac_disc_detach);
+
+EXPORT_SYMBOL_GPL(rtmac_vnic_set_max_mtu);
+
+EXPORT_SYMBOL_GPL(rtmac_vnic_xmit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_vnic.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_vnic.c
new file mode 100644
index 0000000..4b41785
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/rtmac_vnic.c
@@ -0,0 +1,333 @@
+/* rtmac_vnic.c
+ *
+ * rtmac - real-time networking media access control subsystem
+ * Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *               2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/netdevice.h>
+#include <linux/etherdevice.h>
+#include <linux/rtnetlink.h>
+
+#include <rtnet_internal.h>
+#include <rtdev.h>
+#include <rtnet_port.h> /* for netdev_priv() */
+#include <rtmac/rtmac_disc.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/rtmac_vnic.h>
+
+static unsigned int vnic_rtskbs = DEFAULT_VNIC_RTSKBS;
+module_param(vnic_rtskbs, uint, 0444);
+MODULE_PARM_DESC(vnic_rtskbs,
+		 "Number of realtime socket buffers per virtual NIC");
+
+static rtdm_nrtsig_t vnic_signal;
+static struct rtskb_queue rx_queue;
+
+int rtmac_vnic_rx(struct rtskb *rtskb, u16 type)
+{
+	struct rtmac_priv *mac_priv = rtskb->rtdev->mac_priv;
+	struct rtskb_pool *pool = &mac_priv->vnic_skb_pool;
+
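+	/* move the buffer into the VNIC's pool and defer its conversion to a
+	 * Linux skb to non-RT context (see rtmac_vnic_signal_handler()) */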
+	if (rtskb_acquire(rtskb, pool) != 0) {
+		mac_priv->vnic_stats.rx_dropped++;
+		kfree_rtskb(rtskb);
+		return -1;
+	}
+
+	rtskb->protocol = type;
+
+	if (rtskb_queue_tail_check(&rx_queue, rtskb))
+		rtdm_nrtsig_pend(&vnic_signal);
+
+	return 0;
+}
+
+static void rtmac_vnic_signal_handler(rtdm_nrtsig_t *nrtsig, void *arg)
+{
+	struct rtskb *rtskb;
+	struct sk_buff *skb;
+	unsigned hdrlen;
+	struct net_device_stats *stats;
+	struct rtnet_device *rtdev;
+
+	while (1) {
+		rtskb = rtskb_dequeue(&rx_queue);
+		if (!rtskb)
+			break;
+
+		rtdev = rtskb->rtdev;
+		hdrlen = rtdev->hard_header_len;
+
+		skb = dev_alloc_skb(hdrlen + rtskb->len + 2);
+		if (skb) {
+			/* the rtskb stamp is useless (different clock), get new one */
+			__net_timestamp(skb);
+
+			/* align IP on 16 byte boundaries */
+			skb_reserve(skb, 2);
+
+			/* copy Ethernet header */
+			memcpy(skb_put(skb, hdrlen),
+			       rtskb->data - hdrlen - sizeof(struct rtmac_hdr),
+			       hdrlen);
+
+			/* patch the protocol field in the original Ethernet header */
+			((struct ethhdr *)skb->data)->h_proto = rtskb->protocol;
+
+			/* copy data */
+			memcpy(skb_put(skb, rtskb->len), rtskb->data,
+			       rtskb->len);
+
+			skb->dev = rtskb->rtdev->mac_priv->vnic;
+			skb->protocol = eth_type_trans(skb, skb->dev);
+
+			stats = &rtskb->rtdev->mac_priv->vnic_stats;
+
+			kfree_rtskb(rtskb);
+
+			stats->rx_packets++;
+			stats->rx_bytes += skb->len;
+
+			netif_rx(skb);
+		} else {
+			printk("RTmac: VNIC fails to allocate linux skb\n");
+			kfree_rtskb(rtskb);
+		}
+	}
+}
+
+static int rtmac_vnic_copy_mac(struct net_device *dev)
+{
+	dev_addr_set(dev,
+		     (*(struct rtnet_device **)netdev_priv(dev))->dev_addr);
+
+	return 0;
+}
+
+int rtmac_vnic_xmit(struct sk_buff *skb, struct net_device *dev)
+{
+	struct rtnet_device *rtdev = *(struct rtnet_device **)netdev_priv(dev);
+	struct net_device_stats *stats = &rtdev->mac_priv->vnic_stats;
+	struct rtskb_pool *pool = &rtdev->mac_priv->vnic_skb_pool;
+	struct ethhdr *ethernet = (struct ethhdr *)skb->data;
+	struct rtskb *rtskb;
+	int res;
+	int data_len;
+
+	rtskb = alloc_rtskb((skb->len + sizeof(struct rtmac_hdr) + 15) & ~15,
+			    pool);
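+	/* pool exhausted: report busy so Linux requeues the packet */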
+	if (!rtskb)
+		return NETDEV_TX_BUSY;
+
+	rtskb_reserve(rtskb, rtdev->hard_header_len + sizeof(struct rtmac_hdr));
+
+	data_len = skb->len - dev->hard_header_len;
+	memcpy(rtskb_put(rtskb, data_len), skb->data + dev->hard_header_len,
+	       data_len);
+
+	res = rtmac_add_header(rtdev, ethernet->h_dest, rtskb,
+			       ntohs(ethernet->h_proto), RTMAC_FLAG_TUNNEL);
+	if (res < 0) {
+		stats->tx_dropped++;
+		kfree_rtskb(rtskb);
+		goto done;
+	}
+
+	RTNET_ASSERT(rtdev->mac_disc->nrt_packet_tx != NULL, kfree_rtskb(rtskb);
+		     goto done;);
+
+	res = rtdev->mac_disc->nrt_packet_tx(rtskb);
+	if (res < 0) {
+		stats->tx_dropped++;
+		kfree_rtskb(rtskb);
+	} else {
+		stats->tx_packets++;
+		stats->tx_bytes += skb->len;
+	}
+
+done:
+	dev_kfree_skb(skb);
+	return NETDEV_TX_OK;
+}
+
+static struct net_device_stats *rtmac_vnic_get_stats(struct net_device *dev)
+{
+	return &(*(struct rtnet_device **)netdev_priv(dev))
+			->mac_priv->vnic_stats;
+}
+
+static int rtmac_vnic_change_mtu(struct net_device *dev, int new_mtu)
+{
+	if ((new_mtu < 68) ||
+	    ((unsigned)new_mtu > 1500 - sizeof(struct rtmac_hdr)))
+		return -EINVAL;
+	dev->mtu = new_mtu;
+	return 0;
+}
+
+void rtmac_vnic_set_max_mtu(struct rtnet_device *rtdev, unsigned int max_mtu)
+{
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+	struct net_device *vnic = mac_priv->vnic;
+	unsigned int prev_mtu = mac_priv->vnic_max_mtu;
+
+	mac_priv->vnic_max_mtu = max_mtu - sizeof(struct rtmac_hdr);
+
+	/* set vnic mtu in case max_mtu is smaller than the current mtu or
+	 * the current mtu was set to the previous max_mtu */
+	rtnl_lock();
+	if ((vnic->mtu > mac_priv->vnic_max_mtu) ||
+	    (prev_mtu == mac_priv->vnic_max_mtu)) {
+		dev_set_mtu(vnic, mac_priv->vnic_max_mtu);
+	}
+	rtnl_unlock();
+}
+
+static struct net_device_ops vnic_netdev_ops = {
+	.ndo_open = rtmac_vnic_copy_mac,
+	.ndo_get_stats = rtmac_vnic_get_stats,
+	.ndo_change_mtu = rtmac_vnic_change_mtu,
+};
+
+static void rtmac_vnic_setup(struct net_device *dev)
+{
+	ether_setup(dev);
+
+	dev->netdev_ops = &vnic_netdev_ops;
+	dev->flags &= ~IFF_MULTICAST;
+}
+
+int rtmac_vnic_add(struct rtnet_device *rtdev, vnic_xmit_handler vnic_xmit)
+{
+	int res;
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+	struct net_device *vnic;
+	char buf[IFNAMSIZ];
+
+	/* does the discipline request vnic support? */
+	if (!vnic_xmit)
+		return 0;
+
+	mac_priv->vnic = NULL;
+	mac_priv->vnic_max_mtu = rtdev->mtu - sizeof(struct rtmac_hdr);
+	memset(&mac_priv->vnic_stats, 0, sizeof(mac_priv->vnic_stats));
+
+	/* create the rtskb pool */
+	if (rtskb_pool_init(&mac_priv->vnic_skb_pool, vnic_rtskbs, NULL, NULL) <
+	    vnic_rtskbs) {
+		res = -ENOMEM;
+		goto error;
+	}
+
+	snprintf(buf, sizeof(buf), "vnic%d", rtdev->ifindex - 1);
+
+	vnic = alloc_netdev(sizeof(struct rtnet_device *), buf,
+			    NET_NAME_UNKNOWN, rtmac_vnic_setup);
+	if (!vnic) {
+		res = -ENOMEM;
+		goto error;
+	}
+
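+	/* note: ndo_start_xmit lives in the shared vnic_netdev_ops; all VNICs
+	 * use the handler installed by the most recently added discipline */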
+	vnic_netdev_ops.ndo_start_xmit = vnic_xmit;
+	vnic->mtu = mac_priv->vnic_max_mtu;
+	*(struct rtnet_device **)netdev_priv(vnic) = rtdev;
+	rtmac_vnic_copy_mac(vnic);
+
+	res = register_netdev(vnic);
+	if (res < 0) {
+		free_netdev(vnic);
+		goto error;
+	}
+
+	mac_priv->vnic = vnic;
+
+	return 0;
+
+error:
+	rtskb_pool_release(&mac_priv->vnic_skb_pool);
+	return res;
+}
+
+int rtmac_vnic_unregister(struct rtnet_device *rtdev)
+{
+	struct rtmac_priv *mac_priv = rtdev->mac_priv;
+
+	if (mac_priv->vnic) {
+		rtskb_pool_release(&mac_priv->vnic_skb_pool);
+		unregister_netdev(mac_priv->vnic);
+		free_netdev(mac_priv->vnic);
+		mac_priv->vnic = NULL;
+	}
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int rtnet_rtmac_vnics_show(struct xnvfile_regular_iterator *it, void *d)
+{
+	struct rtnet_device *rtdev;
+	int i;
+	int err;
+
+	xnvfile_printf(it, "RT-NIC name\tVNIC name\n");
+
+	for (i = 1; i <= MAX_RT_DEVICES; i++) {
+		rtdev = rtdev_get_by_index(i);
+		if (rtdev == NULL)
+			continue;
+
+		err = mutex_lock_interruptible(&rtdev->nrt_lock);
+		if (err < 0) {
+			rtdev_dereference(rtdev);
+			return err;
+		}
+
+		if (rtdev->mac_priv != NULL) {
+			struct rtmac_priv *rtmac;
+
+			rtmac = (struct rtmac_priv *)rtdev->mac_priv;
+			xnvfile_printf(it, "%-15s %s\n", rtdev->name,
+				       rtmac->vnic->name);
+		}
+
+		mutex_unlock(&rtdev->nrt_lock);
+		rtdev_dereference(rtdev);
+	}
+
+	return 0;
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+int __init rtmac_vnic_module_init(void)
+{
+	rtskb_queue_init(&rx_queue);
+
+	rtdm_nrtsig_init(&vnic_signal, rtmac_vnic_signal_handler, NULL);
+
+	return 0;
+}
+
+void rtmac_vnic_module_cleanup(void)
+{
+	struct rtskb *rtskb;
+
+	rtdm_nrtsig_destroy(&vnic_signal);
+
+	while ((rtskb = rtskb_dequeue(&rx_queue)) != NULL) {
+		kfree_rtskb(rtskb);
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Kconfig
new file mode 100644
index 0000000..4444661
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Kconfig
@@ -0,0 +1,21 @@
+config XENO_DRIVERS_NET_TDMA
+    tristate "TDMA discipline for RTmac"
+    depends on XENO_DRIVERS_NET_RTMAC
+    default y
+    help
+    The Time Division Multiple Access discipline is the default RTmac
+    protocol for Ethernet networks. It consists of a master synchronising
+    the access of the slaves to the media by periodically issuing
+    synchronisation frames. Backup masters can be set up to take over if
+    the primary master fails. TDMA also provides a global clock across
+    all participants. The tdmacfg tool can be used to configure a
+    real-time NIC to use TDMA.
+
+    See Documentation/README.rtmac for further details.
+
+config XENO_DRIVERS_NET_TDMA_MASTER
+    bool "TDMA master support"
+    depends on XENO_DRIVERS_NET_TDMA
+    default y
+    help
+    Enables TDMA master and backup master support for the node. This can
+    be switched off to reduce the memory footprint of pure slave nodes.
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Makefile
new file mode 100644
index 0000000..f85f5d7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/Makefile
@@ -0,0 +1,10 @@
+ccflags-y += -I$(srctree)/drivers/xenomai/net/stack/include
+
+obj-$(CONFIG_XENO_DRIVERS_NET_TDMA) += tdma.o
+
+tdma-y := \
+	tdma_dev.o \
+	tdma_ioctl.o \
+	tdma_module.o \
+	tdma_proto.o \
+	tdma_worker.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_dev.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_dev.c
new file mode 100644
index 0000000..99c7672
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_dev.c
@@ -0,0 +1,186 @@
+/***
+ *
+ *  rtmac/tdma/tdma_dev.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>
+ *                2003-2006 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/list.h>
+
+#include <rtdev.h>
+#include <rtmac.h>
+#include <rtmac/tdma/tdma.h>
+
+struct tdma_dev_ctx {
+	rtdm_task_t *cycle_waiter;
+};
+
+static int tdma_dev_open(struct rtdm_fd *fd, int oflags)
+{
+	struct tdma_dev_ctx *ctx = rtdm_fd_to_private(fd);
+
+	ctx->cycle_waiter = NULL;
+
+	return 0;
+}
+
+static void tdma_dev_close(struct rtdm_fd *fd)
+{
+	struct tdma_dev_ctx *ctx = rtdm_fd_to_private(fd);
+	rtdm_lockctx_t lock_ctx;
+
+	cobalt_atomic_enter(lock_ctx);
+	if (ctx->cycle_waiter)
+		rtdm_task_unblock(ctx->cycle_waiter);
+	cobalt_atomic_leave(lock_ctx);
+}
+
+static int wait_on_sync(struct tdma_dev_ctx *tdma_ctx, rtdm_event_t *sync_event)
+{
+	rtdm_lockctx_t lock_ctx;
+	int ret;
+
+	cobalt_atomic_enter(lock_ctx);
+	/* keep it simple: only one waiter per device instance allowed */
+	if (!tdma_ctx->cycle_waiter) {
+		tdma_ctx->cycle_waiter = rtdm_task_current();
+		ret = rtdm_event_wait(sync_event);
+		tdma_ctx->cycle_waiter = NULL;
+	} else
+		ret = -EBUSY;
+	cobalt_atomic_leave(lock_ctx);
+
+	return ret;
+}
+
+static int tdma_dev_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	struct tdma_dev_ctx *ctx = rtdm_fd_to_private(fd);
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t lock_ctx;
+	int ret;
+
+	tdma = container_of(rtdm_fd_to_context(fd)->device, struct tdma_priv,
+			    api_device);
+
+	switch (request) {
+	case RTMAC_RTIOC_TIMEOFFSET: {
+		nanosecs_rel_t offset;
+
+		rtdm_lock_get_irqsave(&tdma->lock, lock_ctx);
+		offset = tdma->clock_offset;
+		rtdm_lock_put_irqrestore(&tdma->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			if (!rtdm_rw_user_ok(fd, arg, sizeof(__s64)) ||
+			    rtdm_copy_to_user(fd, arg, &offset, sizeof(__s64)))
+				return -EFAULT;
+		} else
+			*(__s64 *)arg = offset;
+
+		return 0;
+	}
+	case RTMAC_RTIOC_WAITONCYCLE:
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		if ((long)arg != TDMA_WAIT_ON_SYNC)
+			return -EINVAL;
+
+		return wait_on_sync(ctx, &tdma->sync_event);
+
+	case RTMAC_RTIOC_WAITONCYCLE_EX: {
+		struct rtmac_waitinfo *waitinfo = (struct rtmac_waitinfo *)arg;
+		struct rtmac_waitinfo waitinfo_buf;
+
+#define WAITINFO_HEAD_SIZE                                                     \
+	((char *)&waitinfo_buf.cycle_no - (char *)&waitinfo_buf)
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		if (rtdm_fd_is_user(fd)) {
+			if (!rtdm_rw_user_ok(fd, waitinfo,
+					     sizeof(struct rtmac_waitinfo)) ||
+			    rtdm_copy_from_user(fd, &waitinfo_buf, arg,
+						WAITINFO_HEAD_SIZE))
+				return -EFAULT;
+
+			waitinfo = &waitinfo_buf;
+		}
+
+		if ((waitinfo->type != TDMA_WAIT_ON_SYNC) ||
+		    (waitinfo->size < sizeof(struct rtmac_waitinfo)))
+			return -EINVAL;
+
+		ret = wait_on_sync(ctx, &tdma->sync_event);
+		if (ret)
+			return ret;
+
+		rtdm_lock_get_irqsave(&tdma->lock, lock_ctx);
+		waitinfo->cycle_no = tdma->current_cycle;
+		waitinfo->cycle_start = tdma->current_cycle_start;
+		waitinfo->clock_offset = tdma->clock_offset;
+		rtdm_lock_put_irqrestore(&tdma->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			if (rtdm_copy_to_user(fd, arg, &waitinfo_buf,
+					      sizeof(struct rtmac_waitinfo)))
+				return -EFAULT;
+		}
+
+		return 0;
+	}
+	default:
+		return -ENOTTY;
+	}
+}
+
+static struct rtdm_driver tdma_driver = {
+	.profile_info = RTDM_PROFILE_INFO(tdma, RTDM_CLASS_RTMAC,
+					  RTDM_SUBCLASS_TDMA, RTNET_RTDM_VER),
+	.device_flags = RTDM_NAMED_DEVICE,
+	.device_count = 1,
+	.context_size = sizeof(struct tdma_dev_ctx),
+	.ops = {
+		.open = tdma_dev_open,
+		.ioctl_rt = tdma_dev_ioctl,
+		.ioctl_nrt = tdma_dev_ioctl,
+		.close = tdma_dev_close,
+	},
+};
+
+int tdma_dev_init(struct rtnet_device *rtdev, struct tdma_priv *tdma)
+{
+	char *pos;
+
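+	/* build the API device name from "TDMA" plus the trailing digits of
+	 * the interface name, e.g. rteth0 -> TDMA0 */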
+	strcpy(tdma->device_name, "TDMA");
+	for (pos = rtdev->name + strlen(rtdev->name) - 1;
+	     (pos >= rtdev->name) && ((*pos) >= '0') && (*pos <= '9'); pos--)
+		;
+	strncat(tdma->device_name + 4, pos + 1, IFNAMSIZ - 4);
+
+	tdma->api_driver = tdma_driver;
+	tdma->api_device.driver = &tdma->api_driver;
+	tdma->api_device.label = tdma->device_name;
+
+	return rtdm_dev_register(&tdma->api_device);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_ioctl.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_ioctl.c
new file mode 100644
index 0000000..fe4ff59
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_ioctl.c
@@ -0,0 +1,663 @@
+/***
+ *
+ *  rtmac/tdma/tdma_ioctl.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/delay.h>
+#include <linux/uaccess.h>
+#include <asm/div64.h>
+
+#include <tdma_chrdev.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/tdma/tdma.h>
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+static int tdma_ioctl_master(struct rtnet_device *rtdev,
+			     struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	u64 cycle_ms;
+	unsigned int table_size;
+	int ret;
+
+	if (rtdev->mac_priv == NULL) {
+		ret = rtmac_disc_attach(rtdev, &tdma_disc);
+		if (ret < 0)
+			return ret;
+	}
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC) {
+		/* note: we don't clean up an unknown discipline */
+		return -ENOTTY;
+	}
+
+	if (test_bit(TDMA_FLAG_ATTACHED, &tdma->flags)) {
+		/* already attached */
+		return -EBUSY;
+	}
+
+	set_bit(TDMA_FLAG_MASTER, &tdma->flags);
+
+	tdma->cal_rounds = cfg->args.master.cal_rounds;
+
+	/* search at least 3 cycle periods for other masters */
+	cycle_ms = cfg->args.master.cycle_period;
+	do_div(cycle_ms, 1000000);
+	if (cycle_ms == 0)
+		cycle_ms = 1;
+	msleep(3 * cycle_ms);
+
+	if (rtskb_module_pool_init(&tdma->cal_rtskb_pool,
+				   cfg->args.master.max_cal_requests) !=
+	    cfg->args.master.max_cal_requests) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+
+	table_size = sizeof(struct tdma_slot *) *
+		     ((cfg->args.master.max_slot_id >= 1) ?
+			      cfg->args.master.max_slot_id + 1 :
+			      2);
+
+	tdma->slot_table = kzalloc(table_size, GFP_KERNEL);
+	if (!tdma->slot_table) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+	tdma->max_slot_id = cfg->args.master.max_slot_id;
+
+	tdma->cycle_period = cfg->args.master.cycle_period;
+	tdma->sync_job.ref_count = 0;
+	INIT_LIST_HEAD(&tdma->sync_job.entry);
+
+	if (cfg->args.master.backup_sync_offset == 0)
+		tdma->sync_job.id = XMIT_SYNC;
+	else {
+		set_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags);
+		tdma->sync_job.id = BACKUP_SYNC;
+		tdma->backup_sync_inc = cfg->args.master.backup_sync_offset +
+					tdma->cycle_period;
+	}
+
+	/* did we detect another active master? */
+	if (test_bit(TDMA_FLAG_RECEIVED_SYNC, &tdma->flags)) {
+		/* become a slave, we need to calibrate first */
+		tdma->sync_job.id = WAIT_ON_SYNC;
+	} else {
+		if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags))
+			printk("TDMA: warning, no primary master detected!\n");
+		set_bit(TDMA_FLAG_CALIBRATED, &tdma->flags);
+		tdma->current_cycle_start = rtdm_clock_read();
+	}
+
+	tdma->first_job = tdma->current_job = &tdma->sync_job;
+
+	rtdm_event_signal(&tdma->worker_wakeup);
+
+	set_bit(TDMA_FLAG_ATTACHED, &tdma->flags);
+
+	return 0;
+
+err_out:
+	rtmac_disc_detach(rtdev);
+	return ret;
+}
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+static int tdma_ioctl_slave(struct rtnet_device *rtdev, struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	unsigned int table_size;
+	int ret;
+
+	if (rtdev->mac_priv == NULL) {
+		ret = rtmac_disc_attach(rtdev, &tdma_disc);
+		if (ret < 0)
+			return ret;
+	}
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC) {
+		/* note: we don't clean up an unknown discipline */
+		return -ENOTTY;
+	}
+
+	if (test_bit(TDMA_FLAG_ATTACHED, &tdma->flags)) {
+		/* already attached */
+		return -EBUSY;
+	}
+
+	tdma->cal_rounds = cfg->args.slave.cal_rounds;
+	if (tdma->cal_rounds == 0)
+		set_bit(TDMA_FLAG_CALIBRATED, &tdma->flags);
+
+	table_size = sizeof(struct tdma_slot *) *
+		     ((cfg->args.slave.max_slot_id >= 1) ?
+			      cfg->args.slave.max_slot_id + 1 :
+			      2);
+
+	tdma->slot_table = kzalloc(table_size, GFP_KERNEL);
+	if (!tdma->slot_table) {
+		ret = -ENOMEM;
+		goto err_out;
+	}
+	tdma->max_slot_id = cfg->args.slave.max_slot_id;
+
+	tdma->sync_job.id = WAIT_ON_SYNC;
+	tdma->sync_job.ref_count = 0;
+	INIT_LIST_HEAD(&tdma->sync_job.entry);
+
+	tdma->first_job = tdma->current_job = &tdma->sync_job;
+
+	rtdm_event_signal(&tdma->worker_wakeup);
+
+	set_bit(TDMA_FLAG_ATTACHED, &tdma->flags);
+
+	return 0;
+
+err_out:
+	rtmac_disc_detach(rtdev);
+	return ret;
+}
+
+static int tdma_ioctl_cal_result_size(struct rtnet_device *rtdev,
+				      struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	if (!test_bit(TDMA_FLAG_CALIBRATED, &tdma->flags))
+		return tdma->cal_rounds;
+	else
+		return 0;
+}
+
+int start_calibration(struct rt_proc_call *call)
+{
+	struct tdma_request_cal *req_cal;
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+
+	req_cal = rtpc_get_priv(call, struct tdma_request_cal);
+	tdma = req_cal->tdma;
+
+	/* there are no slots yet, simply add this job after first_job */
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+	tdma->calibration_call = call;
+	tdma->job_list_revision++;
+	list_add(&req_cal->head.entry, &tdma->first_job->entry);
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return -CALL_PENDING;
+}
+
+void copyback_calibration(struct rt_proc_call *call, void *priv_data)
+{
+	struct tdma_request_cal *req_cal;
+	struct tdma_priv *tdma;
+	int i;
+	u64 value;
+	u64 average = 0;
+	u64 min = 0x7FFFFFFFFFFFFFFFLL;
+	u64 max = 0;
+
+	req_cal = rtpc_get_priv(call, struct tdma_request_cal);
+	tdma = req_cal->tdma;
+
+	for (i = 0; i < tdma->cal_rounds; i++) {
+		value = req_cal->result_buffer[i];
+		average += value;
+		if (value < min)
+			min = value;
+		if (value > max)
+			max = value;
+		if ((req_cal->cal_results) &&
+		    (copy_to_user(&req_cal->cal_results[i], &value,
+				  sizeof(value)) != 0))
+			rtpc_set_result(call, -EFAULT);
+	}
+	do_div(average, tdma->cal_rounds);
+	tdma->master_packet_delay_ns = average;
+
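+	/* convert the ns statistics to us, rounding to the nearest value */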
+	average += 500;
+	do_div(average, 1000);
+	min += 500;
+	do_div(min, 1000);
+	max += 500;
+	do_div(max, 1000);
+	printk("TDMA: calibrated master-to-slave packet delay: "
+	       "%ld us (min/max: %ld/%ld us)\n",
+	       (unsigned long)average, (unsigned long)min, (unsigned long)max);
+}
+
+void cleanup_calibration(void *priv_data)
+{
+	struct tdma_request_cal *req_cal;
+
+	req_cal = (struct tdma_request_cal *)priv_data;
+	kfree(req_cal->result_buffer);
+}
+
+static int tdma_ioctl_set_slot(struct rtnet_device *rtdev,
+			       struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	int id;
+	int jnt_id;
+	struct tdma_slot *slot, *old_slot;
+	struct tdma_job *job, *prev_job;
+	struct tdma_request_cal req_cal;
+	struct rtskb *rtskb;
+	unsigned int job_list_revision;
+	rtdm_lockctx_t context;
+	int ret;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	id = cfg->args.set_slot.id;
+	if (id > tdma->max_slot_id)
+		return -EINVAL;
+
+	if (cfg->args.set_slot.size == 0)
+		cfg->args.set_slot.size = rtdev->mtu;
+	else if (cfg->args.set_slot.size > rtdev->mtu)
+		return -EINVAL;
+
+	jnt_id = cfg->args.set_slot.joint_slot;
+	if ((jnt_id >= 0) &&
+	    ((jnt_id >= tdma->max_slot_id) || (tdma->slot_table[jnt_id] == 0) ||
+	     (tdma->slot_table[jnt_id]->mtu != cfg->args.set_slot.size)))
+		return -EINVAL;
+
+	slot = kmalloc(sizeof(*slot), GFP_KERNEL);
+	if (!slot)
+		return -ENOMEM;
+
+	if (!test_bit(TDMA_FLAG_CALIBRATED, &tdma->flags)) {
+		req_cal.head.id = XMIT_REQ_CAL;
+		req_cal.head.ref_count = 0;
+		req_cal.tdma = tdma;
+		req_cal.offset = cfg->args.set_slot.offset;
+		req_cal.period = cfg->args.set_slot.period;
+		req_cal.phasing = cfg->args.set_slot.phasing;
+		req_cal.cal_rounds = tdma->cal_rounds;
+		req_cal.cal_results = cfg->args.set_slot.cal_results;
+
+		req_cal.result_buffer =
+			kmalloc(req_cal.cal_rounds * sizeof(u64), GFP_KERNEL);
+		if (!req_cal.result_buffer) {
+			kfree(slot);
+			return -ENOMEM;
+		}
+
+		ret = rtpc_dispatch_call(start_calibration, 0, &req_cal,
+					 sizeof(req_cal), copyback_calibration,
+					 cleanup_calibration);
+		if (ret < 0) {
+			/* kick out any pending calibration job before returning */
+			rtdm_lock_get_irqsave(&tdma->lock, context);
+
+			job = list_entry(tdma->first_job->entry.next,
+					 struct tdma_job, entry);
+			if (job != tdma->first_job) {
+				__list_del(job->entry.prev, job->entry.next);
+
+				while (job->ref_count > 0) {
+					rtdm_lock_put_irqrestore(&tdma->lock,
+								 context);
+					msleep(100);
+					rtdm_lock_get_irqsave(&tdma->lock,
+							      context);
+				}
+			}
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+			kfree(slot);
+			return ret;
+		}
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		if (test_bit(TDMA_FLAG_MASTER, &tdma->flags)) {
+			u32 cycle_no = READ_ONCE(tdma->current_cycle);
+			u64 cycle_ms;
+
+			/* switch back to [backup] master mode */
+			if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags))
+				tdma->sync_job.id = BACKUP_SYNC;
+			else
+				tdma->sync_job.id = XMIT_SYNC;
+
+			/* wait 2 cycle periods for the mode switch */
+			cycle_ms = tdma->cycle_period;
+			do_div(cycle_ms, 1000000);
+			if (cycle_ms == 0)
+				cycle_ms = 1;
+			msleep(2 * cycle_ms);
+
+			/* catch the very unlikely case that the current
+			 * master died while we just switched the mode */
+			if (cycle_no == READ_ONCE(tdma->current_cycle)) {
+				kfree(slot);
+				return -ETIME;
+			}
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+		set_bit(TDMA_FLAG_CALIBRATED, &tdma->flags);
+	}
+
+	slot->head.id = id;
+	slot->head.ref_count = 0;
+	slot->period = cfg->args.set_slot.period;
+	slot->phasing = cfg->args.set_slot.phasing;
+	slot->mtu = cfg->args.set_slot.size;
+	slot->size = cfg->args.set_slot.size + rtdev->hard_header_len;
+	slot->offset = cfg->args.set_slot.offset;
+	slot->queue = &slot->local_queue;
+	rtskb_prio_queue_init(&slot->local_queue);
+
+	if (jnt_id >= 0) /* all other validation tests performed above */
+		slot->queue = tdma->slot_table[jnt_id]->queue;
+
+	old_slot = tdma->slot_table[id];
+	if ((id == DEFAULT_NRT_SLOT) &&
+	    (old_slot == tdma->slot_table[DEFAULT_SLOT]))
+		old_slot = NULL;
+
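+	/*
+	 * Search the insertion point without holding tdma->lock; if the job
+	 * list changes meanwhile (revision bump), retry the search.
+	 */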
+restart:
+	job_list_revision = tdma->job_list_revision;
+
+	if (!old_slot) {
+		job = tdma->first_job;
+		while (1) {
+			prev_job = job;
+			job = list_entry(job->entry.next, struct tdma_job,
+					 entry);
+			if (((job->id >= 0) &&
+			     ((slot->offset < SLOT_JOB(job)->offset) ||
+			      ((slot->offset == SLOT_JOB(job)->offset) &&
+			       (slot->head.id <= SLOT_JOB(job)->head.id)))) ||
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+			    ((job->id == XMIT_RPL_CAL) &&
+			     (slot->offset <
+			      REPLY_CAL_JOB(job)->reply_offset)) ||
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+			    (job == tdma->first_job))
+				break;
+		}
+
+	} else
+		prev_job = list_entry(old_slot->head.entry.prev,
+				      struct tdma_job, entry);
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	if (job_list_revision != tdma->job_list_revision) {
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		msleep(100);
+		goto restart;
+	}
+
+	if (old_slot)
+		__list_del(old_slot->head.entry.prev,
+			   old_slot->head.entry.next);
+
+	list_add(&slot->head.entry, &prev_job->entry);
+	tdma->slot_table[id] = slot;
+	if ((id == DEFAULT_SLOT) &&
+	    (tdma->slot_table[DEFAULT_NRT_SLOT] == old_slot))
+		tdma->slot_table[DEFAULT_NRT_SLOT] = slot;
+
+	if (old_slot) {
+		while (old_slot->head.ref_count > 0) {
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+			msleep(100);
+			rtdm_lock_get_irqsave(&tdma->lock, context);
+		}
+
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		/* search for other slots linked to the old one */
+		for (jnt_id = 0; jnt_id < tdma->max_slot_id; jnt_id++)
+			if ((tdma->slot_table[jnt_id] != 0) &&
+			    (tdma->slot_table[jnt_id]->queue ==
+			     &old_slot->local_queue)) {
+				/* found a joint slot, move or detach it now */
+				rtdm_lock_get_irqsave(&tdma->lock, context);
+
+				while (tdma->slot_table[jnt_id]->head.ref_count >
+				       0) {
+					rtdm_lock_put_irqrestore(&tdma->lock,
+								 context);
+					msleep(100);
+					rtdm_lock_get_irqsave(&tdma->lock,
+							      context);
+				}
+
+				/* If the new slot size is larger, detach the
+				 * other slot, update it otherwise. */
+				if (slot->mtu > tdma->slot_table[jnt_id]->mtu)
+					tdma->slot_table[jnt_id]->queue =
+						&tdma->slot_table[jnt_id]
+							 ->local_queue;
+				else {
+					tdma->slot_table[jnt_id]->mtu =
+						slot->mtu;
+					tdma->slot_table[jnt_id]->queue =
+						slot->queue;
+				}
+
+				rtdm_lock_put_irqrestore(&tdma->lock, context);
+			}
+	} else
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	rtmac_vnic_set_max_mtu(rtdev, cfg->args.set_slot.size);
+
+	if (old_slot) {
+		/* avoid that the formerly joint queue gets purged */
+		old_slot->queue = &old_slot->local_queue;
+
+		/* Without any reference to the old job and no joint slots we
+		 * can safely purge its queue without lock protection.
+		 * NOTE: Reconfiguring a slot during runtime may lead to
+		 *       packet drops! */
+		while ((rtskb = __rtskb_prio_dequeue(old_slot->queue)))
+			kfree_rtskb(rtskb);
+
+		kfree(old_slot);
+	}
+
+	return 0;
+}
+
+int tdma_cleanup_slot(struct tdma_priv *tdma, struct tdma_slot *slot)
+{
+	struct rtskb *rtskb;
+	unsigned int id, jnt_id;
+	rtdm_lockctx_t context;
+
+	if (!slot)
+		return -EINVAL;
+
+	id = slot->head.id;
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	__list_del(slot->head.entry.prev, slot->head.entry.next);
+
+	if (id == DEFAULT_NRT_SLOT)
+		tdma->slot_table[DEFAULT_NRT_SLOT] =
+			tdma->slot_table[DEFAULT_SLOT];
+	else {
+		if ((id == DEFAULT_SLOT) &&
+		    (tdma->slot_table[DEFAULT_NRT_SLOT] == slot))
+			tdma->slot_table[DEFAULT_NRT_SLOT] = NULL;
+		tdma->slot_table[id] = NULL;
+	}
+
+	while (slot->head.ref_count > 0) {
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+		msleep(100);
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+	}
+
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	/* search for other slots linked to this one */
+	for (jnt_id = 0; jnt_id < tdma->max_slot_id; jnt_id++)
+		if ((tdma->slot_table[jnt_id] != 0) &&
+		    (tdma->slot_table[jnt_id]->queue == &slot->local_queue)) {
+			/* found a joint slot, detach it now under lock protection */
+			rtdm_lock_get_irqsave(&tdma->lock, context);
+
+			while (tdma->slot_table[jnt_id]->head.ref_count > 0) {
+				rtdm_lock_put_irqrestore(&tdma->lock, context);
+				msleep(100);
+				rtdm_lock_get_irqsave(&tdma->lock, context);
+			}
+			tdma->slot_table[jnt_id]->queue =
+				&tdma->slot_table[jnt_id]->local_queue;
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+		}
+
+	/* avoid that the formerly joint queue gets purged */
+	slot->queue = &slot->local_queue;
+
+	/* No need to protect the queue access here - no one is referring to
+	 * this job anymore (ref_count == 0, all joint slots detached). */
+	while ((rtskb = __rtskb_prio_dequeue(slot->queue)))
+		kfree_rtskb(rtskb);
+
+	kfree(slot);
+
+	return 0;
+}
+
+static int tdma_ioctl_remove_slot(struct rtnet_device *rtdev,
+				  struct tdma_config *cfg)
+{
+	struct tdma_priv *tdma;
+	int id;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	id = cfg->args.remove_slot.id;
+	if (id > tdma->max_slot_id)
+		return -EINVAL;
+
+	if ((id == DEFAULT_NRT_SLOT) && (tdma->slot_table[DEFAULT_NRT_SLOT] ==
+					 tdma->slot_table[DEFAULT_SLOT]))
+		return -EINVAL;
+
+	return tdma_cleanup_slot(tdma, tdma->slot_table[id]);
+}
+
+static int tdma_ioctl_detach(struct rtnet_device *rtdev)
+{
+	struct tdma_priv *tdma;
+	int ret;
+
+	if (rtdev->mac_priv == NULL)
+		return -ENOTTY;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+	if (tdma->magic != TDMA_MAGIC)
+		return -ENOTTY;
+
+	ret = rtmac_disc_detach(rtdev);
+
+	return ret;
+}
+
+int tdma_ioctl(struct rtnet_device *rtdev, unsigned int request,
+	       unsigned long arg)
+{
+	struct tdma_config cfg;
+	int ret;
+
+	ret = copy_from_user(&cfg, (void *)arg, sizeof(cfg));
+	if (ret != 0)
+		return -EFAULT;
+
+	if (mutex_lock_interruptible(&rtdev->nrt_lock))
+		return -ERESTARTSYS;
+
+	switch (request) {
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	case TDMA_IOC_MASTER:
+		ret = tdma_ioctl_master(rtdev, &cfg);
+		break;
+#endif
+	case TDMA_IOC_SLAVE:
+		ret = tdma_ioctl_slave(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_CAL_RESULT_SIZE:
+		ret = tdma_ioctl_cal_result_size(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_SET_SLOT:
+		ret = tdma_ioctl_set_slot(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_REMOVE_SLOT:
+		ret = tdma_ioctl_remove_slot(rtdev, &cfg);
+		break;
+
+	case TDMA_IOC_DETACH:
+		ret = tdma_ioctl_detach(rtdev);
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	mutex_unlock(&rtdev->nrt_lock);
+
+	return ret;
+}
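+
+/*
+ * Illustrative user-space invocation (sketch only; the set_slot field
+ * names other than .size are assumptions about struct tdma_config):
+ *
+ *	struct tdma_config cfg;
+ *	int fd = open("/dev/rtnet", O_RDWR);
+ *
+ *	memset(&cfg, 0, sizeof(cfg));
+ *	strncpy(cfg.head.if_name, "rteth0", IFNAMSIZ);
+ *	cfg.args.set_slot.id = 0;
+ *	cfg.args.set_slot.period = 1;
+ *	cfg.args.set_slot.offset = 200000;	(offset in ns, assumed unit)
+ *	cfg.args.set_slot.size = 1500;
+ *	if (ioctl(fd, TDMA_IOC_SET_SLOT, &cfg) < 0)
+ *		perror("TDMA_IOC_SET_SLOT");
+ */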
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_module.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_module.c
new file mode 100644
index 0000000..e25ad53
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_module.c
@@ -0,0 +1,317 @@
+/***
+ *
+ *  rtmac/tdma/tdma_module.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <asm/div64.h>
+#include <linux/delay.h>
+#include <linux/init.h>
+#include <linux/module.h>
+#include <linux/moduleparam.h>
+
+#include <rtdm/driver.h>
+#include <rtmac/rtmac_vnic.h>
+#include <rtmac/tdma/tdma.h>
+#include <rtmac/tdma/tdma_dev.h>
+#include <rtmac/tdma/tdma_ioctl.h>
+#include <rtmac/tdma/tdma_proto.h>
+#include <rtmac/tdma/tdma_worker.h>
+
+#ifdef CONFIG_XENO_OPT_VFILE
+int tdma_proc_read(struct xnvfile_regular_iterator *it, void *data)
+{
+	int d, err = 0;
+	struct rtnet_device *rtdev;
+	struct tdma_priv *tdma;
+	const char *state;
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	u64 cycle;
+#endif
+
+	xnvfile_printf(it, "Interface       API Device      Operation Mode  "
+			   "Cycle   State\n");
+
+	for (d = 1; d <= MAX_RT_DEVICES; d++) {
+		rtdev = rtdev_get_by_index(d);
+		if (!rtdev)
+			continue;
+
+		err = mutex_lock_interruptible(&rtdev->nrt_lock);
+		if (err < 0) {
+			rtdev_dereference(rtdev);
+			break;
+		}
+
+		if (!rtdev->mac_priv)
+			goto unlock_dev;
+		tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+		xnvfile_printf(it, "%-15s %-15s ", rtdev->name,
+			       tdma->api_device.name);
+
+		if (test_bit(TDMA_FLAG_CALIBRATED, &tdma->flags)) {
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+			if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags) &&
+			    !test_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags))
+				state = "stand-by";
+			else
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+				state = "active";
+		} else
+			state = "init";
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		if (test_bit(TDMA_FLAG_MASTER, &tdma->flags)) {
+			cycle = tdma->cycle_period + 500;
+			do_div(cycle, 1000);
+			if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags))
+				xnvfile_printf(it, "Backup Master   %-7ld %s\n",
+					       (unsigned long)cycle, state);
+			else
+				xnvfile_printf(it, "Master          %-7ld %s\n",
+					       (unsigned long)cycle, state);
+		} else
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+			xnvfile_printf(it, "Slave           -       %s\n",
+				       state);
+
+	unlock_dev:
+		mutex_unlock(&rtdev->nrt_lock);
+		rtdev_dereference(rtdev);
+	}
+
+	return err;
+}
+
+int tdma_slots_proc_read(struct xnvfile_regular_iterator *it, void *data)
+{
+	int d, i, err = 0;
+	struct rtnet_device *rtdev;
+	struct tdma_priv *tdma;
+	struct tdma_slot *slot;
+	int jnt_id;
+	u64 slot_offset;
+
+	xnvfile_printf(it, "Interface       "
+			   "Slots (id[->joint]:offset:phasing/period:size)\n");
+
+	for (d = 1; d <= MAX_RT_DEVICES; d++) {
+		rtdev = rtdev_get_by_index(d);
+		if (!rtdev)
+			continue;
+
+		err = mutex_lock_interruptible(&rtdev->nrt_lock);
+		if (err < 0) {
+			rtdev_dereference(rtdev);
+			break;
+		}
+
+		if (!rtdev->mac_priv)
+			goto unlock_dev;
+		tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+		xnvfile_printf(it, "%-15s ", rtdev->name);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		if (test_bit(TDMA_FLAG_BACKUP_MASTER, &tdma->flags)) {
+			slot_offset = tdma->backup_sync_inc -
+				      tdma->cycle_period + 500;
+			do_div(slot_offset, 1000);
+			xnvfile_printf(it, "bak:%ld  ",
+				       (unsigned long)slot_offset);
+		}
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+		if (tdma->slot_table)
+			for (i = 0; i <= tdma->max_slot_id; i++) {
+				slot = tdma->slot_table[i];
+				if (!slot ||
+				    ((i == DEFAULT_NRT_SLOT) &&
+				     (tdma->slot_table[DEFAULT_SLOT] == slot)))
+					continue;
+
+				if (slot->queue == &slot->local_queue) {
+					xnvfile_printf(it, "%d", i);
+				} else
+					for (jnt_id = 0;
+					     jnt_id <= tdma->max_slot_id;
+					     jnt_id++)
+						if (&tdma->slot_table[jnt_id]
+							     ->local_queue ==
+						    slot->queue) {
+							xnvfile_printf(it,
+								       "%d->%d",
+								       i,
+								       jnt_id);
+							break;
+						}
+
+				slot_offset = slot->offset + 500;
+				do_div(slot_offset, 1000);
+				xnvfile_printf(it, ":%ld:%d/%d:%d  ",
+					       (unsigned long)slot_offset,
+					       slot->phasing + 1, slot->period,
+					       slot->mtu);
+			}
+
+		xnvfile_printf(it, "\n");
+
+	unlock_dev:
+		mutex_unlock(&rtdev->nrt_lock);
+		rtdev_dereference(rtdev);
+	}
+
+	return err;
+}
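+
+/*
+ * Illustrative /proc output of tdma_proc_read() above (made-up values):
+ *
+ *	Interface       API Device      Operation Mode  Cycle   State
+ *	rteth0          TDMA0           Master          5000    active
+ *	rteth1          TDMA1           Slave           -       init
+ */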
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+int tdma_attach(struct rtnet_device *rtdev, void *priv)
+{
+	struct tdma_priv *tdma = (struct tdma_priv *)priv;
+	int ret;
+
+	memset(tdma, 0, sizeof(struct tdma_priv));
+
+	tdma->magic = TDMA_MAGIC;
+	tdma->rtdev = rtdev;
+
+	rtdm_lock_init(&tdma->lock);
+
+	rtdm_event_init(&tdma->worker_wakeup, 0);
+	rtdm_event_init(&tdma->xmit_event, 0);
+	rtdm_event_init(&tdma->sync_event, 0);
+
+	ret = tdma_dev_init(rtdev, tdma);
+	if (ret < 0)
+		goto err_out1;
+
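+	/* tdma_worker() first blocks on worker_wakeup; it only starts
+	 * processing jobs once the job list has been set up and the event
+	 * is signalled */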
+	ret = rtdm_task_init(&tdma->worker_task, "rtnet-tdma", tdma_worker,
+			     tdma, DEF_WORKER_PRIO, 0);
+	if (ret != 0)
+		goto err_out2;
+
+	return 0;
+
+err_out2:
+	tdma_dev_release(tdma);
+
+err_out1:
+	rtdm_event_destroy(&tdma->sync_event);
+	rtdm_event_destroy(&tdma->xmit_event);
+	rtdm_event_destroy(&tdma->worker_wakeup);
+
+	return ret;
+}
+
+int tdma_detach(struct rtnet_device *rtdev, void *priv)
+{
+	struct tdma_priv *tdma = (struct tdma_priv *)priv;
+	struct tdma_job *job, *tmp;
+
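+	/* destroying the events unblocks the worker (its waits return with
+	 * an error), so that rtdm_task_destroy() below can join it */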
+	rtdm_event_destroy(&tdma->sync_event);
+	rtdm_event_destroy(&tdma->xmit_event);
+	rtdm_event_destroy(&tdma->worker_wakeup);
+
+	tdma_dev_release(tdma);
+
+	rtdm_task_destroy(&tdma->worker_task);
+
+	list_for_each_entry_safe (job, tmp, &tdma->first_job->entry, entry) {
+		if (job->id >= 0)
+			tdma_cleanup_slot(tdma, SLOT_JOB(job));
+		else if (job->id == XMIT_RPL_CAL) {
+			__list_del(job->entry.prev, job->entry.next);
+			kfree_rtskb(REPLY_CAL_JOB(job)->reply_rtskb);
+		}
+	}
+
+	if (tdma->slot_table)
+		kfree(tdma->slot_table);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	if (test_bit(TDMA_FLAG_MASTER, &tdma->flags))
+		rtskb_pool_release(&tdma->cal_rtskb_pool);
+#endif
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_OPT_VFILE
+struct rtmac_proc_entry tdma_proc_entries[] = {
+	{ .name = "tdma", .handler = tdma_proc_read },
+	{ .name = "tdma_slots", .handler = tdma_slots_proc_read },
+};
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+struct rtmac_disc tdma_disc = {
+	.name = "TDMA",
+	.priv_size = sizeof(struct tdma_priv),
+	.disc_type = __constant_htons(RTMAC_TYPE_TDMA),
+
+	.packet_rx = tdma_packet_rx,
+	.rt_packet_tx = tdma_rt_packet_tx,
+	.nrt_packet_tx = tdma_nrt_packet_tx,
+
+	.get_mtu = tdma_get_mtu,
+
+	.vnic_xmit = RTMAC_DEFAULT_VNIC,
+
+	.attach = tdma_attach,
+	.detach = tdma_detach,
+
+	.ioctls = {
+		.service_name = "RTmac/TDMA",
+		.ioctl_type = RTNET_IOC_TYPE_RTMAC_TDMA,
+		.handler = tdma_ioctl,
+	},
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	.proc_entries = tdma_proc_entries,
+	.nr_proc_entries = ARRAY_SIZE(tdma_proc_entries),
+#endif /* CONFIG_XENO_OPT_VFILE */
+};
+
+int __init tdma_init(void)
+{
+	int ret;
+
+	printk("RTmac/TDMA: initialising time division multiple access "
+	       "control mechanism\n");
+
+	ret = rtmac_disc_register(&tdma_disc);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+void tdma_release(void)
+{
+	rtmac_disc_deregister(&tdma_disc);
+
+	printk("RTmac/TDMA: unloaded\n");
+}
+
+module_init(tdma_init);
+module_exit(tdma_release);
+
+MODULE_AUTHOR("Jan Kiszka");
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_proto.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_proto.c
new file mode 100644
index 0000000..b371c63
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_proto.c
@@ -0,0 +1,407 @@
+/***
+ *
+ *  rtmac/tdma/tdma_proto.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/init.h>
+#include <asm/div64.h>
+
+#include <rtdev.h>
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/tdma/tdma_proto.h>
+
+void tdma_xmit_sync_frame(struct tdma_priv *tdma)
+{
+	struct rtnet_device *rtdev = tdma->rtdev;
+	struct rtskb *rtskb;
+	struct tdma_frm_sync *sync;
+
+	rtskb = alloc_rtskb(rtdev->hard_header_len + sizeof(struct rtmac_hdr) +
+				    sizeof(struct tdma_frm_sync) + 15,
+			    &global_pool);
+	if (!rtskb)
+		goto err_out;
+
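+	/* round the headroom up to a 16-byte boundary so that the frame
+	 * payload starts aligned; the +15 in the allocation above covers
+	 * the worst case */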
+	rtskb_reserve(rtskb,
+		      (rtdev->hard_header_len + sizeof(struct rtmac_hdr) + 15) &
+			      ~15);
+
+	sync = (struct tdma_frm_sync *)rtskb_put(rtskb,
+						 sizeof(struct tdma_frm_sync));
+
+	if (rtmac_add_header(rtdev, rtdev->broadcast, rtskb, RTMAC_TYPE_TDMA,
+			     0) < 0) {
+		kfree_rtskb(rtskb);
+		goto err_out;
+	}
+
+	sync->head.version = __constant_htons(TDMA_FRM_VERSION);
+	sync->head.id = __constant_htons(TDMA_FRM_SYNC);
+
+	sync->cycle_no = htonl(tdma->current_cycle);
+	sync->xmit_stamp = tdma->clock_offset;
+	sync->sched_xmit_stamp =
+		cpu_to_be64(tdma->clock_offset + tdma->current_cycle_start);
+
+	rtskb->xmit_stamp = &sync->xmit_stamp;
+
+	rtmac_xmit(rtskb);
+
+	/* signal local waiters */
+	rtdm_event_pulse(&tdma->sync_event);
+
+	return;
+
+err_out:
+	/*ERROR*/ rtdm_printk("TDMA: Failed to transmit sync frame!\n");
+	return;
+}
+
+int tdma_xmit_request_cal_frame(struct tdma_priv *tdma, u32 reply_cycle,
+				u64 reply_slot_offset)
+{
+	struct rtnet_device *rtdev = tdma->rtdev;
+	struct rtskb *rtskb;
+	struct tdma_frm_req_cal *req_cal;
+	int ret;
+
+	rtskb = alloc_rtskb(rtdev->hard_header_len + sizeof(struct rtmac_hdr) +
+				    sizeof(struct tdma_frm_req_cal) + 15,
+			    &global_pool);
+	ret = -ENOMEM;
+	if (!rtskb)
+		goto err_out;
+
+	rtskb_reserve(rtskb,
+		      (rtdev->hard_header_len + sizeof(struct rtmac_hdr) + 15) &
+			      ~15);
+
+	req_cal = (struct tdma_frm_req_cal *)rtskb_put(
+		rtskb, sizeof(struct tdma_frm_req_cal));
+
+	if ((ret = rtmac_add_header(rtdev, tdma->master_hw_addr, rtskb,
+				    RTMAC_TYPE_TDMA, 0)) < 0) {
+		kfree_rtskb(rtskb);
+		goto err_out;
+	}
+
+	req_cal->head.version = __constant_htons(TDMA_FRM_VERSION);
+	req_cal->head.id = __constant_htons(TDMA_FRM_REQ_CAL);
+
+	req_cal->xmit_stamp = 0;
+	req_cal->reply_cycle = htonl(reply_cycle);
+	req_cal->reply_slot_offset = cpu_to_be64(reply_slot_offset);
+
+	rtskb->xmit_stamp = &req_cal->xmit_stamp;
+
+	ret = rtmac_xmit(rtskb);
+	if (ret < 0)
+		goto err_out;
+
+	return 0;
+
+err_out:
+	/*ERROR*/ rtdm_printk("TDMA: Failed to transmit request calibration "
+			      "frame!\n");
+	return ret;
+}
+
+int tdma_rt_packet_tx(struct rtskb *rtskb, struct rtnet_device *rtdev)
+{
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+	struct tdma_slot *slot;
+	int ret = 0;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	slot = tdma->slot_table[(rtskb->priority & RTSKB_CHANNEL_MASK) >>
+				RTSKB_CHANNEL_SHIFT];
+
+	if (unlikely(!slot)) {
+		ret = -EAGAIN;
+		goto err_out;
+	}
+
+	if (unlikely(rtskb->len > slot->size)) {
+		ret = -EMSGSIZE;
+		goto err_out;
+	}
+
+	__rtskb_prio_queue_tail(slot->queue, rtskb);
+
+err_out:
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return ret;
+}
+
+int tdma_nrt_packet_tx(struct rtskb *rtskb)
+{
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+	struct tdma_slot *slot;
+	int ret = 0;
+
+	tdma = (struct tdma_priv *)rtskb->rtdev->mac_priv->disc_priv;
+
+	rtcap_mark_rtmac_enqueue(rtskb);
+
+	rtskb->priority = RTSKB_PRIO_VALUE(QUEUE_MIN_PRIO, DEFAULT_NRT_SLOT);
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	slot = tdma->slot_table[DEFAULT_NRT_SLOT];
+
+	if (unlikely(!slot)) {
+		ret = -EAGAIN;
+		goto err_out;
+	}
+
+	if (unlikely(rtskb->len > slot->size)) {
+		ret = -EMSGSIZE;
+		goto err_out;
+	}
+
+	__rtskb_prio_queue_tail(slot->queue, rtskb);
+
+err_out:
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return ret;
+}
+
+int tdma_packet_rx(struct rtskb *rtskb)
+{
+	struct tdma_priv *tdma;
+	struct tdma_frm_head *head;
+	u64 delay;
+	u64 cycle_start;
+	nanosecs_rel_t clock_offset;
+	struct rt_proc_call *call;
+	struct tdma_request_cal *req_cal_job;
+	rtdm_lockctx_t context;
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	struct rtskb *reply_rtskb;
+	struct rtnet_device *rtdev;
+	struct tdma_frm_rpl_cal *rpl_cal_frm;
+	struct tdma_reply_cal *rpl_cal_job;
+	struct tdma_job *job;
+#endif
+
+	tdma = (struct tdma_priv *)rtskb->rtdev->mac_priv->disc_priv;
+
+	head = (struct tdma_frm_head *)rtskb->data;
+
+	if (head->version != __constant_htons(TDMA_FRM_VERSION))
+		goto kfree_out;
+
+	switch (head->id) {
+	case __constant_htons(TDMA_FRM_SYNC):
+		rtskb_pull(rtskb, sizeof(struct tdma_frm_sync));
+
+		/* see "Time Arithmetics" in the TDMA specification */
+		clock_offset = be64_to_cpu(SYNC_FRM(head)->xmit_stamp) +
+			       tdma->master_packet_delay_ns;
+		clock_offset -= rtskb->time_stamp;
+
+		cycle_start = be64_to_cpu(SYNC_FRM(head)->sched_xmit_stamp) -
+			      clock_offset;
+
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+		tdma->current_cycle = ntohl(SYNC_FRM(head)->cycle_no);
+		tdma->current_cycle_start = cycle_start;
+		tdma->clock_offset = clock_offset;
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		/* note: Ethernet-specific! */
+		memcpy(tdma->master_hw_addr, rtskb->mac.ethernet->h_source,
+		       ETH_ALEN);
+
+		set_bit(TDMA_FLAG_RECEIVED_SYNC, &tdma->flags);
+
+		rtdm_event_pulse(&tdma->sync_event);
+		break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+	case __constant_htons(TDMA_FRM_REQ_CAL):
+		RTNET_ASSERT(test_bit(TDMA_FLAG_MASTER, &tdma->flags) &&
+				     test_bit(TDMA_FLAG_CALIBRATED,
+					      &tdma->flags),
+			     break;);
+
+		rtskb_pull(rtskb, sizeof(struct tdma_frm_req_cal));
+
+		rtdev = rtskb->rtdev;
+
+		reply_rtskb = alloc_rtskb(
+			rtdev->hard_header_len + sizeof(struct rtmac_hdr) +
+				sizeof(struct tdma_frm_rpl_cal) + 15,
+			&tdma->cal_rtskb_pool);
+		if (unlikely(!reply_rtskb)) {
+			/*ERROR*/ rtdm_printk(
+				"TDMA: Too many calibration requests "
+				"pending!\n");
+			break;
+		}
+
+		rtskb_reserve(reply_rtskb, (rtdev->hard_header_len +
+					    sizeof(struct rtmac_hdr) + 15) &
+						   ~15);
+
+		rpl_cal_frm = (struct tdma_frm_rpl_cal *)rtskb_put(
+			reply_rtskb, sizeof(struct tdma_frm_rpl_cal));
+
+		/* note: Ethernet-specific! */
+		if (unlikely(rtmac_add_header(
+				     rtdev, rtskb->mac.ethernet->h_source,
+				     reply_rtskb, RTMAC_TYPE_TDMA, 0) < 0)) {
+			kfree_rtskb(reply_rtskb);
+			break;
+		}
+
+		rpl_cal_frm->head.version = __constant_htons(TDMA_FRM_VERSION);
+		rpl_cal_frm->head.id = __constant_htons(TDMA_FRM_RPL_CAL);
+
+		rpl_cal_frm->request_xmit_stamp = REQ_CAL_FRM(head)->xmit_stamp;
+		rpl_cal_frm->reception_stamp = cpu_to_be64(rtskb->time_stamp);
+		rpl_cal_frm->xmit_stamp = 0;
+
+		reply_rtskb->xmit_stamp = &rpl_cal_frm->xmit_stamp;
+
+		/* use reply_rtskb memory behind the frame as job buffer */
+		rpl_cal_job = (struct tdma_reply_cal *)reply_rtskb->tail;
+		RTNET_ASSERT(reply_rtskb->tail +
+					     sizeof(struct tdma_reply_cal) <=
+				     reply_rtskb->buf_end,
+			     rtskb_over_panic(reply_rtskb,
+					      sizeof(struct tdma_reply_cal),
+					      current_text_addr()););
+
+		rpl_cal_job->head.id = XMIT_RPL_CAL;
+		rpl_cal_job->head.ref_count = 0;
+		rpl_cal_job->reply_cycle =
+			ntohl(REQ_CAL_FRM(head)->reply_cycle);
+		rpl_cal_job->reply_rtskb = reply_rtskb;
+		rpl_cal_job->reply_offset =
+			be64_to_cpu(REQ_CAL_FRM(head)->reply_slot_offset);
+
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+
+		job = tdma->current_job;
+		while (1) {
+			job = list_entry(job->entry.prev, struct tdma_job,
+					 entry);
+			if ((job == tdma->first_job) ||
+			    ((job->id >= 0) && (SLOT_JOB(job)->offset <
+						rpl_cal_job->reply_offset)) ||
+			    ((job->id == XMIT_RPL_CAL) &&
+			     (REPLY_CAL_JOB(job)->reply_offset <
+			      rpl_cal_job->reply_offset)))
+				break;
+		}
+		list_add(&rpl_cal_job->head.entry, &job->entry);
+		tdma->job_list_revision++;
+
+		rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		break;
+#endif
+
+	case __constant_htons(TDMA_FRM_RPL_CAL):
+		rtskb_pull(rtskb, sizeof(struct tdma_frm_rpl_cal));
+
+		/* see "Time Arithmetics" in the TDMA specification */
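+		/* one-way delay = ((t4 - t1) - (t3 - t2) + 1) / 2, with
+		 * t1 = request xmit, t2 = reception at master, t3 = reply
+		 * xmit, t4 = local reception stamp; +1 rounds to nearest */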
+		delay = (rtskb->time_stamp -
+			 be64_to_cpu(RPL_CAL_FRM(head)->request_xmit_stamp)) -
+			(be64_to_cpu(RPL_CAL_FRM(head)->xmit_stamp) -
+			 be64_to_cpu(RPL_CAL_FRM(head)->reception_stamp));
+		delay = (delay + 1) >> 1;
+
+		rtdm_lock_get_irqsave(&tdma->lock, context);
+
+		call = tdma->calibration_call;
+		if (call == NULL) {
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+			break;
+		}
+		req_cal_job = rtpc_get_priv(call, struct tdma_request_cal);
+
+		req_cal_job->result_buffer[--req_cal_job->cal_rounds] = delay;
+
+		if (req_cal_job->cal_rounds > 0) {
+			tdma->job_list_revision++;
+			list_add(&req_cal_job->head.entry,
+				 &tdma->first_job->entry);
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+		} else {
+			tdma->calibration_call = NULL;
+
+			rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+			rtpc_complete_call(call, 0);
+		}
+
+		break;
+
+	default:
+		/*ERROR*/ rtdm_printk("TDMA: Unknown frame %d!\n",
+				      ntohs(head->id));
+	}
+
+kfree_out:
+	kfree_rtskb(rtskb);
+	return 0;
+}
+
+unsigned int tdma_get_mtu(struct rtnet_device *rtdev, unsigned int priority)
+{
+	struct tdma_priv *tdma;
+	rtdm_lockctx_t context;
+	struct tdma_slot *slot;
+	unsigned int mtu;
+
+	tdma = (struct tdma_priv *)rtdev->mac_priv->disc_priv;
+
+	rtdm_lock_get_irqsave(&tdma->lock, context);
+
+	slot = tdma->slot_table[(priority & RTSKB_CHANNEL_MASK) >>
+				RTSKB_CHANNEL_SHIFT];
+
+	if (unlikely(!slot)) {
+		mtu = rtdev->mtu;
+		goto out;
+	}
+
+	mtu = slot->mtu;
+
+out:
+	rtdm_lock_put_irqrestore(&tdma->lock, context);
+
+	return mtu;
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_worker.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_worker.c
new file mode 100644
index 0000000..d12a480
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtmac/tdma/tdma_worker.c
@@ -0,0 +1,231 @@
+/***
+ *
+ *  rtmac/tdma/tdma_worker.c
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <rtmac/rtmac_proto.h>
+#include <rtmac/tdma/tdma_proto.h>
+
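+/* All do_*_job() helpers below are entered and left with tdma->lock held;
+ * they drop it only around sleeping and transmitting. */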
+static void do_slot_job(struct tdma_priv *tdma, struct tdma_slot *job,
+			rtdm_lockctx_t lockctx)
+{
+	struct rtskb *rtskb;
+
+	if ((job->period != 1) &&
+	    (tdma->current_cycle % job->period != job->phasing))
+		return;
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	/* wait for slot begin, then send one pending packet */
+	rtdm_task_sleep_abs(tdma->current_cycle_start + SLOT_JOB(job)->offset,
+			    RTDM_TIMERMODE_REALTIME);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+	rtskb = __rtskb_prio_dequeue(SLOT_JOB(job)->queue);
+	if (!rtskb)
+		return;
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	rtmac_xmit(rtskb);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+}
+
+static void do_xmit_sync_job(struct tdma_priv *tdma, rtdm_lockctx_t lockctx)
+{
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	/* wait for beginning of next cycle, then send sync */
+	rtdm_task_sleep_abs(tdma->current_cycle_start + tdma->cycle_period,
+			    RTDM_TIMERMODE_REALTIME);
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+	tdma->current_cycle++;
+	tdma->current_cycle_start += tdma->cycle_period;
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	tdma_xmit_sync_frame(tdma);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+}
+
+static void do_backup_sync_job(struct tdma_priv *tdma, rtdm_lockctx_t lockctx)
+{
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	/* wait for backup slot */
+	rtdm_task_sleep_abs(tdma->current_cycle_start + tdma->backup_sync_inc,
+			    RTDM_TIMERMODE_REALTIME);
+
+	/* take over sync transmission if all earlier masters failed */
+	if (!test_and_clear_bit(TDMA_FLAG_RECEIVED_SYNC, &tdma->flags)) {
+		rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+		tdma->current_cycle++;
+		tdma->current_cycle_start += tdma->cycle_period;
+		rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+		tdma_xmit_sync_frame(tdma);
+
+		set_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags);
+	} else
+		clear_bit(TDMA_FLAG_BACKUP_ACTIVE, &tdma->flags);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+}
+
+static struct tdma_job *do_request_cal_job(struct tdma_priv *tdma,
+					   struct tdma_request_cal *job,
+					   rtdm_lockctx_t lockctx)
+{
+	struct rt_proc_call *call;
+	struct tdma_job *prev_job;
+	int err;
+
+	if ((job->period != 1) &&
+	    (tdma->current_cycle % job->period != job->phasing))
+		return &job->head;
+
+	/* remove job until we get a reply */
+	__list_del(job->head.entry.prev, job->head.entry.next);
+	job->head.ref_count--;
+	prev_job = tdma->current_job =
+		list_entry(job->head.entry.prev, struct tdma_job, entry);
+	prev_job->ref_count++;
+	tdma->job_list_revision++;
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	rtdm_task_sleep_abs(tdma->current_cycle_start + job->offset,
+			    RTDM_TIMERMODE_REALTIME);
+	err = tdma_xmit_request_cal_frame(
+		tdma, tdma->current_cycle + job->period, job->offset);
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+
+	/* terminate call on error */
+	if (err < 0) {
+		call = tdma->calibration_call;
+		tdma->calibration_call = NULL;
+
+		if (call) {
+			rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+			rtpc_complete_call(call, err);
+			rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+		}
+	}
+
+	return prev_job;
+}
+
+static struct tdma_job *do_reply_cal_job(struct tdma_priv *tdma,
+					 struct tdma_reply_cal *job,
+					 rtdm_lockctx_t lockctx)
+{
+	struct tdma_job *prev_job;
+
+	if (job->reply_cycle > tdma->current_cycle)
+		return &job->head;
+
+	/* remove the job */
+	__list_del(job->head.entry.prev, job->head.entry.next);
+	job->head.ref_count--;
+	prev_job = tdma->current_job =
+		list_entry(job->head.entry.prev, struct tdma_job, entry);
+	prev_job->ref_count++;
+	tdma->job_list_revision++;
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+
+	if (job->reply_cycle == tdma->current_cycle) {
+		/* send reply in the assigned slot */
+		rtdm_task_sleep_abs(tdma->current_cycle_start +
+					    job->reply_offset,
+				    RTDM_TIMERMODE_REALTIME);
+		rtmac_xmit(job->reply_rtskb);
+	} else {
+		/* cleanup if cycle already passed */
+		kfree_rtskb(job->reply_rtskb);
+	}
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+
+	return prev_job;
+}
+
+void tdma_worker(void *arg)
+{
+	struct tdma_priv *tdma = arg;
+	struct tdma_job *job;
+	rtdm_lockctx_t lockctx;
+	int ret;
+
+	ret = rtdm_event_wait(&tdma->worker_wakeup);
+	if (ret)
+		return;
+
+	rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+
+	job = tdma->first_job;
+
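+	/* walk the cyclic job list forever; handlers may temporarily drop
+	 * the lock, so ref_count pins the current job while it is in use */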
+	while (!rtdm_task_should_stop()) {
+		job->ref_count++;
+		switch (job->id) {
+		case WAIT_ON_SYNC:
+			rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+			ret = rtdm_event_wait(&tdma->sync_event);
+			if (ret)
+				return;
+			rtdm_lock_get_irqsave(&tdma->lock, lockctx);
+			break;
+
+		case XMIT_REQ_CAL:
+			job = do_request_cal_job(tdma, REQUEST_CAL_JOB(job),
+						 lockctx);
+			break;
+
+#ifdef CONFIG_XENO_DRIVERS_NET_TDMA_MASTER
+		case XMIT_SYNC:
+			do_xmit_sync_job(tdma, lockctx);
+			break;
+
+		case BACKUP_SYNC:
+			do_backup_sync_job(tdma, lockctx);
+			break;
+
+		case XMIT_RPL_CAL:
+			job = do_reply_cal_job(tdma, REPLY_CAL_JOB(job),
+					       lockctx);
+			break;
+#endif /* CONFIG_XENO_DRIVERS_NET_TDMA_MASTER */
+
+		default:
+			do_slot_job(tdma, SLOT_JOB(job), lockctx);
+			break;
+		}
+		job->ref_count--;
+
+		job = tdma->current_job =
+			list_entry(job->entry.next, struct tdma_job, entry);
+	}
+
+	rtdm_lock_put_irqrestore(&tdma->lock, lockctx);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_chrdev.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_chrdev.c
new file mode 100644
index 0000000..fa859c3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_chrdev.c
@@ -0,0 +1,241 @@
+/***
+ *
+ *  stack/rtnet_chrdev.c - implements char device for management interface
+ *
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@fet.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/errno.h>
+#include <linux/if_arp.h>
+#include <linux/kmod.h>
+#include <linux/miscdevice.h>
+#include <linux/netdevice.h>
+#include <linux/spinlock.h>
+
+#include <rtnet_chrdev.h>
+#include <rtnet_internal.h>
+#include <ipv4/route.h>
+
+static DEFINE_SPINLOCK(ioctl_handler_lock);
+static LIST_HEAD(ioctl_handlers);
+
+static long rtnet_ioctl(struct file *file, unsigned int request,
+			unsigned long arg)
+{
+	struct rtnet_ioctl_head head;
+	struct rtnet_device *rtdev = NULL;
+	struct rtnet_ioctls *ioctls;
+	struct list_head *entry;
+	int ret;
+
+	if (!capable(CAP_SYS_ADMIN))
+		return -EPERM;
+
+	ret = copy_from_user(&head, (void *)arg, sizeof(head));
+	if (ret != 0)
+		return -EFAULT;
+
+	spin_lock(&ioctl_handler_lock);
+
+	list_for_each (entry, &ioctl_handlers) {
+		ioctls = list_entry(entry, struct rtnet_ioctls, entry);
+
+		if (ioctls->ioctl_type == _IOC_TYPE(request)) {
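+			/* the reference keeps the handler registered after
+			 * we drop the lock; rtnet_unregister_ioctls() waits
+			 * for it to return to zero */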
+			atomic_inc(&ioctls->ref_count);
+
+			spin_unlock(&ioctl_handler_lock);
+
+			if ((_IOC_NR(request) & RTNET_IOC_NODEV_PARAM) == 0) {
+				rtdev = rtdev_get_by_name(head.if_name);
+				if (!rtdev) {
+					atomic_dec(&ioctls->ref_count);
+					return -ENODEV;
+				}
+			}
+
+			ret = ioctls->handler(rtdev, request, arg);
+
+			if (rtdev)
+				rtdev_dereference(rtdev);
+			atomic_dec(&ioctls->ref_count);
+
+			return ret;
+		}
+	}
+
+	spin_unlock(&ioctl_handler_lock);
+
+	return -ENOTTY;
+}
+
+static int rtnet_core_ioctl(struct rtnet_device *rtdev, unsigned int request,
+			    unsigned long arg)
+{
+	struct rtnet_core_cmd cmd;
+	int ret;
+
+	ret = copy_from_user(&cmd, (void *)arg, sizeof(cmd));
+	if (ret != 0)
+		return -EFAULT;
+
+	switch (request) {
+	case IOC_RT_IFUP:
+		ret = rtdev_up(rtdev, &cmd);
+		break;
+
+	case IOC_RT_IFDOWN:
+		ret = rtdev_down(rtdev);
+		break;
+
+	case IOC_RT_IFINFO:
+		if (cmd.args.info.ifindex > 0)
+			rtdev = rtdev_get_by_index(cmd.args.info.ifindex);
+		else
+			rtdev = rtdev_get_by_name(cmd.head.if_name);
+		if (rtdev == NULL)
+			return -ENODEV;
+
+		if (mutex_lock_interruptible(&rtdev->nrt_lock)) {
+			rtdev_dereference(rtdev);
+			return -ERESTARTSYS;
+		}
+
+		memcpy(cmd.head.if_name, rtdev->name, IFNAMSIZ);
+		cmd.args.info.ifindex = rtdev->ifindex;
+		cmd.args.info.type = rtdev->type;
+		cmd.args.info.ip_addr = rtdev->local_ip;
+		cmd.args.info.broadcast_ip = rtdev->broadcast_ip;
+		cmd.args.info.mtu = rtdev->mtu;
+		cmd.args.info.flags = rtdev->flags;
+		if ((cmd.args.info.flags & IFF_UP) &&
+		    (rtdev->link_state &
+		     (RTNET_LINK_STATE_PRESENT | RTNET_LINK_STATE_NOCARRIER)) ==
+			    RTNET_LINK_STATE_PRESENT)
+			cmd.args.info.flags |= IFF_RUNNING;
+
+		memcpy(cmd.args.info.dev_addr, rtdev->dev_addr, MAX_ADDR_LEN);
+
+		mutex_unlock(&rtdev->nrt_lock);
+
+		rtdev_dereference(rtdev);
+
+		if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+			return -EFAULT;
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+int rtnet_register_ioctls(struct rtnet_ioctls *ioctls)
+{
+	struct list_head *entry;
+	struct rtnet_ioctls *registered_ioctls;
+
+	RTNET_ASSERT(ioctls->handler != NULL, return -EINVAL;);
+
+	spin_lock(&ioctl_handler_lock);
+
+	list_for_each (entry, &ioctl_handlers) {
+		registered_ioctls =
+			list_entry(entry, struct rtnet_ioctls, entry);
+		if (registered_ioctls->ioctl_type == ioctls->ioctl_type) {
+			spin_unlock(&ioctl_handler_lock);
+			return -EEXIST;
+		}
+	}
+
+	list_add_tail(&ioctls->entry, &ioctl_handlers);
+	atomic_set(&ioctls->ref_count, 0);
+
+	spin_unlock(&ioctl_handler_lock);
+
+	return 0;
+}
+
+void rtnet_unregister_ioctls(struct rtnet_ioctls *ioctls)
+{
+	spin_lock(&ioctl_handler_lock);
+
+	while (atomic_read(&ioctls->ref_count) != 0) {
+		spin_unlock(&ioctl_handler_lock);
+
+		set_current_state(TASK_UNINTERRUPTIBLE);
+		schedule_timeout(1 * HZ); /* wait a second */
+
+		spin_lock(&ioctl_handler_lock);
+	}
+
+	list_del(&ioctls->entry);
+
+	spin_unlock(&ioctl_handler_lock);
+}
+
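+/*
+ * Typical handler registration from a protocol module (sketch; the ioctl
+ * type constant and handler name are placeholders):
+ *
+ *	static struct rtnet_ioctls my_ioctls = {
+ *		.service_name = "My Service",
+ *		.ioctl_type = RTNET_IOC_TYPE_XYZ,
+ *		.handler = my_ioctl_handler,
+ *	};
+ *
+ *	err = rtnet_register_ioctls(&my_ioctls);
+ */
+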
+static const struct file_operations rtnet_fops = {
+	.owner = THIS_MODULE,
+	.unlocked_ioctl = rtnet_ioctl,
+	.compat_ioctl = rtnet_ioctl,
+};
+
+static struct miscdevice rtnet_chr_misc_dev = {
+	.minor = RTNET_MINOR,
+	.name = "rtnet",
+	.fops = &rtnet_fops,
+};
+
+static struct rtnet_ioctls core_ioctls = { .service_name = "RTnet Core",
+					   .ioctl_type = RTNET_IOC_TYPE_CORE,
+					   .handler = rtnet_core_ioctl };
+
+/**
+ * rtnet_chrdev_init -
+ *
+ */
+int __init rtnet_chrdev_init(void)
+{
+	int err;
+
+	err = misc_register(&rtnet_chr_misc_dev);
+	if (err) {
+		printk("RTnet: unable to register rtnet management device/class "
+		       "(error %d)\n",
+		       err);
+		return err;
+	}
+
+	rtnet_register_ioctls(&core_ioctls);
+	return 0;
+}
+
+/**
+ * rtnet_chrdev_release -
+ *
+ */
+void rtnet_chrdev_release(void)
+{
+	misc_deregister(&rtnet_chr_misc_dev);
+}
+
+EXPORT_SYMBOL_GPL(rtnet_register_ioctls);
+EXPORT_SYMBOL_GPL(rtnet_unregister_ioctls);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_module.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_module.c
new file mode 100644
index 0000000..16dc91f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_module.c
@@ -0,0 +1,411 @@
+/***
+ *
+ *  stack/rtnet_module.c - module framework, proc file system
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/init.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+
+#include <rtdev_mgr.h>
+#include <rtnet_chrdev.h>
+#include <rtnet_internal.h>
+#include <rtnet_socket.h>
+#include <rtnet_rtpc.h>
+#include <stack_mgr.h>
+#include <rtwlan.h>
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("RTnet stack core");
+
+struct class *rtnet_class;
+
+struct rtnet_mgr STACK_manager;
+struct rtnet_mgr RTDEV_manager;
+
+EXPORT_SYMBOL_GPL(STACK_manager);
+EXPORT_SYMBOL_GPL(RTDEV_manager);
+
+const char rtnet_rtdm_provider_name[] =
+	"(C) 1999-2008 RTnet Development Team, http://www.rtnet.org";
+
+EXPORT_SYMBOL_GPL(rtnet_rtdm_provider_name);
+
+void rtnet_corectl_register(void);
+void rtnet_corectl_unregister(void);
+
+#ifdef CONFIG_XENO_OPT_VFILE
+/***
+ *      proc filesystem section
+ */
+struct xnvfile_directory rtnet_proc_root;
+EXPORT_SYMBOL_GPL(rtnet_proc_root);
+
+static int rtnet_devices_nrt_lock_get(struct xnvfile *vfile)
+{
+	return mutex_lock_interruptible(&rtnet_devices_nrt_lock);
+}
+
+static void rtnet_devices_nrt_lock_put(struct xnvfile *vfile)
+{
+	mutex_unlock(&rtnet_devices_nrt_lock);
+}
+
+static struct xnvfile_lock_ops rtnet_devices_nrt_lock_ops = {
+	.get = rtnet_devices_nrt_lock_get,
+	.put = rtnet_devices_nrt_lock_put,
+};
+
+static void *rtnet_devices_begin(struct xnvfile_regular_iterator *it)
+{
+	if (it->pos == 0)
+		return VFILE_SEQ_START;
+
+	return (void *)2UL;
+}
+
+static void *rtnet_devices_next(struct xnvfile_regular_iterator *it)
+{
+	if (it->pos >= MAX_RT_DEVICES)
+		return NULL;
+
+	return (void *)2UL;
+}
+
+static int rtnet_devices_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct rtnet_device *rtdev;
+
+	if (data == NULL) {
+		xnvfile_printf(it, "Index\tName\t\tFlags\n");
+		return 0;
+	}
+
+	rtdev = __rtdev_get_by_index(it->pos);
+	if (rtdev == NULL)
+		return VFILE_SEQ_SKIP;
+
+	xnvfile_printf(it, "%d\t%-15s %s%s%s%s\n", rtdev->ifindex, rtdev->name,
+		       (rtdev->flags & IFF_UP) ? "UP" : "DOWN",
+		       (rtdev->flags & IFF_BROADCAST) ? " BROADCAST" : "",
+		       (rtdev->flags & IFF_LOOPBACK) ? " LOOPBACK" : "",
+		       (rtdev->flags & IFF_PROMISC) ? " PROMISC" : "");
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_devices_vfile_ops = {
+	.begin = rtnet_devices_begin,
+	.next = rtnet_devices_next,
+	.show = rtnet_devices_show,
+};
+
+static struct xnvfile_regular rtnet_devices_vfile = {
+	.entry = { .lockops = &rtnet_devices_nrt_lock_ops, },
+	.ops = &rtnet_devices_vfile_ops,
+};
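+
+/*
+ * Illustrative "devices" vfile output (made-up values):
+ *
+ *	Index	Name		Flags
+ *	1	rteth0          UP BROADCAST
+ */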
+
+static int rtnet_rtskb_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	unsigned int rtskb_len;
+
+	rtskb_len = ALIGN_RTSKB_STRUCT_LEN + SKB_DATA_ALIGN(RTSKB_SIZE);
+
+	xnvfile_printf(it,
+		       "Statistics\t\tCurrent\tMaximum\n"
+		       "rtskb pools\t\t%d\t%d\n"
+		       "rtskbs\t\t\t%d\t%d\n"
+		       "rtskb memory need\t%d\t%d\n",
+		       rtskb_pools, rtskb_pools_max, rtskb_amount,
+		       rtskb_amount_max, rtskb_amount * rtskb_len,
+		       rtskb_amount_max * rtskb_len);
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_rtskb_vfile_ops = {
+	.show = rtnet_rtskb_show,
+};
+
+static struct xnvfile_regular rtnet_rtskb_vfile = {
+	.ops = &rtnet_rtskb_vfile_ops,
+};
+
+static int rtnet_version_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	const char verstr[] = "RTnet for Xenomai v" XENO_VERSION_STRING "\n"
+			      "RTcap:      "
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+			      "yes\n"
+#else
+			      "no\n"
+#endif
+			      "rtnetproxy: "
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_PROXY)
+			      "yes\n"
+#else
+			      "no\n"
+#endif
+			      "bug checks: "
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+			      "yes\n"
+#else
+			      "no\n"
+#endif
+		;
+
+	xnvfile_printf(it, "%s", verstr);
+
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_version_vfile_ops = {
+	.show = rtnet_version_show,
+};
+
+static struct xnvfile_regular rtnet_version_vfile = {
+	.ops = &rtnet_version_vfile_ops,
+};
+
+static void *rtnet_stats_begin(struct xnvfile_regular_iterator *it)
+{
+	return (void *)1UL;
+}
+
+static void *rtnet_stats_next(struct xnvfile_regular_iterator *it)
+{
+	if (it->pos >= MAX_RT_DEVICES)
+		return NULL;
+
+	return (void *)1UL;
+}
+
+static int rtnet_stats_show(struct xnvfile_regular_iterator *it, void *data)
+{
+	struct net_device_stats *stats;
+	struct rtnet_device *rtdev;
+
+	if (it->pos == 0) {
+		xnvfile_printf(it,
+			       "Inter-|   Receive                            "
+			       "                    |  Transmit\n");
+		xnvfile_printf(it,
+			       " face |bytes    packets errs drop fifo frame "
+			       "compressed multicast|bytes    packets errs "
+			       "drop fifo colls carrier compressed\n");
+		return 0;
+	}
+
+	rtdev = __rtdev_get_by_index(it->pos);
+	if (rtdev == NULL)
+		return VFILE_SEQ_SKIP;
+
+	if (rtdev->get_stats == NULL) {
+		xnvfile_printf(it, "%6s: No statistics available.\n",
+			       rtdev->name);
+		return 0;
+	}
+
+	stats = rtdev->get_stats(rtdev);
+	xnvfile_printf(
+		it,
+		"%6s:%8lu %7lu %4lu %4lu %4lu %5lu %10lu %9lu "
+		"%8lu %7lu %4lu %4lu %4lu %5lu %7lu %10lu\n",
+		rtdev->name, stats->rx_bytes, stats->rx_packets,
+		stats->rx_errors, stats->rx_dropped + stats->rx_missed_errors,
+		stats->rx_fifo_errors,
+		stats->rx_length_errors + stats->rx_over_errors +
+			stats->rx_crc_errors + stats->rx_frame_errors,
+		stats->rx_compressed, stats->multicast, stats->tx_bytes,
+		stats->tx_packets, stats->tx_errors, stats->tx_dropped,
+		stats->tx_fifo_errors, stats->collisions,
+		stats->tx_carrier_errors + stats->tx_aborted_errors +
+			stats->tx_window_errors + stats->tx_heartbeat_errors,
+		stats->tx_compressed);
+	return 0;
+}
+
+static struct xnvfile_regular_ops rtnet_stats_vfile_ops = {
+	.begin = rtnet_stats_begin,
+	.next = rtnet_stats_next,
+	.show = rtnet_stats_show,
+};
+
+static struct xnvfile_regular rtnet_stats_vfile = {
+	.entry = { .lockops = &rtnet_devices_nrt_lock_ops, },
+	.ops = &rtnet_stats_vfile_ops,
+};
+
+static int rtnet_proc_register(void)
+{
+	int err;
+
+	err = xnvfile_init_dir("rtnet", &rtnet_proc_root, NULL);
+	if (err < 0)
+		goto error1;
+
+	err = xnvfile_init_regular("devices", &rtnet_devices_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error2;
+
+	err = xnvfile_init_regular("rtskb", &rtnet_rtskb_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error3;
+
+	err = xnvfile_init_regular("version", &rtnet_version_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error4;
+
+	err = xnvfile_init_regular("stats", &rtnet_stats_vfile,
+				   &rtnet_proc_root);
+	if (err < 0)
+		goto error5;
+
+	return 0;
+
+error5:
+	xnvfile_destroy_regular(&rtnet_version_vfile);
+
+error4:
+	xnvfile_destroy_regular(&rtnet_rtskb_vfile);
+
+error3:
+	xnvfile_destroy_regular(&rtnet_devices_vfile);
+
+error2:
+	xnvfile_destroy_dir(&rtnet_proc_root);
+
+error1:
+	printk("RTnet: unable to initialize /proc entries\n");
+	return err;
+}
+
+static void rtnet_proc_unregister(void)
+{
+	xnvfile_destroy_regular(&rtnet_stats_vfile);
+	xnvfile_destroy_regular(&rtnet_version_vfile);
+	xnvfile_destroy_regular(&rtnet_rtskb_vfile);
+	xnvfile_destroy_regular(&rtnet_devices_vfile);
+	xnvfile_destroy_dir(&rtnet_proc_root);
+}
+#endif /* CONFIG_XENO_OPT_VFILE */
+
+/**
+ *  rtnet_init()
+ */
+int __init rtnet_init(void)
+{
+	int err = 0;
+
+	if (!rtdm_available())
+		return -ENOSYS;
+
+	printk("\n*** RTnet for Xenomai v" XENO_VERSION_STRING " ***\n\n");
+	printk("RTnet: initialising real-time networking\n");
+
+	rtnet_class = class_create(THIS_MODULE, "rtnet");
+	if (IS_ERR(rtnet_class))
+		return PTR_ERR(rtnet_class);
+
+	if ((err = rtskb_pools_init()) != 0)
+		goto err_out1;
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	if ((err = rtnet_proc_register()) != 0)
+		goto err_out2;
+#endif
+
+	/* initialize the Stack-Manager */
+	if ((err = rt_stack_mgr_init(&STACK_manager)) != 0)
+		goto err_out3;
+
+	/* initialize the RTDEV-Manager */
+	if ((err = rt_rtdev_mgr_init(&RTDEV_manager)) != 0)
+		goto err_out4;
+
+	rtnet_chrdev_init();
+
+	if ((err = rtwlan_init()) != 0)
+		goto err_out5;
+
+	if ((err = rtpc_init()) != 0)
+		goto err_out6;
+
+	rtnet_corectl_register();
+
+	return 0;
+
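+	/* unwind in reverse order of initialisation; note that the err_out2
+	 * label only exists when CONFIG_XENO_OPT_VFILE is enabled */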
+err_out6:
+	rtwlan_exit();
+
+err_out5:
+	rtnet_chrdev_release();
+	rt_rtdev_mgr_delete(&RTDEV_manager);
+
+err_out4:
+	rt_stack_mgr_delete(&STACK_manager);
+
+err_out3:
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtnet_proc_unregister();
+
+err_out2:
+#endif
+	rtskb_pools_release();
+
+err_out1:
+	class_destroy(rtnet_class);
+
+	return err;
+}
+
+/**
+ *  rtnet_release()
+ */
+void __exit rtnet_release(void)
+{
+	rtnet_corectl_unregister();
+
+	rtpc_cleanup();
+
+	rtwlan_exit();
+
+	rtnet_chrdev_release();
+
+	rt_stack_mgr_delete(&STACK_manager);
+	rt_rtdev_mgr_delete(&RTDEV_manager);
+
+	rtskb_pools_release();
+
+#ifdef CONFIG_XENO_OPT_VFILE
+	rtnet_proc_unregister();
+#endif
+
+	class_destroy(rtnet_class);
+
+	printk("RTnet: unloaded\n");
+}
+
+module_init(rtnet_init);
+module_exit(rtnet_release);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_rtpc.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_rtpc.c
new file mode 100644
index 0000000..cd5f054
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtnet_rtpc.c
@@ -0,0 +1,258 @@
+/***
+ *
+ *  stack/rtnet_rtpc.c
+ *
+ *  RTnet - real-time networking subsystem
+ *
+ *  Copyright (C) 2003-2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/kernel.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/slab.h>
+#include <linux/wait.h>
+
+#include <rtnet_rtpc.h>
+#include <rtdm/driver.h>
+
+static DEFINE_RTDM_LOCK(pending_calls_lock);
+static DEFINE_RTDM_LOCK(processed_calls_lock);
+static rtdm_event_t dispatch_event;
+static rtdm_task_t dispatch_task;
+static rtdm_nrtsig_t rtpc_nrt_signal;
+
+LIST_HEAD(pending_calls);
+LIST_HEAD(processed_calls);
+
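+/*
+ * Call flow: a Linux-context caller enqueues a struct rt_proc_call on
+ * pending_calls and sleeps; the real-time dispatch task executes the
+ * call's proc() handler and queues the result on processed_calls; the
+ * non-RT signal handler then wakes the caller back in Linux context.
+ * The initial ref_count of 2 lets the caller and the dispatcher side
+ * drop their references independently, the last one freeing the call.
+ */
+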
+#ifndef __wait_event_interruptible_timeout
+#define __wait_event_interruptible_timeout(wq, condition, ret)                 \
+	do {                                                                   \
+		wait_queue_t __wait;                                           \
+		init_waitqueue_entry(&__wait, current);                        \
+                                                                               \
+		add_wait_queue(&wq, &__wait);                                  \
+		for (;;) {                                                     \
+			set_current_state(TASK_INTERRUPTIBLE);                 \
+			if (condition)                                         \
+				break;                                         \
+			if (!signal_pending(current)) {                        \
+				ret = schedule_timeout(ret);                   \
+				if (!ret)                                      \
+					break;                                 \
+				continue;                                      \
+			}                                                      \
+			ret = -ERESTARTSYS;                                    \
+			break;                                                 \
+		}                                                              \
+		current->state = TASK_RUNNING;                                 \
+		remove_wait_queue(&wq, &__wait);                               \
+	} while (0)
+#endif
+
+#ifndef wait_event_interruptible_timeout
+#define wait_event_interruptible_timeout(wq, condition, timeout)               \
+	({                                                                     \
+		long __ret = timeout;                                          \
+		if (!(condition))                                              \
+			__wait_event_interruptible_timeout(wq, condition,      \
+							   __ret);             \
+		__ret;                                                         \
+	})
+#endif
+
+int rtnet_rtpc_dispatch_call(rtpc_proc proc, unsigned int timeout,
+			     void *priv_data, size_t priv_data_size,
+			     rtpc_copy_back_proc copy_back_handler,
+			     rtpc_cleanup_proc cleanup_handler)
+{
+	struct rt_proc_call *call;
+	rtdm_lockctx_t context;
+	int ret;
+
+	call = kmalloc(sizeof(struct rt_proc_call) + priv_data_size,
+		       GFP_KERNEL);
+	if (call == NULL)
+		return -ENOMEM;
+
+	memcpy(call->priv_data, priv_data, priv_data_size);
+
+	call->processed = 0;
+	call->proc = proc;
+	call->result = 0;
+	call->cleanup_handler = cleanup_handler;
+	atomic_set(&call->ref_count, 2); /* dispatcher + rt-procedure */
+	init_waitqueue_head(&call->call_wq);
+
+	rtdm_lock_get_irqsave(&pending_calls_lock, context);
+	list_add_tail(&call->list_entry, &pending_calls);
+	rtdm_lock_put_irqrestore(&pending_calls_lock, context);
+
+	rtdm_event_signal(&dispatch_event);
+
+	if (timeout > 0) {
+		ret = wait_event_interruptible_timeout(
+			call->call_wq, call->processed, (timeout * HZ) / 1000);
+		if (ret == 0)
+			ret = -ETIME;
+	} else
+		ret = wait_event_interruptible(call->call_wq, call->processed);
+
+	if (ret >= 0) {
+		if (copy_back_handler != NULL)
+			copy_back_handler(call, priv_data);
+		ret = call->result;
+	}
+
+	if (atomic_dec_and_test(&call->ref_count)) {
+		if (call->cleanup_handler != NULL)
+			call->cleanup_handler(&call->priv_data);
+		kfree(call);
+	}
+
+	return ret;
+}
+
+static inline struct rt_proc_call *rtpc_dequeue_pending_call(void)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call = NULL;
+
+	rtdm_lock_get_irqsave(&pending_calls_lock, context);
+	if (!list_empty(&pending_calls)) {
+		call = (struct rt_proc_call *)pending_calls.next;
+		list_del(&call->list_entry);
+	}
+	rtdm_lock_put_irqrestore(&pending_calls_lock, context);
+
+	return call;
+}
+
+static inline void rtpc_queue_processed_call(struct rt_proc_call *call)
+{
+	rtdm_lockctx_t context;
+	bool trigger;
+
+	rtdm_lock_get_irqsave(&processed_calls_lock, context);
+	trigger = list_empty(&processed_calls);
+	list_add_tail(&call->list_entry, &processed_calls);
+	rtdm_lock_put_irqrestore(&processed_calls_lock, context);
+
+	if (trigger)
+		rtdm_nrtsig_pend(&rtpc_nrt_signal);
+}
+
+static inline struct rt_proc_call *rtpc_dequeue_processed_call(void)
+{
+	rtdm_lockctx_t context;
+	struct rt_proc_call *call = NULL;
+
+	rtdm_lock_get_irqsave(&processed_calls_lock, context);
+	if (!list_empty(&processed_calls)) {
+		call = (struct rt_proc_call *)processed_calls.next;
+		list_del(&call->list_entry);
+	}
+	rtdm_lock_put_irqrestore(&processed_calls_lock, context);
+
+	return call;
+}
+
+static void rtpc_dispatch_handler(void *arg)
+{
+	struct rt_proc_call *call;
+	int ret;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(&dispatch_event) < 0)
+			break;
+
+		while ((call = rtpc_dequeue_pending_call())) {
+			ret = call->proc(call);
+			if (ret != -CALL_PENDING)
+				rtpc_complete_call(call, ret);
+		}
+	}
+}
+
+static void rtpc_signal_handler(rtdm_nrtsig_t *nrt_sig, void *arg)
+{
+	struct rt_proc_call *call;
+
+	while ((call = rtpc_dequeue_processed_call()) != NULL) {
+		call->processed = 1;
+		wake_up(&call->call_wq);
+
+		if (atomic_dec_and_test(&call->ref_count)) {
+			if (call->cleanup_handler != NULL)
+				call->cleanup_handler(&call->priv_data);
+			kfree(call);
+		}
+	}
+}
+
+void rtnet_rtpc_complete_call(struct rt_proc_call *call, int result)
+{
+	call->result = result;
+	rtpc_queue_processed_call(call);
+}
+
+void rtnet_rtpc_complete_call_nrt(struct rt_proc_call *call, int result)
+{
+	RTNET_ASSERT(!rtdm_in_rt_context(),
+		     rtnet_rtpc_complete_call(call, result);
+		     return;);
+
+	call->processed = 1;
+	wake_up(&call->call_wq);
+
+	if (atomic_dec_and_test(&call->ref_count)) {
+		if (call->cleanup_handler != NULL)
+			call->cleanup_handler(&call->priv_data);
+		kfree(call);
+	}
+}
+
+int __init rtpc_init(void)
+{
+	int ret;
+
+	rtdm_nrtsig_init(&rtpc_nrt_signal, rtpc_signal_handler, NULL);
+
+	rtdm_event_init(&dispatch_event, 0);
+
+	ret = rtdm_task_init(&dispatch_task, "rtnet-rtpc",
+			     rtpc_dispatch_handler, NULL,
+			     RTDM_TASK_LOWEST_PRIORITY, 0);
+	if (ret < 0) {
+		rtdm_event_destroy(&dispatch_event);
+		rtdm_nrtsig_destroy(&rtpc_nrt_signal);
+	}
+
+	return ret;
+}
+
+void rtpc_cleanup(void)
+{
+	rtdm_event_destroy(&dispatch_event);
+	rtdm_task_destroy(&dispatch_task);
+	rtdm_nrtsig_destroy(&rtpc_nrt_signal);
+}
+
+EXPORT_SYMBOL_GPL(rtnet_rtpc_dispatch_call);
+EXPORT_SYMBOL_GPL(rtnet_rtpc_complete_call);
+EXPORT_SYMBOL_GPL(rtnet_rtpc_complete_call_nrt);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtskb.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtskb.c
new file mode 100644
index 0000000..e92cb3c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtskb.c
@@ -0,0 +1,535 @@
+/***
+ *
+ *  stack/rtskb.c - rtskb implementation for rtnet
+ *
+ *  Copyright (C) 2002      Ulrich Marx <marx@fet.uni-hannover.de>,
+ *  Copyright (C) 2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *  Copyright (C) 2006 Jorge Almeida <j-almeida@criticalsoftware.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of version 2 of the GNU General Public License as
+ *  published by the Free Software Foundation.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/slab.h>
+#include <rtnet_checksum.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtskb.h>
+#include <rtnet_port.h>
+
+static unsigned int global_rtskbs = DEFAULT_GLOBAL_RTSKBS;
+module_param(global_rtskbs, uint, 0444);
+MODULE_PARM_DESC(global_rtskbs,
+		 "Number of realtime socket buffers in global pool");
+
+/* Linux slab pool for rtskbs */
+static struct kmem_cache *rtskb_slab_pool;
+
+/* pool of rtskbs for global use */
+struct rtskb_pool global_pool;
+EXPORT_SYMBOL_GPL(global_pool);
+
+/* pool statistics */
+unsigned int rtskb_pools = 0;
+unsigned int rtskb_pools_max = 0;
+unsigned int rtskb_amount = 0;
+unsigned int rtskb_amount_max = 0;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+/* RTcap interface */
+rtdm_lock_t rtcap_lock;
+EXPORT_SYMBOL_GPL(rtcap_lock);
+
+void (*rtcap_handler)(struct rtskb *skb) = NULL;
+EXPORT_SYMBOL_GPL(rtcap_handler);
+#endif
+
+/***
+ *  rtskb_copy_and_csum_bits
+ */
+unsigned int rtskb_copy_and_csum_bits(const struct rtskb *skb, int offset,
+				      u8 *to, int len, unsigned int csum)
+{
+	int copy;
+
+	/* Copy header. */
+	if ((copy = skb->len - offset) > 0) {
+		if (copy > len)
+			copy = len;
+		csum = rtnet_csum_copy(skb->data + offset, to, copy, csum);
+		if ((len -= copy) == 0)
+			return csum;
+		offset += copy;
+		to += copy;
+	}
+
+	RTNET_ASSERT(len == 0, );
+	return csum;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_copy_and_csum_bits);
+
+/***
+ *  rtskb_copy_and_csum_dev
+ */
+void rtskb_copy_and_csum_dev(const struct rtskb *skb, u8 *to)
+{
+	unsigned int csum;
+	unsigned int csstart;
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		csstart = skb->h.raw - skb->data;
+
+		if (csstart > skb->len)
+			BUG();
+	} else
+		csstart = skb->len;
+
+	memcpy(to, skb->data, csstart);
+
+	csum = 0;
+	if (csstart != skb->len)
+		csum = rtskb_copy_and_csum_bits(skb, csstart, to + csstart,
+						skb->len - csstart, 0);
+
+	if (skb->ip_summed == CHECKSUM_PARTIAL) {
+		unsigned int csstuff = csstart + skb->csum;
+
+		*((unsigned short *)(to + csstuff)) = csum_fold(csum);
+	}
+}
+
+EXPORT_SYMBOL_GPL(rtskb_copy_and_csum_dev);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+/**
+ *  skb_over_panic - private function
+ *  @skb: buffer
+ *  @sz: size
+ *  @here: address
+ *
+ *  Out of line support code for rtskb_put(). Not user callable.
+ */
+void rtskb_over_panic(struct rtskb *skb, int sz, void *here)
+{
+	rtdm_printk("RTnet: rtskb_put :over: %p:%d put:%d dev:%s\n", here,
+		    skb->len, sz, (skb->rtdev) ? skb->rtdev->name : "<NULL>");
+}
+
+EXPORT_SYMBOL_GPL(rtskb_over_panic);
+
+/**
+ *  skb_under_panic - private function
+ *  @skb: buffer
+ *  @sz: size
+ *  @here: address
+ *
+ *  Out of line support code for rtskb_push(). Not user callable.
+ */
+void rtskb_under_panic(struct rtskb *skb, int sz, void *here)
+{
+	rtdm_printk("RTnet: rtskb_push :under: %p:%d put:%d dev:%s\n", here,
+		    skb->len, sz, (skb->rtdev) ? skb->rtdev->name : "<NULL>");
+}
+
+EXPORT_SYMBOL_GPL(rtskb_under_panic);
+#endif /* CONFIG_XENO_DRIVERS_NET_CHECKED */
+
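+/* A pool's lock_ops pin the pool owner while buffers are in flight: trylock
+ * is invoked on every dequeue and unlock when the buffer returns to the pool
+ * (or immediately if the dequeue failed). The module get/put operations at
+ * the end of this file are one such implementation. */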
+static struct rtskb *__rtskb_pool_dequeue(struct rtskb_pool *pool)
+{
+	struct rtskb_queue *queue = &pool->queue;
+	struct rtskb *skb;
+
+	if (pool->lock_ops && !pool->lock_ops->trylock(pool->lock_cookie))
+		return NULL;
+	skb = __rtskb_dequeue(queue);
+	if (skb == NULL && pool->lock_ops)
+		pool->lock_ops->unlock(pool->lock_cookie);
+
+	return skb;
+}
+
+struct rtskb *rtskb_pool_dequeue(struct rtskb_pool *pool)
+{
+	struct rtskb_queue *queue = &pool->queue;
+	rtdm_lockctx_t context;
+	struct rtskb *skb;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	skb = __rtskb_pool_dequeue(pool);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+
+	return skb;
+}
+EXPORT_SYMBOL_GPL(rtskb_pool_dequeue);
+
+static void __rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb)
+{
+	struct rtskb_queue *queue = &pool->queue;
+
+	__rtskb_queue_tail(queue, skb);
+	if (pool->lock_ops)
+		pool->lock_ops->unlock(pool->lock_cookie);
+}
+
+void rtskb_pool_queue_tail(struct rtskb_pool *pool, struct rtskb *skb)
+{
+	struct rtskb_queue *queue = &pool->queue;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&queue->lock, context);
+	__rtskb_pool_queue_tail(pool, skb);
+	rtdm_lock_put_irqrestore(&queue->lock, context);
+}
+EXPORT_SYMBOL_GPL(rtskb_pool_queue_tail);
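+
+/*
+ * Editor's note: the lock_ops protocol above pairs one trylock() per
+ * rtskb leaving the pool with one unlock() when it is queued back, so
+ * the pool owner stays pinned while buffers are in flight. A minimal
+ * sketch of custom lock ops (my_pool_busy is purely illustrative):
+ *
+ *   static atomic_t my_pool_busy = ATOMIC_INIT(0);
+ *
+ *   static int my_pool_trylock(void *cookie)
+ *   {
+ *           atomic_inc(&my_pool_busy);  // pin the pool owner
+ *           return 1;                   // non-zero means success
+ *   }
+ *
+ *   static void my_pool_unlock(void *cookie)
+ *   {
+ *           atomic_dec(&my_pool_busy);  // drop the pin
+ *   }
+ *
+ *   static const struct rtskb_pool_lock_ops my_lock_ops = {
+ *           .trylock = my_pool_trylock,
+ *           .unlock  = my_pool_unlock,
+ *   };
+ */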
+
+/***
+ *  alloc_rtskb - allocate an rtskb from a pool
+ *  @size: required buffer size (to check against maximum boundary)
+ *  @pool: pool to take the rtskb from
+ */
+struct rtskb *alloc_rtskb(unsigned int size, struct rtskb_pool *pool)
+{
+	struct rtskb *skb;
+
+	RTNET_ASSERT(size <= SKB_DATA_ALIGN(RTSKB_SIZE), return NULL;);
+
+	skb = rtskb_pool_dequeue(pool);
+	if (!skb)
+		return NULL;
+
+	/* Load the data pointers. */
+	skb->data = skb->buf_start;
+	skb->tail = skb->buf_start;
+	skb->end = skb->buf_start + size;
+
+	/* Set up other states */
+	skb->chain_end = skb;
+	skb->len = 0;
+	skb->pkt_type = PACKET_HOST;
+	skb->xmit_stamp = NULL;
+	skb->ip_summed = CHECKSUM_NONE;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	skb->cap_flags = 0;
+#endif
+
+	return skb;
+}
+
+EXPORT_SYMBOL_GPL(alloc_rtskb);
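+
+/*
+ * Editor's sketch of the typical rtskb life cycle against the global
+ * pool (payload/len are placeholders; rtskb_put() appends data just
+ * like skb_put() does for regular skbs):
+ *
+ *   struct rtskb *skb = alloc_rtskb(ETH_FRAME_LEN, &global_pool);
+ *
+ *   if (!skb)
+ *           return -ENOMEM;
+ *   memcpy(rtskb_put(skb, len), payload, len);
+ *   ...
+ *   kfree_rtskb(skb);  // hands the buffer back to skb->pool
+ */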
+
+/***
+ *  kfree_rtskb
+ *  @skb: rtskb to be returned to its pool
+ */
+void kfree_rtskb(struct rtskb *skb)
+{
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	rtdm_lockctx_t context;
+	struct rtskb *comp_skb;
+	struct rtskb *next_skb;
+	struct rtskb *chain_end;
+#endif
+
+	RTNET_ASSERT(skb != NULL, return;);
+	RTNET_ASSERT(skb->pool != NULL, return;);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	next_skb = skb;
+	chain_end = skb->chain_end;
+
+	do {
+		skb = next_skb;
+		next_skb = skb->next;
+
+		rtdm_lock_get_irqsave(&rtcap_lock, context);
+
+		if (skb->cap_flags & RTSKB_CAP_SHARED) {
+			skb->cap_flags &= ~RTSKB_CAP_SHARED;
+
+			comp_skb = skb->cap_comp_skb;
+			skb->pool = xchg(&comp_skb->pool, skb->pool);
+
+			rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+			rtskb_pool_queue_tail(comp_skb->pool, comp_skb);
+		} else {
+			rtdm_lock_put_irqrestore(&rtcap_lock, context);
+
+			skb->chain_end = skb;
+			rtskb_pool_queue_tail(skb->pool, skb);
+		}
+
+	} while (chain_end != skb);
+
+#else /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+
+	rtskb_pool_queue_tail(skb->pool, skb);
+
+#endif /* CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP */
+}
+
+EXPORT_SYMBOL_GPL(kfree_rtskb);
+
+/***
+ *  rtskb_pool_init
+ *  @pool: pool to be initialized
+ *  @initial_size: number of rtskbs to allocate
+ *  @lock_ops: optional locking callbacks for the pool (may be NULL)
+ *  @lock_cookie: opaque cookie passed to @lock_ops
+ *  return: number of actually allocated rtskbs
+ */
+unsigned int rtskb_pool_init(struct rtskb_pool *pool, unsigned int initial_size,
+			     const struct rtskb_pool_lock_ops *lock_ops,
+			     void *lock_cookie)
+{
+	unsigned int i;
+
+	rtskb_queue_init(&pool->queue);
+
+	i = rtskb_pool_extend(pool, initial_size);
+
+	rtskb_pools++;
+	if (rtskb_pools > rtskb_pools_max)
+		rtskb_pools_max = rtskb_pools;
+
+	pool->lock_ops = lock_ops;
+	pool->lock_cookie = lock_cookie;
+
+	return i;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_pool_init);
+
+static int rtskb_module_pool_trylock(void *cookie)
+{
+	int err = 1;
+	if (cookie)
+		err = try_module_get(cookie);
+	return err;
+}
+
+static void rtskb_module_pool_unlock(void *cookie)
+{
+	if (cookie)
+		module_put(cookie);
+}
+
+static const struct rtskb_pool_lock_ops rtskb_module_lock_ops = {
+	.trylock = rtskb_module_pool_trylock,
+	.unlock = rtskb_module_pool_unlock,
+};
+
+unsigned int __rtskb_module_pool_init(struct rtskb_pool *pool,
+				      unsigned int initial_size,
+				      struct module *module)
+{
+	return rtskb_pool_init(pool, initial_size, &rtskb_module_lock_ops,
+			       module);
+}
+EXPORT_SYMBOL_GPL(__rtskb_module_pool_init);
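+
+/*
+ * Editor's sketch: a protocol module would typically create a private
+ * pool so buffer starvation stays local; 16 is an arbitrary example
+ * size. rtskb_module_pool_init() passes THIS_MODULE as lock cookie,
+ * keeping the module pinned while its buffers are in use:
+ *
+ *   static struct rtskb_pool my_pool;
+ *
+ *   if (rtskb_module_pool_init(&my_pool, 16) < 16) {
+ *           rtskb_pool_release(&my_pool);
+ *           return -ENOMEM;
+ *   }
+ */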
+
+/***
+ *  rtskb_pool_release
+ *  @pool: pool to release
+ */
+void rtskb_pool_release(struct rtskb_pool *pool)
+{
+	struct rtskb *skb;
+
+	while ((skb = rtskb_dequeue(&pool->queue)) != NULL) {
+		rtdev_unmap_rtskb(skb);
+		kmem_cache_free(rtskb_slab_pool, skb);
+		rtskb_amount--;
+	}
+
+	rtskb_pools--;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_pool_release);
+
+unsigned int rtskb_pool_extend(struct rtskb_pool *pool, unsigned int add_rtskbs)
+{
+	unsigned int i;
+	struct rtskb *skb;
+
+	/* the return type is unsigned, so signal failure with 0, not -EINVAL */
+	RTNET_ASSERT(pool != NULL, return 0;);
+
+	for (i = 0; i < add_rtskbs; i++) {
+		/* get rtskb from slab pool */
+		if (!(skb = kmem_cache_alloc(rtskb_slab_pool, GFP_KERNEL))) {
+			printk(KERN_ERR
+			       "RTnet: rtskb allocation from slab pool failed\n");
+			break;
+		}
+
+		/* fill the header with zero */
+		memset(skb, 0, sizeof(struct rtskb));
+
+		skb->chain_end = skb;
+		skb->pool = pool;
+		skb->buf_start =
+			((unsigned char *)skb) + ALIGN_RTSKB_STRUCT_LEN;
+#ifdef CONFIG_XENO_DRIVERS_NET_CHECKED
+		skb->buf_end = skb->buf_start + SKB_DATA_ALIGN(RTSKB_SIZE) - 1;
+#endif
+
+		if (rtdev_map_rtskb(skb) < 0) {
+			kmem_cache_free(rtskb_slab_pool, skb);
+			break;
+		}
+
+		rtskb_queue_tail(&pool->queue, skb);
+
+		rtskb_amount++;
+		if (rtskb_amount > rtskb_amount_max)
+			rtskb_amount_max = rtskb_amount;
+	}
+
+	return i;
+}
+
+unsigned int rtskb_pool_shrink(struct rtskb_pool *pool, unsigned int rem_rtskbs)
+{
+	unsigned int i;
+	struct rtskb *skb;
+
+	for (i = 0; i < rem_rtskbs; i++) {
+		if ((skb = rtskb_dequeue(&pool->queue)) == NULL)
+			break;
+
+		rtdev_unmap_rtskb(skb);
+		kmem_cache_free(rtskb_slab_pool, skb);
+		rtskb_amount--;
+	}
+
+	return i;
+}
+
+/* Note: acquires only the first skb of a chain! */
+int rtskb_acquire(struct rtskb *rtskb, struct rtskb_pool *comp_pool)
+{
+	struct rtskb *comp_rtskb;
+	struct rtskb_pool *release_pool;
+	rtdm_lockctx_t context;
+
+	rtdm_lock_get_irqsave(&comp_pool->queue.lock, context);
+
+	comp_rtskb = __rtskb_pool_dequeue(comp_pool);
+	if (!comp_rtskb) {
+		rtdm_lock_put_irqrestore(&comp_pool->queue.lock, context);
+		return -ENOMEM;
+	}
+
+	rtdm_lock_put(&comp_pool->queue.lock);
+
+	comp_rtskb->chain_end = comp_rtskb;
+	comp_rtskb->pool = release_pool = rtskb->pool;
+
+	rtdm_lock_get(&release_pool->queue.lock);
+
+	__rtskb_pool_queue_tail(release_pool, comp_rtskb);
+
+	rtdm_lock_put_irqrestore(&release_pool->queue.lock, context);
+
+	rtskb->pool = comp_pool;
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_acquire);
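+
+/*
+ * Editor's note: rtskb_acquire() never copies data. It re-homes
+ * @rtskb into @comp_pool and pays for it with one idle buffer pulled
+ * from @comp_pool, so both pools keep a constant size. Typical
+ * receive-path use (illustrative only):
+ *
+ *   if (rtskb_acquire(skb, &sock->skb_pool) < 0) {
+ *           kfree_rtskb(skb);  // socket pool exhausted: drop
+ *           return -ENOMEM;
+ *   }
+ *   rtskb_queue_tail(&sock->incoming, skb);
+ */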
+
+/* clone rtskb to another, allocating the new rtskb from pool */
+struct rtskb *rtskb_clone(struct rtskb *rtskb, struct rtskb_pool *pool)
+{
+	struct rtskb *clone_rtskb;
+	unsigned int total_len;
+
+	clone_rtskb = alloc_rtskb(rtskb->end - rtskb->buf_start, pool);
+	if (clone_rtskb == NULL)
+		return NULL;
+
+	/* Note: we do not clone rtskb.sk and rtskb.xmit_stamp
+	   until real use cases show up. */
+
+	clone_rtskb->priority = rtskb->priority;
+	clone_rtskb->rtdev = rtskb->rtdev;
+	clone_rtskb->time_stamp = rtskb->time_stamp;
+
+	clone_rtskb->mac.raw = clone_rtskb->buf_start;
+	clone_rtskb->nh.raw = clone_rtskb->buf_start;
+	clone_rtskb->h.raw = clone_rtskb->buf_start;
+
+	clone_rtskb->data += rtskb->data - rtskb->buf_start;
+	clone_rtskb->tail += rtskb->tail - rtskb->buf_start;
+	clone_rtskb->mac.raw += rtskb->mac.raw - rtskb->buf_start;
+	clone_rtskb->nh.raw += rtskb->nh.raw - rtskb->buf_start;
+	clone_rtskb->h.raw += rtskb->h.raw - rtskb->buf_start;
+
+	clone_rtskb->protocol = rtskb->protocol;
+	clone_rtskb->pkt_type = rtskb->pkt_type;
+
+	clone_rtskb->ip_summed = rtskb->ip_summed;
+	clone_rtskb->csum = rtskb->csum;
+
+	total_len = rtskb->len + rtskb->data - rtskb->mac.raw;
+	memcpy(clone_rtskb->mac.raw, rtskb->mac.raw, total_len);
+	clone_rtskb->len = rtskb->len;
+
+	return clone_rtskb;
+}
+
+EXPORT_SYMBOL_GPL(rtskb_clone);
+
+int rtskb_pools_init(void)
+{
+	rtskb_slab_pool = kmem_cache_create_usercopy("rtskb_slab_pool",
+						ALIGN_RTSKB_STRUCT_LEN +
+						    SKB_DATA_ALIGN(RTSKB_SIZE),
+						0, SLAB_HWCACHE_ALIGN,
+						0, SKB_DATA_ALIGN(RTSKB_SIZE), NULL);
+	if (rtskb_slab_pool == NULL)
+		return -ENOMEM;
+
+	/* reset the statistics (cache is accounted separately) */
+	rtskb_pools = 0;
+	rtskb_pools_max = 0;
+	rtskb_amount = 0;
+	rtskb_amount_max = 0;
+
+	/* create the global rtskb pool */
+	if (rtskb_module_pool_init(&global_pool, global_rtskbs) < global_rtskbs)
+		goto err_out;
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_ADDON_RTCAP)
+	rtdm_lock_init(&rtcap_lock);
+#endif
+
+	return 0;
+
+err_out:
+	rtskb_pool_release(&global_pool);
+	kmem_cache_destroy(rtskb_slab_pool);
+
+	return -ENOMEM;
+}
+
+void rtskb_pools_release(void)
+{
+	rtskb_pool_release(&global_pool);
+	kmem_cache_destroy(rtskb_slab_pool);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtwlan.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtwlan.c
new file mode 100644
index 0000000..26e9262
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/rtwlan.c
@@ -0,0 +1,219 @@
+/* rtwlan.c
+ *
+ * rtwlan protocol stack
+ * Copyright (c) 2006, Daniel Gregorek <dxg@gmx.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/module.h>
+
+#include <rtnet_port.h>
+
+#include <rtwlan.h>
+
+int rtwlan_rx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev)
+{
+	struct ieee80211_hdr *hdr = (struct ieee80211_hdr *)rtskb->data;
+	u16 fc = le16_to_cpu(hdr->frame_ctl);
+
+	/* strip rtwlan header */
+	rtskb_pull(rtskb, ieee80211_get_hdrlen(fc));
+	rtskb->protocol = rt_eth_type_trans(rtskb, rtnet_dev);
+
+	/* forward rtskb to rtnet */
+	rtnetif_rx(rtskb);
+
+	return 0;
+}
+
+EXPORT_SYMBOL_GPL(rtwlan_rx);
+
+int rtwlan_tx(struct rtskb *rtskb, struct rtnet_device *rtnet_dev)
+{
+	struct rtwlan_device *rtwlan_dev = rtnetdev_priv(rtnet_dev);
+	struct ieee80211_hdr_3addr header = { /* Ensure zero initialized */
+					      .duration_id = 0,
+					      .seq_ctl = 0
+	};
+	int ret;
+	u8 dest[ETH_ALEN], src[ETH_ALEN];
+
+	/* Get source and destination addresses */
+
+	memcpy(src, rtskb->data + ETH_ALEN, ETH_ALEN);
+
+	if (rtwlan_dev->mode == RTWLAN_TXMODE_MCAST) {
+		memcpy(dest, rtnet_dev->dev_addr, ETH_ALEN);
+		dest[0] |= 0x01;
+	} else {
+		memcpy(dest, rtskb->data, ETH_ALEN);
+	}
+
+	/*
+	 * Generate an ieee80211-compatible header.
+	 */
+	memcpy(header.addr3, src, ETH_ALEN); /* BSSID */
+	memcpy(header.addr2, src, ETH_ALEN); /* SA */
+	memcpy(header.addr1, dest, ETH_ALEN); /* DA */
+
+	/* Write frame control field */
+	header.frame_ctl =
+		cpu_to_le16(IEEE80211_FTYPE_DATA | IEEE80211_STYPE_DATA);
+
+	memcpy(rtskb_push(rtskb, IEEE80211_3ADDR_LEN), &header,
+	       IEEE80211_3ADDR_LEN);
+
+	ret = (*rtwlan_dev->hard_start_xmit)(rtskb, rtnet_dev);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(rtwlan_tx);
+
+/**
+ * rtwlan_alloc_dev - Allocates and sets up a wlan device
+ * @sizeof_priv: size of additional driver-private structure to
+ *               be allocated for this wlan device
+ * @dev_pool_size: size of the device's rtskb pool
+ *
+ * Fill in the fields of the device structure with wlan-generic
+ * values. Basically does everything except registering the device.
+ *
+ * A 32-byte alignment is enforced for the private data area.
+ */
+
+struct rtnet_device *rtwlan_alloc_dev(unsigned sizeof_priv,
+				      unsigned dev_pool_size)
+{
+	struct rtnet_device *rtnet_dev;
+
+	RTWLAN_DEBUG("Start.\n");
+
+	rtnet_dev = rt_alloc_etherdev(
+		sizeof(struct rtwlan_device) + sizeof_priv, dev_pool_size);
+	if (!rtnet_dev)
+		return NULL;
+
+	rtnet_dev->hard_start_xmit = rtwlan_tx;
+
+	rtdev_alloc_name(rtnet_dev, "rtwlan%d");
+
+	return rtnet_dev;
+}
+
+EXPORT_SYMBOL_GPL(rtwlan_alloc_dev);
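+
+/*
+ * Editor's sketch of a WLAN driver probe path (struct my_priv and
+ * my_hw_xmit are placeholders for driver-specific code):
+ *
+ *   struct rtnet_device *dev;
+ *   struct rtwlan_device *rtwlan;
+ *
+ *   dev = rtwlan_alloc_dev(sizeof(struct my_priv), 32);
+ *   if (!dev)
+ *           return -ENOMEM;
+ *   rtwlan = rtnetdev_priv(dev);
+ *   rtwlan->hard_start_xmit = my_hw_xmit;  // hardware TX hook
+ */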
+
+int rtwlan_ioctl(struct rtnet_device *rtdev, unsigned int request,
+		 unsigned long arg)
+{
+	struct rtwlan_cmd cmd;
+	struct ifreq ifr;
+	int ret = 0;
+
+	if (copy_from_user(&cmd, (void *)arg, sizeof(cmd)) != 0)
+		return -EFAULT;
+
+	/*
+	 * FIXME: a proper do_ioctl() should expect a __user pointer as
+	 * arg. This only works with the existing WLAN support because
+	 * the only driver currently providing this feature is broken,
+	 * not doing the copy_to/from_user dance.
+	 */
+	memset(&ifr, 0, sizeof(ifr));
+	ifr.ifr_data = &cmd;
+
+	switch (request) {
+	case IOC_RTWLAN_IFINFO:
+		if (cmd.args.info.ifindex > 0)
+			rtdev = rtdev_get_by_index(cmd.args.info.ifindex);
+		else
+			rtdev = rtdev_get_by_name(cmd.head.if_name);
+		if (rtdev == NULL)
+			return -ENODEV;
+
+		if (mutex_lock_interruptible(&rtdev->nrt_lock)) {
+			rtdev_dereference(rtdev);
+			return -ERESTARTSYS;
+		}
+
+		if (rtdev->do_ioctl)
+			ret = rtdev->do_ioctl(rtdev, &ifr, request);
+		else
+			ret = -ENORTWLANDEV;
+
+		memcpy(cmd.head.if_name, rtdev->name, IFNAMSIZ);
+		cmd.args.info.ifindex = rtdev->ifindex;
+		cmd.args.info.flags = rtdev->flags;
+
+		mutex_unlock(&rtdev->nrt_lock);
+
+		rtdev_dereference(rtdev);
+
+		break;
+
+	case IOC_RTWLAN_TXMODE:
+	case IOC_RTWLAN_BITRATE:
+	case IOC_RTWLAN_CHANNEL:
+	case IOC_RTWLAN_RETRY:
+	case IOC_RTWLAN_TXPOWER:
+	case IOC_RTWLAN_AUTORESP:
+	case IOC_RTWLAN_DROPBCAST:
+	case IOC_RTWLAN_DROPMCAST:
+	case IOC_RTWLAN_REGREAD:
+	case IOC_RTWLAN_REGWRITE:
+	case IOC_RTWLAN_BBPWRITE:
+	case IOC_RTWLAN_BBPREAD:
+	case IOC_RTWLAN_BBPSENS:
+		if (mutex_lock_interruptible(&rtdev->nrt_lock))
+			return -ERESTARTSYS;
+
+		if (rtdev->do_ioctl)
+			ret = rtdev->do_ioctl(rtdev, &ifr, request);
+		else
+			ret = -ENORTWLANDEV;
+
+		mutex_unlock(&rtdev->nrt_lock);
+
+		break;
+
+	default:
+		ret = -ENOTTY;
+	}
+
+	if (copy_to_user((void *)arg, &cmd, sizeof(cmd)) != 0)
+		return -EFAULT;
+
+	return ret;
+}
+
+struct rtnet_ioctls rtnet_wlan_ioctls = {
+	.service_name = "rtwlan ioctl",
+	.ioctl_type = RTNET_IOC_TYPE_RTWLAN,
+	.handler = rtwlan_ioctl,
+};
+
+int __init rtwlan_init(void)
+{
+	if (rtnet_register_ioctls(&rtnet_wlan_ioctls))
+		rtdm_printk(KERN_ERR "Failed to register rtnet_wlan_ioctl!\n");
+
+	return 0;
+}
+
+void rtwlan_exit(void)
+{
+	rtnet_unregister_ioctls(&rtnet_wlan_ioctls);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/socket.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/socket.c
new file mode 100644
index 0000000..f030663
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/socket.c
@@ -0,0 +1,395 @@
+/***
+ *
+ *  stack/socket.c - sockets implementation for rtnet
+ *
+ *  Copyright (C) 1999       Lineo, Inc
+ *                1999, 2002 David A. Schleef <ds@schleef.org>
+ *                2002       Ulrich Marx <marx@kammer.uni-hannover.de>
+ *                2003-2005  Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+#include <linux/spinlock.h>
+#include <linux/socket.h>
+#include <linux/in.h>
+#include <linux/err.h>
+#include <linux/ip.h>
+#include <linux/tcp.h>
+#include <asm/bitops.h>
+
+#include <rtdm/net.h>
+#include <rtnet_internal.h>
+#include <rtnet_iovec.h>
+#include <rtnet_socket.h>
+#include <ipv4/protocol.h>
+
+#define SKB_POOL_CLOSED 0
+
+static unsigned int socket_rtskbs = DEFAULT_SOCKET_RTSKBS;
+module_param(socket_rtskbs, uint, 0444);
+MODULE_PARM_DESC(socket_rtskbs,
+		 "Default number of realtime socket buffers in socket pools");
+
+/************************************************************************
+ *  internal socket functions                                           *
+ ************************************************************************/
+
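+/***
+ *  rt_bare_socket_init - initialise the core fields of a socket
+ *  returns the number of rtskbs allocated for the socket's pool
+ */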
+int rt_bare_socket_init(struct rtdm_fd *fd, unsigned short protocol,
+			unsigned int priority, unsigned int pool_size)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int err;
+
+	err = rtskb_pool_init(&sock->skb_pool, pool_size, NULL, fd);
+	if (err < 0)
+		return err;
+
+	sock->protocol = protocol;
+	sock->priority = priority;
+
+	return err;
+}
+EXPORT_SYMBOL_GPL(rt_bare_socket_init);
+
+/***
+ *  rt_socket_init - initialises a new socket structure
+ */
+int rt_socket_init(struct rtdm_fd *fd, unsigned short protocol)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	unsigned int pool_size;
+
+	sock->flags = 0;
+	sock->callback_func = NULL;
+
+	rtskb_queue_init(&sock->incoming);
+
+	sock->timeout = 0;
+
+	rtdm_lock_init(&sock->param_lock);
+	rtdm_sem_init(&sock->pending_sem, 0);
+
+	pool_size = rt_bare_socket_init(fd, protocol,
+					RTSKB_PRIO_VALUE(SOCK_DEF_PRIO,
+							 RTSKB_DEF_RT_CHANNEL),
+					socket_rtskbs);
+	sock->pool_size = pool_size;
+	mutex_init(&sock->pool_nrt_lock);
+
+	if (pool_size < socket_rtskbs) {
+		/* fix statistics */
+		if (pool_size == 0)
+			rtskb_pools--;
+
+		rt_socket_cleanup(fd);
+		return -ENOMEM;
+	}
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(rt_socket_init);
+
+/***
+ *  rt_socket_cleanup - releases resources allocated for the socket
+ */
+void rt_socket_cleanup(struct rtdm_fd *fd)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+
+	rtdm_sem_destroy(&sock->pending_sem);
+
+	mutex_lock(&sock->pool_nrt_lock);
+
+	set_bit(SKB_POOL_CLOSED, &sock->flags);
+
+	if (sock->pool_size > 0)
+		rtskb_pool_release(&sock->skb_pool);
+
+	mutex_unlock(&sock->pool_nrt_lock);
+}
+EXPORT_SYMBOL_GPL(rt_socket_cleanup);
+
+/***
+ *  rt_socket_common_ioctl
+ */
+int rt_socket_common_ioctl(struct rtdm_fd *fd, int request, void __user *arg)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+	int ret = 0;
+	struct rtnet_callback *callback;
+	const unsigned int *val;
+	unsigned int _val;
+	const nanosecs_rel_t *timeout;
+	nanosecs_rel_t _timeout;
+	rtdm_lockctx_t context;
+
+	switch (request) {
+	case RTNET_RTIOC_XMITPARAMS:
+		val = rtnet_get_arg(fd, &_val, arg, sizeof(_val));
+		if (IS_ERR(val))
+			return PTR_ERR(val);
+		sock->priority = *val;
+		break;
+
+	case RTNET_RTIOC_TIMEOUT:
+		timeout = rtnet_get_arg(fd, &_timeout, arg, sizeof(_timeout));
+		if (IS_ERR(timeout))
+			return PTR_ERR(timeout);
+		sock->timeout = *timeout;
+		break;
+
+	case RTNET_RTIOC_CALLBACK:
+		if (rtdm_fd_is_user(fd))
+			return -EACCES;
+
+		rtdm_lock_get_irqsave(&sock->param_lock, context);
+
+		callback = arg;
+		sock->callback_func = callback->func;
+		sock->callback_arg = callback->arg;
+
+		rtdm_lock_put_irqrestore(&sock->param_lock, context);
+		break;
+
+	case RTNET_RTIOC_EXTPOOL:
+		val = rtnet_get_arg(fd, &_val, arg, sizeof(_val));
+		if (IS_ERR(val))
+			return PTR_ERR(val);
+
+		if (rtdm_in_rt_context())
+			return -ENOSYS;
+
+		mutex_lock(&sock->pool_nrt_lock);
+
+		if (test_bit(SKB_POOL_CLOSED, &sock->flags)) {
+			mutex_unlock(&sock->pool_nrt_lock);
+			return -EBADF;
+		}
+		ret = rtskb_pool_extend(&sock->skb_pool, *val);
+		sock->pool_size += ret;
+
+		mutex_unlock(&sock->pool_nrt_lock);
+
+		if (ret == 0 && *val > 0)
+			ret = -ENOMEM;
+
+		break;
+
+	case RTNET_RTIOC_SHRPOOL:
+		val = rtnet_get_arg(fd, &_val, arg, sizeof(_val));
+		if (IS_ERR(val))
+			return PTR_ERR(val);
+
+		if (rtdm_in_rt_context())
+			return -ENOSYS;
+
+		mutex_lock(&sock->pool_nrt_lock);
+
+		ret = rtskb_pool_shrink(&sock->skb_pool, *val);
+		sock->pool_size -= ret;
+
+		mutex_unlock(&sock->pool_nrt_lock);
+
+		if (ret == 0 && *val > 0)
+			ret = -EBUSY;
+
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rt_socket_common_ioctl);
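+
+/*
+ * Editor's sketch: from user space, a socket's private rtskb pool can
+ * be resized through the ioctls handled above (error checks elided):
+ *
+ *   unsigned int extra = 32;
+ *
+ *   ioctl(sockfd, RTNET_RTIOC_EXTPOOL, &extra);  // add 32 rtskbs
+ *   ...
+ *   ioctl(sockfd, RTNET_RTIOC_SHRPOOL, &extra);  // return them
+ */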
+
+/***
+ *  rt_socket_if_ioctl
+ */
+int rt_socket_if_ioctl(struct rtdm_fd *fd, int request, void __user *arg)
+{
+	struct rtnet_device *rtdev;
+	struct ifreq _ifr, *ifr, *u_ifr;
+	struct sockaddr_in _sin;
+	struct ifconf _ifc, *ifc, *u_ifc;
+	int ret = 0, size = 0, i;
+	short flags;
+
+	if (request == SIOCGIFCONF) {
+		u_ifc = arg;
+		ifc = rtnet_get_arg(fd, &_ifc, u_ifc, sizeof(_ifc));
+		if (IS_ERR(ifc))
+			return PTR_ERR(ifc);
+
+		for (u_ifr = ifc->ifc_req, i = 1; i <= MAX_RT_DEVICES;
+		     i++, u_ifr++) {
+			rtdev = rtdev_get_by_index(i);
+			if (rtdev == NULL)
+				continue;
+
+			if ((rtdev->flags & IFF_UP) == 0) {
+				rtdev_dereference(rtdev);
+				continue;
+			}
+
+			size += sizeof(struct ifreq);
+			if (size > ifc->ifc_len) {
+				rtdev_dereference(rtdev);
+				size = ifc->ifc_len;
+				break;
+			}
+
+			ret = rtnet_put_arg(fd, u_ifr->ifr_name, rtdev->name,
+					    IFNAMSIZ);
+			if (ret == 0) {
+				memset(&_sin, 0, sizeof(_sin));
+				_sin.sin_family = AF_INET;
+				_sin.sin_addr.s_addr = rtdev->local_ip;
+				ret = rtnet_put_arg(fd, &u_ifr->ifr_addr, &_sin,
+						    sizeof(_sin));
+			}
+
+			rtdev_dereference(rtdev);
+			if (ret)
+				return ret;
+		}
+
+		return rtnet_put_arg(fd, &u_ifc->ifc_len, &size, sizeof(size));
+	}
+
+	u_ifr = arg;
+	ifr = rtnet_get_arg(fd, &_ifr, u_ifr, sizeof(_ifr));
+	if (IS_ERR(ifr))
+		return PTR_ERR(ifr);
+
+	if (request == SIOCGIFNAME) {
+		rtdev = rtdev_get_by_index(ifr->ifr_ifindex);
+		if (rtdev == NULL)
+			return -ENODEV;
+		ret = rtnet_put_arg(fd, u_ifr->ifr_name, rtdev->name, IFNAMSIZ);
+		goto out;
+	}
+
+	rtdev = rtdev_get_by_name(ifr->ifr_name);
+	if (rtdev == NULL)
+		return -ENODEV;
+
+	switch (request) {
+	case SIOCGIFINDEX:
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_ifindex, &rtdev->ifindex,
+				    sizeof(u_ifr->ifr_ifindex));
+		break;
+
+	case SIOCGIFFLAGS:
+		flags = rtdev->flags;
+		if ((ifr->ifr_flags & IFF_UP) &&
+		    (rtdev->link_state &
+		     (RTNET_LINK_STATE_PRESENT | RTNET_LINK_STATE_NOCARRIER)) ==
+			    RTNET_LINK_STATE_PRESENT)
+			flags |= IFF_RUNNING;
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_flags, &flags,
+				    sizeof(u_ifr->ifr_flags));
+		break;
+
+	case SIOCGIFHWADDR:
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_hwaddr.sa_data,
+				    rtdev->dev_addr, rtdev->addr_len);
+		if (!ret)
+			ret = rtnet_put_arg(
+				fd, &u_ifr->ifr_hwaddr.sa_family, &rtdev->type,
+				sizeof(u_ifr->ifr_hwaddr.sa_family));
+		break;
+
+	case SIOCGIFADDR:
+		memset(&_sin, 0, sizeof(_sin));
+		_sin.sin_family = AF_INET;
+		_sin.sin_addr.s_addr = rtdev->local_ip;
+		ret = rtnet_put_arg(fd, &u_ifr->ifr_addr, &_sin, sizeof(_sin));
+		break;
+
+	case SIOCETHTOOL:
+		if (rtdev->do_ioctl != NULL) {
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+			ret = rtdev->do_ioctl(rtdev, ifr, request);
+		} else
+			ret = -EOPNOTSUPP;
+		break;
+
+	case SIOCDEVPRIVATE ... SIOCDEVPRIVATE + 15:
+		if (rtdev->do_ioctl != NULL)
+			ret = rtdev->do_ioctl(rtdev, ifr, request);
+		else
+			ret = -EOPNOTSUPP;
+		break;
+
+	default:
+		ret = -EOPNOTSUPP;
+		break;
+	}
+
+out:
+	rtdev_dereference(rtdev);
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rt_socket_if_ioctl);
+
+int rt_socket_select_bind(struct rtdm_fd *fd, rtdm_selector_t *selector,
+			  enum rtdm_selecttype type, unsigned fd_index)
+{
+	struct rtsocket *sock = rtdm_fd_to_private(fd);
+
+	switch (type) {
+	case XNSELECT_READ:
+		return rtdm_sem_select(&sock->pending_sem, selector,
+				       XNSELECT_READ, fd_index);
+	default:
+		return -EBADF;
+	}
+}
+EXPORT_SYMBOL_GPL(rt_socket_select_bind);
+
+void *rtnet_get_arg(struct rtdm_fd *fd, void *tmp, const void *src, size_t len)
+{
+	int ret;
+
+	if (!rtdm_fd_is_user(fd))
+		return (void *)src;
+
+	ret = rtdm_copy_from_user(fd, tmp, src, len);
+	if (ret)
+		return ERR_PTR(ret);
+
+	return tmp;
+}
+EXPORT_SYMBOL_GPL(rtnet_get_arg);
+
+int rtnet_put_arg(struct rtdm_fd *fd, void *dst, const void *src, size_t len)
+{
+	if (!rtdm_fd_is_user(fd)) {
+		if (dst != src)
+			memcpy(dst, src, len);
+		return 0;
+	}
+
+	return rtdm_copy_to_user(fd, dst, src, len);
+}
+EXPORT_SYMBOL_GPL(rtnet_put_arg);
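+
+/*
+ * Editor's note: rtnet_get_arg()/rtnet_put_arg() hide whether an fd
+ * belongs to a kernel or user-space caller. The canonical handler
+ * pattern, as used throughout this file (result is a placeholder):
+ *
+ *   unsigned int _val;
+ *   const unsigned int *val;
+ *
+ *   val = rtnet_get_arg(fd, &_val, arg, sizeof(_val));
+ *   if (IS_ERR(val))
+ *           return PTR_ERR(val);
+ *   ...
+ *   return rtnet_put_arg(fd, arg, &result, sizeof(result));
+ */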
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/stack_mgr.c b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/stack_mgr.c
new file mode 100644
index 0000000..a7e2245
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/net/stack/stack_mgr.c
@@ -0,0 +1,256 @@
+/***
+ *
+ *  stack/stack_mgr.c - Stack-Manager
+ *
+ *  Copyright (C) 2002 Ulrich Marx <marx@kammer.uni-hannover.de>
+ *  Copyright (C) 2003-2006 Jan Kiszka <jan.kiszka@web.de>
+ *  Copyright (C) 2006 Jorge Almeida <j-almeida@criticalsoftware.com>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <linux/moduleparam.h>
+
+#include <rtdev.h>
+#include <rtnet_internal.h>
+#include <rtskb_fifo.h>
+#include <stack_mgr.h>
+
+static unsigned int stack_mgr_prio = RTNET_DEF_STACK_PRIORITY;
+module_param(stack_mgr_prio, uint, 0444);
+MODULE_PARM_DESC(stack_mgr_prio, "Priority of the stack manager task");
+
+#if (CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE &                                    \
+     (CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE - 1)) != 0
+#error CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE must be power of 2!
+#endif
+static DECLARE_RTSKB_FIFO(rx, CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE);
+
+struct list_head rt_packets[RTPACKET_HASH_TBL_SIZE];
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+struct list_head rt_packets_all;
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+DEFINE_RTDM_LOCK(rt_packets_lock);
+
+/***
+ *  __rtdev_add_pack:       add protocol (Layer 3)
+ *  @pt:                    the new protocol
+ *  @module:                owning module (THIS_MODULE for drivers)
+ */
+int __rtdev_add_pack(struct rtpacket_type *pt, struct module *module)
+{
+	int ret = 0;
+	rtdm_lockctx_t context;
+
+	INIT_LIST_HEAD(&pt->list_entry);
+	pt->refcount = 0;
+	if (pt->trylock == NULL)
+		pt->trylock = rtdev_lock_pack;
+	if (pt->unlock == NULL)
+		pt->unlock = rtdev_unlock_pack;
+	pt->owner = module;
+
+	rtdm_lock_get_irqsave(&rt_packets_lock, context);
+
+	if (pt->type == htons(ETH_P_ALL))
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+		list_add_tail(&pt->list_entry, &rt_packets_all);
+#else /* !CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+		ret = -EINVAL;
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+	else
+		list_add_tail(
+			&pt->list_entry,
+			&rt_packets[ntohs(pt->type) & RTPACKET_HASH_KEY_MASK]);
+
+	rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+	return ret;
+}
+
+EXPORT_SYMBOL_GPL(__rtdev_add_pack);
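+
+/*
+ * Editor's sketch: a layer-3 protocol hooks into the stack with a
+ * struct rtpacket_type (my_ip_rcv is a placeholder handler; the
+ * rtdev_add_pack() wrapper supplies THIS_MODULE):
+ *
+ *   static struct rtpacket_type my_pack = {
+ *           .type    = __constant_htons(ETH_P_IP),
+ *           .handler = my_ip_rcv,
+ *   };
+ *
+ *   rtdev_add_pack(&my_pack);     // module init
+ *   ...
+ *   rtdev_remove_pack(&my_pack);  // module exit
+ */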
+
+/***
+ *  rtdev_remove_pack:  remove protocol (Layer 3)
+ *  @pt:                protocol
+ */
+void rtdev_remove_pack(struct rtpacket_type *pt)
+{
+	rtdm_lockctx_t context;
+
+	RTNET_ASSERT(pt != NULL, return;);
+
+	rtdm_lock_get_irqsave(&rt_packets_lock, context);
+	list_del(&pt->list_entry);
+	rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+}
+
+EXPORT_SYMBOL_GPL(rtdev_remove_pack);
+
+/***
+ *  rtnetif_rx: called from the driver interrupt handler (IRQs
+ *  disabled!); queues the packet for the rtdev-owned stack manager
+ *
+ *  @skb - the packet
+ */
+void rtnetif_rx(struct rtskb *skb)
+{
+	RTNET_ASSERT(skb != NULL, return;);
+	RTNET_ASSERT(skb->rtdev != NULL, return;);
+
+	if (unlikely(rtskb_fifo_insert_inirq(&rx.fifo, skb) < 0)) {
+		rtdm_printk("RTnet: dropping packet in %s()\n", __FUNCTION__);
+		kfree_rtskb(skb);
+	}
+}
+
+EXPORT_SYMBOL_GPL(rtnetif_rx);
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK)
+#define __DELIVER_PREFIX
+#else /* !CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+#define __DELIVER_PREFIX static inline
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+
+__DELIVER_PREFIX void rt_stack_deliver(struct rtskb *rtskb)
+{
+	unsigned short hash;
+	struct rtpacket_type *pt_entry;
+	rtdm_lockctx_t context;
+	struct rtnet_device *rtdev = rtskb->rtdev;
+	int err;
+	int eth_p_all_hit = 0;
+
+	rtcap_report_incoming(rtskb);
+
+	rtskb->nh.raw = rtskb->data;
+
+	rtdm_lock_get_irqsave(&rt_packets_lock, context);
+
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+	eth_p_all_hit = 0;
+	list_for_each_entry (pt_entry, &rt_packets_all, list_entry) {
+		if (!pt_entry->trylock(pt_entry))
+			continue;
+		rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+		pt_entry->handler(rtskb, pt_entry);
+
+		rtdm_lock_get_irqsave(&rt_packets_lock, context);
+		pt_entry->unlock(pt_entry);
+		eth_p_all_hit = 1;
+	}
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+
+	hash = ntohs(rtskb->protocol) & RTPACKET_HASH_KEY_MASK;
+
+	list_for_each_entry (pt_entry, &rt_packets[hash], list_entry)
+		if (pt_entry->type == rtskb->protocol) {
+			if (!pt_entry->trylock(pt_entry))
+				continue;
+			rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+			err = pt_entry->handler(rtskb, pt_entry);
+
+			rtdm_lock_get_irqsave(&rt_packets_lock, context);
+			pt_entry->unlock(pt_entry);
+
+			if (likely(!err)) {
+				rtdm_lock_put_irqrestore(&rt_packets_lock,
+							 context);
+				return;
+			}
+		}
+
+	rtdm_lock_put_irqrestore(&rt_packets_lock, context);
+
+	/* Don't warn if an ETH_P_ALL listener was present or when running
+	   in promiscuous mode (RTcap). */
+	if (unlikely(!eth_p_all_hit && !(rtdev->flags & IFF_PROMISC)))
+		rtdm_printk("RTnet: no one cared for packet with layer 3 "
+			    "protocol type 0x%04x\n",
+			    ntohs(rtskb->protocol));
+
+	kfree_rtskb(rtskb);
+}
+
+#if IS_ENABLED(CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK)
+EXPORT_SYMBOL_GPL(rt_stack_deliver);
+#endif /* CONFIG_XENO_DRIVERS_NET_DRV_LOOPBACK */
+
+static void rt_stack_mgr_task(void *arg)
+{
+	rtdm_event_t *mgr_event = &((struct rtnet_mgr *)arg)->event;
+	struct rtskb *rtskb;
+
+	while (!rtdm_task_should_stop()) {
+		if (rtdm_event_wait(mgr_event) < 0)
+			break;
+
+		/* we are the only reader => no locking required */
+		while ((rtskb = __rtskb_fifo_remove(&rx.fifo)))
+			rt_stack_deliver(rtskb);
+	}
+}
+
+/***
+ *  rt_stack_connect
+ */
+void rt_stack_connect(struct rtnet_device *rtdev, struct rtnet_mgr *mgr)
+{
+	rtdev->stack_event = &mgr->event;
+}
+
+EXPORT_SYMBOL_GPL(rt_stack_connect);
+
+/***
+ *  rt_stack_disconnect
+ */
+void rt_stack_disconnect(struct rtnet_device *rtdev)
+{
+	rtdev->stack_event = NULL;
+}
+
+EXPORT_SYMBOL_GPL(rt_stack_disconnect);
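+
+/*
+ * Editor's sketch: a NIC driver wires itself to the stack manager in
+ * its probe/remove path (STACK_manager is assumed to be the global
+ * manager instance declared in stack_mgr.h):
+ *
+ *   rt_stack_connect(rtdev, &STACK_manager);  // probe
+ *   ...
+ *   rt_stack_disconnect(rtdev);               // remove
+ */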
+
+/***
+ *  rt_stack_mgr_init
+ */
+int rt_stack_mgr_init(struct rtnet_mgr *mgr)
+{
+	int i;
+
+	rtskb_fifo_init(&rx.fifo, CONFIG_XENO_DRIVERS_NET_RX_FIFO_SIZE);
+
+	for (i = 0; i < RTPACKET_HASH_TBL_SIZE; i++)
+		INIT_LIST_HEAD(&rt_packets[i]);
+#ifdef CONFIG_XENO_DRIVERS_NET_ETH_P_ALL
+	INIT_LIST_HEAD(&rt_packets_all);
+#endif /* CONFIG_XENO_DRIVERS_NET_ETH_P_ALL */
+
+	rtdm_event_init(&mgr->event, 0);
+
+	return rtdm_task_init(&mgr->task, "rtnet-stack", rt_stack_mgr_task, mgr,
+			      stack_mgr_prio, 0);
+}
+
+/***
+ *  rt_stack_mgr_delete
+ */
+void rt_stack_mgr_delete(struct rtnet_mgr *mgr)
+{
+	rtdm_event_destroy(&mgr->event);
+	rtdm_task_destroy(&mgr->task);
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A.c b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A.c
new file mode 100644
index 0000000..c9274fb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A.c
@@ -0,0 +1,1188 @@
+/*
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/ioport.h>
+#include <linux/slab.h>
+#include <asm/io.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_DESCRIPTION("RTDM-based driver for 16550A UARTs");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("1.5.2");
+MODULE_LICENSE("GPL");
+
+#define RT_16550_DRIVER_NAME	"xeno_16550A"
+
+#define MAX_DEVICES		8
+
+#define IN_BUFFER_SIZE		4096
+#define OUT_BUFFER_SIZE		4096
+
+#define DEFAULT_BAUD_BASE	115200
+#define DEFAULT_TX_FIFO		16
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+#define LCR_DLAB		0x80
+
+#define FCR_FIFO		0x01
+#define FCR_RESET_RX		0x02
+#define FCR_RESET_TX		0x04
+
+#define IER_RX			0x01
+#define IER_TX			0x02
+#define IER_STAT		0x04
+#define IER_MODEM		0x08
+
+#define IIR_MODEM		0x00
+#define IIR_PIRQ		0x01
+#define IIR_TX			0x02
+#define IIR_RX			0x04
+#define IIR_STAT		0x06
+#define IIR_MASK		0x07
+
+#define RHR			0	/* Receive Holding Buffer */
+#define THR			0	/* Transmit Holding Buffer */
+#define DLL			0	/* Divisor Latch LSB */
+#define IER			1	/* Interrupt Enable Register */
+#define DLM			1	/* Divisor Latch MSB */
+#define IIR			2	/* Interrupt Id Register */
+#define FCR			2	/* Fifo Control Register */
+#define LCR			3	/* Line Control Register */
+#define MCR			4	/* Modem Control Register */
+#define LSR			5	/* Line Status Register */
+#define MSR			6	/* Modem Status Register */
+
+struct rt_16550_context {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	unsigned long base_addr;	/* hardware IO base address */
+#ifdef CONFIG_XENO_DRIVERS_16550A_ANY
+	int io_mode;			/* hardware IO-access mode */
+#endif
+	int tx_fifo;			/* cached global tx_fifo[<device>] */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+	int ier_status;			/* IER cache */
+	int mcr_status;			/* MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+};
+
+static const struct rtser_config default_config = {
+	0xFFFF, RTSER_DEF_BAUD, RTSER_DEF_PARITY, RTSER_DEF_BITS,
+	RTSER_DEF_STOPB, RTSER_DEF_HAND, RTSER_DEF_FIFO_DEPTH, 0,
+	RTSER_DEF_TIMEOUT, RTSER_DEF_TIMEOUT, RTSER_DEF_TIMEOUT,
+	RTSER_DEF_TIMESTAMP_HISTORY, RTSER_DEF_EVENT_MASK, RTSER_DEF_RS485
+};
+
+static struct rtdm_device *device[MAX_DEVICES];
+
+static unsigned int irq[MAX_DEVICES];
+static unsigned long irqtype[MAX_DEVICES] = {
+	[0 ... MAX_DEVICES-1] = RTDM_IRQTYPE_SHARED | RTDM_IRQTYPE_EDGE
+};
+static unsigned int baud_base[MAX_DEVICES];
+static int tx_fifo[MAX_DEVICES];
+
+module_param_array(irq, uint, NULL, 0400);
+module_param_array(baud_base, uint, NULL, 0400);
+module_param_array(tx_fifo, int, NULL, 0400);
+
+MODULE_PARM_DESC(irq, "IRQ numbers of the serial devices");
+MODULE_PARM_DESC(baud_base, "Maximum baud rate of the serial device "
+		 "(internal clock rate / 16)");
+MODULE_PARM_DESC(tx_fifo, "Transmitter FIFO size");
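+
+/*
+ * Editor's sketch: the arrays above are indexed per device, e.g. two
+ * ports sharing IRQ 4 (further parameters such as the I/O base come
+ * from 16550A_io.h and are omitted here):
+ *
+ *   modprobe xeno_16550A irq=4,4 baud_base=115200,115200
+ */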
+
+#include "16550A_io.h"
+#include "16550A_pnp.h"
+#include "16550A_pci.h"
+
+static inline int rt_16550_rx_interrupt(struct rt_16550_context *ctx,
+					uint64_t * timestamp)
+{
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+	int rbytes = 0;
+	int lsr = 0;
+	int c;
+
+	do {
+		c = rt_16550_reg_in(mode, base, RHR);	/* read input char */
+
+		ctx->in_buf[ctx->in_tail] = c;
+		if (ctx->in_history)
+			ctx->in_history[ctx->in_tail] = *timestamp;
+		ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
+		if (++ctx->in_npend > IN_BUFFER_SIZE) {
+			lsr |= RTSER_SOFT_OVERRUN_ERR;
+			ctx->in_npend--;
+		}
+
+		rbytes++;
+		lsr &= ~RTSER_LSR_DATA;
+		lsr |= (rt_16550_reg_in(mode, base, LSR) &
+			(RTSER_LSR_DATA | RTSER_LSR_OVERRUN_ERR |
+			 RTSER_LSR_PARITY_ERR | RTSER_LSR_FRAMING_ERR |
+			 RTSER_LSR_BREAK_IND));
+	} while (lsr & RTSER_LSR_DATA);
+
+	/* save new errors */
+	ctx->status |= lsr;
+
+	/* If we are enforcing the RTSCTS control flow and the input
+	   buffer is busy above the specified high watermark, clear
+	   RTS. */
+/*	if (uart->i_count >= uart->config.rts_hiwm &&
+	    (uart->config.handshake & RT_UART_RTSCTS) != 0 &&
+	    (uart->modem & MCR_RTS) != 0) {
+		uart->modem &= ~MCR_RTS;
+		rt_16550_reg_out(mode, base, MCR, uart->modem);
+	}*/
+
+	return rbytes;
+}
+
+static void rt_16550_tx_fill(struct rt_16550_context *ctx)
+{
+	int c;
+	int count;
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+
+/*	if (uart->modem & MSR_CTS)*/
+	{
+		for (count = ctx->tx_fifo;
+		     (count > 0) && (ctx->out_npend > 0);
+		     count--, ctx->out_npend--) {
+			c = ctx->out_buf[ctx->out_head++];
+			rt_16550_reg_out(mode, base, THR, c);
+			ctx->out_head &= (OUT_BUFFER_SIZE - 1);
+		}
+	}
+}
+
+static inline void rt_16550_stat_interrupt(struct rt_16550_context *ctx)
+{
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+
+	ctx->status |= (rt_16550_reg_in(mode, base, LSR) &
+			(RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			 RTSER_LSR_FRAMING_ERR | RTSER_LSR_BREAK_IND));
+}
+
+static int rt_16550_interrupt(rtdm_irq_t *irq_context)
+{
+	struct rt_16550_context *ctx;
+	unsigned long base;
+	int mode;
+	int iir;
+	uint64_t timestamp = rtdm_clock_read();
+	int rbytes = 0;
+	int events = 0;
+	int modem;
+	int ret = RTDM_IRQ_NONE;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_16550_context);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	while (1) {
+		iir = rt_16550_reg_in(mode, base, IIR) & IIR_MASK;
+		if (iir & IIR_PIRQ)
+			break;
+
+		if (iir == IIR_RX) {
+			rbytes += rt_16550_rx_interrupt(ctx, &timestamp);
+			events |= RTSER_EVENT_RXPEND;
+		} else if (iir == IIR_STAT)
+			rt_16550_stat_interrupt(ctx);
+		else if (iir == IIR_TX)
+			rt_16550_tx_fill(ctx);
+		else if (iir == IIR_MODEM) {
+			modem = rt_16550_reg_in(mode, base, MSR);
+			/* MSR layout: bits 0-3 are the delta flags for
+			   bits 4-7 (CTS/DSR/RI/DCD), so a set delta bit
+			   whose state bit is high/low marks a rising or
+			   falling edge. */
+			if (modem & (modem << 4))
+				events |= RTSER_EVENT_MODEMHI;
+			if ((modem ^ 0xF0) & (modem << 4))
+				events |= RTSER_EVENT_MODEMLO;
+		}
+
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else
+			ctx->in_nwait -= rbytes;
+	}
+
+	if (ctx->status) {
+		events |= RTSER_EVENT_ERRPEND;
+		ctx->ier_status &= ~IER_STAT;
+	}
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
+		/* mask transmitter empty interrupt */
+		ctx->ier_status &= ~IER_TX;
+
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	/* update interrupt mask */
+	rt_16550_reg_out(mode, base, IER, ctx->ier_status);
+
+	rtdm_lock_put(&ctx->lock);
+
+	return ret;
+}
+
+static int rt_16550_set_config(struct rt_16550_context *ctx,
+			       const struct rtser_config *config,
+			       uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	unsigned long base = ctx->base_addr;
+	int mode = rt_16550_io_mode_from_ctx(ctx);
+	int err = 0;
+
+	/* make line configuration atomic and IRQ-safe */
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD) {
+		int dev_id = rtdm_fd_minor(rtdm_private_to_fd(ctx));
+		int baud_div;
+
+		ctx->config.baud_rate = config->baud_rate;
+		baud_div = (baud_base[dev_id] + (ctx->config.baud_rate>>1)) /
+			ctx->config.baud_rate;
+		rt_16550_reg_out(mode, base, LCR, LCR_DLAB);
+		rt_16550_reg_out(mode, base, DLL, baud_div & 0xff);
+		rt_16550_reg_out(mode, base, DLM, baud_div >> 8);
+	}
+
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+
+	if (config->config_mask & (RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS |
+				   RTSER_SET_STOP_BITS |
+				   RTSER_SET_BAUD)) {
+		rt_16550_reg_out(mode, base, LCR,
+				 (ctx->config.parity << 3) |
+				 (ctx->config.stop_bits << 2) |
+				 ctx->config.data_bits);
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+	}
+
+	if (config->config_mask & RTSER_SET_FIFO_DEPTH) {
+		ctx->config.fifo_depth = config->fifo_depth & FIFO_MASK;
+		rt_16550_reg_out(mode, base, FCR,
+				 FCR_FIFO | FCR_RESET_RX | FCR_RESET_TX);
+		rt_16550_reg_out(mode, base, FCR,
+				 FCR_FIFO | ctx->config.fifo_depth);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	   care not to use and change timeouts at the same time. */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		/* change timestamp history atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		/* change event mask atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND)
+		    && ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+
+		if (config->event_mask & (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			/* enable modem status interrupt */
+			ctx->ier_status |= IER_MODEM;
+		else
+			/* disable modem status interrupt */
+			ctx->ier_status &= ~IER_MODEM;
+		rt_16550_reg_out(mode, base, IER, ctx->ier_status);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_HANDSHAKE) {
+		/* change handshake atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.handshake = config->handshake;
+
+		switch (ctx->config.handshake) {
+		case RTSER_RTSCTS_HAND:
+			/* RTS/CTS handshake is not implemented yet;
+			   fall through to the plain line setup. */
+
+		default:	/* RTSER_NO_HAND */
+			ctx->mcr_status =
+			    RTSER_MCR_DTR | RTSER_MCR_RTS | RTSER_MCR_OUT2;
+			break;
+		}
+		rt_16550_reg_out(mode, base, MCR, ctx->mcr_status);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	return err;
+}
+
+void rt_16550_cleanup_ctx(struct rt_16550_context *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+int rt_16550_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_16550_context *ctx;
+	int dev_id = rtdm_fd_minor(fd);
+	int err;
+	uint64_t *dummy;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	rt_16550_init_io_ctx(dev_id, ctx);
+
+	ctx->tx_fifo = tx_fifo[dev_id];
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	rt_16550_set_config(ctx, &default_config, &dummy);
+
+	err = rtdm_irq_request(&ctx->irq_handle, irq[dev_id],
+			rt_16550_interrupt, irqtype[dev_id],
+			rtdm_fd_device(fd)->name, ctx);
+	if (err) {
+		/* reset DTR and RTS */
+		rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx), ctx->base_addr,
+				 MCR, 0);
+
+		rt_16550_cleanup_ctx(ctx);
+
+		return err;
+	}
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* enable interrupts */
+	ctx->ier_status = IER_RX;
+	rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx), ctx->base_addr, IER,
+			 IER_RX);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	return 0;
+}
+
+void rt_16550_close(struct rtdm_fd *fd)
+{
+	struct rt_16550_context *ctx;
+	unsigned long base;
+	int mode;
+	uint64_t *in_history;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* reset DTR and RTS */
+	rt_16550_reg_out(mode, base, MCR, 0);
+
+	/* mask all UART interrupts and clear pending ones. */
+	rt_16550_reg_out(mode, base, IER, 0);
+	rt_16550_reg_in(mode, base, IIR);
+	rt_16550_reg_in(mode, base, LSR);
+	rt_16550_reg_in(mode, base, RHR);
+	rt_16550_reg_in(mode, base, MSR);
+
+	in_history = ctx->in_history;
+	ctx->in_history = NULL;
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	rt_16550_cleanup_ctx(ctx);
+
+	kfree(in_history);
+}
+
+int rt_16550_ioctl(struct rtdm_fd *fd, unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_16550_context *ctx;
+	int err = 0;
+	unsigned long base;
+	int mode;
+
+	ctx = rtdm_fd_to_private(fd);
+	base = ctx->base_addr;
+	mode = rt_16550_io_mode_from_ctx(ctx);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg, &ctx->config,
+						     sizeof(struct rtser_config));
+		else
+			memcpy(arg, &ctx->config, sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err = rtdm_safe_copy_from_user(fd, &config_buf, arg,
+						       sizeof(struct rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate >
+			    baud_base[rtdm_fd_minor(fd)] ||
+			    config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			/*
+			 * Reflect the call to non-RT as we will likely
+			 * allocate or free the buffer.
+			 */
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+
+			if (config->timestamp_history &
+			    RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_16550_set_config(ctx, config, &hist_buf);
+
+		kfree(hist_buf);	/* kfree(NULL) is a no-op */
+
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status =
+			    rt_16550_reg_in(mode, base, LSR) | status;
+			status_buf.modem_status =
+			    rt_16550_reg_in(mode, base, MSR);
+
+			err = rtdm_safe_copy_to_user(fd, arg, &status_buf,
+						     sizeof(struct rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status =
+			    rt_16550_reg_in(mode, base, LSR) | status;
+			((struct rtser_status *)arg)->modem_status =
+			    rt_16550_reg_in(mode, base, MSR);
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg, &ctx->mcr_status,
+						     sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr;
+		rt_16550_reg_out(mode, base, MCR, new_mcr);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			/* Only enable error interrupt
+			   when the user waits for it. */
+			if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
+				ctx->ier_status |= IER_STAT;
+				rt_16550_reg_out(mode, base, IER,
+						 ctx->ier_status);
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &=
+		    ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg, &ev,
+						     sizeof(struct rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		int lcr = ((long)arg & RTSER_BREAK_SET) << 6;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		lcr |=
+		    (ctx->config.parity << 3) | (ctx->config.stop_bits << 2) |
+		    ctx->config.data_bits;
+
+		rt_16550_reg_out(mode, base, LCR, lcr);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_16550_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_16550_reg_out(mode, base, FCR, fcr);
+			rt_16550_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
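+
+/*
+ * Editor's sketch: configuring the port from a Xenomai application
+ * through the handler above (device name and settings are examples):
+ *
+ *   struct rtser_config cfg = {
+ *           .config_mask = RTSER_SET_BAUD | RTSER_SET_PARITY,
+ *           .baud_rate   = 115200,
+ *           .parity      = RTSER_NO_PARITY,
+ *   };
+ *   int fd = open("/dev/rtdm/rtser0", O_RDWR);
+ *
+ *   ioctl(fd, RTSER_RTIOC_SET_CONFIG, &cfg);
+ */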
+
+ssize_t rt_16550_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct rt_16550_context *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		/* switch on error interrupt - the user is ready to listen */
+		if ((ctx->ier_status & IER_STAT) == 0) {
+			ctx->ier_status |= IER_STAT;
+			rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx),
+					 ctx->base_addr, IER,
+					 ctx->ier_status);
+		}
+
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				   separately. */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			if ((ctx->in_npend -= block) == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			   non-blocking call or contains the error
+			   returned by rtdm_event_wait[_until] */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				   before exit. */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
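+	/* A partial read takes precedence over benign termination causes;
+	   only hard errors (-EFAULT, -EBADF, -EPIPE, -EIO) mask the count. */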
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = read;
+
+	return ret;
+}
+
+ssize_t rt_16550_write(struct rtdm_fd *fd, const void *buf, size_t nbyte)
+{
+	struct rt_16550_context *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	int lsr;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				   end separately. */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos,
+				       subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
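+			/* If the transmitter already idles, prime the FIFO
+			   right away; the TX interrupt drains the rest. */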
+			lsr = rt_16550_reg_in(rt_16550_io_mode_from_ctx(ctx),
+					      ctx->base_addr, LSR);
+			if (lsr & RTSER_LSR_THR_EMTPY)
+				rt_16550_tx_fill(ctx);
+
+			if (ctx->out_npend > 0 && !(ctx->ier_status & IER_TX)) {
+				/* unmask tx interrupt */
+				ctx->ier_status |= IER_TX;
+				rt_16550_reg_out(rt_16550_io_mode_from_ctx(ctx),
+						 ctx->base_addr, IER,
+						 ctx->ier_status);
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->out_event,
+					   ctx->config.tx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+			if (ret == -EWOULDBLOCK) {
+				/* Fix error code for non-blocking mode. */
+				ret = -EAGAIN;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver uart16550A_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(uart16550A,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= MAX_DEVICES,
+	.context_size		= sizeof(struct rt_16550_context),
+	.ops = {
+		.open		= rt_16550_open,
+		.close		= rt_16550_close,
+		.ioctl_rt	= rt_16550_ioctl,
+		.ioctl_nrt	= rt_16550_ioctl,
+		.read_rt	= rt_16550_read,
+		.write_rt	= rt_16550_write,
+	},
+};
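+
+/* Each port registers as a named RTDM device ("rtser0", "rtser1", ...),
+   typically reachable as /dev/rtdm/rtser<N> from Xenomai applications. */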
+
+void rt_16550_exit(void);
+
+int __init rt_16550_init(void)
+{
+	struct rtdm_device *dev;
+	unsigned long base;
+	char *name;
+	int mode;
+	int err;
+	int i;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	rt_16550_pnp_init();
+	rt_16550_pci_init();
+
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if (!rt_16550_addr_param(i))
+			continue;
+
+		err = -EINVAL;
+		if (!irq[i] || !rt_16550_addr_param_valid(i))
+			goto cleanup_out;
+
+		dev = kmalloc(sizeof(struct rtdm_device) +
+			      RTDM_MAX_DEVNAME_LEN, GFP_KERNEL);
+		err = -ENOMEM;
+		if (!dev)
+			goto cleanup_out;
+
+		dev->driver = &uart16550A_driver;
+		dev->label = "rtser%d";
+		name = (char *)(dev + 1);
+		ksformat(name, RTDM_MAX_DEVNAME_LEN, dev->label, i);
+
+		err = rt_16550_init_io(i, name);
+		if (err)
+			goto kfree_out;
+
+		if (baud_base[i] == 0)
+			baud_base[i] = DEFAULT_BAUD_BASE;
+
+		if (tx_fifo[i] == 0)
+			tx_fifo[i] = DEFAULT_TX_FIFO;
+
+		/* Mask all UART interrupts and clear pending ones. */
+		base = rt_16550_base_addr(i);
+		mode = rt_16550_io_mode(i);
+		rt_16550_reg_out(mode, base, IER, 0);
+		rt_16550_reg_in(mode, base, IIR);
+		rt_16550_reg_in(mode, base, LSR);
+		rt_16550_reg_in(mode, base, RHR);
+		rt_16550_reg_in(mode, base, MSR);
+
+		err = rtdm_dev_register(dev);
+
+		if (err)
+			goto release_io_out;
+
+		device[i] = dev;
+	}
+
+	return 0;
+
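+	/* Unwind only what device index i managed to set up; rt_16550_exit()
+	   below also tears down all previously registered devices. */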
+release_io_out:
+	rt_16550_release_io(i);
+
+kfree_out:
+	kfree(dev);
+
+cleanup_out:
+	rt_16550_exit();
+
+	return err;
+}
+
+void rt_16550_exit(void)
+{
+	int i;
+
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (device[i]) {
+			rtdm_dev_unregister(device[i]);
+			rt_16550_release_io(i);
+			kfree(device[i]);
+		}
+
+	rt_16550_pci_cleanup();
+	rt_16550_pnp_cleanup();
+}
+
+module_init(rt_16550_init);
+module_exit(rt_16550_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_io.h b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_io.h
new file mode 100644
index 0000000..4dd7b56
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_io.h
@@ -0,0 +1,210 @@
+/*
+ * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+/* Manages the I/O access method of the driver. */
+
+typedef enum { MODE_PIO, MODE_MMIO } io_mode_t;
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PIO) || \
+    defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+static unsigned long io[MAX_DEVICES];
+module_param_array(io, ulong, NULL, 0400);
+MODULE_PARM_DESC(io, "I/O port addresses of the serial devices");
+#endif /* CONFIG_XENO_DRIVERS_16550A_PIO || CONFIG_XENO_DRIVERS_16550A_ANY */
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_MMIO) || \
+    defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+static unsigned long mem[MAX_DEVICES];
+static void *mapped_io[MAX_DEVICES];
+module_param_array(mem, ulong, NULL, 0400);
+MODULE_PARM_DESC(mem, "I/O memory addresses of the serial devices");
+#endif /* CONFIG_XENO_DRIVERS_16550A_MMIO || CONFIG_XENO_DRIVERS_16550A_ANY */
+
+#ifdef CONFIG_XENO_DRIVERS_16550A_PIO
+
+#define RT_16550_IO_INLINE inline
+
+extern void *mapped_io[]; /* dummy */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return io[dev_id];
+}
+
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return 1;
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return MODE_PIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return MODE_PIO;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	ctx->base_addr = io[dev_id];
+}
+
+#elif defined(CONFIG_XENO_DRIVERS_16550A_MMIO)
+
+#define RT_16550_IO_INLINE inline
+
+extern unsigned long io[]; /* dummy */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return mem[dev_id];
+}
+
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return 1;
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return (unsigned long)mapped_io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return MODE_MMIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return MODE_MMIO;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	ctx->base_addr = (unsigned long)mapped_io[dev_id];
+}
+
+#elif defined(CONFIG_XENO_DRIVERS_16550A_ANY)
+
+#define RT_16550_IO_INLINE /* uninline */
+
+static inline unsigned long rt_16550_addr_param(int dev_id)
+{
+	return (io[dev_id]) ? io[dev_id] : mem[dev_id];
+}
+
+static inline int rt_16550_addr_param_valid(int dev_id)
+{
+	return !(io[dev_id] && mem[dev_id]);
+}
+
+static inline unsigned long rt_16550_base_addr(int dev_id)
+{
+	return (io[dev_id]) ? io[dev_id] : (unsigned long)mapped_io[dev_id];
+}
+
+static inline io_mode_t rt_16550_io_mode(int dev_id)
+{
+	return (io[dev_id]) ? MODE_PIO : MODE_MMIO;
+}
+
+static inline io_mode_t
+rt_16550_io_mode_from_ctx(struct rt_16550_context *ctx)
+{
+	return ctx->io_mode;
+}
+
+static inline void
+rt_16550_init_io_ctx(int dev_id, struct rt_16550_context *ctx)
+{
+	if (io[dev_id]) {
+		ctx->base_addr = io[dev_id];
+		ctx->io_mode   = MODE_PIO;
+	} else {
+		ctx->base_addr = (unsigned long)mapped_io[dev_id];
+		ctx->io_mode   = MODE_MMIO;
+	}
+}
+
+#else
+# error Unsupported I/O access method
+#endif
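+
+/*
+ * With a compile-time fixed access mode the accessors below inline down to
+ * plain inb()/outb() or readb()/writeb(); only the ANY mode keeps the
+ * runtime switch out of line (RT_16550_IO_INLINE expands to nothing there).
+ */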
+
+static RT_16550_IO_INLINE u8
+rt_16550_reg_in(io_mode_t io_mode, unsigned long base, int off)
+{
+	switch (io_mode) {
+	case MODE_PIO:
+		return inb(base + off);
+	default: /* MODE_MMIO */
+		return readb((void *)base + off);
+	}
+}
+
+static RT_16550_IO_INLINE void
+rt_16550_reg_out(io_mode_t io_mode, unsigned long base, int off, u8 val)
+{
+	switch (io_mode) {
+	case MODE_PIO:
+		outb(val, base + off);
+		break;
+	case MODE_MMIO:
+		writeb(val, (void *)base + off);
+		break;
+	}
+}
+
+static int rt_16550_init_io(int dev_id, char* name)
+{
+	switch (rt_16550_io_mode(dev_id)) {
+	case MODE_PIO:
+		if (!request_region(rt_16550_addr_param(dev_id), 8, name))
+			return -EBUSY;
+		break;
+	case MODE_MMIO:
+		mapped_io[dev_id] = ioremap(rt_16550_addr_param(dev_id), 8);
+		if (!mapped_io[dev_id])
+			return -EBUSY;
+		break;
+	}
+	return 0;
+}
+
+static void rt_16550_release_io(int dev_id)
+{
+	switch (rt_16550_io_mode(dev_id)) {
+	case MODE_PIO:
+		release_region(io[dev_id], 8);
+		break;
+	case MODE_MMIO:
+		iounmap(mapped_io[dev_id]);
+		break;
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pci.h b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pci.h
new file mode 100644
index 0000000..161874d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pci.h
@@ -0,0 +1,299 @@
+/*
+ * Copyright (C) 2006-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2011 Stefan Kisdaroczi <kisda@hispeed.ch>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI)
+
+#include <linux/pci.h>
+
+struct rt_16550_pci_board {
+	char *name;
+	resource_size_t resource_base_addr;
+	unsigned int nports;
+	unsigned int port_ofs;
+	unsigned long irqtype;
+	unsigned int baud_base;
+	int tx_fifo;
+};
+
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI_MOXA)
+
+#define PCI_DEVICE_ID_CP112UL		0x1120
+#define PCI_DEVICE_ID_CP114UL		0x1143
+#define PCI_DEVICE_ID_CP138U		0x1380
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,17,0)
+#define PCI_DEVICE_ID_MOXA_CP102UL	0x1021
+#define PCI_DEVICE_ID_MOXA_CP102U	0x1022
+#define PCI_DEVICE_ID_MOXA_C104		0x1040
+#define PCI_DEVICE_ID_MOXA_CP104U	0x1041
+#define PCI_DEVICE_ID_MOXA_CP114	0x1141
+#define PCI_DEVICE_ID_MOXA_CP118U	0x1180
+#define PCI_DEVICE_ID_MOXA_CP132	0x1320
+#define PCI_DEVICE_ID_MOXA_CP132U	0x1321
+#define PCI_DEVICE_ID_MOXA_CP134U	0x1340
+#define PCI_DEVICE_ID_MOXA_C168		0x1680
+#define PCI_DEVICE_ID_MOXA_CP168U	0x1681
+#endif
+
+static const struct rt_16550_pci_board rt_16550_moxa_c104 = {
+	.name = "Moxa C104H/PCI",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_c168 = {
+	.name = "Moxa C168H/PCI",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp114 = {
+	.name = "Moxa CP-114",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp132 = {
+	.name = "Moxa CP-132",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp102u = {
+	.name = "Moxa CP-102U",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp102ul = {
+	.name = "Moxa CP-102UL",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp104u = {
+	.name = "Moxa CP-104U",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp112ul = {
+	.name = "Moxa CP-112UL",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp114ul = {
+	.name = "Moxa CP-114UL",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp118u = {
+	.name = "Moxa CP-118U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp132u = {
+	.name = "Moxa CP-132U",
+	.resource_base_addr = 2,
+	.nports = 2,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp134u = {
+	.name = "Moxa CP-134U",
+	.resource_base_addr = 2,
+	.nports = 4,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp138u = {
+	.name = "Moxa CP-138U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+
+static const struct rt_16550_pci_board rt_16550_moxa_cp168u = {
+	.name = "Moxa CP-168U",
+	.resource_base_addr = 2,
+	.nports = 8,
+	.port_ofs = 8,
+	.baud_base = 921600,
+	.tx_fifo = 16,
+	.irqtype = RTDM_IRQTYPE_SHARED,
+};
+#endif
+
+const struct pci_device_id rt_16550_pci_table[] = {
+#if defined(CONFIG_XENO_DRIVERS_16550A_PCI_MOXA)
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C104),
+	 .driver_data = (unsigned long)&rt_16550_moxa_c104},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_C168),
+	 .driver_data = (unsigned long)&rt_16550_moxa_c168},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP114),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp114},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp132},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp102u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP102UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp102ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP104U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp104u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP112UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp112ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP114UL),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp114ul},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP118U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp118u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP132U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp132u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP134U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp134u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_CP138U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp138u},
+	{PCI_VDEVICE(MOXA, PCI_DEVICE_ID_MOXA_CP168U),
+	 .driver_data = (unsigned long)&rt_16550_moxa_cp168u},
+#endif
+	{ }
+};
+
+static int rt_16550_pci_probe(struct pci_dev *pdev,
+			      const struct pci_device_id *ent)
+{
+	struct rt_16550_pci_board *board;
+	int err;
+	int i;
+	int port = 0;
+	int base_addr;
+	int max_devices = 0;
+
+	if (!ent->driver_data)
+		return -ENODEV;
+
+	board = (struct rt_16550_pci_board *)ent->driver_data;
+
+	for (i = 0; i < MAX_DEVICES; i++)
+		if (!rt_16550_addr_param(i))
+			max_devices++;
+
+	if (board->nports > max_devices)
+		return -ENODEV;
+
+	if ((err = pci_enable_device(pdev)))
+		return err;
+
+	base_addr = pci_resource_start(pdev, board->resource_base_addr);
+
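+	/* Hand the board's ports to the first free module-parameter slots;
+	   rt_16550_init() then registers them like statically given ones. */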
+	for (i = 0; i < MAX_DEVICES; i++) {
+		if ((port < board->nports) && (!rt_16550_addr_param(i))) {
+			io[i] = base_addr + port * board->port_ofs;
+			irq[i] = pdev->irq;
+			irqtype[i] = board->irqtype;
+			baud_base[i] = board->baud_base;
+			tx_fifo[i] = board->tx_fifo;
+			port++;
+		}
+	}
+
+	return 0;
+}
+
+static void rt_16550_pci_remove(struct pci_dev *pdev)
+{
+	pci_disable_device(pdev);
+}
+
+static struct pci_driver rt_16550_pci_driver = {
+	.name     = RT_16550_DRIVER_NAME,
+	.id_table = rt_16550_pci_table,
+	.probe    = rt_16550_pci_probe,
+	.remove   = rt_16550_pci_remove
+};
+
+static int pci_registered;
+
+static inline void rt_16550_pci_init(void)
+{
+	if (pci_register_driver(&rt_16550_pci_driver) == 0)
+		pci_registered = 1;
+}
+
+static inline void rt_16550_pci_cleanup(void)
+{
+	if (pci_registered)
+		pci_unregister_driver(&rt_16550_pci_driver);
+}
+
+#else /* !CONFIG_XENO_DRIVERS_16550A_PCI */
+
+#define rt_16550_pci_init()	do { } while (0)
+#define rt_16550_pci_cleanup()	do { } while (0)
+
+#endif /* !CONFIG_XENO_DRIVERS_16550A_PCI */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pnp.h b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pnp.h
new file mode 100644
index 0000000..8746fa4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/16550A_pnp.h
@@ -0,0 +1,387 @@
+/*
+ * Copyright (C) 2006-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#if defined(CONFIG_PNP) && \
+    (defined(CONFIG_XENO_DRIVERS_16550A_PIO) || \
+     defined(CONFIG_XENO_DRIVERS_16550A_ANY))
+
+#include <linux/pnp.h>
+
+#define UNKNOWN_DEV 0x3000
+
+/* Bluntly cloned from drivers/serial/8250_pnp.c */
+static const struct pnp_device_id rt_16550_pnp_tbl[] = {
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"AAC000F",		0	},
+	/* Anchor Datacomm BV */
+	/* SXPro 144 External Data Fax Modem Plug & Play */
+	{	"ADC0001",		0	},
+	/* SXPro 288 External Data Fax Modem Plug & Play */
+	{	"ADC0002",		0	},
+	/* PROLiNK 1456VH ISA PnP K56flex Fax Modem */
+	{	"AEI0250",		0	},
+	/* Actiontec ISA PNP 56K X2 Fax Modem */
+	{	"AEI1240",		0	},
+	/* Rockwell 56K ACF II Fax+Data+Voice Modem */
+	{	"AKY1021",		0 /*SPCI_FL_NO_SHIRQ*/	},
+	/* AZT3005 PnP SOUND DEVICE */
+	{	"AZT4001",		0	},
+	/* Best Data Products Inc. Smart One 336F PnP Modem */
+	{	"BDP3336",		0	},
+	/*  Boca Research */
+	/* Boca Complete Ofc Communicator 14.4 Data-FAX */
+	{	"BRI0A49",		0	},
+	/* Boca Research 33,600 ACF Modem */
+	{	"BRI1400",		0	},
+	/* Boca 33.6 Kbps Internal FD34FSVD */
+	{	"BRI3400",		0	},
+	/* Boca 33.6 Kbps Internal FD34FSVD */
+	{	"BRI0A49",		0	},
+	/* Best Data Products Inc. Smart One 336F PnP Modem */
+	{	"BDP3336",		0	},
+	/* Computer Peripherals Inc */
+	/* EuroViVa CommCenter-33.6 SP PnP */
+	{	"CPI4050",		0	},
+	/* Creative Labs */
+	/* Creative Labs Phone Blaster 28.8 DSVD PnP Voice */
+	{	"CTL3001",		0	},
+	/* Creative Labs Modem Blaster 28.8 DSVD PnP Voice */
+	{	"CTL3011",		0	},
+	/* Creative */
+	/* Creative Modem Blaster Flash56 DI5601-1 */
+	{	"DMB1032",		0	},
+	/* Creative Modem Blaster V.90 DI5660 */
+	{	"DMB2001",		0	},
+	/* E-Tech */
+	/* E-Tech CyberBULLET PC56RVP */
+	{	"ETT0002",		0	},
+	/* FUJITSU */
+	/* Fujitsu 33600 PnP-I2 R Plug & Play */
+	{	"FUJ0202",		0	},
+	/* Fujitsu FMV-FX431 Plug & Play */
+	{	"FUJ0205",		0	},
+	/* Fujitsu 33600 PnP-I4 R Plug & Play */
+	{	"FUJ0206",		0	},
+	/* Fujitsu Fax Voice 33600 PNP-I5 R Plug & Play */
+	{	"FUJ0209",		0	},
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"GVC000F",		0	},
+	/* Hayes */
+	/* Hayes Optima 288 V.34-V.FC + FAX + Voice Plug & Play */
+	{	"HAY0001",		0	},
+	/* Hayes Optima 336 V.34 + FAX + Voice PnP */
+	{	"HAY000C",		0	},
+	/* Hayes Optima 336B V.34 + FAX + Voice PnP */
+	{	"HAY000D",		0	},
+	/* Hayes Accura 56K Ext Fax Modem PnP */
+	{	"HAY5670",		0	},
+	/* Hayes Accura 56K Ext Fax Modem PnP */
+	{	"HAY5674",		0	},
+	/* Hayes Accura 56K Fax Modem PnP */
+	{	"HAY5675",		0	},
+	/* Hayes 288, V.34 + FAX */
+	{	"HAYF000",		0	},
+	/* Hayes Optima 288 V.34 + FAX + Voice, Plug & Play */
+	{	"HAYF001",		0	},
+	/* IBM */
+	/* IBM Thinkpad 701 Internal Modem Voice */
+	{	"IBM0033",		0	},
+	/* Intertex */
+	/* Intertex 28k8 33k6 Voice EXT PnP */
+	{	"IXDC801",		0	},
+	/* Intertex 33k6 56k Voice EXT PnP */
+	{	"IXDC901",		0	},
+	/* Intertex 28k8 33k6 Voice SP EXT PnP */
+	{	"IXDD801",		0	},
+	/* Intertex 33k6 56k Voice SP EXT PnP */
+	{	"IXDD901",		0	},
+	/* Intertex 28k8 33k6 Voice SP INT PnP */
+	{	"IXDF401",		0	},
+	/* Intertex 28k8 33k6 Voice SP EXT PnP */
+	{	"IXDF801",		0	},
+	/* Intertex 33k6 56k Voice SP EXT PnP */
+	{	"IXDF901",		0	},
+	/* Kortex International */
+	/* KORTEX 28800 Externe PnP */
+	{	"KOR4522",		0	},
+	/* KXPro 33.6 Vocal ASVD PnP */
+	{	"KORF661",		0	},
+	/* Lasat */
+	/* LASAT Internet 33600 PnP */
+	{	"LAS4040",		0	},
+	/* Lasat Safire 560 PnP */
+	{	"LAS4540",		0	},
+	/* Lasat Safire 336  PnP */
+	{	"LAS5440",		0	},
+	/* Microcom, Inc. */
+	/* Microcom TravelPorte FAST V.34 Plug & Play */
+	{	"MNP0281",		0	},
+	/* Microcom DeskPorte V.34 FAST or FAST+ Plug & Play */
+	{	"MNP0336",		0	},
+	/* Microcom DeskPorte FAST EP 28.8 Plug & Play */
+	{	"MNP0339",		0	},
+	/* Microcom DeskPorte 28.8P Plug & Play */
+	{	"MNP0342",		0	},
+	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+	{	"MNP0500",		0	},
+	/* Microcom DeskPorte FAST ES 28.8 Plug & Play */
+	{	"MNP0501",		0	},
+	/* Microcom DeskPorte 28.8S Internal Plug & Play */
+	{	"MNP0502",		0	},
+	/* Motorola */
+	/* Motorola BitSURFR Plug & Play */
+	{	"MOT1105",		0	},
+	/* Motorola TA210 Plug & Play */
+	{	"MOT1111",		0	},
+	/* Motorola HMTA 200 (ISDN) Plug & Play */
+	{	"MOT1114",		0	},
+	/* Motorola BitSURFR Plug & Play */
+	{	"MOT1115",		0	},
+	/* Motorola Lifestyle 28.8 Internal */
+	{	"MOT1190",		0	},
+	/* Motorola V.3400 Plug & Play */
+	{	"MOT1501",		0	},
+	/* Motorola Lifestyle 28.8 V.34 Plug & Play */
+	{	"MOT1502",		0	},
+	/* Motorola Power 28.8 V.34 Plug & Play */
+	{	"MOT1505",		0	},
+	/* Motorola ModemSURFR External 28.8 Plug & Play */
+	{	"MOT1509",		0	},
+	/* Motorola Premier 33.6 Desktop Plug & Play */
+	{	"MOT150A",		0	},
+	/* Motorola VoiceSURFR 56K External PnP */
+	{	"MOT150F",		0	},
+	/* Motorola ModemSURFR 56K External PnP */
+	{	"MOT1510",		0	},
+	/* Motorola ModemSURFR 56K Internal PnP */
+	{	"MOT1550",		0	},
+	/* Motorola ModemSURFR Internal 28.8 Plug & Play */
+	{	"MOT1560",		0	},
+	/* Motorola Premier 33.6 Internal Plug & Play */
+	{	"MOT1580",		0	},
+	/* Motorola OnlineSURFR 28.8 Internal Plug & Play */
+	{	"MOT15B0",		0	},
+	/* Motorola VoiceSURFR 56K Internal PnP */
+	{	"MOT15F0",		0	},
+	/* Com 1 */
+	/*  Deskline K56 Phone System PnP */
+	{	"MVX00A1",		0	},
+	/* PC Rider K56 Phone System PnP */
+	{	"MVX00F2",		0	},
+	/* NEC 98NOTE SPEAKER PHONE FAX MODEM(33600bps) */
+	{	"nEC8241",		0	},
+	/* Pace 56 Voice Internal Plug & Play Modem */
+	{	"PMC2430",		0	},
+	/* Generic */
+	/* Generic standard PC COM port	 */
+	{	"PNP0500",		0	},
+	/* Generic 16550A-compatible COM port */
+	{	"PNP0501",		0	},
+	/* Compaq 14400 Modem */
+	{	"PNPC000",		0	},
+	/* Compaq 2400/9600 Modem */
+	{	"PNPC001",		0	},
+	/* Dial-Up Networking Serial Cable between 2 PCs */
+	{	"PNPC031",		0	},
+	/* Dial-Up Networking Parallel Cable between 2 PCs */
+	{	"PNPC032",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC100",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC101",		0	},
+	/*  Standard 28800 bps Modem*/
+	{	"PNPC102",		0	},
+	/*  Standard Modem*/
+	{	"PNPC103",		0	},
+	/*  Standard 9600 bps Modem*/
+	{	"PNPC104",		0	},
+	/*  Standard 14400 bps Modem*/
+	{	"PNPC105",		0	},
+	/*  Standard 28800 bps Modem*/
+	{	"PNPC106",		0	},
+	/*  Standard Modem */
+	{	"PNPC107",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC108",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC109",		0	},
+	/* Standard 28800 bps Modem */
+	{	"PNPC10A",		0	},
+	/* Standard Modem */
+	{	"PNPC10B",		0	},
+	/* Standard 9600 bps Modem */
+	{	"PNPC10C",		0	},
+	/* Standard 14400 bps Modem */
+	{	"PNPC10D",		0	},
+	/* Standard 28800 bps Modem */
+	{	"PNPC10E",		0	},
+	/* Standard Modem */
+	{	"PNPC10F",		0	},
+	/* Standard PCMCIA Card Modem */
+	{	"PNP2000",		0	},
+	/* Rockwell */
+	/* Modular Technology */
+	/* Rockwell 33.6 DPF Internal PnP */
+	/* Modular Technology 33.6 Internal PnP */
+	{	"ROK0030",		0	},
+	/* Kortex International */
+	/* KORTEX 14400 Externe PnP */
+	{	"ROK0100",		0	},
+	/* Rockwell 28.8 */
+	{	"ROK4120",		0	},
+	/* Viking Components, Inc */
+	/* Viking 28.8 INTERNAL Fax+Data+Voice PnP */
+	{	"ROK4920",		0	},
+	/* Rockwell */
+	/* British Telecom */
+	/* Modular Technology */
+	/* Rockwell 33.6 DPF External PnP */
+	/* BT Prologue 33.6 External PnP */
+	/* Modular Technology 33.6 External PnP */
+	{	"RSS00A0",		0	},
+	/* Viking 56K FAX INT */
+	{	"RSS0262",		0	},
+	/* K56 par,VV,Voice,Speakphone,AudioSpan,PnP */
+	{       "RSS0250",              0       },
+	/* SupraExpress 28.8 Data/Fax PnP modem */
+	{	"SUP1310",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1421",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1590",		0	},
+	/* SupraExpress 336i Sp ASVD */
+	{	"SUP1620",		0	},
+	/* SupraExpress 33.6 Data/Fax PnP modem */
+	{	"SUP1760",		0	},
+	/* SupraExpress 56i Sp Intl */
+	{	"SUP2171",		0	},
+	/* Phoebe Micro */
+	/* Phoebe Micro 33.6 Data Fax 1433VQH Plug & Play */
+	{	"TEX0011",		0	},
+	/* Archtek America Corp. */
+	/* Archtek SmartLink Modem 3334BT Plug & Play */
+	{	"UAC000F",		0	},
+	/* 3Com Corp. */
+	/* Gateway Telepath IIvi 33.6 */
+	{	"USR0000",		0	},
+	/* U.S. Robotics Sporster 33.6K Fax INT PnP */
+	{	"USR0002",		0	},
+	/*  Sportster Vi 14.4 PnP FAX Voicemail */
+	{	"USR0004",		0	},
+	/* U.S. Robotics 33.6K Voice INT PnP */
+	{	"USR0006",		0	},
+	/* U.S. Robotics 33.6K Voice EXT PnP */
+	{	"USR0007",		0	},
+	/* U.S. Robotics Courier V.Everything INT PnP */
+	{	"USR0009",		0	},
+	/* U.S. Robotics 33.6K Voice INT PnP */
+	{	"USR2002",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR2070",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP */
+	{	"USR2080",		0	},
+	/* U.S. Robotics 56K FAX INT */
+	{	"USR3031",		0	},
+	/* U.S. Robotics 56K FAX INT */
+	{	"USR3050",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR3070",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP */
+	{	"USR3080",		0	},
+	/* U.S. Robotics 56K Voice INT PnP */
+	{	"USR3090",		0	},
+	/* U.S. Robotics 56K Message  */
+	{	"USR9100",		0	},
+	/* U.S. Robotics 56K FAX EXT PnP*/
+	{	"USR9160",		0	},
+	/* U.S. Robotics 56K FAX INT PnP*/
+	{	"USR9170",		0	},
+	/* U.S. Robotics 56K Voice EXT PnP*/
+	{	"USR9180",		0	},
+	/* U.S. Robotics 56K Voice INT PnP*/
+	{	"USR9190",		0	},
+	/* Wacom tablets */
+	{	"WACF004",		0	},
+	{	"WACF005",		0	},
+	{       "WACF006",              0       },
+	/* Compaq touchscreen */
+	{       "FPI2002",              0 },
+	/* Fujitsu Stylistic touchscreens */
+	{       "FUJ02B2",              0 },
+	{       "FUJ02B3",              0 },
+	/* Fujitsu Stylistic LT touchscreens */
+	{       "FUJ02B4",              0 },
+	/* Passive Fujitsu Stylistic touchscreens */
+	{       "FUJ02B6",              0 },
+	{       "FUJ02B7",              0 },
+	{       "FUJ02B8",              0 },
+	{       "FUJ02B9",              0 },
+	{       "FUJ02BC",              0 },
+	/* Rockwell's (PORALiNK) 33600 INT PNP */
+	{	"WCI0003",		0	},
+	/* Unknown PnP modems */
+	{	"PNPCXXX",		UNKNOWN_DEV	},
+	/* More unknown PnP modems */
+	{	"PNPDXXX",		UNKNOWN_DEV	},
+	{	"",			0	}
+};
+
+static int rt_16550_pnp_probe(struct pnp_dev *dev,
+			       const struct pnp_device_id *dev_id)
+{
+	int i;
+
+	for (i = 0; i < MAX_DEVICES; i++)
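+	/* Accept only devices whose port matches a user-supplied io= slot;
+	   the PnP core then merely contributes the IRQ if none was given. */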
+		if (pnp_port_valid(dev, 0) &&
+		    pnp_port_start(dev, 0) == io[i]) {
+			if (!irq[i])
+				irq[i] = pnp_irq(dev, 0);
+			return 0;
+		}
+
+	return -ENODEV;
+}
+
+static struct pnp_driver rt_16550_pnp_driver = {
+	.name		= RT_16550_DRIVER_NAME,
+	.id_table	= rt_16550_pnp_tbl,
+	.probe		= rt_16550_pnp_probe,
+};
+
+static int pnp_registered;
+
+static inline void rt_16550_pnp_init(void)
+{
+	if (pnp_register_driver(&rt_16550_pnp_driver) == 0)
+		pnp_registered = 1;
+}
+
+static inline void rt_16550_pnp_cleanup(void)
+{
+	if (pnp_registered)
+		pnp_unregister_driver(&rt_16550_pnp_driver);
+}
+
+#else /* !CONFIG_PNP || !(..._16550A_PIO || ..._16550A_ANY) */
+
+#define rt_16550_pnp_init()	do { } while (0)
+#define rt_16550_pnp_cleanup()	do { } while (0)
+
+#endif /* !CONFIG_PNP || !(..._16550A_PIO || ..._16550A_ANY) */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/serial/Kconfig
new file mode 100644
index 0000000..43dd5ec
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/Kconfig
@@ -0,0 +1,79 @@
+menu "Serial drivers"
+
+config XENO_DRIVERS_16550A
+	tristate "16550A UART driver"
+	help
+	Real-time UART driver for 16550A compatible controllers. See
+	doc/txt/16550A-driver.txt for more details.
+
+choice
+	prompt "Hardware access mode"
+	depends on XENO_DRIVERS_16550A
+	default XENO_DRIVERS_16550A_PIO
+
+config XENO_DRIVERS_16550A_PIO
+	bool "Port-based I/O"
+	help
+	Hardware access only via I/O ports. Use module parameter
+	"io=<port>[,<port>[,...]]" to specify the base port of a device.
+
+config XENO_DRIVERS_16550A_MMIO
+	bool "Memory-mapped I/O"
+	help
+	Hardware access only via memory mapping. Use module parameter
+	"mem=<addr>[,<addr>[,...]]" to specify the physical base address of
+	a device.
+
+config XENO_DRIVERS_16550A_ANY
+	bool "Any access mode"
+	help
+	Decide at module load-time (or via kernel parameter) which access
+	mode to use for which device. This mode is useful when devices of
+	both types may be present in the same system at the same time.
+
+	Both "io" and "mem" module parameters are available, but always only
+	one of them can be applied on a particular device. Use, e.g.,
+	"io=0x3f8,0 mem=0,0xe0000000" to address device 1 via IO base port
+	0x3f8 and device 2 via physical base address 0xe0000000.
+
+endchoice
+
+config XENO_DRIVERS_16550A_PCI
+	depends on PCI && (XENO_DRIVERS_16550A_PIO || XENO_DRIVERS_16550A_ANY)
+	bool "PCI board support"
+	default n
+	help
+	This option activates support for PCI serial boards.
+
+config XENO_DRIVERS_16550A_PCI_MOXA
+	depends on XENO_DRIVERS_16550A_PCI
+	bool "Moxa PCI boards"
+	default n
+	help
+	This option activates support for the following Moxa boards:
+	PCI Serial Boards:
+	  C104H/PCI, C168H/PCI
+	  CP-114, CP-132
+	Universal PCI Serial Boards:
+	  CP-102U, CP-102UL, CP-104U
+	  CP-112UL, CP-114UL, CP-118U
+	  CP-132U, CP-134U, CP-138U
+	  CP-168U
+
+config XENO_DRIVERS_MPC52XX_UART
+	depends on PPC_MPC52xx
+	tristate "MPC52xx PSC UART driver"
+	help
+	Real-time UART driver for the PSC on the MPC5200 processor.
+
+config XENO_DRIVERS_IMX_UART
+	depends on ARCH_IMX || ARCH_MXC
+	tristate "RT IMX UART driver"
+	select RATIONAL
+	help
+	Real-time UART driver for the Freescale Semiconductor MXC Internal
+	UART compatible controllers.
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/serial/Makefile
new file mode 100644
index 0000000..2705d83
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/Makefile
@@ -0,0 +1,8 @@
+
+obj-$(CONFIG_XENO_DRIVERS_16550A) += xeno_16550A.o
+obj-$(CONFIG_XENO_DRIVERS_MPC52XX_UART) += xeno_mpc52xx_uart.o
+obj-$(CONFIG_XENO_DRIVERS_IMX_UART) += xeno_imx_uart.o
+
+xeno_16550A-y := 16550A.o
+xeno_mpc52xx_uart-y := mpc52xx_uart.o
+xeno_imx_uart-y := rt_imx_uart.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/mpc52xx_uart.c b/kernel/xenomai-v3.2.4/kernel/drivers/serial/mpc52xx_uart.c
new file mode 100644
index 0000000..fe469d5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/mpc52xx_uart.c
@@ -0,0 +1,1438 @@
+/*
+ * Copyright (C) 2011 Wolfgang Grandegger <wg@denx.de>.
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/version.h>
+#include <linux/module.h>
+#include <linux/of.h>
+#include <linux/slab.h>
+#include <linux/of_platform.h>
+#include <linux/io.h>
+
+#include <asm/mpc52xx.h>
+#include <asm/mpc52xx_psc.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_DESCRIPTION("RTDM-based driver for MPC52xx UARTs");
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>");
+MODULE_VERSION("1.0.0");
+MODULE_LICENSE("GPL");
+
+#define RT_MPC52XX_UART_DRVNAM	"xeno_mpc52xx_uart"
+
+#define IN_BUFFER_SIZE		512
+#define OUT_BUFFER_SIZE		512
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+struct rt_mpc52xx_uart_port {
+	const struct device *dev;
+	struct mpc52xx_psc __iomem *psc;
+	struct mpc52xx_psc_fifo __iomem *fifo;
+	unsigned int uartclk;
+	int irq;
+	int num;
+};
+
+struct rt_mpc52xx_uart_ctx {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+	int mcr_status;			/* emulated MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+
+	unsigned int imr_status;	/* interrupt mask register cache */
+	int tx_empty;			/* shift register empty flag */
+
+	struct rt_mpc52xx_uart_port *port; /* Port related data */
+};
+
+static const struct rtser_config default_config = {
+	.config_mask = 0xFFFF,
+	.baud_rate = RTSER_DEF_BAUD,
+	.parity = RTSER_DEF_PARITY,
+	.data_bits = RTSER_DEF_BITS,
+	.stop_bits = RTSER_DEF_STOPB,
+	.handshake = RTSER_DEF_HAND,
+	.fifo_depth = RTSER_DEF_FIFO_DEPTH,
+	.rx_timeout = RTSER_DEF_TIMEOUT,
+	.tx_timeout = RTSER_DEF_TIMEOUT,
+	.event_timeout = RTSER_DEF_TIMEOUT,
+	.timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY,
+	.event_mask = RTSER_DEF_EVENT_MASK,
+	.rs485 = RTSER_DEF_RS485,
+};
+
+/* lookup table for matching device nodes to index numbers */
+static struct device_node *rt_mpc52xx_uart_nodes[MPC52xx_PSC_MAXNUM];
+
+static inline void psc_fifo_init(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	out_8(&ctx->port->fifo->rfcntl, 0x00);
+	out_be16(&ctx->port->fifo->rfalarm, 0x1ff);
+	out_8(&ctx->port->fifo->tfcntl, 0x07);
+	out_be16(&ctx->port->fifo->tfalarm, 0x80);
+}
+
+static inline int psc_raw_rx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_RXRDY;
+}
+
+static inline int psc_raw_tx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_TXRDY;
+}
+
+static inline int psc_rx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_isr) &
+		ctx->imr_status & MPC52xx_PSC_IMR_RXRDY;
+}
+
+static int psc_tx_rdy(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_isr) &
+		ctx->imr_status & MPC52xx_PSC_IMR_TXRDY;
+}
+
+static inline int psc_tx_empty(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_be16(&ctx->port->psc->mpc52xx_psc_status) &
+		MPC52xx_PSC_SR_TXEMP;
+}
+
+static inline void psc_start_tx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status |= MPC52xx_PSC_IMR_TXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_stop_tx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_TXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_stop_rx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_RXRDY;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static inline void psc_write_char(struct rt_mpc52xx_uart_ctx *ctx,
+				  unsigned char c)
+{
+	out_8(&ctx->port->psc->mpc52xx_psc_buffer_8, c);
+}
+
+static inline unsigned char psc_read_char(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	return in_8(&ctx->port->psc->mpc52xx_psc_buffer_8);
+}
+
+static inline void psc_disable_ints(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	ctx->imr_status = 0;
+	out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static void psc_set_mcr(struct rt_mpc52xx_uart_ctx *ctx,
+			unsigned int mcr)
+{
+	if (mcr & RTSER_MCR_RTS)
+		out_8(&ctx->port->psc->op1, MPC52xx_PSC_OP_RTS);
+	else
+		out_8(&ctx->port->psc->op0, MPC52xx_PSC_OP_RTS);
+}
+
+/* FIXME: status interrupts not yet handled properly */
+static unsigned int psc_get_msr(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	unsigned int msr = RTSER_MSR_DSR;
+	u8 status = in_8(&ctx->port->psc->mpc52xx_psc_ipcr);
+
+	if (!(status & MPC52xx_PSC_CTS))
+		msr |= RTSER_MSR_CTS;
+	if (!(status & MPC52xx_PSC_DCD))
+		msr |= RTSER_MSR_DCD;
+
+	return msr;
+}
+
+static void psc_enable_ms(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	struct mpc52xx_psc *psc = ctx->port->psc;
+
+	/* clear D_*-bits by reading them */
+	in_8(&psc->mpc52xx_psc_ipcr);
+	/* enable CTS and DCD as IPC interrupts */
+	out_8(&psc->mpc52xx_psc_acr, MPC52xx_PSC_IEC_CTS | MPC52xx_PSC_IEC_DCD);
+
+	ctx->imr_status |= MPC52xx_PSC_IMR_IPC;
+	out_be16(&psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static void psc_disable_ms(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	struct mpc52xx_psc *psc = ctx->port->psc;
+
+	/* disable CTS and DCD as IPC interrupts */
+	out_8(&psc->mpc52xx_psc_acr, 0);
+
+	ctx->imr_status &= ~MPC52xx_PSC_IMR_IPC;
+	out_be16(&psc->mpc52xx_psc_imr, ctx->imr_status);
+}
+
+static struct of_device_id mpc5200_gpio_ids[] = {
+	{ .compatible = "fsl,mpc5200-gpio", },
+	{ .compatible = "mpc5200-gpio", },
+	{}
+};
+
+static void rt_mpc52xx_uart_init_hw(struct rt_mpc52xx_uart_port *port)
+{
+	struct mpc52xx_gpio __iomem *gpio;
+	struct device_node *gpio_np;
+	u32 port_config;
+
+	if (port->num == 6) {
+		gpio_np = of_find_matching_node(NULL, mpc5200_gpio_ids);
+		gpio = of_iomap(gpio_np, 0);
+		of_node_put(gpio_np);
+		if (!gpio) {
+			dev_err(port->dev, "PSC%d port_config: "
+				"couldn't map gpio ids\n", port->num);
+			return;
+		}
+		port_config = in_be32(&gpio->port_config);
+		port_config &= 0xFF0FFFFF; /* port config for PSC6 */
+		port_config |= 0x00500000;
+		dev_dbg(port->dev, "PSC%d port_config: old:%x new:%x\n",
+			port->num, in_be32(&gpio->port_config), port_config);
+		out_be32(&gpio->port_config, port_config);
+		iounmap(gpio);
+	}
+}
+
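+/* Push one byte into the RX ring; on overflow a soft overrun is flagged
+   and the pending count stays capped (the new byte replaced the oldest). */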
+static inline void rt_mpc52xx_uart_put_char(struct rt_mpc52xx_uart_ctx *ctx,
+					    uint64_t *timestamp,
+					    unsigned char ch)
+{
+	ctx->in_buf[ctx->in_tail] = ch;
+	if (ctx->in_history)
+		ctx->in_history[ctx->in_tail] = *timestamp;
+	ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
+	if (++ctx->in_npend > IN_BUFFER_SIZE) {
+		ctx->status |= RTSER_SOFT_OVERRUN_ERR;
+		ctx->in_npend--;
+	}
+}
+
+static inline int rt_mpc52xx_uart_rx_interrupt(struct rt_mpc52xx_uart_ctx *ctx,
+					       uint64_t *timestamp)
+{
+	int rbytes = 0;
+	int psc_status;
+
+	psc_status = in_be16(&ctx->port->psc->mpc52xx_psc_status);
+	while (psc_status & MPC52xx_PSC_SR_RXRDY) {
+		/* read input character */
+		rt_mpc52xx_uart_put_char(ctx, timestamp, psc_read_char(ctx));
+		rbytes++;
+
+		/* save new errors */
+		if (psc_status & (MPC52xx_PSC_SR_OE | MPC52xx_PSC_SR_PE |
+				  MPC52xx_PSC_SR_FE | MPC52xx_PSC_SR_RB)) {
+			if (psc_status & MPC52xx_PSC_SR_PE)
+				ctx->status |= RTSER_LSR_PARITY_ERR;
+			if (psc_status & MPC52xx_PSC_SR_FE)
+				ctx->status |= RTSER_LSR_FRAMING_ERR;
+			if (psc_status & MPC52xx_PSC_SR_RB)
+				ctx->status |= RTSER_LSR_BREAK_IND;
+
+			/*
+			 * Overrun is special, since it's reported
+			 * immediately, and doesn't affect the current
+			 * character.
+			 */
+			if (psc_status & MPC52xx_PSC_SR_OE) {
+				ctx->status |= RTSER_LSR_OVERRUN_ERR;
+				rt_mpc52xx_uart_put_char(ctx, timestamp, 0);
+				rbytes++;
+			}
+
+			/* Clear error condition */
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_RST_ERR_STAT);
+		}
+
+		psc_status = in_be16(&ctx->port->psc->mpc52xx_psc_status);
+	}
+
+	return rbytes;
+}
+
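+/* Feed the TX FIFO from out_buf; in RS485 mode assert RTS for the frame
+   duration and arm the TX-empty interrupt so the IRQ handler can drop
+   RTS again once the shifter runs dry. */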
+static inline int rt_mpc52xx_uart_tx_interrupt(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	while (psc_raw_tx_rdy(ctx) && (ctx->out_npend > 0)) {
+		if (ctx->config.rs485 &&
+		    (ctx->mcr_status & RTSER_MCR_RTS) == 0) {
+			/* switch RTS */
+			ctx->mcr_status |= RTSER_MCR_RTS;
+			dev_dbg(ctx->port->dev, "Set RTS, mcr_status=%#x\n",
+				ctx->mcr_status);
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+		if (ctx->config.rs485 ||
+		    ((ctx->config.event_mask & RTSER_EVENT_TXEMPTY) &&
+		     (ctx->imr_status & MPC52xx_PSC_IMR_TXEMP) == 0)) {
+			/* enable tx-empty interrupt */
+			ctx->imr_status |= MPC52xx_PSC_IMR_TXEMP;
+			dev_dbg(ctx->port->dev, "Enable TXEMP interrupt, "
+				"imr_status=%#x\n", ctx->imr_status);
+			out_be16(&ctx->port->psc->mpc52xx_psc_imr,
+				 ctx->imr_status);
+		}
+
+		psc_write_char(ctx, ctx->out_buf[ctx->out_head++]);
+		ctx->out_head &= OUT_BUFFER_SIZE - 1;
+		ctx->out_npend--;
+	}
+
+	return ctx->out_npend;
+}
+
+static int rt_mpc52xx_uart_interrupt(rtdm_irq_t *irq_context)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	uint64_t timestamp = rtdm_clock_read();
+	int rbytes = 0;
+	int events = 0;
+	int ret = RTDM_IRQ_NONE;
+	int goon = 1;
+	int n;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_mpc52xx_uart_ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	while (goon) {
+		goon = 0;
+		if (psc_rx_rdy(ctx)) {
+			dev_dbg(ctx->port->dev, "RX interrupt\n");
+			n = rt_mpc52xx_uart_rx_interrupt(ctx, &timestamp);
+			if (n) {
+				rbytes += n;
+				events |= RTSER_EVENT_RXPEND;
+			}
+		}
+		if (psc_tx_rdy(ctx))
+			goon |= rt_mpc52xx_uart_tx_interrupt(ctx);
+
+		if (psc_tx_empty(ctx)) {
+			if (ctx->config.rs485 &&
+			    (ctx->mcr_status & RTSER_MCR_RTS)) {
+				/* reset RTS */
+				ctx->mcr_status &= ~RTSER_MCR_RTS;
+				dev_dbg(ctx->port->dev, "Reset RTS, "
+					"mcr_status=%#x\n", ctx->mcr_status);
+				psc_set_mcr(ctx, ctx->mcr_status);
+			}
+			/* disable tx-empty interrupt */
+			ctx->imr_status &= ~MPC52xx_PSC_IMR_TXEMP;
+			dev_dbg(ctx->port->dev, "Disable TXEMP interrupt, "
+				"imr_status=%#x\n", ctx->imr_status);
+			out_be16(&ctx->port->psc->mpc52xx_psc_imr,
+				 ctx->imr_status);
+
+			events |= RTSER_EVENT_TXEMPTY;
+			ctx->tx_empty = 1;
+		}
+
+		if (ctx->config.event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO)) {
+			u8 status = in_8(&ctx->port->psc->mpc52xx_psc_ipcr);
+
+			if (status & MPC52xx_PSC_D_DCD)
+				events |= (status & MPC52xx_PSC_DCD) ?
+					RTSER_EVENT_MODEMLO :
+					RTSER_EVENT_MODEMHI;
+			if (status & MPC52xx_PSC_D_CTS)
+				events |= (status & MPC52xx_PSC_CTS) ?
+					RTSER_EVENT_MODEMLO :
+					RTSER_EVENT_MODEMHI;
+			dev_dbg(ctx->port->dev, "Modem line changed, "
+				"events=%#x\n", events);
+		}
+
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else
+			ctx->in_nwait -= rbytes;
+	}
+
+	if (ctx->status)
+		events |= RTSER_EVENT_ERRPEND;
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->imr_status & MPC52xx_PSC_IMR_TXRDY) &&
+	    (ctx->out_npend == 0)) {
+		psc_stop_tx(ctx);
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	rtdm_lock_put(&ctx->lock);
+
+	return ret;
+}
+
+static int rt_mpc52xx_uart_set_config(struct rt_mpc52xx_uart_ctx *ctx,
+				      const struct rtser_config *config,
+				      uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	int err = 0;
+
+	/* make line configuration atomic and IRQ-safe */
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD)
+		ctx->config.baud_rate = config->baud_rate;
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+	if (config->config_mask & RTSER_SET_HANDSHAKE)
+		ctx->config.handshake = config->handshake;
+
+	if (config->config_mask & (RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS | RTSER_SET_STOP_BITS |
+				   RTSER_SET_BAUD | RTSER_SET_HANDSHAKE)) {
+		struct mpc52xx_psc *psc = ctx->port->psc;
+		unsigned char mr1 = 0, mr2 = 0;
+		unsigned int divisor;
+		u16 prescaler;
+
+		switch (ctx->config.data_bits) {
+		case RTSER_5_BITS:
+			mr1 |= MPC52xx_PSC_MODE_5_BITS;
+			break;
+		case RTSER_6_BITS:
+			mr1 |= MPC52xx_PSC_MODE_6_BITS;
+			break;
+		case RTSER_7_BITS:
+			mr1 |= MPC52xx_PSC_MODE_7_BITS;
+			break;
+		case RTSER_8_BITS:
+		default:
+			mr1 |= MPC52xx_PSC_MODE_8_BITS;
+			break;
+		}
+
+		switch (ctx->config.parity) {
+		case RTSER_ODD_PARITY:
+			mr1 |= MPC52xx_PSC_MODE_PARODD;
+			break;
+		case RTSER_EVEN_PARITY:
+			mr1 |= MPC52xx_PSC_MODE_PAREVEN;
+			break;
+		case RTSER_NO_PARITY:
+		default:
+			mr1 |= MPC52xx_PSC_MODE_PARNONE;
+			break;
+		}
+
+		if (ctx->config.stop_bits == RTSER_2_STOPB)
+			mr2 |= (ctx->config.data_bits == RTSER_5_BITS) ?
+				MPC52xx_PSC_MODE_ONE_STOP_5_BITS :
+				MPC52xx_PSC_MODE_TWO_STOP;
+		else
+			mr2 |= MPC52xx_PSC_MODE_ONE_STOP;
+
+		if (ctx->config.handshake == RTSER_RTSCTS_HAND) {
+			mr1 |= MPC52xx_PSC_MODE_RXRTS;
+			mr2 |= MPC52xx_PSC_MODE_TXCTS;
+		} else if (config->config_mask & RTSER_SET_HANDSHAKE) {
+			ctx->mcr_status =
+				RTSER_MCR_DTR | RTSER_MCR_RTS | RTSER_MCR_OUT2;
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+
+		/* Reset the TX & RX */
+		out_8(&psc->command, MPC52xx_PSC_RST_RX);
+		out_8(&psc->command, MPC52xx_PSC_RST_TX);
+
+		/* Send new mode settings */
+		out_8(&psc->command, MPC52xx_PSC_SEL_MODE_REG_1);
+		out_8(&psc->mode, mr1);
+		out_8(&psc->mode, mr2);
+
+		/* Set baudrate */
+		divisor = (ctx->port->uartclk + 16 * ctx->config.baud_rate) /
+			(32 * ctx->config.baud_rate);
+		prescaler = 0xdd00;
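+		/* 0xdd00 selects the /32 prescaler (as in the Linux
+		   mpc52xx_uart driver); adding 16*baud before dividing
+		   rounds the divisor to the nearest integer. */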
+		out_be16(&psc->mpc52xx_psc_clock_select, prescaler);
+		out_8(&psc->ctur, divisor >> 8);
+		out_8(&psc->ctlr, divisor & 0xff);
+
+		dev_info(ctx->port->dev,
+			 "mr1=%#x mr2=%#x baud=%d divisor=%d prescaler=%x\n",
+			 mr1, mr2, ctx->config.baud_rate, divisor, prescaler);
+
+		/* Reenable TX & RX */
+		out_8(&psc->command, MPC52xx_PSC_TX_ENABLE);
+		out_8(&psc->command, MPC52xx_PSC_RX_ENABLE);
+
+		/* Enable RX */
+		ctx->imr_status |= MPC52xx_PSC_IMR_RXRDY;
+		out_be16(&ctx->port->psc->mpc52xx_psc_imr, ctx->imr_status);
+
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+	}
+
+	if (config->config_mask & RTSER_SET_RS485) {
+		ctx->config.rs485 = config->rs485;
+		if (config->rs485) {
+			/* reset RTS */
+			ctx->mcr_status &= ~RTSER_MCR_RTS;
+			dev_dbg(ctx->port->dev, "Reset RTS, mcr_status=%#x\n",
+				ctx->mcr_status);
+			psc_set_mcr(ctx, ctx->mcr_status);
+		}
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	   care not to use and change timeouts at the same time. */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
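+	/* Buffer ownership is handed over via *in_history_ptr: either the
+	   caller's pre-allocated buffer moves into the context, or the old
+	   one moves out for the caller to kfree(). */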
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		/* change timestamp history atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		/* change event mask atomically */
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND) &&
+		    ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+
+		if ((config->event_mask & RTSER_EVENT_TXEMPTY) &&
+		    !ctx->out_npend && ctx->tx_empty)
+			ctx->ioc_events |= RTSER_EVENT_TXEMPTY;
+
+		if (config->event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			psc_enable_ms(ctx);
+		else
+			psc_disable_ms(ctx);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+	}
+
+	return err;
+}
+
+void rt_mpc52xx_uart_cleanup_ctx(struct rt_mpc52xx_uart_ctx *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+static int rt_mpc52xx_uart_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	uint64_t *dummy;
+	int err;
+
+	ctx = rtdm_fd_to_private(fd);
+	ctx->port = (struct rt_mpc52xx_uart_port *)rtdm_fd_device(fd)->device_data;
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	psc_disable_ints(ctx);
+
+	/* Reset/activate the port, clear and enable interrupts */
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RST_RX);
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RST_TX);
+
+	out_be32(&ctx->port->psc->sicr, 0);	/* UART mode DCD ignored */
+
+	psc_fifo_init(ctx);
+
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_TX_ENABLE);
+	out_8(&ctx->port->psc->command, MPC52xx_PSC_RX_ENABLE);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rt_mpc52xx_uart_set_config(ctx, &default_config, &dummy);
+
+	err = rtdm_irq_request(&ctx->irq_handle, ctx->port->irq,
+			       rt_mpc52xx_uart_interrupt, 0,
+			       rtdm_fd_device(fd)->name, ctx);
+	if (err) {
+		psc_set_mcr(ctx, 0);
+		rt_mpc52xx_uart_cleanup_ctx(ctx);
+
+		return err;
+	}
+
+	return 0;
+}
+
+static void rt_mpc52xx_uart_close(struct rtdm_fd *fd)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	uint64_t *in_history;
+	rtdm_lockctx_t lock_ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/* reset DTR and RTS */
+	psc_set_mcr(ctx, 0);
+
+	psc_disable_ints(ctx);
+
+	in_history = ctx->in_history;
+	ctx->in_history = NULL;
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	rt_mpc52xx_uart_cleanup_ctx(ctx);
+
+	kfree(in_history);
+}
+
+static int rt_mpc52xx_uart_ioctl(struct rtdm_fd *fd,
+				 unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_mpc52xx_uart_ctx *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &ctx->config,
+						     sizeof(struct
+							    rtser_config));
+		else
+			memcpy(arg, &ctx->config, sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err = rtdm_safe_copy_from_user(fd, &config_buf,
+						       arg,
+						       sizeof(struct
+							      rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			/*
+			 * Reflect the call to non-RT as we will likely
+			 * allocate or free the buffer.
+			 */
+			if (rtdm_in_rt_context())
+				return -ENOSYS;
+
+			if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_mpc52xx_uart_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status = status;
+			status_buf.modem_status = psc_get_msr(ctx);
+
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &status_buf,
+						     sizeof(struct
+							    rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status = status;
+			((struct rtser_status *)arg)->modem_status =
+				psc_get_msr(ctx);
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err = rtdm_safe_copy_to_user(fd, arg,
+						     &ctx->mcr_status,
+						     sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		if ((new_mcr & RTSER_MCR_RTS) != RTSER_MCR_RTS)
+			dev_warn(ctx->port->dev,
+				 "MCR: Only RTS is supported\n");
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr & RTSER_MCR_RTS;
+		psc_set_mcr(ctx, ctx->mcr_status);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &= ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg, &ev,
+						   sizeof(struct
+							  rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+	      wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTSER_BREAK_SET)
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_START_BRK);
+		else
+			out_8(&ctx->port->psc->command,
+			      MPC52xx_PSC_STOP_BRK);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
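+	/*
+	 * Disabled leftover from the 16550A driver: FCR/RHR and the
+	 * rt_mpc52xx_uart_reg_* accessors referenced below have no PSC
+	 * counterpart, hence the ISREADY guard.
+	 */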
+#ifdef ISREADY
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_mpc52xx_uart_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_mpc52xx_uart_reg_out(mode, base, FCR, fcr);
+			rt_mpc52xx_uart_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+#endif
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
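+/*
+ * Read path: drain the RX ring in at most two contiguous chunks per pass
+ * (tail wrap). The context lock is dropped around the copies to user
+ * space and re-taken to update head/in_npend; this is safe because
+ * in_lock admits a single reader and the IRQ handler only advances the
+ * tail.
+ */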
+static ssize_t rt_mpc52xx_uart_read(struct rtdm_fd *fd, void *buf,
+				    size_t nbyte)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				   separately. */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			if ((ctx->in_npend -= block) == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			   non-blocking call or contains the error
+			   returned by rtdm_event_wait[_until] */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				   before exit. */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = read;
+
+	return ret;
+}
+
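+/*
+ * Write path: out_lock serializes writers while the IRQ handler drains
+ * the TX ring into the FIFO. As in read(), the context lock is released
+ * around the copies from user space.
+ */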
+static ssize_t rt_mpc52xx_uart_write(struct rtdm_fd *fd,
+				     const void *buf,
+				     size_t nbyte)
+{
+	struct rt_mpc52xx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				   end separately. */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos,
+				       subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
+			/* Mark shift register not empty */
+			ctx->ioc_events &= ~RTSER_EVENT_TXEMPTY;
+			ctx->tx_empty = 0;
+
+			psc_start_tx(ctx);
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->out_event,
+					   ctx->config.tx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				   return immediately. */
+				return -EBADF;
+			}
+			if (ret == -EWOULDBLOCK) {
+				/* Fix error code for non-blocking mode. */
+				ret = -EAGAIN;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT) || (ret == -EINTR)))
+		ret = written;
+
+	return ret;
+}
+
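+/*
+ * RTDM_EXCLUSIVE restricts each port to one open file descriptor at a
+ * time; the RTDM core allocates context_size bytes of per-open context
+ * on our behalf.
+ */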
+static struct rtdm_driver mpc52xx_uart_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(mpc52xx_uart,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_count		= MPC52xx_PSC_MAXNUM,
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.context_size		= sizeof(struct rt_mpc52xx_uart_ctx),
+	.ops = {
+		.open		= rt_mpc52xx_uart_open,
+		.close		= rt_mpc52xx_uart_close,
+		.ioctl_rt	= rt_mpc52xx_uart_ioctl,
+		.ioctl_nrt	= rt_mpc52xx_uart_ioctl,
+		.read_rt	= rt_mpc52xx_uart_read,
+		.write_rt	= rt_mpc52xx_uart_write,
+	},
+};
+
+static int rt_mpc52xx_uart_of_probe(struct platform_device *op)
+{
+	struct rt_mpc52xx_uart_port *port;
+	struct rtdm_device *dev;
+	struct resource res;
+	int ret, idx;
+
+	dev_dbg(&op->dev, "mpc52xx_uart_probe(op=%p)\n", op);
+
+	/* Check validity & presence */
+	for (idx = 0; idx < MPC52xx_PSC_MAXNUM; idx++)
+		if (rt_mpc52xx_uart_nodes[idx] == op->dev.of_node)
+			break;
+	if (idx >= MPC52xx_PSC_MAXNUM)
+		return -EINVAL;
+
+	port = kmalloc(sizeof(*port), GFP_KERNEL);
+	if (!port) {
+		dev_err(&op->dev, "Could allocate port space\n");
+		return -ENOMEM;
+	}
+	port->dev = &op->dev;
+
+	/*
+	 * Set the uart clock to the input clock of the psc, the different
+	 * prescalers are taken into account in the set_baudrate() methods
+	 * of the respective chip
+	 */
+	port->uartclk = mpc5xxx_get_bus_frequency(op->dev.of_node);
+	if (port->uartclk == 0) {
+		dev_err(&op->dev, "Could not find uart clock frequency\n");
+		ret = -EINVAL;
+		goto out_kfree_port;
+	}
+
+	/* Fetch register locations */
+	ret = of_address_to_resource(op->dev.of_node, 0, &res);
+	if (ret) {
+		dev_err(&op->dev, "Could not get resources\n");
+		goto out_kfree_port;
+	}
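+	/* Derive the PSC index from the register block offset: on the
+	 * MPC5200, PSC1..PSC6 sit at 0x2000/0x2200/0x2400/0x2600/0x2800/
+	 * 0x2c00, which the arithmetic below maps to ports 1..6. */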
+	port->num = ((res.start >> 8) & 0xf) / 2;
+	if (port->num < 6)
+		port->num++;
+
+	if (!request_mem_region(res.start, resource_size(&res),
+				RT_MPC52XX_UART_DRVNAM)) {
+		ret = -EBUSY;
+		goto out_kfree_port;
+	}
+
+	port->psc = ioremap(res.start, resource_size(&res));
+	if (!port->psc) {
+		dev_err(&op->dev, "Could not map PSC registers\n");
+		ret = -ENOMEM;
+		goto out_release_mem_region;
+	}
+	port->fifo = (struct mpc52xx_psc_fifo __iomem *)(port->psc + 1);
+
+	port->irq = irq_of_parse_and_map(op->dev.of_node, 0);
+	if (port->irq <= 0) {
+		dev_err(&op->dev, "Could not get irq\n");
+		ret = -ENODEV;
+		goto out_iounmap;
+	}
+
+	dev = kmalloc(sizeof(struct rtdm_device), GFP_KERNEL);
+	if (!dev) {
+		dev_err(&op->dev, "Could allocate device context\n");
+		ret = -ENOMEM;
+		goto out_dispose_irq_mapping;
+	}
+
+	dev->driver = &mpc52xx_uart_driver;
+	dev->label = "rtserPSC%d";
+	dev->device_data = port;
+
+	rt_mpc52xx_uart_init_hw(port);
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		goto out_kfree_dev;
+
+	dev_set_drvdata(&op->dev, dev);
+
+	dev_info(&op->dev, "%s on PSC%d at 0x%p, irq=%d, clk=%i\n",
+		 dev->name, port->num, port->psc, port->irq,
+		 port->uartclk);
+
+	return 0;
+
+out_kfree_dev:
+	kfree(dev);
+out_dispose_irq_mapping:
+	irq_dispose_mapping(port->irq);
+out_iounmap:
+	iounmap(port->psc);
+out_release_mem_region:
+	release_mem_region(res.start, resource_size(&res));
+out_kfree_port:
+	kfree(port);
+
+	return ret;
+}
+
+static int rt_mpc52xx_uart_of_remove(struct platform_device *op)
+{
+	struct rtdm_device *dev = dev_get_drvdata(&op->dev);
+	struct rt_mpc52xx_uart_port *port = dev->device_data;
+	struct resource res;
+
+	dev_set_drvdata(&op->dev, NULL);
+
+	rtdm_dev_unregister(dev);
+	irq_dispose_mapping(port->irq);
+	iounmap(port->psc);
+	if (!of_address_to_resource(op->dev.of_node, 0, &res))
+		release_mem_region(res.start, resource_size(&res));
+	kfree(port);
+	kfree(dev);
+
+	return 0;
+}
+
+static struct of_device_id rt_mpc52xx_uart_of_match[] = {
+	{ .compatible = "fsl,mpc5200b-psc-uart", },
+	{ .compatible = "fsl,mpc5200-psc-uart", },
+	{},
+};
+MODULE_DEVICE_TABLE(of, rt_mpc52xx_uart_of_match);
+
+static struct platform_driver rt_mpc52xx_uart_of_driver = {
+	.probe = rt_mpc52xx_uart_of_probe,
+	.remove	=  rt_mpc52xx_uart_of_remove,
+	.driver = {
+		.name = "rt-mpc52xx-psc-uart",
+		.owner = THIS_MODULE,
+		.of_match_table = rt_mpc52xx_uart_of_match,
+	},
+};
+
+static void rt_mpc52xx_uart_of_enumerate(void)
+{
+	struct device_node *np;
+	int idx = 0;
+
+	/* Assign index to each PSC in device tree like the Linux driver does */
+	for_each_matching_node(np, rt_mpc52xx_uart_of_match) {
+		of_node_get(np);
+		rt_mpc52xx_uart_nodes[idx] = np;
+		idx++;
+	}
+}
+
+static int __init rt_mpc52xx_uart_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	printk(KERN_INFO "RTserial: MPC52xx PSC UART driver\n");
+
+	rt_mpc52xx_uart_of_enumerate();
+
+	ret = platform_driver_register(&rt_mpc52xx_uart_of_driver);
+	if (ret) {
+		printk(KERN_ERR
+		       "%s: Could not register driver (err=%d)\n",
+		       __func__, ret);
+		return ret;
+	}
+
+	return 0;
+}
+
+static void __exit rt_mpc52xx_uart_exit(void)
+{
+	platform_driver_unregister(&rt_mpc52xx_uart_of_driver);
+}
+
+module_init(rt_mpc52xx_uart_init);
+module_exit(rt_mpc52xx_uart_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/serial/rt_imx_uart.c b/kernel/xenomai-v3.2.4/kernel/drivers/serial/rt_imx_uart.c
new file mode 100644
index 0000000..00fdd5f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/serial/rt_imx_uart.c
@@ -0,0 +1,1651 @@
+/*
+ * Copyright 2012 Wolfgang Grandegger <wg@denx.de>
+ *
+ * Derived from the Linux IMX UART driver (drivers/tty/serial/imx.c)
+ * and 16650A RTserial driver.
+ *
+ * Copyright (C) 2005-2007 Jan Kiszka <jan.kiszka@web.de>.
+ * Copyright (C) 2004 Pengutronix
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include <linux/tty.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/init.h>
+#include <linux/serial.h>
+#include <linux/console.h>
+#include <linux/platform_device.h>
+#include <linux/sysrq.h>
+#include <linux/dma-mapping.h>
+#include <linux/clk.h>
+#include <linux/delay.h>
+#include <linux/rational.h>
+#include <linux/io.h>
+#include <asm/irq.h>
+#include <asm/dma.h>
+#include <asm/div64.h>
+#include <linux/slab.h>
+#include <linux/of.h>
+#include <linux/of_device.h>
+
+#include <rtdm/serial.h>
+#include <rtdm/driver.h>
+
+MODULE_AUTHOR("Wolfgang Grandegger <wg@denx.de>");
+MODULE_DESCRIPTION("RTDM-based driver for IMX UARTs");
+MODULE_VERSION("1.0.0");
+MODULE_LICENSE("GPL");
+
+#define DRIVER_NAME	"xeno_imx_uart"
+
+/* Register definitions */
+#define URXD0	0x0  /* Receiver Register */
+#define URTX0	0x40 /* Transmitter Register */
+#define UCR1	0x80 /* Control Register 1 */
+#define UCR2	0x84 /* Control Register 2 */
+#define UCR3	0x88 /* Control Register 3 */
+#define UCR4	0x8c /* Control Register 4 */
+#define UFCR	0x90 /* FIFO Control Register */
+#define USR1	0x94 /* Status Register 1 */
+#define USR2	0x98 /* Status Register 2 */
+#define UESC	0x9c /* Escape Character Register */
+#define UTIM	0xa0 /* Escape Timer Register */
+#define UBIR	0xa4 /* BRM Incremental Register */
+#define UBMR	0xa8 /* BRM Modulator Register */
+#define UBRC	0xac /* Baud Rate Count Register */
+#define MX2_ONEMS 0xb0 /* One Millisecond register */
+#define IMX1_UTS 0xd0 /* UART Test Register on i.mx1 */
+#define IMX21_UTS 0xb4 /* UART Test Register on all other i.mx*/
+
+/* UART Control Register Bit Fields.*/
+#define URXD_CHARRDY	(1<<15)
+#define URXD_ERR	(1<<14)
+#define URXD_OVRRUN	(1<<13)
+#define URXD_FRMERR	(1<<12)
+#define URXD_BRK	(1<<11)
+#define URXD_PRERR	(1<<10)
+#define UCR1_ADEN	(1<<15) /* Auto detect interrupt */
+#define UCR1_ADBR	(1<<14) /* Auto detect baud rate */
+#define UCR1_TRDYEN	(1<<13) /* Transmitter ready interrupt enable */
+#define UCR1_IDEN	(1<<12) /* Idle condition interrupt */
+#define UCR1_RRDYEN	(1<<9)	/* Recv ready interrupt enable */
+#define UCR1_RDMAEN	(1<<8)	/* Recv ready DMA enable */
+#define UCR1_IREN	(1<<7)	/* Infrared interface enable */
+#define UCR1_TXMPTYEN	(1<<6)	/* Transmitter empty interrupt enable */
+#define UCR1_RTSDEN	(1<<5)	/* RTS delta interrupt enable */
+#define UCR1_SNDBRK	(1<<4)	/* Send break */
+#define UCR1_TDMAEN	(1<<3)	/* Transmitter ready DMA enable */
+#define MX1_UCR1_UARTCLKEN	(1<<2)	/* UART clock enabled, mx1 only */
+#define UCR1_DOZE	(1<<1)	/* Doze */
+#define UCR1_UARTEN	(1<<0)	/* UART enabled */
+#define UCR2_ESCI	(1<<15) /* Escape seq interrupt enable */
+#define UCR2_IRTS	(1<<14) /* Ignore RTS pin */
+#define UCR2_CTSC	(1<<13) /* CTS pin control */
+#define UCR2_CTS	(1<<12) /* Clear to send */
+#define UCR2_ESCEN	(1<<11) /* Escape enable */
+#define UCR2_PREN	(1<<8)	/* Parity enable */
+#define UCR2_PROE	(1<<7)	/* Parity odd/even */
+#define UCR2_STPB	(1<<6)	/* Stop */
+#define UCR2_WS		(1<<5)	/* Word size */
+#define UCR2_RTSEN	(1<<4)	/* Request to send interrupt enable */
+#define UCR2_ATEN	(1<<3)	/* Aging Timer Enable */
+#define UCR2_TXEN	(1<<2)	/* Transmitter enabled */
+#define UCR2_RXEN	(1<<1)	/* Receiver enabled */
+#define UCR2_SRST	(1<<0)	/* SW reset */
+#define UCR3_DTREN	(1<<13) /* DTR interrupt enable */
+#define UCR3_PARERREN	(1<<12) /* Parity enable */
+#define UCR3_FRAERREN	(1<<11) /* Frame error interrupt enable */
+#define UCR3_DSR	(1<<10) /* Data set ready */
+#define UCR3_DCD	(1<<9)	/* Data carrier detect */
+#define UCR3_RI		(1<<8)	/* Ring indicator */
+#define UCR3_ADNIMP	(1<<7)	/* Autobaud Detection Not Improved */
+#define UCR3_RXDSEN	(1<<6)	/* Receive status interrupt enable */
+#define UCR3_AIRINTEN	(1<<5)	/* Async IR wake interrupt enable */
+#define UCR3_AWAKEN	(1<<4)	/* Async wake interrupt enable */
+#define UCR3_DTRDEN	(1<<3)	/* Data Terminal Ready Delta Enable. */
+#define MX1_UCR3_REF25		(1<<3)	/* Ref freq 25 MHz, only on mx1 */
+#define MX1_UCR3_REF30		(1<<2)	/* Ref Freq 30 MHz, only on mx1 */
+#define MX2_UCR3_RXDMUXSEL	(1<<2)	/* RXD Muxed Input Select, on mx2/mx3 */
+#define UCR3_INVT	(1<<1)	/* Inverted Infrared transmission */
+#define UCR3_BPEN	(1<<0)	/* Preset registers enable */
+#define UCR4_CTSTL_SHF	10	/* CTS trigger level shift */
+#define UCR4_CTSTL_MASK	0x3F	/* CTS trigger is 6 bits wide */
+#define UCR4_INVR	(1<<9)	/* Inverted infrared reception */
+#define UCR4_ENIRI	(1<<8)	/* Serial infrared interrupt enable */
+#define UCR4_WKEN	(1<<7)	/* Wake interrupt enable */
+#define UCR4_REF16	(1<<6)	/* Ref freq 16 MHz */
+#define UCR4_IRSC	(1<<5)	/* IR special case */
+#define UCR4_TCEN	(1<<3)	/* Transmit complete interrupt enable */
+#define UCR4_BKEN	(1<<2)	/* Break condition interrupt enable */
+#define UCR4_OREN	(1<<1)	/* Receiver overrun interrupt enable */
+#define UCR4_DREN	(1<<0)	/* Recv data ready interrupt enable */
+#define UFCR_RXTL_SHF	0	/* Receiver trigger level shift */
+#define UFCR_RFDIV	(7<<7)	/* Reference freq divider mask */
+#define UFCR_RFDIV_REG(x)	(((x) < 7 ? 6 - (x) : 6) << 7)
+#define UFCR_TXTL_SHF	10	/* Transmitter trigger level shift */
+#define UFCR_DCEDTE	(1<<6)
+#define USR1_PARITYERR	(1<<15) /* Parity error interrupt flag */
+#define USR1_RTSS	(1<<14) /* RTS pin status */
+#define USR1_TRDY	(1<<13) /* Transmitter ready interrupt/dma flag */
+#define USR1_RTSD	(1<<12) /* RTS delta */
+#define USR1_ESCF	(1<<11) /* Escape seq interrupt flag */
+#define USR1_FRAMERR	(1<<10) /* Frame error interrupt flag */
+#define USR1_RRDY	(1<<9)	/* Receiver ready interrupt/dma flag */
+#define USR1_AGTIM	(1<<8)	/* Ageing Timer Interrupt Flag */
+#define USR1_DTRD	(1<<7)	/* DTR Delta */
+#define USR1_RXDS	(1<<6)	/* Receiver idle interrupt flag */
+#define USR1_AIRINT	(1<<5)	/* Async IR wake interrupt flag */
+#define USR1_AWAKE	(1<<4)	/* Async wake interrupt flag */
+#define USR2_ADET	(1<<15) /* Auto baud rate detect complete */
+#define USR2_TXFE	(1<<14) /* Transmit buffer FIFO empty */
+#define USR2_DTRF	(1<<13) /* DTR edge interrupt flag */
+#define USR2_IDLE	(1<<12) /* Idle condition */
+#define USR2_RIDELT	(1<<10) /* Ring Indicator Delta */
+#define USR2_RIIN	(1<<9)	/* Ring Indicator Input */
+#define USR2_IRINT	(1<<8)	/* Serial infrared interrupt flag */
+#define USR2_WAKE	(1<<7)	/* Wake */
+#define USR2_DCDDELT	(1<<6)	/* Data Carrier Detect Delta */
+#define USR2_DCDIN	(1<<5)	/* Data Carrier Detect Input */
+#define USR2_RTSF	(1<<4)	/* RTS edge interrupt flag */
+#define USR2_TXDC	(1<<3)	/* Transmitter complete */
+#define USR2_BRCD	(1<<2)	/* Break condition */
+#define USR2_ORE	(1<<1)	/* Overrun error */
+#define USR2_RDR	(1<<0)	/* Recv data ready */
+#define UTS_FRCPERR	(1<<13) /* Force parity error */
+#define UTS_LOOP	(1<<12) /* Loop tx and rx */
+#define UTS_TXEMPTY	(1<<6)	/* TxFIFO empty */
+#define UTS_RXEMPTY	(1<<5)	/* RxFIFO empty */
+#define UTS_TXFULL	(1<<4)	/* TxFIFO full */
+#define UTS_RXFULL	(1<<3)	/* RxFIFO full */
+#define UTS_SOFTRST	(1<<0)	/* Software reset */
+
+#define IN_BUFFER_SIZE		4096
+#define OUT_BUFFER_SIZE		4096
+
+#define TX_FIFO_SIZE		32
+
+#define PARITY_MASK		0x03
+#define DATA_BITS_MASK		0x03
+#define STOP_BITS_MASK		0x01
+#define FIFO_MASK		0xC0
+#define EVENT_MASK		0x0F
+
+#define IER_RX			0x01
+#define IER_TX			0x02
+#define IER_STAT		0x04
+#define IER_MODEM		0x08
+
+#define IMX_ISR_PASS_LIMIT	256
+#define UART_CREAD_BIT		256
+
+#define RT_IMX_UART_MAX		5
+
+static int tx_fifo[RT_IMX_UART_MAX];
+module_param_array(tx_fifo, int, NULL, 0400);
+MODULE_PARM_DESC(tx_fifo, "Transmitter FIFO size");
+
+/* i.MX21 type uart runs on all i.mx except i.MX1 and i.MX6q */
+enum imx_uart_type {
+	IMX1_UART,
+	IMX21_UART,
+	IMX53_UART,
+	IMX6Q_UART,
+};
+
+/* Device-type dependent parameters */
+struct imx_uart_data {
+	unsigned int uts_reg;
+	enum imx_uart_type devtype;
+};
+
+struct rt_imx_uart_port {
+	unsigned char __iomem *membase;	/* read/write[bwl] */
+	resource_size_t mapbase;	/* for ioremap */
+	unsigned int irq;		/* irq number */
+	int tx_fifo;			/* TX fifo size*/
+	unsigned int have_rtscts;
+	unsigned int use_dcedte;
+	unsigned int use_hwflow;
+	struct clk *clk_ipg;		/* clock id for UART clock */
+	struct clk *clk_per;		/* clock id for UART clock */
+	const struct imx_uart_data *devdata;
+	unsigned int uartclk;		/* base uart clock */
+	struct rtdm_device rtdm_dev;	/* RTDM device structure */
+};
+
+static struct imx_uart_data imx_uart_devdata[] = {
+	[IMX1_UART] = {
+		.uts_reg = IMX1_UTS,
+		.devtype = IMX1_UART,
+	},
+	[IMX21_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX21_UART,
+	},
+	[IMX53_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX53_UART,
+	},
+	[IMX6Q_UART] = {
+		.uts_reg = IMX21_UTS,
+		.devtype = IMX6Q_UART,
+	},
+};
+
+static const struct platform_device_id rt_imx_uart_id_table[] = {
+	{
+		.name = "imx1-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX1_UART],
+	}, {
+		.name = "imx21-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX21_UART],
+	}, {
+		.name = "imx53-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX53_UART],
+	}, {
+		.name = "imx6q-uart",
+		.driver_data = (kernel_ulong_t) &imx_uart_devdata[IMX6Q_UART],
+	}, {
+		/* sentinel */
+	}
+};
+MODULE_DEVICE_TABLE(platform, rt_imx_uart_id_table);
+
+static const struct of_device_id rt_imx_uart_dt_ids[] = {
+	{
+		.compatible = "fsl,imx6q-uart",
+		.data = &imx_uart_devdata[IMX6Q_UART], },
+	{
+		.compatible = "fsl,imx53-uart",
+		.data = &imx_uart_devdata[IMX53_UART], },
+	{
+		.compatible = "fsl,imx1-uart",
+		.data = &imx_uart_devdata[IMX1_UART], },
+	{
+		.compatible = "fsl,imx21-uart",
+		.data = &imx_uart_devdata[IMX21_UART], },
+	{ /* sentinel */ }
+};
+MODULE_DEVICE_TABLE(of, rt_imx_uart_dt_ids);
+
+struct rt_imx_uart_ctx {
+	struct rtser_config config;	/* current device configuration */
+
+	rtdm_irq_t irq_handle;		/* device IRQ handle */
+	rtdm_lock_t lock;		/* lock to protect context struct */
+
+	int in_head;			/* RX ring buffer, head pointer */
+	int in_tail;			/* RX ring buffer, tail pointer */
+	size_t in_npend;		/* pending bytes in RX ring */
+	int in_nwait;			/* bytes the user waits for */
+	rtdm_event_t in_event;		/* raised to unblock reader */
+	char in_buf[IN_BUFFER_SIZE];	/* RX ring buffer */
+
+	volatile unsigned long in_lock;	/* single-reader lock */
+	uint64_t *in_history;		/* RX timestamp buffer */
+
+	int out_head;			/* TX ring buffer, head pointer */
+	int out_tail;			/* TX ring buffer, tail pointer */
+	size_t out_npend;		/* pending bytes in TX ring */
+	rtdm_event_t out_event;		/* raised to unblock writer */
+	char out_buf[OUT_BUFFER_SIZE];	/* TX ring buffer */
+	rtdm_mutex_t out_lock;		/* single-writer mutex */
+
+	uint64_t last_timestamp;	/* timestamp of last event */
+	int ioc_events;			/* recorded events */
+	rtdm_event_t ioc_event;		/* raised to unblock event waiter */
+	volatile unsigned long ioc_event_lock;	/* single-waiter lock */
+
+	int ier_status;			/* IER cache */
+	int mcr_status;			/* MCR cache */
+	int status;			/* cache for LSR + soft-states */
+	int saved_errors;		/* error cache for RTIOC_GET_STATUS */
+
+	/*
+	 * The port structure holds all the information about the UART
+	 * port, such as the base address.
+	 */
+	struct rt_imx_uart_port *port;
+};
+
+static const struct rtser_config default_config = {
+	.config_mask = 0xFFFF,
+	.baud_rate = RTSER_DEF_BAUD,
+	.parity = RTSER_DEF_PARITY,
+	.data_bits = RTSER_DEF_BITS,
+	.stop_bits = RTSER_DEF_STOPB,
+	.handshake = RTSER_DEF_HAND,
+	.fifo_depth = RTSER_DEF_FIFO_DEPTH,
+	.rx_timeout = RTSER_DEF_TIMEOUT,
+	.tx_timeout = RTSER_DEF_TIMEOUT,
+	.event_timeout = RTSER_DEF_TIMEOUT,
+	.timestamp_history = RTSER_DEF_TIMESTAMP_HISTORY,
+	.event_mask = RTSER_DEF_EVENT_MASK,
+};
+
+static void rt_imx_uart_stop_tx(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long temp;
+
+	temp = readl(ctx->port->membase + UCR1);
+	writel(temp & ~UCR1_TXMPTYEN, ctx->port->membase + UCR1);
+}
+
+static void rt_imx_uart_start_tx(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long temp;
+
+	temp = readl(ctx->port->membase + UCR1);
+	writel(temp | UCR1_TXMPTYEN, ctx->port->membase + UCR1);
+}
+
+static void rt_imx_uart_enable_ms(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long ucr3;
+
+	/*
+	 * RTS interrupt is enabled only if we are using interrupt-driven
+	 * software controlled hardware flow control
+	 */
+	if (!ctx->port->use_hwflow) {
+		unsigned long ucr1 = readl(ctx->port->membase + UCR1);
+
+		ucr1 |= UCR1_RTSDEN;
+		writel(ucr1, ctx->port->membase + UCR1);
+	}
+	ucr3 = readl(ctx->port->membase + UCR3);
+	ucr3 |= UCR3_DTREN;
+	if (ctx->port->use_dcedte) /* DTE mode */
+		ucr3 |= UCR3_DCD | UCR3_RI;
+	writel(ucr3, ctx->port->membase + UCR3);
+}
+
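+/*
+ * Drain the RX FIFO into the ring buffer; called with ctx->lock held.
+ * Error bits travel with each character in URXD0 and are accumulated
+ * into the line status. Once the ring is full, further characters
+ * overwrite the oldest data and a soft overrun is flagged.
+ */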
+static int rt_imx_uart_rx_chars(struct rt_imx_uart_ctx *ctx,
+				uint64_t *timestamp)
+{
+	unsigned int rx, temp;
+	int rbytes = 0;
+	int lsr = 0;
+
+	while (readl(ctx->port->membase + USR2) & USR2_RDR) {
+		rx = readl(ctx->port->membase + URXD0);
+		temp = readl(ctx->port->membase + USR2);
+		if (temp & USR2_BRCD) {
+			writel(USR2_BRCD, ctx->port->membase + USR2);
+			lsr |= RTSER_LSR_BREAK_IND;
+		}
+
+		if (rx & (URXD_PRERR | URXD_OVRRUN | URXD_FRMERR)) {
+			if (rx & URXD_PRERR)
+				lsr |= RTSER_LSR_PARITY_ERR;
+			else if (rx & URXD_FRMERR)
+				lsr |= RTSER_LSR_FRAMING_ERR;
+			if (rx & URXD_OVRRUN)
+				lsr |= RTSER_LSR_OVERRUN_ERR;
+		}
+
+		/* save received character */
+		ctx->in_buf[ctx->in_tail] = rx & 0xff;
+		if (ctx->in_history)
+			ctx->in_history[ctx->in_tail] = *timestamp;
+		ctx->in_tail = (ctx->in_tail + 1) & (IN_BUFFER_SIZE - 1);
+
+		if (unlikely(ctx->in_npend >= IN_BUFFER_SIZE))
+			lsr |= RTSER_SOFT_OVERRUN_ERR;
+		else
+			ctx->in_npend++;
+
+		rbytes++;
+	}
+
+	/* save new errors */
+	ctx->status |= lsr;
+
+	return rbytes;
+}
+
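+/*
+ * Feed the TX FIFO from the ring buffer until the FIFO is full or the
+ * ring is empty; called with ctx->lock held.
+ */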
+static void rt_imx_uart_tx_chars(struct rt_imx_uart_ctx *ctx)
+{
+	int ch;
+	unsigned int uts_reg = ctx->port->devdata->uts_reg;
+
+	while (ctx->out_npend > 0 &&
+	       !(readl(ctx->port->membase + uts_reg) & UTS_TXFULL)) {
+		ch = ctx->out_buf[ctx->out_head++];
+		writel(ch, ctx->port->membase + URTX0);
+		ctx->out_head &= (OUT_BUFFER_SIZE - 1);
+		ctx->out_npend--;
+	}
+}
+
+static int rt_imx_uart_modem_status(struct rt_imx_uart_ctx *ctx,
+				     unsigned int usr1,
+				     unsigned int usr2)
+{
+	int events = 0;
+
+	/* Clear the status bits that triggered the interrupt */
+	writel(usr1, ctx->port->membase + USR1);
+	writel(usr2, ctx->port->membase + USR2);
+
+	if (ctx->port->use_dcedte) { /* DTE mode */
+		if (usr2 & USR2_DCDDELT)
+			events |= !(usr2 & USR2_DCDIN) ?
+				RTSER_EVENT_MODEMHI : RTSER_EVENT_MODEMLO;
+	}
+	if (!ctx->port->use_hwflow && (usr1 & USR1_RTSD)) {
+		events |= (usr1 & USR1_RTSS) ?
+			RTSER_EVENT_MODEMHI : RTSER_EVENT_MODEMLO;
+	}
+
+	return events;
+}
+
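+/*
+ * Top-level IRQ handler, running in the real-time domain. The interrupt
+ * is timestamped first so RX timestamps reflect arrival time; RX, TX and
+ * modem-status sources are then serviced and the matching events
+ * signalled.
+ */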
+static int rt_imx_uart_int(rtdm_irq_t *irq_context)
+{
+	uint64_t timestamp = rtdm_clock_read();
+	struct rt_imx_uart_ctx *ctx;
+	unsigned int usr1, usr2, ucr1;
+	int rbytes = 0, events = 0;
+	int ret = RTDM_IRQ_NONE;
+
+	ctx = rtdm_irq_get_arg(irq_context, struct rt_imx_uart_ctx);
+
+	rtdm_lock_get(&ctx->lock);
+
+	usr1 = readl(ctx->port->membase + USR1);
+	usr2 = readl(ctx->port->membase + USR2);
+	ucr1 = readl(ctx->port->membase + UCR1);
+
+	/*
+	 * Read if there is data available
+	 */
+	if (usr1 & USR1_RRDY) {
+		if (likely(ucr1 & UCR1_RRDYEN)) {
+			rbytes = rt_imx_uart_rx_chars(ctx, &timestamp);
+			events |= RTSER_EVENT_RXPEND;
+		}
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	/*
+	 * Send data if there is data to be sent
+	 */
+	if (usr1 & USR1_TRDY) {
+		if (likely(ucr1 & UCR1_TXMPTYEN))
+			rt_imx_uart_tx_chars(ctx);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	/*
+	 * Handle modem status events
+	 */
+	if ((usr1 & (USR1_RTSD | USR1_DTRD)) ||
+	    (usr2 & (USR2_DCDDELT | USR2_RIDELT))) {
+		events |= rt_imx_uart_modem_status(ctx, usr1, usr2);
+		ret = RTDM_IRQ_HANDLED;
+	}
+
+	if (ctx->in_nwait > 0) {
+		if ((ctx->in_nwait <= rbytes) || ctx->status) {
+			ctx->in_nwait = 0;
+			rtdm_event_signal(&ctx->in_event);
+		} else {
+			ctx->in_nwait -= rbytes;
+		}
+	}
+
+	if (ctx->status) {
+		events |= RTSER_EVENT_ERRPEND;
+#ifdef FIXME
+		ctx->ier_status &= ~IER_STAT;
+#endif
+	}
+
+	if (events & ctx->config.event_mask) {
+		int old_events = ctx->ioc_events;
+
+		ctx->last_timestamp = timestamp;
+		ctx->ioc_events = events;
+
+		if (!old_events)
+			rtdm_event_signal(&ctx->ioc_event);
+	}
+
+	if ((ctx->ier_status & IER_TX) && (ctx->out_npend == 0)) {
+		rt_imx_uart_stop_tx(ctx);
+		ctx->ier_status &= ~IER_TX;
+		rtdm_event_signal(&ctx->out_event);
+	}
+
+	rtdm_lock_put(&ctx->lock);
+
+	if (ret != RTDM_IRQ_HANDLED)
+		pr_warn("%s: unhandled interrupt\n", __func__);
+	return ret;
+}
+
+static unsigned int rt_imx_uart_get_msr(struct rt_imx_uart_ctx *ctx)
+{
+	unsigned long usr1 = readl(ctx->port->membase + USR1);
+	unsigned long usr2 = readl(ctx->port->membase + USR2);
+	unsigned int msr = 0;
+
+	if (usr1 & USR1_RTSD)
+		msr |= RTSER_MSR_DCTS;
+	if (usr1 & USR1_DTRD)
+		msr |= RTSER_MSR_DDSR;
+	if (usr2 & USR2_RIDELT)
+		msr |= RTSER_MSR_TERI;
+	if (usr2 & USR2_DCDDELT)
+		msr |= RTSER_MSR_DDCD;
+
+	if (usr1 & USR1_RTSS)
+		msr |= RTSER_MSR_CTS;
+
+	if (ctx->port->use_dcedte) { /* DTE mode */
+		if (!(usr2 & USR2_DCDIN))
+			msr |= RTSER_MSR_DCD;
+		if (!(usr2 & USR2_RIIN))
+			msr |= RTSER_MSR_RI;
+	}
+
+	return msr;
+}
+
+static void rt_imx_uart_set_mcr(struct rt_imx_uart_ctx *ctx,
+				unsigned int mcr)
+{
+	unsigned int uts_reg = ctx->port->devdata->uts_reg;
+	unsigned long ucr2 = readl(ctx->port->membase + UCR2);
+	unsigned long ucr3 = readl(ctx->port->membase + UCR3);
+	unsigned long uts = readl(ctx->port->membase + uts_reg);
+
+	if (mcr & RTSER_MCR_RTS) {
+		/*
+		 * Return to hardware-driven hardware flow control if the
+		 * option is enabled
+		 */
+		if (ctx->port->use_hwflow) {
+			ucr2 |= UCR2_CTSC;
+		} else {
+			ucr2 |= UCR2_CTS;
+			ucr2 &= ~UCR2_CTSC;
+		}
+	} else {
+		ucr2 &= ~(UCR2_CTS | UCR2_CTSC);
+	}
+	writel(ucr2, ctx->port->membase + UCR2);
+
+	if (mcr & RTSER_MCR_DTR)
+		ucr3 |= UCR3_DSR;
+	else
+		ucr3 &= ~UCR3_DSR;
+	writel(ucr3, ctx->port->membase + UCR3);
+
+	if (mcr & RTSER_MCR_LOOP)
+		uts |= UTS_LOOP;
+	else
+		uts &= ~UTS_LOOP;
+	writel(uts, ctx->port->membase + uts_reg);
+}
+
+static void rt_imx_uart_break_ctl(struct rt_imx_uart_ctx *ctx,
+				  int break_state)
+{
+	unsigned long ucr1 = readl(ctx->port->membase + UCR1);
+
+	if (break_state == RTSER_BREAK_SET)
+		ucr1 |= UCR1_SNDBRK;
+	else
+		ucr1 &= ~UCR1_SNDBRK;
+	writel(ucr1, ctx->port->membase + UCR1);
+}
+
+static int rt_imx_uart_set_config(struct rt_imx_uart_ctx *ctx,
+				  const struct rtser_config *config,
+				  uint64_t **in_history_ptr)
+{
+	rtdm_lockctx_t lock_ctx;
+	int err = 0;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	if (config->config_mask & RTSER_SET_BAUD)
+		ctx->config.baud_rate = config->baud_rate;
+	if (config->config_mask & RTSER_SET_DATA_BITS)
+		ctx->config.data_bits = config->data_bits & DATA_BITS_MASK;
+	if (config->config_mask & RTSER_SET_PARITY)
+		ctx->config.parity = config->parity & PARITY_MASK;
+	if (config->config_mask & RTSER_SET_STOP_BITS)
+		ctx->config.stop_bits = config->stop_bits & STOP_BITS_MASK;
+
+	/* Timeout manipulation is not atomic. The user is supposed to take
+	 * care not to use and change timeouts at the same time.
+	 */
+	if (config->config_mask & RTSER_SET_TIMEOUT_RX)
+		ctx->config.rx_timeout = config->rx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_TX)
+		ctx->config.tx_timeout = config->tx_timeout;
+	if (config->config_mask & RTSER_SET_TIMEOUT_EVENT)
+		ctx->config.event_timeout = config->event_timeout;
+
+	if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+		if (config->timestamp_history & RTSER_RX_TIMESTAMP_HISTORY) {
+			if (!ctx->in_history) {
+				ctx->in_history = *in_history_ptr;
+				*in_history_ptr = NULL;
+				if (!ctx->in_history)
+					err = -ENOMEM;
+			}
+		} else {
+			*in_history_ptr = ctx->in_history;
+			ctx->in_history = NULL;
+		}
+	}
+
+	if (config->config_mask & RTSER_SET_EVENT_MASK) {
+		ctx->config.event_mask = config->event_mask & EVENT_MASK;
+		ctx->ioc_events = 0;
+
+		if ((config->event_mask & RTSER_EVENT_RXPEND) &&
+		    (ctx->in_npend > 0))
+			ctx->ioc_events |= RTSER_EVENT_RXPEND;
+
+		if ((config->event_mask & RTSER_EVENT_ERRPEND)
+		    && ctx->status)
+			ctx->ioc_events |= RTSER_EVENT_ERRPEND;
+	}
+
+	if (config->config_mask & RTSER_SET_HANDSHAKE) {
+		ctx->config.handshake = config->handshake;
+
+		switch (ctx->config.handshake) {
+		case RTSER_RTSCTS_HAND:
+			/* ...? */
+
+		default:	/* RTSER_NO_HAND */
+			ctx->mcr_status = RTSER_MCR_RTS | RTSER_MCR_OUT1;
+			break;
+		}
+		rt_imx_uart_set_mcr(ctx, ctx->mcr_status);
+	}
+
+	/* configure hardware with new parameters */
+	if (config->config_mask & (RTSER_SET_BAUD |
+				   RTSER_SET_PARITY |
+				   RTSER_SET_DATA_BITS |
+				   RTSER_SET_STOP_BITS |
+				   RTSER_SET_EVENT_MASK |
+				   RTSER_SET_HANDSHAKE)) {
+		struct rt_imx_uart_port *port = ctx->port;
+		unsigned int ucr2, old_ucr1, old_txrxen, old_ucr2;
+		unsigned int baud = ctx->config.baud_rate;
+		unsigned int div, ufcr;
+		unsigned long num, denom;
+		uint64_t tdiv64;
+
+		if (ctx->config.data_bits == RTSER_8_BITS)
+			ucr2 = UCR2_WS | UCR2_SRST | UCR2_IRTS;
+		else
+			ucr2 = UCR2_SRST | UCR2_IRTS;
+
+		if (ctx->config.handshake == RTSER_RTSCTS_HAND) {
+			if (port->have_rtscts) {
+				ucr2 &= ~UCR2_IRTS;
+				ucr2 |= UCR2_CTSC;
+			}
+		}
+
+		if (ctx->config.stop_bits == RTSER_2_STOPB)
+			ucr2 |= UCR2_STPB;
+		if (ctx->config.parity == RTSER_ODD_PARITY ||
+		    ctx->config.parity == RTSER_EVEN_PARITY) {
+			ucr2 |= UCR2_PREN;
+			if (ctx->config.parity == RTSER_ODD_PARITY)
+				ucr2 |= UCR2_PROE;
+		}
+
+		/*
+		 * disable interrupts and drain transmitter
+		 */
+		old_ucr1 = readl(port->membase + UCR1);
+		old_ucr1 &= ~UCR1_RTSDEN; /* re-enabled in rt_imx_uart_enable_ms() */
+		writel(old_ucr1 & ~(UCR1_TXMPTYEN | UCR1_RRDYEN),
+		       port->membase + UCR1);
+		old_ucr2 = readl(port->membase + UCR2);
+		writel(old_ucr2 & ~UCR2_ATEN, port->membase + UCR2);
+		while (!(readl(port->membase + USR2) & USR2_TXDC))
+			barrier();
+
+		/* then, disable everything */
+		old_txrxen = readl(port->membase + UCR2);
+		writel(old_txrxen & ~(UCR2_TXEN | UCR2_RXEN),
+		       port->membase + UCR2);
+		old_txrxen &= (UCR2_TXEN | UCR2_RXEN);
+		div = port->uartclk / (baud * 16);
+		if (div > 7)
+			div = 7;
+		if (!div)
+			div = 1;
+
+		rational_best_approximation(16 * div * baud, port->uartclk,
+					    1 << 16, 1 << 16, &num, &denom);
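+		/*
+		 * The i.MX binary rate multiplier yields
+		 * baud = refclk * (UBIR+1) / (16 * (UBMR+1)) with
+		 * refclk = uartclk / div, so num/denom approximates
+		 * 16 * div * baud / uartclk within the 16-bit register
+		 * range.
+		 */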
+
+		tdiv64 = port->uartclk;
+		tdiv64 *= num;
+		do_div(tdiv64, denom * 16 * div);
+
+		num -= 1;
+		denom -= 1;
+
+		ufcr = readl(port->membase + UFCR);
+		ufcr = (ufcr & (~UFCR_RFDIV)) | UFCR_RFDIV_REG(div);
+
+		if (port->use_dcedte)
+			ufcr |= UFCR_DCEDTE;
+
+		writel(ufcr, port->membase + UFCR);
+
+		writel(num, port->membase + UBIR);
+		writel(denom, port->membase + UBMR);
+
+		writel(port->uartclk / div / 1000, port->membase + MX2_ONEMS);
+
+		writel(old_ucr1, port->membase + UCR1);
+
+		/* set the parity, stop bits and data size */
+		writel(ucr2 | old_txrxen, port->membase + UCR2);
+
+		if (config->event_mask &
+		    (RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO))
+			rt_imx_uart_enable_ms(ctx);
+
+		ctx->status = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	return err;
+}
+
+void rt_imx_uart_cleanup_ctx(struct rt_imx_uart_ctx *ctx)
+{
+	rtdm_event_destroy(&ctx->in_event);
+	rtdm_event_destroy(&ctx->out_event);
+	rtdm_event_destroy(&ctx->ioc_event);
+	rtdm_mutex_destroy(&ctx->out_lock);
+}
+
+#define TXTL 2 /* reset default */
+#define RXTL 1 /* reset default */
+
+static int rt_imx_uart_setup_ufcr(struct rt_imx_uart_port *port)
+{
+	unsigned int val;
+	unsigned int ufcr_rfdiv;
+
+	/* Set the receiver/transmitter trigger levels.
+	 * RFDIV is chosen so that the requested uartclk value is satisfied.
+	 */
+	val = TXTL << 10 | RXTL;
+	ufcr_rfdiv = (clk_get_rate(port->clk_per) + port->uartclk / 2) /
+		port->uartclk;
+
+	if (!ufcr_rfdiv)
+		ufcr_rfdiv = 1;
+
+	val |= UFCR_RFDIV_REG(ufcr_rfdiv);
+
+	writel(val, port->membase + UFCR);
+
+	return 0;
+}
+
+/* half the RX buffer size */
+#define CTSTL 16
+
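+/*
+ * Soft-reset the UART: clearing UCR2_SRST resets the FIFOs and state
+ * machines; the test register is then polled, with a bounded wait, until
+ * the reset has completed.
+ */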
+static void uart_reset(struct rt_imx_uart_port *port)
+{
+	unsigned int uts_reg = port->devdata->uts_reg;
+	int n = 100;
+	u32 temp;
+
+	/* Reset FIFOs and state machines */
+	temp = readl(port->membase + UCR2);
+	temp &= ~UCR2_SRST;
+	writel(temp, port->membase + UCR2);
+	while (!(readl(port->membase + uts_reg) & UTS_SOFTRST) && --n > 0)
+		udelay(1);
+}
+
+static int rt_imx_uart_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_imx_uart_ctx *ctx;
+	struct rt_imx_uart_port *port;
+	rtdm_lockctx_t lock_ctx;
+	unsigned long temp;
+	uint64_t *dummy;
+
+	ctx = rtdm_fd_to_private(fd);
+	ctx->port = (struct rt_imx_uart_port *)rtdm_fd_device(fd)->device_data;
+
+	port = ctx->port;
+
+	/* IPC initialisation - cannot fail with used parameters */
+	rtdm_lock_init(&ctx->lock);
+	rtdm_event_init(&ctx->in_event, 0);
+	rtdm_event_init(&ctx->out_event, 0);
+	rtdm_event_init(&ctx->ioc_event, 0);
+	rtdm_mutex_init(&ctx->out_lock);
+
+	ctx->in_head = 0;
+	ctx->in_tail = 0;
+	ctx->in_npend = 0;
+	ctx->in_nwait = 0;
+	ctx->in_lock = 0;
+	ctx->in_history = NULL;
+
+	ctx->out_head = 0;
+	ctx->out_tail = 0;
+	ctx->out_npend = 0;
+
+	ctx->ioc_events = 0;
+	ctx->ioc_event_lock = 0;
+	ctx->status = 0;
+	ctx->saved_errors = 0;
+
+	/*
+	 * disable the DREN bit (Data Ready interrupt enable) before
+	 * requesting IRQs
+	 */
+	temp = readl(port->membase + UCR4);
+
+	/* set the trigger level for CTS */
+	temp &= ~(UCR4_CTSTL_MASK << UCR4_CTSTL_SHF);
+	temp |= CTSTL << UCR4_CTSTL_SHF;
+	writel(temp & ~UCR4_DREN, port->membase + UCR4);
+
+	uart_reset(port);
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	/*
+	 * Finally, clear status and enable interrupts
+	 */
+	writel(USR1_RTSD | USR1_DTRD, port->membase + USR1);
+	writel(USR2_ORE, port->membase + USR2);
+
+	temp = readl(port->membase + UCR1) & ~UCR1_RRDYEN;
+	temp |= UCR1_UARTEN;
+	if (port->have_rtscts)
+		temp |= UCR1_RTSDEN;
+	writel(temp, port->membase + UCR1);
+
+	temp = readl(port->membase + UCR4);
+	temp |= UCR4_OREN;
+	writel(temp, port->membase + UCR4);
+
+	temp = readl(port->membase + UCR2) & ~(UCR2_ATEN|UCR2_RTSEN);
+	temp |= (UCR2_RXEN | UCR2_TXEN);
+	if (!port->have_rtscts)
+		temp |= UCR2_IRTS;
+	writel(temp, port->membase + UCR2);
+
+	temp = readl(port->membase + UCR3);
+	temp |= MX2_UCR3_RXDMUXSEL;
+	writel(temp, port->membase + UCR3);
+
+	temp = readl(port->membase + UCR1);
+	temp |= UCR1_RRDYEN;
+	writel(temp, port->membase + UCR1);
+
+	temp = readl(port->membase + UCR2);
+	temp |= UCR2_ATEN;
+	writel(temp, port->membase + UCR2);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rt_imx_uart_set_config(ctx, &default_config, &dummy);
+
+	rt_imx_uart_setup_ufcr(port);
+
+	return rtdm_irq_request(&ctx->irq_handle,
+				port->irq, rt_imx_uart_int, 0,
+				rtdm_fd_device(fd)->name, ctx);
+}
+
+void rt_imx_uart_close(struct rtdm_fd *fd)
+{
+	struct rt_imx_uart_port *port;
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	unsigned long temp;
+
+	ctx = rtdm_fd_to_private(fd);
+	port = ctx->port;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	temp = readl(port->membase + UCR2);
+	temp &= ~(UCR2_ATEN|UCR2_RTSEN|UCR2_RXEN|UCR2_TXEN|UCR2_IRTS);
+	writel(temp, port->membase + UCR2);
+	/*
+	 * Disable all interrupts, port and break condition, then
+	 * reset.
+	 */
+	temp = readl(port->membase + UCR1);
+	temp &= ~(UCR1_TXMPTYEN | UCR1_RRDYEN | UCR1_RTSDEN | UCR1_UARTEN);
+	writel(temp, port->membase + UCR1);
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+	rtdm_irq_free(&ctx->irq_handle);
+
+	uart_reset(port);
+
+	rt_imx_uart_cleanup_ctx(ctx);
+	kfree(ctx->in_history);
+}
+
+static int rt_imx_uart_ioctl(struct rtdm_fd *fd,
+			     unsigned int request, void *arg)
+{
+	rtdm_lockctx_t lock_ctx;
+	struct rt_imx_uart_ctx *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTSER_RTIOC_GET_CONFIG:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->config,
+						   sizeof(struct rtser_config));
+		else
+			memcpy(arg, &ctx->config,
+			       sizeof(struct rtser_config));
+		break;
+
+	case RTSER_RTIOC_SET_CONFIG: {
+		struct rtser_config *config;
+		struct rtser_config config_buf;
+		uint64_t *hist_buf = NULL;
+
+		/*
+		 * We may call regular kernel services ahead, ask for
+		 * re-entering secondary mode if need be.
+		 */
+		if (rtdm_in_rt_context())
+			return -ENOSYS;
+
+		config = (struct rtser_config *)arg;
+
+		if (rtdm_fd_is_user(fd)) {
+			err =
+			    rtdm_safe_copy_from_user(fd, &config_buf,
+						     arg,
+						     sizeof(struct
+							    rtser_config));
+			if (err)
+				return err;
+
+			config = &config_buf;
+		}
+
+		if ((config->config_mask & RTSER_SET_BAUD) &&
+		    (config->baud_rate > clk_get_rate(ctx->port->clk_per) / 16 ||
+		     config->baud_rate <= 0))
+			/* invalid baudrate for this port */
+			return -EINVAL;
+
+		if (config->config_mask & RTSER_SET_TIMESTAMP_HISTORY) {
+			if (config->timestamp_history &
+						RTSER_RX_TIMESTAMP_HISTORY)
+				hist_buf = kmalloc(IN_BUFFER_SIZE *
+						   sizeof(nanosecs_abs_t),
+						   GFP_KERNEL);
+		}
+
+		rt_imx_uart_set_config(ctx, config, &hist_buf);
+
+		if (hist_buf)
+			kfree(hist_buf);
+		break;
+	}
+
+	case RTSER_RTIOC_GET_STATUS: {
+		int status, msr;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		status = ctx->saved_errors | ctx->status;
+		ctx->status = 0;
+		ctx->saved_errors = 0;
+		ctx->ioc_events &= ~RTSER_EVENT_ERRPEND;
+
+		msr = rt_imx_uart_get_msr(ctx);
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rtser_status status_buf;
+
+			status_buf.line_status = status;
+			status_buf.modem_status = msr;
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &status_buf,
+						   sizeof(struct
+							  rtser_status));
+		} else {
+			((struct rtser_status *)arg)->line_status = status;
+			((struct rtser_status *)arg)->modem_status = msr;
+		}
+		break;
+	}
+
+	case RTSER_RTIOC_GET_CONTROL:
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg,
+						   &ctx->mcr_status,
+						   sizeof(int));
+		else
+			*(int *)arg = ctx->mcr_status;
+
+		break;
+
+	case RTSER_RTIOC_SET_CONTROL: {
+		int new_mcr = (long)arg;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		ctx->mcr_status = new_mcr;
+		rt_imx_uart_set_mcr(ctx, new_mcr);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+	case RTSER_RTIOC_WAIT_EVENT: {
+		struct rtser_event ev = { .rxpend_timestamp = 0 };
+		rtdm_toseq_t timeout_seq;
+
+		if (!rtdm_in_rt_context())
+			return -ENOSYS;
+
+		/* Only one waiter allowed, stop any further attempts here. */
+		if (test_and_set_bit(0, &ctx->ioc_event_lock))
+			return -EBUSY;
+
+		rtdm_toseq_init(&timeout_seq, ctx->config.event_timeout);
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		while (!ctx->ioc_events) {
+			/* Only enable error interrupt
+			 * when the user waits for it.
+			 */
+			if (ctx->config.event_mask & RTSER_EVENT_ERRPEND) {
+				ctx->ier_status |= IER_STAT;
+#ifdef FIXME
+				rt_imx_uart_reg_out(mode, base, IER,
+						 ctx->ier_status);
+#endif
+			}
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			err = rtdm_event_timedwait(&ctx->ioc_event,
+						   ctx->config.event_timeout,
+						   &timeout_seq);
+			if (err) {
+				/* Device has been closed? */
+				if (err == -EIDRM)
+					err = -EBADF;
+				goto wait_unlock_out;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		}
+
+		ev.events = ctx->ioc_events;
+		ctx->ioc_events &=
+		    ~(RTSER_EVENT_MODEMHI | RTSER_EVENT_MODEMLO);
+
+		ev.last_timestamp = ctx->last_timestamp;
+		ev.rx_pending = ctx->in_npend;
+
+		if (ctx->in_history)
+			ev.rxpend_timestamp = ctx->in_history[ctx->in_head];
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		if (rtdm_fd_is_user(fd))
+			err =
+			    rtdm_safe_copy_to_user(fd, arg, &ev,
+						   sizeof(struct
+							  rtser_event));
+		else
+			memcpy(arg, &ev, sizeof(struct rtser_event));
+
+wait_unlock_out:
+		/* release the simple event waiter lock */
+		clear_bit(0, &ctx->ioc_event_lock);
+		break;
+	}
+
+	case RTSER_RTIOC_BREAK_CTL: {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		rt_imx_uart_break_ctl(ctx, (unsigned long)arg);
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+
+#ifdef FIXME
+	case RTIOC_PURGE: {
+		int fcr = 0;
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+		if ((long)arg & RTDM_PURGE_RX_BUFFER) {
+			ctx->in_head = 0;
+			ctx->in_tail = 0;
+			ctx->in_npend = 0;
+			ctx->status = 0;
+			fcr |= FCR_FIFO | FCR_RESET_RX;
+			rt_imx_uart_reg_in(mode, base, RHR);
+		}
+		if ((long)arg & RTDM_PURGE_TX_BUFFER) {
+			ctx->out_head = 0;
+			ctx->out_tail = 0;
+			ctx->out_npend = 0;
+			fcr |= FCR_FIFO | FCR_RESET_TX;
+		}
+		if (fcr) {
+			rt_imx_uart_reg_out(mode, base, FCR, fcr);
+			rt_imx_uart_reg_out(mode, base, FCR,
+					 FCR_FIFO | ctx->config.fifo_depth);
+		}
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+		break;
+	}
+#endif
+
+	default:
+		err = -ENOTTY;
+	}
+
+	return err;
+}
+
+ssize_t rt_imx_uart_read(struct rtdm_fd *fd, void *buf, size_t nbyte)
+{
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t read = 0;
+	int pending;
+	int block;
+	int subblock;
+	int in_pos;
+	char *out_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret = -EAGAIN;	/* for non-blocking read */
+	int nonblocking;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_rw_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.rx_timeout);
+
+	/* non-blocking is handled separately here */
+	nonblocking = (ctx->config.rx_timeout < 0);
+
+	/* only one reader allowed, stop any further attempts here */
+	if (test_and_set_bit(0, &ctx->in_lock))
+		return -EBUSY;
+
+	rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+	while (1) {
+		if (ctx->status) {
+			if (ctx->status & RTSER_LSR_BREAK_IND)
+				ret = -EPIPE;
+			else
+				ret = -EIO;
+			ctx->saved_errors = ctx->status &
+			    (RTSER_LSR_OVERRUN_ERR | RTSER_LSR_PARITY_ERR |
+			     RTSER_LSR_FRAMING_ERR | RTSER_SOFT_OVERRUN_ERR);
+			ctx->status = 0;
+			break;
+		}
+
+		pending = ctx->in_npend;
+
+		if (pending > 0) {
+			block = subblock = (pending <= nbyte) ? pending : nbyte;
+			in_pos = ctx->in_head;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (in_pos + subblock > IN_BUFFER_SIZE) {
+				/* Treat the block between head and buffer end
+				 * separately.
+				 */
+				subblock = IN_BUFFER_SIZE - in_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_to_user
+					    (fd, out_pos,
+					     &ctx->in_buf[in_pos],
+					     subblock) != 0) {
+						ret = -EFAULT;
+						goto break_unlocked;
+					}
+				} else
+					memcpy(out_pos, &ctx->in_buf[in_pos],
+					       subblock);
+
+				read += subblock;
+				out_pos += subblock;
+
+				subblock = block - subblock;
+				in_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_to_user(fd, out_pos,
+						      &ctx->in_buf[in_pos],
+						      subblock) != 0) {
+					ret = -EFAULT;
+					goto break_unlocked;
+				}
+			} else
+				memcpy(out_pos, &ctx->in_buf[in_pos], subblock);
+
+			read += subblock;
+			out_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->in_head =
+			    (ctx->in_head + block) & (IN_BUFFER_SIZE - 1);
+			ctx->in_npend -= block;
+			if (ctx->in_npend == 0)
+				ctx->ioc_events &= ~RTSER_EVENT_RXPEND;
+
+			if (nbyte == 0)
+				break; /* All requested bytes read. */
+
+			continue;
+		}
+
+		if (nonblocking)
+			/* ret was set to EAGAIN in case of a real
+			 * non-blocking call or contains the error
+			 * returned by rtdm_event_wait[_until]
+			 */
+			break;
+
+		ctx->in_nwait = nbyte;
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->in_event,
+					   ctx->config.rx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				 * return immediately.
+				 */
+				return -EBADF;
+			}
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			nonblocking = 1;
+			if (ctx->in_npend > 0) {
+				/* Final turn: collect pending bytes
+				 * before exit.
+				 */
+				continue;
+			}
+
+			ctx->in_nwait = 0;
+			break;
+		}
+
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+	}
+
+	rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+break_unlocked:
+	/* Release the simple reader lock. */
+	clear_bit(0, &ctx->in_lock);
+
+	if ((read > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			   (ret == -ETIMEDOUT)))
+		ret = read;
+
+	return ret;
+}
+
+static ssize_t rt_imx_uart_write(struct rtdm_fd *fd, const void *buf,
+				size_t nbyte)
+{
+	struct rt_imx_uart_ctx *ctx;
+	rtdm_lockctx_t lock_ctx;
+	size_t written = 0;
+	int free;
+	int block;
+	int subblock;
+	int out_pos;
+	char *in_pos = (char *)buf;
+	rtdm_toseq_t timeout_seq;
+	ssize_t ret;
+
+	if (nbyte == 0)
+		return 0;
+
+	if (rtdm_fd_is_user(fd) && !rtdm_read_user_ok(fd, buf, nbyte))
+		return -EFAULT;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	rtdm_toseq_init(&timeout_seq, ctx->config.tx_timeout);
+
+	/* Make write operation atomic. */
+	ret = rtdm_mutex_timedlock(&ctx->out_lock, ctx->config.tx_timeout,
+				   &timeout_seq);
+	if (ret)
+		return ret;
+
+	while (nbyte > 0) {
+		rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+		free = OUT_BUFFER_SIZE - ctx->out_npend;
+
+		if (free > 0) {
+			block = subblock = (nbyte <= free) ? nbyte : free;
+			out_pos = ctx->out_tail;
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+			/* Do we have to wrap around the buffer end? */
+			if (out_pos + subblock > OUT_BUFFER_SIZE) {
+				/* Treat the block between head and buffer
+				 * end separately.
+				 */
+				subblock = OUT_BUFFER_SIZE - out_pos;
+
+				if (rtdm_fd_is_user(fd)) {
+					if (rtdm_copy_from_user
+					    (fd,
+					     &ctx->out_buf[out_pos],
+					     in_pos, subblock) != 0) {
+						ret = -EFAULT;
+						break;
+					}
+				} else
+					memcpy(&ctx->out_buf[out_pos], in_pos,
+					       subblock);
+
+				written += subblock;
+				in_pos += subblock;
+
+				subblock = block - subblock;
+				out_pos = 0;
+			}
+
+			if (rtdm_fd_is_user(fd)) {
+				if (rtdm_copy_from_user
+				    (fd, &ctx->out_buf[out_pos],
+				     in_pos, subblock) != 0) {
+					ret = -EFAULT;
+					break;
+				}
+			} else
+				memcpy(&ctx->out_buf[out_pos], in_pos,
+				       subblock);
+
+			written += subblock;
+			in_pos += subblock;
+			nbyte -= block;
+
+			rtdm_lock_get_irqsave(&ctx->lock, lock_ctx);
+
+			ctx->out_tail =
+			    (ctx->out_tail + block) & (OUT_BUFFER_SIZE - 1);
+			ctx->out_npend += block;
+
+			ctx->ier_status |= IER_TX;
+			rt_imx_uart_start_tx(ctx);
+
+			rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+			continue;
+		}
+
+		rtdm_lock_put_irqrestore(&ctx->lock, lock_ctx);
+
+		ret = rtdm_event_timedwait(&ctx->out_event,
+					   ctx->config.tx_timeout,
+					   &timeout_seq);
+		if (ret < 0) {
+			if (ret == -EIDRM) {
+				/* Device has been closed -
+				 * return immediately.
+				 */
+				ret = -EBADF;
+			}
+			break;
+		}
+	}
+
+	rtdm_mutex_unlock(&ctx->out_lock);
+
+	if ((written > 0) && ((ret == 0) || (ret == -EAGAIN) ||
+			      (ret == -ETIMEDOUT)))
+		ret = written;
+
+	return ret;
+}
+
+static struct rtdm_driver imx_uart_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(imx_uart,
+						    RTDM_CLASS_SERIAL,
+						    RTDM_SUBCLASS_16550A,
+						    RTSER_PROFILE_VER),
+	.device_count		= RT_IMX_UART_MAX,
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.context_size		= sizeof(struct rt_imx_uart_ctx),
+	.ops = {
+		.open		= rt_imx_uart_open,
+		.close		= rt_imx_uart_close,
+		.ioctl_rt	= rt_imx_uart_ioctl,
+		.ioctl_nrt	= rt_imx_uart_ioctl,
+		.read_rt	= rt_imx_uart_read,
+		.write_rt	= rt_imx_uart_write,
+	},
+};
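+
+/*
+ * Minimal user-space usage sketch (illustrative only; device numbering
+ * depends on the probed instance): the named RTDM device shows up
+ * under /dev/rtdm/ and is accessed through the libcobalt-wrapped
+ * POSIX calls of a Xenomai application.
+ *
+ *	int fd = open("/dev/rtdm/rtser0", O_RDWR);
+ *	char buf[16];
+ *	ssize_t n;
+ *
+ *	n = write(fd, "ping", 4);
+ *	n = read(fd, buf, sizeof(buf));
+ *	close(fd);
+ */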
+
+
+/*
+ * Returns 0 if all information could be retrieved from the device
+ * tree, 1 if the device was not instantiated from a device tree node,
+ * or a negative errno on failure.
+ */
+static int rt_imx_uart_probe_dt(struct rt_imx_uart_port *port,
+				struct platform_device *pdev)
+{
+	struct device_node *np = pdev->dev.of_node;
+	const struct of_device_id *of_id =
+			of_match_device(rt_imx_uart_dt_ids, &pdev->dev);
+	int ret;
+
+	if (!np)
+		/* no device tree device */
+		return 1;
+
+	ret = of_alias_get_id(np, "serial");
+	if (ret < 0) {
+		dev_err(&pdev->dev, "failed to get alias id, errno %d\n", ret);
+		return ret;
+	}
+
+	pdev->id = ret;
+
+	if (of_get_property(np, "uart-has-rtscts", NULL) ||
+	    of_get_property(np, "fsl,uart-has-rtscts", NULL) /* deprecated */)
+		port->have_rtscts = 1;
+	if (of_get_property(np, "fsl,irda-mode", NULL))
+		dev_warn(&pdev->dev, "IRDA not yet supported\n");
+
+	if (of_get_property(np, "fsl,dte-mode", NULL))
+		port->use_dcedte = 1;
+
+	port->devdata = of_id->data;
+
+	return 0;
+}
+
+static int rt_imx_uart_probe(struct platform_device *pdev)
+{
+	struct rtdm_device *dev;
+	struct rt_imx_uart_port *port;
+	struct resource *res;
+	int ret;
+
+	port = devm_kzalloc(&pdev->dev, sizeof(*port), GFP_KERNEL);
+	if (!port)
+		return -ENOMEM;
+
+	ret = rt_imx_uart_probe_dt(port, pdev);
+	if (ret < 0)
+		return ret;
+
+	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	if (!res)
+		return -ENODEV;
+
+	port->irq = platform_get_irq(pdev, 0);
+
+	if (port->irq <= 0)
+		return -ENODEV;
+
+	port->membase = devm_ioremap_resource(&pdev->dev, res);
+	if (IS_ERR(port->membase))
+		return PTR_ERR(port->membase);
+
+	dev = &port->rtdm_dev;
+	dev->driver = &imx_uart_driver;
+	dev->label = "rtser%d";
+	dev->device_data = port;
+
+	if (!tx_fifo[pdev->id] || tx_fifo[pdev->id] > TX_FIFO_SIZE)
+		port->tx_fifo = TX_FIFO_SIZE;
+	else
+		port->tx_fifo = tx_fifo[pdev->id];
+
+	port->clk_ipg = devm_clk_get(&pdev->dev, "ipg");
+	if (IS_ERR(port->clk_ipg))
+		return PTR_ERR(port->clk_ipg);
+
+	port->clk_per = devm_clk_get(&pdev->dev, "per");
+	if (IS_ERR(port->clk_per))
+		return PTR_ERR(port->clk_per);
+
+	clk_prepare_enable(port->clk_ipg);
+	clk_prepare_enable(port->clk_per);
+	port->uartclk = clk_get_rate(port->clk_per);
+
+	port->use_hwflow = 1;
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		return ret;
+
+	platform_set_drvdata(pdev, port);
+
+	pr_info("%s on IMX UART%d: membase=0x%p irq=%d uartclk=%d\n",
+	       dev->name, pdev->id, port->membase, port->irq, port->uartclk);
+	return 0;
+}
+
+static int rt_imx_uart_remove(struct platform_device *pdev)
+{
+	struct rt_imx_uart_port *port = platform_get_drvdata(pdev);
+	struct rtdm_device *dev = &port->rtdm_dev;
+
+	platform_set_drvdata(pdev, NULL);
+
+	clk_disable_unprepare(port->clk_ipg);
+	clk_disable_unprepare(port->clk_per);
+	rtdm_dev_unregister(dev);
+
+	return 0;
+}
+
+static struct platform_driver rt_imx_uart_driver = {
+	.probe = rt_imx_uart_probe,
+	.remove	= rt_imx_uart_remove,
+	.id_table = rt_imx_uart_id_table,
+	.driver = {
+		.name = DRIVER_NAME,
+		.owner = THIS_MODULE,
+		.of_match_table = rt_imx_uart_dt_ids,
+	},
+	.prevent_deferred_probe = true,
+};
+
+
+static int __init rt_imx_uart_init(void)
+{
+	int ret;
+
+	if (!rtdm_available())
+		return -ENODEV;
+
+	ret = platform_driver_register(&rt_imx_uart_driver);
+	if (ret) {
+		pr_err("%s; Could not register  driver (err=%d)\n",
+			__func__, ret);
+	}
+
+	return ret;
+}
+
+static void __exit rt_imx_uart_exit(void)
+{
+	platform_driver_unregister(&rt_imx_uart_driver);
+}
+
+module_init(rt_imx_uart_init);
+module_exit(rt_imx_uart_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/spi/Kconfig
new file mode 100644
index 0000000..b48aaf3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/Kconfig
@@ -0,0 +1,39 @@
+menu "Real-time SPI master drivers"
+
+config XENO_DRIVERS_SPI
+       depends on SPI
+       tristate
+
+config XENO_DRIVERS_SPI_BCM2835
+	depends on ARCH_BCM2708 || ARCH_BCM2835
+	select XENO_DRIVERS_SPI
+	tristate "Support for BCM2835 SPI"
+	help
+
+	Enables support for the SPI0 controller available on
+	Broadcom's BCM2835 SoC.
+
+config XENO_DRIVERS_SPI_SUN6I
+	depends on MACH_SUN6I || MACH_SUN8I
+	select XENO_DRIVERS_SPI
+	tristate "Support for A31/H3 SoC SPI"
+	help
+
+	Enables support for the SPI controller available on
+	Allwinner's A31 and H3 SoCs.
+
+config XENO_DRIVERS_SPI_OMAP2_MCSPI_RT
+	tristate "McSPI rt-driver for OMAP"
+	depends on HAS_DMA
+	depends on ARCH_OMAP2PLUS || COMPILE_TEST
+	select XENO_DRIVERS_SPI
+	help
+
+	SPI real-time master controller for OMAP24XX and later Multichannel SPI
+	(McSPI) modules.
+
+config XENO_DRIVERS_SPI_DEBUG
+       depends on XENO_DRIVERS_SPI
+       bool "Enable SPI core debugging features"
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/spi/Makefile
new file mode 100644
index 0000000..889033c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/Makefile
@@ -0,0 +1,14 @@
+
+ccflags-$(CONFIG_XENO_DRIVERS_SPI_DEBUG) := -DDEBUG
+
+obj-$(CONFIG_XENO_DRIVERS_SPI) += xeno_spi.o
+
+xeno_spi-y := spi-master.o spi-device.o
+
+obj-$(CONFIG_XENO_DRIVERS_SPI_BCM2835) += xeno_spi_bcm2835.o
+obj-$(CONFIG_XENO_DRIVERS_SPI_SUN6I) += xeno_spi_sun6i.o
+obj-$(CONFIG_XENO_DRIVERS_SPI_OMAP2_MCSPI_RT) += xeno_spi_omap2_mcspi_rt.o
+
+xeno_spi_bcm2835-y := spi-bcm2835.o
+xeno_spi_sun6i-y := spi-sun6i.o
+xeno_spi_omap2_mcspi_rt-y := spi-omap2-mcspi-rt.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-bcm2835.c b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-bcm2835.c
new file mode 100644
index 0000000..a3fb0c7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-bcm2835.c
@@ -0,0 +1,722 @@
+/**
+ * I/O handling lifted from drivers/spi/spi-bcm2835.c:
+ * Copyright (C) 2012 Chris Boot
+ * Copyright (C) 2013 Stephen Warren
+ * Copyright (C) 2015 Martin Sperl
+ *
+ * RTDM integration by:
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_BCM2835  1
+
+/* SPI register offsets */
+#define BCM2835_SPI_CS			0x00
+#define BCM2835_SPI_FIFO		0x04
+#define BCM2835_SPI_CLK			0x08
+#define BCM2835_SPI_DLEN		0x0c
+#define BCM2835_SPI_LTOH		0x10
+#define BCM2835_SPI_DC			0x14
+
+/* Bitfields in CS */
+#define BCM2835_SPI_CS_LEN_LONG		0x02000000
+#define BCM2835_SPI_CS_DMA_LEN		0x01000000
+#define BCM2835_SPI_CS_CSPOL2		0x00800000
+#define BCM2835_SPI_CS_CSPOL1		0x00400000
+#define BCM2835_SPI_CS_CSPOL0		0x00200000
+#define BCM2835_SPI_CS_RXF		0x00100000
+#define BCM2835_SPI_CS_RXR		0x00080000
+#define BCM2835_SPI_CS_TXD		0x00040000
+#define BCM2835_SPI_CS_RXD		0x00020000
+#define BCM2835_SPI_CS_DONE		0x00010000
+#define BCM2835_SPI_CS_LEN		0x00002000
+#define BCM2835_SPI_CS_REN		0x00001000
+#define BCM2835_SPI_CS_ADCS		0x00000800
+#define BCM2835_SPI_CS_INTR		0x00000400
+#define BCM2835_SPI_CS_INTD		0x00000200
+#define BCM2835_SPI_CS_DMAEN		0x00000100
+#define BCM2835_SPI_CS_TA		0x00000080
+#define BCM2835_SPI_CS_CSPOL		0x00000040
+#define BCM2835_SPI_CS_CLEAR_RX		0x00000020
+#define BCM2835_SPI_CS_CLEAR_TX		0x00000010
+#define BCM2835_SPI_CS_CPOL		0x00000008
+#define BCM2835_SPI_CS_CPHA		0x00000004
+#define BCM2835_SPI_CS_CS_10		0x00000002
+#define BCM2835_SPI_CS_CS_01		0x00000001
+
+#define BCM2835_SPI_POLLING_LIMIT_US	30
+#define BCM2835_SPI_POLLING_JIFFIES	2
+#define BCM2835_SPI_DMA_MIN_LENGTH	96
+#define BCM2835_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH \
+				| SPI_NO_CS | SPI_3WIRE)
+
+struct spi_master_bcm2835 {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	struct clk *clk;
+	unsigned long clk_hz;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	rtdm_event_t transfer_done;
+};
+
+struct spi_slave_bcm2835 {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_bcm2835 *
+to_slave_bcm2835(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_bcm2835, slave);
+}
+
+static inline struct spi_master_bcm2835 *
+to_master_bcm2835(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master, struct spi_master_bcm2835, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->controller->dev;
+}
+
+static inline u32 bcm2835_rd(struct spi_master_bcm2835 *spim,
+			     unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void bcm2835_wr(struct spi_master_bcm2835 *spim,
+			      unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static inline void bcm2835_rd_fifo(struct spi_master_bcm2835 *spim)
+{
+	u8 byte;
+
+	while (spim->rx_len > 0 &&
+	       (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_RXD)) {
+		byte = bcm2835_rd(spim, BCM2835_SPI_FIFO);
+		if (spim->rx_buf)
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0)
+static inline bool xn_gpio_is_valid(struct spi_device *spi)
+{
+       return spi->cs_gpiod != NULL;
+}
+
+static inline int xn_get_gpio(struct spi_device *spi)
+{
+       return desc_to_gpio(spi->cs_gpiod);
+}
+#else
+static inline bool xn_gpio_is_valid(struct spi_device *spi)
+{
+       return gpio_is_valid(spi->cs_gpio);
+}
+
+static inline int xn_get_gpio(struct spi_device *spi)
+{
+       return spi->cs_gpio;
+}
+#endif
+
+static inline void bcm2835_wr_fifo(struct spi_master_bcm2835 *spim)
+{
+	u8 byte;
+
+	while (spim->tx_len > 0 &&
+	       (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_TXD)) {
+		byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		bcm2835_wr(spim, BCM2835_SPI_FIFO, byte);
+		spim->tx_len--;
+	}
+}
+
+static void bcm2835_reset_hw(struct spi_master_bcm2835 *spim)
+{
+	u32 cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~(BCM2835_SPI_CS_INTR |
+		BCM2835_SPI_CS_INTD |
+		BCM2835_SPI_CS_DMAEN |
+		BCM2835_SPI_CS_TA);
+	cs |= BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX;
+
+	/* Reset the SPI block. */
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+	bcm2835_wr(spim, BCM2835_SPI_DLEN, 0);
+}
+
+static int bcm2835_spi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_bcm2835 *spim;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_bcm2835);
+
+	bcm2835_rd_fifo(spim);
+	bcm2835_wr_fifo(spim);
+
+	if (bcm2835_rd(spim, BCM2835_SPI_CS) & BCM2835_SPI_CS_DONE) {
+		bcm2835_reset_hw(spim);
+		rtdm_event_signal(&spim->transfer_done);
+	}
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int bcm2835_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	unsigned long spi_hz, cdiv;
+	u32 cs;
+
+	/* Set clock polarity and phase. */
+
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~(BCM2835_SPI_CS_CPOL | BCM2835_SPI_CS_CPHA);
+	if (config->mode & SPI_CPOL)
+		cs |= BCM2835_SPI_CS_CPOL;
+	if (config->mode & SPI_CPHA)
+		cs |= BCM2835_SPI_CS_CPHA;
+
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+	
+	/* Set clock frequency. */
+
+	spi_hz = config->speed_hz;
+
+	/*
+	 * The fastest achievable SCLK rate is half the APB clock,
+	 * i.e. clk_hz / 2.
+	 */
+	if (spi_hz >= spim->clk_hz / 2)
+		cdiv = 2;
+	else if (spi_hz) {
+		/* Round the divider up to the next multiple of 2. */
+		cdiv = DIV_ROUND_UP(spim->clk_hz, spi_hz);
+		cdiv += (cdiv % 2);
+		if (cdiv >= 65536)
+			cdiv = 0;
+	} else
+		cdiv = 0;
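+
+	/*
+	 * Worked example (illustrative values): with clk_hz = 250 MHz
+	 * and a requested spi_hz of 10 MHz, DIV_ROUND_UP() yields 25,
+	 * rounded up to the even divider 26 for an effective SCLK of
+	 * roughly 9.6 MHz; a register value of 0 selects 65536, the
+	 * largest divider the block supports.
+	 */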
+
+	bcm2835_wr(spim, BCM2835_SPI_CLK, cdiv);
+	
+	return 0;
+}
+
+static void bcm2835_chip_select(struct rtdm_spi_remote_slave *slave,
+				bool active)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 cs;
+
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	if (config->mode & SPI_CS_HIGH) {
+		cs |= BCM2835_SPI_CS_CSPOL;
+		cs |= BCM2835_SPI_CS_CSPOL0 << slave->chip_select;
+	} else {
+		cs &= ~BCM2835_SPI_CS_CSPOL;
+		cs &= ~(BCM2835_SPI_CS_CSPOL0 << slave->chip_select);
+	}
+
+	/* "active" is the logical state, not the impedance level. */
+
+	if (active) {
+		if (config->mode & SPI_NO_CS)
+			cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+		else {
+			cs &= ~(BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01);
+			cs |= slave->chip_select;
+		}
+	} else {
+		/* Put HW-CS into deselected state. */
+		cs &= ~BCM2835_SPI_CS_CSPOL;
+		/* Use the "undefined" chip-select as precaution. */
+		cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+	}
+
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+}
+
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	int ret;
+	u32 cs;
+	
+	cs = bcm2835_rd(spim, BCM2835_SPI_CS);
+
+	cs &= ~BCM2835_SPI_CS_REN;
+	if ((slave->config.mode & SPI_3WIRE) && spim->rx_buf)
+		cs |= BCM2835_SPI_CS_REN;
+
+	cs |= BCM2835_SPI_CS_TA;
+
+	/*
+	 * Fill in the FIFO if we have a GPIO chip select. Note that
+	 * there have been rare events where the native CS flapped for
+	 * less than 1us, which may change the behaviour; with GPIO-CS
+	 * this does not happen, so the FIFO preload is implemented
+	 * only for this case.
+	 */
+	if (slave->cs_gpiod) {
+		/* Set dummy CS, ->chip_select() was not called. */
+		cs |= BCM2835_SPI_CS_CS_10 | BCM2835_SPI_CS_CS_01;
+		/* Enable SPI block, before filling FIFO. */
+		bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+		bcm2835_wr_fifo(spim);
+	}
+
+	/* Enable interrupts last, wait for transfer completion. */
+	cs |= BCM2835_SPI_CS_INTR | BCM2835_SPI_CS_INTD;
+	bcm2835_wr(spim, BCM2835_SPI_CS, cs);
+
+	ret = rtdm_event_wait(&spim->transfer_done);
+	if (ret) {
+		bcm2835_reset_hw(spim);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int bcm2835_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	if (bcm->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+	
+	spim->tx_len = bcm->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = bcm->io_virt + spim->rx_len;
+	spim->rx_buf = bcm->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static int bcm2835_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+				     int len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	if ((bcm->io_len == 0) ||
+		(len <= 0) || (len > (bcm->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = bcm->io_virt + bcm->io_len / 2;
+	spim->rx_buf = bcm->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static ssize_t bcm2835_read(struct rtdm_spi_remote_slave *slave,
+			    void *rx, size_t len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static ssize_t bcm2835_write(struct rtdm_spi_remote_slave *slave,
+			     const void *tx, size_t len)
+{
+	struct spi_master_bcm2835 *spim = to_master_bcm2835(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static int set_iobufs(struct spi_slave_bcm2835 *bcm, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+	
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == bcm->io_len)
+		return 0;
+
+	if (bcm->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	/*
+	 * Since we need the I/O buffers to be set for starting a
+	 * transfer, there is no need for serializing this routine and
+	 * transfer_iobufs(), provided io_len is set last.
+	 *
+	 * NOTE: We don't need coherent memory until we actually get
+	 * DMA transfers working, this code is a bit ahead of
+	 * schedule.
+	 *
+	 * Revisit: this assumes the DMA mask covers 4GB.
+	 */
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	bcm->io_dma = dma;
+	bcm->io_virt = p;
+	smp_mb();
+	/*
+	 * May race with transfer_iobufs(), must be assigned after all
+	 * the rest is set up, enforcing a membar.
+	 */
+	bcm->io_len = len;
+	
+	return 0;
+}
+
+static int bcm2835_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+	int ret;
+
+	ret = set_iobufs(bcm, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = bcm->io_len / 2;
+	p->map_len = bcm->io_len;
+	
+	return 0;
+}
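+
+/*
+ * Layout sketch (illustrative request size): asking for io_len = 100
+ * makes set_iobufs() allocate L1_CACHE_ALIGN(100) * 2 = 256 bytes on
+ * a CPU with 64-byte cache lines. User space then finds the RX area
+ * at i_offset = 0 and the TX area at o_offset = 128 inside the
+ * map_len = 256 window it subsequently mmaps.
+ */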
+
+static int bcm2835_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			       struct vm_area_struct *vma)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	/*
+	 * dma_alloc_coherent() delivers non-cached memory, make sure
+	 * to return consistent mapping attributes. Typically, mixing
+	 * memory attributes across address spaces referring to the
+	 * same physical area is architecturally wrong on ARM.
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, bcm->io_virt);
+}
+
+static void bcm2835_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	dma_free_coherent(NULL, bcm->io_len,
+			  bcm->io_virt, bcm->io_dma);
+	bcm->io_len = 0;
+}
+
+static int gpio_match_name(struct gpio_chip *chip, void *data)
+{
+	return !strcmp(chip->label, data);
+}
+
+static int find_cs_gpio(struct spi_device *spi)
+{
+	struct spi_controller *ctlr = spi->controller;
+	u32 pingroup_index, pin, pin_index;
+	struct device_node *pins;
+	struct gpio_chip *chip;
+	int cs_gpio = -ENOENT;
+	int ret;
+
+	if (xn_gpio_is_valid(spi)) {
+		dev_info(&spi->dev, "using GPIO%i for CS%d\n",
+			 xn_get_gpio(spi), spi->chip_select);
+		return 0;
+	}
+
+	/* Translate native CS to GPIO. */
+
+	for (pingroup_index = 0;
+	     (pins = of_parse_phandle(ctlr->dev.of_node,
+		     "pinctrl-0", pingroup_index)) != NULL; pingroup_index++) {
+		for (pin_index = 0;
+		     of_property_read_u32_index(pins, "brcm,pins",
+				pin_index, &pin) == 0; pin_index++) {
+			if ((spi->chip_select == 0 &&
+			     (pin == 8 || pin == 36 || pin == 46)) ||
+			    (spi->chip_select == 1 &&
+			     (pin == 7 || pin == 35))) {
+				cs_gpio = pin;
+				break;
+			}
+		}
+		of_node_put(pins);
+	}
+
+	/* If that failed, assume GPIOs 7-11 are used */
+	if (!gpio_is_valid(cs_gpio)) {
+		chip = gpiochip_find("pinctrl-bcm2835", gpio_match_name);
+		if (chip == NULL)
+			return 0;
+
+		cs_gpio = chip->base + 8 - spi->chip_select;
+	}
+
+	dev_info(&spi->dev,
+		 "setting up native-CS%i as GPIO %i\n",
+		 spi->chip_select, cs_gpio);
+
+	ret = gpio_direction_output(cs_gpio,
+			    (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+	if (ret) {
+		dev_err(&spi->dev,
+			"could not set CS%i gpio %i as output: %i",
+			spi->chip_select, cs_gpio, ret);
+		return ret;
+	}
+
+	/*
+	 * Force value on GPIO in case the pin controller does not
+	 * handle that properly when switching to output mode.
+	 */
+	gpio_set_value(cs_gpio, (spi->mode & SPI_CS_HIGH) ? 0 : 1);
+
+	return 0;
+}
+
+static struct rtdm_spi_remote_slave *
+bcm2835_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_slave_bcm2835 *bcm;
+	int ret;
+
+	if (spi->chip_select > 1) {
+		/*
+		 * Error out when a native chip select greater than 1
+		 * is requested: officially there is a CS2, but it is
+		 * not documented which GPIO is connected to it...
+		 */
+		dev_err(&spi->dev,
+			"%s: only two native chip-selects are supported\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	ret = find_cs_gpio(spi);
+	if (ret)
+		return ERR_PTR(ret);
+	
+	bcm = kzalloc(sizeof(*bcm), GFP_KERNEL);
+	if (bcm == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&bcm->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev,
+			"%s: failed to attach slave\n", __func__);
+		kfree(bcm);
+		return ERR_PTR(ret);
+	}
+
+	return &bcm->slave;
+}
+
+static void bcm2835_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_bcm2835 *bcm = to_slave_bcm2835(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+	kfree(bcm);
+}
+
+static struct rtdm_spi_master_ops bcm2835_master_ops = {
+	.configure = bcm2835_configure,
+	.chip_select = bcm2835_chip_select,
+	.set_iobufs = bcm2835_set_iobufs,
+	.mmap_iobufs = bcm2835_mmap_iobufs,
+	.mmap_release = bcm2835_mmap_release,
+	.transfer_iobufs = bcm2835_transfer_iobufs,
+	.transfer_iobufs_n = bcm2835_transfer_iobufs_n,
+	.write = bcm2835_write,
+	.read = bcm2835_read,
+	.attach_slave = bcm2835_attach_slave,
+	.detach_slave = bcm2835_detach_slave,
+};
+
+static int bcm2835_spi_probe(struct platform_device *pdev)
+{
+	struct spi_master_bcm2835 *spim;
+	struct rtdm_spi_master *master;
+	struct spi_controller *ctlr;
+	struct resource *r;
+	int ret, irq;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+		   struct spi_master_bcm2835, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_BCM2835;
+	master->ops = &bcm2835_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	ctlr = master->controller;
+	ctlr->mode_bits = BCM2835_SPI_MODE_BITS;
+	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+	ctlr->num_chipselect = 2;
+	ctlr->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_bcm2835, master);
+	rtdm_event_init(&spim->transfer_done, 0);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	
+	spim->clk = devm_clk_get(&pdev->dev, NULL);
+	if (IS_ERR(spim->clk)) {
+		ret = PTR_ERR(spim->clk);
+		goto fail;
+	}
+
+	spim->clk_hz = clk_get_rate(spim->clk);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		goto fail;
+	}
+
+	clk_prepare_enable(spim->clk);
+
+	/* Initialise the hardware with the default polarities */
+	bcm2835_wr(spim, BCM2835_SPI_CS,
+		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       bcm2835_spi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+			__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n",
+			__func__);
+		goto fail_unclk;
+	}
+
+	return 0;
+
+fail_unclk:
+	clk_disable_unprepare(spim->clk);
+fail:
+	spi_controller_put(ctlr);
+
+	return ret;
+}
+
+static int bcm2835_spi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_bcm2835 *spim;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	spim = container_of(master, struct spi_master_bcm2835, master);
+
+	/* Clear FIFOs, and disable the HW block */
+	bcm2835_wr(spim, BCM2835_SPI_CS,
+		   BCM2835_SPI_CS_CLEAR_RX | BCM2835_SPI_CS_CLEAR_TX);
+
+	rtdm_irq_free(&spim->irqh);
+
+	clk_disable_unprepare(spim->clk);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static const struct of_device_id bcm2835_spi_match[] = {
+	{
+		.compatible = "brcm,bcm2835-spi",
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, bcm2835_spi_match);
+
+static struct platform_driver bcm2835_spi_driver = {
+	.driver		= {
+		.name		= "spi-bcm2835",
+		.of_match_table	= bcm2835_spi_match,
+	},
+	.probe		= bcm2835_spi_probe,
+	.remove		= bcm2835_spi_remove,
+};
+module_platform_driver(bcm2835_spi_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.c b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.c
new file mode 100644
index 0000000..c6ac7bd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.c
@@ -0,0 +1,198 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/gpio.h>
+#include <linux/spi/spi.h>
+#include "spi-master.h"
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0)
+static inline void rtdm_spi_slave_init(struct spi_controller *ctrl,
+				       struct rtdm_spi_remote_slave *slave,
+				       struct spi_device *spi)
+{
+	if (spi->cs_gpiod)
+		slave->cs_gpiod = spi->cs_gpiod;
+	else {
+		slave->cs_gpiod = NULL;
+		if (ctrl->cs_gpiods)
+			slave->cs_gpiod = ctrl->cs_gpiods[spi->chip_select];
+	}
+}
+#else
+static inline void rtdm_spi_slave_init(struct spi_controller *ctrl,
+				       struct rtdm_spi_remote_slave *slave,
+				       struct spi_device *spi)
+{
+	if (gpio_is_valid(spi->cs_gpio))
+		slave->cs_gpiod = gpio_to_desc(spi->cs_gpio);
+	else {
+		slave->cs_gpiod = NULL;
+		if (ctrl->cs_gpios)
+			slave->cs_gpiod =
+				gpio_to_desc(ctrl->cs_gpios[spi->chip_select]);
+	}
+}
+#endif
+
+int rtdm_spi_add_remote_slave(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_master *master,
+			      struct spi_device *spi)
+{
+	struct spi_controller *ctlr = master->controller;
+	struct rtdm_device *dev;
+	rtdm_lockctx_t c;
+	int ret;
+
+	memset(slave, 0, sizeof(*slave));
+	slave->chip_select = spi->chip_select;
+	slave->config.bits_per_word = spi->bits_per_word;
+	slave->config.speed_hz = spi->max_speed_hz;
+	slave->config.mode = spi->mode;
+	slave->master = master;
+	
+	dev = &slave->dev;
+	dev->driver = &master->driver;
+	dev->label = kasprintf(GFP_KERNEL, "%s/slave%d.%%d",
+			       dev_name(&ctlr->dev),
+			       ctlr->bus_num);
+	if (dev->label == NULL)
+		return -ENOMEM;
+
+	rtdm_spi_slave_init(ctlr, slave, spi);
+
+	mutex_init(&slave->ctl_lock);
+
+	dev->device_data = master;
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		goto fail;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+	list_add_tail(&slave->next, &master->slaves);
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	return 0;
+fail:
+	kfree(dev->label);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_add_remote_slave);
+
+void rtdm_spi_remove_remote_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_device *dev;
+	rtdm_lockctx_t c;
+
+	mutex_destroy(&slave->ctl_lock);
+	rtdm_lock_get_irqsave(&master->lock, c);
+	list_del(&slave->next);
+	rtdm_lock_put_irqrestore(&master->lock, c);
+	dev = &slave->dev;
+	rtdm_dev_unregister(dev);
+	kfree(dev->label);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_remove_remote_slave);
+
+static int spi_device_probe(struct spi_device *spi)
+{
+	struct rtdm_spi_remote_slave *slave;
+	struct rtdm_spi_master *master;
+	int ret;
+
+	/*
+	 * Chicken and egg issue: we want the RTDM device class name
+	 * to duplicate the SPI master name, but that information is
+	 * only available after spi_register_master() has returned. We
+	 * solve this by initializing the RTDM driver descriptor on
+	 * the fly when the first SPI device on the bus is advertised
+	 * on behalf of spi_register_master().
+	 *
+	 * NOTE: the driver core guarantees serialization.
+	 */
+	master = spi_master_get_devdata(spi->master);
+	if (master->devclass == NULL) {
+		ret = __rtdm_spi_setup_driver(master);
+		if (ret)
+			return ret;
+	}
+
+	slave = master->ops->attach_slave(master, spi);
+	if (IS_ERR(slave))
+		return PTR_ERR(slave);
+
+	spi_set_drvdata(spi, slave);
+
+	return 0;
+}
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,18,0)
+static void spi_device_remove(struct spi_device *spi)
+#else
+static int spi_device_remove(struct spi_device *spi)
+#endif
+{
+	struct rtdm_spi_remote_slave *slave = spi_get_drvdata(spi);
+
+	slave->master->ops->detach_slave(slave);
+
+#if LINUX_VERSION_CODE < KERNEL_VERSION(5,18,0)
+	return 0;
+#endif
+}
+
+static const struct of_device_id spi_device_match[] = {
+	{
+		.compatible = "rtdm-spidev",
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, spi_device_match);
+
+static struct spi_driver spi_device_driver = {
+	.driver = {
+		.name =	"rtdm_spi_device",
+		.owner = THIS_MODULE,
+		.of_match_table = spi_device_match,
+	},
+	.probe	= spi_device_probe,
+	.remove	= spi_device_remove,
+};
+
+static int __init spi_device_init(void)
+{
+	return spi_register_driver(&spi_device_driver);
+}
+module_init(spi_device_init);
+
+static void __exit spi_device_exit(void)
+{
+	spi_unregister_driver(&spi_device_driver);
+}
+module_exit(spi_device_exit);
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.h b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.h
new file mode 100644
index 0000000..305dec3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-device.h
@@ -0,0 +1,53 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_SPI_DEVICE_H
+#define _RTDM_SPI_DEVICE_H
+
+#include <linux/list.h>
+#include <linux/atomic.h>
+#include <linux/mutex.h>
+#include <rtdm/driver.h>
+#include <rtdm/uapi/spi.h>
+
+struct class;
+struct rtdm_spi_master;
+
+struct rtdm_spi_remote_slave {
+	u8 chip_select;
+	struct gpio_desc *cs_gpiod;
+	struct rtdm_device dev;
+	struct list_head next;
+	struct rtdm_spi_config config;
+	struct rtdm_spi_master *master;
+	atomic_t mmap_refs;
+	struct mutex ctl_lock;
+};
+
+static inline struct device *
+slave_to_kdev(struct rtdm_spi_remote_slave *slave)
+{
+	return rtdm_dev_to_kdev(&slave->dev);
+}
+
+int rtdm_spi_add_remote_slave(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_master *spim,
+			      struct spi_device *spi);
+
+void rtdm_spi_remove_remote_slave(struct rtdm_spi_remote_slave *slave);
+
+#endif /* !_RTDM_SPI_DEVICE_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.c b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.c
new file mode 100644
index 0000000..9bddb9e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.c
@@ -0,0 +1,451 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/device.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/err.h>
+#include <linux/spi/spi.h>
+#include <linux/gpio.h>
+#include "spi-master.h"
+
+static inline
+struct device *to_kdev(struct rtdm_spi_remote_slave *slave)
+{
+	return rtdm_dev_to_kdev(&slave->dev);
+}
+
+static inline struct rtdm_spi_remote_slave *fd_to_slave(struct rtdm_fd *fd)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+
+	return container_of(dev, struct rtdm_spi_remote_slave, dev);
+}
+
+static int update_slave_config(struct rtdm_spi_remote_slave *slave,
+			       struct rtdm_spi_config *config)
+{
+	struct rtdm_spi_config old_config;
+	struct rtdm_spi_master *master = slave->master;
+	int ret;
+
+	rtdm_mutex_lock(&master->bus_lock);
+
+	old_config = slave->config;
+	slave->config = *config;
+	ret = slave->master->ops->configure(slave);
+	if (ret) {
+		slave->config = old_config;
+		rtdm_mutex_unlock(&master->bus_lock);
+		return ret;
+	}
+
+	rtdm_mutex_unlock(&master->bus_lock);
+	
+	dev_info(to_kdev(slave),
+		 "configured mode %d, %s%s%s%s%u bits/w, %u Hz max\n",
+		 (int) (slave->config.mode & (SPI_CPOL | SPI_CPHA)),
+		 (slave->config.mode & SPI_CS_HIGH) ? "cs_high, " : "",
+		 (slave->config.mode & SPI_LSB_FIRST) ? "lsb, " : "",
+		 (slave->config.mode & SPI_3WIRE) ? "3wire, " : "",
+		 (slave->config.mode & SPI_LOOP) ? "loopback, " : "",
+		 slave->config.bits_per_word,
+		 slave->config.speed_hz);
+	
+	return 0;
+}
+
+static int spi_master_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+
+	if (master->ops->open)
+		return master->ops->open(slave);
+		
+	return 0;
+}
+
+static void spi_master_close(struct rtdm_fd *fd)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+
+	if (master->cs == slave)
+		master->cs = NULL;
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	if (master->ops->close)
+		master->ops->close(slave);
+}
+
+static int do_chip_select(struct rtdm_spi_remote_slave *slave)
+{				/* master->bus_lock held */
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+	int state;
+
+	if (slave->config.speed_hz == 0)
+		return -EINVAL; /* Setup is missing. */
+
+	/* Serialize with spi_master_close() */
+	rtdm_lock_get_irqsave(&master->lock, c);
+	
+	if (master->cs != slave) {
+		if (slave->cs_gpiod) {
+			state = !!(slave->config.mode & SPI_CS_HIGH);
+			gpiod_set_raw_value(slave->cs_gpiod, state);
+		} else
+			master->ops->chip_select(slave, true);
+		master->cs = slave;
+	}
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+
+	return 0;
+}
+
+static void do_chip_deselect(struct rtdm_spi_remote_slave *slave)
+{				/* master->bus_lock held */
+	struct rtdm_spi_master *master = slave->master;
+	rtdm_lockctx_t c;
+	int state;
+
+	rtdm_lock_get_irqsave(&master->lock, c);
+
+	if (slave->cs_gpiod) {
+		state = !(slave->config.mode & SPI_CS_HIGH);
+		gpiod_set_raw_value(slave->cs_gpiod, state);
+	} else
+		master->ops->chip_select(slave, false);
+
+	master->cs = NULL;
+
+	rtdm_lock_put_irqrestore(&master->lock, c);
+}
+
+static int spi_master_ioctl_rt(struct rtdm_fd *fd,
+			       unsigned int request, void *arg)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_spi_config config;
+	int ret, len;
+
+	switch (request) {
+	case SPI_RTIOC_SET_CONFIG:
+		ret = rtdm_safe_copy_from_user(fd, &config,
+					       arg, sizeof(config));
+		if (ret == 0)
+			ret = update_slave_config(slave, &config);
+		break;
+	case SPI_RTIOC_GET_CONFIG:
+		rtdm_mutex_lock(&master->bus_lock);
+		config = slave->config;
+		rtdm_mutex_unlock(&master->bus_lock);
+		ret = rtdm_safe_copy_to_user(fd, arg,
+					     &config, sizeof(config));
+		break;
+	case SPI_RTIOC_TRANSFER:
+		ret = -EINVAL;
+		if (master->ops->transfer_iobufs) {
+			rtdm_mutex_lock(&master->bus_lock);
+			ret = do_chip_select(slave);
+			if (ret == 0) {
+				ret = master->ops->transfer_iobufs(slave);
+				do_chip_deselect(slave);
+			}
+			rtdm_mutex_unlock(&master->bus_lock);
+		}
+		break;
+	case SPI_RTIOC_TRANSFER_N:
+		ret = -EINVAL;
+		if (master->ops->transfer_iobufs_n) {
+			len = (long)arg;
+			rtdm_mutex_lock(&master->bus_lock);
+			ret = do_chip_select(slave);
+			if (ret == 0) {
+				ret = master->ops->transfer_iobufs_n(slave, len);
+				do_chip_deselect(slave);
+			}
+			rtdm_mutex_unlock(&master->bus_lock);
+		}
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
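+
+/*
+ * Illustrative user-space sequence (field values are only an
+ * example): configure the slave, then trigger a full-duplex transfer
+ * over the previously set and mapped I/O buffers.
+ *
+ *	struct rtdm_spi_config cfg = {
+ *		.mode = SPI_MODE_0,
+ *		.bits_per_word = 8,
+ *		.speed_hz = 10000000,
+ *	};
+ *
+ *	ioctl(fd, SPI_RTIOC_SET_CONFIG, &cfg);
+ *	ioctl(fd, SPI_RTIOC_TRANSFER);
+ */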
+
+static int spi_master_ioctl_nrt(struct rtdm_fd *fd,
+				unsigned int request, void *arg)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	struct rtdm_spi_iobufs iobufs;
+	int ret;
+
+	switch (request) {
+	case SPI_RTIOC_SET_IOBUFS:
+		ret = rtdm_safe_copy_from_user(fd, &iobufs,
+					       arg, sizeof(iobufs));
+		if (ret)
+			break;
+		/*
+		 * No transfer can happen without I/O buffers being
+		 * set, and I/O buffers cannot be reset, therefore we
+		 * need no serialization with the transfer code here.
+		 */
+		mutex_lock(&slave->ctl_lock);
+		ret = master->ops->set_iobufs(slave, &iobufs);
+		mutex_unlock(&slave->ctl_lock);
+		if (ret == 0)
+			ret = rtdm_safe_copy_to_user(fd, arg,
+					     &iobufs, sizeof(iobufs));
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t spi_master_read_rt(struct rtdm_fd *fd,
+				  void __user *u_buf, size_t len)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	void *rx;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	rx = xnmalloc(len);
+	if (rx == NULL)
+		return -ENOMEM;
+
+	rtdm_mutex_lock(&master->bus_lock);
+	ret = do_chip_select(slave);
+	if (ret == 0) {
+		ret = master->ops->read(slave, rx, len);
+		do_chip_deselect(slave);
+	}
+	rtdm_mutex_unlock(&master->bus_lock);
+	if (ret > 0)
+		ret = rtdm_safe_copy_to_user(fd, u_buf, rx, ret);
+	
+	xnfree(rx);
+	
+	return ret;
+}
+
+static ssize_t spi_master_write_rt(struct rtdm_fd *fd,
+				   const void __user *u_buf, size_t len)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	struct rtdm_spi_master *master = slave->master;
+	void *tx;
+	int ret;
+
+	if (len == 0)
+		return 0;
+
+	tx = xnmalloc(len);
+	if (tx == NULL)
+		return -ENOMEM;
+
+	ret = rtdm_safe_copy_from_user(fd, tx, u_buf, len);
+	if (ret == 0) {
+		rtdm_mutex_lock(&master->bus_lock);
+		ret = do_chip_select(slave);
+		if (ret == 0) {
+			ret = master->ops->write(slave, tx, len);
+			do_chip_deselect(slave);
+		}
+		rtdm_mutex_unlock(&master->bus_lock);
+	}
+	
+	xnfree(tx);
+
+	return ret;
+}
+
+static void iobufs_vmopen(struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = vma->vm_private_data;
+
+	atomic_inc(&slave->mmap_refs);
+	dev_dbg(slave_to_kdev(slave), "mapping added\n");
+}
+
+static void iobufs_vmclose(struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = vma->vm_private_data;
+
+	if (atomic_dec_and_test(&slave->mmap_refs)) {
+		slave->master->ops->mmap_release(slave);
+		dev_dbg(slave_to_kdev(slave), "mapping released\n");
+	}
+}
+
+static struct vm_operations_struct iobufs_vmops = {
+	.open = iobufs_vmopen,
+	.close = iobufs_vmclose,
+};
+
+static int spi_master_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct rtdm_spi_remote_slave *slave = fd_to_slave(fd);
+	int ret;
+
+	if (slave->master->ops->mmap_iobufs == NULL)
+		return -EINVAL;
+
+	ret = slave->master->ops->mmap_iobufs(slave, vma);
+	if (ret)
+		return ret;
+
+	dev_dbg(slave_to_kdev(slave), "mapping created\n");
+	atomic_inc(&slave->mmap_refs);
+
+	if (slave->master->ops->mmap_release) {
+		vma->vm_ops = &iobufs_vmops;
+		vma->vm_private_data = slave;
+	}
+
+	return 0;
+}
+
+static char *spi_slave_devnode(struct device *dev, umode_t *mode)
+{
+	return kasprintf(GFP_KERNEL, "rtdm/%s/%s",
+			 dev->class->name,
+			 dev_name(dev));
+}
+
+struct rtdm_spi_master *
+__rtdm_spi_alloc_master(struct device *dev, size_t size, int off)
+{
+	struct rtdm_spi_master *master;
+	struct spi_controller *ctlr;
+
+	ctlr = spi_alloc_master(dev, size);
+	if (ctlr == NULL)
+		return NULL;
+
+#if LINUX_VERSION_CODE >= KERNEL_VERSION(5,1,0)
+	ctlr->use_gpio_descriptors = true;
+#endif
+	master = (void *)(ctlr + 1) + off;
+	master->controller = ctlr;
+	spi_master_set_devdata(ctlr, master);
+
+	return master;
+}
+EXPORT_SYMBOL_GPL(__rtdm_spi_alloc_master);
+
+int __rtdm_spi_setup_driver(struct rtdm_spi_master *master)
+{
+	master->classname = kstrdup(
+		dev_name(&master->controller->dev), GFP_KERNEL);
+	master->devclass = class_create(THIS_MODULE,
+		master->classname);
+	if (IS_ERR(master->devclass)) {
+		kfree(master->classname);
+		printk(XENO_ERR "cannot create sysfs class\n");
+		return PTR_ERR(master->devclass);
+	}
+
+	master->devclass->devnode = spi_slave_devnode;
+	master->cs = NULL;
+
+	master->driver.profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(rtdm_spi_master,
+				  RTDM_CLASS_SPI,
+				  master->subclass,
+				  0);
+	master->driver.device_flags = RTDM_NAMED_DEVICE;
+	master->driver.base_minor = 0;
+	master->driver.device_count = 256;
+	master->driver.context_size = 0;
+	master->driver.ops = (struct rtdm_fd_ops){
+		.open		=	spi_master_open,
+		.close		=	spi_master_close,
+		.read_rt	=	spi_master_read_rt,
+		.write_rt	=	spi_master_write_rt,
+		.ioctl_rt	=	spi_master_ioctl_rt,
+		.ioctl_nrt	=	spi_master_ioctl_nrt,
+		.mmap		=	spi_master_mmap,
+	};
+	
+	rtdm_drv_set_sysclass(&master->driver, master->devclass);
+
+	INIT_LIST_HEAD(&master->slaves);
+	rtdm_lock_init(&master->lock);
+	rtdm_mutex_init(&master->bus_lock);
+
+	return 0;
+}
+
+static int spi_transfer_one_unimp(struct spi_master *master,
+				  struct spi_device *spi,
+				  struct spi_transfer *tfr)
+{
+	return -ENODEV;
+}
+
+int rtdm_spi_add_master(struct rtdm_spi_master *master)
+{
+	struct spi_controller *ctlr = master->controller;
+
+	/*
+	 * Prevent the transfer handler from being called by the
+	 * regular SPI stack, just in case.
+	 */
+	ctlr->transfer_one = spi_transfer_one_unimp;
+	master->devclass = NULL;
+
+	/*
+	 * Register with the core SPI stack; devices on the bus will
+	 * be enumerated and handed to spi_device_probe().
+	 */
+	return spi_register_controller(ctlr);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_add_master);
+
+void rtdm_spi_remove_master(struct rtdm_spi_master *master)
+{
+	struct class *class = master->devclass;
+	char *classname = master->classname;
+	
+	rtdm_mutex_destroy(&master->bus_lock);
+	spi_unregister_controller(master->controller);
+	rtdm_drv_set_sysclass(&master->driver, NULL);
+	class_destroy(class);
+	kfree(classname);
+}
+EXPORT_SYMBOL_GPL(rtdm_spi_remove_master);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.h b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.h
new file mode 100644
index 0000000..449a6cc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-master.h
@@ -0,0 +1,82 @@
+/**
+ * @note Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef _RTDM_SPI_MASTER_H
+#define _RTDM_SPI_MASTER_H
+
+#include <rtdm/driver.h>
+#include <rtdm/uapi/spi.h>
+#include "spi-device.h"
+
+struct class;
+struct device_node;
+struct rtdm_spi_master;
+struct spi_master;
+
+struct rtdm_spi_master_ops {
+	int (*open)(struct rtdm_spi_remote_slave *slave);
+	void (*close)(struct rtdm_spi_remote_slave *slave);
+	int (*configure)(struct rtdm_spi_remote_slave *slave);
+	void (*chip_select)(struct rtdm_spi_remote_slave *slave,
+			    bool active);
+	int (*set_iobufs)(struct rtdm_spi_remote_slave *slave,
+			  struct rtdm_spi_iobufs *p);
+	int (*mmap_iobufs)(struct rtdm_spi_remote_slave *slave,
+			   struct vm_area_struct *vma);
+	void (*mmap_release)(struct rtdm_spi_remote_slave *slave);
+	int (*transfer_iobufs)(struct rtdm_spi_remote_slave *slave);
+	int (*transfer_iobufs_n)(struct rtdm_spi_remote_slave *slave, int len);
+	ssize_t (*write)(struct rtdm_spi_remote_slave *slave,
+			 const void *tx, size_t len);
+	ssize_t (*read)(struct rtdm_spi_remote_slave *slave,
+			 void *rx, size_t len);
+	struct rtdm_spi_remote_slave *(*attach_slave)
+		(struct rtdm_spi_master *master,
+			struct spi_device *spi);
+	void (*detach_slave)(struct rtdm_spi_remote_slave *slave);
+};
+
+struct rtdm_spi_master {
+	int subclass;
+	const struct rtdm_spi_master_ops *ops;
+	struct spi_controller *controller;
+	struct {	/* Internal */
+		struct rtdm_driver driver;
+		struct class *devclass;
+		char *classname;
+		struct list_head slaves;
+		struct list_head next;
+		rtdm_lock_t lock;
+		rtdm_mutex_t bus_lock;
+		struct rtdm_spi_remote_slave *cs;
+	};
+};
+
+#define rtdm_spi_alloc_master(__dev, __type, __mptr)			\
+	__rtdm_spi_alloc_master(__dev, sizeof(__type),			\
+				offsetof(__type, __mptr))
+
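+/*
+ * Usage sketch (hypothetical driver-private type shown for
+ * illustration): embed the core descriptor in a driver-specific
+ * structure and let the macro compute its offset:
+ *
+ *	struct my_spi_master {
+ *		struct rtdm_spi_master master;
+ *		void __iomem *regs;
+ *	};
+ *
+ *	m = rtdm_spi_alloc_master(&pdev->dev,
+ *				  struct my_spi_master, master);
+ */
+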
+struct rtdm_spi_master *
+__rtdm_spi_alloc_master(struct device *dev, size_t size, int off);
+
+int __rtdm_spi_setup_driver(struct rtdm_spi_master *master);
+
+int rtdm_spi_add_master(struct rtdm_spi_master *master);
+
+void rtdm_spi_remove_master(struct rtdm_spi_master *master);
+
+#endif /* !_RTDM_SPI_MASTER_H */
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-omap2-mcspi-rt.c b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-omap2-mcspi-rt.c
new file mode 100644
index 0000000..7918f61
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-omap2-mcspi-rt.c
@@ -0,0 +1,1025 @@
+/**
+ * I/O handling lifted from drivers/spi/spi-omap2-mcspi.c:
+ * Copyright (C) 2019 Laurentiu-Cristian Duca
+ *  <laurentiu [dot] duca [at] gmail [dot] com>
+ * RTDM integration by:
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_gpio.h>
+#include <linux/of_device.h>
+#include <linux/pm_runtime.h>
+#include <linux/gcd.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_OMAP2_MCSPI  3
+
+#define OMAP4_MCSPI_REG_OFFSET 0x100
+#define OMAP2_MCSPI_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH)
+
+#define OMAP2_MCSPI_MAX_FREQ		48000000
+#define OMAP2_MCSPI_DRIVER_MAX_FREQ	40000000
+#define OMAP2_MCSPI_MAX_DIVIDER		4096
+#define OMAP2_MCSPI_MAX_FIFODEPTH	64
+#define OMAP2_MCSPI_MAX_FIFOWCNT	0xFFFF
+#define SPI_AUTOSUSPEND_TIMEOUT		2000
+#define PM_NEGATIVE_DELAY			-2000
+
+#define OMAP2_MCSPI_REVISION		0x00
+#define OMAP2_MCSPI_SYSCONFIG		0x10
+#define OMAP2_MCSPI_SYSSTATUS		0x14
+#define OMAP2_MCSPI_IRQSTATUS		0x18
+#define OMAP2_MCSPI_IRQENABLE		0x1c
+#define OMAP2_MCSPI_WAKEUPENABLE	0x20
+#define OMAP2_MCSPI_SYST		0x24
+#define OMAP2_MCSPI_MODULCTRL		0x28
+#define OMAP2_MCSPI_XFERLEVEL		0x7c
+
+/* per-channel (chip select) banks, 0x14 bytes each, first is: */
+#define OMAP2_MCSPI_CHANNELBANK_SIZE	0x14
+#define OMAP2_MCSPI_CHCONF0		0x2c
+#define OMAP2_MCSPI_CHSTAT0		0x30
+#define OMAP2_MCSPI_CHCTRL0		0x34
+#define OMAP2_MCSPI_TX0			0x38
+#define OMAP2_MCSPI_RX0			0x3c
+
+/* per-register bitmasks: */
+#define OMAP2_MCSPI_IRQSTATUS_EOW		BIT(17)
+#define OMAP2_MCSPI_IRQSTATUS_RX1_FULL  BIT(6)
+#define OMAP2_MCSPI_IRQSTATUS_TX1_EMPTY	BIT(4)
+#define OMAP2_MCSPI_IRQSTATUS_RX0_FULL  BIT(2)
+#define OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY	BIT(0)
+
+#define OMAP2_MCSPI_IRQENABLE_EOW		BIT(17)
+#define OMAP2_MCSPI_IRQENABLE_RX1_FULL  BIT(6)
+#define OMAP2_MCSPI_IRQENABLE_TX1_EMPTY	BIT(4)
+#define OMAP2_MCSPI_IRQENABLE_RX0_FULL  BIT(2)
+#define OMAP2_MCSPI_IRQENABLE_TX0_EMPTY	BIT(0)
+
+#define OMAP2_MCSPI_MODULCTRL_SINGLE	BIT(0)
+#define OMAP2_MCSPI_MODULCTRL_MS	BIT(2)
+#define OMAP2_MCSPI_MODULCTRL_STEST	BIT(3)
+
+#define OMAP2_MCSPI_CHCONF_PHA		BIT(0)
+#define OMAP2_MCSPI_CHCONF_POL		BIT(1)
+#define OMAP2_MCSPI_CHCONF_CLKD_MASK	(0x0f << 2)
+#define OMAP2_MCSPI_CHCONF_EPOL		BIT(6)
+#define OMAP2_MCSPI_CHCONF_WL_MASK	(0x1f << 7)
+#define OMAP2_MCSPI_CHCONF_TRM_RX_ONLY	BIT(12)
+#define OMAP2_MCSPI_CHCONF_TRM_TX_ONLY	BIT(13)
+#define OMAP2_MCSPI_CHCONF_TRM_MASK	(0x03 << 12)
+#define OMAP2_MCSPI_CHCONF_DMAW		BIT(14)
+#define OMAP2_MCSPI_CHCONF_DMAR		BIT(15)
+#define OMAP2_MCSPI_CHCONF_DPE0		BIT(16)
+#define OMAP2_MCSPI_CHCONF_DPE1		BIT(17)
+#define OMAP2_MCSPI_CHCONF_IS		BIT(18)
+#define OMAP2_MCSPI_CHCONF_TURBO	BIT(19)
+#define OMAP2_MCSPI_CHCONF_FORCE	BIT(20)
+#define OMAP2_MCSPI_CHCONF_FFET		BIT(27)
+#define OMAP2_MCSPI_CHCONF_FFER		BIT(28)
+#define OMAP2_MCSPI_CHCONF_CLKG		BIT(29)
+
+#define OMAP2_MCSPI_CHSTAT_RXS		BIT(0)
+#define OMAP2_MCSPI_CHSTAT_TXS		BIT(1)
+#define OMAP2_MCSPI_CHSTAT_EOT		BIT(2)
+#define OMAP2_MCSPI_CHSTAT_TXFFE	BIT(3)
+
+#define OMAP2_MCSPI_CHCTRL_EN		BIT(0)
+#define OMAP2_MCSPI_CHCTRL_EXTCLK_MASK	(0xff << 8)
+
+#define OMAP2_MCSPI_WAKEUPENABLE_WKEN	BIT(0)
+
+#define OMAP2_MCSPI_SYSCONFIG_CLOCKACTIVITY_MASK	(0x3 << 8)
+#define OMAP2_MCSPI_SYSCONFIG_SIDLEMODE_MASK		(0x3 << 3)
+#define OMAP2_MCSPI_SYSCONFIG_SOFTRESET				BIT(1)
+#define OMAP2_MCSPI_SYSCONFIG_AUTOIDLE				BIT(0)
+
+#define OMAP2_MCSPI_SYSSTATUS_RESETDONE BIT(0)
+
+/* current version supports max 2 CS per module */
+#define OMAP2_MCSPI_CS_N	2
+
+#define MCSPI_PINDIR_D0_IN_D1_OUT	0
+#define MCSPI_PINDIR_D0_OUT_D1_IN	1
+
+struct omap2_mcspi_platform_config {
+	unsigned short	num_cs;
+	unsigned int regs_offset;
+	unsigned int pin_dir:1;
+};
+
+struct omap2_mcspi_cs {
+	/* CS channel */
+	void __iomem		*regs;
+	unsigned long		phys;
+	u8 chosen;
+};
+
+struct spi_master_omap2_mcspi {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	unsigned long phys;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	int fifo_depth;
+	rtdm_event_t transfer_done;
+	rtdm_lock_t lock;
+	unsigned int pin_dir:1;
+	struct omap2_mcspi_cs cs[OMAP2_MCSPI_CS_N];
+	/* logging */
+	int n_rx_full;
+	int n_tx_empty;
+	int n_interrupts;
+};
+
+struct spi_slave_omap2_mcspi {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_omap2_mcspi *
+to_slave_omap2_mcspi(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_omap2_mcspi, slave);
+}
+
+static inline struct spi_master_omap2_mcspi *
+to_master_omap2_mcspi(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master,
+			struct spi_master_omap2_mcspi, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->controller->dev;
+}
+
+static inline u32 mcspi_rd_reg(struct spi_master_omap2_mcspi *spim,
+			     unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void mcspi_wr_reg(struct spi_master_omap2_mcspi *spim,
+			      unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static inline u32
+mcspi_rd_cs_reg(struct spi_master_omap2_mcspi *spim,
+				int cs_id, unsigned int reg)
+{
+	return readl(spim->cs[cs_id].regs + reg);
+}
+
+static inline void
+mcspi_wr_cs_reg(struct spi_master_omap2_mcspi *spim, int cs_id,
+				unsigned int reg, u32 val)
+{
+	writel(val, spim->cs[cs_id].regs + reg);
+}
+
+static void omap2_mcspi_init_hw(struct spi_master_omap2_mcspi *spim)
+{
+	u32 l;
+
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSCONFIG);
+	/* CLOCKACTIVITY = 3h: OCP and Functional clocks are maintained */
+	l |= OMAP2_MCSPI_SYSCONFIG_CLOCKACTIVITY_MASK;
+	/* SIDLEMODE = 1h: ignore idle requests */
+	l &= ~OMAP2_MCSPI_SYSCONFIG_SIDLEMODE_MASK;
+	l |= 0x1 << 3;
+	/* AUTOIDLE=0: OCP clock is free-running */
+	l &= ~OMAP2_MCSPI_SYSCONFIG_AUTOIDLE;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_SYSCONFIG, l);
+
+	/* Initialise the hardware with the default polarities (only omap2) */
+	mcspi_wr_reg(spim, OMAP2_MCSPI_WAKEUPENABLE,
+				 OMAP2_MCSPI_WAKEUPENABLE_WKEN);
+
+	/* Setup single-channel master mode */
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_MODULCTRL);
+	/* MS=0 => spi master */
+	l &= ~(OMAP2_MCSPI_MODULCTRL_STEST | OMAP2_MCSPI_MODULCTRL_MS);
+	l |= OMAP2_MCSPI_MODULCTRL_SINGLE;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_MODULCTRL, l);
+}
+
+static void omap2_mcspi_reset_hw(struct spi_master_omap2_mcspi *spim)
+{
+	u32 l;
+
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSCONFIG);
+	l |= OMAP2_MCSPI_SYSCONFIG_SOFTRESET;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_SYSCONFIG, l);
+	/* wait until reset is done */
+	do {
+		l = mcspi_rd_reg(spim, OMAP2_MCSPI_SYSSTATUS);
+		cpu_relax();
+	} while (!(l & OMAP2_MCSPI_SYSSTATUS_RESETDONE));
+}
+
+static void
+omap2_mcspi_chip_select(struct rtdm_spi_remote_slave *slave, bool active)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 l;
+
+	/* FORCE: manual SPIEN assertion to keep SPIEN active */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	/* "active" is the logical state, not the impedance level. */
+	if (active)
+		l |= OMAP2_MCSPI_CHCONF_FORCE;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_FORCE;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, l);
+	/* Flush posted writes. */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+}
+
+static u32 omap2_mcspi_calc_divisor(u32 speed_hz)
+{
+	u32 div;
+
+	for (div = 0; div < 15; div++)
+		if (speed_hz >= (OMAP2_MCSPI_MAX_FREQ >> div))
+			return div;
+
+	return 15;
+}
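+
+/*
+ * Example for omap2_mcspi_calc_divisor() (illustrative values): with
+ * the 48 MHz reference, a 6 MHz request returns div = 3, since
+ * 48 MHz >> 3 = 6 MHz; anything below 48 MHz >> 14 falls through to
+ * the maximum division exponent of 15.
+ */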
+
+/* channel 0 enable/disable */
+static void
+omap2_mcspi_channel_enable(struct rtdm_spi_remote_slave *slave, int enable)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 l;
+
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+	if (enable)
+		l |= OMAP2_MCSPI_CHCTRL_EN;
+	else
+		l &= ~OMAP2_MCSPI_CHCTRL_EN;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0, l);
+	/* Flush posted writes */
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+}
+
+/* called only when no transfer is active to this device */
+static int omap2_mcspi_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 l = 0, clkd = 0, div = 1, extclk = 0, clkg = 0, word_len;
+	u32 speed_hz = OMAP2_MCSPI_MAX_FREQ;
+	u32 chctrl0;
+
+	/* The configuration parameters can be loaded in MCSPI_CH(i)CONF
+	 * only when the channel is disabled
+	 */
+	omap2_mcspi_channel_enable(slave, 0);
+
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+
+	/* Set clock frequency. */
+	speed_hz = (u32) config->speed_hz;
+	if (speed_hz > OMAP2_MCSPI_DRIVER_MAX_FREQ) {
+		dev_warn(slave_to_kdev(slave),
+			"maximum clock frequency is %d\n",
+			OMAP2_MCSPI_DRIVER_MAX_FREQ);
+	}
+	speed_hz = min_t(u32, speed_hz, OMAP2_MCSPI_DRIVER_MAX_FREQ);
+	if (speed_hz < (OMAP2_MCSPI_MAX_FREQ / OMAP2_MCSPI_MAX_DIVIDER)) {
+		clkd = omap2_mcspi_calc_divisor(speed_hz);
+		speed_hz = OMAP2_MCSPI_MAX_FREQ >> clkd;
+		clkg = 0;
+	} else {
+		div = (OMAP2_MCSPI_MAX_FREQ + speed_hz - 1) / speed_hz;
+		speed_hz = OMAP2_MCSPI_MAX_FREQ / div;
+		clkd = (div - 1) & 0xf;
+		extclk = (div - 1) >> 4;
+		clkg = OMAP2_MCSPI_CHCONF_CLKG;
+	}
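+	/*
+	 * With CLKG set, the divider has one-cycle granularity:
+	 * div = ceil(ref_clk / speed_hz), with the low 4 bits going
+	 * to CLKD and the upper 8 bits to the EXTCLK field of
+	 * CHCTRL0. E.g., assuming a 48 MHz reference, a 1 MHz
+	 * request gives div = 48, i.e. clkd = 0xf, extclk = 2, and
+	 * an exact 1 MHz. With CLKG clear, only the power-of-two
+	 * divider from omap2_mcspi_calc_divisor() applies.
+	 */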
+	/* set clock divisor */
+	l &= ~OMAP2_MCSPI_CHCONF_CLKD_MASK;
+	l |= clkd << 2;
+	/* set clock granularity */
+	l &= ~OMAP2_MCSPI_CHCONF_CLKG;
+	l |= clkg;
+	if (clkg) {
+		chctrl0 = mcspi_rd_cs_reg(spim,
+			slave->chip_select, OMAP2_MCSPI_CHCTRL0);
+		chctrl0 &= ~OMAP2_MCSPI_CHCTRL_EXTCLK_MASK;
+		chctrl0 |= extclk << 8;
+		mcspi_wr_cs_reg(spim,
+			slave->chip_select, OMAP2_MCSPI_CHCTRL0, chctrl0);
+	}
+
+	if (spim->pin_dir == MCSPI_PINDIR_D0_IN_D1_OUT) {
+		l &= ~OMAP2_MCSPI_CHCONF_IS;
+		l &= ~OMAP2_MCSPI_CHCONF_DPE1;
+		l |= OMAP2_MCSPI_CHCONF_DPE0;
+	} else {
+		l |= OMAP2_MCSPI_CHCONF_IS;
+		l |= OMAP2_MCSPI_CHCONF_DPE1;
+		l &= ~OMAP2_MCSPI_CHCONF_DPE0;
+	}
+
+	/* wordlength */
+	word_len = config->bits_per_word;
+	/* TODO: allow word_len != 8 */
+	if (word_len != 8) {
+		dev_err(slave_to_kdev(slave), "word_len(%d) != 8.\n",
+				word_len);
+		return -EIO;
+	}
+	l &= ~OMAP2_MCSPI_CHCONF_WL_MASK;
+	l |= (word_len - 1) << 7;
+
+	/* set chipselect polarity; manage with FORCE */
+	if (!(config->mode & SPI_CS_HIGH))
+		/* CS active-low */
+		l |= OMAP2_MCSPI_CHCONF_EPOL;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_EPOL;
+
+	/* set SPI mode 0..3 */
+	if (config->mode & SPI_CPOL)
+		l |= OMAP2_MCSPI_CHCONF_POL;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_POL;
+	if (config->mode & SPI_CPHA)
+		l |= OMAP2_MCSPI_CHCONF_PHA;
+	else
+		l &= ~OMAP2_MCSPI_CHCONF_PHA;
+
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, l);
+	l = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+
+	omap2_mcspi_chip_select(slave, 0);
+
+	return 0;
+}
+
+static void mcspi_rd_fifo(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+
+	/* Receiver register must be read to remove source of interrupt */
+	for (i = 0; i < spim->fifo_depth; i++) {
+		byte = mcspi_rd_cs_reg(spim, cs_id, OMAP2_MCSPI_RX0);
+		if (spim->rx_buf && (spim->rx_len > 0))
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static void mcspi_wr_fifo(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+
+	/* load transmitter register to remove the source of the interrupt */
+	for (i = 0; i < spim->fifo_depth; i++) {
+		if (spim->tx_len <= 0)
+			byte = 0;
+		else
+			byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_TX0, byte);
+		spim->tx_len--;
+	}
+}
+
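+/*
+ * Variant of mcspi_wr_fifo() for the non-IRQ transfer path: it takes
+ * the master lock with interrupts off, so the initial FIFO fill
+ * cannot race with omap2_mcspi_interrupt() refilling the FIFO.
+ */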
+static void mcspi_wr_fifo_bh(struct spi_master_omap2_mcspi *spim, int cs_id)
+{
+	u8 byte;
+	int i;
+	rtdm_lockctx_t c;
+
+	rtdm_lock_get_irqsave(&spim->lock, c);
+
+	for (i = 0; i < spim->fifo_depth; i++) {
+		if (spim->tx_len <= 0)
+			byte = 0;
+		else
+			byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_TX0, byte);
+		spim->tx_len--;
+	}
+
+	rtdm_lock_put_irqrestore(&spim->lock, c);
+}
+
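+/*
+ * RT interrupt handler: drains/refills the FIFOs of the channel
+ * currently marked chosen, acknowledges the status bits by writing
+ * them back, and, once both byte counters have run down, masks
+ * further interrupts and signals transfer_done.
+ */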
+static int omap2_mcspi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_omap2_mcspi *spim;
+	u32 l;
+	int i, cs_id = 0;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_omap2_mcspi);
+	rtdm_lock_get(&spim->lock);
+
+	for (i = 0; i < OMAP2_MCSPI_CS_N; i++)
+		if (spim->cs[i].chosen) {
+			cs_id = i;
+			break;
+		}
+
+	spim->n_interrupts++;
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_IRQSTATUS);
+
+	if ((l & OMAP2_MCSPI_IRQSTATUS_RX0_FULL) ||
+	   (l & OMAP2_MCSPI_IRQSTATUS_RX1_FULL)) {
+		mcspi_rd_fifo(spim, cs_id);
+		spim->n_rx_full++;
+	}
+	if ((l & OMAP2_MCSPI_IRQSTATUS_TX0_EMPTY) ||
+		(l & OMAP2_MCSPI_IRQSTATUS_TX1_EMPTY)) {
+		if (spim->tx_len > 0)
+			mcspi_wr_fifo(spim, cs_id);
+		spim->n_tx_empty++;
+	}
+
+	/* write 1 to OMAP2_MCSPI_IRQSTATUS field to reset it */
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQSTATUS, l);
+
+	if ((spim->tx_len <= 0) && (spim->rx_len <= 0)) {
+		/* disable interrupts */
+		mcspi_wr_reg(spim, OMAP2_MCSPI_IRQENABLE, 0);
+
+		rtdm_event_signal(&spim->transfer_done);
+	}
+
+	rtdm_lock_put(&spim->lock);
+
+	return RTDM_IRQ_HANDLED;
+}
+
+static int omap2_mcspi_disable_fifo(struct rtdm_spi_remote_slave *slave,
+							int cs_id)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 chconf;
+
+	chconf = mcspi_rd_cs_reg(spim, cs_id, OMAP2_MCSPI_CHCONF0);
+	chconf &= ~(OMAP2_MCSPI_CHCONF_FFER | OMAP2_MCSPI_CHCONF_FFET);
+	mcspi_wr_cs_reg(spim, cs_id, OMAP2_MCSPI_CHCONF0, chconf);
+	return 0;
+}
+
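+/*
+ * Program the FIFO thresholds and word counter. In XFERLEVEL, WCNT
+ * (bits 16 and up) is the number of words the transfer will move,
+ * and the two low bytes hold the RX almost-full / TX almost-empty
+ * trigger levels, both as fifo_depth - 1. A sketch of the
+ * arithmetic, assuming OMAP2_MCSPI_MAX_FIFODEPTH is 64: for
+ * tx_len = 100, fifo_depth = 32 and wcnt = 32 * (100 / 32) = 96;
+ * only whole-FIFO multiples are counted here, the 4-byte remainder
+ * being sent in a separate pass by do_transfer_irq().
+ */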
+static int omap2_mcspi_set_fifo(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	unsigned int wcnt;
+	int max_fifo_depth, fifo_depth, bytes_per_word;
+	u32 chconf, xferlevel;
+
+	chconf = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	bytes_per_word = 1;
+
+	max_fifo_depth = OMAP2_MCSPI_MAX_FIFODEPTH / 2;
+	if (spim->tx_len < max_fifo_depth) {
+		fifo_depth = spim->tx_len;
+		wcnt = spim->tx_len / bytes_per_word;
+	} else {
+		fifo_depth = max_fifo_depth;
+		wcnt = max_fifo_depth * (spim->tx_len / max_fifo_depth)
+			/ bytes_per_word;
+	}
+	if (wcnt > OMAP2_MCSPI_MAX_FIFOWCNT) {
+		dev_err(slave_to_kdev(slave),
+			"%s: wcnt=%d: too many bytes in a transfer.\n",
+			__func__, wcnt);
+		return -EINVAL;
+	}
+
+	chconf |= OMAP2_MCSPI_CHCONF_FFER;
+	chconf |= OMAP2_MCSPI_CHCONF_FFET;
+
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, chconf);
+	spim->fifo_depth = fifo_depth;
+
+	xferlevel = wcnt << 16;
+	xferlevel |= (fifo_depth - 1) << 8;
+	xferlevel |= fifo_depth - 1;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_XFERLEVEL, xferlevel);
+
+	return 0;
+}
+
+
+static int do_transfer_irq_bh(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	u32 chconf, l;
+	int ret;
+	int i;
+
+	/* configure to send and receive */
+	chconf = mcspi_rd_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0);
+	chconf &= ~OMAP2_MCSPI_CHCONF_TRM_MASK;
+	chconf &= ~OMAP2_MCSPI_CHCONF_TURBO;
+	mcspi_wr_cs_reg(spim, slave->chip_select, OMAP2_MCSPI_CHCONF0, chconf);
+
+	/* fifo can be enabled on a single channel */
+	if (slave->chip_select == 0) {
+		if (spim->cs[1].chosen)
+			omap2_mcspi_disable_fifo(slave, 1);
+	} else {
+		if (spim->cs[0].chosen)
+			omap2_mcspi_disable_fifo(slave, 0);
+	}
+	ret = omap2_mcspi_set_fifo(slave);
+	if (ret)
+		return ret;
+
+	omap2_mcspi_channel_enable(slave, 1);
+
+	/* Set slave->chip_select as chosen */
+	for (i = 0; i < OMAP2_MCSPI_CS_N; i++)
+		if (i == slave->chip_select)
+			spim->cs[i].chosen = 1;
+		else
+			spim->cs[i].chosen = 0;
+
+	/* The interrupt status bit should always be reset
+	 * after the channel is enabled
+	 * and before the event is enabled as an interrupt source.
+	 */
+	/* write 1 to OMAP2_MCSPI_IRQSTATUS field to reset it */
+	l = mcspi_rd_reg(spim, OMAP2_MCSPI_IRQSTATUS);
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQSTATUS, l);
+
+	spim->n_interrupts = 0;
+	spim->n_rx_full = 0;
+	spim->n_tx_empty = 0;
+
+	/* Enable interrupts last. */
+	/* support only two channels */
+	if (slave->chip_select == 0)
+		l = OMAP2_MCSPI_IRQENABLE_TX0_EMPTY |
+			OMAP2_MCSPI_IRQENABLE_RX0_FULL;
+	else
+		l = OMAP2_MCSPI_IRQENABLE_TX1_EMPTY |
+			OMAP2_MCSPI_IRQENABLE_RX1_FULL;
+	mcspi_wr_reg(spim, OMAP2_MCSPI_IRQENABLE, l);
+
+	/* TX_EMPTY will be raised only after data is transferred */
+	mcspi_wr_fifo_bh(spim, slave->chip_select);
+
+	/* wait for transfer completion */
+	ret = rtdm_event_wait(&spim->transfer_done);
+	omap2_mcspi_channel_enable(slave, 0);
+	if (ret)
+		return ret;
+
+	/* spim->tx_len and spim->rx_len should be 0 */
+	if (spim->tx_len || spim->rx_len)
+		return -EIO;
+	return 0;
+}
+
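+/*
+ * Split a transfer into FIFO-sized work: a first pass moves the
+ * largest multiple of half the FIFO depth, a second pass whatever
+ * remains, each via do_transfer_irq_bh(). Keeping the 64-byte FIFO
+ * assumption from above, a 100-byte transfer goes out as 96 + 4.
+ */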
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int len, first_size, last_size, ret = 0;
+
+	len = spim->tx_len;
+
+	if (len < (OMAP2_MCSPI_MAX_FIFODEPTH / 2))
+		goto label_last;
+
+	first_size = (OMAP2_MCSPI_MAX_FIFODEPTH / 2) *
+		(len / (OMAP2_MCSPI_MAX_FIFODEPTH / 2));
+	spim->tx_len = first_size;
+	spim->rx_len = first_size;
+	ret = do_transfer_irq_bh(slave);
+	if (ret)
+		return ret;
+
+label_last:
+	last_size = len % (OMAP2_MCSPI_MAX_FIFODEPTH / 2);
+	if (last_size == 0)
+		return ret;
+	spim->tx_len = last_size;
+	spim->rx_len = last_size;
+	ret = do_transfer_irq_bh(slave);
+	return ret;
+}
+
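+/*
+ * The mmap()-able I/O area set up by set_iobufs() below is one
+ * DMA-coherent block: RX data occupies the first half (i_offset = 0)
+ * and TX data the second (o_offset = io_len / 2), which is why
+ * tx_buf points io_len / 2 bytes in while rx_buf sits at the base.
+ */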
+static int omap2_mcspi_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	if (mapped_data->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+
+	spim->tx_len = mapped_data->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = mapped_data->io_virt + spim->rx_len;
+	spim->rx_buf = mapped_data->io_virt;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : 0;
+}
+
+static int omap2_mcspi_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+								 int len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	if ((mapped_data->io_len == 0) ||
+		(len <= 0) || (len > (mapped_data->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = mapped_data->io_virt + mapped_data->io_len / 2;
+	spim->rx_buf = mapped_data->io_virt;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : 0;
+}
+
+static ssize_t omap2_mcspi_read(struct rtdm_spi_remote_slave *slave,
+			    void *rx, size_t len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int ret;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : len;
+}
+
+static ssize_t omap2_mcspi_write(struct rtdm_spi_remote_slave *slave,
+			     const void *tx, size_t len)
+{
+	struct spi_master_omap2_mcspi *spim = to_master_omap2_mcspi(slave);
+	int ret;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	ret = do_transfer_irq(slave);
+
+	return ret ? : len;
+}
+
+static int set_iobufs(struct spi_slave_omap2_mcspi *mapped_data, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == mapped_data->io_len)
+		return 0;
+
+	if (mapped_data->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	/*
+	 * Since we need the I/O buffers to be set for starting a
+	 * transfer, there is no need for serializing this routine and
+	 * transfer_iobufs(), provided io_len is set last.
+	 *
+	 * NOTE: We don't need coherent memory until we actually get
+	 * DMA transfers working, this code is a bit ahead of
+	 * schedule.
+	 *
+	 * Revisit: this assumes a 32-bit (4 GB) DMA mask.
+	 */
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	mapped_data->io_dma = dma;
+	mapped_data->io_virt = p;
+	/*
+	 * May race with transfer_iobufs(), must be assigned after all
+	 * the rest is set up, enforcing a membar.
+	 */
+	smp_mb();
+	mapped_data->io_len = len;
+
+	return 0;
+}
+
+static int omap2_mcspi_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			      struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+	int ret;
+
+	ret = set_iobufs(mapped_data, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = mapped_data->io_len / 2;
+	p->map_len = mapped_data->io_len;
+
+	return 0;
+}
+
+static int omap2_mcspi_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			       struct vm_area_struct *vma)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	/*
+	 * dma_alloc_coherent() delivers non-cached memory, make sure
+	 * to return consistent mapping attributes. Typically, mixing
+	 * memory attributes across address spaces referring to the
+	 * same physical area is architecturally wrong on ARM.
+	 */
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, mapped_data->io_virt);
+}
+
+static void omap2_mcspi_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	dma_free_coherent(NULL, mapped_data->io_len,
+			  mapped_data->io_virt, mapped_data->io_dma);
+	mapped_data->io_len = 0;
+}
+
+static struct rtdm_spi_remote_slave *
+omap2_mcspi_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_master_omap2_mcspi *spim;
+	struct spi_slave_omap2_mcspi *mapped_data;
+	int ret;
+
+	if ((spi->chip_select >= OMAP2_MCSPI_CS_N) || (OMAP2_MCSPI_CS_N > 2)) {
+		/* Error in the case of native CS requested with CS > 1 */
+		dev_err(&spi->dev, "%s: only two native CS per spi module are supported\n",
+			__func__);
+		return ERR_PTR(-EINVAL);
+	}
+
+	mapped_data = kzalloc(sizeof(*mapped_data), GFP_KERNEL);
+	if (mapped_data == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&mapped_data->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev, "%s: failed to attach slave\n", __func__);
+		kfree(mapped_data);
+		return ERR_PTR(ret);
+	}
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+	spim->cs[spi->chip_select].chosen = 0;
+	spim->cs[spi->chip_select].regs = spim->regs +
+		spi->chip_select * OMAP2_MCSPI_CHANNELBANK_SIZE;
+	spim->cs[spi->chip_select].phys = spim->phys +
+		spi->chip_select * OMAP2_MCSPI_CHANNELBANK_SIZE;
+
+	return &mapped_data->slave;
+}
+
+static void omap2_mcspi_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_omap2_mcspi *mapped_data = to_slave_omap2_mcspi(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+
+	kfree(mapped_data);
+}
+
+static struct rtdm_spi_master_ops omap2_mcspi_master_ops = {
+	.configure = omap2_mcspi_configure,
+	.chip_select = omap2_mcspi_chip_select,
+	.set_iobufs = omap2_mcspi_set_iobufs,
+	.mmap_iobufs = omap2_mcspi_mmap_iobufs,
+	.mmap_release = omap2_mcspi_mmap_release,
+	.transfer_iobufs = omap2_mcspi_transfer_iobufs,
+	.transfer_iobufs_n = omap2_mcspi_transfer_iobufs_n,
+	.write = omap2_mcspi_write,
+	.read = omap2_mcspi_read,
+	.attach_slave = omap2_mcspi_attach_slave,
+	.detach_slave = omap2_mcspi_detach_slave,
+};
+
+static struct omap2_mcspi_platform_config omap2_pdata = {
+	.regs_offset = 0,
+};
+
+static struct omap2_mcspi_platform_config omap4_pdata = {
+	.regs_offset = OMAP4_MCSPI_REG_OFFSET,
+};
+
+static const struct of_device_id omap_mcspi_of_match[] = {
+	{
+		.compatible = "ti,omap2-mcspi",
+		.data = &omap2_pdata,
+	},
+	{
+		/* beaglebone black */
+		.compatible = "ti,omap4-mcspi",
+		.data = &omap4_pdata,
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, omap_mcspi_of_match);
+
+static int omap2_mcspi_probe(struct platform_device *pdev)
+{
+	struct spi_master_omap2_mcspi *spim;
+	struct rtdm_spi_master *master;
+	struct spi_controller *ctlr;
+	struct resource *r;
+	int ret, irq;
+	u32 regs_offset = 0;
+	const struct omap2_mcspi_platform_config *pdata;
+	const struct of_device_id *match;
+	u32 num_cs = 1;
+	unsigned int pin_dir = MCSPI_PINDIR_D0_IN_D1_OUT;
+
+	match = of_match_device(omap_mcspi_of_match, &pdev->dev);
+	if (match) {
+		pdata = match->data;
+		regs_offset = pdata->regs_offset;
+	} else {
+		dev_err(&pdev->dev, "%s: cannot find a match with device tree "
+				"of '%s' or '%s'\n",
+				__func__,
+				omap_mcspi_of_match[0].compatible,
+				omap_mcspi_of_match[1].compatible);
+		return -ENOENT;
+	}
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+		   struct spi_master_omap2_mcspi, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_OMAP2_MCSPI;
+	master->ops = &omap2_mcspi_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	ctlr = master->controller;
+	/* flags understood by this controller driver */
+	ctlr->mode_bits = OMAP2_MCSPI_SPI_MODE_BITS;
+	/* TODO: SPI_BPW_RANGE_MASK(4, 32); */
+	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+	of_property_read_u32(pdev->dev.of_node, "ti,spi-num-cs", &num_cs);
+	ctlr->num_chipselect = num_cs;
+	if (of_get_property(pdev->dev.of_node,
+		"ti,pindir-d0-out-d1-in", NULL)) {
+		pin_dir = MCSPI_PINDIR_D0_OUT_D1_IN;
+	}
+
+	ctlr->max_speed_hz = OMAP2_MCSPI_MAX_FREQ;
+	ctlr->min_speed_hz = OMAP2_MCSPI_MAX_FREQ >> 15;
+	ctlr->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+	rtdm_event_init(&spim->transfer_done, 0);
+	rtdm_lock_init(&spim->lock);
+
+	spim->pin_dir = pin_dir;
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+	spim->phys = r->start + regs_offset;
+	spim->regs += regs_offset;
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		dev_err(&pdev->dev, "%s: irq_of_parse_and_map: %d\n",
+				__func__, irq);
+		goto fail;
+	}
+
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       omap2_mcspi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+				__func__, irq);
+		goto fail;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n", __func__);
+		goto fail_irq;
+	}
+
+	pm_runtime_use_autosuspend(&pdev->dev);
+	/*
+	 * A negative delay combined with use_autosuspend prevents
+	 * runtime suspends from ever being triggered.
+	 */
+	pm_runtime_set_autosuspend_delay(&pdev->dev, PM_NEGATIVE_DELAY);
+	pm_runtime_enable(&pdev->dev);
+	ret = pm_runtime_get_sync(&pdev->dev);
+	if (ret < 0) {
+		dev_err(&pdev->dev, "%s: pm_runtime_get_sync error %d\n",
+				__func__, ret);
+		return ret;
+	}
+
+	omap2_mcspi_reset_hw(spim);
+	omap2_mcspi_init_hw(spim);
+
+	dev_info(&pdev->dev, "success\n");
+	return 0;
+
+fail_irq:
+	rtdm_irq_free(&spim->irqh);
+fail:
+	spi_controller_put(ctlr);
+
+	return ret;
+}
+
+static int omap2_mcspi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_omap2_mcspi *spim;
+
+	spim = container_of(master, struct spi_master_omap2_mcspi, master);
+
+	omap2_mcspi_reset_hw(spim);
+
+	pm_runtime_dont_use_autosuspend(&pdev->dev);
+	pm_runtime_put_sync(&pdev->dev);
+	pm_runtime_disable(&pdev->dev);
+
+	rtdm_irq_free(&spim->irqh);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static struct platform_driver omap2_mcspi_spi_driver = {
+	.driver		= {
+		.name		= "omap2_mcspi_rt",
+		.of_match_table	= omap_mcspi_of_match,
+	},
+	.probe		= omap2_mcspi_probe,
+	.remove		= omap2_mcspi_remove,
+};
+module_platform_driver(omap2_mcspi_spi_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-sun6i.c b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-sun6i.c
new file mode 100644
index 0000000..72a4abd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/spi/spi-sun6i.c
@@ -0,0 +1,674 @@
+/**
+ * I/O handling lifted from drivers/spi/spi-sun6i.c:
+ * Copyright (C) 2012 - 2014 Allwinner Tech
+ * Pan Nan <pannan@allwinnertech.com>
+ * Copyright (C) 2014 Maxime Ripard
+ * Maxime Ripard <maxime.ripard@free-electrons.com>
+ *
+ * RTDM integration by:
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/err.h>
+#include <linux/dma-mapping.h>
+#include <linux/interrupt.h>
+#include <linux/platform_device.h>
+#include <linux/io.h>
+#include <linux/clk.h>
+#include <linux/spi/spi.h>
+#include <linux/of_irq.h>
+#include <linux/of_device.h>
+#include <linux/reset.h>
+#include "spi-master.h"
+
+#define RTDM_SUBCLASS_SUN6I  2
+
+#define SUN6I_GBL_CTL_REG		0x04
+#define SUN6I_GBL_CTL_BUS_ENABLE	BIT(0)
+#define SUN6I_GBL_CTL_MASTER		BIT(1)
+#define SUN6I_GBL_CTL_TP		BIT(7)
+#define SUN6I_GBL_CTL_RST		BIT(31)
+
+#define SUN6I_TFR_CTL_REG		0x08
+#define SUN6I_TFR_CTL_CPHA		BIT(0)
+#define SUN6I_TFR_CTL_CPOL		BIT(1)
+#define SUN6I_TFR_CTL_SPOL		BIT(2)
+#define SUN6I_TFR_CTL_CS_MASK		0x30
+#define SUN6I_TFR_CTL_CS(cs)		(((cs) << 4) & SUN6I_TFR_CTL_CS_MASK)
+#define SUN6I_TFR_CTL_CS_MANUAL		BIT(6)
+#define SUN6I_TFR_CTL_CS_LEVEL		BIT(7)
+#define SUN6I_TFR_CTL_DHB		BIT(8)
+#define SUN6I_TFR_CTL_FBS		BIT(12)
+#define SUN6I_TFR_CTL_XCH		BIT(31)
+
+#define SUN6I_INT_CTL_REG		0x10
+#define SUN6I_INT_CTL_RX_RDY		BIT(0)
+#define SUN6I_INT_CTL_TX_RDY		BIT(4)
+#define SUN6I_INT_CTL_RX_OVF		BIT(8)
+#define SUN6I_INT_CTL_TC		BIT(12)
+
+#define SUN6I_INT_STA_REG		0x14
+
+#define SUN6I_FIFO_CTL_REG		0x18
+#define SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_MASK	0xff
+#define SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_BITS	0
+#define SUN6I_FIFO_CTL_RX_RST			BIT(15)
+#define SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_MASK	0xff
+#define SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_BITS	16
+#define SUN6I_FIFO_CTL_TX_RST			BIT(31)
+
+#define SUN6I_FIFO_STA_REG		0x1c
+#define SUN6I_FIFO_STA_RX_CNT(reg)	(((reg) >> 0) & 0xff)
+#define SUN6I_FIFO_STA_TX_CNT(reg)	(((reg) >> 16) & 0xff)
+
+#define SUN6I_CLK_CTL_REG		0x24
+#define SUN6I_CLK_CTL_CDR2_MASK		0xff
+#define SUN6I_CLK_CTL_CDR2(div)		(((div) & SUN6I_CLK_CTL_CDR2_MASK) << 0)
+#define SUN6I_CLK_CTL_CDR1_MASK		0xf
+#define SUN6I_CLK_CTL_CDR1(div)		(((div) & SUN6I_CLK_CTL_CDR1_MASK) << 8)
+#define SUN6I_CLK_CTL_DRS		BIT(12)
+
+#define SUN6I_MAX_XFER_SIZE		0xffffff
+
+#define SUN6I_BURST_CNT_REG		0x30
+#define SUN6I_BURST_CNT(cnt)		((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_XMIT_CNT_REG		0x34
+#define SUN6I_XMIT_CNT(cnt)		((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_BURST_CTL_CNT_REG		0x38
+#define SUN6I_BURST_CTL_CNT_STC(cnt)	((cnt) & SUN6I_MAX_XFER_SIZE)
+
+#define SUN6I_TXDATA_REG		0x200
+#define SUN6I_RXDATA_REG		0x300
+
+#define SUN6I_SPI_MODE_BITS	(SPI_CPOL | SPI_CPHA | SPI_CS_HIGH	\
+				 | SPI_LSB_FIRST)
+
+struct spi_setup_data {
+	int fifo_depth;
+};
+
+static struct spi_setup_data sun6i_data = {
+	.fifo_depth = 128,
+};
+
+static struct spi_setup_data sun8i_data = {
+	.fifo_depth = 64,
+};
+
+struct spi_master_sun6i {
+	struct rtdm_spi_master master;
+	void __iomem *regs;
+	struct reset_control *rstc;
+	struct clk *hclk;
+	struct clk *mclk;
+	unsigned long clk_hz;
+	rtdm_irq_t irqh;
+	const u8 *tx_buf;
+	u8 *rx_buf;
+	int tx_len;
+	int rx_len;
+	rtdm_event_t transfer_done;
+	const struct spi_setup_data *setup;
+};
+
+struct spi_slave_sun6i {
+	struct rtdm_spi_remote_slave slave;
+	void *io_virt;
+	dma_addr_t io_dma;
+	size_t io_len;
+};
+
+static inline struct spi_slave_sun6i *
+to_slave_sun6i(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave, struct spi_slave_sun6i, slave);
+}
+
+static inline struct spi_master_sun6i *
+to_master_sun6i(struct rtdm_spi_remote_slave *slave)
+{
+	return container_of(slave->master, struct spi_master_sun6i, master);
+}
+
+static inline struct device *
+master_to_kdev(struct rtdm_spi_master *master)
+{
+	return &master->controller->dev;
+}
+
+static inline u32 sun6i_rd(struct spi_master_sun6i *spim,
+			   unsigned int reg)
+{
+	return readl(spim->regs + reg);
+}
+
+static inline void sun6i_wr(struct spi_master_sun6i *spim,
+			    unsigned int reg, u32 val)
+{
+	writel(val, spim->regs + reg);
+}
+
+static void sun6i_rd_fifo(struct spi_master_sun6i *spim)
+{
+	u32 reg;
+	int len;
+	u8 byte;
+
+	reg = sun6i_rd(spim, SUN6I_FIFO_STA_REG);
+	len = min((int)SUN6I_FIFO_STA_RX_CNT(reg), spim->rx_len);
+
+	while (len-- > 0) {
+		byte = sun6i_rd(spim, SUN6I_RXDATA_REG);
+		if (spim->rx_buf)
+			*spim->rx_buf++ = byte;
+		spim->rx_len--;
+	}
+}
+
+static void sun6i_wr_fifo(struct spi_master_sun6i *spim)
+{
+	u32 reg;
+	int len;
+	u8 byte;
+
+	reg = sun6i_rd(spim, SUN6I_FIFO_STA_REG);
+	len = min(spim->setup->fifo_depth - (int)SUN6I_FIFO_STA_TX_CNT(reg),
+		  spim->tx_len);
+
+	while (len-- > 0) {
+		byte = spim->tx_buf ? *spim->tx_buf++ : 0;
+		sun6i_wr(spim, SUN6I_TXDATA_REG, byte);
+		spim->tx_len--;
+	}
+}
+
+static int sun6i_spi_interrupt(rtdm_irq_t *irqh)
+{
+	struct spi_master_sun6i *spim;
+	u32 status;
+
+	spim = rtdm_irq_get_arg(irqh, struct spi_master_sun6i);
+
+	sun6i_rd_fifo(spim);
+	sun6i_wr_fifo(spim);
+
+	status = sun6i_rd(spim, SUN6I_INT_STA_REG);
+	if ((status & SUN6I_INT_CTL_TC)) {
+		sun6i_wr(spim, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TC);
+		sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+		rtdm_event_signal(&spim->transfer_done);
+	} else if (status & SUN6I_INT_CTL_TX_RDY)
+		sun6i_wr(spim, SUN6I_INT_STA_REG, SUN6I_INT_CTL_TX_RDY);
+
+	return RTDM_IRQ_HANDLED;
+}
+
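+/*
+ * Clock divider selection: CDR2 divides the module clock by
+ * 2 * (n + 1) and is preferred while the ratio fits its 8-bit
+ * field; otherwise CDR1 divides by 2^n. A sketch, assuming a
+ * 96 MHz module clock: 8 MHz gives div = 6, hence CDR2 = 5 with
+ * DRS set and an exact 8 MHz; 10 kHz overflows CDR2 and falls
+ * back to CDR1 = ilog2(96 MHz) - ilog2(10 kHz) = 13, i.e. about
+ * 11.7 kHz.
+ */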
+static int sun6i_configure(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct rtdm_spi_config *config = &slave->config;
+	u32 reg, div;
+
+	/* Set clock polarity and phase. */
+
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~(SUN6I_TFR_CTL_CPOL | SUN6I_TFR_CTL_CPHA |
+		 SUN6I_TFR_CTL_FBS | SUN6I_TFR_CTL_SPOL);
+
+	/* Manual CS via ->chip_select(). */
+	reg |= SUN6I_TFR_CTL_CS_MANUAL;
+
+	if (config->mode & SPI_CPOL)
+		reg |= SUN6I_TFR_CTL_CPOL;
+
+	if (config->mode & SPI_CPHA)
+		reg |= SUN6I_TFR_CTL_CPHA;
+
+	if (config->mode & SPI_LSB_FIRST)
+		reg |= SUN6I_TFR_CTL_FBS;
+
+	if (!(config->mode & SPI_CS_HIGH))
+		reg |= SUN6I_TFR_CTL_SPOL;
+
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+
+	/* Setup clock divider. */
+
+	div = spim->clk_hz / (2 * config->speed_hz);
+	if (div <= SUN6I_CLK_CTL_CDR2_MASK + 1) {
+		if (div > 0)
+			div--;
+		reg = SUN6I_CLK_CTL_CDR2(div) | SUN6I_CLK_CTL_DRS;
+	} else {
+		div = ilog2(spim->clk_hz) - ilog2(config->speed_hz);
+		reg = SUN6I_CLK_CTL_CDR1(div);
+	}
+
+	sun6i_wr(spim, SUN6I_CLK_CTL_REG, reg);
+
+	return 0;
+}
+
+static void sun6i_chip_select(struct rtdm_spi_remote_slave *slave,
+			      bool active)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	u32 reg;
+
+	/*
+	 * We have no cs_gpios, so this handler will be called for
+	 * each transfer.
+	 */
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~(SUN6I_TFR_CTL_CS_MASK | SUN6I_TFR_CTL_CS_LEVEL);
+	reg |= SUN6I_TFR_CTL_CS(slave->chip_select);
+
+	if (active)
+		reg |= SUN6I_TFR_CTL_CS_LEVEL;
+
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+}
+
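+/*
+ * IRQ-driven transfer: BURST_CNT holds the total burst count while
+ * XMIT_CNT and BURST_CTL_CNT cover only the part backed by TX data
+ * (zero on RX-only transfers, letting the controller clock dummy
+ * bursts by itself); DHB is set on TX-only transfers so the unused
+ * incoming bytes are discarded instead of filling the RX FIFO.
+ */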
+static int do_transfer_irq(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	u32 tx_len = 0, reg;
+	int ret;
+
+	/* Reset FIFO. */
+	sun6i_wr(spim, SUN6I_FIFO_CTL_REG,
+		 SUN6I_FIFO_CTL_RX_RST | SUN6I_FIFO_CTL_TX_RST);
+
+	/* Set FIFO interrupt trigger level to 3/4 of the fifo depth. */
+	reg = spim->setup->fifo_depth / 4 * 3;
+	sun6i_wr(spim, SUN6I_FIFO_CTL_REG,
+		 (reg << SUN6I_FIFO_CTL_RX_RDY_TRIG_LEVEL_BITS) |
+		 (reg << SUN6I_FIFO_CTL_TX_RDY_TRIG_LEVEL_BITS));
+
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	reg &= ~SUN6I_TFR_CTL_DHB;
+	/* Discard unused SPI bursts if TX only. */
+	if (spim->rx_buf == NULL)
+		reg |= SUN6I_TFR_CTL_DHB;
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg);
+
+	if (spim->tx_buf)
+		tx_len = spim->tx_len;
+
+	/* Setup the counters. */
+	sun6i_wr(spim, SUN6I_BURST_CNT_REG, SUN6I_BURST_CNT(spim->tx_len));
+	sun6i_wr(spim, SUN6I_XMIT_CNT_REG, SUN6I_XMIT_CNT(tx_len));
+	sun6i_wr(spim, SUN6I_BURST_CTL_CNT_REG,
+		 SUN6I_BURST_CTL_CNT_STC(tx_len));
+
+	/* Fill the TX FIFO */
+	sun6i_wr_fifo(spim);
+
+	/* Enable interrupts. */
+	reg = sun6i_rd(spim, SUN6I_INT_CTL_REG);
+	reg |= SUN6I_INT_CTL_TC | SUN6I_INT_CTL_TX_RDY;
+	sun6i_wr(spim, SUN6I_INT_CTL_REG, reg);
+
+	/* Start the transfer. */
+	reg = sun6i_rd(spim, SUN6I_TFR_CTL_REG);
+	sun6i_wr(spim, SUN6I_TFR_CTL_REG, reg | SUN6I_TFR_CTL_XCH);
+
+	ret = rtdm_event_wait(&spim->transfer_done);
+	if (ret) {
+		sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+		return ret;
+	}
+
+	return 0;
+}
+
+static int sun6i_transfer_iobufs(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	if (sun6i->io_len == 0)
+		return -EINVAL;	/* No I/O buffers set. */
+
+	spim->tx_len = sun6i->io_len / 2;
+	spim->rx_len = spim->tx_len;
+	spim->tx_buf = sun6i->io_virt + spim->rx_len;
+	spim->rx_buf = sun6i->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static int sun6i_transfer_iobufs_n(struct rtdm_spi_remote_slave *slave,
+				   int len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	if ((sun6i->io_len == 0) ||
+		(len <= 0) || (len > (sun6i->io_len / 2)))
+		return -EINVAL;
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = sun6i->io_virt + sun6i->io_len / 2;
+	spim->rx_buf = sun6i->io_virt;
+
+	return do_transfer_irq(slave);
+}
+
+static ssize_t sun6i_read(struct rtdm_spi_remote_slave *slave,
+			  void *rx, size_t len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = NULL;
+	spim->rx_buf = rx;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static ssize_t sun6i_write(struct rtdm_spi_remote_slave *slave,
+			   const void *tx, size_t len)
+{
+	struct spi_master_sun6i *spim = to_master_sun6i(slave);
+
+	spim->tx_len = len;
+	spim->rx_len = len;
+	spim->tx_buf = tx;
+	spim->rx_buf = NULL;
+
+	return do_transfer_irq(slave) ?: len;
+}
+
+static int set_iobufs(struct spi_slave_sun6i *sun6i, size_t len)
+{
+	dma_addr_t dma;
+	void *p;
+
+	if (len == 0)
+		return -EINVAL;
+
+	len = L1_CACHE_ALIGN(len) * 2;
+	if (len == sun6i->io_len)
+		return 0;
+
+	if (sun6i->io_len)
+		return -EINVAL;	/* I/O buffers may not be resized. */
+
+	p = dma_alloc_coherent(NULL, len, &dma, GFP_KERNEL);
+	if (p == NULL)
+		return -ENOMEM;
+
+	sun6i->io_dma = dma;
+	sun6i->io_virt = p;
+	smp_mb();
+	sun6i->io_len = len;
+
+	return 0;
+}
+
+static int sun6i_set_iobufs(struct rtdm_spi_remote_slave *slave,
+			    struct rtdm_spi_iobufs *p)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+	int ret;
+
+	ret = set_iobufs(sun6i, p->io_len);
+	if (ret)
+		return ret;
+
+	p->i_offset = 0;
+	p->o_offset = sun6i->io_len / 2;
+	p->map_len = sun6i->io_len;
+
+	return 0;
+}
+
+static int sun6i_mmap_iobufs(struct rtdm_spi_remote_slave *slave,
+			     struct vm_area_struct *vma)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
+
+	return rtdm_mmap_kmem(vma, sun6i->io_virt);
+}
+
+static void sun6i_mmap_release(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	dma_free_coherent(NULL, sun6i->io_len,
+			  sun6i->io_virt, sun6i->io_dma);
+	sun6i->io_len = 0;
+}
+
+static struct rtdm_spi_remote_slave *
+sun6i_attach_slave(struct rtdm_spi_master *master, struct spi_device *spi)
+{
+	struct spi_slave_sun6i *sun6i;
+	int ret;
+
+	sun6i = kzalloc(sizeof(*sun6i), GFP_KERNEL);
+	if (sun6i == NULL)
+		return ERR_PTR(-ENOMEM);
+
+	ret = rtdm_spi_add_remote_slave(&sun6i->slave, master, spi);
+	if (ret) {
+		dev_err(&spi->dev,
+			"%s: failed to attach slave\n", __func__);
+		kfree(sun6i);
+		return ERR_PTR(ret);
+	}
+
+	return &sun6i->slave;
+}
+
+static void sun6i_detach_slave(struct rtdm_spi_remote_slave *slave)
+{
+	struct spi_slave_sun6i *sun6i = to_slave_sun6i(slave);
+
+	rtdm_spi_remove_remote_slave(slave);
+	kfree(sun6i);
+}
+
+static struct rtdm_spi_master_ops sun6i_master_ops = {
+	.configure = sun6i_configure,
+	.chip_select = sun6i_chip_select,
+	.set_iobufs = sun6i_set_iobufs,
+	.mmap_iobufs = sun6i_mmap_iobufs,
+	.mmap_release = sun6i_mmap_release,
+	.transfer_iobufs = sun6i_transfer_iobufs,
+	.transfer_iobufs_n = sun6i_transfer_iobufs_n,
+	.write = sun6i_write,
+	.read = sun6i_read,
+	.attach_slave = sun6i_attach_slave,
+	.detach_slave = sun6i_detach_slave,
+};
+
+static int sun6i_spi_probe(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master;
+	struct spi_master_sun6i *spim;
+	struct spi_controller *ctlr;
+	struct resource *r;
+	int ret, irq;
+	u32 clk_rate;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	master = rtdm_spi_alloc_master(&pdev->dev,
+				       struct spi_master_sun6i, master);
+	if (master == NULL)
+		return -ENOMEM;
+
+	master->subclass = RTDM_SUBCLASS_SUN6I;
+	master->ops = &sun6i_master_ops;
+	platform_set_drvdata(pdev, master);
+
+	ctlr = master->controller;
+	ctlr->max_speed_hz = 100 * 1000 * 1000;
+	ctlr->min_speed_hz = 3 * 1000;
+	ctlr->mode_bits = SUN6I_SPI_MODE_BITS;
+	ctlr->bits_per_word_mask = SPI_BPW_MASK(8);
+	ctlr->num_chipselect = 4;
+	ctlr->dev.of_node = pdev->dev.of_node;
+
+	spim = container_of(master, struct spi_master_sun6i, master);
+	spim->setup = of_device_get_match_data(&pdev->dev);
+
+	rtdm_event_init(&spim->transfer_done, 0);
+
+	r = platform_get_resource(pdev, IORESOURCE_MEM, 0);
+	spim->regs = devm_ioremap_resource(&pdev->dev, r);
+	if (IS_ERR(spim->regs)) {
+		dev_err(&pdev->dev, "%s: cannot map I/O memory\n", __func__);
+		ret = PTR_ERR(spim->regs);
+		goto fail;
+	}
+
+	spim->hclk = devm_clk_get(&pdev->dev, "ahb");
+	if (IS_ERR(spim->hclk)) {
+		dev_err(&pdev->dev, "Unable to acquire AHB clock\n");
+		ret = PTR_ERR(spim->hclk);
+		goto fail;
+	}
+
+	spim->mclk = devm_clk_get(&pdev->dev, "mod");
+	if (IS_ERR(spim->mclk)) {
+		dev_err(&pdev->dev, "Unable to acquire MOD clock\n");
+		ret = PTR_ERR(spim->mclk);
+		goto fail;
+	}
+
+	spim->rstc = devm_reset_control_get(&pdev->dev, NULL);
+	if (IS_ERR(spim->rstc)) {
+		dev_err(&pdev->dev, "Couldn't get reset controller\n");
+		ret = PTR_ERR(spim->rstc);
+		goto fail;
+	}
+
+	/*
+	 * Ensure that we have a parent clock fast enough to handle
+	 * the fastest transfers properly.
+	 */
+	clk_rate = clk_get_rate(spim->mclk);
+	if (clk_rate < 2 * ctlr->max_speed_hz)
+		clk_set_rate(spim->mclk, 2 * ctlr->max_speed_hz);
+
+	spim->clk_hz = clk_get_rate(spim->mclk);
+
+	irq = irq_of_parse_and_map(pdev->dev.of_node, 0);
+	if (irq <= 0) {
+		ret = irq ?: -ENODEV;
+		goto fail;
+	}
+
+	clk_prepare_enable(spim->hclk);
+	clk_prepare_enable(spim->mclk);
+
+	ret = reset_control_deassert(spim->rstc);
+	if (ret)
+		goto fail_unclk;
+
+	/* Enable SPI module, in master mode with smart burst. */
+
+	sun6i_wr(spim, SUN6I_GBL_CTL_REG,
+		 SUN6I_GBL_CTL_BUS_ENABLE | SUN6I_GBL_CTL_MASTER |
+		 SUN6I_GBL_CTL_TP);
+
+	/* Disable and clear all interrupts. */
+	sun6i_wr(spim, SUN6I_INT_CTL_REG, 0);
+	sun6i_wr(spim, SUN6I_INT_STA_REG, ~0);
+
+	ret = rtdm_irq_request(&spim->irqh, irq,
+			       sun6i_spi_interrupt, 0,
+			       dev_name(&pdev->dev), spim);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: cannot request IRQ%d\n",
+			__func__, irq);
+		goto fail_unclk;
+	}
+
+	ret = rtdm_spi_add_master(&spim->master);
+	if (ret) {
+		dev_err(&pdev->dev, "%s: failed to add master\n",
+			__func__);
+		goto fail_register;
+	}
+
+	return 0;
+
+fail_register:
+	rtdm_irq_free(&spim->irqh);
+fail_unclk:
+	clk_disable_unprepare(spim->mclk);
+	clk_disable_unprepare(spim->hclk);
+fail:
+	spi_controller_put(ctlr);
+
+	return ret;
+}
+
+static int sun6i_spi_remove(struct platform_device *pdev)
+{
+	struct rtdm_spi_master *master = platform_get_drvdata(pdev);
+	struct spi_master_sun6i *spim;
+
+	dev_dbg(&pdev->dev, "%s: entered\n", __func__);
+
+	spim = container_of(master, struct spi_master_sun6i, master);
+
+	rtdm_irq_free(&spim->irqh);
+
+	clk_disable_unprepare(spim->mclk);
+	clk_disable_unprepare(spim->hclk);
+
+	rtdm_spi_remove_master(master);
+
+	return 0;
+}
+
+static const struct of_device_id sun6i_spi_match[] = {
+	{
+		.compatible = "allwinner,sun6i-a31-spi",
+		.data = &sun6i_data,
+	},
+	{
+		.compatible = "allwinner,sun8i-h3-spi",
+		.data = &sun8i_data,
+	},
+	{ /* Sentinel */ },
+};
+MODULE_DEVICE_TABLE(of, sun6i_spi_match);
+
+static struct platform_driver sun6i_spi_driver = {
+	.driver		= {
+		.name		= "spi-sun6i",
+		.of_match_table	= sun6i_spi_match,
+	},
+	.probe		= sun6i_spi_probe,
+	.remove		= sun6i_spi_remove,
+};
+module_platform_driver(sun6i_spi_driver);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/testing/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/testing/Kconfig
new file mode 100644
index 0000000..88c043c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/testing/Kconfig
@@ -0,0 +1,29 @@
+menu "Testing drivers"
+
+config XENO_DRIVERS_TIMERBENCH
+	tristate "Timer benchmark driver"
+	default y
+	help
+	Kernel-based benchmark driver for timer latency evaluation.
+	See testsuite/latency for a possible front-end.
+
+config XENO_DRIVERS_SWITCHTEST
+	tristate "Context switch unit testing driver"
+	default y
+	help
+	Kernel-based driver for unit testing context switches and
+	FPU switches.
+
+config XENO_DRIVERS_HEAPCHECK
+	tristate "Memory allocator test driver"
+	default y
+	help
+	Kernel-based driver for testing Cobalt's memory allocator.
+
+config XENO_DRIVERS_RTDMTEST
+	depends on m
+	tristate "RTDM unit tests driver"
+	help
+	Kernel driver for performing RTDM unit tests.
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/testing/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/testing/Makefile
new file mode 100644
index 0000000..09b0763
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/testing/Makefile
@@ -0,0 +1,13 @@
+
+obj-$(CONFIG_XENO_DRIVERS_TIMERBENCH) += xeno_timerbench.o
+obj-$(CONFIG_XENO_DRIVERS_SWITCHTEST) += xeno_switchtest.o
+obj-$(CONFIG_XENO_DRIVERS_RTDMTEST)   += xeno_rtdmtest.o
+obj-$(CONFIG_XENO_DRIVERS_HEAPCHECK)   += xeno_heapcheck.o
+
+xeno_timerbench-y := timerbench.o
+
+xeno_switchtest-y := switchtest.o
+
+xeno_rtdmtest-y := rtdmtest.o
+
+xeno_heapcheck-y := heapcheck.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/testing/heapcheck.c b/kernel/xenomai-v3.2.4/kernel/drivers/testing/heapcheck.c
new file mode 100644
index 0000000..fc6e91d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/testing/heapcheck.c
@@ -0,0 +1,550 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; either version 2 of the License,
+ * or (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/slab.h>
+#include <linux/kernel.h>
+#include <linux/random.h>
+#include <cobalt/kernel/assert.h>
+#include <cobalt/kernel/heap.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+
+#define complain(__fmt, __args...)	\
+	printk(XENO_WARNING "heap check: " __fmt "\n", ##__args)
+
+static struct xnheap test_heap = {
+	.name = "test_heap"
+};
+
+enum pattern {
+	alphabet_series,
+	digit_series,
+	binary_series,
+};
+
+struct chunk {
+	void *ptr;
+	enum pattern pattern;
+};
+
+struct runstats {
+	struct rttst_heap_stats stats;
+	struct runstats *next;
+};
+
+static struct runstats *statistics;
+
+static int nrstats;
+
+static unsigned int random_seed;
+
+static inline void breathe(int loops)
+{
+	if ((loops % 1000) == 0)
+		rtdm_task_sleep(300000ULL);
+}
+
+static inline void do_swap(void *left, void *right)
+{
+	char trans[sizeof(struct chunk)];
+
+	memcpy(trans, left, sizeof(struct chunk));
+	memcpy(left, right, sizeof(struct chunk));
+	memcpy(right, trans, sizeof(struct chunk));
+}
+
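+/*
+ * Tiny linear congruential generator using the classic Numerical
+ * Recipes constants (a = 1664525, c = 1013904223, mod 2^32): fast,
+ * allocation-free and deterministic per seed, which is all the
+ * shuffle below needs.
+ */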
+static inline unsigned int pseudo_random(void)
+{
+	random_seed = random_seed * 1664525 + 1013904223;
+	return random_seed;
+}
+
+static void random_shuffle(void *vbase, size_t nmemb)
+{
+	struct {
+		char x[sizeof(struct chunk)];
+	} __attribute__((packed)) *base = vbase;
+	unsigned int j, k;
+
+	for (j = nmemb; j > 0; j--) {
+		k = (unsigned int)(pseudo_random() % nmemb) + 1;
+		if (j == k)
+			continue;
+		do_swap(&base[j - 1], &base[k - 1]);
+	}
+}
+
+static void fill_pattern(char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		*p++ = (char)(val % count);
+		val++;
+	}
+}
+
+static int check_pattern(const char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		if (*p++ != (char)(val % count))
+			return 0;
+		val++;
+	}
+
+	return 1;
+}
+
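+/*
+ * Estimate the largest contiguous free block by probing downwards
+ * from free_size in block_size steps until an allocation succeeds;
+ * comparing this against the amount actually freed gives a rough
+ * external-fragmentation figure for the run.
+ */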
+static size_t find_largest_free(size_t free_size, size_t block_size)
+{
+	void *p;
+
+	for (;;) {
+		p = xnheap_alloc(&test_heap, free_size);
+		if (p) {
+			xnheap_free(&test_heap, p);
+			break;
+		}
+		if (free_size <= block_size)
+			break;
+		free_size -= block_size;
+	}
+
+	return free_size;
+}
+
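+/*
+ * Core check: carve the whole heap into block_size chunks, timing
+ * every xnheap_alloc()/xnheap_free() pair, optionally filling and
+ * verifying per-chunk patterns, freeing in (optionally shuffled)
+ * order while sampling fragmentation, then redoing a "hot"
+ * allocation pass to verify the allocator reproduces the same
+ * layout. Timing records are queued for later retrieval via
+ * RTTST_RTIOC_HEAP_STAT_COLLECT.
+ */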
+static int test_seq(size_t heap_size, size_t block_size, int flags)
+{
+	long alloc_sum_ns, alloc_avg_ns, free_sum_ns, free_avg_ns,
+		alloc_max_ns, free_max_ns, d;
+	size_t user_size, largest_free, maximum_free, freed;
+	int ret, n, k, maxblocks, nrblocks;
+	nanosecs_rel_t start, end;
+	struct chunk *chunks;
+	struct runstats *st;
+	bool done_frag;
+	void *mem, *p;
+
+	random_seed = get_random_u32();
+
+	maxblocks = heap_size / block_size;
+
+	mem = vmalloc(heap_size);
+	if (mem == NULL)
+		return -ENOMEM;
+
+	ret = xnheap_init(&test_heap, mem, heap_size);
+	if (ret) {
+		complain("cannot init heap with size %zu",
+		       heap_size);
+		goto out;
+	}
+
+	chunks = vmalloc(sizeof(*chunks) * maxblocks);
+	if (chunks == NULL) {
+		ret = -ENOMEM;
+		goto no_chunks;
+	}
+	memset(chunks, 0, sizeof(*chunks) * maxblocks);
+
+	ret = xnthread_harden();
+	if (ret)
+		goto done;
+
+	if (xnheap_get_size(&test_heap) != heap_size) {
+		complain("memory size inconsistency (%zu / %zu bytes)",
+			 heap_size, xnheap_get_size(&test_heap));
+		goto bad;
+	}
+
+	user_size = 0;
+	alloc_avg_ns = 0;
+	free_avg_ns = 0;
+	alloc_max_ns = 0;
+	free_max_ns = 0;
+	maximum_free = 0;
+	largest_free = 0;
+
+	for (n = 0, alloc_sum_ns = 0; ; n++) {
+		start = rtdm_clock_read_monotonic();
+		p = xnheap_alloc(&test_heap, block_size);
+		end = rtdm_clock_read_monotonic();
+		d = end - start;
+		if (d > alloc_max_ns)
+			alloc_max_ns = d;
+		alloc_sum_ns += d;
+		if (p == NULL)
+			break;
+		user_size += block_size;
+		if (n >= maxblocks) {
+			complain("too many blocks fetched"
+			       " (heap=%zu, block=%zu, "
+			       "got more than %d blocks)",
+			       heap_size, block_size, maxblocks);
+			goto bad;
+		}
+		chunks[n].ptr = p;
+		if (flags & RTTST_HEAPCHECK_PATTERN) {
+			chunks[n].pattern = (enum pattern)(pseudo_random() % 3);
+			fill_pattern(chunks[n].ptr, block_size, chunks[n].pattern);
+		}
+		breathe(n);
+	}
+
+	nrblocks = n;
+	if (nrblocks == 0)
+		goto do_stats;
+
+	if ((flags & RTTST_HEAPCHECK_ZEROOVRD) && nrblocks != maxblocks) {
+		complain("too few blocks fetched, unexpected overhead"
+			 " (heap=%zu, block=%zu, "
+			 "got %d, less than %d blocks)",
+			 heap_size, block_size, nrblocks, maxblocks);
+		goto bad;
+	}
+
+	breathe(0);
+
+	/* Make sure we did not trash any busy block while allocating. */
+	if (flags & RTTST_HEAPCHECK_PATTERN) {
+		for (n = 0; n < nrblocks; n++) {
+			if (!check_pattern(chunks[n].ptr, block_size,
+					   chunks[n].pattern)) {
+				complain("corrupted block #%d on alloc"
+					 " sequence (pattern %d)",
+					 n, chunks[n].pattern);
+				goto bad;
+			}
+			breathe(n);
+		}
+	}
+
+	if (flags & RTTST_HEAPCHECK_SHUFFLE)
+		random_shuffle(chunks, nrblocks);
+
+	/*
+	 * Release all blocks.
+	 */
+	for (n = 0, free_sum_ns = 0, freed = 0, done_frag = false;
+	     n < nrblocks; n++) {
+		start = rtdm_clock_read_monotonic();
+		xnheap_free(&test_heap, chunks[n].ptr);
+		end = rtdm_clock_read_monotonic();
+		d = end - start;
+		if (d > free_max_ns)
+			free_max_ns = d;
+		free_sum_ns += d;
+		chunks[n].ptr = NULL;
+		/* Make sure we did not trash busy blocks while freeing. */
+		if (flags & RTTST_HEAPCHECK_PATTERN) {
+			for (k = 0; k < nrblocks; k++) {
+				if (chunks[k].ptr &&
+				    !check_pattern(chunks[k].ptr, block_size,
+						   chunks[k].pattern)) {
+					complain("corrupted block #%d on release"
+						 " sequence (pattern %d)",
+						 k, chunks[k].pattern);
+					goto bad;
+				}
+				breathe(k);
+			}
+		}
+		freed += block_size;
+		/*
+		 * Get a sense of the fragmentation for the tested
+		 * allocation pattern, heap and block sizes when half
+		 * of the usable heap size should be available to us.
+		 * NOTE: user_size excludes the overhead, this is
+		 * actually what we managed to get from the current
+		 * heap out of the allocation loop.
+		 */
+		if (!done_frag && freed >= user_size / 2) {
+			/* Calculate the external fragmentation. */
+			largest_free = find_largest_free(freed, block_size);
+			maximum_free = freed;
+			done_frag = true;
+		}
+		breathe(n);
+	}
+
+	/*
+	 * If the deallocation mechanism is broken, we might not be
+	 * able to reproduce the same allocation pattern with the same
+	 * outcome, check this.
+	 */
+	if (flags & RTTST_HEAPCHECK_HOT) {
+		for (n = 0, alloc_max_ns = alloc_sum_ns = 0; ; n++) {
+			start = rtdm_clock_read_monotonic();
+			p = xnheap_alloc(&test_heap, block_size);
+			end = rtdm_clock_read_monotonic();
+			d = end - start;
+			if (d > alloc_max_ns)
+				alloc_max_ns = d;
+			alloc_sum_ns += d;
+			if (p == NULL)
+				break;
+			if (n >= maxblocks) {
+				complain("too many blocks fetched during hot pass"
+					 " (heap=%zu, block=%zu, "
+					 "got more than %d blocks)",
+					 heap_size, block_size, maxblocks);
+				goto bad;
+			}
+			chunks[n].ptr = p;
+			breathe(n);
+		}
+		if (n != nrblocks) {
+			complain("inconsistent block count fetched"
+				 " during hot pass (heap=%zu, block=%zu, "
+				 "got %d blocks vs %d during alloc)",
+				 heap_size, block_size, n, nrblocks);
+			goto bad;
+		}
+		for (n = 0, free_max_ns = free_sum_ns = 0; n < nrblocks; n++) {
+			start = rtdm_clock_read_monotonic();
+			xnheap_free(&test_heap, chunks[n].ptr);
+			end = rtdm_clock_read_monotonic();
+			d = end - start;
+			if (d > free_max_ns)
+				free_max_ns = d;
+			free_sum_ns += d;
+			breathe(n);
+		}
+	}
+
+	alloc_avg_ns = alloc_sum_ns / nrblocks;
+	free_avg_ns = free_sum_ns / nrblocks;
+
+	if ((flags & RTTST_HEAPCHECK_ZEROOVRD) && heap_size != user_size) {
+		complain("unexpected overhead reported");
+		goto bad;
+	}
+
+	if (xnheap_get_used(&test_heap) > 0) {
+		complain("memory leakage reported: %zu bytes missing",
+			 xnheap_get_used(&test_heap));
+		goto bad;
+	}
+
+do_stats:
+	xnthread_relax(0, 0);
+	ret = 0;
+	/*
+	 * Don't report stats when running a pattern check, timings
+	 * are affected.
+	 */
+	if (!(flags & RTTST_HEAPCHECK_PATTERN)) {
+		st = kmalloc(sizeof(*st), GFP_KERNEL);
+		if (st == NULL) {
+			complain("failed allocating memory");
+			ret = -ENOMEM;
+			goto out;
+		}
+		st->stats.heap_size = heap_size;
+		st->stats.user_size = user_size;
+		st->stats.block_size = block_size;
+		st->stats.nrblocks = nrblocks;
+		st->stats.alloc_avg_ns = alloc_avg_ns;
+		st->stats.alloc_max_ns = alloc_max_ns;
+		st->stats.free_avg_ns = free_avg_ns;
+		st->stats.free_max_ns = free_max_ns;
+		st->stats.maximum_free = maximum_free;
+		st->stats.largest_free = largest_free;
+		st->stats.flags = flags;
+		st->next = statistics;
+		statistics = st;
+		nrstats++;
+	}
+
+done:
+	vfree(chunks);
+no_chunks:
+	xnheap_destroy(&test_heap);
+out:
+	vfree(mem);
+
+	return ret;
+bad:
+	xnthread_relax(0, 0);
+	ret = -EPROTO;
+	goto done;
+}
+
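+/*
+ * Copy up to @nr collected records out to user space, consuming
+ * them from the global list as they go; returns the number of
+ * records actually delivered.
+ */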
+static int collect_stats(struct rtdm_fd *fd,
+			 struct rttst_heap_stats __user *buf, int nr)
+{
+	struct runstats *p, *next;
+	int ret, n;
+
+	if (nr < 0)
+		return -EINVAL;
+
+	for (p = statistics, n = nr; p && n > 0 && nrstats > 0;
+	     n--, nrstats--, p = next, buf++) {
+		ret = rtdm_copy_to_user(fd, buf, &p->stats, sizeof(p->stats));
+		if (ret)
+			return ret;
+		next = p->next;
+		statistics = next;
+		kfree(p);
+	}
+
+	return nr - n;
+}
+
+static void heapcheck_close(struct rtdm_fd *fd)
+{
+	struct runstats *p, *next;
+
+	for (p = statistics; p; p = next) {
+		next = p->next;
+		kfree(p);
+	}
+
+	statistics = NULL;
+}
+
+static int heapcheck_ioctl(struct rtdm_fd *fd,
+			   unsigned int request, void __user *arg)
+{
+#ifdef CONFIG_XENO_ARCH_SYS3264
+	struct compat_rttst_heap_stathdr compat_sthdr;
+#endif
+	struct rttst_heap_stathdr sthdr;
+	struct rttst_heap_parms parms;
+	int ret;
+
+	switch (request) {
+	case RTTST_RTIOC_HEAP_CHECK:
+		ret = rtdm_copy_from_user(fd, &parms, arg, sizeof(parms));
+		if (ret)
+			return ret;
+		ret = test_seq(parms.heap_size,
+			       parms.block_size,
+			       parms.flags);
+		if (ret)
+			return ret;
+		parms.nrstats = nrstats;
+		ret = rtdm_copy_to_user(fd, arg, &parms, sizeof(parms));
+		break;
+	case RTTST_RTIOC_HEAP_STAT_COLLECT:
+		sthdr.buf = NULL;
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd)) {
+			ret = rtdm_copy_from_user(fd, &compat_sthdr, arg,
+						  sizeof(compat_sthdr));
+			if (ret)
+				return ret;
+
+			ret = collect_stats(fd, compat_ptr(compat_sthdr.buf),
+					    compat_sthdr.nrstats);
+			if (ret < 0)
+				return ret;
+
+			compat_sthdr.nrstats = ret;
+			ret = rtdm_copy_to_user(fd, arg, &compat_sthdr,
+						sizeof(compat_sthdr));
+		} else
+#endif
+		{
+			ret = rtdm_copy_from_user(fd, &sthdr, arg,
+						  sizeof(sthdr));
+			if (ret)
+				return ret;
+
+			ret = collect_stats(fd, sthdr.buf, sthdr.nrstats);
+			if (ret < 0)
+				return ret;
+
+			sthdr.nrstats = ret;
+			ret = rtdm_copy_to_user(fd, arg, &sthdr, sizeof(sthdr));
+		}
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static struct rtdm_driver heapcheck_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(heap_check,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_HEAPCHECK,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 1,
+	.ops = {
+		.close		= heapcheck_close,
+		.ioctl_nrt	= heapcheck_ioctl,
+	},
+};
+
+static struct rtdm_device heapcheck_device = {
+	.driver = &heapcheck_driver,
+	.label = "heapcheck",
+};
+
+static int __init heapcheck_init(void)
+{
+	return rtdm_dev_register(&heapcheck_device);
+}
+
+static void __exit heapcheck_exit(void)
+{
+	rtdm_dev_unregister(&heapcheck_device);
+}
+
+module_init(heapcheck_init);
+module_exit(heapcheck_exit);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/testing/rtdmtest.c b/kernel/xenomai-v3.2.4/kernel/drivers/testing/rtdmtest.c
new file mode 100644
index 0000000..87aef8e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/testing/rtdmtest.c
@@ -0,0 +1,293 @@
+/*
+ * Copyright (C) 2010 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <rtdm/driver.h>
+#include <rtdm/testing.h>
+
+MODULE_DESCRIPTION("RTDM test helper module");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("0.1.0");
+MODULE_LICENSE("GPL");
+
+struct rtdm_basic_context {
+	rtdm_timer_t close_timer;
+	unsigned long close_counter;
+	unsigned long close_deferral;
+};
+
+struct rtdm_actor_context {
+	rtdm_task_t actor_task;
+	unsigned int request;
+	rtdm_event_t run;
+	rtdm_event_t done;
+	union {
+		__u32 cpu;
+	} args;
+};
+
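+/*
+ * Deferred-close test helper: RTTST_RTIOC_RTDM_DEFER_CLOSE grabs an
+ * extra reference on the fd and arms this timer, which drops the
+ * reference ~300 ms later; the close_counter checks verify that the
+ * final close then runs exactly once, in the expected order.
+ */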
+static void close_timer_proc(rtdm_timer_t *timer)
+{
+	struct rtdm_basic_context *ctx =
+		container_of(timer, struct rtdm_basic_context, close_timer);
+
+	if (ctx->close_counter != 1)
+		printk(XENO_ERR
+		       "rtdmtest: %s: close_counter is %lu, should be 1!\n",
+		       __func__, ctx->close_counter);
+
+	ctx->close_deferral = RTTST_RTDM_NORMAL_CLOSE;
+	rtdm_fd_unlock(rtdm_private_to_fd(ctx));
+}
+
+static int rtdm_basic_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_timer_init(&ctx->close_timer, close_timer_proc,
+			"rtdm close test");
+	ctx->close_counter = 0;
+	ctx->close_deferral = RTTST_RTDM_NORMAL_CLOSE;
+
+	return 0;
+}
+
+static void rtdm_basic_close(struct rtdm_fd *fd)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+
+	ctx->close_counter++;
+
+	switch (ctx->close_deferral) {
+	case RTTST_RTDM_DEFER_CLOSE_CONTEXT:
+		if (ctx->close_counter != 2) {
+			printk(XENO_ERR
+			       "rtdmtest: %s: close_counter is %lu, "
+			       "should be 2!\n",
+			       __func__, ctx->close_counter);
+			return;
+		}
+		rtdm_fd_unlock(fd);
+		break;
+	}
+
+	rtdm_timer_destroy(&ctx->close_timer);
+}
+
+static int rtdm_basic_ioctl_rt(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	int ret, magic = RTTST_RTDM_MAGIC_PRIMARY;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_PING_PRIMARY:
+		ret = rtdm_safe_copy_to_user(fd, arg, &magic,
+					     sizeof(magic));
+		break;
+	default:
+		ret = -ENOSYS;
+	}
+
+	return ret;
+}
+
+static int rtdm_basic_ioctl_nrt(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct rtdm_basic_context *ctx = rtdm_fd_to_private(fd);
+	int ret = 0, magic = RTTST_RTDM_MAGIC_SECONDARY;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_DEFER_CLOSE:
+		ctx->close_deferral = (unsigned long)arg;
+		if (ctx->close_deferral == RTTST_RTDM_DEFER_CLOSE_CONTEXT) {
+			++ctx->close_counter;
+			rtdm_fd_lock(fd);
+			rtdm_timer_start(&ctx->close_timer, 300000000ULL, 0,
+					RTDM_TIMERMODE_RELATIVE);
+		}
+		break;
+	case RTTST_RTIOC_RTDM_PING_SECONDARY:
+		ret = rtdm_safe_copy_to_user(fd, arg, &magic,
+					     sizeof(magic));
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static void actor_handler(void *arg)
+{
+	struct rtdm_actor_context *ctx = arg;
+	int ret;
+
+	for (;;) {
+		if (rtdm_task_should_stop())
+			return;
+
+		ret = rtdm_event_wait(&ctx->run);
+		if (ret)
+			break;
+
+		switch (ctx->request) {
+		case RTTST_RTIOC_RTDM_ACTOR_GET_CPU:
+			ctx->args.cpu = task_cpu(current);
+			break;
+		default:
+			printk(XENO_ERR "rtdmtest: bad request code %d\n",
+			       ctx->request);
+		}
+
+		rtdm_event_signal(&ctx->done);
+	}
+}
+
+static int rtdm_actor_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_event_init(&ctx->run, 0);
+	rtdm_event_init(&ctx->done, 0);
+
+	return rtdm_task_init(&ctx->actor_task, "rtdm_actor",
+			      actor_handler, ctx,
+			      RTDM_TASK_LOWEST_PRIORITY, 0);
+}
+
+static void rtdm_actor_close(struct rtdm_fd *fd)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+
+	rtdm_task_destroy(&ctx->actor_task);
+	rtdm_event_destroy(&ctx->run);
+	rtdm_event_destroy(&ctx->done);
+}
+
+#define ACTION_TIMEOUT 50000000ULL /* 50 ms timeout on action */
+
+static int run_action(struct rtdm_actor_context *ctx, unsigned int request)
+{
+	rtdm_toseq_t toseq;
+
+	rtdm_toseq_init(&toseq, ACTION_TIMEOUT);
+	ctx->request = request;
+	rtdm_event_signal(&ctx->run);
+	/*
+	 * XXX: The handshake mechanism is not bullet-proof against
+	 * -EINTR received while waiting for the done event. We assume
+	 * no request is started or restarted while the action task is
+	 * still completing a previous one we abruptly stopped waiting
+	 * for.
+	 */
+	return rtdm_event_timedwait(&ctx->done, ACTION_TIMEOUT, &toseq);
+}
+
+static int rtdm_actor_ioctl(struct rtdm_fd *fd,
+			    unsigned int request, void __user *arg)
+{
+	struct rtdm_actor_context *ctx = rtdm_fd_to_private(fd);
+	int ret;
+
+	switch (request) {
+	case RTTST_RTIOC_RTDM_ACTOR_GET_CPU:
+		ctx->args.cpu = (__u32)-EINVAL;
+		ret = run_action(ctx, request);
+		if (ret)
+			break;
+		ret = rtdm_safe_copy_to_user(fd, arg, &ctx->args.cpu,
+					     sizeof(ctx->args.cpu));
+		break;
+	default:
+		ret = -ENOTTY;
+	}
+
+	return ret;
+}
+
+static struct rtdm_driver rtdm_basic_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtdm_test_basic,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_RTDMTEST,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 2,
+	.context_size		= sizeof(struct rtdm_basic_context),
+	.ops = {
+		.open		= rtdm_basic_open,
+		.close		= rtdm_basic_close,
+		.ioctl_rt	= rtdm_basic_ioctl_rt,
+		.ioctl_nrt	= rtdm_basic_ioctl_nrt,
+	},
+};
+
+static struct rtdm_driver rtdm_actor_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(rtdm_test_actor,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_RTDMTEST,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE | RTDM_EXCLUSIVE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rtdm_actor_context),
+	.ops = {
+		.open		= rtdm_actor_open,
+		.close		= rtdm_actor_close,
+		.ioctl_rt	= rtdm_actor_ioctl,
+	},
+};
+
+static struct rtdm_device device[3] = {
+	[0 ... 1] = {
+		.driver = &rtdm_basic_driver,
+		.label = "rtdm%d",
+	},
+	[2] = {
+		.driver = &rtdm_actor_driver,
+		.label = "rtdmx",
+	}
+};
+
+static int __init rtdm_test_init(void)
+{
+	int i, ret;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++) {
+		ret = rtdm_dev_register(device + i);
+		if (ret)
+			goto fail;
+	}
+
+	return 0;
+fail:
+	while (i-- > 0)
+		rtdm_dev_unregister(device + i);
+
+	return ret;
+}
+
+static void __exit rtdm_test_exit(void)
+{
+	int i;
+
+	for (i = 0; i < ARRAY_SIZE(device); i++)
+		rtdm_dev_unregister(device + i);
+}
+
+module_init(rtdm_test_init);
+module_exit(rtdm_test_exit);
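+
+/*
+ * Illustrative user-space usage (not part of the driver): a minimal
+ * hedged sketch of how this helper is typically driven, assuming the
+ * devices show up as /dev/rtdm/rtdm0 and /dev/rtdm/rtdmx on a stock
+ * Cobalt setup:
+ *
+ *	int fd = open("/dev/rtdm/rtdm0", O_RDWR);
+ *	int magic = 0;
+ *
+ *	ioctl(fd, RTTST_RTIOC_RTDM_PING_PRIMARY, &magic);
+ *	// magic == RTTST_RTDM_MAGIC_PRIMARY when served from primary mode
+ *
+ *	// Exercise the deferred-close path checked by close_timer_proc().
+ *	ioctl(fd, RTTST_RTIOC_RTDM_DEFER_CLOSE, RTTST_RTDM_DEFER_CLOSE_CONTEXT);
+ *	close(fd);
+ */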
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/testing/switchtest.c b/kernel/xenomai-v3.2.4/kernel/drivers/testing/switchtest.c
new file mode 100644
index 0000000..e2c0f4b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/testing/switchtest.c
@@ -0,0 +1,764 @@
+/*
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/vmalloc.h>
+#include <linux/semaphore.h>
+#include <cobalt/kernel/sched.h>
+#include <cobalt/kernel/synch.h>
+#include <cobalt/kernel/thread.h>
+#include <cobalt/kernel/trace.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+#include <asm/xenomai/fptest.h>
+
+MODULE_DESCRIPTION("Cobalt context switch test helper");
+MODULE_AUTHOR("Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>");
+MODULE_VERSION("0.1.1");
+MODULE_LICENSE("GPL");
+
+#define RTSWITCH_RT      0x10000
+#define RTSWITCH_NRT     0
+#define RTSWITCH_KERNEL  0x20000
+
+struct rtswitch_task {
+	struct rttst_swtest_task base;
+	rtdm_event_t rt_synch;
+	struct semaphore nrt_synch;
+	struct xnthread ktask;          /* For kernel-space real-time tasks. */
+	unsigned int last_switch;
+};
+
+struct rtswitch_context {
+	struct rtswitch_task *tasks;
+	unsigned int tasks_count;
+	unsigned int next_index;
+	struct semaphore lock;
+	unsigned int cpu;
+	unsigned int switches_count;
+
+	unsigned long pause_us;
+	unsigned int next_task;
+	rtdm_timer_t wake_up_delay;
+
+	unsigned int failed;
+	struct rttst_swtest_error error;
+
+	struct rtswitch_task *utask;
+	rtdm_nrtsig_t wake_utask;
+};
+
+static int fp_features;
+
+static int report(const char *fmt, ...)
+{
+	va_list ap;
+	int ret;
+
+	va_start(ap, fmt);
+	ret = vprintk(fmt, ap);
+	va_end(ap);
+
+	return ret;
+}
+
+static void handle_ktask_error(struct rtswitch_context *ctx, unsigned int fp_val)
+{
+	struct rtswitch_task *cur = &ctx->tasks[ctx->error.last_switch.to];
+	unsigned int i;
+
+	ctx->failed = 1;
+	ctx->error.fp_val = fp_val;
+
+	if ((cur->base.flags & RTSWITCH_RT) == RTSWITCH_RT)
+		for (i = 0; i < ctx->tasks_count; i++) {
+			struct rtswitch_task *task = &ctx->tasks[i];
+
+			/* Find the first non kernel-space task. */
+			if ((task->base.flags & RTSWITCH_KERNEL))
+				continue;
+
+			/* Unblock it. */
+			switch (task->base.flags & RTSWITCH_RT) {
+			case RTSWITCH_NRT:
+				ctx->utask = task;
+				rtdm_nrtsig_pend(&ctx->wake_utask);
+				break;
+
+			case RTSWITCH_RT:
+				rtdm_event_signal(&task->rt_synch);
+				break;
+			}
+
+			xnthread_suspend(&cur->ktask,
+					 XNSUSP, XN_INFINITE, XN_RELATIVE, NULL);
+		}
+}
+
+static int rtswitch_pend_rt(struct rtswitch_context *ctx,
+			    unsigned int idx)
+{
+	struct rtswitch_task *task;
+	int rc;
+
+	if (idx >= ctx->tasks_count)
+		return -EINVAL;
+
+	task = &ctx->tasks[idx];
+	task->base.flags |= RTSWITCH_RT;
+
+	rc = rtdm_event_wait(&task->rt_synch);
+	if (rc < 0)
+		return rc;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static void timed_wake_up(rtdm_timer_t *timer)
+{
+	struct rtswitch_context *ctx =
+		container_of(timer, struct rtswitch_context, wake_up_delay);
+	struct rtswitch_task *task;
+
+	task = &ctx->tasks[ctx->next_task];
+
+	switch (task->base.flags & RTSWITCH_RT) {
+	case RTSWITCH_NRT:
+		ctx->utask = task;
+		rtdm_nrtsig_pend(&ctx->wake_utask);
+		break;
+
+	case RTSWITCH_RT:
+		rtdm_event_signal(&task->rt_synch);
+	}
+}
+
+static int rtswitch_to_rt(struct rtswitch_context *ctx,
+			  unsigned int from_idx,
+			  unsigned int to_idx)
+{
+	struct rtswitch_task *from, *to;
+	int rc;
+
+	if (from_idx >= ctx->tasks_count || to_idx >= ctx->tasks_count)
+		return -EINVAL;
+
+	/* to == from is a special case which means
+	   "return to the previous task". */
+	if (to_idx == from_idx)
+		to_idx = ctx->error.last_switch.from;
+
+	from = &ctx->tasks[from_idx];
+	to = &ctx->tasks[to_idx];
+
+	from->base.flags |= RTSWITCH_RT;
+	from->last_switch = ++ctx->switches_count;
+	ctx->error.last_switch.from = from_idx;
+	ctx->error.last_switch.to = to_idx;
+	barrier();
+
+	if (ctx->pause_us) {
+		ctx->next_task = to_idx;
+		barrier();
+		rtdm_timer_start(&ctx->wake_up_delay,
+				 ctx->pause_us * 1000, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+		xnsched_lock();
+	} else
+		switch (to->base.flags & RTSWITCH_RT) {
+		case RTSWITCH_NRT:
+			ctx->utask = to;
+			barrier();
+			rtdm_nrtsig_pend(&ctx->wake_utask);
+			xnsched_lock();
+			break;
+
+		case RTSWITCH_RT:
+			xnsched_lock();
+			rtdm_event_signal(&to->rt_synch);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	rc = rtdm_event_wait(&from->rt_synch);
+	xnsched_unlock();
+
+	if (rc < 0)
+		return rc;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_pend_nrt(struct rtswitch_context *ctx,
+			     unsigned int idx)
+{
+	struct rtswitch_task *task;
+
+	if (idx >= ctx->tasks_count)
+		return -EINVAL;
+
+	task = &ctx->tasks[idx];
+
+	task->base.flags &= ~RTSWITCH_RT;
+
+	if (down_interruptible(&task->nrt_synch))
+		return -EINTR;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_to_nrt(struct rtswitch_context *ctx,
+			   unsigned int from_idx,
+			   unsigned int to_idx)
+{
+	struct rtswitch_task *from, *to;
+	unsigned int expected, fp_val;
+	int fp_check;
+
+	if (from_idx >= ctx->tasks_count || to_idx >= ctx->tasks_count)
+		return -EINVAL;
+
+	/* to == from is a special case which means
+	   "return to the previous task". */
+	if (to_idx == from_idx)
+		to_idx = ctx->error.last_switch.from;
+
+	from = &ctx->tasks[from_idx];
+	to = &ctx->tasks[to_idx];
+
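+	/*
+	 * FP register checking is only meaningful when this switch
+	 * directly reverses the previous one (the last recorded switch
+	 * went to_idx -> from_idx), so the pattern written on the other
+	 * side is still expected to be intact.
+	 */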
+	fp_check = ctx->switches_count == from->last_switch + 1
+		&& ctx->error.last_switch.from == to_idx
+		&& ctx->error.last_switch.to == from_idx;
+
+	from->base.flags &= ~RTSWITCH_RT;
+	from->last_switch = ++ctx->switches_count;
+	ctx->error.last_switch.from = from_idx;
+	ctx->error.last_switch.to = to_idx;
+	barrier();
+
+	if (ctx->pause_us) {
+		ctx->next_task = to_idx;
+		barrier();
+		rtdm_timer_start(&ctx->wake_up_delay,
+				 ctx->pause_us * 1000, 0,
+				 RTDM_TIMERMODE_RELATIVE);
+	} else
+		switch (to->base.flags & RTSWITCH_RT) {
+		case RTSWITCH_NRT:
+		switch_to_nrt:
+			up(&to->nrt_synch);
+			break;
+
+		case RTSWITCH_RT:
+
+			if (!fp_check || fp_linux_begin() < 0) {
+				fp_check = 0;
+				goto signal_nofp;
+			}
+
+			expected = from_idx + 500 +
+				(ctx->switches_count % 4000000) * 1000;
+
+			fp_regs_set(fp_features, expected);
+			rtdm_event_signal(&to->rt_synch);
+			fp_val = fp_regs_check(fp_features, expected, report);
+			fp_linux_end();
+
+			if (down_interruptible(&from->nrt_synch))
+				return -EINTR;
+			if (ctx->failed)
+				return 1;
+			if (fp_val != expected) {
+				handle_ktask_error(ctx, fp_val);
+				return 1;
+			}
+
+			from->base.flags &= ~RTSWITCH_RT;
+			from->last_switch = ++ctx->switches_count;
+			ctx->error.last_switch.from = from_idx;
+			ctx->error.last_switch.to = to_idx;
+			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
+				goto switch_to_nrt;
+			expected = from_idx + 500 +
+				(ctx->switches_count % 4000000) * 1000;
+			barrier();
+
+			fp_linux_begin();
+			fp_regs_set(fp_features, expected);
+			rtdm_event_signal(&to->rt_synch);
+			fp_val = fp_regs_check(fp_features, expected, report);
+			fp_linux_end();
+
+			if (down_interruptible(&from->nrt_synch))
+				return -EINTR;
+			if (ctx->failed)
+				return 1;
+			if (fp_val != expected) {
+				handle_ktask_error(ctx, fp_val);
+				return 1;
+			}
+
+			from->base.flags &= ~RTSWITCH_RT;
+			from->last_switch = ++ctx->switches_count;
+			ctx->error.last_switch.from = from_idx;
+			ctx->error.last_switch.to = to_idx;
+			barrier();
+			if ((to->base.flags & RTSWITCH_RT) == RTSWITCH_NRT)
+				goto switch_to_nrt;
+
+		signal_nofp:
+			rtdm_event_signal(&to->rt_synch);
+			break;
+
+		default:
+			return -EINVAL;
+		}
+
+	if (down_interruptible(&from->nrt_synch))
+		return -EINTR;
+
+	if (ctx->failed)
+		return 1;
+
+	return 0;
+}
+
+static int rtswitch_set_tasks_count(struct rtswitch_context *ctx, unsigned int count)
+{
+	struct rtswitch_task *tasks;
+
+	if (ctx->tasks_count == count)
+		return 0;
+
+	tasks = vmalloc(count * sizeof(*tasks));
+
+	if (!tasks)
+		return -ENOMEM;
+
+	down(&ctx->lock);
+
+	if (ctx->tasks)
+		vfree(ctx->tasks);
+
+	ctx->tasks = tasks;
+	ctx->tasks_count = count;
+	ctx->next_index = 0;
+
+	up(&ctx->lock);
+
+	return 0;
+}
+
+static int rtswitch_register_task(struct rtswitch_context *ctx,
+				  struct rttst_swtest_task *arg)
+{
+	struct rtswitch_task *t;
+
+	down(&ctx->lock);
+
+	if (ctx->next_index == ctx->tasks_count) {
+		up(&ctx->lock);
+		return -EBUSY;
+	}
+
+	arg->index = ctx->next_index;
+	t = &ctx->tasks[arg->index];
+	ctx->next_index++;
+	t->base = *arg;
+	t->last_switch = 0;
+	sema_init(&t->nrt_synch, 0);
+	rtdm_event_init(&t->rt_synch, 0);
+
+	up(&ctx->lock);
+
+	return 0;
+}
+
+struct taskarg {
+	struct rtswitch_context *ctx;
+	struct rtswitch_task *task;
+};
+
+static void rtswitch_ktask(void *cookie)
+{
+	struct taskarg *arg = (struct taskarg *) cookie;
+	unsigned int fp_val, expected, to, i = 0;
+	struct rtswitch_context *ctx = arg->ctx;
+	struct rtswitch_task *task = arg->task;
+
+	to = task->base.index;
+
+	rtswitch_pend_rt(ctx, task->base.index);
+
+	while (!rtdm_task_should_stop()) {
+		if (task->base.flags & RTTST_SWTEST_USE_FPU) {
+			fp_init();
+			fp_regs_set(fp_features, task->base.index + i * 1000);
+		}
+
+		switch (i % 3) {
+		case 0:
+			/* to == from means "return to last task" */
+			rtswitch_to_rt(ctx, task->base.index, task->base.index);
+			break;
+		case 1:
+			if (++to == task->base.index)
+				++to;
+			if (to > ctx->tasks_count - 1)
+				to = 0;
+			if (to == task->base.index)
+				++to;
+
+			fallthrough;
+		case 2:
+			rtswitch_to_rt(ctx, task->base.index, to);
+		}
+
+		if (task->base.flags & RTTST_SWTEST_USE_FPU) {
+			expected = task->base.index + i * 1000;
+			fp_val = fp_regs_check(fp_features, expected, report);
+
+			if (fp_val != expected) {
+				if (task->base.flags & RTTST_SWTEST_FREEZE)
+					xntrace_user_freeze(0, 0);
+				handle_ktask_error(ctx, fp_val);
+			}
+		}
+
+		if (++i == 4000000)
+			i = 0;
+	}
+}
+
+static int rtswitch_create_ktask(struct rtswitch_context *ctx,
+				 struct rttst_swtest_task *ptask)
+{
+	union xnsched_policy_param param;
+	struct xnthread_start_attr sattr;
+	struct xnthread_init_attr iattr;
+	struct rtswitch_task *task;
+	struct taskarg arg;
+	int init_flags;
+	char name[30];
+	int err;
+
+	/*
+	 * Silently disable FP tests in kernel if FPU is not supported
+	 * there. Typical case is math emulation support: we can use
+	 * it from userland as a synthetic FPU, but there is no sane
+	 * way to use it from kernel-based threads (Xenomai or Linux).
+	 */
+	if (!fp_kernel_supported())
+		ptask->flags &= ~RTTST_SWTEST_USE_FPU;
+
+	ptask->flags |= RTSWITCH_KERNEL;
+	err = rtswitch_register_task(ctx, ptask);
+
+	if (err)
+		return err;
+
+	ksformat(name, sizeof(name), "rtk%d/%u", ptask->index, ctx->cpu);
+
+	task = &ctx->tasks[ptask->index];
+
+	arg.ctx = ctx;
+	arg.task = task;
+
+	init_flags = (ptask->flags & RTTST_SWTEST_FPU) ? XNFPU : 0;
+
+	iattr.name = name;
+	iattr.flags = init_flags;
+	iattr.personality = &xenomai_personality;
+	iattr.affinity = *cpumask_of(ctx->cpu);
+	param.rt.prio = 1;
+
+	set_cpus_allowed_ptr(current, cpumask_of(ctx->cpu));
+
+	err = xnthread_init(&task->ktask,
+			    &iattr, &xnsched_class_rt, &param);
+	if (!err) {
+		sattr.mode = 0;
+		sattr.entry = rtswitch_ktask;
+		sattr.cookie = &arg;
+		err = xnthread_start(&task->ktask, &sattr);
+		if (err)
+			__xnthread_discard(&task->ktask);
+	} else
+		/*
+		 * In order to avoid calling xnthread_cancel() for an
+		 * invalid thread.
+		 */
+		task->base.flags = 0;
+	/*
+	 * Putting the argument on stack is safe, because the new
+	 * thread, thanks to the above call to set_cpus_allowed_ptr(),
+	 * will preempt the current thread immediately, and will
+	 * suspend only once the arguments on stack are used.
+	 */
+
+	return err;
+}
+
+static void rtswitch_utask_waker(rtdm_nrtsig_t *sig, void *arg)
+{
+	struct rtswitch_context *ctx = (struct rtswitch_context *)arg;
+	up(&ctx->utask->nrt_synch);
+}
+
+static int rtswitch_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+
+	ctx->tasks = NULL;
+	ctx->tasks_count = ctx->next_index = ctx->cpu = ctx->switches_count = 0;
+	sema_init(&ctx->lock, 1);
+	ctx->failed = 0;
+	ctx->error.last_switch.from = ctx->error.last_switch.to = -1;
+	ctx->pause_us = 0;
+
+	rtdm_nrtsig_init(&ctx->wake_utask, rtswitch_utask_waker, ctx);
+
+	rtdm_timer_init(&ctx->wake_up_delay, timed_wake_up, "switchtest timer");
+
+	return 0;
+}
+
+static void rtswitch_close(struct rtdm_fd *fd)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	unsigned int i;
+
+	rtdm_timer_destroy(&ctx->wake_up_delay);
+	rtdm_nrtsig_destroy(&ctx->wake_utask);
+
+	if (ctx->tasks) {
+		set_cpus_allowed_ptr(current, cpumask_of(ctx->cpu));
+
+		for (i = 0; i < ctx->next_index; i++) {
+			struct rtswitch_task *task = &ctx->tasks[i];
+
+			if (task->base.flags & RTSWITCH_KERNEL) {
+				rtdm_task_destroy(&task->ktask);
+				rtdm_task_join(&task->ktask);
+			}
+			rtdm_event_destroy(&task->rt_synch);
+		}
+		vfree(ctx->tasks);
+	}
+}
+
+static int rtswitch_ioctl_nrt(struct rtdm_fd *fd,
+			      unsigned int request,
+			      void *arg)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	struct rttst_swtest_task task;
+	struct rttst_swtest_dir fromto;
+	__u32 count;
+	int err;
+
+	switch (request) {
+	case RTTST_RTIOC_SWTEST_SET_TASKS_COUNT:
+		return rtswitch_set_tasks_count(ctx,
+						(unsigned long) arg);
+
+	case RTTST_RTIOC_SWTEST_SET_CPU:
+		if ((unsigned long) arg > num_online_cpus() - 1)
+			return -EINVAL;
+
+		ctx->cpu = (unsigned long) arg;
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_SET_PAUSE:
+		ctx->pause_us = (unsigned long) arg;
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_REGISTER_UTASK:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		err = rtswitch_register_task(ctx, &task);
+
+		if (!err)
+			rtdm_copy_to_user(fd,
+					  arg,
+					  &task,
+					  sizeof(task));
+
+		return err;
+
+	case RTTST_RTIOC_SWTEST_CREATE_KTASK:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		err = rtswitch_create_ktask(ctx, &task);
+
+		if (!err)
+			rtdm_copy_to_user(fd,
+					  arg,
+					  &task,
+					  sizeof(task));
+
+		return err;
+
+	case RTTST_RTIOC_SWTEST_PEND:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		return rtswitch_pend_nrt(ctx, task.index);
+
+	case RTTST_RTIOC_SWTEST_SWITCH_TO:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(fromto)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd,
+				    &fromto,
+				    arg,
+				    sizeof(fromto));
+
+		if (fromto.switch_mode) {
+			xnthread_harden();
+			return rtswitch_to_rt(ctx, fromto.from, fromto.to);
+		}
+		return rtswitch_to_nrt(ctx, fromto.from, fromto.to);
+
+	case RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(count)))
+			return -EFAULT;
+
+		count = ctx->switches_count;
+
+		rtdm_copy_to_user(fd, arg, &count, sizeof(count));
+
+		return 0;
+
+	case RTTST_RTIOC_SWTEST_GET_LAST_ERROR:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(ctx->error)))
+			return -EFAULT;
+
+		rtdm_copy_to_user(fd,
+				  arg,
+				  &ctx->error,
+				  sizeof(ctx->error));
+
+		return 0;
+
+	default:
+		return -ENOSYS;
+	}
+}
+
+static int rtswitch_ioctl_rt(struct rtdm_fd *fd,
+			     unsigned int request,
+			     void *arg)
+{
+	struct rtswitch_context *ctx = rtdm_fd_to_private(fd);
+	struct rttst_swtest_task task;
+	struct rttst_swtest_dir fromto;
+
+	switch (request) {
+	case RTTST_RTIOC_SWTEST_PEND:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(task)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd, &task, arg, sizeof(task));
+
+		return rtswitch_pend_rt(ctx, task.index);
+
+	case RTTST_RTIOC_SWTEST_SWITCH_TO:
+		if (!rtdm_read_user_ok(fd, arg, sizeof(fromto)))
+			return -EFAULT;
+
+		rtdm_copy_from_user(fd,
+				    &fromto,
+				    arg,
+				    sizeof(fromto));
+
+		if (fromto.switch_mode) {
+			xnthread_relax(0, 0);
+			return rtswitch_to_nrt(ctx, fromto.from, fromto.to);
+		}
+		return rtswitch_to_rt(ctx, fromto.from, fromto.to);
+
+	case RTTST_RTIOC_SWTEST_GET_LAST_ERROR:
+		if (!rtdm_rw_user_ok(fd, arg, sizeof(ctx->error)))
+			return -EFAULT;
+
+		rtdm_copy_to_user(fd,
+				  arg,
+				  &ctx->error,
+				  sizeof(ctx->error));
+
+		return 0;
+
+	default:
+		return -ENOSYS;
+	}
+}
+
+static struct rtdm_driver switchtest_driver = {
+	.profile_info = RTDM_PROFILE_INFO(switchtest,
+					  RTDM_CLASS_TESTING,
+					  RTDM_SUBCLASS_SWITCHTEST,
+					  RTTST_PROFILE_VER),
+	.device_flags = RTDM_NAMED_DEVICE,
+	.device_count =	1,
+	.context_size = sizeof(struct rtswitch_context),
+	.ops = {
+		.open = rtswitch_open,
+		.close = rtswitch_close,
+		.ioctl_rt = rtswitch_ioctl_rt,
+		.ioctl_nrt = rtswitch_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &switchtest_driver,
+	.label = "switchtest",
+};
+
+int __init __switchtest_init(void)
+{
+	fp_features = fp_detect();
+
+	return rtdm_dev_register(&device);
+}
+
+void __switchtest_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(__switchtest_init);
+module_exit(__switchtest_exit);
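+
+/*
+ * Illustrative user-space flow (not part of the driver): a rough,
+ * hedged sketch of how the switchtest device is driven, assuming it
+ * appears as /dev/rtdm/switchtest; error handling is omitted:
+ *
+ *	struct rttst_swtest_task me = { .flags = 0 };
+ *	struct rttst_swtest_task kt = { .flags = RTTST_SWTEST_USE_FPU };
+ *	struct rttst_swtest_dir fromto;
+ *	int fd = open("/dev/rtdm/switchtest", O_RDWR);
+ *
+ *	ioctl(fd, RTTST_RTIOC_SWTEST_SET_TASKS_COUNT, 2);
+ *	ioctl(fd, RTTST_RTIOC_SWTEST_REGISTER_UTASK, &me);
+ *	ioctl(fd, RTTST_RTIOC_SWTEST_CREATE_KTASK, &kt);
+ *	// me.index and kt.index now identify both tasks to the driver.
+ *
+ *	fromto.from = me.index;
+ *	fromto.to = kt.index;
+ *	fromto.switch_mode = 0;	// switch without changing mode first
+ *	ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &fromto);
+ */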
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/testing/timerbench.c b/kernel/xenomai-v3.2.4/kernel/drivers/testing/timerbench.c
new file mode 100644
index 0000000..68e50a2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/testing/timerbench.c
@@ -0,0 +1,529 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <linux/module.h>
+#include <linux/slab.h>
+#include <linux/semaphore.h>
+#include <cobalt/kernel/trace.h>
+#include <cobalt/kernel/arith.h>
+#include <rtdm/testing.h>
+#include <rtdm/driver.h>
+#include <rtdm/compat.h>
+
+MODULE_DESCRIPTION("Timer latency test helper");
+MODULE_AUTHOR("Jan Kiszka <jan.kiszka@web.de>");
+MODULE_VERSION("0.2.1");
+MODULE_LICENSE("GPL");
+
+struct rt_tmbench_context {
+	int mode;
+	unsigned int period;
+	int freeze_max;
+	int warmup_loops;
+	int samples_per_sec;
+	int32_t *histogram_min;
+	int32_t *histogram_max;
+	int32_t *histogram_avg;
+	int histogram_size;
+	int bucketsize;
+
+	rtdm_task_t timer_task;
+
+	rtdm_timer_t timer;
+	int warmup;
+	uint64_t start_time;
+	uint64_t date;
+	struct rttst_bench_res curr;
+
+	rtdm_event_t result_event;
+	struct rttst_interm_bench_res result;
+
+	struct semaphore nrt_mutex;
+};
+
+static inline void add_histogram(struct rt_tmbench_context *ctx,
+				 __s32 *histogram, __s32 addval)
+{
+	/* bucketsize steps */
+	int inabs = (addval >= 0 ? addval : -addval) / ctx->bucketsize;
+	histogram[inabs < ctx->histogram_size ?
+		  inabs : ctx->histogram_size - 1]++;
+}
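+
+/*
+ * For instance, with bucketsize = 1000 (ns) and histogram_size = 200,
+ * a 3200 ns sample lands in bucket 3, while anything at or beyond
+ * 200 us is folded into the last bucket.
+ */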
+
+static inline long long slldiv(long long s, unsigned d)
+{
+	return s >= 0 ? xnarch_ulldiv(s, d, NULL) : -xnarch_ulldiv(-s, d, NULL);
+}
+
+static void eval_inner_loop(struct rt_tmbench_context *ctx, __s32 dt)
+{
+	if (dt > ctx->curr.max)
+		ctx->curr.max = dt;
+	if (dt < ctx->curr.min)
+		ctx->curr.min = dt;
+	ctx->curr.avg += dt;
+
+	if (xntrace_enabled() &&
+		ctx->freeze_max &&
+		(dt > ctx->result.overall.max) &&
+		!ctx->warmup) {
+		ctx->result.overall.max = dt;
+		xntrace_latpeak_freeze(dt);
+	}
+
+	ctx->date += ctx->period;
+
+	if (!ctx->warmup && ctx->histogram_size)
+		add_histogram(ctx, ctx->histogram_avg, dt);
+
+	/* Evaluate overruns and adjust next release date.
+	   Beware of signedness! */
+	while (dt > 0 && (unsigned long)dt > ctx->period) {
+		ctx->curr.overruns++;
+		ctx->date += ctx->period;
+		dt -= ctx->period;
+	}
+}
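+
+/*
+ * For instance, with a 100 us period, a single dt of 250 us above
+ * accounts for two overruns and pushes the next release date two
+ * extra periods ahead, so the test resynchronizes instead of counting
+ * every subsequent sample as late.
+ */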
+
+static void eval_outer_loop(struct rt_tmbench_context *ctx)
+{
+	if (!ctx->warmup) {
+		if (ctx->histogram_size) {
+			add_histogram(ctx, ctx->histogram_max, ctx->curr.max);
+			add_histogram(ctx, ctx->histogram_min, ctx->curr.min);
+		}
+
+		ctx->result.last.min = ctx->curr.min;
+		if (ctx->curr.min < ctx->result.overall.min)
+			ctx->result.overall.min = ctx->curr.min;
+
+		ctx->result.last.max = ctx->curr.max;
+		if (ctx->curr.max > ctx->result.overall.max)
+			ctx->result.overall.max = ctx->curr.max;
+
+		ctx->result.last.avg =
+		    slldiv(ctx->curr.avg, ctx->samples_per_sec);
+		ctx->result.overall.avg += ctx->result.last.avg;
+		ctx->result.overall.overruns += ctx->curr.overruns;
+		rtdm_event_pulse(&ctx->result_event);
+	}
+
+	if (ctx->warmup &&
+	    (ctx->result.overall.test_loops == ctx->warmup_loops)) {
+		ctx->result.overall.test_loops = 0;
+		ctx->warmup = 0;
+	}
+
+	ctx->curr.min = 10000000;
+	ctx->curr.max = -10000000;
+	ctx->curr.avg = 0;
+	ctx->curr.overruns = 0;
+
+	ctx->result.overall.test_loops++;
+}
+
+static void timer_task_proc(void *arg)
+{
+	struct rt_tmbench_context *ctx = arg;
+	int count, err;
+	spl_t s;
+
+	/* first event: one millisecond from now. */
+	ctx->date = rtdm_clock_read_monotonic() + 1000000;
+
+	while (1) {
+		for (count = 0; count < ctx->samples_per_sec; count++) {
+			cobalt_atomic_enter(s);
+			ctx->start_time = rtdm_clock_read_monotonic();
+			err = rtdm_task_sleep_abs(ctx->date,
+						  RTDM_TIMERMODE_ABSOLUTE);
+			cobalt_atomic_leave(s);
+			if (err)
+				return;
+
+			eval_inner_loop(ctx,
+					(__s32)(rtdm_clock_read_monotonic() -
+						ctx->date));
+		}
+		eval_outer_loop(ctx);
+	}
+}
+
+static void timer_proc(rtdm_timer_t *timer)
+{
+	struct rt_tmbench_context *ctx =
+	    container_of(timer, struct rt_tmbench_context, timer);
+	int err;
+
+	do {
+		eval_inner_loop(ctx, (__s32)(rtdm_clock_read_monotonic() -
+					     ctx->date));
+
+		ctx->start_time = rtdm_clock_read_monotonic();
+		err = rtdm_timer_start_in_handler(&ctx->timer, ctx->date, 0,
+						  RTDM_TIMERMODE_ABSOLUTE);
+
+		if (++ctx->curr.test_loops >= ctx->samples_per_sec) {
+			ctx->curr.test_loops = 0;
+			eval_outer_loop(ctx);
+		}
+	} while (err);
+}
+
+static int rt_tmbench_open(struct rtdm_fd *fd, int oflags)
+{
+	struct rt_tmbench_context *ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	ctx->mode = RTTST_TMBENCH_INVALID;
+	sema_init(&ctx->nrt_mutex, 1);
+
+	return 0;
+}
+
+static void rt_tmbench_close(struct rtdm_fd *fd)
+{
+	struct rt_tmbench_context *ctx;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	down(&ctx->nrt_mutex);
+
+	if (ctx->mode >= 0) {
+		if (ctx->mode == RTTST_TMBENCH_TASK)
+			rtdm_task_destroy(&ctx->timer_task);
+		else if (ctx->mode == RTTST_TMBENCH_HANDLER)
+			rtdm_timer_destroy(&ctx->timer);
+
+		rtdm_event_destroy(&ctx->result_event);
+
+		if (ctx->histogram_size)
+			kfree(ctx->histogram_min);
+
+		ctx->mode = RTTST_TMBENCH_INVALID;
+		ctx->histogram_size = 0;
+	}
+
+	up(&ctx->nrt_mutex);
+}
+
+static int rt_tmbench_start(struct rtdm_fd *fd,
+			    struct rt_tmbench_context *ctx,
+			    struct rttst_tmbench_config __user *user_config)
+{
+	int err = 0;
+	spl_t s;
+
+	struct rttst_tmbench_config config_buf;
+	struct rttst_tmbench_config *config =
+		(struct rttst_tmbench_config *)user_config;
+
+	if (rtdm_fd_is_user(fd)) {
+		if (rtdm_safe_copy_from_user
+		    (fd, &config_buf, user_config,
+		     sizeof(struct rttst_tmbench_config)) < 0)
+			return -EFAULT;
+
+		config = &config_buf;
+	}
+
+	down(&ctx->nrt_mutex);
+
+	ctx->period = config->period;
+	ctx->warmup_loops = config->warmup_loops;
+	ctx->samples_per_sec = 1000000000 / ctx->period;
+	ctx->histogram_size = config->histogram_size;
+	ctx->freeze_max = config->freeze_max;
+
+	if (ctx->histogram_size > 0) {
+		ctx->histogram_min =
+		    kmalloc(3 * ctx->histogram_size * sizeof(int32_t),
+			    GFP_KERNEL);
+		if (!ctx->histogram_min) {
+			up(&ctx->nrt_mutex);
+			return -ENOMEM;
+		}
+
+		ctx->histogram_max =
+		    ctx->histogram_min + config->histogram_size;
+		ctx->histogram_avg =
+		    ctx->histogram_max + config->histogram_size;
+
+		memset(ctx->histogram_min, 0,
+		       3 * ctx->histogram_size * sizeof(int32_t));
+		ctx->bucketsize = config->histogram_bucketsize;
+	}
+
+	ctx->result.overall.min = 10000000;
+	ctx->result.overall.max = -10000000;
+	ctx->result.overall.avg = 0;
+	ctx->result.overall.test_loops = 1;
+	ctx->result.overall.overruns = 0;
+
+	ctx->warmup = 1;
+
+	ctx->curr.min = 10000000;
+	ctx->curr.max = -10000000;
+	ctx->curr.avg = 0;
+	ctx->curr.overruns = 0;
+	ctx->mode = RTTST_TMBENCH_INVALID;
+
+	rtdm_event_init(&ctx->result_event, 0);
+
+	if (config->mode == RTTST_TMBENCH_TASK) {
+		err = rtdm_task_init(&ctx->timer_task, "timerbench",
+				timer_task_proc, ctx,
+				config->priority, 0);
+		if (!err)
+			ctx->mode = RTTST_TMBENCH_TASK;
+	} else {
+		rtdm_timer_init(&ctx->timer, timer_proc,
+				rtdm_fd_device(fd)->name);
+
+		ctx->curr.test_loops = 0;
+
+		ctx->mode = RTTST_TMBENCH_HANDLER;
+
+		cobalt_atomic_enter(s);
+		ctx->start_time = rtdm_clock_read_monotonic();
+
+		/* first event: one millisecond from now. */
+		ctx->date = ctx->start_time + 1000000;
+
+		err = rtdm_timer_start(&ctx->timer, ctx->date, 0,
+				RTDM_TIMERMODE_ABSOLUTE);
+		cobalt_atomic_leave(s);
+	}
+
+	up(&ctx->nrt_mutex);
+
+	return err;
+}
+
+static int kernel_copy_results(struct rt_tmbench_context *ctx,
+			       struct rttst_overall_bench_res *res)
+{
+	int size;
+
+	memcpy(&res->result, &ctx->result.overall, sizeof(res->result));
+
+	if (ctx->histogram_size > 0) {
+		size = ctx->histogram_size * sizeof(int32_t);
+		memcpy(res->histogram_min, ctx->histogram_min, size);
+		memcpy(res->histogram_max, ctx->histogram_max, size);
+		memcpy(res->histogram_avg, ctx->histogram_avg, size);
+		/* The histogram buffer is freed by rt_tmbench_stop(). */
+	}
+
+	return 0;
+}
+
+static int user_copy_results(struct rt_tmbench_context *ctx,
+			     struct rttst_overall_bench_res __user *u_res)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	struct rttst_overall_bench_res res_buf;
+	int ret, size;
+
+	ret = rtdm_safe_copy_to_user(fd, &u_res->result,
+				     &ctx->result.overall,
+				     sizeof(u_res->result));
+	if (ret || ctx->histogram_size == 0)
+		return ret;
+
+	size = ctx->histogram_size * sizeof(int32_t);
+
+	if (rtdm_safe_copy_from_user(fd, &res_buf, u_res, sizeof(res_buf)) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_min,
+				   ctx->histogram_min, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_max,
+				   ctx->histogram_max, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, res_buf.histogram_avg,
+				   ctx->histogram_avg, size) < 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_ARCH_SYS3264
+
+static int compat_user_copy_results(struct rt_tmbench_context *ctx,
+				    struct compat_rttst_overall_bench_res __user *u_res)
+{
+	struct compat_rttst_overall_bench_res res_buf;
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	int ret, size;
+
+	ret = rtdm_safe_copy_to_user(fd, &u_res->result,
+				     &ctx->result.overall,
+				     sizeof(u_res->result));
+	if (ret || ctx->histogram_size == 0)
+		return ret;
+
+	size = ctx->histogram_size * sizeof(int32_t);
+
+	if (rtdm_safe_copy_from_user(fd, &res_buf, u_res, sizeof(res_buf)) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_min),
+				   ctx->histogram_min, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_max),
+				   ctx->histogram_max, size) < 0 ||
+	    rtdm_safe_copy_to_user(fd, compat_ptr(res_buf.histogram_avg),
+				   ctx->histogram_avg, size) < 0)
+		return -EFAULT;
+
+	return 0;
+}
+
+#endif /* CONFIG_XENO_ARCH_SYS3264 */
+
+static int rt_tmbench_stop(struct rt_tmbench_context *ctx, void *u_res)
+{
+	struct rtdm_fd *fd = rtdm_private_to_fd(ctx);
+	int ret;
+
+	down(&ctx->nrt_mutex);
+
+	if (ctx->mode < 0) {
+		up(&ctx->nrt_mutex);
+		return -EINVAL;
+	}
+
+	if (ctx->mode == RTTST_TMBENCH_TASK)
+		rtdm_task_destroy(&ctx->timer_task);
+	else if (ctx->mode == RTTST_TMBENCH_HANDLER)
+		rtdm_timer_destroy(&ctx->timer);
+
+	rtdm_event_destroy(&ctx->result_event);
+
+	ctx->mode = RTTST_TMBENCH_INVALID;
+
+	ctx->result.overall.avg =
+	    slldiv(ctx->result.overall.avg,
+		   ((ctx->result.overall.test_loops) > 1 ?
+		    ctx->result.overall.test_loops : 2) - 1);
+
+	if (rtdm_fd_is_user(fd)) {
+#ifdef CONFIG_XENO_ARCH_SYS3264
+		if (rtdm_fd_is_compat(fd))
+			ret = compat_user_copy_results(ctx, u_res);
+		else
+#endif
+			ret = user_copy_results(ctx, u_res);
+	} else
+		ret = kernel_copy_results(ctx, u_res);
+
+	if (ctx->histogram_size > 0)
+		kfree(ctx->histogram_min);
+
+	up(&ctx->nrt_mutex);
+
+	return ret;
+}
+
+static int rt_tmbench_ioctl_nrt(struct rtdm_fd *fd,
+				unsigned int request, void __user *arg)
+{
+	struct rt_tmbench_context *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTTST_RTIOC_TMBENCH_START:
+		err = rt_tmbench_start(fd, ctx, arg);
+		break;
+
+	COMPAT_CASE(RTTST_RTIOC_TMBENCH_STOP):
+		err = rt_tmbench_stop(ctx, arg);
+		break;
+	default:
+		err = -ENOSYS;
+	}
+
+	return err;
+}
+
+static int rt_tmbench_ioctl_rt(struct rtdm_fd *fd,
+			       unsigned int request, void __user *arg)
+{
+	struct rt_tmbench_context *ctx;
+	int err = 0;
+
+	ctx = rtdm_fd_to_private(fd);
+
+	switch (request) {
+	case RTTST_RTIOC_INTERM_BENCH_RES:
+		err = rtdm_event_wait(&ctx->result_event);
+		if (err)
+			return err;
+
+		if (rtdm_fd_is_user(fd)) {
+			struct rttst_interm_bench_res __user *user_res = arg;
+
+			err = rtdm_safe_copy_to_user(fd, user_res,
+						     &ctx->result,
+						     sizeof(*user_res));
+		} else {
+			struct rttst_interm_bench_res *res = (void *)arg;
+
+			memcpy(res, &ctx->result, sizeof(*res));
+		}
+
+		break;
+
+	default:
+		err = -ENOSYS;
+	}
+
+	return err;
+}
+
+static struct rtdm_driver timerbench_driver = {
+	.profile_info		= RTDM_PROFILE_INFO(timerbench,
+						    RTDM_CLASS_TESTING,
+						    RTDM_SUBCLASS_TIMERBENCH,
+						    RTTST_PROFILE_VER),
+	.device_flags		= RTDM_NAMED_DEVICE,
+	.device_count		= 1,
+	.context_size		= sizeof(struct rt_tmbench_context),
+	.ops = {
+		.open		= rt_tmbench_open,
+		.close		= rt_tmbench_close,
+		.ioctl_rt	= rt_tmbench_ioctl_rt,
+		.ioctl_nrt	= rt_tmbench_ioctl_nrt,
+	},
+};
+
+static struct rtdm_device device = {
+	.driver = &timerbench_driver,
+	.label = "timerbench",
+};
+
+static int __init __timerbench_init(void)
+{
+	return rtdm_dev_register(&device);
+}
+
+static void __timerbench_exit(void)
+{
+	rtdm_dev_unregister(&device);
+}
+
+module_init(__timerbench_init);
+module_exit(__timerbench_exit);
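+
+/*
+ * Illustrative user-space usage (not part of the driver): a hedged
+ * sketch assuming the device appears as /dev/rtdm/timerbench:
+ *
+ *	struct rttst_tmbench_config config = {
+ *		.mode = RTTST_TMBENCH_TASK,	// or RTTST_TMBENCH_HANDLER
+ *		.period = 100000,		// 100 us
+ *		.priority = 99,
+ *		.warmup_loops = 1,
+ *		.histogram_size = 0,
+ *		.freeze_max = 0,
+ *	};
+ *	struct rttst_interm_bench_res res;
+ *	int fd = open("/dev/rtdm/timerbench", O_RDWR);
+ *
+ *	ioctl(fd, RTTST_RTIOC_TMBENCH_START, &config);
+ *	ioctl(fd, RTTST_RTIOC_INTERM_BENCH_RES, &res);	// blocks ~1s
+ *	// res.last holds the past second's min/avg/max latencies.
+ */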
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/udd/Kconfig b/kernel/xenomai-v3.2.4/kernel/drivers/udd/Kconfig
new file mode 100644
index 0000000..86b9a82
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/udd/Kconfig
@@ -0,0 +1,10 @@
+menu "UDD support"
+
+config XENO_DRIVERS_UDD
+	tristate "User-space device driver framework"
+	help
+
+	An RTDM-based driver providing interrupt control and I/O
+	memory access interfaces to user-space device drivers.
+
+endmenu
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/udd/Makefile b/kernel/xenomai-v3.2.4/kernel/drivers/udd/Makefile
new file mode 100644
index 0000000..dedf72b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/udd/Makefile
@@ -0,0 +1,5 @@
+ccflags-y += -I$(srctree)/kernel
+
+obj-$(CONFIG_XENO_DRIVERS_UDD) += xeno_udd.o
+
+xeno_udd-y := udd.o
diff --git a/kernel/xenomai-v3.2.4/kernel/drivers/udd/udd.c b/kernel/xenomai-v3.2.4/kernel/drivers/udd/udd.c
new file mode 100644
index 0000000..d263afc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/kernel/drivers/udd/udd.c
@@ -0,0 +1,665 @@
+/*
+ * This file is part of the Xenomai project.
+ *
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <linux/module.h>
+#include <linux/init.h>
+#include <linux/slab.h>
+#include <linux/mm.h>
+#include <rtdm/cobalt.h>
+#include <rtdm/driver.h>
+#include <rtdm/udd.h>
+#include <pipeline/inband_work.h>
+
+struct udd_context {
+	u32 event_count;
+};
+
+static int udd_open(struct rtdm_fd *fd, int oflags)
+{
+	struct udd_context *context;
+	struct udd_device *udd;
+	int ret;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.open) {
+		ret = udd->ops.open(fd, oflags);
+		if (ret)
+			return ret;
+	}
+
+	context = rtdm_fd_to_private(fd);
+	context->event_count = 0;
+
+	return 0;
+}
+
+static void udd_close(struct rtdm_fd *fd)
+{
+	struct udd_device *udd;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.close)
+		udd->ops.close(fd);
+}
+
+static int udd_ioctl_rt(struct rtdm_fd *fd,
+			unsigned int request, void __user *arg)
+{
+	struct udd_signotify signfy;
+	struct udd_reserved *ur;
+	struct udd_device *udd;
+	rtdm_event_t done;
+	int ret;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->ops.ioctl) {
+		ret = udd->ops.ioctl(fd, request, arg);
+		if (ret != -ENOSYS)
+			return ret;
+	}
+
+	ur = &udd->__reserved;
+
+	switch (request) {
+	case UDD_RTIOC_IRQSIG:
+		ret = rtdm_safe_copy_from_user(fd, &signfy, arg, sizeof(signfy));
+		if (ret)
+			return ret;
+		/* Early check, we'll redo at each signal issue. */
+		if (signfy.pid <= 0)
+			ur->signfy.pid = -1;
+		else {
+			if (signfy.sig < SIGRTMIN || signfy.sig > SIGRTMAX)
+				return -EINVAL;
+			if (cobalt_thread_find_local(signfy.pid) == NULL)
+				return -EINVAL;
+			ur->signfy = signfy;
+		}
+		break;
+	case UDD_RTIOC_IRQEN:
+	case UDD_RTIOC_IRQDIS:
+		if (udd->irq == UDD_IRQ_NONE || udd->irq == UDD_IRQ_CUSTOM)
+			return -EIO;
+		rtdm_event_init(&done, 0);
+		if (request == UDD_RTIOC_IRQEN)
+			udd_enable_irq(udd, &done);
+		else
+			udd_disable_irq(udd, &done);
+		ret = rtdm_event_wait(&done);
+		if (ret != -EIDRM)
+			rtdm_event_destroy(&done);
+		break;
+	default:
+		ret = -EINVAL;
+	}
+
+	return ret;
+}
+
+static ssize_t udd_read_rt(struct rtdm_fd *fd,
+			   void __user *buf, size_t len)
+{
+	struct udd_context *context;
+	struct udd_reserved *ur;
+	struct udd_device *udd;
+	rtdm_lockctx_t ctx;
+	ssize_t ret = 0;
+	u32 count;
+
+	if (len != sizeof(count))
+		return -EINVAL;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->irq == UDD_IRQ_NONE)
+		return -EIO;
+
+	ur = &udd->__reserved;
+	context = rtdm_fd_to_private(fd);
+
+	cobalt_atomic_enter(ctx);
+
+	if (ur->event_count != context->event_count)
+		rtdm_event_clear(&ur->pulse);
+	else
+		ret = rtdm_event_wait(&ur->pulse);
+
+	count = ur->event_count;
+
+	cobalt_atomic_leave(ctx);
+
+	if (ret)
+		return ret;
+
+	context->event_count = count;
+	ret = rtdm_copy_to_user(fd, buf, &count, sizeof(count));
+
+	return ret ?: sizeof(count);
+}
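+
+/*
+ * On the user side this amounts to a blocking event-count read; a
+ * hedged sketch, with fd assumed to refer to an open UDD device:
+ *
+ *	__u32 count;
+ *
+ *	// Blocks until the next IRQ event, then yields the total count.
+ *	read(fd, &count, sizeof(count));
+ */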
+
+static ssize_t udd_write_rt(struct rtdm_fd *fd,
+			    const void __user *buf, size_t len)
+{
+	int ret;
+	u32 val;
+
+	if (len != sizeof(val))
+		return -EINVAL;
+
+	ret = rtdm_safe_copy_from_user(fd, &val, buf, sizeof(val));
+	if (ret)
+		return ret;
+
+	ret = udd_ioctl_rt(fd, val ? UDD_RTIOC_IRQEN : UDD_RTIOC_IRQDIS, NULL);
+
+	return ret ?: len;
+}
+
+static int udd_select(struct rtdm_fd *fd, struct xnselector *selector,
+		      unsigned int type, unsigned int index)
+{
+	struct udd_device *udd;
+
+	udd = container_of(rtdm_fd_device(fd), struct udd_device, __reserved.device);
+	if (udd->irq == UDD_IRQ_NONE)
+		return -EIO;
+
+	return rtdm_event_select(&udd->__reserved.pulse,
+				 selector, type, index);
+}
+
+static int udd_irq_handler(rtdm_irq_t *irqh)
+{
+	struct udd_device *udd;
+	int ret;
+
+	udd = rtdm_irq_get_arg(irqh, struct udd_device);
+	ret = udd->ops.interrupt(udd);
+	if (ret == RTDM_IRQ_HANDLED)
+		udd_notify_event(udd);
+
+	return ret;
+}
+
+static int mapper_open(struct rtdm_fd *fd, int oflags)
+{
+	int minor = rtdm_fd_minor(fd);
+	struct udd_device *udd;
+
+	/*
+	 * Check that we are opening a mapper instance pointing at a
+	 * valid memory region. e.g. UDD creates the companion device
+	 * "foo,mapper" on the fly when registering the main device
+	 * "foo". Userland may then open("/dev/foo,mapper0", ...)
+	 * followed by a call to mmap() for mapping the memory region
+	 * #0 as declared in the mem_regions[] array of the main
+	 * device.
+	 *
+	 * We support sparse region arrays, so the device minor shall
+	 * match the mem_regions[] index exactly.
+	 */
+	if (minor < 0 || minor >= UDD_NR_MAPS)
+		return -EIO;
+
+	udd = udd_get_device(fd);
+	if (udd->mem_regions[minor].type == UDD_MEM_NONE)
+		return -EIO;
+
+	return 0;
+}
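+
+/*
+ * A hedged user-space sketch of the flow described above, assuming a
+ * main device "foo" declaring memory region #0 of length len:
+ *
+ *	int mfd = open("/dev/rtdm/foo,mapper0", O_RDWR);
+ *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE,
+ *		       MAP_SHARED, mfd, 0);
+ *	// p now maps mem_regions[0] of the "foo" device.
+ */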
+
+static void mapper_close(struct rtdm_fd *fd)
+{
+	/* nop */
+}
+
+static int mapper_mmap(struct rtdm_fd *fd, struct vm_area_struct *vma)
+{
+	struct udd_memregion *rn;
+	struct udd_device *udd;
+	size_t len;
+	int ret;
+
+	udd = udd_get_device(fd);
+	if (udd->ops.mmap)
+		/* Offload to client driver if handler is present. */
+		return udd->ops.mmap(fd, vma);
+
+	/* Otherwise DIY using the RTDM helpers. */
+
+	len = vma->vm_end - vma->vm_start;
+	rn = udd->mem_regions + rtdm_fd_minor(fd);
+	if (rn->len < len)
+		/* Can't map that much, bail out. */
+		return -EINVAL;
+
+	switch (rn->type) {
+	case UDD_MEM_PHYS:
+		ret = rtdm_mmap_iomem(vma, rn->addr);
+		break;
+	case UDD_MEM_LOGICAL:
+		ret = rtdm_mmap_kmem(vma, (void *)rn->addr);
+		break;
+	case UDD_MEM_VIRTUAL:
+		ret = rtdm_mmap_vmem(vma, (void *)rn->addr);
+		break;
+	default:
+		ret = -EINVAL;	/* Paranoid, can't happen. */
+	}
+
+	return ret;
+}
+
+static inline int check_memregion(struct udd_device *udd,
+				  struct udd_memregion *rn)
+{
+	if (rn->name == NULL)
+		return -EINVAL;
+
+	if (rn->addr == 0)
+		return -EINVAL;
+
+	if (rn->len == 0)
+		return -EINVAL;
+
+	return 0;
+}
+
+static inline int register_mapper(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	struct rtdm_driver *drv = &ur->mapper_driver;
+	struct udd_mapper *mapper;
+	struct udd_memregion *rn;
+	int n, ret;
+
+	ur->mapper_name = kasformat("%s,mapper%%d", udd->device_name);
+	if (ur->mapper_name == NULL)
+		return -ENOMEM;
+
+	drv->profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(mapper, RTDM_CLASS_MEMORY,
+				  RTDM_SUBCLASS_GENERIC, 0);
+	drv->device_flags = RTDM_NAMED_DEVICE|RTDM_FIXED_MINOR;
+	drv->device_count = UDD_NR_MAPS;
+	drv->base_minor = 0;
+	drv->ops = (struct rtdm_fd_ops){
+		.open		=	mapper_open,
+		.close		=	mapper_close,
+		.mmap		=	mapper_mmap,
+	};
+
+	for (n = 0, mapper = ur->mapdev; n < UDD_NR_MAPS; n++, mapper++) {
+		rn = udd->mem_regions + n;
+		if (rn->type == UDD_MEM_NONE)
+			continue;
+		mapper->dev.driver = drv;
+		mapper->dev.label = ur->mapper_name;
+		mapper->dev.minor = n;
+		mapper->udd = udd;
+		ret = rtdm_dev_register(&mapper->dev);
+		if (ret)
+			goto undo;
+	}
+
+	return 0;
+undo:
+	while (--n >= 0) {
+		/* Skip slots which were never registered. */
+		if (udd->mem_regions[n].type != UDD_MEM_NONE)
+			rtdm_dev_unregister(&ur->mapdev[n].dev);
+	}
+
+	return ret;
+}
+
+/**
+ * @brief Register a UDD device
+ *
+ * This routine registers a mini-driver at the UDD core.
+ *
+ * @param udd @ref udd_device "UDD device descriptor" which should
+ * describe the new device properties.
+ *
+ * @return Zero is returned upon success, otherwise a negative error
+ * code is returned, from the set of error codes defined by
+ * rtdm_dev_register(). In addition, the following error codes can be
+ * returned:
+ *
+ * - -EINVAL, some of the memory regions declared in the
+ *   udd_device.mem_regions[] array have invalid properties, i.e. bad
+ *   type, NULL name, zero length or address. Any undeclared region
+ *   entry from the array must bear the UDD_MEM_NONE type.
+ *
+ * - -EINVAL, if udd_device.irq is different from UDD_IRQ_CUSTOM and
+ * UDD_IRQ_NONE but invalid, causing rtdm_irq_request() to fail.
+ *
+ * - -EINVAL, if udd_device.device_flags contains invalid flags.
+ *
+ * - -ENOSYS, if this service is called while the real-time core is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int udd_register_device(struct udd_device *udd)
+{
+	struct rtdm_device *dev = &udd->__reserved.device;
+	struct udd_reserved *ur = &udd->__reserved;
+	struct rtdm_driver *drv = &ur->driver;
+	struct udd_memregion *rn;
+	int ret, n;
+
+	if (udd->device_flags & RTDM_PROTOCOL_DEVICE)
+		return -EINVAL;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM &&
+	    udd->ops.interrupt == NULL)
+		return -EINVAL;
+
+	for (n = 0, ur->nr_maps = 0; n < UDD_NR_MAPS; n++) {
+		/* We allow sparse region arrays. */
+		rn = udd->mem_regions + n;
+		if (rn->type == UDD_MEM_NONE)
+			continue;
+		ret = check_memregion(udd, rn);
+		if (ret)
+			return ret;
+		udd->__reserved.nr_maps++;
+	}
+
+	drv->profile_info = (struct rtdm_profile_info)
+		RTDM_PROFILE_INFO(udd->device_name, RTDM_CLASS_UDD,
+				  udd->device_subclass, 0);
+	drv->device_flags = RTDM_NAMED_DEVICE|udd->device_flags;
+	drv->device_count = 1;
+	drv->context_size = sizeof(struct udd_context);
+	drv->ops = (struct rtdm_fd_ops){
+		.open = udd_open,
+		.ioctl_rt = udd_ioctl_rt,
+		.read_rt = udd_read_rt,
+		.write_rt = udd_write_rt,
+		.close = udd_close,
+		.select = udd_select,
+	};
+
+	dev->driver = drv;
+	dev->label = udd->device_name;
+
+	ret = rtdm_dev_register(dev);
+	if (ret)
+		return ret;
+
+	if (ur->nr_maps > 0) {
+		ret = register_mapper(udd);
+		if (ret)
+			goto fail_mapper;
+	} else
+		ur->mapper_name = NULL;
+
+	ur->event_count = 0;
+	rtdm_event_init(&ur->pulse, 0);
+	ur->signfy.pid = -1;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM) {
+		ret = rtdm_irq_request(&ur->irqh, udd->irq,
+				       udd_irq_handler, 0,
+				       dev->name, udd);
+		if (ret)
+			goto fail_irq_request;
+	}
+
+	return 0;
+
+fail_irq_request:
+	for (n = 0; n < UDD_NR_MAPS; n++) {
+		rn = udd->mem_regions + n;
+		if (rn->type != UDD_MEM_NONE)
+			rtdm_dev_unregister(&ur->mapdev[n].dev);
+	}
+fail_mapper:
+	rtdm_dev_unregister(dev);
+	if (ur->mapper_name)
+		kfree(ur->mapper_name);
+
+	return ret;
+}
+EXPORT_SYMBOL_GPL(udd_register_device);
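+
+/*
+ * A minimal registration sketch (illustrative only, error handling
+ * omitted); the device name, IRQ number, interrupt handler and region
+ * address are placeholders:
+ *
+ *	static struct udd_device foo_udd = {
+ *		.device_name = "foo",
+ *		.irq = 42,
+ *		.ops = {
+ *			.interrupt = foo_interrupt,
+ *		},
+ *		.mem_regions = {
+ *			[0] = {
+ *				.name = "foo-regs",
+ *				.addr = 0xfe000000,
+ *				.len = 4096,
+ *				.type = UDD_MEM_PHYS,
+ *			},
+ *		},
+ *	};
+ *
+ *	int ret = udd_register_device(&foo_udd);
+ */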
+
+/**
+ * @brief Unregister a UDD device
+ *
+ * This routine unregisters a mini-driver from the UDD core. This
+ * routine waits until all connections to @a udd have been closed
+ * prior to unregistering.
+ *
+ * @param udd UDD device descriptor
+ *
+ * @return Zero is returned upon success, otherwise -ENXIO is returned
+ * if this service is called while the Cobalt kernel is disabled.
+ *
+ * @coretags{secondary-only}
+ */
+int udd_unregister_device(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	struct udd_memregion *rn;
+	int n;
+
+	rtdm_event_destroy(&ur->pulse);
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		rtdm_irq_free(&ur->irqh);
+
+	for (n = 0; n < UDD_NR_MAPS; n++) {
+		rn = udd->mem_regions + n;
+		if (rn->type != UDD_MEM_NONE)
+			rtdm_dev_unregister(&ur->mapdev[n].dev);
+	}
+
+	if (ur->mapper_name)
+		kfree(ur->mapper_name);
+
+	rtdm_dev_unregister(&ur->device);
+
+	return 0;
+}
+EXPORT_SYMBOL_GPL(udd_unregister_device);
+
+/**
+ * @brief Notify an IRQ event for an unmanaged interrupt
+ *
+ * When the UDD core shall hand over the interrupt management for a
+ * device to the mini-driver (see UDD_IRQ_CUSTOM), the latter should
+ * notify the UDD core when IRQ events are received by calling this
+ * service.
+ *
+ * As a result, the UDD core wakes up any Cobalt thread waiting for
+ * interrupts on the device via a read(2) or select(2) call.
+ *
+ * @param udd UDD device descriptor receiving the IRQ.
+ *
+ * @coretags{coreirq-only}
+ *
+ * @note In case the @ref udd_irq_handler "IRQ handler" from the
+ * mini-driver requested the UDD core not to re-enable the interrupt
+ * line, the application may later request the unmasking by issuing
+ * the UDD_RTIOC_IRQEN ioctl(2) command. Writing a non-zero integer to
+ * the device via the write(2) system call has the same effect.
+ */
+void udd_notify_event(struct udd_device *udd)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+	union sigval sival;
+	rtdm_lockctx_t ctx;
+
+	cobalt_atomic_enter(ctx);
+	ur->event_count++;
+	rtdm_event_signal(&ur->pulse);
+	cobalt_atomic_leave(ctx);
+
+	if (ur->signfy.pid > 0) {
+		sival.sival_int = (int)ur->event_count;
+		__cobalt_sigqueue(ur->signfy.pid, ur->signfy.sig, &sival);
+	}
+}
+EXPORT_SYMBOL_GPL(udd_notify_event);
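+
+/*
+ * For UDD_IRQ_CUSTOM setups the mini-driver owns the interrupt and
+ * forwards events itself; a hedged sketch, where foo_udd and the
+ * device acknowledge logic are placeholders:
+ *
+ *	static int foo_custom_isr(rtdm_irq_t *irqh)
+ *	{
+ *		foo_ack_irq();		// hypothetical device access
+ *		udd_notify_event(&foo_udd);
+ *		return RTDM_IRQ_HANDLED;
+ *	}
+ */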
+
+struct irqswitch_work {
+	struct pipeline_inband_work inband_work;
+	rtdm_irq_t *irqh;
+	int enabled;
+	rtdm_event_t *done;
+	struct irqswitch_work *self; /* Revisit: I-pipe requirement */
+};
+
+static void lostage_irqswitch_line(struct pipeline_inband_work *inband_work)
+{
+	struct irqswitch_work *rq;
+
+	/*
+	 * This runs from secondary mode, we may flip the IRQ state
+	 * now.
+	 */
+	rq = container_of(inband_work, struct irqswitch_work, inband_work);
+	if (rq->enabled)
+		rtdm_irq_enable(rq->irqh);
+	else
+		rtdm_irq_disable(rq->irqh);
+
+	if (rq->done)
+		rtdm_event_signal(rq->done);
+
+	xnfree(rq->self);
+}
+
+static void switch_irq_line(rtdm_irq_t *irqh, int enable, rtdm_event_t *done)
+{
+	struct irqswitch_work *rq;
+
+	rq = xnmalloc(sizeof(*rq));
+	if (WARN_ON(rq == NULL))
+		return;
+
+	rq->inband_work = (struct pipeline_inband_work)
+		PIPELINE_INBAND_WORK_INITIALIZER(*rq,
+					lostage_irqswitch_line);
+	rq->irqh = irqh;
+	rq->enabled = enable;
+	rq->done = done;
+	rq->self = rq;	/* Revisit: I-pipe requirement */
+
+	/*
+	 * Not pretty, but we may not traverse the kernel code for
+	 * enabling/disabling IRQ lines from primary mode. Defer this
+	 * to the root context.
+	 */
+	pipeline_post_inband_work(rq);
+}
+
+/**
+ * @brief Enable the device IRQ line
+ *
+ * This service issues a request to the regular kernel for enabling
+ * the IRQ line registered by the driver. If the caller runs in
+ * primary mode, the request is scheduled but deferred until the
+ * current CPU leaves the real-time domain (see note). Otherwise, the
+ * request is immediately handled.
+ *
+ * @param udd The UDD driver handling the IRQ to enable. If no IRQ
+ * was registered by the driver at the UDD core, this routine has no
+ * effect.
+ *
+ * @param done Optional event to signal upon completion. If non-NULL,
+ * @a done will be posted by a call to rtdm_event_signal() after the
+ * interrupt line is enabled.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note The deferral is required as some interrupt management code
+ * involved in enabling interrupt lines may not be safely executed
+ * from primary mode. By passing a valid @a done object address, the
+ * caller can wait for the request to complete, by sleeping on
+ * rtdm_event_wait().
+ */
+void udd_enable_irq(struct udd_device *udd, rtdm_event_t *done)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		switch_irq_line(&ur->irqh, 1, done);
+}
+EXPORT_SYMBOL_GPL(udd_enable_irq);
+
+/**
+ * @brief Disable the device IRQ line
+ *
+ * This service issues a request to the regular kernel for disabling
+ * the IRQ line registered by the driver. If the caller runs in
+ * primary mode, the request is scheduled but deferred until the
+ * current CPU leaves the real-time domain (see note). Otherwise, the
+ * request is immediately handled.
+ *
+ * @param udd The UDD driver handling the IRQ to disable. If no IRQ
+ * was registered by the driver at the UDD core, this routine has no
+ * effect.
+ *
+ * @param done Optional event to signal upon completion. If non-NULL,
+ * @a done will be posted by a call to rtdm_event_signal() after the
+ * interrupt line is disabled.
+ *
+ * @coretags{unrestricted}
+ *
+ * @note The deferral is required as some interrupt management code
+ * involved in disabling interrupt lines may not be safely executed
+ * from primary mode. By passing a valid @a done object address, the
+ * caller can wait for the request to complete, by sleeping on
+ * rtdm_event_wait().
+ */
+void udd_disable_irq(struct udd_device *udd, rtdm_event_t *done)
+{
+	struct udd_reserved *ur = &udd->__reserved;
+
+	if (udd->irq != UDD_IRQ_NONE && udd->irq != UDD_IRQ_CUSTOM)
+		switch_irq_line(&ur->irqh, 0, done);
+}
+EXPORT_SYMBOL_GPL(udd_disable_irq);
+
+/**
+ * @brief RTDM file descriptor to target UDD device
+ *
+ * Retrieves the UDD device from a RTDM file descriptor.
+ *
+ * @param fd File descriptor received by an ancillary I/O handler
+ * from a mini-driver based on the UDD core.
+ *
+ * @return A pointer to the UDD device which @a fd refers to.
+ *
+ * @note This service is intended for use by mini-drivers based on the
+ * UDD core exclusively. Passing file descriptors referring to other
+ * RTDM devices will certainly lead to invalid results.
+ *
+ * @coretags{mode-unrestricted}
+ */
+struct udd_device *udd_get_device(struct rtdm_fd *fd)
+{
+	struct rtdm_device *dev = rtdm_fd_device(fd);
+
+	if (dev->driver->profile_info.class_id == RTDM_CLASS_MEMORY)
+		return container_of(dev, struct udd_mapper, dev)->udd;
+
+	return container_of(dev, struct udd_device, __reserved.device);
+}
+EXPORT_SYMBOL_GPL(udd_get_device);
+
+MODULE_LICENSE("GPL");
diff --git a/kernel/xenomai-v3.2.4/lib/Makefile.am b/kernel/xenomai-v3.2.4/lib/Makefile.am
new file mode 100644
index 0000000..e909030
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/Makefile.am
@@ -0,0 +1,32 @@
+SUBDIRS = boilerplate
+
+if XENO_COBALT
+SUBDIRS += cobalt
+else
+SUBDIRS += mercury
+endif
+
+SUBDIRS +=		\
+	copperplate	\
+	smokey		\
+	alchemy		\
+	vxworks		\
+	psos
+
+if XENO_COBALT
+SUBDIRS += 		\
+	analogy		\
+	trank
+endif
+
+DIST_SUBDIRS = 		\
+	alchemy		\
+	analogy		\
+	boilerplate	\
+	cobalt		\
+	copperplate	\
+	mercury		\
+	psos		\
+	smokey		\
+	trank		\
+	vxworks
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/COPYING b/kernel/xenomai-v3.2.4/lib/alchemy/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/Makefile.am b/kernel/xenomai-v3.2.4/lib/alchemy/Makefile.am
new file mode 100644
index 0000000..d350912
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/Makefile.am
@@ -0,0 +1,53 @@
+lib_LTLIBRARIES = libalchemy@CORE@.la
+
+libalchemy@CORE@_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 0:0:0
+
+libalchemy@CORE@_la_LIBADD =						\
+	@XENO_CORE_LDADD@					\
+	$(top_builddir)/lib/copperplate/libcopperplate@CORE@.la
+
+libalchemy@CORE@_la_SOURCES =	\
+	init.c		\
+	internal.c	\
+	internal.h	\
+	reference.h	\
+	alarm.c		\
+	alarm.h		\
+	buffer.c	\
+	buffer.h	\
+	cond.c		\
+	cond.h		\
+	event.c		\
+	event.h		\
+	heap.c		\
+	heap.h		\
+	mutex.c		\
+	mutex.h		\
+	queue.c		\
+	queue.h		\
+	task.c		\
+	task.h		\
+	sem.c		\
+	sem.h		\
+	timer.c		\
+	timer.h
+
+if XENO_COBALT
+libalchemy@CORE@_la_SOURCES +=	\
+	pipe.c			\
+	pipe.h
+endif
+
+libalchemy@CORE@_la_CPPFLAGS =				\
+	@XENO_USER_CFLAGS@				\
+	-I$(top_srcdir)/include				\
+	-I$(top_srcdir)/lib
+
+EXTRA_DIST = testsuite
+
+SPARSE = sparse
+
+sparse:
+	@for i in $(libalchemy@CORE@_la_SOURCES); do \
+		$(SPARSE) $(CHECKFLAGS) $(srcdir)/$$i; \
+	done
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/alarm.c b/kernel/xenomai-v3.2.4/lib/alchemy/alarm.c
new file mode 100644
index 0000000..2298114
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/alarm.c
@@ -0,0 +1,411 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include "reference.h"
+#include "internal.h"
+#include "alarm.h"
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_alarm Alarm services
+ *
+ * General-purpose watchdog timers
+ *
+ * Alarms are general-purpose watchdog timers. Alchemy tasks may
+ * create any number of alarms and use them to run a user-defined
+ * handler after a specified initial delay has elapsed. Alarms can be
+ * either one-shot or periodic; in the latter case, the real-time
+ * system automatically reprograms the alarm for the next shot
+ * according to a user-defined interval value.
+ *
+ * @{
+ */
+
+struct pvcluster alchemy_alarm_table;
+
+static DEFINE_NAME_GENERATOR(alarm_namegen, "alarm",
+			     struct alchemy_alarm, name);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static int alarm_registry_open(struct fsobj *fsobj, void *priv)
+{
+	struct fsobstack *o = priv;
+	struct alchemy_alarm *acb;
+	struct itimerspec itmspec;
+	unsigned long expiries;
+	struct timespec delta;
+	int ret;
+
+	acb = container_of(fsobj, struct alchemy_alarm, fsobj);
+	ret = timerobj_lock(&acb->tmobj);
+	if (ret)
+		return ret;
+	itmspec = acb->itmspec;
+	expiries = acb->expiries;
+	timerobj_unlock(&acb->tmobj);
+
+	fsobstack_init(o);
+
+	fsobstack_grow_format(o, "%-12s%-12s%-12s\n",
+			      "[EXPIRIES]", "[DISTANCE]", "[INTERVAL]");
+	clockobj_get_distance(&alchemy_clock, &itmspec, &delta);
+	fsobstack_grow_format(o, "%8lu%10ld\"%ld%10ld\"%ld\n",
+			      expiries,
+			      delta.tv_sec,
+			      delta.tv_nsec / 100000000,
+			      itmspec.it_interval.tv_sec,
+			      itmspec.it_interval.tv_nsec / 100000000);
+
+	fsobstack_finish(o);
+
+	return 0;
+}
+
+static struct registry_operations registry_ops = {
+	.open		= alarm_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static struct alchemy_alarm *get_alchemy_alarm(RT_ALARM *alarm, int *err_r)
+{
+	struct alchemy_alarm *acb;
+
+	if (bad_pointer(alarm))
+		goto bad_handle;
+
+	acb = (struct alchemy_alarm *)alarm->handle;
+	if (bad_pointer(acb) || timerobj_lock(&acb->tmobj))
+		goto bad_handle;
+
+	if (acb->magic == alarm_magic)
+		return acb;
+bad_handle:
+	*err_r = -EINVAL;
+
+	return NULL;
+}
+
+static inline void put_alchemy_alarm(struct alchemy_alarm *acb)
+{
+	timerobj_unlock(&acb->tmobj);
+}
+
+static void alarm_handler(struct timerobj *tmobj)
+{
+	struct alchemy_alarm *acb;
+
+	acb = container_of(tmobj, struct alchemy_alarm, tmobj);
+	acb->expiries++;
+	acb->handler(acb->arg);
+}
+
+/**
+ * @fn int rt_alarm_create(RT_ALARM *alarm,const char *name,void (*handler)(void *arg),void *arg)
+ * @brief Create an alarm object.
+ *
+ * This routine creates an object triggering an alarm routine at a
+ * specified time in the future. Alarms can be periodic or one-shot,
+ * depending on the reload interval value passed to rt_alarm_start().
+ *
+ * @param alarm The address of an alarm descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * alarm. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created alarm into the object registry.
+ *
+ * @param handler The address of the routine to call when the alarm
+ * expires. This routine is passed the @a arg value.
+ *
+ * @param arg A user-defined opaque argument passed to the @a handler.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * local pool in order to create the alarm.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered alarm.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ *
+ * @note Alarms are process-private objects and thus cannot be shared
+ * by multiple processes, even if they belong to the same Xenomai
+ * session.
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_alarm_create, (RT_ALARM *alarm, const char *name,
+				    void (*handler)(void *arg),
+				    void *arg))
+#else
+int rt_alarm_create(RT_ALARM *alarm, const char *name,
+		    void (*handler)(void *arg),
+		    void *arg)
+#endif
+{
+	struct alchemy_alarm *acb;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	acb = pvmalloc(sizeof(*acb));
+	if (acb == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	ret = timerobj_init(&acb->tmobj);
+	if (ret)
+		goto fail;
+
+	generate_name(acb->name, name, &alarm_namegen);
+	acb->handler = handler;
+	acb->arg = arg;
+	acb->expiries = 0;
+	memset(&acb->itmspec, 0, sizeof(acb->itmspec));
+	acb->magic = alarm_magic;
+
+	registry_init_file_obstack(&acb->fsobj, &registry_ops);
+	ret = __bt(registry_add_file(&acb->fsobj, O_RDONLY,
+				     "/alchemy/alarms/%s", acb->name));
+	if (ret)
+		warning("failed to export alarm %s to registry, %s",
+			acb->name, symerror(ret));
+
+	if (pvcluster_addobj(&alchemy_alarm_table, acb->name, &acb->cobj)) {
+		registry_destroy_file(&acb->fsobj);
+		timerobj_destroy(&acb->tmobj);
+		ret = -EEXIST;
+		goto fail;
+	}
+
+	alarm->handle = (uintptr_t)acb;
+
+	CANCEL_RESTORE(svc);
+
+	return 0;
+fail:
+	pvfree(acb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
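+
+/*
+ * Minimal usage sketch (illustrative; the pulse descriptor, counter
+ * and handler names are hypothetical):
+ *
+ * @code
+ * static RT_ALARM pulse;
+ * static unsigned long pulses;
+ *
+ * static void pulse_handler(void *arg)
+ * {
+ *	pulses++;	// timer context: async-safe services only
+ * }
+ *
+ * ret = rt_alarm_create(&pulse, "pulse", pulse_handler, NULL);
+ * @endcode
+ */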
+
+/**
+ * @fn int rt_alarm_delete(RT_ALARM *alarm)
+ * @brief Delete an alarm.
+ *
+ * This routine deletes an alarm object previously created by a call
+ * to rt_alarm_create().
+ *
+ * @param alarm The alarm descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a alarm is not a valid alarm descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_alarm_delete, (RT_ALARM *alarm))
+#else
+int rt_alarm_delete(RT_ALARM *alarm)
+#endif
+{
+	struct alchemy_alarm *acb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	acb = get_alchemy_alarm(alarm, &ret);
+	if (acb == NULL)
+		goto out;
+
+	timerobj_destroy(&acb->tmobj);
+	pvcluster_delobj(&alchemy_alarm_table, &acb->cobj);
+	acb->magic = ~alarm_magic;
+	registry_destroy_file(&acb->fsobj);
+	pvfree(acb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * Start an alarm.
+ *
+ * This routine programs the trigger date of an alarm object. An alarm
+ * can be either periodic or one-shot, depending on the @a interval
+ * value.
+ *
+ * Alarm handlers are always called on behalf of Xenomai's internal
+ * timer event routine. Therefore, Xenomai routines which can be
+ * called from such handlers are restricted to the set of services
+ * available on behalf of an asynchronous context.
+ *
+ * This service overrides any previous setup of the expiry date and
+ * reload interval for the alarm.
+ *
+ * @param alarm The alarm descriptor.
+ *
+ * @param value The relative date of the first expiry, expressed in
+ * clock ticks (see note).
+ *
+ * @param interval The reload value of the alarm. It is a periodic
+ * interval value to be used for reprogramming the next alarm shot,
+ * expressed in clock ticks (see note). If @a interval is equal to
+ * TM_INFINITE, the alarm will not be reloaded after it has expired.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a alarm is not a valid alarm descriptor.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @note Each of the initial @a value and @a interval is interpreted
+ * as a multiple of the Alchemy clock resolution (see
+ * --alchemy-clock-resolution option, defaults to 1 nanosecond).
+ */
+int rt_alarm_start(RT_ALARM *alarm, RTIME value, RTIME interval)
+{
+	struct alchemy_alarm *acb;
+	struct itimerspec it;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	acb = get_alchemy_alarm(alarm, &ret);
+	if (acb == NULL)
+		goto out;
+
+	clockobj_ticks_to_timeout(&alchemy_clock, value, &it.it_value);
+	clockobj_ticks_to_timespec(&alchemy_clock, interval, &it.it_interval);
+	acb->itmspec = it;
+	ret = timerobj_start(&acb->tmobj, alarm_handler, &it);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
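+
+/*
+ * Illustrative sketch: assuming the default 1-ns Alchemy clock
+ * resolution, the call below arms the (hypothetical) pulse alarm to
+ * fire first after 1 ms, then every 500 us. Passing TM_INFINITE as
+ * the interval would make it a one-shot alarm instead.
+ *
+ * @code
+ * ret = rt_alarm_start(&pulse, 1000000, 500000);
+ * @endcode
+ */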
+
+/**
+ * @fn int rt_alarm_stop(RT_ALARM *alarm)
+ * @brief Stop an alarm.
+ *
+ * This routine disables an alarm object, preventing any further
+ * expiry until it is re-enabled via rt_alarm_start().
+ *
+ * @param alarm The alarm descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a alarm is not a valid alarm descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_alarm_stop(RT_ALARM *alarm)
+{
+	struct alchemy_alarm *acb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	acb = get_alchemy_alarm(alarm, &ret);
+	if (acb == NULL)
+		goto out;
+
+	memset(&acb->itmspec, 0, sizeof(acb->itmspec));
+	ret = timerobj_stop(&acb->tmobj);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_alarm_inquire(RT_ALARM *alarm, RT_ALARM_INFO *info)
+ * @brief Query alarm status.
+ *
+ * This routine returns the status information about the specified @a
+ * alarm.
+ *
+ * @param alarm The alarm descriptor.
+ *
+ * @param info A pointer to the @ref RT_ALARM_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a alarm is not a valid alarm descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_alarm_inquire(RT_ALARM *alarm, RT_ALARM_INFO *info)
+{
+	struct alchemy_alarm *acb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	acb = get_alchemy_alarm(alarm, &ret);
+	if (acb == NULL)
+		goto out;
+
+	strcpy(info->name, acb->name);
+	info->expiries = acb->expiries;
+	info->active = !(alchemy_poll_mode(&acb->itmspec.it_value) &&
+			 alchemy_poll_mode(&acb->itmspec.it_interval));
+
+	put_alchemy_alarm(acb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
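+
+/*
+ * Illustrative sketch: polling the (hypothetical) pulse alarm for its
+ * expiry count, then disarming it once a thousand shots have elapsed.
+ *
+ * @code
+ * RT_ALARM_INFO info;
+ *
+ * ret = rt_alarm_inquire(&pulse, &info);
+ * if (ret == 0 && info.expiries >= 1000)
+ *	ret = rt_alarm_stop(&pulse);
+ * @endcode
+ */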
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/alarm.h b/kernel/xenomai-v3.2.4/lib/alchemy/alarm.h
new file mode 100644
index 0000000..3fa4296
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/alarm.h
@@ -0,0 +1,43 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_ALARM_H
+#define _ALCHEMY_ALARM_H
+
+#include <copperplate/registry-obstack.h>
+#include <copperplate/timerobj.h>
+#include <copperplate/cluster.h>
+#include <alchemy/alarm.h>
+
+#define alarm_magic	0x8888ebeb
+
+struct alchemy_alarm {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	struct timerobj tmobj;
+	struct pvclusterobj cobj;
+	void (*handler)(void *arg);
+	void *arg;
+	struct itimerspec itmspec;
+	unsigned long expiries;
+	struct fsobj fsobj;
+};
+
+extern struct pvcluster alchemy_alarm_table;
+
+#endif /* _ALCHEMY_ALARM_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/buffer.c b/kernel/xenomai-v3.2.4/lib/alchemy/buffer.c
new file mode 100644
index 0000000..1fa281f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/buffer.c
@@ -0,0 +1,953 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include "reference.h"
+#include "internal.h"
+#include "buffer.h"
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_buffer Buffer services
+ *
+ * Lightweight FIFO IPC mechanism
+ *
+ * A buffer is a lightweight IPC mechanism, implementing a fast,
+ * one-way producer-consumer data path. All messages written are
+ * buffered in a single memory area in strict FIFO order, until read
+ * either in blocking or non-blocking mode.
+ *
+ * Messages are always handled atomically on the write side (i.e. no
+ * interleaving, no short writes), whilst only complete messages are
+ * normally returned to the read side. However, short reads may happen
+ * under a well-defined situation (see note in rt_buffer_read()),
+ * although they can be fully avoided by proper use of the buffer.
+ *
+ * @{
+ */
+struct syncluster alchemy_buffer_table;
+
+static DEFINE_NAME_GENERATOR(buffer_namegen, "buffer",
+			     struct alchemy_buffer, name);
+
+DEFINE_SYNC_LOOKUP(buffer, RT_BUFFER);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static inline
+void prepare_waiter_cache(struct obstack *cache, int item_count)
+{
+	const struct alchemy_buffer *bcb;
+	obstack_blank(cache, item_count * sizeof(bcb->name));
+}
+
+static int prepare_grant_cache(struct fsobstack *o,
+			       struct obstack *cache, int item_count)
+{
+	fsobstack_grow_format(o, "--\n[INPUT-WAIT]\n");
+	prepare_waiter_cache(cache, item_count);
+
+	return 0;
+}
+
+static int prepare_drain_cache(struct fsobstack *o,
+			       struct obstack *cache, int item_count)
+{
+	fsobstack_grow_format(o, "--\n[OUTPUT-WAIT]\n");
+	prepare_waiter_cache(cache, item_count);
+
+	return 0;
+}
+
+static size_t collect_waiter_data(void *p, struct threadobj *thobj)
+{
+	const char *name = threadobj_get_name(thobj);
+	int len = strlen(name);
+
+	strcpy(p, name);
+	*(char *)(p + len) = '\n';
+
+	return len + 1;
+}
+
+static struct fsobstack_syncops fill_grant_ops = {
+	.prepare_cache = prepare_grant_cache,
+	.collect_data = collect_waiter_data,
+};
+
+static struct fsobstack_syncops fill_drain_ops = {
+	.prepare_cache = prepare_drain_cache,
+	.collect_data = collect_waiter_data,
+};
+
+static int buffer_registry_open(struct fsobj *fsobj, void *priv)
+{
+	struct fsobstack *o = priv;
+	struct alchemy_buffer *bcb;
+	struct syncstate syns;
+	size_t bufsz, fillsz;
+	int ret, mode;
+
+	bcb = container_of(fsobj, struct alchemy_buffer, fsobj);
+
+	ret = syncobj_lock(&bcb->sobj, &syns);
+	if (ret)
+		return -EIO;
+
+	bufsz = bcb->bufsz;
+	fillsz = bcb->fillsz;
+	mode = bcb->mode;
+
+	syncobj_unlock(&bcb->sobj, &syns);
+
+	fsobstack_init(o);
+
+	fsobstack_grow_format(o, "%6s  %10s  %9s\n",
+			      "[TYPE]", "[TOTALMEM]", "[USEDMEM]");
+
+	fsobstack_grow_format(o, " %s   %9Zu  %9Zu\n",
+			      mode & B_PRIO ? "PRIO" : "FIFO",
+			      bufsz, fillsz);
+
+	fsobstack_grow_syncobj_grant(o, &bcb->sobj, &fill_grant_ops);
+	fsobstack_grow_syncobj_drain(o, &bcb->sobj, &fill_drain_ops);
+
+	fsobstack_finish(o);
+
+	return 0;
+}
+
+static struct registry_operations registry_ops = {
+	.open		= buffer_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static void buffer_finalize(struct syncobj *sobj)
+{
+	struct alchemy_buffer *bcb;
+
+	bcb = container_of(sobj, struct alchemy_buffer, sobj);
+	registry_destroy_file(&bcb->fsobj);
+	xnfree(__mptr(bcb->buf));
+	xnfree(bcb);
+}
+fnref_register(libalchemy, buffer_finalize);
+
+/**
+ * @fn int rt_buffer_create(RT_BUFFER *bf, const char *name, size_t bufsz, int mode)
+ * @brief Create an IPC buffer.
+ *
+ * This routine creates an IPC object that allows tasks to send and
+ * receive data asynchronously via a memory buffer. Data may be of an
+ * arbitrary length, although this IPC is best suited for small to
+ * medium-sized messages, since data always have to be copied to the
+ * buffer during transit. Large messages may be more efficiently
+ * handled by message queues (RT_QUEUE).
+ *
+ * @param bf The address of a buffer descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * buffer. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created buffer into the object registry.
+ *
+ * @param bufsz The size of the buffer space available to hold
+ * data. The required memory is obtained from the main heap.
+ *
+ * @param mode The buffer creation mode. The following flags can be
+ * OR'ed into this bitmask, each of them affecting the new buffer:
+ *
+ * - B_FIFO makes tasks pend in FIFO order for reading data from the
+ *   buffer.
+ *
+ * - B_PRIO makes tasks pend in priority order for reading data from
+ *   the buffer.
+ *
+ * This parameter also applies to tasks blocked on the buffer's write
+ * side (see rt_buffer_write()).
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mode is invalid or @a bufsz is zero.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the buffer.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered buffer.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @note Buffers can be shared by multiple processes which belong to
+ * the same Xenomai session.
+ */
+int rt_buffer_create(RT_BUFFER *bf, const char *name,
+		     size_t bufsz, int mode)
+{
+	struct alchemy_buffer *bcb;
+	struct service svc;
+	int sobj_flags = 0;
+	void *buf;
+	int ret;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	if (bufsz == 0 || (mode & ~B_PRIO) != 0)
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	bcb = xnmalloc(sizeof(*bcb));
+	if (bcb == NULL) {
+		ret = __bt(-ENOMEM);
+		goto fail;
+	}
+
+	buf = xnmalloc(bufsz);
+	if (buf == NULL) {
+		ret = __bt(-ENOMEM);
+		goto fail_bufalloc;
+	}
+
+	bcb->buf = __moff(buf);
+	generate_name(bcb->name, name, &buffer_namegen);
+	bcb->mode = mode;
+	bcb->bufsz = bufsz;
+	bcb->rdoff = 0;
+	bcb->wroff = 0;
+	bcb->fillsz = 0;
+	if (mode & B_PRIO)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	ret = syncobj_init(&bcb->sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libalchemy, buffer_finalize));
+	if (ret)
+		goto fail_syncinit;
+
+	bcb->magic = buffer_magic;
+
+	registry_init_file_obstack(&bcb->fsobj, &registry_ops);
+	ret = __bt(registry_add_file(&bcb->fsobj, O_RDONLY,
+				     "/alchemy/buffers/%s", bcb->name));
+	if (ret)
+		warning("failed to export buffer %s to registry, %s",
+			bcb->name, symerror(ret));
+
+	ret = syncluster_addobj(&alchemy_buffer_table, bcb->name, &bcb->cobj);
+	if (ret)
+		goto fail_register;
+
+	bf->handle = mainheap_ref(bcb, uintptr_t);
+
+	CANCEL_RESTORE(svc);
+
+	return 0;
+
+fail_register:
+	registry_destroy_file(&bcb->fsobj);
+	syncobj_uninit(&bcb->sobj);
+fail_syncinit:
+	xnfree(buf);
+fail_bufalloc:
+	xnfree(bcb);
+fail:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
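+
+/*
+ * Minimal usage sketch (illustrative; the ring descriptor name is
+ * hypothetical): create a 1024-byte buffer whose waiters are queued
+ * by priority.
+ *
+ * @code
+ * static RT_BUFFER ring;
+ *
+ * ret = rt_buffer_create(&ring, "ring", 1024, B_PRIO);
+ * @endcode
+ */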
+
+/**
+ * @fn int rt_buffer_delete(RT_BUFFER *bf)
+ * @brief Delete an IPC buffer.
+ *
+ * This routine deletes a buffer object previously created by a call
+ * to rt_buffer_create().
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a bf is not a valid buffer descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_buffer_delete(RT_BUFFER *bf)
+{
+	struct alchemy_buffer *bcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	bcb = get_alchemy_buffer(bf, &syns, &ret);
+	if (bcb == NULL)
+		goto out;
+
+	syncluster_delobj(&alchemy_buffer_table, &bcb->cobj);
+	bcb->magic = ~buffer_magic;
+	syncobj_destroy(&bcb->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn ssize_t rt_buffer_read(RT_BUFFER *bf, void *ptr, size_t len, RTIME timeout)
+ * @brief Read from an IPC buffer (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_buffer_read_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @param ptr A pointer to a memory area which will be written upon
+ * success with the received data.
+ *
+ * @param len The length in bytes of the memory area pointed to by @a
+ * ptr.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until enough
+ * data is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case not enough data is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_buffer_read_until(RT_BUFFER *bf, void *ptr, size_t len, RTIME abs_timeout)
+ * @brief Read from an IPC buffer (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_buffer_read_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @param ptr A pointer to a memory area which will be written upon
+ * success with the received data.
+ *
+ * @param len The length in bytes of the memory area pointed to by @a
+ * ptr.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * enough data is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case not enough data is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_buffer_read_timed(RT_BUFFER *bf, void *ptr, size_t len, const struct timespec *abs_timeout)
+ * @brief Read from an IPC buffer.
+ *
+ * This routine reads the next message from the specified buffer. If
+ * no message is available on entry, the caller is allowed to block
+ * until enough data is written to the buffer, or a timeout elapses.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @param ptr A pointer to a memory area which will be written upon
+ * success with the received data.
+ *
+ * @param len The length in bytes of the memory area pointed to by @a
+ * ptr. Under normal circumstances, rt_buffer_read_timed() only
+ * returns entire messages as specified by the @a len argument, or an
+ * error value. However, short reads are allowed when a potential
+ * deadlock situation is detected (see note below).
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for a
+ * message to be available from the buffer. Passing NULL causes the caller
+ * to block indefinitely until enough data is available. Passing
+ * { .tv_sec = 0, .tv_nsec = 0 } causes the service to return immediately
+ * without blocking in case not enough data is available.
+ *
+ * @return The number of bytes read from the buffer is returned upon
+ * success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
+ * complete message arrives.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and not enough data is immediately available on
+ * entry to form a complete message.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before enough data became available to form a complete
+ * message.
+ *
+ * - -EINVAL is returned if @a bf is not a valid buffer descriptor, or
+ * @a len is greater than the actual buffer length.
+ *
+ * - -EIDRM is returned if @a bf is deleted while the caller was
+ * waiting for data. In such an event, @a bf is no longer valid upon
+ * return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @note A short read (i.e. fewer bytes returned than requested by @a
+ * len) may happen whenever a pathological use of the buffer is
+ * encountered. This condition only arises when the system detects
+ * that one or more writers are waiting for sending data, while a
+ * reader would have to wait for receiving a complete message at the
+ * same time. For instance, consider the following sequence, involving
+ * a 1024-byte buffer (bf) and two threads:
+ *
+ * writer thread > rt_buffer_write(&bf, ptr, 1, TM_INFINITE);
+ *        (one byte to read, 1023 bytes available for sending)
+ * writer thread > rt_buffer_write(&bf, ptr, 1024, TM_INFINITE);
+ *        (writer blocks - no space for another 1024-byte message)
+ * reader thread > rt_buffer_read(&bf, ptr, 1024, TM_INFINITE);
+ *        (short read - a truncated (1-byte) message is returned)
+ *
+ * In order to prevent both threads from waiting for each other
+ * indefinitely, a short read is allowed, which may be completed by a
+ * subsequent call to rt_buffer_read() or rt_buffer_read_until().  If
+ * that case arises, thread priorities, buffer and/or message lengths
+ * should likely be fixed, in order to eliminate this condition.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+ssize_t rt_buffer_read_timed(RT_BUFFER *bf,
+			     void *ptr, size_t size,
+			     const struct timespec *abs_timeout)
+{
+	struct alchemy_buffer_wait *wait = NULL;
+	struct alchemy_buffer *bcb;
+	struct threadobj *thobj;
+	size_t len, rbytes, n;
+	struct syncstate syns;
+	struct service svc;
+	size_t rdoff;
+	int ret = 0;
+	void *p;
+
+	len = size;
+	if (len == 0)
+		return 0;
+
+	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	bcb = get_alchemy_buffer(bf, &syns, &ret);
+	if (bcb == NULL)
+		goto out;
+
+	/*
+	 * We may only return complete messages to readers, so there
+	 * is no point in waiting for messages which are larger than
+	 * what the buffer can hold.
+	 */
+	if (len > bcb->bufsz) {
+		ret = -EINVAL;
+		goto done;
+	}
+redo:
+	for (;;) {
+		/*
+		 * We should be able to read a complete message of the
+		 * requested length, or block.
+		 */
+		if (bcb->fillsz < len)
+			goto wait;
+
+		/* Read from the buffer in a circular way. */
+		rdoff = bcb->rdoff;
+		rbytes = len;
+		p = ptr;
+
+		do {
+			if (rdoff + rbytes > bcb->bufsz)
+				n = bcb->bufsz - rdoff;
+			else
+				n = rbytes;
+			memcpy(p, __mptr(bcb->buf) + rdoff, n);
+			p += n;
+			rdoff = (rdoff + n) % bcb->bufsz;
+			rbytes -= n;
+		} while (rbytes > 0);
+
+		bcb->fillsz -= len;
+		bcb->rdoff = rdoff;
+		ret = (ssize_t)len;
+
+		/*
+		 * Wake up all threads waiting for the buffer to
+		 * drain, if we freed enough room for the leading one
+		 * to post its message.
+		 */
+		thobj = syncobj_peek_drain(&bcb->sobj);
+		if (thobj == NULL)
+			goto done;
+
+		wait = threadobj_get_wait(thobj);
+		if (wait->size + bcb->fillsz <= bcb->bufsz)
+			syncobj_drain(&bcb->sobj);
+
+		goto done;
+	wait:
+		if (alchemy_poll_mode(abs_timeout)) {
+			ret = -EWOULDBLOCK;
+			goto done;
+		}
+
+		/*
+		 * Check whether writers are already waiting for
+		 * sending data, while we are about to wait for
+		 * receiving some. In such a case, we have a
+		 * pathological use of the buffer. We must allow for a
+		 * short read to prevent a deadlock.
+		 */
+		if (bcb->fillsz > 0 && syncobj_count_drain(&bcb->sobj)) {
+			len = bcb->fillsz;
+			goto redo;
+		}
+
+		if (wait == NULL)
+			wait = threadobj_prepare_wait(struct alchemy_buffer_wait);
+
+		wait->size = len;
+
+		ret = syncobj_wait_grant(&bcb->sobj, abs_timeout, &syns);
+		if (ret) {
+			if (ret == -EIDRM)
+				goto out;
+			break;
+		}
+	}
+done:
+	put_alchemy_buffer(bcb, &syns);
+out:
+	if (wait)
+		threadobj_finish_wait();
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
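+
+/*
+ * Illustrative sketch using the relative scalar-timeout variants of
+ * rt_buffer_write() and rt_buffer_read() (the ring buffer and msg
+ * payload are hypothetical): both sides exchange fixed-size, complete
+ * messages, so no short read can occur.
+ *
+ * @code
+ * struct msg { int seq; };
+ * struct msg out = { .seq = 1 }, in;
+ * ssize_t n;
+ *
+ * n = rt_buffer_write(&ring, &out, sizeof(out), TM_INFINITE);
+ * if (n == sizeof(out))
+ *	n = rt_buffer_read(&ring, &in, sizeof(in), TM_INFINITE);
+ * @endcode
+ */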
+
+/**
+ * @fn ssize_t rt_buffer_write(RT_BUFFER *bf, const void *ptr, size_t len, RTIME timeout)
+ * @brief Write to an IPC buffer (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_buffer_write_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @param ptr The address of the message data to be written to the
+ * buffer.
+ *
+ * @param len The length in bytes of the message data.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until enough
+ * buffer space is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case of buffer space
+ * shortage.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_buffer_write_until(RT_BUFFER *bf, const void *ptr, size_t len, RTIME abs_timeout)
+ * @brief Write to an IPC buffer (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_buffer_write_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @param ptr The address of the message data to be written to the
+ * buffer.
+ *
+ * @param len The length in bytes of the message data.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * enough buffer space is available. Passing TM_NONBLOCK causes the
+ * service to return immediately without blocking in case of buffer
+ * space shortage.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_buffer_write_timed(RT_BUFFER *bf, const void *ptr, size_t len, const struct timespec *abs_timeout)
+ * @brief Write to an IPC buffer.
+ *
+ * This routine writes a message to the specified buffer. If not
+ * enough buffer space is available on entry to hold the message, the
+ * caller is allowed to block until enough room is freed, or a timeout
+ * elapses, whichever comes first.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @param ptr The address of the message data to be written to the
+ * buffer.
+ *
+ * @param len The length in bytes of the message data. Zero is a valid
+ * value, in which case the buffer is left untouched, and zero is
+ * returned to the caller.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for enough
+ * buffer space to be available to hold the message. Passing NULL causes
+ * the caller to block indefinitely until enough buffer space is
+ * available. Passing { .tv_sec = 0, .tv_nsec = 0 } causes the service
+ * to return immediately without blocking in case of buffer space
+ * shortage.
+ *
+ * @return The number of bytes written to the buffer is returned upon
+ * success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if the absolute @a abs_timeout date is
+ * reached before enough buffer space is available to hold the
+ * message.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and no buffer space is immediately available on
+ * entry to hold the message.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before enough buffer space became available to hold
+ * the message.
+ *
+ * - -EINVAL is returned if @a bf is not a valid buffer descriptor, or
+ * @a len is greater than the actual buffer length.
+ *
+ * - -EIDRM is returned if @a bf is deleted while the caller was
+ * waiting for buffer space. In such an event, @a bf is no longer
+ * valid upon return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+ssize_t rt_buffer_write_timed(RT_BUFFER *bf,
+			      const void *ptr, size_t size,
+			      const struct timespec *abs_timeout)
+{
+	struct alchemy_buffer_wait *wait = NULL;
+	struct alchemy_buffer *bcb;
+	struct threadobj *thobj;
+	size_t len, rbytes, n;
+	struct syncstate syns;
+	struct service svc;
+	const void *p;
+	size_t wroff;
+	int ret = 0;
+
+	len = size;
+	if (len == 0)
+		return 0;
+
+	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	bcb = get_alchemy_buffer(bf, &syns, &ret);
+	if (bcb == NULL)
+		goto out;
+
+	/*
+	 * We may only send complete messages, so there is no point in
+	 * accepting messages which are larger than what the buffer
+	 * can hold.
+	 */
+	if (len > bcb->bufsz) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	for (;;) {
+		/*
+		 * We should be able to write the entire message at
+		 * once, or block.
+		 */
+		if (bcb->fillsz + len > bcb->bufsz)
+			goto wait;
+
+		/* Write to the buffer in a circular way. */
+		wroff = bcb->wroff;
+		rbytes = len;
+		p = ptr;
+
+		do {
+			if (wroff + rbytes > bcb->bufsz)
+				n = bcb->bufsz - wroff;
+			else
+				n = rbytes;
+
+			memcpy(__mptr(bcb->buf) + wroff, p, n);
+			p += n;
+			wroff = (wroff + n) % bcb->bufsz;
+			rbytes -= n;
+		} while (rbytes > 0);
+
+		bcb->fillsz += len;
+		bcb->wroff = wroff;
+		ret = (ssize_t)len;
+
+		/*
+		 * Wake up all threads waiting for input, if we
+		 * accumulated enough data to feed the leading one.
+		 */
+		thobj = syncobj_peek_grant(&bcb->sobj);
+		if (thobj == NULL)
+			goto done;
+
+		wait = threadobj_get_wait(thobj);
+		if (wait->size <= bcb->fillsz)
+			syncobj_grant_all(&bcb->sobj);
+
+		goto done;
+	wait:
+		if (alchemy_poll_mode(abs_timeout)) {
+			ret = -EWOULDBLOCK;
+			goto done;
+		}
+
+		if (wait == NULL)
+			wait = threadobj_prepare_wait(struct alchemy_buffer_wait);
+
+		wait->size = len;
+
+		/*
+		 * Check whether readers are already waiting for
+		 * receiving data, while we are about to wait for
+		 * sending some. In such a case, we have the converse
+		 * pathological use of the buffer. We must kick
+		 * readers to allow for a short read to prevent a
+		 * deadlock.
+		 *
+		 * XXX: instead of broadcasting a general wake up
+		 * event, we could be smarter and wake up only the
+		 * number of waiters required to consume the amount of
+		 * data we want to send, but this does not seem worth
+		 * the burden: this is an error condition, we just
+		 * have to mitigate its effect, avoiding a deadlock.
+		 */
+		if (bcb->fillsz > 0 && syncobj_count_grant(&bcb->sobj))
+			syncobj_grant_all(&bcb->sobj);
+
+		ret = syncobj_wait_drain(&bcb->sobj, abs_timeout, &syns);
+		if (ret) {
+			if (ret == -EIDRM)
+				goto out;
+			break;
+		}
+	}
+done:
+	put_alchemy_buffer(bcb, &syns);
+out:
+	if (wait)
+		threadobj_finish_wait();
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
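+
+/*
+ * A minimal producer/consumer sketch for the buffer services above.
+ * It is illustrative only: error checking is omitted, and the buffer
+ * is assumed to have been set up elsewhere with
+ * rt_buffer_create(&bf, "iobuf", 16384, B_FIFO).
+ *
+ *   RT_BUFFER bf;
+ *   char msg[] = "hello";
+ *   char inbuf[sizeof(msg)];
+ *
+ *   // Producer: block until the whole message fits in the buffer.
+ *   rt_buffer_write(&bf, msg, sizeof(msg), TM_INFINITE);
+ *
+ *   // Consumer: block until the requested amount can be read back.
+ *   rt_buffer_read(&bf, inbuf, sizeof(inbuf), TM_INFINITE);
+ */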
+
+/**
+ * @fn int rt_buffer_clear(RT_BUFFER *bf)
+ * @brief Clear an IPC buffer.
+ *
+ * This routine discards any data currently held in the buffer.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a bf is not a valid buffer descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_buffer_clear(RT_BUFFER *bf)
+{
+	struct alchemy_buffer *bcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	bcb = get_alchemy_buffer(bf, &syns, &ret);
+	if (bcb == NULL)
+		goto out;
+
+	bcb->wroff = 0;
+	bcb->rdoff = 0;
+	bcb->fillsz = 0;
+	syncobj_drain(&bcb->sobj);
+
+	put_alchemy_buffer(bcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_buffer_inquire(RT_BUFFER *bf, RT_BUFFER_INFO *info)
+ * @brief Query buffer status.
+ *
+ * This routine returns the status information about the specified
+ * buffer.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * @param info A pointer to the @ref RT_BUFFER_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a bf is not a valid buffer descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_buffer_inquire(RT_BUFFER *bf, RT_BUFFER_INFO *info)
+{
+	struct alchemy_buffer *bcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	bcb = get_alchemy_buffer(bf, &syns, &ret);
+	if (bcb == NULL)
+		goto out;
+
+	info->iwaiters = syncobj_count_grant(&bcb->sobj);
+	info->owaiters = syncobj_count_drain(&bcb->sobj);
+	info->totalmem = bcb->bufsz;
+	info->availmem = bcb->bufsz - bcb->fillsz;
+	strcpy(info->name, bcb->name);
+
+	put_alchemy_buffer(bcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_buffer_bind(RT_BUFFER *bf, const char *name, RTIME timeout)
+ * @brief Bind to an IPC buffer.
+ *
+ * This routine creates a new descriptor to refer to an existing IPC
+ * buffer identified by its symbolic name. If the object does not
+ * exist on entry, the caller may block until a buffer of the given
+ * name is created.
+ *
+ * @param bf The address of a buffer descriptor filled in by the
+ * operation. The contents of this memory are undefined upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the
+ * buffer to bind to. This string should match the object name
+ * argument passed to rt_buffer_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_buffer_bind(RT_BUFFER *bf,
+		   const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_buffer_table,
+				   timeout,
+				   offsetof(struct alchemy_buffer, cobj),
+				   &bf->handle);
+}
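+
+/*
+ * A short binding sketch for a second process of the same Xenomai
+ * session, assuming a peer already called
+ * rt_buffer_create(&bf, "iobuf", 16384, B_FIFO):
+ *
+ *   RT_BUFFER bf;
+ *
+ *   // Wait up to 1s (at the default 1 ns tick) for "iobuf" to be
+ *   // registered, filling in a local descriptor on success.
+ *   if (rt_buffer_bind(&bf, "iobuf", 1000000000ULL) == 0)
+ *           rt_buffer_unbind(&bf); // drop the binding when done
+ */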
+
+/**
+ * @fn int rt_buffer_unbind(RT_BUFFER *bf)
+ * @brief Unbind from an IPC buffer.
+ *
+ * @param bf The buffer descriptor.
+ *
+ * This routine releases a previous binding to an IPC buffer. After
+ * this call has returned, the descriptor is no longer valid for
+ * referencing this object.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_buffer_unbind(RT_BUFFER *bf)
+{
+	bf->handle = 0;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/buffer.h b/kernel/xenomai-v3.2.4/lib/alchemy/buffer.h
new file mode 100644
index 0000000..3c99d0c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/buffer.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_BUFFER_H
+#define _ALCHEMY_BUFFER_H
+
+#include <copperplate/registry-obstack.h>
+#include <copperplate/syncobj.h>
+#include <copperplate/cluster.h>
+#include <alchemy/buffer.h>
+
+struct alchemy_buffer {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	struct syncobj sobj;
+	struct clusterobj cobj;
+	size_t bufsz;
+	int mode;
+	dref_type(void *) buf;
+	size_t rdoff;
+	size_t wroff;
+	size_t fillsz;
+	struct fsobj fsobj;
+};
+
+struct alchemy_buffer_wait {
+	size_t size;
+};
+
+#define buffer_magic	0x8989ebeb
+
+extern struct syncluster alchemy_buffer_table;
+
+#endif /* _ALCHEMY_BUFFER_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/cond.c b/kernel/xenomai-v3.2.4/lib/alchemy/cond.c
new file mode 100644
index 0000000..e4ad714
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/cond.c
@@ -0,0 +1,497 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include "internal.h"
+#include "cond.h"
+#include "timer.h"
+#include "mutex.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_cond Condition variable services
+ *
+ * POSIXish condition variable mechanism
+ *
+ * A condition variable is a synchronization mechanism which allows
+ * tasks to suspend execution until some predicate on some arbitrary
+ * shared data is satisfied.
+ *
+ * The basic operations on conditions are: signal the condition (when
+ * the predicate becomes true), and wait for the condition, blocking
+ * the task execution until another task signals the condition.  A
+ * condition variable must always be associated with a mutex, to avoid
+ * a well-known race condition where a task prepares to wait on a
+ * condition variable and another task signals the condition just
+ * before the first task actually waits on it.
+ *
+ * @{
+ */
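+
+/*
+ * The canonical usage pattern is sketched below: the predicate is
+ * always re-checked in a loop around the wait, with the mutex held
+ * across both the predicate check and the update of the shared
+ * data. All names are illustrative; the objects are assumed to have
+ * been created with rt_mutex_create() and rt_cond_create().
+ *
+ *   RT_MUTEX lock;
+ *   RT_COND drained;
+ *   int pending;     // shared data guarded by the mutex
+ *
+ *   // Waiter: sleep until no item is pending.
+ *   rt_mutex_acquire(&lock, TM_INFINITE);
+ *   while (pending > 0)
+ *           rt_cond_wait(&drained, &lock, TM_INFINITE);
+ *   rt_mutex_release(&lock);
+ *
+ *   // Signaler: update the predicate, then signal the change.
+ *   rt_mutex_acquire(&lock, TM_INFINITE);
+ *   if (--pending == 0)
+ *           rt_cond_signal(&drained);
+ *   rt_mutex_release(&lock);
+ */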
+
+struct syncluster alchemy_cond_table;
+
+static DEFINE_NAME_GENERATOR(cond_namegen, "cond",
+			     struct alchemy_cond, name);
+
+DEFINE_LOOKUP_PRIVATE(cond, RT_COND);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static ssize_t cond_registry_read(struct fsobj *fsobj,
+				  char *buf, size_t size, off_t offset,
+				  void *priv)
+{
+	return 0;		/* FIXME */
+}
+
+static struct registry_operations registry_ops = {
+	.read	= cond_registry_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+/**
+ * @fn int rt_cond_create(RT_COND *cond, const char *name)
+ * @brief Create a condition variable.
+ *
+ * Create a synchronization object which allows tasks to suspend
+ * execution until some predicate on shared data is satisfied.
+ *
+ * @param cond The address of a condition variable descriptor which
+ * can be later used to identify uniquely the created object, upon
+ * success of this call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * condition variable. When non-NULL and non-empty, a copy of this
+ * string is used for indexing the created condition variable into the
+ * object registry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the condition variable.
+ *
+ * - -EEXIST is returned if @a name conflicts with an already
+ * registered condition variable.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @note Condition variables can be shared by multiple processes which
+ * belong to the same Xenomai session.
+ *
+ * @attention If the underlying threading library does not support
+ * pthread_condattr_setclock(), timings with Alchemy condition
+ * variables will be based on CLOCK_REALTIME, and may therefore be
+ * affected by updates to the system date (e.g. NTP). This typically
+ * concerns legacy setups based on the linuxthreads library.
+ * In the normal case, timings are based on CLOCK_MONOTONIC.
+ */
+int rt_cond_create(RT_COND *cond, const char *name)
+{
+	struct alchemy_cond *ccb;
+	pthread_condattr_t cattr;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	ccb = xnmalloc(sizeof(*ccb));
+	if (ccb == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * XXX: Alchemy condvars are paired with Alchemy mutex
+	 * objects, so we must rely on POSIX condvars directly.
+	 */
+	generate_name(ccb->name, name, &cond_namegen);
+	pthread_condattr_init(&cattr);
+	pthread_condattr_setpshared(&cattr, mutex_scope_attribute);
+	/*
+	 * pthread_condattr_setclock() may return ENOSYS over Cobalt
+	 * if not actually implemented by the threading library, but
+	 * only by the compat placeholder. In such a case, timings
+	 * will be based on CLOCK_REALTIME, which is an accepted
+	 * restriction.
+	 */
+	pthread_condattr_setclock(&cattr, CLOCK_COPPERPLATE);
+	__RT(pthread_cond_init(&ccb->cond, &cattr));
+	pthread_condattr_destroy(&cattr);
+	ccb->magic = cond_magic;
+
+	registry_init_file(&ccb->fsobj, &registry_ops, 0);
+	ret = __bt(registry_add_file(&ccb->fsobj, O_RDONLY,
+				     "/alchemy/condvars/%s", ccb->name));
+	if (ret) {
+		warning("failed to export condvar %s to registry, %s",
+			ccb->name, symerror(ret));
+		ret = 0;
+	}
+
+	ret = syncluster_addobj(&alchemy_cond_table, ccb->name, &ccb->cobj);
+	if (ret) {
+		registry_destroy_file(&ccb->fsobj);
+		__RT(pthread_cond_destroy(&ccb->cond));
+		xnfree(ccb);
+	} else
+		cond->handle = mainheap_ref(ccb, uintptr_t);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_cond_delete(RT_COND *cond)
+ * @brief Delete a condition variable.
+ *
+ * This routine deletes a condition variable object previously created
+ * by a call to rt_cond_create().
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a cond is not a valid condition variable
+ * descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * - -EBUSY is returned upon an attempt to destroy the object
+ * referenced by @a cond while it is referenced (for example, while
+ * being used in a rt_cond_wait(), rt_cond_wait_timed() or
+ * rt_cond_wait_until() by another task).
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_cond_delete(RT_COND *cond)
+{
+	struct alchemy_cond *ccb;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	ccb = find_alchemy_cond(cond, &ret);
+	if (ccb == NULL)
+		goto out;
+
+	ret = -__RT(pthread_cond_destroy(&ccb->cond));
+	if (ret)
+		goto out;
+
+	ccb->magic = ~cond_magic;
+	registry_destroy_file(&ccb->fsobj);
+	syncluster_delobj(&alchemy_cond_table, &ccb->cobj);
+	xnfree(ccb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_cond_signal(RT_COND *cond)
+ * @brief Signal a condition variable.
+ *
+ * If the condition variable @a cond is pended, this routine
+ * immediately unblocks the first waiting task (in queuing
+ * order).
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a cond is not a valid condition variable
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_cond_signal(RT_COND *cond)
+{
+	struct alchemy_cond *ccb;
+	int ret = 0;
+
+	ccb = find_alchemy_cond(cond, &ret);
+	if (ccb == NULL)
+		return ret;
+
+	return -__RT(pthread_cond_signal(&ccb->cond));
+}
+
+/**
+ * @fn int rt_cond_broadcast(RT_COND *cond)
+ * @brief Broadcast a condition variable.
+ *
+ * All tasks currently waiting on the condition variable are
+ * immediately unblocked.
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a cond is not a valid condition variable
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_cond_broadcast(RT_COND *cond)
+{
+	struct alchemy_cond *ccb;
+	int ret = 0;
+
+	ccb = find_alchemy_cond(cond, &ret);
+	if (ccb == NULL)
+		return ret;
+
+	return -__RT(pthread_cond_broadcast(&ccb->cond));
+}
+
+/**
+ * @fn int rt_cond_wait(RT_COND *cond, RT_MUTEX *mutex, RTIME timeout)
+ * @brief Wait on a condition variable (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_cond_wait_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * @param mutex The address of the mutex serializing the access to the
+ * shared data.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely. Passing
+ * TM_NONBLOCK causes the caller to return immediately without blocking.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn int rt_cond_wait_until(RT_COND *cond, RT_MUTEX *mutex, RTIME abs_timeout)
+ * @brief Wait on a condition variable (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_cond_wait_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * @param mutex The address of the mutex serializing the access to the
+ * shared data.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely.
+ * Passing TM_NONBLOCK causes the caller to return immediately
+ * without blocking.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn int rt_cond_wait_timed(RT_COND *cond, RT_MUTEX *mutex, const struct timespec *abs_timeout)
+ * @brief Wait on a condition variable.
+ *
+ * This service atomically releases the mutex and blocks the calling
+ * task, until the condition variable @a cond is signaled or a timeout
+ * occurs, whichever comes first. The mutex is re-acquired before
+ * returning from this service.
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * @param mutex The address of the mutex serializing the access to the
+ * shared data.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for the
+ * condition variable to be signaled. Passing NULL causes the caller to
+ * block indefinitely. Passing { .tv_sec = 0, .tv_nsec = 0 } causes the
+ * caller to return immediately without blocking.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before the
+ * condition variable is signaled.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 }.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task.
+ *
+ * - -EINVAL is returned if @a cond is not a valid condition variable
+ * descriptor.
+ *
+ * - -EIDRM is returned if @a cond is deleted while the caller was
+ * waiting on the condition variable. In such an event, @a cond is
+ * no longer valid upon return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+int rt_cond_wait_timed(RT_COND *cond, RT_MUTEX *mutex,
+		       const struct timespec *abs_timeout)
+{
+	struct alchemy_mutex *mcb;
+	struct alchemy_cond *ccb;
+	int ret = 0;
+
+	if (alchemy_poll_mode(abs_timeout))
+		return -EWOULDBLOCK;
+
+	ccb = find_alchemy_cond(cond, &ret);
+	if (ccb == NULL)
+		return ret;
+
+	mcb = find_alchemy_mutex(mutex, &ret);
+	if (mcb == NULL)
+		return ret;
+
+	if (abs_timeout)
+		ret = -__RT(pthread_cond_timedwait(&ccb->cond,
+						   &mcb->lock, abs_timeout));
+	else
+		ret = -__RT(pthread_cond_wait(&ccb->cond, &mcb->lock));
+
+	return ret;
+}
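+
+/*
+ * A bounded wait can be sketched with the scalar variant
+ * rt_cond_wait_until(), reusing the names from the sketch above and
+ * assuming the default Alchemy clock resolution of 1 ns, so that
+ * ticks equal nanoseconds:
+ *
+ *   RTIME deadline = rt_timer_read() + 1000000ULL; // 1 ms from now
+ *   int ret = 0;
+ *
+ *   rt_mutex_acquire(&lock, TM_INFINITE);
+ *   while (pending > 0 && ret != -ETIMEDOUT)
+ *           ret = rt_cond_wait_until(&drained, &lock, deadline);
+ *   rt_mutex_release(&lock);
+ */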
+
+/**
+ * @fn int rt_cond_inquire(RT_COND *cond, RT_COND_INFO *info)
+ * @brief Query condition variable status.
+ *
+ * This routine returns the status information about the specified
+ * condition variable.
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * @param info A pointer to the @ref RT_COND_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a cond is not a valid condition variable
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_cond_inquire(RT_COND *cond, RT_COND_INFO *info)
+{
+	struct alchemy_cond *ccb;
+	int ret = 0;
+
+	ccb = find_alchemy_cond(cond, &ret);
+	if (ccb == NULL)
+		return ret;
+
+	strcpy(info->name, ccb->name);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_cond_bind(RT_COND *cond, const char *name, RTIME timeout)
+ * @brief Bind to a condition variable.
+ *
+ * This routine creates a new descriptor to refer to an existing
+ * condition variable identified by its symbolic name. If the object
+ * does not exist on entry, the caller may block until a condition
+ * variable of the given name is created.
+ *
+ * @param cond The address of a condition variable descriptor filled
+ * in by the operation. The contents of this memory are undefined
+ * upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the
+ * condition variable to bind to. This string should match the object
+ * name argument passed to rt_cond_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_cond_bind(RT_COND *cond,
+		 const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_cond_table,
+				   timeout,
+				   offsetof(struct alchemy_cond, cobj),
+				   &cond->handle);
+}
+
+/**
+ * @fn int rt_cond_unbind(RT_COND *cond)
+ * @brief Unbind from a condition variable.
+ *
+ * @param cond The condition variable descriptor.
+ *
+ * This routine releases a previous binding to a condition
+ * variable. After this call has returned, the descriptor is no longer
+ * valid for referencing this object.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_cond_unbind(RT_COND *cond)
+{
+	cond->handle = 0;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/cond.h b/kernel/xenomai-v3.2.4/lib/alchemy/cond.h
new file mode 100644
index 0000000..a807636
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/cond.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_COND_H
+#define _ALCHEMY_COND_H
+
+#include <copperplate/registry.h>
+#include <copperplate/cluster.h>
+#include <alchemy/cond.h>
+
+struct threadobj;
+
+struct alchemy_cond {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	pthread_cond_t cond;
+	struct clusterobj cobj;
+	struct fsobj fsobj;
+};
+
+#define cond_magic	0x8686ebeb
+
+extern struct syncluster alchemy_cond_table;
+
+#endif /* _ALCHEMY_COND_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/event.c b/kernel/xenomai-v3.2.4/lib/alchemy/event.c
new file mode 100644
index 0000000..8ccd99b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/event.c
@@ -0,0 +1,622 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/registry-obstack.h>
+#include "reference.h"
+#include "internal.h"
+#include "event.h"
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_event Event flag group services
+ *
+ * Inter-task notification mechanism based on discrete flags
+ *
+ * An event flag group is a synchronization object represented by a
+ * long-word structure; every available bit in this word represents a
+ * user-defined event flag.
+ *
+ * When a bit is set, the associated event is said to have
+ * occurred. Xenomai tasks can use this mechanism to signal the
+ * occurrence of particular events to other tasks.
+ *
+ * Tasks can either wait for events to occur in a conjunctive manner
+ * (all awaited events must have occurred to satisfy the wait
+ * request), or in a disjunctive way (at least one of the awaited
+ * events must have occurred to satisfy the wait request).
+ *
+ * @{
+ */
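+
+/*
+ * A minimal sketch of both pend modes, assuming an event group
+ * created elsewhere with rt_event_create(&ev, "flags", 0, EV_PRIO);
+ * the flag values are illustrative:
+ *
+ *   #define EVT_RX_READY 0x1
+ *   #define EVT_TX_DONE  0x2
+ *
+ *   unsigned int mask;
+ *
+ *   // Disjunctive wait: resume when either flag gets posted.
+ *   rt_event_wait(&ev, EVT_RX_READY | EVT_TX_DONE, &mask,
+ *                 EV_ANY, TM_INFINITE);
+ *
+ *   // Conjunctive wait: resume only once both flags are posted.
+ *   rt_event_wait(&ev, EVT_RX_READY | EVT_TX_DONE, &mask,
+ *                 EV_ALL, TM_INFINITE);
+ *
+ *   // Poster side:
+ *   rt_event_signal(&ev, EVT_RX_READY);
+ */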
+
+struct syncluster alchemy_event_table;
+
+static DEFINE_NAME_GENERATOR(event_namegen, "event",
+			     struct alchemy_event, name);
+
+DEFINE_LOOKUP_PRIVATE(event, RT_EVENT);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static int event_registry_open(struct fsobj *fsobj, void *priv)
+{
+	struct eventobj_waitentry *waitlist, *p;
+	struct fsobstack *o = priv;
+	struct alchemy_event *evcb;
+	unsigned int val;
+	size_t waitsz;
+	int ret;
+
+	evcb = container_of(fsobj, struct alchemy_event, fsobj);
+
+	waitsz = sizeof(*p) * 256;
+	waitlist = __STD(malloc(waitsz));
+	if (waitlist == NULL)
+		return -ENOMEM;
+
+	ret = eventobj_inquire(&evcb->evobj, waitsz, waitlist, &val);
+	if (ret < 0)
+		goto out;
+
+	fsobstack_init(o);
+
+	fsobstack_grow_format(o, "=%lx\n", val);
+
+	if (ret) {
+		fsobstack_grow_format(o, "--\n[WAITER]\n");
+		p = waitlist;
+		do {
+			fsobstack_grow_format(o, "%s\n", p->name);
+			p++;
+		} while (--ret > 0);
+	}
+
+	fsobstack_finish(o);
+out:
+	__STD(free(waitlist));
+
+	return ret;
+}
+
+static struct registry_operations registry_ops = {
+	.open		= event_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static void event_finalize(struct eventobj *evobj)
+{
+	struct alchemy_event *evcb = container_of(evobj, struct alchemy_event, evobj);
+
+	registry_destroy_file(&evcb->fsobj);
+	/* We should never fail here, so we backtrace. */
+	__bt(syncluster_delobj(&alchemy_event_table, &evcb->cobj));
+	evcb->magic = ~event_magic;
+	xnfree(evcb);
+}
+fnref_register(libalchemy, event_finalize);
+
+/**
+ * @fn int rt_event_create(RT_EVENT *event, const char *name, unsigned int ivalue, int mode)
+ * @brief Create an event flag group.
+ *
+ * Event groups provide for task synchronization by allowing a set of
+ * flags (or "events") to be waited for and posted atomically. An
+ * event group contains a mask of received events; an arbitrary set of
+ * event flags can be pended or posted in a single operation.
+ *
+ * @param event The address of an event descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * event. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created event into the object registry.
+ *
+ * @param ivalue The initial value of the group's event mask.
+ *
+ * @param mode The event group creation mode. The following flags can
+ * be OR'ed into this bitmask:
+ *
+ * - EV_FIFO makes tasks pend in FIFO order on the event flag group.
+ *
+ * - EV_PRIO makes tasks pend in priority order on the event flag group.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mode is invalid.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the event flag group.
+ *
+ * - -EEXIST is returned if @a name conflicts with an already
+ * registered event flag group.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @note Event flag groups can be shared by multiple processes which
+ * belong to the same Xenomai session.
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_event_create, (RT_EVENT *event, const char *name,
+				    unsigned int ivalue, int mode))
+#else
+int rt_event_create(RT_EVENT *event, const char *name,
+		    unsigned int ivalue, int mode)
+#endif
+{
+	int evobj_flags = 0, ret = 0;
+	struct alchemy_event *evcb;
+	struct service svc;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	if (mode & ~EV_PRIO)
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	evcb = xnmalloc(sizeof(*evcb));
+	if (evcb == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	generate_name(evcb->name, name, &event_namegen);
+	if (mode & EV_PRIO)
+		evobj_flags = EVOBJ_PRIO;
+
+	ret = eventobj_init(&evcb->evobj, ivalue, evobj_flags,
+			    fnref_put(libalchemy, event_finalize));
+	if (ret) {
+		xnfree(evcb);
+		goto out;
+	}
+
+	evcb->magic = event_magic;
+
+	registry_init_file_obstack(&evcb->fsobj, &registry_ops);
+	ret = __bt(registry_add_file(&evcb->fsobj, O_RDONLY,
+				     "/alchemy/events/%s", evcb->name));
+	if (ret) {
+		warning("failed to export event %s to registry, %s",
+			evcb->name, symerror(ret));
+		ret = 0;
+	}
+
+	ret = syncluster_addobj(&alchemy_event_table, evcb->name, &evcb->cobj);
+	if (ret) {
+		registry_destroy_file(&evcb->fsobj);
+		eventobj_uninit(&evcb->evobj);
+		xnfree(evcb);
+	} else
+		event->handle = mainheap_ref(evcb, uintptr_t);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_event_delete(RT_EVENT *event)
+ * @brief Delete an event flag group.
+ *
+ * This routine deletes an event flag group previously created by a
+ * call to rt_event_create().
+ *
+ * @param event The event descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a event is not a valid event flag group
+ * descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_event_delete(RT_EVENT *event)
+{
+	struct alchemy_event *evcb;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	evcb = find_alchemy_event(event, &ret);
+	if (evcb == NULL)
+		goto out;
+
+	/*
+	 * XXX: we rely on copperplate's eventobj to check for event
+	 * existence, so we refrain from altering the object memory
+	 * until we know it was valid. So the only safe place to
+	 * negate the magic tag, deregister from the cluster and
+	 * release the memory is in the finalizer routine, which is
+	 * only called for valid objects.
+	 */
+	ret = eventobj_destroy(&evcb->evobj);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_event_wait(RT_EVENT *event, unsigned int mask, unsigned int *mask_r, int mode, RTIME timeout)
+ * @brief Wait for an arbitrary set of events (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_event_wait_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param event The event descriptor.
+ *
+ * @param mask The set of bits to wait for.
+ *
+ * @param mask_r The value of the event mask at the time the task was
+ * readied.
+ *
+ * @param mode The pend mode.
+ *
+ * @param timeout A delay expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * the request is satisfied. Passing TM_NONBLOCK causes the service
+ * to return without blocking in case the request cannot be satisfied
+ * immediately.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn int rt_event_wait_until(RT_EVENT *event, unsigned int mask, unsigned int *mask_r, int mode, RTIME abs_timeout)
+ * @brief Wait for an arbitrary set of events (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_event_wait_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param event The event descriptor.
+ *
+ * @param mask The set of bits to wait for.
+ *
+ * @param mask_r The value of the event mask at the time the task was
+ * readied.
+ *
+ * @param mode The pend mode.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * the request is satisfied. Passing TM_NONBLOCK causes the service
+ * to return without blocking in case the request cannot be satisfied
+ * immediately.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn int rt_event_wait_timed(RT_EVENT *event, unsigned int mask, unsigned int *mask_r, int mode, const struct timespec *abs_timeout)
+ * @brief Wait for an arbitrary set of events.
+ *
+ * Waits for one or more events to be signaled in @a event, or until a
+ * timeout elapses.
+ *
+ * @param event The event descriptor.
+ *
+ * @param mask The set of bits to wait for. Passing zero causes this
+ * service to return immediately with a success value; the current
+ * value of the event mask is also copied to @a mask_r.
+ *
+ * @param mask_r The value of the event mask at the time the task was
+ * readied.
+ *
+ * @param mode The pend mode. The following flags can be OR'ed into
+ * this bitmask, each of them affecting the operation:
+ *
+ * - EV_ANY makes the task pend in disjunctive mode (i.e. OR); this
+ * means that the request is fulfilled when at least one bit set into
+ * @a mask is set in the current event mask.
+ *
+ * - EV_ALL makes the task pend in conjunctive mode (i.e. AND); this
+ * means that the request is fulfilled when all bits set into @a
+ * mask are set in the current event mask.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for the
+ * request to be satisfied. Passing NULL causes the caller to block
+ * indefinitely until the request is satisfied. Passing
+ * { .tv_sec = 0, .tv_nsec = 0 } causes the service to return without
+ * blocking in case the request cannot be satisfied immediately.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before the
+ * request is satisfied.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and the requested flags are not set on entry to the
+ * call.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the request is satisfied.
+ *
+ * - -EINVAL is returned if @a mode is invalid, or @a event is not a
+ * valid event flag group descriptor.
+ *
+ * - -EIDRM is returned if @a event is deleted while the caller was
+ * sleeping on it. In such a case, @a event is no longer valid upon
+ * return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+int rt_event_wait_timed(RT_EVENT *event,
+			unsigned int mask, unsigned int *mask_r,
+			int mode, const struct timespec *abs_timeout)
+{
+	int evobj_mode = 0, ret = 0;
+	struct alchemy_event *evcb;
+	struct service svc;
+
+	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
+		return -EPERM;
+
+	if (mode & ~EVOBJ_ANY)
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	evcb = find_alchemy_event(event, &ret);
+	if (evcb == NULL)
+		goto out;
+
+	if (mode & EV_ANY)
+		evobj_mode = EVOBJ_ANY;
+
+	ret = eventobj_wait(&evcb->evobj, mask, mask_r,
+			    evobj_mode, abs_timeout);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_event_signal(RT_EVENT *event, unsigned int mask)
+ * @brief Signal an event.
+ *
+ * Post a set of flags to @a event. All tasks having their wait
+ * request satisfied as a result of this operation are immediately
+ * readied.
+ *
+ * @param event The event descriptor.
+ *
+ * @param mask The set of events to be posted.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a event is not an event flag group
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_event_signal,
+	     (RT_EVENT *event, unsigned int mask))
+#else
+int rt_event_signal(RT_EVENT *event, unsigned int mask)
+#endif
+{
+	struct alchemy_event *evcb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	evcb = find_alchemy_event(event, &ret);
+	if (evcb == NULL)
+		goto out;
+
+	ret = eventobj_post(&evcb->evobj, mask);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_event_clear(RT_EVENT *event,unsigned int mask,unsigned int *mask_r)
+ * @brief Clear event flags.
+ *
+ * This routine clears a set of flags from @a event.
+ *
+ * @param event The event descriptor.
+ *
+ * @param mask The set of event flags to be cleared.
+ *
+ * @param mask_r If non-NULL, @a mask_r is the address of a memory
+ * location which will receive the previous value of the event flag
+ * group before the flags are cleared.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a event is not a valid event flag group
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_event_clear,
+	     (RT_EVENT *event, unsigned int mask, unsigned int *mask_r))
+#else
+int rt_event_clear(RT_EVENT *event,
+		   unsigned int mask, unsigned int *mask_r)
+#endif
+{
+	struct alchemy_event *evcb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	evcb = find_alchemy_event(event, &ret);
+	if (evcb == NULL)
+		goto out;
+
+	ret = eventobj_clear(&evcb->evobj, mask, mask_r);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_event_inquire(RT_EVENT *event, RT_EVENT_INFO *info)
+ * @brief Query event flag group status.
+ *
+ * This routine returns the status information about @a event.
+ *
+ * @param event The event descriptor.
+ *
+ * @param info A pointer to the @ref RT_EVENT_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a event is not a valid event flag group
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_event_inquire(RT_EVENT *event, RT_EVENT_INFO *info)
+{
+	struct alchemy_event *evcb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	evcb = find_alchemy_event(event, &ret);
+	if (evcb == NULL)
+		goto out;
+
+	ret = eventobj_inquire(&evcb->evobj, 0, NULL, &info->value);
+	if (ret < 0)
+		goto out;
+
+	strcpy(info->name, evcb->name);
+	info->nwaiters = ret;
+	ret = 0;
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_event_bind(RT_EVENT *event, const char *name, RTIME timeout)
+ * @brief Bind to an event flag group.
+ *
+ * This routine creates a new descriptor to refer to an existing event
+ * flag group identified by its symbolic name. If the object does not
+ * exist on entry, the caller may block until an event flag group of
+ * the given name is created.
+ *
+ * @param event The address of an event flag group descriptor filled
+ * in by the operation. The contents of this memory are undefined
+ * upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the event
+ * flag group to bind to. This string should match the object name
+ * argument passed to rt_event_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_event_bind(RT_EVENT *event,
+		  const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_event_table,
+				   timeout,
+				   offsetof(struct alchemy_event, cobj),
+				   &event->handle);
+}
+
+/**
+ * @fn int rt_event_unbind(RT_EVENT *event)
+ * @brief Unbind from an event flag group.
+ *
+ * @param event The event descriptor.
+ *
+ * This routine releases a previous binding to an event flag
+ * group. After this call has returned, the descriptor is no longer
+ * valid for referencing this object.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_event_unbind(RT_EVENT *event)
+{
+	event->handle = 0;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/event.h b/kernel/xenomai-v3.2.4/lib/alchemy/event.h
new file mode 100644
index 0000000..7ea53c3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/event.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_EVENT_H
+#define _ALCHEMY_EVENT_H
+
+#include <copperplate/eventobj.h>
+#include <copperplate/registry.h>
+#include <copperplate/cluster.h>
+#include <alchemy/event.h>
+
+struct alchemy_event {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	struct eventobj evobj;
+	struct clusterobj cobj;
+	struct fsobj fsobj;
+};
+
+#define event_magic	0x8484ebeb
+
+extern struct syncluster alchemy_event_table;
+
+#endif /* _ALCHEMY_EVENT_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/heap.c b/kernel/xenomai-v3.2.4/lib/alchemy/heap.c
new file mode 100644
index 0000000..c658c46
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/heap.c
@@ -0,0 +1,679 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/registry-obstack.h>
+#include "reference.h"
+#include "internal.h"
+#include "heap.h"
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_heap Heap management services
+ *
+ * Region of memory dedicated to real-time allocation
+ *
+ * Heaps are regions of memory used for dynamic memory allocation in
+ * a time-bounded fashion. Blocks of memory are allocated and freed in
+ * an arbitrary order and the pattern of allocation and size of blocks
+ * is not known until run time.
+ *
+ * @{
+ */
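+
+/*
+ * A minimal allocation sketch, assuming the heap was created
+ * elsewhere with rt_heap_create(&heap, "pool", 65536, H_PRIO):
+ *
+ *   void *block;
+ *
+ *   // Block for at most 1 ms (at the default 1 ns tick) until a
+ *   // 1 KB chunk becomes available, then hand it back.
+ *   if (rt_heap_alloc(&heap, 1024, 1000000ULL, &block) == 0)
+ *           rt_heap_free(&heap, block);
+ */
+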
+struct syncluster alchemy_heap_table;
+
+static DEFINE_NAME_GENERATOR(heap_namegen, "heap",
+			     struct alchemy_heap, name);
+
+DEFINE_SYNC_LOOKUP(heap, RT_HEAP);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+struct heap_waiter_data {
+	char name[XNOBJECT_NAME_LEN];
+	size_t reqsz;
+};
+
+static int prepare_waiter_cache(struct fsobstack *o,
+				struct obstack *cache, int item_count)
+{
+	fsobstack_grow_format(o, "--\n%-10s  %s\n", "[REQ-SIZE]", "[WAITER]");
+	obstack_blank(cache, item_count * sizeof(struct heap_waiter_data));
+
+	return 0;
+}
+
+static size_t collect_waiter_data(void *p, struct threadobj *thobj)
+{
+	struct alchemy_heap_wait *wait;
+	struct heap_waiter_data data;
+
+	strcpy(data.name, threadobj_get_name(thobj));
+	wait = threadobj_get_wait(thobj);
+	data.reqsz = wait->size;
+	memcpy(p, &data, sizeof(data));
+
+	return sizeof(data);
+}
+
+static size_t format_waiter_data(struct fsobstack *o, void *p)
+{
+	struct heap_waiter_data *data = p;
+
+	fsobstack_grow_format(o, "%9Zu    %s\n",
+			      data->reqsz, data->name);
+
+	return sizeof(*data);
+}
+
+static struct fsobstack_syncops fill_ops = {
+	.prepare_cache = prepare_waiter_cache,
+	.collect_data = collect_waiter_data,
+	.format_data = format_waiter_data,
+};
+
+static int heap_registry_open(struct fsobj *fsobj, void *priv)
+{
+	size_t usable_mem, used_mem;
+	struct fsobstack *o = priv;
+	struct alchemy_heap *hcb;
+	struct syncstate syns;
+	int mode, ret;
+
+	hcb = container_of(fsobj, struct alchemy_heap, fsobj);
+
+	ret = syncobj_lock(&hcb->sobj, &syns);
+	if (ret)
+		return -EIO;
+
+	usable_mem = heapobj_size(&hcb->hobj);
+	used_mem = heapobj_inquire(&hcb->hobj);
+	mode = hcb->mode;
+
+	syncobj_unlock(&hcb->sobj, &syns);
+
+	fsobstack_init(o);
+
+	fsobstack_grow_format(o, "%6s  %10s  %9s\n",
+			      "[TYPE]", "[TOTALMEM]", "[USEDMEM]");
+
+	fsobstack_grow_format(o, " %s  %10Zu %10Zu\n",
+			      mode & H_PRIO ? "PRIO" : "FIFO",
+			      usable_mem,
+			      used_mem);
+
+	fsobstack_grow_syncobj_grant(o, &hcb->sobj, &fill_ops);
+
+	fsobstack_finish(o);
+
+	return 0;
+}
+
+static struct registry_operations registry_ops = {
+	.open		= heap_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static void heap_finalize(struct syncobj *sobj)
+{
+	struct alchemy_heap *hcb;
+
+	hcb = container_of(sobj, struct alchemy_heap, sobj);
+	registry_destroy_file(&hcb->fsobj);
+	heapobj_destroy(&hcb->hobj);
+	xnfree(hcb);
+}
+fnref_register(libalchemy, heap_finalize);
+
+/**
+ * @fn int rt_heap_create(RT_HEAP *heap, const char *name, size_t heapsz, int mode)
+ * @brief Create a heap.
+ *
+ * This routine creates a memory heap suitable for time-bounded
+ * allocation requests of RAM chunks. When not enough memory is
+ * available, tasks may be blocked until their allocation request can
+ * be fulfilled.
+ *
+ * By default, heaps support allocation of multiple blocks of memory
+ * in an arbitrary order. However, it is possible to ask for
+ * single-block management by passing the H_SINGLE flag into the @a
+ * mode parameter, in which case the entire memory space managed by
+ * the heap is made available as a unique block.  In this mode, all
+ * allocation requests made through rt_heap_alloc() will return the
+ * same block address, pointing at the beginning of the heap memory.
+ *
+ * @param heap The address of a heap descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * heap. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created heap into the object registry.
+ *
+ * @param heapsz The size (in bytes) of the memory pool from which
+ * blocks will be claimed and released. This area is not extensible,
+ * so this value must accommodate the highest memory pressure that
+ * could be expected. The valid range is between 1 byte and 2 GB.
+ *
+ * @param mode The heap creation mode. The following flags can be
+ * OR'ed into this bitmask, each of them affecting the new heap:
+ *
+ * - H_FIFO makes tasks pend in FIFO order on the heap when waiting
+ * for available blocks.
+ *
+ * - H_PRIO makes tasks pend in priority order on the heap when
+ * waiting for available blocks.
+ *
+ * - H_SINGLE causes the entire heap space to be managed as a single
+ * memory block.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mode is invalid, or @a heapsz is zero
+ * or larger than 2 GB.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the heap.
+ *
+ * - -EEXIST is returned if @a name conflicts with an already
+ * registered heap.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @note Heaps can be shared by multiple processes which belong to the
+ * same Xenomai session.
+ */
+int rt_heap_create(RT_HEAP *heap,
+		   const char *name, size_t heapsz, int mode)
+{
+	struct alchemy_heap *hcb;
+	int sobj_flags = 0, ret;
+	struct service svc;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	if (heapsz == 0 || heapsz >= 1U << 31)
+		return -EINVAL;
+
+	if (mode & ~(H_PRIO|H_SINGLE))
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	ret = -ENOMEM;
+	hcb = xnmalloc(sizeof(*hcb));
+	if (hcb == NULL)
+		goto fail_cballoc;
+
+	/*
+	 * The memory pool has to be part of the main heap for proper
+	 * sharing between processes.
+	 */
+	if (heapobj_init(&hcb->hobj, NULL, heapsz))
+		goto fail_bufalloc;
+
+	generate_name(hcb->name, name, &heap_namegen);
+	hcb->mode = mode;
+	hcb->size = heapsz;
+	hcb->sba = __moff_nullable(NULL);
+
+	if (mode & H_PRIO)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	ret = syncobj_init(&hcb->sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libalchemy, heap_finalize));
+	if (ret)
+		goto fail_syncinit;
+
+	hcb->magic = heap_magic;
+
+	registry_init_file_obstack(&hcb->fsobj, &registry_ops);
+	ret = __bt(registry_add_file(&hcb->fsobj, O_RDONLY,
+				     "/alchemy/heaps/%s", hcb->name));
+	if (ret)
+		warning("failed to export heap %s to registry, %s",
+			hcb->name, symerror(ret));
+
+	ret = syncluster_addobj(&alchemy_heap_table, hcb->name, &hcb->cobj);
+	if (ret)
+		goto fail_register;
+
+	heap->handle = mainheap_ref(hcb, uintptr_t);
+
+	CANCEL_RESTORE(svc);
+
+	return 0;
+
+fail_register:
+	registry_destroy_file(&hcb->fsobj);
+	syncobj_uninit(&hcb->sobj);
+fail_syncinit:
+	heapobj_destroy(&hcb->hobj);
+fail_bufalloc:
+	xnfree(hcb);
+fail_cballoc:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
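+
+/*
+ * In H_SINGLE mode the whole heap space is handed out as a single
+ * block, which makes it usable as a plain shared memory segment; a
+ * sketch under illustrative names:
+ *
+ *   RT_HEAP shm;
+ *   void *mem;
+ *
+ *   rt_heap_create(&shm, "shmem", 4096, H_SINGLE);
+ *   // Every caller gets the same base address back; passing a zero
+ *   // size requests the whole segment.
+ *   rt_heap_alloc(&shm, 0, TM_INFINITE, &mem);
+ */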
+
+/**
+ * @fn int rt_heap_delete(RT_HEAP *heap)
+ * @brief Delete a heap.
+ *
+ * This routine deletes a heap object previously created by a call to
+ * rt_heap_create(), releasing all tasks currently blocked on it.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a heap is not a valid heap descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_heap_delete(RT_HEAP *heap)
+{
+	struct alchemy_heap *hcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	hcb = get_alchemy_heap(heap, &syns, &ret);
+	if (hcb == NULL)
+		goto out;
+
+	syncluster_delobj(&alchemy_heap_table, &hcb->cobj);
+	hcb->magic = ~heap_magic;
+	syncobj_destroy(&hcb->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_heap_alloc(RT_HEAP *heap, size_t size, RTIME timeout, void **blockp)
+ * @brief Allocate a block from a heap (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_heap_alloc_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ * Passing TM_INFINITE in @a timeout causes the caller to block
+ * indefinitely until a block is available. Passing TM_NONBLOCK
+ * causes the service to return immediately without blocking in case
+ * a block is not available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn int rt_heap_alloc_until(RT_HEAP *heap, size_t size, RTIME abs_timeout, void **blockp)
+ * @brief Allocate a block from a heap (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_heap_alloc_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ * Passing TM_INFINITE in @a abs_timeout causes the caller to block
+ * indefinitely until a block is available. Passing TM_NONBLOCK
+ * causes the service to return immediately without blocking in case
+ * a block is not available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn int rt_heap_alloc_timed(RT_HEAP *heap, size_t size, const struct timespec *abs_timeout, void **blockp)
+ * @brief Allocate a block from a heap.
+ *
+ * This service allocates a block from a given heap, or returns the
+ * address of the single memory segment if H_SINGLE was set in the
+ * creation mode passed to rt_heap_create(). When not enough memory is
+ * available on entry to this service, tasks may be blocked until
+ * their allocation request can be fulfilled.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @param size The requested size (in bytes) of the block. If the heap
+ * is managed as a single-block area (H_SINGLE), this value can be
+ * either zero, or the same value given to rt_heap_create(). In that
+ * case, the same block covering the entire heap space is returned to
+ * all callers of this service.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for a block
+ * of the requested size to be available from the heap. Passing NULL causes
+ * the caller to block indefinitely until a block is available. Passing
+ * { .tv_sec = 0, .tv_nsec = 0 } causes the service to return immediately
+ * without blocking in case a block is not available.
+ *
+ * @param blockp A pointer to a memory location which will be written
+ * upon success with the address of the allocated block, or the start
+ * address of the single memory segment. In the former case, the block
+ * can be freed using rt_heap_free().
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
+ * block is available.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is equal to { .tv_sec
+ * = 0, .tv_nsec = 0 } and no block is immediately available on entry
+ * to fulfill the allocation request.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before a block became available.
+ *
+ * - -EINVAL is returned if @a heap is not a valid heap descriptor, or
+ * @a heap is managed as a single-block area (i.e. H_SINGLE mode) and
+ * @a size is non-zero but does not match the original heap size
+ * passed to rt_heap_create().
+ *
+ * - -EIDRM is returned if @a heap is deleted while the caller was
+ * waiting for a block. In such an event, @a heap is no longer valid upon
+ * return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note If shared multi-processing is enabled (i.e. --enable-pshared
+ * was passed to the configure script), requests for a block size
+ * larger than twice the allocation page size are rounded up to the
+ * next page size. The allocation page size is currently 512 bytes
+ * long (HOBJ_PAGE_SIZE), which means that any request larger than 1k
+ * will be rounded up to the next 512 byte boundary.
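+ *
+ * A brief sketch using the relative-timeout variant rt_heap_alloc(),
+ * assuming @a heap was created as in the rt_heap_create() example
+ * and the default 1 ns clock resolution:
+ *
+ * @code
+ * void *blk;
+ * int ret;
+ *
+ * // Wait at most 1 ms for a 256-byte block.
+ * ret = rt_heap_alloc(&heap, 256, 1000000, &blk);
+ * if (ret == 0)
+ *         rt_heap_free(&heap, blk); // return the block when done
+ * @endcode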
+ */
+int rt_heap_alloc_timed(RT_HEAP *heap,
+			size_t size, const struct timespec *abs_timeout,
+			void **blockp)
+{
+	struct alchemy_heap_wait *wait;
+	struct alchemy_heap *hcb;
+	struct syncstate syns;
+	struct service svc;
+	void *p = NULL;
+	int ret = 0;
+
+	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	hcb = get_alchemy_heap(heap, &syns, &ret);
+	if (hcb == NULL)
+		goto out;
+
+	if (hcb->mode & H_SINGLE) {
+		p = __mptr_nullable(hcb->sba);
+		if (p)
+			goto done;
+		if (size == 0)
+			size = heapobj_size(&hcb->hobj);
+		else if (size != hcb->size) {
+			ret = -EINVAL;
+			goto done;
+		}
+		p = heapobj_alloc(&hcb->hobj, size);
+		if (p == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+		hcb->sba = __moff(p);
+		goto done;
+	}
+
+	p = heapobj_alloc(&hcb->hobj, size);
+	if (p)
+		goto done;
+
+	if (alchemy_poll_mode(abs_timeout)) {
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	wait = threadobj_prepare_wait(struct alchemy_heap_wait);
+	wait->size = size;
+
+	ret = syncobj_wait_grant(&hcb->sobj, abs_timeout, &syns);
+	if (ret) {
+		if (ret == -EIDRM) {
+			threadobj_finish_wait();
+			goto out;
+		}
+	} else
+		p = __mptr(wait->ptr);
+
+	threadobj_finish_wait();
+done:
+	*blockp = p;
+
+	put_alchemy_heap(hcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_heap_free(RT_HEAP *heap, void *block)
+ * @brief Release a block to a heap.
+ *
+ * This service should be used to release a block to the heap it
+ * belongs to. An attempt to fulfill the request of every task blocked
+ * on rt_heap_alloc() is made once @a block is returned to the memory
+ * pool.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @param block The address of the block to free.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a heap is not a valid heap descriptor, or
+ * @a block is not a valid block previously allocated by the
+ * rt_heap_alloc() service from @a heap.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_heap_free(RT_HEAP *heap, void *block)
+{
+	struct alchemy_heap_wait *wait;
+	struct threadobj *thobj, *tmp;
+	struct alchemy_heap *hcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+	void *ptr;
+
+	CANCEL_DEFER(svc);
+
+	hcb = get_alchemy_heap(heap, &syns, &ret);
+	if (hcb == NULL)
+		goto out;
+
+	if (hcb->mode & H_SINGLE)
+		goto done;
+
+	if (heapobj_validate(&hcb->hobj, block) == 0) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	heapobj_free(&hcb->hobj, block);
+
+	if (!syncobj_grant_wait_p(&hcb->sobj))
+		goto done;
+	/*
+	 * We might be releasing a block large enough to satisfy
+	 * multiple requests, so we iterate over all waiters.
+	 */
+	syncobj_for_each_grant_waiter_safe(&hcb->sobj, thobj, tmp) {
+		wait = threadobj_get_wait(thobj);
+		ptr = heapobj_alloc(&hcb->hobj, wait->size);
+		if (ptr) {
+			wait->ptr = __moff(ptr);
+			syncobj_grant_to(&hcb->sobj, thobj);
+		}
+	}
+done:
+	put_alchemy_heap(hcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_heap_inquire(RT_HEAP *heap, RT_HEAP_INFO *info)
+ * @brief Query heap status.
+ *
+ * This routine returns the status information about @a heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * @param info A pointer to the @ref RT_HEAP_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a heap is not a valid heap descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_heap_inquire(RT_HEAP *heap, RT_HEAP_INFO *info)
+{
+	struct alchemy_heap *hcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	hcb = get_alchemy_heap(heap, &syns, &ret);
+	if (hcb == NULL)
+		goto out;
+
+	info->nwaiters = syncobj_count_grant(&hcb->sobj);
+	info->heapsize = hcb->size;
+	info->usablemem = heapobj_size(&hcb->hobj);
+	info->usedmem = heapobj_inquire(&hcb->hobj);
+	strcpy(info->name, hcb->name);
+
+	put_alchemy_heap(hcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_heap_bind(RT_HEAP *heap, const char *name, RTIME timeout)
+ * @brief Bind to a heap.
+ *
+ * This routine creates a new descriptor to refer to an existing heap
+ * identified by its symbolic name. If the object does not exist on
+ * entry, the caller may block until a heap of the given name is
+ * created.
+ *
+ * @param heap The address of a heap descriptor filled in by the
+ * operation. The contents of this memory are undefined upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the
+ * heap to bind to. This string should match the object name
+ * argument passed to rt_heap_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
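+ *
+ * A short binding sketch from another process of the same session;
+ * "cfg-heap" refers to the hypothetical name used in the
+ * rt_heap_create() example:
+ *
+ * @code
+ * RT_HEAP heap;
+ * int ret;
+ *
+ * // Wait up to 1 s at the default 1 ns resolution.
+ * ret = rt_heap_bind(&heap, "cfg-heap", 1000000000ULL);
+ * @endcode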
+ */
+int rt_heap_bind(RT_HEAP *heap,
+		  const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_heap_table,
+				   timeout,
+				   offsetof(struct alchemy_heap, cobj),
+				   &heap->handle);
+}
+
+/**
+ * @fn int rt_heap_unbind(RT_HEAP *heap)
+ * @brief Unbind from a heap.
+ *
+ * @param heap The heap descriptor.
+ *
+ * This routine releases a previous binding to a heap. After this call
+ * has returned, the descriptor is no longer valid for referencing this
+ * object.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_heap_unbind(RT_HEAP *heap)
+{
+	heap->handle = 0;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/heap.h b/kernel/xenomai-v3.2.4/lib/alchemy/heap.h
new file mode 100644
index 0000000..e76e36f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/heap.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_HEAP_H
+#define _ALCHEMY_HEAP_H
+
+#include <copperplate/syncobj.h>
+#include <copperplate/registry.h>
+#include <copperplate/cluster.h>
+#include <copperplate/heapobj.h>
+#include <alchemy/heap.h>
+
+struct alchemy_heap {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	struct heapobj hobj;
+	struct syncobj sobj;
+	struct clusterobj cobj;
+	int mode;
+	size_t size;
+	dref_type(void *) sba;
+	struct fsobj fsobj;
+};
+
+struct alchemy_heap_wait {
+	size_t size;
+	dref_type (void *) ptr;
+};
+
+#define heap_magic	0x8a8aebeb
+
+extern struct syncluster alchemy_heap_table;
+
+#endif /* _ALCHEMY_HEAP_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/init.c b/kernel/xenomai-v3.2.4/lib/alchemy/init.c
new file mode 100644
index 0000000..2efd138
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/init.c
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <xenomai/init.h>
+#include "timer.h"
+#include "task.h"
+#include "sem.h"
+#include "event.h"
+#include "cond.h"
+#include "mutex.h"
+#include "queue.h"
+#include "buffer.h"
+#include "heap.h"
+#include "alarm.h"
+#include "pipe.h"
+
+/**
+ * @defgroup alchemy Alchemy API
+ *
+ * A programming interface reminiscent of traditional RTOS APIs
+ *
+ * This interface is an evolution of the former @a native API
+ * available with the Xenomai 2.x series.
+ */
+
+static unsigned int clock_resolution = 1; /* nanosecond. */
+
+static const struct option alchemy_options[] = {
+	{
+#define clock_resolution_opt	0
+		.name = "alchemy-clock-resolution",
+		.has_arg = required_argument,
+	},
+	{ /* Sentinel */ }
+};
+
+static int alchemy_parse_option(int optnum, const char *optarg)
+{
+	switch (optnum) {
+	case clock_resolution_opt:
+		clock_resolution = atoi(optarg);
+		break;
+	default:
+		/* Paranoid, can't happen. */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void alchemy_help(void)
+{
+	fprintf(stderr, "--alchemy-clock-resolution=<ns> tick value (default 1ns, tickless)\n");
+}
+
+#ifdef CONFIG_XENO_COBALT
+
+static inline void init_corespec(void)
+{
+	syncluster_init(&alchemy_pipe_table, "alchemy.pipe");
+	registry_add_dir("/alchemy/pipes");
+}
+
+#else
+
+static inline void init_corespec(void) { }
+
+#endif
+
+static int alchemy_init(void)
+{
+	int ret;
+
+	syncluster_init(&alchemy_task_table, "alchemy.task");
+	syncluster_init(&alchemy_sem_table, "alchemy.sem");
+	syncluster_init(&alchemy_event_table, "alchemy.event");
+	syncluster_init(&alchemy_cond_table, "alchemy.cond");
+	syncluster_init(&alchemy_mutex_table, "alchemy.mutex");
+	syncluster_init(&alchemy_queue_table, "alchemy.queue");
+	syncluster_init(&alchemy_buffer_table, "alchemy.buffer");
+	syncluster_init(&alchemy_heap_table, "alchemy.heap");
+	pvcluster_init(&alchemy_alarm_table, "alchemy.alarm");
+
+	ret = clockobj_init(&alchemy_clock, clock_resolution);
+	if (ret) {
+		warning("%s: failed to initialize Alchemy clock (res=%u ns)",
+			__FUNCTION__, clock_resolution);
+		return __bt(ret);
+	}
+
+	registry_add_dir("/alchemy");
+	registry_add_dir("/alchemy/tasks");
+	registry_add_dir("/alchemy/semaphores");
+	registry_add_dir("/alchemy/events");
+	registry_add_dir("/alchemy/condvars");
+	registry_add_dir("/alchemy/mutexes");
+	registry_add_dir("/alchemy/queues");
+	registry_add_dir("/alchemy/buffers");
+	registry_add_dir("/alchemy/heaps");
+	registry_add_dir("/alchemy/alarms");
+
+	init_corespec();
+
+	return 0;
+}
+
+static struct setup_descriptor alchemy_skin = {
+	.name = "alchemy",
+	.init = alchemy_init,
+	.options = alchemy_options,
+	.parse_option = alchemy_parse_option,
+	.help = alchemy_help,
+};
+
+interface_setup_call(alchemy_skin);
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/internal.c b/kernel/xenomai-v3.2.4/lib/alchemy/internal.c
new file mode 100644
index 0000000..7e998ff
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/internal.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <string.h>
+#include <stdio.h>
+#include <errno.h>
+#include <boilerplate/lock.h>
+#include <copperplate/cluster.h>
+#include <copperplate/heapobj.h>
+#include "internal.h"
+
+int alchemy_bind_object(const char *name, struct syncluster *sc,
+			RTIME timeout,
+			int offset,
+			uintptr_t *handle)
+{
+	struct clusterobj *cobj;
+	struct service svc;
+	struct timespec ts;
+	void *p;
+	int ret;
+
+	CANCEL_DEFER(svc);
+	ret = syncluster_findobj(sc, name,
+				 alchemy_rel_timeout(timeout, &ts),
+				 &cobj);
+	CANCEL_RESTORE(svc);
+	if (ret)
+		return ret;
+
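+	/*
+	 * Step back from the embedded cluster object to the start of
+	 * the enclosing control block, then publish a main-heap
+	 * reference to it as the descriptor handle.
+	 */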
+	p = cobj;
+	p -= offset;
+	*handle = mainheap_ref(p, uintptr_t);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/internal.h b/kernel/xenomai-v3.2.4/lib/alchemy/internal.h
new file mode 100644
index 0000000..b867604
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/internal.h
@@ -0,0 +1,93 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_INTERNAL_H
+#define _ALCHEMY_INTERNAL_H
+
+#include "boilerplate/ancillaries.h"
+#include "boilerplate/namegen.h"
+#include "timer.h"
+
+#define DEFINE_SYNC_LOOKUP(__name, __dsctype)				\
+static inline struct alchemy_ ## __name *				\
+get_alchemy_ ## __name(__dsctype *desc,					\
+		       struct syncstate *syns, int *err_r)		\
+{									\
+	struct alchemy_ ## __name *cb;					\
+									\
+	if (bad_pointer(desc)) {					\
+		*err_r = -EINVAL;					\
+		return NULL;						\
+	}								\
+									\
+	cb = mainheap_deref(desc->handle, struct alchemy_ ## __name);	\
+	if (bad_pointer(cb)) {						\
+		*err_r = -EINVAL;					\
+		return NULL;						\
+	}								\
+									\
+	if (syncobj_lock(&cb->sobj, syns) ||				\
+	    cb->magic != __name ## _magic) {				\
+		*err_r = -EINVAL;					\
+		return NULL;						\
+	}								\
+									\
+	return cb;							\
+}									\
+									\
+static inline								\
+void put_alchemy_ ## __name(struct alchemy_ ## __name *cb,		\
+			    struct syncstate *syns)			\
+{									\
+	syncobj_unlock(&cb->sobj, syns);				\
+}
+
+#define __DEFINE_LOOKUP(__scope, __name, __dsctype)			\
+__scope struct alchemy_ ## __name *					\
+find_alchemy_ ## __name(__dsctype *desc, int *err_r)			\
+{									\
+	struct alchemy_ ## __name *cb;					\
+									\
+	if (bad_pointer(desc)) {					\
+		*err_r = -EINVAL;					\
+		return NULL;						\
+	}								\
+									\
+	cb = mainheap_deref(desc->handle, struct alchemy_ ## __name);	\
+	if (bad_pointer(cb) || cb->magic != __name ## _magic) {		\
+		*err_r = -EINVAL;					\
+		return NULL;						\
+	}								\
+									\
+	return cb;							\
+}									\
+
+#define DEFINE_LOOKUP_PRIVATE(__name, __dsctype)			\
+	__DEFINE_LOOKUP(static inline, __name, __dsctype)
+
+#define DEFINE_LOOKUP(__name, __dsctype)				\
+	__DEFINE_LOOKUP(, __name, __dsctype)
+
+struct syncluster;
+
+int alchemy_bind_object(const char *name, struct syncluster *sc,
+			RTIME timeout,
+			int offset,
+			uintptr_t *handle);
+
+#endif /* !_ALCHEMY_INTERNAL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/mutex.c b/kernel/xenomai-v3.2.4/lib/alchemy/mutex.c
new file mode 100644
index 0000000..5836af5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/mutex.c
@@ -0,0 +1,521 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include "internal.h"
+#include "mutex.h"
+#include "timer.h"
+#include "task.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_mutex Mutex services
+ *
+ * POSIXish mutual exclusion services
+ *
+ * A mutex is a MUTual EXclusion object, and is useful for protecting
+ * shared data structures from concurrent modifications, and
+ * implementing critical sections and monitors.
+ *
+ * A mutex has two possible states: unlocked (not owned by any task),
+ * and locked (owned by one task). A mutex can never be owned by two
+ * different tasks simultaneously. A task attempting to lock a mutex
+ * that is already locked by another task is blocked until the latter
+ * unlocks the mutex first.
+ *
+ * Xenomai mutex services enforce a priority inheritance protocol in
+ * order to solve priority inversions.
+ *
+ * @{
+ */
+struct syncluster alchemy_mutex_table;
+
+static DEFINE_NAME_GENERATOR(mutex_namegen, "mutex",
+			     struct alchemy_mutex, name);
+
+DEFINE_LOOKUP(mutex, RT_MUTEX);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static ssize_t mutex_registry_read(struct fsobj *fsobj,
+				   char *buf, size_t size, off_t offset,
+				   void *priv)
+{
+	return 0;		/* FIXME */
+}
+
+static struct registry_operations registry_ops = {
+	.read	= mutex_registry_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+/**
+ * @fn int rt_mutex_create(RT_MUTEX *mutex, const char *name)
+ * @brief Create a mutex.
+ *
+ * Create a mutual exclusion object that allows multiple tasks to
+ * synchronize access to a shared resource. A mutex is left in an
+ * unlocked state after creation.
+ *
+ * @param mutex The address of a mutex descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * mutex. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created mutex into the object registry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the mutex.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered mutex.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @note Mutexes can be shared by multiple processes which belong to
+ * the same Xenomai session.
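+ *
+ * A minimal creation sketch; the name is illustrative:
+ *
+ * @code
+ * RT_MUTEX mutex;
+ * int ret;
+ *
+ * ret = rt_mutex_create(&mutex, "state-lock");
+ * if (ret)
+ *         return ret; // -ENOMEM, -EEXIST or -EPERM
+ * @endcode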
+ */
+int rt_mutex_create(RT_MUTEX *mutex, const char *name)
+{
+	struct alchemy_mutex *mcb;
+	pthread_mutexattr_t mattr;
+	struct service svc;
+	int ret;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	mcb = xnmalloc(sizeof(*mcb));
+	if (mcb == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	/*
+	 * XXX: we can't have priority inheritance with syncobj, so we
+	 * have to base this code directly over the POSIX layer.
+	 */
+	generate_name(mcb->name, name, &mutex_namegen);
+	mcb->owner = NO_ALCHEMY_TASK;
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
+	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
+	/* pthread_mutexattr_setrobust() might not be implemented. */
+	pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST_NP);
+	ret = -__RT(pthread_mutex_init(&mcb->lock, &mattr));
+	if (ret) {
+		xnfree(mcb);
+		goto out;
+	}
+
+	pthread_mutexattr_destroy(&mattr);
+
+	mcb->magic = mutex_magic;
+
+	registry_init_file(&mcb->fsobj, &registry_ops, 0);
+	ret = __bt(registry_add_file(&mcb->fsobj, O_RDONLY,
+				     "/alchemy/mutexes/%s", mcb->name));
+	if (ret) {
+		warning("failed to export mutex %s to registry, %s",
+			mcb->name, symerror(ret));
+		ret = 0;
+	}
+
+	ret = syncluster_addobj(&alchemy_mutex_table, mcb->name, &mcb->cobj);
+	if (ret) {
+		registry_destroy_file(&mcb->fsobj);
+		xnfree(mcb);
+	} else
+		mutex->handle = mainheap_ref(mcb, uintptr_t);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_mutex_delete(RT_MUTEX *mutex)
+ * @brief Delete a mutex.
+ *
+ * This routine deletes a mutex object previously created by a call to
+ * rt_mutex_create().
+ *
+ * @param mutex The mutex descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mutex is not a valid mutex descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * - -EBUSY is returned upon an attempt to destroy the object
+ * referenced by @a mutex while it is referenced (for example, while
+ * being used in a rt_mutex_acquire(), rt_mutex_acquire_timed() or
+ * rt_mutex_acquire_until() by another task).
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_mutex_delete(RT_MUTEX *mutex)
+{
+	struct alchemy_mutex *mcb;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	mcb = find_alchemy_mutex(mutex, &ret);
+	if (mcb == NULL)
+		goto out;
+
+	ret = -__RT(pthread_mutex_destroy(&mcb->lock));
+	if (ret)
+		goto out;
+
+	mcb->magic = ~mutex_magic;
+	syncluster_delobj(&alchemy_mutex_table, &mcb->cobj);
+	registry_destroy_file(&mcb->fsobj);
+	xnfree(mcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_mutex_acquire(RT_MUTEX *mutex, RTIME timeout)
+ * @brief Acquire/lock a mutex (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_mutex_acquire_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param mutex The mutex descriptor.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely. Passing TM_NONBLOCK
+ * causes the service to return immediately without blocking in case
+ * @a mutex is already locked by another task.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn int rt_mutex_acquire_until(RT_MUTEX *mutex, RTIME abs_timeout)
+ * @brief Acquire/lock a mutex (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_mutex_acquire_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param mutex The mutex descriptor.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely. Passing
+ * TM_NONBLOCK causes the service to return immediately without
+ * blocking in case @a mutex is already locked by another task.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn int rt_mutex_acquire_timed(RT_MUTEX *mutex, const struct timespec *abs_timeout)
+ * @brief Acquire/lock a mutex (with absolute timeout date).
+ *
+ * Attempt to lock a mutex. The calling task is blocked until the
+ * mutex is available, in which case it is locked again before this
+ * service returns. Xenomai mutexes are implicitly recursive and
+ * implement the priority inheritance protocol.
+ *
+ * @param mutex The mutex descriptor.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for the
+ * mutex to be available. Passing NULL causes the caller to block indefinitely.
+ * Passing { .tv_sec = 0, .tv_nsec = 0 } causes the service to return
+ * immediately without blocking in case @a mutex is already locked by
+ * another task.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before the
+ * mutex is available.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0, .tv_nsec
+ * = 0 } and the mutex is not immediately available.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task.
+ *
+ * - -EINVAL is returned if @a mutex is not a valid mutex descriptor.
+ *
+ * - -EIDRM is returned if @a mutex is deleted while the caller was
+ * waiting on it. In such an event, @a mutex is no longer valid upon return
+ * of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @sideeffect
+ * Over the Cobalt core, an Alchemy task with priority zero keeps
+ * running in primary mode until it releases the mutex, at which point
+ * it is switched back to secondary mode automatically.
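+ *
+ * A typical critical section, sketched with the relative-timeout
+ * variant rt_mutex_acquire(), assuming @a mutex was created as in
+ * the rt_mutex_create() example:
+ *
+ * @code
+ * int ret = rt_mutex_acquire(&mutex, TM_INFINITE);
+ * if (ret == 0) {
+ *         // ... access the shared state ...
+ *         rt_mutex_release(&mutex);
+ * }
+ * @endcode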
+ */
+int rt_mutex_acquire_timed(RT_MUTEX *mutex,
+			   const struct timespec *abs_timeout)
+{
+	struct alchemy_task *current;
+	struct alchemy_mutex *mcb;
+	struct timespec ts;
+	int ret = 0;
+
+	/* This must be an alchemy task. */
+	current = alchemy_task_current();
+	if (current == NULL)
+		return -EPERM;
+
+	/*
+	 * Try the fast path first. Note that we don't need any
+	 * protected section here: the caller should have provided for
+	 * it.
+	 */
+	mcb = find_alchemy_mutex(mutex, &ret);
+	if (mcb == NULL)
+		return ret;
+
+	/*
+	 * We found the mutex, but locklessly: let the POSIX layer
+	 * check for object existence.
+	 */
+	ret = -__RT(pthread_mutex_trylock(&mcb->lock));
+	if (ret == 0 || ret != -EBUSY || alchemy_poll_mode(abs_timeout))
+		goto done;
+
+	/* Slow path. */
+	if (abs_timeout == NULL) {
+		ret = -__RT(pthread_mutex_lock(&mcb->lock));
+		goto done;
+	}
+
+	/*
+	 * What a mess: we want all our timings to be based on
+	 * CLOCK_COPPERPLATE, but pthread_mutex_timedlock() is
+	 * implicitly based on CLOCK_REALTIME, so we need to translate
+	 * the user timeout into something POSIX understands.
+	 */
+	clockobj_convert_clocks(&alchemy_clock, abs_timeout, CLOCK_REALTIME, &ts);
+	ret = -__RT(pthread_mutex_timedlock(&mcb->lock, &ts));
+done:
+	switch (ret) {
+	case -ENOTRECOVERABLE:
+		ret = -EOWNERDEAD;
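+		/* Falls through. */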
+	case -EOWNERDEAD:
+		warning("owner of mutex 0x%x died", mutex->handle);
+		break;
+	case -EBUSY:
+		/*
+		 * Remap EBUSY -> EWOULDBLOCK: not very POSIXish, but
+		 * consistent with similar cases in the Alchemy API.
+		 */
+		ret = -EWOULDBLOCK;
+		break;
+	case 0:
+		mcb->owner.handle = mainheap_ref(current, uintptr_t);
+	}
+
+	return ret;
+}
+
+/**
+ * @fn int rt_mutex_release(RT_MUTEX *mutex)
+ * @brief Release/unlock a mutex.
+ *
+ * This routine releases a mutex object previously locked by a call to
+ * rt_mutex_acquire() or rt_mutex_acquire_until().  If the mutex is
+ * pended, the first waiting task (by priority order) is immediately
+ * unblocked and transferred the ownership of the mutex; otherwise, the
+ * mutex is left in an unlocked state.
+ *
+ * @param mutex The mutex descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mutex is not a valid mutex descriptor.
+ *
+ * - -EPERM is returned if @a mutex is not owned by the current task,
+ * or more generally if this service was called from a context which
+ * cannot own any mutex (e.g. interrupt context).
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+int rt_mutex_release(RT_MUTEX *mutex)
+{
+	struct alchemy_mutex *mcb;
+	int ret = 0;
+
+	mcb = find_alchemy_mutex(mutex, &ret);
+	if (mcb == NULL)
+		return ret;
+
+	/* Let the POSIX layer check for object existence. */
+	return -__RT(pthread_mutex_unlock(&mcb->lock));
+}
+
+/**
+ * @fn int rt_mutex_inquire(RT_MUTEX *mutex, RT_MUTEX_INFO *info)
+ * @brief Query mutex status.
+ *
+ * This routine returns the status information about the specified
+ * mutex.
+ *
+ * @param mutex The mutex descriptor.
+ *
+ * @param info A pointer to the @ref RT_MUTEX_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mutex is not a valid mutex descriptor.
+ *
+ * - -EPERM is returned if this service is called from an interrupt
+ * context.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+int rt_mutex_inquire(RT_MUTEX *mutex, RT_MUTEX_INFO *info)
+{
+	struct alchemy_mutex *mcb;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	mcb = find_alchemy_mutex(mutex, &ret);
+	if (mcb == NULL)
+		goto out;
+
+	ret = -__RT(pthread_mutex_trylock(&mcb->lock));
+	if (ret) {
+		if (ret != -EBUSY)
+			goto out;
+		info->owner = mcb->owner;
+		ret = 0;
+	} else {
+		__RT(pthread_mutex_unlock(&mcb->lock));
+		info->owner = NO_ALCHEMY_TASK;
+	}
+
+	strcpy(info->name, mcb->name);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_mutex_bind(RT_MUTEX *mutex, const char *name, RTIME timeout)
+ * @brief Bind to a mutex.
+ *
+ * This routine creates a new descriptor to refer to an existing mutex
+ * identified by its symbolic name. If the object does not exist on entry,
+ * the caller may block until a mutex of the given name is created.
+ *
+ * @param mutex The address of a mutex descriptor filled in by the
+ * operation. The contents of this memory are undefined upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the mutex
+ * to bind to. This string should match the object name argument
+ * passed to rt_mutex_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_mutex_bind(RT_MUTEX *mutex,
+		  const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_mutex_table,
+				   timeout,
+				   offsetof(struct alchemy_mutex, cobj),
+				   &mutex->handle);
+}
+
+/**
+ * @fn int rt_mutex_unbind(RT_MUTEX *mutex)
+ * @brief Unbind from a mutex.
+ *
+ * @param mutex The mutex descriptor.
+ *
+ * This routine releases a previous binding to a mutex. After this
+ * call has returned, the descriptor is no longer valid for referencing
+ * this object.
+ */
+int rt_mutex_unbind(RT_MUTEX *mutex)
+{
+	mutex->handle = 0;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/mutex.h b/kernel/xenomai-v3.2.4/lib/alchemy/mutex.h
new file mode 100644
index 0000000..998074c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/mutex.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_MUTEX_H
+#define _ALCHEMY_MUTEX_H
+
+#include <copperplate/cluster.h>
+#include <copperplate/registry.h>
+#include <alchemy/mutex.h>
+#include <alchemy/task.h>
+
+struct alchemy_mutex {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	pthread_mutex_t lock;
+	struct clusterobj cobj;
+	RT_TASK owner;
+	struct fsobj fsobj;
+};
+
+#define mutex_magic	0x8585ebeb
+
+extern struct syncluster alchemy_mutex_table;
+
+struct alchemy_mutex *find_alchemy_mutex(RT_MUTEX *mutex, int *err_r);
+
+#endif /* _ALCHEMY_MUTEX_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/pipe.c b/kernel/xenomai-v3.2.4/lib/alchemy/pipe.c
new file mode 100644
index 0000000..c334349
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/pipe.c
@@ -0,0 +1,675 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <unistd.h>
+#include <errno.h>
+#include <string.h>
+#include <fcntl.h>
+#include "rtdm/ipc.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/cluster.h"
+#include "reference.h"
+#include "internal.h"
+#include "pipe.h"
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_pipe Message pipe services
+ *
+ * Two-way communication channel between Xenomai & Linux domains
+ *
+ * A message pipe is a two-way communication channel between Xenomai
+ * threads and normal Linux threads using regular file I/O operations
+ * on a pseudo-device. Pipes can be operated in a message-oriented
+ * fashion so that message boundaries are preserved, and also in
+ * byte-oriented streaming mode from real-time to normal Linux
+ * threads for optimal throughput.
+ *
+ * Xenomai threads open their side of the pipe using the
+ * rt_pipe_create() service; regular Linux threads do the same by
+ * opening one of the /dev/rtpN special devices, where N is the minor
+ * number agreed upon between both ends of each pipe.
+ *
+ * In addition, named pipes are available through the registry
+ * support, which automatically creates a symbolic link from entries
+ * under /proc/xenomai/registry/rtipc/xddp/ to the corresponding
+ * special device file.
+ *
+ * @note Alchemy's message pipes are fully based on the @ref
+ * RTIPC_PROTO "XDDP protocol" available from the RTDM/ipc driver.
+ *
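+ * As a usage sketch, a plain Linux process could reach a pipe
+ * registered under the hypothetical name "logpipe" through the
+ * registry link, using regular file I/O:
+ *
+ * @code
+ * int fd = open("/proc/xenomai/registry/rtipc/xddp/logpipe", O_RDWR);
+ * char buf[128];
+ * ssize_t n = read(fd, buf, sizeof(buf)); // one message per read()
+ * @endcode
+ *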
+ * @{
+ */
+struct syncluster alchemy_pipe_table;
+
+static DEFINE_NAME_GENERATOR(pipe_namegen, "pipe",
+			     struct alchemy_pipe, name);
+
+DEFINE_LOOKUP_PRIVATE(pipe, RT_PIPE);
+
+/**
+ * @fn int rt_pipe_create(RT_PIPE *pipe, const char *name, int minor, size_t poolsize)
+ * @brief Create a message pipe.
+ *
+ * This service opens a bi-directional communication channel for
+ * exchanging messages between Xenomai threads and regular Linux
+ * threads. Pipes natively preserve message boundaries, but can also
+ * be used in byte-oriented streaming mode from Xenomai to Linux.
+ *
+ * rt_pipe_create() always returns immediately, even if no thread has
+ * opened the associated special device file yet. On the contrary, the
+ * non real-time side could block upon attempt to open the special
+ * device file until rt_pipe_create() is issued on the same pipe from
+ * a Xenomai thread, unless O_NONBLOCK was given to the open(2) system
+ * call.
+ *
+ * @param pipe The address of a pipe descriptor which can be later used
+ * to identify uniquely the created object, upon success of this call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * pipe. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created pipe into the object registry.
+ *
+ * Named pipes are supported through the use of the registry. Passing
+ * a valid @a name parameter when creating a message pipe causes a
+ * symbolic link to be created from
+ * /proc/xenomai/registry/rtipc/xddp/@a name to the associated special
+ * device (i.e. /dev/rtp*), so that the specific @a minor information
+ * does not need to be known from those processes for opening the
+ * proper device file. In such a case, both sides of the pipe only
+ * need to agree upon a symbolic name to refer to the same data path,
+ * which is especially useful whenever the @a minor number is picked
+ * up dynamically using an adaptive algorithm, such as passing
+ * P_MINOR_AUTO as @a minor value.
+ *
+ * @param minor The minor number of the device associated with the
+ * pipe.  Passing P_MINOR_AUTO causes the minor number to be
+ * auto-allocated. In such a case, a symbolic link will be
+ * automatically created from
+ * /proc/xenomai/registry/rtipc/xddp/@a name to the allocated pipe
+ * device entry. Valid minor numbers range from 0 to
+ * CONFIG_XENO_OPT_PIPE_NRDEV-1.
+ *
+ * @param poolsize Specifies the size of a dedicated buffer pool for the
+ * pipe. Passing 0 means that all message allocations for this pipe are
+ * performed on the Cobalt core heap.
+ *
+ * @return The @a minor number assigned to the connection is returned
+ * upon success. Otherwise:
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the pipe.
+ *
+ * - -ENODEV is returned if @a minor is different from P_MINOR_AUTO
+ * and is not a valid minor number.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered pipe.
+ *
+ * - -EBUSY is returned if @a minor is already open.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
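+ *
+ * A brief creation sketch; the name and pool size are illustrative:
+ *
+ * @code
+ * RT_PIPE pipe;
+ * int minor;
+ *
+ * // Auto-allocate the minor, with a 16k local buffer pool.
+ * minor = rt_pipe_create(&pipe, "logpipe", P_MINOR_AUTO, 16384);
+ * if (minor < 0)
+ *         return minor; // creation failed
+ * @endcode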
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_pipe_create,
+	     (RT_PIPE *pipe, const char *name, int minor, size_t poolsize))
+#else
+int rt_pipe_create(RT_PIPE *pipe,
+		   const char *name, int minor, size_t poolsize)
+#endif
+{
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc saddr;
+	struct alchemy_pipe *pcb;
+	struct service svc;
+	size_t streambufsz;
+	socklen_t addrlen;
+	int ret, sock;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	pcb = xnmalloc(sizeof(*pcb));
+	if (pcb == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	sock = __RT(socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP));
+	if (sock < 0) {
+		warning("RTIPC/XDDP protocol not supported by kernel");
+		ret = -errno;
+		xnfree(pcb);
+		goto out;
+	}
+
+	if (name && *name) {
+		namecpy(plabel.label, name);
+		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_LABEL,
+				      &plabel, sizeof(plabel)));
+		if (ret)
+			goto fail_sockopt;
+	}
+
+	if (poolsize > 0) {
+		ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_POOLSZ,
+				      &poolsize, sizeof(poolsize)));
+		if (ret)
+			goto fail_sockopt;
+	}
+
+	streambufsz = ALCHEMY_PIPE_STREAMSZ;
+	ret = __RT(setsockopt(sock, SOL_XDDP, XDDP_BUFSZ,
+			      &streambufsz, sizeof(streambufsz)));
+	if (ret)
+		goto fail_sockopt;
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = minor;
+	ret = __RT(bind(sock, (struct sockaddr *)&saddr, sizeof(saddr)));
+	if (ret)
+		goto fail_sockopt;
+
+	if (minor == P_MINOR_AUTO) {
+		/* Fetch the assigned minor device. */
+		addrlen = sizeof(saddr);
+		ret = __RT(getsockname(sock, (struct sockaddr *)&saddr, &addrlen));
+		if (ret)
+			goto fail_sockopt;
+		if (addrlen != sizeof(saddr)) {
+			ret = -EINVAL;
+			goto fail_register;
+		}
+		minor = saddr.sipc_port;
+	}
+
+	generate_name(pcb->name, name, &pipe_namegen);
+	pcb->sock = sock;
+	pcb->minor = minor;
+	pcb->magic = pipe_magic;
+
+	ret = syncluster_addobj(&alchemy_pipe_table, pcb->name, &pcb->cobj);
+	if (ret)
+		goto fail_register;
+
+	pipe->handle = mainheap_ref(pcb, uintptr_t);
+
+	CANCEL_RESTORE(svc);
+
+	return minor;
+fail_sockopt:
+	ret = -errno;
+	if (ret == -EADDRINUSE)
+		ret = -EBUSY;
+fail_register:
+	__RT(close(sock));
+	xnfree(pcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_pipe_delete(RT_PIPE *pipe)
+ * @brief Delete a message pipe.
+ *
+ * This routine deletes a pipe object previously created by a call to
+ * rt_pipe_create(). All resources attached to that pipe are
+ * automatically released, all pending data is flushed.
+ *
+ * @param pipe The pipe descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a pipe is not a valid pipe descriptor.
+ *
+ * - -EIDRM is returned if @a pipe is a closed pipe descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_pipe_delete(RT_PIPE *pipe)
+{
+	struct alchemy_pipe *pcb;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	pcb = find_alchemy_pipe(pipe, &ret);
+	if (pcb == NULL)
+		goto out;
+
+	ret = __RT(close(pcb->sock));
+	if (ret) {
+		ret = -errno;
+		if (ret == -EBADF)
+			ret = -EIDRM;
+		goto out;
+	}
+
+	syncluster_delobj(&alchemy_pipe_table, &pcb->cobj);
+	pcb->magic = ~pipe_magic;
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn ssize_t rt_pipe_read(RT_PIPE *pipe, void *buf, size_t size, RTIME timeout)
+ * @brief Read from a pipe (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_pipe_read_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param pipe The pipe descriptor.
+ *
+ * @param buf A pointer to a memory area which will be written upon
+ * success with the message received.
+ *
+ * @param size The maximum count of bytes to read from the received
+ * message into @a buf. If @a size is lower than the actual message size,
+ * -ENOBUFS is returned since the incompletely received message would
+ * be lost. If @a size is zero, this call returns immediately with no
+ * other action.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until
+ * a message is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case no message is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+ssize_t rt_pipe_read(RT_PIPE *pipe,
+		     void *buf, size_t size, RTIME timeout)
+{
+	struct alchemy_pipe *pcb;
+	int err = 0, flags;
+	struct timespec ts;
+	struct timeval tv;
+	ssize_t ret;
+
+	pcb = find_alchemy_pipe(pipe, &err);
+	if (pcb == NULL)
+		return err;
+
+	if (timeout == TM_NONBLOCK)
+		flags = MSG_DONTWAIT;
+	else {
+		if (!threadobj_current_p())
+			return -EPERM;
+		if (timeout != TM_INFINITE) {
+			clockobj_ticks_to_timespec(&alchemy_clock, timeout,
+						   &ts);
+			tv.tv_sec = ts.tv_sec;
+			tv.tv_usec = ts.tv_nsec / 1000;
+		} else {
+			tv.tv_sec = 0;
+			tv.tv_usec = 0;
+		}
+		__RT(setsockopt(pcb->sock, SOL_SOCKET,
+				SO_RCVTIMEO, &tv, sizeof(tv)));
+		flags = 0;
+	}
+
+	ret = __RT(recvfrom(pcb->sock, buf, size, flags, NULL, 0));
+	if (ret < 0)
+		ret = -errno;
+
+	return ret;
+}
+
+/**
+ * @fn ssize_t rt_pipe_read_until(RT_PIPE *pipe, void *buf, size_t size, RTIME abs_timeout)
+ * @brief Read from a pipe (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_pipe_read_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param pipe The pipe descriptor.
+ *
+ * @param buf A pointer to a memory area which will be written upon
+ * success with the message received.
+ *
+ * @param size The maximum count of bytes to read from the received
+ * message into @a buf. If @a size is lower than the actual message size,
+ * -ENOBUFS is returned since the incompletely received message would
+ * be lost. If @a size is zero, this call returns immediately with no
+ * other action.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * a message is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case no message is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_pipe_read_timed(RT_PIPE *pipe, void *buf, size_t size, const struct timespec *abs_timeout)
+ * @brief Read a message from a pipe.
+ *
+ * This service reads the next available message from a given pipe.
+ *
+ * @param pipe The pipe descriptor.
+ *
+ * @param buf A pointer to a memory area which will be written upon
+ * success with the message received.
+ *
+ * @param size The maximum count of bytes to read from the received
+ * message into @a buf. If @a size is lower than the actual message size,
+ * -ENOBUFS is returned since the incompletely received message would
+ * be lost. If @a size is zero, this call returns immediately with no
+ * other action.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for a
+ * message to be available from the pipe. Passing NULL causes the caller
+ * to block indefinitely until a message is available. Passing
+ * { .tv_sec = 0, .tv_nsec = 0 } causes the service to return immediately
+ * without blocking in case no message is available.
+ *
+ * @return The number of bytes available from the received message is
+ * returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
+ * message arrives.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and no message is immediately available on entry to
+ * the call.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before a message was available.
+ *
+ * - -EINVAL is returned if @a pipe is not a valid pipe descriptor.
+ *
+ * - -EIDRM is returned if @a pipe is deleted while the caller was
+ * waiting for a message. In such an event, @a pipe is no longer valid upon
+ * return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
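+ *
+ * A brief sketch of a non-blocking poll, assuming @a pipe was
+ * created as in the rt_pipe_create() example:
+ *
+ * @code
+ * struct timespec nowait = { .tv_sec = 0, .tv_nsec = 0 };
+ * char buf[128];
+ * ssize_t n;
+ *
+ * // -EWOULDBLOCK means no message is pending.
+ * n = rt_pipe_read_timed(&pipe, buf, sizeof(buf), &nowait);
+ * @endcode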
+ */
+ssize_t rt_pipe_read_timed(RT_PIPE *pipe,
+			   void *buf, size_t size,
+			   const struct timespec *abs_timeout)
+{
+	struct timespec now, timeout;
+	struct alchemy_pipe *pcb;
+	int err = 0, flags;
+	struct timeval tv;
+	ssize_t ret;
+
+	pcb = find_alchemy_pipe(pipe, &err);
+	if (pcb == NULL)
+		return err;
+
+	if (alchemy_poll_mode(abs_timeout))
+		flags = MSG_DONTWAIT;
+	else {
+		if (!threadobj_current_p())
+			return -EPERM;
+		if (abs_timeout) {
+			__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+			timespec_sub(&timeout, abs_timeout, &now);
+			tv.tv_sec = timeout.tv_sec;
+			tv.tv_usec = timeout.tv_nsec / 1000;
+		} else {
+			tv.tv_sec = 0;
+			tv.tv_usec = 0;
+		}
+		__RT(setsockopt(pcb->sock, SOL_SOCKET,
+				SO_RCVTIMEO, &tv, sizeof(tv)));
+		flags = 0;
+	}
+
+	ret = __RT(recvfrom(pcb->sock, buf, size, flags, NULL, 0));
+	if (ret < 0)
+		ret = -errno;
+
+	return ret;
+}
+
+static ssize_t do_write_pipe(RT_PIPE *pipe,
+			     const void *buf, size_t size, int flags)
+{
+	struct alchemy_pipe *pcb;
+	struct service svc;
+	ssize_t ret;
+	int err = 0;
+
+	CANCEL_DEFER(svc);
+
+	pcb = find_alchemy_pipe(pipe, &err);
+	if (pcb == NULL) {
+		ret = err;
+		goto out;
+	}
+
+	ret = __RT(sendto(pcb->sock, buf, size, flags, NULL, 0));
+	if (ret < 0) {
+		ret = -errno;
+		if (ret == -EBADF)
+			ret = -EIDRM;
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn ssize_t rt_pipe_write(RT_PIPE *pipe,const void *buf,size_t size,int mode)
+ * @brief Write a message to a pipe.
+ *
+ * This service writes a complete message to be received from the
+ * associated special device. rt_pipe_write() always preserves message
+ * boundaries, which means that all data sent through a single call of
+ * this service will be gathered in a single read(2) operation from
+ * the special device.
+ *
+ * This service differs from rt_pipe_send() in that it accepts a
+ * pointer to the raw data to be sent, instead of a canned message
+ * buffer.
+ *
+ * @param pipe The pipe descriptor.
+ *
+ * @param buf The address of the first data byte to send. The
+ * data will be copied to an internal buffer before transmission.
+ *
+ * @param size The size in bytes of the message (payload data
+ * only). Zero is a valid value, in which case the service returns
+ * immediately without sending any message.
+ *
+ * @param mode A set of flags affecting the operation:
+ *
+ * - P_URGENT causes the message to be prepended to the output
+ * queue, ensuring a LIFO ordering.
+ *
+ * - P_NORMAL causes the message to be appended to the output
+ * queue, ensuring a FIFO ordering.
+ *
+ * @return Upon success, this service returns @a size. Upon error, one
+ * of the following error codes is returned:
+ *
+ * - -EINVAL is returned if @a mode is invalid or @a pipe is not a
+ * pipe descriptor.
+ *
+ * - -ENOMEM is returned if not enough buffer space is available to
+ * complete the operation.
+ *
+ * - -EIDRM is returned if @a pipe is a closed pipe descriptor.
+ *
+ * @note Writing data to a pipe before any peer has opened the
+ * associated special device is allowed. The output will be buffered
+ * until then, only restricted by the available memory in the
+ * associated buffer pool (see rt_pipe_create()).
+ *
+ * @apitags{xcontext, switch-primary}
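+ *
+ * A short sketch queuing one bounded message in FIFO order,
+ * assuming @a pipe was created as in the rt_pipe_create() example:
+ *
+ * @code
+ * static const char msg[] = "hello";
+ * ssize_t n;
+ *
+ * n = rt_pipe_write(&pipe, msg, sizeof(msg), P_NORMAL);
+ * @endcode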
+ */
+ssize_t rt_pipe_write(RT_PIPE *pipe,
+		      const void *buf, size_t size, int mode)
+{
+	int flags = 0;
+
+	if (mode & ~P_URGENT)
+		return -EINVAL;
+
+	if (mode & P_URGENT)
+		flags |= MSG_OOB;
+
+	return do_write_pipe(pipe, buf, size, flags);
+}
+
+/**
+ * @brief Stream bytes through a pipe.
+ *
+ * This service writes a sequence of bytes to be received from the
+ * associated special device. Unlike rt_pipe_send(), this service does
+ * not preserve message boundaries. Instead, an internal buffer is
+ * filled on the fly with the data, which will be consumed as soon as
+ * the receiver wakes up.
+ *
+ * Data buffers sent by the rt_pipe_stream() service are always
+ * transmitted in FIFO order (i.e. P_NORMAL mode).
+ *
+ * @param pipe The pipe descriptor.
+ *
+ * @param buf The address of the first data byte to send. The
+ * data will be copied to an internal buffer before transmission.
+ *
+ * @param size The size in bytes of the buffer. Zero is a valid value,
+ * in which case the service returns immediately without sending any
+ * data.
+ *
+ * @return The number of bytes sent upon success; this value may be
+ * lower than @a size, depending on the available space in the
+ * internal buffer. Otherwise:
+ *
+ * - -EINVAL is returned if @a pipe is not a pipe descriptor.
+ *
+ * - -ENOMEM is returned if not enough buffer space is available to
+ * complete the operation.
+ *
+ * - -EIDRM is returned if @a pipe is a closed pipe descriptor.
+ *
+ * @note Writing data to a pipe before any peer has opened the
+ * associated special device is allowed. The output will be buffered
+ * until then, only restricted by the available memory in the
+ * associated buffer pool (see rt_pipe_create()).
+ *
+ * @apitags{xcontext, switch-primary}
+ */
+ssize_t rt_pipe_stream(RT_PIPE *pipe,
+		       const void *buf, size_t size)
+{
+	return do_write_pipe(pipe, buf, size, MSG_MORE);
+}
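+
+/*
+ * Usage sketch (illustrative only): since rt_pipe_stream() preserves
+ * no message boundaries and may accept fewer bytes than requested,
+ * loop until the whole sequence has been pushed. @pipe, @data and
+ * @len are assumed to be provided by the caller.
+ *
+ *	const char *p = data;
+ *	size_t left = len;
+ *
+ *	while (left > 0) {
+ *		ssize_t n = rt_pipe_stream(&pipe, p, left);
+ *		if (n < 0)
+ *			break;	... bail out on error ...
+ *		p += n;
+ *		left -= n;
+ *	}
+ */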
+
+/**
+ * @fn int rt_pipe_bind(RT_PIPE *pipe, const char *name, RTIME timeout)
+ * @brief Bind to a message pipe.
+ *
+ * This routine creates a new descriptor to refer to an existing
+ * message pipe identified by its symbolic name. If the object does
+ * not exist on entry, the caller may block until a pipe of the given
+ * name is created.
+ *
+ * @param pipe The address of a pipe descriptor filled in by the
+ * operation. The contents of this memory are undefined upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the
+ * pipe to bind to. This string should match the object name
+ * argument passed to rt_pipe_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_pipe_bind(RT_PIPE *pipe,
+		 const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_pipe_table,
+				   timeout,
+				   offsetof(struct alchemy_pipe, cobj),
+				   &pipe->handle);
+}
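+
+/*
+ * Usage sketch (illustrative only): attach to a pipe created by
+ * another process of the same Xenomai session, without blocking if
+ * it is not registered yet. The name "mypipe" is hypothetical.
+ *
+ *	RT_PIPE pipe;
+ *	int ret = rt_pipe_bind(&pipe, "mypipe", TM_NONBLOCK);
+ *
+ *	if (ret == -EWOULDBLOCK)
+ *		... "mypipe" is not registered yet ...
+ */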
+
+/**
+ * @fn int rt_pipe_unbind(RT_PIPE *pipe)
+ * @brief Unbind from a message pipe.
+ *
+ * @param pipe The pipe descriptor.
+ *
+ * This routine releases a previous binding to a message pipe. After
+ * this call has returned, the descriptor is no longer valid for
+ * referencing this object.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_pipe_unbind(RT_PIPE *pipe)
+{
+	pipe->handle = 0;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/pipe.h b/kernel/xenomai-v3.2.4/lib/alchemy/pipe.h
new file mode 100644
index 0000000..48843af
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/pipe.h
@@ -0,0 +1,40 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_PIPE_H
+#define _ALCHEMY_PIPE_H
+
+#include <copperplate/cluster.h>
+#include <alchemy/pipe.h>
+
+/* Fixed default for MSG_MORE accumulation. */
+#define ALCHEMY_PIPE_STREAMSZ  16384
+
+struct alchemy_pipe {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	int sock;
+	int minor;
+	struct clusterobj cobj;
+};
+
+#define pipe_magic	0x8b8bebeb
+
+extern struct syncluster alchemy_pipe_table;
+
+#endif /* _ALCHEMY_PIPE_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/queue.c b/kernel/xenomai-v3.2.4/lib/alchemy/queue.c
new file mode 100644
index 0000000..c45e424
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/queue.c
@@ -0,0 +1,1198 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/registry-obstack.h>
+#include "reference.h"
+#include "internal.h"
+#include "queue.h"
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_queue Message queue services
+ *
+ * Real-time IPC mechanism for sending messages of arbitrary size
+ *
+ * Message queueing is a method by which real-time tasks can exchange
+ * or pass data through a Xenomai-managed queue of messages. Messages
+ * can vary in length and be assigned different types or usages. A
+ * message queue can be created by one task and used by multiple tasks
+ * that send messages to and/or receive messages from it.
+ *
+ * @{
+ */
+struct syncluster alchemy_queue_table;
+
+static DEFINE_NAME_GENERATOR(queue_namegen, "queue",
+			     struct alchemy_queue, name);
+
+DEFINE_SYNC_LOOKUP(queue, RT_QUEUE);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static int prepare_waiter_cache(struct fsobstack *o,
+				struct obstack *cache, int item_count)
+{
+	const struct alchemy_queue *qcb;
+
+	fsobstack_grow_format(o, "--\n[WAITER]\n");
+	obstack_blank(cache, item_count * sizeof(qcb->name));
+
+	return 0;
+}
+
+static size_t collect_waiter_data(void *p, struct threadobj *thobj)
+{
+	const char *name = threadobj_get_name(thobj);
+	int len = strlen(name);
+
+	strcpy(p, name);
+	*(char *)(p + len) = '\n';
+
+	return len + 1;
+}
+
+static struct fsobstack_syncops fill_ops = {
+	.prepare_cache = prepare_waiter_cache,
+	.collect_data = collect_waiter_data,
+};
+
+static int queue_registry_open(struct fsobj *fsobj, void *priv)
+{
+	size_t usable_mem, used_mem, limit;
+	struct fsobstack *o = priv;
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	unsigned int mcount;
+	int mode, ret;
+
+	qcb = container_of(fsobj, struct alchemy_queue, fsobj);
+
+	ret = syncobj_lock(&qcb->sobj, &syns);
+	if (ret)
+		return -EIO;
+
+	usable_mem = heapobj_size(&qcb->hobj);
+	used_mem = heapobj_inquire(&qcb->hobj);
+	limit = qcb->limit;
+	mcount = qcb->mcount;
+	mode = qcb->mode;
+
+	syncobj_unlock(&qcb->sobj, &syns);
+
+	fsobstack_init(o);
+
+	fsobstack_grow_format(o, "%6s  %10s  %9s  %8s  %s\n",
+			      "[TYPE]", "[TOTALMEM]", "[USEDMEM]", "[QLIMIT]", "[MCOUNT]");
+	fsobstack_grow_format(o, " %s   %9zu  %9zu  %8zu  %8u\n",
+			      mode & Q_PRIO ? "PRIO" : "FIFO",
+			      usable_mem,
+			      used_mem,
+			      limit,
+			      mcount);
+
+	fsobstack_grow_syncobj_grant(o, &qcb->sobj, &fill_ops);
+
+	fsobstack_finish(o);
+
+	return 0;
+}
+
+static struct registry_operations registry_ops = {
+	.open		= queue_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static void queue_finalize(struct syncobj *sobj)
+{
+	struct alchemy_queue *qcb;
+
+	qcb = container_of(sobj, struct alchemy_queue, sobj);
+	registry_destroy_file(&qcb->fsobj);
+	heapobj_destroy(&qcb->hobj);
+	xnfree(qcb);
+}
+fnref_register(libalchemy, queue_finalize);
+
+/**
+ * @fn int rt_queue_create(RT_QUEUE *q, const char *name, size_t poolsize, size_t qlimit, int mode)
+ * @brief Create a message queue.
+ *
+ * Create a message queue object which allows multiple tasks to
+ * exchange data through the use of variable-sized messages. A message
+ * queue is created empty.
+ *
+ * @param q The address of a queue descriptor which can be later used
+ * to identify uniquely the created object, upon success of this call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * queue. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created queue into the object registry.
+ *
+ * @param poolsize The size (in bytes) of the message buffer pool to
+ * be pre-allocated for holding messages. Message buffers will be
+ * claimed and released to this pool.  The buffer pool memory cannot
+ * be extended. See note.
+ *
+ * @param qlimit This parameter limits the maximum number of messages
+ * which can be queued at any point in time; sending to a full queue
+ * returns an error. The special value Q_UNLIMITED can be passed to
+ * disable the limit check.
+ *
+ * @param mode The queue creation mode. The following flags can be
+ * OR'ed into this bitmask, each of them affecting the new queue:
+ *
+ * - Q_FIFO makes tasks pend in FIFO order on the queue for consuming
+ * messages.
+ *
+ * - Q_PRIO makes tasks pend in priority order on the queue.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mode is invalid or @a poolsize is zero.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the queue.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered queue.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @note Queues can be shared by multiple processes which belong to
+ * the same Xenomai session.
+ *
+ * @note Each message pending in the queue consumes four long words
+ * plus the actual payload size, aligned to the next long word
+ * boundary. For instance, a 6-byte message on a 32-bit platform
+ * requires 24 bytes of storage in the pool.
+ *
+ * When @a qlimit is given (i.e. different from Q_UNLIMITED), this
+ * overhead is accounted for automatically, so that @a qlimit messages
+ * of @a poolsize / @a qlimit bytes can be stored into the pool
+ * concurrently. Otherwise, @a poolsize is increased by 5% internally
+ * to cope with such overhead.
+ */
+int rt_queue_create(RT_QUEUE *queue, const char *name,
+		    size_t poolsize, size_t qlimit, int mode)
+{
+	struct alchemy_queue *qcb;
+	int sobj_flags = 0, ret;
+	struct service svc;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	if (poolsize == 0 || (mode & ~Q_PRIO) != 0)
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	ret = -ENOMEM;
+	qcb = xnmalloc(sizeof(*qcb));
+	if (qcb == NULL)
+		goto fail_cballoc;
+
+	generate_name(qcb->name, name, &queue_namegen);
+	/*
+	 * The message pool has to be part of the main heap for proper
+	 * sharing between processes.
+	 *
+	 * We have the message descriptor overhead to cope with when
+	 * allocating the buffer pool. When the queue limit is not
+	 * known, assume 5% overhead.
+	 */
+	if (qlimit == Q_UNLIMITED)
+		ret = heapobj_init(&qcb->hobj, qcb->name,
+				   poolsize + (poolsize * 5 / 100));
+	else
+		ret = heapobj_init_array(&qcb->hobj, qcb->name,
+					 (poolsize / qlimit) +
+					 sizeof(struct alchemy_queue_msg),
+					 qlimit);
+	if (ret)
+		goto fail_bufalloc;
+
+	qcb->mode = mode;
+	qcb->limit = qlimit;
+	list_init(&qcb->mq);
+	qcb->mcount = 0;
+
+	if (mode & Q_PRIO)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	ret = syncobj_init(&qcb->sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libalchemy, queue_finalize));
+	if (ret)
+		goto fail_syncinit;
+
+	qcb->magic = queue_magic;
+
+	registry_init_file_obstack(&qcb->fsobj, &registry_ops);
+	ret = __bt(registry_add_file(&qcb->fsobj, O_RDONLY,
+				     "/alchemy/queues/%s", qcb->name));
+	if (ret)
+		warning("failed to export queue %s to registry, %s",
+			qcb->name, symerror(ret));
+
+	ret = syncluster_addobj(&alchemy_queue_table, qcb->name, &qcb->cobj);
+	if (ret)
+		goto fail_register;
+
+	queue->handle = mainheap_ref(qcb, uintptr_t);
+
+	CANCEL_RESTORE(svc);
+
+	return 0;
+
+fail_register:
+	registry_destroy_file(&qcb->fsobj);
+	syncobj_uninit(&qcb->sobj);
+fail_syncinit:
+	heapobj_destroy(&qcb->hobj);
+fail_bufalloc:
+	xnfree(qcb);
+fail_cballoc:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
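+
+/*
+ * Usage sketch (illustrative only): create a queue able to hold up
+ * to 16 pending messages of 128 bytes each. Since a finite qlimit is
+ * given, the per-message descriptor overhead is accounted for
+ * internally, so the pool size is simply 16 * 128 bytes.
+ *
+ *	RT_QUEUE q;
+ *	int ret = rt_queue_create(&q, "sensor-q", 16 * 128, 16, Q_FIFO);
+ *
+ *	if (ret)
+ *		... creation failed, see the error codes above ...
+ */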
+
+/**
+ * @fn int rt_queue_delete(RT_QUEUE *q)
+ * @brief Delete a message queue.
+ *
+ * This routine deletes a queue object previously created by a call to
+ * rt_queue_create(). All resources attached to that queue are
+ * automatically released, including all pending messages.
+ *
+ * @param q The queue descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a q is not a valid queue descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_queue_delete(RT_QUEUE *queue)
+{
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &ret);
+	if (qcb == NULL)
+		goto out;
+
+	syncluster_delobj(&alchemy_queue_table, &qcb->cobj);
+	qcb->magic = ~queue_magic;
+	syncobj_destroy(&qcb->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn void *rt_queue_alloc(RT_QUEUE *q, size_t size)
+ * @brief Allocate a message buffer.
+ *
+ * This service allocates a message buffer from the queue's internal
+ * pool. This buffer can be filled in with payload information prior
+ * to enqueuing it by a call to rt_queue_send(). Used in pairs, these
+ * services provide a zero-copy interface for sending messages.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param size The requested size in bytes of the buffer. Zero is an
+ * acceptable value, which means that the message conveys no payload;
+ * in this case, the receiver will get a zero-sized message.
+ *
+ * @return The address of the allocated buffer upon success, or NULL
+ * if the call fails.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+void *rt_queue_alloc(RT_QUEUE *queue, size_t size)
+{
+	struct alchemy_queue_msg *msg = NULL;
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &ret);
+	if (qcb == NULL)
+		goto out;
+
+	msg = heapobj_alloc(&qcb->hobj, size + sizeof(*msg));
+	if (msg == NULL)
+		goto done;
+
+	/*
+	 * XXX: no need to init the ->next holder: list_*pend() do not
+	 * require it, and doing so would be needlessly costly on
+	 * low-end hardware.
+	 */
+	msg->size = size;	/* Zero is allowed. */
+	msg->refcount = 1;
+	++msg;
+done:
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return msg;
+}
+
+/**
+ * @fn int rt_queue_free(RT_QUEUE *q, void *buf)
+ * @brief Free a message buffer.
+ *
+ * This service releases a message buffer to the queue's internal
+ * pool.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param buf The address of the message buffer to free. Even
+ * zero-sized messages carrying no payload data must be freed, since
+ * they are assigned a valid memory space to store internal
+ * information.
+ *
+ * @return Zero is returned upon success, or -EINVAL if @a buf is not
+ * a valid message buffer previously allocated by the rt_queue_alloc()
+ * service, or the caller did not get ownership of the message through
+ * a successful return from rt_queue_receive().
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_queue_free(RT_QUEUE *queue, void *buf)
+{
+	struct alchemy_queue_msg *msg;
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	if (buf == NULL)
+		return -EINVAL;
+
+	msg = (struct alchemy_queue_msg *)buf - 1;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &ret);
+	if (qcb == NULL)
+		goto out;
+
+	if (heapobj_validate(&qcb->hobj, msg) == 0) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	/*
+	 * Check the reference count under lock, so that we properly
+	 * serialize with rt_queue_send() and rt_queue_receive() which
+	 * may update it.
+	 */
+	if (msg->refcount == 0) { /* Mm, double-free? */
+		ret = -EINVAL;
+		goto done;
+	}
+
+	if (--msg->refcount == 0)
+		heapobj_free(&qcb->hobj, msg);
+done:
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_queue_send(RT_QUEUE *q, const void *buf, size_t size, int mode)
+ * @brief Send a message to a queue.
+ *
+ * This service sends a complete message to a given queue. The message
+ * must have been allocated by a previous call to rt_queue_alloc().
+ *
+ * @param q The queue descriptor.
+ *
+ * @param buf The address of the message buffer to be sent.  The
+ * message buffer must have been allocated using the rt_queue_alloc()
+ * service.  Once passed to rt_queue_send(), the memory pointed to by
+ * @a buf is no more under the control of the sender and thus should
+ * not be referenced by it anymore; deallocation of this memory must
+ * be handled on the receiving side.
+ *
+ * @param size The actual size in bytes of the message, which may be
+ * lower than the allocated size for the buffer obtained from
+ * rt_queue_alloc(). Zero is a valid value, in which case an empty
+ * message will be sent.
+ *
+ * @param mode A set of flags affecting the operation:
+ *
+ * - Q_URGENT causes the message to be prepended to the message queue,
+ * ensuring a LIFO ordering.
+ *
+ * - Q_NORMAL causes the message to be appended to the message queue,
+ * ensuring a FIFO ordering.
+ *
+ * - Q_BROADCAST causes the message to be sent to all tasks currently
+ * waiting for messages. The message is not copied; a reference count
+ * is maintained instead so that the message will remain valid until
+ * the last receiver releases its own reference using rt_queue_free(),
+ * after which the message space will be returned to the queue's
+ * internal pool.
+ *
+ * @return Upon success, this service returns the number of receivers
+ * which were awakened as a result of the operation. If zero is returned,
+ * no task was waiting on the receiving side of the queue, and the
+ * message has been enqueued. Upon error, one of the following error
+ * codes is returned:
+ *
+ * - -EINVAL is returned if @a q is not a message queue descriptor, @a
+ * mode is invalid, or @a buf is NULL.
+ *
+ * - -ENOMEM is returned if queuing the message would exceed the limit
+ * defined for the queue at creation.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_queue_send(RT_QUEUE *queue,
+		  const void *buf, size_t size, int mode)
+{
+	struct alchemy_queue_wait *wait;
+	struct alchemy_queue_msg *msg;
+	struct alchemy_queue *qcb;
+	struct threadobj *waiter;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	if (buf == NULL || (mode & ~(Q_URGENT|Q_BROADCAST)) != 0)
+		return -EINVAL;
+
+	msg = (struct alchemy_queue_msg *)buf - 1;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &ret);
+	if (qcb == NULL)
+		goto out;
+
+	if (qcb->limit && qcb->mcount >= qcb->limit) {
+		ret = -ENOMEM;
+		goto done;
+	}
+
+	if (msg->refcount == 0) {
+		ret = -EINVAL;
+		goto done;
+	}
+
+	msg->refcount--;
+	msg->size = size;
+	ret = 0;  /* # of tasks unblocked. */
+
+	do {
+		waiter = syncobj_grant_one(&qcb->sobj);
+		if (waiter == NULL)
+			break;
+		wait = threadobj_get_wait(waiter);
+		wait->msg = __moff(msg);
+		msg->refcount++;
+		ret++;
+	} while (mode & Q_BROADCAST);
+
+	if (ret)
+		goto done;
+	/*
+	 * We need to queue the message if no task was waiting for it,
+	 * except in broadcast mode, in which case we only fix up the
+	 * reference count.
+	 */
+	if (mode & Q_BROADCAST)
+		msg->refcount++;
+	else {
+		qcb->mcount++;
+		if (mode & Q_URGENT)
+			list_prepend(&msg->next, &qcb->mq);
+		else
+			list_append(&msg->next, &qcb->mq);
+	}
+done:
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
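+
+/*
+ * Zero-copy usage sketch (illustrative only; struct sample is
+ * hypothetical): grab a buffer from the queue pool, fill it in
+ * place, then hand it off. Once sent, the buffer is owned by the
+ * receiving side, which releases it with rt_queue_free().
+ *
+ *	void *buf = rt_queue_alloc(&q, sizeof(struct sample));
+ *
+ *	if (buf) {
+ *		memcpy(buf, &sample, sizeof(struct sample));
+ *		nwake = rt_queue_send(&q, buf, sizeof(struct sample),
+ *				      Q_NORMAL);
+ *	}
+ */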
+
+/**
+ * @fn int rt_queue_write(RT_QUEUE *q, const void *buf, size_t size, int mode)
+ * @brief Write data to a queue.
+ *
+ * This service builds a message out of a raw data buffer, then sends
+ * it to a given queue.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param buf The address of the payload data to be written to the
+ * queue. The payload is copied to a message buffer allocated
+ * internally by this service.
+ *
+ * @param size The size in bytes of the payload data.  Zero is a valid
+ * value, in which case an empty message is queued.
+ *
+ * @param mode A set of flags affecting the operation:
+ *
+ * - Q_URGENT causes the message to be prepended to the message queue,
+ * ensuring a LIFO ordering.
+ *
+ * - Q_NORMAL causes the message to be appended to the message queue,
+ * ensuring a FIFO ordering.
+ *
+ * - Q_BROADCAST causes the message to be sent to all tasks currently
+ * waiting for messages. The message is not copied multiple times; a
+ * reference count is maintained instead so that the message will
+ * remain valid until the last receiver releases its own reference
+ * using rt_queue_free(), after which the message space will be
+ * returned to the queue's internal pool.
+ *
+ * @return Upon success, this service returns the number of receivers
+ * which were awakened as a result of the operation. If zero is returned,
+ * no task was waiting on the receiving side of the queue, and the
+ * message has been enqueued. Upon error, one of the following error
+ * codes is returned:
+ *
+ * - -EINVAL is returned if @a mode is invalid, @a buf is NULL with a
+ * non-zero @a size, or @a q is not a message queue descriptor.
+ *
+ * - -ENOMEM is returned if queuing the message would exceed the limit
+ * defined for the queue at creation, or if no memory can be obtained
+ * to convey the message data internally.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_queue_write(RT_QUEUE *queue,
+		   const void *buf, size_t size, int mode)
+{
+	struct alchemy_queue_wait *wait;
+	struct alchemy_queue_msg *msg;
+	struct alchemy_queue *qcb;
+	struct threadobj *waiter;
+	struct syncstate syns;
+	int ret = 0, nwaiters;
+	struct service svc;
+	size_t bufsz;
+
+	if (mode & ~(Q_URGENT|Q_BROADCAST))
+		return -EINVAL;
+
+	if (buf == NULL && size > 0)
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &ret);
+	if (qcb == NULL)
+		goto out;
+
+	if (mode & Q_BROADCAST)
+		/* No buffer-to-buffer copy in broadcast mode. */
+		goto enqueue;
+
+	waiter = syncobj_peek_grant(&qcb->sobj);
+	if (waiter && threadobj_local_p(waiter)) {
+		/*
+		 * Fast path for local threads already waiting for
+		 * data via rt_queue_read(): do direct copy to the
+		 * reader's buffer.
+		 */
+		wait = threadobj_get_wait(waiter);
+		bufsz = wait->local_bufsz;
+		if (bufsz == 0)
+			/* no buffer provided, enqueue normally. */
+			goto enqueue;
+		if (size > bufsz)
+			size = bufsz;
+		if (size > 0)
+			memcpy(wait->local_buf, buf, size);
+		wait->local_bufsz = size;
+		syncobj_grant_to(&qcb->sobj, waiter);
+		ret = 1;
+		goto done;
+	}
+
+enqueue:
+	nwaiters = syncobj_count_grant(&qcb->sobj);
+	if (nwaiters == 0 && (mode & Q_BROADCAST) != 0)
+		goto done;
+
+	ret = -ENOMEM;
+	if (qcb->limit && qcb->mcount >= qcb->limit)
+		goto done;
+
+	msg = heapobj_alloc(&qcb->hobj, size + sizeof(*msg));
+	if (msg == NULL)
+		goto done;
+
+	msg->size = size;
+	msg->refcount = 0;
+	if (size > 0)
+		memcpy(msg + 1, buf, size);
+
+	ret = 0;  /* # of tasks unblocked. */
+	if (nwaiters == 0) {
+		qcb->mcount++;
+		if (mode & Q_URGENT)
+			list_prepend(&msg->next, &qcb->mq);
+		else
+			list_append(&msg->next, &qcb->mq);
+		goto done;
+	}
+
+	do {
+		waiter = syncobj_grant_one(&qcb->sobj);
+		if (waiter == NULL)
+			break;
+		wait = threadobj_get_wait(waiter);
+		wait->msg = __moff(msg);
+		msg->refcount++;
+		ret++;
+	} while (mode & Q_BROADCAST);
+done:
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
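+
+/*
+ * Usage sketch (illustrative only; struct event and fill_event() are
+ * hypothetical): rt_queue_write() is the copy-based counterpart of
+ * rt_queue_alloc() followed by rt_queue_send(), convenient when the
+ * payload lives in arbitrary user memory.
+ *
+ *	struct event ev;
+ *
+ *	fill_event(&ev);
+ *	nwake = rt_queue_write(&q, &ev, sizeof(ev), Q_NORMAL);
+ *	if (nwake < 0)
+ *		... send failed, see the error codes above ...
+ */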
+
+/**
+ * @fn ssize_t rt_queue_receive(RT_QUEUE *q, void **bufp, RTIME timeout)
+ * @brief Receive from a queue (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_queue_receive_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param bufp A pointer to a memory location which will be written
+ * with the address of the received message.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until
+ * a message is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case no message is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_queue_receive_until(RT_QUEUE *q, void **bufp, RTIME abs_timeout)
+ * @brief Receive from a queue (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_queue_receive_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param bufp A pointer to a memory location which will be written
+ * with the address of the received message.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * a message is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case no message is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_queue_receive_timed(RT_QUEUE *q, void **bufp, const struct timespec *abs_timeout)
+ * @brief Receive a message from a queue (with absolute timeout date).
+ *
+ * This service receives the next available message from a given
+ * queue.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param bufp A pointer to a memory location which will be written
+ * with the address of the received message, upon success. Once
+ * consumed, the message space should be freed using rt_queue_free().
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for a
+ * message to be available from the queue. Passing NULL causes the caller
+ * to block indefinitely until a message is available. Passing
+ * { .tv_sec = 0, .tv_nsec = 0 } causes the service to return immediately
+ * without blocking in case no message is available.
+ *
+ * @return The number of bytes available from the received message is
+ * returned upon success. Zero is a possible value corresponding to a
+ * zero-sized message passed to rt_queue_send() or
+ * rt_queue_write(). Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
+ * message arrives.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and no message is immediately available on entry to
+ * the call.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before a message was available.
+ *
+ * - -EINVAL is returned if @a q is not a valid queue descriptor.
+ *
+ * - -EIDRM is returned if @a q is deleted while the caller was
+ * waiting for a message. In such an event, @a q is no longer valid
+ * upon return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+ssize_t rt_queue_receive_timed(RT_QUEUE *queue, void **bufp,
+			       const struct timespec *abs_timeout)
+{
+	struct alchemy_queue_wait *wait;
+	struct alchemy_queue_msg *msg;
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	struct service svc;
+	ssize_t ret;
+	int err = 0;
+
+	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &err);
+	if (qcb == NULL) {
+		ret = err;
+		goto out;
+	}
+
+	if (list_empty(&qcb->mq))
+		goto wait;
+
+	msg = list_pop_entry(&qcb->mq, struct alchemy_queue_msg, next);
+	msg->refcount++;
+	*bufp = msg + 1;
+	ret = (ssize_t)msg->size;
+	qcb->mcount--;
+	goto done;
+wait:
+	if (alchemy_poll_mode(abs_timeout)) {
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	wait = threadobj_prepare_wait(struct alchemy_queue_wait);
+	wait->local_bufsz = 0;
+
+	ret = syncobj_wait_grant(&qcb->sobj, abs_timeout, &syns);
+	if (ret) {
+		if (ret == -EIDRM) {
+			threadobj_finish_wait();
+			goto out;
+		}
+	} else {
+		msg = __mptr(wait->msg);
+		*bufp = msg + 1;
+		ret = (ssize_t)msg->size;
+	}
+
+	threadobj_finish_wait();
+done:
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
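+
+/*
+ * Usage sketch (illustrative only): block until the next message
+ * arrives, consume it, then return the buffer to the pool. Passing a
+ * NULL timeout waits indefinitely.
+ *
+ *	void *msg;
+ *	ssize_t n = rt_queue_receive_timed(&q, &msg, NULL);
+ *
+ *	if (n >= 0) {
+ *		... consume n bytes at msg ...
+ *		rt_queue_free(&q, msg);
+ *	}
+ */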
+
+/**
+ * @fn ssize_t rt_queue_read(RT_QUEUE *q, void *buf, size_t size, RTIME timeout)
+ * @brief Read from a queue (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_queue_read_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param buf A pointer to a memory area which will be written upon
+ * success with the received message payload.
+ *
+ * @param size The length in bytes of the memory area pointed to by @a
+ * buf.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until
+ * a message is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case no message is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_queue_read_until(RT_QUEUE *q, void *buf, size_t size, RTIME abs_timeout)
+ * @brief Read from a queue (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_queue_read_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param buf A pointer to a memory area which will be written upon
+ * success with the received message payload.
+ *
+ * @param size The length in bytes of the memory area pointed to by @a
+ * buf.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * a message is available. Passing TM_NONBLOCK causes the service
+ * to return immediately without blocking in case no message is
+ * available.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_queue_read_timed(RT_QUEUE *q, void *buf, size_t size, const struct timespec *abs_timeout)
+ * @brief Read from a queue.
+ *
+ * This service reads the next available message from a given
+ * queue.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param buf A pointer to a memory area which will be written upon
+ * success with the received message payload. The internal message
+ * buffer conveying the data is automatically freed by this call.  If
+ * --enable-pshared is enabled in the configuration, @a buf must have
+ * been obtained from the Xenomai memory allocator via xnmalloc() or
+ * any service based on it, such as rt_heap_alloc().
+ *
+ * @param size The length in bytes of the memory area pointed to by @a
+ * buf. Messages larger than @a size are truncated appropriately.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for a
+ * message to be available from the queue. Passing NULL causes the
+ * caller to block indefinitely until a message is available. Passing
+ * { .tv_sec = 0, .tv_nsec = 0 } causes the service to return immediately
+ * without blocking in case no message is available.
+ *
+ * @return The number of bytes copied to @a buf is returned upon
+ * success. Zero is a possible value corresponding to a zero-sized
+ * message passed to rt_queue_send() or rt_queue_write(). Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before a
+ * message arrives.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and no message is immediately available on entry to
+ * the call.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before a message was available.
+ *
+ * - -EINVAL is returned if @a q is not a valid queue descriptor.
+ *
+ * - -EIDRM is returned if @a q is deleted while the caller was
+ * waiting for a message. In such an event, @a q is no longer valid
+ * upon return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+ssize_t rt_queue_read_timed(RT_QUEUE *queue,
+			    void *buf, size_t size,
+			    const struct timespec *abs_timeout)
+{
+	struct alchemy_queue_wait *wait;
+	struct alchemy_queue_msg *msg;
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	struct service svc;
+	ssize_t ret;
+	int err = 0;
+
+	if (!threadobj_current_p() && !alchemy_poll_mode(abs_timeout))
+		return -EPERM;
+
+	if (size == 0)
+		return 0;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &err);
+	if (qcb == NULL) {
+		ret = err;
+		goto out;
+	}
+
+	if (list_empty(&qcb->mq))
+		goto wait;
+
+	msg = list_pop_entry(&qcb->mq, struct alchemy_queue_msg, next);
+	qcb->mcount--;
+	goto transfer;
+wait:
+	if (alchemy_poll_mode(abs_timeout)) {
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	wait = threadobj_prepare_wait(struct alchemy_queue_wait);
+	wait->local_buf = buf;
+	wait->local_bufsz = size;
+	wait->msg = __moff_nullable(NULL);
+
+	ret = syncobj_wait_grant(&qcb->sobj, abs_timeout, &syns);
+	if (ret) {
+		if (ret == -EIDRM) {
+			threadobj_finish_wait();
+			goto out;
+		}
+	} else if (__mptr_nullable(wait->msg)) {
+		msg = __mptr(wait->msg);
+	transfer:
+		ret = (ssize_t)(msg->size > size ? size : msg->size);
+		if (ret > 0)
+			memcpy(buf, msg + 1, ret);
+		heapobj_free(&qcb->hobj, msg);
+	} else	/* A direct copy took place. */
+		ret = (ssize_t)wait->local_bufsz;
+
+	threadobj_finish_wait();
+done:
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
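+
+/*
+ * Usage sketch (illustrative only): poll the queue without blocking
+ * by passing a zeroed timespec, falling back to other work when no
+ * message is pending.
+ *
+ *	struct timespec poll = { .tv_sec = 0, .tv_nsec = 0 };
+ *	char buf[128];
+ *	ssize_t n = rt_queue_read_timed(&q, buf, sizeof(buf), &poll);
+ *
+ *	if (n == -EWOULDBLOCK)
+ *		... nothing pending yet ...
+ */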
+
+/**
+ * @fn int rt_queue_flush(RT_QUEUE *q)
+ * @brief Flush pending messages from a queue.
+ *
+ * This routine flushes all messages currently pending in a queue,
+ * releasing all message buffers appropriately.
+ *
+ * @param q The queue descriptor.
+ *
+ * @return The number of messages flushed from the queue is returned
+ * upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a q is not a valid queue descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_queue_flush(RT_QUEUE *queue)
+{
+	struct alchemy_queue_msg *msg, *tmp;
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &ret);
+	if (qcb == NULL)
+		goto out;
+
+	ret = qcb->mcount;
+	qcb->mcount = 0;
+
+	/*
+	 * Flushing a message queue is not an operation we should see
+	 * in any fast path within an application, so locking out
+	 * other threads from using that queue while we flush it is
+	 * acceptable.
+	 */
+	if (!list_empty(&qcb->mq)) {
+		list_for_each_entry_safe(msg, tmp, &qcb->mq, next) {
+			list_remove(&msg->next);
+			heapobj_free(&qcb->hobj, msg);
+		}
+	}
+
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_queue_inquire(RT_QUEUE *q, RT_QUEUE_INFO *info)
+ * @brief Query queue status.
+ *
+ * This routine returns the status information about the specified
+ * queue.
+ *
+ * @param q The queue descriptor.
+ *
+ * @param info A pointer to the @ref RT_QUEUE_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a q is not a valid queue descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_queue_inquire(RT_QUEUE *queue, RT_QUEUE_INFO *info)
+{
+	struct alchemy_queue *qcb;
+	struct syncstate syns;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	qcb = get_alchemy_queue(queue, &syns, &ret);
+	if (qcb == NULL)
+		goto out;
+
+	info->nwaiters = syncobj_count_grant(&qcb->sobj);
+	info->nmessages = qcb->mcount;
+	info->mode = qcb->mode;
+	info->qlimit = qcb->limit;
+	info->poolsize = heapobj_size(&qcb->hobj);
+	info->usedmem = heapobj_inquire(&qcb->hobj);
+	strcpy(info->name, qcb->name);
+
+	put_alchemy_queue(qcb, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_queue_bind(RT_QUEUE *q, const char *name, RTIME timeout)
+ * @brief Bind to a message queue.
+ *
+ * This routine creates a new descriptor to refer to an existing
+ * message queue identified by its symbolic name. If the object does
+ * not exist on entry, the caller may block until a queue of the
+ * given name is created.
+ *
+ * @param q The address of a queue descriptor filled in by the
+ * operation. The contents of this memory are undefined upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the
+ * queue to bind to. This string should match the object name
+ * argument passed to rt_queue_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_queue_bind(RT_QUEUE *queue,
+		  const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_queue_table,
+				   timeout,
+				   offsetof(struct alchemy_queue, cobj),
+				   &queue->handle);
+}
+
+/**
+ * @fn int rt_queue_unbind(RT_QUEUE *q)
+ * @brief Unbind from a message queue.
+ *
+ * @param q The queue descriptor.
+ *
+ * This routine releases a previous binding to a message queue. After
+ * this call has returned, the descriptor is no longer valid for
+ * referencing this object.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_queue_unbind(RT_QUEUE *queue)
+{
+	queue->handle = 0;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/queue.h b/kernel/xenomai-v3.2.4/lib/alchemy/queue.h
new file mode 100644
index 0000000..196a6f1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/queue.h
@@ -0,0 +1,59 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_QUEUE_H
+#define _ALCHEMY_QUEUE_H
+
+#include <boilerplate/list.h>
+#include <copperplate/syncobj.h>
+#include <copperplate/registry.h>
+#include <copperplate/cluster.h>
+#include <copperplate/heapobj.h>
+#include <alchemy/queue.h>
+
+struct alchemy_queue {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	int mode;
+	size_t limit;
+	struct heapobj hobj;
+	struct syncobj sobj;
+	struct clusterobj cobj;
+	struct listobj mq;
+	unsigned int mcount;
+	struct fsobj fsobj;
+};
+
+#define queue_magic	0x8787ebeb
+
+struct alchemy_queue_msg {
+	size_t size;
+	unsigned int refcount;
+	struct holder next;
+	/* Payload data follows. */
+};
+
+struct alchemy_queue_wait {
+	dref_type(struct alchemy_queue_msg *) msg;
+	void *local_buf;
+	size_t local_bufsz;
+};
+
+extern struct syncluster alchemy_queue_table;
+
+#endif /* _ALCHEMY_QUEUE_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/reference.h b/kernel/xenomai-v3.2.4/lib/alchemy/reference.h
new file mode 100644
index 0000000..948aa60
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/reference.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <copperplate/reference.h>
+
+#define libalchemy_tag  3
+#define libalchemy_cbi  1
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/sem.c b/kernel/xenomai-v3.2.4/lib/alchemy/sem.c
new file mode 100644
index 0000000..497756e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/sem.c
@@ -0,0 +1,576 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/registry-obstack.h>
+#include "reference.h"
+#include "internal.h"
+#include "sem.h"
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_sem Semaphore services
+ *
+ * Counting semaphore IPC mechanism
+ *
+ * A counting semaphore is a synchronization object for controlling
+ * the concurrency level allowed in accessing a resource from multiple
+ * real-time tasks, based on the value of a count variable accessed
+ * atomically.  The semaphore is used through the P ("proberen",
+ * Dutch for "to test") and V ("verhogen", "to increment")
+ * operations. The P operation decrements the semaphore count by one
+ * if non-zero, or waits until a V operation is issued by another
+ * task. Conversely, the V operation releases a resource by
+ * incrementing the count by one, unblocking the task heading the
+ * wait queue, if any. Waiting on a semaphore may cause a priority
+ * inversion.
+ *
+ * If no more than a single resource is made available at any point in
+ * time, the semaphore enforces mutual exclusion and thus can be used
+ * to serialize access to a critical section. However, mutexes should
+ * be used instead in order to prevent priority inversions, based on
+ * the priority inheritance protocol.
+ *
+ * @{
+ */
+
+struct syncluster alchemy_sem_table;
+
+static DEFINE_NAME_GENERATOR(sem_namegen, "sem",
+			     struct alchemy_sem, name);
+
+DEFINE_LOOKUP_PRIVATE(sem, RT_SEM);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static int sem_registry_open(struct fsobj *fsobj, void *priv)
+{
+	struct semobj_waitentry *waitlist, *p;
+	struct fsobstack *o = priv;
+	struct alchemy_sem *scb;
+	size_t waitsz;
+	int ret, val;
+
+	scb = container_of(fsobj, struct alchemy_sem, fsobj);
+
+	waitsz = sizeof(*p) * 256;
+	waitlist = __STD(malloc(waitsz));
+	if (waitlist == NULL)
+		return -ENOMEM;
+
+	ret = semobj_inquire(&scb->smobj, waitsz, waitlist, &val);
+	if (ret < 0)
+		goto out;
+
+	fsobstack_init(o);
+
+	if (val < 0)
+		val = 0; /* Report a depleted state as zero. */
+
+	fsobstack_grow_format(o, "=%d\n", val);
+
+	if (ret) {
+		fsobstack_grow_format(o, "--\n[WAITER]\n");
+		p = waitlist;
+		do {
+			fsobstack_grow_format(o, "%s\n", p->name);
+			p++;
+		} while (--ret > 0);
+	}
+
+	fsobstack_finish(o);
+out:
+	__STD(free(waitlist));
+
+	return ret;
+}
+
+static struct registry_operations registry_ops = {
+	.open		= sem_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static void sem_finalize(struct semobj *smobj)
+{
+	struct alchemy_sem *scb = container_of(smobj, struct alchemy_sem, smobj);
+
+	registry_destroy_file(&scb->fsobj);
+	/* We should never fail here, so we backtrace. */
+	__bt(syncluster_delobj(&alchemy_sem_table, &scb->cobj));
+	scb->magic = ~sem_magic;
+	xnfree(scb);
+}
+fnref_register(libalchemy, sem_finalize);
+
+/**
+ * @fn int rt_sem_create(RT_SEM *sem, const char *name, unsigned long icount, int mode)
+ * @brief Create a counting semaphore.
+ *
+ * @param sem The address of a semaphore descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * semaphore. When non-NULL and non-empty, a copy of this string is
+ * used for indexing the created semaphore into the object registry.
+ *
+ * @param icount The initial value of the counting semaphore.
+ *
+ * @param mode The semaphore creation mode. The following flags can be
+ * OR'ed into this bitmask:
+ *
+ * - S_FIFO makes tasks pend in FIFO order on the semaphore.
+ *
+ * - S_PRIO makes tasks pend in priority order on the semaphore.
+ *
+ * - S_PULSE causes the semaphore to behave in "pulse" mode. In this
+ * mode, the V (signal) operation attempts to release a single waiter
+ * each time it is called, without incrementing the semaphore count,
+ * even if no waiter is pending. For this reason, the semaphore count
+ * in pulse mode remains zero.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a icount is non-zero and S_PULSE is set
+ * in @a mode, or @a mode is otherwise invalid.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the semaphore.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered semaphore.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @note Semaphores can be shared by multiple processes which belong
+ * to the same Xenomai session.
+ */
+int rt_sem_create(RT_SEM *sem, const char *name,
+		  unsigned long icount, int mode)
+{
+	int smobj_flags = 0, ret;
+	struct alchemy_sem *scb;
+	struct service svc;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	if (mode & ~(S_PRIO|S_PULSE))
+		return -EINVAL;
+
+	if (mode & S_PULSE) {
+		if (icount > 0)
+			return -EINVAL;
+		smobj_flags |= SEMOBJ_PULSE;
+	}
+
+	CANCEL_DEFER(svc);
+
+	scb = xnmalloc(sizeof(*scb));
+	if (scb == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+
+	if (mode & S_PRIO)
+		smobj_flags |= SEMOBJ_PRIO;
+
+	ret = semobj_init(&scb->smobj, smobj_flags, icount,
+			  fnref_put(libalchemy, sem_finalize));
+	if (ret) {
+		xnfree(scb);
+		goto out;
+	}
+
+	generate_name(scb->name, name, &sem_namegen);
+	scb->magic = sem_magic;
+
+	registry_init_file_obstack(&scb->fsobj, &registry_ops);
+	ret = __bt(registry_add_file(&scb->fsobj, O_RDONLY,
+				     "/alchemy/semaphores/%s", scb->name));
+	if (ret) {
+		warning("failed to export semaphore %s to registry, %s",
+			scb->name, symerror(ret));
+		ret = 0;
+	}
+
+	ret = syncluster_addobj(&alchemy_sem_table, scb->name, &scb->cobj);
+	if (ret) {
+		registry_destroy_file(&scb->fsobj);
+		semobj_uninit(&scb->smobj);
+		xnfree(scb);
+	} else
+		sem->handle = mainheap_ref(scb, uintptr_t);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
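+
+/*
+ * Usage sketch (illustrative only): a semaphore guarding a pool of
+ * four identical resources, with waiters served in priority order.
+ *
+ *	RT_SEM sem;
+ *	int ret = rt_sem_create(&sem, "res-pool", 4, S_PRIO);
+ *
+ *	if (ret)
+ *		... creation failed, see the error codes above ...
+ */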
+
+/**
+ * @fn int rt_sem_delete(RT_SEM *sem)
+ * @brief Delete a semaphore.
+ *
+ * This routine deletes a semaphore previously created by a call to
+ * rt_sem_create().
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a sem is not a valid semaphore
+ * descriptor.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ */
+int rt_sem_delete(RT_SEM *sem)
+{
+	struct alchemy_sem *scb;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	scb = find_alchemy_sem(sem, &ret);
+	if (scb == NULL)
+		goto out;
+
+	/*
+	 * XXX: we rely on copperplate's semobj to check for semaphore
+	 * existence, so we refrain from altering the object memory
+	 * until we know it was valid. So the only safe place to
+	 * negate the magic tag, deregister from the cluster and
+	 * release the memory is in the finalizer routine, which is
+	 * only called for valid objects.
+	 */
+	ret = semobj_destroy(&scb->smobj);
+	if (ret > 0)
+		ret = 0;
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_sem_p(RT_SEM *sem, RTIME timeout)
+ * @brief Pend on a semaphore (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_sem_p_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until the
+ * request is satisfied. Passing TM_NONBLOCK causes the service to
+ * return without blocking in case the request cannot be satisfied
+ * immediately.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn int rt_sem_p_until(RT_SEM *sem, RTIME abs_timeout)
+ * @brief Pend on a semaphore (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_sem_p_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * the request is satisfied. Passing TM_NONBLOCK causes the service
+ * to return without blocking in case the request cannot be satisfied
+ * immediately.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+
+/**
+ * @fn int rt_sem_p_timed(RT_SEM *sem, const struct timespec *abs_timeout)
+ * @brief Pend on a semaphore.
+ *
+ * Test and decrement the semaphore count. If the semaphore value is
+ * greater than zero, it is decremented by one and the service
+ * immediately returns to the caller. Otherwise, the caller is blocked
+ * until the semaphore is either signaled or destroyed, unless a
+ * non-blocking operation was required.
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for the
+ * request to be satisfied. Passing NULL causes the caller to block
+ * indefinitely until the request is satisfied. Passing { .tv_sec = 0,
+ * .tv_nsec = 0 } causes the service to return without blocking in case
+ * the request cannot be satisfied immediately.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ETIMEDOUT is returned if @a abs_timeout is reached before the
+ * request is satisfied.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and the semaphore count is zero on entry.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the request is satisfied.
+ *
+ * - -EINVAL is returned if @a sem is not a valid semaphore
+ * descriptor.
+ *
+ * - -EIDRM is returned if @a sem is deleted while the caller was
+ * sleeping on it. In such a case, @a sem is no longer valid upon
+ * return of this service.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ */
+int rt_sem_p_timed(RT_SEM *sem, const struct timespec *abs_timeout)
+{
+	struct alchemy_sem *scb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	scb = find_alchemy_sem(sem, &ret);
+	if (scb == NULL)
+		goto out;
+
+	ret = semobj_wait(&scb->smobj, abs_timeout);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
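+
+/*
+ * P/V usage sketch (illustrative only): claim one resource unit with
+ * an unbounded wait, then release it once done; rt_sem_v() is the
+ * matching release operation documented below.
+ *
+ *	if (rt_sem_p_timed(&sem, NULL) == 0) {
+ *		... use the resource ...
+ *		rt_sem_v(&sem);
+ *	}
+ */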
+
+/**
+ * @fn int rt_sem_v(RT_SEM *sem)
+ * @brief Signal a semaphore.
+ *
+ * If tasks are pending on the semaphore, the task heading the wait
+ * queue is immediately unblocked. Otherwise, the semaphore count is
+ * incremented by one, unless the semaphore is used in "pulse" mode
+ * (see rt_sem_create()).
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a sem is not a valid semaphore
+ * descriptor.
+ *
+ * @apitags{unrestricted}
+ */
+int rt_sem_v(RT_SEM *sem)
+{
+	struct alchemy_sem *scb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	scb = find_alchemy_sem(sem, &ret);
+	if (scb == NULL)
+		goto out;
+
+	ret = semobj_post(&scb->smobj);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_sem_broadcast(RT_SEM *sem)
+ * @brief Broadcast a semaphore.
+ *
+ * All tasks currently waiting on the semaphore are immediately
+ * unblocked. The semaphore count is set to zero.
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a sem is not a valid semaphore
+ * descriptor.
+ *
+ * @apitags{unrestricted}
+ */
+int rt_sem_broadcast(RT_SEM *sem)
+{
+	struct alchemy_sem *scb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	scb = find_alchemy_sem(sem, &ret);
+	if (scb == NULL)
+		goto out;
+
+	ret = semobj_broadcast(&scb->smobj);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_sem_inquire(RT_SEM *sem, RT_SEM_INFO *info)
+ * @brief Query semaphore status.
+ *
+ * This routine returns the status information about the specified
+ * semaphore.
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @param info A pointer to the @ref RT_SEM_INFO "return
+ * buffer" to copy the information to.
+ *
+ * @return Zero is returned and status information is written to the
+ * structure pointed at by @a info upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a sem is not a valid semaphore
+ * descriptor.
+ *
+ * @apitags{unrestricted}
+ */
+int rt_sem_inquire(RT_SEM *sem, RT_SEM_INFO *info)
+{
+	struct alchemy_sem *scb;
+	struct service svc;
+	int ret = 0, sval;
+
+	CANCEL_DEFER(svc);
+
+	scb = find_alchemy_sem(sem, &ret);
+	if (scb == NULL)
+		goto out;
+
+	ret = semobj_getvalue(&scb->smobj, &sval);
+	if (ret)
+		goto out;
+
+	info->count = sval < 0 ? 0 : sval;
+	info->nwaiters = sval < 0 ? -sval : 0;
+	strcpy(info->name, scb->name); /* <= racy. */
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
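+
+/*
+ * A short status retrieval sketch, assuming "sem" holds a valid
+ * descriptor: count and nwaiters are mutually exclusive, since a
+ * pended semaphore always has a zero count.
+ *
+ *	RT_SEM_INFO info;
+ *
+ *	if (rt_sem_inquire(&sem, &info) == 0)
+ *		printf("%s: count=%lu, waiters=%d\n", info.name,
+ *		       (unsigned long)info.count, (int)info.nwaiters);
+ */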
+
+/**
+ * @fn int rt_sem_bind(RT_SEM *sem, const char *name, RTIME timeout)
+ * @brief Bind to a semaphore.
+ *
+ * This routine creates a new descriptor to refer to an existing
+ * semaphore identified by its symbolic name. If the object does not
+ * exist on entry, the caller may block until a semaphore of the given
+ * name is created.
+ *
+ * @param sem The address of a semaphore descriptor filled in by the
+ * operation. The contents of this memory are undefined upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the
+ * semaphore to bind to. This string should match the object name
+ * argument passed to rt_sem_create().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_sem_bind(RT_SEM *sem,
+		const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_sem_table,
+				   timeout,
+				   offsetof(struct alchemy_sem, cobj),
+				   &sem->handle);
+}
+
+/**
+ * @fn int rt_sem_unbind(RT_SEM *sem)
+ * @brief Unbind from a semaphore.
+ *
+ * This routine releases a previous binding to a semaphore. After this
+ * call has returned, the descriptor is no longer valid for
+ * referencing this object.
+ *
+ * @param sem The semaphore descriptor.
+ *
+ * @return Zero is always returned.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_sem_unbind(RT_SEM *sem)
+{
+	sem->handle = 0;
+	return 0;
+}
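+
+/*
+ * A binding sketch for multi-process setups, assuming a semaphore
+ * named "sync" was created by a peer process of the same Xenomai
+ * session: wait up to one second (at the default 1 ns tick) for the
+ * name to be registered, use the semaphore, then drop the local
+ * binding.
+ *
+ *	RT_SEM sem;
+ *
+ *	if (rt_sem_bind(&sem, "sync", 1000000000ULL) == 0) {
+ *		rt_sem_p(&sem, TM_INFINITE);
+ *		rt_sem_unbind(&sem);
+ *	}
+ */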
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/sem.h b/kernel/xenomai-v3.2.4/lib/alchemy/sem.h
new file mode 100644
index 0000000..ee2ff2f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/sem.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_SEM_H
+#define _ALCHEMY_SEM_H
+
+#include <copperplate/semobj.h>
+#include <copperplate/registry.h>
+#include <copperplate/cluster.h>
+#include <alchemy/sem.h>
+
+struct alchemy_sem {
+	unsigned int magic;	/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	struct semobj smobj;
+	struct clusterobj cobj;
+	struct fsobj fsobj;
+};
+
+#define sem_magic	0x8383ebeb
+
+extern struct syncluster alchemy_sem_table;
+
+#endif /* _ALCHEMY_SEM_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/task.c b/kernel/xenomai-v3.2.4/lib/alchemy/task.c
new file mode 100644
index 0000000..9e6101d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/task.c
@@ -0,0 +1,2181 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sched.h>
+#include <pthread.h>
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include "copperplate/heapobj.h"
+#include "copperplate/internal.h"
+#include "internal.h"
+#include "task.h"
+#include "buffer.h"
+#include "queue.h"
+#include "timer.h"
+#include "heap.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_task Task management services
+ *
+ * Services dealing with preemptive multi-tasking
+ *
+ * Each Alchemy task is an independent portion of the overall
+ * application code embodied in a C procedure, which executes on its
+ * own stack context.
+ *
+ * @{
+ */
+
+union alchemy_wait_union {
+	struct alchemy_task_wait task_wait;
+	struct alchemy_buffer_wait buffer_wait;
+	struct alchemy_queue_wait queue_wait;
+	struct alchemy_heap_wait heap_wait;
+};
+
+struct syncluster alchemy_task_table;
+
+static DEFINE_NAME_GENERATOR(task_namegen, "task",
+			     struct alchemy_task, name);
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static int task_registry_open(struct fsobj *fsobj, void *priv)
+{
+	struct fsobstack *o = priv;
+	struct threadobj_stat buf;
+	struct alchemy_task *tcb;
+	int ret;
+
+	tcb = container_of(fsobj, struct alchemy_task, fsobj);
+	ret = threadobj_lock(&tcb->thobj);
+	if (ret)
+		return -EIO;
+
+	ret = threadobj_stat(&tcb->thobj, &buf);
+	threadobj_unlock(&tcb->thobj);
+	if (ret)
+		return ret;
+
+	fsobstack_init(o);
+
+	fsobstack_finish(o);
+
+	return 0;
+}
+
+static struct registry_operations registry_ops = {
+	.open		= task_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else /* !CONFIG_XENO_REGISTRY */
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static struct alchemy_task *find_alchemy_task(RT_TASK *task, int *err_r)
+{
+	struct alchemy_task *tcb;
+
+	if (bad_pointer(task))
+		goto bad_handle;
+
+	tcb = mainheap_deref(task->handle, struct alchemy_task);
+	if (bad_pointer(tcb))
+		goto bad_handle;
+
+	if (threadobj_get_magic(&tcb->thobj) == task_magic)
+		return tcb;
+bad_handle:
+	*err_r = -EINVAL;
+
+	return NULL;
+}
+
+static struct alchemy_task *find_alchemy_task_or_self(RT_TASK *task, int *err_r)
+{
+	struct alchemy_task *current;
+
+	if (task)
+		return find_alchemy_task(task, err_r);
+
+	current = alchemy_task_current();
+	if (current == NULL) {
+		*err_r = -EPERM;
+		return NULL;
+	}
+
+	return current;
+}
+
+struct alchemy_task *get_alchemy_task(RT_TASK *task, int *err_r)
+{
+	struct alchemy_task *tcb = find_alchemy_task(task, err_r);
+
+	/*
+	 * Grab the task lock, assuming that the task might have been
+	 * deleted, and/or maybe we have been lucky, and some random
+	 * opaque pointer might lead us to something which lies in
+	 * valid memory but certainly not to a task object. Last
+	 * chance is pthread_mutex_lock() detecting a wrong mutex kind
+	 * and bailing out.
+	 */
+	if (tcb == NULL || threadobj_lock(&tcb->thobj) == -EINVAL) {
+		*err_r = -EINVAL;
+		return NULL;
+	}
+
+	/* Check the magic word again, while we hold the lock. */
+	if (threadobj_get_magic(&tcb->thobj) != task_magic) {
+		threadobj_unlock(&tcb->thobj);
+		*err_r = -EINVAL;
+		return NULL;
+	}
+
+	return tcb;
+}
+
+struct alchemy_task *get_alchemy_task_or_self(RT_TASK *task, int *err_r)
+{
+	struct alchemy_task *current;
+
+	if (task)
+		return get_alchemy_task(task, err_r);
+
+	current = alchemy_task_current();
+	if (current == NULL) {
+		*err_r = -EPERM;
+		return NULL;
+	}
+
+	/* This one might block but can't fail, it is ours. */
+	threadobj_lock(&current->thobj);
+
+	return current;
+}
+
+void put_alchemy_task(struct alchemy_task *tcb)
+{
+	threadobj_unlock(&tcb->thobj);
+}
+
+static void task_finalizer(struct threadobj *thobj)
+{
+	struct alchemy_task *tcb;
+	struct syncstate syns;
+	int ret;
+
+	tcb = container_of(thobj, struct alchemy_task, thobj);
+	registry_destroy_file(&tcb->fsobj);
+	syncluster_delobj(&alchemy_task_table, &tcb->cobj);
+	/*
+	 * The msg sync may be pended by other threads, so we do have
+	 * to use syncobj_destroy() on it (i.e. NOT syncobj_uninit()).
+	 */
+	ret = __bt(syncobj_lock(&tcb->sobj_msg, &syns));
+	if (ret == 0)
+		syncobj_destroy(&tcb->sobj_msg, &syns);
+}
+
+static int task_prologue_1(void *arg)
+{
+	struct alchemy_task *tcb = arg;
+
+	return __bt(threadobj_prologue(&tcb->thobj, tcb->name));
+}
+
+static int task_prologue_2(struct alchemy_task *tcb)
+{
+	int ret;
+
+	threadobj_wait_start();
+	threadobj_lock(&tcb->thobj);
+	ret = threadobj_set_mode(0, tcb->mode, NULL);
+	threadobj_unlock(&tcb->thobj);
+
+	return ret;
+}
+
+static void *task_entry(void *arg)
+{
+	struct alchemy_task *tcb = arg;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	ret = __bt(task_prologue_2(tcb));
+	if (ret) {
+		CANCEL_RESTORE(svc);
+		return (void *)(long)ret;
+	}
+
+	threadobj_notify_entry();
+
+	CANCEL_RESTORE(svc);
+
+	tcb->entry(tcb->arg);
+
+	return NULL;
+}
+
+static void delete_tcb(struct alchemy_task *tcb)
+{
+	syncobj_uninit(&tcb->sobj_msg);
+	threadobj_uninit(&tcb->thobj);
+	threadobj_free(&tcb->thobj);
+}
+
+static int create_tcb(struct alchemy_task **tcbp, RT_TASK *task,
+		      const char *name, int prio, int mode)
+{
+	struct threadobj_init_data idata;
+	struct alchemy_task *tcb;
+	int ret;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	ret = check_task_priority(prio);
+	if (ret)
+		return ret;
+
+	tcb = threadobj_alloc(struct alchemy_task, thobj,
+			      union alchemy_wait_union);
+	if (tcb == NULL)
+		return -ENOMEM;
+
+	generate_name(tcb->name, name, &task_namegen);
+
+	tcb->mode = mode;
+	tcb->entry = NULL;	/* Not yet known. */
+	tcb->arg = NULL;
+
+	CPU_ZERO(&tcb->affinity);
+
+	ret = syncobj_init(&tcb->sobj_msg, CLOCK_COPPERPLATE,
+			   SYNCOBJ_PRIO, fnref_null);
+	if (ret)
+		goto fail_syncinit;
+
+	tcb->suspends = 0;
+	tcb->flowgen = 0;
+
+	idata.magic = task_magic;
+	idata.finalizer = task_finalizer;
+	idata.policy = prio ? SCHED_FIFO : SCHED_OTHER;
+	idata.param_ex.sched_priority = prio;
+	ret = threadobj_init(&tcb->thobj, &idata);
+	if (ret)
+		goto fail_threadinit;
+
+	*tcbp = tcb;
+
+	/*
+	 * CAUTION: The task control block must be fully built before
+	 * we publish it through syncluster_addobj(), at which point
+	 * it could be referred to immediately from another task as we
+	 * got preempted. In addition, the task descriptor must be
+	 * updated prior to starting the task.
+	 */
+	tcb->self.handle = mainheap_ref(tcb, uintptr_t);
+
+	registry_init_file_obstack(&tcb->fsobj, &registry_ops);
+	ret = __bt(registry_add_file(&tcb->fsobj, O_RDONLY,
+				     "/alchemy/tasks/%s", tcb->name));
+	if (ret)
+		warning("failed to export task %s to registry, %s",
+			tcb->name, symerror(ret));
+
+	ret = syncluster_addobj(&alchemy_task_table, tcb->name, &tcb->cobj);
+	if (ret)
+		goto fail_register;
+
+	if (task)
+		task->handle = tcb->self.handle;
+
+	return 0;
+
+fail_register:
+	registry_destroy_file(&tcb->fsobj);
+	threadobj_uninit(&tcb->thobj);
+fail_threadinit:
+	syncobj_uninit(&tcb->sobj_msg);
+fail_syncinit:
+	threadobj_free(&tcb->thobj);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_create(RT_TASK *task, const char *name, int stksize, int prio, int mode)
+ * @brief Create a task with Alchemy personality.
+ *
+ * This service creates a task with access to the full set of Alchemy
+ * services. If @a prio is non-zero, the new task belongs to Xenomai's
+ * real-time FIFO scheduling class, aka SCHED_FIFO. If @a prio is
+ * zero, the task belongs to the regular SCHED_OTHER class.
+ *
+ * Creating tasks with zero priority is useful for running non
+ * real-time processes which may invoke blocking real-time services,
+ * such as pending on a semaphore, reading from a message queue or a
+ * buffer, and so on.
+ *
+ * Once created, the task is left dormant until it is actually started
+ * by rt_task_start().
+ *
+ * @param task The address of a task descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * task. When non-NULL and non-empty, a copy of this string is
+ * used for indexing the created task into the object registry.
+ *
+ * @param stksize The size of the stack (in bytes) for the new
+ * task. If zero is passed, a system-dependent default size will be
+ * substituted.
+ *
+ * @param prio The base priority of the new task. This value must be
+ * in the [0 .. 99] range, where 0 is the lowest effective priority.
+ *
+ * @param mode The task creation mode. The following flags can be
+ * OR'ed into this bitmask:
+ *
+ * - T_JOINABLE allows another task to wait on the termination of the
+ * new task. rt_task_join() shall be called for this task to clean up
+ * any resources after its termination.
+ *
+ * - T_LOCK causes the new task to lock the scheduler prior to
+ * entering the user routine specified by rt_task_start(). A call to
+ * rt_task_set_mode() from the new task is required to drop this lock.
+ *
+ * - When running over the Cobalt core, T_WARNSW causes the SIGDEBUG
+ * signal to be sent to the current task whenever it switches to the
+ * secondary mode. This feature is useful to detect unwanted
+ * migrations to the Linux domain. This flag has no effect over the
+ * Mercury core.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if either @a prio, @a mode or @a stksize are
+ * invalid.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the task.
+ *
+ * - -EEXIST is returned if @a name conflicts with an already
+ * registered task.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt or non-Xenomai thread.
+ *
+ * @apitags{xthread-only, mode-unrestricted, switch-secondary}
+ *
+ * @sideeffect
+ * - When running over the Cobalt core:
+ *
+ *   - calling rt_task_create() causes SCHED_FIFO tasks to switch to
+ * secondary mode.
+ *
+ *   - members of Xenomai's SCHED_FIFO class running in the primary
+ * domain have utmost priority over all Linux activities in the
+ * system, including Linux interrupt handlers.
+ *
+ * - When running over the Mercury core, the new task belongs to the
+ * regular POSIX SCHED_FIFO class.
+ *
+ * @note Tasks can be referred to from multiple processes which all
+ * belong to the same Xenomai session.
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_task_create, (RT_TASK *task, const char *name,
+				   int stksize, int prio, int mode))
+#else
+int rt_task_create(RT_TASK *task, const char *name,
+		   int stksize, int prio, int mode)
+#endif
+{
+	struct corethread_attributes cta;
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret;
+
+	if (mode & ~(T_LOCK | T_WARNSW | T_JOINABLE))
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	ret = create_tcb(&tcb, task, name, prio, mode);
+	if (ret)
+		goto out;
+
+	/* We want this to be set prior to spawning the thread. */
+	tcb->self = *task;
+
+	cta.detachstate = mode & T_JOINABLE ?
+		PTHREAD_CREATE_JOINABLE : PTHREAD_CREATE_DETACHED;
+	cta.policy = threadobj_get_policy(&tcb->thobj);
+	threadobj_copy_schedparam(&cta.param_ex, &tcb->thobj);
+	cta.prologue = task_prologue_1;
+	cta.run = task_entry;
+	cta.arg = tcb;
+	cta.stacksize = stksize;
+
+	ret = __bt(copperplate_create_thread(&cta, &tcb->thobj.ptid));
+	if (ret) {
+		delete_tcb(tcb);
+	} else {
+		tcb->self.thread = tcb->thobj.ptid;
+		task->thread = tcb->thobj.ptid;
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_delete(RT_TASK *task)
+ * @brief Delete a real-time task.
+ *
+ * This call terminates a task previously created by
+ * rt_task_create().
+ *
+ * Tasks created with the T_JOINABLE flag shall be joined by a
+ * subsequent call to rt_task_join() once successfully deleted, to
+ * reclaim all resources.
+ *
+ * @param task The task descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor.
+ *
+ * - -EPERM is returned if @a task is NULL and this service was called
+ * from an invalid context. In addition, this error is always raised
+ * when this service is called from asynchronous context, such as a
+ * timer/alarm handler.
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ */
+int rt_task_delete(RT_TASK *task)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret;
+
+	if (threadobj_irq_p())
+		return -EPERM;
+
+	tcb = find_alchemy_task_or_self(task, &ret);
+	if (tcb == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+	threadobj_lock(&tcb->thobj);
+	/* Self-deletion is handled by threadobj_cancel(). */
+	threadobj_cancel(&tcb->thobj);
+	CANCEL_RESTORE(svc);
+
+	return 0;
+}
+
+/**
+ * @fn int rt_task_join(RT_TASK *task)
+ * @brief Wait on the termination of a real-time task.
+ *
+ * This service blocks the caller in non-real-time context until @a
+ * task has terminated. All resources are released after successful
+ * completion of this service.
+ *
+ * The specified task must have been created by the same process that
+ * wants to join it, and the T_JOINABLE mode flag must have been set
+ * on creation to rt_task_create().
+ *
+ * @param task The task descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor.
+ *
+ * - -EINVAL is returned if the task was not created with T_JOINABLE
+ * set or some other task is already waiting on the termination.
+ *
+ * - -EDEADLK is returned if @a task refers to the caller.
+ *
+ * - -ESRCH is returned if @a task no longer exists or refers to task
+ * created by a different process.
+ *
+ * @apitags{mode-unrestricted, switch-primary}
+ *
+ * @note After successful completion of this service, it is neither
+ * required nor valid to additionally invoke rt_task_delete() on the
+ * same task.
+ */
+int rt_task_join(RT_TASK *task)
+{
+	if (bad_pointer(task))
+		return -EINVAL;
+
+	return -__RT(pthread_join(task->thread, NULL));
+}
+
+/**
+ * @fn int rt_task_set_affinity(RT_TASK *task, const cpu_set_t *cpus)
+ * @brief Set CPU affinity of real-time task.
+ *
+ * This call makes @a task affine to the set of CPUs defined by @a
+ * cpus.
+ *
+ * @param task The task descriptor.  If @a task is NULL, the CPU
+ * affinity of the current task is changed.
+ *
+ * @param cpus The set of CPUs @a task should be affine to.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is NULL but the caller is not a
+ * Xenomai task, or if @a task is non-NULL but not a valid task
+ * descriptor.
+ *
+ * - -EINVAL is returned if @a cpus contains no processors that are
+ * currently physically on the system and permitted to the process
+ * according to any restrictions that may be imposed by the "cpuset"
+ * mechanism described in cpuset(7).
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ */
+int rt_task_set_affinity(RT_TASK *task, const cpu_set_t *cpus)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task_or_self(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	tcb->affinity = *cpus;
+
+	ret = sched_setaffinity(threadobj_get_pid(&tcb->thobj),
+				sizeof(tcb->affinity), &tcb->affinity);
+	if (ret)
+		ret = -errno;
+
+	put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_start(RT_TASK *task, void (*entry)(void *arg), void *arg)
+ * @brief Start a real-time task.
+ *
+ * This call starts execution of a task previously created by
+ * rt_task_create(). This service causes the started task to leave the
+ * initial dormant state.
+ *
+ * @param task The task descriptor.
+ *
+ * @param entry The address of the task entry point.
+ *
+ * @param arg A user-defined opaque argument @a entry will receive.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor.
+ *
+ * @apitags{mode-unrestricted, switch-primary}
+ *
+ * @note Starting an already started task leads to a nop, returning a
+ * success status.
+ */
+int rt_task_start(RT_TASK *task,
+		  void (*entry)(void *arg),
+		  void *arg)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	tcb->entry = entry;
+	tcb->arg = arg;
+	ret = threadobj_start(&tcb->thobj);
+	if (ret == -EIDRM)
+		/*
+		 * The started thread has run then exited, tcb->thobj
+		 * is stale: don't touch it anymore.
+		 */
+		ret = 0;
+	else
+		put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
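+
+/*
+ * A typical create/start sequence, in which demo_entry() and
+ * do_work() are hypothetical: the task is created dormant at
+ * priority 50 with the default stack size, then released by
+ * rt_task_start(). rt_task_spawn() combines both steps.
+ *
+ *	static RT_TASK demo_task;
+ *
+ *	static void demo_entry(void *arg)
+ *	{
+ *		for (;;)
+ *			do_work();
+ *	}
+ *
+ *	ret = rt_task_create(&demo_task, "demo", 0, 50, T_JOINABLE);
+ *	if (ret == 0)
+ *		ret = rt_task_start(&demo_task, demo_entry, NULL);
+ */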
+
+/**
+ * @fn int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
+ * @brief Turn caller into a real-time task.
+ *
+ * Set the calling thread personality to the Alchemy API, enabling the
+ * full set of Alchemy services. Upon success, the caller is no
+ * longer a regular POSIX thread, but a Xenomai-extended thread.
+ *
+ * If @a prio is non-zero, the new task moves to Xenomai's real-time
+ * FIFO scheduling class, aka SCHED_FIFO. If @a prio is zero, the task
+ * moves to the regular SCHED_OTHER class.
+ *
+ * Running Xenomai tasks with zero priority is useful for running non
+ * real-time processes which may invoke blocking real-time services,
+ * such as pending on a semaphore, reading from a message queue or a
+ * buffer, and so on.
+ *
+ * @param task If non-NULL, the address of a task descriptor which can
+ * be later used to identify uniquely the task, upon success of this
+ * call. If NULL, no descriptor is returned.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * task. When non-NULL and non-empty, a copy of this string is
+ * used for indexing the task into the object registry.
+ *
+ * @param prio The base priority of the task. This value must be in
+ * the [0 .. 99] range, where 0 is the lowest effective priority.
+ *
+ * @param mode The task shadowing mode. The following flags can be
+ * OR'ed into this bitmask:
+ *
+ * - T_LOCK causes the current task to lock the scheduler before
+ * returning to the caller, preventing all further involuntary task
+ * switches on the current CPU. A call to rt_task_set_mode() from the
+ * current task is required to drop this lock.
+ *
+ * - When running over the Cobalt core, T_WARNSW causes the SIGDEBUG
+ * signal to be sent to the current task whenever it switches to the
+ * secondary mode. This feature is useful to detect unwanted
+ * migrations to the Linux domain. This flag has no effect over the
+ * Mercury core.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a prio is invalid.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the task extension.
+ *
+ * - -EEXIST is returned if @a name conflicts with an already
+ * registered task.
+ *
+ * - -EBUSY is returned if the caller is not a regular POSIX thread.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context, e.g. interrupt handler.
+ *
+ * @apitags{pthread-only, switch-secondary, switch-primary}
+ *
+ * @sideeffect Over Cobalt, if the caller is a plain POSIX thread, it
+ * is turned into a Xenomai _shadow_ thread, with full access to all
+ * Cobalt services. The caller always returns from this service in
+ * primary mode.
+ *
+ * @note Tasks can be referred to from multiple processes which all
+ * belong to the same Xenomai session.
+ */
+int rt_task_shadow(RT_TASK *task, const char *name, int prio, int mode)
+{
+	struct threadobj *current = threadobj_current();
+	struct sched_param_ex param_ex;
+	struct alchemy_task *tcb;
+	struct service svc;
+	int policy, ret;
+	pthread_t self;
+
+	if (mode & ~(T_LOCK | T_WARNSW))
+		return -EINVAL;
+
+	/*
+	 * Overlaying the default TCB assigned to the main thread by
+	 * Copperplate at init time is fine, but over-shadowing a
+	 * Xenomai thread is not. A valid TCB pointer with a zero
+	 * magic identifies the default main TCB.
+	 */
+	if (current && threadobj_get_magic(current))
+		return -EBUSY;
+
+	CANCEL_DEFER(svc);
+
+	/*
+	 * Over Cobalt, the following call turns the current context
+	 * into a dual-kernel thread. Do this early, since this will
+	 * be required next for creating the TCB and running the
+	 * prologue code (i.e. real-time mutexes and monitors are
+	 * locked there).
+	 */
+	self = pthread_self();
+	policy = prio ? SCHED_FIFO : SCHED_OTHER;
+	param_ex.sched_priority = prio;
+	ret = __bt(copperplate_renice_local_thread(self, policy, &param_ex));
+	if (ret)
+		goto out;
+
+	ret = create_tcb(&tcb, task, name, prio, mode);
+	if (ret)
+		goto out;
+
+	CANCEL_RESTORE(svc);
+
+	if (task)
+		task->thread = self;
+
+	ret = threadobj_shadow(&tcb->thobj, tcb->name);
+	if (ret)
+		goto undo;
+
+	CANCEL_DEFER(svc);
+
+	ret = task_prologue_2(tcb);
+	if (ret)
+		goto undo;
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+undo:
+	delete_tcb(tcb);
+	goto out;
+}
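+
+/*
+ * A common shadowing sketch, where application_main_loop() stands
+ * for the rest of the program: the main() thread of a regular
+ * process promotes itself to an Alchemy task at priority 10, gaining
+ * access to the blocking Alchemy services from that point on.
+ *
+ *	int main(int argc, char *const argv[])
+ *	{
+ *		RT_TASK self;
+ *
+ *		if (rt_task_shadow(&self, "main", 10, 0))
+ *			return 1;
+ *
+ *		return application_main_loop();
+ *	}
+ */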
+
+/**
+ * @fn int rt_task_set_periodic(RT_TASK *task, RTIME idate, RTIME period)
+ * @brief Make a real-time task periodic.
+ *
+ * Make a task periodic by programming its first release point and its
+ * period in the processor time line.  @a task should then call
+ * rt_task_wait_period() to sleep until the next periodic release
+ * point in the processor timeline is reached.
+ *
+ * @param task The task descriptor.  If @a task is NULL, the current
+ * task is made periodic. @a task must belong to the current process.
+ *
+ * @param idate The initial (absolute) date of the first release
+ * point, expressed in clock ticks (see note).  If @a idate is equal
+ * to TM_NOW, the current system date is used.
+ *
+ * @param period The period of the task, expressed in clock ticks (see
+ * note). Passing TM_INFINITE stops the task's periodic timer if
+ * enabled, then returns successfully.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is NULL but the caller is not a
+ * Xenomai task, or if @a task is non-NULL but not a valid task
+ * descriptor.
+ *
+ * - -ETIMEDOUT is returned if @a idate is different from TM_INFINITE
+ * and represents a date in the past.
+ *
+ * @apitags{mode-unrestricted, switch-primary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ *
+ * @note Over Cobalt, -EINVAL is returned if @a period is
+ * different from TM_INFINITE but shorter than the user scheduling
+ * latency value for the target system, as displayed by
+ * /proc/xenomai/latency.
+ *
+ * @note The @a idate and @a period values are interpreted as a
+ * multiple of the Alchemy clock resolution (see
+ * --alchemy-clock-resolution option, defaults to 1 nanosecond).
+ *
+ * @attention Unlike its Xenomai 2.x counterpart,
+ * rt_task_set_periodic() will @b NOT block @a task until @a idate is
+ * reached. The first beat in the periodic timeline should be awaited
+ * for by a call to rt_task_wait_period().
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_task_set_periodic,
+	     (RT_TASK *task, RTIME idate, RTIME period))
+#else
+int rt_task_set_periodic(RT_TASK *task, RTIME idate, RTIME period)
+#endif
+{
+	struct timespec its, pts, now;
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	if (period == TM_INFINITE) {
+		pts.tv_sec = 0;
+		pts.tv_nsec = 0;
+		its = pts;
+	} else {
+		clockobj_ticks_to_timespec(&alchemy_clock, period, &pts);
+		if (idate == TM_NOW) {
+			__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+			timespec_add(&its, &now, &pts);
+		} else
+			/*
+			 * idate is an absolute time specification
+			 * already, so we want a direct conversion to
+			 * timespec.
+			 */
+			clockobj_ticks_to_timespec(&alchemy_clock, idate, &its);
+	}
+
+	tcb = get_alchemy_task_or_self(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	if (!threadobj_local_p(&tcb->thobj)) {
+		ret = -EINVAL;
+		goto out;
+	}
+
+	ret = threadobj_set_periodic(&tcb->thobj, &its, &pts);
+	put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
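+
+/*
+ * The canonical periodic loop built on this service, run from within
+ * the entry routine of an Alchemy task, with do_work() hypothetical:
+ * the current task programs a 1 ms period starting now, then waits
+ * for each release point in turn, treating -ETIMEDOUT as a
+ * recoverable overrun report.
+ *
+ *	unsigned long overruns;
+ *	int ret;
+ *
+ *	ret = rt_task_set_periodic(NULL, TM_NOW, 1000000);
+ *	if (ret)
+ *		return;
+ *
+ *	for (;;) {
+ *		ret = rt_task_wait_period(&overruns);
+ *		if (ret && ret != -ETIMEDOUT)
+ *			break;
+ *		do_work();
+ *	}
+ */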
+
+/**
+ * @fn int rt_task_wait_period(unsigned long *overruns_r)
+ * @brief Wait for the next periodic release point.
+ *
+ * Delay the current task until the next periodic release point is
+ * reached. The periodic timer should have been previously started for
+ * @a task by a call to rt_task_set_periodic().
+ *
+ * @param overruns_r If non-NULL, @a overruns_r shall be a pointer to
+ * a memory location which will be written with the count of pending
+ * overruns. This value is written to only when rt_task_wait_period()
+ * returns -ETIMEDOUT or success. The memory location remains
+ * unmodified otherwise. If NULL, this count will not be returned.
+ *
+ * @return Zero is returned upon success. If @a overruns_r is
+ * non-NULL, zero is written to the pointed memory
+ * location. Otherwise:
+ *
+ * - -EWOULDBLOCK is returned if rt_task_set_periodic() was not called
+ * for the current task.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * waiting task before the next periodic release point was reached. In
+ * this case, the overrun counter is also cleared.
+ *
+ * - -ETIMEDOUT is returned if a timer overrun occurred, which
+ * indicates that a previous release point was missed by the calling
+ * task. If @a overruns_r is non-NULL, the count of pending overruns
+ * is written to the pointed memory location.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @note If the current release point has already been reached at the
+ * time of the call, the current task immediately returns from this
+ * service with no delay.
+ */
+int rt_task_wait_period(unsigned long *overruns_r)
+{
+	if (!threadobj_current_p())
+		return -EPERM;
+
+	return threadobj_wait_period(overruns_r);
+}
+
+/**
+ * @fn int rt_task_sleep_until(RTIME date)
+ * @brief Delay the current real-time task (with absolute wakeup date).
+ *
+ * Delay the execution of the calling task until a given date is
+ * reached. The caller is put to sleep, and does not consume any CPU
+ * time in such a state.
+ *
+ * @param date An absolute date expressed in clock ticks, specifying a
+ * wakeup date (see note). As a special case, TM_INFINITE is an
+ * acceptable value that causes the caller to block indefinitely,
+ * until rt_task_unblock() is called against it. Otherwise, any wake
+ * up date in the past causes the task to return immediately.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task.
+ *
+ * - -ETIMEDOUT is returned if @a date has already elapsed.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @note The @a date value is interpreted as a multiple of the Alchemy
+ * clock resolution (see --alchemy-clock-resolution option, defaults
+ * to 1 nanosecond).
+ */
+int rt_task_sleep_until(RTIME date)
+{
+	struct timespec ts;
+	struct service svc;
+	ticks_t now;
+
+	if (!threadobj_current_p())
+		return -EPERM;
+
+	if (date == TM_INFINITE)
+		ts = zero_time;
+	else {
+		now = clockobj_get_time(&alchemy_clock);
+		if (date <= now)
+			return -ETIMEDOUT;
+		CANCEL_DEFER(svc);
+		clockobj_ticks_to_timespec(&alchemy_clock, date, &ts);
+		CANCEL_RESTORE(svc);
+	}
+
+	return threadobj_sleep(&ts);
+}
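+
+/*
+ * A drift-free delay loop sketch using absolute dates, with
+ * do_work() hypothetical: each wakeup is computed from the previous
+ * one rather than from the current time, so processing jitter does
+ * not accumulate. rt_timer_read() returns the current Alchemy clock
+ * value; -ETIMEDOUT means the date already elapsed, in which case
+ * the cycle is simply skipped.
+ *
+ *	RTIME next = rt_timer_read();
+ *
+ *	for (;;) {
+ *		next += 1000000;
+ *		if (rt_task_sleep_until(next) == -ETIMEDOUT)
+ *			continue;
+ *		do_work();
+ *	}
+ */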
+
+/**
+ * @fn int rt_task_sleep(RTIME delay)
+ * @brief Delay the current real-time task (with relative delay).
+ *
+ * This routine is a variant of rt_task_sleep_until() accepting a
+ * relative timeout specification.
+ *
+ * @param delay A relative delay expressed in clock ticks (see
+ * note). A zero delay causes this service to return immediately to
+ * the caller with a success status.
+ *
+ * @return See rt_task_sleep_until().
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @note The @a delay value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_task_sleep(RTIME delay)
+{
+	struct timespec ts;
+	struct service svc;
+
+	if (!threadobj_current_p())
+		return -EPERM;
+
+	if (delay == 0)
+		return 0;
+
+	CANCEL_DEFER(svc);
+	clockobj_ticks_to_timeout(&alchemy_clock, delay, &ts);
+	CANCEL_RESTORE(svc);
+
+	return threadobj_sleep(&ts);
+}
+
+/**
+ * @fn int rt_task_spawn(RT_TASK *task, const char *name, int stksize, int prio, int mode, void (*entry)(void *arg), void *arg)
+ * @brief Create and start a real-time task.
+ *
+ * This service spawns a task by combining calls to rt_task_create()
+ * and rt_task_start() for the new task.
+ *
+ * @param task The address of a task descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * task. When non-NULL and non-empty, a copy of this string is
+ * used for indexing the created task into the object registry.
+ *
+ * @param stksize The size of the stack (in bytes) for the new
+ * task. If zero is passed, a system-dependent default size will be
+ * substituted.
+ *
+ * @param prio The base priority of the new task. This value must be
+ * in the [0 .. 99] range, where 0 is the lowest effective priority.
+ *
+ * @param mode The task creation mode. See rt_task_create().
+ *
+ * @param entry The address of the task entry point.
+ *
+ * @param arg A user-defined opaque argument @a entry will receive.
+ *
+ * @return See rt_task_create().
+ *
+ * @apitags{mode-unrestricted, switch-secondary}
+ *
+ * @sideeffect see rt_task_create().
+ */
+#ifndef DOXYGEN_CPP
+CURRENT_IMPL(int, rt_task_spawn, (RT_TASK *task, const char *name,
+				  int stksize, int prio, int mode,
+				  void (*entry)(void *arg),
+				  void *arg))
+#else
+int rt_task_spawn(RT_TASK *task, const char *name,
+		  int stksize, int prio, int mode,
+		  void (*entry)(void *arg),
+		  void *arg)
+#endif
+{
+	int ret;
+
+	ret = rt_task_create(task, name, stksize, prio, mode);
+	if (ret)
+		return ret;
+
+	return rt_task_start(task, entry, arg);
+}
+
+/**
+ * @fn int rt_task_same(RT_TASK *task1, RT_TASK *task2)
+ * @brief Compare real-time task descriptors.
+ *
+ * This predicate returns true if @a task1 and @a task2 refer to the
+ * same task.
+ *
+ * @param task1 First task descriptor to compare.
+ *
+ * @param task2 Second task descriptor to compare.
+ *
+ * @return A non-zero value is returned if both descriptors refer to
+ * the same task, zero otherwise.
+ *
+ * @apitags{unrestricted}
+ */
+int rt_task_same(RT_TASK *task1, RT_TASK *task2)
+{
+	return task1->handle == task2->handle;
+}
+
+/**
+ * @fn int rt_task_suspend(RT_TASK *task)
+ * @brief Suspend a real-time task.
+ *
+ * Forcibly suspend the execution of a task. This task will not be
+ * eligible for scheduling until it is explicitly resumed by a call to
+ * rt_task_resume(). In other words, the suspended state caused by a
+ * call to rt_task_suspend() is cumulative with respect to the delayed
+ * and blocked states caused by other services, and is managed
+ * separately from them.
+ *
+ * A nesting count is maintained so that rt_task_suspend() and
+ * rt_task_resume() must be used in pairs.
+ *
+ * Receiving a Linux signal causes the suspended task to resume
+ * immediately.
+ *
+ * @param task The task descriptor. If @a task is NULL, the current
+ * task is suspended.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is NULL but the caller is not a
+ * Xenomai task, or if @a task is non-NULL but not a valid task
+ * descriptor.
+ *
+ * - -EINTR is returned if a Linux signal has been received by the
+ * caller while suspended.
+ *
+ * - -EPERM is returned if @a task is NULL and this service was called
+ * from an invalid context.
+ *
+ * @apitags{mode-unrestricted, switch-primary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ *
+ * @note Blocked and suspended task states are cumulative. Therefore,
+ * suspending a task currently waiting on a synchronization object
+ * (e.g. semaphore, queue) holds its execution until it is resumed,
+ * even if the awaited resource has been acquired or a timeout has
+ * elapsed in the meantime.
+ */
+int rt_task_suspend(RT_TASK *task)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task_or_self(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	if (tcb->suspends++ == 0)
+		ret = threadobj_suspend(&tcb->thobj);
+
+	put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_resume(RT_TASK *task)
+ * @brief Resume a real-time task.
+ *
+ * Forcibly resume the execution of a task which was previously
+ * suspended by a call to rt_task_suspend(), if the suspend nesting
+ * count decrements to zero.
+ *
+ * @param task The task descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ *
+ * @note Blocked and suspended task states are cumulative. Therefore,
+ * resuming a task currently waiting on a synchronization object
+ * (e.g. semaphore, queue) does not make it eligible for scheduling
+ * until the awaited resource is eventually acquired, or a timeout
+ * elapses.
+ */
+int rt_task_resume(RT_TASK *task)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	if (tcb->suspends > 0 && --tcb->suspends == 0)
+		ret = threadobj_resume(&tcb->thobj);
+
+	put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
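+
+/*
+ * Suspension nesting in a sketch, with "t" assumed to be a valid
+ * task descriptor: two suspend calls require two resume calls before
+ * the target task becomes runnable again.
+ *
+ *	rt_task_suspend(&t);
+ *	rt_task_suspend(&t);
+ *	rt_task_resume(&t);	still suspended, nesting count is 1
+ *	rt_task_resume(&t);	runnable again, nesting count is 0
+ */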
+
+/**
+ * @fn RT_TASK *rt_task_self(void)
+ * @brief Retrieve the current task descriptor.
+ *
+ * Return the address of the current Alchemy task descriptor.
+ *
+ * @return The address of the task descriptor referring to the current
+ * Alchemy task is returned upon success, or NULL if not called from a
+ * valid Alchemy task context.
+ *
+ * @apitags{xthread-only}
+ */
+RT_TASK *rt_task_self(void)
+{
+	struct alchemy_task *tcb;
+
+	tcb = alchemy_task_current();
+	if (tcb == NULL)
+		return NULL;
+
+	return &tcb->self;
+}
+
+/**
+ * @fn int rt_task_set_priority(RT_TASK *task, int prio)
+ * @brief Change the base priority of a real-time task.
+ *
+ * The base priority of a task defines the relative importance of the
+ * work being done by each task, which gains control of the CPU
+ * accordingly.
+ *
+ * Changing the base priority of a task does not affect the priority
+ * boost the target task might have obtained as a consequence of
+ * ongoing priority inheritance.
+ *
+ * @param task The task descriptor. If @a task is NULL, the priority
+ * of the current task is changed.
+ *
+ * @param prio The new priority. This value must range from [T_LOPRIO
+ * .. T_HIPRIO] (inclusive) where T_LOPRIO is the lowest effective
+ * priority.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor, or
+ * if @a prio is invalid.
+ *
+ * - -EPERM is returned if @a task is NULL and this service was called
+ * from an invalid context.
+ *
+ * @apitags{mode-unrestricted, switch-primary, switch-secondary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ *
+ * @note Assigning the same priority to a running or ready task moves
+ * it to the end of its priority group, thus causing a manual
+ * round-robin.
+ */
+int rt_task_set_priority(RT_TASK *task, int prio)
+{
+	struct sched_param_ex param_ex;
+	struct alchemy_task *tcb;
+	struct service svc;
+	int policy, ret;
+
+	ret = check_task_priority(prio);
+	if (ret)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task_or_self(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	policy = prio ? SCHED_FIFO : SCHED_OTHER;
+	param_ex.sched_priority = prio;
+	ret = threadobj_set_schedparam(&tcb->thobj, policy, &param_ex);
+	switch (ret) {
+	case -EIDRM:
+		ret = 0;
+		break;
+	default:
+		put_alchemy_task(tcb);
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_yield(void)
+ * @brief Manual round-robin.
+ *
+ * Move the current task to the end of its priority group, so that the
+ * next equal-priority task in ready state is switched in.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+int rt_task_yield(void)
+{
+	if (!threadobj_current_p())
+		return -EPERM;
+
+	threadobj_yield();
+
+	return 0;
+}
+
+/**
+ * @fn int rt_task_unblock(RT_TASK *task)
+ * @brief Unblock a real-time task.
+ *
+ * Break the task out of any wait it is currently in.  This call
+ * clears all delay and/or resource wait conditions for the target
+ * task.
+ *
+ * However, rt_task_unblock() does not resume a task which has been
+ * forcibly suspended by a previous call to rt_task_suspend().  If all
+ * suspensive conditions are gone, the task becomes eligible anew for
+ * scheduling.
+ *
+ * @param task The task descriptor.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ */
+int rt_task_unblock(RT_TASK *task)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	ret = threadobj_unblock(&tcb->thobj);
+	put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_slice(RT_TASK *task, RTIME quantum)
+ * @brief Set a task's round-robin quantum.
+ *
+ * Set the time credit allotted to a task undergoing the round-robin
+ * scheduling. If @a quantum is non-zero, rt_task_slice() also refills
+ * the current quantum for the target task, otherwise, time-slicing is
+ * stopped for that task.
+ *
+ * In other words, rt_task_slice() should be used to toggle
+ * round-robin scheduling for an Alchemy task.
+ *
+ * @param task The task descriptor. If @a task is NULL, the time
+ * credit of the current task is changed. @a task must belong to the
+ * current process.
+ *
+ * @param quantum The round-robin quantum for the task expressed in
+ * clock ticks (see note).
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor, or
+ * if @a quantum is invalid.
+ *
+ * - -EPERM is returned if @a task is NULL and this service was called
+ * from an invalid context.
+ *
+ * @apitags{mode-unrestricted, switch-primary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ *
+ * @note The @a quantum value is interpreted as a multiple of the
+ * Alchemy clock resolution (see --alchemy-clock-resolution option,
+ * defaults to 1 nanosecond).
+ */
+int rt_task_slice(RT_TASK *task, RTIME quantum)
+{
+	struct sched_param_ex param_ex;
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret, policy;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task_or_self(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	param_ex.sched_priority = threadobj_get_priority(&tcb->thobj);
+	if (quantum) {
+		struct timespec ts;
+		policy = SCHED_RR;
+		clockobj_ticks_to_timespec(&alchemy_clock, quantum, &ts);
+		param_ex.sched_rr_quantum.tv_sec = ts.tv_sec;
+		param_ex.sched_rr_quantum.tv_nsec = ts.tv_nsec;
+	} else {
+		policy = param_ex.sched_priority ? SCHED_FIFO : SCHED_OTHER;
+	}
+
+	ret = threadobj_set_schedparam(&tcb->thobj, policy, &param_ex);
+	switch (ret) {
+	case -EIDRM:
+		ret = 0;
+		break;
+	default:
+		put_alchemy_task(tcb);
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
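+
+/*
+ * A time-slicing sketch, with run_round_robin_section()
+ * hypothetical: move the current task to SCHED_RR with a 2 ms
+ * quantum, run the round-robin section, then disable time-slicing
+ * again, which switches the task back to SCHED_FIFO or SCHED_OTHER
+ * depending on its priority.
+ *
+ *	rt_task_slice(NULL, 2000000);
+ *	run_round_robin_section();
+ *	rt_task_slice(NULL, 0);
+ */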
+
+/**
+ * @fn int rt_task_set_mode(int clrmask, int setmask, int *mode_r)
+ * @brief Change the current task mode.
+ *
+ * Each Alchemy task has a set of internal flags determining several
+ * operating conditions. rt_task_set_mode() takes a bitmask of mode
+ * bits to clear for disabling the corresponding modes for the current
+ * task, and another one to set for enabling them. The mode bits which
+ * were previously in effect before the change can be returned upon
+ * request.
+ *
+ * The following bits can be part of the bitmask:
+ *
+ * - T_LOCK causes the current task to lock the scheduler on the
+ * current CPU, preventing all further involuntary task switches on
+ * this CPU. Clearing this bit unlocks the scheduler.
+ *
+ * - Only when running over the Cobalt core:
+ *
+ *   - T_WARNSW causes the SIGDEBUG signal to be sent to the current
+ * task whenever it switches to the secondary mode. This feature is
+ * useful to detect unwanted migrations to the Linux domain.
+ *
+ *   - T_CONFORMING can be passed in @a setmask to switch the current
+ * Alchemy task to its preferred runtime mode. The only meaningful use
+ * of this switch is to force a real-time task back to primary
+ * mode (see note). Any other use leads to a nop.
+ *
+ * These two last flags have no effect over the Mercury core, and are
+ * simply ignored.
+ *
+ * @param clrmask A bitmask of mode bits to clear for the current
+ * task, before @a setmask is applied. Zero is an acceptable value
+ * which leads to a no-op.
+ *
+ * @param setmask A bitmask of mode bits to set for the current
+ * task. Zero is an acceptable value which leads to a no-op.
+ *
+ * @param mode_r If non-NULL, @a mode_r must be a pointer to a memory
+ * location which will be written upon success with the previous set
+ * of active mode bits. If NULL, the previous set of active mode bits
+ * will not be returned.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if any bit from @a clrmask or @a setmask is
+ * invalid.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @note The caller must be an Alchemy task.
+ *
+ * @note Forcing the task mode using the T_CONFORMING bit from user
+ * code is almost always wrong, since the Xenomai/cobalt core handles
+ * mode switches internally when/if required. Most often, manual mode
+ * switching from applications introduces useless overhead. This mode
+ * bit is part of the API only to cover rare use cases in middleware
+ * code based on the Alchemy interface.
+ */
+int rt_task_set_mode(int clrmask, int setmask, int *mode_r)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret = 0;
+
+	if (threadobj_irq_p()) {
+		clrmask &= ~T_LOCK;
+		setmask &= ~T_LOCK;
+		return (clrmask | setmask) ? -EPERM : 0;
+	}
+
+	if (((clrmask | setmask) & ~(T_LOCK | T_WARNSW | T_CONFORMING)) != 0)
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task_or_self(NULL, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	ret = threadobj_set_mode(clrmask, setmask, mode_r);
+	put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
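+
+/*
+ * A scheduler-locked section sketch, with update_shared_state()
+ * hypothetical: T_LOCK is set on entry and cleared on exit; the
+ * previous mode bits are saved to oldmode in case the caller needs
+ * to restore them later.
+ *
+ *	int oldmode;
+ *
+ *	rt_task_set_mode(0, T_LOCK, &oldmode);
+ *	update_shared_state();
+ *	rt_task_set_mode(T_LOCK, 0, NULL);
+ */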
+
+/**
+ * @fn int rt_task_inquire(RT_TASK *task, RT_TASK_INFO *info)
+ * @brief Retrieve information about a real-time task.
+ *
+ * Return various information about an Alchemy task. This service may
+ * also be used to probe for task existence.
+ *
+ * @param task The task descriptor. If @a task is NULL, the
+ * information about the current task is returned.
+ *
+ * @param info  The address of a structure the task information will be
+ * written to. Passing NULL is valid, in which case the system is only
+ * probed for existence of the specified task.
+ *
+ * @return Zero is returned if the task exists. In addition, if @a
+ * info is non-NULL, it is filled in with task information. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor.
+ *
+ * - -EPERM is returned if @a task is NULL and this service was called
+ * from an invalid context.
+ *
+ * @apitags{mode-unrestricted, switch-primary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ */
+int rt_task_inquire(RT_TASK *task, RT_TASK_INFO *info)
+{
+	struct alchemy_task *tcb;
+	struct service svc;
+	int ret = 0;
+
+	CANCEL_DEFER(svc);
+
+	tcb = get_alchemy_task_or_self(task, &ret);
+	if (tcb == NULL)
+		goto out;
+
+	ret = __bt(threadobj_stat(&tcb->thobj, &info->stat));
+	if (ret)
+		goto out;
+
+	strcpy(info->name, tcb->name);
+	info->prio = threadobj_get_priority(&tcb->thobj);
+	info->pid = threadobj_get_pid(&tcb->thobj);
+
+	put_alchemy_task(tcb);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn ssize_t rt_task_send(RT_TASK *task, RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r, RTIME timeout)
+ * @brief Send a message to a real-time task (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_task_send_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param task The task descriptor.
+ *
+ * @param mcb_s The address of the message control block referring to
+ * the message to be sent.
+ *
+ * @param mcb_r The address of an optional message control block
+ * referring to the reply message area.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until a reply
+ * is received. Passing TM_NONBLOCK causes the service to return
+ * without blocking in case the recipient task is not waiting for
+ * messages at the time of the call.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_task_send_until(RT_TASK *task, RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r, RTIME abs_timeout)
+ * @brief Send a message to a real-time task (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_task_send_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param task The task descriptor.
+ *
+ * @param mcb_s The address of the message control block referring to
+ * the message to be sent.
+ *
+ * @param mcb_r The address of an optional message control block
+ * referring to the reply message area.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * a reply is received. Passing TM_NONBLOCK causes the service to
+ * return without blocking in case the recipient task is not waiting
+ * for messages at the time of the call.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn ssize_t rt_task_send_timed(RT_TASK *task, RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r, const struct timespec *abs_timeout)
+ * @brief Send a message to a real-time task.
+ *
+ * This service is part of the synchronous message passing support
+ * available to Alchemy tasks. The caller sends a variable-sized
+ * message to another task, waiting for the remote to receive the
+ * initial message by a call to rt_task_receive(), then reply to it
+ * using rt_task_reply().
+ *
+ * A basic message control block is used to store the location and
+ * size of the data area to send or retrieve upon reply, in addition
+ * to a user-defined operation code.
+ *
+ * @param task The task descriptor.
+ *
+ * @param mcb_s The address of the message control block referring to
+ * the message to be sent. The fields from this control block should
+ * be set as follows:
+ *
+ * - mcb_s->data should contain the address of the payload data to
+ * send to the remote task.
+ *
+ * - mcb_s->size should contain the size in bytes of the payload data
+ * pointed at by mcb_s->data. Zero is a legitimate value, and
+ * indicates that no payload data will be transferred. In the latter
+ * case, mcb_s->data will be ignored.
+ *
+ * - mcb_s->opcode is an opaque operation code carried during the
+ * message transfer, the caller can fill with any appropriate
+ * value. It will be made available "as is" to the remote task into
+ * the operation code field by the rt_task_receive() service.
+ *
+ * @param mcb_r The address of an optional message control block
+ * referring to the reply message area. If @a mcb_r is NULL and a
+ * reply is sent back by the remote task, the reply message will be
+ * discarded, and -ENOBUFS will be returned to the caller. When @a
+ * mcb_r is valid, the fields from this control block should be set as
+ * follows:
+ *
+ * - mcb_r->data should contain the address of a buffer large enough
+ * to collect the reply data from the remote task.
+ *
+ * - mcb_r->size should contain the size in bytes of the buffer space
+ * pointed at by mcb_r->data. If mcb_r->size is lower than the actual
+ * size of the reply message, no data copy takes place and -ENOBUFS is
+ * returned to the caller.
+ *
+ * Upon return, mcb_r->opcode will contain the status code sent back
+ * from the remote task using rt_task_reply(), or zero if unspecified.
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying a time limit to wait for the
+ * recipient task to reply to the initial message. Passing NULL causes
+ * the caller to block indefinitely until a reply is received. Passing
+ * { .tv_sec = 0, .tv_nsec = 0 } causes the service to return without
+ * blocking in case the recipient task is not waiting for messages at
+ * the time of the call.
+ *
+ * @return A positive value is returned upon success, representing the
+ * length (in bytes) of the reply message returned by the remote
+ * task. Zero is a success status, meaning either that @a mcb_r was
+ * NULL on entry, or that no actual message was passed to the remote
+ * call to rt_task_reply(). Otherwise:
+ *
+ * - -EINVAL is returned if @a task is not a valid task descriptor.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * - -ENOBUFS is returned if @a mcb_r does not point at a message area
+ * large enough to collect the remote task's reply. This includes the
+ * case where @a mcb_r is NULL on entry while the remote task
+ * attempts to send a reply message.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and the recipient @a task is not currently waiting
+ * for a message on the rt_task_receive() service.
+ *
+ * - -EIDRM is returned if @a task has been deleted while waiting for
+ * a reply.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before any reply was received from the recipient @a
+ * task.
+ *
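+ * A minimal client-side sketch (illustrative only; the peer name,
+ * opcode value and payload are hypothetical):
+ *
+ * @code
+ * RT_TASK peer;
+ * RT_TASK_MCB mcb_s, mcb_r;
+ * char out[] = "ping", in[16];
+ * ssize_t ret;
+ *
+ * rt_task_bind(&peer, "server", TM_INFINITE);
+ * mcb_s.opcode = 0x42;   // application-defined operation code
+ * mcb_s.data = out;
+ * mcb_s.size = sizeof(out);
+ * mcb_r.data = in;       // room for the reply payload
+ * mcb_r.size = sizeof(in);
+ * ret = rt_task_send_timed(&peer, &mcb_s, &mcb_r, NULL); // wait for the reply
+ * @endcode
+ *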
+ * @apitags{xthread-only, switch-primary}
+ */
+ssize_t rt_task_send_timed(RT_TASK *task,
+			   RT_TASK_MCB *mcb_s, RT_TASK_MCB *mcb_r,
+			   const struct timespec *abs_timeout)
+{
+	void *rbufin = NULL, *rbufout = NULL;
+	struct alchemy_task_wait *wait;
+	struct threadobj *current;
+	struct alchemy_task *tcb;
+	struct syncstate syns;
+	struct service svc;
+	ssize_t ret;
+	int err;
+
+	current = threadobj_current();
+	if (current == NULL)
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	tcb = find_alchemy_task(task, &err);
+	if (tcb == NULL) {
+		ret = err;
+		goto out;
+	}
+
+	ret = syncobj_lock(&tcb->sobj_msg, &syns);
+	if (ret)
+		goto out;
+
+	if (alchemy_poll_mode(abs_timeout)) {
+		if (!syncobj_count_drain(&tcb->sobj_msg)) {
+			ret = -EWOULDBLOCK;
+			goto done;
+		}
+		abs_timeout = NULL;
+	}
+
+	/* Get space for the reply. */
+	wait = threadobj_prepare_wait(struct alchemy_task_wait);
+
+	/*
+	 * Compute the next flow identifier, making sure that we won't
+	 * draw a null or negative value.
+	 */
+	if (++tcb->flowgen < 0)
+		tcb->flowgen = 1;
+
+	wait->request = *mcb_s;
+	/*
+	 * Payloads exchanged with remote tasks have to go through the
+	 * main heap.
+	 */
+	if (mcb_s->size > 0 && !threadobj_local_p(&tcb->thobj)) {
+		rbufin = xnmalloc(mcb_s->size);
+		if (rbufin == NULL) {
+			ret = -ENOMEM;
+			goto cleanup;
+		}
+		memcpy(rbufin, mcb_s->data, mcb_s->size);
+		wait->request.__dref = __moff(rbufin);
+	}
+	wait->request.flowid = tcb->flowgen;
+	if (mcb_r) {
+		wait->reply.size = mcb_r->size;
+		wait->reply.data = mcb_r->data;
+		if (mcb_r->size > 0 && !threadobj_local_p(&tcb->thobj)) {
+			rbufout = xnmalloc(mcb_r->size);
+			if (rbufout == NULL) {
+				ret = -ENOMEM;
+				goto cleanup;
+			}
+			wait->reply.__dref = __moff(rbufout);
+		}
+	} else {
+		wait->reply.data = NULL;
+		wait->reply.size = 0;
+	}
+
+	if (syncobj_count_drain(&tcb->sobj_msg))
+		syncobj_drain(&tcb->sobj_msg);
+
+	ret = syncobj_wait_grant(&tcb->sobj_msg, abs_timeout, &syns);
+	if (ret) {
+		threadobj_finish_wait();
+		if (ret == -EIDRM)
+			goto out;
+		goto done;
+	}
+
+	ret = wait->reply.size;
+	if (!threadobj_local_p(&tcb->thobj) && ret > 0 && mcb_r)
+		memcpy(mcb_r->data, rbufout, ret);
+cleanup:
+	threadobj_finish_wait();
+done:
+	syncobj_unlock(&tcb->sobj_msg, &syns);
+out:
+	if (rbufin)
+		xnfree(rbufin);
+	if (rbufout)
+		xnfree(rbufout);
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_receive(RT_TASK_MCB *mcb_r, RTIME timeout)
+ * @brief Receive a message from a real-time task (with relative scalar timeout).
+ *
+ * This routine is a variant of rt_task_receive_timed() accepting a
+ * relative timeout specification expressed as a scalar value.
+ *
+ * @param mcb_r The address of a message control block referring to
+ * the receive message area.
+ *
+ * @param timeout A delay expressed in clock ticks. Passing
+ * TM_INFINITE causes the caller to block indefinitely until a remote
+ * task eventually sends a message. Passing TM_NONBLOCK causes the
+ * service to return immediately without waiting if no remote task is
+ * currently waiting to send a message.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn int rt_task_receive_until(RT_TASK_MCB *mcb_r, RTIME abs_timeout)
+ * @brief Receive a message from a real-time task (with absolute scalar timeout).
+ *
+ * This routine is a variant of rt_task_receive_timed() accepting an
+ * absolute timeout specification expressed as a scalar value.
+ *
+ * @param mcb_r The address of a message control block referring to
+ * the receive message area.
+ *
+ * @param abs_timeout An absolute date expressed in clock ticks.
+ * Passing TM_INFINITE causes the caller to block indefinitely until
+ * a remote task eventually sends a message. Passing TM_NONBLOCK
+ * causes the service to return immediately without waiting if no
+ * remote task is currently waiting to send a message.
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+
+/**
+ * @fn int rt_task_receive_timed(RT_TASK_MCB *mcb_r, const struct timespec *abs_timeout)
+ * @brief Receive a message from a real-time task.
+ *
+ * This service is part of the synchronous message passing support
+ * available to Alchemy tasks. The caller receives a variable-sized
+ * message from another task. The sender is blocked until the caller
+ * invokes rt_task_reply() to finish the transaction.
+ *
+ * A basic message control block is used to store the location and
+ * size of the data area to receive from the client, in addition to a
+ * user-defined operation code.
+ *
+ * @param mcb_r The address of a message control block referring to
+ * the receive message area. The fields from this control block should
+ * be set as follows:
+ *
+ * - mcb_r->data should contain the address of a buffer large enough
+ * to collect the data sent by the remote task;
+ *
+ * - mcb_r->size should contain the size in bytes of the buffer space
+ * pointed at by mcb_r->data. If mcb_r->size is smaller than the
+ * actual size of the received message, no data copy takes place and
+ * -ENOBUFS is returned to the caller. See note.
+ *
+ * Upon return, mcb_r->opcode will contain the operation code sent
+ * from the remote task using rt_task_send().
+ *
+ * @param abs_timeout An absolute date expressed in seconds / nanoseconds,
+ * based on the Alchemy clock, specifying the time limit to wait for
+ * receiving a message. Passing NULL causes the caller to block
+ * indefinitely until a remote task eventually sends a message.
+ * Passing { .tv_sec = 0, .tv_nsec = 0 } causes the service to return
+ * immediately without waiting if no remote task is currently waiting
+ * to send a message.
+ *
+ * @return A strictly positive value is returned upon success,
+ * representing a flow identifier for the opening transaction; this
+ * token should be passed to rt_task_reply() in order to send back a
+ * reply to the remote task and unblock it appropriately. Otherwise:
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before a message was received.
+ *
+ * - -ENOBUFS is returned if @a mcb_r does not point at a message area
+ * large enough to collect the remote task's message.
+ *
+ * - -EWOULDBLOCK is returned if @a abs_timeout is { .tv_sec = 0,
+ * .tv_nsec = 0 } and no remote task is currently waiting to send
+ * a message to the caller.
+ *
+ * - -ETIMEDOUT is returned if no message was received before @a
+ * abs_timeout was reached.
+ *
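+ * A minimal server-side sketch (illustrative only; the buffer size
+ * and reply policy are hypothetical):
+ *
+ * @code
+ * RT_TASK_MCB mcb;
+ * char buf[16];
+ * int flowid;
+ *
+ * mcb.data = buf;
+ * mcb.size = sizeof(buf);
+ * flowid = rt_task_receive_timed(&mcb, NULL); // block until a message arrives
+ * if (flowid > 0)
+ *         rt_task_reply(flowid, NULL); // unblock the sender, no reply data
+ * @endcode
+ *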
+ * @apitags{xthread-only, switch-primary}
+ */
+int rt_task_receive_timed(RT_TASK_MCB *mcb_r,
+			  const struct timespec *abs_timeout)
+{
+	struct alchemy_task_wait *wait;
+	struct alchemy_task *current;
+	struct threadobj *thobj;
+	struct syncstate syns;
+	struct service svc;
+	RT_TASK_MCB *mcb_s;
+	int ret;
+
+	current = alchemy_task_current();
+	if (current == NULL)
+		return -EPERM;
+
+	CANCEL_DEFER(svc);
+
+	ret = syncobj_lock(&current->sobj_msg, &syns);
+	if (ret)
+		goto out;
+
+	while (!syncobj_grant_wait_p(&current->sobj_msg)) {
+		if (alchemy_poll_mode(abs_timeout)) {
+			ret = -EWOULDBLOCK;
+			goto done;
+		}
+		ret = syncobj_wait_drain(&current->sobj_msg, abs_timeout, &syns);
+		if (ret)
+			goto done;
+	}
+
+	thobj = syncobj_peek_grant(&current->sobj_msg);
+	wait = threadobj_get_wait(thobj);
+	mcb_s = &wait->request;
+
+	if (mcb_s->size > mcb_r->size) {
+		ret = -ENOBUFS;
+		goto fixup;
+	}
+
+	if (mcb_s->size > 0) {
+		if (!threadobj_local_p(thobj))
+			memcpy(mcb_r->data, __mptr(mcb_s->__dref), mcb_s->size);
+		else
+			memcpy(mcb_r->data, mcb_s->data, mcb_s->size);
+	}
+
+	/* The flow identifier is always strictly positive. */
+	ret = mcb_s->flowid;
+	mcb_r->opcode = mcb_s->opcode;
+fixup:
+	mcb_r->size = mcb_s->size;
+done:
+	syncobj_unlock(&current->sobj_msg, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_reply(int flowid, RT_TASK_MCB *mcb_s)
+ * @brief Reply to a remote task message.
+ *
+ * This service is part of the synchronous message passing support
+ * available to Alchemy tasks. The caller sends a variable-sized
+ * message back to a remote task, in response to this task's initial
+ * message received by a call to rt_task_receive(). As a consequence
+ * of calling rt_task_reply(), the remote task will be unblocked from
+ * the rt_task_send() service.
+ *
+ * A basic message control block is used to store the location and
+ * size of the data area to send back, in addition to a user-defined
+ * status code.
+ *
+ * @param flowid The flow identifier returned by a previous call to
+ * rt_task_receive() which uniquely identifies the current
+ * transaction.
+ *
+ * @param mcb_s The address of an optional message control block
+ * referring to the message to be sent back. If @a mcb_s is NULL, the
+ * remote will be unblocked without getting any reply data. When @a
+ * mcb_s is valid, the fields from this control block should be set as
+ * follows:
+ *
+ * - mcb_s->data should contain the address of the payload data to
+ * send to the remote task.
+ *
+ * - mcb_s->size should contain the size in bytes of the payload data
+ * pointed at by mcb_s->data. Zero is a legitimate value, and
+ * indicates that no payload data will be transferred. In that case,
+ * mcb_s->data will be ignored.
+ *
+ * - mcb_s->opcode is an opaque status code carried during the message
+ * transfer, which the caller may fill with any appropriate value. It
+ * will be made available "as is" to the remote task in the status
+ * code field by the rt_task_send() service. If @a mcb_s is NULL, zero
+ * will be returned to the remote task in the status code field.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a flowid is invalid.
+ *
+ * - -ENXIO is returned if @a flowid does not match the expected
+ * identifier returned from the latest call of the current task to
+ * rt_task_receive(), or if the remote task stopped waiting for the
+ * reply in the meantime (e.g. the remote could have been deleted or
+ * forcibly unblocked).
+ *
+ * - -ENOBUFS is returned if the reply data referred to by @a mcb_s is
+ * larger than the reply area mentioned by the remote task when
+ * calling rt_task_send(). In such a case, the remote task also
+ * receives -ENOBUFS on return from rt_task_send().
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
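+ * A sketch of a reply carrying data (the status code and payload are
+ * hypothetical; @a flowid comes from a previous rt_task_receive()):
+ *
+ * @code
+ * RT_TASK_MCB mcb;
+ * static char ack[] = "pong";
+ *
+ * mcb.opcode = 0;        // status code handed back to the sender
+ * mcb.data = ack;
+ * mcb.size = sizeof(ack);
+ * rt_task_reply(flowid, &mcb);
+ * @endcode
+ *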
+ * @apitags{xthread-only, switch-primary}
+ */
+int rt_task_reply(int flowid, RT_TASK_MCB *mcb_s)
+{
+	struct alchemy_task_wait *wait = NULL;
+	struct alchemy_task *current;
+	struct threadobj *thobj;
+	struct syncstate syns;
+	struct service svc;
+	RT_TASK_MCB *mcb_r;
+	size_t size;
+	int ret;
+
+	current = alchemy_task_current();
+	if (current == NULL)
+		return -EPERM;
+
+	if (flowid <= 0)
+		return -EINVAL;
+
+	CANCEL_DEFER(svc);
+
+	ret = __bt(syncobj_lock(&current->sobj_msg, &syns));
+	if (ret)
+		goto out;
+
+	ret = -ENXIO;
+	if (!syncobj_grant_wait_p(&current->sobj_msg))
+		goto done;
+
+	syncobj_for_each_grant_waiter(&current->sobj_msg, thobj) {
+		wait = threadobj_get_wait(thobj);
+		if (wait->request.flowid == flowid)
+			goto reply;
+	}
+	goto done;
+ reply:
+	size = mcb_s ? mcb_s->size : 0;
+	syncobj_grant_to(&current->sobj_msg, thobj);
+	mcb_r = &wait->reply;
+
+	/*
+	 * NOTE: sending back a NULL or zero-length reply is perfectly
+	 * valid; it just means to unblock the client without passing
+	 * it back any reply data. Sending a response larger than what
+	 * the client expects is invalid.
+	 */
+	if (mcb_r->size < size) {
+		ret = -ENOBUFS;	/* Client will get this too. */
+		mcb_r->size = -ENOBUFS;
+	} else {
+		ret = 0;
+		mcb_r->size = size;
+		if (size > 0) {
+			if (!threadobj_local_p(thobj))
+				memcpy(__mptr(mcb_r->__dref), mcb_s->data, size);
+			else
+				memcpy(mcb_r->data, mcb_s->data, size);
+		}
+	}
+
+	mcb_r->flowid = flowid;
+	mcb_r->opcode = mcb_s ? mcb_s->opcode : 0;
+done:
+	syncobj_unlock(&current->sobj_msg, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+/**
+ * @fn int rt_task_bind(RT_TASK *task, const char *name, RTIME timeout)
+ * @brief Bind to a task.
+ *
+ * This routine creates a new descriptor to refer to an existing
+ * Alchemy task identified by its symbolic name. If the object does
+ * not exist on entry, the caller may block until a task of the given
+ * name is created.
+ *
+ * @param task The address of a task descriptor filled in by the
+ * operation. The contents of this memory are undefined upon failure.
+ *
+ * @param name A valid NULL-terminated name which identifies the task
+ * to bind to. This string should match the object name argument
+ * passed to rt_task_create(), or rt_task_shadow().
+ *
+ * @param timeout The number of clock ticks to wait for the
+ * registration to occur (see note). Passing TM_INFINITE causes the
+ * caller to block indefinitely until the object is
+ * registered. Passing TM_NONBLOCK causes the service to return
+ * immediately without waiting if the object is not registered on
+ * entry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the retrieval has completed.
+ *
+ * - -EWOULDBLOCK is returned if @a timeout is equal to TM_NONBLOCK
+ * and the searched object is not registered on entry.
+ *
+ * - -ETIMEDOUT is returned if the object cannot be retrieved within
+ * the specified amount of time.
+ *
+ * - -EPERM is returned if this service should block, but was not
+ * called from a Xenomai thread.
+ *
+ * @apitags{xthread-nowait, switch-primary}
+ *
+ * @note The @a timeout value is interpreted as a multiple of the
+ * Alchemy clock resolution (see the --alchemy-clock-resolution
+ * option, which defaults to 1 nanosecond).
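+ *
+ * A minimal binding sketch (the task name is hypothetical):
+ *
+ * @code
+ * RT_TASK task;
+ * int ret = rt_task_bind(&task, "worker", TM_INFINITE);
+ * if (ret == 0)
+ *         rt_task_unbind(&task); // drop the reference when done
+ * @endcode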
+ */
+int rt_task_bind(RT_TASK *task,
+		 const char *name, RTIME timeout)
+{
+	return alchemy_bind_object(name,
+				   &alchemy_task_table,
+				   timeout,
+				   offsetof(struct alchemy_task, cobj),
+				   &task->handle);
+}
+
+/**
+ * @fn int rt_task_unbind(RT_TASK *task)
+ * @brief Unbind from a task.
+ *
+ * @param task The task descriptor.
+ *
+ * This routine releases a previous binding to an Alchemy task. After
+ * this call has returned, the descriptor is no longer valid for
+ * referencing this object.
+ *
+ * @apitags{thread-unrestricted}
+ */
+int rt_task_unbind(RT_TASK *task)
+{
+	*task = NO_ALCHEMY_TASK;
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/task.h b/kernel/xenomai-v3.2.4/lib/alchemy/task.h
new file mode 100644
index 0000000..e2e0b60
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/task.h
@@ -0,0 +1,78 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_TASK_H
+#define _ALCHEMY_TASK_H
+
+#include <sched.h>
+#include <semaphore.h>
+#include <errno.h>
+#include <boilerplate/list.h>
+#include <copperplate/syncobj.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/registry.h>
+#include <copperplate/cluster.h>
+#include <alchemy/task.h>
+
+struct alchemy_task {
+	char name[XNOBJECT_NAME_LEN];
+	int mode;
+	cpu_set_t affinity;
+	int suspends;
+	struct syncobj sobj_msg;
+	int flowgen;
+	struct threadobj thobj;
+	struct clusterobj cobj;
+	void (*entry)(void *arg);
+	void *arg;
+	RT_TASK self;
+	struct fsobj fsobj;
+};
+
+struct alchemy_task_wait {
+	struct RT_TASK_MCB request;
+	struct RT_TASK_MCB reply;
+};
+
+#define task_magic	0x8282ebeb
+
+static inline struct alchemy_task *alchemy_task_current(void)
+{
+	struct threadobj *thobj = threadobj_current();
+
+	if (thobj == NULL ||
+	    threadobj_get_magic(thobj) != task_magic)
+		return NULL;
+
+	return container_of(thobj, struct alchemy_task, thobj);
+}
+
+struct alchemy_task *get_alchemy_task(RT_TASK *task, int *err_r);
+
+struct alchemy_task *get_alchemy_task_or_self(RT_TASK *task, int *err_r);
+
+void put_alchemy_task(struct alchemy_task *tcb);
+
+static inline int check_task_priority(int prio)
+{				/* FIXME: HIPRIO is lower over Mercury */
+	return prio < T_LOPRIO || prio > T_HIPRIO ? -EINVAL : 0;
+}
+
+extern struct syncluster alchemy_task_table;
+
+#endif /* _ALCHEMY_TASK_H */
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/Makefile b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/Makefile
new file mode 100644
index 0000000..a831c99
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/Makefile
@@ -0,0 +1,70 @@
+ifeq ($(DESTDIR),)
+XENO_CONFIG=xeno-config
+else
+XENO_CONFIG=$(DESTDIR)/bin/xeno-config
+endif
+
+prefix := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --prefix)
+solibs := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --libdir)
+core := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --core)
+
+ifeq ($(prefix),)
+$(error Please add <xenomai-install-path>/bin to your PATH variable or specify DESTDIR)
+endif
+
+cobalt-only := pipe-1
+mercury-only :=
+core-specific = $($(core)-only)
+
+TESTS :=		\
+	task-1		\
+	task-2		\
+	task-3		\
+	task-4		\
+	task-5		\
+	task-6		\
+	task-7		\
+	task-8		\
+	task-9		\
+	task-10		\
+	mq-1		\
+	mq-2		\
+	mq-3		\
+	alarm-1		\
+	sem-1		\
+	sem-2		\
+	mutex-1		\
+	event-1		\
+	heap-1		\
+	heap-2		\
+	buffer-1	\
+	$(core-specific)
+
+CFLAGS := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --skin=alchemy --cflags) -g
+LDFLAGS := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --skin=alchemy --ldflags)
+CC = $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --cc)
+
+all: $(TESTS)
+
+%: %.c
+	$(CC) -o $@ $< $(CFLAGS) $(LDFLAGS)
+
+install: all
+	install -d $(prefix)/testsuite/alchemy
+	install -t $(prefix)/testsuite/alchemy $(TESTS)
+
+clean:
+	$(RM) $(TESTS) *~
+
+# Run the test suite. We pin all tests to CPU #0, so that SMP does not
+# alter the execution sequence we expect from them.
+test: all
+	@for t in $(TESTS); do \
+		echo -n $$t...; \
+		sudo LD_LIBRARY_PATH=$(solibs) $(VALGRIND) ./$$t --cpu-affinity=0 --silent && echo ok || echo BAD; \
+	done
+
+test/%: %
+	sudo LD_LIBRARY_PATH=$(solibs) $(VALGRIND) ./$(@F) --cpu-affinity=0 --silent && echo ok || echo BAD
+
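+# Example: run a single test, optionally under valgrind, e.g.
+#   make test/task-1 VALGRIND=valgrind
+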
+.PHONY: clean test
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/alarm-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/alarm-1.c
new file mode 100644
index 0000000..29b3bbe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/alarm-1.c
@@ -0,0 +1,88 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/alarm.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	5, 6, 8,
+	1, 4, 1, 4, 1,
+	2, 3, 7
+};
+
+static RT_TASK t_main;
+
+static RT_ALARM alrm;
+
+static void alarm_handler(void *arg)
+{
+	static int hits;
+	int ret;
+
+	traceobj_assert(&trobj, arg == &alrm);
+
+	traceobj_mark(&trobj, 1);
+
+	if (++hits >= 3) {
+		ret = rt_alarm_stop(&alrm);
+		traceobj_check(&trobj, ret, 0);
+		traceobj_mark(&trobj, 2);
+		ret = rt_task_resume(&t_main);
+		traceobj_check(&trobj, ret, 0);
+		traceobj_mark(&trobj, 3);
+		return;
+	}
+
+	traceobj_mark(&trobj, 4);
+}
+
+static void main_task(void *arg)
+{
+	RT_TASK *p;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	p = rt_task_self();
+	traceobj_assert(&trobj, p != NULL && rt_task_same(p, &t_main));
+
+	traceobj_mark(&trobj, 5);
+
+	ret = rt_alarm_start(&alrm, 200000000ULL, 200000000ULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_task_suspend(&t_main);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = rt_alarm_delete(&alrm);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_alarm_create(&alrm, "ALARM", alarm_handler, &alrm);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_spawn(&t_main, "main_task", 0,  50, 0, main_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 8);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/buffer-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/buffer-1.c
new file mode 100644
index 0000000..d6d1d9a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/buffer-1.c
@@ -0,0 +1,108 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/buffer.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_bgnd, t_fgnd;
+
+static RT_BUFFER buffer;
+
+static void foreground_task(void *arg)
+{
+	ssize_t ret;
+	char buf[6];
+	int n = 0;
+
+	traceobj_enter(&trobj);
+
+	/*
+	 * We should get a short read (1 byte), then a full read (2
+	 * bytes), and back again.
+	 */
+	for (;;) {
+		memset(buf, 0, sizeof(buf));
+		ret = rt_buffer_read(&buffer, buf, 2, TM_INFINITE);
+		switch (ret) {
+		case -EINVAL:
+		case -EIDRM:	/* Fine, deleted. */
+			goto out;
+		case 1:
+			traceobj_assert(&trobj, buf[0] == ((n / 2) % 26) + 'A');
+			break;
+		case 2:
+			traceobj_assert(&trobj, atoi(buf) == ((n / 2) % 10) * 11);
+			break;
+		default:
+			traceobj_assert(&trobj, 0);
+		}
+		n++;
+	}
+out:
+	traceobj_exit(&trobj);
+}
+
+static void background_task(void *arg)
+{
+	char c = 'A', s[3];
+	ssize_t ret;
+	int n = 0;
+
+	traceobj_enter(&trobj);
+
+	for (;;) {
+		ret = rt_buffer_write(&buffer, &c, 1, TM_INFINITE);
+		if (ret == -EINVAL || ret == -EIDRM)
+			break;
+		traceobj_assert(&trobj, ret == 1);
+		c++;
+		if (c > 'Z')
+			c = 'A';
+		sprintf(s, "%.2d", 11 * n);
+		ret = rt_buffer_write(&buffer, s, 2, TM_INFINITE);
+		if (ret == -EINVAL || ret == -EIDRM)
+			break;
+		traceobj_assert(&trobj, ret == 2);
+		n = (n + 1) % 10;
+	}
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_buffer_create(&buffer, NULL, 2, B_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_shadow(NULL, "main_task", 30, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_fgnd, "FGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnd, foreground_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  10, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_sleep(1500000000ULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_buffer_delete(&buffer);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/event-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/event-1.c
new file mode 100644
index 0000000..0893df3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/event-1.c
@@ -0,0 +1,97 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/event.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_bgnd, t_fgnd;
+
+static RT_EVENT event;
+
+static void background_task(void *arg)
+{
+	unsigned int flags;
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	for (n = 0; n < 10; n++) {
+		flags = 0;
+		ret = rt_event_wait(&event, 0x55555, &flags, EV_ANY, TM_INFINITE);
+		traceobj_check(&trobj, ret, 0);
+		traceobj_assert(&trobj, flags == 1 << n * 2);
+		ret = rt_event_clear(&event, flags, NULL);
+		traceobj_check(&trobj, ret, 0);
+		ret = rt_event_signal(&event, 2 << n * 2);
+		traceobj_check(&trobj, ret, 0);
+	}
+
+	ret = rt_event_wait(&event, 0x55555, &flags, EV_ANY, TM_INFINITE);
+	traceobj_check(&trobj, ret, -EIDRM);
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task(void *arg)
+{
+	unsigned int flags;
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	for (n = 0; n < 10; n++) {
+		flags = 0;
+		ret = rt_event_signal(&event, 1 << n * 2);
+		traceobj_check(&trobj, ret, 0);
+		ret = rt_event_wait(&event, 2 << n * 2, &flags, EV_ALL, TM_NONBLOCK);
+		traceobj_check(&trobj, ret, -EWOULDBLOCK);
+		ret = rt_event_wait(&event, 2 << n * 2, &flags, EV_ALL, TM_INFINITE);
+		traceobj_check(&trobj, ret, 0);
+		traceobj_assert(&trobj, flags == 2 << n * 2);
+		ret = rt_event_clear(&event, flags, NULL);
+		traceobj_check(&trobj, ret, 0);
+	}
+
+	rt_task_sleep(1000000ULL);
+	ret = rt_event_delete(&event);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	unsigned int flags;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_event_create(&event, "EVENT", 0, EV_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_event_signal(&event, 0x3);
+	traceobj_check(&trobj, ret, 0);
+	ret = rt_event_wait(&event, 0x1, &flags, EV_ALL, TM_NONBLOCK);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, flags == 0x1);
+	ret = rt_event_clear(&event, 0x3, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_fgnd, "FGND", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnd, foreground_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-1.c
new file mode 100644
index 0000000..48bc4a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-1.c
@@ -0,0 +1,113 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/heap.h>
+
+static int tseq[] = {
+	7, 1, 2, 3, 4,
+	8, 9, 5, 6, 10,
+	11, 12,
+};
+
+static struct traceobj trobj;
+
+static RT_TASK t_bgnd, t_fgnd;
+
+static void background_task(void *arg)
+{
+	void *p1, *p2;
+	RT_HEAP heap;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 7);
+	ret = rt_heap_bind(&heap, "HEAP", TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_mark(&trobj, 8);
+
+	ret = rt_heap_alloc(&heap, 8192, TM_NONBLOCK, &p1);
+	traceobj_mark(&trobj, 9);
+	traceobj_check(&trobj, ret, -EWOULDBLOCK);
+	ret = rt_heap_alloc(&heap, 8192, TM_INFINITE, &p1);
+	traceobj_mark(&trobj, 10);
+	traceobj_check(&trobj, ret, 0);
+	ret = rt_heap_alloc(&heap, 8192, TM_NONBLOCK, &p2);
+	traceobj_mark(&trobj, 11);
+	traceobj_check(&trobj, ret, 0);
+	ret = rt_heap_alloc(&heap, 8192, TM_INFINITE, &p1);
+	traceobj_mark(&trobj, 12);
+	traceobj_check(&trobj, ret, -EIDRM);
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task(void *arg)
+{
+	void *p1, *p2;
+	RT_HEAP heap;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+	ret = rt_heap_bind(&heap, "HEAP", TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_mark(&trobj, 2);
+
+	ret = rt_heap_alloc(&heap, 8192, TM_NONBLOCK, &p1);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_mark(&trobj, 3);
+	ret = rt_heap_alloc(&heap, 8192, TM_NONBLOCK, &p2);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_task_set_priority(NULL, 19);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_mark(&trobj, 5);
+	ret = rt_task_set_priority(NULL, 21);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_heap_free(&heap, p1);
+	traceobj_check(&trobj, ret, 0);
+	ret = rt_heap_free(&heap, p2);
+	traceobj_check(&trobj, ret, 0);
+
+	rt_task_sleep(1000000ULL);
+
+	ret = rt_heap_delete(&heap);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	RT_HEAP heap;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_task_create(&t_fgnd, "FGND", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnd, foreground_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_heap_create(&heap, "HEAP", 16384, H_PRIO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-2.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-2.c
new file mode 100644
index 0000000..ad67afb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/heap-2.c
@@ -0,0 +1,98 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/heap.h>
+#include <alchemy/queue.h>
+
+#define HEAPSIZE  16384
+#define MSGSIZE   16
+#define NMESSAGES (HEAPSIZE / MSGSIZE)
+#define POOLSIZE  (NMESSAGES * sizeof(void *))
+
+static struct traceobj trobj;
+
+static RT_TASK t_pull, t_push;
+
+static RT_HEAP heap1, heap2;
+
+static RT_QUEUE queue1, queue2;
+
+static void pull_task(void *arg)
+{
+	int ret, n = 0;
+	void *p;
+
+	traceobj_enter(&trobj);
+
+	while (n++ < 1000) {
+		ret = rt_heap_alloc(&heap1, MSGSIZE, TM_INFINITE, &p);
+		traceobj_check(&trobj, ret, 0);
+		ret = rt_queue_write(&queue1, &p, sizeof(p), Q_NORMAL);
+		traceobj_assert(&trobj, ret >= 0);
+
+		ret = rt_queue_read(&queue2, &p, sizeof(p), TM_INFINITE);
+		traceobj_assert(&trobj, ret == sizeof(p));
+		ret = rt_heap_free(&heap2, p);
+		traceobj_check(&trobj, ret, 0);
+	}
+
+	traceobj_exit(&trobj);
+}
+
+static void push_task(void *arg)
+{
+	int ret, n = 0;
+	void *p;
+
+	traceobj_enter(&trobj);
+
+	while (n++ < 1000) {
+		ret = rt_queue_read(&queue1, &p, sizeof(p), TM_INFINITE);
+		traceobj_assert(&trobj, ret == sizeof(p));
+		ret = rt_heap_free(&heap1, p);
+		traceobj_check(&trobj, ret, 0);
+
+		ret = rt_heap_alloc(&heap2, MSGSIZE, TM_INFINITE, &p);
+		traceobj_check(&trobj, ret, 0);
+		ret = rt_queue_write(&queue2, &p, sizeof(p), Q_NORMAL);
+		traceobj_assert(&trobj, ret >= 0);
+	}
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_heap_create(&heap1, "HEAP1", HEAPSIZE, H_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_heap_create(&heap2, "HEAP2", HEAPSIZE, H_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_queue_create(&queue1, "QUEUE1", POOLSIZE, NMESSAGES, Q_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_queue_create(&queue2, "QUEUE2", POOLSIZE, NMESSAGES, Q_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_pull, "PULL", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_push, "PUSH", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_pull, pull_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_push, push_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-1.c
new file mode 100644
index 0000000..ac9801e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-1.c
@@ -0,0 +1,103 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/queue.h>
+
+static struct traceobj trobj;
+
+#define NMESSAGES (sizeof(messages) / sizeof(messages[0]))
+
+static int messages[] = {
+	0xfafafafa,
+	0xbebebebe,
+	0xcdcdcdcd,
+	0xabcdefff,
+	0x12121212,
+	0x34343434,
+	0x56565656,
+	0x78787878,
+	0xdededede,
+	0xbcbcbcbc
+};
+
+static void main_task(void *arg)
+{
+	RT_QUEUE_INFO info;
+	int ret, msg = 0;
+	RT_QUEUE q;
+
+	traceobj_enter(&trobj);
+
+	ret = rt_queue_create(&q, "QUEUE", sizeof(messages), Q_UNLIMITED, 0xffffffff);
+	traceobj_check(&trobj, ret, -EINVAL);
+
+	ret = rt_queue_create(&q, "QUEUE", 0, NMESSAGES, Q_FIFO);
+	traceobj_check(&trobj, ret, -EINVAL);
+
+	ret = rt_queue_create(&q, "QUEUE", sizeof(messages), Q_UNLIMITED, Q_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_queue_delete(&q);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_queue_create(&q, "QUEUE", sizeof(messages), NMESSAGES, Q_PRIO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_queue_inquire(&q, &info);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, info.nmessages == 0);
+
+	ret = rt_queue_write(&q, &messages[0], sizeof(int), Q_NORMAL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_queue_inquire(&q, &info);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, info.nmessages == 1);
+
+	ret = rt_queue_write(&q, &messages[1], sizeof(int), Q_NORMAL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_queue_inquire(&q, &info);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, info.nmessages == 2);
+
+	ret = rt_queue_read(&q, &msg, sizeof(msg), TM_NONBLOCK);
+	traceobj_assert(&trobj, ret == sizeof(msg));
+	traceobj_assert(&trobj, msg == 0xfafafafa);
+
+	ret = rt_queue_inquire(&q, &info);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, info.nmessages == 1);
+
+	ret = rt_queue_read(&q, &msg, sizeof(msg), TM_NONBLOCK);
+	traceobj_assert(&trobj, ret == sizeof(msg));
+	traceobj_assert(&trobj, msg == 0xbebebebe);
+
+	ret = rt_queue_inquire(&q, &info);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, info.nmessages == 0);
+
+	ret = rt_queue_read(&q, &msg, sizeof(msg), 1000000ULL);
+	traceobj_check(&trobj, ret, -ETIMEDOUT);
+
+	ret = rt_queue_delete(&q);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	RT_TASK t_main;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_task_spawn(&t_main, "main_task", 0,  50, 0, main_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-2.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-2.c
new file mode 100644
index 0000000..9fea471
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-2.c
@@ -0,0 +1,108 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/queue.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	3, 4, 5, 6,
+	1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
+	7,
+};
+
+#define NMESSAGES (sizeof(messages) / sizeof(messages[0]))
+
+static int messages[] = {
+	0xfafafafa,
+	0xbebebebe,
+	0xcdcdcdcd,
+	0xabcdefff,
+	0x12121212,
+	0x34343434,
+	0x56565656,
+	0x78787878,
+	0xdededede,
+	0xbcbcbcbc,
+};
+
+RT_QUEUE q;
+
+static void peer_task(void *arg)
+{
+	int ret, msg, n;
+
+	traceobj_enter(&trobj);
+
+	n = 1;
+	do {
+		traceobj_mark(&trobj, 1);
+		ret = rt_queue_read(&q, &msg, sizeof(msg), TM_NONBLOCK);
+		traceobj_assert(&trobj, ret == sizeof(msg));
+		traceobj_assert(&trobj, msg == messages[NMESSAGES - n - 1]);
+		traceobj_mark(&trobj, 2);
+	} while (n++ < NMESSAGES - 1);
+
+	traceobj_exit(&trobj);
+}
+
+static void main_task(void *arg)
+{
+	RT_QUEUE_INFO info;
+	RT_TASK t_peer;
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	ret = rt_queue_create(&q, "QUEUE", sizeof(messages), NMESSAGES - 1, Q_PRIO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_task_set_priority(NULL, 11);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_task_spawn(&t_peer, "peer_task", 0,  10, 0, peer_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 5);
+
+	n = 0;
+	do {
+		ret = rt_queue_write(&q, &messages[n], sizeof(int), Q_URGENT); /* LIFO */
+	} while (++n < NMESSAGES && ret >= 0);
+
+	traceobj_assert(&trobj, ret == -ENOMEM && n == NMESSAGES);
+
+	traceobj_mark(&trobj, 6);
+
+	rt_task_sleep(10000000ULL);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = rt_queue_inquire(&q, &info);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, info.nmessages == 0);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	RT_TASK t_main;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_task_spawn(&t_main, "main_task", 0,  50, 0, main_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-3.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-3.c
new file mode 100644
index 0000000..7fcae02
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mq-3.c
@@ -0,0 +1,118 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/queue.h>
+
+#define NMESSAGES  10
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	11, 1, 2, 3, 12, 8, 14,
+	13, 4, 5, 6, 7, 9, 10
+};
+
+static RT_QUEUE q;
+
+static void main_task(void *arg)
+{
+	int ret, msg, n;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_queue_create(&q, "QUEUE", NMESSAGES * sizeof(int), NMESSAGES, Q_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 2);
+
+	for (msg = 0; msg < NMESSAGES; msg++) {
+		ret = rt_queue_write(&q, &msg, sizeof(int), Q_NORMAL);
+		traceobj_check(&trobj, ret, 0);
+	}
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_queue_write(&q, &msg, sizeof(int), Q_URGENT);
+	traceobj_check(&trobj, ret, -ENOMEM);
+
+	rt_task_sleep(100000000ULL);
+
+	ret = rt_queue_write(&q, &msg, sizeof(int), Q_URGENT);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_queue_read(&q, &msg, sizeof(msg), TM_INFINITE);
+	traceobj_assert(&trobj, ret == sizeof(int) && msg == 10);
+
+	traceobj_mark(&trobj, 5);
+
+	for (n = 1; n < NMESSAGES; n++) { /* peer task read #0 already. */
+		ret = rt_queue_read(&q, &msg, sizeof(msg), TM_INFINITE);
+		traceobj_assert(&trobj, ret == sizeof(int) && msg == n);
+	}
+
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_queue_delete(&q);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 7);
+
+	traceobj_exit(&trobj);
+}
+
+static void peer_task(void *arg)
+{
+	int ret, msg;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = rt_queue_read(&q, &msg, sizeof(msg), TM_INFINITE);
+	traceobj_assert(&trobj, ret == sizeof(int) && msg == 0);
+
+	traceobj_mark(&trobj, 14);
+
+	rt_task_sleep(100000000ULL);
+
+	traceobj_mark(&trobj, 9);
+
+	/* Valgrind will bark at this one; this is expected. */
+	ret = rt_queue_read(&q, &msg, sizeof(msg), TM_INFINITE);
+	traceobj_check(&trobj, ret, -EINVAL);
+
+	traceobj_mark(&trobj, 10);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	RT_TASK t_main, t_peer;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	traceobj_mark(&trobj, 11);
+
+	ret = rt_task_spawn(&t_main, "main_task", 0,  50, 0, main_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 12);
+
+	ret = rt_task_spawn(&t_peer, "peer_task", 0,  49, 0, peer_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 13);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mutex-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mutex-1.c
new file mode 100644
index 0000000..ef58a2c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/mutex-1.c
@@ -0,0 +1,148 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/mutex.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+	18, 1, 2, 3, 19, 4, 5, 16, 6, 17
+};
+
+static RT_TASK t_a, t_b;
+
+static RT_MUTEX mutex;
+
+static void task_a(void *arg)
+{
+	RT_TASK t;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_task_bind(&t, "taskB", TM_INFINITE);
+	traceobj_assert(&trobj, ret == 0 && rt_task_same(&t, &t_b));
+
+	traceobj_mark(&trobj, 2);
+
+	ret = rt_mutex_acquire(&mutex, TM_NONBLOCK);
+	traceobj_check(&trobj, ret, -EWOULDBLOCK);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_mutex_acquire(&mutex, 100000000ULL);
+	traceobj_check(&trobj, ret, -ETIMEDOUT);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_task_resume(&t);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = rt_mutex_acquire(&mutex, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 6);
+
+	traceobj_exit(&trobj);
+}
+
+static void task_b(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_task_set_priority(NULL, 19);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = rt_mutex_create(&mutex, "MUTEX");
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = rt_mutex_create(&mutex, "MUTEX");
+	traceobj_check(&trobj, ret, -EEXIST);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = rt_mutex_acquire(&mutex, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = rt_mutex_acquire(&mutex, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = rt_mutex_release(&mutex);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 12);
+
+	ret = rt_mutex_release(&mutex);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 13);
+
+	ret = rt_mutex_release(&mutex);
+	traceobj_check(&trobj, ret, -EPERM);
+
+	traceobj_mark(&trobj, 14);
+
+	ret = rt_mutex_acquire(&mutex, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 15);
+
+	ret = rt_task_suspend(rt_task_self());
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 16);
+
+	ret = rt_mutex_release(&mutex);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 17);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_task_create(&t_b, "taskB", 0, 21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_b, task_b, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 18);
+
+	ret = rt_task_create(&t_a, "taskA", 0, 20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_a, task_a, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 19);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/pipe-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/pipe-1.c
new file mode 100644
index 0000000..4202be2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/pipe-1.c
@@ -0,0 +1,139 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/pipe.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_real;
+
+static RT_PIPE mpipe;
+
+static pthread_t t_reg;
+
+static int minor;
+
+struct pipe_message {
+	int value;
+};
+
+static void realtime_task(void *arg)
+{
+	struct pipe_message m;
+	int ret, seq = 0;
+
+	traceobj_enter(&trobj);
+
+	ret = rt_pipe_bind(&mpipe, "pipe", TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	while (seq < 8192) {
+		ret = rt_pipe_read(&mpipe, &m, sizeof(m), TM_INFINITE);
+		traceobj_assert(&trobj, ret == sizeof(m));
+		traceobj_assert(&trobj, m.value == seq);
+		ret = rt_pipe_write(&mpipe, &m, sizeof(m),
+				    (seq & 1) ? P_URGENT : P_NORMAL);
+		traceobj_assert(&trobj, ret == sizeof(m));
+		seq++;
+	}
+
+	pthread_cancel(t_reg);
+
+	traceobj_exit(&trobj);
+}
+
+static void *regular_thread(void *arg)
+{
+	struct pipe_message m;
+	int fd, seq = 0;
+	ssize_t ret;
+	char *rtp;
+
+	asprintf(&rtp, "/dev/rtp%d", minor);
+
+	fd = open(rtp, O_RDWR);
+	free(rtp);
+	traceobj_assert(&trobj, fd >= 0);
+
+	for (;;) {
+		m.value = seq;
+		ret = write(fd, &m, sizeof(m));
+		traceobj_assert(&trobj, ret == sizeof(m));
+		ret = read(fd, &m, sizeof(m));
+		traceobj_assert(&trobj, ret == sizeof(m));
+		traceobj_assert(&trobj, m.value == seq);
+		seq++;
+	}
+
+	return NULL;
+}
+
+int main(int argc, char *const argv[])
+{
+	struct timespec ts_start, ts_end, ts_timeout, ts_delta;
+	struct pipe_message m;
+	RTIME start, end;
+	SRTIME timeout;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_pipe_create(&mpipe, "pipe", P_MINOR_AUTO, 0);
+	traceobj_assert(&trobj, ret >= 0);
+
+	ret = rt_pipe_delete(&mpipe);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_real, "realtime", 0,  10, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_real, realtime_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_pipe_create(&mpipe, "pipe", P_MINOR_AUTO, 16384);
+	traceobj_assert(&trobj, ret >= 0);
+	minor = ret;
+
+	ret = rt_pipe_read(&mpipe, &m, sizeof(m), TM_NONBLOCK);
+	traceobj_check(&trobj, ret, -EWOULDBLOCK);
+
+	ret = rt_pipe_read_until(&mpipe, &m, sizeof(m), TM_NONBLOCK);
+	traceobj_check(&trobj, ret, -EWOULDBLOCK);
+
+	ts_timeout.tv_sec = 0;
+	ts_timeout.tv_nsec = 0;
+	ret = rt_pipe_read_timed(&mpipe, &m, sizeof(m), &ts_timeout);
+	traceobj_check(&trobj, ret, -EWOULDBLOCK);
+
+	start = rt_timer_read();
+	timeout = rt_timer_ns2ticks(100000000);
+	ret = rt_pipe_read(&mpipe, &m, sizeof(m), timeout);
+	end = rt_timer_read();
+	traceobj_assert(&trobj, end - start >= timeout);
+	traceobj_assert(&trobj, end - start < timeout + rt_timer_ns2ticks(5000000));
+
+	start = rt_timer_read();
+	timeout = start + rt_timer_ns2ticks(100000000);
+	ret = rt_pipe_read_until(&mpipe, &m, sizeof(m), timeout);
+	end = rt_timer_read();
+	traceobj_assert(&trobj, end >= timeout);
+	traceobj_assert(&trobj, end < timeout + rt_timer_ns2ticks(5000000));
+
+	clock_gettime(CLOCK_COPPERPLATE, &ts_start);
+	timespec_adds(&ts_timeout, &ts_start, 100000000);
+	ret = rt_pipe_read_timed(&mpipe, &m, sizeof(m), &ts_timeout);
+	clock_gettime(CLOCK_COPPERPLATE, &ts_end);
+	timespec_sub(&ts_delta, &ts_end, &ts_timeout);
+	traceobj_assert(&trobj, ts_delta.tv_sec >= 0);
+	traceobj_assert(&trobj, ts_delta.tv_nsec < 5000000);
+
+	ret = pthread_create(&t_reg, NULL, regular_thread, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-1.c
new file mode 100644
index 0000000..3e526ed
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-1.c
@@ -0,0 +1,151 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/sem.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	10, 13,
+	1, 14, 15, 2, 3, 4,
+	5, 6, 7, 8, 16, 17, 18,
+	9, 19
+};
+
+static RT_TASK t_a, t_b;
+
+static RT_SEM sem;
+
+static void task_a(void *arg)
+{
+	int ret, oldmode;
+	RT_TASK t;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_task_set_mode(T_LOCK, T_LOCK, &oldmode);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_task_bind(&t, "taskB", TM_INFINITE);
+	traceobj_assert(&trobj, ret == 0 && rt_task_same(&t, &t_b));
+
+	traceobj_mark(&trobj, 5);
+
+	ret = rt_task_resume(&t);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = rt_task_set_mode(T_LOCK, 0, &oldmode);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = rt_task_suspend(NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_exit(&trobj);
+}
+
+static void task_b(void *arg)
+{
+	RT_TASK t;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = rt_sem_create(&sem, "SEMA", 0, S_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 13);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_bind(&t, "taskA", TM_INFINITE);
+	traceobj_assert(&trobj, ret == 0 && rt_task_same(&t, &t_a));
+
+	traceobj_mark(&trobj, 14);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 15);
+
+	ret = rt_task_suspend(NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 16);
+
+	ret = rt_sem_p(&sem, 10000000ULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 17);
+
+	ret = rt_sem_p(&sem, TM_NONBLOCK);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 18);
+
+	ret = rt_sem_p(&sem, 100000000ULL);
+	traceobj_check(&trobj, ret, -ETIMEDOUT);
+
+	traceobj_mark(&trobj, 19);
+
+	ret = rt_task_resume(&t);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_task_create(&t_a, "taskA", 0, 20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_b, "taskB", 0, 21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_b, task_b, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_a, task_a, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-2.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-2.c
new file mode 100644
index 0000000..c01f6a4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/sem-2.c
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/sem.h>
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 2, 3, 5, 4, 6
+};
+
+static RT_TASK t_main;
+
+static RT_SEM sem;
+
+static void main_task(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_sem_create(&sem, "SEMA", 1, S_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = rt_sem_p(&sem, TM_NONBLOCK);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, -EIDRM);
+
+	traceobj_mark(&trobj, 4);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_task_create(&t_main, "main_task", 0, 20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_main, main_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = rt_sem_delete(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 6);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-1.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-1.c
new file mode 100644
index 0000000..34a6c22
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-1.c
@@ -0,0 +1,32 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_main;
+
+static void main_task(void *arg)
+{
+	traceobj_enter(&trobj);
+	traceobj_assert(&trobj, arg == (void *)(long)0xdeadbeef);
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_task_create(&t_main, "main_task", 0, 99, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_main, main_task, (void *)(long)0xdeadbeef);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-10.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-10.c
new file mode 100644
index 0000000..58f4cb0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-10.c
@@ -0,0 +1,72 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/sem.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 6, 2, 3, 4, 7, 5
+};
+
+static RT_TASK t_test;
+
+static RT_SEM sem;
+
+static void test_task(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 7);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_sem_create(&sem, "SEMA", 0, S_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_test, "test_task", 0, 10, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_task_start(&t_test, test_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = rt_task_suspend(&t_test);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_task_resume(&t_test);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 5);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-2.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-2.c
new file mode 100644
index 0000000..e751ddd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-2.c
@@ -0,0 +1,102 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/sem.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	8, 1, 9, 4, 10, 5, 11, 2, 6, 7
+};
+
+static RT_TASK t_bgnd, t_fgnd;
+
+static RT_SEM sem;
+
+static void background_task(void *arg)
+{
+	unsigned int safety = 100000000, count = 0;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 2);
+
+	while (--safety > 0)
+		count++;
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 5);
+
+	rt_task_sleep(20000000ULL);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_task_delete(&t_bgnd);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 7);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_sem_create(&sem, "SEMA", 0, S_PRIO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = rt_task_create(&t_fgnd, "FGND", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnd, foreground_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-3.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-3.c
new file mode 100644
index 0000000..06f235a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-3.c
@@ -0,0 +1,48 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_a, t_b;
+
+int main(int argc, char *const argv[])
+{
+	RT_TASK t;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_task_create(&t_a, "taskA", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_b, "taskB", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_bind(&t, "taskA", TM_NONBLOCK);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, rt_task_same(&t, &t_a));
+
+	ret = rt_task_bind(&t, "taskB", TM_NONBLOCK);
+	traceobj_check(&trobj, ret, 0);
+	traceobj_assert(&trobj, rt_task_same(&t, &t_b));
+
+	ret = rt_task_delete(&t_a);
+	traceobj_check(&trobj, ret, 0);
+	ret = rt_task_bind(&t, "taskA", TM_NONBLOCK);
+	traceobj_check(&trobj, ret, -EWOULDBLOCK);
+
+	ret = rt_task_delete(&t_b);
+	traceobj_check(&trobj, ret, 0);
+	ret = rt_task_bind(&t, "taskB", TM_NONBLOCK);
+	traceobj_check(&trobj, ret, -EWOULDBLOCK);
+
+	ret = rt_task_shadow(NULL, "main_task", 1, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_bind(&t, "taskB", 1000000000ULL);
+	traceobj_check(&trobj, ret, -ETIMEDOUT);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-4.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-4.c
new file mode 100644
index 0000000..eba7fbb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-4.c
@@ -0,0 +1,103 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/sem.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	8, 1, 9, 4, 10, 2, 11, 12, 3, 5, 13
+};
+
+static RT_TASK t_bgnd, t_fgnd;
+
+static RT_SEM sem;
+
+static void background_task(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = rt_task_suspend(&t_fgnd);
+	traceobj_check(&trobj, ret, 0);
+
+	rt_task_sleep(20000000ULL);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_task_resume(&t_fgnd);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 13);
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 5);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_sem_create(&sem, "SEMA", 0, S_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = rt_task_create(&t_fgnd, "FGND", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnd, foreground_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 12);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-5.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-5.c
new file mode 100644
index 0000000..3c0fa42
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-5.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/sem.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	9, 1, 10, 3, 11, 4, 5, 6, 7, 2, 8, 12
+};
+
+static RT_TASK t_bgnd, t_fgnd;
+
+static RT_SEM sem;
+
+static void background_task(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 2);
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task(void *arg)
+{
+	RT_TASK_INFO info;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = rt_task_inquire(NULL, &info);
+	traceobj_assert(&trobj, ret == 0 && info.prio == 21);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = rt_task_set_priority(&t_bgnd, info.prio);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = rt_task_set_priority(&t_bgnd, info.prio + 1);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 8);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = rt_sem_create(&sem, "SEMA", 0, S_PRIO);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = rt_task_create(&t_fgnd, "FGND", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnd, foreground_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = rt_sem_v(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_mark(&trobj, 12);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-6.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-6.c
new file mode 100644
index 0000000..7c88fde
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-6.c
@@ -0,0 +1,77 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_bgnd, t_fgnd;
+
+static void background_task(void *arg)
+{
+	int msg, ret, flowid, n;
+	RT_TASK_MCB mcb;
+
+	traceobj_enter(&trobj);
+
+	for (n = 0; n < 10; n++) {
+		mcb.data = &msg;
+		mcb.size = sizeof(msg);
+		flowid = rt_task_receive(&mcb, TM_INFINITE);
+		traceobj_assert(&trobj, flowid > 0);
+		traceobj_assert(&trobj, mcb.opcode == 0x77);
+		traceobj_assert(&trobj, mcb.size == sizeof(msg));
+		traceobj_assert(&trobj, msg == n);
+		msg = ~msg;
+		ret = rt_task_reply(flowid, &mcb);
+		traceobj_check(&trobj, ret, 0);
+	}
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task(void *arg)
+{
+	RT_TASK_MCB mcb, mcb_r;
+	int ret, msg, notmsg;
+
+	traceobj_enter(&trobj);
+
+	for (msg = 0; msg < 10; msg++) {
+		rt_task_sleep(1000000ULL);
+		mcb.opcode = 0x77;
+		mcb.data = &msg;
+		mcb.size = sizeof(msg);
+		mcb_r.data = &notmsg;
+		mcb_r.size = sizeof(notmsg);
+		notmsg = msg;
+		ret = rt_task_send(&t_bgnd, &mcb, &mcb_r, TM_INFINITE);
+		traceobj_assert(&trobj, ret == sizeof(msg));
+		traceobj_assert(&trobj, notmsg == ~msg);
+	}
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_fgnd, "FGND", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnd, foreground_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-7.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-7.c
new file mode 100644
index 0000000..1350bb3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-7.c
@@ -0,0 +1,116 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_bgnd, t_fgnda, t_fgndb;
+
+static void background_task(void *arg)
+{
+	int msg, ret, flowid, n;
+	RT_TASK_MCB mcb;
+
+	traceobj_enter(&trobj);
+
+	ret = rt_task_reply(-1, &mcb);
+	traceobj_check(&trobj, ret, -EINVAL);
+
+	ret = rt_task_reply(999, &mcb);
+	traceobj_check(&trobj, ret, -ENXIO);
+
+	for (n = 0; n < 20; n++) {
+		mcb.data = &msg;
+		mcb.size = sizeof(msg);
+		flowid = rt_task_receive(&mcb, TM_NONBLOCK);
+		traceobj_assert(&trobj, flowid > 0);
+		traceobj_assert(&trobj, mcb.size == sizeof(msg));
+		switch (mcb.opcode) {
+		case 0x77:
+			msg = ~msg;
+			ret = rt_task_reply(flowid, &mcb);
+			traceobj_check(&trobj, ret, 0);
+			break;
+		case 0x78:
+			ret = rt_task_reply(flowid, &mcb);
+			traceobj_assert(&trobj, ret == -ENOBUFS);
+			break;
+		default:
+			traceobj_assert(&trobj, 0);
+		}
+	}
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task_a(void *arg)
+{
+	RT_TASK_MCB mcb, mcb_r;
+	int ret, msg, notmsg;
+
+	traceobj_enter(&trobj);
+
+	for (msg = 0; msg < 10; msg++) {
+		mcb.opcode = 0x77;
+		mcb.data = &msg;
+		mcb.size = sizeof(msg);
+		mcb_r.data = &notmsg;
+		mcb_r.size = sizeof(notmsg);
+		notmsg = msg;
+		ret = rt_task_send(&t_bgnd, &mcb, &mcb_r, TM_INFINITE);
+		traceobj_assert(&trobj, ret == sizeof(msg));
+		traceobj_assert(&trobj, notmsg == ~msg);
+	}
+
+	traceobj_exit(&trobj);
+}
+
+static void foreground_task_b(void *arg)
+{
+	RT_TASK_MCB mcb, mcb_r;
+	int ret, msg;
+
+	traceobj_enter(&trobj);
+
+	for (msg = 0; msg < 10; msg++) {
+		mcb.opcode = 0x78;
+		mcb.data = &msg;
+		mcb.size = sizeof(msg);
+		mcb_r.data = NULL;
+		mcb_r.size = 0;
+		ret = rt_task_send(&t_bgnd, &mcb, &mcb_r, TM_INFINITE);
+		traceobj_check(&trobj, ret, -ENOBUFS);
+	}
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_task_create(&t_bgnd, "BGND", 0,  20, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_fgnda, "FGND-A", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgnda, foreground_task_a, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_fgndb, "FGND-B", 0,  21, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_fgndb, foreground_task_b, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_bgnd, background_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-8.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-8.c
new file mode 100644
index 0000000..bb6dfff
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-8.c
@@ -0,0 +1,66 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+#include <alchemy/sem.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_rr1, t_rr2;
+
+static RT_SEM sem;
+
+#define RR_QUANTUM  500000ULL
+
+double d = 0.7;
+
+double f = 1.7;
+
+static void rr_task(void *arg)
+{
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	ret = rt_task_slice(NULL, RR_QUANTUM);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_sem_p(&sem, TM_INFINITE);
+	traceobj_check(&trobj, ret, 0);
+
+	for (n = 0; n < 1000000; n++) {
+		d *= 0.99;
+		f = d / 16;
+	}
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_sem_create(&sem, "SEMA", 0, S_FIFO);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_rr1, "rr_task_1", 0, 10, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_rr1, rr_task, "t1");
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_create(&t_rr2, "rr_task_2", 0, 10, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_start(&t_rr2, rr_task, "t2");
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_sem_broadcast(&sem);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-9.c b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-9.c
new file mode 100644
index 0000000..e358154
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/testsuite/task-9.c
@@ -0,0 +1,70 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
+#include <copperplate/traceobj.h>
+#include <alchemy/task.h>
+
+static struct traceobj trobj;
+
+static RT_TASK t_test;
+
+#define ONE_SECOND  1000000000ULL
+
+void sighandler(int sig)
+{
+	/* nop */
+}
+
+static void test_task(void *arg)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	ret = rt_task_sleep_until(TM_INFINITE);
+	traceobj_check(&trobj, ret, -EINTR);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	struct sigaction sa;
+	RT_TASK_INFO info;
+	sigset_t set;
+	int ret;
+
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = sighandler;
+	sigaction(SIGUSR1, &sa, NULL);
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rt_task_create(&t_test, "test_task", 0, 10, 0);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_inquire(&t_test, &info);
+	traceobj_check(&trobj, ret, 0);
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGUSR1);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	ret = rt_task_start(&t_test, test_task, NULL);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_sleep(ONE_SECOND);
+	traceobj_check(&trobj, ret, 0);
+
+	ret = __STD(kill(info.pid, SIGUSR1));
+	traceobj_check(&trobj, ret, 0);
+
+	ret = rt_task_sleep(ONE_SECOND);
+	traceobj_check(&trobj, ret, 0);
+	ret = rt_task_unblock(&t_test);
+	traceobj_check(&trobj, ret, 0);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/timer.c b/kernel/xenomai-v3.2.4/lib/alchemy/timer.c
new file mode 100644
index 0000000..7c08233
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/timer.c
@@ -0,0 +1,113 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <copperplate/threadobj.h>
+#include "timer.h"
+
+/**
+ * @ingroup alchemy
+ * @defgroup alchemy_timer Timer management services
+ *
+ * Services for reading and spinning on the hardware timer
+ *
+ * @{
+ */
+
+struct clockobj alchemy_clock;
+
+/**
+ * @fn SRTIME rt_timer_ns2ticks(SRTIME ns)
+ * @brief Convert nanoseconds to Alchemy clock ticks.
+ *
+ * Convert a count of nanoseconds to Alchemy clock ticks.  This
+ * routine operates on signed nanosecond values. This is the converse
+ * call to rt_timer_ticks2ns().
+ *
+ * @param ns The count of nanoseconds to convert.
+ *
+ * @return The corresponding value expressed in clock ticks of the
+ * Alchemy clock. The resolution of the Alchemy clock can be set using
+ * the --alchemy-clock-resolution option when starting the application
+ * process (defaults to 1 nanosecond).
+ *
+ * @apitags{unrestricted}
+ */
+SRTIME rt_timer_ns2ticks(SRTIME ns)
+{
+	return clockobj_ns_to_ticks(&alchemy_clock, ns);
+}
+
+/**
+ * @fn SRTIME rt_timer_ticks2ns(SRTIME ticks)
+ * @brief Convert Alchemy clock ticks to nanoseconds.
+ *
+ * Convert a count of Alchemy clock ticks to nanoseconds.  This
+ * routine operates on signed tick values. This is the converse
+ * call to rt_timer_ns2ticks().
+ *
+ * @param ticks The count of Alchemy clock ticks to convert.
+ *
+ * @return The corresponding value expressed in nanoseconds.  The
+ * resolution of the Alchemy clock can be set using the
+ * --alchemy-clock-resolution option when starting the application
+ * process (defaults to 1 nanosecond).
+ *
+ * @apitags{unrestricted}
+ */
+SRTIME rt_timer_ticks2ns(SRTIME ticks)
+{
+	return clockobj_ticks_to_ns(&alchemy_clock, ticks);
+}
+
+/**
+ * @fn void rt_timer_inquire(RT_TIMER_INFO *info)
+ * @brief Inquire about the Alchemy clock.
+ *
+ * Return status information about the Alchemy clock.
+ *
+ * @param info The address of a @ref RT_TIMER_INFO "structure" to fill
+ * with the clock information.
+ *
+ * @apitags{unrestricted}
+ */
+void rt_timer_inquire(RT_TIMER_INFO *info)
+{
+	info->period = clockobj_get_resolution(&alchemy_clock);
+	info->date = clockobj_get_time(&alchemy_clock);
+}
+
+/**
+ * @fn void rt_timer_spin(RTIME ns)
+ * @brief Busy wait burning CPU cycles.
+ *
+ * Enter a busy waiting loop for a count of nanoseconds.
+ *
+ * Since this service is always called with interrupts enabled, the
+ * caller might be preempted by other real-time activities, therefore
+ * the actual delay might be longer than specified.
+ *
+ * @param ns The time to wait expressed in nanoseconds.
+ *
+ * @apitags{unrestricted}
+ */
+void rt_timer_spin(RTIME ns)
+{
+	threadobj_spin(ns);
+}
+
+/** @} */
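
For reference, a minimal usage sketch of the services above (assuming the
Alchemy runtime has been bootstrapped as usual; the printf formatting is
illustrative only):

    #include <stdio.h>
    #include <alchemy/timer.h>

    static void timer_demo(void)
    {
        RT_TIMER_INFO info;
        /* Convert a 1 ms delay to Alchemy clock ticks and back. */
        SRTIME ticks = rt_timer_ns2ticks(1000000);
        SRTIME ns = rt_timer_ticks2ns(ticks);

        rt_timer_inquire(&info);
        printf("resolution=%lld ns, now=%llu, 1 ms=%lld ticks (%lld ns)\n",
               (long long)info.period, (unsigned long long)info.date,
               (long long)ticks, (long long)ns);

        rt_timer_spin(1000);    /* burn CPU cycles for ~1 us */
    }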
diff --git a/kernel/xenomai-v3.2.4/lib/alchemy/timer.h b/kernel/xenomai-v3.2.4/lib/alchemy/timer.h
new file mode 100644
index 0000000..cb0285f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/alchemy/timer.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _ALCHEMY_TIMER_H
+#define _ALCHEMY_TIMER_H
+
+#include <copperplate/clockobj.h>
+#include <alchemy/timer.h>
+
+#endif /* _ALCHEMY_TIMER_H */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/COPYING b/kernel/xenomai-v3.2.4/lib/analogy/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/Makefile.am b/kernel/xenomai-v3.2.4/lib/analogy/Makefile.am
new file mode 100644
index 0000000..4c6f0de
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/Makefile.am
@@ -0,0 +1,23 @@
+lib_LTLIBRARIES = libanalogy.la
+
+libanalogy_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 1:0:0 -lm
+
+libanalogy_la_LIBADD = @XENO_CORE_LDADD@
+
+libanalogy_la_SOURCES =	\
+	async.c		\
+	descriptor.c	\
+	info.c		\
+	internal.h	\
+	math.c		\
+	calibration.c	\
+	calibration.h	\
+	range.c		\
+	root_leaf.h	\
+	sync.c		\
+	sys.c
+
+libanalogy_la_CPPFLAGS =		\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/include 	\
+	-I$(top_srcdir)/lib/boilerplate
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/async.c b/kernel/xenomai-v3.2.4/lib/analogy/async.c
new file mode 100644
index 0000000..39d2109
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/async.c
@@ -0,0 +1,471 @@
+/**
+ * @file
+ * Analogy for Linux, command, transfer, etc. related features
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <rtdm/analogy.h>
+#include "internal.h"
+
+/**
+ * @ingroup analogy_lib_level1
+ * @defgroup analogy_lib_async1 Asynchronous acquisition API
+ * @{
+ */
+
+/**
+ * @brief Send a command to an Analogy device
+ *
+ * The function a4l_snd_command() triggers asynchronous
+ * acquisition.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] cmd Command structure
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -ENOMEM is returned if the system is out of memory
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EIO is returned if the selected subdevice cannot handle commands
+ * - -EBUSY is returned if the selected subdevice is already
+ *    processing an asynchronous operation
+ *
+ */
+int a4l_snd_command(a4l_desc_t * dsc, a4l_cmd_t * cmd)
+{
+	/* Basic checking */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	return __sys_ioctl(dsc->fd, A4L_CMD, cmd);
+}
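
As an illustration, a sketch of building and sending an acquisition
command (the channel descriptors and timing values below are
placeholders; real code derives them from the board capabilities, and
the acquisition started here would be stopped with a4l_snd_cancel()):

    #include <string.h>
    #include <rtdm/analogy.h>

    static unsigned int chans[2] = { 0, 1 };  /* placeholder descriptors */

    static int start_acq(a4l_desc_t *dsc)
    {
        a4l_cmd_t cmd;

        memset(&cmd, 0, sizeof(cmd));
        cmd.idx_subd = dsc->idx_read_subd; /* analog input subdevice */
        cmd.start_src = TRIG_NOW;          /* trigger immediately */
        cmd.scan_begin_src = TRIG_TIMER;
        cmd.scan_begin_arg = 1000000;      /* one scan every 1 ms (ns) */
        cmd.convert_src = TRIG_TIMER;
        cmd.convert_arg = 500000;          /* 0.5 ms between channels */
        cmd.scan_end_src = TRIG_COUNT;
        cmd.scan_end_arg = 2;              /* both channels per scan */
        cmd.stop_src = TRIG_NONE;          /* run until cancelled */
        cmd.nb_chan = 2;
        cmd.chan_descs = chans;

        return a4l_snd_command(dsc, &cmd);
    }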
+
+/**
+ * @brief Cancel an asynchronous acquisition
+ *
+ * The function a4l_snd_cancel() stops an asynchronous acquisition
+ * previously set up by an Analogy command.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Subdevice index
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EIO is returned if the selected subdevice does not support
+ *    asynchronous operation
+ *
+ */
+int a4l_snd_cancel(a4l_desc_t * dsc, unsigned int idx_subd)
+{
+	/* Basic checking */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	return __sys_ioctl(dsc->fd, A4L_CANCEL, (void *)(long)idx_subd);
+}
+
+/**
+ * @brief Change the size of the asynchronous buffer
+ *
+ * During asynchronous acquisition, a ring-buffer enables the
+ * transfers from / to user-space. Functions like a4l_read() or
+ * a4l_write() recovers / sends data through this intermediate
+ * buffer. The function a4l_set_bufsize() can change the size of the
+ * ring-buffer. Please note, there is one ring-buffer per subdevice
+ * capable of asynchronous acquisition. By default, each buffer size
+ * is set to 64 KB.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] size New buffer size; the maximum allowed value is
+ * 16 MB (A4L_BUF_MAXSIZE)
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if the analogy descriptor is not correct or
+ *    if some argument is missing or wrong (Please, type "dmesg" for
+ *    more info)
+ * - -EPERM is returned if the function is called in an RT context or
+ *    if the buffer to resize is mapped in user-space (Please, type
+ *    "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EBUSY is returned if the selected subdevice is already
+ *    processing an asynchronous operation
+ * - -ENOMEM is returned if the system is out of memory
+ *
+ */
+int a4l_set_bufsize(a4l_desc_t * dsc,
+		    unsigned int idx_subd, unsigned long size)
+{
+	/* Basic checking */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	return a4l_sys_bufcfg(dsc->fd, idx_subd, size);
+}
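
For example (the 1 MB figure is arbitrary), enlarging the input
ring-buffer before the acquisition starts could look like:

    static int grow_input_buffer(a4l_desc_t *dsc)
    {
        /* Grow the read subdevice's ring-buffer to 1 MB; this must
           happen before the buffer gets mapped with a4l_mmap(). */
        return a4l_set_bufsize(dsc, dsc->idx_read_subd, 1024 * 1024);
    }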
+
+/* Set the wake-up threshold of the asynchronous buffer. */
+int a4l_set_wakesize(a4l_desc_t * dsc, unsigned long size)
+{
+	a4l_bufcfg2_t cfg = { .wake_count = size };
+
+	/* Basic checking */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	return __sys_ioctl(dsc->fd, A4L_BUFCFG2, &cfg);
+}
+
+/* Retrieve the current wake-up threshold of the asynchronous buffer. */
+int a4l_get_wakesize(a4l_desc_t * dsc, unsigned long *size)
+{
+	int err;
+	a4l_bufcfg2_t cfg;
+
+	/* Basic checking */
+	if (size == NULL || dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	err = __sys_ioctl(dsc->fd, A4L_BUFINFO2, &cfg);
+
+	if (err == 0)
+		*size = cfg.wake_count;
+
+	return err;
+}
+
+/**
+ * @brief Get the size of the asynchronous buffer
+ *
+ * During asynchronous acquisition, a ring-buffer enables the
+ * transfers from / to user-space. Functions like a4l_read() or
+ * a4l_write() retrieve / send data through this intermediate
+ * buffer. Note that there is one ring-buffer per subdevice
+ * capable of asynchronous acquisition. By default, each buffer size
+ * is set to 64 KB.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[out] size Buffer size
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ *
+ */
+int a4l_get_bufsize(a4l_desc_t * dsc,
+		    unsigned int idx_subd, unsigned long *size)
+{
+	a4l_bufinfo_t info = { idx_subd, 0, 0 };
+	int ret;
+
+	/* Basic checks */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	if (size == NULL)
+		return -EINVAL;
+
+	ret = __sys_ioctl(dsc->fd, A4L_BUFINFO, &info);
+
+	if (ret == 0)
+		*size = info.buf_size;
+
+	return ret;
+}
+
+/**
+ * @brief Update the asynchronous buffer state
+ *
+ * When the asynchronous ring-buffer is not mapped into user-space
+ * (see a4l_mmap()), the common read / write syscalls have to be
+ * used.
+ * In the input case, a4l_read() must be used for:
+ * - the retrieval of the acquired data;
+ * - the notification to the Analogy layer that the acquired data
+ *   have been consumed, so that the area of the ring-buffer which
+ *   contained them becomes available again.
+ * In the output case, a4l_write() must be called to:
+ * - send some data to the Analogy layer;
+ * - signal the Analogy layer that a chunk of data in the ring-buffer
+ *   must be used by the driver.
+ *
+ * In the mmap configuration, both features are provided by a single
+ * function named a4l_mark_bufrw().
+ * In the input case, a4l_mark_bufrw() can:
+ * - recover the count of data newly available in the ring-buffer.
+ * - notify the Analogy layer how many bytes have been consumed.
+ * In output case, a4l_mark_bufrw() can:
+ * - recover the count of data available for writing.
+ * - notify Analogy that some bytes have been written.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] cur Amount of consumed data
+ * @param[out] new Amount of available data
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; the
+ *    descriptor and the new pointer should be checked; check also the
+ *    kernel log ("dmesg")
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ *
+ */
+int a4l_mark_bufrw(a4l_desc_t * dsc,
+		   unsigned int idx_subd,
+		   unsigned long cur, unsigned long *new)
+{
+	int ret;
+	a4l_bufinfo_t info = { idx_subd, 0, cur };
+
+	/* Basic checks */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	if (new == NULL)
+		return -EINVAL;
+
+	ret = __sys_ioctl(dsc->fd, A4L_BUFINFO, &info);
+
+	if (ret == 0)
+		*new = info.rw_count;
+
+	return ret;
+}
+
+/**
+ * @brief Get the available data count
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] ms_timeout The number of milliseconds to wait for some
+ * data to be available. Passing A4L_INFINITE causes the caller to
+ * block indefinitely until some data is available. Passing
+ * A4L_NONBLOCK causes the function to return immediately without
+ * waiting for any available data
+ *
+ * @return the available data count. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EINTR is returned if the calling task has been unblocked by a signal
+ *
+ */
+int a4l_poll(a4l_desc_t * dsc,
+	     unsigned int idx_subd, unsigned long ms_timeout)
+{
+	int ret;
+	a4l_poll_t poll = { idx_subd, ms_timeout };
+
+	/* Basic checks */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	ret = __sys_ioctl(dsc->fd, A4L_POLL, &poll);
+
+	/* There is an ugly cast, but it is the only way to stick with
+	   the original Comedi API */
+	if (ret == 0)
+		ret = (int)poll.arg;
+
+	return ret;
+}
+
+/**
+ * @brief Map the asynchronous ring-buffer into user-space
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] size Size of the buffer to map
+ * @param[out] ptr Address of the pointer containing the assigned
+ * address on return
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong, the
+ *    descriptor and the pointer should be checked; check also the
+ *    kernel log
+ * - -EPERM is returned if the function is called in an RT context or
+ *    if the buffer to map is already mapped in user-space (Please, type
+ *    "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EBUSY is returned if the buffer is already mapped in user-space
+ *
+ */
+int a4l_mmap(a4l_desc_t * dsc,
+	     unsigned int idx_subd, unsigned long size, void **ptr)
+{
+	int ret;
+	a4l_mmap_t map = { idx_subd, size, NULL };
+
+	/* Basic checks */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	if (ptr == NULL)
+		return -EINVAL;
+
+	ret = __sys_ioctl(dsc->fd, A4L_MMAP, &map);
+
+	if (ret == 0)
+		*ptr = map.ptr;
+
+	return ret;
+}
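
A sketch combining a4l_mmap() with a4l_mark_bufrw() for zero-copy input
follows; error unwinding and ring-buffer wrap handling are trimmed, and
the 64 KB size is an assumption matching the default buffer size:

    static int consume_mapped(a4l_desc_t *dsc, unsigned int subd)
    {
        unsigned long avail = 0, done = 0;
        void *map;
        int ret;

        ret = a4l_mmap(dsc, subd, 64 * 1024, &map);
        if (ret < 0)
            return ret;

        for (;;) {
            /* Report the bytes consumed so far and fetch the count
               of bytes now available for reading. */
            ret = a4l_mark_bufrw(dsc, subd, done, &avail);
            if (ret < 0)
                return ret;
            /* ... process 'avail' bytes starting at 'map', minding
               wrap-around at the end of the mapped area ... */
            done = avail;
        }
    }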
+
+/** @} Command syscall API */
+
+/**
+ * @ingroup analogy_lib_level2
+ * @defgroup analogy_lib_async2 Asynchronous acquisition API
+ * @{
+ */
+
+/**
+ * @brief Perform asynchronous read operation on the analog input
+ * subdevice
+ *
+ * The function a4l_async_read() is only useful for acquisition
+ * configured through an Analogy command.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[out] buf Input buffer
+ * @param[in] nbyte Number of bytes to read
+ * @param[in] ms_timeout The number of milliseconds to wait for some
+ * data to be available. Passing A4L_INFINITE causes the caller to
+ * block indefinitely until some data is available. Passing
+ * A4L_NONBLOCK causes the function to return immediately without
+ * waiting for any available data
+ *
+ * @return Number of bytes read, otherwise negative error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong, the
+ *    descriptor should be checked; check also the kernel log
+ * - -ENOENT is returned if the device's reading subdevice is idle (no
+ *    command was sent)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EINTR is returned if the calling task has been unblocked by a signal
+ *
+ */
+int a4l_async_read(a4l_desc_t * dsc,
+		   void *buf, size_t nbyte, unsigned long ms_timeout)
+{
+	/* Basic checking */
+	if (dsc == NULL)
+		return -EINVAL;
+
+	/* The function a4l_poll() is useful only if
+	   the timeout is not A4L_INFINITE (== 0) */
+	if (ms_timeout != A4L_INFINITE) {
+		int ret;
+
+		ret = a4l_poll(dsc, dsc->idx_read_subd, ms_timeout);
+		if (ret < 0)
+			return ret;
+
+		/* If the timeout value is equal to A4L_NONBLOCK,
+		   there is no need to launch any read operation */
+		if (ret == 0 && ms_timeout == A4L_NONBLOCK)
+			return ret;
+	}
+
+	/* One more basic check */
+	if (dsc->fd < 0)
+		return -EINVAL;
+
+	/* Performs the read operation */
+	return a4l_sys_read(dsc->fd, buf, nbyte);
+}
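+
+/*
+ * Usage sketch (not part of the library): drain a command-driven
+ * acquisition. The command is assumed to have been sent beforehand
+ * (e.g. with a4l_snd_command(), defined elsewhere in this library);
+ * consume() stands for any application-level handler.
+ *
+ *	char buf[4096];
+ *	int n;
+ *
+ *	while ((n = a4l_async_read(&dsc, buf, sizeof(buf),
+ *				   A4L_INFINITE)) > 0)
+ *		consume(buf, n);
+ */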
+
+/**
+ * @brief Perform asynchronous write operation on the analog output
+ * subdevice
+ *
+ * The function a4l_async_write() is only useful for acquisition
+ * configured through an Analogy command.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] buf Output buffer
+ * @param[in] nbyte Number of bytes to write
+ * @param[in] ms_timeout The number of milliseconds to wait for some
+ * free area to be available. Passing A4L_INFINITE causes the
+ * caller to block indefinitely until some space is available. Passing
+ * A4L_NONBLOCK causes the function to return immediately without
+ * waiting for any available space to write data.
+ *
+ * @return Number of bytes written, otherwise negative error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; the
+ *    descriptor should be checked; check also the kernel log
+ * - -ENOENT is returned if the device's writing subdevice is idle (no
+ *    command was sent)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EINTR is returned if calling task has been unblocked by a signal
+ *
+ */
+int a4l_async_write(a4l_desc_t * dsc,
+		    void *buf, size_t nbyte, unsigned long ms_timeout)
+{
+	/* Basic checking */
+	if (dsc == NULL)
+		return -EINVAL;
+
+	/* The function a4l_poll() is useful only if
+	   the timeout is not A4L_INFINITE (== 0) */
+	if (ms_timeout != A4L_INFINITE) {
+		int ret;
+
+		ret = a4l_poll(dsc, dsc->idx_write_subd, ms_timeout);
+		if (ret < 0)
+			return ret;
+
+		/* If the timeout value is equal to A4L_NONBLOCK,
+		   there is no need to launch any write operation */
+		if (ret == 0 && ms_timeout == A4L_NONBLOCK)
+			return ret;
+	}
+
+	/* One more basic check */
+	if (dsc->fd < 0)
+		return -EINVAL;
+
+	/* Performs the write operation */
+	return a4l_sys_write(dsc->fd, buf, nbyte);
+}
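+
+/*
+ * Usage sketch (not part of the library): feed a waveform chunk to
+ * the output subdevice without blocking; a return value smaller than
+ * the requested byte count means the ring-buffer is momentarily full
+ * and the remaining bytes should be resubmitted later.
+ *
+ *	int n = a4l_async_write(&dsc, wave, wave_len, A4L_NONBLOCK);
+ *	if (n >= 0 && n < wave_len)
+ *		...	(retry the remaining wave_len - n bytes)
+ */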
+
+/** @} Asynchronous acquisition API */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/calibration.c b/kernel/xenomai-v3.2.4/lib/analogy/calibration.c
new file mode 100644
index 0000000..b50cb2f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/calibration.c
@@ -0,0 +1,473 @@
+/**
+ * @file
+ * Analogy for Linux, device, subdevice, etc. related features
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2014 Jorge A. Ramirez-Ortiz <jro@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <math.h>
+#include <rtdm/analogy.h>
+#include <stdio.h>
+#include <errno.h>
+#include "iniparser/iniparser.h"
+#include "boilerplate/list.h"
+#include "calibration.h"
+
+#define CHK(func, ...)								\
+do {										\
+	int rc = func(__VA_ARGS__);						\
+	if (rc < 0) 								\
+		return -1;							\
+} while (0)
+
+#define ARRAY_LEN(a)  (sizeof(a) / sizeof((a)[0]))
+
+static lsampl_t data32_get(void *src)
+{
+	return (lsampl_t) * ((lsampl_t *) (src));
+}
+
+static lsampl_t data16_get(void *src)
+{
+	return (lsampl_t) * ((sampl_t *) (src));
+}
+
+static lsampl_t data8_get(void *src)
+{
+	return (lsampl_t) * ((unsigned char *)(src));
+}
+
+static void data32_set(void *dst, lsampl_t val)
+{
+	*((lsampl_t *) (dst)) = val;
+}
+
+static void data16_set(void *dst, lsampl_t val)
+{
+	*((sampl_t *) (dst)) = (sampl_t) (0xffff & val);
+}
+
+static void data8_set(void *dst, lsampl_t val)
+{
+	*((unsigned char *)(dst)) = (unsigned char)(0xff & val);
+}
+
+static inline int read_dbl(double *d, struct _dictionary_ *f, const char *subd,
+			   int subd_idx, char *type, int type_idx)
+{
+	char *str;
+	int ret;
+
+	/* only the coefficients are stored as doubles */
+	if (strncmp(type, COEFF_STR, strlen(COEFF_STR)) != 0)
+		return -ENOENT;
+
+	ret = asprintf(&str, COEFF_FMT, subd, subd_idx, type, type_idx);
+	if (ret < 0)
+		return ret;
+
+	*d = iniparser_getdouble(f, str, -255.0);
+	if (*d == -255.0)
+		ret = -ENOENT;
+	free(str);
+
+	return ret;
+}
+
+static inline int read_int(int *val, struct _dictionary_ *f, const char *subd,
+			   int subd_idx, char *type)
+{
+	char *str;
+	int ret;
+
+	ret = (subd_idx >= 0) ?
+	      asprintf(&str, ELEMENT_FIELD_FMT, subd, subd_idx, type):
+	      asprintf(&str, ELEMENT_FMT, subd, type);
+	if (ret < 0)
+		return ret;
+
+	*val = iniparser_getint(f, str, 0xFFFF);
+	if (*val == 0xFFFF)
+		ret = -ENOENT;
+	free(str);
+
+	return ret;
+}
+
+static inline int read_str(char **val, struct _dictionary_ *f, const char *subd,
+			   const char *type)
+{
+	char *str;
+	int ret;
+
+	ret = asprintf(&str, ELEMENT_FMT, subd, type);
+	if (ret < 0)
+		return ret;
+
+	*val = (char *) iniparser_getstring(f, str, NULL);
+	if (*val == NULL)
+		ret = -ENOENT;
+	free(str);
+
+	return ret;
+}
+
+static inline void write_calibration(FILE *file, char *fmt, ...)
+{
+	va_list ap;
+
+	if (!file)
+		return;
+
+	va_start(ap, fmt);
+	vfprintf(file, fmt, ap);
+	fflush(file);
+	va_end(ap);
+}
+
+void
+write_calibration_file(FILE *dst, struct listobj *l,
+		       struct a4l_calibration_subdev *subd, a4l_desc_t *desc)
+{
+	struct subdevice_calibration_node *e, *t;
+	int i, j = 0;
+
+	if (list_empty(l))
+		return;
+
+	/* TODO: modify the meaning of board/driver in the proc */
+	if (desc) {
+		write_calibration(dst, "[%s] \n",PLATFORM_STR);
+		write_calibration(dst, DRIVER_STR" = %s;\n", desc->board_name);
+		write_calibration(dst, BOARD_STR" = %s;\n", desc->driver_name);
+	}
+
+	write_calibration(dst, "\n[%s] \n", subd->name);
+	write_calibration(dst, INDEX_STR" = %d;\n", subd->idx);
+	list_for_each_entry_safe(e, t, l, node) {
+		j++;
+	}
+	write_calibration(dst, ELEMENTS_STR" = %d;\n", j);
+
+	j = 0;
+	list_for_each_entry_safe(e, t, l, node) {
+		write_calibration(dst, "[%s_%d] \n", subd->name, j);
+		write_calibration(dst, CHANNEL_STR" = %d;\n", e->channel);
+		write_calibration(dst, RANGE_STR" = %d;\n", e->range);
+		write_calibration(dst, EXPANSION_STR" = %g;\n",
+				  e->polynomial->expansion_origin);
+		write_calibration(dst, NBCOEFF_STR"= %d;\n",
+				  e->polynomial->nb_coefficients);
+
+		for (i = 0; i < e->polynomial->nb_coefficients; i++)
+			write_calibration(dst, COEFF_STR"_%d = %g;\n",
+					  i,
+					  e->polynomial->coefficients[i]);
+		j++;
+	}
+
+	return;
+}
+
+/*!
+ * @ingroup analogy_lib_level2
+ * @defgroup analogy_lib_calibration Software calibration API
+ * @{
+ */
+
+/**
+ * @brief Read the Analogy-generated calibration file
+ *
+ * @param[in] name Name of the calibration file
+ * @param[out] data Pointer to the calibration file contents
+ *
+ * @return 0 on success, otherwise -1
+ *
+ */
+
+int a4l_read_calibration_file(char *name, struct a4l_calibration_data *data)
+{
+	const char *subdevice[2] = { AI_SUBD_STR, AO_SUBD_STR };
+	int i, j, k, index = -1, nb_elements = -1;
+	struct a4l_calibration_subdev_data *p = NULL;
+	struct _dictionary_ *d;
+	struct stat st;
+
+	if (access(name, R_OK))
+		return -1;
+
+	if (stat(name, &st) || !st.st_size)
+		return -1;
+
+	d = iniparser_load(name);
+	if (d == NULL)
+		return -1;
+
+	CHK(read_str, &data->driver_name, d, PLATFORM_STR, DRIVER_STR);
+	CHK(read_str, &data->board_name, d, PLATFORM_STR, BOARD_STR);
+
+	for (k = 0; k < ARRAY_LEN(subdevice); k++) {
+		read_int(&nb_elements, d, subdevice[k], -1, ELEMENTS_STR);
+		if (nb_elements < 0 ) {
+			/* AO is optional */
+			if (!strncmp(subdevice[k], AO_SUBD_STR, sizeof(AO_SUBD_STR)))
+			     break;
+			return -1;
+		}
+
+		CHK(read_int, &index, d, subdevice[k], -1, INDEX_STR);
+
+		if (strncmp(subdevice[k], AI_SUBD_STR,
+			    strlen(AI_SUBD_STR)) == 0) {
+			data->ai = malloc(nb_elements *
+					  sizeof(struct a4l_calibration_subdev_data));
+			data->nb_ai = nb_elements;
+			p  = data->ai;
+		}
+
+		if (strncmp(subdevice[k], AO_SUBD_STR,
+			    strlen(AO_SUBD_STR)) == 0) {
+			data->ao = malloc(nb_elements *
+					  sizeof(struct a4l_calibration_subdev_data));
+			data->nb_ao = nb_elements;
+			p = data->ao;
+		}
+
+		for (i = 0; i < nb_elements; i++) {
+			CHK(read_int, &p->expansion, d, subdevice[k], i,
+				 EXPANSION_STR);
+			CHK(read_int, &p->nb_coeff, d, subdevice[k], i,
+				 NBCOEFF_STR);
+			CHK(read_int, &p->channel, d, subdevice[k], i,
+				 CHANNEL_STR);
+			CHK(read_int, &p->range, d, subdevice[k], i,
+				 RANGE_STR);
+
+			p->coeff = malloc(p->nb_coeff * sizeof(double));
+
+			for (j = 0; j < p->nb_coeff; j++) {
+				CHK(read_dbl,&p->coeff[j], d, subdevice[k], i,
+					 COEFF_STR, j);
+			}
+
+			p->index = index;
+			p++;
+		}
+	}
+
+
+	return 0;
+}
+
+/**
+ * @brief Get the polynomial that will be used for the software calibration
+ *
+ * @param[out] converter Polynomial to be used on the software calibration
+ * @param[in] subd Subdevice index
+ * @param[in] chan Channel
+ * @param[in] range Range
+ * @param[in] data Calibration data read from the calibration file
+ *
+ * @return -1 on error
+ *
+ */
+
+int a4l_get_softcal_converter(struct a4l_polynomial *converter,
+			      int subd, int chan, int range,
+			      struct a4l_calibration_data *data )
+{
+	int i;
+
+	for (i = 0; i < data->nb_ai; i++) {
+		if (data->ai[i].index != subd)
+			break;
+		if ((data->ai[i].channel == chan || data->ai[i].channel == -1)
+		    &&
+		    (data->ai[i].range == range || data->ai[i].range == -1)) {
+			converter->expansion = data->ai[i].expansion;
+			converter->nb_coeff = data->ai[i].nb_coeff;
+			converter->coeff = data->ai[i].coeff;
+			converter->order = data->ai[i].nb_coeff - 1;
+			return 0;
+		}
+	}
+
+	for (i = 0; i < data->nb_ao; i++) {
+		if (data->ao[i].index != subd)
+			break;
+		if ((data->ao[i].channel == chan || data->ao[i].channel == -1)
+		    &&
+		    (data->ao[i].range == range || data->ao[i].range == -1)) {
+			converter->expansion = data->ao[i].expansion;
+			converter->nb_coeff = data->ao[i].nb_coeff;
+			converter->coeff = data->ao[i].coeff;
+			converter->order = data->ao[i].nb_coeff - 1;
+			return 0;
+		}
+	}
+
+	return -1;
+}
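+
+/*
+ * Usage sketch (not part of the library): apply a software
+ * calibration to raw samples. The file path is an example value;
+ * subd_idx, chan_idx and rng_idx are the subdevice, channel and
+ * range indexes, "chan" the channel descriptor returned by
+ * a4l_get_chinfo() and "raw" a buffer filled by a previous read.
+ *
+ *	struct a4l_calibration_data cal = { 0 };
+ *	struct a4l_polynomial conv;
+ *	double volts[64];
+ *
+ *	if (a4l_read_calibration_file("/etc/analogy.cal", &cal) == 0 &&
+ *	    a4l_get_softcal_converter(&conv, subd_idx, chan_idx,
+ *				      rng_idx, &cal) == 0)
+ *		a4l_rawtodcal(chan, volts, raw, 64, &conv);
+ */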
+
+/**
+ * @brief Convert raw data (from the driver) to calibrated double units
+ * @param[in] chan Channel descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of conversions to perform
+ * @param[in] converter Conversion polynomial
+ *
+ * @return the count of conversions performed, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    chan, rng and the pointers should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_rawtodcal()
+ *
+ */
+
+int a4l_rawtodcal(a4l_chinfo_t *chan, double *dst, void *src,
+		  int cnt, struct a4l_polynomial *converter)
+{
+	int i = 0, j = 0, k = 0;
+	double term = 1.0;
+	lsampl_t tmp;
+	int size;
+
+	/* Temporary data accessor */
+	lsampl_t(*datax_get) (void *);
+
+	/* Basic checking */
+	if (chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Get the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_get = data32_get;
+		break;
+	case 2:
+		datax_get = data16_get;
+		break;
+	case 1:
+		datax_get = data8_get;
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	while (j < cnt) {
+		/* Properly retrieve the data */
+		tmp = datax_get(src + i);
+
+		/* Perform the conversion */
+		dst[j] = 0.0;
+		term = 1.0;
+		for (k = 0; k < converter->nb_coeff; k++) {
+			dst[j] += converter->coeff[k] * term;
+			term *= tmp - converter->expansion;
+		}
+
+		/* Update the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
+
+/**
+ * @brief Convert double values to raw calibrated data using polynomials
+ *
+ * @param[in] chan Channel descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of conversions to perform
+ * @param[in] converter Conversion polynomial
+ *
+ * @return the count of conversions performed, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    chan, rng and the pointers should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_dcaltoraw()
+ *
+ */
+
+int a4l_dcaltoraw( a4l_chinfo_t * chan, void *dst, double *src, int cnt,
+		   struct a4l_polynomial *converter)
+{
+	int size, i = 0, j = 0, k = 0;
+	double value, term;
+
+	/* Temporary data accessor */
+	void (*datax_set) (void *, lsampl_t);
+
+	/* Basic checking */
+	if (chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Select the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_set = data32_set;
+		break;
+	case 2:
+		datax_set = data16_set;
+		break;
+	case 1:
+		datax_set = data8_set;
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	while (j < cnt) {
+
+		/* Performs the conversion */
+		value = 0.0;
+		term = 1.0;
+		for (k = 0; k < converter->nb_coeff; k++) {
+			value += converter->coeff[k] * term;
+			term *= src[j] - converter->expansion;
+		}
+		value = nearbyint(value);
+
+		datax_set(dst + i, (lsampl_t) value);
+
+		/* Updates the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
+
+/** @} Software calibration API */
+
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/calibration.h b/kernel/xenomai-v3.2.4/lib/analogy/calibration.h
new file mode 100644
index 0000000..5d881b5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/calibration.h
@@ -0,0 +1,68 @@
+/**
+ * @file
+ * Analogy for Linux, internal calibration declarations
+ *
+ * @note Copyright (C) 2014 Jorge A Ramirez-Ortiz <jro@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef __ANALOGY_CALIBRATION_H__
+#define __ANALOGY_CALIBRATION_H__
+
+/*
+ * internal definitions between the xenomai utils and the library.
+ * no need to expose them to the USER
+ *
+ */
+#define ELEMENT_FIELD_FMT	"%s_%d:%s"
+#define ELEMENT_FMT		"%s:%s"
+#define COEFF_FMT		ELEMENT_FIELD_FMT"_%d"
+
+#define PLATFORM_STR		"platform"
+#define CALIBRATION_SUBD_STR	"calibration"
+#define MEMORY_SUBD_STR		"memory"
+#define AI_SUBD_STR		"analog_input"
+#define AO_SUBD_STR		"analog_output"
+
+#define INDEX_STR	"index"
+#define ELEMENTS_STR	"elements"
+#define CHANNEL_STR	"channel"
+#define RANGE_STR	"range"
+#define EXPANSION_STR	"expansion_origin"
+#define NBCOEFF_STR	"nbcoeff"
+#define COEFF_STR	"coeff"
+#define BOARD_STR	"board_name"
+#define DRIVER_STR	"driver_name"
+
+struct polynomial {
+	double expansion_origin;
+	double *coefficients;
+	int nb_coefficients;
+	int order;
+};
+
+struct subdevice_calibration_node {
+	struct holder node;
+	struct polynomial *polynomial;
+	unsigned channel;
+	unsigned range;
+};
+
+void write_calibration_file(FILE *dst, struct listobj *l,
+                            struct a4l_calibration_subdev *subd,
+                            a4l_desc_t *desc);
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/descriptor.c b/kernel/xenomai-v3.2.4/lib/analogy/descriptor.c
new file mode 100644
index 0000000..bc4d7bd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/descriptor.c
@@ -0,0 +1,503 @@
+/**
+ * @file
+ * Analogy for Linux, descriptor related features
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <rtdm/analogy.h>
+#include "internal.h"
+#include "root_leaf.h"
+
+#ifndef DOXYGEN_CPP
+
+static void a4l_root_setup(a4l_root_t * rt,
+			   unsigned long gsize, unsigned long rsize)
+{
+	/* Common init */
+	rt->offset = ((void *)rt + sizeof(a4l_root_t));
+	rt->gsize = gsize;
+	rt->id = 0xffffffff;
+	rt->nb_leaf = 0;
+	rt->lfnxt = NULL;
+	rt->lfchd = NULL;
+
+	/* Specific init */
+	rt->data = rt->offset;
+	rt->offset += rsize;
+}
+
+static int a4l_leaf_add(a4l_root_t * rt,
+			a4l_leaf_t * lf,
+			a4l_leaf_t ** lfchild, unsigned long lfsize)
+{
+	/* Basic checking */
+	if (rt->offset +
+	    sizeof(a4l_leaf_t) + lfsize > ((void *)rt) + rt->gsize)
+		return -ENOMEM;
+
+	if (lf->nb_leaf != 0) {
+		int i;
+		a4l_leaf_t *lflst = lf->lfchd;
+
+		for (i = 0; i < (lf->nb_leaf - 1); i++) {
+			if (lflst == NULL)
+				return -EFAULT;
+			else
+				lflst = lflst->lfnxt;
+		}
+		lflst->lfnxt = (a4l_leaf_t *) rt->offset;
+	} else
+		lf->lfchd = (a4l_leaf_t *) rt->offset;
+
+	/* Inits parent leaf */
+	lf->nb_leaf++;
+	*lfchild = (a4l_leaf_t *) rt->offset;
+	rt->offset += sizeof(a4l_leaf_t);
+
+	/* Performs child leaf init */
+	(*lfchild)->id = lf->nb_leaf - 1;
+	(*lfchild)->nb_leaf = 0;
+	(*lfchild)->lfnxt = NULL;
+	(*lfchild)->lfchd = NULL;
+	(*lfchild)->data = (void *)rt->offset;
+
+	/* Performs root modifications */
+	rt->offset += lfsize;
+
+	return 0;
+}
+
+static inline a4l_leaf_t *a4l_leaf_get(a4l_leaf_t * lf,
+				       unsigned int id)
+{
+	int i;
+	a4l_leaf_t *lflst = lf->lfchd;
+
+	for (i = 0; i < id && lflst != NULL; i++)
+		lflst = lflst->lfnxt;
+
+	return lflst;
+}
+
+static int __a4l_get_sbsize(int fd, a4l_desc_t * dsc)
+{
+	unsigned int i, j, nb_chan, nb_rng;
+	int ret, res =
+		dsc->nb_subd * (sizeof(a4l_sbinfo_t) + sizeof(a4l_leaf_t));
+
+	for (i = 0; i < dsc->nb_subd; i++) {
+
+		if ((ret = a4l_sys_nbchaninfo(fd, i, &nb_chan)) < 0)
+			return ret;
+
+		res += nb_chan * (sizeof(a4l_chinfo_t) + sizeof(a4l_leaf_t));
+		for (j = 0; j < nb_chan; j++) {
+			if ((ret = a4l_sys_nbrnginfo(fd, i, j, &nb_rng)) < 0)
+				return ret;
+			res += nb_rng * (sizeof(a4l_rnginfo_t) +
+					 sizeof(a4l_leaf_t));
+		}
+	}
+
+	return res;
+}
+
+static int __a4l_fill_desc(int fd, a4l_desc_t * dsc)
+{
+	int ret;
+	unsigned int i, j;
+	a4l_sbinfo_t *sbinfo;
+	a4l_root_t *rt = (a4l_root_t *) dsc->sbdata;
+
+	a4l_root_setup(rt, dsc->sbsize,
+		       dsc->nb_subd * sizeof(a4l_sbinfo_t));
+	sbinfo = (a4l_sbinfo_t *) rt->data;
+
+	if ((ret = a4l_sys_subdinfo(fd, sbinfo)) < 0)
+		return ret;
+
+	for (i = 0; i < dsc->nb_subd; i++) {
+		a4l_leaf_t *lfs;
+		a4l_chinfo_t *chinfo;
+
+		/* For each subd, add a leaf for the channels even if
+		   the subd does not own any channel */
+		ret = a4l_leaf_add(rt, (a4l_leaf_t *) rt, &lfs,
+			     sbinfo[i].nb_chan * sizeof(a4l_chinfo_t));
+		if (ret < 0)
+			return ret;
+
+		/* If there is no channel, no need to go further */
+		if (sbinfo[i].nb_chan == 0)
+			continue;
+
+		chinfo = (a4l_chinfo_t *) lfs->data;
+
+		if ((ret = a4l_sys_chaninfo(fd, i, chinfo)) < 0)
+			return ret;
+
+		for (j = 0; j < sbinfo[i].nb_chan; j++) {
+			a4l_leaf_t *lfc;
+			a4l_rnginfo_t *rnginfo;
+
+			/* For each channel, add a leaf for the ranges
+			   even if no range descriptor is available */
+			ret = a4l_leaf_add(rt, lfs, &lfc,
+				     chinfo[j].nb_rng *
+				     sizeof(a4l_rnginfo_t));
+			if (ret < 0)
+				return ret;
+
+
+			/* If there is no range, no need to go further */
+			if (chinfo[j].nb_rng == 0)
+				continue;
+
+			rnginfo = (a4l_rnginfo_t *) lfc->data;
+			if ((ret = a4l_sys_rnginfo(fd, i, j, rnginfo)) < 0)
+				return ret;
+		}
+	}
+
+	return 0;
+}
+
+#endif /* !DOXYGEN_CPP */
+
+/*!
+ * @ingroup analogy_lib_syscall
+ * @defgroup analogy_lib_descriptor Descriptor Syscall API
+ * @{
+ */
+
+/**
+ * @brief Get a descriptor on an attached device
+ *
+ * Once the device has been attached, the function a4l_sys_desc()
+ * retrieves various information on the device (subdevices, channels,
+ * ranges, etc.).
+ * The function a4l_sys_desc() can be called twice:
+ * - The first time, almost all the fields, except sbdata, are set
+ *   (board_name, nb_subd, idx_read_subd, idx_write_subd, magic,
+ *   sbsize); the last field , sbdata, is supposed to be a pointer on
+ *   a buffer, which size is defined by the field sbsize.
+ * - The second time, the buffer pointed by sbdata is filled with data
+ *   about the subdevices, the channels and the ranges.
+ *
+ * Between the two calls, a buffer large enough to contain all the
+ * data must be allocated. The data are laid out according to a
+ * root-leaf organization (device -> subdevice
+ * -> channel -> range). They cannot be accessed directly; specific
+ * functions are available so as to retrieve them:
+ * - a4l_get_subdinfo() to get some subdevice's characteristics.
+ * - a4l_get_chaninfo() to get some channel's characteristics.
+ * - a4l_get_rnginfo() to get some range's characteristics.
+ *
+ * @param[in] fd Driver file descriptor
+ * @param[out] dsc Device descriptor
+ * @param[in] pass Description level to retrieve:
+ * - A4L_BSC_DESC to get the basic descriptor (notably the size of
+ *   the data buffer to allocate).
+ * - A4L_CPLX_DESC to get the complex descriptor, the data buffer
+ *   is filled with characteristics about the subdevices, the channels
+ *   and the ranges.
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; the
+ *    pass argument should be checked; check also the kernel log
+ *    ("dmesg")
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENODEV is returned if the descriptor is incoherent (the device
+ *    may be unattached)
+ *
+ */
+
+int a4l_sys_desc(int fd, a4l_desc_t * dsc, int pass)
+{
+	int ret = 0;
+
+	if (dsc == NULL ||
+	    (pass != A4L_BSC_DESC && dsc->magic != MAGIC_BSC_DESC))
+		return -EINVAL;
+
+	if (pass == A4L_BSC_DESC) {
+
+		ret = a4l_sys_devinfo(fd, (a4l_dvinfo_t *) dsc);
+		if (ret < 0)
+			goto out_a4l_sys_desc;
+
+		dsc->sbsize = __a4l_get_sbsize(fd, dsc);
+		dsc->sbdata = NULL;
+		dsc->magic = MAGIC_BSC_DESC;
+	} else {
+
+		if (!dsc->sbsize) {
+			ret = -ENODEV;
+			goto out_a4l_sys_desc;
+		}
+
+		ret = __a4l_fill_desc(fd, dsc);
+		if (ret < 0)
+			goto out_a4l_sys_desc;
+
+		dsc->magic = MAGIC_CPLX_DESC;
+	}
+
+out_a4l_sys_desc:
+	return ret;
+}
+
+/*! @} Descriptor Syscall API */
+
+/*!
+ * @ingroup analogy_lib_level1
+ * @defgroup analogy_lib_descriptor1 Descriptor API
+ *
+ * This is the API interface used to fill and use Analogy device
+ * descriptor structure
+ * @{
+ */
+
+/**
+ * @brief Open an Analogy device and basically fill the descriptor
+ *
+ * @param[out] dsc Device descriptor
+ * @param[in] fname Device name
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; the
+ *    fname and the dsc pointer should be checked; check also the
+ *    kernel log ("dmesg")
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ *
+ */
+int a4l_open(a4l_desc_t *dsc, const char *fname)
+{
+	int ret;
+
+	/* Basic checking */
+	if (dsc == NULL)
+		return -EINVAL;
+
+	/* Initializes the descriptor */
+	memset(dsc, 0, sizeof(a4l_desc_t));
+
+	/* Opens the driver */
+	dsc->fd = a4l_sys_open(fname);
+	if (dsc->fd < 0)
+		return dsc->fd;
+
+	/* Basically fills the descriptor */
+	ret = a4l_sys_desc(dsc->fd, dsc, A4L_BSC_DESC);
+	if (ret < 0) {
+		a4l_sys_close(dsc->fd);
+	}
+
+	return ret;
+}
+
+/**
+ * @brief Close the Analogy device related with the descriptor
+ *
+ * The file descriptor is associated with a context. The context is
+ * one of the enablers of asynchronous transfers. So the programmer
+ * must keep in mind that closing the file descriptor cancels any
+ * asynchronous transfer in progress.
+ *
+ * @param[in] dsc Device descriptor
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; the
+ *    dsc pointer should be checked; check also the kernel log
+ *    ("dmesg")
+ *
+ */
+int a4l_close(a4l_desc_t * dsc)
+{
+	/* Basic checking */
+	if (dsc == NULL)
+		return -EINVAL;
+
+	return a4l_sys_close(dsc->fd);
+}
+
+/**
+ * @brief Fill the descriptor with subdevices, channels and ranges
+ * data
+ *
+ * @param[in] dsc Device descriptor partly filled by a4l_open().
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; the
+ *    dsc pointer should be checked; check also the kernel log
+ *    ("dmesg")
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENODEV is returned if the descriptor is incoherent (the device
+ *    may be unattached)
+ *
+ */
+int a4l_fill_desc(a4l_desc_t * dsc)
+{
+	/* Basic checking */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	/* Checks the descriptor has been basically filled */
+	if (dsc->magic != MAGIC_BSC_DESC)
+		return -EINVAL;
+
+	return a4l_sys_desc(dsc->fd, dsc, A4L_CPLX_DESC);
+}
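+
+/*
+ * Usage sketch (not part of the library): the two-pass open /
+ * allocate / fill sequence described above. "analogy0" is an example
+ * device name.
+ *
+ *	a4l_desc_t dsc;
+ *
+ *	if (a4l_open(&dsc, "analogy0") < 0)
+ *		return;
+ *	dsc.sbdata = malloc(dsc.sbsize);
+ *	if (dsc.sbdata == NULL || a4l_fill_desc(&dsc) < 0) {
+ *		a4l_close(&dsc);
+ *		return;
+ *	}
+ *	...	(a4l_get_subdinfo() and friends are now usable)
+ */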
+
+/**
+ * @brief Get an information structure on a specified subdevice
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() and
+ * a4l_fill_desc()
+ * @param[in] subd Subdevice index
+ * @param[out] info Subdevice information structure
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; subd
+ *    and the dsc pointer should be checked; check also the kernel log
+ *    ("dmesg"); WARNING: a4l_fill_desc() should be called before
+ *    using a4l_get_subdinfo().
+ *
+ */
+int a4l_get_subdinfo(a4l_desc_t * dsc,
+		     unsigned int subd, a4l_sbinfo_t ** info)
+{
+	a4l_leaf_t *tmp;
+
+	if (dsc == NULL || info == NULL)
+		return -EINVAL;
+
+	if (dsc->magic != MAGIC_CPLX_DESC)
+		return -EINVAL;
+
+	if (subd >= dsc->nb_subd)
+		return -EINVAL;
+
+	tmp = (a4l_leaf_t *) dsc->sbdata;
+	*info = &(((a4l_sbinfo_t *) tmp->data)[subd]);
+
+	return 0;
+}
+
+/**
+ * @brief Get an information structure on a specified channel
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() and
+ * a4l_fill_desc()
+ * @param[in] subd Subdevice index
+ * @param[in] chan Channel index
+ * @param[out] info Channel information structure
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; subd,
+ *    chan and the dsc pointer should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_get_chinfo()
+ *
+ */
+int a4l_get_chinfo(a4l_desc_t * dsc,
+		   unsigned int subd,
+		   unsigned int chan, a4l_chinfo_t ** info)
+{
+	a4l_leaf_t *tmp;
+
+	if (dsc == NULL || info == NULL)
+		return -EINVAL;
+
+	if (dsc->magic != MAGIC_CPLX_DESC)
+		return -EINVAL;
+
+	if (subd >= dsc->nb_subd)
+		return -EINVAL;
+
+	tmp = (a4l_leaf_t *) dsc->sbdata;
+
+	if (chan >= ((a4l_sbinfo_t *) tmp->data)[subd].nb_chan)
+		return -EINVAL;
+
+	tmp = a4l_leaf_get(tmp, subd);
+	*info = &(((a4l_chinfo_t *) tmp->data)[chan]);
+
+	return 0;
+}
+
+/**
+ * @brief Get an information structure on a specified range
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() and
+ * a4l_fill_desc()
+ * @param[in] subd Subdevice index
+ * @param[in] chan Channel index
+ * @param[in] rng Range index
+ * @param[out] info Range information structure
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; subd,
+ *    chan, rng and the dsc pointer should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_get_rnginfo()
+ *
+ */
+int a4l_get_rnginfo(a4l_desc_t * dsc,
+		    unsigned int subd,
+		    unsigned int chan,
+		    unsigned int rng, a4l_rnginfo_t ** info)
+{
+	a4l_leaf_t *tmp;
+
+	if (dsc == NULL || info == NULL)
+		return -EINVAL;
+
+	if (dsc->magic != MAGIC_CPLX_DESC)
+		return -EINVAL;
+
+	if (subd >= dsc->nb_subd)
+		return -EINVAL;
+
+	tmp = (a4l_leaf_t *) dsc->sbdata;
+
+	if (chan >= ((a4l_sbinfo_t *) tmp->data)[subd].nb_chan)
+		return -EINVAL;
+
+	tmp = a4l_leaf_get(tmp, subd);
+
+	if (rng >= ((a4l_chinfo_t *) tmp->data)[chan].nb_rng)
+		return -EINVAL;
+
+	tmp = a4l_leaf_get(tmp, chan);
+	*info = &(((a4l_rnginfo_t *) tmp->data)[rng]);
+
+	return 0;
+}
+
+/*! @} Descriptor API */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/info.c b/kernel/xenomai-v3.2.4/lib/analogy/info.c
new file mode 100644
index 0000000..a68e4a3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/info.c
@@ -0,0 +1,84 @@
+/*
+ * Analogy for Linux, device, subdevice, etc. related features
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <rtdm/analogy.h>
+#include "internal.h"
+
+#ifndef DOXYGEN_CPP
+
+int a4l_sys_devinfo(int fd, a4l_dvinfo_t * info)
+{
+	return __sys_ioctl(fd, A4L_DEVINFO, info);
+}
+
+int a4l_sys_subdinfo(int fd, a4l_sbinfo_t * info)
+{
+	return __sys_ioctl(fd, A4L_SUBDINFO, info);
+}
+
+int a4l_sys_nbchaninfo(int fd, unsigned int idx_subd, unsigned int *nb)
+{
+	int ret;
+	a4l_chinfo_arg_t arg = { idx_subd, NULL };
+
+	if (nb == NULL)
+		return -EINVAL;
+
+	ret = __sys_ioctl(fd, A4L_NBCHANINFO, &arg);
+	*nb = (unsigned long)arg.info;
+
+	return ret;
+}
+
+int a4l_sys_chaninfo(int fd, unsigned int idx_subd, a4l_chinfo_t * info)
+{
+	a4l_chinfo_arg_t arg = { idx_subd, info };
+
+	return __sys_ioctl(fd, A4L_CHANINFO, &arg);
+}
+
+int a4l_sys_nbrnginfo(int fd,
+		      unsigned int idx_subd,
+		      unsigned int idx_chan, unsigned int *nb)
+{
+	int ret;
+	a4l_rnginfo_arg_t arg = { idx_subd, idx_chan, NULL };
+
+	if (nb == NULL)
+		return -EINVAL;
+
+	ret = __sys_ioctl(fd, A4L_NBRNGINFO, &arg);
+	*nb = (unsigned long)arg.info;
+
+	return ret;
+}
+
+int a4l_sys_rnginfo(int fd,
+		    unsigned int idx_subd,
+		    unsigned int idx_chan, a4l_rnginfo_t * info)
+{
+	a4l_rnginfo_arg_t arg = { idx_subd, idx_chan, info };
+
+	return __sys_ioctl(fd, A4L_RNGINFO, &arg);
+}
+
+#endif /* !DOXYGEN_CPP */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/internal.h b/kernel/xenomai-v3.2.4/lib/analogy/internal.h
new file mode 100644
index 0000000..da8b658
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/internal.h
@@ -0,0 +1,58 @@
+/**
+ * @file
+ * Analogy for Linux, internal declarations
+ *
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef __ANALOGY_LIB_INTERNAL__
+#define __ANALOGY_LIB_INTERNAL__
+
+#define MAGIC_BSC_DESC 0x1234abcd
+#define MAGIC_CPLX_DESC 0xabcd1234
+
+#include <rtdm/rtdm.h>
+
+#ifndef DOXYGEN_CPP
+
+static inline int __sys_open(const char *fname)
+{
+	return __RT(open(fname, 0));
+}
+
+static inline int __sys_close(int fd)
+{
+	return __RT(close(fd));
+}
+
+static inline int __sys_ioctl(int fd, int request, void *arg)
+{
+	return __RT(ioctl(fd, request, arg));
+}
+
+static inline ssize_t __sys_read(int fd, void *buf, size_t nbyte)
+{
+	return __RT(read(fd, buf, nbyte));
+}
+
+static inline ssize_t __sys_write(int fd, const void *buf, size_t nbyte)
+{
+	return __RT(write(fd, buf, nbyte));
+}
+
+#endif /* !DOXYGEN_CPP */
+
+#endif /* __ANALOGY_LIB_INTERNAL__ */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/math.c b/kernel/xenomai-v3.2.4/lib/analogy/math.c
new file mode 100644
index 0000000..2e6e475
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/math.c
@@ -0,0 +1,457 @@
+#include <math.h>
+
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <math.h>
+#include <assert.h>
+
+#include <rtdm/analogy.h>
+
+struct vec {
+	unsigned dim;
+	unsigned stride;
+	double *val;
+};
+
+struct mat {
+	unsigned rows;
+	unsigned cols;
+	double *val;
+};
+
+static void vec_init(struct vec *v, unsigned dim, double *val)
+{
+	v->dim = dim;
+	v->stride = 1;
+	v->val = val;
+}
+
+static int vec_alloc(struct vec *v, unsigned dim)
+{
+	double *val = malloc(sizeof(*val) * dim);
+
+	if (val == NULL)
+		return -ENOMEM;
+
+	v->dim = dim;
+	v->stride = 1;
+	v->val = val;
+
+	return 0;
+}
+
+static void vec_free(struct vec *v)
+{
+	free(v->val);
+	v->val = NULL;
+	v->dim = 0;
+	v->stride = 0;
+}
+
+static void row_vec_init(struct vec *v, struct mat *m, unsigned row)
+{
+	v->dim = m->cols;
+	v->stride = 1;
+	v->val = m->val + row * m->cols;
+}
+
+static void col_vec_init(struct vec *v, struct mat *m, unsigned col)
+{
+	v->dim = m->rows;
+	v->stride = m->cols;
+	v->val = m->val + col;
+}
+
+static void vec_copy(struct vec *dst, struct vec *src)
+{
+	const unsigned d_stride = dst->stride, s_stride = src->stride;
+	double *d_val = dst->val, *s_val = src->val;
+	unsigned dim = dst->dim;
+	unsigned i;
+
+	assert(dst->dim == src->dim);
+
+	for (i = 0; i < dim; i++, d_val += d_stride, s_val += s_stride)
+		*d_val = *s_val;
+}
+
+static inline double *vec_of(struct vec *v, unsigned k)
+{
+	return v->val + v->stride * k;
+}
+
+static void vec_mult_scalar(struct vec *v, double x)
+{
+	const unsigned v_stride = v->stride;
+	const unsigned dim = v->dim;
+	double *v_val = v->val;
+	unsigned i;
+
+	for (i = 0; i < dim; i++, v_val += v_stride)
+		*v_val *= x;
+}
+
+static double vec_dot(struct vec *l, struct vec *r)
+{
+	const unsigned l_stride = l->stride;
+	const unsigned r_stride = r->stride;
+	const double *l_val = l->val;
+	const double *r_val = r->val;
+	const unsigned dim = l->dim;
+	double res = 0;
+	unsigned i;
+
+	assert(dim == r->dim);
+
+	for (i = 0; i < dim; i++, l_val += l_stride, r_val += r_stride)
+		res += (*l_val) * (*r_val);
+
+	return res;
+}
+
+static inline double vec_norm2(struct vec *v)
+{
+	return sqrt(vec_dot(v, v));
+}
+
+static void vec_vandermonde(struct vec *v, const double x)
+{
+	const unsigned v_stride = v->stride;
+	const unsigned dim = v->dim;
+	double *v_val = v->val;
+	double tmp = 1;
+	unsigned i;
+
+	for (i = 0; i < dim; i++, v_val += v_stride, tmp *= x)
+		*v_val = tmp;
+}
+
+static void vec_householder(struct vec *res, struct vec *v, unsigned k)
+{
+	const unsigned res_stride = res->stride;
+	const unsigned v_stride = v->stride;
+	const unsigned dim = res->dim;
+	const double *v_val = v->val;
+	double *x_k, *res_val = res->val;
+	double alpha;
+	unsigned j;
+
+	assert(dim == v->dim);
+	assert(k < dim);
+
+	for (j = 0; j < k; j++, res_val += res_stride, v_val += v_stride)
+		*res_val = 0;
+	x_k = res_val;
+	for (j = k; j < dim; j++, res_val += res_stride, v_val += v_stride)
+		*res_val = *v_val;
+
+	alpha = (signbit(*x_k) ? 1 : -1) * vec_norm2(res);
+	*x_k -= alpha;
+
+	vec_mult_scalar(res, 1 / vec_norm2(res));
+}
+
+static int mat_alloc(struct mat *m, unsigned rows, unsigned cols)
+{
+	double *val = malloc(sizeof(*val) * rows * cols);
+
+	if (val == NULL)
+		return -ENOMEM;
+
+	m->rows = rows;
+	m->cols = cols;
+	m->val = val;
+
+	return 0;
+}
+
+static void mat_free(struct mat *m)
+{
+	free(m->val);
+	m->val = NULL;
+	m->cols = 0;
+	m->rows = 0;
+}
+
+static inline double *mat_of(struct mat *m, unsigned row, unsigned col)
+{
+	return m->val + row * m->cols + col;
+}
+
+static void vec_mult_mat(struct vec *res, struct vec *v, struct mat *m)
+{
+	const unsigned res_stride = res->stride;
+	const unsigned cols = m->cols;
+	double *res_val = res->val;
+	struct vec cur_col;
+	unsigned i;
+
+	assert(v->dim == m->rows);
+
+	col_vec_init(&cur_col, m, 0);
+	for (i = 0; i < cols; i++, cur_col.val++, res_val += res_stride)
+		*res_val = vec_dot(v, &cur_col);
+}
+
+static void mat_vandermonde(struct mat *m, struct vec *v, const double origin)
+{
+	const unsigned v_stride = v->stride;
+	const unsigned m_rows = m->rows;
+	const unsigned m_cols = m->cols;
+	const double *v_val = v->val;
+	struct vec m_row;
+	unsigned i;
+
+	assert(m->rows == v->dim);
+
+	row_vec_init(&m_row, m, 0);
+	for (i = 0; i < m_rows; i++, m_row.val += m_cols, v_val += v_stride)
+		vec_vandermonde(&m_row, *v_val - origin);
+}
+
+static void
+house_mult_mat(struct mat *res, struct vec *tmp, struct vec *vh, struct mat *m)
+{
+	const double *vh_val = vh->val, *tmp_val = tmp->val, *m_val = m->val;
+	const unsigned tmp_stride = tmp->stride;
+	const unsigned vh_stride = vh->stride;
+	const unsigned res_cols = res->cols;
+	const unsigned res_rows = res->rows;
+	double *res_val = res->val;
+	unsigned i, j;
+
+	assert(res_cols == m->cols &&
+		res_rows == m->rows && res->rows == vh->dim);
+
+	vec_mult_mat(tmp, vh, m);
+
+	for (j = 0; j < res_rows; j++, vh_val += vh_stride, tmp_val = tmp->val)
+		for (i = 0; i < res_cols; i++, tmp_val += tmp_stride)
+			*res_val++ = (*m_val++) - 2 * (*vh_val) * (*tmp_val);
+}
+
+static void
+house_mult_vec(struct vec *res, struct vec *vh, struct vec *v)
+{
+	const double *vh_val = vh->val, *v_val = v->val;
+	const unsigned res_stride = res->stride;
+	const unsigned vh_stride = vh->stride;
+	const unsigned v_stride = v->stride;
+	const unsigned res_dim = res->dim;
+	double *res_val = res->val;
+	double dot;
+	unsigned i;
+
+	assert(res_dim == v->dim);
+
+	dot = 2 * vec_dot(vh, v);
+
+	for (i = 0; i < res_dim; i++, res_val += res_stride,
+		     v_val += v_stride, vh_val += vh_stride)
+		*res_val = (*v_val) - dot * (*vh_val);
+}
+
+static void
+mat_upper_triangular_backsub(struct vec *res, struct mat *m, struct vec *v)
+{
+	unsigned dim = res->dim;
+	unsigned j;
+	int i;
+
+	assert(dim == m->cols);
+
+	for (i = dim - 1; i >= 0; i--) {
+		double sum = *vec_of(v, i);
+
+		for (j = i + 1; j < dim; j++)
+			sum -= (*mat_of(m, i, j)) * (*vec_of(res, j));
+
+		*vec_of(res, i) = sum / (*mat_of(m, i, i));
+	}
+}
+
+/*
+ * A = Q.R decomposition using Householder reflections
+ * Input: R <- A
+ *	  Y
+ * Output: R
+ *	   Y <- Q^tY
+ */
+static int mat_qr(struct mat *r, struct vec *y)
+{
+	struct vec r_col, vh, tr;
+	unsigned i;
+	int rc;
+
+	rc = vec_alloc(&vh, r->rows);
+	if (rc < 0)
+		return rc;
+
+	rc = vec_alloc(&tr, y->dim);
+	if (rc < 0)
+		goto err_free_vh;
+
+	col_vec_init(&r_col, r, 0);
+	for (i = 0; i < r->cols; i++, r_col.val++) {
+		vec_householder(&vh, &r_col, i);
+
+		house_mult_vec(y, &vh, y);
+		house_mult_mat(r, &tr, &vh, r);
+	}
+
+	rc = 0;
+	vec_free(&tr);
+  err_free_vh:
+	vec_free(&vh);
+	return rc;
+}
+
+
+/*!
+ * @ingroup analogy_lib_level2
+ * @defgroup analogy_lib_math Math API
+ * @{
+ */
+
+
+/**
+ * @brief Calculate the polynomial fit
+ *
+ * @param[in] r_dim Number of coefficients of the resulting polynomial
+ * @param[out] r Coefficients of the resulting polynomial
+ * @param[in] orig Expansion origin of the polynomial
+ * @param[in] dim Number of input points
+ * @param[in] x Abscissae of the input points
+ * @param[in] y Ordinates of the input points
+ *
+ * @return 0 on success, otherwise a negative error code
+ *
+ * Operation:
+ *
+ * We are looking for Res such that A.Res = Y, with A the Vandermonde
+ * matrix made from the X vector.
+ *
+ * Using the least square method, this means finding Res such that:
+ * A^t.A.Res = A^tY
+ *
+ * If we write A = Q.R with Q^t.Q = 1, and R non singular, this can be
+ * reduced to:
+ * R.Res = Q^t.Y
+ *
+ * mat_qr() gives us R and Q^t.Y from A and Y
+ *
+ * We can then obtain Res by back substitution using
+ * mat_upper_triangular_backsub() with R upper triangular.
+ *
+ */
+int a4l_math_polyfit(unsigned r_dim, double *r, double orig, const unsigned dim,
+	        double *x, double *y)
+{
+	struct vec v_x, v_y, v_r, qty;
+	struct mat vdm;
+	int rc;
+
+	vec_init(&v_x, dim, x);
+	vec_init(&v_y, dim, y);
+	vec_init(&v_r, r_dim, r);
+
+	rc = vec_alloc(&qty, dim);
+	if (rc < 0)
+		return rc;
+	vec_copy(&qty, &v_y);
+
+	rc = mat_alloc(&vdm, dim, r_dim);
+	if (rc < 0)
+		goto err_free_qty;
+
+	mat_vandermonde(&vdm, &v_x, orig);
+
+	rc = mat_qr(&vdm, &qty);
+	if (rc == 0)
+		mat_upper_triangular_backsub(&v_r, &vdm, &qty);
+
+	mat_free(&vdm);
+
+  err_free_qty:
+	vec_free(&qty);
+
+	return rc;
+}
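+
+/*
+ * Usage sketch (not part of the library): fit a straight line (two
+ * coefficients) through four measured points around origin 0; on
+ * success r[0] is the offset and r[1] the slope, here roughly 0
+ * and 2.
+ *
+ *	double x[4] = { 0.0, 1.0, 2.0, 3.0 };
+ *	double y[4] = { 0.1, 1.9, 4.1, 5.9 };
+ *	double r[2];
+ *
+ *	if (a4l_math_polyfit(2, r, 0.0, 4, x, y) == 0)
+ *		printf("y ~ %g + %g * x\n", r[0], r[1]);
+ */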
+
+/**
+ * @brief Calculate the arithmetic mean of an array of values
+ *
+ * @param[out] pmean Pointer to the resulting value
+ * @param[in] val Array of input values
+ * @param[in] nr Number of array elements
+ *
+ */
+void a4l_math_mean(double *pmean, double *val, unsigned nr)
+{
+	double sum;
+	int i;
+
+	for (sum = 0, i = 0; i < nr; i++)
+		sum += val[i];
+
+	*pmean = sum / nr;
+}
+
+/**
+ * @brief Calculate the standard deviation of an array of values
+ *
+ * @param[out] pstddev Pointer to the resulting value
+ * @param[in] mean Mean value
+ * @param[in] val Array of input values
+ * @param[in] nr Number of array elements
+ *
+ */
+void a4l_math_stddev(double *pstddev, double mean, double *val, unsigned nr)
+{
+	double sum, sum_sq;
+	int i;
+
+	for (sum = 0, sum_sq = 0, i = 0; i < nr; i++) {
+		double x = val[i] - mean;
+		sum_sq += x * x;
+		sum += x;
+	}
+
+	*pstddev = sqrt((sum_sq - (sum * sum) / nr) / (nr - 1));
+}
+
+/**
+ * @brief Calculate the standard deviation of the mean
+ *
+ * @param[out] pstddevm Pointer to the resulting value
+ * @param[in] mean Mean value
+ * @param[in] val Array of input values
+ * @param[in] nr Number of array elements
+ *
+ */
+void a4l_math_stddev_of_mean(double *pstddevm, double mean, double *val, unsigned nr)
+{
+	a4l_math_stddev(pstddevm, mean, val, nr);
+	*pstddevm = *pstddevm / sqrt(nr);
+}
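+
+/*
+ * Usage sketch (not part of the library): basic statistics over a
+ * small sample buffer, e.g. to qualify the noise of repeated
+ * calibration reads.
+ *
+ *	double v[4] = { 1.02, 0.98, 1.01, 0.99 };
+ *	double mean, stddev, sem;
+ *
+ *	a4l_math_mean(&mean, v, 4);
+ *	a4l_math_stddev(&stddev, mean, v, 4);
+ *	a4l_math_stddev_of_mean(&sem, mean, v, 4);
+ */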
+
+
+/** @} Math API */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/range.c b/kernel/xenomai-v3.2.4/lib/analogy/range.c
new file mode 100644
index 0000000..d9f7a48
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/range.c
@@ -0,0 +1,638 @@
+/**
+ * @file
+ * Analogy for Linux, range related features
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <math.h>
+#include "internal.h"
+#include <rtdm/analogy.h>
+
+#ifndef DOXYGEN_CPP
+
+static lsampl_t data32_get(void *src)
+{
+	return (lsampl_t) * ((lsampl_t *) (src));
+}
+
+static lsampl_t data16_get(void *src)
+{
+	return (lsampl_t) * ((sampl_t *) (src));
+}
+
+static lsampl_t data8_get(void *src)
+{
+	return (lsampl_t) * ((unsigned char *)(src));
+}
+
+static void data32_set(void *dst, lsampl_t val)
+{
+	*((lsampl_t *) (dst)) = val;
+}
+
+static void data16_set(void *dst, lsampl_t val)
+{
+	*((sampl_t *) (dst)) = (sampl_t) (0xffff & val);
+}
+
+static void data8_set(void *dst, lsampl_t val)
+{
+	*((unsigned char *)(dst)) = (unsigned char)(0xff & val);
+}
+
+#endif /* !DOXYGEN_CPP */
+
+/*!
+ * @ingroup analogy_lib_level2
+ * @defgroup analogy_lib_rng2 Range / conversion API
+ * @{
+ */
+
+/**
+ * @brief Get the size in memory of an acquired element
+ *
+ * According to the board, the channels have various acquisition
+ * widths. With values like 8, 16 or 32, there is no problem finding
+ * out the size in memory (1, 2, 4); however with widths like 12 or
+ * 24, this function might be helpful to guess the size needed in RAM
+ * for a single acquired element.
+ *
+ * @param[in] chan Channel descriptor
+ *
+ * @return the size in memory of an acquired element, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if the argument chan is NULL
+ *
+ */
+int a4l_sizeof_chan(a4l_chinfo_t * chan)
+{
+	/* So far, it seems there is no 64 bit acquisition stuff */
+	int i = 0, sizes[3] = {8, 16, 32};
+
+	if (chan == NULL)
+		return -EINVAL;
+
+	while (i < 3 && sizes[i] < chan->nb_bits)
+		i++;
+
+	return (i == 3) ? -EINVAL : sizes[i] / 8;
+}
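+
+/*
+ * Usage sketch (not part of the library): size a buffer for 256
+ * samples of one channel, whatever its bit width.
+ *
+ *	int sz = a4l_sizeof_chan(chan);
+ *	void *raw = (sz > 0) ? malloc(256 * sz) : NULL;
+ */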
+
+/**
+ * @brief Get the size in memory of a digital acquired element
+ *
+ * This function is only useful for DIO subdevices. Digital subdevices
+ * are a specific kind of subdevice on which channels are regarded as
+ * bits composing the subdevice's bitfield. During a DIO acquisition,
+ * all bits are sampled. Therefore, a4l_sizeof_chan() is useless in
+ * this case and we have to use a4l_sizeof_subd().
+ * With bitfields whose sizes are 8, 16 or 32, there is no problem
+ * finding out the size in memory (1, 2, 4); however with widths like
+ * 12 or 24, this function might be helpful to guess the size needed
+ * in RAM for a single acquired element.
+ *
+ * @param[in] subd Subdevice descriptor
+ *
+ * @return the size in memory of an acquired element, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if the argument subd is NULL or if the
+ *    subdevice is not a digital subdevice
+ *
+ */
+int a4l_sizeof_subd(a4l_sbinfo_t *subd)
+{
+	/* So far, it seems there is no 64 bit acquisition stuff */
+	int i = 0, sizes[3] = {8, 16, 32};
+
+	if (subd == NULL)
+		return -EINVAL;
+
+	/* This function is only useful for DIO subdevice (all
+	   channels are acquired in one shot); for other kind of
+	   subdevice, the user must use a4l_sizeof_chan() so as to
+	   find out the size of the channel he wants to use */
+	if ((subd->flags & A4L_SUBD_TYPES) != A4L_SUBD_DIO &&
+	    (subd->flags & A4L_SUBD_TYPES) != A4L_SUBD_DI &&
+	    (subd->flags & A4L_SUBD_TYPES) != A4L_SUBD_DO)
+		return -EINVAL;
+
+	while (i < 3 && sizes[i] < subd->nb_chan)
+		i++;
+
+	return (i == 3) ? -EINVAL : sizes[i] / 8;
+}
+
+/**
+ * @brief Find the most suitable range
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() and
+ * a4l_fill_desc()
+ *
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] idx_chan Index of the concerned channel
+ * @param[in] unit Unit type used in the range
+ * @param[in] min Minimal limit value
+ * @param[in] max Maximal limit value
+ * @param[out] rng Found range
+ *
+ * @return The index of the most suitable range on success. Otherwise:
+ *
+ * - -ENOENT is returned if a suitable range is not found.
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    idx_subd, idx_chan and the dsc pointer should be checked; check
+ *    also the kernel log ("dmesg"); WARNING: a4l_fill_desc() should
+ *    be called before using a4l_find_range()
+ *
+ */
+int a4l_find_range(a4l_desc_t * dsc,
+		   unsigned int idx_subd,
+		   unsigned int idx_chan,
+		   unsigned long unit,
+		   double min, double max, a4l_rnginfo_t ** rng)
+{
+	int i, ret;
+	long lmin, lmax;
+	a4l_chinfo_t *chinfo;
+	a4l_rnginfo_t *rnginfo, *tmp = NULL;
+	int idx_rng = -ENOENT;
+
+	if (rng != NULL)
+		*rng = NULL;
+
+	/* Basic checks */
+	if (dsc == NULL)
+		return -EINVAL;
+
+	/* a4l_fill_desc() must have been called on this descriptor */
+	if (dsc->magic != MAGIC_CPLX_DESC)
+		return -EINVAL;
+
+	/* Retrieves the ranges count */
+	ret = a4l_get_chinfo(dsc, idx_subd, idx_chan, &chinfo);
+	if (ret < 0)
+		return ret;
+
+	/* Initializes variables */
+	lmin = (long)(min * A4L_RNG_FACTOR);
+	lmax = (long)(max * A4L_RNG_FACTOR);
+
+	/* Perform the search */
+	for (i = 0; i < chinfo->nb_rng; i++) {
+
+		ret = a4l_get_rnginfo(dsc, idx_subd, idx_chan, i, &rnginfo);
+		if (ret < 0)
+			return ret;
+
+		if (A4L_RNG_UNIT(rnginfo->flags) == unit &&
+		    rnginfo->min <= lmin && rnginfo->max >= lmax) {
+
+			if (tmp != NULL) {
+				if (rnginfo->min >= tmp->min &&
+				    rnginfo->max <= tmp->max) {
+					idx_rng = i;
+					tmp = rnginfo;
+				}
+			} else {
+				idx_rng = i;
+				tmp = rnginfo;
+			}
+		}
+	}
+
+	if (rng != NULL)
+		*rng = tmp;
+
+	return idx_rng;
+}
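+
+/*
+ * Usage sketch (not part of the library): pick the tightest range
+ * able to acquire a +/-2.5 V signal on channel 0 of subdevice
+ * idx_subd; A4L_RNG_VOLT_UNIT is assumed to be the volt unit
+ * constant from the Analogy UAPI headers.
+ *
+ *	a4l_rnginfo_t *rng;
+ *	int idx = a4l_find_range(&dsc, idx_subd, 0, A4L_RNG_VOLT_UNIT,
+ *				 -2.5, 2.5, &rng);
+ *	if (idx < 0)
+ *		...	(no suitable range on this channel)
+ */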
+
+/**
+ * @brief Unpack raw data (from the driver) into unsigned long values
+ *
+ * This function takes as input driver-specific data and scatters each
+ * element into an entry of an unsigned long table. It is a
+ * convenience routine which performs no conversion, just a copy.
+ *
+ * @param[in] chan Channel descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of transfers to copy
+ *
+ * @return the count of copies performed, otherwise a negative error
+ * code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    chan, dst and src pointers should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_rawtoul()
+ *
+ */
+int a4l_rawtoul(a4l_chinfo_t * chan, unsigned long *dst, void *src, int cnt)
+{
+	int size, i = 0, j = 0;
+
+	/* Temporary data accessor */
+	lsampl_t(*datax_get) (void *);
+
+	/* Basic checking */
+	if (chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Get the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_get = data32_get;
+		break;
+	case 2:
+		datax_get = data16_get;
+		break;
+	case 1:
+		datax_get = data8_get;
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	while (j < cnt) {
+
+		/* Properly copy the data */
+		dst[j] = (unsigned long)datax_get(src + i);
+
+		/* Update the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
+
+/**
+ * @brief Convert raw data (from the driver) to float-typed samples
+ *
+ * @param[in] chan Channel descriptor
+ * @param[in] rng Range descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of conversions to perform
+ *
+ * @return the count of conversions performed, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    chan, rng and the pointers should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_rawtof()
+ *
+ */
+int a4l_rawtof(a4l_chinfo_t * chan,
+	       a4l_rnginfo_t * rng, float *dst, void *src, int cnt)
+{
+	int size, i = 0, j = 0;
+	lsampl_t tmp;
+
+	/* Temporary values used for conversion
+	   (phys = a * src + b) */
+	float a, b;
+	/* Temporary data accessor */
+	lsampl_t(*datax_get) (void *);
+
+	/* Basic checking */
+	if (rng == NULL || chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Get the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_get = data32_get;
+		break;
+	case 2:
+		datax_get = data16_get;
+		break;
+	case 1:
+		datax_get = data8_get;
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	/* Compute the translation factor and the constant only once */
+	a = ((float)(rng->max - rng->min)) /
+		(((1ULL << chan->nb_bits) - 1) * A4L_RNG_FACTOR);
+	b = ((float)rng->min) / A4L_RNG_FACTOR;
+
+	while (j < cnt) {
+
+		/* Properly retrieve the data */
+		tmp = datax_get(src + i);
+
+		/* Perform the conversion */
+		dst[j] = a * tmp + b;
+
+		/* Update the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
+
+/**
+ * @brief Convert raw data (from the driver) to double-typed samples
+ *
+ * @param[in] chan Channel descriptor
+ * @param[in] rng Range descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of conversions to perform
+ *
+ * @return the count of conversions performed, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    chan, rng and the pointers should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_rawtod()
+ *
+ */
+int a4l_rawtod(a4l_chinfo_t * chan,
+	       a4l_rnginfo_t * rng, double *dst, void *src, int cnt)
+{
+	int size, i = 0, j = 0;
+	lsampl_t tmp;
+
+	/* Temporary values used for conversion
+	   (phys = a * src + b) */
+	double a, b;
+	/* Temporary data accessor */
+	lsampl_t(*datax_get) (void *);
+
+	/* Basic checking */
+	if (rng == NULL || chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Get the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_get = data32_get;
+		break;
+	case 2:
+		datax_get = data16_get;
+		break;
+	case 1:
+		datax_get = data8_get;
+		break;
+	default:
+		return -EINVAL;
+	};
+
+	/* Computes the translation factor and the constant only once */
+	a = ((double)(rng->max - rng->min)) /
+		(((1ULL << chan->nb_bits) - 1) * A4L_RNG_FACTOR);
+	b = ((double)rng->min) / A4L_RNG_FACTOR;
+
+	while (j < cnt) {
+
+		/* Properly retrieve the data */
+		tmp = datax_get(src + i);
+
+		/* Perform the conversion */
+		dst[j] = a * tmp + b;
+
+		/* Update the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
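+
+/*
+ * Usage sketch (not part of the library): turn a freshly read raw
+ * buffer into physical (double) values; "chan" and "rng" come from
+ * a4l_get_chinfo() and a4l_find_range() respectively.
+ *
+ *	double phys[256];
+ *	int n = a4l_rawtod(chan, rng, phys, raw, 256);
+ *	if (n == 256)
+ *		...	(phys[] now holds values in the range's unit)
+ */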
+
+/**
+ * @brief Pack unsigned long values into raw data (for the driver)
+ *
+ * This function takes as input a table of unsigned long values and
+ * gather them according to the channel width. It is a convenience
+ * routine which performs no conversion, just formatting.
+ *
+ * @param[in] chan Channel descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of transfers to copy
+ *
+ * @return the count of copies performed, otherwise a negative error
+ * code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong; chan,
+ *    dst and src pointers should be checked; check also the kernel
+ *    log ("dmesg"); WARNING: a4l_fill_desc() should be called before
+ *    using a4l_ultoraw()
+ *
+ */
+int a4l_ultoraw(a4l_chinfo_t * chan, void *dst, unsigned long *src, int cnt)
+{
+	int size, i = 0, j = 0;
+
+	/* Temporary data accessor */
+	void (*datax_set) (void *, lsampl_t);
+
+	/* Basic checking */
+	if (chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Select the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_set = data32_set;
+		break;
+	case 2:
+		datax_set = data16_set;
+		break;
+	case 1:
+		datax_set = data8_set;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	while (j < cnt) {
+
+		/* Perform the copy */
+		datax_set(dst + i, (lsampl_t)src[j]);
+
+		/* Update the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
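+
+/*
+ * Usage sketch: packing a table of unsigned long values into a raw
+ * buffer sized according to the channel width, with no value
+ * conversion involved; chan is assumed to be a valid channel
+ * descriptor obtained through the descriptor API.
+ *
+ *	unsigned long values[8] = { 0, 1, 2, 3, 4, 5, 6, 7 };
+ *	unsigned char raw[8 * sizeof(lsampl_t)];
+ *	int nb = a4l_ultoraw(chan, raw, values, 8);
+ *	(nb == 8 on success, negative error code otherwise)
+ */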
+
+/**
+ * @brief Convert float-typed samples to raw data (for the driver)
+ *
+ * @param[in] chan Channel descriptor
+ * @param[in] rng Range descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of conversion to perform
+ *
+ * @return the count of conversions performed, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    chan, rng and the pointers should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_ftoraw()
+ *
+ */
+int a4l_ftoraw(a4l_chinfo_t * chan,
+	       a4l_rnginfo_t * rng, void *dst, float *src, int cnt)
+{
+	int size, i = 0, j = 0;
+
+	/* Temporary values used for conversion
+	   (dst = a * phys - b) */
+	float a, b;
+	/* Temporary data accessor */
+	void (*datax_set) (void *, lsampl_t);
+
+	/* Basic checking */
+	if (rng == NULL || chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Select the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_set = data32_set;
+		break;
+	case 2:
+		datax_set = data16_set;
+		break;
+	case 1:
+		datax_set = data8_set;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Compute the translation factor and the constant only once */
+	a = (((float)A4L_RNG_FACTOR) / (rng->max - rng->min)) *
+		((1ULL << chan->nb_bits) - 1);
+	b = ((float)(rng->min) / (rng->max - rng->min)) *
+		((1ULL << chan->nb_bits) - 1);
+
+	while (j < cnt) {
+
+		/* Perform the conversion */
+		datax_set(dst + i, (lsampl_t) (a * src[j] - b));
+
+		/* Update the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
+
+/**
+ * @brief Convert double-typed samples to raw data (for the driver)
+ *
+ * @param[in] chan Channel descriptor
+ * @param[in] rng Range descriptor
+ * @param[out] dst Output buffer
+ * @param[in] src Input buffer
+ * @param[in] cnt Count of conversion to perform
+ *
+ * @return the count of conversions performed, otherwise a negative
+ * error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong;
+ *    chan, rng and the pointers should be checked; check also the
+ *    kernel log ("dmesg"); WARNING: a4l_fill_desc() should be called
+ *    before using a4l_dtoraw()
+ *
+ */
+int a4l_dtoraw(a4l_chinfo_t * chan,
+	       a4l_rnginfo_t * rng, void *dst, double *src, int cnt)
+{
+	int size, i = 0, j = 0;
+
+	/* Temporary values used for conversion
+	   (dst = a * phys - b) */
+	double a, b;
+	/* Temporary data accessor */
+	void (*datax_set) (void *, lsampl_t);
+
+	/* Basic checking */
+	if (rng == NULL || chan == NULL)
+		return -EINVAL;
+
+	/* Find out the size in memory */
+	size = a4l_sizeof_chan(chan);
+
+	/* Select the suitable accessor */
+	switch (size) {
+	case 4:
+		datax_set = data32_set;
+		break;
+	case 2:
+		datax_set = data16_set;
+		break;
+	case 1:
+		datax_set = data8_set;
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	/* Compute the translation factor and the constant only once */
+	a = (((double)A4L_RNG_FACTOR) / (rng->max - rng->min)) *
+		((1ULL << chan->nb_bits) - 1);
+	b = ((double)(rng->min) / (rng->max - rng->min)) *
+		((1ULL << chan->nb_bits) - 1);
+
+	while (j < cnt) {
+
+		/* Perform the conversion */
+		datax_set(dst + i, (lsampl_t) (a * src[j] - b));
+
+		/* Update the counters */
+		i += size;
+		j++;
+	}
+
+	return j;
+}
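+
+/*
+ * Usage sketch: converting physical values (expressed in the range
+ * unit, e.g. volts) into raw samples before sending them to an
+ * analog output subdevice; chan and rng are assumed to describe the
+ * target channel and range.
+ *
+ *	double phys[2] = { 0.0, 2.5 };
+ *	unsigned char raw[2 * sizeof(lsampl_t)];
+ *	int nb = a4l_dtoraw(chan, rng, raw, phys, 2);
+ */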
+/** @} Range / conversion API */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/root_leaf.h b/kernel/xenomai-v3.2.4/lib/analogy/root_leaf.h
new file mode 100644
index 0000000..6c58001
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/root_leaf.h
@@ -0,0 +1,54 @@
+/**
+ * @file
+ * Analogy for Linux, root / leaf system
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef __ANALOGY_ROOT_LEAF_H__
+#define __ANALOGY_ROOT_LEAF_H__
+
+#ifndef DOXYGEN_CPP
+
+#include <errno.h>
+
+struct a4l_leaf {
+	unsigned int id;
+	unsigned int nb_leaf;
+	struct a4l_leaf *lfnxt;
+	struct a4l_leaf *lfchd;
+	void *data;
+};
+typedef struct a4l_leaf a4l_leaf_t;
+
+struct a4l_root {
+	/* Same fields as a4l_leaf_t */
+	unsigned int id;
+	unsigned int nb_leaf;
+	struct a4l_leaf *lfnxt;
+	struct a4l_leaf *lfchd;
+	void *data;
+	/* Root specific: buffer control stuff */
+	void *offset;
+	unsigned long gsize;
+};
+typedef struct a4l_root a4l_root_t;
+
+#endif /* !DOXYGEN_CPP */
+
+#endif /* __ANALOGY_ROOT_LEAF_H__ */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/sync.c b/kernel/xenomai-v3.2.4/lib/analogy/sync.c
new file mode 100644
index 0000000..fee5fc9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/sync.c
@@ -0,0 +1,426 @@
+/**
+ * @file
+ * Analogy for Linux, instruction related features
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdarg.h>
+#include <errno.h>
+#include <rtdm/analogy.h>
+#include "internal.h"
+
+/*!
+ * @ingroup analogy_lib
+ * @defgroup analogy_lib_level1 Level 1 API
+ * @{
+ */
+
+/*!
+ * @ingroup analogy_lib_level1
+ * @defgroup analogy_lib_sync1 Synchronous acquisition API
+ * @{
+ */
+
+/**
+ * @brief Perform a list of synchronous acquisition misc operations
+ *
+ * The function a4l_snd_insnlist() can send a list of synchronous
+ * instructions targeting various subdevices, channels, etc.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] arg Instructions list structure
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENOMEM is returned if the system is out of memory
+ *
+ */
+int a4l_snd_insnlist(a4l_desc_t * dsc, a4l_insnlst_t * arg)
+{
+	/* Basic checking */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	return __sys_ioctl(dsc->fd, A4L_INSNLIST, arg);
+}
+
+/**
+ * @brief Perform a synchronous acquisition misc operation
+ *
+ * The function a4l_snd_insn() triggers a synchronous acquisition.
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] arg Instruction structure
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENOMEM is returned if the system is out of memory
+ *
+ */
+int a4l_snd_insn(a4l_desc_t * dsc, a4l_insn_t * arg)
+{
+	/* Basic checking */
+	if (dsc == NULL || dsc->fd < 0)
+		return -EINVAL;
+
+	return __sys_ioctl(dsc->fd, A4L_INSN, arg);
+}
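+
+/*
+ * Usage sketch: reading a single sample with one synchronous
+ * instruction. chan_desc is assumed to be a packed channel
+ * descriptor (channel, range and reference), as expected by the
+ * instruction services.
+ *
+ *	lsampl_t sample;
+ *	a4l_insn_t insn = {
+ *		.type = A4L_INSN_READ,
+ *		.idx_subd = idx_subd,
+ *		.chan_desc = chan_desc,
+ *		.data_size = sizeof(sample),
+ *		.data = &sample,
+ *	};
+ *	int err = a4l_snd_insn(&dsc, &insn);
+ */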
+
+/** @} Synchronous acquisition API */
+
+/** @} Level 1 API */
+
+/*!
+ * @ingroup analogy_lib
+ * @defgroup analogy_lib_level2 Level 2 API
+ * @{
+ */
+
+/*!
+ * @ingroup analogy_lib_level2
+ * @defgroup analogy_lib_sync2 Synchronous acquisition API
+ * @{
+ */
+
+/**
+ * @brief Perform a synchronous acquisition write operation
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] chan_desc Channel descriptor (channel, range and
+ * reference)
+ * @param[in] ns_delay Optional delay (in nanoseconds) to wait between
+ * the setting of the output channel and the sample(s) transfer(s).
+ * @param[in] buf Output buffer
+ * @param[in] nbyte Number of bytes to write
+ *
+ * @return Number of bytes written, otherwise negative error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENOMEM is returned if the system is out of memory
+ *
+ */
+int a4l_sync_write(a4l_desc_t * dsc,
+		   unsigned int idx_subd,
+		   unsigned int chan_desc,
+		   unsigned int ns_delay, void *buf, size_t nbyte)
+{
+	int ret;
+	a4l_insn_t insn_tab[2] = {
+		{
+			.type = A4L_INSN_WRITE,
+			.idx_subd = idx_subd,
+			.chan_desc = chan_desc,
+			.data_size = 0,
+			.data = buf
+		}, {
+			.type = A4L_INSN_WAIT,
+			.idx_subd = idx_subd,
+			.chan_desc = chan_desc,
+			.data_size = 1,
+			.data = NULL
+		}
+	};
+
+	/* If some delay needs to be applied,
+	   the instruction list feature is needed */
+	if (ns_delay != 0) {
+		int ret;
+		lsampl_t _delay = (lsampl_t) ns_delay;
+		a4l_insnlst_t insnlst = {
+			.count = 2,
+			.insns = insn_tab
+		};
+
+		/* Set the delay to wait */
+		insn_tab[1].data = &_delay;
+
+		/* Send the two instructions (dummy write + wait)
+		   to the Analogy layer */
+		ret = a4l_snd_insnlist(dsc, &insnlst);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* The first instruction structure must be updated so as
+	   to write the proper data amount */
+	insn_tab[0].data_size = nbyte;
+
+	/* Send the write instruction to the Analogy layer */
+	ret = a4l_snd_insn(dsc, insn_tab);
+
+	return (ret == 0) ? nbyte : ret;
+}
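+
+/*
+ * Usage sketch: writing one raw sample to an analog output
+ * subdevice with no settling delay; dsc, idx_subd and chan_desc are
+ * assumed to be valid.
+ *
+ *	lsampl_t sample = 2048;
+ *	int ret = a4l_sync_write(&dsc, idx_subd, chan_desc,
+ *				 0, &sample, sizeof(sample));
+ *	(ret == sizeof(sample) on success)
+ */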
+
+/**
+ * @brief Perform a synchronous acquisition read operation
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] chan_desc Channel descriptor (channel, range and
+ * reference)
+ * @param[in] ns_delay Optional delay (in nanoseconds) to wait between
+ * the setting of the input channel and sample(s) acquisition(s).
+ * @param[out] buf Input buffer
+ * @param[in] nbyte Number of bytes to read
+ *
+ * @return Number of bytes read, otherwise negative error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENOMEM is returned if the system is out of memory
+ *
+ */
+int a4l_sync_read(a4l_desc_t * dsc,
+		  unsigned int idx_subd,
+		  unsigned int chan_desc,
+		  unsigned int ns_delay, void *buf, size_t nbyte)
+{
+	int ret;
+	a4l_insn_t insn_tab[2] = {
+		{
+			.type = A4L_INSN_READ,
+			.idx_subd = idx_subd,
+			.chan_desc = chan_desc,
+			.data_size = nbyte,
+			.data = buf
+		}, {
+			.type = A4L_INSN_WAIT,
+			.idx_subd = idx_subd,
+			.chan_desc = chan_desc,
+			.data_size = sizeof(unsigned int),
+			.data = NULL
+		}
+	};
+
+	/* If some delay needs to be applied,
+	   the instruction list feature is needed */
+	if (ns_delay != 0) {
+		int ret;
+		lsampl_t _delay = (lsampl_t) ns_delay;
+		a4l_insnlst_t insnlst = {
+			.count = 2,
+			.insns = insn_tab
+		};
+
+		/* Set the delay to wait */
+		insn_tab[1].data = &_delay;
+
+		/* Send the two instructions (false read + wait)
+		   to the Analogy layer */
+		ret = a4l_snd_insnlist(dsc, &insnlst);
+		if (ret < 0)
+			return ret;
+	}
+
+	/* The first instruction structure must be updated so as
+	   to retrieve the proper data amount */
+	insn_tab[0].data_size = nbyte;
+
+	/* Send the read instruction to the Analogy layer */
+	ret = a4l_snd_insn(dsc, insn_tab);
+
+	return (ret == 0) ? nbyte : ret;
+}
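+
+/*
+ * Usage sketch: acquiring one raw sample, letting the input settle
+ * for 10 us (10000 ns) once the channel is selected; the raw value
+ * can then be converted with a4l_rawtof() or a4l_rawtod().
+ *
+ *	lsampl_t sample;
+ *	int ret = a4l_sync_read(&dsc, idx_subd, chan_desc,
+ *				10000, &sample, sizeof(sample));
+ */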
+
+/**
+ * @brief Perform a synchronous digital acquisition (DIO)
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] mask Write mask which indicates which bit(s) must be
+ * modified
+ * @param[in,out] buf Input / output buffer
+ *
+ * @return 0 on success, otherwise a negative error code:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENOMEM is returned if the system is out of memory
+ * - -ENOSYS is returned if the driver does not provide any
+ *    "instruction bits" handler
+ *
+ */
+int a4l_sync_dio(a4l_desc_t *dsc,
+		 unsigned int idx_subd, void *mask, void *buf)
+{
+	unsigned char values[16];
+	a4l_insn_t insn = {
+		.type = A4L_INSN_BITS,
+		.idx_subd = idx_subd,
+		.data = values,
+	};
+
+	int ret, size;
+	a4l_sbinfo_t *subd;
+
+	/* Get the subdevice descriptor */
+	ret = a4l_get_subdinfo(dsc, idx_subd, &subd);
+	if (ret < 0)
+		return ret;
+
+	/* Get the size in memory of a DIO acquisition */
+	size = a4l_sizeof_subd(subd);
+
+	switch(size) {
+	case sizeof(uint32_t): {
+		uint32_t *tmp = (uint32_t *)values;
+		tmp[0] = *((uint32_t *)mask);
+		tmp[1] = *((uint32_t *)buf);
+		insn.data_size = 2 * sizeof(uint32_t);
+		break;
+	}
+	case sizeof(uint16_t): {
+		uint16_t *tmp = (uint16_t *)values;
+		tmp[0] = *((uint16_t *)mask);
+		tmp[1] = *((uint16_t *)buf);
+		insn.data_size = 2 * sizeof(uint16_t);
+		break;
+	}
+	case sizeof(uint8_t): {
+		uint8_t *tmp = (uint8_t *)values;
+		tmp[0] = *((uint8_t *)mask);
+		tmp[1] = *((uint8_t *)buf);
+		insn.data_size = 2 * sizeof(uint8_t);
+		break;
+	}
+	default:
+		return -EINVAL;
+	}
+
+	/* Send the insn_bits instruction */
+	ret = a4l_snd_insn(dsc, &insn);
+
+	/* Update the buffer if need be */
+	switch(size) {
+	case sizeof(uint32_t): {
+		uint32_t *tmp = (uint32_t *)buf;
+		*tmp = ((uint32_t *)values)[1];
+		break;
+	}
+	case sizeof(uint16_t): {
+		uint16_t *tmp = (uint16_t *)buf;
+		*tmp = ((uint16_t *)values)[1];
+		break;
+	}
+	case sizeof(uint8_t): {
+		uint8_t *tmp = (uint8_t *)buf;
+		*tmp = ((uint8_t *)values)[1];
+		break;
+	}
+	}
+
+	return ret;
+}
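+
+/*
+ * Usage sketch: driving the two lowest lines of an 8-bit wide DIO
+ * subdevice high while leaving the other lines untouched, then
+ * reading the line states back from the same buffer.
+ *
+ *	uint8_t mask = 0x03;	(only bits 0 and 1 are written)
+ *	uint8_t bits = 0x03;	(desired state for the masked bits)
+ *	int err = a4l_sync_dio(&dsc, idx_subd, &mask, &bits);
+ *	(on success, bits holds the current line states)
+ */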
+
+/**
+ * @brief Configure a subdevice
+ *
+ * a4l_config_subd() takes a variable count of arguments. According to
+ * the configuration type, an additional argument is necessary:
+ * - A4L_INSN_CONFIG_DIO_INPUT: the channel index (unsigned int)
+ * - A4L_INSN_CONFIG_DIO_OUTPUT: the channel index (unsigned int)
+ * - A4L_INSN_CONFIG_DIO_OPENDRAIN: the channel index (unsigned int)
+ * - A4L_INSN_CONFIG_DIO_QUERY: the returned DIO polarity (unsigned
+ *   int *)
+ *
+ * @param[in] dsc Device descriptor filled by a4l_open() (and
+ * optionally a4l_fill_desc())
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] type Configuration parameter
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -ENOSYS is returned if the configuration parameter is not
+ *    supported
+ *
+ */
+int a4l_config_subd(a4l_desc_t * dsc,
+		    unsigned int idx_subd, unsigned int type, ...)
+{
+	unsigned int values[4] = {type, 0, 0, 0};
+	a4l_insn_t insn = {
+		.type = A4L_INSN_CONFIG,
+		.idx_subd = idx_subd,
+		.data = values,
+	};
+	int ret = 0;
+	va_list args;
+
+	va_start(args, type);
+
+	/* So far, only a few config types are supported */
+	switch (type) {
+	case A4L_INSN_CONFIG_DIO_OUTPUT:
+	case A4L_INSN_CONFIG_DIO_INPUT:
+	case A4L_INSN_CONFIG_DIO_OPENDRAIN:
+	{
+		unsigned int idx_chan = va_arg(args, unsigned int);
+		insn.chan_desc = CHAN(idx_chan);
+		insn.data_size = sizeof(unsigned int);
+		break;
+	}
+	case A4L_INSN_CONFIG_DIO_QUERY:
+		insn.data_size = 2 * sizeof(unsigned int);
+		break;
+	default:
+		return -ENOSYS;
+	}
+
+	/* Send the config instruction */
+	ret = a4l_snd_insn(dsc, &insn);
+	if (ret < 0)
+		goto out;
+
+	/* Retrieve the result(s), if need be */
+	switch (type) {
+	case A4L_INSN_CONFIG_DIO_QUERY:
+	{
+		unsigned int *value = va_arg(args, unsigned int *);
+		*value = values[1];
+		break;
+	}
+	default:
+		break;
+	}
+
+out:
+	va_end(args);
+
+	return ret;
+}
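+
+/*
+ * Usage sketch: switching channel 0 of a DIO subdevice to output,
+ * then querying the DIO polarity.
+ *
+ *	unsigned int polarity;
+ *	int err = a4l_config_subd(&dsc, idx_subd,
+ *				  A4L_INSN_CONFIG_DIO_OUTPUT, 0);
+ *	if (!err)
+ *		err = a4l_config_subd(&dsc, idx_subd,
+ *				      A4L_INSN_CONFIG_DIO_QUERY,
+ *				      &polarity);
+ */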
+
+/** @} Synchronous acquisition API */
+
+/** @} Level 2 API */
diff --git a/kernel/xenomai-v3.2.4/lib/analogy/sys.c b/kernel/xenomai-v3.2.4/lib/analogy/sys.c
new file mode 100644
index 0000000..165c41d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/analogy/sys.c
@@ -0,0 +1,213 @@
+/**
+ * @file
+ * Analogy for Linux, descriptor related features
+ *
+ * @note Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * @note Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+/*!
+ * @ingroup analogy
+ * @defgroup analogy_lib Analogy user API
+ *
+ * This is the API interface of the Analogy library
+ *
+ */
+
+/*!
+ * @ingroup analogy_lib
+ * @defgroup analogy_lib_syscall Level 0 API
+ *
+ * System call interface to core Analogy services
+ *
+ * This interface should not be used directly by applications.
+ */
+
+#include <rtdm/analogy.h>
+#include "internal.h"
+
+/*!
+ * @ingroup analogy_lib_syscall
+ * @defgroup analogy_lib_core Basic Syscall API
+ * @{
+ */
+
+/**
+ * @brief Open an Analogy device
+ *
+ * @param[in] fname Device name
+ *
+ * @return Positive file descriptor value on success, otherwise a negative
+ * error code.
+ *
+ */
+int a4l_sys_open(const char *fname)
+{
+	return __sys_open(fname);
+}
+
+/**
+ * @brief Close an Analogy device
+ *
+ * @param[in] fd File descriptor as returned by a4l_sys_open()
+ *
+ * @return 0 on success, otherwise a negative error code.
+ *
+ */
+int a4l_sys_close(int fd)
+{
+	return __sys_close(fd);
+}
+
+/**
+ * @brief Read from an Analogy device
+ *
+ * The function a4l_sys_read() is only useful for acquisitions
+ * configured through an Analogy command.
+ *
+ * @param[in] fd File descriptor as returned by a4l_sys_open()
+ * @param[out] buf Input buffer
+ * @param[in] nbyte Number of bytes to read
+ *
+ * @return Number of bytes read. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -ENOENT is returned if the device's reading subdevice is idle (no
+ *    command was sent)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EINTR is returned if the calling task has been unblocked by a signal
+ *
+ */
+int a4l_sys_read(int fd, void *buf, size_t nbyte)
+{
+	return __sys_read(fd, buf, nbyte);
+}
+
+/**
+ * @brief Write to an Analogy device
+ *
+ * The function a4l_sys_write() is only useful for acquisitions
+ * configured through an Analogy command.
+ *
+ * @param[in] fd File descriptor as returned by a4l_sys_open()
+ * @param[in] buf Output buffer
+ * @param[in] nbyte Number of bytes to write
+ *
+ * @return Number of bytes written. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -ENOENT is returned if the device's writing subdevice is idle (no
+ *    command was sent)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EINTR is returned if the calling task has been unblocked by a signal
+ *
+ */
+int a4l_sys_write(int fd, void *buf, size_t nbyte)
+{
+	return __sys_write(fd, buf, nbyte);
+}
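+
+/*
+ * Usage sketch: raw level-0 access. The device name "analogy0" is an
+ * assumption and must match an attached Analogy device;
+ * a4l_sys_read() fails with -ENOENT unless a command is running on
+ * the reading subdevice.
+ *
+ *	char buf[256];
+ *	int fd = a4l_sys_open("analogy0");
+ *	if (fd >= 0) {
+ *		int nb = a4l_sys_read(fd, buf, sizeof(buf));
+ *		...
+ *		a4l_sys_close(fd);
+ *	}
+ */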
+
+/** @} */
+
+/*!
+ * @ingroup analogy_lib_syscall
+ * @defgroup analogy_lib_attach Attach / detach Syscall API
+ * @{
+ */
+
+/**
+ * @brief Attach an Analogy device to a driver
+ *
+ * @param[in] fd File descriptor as returned by a4l_sys_open()
+ * @param[in] arg Link descriptor argument
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -ENOMEM is returned if the system is out of memory
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -ENODEV is returned in case of internal error (Please, type
+ *    "dmesg" for more info)
+ * - -ENXIO is returned in case of internal error (Please, type
+ *    "dmesg" for more info)
+ *
+ */
+int a4l_sys_attach(int fd, a4l_lnkdesc_t * arg)
+{
+	return __sys_ioctl(fd, A4L_DEVCFG, arg);
+}
+
+/**
+ * @brief Detach an Analogy device from a driver
+ *
+ * @param[in] fd File descriptor as returned by a4l_sys_open()
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EBUSY is returned if the device to be detached is in use
+ * - -EPERM is returned if the device to be detached still has some
+ *    buffer mapped in user-space
+ * - -ENODEV is returned in case of internal error (Please, type
+ *    "dmesg" for more info)
+ * - -ENXIO is returned in case of internal error (Please, type
+ *    "dmesg" for more info)
+ *
+ */
+int a4l_sys_detach(int fd)
+{
+	return __sys_ioctl(fd, A4L_DEVCFG, NULL);
+}
+
+/**
+ * @brief Configure the buffer size
+ *
+ * This function can configure the buffer size of the file descriptor
+ * currently in use. If the subdevice index is set to
+ * A4L_BUF_DEFMAGIC, it can also define the default buffer size at
+ * open time.
+ *
+ * @param[in] fd File descriptor as returned by a4l_sys_open()
+ * @param[in] idx_subd Index of the concerned subdevice
+ * @param[in] size Buffer size to be set
+ *
+ * @return 0 on success. Otherwise:
+ *
+ * - -EINVAL is returned if some argument is missing or wrong (Please,
+ *    type "dmesg" for more info)
+ * - -EPERM is returned if the function is called in an RT context or
+ *    if the buffer to resize is mapped in user-space (Please, type
+ *    "dmesg" for more info)
+ * - -EFAULT is returned if a user <-> kernel transfer went wrong
+ * - -EBUSY is returned if the selected subdevice is already
+ *    processing an asynchronous operation
+ * - -ENOMEM is returned if the system is out of memory
+ *
+ */
+int a4l_sys_bufcfg(int fd, unsigned int idx_subd, unsigned long size)
+{
+	a4l_bufcfg_t cfg = { idx_subd, size };
+
+	return __sys_ioctl(fd, A4L_BUFCFG, &cfg);
+}
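+
+/*
+ * Usage sketch: growing the buffer of subdevice 0 to 64 KiB before
+ * starting an asynchronous acquisition; fd is assumed to come from
+ * a4l_sys_open().
+ *
+ *	int err = a4l_sys_bufcfg(fd, 0, 64 * 1024);
+ */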
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/COPYING b/kernel/xenomai-v3.2.4/lib/boilerplate/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/Makefile.am b/kernel/xenomai-v3.2.4/lib/boilerplate/Makefile.am
new file mode 100644
index 0000000..d6fc0ab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/Makefile.am
@@ -0,0 +1,121 @@
+
+noinst_LTLIBRARIES = libavl.la libversion.la libiniparser.la libboilerplate.la
+libboilerplate_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -lpthread
+libboilerplate_la_LIBADD = libavl.la libversion.la libiniparser.la
+
+libboilerplate_la_SOURCES =	\
+	ancillaries.c		\
+	heapmem.c		\
+	hash.c			\
+	setup.c			\
+	time.c
+
+if XENO_PRIVATE_OBSTACK
+libboilerplate_la_SOURCES += obstack.c
+endif
+
+if XENO_DEBUG
+libboilerplate_la_SOURCES += debug.c
+endif
+
+libversion_la_CFLAGS = @XENO_USER_CFLAGS@
+libversion_la_SOURCES = version.c
+version.c: git-stamp.h
+
+git-stamp.h: git-stamp
+	@if test -r $(top_srcdir)/.git; then						\
+	  stamp=`git --git-dir=$(top_srcdir)/.git rev-list --abbrev-commit -1 HEAD`;	\
+	  if test \! -s $@ || grep -wvq $$stamp $@; then				\
+		date=`git --git-dir=$(top_srcdir)/.git log -1 $$stamp --pretty=format:%ci`;	\
+		echo "#define GIT_STAMP \"#$$stamp ($$date)\"" > $@;			\
+	  fi;										\
+	elif test \! -r $@ -o -s $@; then						\
+	    rm -f $@ && touch $@;							\
+	fi; true
+
+sbin_PROGRAMS = version
+version_CFLAGS = @XENO_USER_CFLAGS@ -I$(top_srcdir)/include -D__PROGRAM__
+version_SOURCES = version.c
+
+clean-local:
+	$(RM) git-stamp.h
+
+nodist_libboilerplate_la_SOURCES = config-dump.h
+BUILT_SOURCES = config-dump.h
+CLEANFILES = config-dump.h
+
+config-dump.h: Makefile $(CONFIG_HEADER)
+	@( sed -e '/^#define CONFIG_/!d' \
+	-e 's/^#[\t ]*define \(CONFIG_[^ ]*\) \(.*\)/\1=\2/' \
+	-e 's,\",\\&,g' \
+	-e 's/^.*$$/\"&\",/' $(CONFIG_HEADER); echo '"---",'; \
+	sed -e '/#undef CONFIG_/!d' \
+	-e 's/.*\(CONFIG_[^ ]*\).*/\1/' \
+	-e 's/^.*$$/\"& is OFF\",/' $(CONFIG_HEADER); echo '"---",' ) > $@
+
+libboilerplate_la_CPPFLAGS =				\
+	@XENO_USER_CFLAGS@				\
+	-I$(top_srcdir)					\
+	-I$(top_srcdir)/include
+
+libavl_la_SOURCES = avl.c
+
+libavl_la_CPPFLAGS = 			\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/include		\
+	-include boilerplate/avl.h
+
+if XENO_PSHARED
+
+noinst_LTLIBRARIES += libshavl.la
+libboilerplate_la_LIBADD += libshavl.la
+
+libshavl_la_SOURCES = avl.c
+
+libshavl_la_CPPFLAGS = 			\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/include		\
+	-include boilerplate/shavl.h
+
+endif
+
+libiniparser_la_SOURCES	=	\
+	iniparser/dictionary.c	\
+	iniparser/dictionary.h	\
+	iniparser/iniparser.h	\
+	iniparser/iniparser.c
+
+libiniparser_la_CPPFLAGS	=		\
+	-Iiniparser @XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/include
+
+EXTRA_DIST = iniparser/README iniparser/LICENSE
+
+# Always build TLSF for benchmarking purpose via the
+# smokey testsuite.
+
+libboilerplate_la_LIBADD +=	libtlsf.la
+noinst_LTLIBRARIES += libtlsf.la
+
+libtlsf_la_SOURCES =	\
+	tlsf/tlsf.c	\
+	tlsf/tlsf.h	\
+	tlsf/target.h
+
+libtlsf_la_CPPFLAGS =					\
+	-Itlsf @XENO_USER_CFLAGS@			\
+	-I$(top_srcdir)/include				\
+	-DTLSF_USE_LOCKS=1 -DUSE_MMAP=1 -DTLSF_STATISTIC=1
+
+SUBDIRS = init
+
+EXTRA_DIST += tlsf/README
+
+SPARSE = sparse
+
+sparse:
+	@for i in $(libboilerplate_la_SOURCES) $(libtlsf_la_SOURCES) $(libiniparser_la_SOURCES); do \
+		$(SPARSE) $(CHECKFLAGS) $(srcdir)/$$i; \
+	done
+
+.PHONY: git-stamp
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/ancillaries.c b/kernel/xenomai-v3.2.4/lib/boilerplate/ancillaries.c
new file mode 100644
index 0000000..a0754cb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/ancillaries.c
@@ -0,0 +1,542 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <sys/syscall.h>
+#include <stdio.h>
+#include <time.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <pthread.h>
+#include <sched.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <malloc.h>
+#include "boilerplate/atomic.h"
+#include "boilerplate/lock.h"
+#include "boilerplate/time.h"
+#include "boilerplate/scope.h"
+#include "boilerplate/setup.h"
+#include "boilerplate/debug.h"
+#include "boilerplate/ancillaries.h"
+#include "boilerplate/signal.h"
+#include "boilerplate/namegen.h"
+#include "xenomai/init.h"
+
+pthread_mutex_t __printlock;
+
+static struct timespec init_date;
+
+static int init_done;
+
+static void __do_printout(const char *name, const char *header,
+			  unsigned int ms, unsigned int us,
+			  const char *fmt, va_list ap)
+{
+	FILE *fp = stderr;
+
+	__RT(fprintf(fp, "%4u\"%.3u.%.3u| ", ms / 1000, ms % 1000, us));
+
+	if (header)
+		__RT(fputs(header, fp));
+
+	__RT(fprintf(fp, "[%s] ", name ?: "main"));
+	__RT(vfprintf(fp, fmt, ap));
+	__RT(fputc('\n', fp));
+}
+
+static void __do_early_printout(const char *name, const char *header,
+			  unsigned int ms, unsigned int us,
+			  const char *fmt, va_list ap)
+{
+	FILE *fp = stderr;
+
+	__STD(fprintf(fp, "%4u\"%.3u.%.3u| ", ms / 1000, ms % 1000, us));
+
+	if (header)
+		__STD(fputs(header, fp));
+
+	__STD(fprintf(fp, "[%s] ", name ?: "main"));
+	__STD(vfprintf(fp, fmt, ap));
+	__STD(fputc('\n', fp));
+	fflush(fp);
+}
+
+void __printout(const char *name, const char *header,
+		const char *fmt, va_list ap)
+{
+	struct timespec now, delta;
+	unsigned long long ns;
+	unsigned int ms, us;
+
+	/*
+	 * Catch early printouts, when the init sequence is not
+	 * completed yet. In such event, we don't care for serializing
+	 * output, since we must be running over the main thread
+	 * uncontended.
+	 */
+	if (!init_done) {
+		__do_early_printout(name, header, 0, 0, fmt, ap);
+		return;
+	}
+
+	__RT(clock_gettime(CLOCK_MONOTONIC, &now));
+	timespec_sub(&delta, &now, &init_date);
+	ns = delta.tv_sec * 1000000000ULL;
+	ns += delta.tv_nsec;
+	ms = ns / 1000000ULL;
+	us = (ns % 1000000ULL) / 1000ULL;
+	SIGSAFE_LOCK_ENTRY(&__printlock);
+	__do_printout(name, header, ms, us, fmt, ap);
+	SIGSAFE_LOCK_EXIT(&__printlock);
+}
+
+void __warning(const char *name, const char *fmt, va_list ap)
+{
+	__printout(name, "WARNING: ", fmt, ap);
+}
+
+void __notice(const char *name, const char *fmt, va_list ap)
+{
+	__printout(name, NULL, fmt, ap);
+}
+
+void ___panic(const char *fn, const char *name,
+	     const char *fmt, va_list ap)
+{
+	char *p;
+
+	if (asprintf(&p, "BUG in %s(): ", fn) < 0)
+		p = "BUG: ";
+	__printout(name, p, fmt, ap);
+#ifdef CONFIG_XENO_COBALT
+	rt_print_flush_buffers();
+#endif /* CONFIG_XENO_COBALT */
+	exit(1);
+}
+
+__weak void error_hook(struct error_frame *ef) /* NULL in non-debug mode */
+{
+}
+
+#define __esym_def(e)	[e] = #e
+
+static const char *__esym_map[] = {
+	[0] = "OK",
+	__esym_def(EPERM),
+	__esym_def(ENOENT),
+	__esym_def(ESRCH),
+	__esym_def(EINTR),
+	__esym_def(EIO),
+	__esym_def(ENXIO),
+	__esym_def(E2BIG),
+	__esym_def(ENOEXEC),
+	__esym_def(EBADF),
+	__esym_def(ECHILD),
+	__esym_def(EAGAIN),
+	__esym_def(ENOMEM),
+	__esym_def(EACCES),
+	__esym_def(EFAULT),
+	__esym_def(ENOTBLK),
+	__esym_def(EBUSY),
+	__esym_def(EEXIST),
+	__esym_def(EXDEV),
+	__esym_def(ENODEV),
+	__esym_def(ENOTDIR),
+	__esym_def(EISDIR),
+	__esym_def(EINVAL),
+	__esym_def(ENFILE),
+	__esym_def(EMFILE),
+	__esym_def(ENOTTY),
+	__esym_def(ETXTBSY),
+	__esym_def(EFBIG),
+	__esym_def(ENOSPC),
+	__esym_def(ESPIPE),
+	__esym_def(EROFS),
+	__esym_def(EMLINK),
+	__esym_def(EPIPE),
+	__esym_def(EDOM),
+	__esym_def(ERANGE),
+	__esym_def(ENOSYS),
+	__esym_def(ETIMEDOUT),
+	__esym_def(ENOMSG),
+	__esym_def(EIDRM),
+	__esym_def(EADDRINUSE),
+	__esym_def(EPROTO),
+};
+
+#define __esym_max  (sizeof(__esym_map) / sizeof(__esym_map[0]))
+
+const char *symerror(int errnum)
+{
+	int v = -errnum;
+	size_t ebufsz;
+	char *ebuf;
+
+	if (v < 0 || v >= (int)__esym_max || __esym_map[v] == NULL) {
+		/* Catch missing codes in the error map. */
+		ebuf = __get_error_buf(&ebufsz);
+		snprintf(ebuf, ebufsz, "%d?", errnum);
+		return ebuf;
+	}
+
+	return __esym_map[v];
+}
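+
+/*
+ * Usage sketch (illustrative): symerror() takes the negated errno
+ * values our services return, e.g.
+ *
+ *	ret = hash_remove(t, obj, hops);	// may yield -ESRCH
+ *	if (ret)
+ *		warning("removal failed: %s", symerror(ret)); // "ESRCH"
+ *
+ * Codes missing from __esym_map print as "<errnum>?".
+ */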
+
+void __run_cleanup_block(struct cleanup_block *cb)
+{
+	__RT(pthread_mutex_unlock(cb->lock));
+	cb->handler(cb->arg);
+}
+
+void __early_panic(const char *fn, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	___panic(fn, NULL, fmt, ap);
+	va_end(ap);
+}
+
+void __panic(const char *fn, const char *fmt, ...)
+__attribute__((alias("__early_panic"), weak));
+
+void early_warning(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	__warning(NULL, fmt, ap);
+	va_end(ap);
+}
+
+void warning(const char *fmt, ...)
+__attribute__((alias("early_warning"), weak));
+
+void early_notice(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	__notice(NULL, fmt, ap);
+	va_end(ap);
+}
+
+void notice(const char *fmt, ...)
+__attribute__((alias("early_notice"), weak));
+
+char *generate_name(char *buf, const char *radix,
+		    struct name_generator *ngen)
+{
+	int len = ngen->length - 1, tag;
+
+	if (radix && *radix) {
+		strncpy(buf, radix, len);
+		buf[len] = '\0';
+	} else {
+		tag = atomic_add_fetch(&ngen->serial, 1);
+#ifdef CONFIG_XENO_PSHARED
+		snprintf(buf, len, "%s@%d[%d]", ngen->radix, tag, __node_id);
+#else
+		snprintf(buf, len, "%s@%d", ngen->radix, tag);
+#endif
+	}
+
+	return buf;
+}
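+
+/*
+ * For instance (illustrative field values), with a generator whose
+ * radix is "task" and length is 32:
+ *
+ *	struct name_generator ngen = { .radix = "task", .length = 32 };
+ *	char buf[32];
+ *
+ *	generate_name(buf, NULL, &ngen);     // auto-named, e.g. "task@1"
+ *	generate_name(buf, "myname", &ngen); // "myname", truncated to fit
+ *
+ * Over pshared builds, the node id is appended (e.g. "task@1[3]").
+ */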
+
+#ifdef CONFIG_XENO_PSHARED
+
+/*
+ * Client libraries may override these symbols for implementing heap
+ * pointer validation in their own context (e.g. copperplate).
+ */
+
+__weak int pshared_check(void *heap, void *addr)
+{
+	return 1;
+}
+
+__weak void *__main_heap = NULL;
+
+#endif /* CONFIG_XENO_PSHARED */
+
+#ifdef CONFIG_XENO_DEBUG
+
+int __check_cancel_type(const char *locktype)
+{
+	int oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &oldtype);
+	if (oldtype == PTHREAD_CANCEL_DEFERRED)
+		return 0;
+
+	warning("%s() section is NOT cancel-safe", locktype);
+	abort();
+
+	return __bt(-EINVAL);
+}
+
+#endif /* CONFIG_XENO_DEBUG */
+
+int get_static_cpu_count(void)
+{
+	char buf[BUFSIZ];
+	int count = 0;
+	FILE *fp;
+
+	/*
+	 * We want the maximum number of CPUs the running kernel was
+	 * configured for, not the current online/present/possible
+	 * count of CPU devices.
+	 */
+	fp = fopen("/sys/devices/system/cpu/kernel_max", "r");
+	if (fp == NULL)
+		return -1;
+
+	if (fgets(buf, sizeof(buf), fp))
+		count = atoi(buf);
+
+	fclose(fp);
+
+	return count;
+}
+
+static int __get_online_cpus_from_proc(cpu_set_t *cpuset)
+{
+	char buf[BUFSIZ];
+	FILE *fp;
+	int cpu;
+
+	/* If no hotplug support, fall back to reading /proc/stat. */
+
+	fp = fopen("/proc/stat", "r");
+	if (fp == NULL)
+		return -ENOENT;
+
+	while (fgets(buf, sizeof(buf), fp)) {
+		/*
+		 * Like glibc, assume cpu* entries appear at the
+		 * front of /proc/stat and will stay that way.
+		 */
+		if (strncmp(buf, "cpu", 3))
+			break;
+		if (!isdigit(buf[3]))
+			continue;
+		cpu = atoi(buf + 3);
+		if (cpu >= 0 && cpu < CPU_SETSIZE)
+			CPU_SET(cpu, cpuset);
+	}
+
+	fclose(fp);
+
+	return 0;
+}
+
+int get_online_cpu_set(cpu_set_t *cpuset)
+{
+	char buf[BUFSIZ], *range, *save_range, *save_bound,
+		*lo_bound, *hi_bound, *p;
+	int cpu_lo, cpu_hi;
+	FILE *fp;
+
+	CPU_ZERO(cpuset);
+
+	fp = fopen("/sys/devices/system/cpu/online", "r");
+	if (fp == NULL)
+		return __get_online_cpus_from_proc(cpuset);
+
+	if (fgets(buf, sizeof(buf), fp) == NULL) {
+		fclose(fp);
+		return -EBADF;
+	}
+
+	p = buf;
+	for (;;) {
+		range = strtok_r(p, " \t", &save_range);
+		if (range == NULL)
+			break;
+		lo_bound = strtok_r(range, "-", &save_bound);
+		if (lo_bound) {
+			cpu_lo = atoi(lo_bound);
+			hi_bound = strtok_r(NULL, "-", &save_bound);
+			if (hi_bound) {
+				cpu_hi = atoi(hi_bound);
+				do {
+					CPU_SET(cpu_lo, cpuset);
+				} while (cpu_lo++ < cpu_hi);
+			} else
+				CPU_SET(cpu_lo, cpuset);
+		}
+		p = NULL;
+	}
+
+	fclose(fp);
+
+	return 0;
+}
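+
+/*
+ * For instance, an online mask reading "0-3" yields a set with CPUs
+ * 0..3 (a minimal sketch):
+ *
+ *	cpu_set_t cpus;
+ *
+ *	if (get_online_cpu_set(&cpus) == 0)
+ *		printf("%d CPU(s) online\n", CPU_COUNT(&cpus));
+ */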
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <sys/cobalt.h>
+
+int get_realtime_cpu_set(cpu_set_t *cpuset)
+{
+	unsigned long long cpumask;
+	char buf[BUFSIZ], *p;
+	FILE *fp;
+	int cpu;
+
+	fp = fopen("/sys/module/xenomai/parameters/supported_cpus", "r");
+	if (fp == NULL)
+		return -ENOENT;
+
+	p = fgets(buf, sizeof(buf), fp);
+	fclose(fp);
+	if (p == NULL)
+		return -EBADF;
+
+	errno = 0;
+	cpumask = strtoll(p, NULL, 10);
+	if (cpumask == LLONG_MAX && errno == ERANGE)
+		cpumask = ULLONG_MAX;
+	for (cpu = 0; cpumask != 0; cpu++, cpumask >>= 1) {
+		if (cpumask & 1ULL)
+			CPU_SET(cpu, cpuset);
+	}
+
+	return 0;
+}
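+
+/*
+ * e.g. a supported_cpus parameter reading "5" (binary 101) sets CPUs
+ * 0 and 2 in @cpuset.
+ */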
+
+int get_current_cpu(void) /* No mode migration */
+{
+	struct cobalt_threadstat stat;
+	int ret;
+
+	ret = cobalt_thread_stat(0, &stat);
+	if (ret)
+		return ret;
+
+	return stat.cpu;
+}
+
+#else  /* CONFIG_XENO_MERCURY */
+
+int get_realtime_cpu_set(cpu_set_t *cpuset)
+{
+	return get_online_cpu_set(cpuset);
+}
+
+int get_current_cpu(void)
+{
+	int cpu = sched_getcpu();
+
+	if (cpu < 0)
+		return -errno;
+
+	return cpu;
+}
+
+#endif  /* CONFIG_XENO_MERCURY */
+
+pid_t get_thread_pid(void)
+{
+	return syscall(__NR_gettid);
+}
+
+char *lookup_command(const char *cmd)
+{
+	const char *dirs[] = {
+		"/bin",
+		"/sbin",
+		"/usr/bin",
+		"/usr/sbin",
+	};
+	char *path;
+	int n, ret;
+
+	for (n = 0; n < sizeof(dirs) / sizeof(dirs[0]); n++) {
+		ret = asprintf(&path, "%s/%s", dirs[n], cmd);
+		if (ret < 0)
+			return NULL;
+		if (access(path, X_OK) == 0)
+			return path;
+		free(path);
+	}
+
+	return NULL;
+}
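+
+/*
+ * The path returned by lookup_command() is heap-allocated; the caller
+ * is expected to free() it, e.g. (run_helper() is hypothetical):
+ *
+ *	char *path = lookup_command("modprobe");
+ *	if (path) {
+ *		run_helper(path);
+ *		free(path);
+ *	}
+ */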
+
+size_t get_mem_size(const char *arg)
+{
+	size_t size;
+	char *p;
+
+	size = strtol(arg, &p, 0);
+	if (size == LONG_MIN || size == LONG_MAX)
+		return 0;
+
+	if (*p == '\0')
+		return size;
+
+	switch (tolower(*p)) {
+	case 'k':
+		size *= 1024;
+		break;
+	case 'm':
+		size *= (1024 * 1024);
+		break;
+	case 'g':
+		size *= (1024 * 1024 * 1024);
+		break;
+	default:
+		size = 0;
+	}
+
+	return size;
+}
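+
+/*
+ * e.g. get_mem_size("64k") == 65536, get_mem_size("2m") == 2097152;
+ * a bare number is taken as a byte count, an unknown suffix yields 0.
+ */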
+
+const char *config_strings[] = {
+#include "config-dump.h"
+	NULL,
+};
+
+void __boilerplate_init(void)
+{
+	__RT(clock_gettime(CLOCK_MONOTONIC, &init_date));
+	__RT(pthread_mutex_init(&__printlock, NULL));
+	debug_init();
+	init_done = 1;
+}
+
+static int boilerplate_init(void)
+{
+	pthread_atfork(NULL, NULL, __boilerplate_init);
+	__boilerplate_init();
+
+	return 0;
+}
+
+static struct setup_descriptor boilerplate_interface = {
+	.name = "boilerplate",
+	.init = boilerplate_init,
+};
+
+boilerplate_setup_call(boilerplate_interface);
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/avl.c b/kernel/xenomai-v3.2.4/lib/boilerplate/avl.c
new file mode 100644
index 0000000..3bf9bf1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/avl.c
@@ -0,0 +1,778 @@
+/*
+ * Copyright (c) 2015 Gilles Chanteperdrix
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <errno.h>
+#include <memory.h>
+
+#ifdef AVL_PSHARED
+#define __AVL(__decl)	shavl_ ## __decl
+#define __AVLH(__decl)	shavlh_ ## __decl
+#define __AVL_T(__type)	sh ## __type
+#else
+#define __AVL(__decl)	avl_ ## __decl
+#define __AVLH(__decl)	avlh_ ## __decl
+#define __AVL_T(__type)	__type
+#endif
+
+struct __AVL_T (avlh) * __AVL(inorder)(const struct __AVL_T(avl) * const avl,
+				       struct __AVL_T(avlh) * holder,
+				       const int dir)
+{
+	/* Assume dir == AVL_RIGHT in comments. */
+	struct __AVL_T (avlh) * next;
+
+	/*
+	 * If the current node is not right threaded, then go down left,
+	 * starting from its right child.
+	 */
+	if (__AVLH(has_child)(avl, holder, dir)) {
+		const int opp_dir = avl_opposite(dir);
+		holder = __AVLH(link)(avl, holder, dir);
+		while ((next = __AVLH(child)(avl, holder, opp_dir)))
+			holder = next;
+		next = holder;
+	} else {
+		for (;;) {
+			next = __AVLH(up)(avl, holder);
+			if (next == __AVL(anchor)(avl))
+				return NULL;
+			if (holder->type != dir)
+				break;
+			holder = next;
+		}
+	}
+
+	return next;
+}
+
+struct __AVL_T (avlh) * __AVL(postorder)(const struct __AVL_T(avl) * const avl,
+					 struct __AVL_T(avlh) * const holder,
+					 const int dir)
+{
+	/* Assume dir == AVL_RIGHT in comments. */
+	struct __AVL_T (avlh) * next = __AVLH(up)(avl, holder);
+
+	if (holder->type != dir)
+		/*
+		 * If the current node is not a right node, follow the nodes in
+		 * inorder until we find a right threaded node.
+		 */
+		while (__AVLH(has_child)(avl, next, dir))
+			next = __AVL(inorder)(avl, next, dir);
+	else
+		/*
+		 * else the current node is a right node, its parent is the
+		 * next in postorder.
+		 */
+		if (next == __AVL(anchor)(avl))
+			next = NULL;
+
+	return next;
+}
+
+struct __AVL_T (avlh) * __AVL(preorder)(const struct __AVL_T(avl) * const avl,
+					struct __AVL_T(avlh) * holder,
+					const int dir)
+{
+	struct __AVL_T (avlh) * next;
+	/* Assume dir == AVL_RIGHT in comments. */
+	/*
+	 * If the current node has a left child (hence is not left threaded),
+	 * then return it.
+	 */
+
+	if (__AVLH(has_child)(avl, holder, avl_opposite(dir)))
+		return __AVLH(link)(avl, holder, avl_opposite(dir));
+
+	/*
+	 * Else follow the right threads until we find a node which is not right
+	 * threaded (hence has a right child) and return its right child.
+	 */
+	next = holder;
+
+	while (!__AVLH(has_child)(avl, next, dir)) {
+		next = __AVL(inorder)(avl, next, dir);
+		if (next == NULL)
+			return NULL;
+	}
+
+	return __AVLH(link)(avl, next, dir);
+}
+
+static inline unsigned int avlh_thr(const struct __AVL_T (avl) * const avl,
+				    const struct __AVL_T (avlh) * h)
+{
+	unsigned int result = 0;
+
+	if (__AVLH(link)(avl, h, AVL_LEFT) == NULL)
+		result |= AVL_THR_LEFT;
+	if (__AVLH(link)(avl, h, AVL_RIGHT) == NULL)
+		result |= AVL_THR_RIGHT;
+
+	return result;
+}
+
+static inline void
+avlh_set_parent_link(struct __AVL_T (avl) * const avl,
+		     struct __AVL_T (avlh) * lhs, struct __AVL_T (avlh) * rhs)
+{
+	__AVLH(set_link)(avl, __AVLH(up)(avl, lhs), lhs->type, rhs);
+}
+
+static inline void
+avlh_set_left(struct __AVL_T (avl) * const avl, struct __AVL_T (avlh) * lhs,
+	      struct __AVL_T (avlh) * rhs)
+{
+	__AVLH(set_link)(avl, lhs, AVL_LEFT, rhs);
+}
+
+static inline void
+avlh_set_up(struct __AVL_T (avl) * const avl, struct __AVL_T (avlh) * lhs,
+	    struct __AVL_T (avlh) * rhs)
+{
+	__AVLH(set_link)(avl, lhs, AVL_UP, rhs);
+}
+
+static inline void
+avlh_set_right(struct __AVL_T (avl) * const avl, struct __AVL_T (avlh) * lhs,
+	       struct __AVL_T (avlh) * rhs)
+{
+	__AVLH(set_link)(avl, lhs, AVL_RIGHT, rhs);
+}
+
+static inline void avl_set_top(struct __AVL_T (avl) * const avl,
+			       struct __AVL_T (avlh) * holder)
+{
+	__AVLH(set_link)(avl, __AVL(anchor)(avl), AVL_RIGHT, holder);
+}
+
+static inline void avl_set_head(struct __AVL_T (avl) * const avl,
+				struct __AVL_T (avlh) * holder)
+{
+	__AVL(set_end)(avl, AVL_LEFT, holder);
+}
+
+static inline void avl_set_tail(struct __AVL_T (avl) * const avl,
+				struct __AVL_T (avlh) * holder)
+{
+	__AVL(set_end)(avl, AVL_RIGHT, holder);
+}
+
+/* Internal functions used for rebalancing (for insertion and deletion). */
+static inline struct __AVL_T (avlh) *
+	avlh_rotate(struct __AVL_T (avl) * const avl,
+		    struct __AVL_T (avlh) * const holder, const int dir)
+{
+	const int opp_dir = avl_opposite(dir);
+	struct __AVL_T (avlh) * const nexttop =
+		__AVLH(link)(avl, holder, opp_dir);
+	struct __AVL_T (avlh) * const subtree =
+		__AVLH(child)(avl, nexttop, dir);
+
+	if (subtree) {
+		__AVLH(set_link)(avl, holder, opp_dir, subtree);
+		avlh_set_up(avl, subtree, holder);
+		subtree->type = opp_dir;
+	} else
+		__AVLH(set_link)(avl, holder, opp_dir, NULL);
+
+	__AVLH(set_link)(avl, nexttop, dir, holder);
+	avlh_set_up(avl, nexttop, __AVLH(up)(avl, holder));
+	nexttop->type = holder->type;
+	avlh_set_up(avl, holder, nexttop);
+	holder->type = dir;
+
+	avlh_set_parent_link(avl, nexttop, nexttop);
+
+	return nexttop;
+}
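+
+/*
+ * Shape of avlh_rotate(avl, holder, AVL_RIGHT), i.e. a single right
+ * rotation; A and C stand for arbitrary subtrees:
+ *
+ *           holder                     nexttop
+ *           /    \                     /     \
+ *      nexttop    C       ==>        A      holder
+ *      /     \                              /    \
+ *     A    subtree                     subtree    C
+ *
+ * nexttop replaces holder at the top of the subtree, and the moved
+ * child (subtree) switches sides.
+ */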
+
+static inline struct __AVL_T (avlh) *
+	avlh_dbl_rotate(struct __AVL_T (avl) * const avl,
+			struct __AVL_T (avlh) * const holder, const int dir)
+{
+	const int opp = avl_opposite(dir);
+
+	avlh_rotate(avl, __AVLH(link)(avl, holder, opp), opp);
+	return avlh_rotate(avl, holder, dir);
+}
+
+static struct __AVL_T (avlh) *
+	avlh_rebalance(struct __AVL_T (avl) * const avl, struct __AVL_T (avlh) * holder,
+		       const int delta)
+{
+	int dir = delta;
+	struct __AVL_T (avlh) * const heavy_side =
+		__AVLH(link)(avl, holder, dir);
+
+	if (heavy_side->balance == -delta) {
+		/* heavy_side->balance == -delta, double rotation needed. */
+		holder = avlh_dbl_rotate(avl, holder, avl_opposite(dir));
+
+		/*
+		 * recompute balances, there are three nodes involved, two of
+		 * which balances become null.
+		 */
+		dir = holder->balance ? : AVL_RIGHT;
+		__AVLH(link)(avl, holder, dir)->balance = 0;
+		__AVLH(link)(avl, holder, avl_opposite(dir))->balance
+			= -holder->balance;
+		holder->balance = 0;
+	} else {
+		/*
+		 * heavy_side->balance == delta or 0, simple rotation needed.
+		 * the case 0 occurs only when deleting, never when inserting.
+		 */
+
+		/* heavy_side becomes the new root. */
+		avlh_rotate(avl, holder, avl_opposite(dir));
+
+		/* recompute balances. */
+		holder->balance -= heavy_side->balance;
+		heavy_side->balance -= delta;
+
+		holder = heavy_side;
+	}
+	return holder;
+}
+
+/*
+ * The avlh_rebalance function was split into two parts, so that the
+ * simplest case gets inlined.
+ */
+static inline struct __AVL_T (avlh) *
+	avlh_balance_add(struct __AVL_T (avl) * const avl,
+			 struct __AVL_T (avlh) * const holder, const int delta)
+{
+	if (holder->balance == delta)
+		/* we need to rebalance the current subtree. */
+		return avlh_rebalance(avl, holder, delta);
+
+	/* the current subtree does not need rebalancing */
+	holder->balance += delta;
+	return holder;
+}
+
+static inline void
+avlh_link_child(struct __AVL_T (avl) * const avl,
+		struct __AVL_T (avlh) * const oldh,
+		struct __AVL_T (avlh) * const newh, const int side)
+{
+	struct __AVL_T (avlh) * const child = __AVLH(link)(avl, oldh, side);
+
+	__AVLH(set_link)(avl, newh, side, child);
+	if (__AVLH(has_child)(avl, oldh, side))
+		avlh_set_up(avl, child, newh);
+}
+
+static inline void
+avlh_replace(struct __AVL_T (avl) * const avl,
+	     struct __AVL_T (avlh) * const oldh,
+	     struct __AVL_T (avlh) * const newh)
+{
+	newh->type = oldh->type;
+	/* Do not update the balance, this has to be done by the caller. */
+
+	avlh_set_up(avl, newh, __AVLH(up)(avl, oldh));
+	avlh_set_parent_link(avl, oldh, newh);
+
+	avlh_link_child(avl, oldh, newh, AVL_LEFT);
+	avlh_link_child(avl, oldh, newh, AVL_RIGHT);
+}
+
+/* Deletion helpers. */
+static void avl_delete_leaf(struct __AVL_T (avl) * const avl,
+			    struct __AVL_T (avlh) * const node)
+{
+	/*
+	 * The node has no children at all: it disappears, and its parent
+	 * becomes threaded on the side the node was attached to.
+	 */
+
+	struct __AVL_T (avlh) * const new_node = __AVLH(up)(avl, node);
+	const int dir = node->type;
+
+	/* Unlink the node. */
+	__AVLH(set_link)(avl, new_node, dir, __AVLH(link)(avl, node, dir));
+
+	if (node == __AVL(end)(avl, dir))
+		__AVL(set_end)(avl, dir, new_node);
+}
+
+static struct __AVL_T (avlh) * avl_delete_1child(struct __AVL_T (avl) *
+						 const avl,
+						 struct __AVL_T (avlh) *
+						 const node, const int dir)
+{
+	/*
+	 * Node is threaded on one side and has a child on the other
+	 * side. In this case, node is replaced by its child.
+	 */
+
+	struct __AVL_T (avlh) * const new_node = __AVLH(link)(avl, node, dir);
+
+	/*
+	 * Change links as if new_node was suppressed before calling
+	 * avlh_replace.
+	 */
+	__AVLH(set_link)(avl, node, dir, __AVLH(link)(avl, new_node, dir));
+	avlh_replace(avl, node, new_node);
+
+	if (node == __AVL(end)(avl, avl_opposite(dir)))
+		__AVL(set_end)(avl, avl_opposite(dir), new_node);
+	/* new_node->balance is 0, which is correct. */
+	return new_node;
+}
+
+static int avl_delete_2children(struct __AVL_T (avl) * const avl,
+				struct __AVL_T (avlh) * const node);
+
+/* Insertion helpers. */
+static inline void
+avlh_attach(struct __AVL_T (avl) * const avl,
+	    struct __AVL_T (avlh) * const parent,
+	    struct __AVL_T (avlh) * const child, const int side)
+{
+	avlh_set_left(avl, child, NULL);
+	avlh_set_right(avl, child, NULL);
+	avlh_set_up(avl, child, parent);
+	__AVLH(set_link)(avl, parent, side, child);
+	child->type = side;
+}
+
+/*
+ * Insert a node, given its parent and the side where it should be inserted.
+ * Helper for all insertion functions.
+ */
+static inline void avl_insert_inner(struct __AVL_T (avl) * const avl,
+				    struct __AVL_T (avlh) * parent,
+				    struct __AVL_T (avlh) * const node,
+				    const int side)
+{
+	avlh_attach(avl, parent ? : __AVL(anchor)(avl), node, side);
+	++__AVL(count)(avl);
+
+	if (parent == NULL)
+		goto insert_first_and_ret;	/* Get away from fast path */
+
+	if (parent == __AVL(end)(avl, side))
+		__AVL(set_end)(avl, side, node);
+
+	parent->balance += side;
+
+	while (parent->balance) {
+		const int delta = parent->type;
+		parent = __AVLH(up)(avl, parent);
+		if (parent == __AVL(anchor)(avl))
+			goto inc_height_and_ret;	/* Get away from fast path */
+		parent = avlh_balance_add(avl, parent, delta);
+	}
+
+	return;
+
+insert_first_and_ret:
+	avl_set_head(avl, node);
+	avl_set_tail(avl, node);
+inc_height_and_ret:
+	++__AVL(height)(avl);
+}
+
+/* External functions. */
+int __AVL(delete)(struct __AVL_T(avl) * const avl, struct __AVL_T(avlh) * node)
+{
+	if (!--__AVL(count)(avl)) {
+		goto delete_last_and_ret;
+	}
+
+	switch (avlh_thr(avl, node)) {
+	case (AVL_THR_LEFT | AVL_THR_RIGHT):	/* thr is 5 */
+		avl_delete_leaf(avl, node);
+		break;
+
+	case AVL_THR_LEFT:	/* only AVL_LEFT bit is on, thr is 1. */
+		node = avl_delete_1child(avl, node, AVL_RIGHT);
+		break;
+
+	case AVL_THR_RIGHT:	/* only AVL_RIGHT bit is on, thr is 4. */
+		node = avl_delete_1child(avl, node, AVL_LEFT);
+		break;
+
+	case 0:
+		return avl_delete_2children(avl, node);
+	}
+
+	/*
+	 * node is the first node which needs to be rebalanced. The tree
+	 * is then rebalanced and, contrary to what happens on insertion,
+	 * rebalancing stops when a node which is NOT balanced is met.
+	 */
+	while (!node->balance) {
+		const int delta = -node->type;
+		node = __AVLH(up)(avl, node);
+		if (node == __AVL(anchor)(avl))
+			goto dec_height_and_ret;
+		node = avlh_balance_add(avl, node, delta);
+	}
+
+	return 0;
+
+delete_last_and_ret:
+	avl_set_top(avl, NULL);
+	avl_set_head(avl, NULL);
+	avl_set_tail(avl, NULL);
+dec_height_and_ret:
+	--__AVL(height)(avl);
+	return 0;
+}
+
+static int avl_delete_2children(struct __AVL_T (avl) * const avl,
+				struct __AVL_T (avlh) * const node)
+{
+	const int dir = node->balance ? node->balance : 1;
+	struct __AVL_T (avlh) * const new_node =
+		__AVL(inorder)(avl, node, dir);
+	__AVL(delete)(avl, new_node);
+	++__AVL(count)(avl);
+	avlh_replace(avl, node, new_node);
+	new_node->balance = node->balance;
+	if (__AVL(end)(avl, dir) == node)
+		__AVL(set_end)(avl, dir, new_node);
+	return 0;
+}
+
+int __AVL(prepend)(struct __AVL_T(avl) * const avl,
+		   struct __AVL_T(avlh) * const holder,
+		   const struct __AVL_T(avl_searchops) * ops)
+{
+	struct __AVL_T (avlh) * const parent = __AVL(head)(avl);
+	int type = parent == NULL ? AVL_RIGHT : AVL_LEFT;
+
+	if (parent == NULL || ops->cmp(holder, parent) < 0) {
+		avl_insert_inner(avl, parent, holder, type);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+int __AVL(insert_at)(struct __AVL_T(avl) * const avl,
+		     struct __AVL_T(avlh) * parent, int dir,
+		     struct __AVL_T(avlh) * child)
+{
+	if (parent == NULL)
+		dir = AVL_RIGHT;
+	else {
+		if (!__AVLH(thr_tst)(avl, parent, dir))
+			return -EINVAL;
+	}
+
+	avl_insert_inner(avl, parent, child, dir);
+	return 0;
+}
+
+int __AVL(insert)(struct __AVL_T(avl) * const avl,
+		  struct __AVL_T(avlh) * const holder,
+		  const struct __AVL_T(avl_searchops) * ops)
+{
+	int delta;
+	struct __AVL_T (avlh) * parent;
+
+	parent = __AVL(search_inner)(avl, holder, &delta, ops);
+	if (delta == 0)
+		return -EBUSY;
+
+	avl_insert_inner(avl, parent, holder, delta);
+
+	return 0;
+}
+
+int __AVL(insert_front)(struct __AVL_T(avl) * const avl,
+			struct __AVL_T(avlh) * const holder,
+			const struct __AVL_T(avl_searchops) * ops)
+{
+	int delta;
+	struct __AVL_T (avlh) * parent;
+
+	parent = ops->search(avl, holder, &delta, AVL_LEFT);
+
+	avl_insert_inner(avl, parent, holder, delta ? : AVL_LEFT);
+	return 0;
+}
+
+int __AVL(insert_back)(struct __AVL_T(avl) * const avl,
+		       struct __AVL_T(avlh) * const holder,
+		       const struct __AVL_T(avl_searchops) * ops)
+{
+	int delta;
+	struct __AVL_T (avlh) * parent;
+
+	parent = ops->search(avl, holder, &delta, AVL_RIGHT);
+
+	avl_insert_inner(avl, parent, holder, delta ? : AVL_RIGHT);
+	return 0;
+}
+
+int __AVL(append)(struct __AVL_T(avl) * const avl,
+		  struct __AVL_T(avlh) * const holder,
+		  const struct __AVL_T(avl_searchops) * ops)
+{
+	struct __AVL_T (avlh) * const parent = __AVL(tail)(avl);
+
+	if (parent == NULL || ops->cmp(holder, parent) > 0) {
+		avl_insert_inner(avl, parent, holder, AVL_RIGHT);
+		return 0;
+	}
+
+	return -EINVAL;
+}
+
+/*
+ * Special case: when we know that replacing a node with another one
+ * will not change the ordering of the tree, this is much faster than
+ * remove + add.
+ */
+int __AVL(replace)(struct __AVL_T(avl) * avl, struct __AVL_T(avlh) * oldh,
+		   struct __AVL_T(avlh) * newh,
+		   const struct __AVL_T(avl_searchops) * ops)
+{
+	struct __AVL_T (avlh) * prev, *next;
+
+	prev = __AVL(prev)(avl, oldh);
+	next = __AVL(next)(avl, oldh);
+
+	if ((prev && ops->cmp(newh, prev) < 0)
+	    || (next && ops->cmp(newh, next) > 0))
+		return -EINVAL;
+
+	avlh_replace(avl, oldh, newh);
+	if (oldh == __AVL(head)(avl))
+		avl_set_head(avl, newh);
+	if (oldh == __AVL(tail)(avl))
+		avl_set_tail(avl, newh);
+	newh->balance = oldh->balance;
+	return 0;
+}
+
+struct __AVL_T (avlh) * __AVL(update)(struct __AVL_T(avl) * const avl,
+				      struct __AVL_T(avlh) * const holder,
+				      const struct __AVL_T(avl_searchops) * ops)
+{
+	int delta;
+	struct __AVL_T (avlh) * const oldh =
+		__AVL(search_inner)(avl, holder, &delta, ops);
+
+	if (!delta) {
+		__AVL(replace)(avl, oldh, holder, ops);
+		return oldh;
+	}
+
+	return NULL;
+}
+
+struct __AVL_T (avlh) * __AVL(set)(struct __AVL_T(avl) * const avl,
+				   struct __AVL_T(avlh) * const holder,
+				   const struct __AVL_T(avl_searchops) * ops)
+{
+	int delta;
+	struct __AVL_T (avlh) * const oldh =
+		__AVL(search_inner)(avl, holder, &delta, ops);
+
+	if (delta) {
+		avl_insert_inner(avl, oldh, holder, delta);
+		return NULL;
+	}
+
+	__AVL(replace)(avl, oldh, holder, ops);
+	return oldh;
+}
+
+void __AVL(init)(struct __AVL_T(avl) * const avl)
+{
+	__AVLH(init)(__AVL(anchor)(avl));	/* this must be first. */
+	__AVL(height)(avl) = 0;
+	__AVL(count)(avl) = 0;
+	avl_set_top(avl, NULL);
+
+	avl_set_head(avl, NULL);
+	avl_set_tail(avl, NULL);
+}
+
+void __AVL(destroy)(struct __AVL_T(avl) * const avl)
+{
+	__AVL(init)(avl);
+}
+
+void __AVL(clear)(struct __AVL_T(avl) * const avl,
+		  void (*destruct)(struct __AVL_T(avlh) *))
+{
+	if (destruct) {
+		struct __AVL_T (avlh) * next, *holder = __AVL(gethead)(avl);
+
+		while (holder) {
+			next = __AVL(postorder_next)(avl, holder);
+			destruct(holder);
+			holder = next;
+		}
+	}
+
+	__AVL(init)(avl);
+}
+
+static inline
+void avl_dumper_visit(FILE * file, const struct __AVL_T (avl) * const avl,
+		      struct __AVL_T (avlh) * node,
+		      __AVL_T(avlh_prn_t) * prn, const char *blank,
+		      unsigned int blank_sz, char *buf,
+		      unsigned int indent, unsigned int len)
+{
+	char bal;
+
+	if (__AVLH(has_child)(avl, node, AVL_RIGHT)) {
+		if (blank_sz >= (unsigned int)(buf - blank)) {
+			snprintf(buf, len + 3, "%*s\n", (int)len + 1, "bug!");
+			fputs(buf - blank_sz, file);
+		} else
+			avl_dumper_visit(file, avl,
+					 __AVLH(right)(avl, node),
+					 prn, blank,
+					 blank_sz + indent, buf, indent, len);
+	}
+
+	switch (node->balance) {
+	case 0:
+		bal = '.';
+		break;
+	case -1:
+		bal = '-';
+		break;
+	case 1:
+		bal = '+';
+		break;
+	default:
+		bal = '?';	/* Bug. */
+	}
+
+	(*prn)(buf, len + 1, node);
+	buf[len] = bal;
+	buf[len + 1] = '\n';
+	buf[len + 2] = '\0';
+
+	fputs(buf - blank_sz, file);
+
+	if (__AVLH(has_child)(avl, node, AVL_LEFT)) {
+		if (blank_sz >= (unsigned int)(buf - blank)) {
+			snprintf(buf, len + 3, "%*s\n", (int)len + 1, "bug!");
+			fputs(buf - blank_sz, file);
+		} else
+			avl_dumper_visit(file, avl,
+					 __AVLH(left)(avl, node),
+					 prn, blank,
+					 blank_sz + indent, buf, indent, len);
+	}
+}
+
+void __AVL(dump)(FILE * file, const struct __AVL_T(avl) * const avl,
+		 __AVL_T(avlh_prn_t) * prn, unsigned int indent,
+		 unsigned int len)
+{
+	struct __AVL_T (avlh) * holder = __AVL(gettop)(avl);
+
+	putc('\n', file);
+	if (!holder)
+		fputs("Empty.\n", file);
+	else {
+		size_t blank_sz = (__AVL(height)(avl) - 1) * indent;
+		char buffer[blank_sz + len + 3];
+		/* 3 == balance char + sizeof("\n\0") */
+		memset(buffer, ' ', blank_sz);
+
+		avl_dumper_visit(file, avl, holder, prn, buffer, 0,
+				 buffer + blank_sz, indent, len);
+	}
+	fflush(file);
+}
+
+static int avl_check_visit(const struct __AVL_T (avl) * avl,
+			   struct __AVL_T (avlh) * node, unsigned int level)
+{
+	int err;
+
+	if (!__AVLH(has_child)(avl, node, AVL_RIGHT))
+		goto check_balance;
+
+	if (level > __AVL(height)(avl)) {
+		fprintf(stderr, "too much recursion\n");
+		return -EINVAL;
+	}
+
+	err = avl_check_visit(avl, __AVLH(right)(avl, node), level + 1);
+	if (err < 0)
+		return err;
+
+check_balance:
+	switch (node->balance) {
+	case 0:
+		break;
+	case -1:
+		break;
+	case 1:
+		break;
+	default:
+		fprintf(stderr, "invalid balance\n");
+		return -EINVAL;
+	}
+
+	if (!__AVLH(has_child)(avl, node, AVL_LEFT))
+		return 0;
+
+	err = avl_check_visit(avl, __AVLH(left)(avl, node), level + 1);
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+int __AVL(check)(const struct __AVL_T(avl) * avl,
+		 const struct __AVL_T(avl_searchops) * ops)
+{
+	struct __AVL_T (avlh) * holder = __AVL(gettop)(avl), *last;
+	int err;
+
+	if (!holder)
+		return 0;
+
+	err = avl_check_visit(avl, holder, 0);
+	if (err < 0)
+		return err;
+
+	last = NULL;
+	for (holder = __AVL(gethead)(avl); holder;
+	     holder = __AVL(next)(avl, holder)) {
+		if (last != NULL)
+			if (ops->cmp(holder, last) < 0) {
+				fprintf(stderr, "disordered nodes\n");
+				return -EINVAL;
+			}
+		last = holder;
+	}
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/debug.c b/kernel/xenomai-v3.2.4/lib/boilerplate/debug.c
new file mode 100644
index 0000000..42ea8e8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/debug.c
@@ -0,0 +1,185 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <assert.h>
+#include <pthread.h>
+#include <malloc.h>
+#include <errno.h>
+#include <signal.h>
+#include "boilerplate/ancillaries.h"
+#include "boilerplate/wrappers.h"
+#include "boilerplate/lock.h"
+#include "boilerplate/signal.h"
+#include "boilerplate/debug.h"
+
+static pthread_key_t btkey;
+
+static struct backtrace_data main_btd = {
+	.name = "main",
+};
+
+void __debug(const char *name, const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	__printout(name, NULL, fmt, ap);
+	va_end(ap);
+}
+
+void backtrace_log(int retval, const char *fn,
+		   const char *file, int lineno)
+{
+	struct backtrace_data *btd;
+	struct error_frame *ef;
+	int state;
+
+	btd = pthread_getspecific(btkey);
+	if (btd == NULL)
+		btd = &main_btd;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &state);
+
+	ef = malloc(sizeof(*ef));
+	if (ef == NULL)
+		goto out;
+
+	ef->retval = retval;
+	ef->lineno = lineno;
+	ef->fn = fn;
+	ef->file = file;
+
+	write_lock(&btd->lock);
+
+	if (btd->inner == NULL)
+		/* Fire the hook for the inner trace. */
+		error_hook(ef);
+
+	ef->next = btd->inner;
+	btd->inner = ef;
+
+	write_unlock(&btd->lock);
+out:
+	pthread_setcanceltype(state, NULL);
+}
+
+static void flush_backtrace(struct backtrace_data *btd)
+{
+	struct error_frame *ef, *nef;
+	int state;
+
+	/* Locking order must be __printlock, then btlock. */
+
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, &state);
+
+	write_lock(&btd->lock);
+
+	for (ef = btd->inner; ef; ef = nef) {
+		nef = ef->next;
+		free(ef);
+	}
+
+	btd->inner = NULL;
+	write_unlock(&btd->lock);
+
+	pthread_setcanceltype(state, NULL);
+}
+
+void backtrace_init_context(struct backtrace_data *btd,
+			    const char *name)
+{
+	__RT(pthread_mutex_init(&btd->lock, NULL));
+	btd->inner = NULL;
+	btd->name = name ?: "<anonymous>";
+	pthread_setspecific(btkey, btd);
+}
+
+void backtrace_destroy_context(struct backtrace_data *btd)
+{
+	flush_backtrace(btd);
+	__RT(pthread_mutex_destroy(&btd->lock));
+}
+
+static const char *dashes = "------------------------------------------------------------------------------";
+
+void backtrace_dump(struct backtrace_data *btd)
+{
+	struct error_frame *ef;
+	FILE *tracefp = stderr;
+	int n = 0;
+
+	if (btd == NULL)
+		btd = &main_btd;
+
+	SIGSAFE_LOCK_ENTRY(&__printlock);
+
+	if (btd->inner == NULL)
+		goto no_error;
+
+	fprintf(tracefp,
+		"%s\n[ ERROR BACKTRACE: thread %s ]\n\n",
+		dashes, btd->name);
+
+	for (ef = btd->inner; ef; ef = ef->next, n++)
+		fprintf(tracefp, "%s #%-2d %s in %s(), %s:%d\n",
+			ef->next ? "  " : "=>",
+			n, symerror(ef->retval),
+			ef->fn, ef->file, ef->lineno);
+
+	fputs(dashes, tracefp);
+	fputc('\n', tracefp);
+	fflush(tracefp);
+	flush_backtrace(btd);
+
+no_error:
+	SIGSAFE_LOCK_EXIT(&__printlock);
+}
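+
+/*
+ * The dump reads innermost frame first, with "=>" tagging the
+ * outermost one, e.g. (illustrative function and file names):
+ *
+ * [ ERROR BACKTRACE: thread task ]
+ *
+ *    #0 EINVAL in do_setup(), init.c:42
+ * => #1 EINVAL in main_loop(), init.c:120
+ */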
+
+void backtrace_check(void)
+{
+	struct backtrace_data *btd;
+
+	btd = pthread_getspecific(btkey);
+	if (btd == NULL)
+		btd = &main_btd;
+
+	backtrace_dump(btd);
+}
+
+char *__get_error_buf(size_t *sizep)
+{
+	struct backtrace_data *btd;
+
+	btd = pthread_getspecific(btkey);
+	if (btd == NULL)
+		btd = &main_btd;
+
+	*sizep = sizeof(btd->eundef);
+
+	return btd->eundef;
+}
+
+void debug_init(void)
+{
+	__RT(pthread_mutex_init(&main_btd.lock, NULL));
+	pthread_key_create(&btkey, NULL);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/hash.c b/kernel/xenomai-v3.2.4/lib/boilerplate/hash.c
new file mode 100644
index 0000000..9ce1b7c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/hash.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <string.h>
+#include <errno.h>
+#include "boilerplate/lock.h"
+#include "boilerplate/hash.h"
+#include "boilerplate/debug.h"
+
+/*
+ * Crunching routine borrowed from:
+ *
+ * lookup2.c, by Bob Jenkins, December 1996, Public Domain.
+ * hash(), hash2(), hash3, and mix() are externally useful functions.
+ * Routines to test the hash are included if SELF_TEST is defined.
+ * You can use this free for any purpose.  It has no warranty.
+ */
+
+#define __mixer(a, b, c) \
+	{					\
+	a -= b; a -= c; a ^= (c>>13);		\
+	b -= c; b -= a; b ^= (a<<8);		\
+	c -= a; c -= b; c ^= (b>>13);		\
+	a -= b; a -= c; a ^= (c>>12);		\
+	b -= c; b -= a; b ^= (a<<16);		\
+	c -= a; c -= b; c ^= (b>>5);		\
+	a -= b; a -= c; a ^= (c>>3);		\
+	b -= c; b -= a; b ^= (a<<10);		\
+	c -= a; c -= b; c ^= (b>>15);		\
+}
+
+static inline int store_key(struct hashobj *obj,
+			    const void *key, size_t len,
+			    const struct hash_operations *hops);
+
+static inline void drop_key(struct hashobj *obj,
+			    const struct hash_operations *hops);
+
+#define GOLDEN_HASH_RATIO  0x9e3779b9  /* 2^32 / golden ratio. */
+
+unsigned int __hash_key(const void *key, size_t length, unsigned int c)
+{
+	const unsigned char *k = key;
+	unsigned int a, b, len;
+
+	len = (unsigned int)length;
+	a = b = GOLDEN_HASH_RATIO;
+
+	while (len >= 12) {
+		a += (k[0] +((unsigned int)k[1]<<8) +((unsigned int)k[2]<<16) +((unsigned int)k[3]<<24));
+		b += (k[4] +((unsigned int)k[5]<<8) +((unsigned int)k[6]<<16) +((unsigned int)k[7]<<24));
+		c += (k[8] +((unsigned int)k[9]<<8) +((unsigned int)k[10]<<16)+((unsigned int)k[11]<<24));
+		__mixer(a, b, c);
+		k += 12;
+		len -= 12;
+	}
+
+	c += (unsigned int)length;
+
+	switch (len) {
+	case 11: c += ((unsigned int)k[10]<<24);
+	case 10: c += ((unsigned int)k[9]<<16);
+	case 9 : c += ((unsigned int)k[8]<<8);
+	case 8 : b += ((unsigned int)k[7]<<24);
+	case 7 : b += ((unsigned int)k[6]<<16);
+	case 6 : b += ((unsigned int)k[5]<<8);
+	case 5 : b += k[4];
+	case 4 : a += ((unsigned int)k[3]<<24);
+	case 3 : a += ((unsigned int)k[2]<<16);
+	case 2 : a += ((unsigned int)k[1]<<8);
+	case 1 : a += k[0];
+	}
+
+	__mixer(a, b, c);
+
+	return c;
+}
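+
+/*
+ * A quick usage sketch: keys are hashed as raw bytes, then folded
+ * into HASHSLOTS buckets by do_hash() below, e.g.
+ *
+ *	const char key[] = "obj-17";
+ *	unsigned int h = __hash_key(key, sizeof(key) - 1, 0);
+ *	// bucket index == h & (HASHSLOTS - 1)
+ */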
+
+void __hash_init(void *heap, struct hash_table *t)
+{
+	pthread_mutexattr_t mattr;
+	int n;
+
+	for (n = 0; n < HASHSLOTS; n++)
+		__list_init(heap, &t->table[n].obj_list);
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
+	__RT(pthread_mutex_init(&t->lock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+}
+
+void hash_destroy(struct hash_table *t)
+{
+	__RT(pthread_mutex_destroy(&t->lock));
+}
+
+static struct hash_bucket *do_hash(struct hash_table *t,
+				   const void *key, size_t len)
+{
+	unsigned int hash = __hash_key(key, len, 0);
+	return &t->table[hash & (HASHSLOTS-1)];
+}
+
+int __hash_enter(struct hash_table *t,
+		 const void *key, size_t len,
+		 struct hashobj *newobj,
+		 const struct hash_operations *hops,
+		 int nodup)
+{
+	struct hash_bucket *bucket;
+	struct hashobj *obj;
+	int ret;
+
+	holder_init(&newobj->link);
+	ret = store_key(newobj, key, len, hops);
+	if (ret)
+		return ret;
+
+	bucket = do_hash(t, key, len);
+	write_lock_nocancel(&t->lock);
+
+	if (nodup && !list_empty(&bucket->obj_list)) {
+		list_for_each_entry(obj, &bucket->obj_list, link) {
+			if (obj->len != newobj->len)
+				continue;
+			if (hops->compare(__mptr(obj->key), __mptr(newobj->key),
+					  obj->len) == 0) {
+				drop_key(newobj, hops);
+				ret = -EEXIST;
+				goto out;
+			}
+		}
+	}
+
+	list_append(&newobj->link, &bucket->obj_list);
+out:
+	write_unlock(&t->lock);
+
+	return ret;
+}
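+
+/*
+ * Insertion sketch (illustrative): embed a struct hashobj into the
+ * indexed object, then enter it keyed by name with nodup=1 to reject
+ * duplicates; my_hash_ops stands for caller-provided operations:
+ *
+ *	struct myobj {
+ *		struct hashobj hobj;
+ *		// payload...
+ *	};
+ *
+ *	ret = __hash_enter(t, name, strlen(name), &obj->hobj,
+ *			   &my_hash_ops, 1);
+ *	if (ret == -EEXIST)
+ *		// an object bearing this key is already indexed
+ */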
+
+int hash_remove(struct hash_table *t, struct hashobj *delobj,
+		const struct hash_operations *hops)
+{
+	struct hash_bucket *bucket;
+	struct hashobj *obj;
+	int ret = -ESRCH;
+
+	bucket = do_hash(t, __mptr(delobj->key), delobj->len);
+
+	write_lock_nocancel(&t->lock);
+
+	if (!list_empty(&bucket->obj_list)) {
+		list_for_each_entry(obj, &bucket->obj_list, link) {
+			if (obj == delobj) {
+				list_remove_init(&obj->link);
+				drop_key(obj, hops);
+				ret = 0;
+				goto out;
+			}
+		}
+	}
+out:
+	write_unlock(&t->lock);
+
+	return __bt(ret);
+}
+
+struct hashobj *hash_search(struct hash_table *t, const void *key,
+			    size_t len, const struct hash_operations *hops)
+{
+	struct hash_bucket *bucket;
+	struct hashobj *obj;
+
+	bucket = do_hash(t, key, len);
+
+	read_lock_nocancel(&t->lock);
+
+	if (!list_empty(&bucket->obj_list)) {
+		list_for_each_entry(obj, &bucket->obj_list, link) {
+			if (obj->len != len)
+				continue;
+			if (hops->compare(__mptr(obj->key), key, len) == 0)
+				goto out;
+		}
+	}
+	obj = NULL;
+out:
+	read_unlock(&t->lock);
+
+	return obj;
+}
+
+int hash_walk(struct hash_table *t, hash_walk_op walk, void *arg)
+{
+	struct hash_bucket *bucket;
+	struct hashobj *obj, *tmp;
+	int ret, n;
+
+	read_lock_nocancel(&t->lock);
+
+	for (n = 0; n < HASHSLOTS; n++) {
+		bucket = &t->table[n];
+		if (list_empty(&bucket->obj_list))
+			continue;
+		list_for_each_entry_safe(obj, tmp, &bucket->obj_list, link) {
+			read_unlock(&t->lock);
+			ret = walk(t, obj, arg);
+			if (ret)
+				return __bt(ret);
+			read_lock_nocancel(&t->lock);
+		}
+	}
+
+	read_unlock(&t->lock);
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_PSHARED
+
+static inline int store_key(struct hashobj *obj,
+			    const void *key, size_t len,
+			    const struct hash_operations *hops)
+{
+	void *p;
+
+	assert(__mchk(obj));
+
+	if (len > sizeof(obj->static_key)) {
+		p = hops->alloc(len);
+		if (p == NULL)
+			return -ENOMEM;
+		assert(__mchk(p));
+	} else
+		p = obj->static_key;
+
+	memcpy(p, key, len);
+	obj->key = __moff(p);
+	obj->len = len;
+
+	return 0;
+}
+
+static inline void drop_key(struct hashobj *obj,
+			    const struct hash_operations *hops)
+{
+	const void *key = __mptr(obj->key);
+
+	if (key != obj->static_key)
+		hops->free((void *)key);
+}
+
+int __hash_enter_probe(struct hash_table *t,
+		       const void *key, size_t len,
+		       struct hashobj *newobj,
+		       const struct hash_operations *hops,
+		       int nodup)
+{
+	struct hash_bucket *bucket;
+	struct hashobj *obj, *tmp;
+	struct service svc;
+	int ret;
+
+	holder_init(&newobj->link);
+	ret = store_key(newobj, key, len, hops);
+	if (ret)
+		return ret;
+
+	bucket = do_hash(t, key, len);
+	CANCEL_DEFER(svc);
+	write_lock(&t->lock);
+
+	if (!list_empty(&bucket->obj_list)) {
+		list_for_each_entry_safe(obj, tmp, &bucket->obj_list, link) {
+			if (obj->len != newobj->len)
+				continue;
+			if (hops->compare(__mptr(obj->key),
+					  __mptr(newobj->key), obj->len) == 0) {
+				if (hops->probe(obj)) {
+					if (nodup) {
+						drop_key(newobj, hops);
+						ret = -EEXIST;
+						goto out;
+					}
+					continue;
+				}
+				list_remove_init(&obj->link);
+				drop_key(obj, hops);
+			}
+		}
+	}
+
+	list_append(&newobj->link, &bucket->obj_list);
+out:
+	write_unlock(&t->lock);
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+struct hashobj *hash_search_probe(struct hash_table *t,
+				  const void *key, size_t len,
+				  const struct hash_operations *hops)
+{
+	struct hash_bucket *bucket;
+	struct hashobj *obj, *tmp;
+	struct service svc;
+
+	bucket = do_hash(t, key, len);
+
+	CANCEL_DEFER(svc);
+	write_lock(&t->lock);
+
+	if (!list_empty(&bucket->obj_list)) {
+		list_for_each_entry_safe(obj, tmp, &bucket->obj_list, link) {
+			if (obj->len != len)
+				continue;
+			if (hops->compare(__mptr(obj->key), key, len) == 0) {
+				if (!hops->probe(obj)) {
+					list_remove_init(&obj->link);
+					drop_key(obj, hops);
+					continue;
+				}
+				goto out;
+			}
+		}
+	}
+	obj = NULL;
+out:
+	write_unlock(&t->lock);
+	CANCEL_RESTORE(svc);
+
+	return obj;
+}
+
+void pvhash_init(struct pvhash_table *t)
+{
+	pthread_mutexattr_t mattr;
+	int n;
+
+	for (n = 0; n < HASHSLOTS; n++)
+		pvlist_init(&t->table[n].obj_list);
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	__RT(pthread_mutex_init(&t->lock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+}
+
+static struct pvhash_bucket *do_pvhash(struct pvhash_table *t,
+				       const void *key, size_t len)
+{
+	unsigned int hash = __hash_key(key, len, 0);
+	return &t->table[hash & (HASHSLOTS-1)];
+}
+
+int __pvhash_enter(struct pvhash_table *t,
+		   const void *key, size_t len,
+		   struct pvhashobj *newobj,
+		   const struct pvhash_operations *hops,
+		   int nodup)
+{
+	struct pvhash_bucket *bucket;
+	struct pvhashobj *obj;
+	int ret = 0;
+
+	pvholder_init(&newobj->link);
+	newobj->key = key;
+	newobj->len = len;
+	bucket = do_pvhash(t, key, len);
+
+	write_lock_nocancel(&t->lock);
+
+	if (nodup && !pvlist_empty(&bucket->obj_list)) {
+		pvlist_for_each_entry(obj, &bucket->obj_list, link) {
+			if (obj->len != newobj->len)
+				continue;
+			if (hops->compare(obj->key, newobj->key, len) == 0) {
+				ret = -EEXIST;
+				goto out;
+			}
+		}
+	}
+
+	pvlist_append(&newobj->link, &bucket->obj_list);
+out:
+	write_unlock(&t->lock);
+
+	return ret;
+}
+
+int pvhash_remove(struct pvhash_table *t, struct pvhashobj *delobj,
+		  const struct pvhash_operations *hops)
+{
+	struct pvhash_bucket *bucket;
+	struct pvhashobj *obj;
+	int ret = -ESRCH;
+
+	bucket = do_pvhash(t, delobj->key, delobj->len);
+
+	write_lock_nocancel(&t->lock);
+
+	if (!pvlist_empty(&bucket->obj_list)) {
+		pvlist_for_each_entry(obj, &bucket->obj_list, link) {
+			if (obj == delobj) {
+				pvlist_remove_init(&obj->link);
+				ret = 0;
+				goto out;
+			}
+		}
+	}
+out:
+	write_unlock(&t->lock);
+
+	return __bt(ret);
+}
+
+struct pvhashobj *pvhash_search(struct pvhash_table *t,
+				const void *key, size_t len,
+				const struct pvhash_operations *hops)
+{
+	struct pvhash_bucket *bucket;
+	struct pvhashobj *obj;
+
+	bucket = do_pvhash(t, key, len);
+
+	read_lock_nocancel(&t->lock);
+
+	if (!pvlist_empty(&bucket->obj_list)) {
+		pvlist_for_each_entry(obj, &bucket->obj_list, link) {
+			if (obj->len != len)
+				continue;
+			if (hops->compare(obj->key, key, len) == 0)
+				goto out;
+		}
+	}
+	obj = NULL;
+out:
+	read_unlock(&t->lock);
+
+	return obj;
+}
+
+int pvhash_walk(struct pvhash_table *t, pvhash_walk_op walk, void *arg)
+{
+	struct pvhash_bucket *bucket;
+	struct pvhashobj *obj, *tmp;
+	int ret, n;
+
+	read_lock_nocancel(&t->lock);
+
+	for (n = 0; n < HASHSLOTS; n++) {
+		bucket = &t->table[n];
+		if (pvlist_empty(&bucket->obj_list))
+			continue;
+		pvlist_for_each_entry_safe(obj, tmp, &bucket->obj_list, link) {
+			read_unlock(&t->lock);
+			ret = walk(t, obj, arg);
+			if (ret)
+				return __bt(ret);
+			read_lock_nocancel(&t->lock);
+		}
+	}
+
+	read_unlock(&t->lock);
+
+	return 0;
+}
+
+#else /* !CONFIG_XENO_PSHARED */
+
+static inline int store_key(struct hashobj *obj,
+			    const void *key, size_t len,
+			    const struct hash_operations *hops)
+{
+	obj->key = key;
+	obj->len = len;
+
+	return 0;
+}
+
+static inline void drop_key(struct hashobj *obj,
+			    const struct hash_operations *hops)
+{ }
+
+#endif /* !CONFIG_XENO_PSHARED */
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/heapmem.c b/kernel/xenomai-v3.2.4/lib/boilerplate/heapmem.c
new file mode 100644
index 0000000..e6369c7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/heapmem.c
@@ -0,0 +1,728 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This code implements a variant of the allocator described in
+ * "Design of a General Purpose Memory Allocator for the 4.3BSD Unix
+ * Kernel" by Marshall K. McKusick and Michael J. Karels (USENIX
+ * 1988), see http://docs.FreeBSD.org/44doc/papers/kernmalloc.pdf.
+ * The free page list is maintained in AVL trees for fast lookups of
+ * multi-page memory ranges, and pages holding bucketed memory have a
+ * fast allocation bitmap to manage their blocks internally.
+ */
+#include <sys/types.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <boilerplate/heapmem.h>
+
+enum heapmem_pgtype {
+	page_free = 0,
+	page_cont = 1,
+	page_list = 2
+};
+
+static struct avl_searchops size_search_ops;
+static struct avl_searchops addr_search_ops;
+
+static inline uint32_t __attribute__ ((always_inline))
+gen_block_mask(int log2size)
+{
+	return -1U >> (32 - (HEAPMEM_PAGE_SIZE >> log2size));
+}
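+
+/*
+ * e.g. assuming HEAPMEM_PAGE_SIZE is 512 and log2size is 7 (128-byte
+ * blocks), a page holds 512 >> 7 = 4 blocks, and the allocation mask
+ * is -1U >> (32 - 4) == 0xf, i.e. one bit per block.
+ */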
+
+static inline  __attribute__ ((always_inline))
+int addr_to_pagenr(struct heapmem_extent *ext, void *p)
+{
+	return ((void *)p - ext->membase) >> HEAPMEM_PAGE_SHIFT;
+}
+
+static inline  __attribute__ ((always_inline))
+void *pagenr_to_addr(struct heapmem_extent *ext, int pg)
+{
+	return ext->membase + (pg << HEAPMEM_PAGE_SHIFT);
+}
+
+#ifdef CONFIG_XENO_DEBUG_FULL
+/*
+ * Setting page_cont/page_free in the page map is only required for
+ * enabling full checking of the block address in free requests, which
+ * may be extremely time-consuming when deallocating huge blocks
+ * spanning thousands of pages. We only do such marking when running
+ * in full debug mode.
+ */
+static inline bool
+page_is_valid(struct heapmem_extent *ext, int pg)
+{
+	switch (ext->pagemap[pg].type) {
+	case page_free:
+	case page_cont:
+		return false;
+	case page_list:
+	default:
+		return true;
+	}
+}
+
+static void mark_pages(struct heapmem_extent *ext,
+		       int pg, int nrpages,
+		       enum heapmem_pgtype type)
+{
+	while (nrpages-- > 0)
+		ext->pagemap[pg].type = type;
+}
+
+#else
+
+static inline bool
+page_is_valid(struct heapmem_extent *ext, int pg)
+{
+	return true;
+}
+
+static void mark_pages(struct heapmem_extent *ext,
+		       int pg, int nrpages,
+		       enum heapmem_pgtype type)
+{ }
+
+#endif
+
+ssize_t heapmem_check(struct heap_memory *heap, void *block)
+{
+	struct heapmem_extent *ext;
+	memoff_t pg, pgoff, boff;
+	ssize_t ret = -EINVAL;
+	size_t bsize;
+
+	read_lock_nocancel(&heap->lock);
+
+	/*
+	 * Find the extent the checked block originates from.
+	 */
+	pvlist_for_each_entry(ext, &heap->extents, next) {
+		if (block >= ext->membase &&
+		    block < ext->memlim)
+			goto found;
+	}
+	goto out;
+found:
+	/* Calculate the page number from the block address. */
+	pgoff = block - ext->membase;
+	pg = pgoff >> HEAPMEM_PAGE_SHIFT;
+	if (page_is_valid(ext, pg)) {
+		if (ext->pagemap[pg].type == page_list)
+			bsize = ext->pagemap[pg].bsize;
+		else {
+			bsize = (1 << ext->pagemap[pg].type);
+			boff = pgoff & ~HEAPMEM_PAGE_MASK;
+			if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+				goto out;
+		}
+		ret = (ssize_t)bsize;
+	}
+out:
+	read_unlock(&heap->lock);
+
+	return ret;
+}
+
+static inline struct heapmem_range *
+find_suitable_range(struct heapmem_extent *ext, size_t size)
+{
+	struct heapmem_range lookup;
+	struct avlh *node;
+
+	lookup.size = size;
+	node = avl_search_ge(&ext->size_tree, &lookup.size_node,
+			     &size_search_ops);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct heapmem_range, size_node);
+}
+
+static int reserve_page_range(struct heapmem_extent *ext, size_t size)
+{
+	struct heapmem_range *new, *splitr;
+
+	new = find_suitable_range(ext, size);
+	if (new == NULL)
+		return -1;
+
+	avl_delete(&ext->size_tree, &new->size_node);
+	if (new->size == size) {
+		avl_delete(&ext->addr_tree, &new->addr_node);
+		return addr_to_pagenr(ext, new);
+	}
+
+	/*
+	 * The free range fetched is larger than what we need: split
+	 * it in two, the upper part goes to the user, the lower part
+	 * is returned to the free list, which makes reindexing by
+	 * address pointless.
+	 */
+	splitr = new;
+	splitr->size -= size;
+	new = (struct heapmem_range *)((void *)new + splitr->size);
+	avlh_init(&splitr->size_node);
+	avl_insert_back(&ext->size_tree, &splitr->size_node,
+			&size_search_ops);
+
+	return addr_to_pagenr(ext, new);
+}
+
+static inline struct heapmem_range *
+find_left_neighbour(struct heapmem_extent *ext, struct heapmem_range *r)
+{
+	struct avlh *node;
+
+	node = avl_search_le(&ext->addr_tree, &r->addr_node,
+			     &addr_search_ops);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct heapmem_range, addr_node);
+}
+
+static inline struct heapmem_range *
+find_right_neighbour(struct heapmem_extent *ext, struct heapmem_range *r)
+{
+	struct avlh *node;
+
+	node = avl_search_ge(&ext->addr_tree, &r->addr_node,
+			     &addr_search_ops);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct heapmem_range, addr_node);
+}
+
+static inline struct heapmem_range *
+find_next_neighbour(struct heapmem_extent *ext, struct heapmem_range *r)
+{
+	struct avlh *node;
+
+	node = avl_next(&ext->addr_tree, &r->addr_node);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct heapmem_range, addr_node);
+}
+
+static inline bool
+ranges_mergeable(struct heapmem_range *left, struct heapmem_range *right)
+{
+	return (void *)left + left->size == (void *)right;
+}
+
+static void release_page_range(struct heapmem_extent *ext,
+			       void *page, size_t size)
+{
+	struct heapmem_range *freed = page, *left, *right;
+	bool addr_linked = false;
+
+	freed->size = size;
+
+	left = find_left_neighbour(ext, freed);
+	if (left && ranges_mergeable(left, freed)) {
+		avl_delete(&ext->size_tree, &left->size_node);
+		left->size += freed->size;
+		freed = left;
+		addr_linked = true;
+		right = find_next_neighbour(ext, freed);
+	} else
+		right = find_right_neighbour(ext, freed);
+
+	if (right && ranges_mergeable(freed, right)) {
+		avl_delete(&ext->size_tree, &right->size_node);
+		freed->size += right->size;
+		if (addr_linked)
+			avl_delete(&ext->addr_tree, &right->addr_node);
+		else
+			avl_replace(&ext->addr_tree, &right->addr_node,
+				    &freed->addr_node, &addr_search_ops);
+	} else if (!addr_linked) {
+		avlh_init(&freed->addr_node);
+		if (left)
+			avl_insert(&ext->addr_tree, &freed->addr_node,
+				   &addr_search_ops);
+		else
+			avl_prepend(&ext->addr_tree, &freed->addr_node,
+				    &addr_search_ops);
+	}
+
+	avlh_init(&freed->size_node);
+	avl_insert_back(&ext->size_tree, &freed->size_node,
+			&size_search_ops);
+	mark_pages(ext, addr_to_pagenr(ext, page),
+		   size >> HEAPMEM_PAGE_SHIFT, page_free);
+}
+
+static void add_page_front(struct heap_memory *heap,
+			   struct heapmem_extent *ext,
+			   int pg, int log2size)
+{
+	struct heapmem_pgentry *new, *head, *next;
+	int ilog;
+
+	/* Insert page at the front of the per-bucket page list. */
+
+	ilog = log2size - HEAPMEM_MIN_LOG2;
+	new = &ext->pagemap[pg];
+	if (heap->buckets[ilog] == -1U) {
+		heap->buckets[ilog] = pg;
+		new->prev = new->next = pg;
+	} else {
+		head = &ext->pagemap[heap->buckets[ilog]];
+		new->prev = heap->buckets[ilog];
+		new->next = head->next;
+		next = &ext->pagemap[new->next];
+		next->prev = pg;
+		head->next = pg;
+		heap->buckets[ilog] = pg;
+	}
+}
+
+static void remove_page(struct heap_memory *heap,
+			struct heapmem_extent *ext,
+			int pg, int log2size)
+{
+	struct heapmem_pgentry *old, *prev, *next;
+	int ilog = log2size - HEAPMEM_MIN_LOG2;
+
+	/* Remove page from the per-bucket page list. */
+
+	old = &ext->pagemap[pg];
+	if (pg == old->next)
+		heap->buckets[ilog] = -1U;
+	else {
+		if (pg == heap->buckets[ilog])
+			heap->buckets[ilog] = old->next;
+		prev = &ext->pagemap[old->prev];
+		prev->next = old->next;
+		next = &ext->pagemap[old->next];
+		next->prev = old->prev;
+	}
+}
+
+static void move_page_front(struct heap_memory *heap,
+			    struct heapmem_extent *ext,
+			    int pg, int log2size)
+{
+	int ilog = log2size - HEAPMEM_MIN_LOG2;
+
+	/* Move page to the front of the per-bucket page list. */
+
+	if (heap->buckets[ilog] == pg)
+		return;	/* Already at front, no move. */
+
+	remove_page(heap, ext, pg, log2size);
+	add_page_front(heap, ext, pg, log2size);
+}
+
+static void move_page_back(struct heap_memory *heap,
+			   struct heapmem_extent *ext,
+			   int pg, int log2size)
+{
+	struct heapmem_pgentry *old, *last, *head, *next;
+	int ilog;
+
+	/* Move page to the end of the per-bucket page list. */
+
+	old = &ext->pagemap[pg];
+	if (pg == old->next) /* Singleton, no move. */
+		return;
+
+	remove_page(heap, ext, pg, log2size);
+
+	ilog = log2size - HEAPMEM_MIN_LOG2;
+	head = &ext->pagemap[heap->buckets[ilog]];
+	last = &ext->pagemap[head->prev];
+	old->prev = head->prev;
+	old->next = last->next;
+	next = &ext->pagemap[old->next];
+	next->prev = pg;
+	last->next = pg;
+}
+
+static void *add_free_range(struct heap_memory *heap, size_t bsize, int log2size)
+{
+	struct heapmem_extent *ext;
+	size_t rsize;
+	int pg;
+
+	/*
+	 * Scanning each extent, search for a range of contiguous
+	 * pages in the extent. The range must be at least @bsize
+	 * long. @pg is the heading page number on success.
+	 */
+	rsize = __align_to(bsize, HEAPMEM_PAGE_SIZE);
+	pvlist_for_each_entry(ext, &heap->extents, next) {
+		pg = reserve_page_range(ext, rsize);
+		if (pg >= 0)
+			goto found;
+	}
+
+	return NULL;
+
+found:
+	/*
+	 * Update the page entry.  If @log2size is non-zero
+	 * (i.e. bsize < HEAPMEM_PAGE_SIZE), bsize is (1 << log2size)
+	 * between 2^HEAPMEM_MIN_LOG2 and 2^(HEAPMEM_PAGE_SHIFT - 1).
+	 * Save the log2 power into entry.type, then update the
+	 * per-page allocation bitmap to reserve the first block.
+	 *
+	 * Otherwise, we have a larger block which may span multiple
+	 * pages: set entry.type to page_list, indicating the start of
+	 * the page range, and entry.bsize to the overall block size.
+	 */
+	if (log2size) {
+		ext->pagemap[pg].type = log2size;
+		/*
+		 * Mark the first object slot (#0) as busy, along with
+		 * the leftmost bits we won't use for this log2 size.
+		 */
+		ext->pagemap[pg].map = ~gen_block_mask(log2size) | 1;
+		/*
+		 * Insert the new page at front of the per-bucket page
+		 * list, enforcing the assumption that pages with free
+		 * space live close to the head of this list.
+		 */
+		add_page_front(heap, ext, pg, log2size);
+	} else {
+		ext->pagemap[pg].type = page_list;
+		ext->pagemap[pg].bsize = (uint32_t)bsize;
+		mark_pages(ext, pg + 1,
+			   (bsize >> HEAPMEM_PAGE_SHIFT) - 1, page_cont);
+	}
+
+	heap->used_size += bsize;
+
+	return pagenr_to_addr(ext, pg);
+}
+
+void *heapmem_alloc(struct heap_memory *heap, size_t size)
+{
+	struct heapmem_extent *ext;
+	int log2size, ilog, pg, b;
+	uint32_t bmask;
+	size_t bsize;
+	void *block;
+
+	if (size == 0)
+		return NULL;
+
+	if (size < HEAPMEM_MIN_ALIGN) {
+		bsize = size = HEAPMEM_MIN_ALIGN;
+		log2size = HEAPMEM_MIN_LOG2;
+	} else {
+		log2size = sizeof(size) * CHAR_BIT - 1 -
+			xenomai_count_leading_zeros(size);
+		if (log2size < HEAPMEM_PAGE_SHIFT) {
+			if (size & (size - 1))
+				log2size++;
+			bsize = 1 << log2size;
+		} else
+			bsize = __align_to(size, HEAPMEM_PAGE_SIZE);
+	}
+
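+	/*
+	 * For illustration (editor's note): a 100-byte request is not
+	 * a power of two, so log2size is bumped from 6 to 7 and
+	 * bsize = 128; requests of at least HEAPMEM_PAGE_SIZE bytes
+	 * are rounded to a multiple of the page size instead.
+	 */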
+	/*
+	 * Allocate entire pages directly from the pool whenever the
+	 * block is larger than or equal to HEAPMEM_PAGE_SIZE.
+	 * Otherwise, use bucketed memory.
+	 *
+	 * NOTE: Fully busy pages from bucketed memory are moved back
+	 * to the end of the per-bucket page list, so that we may
+	 * always assume that either the heading page has some room
+	 * available, or no room is available from any page linked to
+	 * this list, in which case we should immediately add a fresh
+	 * page.
+	 */
+	if (bsize < HEAPMEM_PAGE_SIZE) {
+		ilog = log2size - HEAPMEM_MIN_LOG2;
+		assert(ilog >= 0 && ilog < HEAPMEM_MAX);
+
+		write_lock_nocancel(&heap->lock);
+
+		pvlist_for_each_entry(ext, &heap->extents, next) {
+			pg = heap->buckets[ilog];
+			if (pg < 0) /* Empty page list? */
+				continue;
+
+			/*
+			 * Find a block in the heading page. If there
+			 * is none, there won't be any down the list:
+			 * add a new page right away.
+			 */
+			bmask = ext->pagemap[pg].map;
+			if (bmask == -1U)
+				break;
+			b = xenomai_count_trailing_zeros(~bmask);
+
+			/*
+			 * Got one block from the heading per-bucket
+			 * page, tag it as busy in the per-page
+			 * allocation map.
+			 */
+			ext->pagemap[pg].map |= (1U << b);
+			heap->used_size += bsize;
+			block = ext->membase +
+				(pg << HEAPMEM_PAGE_SHIFT) +
+				(b << log2size);
+			if (ext->pagemap[pg].map == -1U)
+				move_page_back(heap, ext, pg, log2size);
+			goto out;
+		}
+
+		/* No free block in bucketed memory, add one page. */
+		block = add_free_range(heap, bsize, log2size);
+	} else {
+		write_lock_nocancel(&heap->lock);
+		/* Add a range of contiguous free pages. */
+		block = add_free_range(heap, bsize, 0);
+	}
+out:
+	write_unlock(&heap->lock);
+
+	return block;
+}
+
+int heapmem_free(struct heap_memory *heap, void *block)
+{
+	int log2size, ret = 0, pg, n;
+	struct heapmem_extent *ext;
+	memoff_t pgoff, boff;
+	uint32_t oldmap;
+	size_t bsize;
+
+	write_lock_nocancel(&heap->lock);
+
+	/*
+	 * Find the extent the returned block originates from.
+	 */
+	pvlist_for_each_entry(ext, &heap->extents, next) {
+		if (block >= ext->membase && block < ext->memlim)
+			goto found;
+	}
+
+	goto bad;
+found:
+	/* Compute the heading page number in the page map. */
+	pgoff = block - ext->membase;
+	pg = pgoff >> HEAPMEM_PAGE_SHIFT;
+	if (!page_is_valid(ext, pg))
+		goto bad;
+
+	switch (ext->pagemap[pg].type) {
+	case page_list:
+		bsize = ext->pagemap[pg].bsize;
+		assert((bsize & (HEAPMEM_PAGE_SIZE - 1)) == 0);
+		release_page_range(ext, pagenr_to_addr(ext, pg), bsize);
+		break;
+
+	default:
+		log2size = ext->pagemap[pg].type;
+		bsize = (1 << log2size);
+		assert(bsize < HEAPMEM_PAGE_SIZE);
+		boff = pgoff & ~HEAPMEM_PAGE_MASK;
+		if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+			goto bad;
+
+		n = boff >> log2size; /* Block position in page. */
+		oldmap = ext->pagemap[pg].map;
+		ext->pagemap[pg].map &= ~(1U << n);
+
+		/*
+		 * If the page the block was sitting on is fully idle,
+		 * return it to the pool. Otherwise, check whether
+		 * that page is transitioning from fully busy to
+		 * partially busy state, in which case it should move
+		 * toward the front of the per-bucket page list.
+		 */
+		if (ext->pagemap[pg].map == ~gen_block_mask(log2size)) {
+			remove_page(heap, ext, pg, log2size);
+			release_page_range(ext, pagenr_to_addr(ext, pg),
+					   HEAPMEM_PAGE_SIZE);
+		} else if (oldmap == -1U)
+			move_page_front(heap, ext, pg, log2size);
+	}
+
+	heap->used_size -= bsize;
+out:
+	write_unlock(&heap->lock);
+
+	return __bt(ret);
+bad:
+	ret = -EINVAL;
+	goto out;
+}
+
+static inline int compare_range_by_size(const struct avlh *l, const struct avlh *r)
+{
+	struct heapmem_range *rl = container_of(l, typeof(*rl), size_node);
+	struct heapmem_range *rr = container_of(r, typeof(*rr), size_node);
+
+	return avl_sign((long)(rl->size - rr->size));
+}
+static DECLARE_AVL_SEARCH(search_range_by_size, compare_range_by_size);
+
+static struct avl_searchops size_search_ops = {
+	.search = search_range_by_size,
+	.cmp = compare_range_by_size,
+};
+
+static inline int compare_range_by_addr(const struct avlh *l, const struct avlh *r)
+{
+	uintptr_t al = (uintptr_t)l, ar = (uintptr_t)r;
+
+	/*
+	 * Range descriptors live at the start of the free memory they
+	 * describe, so comparing the embedded node addresses orders
+	 * the ranges by address directly.
+	 */
+	return avl_cmp_sign(al, ar);
+}
+static DECLARE_AVL_SEARCH(search_range_by_addr, compare_range_by_addr);
+
+static struct avl_searchops addr_search_ops = {
+	.search = search_range_by_addr,
+	.cmp = compare_range_by_addr,
+};
+
+static int add_extent(struct heap_memory *heap, void *mem, size_t size)
+{
+	size_t user_size, overhead;
+	struct heapmem_extent *ext;
+	int nrpages, state;
+
+	/*
+	 * @size must include the overhead memory we need for storing
+	 * our meta data as calculated by HEAPMEM_ARENA_SIZE(); we
+	 * recompute that amount here.
+	 *
+	 * o = overhead
+	 * e = sizeof(heapmem_extent)
+	 * p = HEAPMEM_PAGE_SIZE
+	 * m = HEAPMEM_PGMAP_BYTES
+	 * a = @size (arena size)
+	 *
+	 * The overhead must cover the extent descriptor plus one page
+	 * map entry per user page, i.e. o = e + m * (a - o) / p,
+	 * which solves to:
+	 *
+	 * o = align_to(((a * m + e * p) / (p + m)), minlog2)
+	 */
+	overhead = __align_to((size * HEAPMEM_PGMAP_BYTES +
+			       sizeof(*ext) * HEAPMEM_PAGE_SIZE) /
+			      (HEAPMEM_PAGE_SIZE + HEAPMEM_PGMAP_BYTES),
+			      HEAPMEM_MIN_ALIGN);
+
+	user_size = size - overhead;
+	if (user_size & ~HEAPMEM_PAGE_MASK)
+		return -EINVAL;
+
+	if (user_size < HEAPMEM_PAGE_SIZE ||
+	    user_size > HEAPMEM_MAX_EXTSZ)
+		return -EINVAL;
+
+	/*
+	 * Setup an extent covering user_size bytes of user memory
+	 * starting at @mem. user_size must be a multiple of
+	 * HEAPMEM_PAGE_SIZE.  The extent starts with a descriptor,
+	 * followed by the array of page entries.
+	 *
+	 * Page entries contain per-page metadata for managing the
+	 * page pool.
+	 *
+	 * +-------------------+ <= mem
+	 * | extent descriptor |
+	 * /...................\
+	 * \...page entries[]../
+	 * /...................\
+	 * +-------------------+ <= extent->membase
+	 * |                   |
+	 * |                   |
+	 * |    (page pool)    |
+	 * |                   |
+	 * |                   |
+	 * +-------------------+
+	 *                       <= extent->memlim == mem + size
+	 */
+	nrpages = user_size >> HEAPMEM_PAGE_SHIFT;
+	ext = mem;
+	ext->membase = mem + overhead;
+	ext->memlim = mem + size;
+
+	memset(ext->pagemap, 0, nrpages * sizeof(struct heapmem_pgentry));
+	/*
+	 * The free page pool is maintained as a set of ranges of
+	 * contiguous pages indexed by address and size in AVL
+	 * trees. Initially, we have a single range in those trees
+	 * covering the whole user memory we have been given for the
+	 * extent. Over time, that range will be split then possibly
+	 * re-merged back as allocations and deallocations take place.
+	 */
+	avl_init(&ext->size_tree);
+	avl_init(&ext->addr_tree);
+	release_page_range(ext, ext->membase, user_size);
+
+	write_lock_safe(&heap->lock, state);
+	pvlist_append(&ext->next, &heap->extents);
+	heap->arena_size += size;
+	heap->usable_size += user_size;
+	write_unlock_safe(&heap->lock, state);
+
+	return 0;
+}
+
+int heapmem_init(struct heap_memory *heap, void *mem, size_t size)
+{
+	pthread_mutexattr_t mattr;
+	int ret, n;
+
+	heap->used_size = 0;
+	heap->usable_size = 0;
+	heap->arena_size = 0;
+	pvlist_init(&heap->extents);
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&heap->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	/* Reset bucket page lists, all empty. */
+	for (n = 0; n < HEAPMEM_MAX; n++)
+		heap->buckets[n] = -1U;
+
+	ret = add_extent(heap, mem, size);
+	if (ret) {
+		__RT(pthread_mutex_destroy(&heap->lock));
+		return ret;
+	}
+
+	return 0;
+}
+
+int heapmem_extend(struct heap_memory *heap, void *mem, size_t size)
+{
+	return add_extent(heap, mem, size);
+}
+
+void heapmem_destroy(struct heap_memory *heap)
+{
+	__RT(pthread_mutex_destroy(&heap->lock));
+}
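+
+/*
+ * Example usage (editor's sketch, not part of the original file);
+ * it assumes the HEAPMEM_ARENA_SIZE() macro which add_extent()
+ * refers to above:
+ *
+ *	static char arena[HEAPMEM_ARENA_SIZE(65536)];
+ *	struct heap_memory heap;
+ *	void *p;
+ *
+ *	if (heapmem_init(&heap, arena, sizeof(arena)) == 0) {
+ *		p = heapmem_alloc(&heap, 128);
+ *		if (p)
+ *			heapmem_free(&heap, p);
+ *		heapmem_destroy(&heap);
+ *	}
+ */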
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/LICENSE b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/LICENSE
new file mode 100644
index 0000000..dbfa45d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/LICENSE
@@ -0,0 +1,21 @@
+Copyright (c) 2000-2007 by Nicolas Devillard.
+MIT License
+
+Permission is hereby granted, free of charge, to any person obtaining a
+copy of this software and associated documentation files (the "Software"),
+to deal in the Software without restriction, including without limitation
+the rights to use, copy, modify, merge, publish, distribute, sublicense,
+and/or sell copies of the Software, and to permit persons to whom the
+Software is furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in
+all copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+DEALINGS IN THE SOFTWARE.
+
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/README b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/README
new file mode 100644
index 0000000..dfa43c8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/README
@@ -0,0 +1 @@
+See http://ndevilla.free.fr/iniparser/
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.c b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.c
new file mode 100644
index 0000000..cb7ccd4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.c
@@ -0,0 +1,380 @@
+/*-------------------------------------------------------------------------*/
+/**
+   @file    dictionary.c
+   @author  N. Devillard
+   @brief   Implements a dictionary for string variables.
+
+   This module implements a simple dictionary object, i.e. a list
+   of string/string associations. This object is useful to store e.g.
+   information retrieved from a configuration file (ini files).
+*/
+/*--------------------------------------------------------------------------*/
+
+/*---------------------------------------------------------------------------
+                                Includes
+ ---------------------------------------------------------------------------*/
+#include "dictionary.h"
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+/** Maximum value size for integers and doubles. */
+#define MAXVALSZ    1024
+
+/** Minimal allocated number of entries in a dictionary */
+#define DICTMINSZ   128
+
+/** Invalid key token */
+#define DICT_INVALID_KEY    ((char*)-1)
+
+/*---------------------------------------------------------------------------
+                            Private functions
+ ---------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Duplicate a string
+  @param    s String to duplicate
+  @return   Pointer to a newly allocated string, to be freed with free()
+
+  This is a replacement for strdup(). This implementation is provided
+  for systems that do not have it.
+ */
+/*--------------------------------------------------------------------------*/
+static char * xstrdup(const char * s)
+{
+    char * t ;
+    size_t len ;
+    if (!s)
+        return NULL ;
+
+    len = strlen(s) + 1 ;
+    t = (char*) malloc(len) ;
+    if (t) {
+        memcpy(t, s, len) ;
+    }
+    return t ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Double the size of the dictionary
+  @param    d Dictionary to grow
+  @return   This function returns non-zero in case of failure
+ */
+/*--------------------------------------------------------------------------*/
+static int dictionary_grow(dictionary * d)
+{
+    char        ** new_val ;
+    char        ** new_key ;
+    unsigned     * new_hash ;
+
+    new_val  = (char**) calloc(d->size * 2, sizeof *d->val);
+    new_key  = (char**) calloc(d->size * 2, sizeof *d->key);
+    new_hash = (unsigned*) calloc(d->size * 2, sizeof *d->hash);
+    if (!new_val || !new_key || !new_hash) {
+        /* An allocation failed, leave the dictionary unchanged */
+        if (new_val)
+            free(new_val);
+        if (new_key)
+            free(new_key);
+        if (new_hash)
+            free(new_hash);
+        return -1 ;
+    }
+    /* Initialize the newly allocated space */
+    memcpy(new_val, d->val, d->size * sizeof(char *));
+    memcpy(new_key, d->key, d->size * sizeof(char *));
+    memcpy(new_hash, d->hash, d->size * sizeof(unsigned));
+    /* Delete previous data */
+    free(d->val);
+    free(d->key);
+    free(d->hash);
+    /* Actually update the dictionary */
+    d->size *= 2 ;
+    d->val = new_val;
+    d->key = new_key;
+    d->hash = new_hash;
+    return 0 ;
+}
+
+/*---------------------------------------------------------------------------
+                            Function codes
+ ---------------------------------------------------------------------------*/
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Compute the hash key for a string.
+  @param    key     Character string to use for key.
+  @return   1 unsigned int on at least 32 bits.
+
+  This hash function has been taken from an Article in Dr Dobbs Journal.
+  This is normally a collision-free function, distributing keys evenly.
+  The key is stored anyway in the struct so that collision can be avoided
+  by comparing the key itself in last resort.
+ */
+/*--------------------------------------------------------------------------*/
+unsigned dictionary_hash(const char * key)
+{
+    size_t      len ;
+    unsigned    hash ;
+    size_t      i ;
+
+    if (!key)
+        return 0 ;
+
+    len = strlen(key);
+    for (hash=0, i=0 ; i<len ; i++) {
+        hash += (unsigned)key[i] ;
+        hash += (hash<<10);
+        hash ^= (hash>>6) ;
+    }
+    hash += (hash <<3);
+    hash ^= (hash >>11);
+    hash += (hash <<15);
+    return hash ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Create a new dictionary object.
+  @param    size    Optional initial size of the dictionary.
+  @return   1 newly allocated dictionary object.
+
+  This function allocates a new dictionary object of given size and returns
+  it. If you do not know in advance (roughly) the number of entries in the
+  dictionary, give size=0.
+ */
+/*-------------------------------------------------------------------------*/
+dictionary * dictionary_new(size_t size)
+{
+    dictionary  *   d ;
+
+    /* If no size was specified, allocate space for DICTMINSZ */
+    if (size<DICTMINSZ) size=DICTMINSZ ;
+
+    d = (dictionary*) calloc(1, sizeof *d) ;
+
+    if (d) {
+        d->size = size ;
+        d->val  = (char**) calloc(size, sizeof *d->val);
+        d->key  = (char**) calloc(size, sizeof *d->key);
+        d->hash = (unsigned*) calloc(size, sizeof *d->hash);
+    }
+    return d ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Delete a dictionary object
+  @param    d   dictionary object to deallocate.
+  @return   void
+
+  Deallocate a dictionary object and all memory associated to it.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_del(dictionary * d)
+{
+    ssize_t  i ;
+
+    if (d==NULL) return ;
+    for (i=0 ; i<d->size ; i++) {
+        if (d->key[i]!=NULL)
+            free(d->key[i]);
+        if (d->val[i]!=NULL)
+            free(d->val[i]);
+    }
+    free(d->val);
+    free(d->key);
+    free(d->hash);
+    free(d);
+    return ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get a value from a dictionary.
+  @param    d       dictionary object to search.
+  @param    key     Key to look for in the dictionary.
+  @param    def     Default value to return if key not found.
+  @return   1 pointer to internally allocated character string.
+
+  This function locates a key in a dictionary and returns a pointer to its
+  value, or the passed 'def' pointer if no such key can be found in
+  dictionary. The returned character pointer points to data internal to the
+  dictionary object, you should not try to free it or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+const char * dictionary_get(const dictionary * d, const char * key, const char * def)
+{
+    unsigned    hash ;
+    ssize_t      i ;
+
+    hash = dictionary_hash(key);
+    for (i=0 ; i<d->size ; i++) {
+        if (d->key[i]==NULL)
+            continue ;
+        /* Compare hash */
+        if (hash==d->hash[i]) {
+            /* Compare string, to avoid hash collisions */
+            if (!strcmp(key, d->key[i])) {
+                return d->val[i] ;
+            }
+        }
+    }
+    return def ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Set a value in a dictionary.
+  @param    d       dictionary object to modify.
+  @param    key     Key to modify or add.
+  @param    val     Value to add.
+  @return   int     0 if Ok, anything else otherwise
+
+  If the given key is found in the dictionary, the associated value is
+  replaced by the provided one. If the key cannot be found in the
+  dictionary, it is added to it.
+
+  It is Ok to provide a NULL value for val, but NULL values for the dictionary
+  or the key are considered as errors: the function will return immediately
+  in such a case.
+
+  Notice that if you dictionary_set a variable to NULL, a call to
+  dictionary_get will return a NULL value: the variable will be found, and
+  its value (NULL) is returned. In other words, setting the variable
+  content to NULL is equivalent to deleting the variable from the
+  dictionary. It is not possible (in this implementation) to have a key in
+  the dictionary without value.
+
+  This function returns non-zero in case of failure.
+ */
+/*--------------------------------------------------------------------------*/
+int dictionary_set(dictionary * d, const char * key, const char * val)
+{
+    ssize_t         i ;
+    unsigned       hash ;
+
+    if (d==NULL || key==NULL) return -1 ;
+
+    /* Compute hash for this key */
+    hash = dictionary_hash(key) ;
+    /* Find if value is already in dictionary */
+    if (d->n>0) {
+        for (i=0 ; i<d->size ; i++) {
+            if (d->key[i]==NULL)
+                continue ;
+            if (hash==d->hash[i]) { /* Same hash value */
+                if (!strcmp(key, d->key[i])) {   /* Same key */
+                    /* Found a value: modify and return */
+                    if (d->val[i]!=NULL)
+                        free(d->val[i]);
+                    d->val[i] = (val ? xstrdup(val) : NULL);
+                    /* Value has been modified: return */
+                    return 0 ;
+                }
+            }
+        }
+    }
+    /* Add a new value */
+    /* See if dictionary needs to grow */
+    if (d->n==d->size) {
+        /* Reached maximum size: reallocate dictionary */
+        if (dictionary_grow(d) != 0)
+            return -1;
+    }
+
+    /* Insert key in the first empty slot. Start at d->n and wrap at
+       d->size. Because d->n < d->size this will necessarily
+       terminate. */
+    for (i=d->n ; d->key[i] ; ) {
+        if(++i == d->size) i = 0;
+    }
+    /* Copy key */
+    d->key[i]  = xstrdup(key);
+    d->val[i]  = (val ? xstrdup(val) : NULL) ;
+    d->hash[i] = hash;
+    d->n ++ ;
+    return 0 ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Delete a key in a dictionary
+  @param    d       dictionary object to modify.
+  @param    key     Key to remove.
+  @return   void
+
+  This function deletes a key in a dictionary. Nothing is done if the
+  key cannot be found.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_unset(dictionary * d, const char * key)
+{
+    unsigned    hash ;
+    ssize_t      i ;
+
+    if (key == NULL || d == NULL) {
+        return;
+    }
+
+    hash = dictionary_hash(key);
+    for (i=0 ; i<d->size ; i++) {
+        if (d->key[i]==NULL)
+            continue ;
+        /* Compare hash */
+        if (hash==d->hash[i]) {
+            /* Compare string, to avoid hash collisions */
+            if (!strcmp(key, d->key[i])) {
+                /* Found key */
+                break ;
+            }
+        }
+    }
+    if (i>=d->size)
+        /* Key not found */
+        return ;
+
+    free(d->key[i]);
+    d->key[i] = NULL ;
+    if (d->val[i]!=NULL) {
+        free(d->val[i]);
+        d->val[i] = NULL ;
+    }
+    d->hash[i] = 0 ;
+    d->n -- ;
+    return ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Dump a dictionary to an opened file pointer.
+  @param    d   Dictionary to dump
+  @param    f   Opened file pointer.
+  @return   void
+
+  Dumps a dictionary onto an opened file pointer. Key pairs are printed out
+  as @c [Key]=[Value], one per line. It is Ok to provide stdout or stderr as
+  output file pointers.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_dump(const dictionary * d, FILE * out)
+{
+    ssize_t  i ;
+
+    if (d==NULL || out==NULL) return ;
+    if (d->n<1) {
+        fprintf(out, "empty dictionary\n");
+        return ;
+    }
+    for (i=0 ; i<d->size ; i++) {
+        if (d->key[i]) {
+            fprintf(out, "%20s\t[%s]\n",
+                    d->key[i],
+                    d->val[i] ? d->val[i] : "UNDEF");
+        }
+    }
+    return ;
+}
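+
+/*
+ * Example usage (editor's sketch, not part of the original file):
+ *
+ *	dictionary *d = dictionary_new(0);
+ *
+ *	if (d && dictionary_set(d, "section:key", "value") == 0)
+ *		printf("%s\n", dictionary_get(d, "section:key", "UNDEF"));
+ *	dictionary_unset(d, "section:key");
+ *	dictionary_del(d);
+ */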
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.h b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.h
new file mode 100644
index 0000000..d04b6ce
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/dictionary.h
@@ -0,0 +1,173 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+   @file    dictionary.h
+   @author  N. Devillard
+   @brief   Implements a dictionary for string variables.
+
+   This module implements a simple dictionary object, i.e. a list
+   of string/string associations. This object is useful to store e.g.
+   information retrieved from a configuration file (ini files).
+*/
+/*--------------------------------------------------------------------------*/
+
+#ifndef _DICTIONARY_H_
+#define _DICTIONARY_H_
+
+/*---------------------------------------------------------------------------
+                                Includes
+ ---------------------------------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*---------------------------------------------------------------------------
+                                New types
+ ---------------------------------------------------------------------------*/
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Dictionary object
+
+  This object contains a list of string/string associations. Each
+  association is identified by a unique string key. Looking up values
+  in the dictionary is sped up by the use of a (hopefully collision-free)
+  hash function.
+ */
+/*-------------------------------------------------------------------------*/
+typedef struct _dictionary_ {
+    int             n ;     /** Number of entries in dictionary */
+    ssize_t         size ;  /** Storage size */
+    char        **  val ;   /** List of string values */
+    char        **  key ;   /** List of string keys */
+    unsigned     *  hash ;  /** List of hash values for keys */
+} dictionary ;
+
+
+/*---------------------------------------------------------------------------
+                            Function prototypes
+ ---------------------------------------------------------------------------*/
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Compute the hash key for a string.
+  @param    key     Character string to use for key.
+  @return   1 unsigned int on at least 32 bits.
+
+  This hash function has been taken from an Article in Dr Dobbs Journal.
+  This is normally a collision-free function, distributing keys evenly.
+  The key is stored anyway in the struct so that collision can be avoided
+  by comparing the key itself in last resort.
+ */
+/*--------------------------------------------------------------------------*/
+unsigned dictionary_hash(const char * key);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Create a new dictionary object.
+  @param    size    Optional initial size of the dictionary.
+  @return   1 newly allocated dictionary object.
+
+  This function allocates a new dictionary object of given size and returns
+  it. If you do not know in advance (roughly) the number of entries in the
+  dictionary, give size=0.
+ */
+/*--------------------------------------------------------------------------*/
+dictionary * dictionary_new(size_t size);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Delete a dictionary object
+  @param    d   dictionary object to deallocate.
+  @return   void
+
+  Deallocate a dictionary object and all memory associated to it.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_del(dictionary * vd);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get a value from a dictionary.
+  @param    d       dictionary object to search.
+  @param    key     Key to look for in the dictionary.
+  @param    def     Default value to return if key not found.
+  @return   1 pointer to internally allocated character string.
+
+  This function locates a key in a dictionary and returns a pointer to its
+  value, or the passed 'def' pointer if no such key can be found in
+  dictionary. The returned character pointer points to data internal to the
+  dictionary object, you should not try to free it or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+const char * dictionary_get(const dictionary * d, const char * key, const char * def);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Set a value in a dictionary.
+  @param    d       dictionary object to modify.
+  @param    key     Key to modify or add.
+  @param    val     Value to add.
+  @return   int     0 if Ok, anything else otherwise
+
+  If the given key is found in the dictionary, the associated value is
+  replaced by the provided one. If the key cannot be found in the
+  dictionary, it is added to it.
+
+  It is Ok to provide a NULL value for val, but NULL values for the dictionary
+  or the key are considered as errors: the function will return immediately
+  in such a case.
+
+  Notice that if you dictionary_set a variable to NULL, a call to
+  dictionary_get will return a NULL value: the variable will be found, and
+  its value (NULL) is returned. In other words, setting the variable
+  content to NULL is equivalent to deleting the variable from the
+  dictionary. It is not possible (in this implementation) to have a key in
+  the dictionary without value.
+
+  This function returns non-zero in case of failure.
+ */
+/*--------------------------------------------------------------------------*/
+int dictionary_set(dictionary * vd, const char * key, const char * val);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Delete a key in a dictionary
+  @param    d       dictionary object to modify.
+  @param    key     Key to remove.
+  @return   void
+
+  This function deletes a key in a dictionary. Nothing is done if the
+  key cannot be found.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_unset(dictionary * d, const char * key);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Dump a dictionary to an opened file pointer.
+  @param    d   Dictionary to dump
+  @param    f   Opened file pointer.
+  @return   void
+
+  Dumps a dictionary onto an opened file pointer. Key pairs are printed out
+  as @c [Key]=[Value], one per line. It is Ok to provide stdout or stderr as
+  output file pointers.
+ */
+/*--------------------------------------------------------------------------*/
+void dictionary_dump(const dictionary * d, FILE * out);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.c b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.c
new file mode 100644
index 0000000..f1d1658
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.c
@@ -0,0 +1,836 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+   @file    iniparser.c
+   @author  N. Devillard
+   @brief   Parser for ini files.
+*/
+/*--------------------------------------------------------------------------*/
+/*---------------------------- Includes ------------------------------------*/
+#include <ctype.h>
+#include <stdarg.h>
+#include "iniparser.h"
+
+/*---------------------------- Defines -------------------------------------*/
+#define ASCIILINESZ         (1024)
+#define INI_INVALID_KEY     ((char*)-1)
+
+/*---------------------------------------------------------------------------
+                        Private to this module
+ ---------------------------------------------------------------------------*/
+/**
+ * This enum stores the status for each parsed line (internal use only).
+ */
+typedef enum _line_status_ {
+    LINE_UNPROCESSED,
+    LINE_ERROR,
+    LINE_EMPTY,
+    LINE_COMMENT,
+    LINE_SECTION,
+    LINE_VALUE
+} line_status ;
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Convert a string to lowercase.
+  @param    in   String to convert.
+  @param    out Output buffer.
+  @param    len Size of the out buffer.
+  @return   ptr to the out buffer or NULL if an error occurred.
+
+  This function converts a string into lowercase.
+  At most len - 1 characters of the input string will be converted.
+ */
+/*--------------------------------------------------------------------------*/
+static const char * strlwc(const char * in, char *out, unsigned len)
+{
+    unsigned i ;
+
+    if (in==NULL || out == NULL || len==0) return NULL ;
+    i=0 ;
+    while (in[i] != '\0' && i < len-1) {
+        out[i] = (char)tolower((int)in[i]);
+        i++ ;
+    }
+    out[i] = '\0';
+    return out ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Duplicate a string
+  @param    s String to duplicate
+  @return   Pointer to a newly allocated string, to be freed with free()
+
+  This is a replacement for strdup(). This implementation is provided
+  for systems that do not have it.
+ */
+/*--------------------------------------------------------------------------*/
+static char * xstrdup(const char * s)
+{
+    char * t ;
+    size_t len ;
+    if (!s)
+        return NULL ;
+
+    len = strlen(s) + 1 ;
+    t = (char*) malloc(len) ;
+    if (t) {
+        memcpy(t, s, len) ;
+    }
+    return t ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Remove blanks at the beginning and the end of a string.
+  @param    str  String to parse and alter.
+  @return   unsigned New size of the string.
+ */
+/*--------------------------------------------------------------------------*/
+static unsigned strstrip(char * s)
+{
+    char *last = NULL ;
+    char *dest = s;
+
+    if (s==NULL) return 0;
+
+    last = s + strlen(s);
+    while (isspace((int)*s) && *s) s++;
+    while (last > s) {
+        if (!isspace((int)*(last-1)))
+            break ;
+        last -- ;
+    }
+    *last = (char)0;
+
+    memmove(dest,s,last - s + 1);
+    return last - s;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Default error callback for iniparser: wraps `fprintf(stderr, ...)`.
+ */
+/*--------------------------------------------------------------------------*/
+static int default_error_callback(const char *format, ...)
+{
+  int ret;
+  va_list argptr;
+  va_start(argptr, format);
+  ret = vfprintf(stderr, format, argptr);
+  va_end(argptr);
+  return ret;
+}
+
+static int (*iniparser_error_callback)(const char*, ...) = default_error_callback;
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Configure a function to receive the error messages.
+  @param    errback  Function to call.
+
+  By default, the error will be printed on stderr. If a null pointer is passed
+  as errback the error callback will be switched back to default.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_set_error_callback(int (*errback)(const char *, ...))
+{
+  if (errback) {
+    iniparser_error_callback = errback;
+  } else {
+    iniparser_error_callback = default_error_callback;
+  }
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get number of sections in a dictionary
+  @param    d   Dictionary to examine
+  @return   int Number of sections found in dictionary
+
+  This function returns the number of sections found in a dictionary.
+  The test to recognize sections is done on the string stored in the
+  dictionary: a section name is given as "section" whereas a key is
+  stored as "section:key", thus the test looks for entries that do not
+  contain a colon.
+
+  This clearly fails in the case where a section name contains a colon,
+  but this should simply be avoided.
+
+  This function returns -1 in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getnsec(const dictionary * d)
+{
+    int i ;
+    int nsec ;
+
+    if (d==NULL) return -1 ;
+    nsec=0 ;
+    for (i=0 ; i<d->size ; i++) {
+        if (d->key[i]==NULL)
+            continue ;
+        if (strchr(d->key[i], ':')==NULL) {
+            nsec ++ ;
+        }
+    }
+    return nsec ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get name for section n in a dictionary.
+  @param    d   Dictionary to examine
+  @param    n   Section number (from 0 to nsec-1).
+  @return   Pointer to char string
+
+  This function locates the n-th section in a dictionary and returns
+  its name as a pointer to a string statically allocated inside the
+  dictionary. Do not free or modify the returned string!
+
+  This function returns NULL in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+const char * iniparser_getsecname(const dictionary * d, int n)
+{
+    int i ;
+    int foundsec ;
+
+    if (d==NULL || n<0) return NULL ;
+    foundsec=0 ;
+    for (i=0 ; i<d->size ; i++) {
+        if (d->key[i]==NULL)
+            continue ;
+        if (strchr(d->key[i], ':')==NULL) {
+            foundsec++ ;
+            if (foundsec>n)
+                break ;
+        }
+    }
+    if (foundsec<=n) {
+        return NULL ;
+    }
+    return d->key[i] ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Dump a dictionary to an opened file pointer.
+  @param    d   Dictionary to dump.
+  @param    f   Opened file pointer to dump to.
+  @return   void
+
+  This function prints out the contents of a dictionary, one element
+  per line, onto the provided file pointer. It is OK to specify @c stderr
+  or @c stdout as output files. This function is meant mostly for
+  debugging purposes.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_dump(const dictionary * d, FILE * f)
+{
+    int     i ;
+
+    if (d==NULL || f==NULL) return ;
+    for (i=0 ; i<d->size ; i++) {
+        if (d->key[i]==NULL)
+            continue ;
+        if (d->val[i]!=NULL) {
+            fprintf(f, "[%s]=[%s]\n", d->key[i], d->val[i]);
+        } else {
+            fprintf(f, "[%s]=UNDEF\n", d->key[i]);
+        }
+    }
+    return ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Save a dictionary to a loadable ini file
+  @param    d   Dictionary to dump
+  @param    f   Opened file pointer to dump to
+  @return   void
+
+  This function dumps a given dictionary into a loadable ini file.
+  It is Ok to specify @c stderr or @c stdout as output files.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_dump_ini(const dictionary * d, FILE * f)
+{
+    int          i ;
+    int          nsec ;
+    const char * secname ;
+
+    if (d==NULL || f==NULL) return ;
+
+    nsec = iniparser_getnsec(d);
+    if (nsec<1) {
+        /* No section in file: dump all keys as they are */
+        for (i=0 ; i<d->size ; i++) {
+            if (d->key[i]==NULL)
+                continue ;
+            fprintf(f, "%s = %s\n", d->key[i], d->val[i]);
+        }
+        return ;
+    }
+    for (i=0 ; i<nsec ; i++) {
+        secname = iniparser_getsecname(d, i) ;
+        iniparser_dumpsection_ini(d, secname, f);
+    }
+    fprintf(f, "\n");
+    return ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Save a dictionary section to a loadable ini file
+  @param    d   Dictionary to dump
+  @param    s   Section name of dictionary to dump
+  @param    f   Opened file pointer to dump to
+  @return   void
+
+  This function dumps a given section of a given dictionary into a loadable ini
+  file.  It is Ok to specify @c stderr or @c stdout as output files.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_dumpsection_ini(const dictionary * d, const char * s, FILE * f)
+{
+    int     j ;
+    char    keym[ASCIILINESZ+1];
+    int     seclen ;
+
+    if (d==NULL || f==NULL) return ;
+    if (! iniparser_find_entry(d, s)) return ;
+
+    seclen  = (int)strlen(s);
+    fprintf(f, "\n[%s]\n", s);
+    sprintf(keym, "%s:", s);
+    for (j=0 ; j<d->size ; j++) {
+        if (d->key[j]==NULL)
+            continue ;
+        if (!strncmp(d->key[j], keym, seclen+1)) {
+            fprintf(f,
+                    "%-30s = %s\n",
+                    d->key[j]+seclen+1,
+                    d->val[j] ? d->val[j] : "");
+        }
+    }
+    fprintf(f, "\n");
+    return ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the number of keys in a section of a dictionary.
+  @param    d   Dictionary to examine
+  @param    s   Section name of dictionary to examine
+  @return   Number of keys in section
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getsecnkeys(const dictionary * d, const char * s)
+{
+    int     seclen, nkeys ;
+    char    keym[ASCIILINESZ+1];
+    int j ;
+
+    nkeys = 0;
+
+    if (d==NULL) return nkeys;
+    if (! iniparser_find_entry(d, s)) return nkeys;
+
+    seclen  = (int)strlen(s);
+    strlwc(s, keym, sizeof(keym));
+    keym[seclen] = ':';
+
+    for (j=0 ; j<d->size ; j++) {
+        if (d->key[j]==NULL)
+            continue ;
+        if (!strncmp(d->key[j], keym, seclen+1))
+            nkeys++;
+    }
+
+    return nkeys;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the number of keys in a section of a dictionary.
+  @param    d    Dictionary to examine
+  @param    s    Section name of dictionary to examine
+  @param    keys Already allocated array to store the keys in
+  @return   The pointer passed as `keys` argument or NULL in case of error
+
+  This function queries a dictionary and finds all keys in a given section.
+  The keys argument should be an array of pointers whose size has been
+  determined by calling the `iniparser_getsecnkeys` function prior to this one.
+
+  Each pointer in the returned char pointer-to-pointer is pointing to
+  a string allocated in the dictionary; do not free or modify them.
+ */
+/*--------------------------------------------------------------------------*/
+const char ** iniparser_getseckeys(const dictionary * d, const char * s, const char ** keys)
+{
+    int i, j, seclen ;
+    char keym[ASCIILINESZ+1];
+
+    if (d==NULL || keys==NULL) return NULL;
+    if (! iniparser_find_entry(d, s)) return NULL;
+
+    seclen  = (int)strlen(s);
+    strlwc(s, keym, sizeof(keym));
+    keym[seclen] = ':';
+
+    i = 0;
+
+    for (j=0 ; j<d->size ; j++) {
+        if (d->key[j]==NULL)
+            continue ;
+        if (!strncmp(d->key[j], keym, seclen+1)) {
+            keys[i] = d->key[j];
+            i++;
+        }
+    }
+
+    return keys;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key
+  @param    d       Dictionary to search
+  @param    key     Key string to look for
+  @param    def     Default value to return if key not found.
+  @return   pointer to statically allocated character string
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the pointer passed as 'def' is returned.
+  The returned char pointer is pointing to a string allocated in
+  the dictionary, do not free or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+const char * iniparser_getstring(const dictionary * d, const char * key, const char * def)
+{
+    const char * lc_key ;
+    const char * sval ;
+    char tmp_str[ASCIILINESZ+1];
+
+    if (d==NULL || key==NULL)
+        return def ;
+
+    lc_key = strlwc(key, tmp_str, sizeof(tmp_str));
+    sval = dictionary_get(d, lc_key, def);
+    return sval ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to a long int
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   long integer
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+
+  Supported values for integers include the usual C notation
+  so decimal, octal (starting with 0) and hexadecimal (starting with 0x)
+  are supported. Examples:
+
+  "42"      ->  42
+  "042"     ->  34 (octal -> decimal)
+  "0x42"    ->  66 (hexa  -> decimal)
+
+  Warning: the conversion may overflow in various ways. Conversion is
+  totally outsourced to strtol(), see the associated man page for overflow
+  handling.
+
+  Credits: Thanks to A. Becker for suggesting strtol()
+ */
+/*--------------------------------------------------------------------------*/
+long int iniparser_getlongint(const dictionary * d, const char * key, long int notfound)
+{
+    const char * str ;
+
+    str = iniparser_getstring(d, key, INI_INVALID_KEY);
+    if (str==INI_INVALID_KEY) return notfound ;
+    return strtol(str, NULL, 0);
+}
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to an int
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   integer
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+
+  Supported values for integers include the usual C notation
+  so decimal, octal (starting with 0) and hexadecimal (starting with 0x)
+  are supported. Examples:
+
+  "42"      ->  42
+  "042"     ->  34 (octal -> decimal)
+  "0x42"    ->  66 (hexa  -> decimal)
+
+  Warning: the conversion may overflow in various ways. Conversion is
+  totally outsourced to strtol(), see the associated man page for overflow
+  handling.
+
+  Credits: Thanks to A. Becker for suggesting strtol()
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getint(const dictionary * d, const char * key, int notfound)
+{
+    return (int)iniparser_getlongint(d, key, notfound);
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to a double
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   double
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+ */
+/*--------------------------------------------------------------------------*/
+double iniparser_getdouble(const dictionary * d, const char * key, double notfound)
+{
+    const char * str ;
+
+    str = iniparser_getstring(d, key, INI_INVALID_KEY);
+    if (str==INI_INVALID_KEY) return notfound ;
+    return atof(str);
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to a boolean
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   integer
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+
+  A true boolean is found if one of the following is matched:
+
+  - A string starting with 'y'
+  - A string starting with 'Y'
+  - A string starting with 't'
+  - A string starting with 'T'
+  - A string starting with '1'
+
+  A false boolean is found if one of the following is matched:
+
+  - A string starting with 'n'
+  - A string starting with 'N'
+  - A string starting with 'f'
+  - A string starting with 'F'
+  - A string starting with '0'
+
+  The notfound value returned if no boolean is identified does not
+  necessarily have to be 0 or 1.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getboolean(const dictionary * d, const char * key, int notfound)
+{
+    int          ret ;
+    const char * c ;
+
+    c = iniparser_getstring(d, key, INI_INVALID_KEY);
+    if (c==INI_INVALID_KEY) return notfound ;
+    if (c[0]=='y' || c[0]=='Y' || c[0]=='1' || c[0]=='t' || c[0]=='T') {
+        ret = 1 ;
+    } else if (c[0]=='n' || c[0]=='N' || c[0]=='0' || c[0]=='f' || c[0]=='F') {
+        ret = 0 ;
+    } else {
+        ret = notfound ;
+    }
+    return ret;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Finds out if a given entry exists in a dictionary
+  @param    ini     Dictionary to search
+  @param    entry   Name of the entry to look for
+  @return   integer 1 if entry exists, 0 otherwise
+
+  Finds out if a given entry exists in the dictionary. Since sections
+  are stored as keys with NULL associated values, this is the only way
+  of querying for the presence of sections in a dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_find_entry(const dictionary * ini, const char * entry)
+{
+    int found=0 ;
+    if (iniparser_getstring(ini, entry, INI_INVALID_KEY)!=INI_INVALID_KEY) {
+        found = 1 ;
+    }
+    return found ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Set an entry in a dictionary.
+  @param    ini     Dictionary to modify.
+  @param    entry   Entry to modify (entry name)
+  @param    val     New value to associate to the entry.
+  @return   int 0 if Ok, -1 otherwise.
+
+  If the given entry can be found in the dictionary, it is modified to
+  contain the provided value. If it cannot be found, the entry is created.
+  It is Ok to set val to NULL.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_set(dictionary * ini, const char * entry, const char * val)
+{
+    char tmp_str[ASCIILINESZ+1];
+    return dictionary_set(ini, strlwc(entry, tmp_str, sizeof(tmp_str)), val) ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Delete an entry in a dictionary
+  @param    ini     Dictionary to modify
+  @param    entry   Entry to delete (entry name)
+  @return   void
+
+  If the given entry can be found, it is deleted from the dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_unset(dictionary * ini, const char * entry)
+{
+    char tmp_str[ASCIILINESZ+1];
+    dictionary_unset(ini, strlwc(entry, tmp_str, sizeof(tmp_str)));
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Load a single line from an INI file
+  @param    input_line  Input line, may be concatenated multi-line input
+  @param    section     Output space to store section
+  @param    key         Output space to store key
+  @param    value       Output space to store value
+  @return   line_status value
+ */
+/*--------------------------------------------------------------------------*/
+static line_status iniparser_line(
+    const char * input_line,
+    char * section,
+    char * key,
+    char * value)
+{
+    line_status sta ;
+    char * line = NULL;
+    size_t      len ;
+
+    line = xstrdup(input_line);
+    len = strstrip(line);
+
+    sta = LINE_UNPROCESSED ;
+    if (len<1) {
+        /* Empty line */
+        sta = LINE_EMPTY ;
+    } else if (line[0]=='#' || line[0]==';') {
+        /* Comment line */
+        sta = LINE_COMMENT ;
+    } else if (line[0]=='[' && line[len-1]==']') {
+        /* Section name */
+        sscanf(line, "[%[^]]", section);
+        strstrip(section);
+        strlwc(section, section, len);
+        sta = LINE_SECTION ;
+    } else if (sscanf (line, "%[^=] = \"%[^\"]\"", key, value) == 2
+           ||  sscanf (line, "%[^=] = '%[^\']'",   key, value) == 2) {
+        /* Usual key=value with quotes, with or without comments */
+        strstrip(key);
+        strlwc(key, key, len);
+        /* Don't strip spaces from values surrounded with quotes */
+        sta = LINE_VALUE ;
+    } else if (sscanf (line, "%[^=] = %[^;#]", key, value) == 2) {
+        /* Usual key=value without quotes, with or without comments */
+        strstrip(key);
+        strlwc(key, key, len);
+        strstrip(value);
+        /*
+         * sscanf cannot handle '' or "" as empty values
+         * this is done here
+         */
+        if (!strcmp(value, "\"\"") || (!strcmp(value, "''"))) {
+            value[0]=0 ;
+        }
+        sta = LINE_VALUE ;
+    } else if (sscanf(line, "%[^=] = %[;#]", key, value)==2
+           ||  sscanf(line, "%[^=] %[=]", key, value) == 2) {
+        /*
+         * Special cases:
+         * key=
+         * key=;
+         * key=#
+         */
+        strstrip(key);
+        strlwc(key, key, len);
+        value[0]=0 ;
+        sta = LINE_VALUE ;
+    } else {
+        /* Generate syntax error */
+        sta = LINE_ERROR ;
+    }
+
+    free(line);
+    return sta ;
+}
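+
+/*
+ * For illustration (editor's note), typical inputs map to:
+ *
+ *	"[Server]"            -> LINE_SECTION, section = "server"
+ *	"Port = 8080 ; note"  -> LINE_VALUE, key = "port", value = "8080"
+ *	"# comment"           -> LINE_COMMENT
+ *	""                    -> LINE_EMPTY
+ */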
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Parse an ini file and return an allocated dictionary object
+  @param    ininame Name of the ini file to read.
+  @return   Pointer to newly allocated dictionary
+
+  This is the parser for ini files. This function is called, providing
+  the name of the file to be read. It returns a dictionary object that
+  should not be accessed directly, but through accessor functions
+  instead.
+
+  The returned dictionary must be freed using iniparser_freedict().
+ */
+/*--------------------------------------------------------------------------*/
+dictionary * iniparser_load(const char * ininame)
+{
+    FILE * in ;
+
+    char line    [ASCIILINESZ+1] ;
+    char section [ASCIILINESZ+1] ;
+    char key     [ASCIILINESZ+1] ;
+    char tmp     [(ASCIILINESZ * 2) + 2] ;
+    char val     [ASCIILINESZ+1] ;
+
+    int  last=0 ;
+    int  len ;
+    int  lineno=0 ;
+    int  errs=0;
+    int  mem_err=0;
+
+    dictionary * dict ;
+
+    if ((in=fopen(ininame, "r"))==NULL) {
+        iniparser_error_callback("iniparser: cannot open %s\n", ininame);
+        return NULL ;
+    }
+
+    dict = dictionary_new(0) ;
+    if (!dict) {
+        fclose(in);
+        return NULL ;
+    }
+
+    memset(line,    0, ASCIILINESZ);
+    memset(section, 0, ASCIILINESZ);
+    memset(key,     0, ASCIILINESZ);
+    memset(val,     0, ASCIILINESZ);
+    last=0 ;
+
+    while (fgets(line+last, ASCIILINESZ-last, in)!=NULL) {
+        lineno++ ;
+        len = (int)strlen(line)-1;
+        if (len<=0)
+            continue;
+        /* Safety check against buffer overflows */
+        if (line[len]!='\n' && !feof(in)) {
+            iniparser_error_callback(
+              "iniparser: input line too long in %s (%d)\n",
+              ininame,
+              lineno);
+            dictionary_del(dict);
+            fclose(in);
+            return NULL ;
+        }
+        /* Get rid of \n and spaces at end of line */
+        while ((len>=0) &&
+                ((line[len]=='\n') || (isspace(line[len])))) {
+            line[len]=0 ;
+            len-- ;
+        }
+        if (len < 0) { /* Line was entirely \n and/or spaces */
+            len = 0;
+        }
+        /* Detect multi-line */
+        if (line[len]=='\\') {
+            /* Multi-line value */
+            last=len ;
+            continue ;
+        } else {
+            last=0 ;
+        }
+        switch (iniparser_line(line, section, key, val)) {
+            case LINE_EMPTY:
+            case LINE_COMMENT:
+            break ;
+
+            case LINE_SECTION:
+            mem_err = dictionary_set(dict, section, NULL);
+            break ;
+
+            case LINE_VALUE:
+            sprintf(tmp, "%s:%s", section, key);
+            mem_err = dictionary_set(dict, tmp, val);
+            break ;
+
+            case LINE_ERROR:
+            iniparser_error_callback(
+              "iniparser: syntax error in %s (%d):\n-> %s\n",
+              ininame,
+              lineno,
+              line);
+            errs++ ;
+            break;
+
+            default:
+            break ;
+        }
+        memset(line, 0, ASCIILINESZ);
+        last=0;
+        if (mem_err<0) {
+            iniparser_error_callback("iniparser: memory allocation failure\n");
+            break ;
+        }
+    }
+    if (errs) {
+        dictionary_del(dict);
+        dict = NULL ;
+    }
+    fclose(in);
+    return dict ;
+}
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Free all memory associated to an ini dictionary
+  @param    d Dictionary to free
+  @return   void
+
+  Free all memory associated to an ini dictionary.
+  It is mandatory to call this function before the dictionary object
+  gets out of the current context.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_freedict(dictionary * d)
+{
+    dictionary_del(d);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.h b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.h
new file mode 100644
index 0000000..37ff7b7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/iniparser/iniparser.h
@@ -0,0 +1,358 @@
+
+/*-------------------------------------------------------------------------*/
+/**
+   @file    iniparser.h
+   @author  N. Devillard
+   @brief   Parser for ini files.
+*/
+/*--------------------------------------------------------------------------*/
+
+#ifndef _INIPARSER_H_
+#define _INIPARSER_H_
+
+/*---------------------------------------------------------------------------
+                                Includes
+ ---------------------------------------------------------------------------*/
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+
+/*
+ * The following #include is necessary on many Unixes but not Linux.
+ * It is not needed for Windows platforms.
+ * Uncomment it if needed.
+ */
+/* #include <unistd.h> */
+
+#include "dictionary.h"
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Configure a function to receive the error messages.
+  @param    errback  Function to call.
+
+  By default, errors are printed on stderr. If a null pointer is passed
+  as errback, the error callback is switched back to the default.
+ */
+/*--------------------------------------------------------------------------*/
+
+void iniparser_set_error_callback(int (*errback)(const char *, ...));
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get number of sections in a dictionary
+  @param    d   Dictionary to examine
+  @return   int Number of sections found in dictionary
+
+  This function returns the number of sections found in a dictionary.
+  The test to recognize sections is done on the string stored in the
+  dictionary: a section name is given as "section" whereas a key is
+  stored as "section:key", thus the test looks for entries that do not
+  contain a colon.
+
+  This clearly fails in the case where a section name contains a colon,
+  but such names should simply be avoided.
+
+  This function returns -1 in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+
+int iniparser_getnsec(const dictionary * d);
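+
+/*
+  Illustrative note (an editorial addition, not upstream documentation):
+  after loading an ini file containing
+
+      [Pizza]
+      Ham = yes
+
+  the dictionary holds "pizza" as a section entry (NULL value) and
+  "pizza:ham" as a key entry, names being lowercased by the parser.
+*/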
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get name for section n in a dictionary.
+  @param    d   Dictionary to examine
+  @param    n   Section number (from 0 to nsec-1).
+  @return   Pointer to char string
+
+  This function locates the n-th section in a dictionary and returns
+  its name as a pointer to a string statically allocated inside the
+  dictionary. Do not free or modify the returned string!
+
+  This function returns NULL in case of error.
+ */
+/*--------------------------------------------------------------------------*/
+
+const char * iniparser_getsecname(const dictionary * d, int n);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Save a dictionary to a loadable ini file
+  @param    d   Dictionary to dump
+  @param    f   Opened file pointer to dump to
+  @return   void
+
+  This function dumps a given dictionary into a loadable ini file.
+  It is Ok to specify @c stderr or @c stdout as output files.
+ */
+/*--------------------------------------------------------------------------*/
+
+void iniparser_dump_ini(const dictionary * d, FILE * f);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Save a dictionary section to a loadable ini file
+  @param    d   Dictionary to dump
+  @param    s   Section name of dictionary to dump
+  @param    f   Opened file pointer to dump to
+  @return   void
+
+  This function dumps a given section of a given dictionary into a loadable ini
+  file.  It is Ok to specify @c stderr or @c stdout as output files.
+ */
+/*--------------------------------------------------------------------------*/
+
+void iniparser_dumpsection_ini(const dictionary * d, const char * s, FILE * f);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Dump a dictionary to an opened file pointer.
+  @param    d   Dictionary to dump.
+  @param    f   Opened file pointer to dump to.
+  @return   void
+
+  This function prints out the contents of a dictionary, one element
+  per line, onto the provided file pointer. It is OK to specify @c stderr
+  or @c stdout as output files. This function is meant for debugging
+  purposes mostly.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_dump(const dictionary * d, FILE * f);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the number of keys in a section of a dictionary.
+  @param    d   Dictionary to examine
+  @param    s   Section name of dictionary to examine
+  @return   Number of keys in section
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getsecnkeys(const dictionary * d, const char * s);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the keys in a section of a dictionary.
+  @param    d    Dictionary to examine
+  @param    s    Section name of dictionary to examine
+  @param    keys Already allocated array to store the keys in
+  @return   The pointer passed as `keys` argument or NULL in case of error
+
+  This function queries a dictionary and finds all keys in a given section.
+  The keys argument should be an array of pointers whose size has been
+  determined by calling `iniparser_getsecnkeys` prior to this one.
+
+  Each pointer in the returned char pointer-to-pointer is pointing to
+  a string allocated in the dictionary; do not free or modify them.
+ */
+/*--------------------------------------------------------------------------*/
+const char ** iniparser_getseckeys(const dictionary * d, const char * s, const char ** keys);
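+
+/*
+  A minimal usage sketch (illustrative; 'd' is a loaded dictionary and
+  "sec" a placeholder section name):
+
+      int i, nkeys = iniparser_getsecnkeys(d, "sec");
+      if (nkeys > 0) {
+          const char **keys = malloc(nkeys * sizeof(*keys));
+          if (keys && iniparser_getseckeys(d, "sec", keys))
+              for (i = 0; i < nkeys; i++)
+                  printf("%s\n", keys[i]);
+          free(keys);  // the strings belong to the dictionary, do not free
+      }
+*/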
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key
+  @param    d       Dictionary to search
+  @param    key     Key string to look for
+  @param    def     Default value to return if key not found.
+  @return   pointer to statically allocated character string
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the pointer passed as 'def' is returned.
+  The returned char pointer is pointing to a string allocated in
+  the dictionary, do not free or modify it.
+ */
+/*--------------------------------------------------------------------------*/
+const char * iniparser_getstring(const dictionary * d, const char * key, const char * def);
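+
+/*
+  Typical lookup flow (illustrative sketch; the file name and key names
+  are placeholders):
+
+      dictionary *d = iniparser_load("config.ini");
+      if (d != NULL) {
+          const char *host = iniparser_getstring(d, "net:host", "localhost");
+          printf("host=%s\n", host);
+          iniparser_freedict(d);
+      }
+*/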
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to an int
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   integer
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+
+  Supported values for integers include the usual C notation
+  so decimal, octal (starting with 0) and hexadecimal (starting with 0x)
+  are supported. Examples:
+
+  - "42"      ->  42
+  - "042"     ->  34 (octal -> decimal)
+  - "0x42"    ->  66 (hexa  -> decimal)
+
+  Warning: the conversion may overflow in various ways. Conversion is
+  totally outsourced to strtol(), see the associated man page for overflow
+  handling.
+
+  Credits: Thanks to A. Becker for suggesting strtol()
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getint(const dictionary * d, const char * key, int notfound);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to a long int
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   integer
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+
+  Supported values for integers include the usual C notation
+  so decimal, octal (starting with 0) and hexadecimal (starting with 0x)
+  are supported. Examples:
+
+  - "42"      ->  42
+  - "042"     ->  34 (octal -> decimal)
+  - "0x42"    ->  66 (hexa  -> decimal)
+
+  Warning: the conversion may overflow in various ways. Conversion is
+  totally outsourced to strtol(), see the associated man page for overflow
+  handling.
+ */
+/*--------------------------------------------------------------------------*/
+long int iniparser_getlongint(const dictionary * d, const char * key, long int notfound);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to a double
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   double
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+ */
+/*--------------------------------------------------------------------------*/
+double iniparser_getdouble(const dictionary * d, const char * key, double notfound);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Get the string associated to a key, convert to a boolean
+  @param    d Dictionary to search
+  @param    key Key string to look for
+  @param    notfound Value to return in case of error
+  @return   integer
+
+  This function queries a dictionary for a key. A key as read from an
+  ini file is given as "section:key". If the key cannot be found,
+  the notfound value is returned.
+
+  A true boolean is found if one of the following is matched:
+
+  - A string starting with 'y'
+  - A string starting with 'Y'
+  - A string starting with 't'
+  - A string starting with 'T'
+  - A string starting with '1'
+
+  A false boolean is found if one of the following is matched:
+
+  - A string starting with 'n'
+  - A string starting with 'N'
+  - A string starting with 'f'
+  - A string starting with 'F'
+  - A string starting with '0'
+
+  The notfound value is returned if no boolean is identified; it does
+  not necessarily have to be 0 or 1.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_getboolean(const dictionary * d, const char * key, int notfound);
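+
+/*
+  Illustrative example: with "enabled = Yes" under section [srv],
+  iniparser_getboolean(d, "srv:enabled", -1) returns 1; if the key is
+  absent, the -1 fallback is returned, so "not set" can be told apart
+  from both true (1) and false (0).
+*/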
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Set an entry in a dictionary.
+  @param    ini     Dictionary to modify.
+  @param    entry   Entry to modify (entry name)
+  @param    val     New value to associate to the entry.
+  @return   int     0 if Ok, -1 otherwise.
+
+  If the given entry can be found in the dictionary, it is modified to
+  contain the provided value. If it cannot be found, the entry is created.
+  It is Ok to set val to NULL.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_set(dictionary * ini, const char * entry, const char * val);
+
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Delete an entry in a dictionary
+  @param    ini     Dictionary to modify
+  @param    entry   Entry to delete (entry name)
+  @return   void
+
+  If the given entry can be found, it is deleted from the dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_unset(dictionary * ini, const char * entry);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Finds out if a given entry exists in a dictionary
+  @param    ini     Dictionary to search
+  @param    entry   Name of the entry to look for
+  @return   integer 1 if entry exists, 0 otherwise
+
+  Finds out if a given entry exists in the dictionary. Since sections
+  are stored as keys with NULL associated values, this is the only way
+  of querying for the presence of sections in a dictionary.
+ */
+/*--------------------------------------------------------------------------*/
+int iniparser_find_entry(const dictionary * ini, const char * entry) ;
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Parse an ini file and return an allocated dictionary object
+  @param    ininame Name of the ini file to read.
+  @return   Pointer to newly allocated dictionary
+
+  This is the parser for ini files. Call it with the name of the file
+  to be read. It returns a dictionary object that should not be
+  accessed directly, but only through the accessor functions.
+
+  The returned dictionary must be freed using iniparser_freedict().
+ */
+/*--------------------------------------------------------------------------*/
+dictionary * iniparser_load(const char * ininame);
+
+/*-------------------------------------------------------------------------*/
+/**
+  @brief    Free all memory associated to an ini dictionary
+  @param    d Dictionary to free
+  @return   void
+
+  Free all memory associated to an ini dictionary.
+  It is mandatory to call this function before the dictionary object
+  gets out of the current context.
+ */
+/*--------------------------------------------------------------------------*/
+void iniparser_freedict(dictionary * d);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/init/Makefile.am b/kernel/xenomai-v3.2.4/lib/boilerplate/init/Makefile.am
new file mode 100644
index 0000000..6481688
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/init/Makefile.am
@@ -0,0 +1,32 @@
+noinst_LIBRARIES = libbootstrap.a
+
+libbootstrap_a_SOURCES = bootstrap.c
+
+libbootstrap_a_CPPFLAGS =		\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/include		\
+	-I$(top_srcdir)/lib
+
+noinst_LTLIBRARIES = libbootstrap-pic.la
+
+libbootstrap_pic_la_SOURCES = bootstrap.c
+
+libbootstrap_pic_la_CPPFLAGS =		\
+	-D__BOOTSTRAP_DSO__		\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/include		\
+	-I$(top_srcdir)/lib
+
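+# The .lo file named by $(libbootstrap_pic_la_OBJECTS) is a libtool
+# object descriptor readable as shell code; sourcing it yields
+# $pic_object, the path of the PIC object installed as bootstrap-pic.o.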
+get_pic_object = $(shell source ./$(libbootstrap_pic_la_OBJECTS) && echo $$pic_object)
+
+all-local: $(libbootstrap_a_OBJECTS)
+	@cp $< bootstrap-internal.o
+
+install-data-local:
+	@$(mkinstalldirs) $(DESTDIR)$(libdir)/xenomai
+	@$(INSTALL_DATA) $(libbootstrap_a_OBJECTS) $(DESTDIR)$(libdir)/xenomai/bootstrap.o
+	@test -r $(call get_pic_object) && \
+	$(INSTALL_DATA) $(call get_pic_object) $(DESTDIR)$(libdir)/xenomai/bootstrap-pic.o || true
+
+uninstall-local:
+	$(RM) $(DESTDIR)$(libdir)/xenomai/bootstrap*.o
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/init/bootstrap.c b/kernel/xenomai-v3.2.4/lib/boilerplate/init/bootstrap.c
new file mode 100644
index 0000000..64a11c2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/init/bootstrap.c
@@ -0,0 +1,172 @@
+/*
+ * The source code in this particular file is released using a dual
+ * license scheme.  You can choose the licence that better fits your
+ * requirements.
+ *
+ * -----------------------------------------------------------------------
+ *
+ * Copyright (C) 2017 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * -----------------------------------------------------------------------
+ *
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <fcntl.h>
+#include <xenomai/init.h>
+
+static int early_argc;
+
+static char *const *early_argv;
+
+/*
+ * The bootstrap module object is built in two forms:
+ *
+ * - in static object form, to be glued to the main executable, which
+ *   should include a wrapper interposing on the main() routine for
+ *   auto-init purpose. Such wrapper is activated when symbol wrapping
+ *   is enabled at link time (--wrap).
+ *    
+ * - in dynamic object form, to be included in a shared library target
+ *   which enables the auto-init feature. This form should not include
+ *   any wrapper to a main() routine - which does not exist - but only
+ *   a constructor routine performing the inits.
+ *
+ * The macro __BOOTSTRAP_DSO__ tells us whether we are building the
+ * bootstrap module to be glued into a dynamic shared object. If not,
+ * the main() interception code should be present in the relocatable
+ * object.
+ */
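+
+/*
+ * Illustrative link line (an assumption, not taken from this patch):
+ * the static form is meant to be linked with GNU ld symbol wrapping,
+ * e.g.
+ *
+ *     gcc app.o bootstrap.o -Wl,--wrap=main ...
+ *
+ * so that calls to main() land in __wrap_main() below, while
+ * __real_main() resolves to the application's own main().
+ */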
+
+#ifdef __BOOTSTRAP_DSO__
+
+static inline void call_init(int *argcp, char *const **argvp)
+{
+	xenomai_init_dso(argcp, argvp);
+}
+
+#else
+
+const int xenomai_auto_bootstrap = 1;
+
+int __real_main(int argc, char *const argv[]);
+
+int __wrap_main(int argc, char *const argv[])
+__attribute__((alias("xenomai_main"), weak));
+
+int xenomai_main(int argc, char *const argv[])
+{
+	if (early_argc)
+		return __real_main(early_argc, early_argv);
+	
+	xenomai_init(&argc, &argv);
+
+	return __real_main(argc, argv);
+}
+
+static inline void call_init(int *argcp, char *const **argvp)
+{
+	xenomai_init(argcp, argvp);
+}
+
+#endif /* !__BOOTSTRAP_DSO__ */
+
+__bootstrap_ctor static void xenomai_bootstrap(void)
+{
+	char *arglist, *argend, *p, **v, *const *argv;
+	ssize_t len, ret;
+	int fd, n, argc;
+
+	len = 1024;
+
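+	/*
+	 * /proc/self/cmdline has no size we can query reliably up
+	 * front, so read it with a geometrically growing buffer: if
+	 * read() fills the buffer entirely, the contents may have been
+	 * truncated; double the buffer and start over.
+	 */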
+	for (;;) {
+		fd = __STD(open("/proc/self/cmdline", O_RDONLY));
+		if (fd < 0)
+			return;
+
+		arglist = __STD(malloc(len));
+		if (arglist == NULL) {
+			__STD(close(fd));
+			return;
+		}
+
+		ret = __STD(read(fd, arglist, len));
+		__STD(close(fd));
+
+		if (ret < 0) {
+			__STD(free(arglist));
+			return;
+		}
+
+		if (ret < len)
+			break;
+
+		__STD(free(arglist));
+		len <<= 1;
+	}
+
+	argend = arglist + ret;
+	p = arglist;
+	n = 0;
+	while (p < argend) {
+		n++;
+		p += strlen(p) + 1;
+	}
+
+	v = __STD(malloc((n + 1) * sizeof(char *)));
+	if (v == NULL) {
+		__STD(free(arglist));
+		return;
+	}
+
+	p = arglist;
+	n = 0;
+	while (p < argend) {
+		v[n++] = p;
+		p += strlen(p) + 1;
+	}
+
+	v[n] = NULL;
+	argv = v;
+	argc = n;
+
+	call_init(&argc, &argv);
+	early_argc = argc;
+	early_argv = argv;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/obstack.c b/kernel/xenomai-v3.2.4/lib/boilerplate/obstack.c
new file mode 100644
index 0000000..fab62ce
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/obstack.c
@@ -0,0 +1,356 @@
+/* obstack.c - subroutines used implicitly by object stack macros
+   Copyright (C) 1988, 1989, 1990, 1991, 1992, 1993, 1994, 1996, 1997, 1998,
+   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
+   This file is part of the GNU C Library.
+
+   The GNU C Library is free software; you can redistribute it and/or
+   modify it under the terms of the GNU Lesser General Public
+   License as published by the Free Software Foundation; either
+   version 2.1 of the License, or (at your option) any later version.
+
+   The GNU C Library is distributed in the hope that it will be useful,
+   but WITHOUT ANY WARRANTY; without even the implied warranty of
+   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+   Lesser General Public License for more details.
+
+   You should have received a copy of the GNU Lesser General Public
+   License along with the GNU C Library; if not, write to the Free
+   Software Foundation, Inc., 51 Franklin Street, Fifth Floor,
+   Boston, MA 02110-1301, USA.  */
+
+
+#include <xeno_config.h>
+#include <boilerplate/obstack.h>
+
+/* Use this obstack implementation unconditionally.  */
+
+#include <stdio.h>
+#include <stddef.h>
+#include <stdint.h>
+
+/* Determine default alignment.  */
+union fooround
+{
+  uintmax_t i;
+  long double d;
+  void *p;
+};
+struct fooalign
+{
+  char c;
+  union fooround u;
+};
+/* If malloc were really smart, it would round addresses to DEFAULT_ALIGNMENT.
+   But in fact it might be less smart and round addresses to as much as
+   DEFAULT_ROUNDING.  So we prepare for it to do that.  */
+enum
+  {
+    DEFAULT_ALIGNMENT = offsetof (struct fooalign, u),
+    DEFAULT_ROUNDING = sizeof (union fooround)
+  };
+
+/* When we copy a long block of data, this is the unit to do it with.
+   On some machines, copying successive ints does not work;
+   in such a case, redefine COPYING_UNIT to `long' (if that works)
+   or `char' as a last resort.  */
+# ifndef COPYING_UNIT
+#  define COPYING_UNIT int
+# endif
+
+
+/* The functions allocating more room by calling `obstack_chunk_alloc'
+   jump to the handler pointed to by `obstack_alloc_failed_handler'.
+   This can be set to a user defined function which should either
+   abort gracefully or use longjump - but shouldn't return.  This
+   variable by default points to the internal function
+   `print_and_abort'.  */
+static void print_and_abort (void);
+void (*obstack_alloc_failed_handler) (void) = print_and_abort;
+
+/* Exit value used when `print_and_abort' is used.  */
+# include <stdlib.h>
+int obstack_exit_failure = EXIT_FAILURE;
+
+/* Define a macro that either calls functions with the traditional malloc/free
+   calling interface, or calls functions with the mmalloc/mfree interface
+   (that adds an extra first argument), based on the state of use_extra_arg.
+   For free, do not use ?:, since some compilers, like the MIPS compilers,
+   do not allow (expr) ? void : void.  */
+
+# define CALL_CHUNKFUN(h, size) \
+  (((h) -> use_extra_arg) \
+   ? (*(h)->chunkfun) ((h)->extra_arg, (size)) \
+   : (*(struct _obstack_chunk *(*) (long)) (h)->chunkfun) ((size)))
+
+# define CALL_FREEFUN(h, old_chunk) \
+  do { \
+    if ((h) -> use_extra_arg) \
+      (*(h)->freefun) ((h)->extra_arg, (old_chunk)); \
+    else \
+      (*(void (*) (void *)) (h)->freefun) ((old_chunk)); \
+  } while (0)
+
+
+/* Initialize an obstack H for use.  Specify chunk size SIZE (0 means default).
+   Objects start on multiples of ALIGNMENT (0 means use default).
+   CHUNKFUN is the function to use to allocate chunks,
+   and FREEFUN the function to free them.
+
+   Return nonzero if successful, calls obstack_alloc_failed_handler if
+   allocation fails.  */
+
+int
+_obstack_begin (struct obstack *h,
+		int size, int alignment,
+		void *(*chunkfun) (long),
+		void (*freefun) (void *))
+{
+  struct _obstack_chunk *chunk; /* points to new chunk */
+
+  if (alignment == 0)
+    alignment = DEFAULT_ALIGNMENT;
+  if (size == 0)
+    /* Default size is what GNU malloc can fit in a 4096-byte block.  */
+    {
+      /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
+	 Use the values for range checking, because if range checking is off,
+	 the extra bytes won't be missed terribly, but if range checking is on
+	 and we used a larger request, a whole extra 4096 bytes would be
+	 allocated.
+
+	 These numbers are irrelevant to the new GNU malloc.  I suspect it is
+	 less sensitive to the size of the request.  */
+      int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+		    + 4 + DEFAULT_ROUNDING - 1)
+		   & ~(DEFAULT_ROUNDING - 1));
+      size = 4096 - extra;
+    }
+
+  h->chunkfun = (struct _obstack_chunk * (*)(void *, long)) chunkfun;
+  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
+  h->chunk_size = size;
+  h->alignment_mask = alignment - 1;
+  h->use_extra_arg = 0;
+
+  chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
+  if (!chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
+					       alignment - 1);
+  h->chunk_limit = chunk->limit
+    = (char *) chunk + h->chunk_size;
+  chunk->prev = 0;
+  /* The initial chunk now contains no empty object.  */
+  h->maybe_empty_object = 0;
+  h->alloc_failed = 0;
+  return 1;
+}
+
+int
+_obstack_begin_1 (struct obstack *h, int size, int alignment,
+		  void *(*chunkfun) (void *, long),
+		  void (*freefun) (void *, void *),
+		  void *arg)
+{
+  struct _obstack_chunk *chunk; /* points to new chunk */
+
+  if (alignment == 0)
+    alignment = DEFAULT_ALIGNMENT;
+  if (size == 0)
+    /* Default size is what GNU malloc can fit in a 4096-byte block.  */
+    {
+      /* 12 is sizeof (mhead) and 4 is EXTRA from GNU malloc.
+	 Use the values for range checking, because if range checking is off,
+	 the extra bytes won't be missed terribly, but if range checking is on
+	 and we used a larger request, a whole extra 4096 bytes would be
+	 allocated.
+
+	 These numbers are irrelevant to the new GNU malloc.  I suspect it is
+	 less sensitive to the size of the request.  */
+      int extra = ((((12 + DEFAULT_ROUNDING - 1) & ~(DEFAULT_ROUNDING - 1))
+		    + 4 + DEFAULT_ROUNDING - 1)
+		   & ~(DEFAULT_ROUNDING - 1));
+      size = 4096 - extra;
+    }
+
+  h->chunkfun = (struct _obstack_chunk * (*)(void *,long)) chunkfun;
+  h->freefun = (void (*) (void *, struct _obstack_chunk *)) freefun;
+  h->chunk_size = size;
+  h->alignment_mask = alignment - 1;
+  h->extra_arg = arg;
+  h->use_extra_arg = 1;
+
+  chunk = h->chunk = CALL_CHUNKFUN (h, h -> chunk_size);
+  if (!chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->next_free = h->object_base = __PTR_ALIGN ((char *) chunk, chunk->contents,
+					       alignment - 1);
+  h->chunk_limit = chunk->limit
+    = (char *) chunk + h->chunk_size;
+  chunk->prev = 0;
+  /* The initial chunk now contains no empty object.  */
+  h->maybe_empty_object = 0;
+  h->alloc_failed = 0;
+  return 1;
+}
+
+/* Allocate a new current chunk for the obstack *H
+   on the assumption that LENGTH bytes need to be added
+   to the current object, or a new object of length LENGTH allocated.
+   Copies any partial object from the end of the old chunk
+   to the beginning of the new one.  */
+
+void
+_obstack_newchunk (struct obstack *h, int length)
+{
+  struct _obstack_chunk *old_chunk = h->chunk;
+  struct _obstack_chunk *new_chunk;
+  long	new_size;
+  long obj_size = h->next_free - h->object_base;
+  long i;
+  long already;
+  char *object_base;
+
+  /* Compute size for new chunk.  */
+  new_size = (obj_size + length) + (obj_size >> 3) + h->alignment_mask + 100;
+  if (new_size < h->chunk_size)
+    new_size = h->chunk_size;
+
+  /* Allocate and initialize the new chunk.  */
+  new_chunk = CALL_CHUNKFUN (h, new_size);
+  if (!new_chunk)
+    (*obstack_alloc_failed_handler) ();
+  h->chunk = new_chunk;
+  new_chunk->prev = old_chunk;
+  new_chunk->limit = h->chunk_limit = (char *) new_chunk + new_size;
+
+  /* Compute an aligned object_base in the new chunk */
+  object_base =
+    __PTR_ALIGN ((char *) new_chunk, new_chunk->contents, h->alignment_mask);
+
+  /* Move the existing object to the new chunk.
+     Word at a time is fast and is safe if the object
+     is sufficiently aligned.  */
+  if (h->alignment_mask + 1 >= DEFAULT_ALIGNMENT)
+    {
+      for (i = obj_size / sizeof (COPYING_UNIT) - 1;
+	   i >= 0; i--)
+	((COPYING_UNIT *)object_base)[i]
+	  = ((COPYING_UNIT *)h->object_base)[i];
+      /* We used to copy the odd few remaining bytes as one extra COPYING_UNIT,
+	 but that can cross a page boundary on a machine
+	 which does not do strict alignment for COPYING_UNITS.  */
+      already = obj_size / sizeof (COPYING_UNIT) * sizeof (COPYING_UNIT);
+    }
+  else
+    already = 0;
+  /* Copy remaining bytes one by one.  */
+  for (i = already; i < obj_size; i++)
+    object_base[i] = h->object_base[i];
+
+  /* If the object just copied was the only data in OLD_CHUNK,
+     free that chunk and remove it from the chain.
+     But not if that chunk might contain an empty object.  */
+  if (! h->maybe_empty_object
+      && (h->object_base
+	  == __PTR_ALIGN ((char *) old_chunk, old_chunk->contents,
+			  h->alignment_mask)))
+    {
+      new_chunk->prev = old_chunk->prev;
+      CALL_FREEFUN (h, old_chunk);
+    }
+
+  h->object_base = object_base;
+  h->next_free = h->object_base + obj_size;
+  /* The new chunk certainly contains no empty object yet.  */
+  h->maybe_empty_object = 0;
+}
+
+/* Return nonzero if object OBJ has been allocated from obstack H.
+   This is here for debugging.
+   If you use it in a program, you are probably losing.  */
+
+/* Suppress -Wmissing-prototypes warning.  We don't want to declare this in
+   obstack.h because it is just for debugging.  */
+int _obstack_allocated_p (struct obstack *h, void *obj);
+
+int
+_obstack_allocated_p (struct obstack *h, void *obj)
+{
+  struct _obstack_chunk *lp;	/* below addr of any objects in this chunk */
+  struct _obstack_chunk *plp;	/* point to previous chunk if any */
+
+  lp = (h)->chunk;
+  /* We use >= rather than > since the object cannot be exactly at
+     the beginning of the chunk but might be an empty object exactly
+     at the end of an adjacent chunk.  */
+  while (lp != 0 && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+    {
+      plp = lp->prev;
+      lp = plp;
+    }
+  return lp != 0;
+}
+
+/* Free objects in obstack H, including OBJ and everything allocated
+   more recently than OBJ.  If OBJ is zero, free everything in H.  */
+
+# undef obstack_free
+
+void
+obstack_free (struct obstack *h, void *obj)
+{
+  struct _obstack_chunk *lp;	/* below addr of any objects in this chunk */
+  struct _obstack_chunk *plp;	/* point to previous chunk if any */
+
+  lp = h->chunk;
+  /* We use >= because there cannot be an object at the beginning of a chunk.
+     But there can be an empty object at that address
+     at the end of another chunk.  */
+  while (lp != 0 && ((void *) lp >= obj || (void *) (lp)->limit < obj))
+    {
+      plp = lp->prev;
+      CALL_FREEFUN (h, lp);
+      lp = plp;
+      /* If we switch chunks, we can't tell whether the new current
+	 chunk contains an empty object, so assume that it may.  */
+      h->maybe_empty_object = 1;
+    }
+  if (lp)
+    {
+      h->object_base = h->next_free = (char *) (obj);
+      h->chunk_limit = lp->limit;
+      h->chunk = lp;
+    }
+  else if (obj != 0)
+    /* obj is not in any of the chunks! */
+    abort ();
+}
+
+
+int
+_obstack_memory_used (struct obstack *h)
+{
+  struct _obstack_chunk* lp;
+  int nbytes = 0;
+
+  for (lp = h->chunk; lp != 0; lp = lp->prev)
+    {
+      nbytes += lp->limit - (char *) lp;
+    }
+  return nbytes;
+}
+
+/* Define the error handler.  */
+static void
+__attribute__ ((noreturn))
+print_and_abort (void)
+{
+  /* Don't change any of these strings.  Yes, it would be possible to add
+     the newline to the string and use fputs or so.  But this must not
+     happen because the "memory exhausted" message appears in other places
+     like this and the translation should be reused instead of creating
+     a very similar string which requires a separate translation.  */
+  fprintf (stderr, "memory exhausted\n");
+  exit (obstack_exit_failure);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/setup.c b/kernel/xenomai-v3.2.4/lib/boilerplate/setup.c
new file mode 100644
index 0000000..5139560
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/setup.c
@@ -0,0 +1,737 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sched.h>
+#include <getopt.h>
+#include <string.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <ctype.h>
+#include <memory.h>
+#include <malloc.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <assert.h>
+#include <xeno_config.h>
+#include <boilerplate/lock.h>
+#include <boilerplate/debug.h>
+#include <boilerplate/ancillaries.h>
+#include <xenomai/init.h>
+
+struct base_setup_data __base_setup_data = {
+	.no_sanity = !CONFIG_XENO_SANITY,
+	.verbosity_level = 1,
+	.trace_level = 0,
+	.arg0 = NULL,
+	.no_mlock = 0,
+};
+
+pid_t __node_id = 0;
+
+int __config_done = 0;
+
+const int __weak xenomai_auto_bootstrap = 0;
+
+static int base_init_done;
+
+static int main_init_done;
+
+static DEFINE_PRIVATE_LIST(setup_list);
+
+static const struct option base_options[] = {
+	{
+#define help_opt	0
+		.name = "help",
+	},
+	{
+#define affinity_opt	1
+		.name = "cpu-affinity",
+		.has_arg = required_argument,
+	},
+	{
+#define verbose_opt	2
+		.name = "verbose",
+		.has_arg = optional_argument,
+	},
+	{
+#define silent_opt	3
+		.name = "silent",
+		.has_arg = no_argument,
+		.flag = &__base_setup_data.verbosity_level,
+		.val = 0,
+	},
+	{
+#define quiet_opt	4
+		.name = "quiet",
+		.has_arg = no_argument,
+		.flag = &__base_setup_data.verbosity_level,
+		.val = 0,
+	},
+	{
+#define version_opt	5
+		.name = "version",
+		.has_arg = no_argument,
+	},
+	{
+#define dumpconfig_opt	6
+		.name = "dump-config",
+		.has_arg = no_argument,
+	},
+	{
+#define no_sanity_opt	7
+		.name = "no-sanity",
+		.has_arg = no_argument,
+		.flag = &__base_setup_data.no_sanity,
+		.val = 1
+	},
+	{
+#define sanity_opt	8
+		.name = "sanity",
+		.has_arg = no_argument,
+		.flag = &__base_setup_data.no_sanity,
+	},
+	{
+#define trace_opt	9
+		.name = "trace",
+		.has_arg = optional_argument,
+	},
+	{
+#define no_mlock_opt	10
+#ifdef CONFIG_XENO_MERCURY
+		.name = "no-mlock",
+		.has_arg = no_argument,
+		.flag = &__base_setup_data.no_mlock,
+		.val = 1
+#endif
+	},
+	{ /* Sentinel */ }
+};
+
+void __weak application_version(void)
+{
+	/*
+	 * Applications can implement this hook for dumping their own
+	 * version stamp.
+	 */
+}
+
+static inline void print_version(void)
+{
+	application_version();
+	fprintf(stderr, "based on %s\n", xenomai_version_string);
+}
+
+static inline void dump_configuration(void)
+{
+	int n;
+
+	print_version();
+
+	for (n = 0; config_strings[n]; n++)
+		puts(config_strings[n]);
+
+	printf("PTHREAD_STACK_DEFAULT=%d\n", PTHREAD_STACK_DEFAULT);
+	printf("AUTOMATIC_BOOTSTRAP=%d\n", xenomai_auto_bootstrap);
+}
+
+static inline int resolve_cpuid(const char *s)
+{
+	return isdigit(*s) ? atoi(s) : -1;
+}
+
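+/*
+ * Turn a CPU list such as "0,2-3" into a cpu_set_t. As implemented
+ * below: "N" selects a single CPU, "N-M" an inclusive range, "N-"
+ * the range from N up to the last configured CPU, and "-M" the range
+ * 0..M; comma-separated entries accumulate into the same mask.
+ */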
+static int collect_cpu_affinity(const char *cpu_list)
+{
+	char *s, *n, *range, *range_p = NULL, *id, *id_r;
+	int start, end, cpu, nr_cpus, ret;
+
+	/*
+	 * We don't know which CPUs are online yet, but we may know
+	 * which CPU identifier range is valid. Ask for the number of
+	 * processors configured to find out.
+	 */
+	nr_cpus = (int)sysconf(_SC_NPROCESSORS_CONF);
+	if (nr_cpus < 0) {
+		ret = -errno;
+		warning("sysconf(_SC_NPROCESSORS_CONF) failed [%s]", symerror(ret));
+		return ret;
+	}
+
+	CPU_ZERO(&__base_setup_data.cpu_affinity);
+
+	s = n = strdup(cpu_list);
+	while ((range = strtok_r(n, ",", &range_p)) != NULL) {
+		if (*range == '\0')
+			goto next;
+		end = -1;
+		if (range[strlen(range)-1] == '-')
+			end = nr_cpus - 1;
+		id = strtok_r(range, "-", &id_r);
+		if (id) {
+			start = resolve_cpuid(id);
+			if (*range == '-') {
+				end = start;
+				start = 0;
+			}
+			id = strtok_r(NULL, "-", &id_r);
+			if (id)
+				end = resolve_cpuid(id);
+			else if (end < 0)
+				end = start;
+			if (start < 0 || start >= nr_cpus ||
+			    end < 0 || end >= nr_cpus)
+				goto fail;
+		} else {
+			start = 0;
+			end = nr_cpus - 1;
+		}
+		for (cpu = start; cpu <= end; cpu++)
+			CPU_SET(cpu, &__base_setup_data.cpu_affinity);
+	next:
+		n = NULL;
+	}
+
+	free(s);
+
+	/*
+	 * Check that we may use this affinity: at least one CPU from the
+	 * given set should be available for running threads. Since
+	 * CPU affinity will be inherited by children threads, we only
+	 * have to set it here.
+	 *
+	 * NOTE: we don't clear __base_setup_data.cpu_affinity on
+	 * entry to this routine to allow cumulative --cpu-affinity
+	 * options to appear in the command line arguments.
+	 */
+	ret = sched_setaffinity(0, sizeof(__base_setup_data.cpu_affinity),
+				&__base_setup_data.cpu_affinity);
+	if (ret) {
+		ret = -errno;
+		early_warning("invalid CPU in '%s'", cpu_list);
+		return ret;
+	}
+
+	return 0;
+fail:
+	warning("invalid CPU number/range in '%s'", cpu_list);
+	free(s);
+
+	return -EINVAL;
+}
+
+static void retrieve_default_cpu_affinity(void)
+{
+	CPU_ZERO(&__base_setup_data.cpu_affinity);
+
+#ifdef CONFIG_XENO_COBALT
+	/*
+	 * If the Cobalt core has restricted the CPU set, update our
+	 * mask accordingly.
+	 */
+	unsigned long cpumask;
+	FILE *fp;
+	int cpu;
+
+	fp = fopen("/proc/xenomai/affinity", "r");
+	if (fp == NULL)
+		return;	/* uhh? */
+
+	if (fscanf(fp, "%lx", &cpumask) == 1) {
+		for (cpu = 0; cpumask; cpumask >>= 1, cpu++)
+			if (cpumask & 1)
+				CPU_SET(cpu, &__base_setup_data.cpu_affinity);
+	}
+
+	fclose(fp);
+#endif
+}
+
+static inline char **prep_args(int argc, char *const argv[])
+{
+	char **uargv;
+	int n;
+
+	uargv = malloc((argc + 1) * sizeof(char *));
+	if (uargv == NULL)
+		return NULL;
+
+	for (n = 0; n < argc; n++) {
+		uargv[n] = strdup(argv[n]);
+		if (uargv[n] == NULL)
+			return NULL;
+	}
+
+	uargv[argc] = NULL;
+
+	return uargv;
+}
+
+static inline void pack_args(int *argcp, char **argv)
+{
+	int in, out;
+
+	for (in = out = 0; in < *argcp; in++) {
+		if (*argv[in])
+			argv[out++] = argv[in];
+		else
+			free(argv[in]);
+	}
+
+	argv[out] = NULL;
+	*argcp = out;
+}
+
+static struct option *build_option_array(int *base_opt_startp)
+{
+	struct setup_descriptor *setup;
+	struct option *options, *q;
+	const struct option *p;
+	int nopts;
+
+	nopts = sizeof(base_options) / sizeof(base_options[0]);
+
+	if (!pvlist_empty(&setup_list)) {
+		pvlist_for_each_entry(setup, &setup_list, __reserved.next) {
+			p = setup->options;
+			if (p) {
+				while (p->name) {
+					nopts++;
+					p++;
+				}
+			}
+		}
+	}
+
+	options = malloc(sizeof(*options) * nopts);
+	if (options == NULL)
+		return NULL;
+
+	q = options;
+
+	if (!pvlist_empty(&setup_list)) {
+		pvlist_for_each_entry(setup, &setup_list, __reserved.next) {
+			p = setup->options;
+			if (p) {
+				setup->__reserved.opt_start = q - options;
+				while (p->name)
+					memcpy(q++, p++, sizeof(*q));
+			}
+			setup->__reserved.opt_end = q - options;
+		}
+	}
+
+	*base_opt_startp = q - options;
+	memcpy(q, base_options, sizeof(base_options));
+
+	return options;
+}
+
+void __weak application_usage(void)
+{
+	/*
+	 * Applications can implement this hook for dumping their own
+	 * help strings.
+	 */
+        fprintf(stderr, "usage: %s <options>:\n", get_program_name());
+}
+
+void xenomai_usage(void)
+{
+	struct setup_descriptor *setup;
+
+	print_version();
+
+	/*
+	 * Dump help strings from the highest level code to the
+	 * lowest.
+	 */
+	application_usage();
+
+	if (!pvlist_empty(&setup_list)) {
+		pvlist_for_each_entry_reverse(setup, &setup_list,
+					      __reserved.next) {
+			if (setup->help)
+				setup->help();
+		}
+	}
+
+        fprintf(stderr, "--cpu-affinity=<cpu[,cpu]...>	set CPU affinity of threads\n");
+        fprintf(stderr, "--[no-]sanity			disable/enable sanity checks\n");
+        fprintf(stderr, "--verbose[=level] 		set verbosity to desired level [=1]\n");
+        fprintf(stderr, "--silent, --quiet 		same as --verbose=0\n");
+        fprintf(stderr, "--trace[=level] 		set tracing to desired level [=1]\n");
+        fprintf(stderr, "--version			get version information\n");
+        fprintf(stderr, "--dump-config			dump configuration settings\n");
+#ifdef CONFIG_XENO_MERCURY
+        fprintf(stderr, "--no-mlock			do not lock memory at init\n");
+#endif
+        fprintf(stderr, "--help				display help\n");
+}
+
+static int parse_base_options(int *argcp, char **uargv,
+			      const struct option *options,
+			      int base_opt_start)
+{
+	int c, lindex, ret, n;
+
+	__base_setup_data.arg0 = uargv[0];
+	opterr = 0;
+
+	/*
+	 * NOTE: since we pack the argument vector on the fly while
+	 * processing the options, optarg should be considered as
+	 * volatile by option handlers; i.e. strdup() is required if
+	 * the value has to be retained. Values from the user vector
+	 * returned by xenomai_init() live in permanent memory though.
+	 */
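+
+	/*
+	 * For instance (illustrative): a handler that must keep a
+	 * string argument should store strdup(optarg), never the
+	 * optarg pointer itself.
+	 */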
+
+	for (;;) {
+		lindex = -1;
+		c = getopt_long(*argcp, uargv, "-", options, &lindex);
+		if (c == EOF)
+			break;
+		if (lindex == -1)
+			continue;
+
+		switch (lindex - base_opt_start) {
+		case affinity_opt:
+			ret = collect_cpu_affinity(optarg);
+			if (ret)
+				return ret;
+			break;
+		case verbose_opt:
+			__base_setup_data.verbosity_level = 1;
+			if (optarg)
+				__base_setup_data.verbosity_level = atoi(optarg);
+			break;
+		case trace_opt:
+			__base_setup_data.trace_level = 1;
+			if (optarg)
+				__base_setup_data.trace_level = atoi(optarg);
+			break;
+		case silent_opt:
+		case quiet_opt:
+		case no_mlock_opt:
+		case no_sanity_opt:
+		case sanity_opt:
+			break;
+		case version_opt:
+			print_version();
+			exit(0);
+		case dumpconfig_opt:
+			dump_configuration();
+			exit(0);
+		case help_opt:
+			xenomai_usage();
+			exit(0);
+		default:
+			/* Skin option, don't process yet. */
+			continue;
+		}
+
+		/*
+		 * Clear the first byte of the base option we found
+		 * (including any companion argument), pack_args()
+		 * will expunge all options we have already handled.
+		 *
+		 * NOTE: only options with double-dash prefix may have
+		 * been recognized by getopt_long() as Xenomai
+		 * ones. This reserves short options to the
+		 * application layer, sharing only the long option
+		 * namespace with the Xenomai core libs. In addition,
+		 * the user can delimit the start of the application
+		 * arguments, preceding them by the '--' separator on
+		 * the command line.
+		 */
+		n = optind - 1;
+		if (uargv[n][0] != '-' || uargv[n][1] != '-')
+			/* Clear the separate argument value. */
+			uargv[n--][0] = '\0';
+		uargv[n][0] = '\0'; /* Clear the option switch. */
+	}
+
+	pack_args(argcp, uargv);
+
+	optind = 0;
+
+	return 0;
+}
+
+static int parse_setup_options(int *argcp, char **uargv,
+			       const struct option *options)
+{
+	struct setup_descriptor *setup;
+	int lindex, n, c, ret;
+
+	for (;;) {
+		lindex = -1;
+		/*
+		 * We want to keep the original order of parameters in
+		 * the vector, disable getopt's parameter shuffling.
+		 */
+		c = getopt_long(*argcp, uargv, "-", options, &lindex);
+		if (c == EOF)
+			break;
+		if (lindex == -1)
+			continue; /* Not handled here. */
+		pvlist_for_each_entry(setup, &setup_list, __reserved.next) {
+			if (setup->__reserved.done ||
+			    setup->parse_option == NULL)
+				continue;
+			if (lindex < setup->__reserved.opt_start ||
+			    lindex >= setup->__reserved.opt_end)
+				continue;
+			lindex -= setup->__reserved.opt_start;
+			trace_me("%s->parse_options()", setup->name);
+			ret = setup->parse_option(lindex, optarg);
+			if (ret == 0)
+				break;
+			return ret;
+		}
+		n = optind - 1;
+		if (uargv[n][0] != '-' || uargv[n][1] != '-')
+			/* Clear the separate argument value. */
+			uargv[n--][0] = '\0';
+		uargv[n][0] = '\0'; /* Clear the option switch. */
+	}
+
+	pack_args(argcp, uargv);
+
+	optind = 0;
+
+	return 0;
+}
+
+static void __xenomai_init(int *argcp, char *const **argvp, const char *me)
+{
+	struct setup_descriptor *setup;
+	int ret, base_opt_start;
+	struct option *options;
+	struct service svc;
+	char **uargv;
+
+	/*
+	 * Build the global option array, merging all option sets.
+	 */
+	options = build_option_array(&base_opt_start);
+	if (options == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	/*
+	 * Prepare a user argument vector we can modify, copying the
+	 * one we have been given by the bootstrap module. This vector
+	 * will be expunged from Xenomai's base options as we discover
+	 * them.
+	 */
+	uargv = prep_args(*argcp, *argvp);
+	if (uargv == NULL) {
+		ret = -ENOMEM;
+		goto fail;
+	}
+
+	if (base_init_done) {
+		trace_me("warm init from %s", me);
+		goto setup;
+	}
+
+	/* Our node id is the tid of the main thread. */
+	__node_id = get_thread_pid();
+
+	/* No ifs, no buts: we must be called over the main thread. */
+	assert(getpid() == __node_id);
+
+	/* Retrieve the default CPU affinity. */
+	retrieve_default_cpu_affinity();
+
+	/*
+	 * Parse the base options first, to bootstrap the core with
+	 * the right config values.
+	 */
+	ret = parse_base_options(argcp, uargv,
+				 options, base_opt_start);
+	if (ret)
+		goto fail;
+
+	trace_me("cold init from %s", me);
+	
+#ifndef CONFIG_SMP
+	if (__base_setup_data.no_sanity == 0) {
+		ret = get_static_cpu_count();
+		if (ret > 0)
+			early_panic("running non-SMP libraries on SMP kernel?\n"
+	    "              build with --enable-smp or disable check with --no-sanity");
+	}
+#endif
+
+#ifdef CONFIG_XENO_MERCURY
+	if (__base_setup_data.no_mlock == 0) {
+		ret = mlockall(MCL_CURRENT | MCL_FUTURE);
+		if (ret) {
+			ret = -errno;
+			early_warning("failed to lock memory");
+			goto fail;
+		}
+		trace_me("memory locked");
+	} else
+		trace_me("memory NOT locked");
+#endif
+
+	/*
+	 * Now that we have bootstrapped the core, we may call the
+	 * setup handlers for tuning the configuration, then parsing
+	 * their own options, and eventually doing the init chores.
+	 */
+setup:
+	if (!pvlist_empty(&setup_list)) {
+
+		CANCEL_DEFER(svc);
+
+		pvlist_for_each_entry(setup, &setup_list, __reserved.next) {
+			if (!setup->__reserved.done && setup->tune) {
+				trace_me("%s->tune()", setup->name);
+				ret = setup->tune();
+				if (ret)
+					break;
+			}
+		}
+		
+		ret = parse_setup_options(argcp, uargv, options);
+		if (ret)
+			goto fail;
+
+		/*
+		 * From now on, we may not assign configuration
+		 * tunables anymore.
+		 */
+		__config_done = 1;
+	
+		pvlist_for_each_entry(setup, &setup_list, __reserved.next) {
+			if (setup->__reserved.done)
+				continue;
+			if (setup->init) {
+				trace_me("%s->init()", setup->name);
+				ret = setup->init();
+				if (ret)
+					break;
+				setup->__reserved.done = 1;
+			}
+		}
+
+		CANCEL_RESTORE(svc);
+
+		if (ret) {
+			early_warning("setup call %s failed", setup->name);
+			goto fail;
+		}
+	} else
+		__config_done = 1;
+
+	free(options);
+
+#ifdef CONFIG_XENO_DEBUG
+	if (!base_init_done && __base_setup_data.verbosity_level > 0) {
+		early_warning("Xenomai compiled with %s debug enabled,\n"
+			"                              "
+			"%shigh latencies expected [--enable-debug=%s]",
+#ifdef CONFIG_XENO_DEBUG_FULL
+			"full", "very ", "full"
+#else
+			"partial", "", "partial"
+#endif
+			);
+	}
+#endif
+
+	/*
+	 * The final user arg vector only contains options we could
+	 * not handle. The caller should be able to process them, or
+	 * bail out.
+	 */
+	*argvp = uargv;
+	base_init_done = 1;
+
+	return;
+fail:
+	early_panic("initialization failed, %s", symerror(ret));
+}
+
+void xenomai_init(int *argcp, char *const **argvp)
+{
+	const char *me = get_program_name();
+
+	if (main_init_done) {
+		early_warning("duplicate call from main program "
+			      "to %s() ignored", __func__);
+		early_warning("(xeno-config --no-auto-init disables implicit call)");
+	}
+
+	__xenomai_init(argcp, argvp, me);
+	main_init_done = 1;
+	trace_me("%s bootstrap done", me);
+}
+
+void xenomai_init_dso(int *argcp, char *const **argvp)
+{
+	__xenomai_init(argcp, argvp, "DSO");
+	trace_me("DSO bootstrap done");
+}
+
+void __trace_me(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	fprintf(stderr, "--  ");
+	vfprintf(stderr, fmt, ap);
+	fputc('\n', stderr);
+	fflush(stderr);
+	va_end(ap);
+}
+
+void __register_setup_call(struct setup_descriptor *p, int id)
+{
+	struct setup_descriptor *pos;
+
+	/*
+	 * Trap late registration due to wrong constructor priorities.
+	 */
+	assert(!main_init_done);
+	p->__reserved.id = id;
+	p->__reserved.done = 0;
+
+	/*
+	 * Insert the new descriptor (highest id first).
+	 */
+	if (!pvlist_empty(&setup_list)) {
+		pvlist_for_each_entry_reverse(pos, &setup_list, __reserved.next) {
+			if (id >= pos->__reserved.id) {
+				atpvh(&pos->__reserved.next, &p->__reserved.next);
+				return;
+			}
+		}
+	}
+	pvlist_prepend(&p->__reserved.next, &setup_list);
+}
+
+const char *get_program_name(void)
+{
+	return basename(__base_setup_data.arg0 ?: "program");
+}
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/time.c b/kernel/xenomai-v3.2.4/lib/boilerplate/time.c
new file mode 100644
index 0000000..d112593
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/time.c
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include "boilerplate/time.h"
+
+void timespec_sub(struct timespec *__restrict r,
+		  const struct timespec *__restrict t1,
+		  const struct timespec *__restrict t2)
+{
+	r->tv_sec = t1->tv_sec - t2->tv_sec;
+	r->tv_nsec = t1->tv_nsec - t2->tv_nsec;
+	if (r->tv_nsec < 0) {
+		r->tv_sec--;
+		r->tv_nsec += 1000000000;
+	}
+}
+
+void timespec_subs(struct timespec *__restrict r,
+		   const struct timespec *__restrict t1,
+		   sticks_t t2)
+{
+	sticks_t s, rem;
+
+	s = t2 / 1000000000;
+	rem = t2 - s * 1000000000;
+	r->tv_sec = t1->tv_sec - s;
+	r->tv_nsec = t1->tv_nsec - rem;
+	if (r->tv_nsec < 0) {
+		r->tv_sec--;
+		r->tv_nsec += 1000000000;
+	}
+}
+
+void timespec_add(struct timespec *__restrict r,
+		  const struct timespec *__restrict t1,
+		  const struct timespec *__restrict t2)
+{
+	r->tv_sec = t1->tv_sec + t2->tv_sec;
+	r->tv_nsec = t1->tv_nsec + t2->tv_nsec;
+	if (r->tv_nsec >= 1000000000) {
+		r->tv_sec++;
+		r->tv_nsec -= 1000000000;
+	}
+}
+
+void timespec_adds(struct timespec *__restrict r,
+		   const struct timespec *__restrict t1,
+		   sticks_t t2)
+{
+	sticks_t s, rem;
+
+	s = t2 / 1000000000;
+	rem = t2 - s * 1000000000;
+	r->tv_sec = t1->tv_sec + s;
+	r->tv_nsec = t1->tv_nsec + rem;
+	if (r->tv_nsec >= 1000000000) {
+		r->tv_sec++;
+		r->tv_nsec -= 1000000000;
+	}
+}
+
+void timespec_sets(struct timespec *__restrict r,
+		   ticks_t ns)
+{
+	r->tv_sec = ns / 1000000000UL;
+	r->tv_nsec = ns - r->tv_sec * 1000000000UL;
+	if (r->tv_nsec >= 1000000000) {
+		r->tv_sec++;
+		r->tv_nsec -= 1000000000;
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/README b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/README
new file mode 100644
index 0000000..c9f47ac
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/README
@@ -0,0 +1,4 @@
+This is TLSF 2.4.6
+==================
+
+See http://rtportal.upv.es/rtmalloc/
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/target.h b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/target.h
new file mode 100644
index 0000000..27fdd2d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/target.h
@@ -0,0 +1,30 @@
+/*
+ * Two Levels Segregate Fit memory allocator (TLSF)
+ * Version 2.4.6
+ *
+ * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
+ *
+ * Thanks to Ismael Ripoll for his suggestions and reviews
+ *
+ * Copyright (C) 2008, 2007, 2006, 2005, 2004
+ *
+ * This code is released using a dual license strategy: GPL/LGPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of the GNU General Public License Version 2.0
+ * Released under the terms of the GNU Lesser General Public License Version 2.1
+ *
+ */
+#ifndef _TARGET_H_
+#define _TARGET_H_
+
+#include <pthread.h>
+#include "boilerplate/wrappers.h"
+
+#define TLSF_MLOCK_T            pthread_mutex_t
+#define TLSF_CREATE_LOCK(l)     __RT(pthread_mutex_init (l, NULL))
+#define TLSF_DESTROY_LOCK(l)    __RT(pthread_mutex_destroy(l))
+#define TLSF_ACQUIRE_LOCK(l)    __RT(pthread_mutex_lock(l))
+#define TLSF_RELEASE_LOCK(l)    __RT(pthread_mutex_unlock(l))
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.c b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.c
new file mode 100644
index 0000000..92e651a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.c
@@ -0,0 +1,1011 @@
+/*
+ * Two Levels Segregate Fit memory allocator (TLSF)
+ * Version 2.4.6
+ *
+ * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
+ *
+ * Thanks to Ismael Ripoll for his suggestions and reviews
+ *
+ * Copyright (C) 2008, 2007, 2006, 2005, 2004
+ *
+ * This code is released using a dual license strategy: GPL/LGPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of the GNU General Public License Version 2.0
+ * Released under the terms of the GNU Lesser General Public License Version 2.1
+ *
+ */
+
+/*
+ * Code contributions:
+ *
+ * (Jul 28 2007)  Herman ten Brugge <hermantenbrugge@home.nl>:
+ *
+ * - Add 64 bit support. It now runs on x86_64 and solaris64.
+ * - I also tested this on vxworks/32 and solaris/32 and i386/32 processors.
+ * - Remove assembly code. I could not measure any performance difference
+ *   on my core2 processor. This also makes the code more portable.
+ * - Moved defines/typedefs from tlsf.h to tlsf.c
+ * - Changed MIN_BLOCK_SIZE to sizeof (free_ptr_t) and BHDR_OVERHEAD to
+ *   (sizeof (bhdr_t) - MIN_BLOCK_SIZE). This does not change the fact
+ *   that the minimum size is still sizeof (bhdr_t).
+ * - Changed all C++ comment style to C style. (// -> /.* ... *./)
+ * - Used ls_bit instead of ffs and ms_bit instead of fls. I did this to
+ *   avoid confusion with the standard ffs function which returns
+ *   different values.
+ * - Created set_bit/clear_bit functions because they are not present
+ *   on x86_64.
+ * - Added locking support + extra file target.h to show how to use it.
+ * - Added get_used_size function (REMOVED in 2.4)
+ * - Added rtl_realloc and rtl_calloc function
+ * - Implemented realloc clever support.
+ * - Added some test code in the example directory.
+ * - Bug fixed (discovered by the rockbox project: www.rockbox.org).
+ *
+ * (Oct 23 2006) Adam Scislowicz:
+ *
+ * - Support for ARMv5 implemented
+ *
+ */
+
+/*#define USE_SBRK        (0) */
+/*#define USE_MMAP        (0) */
+
+#ifndef USE_PRINTF
+#define USE_PRINTF      (1)
+#endif
+
+#include <string.h>
+
+#ifndef TLSF_USE_LOCKS
+#define	TLSF_USE_LOCKS	(0)
+#endif
+
+#ifndef TLSF_STATISTIC
+#define	TLSF_STATISTIC	(0)
+#endif
+
+#ifndef USE_MMAP
+#define	USE_MMAP	(0)
+#endif
+
+#ifndef USE_SBRK
+#define	USE_SBRK	(0)
+#endif
+
+
+#if TLSF_USE_LOCKS
+#include "target.h"
+#else
+#define TLSF_CREATE_LOCK(_unused_)   do{}while(0)
+#define TLSF_DESTROY_LOCK(_unused_)  do{}while(0)
+#define TLSF_ACQUIRE_LOCK(_unused_)  do{}while(0)
+#define TLSF_RELEASE_LOCK(_unused_)  do{}while(0)
+#endif
+
+#if TLSF_STATISTIC
+#define	TLSF_ADD_SIZE(tlsf, b) do {									\
+		tlsf->used_size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;	\
+		if (tlsf->used_size > tlsf->max_size)						\
+			tlsf->max_size = tlsf->used_size;						\
+		} while(0)
+
+#define	TLSF_REMOVE_SIZE(tlsf, b) do {								\
+		tlsf->used_size -= (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;	\
+	} while(0)
+#else
+#define	TLSF_ADD_SIZE(tlsf, b)	     do{}while(0)
+#define	TLSF_REMOVE_SIZE(tlsf, b)    do{}while(0)
+#endif
+
+#if USE_MMAP || USE_SBRK
+#include <unistd.h>
+#endif
+
+#if USE_MMAP
+#include <sys/mman.h>
+#endif
+
+#include "tlsf.h"
+
+#if !defined(__GNUC__)
+#ifndef __inline__
+#define __inline__
+#endif
+#endif
+
+/* The debug functions can only be used when _DEBUG_TLSF_ is set. */
+#ifndef _DEBUG_TLSF_
+#define _DEBUG_TLSF_  (0)
+#endif
+
+/*************************************************************************/
+/* Definition of the structures used by TLSF */
+
+
+/* Some IMPORTANT TLSF parameters */
+/* Unlike previous TLSF versions, these are now static. */
+#define BLOCK_ALIGN (sizeof(void *) * 2)
+
+#define MAX_FLI		(30)
+#define MAX_LOG2_SLI	(5)
+#define MAX_SLI		(1 << MAX_LOG2_SLI)     /* MAX_SLI = 2^MAX_LOG2_SLI */
+
+#define FLI_OFFSET	(6)     /* the tlsf structure will only manage blocks */
+/* bigger than 128 bytes */
+#define SMALL_BLOCK	(128)
+#define REAL_FLI	(MAX_FLI - FLI_OFFSET)
+#define MIN_BLOCK_SIZE	(sizeof (free_ptr_t))
+#define BHDR_OVERHEAD	(sizeof (bhdr_t) - MIN_BLOCK_SIZE)
+#define TLSF_SIGNATURE	(0x2A59FA59)
+
+#define	PTR_MASK	(sizeof(void *) - 1)
+#define BLOCK_SIZE	(0xFFFFFFFF - PTR_MASK)
+
+#define GET_NEXT_BLOCK(_addr, _r) ((bhdr_t *) ((char *) (_addr) + (_r)))
+#define	MEM_ALIGN		  ((BLOCK_ALIGN) - 1)
+#define ROUNDUP_SIZE(_r)          (((_r) + MEM_ALIGN) & ~MEM_ALIGN)
+#define ROUNDDOWN_SIZE(_r)        ((_r) & ~MEM_ALIGN)
+#define ROUNDUP(_x, _v)           ((((~(_x)) + 1) & ((_v)-1)) + (_x))
+
+#define BLOCK_STATE	(0x1)
+#define PREV_STATE	(0x2)
+
+/* bit 0 of the block size */
+#define FREE_BLOCK	(0x1)
+#define USED_BLOCK	(0x0)
+
+/* bit 1 of the block size */
+#define PREV_FREE	(0x2)
+#define PREV_USED	(0x0)
+
+
+#define DEFAULT_AREA_SIZE (1024*10)
+
+#if USE_PRINTF
+#include <stdio.h>
+# define PRINT_MSG(fmt, args...) printf(fmt, ## args)
+# define ERROR_MSG(fmt, args...) printf(fmt, ## args)
+#else
+# if !defined(PRINT_MSG)
+#  define PRINT_MSG(fmt, args...)
+# endif
+# if !defined(ERROR_MSG)
+#  define ERROR_MSG(fmt, args...)
+# endif
+#endif
+
+typedef unsigned int u32_t;     /* NOTE: Make sure that this type is 4 bytes long on your computer */
+typedef unsigned char u8_t;     /* NOTE: Make sure that this type is 1 byte on your computer */
+
+typedef struct free_ptr_struct {
+    struct bhdr_struct *prev;
+    struct bhdr_struct *next;
+} free_ptr_t;
+
+typedef struct bhdr_struct {
+    /* This pointer is only valid when the PREV_FREE bit of size is set */
+    struct bhdr_struct *prev_hdr;
+    /* The size is stored in bytes */
+    size_t size;                /* bit 0 indicates whether the block is used; */
+    /* bit 1 indicates whether the previous block is free */
+    union {
+	struct free_ptr_struct free_ptr;
+	u8_t buffer[1];         /* actually sizeof(struct free_ptr_struct) bytes */
+    } ptr;
+} bhdr_t;
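+
+/*
+ * Example (illustrative): a free 256-byte block whose predecessor is in
+ * use stores size = 256 | FREE_BLOCK | PREV_USED; the two low bits carry
+ * state, and (size & BLOCK_SIZE) recovers the byte count.
+ */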
+
+/* This structure is embedded at the beginning of each area, giving us
+ * enough information to cope with a set of areas */
+
+typedef struct area_info_struct {
+    bhdr_t *end;
+    struct area_info_struct *next;
+} area_info_t;
+
+typedef struct TLSF_struct {
+    /* the TLSF's structure signature */
+    u32_t tlsf_signature;
+
+#if TLSF_USE_LOCKS
+    TLSF_MLOCK_T lock;
+#endif
+
+#if TLSF_STATISTIC
+    /* These cannot be calculated outside tlsf because we
+     * do not know the sizes when freeing/reallocating memory. */
+    size_t used_size;
+    size_t max_size;
+#endif
+
+    /* A linked list holding all the existing areas */
+    area_info_t *area_head;
+
+    /* the first-level bitmap */
+    /* This array should have a size of REAL_FLI bits */
+    u32_t fl_bitmap;
+
+    /* the second-level bitmap */
+    u32_t sl_bitmap[REAL_FLI];
+
+    bhdr_t *matrix[REAL_FLI][MAX_SLI];
+} tlsf_t;
+
+
+/******************************************************************/
+/**************     Helping functions    **************************/
+/******************************************************************/
+static __inline__ void set_bit(int nr, u32_t * addr);
+static __inline__ void clear_bit(int nr, u32_t * addr);
+static __inline__ int ls_bit(int x);
+static __inline__ int ms_bit(int x);
+static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl);
+static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl);
+static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl);
+static __inline__ bhdr_t *process_area(void *area, size_t size);
+#if USE_SBRK || USE_MMAP
+static __inline__ void *get_new_area(size_t * size);
+#endif
+
+/* Byte-wise log2 lookup table, used by ls_bit()/ms_bit(). */
+static const int table[] = {
+    -1, 0, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3, 3, 3, 3, 3,
+    4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4, 4,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5, 5,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6, 6,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7,
+    7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7
+};
+
+static __inline__ int ls_bit(int i)
+{
+    unsigned int a;
+    unsigned int x = i & -i;
+
+    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
+    return table[x >> a] + a;
+}
+
+static __inline__ int ms_bit(int i)
+{
+    unsigned int a;
+    unsigned int x = (unsigned int) i;
+
+    a = x <= 0xffff ? (x <= 0xff ? 0 : 8) : (x <= 0xffffff ? 16 : 24);
+    return table[x >> a] + a;
+}
+
+static __inline__ void set_bit(int nr, u32_t * addr)
+{
+    addr[nr >> 5] |= 1 << (nr & 0x1f);
+}
+
+static __inline__ void clear_bit(int nr, u32_t * addr)
+{
+    addr[nr >> 5] &= ~(1 << (nr & 0x1f));
+}
+
+static __inline__ void MAPPING_SEARCH(size_t * _r, int *_fl, int *_sl)
+{
+    int _t;
+
+    if (*_r < SMALL_BLOCK) {
+	*_fl = 0;
+	*_sl = *_r / (SMALL_BLOCK / MAX_SLI);
+    } else {
+	_t = (1 << (ms_bit(*_r) - MAX_LOG2_SLI)) - 1;
+	*_r = *_r + _t;
+	*_fl = ms_bit(*_r);
+	*_sl = (*_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
+	*_fl -= FLI_OFFSET;
+	/*if ((*_fl -= FLI_OFFSET) < 0) // FL will always be > 0!
+	 *_fl = *_sl = 0;
+	 */
+	*_r &= ~_t;
+    }
+}
+
+static __inline__ void MAPPING_INSERT(size_t _r, int *_fl, int *_sl)
+{
+    if (_r < SMALL_BLOCK) {
+	*_fl = 0;
+	*_sl = _r / (SMALL_BLOCK / MAX_SLI);
+    } else {
+	*_fl = ms_bit(_r);
+	*_sl = (_r >> (*_fl - MAX_LOG2_SLI)) - MAX_SLI;
+	*_fl -= FLI_OFFSET;
+    }
+}
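+
+/*
+ * Worked example (illustrative): inserting a 480-byte block with
+ * MAX_LOG2_SLI = 5 gives ms_bit(480) = 8, hence
+ * sl = (480 >> (8 - 5)) - MAX_SLI = 60 - 32 = 28 and
+ * fl = 8 - FLI_OFFSET = 2, i.e. the block lands in matrix[2][28].
+ */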
+
+
+static __inline__ bhdr_t *FIND_SUITABLE_BLOCK(tlsf_t * _tlsf, int *_fl, int *_sl)
+{
+    u32_t _tmp = _tlsf->sl_bitmap[*_fl] & (~0u << *_sl);
+    bhdr_t *_b = NULL;
+
+    if (_tmp) {
+	*_sl = ls_bit(_tmp);
+	_b = _tlsf->matrix[*_fl][*_sl];
+    } else {
+	*_fl = ls_bit(_tlsf->fl_bitmap & (~0u << (*_fl + 1)));
+	if (*_fl > 0) {         /* likely */
+	    *_sl = ls_bit(_tlsf->sl_bitmap[*_fl]);
+	    _b = _tlsf->matrix[*_fl][*_sl];
+	}
+    }
+    return _b;
+}
+
+
+#define EXTRACT_BLOCK_HDR(_b, _tlsf, _fl, _sl) do {					\
+		_tlsf -> matrix [_fl] [_sl] = _b -> ptr.free_ptr.next;		\
+		if (_tlsf -> matrix[_fl][_sl])								\
+			_tlsf -> matrix[_fl][_sl] -> ptr.free_ptr.prev = NULL;	\
+		else {														\
+			clear_bit (_sl, &_tlsf -> sl_bitmap [_fl]);				\
+			if (!_tlsf -> sl_bitmap [_fl])							\
+				clear_bit (_fl, &_tlsf -> fl_bitmap);				\
+		}															\
+		_b -> ptr.free_ptr.prev =  NULL;				\
+		_b -> ptr.free_ptr.next =  NULL;				\
+	}while(0)
+
+
+#define EXTRACT_BLOCK(_b, _tlsf, _fl, _sl) do {							\
+		if (_b -> ptr.free_ptr.next)									\
+			_b -> ptr.free_ptr.next -> ptr.free_ptr.prev = _b -> ptr.free_ptr.prev; \
+		if (_b -> ptr.free_ptr.prev)									\
+			_b -> ptr.free_ptr.prev -> ptr.free_ptr.next = _b -> ptr.free_ptr.next; \
+		if (_tlsf -> matrix [_fl][_sl] == _b) {							\
+			_tlsf -> matrix [_fl][_sl] = _b -> ptr.free_ptr.next;		\
+			if (!_tlsf -> matrix [_fl][_sl]) {							\
+				clear_bit (_sl, &_tlsf -> sl_bitmap[_fl]);				\
+				if (!_tlsf -> sl_bitmap [_fl])							\
+					clear_bit (_fl, &_tlsf -> fl_bitmap);				\
+			}															\
+		}																\
+		_b -> ptr.free_ptr.prev = NULL;					\
+		_b -> ptr.free_ptr.next = NULL;					\
+	} while(0)
+
+#define INSERT_BLOCK(_b, _tlsf, _fl, _sl) do {							\
+		_b -> ptr.free_ptr.prev = NULL; \
+		_b -> ptr.free_ptr.next = _tlsf -> matrix [_fl][_sl]; \
+		if (_tlsf -> matrix [_fl][_sl])									\
+			_tlsf -> matrix [_fl][_sl] -> ptr.free_ptr.prev = _b;		\
+		_tlsf -> matrix [_fl][_sl] = _b;								\
+		set_bit (_sl, &_tlsf -> sl_bitmap [_fl]);						\
+		set_bit (_fl, &_tlsf -> fl_bitmap);								\
+	} while(0)
+
+#if USE_SBRK || USE_MMAP
+static __inline__ void *get_new_area(size_t * size)
+{
+    void *area;
+
+#if USE_SBRK
+    area = (void *)sbrk(0);
+    if (((void *)sbrk(*size)) != ((void *) -1))
+	return area;
+#endif
+
+#ifndef MAP_ANONYMOUS
+/* https://dev.openwrt.org/ticket/322 */
+# define MAP_ANONYMOUS MAP_ANON
+#endif
+
+
+#if USE_MMAP
+    *size = ROUNDUP(*size, getpagesize());
+    if ((area = __STD(mmap(0, *size, PROT_READ | PROT_WRITE, MAP_PRIVATE | MAP_ANONYMOUS, -1, 0))) != MAP_FAILED)
+	return area;
+#endif
+    return ((void *) ~0);
+}
+#endif
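+
+/*
+ * Note: get_new_area() reports failure as (void *) ~0 rather than NULL;
+ * tlsf_malloc() below compares against the same sentinel before seeding
+ * the default pool.
+ */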
+
+static __inline__ bhdr_t *process_area(void *area, size_t size)
+{
+    bhdr_t *b, *lb, *ib;
+    area_info_t *ai;
+
+    ib = (bhdr_t *) area;
+    ib->size =
+	(sizeof(area_info_t) <
+	 MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(sizeof(area_info_t)) | USED_BLOCK | PREV_USED;
+    b = (bhdr_t *) GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
+    b->size = ROUNDDOWN_SIZE(size - 3 * BHDR_OVERHEAD - (ib->size & BLOCK_SIZE)) | USED_BLOCK | PREV_USED;
+    b->ptr.free_ptr.prev = b->ptr.free_ptr.next = 0;
+    lb = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+    lb->prev_hdr = b;
+    lb->size = 0 | USED_BLOCK | PREV_FREE;
+    ai = (area_info_t *) ib->ptr.buffer;
+    ai->next = 0;
+    ai->end = lb;
+    return ib;
+}
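+
+/*
+ * Layout produced above (sketch): [ib: block wrapping the area_info_t]
+ * [b: one large block handed back through ib][lb: zero-sized sentinel
+ * marking the end of the area].
+ */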
+
+/******************************************************************/
+/******************** Begin of the allocator code *****************/
+/******************************************************************/
+
+static char *mp;         /* Default memory pool. */
+
+/******************************************************************/
+size_t init_memory_pool(size_t mem_pool_size, void *mem_pool)
+{
+/******************************************************************/
+    tlsf_t *tlsf;
+    bhdr_t *b, *ib;
+
+    if (!mem_pool || !mem_pool_size || mem_pool_size < sizeof(tlsf_t) + BHDR_OVERHEAD * 8) {
+	ERROR_MSG("init_memory_pool(): invalid pool\n");
+	return -1;
+    }
+
+    if (((unsigned long) mem_pool & PTR_MASK)) {
+	ERROR_MSG("init_memory_pool(): pool must be aligned to a word\n");
+	return -1;
+    }
+    tlsf = (tlsf_t *) mem_pool;
+    /* Check if already initialised */
+    if (tlsf->tlsf_signature == TLSF_SIGNATURE) {
+	ERROR_MSG("init_memory_pool(): already initialized\n");
+	return -1;
+    }
+
+    /* Zeroing the TLSF control block at the head of the pool */
+    memset(mem_pool, 0, sizeof(tlsf_t));
+
+    tlsf->tlsf_signature = TLSF_SIGNATURE;
+
+    TLSF_CREATE_LOCK(&tlsf->lock);
+
+    ib = process_area(GET_NEXT_BLOCK
+		      (mem_pool, ROUNDUP_SIZE(sizeof(tlsf_t))), ROUNDDOWN_SIZE(mem_pool_size - sizeof(tlsf_t)));
+    b = GET_NEXT_BLOCK(ib->ptr.buffer, ib->size & BLOCK_SIZE);
+    free_ex(b->ptr.buffer, tlsf);
+    tlsf->area_head = (area_info_t *) ib->ptr.buffer;
+
+#if TLSF_STATISTIC
+    tlsf->used_size = mem_pool_size - (b->size & BLOCK_SIZE);
+    tlsf->max_size = mem_pool_size - tlsf->used_size;
+#endif
+
+    return (b->size & BLOCK_SIZE);
+}
+
+/******************************************************************/
+size_t add_new_area(void *area, size_t area_size, void *mem_pool)
+{
+/******************************************************************/
+    tlsf_t *tlsf = (tlsf_t *) mem_pool;
+    area_info_t *ptr, *ptr_prev, *ai;
+    bhdr_t *ib0, *b0, *lb0, *ib1, *b1, *lb1, *next_b;
+
+    memset(area, 0, area_size);
+    ptr = tlsf->area_head;
+    ptr_prev = 0;
+
+    ib0 = process_area(area, area_size);
+    b0 = GET_NEXT_BLOCK(ib0->ptr.buffer, ib0->size & BLOCK_SIZE);
+    lb0 = GET_NEXT_BLOCK(b0->ptr.buffer, b0->size & BLOCK_SIZE);
+
+    /* Before inserting the new area, we have to merge this area with the
+       already existing ones */
+
+    while (ptr) {
+	ib1 = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
+	b1 = GET_NEXT_BLOCK(ib1->ptr.buffer, ib1->size & BLOCK_SIZE);
+	lb1 = ptr->end;
+
+	/* Merging the new area with the next physically contiguous one */
+	if ((unsigned long) ib1 == (unsigned long) lb0 + BHDR_OVERHEAD) {
+	    if (tlsf->area_head == ptr) {
+		tlsf->area_head = ptr->next;
+		ptr = ptr->next;
+	    } else {
+		ptr_prev->next = ptr->next;
+		ptr = ptr->next;
+	    }
+
+	    b0->size =
+		ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
+			       (ib1->size & BLOCK_SIZE) + 2 * BHDR_OVERHEAD) | USED_BLOCK | PREV_USED;
+
+	    b1->prev_hdr = b0;
+	    lb0 = lb1;
+
+	    continue;
+	}
+
+	/* Merging the new area with the previous physically contiguous
+	   one */
+	if ((unsigned long) lb1->ptr.buffer == (unsigned long) ib0) {
+	    if (tlsf->area_head == ptr) {
+		tlsf->area_head = ptr->next;
+		ptr = ptr->next;
+	    } else {
+		ptr_prev->next = ptr->next;
+		ptr = ptr->next;
+	    }
+
+	    lb1->size =
+		ROUNDDOWN_SIZE((b0->size & BLOCK_SIZE) +
+			       (ib0->size & BLOCK_SIZE) + 2 * BHDR_OVERHEAD) | USED_BLOCK | (lb1->size & PREV_STATE);
+	    next_b = GET_NEXT_BLOCK(lb1->ptr.buffer, lb1->size & BLOCK_SIZE);
+	    next_b->prev_hdr = lb1;
+	    b0 = lb1;
+	    ib0 = ib1;
+
+	    continue;
+	}
+	ptr_prev = ptr;
+	ptr = ptr->next;
+    }
+
+    /* Inserting the area in the list of linked areas */
+    ai = (area_info_t *) ib0->ptr.buffer;
+    ai->next = tlsf->area_head;
+    ai->end = lb0;
+    tlsf->area_head = ai;
+    free_ex(b0->ptr.buffer, mem_pool);
+    return (b0->size & BLOCK_SIZE);
+}
+
+
+/******************************************************************/
+size_t get_used_size(void *mem_pool)
+{
+/******************************************************************/
+#if TLSF_STATISTIC
+    return ((tlsf_t *) mem_pool)->used_size;
+#else
+    (void)mem_pool;
+    return 0;
+#endif
+}
+
+/******************************************************************/
+size_t get_max_size(void *mem_pool)
+{
+/******************************************************************/
+#if TLSF_STATISTIC
+    return ((tlsf_t *) mem_pool)->max_size;
+#else
+    (void)mem_pool;
+    return 0;
+#endif
+}
+
+/******************************************************************/
+void destroy_memory_pool(void *mem_pool)
+{
+/******************************************************************/
+    tlsf_t *tlsf = (tlsf_t *) mem_pool;
+
+    tlsf->tlsf_signature = 0;
+
+    TLSF_DESTROY_LOCK(&tlsf->lock);
+
+}
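+
+/*
+ * Note that destroying a pool only clears the TLSF signature and the
+ * lock; the backing memory itself still belongs to the caller, who may
+ * reuse or release it afterwards.
+ */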
+
+
+/******************************************************************/
+void *tlsf_malloc(size_t size)
+{
+/******************************************************************/
+    void *ret;
+
+#if USE_MMAP || USE_SBRK
+    if (!mp) {
+	size_t area_size;
+	void *area;
+
+	area_size = sizeof(tlsf_t) + BHDR_OVERHEAD * 8; /* Just a safety constant */
+	area_size += size;
+	area_size += (1 << (ms_bit(area_size) - MAX_LOG2_SLI)) - 1;
+	area_size = (area_size > DEFAULT_AREA_SIZE) ? area_size : DEFAULT_AREA_SIZE;
+	area = get_new_area(&area_size);
+	if (area == ((void *) ~0))
+	    return NULL;        /* Not enough system memory */
+	init_memory_pool(area_size, area);
+	mp = area;
+    }
+#endif
+
+    TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+    ret = malloc_ex(size, mp);
+
+    TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+    return ret;
+}
+
+/******************************************************************/
+void tlsf_free(void *ptr)
+{
+/******************************************************************/
+
+    TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+    free_ex(ptr, mp);
+
+    TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+}
+
+/******************************************************************/
+void *tlsf_realloc(void *ptr, size_t size)
+{
+/******************************************************************/
+    void *ret;
+
+#if USE_MMAP || USE_SBRK
+	if (!mp) {
+		return tlsf_malloc(size);
+	}
+#endif
+
+    TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+    ret = realloc_ex(ptr, size, mp);
+
+    TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+    return ret;
+}
+
+/******************************************************************/
+void *tlsf_calloc(size_t nelem, size_t elem_size)
+{
+/******************************************************************/
+    void *ret;
+
+    TLSF_ACQUIRE_LOCK(&((tlsf_t *)mp)->lock);
+
+    ret = calloc_ex(nelem, elem_size, mp);
+
+    TLSF_RELEASE_LOCK(&((tlsf_t *)mp)->lock);
+
+    return ret;
+}
+
+/******************************************************************/
+void *malloc_ex(size_t size, void *mem_pool)
+{
+/******************************************************************/
+    tlsf_t *tlsf = (tlsf_t *) mem_pool;
+    bhdr_t *b, *b2, *next_b;
+    int fl, sl;
+    size_t tmp_size;
+
+    size = (size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(size);
+
+    /* Rounding up the requested size and calculating fl and sl */
+    MAPPING_SEARCH(&size, &fl, &sl);
+
+    /* Searching for a free block; recall that this function changes the values
+       of fl and sl, so they are no longer valid when the function fails */
+    b = FIND_SUITABLE_BLOCK(tlsf, &fl, &sl);
+    if (!b)
+	return NULL;            /* Not found */
+
+    EXTRACT_BLOCK_HDR(b, tlsf, fl, sl);
+
+    /*-- found: */
+    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+    /* Should the block be split? */
+    tmp_size = (b->size & BLOCK_SIZE) - size;
+    if (tmp_size >= sizeof(bhdr_t)) {
+	tmp_size -= BHDR_OVERHEAD;
+	b2 = GET_NEXT_BLOCK(b->ptr.buffer, size);
+	b2->size = tmp_size | FREE_BLOCK | PREV_USED;
+	next_b->prev_hdr = b2;
+	MAPPING_INSERT(tmp_size, &fl, &sl);
+	INSERT_BLOCK(b2, tlsf, fl, sl);
+
+	b->size = size | (b->size & PREV_STATE);
+    } else {
+	next_b->size &= (~PREV_FREE);
+	b->size &= (~FREE_BLOCK);       /* Now it's used */
+    }
+
+    TLSF_ADD_SIZE(tlsf, b);
+
+    return (void *) b->ptr.buffer;
+}
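+
+/*
+ * Note on the search above: MAPPING_SEARCH rounds the request up to the
+ * next segregated-list boundary, so any block found in matrix[fl][sl]
+ * is guaranteed to fit and no list has to be scanned, which is what
+ * keeps allocation O(1).
+ */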
+
+/******************************************************************/
+void free_ex(void *ptr, void *mem_pool)
+{
+/******************************************************************/
+    tlsf_t *tlsf = (tlsf_t *) mem_pool;
+    bhdr_t *b, *tmp_b;
+    int fl = 0, sl = 0;
+
+    if (!ptr) {
+	return;
+    }
+    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
+    b->size |= FREE_BLOCK;
+
+    TLSF_REMOVE_SIZE(tlsf, b);
+
+    b->ptr.free_ptr.prev = NULL;
+    b->ptr.free_ptr.next = NULL;
+    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+    if (tmp_b->size & FREE_BLOCK) {
+	MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
+	EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
+	b->size += (tmp_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+    }
+    if (b->size & PREV_FREE) {
+	tmp_b = b->prev_hdr;
+	MAPPING_INSERT(tmp_b->size & BLOCK_SIZE, &fl, &sl);
+	EXTRACT_BLOCK(tmp_b, tlsf, fl, sl);
+	tmp_b->size += (b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+	b = tmp_b;
+    }
+    MAPPING_INSERT(b->size & BLOCK_SIZE, &fl, &sl);
+    INSERT_BLOCK(b, tlsf, fl, sl);
+
+    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+    tmp_b->size |= PREV_FREE;
+    tmp_b->prev_hdr = b;
+}
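+
+/*
+ * free_ex() above performs immediate boundary-tag coalescing: a freed
+ * block is merged with a free successor and/or predecessor before being
+ * reinserted, so no two physically adjacent free blocks ever coexist in
+ * the matrix.
+ */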
+
+/******************************************************************/
+void *realloc_ex(void *ptr, size_t new_size, void *mem_pool)
+{
+/******************************************************************/
+    tlsf_t *tlsf = (tlsf_t *) mem_pool;
+    void *ptr_aux;
+    unsigned int cpsize;
+    bhdr_t *b, *tmp_b, *next_b;
+    int fl, sl;
+    size_t tmp_size;
+
+    if (!ptr) {
+	if (new_size)
+	    return malloc_ex(new_size, mem_pool);
+	return NULL;
+    } else if (!new_size) {
+	free_ex(ptr, mem_pool);
+	return NULL;
+    }
+
+    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
+    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+    new_size = (new_size < MIN_BLOCK_SIZE) ? MIN_BLOCK_SIZE : ROUNDUP_SIZE(new_size);
+    tmp_size = (b->size & BLOCK_SIZE);
+    if (new_size <= tmp_size) {
+	TLSF_REMOVE_SIZE(tlsf, b);
+	if (next_b->size & FREE_BLOCK) {
+	    MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
+	    EXTRACT_BLOCK(next_b, tlsf, fl, sl);
+	    tmp_size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+	    next_b = GET_NEXT_BLOCK(next_b->ptr.buffer, next_b->size & BLOCK_SIZE);
+	    /* We always re-enter this free block because tmp_size will
+	       be greater than sizeof (bhdr_t) */
+	}
+	tmp_size -= new_size;
+	if (tmp_size >= sizeof(bhdr_t)) {
+	    tmp_size -= BHDR_OVERHEAD;
+	    tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
+	    tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
+	    next_b->prev_hdr = tmp_b;
+	    next_b->size |= PREV_FREE;
+	    MAPPING_INSERT(tmp_size, &fl, &sl);
+	    INSERT_BLOCK(tmp_b, tlsf, fl, sl);
+	    b->size = new_size | (b->size & PREV_STATE);
+	}
+	TLSF_ADD_SIZE(tlsf, b);
+	return (void *) b->ptr.buffer;
+    }
+    if ((next_b->size & FREE_BLOCK)) {
+	if (new_size <= (tmp_size + (next_b->size & BLOCK_SIZE))) {
+	    TLSF_REMOVE_SIZE(tlsf, b);
+	    MAPPING_INSERT(next_b->size & BLOCK_SIZE, &fl, &sl);
+	    EXTRACT_BLOCK(next_b, tlsf, fl, sl);
+	    b->size += (next_b->size & BLOCK_SIZE) + BHDR_OVERHEAD;
+	    next_b = GET_NEXT_BLOCK(b->ptr.buffer, b->size & BLOCK_SIZE);
+	    next_b->prev_hdr = b;
+	    next_b->size &= ~PREV_FREE;
+	    tmp_size = (b->size & BLOCK_SIZE) - new_size;
+	    if (tmp_size >= sizeof(bhdr_t)) {
+		tmp_size -= BHDR_OVERHEAD;
+		tmp_b = GET_NEXT_BLOCK(b->ptr.buffer, new_size);
+		tmp_b->size = tmp_size | FREE_BLOCK | PREV_USED;
+		next_b->prev_hdr = tmp_b;
+		next_b->size |= PREV_FREE;
+		MAPPING_INSERT(tmp_size, &fl, &sl);
+		INSERT_BLOCK(tmp_b, tlsf, fl, sl);
+		b->size = new_size | (b->size & PREV_STATE);
+	    }
+	    TLSF_ADD_SIZE(tlsf, b);
+	    return (void *) b->ptr.buffer;
+	}
+    }
+
+    if (!(ptr_aux = malloc_ex(new_size, mem_pool))){
+	return NULL;
+    }
+
+    cpsize = ((b->size & BLOCK_SIZE) > new_size) ? new_size : (b->size & BLOCK_SIZE);
+
+    memcpy(ptr_aux, ptr, cpsize);
+
+    free_ex(ptr, mem_pool);
+    return ptr_aux;
+}
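+
+/*
+ * Note: realloc_ex() first tries to resize in place (shrinking the
+ * block, or absorbing a free successor), and only falls back to
+ * malloc_ex() + memcpy() + free_ex() when neither is possible.
+ */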
+
+
+/******************************************************************/
+void *calloc_ex(size_t nelem, size_t elem_size, void *mem_pool)
+{
+/******************************************************************/
+    void *ptr;
+
+    if (nelem == 0 || elem_size == 0)
+	return NULL;
+
+    /* Reject requests whose total size would overflow a size_t. */
+    if (nelem > ((size_t) -1) / elem_size)
+	return NULL;
+
+    if (!(ptr = malloc_ex(nelem * elem_size, mem_pool)))
+	return NULL;
+    memset(ptr, 0, nelem * elem_size);
+
+    return ptr;
+}
+
+
+/******************************************************************/
+size_t malloc_usable_size_ex(void *ptr, void *pool)
+{
+/******************************************************************/
+    bhdr_t *b;
+
+    if (!ptr) {
+	return 0;
+    }
+    b = (bhdr_t *) ((char *) ptr - BHDR_OVERHEAD);
+    if ((b->size & BLOCK_STATE) == FREE_BLOCK)
+	return 0;
+    return b->size & BLOCK_SIZE;
+}
+
+
+#if _DEBUG_TLSF_
+
+/***************  DEBUG FUNCTIONS   **************/
+
+/* The following functions have been designed to ease the debugging of */
+/* the TLSF structure.  They are of little use for non-development     */
+/* purposes.  To enable them, _DEBUG_TLSF_ must be set.                */
+
+extern void dump_memory_region(unsigned char *mem_ptr, unsigned int size);
+extern void print_block(bhdr_t * b);
+extern void print_tlsf(tlsf_t * tlsf);
+void print_all_blocks(tlsf_t * tlsf);
+
+void dump_memory_region(unsigned char *mem_ptr, unsigned int size)
+{
+
+    unsigned long begin = (unsigned long) mem_ptr;
+    unsigned long end = (unsigned long) mem_ptr + size;
+    int column = 0;
+
+    begin >>= 2;
+    begin <<= 2;
+
+    end >>= 2;
+    end++;
+    end <<= 2;
+
+    PRINT_MSG("\nMemory region dumped: 0x%lx - 0x%lx\n\n", begin, end);
+
+    column = 0;
+    PRINT_MSG("0x%lx ", begin);
+
+    while (begin < end) {
+	if (((unsigned char *) begin)[0] == 0)
+	    PRINT_MSG("00");
+	else
+	    PRINT_MSG("%02x", ((unsigned char *) begin)[0]);
+	if (((unsigned char *) begin)[1] == 0)
+	    PRINT_MSG("00 ");
+	else
+	    PRINT_MSG("%02x ", ((unsigned char *) begin)[1]);
+	begin += 2;
+	column++;
+	if (column == 8) {
+	    PRINT_MSG("\n0x%lx ", begin);
+	    column = 0;
+	}
+
+    }
+    PRINT_MSG("\n\n");
+}
+
+void print_block(bhdr_t * b)
+{
+    if (!b)
+	return;
+    PRINT_MSG(">> [%p] (", b);
+    if ((b->size & BLOCK_SIZE))
+	PRINT_MSG("%lu bytes, ", (unsigned long) (b->size & BLOCK_SIZE));
+    else
+	PRINT_MSG("sentinel, ");
+    if ((b->size & BLOCK_STATE) == FREE_BLOCK)
+	PRINT_MSG("free [%p, %p], ", b->ptr.free_ptr.prev, b->ptr.free_ptr.next);
+    else
+	PRINT_MSG("used, ");
+    if ((b->size & PREV_STATE) == PREV_FREE)
+	PRINT_MSG("prev. free [%p])\n", b->prev_hdr);
+    else
+	PRINT_MSG("prev used)\n");
+}
+
+void print_tlsf(tlsf_t * tlsf)
+{
+    bhdr_t *next;
+    int i, j;
+
+    PRINT_MSG("\nTLSF at %p\n", tlsf);
+
+    PRINT_MSG("FL bitmap: 0x%x\n\n", (unsigned) tlsf->fl_bitmap);
+
+    for (i = 0; i < REAL_FLI; i++) {
+	if (tlsf->sl_bitmap[i])
+	    PRINT_MSG("SL bitmap 0x%x\n", (unsigned) tlsf->sl_bitmap[i]);
+	for (j = 0; j < MAX_SLI; j++) {
+	    next = tlsf->matrix[i][j];
+	    if (next)
+		PRINT_MSG("-> [%d][%d]\n", i, j);
+	    while (next) {
+		print_block(next);
+		next = next->ptr.free_ptr.next;
+	    }
+	}
+    }
+}
+
+void print_all_blocks(tlsf_t * tlsf)
+{
+    area_info_t *ai;
+    bhdr_t *next;
+    PRINT_MSG("\nTLSF at %p\nALL BLOCKS\n\n", tlsf);
+    ai = tlsf->area_head;
+    while (ai) {
+	next = (bhdr_t *) ((char *) ai - BHDR_OVERHEAD);
+	while (next) {
+	    print_block(next);
+	    if ((next->size & BLOCK_SIZE))
+		next = GET_NEXT_BLOCK(next->ptr.buffer, next->size & BLOCK_SIZE);
+	    else
+		next = NULL;
+	}
+	ai = ai->next;
+    }
+}
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.h b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.h
new file mode 100644
index 0000000..aac4769
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/tlsf/tlsf.h
@@ -0,0 +1,43 @@
+/*
+ * Two Levels Segregate Fit memory allocator (TLSF)
+ * Version 2.4.6
+ *
+ * Written by Miguel Masmano Tello <mimastel@doctor.upv.es>
+ *
+ * Thanks to Ismael Ripoll for his suggestions and reviews
+ *
+ * Copyright (C) 2008, 2007, 2006, 2005, 2004
+ *
+ * This code is released using a dual license strategy: GPL/LGPL
+ * You can choose the licence that better fits your requirements.
+ *
+ * Released under the terms of the GNU General Public License Version 2.0
+ * Released under the terms of the GNU Lesser General Public License Version 2.1
+ *
+ */
+
+#ifndef _TLSF_H_
+#define _TLSF_H_
+
+#include <sys/types.h>
+
+/* A basic heap size which won't be rejected by init_memory_pool(). */
+#define MIN_TLSF_HEAPSZ 65536
+
+extern size_t init_memory_pool(size_t, void *);
+extern size_t get_used_size(void *);
+extern size_t get_max_size(void *);
+extern void destroy_memory_pool(void *);
+extern size_t add_new_area(void *, size_t, void *);
+extern void *malloc_ex(size_t, void *);
+extern void free_ex(void *, void *);
+extern void *realloc_ex(void *, size_t, void *);
+extern void *calloc_ex(size_t, size_t, void *);
+
+extern void *tlsf_malloc(size_t size);
+extern void tlsf_free(void *ptr);
+extern void *tlsf_realloc(void *ptr, size_t size);
+extern void *tlsf_calloc(size_t nelem, size_t elem_size);
+extern size_t malloc_usable_size_ex(void *ptr, void *pool);
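+
+/*
+ * Minimal usage sketch (illustrative): managing a private pool through
+ * the *_ex() entry points.  init_memory_pool() returns (size_t)-1 on
+ * error, otherwise the usable size of the initial free block; the pool
+ * storage is assumed to be word-aligned.
+ *
+ *	static char heap[MIN_TLSF_HEAPSZ];
+ *	void *p;
+ *
+ *	if (init_memory_pool(sizeof(heap), heap) == (size_t)-1)
+ *		return;
+ *	p = malloc_ex(128, heap);
+ *	free_ex(p, heap);
+ *	destroy_memory_pool(heap);
+ */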
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/lib/boilerplate/version.c b/kernel/xenomai-v3.2.4/lib/boilerplate/version.c
new file mode 100644
index 0000000..d28c496
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/boilerplate/version.c
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <xeno_config.h>
+#include "git-stamp.h"
+
+#ifndef GIT_STAMP
+#define devel_suffix  ""
+#else
+#define devel_suffix  " -- " GIT_STAMP
+#endif
+
+#ifdef CONFIG_XENO_COBALT
+#define core_suffix "/cobalt v"
+#else /* CONFIG_XENO_MERCURY */
+#define core_suffix "/mercury v"
+#endif
+
+const char *xenomai_version_string = PACKAGE_NAME \
+	core_suffix PACKAGE_VERSION devel_suffix;
+
+#ifdef __PROGRAM__
+
+#include <stdio.h>
+#include <string.h>
+
+int main(int argc, char *const argv[])
+{
+	int ret = puts(xenomai_version_string) == EOF;
+
+	if (ret == 0 && argc > 1 &&
+	    (strcmp(argv[1], "-a") == 0 || strcmp(argv[1], "--all") == 0))
+		printf("Target: %s\nCompiler: %s\nBuild args: %s\n",
+		       CONFIG_XENO_HOST_STRING,
+		       CONFIG_XENO_COMPILER,
+		       CONFIG_XENO_BUILD_ARGS);
+
+	return ret;
+}
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/COPYING b/kernel/xenomai-v3.2.4/lib/cobalt/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/Makefile.am
new file mode 100644
index 0000000..b3003cd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/Makefile.am
@@ -0,0 +1,68 @@
+pkgconfigdir = $(libdir)/pkgconfig
+
+noinst_HEADERS =	\
+	current.h	\
+	umm.h		\
+	internal.h
+
+lib_LTLIBRARIES = libcobalt.la libmodechk.la
+
+libcobalt_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 2:0:0 -lpthread -lrt
+
+libcobalt_la_LIBADD = \
+	arch/@XENO_TARGET_ARCH@/libarch.la	\
+	../boilerplate/libboilerplate.la
+
+libcobalt_la_SOURCES =		\
+	attr.c			\
+	clock.c			\
+	cond.c			\
+	current.c		\
+	init.c			\
+	internal.c		\
+	mq.c			\
+	mutex.c			\
+	parse_vdso.c		\
+	printf.c		\
+	rtdm.c			\
+	sched.c			\
+	select.c		\
+	semaphore.c		\
+	signal.c		\
+	sigshadow.c		\
+	thread.c		\
+	ticks.c			\
+	timer.c			\
+	timerfd.c		\
+	trace.c			\
+	umm.c			\
+	wrappers.c
+
+libcobalt_la_CPPFLAGS =			\
+	@XENO_COBALT_CFLAGS@		\
+	-I$(top_srcdir)/include/cobalt	\
+	-I$(top_srcdir)/include
+
+libmodechk_la_LIBADD = libcobalt.la
+
+libmodechk_la_SOURCES =	\
+	malloc.c	\
+	malloc-nowrap.c
+
+libmodechk_la_CPPFLAGS =		\
+	@XENO_COBALT_CFLAGS@		\
+	-I$(top_srcdir)/include/cobalt	\
+	-I$(top_srcdir)/include
+
+install-data-local:
+	$(mkinstalldirs) $(DESTDIR)$(libdir)
+	$(INSTALL_DATA) $(srcdir)/cobalt.wrappers $(DESTDIR)$(libdir)
+	$(INSTALL_DATA) $(srcdir)/modechk.wrappers $(DESTDIR)$(libdir)
+
+uninstall-local:
+	$(RM) $(DESTDIR)$(libdir)/cobalt.wrappers
+	$(RM) $(DESTDIR)$(libdir)/modechk.wrappers
+
+EXTRA_DIST = cobalt.wrappers modechk.wrappers
+
+SUBDIRS = arch
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/Makefile.am
new file mode 100644
index 0000000..e0244f9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/Makefile.am
@@ -0,0 +1,4 @@
+
+SUBDIRS = @XENO_TARGET_ARCH@
+
+DIST_SUBDIRS = arm arm64 powerpc x86
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/Makefile.am
new file mode 100644
index 0000000..a5095be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/Makefile.am
@@ -0,0 +1,13 @@
+noinst_LTLIBRARIES = libarch.la
+
+libarch_la_LDFLAGS = @XENO_LIB_LDFLAGS@
+
+libarch_la_SOURCES = features.c
+
+libarch_la_CPPFLAGS =			\
+	@XENO_COBALT_CFLAGS@ 		\
+	-I$(srcdir)/../..		\
+	-I$(top_srcdir)/include/cobalt	\
+	-I$(top_srcdir)/include
+
+SUBDIRS = include
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/features.c b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/features.c
new file mode 100644
index 0000000..b4eddaa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/features.c
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2011 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <limits.h>
+#include <cobalt/wrappers.h>
+#include <cobalt/ticks.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/tsc.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/uapi/fptest.h>
+#include "internal.h"
+
+struct __xn_full_tscinfo __xn_tscinfo = {
+	.kinfo = {
+		.counter = NULL,
+	},
+};
+
+void cobalt_arch_check_features(struct cobalt_featinfo *finfo)
+{
+	unsigned long phys_addr;
+	unsigned page_size;
+	int err, fd;
+	void *addr;
+
+	if (!cobalt_use_legacy_tsc())
+		return;
+
+	if (__xn_tscinfo.kinfo.counter != NULL)
+		return;
+
+	err = XENOMAI_SYSCALL2(sc_cobalt_archcall,
+			       XENOMAI_SYSARCH_TSCINFO, &__xn_tscinfo.kinfo);
+	if (err)
+		early_panic("missing TSC emulation: %s",
+			     strerror(-err));
+
+	fd = __STD(open("/dev/mem", O_RDONLY | O_SYNC));
+	if (fd == -1)
+		early_panic("failed open(/dev/mem): %s", strerror(errno));
+
+	page_size = sysconf(_SC_PAGESIZE);
+
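+	/*
+	 * The vector page advertises the kuser helper version at
+	 * 0xffff0ffc, with 32 bytes per helper slot; compute the
+	 * address of the TSC helper expected below the standard ones.
+	 */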
+	__xn_tscinfo.kuser_tsc_get =
+		(__xn_rdtsc_t *)(0xffff1004 -
+				((*(unsigned *)(0xffff0ffc) + 3) << 5));
+
+	phys_addr = (unsigned long)__xn_tscinfo.kinfo.counter;
+
+	addr = __STD(mmap(NULL, page_size, PROT_READ, MAP_SHARED,
+			  fd, phys_addr & ~(page_size - 1)));
+	if (addr == MAP_FAILED)
+		early_panic("failed mmap(/dev/mem): %s", strerror(errno));
+
+	__xn_tscinfo.kinfo.counter =
+		((volatile unsigned *)
+		 ((char *) addr + (phys_addr & (page_size - 1))));
+
+	__STD(close(fd));
+}
+
+int cobalt_fp_detect(void)
+{
+	char buffer[1024];
+	int features = 0;
+	FILE *fp;
+
+	fp = fopen("/proc/cpuinfo", "r");
+	if (fp == NULL)
+		return 0;
+
+	while (fgets(buffer, sizeof(buffer), fp)) {
+		if (strncmp(buffer, "Features", sizeof("Features") - 1))
+			continue;
+		if (strstr(buffer, "vfp")) {
+			features |= __COBALT_HAVE_VFP;
+			break;
+		}
+	}
+
+	fclose(fp);
+
+	return features;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/Makefile.am
new file mode 100644
index 0000000..5cac5d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = asm
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/Makefile.am
new file mode 100644
index 0000000..55ea661
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = xenomai
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/Makefile.am
new file mode 100644
index 0000000..d308b06
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/Makefile.am
@@ -0,0 +1,5 @@
+
+noinst_HEADERS =	\
+	features.h	\
+	syscall.h	\
+	tsc.h
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/features.h
new file mode 100644
index 0000000..e84b809
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/features.h
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_ARM_FEATURES_H
+#define _LIB_COBALT_ARM_FEATURES_H
+
+#include_next <features.h>
+#include <xeno_config.h>
+
+#if defined(__ARM_ARCH_2__)
+#define __LINUX_ARM_ARCH__ 2
+#endif /* armv2 */
+
+#if defined(__ARM_ARCH_3__)
+#define __LINUX_ARM_ARCH__ 3
+#endif /* armv3 */
+
+#if defined(__ARM_ARCH_4__) || defined(__ARM_ARCH_4T__)
+#define __LINUX_ARM_ARCH__ 4
+#endif /* armv4 */
+
+#if defined(__ARM_ARCH_5__) || defined(__ARM_ARCH_5T__) \
+	|| defined(__ARM_ARCH_5E__) || defined(__ARM_ARCH_5TE__) \
+	|| defined(__ARM_ARCH_5TEJ__)
+#define __LINUX_ARM_ARCH__ 5
+#endif /* armv5 */
+
+#if defined(__ARM_ARCH_6__) || defined(__ARM_ARCH_6K__) \
+	|| defined(__ARM_ARCH_6Z__) || defined(__ARM_ARCH_6ZK__)
+#define __LINUX_ARM_ARCH__ 6
+#endif /* armv6 */
+
+#if defined(__ARM_ARCH_7A__)
+#define __LINUX_ARM_ARCH__ 7
+#endif /* armv7 */
+
+#ifndef __LINUX_ARM_ARCH__
+#error "Could not find current ARM architecture"
+#endif
+
+#if __LINUX_ARM_ARCH__ < 6 && defined(CONFIG_SMP)
+#error "SMP not supported below armv6, compile with -march=armv6 or above"
+#endif
+
+#include <asm/xenomai/uapi/features.h>
+
+int cobalt_fp_detect(void);
+
+#endif /* !_LIB_COBALT_ARM_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..0941e6c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/syscall.h
@@ -0,0 +1,140 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_ARM_SYSCALL_H
+#define _LIB_COBALT_ARM_SYSCALL_H
+
+#include <xeno_config.h>
+#include <errno.h>
+#include <cobalt/uapi/syscall.h>
+
+/*
+ * Some of the following macros have been adapted from Linux's
+ * implementation of the syscall mechanism in <asm-arm/unistd.h>:
+ */
+#if defined(HAVE_TLS) && __GNUC__ == 4 && __GNUC_MINOR__ >= 3
+#error TLS support (__thread) is broken with GCC >= 4.3, use --disable-tls when configuring
+#endif
+
+#define LOADARGS_0(syscode, dummy...)	\
+	__a0 = (unsigned long) (syscode)
+#define LOADARGS_1(syscode, arg1)	\
+	LOADARGS_0(syscode);		\
+	__a1 = (unsigned long) (arg1)
+#define LOADARGS_2(syscode, arg1, arg2)	\
+	LOADARGS_1(syscode, arg1);	\
+	__a2 = (unsigned long) (arg2)
+#define LOADARGS_3(syscode, arg1, arg2, arg3)	\
+	LOADARGS_2(syscode, arg1, arg2);	\
+	__a3 = (unsigned long) (arg3)
+#define LOADARGS_4(syscode, arg1, arg2, arg3, arg4)	\
+	LOADARGS_3(syscode, arg1, arg2, arg3);		\
+	__a4 = (unsigned long) (arg4)
+#define LOADARGS_5(syscode, arg1, arg2, arg3, arg4, arg5)	\
+	LOADARGS_4(syscode, arg1, arg2, arg3, arg4);		\
+	__a5 = (unsigned long) (arg5)
+
+#define CLOBBER_REGS_0 "r0"
+#define CLOBBER_REGS_1 CLOBBER_REGS_0, "r1"
+#define CLOBBER_REGS_2 CLOBBER_REGS_1, "r2"
+#define CLOBBER_REGS_3 CLOBBER_REGS_2, "r3"
+#define CLOBBER_REGS_4 CLOBBER_REGS_3, "r4"
+#define CLOBBER_REGS_5 CLOBBER_REGS_4, "r5"
+
+#define LOADREGS_0 __r0 = __a0
+#define LOADREGS_1 LOADREGS_0; __r1 = __a1
+#define LOADREGS_2 LOADREGS_1; __r2 = __a2
+#define LOADREGS_3 LOADREGS_2; __r3 = __a3
+#define LOADREGS_4 LOADREGS_3; __r4 = __a4
+#define LOADREGS_5 LOADREGS_4; __r5 = __a5
+
+#define ASM_INDECL_0							\
+	unsigned long __a0; register unsigned long __r0  __asm__ ("r0");
+#define ASM_INDECL_1 ASM_INDECL_0;					\
+	unsigned long __a1; register unsigned long __r1  __asm__ ("r1")
+#define ASM_INDECL_2 ASM_INDECL_1;					\
+	unsigned long __a2; register unsigned long __r2  __asm__ ("r2")
+#define ASM_INDECL_3 ASM_INDECL_2;					\
+	unsigned long __a3; register unsigned long __r3  __asm__ ("r3")
+#define ASM_INDECL_4 ASM_INDECL_3;					\
+	unsigned long __a4; register unsigned long __r4  __asm__ ("r4")
+#define ASM_INDECL_5 ASM_INDECL_4;					\
+	unsigned long __a5; register unsigned long __r5  __asm__ ("r5")
+
+#define ASM_INPUT_0 "0" (__r0)
+#define ASM_INPUT_1 ASM_INPUT_0, "r" (__r1)
+#define ASM_INPUT_2 ASM_INPUT_1, "r" (__r2)
+#define ASM_INPUT_3 ASM_INPUT_2, "r" (__r3)
+#define ASM_INPUT_4 ASM_INPUT_3, "r" (__r4)
+#define ASM_INPUT_5 ASM_INPUT_4, "r" (__r5)
+
+#define __sys2(x)	#x
+#define __sys1(x)	__sys2(x)
+
+#ifdef __ARM_EABI__
+#define __SYS_REG_DECL unsigned long __r7 = XENO_ARM_SYSCALL
+#define __SYS_REG_INPUT , [__r7] "r" (__r7)
+#define __SYS_CALLOP "push {r7}; mov %%r7,%[__r7]; swi\t0; pop {r7}"
+#else
+#define __SYS_REG_DECL
+#define __SYS_REG_INPUT
+#define __NR_OABI_SYSCALL_BASE	0x900000
+#define __SYS_CALLOP "swi\t" __sys1(__NR_OABI_SYSCALL_BASE + XENO_ARM_SYSCALL) ""
+#endif
+
+#define XENOMAI_DO_SYSCALL(nr, op, args...)				\
+	({								\
+		ASM_INDECL_##nr;					\
+		__SYS_REG_DECL;						\
+		LOADARGS_##nr(__xn_syscode(op), args);			\
+		__asm__ __volatile__ ("" : /* */ : /* */ :		\
+				      CLOBBER_REGS_##nr);		\
+		LOADREGS_##nr;						\
+		__asm__ __volatile__ (					\
+			__SYS_CALLOP					\
+			: "=r" (__r0)					\
+			: ASM_INPUT_##nr __SYS_REG_INPUT		\
+			: "memory");					\
+		(int) __r0;						\
+	})
+
+#define XENOMAI_SYSCALL0(op)			\
+	XENOMAI_DO_SYSCALL(0,op)
+#define XENOMAI_SYSCALL1(op,a1)			\
+	XENOMAI_DO_SYSCALL(1,op,a1)
+#define XENOMAI_SYSCALL2(op,a1,a2)		\
+	XENOMAI_DO_SYSCALL(2,op,a1,a2)
+#define XENOMAI_SYSCALL3(op,a1,a2,a3)		\
+	XENOMAI_DO_SYSCALL(3,op,a1,a2,a3)
+#define XENOMAI_SYSCALL4(op,a1,a2,a3,a4)	\
+	XENOMAI_DO_SYSCALL(4,op,a1,a2,a3,a4)
+#define XENOMAI_SYSCALL5(op,a1,a2,a3,a4,a5)	\
+	XENOMAI_DO_SYSCALL(5,op,a1,a2,a3,a4,a5)
+#define XENOMAI_SYSBIND(breq)			\
+	XENOMAI_DO_SYSCALL(1,sc_cobalt_bind,breq)
+
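+/*
+ * Reading aid: for XENOMAI_SYSCALL2(op, a1, a2), LOADARGS_2 stores
+ * the marked syscall code and both arguments, LOADREGS_2 binds them
+ * to r0-r2, and __SYS_CALLOP issues the trap ("swi 0", with r7
+ * holding XENO_ARM_SYSCALL on EABI). The result comes back in r0.
+ */
+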
+#endif /* !_LIB_COBALT_ARM_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/time.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/time.h
new file mode 100644
index 0000000..34df7e9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/time.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LIB_COBALT_ARM_TIME_H
+#define _LIB_COBALT_ARM_TIME_H
+
+#define COBALT_VDSO_VERSION	"LINUX_2.6"
+#define COBALT_VDSO_GETTIME	"__vdso_clock_gettime"
+
+#endif /* !_LIB_COBALT_ARM_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/tsc.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/tsc.h
new file mode 100644
index 0000000..2be4009
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm/include/asm/xenomai/tsc.h
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Copyright (C) 2007 Sebastian Smolorz <sesmo@gmx.net>
+ *	Support for TSC emulation in user space for decrementing counters
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_ARM_TSC_H
+#define _LIB_COBALT_ARM_TSC_H
+
+#include <asm/xenomai/uapi/tsc.h>
+#include <asm/xenomai/features.h>
+
+/*
+ * Keeping kuser_tsc_get and kinfo.counter in the same struct takes
+ * fewer address computations in PIC code, which speeds up the read.
+ */
+typedef unsigned long long __xn_rdtsc_t(volatile unsigned *vaddr);
+struct __xn_full_tscinfo {
+	__xn_rdtsc_t *kuser_tsc_get;
+	struct __xn_tscinfo kinfo;
+};
+extern struct __xn_full_tscinfo __xn_tscinfo;
+
+static inline __attribute__((always_inline))
+unsigned long long cobalt_read_legacy_tsc(void)
+{
+	return __xn_tscinfo.kuser_tsc_get(__xn_tscinfo.kinfo.counter);
+}
+
+#endif /* !_LIB_COBALT_ARM_TSC_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/Makefile.am
new file mode 100644
index 0000000..a5095be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/Makefile.am
@@ -0,0 +1,13 @@
+noinst_LTLIBRARIES = libarch.la
+
+libarch_la_LDFLAGS = @XENO_LIB_LDFLAGS@
+
+libarch_la_SOURCES = features.c
+
+libarch_la_CPPFLAGS =			\
+	@XENO_COBALT_CFLAGS@ 		\
+	-I$(srcdir)/../..		\
+	-I$(top_srcdir)/include/cobalt	\
+	-I$(top_srcdir)/include
+
+SUBDIRS = include
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/features.c b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/features.c
new file mode 100644
index 0000000..5baabfb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/features.c
@@ -0,0 +1,101 @@
+/*
+ * Copyright (C) 2011 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <string.h>
+#include <unistd.h>
+#include <limits.h>
+#include <cobalt/wrappers.h>
+#include <cobalt/ticks.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/tsc.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/uapi/fptest.h>
+#include "internal.h"
+
+struct __xn_full_tscinfo __xn_tscinfo = {
+	.kinfo = {
+		.counter = NULL,
+	},
+};
+
+void cobalt_arch_check_features(struct cobalt_featinfo *finfo)
+{
+	unsigned long phys_addr;
+	unsigned page_size;
+	int err, fd;
+	void *addr;
+
+	if (!cobalt_use_legacy_tsc())
+		return;
+
+	if (__xn_tscinfo.kinfo.counter != NULL)
+		return;
+
+	err = XENOMAI_SYSCALL2(sc_cobalt_archcall,
+			       XENOMAI_SYSARCH_TSCINFO, &__xn_tscinfo.kinfo);
+	if (err)
+		early_panic("missing TSC emulation: %s",
+			     strerror(-err));
+
+	fd = __STD(open("/dev/mem", O_RDONLY | O_SYNC));
+	if (fd == -1)
+		early_panic("failed open(/dev/mem): %s", strerror(errno));
+
+	page_size = sysconf(_SC_PAGESIZE);
+
+	phys_addr = (unsigned long)__xn_tscinfo.kinfo.counter;
+
+	addr = __STD(mmap(NULL, page_size, PROT_READ, MAP_SHARED,
+			  fd, phys_addr & ~(page_size - 1)));
+	if (addr == MAP_FAILED)
+		early_panic("failed mmap(/dev/mem): %s", strerror(errno));
+
+	__xn_tscinfo.kinfo.counter =
+		((volatile unsigned *)
+		 ((char *) addr + (phys_addr & (page_size - 1))));
+
+	__STD(close(fd));
+}
+
+int cobalt_fp_detect(void)
+{
+	char buffer[1024];
+	int features = 0;
+	FILE *fp;
+
+	fp = fopen("/proc/cpuinfo", "r");
+	if (fp == NULL)
+		return 0;
+
+	while (fgets(buffer, sizeof(buffer), fp)) {
+		if (strncmp(buffer, "Features", sizeof("Features") - 1))
+			continue;
+		if (strstr(buffer, "fp")) {
+			features |= __COBALT_HAVE_FPU;
+			break;
+		}
+	}
+
+	fclose(fp);
+
+	return features;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/Makefile.am
new file mode 100644
index 0000000..5cac5d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = asm
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/Makefile.am
new file mode 100644
index 0000000..55ea661
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = xenomai
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/Makefile.am
new file mode 100644
index 0000000..d308b06
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/Makefile.am
@@ -0,0 +1,5 @@
+
+noinst_HEADERS =	\
+	features.h	\
+	syscall.h	\
+	tsc.h
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/features.h
new file mode 100644
index 0000000..6dfe64c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/features.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_ARM64_FEATURES_H
+#define _LIB_COBALT_ARM64_FEATURES_H
+
+#include_next <features.h>
+#include <xeno_config.h>
+
+#define __LINUX_ARM_ARCH__ 8
+
+#include <asm/xenomai/uapi/features.h>
+
+int cobalt_fp_detect(void);
+
+#endif /* !_LIB_COBALT_ARM64_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..d896977
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/syscall.h
@@ -0,0 +1,110 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) Siemens AG, 2021
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_ARM64_SYSCALL_H
+#define _LIB_COBALT_ARM64_SYSCALL_H
+
+#include <xeno_config.h>
+#include <errno.h>
+#include <cobalt/uapi/syscall.h>
+
+#define __xn_syscall_args0
+#define __xn_syscall_args1 , unsigned long __a1
+#define __xn_syscall_args2 __xn_syscall_args1, unsigned long __a2
+#define __xn_syscall_args3 __xn_syscall_args2, unsigned long __a3
+#define __xn_syscall_args4 __xn_syscall_args3, unsigned long __a4
+#define __xn_syscall_args5 __xn_syscall_args4, unsigned long __a5
+
+#define __emit_syscall0(__args...)					\
+	register unsigned int __scno __asm__("w8") = __xn_syscode(__op); \
+	register unsigned long __res __asm__("x0");			\
+	__asm__ __volatile__ (						\
+		"svc 0;\n\t"						\
+		: "=r" (__res)						\
+		: "r" (__scno), ##__args				\
+		: "cc", "memory");					\
+	return __res
+#define __emit_syscall1(__args...)					\
+	register unsigned long __x0 __asm__("x0") = __a1;		\
+	__emit_syscall0("r" (__x0),  ##__args)
+#define __emit_syscall2(__args...)					\
+	register unsigned long __x1 __asm__("x1") = __a2;		\
+	__emit_syscall1("r" (__x1), ##__args)
+#define __emit_syscall3(__args...)					\
+	register unsigned long __x2 __asm__("x2") = __a3;		\
+	__emit_syscall2("r" (__x2), ##__args)
+#define __emit_syscall4(__args...)					\
+	register unsigned long __x3 __asm__("x3") = __a4;		\
+	__emit_syscall3("r" (__x3), ##__args)
+#define __emit_syscall5(__args...)	\
+	register unsigned long __x4 __asm__("x4") = __a5;		\
+	__emit_syscall4("r" (__x4), ##__args)
+
+#define DEFINE_XENOMAI_SYSCALL(__argnr)					\
+static inline long __attribute__((always_inline))			\
+__xenomai_do_syscall##__argnr(unsigned int __op				\
+			      __xn_syscall_args##__argnr)		\
+{									\
+	__emit_syscall##__argnr();					\
+}
+
+DEFINE_XENOMAI_SYSCALL(0)
+DEFINE_XENOMAI_SYSCALL(1)
+DEFINE_XENOMAI_SYSCALL(2)
+DEFINE_XENOMAI_SYSCALL(3)
+DEFINE_XENOMAI_SYSCALL(4)
+DEFINE_XENOMAI_SYSCALL(5)
+
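+/*
+ * Reading aid: __xenomai_do_syscall2(op, a1, a2) places the marked
+ * syscall code in w8 and the arguments in x0/x1, issues "svc 0",
+ * then returns the kernel's result from x0.
+ */
+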
+#define XENOMAI_SYSCALL0(__op)					\
+	__xenomai_do_syscall0(__op)
+#define XENOMAI_SYSCALL1(__op, __a1)				\
+	__xenomai_do_syscall1(__op,				\
+			      (unsigned long)__a1)
+#define XENOMAI_SYSCALL2(__op, __a1, __a2)			\
+	__xenomai_do_syscall2(__op,				\
+			      (unsigned long)__a1,		\
+			      (unsigned long)__a2)
+#define XENOMAI_SYSCALL3(__op, __a1, __a2, __a3)		\
+	__xenomai_do_syscall3(__op,				\
+			      (unsigned long)__a1,		\
+			      (unsigned long)__a2,		\
+			      (unsigned long)__a3)
+#define XENOMAI_SYSCALL4(__op, __a1, __a2, __a3, __a4)		\
+	__xenomai_do_syscall4(__op,				\
+			      (unsigned long)__a1,		\
+			      (unsigned long)__a2,		\
+			      (unsigned long)__a3,		\
+			      (unsigned long)__a4)
+#define XENOMAI_SYSCALL5(__op, __a1, __a2, __a3, __a4, __a5)	\
+	__xenomai_do_syscall5(__op,				\
+			      (unsigned long)__a1,		\
+			      (unsigned long)__a2,		\
+			      (unsigned long)__a3,		\
+			      (unsigned long)__a4,		\
+			      (unsigned long)__a5)
+#define XENOMAI_SYSBIND(__breq)					\
+	__xenomai_do_syscall1(sc_cobalt_bind,			\
+			      (unsigned long)__breq)
+
+#endif /* !_LIB_COBALT_ARM64_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/time.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/time.h
new file mode 100644
index 0000000..d0dad6d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/time.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LIB_COBALT_ARM64_TIME_H
+#define _LIB_COBALT_ARM64_TIME_H
+
+#define COBALT_VDSO_VERSION	"LINUX_2.6.39"
+#define COBALT_VDSO_GETTIME	"__kernel_clock_gettime"
+
+#endif /* !_LIB_COBALT_ARM64_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/tsc.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/tsc.h
new file mode 100644
index 0000000..e664adb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/arm64/include/asm/xenomai/tsc.h
@@ -0,0 +1,58 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * ARM port
+ *   Copyright (C) 2005 Stelian Pop
+ *
+ * Copyright (C) 2007 Sebastian Smolorz <sesmo@gmx.net>
+ *	Support for TSC emulation in user space for decrementing counters
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_ARM64_TSC_H
+#define _LIB_COBALT_ARM64_TSC_H
+
+#include <asm/xenomai/uapi/tsc.h>
+#include <asm/xenomai/features.h>
+#include <inttypes.h>
+#include <sys/time.h>
+
+typedef unsigned long long __xn_rdtsc_t(volatile unsigned *vaddr);
+struct __xn_full_tscinfo {
+	struct __xn_tscinfo kinfo;
+};
+extern struct __xn_full_tscinfo __xn_tscinfo;
+
+static inline uint64_t get_counter(void)
+{
+	uint64_t cval;
+
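+	/*
+	 * cntvct_el0 is the generic timer's virtual counter; the isb
+	 * barriers keep the read ordered with surrounding instructions.
+	 */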
+	asm volatile("isb; mrs %0, cntvct_el0; isb; " : "=r" (cval) :: "memory");
+
+	return cval;
+}
+
+static inline __attribute__((always_inline))
+unsigned long long cobalt_read_legacy_tsc(void)
+{
+	return get_counter();
+}
+
+#endif /* !_LIB_COBALT_ARM64_TSC_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/Makefile.am
new file mode 100644
index 0000000..a5095be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/Makefile.am
@@ -0,0 +1,13 @@
+noinst_LTLIBRARIES = libarch.la
+
+libarch_la_LDFLAGS = @XENO_LIB_LDFLAGS@
+
+libarch_la_SOURCES = features.c
+
+libarch_la_CPPFLAGS =			\
+	@XENO_COBALT_CFLAGS@ 		\
+	-I$(srcdir)/../..		\
+	-I$(top_srcdir)/include/cobalt	\
+	-I$(top_srcdir)/include
+
+SUBDIRS = include
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/features.c b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/features.c
new file mode 100644
index 0000000..d795a19
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/features.c
@@ -0,0 +1,24 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+void cobalt_arch_check_features(struct cobalt_featinfo *finfo)
+{
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/Makefile.am
new file mode 100644
index 0000000..5cac5d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = asm
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/Makefile.am
new file mode 100644
index 0000000..55ea661
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = xenomai
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/Makefile.am
new file mode 100644
index 0000000..d308b06
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/Makefile.am
@@ -0,0 +1,5 @@
+
+noinst_HEADERS =	\
+	features.h	\
+	syscall.h	\
+	tsc.h
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/features.h
new file mode 100644
index 0000000..4b75707
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/features.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_POWERPC_FEATURES_H
+#define _LIB_COBALT_POWERPC_FEATURES_H
+
+#include_next <features.h>
+#include <xeno_config.h>
+#include <asm/xenomai/uapi/features.h>
+
+static inline int cobalt_fp_detect(void)
+{
+	return 0;
+}
+
+#endif /* !_LIB_COBALT_POWERPC_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..1330c5d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/syscall.h
@@ -0,0 +1,87 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _LIB_COBALT_POWERPC_SYSCALL_H
+#define _LIB_COBALT_POWERPC_SYSCALL_H
+
+#include <cobalt/uapi/syscall.h>
+
+/* Some code pulled from glibc's inline syscalls. */
+
+#define LOADARGS_0(syscode, dummy...)			\
+	__sc_0 = (unsigned long)(syscode)
+#define LOADARGS_1(syscode, arg1)			\
+	LOADARGS_0(syscode);				\
+	__sc_3 = (unsigned long) (arg1)
+#define LOADARGS_2(syscode, arg1, arg2)			\
+	LOADARGS_1(syscode, arg1);			\
+	__sc_4 = (unsigned long) (arg2)
+#define LOADARGS_3(syscode, arg1, arg2, arg3)		\
+	LOADARGS_2(syscode, arg1, arg2);		\
+	__sc_5 = (unsigned long) (arg3)
+#define LOADARGS_4(syscode, arg1, arg2, arg3, arg4)	\
+	LOADARGS_3(syscode, arg1, arg2, arg3);		\
+	__sc_6 = (unsigned long) (arg4)
+#define LOADARGS_5(syscode, arg1, arg2, arg3, arg4, arg5) \
+	LOADARGS_4(syscode, arg1, arg2, arg3, arg4);	\
+	__sc_7 = (unsigned long) (arg5)
+
+#define ASM_INPUT_0 "0" (__sc_0)
+#define ASM_INPUT_1 ASM_INPUT_0, "1" (__sc_3)
+#define ASM_INPUT_2 ASM_INPUT_1, "2" (__sc_4)
+#define ASM_INPUT_3 ASM_INPUT_2, "3" (__sc_5)
+#define ASM_INPUT_4 ASM_INPUT_3, "4" (__sc_6)
+#define ASM_INPUT_5 ASM_INPUT_4, "5" (__sc_7)
+
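+/*
+ * After "sc" returns, mfcr captures the condition register: cr0.SO
+ * (bit 28 of the image) flags an error, in which case r3 holds a
+ * positive errno value which the macro negates.
+ */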
+#define XENOMAI_DO_SYSCALL(nr, op, args...)			\
+  ({								\
+	register unsigned long __sc_0  __asm__ ("r0");		\
+	register unsigned long __sc_3  __asm__ ("r3");		\
+	register unsigned long __sc_4  __asm__ ("r4");		\
+	register unsigned long __sc_5  __asm__ ("r5");		\
+	register unsigned long __sc_6  __asm__ ("r6");		\
+	register unsigned long __sc_7  __asm__ ("r7");		\
+								\
+	LOADARGS_##nr(__xn_syscode(op), args);			\
+	__asm__ __volatile__					\
+		("sc           \n\t"				\
+		 "mfcr %0      "				\
+		: "=&r" (__sc_0),				\
+		  "=&r" (__sc_3),  "=&r" (__sc_4),		\
+		  "=&r" (__sc_5),  "=&r" (__sc_6),		\
+		  "=&r" (__sc_7)				\
+		: ASM_INPUT_##nr				\
+		: "cr0", "ctr", "memory",			\
+		  "r8", "r9", "r10","r11", "r12");		\
+	(int)((__sc_0 & (1 << 28)) ? -__sc_3 : __sc_3);		\
+  })
+
+#define XENOMAI_SYSCALL0(op)                XENOMAI_DO_SYSCALL(0,op)
+#define XENOMAI_SYSCALL1(op,a1)             XENOMAI_DO_SYSCALL(1,op,a1)
+#define XENOMAI_SYSCALL2(op,a1,a2)          XENOMAI_DO_SYSCALL(2,op,a1,a2)
+#define XENOMAI_SYSCALL3(op,a1,a2,a3)       XENOMAI_DO_SYSCALL(3,op,a1,a2,a3)
+#define XENOMAI_SYSCALL4(op,a1,a2,a3,a4)    XENOMAI_DO_SYSCALL(4,op,a1,a2,a3,a4)
+#define XENOMAI_SYSCALL5(op,a1,a2,a3,a4,a5) XENOMAI_DO_SYSCALL(5,op,a1,a2,a3,a4,a5)
+#define XENOMAI_SYSBIND(breq)               XENOMAI_DO_SYSCALL(1,sc_cobalt_bind,breq)
+
+#endif /* !_LIB_COBALT_POWERPC_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/time.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/time.h
new file mode 100644
index 0000000..92ba44b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/time.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LIB_COBALT_POWERPC_TIME_H
+#define _LIB_COBALT_POWERPC_TIME_H
+
+#define COBALT_VDSO_VERSION	"LINUX_2.6.15"
+#define COBALT_VDSO_GETTIME	"__kernel_clock_gettime"
+
+#endif /* !_LIB_COBALT_POWERPC_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/tsc.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/tsc.h
new file mode 100644
index 0000000..b4ff852
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/powerpc/include/asm/xenomai/tsc.h
@@ -0,0 +1,42 @@
+/*
+ * Copyright (C) 2001,2002,2003,2004 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_POWERPC_TSC_H
+#define _LIB_COBALT_POWERPC_TSC_H
+
+static inline unsigned long long cobalt_read_legacy_tsc(void)
+{
+	union {
+		unsigned long long t;
+		unsigned long v[2];
+	} u;
+	unsigned long __tbu;
+
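+	/*
+	 * Sample TBU (SPR 269), then TBL (SPR 268), and retry if TBU
+	 * changed in between, yielding a consistent 64-bit timebase.
+	 */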
+	__asm__ __volatile__("1: mfspr %0,269\n"
+			     "mfspr %1,268\n"
+			     "mfspr %2,269\n"
+			     "cmpw %2,%0\n"
+			     "bne- 1b\n":"=r"(u.v[0]),
+			     "=r"(u.v[1]), "=r"(__tbu));
+	return u.t;
+}
+
+#endif /* !_LIB_COBALT_POWERPC_TSC_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/Makefile.am
new file mode 100644
index 0000000..a5095be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/Makefile.am
@@ -0,0 +1,13 @@
+noinst_LTLIBRARIES = libarch.la
+
+libarch_la_LDFLAGS = @XENO_LIB_LDFLAGS@
+
+libarch_la_SOURCES = features.c
+
+libarch_la_CPPFLAGS =			\
+	@XENO_COBALT_CFLAGS@ 		\
+	-I$(srcdir)/../..		\
+	-I$(top_srcdir)/include/cobalt	\
+	-I$(top_srcdir)/include
+
+SUBDIRS = include
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/features.c b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/features.c
new file mode 100644
index 0000000..e7e62da
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/features.c
@@ -0,0 +1,70 @@
+/*
+ * Copyright (C) 2008 Gilles Chanteperdrix <gch@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <string.h>
+#include <boilerplate/atomic.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/uapi/fptest.h>
+#include "internal.h"
+
+void cobalt_arch_check_features(struct cobalt_featinfo *finfo)
+{
+#if defined(__i386__) && defined(CONFIG_XENO_X86_VSYSCALL)
+	size_t n = confstr(_CS_GNU_LIBPTHREAD_VERSION, NULL, 0);
+	if (n > 0) {
+		char buf[n];
+
+		confstr(_CS_GNU_LIBPTHREAD_VERSION, buf, n);
+
+		if (strstr(buf, "NPTL"))
+			return;
+	}
+
+	early_warning("--enable-x86-vsyscall requires NPTL, which does not match");
+	early_warning("your configuration. Please upgrade, or rebuild the");
+	early_panic("Xenomai libraries passing --disable-x86-vsyscall");
+#endif /* __i386__ && CONFIG_XENO_X86_VSYSCALL */
+}
+
+int cobalt_fp_detect(void)
+{
+	char buffer[1024];
+	int features = 0;
+	FILE *fp;
+
+	fp = fopen("/proc/cpuinfo", "r");
+	if (fp == NULL)
+		return 0;
+
+	while (fgets(buffer, sizeof(buffer), fp)) {
+		if (strncmp(buffer, "flags", sizeof("flags") - 1))
+			continue;
+		if (strstr(buffer, "sse2"))
+			features |= __COBALT_HAVE_SSE2;
+		if (strstr(buffer, "avx"))
+			features |= __COBALT_HAVE_AVX;
+		break;
+	}
+
+	fclose(fp);
+
+	return features;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/Makefile.am
new file mode 100644
index 0000000..5cac5d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = asm
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/Makefile.am
new file mode 100644
index 0000000..55ea661
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/Makefile.am
@@ -0,0 +1,2 @@
+
+SUBDIRS = xenomai
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/Makefile.am b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/Makefile.am
new file mode 100644
index 0000000..d308b06
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/Makefile.am
@@ -0,0 +1,5 @@
+
+noinst_HEADERS =	\
+	features.h	\
+	syscall.h	\
+	tsc.h
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/features.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/features.h
new file mode 100644
index 0000000..9a54339
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/features.h
@@ -0,0 +1,27 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_X86_FEATURES_H
+#define _LIB_COBALT_X86_FEATURES_H
+
+#include_next <features.h>
+#include <xeno_config.h>
+#include <asm/xenomai/uapi/features.h>
+
+int cobalt_fp_detect(void);
+
+#endif /* !_LIB_COBALT_X86_FEATURES_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/syscall.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/syscall.h
new file mode 100644
index 0000000..79bf17e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/syscall.h
@@ -0,0 +1,227 @@
+/*
+ * Copyright (C) 2001,2002,2003,2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_X86_SYSCALL_H
+#define _LIB_COBALT_X86_SYSCALL_H
+
+#include <xeno_config.h>
+#include <cobalt/uapi/syscall.h>
+
+/* Some code pulled from glibc's inline syscalls. */
+
+#ifdef __i386__
+
+#ifdef CONFIG_XENO_X86_VSYSCALL
+/*
+ * This form relies on the kernel's vsyscall support in order to use
+ * the most appropriate syscall entry instruction the CPU supports. We
+ * also depend on the NPTL providing us a pointer to the vsyscall DSO
+ * entry point, to which we branch to instead of issuing a trap.
+ * We assume this pointer to be available at %gs:0x10.
+ */
+#define DOSYSCALL  "call *%%gs:0x10\n\t"
+#else /* CONFIG_XENO_X86_VSYSCALL */
+#define DOSYSCALL  "int $0x80\n\t"
+#endif /* CONFIG_XENO_X86_VSYSCALL */
+
+/* The one that cannot fail. */
+#define DOSYSCALLSAFE  "int $0x80\n\t"
+
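+/*
+ * The .L__X'%reg values rank each register as a carrier for the
+ * first argument: 1 = already %ebx, 2 = exchangeable with %ebx,
+ * 3 = %ebx must be saved, loaded and restored. The bpushl, bmovl
+ * and bpopl macros below emit the cheapest sequence for each case.
+ */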
+asm (".L__X'%ebx = 1\n\t"
+     ".L__X'%ecx = 2\n\t"
+     ".L__X'%edx = 2\n\t"
+     ".L__X'%eax = 3\n\t"
+     ".L__X'%esi = 3\n\t"
+     ".L__X'%edi = 3\n\t"
+     ".L__X'%ebp = 3\n\t"
+     ".L__X'%esp = 3\n\t"
+     ".macro bpushl name reg\n\t"
+     ".if 1 - \\name\n\t"
+     ".if 2 - \\name\n\t"
+     "pushl %ebx\n\t"
+     ".else\n\t"
+     "xchgl \\reg, %ebx\n\t"
+     ".endif\n\t"
+     ".endif\n\t"
+     ".endm\n\t"
+     ".macro bpopl name reg\n\t"
+     ".if 1 - \\name\n\t"
+     ".if 2 - \\name\n\t"
+     "popl %ebx\n\t"
+     ".else\n\t"
+     "xchgl \\reg, %ebx\n\t"
+     ".endif\n\t"
+     ".endif\n\t"
+     ".endm\n\t"
+     ".macro bmovl name reg\n\t"
+     ".if 1 - \\name\n\t"
+     ".if 2 - \\name\n\t"
+     "movl \\reg, %ebx\n\t"
+     ".endif\n\t"
+     ".endif\n\t"
+     ".endm\n\t");
+
+#define XENOMAI_DO_SYSCALL(nr, op, args...)			\
+({								\
+	unsigned __resultvar;					\
+	asm volatile (						\
+		LOADARGS_##nr					\
+		"movl %1, %%eax\n\t"				\
+		DOSYSCALL					\
+		RESTOREARGS_##nr				\
+		: "=a" (__resultvar)				\
+		: "g" (__xn_syscode(op)) ASMFMT_##nr(args)	\
+		: "memory", "cc");				\
+	(int) __resultvar;					\
+})
+
+#define XENOMAI_DO_SYSCALL_SAFE(nr, op, args...)		\
+({								\
+	unsigned __resultvar;					\
+	asm volatile (						\
+		LOADARGS_##nr					\
+		"movl %1, %%eax\n\t"				\
+		DOSYSCALLSAFE					\
+		RESTOREARGS_##nr				\
+		: "=a" (__resultvar)				\
+		: "g" (__xn_syscode(op)) ASMFMT_##nr(args)	\
+		: "memory", "cc");				\
+	(int) __resultvar;					\
+})
+
+#define LOADARGS_0
+#define LOADARGS_1 \
+	"bpushl .L__X'%k2, %k2\n\t" \
+	"bmovl .L__X'%k2, %k2\n\t"
+#define LOADARGS_2	LOADARGS_1
+#define LOADARGS_3	LOADARGS_1
+#define LOADARGS_4	LOADARGS_1
+#define LOADARGS_5	LOADARGS_1
+
+#define RESTOREARGS_0
+#define RESTOREARGS_1 \
+	"bpopl .L__X'%k2, %k2\n\t"
+#define RESTOREARGS_2	RESTOREARGS_1
+#define RESTOREARGS_3	RESTOREARGS_1
+#define RESTOREARGS_4	RESTOREARGS_1
+#define RESTOREARGS_5	RESTOREARGS_1
+
+#define ASMFMT_0()
+#define ASMFMT_1(arg1) \
+	, "acdSD" (arg1)
+#define ASMFMT_2(arg1, arg2) \
+	, "adSD" (arg1), "c" (arg2)
+#define ASMFMT_3(arg1, arg2, arg3) \
+	, "aSD" (arg1), "c" (arg2), "d" (arg3)
+#define ASMFMT_4(arg1, arg2, arg3, arg4) \
+	, "aD" (arg1), "c" (arg2), "d" (arg3), "S" (arg4)
+#define ASMFMT_5(arg1, arg2, arg3, arg4, arg5) \
+	, "a" (arg1), "c" (arg2), "d" (arg3), "S" (arg4), "D" (arg5)
+
+#define XENOMAI_SYSBIND(breq) \
+	XENOMAI_DO_SYSCALL_SAFE(1, sc_cobalt_bind, breq)
+
+#else /* x86_64 */
+
+#if __GNUC__ < 4 || (__GNUC__ == 4 && __GNUC_MINOR__ < 3)
+#define LOAD_ARGS_0()	asm volatile ("" : /* */ : /* */ : "memory");
+#else
+#define LOAD_ARGS_0()
+#endif
+#define LOAD_REGS_0
+#define ASM_ARGS_0
+
+#define LOAD_ARGS_1(a1)					\
+	long int __arg1 = (long) (a1);			\
+	LOAD_ARGS_0()
+#define LOAD_REGS_1					\
+	register long int _a1 asm ("rdi") = __arg1;	\
+	LOAD_REGS_0
+#define ASM_ARGS_1	ASM_ARGS_0, "r" (_a1)
+
+#define LOAD_ARGS_2(a1, a2)				\
+	long int __arg2 = (long) (a2);			\
+	LOAD_ARGS_1(a1)
+#define LOAD_REGS_2					\
+	register long int _a2 asm ("rsi") = __arg2;	\
+	LOAD_REGS_1
+#define ASM_ARGS_2	ASM_ARGS_1, "r" (_a2)
+
+#define LOAD_ARGS_3(a1, a2, a3)				\
+	long int __arg3 = (long) (a3);			\
+	LOAD_ARGS_2 (a1, a2)
+#define LOAD_REGS_3					\
+	register long int _a3 asm ("rdx") = __arg3;	\
+	LOAD_REGS_2
+#define ASM_ARGS_3	ASM_ARGS_2, "r" (_a3)
+
+#define LOAD_ARGS_4(a1, a2, a3, a4)			\
+	long int __arg4 = (long) (a4);			\
+	LOAD_ARGS_3 (a1, a2, a3)
+#define LOAD_REGS_4					\
+	register long int _a4 asm ("r10") = __arg4;	\
+	LOAD_REGS_3
+#define ASM_ARGS_4	ASM_ARGS_3, "r" (_a4)
+
+#define LOAD_ARGS_5(a1, a2, a3, a4, a5)			\
+	long int __arg5 = (long) (a5);			\
+	LOAD_ARGS_4 (a1, a2, a3, a4)
+#define LOAD_REGS_5					\
+	register long int _a5 asm ("r8") = __arg5;	\
+	LOAD_REGS_4
+#define ASM_ARGS_5	ASM_ARGS_4, "r" (_a5)
+
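+/*
+ * x86-64 variant: the syscall code is loaded into %rax, arguments
+ * into rdi/rsi/rdx/r10/r8; "syscall" itself clobbers %rcx and %r11,
+ * hence both registers appear in the clobber list.
+ */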
+#define DO_SYSCALL(name, nr, args...)			\
+({							\
+	unsigned long __resultvar;			\
+	LOAD_ARGS_##nr(args)				\
+	LOAD_REGS_##nr					\
+	asm volatile (					\
+		"syscall\n\t"				\
+		: "=a" (__resultvar)			\
+		: "0" (name) ASM_ARGS_##nr		\
+		: "memory", "cc", "r11", "cx");		\
+	(int) __resultvar;				\
+})
+
+#define XENOMAI_DO_SYSCALL(nr, op, args...) \
+	DO_SYSCALL(__xn_syscode(op), nr, args)
+
+#define XENOMAI_SYSBIND(breq) \
+	XENOMAI_DO_SYSCALL(1, sc_cobalt_bind, breq)
+
+#endif /* x86_64 */
+
+#define XENOMAI_SYSCALL0(op)			XENOMAI_DO_SYSCALL(0,op)
+#define XENOMAI_SYSCALL1(op,a1)			XENOMAI_DO_SYSCALL(1,op,a1)
+#define XENOMAI_SYSCALL2(op,a1,a2)		XENOMAI_DO_SYSCALL(2,op,a1,a2)
+#define XENOMAI_SYSCALL3(op,a1,a2,a3)		XENOMAI_DO_SYSCALL(3,op,a1,a2,a3)
+#define XENOMAI_SYSCALL4(op,a1,a2,a3,a4)	XENOMAI_DO_SYSCALL(4,op,a1,a2,a3,a4)
+#define XENOMAI_SYSCALL5(op,a1,a2,a3,a4,a5)	XENOMAI_DO_SYSCALL(5,op,a1,a2,a3,a4,a5)
+
+#endif /* !_LIB_COBALT_X86_SYSCALL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/time.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/time.h
new file mode 100644
index 0000000..693be87
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/time.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2021 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _LIB_COBALT_X86_TIME_H
+#define _LIB_COBALT_X86_TIME_H
+
+#define COBALT_VDSO_VERSION	"LINUX_2.6"
+#define COBALT_VDSO_GETTIME	"__vdso_clock_gettime"
+
+#endif /* !_LIB_COBALT_X86_TIME_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/tsc.h b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/tsc.h
new file mode 100644
index 0000000..bf400e8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/arch/x86/include/asm/xenomai/tsc.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2001,2002,2003,2007 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _LIB_COBALT_X86_TSC_H
+#define _LIB_COBALT_X86_TSC_H
+
+static inline unsigned long long cobalt_read_legacy_tsc(void)
+{
+#ifdef __i386__
+	unsigned long long t;
+
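+	/* "=A" maps the 64-bit result onto the edx:eax register pair. */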
+	__asm__ __volatile__ ("rdtsc" : "=A" (t));
+	return t;
+
+#else /* x86_64 */
+	unsigned int __a,__d;
+
+	__asm__ __volatile__ ("rdtsc" : "=a" (__a), "=d" (__d));
+	return ((unsigned long)__a) | (((unsigned long)__d) << 32);
+#endif /* x86_64 */
+}
+
+#endif /* !_LIB_COBALT_X86_TSC_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/attr.c b/kernel/xenomai-v3.2.4/lib/cobalt/attr.c
new file mode 100644
index 0000000..586de92
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/attr.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stddef.h>
+#include <errno.h>
+#include <pthread.h>
+#include <memory.h>
+#include <cobalt/uapi/thread.h>
+#include "internal.h"
+
+COBALT_IMPL(int, pthread_attr_init, (pthread_attr_t *attr))
+{
+	__STD(pthread_attr_init)(attr);
+	return pthread_attr_setstacksize(attr, PTHREAD_STACK_DEFAULT);
+}
+
+int pthread_attr_init_ex(pthread_attr_ex_t *attr_ex)
+{
+	struct sched_param param;
+	int policy;
+
+	/* Start by zeroing all fields. */
+	memset(attr_ex, 0, sizeof(*attr_ex));
+	/* Merge in the default standard attribute set. */
+	__COBALT(pthread_attr_init)(&attr_ex->std);
+	pthread_attr_getschedpolicy(&attr_ex->std, &policy);
+	attr_ex->nonstd.sched_policy = policy;
+	pthread_attr_getschedparam(&attr_ex->std, &param);
+	attr_ex->nonstd.sched_param.sched_priority = param.sched_priority;
+
+	return 0;
+}
+
+int pthread_attr_destroy_ex(pthread_attr_ex_t *attr_ex)
+{
+	return pthread_attr_destroy(&attr_ex->std);
+}
+
+int pthread_attr_setschedpolicy_ex(pthread_attr_ex_t *attr_ex,
+				   int policy)
+{
+	attr_ex->nonstd.sched_policy = policy;
+
+	return 0;
+}
+
+int pthread_attr_getschedpolicy_ex(const pthread_attr_ex_t *attr_ex,
+				   int *policy)
+{
+	*policy = attr_ex->nonstd.sched_policy;
+
+	return 0;
+}
+
+int pthread_attr_setschedparam_ex(pthread_attr_ex_t *attr_ex,
+				  const struct sched_param_ex *param_ex)
+{
+	attr_ex->nonstd.sched_param = *param_ex;
+
+	return 0;
+}
+
+int pthread_attr_getschedparam_ex(const pthread_attr_ex_t *attr_ex,
+				  struct sched_param_ex *param_ex)
+{
+	*param_ex = attr_ex->nonstd.sched_param;
+
+	return 0;
+}
+
+int pthread_attr_setinheritsched_ex(pthread_attr_ex_t *attr_ex,
+				    int inheritsched)
+{
+	return pthread_attr_setinheritsched(&attr_ex->std, inheritsched);
+}
+
+int pthread_attr_getinheritsched_ex(const pthread_attr_ex_t *attr_ex,
+				    int *inheritsched)
+{
+	return pthread_attr_getinheritsched(&attr_ex->std, inheritsched);
+}
+
+int pthread_attr_getdetachstate_ex(const pthread_attr_ex_t *attr_ex,
+				   int *detachstate)
+{
+	return pthread_attr_getdetachstate(&attr_ex->std, detachstate);
+}
+
+int pthread_attr_setdetachstate_ex(pthread_attr_ex_t *attr_ex,
+				   int detachstate)
+{
+	return pthread_attr_setdetachstate(&attr_ex->std, detachstate);
+}
+
+int pthread_attr_getstacksize_ex(const pthread_attr_ex_t *attr_ex,
+				 size_t *stacksize)
+{
+	return pthread_attr_getstacksize(&attr_ex->std, stacksize);
+}
+
+int pthread_attr_setstacksize_ex(pthread_attr_ex_t *attr_ex,
+				 size_t stacksize)
+{
+	return pthread_attr_setstacksize(&attr_ex->std, stacksize);
+}
+
+int pthread_attr_getscope_ex(const pthread_attr_ex_t *attr_ex,
+			     int *scope)
+{
+	return pthread_attr_getscope(&attr_ex->std, scope);
+}
+
+int pthread_attr_setscope_ex(pthread_attr_ex_t *attr_ex,
+			     int scope)
+{
+	return pthread_attr_setscope(&attr_ex->std, scope);
+}
+
+int pthread_attr_getpersonality_ex(const pthread_attr_ex_t *attr_ex,
+				   int *personality)
+{
+	*personality = attr_ex->nonstd.personality;
+
+	return 0;
+}
+
+int pthread_attr_setpersonality_ex(pthread_attr_ex_t *attr_ex,
+				   int personality)
+{
+	attr_ex->nonstd.personality = personality;
+
+	return 0;
+}
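+
+/*
+ * Editorial sketch (not part of the original sources): filling an
+ * extended attribute set for a SCHED_FIFO thread with the services
+ * above. pthread_create_ex() is assumed to be the Cobalt service
+ * consuming pthread_attr_ex_t.
+ *
+ *	pthread_attr_ex_t attr_ex;
+ *	struct sched_param_ex param_ex = { .sched_priority = 50 };
+ *
+ *	pthread_attr_init_ex(&attr_ex);
+ *	pthread_attr_setinheritsched_ex(&attr_ex, PTHREAD_EXPLICIT_SCHED);
+ *	pthread_attr_setschedpolicy_ex(&attr_ex, SCHED_FIFO);
+ *	pthread_attr_setschedparam_ex(&attr_ex, &param_ex);
+ */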
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/clock.c b/kernel/xenomai-v3.2.4/lib/cobalt/clock.c
new file mode 100644
index 0000000..4904264
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/clock.c
@@ -0,0 +1,489 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <stdint.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <time.h>
+#include <sys/time.h>
+#include <cobalt/uapi/time.h>
+#include <cobalt/ticks.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/tsc.h>
+#include "umm.h"
+#include "internal.h"
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_time Clocks and timers
+ *
+ * Cobalt/POSIX clock and timer services
+ *
+ * Cobalt supports three built-in clocks:
+ *
+ * CLOCK_REALTIME maps to the nucleus system clock, counting the time
+ * elapsed since the Epoch, with a resolution of one nanosecond.
+ *
+ * CLOCK_MONOTONIC maps to an architecture-dependent high resolution
+ * counter, so is suitable for measuring short time
+ * intervals. However, when used for sleeping (with
+ * clock_nanosleep()), the CLOCK_MONOTONIC clock has a resolution of
+ * one nanosecond, like the CLOCK_REALTIME clock.
+ *
+ * CLOCK_MONOTONIC_RAW is Linux-specific, and provides monotonic time
+ * values from a hardware timer which is not adjusted by NTP. This is
+ * strictly equivalent to CLOCK_MONOTONIC with Cobalt, which is not
+ * NTP adjusted either.
+ *
+ * In addition, external clocks can be dynamically registered using
+ * the cobalt_clock_register() service. These clocks are fully managed
+ * by Cobalt extension code, which should advertise each incoming tick
+ * by calling xnclock_tick() for the relevant clock, from an interrupt
+ * context.
+ *
+ * Timer objects may be created with the timer_create() service using
+ * any of the built-in or external clocks. The resolution of these
+ * timers is clock-specific. However, built-in clocks all have
+ * nanosecond resolution, as specified for clock_nanosleep().
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/xsh_chap02_08.html#tag_02_08_05">
+ * Specification.</a>
+ *
+ *@{
+ */
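+
+/*
+ * Editorial note: a minimal usage sketch (not part of the original
+ * sources), assuming an application linked against libcobalt with
+ * symbol wrapping enabled, so that clock_gettime() below resolves to
+ * the Cobalt implementation:
+ *
+ *	#include <stdio.h>
+ *	#include <time.h>
+ *
+ *	void show_clocks(void)
+ *	{
+ *		struct timespec mono, wall;
+ *
+ *		clock_gettime(CLOCK_MONOTONIC, &mono);
+ *		clock_gettime(CLOCK_REALTIME, &wall);
+ *		printf("mono %lld.%09ld wall %lld.%09ld\n",
+ *		       (long long)mono.tv_sec, mono.tv_nsec,
+ *		       (long long)wall.tv_sec, wall.tv_nsec);
+ *	}
+ */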
+
+/**
+ * Get the resolution of the specified clock.
+ *
+ * This service returns, at the address @a tp, if it is not @a NULL, the
+ * resolution of the clock @a clock_id.
+ *
+ * For both CLOCK_REALTIME and CLOCK_MONOTONIC, this resolution is the duration
+ * of one system clock tick. No other clock is supported.
+ *
+ * @param clock_id clock identifier, either CLOCK_REALTIME or CLOCK_MONOTONIC;
+ *
+ * @param tp the address where the resolution of the specified clock will be
+ * stored on success.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a clock_id is invalid;
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_getres.html">
+ * Specification.</a>
+ *
+ * @apitags{unrestricted}
+ */
+COBALT_IMPL(int, clock_getres, (clockid_t clock_id, struct timespec *tp))
+{
+	int ret;
+
+#ifdef __USE_TIME_BITS64
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_clock_getres64, clock_id, tp);
+#else
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_clock_getres, clock_id, tp);
+#endif
+
+	if (ret) {
+		errno = ret;
+		return -1;
+	}
+
+	return 0;
+}
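+
+/*
+ * Editorial sketch (not part of the original sources): querying the
+ * duration of one system clock tick through the wrapper above.
+ *
+ *	struct timespec res;
+ *
+ *	if (clock_getres(CLOCK_REALTIME, &res) == 0)
+ *		printf("tick duration: %ld ns\n", res.tv_nsec);
+ */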
+
+static int __do_clock_host_realtime(struct timespec *ts)
+{
+	uint64_t now, base, mask, cycle_delta, nsec;
+	struct xnvdso_hostrt_data *hostrt_data;
+	uint32_t mult, shift;
+	unsigned long rem;
+	urwstate_t tmp;
+
+	if (!xnvdso_test_feature(cobalt_vdso, XNVDSO_FEAT_HOST_REALTIME))
+		return -1;
+
+	hostrt_data = &cobalt_vdso->hostrt_data;
+
+	if (!hostrt_data->live)
+		return -1;
+
+	/*
+	 * The following is essentially a verbatim copy of the
+	 * mechanism in the kernel.
+	 */
+	unsynced_read_block(&tmp, &hostrt_data->lock) {
+		now = cobalt_read_legacy_tsc();
+		base = hostrt_data->cycle_last;
+		mask = hostrt_data->mask;
+		mult = hostrt_data->mult;
+		shift = hostrt_data->shift;
+		ts->tv_sec = hostrt_data->wall_sec;
+		nsec = hostrt_data->wall_nsec;
+	}
+
+	cycle_delta = (now - base) & mask;
+	nsec += (cycle_delta * mult) >> shift;
+
+	ts->tv_sec += cobalt_divrem_billion(nsec, &rem);
+	ts->tv_nsec = rem;
+
+	return 0;
+}
+
+static int __do_clock_gettime(clockid_t clock_id, struct timespec *tp)
+{
+#ifdef __USE_TIME_BITS64
+	return -XENOMAI_SYSCALL2(sc_cobalt_clock_gettime64, clock_id, tp);
+#else
+	return -XENOMAI_SYSCALL2(sc_cobalt_clock_gettime, clock_id, tp);
+#endif
+}
+
+static int gettime_via_tsc(clockid_t clock_id, struct timespec *tp)
+{
+	unsigned long rem;
+	xnticks_t ns;
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_HOST_REALTIME:
+		ret = __do_clock_host_realtime(tp);
+		break;
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ns = cobalt_ticks_to_ns(cobalt_read_legacy_tsc());
+		tp->tv_sec = cobalt_divrem_billion(ns, &rem);
+		tp->tv_nsec = rem;
+		return 0;
+	case CLOCK_REALTIME:
+		ns = cobalt_ticks_to_ns(cobalt_read_legacy_tsc());
+		ns += cobalt_vdso->wallclock_offset;
+		tp->tv_sec = cobalt_divrem_billion(ns, &rem);
+		tp->tv_nsec = rem;
+		return 0;
+	default:
+		ret = __do_clock_gettime(clock_id, tp);
+	}
+
+	if (ret) {
+		errno = ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+static int gettime_via_vdso(clockid_t clock_id, struct timespec *tp)
+{
+	int ret;
+
+	switch (clock_id) {
+	case CLOCK_REALTIME:
+	case CLOCK_HOST_REALTIME:
+		ret = __cobalt_vdso_gettime(CLOCK_REALTIME, tp);
+		break;
+	case CLOCK_MONOTONIC:
+	case CLOCK_MONOTONIC_RAW:
+		ret = __cobalt_vdso_gettime(clock_id, tp);
+		break;
+	default:
+		ret = __do_clock_gettime(clock_id, tp);
+	}
+
+	if (ret) {
+		errno = ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Read the specified clock.
+ *
+ * This service returns, at the address @a tp the current value of the clock @a
+ * clock_id. If @a clock_id is:
+ * - CLOCK_REALTIME, the clock value represents the amount of time since the
+ *   Epoch, with a precision of one system clock tick;
+ * - CLOCK_MONOTONIC or CLOCK_MONOTONIC_RAW, the clock value is given
+ *   by an architecture-dependent high resolution counter, with a
+ *   precision independent of the system clock tick duration;
+ * - CLOCK_HOST_REALTIME, the clock value as seen by the host, typically
+ *   Linux. Resolution and precision depend on the host, but both the
+ *   host and Cobalt are guaranteed to see the same information.
+ *
+ * @param clock_id clock identifier: CLOCK_REALTIME, CLOCK_MONOTONIC,
+ *        CLOCK_MONOTONIC_RAW or CLOCK_HOST_REALTIME;
+ *
+ * @param tp the address where the value of the specified clock will be stored.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a clock_id is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_gettime.html">
+ * Specification.</a>
+ *
+ * @apitags{unrestricted}
+ */
+COBALT_IMPL(int, clock_gettime, (clockid_t clock_id, struct timespec *tp))
+{
+	if (cobalt_use_legacy_tsc())
+		return gettime_via_tsc(clock_id, tp);
+
+	return gettime_via_vdso(clock_id, tp);
+}
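+
+/*
+ * Editorial sketch (not part of the original sources): timing a code
+ * section with CLOCK_MONOTONIC, which the wrapper above serves from
+ * either the legacy TSC or the vDSO path. do_work() is a hypothetical
+ * workload.
+ *
+ *	struct timespec t0, t1;
+ *	long long ns;
+ *
+ *	clock_gettime(CLOCK_MONOTONIC, &t0);
+ *	do_work();
+ *	clock_gettime(CLOCK_MONOTONIC, &t1);
+ *	ns = (long long)(t1.tv_sec - t0.tv_sec) * 1000000000LL
+ *		+ (t1.tv_nsec - t0.tv_nsec);
+ */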
+
+/**
+ * Set the specified clock.
+ *
+ * Set the CLOCK_REALTIME or Cobalt-specific clocks.
+ *
+ * @param clock_id the id of the clock to be set. CLOCK_REALTIME,
+ * and Cobalt-specific clocks are supported.
+ *
+ * @param tp the address of a struct timespec specifying the new date.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a clock_id is undefined;
+ * - EINVAL, the date specified by @a tp is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_settime.html">
+ * Specification.</a>
+ *
+ * @note Setting CLOCK_REALTIME may cause the caller to switch to
+ * secondary mode.
+ *
+ * @apitags{unrestricted, switch-secondary}
+ */
+COBALT_IMPL(int, clock_settime, (clockid_t clock_id, const struct timespec *tp))
+{
+	int ret;
+
+	if (clock_id == CLOCK_REALTIME && !cobalt_use_legacy_tsc())
+		return __STD(clock_settime(CLOCK_REALTIME, tp));
+
+#ifdef __USE_TIME_BITS64
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_clock_settime64, clock_id, tp);
+#else
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_clock_settime, clock_id, tp);
+#endif
+	if (ret) {
+		errno = ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* @apitags{unrestricted} */
+
+COBALT_IMPL(int, clock_adjtime, (clockid_t clock_id, struct timex *tx))
+{
+	int ret;
+
+#ifdef __USE_TIME_BITS64
+	ret = XENOMAI_SYSCALL2(sc_cobalt_clock_adjtime64, clock_id, tx);
+#else
+	ret = XENOMAI_SYSCALL2(sc_cobalt_clock_adjtime, clock_id, tx);
+#endif
+
+	if (ret < 0) {
+		/* The syscall returns -errno on failure. */
+		errno = -ret;
+		return -1;
+	}
+
+	return ret;
+}
+
+/**
+ * Sleep some amount of time.
+ *
+ * This service suspends the calling thread until the wakeup time specified by
+ * @a rqtp, or a signal is delivered to the caller. If the flag TIMER_ABSTIME is
+ * set in the @a flags argument, the wakeup time is specified as an absolute
+ * value of the clock @a clock_id. If the flag TIMER_ABSTIME is not set, the
+ * wakeup time is specified as a time interval.
+ *
+ * If this service is interrupted by a signal, the flag TIMER_ABSTIME is not
+ * set, and @a rmtp is not @a NULL, the time remaining until the specified
+ * wakeup time is returned at the address @a rmtp.
+ *
+ * The resolution of this service is one system clock tick.
+ *
+ * @param clock_id clock identifier, either CLOCK_REALTIME or CLOCK_MONOTONIC.
+ *
+ * @param flags one of:
+ * - 0 meaning that the wakeup time @a rqtp is a time interval;
+ * - TIMER_ABSTIME, meaning that the wakeup time is an absolute value of the
+ *   clock @a clock_id.
+ *
+ * @param rqtp address of the wakeup time.
+ *
+ * @param rmtp address where the remaining time before wakeup will be stored if
+ * the service is interrupted by a signal.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EPERM, the caller context is invalid;
+ * - ENOTSUP, the specified clock is unsupported;
+ * - EINVAL, the specified wakeup time is invalid;
+ * - EINTR, this service was interrupted by a signal.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/clock_nanosleep.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, clock_nanosleep, (clockid_t clock_id,
+				   int flags,
+				   const struct timespec *rqtp, struct timespec *rmtp))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	ret = -XENOMAI_SYSCALL4(sc_cobalt_clock_nanosleep64,
+				clock_id, flags, rqtp, rmtp);
+#else
+	ret = -XENOMAI_SYSCALL4(sc_cobalt_clock_nanosleep,
+				clock_id, flags, rqtp, rmtp);
+#endif
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
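+
+/*
+ * Editorial sketch (not part of the original sources): a drift-free
+ * periodic loop built on TIMER_ABSTIME, sleeping until absolute points
+ * in time. PERIOD_NS and do_periodic_work() are assumed for the sake
+ * of the example.
+ *
+ *	struct timespec next;
+ *
+ *	clock_gettime(CLOCK_MONOTONIC, &next);
+ *	for (;;) {
+ *		next.tv_nsec += PERIOD_NS;
+ *		if (next.tv_nsec >= 1000000000) {
+ *			next.tv_nsec -= 1000000000;
+ *			next.tv_sec++;
+ *		}
+ *		clock_nanosleep(CLOCK_MONOTONIC, TIMER_ABSTIME, &next, NULL);
+ *		do_periodic_work();
+ *	}
+ */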
+
+/**
+ * Sleep some amount of time.
+ *
+ * This service suspends the calling thread until the wakeup time specified by
+ * @a rqtp, or a signal is delivered. The wakeup time is specified as a time
+ * interval.
+ *
+ * If this service is interrupted by a signal and @a rmtp is not @a NULL, the
+ * time remaining until the specified wakeup time is returned at the address @a
+ * rmtp.
+ *
+ * The resolution of this service is one system clock tick.
+ *
+ * @param rqtp address of the wakeup time.
+ *
+ * @param rmtp address where the remaining time before wakeup will be stored if
+ * the service is interrupted by a signal.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EPERM, the caller context is invalid;
+ * - EINVAL, the specified wakeup time is invalid;
+ * - EINTR, this service was interrupted by a signal.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/nanosleep.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, nanosleep, (const struct timespec *rqtp, struct timespec *rmtp))
+{
+	int ret;
+
+	ret = __WRAP(clock_nanosleep(CLOCK_REALTIME, 0, rqtp, rmtp));
+	if (ret) {
+		errno = ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+/* @apitags{thread-unrestricted, switch-primary} */
+
+COBALT_IMPL(unsigned int, sleep, (unsigned int seconds))
+{
+	struct timespec rqt, rem;
+	int ret;
+
+	if (cobalt_get_current_fast() == XN_NO_HANDLE)
+		return __STD(sleep(seconds));
+
+	rqt.tv_sec = seconds;
+	rqt.tv_nsec = 0;
+	ret = __WRAP(clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, &rem));
+	if (ret)
+		return rem.tv_sec;
+
+	return 0;
+}
+
+/* @apitags{thread-unrestricted, switch-primary} */
+
+COBALT_IMPL(int, usleep, (useconds_t usec))
+{
+	struct timespec rqt;
+
+	if (cobalt_get_current_fast() == XN_NO_HANDLE)
+		return __STD(usleep(usec));
+
+	rqt.tv_sec = usec / 1000000;
+	rqt.tv_nsec = (usec % 1000000) * 1000;
+	return __WRAP(clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, NULL));
+}
+
+/* @apitags{unrestricted} */
+
+COBALT_IMPL(int, gettimeofday, (struct timeval *tv, struct timezone *tz))
+{
+	struct timespec ts;
+	int ret = __WRAP(clock_gettime(CLOCK_REALTIME, &ts));
+	if (ret == 0) {
+		tv->tv_sec = ts.tv_sec;
+		tv->tv_usec = ts.tv_nsec / 1000;
+	}
+	return ret;
+}
+
+/* @apitags{unrestricted} */
+
+COBALT_IMPL(time_t, time, (time_t *t))
+{
+	struct timespec ts;
+	int ret = __WRAP(clock_gettime(CLOCK_REALTIME, &ts));
+	if (ret)
+		return (time_t)-1;
+
+	if (t)
+		*t = ts.tv_sec;
+	return ts.tv_sec;
+}
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/cobalt.wrappers b/kernel/xenomai-v3.2.4/lib/cobalt/cobalt.wrappers
new file mode 100644
index 0000000..0e95476
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/cobalt.wrappers
@@ -0,0 +1,120 @@
+--wrap pthread_attr_init
+--wrap pthread_create
+--wrap pthread_setschedparam
+--wrap pthread_getschedparam
+--wrap pthread_setschedprio
+--wrap pthread_yield
+--wrap sched_yield
+--wrap sched_get_priority_min
+--wrap sched_get_priority_max
+--wrap sched_setscheduler
+--wrap sched_getscheduler
+--wrap pthread_kill
+--wrap pthread_join
+--wrap pthread_setname_np
+--wrap sem_init
+--wrap sem_destroy
+--wrap sem_post
+--wrap sem_timedwait
+--wrap sem_wait
+--wrap sem_trywait
+--wrap sem_getvalue
+--wrap sem_open
+--wrap sem_close
+--wrap sem_unlink
+--wrap clock_getres
+--wrap clock_gettime
+--wrap clock_settime
+--wrap clock_adjtime
+--wrap clock_nanosleep
+--wrap nanosleep
+--wrap pthread_mutex_init
+--wrap pthread_mutex_destroy
+--wrap pthread_mutex_lock
+--wrap pthread_mutex_trylock
+--wrap pthread_mutex_timedlock
+--wrap pthread_mutex_unlock
+--wrap pthread_mutex_setprioceiling
+--wrap pthread_mutex_getprioceiling
+--wrap pthread_cond_init
+--wrap pthread_cond_destroy
+--wrap pthread_cond_wait
+--wrap pthread_cond_timedwait
+--wrap pthread_cond_signal
+--wrap pthread_cond_broadcast
+--wrap mq_open
+--wrap mq_close
+--wrap mq_unlink
+--wrap mq_getattr
+--wrap mq_setattr
+--wrap mq_send
+--wrap mq_timedsend
+--wrap mq_receive
+--wrap mq_timedreceive
+--wrap mq_notify
+--wrap open
+--wrap open64
+--wrap __open_2
+--wrap __open64_2
+--wrap socket
+--wrap close
+--wrap ioctl
+--wrap read
+--wrap write
+--wrap recvmsg
+--wrap recvmmsg
+--wrap sendmsg
+--wrap sendmmsg
+--wrap recvfrom
+--wrap sendto
+--wrap recv
+--wrap send
+--wrap getsockopt
+--wrap setsockopt
+--wrap bind
+--wrap connect
+--wrap listen
+--wrap accept
+--wrap getsockname
+--wrap getpeername
+--wrap shutdown
+--wrap timer_create
+--wrap timer_delete
+--wrap timer_settime
+--wrap timer_getoverrun
+--wrap timer_gettime
+--wrap timerfd_create
+--wrap timerfd_gettime
+--wrap timerfd_settime
+--wrap select
+--wrap vfprintf
+--wrap vprintf
+--wrap fprintf
+--wrap printf
+--wrap puts
+--wrap fputs
+--wrap fputc
+--wrap putchar
+--wrap fwrite
+--wrap fclose
+--wrap syslog
+--wrap vsyslog
+--wrap gettimeofday
+--wrap __vfprintf_chk
+--wrap __vprintf_chk
+--wrap __fprintf_chk
+--wrap __printf_chk
+--wrap __vsyslog_chk
+--wrap __syslog_chk
+--wrap sigwait
+--wrap sigwaitinfo
+--wrap sigtimedwait
+--wrap sigpending
+--wrap sigqueue
+--wrap kill
+--wrap sleep
+--wrap usleep
+--wrap mmap
+--wrap mmap64
+--wrap time
+--wrap fcntl
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/cond.c b/kernel/xenomai-v3.2.4/lib/cobalt/cond.c
new file mode 100644
index 0000000..1bf5c74
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/cond.c
@@ -0,0 +1,689 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <pthread.h>
+#include <asm/xenomai/syscall.h>
+#include "current.h"
+#include "internal.h"
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_cond Condition variables
+ *
+ * Cobalt/POSIX condition variable services
+ *
+ * A condition variable is a synchronization object that allows threads to
+ * suspend execution until some predicate on shared data is satisfied. The basic
+ * operations on conditions are: signal the condition (when the predicate
+ * becomes true), and wait for the condition, suspending the thread execution
+ * until another thread signals the condition.
+ *
+ * A condition variable must always be associated with a mutex, to avoid the
+ * race condition where a thread prepares to wait on a condition variable and
+ * another thread signals the condition just before the first thread actually
+ * waits on it.
+ *
+ * Before it can be used, a condition variable has to be initialized with
+ * pthread_cond_init(). An attribute object, a reference to which may be
+ * passed to this service, allows selecting the features of the created
+ * condition variable, namely the @a clock used by the
+ * pthread_cond_timedwait() service (@a CLOCK_REALTIME by default), and
+ * whether it may be shared between several processes (not shared by
+ * default, see pthread_condattr_setpshared()).
+ *
+ * Note that pthread_cond_init() should be used to initialize a condition
+ * variable; using the static initializer @a PTHREAD_COND_INITIALIZER
+ * delays the initialization to the first service called on the condition
+ * variable and will most likely introduce switches to secondary mode.
+ * The documentation (and specifically the api-tags) of the condition
+ * variable services assumes the condition variable was explicitly
+ * initialized with pthread_cond_init().
+ *
+ *@{
+ */
+
+static pthread_condattr_t cobalt_default_condattr;
+
+static inline struct cobalt_cond_state *
+get_cond_state(struct cobalt_cond_shadow *shadow)
+{
+	if (xnsynch_is_shared(shadow->handle))
+		return cobalt_umm_shared + shadow->state_offset;
+
+	return cobalt_umm_private + shadow->state_offset;
+}
+
+static inline struct cobalt_mutex_state *
+get_mutex_state(struct cobalt_cond_shadow *shadow)
+{
+	struct cobalt_cond_state *cond_state = get_cond_state(shadow);
+
+	if (cond_state->mutex_state_offset == ~0U)
+		return NULL;
+
+	if (xnsynch_is_shared(shadow->handle))
+		return cobalt_umm_shared + cond_state->mutex_state_offset;
+
+	return cobalt_umm_private + cond_state->mutex_state_offset;
+}
+
+void cobalt_default_condattr_init(void)
+{
+	pthread_condattr_init(&cobalt_default_condattr);
+}
+
+/**
+ * @fn int pthread_cond_init(pthread_cond_t *cond, const pthread_condattr_t *attr)
+ * @brief Initialize a condition variable
+ *
+ * This service initializes the condition variable @a cond, using the
+ * condition variable attributes object @a attr. If @a attr is @a
+ * NULL, default attributes are used (see pthread_condattr_init()).
+ *
+ * @param cond the condition variable to be initialized;
+ *
+ * @param attr the condition variable attributes object.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the condition variable attributes object @a attr is invalid or
+ *   uninitialized;
+ * - EBUSY, the condition variable @a cond was already initialized;
+ * - ENOMEM, insufficient memory available from the system heap to initialize the
+ *   condition variable, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
+ * - EAGAIN, no registry slot available, check/raise CONFIG_XENO_OPT_REGISTRY_NRSLOTS.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_init.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, pthread_cond_init, (pthread_cond_t *cond,
+				     const pthread_condattr_t * attr))
+{
+	struct cobalt_cond_shadow *_cnd = &((union cobalt_cond_union *)cond)->shadow_cond;
+	struct cobalt_cond_state *cond_state;
+	struct cobalt_condattr kcattr;
+	int err, tmp;
+
+	if (attr == NULL)
+		attr = &cobalt_default_condattr;
+
+	err = pthread_condattr_getpshared(attr, &tmp);
+	if (err)
+		return err;
+	kcattr.pshared = tmp;
+
+	err = pthread_condattr_getclock(attr, &tmp);
+	if (err)
+		return err;
+	kcattr.clock = tmp;
+
+	err = -XENOMAI_SYSCALL2(sc_cobalt_cond_init, _cnd, &kcattr);
+	if (err)
+		return err;
+
+	cond_state = get_cond_state(_cnd);
+	cobalt_commit_memory(cond_state);
+
+	return 0;
+}
+
+static int __attribute__((cold))
+	cobalt_cond_autoinit_type(const pthread_cond_t *cond)
+{
+	static const pthread_cond_t cond_initializer =
+		PTHREAD_COND_INITIALIZER;
+
+	return memcmp(cond, &cond_initializer, sizeof(cond_initializer)) == 0 ?
+		0 : -1;
+}
+
+static int __attribute__((cold))
+	cobalt_cond_doautoinit(union cobalt_cond_union *ucond)
+{
+	if (cobalt_cond_autoinit_type(&ucond->native_cond) < 0)
+		return EINVAL;
+
+	return __COBALT(pthread_cond_init(&ucond->native_cond, NULL));
+}
+
+static inline int cobalt_cond_autoinit(union cobalt_cond_union *ucond)
+{
+	if (ucond->shadow_cond.magic != COBALT_COND_MAGIC)
+		return cobalt_cond_doautoinit(ucond);
+	return 0;
+}
+
+/**
+ * @fn int pthread_cond_destroy(pthread_cond_t *cond)
+ * @brief Destroy a condition variable
+ *
+ * This service destroys the condition variable @a cond, if no thread is
+ * currently blocked on it. The condition variable becomes invalid for all
+ * condition variable services (they all return the EINVAL error) except
+ * pthread_cond_init().
+ *
+ * @param cond the condition variable to be destroyed.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the condition variable @a cond is invalid;
+ * - EPERM, the condition variable is not process-shared and does not belong to
+ *   the current process;
+ * - EBUSY, some thread is currently using the condition variable.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_destroy.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, pthread_cond_destroy, (pthread_cond_t *cond))
+{
+	struct cobalt_cond_shadow *_cond =
+		&((union cobalt_cond_union *)cond)->shadow_cond;
+
+	if (_cond->magic != COBALT_COND_MAGIC)
+		return (cobalt_cond_autoinit_type(cond) < 0) ? EINVAL : 0;
+
+	return -XENOMAI_SYSCALL1(sc_cobalt_cond_destroy, _cond);
+}
+
+struct cobalt_cond_cleanup_t {
+	struct cobalt_cond_shadow *cond;
+	struct cobalt_mutex_shadow *mutex;
+	unsigned count;
+	int err;
+};
+
+static void __pthread_cond_cleanup(void *data)
+{
+	struct cobalt_cond_cleanup_t *c = (struct cobalt_cond_cleanup_t *)data;
+	int err;
+
+	do {
+		err = XENOMAI_SYSCALL2(sc_cobalt_cond_wait_epilogue,
+				       c->cond, c->mutex);
+	} while (err == -EINTR);
+
+	c->mutex->lockcnt = c->count;
+}
+
+/**
+ * Wait on a condition variable.
+ *
+ * This service atomically unlocks the mutex @a mutex, and blocks the calling
+ * thread until the condition variable @a cond is signalled using
+ * pthread_cond_signal() or pthread_cond_broadcast(). When the condition is
+ * signalled, this service re-acquires the mutex before returning.
+ *
+ * Spurious wakeups occur if a signal is delivered to the blocked thread, so an
+ * application should not assume that the condition changed upon successful
+ * return from this service.
+ *
+ * Even if the mutex @a mutex is recursive and its recursion count is greater
+ * than one on entry, it is unlocked before blocking the caller, and the
+ * recursion count is restored once the mutex is re-acquired by this service
+ * before returning.
+ *
+ * Once a thread is blocked on a condition variable, a dynamic binding is formed
+ * between the condition variable @a cond and the mutex @a mutex; if another
+ * thread calls this service specifying @a cond as a condition variable but
+ * another mutex than @a mutex, this service returns immediately with the
+ * EINVAL status.
+ *
+ * This service is a cancellation point for Cobalt threads (created
+ * with the pthread_create() service). When such a thread is cancelled
+ * while blocked in a call to this service, the mutex @a mutex is
+ * re-acquired before the cancellation cleanup handlers are called.
+ *
+ * @param cond the condition variable to wait for;
+ *
+ * @param mutex the mutex associated with @a cond.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EPERM, the caller context is invalid;
+ * - EINVAL, the specified condition variable or mutex is invalid;
+ * - EPERM, the specified condition variable is not process-shared and does not
+ *   belong to the current process;
+ * - EINVAL, another thread is currently blocked on @a cond using another mutex
+ *   than @a mutex;
+ * - EPERM, the specified mutex is not owned by the caller.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_wait.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, pthread_cond_wait, (pthread_cond_t *cond, pthread_mutex_t *mutex))
+{
+	struct cobalt_cond_shadow *_cnd = &((union cobalt_cond_union *)cond)->shadow_cond;
+	struct cobalt_mutex_shadow *_mx =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct cobalt_cond_cleanup_t c = {
+		.cond = _cnd,
+		.mutex = _mx,
+		.err = 0,
+	};
+	int err, oldtype;
+	unsigned count;
+
+	if (_mx->magic != COBALT_MUTEX_MAGIC)
+		return EINVAL;
+
+	err = cobalt_cond_autoinit((union cobalt_cond_union *)cond);
+	if (err)
+		return err;
+
+	if (_mx->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
+		xnhandle_t cur = cobalt_get_current();
+
+		if (cur == XN_NO_HANDLE)
+			return EPERM;
+
+		if (xnsynch_fast_owner_check(mutex_get_ownerp(_mx), cur))
+			return EPERM;
+	}
+
+	pthread_cleanup_push(&__pthread_cond_cleanup, &c);
+
+	count = _mx->lockcnt;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	err = XENOMAI_SYSCALL5(sc_cobalt_cond_wait_prologue,
+			       _cnd, _mx, &c.err, 0, NULL);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	pthread_cleanup_pop(0);
+
+	while (err == -EINTR)
+		err = XENOMAI_SYSCALL2(sc_cobalt_cond_wait_epilogue, _cnd, _mx);
+
+	_mx->lockcnt = count;
+
+	pthread_testcancel();
+
+	return -err ?: -c.err;
+}
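+
+/*
+ * Editorial sketch (not part of the original sources): the canonical
+ * predicate loop guarding against the spurious wakeups documented
+ * above; cond, lock and data_ready are assumed shared objects.
+ *
+ *	// consumer
+ *	pthread_mutex_lock(&lock);
+ *	while (!data_ready)
+ *		pthread_cond_wait(&cond, &lock);
+ *	pthread_mutex_unlock(&lock);
+ *
+ *	// producer
+ *	pthread_mutex_lock(&lock);
+ *	data_ready = 1;
+ *	pthread_cond_signal(&cond);
+ *	pthread_mutex_unlock(&lock);
+ */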
+
+/**
+ * Wait a bounded time on a condition variable.
+ *
+ * This service is equivalent to pthread_cond_wait(), except that the calling
+ * thread remains blocked on the condition variable @a cond only until the
+ * timeout specified by @a abstime expires.
+ *
+ * The timeout @a abstime is expressed as an absolute value of the @a clock
+ * attribute passed to pthread_cond_init(). By default, @a CLOCK_REALTIME is
+ * used.
+ *
+ * @param cond the condition variable to wait for;
+ *
+ * @param mutex the mutex associated with @a cond;
+ *
+ * @param abstime the timeout, expressed as an absolute value of the clock
+ * attribute passed to pthread_cond_init().
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EPERM, the caller context is invalid;
+ * - EPERM, the specified condition variable is not process-shared and does not
+ *   belong to the current process;
+ * - EINVAL, the specified condition variable, mutex or timeout is invalid;
+ * - EINVAL, another thread is currently blocked on @a cond using another mutex
+ *   than @a mutex;
+ * - EPERM, the specified mutex is not owned by the caller;
+ * - ETIMEDOUT, the specified timeout expired.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_timedwait.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, pthread_cond_timedwait, (pthread_cond_t *cond,
+					  pthread_mutex_t *mutex,
+					  const struct timespec *abstime))
+{
+	struct cobalt_cond_shadow *_cnd = &((union cobalt_cond_union *)cond)->shadow_cond;
+	struct cobalt_mutex_shadow *_mx =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct cobalt_cond_cleanup_t c = {
+		.cond = _cnd,
+		.mutex = _mx,
+	};
+	int err, oldtype;
+	unsigned count;
+
+	if (_mx->magic != COBALT_MUTEX_MAGIC)
+		return EINVAL;
+
+	err = cobalt_cond_autoinit((union cobalt_cond_union *)cond);
+	if (err)
+		return err;
+
+	if (_mx->attr.type == PTHREAD_MUTEX_ERRORCHECK) {
+		xnhandle_t cur = cobalt_get_current();
+
+		if (cur == XN_NO_HANDLE)
+			return EPERM;
+
+		if (xnsynch_fast_owner_check(mutex_get_ownerp(_mx), cur))
+			return EPERM;
+	}
+
+	pthread_cleanup_push(&__pthread_cond_cleanup, &c);
+
+	count = _mx->lockcnt;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	err = XENOMAI_SYSCALL5(sc_cobalt_cond_wait_prologue,
+			       _cnd, _mx, &c.err, 1, abstime);
+	pthread_setcanceltype(oldtype, NULL);
+
+	pthread_cleanup_pop(0);
+
+	while (err == -EINTR)
+		err = XENOMAI_SYSCALL2(sc_cobalt_cond_wait_epilogue, _cnd, _mx);
+
+	_mx->lockcnt = count;
+
+	pthread_testcancel();
+
+	return -err ?: -c.err;
+}
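+
+/*
+ * Editorial sketch (not part of the original sources): a one-second
+ * bounded wait on a condition variable bound to CLOCK_MONOTONIC via
+ * its attribute object; cond, lock and data_ready are assumed shared.
+ *
+ *	pthread_condattr_t cattr;
+ *	struct timespec to;
+ *	int ret = 0;
+ *
+ *	pthread_condattr_init(&cattr);
+ *	pthread_condattr_setclock(&cattr, CLOCK_MONOTONIC);
+ *	pthread_cond_init(&cond, &cattr);
+ *
+ *	pthread_mutex_lock(&lock);
+ *	clock_gettime(CLOCK_MONOTONIC, &to);
+ *	to.tv_sec += 1;
+ *	while (!data_ready && ret != ETIMEDOUT)
+ *		ret = pthread_cond_timedwait(&cond, &lock, &to);
+ *	pthread_mutex_unlock(&lock);
+ */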
+
+/**
+ * Signal a condition variable.
+ *
+ * This service unblocks one thread blocked on the condition variable @a cond.
+ *
+ * If more than one thread is blocked on the specified condition variable, the
+ * highest priority thread is unblocked.
+ *
+ * @param cond the condition variable to be signalled.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the condition variable is invalid;
+ * - EPERM, the condition variable is not process-shared and does not belong to
+ *   the current process.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_signal.html.">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only}
+ */
+COBALT_IMPL(int, pthread_cond_signal, (pthread_cond_t *cond))
+{
+	struct cobalt_cond_shadow *_cnd = &((union cobalt_cond_union *)cond)->shadow_cond;
+	struct cobalt_mutex_state *mutex_state;
+	struct cobalt_cond_state *cond_state;
+	__u32 pending_signals;
+	xnhandle_t cur;
+	__u32 flags;
+	int err;
+
+	err = cobalt_cond_autoinit((union cobalt_cond_union *)cond);
+	if (err)
+		return err;
+
+	mutex_state = get_mutex_state(_cnd);
+	if (mutex_state == NULL)
+		return 0;	/* Fast path, no waiter. */
+
+	flags = mutex_state->flags;
+	if (flags & COBALT_MUTEX_ERRORCHECK) {
+		cur = cobalt_get_current();
+		if (cur == XN_NO_HANDLE)
+			return EPERM;
+		if (xnsynch_fast_owner_check(&mutex_state->owner, cur) < 0)
+			return EPERM;
+	}
+
+	mutex_state->flags = flags | COBALT_MUTEX_COND_SIGNAL;
+	cond_state = get_cond_state(_cnd);
+	pending_signals = cond_state->pending_signals;
+	if (pending_signals != ~0U)
+		cond_state->pending_signals = pending_signals + 1;
+
+	return 0;
+}
+
+/**
+ * Broadcast a condition variable.
+ *
+ * This service unblocks all threads blocked on the condition variable @a cond.
+ *
+ * @param cond the condition variable to be signalled.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the condition variable is invalid;
+ * - EPERM, the condition variable is not process-shared and does not belong to
+ *   the current process.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_cond_broadcast.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only}
+ */
+COBALT_IMPL(int, pthread_cond_broadcast, (pthread_cond_t *cond))
+{
+	struct cobalt_cond_shadow *_cnd = &((union cobalt_cond_union *)cond)->shadow_cond;
+	struct cobalt_mutex_state *mutex_state;
+	struct cobalt_cond_state *cond_state;
+	xnhandle_t cur;
+	__u32 flags;
+	int err;
+
+	err = cobalt_cond_autoinit((union cobalt_cond_union *)cond);
+	if (err)
+		return err;
+
+	mutex_state = get_mutex_state(_cnd);
+	if (mutex_state == NULL)
+		return 0;
+
+	flags = mutex_state->flags;
+	if (flags & COBALT_MUTEX_ERRORCHECK) {
+		cur = cobalt_get_current();
+		if (cur == XN_NO_HANDLE)
+			return EPERM;
+		if (xnsynch_fast_owner_check(&mutex_state->owner, cur) < 0)
+			return EPERM;
+	}
+
+	mutex_state->flags = flags | COBALT_MUTEX_COND_SIGNAL;
+	cond_state = get_cond_state(_cnd);
+	cond_state->pending_signals = ~0U;
+
+	return 0;
+}
+
+/**
+ * Initialize a condition variable attributes object.
+ *
+ * This service initializes the condition variable attributes object @a attr
+ * with default values for all attributes. Default values are:
+ * - for the @a clock attribute, @a CLOCK_REALTIME;
+ * - for the @a pshared attribute, @a PTHREAD_PROCESS_PRIVATE.
+ *
+ * If this service is called specifying a condition variable attributes object
+ * that was already initialized, the attributes object is reinitialized.
+ *
+ * @param attr the condition variable attributes object to be initialized.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ENOMEM, the condition variable attribute object pointer @a attr is @a
+ *   NULL.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_condattr_init.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_condattr_init(pthread_condattr_t * attr);
+
+/**
+ * Destroy a condition variable attributes object.
+ *
+ * This service destroys the condition variable attributes object @a attr. The
+ * object becomes invalid for all condition variable services (they all return
+ * EINVAL) except pthread_condattr_init().
+ *
+ * @param attr the initialized condition variable attributes object to be
+ * destroyed.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EINVAL, the condition variable attributes object @a attr is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_condattr_destroy.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_condattr_destroy(pthread_condattr_t * attr);
+
+/**
+ * Get the clock selection attribute from a condition variable attributes
+ * object.
+ *
+ * This service stores, at the address @a clk_id, the value of the @a clock
+ * attribute in the condition variable attributes object @a attr.
+ *
+ * See pthread_cond_timedwait() for a description of the effect of
+ * this attribute on a condition variable. The clock ID returned is @a
+ * CLOCK_REALTIME or @a CLOCK_MONOTONIC.
+ *
+ * @param attr an initialized condition variable attributes object,
+ *
+ * @param clk_id address where the @a clock attribute value will be stored on
+ * success.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the attribute object @a attr is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_condattr_getclock.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_condattr_getclock(const pthread_condattr_t * attr,
+			clockid_t * clk_id);
+
+/**
+ * Set the clock selection attribute of a condition variable attributes object.
+ *
+ * This service sets the @a clock attribute of the condition variable attributes
+ * object @a attr.
+ *
+ * See pthread_cond_timedwait() for a description of the effect of
+ * this attribute on a condition variable.
+ *
+ * @param attr an initialized condition variable attributes object,
+ *
+ * @param clk_id value of the @a clock attribute, may be @a CLOCK_REALTIME or @a
+ * CLOCK_MONOTONIC.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the condition variable attributes object @a attr is invalid;
+ * - EINVAL, the value of @a clk_id is invalid for the @a clock attribute.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_condattr_setclock.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_condattr_setclock(pthread_condattr_t * attr, clockid_t clk_id);
+
+/**
+ * Get the process-shared attribute from a condition variable attributes
+ * object.
+ *
+ * This service stores, at the address @a pshared, the value of the @a pshared
+ * attribute in the condition variable attributes object @a attr.
+ *
+ * The @a pshared attribute may only be one of @a PTHREAD_PROCESS_PRIVATE or @a
+ * PTHREAD_PROCESS_SHARED. See pthread_condattr_setpshared() for the meaning of
+ * these two constants.
+ *
+ * @param attr an initialized condition variable attributes object.
+ *
+ * @param pshared address where the value of the @a pshared attribute will be
+ * stored on success.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the @a pshared address is invalid;
+ * - EINVAL, the condition variable attributes object @a attr is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_condattr_getpshared.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_condattr_getpshared(const pthread_condattr_t *attr, int *pshared);
+
+/**
+ * Set the process-shared attribute of a condition variable attributes object.
+ *
+ * This service sets the @a pshared attribute of the condition variable
+ * attributes object @a attr.
+ *
+ * @param attr an initialized condition variable attributes object.
+ *
+ * @param pshared value of the @a pshared attribute, may be one of:
+ * - PTHREAD_PROCESS_PRIVATE, meaning that a condition variable created with the
+ *   attributes object @a attr will only be accessible by threads within the
+ *   same process as the thread that initialized the condition variable;
+ * - PTHREAD_PROCESS_SHARED, meaning that a condition variable created with the
+ *   attributes object @a attr will be accessible by any thread that has access
+ *   to the memory where the condition variable is allocated.
+ *
+ * @return 0 on success,
+ * @return an error status if:
+ * - EINVAL, the condition variable attributes object @a attr is invalid;
+ * - EINVAL, the value of @a pshared is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_condattr_setpshared.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_condattr_setpshared(pthread_condattr_t *attr, int pshared);
+
+/** @}*/
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/current.c b/kernel/xenomai-v3.2.4/lib/cobalt/current.c
new file mode 100644
index 0000000..7d6ee9f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/current.c
@@ -0,0 +1,156 @@
+/*
+ * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <pthread.h>
+#include <asm/xenomai/syscall.h>
+#include <boilerplate/list.h>
+#include "current.h"
+#include "internal.h"
+
+static DEFINE_PRIVATE_LIST(tsd_hooks);
+
+#ifdef HAVE_TLS
+
+__thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+xnhandle_t cobalt_current = XN_NO_HANDLE;
+
+__thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+struct xnthread_user_window *cobalt_current_window;
+
+static inline void __cobalt_set_tsd(xnhandle_t current, __u32 u_winoff)
+{
+	struct xnthread_user_window *window;
+
+	cobalt_current = current;
+	window = cobalt_umm_shared + u_winoff;
+	cobalt_current_window = window;
+	cobalt_commit_memory(cobalt_current_window);
+}
+
+static inline void __cobalt_clear_tsd(void)
+{
+	cobalt_current = XN_NO_HANDLE;
+	cobalt_current_window = NULL;
+}
+
+static void init_current_keys(void)
+{
+	cobalt_current = XN_NO_HANDLE;
+}
+
+#else /* !HAVE_TLS */
+
+pthread_key_t cobalt_current_window_key;
+pthread_key_t cobalt_current_key;
+
+static inline void __cobalt_set_tsd(xnhandle_t current,
+				    __u32 u_winoff)
+{
+	struct xnthread_user_window *window;
+
+	current = (current != XN_NO_HANDLE ? current : (xnhandle_t)0);
+	pthread_setspecific(cobalt_current_key, (void *)(uintptr_t)current);
+
+	window = cobalt_umm_shared + u_winoff;
+	pthread_setspecific(cobalt_current_window_key, window);
+	cobalt_commit_memory(window);
+}
+
+static inline void __cobalt_clear_tsd(void)
+{
+	pthread_setspecific(cobalt_current_key, NULL);
+	pthread_setspecific(cobalt_current_window_key, NULL);
+}
+
+static void init_current_keys(void)
+{
+	int ret;
+
+	ret = pthread_key_create(&cobalt_current_key, NULL);
+	if (ret)
+		goto fail;
+
+	ret = pthread_key_create(&cobalt_current_window_key, NULL);
+	if (ret == 0)
+		return;
+fail:
+	early_panic("error creating TSD key: %s", strerror(ret));
+}
+
+#endif /* !HAVE_TLS */
+
+void cobalt_clear_tsd(void)
+{
+	struct cobalt_tsd_hook *th;
+
+	if (cobalt_get_current() == XN_NO_HANDLE)
+		return;
+
+	__cobalt_clear_tsd();
+
+	if (!pvlist_empty(&tsd_hooks)) {
+		pvlist_for_each_entry(th, &tsd_hooks, next)
+			th->delete_tsd();
+	}
+}
+
+xnhandle_t cobalt_get_current_slow(void)
+{
+	xnhandle_t current;
+	int err;
+
+	err = XENOMAI_SYSCALL1(sc_cobalt_get_current, &current);
+
+	return err ? XN_NO_HANDLE : current;
+}
+
+void cobalt_set_tsd(__u32 u_winoff)
+{
+	struct cobalt_tsd_hook *th;
+	xnhandle_t current;
+	int ret;
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_get_current, &current);
+	if (ret)
+		panic("cannot retrieve current handle: %s", strerror(-ret));
+
+	__cobalt_set_tsd(current, u_winoff);
+
+	if (!pvlist_empty(&tsd_hooks)) {
+		pvlist_for_each_entry(th, &tsd_hooks, next)
+			th->create_tsd();
+	}
+}
+
+void cobalt_init_current_keys(void)
+{
+	static pthread_once_t cobalt_init_current_keys_once = PTHREAD_ONCE_INIT;
+	pthread_once(&cobalt_init_current_keys_once, init_current_keys);
+}
+
+void cobalt_register_tsd_hook(struct cobalt_tsd_hook *th)
+{
+	/*
+	 * CAUTION: we assume inherently mt-safe conditions. Unless
+	 * multiple dlopen() ends up loading extension libs
+	 * concurrently, we should be ok.
+	 */
+	pvlist_append(&th->next, &tsd_hooks);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/current.h b/kernel/xenomai-v3.2.4/lib/cobalt/current.h
new file mode 100644
index 0000000..069ee1c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/current.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_CURRENT_H
+#define _LIB_COBALT_CURRENT_H
+
+#include <stdint.h>
+#include <pthread.h>
+#include <cobalt/uapi/thread.h>
+#include <xeno_config.h>
+
+extern pthread_key_t cobalt_current_window_key;
+
+xnhandle_t cobalt_get_current_slow(void);
+
+#ifdef HAVE_TLS
+extern __thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+xnhandle_t cobalt_current;
+extern __thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+struct xnthread_user_window *cobalt_current_window;
+
+static inline xnhandle_t cobalt_get_current(void)
+{
+	return cobalt_current;
+}
+
+static inline xnhandle_t cobalt_get_current_fast(void)
+{
+	return cobalt_get_current();
+}
+
+static inline int cobalt_get_current_mode(void)
+{
+	return cobalt_current_window ? cobalt_current_window->state : XNRELAX;
+}
+
+static inline struct xnthread_user_window *cobalt_get_current_window(void)
+{
+	return cobalt_current ? cobalt_current_window : NULL;
+}
+
+#else /* ! HAVE_TLS */
+extern pthread_key_t cobalt_current_key;
+
+xnhandle_t cobalt_get_current_slow(void);
+
+static inline xnhandle_t cobalt_get_current(void)
+{
+	void *val = pthread_getspecific(cobalt_current_key);
+
+	return (xnhandle_t)(uintptr_t)val ?: cobalt_get_current_slow();
+}
+
+/* syscall-free, but unreliable in TSD destructor context */
+static inline xnhandle_t cobalt_get_current_fast(void)
+{
+	void *val = pthread_getspecific(cobalt_current_key);
+
+	return (xnhandle_t)(uintptr_t)val ?: XN_NO_HANDLE;
+}
+
+static inline int cobalt_get_current_mode(void)
+{
+	struct xnthread_user_window *window;
+
+	window = pthread_getspecific(cobalt_current_window_key);
+
+	return window ? window->state : XNRELAX;
+}
+
+static inline struct xnthread_user_window *cobalt_get_current_window(void)
+{
+	return pthread_getspecific(cobalt_current_window_key);
+}
+
+#endif /* ! HAVE_TLS */
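+
+/*
+ * Editorial sketch (not part of the original sources): how callers
+ * typically branch on these accessors; XNRELAX flags a shadow thread
+ * currently running in secondary (relaxed) mode, and the handlers are
+ * hypothetical.
+ *
+ *	if (cobalt_get_current() == XN_NO_HANDLE)
+ *		handle_regular_thread();
+ *	else if (cobalt_get_current_mode() & XNRELAX)
+ *		handle_relaxed_shadow();
+ *	else
+ *		handle_primary_shadow();
+ */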
+
+void cobalt_init_current_keys(void);
+
+void cobalt_set_tsd(__u32 u_winoff);
+
+void cobalt_clear_tsd(void);
+
+#endif /* _LIB_COBALT_CURRENT_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/init.c b/kernel/xenomai-v3.2.4/lib/cobalt/init.c
new file mode 100644
index 0000000..dcd63ca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/init.c
@@ -0,0 +1,370 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <getopt.h>
+#include <limits.h>
+#include <unistd.h>
+#include <stdint.h>
+#include <semaphore.h>
+#include <boilerplate/setup.h>
+#include <cobalt/uapi/kernel/heap.h>
+#include <cobalt/ticks.h>
+#include <cobalt/tunables.h>
+#include <asm/xenomai/syscall.h>
+#include <xenomai/init.h>
+#include "umm.h"
+#include "internal.h"
+
+/**
+ * @ingroup cobalt
+ * @defgroup cobalt_api POSIX interface
+ * @anchor cobalt_api
+ *
+ * The Cobalt/POSIX interface is an implementation of a subset of the
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/">
+ * Single Unix specification</a> over the Cobalt core.
+ *
+ * The routines from this subset are implemented as wrapper functions
+ * as defined by the linker (--wrap option, see man ld(1)).  The
+ * linker flags for enabling symbol wrapping can be obtained from the
+ * following command: *xeno-config --posix --ldflags*.
+ * The full documentation for *xeno-config* can be found at
+ * https://xenomai.org/documentation/xenomai-3/html/man1/xeno-config/index.html.
+ *
+ * When symbol wrapping is enabled:
+ *
+ *   - calls to POSIX services for which Cobalt provides a (real-time)
+ * implementation are redirected to the library implementing the
+ * wrapper, by default libcobalt. A list of wrapped symbols libcobalt
+ * overrides can be found in the source tree, in
+ * lib/cobalt/cobalt.wrappers.
+ *
+ * With or without symbol wrapping:
+ *
+ *   - the wrapper function of a POSIX routine can be explicitly
+ * invoked by enclosing the function call with the __RT() macro. Since
+ * the wrapper symbol is weak, it may be overridden by a 3rd party
+ * library, typically to implement its own version of the POSIX call,
+ * instead of, or on top of, libcobalt's. e.g. __RT(sem_init(&sem, 0, 0))
+ * would initialize a real-time semaphore, usually from libcobalt
+ * unless a stronger sem_init() wrapper has been provided by a 3rd
+ * party library.
+ *
+ *   - the libcobalt implementation of a POSIX routine can be
+ * explicitly invoked by enclosing the function call with the
+ * __COBALT() macro.  e.g. __COBALT(sem_init(&sem, 0, 0)) would always
+ * initialize a Cobalt semaphore (strong symbol).
+ *
+ *   - the regular *libc implementation of a POSIX routine can be
+ * explicitly invoked by enclosing the function call with the __STD()
+ * macro. This form basically prevents symbol wrapping from taking
+ * place. e.g. __STD(sem_init(&sem, 0, 0)) would always initialize a
+ * regular *libc semaphore. This is strictly equivalent to calling the
+ * __real_* form of such a routine as documented for ld(1).
+ *
+ * Qualifying POSIX calls explicitly as described above may prove
+ * useful for invoking real-time services selectively within a large
+ * POSIX code base, for which globally enabling symbol wrapping would
+ * be impractical. This may also help in implementing real-time
+ * service libraries for which depending on the linker's symbol
+ * wrapping mechanism is not suitable.
+ *
+ * That said, keeping the application code conforming to the POSIX
+ * standard, unspoiled by macro tricks, may be a significant upside as
+ * well. YMMV.
+ */
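+
+/*
+ * Editorial sketch (not part of the original sources): the typical
+ * build command, letting xeno-config emit the compiler flags and the
+ * --wrap list collected in lib/cobalt/cobalt.wrappers:
+ *
+ *	gcc -o app app.c $(xeno-config --posix --cflags) \
+ *		$(xeno-config --posix --ldflags)
+ */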
+
+__weak int __cobalt_control_bind = 0;
+
+int __cobalt_main_prio = -1;
+
+struct sigaction __cobalt_orig_sigdebug;
+
+static const struct option cobalt_options[] = {
+	{
+#define main_prio_opt		0
+		.name = "main-prio",
+		.has_arg = required_argument,
+	},
+	{
+#define print_bufsz_opt	1
+		.name = "print-buffer-size",
+		.has_arg = required_argument,
+	},
+	{
+#define print_bufcnt_opt	2
+		.name = "print-buffer-count",
+		.has_arg = required_argument,
+	},
+	{
+#define print_syncdelay_opt	3
+		.name = "print-sync-delay",
+		.has_arg = required_argument,
+	},
+	{ /* Sentinel */ }
+};
+
+static void sigill_handler(int sig)
+{
+	const char m[] = "no Xenomai/cobalt support in kernel?\n";
+	ssize_t rc __attribute__ ((unused));
+	rc = write(2, m, sizeof(m) - 1);
+	exit(EXIT_FAILURE);
+}
+
+static void low_init(void)
+{
+	sighandler_t old_sigill_handler;
+	struct cobalt_bindreq breq;
+	struct cobalt_featinfo *f;
+	int ret;
+
+	old_sigill_handler = signal(SIGILL, sigill_handler);
+	if (old_sigill_handler == SIG_ERR)
+		early_panic("signal(SIGILL): %s", strerror(errno));
+
+	f = &breq.feat_ret;
+	breq.feat_req = XENOMAI_FEAT_DEP;
+	if (__cobalt_control_bind)
+		breq.feat_req |= __xn_feat_control;
+	breq.abi_rev = XENOMAI_ABI_REV;
+	ret = XENOMAI_SYSBIND(&breq);
+
+	signal(SIGILL, old_sigill_handler);
+
+	switch (ret) {
+	case 0:
+		break;
+	case -EINVAL:
+		early_panic("missing feature: %s", f->feat_mis_s);
+	case -ENOEXEC:
+		early_panic("ABI mismatch: required r%lu, provided r%lu",
+			    XENOMAI_ABI_REV, f->feat_abirev);
+	case -EAGAIN:
+		early_panic("Cobalt core present but stopped "
+			    "(use corectl --start)");
+	case -ENOSYS:
+		early_panic("Cobalt core not enabled in kernel");
+	default:
+		early_panic("binding failed: %s", strerror(-ret));
+	}
+
+	trace_me("connected to Cobalt");
+
+	if (mlockall(MCL_CURRENT | MCL_FUTURE))
+		early_panic("mlockall: %s", strerror(errno));
+
+	trace_me("memory locked");
+	cobalt_ticks_init(f->clock_freq);
+	cobalt_features_init(f);
+	cobalt_init_umm(f->vdso_offset);
+	trace_me("memory heaps mapped");
+	cobalt_init_current_keys();
+}
+
+static int cobalt_init_2(void);
+
+static void cobalt_fork_handler(void)
+{
+	cobalt_unmap_umm();
+	cobalt_clear_tsd();
+	cobalt_print_init_atfork();
+	if (cobalt_init_2())
+		exit(EXIT_FAILURE);
+}
+
+static inline void commit_stack_memory(void)
+{
+	char stk[PTHREAD_STACK_MIN / 2];
+	cobalt_commit_memory(stk);
+}
+
+static void cobalt_init_1(void)
+{
+	struct sigaction sa;
+
+	sa.sa_sigaction = cobalt_sigdebug_handler;
+	sigemptyset(&sa.sa_mask);
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGDEBUG, &sa, &__cobalt_orig_sigdebug);
+
+	/*
+	 * NOTE: a placeholder for pthread_atfork() may return an
+	 * error status with uClibc, so we don't check the return
+	 * value on purpose.
+	 */
+	pthread_atfork(NULL, NULL, cobalt_fork_handler);
+
+	if (sizeof(struct cobalt_mutex_shadow) > sizeof(pthread_mutex_t))
+		early_panic("sizeof(pthread_mutex_t): %Zd <"
+			    " sizeof(cobalt_mutex_shadow): %Zd!",
+			    sizeof(pthread_mutex_t),
+			    sizeof(struct cobalt_mutex_shadow));
+
+	if (sizeof(struct cobalt_cond_shadow) > sizeof(pthread_cond_t))
+		early_panic("sizeof(pthread_cond_t): %Zd <"
+			    " sizeof(cobalt_cond_shadow): %Zd!",
+			    sizeof(pthread_cond_t),
+			    sizeof(struct cobalt_cond_shadow));
+
+	if (sizeof(struct cobalt_sem_shadow) > sizeof(sem_t))
+		early_panic("sizeof(sem_t): %Zd <"
+			    " sizeof(cobalt_sem_shadow): %Zd!",
+			    sizeof(sem_t),
+			    sizeof(struct cobalt_sem_shadow));
+}
+
+static int cobalt_init_2(void)
+{
+	pthread_t ptid = pthread_self();
+	struct sched_param parm;
+	int policy, ret;
+
+	commit_stack_memory();	/* We only need this for the main thread */
+	cobalt_default_condattr_init();
+
+	low_init();
+	cobalt_mutex_init();
+	cobalt_sched_init();
+	cobalt_thread_init();
+	cobalt_print_init();
+
+	if (__cobalt_control_bind)
+		return 0;
+
+	ret = __STD(pthread_getschedparam(ptid, &policy, &parm));
+	if (ret) {
+		early_warning("pthread_getschedparam failed");
+		return -ret;
+	}
+
+	/*
+	 * Turn the main thread into a Cobalt thread.
+	 * __cobalt_main_prio might have been overridden by a
+	 * compilation unit linked into the executable, to force the
+	 * scheduling parameters. Otherwise, the current policy and
+	 * priority are reused for declaring the thread to the
+	 * Cobalt scheduler.
+	 *
+	 * SCHED_FIFO is assumed for __cobalt_main_prio > 0.
+	 */
+	if (__cobalt_main_prio > 0) {
+		policy = SCHED_FIFO;
+		parm.sched_priority = __cobalt_main_prio;
+	} else if (__cobalt_main_prio == 0) {
+		policy = SCHED_OTHER;
+		parm.sched_priority = 0;
+	}
+
+	ret = __RT(pthread_setschedparam(ptid, policy, &parm));
+	if (ret) {
+		early_warning("pthread_setschedparam failed { policy=%d, prio=%d }",
+			      policy, parm.sched_priority);
+		return -ret;
+	}
+
+	return 0;
+}
+
+int cobalt_init(void)
+{
+	cobalt_init_1();
+
+	return cobalt_init_2();
+}
+
+static int get_int_arg(const char *name, const char *arg,
+		       int *valp, int min)
+{
+	int value, ret;
+	char *p;
+
+	errno = 0;
+	value = (int)strtol(arg, &p, 10);
+	if (errno || *p || value < min) {
+		ret = -errno ?: -EINVAL;
+		early_warning("invalid value for %s: %s", name, arg);
+		return ret;
+	}
+
+	*valp = value;
+
+	return 0;
+}
+
+static int cobalt_parse_option(int optnum, const char *optarg)
+{
+	int value, ret;
+
+	switch (optnum) {
+	case main_prio_opt:
+		ret = get_int_arg("--main-prio", optarg, &value, INT32_MIN);
+		if (ret)
+			return ret;
+		__cobalt_main_prio = value;
+		break;
+	case print_bufsz_opt:
+		ret = get_int_arg("--print-buffer-size", optarg, &value, 0);
+		if (ret)
+			return ret;
+		__cobalt_print_bufsz = value;
+		break;
+	case print_bufcnt_opt:
+		ret = get_int_arg("--print-buffer-count", optarg, &value, 0);
+		if (ret)
+			return ret;
+		__cobalt_print_bufcount = value;
+		break;
+	case print_syncdelay_opt:
+		ret = get_int_arg("--print-sync-delay", optarg, &value, 0);
+		if (ret)
+			return ret;
+		__cobalt_print_syncdelay = value;
+		break;
+	default:
+		/* Paranoid, can't happen. */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void cobalt_help(void)
+{
+	fprintf(stderr, "--main-prio=<prio>		main thread priority\n");
+	fprintf(stderr, "--print-buffer-size=<bytes>	size of a print relay buffer (16k)\n");
+	fprintf(stderr, "--print-buffer-count=<num>	number of print relay buffers (4)\n");
+	fprintf(stderr, "--print-sync-delay=<ms>	max delay of output synchronization (100 ms)\n");
+}
+
+static struct setup_descriptor cobalt_interface = {
+	.name = "cobalt",
+	.init = cobalt_init,
+	.options = cobalt_options,
+	.parse_option = cobalt_parse_option,
+	.help = cobalt_help,
+};
+
+core_setup_call(cobalt_interface);
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/internal.c b/kernel/xenomai-v3.2.4/lib/cobalt/internal.c
new file mode 100644
index 0000000..bf1e940
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/internal.c
@@ -0,0 +1,600 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * --
+ * Internal Cobalt services. No sanity check will be done with
+ * respect to object validity, callers have to take care of this.
+ */
+#include <sys/types.h>
+#include <stddef.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <pthread.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/tsc.h>
+#include <cobalt/ticks.h>
+#include <cobalt/sys/cobalt.h>
+#include "internal.h"
+
+int cobalt_extend(unsigned int magic)
+{
+	return XENOMAI_SYSCALL1(sc_cobalt_extend, magic);
+}
+
+int cobalt_corectl(int request, void *buf, size_t bufsz)
+{
+	return XENOMAI_SYSCALL3(sc_cobalt_corectl, request, buf, bufsz);
+}
+
+void cobalt_thread_harden(void)
+{
+	int status = cobalt_get_current_mode();
+
+	/* non-RT shadows are NOT allowed to force primary mode. */
+	if ((status & (XNRELAX|XNWEAK)) == XNRELAX)
+		XENOMAI_SYSCALL1(sc_cobalt_migrate, COBALT_PRIMARY);
+}
+
+void cobalt_thread_relax(void)
+{
+	if (!cobalt_is_relaxed())
+		XENOMAI_SYSCALL1(sc_cobalt_migrate, COBALT_SECONDARY);
+}
+
+int cobalt_thread_stat(pid_t pid, struct cobalt_threadstat *stat)
+{
+	return XENOMAI_SYSCALL2(sc_cobalt_thread_getstat, pid, stat);
+}
+
+pid_t cobalt_thread_pid(pthread_t thread)
+{
+	return XENOMAI_SYSCALL1(sc_cobalt_thread_getpid, thread);
+}
+
+int cobalt_thread_mode(void)
+{
+	return cobalt_get_current_mode();
+}
+
+int cobalt_thread_join(pthread_t thread)
+{
+	int ret, oldtype;
+
+	/*
+	 * Serialize with the regular task exit path, so that no call
+	 * for the joined pthread may succeed after this routine
+	 * returns. A successful call to sc_cobalt_thread_join
+	 * receives -EIDRM, meaning that we eventually joined the
+	 * exiting thread as seen by the Cobalt core.
+	 *
+	 * -ESRCH means that the joined thread has already exited
+	 * linux-wise, while we were about to wait for it from the
+	 * Cobalt side, in which case we are fine.
+	 *
+	 * -EBUSY denotes a multiple join for several threads in
+	 * parallel to the same target.
+	 *
+	 * -EPERM may be received because the caller is not a
+	 * Cobalt thread.
+	 *
+	 * -EINVAL is received in case the target is not a joinable
+	 * thread (i.e. detached).
+	 *
+	 * Zero is unexpected.
+	 *
+	 * CAUTION: this service joins a thread Cobalt-wise only, not
+	 * glibc-wise.  For a complete join comprising the libc
+	 * cleanups, __STD(pthread_join()) should be paired with this
+	 * call.
+	 */
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	do
+		ret = XENOMAI_SYSCALL1(sc_cobalt_thread_join, thread);
+	while (ret == -EINTR);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
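+
+/*
+ * Usage sketch (hypothetical helper), following the comment above:
+ * join Cobalt-wise first, then complete the join glibc-wise. -EIDRM
+ * and -ESRCH both mean the target is gone from the Cobalt core.
+ */
+#if 0
+static int full_join(pthread_t thread, void **retval)
+{
+	int ret = cobalt_thread_join(thread);
+
+	if (ret && ret != -EIDRM && ret != -ESRCH)
+		return ret;
+
+	/* pthread_join() returns a positive errno value. */
+	return -__STD(pthread_join(thread, retval));
+}
+#endif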
+
+int cobalt_thread_probe(pid_t pid)
+{
+	return XENOMAI_SYSCALL2(sc_cobalt_kill, pid, 0);
+}
+
+void __cobalt_commit_memory(void *p, size_t len)
+{
+	volatile char *_p = (volatile char *)p, *end;
+	long pagesz = sysconf(_SC_PAGESIZE);
+
+	end = _p + len;
+	do {
+		*_p = *_p;
+		_p += pagesz;
+	} while (_p < end);
+}
+
+int cobalt_serial_debug(const char *fmt, ...)
+{
+	char msg[128];
+	va_list ap;
+	int n, ret;
+
+	/*
+	 * The serial debug output handler disables hw IRQs while
+	 * writing to the UART console port, so the message ought to
+	 * be reasonably short.
+	 */
+	va_start(ap, fmt);
+	n = vsnprintf(msg, sizeof(msg), fmt, ap);
+	ret = XENOMAI_SYSCALL2(sc_cobalt_serialdbg, msg, n);
+	va_end(ap);
+
+	return ret;
+}
+
+static inline
+struct cobalt_monitor_state *get_monitor_state(cobalt_monitor_t *mon)
+{
+	return mon->flags & COBALT_MONITOR_SHARED ?
+		cobalt_umm_shared + mon->state_offset :
+		cobalt_umm_private + mon->state_offset;
+}
+
+int cobalt_monitor_init(cobalt_monitor_t *mon, clockid_t clk_id, int flags)
+{
+	struct cobalt_monitor_state *state;
+	int ret;
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_monitor_init,
+			       mon, clk_id, flags);
+	if (ret)
+		return ret;
+
+	state = get_monitor_state(mon);
+	cobalt_commit_memory(state);
+
+	return 0;
+}
+
+int cobalt_monitor_destroy(cobalt_monitor_t *mon)
+{
+	return XENOMAI_SYSCALL1(sc_cobalt_monitor_destroy, mon);
+}
+
+int cobalt_monitor_enter(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state;
+	int status, ret, oldtype;
+	xnhandle_t cur;
+
+	/*
+	 * Assumptions on entry:
+	 *
+	 * - this is a Cobalt thread (caller checked this).
+	 * - no recursive entry/locking.
+	 */
+
+	status = cobalt_get_current_mode();
+	if (status & (XNRELAX|XNWEAK|XNDEBUG))
+		goto syscall;
+
+	state = get_monitor_state(mon);
+	cur = cobalt_get_current();
+	ret = xnsynch_fast_acquire(&state->owner, cur);
+	if (ret == 0) {
+		state->flags &= ~(COBALT_MONITOR_SIGNALED|COBALT_MONITOR_BROADCAST);
+		return 0;
+	}
+syscall:
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	/*
+	 * Jump to the kernel to wait for entry, retrying if
+	 * interrupted.
+	 */
+	do
+		ret = XENOMAI_SYSCALL1(sc_cobalt_monitor_enter,	mon);
+	while (ret == -EINTR);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+int cobalt_monitor_exit(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state;
+	int status, ret;
+	xnhandle_t cur;
+
+	__sync_synchronize();
+
+	state = get_monitor_state(mon);
+	if ((state->flags & COBALT_MONITOR_PENDED) &&
+	    (state->flags & COBALT_MONITOR_SIGNALED))
+		goto syscall;
+
+	status = cobalt_get_current_mode();
+	if (status & (XNWEAK|XNDEBUG))
+		goto syscall;
+
+	cur = cobalt_get_current();
+	if (xnsynch_fast_release(&state->owner, cur))
+		return 0;
+syscall:
+	do
+		ret = XENOMAI_SYSCALL1(sc_cobalt_monitor_exit, mon);
+	while (ret == -EINTR);
+
+	return ret;
+}
+
+int cobalt_monitor_wait(cobalt_monitor_t *mon, int event,
+			const struct timespec *ts)
+{
+	int ret, opret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	ret = XENOMAI_SYSCALL4(sc_cobalt_monitor_wait64, mon, event, ts,
+			       &opret);
+#else
+	ret = XENOMAI_SYSCALL4(sc_cobalt_monitor_wait, mon, event, ts, &opret);
+#endif
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	/*
+	 * If we got interrupted while trying to re-enter the monitor,
+	 * we need to retry. In the meantime, any pending Linux signal
+	 * has been processed.
+	 */
+	if (ret == -EINTR)
+		ret = cobalt_monitor_enter(mon);
+
+	return ret ?: opret;
+}
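+
+/*
+ * Usage sketch (illustrative only): serializing a critical section on
+ * a process-private monitor from a Cobalt thread. Uncontended
+ * enter/exit runs the fast path above without entering the kernel.
+ */
+#if 0
+static cobalt_monitor_t gate; /* once: cobalt_monitor_init(&gate, CLOCK_MONOTONIC, 0) */
+static int shared_count;
+
+static void bump_count(void)
+{
+	cobalt_monitor_enter(&gate);
+	shared_count++;
+	cobalt_monitor_exit(&gate);
+}
+#endif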
+
+void cobalt_monitor_grant(cobalt_monitor_t *mon,
+			  struct xnthread_user_window *u_window)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+
+	state->flags |= COBALT_MONITOR_GRANTED;
+	u_window->grant_value = 1;
+}
+
+int cobalt_monitor_grant_sync(cobalt_monitor_t *mon,
+			  struct xnthread_user_window *u_window)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+	int ret, oldtype;
+
+	cobalt_monitor_grant(mon, u_window);
+
+	if ((state->flags & COBALT_MONITOR_PENDED) == 0)
+		return 0;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_monitor_sync, mon);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret == -EINTR)
+		return cobalt_monitor_enter(mon);
+
+	return ret;
+}
+
+void cobalt_monitor_grant_all(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+
+	state->flags |= COBALT_MONITOR_GRANTED|COBALT_MONITOR_BROADCAST;
+}
+
+int cobalt_monitor_grant_all_sync(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+	int ret, oldtype;
+
+	cobalt_monitor_grant_all(mon);
+
+	if ((state->flags & COBALT_MONITOR_PENDED) == 0)
+		return 0;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_monitor_sync, mon);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret == -EINTR)
+		return cobalt_monitor_enter(mon);
+
+	return ret;
+}
+
+void cobalt_monitor_drain(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+
+	state->flags |= COBALT_MONITOR_DRAINED;
+}
+
+int cobalt_monitor_drain_sync(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+	int ret, oldtype;
+
+	cobalt_monitor_drain(mon);
+
+	if ((state->flags & COBALT_MONITOR_PENDED) == 0)
+		return 0;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_monitor_sync, mon);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret == -EINTR)
+		return cobalt_monitor_enter(mon);
+
+	return ret;
+}
+
+void cobalt_monitor_drain_all(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+
+	state->flags |= COBALT_MONITOR_DRAINED|COBALT_MONITOR_BROADCAST;
+}
+
+int cobalt_monitor_drain_all_sync(cobalt_monitor_t *mon)
+{
+	struct cobalt_monitor_state *state = get_monitor_state(mon);
+	int ret, oldtype;
+
+	cobalt_monitor_drain_all(mon);
+
+	if ((state->flags & COBALT_MONITOR_PENDED) == 0)
+		return 0;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_monitor_sync, mon);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret == -EINTR)
+		return cobalt_monitor_enter(mon);
+
+	return ret;
+}
+
+#define __raw_write_out(__msg)					\
+	do {							\
+		int __ret;					\
+		__ret = write(1, __msg , sizeof(__msg) - 1);	\
+		(void)__ret;					\
+	} while (0)
+
+#define raw_write_out(__msg)	__raw_write_out("Xenomai/cobalt: " __msg "\n")
+
+void cobalt_sigdebug_handler(int sig, siginfo_t *si, void *context)
+{
+	if (!sigdebug_marked(si))
+		goto forward;
+
+	switch (sigdebug_reason(si)) {
+	case SIGDEBUG_NOMLOCK:
+		raw_write_out("process memory not locked");
+		_exit(4);
+	case SIGDEBUG_RESCNT_IMBALANCE:
+		raw_write_out("resource locking imbalance");
+		break;
+	case SIGDEBUG_MUTEX_SLEEP:
+		raw_write_out("sleeping while holding mutex");
+		break;
+	case SIGDEBUG_WATCHDOG:
+		raw_write_out("watchdog triggered");
+		break;
+	}
+
+forward:
+	sigaction(SIGDEBUG, &__cobalt_orig_sigdebug, NULL);
+	pthread_kill(pthread_self(), SIGDEBUG);
+}
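+
+/*
+ * Usage sketch (illustrative; the reason code is assumed from
+ * <cobalt/uapi/signal.h>): an application may install its own
+ * SIGDEBUG handler to trace unwanted relaxes before forwarding.
+ */
+#if 0
+static void app_sigdebug(int sig, siginfo_t *si, void *context)
+{
+	if (sigdebug_marked(si) &&
+	    sigdebug_reason(si) == SIGDEBUG_MIGRATE_SYSCALL)
+		raw_write_out("thread relaxed by a plain syscall");
+}
+#endif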
+
+static inline
+struct cobalt_event_state *get_event_state(cobalt_event_t *event)
+{
+	return event->flags & COBALT_EVENT_SHARED ?
+		cobalt_umm_shared + event->state_offset :
+		cobalt_umm_private + event->state_offset;
+}
+
+int cobalt_event_init(cobalt_event_t *event, unsigned int value,
+		      int flags)
+{
+	struct cobalt_event_state *state;
+	int ret;
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_event_init, event, value, flags);
+	if (ret)
+		return ret;
+
+	state = get_event_state(event);
+	cobalt_commit_memory(state);
+
+	return 0;
+}
+
+int cobalt_event_destroy(cobalt_event_t *event)
+{
+	return XENOMAI_SYSCALL1(sc_cobalt_event_destroy, event);
+}
+
+int cobalt_event_post(cobalt_event_t *event, unsigned int bits)
+{
+	struct cobalt_event_state *state = get_event_state(event);
+
+	if (bits == 0)
+		return 0;
+
+	__sync_or_and_fetch(&state->value, bits); /* full barrier. */
+
+	if ((state->flags & COBALT_EVENT_PENDED) == 0)
+		return 0;
+
+	return XENOMAI_SYSCALL1(sc_cobalt_event_sync, event);
+}
+
+int cobalt_event_wait(cobalt_event_t *event,
+		      unsigned int bits, unsigned int *bits_r,
+		      int mode, const struct timespec *timeout)
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	ret = XENOMAI_SYSCALL5(sc_cobalt_event_wait64,
+			       event, bits, bits_r, mode, timeout);
+#else
+	ret = XENOMAI_SYSCALL5(sc_cobalt_event_wait,
+			       event, bits, bits_r, mode, timeout);
+#endif
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+unsigned long cobalt_event_clear(cobalt_event_t *event,
+				 unsigned int bits)
+{
+	struct cobalt_event_state *state = get_event_state(event);
+
+	return __sync_fetch_and_and(&state->value, ~bits);
+}
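+
+/*
+ * Usage sketch (illustrative; COBALT_EVENT_ANY assumed from
+ * <cobalt/uapi/event.h>): post an event bit, then consume it.
+ * cobalt_event_post() only issues a syscall when waiters pend.
+ */
+#if 0
+static void event_roundtrip(void)
+{
+	cobalt_event_t ev;
+	unsigned int bits;
+
+	cobalt_event_init(&ev, 0, 0);
+	cobalt_event_post(&ev, 0x1);
+	cobalt_event_wait(&ev, 0x1, &bits, COBALT_EVENT_ANY, NULL);
+	cobalt_event_clear(&ev, bits);
+	cobalt_event_destroy(&ev);
+}
+#endif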
+
+int cobalt_event_inquire(cobalt_event_t *event,
+			 struct cobalt_event_info *info,
+			 pid_t *waitlist, size_t waitsz)
+{
+	return XENOMAI_SYSCALL4(sc_cobalt_event_inquire, event,
+				info, waitlist, waitsz);
+}
+
+int cobalt_sem_inquire(sem_t *sem, struct cobalt_sem_info *info,
+		       pid_t *waitlist, size_t waitsz)
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+
+	return XENOMAI_SYSCALL4(sc_cobalt_sem_inquire, _sem,
+				info, waitlist, waitsz);
+}
+
+int cobalt_sched_weighted_prio(int policy,
+			       const struct sched_param_ex *param_ex)
+{
+	return XENOMAI_SYSCALL2(sc_cobalt_sched_weightprio, policy, param_ex);
+}
+
+int cobalt_xlate_schedparam(int policy,
+			    const struct sched_param_ex *param_ex,
+			    struct sched_param *param)
+{
+	int std_policy, priority;
+
+	/*
+	 * Translates Cobalt scheduling parameters to native ones,
+	 * based on a best approximation for Cobalt policies which are
+	 * not available from the host kernel.
+	 */
+	std_policy = policy;
+	priority = param_ex->sched_priority;
+
+	switch (policy) {
+	case SCHED_WEAK:
+		std_policy = priority ? SCHED_FIFO : SCHED_OTHER;
+		break;
+	default:
+		std_policy = SCHED_FIFO;
+		/* fall through wanted. */
+	case SCHED_OTHER:
+	case SCHED_FIFO:
+	case SCHED_RR:
+		/*
+		 * The Cobalt priority range is larger than those of
+		 * the native SCHED_FIFO/RR classes, so we have to cap
+		 * the priority value accordingly.  We also remap
+		 * "weak" (negative) priorities - which are only
+		 * meaningful for the Cobalt core - to regular values.
+		 */
+		if (priority > __cobalt_std_fifo_maxpri)
+			priority = __cobalt_std_fifo_maxpri;
+	}
+
+	if (priority < 0)
+		priority = -priority;
+
+	memset(param, 0, sizeof(*param));
+	param->sched_priority = priority;
+
+	return std_policy;
+}
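+
+/*
+ * Worked example of the translation above (policy/priority values
+ * illustrative):
+ *   SCHED_WEAK, prio 0   -> SCHED_OTHER, prio 0
+ *   SCHED_WEAK, prio -3  -> SCHED_FIFO, prio 3 (weak prio remapped)
+ *   SCHED_TP, prio 200   -> SCHED_FIFO, prio capped to __cobalt_std_fifo_maxpri
+ */
+#if 0
+static int to_native_policy(void)
+{
+	struct sched_param_ex pex = { .sched_priority = 200 };
+	struct sched_param p;
+
+	return cobalt_xlate_schedparam(SCHED_TP, &pex, &p);
+}
+#endif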
+
+void cobalt_assert_nrt(void)
+{
+	if (cobalt_should_warn())
+		pthread_kill(pthread_self(), SIGDEBUG);
+}
+
+unsigned long long cobalt_read_tsc(void)
+{
+	struct timespec ts;
+
+	if (cobalt_use_legacy_tsc())
+		return cobalt_read_legacy_tsc();
+
+	__cobalt_vdso_gettime(CLOCK_MONOTONIC, &ts);
+
+	return ts.tv_sec * 1000000000ULL + ts.tv_nsec;
+}
+
+unsigned int cobalt_features;
+
+void cobalt_features_init(struct cobalt_featinfo *f)
+{
+	cobalt_features = f->feat_all;
+
+	/* Trigger arch specific feature initialization */
+	cobalt_arch_check_features(f);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/internal.h b/kernel/xenomai-v3.2.4/lib/cobalt/internal.h
new file mode 100644
index 0000000..acb3989
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/internal.h
@@ -0,0 +1,135 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_INTERNAL_H
+#define _LIB_COBALT_INTERNAL_H
+
+#include <limits.h>
+#include <stdbool.h>
+#include <time.h>
+#include <boilerplate/ancillaries.h>
+#include <cobalt/sys/cobalt.h>
+#include "current.h"
+
+extern void *cobalt_umm_private;
+
+extern void *cobalt_umm_shared;
+
+static inline int cobalt_is_relaxed(void)
+{
+	return cobalt_get_current_mode() & XNRELAX;
+}
+
+static inline int cobalt_should_warn(void)
+{
+	return (cobalt_get_current_mode() & (XNRELAX|XNWARN)) == XNWARN;
+}
+
+#ifdef CONFIG_XENO_LAZY_SETSCHED
+static inline int cobalt_eager_setsched(void)
+{
+	return cobalt_is_relaxed();
+}
+#else
+static inline int cobalt_eager_setsched(void)
+{
+	return 1;
+}
+#endif
+
+static inline
+struct cobalt_mutex_state *mutex_get_state(struct cobalt_mutex_shadow *shadow)
+{
+	if (shadow->attr.pshared)
+		return cobalt_umm_shared + shadow->state_offset;
+
+	return cobalt_umm_private + shadow->state_offset;
+}
+
+static inline atomic_t *mutex_get_ownerp(struct cobalt_mutex_shadow *shadow)
+{
+	return &mutex_get_state(shadow)->owner;
+}
+
+void cobalt_sigshadow_install_once(void);
+
+void cobalt_thread_init(void);
+
+int cobalt_thread_probe(pid_t pid);
+
+void cobalt_sched_init(void);
+
+void cobalt_print_init(void);
+
+void cobalt_print_init_atfork(void);
+
+void cobalt_ticks_init(unsigned long long freq);
+
+void cobalt_mutex_init(void);
+
+void cobalt_default_condattr_init(void);
+
+int cobalt_xlate_schedparam(int policy,
+			    const struct sched_param_ex *param_ex,
+			    struct sched_param *param);
+int cobalt_init(void);
+
+void *cobalt_lookup_vdso(const char *version, const char *name);
+
+extern struct sigaction __cobalt_orig_sigdebug;
+
+extern int __cobalt_std_fifo_minpri,
+	   __cobalt_std_fifo_maxpri;
+
+extern int __cobalt_std_rr_minpri,
+	   __cobalt_std_rr_maxpri;
+
+extern int (*__cobalt_vdso_gettime)(clockid_t clk_id,
+				    struct timespec *tp);
+
+extern unsigned int cobalt_features;
+
+struct cobalt_featinfo;
+
+/**
+ * Arch-specific feature initialization
+ *
+ * @param finfo Feature info as received from the kernel
+ */
+void cobalt_arch_check_features(struct cobalt_featinfo *finfo);
+
+/**
+ * Initialize the feature handling.
+ *
+ * @param f Feature info that will be cached for future feature checks
+ */
+void cobalt_features_init(struct cobalt_featinfo *f);
+
+/**
+ * Check if a given set of features is available / provided by the kernel
+ *
+ * @param feat_mask A bit mask of features to check availability for. See
+ * __xn_feat_* macros for a list of defined features
+ *
+ * @return true if all features are available, false otherwise
+ */
+static inline bool cobalt_features_available(unsigned int feat_mask)
+{
+	return (cobalt_features & feat_mask) == feat_mask;
+}
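+
+/*
+ * Usage sketch (illustrative; __xn_feat_smp stands for one of the
+ * __xn_feat_* bits mentioned above): gate an optional code path on a
+ * kernel-provided feature.
+ */
+#if 0
+static inline bool use_smp_path(void)
+{
+	return cobalt_features_available(__xn_feat_smp);
+}
+#endif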
+
+#endif /* _LIB_COBALT_INTERNAL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/malloc-nowrap.c b/kernel/xenomai-v3.2.4/lib/cobalt/malloc-nowrap.c
new file mode 100644
index 0000000..416fbad
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/malloc-nowrap.c
@@ -0,0 +1,30 @@
+/*
+ * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdlib.h>
+
+__weak
+void *__real_malloc(size_t size)
+{
+	return malloc(size);
+}
+
+__weak
+void __real_free(void *ptr)
+{
+	free(ptr);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/malloc.c b/kernel/xenomai-v3.2.4/lib/cobalt/malloc.c
new file mode 100644
index 0000000..82f89fd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/malloc.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2008, 2009 Jan Kiszka <jan.kiszka@siemens.com>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdlib.h>
+#include <cobalt/sys/cobalt.h>
+
+/* Memory allocation services */
+COBALT_IMPL(void *, malloc, (size_t size))
+{
+	cobalt_assert_nrt();
+	return __STD(malloc(size));
+}
+
+COBALT_IMPL(void, free, (void *ptr))
+{
+	cobalt_assert_nrt();
+	__STD(free(ptr));
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/modechk.wrappers b/kernel/xenomai-v3.2.4/lib/cobalt/modechk.wrappers
new file mode 100644
index 0000000..7164858
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/modechk.wrappers
@@ -0,0 +1,2 @@
+--wrap malloc
+--wrap free
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/mq.c b/kernel/xenomai-v3.2.4/lib/cobalt/mq.c
new file mode 100644
index 0000000..a8c88b4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/mq.c
@@ -0,0 +1,589 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <mqueue.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_mq Message queues
+ *
+ * Cobalt/POSIX message queue services
+ *
+ * A message queue allows exchanging data between real-time
+ * threads. For a POSIX message queue, the maximum message length and
+ * the maximum number of messages are fixed when it is created with
+ * mq_open().
+ *
+ *@{
+ */
+
+/**
+ * @brief Open a message queue
+ *
+ * This service opens the message queue named @a name.
+ *
+ * One of the following values should be set in @a oflags:
+ * - O_RDONLY, meaning that the returned queue descriptor may only be used for
+ *   receiving messages;
+ * - O_WRONLY, meaning that the returned queue descriptor may only be used for
+ *   sending messages;
+ * - O_RDWR, meaning that the returned queue descriptor may be used for both
+ *   sending and receiving messages.
+ *
+ * If no message queue named @a name exists, and @a oflags has the @a O_CREAT
+ * bit set, the message queue is created by this function, taking two more
+ * arguments:
+ * - a @a mode argument, of type @b mode_t, currently ignored;
+ * - an @a attr argument, pointer to an @b mq_attr structure, specifying the
+ *   attributes of the new message queue.
+ *
+ * If @a oflags has the two bits @a O_CREAT and @a O_EXCL set and the message
+ * queue already exists, this service fails.
+ *
+ * If the O_NONBLOCK bit is set in @a oflags, the mq_send(), mq_receive(),
+ * mq_timedsend() and mq_timedreceive() services return @a -1 with @a errno set
+ * to EAGAIN instead of blocking their caller.
+ *
+ * The following arguments of the @b mq_attr structure at the address @a attr
+ * are used when creating a message queue:
+ * - @a mq_maxmsg is the maximum number of messages in the queue (128 by
+ *   default);
+ * - @a mq_msgsize is the maximum size of each message (128 by default).
+ *
+ * @a name may be any arbitrary string, in which slashes have no particular
+ * meaning. However, for portability, using a name which starts with a slash and
+ * contains no other slash is recommended.
+ *
+ * @param name name of the message queue to open;
+ *
+ * @param oflags flags.
+ *
+ * @return a message queue descriptor on success;
+ * @return -1 with @a errno set if:
+ * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
+ * - EEXIST, the bits @a O_CREAT and @a O_EXCL were set in @a oflags and the
+ *   message queue already exists;
+ * - ENOENT, the bit @a O_CREAT is not set in @a oflags and the message queue
+ *   does not exist;
+ * - ENOSPC, allocation of system memory failed, or insufficient memory available
+ *   from the system heap to create the queue, try increasing
+ *   CONFIG_XENO_OPT_SYS_HEAPSZ;
+ * - EPERM, attempting to create a message queue from an invalid context;
+ * - EINVAL, the @a attr argument is invalid;
+ * - EMFILE, too many descriptors are currently open.
+ * - EAGAIN, no registry slot available, check/raise CONFIG_XENO_OPT_REGISTRY_NRSLOTS.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_open.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ */
+COBALT_IMPL(mqd_t, mq_open, (const char *name, int oflags, ...))
+{
+	struct mq_attr *attr = NULL;
+	mode_t mode = 0;
+	va_list ap;
+	int fd;
+
+	if ((oflags & O_CREAT) != 0) {
+		va_start(ap, oflags);
+		mode = va_arg(ap, int);	/* unused */
+		attr = va_arg(ap, struct mq_attr *);
+		va_end(ap);
+	}
+
+	fd = XENOMAI_SYSCALL4(sc_cobalt_mq_open, name, oflags, mode, attr);
+	if (fd < 0) {
+		errno = -fd;
+		return (mqd_t)-1;
+	}
+
+	return (mqd_t)fd;
+}
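+
+/*
+ * Usage sketch (illustrative): create a queue with explicit
+ * attributes instead of the 128-message/128-byte defaults documented
+ * above; the queue name and sizes are arbitrary.
+ */
+#if 0
+static mqd_t open_example_queue(void)
+{
+	struct mq_attr attr = {
+		.mq_maxmsg = 64,	/* queue depth */
+		.mq_msgsize = 256,	/* bytes per message */
+	};
+
+	return mq_open("/example", O_CREAT | O_RDWR, 0600, &attr);
+}
+#endif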
+
+/**
+ * @brief Close a message queue
+ *
+ * This service closes the message queue descriptor @a mqd. The
+ * message queue is destroyed only when all open descriptors are
+ * closed, and when unlinked with a call to the mq_unlink() service.
+ *
+ * @param mqd message queue descriptor.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EBADF, @a mqd is an invalid message queue descriptor;
+ * - EPERM, the caller context is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_close.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ */
+COBALT_IMPL(int, mq_close, (mqd_t mqd))
+{
+	int err;
+
+	err = XENOMAI_SYSCALL1(sc_cobalt_mq_close, mqd);
+	if (err) {
+		errno = -err;
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * @brief Unlink a message queue
+ *
+ * This service unlinks the message queue named @a name. The message queue is
+ * not destroyed until all queue descriptors obtained with the mq_open() service
+ * are closed with the mq_close() service. However, after a call to this
+ * service, the unlinked queue may no longer be reached with the mq_open()
+ * service.
+ *
+ * @param name name of the message queue to be unlinked.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EPERM, the caller context is invalid;
+ * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
+ * - ENOENT, the message queue does not exist.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_unlink.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ */
+COBALT_IMPL(int, mq_unlink, (const char *name))
+{
+	int err;
+
+	err = XENOMAI_SYSCALL1(sc_cobalt_mq_unlink, name);
+	if (!err)
+		return 0;
+
+	errno = -err;
+	return -1;
+}
+
+/**
+ * @brief Get message queue attributes
+ *
+ * This service stores, at the address @a attr, the attributes of the message
+ * queue descriptor @a mqd.
+ *
+ * The following attributes are set:
+ * - @a mq_flags, flags of the message queue descriptor @a mqd;
+ * - @a mq_maxmsg, maximum number of messages in the message queue;
+ * - @a mq_msgsize, maximum message size;
+ * - @a mq_curmsgs, number of messages currently in the queue.
+ *
+ * @param mqd message queue descriptor;
+ *
+ * @param attr address where the message queue attributes will be stored on
+ * success.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EBADF, @a mqd is not a valid descriptor.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_getattr.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, mq_getattr, (mqd_t mqd, struct mq_attr *attr))
+{
+	int err;
+
+	err = XENOMAI_SYSCALL2(sc_cobalt_mq_getattr, mqd, attr);
+	if (!err)
+		return 0;
+
+	errno = -err;
+	return -1;
+}
+
+/**
+ * @brief Set message queue attributes
+ *
+ * This service sets the flags of the @a mqd descriptor to the value
+ * of the member @a mq_flags of the @b mq_attr structure pointed to by
+ * @a attr.
+ *
+ * The previous values of the message queue attributes are stored at the
+ * address @a oattr if it is not @a NULL.
+ *
+ * Only setting or clearing the O_NONBLOCK flag has an effect.
+ *
+ * @param mqd message queue descriptor;
+ *
+ * @param attr pointer to new attributes (only @a mq_flags is used);
+ *
+ * @param oattr if not @a NULL, address where previous message queue attributes
+ * will be stored on success.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EBADF, @a mqd is not a valid message queue descriptor.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_setattr.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, mq_setattr, (mqd_t mqd,
+			      const struct mq_attr *__restrict__ attr,
+			      struct mq_attr *__restrict__ oattr))
+{
+	int err = 0, flags;
+
+	if (oattr) {
+		err = XENOMAI_SYSCALL2(sc_cobalt_mq_getattr, mqd, oattr);
+		if (err < 0)
+			goto out_err;
+		flags = oattr->mq_flags;
+	} else {
+		err = __WRAP(fcntl(mqd, F_GETFL));
+		if (err < 0)
+			goto out_err;
+		flags = err;
+	}
+
+	flags = (flags & ~O_NONBLOCK) | (attr->mq_flags & O_NONBLOCK);
+
+	err = __WRAP(fcntl(mqd, F_SETFL, flags));
+	if (!err)
+		return 0;
+
+  out_err:
+	errno = -err;
+	return -1;
+}
+
+/**
+ * Send a message to a message queue.
+ *
+ * If the message queue @a q is not full, this service sends the message of
+ * length @a len pointed to by the argument @a buffer, with priority @a prio. A
+ * message with greater priority is inserted in the queue before a message with
+ * lower priority.
+ *
+ * If the message queue is full and the flag @a O_NONBLOCK is not set, the
+ * calling thread is suspended until the queue is not full. If the message queue
+ * is full and the flag @a O_NONBLOCK is set, the message is not sent and the
+ * service immediately returns -1 with @a errno set to EAGAIN.
+ *
+ * @param q message queue descriptor;
+ *
+ * @param buffer pointer to the message to be sent;
+ *
+ * @param len length of the message;
+ *
+ * @param prio priority of the message.
+ *
+ * @return 0 and send a message on success;
+ * @return -1 with no message sent and @a errno set if:
+ * - EBADF, @a q is not a valid message queue descriptor open for writing;
+ * - EMSGSIZE, the message length @a len exceeds the @a mq_msgsize attribute of
+ *   the message queue;
+ * - EAGAIN, the flag O_NONBLOCK is set for the descriptor @a q and the message
+ *   queue is full;
+ * - EPERM, the caller context is invalid;
+ * - EINTR, the service was interrupted by a signal.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_send.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, mq_send, (mqd_t q, const char *buffer, size_t len, unsigned prio))
+{
+	int err, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	err = XENOMAI_SYSCALL5(sc_cobalt_mq_timedsend64,
+			       q, buffer, len, prio, NULL);
+#else
+	err = XENOMAI_SYSCALL5(sc_cobalt_mq_timedsend,
+			       q, buffer, len, prio, NULL);
+#endif
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (!err)
+		return 0;
+
+	errno = -err;
+	return -1;
+}
+
+/**
+ * Attempt, during a bounded time, to send a message to a message queue.
+ *
+ * This service is equivalent to mq_send(), except that if the message queue is
+ * full and the flag @a O_NONBLOCK is not set for the descriptor @a q, the
+ * calling thread is only suspended until the timeout specified by @a
+ * timeout expires.
+ *
+ * @param q message queue descriptor;
+ *
+ * @param buffer pointer to the message to be sent;
+ *
+ * @param len length of the message;
+ *
+ * @param prio priority of the message;
+ *
+ * @param timeout the timeout, expressed as an absolute value of the
+ * CLOCK_REALTIME clock.
+ *
+ * @return 0 and send a message on success;
+ * @return -1 with no message sent and @a errno set if:
+ * - EBADF, @a q is not a valid message queue descriptor open for writing;
+ * - EMSGSIZE, the message length exceeds the @a mq_msgsize attribute of the
+ *   message queue;
+ * - EAGAIN, the flag O_NONBLOCK is set for the descriptor @a q and the message
+ *   queue is full;
+ * - EPERM, the caller context is invalid;
+ * - ETIMEDOUT, the specified timeout expired;
+ * - EINTR, the service was interrupted by a signal.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_timedsend.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, mq_timedsend, (mqd_t q,
+				const char *buffer,
+				size_t len,
+				unsigned prio, const struct timespec *timeout))
+{
+	int err, oldtype;
+
+	if (timeout == NULL)
+		return -EFAULT;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	err = XENOMAI_SYSCALL5(sc_cobalt_mq_timedsend,
+			       q, buffer, len, prio, timeout);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (!err)
+		return 0;
+
+	errno = -err;
+	return -1;
+}
+
+/**
+ * Receive a message from a message queue.
+ *
+ * If the message queue @a q is not empty and @a len is at least as large as
+ * the @a mq_msgsize of the message queue, this service copies, at the address
+ * @a buffer, the queued message with the highest priority.
+ *
+ * If the queue is empty and the flag @a O_NONBLOCK is not set for the
+ * descriptor @a q, the calling thread is suspended until some message is sent
+ * to the queue. If the queue is empty and the flag @a O_NONBLOCK is set for the
+ * descriptor @a q, this service immediately returns -1 with @a errno set to
+ * EAGAIN.
+ *
+ * @param q the queue descriptor;
+ *
+ * @param buffer the address where the received message will be stored on
+ * success;
+ *
+ * @param len @a buffer length;
+ *
+ * @param prio address where the priority of the received message will be
+ * stored on success.
+ *
+ * @return the message length, and copy a message at the address @a buffer on
+ * success;
+ * @return -1 with no message unqueued and @a errno set if:
+ * - EBADF, @a q is not a valid descriptor open for reading;
+ * - EMSGSIZE, the length @a len is less than the message queue @a mq_msgsize
+ *   attribute;
+ * - EAGAIN, the queue is empty, and the flag @a O_NONBLOCK is set for the
+ *   descriptor @a q;
+ * - EPERM, the caller context is invalid;
+ * - EINTR, the service was interrupted by a signal.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_receive.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(ssize_t, mq_receive, (mqd_t q, char *buffer, size_t len, unsigned *prio))
+{
+	ssize_t rlen = (ssize_t) len;
+	int err, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	err = XENOMAI_SYSCALL5(sc_cobalt_mq_timedreceive,
+			       q, buffer, &rlen, prio, NULL);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (!err)
+		return rlen;
+
+	errno = -err;
+	return -1;
+}
+
+/**
+ * Attempt, during a bounded time, to receive a message from a message queue.
+ *
+ * This service is equivalent to mq_receive(), except that if the flag @a
+ * O_NONBLOCK is not set for the descriptor @a q and the message queue is
+ * empty, the calling thread is only suspended until the timeout @a timeout
+ * expires.
+ *
+ * @param q the queue descriptor;
+ *
+ * @param buffer the address where the received message will be stored on
+ * success;
+ *
+ * @param len @a buffer length;
+ *
+ * @param prio address where the priority of the received message will be
+ * stored on success.
+ *
+ * @param timeout the timeout, expressed as an absolute value of the
+ * CLOCK_REALTIME clock.
+ *
+ * @return the message length, and copy a message at the address @a buffer on
+ * success;
+ * @return -1 with no message unqueued and @a errno set if:
+ * - EBADF, @a q is not a valid descriptor open for reading;
+ * - EMSGSIZE, the length @a len is less than the message queue @a mq_msgsize
+ *   attribute;
+ * - EAGAIN, the queue is empty, and the flag @a O_NONBLOCK is set for the
+ *   descriptor @a q;
+ * - EPERM, the caller context is invalid;
+ * - EINTR, the service was interrupted by a signal;
+ * - ETIMEDOUT, the specified timeout expired.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_timedreceive.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(ssize_t, mq_timedreceive, (mqd_t q,
+				       char *__restrict__ buffer,
+				       size_t len,
+				       unsigned *__restrict__ prio,
+				       const struct timespec * __restrict__ timeout))
+{
+	ssize_t rlen = (ssize_t) len;
+	int err, oldtype;
+
+	if (timeout == NULL)
+		return -EFAULT;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	err = XENOMAI_SYSCALL5(sc_cobalt_mq_timedreceive64,
+			       q, buffer, &rlen, prio, timeout);
+#else
+	err = XENOMAI_SYSCALL5(sc_cobalt_mq_timedreceive,
+			       q, buffer, &rlen, prio, timeout);
+#endif
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (!err)
+		return rlen;
+
+	errno = -err;
+	return -1;
+}
+
+/**
+ * @brief Enable notification on message arrival
+ *
+ * If @a evp is not @a NULL and is the address of a @b sigevent
+ * structure with the @a sigev_notify member set to SIGEV_SIGNAL, the
+ * current thread will be notified by a signal when a message is sent
+ * to the message queue @a mqd, the queue is empty, and no thread is
+ * blocked in call to mq_receive() or mq_timedreceive(). After the
+ * notification, the thread is unregistered.
+ *
+ * If @a evp is @a NULL or the @a sigev_notify member is SIGEV_NONE,
+ * the current thread is unregistered.
+ *
+ * Only one thread may be registered at a time.
+ *
+ * If the current thread is not a Cobalt thread (created with
+ * pthread_create()), this service fails.
+ *
+ * @param mqd message queue descriptor;
+ *
+ * @param evp pointer to an event notification structure.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a evp is invalid;
+ * - EPERM, the caller context is invalid;
+ * - EBADF, @a mqd is not a valid message queue descriptor;
+ * - EBUSY, another thread is already registered.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/mq_notify.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, mq_notify, (mqd_t mqd, const struct sigevent *evp))
+{
+	int err;
+
+	err = XENOMAI_SYSCALL2(sc_cobalt_mq_notify, mqd, evp);
+	if (err) {
+		errno = -err;
+		return -1;
+	}
+
+	return 0;
+}
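+
+/*
+ * Usage sketch (illustrative): request a SIGUSR1 notification on
+ * message arrival. The registration is consumed by each
+ * notification, so it must be re-armed afterwards.
+ */
+#if 0
+static int arm_notification(mqd_t mqd)
+{
+	struct sigevent sev = {
+		.sigev_notify = SIGEV_SIGNAL,
+		.sigev_signo = SIGUSR1,
+	};
+
+	return mq_notify(mqd, &sev);
+}
+#endif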
+
+/** @}*/
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/mutex.c b/kernel/xenomai-v3.2.4/lib/cobalt/mutex.c
new file mode 100644
index 0000000..73e45a1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/mutex.c
@@ -0,0 +1,1006 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <string.h>
+#include <limits.h>
+#include <pthread.h>
+#include <asm/xenomai/syscall.h>
+#include "current.h"
+#include "internal.h"
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_mutex Mutual exclusion
+ *
+ * Cobalt/POSIX mutual exclusion services
+ *
+ * A mutex is a MUTual EXclusion device, and is useful for protecting
+ * shared data structures from concurrent modifications, and implementing
+ * critical sections and monitors.
+ *
+ * A mutex has two possible states: unlocked (not owned by any thread), and
+ * locked (owned by one thread). A mutex can never be owned by two different
+ * threads simultaneously. A thread attempting to lock a mutex that is already
+ * locked by another thread is suspended until the owning thread unlocks the
+ * mutex first.
+ *
+ * Before it can be used, a mutex has to be initialized with
+ * pthread_mutex_init(). An attribute object, a reference to which may be
+ * passed to this service, allows selecting the features of the created mutex,
+ * namely its @a type (see pthread_mutexattr_settype()), the priority @a
+ * protocol it uses (see pthread_mutexattr_setprotocol()) and whether it may
+ * be shared between several processes (see pthread_mutexattr_setpshared()).
+ *
+ * By default, Cobalt mutexes are of the normal type, use no
+ * priority protocol and may not be shared between several processes.
+ *
+ * Note that pthread_mutex_init() should be used to initialize a mutex;
+ * using the static initializer @a PTHREAD_MUTEX_INITIALIZER delays the
+ * initialization to the first service called on the mutex and will
+ * most likely introduce switches to secondary mode.
+ * The documentation (and specifically the api-tags) of the mutex services
+ * assumes a mutex was explicitly initialized with pthread_mutex_init().
+ *
+ *@{
+ */
+
+static pthread_mutexattr_t cobalt_default_mutexattr;
+static union cobalt_mutex_union cobalt_autoinit_mutex_union;
+static pthread_mutex_t *const cobalt_autoinit_mutex =
+	&cobalt_autoinit_mutex_union.native_mutex;
+
+void cobalt_mutex_init(void)
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&cobalt_autoinit_mutex_union.shadow_mutex;
+	pthread_mutexattr_t rt_init_mattr;
+	int err __attribute__((unused));
+
+	pthread_mutexattr_init(&cobalt_default_mutexattr);
+
+	pthread_mutexattr_init(&rt_init_mattr);
+	pthread_mutexattr_setprotocol(&rt_init_mattr, PTHREAD_PRIO_INHERIT);
+	_mutex->magic = ~COBALT_MUTEX_MAGIC;
+	err = __COBALT(pthread_mutex_init(cobalt_autoinit_mutex,
+						&rt_init_mattr));
+	assert(err == 0);
+	pthread_mutexattr_destroy(&rt_init_mattr);
+}
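+
+/*
+ * Usage sketch (illustrative): initialize explicitly as recommended
+ * above, instead of relying on PTHREAD_MUTEX_INITIALIZER and the
+ * auto-init fallback.
+ */
+#if 0
+static int make_errorcheck_mutex(pthread_mutex_t *m)
+{
+	pthread_mutexattr_t attr;
+	int ret;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+	ret = pthread_mutex_init(m, &attr);
+	pthread_mutexattr_destroy(&attr);
+
+	return ret;
+}
+#endif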
+
+/**
+ * Initialize a mutex.
+ *
+ * This service initializes the mutex @a mutex, using the mutex attributes
+ * object @a attr. If @a attr is @a NULL, default attributes are used (see
+ * pthread_mutexattr_init()).
+ *
+ * @param mutex the mutex to be initialized;
+ *
+ * @param attr the mutex attributes object.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the mutex attributes object @a attr is invalid or uninitialized;
+ * - EBUSY, the mutex @a mutex was already initialized;
+ * - ENOMEM, insufficient memory available from the system heap to initialize the
+ *   mutex, increase CONFIG_XENO_OPT_SYS_HEAPSZ.
+ * - EAGAIN, insufficient memory available to initialize the
+ *   mutex, increase CONFIG_XENO_OPT_SHARED_HEAPSZ for a process-shared
+ *   mutex, or CONFIG_XENO_OPT_PRIVATE_HEAPSZ for a process-private mutex.
+ * - EAGAIN, no registry slot available, check/raise CONFIG_XENO_OPT_REGISTRY_NRSLOTS.
+ * - ENOSYS, @a attr mentions priority protection
+ *  (PTHREAD_PRIO_PROTECT), but the C library does not provide
+ *  pthread_mutexattr_get/setprioceiling().
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_init.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, pthread_mutex_init, (pthread_mutex_t *mutex,
+				      const pthread_mutexattr_t *attr))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct cobalt_mutex_state *state;
+	struct cobalt_mutexattr kmattr;
+	int err, tmp;
+
+	if (_mutex->magic == COBALT_MUTEX_MAGIC) {
+		err = -XENOMAI_SYSCALL1(sc_cobalt_mutex_check_init, _mutex);
+		if (err)
+			return err;
+	}
+
+	if (attr == NULL)
+		attr = &cobalt_default_mutexattr;
+
+	err = pthread_mutexattr_getpshared(attr, &tmp);
+	if (err)
+		return err;
+	kmattr.pshared = tmp;
+
+	err = pthread_mutexattr_gettype(attr, &tmp);
+	if (err)
+		return err;
+	kmattr.type = tmp;
+
+	err = pthread_mutexattr_getprotocol(attr, &tmp);
+	if (err)
+		return err;
+	kmattr.protocol = tmp;
+
+	if (kmattr.protocol == PTHREAD_PRIO_PROTECT) {
+		err = pthread_mutexattr_getprioceiling(attr, &tmp);
+		if (err)
+			return err;
+		if (tmp == 0 ||	/* Could not cope with null minpri. */
+		    tmp < __cobalt_std_fifo_minpri ||
+		    tmp > __cobalt_std_fifo_maxpri)
+			return EINVAL;
+		kmattr.ceiling = tmp - 1;
+	}
+
+	err = -XENOMAI_SYSCALL2(sc_cobalt_mutex_init, _mutex, &kmattr);
+	if (err)
+		return err;
+
+	state = mutex_get_state(_mutex);
+	cobalt_commit_memory(state);
+
+	return err;
+}
+
+/**
+ * Test if a mutex structure contains a valid autoinitializer.
+ *
+ * @return the mutex type on success,
+ * @return -1 if not in supported autoinitializer state
+ */
+static int __attribute__((cold))
+	cobalt_mutex_autoinit_type(const pthread_mutex_t *mutex)
+{
+	static const pthread_mutex_t mutex_initializers[] = {
+#if HAVE_DECL_PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+		PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP,
+#endif
+#if HAVE_DECL_PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+		PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP,
+#endif
+		PTHREAD_MUTEX_INITIALIZER
+	};
+	static const int mutex_types[] = {
+#if HAVE_DECL_PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP
+		PTHREAD_MUTEX_ERRORCHECK_NP,
+#endif
+#if HAVE_DECL_PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP
+		PTHREAD_MUTEX_RECURSIVE_NP,
+#endif
+		PTHREAD_MUTEX_DEFAULT
+	};
+	int i;
+
+	for (i = sizeof(mutex_types) / sizeof(mutex_types[0]); i > 0; --i) {
+		if (memcmp(mutex, &mutex_initializers[i - 1],
+				sizeof(mutex_initializers[0])) == 0)
+			return mutex_types[i - 1];
+	}
+	return -1;
+}
+
+static int __attribute__((cold))
+	cobalt_mutex_doautoinit(union cobalt_mutex_union *umutex)
+{
+	struct cobalt_mutex_shadow *_mutex = &umutex->shadow_mutex;
+	int err __attribute__((unused));
+	pthread_mutexattr_t mattr;
+	int ret = 0, type;
+
+	type = cobalt_mutex_autoinit_type(&umutex->native_mutex);
+	if (type < 0)
+		return EINVAL;
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, type);
+	err = __COBALT(pthread_mutex_lock(cobalt_autoinit_mutex));
+	if (err) {
+		ret = err;
+		goto out;
+	}
+	if (_mutex->magic != COBALT_MUTEX_MAGIC)
+		ret = __COBALT(pthread_mutex_init(&umutex->native_mutex,
+			&mattr));
+	err = __COBALT(pthread_mutex_unlock(cobalt_autoinit_mutex));
+	if (err) {
+		if (ret == 0)
+			ret = err;
+	}
+
+  out:
+	pthread_mutexattr_destroy(&mattr);
+
+	return ret;
+}
+
+static inline int cobalt_mutex_autoinit(union cobalt_mutex_union *umutex)
+{
+	if (umutex->shadow_mutex.magic != COBALT_MUTEX_MAGIC)
+		return cobalt_mutex_doautoinit(umutex);
+	return 0;
+}
+
+/**
+ * Destroy a mutex.
+ *
+ * This service destroys the mutex @a mutex, if it is unlocked and not
+ * referenced by any condition variable. The mutex becomes invalid for all
+ * mutex services (they all return the EINVAL error) except
+ * pthread_mutex_init().
+ *
+ * @param mutex the mutex to be destroyed.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the mutex @a mutex is invalid;
+ * - EPERM, the mutex is not process-shared and does not belong to the current
+ *   process;
+ * - EBUSY, the mutex is locked, or used by a condition variable.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_destroy.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, pthread_mutex_destroy, (pthread_mutex_t *mutex))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	int err;
+
+	if (_mutex->magic != COBALT_MUTEX_MAGIC)
+		return (cobalt_mutex_autoinit_type(mutex) < 0) ? EINVAL : 0;
+
+	err = XENOMAI_SYSCALL1(sc_cobalt_mutex_destroy, _mutex);
+
+	return -err;
+}
+
+/**
+ * Lock a mutex.
+ *
+ * This service attempts to lock the mutex @a mutex. If the mutex is free, it
+ * becomes locked. If it was locked by a thread other than the current one, the
+ * current thread is suspended until the mutex is unlocked. If it was already
+ * locked by the current thread, the behaviour of this service depends on the
+ * mutex type:
+ * - for mutexes of the @a PTHREAD_MUTEX_NORMAL type, this service deadlocks;
+ * - for mutexes of the @a PTHREAD_MUTEX_ERRORCHECK type, this service returns
+ *   the EDEADLK error number;
+ * - for mutexes of the @a PTHREAD_MUTEX_RECURSIVE type, this service increments
+ *   the lock recursion count and returns 0.
+ *
+ * @param mutex the mutex to be locked.
+ *
+ * @return 0 on success
+ * @return an error number if:
+ * - EPERM, the caller is not allowed to perform the operation;
+ * - EINVAL, the mutex @a mutex is invalid;
+ * - EPERM, the mutex is not process-shared and does not belong to the current
+ *   process;
+ * - EDEADLK, the mutex is of the @a PTHREAD_MUTEX_ERRORCHECK type and was
+ *   already locked by the current thread;
+ * - EAGAIN, the mutex is of the @a PTHREAD_MUTEX_RECURSIVE type and the maximum
+ *   number of recursive locks has been exceeded.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_lock.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, pthread_mutex_lock, (pthread_mutex_t *mutex))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct xnthread_user_window *u_window;
+	int status, ret, lazy_protect = 0;
+	xnhandle_t cur;
+
+	cur = cobalt_get_current();
+	if (cur == XN_NO_HANDLE)
+		return EPERM;
+
+	ret = cobalt_mutex_autoinit((union cobalt_mutex_union *)mutex);
+	if (ret)
+		return ret;
+
+	/*
+	 * We track resource ownership for auto-relax of non real-time
+	 * shadows and some debug features, so we must always obtain
+	 * them via a syscall.
+	 */
+	status = cobalt_get_current_mode();
+	if ((status & (XNRELAX|XNWEAK|XNDEBUG)) == 0) {
+		if (_mutex->attr.protocol == PTHREAD_PRIO_PROTECT)
+			goto protect;
+fast_path:
+		ret = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
+		if (ret == 0) {
+			_mutex->lockcnt = 1;
+			return 0;
+		}
+	} else {
+slow_path:
+		ret = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
+		if (ret == 0)
+			ret = -EBUSY;
+	}
+
+	if (ret == -EBUSY) {
+		if (lazy_protect)
+			u_window->pp_pending = XN_NO_HANDLE;
+
+		switch(_mutex->attr.type) {
+		case PTHREAD_MUTEX_NORMAL:
+			break;
+
+		case PTHREAD_MUTEX_ERRORCHECK:
+			return EDEADLK;
+
+		case PTHREAD_MUTEX_RECURSIVE:
+			if (_mutex->lockcnt == UINT32_MAX)
+				return EAGAIN;
+			++_mutex->lockcnt;
+			return 0;
+		}
+	}
+
+	do
+		ret = XENOMAI_SYSCALL1(sc_cobalt_mutex_lock, _mutex);
+	while (ret == -EINTR);
+
+	if (ret == 0)
+		_mutex->lockcnt = 1;
+
+	return -ret;
+protect:
+	u_window = cobalt_get_current_window();
+	/*
+	 * Can't nest lazy ceiling requests, have to take the slow
+	 * path when this happens.
+	 */
+	if (u_window->pp_pending != XN_NO_HANDLE)
+		goto slow_path;
+	u_window->pp_pending = _mutex->handle;
+	lazy_protect = 1;
+	goto fast_path;
+}
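+
+/*
+ * Usage sketch (illustrative; the ceiling value is arbitrary): a
+ * priority-ceiling mutex takes the lazy "protect" path above. The
+ * ceiling must lie within the SCHED_FIFO priority range checked by
+ * pthread_mutex_init().
+ */
+#if 0
+static int make_pp_mutex(pthread_mutex_t *m)
+{
+	pthread_mutexattr_t attr;
+	int ret;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
+	pthread_mutexattr_setprioceiling(&attr, 80);
+	ret = pthread_mutex_init(m, &attr);
+	pthread_mutexattr_destroy(&attr);
+
+	return ret;
+}
+#endif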
+
+/**
+ * Attempt, during a bounded time, to lock a mutex.
+ *
+ * This service is equivalent to pthread_mutex_lock(), except that if the mutex
+ * @a mutex is locked by a thread other than the current one, this service only
+ * suspends the current thread until the timeout specified by @a to expires.
+ *
+ * @param mutex the mutex to be locked;
+ *
+ * @param to the timeout, expressed as an absolute value of the CLOCK_REALTIME
+ * clock.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EPERM, the caller is not allowed to perform the operation;
+ * - EINVAL, the mutex @a mutex is invalid;
+ * - EPERM, the mutex is not process-shared and does not belong to the current
+ *   process;
+ * - ETIMEDOUT, the mutex could not be locked and the specified timeout
+ *   expired;
+ * - EDEADLK, the mutex is of the @a PTHREAD_MUTEX_ERRORCHECK type and the mutex
+ *   was already locked by the current thread;
+ * - EAGAIN, the mutex is of the @a PTHREAD_MUTEX_RECURSIVE type and the maximum
+ *   number of recursive locks has been exceeded.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_timedlock.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, pthread_mutex_timedlock, (pthread_mutex_t *mutex,
+					   const struct timespec *to))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct xnthread_user_window *u_window;
+	int status, ret, lazy_protect = 0;
+	xnhandle_t cur;
+
+	cur = cobalt_get_current();
+	if (cur == XN_NO_HANDLE)
+		return EPERM;
+
+	ret = cobalt_mutex_autoinit((union cobalt_mutex_union *)mutex);
+	if (ret)
+		return ret;
+
+	/* See __cobalt_pthread_mutex_lock() */
+	status = cobalt_get_current_mode();
+	if ((status & (XNRELAX|XNWEAK|XNDEBUG)) == 0) {
+		if (_mutex->attr.protocol == PTHREAD_PRIO_PROTECT)
+			goto protect;
+fast_path:
+		ret = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
+		if (ret == 0) {
+			_mutex->lockcnt = 1;
+			return 0;
+		}
+	} else {
+slow_path:
+		ret = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
+		if (ret == 0)
+			ret = -EBUSY;
+	}
+
+	if (ret == -EBUSY) {
+		if (lazy_protect)
+			u_window->pp_pending = XN_NO_HANDLE;
+
+		switch(_mutex->attr.type) {
+		case PTHREAD_MUTEX_NORMAL:
+			break;
+
+		case PTHREAD_MUTEX_ERRORCHECK:
+			return EDEADLK;
+
+		case PTHREAD_MUTEX_RECURSIVE:
+			if (_mutex->lockcnt == UINT32_MAX)
+				return EAGAIN;
+
+			++_mutex->lockcnt;
+			return 0;
+		}
+	}
+
+	do {
+#ifdef __USE_TIME_BITS64
+		ret = XENOMAI_SYSCALL2(sc_cobalt_mutex_timedlock64, _mutex, to);
+#else
+		ret = XENOMAI_SYSCALL2(sc_cobalt_mutex_timedlock, _mutex, to);
+#endif
+	} while (ret == -EINTR);
+
+	if (ret == 0)
+		_mutex->lockcnt = 1;
+	return -ret;
+protect:
+	u_window = cobalt_get_current_window();
+	/*
+	 * Can't nest lazy ceiling requests, have to take the slow
+	 * path when this happens.
+	 */
+	if (u_window->pp_pending != XN_NO_HANDLE)
+		goto slow_path;
+	u_window->pp_pending = _mutex->handle;
+	lazy_protect = 1;
+	goto fast_path;
+}
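+
+/*
+ * Editor's illustration (not part of the Xenomai sources, compiled
+ * out): @a to is an absolute CLOCK_REALTIME date, so a relative delay
+ * must be added to the current time before calling the service.
+ */
+#if 0
+static int lock_with_delay(pthread_mutex_t *mu, long delay_ns)
+{
+	struct timespec to;
+
+	clock_gettime(CLOCK_REALTIME, &to);
+	to.tv_nsec += delay_ns;
+	while (to.tv_nsec >= 1000000000L) {
+		to.tv_nsec -= 1000000000L;
+		to.tv_sec++;
+	}
+
+	/* Returns 0, or ETIMEDOUT once the absolute date has passed. */
+	return pthread_mutex_timedlock(mu, &to);
+}
+#endif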
+
+/**
+ * Attempt to lock a mutex.
+ *
+ * This service is equivalent to pthread_mutex_lock(), except that if the mutex
+ * @a mutex is locked by a thread other than the current one, this service
+ * returns immediately.
+ *
+ * @param mutex the mutex to be locked.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EPERM, the caller is not allowed to perform the operation;
+ * - EINVAL, the mutex is invalid;
+ * - EPERM, the mutex is not process-shared and does not belong to the current
+ *   process;
+ * - EBUSY, the mutex is locked by a thread other than the current one;
+ * - EAGAIN, the mutex is recursive, and the maximum number of recursive locks
+ *   has been exceeded.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_trylock.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, pthread_mutex_trylock, (pthread_mutex_t *mutex))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct xnthread_user_window *u_window;
+	int status, ret, lazy_protect = 0;
+	xnhandle_t cur;
+
+	cur = cobalt_get_current();
+	if (cur == XN_NO_HANDLE)
+		return EPERM;
+
+	ret = cobalt_mutex_autoinit((union cobalt_mutex_union *)mutex);
+	if (ret)
+		return ret;
+
+	status = cobalt_get_current_mode();
+	if ((status & (XNRELAX|XNWEAK|XNDEBUG)) == 0) {
+		if (_mutex->attr.protocol == PTHREAD_PRIO_PROTECT)
+			goto protect;
+fast_path:
+		ret = xnsynch_fast_acquire(mutex_get_ownerp(_mutex), cur);
+		if (ret == 0) {
+			_mutex->lockcnt = 1;
+			return 0;
+		}
+	} else {
+slow_path:
+		ret = xnsynch_fast_owner_check(mutex_get_ownerp(_mutex), cur);
+		if (ret == 0)
+			ret = -EBUSY;
+	}
+
+	if (ret == -EBUSY) {
+		if (lazy_protect)
+			u_window->pp_pending = XN_NO_HANDLE;
+
+		if (_mutex->attr.type == PTHREAD_MUTEX_RECURSIVE) {
+			if (_mutex->lockcnt == UINT32_MAX)
+				return EAGAIN;
+
+			++_mutex->lockcnt;
+			return 0;
+		}
+
+		return EBUSY;
+	}
+
+	do {
+		ret = XENOMAI_SYSCALL1(sc_cobalt_mutex_trylock, _mutex);
+	} while (ret == -EINTR);
+
+	if (ret == 0)
+		_mutex->lockcnt = 1;
+
+	return -ret;
+
+protect:
+	u_window = cobalt_get_current_window();
+	/*
+	 * Can't nest lazy ceiling requests, have to take the slow
+	 * path when this happens.
+	 */
+	if (u_window->pp_pending != XN_NO_HANDLE)
+		goto slow_path;
+	u_window->pp_pending = _mutex->handle;
+	lazy_protect = 1;
+	goto fast_path;
+}
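+
+/*
+ * Editor's illustration (not part of the Xenomai sources, compiled
+ * out): trylock never suspends the caller, so EBUSY is the expected
+ * outcome under contention and should not be treated as a failure.
+ */
+#if 0
+static int try_update(pthread_mutex_t *mu, int *shared, int val)
+{
+	int ret = pthread_mutex_trylock(mu);
+
+	if (ret)
+		return ret;	/* EBUSY under contention, or a real error */
+
+	*shared = val;
+	return pthread_mutex_unlock(mu);
+}
+#endif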
+
+/**
+ * Unlock a mutex.
+ *
+ * This service unlocks the mutex @a mutex. If @a mutex is of the @a
+ * PTHREAD_MUTEX_RECURSIVE type and the lock recursion count is greater
+ * than one, the count is decremented and the mutex remains locked.
+ *
+ * Attempting to unlock a mutex which is not locked, or which is locked by a
+ * thread other than the current one, yields the EPERM error regardless of
+ * the mutex @a type attribute.
+ *
+ * @param mutex the mutex to be released.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EPERM, the caller is not allowed to perform the operation;
+ * - EINVAL, the mutex @a mutex is invalid;
+ * - EPERM, the mutex was not locked by the current thread.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutex_unlock.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, pthread_mutex_unlock, (pthread_mutex_t *mutex))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct xnthread_user_window *u_window;
+	struct cobalt_mutex_state *state;
+	xnhandle_t cur;
+	int err;
+
+	err = cobalt_mutex_autoinit((union cobalt_mutex_union *)mutex);
+	if (err)
+		return err;
+
+	cur = cobalt_get_current();
+	if (cur == XN_NO_HANDLE)
+		return EPERM;
+
+	state = mutex_get_state(_mutex);
+	if (xnsynch_fast_owner_check(&state->owner, cur) != 0)
+		return EPERM;
+
+	if (_mutex->lockcnt > 1) {
+		--_mutex->lockcnt;
+		return 0;
+	}
+
+	if ((state->flags & COBALT_MUTEX_COND_SIGNAL))
+		goto do_syscall;
+
+	if (cobalt_get_current_mode() & (XNWEAK|XNDEBUG))
+		goto do_syscall;
+
+	if (xnsynch_fast_release(&state->owner, cur)) {
+		if (_mutex->attr.protocol == PTHREAD_PRIO_PROTECT)
+			goto unprotect;
+		return 0;
+	}
+do_syscall:
+	do {
+		err = XENOMAI_SYSCALL1(sc_cobalt_mutex_unlock, _mutex);
+	} while (err == -EINTR);
+
+	return -err;
+
+unprotect:
+	u_window = cobalt_get_current_window();
+	u_window->pp_pending = XN_NO_HANDLE;
+
+	return 0;
+}
+
+/**
+ * Set a mutex's priority ceiling.
+ *
+ * This routine acquires the specified mutex, then changes the
+ * associated priority ceiling value and releases it.  @a prioceiling
+ * must be between the values returned by sched_get_priority_min() and
+ * sched_get_priority_max(), inclusive.
+ *
+ * The Cobalt implementation applies the priority ceiling protocol
+ * using the previous ceiling value during this operation. The new
+ * priority ceiling will apply next time the @a mutex transitions from
+ * the unlocked to locked state.
+ *
+ * @param mutex the target mutex.
+ *
+ * @param prioceiling the new ceiling value.
+ *
+ * @param old_ceiling on success and if this parameter is non-NULL,
+ * the previous ceiling value is copied to this address.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EPERM, the caller is not allowed to perform the operation;
+ * - EINVAL, @a mutex is invalid;
+ * - EINVAL, @a mutex is not of type PTHREAD_PRIO_PROTECT;
+ * - EINVAL, @a prioceiling is out of range;
+ *
+ * @see
+ * <a href="http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_setprioceiling.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @note If the calling thread's priority is higher than the mutex's
+ * new priority ceiling, the operation will nevertheless succeed; the
+ * Cobalt core never decreases the effective priority of a thread
+ * which locks a priority-protected mutex.
+ */
+COBALT_IMPL(int, pthread_mutex_setprioceiling,
+	    (pthread_mutex_t *__restrict mutex,
+	     int prioceiling, int *__restrict old_ceiling))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct cobalt_mutex_state *state;
+	int ret;
+
+	if (_mutex->magic != COBALT_MUTEX_MAGIC ||
+	    _mutex->attr.protocol != PTHREAD_PRIO_PROTECT)
+		return EINVAL;
+
+	if (prioceiling < __cobalt_std_fifo_minpri ||
+	    prioceiling > __cobalt_std_fifo_maxpri)
+		return EINVAL;
+
+	ret = __COBALT(pthread_mutex_lock(mutex));
+	if (ret)
+		return ret;
+
+	state = mutex_get_state(_mutex);
+	if (old_ceiling)
+		*old_ceiling = state->ceiling;
+
+	state->ceiling = prioceiling;
+
+	return __COBALT(pthread_mutex_unlock(mutex));
+}
+
+/**
+ * Get a mutex's priority ceiling.
+ *
+ * This routine retrieves the priority ceiling value of the specified
+ * mutex.
+ *
+ * @param mutex the target mutex.
+ *
+ * @param prioceiling on success, the current ceiling value is copied
+ * to this address.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EINVAL, @a mutex is invalid;
+ * - EINVAL, @a mutex is not of type PTHREAD_PRIO_PROTECT;
+ *
+ * @see
+ * <a href="http://pubs.opengroup.org/onlinepubs/009695399/functions/pthread_mutex_getprioceiling.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, pthread_mutex_getprioceiling,
+	    (pthread_mutex_t *__restrict mutex, int *__restrict prioceiling))
+{
+	struct cobalt_mutex_shadow *_mutex =
+		&((union cobalt_mutex_union *)mutex)->shadow_mutex;
+	struct cobalt_mutex_state *state;
+
+	if (_mutex->magic != COBALT_MUTEX_MAGIC ||
+	    _mutex->attr.protocol != PTHREAD_PRIO_PROTECT)
+		return EINVAL;
+
+	state = mutex_get_state(_mutex);
+	*prioceiling = state->ceiling;
+
+	return 0;
+}
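+
+/*
+ * Editor's illustration (not part of the Xenomai sources, compiled
+ * out): the ceiling services above only apply to PTHREAD_PRIO_PROTECT
+ * mutexes, so the protocol must be selected at init time. The value 42
+ * stands for any priority within the sched_get_priority_min()/_max()
+ * range for SCHED_FIFO.
+ */
+#if 0
+static void ceiling_example(void)
+{
+	pthread_mutexattr_t attr;
+	pthread_mutex_t mu;
+	int old, cur;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_PROTECT);
+	pthread_mutex_init(&mu, &attr);
+	pthread_mutexattr_destroy(&attr);
+
+	pthread_mutex_setprioceiling(&mu, 42, &old);
+	pthread_mutex_getprioceiling(&mu, &cur);	/* cur == 42 */
+
+	pthread_mutex_destroy(&mu);
+}
+#endif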
+
+/**
+ * Initialize a mutex attributes object.
+ *
+ * This service initializes the mutex attributes object @a attr with default
+ * values for all attributes. The default values are:
+ * - for the @a type attribute, @a PTHREAD_MUTEX_NORMAL;
+ * - for the @a protocol attribute, @a PTHREAD_PRIO_NONE;
+ * - for the @a pshared attribute, @a PTHREAD_PROCESS_PRIVATE.
+ *
+ * If this service is called specifying a mutex attributes object that was
+ * already initialized, the attributes object is reinitialized.
+ *
+ * @param attr the mutex attributes object to be initialized.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ENOMEM, the mutex attributes object pointer @a attr is @a NULL.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_init.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_init(pthread_mutexattr_t * attr);
+
+/**
+ * Destroy a mutex attributes object.
+ *
+ * This service destroys the mutex attributes object @a attr. The object becomes
+ * invalid for all mutex services (they all return EINVAL) except
+ * pthread_mutexattr_init().
+ *
+ * @param attr the initialized mutex attributes object to be destroyed.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EINVAL, the mutex attributes object @a attr is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_destroy.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_destroy(pthread_mutexattr_t * attr);
+
+/**
+ * Get the mutex type attribute from a mutex attributes object.
+ *
+ * This service stores, at the address @a type, the value of the @a type
+ * attribute in the mutex attributes object @a attr.
+ *
+ * See pthread_mutex_lock() and pthread_mutex_unlock() for a
+ * description of the values of the @a type attribute and their effect
+ * on a mutex.
+ *
+ * @param attr an initialized mutex attributes object,
+ *
+ * @param type address where the @a type attribute value will be stored on
+ * success.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the @a type address is invalid;
+ * - EINVAL, the mutex attributes object @a attr is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_gettype.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_gettype(const pthread_mutexattr_t * attr, int *type);
+
+/**
+ * Set the mutex type attribute of a mutex attributes object.
+ *
+ * This service sets the @a type attribute of the mutex attributes object
+ * @a attr.
+ *
+ * See pthread_mutex_lock() and pthread_mutex_unlock() for a
+ * description of the values of the @a type attribute and their effect
+ * on a mutex.
+ *
+ * The @a PTHREAD_MUTEX_DEFAULT default @a type is the same as @a
+ * PTHREAD_MUTEX_NORMAL. Note that using a recursive Cobalt mutex with
+ * a Cobalt condition variable is safe (see pthread_cond_wait()
+ * documentation).
+ *
+ * @param attr an initialized mutex attributes object,
+ *
+ * @param type value of the @a type attribute.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the mutex attributes object @a attr is invalid;
+ * - EINVAL, the value of @a type is invalid for the @a type attribute.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_settype.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_settype(pthread_mutexattr_t * attr, int type);
+
+/**
+ * Get the protocol attribute from a mutex attributes object.
+ *
+ * This service stores, at the address @a proto, the value of the @a protocol
+ * attribute in the mutex attributes object @a attr.
+ *
+ * The @a protocol attribute may be one of @a PTHREAD_PRIO_NONE, @a
+ * PTHREAD_PRIO_INHERIT or @a PTHREAD_PRIO_PROTECT. See
+ * pthread_mutexattr_setprotocol() for the meaning of these constants.
+ *
+ * @param attr an initialized mutex attributes object;
+ *
+ * @param proto address where the value of the @a protocol attribute will be
+ * stored on success.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the @a proto address is invalid;
+ * - EINVAL, the mutex attributes object @a attr is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_getprotocol.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_getprotocol(const pthread_mutexattr_t * attr, int *proto);
+
+/**
+ * Set the protocol attribute of a mutex attributes object.
+ *
+ * This service sets the @a protocol attribute of the mutex attributes object
+ * @a attr.
+ *
+ * @param attr an initialized mutex attributes object,
+ *
+ * @param proto value of the @a protocol attribute, which may be one of:
+ * - PTHREAD_PRIO_NONE, meaning that a mutex created with the attributes object
+ *   @a attr will not follow any priority protocol;
+ * - PTHREAD_PRIO_INHERIT, meaning that a mutex created with the attributes
+ *   object @a attr will follow the priority inheritance protocol;
+ * - PTHREAD_PRIO_PROTECT, meaning that a mutex created with the attributes
+ *   object @a attr will follow the priority protect protocol.
+ *
+ * @return 0 on success,
+ * @return an error number if:
+ * - EINVAL, the mutex attributes object @a attr is invalid;
+ * - ENOTSUP, the value of @a proto is unsupported;
+ * - EINVAL, the value of @a proto is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_setprotocol.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_setprotocol(pthread_mutexattr_t * attr, int proto);
+
+/**
+ * Get the process-shared attribute of a mutex attributes object.
+ *
+ * This service stores, at the address @a pshared, the value of the @a pshared
+ * attribute in the mutex attributes object @a attr.
+ *
+ * The @a pshared attribute may only be one of @a PTHREAD_PROCESS_PRIVATE or
+ * @a PTHREAD_PROCESS_SHARED. See pthread_mutexattr_setpshared() for the meaning
+ * of these two constants.
+ *
+ * @param attr an initialized mutex attributes object;
+ *
+ * @param pshared address where the value of the @a pshared attribute will be
+ * stored on success.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EINVAL, the @a pshared address is invalid;
+ * - EINVAL, the mutex attributes object @a attr is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_getpshared.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_getpshared(const pthread_mutexattr_t *attr, int *pshared);
+
+/**
+ * Set the process-shared attribute of a mutex attributes object.
+ *
+ * This service sets the @a pshared attribute of the mutex attributes
+ * object @a attr.
+ *
+ * @param attr an initialized mutex attributes object.
+ *
+ * @param pshared value of the @a pshared attribute, may be one of:
+ * - PTHREAD_PROCESS_PRIVATE, meaning that a mutex created with the attributes
+ *   object @a attr will only be accessible by threads within the same process
+ *   as the thread that initialized the mutex;
+ * - PTHREAD_PROCESS_SHARED, meaning that a mutex created with the attributes
+ *   object @a attr will be accessible by any thread that has access to the
+ *   memory where the mutex is allocated.
+ *
+ * @return 0 on success,
+ * @return an error status if:
+ * - EINVAL, the mutex attributes object @a attr is invalid;
+ * - EINVAL, the value of @a pshared is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_mutexattr_setpshared.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_mutexattr_setpshared(pthread_mutexattr_t *attr, int pshared);
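+
+/*
+ * Editor's illustration (not part of the Xenomai sources, compiled
+ * out): typical combined use of the attribute services declared above
+ * to create an error-checking, priority-inheriting, process-private
+ * mutex.
+ */
+#if 0
+static int make_pi_mutex(pthread_mutex_t *mu)
+{
+	pthread_mutexattr_t attr;
+	int ret;
+
+	pthread_mutexattr_init(&attr);
+	pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_ERRORCHECK);
+	pthread_mutexattr_setprotocol(&attr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&attr, PTHREAD_PROCESS_PRIVATE);
+	ret = pthread_mutex_init(mu, &attr);
+	pthread_mutexattr_destroy(&attr);
+
+	return ret;
+}
+#endif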
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/parse_vdso.c b/kernel/xenomai-v3.2.4/lib/cobalt/parse_vdso.c
new file mode 100644
index 0000000..339e4d5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/parse_vdso.c
@@ -0,0 +1,281 @@
+/*
+ * parse_vdso.c: Linux reference vDSO parser
+ * Written by Andrew Lutomirski, 2011-2014.
+ *
+ * This code is meant to be linked in to various programs that run on Linux.
+ * As such, it is available with as few restrictions as possible.  This file
+ * is licensed under the Creative Commons Zero License, version 1.0,
+ * available at http://creativecommons.org/publicdomain/zero/1.0/legalcode
+ *
+ * The vDSO is a regular ELF DSO that the kernel maps into user space when
+ * it starts a program.  It works equally well in statically and dynamically
+ * linked binaries.
+ *
+ * This code is tested on x86.  In principle it should work on any
+ * architecture that has a vDSO.
+ */
+
+#include <sys/types.h>
+#include <sys/auxv.h>
+#include <stdbool.h>
+#include <stdint.h>
+#include <string.h>
+#include <limits.h>
+#include <pthread.h>
+#include <error.h>
+#include <errno.h>
+#include <elf.h>
+#include "internal.h"
+
+/*
+ * To use this vDSO parser, first call one of the vdso_init_* functions.
+ * If you've already parsed auxv, then pass the value of AT_SYSINFO_EHDR
+ * to vdso_init_from_sysinfo_ehdr.  Otherwise pass auxv to vdso_init_from_auxv.
+ * Then call lookup_vdso for each symbol you want.  For example, to look up
+ * gettimeofday on x86_64, use:
+ *
+ *     <some pointer> = lookup_vdso("LINUX_2.6", "gettimeofday");
+ * or
+ *     <some pointer> = lookup_vdso("LINUX_2.6", "__vdso_gettimeofday");
+ *
+ * lookup_vdso will return 0 if the symbol doesn't exist or if the init function
+ * failed or was not called.  lookup_vdso is a little slow, so its return value
+ * should be cached.
+ *
+ * lookup_vdso is threadsafe; the init functions are not.
+ */
+
+
+/* And here's the code. */
+#ifndef ELF_BITS
+# if ULONG_MAX > 0xffffffffUL
+#  define ELF_BITS 64
+# else
+#  define ELF_BITS 32
+# endif
+#endif
+
+#define ELF_BITS_XFORM2(bits, x) Elf##bits##_##x
+#define ELF_BITS_XFORM(bits, x) ELF_BITS_XFORM2(bits, x)
+#define ELF(x) ELF_BITS_XFORM(ELF_BITS, x)
+
+static struct vdso_info
+{
+	bool valid;
+
+	/* Load information */
+	uintptr_t load_addr;
+	uintptr_t load_offset;  /* load_addr - recorded vaddr */
+
+	/* Symbol table */
+	ELF(Sym) *symtab;
+	const char *symstrings;
+	ELF(Word) *bucket, *chain;
+	ELF(Word) nbucket, nchain;
+
+	/* Version table */
+	ELF(Versym) *versym;
+	ELF(Verdef) *verdef;
+} vdso_info;
+
+/* Straight from the ELF specification. */
+static unsigned long elf_hash(const char *name)
+{
+	unsigned long h = 0, g;
+	while (*name)
+	{
+		h = (h << 4) + *name++;
+		if ((g = h & 0xf0000000))
+			h ^= g >> 24;
+		h &= ~g;
+	}
+	return h;
+}
+
+static void vdso_init_from_sysinfo_ehdr(uintptr_t base)
+{
+	size_t i;
+	bool found_vaddr = false;
+
+	vdso_info.valid = false;
+
+	vdso_info.load_addr = base;
+
+	ELF(Ehdr) *hdr = (ELF(Ehdr)*)base;
+	if (hdr->e_ident[EI_CLASS] !=
+	    (ELF_BITS == 32 ? ELFCLASS32 : ELFCLASS64)) {
+		return;  /* Wrong ELF class -- check ELF_BITS */
+	}
+
+	ELF(Phdr) *pt = (ELF(Phdr)*)(vdso_info.load_addr + hdr->e_phoff);
+	ELF(Dyn) *dyn = 0;
+
+	/*
+	 * We need two things from the segment table: the load offset
+	 * and the dynamic table.
+	 */
+	for (i = 0; i < hdr->e_phnum; i++)
+	{
+		if (pt[i].p_type == PT_LOAD && !found_vaddr) {
+			found_vaddr = true;
+			vdso_info.load_offset =	base
+				+ (uintptr_t)pt[i].p_offset
+				- (uintptr_t)pt[i].p_vaddr;
+		} else if (pt[i].p_type == PT_DYNAMIC) {
+			dyn = (ELF(Dyn)*)(base + pt[i].p_offset);
+		}
+	}
+
+	if (!found_vaddr || !dyn)
+		return;  /* Failed */
+
+	/*
+	 * Fish out the useful bits of the dynamic table.
+	 */
+	ELF(Word) *hash = 0;
+	vdso_info.symstrings = 0;
+	vdso_info.symtab = 0;
+	vdso_info.versym = 0;
+	vdso_info.verdef = 0;
+	for (i = 0; dyn[i].d_tag != DT_NULL; i++) {
+		switch (dyn[i].d_tag) {
+		case DT_STRTAB:
+			vdso_info.symstrings = (const char *)
+				((uintptr_t)dyn[i].d_un.d_ptr
+				 + vdso_info.load_offset);
+			break;
+		case DT_SYMTAB:
+			vdso_info.symtab = (ELF(Sym) *)
+				((uintptr_t)dyn[i].d_un.d_ptr
+				 + vdso_info.load_offset);
+			break;
+		case DT_HASH:
+			hash = (ELF(Word) *)
+				((uintptr_t)dyn[i].d_un.d_ptr
+				 + vdso_info.load_offset);
+			break;
+		case DT_VERSYM:
+			vdso_info.versym = (ELF(Versym) *)
+				((uintptr_t)dyn[i].d_un.d_ptr
+				 + vdso_info.load_offset);
+			break;
+		case DT_VERDEF:
+			vdso_info.verdef = (ELF(Verdef) *)
+				((uintptr_t)dyn[i].d_un.d_ptr
+				 + vdso_info.load_offset);
+			break;
+		}
+	}
+	if (!vdso_info.symstrings || !vdso_info.symtab || !hash)
+		return;  /* Failed */
+
+	if (!vdso_info.verdef)
+		vdso_info.versym = 0;
+
+	/* Parse the hash table header. */
+	vdso_info.nbucket = hash[0];
+	vdso_info.nchain = hash[1];
+	vdso_info.bucket = &hash[2];
+	vdso_info.chain = &hash[vdso_info.nbucket + 2];
+
+	/* That's all we need. */
+	vdso_info.valid = true;
+}
+
+static bool vdso_match_version(ELF(Versym) ver,
+			       const char *name, ELF(Word) hash)
+{
+	/*
+	 * This is a helper function to check if the version indexed by
+	 * ver matches name (which hashes to hash).
+	 *
+	 * The version definition table is a mess, and I don't know how
+	 * to do this in better than linear time without allocating memory
+	 * to build an index.  I also don't know why the table has
+	 * variable size entries in the first place.
+	 *
+	 * For added fun, I can't find a comprehensible specification of how
+	 * to parse all the weird flags in the table.
+	 *
+	 * So I just parse the whole table every time.
+	 */
+
+	/* First step: find the version definition */
+	ver &= 0x7fff;  /* Apparently bit 15 means "hidden" */
+	ELF(Verdef) *def = vdso_info.verdef;
+	while(true) {
+		if ((def->vd_flags & VER_FLG_BASE) == 0
+		    && (def->vd_ndx & 0x7fff) == ver)
+			break;
+
+		if (def->vd_next == 0)
+			return false;  /* No definition. */
+
+		def = (ELF(Verdef) *)((char *)def + def->vd_next);
+	}
+
+	/* Now figure out whether it matches. */
+	ELF(Verdaux) *aux = (ELF(Verdaux)*)((char *)def + def->vd_aux);
+	return def->vd_hash == hash
+		&& !strcmp(name, vdso_info.symstrings + aux->vda_name);
+}
+
+static void *lookup_vdso(const char *version, const char *name)
+{
+	unsigned long ver_hash;
+
+	if (!vdso_info.valid)
+		return 0;
+
+	ver_hash = elf_hash(version);
+	ELF(Word) chain = vdso_info.bucket[elf_hash(name) % vdso_info.nbucket];
+
+	for (; chain != STN_UNDEF; chain = vdso_info.chain[chain]) {
+		ELF(Sym) *sym = &vdso_info.symtab[chain];
+
+		/* Check for a defined global or weak function w/ right name. */
+		if (ELF64_ST_TYPE(sym->st_info) != STT_FUNC)
+			continue;
+		if (ELF64_ST_BIND(sym->st_info) != STB_GLOBAL &&
+		    ELF64_ST_BIND(sym->st_info) != STB_WEAK)
+			continue;
+		if (sym->st_shndx == SHN_UNDEF)
+			continue;
+		if (strcmp(name, vdso_info.symstrings + sym->st_name))
+			continue;
+
+		/* Check symbol version. */
+		if (vdso_info.versym
+		    && !vdso_match_version(vdso_info.versym[chain],
+					   version, ver_hash))
+			continue;
+
+		return (void *)(vdso_info.load_offset + sym->st_value);
+	}
+
+	return 0;
+}
+
+static void parse_vdso(void)
+{
+	uintptr_t vdso = (uintptr_t)getauxval(AT_SYSINFO_EHDR);
+
+	if (!vdso)
+		error(1, ENOENT, "vDSO signature not found");
+
+	vdso_init_from_sysinfo_ehdr(vdso);
+}
+
+void *cobalt_lookup_vdso(const char *version, const char *name)
+{
+	static pthread_once_t parse_vdso_once = PTHREAD_ONCE_INIT;
+	void *sym;
+
+	pthread_once(&parse_vdso_once, parse_vdso);
+
+	sym = lookup_vdso(version, name);
+	if (!sym)
+		error(1, ENOENT, "%s not found in vDSO", name);
+
+	return sym;
+}
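+
+/*
+ * Editor's illustration (not part of the Xenomai sources, compiled
+ * out): resolving a vDSO entry through the wrapper above, along the
+ * lines of the usage notes at the top of this file. The "LINUX_2.6"
+ * version string and the __vdso_clock_gettime symbol follow the x86
+ * conventions; other architectures may export different names.
+ */
+#if 0
+static int (*vdso_clock_gettime)(clockid_t, struct timespec *);
+
+static void resolve_clock_gettime(void)
+{
+	vdso_clock_gettime = cobalt_lookup_vdso("LINUX_2.6",
+						"__vdso_clock_gettime");
+}
+#endif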
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/printf.c b/kernel/xenomai-v3.2.4/lib/cobalt/printf.c
new file mode 100644
index 0000000..0aa5940
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/printf.c
@@ -0,0 +1,919 @@
+/*
+ * Copyright (C) 2007,2011 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <inttypes.h>
+#include <limits.h>
+#include <pthread.h>
+#include <signal.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <syslog.h>
+#include <boilerplate/atomic.h>
+#include <boilerplate/compiler.h>
+#include <cobalt/tunables.h>
+#include <cobalt/sys/cobalt.h>
+#include "internal.h"
+
+#define RT_PRINT_DEFAULT_BUFFER		16*1024
+#define RT_PRINT_DEFAULT_SYNCDELAY	100 /* ms */
+#define RT_PRINT_DEFAULT_BUFFERS_COUNT  4
+
+#define RT_PRINT_LINE_BREAK		256
+
+#define RT_PRINT_SYSLOG_STREAM		NULL
+
+#define RT_PRINT_MODE_FORMAT		0
+#define RT_PRINT_MODE_FWRITE		1
+
+struct entry_head {
+	FILE *dest;
+	uint32_t seq_no;
+	int priority;
+	size_t len;
+	char data[0];
+} __attribute__((packed));
+
+struct print_buffer {
+	off_t write_pos;
+
+	struct print_buffer *next, *prev;
+
+	void *ring;
+	size_t size;
+
+	char name[32];
+
+	/*
+	 * Keep read_pos separated from write_pos to optimise write
+	 * caching on SMP.
+	 */
+	off_t read_pos;
+};
+
+__weak int __cobalt_print_bufsz = RT_PRINT_DEFAULT_BUFFER;
+
+int __cobalt_print_bufcount = RT_PRINT_DEFAULT_BUFFERS_COUNT;
+
+int __cobalt_print_syncdelay = RT_PRINT_DEFAULT_SYNCDELAY;
+
+static struct print_buffer *first_buffer;
+static int buffers;
+static uint32_t seq_no;
+static struct timespec syncdelay;
+static pthread_mutex_t buffer_lock;
+static pthread_cond_t printer_wakeup;
+static pthread_key_t buffer_key;
+static pthread_key_t cleanup_key;
+static pthread_t printer_thread;
+static atomic_long_t *pool_bitmap;
+static unsigned pool_bitmap_len;
+static unsigned pool_buf_size;
+static unsigned long pool_start, pool_len;
+
+static void release_buffer(struct print_buffer *buffer);
+static void print_buffers(void);
+
+/* *** rt_print API *** */
+
+static int
+vprint_to_buffer(FILE *stream, int fortify_level, int priority,
+		 unsigned int mode, size_t sz, const char *format, va_list args)
+{
+	struct print_buffer *buffer = pthread_getspecific(buffer_key);
+	off_t write_pos, read_pos;
+	struct entry_head *head;
+	int len, str_len;
+	int res = 0;
+
+	if (!buffer) {
+		res = rt_print_init(0, NULL);
+		if (res) {
+			errno = res;
+			return -1;
+		}
+		buffer = pthread_getspecific(buffer_key);
+	}
+
+	/* Take a snapshot of the ring buffer state */
+	write_pos = buffer->write_pos;
+	read_pos = buffer->read_pos;
+	smp_mb();
+
+	/* Is our write limit the end of the ring buffer? */
+	if (write_pos >= read_pos) {
+		/* Keep a safety margin to the end for at least an empty entry */
+		len = buffer->size - write_pos - sizeof(struct entry_head);
+
+		/* Special case: We were stuck at the end of the ring buffer
+		   with space left there only for one empty entry. Now
+		   read_pos was moved forward and we can wrap around. */
+		if (len == 0 && read_pos > sizeof(struct entry_head)) {
+			/* Write out empty entry */
+			head = buffer->ring + write_pos;
+			head->seq_no = seq_no;
+			head->priority = 0;
+			head->len = 0;
+
+			/* Forward to the ring buffer start */
+			write_pos = 0;
+			len = read_pos - 1;
+		}
+	} else {
+		/* Our limit is the read_pos ahead of our write_pos. One byte
+		   margin is required to detect a full ring. */
+		len = read_pos - write_pos - 1;
+	}
+
+	/* Account for head length */
+	len -= sizeof(struct entry_head);
+	if (len < 0)
+		len = 0;
+
+	head = buffer->ring + write_pos;
+
+	if (mode == RT_PRINT_MODE_FORMAT) {
+		if (stream != RT_PRINT_SYSLOG_STREAM) {
+			/* We do not need the terminating \0 */
+#ifdef CONFIG_XENO_FORTIFY
+			if (fortify_level > 0)
+				res = __vsnprintf_chk(head->data, len,
+						      fortify_level - 1,
+						      len > 0 ? len : 0, 
+						      format, args);
+			else
+#else
+				(void)fortify_level;
+#endif
+			res = vsnprintf(head->data, len, format, args);
+
+			if (res < len) {
+				/* Text was written completely, res contains its
+				   length */
+				len = res;
+			} else {
+				/* Text was truncated */
+				res = len;
+			}
+		} else {
+			/* We DO need the terminating \0 */
+#ifdef CONFIG_XENO_FORTIFY
+			if (fortify_level > 0)
+				res = __vsnprintf_chk(head->data, len,
+						      fortify_level - 1,
+						      len > 0 ? len : 0, 
+						      format, args);
+			else
+#endif
+				res = vsnprintf(head->data, len, format, args);
+
+			if (res < len) {
+				/* Text was written completely, res contains its
+				   length */
+				len = res + 1;
+			} else {
+				/* Text was truncated */
+				res = len;
+			}
+		}
+	} else if (len >= 1) {
+		str_len = sz;
+		len = (str_len < len) ? str_len : len;
+		memcpy(head->data, format, len);
+	} else
+		len = 0;
+
+	/* If we were able to write some text, finalise the entry */
+	if (len > 0) {
+		head->seq_no = ++seq_no;
+		head->priority = priority;
+		head->dest = stream;
+		head->len = len;
+
+		/* Move forward by text and head length */
+		write_pos += len + sizeof(struct entry_head);
+	}
+
+	/* Wrap around early if there is more space on the other side */
+	if (write_pos >= buffer->size - RT_PRINT_LINE_BREAK &&
+	    read_pos <= write_pos && read_pos > buffer->size - write_pos) {
+		/* An empty entry marks the wrap-around */
+		head = buffer->ring + write_pos;
+		head->seq_no = seq_no;
+		head->priority = priority;
+		head->len = 0;
+
+		write_pos = 0;
+	}
+
+	/* All entry data must be written before we can update write_pos */
+	smp_wmb();
+
+	buffer->write_pos = write_pos;
+
+	return res;
+}
+
+static int print_to_buffer(FILE *stream, int priority, unsigned int mode,
+			   size_t sz, const char *format, ...)
+{
+	va_list args;
+	int ret;
+
+	va_start(args, format);
+	ret = vprint_to_buffer(stream, 0, priority, mode, sz, format, args);
+	va_end(args);
+
+	return ret;
+}
+
+int rt_vfprintf(FILE *stream, const char *format, va_list args)
+{
+	return vprint_to_buffer(stream, 0, 0,
+				RT_PRINT_MODE_FORMAT, 0, format, args);
+}
+
+#ifdef CONFIG_XENO_FORTIFY
+
+int __rt_vfprintf_chk(FILE *stream, int level, const char *fmt, va_list args)
+{
+	return vprint_to_buffer(stream, level + 1, 0,
+				RT_PRINT_MODE_FORMAT, 0, fmt, args);
+}
+
+#endif
+
+int rt_vprintf(const char *format, va_list args)
+{
+	return rt_vfprintf(stdout, format, args);
+}
+
+int rt_fprintf(FILE *stream, const char *format, ...)
+{
+	va_list args;
+	int n;
+
+	va_start(args, format);
+	n = rt_vfprintf(stream, format, args);
+	va_end(args);
+
+	return n;
+}
+
+int rt_printf(const char *format, ...)
+{
+	va_list args;
+	int n;
+
+	va_start(args, format);
+	n = rt_vfprintf(stdout, format, args);
+	va_end(args);
+
+	return n;
+}
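+
+/*
+ * Editor's illustration (not part of the Xenomai sources, compiled
+ * out): a real-time thread names its relay buffer once, then prints
+ * without leaving primary mode; the printer thread flushes the output
+ * from plain Linux context.
+ */
+#if 0
+static void *rt_worker(void *arg)
+{
+	int i;
+
+	rt_print_init(0, "worker");	/* 0 selects the default size */
+
+	for (i = 0; i < 1000; i++) {
+		/* ... time-critical work ... */
+		rt_printf("cycle %d done\n", i);	/* no mode switch */
+	}
+
+	return NULL;
+}
+#endif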
+
+int rt_fputs(const char *s, FILE *stream)
+{
+	return print_to_buffer(stream, 0, RT_PRINT_MODE_FWRITE, strlen(s), s);
+}
+
+int rt_puts(const char *s)
+{
+	int res;
+
+	res = rt_fputs(s, stdout);
+	if (res < 0)
+		return res;
+
+	return print_to_buffer(stdout, 0, RT_PRINT_MODE_FWRITE, 1, "\n");
+}
+
+int rt_fputc(int c, FILE *stream)
+{
+	unsigned char uc = c;
+	int rc;
+
+	rc = print_to_buffer(stream, 0, RT_PRINT_MODE_FWRITE, 1, (char *)&uc);
+	if (rc < 0)
+		return EOF;
+
+	return (int)uc;
+}
+
+int rt_putchar(int c)
+{
+	return rt_fputc(c, stdout);
+}
+
+size_t rt_fwrite(const void *ptr, size_t size, size_t nmemb, FILE *stream)
+{
+	print_to_buffer(stream, 0, RT_PRINT_MODE_FWRITE, size * nmemb, ptr);
+	return nmemb;
+}
+
+
+void rt_syslog(int priority, const char *format, ...)
+{
+	va_list args;
+
+	va_start(args, format);
+	vprint_to_buffer(RT_PRINT_SYSLOG_STREAM, 0, priority,
+			 RT_PRINT_MODE_FORMAT, 0, format, args);
+	va_end(args);
+}
+
+void rt_vsyslog(int priority, const char *format, va_list args)
+{
+	vprint_to_buffer(RT_PRINT_SYSLOG_STREAM, 0, priority,
+			 RT_PRINT_MODE_FORMAT, 0, format, args);
+}
+
+#ifdef CONFIG_XENO_FORTIFY
+
+void __rt_vsyslog_chk(int priority, int level, const char *fmt, va_list args)
+{
+	vprint_to_buffer(RT_PRINT_SYSLOG_STREAM, level + 1, priority,
+			 RT_PRINT_MODE_FORMAT, 0, fmt, args);
+}
+
+#endif
+
+static void set_buffer_name(struct print_buffer *buffer, const char *name)
+{
+	int n;
+
+	n = sprintf(buffer->name, "%08lx", (unsigned long)pthread_self());
+	if (name) {
+		buffer->name[n++] = ' ';
+		strncpy(buffer->name+n, name, sizeof(buffer->name)-n-1);
+		buffer->name[sizeof(buffer->name)-1] = 0;
+	}
+}
+
+static void rt_print_init_inner(struct print_buffer *buffer, size_t size)
+{
+	buffer->size = size;
+
+	memset(buffer->ring, 0, size);
+
+	buffer->read_pos  = 0;
+	buffer->write_pos = 0;
+
+	buffer->prev = NULL;
+
+	pthread_mutex_lock(&buffer_lock);
+
+	buffer->next = first_buffer;
+	if (first_buffer)
+		first_buffer->prev = buffer;
+	first_buffer = buffer;
+
+	buffers++;
+	pthread_cond_signal(&printer_wakeup);
+
+	pthread_mutex_unlock(&buffer_lock);
+}
+
+int rt_print_init(size_t buffer_size, const char *buffer_name)
+{
+	struct print_buffer *buffer = pthread_getspecific(buffer_key);
+	size_t size = buffer_size;
+	unsigned long old_bitmap;
+	unsigned j;
+
+	if (!size)
+		size = __cobalt_print_bufsz;
+	else if (size < RT_PRINT_LINE_BREAK)
+		return EINVAL;
+
+	if (buffer) {
+		/* Only set name if buffer size is unchanged or default */
+		if (size == buffer->size || !buffer_size) {
+			set_buffer_name(buffer, buffer_name);
+			return 0;
+		}
+		release_buffer(buffer);
+		buffer = NULL;
+	}
+
+	/* Find a free buffer in the pool */
+	do {
+		unsigned long bitmap;
+		unsigned i;
+
+		for (i = 0; i < pool_bitmap_len; i++) {
+			old_bitmap = atomic_long_read(&pool_bitmap[i]);
+			if (old_bitmap)
+				goto acquire;
+		}
+
+		goto not_found;
+
+	  acquire:
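+		/*
+		 * Claim the lowest free bit with a cmpxchg loop: retry
+		 * while other threads update the same word, and rescan
+		 * from the top once the word runs out of set bits.
+		 */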
+		do {
+			bitmap = old_bitmap;
+			j = __builtin_ffsl(bitmap) - 1;
+			old_bitmap = atomic_cmpxchg(&pool_bitmap[i],
+						    bitmap,
+						    bitmap & ~(1UL << j));
+		} while (old_bitmap != bitmap && old_bitmap);
+		j += i * LONG_BIT;
+	} while (!old_bitmap);
+
+	buffer = (struct print_buffer *)(pool_start + j * pool_buf_size);
+
+  not_found:
+
+	if (!buffer) {
+		cobalt_assert_nrt();
+
+		buffer = malloc(sizeof(*buffer));
+		if (!buffer)
+			return ENOMEM;
+
+		buffer->ring = malloc(size);
+		if (!buffer->ring) {
+			free(buffer);	/* don't leak the descriptor */
+			return ENOMEM;
+		}
+
+		rt_print_init_inner(buffer, size);
+	}
+
+	set_buffer_name(buffer, buffer_name);
+
+	pthread_setspecific(buffer_key, buffer);
+
+	return 0;
+}
+
+const char *rt_print_buffer_name(void)
+{
+	struct print_buffer *buffer = pthread_getspecific(buffer_key);
+	int res;
+
+	if (!buffer) {
+		res = rt_print_init(0, NULL);
+		if (res)
+			return NULL;
+
+		buffer = pthread_getspecific(buffer_key);
+	}
+
+	return buffer->name;
+}
+
+/* *** Deferred Output Management *** */
+void rt_print_flush_buffers(void)
+{
+	cobalt_thread_relax();
+	pthread_mutex_lock(&buffer_lock);
+	print_buffers();
+	pthread_mutex_unlock(&buffer_lock);
+}
+
+static void release_buffer(struct print_buffer *buffer)
+{
+	struct print_buffer *prev, *next;
+	unsigned long old_bitmap, bitmap;
+	unsigned int i, j;
+
+	cobalt_assert_nrt();
+
+	pthread_setspecific(buffer_key, NULL);
+
+	pthread_mutex_lock(&buffer_lock);
+
+	print_buffers();
+
+	pthread_mutex_unlock(&buffer_lock);
+
+	/* Return the buffer to the pool */
+	if ((unsigned long)buffer - pool_start >= pool_len)
+		goto dofree;
+
+	j = ((unsigned long)buffer - pool_start) / pool_buf_size;
+	i = j / LONG_BIT;
+	j = j % LONG_BIT;
+
+	old_bitmap = atomic_long_read(&pool_bitmap[i]);
+	do {
+		bitmap = old_bitmap;
+		old_bitmap = atomic_cmpxchg(&pool_bitmap[i],
+					    bitmap,
+					    bitmap | (1UL << j));
+	} while (old_bitmap != bitmap);
+
+	return;
+dofree:
+	pthread_mutex_lock(&buffer_lock);
+
+	prev = buffer->prev;
+	next = buffer->next;
+
+	if (prev)
+		prev->next = next;
+	else
+		first_buffer = next;
+	if (next)
+		next->prev = prev;
+
+	buffers--;
+
+	pthread_mutex_unlock(&buffer_lock);
+
+	free(buffer->ring);
+	free(buffer);
+}
+
+static void do_cleanup(void *arg)
+{
+	struct print_buffer *buffer = pthread_getspecific(buffer_key);
+
+	if (buffer)
+		release_buffer(buffer);
+
+	pthread_cancel(printer_thread);
+}
+
+static inline uint32_t get_next_seq_no(struct print_buffer *buffer)
+{
+	struct entry_head *head = buffer->ring + buffer->read_pos;
+	return head->seq_no;
+}
+
+static struct print_buffer *get_next_buffer(void)
+{
+	struct print_buffer *pos = first_buffer;
+	struct print_buffer *buffer = NULL;
+	uint32_t next_seq_no = 0; /* silence gcc... */
+
+	while (pos) {
+		if (pos->read_pos != pos->write_pos &&
+		    (!buffer || get_next_seq_no(pos) < next_seq_no)) {
+			buffer = pos;
+			next_seq_no = get_next_seq_no(pos);
+		}
+		pos = pos->next;
+	}
+
+	return buffer;
+}
+
+static void print_buffers(void)
+{
+	struct print_buffer *buffer;
+	struct entry_head *head;
+	off_t read_pos;
+	int len, ret;
+
+	while (1) {
+		buffer = get_next_buffer();
+		if (!buffer)
+			break;
+
+		read_pos = buffer->read_pos;
+		head = buffer->ring + read_pos;
+		len = head->len;
+
+		if (len) {
+			/* Print out non-empty entry and proceed */
+			/* Check if output goes to syslog */
+			if (head->dest == RT_PRINT_SYSLOG_STREAM) {
+				syslog(head->priority,
+				       "%s", head->data);
+			} else {
+				ret = fwrite(head->data,
+					     head->len, 1, head->dest);
+				(void)ret;
+			}
+
+			read_pos += sizeof(*head) + len;
+		} else {
+			/* Empty entries mark the wrap-around */
+			read_pos = 0;
+		}
+
+		/* Make sure we have read the entry completely before
+		   forwarding read_pos */
+		smp_rmb();
+		buffer->read_pos = read_pos;
+
+		/* Enforce the read_pos update before proceeding */
+		smp_wmb();
+	}
+}
+
+static void *printer_loop(void *arg)
+{
+	while (1) {
+		pthread_mutex_lock(&buffer_lock);
+
+		while (buffers == 0)
+			pthread_cond_wait(&printer_wakeup, &buffer_lock);
+
+		print_buffers();
+
+		pthread_mutex_unlock(&buffer_lock);
+
+		nanosleep(&syncdelay, NULL);
+	}
+
+	return NULL;
+}
+
+static void spawn_printer_thread(void)
+{
+	pthread_attr_t thattr;
+	sigset_t sset, oset;
+
+	pthread_attr_init(&thattr);
+	sigfillset(&sset);
+	pthread_sigmask(SIG_BLOCK, &sset, &oset);
+	pthread_create(&printer_thread, &thattr, printer_loop, NULL);
+	pthread_sigmask(SIG_SETMASK, &oset, NULL);
+	pthread_setname_np(printer_thread, "cobalt_printf");
+}
+
+void cobalt_print_init_atfork(void)
+{
+	struct print_buffer *my_buffer = pthread_getspecific(buffer_key);
+	struct print_buffer **pbuffer = &first_buffer;
+
+	if (my_buffer) {
+		/* Any content of my_buffer should be printed by our parent,
+		   not us. */
+		memset(my_buffer->ring, 0, my_buffer->size);
+
+		my_buffer->read_pos  = 0;
+		my_buffer->write_pos = 0;
+	}
+
+	/* re-init to avoid finding it locked by some parent thread */
+	pthread_mutex_init(&buffer_lock, NULL);
+
+	while (*pbuffer) {
+		if (*pbuffer == my_buffer)
+			pbuffer = &(*pbuffer)->next;
+		else if ((unsigned long)*pbuffer - pool_start < pool_len) {
+			release_buffer(*pbuffer);
+			pbuffer = &(*pbuffer)->next;
+		}
+		else
+			release_buffer(*pbuffer);
+	}
+
+	spawn_printer_thread();
+}
+
+void cobalt_print_init(void)
+{
+	unsigned int i;
+
+	first_buffer = NULL;
+	seq_no = 0;
+
+	syncdelay.tv_sec  = __cobalt_print_syncdelay / 1000;
+	syncdelay.tv_nsec = (__cobalt_print_syncdelay % 1000) * 1000000;
+
+	/* Fill the buffer pool */
+	pool_bitmap_len = (__cobalt_print_bufcount+LONG_BIT-1)/LONG_BIT;
+	if (!pool_bitmap_len)
+		goto done;
+
+	pool_bitmap = malloc(pool_bitmap_len * sizeof(*pool_bitmap));
+	if (!pool_bitmap)
+		early_panic("error allocating print relay buffers");
+
+	pool_buf_size = sizeof(struct print_buffer) + __cobalt_print_bufsz;
+	pool_len = __cobalt_print_bufcount * pool_buf_size;
+	pool_start = (unsigned long)malloc(pool_len);
+	if (!pool_start)
+		early_panic("error allocating print relay buffers");
+
+	for (i = 0; i < __cobalt_print_bufcount / LONG_BIT; i++)
+		atomic_long_set(&pool_bitmap[i], ~0UL);
+	if (__cobalt_print_bufcount % LONG_BIT)
+		atomic_long_set(&pool_bitmap[i],
+				(1UL << (__cobalt_print_bufcount % LONG_BIT)) - 1);
+
+	for (i = 0; i < __cobalt_print_bufcount; i++) {
+		struct print_buffer *buffer =
+			(struct print_buffer *)
+			(pool_start + i * pool_buf_size);
+
+		buffer->ring = (char *)(buffer + 1);
+
+		rt_print_init_inner(buffer, __cobalt_print_bufsz);
+	}
+done:
+	pthread_mutex_init(&buffer_lock, NULL);
+	pthread_key_create(&buffer_key, (void (*)(void*))release_buffer);
+	pthread_key_create(&cleanup_key, do_cleanup);
+	pthread_cond_init(&printer_wakeup, NULL);
+	spawn_printer_thread();
+	/* We just need a non-zero TSD to trigger the dtor upon unwinding. */
+	pthread_setspecific(cleanup_key, &cleanup_key);
+
+	atexit(rt_print_flush_buffers);
+}
+
+COBALT_IMPL(int, vfprintf, (FILE *stream, const char *fmt, va_list args))
+{
+	if (!cobalt_is_relaxed())
+		return rt_vfprintf(stream, fmt, args);
+	else {
+		rt_print_flush_buffers();
+		return __STD(vfprintf(stream, fmt, args));
+	}
+}
+
+COBALT_IMPL(int, vprintf, (const char *fmt, va_list args))
+{
+	return __COBALT(vfprintf(stdout, fmt, args));
+}
+
+COBALT_IMPL(int, fprintf, (FILE *stream, const char *fmt, ...))
+{
+	va_list args;
+	int rc;
+
+	va_start(args, fmt);
+	rc = __COBALT(vfprintf(stream, fmt, args));
+	va_end(args);
+
+	return rc;
+}
+
+COBALT_IMPL(int, printf, (const char *fmt, ...))
+{
+	va_list args;
+	int rc;
+
+	va_start(args, fmt);
+	rc = __COBALT(vfprintf(stdout, fmt, args));
+	va_end(args);
+
+	return rc;
+}
+
+COBALT_IMPL(int, fputs, (const char *s, FILE *stream))
+{
+	if (!cobalt_is_relaxed())
+		return rt_fputs(s, stream);
+	else {
+		rt_print_flush_buffers();
+		return __STD(fputs(s, stream));
+	}
+}
+
+COBALT_IMPL(int, puts, (const char *s))
+{
+	if (!cobalt_is_relaxed())
+		return rt_puts(s);
+	else {
+		rt_print_flush_buffers();
+		return __STD(puts(s));
+	}
+}
+
+COBALT_IMPL(int, fputc, (int c, FILE *stream))
+{
+	if (!cobalt_is_relaxed())
+		return rt_fputc(c, stream);
+	else {
+		rt_print_flush_buffers();
+		return __STD(fputc(c, stream));
+	}
+}
+
+COBALT_IMPL(int, putchar, (int c))
+{
+	if (!cobalt_is_relaxed())
+		return rt_putchar(c);
+	else {
+		rt_print_flush_buffers();
+		return __STD(putchar(c));
+	}
+}
+
+COBALT_IMPL(size_t, fwrite, (const void *ptr, size_t size, size_t nmemb, FILE *stream))
+{
+	if (!cobalt_is_relaxed())
+		return rt_fwrite(ptr, size, nmemb, stream);
+	else {
+		rt_print_flush_buffers();
+		return __STD(fwrite(ptr, size, nmemb, stream));
+	}
+
+}
+
+COBALT_IMPL(int, fclose, (FILE *stream))
+{
+	rt_print_flush_buffers();
+	return __STD(fclose(stream));
+}
+
+COBALT_IMPL(void, vsyslog, (int priority, const char *fmt, va_list ap))
+{
+	if (!cobalt_is_relaxed())
+		return rt_vsyslog(priority, fmt, ap);
+	else {
+		rt_print_flush_buffers();
+		__STD(vsyslog(priority, fmt, ap));
+	}
+}
+
+COBALT_IMPL(void, syslog, (int priority, const char *fmt, ...))
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__COBALT(vsyslog(priority, fmt, args));
+	va_end(args);
+}
+
+/* 
+ * Checked versions for -D_FORTIFY_SOURCE
+ */
+COBALT_IMPL(int, __vfprintf_chk, (FILE *f, int flag, const char *fmt, va_list ap))
+{
+#ifdef CONFIG_XENO_FORTIFY
+	if (!cobalt_is_relaxed())
+		return __rt_vfprintf_chk(f, flag, fmt, ap);
+	else {
+		rt_print_flush_buffers();
+		return __STD(__vfprintf_chk(f, flag, fmt, ap));
+	}
+#else
+	panic("--enable-fortify is required with applications enabling _FORTIFY_SOURCE");
+#endif
+}
+
+COBALT_IMPL(int, __vprintf_chk, (int flag, const char *fmt, va_list ap))
+{
+	return __COBALT(__vfprintf_chk(stdout, flag, fmt, ap));
+}
+
+COBALT_IMPL(int, __fprintf_chk, (FILE *f, int flag, const char *fmt, ...))
+{
+	va_list args;
+	int ret;
+
+	va_start(args, fmt);
+	ret = __COBALT(__vfprintf_chk(f, flag, fmt, args));
+	va_end(args);
+
+	return ret;
+}
+
+COBALT_IMPL(int, __printf_chk, (int flag, const char *fmt, ...))
+{
+	va_list args;
+	int ret;
+
+	va_start(args, fmt);
+	ret = __COBALT(__vprintf_chk(flag, fmt, args));
+	va_end(args);
+
+	return ret;
+}
+
+COBALT_IMPL(void, __vsyslog_chk, (int pri, int flag, const char *fmt, va_list ap))
+{
+#ifdef CONFIG_XENO_FORTIFY
+	if (!cobalt_is_relaxed())
+		return __rt_vsyslog_chk(pri, flag, fmt, ap);
+	else {
+		rt_print_flush_buffers();
+		__STD(__vsyslog_chk(pri, flag, fmt, ap));
+	}
+#else
+	panic("--enable-fortify is required with applications enabling _FORTIFY_SOURCE");
+#endif
+}
+
+COBALT_IMPL(void, __syslog_chk, (int pri, int flag, const char *fmt, ...))
+{
+	va_list args;
+
+	va_start(args, fmt);
+	__COBALT(__vsyslog_chk(pri, flag, fmt, args));
+	va_end(args);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/rtdm.c b/kernel/xenomai-v3.2.4/lib/cobalt/rtdm.c
new file mode 100644
index 0000000..8232310
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/rtdm.c
@@ -0,0 +1,578 @@
+/*
+ * Copyright (C) 2005 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307 USA.
+ */
+
+#include <errno.h>
+#include <string.h>
+#include <stdarg.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <sys/socket.h>
+#include <sys/mman.h>
+#include <rtdm/rtdm.h>
+#include <cobalt/uapi/syscall.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+/* support for very old c libraries not supporting O_TMPFILE */
+#ifndef O_TMPFILE
+#define O_TMPFILE (020000000 | 0200000)
+#endif
+
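+/*
+ * Editor's note: every wrapper below follows the same pattern. The
+ * Cobalt syscall is tried first; -EADV or -ENOSYS back from the core
+ * means the file descriptor or service is not managed by RTDM, so the
+ * call falls through to the regular C library entry via __STD().
+ */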
+
+static inline int set_errno(int ret)
+{
+	if (ret >= 0)
+		return ret;
+
+	errno = -ret;
+	return -1;
+}
+
+static int do_open(const char *path, int oflag, mode_t mode)
+{
+	int fd, oldtype;
+
+	/*
+	 * Don't dereference path, as it might be invalid. Leave it to
+	 * the kernel service.
+	 */
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+	fd = XENOMAI_SYSCALL2(sc_cobalt_open, path, oflag);
+	pthread_setcanceltype(oldtype, NULL);
+	if (fd < 0) {
+		if (fd != -EADV && fd != -ENOSYS)
+			return set_errno(fd);
+
+		fd = __STD(open(path, oflag, mode));
+	}
+
+	return fd;
+}
+
+COBALT_IMPL(int, open, (const char *path, int oflag, ...))
+{
+	mode_t mode = 0;
+	va_list ap;
+
+	if ((oflag & O_CREAT)  || (oflag & O_TMPFILE) == O_TMPFILE) {
+		va_start(ap, oflag);
+		mode = va_arg(ap, int);
+		va_end(ap);
+	}
+
+	return do_open(path, oflag, mode);
+}
+
+COBALT_IMPL(int, open64, (const char *path, int oflag, ...))
+{
+	mode_t mode = 0;
+	va_list ap;
+
+	if ((oflag & O_CREAT)  || (oflag & O_TMPFILE) == O_TMPFILE) {
+		va_start(ap, oflag);
+		mode = va_arg(ap, int);
+		va_end(ap);
+	}
+
+	return do_open(path, oflag | O_LARGEFILE, mode);
+}
+
+COBALT_IMPL(int, __open_2, (const char *path, int oflag))
+{
+	/* __open_2() from glibc adds a runtime precondition test for the 'oflag'
+	 * parameter to the functionality of open(). It may be used when the macro
+	 * _FORTIFY_SOURCE is defined when compiling the application code.
+	 */
+	if (__OPEN_NEEDS_MODE(oflag)) {
+		const char *msg =
+			"invalid open call: O_CREAT or O_TMPFILE without mode\n";
+		ssize_t n = __STD(write(STDERR_FILENO, msg, strlen(msg)));
+		(void)n;
+		abort();
+	}
+
+	return do_open(path, oflag, 0);
+}
+
+COBALT_IMPL(int, __open64_2, (const char *path, int oflag))
+{
+	/* just like __open_2() and open64() */
+	if (__OPEN_NEEDS_MODE(oflag)) {
+		const char *msg =
+			"invalid open64 call: O_CREAT or O_TMPFILE without mode\n";
+		ssize_t n = __STD(write(STDERR_FILENO, msg, strlen(msg)));
+		(void)n;
+		abort();
+	}
+
+	return do_open(path, oflag | O_LARGEFILE, 0);
+}
+
+COBALT_IMPL(int, socket, (int protocol_family, int socket_type, int protocol))
+{
+	int s;
+
+	s = XENOMAI_SYSCALL3(sc_cobalt_socket, protocol_family,
+			     socket_type, protocol);
+	if (s < 0) {
+		if (s != -EAFNOSUPPORT &&
+		    s != -EPROTONOSUPPORT &&
+		    s != -ENOSYS)
+			return set_errno(s);
+
+		s = __STD(socket(protocol_family, socket_type, protocol));
+	}
+
+	return s;
+}
+
+COBALT_IMPL(int, close, (int fd))
+{
+	int oldtype;
+	int ret;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_close, fd);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(close(fd));
+}
+
+static int do_ioctl(int fd, unsigned int request, void *arg)
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_ioctl, fd, request, arg);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+COBALT_IMPL(int, fcntl, (int fd, int cmd, ...))
+{
+	va_list ap;
+	long arg;
+	int ret;
+
+	va_start(ap, cmd);
+	arg = va_arg(ap, long);
+	va_end(ap);
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_fcntl, fd, cmd, arg);
+
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(fcntl(fd, cmd, arg));
+}
+
+COBALT_IMPL(int, ioctl, (int fd, unsigned int request, ...))
+{
+	va_list ap;
+	void *arg;
+	int ret;
+
+	va_start(ap, request);
+	arg = va_arg(ap, void *);
+	va_end(ap);
+
+	ret = do_ioctl(fd, request, arg);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(ioctl(fd, request, arg));
+}
+
+COBALT_IMPL(ssize_t, read, (int fd, void *buf, size_t nbyte))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_read, fd, buf, nbyte);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(read(fd, buf, nbyte));
+}
+
+COBALT_IMPL(ssize_t, write, (int fd, const void *buf, size_t nbyte))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_write, fd, buf, nbyte);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(write(fd, buf, nbyte));
+}
+
+static ssize_t do_recvmsg(int fd, struct msghdr *msg, int flags)
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_recvmsg, fd, msg, flags);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+COBALT_IMPL(ssize_t, recvmsg, (int fd, struct msghdr *msg, int flags))
+{
+	int ret;
+
+	ret = do_recvmsg(fd, msg, flags);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(recvmsg(fd, msg, flags));
+}
+
+COBALT_IMPL(int, recvmmsg, (int fd, struct mmsghdr *msgvec, unsigned int vlen,
+			    unsigned int flags, struct timespec *timeout))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	ret = XENOMAI_SYSCALL5(sc_cobalt_recvmmsg64, fd, msgvec,
+				vlen, flags, timeout);
+#else
+	ret = XENOMAI_SYSCALL5(sc_cobalt_recvmmsg, fd, msgvec, vlen, flags, timeout);
+#endif
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(recvmmsg(fd, msgvec, vlen, flags, timeout));
+}
+
+static ssize_t do_sendmsg(int fd, const struct msghdr *msg, int flags)
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_sendmsg, fd, msg, flags);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+COBALT_IMPL(ssize_t, sendmsg, (int fd, const struct msghdr *msg, int flags))
+{
+	int ret;
+
+	ret = do_sendmsg(fd, msg, flags);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(sendmsg(fd, msg, flags));
+}
+
+COBALT_IMPL(int, sendmmsg, (int fd, struct mmsghdr *msgvec,
+			    unsigned int vlen, unsigned int flags))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL4(sc_cobalt_sendmmsg, fd, msgvec, vlen, flags);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(sendmmsg(fd, msgvec, vlen, flags));
+}
+
+COBALT_IMPL(ssize_t, recvfrom, (int fd, void *buf, size_t len, int flags,
+				struct sockaddr *from, socklen_t *fromlen))
+{
+	struct iovec iov = {
+		.iov_base = buf,
+		.iov_len = len,
+	};
+	struct msghdr msg = {
+		.msg_name = from,
+		.msg_namelen = from != NULL ? *fromlen : 0,
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+	};
+	int ret;
+
+	ret = do_recvmsg(fd, &msg, flags);
+	if (ret != -EADV && ret != -ENOSYS) {
+		if (ret < 0)
+			return set_errno(ret);
+
+		if (from)
+			*fromlen = msg.msg_namelen;
+
+		return ret;
+	}
+
+	return __STD(recvfrom(fd, buf, len, flags, from, fromlen));
+}
+
+COBALT_IMPL(ssize_t, sendto, (int fd, const void *buf, size_t len, int flags,
+			      const struct sockaddr *to, socklen_t tolen))
+{
+	struct iovec iov = {
+		.iov_base = (void *)buf,
+		.iov_len = len,
+	};
+	struct msghdr msg = {
+		.msg_name = (struct sockaddr *)to,
+		.msg_namelen = tolen,
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+	};
+	int ret;
+
+	ret = do_sendmsg(fd, &msg, flags);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(sendto(fd, buf, len, flags, to, tolen));
+}
+
+COBALT_IMPL(ssize_t, recv, (int fd, void *buf, size_t len, int flags))
+{
+	struct iovec iov = {
+		.iov_base = (void *)buf,
+		.iov_len = len,
+	};
+	struct msghdr msg = {
+		.msg_name = NULL,
+		.msg_namelen = 0,
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+	};
+	int ret;
+
+	ret = do_recvmsg(fd, &msg, flags);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(recv(fd, buf, len, flags));
+}
+
+COBALT_IMPL(ssize_t, send, (int fd, const void *buf, size_t len, int flags))
+{
+	struct iovec iov = {
+		.iov_base = (void *)buf,
+		.iov_len = len,
+	};
+	struct msghdr msg = {
+		.msg_name = NULL,
+		.msg_namelen = 0,
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+	};
+	int ret;
+
+	ret = do_sendmsg(fd, &msg, flags);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(send(fd, buf, len, flags));
+}
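+
+/*
+ * A minimal usage sketch: with libcobalt interposing the wrappers
+ * above, plain BSD socket code runs unchanged; calls on RTDM sockets
+ * are served by the Cobalt core, anything else falls back to glibc
+ * through the -EADV/-ENOSYS paths.  The descriptor is assumed to come
+ * from a prior socket()/bind() sequence.
+ *
+ *	#include <sys/socket.h>
+ *
+ *	static int echo_once(int fd)
+ *	{
+ *		char buf[64];
+ *		ssize_t n = recv(fd, buf, sizeof(buf), 0);
+ *
+ *		if (n < 0)
+ *			return -1;
+ *
+ *		return send(fd, buf, n, 0) == n ? 0 : -1;
+ *	}
+ */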
+
+COBALT_IMPL(int, getsockopt, (int fd, int level, int optname, void *optval,
+			      socklen_t *optlen))
+{
+	struct _rtdm_getsockopt_args args = { level, optname, optval, optlen };
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_GETSOCKOPT, &args);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(getsockopt(fd, level, optname, optval, optlen));
+}
+
+COBALT_IMPL(int, setsockopt, (int fd, int level, int optname, const void *optval,
+			      socklen_t optlen))
+{
+	struct _rtdm_setsockopt_args args = {
+		level, optname, (void *)optval, optlen
+	};
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_SETSOCKOPT, &args);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(setsockopt(fd, level, optname, optval, optlen));
+}
+
+COBALT_IMPL(int, bind, (int fd, const struct sockaddr *my_addr, socklen_t addrlen))
+{
+	struct _rtdm_setsockaddr_args args = { my_addr, addrlen };
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_BIND, &args);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(bind(fd, my_addr, addrlen));
+}
+
+COBALT_IMPL(int, connect, (int fd, const struct sockaddr *serv_addr, socklen_t addrlen))
+{
+	struct _rtdm_setsockaddr_args args = { serv_addr, addrlen };
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_CONNECT, &args);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(connect(fd, serv_addr, addrlen));
+}
+
+COBALT_IMPL(int, listen, (int fd, int backlog))
+{
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_LISTEN, (void *)(long)backlog);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(listen(fd, backlog));
+}
+
+COBALT_IMPL(int, accept, (int fd, struct sockaddr *addr, socklen_t *addrlen))
+{
+	struct _rtdm_getsockaddr_args args = { addr, addrlen };
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_ACCEPT, &args);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(accept(fd, addr, addrlen));
+}
+
+COBALT_IMPL(int, getsockname, (int fd, struct sockaddr *name, socklen_t *namelen))
+{
+	struct _rtdm_getsockaddr_args args = { name, namelen };
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_GETSOCKNAME, &args);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(getsockname(fd, name, namelen));
+}
+
+COBALT_IMPL(int, getpeername, (int fd, struct sockaddr *name, socklen_t *namelen))
+{
+	struct _rtdm_getsockaddr_args args = { name, namelen };
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_GETPEERNAME, &args);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(getpeername(fd, name, namelen));
+}
+
+COBALT_IMPL(int, shutdown, (int fd, int how))
+{
+	int ret;
+
+	ret = do_ioctl(fd, _RTIOC_SHUTDOWN, (void *)(long)how);
+	if (ret != -EADV && ret != -ENOSYS)
+		return set_errno(ret);
+
+	return __STD(shutdown(fd, how));
+}
+
+COBALT_IMPL(void *, mmap64, (void *addr, size_t length, int prot, int flags,
+			     int fd, off64_t offset))
+{
+	struct _rtdm_mmap_request rma;
+	int ret;
+
+	if (fd < 0) /* We don't do anonymous mappings. */
+		goto regular;
+
+	/* RTDM ignores the address hint, and rejects MAP_FIXED. */
+	rma.length = length;
+	rma.offset = offset;
+	rma.prot = prot;
+	rma.flags = flags;
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_mmap, fd, &rma, &addr);
+	if (ret != -EADV && ret != -ENOSYS) {
+		ret = set_errno(ret);
+		if (ret)
+			return MAP_FAILED;
+
+		return addr;
+	}
+
+regular:
+#if mmap64 == mmap
+	return __STD(mmap(addr, length, prot, flags, fd, offset));
+#else
+	return __STD(mmap64(addr, length, prot, flags, fd, offset));
+#endif
+}
+
+COBALT_IMPL(void *, mmap, (void *addr, size_t length, int prot, int flags,
+			   int fd, off_t offset))
+{
+	return __COBALT(mmap64(addr, length, prot, flags, fd, offset));
+}
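+
+/*
+ * A minimal usage sketch for the mmap() wrapper; the RTDM device name
+ * below is a placeholder.  As noted above, RTDM ignores the address
+ * hint and rejects MAP_FIXED, and anonymous mappings (fd < 0) go
+ * straight to the regular mmap().
+ *
+ *	#include <fcntl.h>
+ *	#include <unistd.h>
+ *	#include <sys/mman.h>
+ *
+ *	void *map_device_region(size_t len)
+ *	{
+ *		int fd = open("/dev/rtdm/some_device", O_RDWR);
+ *		void *p;
+ *
+ *		if (fd < 0)
+ *			return MAP_FAILED;
+ *
+ *		p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
+ *		close(fd);
+ *		return p;
+ *	}
+ */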
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/sched.c b/kernel/xenomai-v3.2.4/lib/cobalt/sched.c
new file mode 100644
index 0000000..bdb8603
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/sched.c
@@ -0,0 +1,649 @@
+/*
+ * Copyright (C) 2005-2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <semaphore.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_scheduler Process scheduling
+ *
+ * Cobalt/POSIX process scheduling
+ *
+ * @see
+ * <a href="http://pubs.opengroup.org/onlinepubs/000095399/functions/xsh_chap02_08.html#tag_02_08_04">
+ * Specification.</a>
+ *
+ *@{
+ */
+
+/**
+ * Yield the processor.
+ *
+ * This function moves the current thread to the end of its priority group.
+ *
+ * @retval 0
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_yield.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-primary}
+ */
+COBALT_IMPL(int, sched_yield, (void))
+{
+	if (cobalt_get_current() == XN_NO_HANDLE ||
+	    (cobalt_get_current_mode() & (XNWEAK|XNRELAX)) == (XNWEAK|XNRELAX))
+		return __STD(sched_yield());
+
+	return -XENOMAI_SYSCALL0(sc_cobalt_sched_yield);
+}
+
+/**
+ * Get minimum priority of the specified scheduling policy.
+ *
+ * This service returns the minimum priority of the scheduling policy @a
+ * policy.
+ *
+ * @param policy scheduling policy.
+ *
+ * @retval the minimum priority value on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a policy is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_get_priority_min.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ *
+ * @note Fetching the minimum priority level of SCHED_FIFO, SCHED_RR
+ * or any Xenomai-specific policy never leads to a mode switch. Any
+ * other value of @a policy may switch the caller to secondary mode.
+ */
+COBALT_IMPL(int, sched_get_priority_min, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+		return __cobalt_std_fifo_minpri;
+	case SCHED_RR:
+		return __cobalt_std_rr_minpri;
+	default:
+		ret = XENOMAI_SYSCALL1(sc_cobalt_sched_minprio, policy);
+		if (ret >= 0)
+			return ret;
+		if (ret != -EINVAL) {
+			errno = -ret;
+			return -1;
+		}
+	}
+
+	return __STD(sched_get_priority_min(policy));
+}
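+
+/*
+ * A minimal usage sketch: the cached limits make probing the valid
+ * SCHED_FIFO range cheap, e.g. to pick a mid-range priority:
+ *
+ *	int lo = sched_get_priority_min(SCHED_FIFO);
+ *	int hi = sched_get_priority_max(SCHED_FIFO);
+ *	struct sched_param sp = { .sched_priority = lo + (hi - lo) / 2 };
+ */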
+
+/**
+ * Get extended minimum priority of the specified scheduling policy.
+ *
+ * This service returns the minimum priority of the scheduling policy
+ * @a policy, reflecting any Cobalt extension to the standard classes.
+ *
+ * @param policy scheduling policy.
+ *
+ * @retval the minimum priority value on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a policy is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_get_priority_min.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int sched_get_priority_min_ex(int policy)
+{
+	int ret;
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sched_minprio, policy);
+	if (ret >= 0)
+		return ret;
+	if (ret != -EINVAL) {
+		errno = -ret;
+		return -1;
+	}
+
+	return __STD(sched_get_priority_min(policy));
+}
+
+/**
+ * Get maximum priority of the specified scheduling policy.
+ *
+ * This service returns the maximum priority of the scheduling policy @a
+ * policy.
+ *
+ * @param policy scheduling policy.
+ *
+ * @retval the maximum priority value on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a policy is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_get_priority_max.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ *
+ * @note Fetching the maximum priority level of SCHED_FIFO, SCHED_RR
+ * or any Xenomai-specific policy never leads to a mode switch. Any
+ * other value of @a policy may switch the caller to secondary mode.
+ */
+COBALT_IMPL(int, sched_get_priority_max, (int policy))
+{
+	int ret;
+
+	switch (policy) {
+	case SCHED_FIFO:
+		return __cobalt_std_fifo_maxpri;
+	case SCHED_RR:
+		return __cobalt_std_rr_maxpri;
+	default:
+		ret = XENOMAI_SYSCALL1(sc_cobalt_sched_maxprio, policy);
+		if (ret >= 0)
+			return ret;
+		if (ret != -EINVAL) {
+			errno = -ret;
+			return -1;
+		}
+	}
+
+	return __STD(sched_get_priority_max(policy));
+}
+
+/**
+ * Set the scheduling policy and parameters of the specified process.
+ *
+ * This service sets the scheduling policy of the Cobalt process
+ * identified by @a pid to the value @a policy, and its scheduling
+ * parameters (i.e. its priority) to the value pointed to by @a param.
+ *
+ * If the current Linux thread ID is passed (see gettid(2)), this
+ * service turns the current regular POSIX thread into a Cobalt
+ * thread. If @a pid is neither the identifier of the current thread
+ * nor the identifier of an existing Cobalt thread, this service falls
+ * back to the regular sched_setscheduler() service.
+ *
+ * @param pid target process/thread;
+ *
+ * @param policy scheduling policy, one of SCHED_FIFO, SCHED_RR, or
+ * SCHED_OTHER;
+ *
+ * @param param scheduling parameters address.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a pid is invalid;
+ * - EINVAL, @a policy or @a param->sched_priority is invalid;
+ * - EAGAIN, insufficient memory available from the system heap,
+ *   increase CONFIG_XENO_OPT_SYS_HEAPSZ;
+ * - EFAULT, @a param is an invalid address;
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_setscheduler.html">
+ * Specification.</a>
+ *
+ * @note
+ *
+ * See sched_setscheduler_ex().
+ *
+ * @apitags{thread-unrestricted, switch-secondary, switch-primary}
+ */
+COBALT_IMPL(int, sched_setscheduler, (pid_t pid, int policy,
+				      const struct sched_param *param))
+{
+	int ret;
+
+	struct sched_param_ex param_ex = {
+		.sched_priority = param->sched_priority,
+	};
+
+	ret = sched_setscheduler_ex(pid, policy, &param_ex);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * Set extended scheduling policy of a process
+ *
+ * This service is an extended version of the sched_setscheduler()
+ * service, which supports Cobalt-specific and/or additional
+ * scheduling policies, not available with the host Linux environment.
+ * It sets the scheduling policy of the Cobalt process/thread
+ * identified by @a pid to the value @a policy, and the scheduling
+ * parameters (e.g. its priority) to the value pointed to by @a par.
+ *
+ * If the current Linux thread ID or zero is passed (see gettid(2)),
+ * this service may turn the current regular POSIX thread into a
+ * Cobalt thread.
+ *
+ * @param pid target process/thread. If zero, the current thread is
+ * assumed.
+ *
+ * @param policy scheduling policy, one of SCHED_WEAK, SCHED_FIFO,
+ * SCHED_COBALT, SCHED_RR, SCHED_SPORADIC, SCHED_TP, SCHED_QUOTA or
+ * SCHED_NORMAL;
+ *
+ * @param param_ex address of scheduling parameters. As a special
+ * exception, a negative sched_priority value is interpreted as if
+ * SCHED_WEAK was given in @a policy, using the absolute value of this
+ * parameter as the weak priority level.
+ *
+ * When CONFIG_XENO_OPT_SCHED_WEAK is enabled, SCHED_WEAK exhibits
+ * priority levels in the [0..99] range (inclusive). Otherwise,
+ * sched_priority must be zero for the SCHED_WEAK policy.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a pid is not found;
+ * - EINVAL, @a pid is negative, @a param_ex is NULL, or either of
+ *   @a policy and @a param_ex->sched_priority is invalid;
+ * - EAGAIN, insufficient memory available from the system heap,
+ *   increase CONFIG_XENO_OPT_SYS_HEAPSZ;
+ * - EFAULT, @a param_ex is an invalid address;
+ *
+ * @note
+ *
+ * See sched_setscheduler().
+ *
+ * @apitags{thread-unrestricted, switch-secondary, switch-primary}
+ */
+int sched_setscheduler_ex(pid_t pid,
+			  int policy, const struct sched_param_ex *param_ex)
+{
+	int ret, promoted, std_policy;
+	struct sched_param std_param;
+	__u32 u_winoff;
+
+	if (pid < 0 || param_ex == NULL)
+		return EINVAL;
+
+	/* See pthread_setschedparam_ex(). */
+
+	if (cobalt_eager_setsched()) {
+		std_policy = cobalt_xlate_schedparam(policy, param_ex, &std_param);
+		ret = __STD(sched_setscheduler(pid, std_policy, &std_param));
+		if (ret)
+			return errno;
+	}
+
+	ret = -XENOMAI_SYSCALL5(sc_cobalt_sched_setscheduler_ex,
+				pid, policy, param_ex,
+				&u_winoff, &promoted);
+
+	/*
+	 * If the kernel has no reference to the target thread, let glibc
+	 * handle the call.
+	 */
+	if (ret == ESRCH) {
+		std_policy = cobalt_xlate_schedparam(policy, param_ex,
+						     &std_param);
+		return __STD(sched_setscheduler(pid, std_policy, &std_param));
+	}
+
+	if (ret == 0 && promoted) {
+		cobalt_sigshadow_install_once();
+		cobalt_set_tsd(u_winoff);
+		cobalt_thread_harden();
+	}
+
+	return ret;
+}
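+
+/*
+ * A minimal usage sketch: promoting the calling thread to a Cobalt
+ * SCHED_FIFO thread; pid 0 means "the current thread", and the
+ * priority value is arbitrary.
+ *
+ *	#include <stdio.h>
+ *	#include <string.h>
+ *
+ *	struct sched_param_ex pex = { .sched_priority = 50 };
+ *	int ret = sched_setscheduler_ex(0, SCHED_FIFO, &pex);
+ *
+ *	if (ret)
+ *		fprintf(stderr, "setscheduler_ex: %s\n", strerror(ret));
+ */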
+
+/**
+ * Get the scheduling policy of the specified process.
+ *
+ * This service retrieves the scheduling policy of the Cobalt process
+ * identified by @a pid.
+ *
+ * If @a pid does not identify an existing Cobalt thread/process, this
+ * service falls back to the regular sched_getscheduler() service.
+ *
+ * @param pid target process/thread;
+ *
+ * @return the scheduling policy on success;
+ * @return -1 with @a errno set if:
+ * - ESRCH, @a pid is not found;
+ * - EINVAL, @a pid is negative;
+ * - EFAULT, the scheduling parameters could not be retrieved from the
+ *   kernel;
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_getscheduler.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, sched_getscheduler, (pid_t pid))
+{
+	struct sched_param_ex param_ex;
+	int ret, policy;
+
+	if (pid < 0) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_sched_getscheduler_ex,
+			       pid, &policy, &param_ex);
+	if (ret == -ESRCH)
+		return __STD(sched_getscheduler(pid));
+
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return policy;
+}
+
+/**
+ * Get extended scheduling policy of a process
+ *
+ * This service is an extended version of the sched_getscheduler()
+ * service, which supports Cobalt-specific and/or additional
+ * scheduling policies, not available with the host Linux environment.
+ * It retrieves the scheduling policy of the Cobalt process/thread
+ * identified by @a pid, and the associated scheduling parameters
+ * (e.g. the priority).
+ *
+ * @param pid queried process/thread. If zero, the current thread is
+ * assumed.
+ *
+ * @param policy_r a pointer to a variable receiving the current
+ * scheduling policy of @a pid.
+ *
+ * @param param_ex a pointer to a structure receiving the current
+ * scheduling parameters of @a pid.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a pid is not a Cobalt thread;
+ * - EINVAL, @a pid is negative or @a param_ex is NULL;
+ * - EFAULT, @a param_ex is an invalid address;
+ *
+ * @apitags{thread-unrestricted}
+ */
+int sched_getscheduler_ex(pid_t pid, int *policy_r,
+			  struct sched_param_ex *param_ex)
+{
+	if (pid < 0 || param_ex == NULL)
+		return EINVAL;
+
+	return -XENOMAI_SYSCALL3(sc_cobalt_sched_getscheduler_ex,
+				 pid, policy_r, param_ex);
+}
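+
+/*
+ * A minimal usage sketch: querying the extended policy and parameters
+ * of the current thread (pid 0).
+ *
+ *	struct sched_param_ex pex;
+ *	int policy;
+ *	int ret = sched_getscheduler_ex(0, &policy, &pex);
+ *
+ *	if (ret == 0)
+ *		printf("policy=%d prio=%d\n", policy, pex.sched_priority);
+ */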
+
+/**
+ * Get extended maximum priority of the specified scheduling policy.
+ *
+ * This service returns the maximum priority of the scheduling policy
+ * @a policy, reflecting any Cobalt extension to standard classes.
+ *
+ * @param policy scheduling policy.
+ *
+ * @retval the maximum priority value on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a policy is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_get_priority_max.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int sched_get_priority_max_ex(int policy)
+{
+	int ret;
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sched_maxprio, policy);
+	if (ret >= 0)
+		return ret;
+	if (ret != -EINVAL) {
+		errno = -ret;
+		return -1;
+	}
+
+	return __STD(sched_get_priority_max(policy));
+}
+
+/**
+ * Set CPU-specific scheduler settings for a policy
+ *
+ * A configuration is strictly local to the target @a cpu, and may
+ * differ from other processors.
+ *
+ * @param cpu processor to load the configuration of.
+ *
+ * @param policy scheduling policy to which the configuration data
+ * applies. Currently, SCHED_TP and SCHED_QUOTA are valid.
+ *
+ * @param config a pointer to the configuration data to load on @a
+ * cpu, applicable to @a policy.
+ *
+ * @par Settings applicable to SCHED_TP
+ *
+ * This call controls the temporal partitions for @a cpu, depending on
+ * the operation requested.
+ *
+ * - config.tp.op specifies the operation to perform:
+ *
+ * - @a sched_tp_install installs a new TP schedule on @a cpu, defined
+ *   by config.tp.windows[]. The global time frame is not activated
+ *   yet upon return from this request; @a sched_tp_start must be
+ *   issued to activate the temporal scheduling on @a cpu.
+ *
+ * - @a sched_tp_uninstall removes the current TP schedule from @a
+ *   cpu, releasing all the attached resources. If no TP schedule
+ *   exists on @a cpu, this request has no effect.
+ *
+ * - @a sched_tp_start enables the temporal scheduling on @a cpu,
+ * starting the global time frame. If no TP schedule exists on @a cpu,
+ * this action has no effect.
+ *
+ * - @a sched_tp_stop disables the temporal scheduling on @a cpu.  The
+ * current TP schedule is not uninstalled though, and may be
+ * re-started later by a @a sched_tp_start request.
+ * @attention As a consequence of this request, threads assigned to the
+ * un-scheduled partitions may be starved from CPU time.
+ *
+ * - for a @a sched_tp_install operation, config.tp.nr_windows
+ * indicates the number of elements present in the config.tp.windows[]
+ * array. If config.tp.nr_windows is zero, the action taken is
+ * identical to @a sched_tp_uninstall.
+ *
+ * - if config.tp.nr_windows is non-zero, config.tp.windows[] is a set
+ * of scheduling time slots for threads assigned to @a cpu. Each window
+ * is specified by its offset from the start of the global time frame
+ * (windows[].offset), its duration (windows[].duration), and the
+ * partition id it should activate during such period of time
+ * (windows[].ptid). This field is not considered for requests other
+ * than @a sched_tp_install.
+ *
+ * Time slots must be strictly contiguous, i.e. windows[n].offset +
+ * windows[n].duration shall equal windows[n + 1].offset.  If
+ * windows[].ptid is in the range
+ * [0..CONFIG_XENO_OPT_SCHED_TP_NRPART-1], SCHED_TP threads which
+ * belong to the partition being referred to may be given CPU time on
+ * @a cpu, from time windows[].offset to windows[].offset +
+ * windows[].duration, provided those threads are in a runnable state.
+ *
+ * Time holes between valid time slots may be defined using windows
+ * activating the pseudo partition -1. When such a window is active in
+ * the global time frame, no CPU time is available to SCHED_TP threads
+ * on @a cpu.
+ *
+ * @note The sched_tp_confsz(nr_windows) macro returns the length of
+ * config.tp depending on the number of time slots to be defined in
+ * config.tp.windows[], as specified by config.tp.nr_windows.
+ *
+ * @par Settings applicable to SCHED_QUOTA
+ *
+ * This call manages thread groups running on @a cpu, defining
+ * per-group quota for limiting their CPU consumption.
+ *
+ * - config.quota.op should define the operation to be carried
+ * out. Valid operations are:
+ *
+ *    - sched_quota_add for creating a new thread group on @a cpu.
+ *      The new group identifier will be written back to info.tgid
+ *      upon success. A new group is given no initial runtime budget
+ *      when created. sched_quota_set should be issued to enable it.
+ *
+ *    - sched_quota_remove for deleting a thread group on @a cpu. The
+ *      group identifier should be passed in config.quota.remove.tgid.
+ *
+ *    - sched_quota_set for updating the scheduling parameters of a
+ *      thread group defined on @a cpu. The group identifier should be
+ *      passed in config.quota.set.tgid, along with the allotted
+ *      percentage of the quota interval (config.quota.set.quota), and
+ *      the peak percentage allowed (config.quota.set.quota_peak).
+ *
+ * All three operations fill in the @a config.info structure with the
+ * information reflecting the state of the scheduler on @a cpu with
+ * respect to @a policy, after the requested changes have been
+ * applied.
+ *
+ * @param len overall length of the configuration data (in bytes).
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ *
+ * - EINVAL, @a cpu is invalid, or @a policy is unsupported by the
+ * current kernel configuration, @a len is invalid, or @a config
+ * contains invalid parameters.
+ *
+ * - ENOMEM, lack of memory to perform the operation.
+ *
+ * - EBUSY, with @a policy equal to SCHED_QUOTA, if an attempt is made
+ *   to remove a thread group which still manages threads.
+ *
+ * - ESRCH, with @a policy equal to SCHED_QUOTA, if the group
+ *   identifier required to perform the operation is not valid.
+ *
+ * @apitags{thread-unrestricted, switch-primary}
+ */
+int sched_setconfig_np(int cpu, int policy,
+		       const union sched_config *config, size_t len)
+{
+	return -XENOMAI_SYSCALL4(sc_cobalt_sched_setconfig_np,
+				 cpu, policy, config, len);
+}
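+
+/*
+ * A minimal usage sketch: installing a two-window TP schedule on CPU
+ * 0, then starting the global time frame.  The window offset/duration
+ * field types are assumed to be struct timespec as in the Cobalt
+ * uapi; window 1 is a time hole (pseudo partition -1), contiguous
+ * with window 0 as required, and <stdlib.h> is assumed for malloc().
+ *
+ *	size_t len = sched_tp_confsz(2);
+ *	union sched_config *cf = malloc(len);
+ *
+ *	cf->tp.op = sched_tp_install;
+ *	cf->tp.nr_windows = 2;
+ *	cf->tp.windows[0].offset = (struct timespec){ 0, 0 };
+ *	cf->tp.windows[0].duration = (struct timespec){ 0, 1000000 };
+ *	cf->tp.windows[0].ptid = 0;
+ *	cf->tp.windows[1].offset = (struct timespec){ 0, 1000000 };
+ *	cf->tp.windows[1].duration = (struct timespec){ 0, 1000000 };
+ *	cf->tp.windows[1].ptid = -1;
+ *	sched_setconfig_np(0, SCHED_TP, cf, len);
+ *
+ *	cf->tp.op = sched_tp_start;
+ *	sched_setconfig_np(0, SCHED_TP, cf, len);
+ */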
+
+/**
+ * Retrieve CPU-specific scheduler settings for a policy
+ *
+ * A configuration is strictly local to the target @a cpu, and may
+ * differ from other processors.
+ *
+ * @param cpu processor to retrieve the configuration of.
+ *
+ * @param policy scheduling policy to which the configuration data
+ * applies. Currently, only SCHED_TP and SCHED_QUOTA are valid input.
+ *
+ * @param config a pointer to a memory area which receives the
+ * configuration settings upon success of this call.
+ *
+ * @par SCHED_TP specifics
+ *
+ * On successful return, config->tp contains the TP schedule
+ * active on @a cpu.
+ *
+ * @par SCHED_QUOTA specifics
+ *
+ * On entry, config->quota.get.tgid must contain the thread group
+ * identifier to inquire about.
+ *
+ * On successful exit, config->quota.info contains the information
+ * related to the thread group referenced to by
+ * config->quota.get.tgid.
+ *
+ * @param[in, out] len_r a pointer to a variable for collecting the
+ * overall length of the configuration data returned (in bytes). This
+ * variable must contain the amount of space available in @a config
+ * when the request is issued.
+ *
+ * @return 0 on success, with the number of bytes copied to @a config
+ * stored at @a len_r;
+ * @return a positive error number if:
+ *
+ * - EINVAL, @a cpu is invalid, or @a policy is unsupported by the
+ * current kernel configuration, or the space indicated by @a len_r
+ * cannot hold the retrieved configuration data.
+ *
+ * - ESRCH, with @a policy equal to SCHED_QUOTA, if the group
+ *   identifier required to perform the operation is not valid
+ *   (i.e. config->quota.get.tgid is invalid).
+ *
+ * - ENOMEM, lack of memory to perform the operation.
+ *
+ * - ENOSPC, the space indicated by @a len_r is too short.
+ *
+ * @apitags{thread-unrestricted, switch-primary}
+ */
+ssize_t sched_getconfig_np(int cpu, int policy,
+			   union sched_config *config, size_t *len_r)
+{
+	ssize_t ret;
+
+	ret = XENOMAI_SYSCALL4(sc_cobalt_sched_getconfig_np,
+			       cpu, policy, config, *len_r);
+	if (ret < 0)
+		return -ret;
+
+	*len_r = ret;
+
+	return 0;
+}
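+
+/*
+ * A minimal usage sketch: inquiring about a SCHED_QUOTA group on CPU
+ * 0; tgid is a placeholder obtained from an earlier sched_quota_add
+ * operation.
+ *
+ *	union sched_config cf;
+ *	size_t len = sizeof(cf);
+ *
+ *	cf.quota.get.tgid = tgid;
+ *	if (sched_getconfig_np(0, SCHED_QUOTA, &cf, &len) == 0)
+ *		;	// inspect cf.quota.info; len holds the returned size
+ */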
+
+/** @} */
+
+int __cobalt_std_fifo_minpri,
+    __cobalt_std_fifo_maxpri;
+
+int __cobalt_std_rr_minpri,
+    __cobalt_std_rr_maxpri;
+
+void cobalt_sched_init(void)
+{
+	/* Fill in the standard priority limit cache. */
+	__cobalt_std_fifo_minpri = __STD(sched_get_priority_min(SCHED_FIFO));
+	__cobalt_std_fifo_maxpri = __STD(sched_get_priority_max(SCHED_FIFO));
+	__cobalt_std_rr_minpri = __STD(sched_get_priority_min(SCHED_RR));
+	__cobalt_std_rr_maxpri = __STD(sched_get_priority_max(SCHED_RR));
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/select.c b/kernel/xenomai-v3.2.4/lib/cobalt/select.c
new file mode 100644
index 0000000..7727012
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/select.c
@@ -0,0 +1,48 @@
+/*
+ * Copyright (C) 2010 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <pthread.h>
+#include <sys/select.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+COBALT_IMPL(int, select, (int __nfds, fd_set *__restrict __readfds,
+			  fd_set *__restrict __writefds,
+			  fd_set *__restrict __exceptfds,
+			  struct timeval *__restrict __timeout))
+{
+	int err, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	err = XENOMAI_SYSCALL5(sc_cobalt_select, __nfds,
+			       __readfds, __writefds, __exceptfds, __timeout);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (err == -EADV || err == -EPERM || err == -ENOSYS)
+		return __STD(select(__nfds, __readfds,
+				    __writefds, __exceptfds, __timeout));
+
+	if (err >= 0)
+		return err;
+
+	errno = -err;
+	return -1;
+}
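+
+/*
+ * A minimal usage sketch: a plain select() loop works on Cobalt file
+ * descriptors; descriptor sets the Cobalt core does not handle make
+ * the call fall back to the regular select(), as coded above.
+ *
+ *	fd_set rfds;
+ *
+ *	FD_ZERO(&rfds);
+ *	FD_SET(fd, &rfds);
+ *	if (select(fd + 1, &rfds, NULL, NULL, NULL) > 0 &&
+ *	    FD_ISSET(fd, &rfds))
+ *		;	// fd is readable
+ */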
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/semaphore.c b/kernel/xenomai-v3.2.4/lib/cobalt/semaphore.c
new file mode 100644
index 0000000..bff0054
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/semaphore.c
@@ -0,0 +1,654 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdlib.h>		/* For malloc & free. */
+#include <stdarg.h>
+#include <errno.h>
+#include <fcntl.h>		/* For O_CREAT. */
+#include <pthread.h>		/* For pthread_setcanceltype. */
+#include <semaphore.h>
+#include <asm/xenomai/syscall.h>
+#include <cobalt/uapi/sem.h>
+#include "internal.h"
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_sem Semaphores
+ *
+ * Cobalt/POSIX semaphore services
+ *
+ * Semaphores are counters for resources shared between threads. The basic
+ * operations on semaphores are: increment the counter atomically, and wait
+ * until the counter is non-null and decrement it atomically.
+ *
+ * Semaphores have a maximum value past which they cannot be incremented.  The
+ * macro @a SEM_VALUE_MAX is defined to be this maximum value.
+ *
+ *@{
+ */
+
+static inline
+struct cobalt_sem_state *sem_get_state(struct cobalt_sem_shadow *shadow)
+{
+	unsigned int pshared = shadow->state_offset < 0;
+
+	if (pshared)
+		return cobalt_umm_shared - shadow->state_offset;
+
+	return cobalt_umm_private + shadow->state_offset;
+}
+
+/**
+ * Initialize an unnamed semaphore.
+ *
+ * This service initializes the semaphore @a sm, with the value @a value.
+ *
+ * This service fails if @a sem is already initialized or is a named semaphore.
+ *
+ * @param sem the semaphore to be initialized;
+ *
+ * @param pshared if zero, the new semaphore may only be used by
+ * threads in the same process as the thread calling sem_init(); if
+ * non-zero, the new semaphore may be used by any thread that has
+ * access to the memory where the semaphore is allocated.
+ *
+ * @param value the semaphore initial value.
+ *
+ * @retval 0 on success,
+ * @retval -1 with @a errno set if:
+ * - EBUSY, the semaphore @a sem was already initialized;
+ * - EAGAIN, insufficient memory available to initialize the
+ *   semaphore, increase CONFIG_XENO_OPT_SHARED_HEAPSZ for a process-shared
+ *   semaphore, or CONFIG_XENO_OPT_PRIVATE_HEAPSZ for a process-private semaphore.
+ * - EAGAIN, no registry slot available, check/raise CONFIG_XENO_OPT_REGISTRY_NRSLOTS.
+ * - EINVAL, the @a value argument exceeds @a SEM_VALUE_MAX.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_init.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, sem_init, (sem_t *sem, int pshared, unsigned int value))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	int ret;
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_sem_init,
+			       _sem, pshared ? SEM_PSHARED : 0, value);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	cobalt_commit_memory(sem_get_state(_sem));
+
+	return 0;
+}
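+
+/*
+ * A minimal usage sketch: a process-private counting semaphore used
+ * as a token pool of depth 4.
+ *
+ *	sem_t pool;
+ *
+ *	if (sem_init(&pool, 0, 4))
+ *		perror("sem_init");
+ */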
+
+/**
+ * @fn int sem_destroy(sem_t *sem)
+ * @brief Destroy an unnamed semaphore
+ *
+ * This service destroys the semaphore @a sem. Threads currently
+ * blocked on @a sem are unblocked and the service they called returns
+ * -1 with @a errno set to EINVAL. The semaphore is then considered
+ * invalid by all semaphore services (they all fail with @a errno set
+ * to EINVAL) except sem_init().
+ *
+ * This service fails if @a sem is a named semaphore.
+ *
+ * @param sem the semaphore to be destroyed.
+ *
+ * @retval 0 on success by default. If SEM_WARNDEL was mentioned in
+ * sem_init_np(), the semaphore is deleted as requested and a strictly
+ * positive value is returned to warn the caller if threads were
+ * pending on it, otherwise zero is returned. If SEM_NOBUSYDEL was
+ * mentioned in sem_init_np(), sem_destroy() may succeed only if no
+ * thread is waiting on the semaphore to delete, otherwise it fails
+ * with EBUSY as described below.
+ *
+ * @retval -1 with @a errno set if:
+ * - EINVAL, the semaphore @a sem is invalid or a named semaphore;
+ * - EPERM, the semaphore @a sem is not process-shared and does not belong to the
+ *   current process.
+ * - EBUSY, a thread is currently waiting on the semaphore @a sem with
+ * SEM_NOBUSYDEL set.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_destroy.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, sem_destroy, (sem_t *sem))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	int ret;
+
+	if (_sem->magic != COBALT_SEM_MAGIC
+	    && _sem->magic != COBALT_NAMED_SEM_MAGIC) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sem_destroy, _sem);
+	if (ret < 0) {
+		errno = -ret;
+		return -1;
+	}
+
+	return ret;
+}
+
+/**
+ * @fn int sem_post(sem_t *sem)
+ * @brief Post a semaphore
+ *
+ * This service posts the semaphore @a sem.
+ *
+ * If no thread is currently blocked on this semaphore, its count is
+ * incremented unless "pulse" mode is enabled for it (see
+ * sem_init_np(), SEM_PULSE). If a thread is blocked on the semaphore,
+ * the thread heading the wait queue is unblocked.
+ *
+ * @param sem the semaphore to be signaled.
+ *
+ * @retval 0 on success;
+ * @retval -1 with errno set if:
+ * - EINVAL, the specified semaphore is invalid or uninitialized;
+ * - EPERM, the semaphore @a sem is not process-shared and does not belong to the
+ *   current process;
+ * - EAGAIN, the semaphore count is @a SEM_VALUE_MAX.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_post.html">
+ * Specification.</a>
+ *
+ * @apitags{unrestricted}
+ */
+COBALT_IMPL(int, sem_post, (sem_t *sem))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	struct cobalt_sem_state *state;
+	int value, ret, old, new;
+
+	if (_sem->magic != COBALT_SEM_MAGIC
+	    && _sem->magic != COBALT_NAMED_SEM_MAGIC) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	state = sem_get_state(_sem);
+	smp_mb();
+	value = atomic_read(&state->value);
+	if (value >= 0) {
+		if (state->flags & SEM_PULSE)
+			return 0;
+		do {
+			old = value;
+			new = value + 1;
+			value = atomic_cmpxchg(&state->value, old, new);
+			if (value < 0)
+				goto do_syscall;
+		} while (value != old);
+
+		return 0;
+	}
+
+  do_syscall:
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sem_post, _sem);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * @fn int sem_trywait(sem_t *sem)
+ * @brief Attempt to decrement a semaphore
+ *
+ * This service is equivalent to sem_wait(), except that it returns
+ * immediately if the semaphore @a sem is currently depleted, and that
+ * it is not a cancellation point.
+ *
+ * @param sem the semaphore to be decremented.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, the specified semaphore is invalid or uninitialized;
+ * - EPERM, the semaphore @a sem is not process-shared and does not belong to the
+ *   current process;
+ * - EAGAIN, the specified semaphore is currently fully depleted.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_trywait.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only}
+ */
+COBALT_IMPL(int, sem_trywait, (sem_t *sem))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	struct cobalt_sem_state *state;
+	int value, old, new;
+
+	if (_sem->magic != COBALT_SEM_MAGIC
+	    && _sem->magic != COBALT_NAMED_SEM_MAGIC) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	state = sem_get_state(_sem);
+	smp_mb();
+	value = atomic_read(&state->value);
+	if (value > 0) {
+		do {
+			old = value;
+			new = value - 1;
+			value = atomic_cmpxchg(&state->value, old, new);
+			if (value <= 0)
+				goto eagain;
+		} while (value != old);
+
+		return 0;
+	}
+eagain:
+	errno = EAGAIN;
+
+	return -1;
+}
+
+/**
+ * @fn int sem_wait(sem_t *sem)
+ * @brief Decrement a semaphore
+ *
+ * This service decrements the semaphore @a sem if its value is
+ * currently greater than 0. If the semaphore's value is currently
+ * zero, the calling thread is suspended until the semaphore is
+ * posted, or a signal is delivered to the calling thread.
+ *
+ * This service is a cancellation point for Cobalt threads (created
+ * with the pthread_create() service). When such a thread is cancelled
+ * while blocked in a call to this service, the semaphore state is
+ * left unchanged before the cancellation cleanup handlers are called.
+ *
+ * @param sem the semaphore to be decremented.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EPERM, the caller context is invalid;
+ * - EINVAL, the semaphore is invalid or uninitialized;
+ * - EPERM, the semaphore @a sem is not process-shared and does not belong to the
+ *   current process;
+ * - EINTR, the caller was interrupted by a signal while blocked in this
+ *   service.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_wait.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, sem_wait, (sem_t *sem))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	int ret, oldtype;
+
+	ret = __RT(sem_trywait(sem));
+	if (ret != -1 || errno != EAGAIN)
+		return ret;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sem_wait, _sem);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+/**
+ * @fn int sem_timedwait(sem_t *sem, const struct timespec *abs_timeout)
+ * @brief Attempt to decrement a semaphore with a time limit
+ *
+ * This service is equivalent to sem_wait(), except that the caller is only
+ * blocked until the timeout @a abs_timeout expires.
+ *
+ * @param sem the semaphore to be decremented;
+ *
+ * @param abs_timeout the timeout, expressed as an absolute value of
+ * the relevant clock for the semaphore, either CLOCK_MONOTONIC if
+ * SEM_RAWCLOCK was mentioned via sem_init_np(), or CLOCK_REALTIME
+ * otherwise.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EPERM, the caller context is invalid;
+ * - EINVAL, the semaphore is invalid or uninitialized;
+ * - EINVAL, the specified timeout is invalid;
+ * - EPERM, the semaphore @a sem is not process-shared and does not belong to the
+ *   current process;
+ * - EINTR, the caller was interrupted by a signal while blocked in this
+ *   service;
+ * - ETIMEDOUT, the semaphore could not be decremented and the
+ *   specified timeout expired.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_timedwait.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-primary}
+ */
+COBALT_IMPL(int, sem_timedwait, (sem_t *sem, const struct timespec *abs_timeout))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	int ret, oldtype;
+
+	ret = __RT(sem_trywait(sem));
+	if (ret != -1 || errno != EAGAIN)
+		return ret;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	ret = XENOMAI_SYSCALL2(sc_cobalt_sem_timedwait64, _sem,
+			       abs_timeout);
+#else
+	ret = XENOMAI_SYSCALL2(sc_cobalt_sem_timedwait, _sem, abs_timeout);
+#endif
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
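+
+/*
+ * A minimal usage sketch: waiting at most 100 us on the token pool
+ * from the sem_init() sketch above.  CLOCK_REALTIME is the reference
+ * clock here since SEM_RAWCLOCK was not set at init time; <time.h>
+ * and <errno.h> are assumed.
+ *
+ *	struct timespec ts;
+ *
+ *	clock_gettime(CLOCK_REALTIME, &ts);
+ *	ts.tv_nsec += 100000;
+ *	if (ts.tv_nsec >= 1000000000) {
+ *		ts.tv_sec++;
+ *		ts.tv_nsec -= 1000000000;
+ *	}
+ *	if (sem_timedwait(&pool, &ts) && errno == ETIMEDOUT)
+ *		;	// handle timeout
+ */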
+
+/**
+ * @fn int sem_getvalue(sem_t *sem, int *sval_r)
+ * @brief Get the value of a semaphore.
+ *
+ * This service stores, at the address @a sval_r, the current count of
+ * the semaphore @a sem. The state of the semaphore is unchanged.
+ *
+ * If the semaphore is currently fully depleted, the value stored is
+ * zero, unless SEM_REPORT was mentioned for a non-standard semaphore
+ * (see sem_init_np()), in which case the current number of waiters is
+ * returned as the semaphore's negative value (e.g. -2 would mean the
+ * semaphore is fully depleted AND two threads are currently pending
+ * on it).
+ *
+ * @param sem a semaphore;
+ *
+ * @param sval_r address where the semaphore count will be stored on success.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, the semaphore is invalid or uninitialized;
+ * - EPERM, the semaphore @a sem is not process-shared and does not belong to the
+ *   current process.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_getvalue.html">
+ * Specification.</a>
+ *
+ * @apitags{unrestricted}
+ */
+COBALT_IMPL(int, sem_getvalue, (sem_t *sem, int *sval))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	struct cobalt_sem_state *state;
+	int value;
+
+	if (_sem->magic != COBALT_SEM_MAGIC
+	    && _sem->magic != COBALT_NAMED_SEM_MAGIC) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	state = sem_get_state(_sem);
+	smp_mb();
+	value = atomic_read(&state->value);
+	if (value < 0 && (state->flags & SEM_REPORT) == 0)
+		value = 0;
+
+	*sval = value;
+
+	return 0;
+}
+
+/**
+ * @fn int sem_open(const char *name, int oflags, mode_t mode, unsigned int value)
+ * @brief Open a named semaphore
+ *
+ * This service opens the semaphore named @a name.
+ *
+ * If no semaphore named @a name exists and @a oflags has the @a O_CREAT bit
+ * set, the semaphore is created by this function, using two more arguments:
+ * - a @a mode argument, of type @b mode_t, currently ignored;
+ * - a @a value argument, of type @b unsigned, specifying the initial value of
+ *   the created semaphore.
+ *
+ * If @a oflags has the two bits @a O_CREAT and @a O_EXCL set and the semaphore
+ * already exists, this service fails.
+ *
+ * @a name may be any arbitrary string, in which slashes have no particular
+ * meaning. However, for portability, using a name which starts with a slash and
+ * contains no other slash is recommended.
+ *
+ * If sem_open() is called from the same process several times for the
+ * same @a name, the same address is returned.
+ *
+ * @param name the name of the semaphore to be created;
+ *
+ * @param oflags flags.
+ *
+ * @return the address of the named semaphore on success;
+ * @return SEM_FAILED with @a errno set if:
+ * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
+ * - EEXIST, the bits @a O_CREAT and @a O_EXCL were set in @a oflags and the
+ *   named semaphore already exists;
+ * - ENOENT, the bit @a O_CREAT is not set in @a oflags and the named semaphore
+ *   does not exist;
+ * - ENOMEM, not enough memory to create the semaphore. A usual
+ *   suspect is a shortage from system heap memory, which may be
+ *   fixed by increasing CONFIG_XENO_OPT_SYS_HEAPSZ;
+ * - EINVAL, the @a value argument exceeds @a SEM_VALUE_MAX.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_open.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ */
+COBALT_IMPL(sem_t *, sem_open, (const char *name, int oflags, ...))
+{
+	union cobalt_sem_union *sem, *rsem;
+	unsigned value = 0;
+	mode_t mode = 0;
+	va_list ap;
+	int err;
+
+	if (oflags & O_CREAT) {
+		va_start(ap, oflags);
+		mode = va_arg(ap, int);
+		value = va_arg(ap, unsigned);
+		va_end(ap);
+	}
+
+	rsem = sem = malloc(sizeof(*sem));
+	if (rsem == NULL) {
+		err = -ENOMEM;
+		goto error;
+	}
+
+	err = XENOMAI_SYSCALL5(sc_cobalt_sem_open,
+			       &rsem, name, oflags, mode, value);
+	if (err == 0) {
+		if (rsem != sem)
+			free(sem);
+		return &rsem->native_sem;
+	}
+
+	free(sem);
+error:
+	errno = -err;
+
+	return SEM_FAILED;
+}
+
+/**
+ * @fn int sem_close(sem_t *sem)
+ * @brief Close a named semaphore
+ *
+ * This service closes the semaphore @a sem. The semaphore is
+ * destroyed only when unlinked with a call to the sem_unlink()
+ * service and when each call to sem_open() matches a call to this
+ * service.
+ *
+ * When a semaphore is destroyed, the memory it used is returned to the system
+ * heap, so that further references to this semaphore are not guaranteed to
+ * fail, as is the case for unnamed semaphores.
+ *
+ * This service fails if @a sem is an unnamed semaphore.
+ *
+ * @param sem the semaphore to be closed.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, the semaphore @a sem is invalid or is an unnamed semaphore.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_close.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ */
+COBALT_IMPL(int, sem_close, (sem_t *sem))
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	int ret;
+
+	if (_sem->magic != COBALT_NAMED_SEM_MAGIC) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sem_close, _sem);
+	if (ret < 0) {
+		errno = -ret;
+		return -1;
+	}
+	if (ret)
+		free(sem);
+
+	return 0;
+}
+
+/**
+ * @fn int sem_unlink(const char *name)
+ * @brief Unlink a named semaphore
+ *
+ * This service unlinks the semaphore named @a name. This semaphore is not
+ * destroyed until all references obtained with sem_open() are closed by calling
+ * sem_close(). However, the unlinked semaphore may no longer be reached with
+ * the sem_open() service.
+ *
+ * When a semaphore is destroyed, the memory it used is returned to the system
+ * heap, so that further references to this semaphore are not guaranteed to
+ * fail, as is the case for unnamed semaphores.
+ *
+ * @param name the name of the semaphore to be unlinked.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - ENAMETOOLONG, the length of the @a name argument exceeds 64 characters;
+ * - ENOENT, the named semaphore does not exist.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sem_unlink.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ */
+COBALT_IMPL(int, sem_unlink, (const char *name))
+{
+	int ret;
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sem_unlink, name);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
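+
+/*
+ * A minimal usage sketch: the typical named-semaphore lifecycle
+ * across the services above; the name is arbitrary, and <fcntl.h> is
+ * assumed for O_CREAT.
+ *
+ *	sem_t *s = sem_open("/jobs", O_CREAT, 0666, 0);
+ *
+ *	if (s == SEM_FAILED)
+ *		perror("sem_open");
+ *	else {
+ *		sem_post(s);
+ *		sem_close(s);
+ *		sem_unlink("/jobs");
+ *	}
+ */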
+
+int sem_init_np(sem_t *sem, int flags, unsigned int value)
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	int ret;
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_sem_init, _sem, flags, value);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+int sem_broadcast_np(sem_t *sem)
+{
+	struct cobalt_sem_shadow *_sem = &((union cobalt_sem_union *)sem)->shadow_sem;
+	struct cobalt_sem_state *state;
+	int value, ret;
+
+	if (_sem->magic != COBALT_SEM_MAGIC
+	    && _sem->magic != COBALT_NAMED_SEM_MAGIC) {
+		errno = EINVAL;
+		return -1;
+	}
+
+	state = sem_get_state(_sem);
+	smp_mb();
+	value = atomic_read(&state->value);
+	if (value >= 0)
+		return 0;
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sem_broadcast_np, _sem);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/signal.c b/kernel/xenomai-v3.2.4/lib/cobalt/signal.c
new file mode 100644
index 0000000..40d315e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/signal.c
@@ -0,0 +1,128 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdlib.h>
+#include <stdarg.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <signal.h>
+#include <cobalt/uapi/signal.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+COBALT_IMPL(int, sigwait, (const sigset_t *set, int *sig))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_sigwait, set, sig);
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+COBALT_IMPL(int, sigwaitinfo, (const sigset_t *set, siginfo_t *si))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+	ret = XENOMAI_SYSCALL2(sc_cobalt_sigwaitinfo, set, si);
+	if (ret < 0) {
+		errno = -ret;
+		ret = -1;
+	}
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+COBALT_IMPL(int, sigtimedwait, (const sigset_t *set, siginfo_t *si,
+				const struct timespec *timeout))
+{
+	int ret, oldtype;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+
+#ifdef __USE_TIME_BITS64
+	ret = XENOMAI_SYSCALL3(sc_cobalt_sigtimedwait64, set, si, timeout);
+#else
+	ret = XENOMAI_SYSCALL3(sc_cobalt_sigtimedwait, set, si, timeout);
+#endif
+	if (ret < 0) {
+		errno = -ret;
+		ret = -1;
+	}
+
+	pthread_setcanceltype(oldtype, NULL);
+
+	return ret;
+}
+
+COBALT_IMPL(int, sigpending, (sigset_t *set))
+{
+	int ret;
+
+	ret = XENOMAI_SYSCALL1(sc_cobalt_sigpending, set);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+COBALT_IMPL(int, kill, (pid_t pid, int sig))
+{
+	int ret;
+
+	/*
+	 * Delegate processing of special pids to the regular
+	 * kernel. We only deal with thread-directed signals.
+	 */
+	if (pid <= 0)
+		return __STD(kill(pid, sig));
+
+	ret = XENOMAI_SYSCALL2(sc_cobalt_kill, pid, sig);
+	if (ret) {
+		/* Retry with the regular kill if no RT target was found. */
+		if (ret == -ESRCH)
+			return __STD(kill(pid, sig));
+
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
+
+COBALT_IMPL(int, sigqueue, (pid_t pid, int sig, const union sigval value))
+{
+	int ret;
+
+	ret = XENOMAI_SYSCALL3(sc_cobalt_sigqueue, pid, sig, &value);
+	if (ret) {
+		errno = -ret;
+		return -1;
+	}
+
+	return 0;
+}
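+
+/*
+ * A minimal usage sketch: queueing a payload-carrying signal to a
+ * thread; target_pid and the signal number are placeholders.
+ *
+ *	union sigval v = { .sival_int = 42 };
+ *
+ *	if (sigqueue(target_pid, SIGUSR1, v))
+ *		perror("sigqueue");
+ */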
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/sigshadow.c b/kernel/xenomai-v3.2.4/lib/cobalt/sigshadow.c
new file mode 100644
index 0000000..68d3e62
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/sigshadow.c
@@ -0,0 +1,133 @@
+/*
+ * Copyright (C) 2007 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <pthread.h>
+#include <signal.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+#if !HAVE_BACKTRACE
+static inline int backtrace(void **buffer, int size)
+{
+	/* Not all *libcs support backtrace(). */
+	return 0;
+}
+#else
+#include <execinfo.h>
+#endif
+
+static struct sigaction sigshadow_action_orig;
+
+/*
+ * The following handler is part of the inner user-interface: should
+ * remain extern.
+ */
+int cobalt_sigshadow_handler(int sig, siginfo_t *si, void *ctxt)
+{
+	void *frames[SIGSHADOW_BACKTRACE_DEPTH];
+	int action, arg, nr, skip;
+
+	if (si->si_code != SI_QUEUE)
+		return 0;
+
+	action = sigshadow_action(si->si_int);
+
+	switch (action) {
+	case SIGSHADOW_ACTION_HARDEN:
+		XENOMAI_SYSCALL1(sc_cobalt_migrate, COBALT_PRIMARY);
+		break;
+	case SIGSHADOW_ACTION_BACKTRACE:
+		arg = sigshadow_arg(si->si_int);
+		nr = backtrace(frames, sizeof(frames) / sizeof(frames[0]));
+		/* Skip the sighandler context. */
+		skip = nr > 3 ? 3 : 0;
+		XENOMAI_SYSCALL3(sc_cobalt_backtrace, nr - skip, frames + skip, arg);
+		break;
+	case SIGSHADOW_ACTION_HOME:
+		/*
+		 * We have been asked to call home from the current
+		 * context: sending a query for retrieving our handle
+		 * will just do this.
+		 */
+		cobalt_get_current_slow();
+		break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
+static void sigshadow_handler(int sig, siginfo_t *si, void *ctxt)
+{
+	const struct sigaction *const sa = &sigshadow_action_orig;
+	sigset_t saved_sigset;
+
+	if (cobalt_sigshadow_handler(sig, si, ctxt))
+		return;
+
+	/* Not a signal sent by the Cobalt core */
+	if (((sa->sa_flags & SA_SIGINFO) == 0 && sa->sa_handler == NULL)
+	    || ((sa->sa_flags & SA_SIGINFO) && sa->sa_sigaction == NULL))
+		return;
+
+	pthread_sigmask(SIG_SETMASK, &sa->sa_mask, &saved_sigset);
+
+	if (!(sa->sa_flags & SA_SIGINFO))
+		sa->sa_handler(sig);
+	else
+		sa->sa_sigaction(sig, si, ctxt);
+
+	pthread_sigmask(SIG_SETMASK, &saved_sigset, NULL);
+}
+
+static void install_sigshadow(void)
+{
+	void *dummy[SIGSHADOW_BACKTRACE_DEPTH];
+	struct sigaction new_sigshadow_action;
+	sigset_t saved_sigset;
+	sigset_t mask_sigset;
+
+	/*
+	 * Kickstart backtrace() so that it may call malloc() from a
+	 * safe context right now, not later on from the sigshadow
+	 * handler.
+	 */
+	backtrace(dummy, SIGSHADOW_BACKTRACE_DEPTH);
+
+	sigemptyset(&mask_sigset);
+	sigaddset(&mask_sigset, SIGSHADOW);
+
+	new_sigshadow_action.sa_flags = SA_SIGINFO | SA_RESTART;
+	new_sigshadow_action.sa_sigaction = sigshadow_handler;
+	sigemptyset(&new_sigshadow_action.sa_mask);
+	pthread_sigmask(SIG_BLOCK, &mask_sigset, &saved_sigset);
+
+	sigaction(SIGSHADOW,
+		  &new_sigshadow_action, &sigshadow_action_orig);
+
+	if ((sigshadow_action_orig.sa_flags & SA_NODEFER) == 0)
+		sigaddset(&sigshadow_action_orig.sa_mask, SIGSHADOW);
+
+	pthread_sigmask(SIG_SETMASK, &saved_sigset, NULL);
+}
+
+void cobalt_sigshadow_install_once(void)
+{
+	static pthread_once_t once = PTHREAD_ONCE_INIT;
+	pthread_once(&once, install_sigshadow);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/thread.c b/kernel/xenomai-v3.2.4/lib/cobalt/thread.c
new file mode 100644
index 0000000..bb23cdc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/thread.c
@@ -0,0 +1,819 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stddef.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sys/types.h>
+#include <semaphore.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_thread Thread management
+ *
+ * Cobalt (POSIX) thread management services
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/xsh_chap02_09.html#tag_02_09">
+ * Specification.</a>
+ *
+ *@{
+ */
+
+static pthread_attr_ex_t default_attr_ex;
+
+static int linuxthreads;
+
+struct pthread_iargs {
+	struct sched_param_ex param_ex;
+	int policy;
+	int personality;
+	void *(*start)(void *);
+	void *arg;
+	int parent_prio;
+	sem_t sync;
+	int ret;
+};
+
+static void *cobalt_thread_trampoline(void *p)
+{
+	/*
+	 * Volatile is to prevent (too) smart gcc releases from
+	 * trashing the syscall registers (see later comment).
+	 */
+	int personality, parent_prio, policy, std_policy;
+	volatile pthread_t ptid = pthread_self();
+	void *(*start)(void *), *arg, *retval;
+	struct pthread_iargs *iargs = p;
+	struct sched_param_ex param_ex;
+	struct sched_param std_param;
+	__u32 u_winoff;
+	long ret;
+
+	cobalt_sigshadow_install_once();
+
+	personality = iargs->personality;
+	param_ex = iargs->param_ex;
+	policy = iargs->policy;
+	parent_prio = iargs->parent_prio;
+	start = iargs->start;
+	arg = iargs->arg;
+
+	/*
+	 * We don't have any Xenomai extension yet, set our base
+	 * scheduling parameters for the host kernel first.
+	 */
+	std_policy = cobalt_xlate_schedparam(policy, &param_ex, &std_param);
+	ret = __STD(pthread_setschedparam(ptid, std_policy, &std_param));
+	if (ret)
+		goto sync_with_creator;
+
+	/*
+	 * Do _not_ inline the call to pthread_self() in the syscall
+	 * macro: this trashes the syscall regs on some archs.
+	 */
+	ret = -XENOMAI_SYSCALL5(sc_cobalt_thread_create, ptid,
+				policy, &param_ex, personality, &u_winoff);
+	if (ret == 0)
+		cobalt_set_tsd(u_winoff);
+
+	/*
+	 * We must access anything we'll need from *iargs before
+	 * posting the sync semaphore, since our released parent could
+	 * unwind the stack space the iargs struct is laid on before
+	 * we actually get the CPU back.
+	 */
+sync_with_creator:
+	iargs->ret = ret;
+	__STD(sem_post(&iargs->sync));
+	if (ret)
+		return (void *)ret;
+
+	/*
+	 * If the parent thread runs with the same priority as we do,
+	 * then we should yield the CPU to it, to preserve the
+	 * scheduling order.
+	 */
+	if (param_ex.sched_priority == parent_prio)
+		__STD(sched_yield());
+
+	cobalt_thread_harden();
+
+	retval = start(arg);
+
+	pthread_setmode_np(PTHREAD_WARNSW, 0, NULL);
+
+	return retval;
+}
+
+int pthread_create_ex(pthread_t *ptid_r,
+		      const pthread_attr_ex_t *attr_ex,
+		      void *(*start) (void *), void *arg)
+{
+	int inherit, detachstate, ret;
+	struct pthread_iargs iargs;
+	struct sched_param param;
+	struct timespec timeout;
+	pthread_attr_t attr;
+	pthread_t lptid;
+
+	if (attr_ex == NULL)
+		attr_ex = &default_attr_ex;
+
+	pthread_getschedparam_ex(pthread_self(), &iargs.policy, &iargs.param_ex);
+	iargs.parent_prio = iargs.param_ex.sched_priority;
+	memcpy(&attr, &attr_ex->std, sizeof(attr));
+
+	pthread_attr_getinheritsched(&attr, &inherit);
+	if (inherit == PTHREAD_EXPLICIT_SCHED) {
+		pthread_attr_getschedpolicy_ex(attr_ex, &iargs.policy);
+		pthread_attr_getschedparam_ex(attr_ex, &iargs.param_ex);
+	}
+
+	if (linuxthreads && geteuid()) {
+		/*
+		 * Work around a linuxthreads shortcoming: it does not
+		 * believe it may get real-time scheduling powers as
+		 * non-root, so it overeagerly fails the thread
+		 * creation.
+		 */
+		pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+		param.sched_priority = 0;
+		pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
+		pthread_attr_setschedparam(&attr, &param);
+	} else
+		/*
+		 * Get the created thread to temporarily inherit the
+		 * caller priority (we mean linux/libc priority here,
+		 * as we use a libc call to create the thread).
+		 */
+		pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED);
+
+	pthread_attr_getdetachstate(&attr, &detachstate);
+	pthread_attr_getpersonality_ex(attr_ex, &iargs.personality);
+
+	/*
+	 * First start a regular POSIX thread, then mate a Cobalt
+	 * thread to it.
+	 */
+	iargs.start = start;
+	iargs.arg = arg;
+	iargs.ret = EAGAIN;
+	__STD(sem_init(&iargs.sync, 0, 0));
+
+	ret = __STD(pthread_create(&lptid, &attr, cobalt_thread_trampoline, &iargs));
+	if (ret) {
+		__STD(sem_destroy(&iargs.sync));
+		return ret;
+	}
+
+	__STD(clock_gettime(CLOCK_REALTIME, &timeout));
+	timeout.tv_sec += 5;
+	timeout.tv_nsec = 0;
+
+	for (;;) {
+		ret = __STD(sem_timedwait(&iargs.sync, &timeout));
+		if (ret && errno == EINTR)
+			continue;
+		if (ret == 0) {
+			ret = iargs.ret;
+			if (ret == 0)
+				*ptid_r = lptid;
+			break;
+		} else if (errno == ETIMEDOUT) {
+			ret = EAGAIN;
+			break;
+		}
+		ret = -errno;
+		panic("regular sem_timedwait() failed with %s", symerror(ret));
+	}
+
+	__STD(sem_destroy(&iargs.sync));
+
+	cobalt_thread_harden(); /* May fail if regular thread. */
+
+	return ret;
+}
+
+/**
+ * @fn int pthread_create(pthread_t *ptid_r, const pthread_attr_t *attr, void *(*start)(void *), void *arg)
+ * @brief Create a new thread
+ *
+ * This service creates a thread managed by the Cobalt core in a dual
+ * kernel configuration.
+ *
+ * Attributes of the new thread depend on the @a attr argument. If @a
+ * attr is NULL, default values for these attributes are used.
+ *
+ * Returning from the @a start routine has the same effect as calling
+ * pthread_exit() with the return value.
+ *
+ * @param ptid_r address where the identifier of the new thread will be stored on
+ * success;
+ *
+ * @param attr thread attributes;
+ *
+ * @param start thread start routine;
+ *
+ * @param arg opaque user-supplied argument passed to @a start;
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EINVAL, @a attr is invalid;
+ * - EAGAIN, insufficient memory available from the system heap to create a new
+ *   thread, increase CONFIG_XENO_OPT_SYS_HEAPSZ;
+ * - EINVAL, thread attribute @a inheritsched is set to PTHREAD_INHERIT_SCHED
+ *   and the calling thread does not belong to the Cobalt interface;
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_create.html">
+ * Specification.</a>
+ *
+ * @note
+ *
+ * When creating a Cobalt thread for the first time, libcobalt
+ * installs an internal handler for the SIGSHADOW signal. If a
+ * handler was already installed for this signal before that point,
+ * that handler will only be called for SIGSHADOW occurrences Xenomai
+ * did not send.
+ *
+ * If, however, an application-defined handler for SIGSHADOW is
+ * installed afterwards, overriding the libcobalt handler, the new
+ * handler is required to call cobalt_sigshadow_handler() on
+ * entry. This routine returns a non-zero value for every occurrence
+ * of SIGSHADOW issued by the Cobalt core. If zero instead, the
+ * application-defined handler should process the signal.
+ *
+ * <b>int cobalt_sigshadow_handler(int sig, siginfo_t *si, void *ctxt);</b>
+ *
+ * You should register your handler with sigaction(2), setting the
+ * SA_SIGINFO flag.
+ *
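+ * A minimal sketch of such a chained handler (illustrative only; the
+ * application-specific processing is left out):
+ *
+ * @code
+ * static void app_sigshadow_handler(int sig, siginfo_t *si, void *ctxt)
+ * {
+ *	if (cobalt_sigshadow_handler(sig, si, ctxt))
+ *		return; // Occurrence issued by the Cobalt core.
+ *	// Otherwise, process the application's own use of SIGSHADOW.
+ * }
+ * @endcode
+ *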
+ * @apitags{thread-unrestricted, switch-secondary}
+ */
+COBALT_IMPL(int, pthread_create, (pthread_t *ptid_r,
+				  const pthread_attr_t *attr,
+				  void *(*start) (void *), void *arg))
+{
+	pthread_attr_ex_t attr_ex;
+	struct sched_param param;
+	int policy;
+
+	if (attr == NULL)
+		attr = &default_attr_ex.std;
+
+	memcpy(&attr_ex.std, attr, sizeof(*attr));
+	pthread_attr_getschedpolicy(attr, &policy);
+	attr_ex.nonstd.sched_policy = policy;
+	pthread_attr_getschedparam(attr, &param);
+	attr_ex.nonstd.sched_param.sched_priority = param.sched_priority;
+	attr_ex.nonstd.personality = 0; /* Default: use Cobalt. */
+
+	return pthread_create_ex(ptid_r, &attr_ex, start, arg);
+}
+
+/**
+ * Set the mode of the current thread.
+ *
+ * This service sets the mode of the calling thread, which affects its
+ * behavior under particular circumstances. @a clrmask and @a setmask
+ * are two masks of mode bits which are respectively cleared and set
+ * by pthread_setmode_np():
+ *
+ * - PTHREAD_LOCK_SCHED, when set, locks the scheduler, which prevents
+ *   the current thread from being switched out until the scheduler is
+ *   unlocked. Unless PTHREAD_DISABLE_LOCKBREAK is also set, the
+ *   thread may still block, dropping the lock temporarily, in which
+ *   case, the lock will be reacquired automatically when the thread
+ *   resumes execution. When PTHREAD_LOCK_SCHED is cleared, the
+ *   current thread drops the scheduler lock, and the rescheduling
+ *   procedure is initiated.
+ *
+ * - When set, PTHREAD_WARNSW enables debugging notifications for the
+ *   current thread.  A SIGDEBUG (Linux-originated) signal is sent when
+ *   the following atypical or abnormal behavior is detected:
+ *
+ *   - the current thread switches to secondary mode. Such
+ *     notification comes in handy for detecting spurious relaxes,
+ *     with one of the following reason codes:
+ *
+ *     - SIGDEBUG_MIGRATE_SYSCALL, if the thread issued a regular
+ *       Linux system call.
+ *
+ *     - SIGDEBUG_MIGRATE_SIGNAL, if the thread had to leave real-time
+ *       mode for handling a Linux signal.
+ *
+ *     - SIGDEBUG_MIGRATE_FAULT, if the thread had to leave real-time
+ *       mode for handling a processor fault/exception.
+ *
+ *   - the current thread is sleeping on a Cobalt mutex currently
+ *     owned by a thread running in secondary mode, which reveals a
+ *     priority inversion. In such an event, the reason code passed to
+ *     the signal handler will be SIGDEBUG_MIGRATE_PRIOINV.
+ *
+ *   - the current thread is about to sleep while holding a Cobalt
+ *     mutex, and CONFIG_XENO_OPT_DEBUG_MUTEX_SLEEP is enabled in the
+ *     kernel configuration.  In such an event, the reason code passed
+ *     to the signal handler will be SIGDEBUG_MUTEX_SLEEP.  Blocking
+ *     for acquiring a mutex does not trigger such signal though.
+ *
+ *   - the current thread has enabled PTHREAD_DISABLE_LOCKBREAK and
+ *     PTHREAD_LOCK_SCHED, then attempts to block on a Cobalt service,
+ *     which would cause a lock break. In such an event, the reason
+ *     code passed to the signal handler will be SIGDEBUG_LOCK_BREAK.
+ *
+ * - PTHREAD_DISABLE_LOCKBREAK disallows breaking the scheduler
+ *   lock. Normally, the scheduler lock is dropped implicitly when the
+ *   current owner blocks, then reacquired automatically when the
+ *   owner resumes execution. If PTHREAD_DISABLE_LOCKBREAK is set, the
+ *   scheduler lock owner would return with EINTR immediately from any
+ *   blocking call instead (see PTHREAD_WARNSW notifications).
+ *
+ * - PTHREAD_CONFORMING can be passed in @a setmask to switch the
+ *   current Cobalt thread to its preferred runtime mode. The only
+ *   meaningful use of this switch is to force a real-time thread back
+ *   to primary mode eagerly. Other usages have no effect.
+ *
+ * This service is a non-portable extension of the Cobalt interface.
+ *
+ * @param clrmask set of bits to be cleared.
+ *
+ * @param setmask set of bits to be set.
+ *
+ * @param mode_r If non-NULL, @a mode_r must be a pointer to a memory
+ * location which will be written upon success with the previous set
+ * of active mode bits. If NULL, the previous set of active mode bits
+ * will not be returned.
+ *
+ * @return 0 on success, otherwise:
+ *
+ * - EINVAL, some bit in @a clrmask or @a setmask is invalid.
+ *
+ * @note Setting @a clrmask and @a setmask to zero leads to a nop,
+ * only returning the previous mode if @a mode_r is a valid address.
+ *
+ * @attention Issuing PTHREAD_CONFORMING is most likely useless or even
+ * introduces pure overhead in regular applications, since the Cobalt
+ * core performs the necessary mode switches only when required.
+ *
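+ * For instance, a thread may ask to be notified of any spurious
+ * relax while running a time-critical section (hedged sketch, error
+ * checking omitted):
+ *
+ * @code
+ * int oldmode;
+ *
+ * pthread_setmode_np(0, PTHREAD_WARNSW, &oldmode);
+ * // ... time-critical section: any switch to secondary
+ * // mode now triggers SIGDEBUG ...
+ * pthread_setmode_np(PTHREAD_WARNSW, 0, NULL);
+ * @endcode
+ *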
+ * @apitags{xthread-only, switch-primary}
+ */
+int pthread_setmode_np(int clrmask, int setmask, int *mode_r)
+{
+	return -XENOMAI_SYSCALL3(sc_cobalt_thread_setmode,
+				 clrmask, setmask, mode_r);
+}
+
+/**
+ * Set a thread name.
+ *
+ * This service sets the name of @a thread to @a name. This name is
+ * used when displaying information in /proc/xenomai/sched.
+ *
+ * This service is a non-portable extension of the Cobalt interface.
+ *
+ * @param thread target thread;
+ *
+ * @param name name of the thread.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid.
+ *
+ * @apitags{xthread-only}
+ */
+COBALT_IMPL(int, pthread_setname_np, (pthread_t thread, const char *name))
+{
+	return -XENOMAI_SYSCALL2(sc_cobalt_thread_setname, thread, name);
+}
+
+/**
+ * Send a signal to a thread.
+ *
+ * This service sends the signal @a sig to the Cobalt thread @a thread
+ * (created with pthread_create()). If @a sig is zero, this service
+ * checks for the existence of @a thread, but no signal is sent.
+ *
+ * @param thread thread identifier;
+ *
+ * @param sig signal number.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - EINVAL, @a sig is an invalid signal number;
+ * - EAGAIN, the maximum number of pending signals has been exceeded;
+ * - ESRCH, @a thread is an invalid thread identifier.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_kill.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-primary}
+ */
+COBALT_IMPL(int, pthread_kill, (pthread_t thread, int sig))
+{
+	int ret;
+
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_thread_kill, thread, sig);
+	if (ret == ESRCH)
+		return __STD(pthread_kill(thread, sig));
+
+	return ret;
+}
+
+/**
+ * Wait for termination of a specified thread.
+ *
+ * If @a thread is running and joinable, this service blocks the
+ * caller until @a thread terminates or detaches.  When @a thread
+ * terminates, the caller is unblocked and its return value is stored
+ * at the address @a value_ptr.
+ *
+ * On the other hand, if @a thread has already finished execution, its
+ * return value collected earlier is stored at the address @a
+ * value_ptr and this service returns immediately.
+ *
+ * This service is a cancelation point for Cobalt threads: if the
+ * calling thread is canceled while blocked in a call to this service,
+ * the cancelation request is honored and @a thread remains joinable.
+ *
+ * Multiple simultaneous calls to pthread_join() specifying the same running
+ * target thread block all the callers until the target thread terminates.
+ *
+ * @param thread identifier of the thread to wait for;
+ *
+ * @param retval address where the target thread return value will be stored
+ * on success.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid;
+ * - EDEADLK, attempting to join the calling thread;
+ * - EINVAL, @a thread is detached;
+ * - EPERM, the caller context is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_join.html">
+ * Specification.</a>
+ *
+ * @apitags{xthread-only, switch-secondary, switch-primary}
+ */
+COBALT_IMPL(int, pthread_join, (pthread_t thread, void **retval))
+{
+	int ret;
+
+	ret = __STD(pthread_join(thread, retval));
+	if (ret)
+		return ret;
+
+	ret = cobalt_thread_join(thread);
+
+	return ret == -EBUSY ? EINVAL : 0;
+}
+
+/** @} */
+
+/**
+ * @ingroup cobalt_api
+ * @defgroup cobalt_api_sched Scheduling management
+ *
+ * Cobalt scheduling management services
+ * @{
+ */
+
+/**
+ * Set the scheduling policy and parameters of the specified thread.
+ *
+ * This service sets the scheduling policy of the Cobalt thread
+ * identified by @a thread to the value @a policy, and its scheduling
+ * parameters (i.e. its priority) to the value pointed to by @a param.
+ *
+ * If pthread_self() is passed, this service turns the current thread
+ * into a Cobalt thread. If @a thread is not the identifier of a
+ * Cobalt thread, this service falls back to the regular
+ * pthread_setschedparam() service.
+ *
+ * @param thread target Cobalt thread;
+ *
+ * @param policy scheduling policy, one of SCHED_FIFO, SCHED_RR, or
+ * SCHED_OTHER;
+ *
+ * @param param address of scheduling parameters.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid;
+ * - EINVAL, @a policy or @a param->sched_priority is invalid;
+ * - EAGAIN, insufficient memory available from the system heap,
+ *   increase CONFIG_XENO_OPT_SYS_HEAPSZ;
+ * - EFAULT, @a param is an invalid address;
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_setschedparam.html">
+ * Specification.</a>
+ *
+ * @note
+ *
+ * See pthread_create(), pthread_setschedparam_ex().
+ *
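+ * For instance, the following call (a sketch only, error checking
+ * omitted) turns the calling thread into a Cobalt thread running at
+ * SCHED_FIFO priority 30:
+ *
+ * @code
+ * struct sched_param param = { .sched_priority = 30 };
+ *
+ * pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+ * @endcode
+ *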
+ * @apitags{thread-unrestricted, switch-secondary, switch-primary}
+ */
+COBALT_IMPL(int, pthread_setschedparam, (pthread_t thread,
+					 int policy, const struct sched_param *param))
+{
+	/*
+	 * XXX: We currently assume that all available policies
+	 * supported by the host kernel define a single scheduling
+	 * parameter only, i.e. a priority level.
+	 */
+	struct sched_param_ex param_ex = {
+		.sched_priority = param->sched_priority,
+	};
+
+	return pthread_setschedparam_ex(thread, policy, &param_ex);
+}
+
+/**
+ * Set extended scheduling policy of thread
+ *
+ * This service is an extended version of the regular
+ * pthread_setschedparam() service, which supports Cobalt-specific
+ * scheduling policies, not available with the host Linux environment.
+ *
+ * This service sets the scheduling policy of the Cobalt thread @a
+ * thread to the value @a policy, and its scheduling parameters
+ * (e.g. its priority) to the value pointed to by @a param_ex.
+ *
+ * If @a thread does not match the identifier of a Cobalt thread, this
+ * service falls back to the regular pthread_setschedparam() service.
+ *
+ * @param thread target Cobalt thread;
+ *
+ * @param policy scheduling policy, one of SCHED_WEAK, SCHED_FIFO,
+ * SCHED_COBALT, SCHED_RR, SCHED_SPORADIC, SCHED_TP, SCHED_QUOTA or
+ * SCHED_NORMAL;
+ *
+ * @param param_ex scheduling parameters address. As a special
+ * exception, a negative sched_priority value is interpreted as if
+ * SCHED_WEAK was given in @a policy, using the absolute value of this
+ * parameter as the weak priority level.
+ *
+ * When CONFIG_XENO_OPT_SCHED_WEAK is enabled, SCHED_WEAK exhibits
+ * priority levels in the [0..99] range (inclusive). Otherwise,
+ * sched_priority must be zero for the SCHED_WEAK policy.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid;
+ * - EINVAL, @a policy or @a param_ex->sched_priority is invalid;
+ * - EAGAIN, insufficient memory available from the system heap,
+ *   increase CONFIG_XENO_OPT_SYS_HEAPSZ;
+ * - EFAULT, @a param_ex is an invalid address;
+ * - EPERM, the calling process does not have superuser
+ *   permissions.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_setschedparam.html">
+ * Specification.</a>
+ *
+ * @note
+ *
+ * See pthread_create(), pthread_setschedparam().
+ *
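+ * A minimal sketch demoting the calling thread to the Cobalt-specific
+ * SCHED_WEAK policy (a zero priority is valid whether or not
+ * CONFIG_XENO_OPT_SCHED_WEAK is enabled):
+ *
+ * @code
+ * struct sched_param_ex param_ex = { .sched_priority = 0 };
+ *
+ * pthread_setschedparam_ex(pthread_self(), SCHED_WEAK, &param_ex);
+ * @endcode
+ *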
+ * @apitags{thread-unrestricted, switch-secondary, switch-primary}
+ */
+int pthread_setschedparam_ex(pthread_t thread,
+			     int policy, const struct sched_param_ex *param_ex)
+{
+	int ret, promoted, std_policy;
+	struct sched_param std_param;
+	__u32 u_winoff, *u_winoff_ptr;
+
+	/*
+	 * If we enter this call over a relaxed context, take the
+	 * opportunity to tell the host kernel via the regular libc
+	 * about the new schedparams right now, then tell Xenomai
+	 * next.  Otherwise, send the request to Xenomai only, which
+	 * will propagate the change to the host kernel asap,
+	 * i.e. almost immediately if the target thread is already
+	 * relaxed, or when it relaxes otherwise.
+	 *
+	 * CAUTION: when lazy propagation has to take place
+	 * (i.e. calling pthread_setschedparam_ex() from primary
+	 * mode), glibc's cached idea of the current schedparams of
+	 * the target thread will be out of sync. This is part of the
+	 * trade-off to keep the caller running in primary mode
+	 * throughout this service.
+	 *
+	 * Applications which are sensitive to this issue with Xenomai
+	 * threads should refrain from mixing APIs for managing
+	 * scheduling parameters, and only rely on libcobalt for this.
+	 */
+	if (cobalt_eager_setsched()) {
+		/* True if disabled or shadow not mapped yet. */
+		std_policy = cobalt_xlate_schedparam(policy, param_ex, &std_param);
+		ret = __STD(pthread_setschedparam(thread, std_policy, &std_param));
+		if (ret)
+			return ret;
+	}
+
+	/* Only request promotion when this targets the current thread. */
+	u_winoff_ptr = thread == pthread_self() ? &u_winoff : NULL;
+
+	ret = -XENOMAI_SYSCALL5(sc_cobalt_thread_setschedparam_ex,
+				thread, policy, param_ex,
+				u_winoff_ptr, &promoted);
+
+	/*
+	 * If the kernel has no reference to the target thread, let
+	 * glibc handle the call.
+	 */
+	if (ret == ESRCH) {
+		std_policy = cobalt_xlate_schedparam(policy, param_ex,
+						     &std_param);
+		return __STD(pthread_setschedparam(thread, std_policy,
+						   &std_param));
+	}
+
+	if (ret == 0 && promoted) {
+		cobalt_sigshadow_install_once();
+		cobalt_set_tsd(u_winoff);
+		cobalt_thread_harden();
+	}
+
+	return ret;
+}
+
+COBALT_IMPL(int, pthread_setschedprio, (pthread_t thread, int prio))
+{
+	__u32 u_winoff, *u_winoff_ptr;
+	int ret, promoted;
+
+	/* See pthread_setschedparam_ex. */
+	if (cobalt_eager_setsched()) {
+		ret = __STD(pthread_setschedprio(thread, prio));
+		if (ret)
+			return ret;
+	}
+
+	/* Only request promotion when this targets the current thread. */
+	u_winoff_ptr = thread == pthread_self() ? &u_winoff : NULL;
+
+	ret = -XENOMAI_SYSCALL4(sc_cobalt_thread_setschedprio,
+				thread, prio, u_winoff_ptr, &promoted);
+
+	/*
+	 * If the kernel has no reference to the target thread, let
+	 * glibc handle the call.
+	 */
+	if (ret == ESRCH)
+		return __STD(pthread_setschedprio(thread, prio));
+
+	if (ret == 0 && promoted) {
+		cobalt_sigshadow_install_once();
+		cobalt_set_tsd(u_winoff);
+		cobalt_thread_harden();
+	}
+
+	return ret;
+}
+
+/**
+ * Get the scheduling policy and parameters of the specified thread.
+ *
+ * This service returns, at the addresses @a policy and @a param, the
+ * current scheduling policy and scheduling parameters (i.e. priority)
+ * of the Cobalt thread @a thread. If @a thread is not the identifier
+ * of a Cobalt thread, this service falls back to the regular POSIX
+ * pthread_getschedparam() service.
+ *
+ * @param thread target thread;
+ *
+ * @param policy address where the scheduling policy of @a thread is
+ * stored on success;
+ *
+ * @param param address where the scheduling parameters of @a thread
+ * are stored on success.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_getschedparam.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, pthread_getschedparam, (pthread_t thread,
+					 int *__restrict__ policy,
+					 struct sched_param *__restrict__ param))
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	ret = pthread_getschedparam_ex(thread, policy, &param_ex);
+	if (ret)
+		return ret;
+
+	param->sched_priority = param_ex.sched_priority;
+
+	return 0;
+}
+
+/**
+ * Get extended scheduling policy of thread
+ *
+ * This service is an extended version of the regular
+ * pthread_getschedparam() service, which also supports
+ * Cobalt-specific policies, not available with the host Linux
+ * environment.
+ *
+ * @param thread target thread;
+ *
+ * @param policy_r address where the scheduling policy of @a thread is stored on
+ * success;
+ *
+ * @param param_ex address where the scheduling parameters of @a thread are
+ * stored on success.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/pthread_getschedparam.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+int pthread_getschedparam_ex(pthread_t thread,
+			     int *__restrict__ policy_r,
+			     struct sched_param_ex *__restrict__ param_ex)
+{
+	struct sched_param short_param;
+	int ret;
+
+	ret = -XENOMAI_SYSCALL3(sc_cobalt_thread_getschedparam_ex,
+				thread, policy_r, param_ex);
+	if (ret == ESRCH) {
+		ret = __STD(pthread_getschedparam(thread, policy_r, &short_param));
+		if (ret == 0)
+			param_ex->sched_priority = short_param.sched_priority;
+	}
+
+	return ret;
+}
+
+/**
+ * Yield the processor.
+ *
+ * This service moves the current thread to the end of its priority group.
+ *
+ * @retval 0
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/sched_yield.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted, switch-primary}
+ */
+COBALT_IMPL(int, pthread_yield, (void))
+{
+	return __WRAP(sched_yield());
+}
+
+/** @} */
+
+void cobalt_thread_init(void)
+{
+#ifdef _CS_GNU_LIBPTHREAD_VERSION
+	char vers[128];
+	linuxthreads =
+		!confstr(_CS_GNU_LIBPTHREAD_VERSION, vers, sizeof(vers))
+		|| strstr(vers, "linuxthreads");
+#else /* !_CS_GNU_LIBPTHREAD_VERSION */
+	linuxthreads = 1;
+#endif /* !_CS_GNU_LIBPTHREAD_VERSION */
+	pthread_attr_init_ex(&default_attr_ex);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/ticks.c b/kernel/xenomai-v3.2.4/lib/cobalt/ticks.c
new file mode 100644
index 0000000..94e0c1b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/ticks.c
@@ -0,0 +1,134 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <cobalt/arith.h>
+#include <cobalt/ticks.h>
+#include <asm/xenomai/tsc.h>
+#include <asm/xenomai/time.h>
+#include "internal.h"
+
+unsigned long long __cobalt_tsc_clockfreq;
+
+/*
+ * If we have no fast path via the vDSO for reading timestamps, ask
+ * the Cobalt core.
+ */
+static int gettime_fallback(clockid_t clk_id, struct timespec *tp)
+{
+	return __RT(clock_gettime(clk_id, tp));
+}
+
+int (*__cobalt_vdso_gettime)(clockid_t clk_id,
+			struct timespec *tp) = gettime_fallback;
+
+#ifdef XNARCH_HAVE_LLMULSHFT
+
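+/*
+ * tsc_scale and tsc_shift are computed by cobalt_ticks_init() so
+ * that ns ~= (ticks * tsc_scale) >> tsc_shift, i.e. the pair
+ * approximates the 1e9/freq ratio without a 64bit division.
+ */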
+static unsigned int tsc_scale, tsc_shift;
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+
+static struct xnarch_u32frac tsc_frac;
+static struct xnarch_u32frac bln_frac;
+
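+/*
+ * Divide by 1e9 using the precomputed reciprocal in bln_frac, then
+ * fix up the remainder; this avoids a 64bit hardware division.
+ */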
+unsigned long long cobalt_divrem_billion(unsigned long long value,
+					 unsigned long *rem)
+{
+	unsigned long long q;
+	unsigned r;
+
+	q = xnarch_nodiv_ullimd(value, bln_frac.frac, bln_frac.integ);
+	r = value - q * 1000000000;
+	if (r >= 1000000000) {
+		++q;
+		r -= 1000000000;
+	}
+	*rem = r;
+	return q;
+}
+
+xnsticks_t __cobalt_ns_to_tsc(xnsticks_t ns)
+{
+	return xnarch_nodiv_llimd(ns, tsc_frac.frac, tsc_frac.integ);
+}
+
+#else /* !XNARCH_HAVE_NODIV_LLIMD */
+
+xnsticks_t __cobalt_ns_to_tsc(xnsticks_t ns)
+{
+	return xnarch_llimd(ns, 1 << tsc_shift, tsc_scale);
+}
+
+#endif /* !XNARCH_HAVE_NODIV_LLIMD */
+
+xnsticks_t __cobalt_tsc_to_ns(xnsticks_t ticks)
+{
+	return xnarch_llmulshft(ticks, tsc_scale, tsc_shift);
+}
+
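+/* Convert with one extra bit of precision, then round to nearest. */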
+xnsticks_t __cobalt_tsc_to_ns_rounded(xnsticks_t ticks)
+{
+	unsigned int shift = tsc_shift - 1;
+	return (xnarch_llmulshft(ticks, tsc_scale, shift) + 1) / 2;
+}
+
+#else  /* !XNARCH_HAVE_LLMULSHFT */
+
+xnsticks_t __cobalt_tsc_to_ns(xnsticks_t ticks)
+{
+	return xnarch_llimd(ticks, 1000000000, __cobalt_tsc_clockfreq);
+}
+
+xnsticks_t __cobalt_tsc_to_ns_rounded(xnsticks_t ticks)
+{
+	return (xnarch_llimd(ticks, 1000000000, __cobalt_tsc_clockfreq/2) + 1) / 2;
+}
+
+xnsticks_t __cobalt_ns_to_tsc(xnsticks_t ns)
+{
+	return xnarch_llimd(ns, __cobalt_tsc_clockfreq, 1000000000);
+}
+#endif /* !XNARCH_HAVE_LLMULSHFT */
+
+#ifndef XNARCH_HAVE_NODIV_LLIMD
+unsigned long long cobalt_divrem_billion(unsigned long long value,
+					 unsigned long *rem)
+{
+	return xnarch_ulldiv(value, 1000000000, rem);
+}
+#endif /* !XNARCH_HAVE_NODIV_LLIMD */
+
+void cobalt_ticks_init(unsigned long long freq)
+{
+	__cobalt_tsc_clockfreq = freq;
+	if (freq) {
+#ifdef XNARCH_HAVE_LLMULSHFT
+		xnarch_init_llmulshft(1000000000, freq, &tsc_scale, &tsc_shift);
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+		xnarch_init_u32frac(&tsc_frac, 1 << tsc_shift, tsc_scale);
+#endif
+#endif
+	} else {
+		void *vcall = cobalt_lookup_vdso(COBALT_VDSO_VERSION,
+						 COBALT_VDSO_GETTIME);
+		if (vcall)
+			__cobalt_vdso_gettime = vcall;
+	}
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	xnarch_init_u32frac(&bln_frac, 1, 1000000000);
+#endif
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/timer.c b/kernel/xenomai-v3.2.4/lib/cobalt/timer.c
new file mode 100644
index 0000000..e3a1e88
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/timer.c
@@ -0,0 +1,255 @@
+/*
+ * Copyright (C) 2005 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <time.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
+/**
+ * @addtogroup cobalt_api_time
+ * @{
+ */
+
+/**
+ * @fn int timer_create(clockid_t clockid, const struct sigevent *__restrict__ evp, timer_t * __restrict__ timerid)
+ * @brief Create a timer
+ *
+ * This service creates a timer based on the clock @a clockid.
+ *
+ * If @a evp is not @a NULL, it describes the notification mechanism
+ * used on timer expiration. Only thread-directed notification is
+ * supported (evp->sigev_notify set to @a SIGEV_THREAD_ID).
+ *
+ * If @a evp is NULL, the current Cobalt thread will receive the
+ * notifications with signal SIGALRM.
+ *
+ * The recipient thread is delivered notifications when it calls any
+ * of the sigwait(), sigtimedwait() or sigwaitinfo() services.
+ *
+ * If this service succeeds, an identifier for the created timer is
+ * returned at the address @a timerid. The timer is unarmed until
+ * started with the timer_settime() service.
+ *
+ * @param clockid clock used as a timing base;
+ *
+ * @param evp description of the asynchronous notification to occur
+ * when the timer expires;
+ *
+ * @param timerid address where the identifier of the created timer
+ * will be stored on success.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, the clock @a clockid is invalid;
+ * - EINVAL, the member @a sigev_notify of the @b sigevent structure at the
+ *   address @a evp is not SIGEV_THREAD_ID;
+ * - EINVAL, the member @a sigev_signo of the @b sigevent structure is an
+ *   invalid signal number;
+ * - EAGAIN, the maximum number of timers was exceeded, recompile with a larger
+ *   value.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/timer_create.html">
+ * Specification.</a>
+ *
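+ * A minimal sketch relying on the NULL @a evp default, with the
+ * calling thread collecting SIGALRM notifications afterwards (error
+ * checking omitted):
+ *
+ * @code
+ * sigset_t set;
+ * timer_t tmid;
+ * int sig;
+ *
+ * sigemptyset(&set);
+ * sigaddset(&set, SIGALRM);
+ * timer_create(CLOCK_MONOTONIC, NULL, &tmid);
+ * // ... arm the timer with timer_settime(), then wait:
+ * sigwait(&set, &sig);
+ * @endcode
+ *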
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, timer_create, (clockid_t clockid,
+				const struct sigevent *__restrict__ evp,
+				timer_t * __restrict__ timerid))
+{
+	int ret;
+
+	ret = -XENOMAI_SYSCALL3(sc_cobalt_timer_create,	clockid, evp, timerid);
+	if (ret == 0)
+		return 0;
+
+	errno = ret;
+
+	return -1;
+}
+
+/**
+ * Delete a timer object.
+ *
+ * This service deletes the timer @a timerid.
+ *
+ * @param timerid identifier of the timer to be removed;
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a timerid is invalid;
+ * - EPERM, the timer @a timerid does not belong to the current process.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/timer_delete.html">
+ * Specification.</a>
+ *
+ * @apitags{thread-unrestricted}
+ */
+COBALT_IMPL(int, timer_delete, (timer_t timerid))
+{
+	int ret;
+
+	ret = -XENOMAI_SYSCALL1(sc_cobalt_timer_delete, timerid);
+	if (ret == 0)
+		return 0;
+
+	errno = ret;
+
+	return -1;
+}
+
+/**
+ * @fn timer_settime(timer_t timerid, int flags, const struct itimerspec *__restrict__ value, struct itimerspec *__restrict__ ovalue)
+ * @brief Start or stop a timer
+ *
+ * This service sets the expiration date and reload value of the
+ * timer @a timerid. If @a ovalue is not @a NULL, the current
+ * expiration date and reload value are stored at the address @a
+ * ovalue as with timer_gettime().
+ *
+ * If the member @a it_value of the @b itimerspec structure at @a
+ * value is zero, the timer is stopped, otherwise the timer is
+ * started. If the member @a it_interval is not zero, the timer is
+ * periodic. The current thread must be a Cobalt thread (created with
+ * pthread_create()) and will be notified via signal of timer
+ * expirations.
+ *
+ * When starting the timer, if @a flags is TIMER_ABSTIME, the expiration value
+ * is interpreted as an absolute date of the clock passed to the timer_create()
+ * service. Otherwise, the expiration value is interpreted as a time interval.
+ *
+ * Expiration date and reload value are rounded to an integer count of
+ * nanoseconds.
+ *
+ * @param timerid identifier of the timer to be started or stopped;
+ *
+ * @param flags one of 0 or TIMER_ABSTIME;
+ *
+ * @param value address where the specified timer expiration date and reload
+ * value are read;
+ *
+ * @param ovalue address where the specified timer previous expiration date and
+ * reload value are stored if not @a NULL.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, the specified timer identifier, expiration date or reload value is
+ *   invalid. For @a timerid to be valid, it must belong to the current process.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/timer_settime.html">
+ * Specification.</a>
+ *
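+ * For instance, arming a 1ms periodic timer whose first expiry is
+ * one second away (relative form; @a tmid as obtained from
+ * timer_create(), sketch only):
+ *
+ * @code
+ * struct itimerspec spec = {
+ *	.it_value = { .tv_sec = 1, .tv_nsec = 0 },
+ *	.it_interval = { .tv_sec = 0, .tv_nsec = 1000000 },
+ * };
+ *
+ * timer_settime(tmid, 0, &spec, NULL);
+ * @endcode
+ *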
+ * @apitags{xcontext, switch-primary}
+ */
+COBALT_IMPL(int, timer_settime, (timer_t timerid,
+				 int flags,
+				 const struct itimerspec *__restrict__ value,
+				 struct itimerspec *__restrict__ ovalue))
+{
+	int ret;
+
+	ret = -XENOMAI_SYSCALL4(sc_cobalt_timer_settime, timerid,
+				flags, value, ovalue);
+	if (ret == 0)
+		return 0;
+
+	errno = ret;
+
+	return -1;
+}
+
+/**
+ * @fn int timer_gettime(timer_t timerid, struct itimerspec *value)
+ * @brief Get timer next expiration date and reload value.
+ *
+ * This service stores, at the address @a value, the expiration date
+ * (member @a it_value) and reload value (member @a it_interval) of
+ * the timer @a timerid. The values are returned as time intervals,
+ * and as multiples of the system clock tick duration (see note in
+ * section @ref cobalt_api_time "Clocks and timers services" for details
+ * on the duration of the system clock tick). If the timer was not
+ * started, the returned members @a it_value and @a it_interval of @a
+ * value are zero.
+ *
+ * @param timerid timer identifier;
+ *
+ * @param value address where the timer expiration date and reload value are
+ * stored on success.
+ *
+ * @retval 0 on success;
+ * @retval -1 with @a errno set if:
+ * - EINVAL, @a timerid is invalid. For @a timerid to be valid, it
+ * must belong to the current process.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/timer_gettime.html">
+ * Specification.</a>
+ *
+ * @apitags{unrestricted}
+ */
+COBALT_IMPL(int, timer_gettime, (timer_t timerid, struct itimerspec *value))
+{
+	int ret;
+
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_timer_gettime, timerid, value);
+	if (ret == 0)
+		return 0;
+
+	errno = ret;
+
+	return -1;
+}
+
+/**
+ * Get expiration overruns count since the most recent timer expiration
+ * signal delivery.
+ *
+ * This service returns the expiration overruns count of @a timerid
+ * since the most recent timer expiration signal delivery. If this
+ * count exceeds @a DELAYTIMER_MAX expirations, @a DELAYTIMER_MAX is
+ * returned.
+ *
+ * @param timerid Timer identifier.
+ *
+ * @return the overruns count on success;
+ * @return -1 with @a errno set if:
+ * - EINVAL, @a timerid is invalid;
+ * - EPERM, the timer @a timerid does not belong to the current process.
+ *
+ * @see
+ * <a href="http://www.opengroup.org/onlinepubs/000095399/functions/timer_getoverrun.html">
+ * Specification.</a>
+ *
+ * @apitags{unrestricted}
+ */
+COBALT_IMPL(int, timer_getoverrun, (timer_t timerid))
+{
+	int overrun;
+
+	overrun = XENOMAI_SYSCALL1(sc_cobalt_timer_getoverrun, timerid);
+	if (overrun >= 0)
+		return overrun;
+
+	errno = -overrun;
+
+	return -1;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/timerfd.c b/kernel/xenomai-v3.2.4/lib/cobalt/timerfd.c
new file mode 100644
index 0000000..417e3aa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/timerfd.c
@@ -0,0 +1,63 @@
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <errno.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/timerfd.h>
+#include <asm/xenomai/syscall.h>
+#include "internal.h"
+
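+/*
+ * Thin wrappers forwarding the timerfd API to the Cobalt core,
+ * converting the negative error codes returned by the kernel into
+ * the usual -1 + errno convention.
+ */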
+COBALT_IMPL(int, timerfd_create, (int clockid, int flags))
+{
+	int fd;
+
+	fd = XENOMAI_SYSCALL2(sc_cobalt_timerfd_create, clockid, flags);
+	if (fd < 0) {
+		errno = -fd;
+		return -1;
+	}
+
+	return fd;
+}
+
+COBALT_IMPL(int, timerfd_settime, (int fd, int flags,
+		const struct itimerspec *new_value,
+		struct itimerspec *old_value))
+{
+	int ret;
+
+	ret = -XENOMAI_SYSCALL4(sc_cobalt_timerfd_settime,
+				fd, flags, new_value, old_value);
+	if (ret == 0)
+		return ret;
+
+	errno = ret;
+	return -1;
+}
+
+COBALT_IMPL(int, timerfd_gettime, (int fd, struct itimerspec *curr_value))
+{
+	int ret;
+
+	ret = -XENOMAI_SYSCALL2(sc_cobalt_timerfd_gettime, fd, curr_value);
+	if (ret == 0)
+		return ret;
+
+	errno = ret;
+	return -1;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/trace.c b/kernel/xenomai-v3.2.4/lib/cobalt/trace.c
new file mode 100644
index 0000000..8d9c189
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/trace.c
@@ -0,0 +1,97 @@
+/*
+ * Copyright (C) 2006 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <cobalt/uapi/kernel/trace.h>
+#include <cobalt/trace.h>
+#include <asm/xenomai/syscall.h>
+
+int xntrace_max_begin(unsigned long v)
+{
+	return XENOMAI_SYSCALL2(sc_cobalt_trace, __xntrace_op_max_begin, v);
+}
+
+int xntrace_max_end(unsigned long v)
+{
+	return XENOMAI_SYSCALL2(sc_cobalt_trace, __xntrace_op_max_end, v);
+}
+
+int xntrace_max_reset(void)
+{
+	return XENOMAI_SYSCALL1(sc_cobalt_trace, __xntrace_op_max_reset);
+}
+
+int xntrace_user_start(void)
+{
+	return XENOMAI_SYSCALL1(sc_cobalt_trace, __xntrace_op_user_start);
+}
+
+int xntrace_user_stop(unsigned long v)
+{
+	return XENOMAI_SYSCALL2(sc_cobalt_trace, __xntrace_op_user_stop, v);
+}
+
+int xntrace_user_freeze(unsigned long v, int once)
+{
+	return XENOMAI_SYSCALL3(sc_cobalt_trace, __xntrace_op_user_freeze,
+				v, once);
+}
+
+void xntrace_latpeak_freeze(int delay)
+{
+	XENOMAI_SYSCALL2(sc_cobalt_trace, __xntrace_op_latpeak_freeze, delay);
+}
+
+int xntrace_special(unsigned char id, unsigned long v)
+{
+	return XENOMAI_SYSCALL3(sc_cobalt_trace, __xntrace_op_special, id, v);
+}
+
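+/* Syscall arguments are longs: pass the 64bit value as two 32bit halves. */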
+int xntrace_special_u64(unsigned char id, unsigned long long v)
+{
+	return XENOMAI_SYSCALL4(sc_cobalt_trace, __xntrace_op_special_u64, id,
+				(unsigned long)(v >> 32),
+				(unsigned long)(v & 0xFFFFFFFF));
+}
+
+int xnftrace_vprintf(const char *format, va_list args)
+{
+	char buf[256];
+	int ret;
+
+	ret = vsnprintf(buf, sizeof(buf), format, args);
+	if (ret < 0)
+		return ret;
+	if (ret >= sizeof(buf))
+		return -EOVERFLOW;
+
+	return XENOMAI_SYSCALL1(sc_cobalt_ftrace_puts, buf);
+}
+
+int xnftrace_printf(const char *format, ...)
+{
+	va_list args;
+	int ret;
+
+	va_start(args, format);
+	ret = xnftrace_vprintf(format, args);
+	va_end(args);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/umm.c b/kernel/xenomai-v3.2.4/lib/cobalt/umm.c
new file mode 100644
index 0000000..c30ada6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/umm.c
@@ -0,0 +1,136 @@
+/*
+ * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <string.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <rtdm/rtdm.h>
+#include <cobalt/uapi/kernel/heap.h>
+#include <asm/xenomai/syscall.h>
+#include "current.h"
+#include "umm.h"
+#include "internal.h"
+
+struct xnvdso *cobalt_vdso;
+
+void *cobalt_umm_private = NULL;
+
+void *cobalt_umm_shared = NULL;
+
+static pthread_once_t init_bind_once = PTHREAD_ONCE_INIT;
+
+static uint32_t private_size;
+
+static void *__map_umm(const char *name, uint32_t *size_r)
+{
+	struct cobalt_memdev_stat statbuf;
+	int fd, ret;
+	void *addr;
+
+	fd = __RT(open(name, O_RDWR));
+	if (fd < 0) {
+		early_warning("cannot open RTDM device %s: %s", name,
+			      strerror(errno));
+		return MAP_FAILED;
+	}
+
+	ret = __RT(ioctl(fd, MEMDEV_RTIOC_STAT, &statbuf));
+	if (ret) {
+		__RT(close(fd));
+		early_warning("failed getting status of %s: %s",
+			      name, strerror(errno));
+		return MAP_FAILED;
+	}
+
+	addr = __RT(mmap(NULL, statbuf.size, PROT_READ|PROT_WRITE,
+			 MAP_SHARED, fd, 0));
+	__RT(close(fd));
+
+	*size_r = statbuf.size;
+
+	return addr;
+}
+
+#define map_umm(__name, __size_r)  __map_umm("/dev/rtdm/" __name, __size_r)
+
+void cobalt_unmap_umm(void)
+{
+	void *addr;
+
+	/*
+	 * Remapping the private heap must be done after the process
+	 * has re-attached to the Cobalt core, in order to reinstate a
+	 * proper private heap. Otherwise the global heap would be
+	 * used instead, leading to unwanted effects.
+	 *
+	 * On machines without an MMU, there is no such thing as fork.
+	 *
+	 * We replace former mappings with an invalid one, to detect
+	 * any spurious late access.
+	 */
+	addr = __STD(mmap(cobalt_umm_private,
+			  private_size, PROT_NONE,
+			  MAP_PRIVATE | MAP_ANONYMOUS | MAP_FIXED, -1, 0));
+
+	if (addr != cobalt_umm_private)
+		munmap(cobalt_umm_private, private_size);
+
+	cobalt_umm_private = NULL;
+	init_bind_once = PTHREAD_ONCE_INIT;
+}
+
+/*
+ * Will be called once on behalf of xenomai_init(), and when
+ * re-binding after a fork.
+ */
+static void init_bind(void)
+{
+	cobalt_umm_private = map_umm(COBALT_MEMDEV_PRIVATE, &private_size);
+	if (cobalt_umm_private == MAP_FAILED) {
+		early_warning("cannot map private umm area: %s",
+			      strerror(errno));
+		early_panic("(CONFIG_DEVTMPFS_MOUNT not enabled?)");
+	}
+}
+
+/* Will be called only once, upon call to xenomai_init(). */
+static void init_loadup(__u32 vdso_offset)
+{
+	uint32_t size;
+
+	cobalt_umm_shared = map_umm(COBALT_MEMDEV_SHARED, &size);
+	if (cobalt_umm_shared == MAP_FAILED)
+		early_panic("cannot map shared umm area: %s",
+			    strerror(errno));
+
+	cobalt_vdso = (struct xnvdso *)(cobalt_umm_shared + vdso_offset);
+}
+
+void cobalt_init_umm(__u32 vdso_offset)
+{
+	pthread_once(&init_bind_once, init_bind);
+	init_loadup(vdso_offset);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/umm.h b/kernel/xenomai-v3.2.4/lib/cobalt/umm.h
new file mode 100644
index 0000000..b592968
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/umm.h
@@ -0,0 +1,31 @@
+/*
+ * Copyright (C) 2009 Gilles Chanteperdrix <gilles.chanteperdrix@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _LIB_COBALT_UMM_H
+#define _LIB_COBALT_UMM_H
+
+#include <linux/types.h>
+
+void cobalt_init_umm(__u32 vdso_offset);
+
+void cobalt_unmap_umm(void);
+
+struct xnvdso;
+
+extern struct xnvdso *cobalt_vdso;
+
+#endif /* _LIB_COBALT_UMM_H */
diff --git a/kernel/xenomai-v3.2.4/lib/cobalt/wrappers.c b/kernel/xenomai-v3.2.4/lib/cobalt/wrappers.c
new file mode 100644
index 0000000..18c2377
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/cobalt/wrappers.c
@@ -0,0 +1,574 @@
+/*
+ * Copyright (C) 2005 Heikki Lindholm <holindho@cs.helsinki.fi>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+/*
+ * NOTE: functions in dynamically linked libraries aren't
+ * wrapped. These are fallback definitions for the __real_* symbols
+ * used by the library itself.
+ */
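+
+/*
+ * For reference (a hedged sketch of the static wrapping scheme):
+ * when the executable is linked with -Wl,--wrap=open, references to
+ * open() are redirected to __wrap_open(), while __real_open()
+ * resolves to the genuine libc symbol. The __weak definitions below
+ * only take effect when no such redirection happened.
+ */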
+#include <xeno_config.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/select.h>
+#include <sys/socket.h>
+#include <sys/time.h>
+#include <sys/mman.h>
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <syslog.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <sched.h>
+#include <memory.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <boilerplate/compiler.h>
+
+/* Support for very old C libraries not supporting O_TMPFILE. */
+#ifndef O_TMPFILE
+#define O_TMPFILE (020000000 | 0200000)
+#endif
+
+/* sched */
+__weak
+int __real_pthread_setschedparam(pthread_t thread,
+				 int policy, const struct sched_param *param)
+{
+	return pthread_setschedparam(thread, policy, param);
+}
+
+__weak
+int __real_pthread_getschedparam(pthread_t thread,
+				 int *policy, struct sched_param *param)
+{
+	return pthread_getschedparam(thread, policy, param);
+}
+
+__weak
+int __real_pthread_setschedprio(pthread_t thread, int prio)
+{
+	return pthread_setschedprio(thread, prio);
+}
+
+__weak
+int __real_sched_yield(void)
+{
+	return sched_yield();
+}
+
+__weak
+int __real_sched_get_priority_min(int policy)
+{
+	return sched_get_priority_min(policy);
+}
+
+__weak
+int __real_sched_get_priority_max(int policy)
+{
+	return sched_get_priority_max(policy);
+}
+
+__weak
+int __real_sched_setscheduler(pid_t pid, int policy,
+			      const struct sched_param *param)
+{
+	return sched_setscheduler(pid, policy, param);
+}
+
+__weak
+int __real_sched_getscheduler(pid_t pid)
+{
+	return sched_getscheduler(pid);
+}
+
+/* pthread */
+__weak
+int __real_pthread_create(pthread_t *ptid_r,
+			  const pthread_attr_t * attr,
+			  void *(*start) (void *), void *arg)
+{
+	return pthread_create(ptid_r, attr, start, arg);
+}
+
+__weak
+int __real_pthread_kill(pthread_t ptid, int sig)
+{
+	return pthread_kill(ptid, sig);
+}
+
+__weak
+int __real_pthread_join(pthread_t ptid, void **retval)
+{
+	return pthread_join(ptid, retval);
+}
+
+/* attr */
+__weak
+int __real_pthread_attr_init(pthread_attr_t *attr)
+{
+	return pthread_attr_init(attr);
+}
+
+/* semaphores */
+__weak
+int __real_sem_init(sem_t * sem, int pshared, unsigned value)
+{
+	return sem_init(sem, pshared, value);
+}
+
+__weak
+int __real_sem_destroy(sem_t * sem)
+{
+	return sem_destroy(sem);
+}
+
+__weak
+int __real_sem_post(sem_t * sem)
+{
+	return sem_post(sem);
+}
+
+__weak
+int __real_sem_wait(sem_t * sem)
+{
+	return sem_wait(sem);
+}
+
+__weak
+int __real_sem_trywait(sem_t * sem)
+{
+	return sem_trywait(sem);
+}
+
+__weak
+int __real_sem_timedwait(sem_t * sem, const struct timespec *abs_timeout)
+{
+	return sem_timedwait(sem, abs_timeout);
+}
+
+__weak
+int __real_sem_getvalue(sem_t * sem, int *sval)
+{
+	return sem_getvalue(sem, sval);
+}
+
+/* rtdm */
+__weak
+int __real_open(const char *path, int oflag, ...)
+{
+	mode_t mode = 0;
+	va_list ap;
+
+	if ((oflag & O_CREAT) || (oflag & O_TMPFILE) == O_TMPFILE) {
+		va_start(ap, oflag);
+		mode = va_arg(ap, mode_t);
+		va_end(ap);
+	}
+
+	return open(path, oflag, mode);
+}
+
+#if open64 != open
+__weak
+int __real_open64(const char *path, int oflag, ...)
+{
+	mode_t mode = 0;
+	va_list ap;
+
+	if ((oflag & O_CREAT) || (oflag & O_TMPFILE) == O_TMPFILE) {
+		va_start(ap, oflag);
+		mode = va_arg(ap, mode_t);
+		va_end(ap);
+	}
+
+	return open64(path, oflag, mode);
+}
+#endif
+
+#if __USE_FORTIFY_LEVEL > 0
+__weak int __real___open_2(const char *path, int oflag)
+{
+	return __open_2(path, oflag);
+}
+
+__weak int __real___open64_2(const char *path, int oflag)
+{
+	return __open64_2(path, oflag);
+}
+#endif // __USE_FORTIFY_LEVEL > 0
+
+__weak
+int __real_socket(int protocol_family, int socket_type, int protocol)
+{
+	return socket(protocol_family, socket_type, protocol);
+}
+
+__weak
+int __real_close(int fd)
+{
+	return close(fd);
+}
+
+__weak
+int __real_fcntl(int fd, int cmd, ...)
+{
+	va_list ap;
+	long arg;
+
+	va_start(ap, cmd);
+	arg = va_arg(ap, long);
+	va_end(ap);
+
+	return fcntl(fd, cmd, arg);
+}
+
+__weak
+int __real_ioctl(int fd, unsigned int request, ...)
+{
+	va_list ap;
+	void *arg;
+
+	va_start(ap, request);
+	arg = va_arg(ap, void *);
+	va_end(ap);
+
+	return ioctl(fd, request, arg);
+}
+
+__weak
+ssize_t __real_read(int fd, void *buf, size_t nbyte)
+{
+	return read(fd, buf, nbyte);
+}
+
+__weak
+ssize_t __real_write(int fd, const void *buf, size_t nbyte)
+{
+	return write(fd, buf, nbyte);
+}
+
+__weak
+ssize_t __real_recvmsg(int fd, struct msghdr * msg, int flags)
+{
+	return recvmsg(fd, msg, flags);
+}
+
+__weak
+int __real_recvmmsg(int fd, struct mmsghdr *msgvec, unsigned int vlen,
+		    unsigned int flags, struct timespec *timeout)
+{
+	return recvmmsg(fd, msgvec, vlen, flags, timeout);
+}
+
+__weak
+ssize_t __real_sendmsg(int fd, const struct msghdr * msg, int flags)
+{
+	return sendmsg(fd, msg, flags);
+}
+
+__weak
+int __real_sendmmsg(int fd, struct mmsghdr *msgvec, unsigned int vlen,
+		    unsigned int flags)
+{
+	return sendmmsg(fd, msgvec, vlen, flags);
+}
+
+__weak
+ssize_t __real_recvfrom(int fd, void *buf, size_t len, int flags,
+			struct sockaddr * from, socklen_t * fromlen)
+{
+	return recvfrom(fd, buf, len, flags, from, fromlen);
+}
+
+__weak
+ssize_t __real_sendto(int fd, const void *buf, size_t len, int flags,
+		      const struct sockaddr * to, socklen_t tolen)
+{
+	return sendto(fd, buf, len, flags, to, tolen);
+}
+
+__weak
+ssize_t __real_recv(int fd, void *buf, size_t len, int flags)
+{
+	return recv(fd, buf, len, flags);
+}
+
+__weak
+ssize_t __real_send(int fd, const void *buf, size_t len, int flags)
+{
+	return send(fd, buf, len, flags);
+}
+
+__weak
+int __real_getsockopt(int fd, int level, int optname, void *optval,
+		      socklen_t * optlen)
+{
+	return getsockopt(fd, level, optname, optval, optlen);
+}
+
+__weak
+int __real_setsockopt(int fd, int level, int optname, const void *optval,
+		      socklen_t optlen)
+{
+	return setsockopt(fd, level, optname, optval, optlen);
+}
+
+__weak
+int __real_bind(int fd, const struct sockaddr *my_addr, socklen_t addrlen)
+{
+	return bind(fd, my_addr, addrlen);
+}
+
+__weak
+int __real_connect(int fd, const struct sockaddr *serv_addr, socklen_t addrlen)
+{
+	return connect(fd, serv_addr, addrlen);
+}
+
+__weak
+int __real_listen(int fd, int backlog)
+{
+	return listen(fd, backlog);
+}
+
+__weak
+int __real_accept(int fd, struct sockaddr *addr, socklen_t * addrlen)
+{
+	return accept(fd, addr, addrlen);
+}
+
+__weak
+int __real_getsockname(int fd, struct sockaddr *name, socklen_t * namelen)
+{
+	return getsockname(fd, name, namelen);
+}
+
+__weak
+int __real_getpeername(int fd, struct sockaddr *name, socklen_t * namelen)
+{
+	return getpeername(fd, name, namelen);
+}
+
+__weak
+int __real_shutdown(int fd, int how)
+{
+	return shutdown(fd, how);
+}
+
+__weak
+int __real_select (int __nfds, fd_set *__restrict __readfds,
+		   fd_set *__restrict __writefds,
+		   fd_set *__restrict __exceptfds,
+		   struct timeval *__restrict __timeout)
+{
+	return select(__nfds, __readfds, __writefds, __exceptfds, __timeout);
+}
+
+__weak
+void *__real_mmap(void *addr, size_t length, int prot, int flags,
+		  int fd, off_t offset)
+{
+	return mmap(addr, length, prot, flags, fd, offset);
+}
+
+#if mmap64 != mmap
+__weak
+void *__real_mmap64(void *addr, size_t length, int prot, int flags,
+		  int fd, off64_t offset)
+{
+	return mmap64(addr, length, prot, flags, fd, offset);
+}
+#endif
+
+__weak
+int __real_vfprintf(FILE *stream, const char *fmt, va_list args)
+{
+	return vfprintf(stream, fmt, args);
+}
+
+__weak
+int __real_vprintf(const char *fmt, va_list args)
+{
+	return vprintf(fmt, args);
+}
+
+__weak
+int __real_fprintf(FILE *stream, const char *fmt, ...)
+{
+	va_list args;
+	int rc;
+
+	va_start(args, fmt);
+	rc = vfprintf(stream, fmt, args);
+	va_end(args);
+
+	return rc;
+}
+
+__weak
+int __real_printf(const char *fmt, ...)
+{
+	va_list args;
+	int rc;
+
+	va_start(args, fmt);
+	rc = vprintf(fmt, args);
+	va_end(args);
+
+	return rc;
+}
+
+#ifdef CONFIG_XENO_FORTIFY
+
+__weak
+int __real___vfprintf_chk(FILE *stream, int level, const char *fmt, va_list ap)
+{
+	return __vfprintf_chk(stream, level, fmt, ap);
+}
+
+__weak
+void __real___vsyslog_chk(int priority, int level, const char *fmt, va_list ap)
+{
+	extern void __vsyslog_chk(int, int, const char *, va_list);
+
+	__vsyslog_chk(priority, level, fmt, ap);
+}
+
+#endif
+
+__weak
+int __real_puts(const char *s)
+{
+	return puts(s);
+}
+
+__weak
+int __real_fputs(const char *s, FILE *stream)
+{
+	return fputs(s, stream);
+}
+
+#ifndef fputc
+__weak
+int __real_fputc(int c, FILE *stream)
+{
+	return fputc(c, stream);
+}
+#endif
+
+#ifndef putchar
+__weak
+int __real_putchar(int c)
+{
+	return putchar(c);
+}
+#endif
+
+__weak
+size_t __real_fwrite(const void *ptr, size_t sz, size_t nmemb, FILE *stream)
+{
+	return fwrite(ptr, sz, nmemb, stream);
+}
+
+__weak
+int __real_fclose(FILE *stream)
+{
+	return fclose(stream);
+}
+
+__weak
+void __real_syslog(int priority, const char *fmt, ...)
+{
+	va_list args;
+
+	va_start(args, fmt);
+	vsyslog(priority, fmt, args);
+	va_end(args);
+}
+
+__weak
+void __real_vsyslog(int priority, const char *fmt, va_list ap)
+{
+	vsyslog(priority, fmt, ap);
+}
+
+__weak
+int __real_gettimeofday(struct timeval *tv, struct timezone *tz)
+{
+	return gettimeofday(tv, tz);
+}
+
+__weak
+int __real_clock_gettime(clockid_t clk_id, struct timespec *tp)
+{
+	return clock_gettime(clk_id, tp);
+}
+
+__weak
+int __real_clock_settime(clockid_t clk_id, const struct timespec *tp)
+{
+	return clock_settime(clk_id, tp);
+}
+
+__weak
+int __real_sigwait(const sigset_t *set, int *sig)
+{
+	return sigwait(set, sig);
+}
+
+__weak
+int __real_sigwaitinfo(const sigset_t *set, siginfo_t *si)
+{
+	return sigwaitinfo(set, si);
+}
+
+__weak
+int __real_sigtimedwait(const sigset_t *set, siginfo_t *si,
+			const struct timespec *timeout)
+{
+	return sigtimedwait(set, si, timeout);
+}
+
+__weak
+int __real_sigpending(sigset_t *set)
+{
+	return sigpending(set);
+}
+
+__weak
+int __real_kill(pid_t pid, int sig)
+{
+	return kill(pid, sig);
+}
+
+__weak
+unsigned int __real_sleep(unsigned int seconds)
+{
+	return sleep(seconds);
+}
+
+__weak
+int __real_usleep(useconds_t usec)
+{
+	return usleep(usec);
+}
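+
+/*
+ * How these weak shims are meant to be used -- a sketch, assuming the
+ * usual GNU ld wrapping scheme; the __wrap_printf() body below is
+ * purely illustrative. Programs are linked with --wrap=<symbol> for
+ * the services above, so that a call to printf() resolves to
+ * __wrap_printf(), while __real_printf() still reaches the plain
+ * libc implementation. The weak definitions in this file only act as
+ * fallbacks when an object was linked without the matching --wrap
+ * option.
+ *
+ *	int __wrap_printf(const char *fmt, ...)
+ *	{
+ *		... divert the output to a real-time safe channel,
+ *		    or hand it over to __real_printf() ...
+ *	}
+ */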
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/COPYING b/kernel/xenomai-v3.2.4/lib/copperplate/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/Makefile.am b/kernel/xenomai-v3.2.4/lib/copperplate/Makefile.am
new file mode 100644
index 0000000..d9a1c6a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/Makefile.am
@@ -0,0 +1,70 @@
+
+lib_LTLIBRARIES = libcopperplate@CORE@.la
+
+libcopperplate@CORE@_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -lpthread -lrt -version-info 0:0:0
+libcopperplate@CORE@_la_LIBADD = @XENO_CORE_LDADD@
+
+noinst_LTLIBRARIES =
+
+libcopperplate@CORE@_la_SOURCES =	\
+	clockobj.c	\
+	cluster.c	\
+	eventobj.c 	\
+	init.c		\
+	internal.c	\
+	internal.h	\
+	syncobj.c	\
+	semobj.c	\
+	threadobj.c	\
+	timerobj.c	\
+	traceobj.c
+
+libcopperplate@CORE@_la_CPPFLAGS =		\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)			\
+	-I$(top_srcdir)/include		\
+	-I$(top_srcdir)/lib
+
+if XENO_REGISTRY
+libcopperplate@CORE@_la_LIBADD += libregistry.la
+noinst_LTLIBRARIES += libregistry.la
+
+libregistry_la_SOURCES = registry.c
+
+libregistry_la_CPPFLAGS =		\
+	$(libcopperplate@CORE@_la_CPPFLAGS)	\
+	@XENO_FUSE_CFLAGS@
+
+libregistry_la_LIBADD =			\
+	@FUSE_LIBS@
+endif
+
+if XENO_PSHARED
+# The process shareable heap has real-time properties, therefore it
+# fits both the cobalt and mercury cores equally. Yummie.
+libcopperplate@CORE@_la_SOURCES += heapobj-pshared.c reference.c
+endif
+if XENO_TLSF
+libcopperplate@CORE@_la_SOURCES += heapobj-tlsf.c
+else
+if XENO_HEAPMEM
+libcopperplate@CORE@_la_SOURCES += heapobj-heapmem.c
+else
+libcopperplate@CORE@_la_SOURCES += heapobj-malloc.c
+endif
+endif
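+
+# Heap implementation selection, summarizing the conditionals above:
+# TLSF is preferred when enabled, HEAPMEM comes next, and plain
+# malloc() is the fallback when neither allocator was selected at
+# configure time.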
+
+SUBDIRS = .
+
+if XENO_REGISTRY
+SUBDIRS += regd
+endif
+
+DIST_SUBDIRS = regd
+
+SPARSE = sparse
+
+sparse:
+	@for i in $(libcopperplate@CORE@_la_SOURCES) ; do \
+		$(SPARSE) $(CHECKFLAGS) $(srcdir)/$$i; \
+	done
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/clockobj.c b/kernel/xenomai-v3.2.4/lib/copperplate/clockobj.c
new file mode 100644
index 0000000..b57bb46
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/clockobj.c
@@ -0,0 +1,396 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <limits.h>
+#include <time.h>
+#include <string.h>
+#include "boilerplate/lock.h"
+#include "boilerplate/time.h"
+#include "copperplate/clockobj.h"
+#include "copperplate/debug.h"
+#include "internal.h"
+
+#ifdef CONFIG_XENO_LORES_CLOCK_DISABLED
+
+static inline
+int __clockobj_set_resolution(struct clockobj *clkobj,
+			      unsigned int resolution_ns)
+{
+	if (resolution_ns > 1) {
+		warning("low resolution clock disabled [--enable-lores-clock]");
+		return __bt(-EINVAL);
+	}
+
+	return 0;
+}
+
+#else  /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+void __clockobj_ticks_to_timespec(struct clockobj *clkobj,
+				  ticks_t ticks,
+				  struct timespec *ts)
+{
+	unsigned int freq;
+
+	if (clockobj_get_resolution(clkobj) > 1) {
+		freq = clockobj_get_frequency(clkobj);
+		ts->tv_sec = ticks / freq;
+		ts->tv_nsec = ticks - (ts->tv_sec * freq);
+		ts->tv_nsec *= clockobj_get_resolution(clkobj);
+	} else
+		clockobj_ns_to_timespec(ticks, ts);
+}
+
+void __clockobj_ticks_to_timeout(struct clockobj *clkobj,
+				 clockid_t clk_id,
+				 ticks_t ticks, struct timespec *ts)
+{
+	struct timespec now, delta;
+
+	__RT(clock_gettime(clk_id, &now));
+	__clockobj_ticks_to_timespec(clkobj, ticks, &delta);
+	timespec_add(ts, &now, &delta);
+}
+
+static inline
+int __clockobj_set_resolution(struct clockobj *clkobj,
+			      unsigned int resolution_ns)
+{
+	clkobj->resolution = resolution_ns;
+	clkobj->frequency = 1000000000 / resolution_ns;
+
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+static const int mdays[] = {
+	31, 28, 31, 30, 31, 30,	31, 31, 30, 31, 30, 31
+};
+
+void clockobj_caltime_to_ticks(struct clockobj *clkobj, const struct tm *tm,
+			       unsigned long rticks, ticks_t *pticks)
+{
+	ticks_t t = 0;
+	int n;	/* Must be signed. */
+
+	/*
+	 * We expect tick counts to be based on the time(2) epoch,
+	 * i.e. 00:00:00 UTC, January 1, 1970.
+	 */
+	for (n = 1970; n < 1900 + tm->tm_year; n++)
+		t += ((n % 4) ? 365 : 366);
+
+	if (!(tm->tm_year % 4) && tm->tm_mon >= 2)
+		/* Add one day for leap year after February. */
+		t++;
+
+	for (n = tm->tm_mon - 1; n >= 0; n--)
+		t += mdays[n];
+
+	t += tm->tm_mday - 1;
+	t *= 24;
+	t += tm->tm_hour;
+	t *= 60;
+	t += tm->tm_min;
+	t *= 60;
+	t += tm->tm_sec;
+	t *= clockobj_get_frequency(clkobj);
+	t += rticks;
+
+	/* XXX: we currently don't care about DST. */
+
+	*pticks = t;
+}
+
+#define SECBYMIN	60
+#define SECBYHOUR	(SECBYMIN * 60)
+#define SECBYDAY	(SECBYHOUR * 24)
+
+void clockobj_ticks_to_caltime(struct clockobj *clkobj,
+			       ticks_t ticks,
+			       struct tm *tm,
+			       unsigned long *rticks)
+{
+	unsigned long year, month, day, hour, min, sec;
+	unsigned int freq;
+	time_t nsecs;
+
+	freq = clockobj_get_frequency(clkobj);
+	nsecs = ticks / freq;
+	*rticks = ticks % freq;
+
+	for (year = 1970;; year++) { /* Absolute year, starting at the epoch. */
+		int ysecs = ((year % 4) ? 365 : 366) * SECBYDAY;
+		if (ysecs > nsecs)
+			break;
+		nsecs -= ysecs;
+	}
+
+	for (month = 0;; month++) {
+		int secbymon = mdays[month] * SECBYDAY;
+		if (month == 1 && (year % 4) == 0)
+			/* Account for leap year on February. */
+			secbymon += SECBYDAY;
+		if (secbymon > nsecs)
+			break;
+		nsecs -= secbymon;
+	}
+
+	day = nsecs / SECBYDAY;
+	nsecs -= (day * SECBYDAY);
+	hour = (nsecs / SECBYHOUR);
+	nsecs -= (hour * SECBYHOUR);
+	min = (nsecs / SECBYMIN);
+	nsecs -= (min * SECBYMIN);
+	sec = nsecs;
+
+	memset(tm, 0, sizeof(*tm));
+	tm->tm_year = year - 1900;
+	tm->tm_mon = month;
+	tm->tm_mday = day + 1;
+	tm->tm_hour = hour;
+	tm->tm_min = min;
+	tm->tm_sec = sec;
+}
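+
+/*
+ * A worked example of the two conversions above, with assumed values:
+ * at a 1000 ns resolution, the clock frequency is
+ * 1000000000 / 1000 = 1000000 ticks/s. January 2, 1970 00:00:00
+ * (tm_year=70, tm_mon=0, tm_mday=2) lies one day past the epoch,
+ * i.e. 86400 s, so clockobj_caltime_to_ticks() yields
+ * 86400 * 1000000 = 86400000000 ticks, and
+ * clockobj_ticks_to_caltime() maps that count back to the same
+ * calendar date. Note that the simple (year % 4) leap rule used by
+ * both routines is only exact for years 1970-2099.
+ */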
+
+void clockobj_caltime_to_timeout(struct clockobj *clkobj, const struct tm *tm,
+				 unsigned long rticks, struct timespec *ts)
+{
+	struct timespec date;
+	ticks_t ticks;
+
+	clockobj_caltime_to_ticks(clkobj, tm, rticks, &ticks);
+	__clockobj_ticks_to_timespec(clkobj, ticks, &date);
+	timespec_sub(ts, &date, &clkobj->offset);
+}
+
+void clockobj_set_date(struct clockobj *clkobj, ticks_t ticks)
+{
+	struct timespec now;
+
+	/*
+	 * XXX: we grab the lock to exclude other threads from reading
+	 * the clock offset while we update it, so that they either
+	 * compute against the old value, or the new one, but always
+	 * see a valid offset.
+	 */
+	read_lock_nocancel(&clkobj->lock);
+
+	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+
+	__clockobj_ticks_to_timespec(clkobj, ticks, &clkobj->epoch);
+	timespec_sub(&clkobj->offset, &clkobj->epoch, &now);
+
+	read_unlock(&clkobj->lock);
+}
+
+/*
+ * CAUTION: clockobj_set_resolution() may be called during the init
+ * phase only, not after. IOW, the resolution is perceived as a
+ * constant when the application code executes. For performance
+ * reason, we want to run locklessly for common time unit conversions,
+ * so the clockobj implementation does assume that the clock
+ * resolution will NOT be updated after the init phase.
+ */
+int clockobj_set_resolution(struct clockobj *clkobj, unsigned int resolution_ns)
+{
+#ifdef CONFIG_XENO_LORES_CLOCK_DISABLED
+	assert(resolution_ns == 1);
+#else
+	__clockobj_set_resolution(clkobj, resolution_ns);
+
+	/* Changing the resolution implies resetting the epoch. */
+	clockobj_set_date(clkobj, 0);
+#endif
+	return 0;
+}
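+
+/*
+ * Typical init-phase usage -- a minimal sketch under the constraint
+ * stated above, assuming low-resolution clock support is compiled in
+ * (--enable-lores-clock), error checking elided:
+ *
+ *	struct clockobj clk;
+ *
+ *	clockobj_init(&clk, 1);
+ *	clockobj_set_resolution(&clk, 1000);
+ *
+ * Application threads should start only after this point, so that
+ * they always observe a constant resolution and may convert time
+ * units locklessly.
+ */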
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/arith.h>
+#include <cobalt/sys/cobalt.h>
+
+#ifdef CONFIG_XENO_COPPERPLATE_CLOCK_RESTRICTED
+#error "restricted CLOCK_COPPERPLATE not available"
+#endif
+
+ticks_t clockobj_get_time(struct clockobj *clkobj)
+{
+	ticks_t ns = cobalt_ticks_to_ns_rounded(cobalt_read_tsc());
+	return clockobj_ns_to_ticks(clkobj, ns);
+}
+
+#ifndef CONFIG_XENO_LORES_CLOCK_DISABLED
+
+sticks_t clockobj_ns_to_ticks(struct clockobj *clkobj, sticks_t ns)
+{
+	/* Cobalt has optimized arith ops, use them. */
+	return xnarch_ulldiv(ns, clkobj->resolution, NULL);
+}
+
+#endif /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+void clockobj_get_date(struct clockobj *clkobj, ticks_t *pticks)
+{
+	ticks_t ns;
+
+	read_lock_nocancel(&clkobj->lock);
+
+	ns = cobalt_ticks_to_ns(clockobj_get_tsc());
+	/* Add offset to epoch. */
+	ns += (ticks_t)clkobj->offset.tv_sec * 1000000000ULL;
+	ns += clkobj->offset.tv_nsec;
+	if (clockobj_get_resolution(clkobj) > 1)
+		ns /= clockobj_get_resolution(clkobj);
+	*pticks = ns;
+
+	read_unlock(&clkobj->lock);
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
+ticks_t clockobj_get_tsc(void)
+{
+	struct timespec now;
+	clock_gettime(CLOCK_COPPERPLATE, &now);
+	return (ticks_t)now.tv_sec * 1000000000ULL + now.tv_nsec;
+}
+
+ticks_t clockobj_get_time(struct clockobj *clkobj)
+{
+	ticks_t ns = clockobj_get_tsc();
+
+	if (clockobj_get_resolution(clkobj) > 1)
+		return ns / clockobj_get_resolution(clkobj);
+
+	return ns;
+}
+
+#ifndef CONFIG_XENO_LORES_CLOCK_DISABLED
+
+sticks_t clockobj_ns_to_ticks(struct clockobj *clkobj, sticks_t ns)
+{
+	return ns / clkobj->resolution;
+}
+
+#endif /* !CONFIG_XENO_LORES_CLOCK_DISABLED */
+
+void clockobj_get_date(struct clockobj *clkobj, ticks_t *pticks)
+{
+	struct timespec now, date;
+
+	read_lock_nocancel(&clkobj->lock);
+
+	clock_gettime(CLOCK_COPPERPLATE, &now);
+
+	/* Add offset from epoch to current system time. */
+	timespec_add(&date, &clkobj->offset, &now);
+
+	/* Convert the time value to ticks. */
+	*pticks = (ticks_t)date.tv_sec * clockobj_get_frequency(clkobj)
+		+ (ticks_t)date.tv_nsec / clockobj_get_resolution(clkobj);
+
+	read_unlock(&clkobj->lock);
+}
+
+#endif /* CONFIG_XENO_MERCURY */
+
+/* Conversion from CLOCK_COPPERPLATE to clk_id. */
+void clockobj_convert_clocks(struct clockobj *clkobj,
+			     const struct timespec *in,
+			     clockid_t clk_id,
+			     struct timespec *out)
+{
+	struct timespec now, delta;
+
+	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+	/* Offset from CLOCK_COPPERPLATE epoch. */
+	timespec_sub(&delta, in, &now);
+	/* Current time for clk_id. */
+	__RT(clock_gettime(clk_id, &now));
+	/* Absolute timeout again, clk_id-based this time. */
+	timespec_add(out, &delta, &now);
+}
+
+void clockobj_get_distance(struct clockobj *clkobj,
+			   const struct itimerspec *itm,
+			   struct timespec *delta)
+{
+	ticks_t now, start, interval, dist;
+
+	now = clockobj_get_time(clkobj);
+	start = timespec_scalar(&itm->it_value);
+
+	if (start >= now)
+		/* Distance to first shot. */
+		dist = start - now;
+	else {
+		interval = timespec_scalar(&itm->it_interval);
+		/* Distance to next shot. */
+		dist = interval - ((now - start) % interval);
+	}
+
+	__clockobj_ticks_to_timespec(clkobj, dist, delta);
+}
+
+int clockobj_init(struct clockobj *clkobj,
+		  unsigned int resolution_ns)
+{
+	pthread_mutexattr_t mattr;
+	struct timespec now;
+	int ret;
+
+	if (resolution_ns == 0)
+		return __bt(-EINVAL);
+
+	memset(clkobj, 0, sizeof(*clkobj));
+	ret = __clockobj_set_resolution(clkobj, resolution_ns);
+	if (ret)
+		return __bt(ret);
+
+	/*
+	 * FIXME: this lock is only used to protect the wallclock
+	 * offset readings from updates. We should replace this by a
+	 * confirmed reading loop.
+	 */
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&clkobj->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+	timespec_sub(&clkobj->offset, &clkobj->epoch, &now);
+
+	return 0;
+}
+
+int clockobj_destroy(struct clockobj *clkobj)
+{
+	__RT(pthread_mutex_destroy(&clkobj->lock));
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/cluster.c b/kernel/xenomai-v3.2.4/lib/copperplate/cluster.c
new file mode 100644
index 0000000..7540278
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/cluster.c
@@ -0,0 +1,601 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+/*
+ * This file implements object clusters, to group various related
+ * runtime objects in named tables. Objects within clusters are
+ * indexed on a string label. Depending on whether shared
+ * multi-processing mode is enabled, clusters may be persistent in the
+ * main heap.
+ *
+ * In its simplest form, i.e. when shared multi-processing is
+ * disabled, a cluster is basically a private hash table, known only
+ * to the process that created it.
+ *
+ * When shared multi-processing mode is enabled, a cluster is a shared
+ * hash table indexed on a unique name within the main catalog.
+ * Therefore, all objects referred to by the cluster should be laid
+ * into the main heap as well.  Multiple processes attached to the
+ * same copperplate session do share the same main heap. Therefore,
+ * they may share objects by providing:
+ *
+ * - the name of the cluster.
+ * - the name of the object to retrieve from the cluster.
+ *
+ * Having objects shared between processes introduces the requirement
+ * to deal with stale objects, created by processes that don't exist
+ * anymore when a lookup is performed on a cluster by another
+ * process. We deal with this issue as simply as we can, as follows:
+ *
+ * - each object referenced to by a cluster bears a "creator node"
+ * identifier. This is basically the system-wide linux TID of the
+ * process owning the thread which has initially added the object to
+ * the cluster (i.e. getpid() as returned from the NPTL).
+ *
+ * - upon a lookup operation in the cluster which matches an object in
+ * the table, the process who introduced the object is probed for
+ * existence. If the process is gone, we silently drop the reference
+ * to the orphaned object from the cluster, and return a failed lookup
+ * status. Otherwise, the lookup succeeds.
+ *
+ * - when an attempt is made to index an object into a cluster, any
+ * conflicting object bearing the same name is checked for staleness
+ * as described for the lookup operation. The insertion then succeeds
+ * once the reference to a conflicting stale object has been silently
+ * discarded.
+ *
+ * The test for existence based on the linux TID may return spurious
+ * "true" results in case an object was created by a long gone
+ * process, whose TID was eventually reused for a newer process,
+ * before the process who initialized the main heap has exited. In
+ * theory, this situation may happen; in practice, 1) the TID
+ * generator has to wrap around fully before this happens, 2) multiple
+ * processes sharing objects via a cluster are normally co-operating
+ * to implement a global functionality. In the event of a process
+ * exit, it is likely that the whole application system should be
+ * reinited, thus the main (session) heap would be reset, which would
+ * in turn clear the issue.
+ *
+ * In the worst case, using a stale object would never cause bad
+ * memory references, since a clustered object - and all the memory
+ * references it does via its members - must be laid into the main
+ * heap, which is persistent until the last process attached to it
+ * leaves the session.
+ *
+ * This stale object detection is essentially a sanity mechanism to
+ * cleanup obviously wrong references from clusters after some process
+ * died unexpectedly. Under normal circumstances, for an orderly exit,
+ * a process should remove all references to objects it has created
+ * from existing clusters, before eventually freeing those objects.
+ *
+ * In addition to the basic cluster object, the synchronizing cluster
+ * (struct syncluster) provides support for waiting for a given object
+ * to appear in the dictionary.
+ */
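+
+/*
+ * A minimal usage sketch of the shared cluster API below (the
+ * embedding structure and names are illustrative only):
+ *
+ *	struct myobj {
+ *		struct clusterobj cobj;
+ *		int payload;
+ *	};
+ *
+ *	struct cluster c;
+ *	struct clusterobj *cobj;
+ *	struct myobj *obj;
+ *
+ *	cluster_init(&c, "mygroup");
+ *	cluster_addobj(&c, "foo", &obj->cobj);
+ *	...
+ *	cobj = cluster_findobj(&c, "foo");
+ *	if (cobj)
+ *		obj = container_of(cobj, struct myobj, cobj);
+ */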
+
+#include <errno.h>
+#include <string.h>
+#include <memory.h>
+#include "copperplate/heapobj.h"
+#include "copperplate/cluster.h"
+#include "copperplate/syncobj.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/debug.h"
+#include "internal.h"
+
+static const struct hash_operations hash_operations;
+
+struct cluster_walk_data {
+	struct cluster *c;
+	int (*walk)(struct cluster *c,
+		    struct clusterobj *cobj);
+};
+
+struct pvcluster_walk_data {
+	struct pvcluster *c;
+	int (*walk)(struct pvcluster *c,
+		    struct pvclusterobj *cobj);
+};
+
+#ifdef CONFIG_XENO_PSHARED
+
+int cluster_init(struct cluster *c, const char *name)
+{
+	struct dictionary *d;
+	struct hashobj *hobj;
+	int ret;
+
+	/*
+	 * NOTE: it does not make sense to destroy a shared cluster,
+	 * since other processes from the same session will likely
+	 * hold references to it, so there is no cluster_destroy()
+	 * routine on purpose. When all processes from the session are
+	 * gone, the shared heap is cleared the next time the
+	 * application boots, so there is really no point in deleting
+	 * shared clusters.
+	 */
+redo:
+	hobj = hash_search(&main_catalog, name, strlen(name),
+			   &hash_operations);
+	if (hobj) {
+		d = container_of(hobj, struct dictionary, hobj);
+		ret = 0;
+		goto out;
+	}
+
+	d = xnmalloc(sizeof(*d));
+	if (d == NULL)
+		return __bt(-ENOMEM);
+
+	hash_init(&d->table);
+	ret = hash_enter(&main_catalog, name, strlen(name), &d->hobj,
+			 &hash_operations);
+	/*
+	 * If someone managed to slip in, creating the cluster between
+	 * the table lookup and the indexing of the new cluster, retry
+	 * the whole process.
+	 */
+	if (ret == -EEXIST) {
+		hash_destroy(&d->table);
+		xnfree(d);
+		goto redo;
+	}
+out:
+	c->d = d;
+
+	return __bt(ret);
+}
+
+static int cluster_probe(struct hashobj *hobj)
+{
+	struct clusterobj *cobj;
+
+	cobj = container_of(hobj, struct clusterobj, hobj);
+	if (cobj->cnode == __node_id)
+		return 1; /* Trivial check: is it ours? */
+
+	/*
+	 * The node identifier is actually the main thread pid, so if
+	 * we can send the latter a signal, the node is deemed active.
+	 * Over Cobalt, the main thread is always shadowed, therefore
+	 * we may use Cobalt's kill() service to probe for it.
+	 * Receiving EPERM does mean that we found an active node,
+	 * just that we don't have the credentials to actually send it
+	 * a signal.
+	 */
+	return copperplate_probe_tid(cobj->cnode) == 0;
+}
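+
+/*
+ * copperplate_probe_tid() is defined elsewhere; as the comment above
+ * explains, the liveness test boils down to the classic kill-based
+ * probe sketched here (illustration only):
+ *
+ *	if (kill(tid, 0) == 0 || errno == EPERM)
+ *		the node is alive;
+ *	else
+ *		ESRCH: the node is gone, drop the stale reference;
+ */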
+
+int cluster_addobj(struct cluster *c, const char *name,
+		   struct clusterobj *cobj)
+{
+	cobj->cnode = __node_id;
+	/*
+	 * Add object to cluster and probe conflicting entries for
+	 * owner node existence, overwriting dead instances on the
+	 * fly.
+	 */
+	return hash_enter_probe(&c->d->table, name, strlen(name),
+				&cobj->hobj, &hash_operations);
+}
+
+int cluster_addobj_dup(struct cluster *c, const char *name,
+		       struct clusterobj *cobj)
+{
+	cobj->cnode = __node_id;
+	/*
+	 * Same as cluster_addobj(), but allows for duplicate keys in
+	 * live objects.
+	 */
+	return hash_enter_probe_dup(&c->d->table, name, strlen(name),
+				    &cobj->hobj, &hash_operations);
+}
+
+int cluster_delobj(struct cluster *c, struct clusterobj *cobj)
+{
+	return __bt(hash_remove(&c->d->table, &cobj->hobj, &hash_operations));
+}
+
+struct clusterobj *cluster_findobj(struct cluster *c, const char *name)
+{
+	struct hashobj *hobj;
+
+	/*
+	 * Search for object entry and probe for owner node existence,
+	 * discarding dead instances on the fly.
+	 */
+	hobj = hash_search_probe(&c->d->table, name, strlen(name),
+				 &hash_operations);
+	if (hobj == NULL)
+		return NULL;
+
+	return container_of(hobj, struct clusterobj, hobj);
+}
+
+static int __cluster_walk(struct hash_table *t, struct hashobj *hobj, void *arg)
+{
+	struct cluster_walk_data *wd = arg;
+	struct clusterobj *cobj;
+
+	cobj = container_of(hobj, struct clusterobj, hobj);
+
+	return wd->walk(wd->c, cobj);
+}
+
+int cluster_walk(struct cluster *c,
+		 int (*walk)(struct cluster *c,
+			     struct clusterobj *cobj))
+{
+	struct cluster_walk_data wd = {
+		.c = c,
+		.walk = walk,
+	};
+	return hash_walk(&c->d->table, __cluster_walk, &wd);
+}
+
+int syncluster_init(struct syncluster *sc, const char *name)
+{
+	struct syndictionary *d;
+	struct hashobj *hobj;
+	int ret;
+
+redo:
+	hobj = hash_search(&main_catalog, name, strlen(name),
+			   &hash_operations);
+	if (hobj) {
+		sc->d = container_of(hobj, struct syndictionary, hobj);
+		return 0;
+	}
+
+	d = xnmalloc(sizeof(*d));
+	if (d == NULL)
+		return -ENOMEM;
+
+	hash_init(&d->table);
+
+	ret = hash_enter(&main_catalog, name, strlen(name), &d->hobj,
+			 &hash_operations);
+	/*
+	 * Same as cluster_init(), redo if someone slipped in,
+	 * creating the cluster.
+	 */
+	if (ret == -EEXIST) {
+		hash_destroy(&d->table);
+		xnfree(d);
+		goto redo;
+	}
+
+	sc->d = d;
+
+	return syncobj_init(&d->sobj, CLOCK_COPPERPLATE,
+			    SYNCOBJ_FIFO, fnref_null);
+}
+
+int syncluster_addobj(struct syncluster *sc, const char *name,
+		      struct clusterobj *cobj)
+{
+	struct syncluster_wait_struct *wait;
+	struct threadobj *thobj, *tmp;
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&sc->d->sobj, &syns);
+	if (ret)
+		return __bt(ret);
+
+	cobj->cnode = __node_id;
+
+	ret = hash_enter_probe(&sc->d->table, name, strlen(name),
+			       &cobj->hobj, &hash_operations);
+	if (ret)
+		goto out;
+
+	if (!syncobj_grant_wait_p(&sc->d->sobj))
+		goto out;
+	/*
+	 * Wake up all threads waiting for this key to appear in the
+	 * dictionary.
+	 */
+	syncobj_for_each_grant_waiter_safe(&sc->d->sobj, thobj, tmp) {
+		wait = threadobj_get_wait(thobj);
+		if (strcmp(__mptr(wait->name_ref), name) == 0)
+			syncobj_grant_to(&sc->d->sobj, thobj);
+	}
+out:
+	syncobj_unlock(&sc->d->sobj, &syns);
+
+	return ret;
+}
+
+int syncluster_delobj(struct syncluster *sc,
+		      struct clusterobj *cobj)
+{
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&sc->d->sobj, &syns);
+	if (ret)
+		return ret;
+
+	ret = __bt(hash_remove(&sc->d->table, &cobj->hobj, &hash_operations));
+
+	syncobj_unlock(&sc->d->sobj, &syns);
+
+	return ret;
+}
+
+int syncluster_findobj(struct syncluster *sc,
+		       const char *name,
+		       const struct timespec *timeout,
+		       struct clusterobj **cobjp)
+{
+	struct syncluster_wait_struct *wait = NULL;
+	struct syncstate syns;
+	struct hashobj *hobj;
+	int ret = 0;
+
+	ret = syncobj_lock(&sc->d->sobj, &syns);
+	if (ret)
+		return ret;
+
+	for (;;) {
+		hobj = hash_search_probe(&sc->d->table, name, strlen(name),
+					 &hash_operations);
+		if (hobj) {
+			*cobjp = container_of(hobj, struct clusterobj, hobj);
+			break;
+		}
+		if (timeout &&
+		    timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
+			ret = -EWOULDBLOCK;
+			break;
+		}
+		if (!threadobj_current_p()) {
+			ret = -EPERM;
+			break;
+		}
+		if (wait == NULL) {
+			wait = threadobj_prepare_wait(struct syncluster_wait_struct);
+			wait->name_ref = __moff(xnstrdup(name));
+		}
+		ret = syncobj_wait_grant(&sc->d->sobj, timeout, &syns);
+		if (ret) {
+			if (ret == -EIDRM)
+				goto out;
+			break;
+		}
+	}
+
+	syncobj_unlock(&sc->d->sobj, &syns);
+out:
+	if (wait) {
+		xnfree(__mptr(wait->name_ref));
+		threadobj_finish_wait();
+	}
+
+	return ret;
+}
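+
+/*
+ * How the two entry points above pair up -- a sketch with
+ * illustrative names: a consumer may block until a producer
+ * publishes a matching key.
+ *
+ *	producer:
+ *		syncluster_addobj(&sc, "dev0", &obj->cobj);
+ *
+ *	consumer:
+ *		struct clusterobj *cobj;
+ *		syncluster_findobj(&sc, "dev0", NULL, &cobj);
+ *
+ * A NULL timeout waits indefinitely, while a zeroed timespec makes
+ * the lookup return -EWOULDBLOCK immediately, as coded above.
+ */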
+
+static const struct hash_operations hash_operations = {
+	.compare = memcmp,
+	.probe = cluster_probe,
+	.alloc = xnmalloc,
+	.free = xnfree,
+};
+
+static const struct pvhash_operations pvhash_operations = {
+	.compare = memcmp,
+};
+
+#else /* !CONFIG_XENO_PSHARED */
+
+static const struct hash_operations hash_operations = {
+	.compare = memcmp,
+};
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+int pvcluster_init(struct pvcluster *c, const char *name)
+{
+	pvhash_init(&c->table);
+	return 0;
+}
+
+void pvcluster_destroy(struct pvcluster *c)
+{
+	/* nop */
+}
+
+int pvcluster_addobj(struct pvcluster *c, const char *name,
+		     struct pvclusterobj *cobj)
+{
+	return pvhash_enter(&c->table, name, strlen(name), &cobj->hobj,
+			    &pvhash_operations);
+}
+
+int pvcluster_addobj_dup(struct pvcluster *c, const char *name,
+			 struct pvclusterobj *cobj)
+{
+	return pvhash_enter_dup(&c->table, name, strlen(name), &cobj->hobj,
+				&pvhash_operations);
+}
+
+int pvcluster_delobj(struct pvcluster *c, struct pvclusterobj *cobj)
+{
+	return __bt(pvhash_remove(&c->table, &cobj->hobj, &pvhash_operations));
+}
+
+struct pvclusterobj *pvcluster_findobj(struct pvcluster *c, const char *name)
+{
+	struct pvhashobj *hobj;
+
+	hobj = pvhash_search(&c->table, name, strlen(name),
+			     &pvhash_operations);
+	if (hobj == NULL)
+		return NULL;
+
+	return container_of(hobj, struct pvclusterobj, hobj);
+}
+
+static int __pvcluster_walk(struct pvhash_table *t, struct pvhashobj *hobj,
+			    void *arg)
+{
+	struct pvcluster_walk_data *wd = arg;
+	struct pvclusterobj *cobj;
+
+	cobj = container_of(hobj, struct pvclusterobj, hobj);
+
+	return wd->walk(wd->c, cobj);
+}
+
+int pvcluster_walk(struct pvcluster *c,
+		   int (*walk)(struct pvcluster *c,
+			       struct pvclusterobj *cobj))
+{
+	struct pvcluster_walk_data wd = {
+		.c = c,
+		.walk = walk,
+	};
+	return pvhash_walk(&c->table, __pvcluster_walk, &wd);
+}
+
+int pvsyncluster_init(struct pvsyncluster *sc, const char *name)
+{
+	int ret;
+
+	ret = __bt(pvcluster_init(&sc->c, name));
+	if (ret)
+		return ret;
+
+	/*
+	 * pvcluster_destroy() is a nop, so we do not need to run any
+	 * finalizer here.
+	 */
+	return syncobj_init(&sc->sobj, CLOCK_COPPERPLATE,
+			    SYNCOBJ_FIFO, fnref_null);
+}
+
+void pvsyncluster_destroy(struct pvsyncluster *sc)
+{
+	struct syncstate syns;
+
+	if (__bt(syncobj_lock(&sc->sobj, &syns)))
+		return;
+
+	/* No finalizer, we just destroy the synchro. */
+	syncobj_destroy(&sc->sobj, &syns);
+}
+
+int pvsyncluster_addobj(struct pvsyncluster *sc, const char *name,
+			struct pvclusterobj *cobj)
+{
+	struct syncluster_wait_struct *wait;
+	struct threadobj *thobj, *tmp;
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&sc->sobj, &syns);
+	if (ret)
+		return __bt(ret);
+
+	ret = pvcluster_addobj(&sc->c, name, cobj);
+	if (ret)
+		goto out;
+
+	if (!syncobj_grant_wait_p(&sc->sobj))
+		goto out;
+	/*
+	 * Wake up all threads waiting for this key to appear in the
+	 * dictionary.
+	 */
+	syncobj_for_each_grant_waiter_safe(&sc->sobj, thobj, tmp) {
+		wait = threadobj_get_wait(thobj);
+		if (strcmp(wait->name, name) == 0)
+			syncobj_grant_to(&sc->sobj, thobj);
+	}
+out:
+	syncobj_unlock(&sc->sobj, &syns);
+
+	return ret;
+}
+
+int pvsyncluster_delobj(struct pvsyncluster *sc,
+			struct pvclusterobj *cobj)
+{
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&sc->sobj, &syns);
+	if (ret)
+		return ret;
+
+	ret = __bt(pvcluster_delobj(&sc->c, cobj));
+
+	syncobj_unlock(&sc->sobj, &syns);
+
+	return ret;
+}
+
+int pvsyncluster_findobj(struct pvsyncluster *sc,
+			 const char *name,
+			 const struct timespec *timeout,
+			 struct pvclusterobj **cobjp)
+{
+	struct syncluster_wait_struct *wait = NULL;
+	struct pvclusterobj *cobj;
+	struct syncstate syns;
+	int ret = 0;
+
+	ret = syncobj_lock(&sc->sobj, &syns);
+	if (ret)
+		return ret;
+
+	for (;;) {
+		cobj = pvcluster_findobj(&sc->c, name);
+		if (cobj) {
+			*cobjp = cobj;
+			break;
+		}
+		if (timeout &&
+		    timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
+			ret = -EWOULDBLOCK;
+			break;
+		}
+		if (!threadobj_current_p()) {
+			ret = -EPERM;
+			break;
+		}
+		if (wait == NULL) {
+			wait = threadobj_prepare_wait(struct syncluster_wait_struct);
+			wait->name = name;
+		}
+		ret = syncobj_wait_grant(&sc->sobj, timeout, &syns);
+		if (ret) {
+			if (ret == -EIDRM)
+				goto out;
+			break;
+		}
+	}
+
+	syncobj_unlock(&sc->sobj, &syns);
+out:
+	if (wait)
+		threadobj_finish_wait();
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/eventobj.c b/kernel/xenomai-v3.2.4/lib/copperplate/eventobj.c
new file mode 100644
index 0000000..dce5bd2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/eventobj.c
@@ -0,0 +1,337 @@
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include "copperplate/threadobj.h"
+#include "copperplate/eventobj.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/debug.h"
+
+#ifdef CONFIG_XENO_COBALT
+
+#include "cobalt/internal.h"
+
+int eventobj_init(struct eventobj *evobj, unsigned int value, int flags,
+		  fnref_type(void (*)(struct eventobj *evobj)) finalizer)
+{
+	int ret, event_flags = event_scope_attribute;
+
+	if (flags & EVOBJ_PRIO)
+		event_flags |= COBALT_EVENT_PRIO;
+
+	ret = cobalt_event_init(&evobj->core.event, value, event_flags);
+	if (ret)
+		return __bt(ret);
+
+	evobj->finalizer = finalizer;
+
+	return 0;
+}
+
+int eventobj_destroy(struct eventobj *evobj)
+{
+	void (*finalizer)(struct eventobj *evobj);
+	int ret;
+
+	ret = cobalt_event_destroy(&evobj->core.event);
+	if (ret)
+		return ret;
+
+	fnref_get(finalizer, evobj->finalizer);
+	finalizer(evobj);
+
+	return 0;
+}
+
+void eventobj_uninit(struct eventobj *evobj)
+{
+	int ret = cobalt_event_destroy(&evobj->core.event);
+	assert(ret == 0);
+	(void)ret;
+}
+
+int eventobj_wait(struct eventobj *evobj,
+		  unsigned int bits, unsigned int *bits_r,
+		  int mode, const struct timespec *timeout)
+{
+	int ret;
+
+	ret = cobalt_event_wait(&evobj->core.event,
+				bits, bits_r, mode, timeout);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int eventobj_post(struct eventobj *evobj, unsigned int bits)
+{
+	int ret;
+
+	ret = cobalt_event_post(&evobj->core.event, bits);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+int eventobj_clear(struct eventobj *evobj, unsigned int bits,
+		   unsigned int *bits_r)
+{
+	unsigned int oldval;
+
+	oldval = cobalt_event_clear(&evobj->core.event, bits);
+	if (bits_r)
+		*bits_r = oldval;
+
+	return 0;
+}
+
+int eventobj_inquire(struct eventobj *evobj, size_t waitsz,
+		     struct eventobj_waitentry *waitlist,
+		     unsigned int *bits_r)
+{
+	struct cobalt_threadstat stat;
+	struct cobalt_event_info info;
+	int nrwait, pidsz, n, ret;
+	pid_t *pidlist = NULL;
+
+	pidsz = sizeof(pid_t) * (waitsz / sizeof(*waitlist));
+	if (pidsz > 0) {
+		pidlist = pvmalloc(pidsz);
+		if (pidlist == NULL)
+			return -ENOMEM;
+	}
+
+	nrwait = cobalt_event_inquire(&evobj->core.event, &info, pidlist, pidsz);
+	if (nrwait < 0)
+		goto out;
+
+	*bits_r = info.value;
+
+	if (pidlist == NULL)
+		return nrwait;
+
+	for (n = 0; n < nrwait; n++, waitlist++) {
+		ret = cobalt_thread_stat(pidlist[n], &stat);
+		/* If waiter disappeared, fill in a dummy entry. */
+		if (ret) {
+			waitlist->pid = -1;
+			strcpy(waitlist->name, "???");
+		} else {
+			waitlist->pid = pidlist[n];
+			strcpy(waitlist->name, stat.name);
+		}
+	}
+out:
+	if (pidlist)
+		pvfree(pidlist);
+
+	return nrwait;
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
+static void eventobj_finalize(struct syncobj *sobj)
+{
+	struct eventobj *evobj = container_of(sobj, struct eventobj, core.sobj);
+	void (*finalizer)(struct eventobj *evobj);
+
+	fnref_get(finalizer, evobj->finalizer);
+	finalizer(evobj);
+}
+fnref_register(libcopperplate, eventobj_finalize);
+
+int eventobj_init(struct eventobj *evobj, unsigned int value, int flags,
+		  fnref_type(void (*)(struct eventobj *evobj)) finalizer)
+{
+	int sobj_flags = 0, ret;
+
+	if (flags & EVOBJ_PRIO)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	ret = syncobj_init(&evobj->core.sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libcopperplate, eventobj_finalize));
+	if (ret)
+		return __bt(ret);
+
+	evobj->core.flags = flags;
+	evobj->core.value = value;
+	evobj->finalizer = finalizer;
+
+	return 0;
+}
+
+int eventobj_destroy(struct eventobj *evobj)
+{
+	struct syncstate syns;
+	int ret;
+
+	if (syncobj_lock(&evobj->core.sobj, &syns))
+		return -EINVAL;
+
+	ret = syncobj_destroy(&evobj->core.sobj, &syns);
+	if (ret < 0)
+		return ret;
+
+	return 0;
+}
+
+void eventobj_uninit(struct eventobj *evobj)
+{
+	syncobj_uninit(&evobj->core.sobj);
+}
+
+int eventobj_wait(struct eventobj *evobj,
+		  unsigned int bits, unsigned int *bits_r,
+		  int mode, const struct timespec *timeout)
+{
+	struct eventobj_wait_struct *wait;
+	unsigned int waitval, testval;
+	struct syncstate syns;
+	int ret = 0;
+
+	ret = syncobj_lock(&evobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
+	if (bits == 0) {
+		*bits_r = evobj->core.value;
+		goto done;
+	}
+
+	waitval = evobj->core.value & bits;
+	testval = mode & EVOBJ_ANY ? waitval : bits;
+
+	if (waitval && waitval == testval) {
+		*bits_r = waitval;
+		goto done;
+	}
+
+	/* Have to wait. */
+
+	if (timeout && timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	wait = threadobj_prepare_wait(struct eventobj_wait_struct);
+	wait->value = bits;
+	wait->mode = mode;
+
+	ret = syncobj_wait_grant(&evobj->core.sobj, timeout, &syns);
+	if (ret == -EIDRM) {
+		threadobj_finish_wait();
+		return ret;
+	}
+
+	if (ret == 0)
+		*bits_r = wait->value;
+
+	threadobj_finish_wait();
+done:
+	syncobj_unlock(&evobj->core.sobj, &syns);
+
+	return ret;
+}
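+
+/*
+ * Worked example for the wakeup test above (illustrative, not part
+ * of the original code): with the event group value at 0x5, a caller
+ * waiting for bits == 0x6 gets waitval == 0x4. With EVOBJ_ANY set,
+ * testval == waitval, so the request is immediately satisfied and
+ * 0x4 is returned. In conjunctive (all-bits) mode, testval == bits
+ * (0x6) and 0x4 != 0x6, so the caller blocks until the missing bit
+ * is posted.
+ */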
+
+int eventobj_post(struct eventobj *evobj, unsigned int bits)
+{
+	struct eventobj_wait_struct *wait;
+	unsigned int waitval, testval;
+	struct threadobj *thobj, *tmp;
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&evobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
+	evobj->core.value |= bits;
+
+	if (!syncobj_grant_wait_p(&evobj->core.sobj))
+		goto done;
+
+	syncobj_for_each_grant_waiter_safe(&evobj->core.sobj, thobj, tmp) {
+		wait = threadobj_get_wait(thobj);
+		waitval = wait->value & bits;
+		testval = wait->mode & EVOBJ_ANY ? waitval : wait->value;
+		if (waitval && waitval == testval) {
+			wait->value = waitval;
+			syncobj_grant_to(&evobj->core.sobj, thobj);
+		}
+	}
+done:
+	syncobj_unlock(&evobj->core.sobj, &syns);
+
+	return 0;
+}
+
+int eventobj_clear(struct eventobj *evobj,
+		   unsigned int bits,
+		   unsigned int *bits_r)
+{
+	struct syncstate syns;
+	unsigned int oldval;
+	int ret;
+
+	ret = syncobj_lock(&evobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
+	oldval = evobj->core.value;
+	evobj->core.value &= ~bits;
+
+	syncobj_unlock(&evobj->core.sobj, &syns);
+
+	if (bits_r)
+		*bits_r = oldval;
+
+	return 0;
+}
+
+int eventobj_inquire(struct eventobj *evobj, size_t waitsz,
+		     struct eventobj_waitentry *waitlist,
+		     unsigned int *bits_r)
+{
+	struct threadobj *thobj;
+	struct syncstate syns;
+	int ret, nrwait;
+
+	ret = syncobj_lock(&evobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
+	nrwait = syncobj_count_grant(&evobj->core.sobj);
+	if (nrwait > 0) {
+		syncobj_for_each_grant_waiter(&evobj->core.sobj, thobj) {
+			waitlist->pid = threadobj_get_pid(thobj);
+			strcpy(waitlist->name, threadobj_get_name(thobj));
+			waitlist++;
+		}
+	}
+
+	*bits_r = evobj->core.value;
+
+	syncobj_unlock(&evobj->core.sobj, &syns);
+
+	return nrwait;
+}
+
+#endif /* CONFIG_XENO_MERCURY */
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-heapmem.c b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-heapmem.c
new file mode 100644
index 0000000..cc3eb0a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-heapmem.c
@@ -0,0 +1,118 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdlib.h>
+#include "boilerplate/heapmem.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/debug.h"
+#include "copperplate/tunables.h"
+#include "xenomai/init.h"
+
+#define MIN_HEAPMEM_HEAPSZ  (64 * 1024)
+
+struct heap_memory heapmem_main;
+
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+			   size_t size, void *mem)
+{
+	struct heap_memory *heap;
+	void *_mem = mem;
+	int ret;
+
+	heap = malloc(sizeof(*heap));
+	if (heap == NULL)
+		return -ENOMEM;
+
+	if (mem == NULL) {
+		size = HEAPMEM_ARENA_SIZE(size); /* Count meta-data in. */
+		mem = malloc(size);
+		if (mem == NULL) {
+			free(heap);
+			return -ENOMEM;
+		}
+	}
+
+	if (name)
+		snprintf(hobj->name, sizeof(hobj->name), "%s", name);
+	else
+		snprintf(hobj->name, sizeof(hobj->name), "%p", hobj);
+
+	hobj->pool = heap;
+	hobj->size = size;
+
+	ret = heapmem_init(hobj->pool, mem, size);
+	if (ret) {
+		if (_mem == NULL)
+			free(mem);
+		free(heap);
+		return ret;
+	}
+
+	return 0;
+}
+
+int heapobj_init_array_private(struct heapobj *hobj, const char *name,
+			       size_t size, int elems)
+{
+	size_t log2;
+
+	if (size == 0 || elems <= 0)
+		return __bt(-EINVAL);
+
+	log2 = sizeof(size) * CHAR_BIT - 1 -
+		xenomai_count_leading_zeros(size);
+
+	/*
+	 * Heapmem aligns individual object sizes on the next
+	 * power-of-two boundary; do likewise when determining the
+	 * overall heap size, so that we can allocate as many as
+	 * @elems items.
+	 */
+	if (size & (size - 1))
+		log2++;
+
+	size = 1 << log2;
+
+	return __bt(__heapobj_init_private(hobj, name,
+					   size * elems, NULL));
+}
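+
+/*
+ * Worked example of the rounding above (illustrative): for size == 48
+ * on a 64-bit build, log2 == 5; since 48 is not a power of two
+ * (48 & 47 != 0), log2 is bumped to 6, so each cell is accounted for
+ * as 64 bytes and the heap is sized for 64 * @elems bytes, matching
+ * heapmem's own per-object rounding.
+ */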
+
+int heapobj_pkg_init_private(void)
+{
+	size_t size;
+	void *mem;
+	int ret;
+
+#ifdef CONFIG_XENO_PSHARED
+	size = MIN_HEAPMEM_HEAPSZ;
+#else
+	size = __copperplate_setup_data.mem_pool;
+	if (size < MIN_HEAPMEM_HEAPSZ)
+		size = MIN_HEAPMEM_HEAPSZ;
+#endif
+	size = HEAPMEM_ARENA_SIZE(size);
+	mem = malloc(size);
+	if (mem == NULL)
+		return -ENOMEM;
+
+	ret = heapmem_init(&heapmem_main, mem, size);
+	if (ret) {
+		free(mem);
+		return ret;
+	}
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-malloc.c b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-malloc.c
new file mode 100644
index 0000000..cde5672
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-malloc.c
@@ -0,0 +1,192 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <string.h>
+#include <malloc.h>
+#include <assert.h>
+#include <pthread.h>
+#include "boilerplate/lock.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/debug.h"
+
+#define MALLOC_MAGIC 0xabbfcddc
+
+struct pool_header {
+	pthread_mutex_t lock;
+	size_t used;
+};
+
+struct block_header {
+	unsigned int magic;
+	size_t size;
+};
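+
+/*
+ * Illustrative layout note (not part of the original code): each
+ * chunk obtained from malloc(3) by pvheapobj_alloc() is prefixed
+ * with a block_header, so that pvheapobj_free() and
+ * pvheapobj_validate() can recover the magic marker and block size
+ * from the user pointer:
+ *
+ *	+-----------------+------------------------+
+ *	|  block_header   |      user payload      |
+ *	|  magic | size   |                        |
+ *	+-----------------+------------------------+
+ *	^                 ^
+ *	malloc'ed chunk   pointer returned (bh + 1)
+ */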
+
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+			   size_t size, void *mem)
+{
+	pthread_mutexattr_t mattr;
+	struct pool_header *ph;
+	int ret;
+
+	/*
+	 * There is no local pool when working with malloc; we just
+	 * use the global process arena. This should not be an issue
+	 * since this mode is aimed at debugging, particularly to be
+	 * used along with Valgrind.
+	 *
+	 * However, we maintain a control header to track the amount
+	 * of memory currently consumed in each heap.
+	 */
+	ph = malloc(sizeof(*ph));
+	if (ph == NULL)
+		return __bt(-ENOMEM);
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&ph->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret) {
+		free(ph);
+		return ret;
+	}
+
+	ph->used = 0;
+
+	hobj->pool = ph;
+	hobj->size = size;
+	if (name)
+		snprintf(hobj->name, sizeof(hobj->name), "%s", name);
+	else
+		snprintf(hobj->name, sizeof(hobj->name), "%p", hobj);
+
+	return 0;
+}
+
+int heapobj_init_array_private(struct heapobj *hobj, const char *name,
+			       size_t size, int elems)
+{
+	if (size == 0 || elems <= 0)
+		return __bt(-EINVAL);
+
+	return __bt(__heapobj_init_private(hobj, name, size * elems, NULL));
+}
+
+void pvheapobj_destroy(struct heapobj *hobj)
+{
+	struct pool_header *ph = hobj->pool;
+
+	__RT(pthread_mutex_destroy(&ph->lock));
+	free(ph);
+}
+
+int pvheapobj_extend(struct heapobj *hobj, size_t size, void *mem)
+{
+	struct pool_header *ph = hobj->pool;
+
+	write_lock_nocancel(&ph->lock);
+	hobj->size += size;
+	write_unlock(&ph->lock);
+
+	return 0;
+}
+
+void *pvheapobj_alloc(struct heapobj *hobj, size_t size)
+{
+	struct pool_header *ph = hobj->pool;
+	struct block_header *bh;
+	void *ptr;
+
+	write_lock(&ph->lock);
+
+	ph->used += size;
+	/* Enforce hard limit. */
+	if (ph->used > hobj->size)
+		goto fail;
+
+	write_unlock(&ph->lock);
+
+	/* malloc(3) is not a cancellation point. */
+	ptr = malloc(size + sizeof(*bh));
+	if (ptr == NULL) {
+		write_lock(&ph->lock);
+		goto fail;
+	}
+
+	bh = ptr;
+	bh->magic = MALLOC_MAGIC;
+	bh->size = size;
+
+	return bh + 1;
+fail:
+	ph->used -= size;
+	write_unlock(&ph->lock);
+
+	return NULL;
+}
+
+void pvheapobj_free(struct heapobj *hobj, void *ptr)
+{
+	struct block_header *bh = ptr - sizeof(*bh);
+	struct pool_header *ph = hobj->pool;
+
+	assert(hobj->size >= bh->size);
+	write_lock(&ph->lock);
+	ph->used -= bh->size;
+	write_unlock(&ph->lock);
+	free(bh);
+}
+
+size_t pvheapobj_inquire(struct heapobj *hobj)
+{
+	struct pool_header *ph = hobj->pool;
+
+	return ph->used;
+}
+
+size_t pvheapobj_validate(struct heapobj *hobj, void *ptr)
+{
+	struct block_header *bh;
+
+	/* Catch trivially wrong cases: NULL or unaligned. */
+	if (ptr == NULL)
+		return 0;
+
+	if ((unsigned long)ptr & (sizeof(unsigned long)-1))
+		return 0;
+
+	/*
+	 * We will likely get hard validation here, i.e. crash or
+	 * abort if the pointer is out of the address space. TLSF is a
+	 * bit smarter, and pshared definitely does the right thing.
+	 */
+
+	bh = ptr - sizeof(*bh);
+	if (bh->magic != MALLOC_MAGIC)
+		return 0;
+
+	return bh->size;
+}
+
+int heapobj_pkg_init_private(void)
+{
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-pshared.c b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-pshared.c
new file mode 100644
index 0000000..5952d9f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-pshared.c
@@ -0,0 +1,1269 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * This code is adapted from Xenomai's original dual kernel xnheap
+ * support. It is simple and efficient enough for managing dynamic
+ * memory allocation backed by a tmpfs file, which we can share
+ * between multiple processes in user-space.
+ */
+
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/file.h>
+#include <sys/stat.h>
+#include <stdbool.h>
+#include <assert.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <string.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <malloc.h>
+#include <unistd.h>
+#include "boilerplate/list.h"
+#include "boilerplate/hash.h"
+#include "boilerplate/lock.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/debug.h"
+#include "xenomai/init.h"
+#include "internal.h"
+
+enum sheapmem_pgtype {
+	page_free = 0,
+	page_cont = 1,
+	page_list = 2
+};
+
+static struct shavl_searchops size_search_ops;
+static struct shavl_searchops addr_search_ops;
+
+/*
+ * The main heap consists of a shared heap at its core, with
+ * additional session-wide information.
+ */
+struct session_heap {
+	struct shared_heap_memory heap;
+	int cpid;
+	memoff_t maplen;
+	struct hash_table catalog;
+	struct sysgroup sysgroup;
+};
+
+/*
+ * The base address of the shared memory heap, as seen by each
+ * individual process. Its control block is always first, so that
+ * different processes can access this information right after the
+ * segment is mmapped. This also ensures that offset 0 will never
+ * refer to a valid page or block.
+ */
+void *__main_heap;
+#define main_heap	(*(struct session_heap *)__main_heap)
+/*
+ *  Base address for offset-based addressing, which is the start of
+ *  the session heap since all memory objects are allocated from it,
+ *  including other (sub-)heaps.
+ */
+#define main_base	__main_heap
+
+/* A table of shared clusters for the session. */
+struct hash_table *__main_catalog;
+
+/* Pointer to the system list group. */
+struct sysgroup *__main_sysgroup;
+
+static struct heapobj main_pool;
+
+#define __shoff(b, p)		((void *)(p) - (void *)(b))
+#define __shoff_check(b, p)	((p) ? __shoff(b, p) : 0)
+#define __shref(b, o)		((void *)((void *)(b) + (o)))
+#define __shref_check(b, o)	((o) ? __shref(b, o) : NULL)
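+
+/*
+ * Illustrative example (not part of the original code): a block
+ * living at main_base + 0x1000 in the session segment is stored as
+ * the offset 0x1000, i.e. __shoff(main_base, p). Any process mapping
+ * the same segment at a different virtual address gets the pointer
+ * back with __shref(main_base, 0x1000), which makes such references
+ * position-independent across processes.
+ */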
+
+static inline uint32_t __attribute__ ((always_inline))
+gen_block_mask(int log2size)
+{
+	return -1U >> (32 - (SHEAPMEM_PAGE_SIZE >> log2size));
+}
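+
+/*
+ * Worked example (illustrative, assuming the stock 512-byte
+ * SHEAPMEM_PAGE_SIZE): with log2size == 5, a page holds
+ * 512 >> 5 == 16 blocks, so gen_block_mask() yields
+ * -1U >> (32 - 16) == 0x0000ffff, i.e. one bit per block. The
+ * allocation map of a fresh page is then set to
+ * ~0xffff | 1 == 0xffff0001: the unusable high bits plus slot #0.
+ */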
+
+static inline __attribute__ ((always_inline))
+int addr_to_pagenr(struct sheapmem_extent *ext, void *p)
+{
+	return (p - __shref(main_base, ext->membase)) >> SHEAPMEM_PAGE_SHIFT;
+}
+
+static inline __attribute__ ((always_inline))
+void *pagenr_to_addr(struct sheapmem_extent *ext, int pg)
+{
+	return __shref(main_base, ext->membase + (pg << SHEAPMEM_PAGE_SHIFT));
+}
+
+#ifdef CONFIG_XENO_DEBUG_FULL
+/*
+ * Setting page_cont/page_free in the page map is only required for
+ * enabling full checking of the block address in free requests, which
+ * may be extremely time-consuming when deallocating huge blocks
+ * spanning thousands of pages. We only do such marking when running
+ * in full debug mode.
+ */
+static inline bool
+page_is_valid(struct sheapmem_extent *ext, int pg)
+{
+	switch (ext->pagemap[pg].type) {
+	case page_free:
+	case page_cont:
+		return false;
+	case page_list:
+	default:
+		return true;
+	}
+}
+
+static void mark_pages(struct sheapmem_extent *ext,
+		       int pg, int nrpages,
+		       enum sheapmem_pgtype type)
+{
+	while (nrpages-- > 0)
+		ext->pagemap[pg].type = type;
+}
+
+#else
+
+static inline bool
+page_is_valid(struct sheapmem_extent *ext, int pg)
+{
+	return true;
+}
+
+static void mark_pages(struct sheapmem_extent *ext,
+		       int pg, int nrpages,
+		       enum sheapmem_pgtype type)
+{ }
+
+#endif
+
+ssize_t sheapmem_check(struct shared_heap_memory *heap, void *block)
+{
+	struct sheapmem_extent *ext;
+	memoff_t pg, pgoff, boff;
+	ssize_t ret = -EINVAL;
+	size_t bsize;
+
+	read_lock_nocancel(&heap->lock);
+
+	/*
+	 * Find the extent the checked block originates from.
+	 */
+	__list_for_each_entry(main_base, ext, &heap->extents, next) {
+		if (__shoff(main_base, block) >= ext->membase &&
+		    __shoff(main_base, block) < ext->memlim)
+			goto found;
+	}
+	goto out;
+found:
+	/* Calculate the page number from the block address. */
+	pgoff = __shoff(main_base, block) - ext->membase;
+	pg = pgoff >> SHEAPMEM_PAGE_SHIFT;
+	if (page_is_valid(ext, pg)) {
+		if (ext->pagemap[pg].type == page_list)
+			bsize = ext->pagemap[pg].bsize;
+		else {
+			bsize = (1 << ext->pagemap[pg].type);
+			boff = pgoff & ~SHEAPMEM_PAGE_MASK;
+			if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+				goto out;
+		}
+		ret = (ssize_t)bsize;
+	}
+out:
+	read_unlock(&heap->lock);
+
+	return ret;
+}
+
+static inline struct sheapmem_range *
+find_suitable_range(struct sheapmem_extent *ext, size_t size)
+{
+	struct sheapmem_range lookup;
+	struct shavlh *node;
+
+	lookup.size = size;
+	node = shavl_search_ge(&ext->size_tree, &lookup.size_node,
+			       &size_search_ops);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct sheapmem_range, size_node);
+}
+
+static int reserve_page_range(struct sheapmem_extent *ext, size_t size)
+{
+	struct sheapmem_range *new, *splitr;
+
+	new = find_suitable_range(ext, size);
+	if (new == NULL)
+		return -1;
+
+	shavl_delete(&ext->size_tree, &new->size_node);
+	if (new->size == size) {
+		shavl_delete(&ext->addr_tree, &new->addr_node);
+		return addr_to_pagenr(ext, new);
+	}
+
+	/*
+	 * The free range fetched is larger than what we need: split
+	 * it in two, the upper part goes to the user, the lower part
+	 * is returned to the free list, which makes reindexing by
+	 * address pointless.
+	 */
+	splitr = new;
+	splitr->size -= size;
+	new = (struct sheapmem_range *)((void *)new + splitr->size);
+	shavlh_init(&splitr->size_node);
+	shavl_insert_back(&ext->size_tree, &splitr->size_node,
+			  &size_search_ops);
+
+	return addr_to_pagenr(ext, new);
+}
+
+static inline struct sheapmem_range *
+find_left_neighbour(struct sheapmem_extent *ext, struct sheapmem_range *r)
+{
+	struct shavlh *node;
+
+	node = shavl_search_le(&ext->addr_tree, &r->addr_node,
+			       &addr_search_ops);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct sheapmem_range, addr_node);
+}
+
+static inline struct sheapmem_range *
+find_right_neighbour(struct sheapmem_extent *ext, struct sheapmem_range *r)
+{
+	struct shavlh *node;
+
+	node = shavl_search_ge(&ext->addr_tree, &r->addr_node,
+			       &addr_search_ops);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct sheapmem_range, addr_node);
+}
+
+static inline struct sheapmem_range *
+find_next_neighbour(struct sheapmem_extent *ext, struct sheapmem_range *r)
+{
+	struct shavlh *node;
+
+	node = shavl_next(&ext->addr_tree, &r->addr_node);
+	if (node == NULL)
+		return NULL;
+
+	return container_of(node, struct sheapmem_range, addr_node);
+}
+
+static inline bool
+ranges_mergeable(struct sheapmem_range *left, struct sheapmem_range *right)
+{
+	return (void *)left + left->size == (void *)right;
+}
+
+static void release_page_range(struct sheapmem_extent *ext,
+			       void *page, size_t size)
+{
+	struct sheapmem_range *freed = page, *left, *right;
+	bool addr_linked = false;
+
+	freed->size = size;
+
+	left = find_left_neighbour(ext, freed);
+	if (left && ranges_mergeable(left, freed)) {
+		shavl_delete(&ext->size_tree, &left->size_node);
+		left->size += freed->size;
+		freed = left;
+		addr_linked = true;
+		right = find_next_neighbour(ext, freed);
+	} else
+		right = find_right_neighbour(ext, freed);
+
+	if (right && ranges_mergeable(freed, right)) {
+		shavl_delete(&ext->size_tree, &right->size_node);
+		freed->size += right->size;
+		if (addr_linked)
+			shavl_delete(&ext->addr_tree, &right->addr_node);
+		else
+			shavl_replace(&ext->addr_tree, &right->addr_node,
+				      &freed->addr_node, &addr_search_ops);
+	} else if (!addr_linked) {
+		shavlh_init(&freed->addr_node);
+		if (left)
+			shavl_insert(&ext->addr_tree, &freed->addr_node,
+				&addr_search_ops);
+		else
+			shavl_prepend(&ext->addr_tree, &freed->addr_node,
+				      &addr_search_ops);
+	}
+
+	shavlh_init(&freed->size_node);
+	shavl_insert_back(&ext->size_tree, &freed->size_node,
+			  &size_search_ops);
+	mark_pages(ext, addr_to_pagenr(ext, page),
+		   size >> SHEAPMEM_PAGE_SHIFT, page_free);
+}
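+
+/*
+ * Coalescing example for the routine above (illustrative): releasing
+ * the page range [0x2000..0x3000) while the free range [0x1000..0x2000)
+ * already exists merges both into a single [0x1000..0x3000) range,
+ * reindexed once by size; the address index is left untouched since
+ * the merged range still starts at 0x1000.
+ */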
+
+static void add_page_front(struct shared_heap_memory *heap,
+			   struct sheapmem_extent *ext,
+			   int pg, int log2size)
+{
+	struct sheapmem_pgentry *new, *head, *next;
+	int ilog;
+
+	/* Insert page at the front of the per-bucket page list. */
+
+	ilog = log2size - SHEAPMEM_MIN_LOG2;
+	new = &ext->pagemap[pg];
+	if (heap->buckets[ilog] == -1U) {
+		heap->buckets[ilog] = pg;
+		new->prev = new->next = pg;
+	} else {
+		head = &ext->pagemap[heap->buckets[ilog]];
+		new->prev = heap->buckets[ilog];
+		new->next = head->next;
+		next = &ext->pagemap[new->next];
+		next->prev = pg;
+		head->next = pg;
+		heap->buckets[ilog] = pg;
+	}
+}
+
+static void remove_page(struct shared_heap_memory *heap,
+			struct sheapmem_extent *ext,
+			int pg, int log2size)
+{
+	struct sheapmem_pgentry *old, *prev, *next;
+	int ilog = log2size - SHEAPMEM_MIN_LOG2;
+
+	/* Remove page from the per-bucket page list. */
+
+	old = &ext->pagemap[pg];
+	if (pg == old->next)
+		heap->buckets[ilog] = -1U;
+	else {
+		if (pg == heap->buckets[ilog])
+			heap->buckets[ilog] = old->next;
+		prev = &ext->pagemap[old->prev];
+		prev->next = old->next;
+		next = &ext->pagemap[old->next];
+		next->prev = old->prev;
+	}
+}
+
+static void move_page_front(struct shared_heap_memory *heap,
+			    struct sheapmem_extent *ext,
+			    int pg, int log2size)
+{
+	int ilog = log2size - SHEAPMEM_MIN_LOG2;
+
+	/* Move page to the front of the per-bucket page list. */
+
+	if (heap->buckets[ilog] == pg)
+		return;	 /* Already at front, no move. */
+
+	remove_page(heap, ext, pg, log2size);
+	add_page_front(heap, ext, pg, log2size);
+}
+
+static void move_page_back(struct shared_heap_memory *heap,
+			   struct sheapmem_extent *ext,
+			   int pg, int log2size)
+{
+	struct sheapmem_pgentry *old, *last, *head, *next;
+	int ilog;
+
+	/* Move page to the end of the per-bucket page list. */
+
+	old = &ext->pagemap[pg];
+	if (pg == old->next) /* Singleton, no move. */
+		return;
+
+	remove_page(heap, ext, pg, log2size);
+
+	ilog = log2size - SHEAPMEM_MIN_LOG2;
+	head = &ext->pagemap[heap->buckets[ilog]];
+	last = &ext->pagemap[head->prev];
+	old->prev = head->prev;
+	old->next = last->next;
+	next = &ext->pagemap[old->next];
+	next->prev = pg;
+	last->next = pg;
+}
+
+static void *add_free_range(struct shared_heap_memory *heap, size_t bsize, int log2size)
+{
+	struct sheapmem_extent *ext;
+	size_t rsize;
+	int pg;
+
+	/*
+	 * Scanning each extent, search for a range of contiguous
+	 * pages in the extent. The range must be at least @bsize
+	 * long. @pg is the heading page number on success.
+	 */
+	rsize = __align_to(bsize, SHEAPMEM_PAGE_SIZE);
+	__list_for_each_entry(main_base, ext, &heap->extents, next) {
+		pg = reserve_page_range(ext, rsize);
+		if (pg >= 0)
+			goto found;
+	}
+
+	return NULL;
+
+found:
+	/*
+	 * Update the page entry.  If @log2size is non-zero
+	 * (i.e. bsize < SHEAPMEM_PAGE_SIZE), bsize is (1 << log2size)
+	 * between 2^SHEAPMEM_MIN_LOG2 and 2^(SHEAPMEM_PAGE_SHIFT - 1).
+	 * Save the log2 power into entry.type, then update the
+	 * per-page allocation bitmap to reserve the first block.
+	 *
+	 * Otherwise, we have a larger block which may span multiple
+	 * pages: set entry.type to page_list, indicating the start of
+	 * the page range, and entry.bsize to the overall block size.
+	 */
+	if (log2size) {
+		ext->pagemap[pg].type = log2size;
+		/*
+		 * Mark the first object slot (#0) as busy, along with
+		 * the leftmost bits we won't use for this log2 size.
+		 */
+		ext->pagemap[pg].map = ~gen_block_mask(log2size) | 1;
+		/*
+		 * Insert the new page at front of the per-bucket page
+		 * list, enforcing the assumption that pages with free
+		 * space live close to the head of this list.
+		 */
+		add_page_front(heap, ext, pg, log2size);
+	} else {
+		ext->pagemap[pg].type = page_list;
+		ext->pagemap[pg].bsize = (uint32_t)bsize;
+		mark_pages(ext, pg + 1,
+			   (bsize >> SHEAPMEM_PAGE_SHIFT) - 1, page_cont);
+	}
+
+	heap->used_size += bsize;
+
+	return pagenr_to_addr(ext, pg);
+}
+
+static void *sheapmem_alloc(struct shared_heap_memory *heap, size_t size)
+{
+	struct sheapmem_extent *ext;
+	int log2size, ilog, pg, b;
+	uint32_t bmask;
+	size_t bsize;
+	void *block;
+
+	if (size == 0)
+		return NULL;
+
+	if (size < SHEAPMEM_MIN_ALIGN) {
+		bsize = size = SHEAPMEM_MIN_ALIGN;
+		log2size = SHEAPMEM_MIN_LOG2;
+	} else {
+		log2size = sizeof(size) * CHAR_BIT - 1 -
+			xenomai_count_leading_zeros(size);
+		if (log2size < SHEAPMEM_PAGE_SHIFT) {
+			if (size & (size - 1))
+				log2size++;
+			bsize = 1 << log2size;
+		} else
+			bsize = __align_to(size, SHEAPMEM_PAGE_SIZE);
+	}
+
+	/*
+	 * Allocate entire pages directly from the pool whenever the
+	 * block is larger or equal to SHEAPMEM_PAGE_SIZE.  Otherwise,
+	 * use bucketed memory.
+	 *
+	 * NOTE: Fully busy pages from bucketed memory are moved to
+	 * the end of the per-bucket page list, so that we may
+	 * always assume that either the heading page has some room
+	 * available, or no room is available from any page linked to
+	 * this list, in which case we should immediately add a fresh
+	 * page.
+	 */
+	if (bsize < SHEAPMEM_PAGE_SIZE) {
+		ilog = log2size - SHEAPMEM_MIN_LOG2;
+		assert(ilog >= 0 && ilog < SHEAPMEM_MAX);
+
+		write_lock_nocancel(&heap->lock);
+
+		__list_for_each_entry(main_base, ext, &heap->extents, next) {
+			pg = heap->buckets[ilog];
+			if (pg < 0) /* Empty page list? */
+				continue;
+
+			/*
+			 * Find a block in the heading page. If there
+			 * is none, there won't be any down the list:
+			 * add a new page right away.
+			 */
+			bmask = ext->pagemap[pg].map;
+			if (bmask == -1U)
+				break;
+			b = xenomai_count_trailing_zeros(~bmask);
+
+			/*
+			 * Got one block from the heading per-bucket
+			 * page, tag it as busy in the per-page
+			 * allocation map.
+			 */
+			ext->pagemap[pg].map |= (1U << b);
+			heap->used_size += bsize;
+			block = __shref(main_base, ext->membase) +
+				(pg << SHEAPMEM_PAGE_SHIFT) +
+				(b << log2size);
+			if (ext->pagemap[pg].map == -1U)
+				move_page_back(heap, ext, pg, log2size);
+			goto out;
+		}
+
+		/* No free block in bucketed memory, add one page. */
+		block = add_free_range(heap, bsize, log2size);
+	} else {
+		write_lock_nocancel(&heap->lock);
+		/* Add a range of contiguous free pages. */
+		block = add_free_range(heap, bsize, 0);
+	}
+out:
+	write_unlock(&heap->lock);
+
+	return block;
+}
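+
+/*
+ * Worked example of the sizing policy above (illustrative, assuming
+ * the stock 512-byte SHEAPMEM_PAGE_SIZE): a 100-byte request rounds
+ * up to the 128-byte bucket (log2size == 7) and is served from
+ * bucketed pages, while a 2000-byte request is page-aligned to 2048
+ * bytes and served as a contiguous page range via add_free_range().
+ */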
+
+static int sheapmem_free(struct shared_heap_memory *heap, void *block)
+{
+	int log2size, ret = 0, pg, n;
+	struct sheapmem_extent *ext;
+	memoff_t pgoff, boff;
+	uint32_t oldmap;
+	size_t bsize;
+
+	write_lock_nocancel(&heap->lock);
+
+	/*
+	 * Find the extent the returned block originates from.
+	 */
+	__list_for_each_entry(main_base, ext, &heap->extents, next) {
+		if (__shoff(main_base, block) >= ext->membase &&
+		    __shoff(main_base, block) < ext->memlim)
+			goto found;
+	}
+
+	goto bad;
+found:
+	/* Compute the heading page number in the page map. */
+	pgoff = __shoff(main_base, block) - ext->membase;
+	pg = pgoff >> SHEAPMEM_PAGE_SHIFT;
+	if (!page_is_valid(ext, pg))
+		goto bad;
+
+	switch (ext->pagemap[pg].type) {
+	case page_list:
+		bsize = ext->pagemap[pg].bsize;
+		assert((bsize & (SHEAPMEM_PAGE_SIZE - 1)) == 0);
+		release_page_range(ext, pagenr_to_addr(ext, pg), bsize);
+		break;
+
+	default:
+		log2size = ext->pagemap[pg].type;
+		bsize = (1 << log2size);
+		assert(bsize < SHEAPMEM_PAGE_SIZE);
+		boff = pgoff & ~SHEAPMEM_PAGE_MASK;
+		if ((boff & (bsize - 1)) != 0) /* Not at block start? */
+			goto bad;
+
+		n = boff >> log2size; /* Block position in page. */
+		oldmap = ext->pagemap[pg].map;
+		ext->pagemap[pg].map &= ~(1U << n);
+
+		/*
+		 * If the page the block was sitting on is fully idle,
+		 * return it to the pool. Otherwise, check whether
+		 * that page is transitioning from fully busy to
+		 * partially busy state, in which case it should move
+		 * toward the front of the per-bucket page list.
+		 */
+		if (ext->pagemap[pg].map == ~gen_block_mask(log2size)) {
+			remove_page(heap, ext, pg, log2size);
+			release_page_range(ext, pagenr_to_addr(ext, pg),
+					   SHEAPMEM_PAGE_SIZE);
+		} else if (oldmap == -1U)
+			move_page_front(heap, ext, pg, log2size);
+	}
+
+	heap->used_size -= bsize;
+out:
+	write_unlock(&heap->lock);
+
+	return __bt(ret);
+bad:
+	ret = -EINVAL;
+	goto out;
+}
+
+static inline int compare_range_by_size(const struct shavlh *l, const struct shavlh *r)
+{
+	struct sheapmem_range *rl = container_of(l, typeof(*rl), size_node);
+	struct sheapmem_range *rr = container_of(r, typeof(*rl), size_node);
+
+	return avl_sign((long)(rl->size - rr->size));
+}
+static DECLARE_SHAVL_SEARCH(search_range_by_size, compare_range_by_size);
+
+static struct shavl_searchops size_search_ops = {
+	.search = search_range_by_size,
+	.cmp = compare_range_by_size,
+};
+
+static inline int compare_range_by_addr(const struct shavlh *l, const struct shavlh *r)
+{
+	uintptr_t al = (uintptr_t)l, ar = (uintptr_t)r;
+
+	return avl_cmp_sign(al, ar);
+}
+static DECLARE_SHAVL_SEARCH(search_range_by_addr, compare_range_by_addr);
+
+static struct shavl_searchops addr_search_ops = {
+	.search = search_range_by_addr,
+	.cmp = compare_range_by_addr,
+};
+
+static int add_extent(struct shared_heap_memory *heap, void *base,
+		      void *mem, size_t size)
+{
+	size_t user_size, overhead;
+	struct sheapmem_extent *ext;
+	int nrpages, state;
+
+	/*
+	 * @size must include the overhead memory we need for storing
+	 * our metadata as calculated by SHEAPMEM_ARENA_SIZE(); recover
+	 * that amount here.
+	 *
+	 * a = arena size (i.e. @size)
+	 * o = overhead
+	 * e = sizeof(sheapmem_extent)
+	 * p = SHEAPMEM_PAGE_SIZE
+	 * m = SHEAPMEM_PGMAP_BYTES
+	 *
+	 * With n user pages, a = o + n * p and o = e + n * m, which
+	 * solves to:
+	 *
+	 * o = align_to(((a * m + e * p) / (p + m)), SHEAPMEM_MIN_ALIGN)
+	 */
+	overhead = __align_to((size * SHEAPMEM_PGMAP_BYTES +
+			       sizeof(*ext) * SHEAPMEM_PAGE_SIZE) /
+			      (SHEAPMEM_PAGE_SIZE + SHEAPMEM_PGMAP_BYTES),
+			      SHEAPMEM_MIN_ALIGN);
+
+	user_size = size - overhead;
+	if (user_size & ~SHEAPMEM_PAGE_MASK)
+		return -EINVAL;
+
+	if (user_size < SHEAPMEM_PAGE_SIZE ||
+	    user_size > SHEAPMEM_MAX_EXTSZ)
+		return -EINVAL;
+
+	/*
+	 * Setup an extent covering user_size bytes of user memory
+	 * starting at @mem. user_size must be a multiple of
+	 * SHEAPMEM_PAGE_SIZE.  The extent starts with a descriptor,
+	 * followed by the array of page entries.
+	 *
+	 * Page entries contain per-page metadata for managing the
+	 * page pool.
+	 *
+	 * +-------------------+ <= mem
+	 * | extent descriptor |
+	 * /...................\
+	 * \...page entries[]../
+	 * /...................\
+	 * +-------------------+ <= extent->membase
+	 * |                   |
+	 * |                   |
+	 * |    (page pool)    |
+	 * |                   |
+	 * |                   |
+	 * +-------------------+
+	 *                       <= extent->memlim == mem + size
+	 */
+	nrpages = user_size >> SHEAPMEM_PAGE_SHIFT;
+	ext = mem;
+	ext->membase = __shoff(base, mem) + overhead;
+	ext->memlim = __shoff(base, mem) + size;
+
+	memset(ext->pagemap, 0, nrpages * sizeof(struct sheapmem_pgentry));
+
+	/*
+	 * The free page pool is maintained as a set of ranges of
+	 * contiguous pages indexed by address and size in AVL
+	 * trees. Initially, we have a single range in those trees
+	 * covering the whole user memory we have been given for the
+	 * extent. Over time, that range will be split then possibly
+	 * re-merged back as allocations and deallocations take place.
+	 */
+	shavl_init(&ext->size_tree);
+	shavl_init(&ext->addr_tree);
+	release_page_range(ext, __shref(base, ext->membase), user_size);
+
+	write_lock_safe(&heap->lock, state);
+	__list_append(base, &ext->next, &heap->extents);
+	heap->arena_size += size;
+	heap->usable_size += user_size;
+	write_unlock_safe(&heap->lock, state);
+
+	return 0;
+}
+
+static int sheapmem_init(struct shared_heap_memory *heap, void *base,
+			 const char *name,
+			 void *mem, size_t size)
+{
+	pthread_mutexattr_t mattr;
+	int ret, n;
+
+	namecpy(heap->name, name);
+	heap->used_size = 0;
+	heap->usable_size = 0;
+	heap->arena_size = 0;
+	__list_init_nocheck(base, &heap->extents);
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
+	ret = __bt(-__RT(pthread_mutex_init(&heap->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	/* Reset bucket page lists, all empty. */
+	for (n = 0; n < SHEAPMEM_MAX; n++)
+		heap->buckets[n] = -1U;
+
+	ret = add_extent(heap, base, mem, size);
+	if (ret) {
+		__RT(pthread_mutex_destroy(&heap->lock));
+		return ret;
+	}
+
+	return 0;
+}
+
+static int init_main_heap(struct session_heap *m_heap,
+			  size_t size)
+{
+	pthread_mutexattr_t mattr;
+	int ret;
+
+	ret = sheapmem_init(&m_heap->heap, m_heap, "main", m_heap + 1, size);
+	if (ret)
+		return __bt(ret);
+
+	m_heap->cpid = get_thread_pid();
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_SHARED);
+	ret = __bt(-__RT(pthread_mutex_init(&m_heap->sysgroup.lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	__hash_init(m_heap, &m_heap->catalog);
+	m_heap->sysgroup.thread_count = 0;
+	__list_init(m_heap, &m_heap->sysgroup.thread_list);
+	m_heap->sysgroup.heap_count = 0;
+	__list_init(m_heap, &m_heap->sysgroup.heap_list);
+
+	return 0;
+}
+
+#ifndef CONFIG_XENO_REGISTRY
+static void unlink_main_heap(void)
+{
+	/*
+	 * Only the master process runs this when there is no registry
+	 * support (i.e. the one which has initialized the main shared
+	 * heap for the session). When the registry is enabled,
+	 * sysregd does the housekeeping.
+	 */
+	shm_unlink(main_pool.fsname);
+}
+#endif
+
+static int create_main_heap(pid_t *cnode_r)
+{
+	const char *session = __copperplate_setup_data.session_label;
+	size_t size = __copperplate_setup_data.mem_pool, pagesz;
+	gid_t gid = __copperplate_setup_data.session_gid;
+	struct heapobj *hobj = &main_pool;
+	struct session_heap *m_heap;
+	struct stat sbuf;
+	memoff_t len;
+	int ret, fd;
+
+	*cnode_r = -1;
+	pagesz = sysconf(_SC_PAGESIZE);
+
+	/*
+	 * A storage page should obviously be larger than an extent
+	 * header, but we still make sure of this in debug mode, so
+	 * that we can rely on __align_to() for rounding to the
+	 * minimum size in production builds, without any further
+	 * test (e.g. like size >= sizeof(struct sheapmem_extent)).
+	 */
+	assert(SHEAPMEM_PAGE_SIZE > sizeof(struct sheapmem_extent));
+	size = SHEAPMEM_ARENA_SIZE(size);
+	len = __align_to(size + sizeof(*m_heap), pagesz);
+
+	/*
+	 * Bind to (and optionally create) the main session's heap:
+	 *
+	 * If the heap already exists, check whether the leading
+	 * process who created it is still alive, in which case we'll
+	 * bind to it, unless the requested size differs.
+	 *
+	 * Otherwise, create the heap for the new emerging session and
+	 * bind to it.
+	 */
+	snprintf(hobj->name, sizeof(hobj->name), "%s.heap", session);
+	snprintf(hobj->fsname, sizeof(hobj->fsname),
+		 "/xeno:%s", hobj->name);
+
+	fd = shm_open(hobj->fsname, O_RDWR|O_CREAT, 0660);
+	if (fd < 0)
+		return __bt(-errno);
+
+	ret = flock(fd, LOCK_EX);
+	if (__bterrno(ret))
+		goto errno_fail;
+
+	ret = fstat(fd, &sbuf);
+	if (__bterrno(ret))
+		goto errno_fail;
+
+	if (sbuf.st_size == 0)
+		goto init;
+
+	m_heap = __STD(mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0));
+	if (m_heap == MAP_FAILED) {
+		ret = __bt(-errno);
+		goto close_fail;
+	}
+
+	if (m_heap->cpid == 0)
+		goto reset;
+
+	if (copperplate_probe_tid(m_heap->cpid) == 0) {
+		if (m_heap->maplen == len) {
+			/* CAUTION: __moff() depends on __main_heap. */
+			__main_heap = m_heap;
+			__main_sysgroup = &m_heap->sysgroup;
+			hobj->pool_ref = __moff(&m_heap->heap);
+			goto done;
+		}
+		*cnode_r = m_heap->cpid;
+		munmap(m_heap, len);
+		__STD(close(fd));
+		return __bt(-EEXIST);
+	}
+reset:
+	munmap(m_heap, len);
+	/*
+	 * Reset shared memory ownership to revoke permissions from a
+	 * former session with more permissive access rules, such as
+	 * group-controlled access.
+	 */
+	ret = fchown(fd, geteuid(), getegid());
+	(void)ret;
+init:
+#ifndef CONFIG_XENO_REGISTRY
+	atexit(unlink_main_heap);
+#endif
+
+	ret = ftruncate(fd, 0);  /* Clear all previous contents if any. */
+	if (__bterrno(ret))
+		goto unlink_fail;
+
+	ret = ftruncate(fd, len);
+	if (__bterrno(ret))
+		goto unlink_fail;
+
+	/*
+	 * If we need to share the heap between members of a group,
+	 * give the group RW access to the shared memory file backing
+	 * the heap.
+	 */
+	if (gid != USHRT_MAX) {
+		ret = fchown(fd, geteuid(), gid);
+		if (__bterrno(ret) < 0)
+			goto unlink_fail;
+		ret = fchmod(fd, 0660);
+		if (__bterrno(ret) < 0)
+			goto unlink_fail;
+	}
+
+	m_heap = __STD(mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0));
+	if (m_heap == MAP_FAILED) {
+		ret = __bt(-errno);
+		goto unlink_fail;
+	}
+
+	__main_heap = m_heap;
+
+	m_heap->maplen = len;
+	/* CAUTION: init_main_heap() depends on hobj->pool_ref. */
+	hobj->pool_ref = __moff(&m_heap->heap);
+	ret = __bt(init_main_heap(m_heap, size));
+	if (ret) {
+		errno = -ret;
+		goto unmap_fail;
+	}
+
+	/* We need these globals set up before updating a sysgroup. */
+	__main_sysgroup = &m_heap->sysgroup;
+	sysgroup_add(heap, &m_heap->heap.memspec);
+done:
+	flock(fd, LOCK_UN);
+	__STD(close(fd));
+	hobj->size = m_heap->heap.usable_size;
+	__main_catalog = &m_heap->catalog;
+
+	return 0;
+unmap_fail:
+	munmap(m_heap, len);
+unlink_fail:
+	ret = -errno;
+	shm_unlink(hobj->fsname);
+	goto close_fail;
+errno_fail:
+	ret = __bt(-errno);
+close_fail:
+	__STD(close(fd));
+
+	return ret;
+}
+
+static int bind_main_heap(const char *session)
+{
+	struct heapobj *hobj = &main_pool;
+	struct session_heap *m_heap;
+	int ret, fd, cpid;
+	struct stat sbuf;
+	memoff_t len;
+
+	/* No error tracking, this is for internal users. */
+
+	snprintf(hobj->name, sizeof(hobj->name), "%s.heap", session);
+	snprintf(hobj->fsname, sizeof(hobj->fsname),
+		 "/xeno:%s", hobj->name);
+
+	fd = shm_open(hobj->fsname, O_RDWR, 0400);
+	if (fd < 0)
+		return -errno;
+
+	ret = flock(fd, LOCK_EX);
+	if (ret)
+		goto errno_fail;
+
+	ret = fstat(fd, &sbuf);
+	if (ret)
+		goto errno_fail;
+
+	len = sbuf.st_size;
+	if (len < sizeof(*m_heap)) {
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	m_heap = __STD(mmap(NULL, len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0));
+	if (m_heap == MAP_FAILED)
+		goto errno_fail;
+
+	cpid = m_heap->cpid;
+	__STD(close(fd));
+
+	if (cpid == 0 || copperplate_probe_tid(cpid)) {
+		munmap(m_heap, len);
+		return -ENOENT;
+	}
+
+	hobj->pool_ref = __moff(&m_heap->heap);
+	hobj->size = m_heap->heap.usable_size;
+	__main_heap = m_heap;
+	__main_catalog = &m_heap->catalog;
+	__main_sysgroup = &m_heap->sysgroup;
+
+	return 0;
+
+errno_fail:
+	ret = -errno;
+fail:
+	__STD(close(fd));
+
+	return ret;
+}
+
+int pshared_check(void *__heap, void *__addr)
+{
+	struct shared_heap_memory *heap = __heap;
+	struct sheapmem_extent *extent;
+	struct session_heap *m_heap;
+
+	/*
+	 * Fast check for the main heap: we have a single extent for
+	 * this one, so the address shall fall into the file-backed
+	 * memory range.
+	 */
+	if (__moff(heap) == main_pool.pool_ref) {
+		m_heap = container_of(heap, struct session_heap, heap);
+		return __addr >= (void *)m_heap &&
+			__addr < (void *)m_heap + m_heap->maplen;
+	}
+
+	/*
+	 * Secondary (nested) heap: some refs may fall into the
+	 * header, check for this first.
+	 */
+	if (__addr >= __heap && __addr < __heap + sizeof(*heap))
+		return 1;
+
+	/*
+	 * This address must be referring to some payload data within
+	 * the nested heap, check that it falls into one of the heap
+	 * extents.
+	 */
+	assert(!list_empty(&heap->extents));
+
+	__list_for_each_entry(main_base, extent, &heap->extents, next) {
+		if (__shoff(main_base, __addr) >= extent->membase &&
+		    __shoff(main_base, __addr) < extent->memlim)
+			return 1;
+	}
+
+	return 0;
+}
+
+int heapobj_init(struct heapobj *hobj, const char *name, size_t size)
+{
+	const char *session = __copperplate_setup_data.session_label;
+	struct shared_heap_memory *heap;
+	size_t len;
+
+	size = SHEAPMEM_ARENA_SIZE(size);
+	len = size + sizeof(*heap);
+
+	/*
+	 * Create a heap nested in the main shared heap to hold data
+	 * we can share among processes which belong to the same
+	 * session.
+	 */
+	heap = sheapmem_alloc(&main_heap.heap, len);
+	if (heap == NULL) {
+		warning("%s() failed for %zu bytes, raise --mem-pool-size?",
+			__func__, len);
+		return __bt(-ENOMEM);
+	}
+
+	if (name)
+		snprintf(hobj->name, sizeof(hobj->name), "%s.%s",
+			 session, name);
+	else
+		snprintf(hobj->name, sizeof(hobj->name), "%s.%p",
+			 session, hobj);
+
+	sheapmem_init(heap, main_base, hobj->name, heap + 1, size);
+	hobj->pool_ref = __moff(heap);
+	hobj->size = heap->usable_size;
+	sysgroup_add(heap, &heap->memspec);
+
+	return 0;
+}
+
+int heapobj_init_array(struct heapobj *hobj, const char *name,
+		       size_t size, int elems)
+{
+	int log2size;
+
+	if (size < SHEAPMEM_MIN_ALIGN) {
+		size = SHEAPMEM_MIN_ALIGN;
+	} else {
+		log2size = sizeof(size) * CHAR_BIT - 1 -
+			xenomai_count_leading_zeros(size);
+		if (log2size < SHEAPMEM_PAGE_SHIFT) {
+			if (size & (size - 1))
+				log2size++;
+			size = 1 << log2size;
+		} else
+			size = __align_to(size, SHEAPMEM_PAGE_SIZE);
+	}
+
+	return __bt(heapobj_init(hobj, name, size * elems));
+}
+
+void heapobj_destroy(struct heapobj *hobj)
+{
+	struct shared_heap_memory *heap = __mptr(hobj->pool_ref);
+	int cpid;
+
+	if (hobj != &main_pool) {
+		__RT(pthread_mutex_destroy(&heap->lock));
+		sysgroup_remove(heap, &heap->memspec);
+		sheapmem_free(&main_heap.heap, heap);
+		return;
+	}
+
+	cpid = main_heap.cpid;
+	if (cpid != 0 && cpid != get_thread_pid() &&
+	    copperplate_probe_tid(cpid) == 0) {
+		munmap(&main_heap, main_heap.maplen);
+		return;
+	}
+
+	__RT(pthread_mutex_destroy(&heap->lock));
+	__RT(pthread_mutex_destroy(&main_heap.sysgroup.lock));
+	munmap(&main_heap, main_heap.maplen);
+	shm_unlink(hobj->fsname);
+}
+
+int heapobj_extend(struct heapobj *hobj, size_t size, void *unused)
+{
+	struct shared_heap_memory *heap = __mptr(hobj->pool_ref);
+	void *mem;
+	int ret;
+
+	if (hobj == &main_pool)	/* Can't extend the main pool. */
+		return __bt(-EINVAL);
+
+	size = SHEAPMEM_ARENA_SIZE(size);
+	mem = sheapmem_alloc(&main_heap.heap, size);
+	if (mem == NULL)
+		return __bt(-ENOMEM);
+
+	ret = add_extent(heap, main_base, mem, size);
+	if (ret) {
+		sheapmem_free(&main_heap.heap, mem);
+		return __bt(ret);
+	}
+
+	hobj->size += size;
+
+	return 0;
+}
+
+void *heapobj_alloc(struct heapobj *hobj, size_t size)
+{
+	return sheapmem_alloc(__mptr(hobj->pool_ref), size);
+}
+
+void heapobj_free(struct heapobj *hobj, void *ptr)
+{
+	sheapmem_free(__mptr(hobj->pool_ref), ptr);
+}
+
+size_t heapobj_validate(struct heapobj *hobj, void *ptr)
+{
+	ssize_t ret = sheapmem_check(__mptr(hobj->pool_ref), ptr);
+	return ret < 0 ? 0 : ret;
+}
+
+size_t heapobj_inquire(struct heapobj *hobj)
+{
+	struct shared_heap_memory *heap = __mptr(hobj->pool_ref);
+	return heap->used_size;
+}
+
+size_t heapobj_get_size(struct heapobj *hobj)
+{
+	struct shared_heap_memory *heap = __mptr(hobj->pool_ref);
+	return heap->usable_size;
+}
+
+void *xnmalloc(size_t size)
+{
+	return sheapmem_alloc(&main_heap.heap, size);
+}
+
+void xnfree(void *ptr)
+{
+	sheapmem_free(&main_heap.heap, ptr);
+}
+
+char *xnstrdup(const char *ptr)
+{
+	char *str;
+
+	str = xnmalloc(strlen(ptr) + 1);
+	if (str == NULL)
+		return NULL;
+
+	return strcpy(str, ptr);
+}
+
+int heapobj_pkg_init_shared(void)
+{
+	pid_t cnode;
+	int ret;
+
+	ret = create_main_heap(&cnode);
+	if (ret == -EEXIST)
+		warning("session %s is still active (pid %d)\n",
+			__copperplate_setup_data.session_label, cnode);
+
+	return __bt(ret);
+}
+
+int heapobj_bind_session(const char *session)
+{
+	/* No error tracking, this is for internal users. */
+	return bind_main_heap(session);
+}
+
+void heapobj_unbind_session(void)
+{
+	size_t len = main_heap.maplen;
+
+	munmap(&main_heap, len);
+}
+
+int heapobj_unlink_session(const char *session)
+{
+	char *path;
+	int ret;
+
+	ret = asprintf(&path, "/xeno:%s.heap", session);
+	if (ret < 0)
+		return -ENOMEM;
+	ret = shm_unlink(path) ? -errno : 0;
+	free(path);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-tlsf.c b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-tlsf.c
new file mode 100644
index 0000000..2d57e3c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/heapobj-tlsf.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <stdio.h>
+#include <stdint.h>
+#include <unistd.h>
+#include "boilerplate/tlsf/tlsf.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/debug.h"
+#include "copperplate/threadobj.h"
+#include "xenomai/init.h"
+#include "internal.h"
+
+#if LONG_BIT == 32
+#define TLSF_BLOCK_ALIGN  (8 * 2)
+#else
+#define TLSF_BLOCK_ALIGN  (16 * 2)
+#endif
+
+static int tlsf_pool_overhead;
+
+int __heapobj_init_private(struct heapobj *hobj, const char *name,
+			   size_t size, void *mem)
+{
+	if (mem == NULL) {
+		/*
+		 * When the memory area is unspecified, obtain it from
+		 * the main pool, accounting for the TLSF overhead.
+		 */
+		size += tlsf_pool_overhead;
+		mem = tlsf_malloc(size);
+		if (mem == NULL)
+			return __bt(-ENOMEM);
+	}
+
+	if (name)
+		snprintf(hobj->name, sizeof(hobj->name), "%s", name);
+	else
+		snprintf(hobj->name, sizeof(hobj->name), "%p", hobj);
+
+	hobj->pool = mem;
+	/* Make sure to wipe out tlsf's signature. */
+	memset(mem, 0, size < 32 ? size : 32);
+	hobj->size = init_memory_pool(size, mem);
+	if (hobj->size == (size_t)-1)
+		return __bt(-EINVAL);
+
+	return 0;
+}
+
+int heapobj_init_array_private(struct heapobj *hobj, const char *name,
+			       size_t size, int elems)
+{
+	size_t poolsz;
+
+	if (size == 0 || elems <= 0)
+		return __bt(-EINVAL);
+
+	poolsz = (size + TLSF_BLOCK_ALIGN - 1) & ~(TLSF_BLOCK_ALIGN - 1);
+	poolsz *= elems;
+
+	return __bt(__heapobj_init_private(hobj, name, poolsz, NULL));
+}
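+
+/*
+ * Worked example of the rounding above (illustrative): on a 32-bit
+ * build TLSF_BLOCK_ALIGN is 16, so size == 20 rounds up to 32 bytes
+ * per element, and a 10-element array reserves a 320-byte pool, plus
+ * the fixed TLSF overhead added by __heapobj_init_private().
+ */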
+
+int heapobj_pkg_init_private(void)
+{
+	size_t alloc_size, available_size;
+	void *mem;
+
+#ifdef CONFIG_XENO_PSHARED
+	alloc_size = MIN_TLSF_HEAPSZ;
+#else
+	alloc_size = __copperplate_setup_data.mem_pool;
+	if (alloc_size < MIN_TLSF_HEAPSZ)
+		alloc_size = MIN_TLSF_HEAPSZ;
+#endif
+	/*
+	 * We want to know how many bytes of a memory pool TLSF will
+	 * consume for its own bookkeeping. We get the probe memory from
+	 * tlsf_malloc(), so that the main pool will be set up in the
+	 * same move.
+	 *
+	 * We include 1k of additional memory to cope with the
+	 * per-block overhead for an undefined number of individual
+	 * allocation requests. Ugly.
+	 *
+	 * CAUTION: in pshared mode, private heaps are subsidiary
+	 * storage pools, so no need to pre-commit as much memory as
+	 * we will be preallocating for the main shared pool,
+	 * especially with memory locking in effect. In that case,
+	 * creating a temporary single-page pool is enough to figure
+	 * out the allocation overhead.
+	 */
+	mem = tlsf_malloc(alloc_size);
+	available_size = init_memory_pool(alloc_size, mem);
+	if (available_size == (size_t)-1)
+		panic("cannot initialize TLSF memory manager");
+
+	destroy_memory_pool(mem);
+	tlsf_pool_overhead = alloc_size - available_size;
+	tlsf_pool_overhead = (tlsf_pool_overhead + 1024) & ~15;
+	tlsf_free(mem);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/init.c b/kernel/xenomai-v3.2.4/lib/copperplate/init.c
new file mode 100644
index 0000000..5bfccf6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/init.c
@@ -0,0 +1,385 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <string.h>
+#include <ctype.h>
+#include <pwd.h>
+#include <errno.h>
+#include <getopt.h>
+#include <grp.h>
+#include "copperplate/threadobj.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/clockobj.h"
+#include "copperplate/registry.h"
+#include "copperplate/timerobj.h"
+#include "xenomai/init.h"
+#include "internal.h"
+
+struct copperplate_setup_data __copperplate_setup_data = {
+	.mem_pool = 1024 * 1024, /* Default: 1MB. */
+	.no_registry = 0,
+	.registry_root = DEFAULT_REGISTRY_ROOT,
+	.session_label = NULL,
+	.session_root = NULL,
+	.session_gid = USHRT_MAX,
+};
+
+#ifdef CONFIG_XENO_COBALT
+int __cobalt_print_bufsz = 32 * 1024;
+#endif
+
+static const struct option copperplate_options[] = {
+	{
+#define mempool_opt	0
+		.name = "mem-pool-size",
+		.has_arg = required_argument,
+	},
+	{
+#define regroot_opt	1
+		.name = "registry-root",
+		.has_arg = required_argument,
+	},
+	{
+#define no_registry_opt	2
+		.name = "no-registry",
+		.has_arg = no_argument,
+		.flag = &__copperplate_setup_data.no_registry,
+		.val = 1
+	},
+	{
+#define session_opt	3
+		.name = "session",
+		.has_arg = required_argument,
+	},
+	{
+#define shared_registry_opt	4
+		.name = "shared-registry",
+		.has_arg = no_argument,
+		.flag = &__copperplate_setup_data.shared_registry,
+		.val = 1,
+	},
+	{ /* Sentinel */ }
+};
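+
+/*
+ * Example -- a hypothetical command line showing how these switches
+ * combine for an application linked against copperplate:
+ *
+ *	./app --mem-pool-size=2M --session=prod/audio \
+ *		--registry-root=/var/run/xenomai --shared-registry
+ *
+ * i.e. a 2MB main heap, session "prod" shared with members of group
+ * "audio", and a registry mounted under /var/run/xenomai which other
+ * users may access.
+ */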
+
+/*
+ * Routine to bring up the basic copperplate features, although not
+ * enough to run over a non-POSIX real-time interface. For internal
+ * code only, such as sysregd. No code traversed here should depend
+ * on __copperplate_setup_data.
+ */
+void copperplate_bootstrap_internal(const char *arg0, char *mountpt,
+				    int regflags)
+{
+	int ret;
+
+	__node_id = get_thread_pid();
+
+	CPU_ZERO(&__base_setup_data.cpu_affinity);
+
+	__boilerplate_init();
+
+	ret = heapobj_pkg_init_private();
+	if (ret) {
+		early_warning("failed to initialize main private heap");
+		goto fail;
+	}
+
+	ret = __registry_pkg_init(arg0, mountpt, regflags);
+	if (ret)
+		goto fail;
+
+	return;
+fail:
+	early_panic("initialization failed, %s", symerror(ret));
+}
+
+static int get_session_root(int *regflags_r)
+{
+	char *sessdir, *session;
+	struct passwd *pw;
+	int ret;
+
+	pw = getpwuid(geteuid());
+	if (pw == NULL)
+		return -errno;
+
+	if (__copperplate_setup_data.session_label == NULL) {
+		ret = asprintf(&session, "anon@%d", __node_id);
+		if (ret < 0)
+			return -ENOMEM;
+		__copperplate_setup_data.session_label = session;
+		*regflags_r |= REGISTRY_ANON;
+	} else if (strchr(__copperplate_setup_data.session_label, '/')) {
+		warning("session name may not contain slashes");
+		return -EINVAL;
+	}
+
+	ret = asprintf(&sessdir, "%s/%s/%s",
+		       __copperplate_setup_data.registry_root,
+		       pw->pw_name, __copperplate_setup_data.session_label);
+	if (ret < 0)
+		return -ENOMEM;
+
+	__copperplate_setup_data.session_root = sessdir;
+
+	if (__copperplate_setup_data.shared_registry)
+		*regflags_r |= REGISTRY_SHARED;
+
+	return 0;
+}
+
+static int get_session_label(const char *optarg)
+{
+	char *session, *grpname, *p;
+	struct group *grp;
+	gid_t gid;
+	int ret;
+
+	session = strdup(optarg);
+	if (!session)
+		return -ENOMEM;
+	grpname = strrchr(session, '/');
+	if (grpname == NULL)
+		goto no_group;
+
+	*grpname++ = '\0';
+
+	if (isdigit(*grpname)) {
+		gid = (gid_t)strtol(grpname, &p, 10);
+		if (*p) {
+			free(session);
+			return -EINVAL;
+		}
+		errno = 0;
+		grp = getgrgid(gid);
+	} else {
+		errno = 0;
+		grp = getgrnam(grpname);
+	}
+
+	if (grp == NULL) {
+		ret = errno ? -errno : -EINVAL;
+		warning("invalid group %s", grpname);
+		free(session);
+		return ret;
+	}
+
+	__copperplate_setup_data.session_gid = grp->gr_gid;
+no_group:
+	__copperplate_setup_data.session_label = session;
+
+	return 0;
+}
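
The accepted session syntax is `<label>[/<group>]`, with the group given either by name or by numeric gid. Below is a minimal self-contained sketch of the split performed above; the label and group names are illustrative only:

```c
#include <grp.h>
#include <stdio.h>
#include <string.h>

/* Illustration of the <label>[/<group>] split performed above. */
int main(void)
{
	char arg[] = "prod/realtime";	/* e.g. from --session= */
	char *grpname = strrchr(arg, '/');
	struct group *grp;

	if (grpname == NULL) {
		printf("session '%s', default gid\n", arg);
		return 0;
	}

	*grpname++ = '\0';
	grp = getgrnam(grpname);	/* getgrgid() if numeric */
	if (grp == NULL)
		return 1;

	printf("session '%s', gid %d\n", arg, (int)grp->gr_gid);

	return 0;
}
```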
+
+define_config_tunable(session_label, const char *, label)
+{
+	get_session_label(label);
+}
+
+static int copperplate_init(void)
+{
+	int ret, regflags = 0;
+
+	threadobj_init_key();
+
+	ret = heapobj_pkg_init_private();
+	if (ret) {
+		warning("failed to initialize main private heap");
+		return ret;
+	}
+
+	/*
+	 * We need the session label to be known before we create the
+	 * shared heap, which is named after the former.
+	 */
+	ret = get_session_root(&regflags);
+	if (ret)
+		return ret;
+
+	ret = heapobj_pkg_init_shared();
+	if (ret) {
+		warning("failed to initialize main shared heap");
+		return ret;
+	}
+
+	if (__copperplate_setup_data.no_registry == 0) {
+		ret = registry_pkg_init(__base_setup_data.arg0, regflags);
+		if (ret)
+			return ret;
+	}
+
+	ret = threadobj_pkg_init((regflags & REGISTRY_ANON) != 0);
+	if (ret) {
+		warning("failed to initialize multi-threading package");
+		return ret;
+	}
+
+	ret = timerobj_pkg_init();
+	if (ret) {
+		warning("failed to initialize timer support");
+		return ret;
+	}
+
+	return 0;
+}
+
+static int copperplate_parse_option(int optnum, const char *optarg)
+{
+	size_t memsz;
+	int ret;
+
+	switch (optnum) {
+	case mempool_opt:
+		memsz = get_mem_size(optarg);
+		if (memsz == 0)
+			return -EINVAL;
+		/*
+		 * Emulate former sloppy syntax: values below 64k are
+		 * likely to represent kilobytes, not bytes.
+		 */
+		if (isdigit(optarg[strlen(optarg)-1]) &&
+		    memsz < 64 * 1024) {
+			memsz *= 1024;
+			if (__base_setup_data.no_sanity == 0)
+				warning("--mem-pool-size=<size[K|M|G]>, using %Zu bytes", memsz);
+		}
+		__copperplate_setup_data.mem_pool = memsz;
+		break;
+	case session_opt:
+		ret = get_session_label(optarg);
+		if (ret)
+			return ret;
+		break;
+	case regroot_opt:
+		__copperplate_setup_data.registry_root = strdup(optarg);
+		break;
+	case shared_registry_opt:
+	case no_registry_opt:
+		break;
+	default:
+		/* Paranoid, can't happen. */
+		return -EINVAL;
+	}
+
+	return 0;
+}
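
For clarity, here is a minimal stand-alone sketch of the size heuristic handled above; parse_pool_size() is a hypothetical helper and does not claim to reproduce get_mem_size() exactly:

```c
#include <stdlib.h>

/*
 * Hypothetical illustration of the --mem-pool-size parsing rule
 * above: an explicit K/M/G suffix scales the value, while a plain
 * number below 64k is assumed to be given in kilobytes, emulating
 * the former sloppy syntax.
 */
static size_t parse_pool_size(const char *arg)
{
	char *end;
	size_t memsz = strtoul(arg, &end, 0);

	switch (*end) {
	case 'K': case 'k': memsz <<= 10; break;
	case 'M': case 'm': memsz <<= 20; break;
	case 'G': case 'g': memsz <<= 30; break;
	default:
		/* No suffix: small values likely mean kilobytes. */
		if (memsz > 0 && memsz < 64 * 1024)
			memsz <<= 10;
		break;
	}

	return memsz;	/* 0 denotes a parse error */
}
```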
+
+static void copperplate_help(void)
+{
+	fprintf(stderr, "--mem-pool-size=<size[K|M|G]> 	size of the main heap\n");
+	fprintf(stderr, "--no-registry			suppress object registration\n");
+	fprintf(stderr, "--shared-registry		enable public access to registry\n");
+	fprintf(stderr, "--registry-root=<path>		root path of registry\n");
+	fprintf(stderr, "--session=<label>[/<group>]	enable shared session\n");
+}
+
+static struct setup_descriptor copperplate_interface = {
+	.name = "copperplate",
+	.init = copperplate_init,
+	.options = copperplate_options,
+	.parse_option = copperplate_parse_option,
+	.help = copperplate_help,
+};
+
+copperplate_setup_call(copperplate_interface);
+
+/**
+ * @{
+ *
+ * @page api-tags API service tags
+ *
+ * All services from the Cobalt/POSIX library, or which belong to
+ * APIs based on the Copperplate library, may be restricted to
+ * particular calling contexts, or may entail specific side-effects.
+ *
+ * In dual kernel mode, the Cobalt API underlies all other
+ * application-oriented APIs, providing POSIX real-time services over
+ * the Cobalt real-time core. Therefore, the information below applies
+ * to all application-oriented APIs available with Xenomai, such as
+ * the Cobalt/POSIX library and the Alchemy API, as well as to all
+ * RTOS emulators. To convey this information, each service documented
+ * in this section bears a set of tags when applicable.
+ *
+ * The table below matches the tags used throughout the documentation
+ * with the description of their meaning for the caller.
+ *
+ * @attention By Xenomai thread, we mean any thread created by a
+ * Xenomai API service, including real-time Cobalt/POSIX threads in
+ * dual kernel mode. By regular/plain POSIX thread, we mean any thread
+ * directly created by the standard @a glibc-based POSIX service over
+ * Mercury or Cobalt (i.e. NPTL/linuxthreads __STD(pthread_create())),
+ * excluding those threads which have later been promoted to the
+ * real-time domain (aka "shadowed") over Cobalt.
+ *
+ * @par
+ * <b>Context tags</b>
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Context on entry</TH></TR>
+ * <TR><TD>xthread-only</TD>	<TD>Must be called from a Xenomai thread</TD></TR>
+ * <TR><TD>xhandler-only</TD>	<TD>Must be called from a Xenomai handler. See note.</TD></TR>
+ * <TR><TD>xcontext</TD>	<TD>May be called from any Xenomai context (thread or handler).</TD></TR>
+ * <TR><TD>pthread-only</TD>	<TD>Must be called from a regular POSIX thread</TD></TR>
+ * <TR><TD>thread-unrestricted</TD>	<TD>May be called from a Xenomai or regular POSIX thread indifferently</TD></TR>
+ * <TR><TD>xthread-nowait</TD>	<TD>May be called from a Xenomai thread unrestricted, or from a regular thread as a non-blocking service only. See note.</TD></TR>
+ * <TR><TD>unrestricted</TD>	<TD>May be called from any context previously described</TD></TR>
+ * </TABLE>
+ *
+ * @note A Xenomai handler is used for callback-based notifications
+ * from Copperplate-based APIs, such as timeouts. This context is @a
+ * NOT mapped to a regular Linux signal handler; it is actually
+ * underlaid by a special thread context, so that async-unsafe POSIX
+ * services may be invoked internally by the API implementation when
+ * running on behalf of such a handler. Therefore, calling Xenomai API
+ * services from asynchronous regular signal handlers is fundamentally
+ * unsafe.
+ *
+ * @note Over Cobalt, the main thread is a particular case, which
+ * starts as a regular POSIX thread, then is automatically switched to
+ * a Cobalt thread as part of the initialization process, before the
+ * main() routine is invoked, unless automatic bootstrap was disabled
+ * (see
+ * http://xenomai.org/2015/05/application-setup-and-init/#Application_entry_CC).
+ *
+ * @par
+ * <b>Possible side-effects when running the application over the
+ * Cobalt core (i.e. dual kernel configuration)</b>
+ *
+ * <TABLE>
+ * <TR><TH>Tag</TH> <TH>Description</TH></TR>
+ * <TR><TD>switch-primary</TD>		<TD>the caller may switch to primary mode</TD></TR>
+ * <TR><TD>switch-secondary</TD>	<TD>the caller may switch to secondary mode</TD></TR>
+ * </TABLE>
+ *
+ * @note As a rule of thumb, any service which might block the caller
+ * causes a switch to primary mode if invoked from secondary
+ * mode. This rule might not apply when the service can complete
+ * fully from user space without entailing any syscall, due to a
+ * particular optimization (e.g. fast acquisition of semaphore
+ * resources directly from user space in the non-contended
+ * case). Therefore, the switch-{primary, secondary} tags denote
+ * services which either _will_ always switch the caller to the
+ * mentioned mode, or _might_ have to do so, depending on the
+ * context. The absence of such a tag indicates that the service can
+ * complete in either mode and will entail no switch.
+ *
+ * @}
+ */
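
As a concrete illustration of the xthread-nowait tag, the hedged sketch below contrasts the blocking use a Xenomai thread is allowed with the non-blocking use a regular POSIX thread must restrict itself to; the scenario is illustrative and uses standard POSIX semaphore calls only:

```c
#include <semaphore.h>
#include <errno.h>

/* A Xenomai thread may block on the service... */
void xenomai_thread_side(sem_t *sem)
{
	sem_wait(sem);	/* may block, possibly switching to primary mode */
}

/* ...while a regular thread should stick to the non-blocking form. */
int regular_thread_side(sem_t *sem)
{
	if (sem_trywait(sem))
		return errno == EAGAIN ? 0 : -errno;

	return 1;	/* acquired */
}
```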
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/internal.c b/kernel/xenomai-v3.2.4/lib/copperplate/internal.c
new file mode 100644
index 0000000..1d96526
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/internal.c
@@ -0,0 +1,298 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <sys/types.h>
+#include <sys/prctl.h>
+#include <sys/syscall.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <pthread.h>
+#include <unistd.h>
+#include <signal.h>
+#include <errno.h>
+#include <limits.h>
+#include <boilerplate/ancillaries.h>
+#include <copperplate/clockobj.h>
+#include <copperplate/threadobj.h>
+#include "internal.h"
+
+static int thread_spawn_prologue(struct corethread_attributes *cta);
+
+static int thread_spawn_epilogue(struct corethread_attributes *cta);
+
+static void *thread_trampoline(void *arg);
+
+#ifdef CONFIG_XENO_COBALT
+
+#include "cobalt/internal.h"
+
+int copperplate_create_thread(struct corethread_attributes *cta,
+			      pthread_t *ptid_r)
+{
+	pthread_attr_ex_t attr_ex;
+	size_t stacksize;
+	int ret;
+
+	ret = thread_spawn_prologue(cta);
+	if (ret)
+		return __bt(ret);
+
+	stacksize = cta->stacksize;
+	if (stacksize < PTHREAD_STACK_DEFAULT)
+		stacksize = PTHREAD_STACK_DEFAULT;
+
+	pthread_attr_init_ex(&attr_ex);
+	pthread_attr_setinheritsched_ex(&attr_ex, PTHREAD_INHERIT_SCHED);
+	pthread_attr_setstacksize_ex(&attr_ex, stacksize);
+	pthread_attr_setdetachstate_ex(&attr_ex, cta->detachstate);
+	ret = -pthread_create_ex(ptid_r, &attr_ex, thread_trampoline, cta);
+	pthread_attr_destroy_ex(&attr_ex);
+	if (ret)
+		return __bt(ret);
+
+	return __bt(thread_spawn_epilogue(cta));
+}
+
+int copperplate_renice_local_thread(pthread_t ptid, int policy,
+				    const struct sched_param_ex *param_ex)
+{
+	return -pthread_setschedparam_ex(ptid, policy, param_ex);
+}
+
+static inline void prepare_wait_corespec(void)
+{
+	/*
+	 * Switch back to primary mode eagerly, so that both the
+	 * parent and the child threads compete on the same priority
+	 * scale when handshaking. In addition, this ensures the child
+	 * thread enters the run() handler over the Xenomai domain,
+	 * which is a basic assumption for all clients.
+	 */
+	cobalt_thread_harden();
+}
+
+int copperplate_kill_tid(pid_t tid, int sig)
+{
+	return __RT(kill(tid, sig)) ? -errno : 0;
+}
+
+int copperplate_probe_tid(pid_t tid)
+{
+	return cobalt_thread_probe(tid);
+}
+
+void copperplate_set_current_name(const char *name)
+{
+	__RT(pthread_setname_np(pthread_self(), name));
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
+int copperplate_kill_tid(pid_t tid, int sig)
+{
+	return syscall(__NR_tkill, tid, sig) ? -errno : 0;
+}
+
+int copperplate_probe_tid(pid_t tid)
+{
+	return copperplate_kill_tid(tid, 0) && errno != EPERM ? -errno : 0;
+}
+
+void copperplate_set_current_name(const char *name)
+{
+	prctl(PR_SET_NAME, (unsigned long)name, 0, 0, 0);
+}
+
+int copperplate_create_thread(struct corethread_attributes *cta,
+			      pthread_t *ptid_r)
+{
+	pthread_attr_t attr;
+	size_t stacksize;
+	int ret;
+
+	ret = thread_spawn_prologue(cta);
+	if (ret)
+		return __bt(ret);
+
+	stacksize = cta->stacksize;
+	if (stacksize < PTHREAD_STACK_DEFAULT)
+		stacksize = PTHREAD_STACK_DEFAULT;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setinheritsched(&attr, PTHREAD_INHERIT_SCHED);
+	pthread_attr_setstacksize(&attr, stacksize);
+	pthread_attr_setdetachstate(&attr, cta->detachstate);
+	ret = -pthread_create(ptid_r, &attr, thread_trampoline, cta);
+	pthread_attr_destroy(&attr);
+
+	if (ret)
+		return __bt(ret);
+
+	return __bt(thread_spawn_epilogue(cta));
+}
+
+int copperplate_renice_local_thread(pthread_t ptid, int policy,
+				    const struct sched_param_ex *param_ex)
+{
+	struct sched_param param = {
+		.sched_priority = param_ex->sched_priority,
+	};
+
+	return -__RT(pthread_setschedparam(ptid, policy, &param));
+}
+
+static inline void prepare_wait_corespec(void)
+{
+	/* empty */
+}
+
+#endif  /* CONFIG_XENO_MERCURY */
+
+int copperplate_get_current_name(char *name, size_t maxlen)
+{
+	if (maxlen < 16)
+		return -ENOSPC;
+
+	return prctl(PR_GET_NAME, (unsigned long)name, 0, 0, 0);
+}
+
+static int thread_spawn_prologue(struct corethread_attributes *cta)
+{
+	int ret;
+
+	ret = __RT(sem_init(&cta->__reserved.warm, 0, 0));
+	if (ret)
+		return __bt(-errno);
+
+	cta->__reserved.status = -ENOSYS;
+
+	return 0;
+}
+
+static void thread_spawn_wait(sem_t *sem)
+{
+	int ret;
+
+	for (;;) {
+		ret = __RT(sem_wait(sem));
+		if (ret && errno == EINTR)
+			continue;
+		if (ret == 0)
+			return;
+		ret = -errno;
+		panic("sem_wait() failed with %s", symerror(ret));
+	}
+}
+
+static void *thread_trampoline(void *arg)
+{
+	struct corethread_attributes *cta = arg, _cta;
+	sem_t released;
+	int ret;
+
+	/*
+	 * cta may be on the parent's stack, so it may be dangling
+	 * soon after the parent is posted: copy this argument
+	 * structure early on.
+	 */
+	_cta = *cta;
+
+	ret = __RT(sem_init(&released, 0, 0));
+	if (ret) {
+		ret = __bt(-errno);
+		cta->__reserved.status = ret;
+		warning("lack of resources for core thread, %s", symerror(ret));
+		goto fail;
+	}
+
+	cta->__reserved.released = &released;
+	ret = cta->prologue(cta->arg);
+	cta->__reserved.status = ret;
+	if (ret) {
+		__RT(sem_destroy(&released));
+		backtrace_check();
+		goto fail;
+	}
+
+	/*
+	 * CAUTION: Once the prologue handler has run successfully,
+	 * the client code may assume that we can't fail spawning the
+	 * child thread anymore, which guarantees that
+	 * copperplate_create_thread() will return a success code
+	 * after this point. This is important so that any thread
+	 * finalizer installed by the prologue handler won't conflict
+	 * with the cleanup code the client may run whenever
+	 * copperplate_create_thread() fails.
+	 */
+	ret = __bt(copperplate_renice_local_thread(pthread_self(),
+			   _cta.policy, &_cta.param_ex));
+	if (ret)
+		warning("cannot renice core thread, %s", symerror(ret));
+	prepare_wait_corespec();
+	__RT(sem_post(&cta->__reserved.warm));
+	thread_spawn_wait(&released);
+	__RT(sem_destroy(&released));
+
+	return _cta.run(_cta.arg);
+fail:
+	__RT(sem_post(&cta->__reserved.warm));
+
+	return (void *)(long)ret;
+}
+
+static int thread_spawn_epilogue(struct corethread_attributes *cta)
+{
+	prepare_wait_corespec();
+	thread_spawn_wait(&cta->__reserved.warm);
+
+	if (cta->__reserved.status == 0)
+		__RT(sem_post(cta->__reserved.released));
+
+	__RT(sem_destroy(&cta->__reserved.warm));
+
+	return __bt(cta->__reserved.status);
+}
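
To make the handshake above concrete, here is a minimal, hypothetical caller; it relies only on the corethread_attributes layout declared in internal.h, and the prologue/run handlers are illustrative:

```c
#include <pthread.h>
#include <sched.h>
#include "internal.h"

/* Runs in the child first; a non-zero return becomes the status
 * reported by copperplate_create_thread(). */
static int my_prologue(void *arg)
{
	return 0;
}

/* Entered only once the epilogue has posted the 'released' semaphore. */
static void *my_run(void *arg)
{
	return NULL;
}

static int spawn_example(void)
{
	struct corethread_attributes cta = {
		.stacksize = 0,	/* raised to PTHREAD_STACK_DEFAULT */
		.detachstate = PTHREAD_CREATE_JOINABLE,
		.policy = SCHED_FIFO,
		.param_ex = { .sched_priority = 10 },
		.prologue = my_prologue,
		.run = my_run,
		.arg = NULL,
	};
	pthread_t tid;

	return copperplate_create_thread(&cta, &tid);
}
```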
+
+void __panic(const char *fn, const char *fmt, ...)
+{
+	struct threadobj *thobj = threadobj_current();
+	va_list ap;
+
+	va_start(ap, fmt);
+	___panic(fn, thobj ? threadobj_get_name(thobj) : NULL, fmt, ap);
+}
+
+void warning(const char *fmt, ...)
+{
+	struct threadobj *thobj = threadobj_current();
+	va_list ap;
+
+	va_start(ap, fmt);
+	__warning(thobj ? threadobj_get_name(thobj) : NULL, fmt, ap);
+	va_end(ap);
+}
+
+void notice(const char *fmt, ...)
+{
+	struct threadobj *thobj = threadobj_current();
+	va_list ap;
+
+	va_start(ap, fmt);
+	__notice(thobj ? threadobj_get_name(thobj) : NULL, fmt, ap);
+	va_end(ap);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/internal.h b/kernel/xenomai-v3.2.4/lib/copperplate/internal.h
new file mode 100644
index 0000000..bfd4691
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/internal.h
@@ -0,0 +1,179 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _COPPERPLATE_INTERNAL_H
+#define _COPPERPLATE_INTERNAL_H
+
+#include <sys/types.h>
+#include <stdarg.h>
+#include <time.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <xeno_config.h>
+#include <boilerplate/list.h>
+#include <boilerplate/ancillaries.h>
+#include <boilerplate/limits.h>
+#include <boilerplate/sched.h>
+#include <boilerplate/setup.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/tunables.h>
+
+#ifdef CONFIG_XENO_PSHARED
+
+#include <boilerplate/shavl.h>
+
+#define SHEAPMEM_PAGE_SHIFT	9 /* 2^9 => 512 bytes */
+#define SHEAPMEM_PAGE_SIZE	(1UL << SHEAPMEM_PAGE_SHIFT)
+#define SHEAPMEM_PAGE_MASK	(~(SHEAPMEM_PAGE_SIZE - 1))
+#define SHEAPMEM_MIN_LOG2	4 /* 16 bytes */
+/*
+ * Use bucketed memory for sizes between 2^SHEAPMEM_MIN_LOG2 and
+ * 2^(SHEAPMEM_PAGE_SHIFT-1).
+ */
+#define SHEAPMEM_MAX		(SHEAPMEM_PAGE_SHIFT - SHEAPMEM_MIN_LOG2)
+#define SHEAPMEM_MIN_ALIGN	(1U << SHEAPMEM_MIN_LOG2)
+/* Max size of an extent (4Gb - SHEAPMEM_PAGE_SIZE). */
+#define SHEAPMEM_MAX_EXTSZ	(4294967295U - SHEAPMEM_PAGE_SIZE + 1)
+/* Bits we need for encoding a page # */
+#define SHEAPMEM_PGENT_BITS      (32 - SHEAPMEM_PAGE_SHIFT)
+
+/* Each page is represented by a page map entry. */
+#define SHEAPMEM_PGMAP_BYTES	sizeof(struct sheapmem_pgentry)
+
+struct sheapmem_pgentry {
+	/* Linkage in bucket list. */
+	unsigned int prev : SHEAPMEM_PGENT_BITS;
+	unsigned int next : SHEAPMEM_PGENT_BITS;
+	/*  page_list or log2. */
+	unsigned int type : 6;
+	/*
+	 * We hold either a spatial map of busy blocks within the page
+	 * for bucketed memory (up to 32 blocks per page), or the
+	 * overall size of the multi-page block if entry.type ==
+	 * page_list.
+	 */
+	union {
+		uint32_t map;
+		uint32_t bsize;
+	};
+};
+
+/*
+ * A range descriptor is stored at the beginning of the first page of
+ * a range of free pages. sheapmem_range.size is nrpages *
+ * SHEAPMEM_PAGE_SIZE. Ranges are indexed by address and size in AVL
+ * trees.
+ */
+struct sheapmem_range {
+	struct shavlh addr_node;
+	struct shavlh size_node;
+	size_t size;
+};
+
+struct sheapmem_extent {
+	struct holder next;
+	memoff_t membase;	/* Base offset of page array */
+	memoff_t memlim;	/* Offset limit of page array */
+	struct shavl addr_tree;
+	struct shavl size_tree;
+	struct sheapmem_pgentry pagemap[0]; /* Start of page entries[] */
+};
+
+#define __SHEAPMEM_MAP_SIZE(__nrpages)					\
+	((__nrpages) * SHEAPMEM_PGMAP_BYTES)
+
+#define __SHEAPMEM_ARENA_SIZE(__size)					\
+	(__size +							\
+	 __align_to(sizeof(struct sheapmem_extent) +			\
+		    __SHEAPMEM_MAP_SIZE((__size) >> SHEAPMEM_PAGE_SHIFT),	\
+		    SHEAPMEM_MIN_ALIGN))
+
+/*
+ * Calculate the minimal size of the memory arena needed to contain a
+ * heap of __user_size bytes, including our meta data for managing it.
+ * Usable at build time if __user_size is constant.
+ */
+#define SHEAPMEM_ARENA_SIZE(__user_size)					\
+	__SHEAPMEM_ARENA_SIZE(__align_to(__user_size, SHEAPMEM_PAGE_SIZE))
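
For instance, sizing the arena backing a 1MB heap with the macro above; a sketch assuming internal.h is included with CONFIG_XENO_PSHARED set, and that __align_to() expands to a constant expression as the build-time note implies:

```c
#include <stdio.h>
#include "internal.h"

int main(void)
{
	/* 1MB spans 2048 pages of 512 bytes, so the arena adds one
	 * page map entry per page plus the extent header. */
	size_t arena = SHEAPMEM_ARENA_SIZE(1024 * 1024);

	printf("a 1MB heap needs a %zu-byte arena\n", arena);

	return 0;
}
```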
+
+/*
+ * The struct below has to live in shared memory; no direct reference
+ * to process local memory in there.
+ */
+struct shared_heap_memory {
+	char name[XNOBJECT_NAME_LEN];
+	pthread_mutex_t lock;
+	struct listobj extents;
+	size_t arena_size;
+	size_t usable_size;
+	size_t used_size;
+	/* Heads of page lists for log2-sized blocks. */
+	uint32_t buckets[SHEAPMEM_MAX];
+	struct sysgroup_memspec memspec;
+};
+
+ssize_t sheapmem_check(struct shared_heap_memory *heap, void *block);
+
+#endif /* CONFIG_XENO_PSHARED */
+
+#ifdef CONFIG_XENO_REGISTRY
+#define DEFAULT_REGISTRY_ROOT		CONFIG_XENO_REGISTRY_ROOT
+#else
+#define DEFAULT_REGISTRY_ROOT		NULL
+#endif
+
+struct corethread_attributes {
+	size_t stacksize;
+	int detachstate;
+	int policy;
+	struct sched_param_ex param_ex;
+	int (*prologue)(void *arg);
+	void *(*run)(void *arg);
+	void *arg;
+	struct {
+		int status;
+		sem_t warm;
+		sem_t *released;
+	} __reserved;
+};
+
+#ifdef __cplusplus
+extern "C" {
+#endif
+
+void copperplate_set_current_name(const char *name);
+
+int copperplate_get_current_name(char *name, size_t maxlen);
+
+int copperplate_kill_tid(pid_t tid, int sig);
+
+int copperplate_probe_tid(pid_t tid);
+
+int copperplate_create_thread(struct corethread_attributes *cta,
+			      pthread_t *ptid);
+
+int copperplate_renice_local_thread(pthread_t ptid, int policy,
+				    const struct sched_param_ex *param_ex);
+
+void copperplate_bootstrap_internal(const char *arg0,
+				    char *mountpt, int regflags);
+
+#ifdef __cplusplus
+}
+#endif
+
+#endif /* _COPPERPLATE_INTERNAL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/reference.c b/kernel/xenomai-v3.2.4/lib/copperplate/reference.c
new file mode 100644
index 0000000..09b845e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/reference.c
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <string.h>
+#include "boilerplate/hash.h"
+#include "copperplate/reference.h"
+#include "internal.h"
+
+static int nrefs[MAX_FNLIBS];
+
+struct __fnref __fnrefs[MAX_FNLIBS][MAX_FNREFS] = {
+	{ { NULL, -1U } }
+};
+
+int __fnref_register(const char *libname,
+		     int libtag, int cbirev,
+		     const char *symname, void (*fn)(void))
+{
+	unsigned int hash;
+	size_t len;
+	int pos;
+
+	if ((unsigned int)libtag >= MAX_FNLIBS)
+		early_panic("reference table overflow for library %s",
+			    libname);
+
+	pos = nrefs[libtag]++;
+	if (pos >= MAX_FNREFS)
+		early_panic("too many function references in library %s (> %d)",
+			    libname, MAX_FNREFS);
+
+	assert(__fnrefs[libtag][pos].fn == NULL);
+	__fnrefs[libtag][pos].fn = fn;
+	len = strlen(symname);
+	hash = __hash_key(symname, len, 0);
+	hash = __hash_key(&cbirev, sizeof(cbirev), hash);
+	__fnrefs[libtag][pos].hash = hash & 0xfffff;
+
+	return __refmangle(libtag, hash, pos);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/regd/Makefile.am b/kernel/xenomai-v3.2.4/lib/copperplate/regd/Makefile.am
new file mode 100644
index 0000000..5e4301b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/regd/Makefile.am
@@ -0,0 +1,23 @@
+sbin_PROGRAMS = sysregd
+
+AM_CPPFLAGS = 			\
+	@XENO_USER_CFLAGS@	\
+	@XENO_FUSE_CFLAGS@	\
+	-I$(top_srcdir)/lib	\
+	-I$(top_srcdir)/include
+
+AM_LDFLAGS = $(XENO_POSIX_WRAPPERS)
+
+LDADD = 			\
+	../libcopperplate@CORE@.la	\
+	@XENO_CORE_LDADD@	\
+	@XENO_USER_LDADD@	\
+	-lpthread -lrt
+
+sysregd_SOURCES = regd.c fs-common.c sysregfs.h
+
+if XENO_COBALT
+sysregd_SOURCES += fs-cobalt.c
+else
+sysregd_SOURCES += fs-mercury.c
+endif
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-cobalt.c b/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-cobalt.c
new file mode 100644
index 0000000..b7c1034
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-cobalt.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <errno.h>
+#include <cobalt/uapi/thread.h>
+#include "sysregfs.h"
+
+#ifdef CONFIG_XENO_PSHARED
+/*
+ * This is a blunt copy of what we do in kernel space to produce this
+ * status in /proc/xenomai/sched/threads. There are additional states
+ * for a thread compared to Mercury, introduced by the dual kernel
+ * (such as relaxed mode, mode switch trap, and priority boost).
+ */
+char *format_thread_status(const struct thread_data *p, char *buf, size_t len)
+{
+	static const char labels[] = XNTHREAD_STATE_LABELS;
+	unsigned long mask;
+	int pos, c;
+	char *wp;
+
+	for (mask = p->status, pos = 0, wp = buf;
+	     mask != 0 && wp - buf < len - 2;	/* 1-letter label + \0 */
+	     mask >>= 1, pos++) {
+		if ((mask & 1) == 0)
+			continue;
+
+		c = labels[pos];
+
+		switch (1 << pos) {
+		case XNROOT:
+			c = 'R'; /* Always mark root as runnable. */
+			break;
+		case XNREADY:
+			if (p->status & XNROOT)
+				continue; /* Already reported on XNROOT. */
+			break;
+		case XNDELAY:
+			/*
+			 * Only report genuine delays here, not timed
+			 * waits for resources.
+			 */
+			if (p->status & XNPEND)
+				continue;
+			break;
+		case XNPEND:
+			/* Report timed waits with lowercase symbol. */
+			if (p->status & XNDELAY)
+				c |= 0x20;
+			break;
+		default:
+			if (c == '.')
+				continue;
+		}
+		*wp++ = c;
+	}
+
+	*wp = '\0';
+
+	return buf;
+}
+
+#else /* !CONFIG_XENO_PSHARED */
+
+/*
+ * If we have no session information, fallback to reading
+ * /proc/xenomai.
+ */
+
+#define PROC_PULL_HANDLER(__name, __path)				\
+int open_ ## __name(struct fsobj *fsobj, void *priv)			\
+{									\
+	return pull_proc_data("/proc/xenomai/" __path, priv);		\
+}
+
+/*
+ * Cobalt-specific helper to pull the /proc vfile data provided by the
+ * nucleus over a fuse-managed vfile.
+ */
+static int pull_proc_data(const char *path, struct fsobstack *o)
+{
+	int len;
+
+	fsobstack_init(o);
+	len = fsobstack_grow_file(o, path);
+	fsobstack_finish(o);
+
+	return len < 0 ? len : 0;
+}
+
+PROC_PULL_HANDLER(threads, "sched/threads");
+PROC_PULL_HANDLER(heaps, "heap");
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+struct sysreg_fsdir sysreg_dirs[] = {
+	{
+		.path = NULL,
+	},
+};
+
+struct sysreg_fsfile sysreg_files[] = {
+	{
+		.path = "/threads",
+		.mode = O_RDONLY,
+		.ops = {
+			.open = open_threads,
+			.release = fsobj_obstack_release,
+			.read = fsobj_obstack_read
+		},
+	},
+	{
+		.path = "/heaps",
+		.mode = O_RDONLY,
+		.ops = {
+			.open = open_heaps,
+			.release = fsobj_obstack_release,
+			.read = fsobj_obstack_read
+		},
+	},
+	{
+		.path = "/version",
+		.mode = O_RDONLY,
+		.ops = {
+			.open = open_version,
+			.release = fsobj_obstack_release,
+			.read = fsobj_obstack_read
+		},
+	},
+	{
+		.path = NULL,
+	}
+};
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-common.c b/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-common.c
new file mode 100644
index 0000000..e0bdd3d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-common.c
@@ -0,0 +1,290 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <signal.h>
+#include <malloc.h>
+#include <sched.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/clockobj.h>
+#include <xenomai/version.h>
+#include "sysregfs.h"
+#include "../internal.h"
+
+#ifdef CONFIG_XENO_PSHARED
+
+/*
+ * If --enable-pshared was given, we can access the main shared heap
+ * to retrieve session-wide information.
+ */
+
+static char *format_time(ticks_t value, char *buf, size_t bufsz)
+{
+	unsigned long ms, us, ns;
+	char *p = buf;
+	ticks_t s;
+
+	if (value == 0) {
+		strcpy(buf, "-");
+		return buf;
+	}
+
+	s = value / 1000000000ULL;
+	ns = value % 1000000000ULL;
+	us = ns / 1000;
+	ms = us / 1000;
+	us %= 1000;
+
+	if (s)
+		p += snprintf(p, bufsz, "%Lus", s);
+
+	if (ms || (s && us))
+		p += snprintf(p, bufsz - (p - buf), "%lums", ms);
+
+	if (us)
+		p += snprintf(p, bufsz - (p - buf), "%luus", us);
+
+	return buf;
+}
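
Assuming ticks_t values are nanoseconds, as the 1000000000ULL divisor implies, a hypothetical self-test for the helper above would expect the following renderings (it would have to live in this file, format_time() being static):

```c
static void format_time_selftest(void)
{
	char buf[64];

	format_time(0ULL, buf, sizeof(buf));		/* "-" */
	format_time(500000ULL, buf, sizeof(buf));	/* "500us" */
	format_time(2500000ULL, buf, sizeof(buf));	/* "2ms500us" */
	format_time(1500000000ULL, buf, sizeof(buf));	/* "1s500ms" */
}
```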
+
+int open_threads(struct fsobj *fsobj, void *priv)
+{
+	struct thread_data *thread_data, *p;
+	struct sysgroup_memspec *obj, *tmp;
+	char sbuf[64], pbuf[16], tbuf[64];
+	struct threadobj_stat statbuf;
+	struct fsobstack *o = priv;
+	struct threadobj *thobj;
+	const char *sched_class;
+	int ret, count, len = 0;
+
+	ret = heapobj_bind_session(__copperplate_setup_data.session_label);
+	if (ret)
+		return ret;
+
+	fsobstack_init(o);
+
+	sysgroup_lock();
+	count = sysgroup_count(thread);
+	sysgroup_unlock();
+
+	if (count == 0)
+		goto out;
+
+	/*
+	 * We don't want to hold the sysgroup lock for too long, since
+	 * it could be contended by a real-time task. So we pull all
+	 * the per-thread data we need into a local array, before
+	 * printing out its contents after we dropped the lock.
+	 */
+	thread_data = p = malloc(sizeof(*p) * count);
+	if (thread_data == NULL) {
+		len = -ENOMEM;
+		goto out;
+	}
+
+	sysgroup_lock();
+
+	for_each_sysgroup(obj, tmp, thread) {
+		if (p - thread_data >= count)
+			break;
+		thobj = container_of(obj, struct threadobj, memspec);
+		ret = threadobj_lock(thobj);
+		if (ret) {
+			sysgroup_remove(thread, obj);
+			continue;
+		}
+		namecpy(p->name, thobj->name);
+		p->name[sizeof(p->name) - 1] = '\0';
+		p->pid = thobj->pid;
+		p->priority = threadobj_get_priority(thobj);
+		p->policy = threadobj_get_policy(thobj);
+		ret = threadobj_stat(thobj, &statbuf);
+		threadobj_unlock(thobj);
+		if (ret)
+			p->cpu = -1;
+		else {
+			p->status = statbuf.status;
+			p->cpu = statbuf.cpu;
+			p->timeout = statbuf.timeout;
+			p->schedlock = statbuf.schedlock;
+		}
+		p++;
+	}
+
+	sysgroup_unlock();
+
+	count = p - thread_data;
+	if (count == 0)
+		goto out_free;
+
+	len = fsobstack_grow_format(o, "%-3s  %-6s %-5s  %-8s %-8s  %-10s %s\n",
+				    "CPU", "PID", "CLASS", "PRI", "TIMEOUT",
+				    "STAT", "NAME");
+
+	for (p = thread_data; count > 0; count--) {
+		if (kill(p->pid, 0)) {
+			p++;	/* skip threads which vanished meanwhile */
+			continue;
+		}
+		snprintf(pbuf, sizeof(pbuf), "%3d", p->priority);
+		if (p->cpu < 0) {
+			strcpy(tbuf, "????");
+			strcpy(sbuf, "??");
+		} else {
+			format_time(p->timeout, tbuf, sizeof(tbuf));
+			format_thread_status(p, sbuf, sizeof(sbuf));
+		}
+		switch (p->policy) {
+		case SCHED_FIFO:
+			sched_class = "fifo";
+			break;
+		case SCHED_RR:
+			sched_class = "rr";
+			break;
+#ifdef SCHED_SPORADIC
+		case SCHED_SPORADIC:
+			sched_class = "pss";
+			break;
+#endif
+#ifdef SCHED_TP
+		case SCHED_TP:
+			sched_class = "tp";
+			break;
+#endif
+#ifdef SCHED_QUOTA
+		case SCHED_QUOTA:
+			sched_class = "quota";
+			break;
+#endif
+#ifdef SCHED_WEAK
+		case SCHED_WEAK:
+			sched_class = "weak";
+			break;
+#endif
+		default:
+			sched_class = "other";
+			break;
+		}
+		len += fsobstack_grow_format(o,
+					     "%3d  %-6d %-5s  %-8s %-8s  %-10s %s\n",
+					     p->cpu, p->pid, sched_class, pbuf,
+					     tbuf, sbuf, p->name);
+		p++;
+	}
+
+out_free:
+	free(thread_data);
+out:
+	heapobj_unbind_session();
+
+	fsobstack_finish(o);
+
+	return len < 0 ? len : 0;
+}
+
+struct heap_data {
+	char name[XNOBJECT_NAME_LEN];
+	size_t total;
+	size_t used;
+};
+
+int open_heaps(struct fsobj *fsobj, void *priv)
+{
+	struct sysgroup_memspec *obj, *tmp;
+	struct heap_data *heap_data, *p;
+	struct shared_heap_memory *heap;
+	struct fsobstack *o = priv;
+	int ret, count, len = 0;
+
+	ret = heapobj_bind_session(__copperplate_setup_data.session_label);
+	if (ret)
+		return ret;
+
+	fsobstack_init(o);
+
+	sysgroup_lock();
+	count = sysgroup_count(heap);
+	sysgroup_unlock();
+
+	if (count == 0)
+		goto out;
+
+	heap_data = p = malloc(sizeof(*p) * count);
+	if (heap_data == NULL) {
+		len = -ENOMEM;
+		goto out;
+	}
+
+	sysgroup_lock();
+
+	/*
+	 * A heap we find there cannot totally vanish until we drop
+	 * the group lock, so there is no point in acquiring each heap
+	 * lock individually for reading the slot.
+	 */
+	for_each_sysgroup(obj, tmp, heap) {
+		if (p - heap_data >= count)
+			break;
+		heap = container_of(obj, struct shared_heap_memory, memspec);
+		namecpy(p->name, heap->name);
+		p->used = heap->used_size;
+		p->total = heap->usable_size;
+		p++;
+	}
+
+	sysgroup_unlock();
+
+	count = p - heap_data;
+	if (count == 0)
+		goto out_free;
+
+	len = fsobstack_grow_format(o, "%9s %9s  %s\n",
+				    "TOTAL", "USED", "NAME");
+
+	for (p = heap_data; count > 0; count--) {
+		len += fsobstack_grow_format(o, "%9Zu %9Zu  %s\n",
+					     p->total, p->used, p->name);
+		p++;
+	}
+
+out_free:
+	free(heap_data);
+out:
+	heapobj_unbind_session();
+
+	fsobstack_finish(o);
+
+	return len < 0 ? len : 0;
+}
+
+#endif /* CONFIG_XENO_PSHARED */
+
+int open_version(struct fsobj *fsobj, void *priv)
+{
+	struct fsobstack *o = priv;
+
+	fsobstack_init(o);
+	fsobstack_grow_format(o, "%s\n", XENO_VERSION_STRING);
+	fsobstack_finish(o);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-mercury.c b/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-mercury.c
new file mode 100644
index 0000000..9a17f83
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/regd/fs-mercury.c
@@ -0,0 +1,127 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <limits.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/threadobj.h>
+#include "sysregfs.h"
+#include "../internal.h"
+
+#ifdef CONFIG_XENO_PSHARED
+
+static int retrieve_task_state(pid_t pid)
+{
+	char trash[BUFSIZ], state = '?', path[32];
+	FILE *fp;
+	int ret;
+
+	/*
+	 * Try to figure out the state in kernel context of a Mercury
+	 * task which does not wait on a copperplate service. If this
+	 * task is not runnable, then display 'X', which is
+	 * reminiscent of a Cobalt task running out of real-time mode.
+	 * Otherwise, show this task as runnable.
+	 */
+	snprintf(path, sizeof(path), "/proc/%d/stat", pid);
+	fp = fopen(path, "r");
+	if (fp) {
+		ret = fscanf(fp, "%[0-9] (%[^) ]) %c", trash, trash, &state);
+		fclose(fp);
+		if (ret == 3 && state != 'R')
+			state = 'X';
+	}
+
+	return state;
+}
+
+char *format_thread_status(const struct thread_data *p, char *buf, size_t len)
+{
+	char *wp = buf;
+
+	if (len < 4)
+		return NULL;
+
+	if (p->status & __THREAD_S_TIMEDWAIT)
+		*wp++ = 'w';
+	else if (p->status & __THREAD_S_WAIT)
+		*wp++ = 'W';
+	else if (p->status & __THREAD_S_DELAYED)
+		*wp++ = 'D';
+	else if (p->status & __THREAD_S_STARTED)
+		*wp++ = retrieve_task_state(p->pid);
+	else
+		*wp++ = 'U';
+
+	if (p->schedlock > 0)
+		*wp++ = 'l';
+
+	if (p->policy == SCHED_RR)
+		*wp++ = 'r';
+
+	*wp = '\0';
+
+	return buf;
+}
+
+#endif /* CONFIG_XENO_PSHARED */
+
+struct sysreg_fsdir sysreg_dirs[] = {
+	{
+		.path = NULL,
+	},
+};
+
+struct sysreg_fsfile sysreg_files[] = {
+#ifdef CONFIG_XENO_PSHARED
+	{
+		.path = "/threads",
+		.mode = O_RDONLY,
+		.ops = {
+			.open = open_threads,
+			.release = fsobj_obstack_release,
+			.read = fsobj_obstack_read
+		},
+	},
+	{
+		.path = "/heaps",
+		.mode = O_RDONLY,
+		.ops = {
+			.open = open_heaps,
+			.release = fsobj_obstack_release,
+			.read = fsobj_obstack_read
+		},
+	},
+#endif /* CONFIG_XENO_PSHARED */
+	{
+		.path = "/version",
+		.mode = O_RDONLY,
+		.ops = {
+			.open = open_version,
+			.release = fsobj_obstack_release,
+			.read = fsobj_obstack_read
+		},
+	},
+	{
+		.path = NULL,
+	}
+};
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/regd/regd.c b/kernel/xenomai-v3.2.4/lib/copperplate/regd/regd.c
new file mode 100644
index 0000000..8752070
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/regd/regd.c
@@ -0,0 +1,560 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/socket.h>
+#include <sys/select.h>
+#include <sys/timerfd.h>
+#include <sys/un.h>
+#include <signal.h>
+#include <stdio.h>
+#include <limits.h>
+#include <memory.h>
+#include <malloc.h>
+#include <getopt.h>
+#include <string.h>
+#include <error.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <pwd.h>
+#include <boilerplate/list.h>
+#include <boilerplate/hash.h>
+#include "../internal.h"
+#include "sysregfs.h"
+
+#define note(fmt, args...)					\
+	do {							\
+		if (!daemonize)					\
+			printf("sysregd: " fmt "\n", ##args);	\
+	} while (0)
+
+static char *rootdir;
+
+static int sockfd;
+
+static char *sysroot;
+
+static int daemonize;
+
+static int linger;
+
+static int shared;
+
+static int anon;
+
+struct client {
+	char *mountpt;
+	int sockfd;
+	struct pvholder next;
+};
+
+static DEFINE_PRIVATE_LIST(client_list);
+
+static void usage(void)
+{
+	fprintf(stderr, "usage: sysregd --root=<dir>     set registry root directory\n");
+	fprintf(stderr, "               [--shared]       share registry between different users\n");
+	fprintf(stderr, "               [--anon]         mount registry for anonymous session\n");
+	fprintf(stderr, "               [--daemonize]    run in the background\n");
+	fprintf(stderr, "               [--linger]       disable timed exit on idleness\n");
+}
+
+static const struct option options[] = {
+	{
+#define help_opt	0
+		.name = "help",
+		.has_arg = no_argument,
+	},
+	{
+#define daemonize_opt	1
+		.name = "daemonize",
+		.has_arg = no_argument,
+		.flag = &daemonize,
+		.val = 1,
+	},
+	{
+#define root_opt	2
+		.name = "root",
+		.has_arg = required_argument,
+	},
+	{
+#define linger_opt	3
+		.name = "linger",
+		.has_arg = no_argument,
+		.flag = &linger,
+		.val = 1,
+	},
+	{
+#define shared_opt	4
+		.name = "shared",
+		.has_arg = no_argument,
+		.flag = &shared,
+		.val = 1,
+	},
+	{
+#define anon_opt	5
+		.name = "anon",
+		.has_arg = no_argument,
+		.flag = &anon,
+		.val = 1,
+	},
+	{ /* Sentinel */ },
+};
+
+static int create_directory_recursive(const char *dir) /* absolute path */
+{
+	char *s, *p;
+	int ret;
+
+	ret = chdir("/");
+	if (ret)	/* That should better work... */
+	if (ret)	/* This had better work... */
+
+	s = strdup(dir);
+	if (s == NULL)
+		return -ENOMEM;
+
+	p = strtok(s + 1, "/");
+	while (p) {
+		if (*p == '\0')
+			goto next;
+		ret = access(p, R_OK|W_OK|X_OK);
+		if (ret) {
+			ret = mkdir(p, 0755);
+			if (ret && errno != EEXIST)
+				return -errno;
+		}
+		ret = chdir(p);
+		if (ret)
+			return -errno;
+	next:
+		p = strtok(NULL, "/");
+	}
+
+	if (shared) {
+		ret = chmod(dir, 0775 | S_ISGID);
+		if (ret)
+			return -errno;
+	}
+
+	free(s);
+
+	return chdir(rootdir) ? -errno : 0; /* Back to rootdir */
+}
+
+static void create_rootdir(void)
+{
+	int ret;
+
+	if (*rootdir != '/')
+		error(1, EINVAL, "absolute root directory path required");
+
+	ret = create_directory_recursive(rootdir);
+	if (ret)
+		error(1, -ret, "create_directory_recursive(\"%s\")", rootdir);
+}
+
+/*
+ * Attempt to bind a local domain socket to some address in the
+ * abstract namespace, for allowing clients to register. This way, we
+ * won't have to suffer socket node leftovers, which are a pain to
+ * deal with racelessly.
+ *
+ * The address is a hash of the root directory we have been told to
+ * maintain.
+ */
+static void bind_socket(void)
+{
+	struct sockaddr_un sun;
+	unsigned int hash;
+	socklen_t addrlen;
+	int ret;
+
+	sockfd = __STD(socket(AF_UNIX, SOCK_SEQPACKET, 0));
+	if (sockfd < 0)
+		error(1, errno, "bind_socket/socket");
+
+	memset(&sun, 0, sizeof(sun));
+	sun.sun_family = AF_UNIX;
+	hash = __hash_key(rootdir, strlen(rootdir), 0);
+	snprintf(sun.sun_path, sizeof(sun.sun_path), "X%X-xenomai", hash);
+	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path);
+	sun.sun_path[0] = '\0';
+	ret = __STD(bind(sockfd, (struct sockaddr *)&sun, addrlen));
+	if (ret) {
+		if (errno == EADDRINUSE)
+			exit(0);
+		error(1, errno, "bind_socket/bind");
+	}
+
+	ret = __STD(listen(sockfd, SOMAXCONN));
+	if (ret)
+		error(1, errno, "bind_socket/listen");
+}
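
A client registering with sysregd connects to the same abstract address; this hedged counterpart of bind_socket() assumes access to the same __hash_key() helper from boilerplate/hash.h:

```c
#include <sys/socket.h>
#include <sys/un.h>
#include <stddef.h>
#include <string.h>
#include <stdio.h>
#include <unistd.h>
#include <boilerplate/hash.h>

/* Illustrative client-side counterpart of bind_socket() above. */
static int connect_to_sysregd(const char *rootdir)
{
	struct sockaddr_un sun;
	unsigned int hash;
	socklen_t addrlen;
	int s;

	s = socket(AF_UNIX, SOCK_SEQPACKET, 0);
	if (s < 0)
		return -1;

	memset(&sun, 0, sizeof(sun));
	sun.sun_family = AF_UNIX;
	hash = __hash_key(rootdir, strlen(rootdir), 0);
	snprintf(sun.sun_path, sizeof(sun.sun_path), "X%X-xenomai", hash);
	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path);
	sun.sun_path[0] = '\0';	/* abstract socket namespace */

	if (connect(s, (struct sockaddr *)&sun, addrlen)) {
		close(s);
		return -1;
	}

	return s;	/* the daemon replies with the mount point path */
}
```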
+
+static int register_client(int s)
+{
+	struct ucred ucred;
+	struct client *c;
+	socklen_t optlen;
+	char *mountpt;
+	int ret;
+
+	optlen = sizeof(ucred);
+	ret = __STD(getsockopt(s, SOL_SOCKET, SO_PEERCRED, &ucred, &optlen));
+	if (ret)
+		return -errno;
+
+	c = malloc(sizeof(*c));
+	if (c == NULL)
+		return -ENOMEM;
+
+	/*
+	 * The registry mount point for a client will be
+	 * <rootdir>/pid.
+	 */
+	ret = asprintf(&mountpt, "%s/%d", rootdir, ucred.pid);
+	if (ret < 0) {
+		ret = -ENOMEM;
+		goto fail_nopath;
+	}
+
+	ret = create_directory_recursive(mountpt);
+	if (ret) {
+		note("failed creating mount point %s", mountpt);
+		goto fail;
+	}
+
+	note("created mount point %s", mountpt);
+
+	/* Send the mount point back to the client. */
+	ret = __STD(send(s, mountpt, strlen(mountpt) + 1, 0));
+	if (ret < 0)
+		goto fail;
+
+	c->mountpt = mountpt;
+	c->sockfd = s;
+	pvlist_append(&c->next, &client_list);
+
+	return 0;
+fail:
+	free(mountpt);
+fail_nopath:
+	free(c);
+
+	return ret;
+}
+
+static void unmount(const char *path)
+{
+	char *cmd, *cmdpath;
+	int flags, ret;
+
+	/*
+	 * Silence stderr while we run the shell command - it may complain
+	 * about an already unmounted path.
+	 */
+	flags = fcntl(2, F_GETFD);
+	if (flags >= 0)
+		fcntl(2, F_SETFD, flags | FD_CLOEXEC);
+
+	cmdpath = lookup_command("fusermount");
+	if (cmdpath) {
+		ret = asprintf(&cmd, "%s -uzq %s", cmdpath, path);
+		free(cmdpath);
+		if (ret < 0)
+			return;
+
+		ret = system(cmd);
+		free(cmd);
+		if (ret != -1 && WIFEXITED(ret) && WEXITSTATUS(ret) == 0)
+			return;
+	}
+
+	cmdpath = lookup_command("umount");
+	if (cmdpath == NULL)
+		return;
+
+	ret = asprintf(&cmd, "%s -l %s", cmdpath, path);
+	free(cmdpath);
+	if (ret < 0)
+		return;
+
+	ret = system(cmd);
+	free(cmd);
+	(void)ret;
+}
+
+static void unregister_client(int s)
+{
+	struct client *c;
+
+	pvlist_for_each_entry(c, &client_list, next) {
+		if (c->sockfd == s) {
+			pvlist_remove(&c->next);
+			note("deleting mount point %s", c->mountpt);
+			unmount(c->mountpt);
+			rmdir(c->mountpt);
+			free(c->mountpt);
+			free(c);
+			return;
+		}
+	}
+}
+
+static void delete_system_fs(void)
+{
+	note("unmounting %s", sysroot);
+	unmount(sysroot);
+	rmdir(sysroot);
+	rmdir(rootdir);
+}
+
+static void handle_requests(void)
+{
+	int ret, s, tmfd = -1;
+	struct itimerspec its;
+	fd_set refset, set;
+	uint64_t exp;
+	char c;
+
+	FD_ZERO(&refset);
+	FD_SET(sockfd, &refset);
+
+	if (!linger) {
+		tmfd = __STD(timerfd_create(CLOCK_MONOTONIC, 0));
+		if (tmfd < 0)
+			error(1, errno, "handle_requests/timerfd_create");
+		/* Silently exit after 30s being idle. */
+		its.it_value.tv_sec = 30;
+		its.it_value.tv_nsec = 0;
+		its.it_interval.tv_sec = 30;
+		its.it_interval.tv_nsec = 0;
+		__STD(timerfd_settime(tmfd, 0, &its, NULL));
+		FD_SET(tmfd, &refset);
+	}
+
+	for (;;) {
+		set = refset;
+		ret = __STD(select(FD_SETSIZE, &set, NULL, NULL, NULL));
+		if (ret < 0)
+			error(1, errno, "handle_requests/select");
+		if (FD_ISSET(sockfd, &set)) {
+			s = __STD(accept(sockfd, NULL, 0));
+			if (s < 0)
+				error(1, errno, "handle_requests/accept");
+			ret = register_client(s);
+			if (ret) {
+				__STD(close(s));
+				continue;
+			}
+			FD_SET(s, &refset);
+			if (tmfd != -1) {
+				if (anon) {
+					FD_CLR(tmfd, &refset);
+					__STD(close(tmfd));
+					tmfd = -1;
+				} else
+					__STD(timerfd_settime(tmfd, 0, &its, NULL));
+			}
+		}
+		if (tmfd != -1 && FD_ISSET(tmfd, &set)) {
+			ret = __STD(read(tmfd, &exp, sizeof(exp)));
+			(void)ret;
+			if (pvlist_empty(&client_list))
+				exit(0);
+		}
+		for (s = sockfd + 1; s < FD_SETSIZE; s++) {
+			if (!FD_ISSET(s, &set) || s == tmfd)
+				continue;
+			ret = __STD(recv(s, &c, sizeof(c), 0));
+			if (ret <= 0) {
+				unregister_client(s);
+				__STD(close(s));
+				FD_CLR(s, &refset);
+				if (anon && pvlist_empty(&client_list)) {
+					if (daemonize) {
+						note("unlinking session %s",
+						     __copperplate_setup_data.session_label);
+						heapobj_unlink_session(__copperplate_setup_data.session_label);
+					}
+					exit(0);
+				}
+			}
+		}
+	}
+}
+
+static void cleanup_handler(int sig)
+{
+	delete_system_fs();
+	_exit(1);
+}
+
+#ifdef CONFIG_XENO_COBALT
+
+#include "cobalt/internal.h"
+
+/*
+ * Bootstrapping Cobalt is something which is normally done through
+ * xenomai_bootstrap(), as available from lib/xenomai/bootstrap.o for
+ * normal applications. But sysregd is a peculiar one, and we need to
+ * drive the init sequence specifically for it.
+ */
+static inline int bootstrap_core(void)
+{
+	return cobalt_init();
+}
+
+#else
+
+static inline int bootstrap_core(void)
+{
+	return 0;
+}
+
+#endif
+
+static void create_system_fs(const char *arg0, const char *rootdir, int flags)
+{
+	struct sysreg_fsfile *f;
+	struct sysreg_fsdir *d;
+	struct sigaction sa;
+	const char *session;
+	char *mountpt;
+	int ret;
+
+	session = strrchr(rootdir, '/');
+	if (session++ == NULL)
+		error(1, EINVAL, "root directory %s", rootdir);
+
+	ret = asprintf(&mountpt, "%s/system", rootdir);
+	if (ret < 0)
+		error(1, ENOMEM, "malloc");
+
+	ret = create_directory_recursive(mountpt);
+	if (ret) {
+		/*
+		 * Before giving up, try to cleanup a left over, in
+		 * case a former sysregd instance died ungracefully.
+		 * Receiving ENOTCONN when creating the /system root
+		 * is the sign that we may be attempting to walk a
+		 * stale tree.
+		 */
+		if (ret == -ENOTCONN) {
+			unmount(mountpt);
+			ret = create_directory_recursive(mountpt);
+			if (ret == 0)
+				goto bootstrap;
+		}
+		error(1, -ret, "create_directory_recursive(\"%s\")", mountpt);
+	}
+
+bootstrap:
+	atexit(delete_system_fs);
+
+	ret = bootstrap_core();
+	if (ret)
+		error(1, -ret, "cannot bootstrap core interface");
+
+	__copperplate_setup_data.session_label = session;
+	__copperplate_setup_data.registry_root = rootdir;
+	sysroot = mountpt;
+	copperplate_bootstrap_internal(arg0, mountpt, flags);
+
+	note("mounted system fs at %s", mountpt);
+
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = cleanup_handler;
+	sigaction(SIGTERM, &sa, NULL);
+	sigaction(SIGINT, &sa, NULL);
+	sa.sa_handler = SIG_IGN;
+	sigaction(SIGHUP, &sa, NULL);
+
+	for (d = sysreg_dirs; d->path != NULL; d++)
+		registry_add_dir(d->path);
+
+	for (f = sysreg_files; f->path != NULL; f++) {
+		registry_init_file_obstack(&f->fsobj, &f->ops);
+		ret = registry_add_file(&f->fsobj, f->mode, f->path);
+		if (ret)
+			error(1, -ret, "failed to register %s", f->path);
+	}
+}
+
+int main(int argc, char *const *argv)
+{
+	int lindex, opt, ret, flags = 0;
+	struct sched_param schedp;
+	struct sigaction sa;
+
+	for (;;) {
+		lindex = -1;
+		opt = getopt_long_only(argc, argv, "", options, &lindex);
+		if (opt == EOF)
+			break;
+		switch (lindex) {
+		case help_opt:
+			usage();
+			return 0;
+		case daemonize_opt:
+		case linger_opt:
+			break;
+		case shared_opt:
+			flags |= REGISTRY_SHARED;
+			break;
+		case anon_opt:
+			flags |= REGISTRY_ANON;
+			break;
+		case root_opt:
+			rootdir = optarg;
+			break;
+		default:
+			usage();
+			exit(1);
+		}
+	}
+
+	if (rootdir == NULL)
+		error(1, EINVAL, "--root must be given");
+
+	/* Force SCHED_OTHER. */
+	schedp.sched_priority = 0;
+	__STD(pthread_setschedparam(pthread_self(), SCHED_OTHER, &schedp));
+
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_IGN;
+	sigaction(SIGPIPE, &sa, NULL);
+	sa.sa_handler = SIG_DFL;
+	sigaction(SIGCHLD, &sa, NULL);
+
+	if (daemonize) {
+		ret = daemon(1, 1);
+		if (ret)
+			error(1, errno, "cannot daemonize");
+	}
+
+	create_rootdir();
+	bind_socket();
+	create_system_fs(argv[0], rootdir, flags);
+	handle_requests();
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/regd/sysregfs.h b/kernel/xenomai-v3.2.4/lib/copperplate/regd/sysregfs.h
new file mode 100644
index 0000000..a7c0252
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/regd/sysregfs.h
@@ -0,0 +1,60 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _REGD_SYSREGFS_H
+#define _REGD_SYSREGFS_H
+
+#include <copperplate/clockobj.h>
+#include <copperplate/registry-obstack.h>
+
+struct sysreg_fsdir {
+	const char *path;
+};
+
+struct sysreg_fsfile {
+	const char *path;
+	struct fsobj fsobj;
+	int mode;
+	struct registry_operations ops;
+};
+
+struct thread_data {
+	char name[XNOBJECT_NAME_LEN];
+	pid_t pid;
+	int priority;
+	int policy;
+	int cpu;
+	int schedlock;
+	ticks_t timeout;
+	unsigned long status;
+};
+
+extern struct sysreg_fsdir sysreg_dirs[];
+
+extern struct sysreg_fsfile sysreg_files[];
+
+int open_threads(struct fsobj *fsobj, void *priv);
+
+int open_heaps(struct fsobj *fsobj, void *priv);
+
+int open_version(struct fsobj *fsobj, void *priv);
+
+char *format_thread_status(const struct thread_data *p,
+			   char *buf, size_t len);
+
+#endif /* !_REGD_SYSREGFS_H */
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/registry.c b/kernel/xenomai-v3.2.4/lib/copperplate/registry.c
new file mode 100644
index 0000000..a60bf98
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/registry.c
@@ -0,0 +1,1022 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/socket.h>
+#include <sys/un.h>
+#include <limits.h>
+#include <stdarg.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <string.h>
+#include <memory.h>
+#include <signal.h>
+#include <unistd.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <malloc.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <fuse.h>
+#include <xeno_config.h>
+#include "boilerplate/atomic.h"
+#include "boilerplate/hash.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/syncobj.h"
+#include "copperplate/registry.h"
+#include "copperplate/registry-obstack.h"
+#include "copperplate/clockobj.h"
+#include "boilerplate/lock.h"
+#include "copperplate/debug.h"
+#include "xenomai/init.h"
+#include "internal.h"
+
+/*
+ * CAUTION: this code shall NOT refer to the shared heap in any way,
+ * only private storage is allowed here: sysregd won't map the main
+ * shared heap permanently, but only in a transitory manner via
+ * heapobj_bind_session() when reading a /system node.
+ */
+
+static pthread_t regfs_thid;
+
+struct regfs_data {
+	const char *arg0;
+	char *mountpt;
+	int flags;
+	sem_t sync;
+	int status;
+	pthread_mutex_t lock;
+	struct pvhash_table files;
+	struct pvhash_table dirs;
+};
+
+static inline struct regfs_data *regfs_get_context(void)
+{
+	static struct regfs_data data;
+
+	return &data;
+}
+
+struct regfs_dir {
+	char *path;
+	const char *basename;
+	struct pvhashobj hobj;
+	struct pvlistobj file_list;
+	struct pvlistobj dir_list;
+	int ndirs, nfiles;
+	struct timespec ctime;
+	struct pvholder link;
+};
+
+static const struct pvhash_operations pvhash_operations = {
+	.compare = memcmp,
+};
+
+int registry_add_dir(const char *fmt, ...)
+{
+	struct regfs_data *p = regfs_get_context();
+	char path[PATH_MAX], *basename;
+	struct regfs_dir *parent, *d;
+	struct pvhashobj *hobj;
+	struct timespec now;
+	int ret, state;
+	va_list ap;
+
+	if (__copperplate_setup_data.no_registry)
+		return 0;
+
+	va_start(ap, fmt);
+	vsnprintf(path, PATH_MAX, fmt, ap);
+	va_end(ap);
+
+	basename = strrchr(path, '/');
+	if (basename == NULL)
+		return __bt(-EINVAL);
+
+	__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+
+	write_lock_safe(&p->lock, state);
+
+	d = pvmalloc(sizeof(*d));
+	if (d == NULL) {
+		ret = -ENOMEM;
+		goto done;
+	}
+	pvholder_init(&d->link);
+	d->path = pvstrdup(path);
+
+	if (strcmp(path, "/")) {
+		d->basename = d->path + (basename - path) + 1;
+		if (path == basename)
+			basename++;
+		*basename = '\0';
+		hobj = pvhash_search(&p->dirs, path, strlen(path),
+				     &pvhash_operations);
+		if (hobj == NULL) {
+			ret = -ENOENT;
+			goto fail;
+		}
+		parent = container_of(hobj, struct regfs_dir, hobj);
+		pvlist_append(&d->link, &parent->dir_list);
+		parent->ndirs++;
+	} else
+		d->basename = d->path;
+
+	pvlist_init(&d->file_list);
+	pvlist_init(&d->dir_list);
+	d->ndirs = d->nfiles = 0;
+	d->ctime = now;
+	ret = pvhash_enter(&p->dirs, d->path, strlen(d->path), &d->hobj,
+			   &pvhash_operations);
+	if (ret) {
+	fail:
+		pvfree(d->path);
+		pvfree(d);
+	}
+done:
+	write_unlock_safe(&p->lock, state);
+
+	return __bt(ret);
+}
+
+int registry_init_file(struct fsobj *fsobj,
+		       const struct registry_operations *ops,
+		       size_t privsz)
+{
+	pthread_mutexattr_t mattr;
+	int ret;
+
+	if (__copperplate_setup_data.no_registry)
+		return 0;
+
+	fsobj->path = NULL;
+	fsobj->ops = ops;
+	fsobj->privsz = privsz;
+	pvholder_init(&fsobj->link);
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&fsobj->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+
+	return ret;
+}
+
+int registry_add_file(struct fsobj *fsobj, int mode, const char *fmt, ...)
+{
+	struct regfs_data *p = regfs_get_context();
+	char path[PATH_MAX], *basename, *dir;
+	struct pvhashobj *hobj;
+	struct regfs_dir *d;
+	int ret, state;
+	va_list ap;
+
+	if (__copperplate_setup_data.no_registry)
+		return 0;
+
+	va_start(ap, fmt);
+	vsnprintf(path, PATH_MAX, fmt, ap);
+	va_end(ap);
+
+	basename = strrchr(path, '/');
+	if (basename == NULL)
+		return __bt(-EINVAL);
+
+	fsobj->path = pvstrdup(path);
+	fsobj->basename = fsobj->path + (basename - path) + 1;
+	fsobj->mode = mode & O_ACCMODE;
+	__RT(clock_gettime(CLOCK_COPPERPLATE, &fsobj->ctime));
+	fsobj->mtime = fsobj->ctime;
+
+	write_lock_safe(&p->lock, state);
+
+	ret = pvhash_enter(&p->files, fsobj->path, strlen(fsobj->path),
+			   &fsobj->hobj, &pvhash_operations);
+	if (ret)
+		goto fail;
+
+	*basename = '\0';
+	dir = basename == path ? "/" : path;
+	hobj = pvhash_search(&p->dirs, dir, strlen(dir),
+			     &pvhash_operations);
+	if (hobj == NULL) {
+		ret = -ENOENT;
+	fail:
+		pvhash_remove(&p->files, &fsobj->hobj, &pvhash_operations);
+		pvfree(fsobj->path);
+		fsobj->path = NULL;
+		goto done;
+	}
+
+	d = container_of(hobj, struct regfs_dir, hobj);
+	pvlist_append(&fsobj->link, &d->file_list);
+	d->nfiles++;
+	fsobj->dir = d;
+done:
+	write_unlock_safe(&p->lock, state);
+
+	return __bt(ret);
+}
+
+void registry_destroy_file(struct fsobj *fsobj)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct regfs_dir *d;
+	int state;
+
+	if (__copperplate_setup_data.no_registry)
+		return;
+
+	write_lock_safe(&p->lock, state);
+
+	if (fsobj->path == NULL)
+		goto out;	/* Not registered. */
+
+	pvhash_remove(&p->files, &fsobj->hobj, &pvhash_operations);
+	/*
+	 * We are covered by a previous call to write_lock_safe(), so
+	 * we may nest pthread_mutex_lock() directly.
+	 */
+	__RT(pthread_mutex_lock(&fsobj->lock));
+	d = fsobj->dir;
+	pvlist_remove(&fsobj->link);
+	d->nfiles--;
+	assert(d->nfiles >= 0);
+	pvfree(fsobj->path);
+	__RT(pthread_mutex_unlock(&fsobj->lock));
+out:
+	__RT(pthread_mutex_destroy(&fsobj->lock));
+	write_unlock_safe(&p->lock, state);
+}
+
+void registry_touch_file(struct fsobj *fsobj)
+{
+	if (__copperplate_setup_data.no_registry)
+		return;
+
+	__RT(clock_gettime(CLOCK_COPPERPLATE, &fsobj->mtime));
+}
+
+static int regfs_getattr(const char *path, struct stat *sbuf)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct pvhashobj *hobj;
+	struct regfs_dir *d;
+	struct fsobj *fsobj;
+	int ret = 0;
+
+	memset(sbuf, 0, sizeof(*sbuf));
+
+	read_lock_nocancel(&p->lock);
+
+	hobj = pvhash_search(&p->dirs, path, strlen(path),
+			     &pvhash_operations);
+	if (hobj) {
+		d = container_of(hobj, struct regfs_dir, hobj);
+		sbuf->st_mode = S_IFDIR | 0755;
+		sbuf->st_nlink = d->ndirs + 2;
+		sbuf->st_atim = d->ctime;
+		sbuf->st_ctim = d->ctime;
+		sbuf->st_mtim = d->ctime;
+		goto done;
+	}
+
+	hobj = pvhash_search(&p->files, path, strlen(path),
+			     &pvhash_operations);
+	if (hobj) {
+		fsobj = container_of(hobj, struct fsobj, hobj);
+		sbuf->st_mode = S_IFREG;
+		switch (fsobj->mode) {
+		case O_RDONLY:
+			sbuf->st_mode |= 0444;
+			break;
+		case O_WRONLY:
+			sbuf->st_mode |= 0222;
+			break;
+		case O_RDWR:
+			sbuf->st_mode |= 0666;
+			break;
+		}
+		sbuf->st_nlink = 1;
+		sbuf->st_size = 32768; /* XXX: this should be dynamic. */
+		sbuf->st_atim = fsobj->mtime;
+		sbuf->st_ctim = fsobj->ctime;
+		sbuf->st_mtim = fsobj->mtime;
+	} else
+		ret = -ENOENT;
+done:
+	read_unlock(&p->lock);
+
+	return ret;
+}
+
+static int regfs_readdir(const char *path, void *buf, fuse_fill_dir_t filler,
+			 off_t offset, struct fuse_file_info *fi)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct regfs_dir *d, *subd;
+	struct pvhashobj *hobj;
+	struct fsobj *fsobj;
+
+	read_lock_nocancel(&p->lock);
+
+	hobj = pvhash_search(&p->dirs, path, strlen(path),
+			     &pvhash_operations);
+	if (hobj == NULL) {
+		read_unlock(&p->lock);
+		return __bt(-ENOENT);
+	}
+
+	filler(buf, ".", NULL, 0);
+	filler(buf, "..", NULL, 0);
+
+	d = container_of(hobj, struct regfs_dir, hobj);
+
+	if (!pvlist_empty(&d->dir_list)) {
+		pvlist_for_each_entry(subd, &d->dir_list, link) {
+			/* We don't output empty directories. */
+			if (subd->ndirs + subd->nfiles == 0)
+				continue;
+			if (filler(buf, subd->basename, NULL, 0))
+				break;
+		}
+	}
+
+	if (!pvlist_empty(&d->file_list)) {
+		pvlist_for_each_entry(fsobj, &d->file_list, link)
+			if (filler(buf, fsobj->basename, NULL, 0))
+				break;
+	}
+
+	read_unlock(&p->lock);
+
+	return 0;
+}
+
+static int regfs_open(const char *path, struct fuse_file_info *fi)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct pvhashobj *hobj;
+	struct fsobj *fsobj;
+	struct service svc;
+	int ret = 0;
+	void *priv;
+
+	push_cleanup_lock(&p->lock);
+	read_lock(&p->lock);
+
+	hobj = pvhash_search(&p->files, path, strlen(path),
+			     &pvhash_operations);
+	if (hobj == NULL) {
+		ret = -ENOENT;
+		goto done;
+	}
+
+	fsobj = container_of(hobj, struct fsobj, hobj);
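+	/*
+	 * O_RDONLY, O_WRONLY and O_RDWR map to 0, 1 and 2, so adding
+	 * 1 to an access mode yields a two-bit read/write mask; a
+	 * null intersection means that none of the requested access
+	 * modes is allowed by the file mode.
+	 */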
+	if (((fi->flags + 1) & (fsobj->mode + 1)) == 0) {
+		ret = -EACCES;
+		goto done;
+	}
+
+	if (fsobj->privsz) {
+		priv = malloc(fsobj->privsz);
+		if (priv == NULL) {
+			ret = -ENOMEM;
+			goto done;
+		}
+	} else
+		priv = NULL;
+
+	fi->fh = (uintptr_t)priv;
+	if (fsobj->ops->open) {
+		CANCEL_DEFER(svc);
+		ret = __bt(fsobj->ops->open(fsobj, priv));
+		CANCEL_RESTORE(svc);
+	}
+done:
+	read_unlock(&p->lock);
+	pop_cleanup_lock(&p->lock);
+
+	return __bt(ret);
+}
+
+static int regfs_release(const char *path, struct fuse_file_info *fi)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct pvhashobj *hobj;
+	struct fsobj *fsobj;
+	struct service svc;
+	int ret = 0;
+	void *priv;
+
+	push_cleanup_lock(&p->lock);
+	read_lock(&p->lock);
+
+	hobj = pvhash_search(&p->files, path, strlen(path),
+			     &pvhash_operations);
+	if (hobj == NULL) {
+		ret = -ENOENT;
+		goto done;
+	}
+
+	fsobj = container_of(hobj, struct fsobj, hobj);
+	priv = (void *)(uintptr_t)fi->fh;
+	if (fsobj->ops->release) {
+		CANCEL_DEFER(svc);
+		ret = __bt(fsobj->ops->release(fsobj, priv));
+		CANCEL_RESTORE(svc);
+	}
+	if (priv)
+		free(priv);
+done:
+	read_unlock(&p->lock);
+	pop_cleanup_lock(&p->lock);
+
+	return __bt(ret);
+}
+
+static int regfs_read(const char *path, char *buf, size_t size, off_t offset,
+		      struct fuse_file_info *fi)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct pvhashobj *hobj;
+	struct fsobj *fsobj;
+	struct service svc;
+	void *priv;
+	int ret;
+
+	read_lock_nocancel(&p->lock);
+
+	hobj = pvhash_search(&p->files, path, strlen(path),
+			     &pvhash_operations);
+	if (hobj == NULL) {
+		read_unlock(&p->lock);
+		return __bt(-EIO);
+	}
+
+	fsobj = container_of(hobj, struct fsobj, hobj);
+	if (fsobj->ops->read == NULL) {
+		read_unlock(&p->lock);
+		return __bt(-ENOSYS);
+	}
+
+	push_cleanup_lock(&fsobj->lock);
+	read_lock(&fsobj->lock);
+	read_unlock(&p->lock);
+	priv = (void *)(uintptr_t)fi->fh;
+	CANCEL_DEFER(svc);
+	ret = fsobj->ops->read(fsobj, buf, size, offset, priv);
+	CANCEL_RESTORE(svc);
+	read_unlock(&fsobj->lock);
+	pop_cleanup_lock(&fsobj->lock);
+
+	return __bt(ret);
+}
+
+static int regfs_write(const char *path, const char *buf, size_t size, off_t offset,
+		       struct fuse_file_info *fi)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct pvhashobj *hobj;
+	struct fsobj *fsobj;
+	struct service svc;
+	void *priv;
+	int ret;
+
+	read_lock_nocancel(&p->lock);
+
+	hobj = pvhash_search(&p->files, path, strlen(path),
+			     &pvhash_operations);
+	if (hobj == NULL) {
+		read_unlock(&p->lock);
+		return __bt(-EIO);
+	}
+
+	fsobj = container_of(hobj, struct fsobj, hobj);
+	if (fsobj->ops->write == NULL) {
+		read_unlock(&p->lock);
+		return __bt(-ENOSYS);
+	}
+
+	push_cleanup_lock(&fsobj->lock);
+	read_lock(&fsobj->lock);
+	read_unlock(&p->lock);
+	priv = (void *)(uintptr_t)fi->fh;
+	CANCEL_DEFER(svc);
+	ret = fsobj->ops->write(fsobj, buf, size, offset, priv);
+	CANCEL_RESTORE(svc);
+	read_unlock(&fsobj->lock);
+	pop_cleanup_lock(&fsobj->lock);
+
+	return __bt(ret);
+}
+
+static int regfs_truncate(const char *path, off_t offset)
+{
+	return 0;
+}
+
+static int regfs_chmod(const char *path, mode_t mode)
+{
+	return 0;
+}
+
+static int regfs_chown(const char *path, uid_t uid, gid_t gid)
+{
+	return 0;
+}
+
+static void *regfs_init(void)
+{
+	struct regfs_data *p = regfs_get_context();
+	struct sigaction sa;
+
+	/*
+	 * Override annoying FUSE settings. Unless the application
+	 * tells otherwise, we want the emulator to exit upon common
+	 * termination signals.
+	 */
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_DFL;
+	sigaction(SIGTERM, &sa, NULL);
+	sigaction(SIGHUP, &sa, NULL);
+	sigaction(SIGINT, &sa, NULL);
+	sigaction(SIGPIPE, &sa, NULL);
+
+	p->status = 0;	/* all ok. */
+	__STD(sem_post(&p->sync));
+
+	return p;
+}
+
+static struct fuse_operations regfs_opts = {
+	.init		= regfs_init,
+	.getattr	= regfs_getattr,
+	.readdir	= regfs_readdir,
+	.open		= regfs_open,
+	.release	= regfs_release,
+	.read		= regfs_read,
+	.write		= regfs_write,
+	/* Those must be defined for writing to files too. */
+	.truncate	= regfs_truncate,
+	.chown		= regfs_chown,
+	.chmod		= regfs_chmod,
+};
+
+static void *registry_thread(void *arg)
+{
+	struct regfs_data *p = arg;
+	char *av[7];
+	int ret;
+
+	av[0] = (char *)p->arg0;
+	av[1] = "-s";
+	av[2] = "-f";
+	av[3] = p->mountpt;
+	av[4] = "-o";
+	av[5] = p->flags & REGISTRY_SHARED ?
+		"default_permissions,allow_other"
+		: "default_permissions";
+	av[6] = NULL;
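+
+	/*
+	 * The vector above mimics the command line "<arg0> -s -f
+	 * <mountpt> -o <options>", i.e. a single-threaded (-s) FUSE
+	 * loop running in the foreground (-f).
+	 */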
+
+	/*
+	 * Once connected to sysregd, we don't have to care for the
+	 * mount point, sysregd will umount(2) it when we go away.
+	 */
+	ret = fuse_main(6, av, &regfs_opts);
+	if (ret) {
+		early_warning("can't mount registry onto %s", p->mountpt);
+		/* Attempt to figure out why we failed. */
+		ret = access(p->mountpt, F_OK);
+		p->status = ret ? -errno : -EPERM;
+		__STD(sem_post(&p->sync));
+		return (void *)(long)ret;
+	}
+
+	return NULL;
+}
+
+static pid_t regd_pid;
+
+static void sigchld_handler(int sig)
+{
+	smp_rmb();
+	if (regd_pid && waitpid(regd_pid, NULL, WNOHANG) == regd_pid)
+		regd_pid = 0;
+}
+
+static int spawn_daemon(const char *sessdir, int flags)
+{
+	struct sigaction sa;
+	char *path, *av[7];
+	int ret, n = 0;
+	pid_t pid;
+
+	ret = asprintf(&path, "%s/sbin/sysregd", CONFIG_XENO_PREFIX);
+	if (ret < 0)
+		return -ENOMEM;
+
+	/*
+	 * We want to allow application code to wait for children
+	 * exits explicitly and selectively using wait*() calls, while
+	 * preventing a failing sysregd from moving to the zombie
+	 * state. Therefore, bluntly leaving the SIGCHLD disposition
+	 * to SIG_IGN upon return from this routine is not an option.
+	 *
+	 * To solve this issue, first we ignore SIGCHLD to plug a
+	 * potential race while forking the daemon, then we trap it to
+	 * a valid handler afterwards, once we know the daemon
+	 * pid. This handler will selectively reap the registry
+	 * daemon, and only this process, leaving all options open to
+	 * the application code for reaping its own children as it
+	 * sees fit.
+	 */
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = SIG_IGN;
+	sigaction(SIGCHLD, &sa, NULL);
+
+	av[0] = "sysregd";
+	av[1] = "--daemon";
+	av[2] = "--root";
+	av[3] = (char *)sessdir;
+	n = 4;
+	if (flags & REGISTRY_ANON)
+		av[n++] = "--anon";
+	if (flags & REGISTRY_SHARED)
+		av[n++] = "--shared";
+
+	av[n] = NULL;
+
+	pid = vfork();
+	switch (pid) {
+	case 0:
+		execv(path, av);
+		_exit(1);
+	case -1:
+		sa.sa_handler = SIG_DFL;
+		sigaction(SIGCHLD, &sa, NULL);
+		ret = -errno;
+		break;
+	default:
+		/*
+		 * Make sure we sleep at least 200 ms regardless of
+		 * signal receipts.
+		 */
+		while (usleep(200000) < 0 && errno == EINTR);
+		regd_pid = pid;
+		compiler_barrier();
+		sa.sa_handler = sigchld_handler;
+		sa.sa_flags = SA_RESTART;
+		sigaction(SIGCHLD, &sa, NULL);
+		ret = 0;
+		break;
+	}
+
+	free(path);
+
+	return ret;
+}
+
+static int connect_regd(const char *sessdir, char **mountpt, int flags)
+{
+	struct sockaddr_un sun;
+	int s, ret, retries;
+	unsigned int hash;
+	socklen_t addrlen;
+
+	*mountpt = malloc(PATH_MAX);
+	if (*mountpt == NULL)
+		return -ENOMEM;
+
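+	/*
+	 * Rendezvous with sysregd happens over a Linux abstract
+	 * socket: the name is hashed from the session directory, and
+	 * the leading byte is cleared once the address length is
+	 * known, so no filesystem node is ever created.
+	 */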
+	memset(&sun, 0, sizeof(sun));
+	sun.sun_family = AF_UNIX;
+	hash = __hash_key(sessdir, strlen(sessdir), 0);
+	snprintf(sun.sun_path, sizeof(sun.sun_path), "X%X-xenomai", hash);
+	addrlen = offsetof(struct sockaddr_un, sun_path) + strlen(sun.sun_path);
+	sun.sun_path[0] = '\0';
+
+	for (retries = 0; retries < 3; retries++) {
+		s = __STD(socket(AF_UNIX, SOCK_SEQPACKET|SOCK_CLOEXEC, 0));
+		if (s < 0) {
+			ret = -errno;
+			free(*mountpt);
+			return ret;
+		}
+		ret = __STD(connect(s, (struct sockaddr *)&sun, addrlen));
+		if (ret == 0) {
+			ret = __STD(recv(s, *mountpt, PATH_MAX, 0));
+			if (ret > 0)
+				return 0;
+		}
+		__STD(close(s));
+		ret = spawn_daemon(sessdir, flags);
+		if (ret)
+			break;
+		ret = -EAGAIN;
+	}
+
+	free(*mountpt);
+
+	early_warning("cannot connect to registry daemon");
+
+	return ret;
+}
+
+static void pkg_cleanup(void)
+{
+	registry_pkg_destroy();
+}
+
+int __registry_pkg_init(const char *arg0, char *mountpt, int flags)
+{
+	struct regfs_data *p = regfs_get_context();
+	pthread_mutexattr_t mattr;
+	struct sched_param schedp;
+	pthread_attr_t thattr;
+	int ret;
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&p->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	pvhash_init(&p->files);
+	pvhash_init(&p->dirs);
+
+	registry_add_dir("/");	/* Create the fs root. */
+
+	/* We want a SCHED_OTHER thread. */
+	pthread_attr_init(&thattr);
+	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&thattr, SCHED_OTHER);
+	schedp.sched_priority = 0;
+	pthread_attr_setschedparam(&thattr, &schedp);
+	/*
+	 * Memory is locked as the process data grows, so we set a
+	 * smaller stack size for the fs thread than the default 8MB
+	 * set by glibc.
+	 */
+	pthread_attr_setstacksize(&thattr, PTHREAD_STACK_DEFAULT);
+	pthread_attr_setscope(&thattr, PTHREAD_SCOPE_PROCESS);
+	p->arg0 = arg0;
+	p->mountpt = mountpt;
+	p->flags = flags;
+	p->status = -EINVAL;
+	__STD(sem_init(&p->sync, 0, 0));
+
+	/*
+	 * Start the FUSE filesystem daemon. Over Cobalt, it runs as a
+	 * non real-time Xenomai shadow, so that it may synchronize on
+	 * real-time objects.
+	 */
+	ret = __bt(-__RT(pthread_create(&regfs_thid, &thattr,
+					registry_thread, p)));
+	if (ret)
+		return ret;
+
+	/*
+	 * We synchronize with regfs_init() to wait for FUSE to
+	 * complete all its init chores before returning to our
+	 * caller.
+	 */
+	for (;;) {
+		ret = __STD(sem_wait(&p->sync));
+		if (ret == 0)
+			break;
+		if (errno != EINTR)
+			return __bt(-errno);
+	}
+
+	atexit(pkg_cleanup);
+
+	return p->status;
+}
+
+int registry_pkg_init(const char *arg0, int flags)
+{
+	char *mountpt;
+	int ret;
+
+	ret = connect_regd(__copperplate_setup_data.session_root,
+			   &mountpt, flags);
+	if (ret)
+		return __bt(ret);
+
+	return __bt(__registry_pkg_init(arg0, mountpt, flags));
+}
+
+void registry_pkg_destroy(void)
+{
+	if (regfs_thid) {
+		pthread_cancel(regfs_thid);
+		pthread_join(regfs_thid, NULL);
+		regfs_thid = 0;
+	}
+}
+
+int fsobj_obstack_release(struct fsobj *fsobj, void *priv)
+{
+	fsobstack_destroy(priv);
+
+	return 0;
+}
+
+ssize_t fsobj_obstack_read(struct fsobj *fsobj,
+			   char *buf, size_t size, off_t offset,
+			   void *priv)
+{
+	return fsobstack_pull(priv, buf, size);
+}
+
+int fsobstack_grow_format(struct fsobstack *o, const char *fmt, ...)
+{
+	char buf[256], *p = buf;
+	int len = sizeof(buf), n;
+	va_list ap;
+
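+	/*
+	 * Try formatting into the on-stack buffer first; when
+	 * vsnprintf() reports truncation, retry with a heap buffer
+	 * sized from its return value.
+	 */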
+	for (;;) {
+		va_start(ap, fmt);
+		n = vsnprintf(p, len, fmt, ap);
+		va_end(ap);
+
+		if (n > 0 && n < len)
+			obstack_grow(&o->obstack, p, n);
+
+		if (p != buf)
+			free(p);
+
+		if (n < len)
+			return n < 0 ? -EINVAL : n;
+
+		len = n + 1;
+		p = malloc(len);
+		if (p == NULL)
+			break;
+	}
+
+	return -ENOMEM;
+}
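+
+/*
+ * Typical usage from a registry file handler (a sketch; this assumes
+ * the fsobstack_init()/fsobstack_finish() helpers declared in
+ * copperplate/registry-obstack.h):
+ *
+ *	struct fsobstack o;
+ *
+ *	fsobstack_init(&o);
+ *	fsobstack_grow_format(&o, "value=%d\n", value);
+ *	fsobstack_finish(&o);
+ *
+ * with fsobstack_pull() eventually streaming the result out of the
+ * read handler (see fsobj_obstack_read() above).
+ */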
+
+void fsobstack_grow_string(struct fsobstack *o, const char *s)
+{
+	obstack_grow(&o->obstack, s, strlen(s));
+}
+
+void fsobstack_grow_char(struct fsobstack *o, char c)
+{
+	obstack_1grow(&o->obstack, c);
+}
+
+int fsobstack_grow_file(struct fsobstack *o, const char *path)
+{
+	int len = 0;
+	FILE *fp;
+	int c;
+
+	fp = fopen(path, "r");
+	if (fp == NULL)
+		return -errno;
+
+	for (;;) {
+		c = fgetc(fp);
+		if (c == EOF) {
+			if (ferror(fp))
+				len = -errno;
+			break;
+		}
+		obstack_1grow(&o->obstack, c);
+		len++;
+	}
+
+	fclose(fp);
+
+	return len;
+}
+
+ssize_t fsobstack_pull(struct fsobstack *o, char *buf, size_t size)
+{
+	size_t len;
+
+	if (o->data == NULL)	/* Not finished. */
+		return -EIO;
+
+	len = o->len;
+	if (len > 0) {
+		if (len > size)
+			len = size;
+		o->len -= len;
+		memcpy(buf, o->data, len);
+		o->data += len;
+	}
+
+	return len;
+}
+
+static int collect_wait_list(struct fsobstack *o,
+			     struct syncobj *sobj,
+			     struct listobj *wait_list,
+			     int *wait_count,
+			     struct fsobstack_syncops *ops)
+{
+	struct threadobj *thobj;
+	struct syncstate syns;
+	struct obstack cache;
+	struct service svc;
+	int count, ret;
+	void *p, *e;
+
+	obstack_init(&cache);
+	CANCEL_DEFER(svc);
+redo:
+	smp_rmb();
+	count = *wait_count;
+	if (count == 0)
+		goto out;
+
+	/* Pre-allocate the obstack room without holding any lock. */
+	ret = ops->prepare_cache(o, &cache, count);
+	if (ret)
+		goto out;
+
+	ret = syncobj_lock(sobj, &syns);
+	if (ret) {
+		count = ret;
+		goto out;
+	}
+
+	/* Re-validate the previous item count under lock. */
+	if (count != *wait_count) {
+		syncobj_unlock(sobj, &syns);
+		obstack_free(&cache, NULL);
+		goto redo;
+	}
+
+	p = obstack_base(&cache);
+	list_for_each_entry(thobj, wait_list, wait_link)
+		p += ops->collect_data(p, thobj);
+
+	syncobj_unlock(sobj, &syns);
+
+	/*
+	 * Some may want to format data directly from the collect
+	 * handler, when no gain is expected from splitting the
+	 * collect and format steps. In that case, we may have no
+	 * format handler.
+	 */
+	e = obstack_next_free(&cache);
+	p = obstack_finish(&cache);
+	if (ops->format_data == NULL) {
+		if (e != p)
+			obstack_grow(&o->obstack, p, e - p);
+		goto out;
+	}
+
+	/* Finally, format the output without holding any lock. */
+	do
+		p += ops->format_data(o, p);
+	while (p < e);
+out:
+	CANCEL_RESTORE(svc);
+	obstack_free(&cache, NULL);
+
+	return count;
+}
+
+int fsobstack_grow_syncobj_grant(struct fsobstack *o, struct syncobj *sobj,
+				 struct fsobstack_syncops *ops)
+{
+	return collect_wait_list(o, sobj, &sobj->grant_list,
+				 &sobj->grant_count, ops);
+}
+
+int fsobstack_grow_syncobj_drain(struct fsobstack *o, struct syncobj *sobj,
+				 struct fsobstack_syncops *ops)
+{
+	return collect_wait_list(o, sobj, &sobj->drain_list,
+				 &sobj->drain_count, ops);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/semobj.c b/kernel/xenomai-v3.2.4/lib/copperplate/semobj.c
new file mode 100644
index 0000000..a615678
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/semobj.c
@@ -0,0 +1,354 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include "copperplate/threadobj.h"
+#include "copperplate/semobj.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/debug.h"
+
+#ifdef CONFIG_XENO_COBALT
+
+#include "cobalt/internal.h"
+
+int semobj_init(struct semobj *smobj, int flags, int value,
+		fnref_type(void (*)(struct semobj *smobj)) finalizer)
+{
+	int ret, sem_flags;
+
+	sem_flags = SEM_REPORT|SEM_RAWCLOCK;
+	if (sem_scope_attribute)
+		sem_flags |= SEM_PSHARED;
+
+	if ((flags & SEMOBJ_PRIO) == 0)
+		sem_flags |= SEM_FIFO;
+
+	if (flags & SEMOBJ_PULSE)
+		sem_flags |= SEM_PULSE;
+
+	if (flags & SEMOBJ_WARNDEL)
+		sem_flags |= SEM_WARNDEL;
+
+	ret = sem_init_np(&smobj->core.sem, sem_flags, value);
+	if (ret)
+		return __bt(-errno);
+
+	smobj->finalizer = finalizer;
+
+	return 0;
+}
+
+int semobj_destroy(struct semobj *smobj)
+{
+	void (*finalizer)(struct semobj *smobj);
+	int ret;
+
+	ret = __RT(sem_destroy(&smobj->core.sem));
+	if (ret < 0)
+		return errno == EINVAL ? -EIDRM : -errno;
+	/*
+	 * All waiters have been unblocked with EINVAL, and therefore
+	 * won't touch this object anymore. We can finalize it
+	 * immediately.
+	 */
+	fnref_get(finalizer, smobj->finalizer);
+	finalizer(smobj);
+
+	return ret;
+}
+
+void semobj_uninit(struct semobj *smobj)
+{
+	int ret = __RT(sem_destroy(&smobj->core.sem));
+	assert(ret == 0);
+	(void)ret;
+}
+
+int semobj_post(struct semobj *smobj)
+{
+	int ret;
+
+	ret = __RT(sem_post(&smobj->core.sem));
+	if (ret)
+		return errno == EINVAL ? -EIDRM : -errno;
+
+	return 0;
+}
+
+int semobj_broadcast(struct semobj *smobj)
+{
+	int ret;
+
+	ret = sem_broadcast_np(&smobj->core.sem);
+	if (ret)
+		return errno == EINVAL ? -EIDRM : -errno;
+
+	return 0;
+}
+
+int semobj_wait(struct semobj *smobj, const struct timespec *timeout)
+{
+	int ret;
+
+	if (timeout == NULL) {
+		do
+			ret = __RT(sem_wait(&smobj->core.sem));
+		while (ret && errno == EINTR);
+	} else if (timeout->tv_sec == 0 && timeout->tv_nsec == 0)
+		ret = __RT(sem_trywait(&smobj->core.sem));
+	else {
+		do
+			ret = __RT(sem_timedwait(&smobj->core.sem, timeout));
+		while (ret && errno == EINTR);
+	}
+
+	if (ret)
+		return errno == EINVAL ? -EIDRM : -errno;
+
+	return 0;
+}
+
+int semobj_getvalue(struct semobj *smobj, int *sval)
+{
+	int ret;
+
+	ret = __RT(sem_getvalue(&smobj->core.sem, sval));
+	if (ret)
+		return errno == EINVAL ? -EIDRM : -errno;
+
+	return 0;
+}
+
+int semobj_inquire(struct semobj *smobj, size_t waitsz,
+		   struct semobj_waitentry *waitlist,
+		   int *val_r)
+{
+	struct cobalt_threadstat stat;
+	struct cobalt_sem_info info;
+	int nrwait, pidsz, n, ret;
+	pid_t *pidlist = NULL;
+
+	pidsz = sizeof(pid_t) * (waitsz / sizeof(*waitlist));
+	if (pidsz > 0) {
+		pidlist = pvmalloc(pidsz);
+		if (pidlist == NULL)
+			return -ENOMEM;
+	}
+
+	nrwait = cobalt_sem_inquire(&smobj->core.sem, &info, pidlist, pidsz);
+	if (nrwait < 0)
+		goto out;
+
+	*val_r = info.value;
+
+	if (pidlist == NULL)
+		return nrwait;
+
+	for (n = 0; n < nrwait; n++, waitlist++) {
+		ret = cobalt_thread_stat(pidlist[n], &stat);
+		/* If waiter disappeared, fill in a dummy entry. */
+		if (ret) {
+			waitlist->pid = -1;
+			strcpy(waitlist->name, "???");
+		} else {
+			waitlist->pid = pidlist[n];
+			strcpy(waitlist->name, stat.name);
+		}
+	}
+out:
+	if (pidlist)
+		pvfree(pidlist);
+
+	return nrwait;
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
+static void semobj_finalize(struct syncobj *sobj)
+{
+	struct semobj *smobj = container_of(sobj, struct semobj, core.sobj);
+	void (*finalizer)(struct semobj *smobj);
+
+	fnref_get(finalizer, smobj->finalizer);
+	finalizer(smobj);
+}
+fnref_register(libcopperplate, semobj_finalize);
+
+int semobj_init(struct semobj *smobj, int flags, int value,
+		fnref_type(void (*)(struct semobj *smobj)) finalizer)
+{
+	int sobj_flags = 0, ret;
+
+	if (flags & SEMOBJ_PRIO)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	/*
+	 * We need a trampoline for finalizing a semobj, to escalate
+	 * from a basic syncobj we receive to the semobj container.
+	 */
+	ret = syncobj_init(&smobj->core.sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libcopperplate, semobj_finalize));
+	if (ret)
+		return __bt(ret);
+
+	smobj->core.flags = flags;
+	smobj->core.value = value;
+	smobj->finalizer = finalizer;
+
+	return 0;
+}
+
+int semobj_destroy(struct semobj *smobj)
+{
+	struct syncstate syns;
+
+	if (syncobj_lock(&smobj->core.sobj, &syns))
+		return -EINVAL;
+
+	return syncobj_destroy(&smobj->core.sobj, &syns);
+}
+
+void semobj_uninit(struct semobj *smobj)
+{
+	syncobj_uninit(&smobj->core.sobj);
+}
+
+int semobj_post(struct semobj *smobj)
+{
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&smobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
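+	/*
+	 * The count goes negative when threads are waiting, its
+	 * absolute value giving the number of waiters; a non-positive
+	 * count after the increment thus means at least one waiter
+	 * can be granted the resource.
+	 */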
+	if (++smobj->core.value <= 0)
+		syncobj_grant_one(&smobj->core.sobj);
+	else if (smobj->core.flags & SEMOBJ_PULSE)
+		smobj->core.value = 0;
+
+	syncobj_unlock(&smobj->core.sobj, &syns);
+
+	return 0;
+}
+
+int semobj_broadcast(struct semobj *smobj)
+{
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&smobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
+	if (smobj->core.value < 0) {
+		smobj->core.value = 0;
+		syncobj_grant_all(&smobj->core.sobj);
+	}
+
+	syncobj_unlock(&smobj->core.sobj, &syns);
+
+	return 0;
+}
+
+int semobj_wait(struct semobj *smobj, const struct timespec *timeout)
+{
+	struct syncstate syns;
+	int ret = 0;
+
+	ret = syncobj_lock(&smobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
+	if (--smobj->core.value >= 0)
+		goto done;
+
+	if (timeout &&
+	    timeout->tv_sec == 0 && timeout->tv_nsec == 0) {
+		smobj->core.value++;
+		ret = -EWOULDBLOCK;
+		goto done;
+	}
+
+	if (!threadobj_current_p()) {
+		ret = -EPERM;
+		goto done;
+	}
+
+	ret = syncobj_wait_grant(&smobj->core.sobj, timeout, &syns);
+	if (ret) {
+		/*
+		 * -EIDRM means that the semaphore has been deleted,
+		 * so we bail out immediately and don't attempt to
+		 * access that stale object in any way.
+		 */
+		if (ret == -EIDRM)
+			return ret;
+
+		smobj->core.value++; /* Fix up semaphore count. */
+	}
+done:
+	syncobj_unlock(&smobj->core.sobj, &syns);
+
+	return ret;
+}
+
+int semobj_getvalue(struct semobj *smobj, int *sval)
+{
+	struct syncstate syns;
+
+	if (syncobj_lock(&smobj->core.sobj, &syns))
+		return -EINVAL;
+
+	*sval = smobj->core.value;
+
+	syncobj_unlock(&smobj->core.sobj, &syns);
+
+	return 0;
+}
+
+int semobj_inquire(struct semobj *smobj, size_t waitsz,
+		   struct semobj_waitentry *waitlist,
+		   int *val_r)
+{
+	struct threadobj *thobj;
+	struct syncstate syns;
+	int ret, nrwait;
+
+	ret = syncobj_lock(&smobj->core.sobj, &syns);
+	if (ret)
+		return ret;
+
+	nrwait = syncobj_count_grant(&smobj->core.sobj);
+	if (nrwait > 0) {
+		syncobj_for_each_grant_waiter(&smobj->core.sobj, thobj) {
+			waitlist->pid = threadobj_get_pid(thobj);
+			strcpy(waitlist->name, threadobj_get_name(thobj));
+			waitlist++;
+		}
+	}
+
+	*val_r = smobj->core.value;
+
+	syncobj_unlock(&smobj->core.sobj, &syns);
+
+	return nrwait;
+}
+
+#endif /* CONFIG_XENO_MERCURY */
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/syncobj.c b/kernel/xenomai-v3.2.4/lib/copperplate/syncobj.c
new file mode 100644
index 0000000..85b29c4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/syncobj.c
@@ -0,0 +1,626 @@
+/*
+ * Copyright (C) 2008-2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <assert.h>
+#include <errno.h>
+#include "boilerplate/lock.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/syncobj.h"
+#include "copperplate/debug.h"
+#include "internal.h"
+
+/*
+ * XXX: The POSIX spec states that "Synchronization primitives that
+ * attempt to interfere with scheduling policy by specifying an
+ * ordering rule are considered undesirable. Threads waiting on
+ * mutexes and condition variables are selected to proceed in an order
+ * dependent upon the scheduling policy rather than in some fixed
+ * order (for example, FIFO or priority). Thus, the scheduling policy
+ * determines which thread(s) are awakened and allowed to proceed.".
+ * Linux enforces this by always queuing SCHED_FIFO waiters by
+ * priority when sleeping on futex objects, which underlay mutexes and
+ * condition variables.
+ *
+ * Unfortunately, most non-POSIX RTOSes do allow the queuing order of
+ * their synchronization objects to be specified at creation time,
+ * and ignoring a requested FIFO ordering may break applications
+ * which expect a fair distribution of the resource. Therefore, we
+ * must emulate FIFO ordering, and we do that
+ * using an internal queue. We also use this queue to implement the
+ * flush operation on synchronization objects which POSIX does not
+ * provide either.
+ *
+ * The syncobj abstraction is based on a complex monitor object to
+ * wait for resources, either implemented natively by Cobalt or
+ * emulated via a mutex and two condition variables over Mercury (one
+ * of which being hosted by the thread object implementation).
+ *
+ * NOTE: we don't do error backtracing in this file, since error
+ * returns when locking, pending or deleting sync objects usually
+ * express normal runtime conditions.
+ */
+
+#ifdef CONFIG_XENO_COBALT
+
+#include "cobalt/internal.h"
+
+static inline
+int monitor_enter(struct syncobj *sobj)
+{
+	return cobalt_monitor_enter(&sobj->core.monitor);
+}
+
+static inline
+void monitor_exit(struct syncobj *sobj)
+{
+	int ret;
+	ret = cobalt_monitor_exit(&sobj->core.monitor);
+	assert(ret == 0);
+	(void)ret;
+}
+
+static inline
+int monitor_wait_grant(struct syncobj *sobj,
+		       struct threadobj *current,
+		       const struct timespec *timeout)
+{
+	return cobalt_monitor_wait(&sobj->core.monitor,
+				   COBALT_MONITOR_WAITGRANT,
+				   timeout);
+}
+
+static inline
+int monitor_wait_drain(struct syncobj *sobj,
+		       struct threadobj *current,
+		       const struct timespec *timeout)
+{
+	return cobalt_monitor_wait(&sobj->core.monitor,
+				   COBALT_MONITOR_WAITDRAIN,
+				   timeout);
+}
+
+static inline
+void monitor_grant(struct syncobj *sobj, struct threadobj *thobj)
+{
+	cobalt_monitor_grant(&sobj->core.monitor,
+			     threadobj_get_window(&thobj->core));
+}
+
+static inline
+void monitor_drain_all(struct syncobj *sobj)
+{
+	cobalt_monitor_drain_all(&sobj->core.monitor);
+}
+
+static inline int syncobj_init_corespec(struct syncobj *sobj,
+					clockid_t clk_id)
+{
+	int flags = monitor_scope_attribute;
+
+	return __bt(cobalt_monitor_init(&sobj->core.monitor, clk_id, flags));
+}
+
+static inline void syncobj_cleanup_corespec(struct syncobj *sobj)
+{
+	/* We hold the gate lock while destroying. */
+	int ret = cobalt_monitor_destroy(&sobj->core.monitor);
+	/* Let earlier EPERM condition propagate, don't trap. */
+	assert(ret == 0 || ret == -EPERM);
+	(void)ret;
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
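+/*
+ * POSIX threading calls return a positive errno value on failure, so
+ * the wrappers below negate it to match the kernel-style convention
+ * used throughout copperplate.
+ */
+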
+static inline
+int monitor_enter(struct syncobj *sobj)
+{
+	return -pthread_mutex_lock(&sobj->core.lock);
+}
+
+static inline
+void monitor_exit(struct syncobj *sobj)
+{
+	int ret;
+	ret = pthread_mutex_unlock(&sobj->core.lock);
+	assert(ret == 0); (void)ret;
+}
+
+static inline
+int monitor_wait_grant(struct syncobj *sobj,
+		       struct threadobj *current,
+		       const struct timespec *timeout)
+{
+	if (timeout)
+		return -threadobj_cond_timedwait(&current->core.grant_sync,
+						 &sobj->core.lock, timeout);
+
+	return -threadobj_cond_wait(&current->core.grant_sync, &sobj->core.lock);
+}
+
+static inline
+int monitor_wait_drain(struct syncobj *sobj,
+		       struct threadobj *current,
+		       const struct timespec *timeout)
+{
+	if (timeout)
+		return -threadobj_cond_timedwait(&sobj->core.drain_sync,
+						 &sobj->core.lock,
+						 timeout);
+
+	return -threadobj_cond_wait(&sobj->core.drain_sync, &sobj->core.lock);
+}
+
+static inline
+void monitor_grant(struct syncobj *sobj, struct threadobj *thobj)
+{
+	threadobj_cond_signal(&thobj->core.grant_sync);
+}
+
+static inline
+void monitor_drain_all(struct syncobj *sobj)
+{
+	threadobj_cond_broadcast(&sobj->core.drain_sync);
+}
+
+/*
+ * Over Mercury, we implement a complex monitor via a mutex and a
+ * couple of condvars, one in the syncobj and the other owned by the
+ * thread object.
+ */
+static inline int syncobj_init_corespec(struct syncobj *sobj,
+					clockid_t clk_id)
+{
+	pthread_mutexattr_t mattr;
+	pthread_condattr_t cattr;
+	int ret;
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	ret = __bt(-pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute));
+	if (ret) {
+		pthread_mutexattr_destroy(&mattr);
+		return ret;
+	}
+
+	ret = __bt(-pthread_mutex_init(&sobj->core.lock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	pthread_condattr_init(&cattr);
+	pthread_condattr_setpshared(&cattr, mutex_scope_attribute);
+	ret = __bt(pthread_condattr_setclock(&cattr, clk_id));
+	if (ret)
+		goto fail;
+
+	ret = __bt(-pthread_cond_init(&sobj->core.drain_sync, &cattr));
+	pthread_condattr_destroy(&cattr);
+	if (ret) {
+	fail:
+		pthread_mutex_destroy(&sobj->core.lock);
+		return ret;
+	}
+
+	return 0;
+}
+
+static inline void syncobj_cleanup_corespec(struct syncobj *sobj)
+{
+	monitor_exit(sobj);
+	pthread_cond_destroy(&sobj->core.drain_sync);
+	pthread_mutex_destroy(&sobj->core.lock);
+}
+
+#endif	/* CONFIG_XENO_MERCURY */
+
+int syncobj_init(struct syncobj *sobj, clockid_t clk_id, int flags,
+		 fnref_type(void (*)(struct syncobj *sobj)) finalizer)
+{
+	sobj->flags = flags;
+	list_init(&sobj->grant_list);
+	list_init(&sobj->drain_list);
+	sobj->grant_count = 0;
+	sobj->drain_count = 0;
+	sobj->wait_count = 0;
+	sobj->finalizer = finalizer;
+	sobj->magic = SYNCOBJ_MAGIC;
+
+	return __bt(syncobj_init_corespec(sobj, clk_id));
+}
+
+int syncobj_lock(struct syncobj *sobj, struct syncstate *syns)
+{
+	int ret, oldstate;
+
+	/*
+	 * This magic prevents concurrent locking while a deletion is
+	 * in progress, waiting for the release count to drop to zero.
+	 */
+	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+
+	ret = monitor_enter(sobj);
+	if (ret)
+		goto fail;
+
+	/* Check for an ongoing deletion. */
+	if (sobj->magic != SYNCOBJ_MAGIC) {
+		monitor_exit(sobj);
+		ret = -EINVAL;
+		goto fail;
+	}
+
+	syns->state = oldstate;
+	__syncobj_tag_locked(sobj);
+	return 0;
+fail:
+	pthread_setcancelstate(oldstate, NULL);
+	return ret;
+}
+
+void syncobj_unlock(struct syncobj *sobj, struct syncstate *syns)
+{
+	__syncobj_tag_unlocked(sobj);
+	monitor_exit(sobj);
+	pthread_setcancelstate(syns->state, NULL);
+}
+
+static void __syncobj_finalize(struct syncobj *sobj)
+{
+	void (*finalizer)(struct syncobj *sobj);
+
+	/*
+	 * Cancelability is still disabled or we are running over the
+	 * thread finalizer, therefore we can't be wiped off in the
+	 * middle of the finalization process.
+	 */
+	syncobj_cleanup_corespec(sobj);
+	fnref_get(finalizer, sobj->finalizer);
+	if (finalizer)
+		finalizer(sobj);
+}
+
+int __syncobj_broadcast_grant(struct syncobj *sobj, int reason)
+{
+	struct threadobj *thobj;
+	int ret;
+
+	assert(!list_empty(&sobj->grant_list));
+
+	do {
+		thobj = list_pop_entry(&sobj->grant_list,
+				       struct threadobj, wait_link);
+		thobj->wait_status |= reason;
+		thobj->wait_sobj = NULL;
+		monitor_grant(sobj, thobj);
+	} while (!list_empty(&sobj->grant_list));
+
+	ret = sobj->grant_count;
+	sobj->grant_count = 0;
+
+	return ret;
+}
+
+int __syncobj_broadcast_drain(struct syncobj *sobj, int reason)
+{
+	struct threadobj *thobj;
+	int ret;
+
+	assert(!list_empty(&sobj->drain_list));
+
+	do {
+		thobj = list_pop_entry(&sobj->drain_list,
+				       struct threadobj, wait_link);
+		thobj->wait_sobj = NULL;
+		thobj->wait_status |= reason;
+	} while (!list_empty(&sobj->drain_list));
+
+	monitor_drain_all(sobj);
+
+	ret = sobj->drain_count;
+	sobj->drain_count = 0;
+
+	return ret;
+}
+
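+/*
+ * Waiters are queued in FIFO order unless SYNCOBJ_PRIO was given, in
+ * which case the grant queue is scanned backwards so that the
+ * incoming thread lands after the last waiter with a higher or equal
+ * priority, keeping equal-priority threads in FIFO order.
+ */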
+static inline void enqueue_waiter(struct syncobj *sobj,
+				  struct threadobj *thobj)
+{
+	struct threadobj *__thobj;
+
+	thobj->wait_prio = thobj->global_priority;
+	if (list_empty(&sobj->grant_list) || (sobj->flags & SYNCOBJ_PRIO) == 0) {
+		list_append(&thobj->wait_link, &sobj->grant_list);
+		return;
+	}
+
+	list_for_each_entry_reverse(__thobj, &sobj->grant_list, wait_link) {
+		if (thobj->wait_prio <= __thobj->wait_prio)
+			break;
+	}
+	ath(&__thobj->wait_link, &thobj->wait_link);
+}
+
+static inline void dequeue_waiter(struct syncobj *sobj,
+				  struct threadobj *thobj)
+{
+	list_remove(&thobj->wait_link);
+	if (thobj->wait_status & SYNCOBJ_DRAINWAIT)
+		sobj->drain_count--;
+	else
+		sobj->grant_count--;
+
+	assert(sobj->wait_count > 0);
+}
+
+/*
+ * NOTE: we don't use POSIX cleanup handlers in syncobj_wait_grant() and
+ * syncobj_wait() on purpose: these may have a significant impact on
+ * latency due to I-cache misses on low-end hardware (e.g. ~6 us on
+ * MPC5200), particularly when unwinding the cancel frame. So the
+ * cleanup handler below is called by the threadobj finalizer instead
+ * when appropriate, since we have enough internal information to
+ * handle this situation.
+ */
+void __syncobj_cleanup_wait(struct syncobj *sobj, struct threadobj *thobj)
+{
+	/*
+	 * We don't care about resetting the original cancel type
+	 * saved in the syncstate struct since we are there precisely
+	 * because the caller got cancelled while sleeping on the
+	 * GRANT/DRAIN condition.
+	 */
+	dequeue_waiter(sobj, thobj);
+
+	if (--sobj->wait_count == 0 && sobj->magic != SYNCOBJ_MAGIC) {
+		__syncobj_finalize(sobj);
+		return;
+	}
+
+	monitor_exit(sobj);
+}
+
+struct threadobj *syncobj_grant_one(struct syncobj *sobj)
+{
+	struct threadobj *thobj;
+
+	__syncobj_check_locked(sobj);
+
+	if (list_empty(&sobj->grant_list))
+		return NULL;
+
+	thobj = list_pop_entry(&sobj->grant_list, struct threadobj, wait_link);
+	thobj->wait_status |= SYNCOBJ_SIGNALED;
+	thobj->wait_sobj = NULL;
+	sobj->grant_count--;
+	monitor_grant(sobj, thobj);
+
+	return thobj;
+}
+
+void syncobj_grant_to(struct syncobj *sobj, struct threadobj *thobj)
+{
+	__syncobj_check_locked(sobj);
+
+	list_remove(&thobj->wait_link);
+	thobj->wait_status |= SYNCOBJ_SIGNALED;
+	thobj->wait_sobj = NULL;
+	sobj->grant_count--;
+	monitor_grant(sobj, thobj);
+}
+
+struct threadobj *syncobj_peek_grant(struct syncobj *sobj)
+{
+	struct threadobj *thobj;
+
+	__syncobj_check_locked(sobj);
+
+	if (list_empty(&sobj->grant_list))
+		return NULL;
+
+	thobj = list_first_entry(&sobj->grant_list, struct threadobj,
+				 wait_link);
+	return thobj;
+}
+
+struct threadobj *syncobj_peek_drain(struct syncobj *sobj)
+{
+	struct threadobj *thobj;
+
+	__syncobj_check_locked(sobj);
+
+	if (list_empty(&sobj->drain_list))
+		return NULL;
+
+	thobj = list_first_entry(&sobj->drain_list, struct threadobj,
+				 wait_link);
+	return thobj;
+}
+
+static int wait_epilogue(struct syncobj *sobj,
+			 struct syncstate *syns,
+			 struct threadobj *current,
+			 int ret)
+{
+	current->run_state = __THREAD_S_RUNNING;
+
+	/*
+	 * Fixup a potential race upon return from grant/drain_wait
+	 * operations, e.g. given two threads A and B:
+	 *
+	 * A:enqueue_waiter(self)
+	 * A:monitor_wait
+	 *    A:monitor_unlock
+	 *    A:[timed] sleep
+	 *    A:wakeup on timeout/interrupt
+	 *       B:monitor_lock
+	 *       B:look_for_queued_waiter
+	 *          (found A, update A's state)
+	 *       B:monitor_unlock
+	 *    A:dequeue_waiter(self)
+	 *    A:return -ETIMEDOUT/-EINTR
+	 *
+	 * The race may happen anytime between the timeout/interrupt
+	 * event is received by A, and the moment it grabs back the
+	 * monitor lock before unqueuing. When the race happens, B can
+	 * squeeze in a signal before A unqueues after resumption on
+	 * error.
+	 *
+	 * Problem: A's internal state has been updated (e.g. some
+	 * data transferred to it), but it will receive
+	 * -ETIMEDOUT/-EINTR, causing it to miss the update
+	 * eventually.
+	 *
+	 * Solution: fixup the status code upon return from
+	 * wait_grant/drain operations, so that -ETIMEDOUT/-EINTR is
+	 * never returned to the caller if the syncobj was actually
+	 * signaled. We still allow the SYNCOBJ_FLUSHED condition to
+	 * override that success code though.
+	 *
+	 * Whether a condition should be deemed satisfied if it is
+	 * signaled during the race window described above is
+	 * debatable, but this is a simple and straightforward way to
+	 * handle such grey area.
+	 */
+
+	if (current->wait_sobj) {
+		dequeue_waiter(sobj, current);
+		current->wait_sobj = NULL;
+	} else if (ret == -ETIMEDOUT || ret == -EINTR)
+		ret = 0;
+
+	sobj->wait_count--;
+	assert(sobj->wait_count >= 0);
+
+	if (sobj->magic != SYNCOBJ_MAGIC) {
+		if (sobj->wait_count == 0)
+			__syncobj_finalize(sobj);
+		else
+			monitor_exit(sobj);
+		pthread_setcancelstate(syns->state, NULL);
+		return -EIDRM;
+	}
+
+	if (current->wait_status & SYNCOBJ_FLUSHED)
+		return -EINTR;
+
+	return ret;
+}
+
+int syncobj_wait_grant(struct syncobj *sobj, const struct timespec *timeout,
+		       struct syncstate *syns)
+{
+	struct threadobj *current = threadobj_current();
+	int ret, state;
+
+	__syncobj_check_locked(sobj);
+
+	assert(current != NULL);
+
+	current->run_state = timeout ? __THREAD_S_TIMEDWAIT : __THREAD_S_WAIT;
+	threadobj_save_timeout(&current->core, timeout);
+	current->wait_status = 0;
+	enqueue_waiter(sobj, current);
+	current->wait_sobj = sobj;
+	sobj->grant_count++;
+	sobj->wait_count++;
+
+	/*
+	 * NOTE: we are guaranteed to be in deferred cancel mode, with
+	 * cancelability disabled (in syncobj_lock); re-enable it
+	 * before pending on the condvar.
+	 */
+	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &state);
+	assert(state == PTHREAD_CANCEL_DISABLE);
+
+	do {
+		__syncobj_tag_unlocked(sobj);
+		ret = monitor_wait_grant(sobj, current, timeout);
+		__syncobj_tag_locked(sobj);
+		/* Check for spurious wake up. */
+	} while (ret == 0 && current->wait_sobj);
+
+	pthread_setcancelstate(state, NULL);
+
+	return wait_epilogue(sobj, syns, current, ret);
+}
+
+int syncobj_wait_drain(struct syncobj *sobj, const struct timespec *timeout,
+		       struct syncstate *syns)
+{
+	struct threadobj *current = threadobj_current();
+	int ret, state;
+
+	__syncobj_check_locked(sobj);
+
+	assert(current != NULL);
+
+	current->run_state = timeout ? __THREAD_S_TIMEDWAIT : __THREAD_S_WAIT;
+	threadobj_save_timeout(&current->core, timeout);
+	current->wait_status = SYNCOBJ_DRAINWAIT;
+	list_append(&current->wait_link, &sobj->drain_list);
+	current->wait_sobj = sobj;
+	sobj->drain_count++;
+	sobj->wait_count++;
+
+	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &state);
+	assert(state == PTHREAD_CANCEL_DISABLE);
+
+	/*
+	 * NOTE: Since the DRAINED signal is broadcast to all waiters,
+	 * a race may exist for acting upon it among those
+	 * threads. Therefore the caller must check that the drain
+	 * condition is still true before proceeding.
+	 */
+	do {
+		__syncobj_tag_unlocked(sobj);
+		ret = monitor_wait_drain(sobj, current, timeout);
+		__syncobj_tag_locked(sobj);
+	} while (ret == 0 && current->wait_sobj);
+
+	pthread_setcancelstate(state, NULL);
+
+	return wait_epilogue(sobj, syns, current, ret);
+}
+
+int syncobj_destroy(struct syncobj *sobj, struct syncstate *syns)
+{
+	int ret;
+
+	__syncobj_check_locked(sobj);
+
+	sobj->magic = ~SYNCOBJ_MAGIC;
+	ret = syncobj_flush(sobj);
+	if (ret) {
+		syncobj_unlock(sobj, syns);
+		return ret;
+	}
+
+	/* No thread awaken - we may dispose immediately. */
+	__syncobj_finalize(sobj);
+	pthread_setcancelstate(syns->state, NULL);
+
+	return 0;
+}
+
+void syncobj_uninit(struct syncobj *sobj)
+{
+	monitor_enter(sobj);
+	assert(sobj->wait_count == 0);
+	syncobj_cleanup_corespec(sobj);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/threadobj.c b/kernel/xenomai-v3.2.4/lib/copperplate/threadobj.c
new file mode 100644
index 0000000..db18f4f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/threadobj.c
@@ -0,0 +1,1835 @@
+/*
+ * Copyright (C) 2008-2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * Thread object abstraction.
+ */
+#include <signal.h>
+#include <memory.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <time.h>
+#include <fcntl.h>
+#include <assert.h>
+#include <limits.h>
+#include <sched.h>
+#include "boilerplate/signal.h"
+#include "boilerplate/atomic.h"
+#include "boilerplate/lock.h"
+#include "copperplate/traceobj.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/syncobj.h"
+#include "copperplate/cluster.h"
+#include "copperplate/clockobj.h"
+#include "copperplate/eventobj.h"
+#include "copperplate/heapobj.h"
+#include "internal.h"
+
+union copperplate_wait_union {
+	struct syncluster_wait_struct syncluster_wait;
+	struct eventobj_wait_struct eventobj_wait;
+};
+
+union main_wait_union {
+	union copperplate_wait_union copperplate_wait;
+	char untyped_wait[1024];
+};
+
+static void finalize_thread(void *p);
+
+static void set_global_priority(struct threadobj *thobj, int policy,
+				const struct sched_param_ex *param_ex);
+
+static int request_setschedparam(struct threadobj *thobj, int policy,
+				 const struct sched_param_ex *param_ex);
+
+static int request_cancel(struct threadobj *thobj);
+
+static sigset_t sigperiod_set;
+
+static int threadobj_agent_prio;
+
+int threadobj_high_prio;
+
+int threadobj_irq_prio;
+
+#ifdef HAVE_TLS
+__thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+struct threadobj *__threadobj_current;
+#endif
+
+/*
+ * We need the thread object key regardless of whether TLS is
+ * available to us, to run the thread finalizer routine.
+ */
+pthread_key_t threadobj_tskey;
+
+void threadobj_init_key(void)
+{
+	if (pthread_key_create(&threadobj_tskey, finalize_thread))
+		early_panic("failed to allocate TSD key");
+}
+
+#ifdef CONFIG_XENO_PSHARED
+
+static pid_t agent_pid;
+
+#define RMT_SETSCHED	0
+#define RMT_CANCEL	1
+
+struct remote_cancel {
+	pthread_t ptid;
+	int policy;
+	struct sched_param_ex param_ex;
+};
+
+struct remote_setsched {
+	pthread_t ptid;
+	int policy;
+	struct sched_param_ex param_ex;
+};
+
+struct remote_request {
+	int req;	/* RMT_xx */
+	union {
+		struct remote_cancel cancel;
+		struct remote_setsched setsched;
+	} u;
+};
+
+static int agent_prologue(void *arg)
+{
+	agent_pid = get_thread_pid();
+	copperplate_set_current_name("remote-agent");
+	threadobj_set_current(THREADOBJ_IRQCONTEXT);
+
+	return 0;
+}
+
+static void *agent_loop(void *arg)
+{
+	struct remote_request *rq;
+	siginfo_t si;
+	sigset_t set;
+	int sig, ret;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGAGENT);
+
+	for (;;) {
+		sig = __RT(sigwaitinfo(&set, &si));
+		if (sig < 0) {
+			if (errno == EINTR)
+				continue;
+			panic("agent thread cannot wait for request, %s",
+			      symerror(-errno));
+		}
+		rq = si.si_ptr;
+		switch (rq->req) {
+		case RMT_SETSCHED:
+			ret = copperplate_renice_local_thread(rq->u.setsched.ptid,
+							      rq->u.setsched.policy,
+							      &rq->u.setsched.param_ex);
+			break;
+		case RMT_CANCEL:
+			if (rq->u.cancel.policy != -1)
+				copperplate_renice_local_thread(rq->u.cancel.ptid,
+								rq->u.cancel.policy,
+								&rq->u.cancel.param_ex);
+			ret = pthread_cancel(rq->u.cancel.ptid);
+			break;
+		default:
+			panic("invalid remote request #%d", rq->req);
+		}
+		if (ret)
+			warning("remote request #%d failed, %s",
+				rq->req, symerror(ret));
+		xnfree(rq);
+	}
+
+	return NULL;
+}
+
+static inline int send_agent(struct threadobj *thobj,
+			     struct remote_request *rq)
+{
+	union sigval val = { .sival_ptr = rq };
+
+	/*
+	 * We are not supposed to issue remote requests when nobody
+	 * else may share our session.
+	 */
+	assert(agent_pid != 0);
+
+	/*
+	 * XXX: No backtracing, may legitimately fail if the remote
+	 * process goes away (hopefully cleanly). However, the request
+	 * blocks attached to unprocessed pending signals may leak, as
+	 * requests are fully asynchronous. Fortunately, processes
+	 * creating user threads are unlikely to leave the session
+	 * they belong to ungracefully.
+	 */
+	return __RT(sigqueue(agent_pid, SIGAGENT, val));
+}
+
+static void start_agent(void)
+{
+	struct corethread_attributes cta;
+	pthread_t ptid;
+	sigset_t set;
+	int ret;
+
+	/*
+	 * CAUTION: we expect all internal/user threads created by
+	 * Copperplate to inherit this signal mask, otherwise
+	 * sigqueue(SIGAGENT) might be delivered to the wrong
+	 * thread. So make sure the agent support is set up early
+	 * enough.
+	 */
+	sigemptyset(&set);
+	sigaddset(&set, SIGAGENT);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	cta.policy = threadobj_agent_prio ? SCHED_CORE : SCHED_OTHER;
+	cta.param_ex.sched_priority = threadobj_agent_prio;
+	cta.prologue = agent_prologue;
+	cta.run = agent_loop;
+	cta.arg = NULL;
+	cta.stacksize = PTHREAD_STACK_DEFAULT;
+	cta.detachstate = PTHREAD_CREATE_DETACHED;
+
+	ret = copperplate_create_thread(&cta, &ptid);
+	if (ret)
+		panic("failed to start agent thread, %s", symerror(ret));
+}
+
+#else  /* !CONFIG_XENO_PSHARED */
+
+static inline void start_agent(void)
+{
+	/* No agent in private (process-local) session. */
+}
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+#ifdef CONFIG_XENO_COBALT
+
+#include "cobalt/internal.h"
+
+static inline void pkg_init_corespec(void)
+{
+	/*
+	 * We must have CAP_SYS_NICE since we reached this code either
+	 * as root or as a member of the allowed group, as a result of
+	 * binding the current process to the Cobalt core earlier in
+	 * libcobalt's setup code.
+	 */
+	threadobj_irq_prio = sched_get_priority_max_ex(SCHED_CORE);
+	threadobj_high_prio = sched_get_priority_max_ex(SCHED_FIFO);
+	threadobj_agent_prio = threadobj_high_prio;
+}
+
+static inline int threadobj_init_corespec(struct threadobj *thobj)
+{
+	return 0;
+}
+
+static inline void threadobj_uninit_corespec(struct threadobj *thobj)
+{
+}
+
+#ifdef CONFIG_XENO_PSHARED
+
+static inline int threadobj_setup_corespec(struct threadobj *thobj)
+{
+	thobj->core.handle = cobalt_get_current();
+	thobj->core.u_winoff = (void *)cobalt_get_current_window() -
+		cobalt_umm_shared;
+
+	return 0;
+}
+
+#else /* !CONFIG_XENO_PSHARED */
+
+static inline int threadobj_setup_corespec(struct threadobj *thobj)
+{
+	thobj->core.handle = cobalt_get_current();
+	thobj->core.u_window = cobalt_get_current_window();
+
+	return 0;
+}
+
+#endif /* !CONFIG_XENO_PSHARED */
+
+static inline void threadobj_cleanup_corespec(struct threadobj *thobj)
+{
+}
+
+static inline void threadobj_run_corespec(struct threadobj *thobj)
+{
+	cobalt_thread_harden();
+}
+
+static inline void threadobj_cancel_1_corespec(struct threadobj *thobj) /* thobj->lock held */
+{
+}
+
+static inline void threadobj_cancel_2_corespec(struct threadobj *thobj) /* thobj->lock held */
+{
+	/*
+	 * Send a SIGDEMT signal to demote the target thread, to make
+	 * sure pthread_cancel() will be effective asap.
+	 *
+	 * In effect, the thread is kicked out of any blocking
+	 * syscall, a relax is forced on it (via a mayday trap if
+	 * required), and it is then required to leave the real-time
+	 * scheduling class.
+	 *
+	 * - this makes sure the thread returns with EINTR from the
+	 * syscall then hits a cancellation point asap.
+	 *
+	 * - this ensures that the thread can receive the cancellation
+	 * signal in case asynchronous cancellation is enabled and get
+	 * kicked out from syscall-less code in primary mode
+	 * (e.g. busy loops).
+	 *
+	 * - this makes sure the thread won't preempt the caller
+	 * indefinitely when resuming due to priority enforcement
+	 * (i.e. when the target thread has higher Xenomai priority
+	 * than the caller of threadobj_cancel()), but will receive
+	 * the following cancellation request asap.
+	 */
+	__RT(kill(thobj->pid, SIGDEMT));
+}
+
+int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
+{
+	pid_t pid = thobj->pid;
+	int ret;
+
+	__threadobj_check_locked(thobj);
+
+	if (thobj->status & __THREAD_S_SUSPENDED)
+		return 0;
+
+	thobj->status |= __THREAD_S_SUSPENDED;
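+	/*
+	 * Suspending ourselves does not return until resumption, so
+	 * the thread lock must be dropped across the kill() call in
+	 * that case.
+	 */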
+	if (thobj == threadobj_current()) {
+		threadobj_unlock(thobj);
+		ret = __RT(kill(pid, SIGSUSP));
+		threadobj_lock(thobj);
+	} else
+		ret = __RT(kill(pid, SIGSUSP));
+
+	return __bt(-ret);
+}
+
+int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
+{
+	int ret;
+
+	__threadobj_check_locked(thobj);
+
+	if ((thobj->status & __THREAD_S_SUSPENDED) == 0)
+		return 0;
+
+	thobj->status &= ~__THREAD_S_SUSPENDED;
+	ret = __RT(kill(thobj->pid, SIGRESM));
+
+	return __bt(-ret);
+}
+
+static inline int threadobj_unblocked_corespec(struct threadobj *current)
+{
+	return (threadobj_get_window(&current->core)->info & XNBREAK) != 0;
+}
+
+int __threadobj_lock_sched(struct threadobj *current)
+{
+	if (current->schedlock_depth++ > 0)
+		return 0;
+
+	/*
+	 * In essence, we can't be scheduled out as a result of
+	 * locking the scheduler, so no need to drop the thread lock
+	 * across this call.
+	 */
+	return __bt(-pthread_setmode_np(0, PTHREAD_LOCK_SCHED, NULL));
+}
+
+int threadobj_lock_sched(void)
+{
+	struct threadobj *current = threadobj_current();
+
+	/* This call is lock-free over Cobalt. */
+	return __bt(__threadobj_lock_sched(current));
+}
+
+int __threadobj_unlock_sched(struct threadobj *current)
+{
+	/*
+	 * Higher layers may not know about the current scheduler
+	 * locking level and fully rely on us to track it, so we
+	 * gracefully handle unbalanced calls here, and let them
+	 * decide of the outcome in case of error.
+	 */
+	if (current->schedlock_depth == 0)
+		return __bt(-EINVAL);
+
+	if (--current->schedlock_depth > 0)
+		return 0;
+
+	return __bt(-pthread_setmode_np(PTHREAD_LOCK_SCHED, 0, NULL));
+}
+
+int threadobj_unlock_sched(void)
+{
+	struct threadobj *current = threadobj_current();
+
+	/* This call is lock-free over Cobalt. */
+	return __bt(__threadobj_unlock_sched(current));
+}
+
+int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock held */
+{
+	struct threadobj *current = threadobj_current();
+	int __clrmask = 0, __setmask = 0;
+
+	__threadobj_check_locked(current);
+
+	if (setmask & __THREAD_M_WARNSW)
+		__setmask |= PTHREAD_WARNSW;
+	else if (clrmask & __THREAD_M_WARNSW)
+		__clrmask |= PTHREAD_WARNSW;
+
+	if (setmask & __THREAD_M_CONFORMING)
+		__setmask |= PTHREAD_CONFORMING;
+	else if (clrmask & __THREAD_M_CONFORMING)
+		__clrmask |= PTHREAD_CONFORMING;
+
+	if (setmask & __THREAD_M_LOCK)
+		__threadobj_lock_sched_once(current);
+	else if (clrmask & __THREAD_M_LOCK)
+		__threadobj_unlock_sched(current);
+
+	if (mode_r || __setmask || __clrmask)
+		return __bt(-pthread_setmode_np(__clrmask, __setmask, mode_r));
+
+	return 0;
+}
+
+static inline int map_priority_corespec(int policy,
+					const struct sched_param_ex *param_ex)
+{
+	int prio;
+
+	prio = cobalt_sched_weighted_prio(policy, param_ex);
+	assert(prio >= 0);
+
+	return prio;
+}
+
+static inline int prepare_rr_corespec(struct threadobj *thobj, int policy,
+				      const struct sched_param_ex *param_ex) /* thobj->lock held */
+{
+	return policy;
+}
+
+static inline int enable_rr_corespec(struct threadobj *thobj,
+				     const struct sched_param_ex *param_ex) /* thobj->lock held */
+{
+	return 0;
+}
+
+static inline void disable_rr_corespec(struct threadobj *thobj) /* thobj->lock held */
+{
+	/* nop */
+}
+
+int threadobj_stat(struct threadobj *thobj, struct threadobj_stat *p) /* thobj->lock held */
+{
+	struct cobalt_threadstat stat;
+	int ret;
+
+	__threadobj_check_locked(thobj);
+
+	ret = cobalt_thread_stat(thobj->pid, &stat);
+	if (ret)
+		return __bt(ret);
+
+	p->cpu = stat.cpu;
+	p->status = stat.status;
+	p->xtime = stat.xtime;
+	p->msw = stat.msw;
+	p->csw = stat.csw;
+	p->xsc = stat.xsc;
+	p->pf = stat.pf;
+	p->timeout = stat.timeout;
+	p->schedlock = thobj->schedlock_depth;
+
+	return 0;
+}
+
+#else /* CONFIG_XENO_MERCURY */
+
+static int threadobj_lock_prio;
+
+static void unblock_sighandler(int sig)
+{
+	struct threadobj *current = threadobj_current();
+
+	/*
+	 * SIGRELS is thread-directed, so referring to
+	 * current->run_state locklessly is safe as we are
+	 * basically introspecting.
+	 */
+	if (current->run_state == __THREAD_S_DELAYED)
+		current->run_state = __THREAD_S_BREAK;
+}
+
+static void roundrobin_handler(int sig)
+{
+	/*
+	 * We do manual round-robin over SCHED_FIFO to allow for
+	 * multiple arbitrary time slices (i.e. vs the kernel
+	 * pre-defined and fixed one).
+	 */
+	sched_yield();
+}
+
+static void sleep_suspended(void)
+{
+	sigset_t set;
+
+	/*
+	 * A suspended thread is supposed to do nothing but wait for
+	 * the wake up signal, so we may happily block all signals but
+	 * SIGRESM. Note that SIGRRB won't be accumulated during the
+	 * sleep time anyhow, as the round-robin timer is based on
+	 * CLOCK_THREAD_CPUTIME_ID, and we obviously won't consume
+	 * any CPU time while blocked.
+	 */
+	sigfillset(&set);
+	sigdelset(&set, SIGRESM);
+	sigsuspend(&set);
+}
+
+static void suspend_sighandler(int sig)
+{
+	sleep_suspended();
+}
+
+static void nop_sighandler(int sig)
+{
+	/* nop */
+}
+
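+/*
+ * Signal map of the Mercury emulation (handlers installed below):
+ * SIGRELS forcibly unblocks a delayed thread (threadobj_unblock()),
+ * SIGRRB drives the manual round-robin, SIGSUSP suspends its
+ * recipient, SIGRESM resumes it, and SIGPERIOD flags the end of a
+ * period for threadobj_wait_period().
+ */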
+static inline void pkg_init_corespec(void)
+{
+	struct sigaction sa;
+
+	/*
+	 * We don't have builtin scheduler-lock feature over Mercury,
+	 * so we emulate it by reserving the highest thread priority
+	 * level from the SCHED_FIFO class to disable involuntary
+	 * preemption.
+	 *
+	 * NOTE: The remote agent thread will also run with the
+	 * highest thread priority level (threadobj_agent_prio) in
+	 * shared multi-processing mode, which won't affect any thread
+	 * holding the scheduler lock, unless the latter has to block
+	 * for some reason, defeating the purpose of such a lock anyway.
+	 */
+	threadobj_irq_prio = sched_get_priority_max(SCHED_FIFO);
+	threadobj_lock_prio = threadobj_irq_prio - 1;
+	threadobj_high_prio = threadobj_irq_prio - 2;
+	threadobj_agent_prio = threadobj_high_prio;
+	/*
+	 * We allow a non-privileged process to start a low priority
+	 * agent thread only, on the assumption that it lacks
+	 * CAP_SYS_NICE, but this is pretty much the maximum extent of
+	 * our abilities for such processes. Other internal threads
+	 * requiring SCHED_CORE/FIFO scheduling such as the timer
+	 * manager won't start properly, therefore the corresponding
+	 * services won't be available.
+	 */
+	if (geteuid())
+		threadobj_agent_prio = 0;
+
+	memset(&sa, 0, sizeof(sa));
+	sa.sa_handler = unblock_sighandler;
+	sa.sa_flags = SA_RESTART;
+	sigaction(SIGRELS, &sa, NULL);
+	sa.sa_handler = roundrobin_handler;
+	sigaction(SIGRRB, &sa, NULL);
+	sa.sa_handler = suspend_sighandler;
+	sigaction(SIGSUSP, &sa, NULL);
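+	/*
+	 * SIGRESM and SIGPERIOD only have to interrupt a blocking
+	 * call - sigsuspend() and sigwaitinfo() respectively - so a
+	 * nop handler is enough for them.
+	 */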
+	sa.sa_handler = nop_sighandler;
+	sigaction(SIGRESM, &sa, NULL);
+	sigaction(SIGPERIOD, &sa, NULL);
+}
+
+static inline int threadobj_init_corespec(struct threadobj *thobj)
+{
+	pthread_condattr_t cattr;
+	int ret;
+
+	thobj->core.rr_timer = NULL;
+	/*
+	 * Over Mercury, we need an additional per-thread condvar to
+	 * implement the complex monitor for the syncobj abstraction.
+	 */
+	pthread_condattr_init(&cattr);
+	pthread_condattr_setpshared(&cattr, mutex_scope_attribute);
+	ret = __bt(-pthread_condattr_setclock(&cattr, CLOCK_COPPERPLATE));
+	if (ret)
+		warning("failed setting condvar clock, %s"
+			"(try --disable-clock-monotonic-raw)",
+			symerror(ret));
+	else
+		ret = __bt(-pthread_cond_init(&thobj->core.grant_sync, &cattr));
+	pthread_condattr_destroy(&cattr);
+
+#ifdef CONFIG_XENO_WORKAROUND_CONDVAR_PI
+	thobj->core.policy_unboosted = -1;
+#endif
+	return ret;
+}
+
+static inline void threadobj_uninit_corespec(struct threadobj *thobj)
+{
+	pthread_cond_destroy(&thobj->core.grant_sync);
+}
+
+static inline int threadobj_setup_corespec(struct threadobj *thobj)
+{
+	struct sigevent sev;
+	sigset_t set;
+	int ret;
+
+	/*
+	 * Do the per-thread setup for supporting the suspend/resume
+	 * actions over Mercury. We have two basic requirements for
+	 * this mechanism:
+	 *
+	 * - suspend requests must be handled asap, regardless of
+	 * what the target thread is doing when notified (syscall
+	 * wait, pure runtime etc.), hence the use of signals.
+	 *
+	 * - we must process the suspension signal on behalf of the
+	 * target thread, as we want that thread to block upon
+	 * receipt.
+	 *
+	 * In addition, we block the periodic signal, which we only
+	 * want to receive from within threadobj_wait_period().
+	 */
+	sigemptyset(&set);
+	sigaddset(&set, SIGRESM);
+	sigaddset(&set, SIGPERIOD);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+
+	/*
+	 * Create the per-thread round-robin timer.
+	 */
+	memset(&sev, 0, sizeof(sev));
+	sev.sigev_signo = SIGRRB;
+	sev.sigev_notify = SIGEV_SIGNAL|SIGEV_THREAD_ID;
+	sev.sigev_notify_thread_id = threadobj_get_pid(thobj);
+	ret = timer_create(CLOCK_THREAD_CPUTIME_ID, &sev,
+			   &thobj->core.rr_timer);
+	if (ret)
+		return __bt(-errno);
+
+	return 0;
+}
+
+static inline void threadobj_cleanup_corespec(struct threadobj *thobj)
+{
+	if (thobj->core.rr_timer)
+		timer_delete(thobj->core.rr_timer);
+}
+
+static inline void threadobj_run_corespec(struct threadobj *thobj)
+{
+}
+
+static inline void threadobj_cancel_1_corespec(struct threadobj *thobj) /* thobj->lock held */
+{
+	/*
+	 * If the target thread we are about to cancel gets suspended
+	 * while it is currently warming up, we have to unblock it
+	 * from sleep_suspended(), so that we don't get stuck in
+	 * cancel_sync(), waiting for a warmed up state which will
+	 * never come.
+	 *
+	 * Just send it SIGRESM unconditionally, this will either
+	 * unblock it if the thread waits in sleep_suspended(), or
+	 * lead to a nop since that signal is blocked otherwise.
+	 */
+	copperplate_kill_tid(thobj->pid, SIGRESM);
+}
+
+static inline void threadobj_cancel_2_corespec(struct threadobj *thobj) /* thobj->lock held */
+{
+}
+
+int threadobj_suspend(struct threadobj *thobj) /* thobj->lock held */
+{
+	__threadobj_check_locked(thobj);
+
+	if (thobj == threadobj_current()) {
+		thobj->status |= __THREAD_S_SUSPENDED;
+		threadobj_unlock(thobj);
+		sleep_suspended();
+		threadobj_lock(thobj);
+	} else if ((thobj->status & __THREAD_S_SUSPENDED) == 0) {
+		/*
+		 * We prevent suspension requests from accumulating, so
+		 * that we always have a flat, consistent sequence of
+		 * alternate suspend/resume events. It's up to the
+		 * client code to handle nested requests if need be.
+		 */
+		thobj->status |= __THREAD_S_SUSPENDED;
+		copperplate_kill_tid(thobj->pid, SIGSUSP);
+	}
+
+	return 0;
+}
+
+int threadobj_resume(struct threadobj *thobj) /* thobj->lock held */
+{
+	__threadobj_check_locked(thobj);
+
+	if (thobj != threadobj_current() &&
+	    (thobj->status & __THREAD_S_SUSPENDED) != 0) {
+		thobj->status &= ~__THREAD_S_SUSPENDED;
+		/*
+		 * We prevent resumption requests from accumulating. See
+		 * threadobj_suspend().
+		 */
+		copperplate_kill_tid(thobj->pid, SIGRESM);
+	}
+
+	return 0;
+}
+
+static inline int threadobj_unblocked_corespec(struct threadobj *current)
+{
+	return current->run_state != __THREAD_S_DELAYED;
+}
+
+int __threadobj_lock_sched(struct threadobj *current) /* current->lock held */
+{
+	struct sched_param_ex param_ex;
+	int ret;
+
+	__threadobj_check_locked(current);
+
+	if (current->schedlock_depth > 0)
+		goto done;
+
+	current->core.schedparam_unlocked = current->schedparam;
+	current->core.policy_unlocked = current->policy;
+	param_ex.sched_priority = threadobj_lock_prio;
+	ret = threadobj_set_schedparam(current, SCHED_FIFO, &param_ex);
+	if (ret)
+		return __bt(ret);
+done:
+	current->schedlock_depth++;
+
+	return 0;
+}
+
+int threadobj_lock_sched(void)
+{
+	struct threadobj *current = threadobj_current();
+	int ret;
+
+	threadobj_lock(current);
+	ret = __threadobj_lock_sched(current);
+	threadobj_unlock(current);
+
+	return __bt(ret);
+}
+
+int __threadobj_unlock_sched(struct threadobj *current) /* current->lock held */
+{
+	__threadobj_check_locked(current);
+
+	if (current->schedlock_depth == 0)
+		return __bt(-EINVAL);
+
+	if (--current->schedlock_depth > 0)
+		return 0;
+
+	return __bt(threadobj_set_schedparam(current,
+					     current->core.policy_unlocked,
+					     &current->core.schedparam_unlocked));
+}
+
+int threadobj_unlock_sched(void)
+{
+	struct threadobj *current = threadobj_current();
+	int ret;
+
+	threadobj_lock(current);
+	ret = __threadobj_unlock_sched(current);
+	threadobj_unlock(current);
+
+	return __bt(ret);
+}
+
+int threadobj_set_mode(int clrmask, int setmask, int *mode_r) /* current->lock held */
+{
+	struct threadobj *current = threadobj_current();
+	int ret = 0, old = 0;
+
+	__threadobj_check_locked(current);
+
+	if (current->schedlock_depth > 0)
+		old |= __THREAD_M_LOCK;
+
+	if (setmask & __THREAD_M_LOCK) {
+		ret = __threadobj_lock_sched_once(current);
+		if (ret == -EBUSY)
+			ret = 0;
+	} else if (clrmask & __THREAD_M_LOCK)
+		__threadobj_unlock_sched(current);
+
+	if (mode_r)
+		*mode_r = old;
+
+	return __bt(ret);
+}
+
+static inline int map_priority_corespec(int policy,
+					const struct sched_param_ex *param_ex)
+{
+	return param_ex->sched_priority;
+}
+
+static inline int prepare_rr_corespec(struct threadobj *thobj, int policy,
+				      const struct sched_param_ex *param_ex) /* thobj->lock held */
+{
+	return SCHED_FIFO;
+}
+
+static int enable_rr_corespec(struct threadobj *thobj,
+			      const struct sched_param_ex *param_ex) /* thobj->lock held */
+{
+	struct itimerspec value;
+	int ret;
+
+	value.it_interval = param_ex->sched_rr_quantum;
+	value.it_value = value.it_interval;
+	ret = timer_settime(thobj->core.rr_timer, 0, &value, NULL);
+	if (ret)
+		return __bt(-errno);
+
+	return 0;
+}
+
+static void disable_rr_corespec(struct threadobj *thobj) /* thobj->lock held */
+{
+	struct itimerspec value;
+
+	value.it_value.tv_sec = 0;
+	value.it_value.tv_nsec = 0;
+	value.it_interval = value.it_value;
+	timer_settime(thobj->core.rr_timer, 0, &value, NULL);
+}
+
+int threadobj_stat(struct threadobj *thobj,
+		   struct threadobj_stat *stat) /* thobj->lock held */
+{
+	char procstat[64], buf[BUFSIZ], *p;
+	struct timespec now, delta;
+	FILE *fp;
+	int n;
+
+	__threadobj_check_locked(thobj);
+
+	snprintf(procstat, sizeof(procstat), "/proc/%d/stat", thobj->pid);
+	fp = fopen(procstat, "r");
+	if (fp == NULL)
+		return -EINVAL;
+
+	p = fgets(buf, sizeof(buf), fp);
+	fclose(fp);
+
+	if (p == NULL)
+		return -EIO;
+
+	p += strlen(buf);
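+	/*
+	 * Walk back 14 space-delimited fields from the end of the
+	 * stat line to reach the processor field; scanning backwards
+	 * avoids mis-parsing a command name which itself contains
+	 * spaces.
+	 */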
+	for (n = 0; n < 14; n++) {
+		while (*--p != ' ') {
+			if (p <= buf)
+				return -EINVAL;
+		}
+	}
+
+	stat->cpu = atoi(++p);
+	stat->status = threadobj_get_status(thobj);
+
+	if (thobj->run_state & (__THREAD_S_TIMEDWAIT|__THREAD_S_DELAYED)) {
+		__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+		timespec_sub(&delta, &thobj->core.timeout, &now);
+		stat->timeout = timespec_scalar(&delta);
+		/*
+		 * The timeout might fire as we are calculating the
+		 * delta: sanitize any negative value as 1.
+		 */
+		if ((sticks_t)stat->timeout < 0)
+			stat->timeout = 1;
+	} else
+		stat->timeout = 0;
+
+	stat->schedlock = thobj->schedlock_depth;
+
+	return 0;
+}
+
+#ifdef CONFIG_XENO_WORKAROUND_CONDVAR_PI
+
+/*
+ * This workaround does NOT deal with concurrent updates of the caller
+ * priority by other threads while the former is boosted. If your code
+ * depends so much on strict PI to fix up CPU starvation, but you
+ * insist on using a broken glibc that does not implement PI properly
+ * nevertheless, then you have to refrain from issuing
+ * pthread_setschedparam() for threads which might be currently
+ * boosted.
+ */
+static void __threadobj_boost(void)
+{
+	struct threadobj *current = threadobj_current();
+	struct sched_param param = {
+		.sched_priority = threadobj_irq_prio, /* Highest one. */
+	};
+	int ret;
+
+	if (current == NULL)	/* IRQ or invalid context */
+		return;
+
+	if (current->schedlock_depth > 0) {
+		current->core.policy_unboosted = SCHED_FIFO;
+		current->core.schedparam_unboosted.sched_priority = threadobj_lock_prio;
+	} else {
+		current->core.policy_unboosted = current->policy;
+		current->core.schedparam_unboosted = current->schedparam;
+	}
+	compiler_barrier();
+
+	ret = pthread_setschedparam(current->ptid, SCHED_FIFO, &param);
+	if (ret) {
+		current->core.policy_unboosted = -1;
+		warning("thread boost failed, %s", symerror(-ret));
+	}
+}
+
+static void __threadobj_unboost(void)
+{
+	struct threadobj *current = threadobj_current();
+	struct sched_param param;
+	int ret;
+
+	if (current == NULL)	/* IRQ or invalid context */
+		return;
+
+	param.sched_priority = current->core.schedparam_unboosted.sched_priority;
+
+	ret = pthread_setschedparam(current->ptid,
+				    current->core.policy_unboosted, &param);
+	if (ret)
+		warning("thread unboost failed, %s", symerror(-ret));
+
+	current->core.policy_unboosted = -1;
+}
+
+int threadobj_cond_timedwait(pthread_cond_t *cond,
+			     pthread_mutex_t *lock,
+			     const struct timespec *timeout)
+{
+	int ret;
+
+	__threadobj_boost();
+	ret = pthread_cond_timedwait(cond, lock, timeout);
+	__threadobj_unboost();
+
+	return ret;
+}
+
+int threadobj_cond_wait(pthread_cond_t *cond,
+			pthread_mutex_t *lock)
+{
+	int ret;
+
+	__threadobj_boost();
+	ret = pthread_cond_wait(cond, lock);
+	__threadobj_unboost();
+
+	return ret;
+}
+
+int threadobj_cond_signal(pthread_cond_t *cond)
+{
+	int ret;
+
+	__threadobj_boost();
+	ret = pthread_cond_signal(cond);
+	__threadobj_unboost();
+
+	return ret;
+}
+
+int threadobj_cond_broadcast(pthread_cond_t *cond)
+{
+	int ret;
+
+	__threadobj_boost();
+	ret = pthread_cond_broadcast(cond);
+	__threadobj_unboost();
+
+	return ret;
+}
+
+#endif /* !CONFIG_XENO_WORKAROUND_CONDVAR_PI */
+
+#endif /* CONFIG_XENO_MERCURY */
+
+static int request_setschedparam(struct threadobj *thobj, int policy,
+				 const struct sched_param_ex *param_ex)
+{				/* thobj->lock held */
+	int ret;
+
+#ifdef CONFIG_XENO_PSHARED
+	struct remote_request *rq;
+
+	if (!threadobj_local_p(thobj)) {
+		rq = xnmalloc(sizeof(*rq));
+		if (rq == NULL)
+			return -ENOMEM;
+
+		rq->req = RMT_SETSCHED;
+		rq->u.setsched.ptid = thobj->ptid;
+		rq->u.setsched.policy = policy;
+		rq->u.setsched.param_ex = *param_ex;
+
+		ret = __bt(send_agent(thobj, rq));
+		if (ret)
+			xnfree(rq);
+		return ret;
+	}
+#endif
+	/*
+	 * We must drop the lock temporarily across the setsched
+	 * operation, as libcobalt may switch us to secondary mode
+	 * when doing so (i.e. libc call to reflect the new priority
+	 * on the linux side).
+	 *
+	 * If we can't relock the target thread, this must mean that
+	 * it vanished in the meantime: return -EIDRM for the caller
+	 * to handle this case specifically.
+	 */
+	threadobj_unlock(thobj);
+	ret = copperplate_renice_local_thread(thobj->ptid, policy, param_ex);
+	if (threadobj_lock(thobj))
+		ret = -EIDRM;
+
+	return ret;
+}
+
+static int request_cancel(struct threadobj *thobj) /* thobj->lock held, dropped. */
+{
+	struct threadobj *current = threadobj_current();
+	int thprio = thobj->global_priority;
+	pthread_t ptid = thobj->ptid;
+#ifdef CONFIG_XENO_PSHARED
+	struct remote_request *rq;
+	int ret;
+
+	if (!threadobj_local_p(thobj)) {
+		threadobj_unlock(thobj);
+		rq = xnmalloc(sizeof(*rq));
+		if (rq == NULL)
+			return -ENOMEM;
+
+		rq->req = RMT_CANCEL;
+		rq->u.cancel.ptid = ptid;
+		rq->u.cancel.policy = -1;
+		if (current) {
+			rq->u.cancel.policy = current->policy;
+			rq->u.cancel.param_ex = current->schedparam;
+		}
+		ret = __bt(send_agent(thobj, rq));
+		if (ret)
+			xnfree(rq);
+		return ret;
+	}
+#endif
+	threadobj_unlock(thobj);
+
+	/*
+	 * The caller will have to wait for the killed thread to enter
+	 * its finalizer, so we boost the latter thread to prevent a
+	 * priority inversion if need be.
+	 *
+	 * NOTE: Since we dropped the lock, we might race if ptid
+	 * disappears while we are busy killing it, glibc will check
+	 * and dismiss if so.
+	 */
+
+	if (current && thprio < current->global_priority)
+		copperplate_renice_local_thread(ptid, current->policy,
+						&current->schedparam);
+	pthread_cancel(ptid);
+
+	return 0;
+}
+
+void *__threadobj_alloc(size_t tcb_struct_size,
+			size_t wait_union_size,
+			int thobj_offset)
+{
+	struct threadobj *thobj;
+	void *p;
+
+	if (wait_union_size < sizeof(union copperplate_wait_union))
+		wait_union_size = sizeof(union copperplate_wait_union);
+
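+	/*
+	 * Round the TCB size up to a multiple of sizeof(double), so
+	 * that the wait union laid out right after it is suitably
+	 * aligned.
+	 */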
+	tcb_struct_size = (tcb_struct_size+sizeof(double)-1) & ~(sizeof(double)-1);
+	p = xnmalloc(tcb_struct_size + wait_union_size);
+	if (p == NULL)
+		return NULL;
+
+	thobj = p + thobj_offset;
+	thobj->core_offset = thobj_offset;
+	thobj->wait_union = __moff(p + tcb_struct_size);
+	thobj->wait_size = wait_union_size;
+
+	return p;
+}
+
+static void set_global_priority(struct threadobj *thobj, int policy,
+				const struct sched_param_ex *param_ex)
+{
+	thobj->schedparam = *param_ex;
+	thobj->policy = policy;
+	thobj->global_priority = map_priority_corespec(policy, param_ex);
+}
+
+int threadobj_init(struct threadobj *thobj,
+		   struct threadobj_init_data *idata)
+{
+	pthread_mutexattr_t mattr;
+	pthread_condattr_t cattr;
+	int ret;
+
+	thobj->magic = idata->magic;
+	thobj->ptid = 0;
+	thobj->tracer = NULL;
+	thobj->wait_sobj = NULL;
+	thobj->finalizer = idata->finalizer;
+	thobj->schedlock_depth = 0;
+	thobj->status = __THREAD_S_WARMUP;
+	thobj->run_state = __THREAD_S_DORMANT;
+	set_global_priority(thobj, idata->policy, &idata->param_ex);
+	holder_init(&thobj->wait_link); /* mandatory */
+	thobj->cnode = __node_id;
+	thobj->pid = 0;
+	thobj->cancel_sem = NULL;
+	thobj->periodic_timer = NULL;
+
+	/*
+	 * CAUTION: wait_union and wait_size have been set in
+	 * __threadobj_alloc(), do not overwrite.
+	 */
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
+	ret = __bt(-__RT(pthread_mutex_init(&thobj->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	pthread_condattr_init(&cattr);
+	pthread_condattr_setpshared(&cattr, mutex_scope_attribute);
+	ret = __bt(-__RT(pthread_cond_init(&thobj->barrier, &cattr)));
+	pthread_condattr_destroy(&cattr);
+	if (ret) {
+		__RT(pthread_mutex_destroy(&thobj->lock));
+		return ret;
+	}
+
+	return threadobj_init_corespec(thobj);
+}
+
+static void uninit_thread(struct threadobj *thobj)
+{
+	threadobj_uninit_corespec(thobj);
+	__RT(pthread_cond_destroy(&thobj->barrier));
+	__RT(pthread_mutex_destroy(&thobj->lock));
+}
+
+static void destroy_thread(struct threadobj *thobj)
+{
+	threadobj_cleanup_corespec(thobj);
+	if (thobj->status & __THREAD_S_PERIODIC)
+		__RT(timer_delete(thobj->periodic_timer));
+	uninit_thread(thobj);
+}
+
+void threadobj_uninit(struct threadobj *thobj) /* thobj->lock free */
+{
+	assert((thobj->status & (__THREAD_S_STARTED|__THREAD_S_ACTIVE)) == 0);
+	uninit_thread(thobj);
+}
+
+/*
+ * NOTE: to spare us the need for passing the equivalent of a
+ * syncstate argument to each thread locking operation, we store the
+ * cancel state of the locker directly in the locked thread, prior
+ * to disabling cancellation for the calling thread.
+ *
+ * However, this means that we must save some state information on the
+ * stack prior to calling any service which releases that lock
+ * implicitly, such as pthread_cond_wait(). Failing to do so would
+ * introduce the possibility for the saved state to be overwritten by
+ * another thread which managed to grab the lock after
+ * pthread_cond_wait() dropped it.
+ *
+ * XXX: cancel_state is held in the descriptor of the target thread,
+ * not the current one, because we allow non-copperplate threads to
+ * call these services, and these have no threadobj descriptor.
+ */
+
+static int wait_on_barrier(struct threadobj *thobj, int mask)
+{
+	int oldstate, status;
+
+	for (;;) {
+		status = thobj->status;
+		if (status & mask)
+			break;
+		oldstate = thobj->cancel_state;
+		push_cleanup_lock(&thobj->lock);
+		__threadobj_tag_unlocked(thobj);
+		threadobj_cond_wait(&thobj->barrier, &thobj->lock);
+		__threadobj_tag_locked(thobj);
+		pop_cleanup_lock(&thobj->lock);
+		thobj->cancel_state = oldstate;
+	}
+
+	return status;
+}
+
+int threadobj_start(struct threadobj *thobj)	/* thobj->lock held. */
+{
+	struct threadobj *current = threadobj_current();
+	int ret = 0, oldstate;
+
+	__threadobj_check_locked(thobj);
+
+	if (thobj->status & __THREAD_S_STARTED)
+		return 0;
+
+	thobj->status |= __THREAD_S_STARTED;
+	threadobj_cond_signal(&thobj->barrier);
+
+	if (current && thobj->global_priority <= current->global_priority)
+		return 0;
+
+	/*
+	 * Caller needs synchronization with the thread being started,
+	 * which has higher priority. We shall wait until that thread
+	 * enters the user code, or aborts prior to reaching that
+	 * point, whichever comes first.
+	 *
+	 * We must not exit until the synchronization has fully taken
+	 * place, disable cancellability until then.
+	 */
+	pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, &oldstate);
+
+	thobj->status |= __THREAD_S_SAFE;
+	wait_on_barrier(thobj, __THREAD_S_ACTIVE);
+
+	/*
+	 * If the started thread has exited before we woke up from the
+	 * barrier, its TCB was not reclaimed, to prevent us from
+	 * treading on stale memory. Reclaim it now, and tell the
+	 * caller to forget about it as well.
+	 */
+	if (thobj->run_state == __THREAD_S_DORMANT) {
+		/* Keep cancel-safe after unlock. */
+		thobj->cancel_state = PTHREAD_CANCEL_DISABLE;
+		threadobj_unlock(thobj);
+		destroy_thread(thobj);
+		threadobj_free(thobj);
+		ret = -EIDRM;
+	} else
+		thobj->status &= ~__THREAD_S_SAFE;
+
+	pthread_setcancelstate(oldstate, NULL);
+
+	return ret;
+}
+
+void threadobj_wait_start(void) /* current->lock free. */
+{
+	struct threadobj *current = threadobj_current();
+	int status;
+
+	threadobj_lock(current);
+	status = wait_on_barrier(current, __THREAD_S_STARTED|__THREAD_S_ABORTED);
+	threadobj_unlock(current);
+
+	/*
+	 * We may have preempted the guy who set __THREAD_S_ABORTED in
+	 * our status before it had a chance to issue pthread_cancel()
+	 * on us, so we need to go idle into a cancellation point to
+	 * wait for it: use pause() for this.
+	 */
+	while (status & __THREAD_S_ABORTED)
+		pause();
+}
+
+void threadobj_notify_entry(void) /* current->lock free. */
+{
+	struct threadobj *current = threadobj_current();
+
+	threadobj_lock(current);
+	current->status |= __THREAD_S_ACTIVE;
+	current->run_state = __THREAD_S_RUNNING;
+	threadobj_cond_signal(&current->barrier);
+	threadobj_unlock(current);
+}
+
+/* thobj->lock free. */
+int threadobj_prologue(struct threadobj *thobj, const char *name)
+{
+	struct threadobj *current = threadobj_current();
+	int ret;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
+
+	/*
+	 * Check whether we overlay the default main TCB we set in
+	 * main_overlay(), releasing it if so.
+	 */
+	if (current) {
+		/*
+		 * CAUTION: we may not overlay non-default TCB. The
+		 * upper API should catch this issue before we get
+		 * called.
+		 */
+		assert(current->magic == 0);
+		sysgroup_remove(thread, &current->memspec);
+		finalize_thread(current);
+	}
+
+	if (name) {
+		namecpy(thobj->name, name);
+		copperplate_set_current_name(name);
+	} else {
+		ret = copperplate_get_current_name(thobj->name,
+						   sizeof(thobj->name));
+		if (ret)
+			warning("cannot get process name, %s", symerror(ret));
+	}
+
+	thobj->ptid = pthread_self();
+	thobj->pid = get_thread_pid();
+	thobj->errno_pointer = &errno;
+	backtrace_init_context(&thobj->btd, name);
+	ret = threadobj_setup_corespec(thobj);
+	if (ret) {
+		warning("prologue failed for thread %s, %s",
+			name ?: "<anonymous>", symerror(ret));
+		return __bt(ret);
+	}
+
+	threadobj_set_current(thobj);
+
+	/*
+	 * Link the thread to the shared queue, so that sysregd can
+	 * retrieve it. Nop if --disable-pshared.
+	 */
+	sysgroup_add(thread, &thobj->memspec);
+
+	threadobj_lock(thobj);
+	thobj->status &= ~__THREAD_S_WARMUP;
+	threadobj_cond_signal(&thobj->barrier);
+	threadobj_unlock(thobj);
+
+#ifdef CONFIG_XENO_ASYNC_CANCEL
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+#endif
+	threadobj_run_corespec(thobj);
+
+	return 0;
+}
+
+int threadobj_shadow(struct threadobj *thobj, const char *name)
+{
+	assert(thobj != threadobj_current());
+	threadobj_lock(thobj);
+	assert((thobj->status & (__THREAD_S_STARTED|__THREAD_S_ACTIVE)) == 0);
+	thobj->status |= __THREAD_S_STARTED|__THREAD_S_ACTIVE;
+	threadobj_unlock(thobj);
+
+	return __bt(threadobj_prologue(thobj, name));
+}
+
+/*
+ * Most traditional RTOSes guarantee that the task/thread delete
+ * operation is strictly synchronous, i.e. the deletion service
+ * returns to the caller only __after__ the deleted thread entered an
+ * innocuous state, i.e. dormant/dead.
+ *
+ * For this reason, we always wait until the canceled thread has
+ * finalized (see cancel_sync()), at the expense of a potential
+ * priority inversion affecting the caller of threadobj_cancel().
+ */
+static void cancel_sync(struct threadobj *thobj) /* thobj->lock held */
+{
+	int oldstate, ret = 0;
+	sem_t *sem;
+
+	threadobj_cancel_1_corespec(thobj);
+
+	/*
+	 * We have to allocate the cancel sync sema4 in the main heap
+	 * dynamically, so that it always lives in valid memory when
+	 * we wait on it. This has to be true regardless of whether
+	 * --enable-pshared is in effect, or thobj becomes stale after
+	 * the finalizer has run (we cannot host this sema4 in thobj
+	 * for this reason).
+	 */
+	sem = xnmalloc(sizeof(*sem));
+	if (sem == NULL)
+		ret = -ENOMEM;
+	else
+		__STD(sem_init(sem, sem_scope_attribute, 0));
+
+	thobj->cancel_sem = sem;
+
+	/*
+	 * If the thread to delete is warming up, wait until it
+	 * reaches the start barrier before sending the cancellation
+	 * signal.
+	 */
+	while (thobj->status & __THREAD_S_WARMUP) {
+		oldstate = thobj->cancel_state;
+		push_cleanup_lock(&thobj->lock);
+		__threadobj_tag_unlocked(thobj);
+		threadobj_cond_wait(&thobj->barrier, &thobj->lock);
+		__threadobj_tag_locked(thobj);
+		pop_cleanup_lock(&thobj->lock);
+		thobj->cancel_state = oldstate;
+	}
+
+	/*
+	 * Ok, now we shall raise the abort flag if the thread was not
+	 * started yet, to kick it out of the barrier wait. We are
+	 * covered by the target thread lock we hold, so we can't race
+	 * with threadobj_start().
+	 */
+	if ((thobj->status & __THREAD_S_STARTED) == 0) {
+		thobj->status |= __THREAD_S_ABORTED;
+		threadobj_cond_signal(&thobj->barrier);
+	}
+
+	threadobj_cancel_2_corespec(thobj);
+
+	request_cancel(thobj);
+
+	if (sem) {
+		do
+			ret = __STD(sem_wait(sem));
+		while (ret == -1 && errno == EINTR);
+	}
+
+	/*
+	 * Not being able to sync up with the cancelled thread is not
+	 * considered fatal, even though it is likely bad news, so we
+	 * keep on cleaning up the mess, hoping for the best.
+	 */
+	if (sem == NULL || ret)
+		warning("cannot sync with thread finalizer, %s",
+			symerror(sem ? -errno : ret));
+	if (sem) {
+		__STD(sem_destroy(sem));
+		xnfree(sem);
+	}
+}
+
+/* thobj->lock held on entry, released on return */
+int threadobj_cancel(struct threadobj *thobj)
+{
+	__threadobj_check_locked(thobj);
+
+	/*
+	 * This basically makes the thread enter a zombie state, since
+	 * it won't be reachable by anyone after its magic has been
+	 * trashed.
+	 */
+	thobj->magic = ~thobj->magic;
+
+	if (thobj == threadobj_current()) {
+		threadobj_unlock(thobj);
+		pthread_exit(NULL);
+	}
+
+	cancel_sync(thobj);
+
+	return 0;
+}
+
+static void finalize_thread(void *p) /* thobj->lock free */
+{
+	struct threadobj *thobj = p;
+
+	if (thobj == NULL || thobj == THREADOBJ_IRQCONTEXT)
+		return;
+
+	thobj->magic = ~thobj->magic;
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
+	threadobj_set_current(p);
+
+	if (thobj->wait_sobj)
+		__syncobj_cleanup_wait(thobj->wait_sobj, thobj);
+
+	sysgroup_remove(thread, &thobj->memspec);
+
+	if (thobj->tracer)
+		traceobj_unwind(thobj->tracer);
+
+	backtrace_dump(&thobj->btd);
+	backtrace_destroy_context(&thobj->btd);
+
+	if (thobj->finalizer)
+		thobj->finalizer(thobj);
+
+	if (thobj->cancel_sem)
+		/* Release the killer from threadobj_cancel(). */
+		__STD(sem_post)(thobj->cancel_sem);
+
+	thobj->run_state = __THREAD_S_DORMANT;
+
+	/*
+	 * Do not reclaim the TCB core resources if another thread is
+	 * waiting for us to start, pending on
+	 * wait_on_barrier(). Instead, hand it over to this thread.
+	 */
+	threadobj_lock(thobj);
+	if ((thobj->status & __THREAD_S_SAFE) == 0) {
+		threadobj_unlock(thobj);
+		destroy_thread(thobj);
+		threadobj_free(thobj);
+	} else
+		threadobj_unlock(thobj);
+
+	threadobj_set_current(NULL);
+}
+
+int threadobj_unblock(struct threadobj *thobj) /* thobj->lock held */
+{
+	struct syncstate syns;
+	struct syncobj *sobj;
+	int ret;
+
+	__threadobj_check_locked(thobj);
+
+	sobj = thobj->wait_sobj;
+	if (sobj) {
+		ret = syncobj_lock(sobj, &syns);
+		/*
+		 * Remove PEND (+DELAY timeout).
+		 * CAUTION: thobj->wait_sobj goes NULL upon flush.
+		 */
+		if (ret == 0) {
+			syncobj_flush(sobj);
+			syncobj_unlock(sobj, &syns);
+			return 0;
+		}
+	}
+
+	/* Remove standalone DELAY condition. */
+
+	if (!threadobj_local_p(thobj))
+		return __bt(-copperplate_kill_tid(thobj->pid, SIGRELS));
+
+	return __bt(-__RT(pthread_kill(thobj->ptid, SIGRELS)));
+}
+
+int threadobj_sleep(const struct timespec *ts)
+{
+	struct threadobj *current = threadobj_current();
+	sigset_t set;
+	int ret;
+
+	/*
+	 * threadobj_sleep() shall return -EINTR immediately upon
+	 * threadobj_unblock(), to honor forced wakeup semantics for
+	 * RTOS personalities.
+	 *
+	 * Otherwise, the sleep should be silently restarted until
+	 * completion after a Linux signal is handled.
+	 */
+	current->run_state = __THREAD_S_DELAYED;
+	threadobj_save_timeout(&current->core, ts);
+
+	do {
+		/*
+		 * Waiting on a null signal set causes an infinite
+		 * delay, so that only threadobj_unblock() or a linux
+		 * signal can unblock us.
+		 */
+		if (ts->tv_sec == 0 && ts->tv_nsec == 0) {
+			sigemptyset(&set);
+			ret = __RT(sigwaitinfo(&set, NULL)) ? errno : 0;
+		} else
+			ret = __RT(clock_nanosleep(CLOCK_COPPERPLATE,
+						   TIMER_ABSTIME, ts, NULL));
+	} while (ret == EINTR && !threadobj_unblocked_corespec(current));
+
+	current->run_state = __THREAD_S_RUNNING;
+
+	return -ret;
+}
+
+int threadobj_set_periodic(struct threadobj *thobj,
+			   const struct timespec *__restrict__ idate,
+			   const struct timespec *__restrict__ period)
+{				/* thobj->lock held */
+	struct itimerspec its;
+	struct sigevent sev;
+	timer_t timer;
+	int ret;
+
+	__threadobj_check_locked(thobj);
+
+	timer = thobj->periodic_timer;
+	if (!timespec_scalar(idate) && !timespec_scalar(period)) {
+		if (thobj->status & __THREAD_S_PERIODIC) {
+			thobj->status &= ~__THREAD_S_PERIODIC;
+			__RT(timer_delete(timer));
+		}
+		return 0;
+	}
+
+	if (!(thobj->status & __THREAD_S_PERIODIC)) {
+		memset(&sev, 0, sizeof(sev));
+		sev.sigev_signo = SIGPERIOD;
+		sev.sigev_notify = SIGEV_SIGNAL|SIGEV_THREAD_ID;
+		sev.sigev_notify_thread_id = threadobj_get_pid(thobj);
+		ret = __RT(timer_create(CLOCK_COPPERPLATE, &sev, &timer));
+		if (ret)
+			return __bt(-errno);
+		thobj->periodic_timer = timer;
+		thobj->status |= __THREAD_S_PERIODIC;
+	}
+
+	its.it_value = *idate;
+	its.it_interval = *period;
+
+	ret = __RT(timer_settime(timer, TIMER_ABSTIME, &its, NULL));
+	if (ret)
+		return __bt(-errno);
+
+	return 0;
+}
+
+int threadobj_wait_period(unsigned long *overruns_r)
+{
+	struct threadobj *current = threadobj_current();
+	siginfo_t si;
+	int sig;
+
+	if (!(current->status & __THREAD_S_PERIODIC))
+		return -EWOULDBLOCK;
+
+	for (;;) {
+		current->run_state = __THREAD_S_DELAYED;
+		sig = __RT(sigwaitinfo(&sigperiod_set, &si));
+		current->run_state = __THREAD_S_RUNNING;
+		if (sig == SIGPERIOD)
+			break;
+		if (errno == EINTR)
+			return -EINTR;
+		panic("cannot wait for next period, %s", symerror(-errno));
+	}
+
+	if (si.si_overrun) {
+		if (overruns_r)
+			*overruns_r = si.si_overrun;
+		return -ETIMEDOUT;
+	}
+
+	return 0;
+}
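+
+/*
+ * Illustration only (not part of the original code): a periodic
+ * thread would typically combine the two services above as sketched
+ * below, with thobj->lock held around the setup call; do_work() and
+ * handle_overruns() stand for hypothetical application code:
+ *
+ *	struct timespec idate, period;
+ *	unsigned long overruns;
+ *
+ *	clock_gettime(CLOCK_COPPERPLATE, &idate);
+ *	idate.tv_sec++;			(first release in one second)
+ *	period.tv_sec = 0;
+ *	period.tv_nsec = 1000000;	(then every millisecond)
+ *	threadobj_set_periodic(thobj, &idate, &period);
+ *	for (;;) {
+ *		if (threadobj_wait_period(&overruns) == -ETIMEDOUT)
+ *			handle_overruns(overruns);
+ *		do_work();
+ *	}
+ */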
+
+void threadobj_spin(ticks_t ns)
+{
+	ticks_t end;
+
+	end = clockobj_get_tsc() + clockobj_ns_to_tsc(ns);
+	while (clockobj_get_tsc() < end)
+		cpu_relax();
+}
+
+int threadobj_set_schedparam(struct threadobj *thobj, int policy,
+			     const struct sched_param_ex *param_ex) /* thobj->lock held */
+{
+	int ret, _policy;
+
+	__threadobj_check_locked(thobj);
+
+	if (thobj->schedlock_depth > 0)
+		return __bt(-EPERM);
+
+	_policy = policy;
+	if (policy == SCHED_RR)
+		_policy = prepare_rr_corespec(thobj, policy, param_ex);
+	/*
+	 * NOTE: if the current thread suddenly starves as a result
+	 * of switching itself to a scheduling class with no runtime
+	 * budget, it will hold its own lock for an indefinite amount
+	 * of time, i.e. until it gets some budget again. That seems a
+	 * more acceptable/less likely risk than introducing a race
+	 * window between the moment set_schedparam() is actually
+	 * applied at OS level, and the update of the priority
+	 * information in set_global_priority(), as both must be seen
+	 * as a single logical operation.
+	 */
+	ret = request_setschedparam(thobj, _policy, param_ex);
+	if (ret)
+		return ret;
+
+	/*
+	 * XXX: only local threads may switch to SCHED_RR since both
+	 * Cobalt and Mercury need this for different reasons.
+	 *
+	 * This seems an acceptable limitation compared to introducing
+	 * a significantly more complex implementation only for
+	 * supporting a somewhat weird feature (i.e. controlling the
+	 * round-robin state of threads running in remote processes).
+	 */
+	if (policy == SCHED_RR) {
+		if (!threadobj_local_p(thobj))
+			return -EINVAL;
+		ret = enable_rr_corespec(thobj, param_ex);
+		if (ret)
+			return __bt(ret);
+		thobj->tslice.tv_sec = param_ex->sched_rr_quantum.tv_sec;
+		thobj->tslice.tv_nsec = param_ex->sched_rr_quantum.tv_nsec;
+	} else if (thobj->policy == SCHED_RR) /* Switching off round-robin. */
+		disable_rr_corespec(thobj);
+
+	set_global_priority(thobj, policy, param_ex);
+
+	return 0;
+}
+
+int threadobj_set_schedprio(struct threadobj *thobj, int priority)
+{				/* thobj->lock held */
+	struct sched_param_ex param_ex;
+	int policy;
+
+	__threadobj_check_locked(thobj);
+
+	param_ex = thobj->schedparam;
+	param_ex.sched_priority = priority;
+	policy = thobj->policy;
+
+	if (policy == SCHED_RR) {
+		param_ex.sched_rr_quantum.tv_sec = thobj->tslice.tv_sec;
+		param_ex.sched_rr_quantum.tv_nsec = thobj->tslice.tv_nsec;
+	}
+
+	return threadobj_set_schedparam(thobj, policy, &param_ex);
+}
+
+#ifdef CONFIG_XENO_PSHARED
+static void main_exit(void)
+{
+	struct threadobj *thobj = threadobj_current();
+
+	sysgroup_remove(thread, &thobj->memspec);
+	threadobj_free(thobj);
+}
+#endif
+
+static inline int main_overlay(void)
+{
+	struct threadobj_init_data idata;
+	struct threadobj *tcb;
+	int ret;
+
+	/*
+	 * Make the main() context a basic yet complete thread object,
+	 * so that it may use any service which requires the caller to
+	 * have a Copperplate TCB (e.g. all blocking services). We
+	 * allocate a wait union which should be sufficient for
+	 * calling any blocking service from any high-level API from
+	 * an unshadowed main thread. APIs might have reasons not to
+	 * allow such a call though, in which case they should check
+	 * explicitly for those conditions.
+	 */
+	tcb = __threadobj_alloc(sizeof(*tcb),
+				sizeof(union main_wait_union),
+				0);
+	if (tcb == NULL)
+		panic("failed to allocate main tcb");
+
+	idata.magic = 0x0;
+	idata.finalizer = NULL;
+	idata.policy = SCHED_OTHER;
+	idata.param_ex.sched_priority = 0;
+	ret = threadobj_init(tcb, &idata);
+	if (ret) {
+		__threadobj_free(tcb);
+		return __bt(ret);
+	}
+
+	tcb->status = __THREAD_S_STARTED|__THREAD_S_ACTIVE;
+	threadobj_prologue(tcb, NULL);
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
+
+#ifdef CONFIG_XENO_PSHARED
+	atexit(main_exit);
+#endif
+
+	return 0;
+}
+
+int threadobj_pkg_init(int anon_session)
+{
+	sigaddset(&sigperiod_set, SIGPERIOD);
+	pkg_init_corespec();
+
+	if (!anon_session)
+		start_agent();
+
+	return main_overlay();
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/timerobj.c b/kernel/xenomai-v3.2.4/lib/copperplate/timerobj.c
new file mode 100644
index 0000000..ca8b82e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/timerobj.c
@@ -0,0 +1,288 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * Timer object abstraction.
+ */
+
+#include <signal.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <memory.h>
+#include <limits.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include "boilerplate/list.h"
+#include "boilerplate/signal.h"
+#include "boilerplate/lock.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/timerobj.h"
+#include "copperplate/clockobj.h"
+#include "copperplate/debug.h"
+#include "internal.h"
+
+static pthread_mutex_t svlock;
+
+static pthread_t svthread;
+
+static pid_t svpid;
+
+static DEFINE_PRIVATE_LIST(svtimers);
+
+#ifdef CONFIG_XENO_COBALT
+
+static inline void timersv_init_corespec(void) { }
+
+#else /* CONFIG_XENO_MERCURY */
+
+static inline void timersv_init_corespec(void)
+{
+	sigset_t set;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGALRM);
+	pthread_sigmask(SIG_BLOCK, &set, NULL);
+}
+
+#endif /* CONFIG_XENO_MERCURY */
+
+/*
+ * XXX: at some point, we may consider using a timer wheel instead of
+ * a simple linked list to index timers. The latter method is
+ * efficient for up to ten outstanding timers or so, which should be
+ * enough for most applications. However, there exist poorly designed
+ * apps involving dozens of active timers, particularly in the legacy
+ * embedded world.
+ */
+static void timerobj_enqueue(struct timerobj *tmobj)
+{
+	struct timerobj *__tmobj;
+
+	if (pvlist_empty(&svtimers)) {
+		pvlist_append(&tmobj->next, &svtimers);
+		return;
+	}
+
+	pvlist_for_each_entry_reverse(__tmobj, &svtimers, next) {
+		if (timespec_before_or_same(&__tmobj->itspec.it_value,
+					    &tmobj->itspec.it_value))
+			break;
+	}
+
+	atpvh(&__tmobj->next, &tmobj->next);
+}
+
+static int server_prologue(void *arg)
+{
+	svpid = get_thread_pid();
+	copperplate_set_current_name("timer-internal");
+	timersv_init_corespec();
+	threadobj_set_current(THREADOBJ_IRQCONTEXT);
+
+	return 0;
+}
+
+static void *timerobj_server(void *arg)
+{
+	void (*handler)(struct timerobj *tmobj);
+	struct timespec now, value, interval;
+	struct timerobj *tmobj;
+	sigset_t set;
+	int sig, ret;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGALRM);
+
+	for (;;) {
+		ret = __RT(sigwait(&set, &sig));
+		if (ret && ret != -EINTR)
+			break;
+		/*
+		 * We have a single server thread for now, so handlers
+		 * are fully serialized.
+		 */
+		write_lock_nocancel(&svlock);
+
+		__RT(clock_gettime(CLOCK_COPPERPLATE, &now));
+
+		while (!pvlist_empty(&svtimers)) {
+			tmobj = pvlist_first_entry(&svtimers, typeof(*tmobj),
+						   next);
+
+			value = tmobj->itspec.it_value;
+			interval = tmobj->itspec.it_interval;
+			handler = tmobj->handler;
+			if (timespec_after(&value, &now))
+				break;
+			pvlist_remove_init(&tmobj->next);
+			if (interval.tv_sec > 0 || interval.tv_nsec > 0) {
+				timespec_add(&tmobj->itspec.it_value,
+					     &value, &interval);
+				timerobj_enqueue(tmobj);
+			}
+			write_unlock(&svlock);
+			handler(tmobj);
+			write_lock_nocancel(&svlock);
+		}
+
+		write_unlock(&svlock);
+	}
+
+	return NULL;
+}
+
+static void timerobj_spawn_server(void)
+{
+	struct corethread_attributes cta;
+
+	cta.policy = SCHED_CORE;
+	cta.param_ex.sched_priority = threadobj_irq_prio;
+	cta.prologue = server_prologue;
+	cta.run = timerobj_server;
+	cta.arg = NULL;
+	cta.stacksize = PTHREAD_STACK_DEFAULT;
+	cta.detachstate = PTHREAD_CREATE_DETACHED;
+
+	__bt(copperplate_create_thread(&cta, &svthread));
+}
+
+int timerobj_init(struct timerobj *tmobj)
+{
+	static pthread_once_t spawn_once;
+	pthread_mutexattr_t mattr;
+	struct sigevent sev;
+	int ret;
+
+	/*
+	 * XXX: We need a threaded handler so that we may invoke core
+	 * async-unsafe services from there (e.g. syncobj post
+	 * routines are not async-safe, but the higher layers may
+	 * invoke them from a timer handler).
+	 *
+	 * We don't rely on glibc's SIGEV_THREAD feature, because it
+	 * is unreliable with some glibc releases (2.4 -> 2.9 at the
+	 * very least), and spawning a short-lived thread at each
+	 * timeout expiration to run the handler is just overkill.
+	 */
+	pthread_once(&spawn_once, timerobj_spawn_server);
+	if (!svthread)
+		return __bt(-EAGAIN);
+
+	tmobj->handler = NULL;
+	pvholder_init(&tmobj->next); /* so we may use pvholder_linked() */
+
+	memset(&sev, 0, sizeof(sev));
+	sev.sigev_notify = SIGEV_THREAD_ID;
+	sev.sigev_signo = SIGALRM;
+	sev.sigev_notify_thread_id = svpid;
+
+	ret = __RT(timer_create(CLOCK_COPPERPLATE, &sev, &tmobj->timer));
+	if (ret)
+		return __bt(-errno);
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	ret = pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
+	assert(ret == 0);
+	ret = __bt(-__RT(pthread_mutex_init(&tmobj->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+
+	return ret;
+}
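+
+/*
+ * Illustration only (not part of the original code): the expected
+ * lifecycle is timerobj_init(), then timerobj_start() to arm the
+ * timer and queue it for the server thread, and timerobj_stop() or
+ * timerobj_destroy() to disarm it; start, stop and destroy all
+ * expect the timer lock to be held on entry and drop it before
+ * returning.
+ */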
+
+void timerobj_destroy(struct timerobj *tmobj) /* lock held, dropped */
+{
+	write_lock_nocancel(&svlock);
+
+	if (pvholder_linked(&tmobj->next))
+		pvlist_remove_init(&tmobj->next);
+
+	write_unlock(&svlock);
+
+	__RT(timer_delete(tmobj->timer));
+	__RT(pthread_mutex_unlock(&tmobj->lock));
+	__RT(pthread_mutex_destroy(&tmobj->lock));
+}
+
+int timerobj_start(struct timerobj *tmobj,
+		   void (*handler)(struct timerobj *tmobj),
+		   struct itimerspec *it) /* lock held, dropped */
+{
+	int ret = 0;
+
+	/*
+	 * We hold the queue lock long enough to prevent the timer
+	 * from being dequeued by the carrier thread before it has
+	 * been armed, e.g. the user handler might destroy it under
+	 * our feet if so, causing timer_settime() to fail, which
+	 * would in turn lead to a double-deletion if the caller
+	 * happens to check the return code then drop the timer
+	 * (again).
+	 */
+	write_lock_nocancel(&svlock);
+
+	if (pvholder_linked(&tmobj->next))
+		pvlist_remove_init(&tmobj->next);
+
+	tmobj->handler = handler;
+	tmobj->itspec = *it;
+
+	if (__RT(timer_settime(tmobj->timer, TIMER_ABSTIME, it, NULL))) {
+		ret = __bt(-errno);
+		goto fail;
+	}
+
+	timerobj_enqueue(tmobj);
+fail:
+	write_unlock(&svlock);
+	timerobj_unlock(tmobj);
+
+	return ret;
+}
+
+int timerobj_stop(struct timerobj *tmobj) /* lock held, dropped */
+{
+	static const struct itimerspec itimer_stop;
+
+	write_lock_nocancel(&svlock);
+
+	if (pvholder_linked(&tmobj->next))
+		pvlist_remove_init(&tmobj->next);
+
+	__RT(timer_settime(tmobj->timer, 0, &itimer_stop, NULL));
+	tmobj->handler = NULL;
+	write_unlock(&svlock);
+	timerobj_unlock(tmobj);
+
+	return 0;
+}
+
+int timerobj_pkg_init(void)
+{
+	pthread_mutexattr_t mattr;
+	int ret;
+
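+	/*
+	 * The timer server thread and its queue are per-process (see
+	 * svtimers above), so a process-private lock is sufficient
+	 * even in --enable-pshared setups.
+	 */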
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&svlock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/copperplate/traceobj.c b/kernel/xenomai-v3.2.4/lib/copperplate/traceobj.c
new file mode 100644
index 0000000..3c4d69bf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/copperplate/traceobj.c
@@ -0,0 +1,330 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include "boilerplate/lock.h"
+#include "copperplate/traceobj.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/heapobj.h"
+#include "xenomai/init.h"
+#include "internal.h"
+#ifdef CONFIG_XENO_VALGRIND_API
+#include <valgrind/valgrind.h>
+static inline int valgrind_detected(void)
+{
+	return RUNNING_ON_VALGRIND;
+}
+#else
+static inline int valgrind_detected(void)
+{
+	return 0;
+}
+#endif
+
+struct tracemark {
+	const char *file;
+	int line;
+	int mark;
+};
+
+int traceobj_init(struct traceobj *trobj, const char *label, int nr_marks)
+{
+	pthread_mutexattr_t mattr;
+	pthread_condattr_t cattr;
+	int ret;
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&trobj->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	pthread_condattr_init(&cattr);
+	pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_cond_init(&trobj->join, &cattr)));
+	pthread_condattr_destroy(&cattr);
+	if (ret) {
+		__RT(pthread_mutex_destroy(&trobj->lock));
+		return ret;
+	}
+
+	/*
+	 * We make sure not to unblock from threadobj_join() until at
+	 * least one thread has called trace_enter() for this trace
+	 * object.
+	 */
+	trobj->nr_threads = -1;
+
+	trobj->label = label;
+	trobj->nr_marks = nr_marks;
+	trobj->cur_mark = 0;
+
+	if (nr_marks > 0) {
+		trobj->marks = malloc(sizeof(struct tracemark) * nr_marks);
+		if (trobj->marks == NULL)
+			panic("cannot allocate mark table for tracing");
+	}
+
+	return 0;
+}
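+
+/*
+ * Illustration only (not part of the original code): a unit test
+ * typically initializes the trace object, spawns workers which
+ * bracket their body with traceobj_enter()/traceobj_exit() and drop
+ * ordered marks, then joins and verifies the recorded sequence;
+ * traceobj_mark() is assumed to be the header-provided shorthand
+ * for __traceobj_mark() with __FILE__/__LINE__ filled in:
+ *
+ *	static struct traceobj trobj;
+ *	static int tseq[] = { 1, 2 };
+ *
+ *	(worker)
+ *	traceobj_enter(&trobj);
+ *	traceobj_mark(&trobj, 1);
+ *	traceobj_mark(&trobj, 2);
+ *	traceobj_exit(&trobj);
+ *
+ *	(main)
+ *	traceobj_init(&trobj, "demo", 2);
+ *	... spawn the worker ...
+ *	traceobj_join(&trobj);
+ *	traceobj_verify(&trobj, tseq, 2);
+ */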
+
+static void compare_marks(struct traceobj *trobj, int tseq[], int nr_seq) /* lock held */
+{
+	int mark;
+
+	for (mark = 0; mark < trobj->cur_mark || mark < nr_seq; mark++) {
+		if (mark >= trobj->cur_mark) {
+			fprintf(stderr, " <missing mark> |  [%d] expected\n",
+				tseq[mark]);
+		} else if (mark < nr_seq)
+			__RT(fprintf(stderr, "at %s:%d  |  [%d] should be [%d]\n",
+				     trobj->marks[mark].file,
+				     trobj->marks[mark].line,
+				     trobj->marks[mark].mark,
+				     tseq[mark]));
+		else
+			__RT(fprintf(stderr, "at %s:%d  |  unexpected [%d]\n",
+				     trobj->marks[mark].file,
+				     trobj->marks[mark].line,
+				     trobj->marks[mark].mark));
+	}
+
+	fflush(stderr);
+}
+
+void traceobj_verify(struct traceobj *trobj, int tseq[], int nr_seq)
+{
+	int end_mark, mark, state;
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+
+	read_lock_safe(&trobj->lock, state);
+
+	if (nr_seq > trobj->nr_marks)
+		goto fail;
+
+	end_mark = trobj->cur_mark;
+	if (end_mark == 0) {
+		read_unlock_safe(&trobj->lock, state);
+		panic("no mark defined");
+	}
+
+	if (end_mark != nr_seq)
+		goto fail;
+
+	for (mark = 0; mark < end_mark; mark++) {
+		if (trobj->marks[mark].mark != tseq[mark])
+			goto fail;
+	}
+out:
+	read_unlock_safe(&trobj->lock, state);
+
+	CANCEL_RESTORE(svc);
+
+	return;
+
+fail:
+	if (valgrind_detected()) {
+		warning("valgrind detected: ignoring sequence mismatch");
+		goto out;
+	}
+
+	warning("mismatching execution sequence detected");
+	compare_marks(trobj, tseq, nr_seq);
+	read_unlock_safe(&trobj->lock, state);
+
+	CANCEL_RESTORE(svc);
+
+#ifdef CONFIG_XENO_MERCURY
+	/*
+	 * The Mercury core does not force any affinity, which may
+	 * lead to wrong results with some unit tests checking strict
+	 * ordering of operations. Tell the user about this. Normally,
+	 * such unit tests on Mercury should be pinned on a single CPU
+	 * using --cpu-affinity.
+	 */
+	if (CPU_COUNT(&__base_setup_data.cpu_affinity) == 0)
+		warning("NOTE: --cpu-affinity option was not given - this might explain?");
+#endif
+#ifndef CONFIG_XENO_ASYNC_CANCEL
+	/*
+	 * Lack of async cancellation support might also explain why
+	 * some tests have failed.
+	 */
+	warning("NOTE: --disable-async-cancel option was given - this might explain?");
+#endif
+	exit(5);
+}
+
+void traceobj_destroy(struct traceobj *trobj)
+{
+	free(trobj->marks);
+	__RT(pthread_mutex_destroy(&trobj->lock));
+}
+
+static void dump_marks(struct traceobj *trobj) /* lock held */
+{
+	int mark;
+
+	for (mark = 0; mark < trobj->cur_mark; mark++)
+		fprintf(stderr, "[%d] at %s:%d\n",
+			trobj->marks[mark].mark,
+			trobj->marks[mark].file,
+			trobj->marks[mark].line);
+
+	fflush(stderr);
+}
+
+static void dump_marks_on_error(struct traceobj *trobj)
+{
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+
+	read_lock(&trobj->lock);
+	dump_marks(trobj);
+	read_unlock(&trobj->lock);
+
+	CANCEL_RESTORE(svc);
+}
+
+void __traceobj_assert_failed(struct traceobj *trobj,
+			      const char *file, int line, const char *cond)
+{
+	dump_marks_on_error(trobj);
+	panic("trace assertion failed:\n              %s:%d => \"%s\"", file, line, cond);
+}
+
+void __traceobj_check_abort(struct traceobj *trobj,
+			    const char *file, int line,
+			    int received, int expected)
+{
+	dump_marks_on_error(trobj);
+	panic("wrong return status:\n              %s:%d => %s (want %s)", file, line,
+	      symerror(received), symerror(expected));
+}
+
+void __traceobj_check_warn(struct traceobj *trobj,
+			   const char *file, int line,
+			   int received, int expected)
+{
+	dump_marks_on_error(trobj);
+	warning("wrong return status:\n              %s:%d => %s (want %s)", file, line,
+		symerror(received), symerror(expected));
+}
+
+void __traceobj_mark(struct traceobj *trobj,
+		     const char *file, int line, int mark)
+{
+	struct tracemark *tmk;
+	struct service svc;
+	int cur_mark;
+
+	CANCEL_DEFER(svc);
+
+	pthread_testcancel();
+	push_cleanup_lock(&trobj->lock);
+	write_lock(&trobj->lock);
+
+	cur_mark = trobj->cur_mark;
+	if (cur_mark >= trobj->nr_marks) {
+		dump_marks(trobj);
+		panic("too many marks: [%d] at %s:%d", mark, file, line);
+	}
+
+	tmk = trobj->marks + cur_mark;
+	tmk->file = file;
+	tmk->line = line;
+	tmk->mark = mark;
+	trobj->cur_mark++;
+
+	write_unlock(&trobj->lock);
+	pop_cleanup_lock(&trobj->lock);
+
+	CANCEL_RESTORE(svc);
+}
+
+void traceobj_enter(struct traceobj *trobj)
+{
+	struct threadobj *current = threadobj_current();
+	struct service svc;
+
+	if (current)
+		current->tracer = trobj;
+
+	CANCEL_DEFER(svc);
+
+	write_lock_nocancel(&trobj->lock);
+
+	if (++trobj->nr_threads == 0)
+		trobj->nr_threads = 1;
+
+	write_unlock(&trobj->lock);
+
+	CANCEL_RESTORE(svc);
+}
+
+/* May be directly called from finalizer. */
+void traceobj_unwind(struct traceobj *trobj)
+{
+	struct service svc;
+	int state;
+
+	CANCEL_DEFER(svc);
+
+	write_lock_safe(&trobj->lock, state);
+
+	if (--trobj->nr_threads <= 0)
+		threadobj_cond_signal(&trobj->join);
+
+	write_unlock_safe(&trobj->lock, state);
+
+	CANCEL_RESTORE(svc);
+}
+
+void traceobj_exit(struct traceobj *trobj)
+{
+	struct threadobj *current = threadobj_current();
+
+	if (current)
+		current->tracer = NULL;
+
+	traceobj_unwind(trobj);
+}
+
+void traceobj_join(struct traceobj *trobj)
+{
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+
+	push_cleanup_lock(&trobj->lock);
+	read_lock(&trobj->lock);
+
+	while (trobj->nr_threads < 0 || trobj->nr_threads > 0)
+		threadobj_cond_wait(&trobj->join, &trobj->lock);
+
+	read_unlock(&trobj->lock);
+	pop_cleanup_lock(&trobj->lock);
+
+	CANCEL_RESTORE(svc);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/mercury/Makefile.am b/kernel/xenomai-v3.2.4/lib/mercury/Makefile.am
new file mode 100644
index 0000000..d2b0621
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/mercury/Makefile.am
@@ -0,0 +1,9 @@
+
+lib_LTLIBRARIES = libmercury.la
+
+libmercury_la_SOURCES =
+
+libmercury_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 0:0:0 -lpthread -lrt
+
+libmercury_la_LIBADD = \
+	../boilerplate/libboilerplate.la
diff --git a/kernel/xenomai-v3.2.4/lib/psos/COPYING b/kernel/xenomai-v3.2.4/lib/psos/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/psos/Makefile.am b/kernel/xenomai-v3.2.4/lib/psos/Makefile.am
new file mode 100644
index 0000000..f54b260
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/Makefile.am
@@ -0,0 +1,38 @@
+lib_LTLIBRARIES = libpsos@CORE@.la
+
+libpsos@CORE@_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 0:0:0
+
+libpsos@CORE@_la_LIBADD =						\
+	@XENO_CORE_LDADD@					\
+	$(top_builddir)/lib/copperplate/libcopperplate@CORE@.la
+
+libpsos@CORE@_la_SOURCES =	\
+	init.c		\
+	internal.h	\
+	pt.c		\
+	pt.h		\
+	queue.c		\
+	queue.h		\
+	rn.c		\
+	rn.h		\
+	sem.c		\
+	sem.h		\
+	task.c		\
+	task.h		\
+	tm.c		\
+	tm.h		\
+	reference.h
+
+libpsos@CORE@_la_CPPFLAGS =					\
+	@XENO_USER_CFLAGS@				\
+	-I$(top_srcdir)/include				\
+	-I$(top_srcdir)/lib
+
+EXTRA_DIST = testsuite
+
+SPARSE = sparse
+
+sparse:
+	@for i in $(libpsos@CORE@_la_SOURCES); do \
+		$(SPARSE) $(CHECKFLAGS) $(srcdir)/$$i; \
+	done
diff --git a/kernel/xenomai-v3.2.4/lib/psos/README b/kernel/xenomai-v3.2.4/lib/psos/README
new file mode 100644
index 0000000..a76170e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/README
@@ -0,0 +1,51 @@
+Overview
+========
+
+This directory contains a pSOS(*) emulation interface on top of the
+Xenomai framework.
+
+
+Known variations from pSOS
+==========================
+
+You may observe some variations from pSOS. If you find such a
+variation and you think it would be easy to correct, please report it
+to the Xenomai mailing list at <xenomai@xenomai.org>.  Here are the
+known variations:
+
+- pSOS task priorities are restricted to [1..97] in the current
+  implementation of the emulator.
+
+- Clearing the T_SLICE bit using t_mode() may induce a round-robin
+  effect within the priority level of the current task.
+
+- T_LEVELMASKx bits are ignored by the emulator
+  (e.g. t_mode()). Interrupts are always enabled for the running task.
+
+- The following calls may return ERR_SSFN when called from a
+  non-pSOS task context:
+
+  ev_receive, tm_evafter, tm_evevery, tm_evwhen
+
+- ERR_NOTIME is never returned, since the emulator sets the pSOS time
+  at startup.
+
+- q_create() always uses system buffers (Q_SYSBUF), regardless of the
+  creation flags (i.e. Q_PRIBUF is ignored). System buffer memory is
+  only limited by the addressable process memory; it is obtained from
+  the main memory pool (see --mem-pool-size init option).
+
+- Fixed and variable size message queues share a common namespace with
+  respect to q_[v]ident() calls.
+
+- tm_set() will not fire any outstanding calendar-based timer whose
+  timeout date moves to the past as a consequence of this call.
+
+- rn_retseg() does not fully check the validity of the pointer
+  argument. Therefore ERR_SEGADDR and ERR_SEGFREE error codes will
+  never be returned, and passing a non-NULL invalid pointer within the
+  bounds of the region may lead to unpredictable results.
+
+(*) pSOS is a registered trademark of Wind River Systems, Inc.
+(http://www.windriver.com).
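For illustration, here is a minimal round trip over the fixed-size
queue services emulated here; this is a sketch only, assuming the
Q_LIMIT and Q_NOWAIT flags and the SUCCESS code are declared by
<psos/psos.h>:

#include <psos/psos.h>

int queue_roundtrip(void)
{
	u_long qid, ret;
	u_long msg[4] = { 1, 2, 3, 4 }, inbuf[4];

	/* Create a queue limited to 16 pending messages. */
	ret = q_create("DEMO", 16, Q_LIMIT, &qid);
	if (ret != SUCCESS)
		return -1;

	/* Post one four-word message, then fetch it back without
	   blocking. */
	ret = q_send(qid, msg);
	if (ret == SUCCESS)
		ret = q_receive(qid, Q_NOWAIT, 0, inbuf);

	q_delete(qid);

	return ret == SUCCESS ? 0 : -1;
}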
diff --git a/kernel/xenomai-v3.2.4/lib/psos/init.c b/kernel/xenomai-v3.2.4/lib/psos/init.c
new file mode 100644
index 0000000..307594e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/init.c
@@ -0,0 +1,141 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <xenomai/init.h>
+#include <copperplate/registry.h>
+#include <copperplate/clockobj.h>
+#include <copperplate/debug.h>
+#include <psos/psos.h>
+#include "internal.h"
+#include "tm.h"
+#include "task.h"
+#include "sem.h"
+#include "queue.h"
+#include "pt.h"
+#include "rn.h"
+
+/**
+ * @defgroup psos pSOS&reg; emulator
+ *
+ * A pSOS&reg; emulation library on top of Xenomai.
+ *
+ * The emulator mimics the behavior described in the public
+ * documentation of the pSOS 2.x API for the following classes of
+ * services:
+ *
+ * - Tasks, Events, Queues, Semaphores
+ * - Partitions, Regions, Timers
+ */
+int psos_long_names = 0;
+
+static unsigned int clock_resolution = 1000000; /* 1ms */
+
+static unsigned int time_slice_in_ticks = 5;
+
+static const struct option psos_options[] = {
+	{
+#define clock_resolution_opt	0
+		.name = "psos-clock-resolution",
+		.has_arg = required_argument,
+	},
+	{
+#define time_slice_opt	1
+		.name = "psos-time-slice",
+		.has_arg = required_argument,
+	},
+	{
+#define long_names_opt	2
+		.name = "psos-long-names",
+		.has_arg = no_argument,
+		.flag = &psos_long_names,
+		.val = 1
+	},
+	{ /* Sentinel */ }
+};
+
+static int psos_parse_option(int optnum, const char *optarg)
+{
+	switch (optnum) {
+	case clock_resolution_opt:
+		clock_resolution = atoi(optarg);
+		break;
+	case time_slice_opt:
+		time_slice_in_ticks = atoi(optarg);
+		break;
+	case long_names_opt:
+		break;
+	default:
+		/* Paranoid, can't happen. */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void psos_help(void)
+{
+	fprintf(stderr, "--psos-clock-resolution=<ns>	tick value (default 1ms)\n");
+	fprintf(stderr, "--psos-time-slice=<psos-ticks>	round-robin time slice\n");
+	fprintf(stderr, "--psos-long-names		enable long names for objects (> 4 characters)\n");
+}
+
+static int psos_init(void)
+{
+	int ret;
+
+	registry_add_dir("/psos");
+	registry_add_dir("/psos/tasks");
+	registry_add_dir("/psos/semaphores");
+	registry_add_dir("/psos/queues");
+	registry_add_dir("/psos/timers");
+	registry_add_dir("/psos/partitions");
+	registry_add_dir("/psos/regions");
+
+	cluster_init(&psos_task_table, "psos.task");
+	cluster_init(&psos_sem_table, "psos.sema4");
+	cluster_init(&psos_queue_table, "psos.queue");
+	pvcluster_init(&psos_pt_table, "psos.pt");
+	pvcluster_init(&psos_rn_table, "psos.rn");
+
+	ret = clockobj_init(&psos_clock, clock_resolution);
+	if (ret) {
+		warning("%s: failed to initialize pSOS clock (res=%u ns)",
+			__FUNCTION__, clock_resolution);
+		return __bt(ret);
+	}
+
+	/* Convert pSOS ticks to timespec. */
+	clockobj_ticks_to_timespec(&psos_clock, time_slice_in_ticks, &psos_rrperiod);
+
+	return 0;
+}
+
+static struct setup_descriptor psos_skin = {
+	.name = "psos",
+	.init = psos_init,
+	.options = psos_options,
+	.parse_option = psos_parse_option,
+	.help = psos_help,
+};
+
+interface_setup_call(psos_skin);
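As an example of the options registered above, an application linked
against this skin could be started as "./app --psos-clock-resolution=100000
--psos-time-slice=10" to get a 100 us tick and a round-robin slice of
10 ticks (1 ms); "./app" stands for any program linked against this
skin.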
diff --git a/kernel/xenomai-v3.2.4/lib/psos/internal.h b/kernel/xenomai-v3.2.4/lib/psos/internal.h
new file mode 100644
index 0000000..0e5f108
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/internal.h
@@ -0,0 +1,36 @@
+/*
+ * Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _PSOS_INTERNAL_H
+#define _PSOS_INTERNAL_H
+
+#include <string.h>
+
+extern int psos_long_names;
+
+static inline
+const char *psos_trunc_name(char dst[5], const char *src)
+{
+	if (psos_long_names)
+		return src;
+
+	strncpy(dst, src, 4)[4] = '\0';
+
+	return dst;
+}
+
+#endif /* !_PSOS_INTERNAL_H */
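The helper above keeps only the first four characters of an object
name unless --psos-long-names was passed at startup. A minimal sketch
of the effect, assuming the program is linked against this library
(which defines psos_long_names):

#include <stdio.h>
#include "internal.h"

int main(void)
{
	char dst[5];

	/* Prints "Time": at most four characters are copied and the
	   result is always NUL-terminated. */
	printf("%s\n", psos_trunc_name(dst, "Timer1"));

	return 0;
}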
diff --git a/kernel/xenomai-v3.2.4/lib/psos/pt.c b/kernel/xenomai-v3.2.4/lib/psos/pt.c
new file mode 100644
index 0000000..0fc7d9b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/pt.c
@@ -0,0 +1,311 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <boilerplate/ancillaries.h>
+#include <boilerplate/lock.h>
+#include <copperplate/cluster.h>
+#include <psos/psos.h>
+#include "internal.h"
+#include "pt.h"
+
+#define pt_magic	0x8181fefe
+
+#define pt_align_mask   (sizeof(void *)-1)
+
+#define pt_bitmap_pos(pt,n) \
+pt->bitmap[((n) / (sizeof(u_long) * 8))]
+
+#define pt_block_pos(n) \
+(1L << ((n) % (sizeof(u_long) * 8)))
+
+#define pt_bitmap_setbit(pt,n) \
+(pt_bitmap_pos(pt,n) |= pt_block_pos(n))
+
+#define pt_bitmap_clrbit(pt,n) \
+(pt_bitmap_pos(pt,n) &= ~pt_block_pos(n))
+
+#define pt_bitmap_tstbit(pt,n) \
+(pt_bitmap_pos(pt,n) & pt_block_pos(n))
+
+struct pvcluster psos_pt_table;
+
+static unsigned long anon_ptids;
+
+/*
+ * XXX: Status wrt caller cancellation: none of these routines is
+ * supposed to traverse any cancellation point (*), so we don't bother
+ * adding any cleanup handler to release the partition lock, given
+ * that copperplate-protected sections temporarily disable
+ * asynchronous thread cancellation and therefore allow callees to
+ * grab mutexes safely.
+ *
+ * (*) All private cluster management routines are expected to be
+ * free of cancellation points. You have been warned.
+ */
+
+static struct psos_pt *get_pt_from_id(u_long ptid, int *err_r)
+{
+	struct psos_pt *pt = (struct psos_pt *)ptid;
+
+	/*
+	 * Unlike most other pSOS objects (except timers), the
+	 * partition control block is NOT laid into the main heap, so
+	 * we don't have to apply mainheap_deref() to convert partition
+	 * handles to pointers; we use a plain cast instead.  (That
+	 * said, mainheap_deref() is smart enough to deal with private
+	 * pointers; we just avoid the useless overhead.)
+	 */
+	if (pt == NULL || ((uintptr_t)pt & (sizeof(uintptr_t)-1)) != 0)
+		goto objid_error;
+
+	if (pt->magic == pt_magic) {
+		if (__RT(pthread_mutex_lock(&pt->lock)) == 0) {
+			if (pt->magic == pt_magic)
+				return pt;
+			__RT(pthread_mutex_unlock(&pt->lock));
+			/* Will likely fall through to ERR_OBJDEL. */
+		}
+	}
+
+	if (pt->magic == ~pt_magic) {
+		*err_r = ERR_OBJDEL;
+		return NULL;
+	}
+
+	if ((pt->magic >> 16) == 0x8181) {
+		*err_r = ERR_OBJTYPE;
+		return NULL;
+	}
+
+objid_error:
+	*err_r = ERR_OBJID;
+
+	return NULL;
+}
+
+static inline void put_pt(struct psos_pt *pt)
+{
+	__RT(pthread_mutex_unlock(&pt->lock));
+}
+
+static inline size_t pt_overhead(size_t psize, size_t bsize)
+{
+	size_t m = (bsize * 8);
+	size_t q = ((psize - sizeof(struct psos_pt)) * m) / (m + 1);
+	return (psize - q + pt_align_mask) & ~pt_align_mask;
+}
+
+u_long pt_create(const char *name,
+		 void *paddr, void *laddr  __attribute__ ((unused)),
+		 u_long psize, u_long bsize, u_long flags,
+		 u_long *ptid_r, u_long *nbuf)
+{
+	pthread_mutexattr_t mattr;
+	char short_name[5];
+	struct service svc;
+	struct psos_pt *pt;
+	int ret = SUCCESS;
+	u_long overhead;
+	caddr_t mp;
+	u_long n;
+
+	if ((uintptr_t)paddr & (sizeof(uintptr_t) - 1))
+		return ERR_PTADDR;
+
+	if (bsize <= pt_align_mask)
+		return ERR_BUFSIZE;
+
+	if (bsize & (bsize - 1))
+		return ERR_BUFSIZE;	/* Not a power of two. */
+
+	if (psize < sizeof(*pt))
+		return ERR_TINYPT;
+
+	pt = paddr;
+
+	if (name == NULL || *name == '\0')
+		sprintf(pt->name, "pt%lu", ++anon_ptids);
+	else {
+		name = psos_trunc_name(short_name, name);
+		namecpy(pt->name, name);
+	}
+
+	CANCEL_DEFER(svc);
+
+	if (pvcluster_addobj_dup(&psos_pt_table, pt->name, &pt->cobj)) {
+		warning("cannot register partition: %s", pt->name);
+		ret = ERR_OBJID;
+		goto out;
+	}
+
+	pt->flags = flags;
+	pt->bsize = (bsize + pt_align_mask) & ~pt_align_mask;
+	overhead = pt_overhead(psize, pt->bsize);
+
+	pt->nblks = (psize - overhead) / pt->bsize;
+	if (pt->nblks == 0) {
+		ret = ERR_TINYPT;
+		goto out;
+	}
+
+	pt->psize = pt->nblks * pt->bsize;
+	pt->data = (caddr_t)pt + overhead;
+	pt->freelist = mp = pt->data;
+	pt->ublks = 0;
+
+	for (n = pt->nblks; n > 1; n--) {
+		caddr_t nmp = mp + pt->bsize;
+		*((void **)mp) = nmp;
+		mp = nmp;
+	}
+
+	*((void **)mp) = NULL;
+	memset(pt->bitmap, 0, overhead - sizeof(*pt) + sizeof(pt->bitmap));
+	*nbuf = pt->nblks;
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	__RT(pthread_mutex_init(&pt->lock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+
+	pt->magic = pt_magic;
+	*ptid_r = (u_long)pt;
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long pt_delete(u_long ptid)
+{
+	struct psos_pt *pt;
+	struct service svc;
+	int ret;
+
+	pt = get_pt_from_id(ptid, &ret);
+	if (pt == NULL)
+		return ret;
+
+	if ((pt->flags & PT_DEL) == 0 && pt->ublks > 0) {
+		put_pt(pt);
+		return ERR_BUFINUSE;
+	}
+
+	CANCEL_DEFER(svc);
+	pvcluster_delobj(&psos_pt_table, &pt->cobj);
+	CANCEL_RESTORE(svc);
+	pt->magic = ~pt_magic; /* Prevent further reference. */
+	put_pt(pt);
+	__RT(pthread_mutex_destroy(&pt->lock));
+
+	return SUCCESS;
+}
+
+u_long pt_getbuf(u_long ptid, void **bufaddr)
+{
+	struct psos_pt *pt;
+	u_long numblk;
+	void *buf;
+	int ret;
+
+	pt = get_pt_from_id(ptid, &ret);
+	if (pt == NULL)
+		return ret;
+
+	buf = pt->freelist;
+	if (buf) {
+		pt->freelist = *((void **)buf);
+		pt->ublks++;
+		numblk = ((caddr_t)buf - pt->data) / pt->bsize;
+		pt_bitmap_setbit(pt, numblk);
+	}
+
+	put_pt(pt);
+
+	*bufaddr = buf;
+	if (buf == NULL)
+		return ERR_NOBUF;
+
+	return SUCCESS;
+}
+
+u_long pt_retbuf(u_long ptid, void *buf)
+{
+	struct psos_pt *pt;
+	u_long numblk;
+	int ret;
+
+	pt = get_pt_from_id(ptid, &ret);
+	if (pt == NULL)
+		return ret;
+
+	if ((caddr_t)buf < pt->data ||
+	    (caddr_t)buf >= pt->data + pt->psize ||
+	    (((caddr_t)buf - pt->data) % pt->bsize) != 0) {
+		ret = ERR_BUFADDR;
+		goto done;
+	}
+
+	numblk = ((caddr_t)buf - pt->data) / pt->bsize;
+
+	if (!pt_bitmap_tstbit(pt, numblk)) {
+		ret = ERR_BUFFREE;
+		goto done;
+	}
+
+	pt_bitmap_clrbit(pt, numblk);
+	*((void **)buf) = pt->freelist;
+	pt->freelist = buf;
+	pt->ublks--;
+	ret = SUCCESS;
+done:
+	put_pt(pt);
+
+	return ret;
+}
+
+u_long pt_ident(const char *name, u_long node, u_long *ptid_r)
+{
+	struct pvclusterobj *cobj;
+	struct service svc;
+	struct psos_pt *pt;
+	char short_name[5];
+
+	if (node)
+		return ERR_NODENO;
+
+	name = psos_trunc_name(short_name, name);
+
+	CANCEL_DEFER(svc);
+	cobj = pvcluster_findobj(&psos_pt_table, name);
+	CANCEL_RESTORE(svc);
+	if (cobj == NULL)
+		return ERR_OBJNF;
+
+	pt = container_of(cobj, struct psos_pt, cobj);
+	*ptid_r = (u_long)pt;
+
+	return SUCCESS;
+}
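A minimal partition life cycle built on the services above; this is a
sketch, with the backing area size and the 64-byte block size (a power
of two, as pt_create() requires) chosen arbitrarily:

#include <psos/psos.h>

static char area[8192] __attribute__((aligned(sizeof(void *))));

int partition_demo(void)
{
	u_long ptid, nbuf, ret;
	void *buf;

	/* laddr is unused by this emulator, hence NULL. */
	ret = pt_create("PART", area, NULL, sizeof(area), 64, 0,
			&ptid, &nbuf);
	if (ret != SUCCESS)
		return -1;

	ret = pt_getbuf(ptid, &buf);	/* grab one free block */
	if (ret == SUCCESS)
		ret = pt_retbuf(ptid, buf);	/* hand it back */

	pt_delete(ptid);

	return ret == SUCCESS ? 0 : -1;
}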
diff --git a/kernel/xenomai-v3.2.4/lib/psos/pt.h b/kernel/xenomai-v3.2.4/lib/psos/pt.h
new file mode 100644
index 0000000..e61899d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/pt.h
@@ -0,0 +1,46 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _PSOS_PT_H
+#define _PSOS_PT_H
+
+#include <sys/types.h>
+#include <pthread.h>
+#include <boilerplate/hash.h>
+#include <copperplate/cluster.h>
+
+struct psos_pt {
+	unsigned int magic;		/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	struct pvclusterobj cobj;
+	pthread_mutex_t lock;
+
+	unsigned long flags;
+	unsigned long bsize;
+	unsigned long psize;
+	unsigned long nblks;
+	unsigned long ublks;
+
+	void *freelist;
+	caddr_t data;
+	unsigned long bitmap[1];
+};
+
+extern struct pvcluster psos_pt_table;
+
+#endif /* _PSOS_PT_H */
diff --git a/kernel/xenomai-v3.2.4/lib/psos/queue.c b/kernel/xenomai-v3.2.4/lib/psos/queue.c
new file mode 100644
index 0000000..325335a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/queue.c
@@ -0,0 +1,507 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <boilerplate/ancillaries.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/clockobj.h>
+#include <copperplate/cluster.h>
+#include <psos/psos.h>
+#include "internal.h"
+#include "reference.h"
+#include "task.h"
+#include "queue.h"
+#include "tm.h"
+
+#define queue_magic	0x8181fdfd
+
+struct cluster psos_queue_table;
+
+static unsigned long anon_qids;
+
+struct msgholder {
+	int size;
+	struct holder link;
+	/* Payload data follows. */
+};
+
+static struct psos_queue *get_queue_from_id(u_long qid, int *err_r)
+{
+	struct psos_queue *q = mainheap_deref(qid, struct psos_queue);
+
+	if (q == NULL || ((uintptr_t)q & (sizeof(uintptr_t)-1)) != 0)
+		goto objid_error;
+
+	if (q->magic == queue_magic)
+		return q;
+
+	if (q->magic == ~queue_magic) {
+		*err_r = ERR_OBJDEL;
+		return NULL;
+	}
+
+	if ((q->magic >> 16) == 0x8181) {
+		*err_r = ERR_OBJTYPE;
+		return NULL;
+	}
+
+objid_error:
+	*err_r = ERR_OBJID;
+
+	return NULL;
+}
+
+static void queue_finalize(struct syncobj *sobj)
+{
+	struct psos_queue *q = container_of(sobj, struct psos_queue, sobj);
+	xnfree(q);
+}
+fnref_register(libpsos, queue_finalize);
+
+static u_long __q_create(const char *name, u_long count,
+			 u_long flags, u_long maxlen, u_long *qid_r)
+{
+	struct psos_queue *q;
+	struct service svc;
+	int sobj_flags = 0;
+	int ret = SUCCESS;
+	char short_name[5];
+
+	CANCEL_DEFER(svc);
+
+	q = xnmalloc(sizeof(*q));
+	if (q == NULL) {
+		ret = ERR_NOQCB;
+		goto out;
+	}
+
+	if (name == NULL || *name == '\0')
+		sprintf(q->name, "q%lu", ++anon_qids);
+	else {
+		name = psos_trunc_name(short_name, name);
+		namecpy(q->name, name);
+	}
+
+	if (flags & Q_PRIOR)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	q->flags = flags;
+	q->maxmsg = (flags & Q_LIMIT) ? count : 0;
+	q->maxlen = maxlen;
+	ret = syncobj_init(&q->sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libpsos, queue_finalize));
+	if (ret) {
+		ret = ERR_NOQCB;
+		goto fail_syncinit;
+	}
+
+	list_init(&q->msg_list);
+	q->msgcount = 0;
+	q->magic = queue_magic;
+	*qid_r = mainheap_ref(q, u_long);
+
+	if (cluster_addobj_dup(&psos_queue_table, q->name, &q->cobj)) {
+		warning("cannot register queue: %s", q->name);
+		ret = ERR_OBJID;
+		goto fail_register;
+	}
+
+	CANCEL_RESTORE(svc);
+
+	return 0;
+
+fail_register:
+	syncobj_uninit(&q->sobj);
+fail_syncinit:
+	xnfree(q);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long q_create(const char *name,
+		u_long count, u_long flags, u_long *qid_r)
+{
+	return __q_create(name, count, flags & ~Q_VARIABLE, sizeof(u_long[4]), qid_r);
+}
+
+u_long q_vcreate(const char *name, u_long flags,
+		 u_long count, u_long maxlen, u_long *qid_r)
+{
+	return __q_create(name, count, flags | Q_VARIABLE, maxlen, qid_r);
+}
+
+static u_long __q_delete(u_long qid, u_long flags)
+{
+	struct syncstate syns;
+	struct msgholder *msg;
+	struct psos_queue *q;
+	struct service svc;
+	int ret, emptyq;
+
+	q = get_queue_from_id(qid, &ret);
+	if (q == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&q->sobj, &syns))
+		return ERR_OBJDEL;
+
+	if (((flags ^ q->flags) & Q_VARIABLE)) {
+		syncobj_unlock(&q->sobj, &syns);
+		CANCEL_RESTORE(svc);
+		return (flags & Q_VARIABLE) ? ERR_NOTVARQ : ERR_VARQ;
+	}
+
+	emptyq = list_empty(&q->msg_list);
+	if (!emptyq) {
+		do {
+			msg = list_pop_entry(&q->msg_list,
+					     struct msgholder, link);
+			xnfree(msg);
+		} while (!list_empty(&q->msg_list));
+	}
+
+	cluster_delobj(&psos_queue_table, &q->cobj);
+	q->magic = ~queue_magic; /* Prevent further reference. */
+	ret = syncobj_destroy(&q->sobj, &syns);
+	CANCEL_RESTORE(svc);
+	if (ret)
+		return ERR_TATQDEL;
+
+	return emptyq ? SUCCESS : ERR_MATQDEL;
+}
+
+u_long q_delete(u_long qid)
+{
+	return __q_delete(qid, 0);
+}
+
+u_long q_vdelete(u_long qid)
+{
+	return __q_delete(qid, Q_VARIABLE);
+}
+
+static u_long __q_ident(const char *name,
+			u_long flags, u_long node, u_long *qid_r)
+{
+	struct clusterobj *cobj;
+	struct psos_queue *q;
+	struct service svc;
+	char short_name[5];
+
+	if (node)
+		return ERR_NODENO;
+
+	name = psos_trunc_name(short_name, name);
+
+	CANCEL_DEFER(svc);
+	cobj = cluster_findobj(&psos_queue_table, name);
+	CANCEL_RESTORE(svc);
+	if (cobj == NULL)
+		return ERR_OBJNF;
+
+	q = container_of(cobj, struct psos_queue, cobj);
+	if (((flags ^ q->flags) & Q_VARIABLE)) /* XXX: unsafe, but well... */
+		return (flags & Q_VARIABLE) ? ERR_NOTVARQ : ERR_VARQ;
+
+	*qid_r = mainheap_ref(q, u_long);
+
+	return SUCCESS;
+}
+
+u_long q_ident(const char *name, u_long node, u_long *qid_r)
+{
+	return __q_ident(name, 0, node, qid_r);
+}
+
+u_long q_vident(const char *name, u_long node, u_long *qid_r)
+{
+	return __q_ident(name, Q_VARIABLE, node, qid_r);
+}
+
+static u_long __q_send_inner(struct psos_queue *q, unsigned long flags,
+			     u_long *buffer, u_long bytes)
+{
+	struct psos_queue_wait *wait;
+	struct threadobj *thobj;
+	struct msgholder *msg;
+	u_long maxbytes;
+
+	thobj = syncobj_peek_grant(&q->sobj);
+	if (thobj && threadobj_local_p(thobj)) {
+		/* Fast path: direct copy to the receiver's buffer. */
+		wait = threadobj_get_wait(thobj);
+		maxbytes = wait->size;
+		if (bytes > maxbytes)
+			bytes = maxbytes;
+		if (bytes > 0)
+			memcpy(__mptr(wait->ptr), buffer, bytes);
+		wait->size = bytes;
+		goto done;
+	}
+
+	if ((q->flags & Q_LIMIT) && q->msgcount >= q->maxmsg)
+		return ERR_QFULL;
+
+	msg = xnmalloc(bytes + sizeof(*msg));
+	if (msg == NULL)
+		return ERR_NOMGB;
+
+	q->msgcount++;
+	msg->size = bytes;
+	holder_init(&msg->link);
+
+	if (bytes > 0)
+		memcpy(msg + 1, buffer, bytes);
+
+	if (flags & Q_JAMMED)
+		list_prepend(&msg->link, &q->msg_list);
+	else
+		list_append(&msg->link, &q->msg_list);
+
+	if (thobj) {
+		/*
+		 * We could not copy the message directly to the
+		 * remote buffer; tell the thread to pull it from the
+		 * pool instead.
+		 */
+		wait = threadobj_get_wait(thobj);
+		wait->size = -1UL;
+	}
+done:
+	if (thobj)
+		syncobj_grant_to(&q->sobj, thobj);
+
+	return SUCCESS;
+}
+
+static u_long __q_send(u_long qid, u_long flags, u_long *buffer, u_long bytes)
+{
+	struct syncstate syns;
+	struct psos_queue *q;
+	struct service svc;
+	int ret;
+
+	q = get_queue_from_id(qid, &ret);
+	if (q == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&q->sobj, &syns)) {
+		ret = ERR_OBJDEL;
+		goto out;
+	}
+
+	if (((flags ^ q->flags) & Q_VARIABLE)) {
+		ret = (flags & Q_VARIABLE) ? ERR_NOTVARQ : ERR_VARQ;
+		goto fail;
+	}
+
+	if (bytes > q->maxlen) {
+		ret = ERR_MSGSIZ;
+		goto fail;
+	}
+
+	ret = __q_send_inner(q, flags, buffer, bytes);
+fail:
+	syncobj_unlock(&q->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long q_send(u_long qid, u_long msgbuf[4])
+{
+	return __q_send(qid, 0, msgbuf, sizeof(u_long[4]));
+}
+
+u_long q_vsend(u_long qid, void *msgbuf, u_long msglen)
+{
+	return __q_send(qid, Q_VARIABLE, msgbuf, msglen);
+}
+
+u_long q_urgent(u_long qid, u_long msgbuf[4])
+{
+	return __q_send(qid, Q_JAMMED, msgbuf, sizeof(u_long[4]));
+}
+
+u_long q_vurgent(u_long qid, void *msgbuf, u_long msglen)
+{
+	return __q_send(qid, Q_VARIABLE | Q_JAMMED, msgbuf, msglen);
+}
+
+static u_long __q_broadcast(u_long qid, u_long flags,
+			    u_long *buffer, u_long bytes, u_long *count_r)
+{
+	struct syncstate syns;
+	struct psos_queue *q;
+	struct service svc;
+	int ret = SUCCESS;
+
+	q = get_queue_from_id(qid, &ret);
+	if (q == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&q->sobj, &syns)) {
+		ret = ERR_OBJDEL;
+		goto out;
+	}
+
+	if (((flags ^ q->flags) & Q_VARIABLE)) {
+		ret = (flags & Q_VARIABLE) ? ERR_NOTVARQ : ERR_VARQ;
+		goto fail;
+	}
+
+	if (bytes > q->maxlen) {
+		ret = ERR_MSGSIZ;
+		goto fail;
+	}
+
+	/* Release all pending tasks atomically. */
+	*count_r = 0;
+	while (syncobj_grant_wait_p(&q->sobj)) {
+		ret = __q_send_inner(q, flags, buffer, bytes);
+		if (ret)
+			break;
+		(*count_r)++;
+	}
+fail:
+	syncobj_unlock(&q->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long q_broadcast(u_long qid, u_long msgbuf[4], u_long *count_r)
+{
+	return __q_broadcast(qid, 0, msgbuf, sizeof(u_long[4]), count_r);
+}
+
+u_long q_vbroadcast(u_long qid, void *msgbuf, u_long msglen, u_long *count_r)
+{
+	return __q_broadcast(qid, Q_VARIABLE, msgbuf, msglen, count_r);
+}
+
+static u_long __q_receive(u_long qid, u_long flags, u_long timeout,
+			  void *buffer, u_long msglen, u_long *msglen_r)
+{
+	struct psos_queue_wait *wait = NULL;
+	struct timespec ts, *timespec;
+	struct msgholder *msg = NULL;
+	struct syncstate syns;
+	unsigned long nbytes;
+	struct psos_queue *q;
+	struct service svc;
+	int ret = SUCCESS;
+
+	q = get_queue_from_id(qid, &ret);
+	if (q == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&q->sobj, &syns)) {
+		ret = ERR_OBJDEL;
+		goto out;
+	}
+
+	if (((flags ^ q->flags) & Q_VARIABLE)) {
+		ret = (flags & Q_VARIABLE) ? ERR_NOTVARQ : ERR_VARQ;
+		goto fail;
+	}
+retry:
+	if (!list_empty(&q->msg_list)) {
+		q->msgcount--;
+		msg = list_pop_entry(&q->msg_list, struct msgholder, link);
+		nbytes = msg->size;
+		if (nbytes > msglen)
+			nbytes = msglen;
+		if (nbytes > 0)
+			memcpy(buffer, msg + 1, nbytes);
+		xnfree(msg);
+		goto done;
+	}
+
+	if (flags & Q_NOWAIT) {
+		ret = ERR_NOMSG;
+		goto fail;
+	}
+
+	if (timeout != 0) {
+		timespec = &ts;
+		clockobj_ticks_to_timeout(&psos_clock, timeout, timespec);
+	} else
+		timespec = NULL;
+
+	wait = threadobj_prepare_wait(struct psos_queue_wait);
+	wait->ptr = __moff(buffer);
+	wait->size = msglen;
+
+	ret = syncobj_wait_grant(&q->sobj, timespec, &syns);
+	if (ret == -EIDRM) {
+		ret = ERR_QKILLD;
+		goto out;
+	}
+
+	if (ret == -ETIMEDOUT) {
+		ret = ERR_TIMEOUT;
+		goto fail;
+	}
+	nbytes = wait->size;
+	if (nbytes == -1UL)	/* No direct copy? */
+		goto retry;
+done:
+	if (msglen_r)
+		*msglen_r = nbytes;
+fail:
+	syncobj_unlock(&q->sobj, &syns);
+out:
+	if (wait)
+		threadobj_finish_wait();
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long q_receive(u_long qid, u_long flags, u_long timeout, u_long msgbuf[4])
+{
+	return __q_receive(qid, flags & ~Q_VARIABLE,
+			   timeout, msgbuf, sizeof(u_long[4]), NULL);
+}
+
+u_long q_vreceive(u_long qid, u_long flags, u_long timeout,
+		  void *msgbuf, u_long msglen, u_long *msglen_r)
+{
+	return __q_receive(qid, flags | Q_VARIABLE,
+			   timeout, msgbuf, msglen, msglen_r);
+}
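The variable-size variants follow the same pattern, with per-message
lengths bounded by the maxlen value given at creation time; a sketch,
again assuming Q_LIMIT and Q_NOWAIT from <psos/psos.h>:

#include <psos/psos.h>

int vqueue_demo(void)
{
	char in[] = "hello", out[32];
	u_long qid, rbytes, ret;

	/* At most 8 pending messages of up to 32 bytes each. */
	ret = q_vcreate("VARQ", Q_LIMIT, 8, sizeof(out), &qid);
	if (ret != SUCCESS)
		return -1;

	ret = q_vsend(qid, in, sizeof(in));
	if (ret == SUCCESS)
		ret = q_vreceive(qid, Q_NOWAIT, 0, out, sizeof(out),
				 &rbytes);

	q_vdelete(qid);

	return ret == SUCCESS ? 0 : -1;
}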
diff --git a/kernel/xenomai-v3.2.4/lib/psos/queue.h b/kernel/xenomai-v3.2.4/lib/psos/queue.h
new file mode 100644
index 0000000..8a364b1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/queue.h
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _PSOS_QUEUE_H
+#define _PSOS_QUEUE_H
+
+#include <sys/types.h>
+#include <boilerplate/hash.h>
+#include <copperplate/syncobj.h>
+#include <copperplate/cluster.h>
+
+#define Q_VARIABLE  0x40000000
+#define Q_JAMMED    0x80000000
+
+struct psos_queue {
+	unsigned int magic;		/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+
+	u_long flags;
+	u_long maxmsg;
+	u_long maxlen;
+	u_long msgcount;
+
+	struct syncobj sobj;
+	struct listobj msg_list;
+	struct clusterobj cobj;
+};
+
+struct psos_queue_wait {
+	size_t size;
+	dref_type(void *) ptr;
+};
+
+extern struct cluster psos_queue_table;
+
+#endif /* _PSOS_QUEUE_H */
diff --git a/kernel/xenomai-v3.2.4/lib/psos/reference.h b/kernel/xenomai-v3.2.4/lib/psos/reference.h
new file mode 100644
index 0000000..eed4a66
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/reference.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <copperplate/reference.h>
+
+#define libpsos_tag  2
+#define libpsos_cbi  1
diff --git a/kernel/xenomai-v3.2.4/lib/psos/rn.c b/kernel/xenomai-v3.2.4/lib/psos/rn.c
new file mode 100644
index 0000000..fa5bb57
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/rn.c
@@ -0,0 +1,343 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <boilerplate/ancillaries.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/clockobj.h>
+#include <psos/psos.h>
+#include "internal.h"
+#include "tm.h"
+#include "rn.h"
+
+#define rn_magic	0x8181efef
+
+struct pvcluster psos_rn_table;
+
+static unsigned long anon_rnids;
+
+static struct psos_rn *get_rn_from_id(u_long rnid, int *err_r)
+{
+	struct psos_rn *rn = mainheap_deref(rnid, struct psos_rn);
+
+	if (rn == NULL || ((uintptr_t)rn & (sizeof(uintptr_t)-1)) != 0)
+		goto objid_error;
+
+	if (rn->magic == rn_magic)
+		return rn;
+
+	if (rn->magic == ~rn_magic) {
+		*err_r = ERR_OBJDEL;
+		return NULL;
+	}
+
+	if ((rn->magic >> 16) == 0x8181) {
+		*err_r = ERR_OBJTYPE;
+		return NULL;
+	}
+
+objid_error:
+	*err_r = ERR_OBJID;
+
+	return NULL;
+}
+
+u_long rn_create(const char *name, void *saddr, u_long length,
+		 u_long usize, u_long flags, u_long *rnid_r,
+		 u_long *asize_r)
+{
+	int sobj_flags = 0, ret = SUCCESS;
+	struct psos_rn *rn;
+	struct service svc;
+	char short_name[5];
+
+	if ((uintptr_t)saddr & (sizeof(uintptr_t) - 1))
+		return ERR_RNADDR;
+
+	if (usize < 16)
+		return ERR_TINYUNIT;
+
+	if ((usize & (usize - 1)) != 0)
+		return ERR_UNITSIZE;	/* Not a power of two. */
+
+	if (length <= sizeof(*rn))
+		return ERR_TINYRN;
+
+	if (flags & RN_PRIOR)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	/*
+	 * XXX: We may not put the region control block directly into
+	 * the user-provided area, because shared mode requires us to
+	 * pull shareable object memory from the main heap. Although
+	 * the region per se is not shareable between processes, the
+	 * syncobj it embeds for synchronization is implicitly
+	 * shareable by design (there is no pvsyncobj, which would see
+	 * very seldom use). So we allocate space for the control
+	 * block from the main pool instead.
+	 */
+	rn = xnmalloc(sizeof(*rn));
+	if (rn == NULL)
+		/*
+		 * The pSOS API defines no generic failure code, only
+		 * per-feature error values, so we return the least
+		 * misleading one for an allocation failure.
+		 */
+		return ERR_NOSEG;
+
+	/* Skip the unused space. */
+	saddr += sizeof(*rn);
+	length -= sizeof(*rn);
+
+	CANCEL_DEFER(svc);
+
+	if (name == NULL || *name == '\0')
+		sprintf(rn->name, "rn%lu", ++anon_rnids);
+	else {
+		name = psos_trunc_name(short_name, name);
+		namecpy(rn->name, name);
+	}
+
+	if (pvcluster_addobj_dup(&psos_rn_table, rn->name, &rn->cobj)) {
+		warning("cannot register region: %s", rn->name);
+		xnfree(rn);
+		ret = ERR_OBJID;
+		goto out;
+	}
+
+	ret = __heapobj_init(&rn->hobj, name, length, saddr);
+	if (ret) {
+		pvcluster_delobj(&psos_rn_table, &rn->cobj);
+		ret = ERR_TINYRN;
+		xnfree(rn);
+		goto out;
+	}
+
+	rn->length = length;
+	rn->usize = usize;	/* Not actually used, just checked. */
+	rn->flags = flags;
+	rn->busynr = 0;
+	rn->usedmem = 0;
+	ret = syncobj_init(&rn->sobj, CLOCK_COPPERPLATE, sobj_flags, fnref_null);
+	if (ret) {
+		heapobj_destroy(&rn->hobj);
+		pvcluster_delobj(&psos_rn_table, &rn->cobj);
+		xnfree(rn);
+		goto out;
+	}
+
+	rn->magic = rn_magic;
+	*asize_r = rn->hobj.size;
+	*rnid_r = mainheap_ref(rn, u_long);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
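+
+/*
+ * Usage sketch (illustrative only; names and sizes are arbitrary):
+ * carving a region out of a static buffer, then allocating and
+ * returning one segment:
+ *
+ *	static char area[65536];
+ *	u_long rnid, asize;
+ *	void *seg;
+ *
+ *	if (rn_create("POOL", area, sizeof(area), 32,
+ *		      RN_FIFO, &rnid, &asize) == SUCCESS) {
+ *		rn_getseg(rnid, 128, RN_NOWAIT, 0, &seg);
+ *		rn_retseg(rnid, seg);
+ *		rn_delete(rnid);
+ *	}
+ */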
+
+u_long rn_delete(u_long rnid)
+{
+	struct syncstate syns;
+	struct psos_rn *rn;
+	struct service svc;
+	int ret;
+
+	rn = get_rn_from_id(rnid, &ret);
+	if (rn == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&rn->sobj, &syns)) {
+		ret = ERR_OBJDEL;
+		goto out;
+	}
+
+	if ((rn->flags & RN_DEL) == 0 && rn->busynr > 0) {
+		syncobj_unlock(&rn->sobj, &syns);
+		ret = ERR_SEGINUSE;
+		goto out;
+	}
+
+	pvcluster_delobj(&psos_rn_table, &rn->cobj);
+	rn->magic = ~rn_magic; /* Prevent further reference. */
+	ret = syncobj_destroy(&rn->sobj, &syns);
+	if (ret)
+		ret = ERR_TATRNDEL;
+	xnfree(rn);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long rn_ident(const char *name, u_long *rnid_r)
+{
+	struct pvclusterobj *cobj;
+	struct psos_rn *rn;
+	struct service svc;
+	char short_name[5];
+
+	name = psos_trunc_name(short_name, name);
+
+	CANCEL_DEFER(svc);
+	cobj = pvcluster_findobj(&psos_rn_table, name);
+	CANCEL_RESTORE(svc);
+	if (cobj == NULL)
+		return ERR_OBJNF;
+
+	rn = container_of(cobj, struct psos_rn, cobj);
+	*rnid_r = mainheap_ref(rn, u_long);
+
+	return SUCCESS;
+}
+
+u_long rn_getseg(u_long rnid, u_long size, u_long flags,
+		 u_long timeout, void **segaddr)
+{
+	struct psos_rn_wait *wait = NULL;
+	struct timespec ts, *timespec;
+	struct syncstate syns;
+	struct psos_rn *rn;
+	struct service svc;
+	int ret = SUCCESS;
+	void *seg;
+
+	rn = get_rn_from_id(rnid, &ret);
+	if (rn == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&rn->sobj, &syns)) {
+		ret = ERR_OBJDEL;
+		goto out;
+	}
+
+	/*
+	 * The heap manager does not enforce any allocation limit, so
+	 * we have to do it ourselves.
+	 */
+	if (rn->usedmem + size > rn->length)
+		goto starve;
+
+	seg = heapobj_alloc(&rn->hobj, size);
+	if (seg) {
+		*segaddr = seg;
+		rn->busynr++;
+		rn->usedmem += heapobj_validate(&rn->hobj, seg);
+		goto done;
+	}
+
+starve:
+	if (flags & RN_NOWAIT) {
+		ret = ERR_NOSEG;
+		goto done;
+	}
+
+	if (timeout != 0) {
+		timespec = &ts;
+		clockobj_ticks_to_timeout(&psos_clock, timeout, timespec);
+	} else
+		timespec = NULL;
+
+	wait = threadobj_prepare_wait(struct psos_rn_wait);
+	wait->ptr = __moff_nullable(NULL);
+	wait->size = size;
+
+	ret = syncobj_wait_grant(&rn->sobj, timespec, &syns);
+	if (ret == -ETIMEDOUT)
+		ret = ERR_TIMEOUT;
+	/*
+	 * There is no explicit flush operation on pSOS regions,
+	 * only an implicit one through deletion.
+	 */
+	else if (ret == -EIDRM) {
+		ret = ERR_RNKILLD;
+		goto out;
+	}
+
+	*segaddr = __mptr(wait->ptr);
+done:
+	syncobj_unlock(&rn->sobj, &syns);
+out:
+	if (wait)
+		threadobj_finish_wait();
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long rn_retseg(u_long rnid, void *segaddr)
+{
+	struct threadobj *thobj, *tmp;
+	struct psos_rn_wait *wait;
+	struct syncstate syns;
+	struct psos_rn *rn;
+	struct service svc;
+	int ret = SUCCESS;
+	u_long size;
+	void *seg;
+
+	rn = get_rn_from_id(rnid, &ret);
+	if (rn == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&rn->sobj, &syns)) {
+		ret = ERR_OBJDEL;
+		goto out;
+	}
+
+	rn->usedmem -= heapobj_validate(&rn->hobj, segaddr);
+	heapobj_free(&rn->hobj, segaddr);
+	rn->busynr--;
+
+	if (!syncobj_grant_wait_p(&rn->sobj))
+		goto done;
+
+	syncobj_for_each_grant_waiter_safe(&rn->sobj, thobj, tmp) {
+		wait = threadobj_get_wait(thobj);
+		size = wait->size;
+		if (rn->usedmem + size > rn->length)
+			continue;
+		seg = heapobj_alloc(&rn->hobj, size);
+		if (seg) {
+			rn->busynr++;
+			rn->usedmem += heapobj_validate(&rn->hobj, seg);
+			wait->ptr = __moff(seg);
+			syncobj_grant_to(&rn->sobj, thobj);
+		}
+	}
+done:
+	syncobj_unlock(&rn->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
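+
+/*
+ * The two routines above implement the blocking allocation
+ * protocol. A sketch of the handoff between two hypothetical tasks
+ * A and B:
+ *
+ *	A: rn_getseg(rnid, 64, 0, 0, &seg)	blocks, publishing
+ *						wait->size = 64
+ *	B: rn_retseg(rnid, oldseg)		frees memory, scans the
+ *						wait queue, allocates 64
+ *						bytes on behalf of A and
+ *						grants it via wait->ptr
+ *	A: resumes				picks the result up from
+ *						__mptr(wait->ptr)
+ */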
diff --git a/kernel/xenomai-v3.2.4/lib/psos/rn.h b/kernel/xenomai-v3.2.4/lib/psos/rn.h
new file mode 100644
index 0000000..1a026a0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/rn.h
@@ -0,0 +1,49 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _PSOS_RN_H
+#define _PSOS_RN_H
+
+#include <boilerplate/hash.h>
+#include <copperplate/syncobj.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/cluster.h>
+
+struct psos_rn {
+	unsigned int magic;		/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+
+	u_long flags;
+	u_long length;
+	u_long usize;
+	u_long busynr;
+	u_long usedmem;
+
+	struct syncobj sobj;
+	struct heapobj hobj;
+	struct pvclusterobj cobj;
+};
+
+struct psos_rn_wait {
+	size_t size;
+	dref_type(void *) ptr;
+};
+
+extern struct pvcluster psos_rn_table;
+
+#endif /* _PSOS_RN_H */
diff --git a/kernel/xenomai-v3.2.4/lib/psos/sem.c b/kernel/xenomai-v3.2.4/lib/psos/sem.c
new file mode 100644
index 0000000..6f66101
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/sem.c
@@ -0,0 +1,231 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <stdint.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <string.h>
+#include <boilerplate/ancillaries.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/cluster.h>
+#include <copperplate/clockobj.h>
+#include <copperplate/semobj.h>
+#include "reference.h"
+#include "task.h"
+#include "sem.h"
+#include "tm.h"
+#include "internal.h"
+#include <psos/psos.h>
+
+#define sem_magic	0x8181fbfb
+
+struct cluster psos_sem_table;
+
+static unsigned long anon_smids;
+
+static struct psos_sem *get_sem_from_id(u_long smid, int *err_r)
+{
+	struct psos_sem *sem = mainheap_deref(smid, struct psos_sem);
+
+	if (sem == NULL || ((uintptr_t)sem & (sizeof(uintptr_t)-1)) != 0)
+		goto objid_error;
+
+	if (sem->magic == sem_magic)
+		return sem;
+
+	if (sem->magic == ~sem_magic) {
+		*err_r = ERR_OBJDEL;
+		return NULL;
+	}
+
+	if ((sem->magic >> 16) == 0x8181) {
+		*err_r = ERR_OBJTYPE;
+		return NULL;
+	}
+
+objid_error:
+	*err_r = ERR_OBJID;
+
+	return NULL;
+}
+
+static void sem_finalize(struct semobj *smobj)
+{
+	struct psos_sem *sem = container_of(smobj, struct psos_sem, smobj);
+	xnfree(sem);
+}
+fnref_register(libpsos, sem_finalize);
+
+u_long sm_create(const char *name,
+		 u_long count, u_long flags, u_long *smid_r)
+{
+	int smobj_flags = SEMOBJ_WARNDEL;
+	struct psos_sem *sem;
+	struct service svc;
+	char short_name[5];
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	sem = xnmalloc(sizeof(*sem));
+	if (sem == NULL) {
+		ret = ERR_NOSCB;
+		goto out;
+	}
+
+	if (name == NULL || *name == '\0')
+		sprintf(sem->name, "sm%lu", ++anon_smids);
+	else {
+		name = psos_trunc_name(short_name, name);
+		namecpy(sem->name, name);
+	}
+
+	if (cluster_addobj_dup(&psos_sem_table, sem->name, &sem->cobj)) {
+		warning("cannot register semaphore: %s", sem->name);
+		xnfree(sem);
+		ret = ERR_OBJID;
+		goto out;
+	}
+
+	if (flags & SM_PRIOR)
+		smobj_flags |= SEMOBJ_PRIO;
+
+	sem->magic = sem_magic;
+	ret = semobj_init(&sem->smobj, smobj_flags, count,
+			  fnref_put(libpsos, sem_finalize));
+	if (ret) {
+		cluster_delobj(&psos_sem_table, &sem->cobj);
+		xnfree(sem);
+		goto out;
+	}
+
+	*smid_r = mainheap_ref(sem, u_long);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
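+
+/*
+ * Usage sketch (illustrative; identifiers arbitrary): a binary
+ * semaphore created with one token, then taken and given back:
+ *
+ *	u_long smid;
+ *
+ *	if (sm_create("LOCK", 1, SM_PRIOR, &smid) == SUCCESS) {
+ *		sm_p(smid, SM_WAIT, 0);
+ *		sm_v(smid);
+ *		sm_delete(smid);
+ *	}
+ */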
+
+u_long sm_delete(u_long smid)
+{
+	struct psos_sem *sem;
+	struct service svc;
+	int ret;
+
+	sem = get_sem_from_id(smid, &ret);
+	if (sem == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	cluster_delobj(&psos_sem_table, &sem->cobj);
+	sem->magic = ~sem_magic; /* Prevent further reference. */
+	ret = semobj_destroy(&sem->smobj);
+	if (ret)
+		ret = ret > 0 ? ERR_TATSDEL : ERR_OBJDEL;
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long sm_ident(const char *name, u_long node, u_long *smid_r)
+{
+	struct clusterobj *cobj;
+	struct psos_sem *sem;
+	struct service svc;
+	char short_name[5];
+
+	if (node)
+		return ERR_NODENO;
+
+	name = psos_trunc_name(short_name, name);
+
+	CANCEL_DEFER(svc);
+	cobj = cluster_findobj(&psos_sem_table, name);
+	CANCEL_RESTORE(svc);
+	if (cobj == NULL)
+		return ERR_OBJNF;
+
+	sem = container_of(cobj, struct psos_sem, cobj);
+	*smid_r = mainheap_ref(sem, u_long);
+
+	return SUCCESS;
+}
+
+u_long sm_p(u_long smid, u_long flags, u_long timeout)
+{
+	struct timespec ts, *timespec = &ts;
+	struct psos_sem *sem;
+	struct service svc;
+	int ret;
+
+	sem = get_sem_from_id(smid, &ret);
+	if (sem == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	if (flags & SM_NOWAIT) {
+		timespec->tv_sec = 0;
+		timespec->tv_nsec = 0;
+	} else if (timeout != 0)
+		clockobj_ticks_to_timeout(&psos_clock, timeout, timespec);
+	else
+		timespec = NULL;
+
+	ret = semobj_wait(&sem->smobj, timespec);
+	if (ret) {
+		if (ret == -EIDRM)
+			ret = ERR_SKILLD;
+		else if (ret == -ETIMEDOUT)
+			ret = ERR_TIMEOUT;
+		else if (ret == -EWOULDBLOCK)
+			ret = ERR_NOSEM;
+		/*
+		 * There is no explicit flush operation on pSOS
+		 * semaphores, only an implicit one through deletion.
+		 */
+	}
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long sm_v(u_long smid)
+{
+	struct psos_sem *sem;
+	struct service svc;
+	int ret;
+
+	sem = get_sem_from_id(smid, &ret);
+	if (sem == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	ret = semobj_post(&sem->smobj);
+	if (ret == -EIDRM)
+		ret = ERR_OBJDEL;
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/sem.h b/kernel/xenomai-v3.2.4/lib/psos/sem.h
new file mode 100644
index 0000000..e572e22
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/sem.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _PSOS_SEM_H
+#define _PSOS_SEM_H
+
+#include <boilerplate/hash.h>
+#include <copperplate/semobj.h>
+#include <copperplate/cluster.h>
+
+struct psos_sem {
+	unsigned int magic;		/* Must be first. */
+	char name[XNOBJECT_NAME_LEN];
+	struct semobj smobj;
+	struct clusterobj cobj;
+};
+
+extern struct cluster psos_sem_table;
+
+#endif /* _PSOS_SEM_H */
diff --git a/kernel/xenomai-v3.2.4/lib/psos/task.c b/kernel/xenomai-v3.2.4/lib/psos/task.c
new file mode 100644
index 0000000..f678be6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/task.c
@@ -0,0 +1,763 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <memory.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <sched.h>
+#include "boilerplate/namegen.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/syncobj.h"
+#include "copperplate/clockobj.h"
+#include "copperplate/cluster.h"
+#include "copperplate/internal.h"
+#include "psos/psos.h"
+#include "internal.h"
+#include "task.h"
+#include "tm.h"
+#include "queue.h"
+#include "rn.h"
+
+union psos_wait_union {
+	struct psos_queue_wait queue_wait;
+	struct psos_rn_wait rn_wait;
+};
+
+struct cluster psos_task_table;
+
+static DEFINE_NAME_GENERATOR(task_namegen, "task",
+			     struct psos_task, name);
+
+static struct psos_task *find_psos_task(u_long tid, int *err_r)
+{
+	struct psos_task *task = mainheap_deref(tid, struct psos_task);
+	unsigned int magic;
+
+	/*
+	 * Best-effort to validate a TCB pointer the cheap way,
+	 * without relying on any syscall.
+	 */
+	if (task == NULL || ((uintptr_t)task & (sizeof(uintptr_t)-1)) != 0)
+		goto objid_error;
+
+	magic = threadobj_get_magic(&task->thobj);
+
+	if (magic == task_magic)
+		return task;
+
+	if (magic == ~task_magic) {
+		*err_r = ERR_OBJDEL;
+		return NULL;
+	}
+
+	if ((magic >> 16) == 0x8181) {
+		*err_r = ERR_OBJTYPE;
+		return NULL;
+	}
+
+objid_error:
+	*err_r = ERR_OBJID;
+
+	return NULL;
+}
+
+static struct psos_task *find_psos_task_or_self(u_long tid, int *err_r)
+{
+	struct psos_task *current;
+
+	if (tid)
+		return find_psos_task(tid, err_r);
+
+	current = psos_task_current();
+	if (current == NULL) {
+		*err_r = ERR_SSFN;
+		return NULL;
+	}
+
+	return current;
+}
+
+struct psos_task *get_psos_task(u_long tid, int *err_r)
+{
+	struct psos_task *task = find_psos_task(tid, err_r);
+
+	/*
+	 * Grab the task lock, assuming that the task might have been
+	 * deleted, or that some random opaque pointer led us to
+	 * memory which happens to be validly mapped but certainly
+	 * does not back a task object. The last line of defense is
+	 * pthread_mutex_lock() detecting a wrong mutex kind and
+	 * bailing out.
+	 *
+	 * XXX: threadobj_lock() disables cancellability for the
+	 * caller upon success, until the lock is dropped in
+	 * threadobj_unlock(), so there is no way the task may vanish
+	 * while we hold the lock. Therefore we need no cleanup
+	 * handler here.
+	 */
+	if (task == NULL || threadobj_lock(&task->thobj) == -EINVAL)
+		return NULL;
+
+	/* Check the magic word again, while we hold the lock. */
+	if (threadobj_get_magic(&task->thobj) != task_magic) {
+		threadobj_unlock(&task->thobj);
+		*err_r = ERR_OBJDEL;
+		return NULL;
+	}
+
+	return task;
+}
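+
+/*
+ * Every successful get_psos_task() must be paired with
+ * put_psos_task() to drop the thread lock. The canonical calling
+ * pattern, as used by the services below:
+ *
+ *	task = get_psos_task(tid, &ret);
+ *	if (task == NULL)
+ *		return ret;
+ *	...operate on the locked task...
+ *	put_psos_task(task);
+ */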
+
+struct psos_task *get_psos_task_or_self(u_long tid, int *err_r)
+{
+	struct psos_task *current;
+
+	if (tid)
+		return get_psos_task(tid, err_r);
+
+	current = psos_task_current();
+	if (current == NULL) {
+		*err_r = ERR_SSFN;
+		return NULL;
+	}
+
+	/* This one might block but can't fail, since the task is ours. */
+	threadobj_lock(&current->thobj);
+
+	return current;
+}
+
+void put_psos_task(struct psos_task *task)
+{
+	threadobj_unlock(&task->thobj);
+}
+
+static void task_finalizer(struct threadobj *thobj)
+{
+	struct psos_task *task = container_of(thobj, struct psos_task, thobj);
+	struct psos_tm *tm, *tmp;
+	struct syncstate syns;
+	int ret;
+
+	cluster_delobj(&psos_task_table, &task->cobj);
+
+	if (!pvlist_empty(&task->timer_list)) {
+		pvlist_for_each_entry_safe(tm, tmp, &task->timer_list, link)
+			tm_cancel((u_long)tm);
+	}
+
+	/* We have to hold a lock on a syncobj to destroy it. */
+	ret = __bt(syncobj_lock(&task->sobj, &syns));
+	if (ret == 0)
+		syncobj_destroy(&task->sobj, &syns);
+}
+
+static int task_prologue(void *arg)
+{
+	struct psos_task *task = arg;
+
+	return __bt(threadobj_prologue(&task->thobj, task->name));
+}
+
+static void *task_trampoline(void *arg)
+{
+	struct psos_task *task = arg;
+	struct psos_task_args *args = &task->args;
+	struct sched_param_ex param_ex;
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+	threadobj_wait_start();
+	threadobj_lock(&task->thobj);
+
+	if (task->mode & T_TSLICE) {
+		param_ex.sched_priority = threadobj_get_priority(&task->thobj);
+		param_ex.sched_rr_quantum.tv_sec = psos_rrperiod.tv_sec;
+		param_ex.sched_rr_quantum.tv_nsec = psos_rrperiod.tv_nsec;
+		threadobj_set_schedparam(&task->thobj, SCHED_RR, &param_ex);
+	}
+
+	if (task->mode & T_NOPREEMPT)
+		__threadobj_lock_sched(&task->thobj);
+
+	threadobj_unlock(&task->thobj);
+	threadobj_notify_entry();
+	CANCEL_RESTORE(svc);
+
+	args->entry(args->arg0, args->arg1, args->arg2, args->arg3);
+
+	return NULL;
+}
+
+/*
+ * By default, pSOS priorities are mapped 1:1 to SCHED_FIFO
+ * levels. The available priority range is [1..256] over Cobalt when
+ * running in primary mode, and [1..99] over the regular kernel with
+ * the POSIX interface.
+ *
+ * NOTE: over Cobalt, a thread transitioning to secondary mode has its
+ * priority ceiled to 99 in the regular POSIX SCHED_FIFO class.
+ *
+ * The application code may override the routine which normalizes
+ * priorities, i.e. maps them from pSOS to SCHED_FIFO levels.
+ * Normalized priorities returned by this routine must be in the
+ * range [1..threadobj_high_prio] inclusive.
+ */
+__weak int psos_task_normalize_priority(unsigned long psos_prio)
+{
+	if (psos_prio > threadobj_high_prio)
+		panic("current implementation restricts pSOS "
+		      "priority levels to range [1..%d]",
+		      threadobj_high_prio);
+
+	/* Map a pSOS priority level to a SCHED_FIFO one. */
+	return psos_prio;
+}
+
+/*
+ * Although default pSOS priorities are mapped 1:1 to SCHED_FIFO, we
+ * still have to provide a denormalize function, because these calls
+ * are weak and application code may override them to implement the
+ * mapping differently.
+ */
+__weak unsigned long psos_task_denormalize_priority(int core_prio)
+{
+	/* Map a SCHED_FIFO priority level to a pSOS one. */
+	return core_prio;
+}
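+
+/*
+ * Example override (hypothetical): an application could compress
+ * the pSOS range onto [1..99] by providing strong definitions of
+ * both hooks, e.g.:
+ *
+ *	int psos_task_normalize_priority(unsigned long psos_prio)
+ *	{
+ *		int prio = (int)((psos_prio * 99) / 255);
+ *		return prio ? prio : 1;
+ *	}
+ *
+ *	unsigned long psos_task_denormalize_priority(int core_prio)
+ *	{
+ *		return ((unsigned long)core_prio * 255) / 99;
+ *	}
+ *
+ * Note that such a compression is only an approximate inverse.
+ */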
+
+static int check_task_priority(u_long psos_prio, int *core_prio)
+{
+	if (psos_prio < 1 || psos_prio > 255) /* In theory. */
+		return ERR_PRIOR;
+
+	*core_prio = psos_task_normalize_priority(psos_prio);
+
+	return SUCCESS;
+}
+
+static int psos_task_get_priority(struct psos_task *task)
+{
+	int prio = threadobj_get_priority(&task->thobj);
+	return psos_task_denormalize_priority(prio);
+}
+
+u_long t_create(const char *name, u_long prio,
+		u_long sstack, u_long ustack, u_long flags, u_long *tid_r)
+{
+	struct corethread_attributes cta;
+	struct threadobj_init_data idata;
+	struct psos_task *task;
+	struct service svc;
+	int ret, cprio = 1;
+	char short_name[5];
+
+	ret = check_task_priority(prio, &cprio);
+	if (ret)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	task = threadobj_alloc(struct psos_task,
+			       thobj, union psos_wait_union);
+	if (task == NULL) {
+		ret = ERR_NOTCB;
+		goto out;
+	}
+
+	ustack += sstack;
+
+	/*
+	 * Make sure we are granted a minimal amount of stack space
+	 * for common glibc usage. If zero, we will pick a value based
+	 * on the implementation default for such a minimum.
+	 */
+	if (ustack > 0 && ustack < 8192) {
+		threadobj_free(&task->thobj);
+		ret = ERR_TINYSTK;
+		goto out;
+	}
+
+	if (name == NULL || *name == '\0')
+		generate_name(task->name, name, &task_namegen);
+	else {
+		name = psos_trunc_name(short_name, name);
+		namecpy(task->name, name);
+	}
+
+	task->flags = flags;	/* We don't do much with those. */
+	task->mode = 0;	/* Not yet known. */
+	task->events = 0;
+	ret = syncobj_init(&task->sobj, CLOCK_COPPERPLATE, 0, fnref_null);
+	if (ret)
+		goto fail_syncinit;
+
+	memset(task->notepad, 0, sizeof(task->notepad));
+	pvlist_init(&task->timer_list);
+	*tid_r = mainheap_ref(task, u_long);
+
+	idata.magic = task_magic;
+	idata.finalizer = task_finalizer;
+	idata.policy = cprio ? SCHED_FIFO : SCHED_OTHER;
+	idata.param_ex.sched_priority = cprio;
+	ret = threadobj_init(&task->thobj, &idata);
+	if (ret)
+		goto fail_threadinit;
+
+	ret = __bt(cluster_addobj_dup(&psos_task_table, task->name, &task->cobj));
+	if (ret) {
+		warning("cannot register task: %s", task->name);
+		goto fail_register;
+	}
+
+	cta.policy = idata.policy;
+	cta.param_ex.sched_priority = cprio;
+	cta.prologue = task_prologue;
+	cta.run = task_trampoline;
+	cta.arg = task;
+	cta.stacksize = ustack;
+	cta.detachstate = PTHREAD_CREATE_DETACHED;
+
+	ret = __bt(copperplate_create_thread(&cta, &task->thobj.ptid));
+	if (ret) {
+		cluster_delobj(&psos_task_table, &task->cobj);
+	fail_register:
+		threadobj_uninit(&task->thobj);
+	fail_threadinit:
+		syncobj_uninit(&task->sobj);
+	fail_syncinit:
+		ret = ERR_NOTCB;
+		threadobj_free(&task->thobj);
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_start(u_long tid,
+	       u_long mode,
+	       void (*entry)(u_long, u_long, u_long, u_long),
+	       u_long args[])
+{
+	struct psos_task *task;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task(tid, &ret);
+	if (task == NULL)
+		goto out;
+
+	task->args.entry = entry;
+	if (args) {
+		task->args.arg0 = args[0];
+		task->args.arg1 = args[1];
+		task->args.arg2 = args[2];
+		task->args.arg3 = args[3];
+	} else {
+		task->args.arg0 = 0;
+		task->args.arg1 = 0;
+		task->args.arg2 = 0;
+		task->args.arg3 = 0;
+	}
+	task->mode = mode;
+	ret = threadobj_start(&task->thobj);
+	switch (ret) {
+	case -EIDRM:
+		/* Task already exited; do not touch the stale lock. */
+		ret = SUCCESS;
+		break;
+	default:
+		ret = ERR_OBJDEL;
+		/* Falls through to drop the task lock. */
+	case 0:	/* == SUCCESS */
+		put_psos_task(task);
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
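+
+/*
+ * Usage sketch (illustrative; names and values arbitrary): creating
+ * then starting a task with the conventional four-argument vector:
+ *
+ *	static void entry(u_long a0, u_long a1, u_long a2, u_long a3);
+ *	u_long args[] = { 1, 2, 3, 4 }, tid;
+ *
+ *	if (t_create("WORK", 50, 0, 8192, 0, &tid) == SUCCESS)
+ *		t_start(tid, 0, entry, args);
+ */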
+
+u_long t_suspend(u_long tid)
+{
+	struct psos_task *task;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task_or_self(tid, &ret);
+	if (task == NULL)
+		goto out;
+
+	ret = threadobj_suspend(&task->thobj);
+	if (ret)
+		ret = ERR_OBJDEL;
+
+	put_psos_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_resume(u_long tid)
+{
+	struct psos_task *task;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task(tid, &ret);
+	if (task == NULL)
+		goto out;
+
+	ret = threadobj_resume(&task->thobj);
+	if (ret)
+		ret = ERR_OBJDEL;
+
+	put_psos_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_setpri(u_long tid, u_long newprio, u_long *oldprio_r)
+{
+	int policy, ret = SUCCESS, cprio = 1;
+	struct sched_param_ex param_ex;
+	struct psos_task *task;
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task_or_self(tid, &ret);
+	if (task == NULL)
+		goto out;
+
+	*oldprio_r = psos_task_get_priority(task);
+
+	if (newprio == 0) /* Only inquires for the task priority. */
+		goto done;
+
+	ret = check_task_priority(newprio, &cprio);
+	if (ret) {
+		ret = ERR_SETPRI;
+		goto done;
+	}
+
+	policy = cprio ? SCHED_FIFO : SCHED_OTHER;
+	param_ex.sched_priority = cprio;
+	ret = threadobj_set_schedparam(&task->thobj, policy, &param_ex);
+	switch (ret) {
+	case -EIDRM:
+		ret = SUCCESS;
+		goto out;
+	default:
+		ret = ERR_OBJDEL;
+		/* Falls through. */
+	case 0:	/* == SUCCESS */
+		break;
+	}
+done:
+	put_psos_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_delete(u_long tid)
+{
+	struct psos_task *task;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task_or_self(tid, &ret);
+	if (task == NULL)
+		goto out;
+
+	ret = threadobj_cancel(&task->thobj);
+	if (ret)
+		ret = ERR_OBJDEL;
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_ident(const char *name, u_long node, u_long *tid_r)
+{
+	struct clusterobj *cobj;
+	struct psos_task *task;
+	struct service svc;
+	char short_name[5];
+	int ret = SUCCESS;
+
+	if (node)
+		return ERR_NODENO;
+
+	CANCEL_DEFER(svc);
+
+	if (name == NULL) {
+		task = find_psos_task_or_self(0, &ret);
+		if (task == NULL)
+			goto out;
+	} else {
+		name = psos_trunc_name(short_name, name);
+		cobj = cluster_findobj(&psos_task_table, name);
+		if (cobj == NULL) {
+			ret = ERR_OBJNF;
+			goto out;
+		}
+		task = container_of(cobj, struct psos_task, cobj);
+		/*
+		 * Last attempt to check whether the task is valid, in
+		 * case it is pending deletion.
+		 */
+		if (threadobj_get_magic(&task->thobj) != task_magic) {
+			ret = ERR_OBJNF;
+			goto out;
+		}
+	}
+
+	*tid_r = mainheap_ref(task, u_long);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_getreg(u_long tid, u_long regnum, u_long *regvalue_r)
+{
+	struct psos_task *task;
+	struct service svc;
+	int ret = SUCCESS;
+
+	if (regnum >= PSOSTASK_NR_REGS)
+		return ERR_REGNUM;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task_or_self(tid, &ret);
+	if (task == NULL)
+		goto out;
+
+	*regvalue_r = task->notepad[regnum];
+	put_psos_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_setreg(u_long tid, u_long regnum, u_long regvalue)
+{
+	struct psos_task *task;
+	struct service svc;
+	int ret = SUCCESS;
+
+	if (regnum >= PSOSTASK_NR_REGS)
+		return ERR_REGNUM;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task_or_self(tid, &ret);
+	if (task == NULL)
+		goto out;
+
+	task->notepad[regnum] = regvalue;
+	put_psos_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long t_mode(u_long mask, u_long newmask, u_long *oldmode_r)
+{
+	struct sched_param_ex param_ex;
+	int policy, ret = SUCCESS;
+	struct psos_task *task;
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+
+	task = get_psos_task_or_self(0, &ret);
+	if (task == NULL)
+		goto out;
+
+	*oldmode_r = task->mode;
+
+	if (mask == 0)
+		goto done;
+
+	task->mode &= ~mask;
+	task->mode |= (newmask & mask);
+
+	if (task->mode & T_NOPREEMPT)
+		__threadobj_lock_sched_once(&task->thobj);
+	else if (*oldmode_r & T_NOPREEMPT)
+		__threadobj_unlock_sched(&task->thobj);
+
+	param_ex.sched_priority = threadobj_get_priority(&task->thobj);
+
+	if (((task->mode ^ *oldmode_r) & T_TSLICE) == 0)
+		goto done;	/* rr status not changed. */
+
+	if (task->mode & T_TSLICE) {
+		policy = SCHED_RR;
+		param_ex.sched_rr_quantum.tv_sec = psos_rrperiod.tv_sec;
+		param_ex.sched_rr_quantum.tv_nsec = psos_rrperiod.tv_nsec;
+	} else {
+		policy = param_ex.sched_priority ? SCHED_FIFO : SCHED_OTHER;
+	}
+
+	/* Working on self, so -EIDRM can't happen. */
+	threadobj_set_schedparam(&task->thobj, policy, &param_ex);
+done:
+	put_psos_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
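+
+/*
+ * For instance, t_mode(T_NOPREEMPT, T_NOPREEMPT, &oldmode) disables
+ * preemption for the caller, and t_mode(T_NOPREEMPT, 0, &oldmode)
+ * re-enables it; passing mask == 0 merely samples the current mode.
+ */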
+
+static int collect_events(struct psos_task *task,
+			  u_long flags, u_long events, u_long *events_r)
+{
+	if (((flags & EV_ANY) && (events & task->events) != 0) ||
+	    (!(flags & EV_ANY) && ((events & task->events) == events))) {
+		/*
+		 * The condition is satisfied; update the return value
+		 * with the set of matched events, and clear the
+		 * collected events from the task's mask.
+		 */
+		*events_r = (task->events & events);
+		task->events &= ~events;
+		return 1;
+	}
+
+	return 0;
+}
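+
+/*
+ * Worked example (values arbitrary): with task->events == 0x5 and
+ * events == 0x7, EV_ANY is satisfied at once and returns 0x5, while
+ * the default "all events" mode keeps the caller waiting until 0x2
+ * is also posted.
+ */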
+
+u_long ev_receive(u_long events, u_long flags,
+		  u_long timeout, u_long *events_r)
+{
+	struct timespec ts, *timespec;
+	struct psos_task *current;
+	struct syncstate syns;
+	struct service svc;
+	int ret;
+
+	current = find_psos_task_or_self(0, &ret);
+	if (current == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+
+	ret = syncobj_lock(&current->sobj, &syns);
+	if (ret) {
+		ret = ERR_OBJDEL;
+		goto out;
+	}
+
+	if (events == 0) {
+		*events_r = current->events; /* Only polling events. */
+		goto done;
+	}
+
+	if (collect_events(current, flags, events, events_r))
+		goto done;
+
+	if (flags & EV_NOWAIT) {
+		ret = ERR_NOEVS;
+		goto done;
+	}
+
+	if (timeout != 0) {
+		timespec = &ts;
+		clockobj_ticks_to_timeout(&psos_clock, timeout, timespec);
+	} else
+		timespec = NULL;
+
+	for (;;) {
+		ret = syncobj_wait_grant(&current->sobj, timespec, &syns);
+		if (ret == -ETIMEDOUT) {
+			ret = ERR_TIMEOUT;
+			break;
+		}
+		if (collect_events(current, flags, events, events_r))
+			break;
+	}
+done:
+	syncobj_unlock(&current->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+int __ev_send(struct psos_task *task, u_long events)
+{
+	struct syncstate syns;
+	int ret;
+
+	ret = syncobj_lock(&task->sobj, &syns);
+	if (ret)
+		return ERR_OBJDEL;
+
+	task->events |= events;
+	/*
+	 * If the task is pending in ev_receive(), it's likely that we
+	 * are posting events the task is waiting for, so we can wake
+	 * it up immediately and let it confirm whether the condition
+	 * is now satisfied.
+	 */
+	syncobj_grant_one(&task->sobj);
+
+	syncobj_unlock(&task->sobj, &syns);
+
+	return 0;
+}
+
+u_long ev_send(u_long tid, u_long events)
+{
+	struct psos_task *task;
+	struct service svc;
+	int ret = SUCCESS;
+
+	task = find_psos_task_or_self(tid, &ret);
+	if (task == NULL)
+		return ret;
+
+	CANCEL_DEFER(svc);
+	ret = __ev_send(task, events);
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
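+
+/*
+ * Usage sketch (illustrative; identifiers arbitrary): a producer
+ * task signalling event bit 0x1 to a consumer blocked in
+ * ev_receive():
+ *
+ *	u_long got;
+ *
+ *	consumer:	ev_receive(0x1, EV_ANY, 0, &got);
+ *	producer:	ev_send(consumer_tid, 0x1);
+ */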
diff --git a/kernel/xenomai-v3.2.4/lib/psos/task.h b/kernel/xenomai-v3.2.4/lib/psos/task.h
new file mode 100644
index 0000000..43173a6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/task.h
@@ -0,0 +1,76 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _PSOS_TASK_H
+#define _PSOS_TASK_H
+
+#include <boilerplate/hash.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/syncobj.h>
+#include <copperplate/cluster.h>
+
+struct psos_task_args {
+	void (*entry)(u_long a0, u_long a1, u_long a2, u_long a3);
+	u_long arg0;
+	u_long arg1;
+	u_long arg2;
+	u_long arg3;
+};
+
+#define PSOSTASK_NR_REGS  16
+
+struct psos_task {
+
+	int flags;
+	int mode;
+	u_long events;
+	u_long notepad[PSOSTASK_NR_REGS];
+	struct pvlistobj timer_list; /* Private. Never accessed remotely. */
+
+	char name[XNOBJECT_NAME_LEN];
+	struct psos_task_args args;
+
+	struct threadobj thobj;
+	struct syncobj sobj;	/* For events. */
+	struct clusterobj cobj;
+};
+
+#define task_magic	0x8181fafa
+
+static inline struct psos_task *psos_task_current(void)
+{
+	struct threadobj *thobj = threadobj_current();
+
+	if (thobj == NULL ||
+	    threadobj_get_magic(thobj) != task_magic)
+		return NULL;
+
+	return container_of(thobj, struct psos_task, thobj);
+}
+
+struct psos_task *get_psos_task(u_long tid, int *err_r);
+
+struct psos_task *get_psos_task_or_self(u_long tid, int *err_r);
+
+void put_psos_task(struct psos_task *task);
+
+int __ev_send(struct psos_task *task, unsigned long events);
+
+extern struct cluster psos_task_table;
+
+#endif /* _PSOS_TASK_H */
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/Makefile b/kernel/xenomai-v3.2.4/lib/psos/testsuite/Makefile
new file mode 100644
index 0000000..3330a61
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/Makefile
@@ -0,0 +1,49 @@
+ifeq ($(DESTDIR),)
+XENO_CONFIG=xeno-config
+else
+XENO_CONFIG=$(DESTDIR)/bin/xeno-config
+endif
+
+prefix := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --prefix)
+solibs := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --libdir)
+
+ifeq ($(prefix),)
+$(error Please add <xenomai-install-path>/bin to your PATH variable or specify DESTDIR)
+endif
+
+TESTS := \
+	task-1 task-2 task-3 task-4 task-5 task-6 task-7 task-8 task-9 \
+	tm-1 tm-2 tm-3 tm-4 tm-5 tm-6 tm-7 \
+	mq-1 mq-2 mq-3 \
+	sem-1 sem-2 \
+	pt-1 \
+	rn-1
+
+CFLAGS := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --skin=psos --cflags) -g
+LDFLAGS := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --skin=psos --ldflags)
+CC = $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --cc)
+
+all: $(TESTS)
+
+%: %.c
+	$(CC) -o $@ $< $(CFLAGS) $(LDFLAGS)
+
+install: all
+	install -d $(prefix)/testsuite/psos
+	install -t $(prefix)/testsuite/psos $(TESTS)
+
+clean:
+	$(RM) $(TESTS) *~
+
+# Run the test suite. We pin all tests to CPU #0, so that SMP does not
+# alter the execution sequence we expect from them.
+test: all
+	@for t in $(TESTS); do \
+		echo -n $$t...; \
+		sudo LD_LIBRARY_PATH=$(solibs) $(VALGRIND) ./$$t --cpu-affinity=0 --silent && echo ok || echo BAD; \
+	done
+
+test/%: %
+	sudo LD_LIBRARY_PATH=$(solibs) $(VALGRIND) ./$(@F) --cpu-affinity=0 --silent && echo ok || echo BAD
+
+.PHONY: clean test
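+
+# Example invocations (install prefix hypothetical):
+#
+#   make DESTDIR=/opt/xenomai            # build all tests
+#   make DESTDIR=/opt/xenomai test       # run the whole suite
+#   make DESTDIR=/opt/xenomai test/rn-1  # run a single test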
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-1.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-1.c
new file mode 100644
index 0000000..00f4248
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-1.c
@@ -0,0 +1,61 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static u_long tid, qid;
+
+static void root_task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long msgbuf[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, a0 == 1);
+	traceobj_assert(&trobj, a1 == 2);
+	traceobj_assert(&trobj, a2 == 3);
+	traceobj_assert(&trobj, a3 == 4);
+
+	ret = q_send(qid, msgbuf);
+	traceobj_assert(&trobj, ret == SUCCESS);
+	msgbuf[0]++;
+	ret = q_send(qid, msgbuf);
+	traceobj_assert(&trobj, ret == SUCCESS);
+	msgbuf[0]++;
+	ret = q_send(qid, msgbuf);
+	traceobj_assert(&trobj, ret == SUCCESS);
+	ret = q_send(qid, msgbuf);
+	traceobj_assert(&trobj, ret == ERR_QFULL);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 }, _qid;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = q_create("QUEUE", 3, Q_LIMIT, &qid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = q_ident("QUEUE", 0, &_qid);
+	traceobj_assert(&trobj, ret == SUCCESS && _qid == qid);
+
+	ret = t_create("root", 1, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, root_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = q_delete(qid);
+	traceobj_assert(&trobj, ret == ERR_MATQDEL);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-2.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-2.c
new file mode 100644
index 0000000..48b4e61
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-2.c
@@ -0,0 +1,123 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 2, 7, 8, 9, 10, 11, 3, 4, 5, 6
+};
+
+static u_long tidA, tidB, qid;
+
+static void task_A(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long msgbuf[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, a0 == 1);
+	traceobj_assert(&trobj, a1 == 2);
+	traceobj_assert(&trobj, a2 == 3);
+	traceobj_assert(&trobj, a3 == 4);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = q_send(qid, msgbuf);
+	traceobj_assert(&trobj, ret == ERR_VARQ);
+	traceobj_mark(&trobj, 8);
+
+	ret = q_vsend(qid, msgbuf, sizeof(u_long[4]));
+	traceobj_assert(&trobj, ret == SUCCESS);
+	traceobj_mark(&trobj, 9);
+
+	msgbuf[0]++;
+	ret = q_vsend(qid, msgbuf, sizeof(u_long[4]));
+	traceobj_assert(&trobj, ret == SUCCESS);
+	traceobj_mark(&trobj, 10);
+
+	msgbuf[0]++;
+	ret = q_vsend(qid, msgbuf, sizeof(u_long[4]));
+	traceobj_assert(&trobj, ret == SUCCESS);
+	traceobj_mark(&trobj, 11);
+
+	traceobj_exit(&trobj);
+}
+
+static void task_B(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long _msgbuf[8], msglen;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, a0 == 1);
+	traceobj_assert(&trobj, a1 == 2);
+	traceobj_assert(&trobj, a2 == 3);
+	traceobj_assert(&trobj, a3 == 4);
+
+	traceobj_mark(&trobj, 1);
+
+	msglen = 0;
+	ret = q_vreceive(qid, Q_NOWAIT, 0, _msgbuf, sizeof(u_long[8]), &msglen);
+	traceobj_assert(&trobj, ret == ERR_NOMSG);
+	traceobj_mark(&trobj, 2);
+
+	ret = q_vreceive(qid, Q_WAIT, 0, _msgbuf, sizeof(u_long[8]), &msglen);
+	traceobj_assert(&trobj, ret == SUCCESS && msglen == sizeof(u_long[4]));
+	traceobj_assert(&trobj, _msgbuf[0] == 1);
+	traceobj_mark(&trobj, 3);
+
+	ret = q_vreceive(qid, Q_WAIT, 0, _msgbuf, sizeof(u_long[8]), &msglen);
+	traceobj_assert(&trobj, ret == SUCCESS && msglen == sizeof(u_long[4]));
+	traceobj_assert(&trobj, _msgbuf[0] == 2);
+	traceobj_mark(&trobj, 4);
+
+	ret = q_vreceive(qid, Q_WAIT, 10, _msgbuf, sizeof(u_long[8]), &msglen);
+	traceobj_assert(&trobj, ret == SUCCESS && msglen == sizeof(u_long[4]));
+	traceobj_assert(&trobj, _msgbuf[0] == 3);
+	traceobj_mark(&trobj, 5);
+
+	ret = q_vreceive(qid, Q_WAIT, 10, _msgbuf, sizeof(u_long[8]), &msglen);
+	traceobj_assert(&trobj, ret == ERR_TIMEOUT);
+	traceobj_mark(&trobj, 6);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = q_vcreate("VQUEUE", Q_LIMIT, 3, sizeof(u_long[4]), &qid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("TSKA", 21, 0, 0, 0, &tidA);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("TSKB", 20, 0, 0, 0, &tidB);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tidB, 0, task_B, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tidA, 0, task_A, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	ret = q_delete(qid);
+	traceobj_assert(&trobj, ret == ERR_VARQ);
+
+	ret = q_vdelete(qid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-3.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-3.c
new file mode 100644
index 0000000..942b1a3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/mq-3.c
@@ -0,0 +1,115 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 4, 7, 2, 5, 7, 2, 5, 7, 2, 3, 5, 6, 8
+};
+
+static u_long tidA, tidB, qid;
+
+static void task_A(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long msgbuf[4];
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, a0 == 1);
+	traceobj_assert(&trobj, a1 == 2);
+	traceobj_assert(&trobj, a2 == 3);
+	traceobj_assert(&trobj, a3 == 4);
+
+	traceobj_mark(&trobj, 1);
+
+	for (n = 0; n < 3; n++) {
+		ret = q_receive(qid, Q_WAIT, 10, msgbuf);
+		traceobj_mark(&trobj, 2);
+		traceobj_assert(&trobj, ret == SUCCESS);
+		traceobj_assert(&trobj, msgbuf[0] == n + 1);
+		traceobj_assert(&trobj, msgbuf[1] == n + 2);
+		traceobj_assert(&trobj, msgbuf[2] == n + 3);
+		traceobj_assert(&trobj, msgbuf[3] == n + 4);
+	}
+
+	traceobj_mark(&trobj, 3);
+
+	traceobj_exit(&trobj);
+}
+
+static void task_B(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long msgbuf[4];
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, a0 == 1);
+	traceobj_assert(&trobj, a1 == 2);
+	traceobj_assert(&trobj, a2 == 3);
+	traceobj_assert(&trobj, a3 == 4);
+
+	traceobj_mark(&trobj, 4);
+
+	for (n = 0; n < 3; n++) {
+		ret = q_receive(qid, Q_WAIT, 10, msgbuf);
+		traceobj_mark(&trobj, 5);
+		traceobj_assert(&trobj, ret == SUCCESS);
+		traceobj_assert(&trobj, msgbuf[0] == n + 1);
+		traceobj_assert(&trobj, msgbuf[1] == n + 2);
+		traceobj_assert(&trobj, msgbuf[2] == n + 3);
+		traceobj_assert(&trobj, msgbuf[3] == n + 4);
+	}
+
+	traceobj_mark(&trobj, 6);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 }, msgbuf[4], count;
+	int ret, n;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = q_create("QUEUE", Q_NOLIMIT, 0, &qid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("TSKA", 21, 0, 0, 0, &tidA);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("TSKB", 20, 0, 0, 0, &tidB);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tidA, 0, task_A, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tidB, 0, task_B, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	for (n = 0; n < 3; n++) {
+		msgbuf[0] = n + 1;
+		msgbuf[1] = n + 2;
+		msgbuf[2] = n + 3;
+		msgbuf[3] = n + 4;
+		count = 0;
+		traceobj_mark(&trobj, 7);
+		ret = q_broadcast(qid, msgbuf, &count);
+		traceobj_assert(&trobj, ret == SUCCESS && count == 2);
+	}
+
+	traceobj_mark(&trobj, 8);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	ret = q_delete(qid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/pt-1.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/pt-1.c
new file mode 100644
index 0000000..99f93ca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/pt-1.c
@@ -0,0 +1,50 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static char pt_mem[65536];
+
+int main(int argc, char *const argv[])
+{
+	u_long nbufs, ptid, _ptid, n;
+	void *buf, *lbuf;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = pt_create("PART", pt_mem, NULL, sizeof(pt_mem), 16, PT_NODEL, &ptid, &nbufs);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	for (n = 0, lbuf = NULL;; n++, lbuf = buf) {
+		ret = pt_getbuf(ptid, &buf);
+		if (ret) {
+			traceobj_assert(&trobj, ret == ERR_NOBUF);
+			break;
+		}
+		if (lbuf)
+			traceobj_assert(&trobj, (caddr_t)lbuf + 16 == (caddr_t)buf);
+		memset(buf, 0xaa, 16);
+	}
+
+	traceobj_assert(&trobj, nbufs == n);
+
+	ret = pt_delete(ptid);
+	traceobj_assert(&trobj, ret == ERR_BUFINUSE);
+
+	for (buf = lbuf; n > 0; n--, buf = (caddr_t)buf - 16) {
+		ret = pt_retbuf(ptid, buf);
+		traceobj_assert(&trobj, ret == SUCCESS);
+	}
+
+	ret = pt_ident("PART", 0, &_ptid);
+	traceobj_assert(&trobj, ret == SUCCESS && _ptid == ptid);
+
+	ret = pt_delete(ptid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/rn-1.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/rn-1.c
new file mode 100644
index 0000000..da136f3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/rn-1.c
@@ -0,0 +1,63 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static char rn_mem[65536];
+
+static u_long tid, rnid;
+
+static void alloc_task(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	u_long size, alloc_size = 0;
+	int ret, n;
+	void *buf;
+
+	traceobj_enter(&trobj);
+
+	srandom(0x11223344);
+
+	for (n = 0;; n++) {
+		size = (random() % (sizeof(rn_mem) / 8)) + 4;
+		ret = rn_getseg(rnid, size, RN_NOWAIT, 0, &buf);
+		if (ret) {
+			traceobj_assert(&trobj, ret == ERR_NOSEG);
+			break;
+		}
+		memset(buf, 0xaa, size);
+		alloc_size += size;
+	}
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 }, asize, _rnid;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = rn_create("REGION", rn_mem, sizeof(rn_mem),
+			32, RN_FIFO|RN_NODEL, &rnid, &asize);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, alloc_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	ret = rn_ident("REGION", &_rnid);
+	traceobj_assert(&trobj, ret == SUCCESS && _rnid == rnid);
+
+	ret = rn_delete(rnid);
+	traceobj_assert(&trobj, ret == ERR_SEGINUSE);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-1.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-1.c
new file mode 100644
index 0000000..61fa4b6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-1.c
@@ -0,0 +1,149 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	10, 13,
+	1, 14, 15, 2, 3, 4,
+	5, 6, 7, 8, 16, 17, 18,
+	9, 19
+};
+
+static u_long tidA, tidB, sem_id;
+
+static void task_A(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long tid, oldmode;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = t_mode(T_NOPREEMPT, T_NOPREEMPT, &oldmode);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = t_ident("TSKB", 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS && tid == tidB);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = t_resume(tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = t_mode(T_NOPREEMPT, 0, &oldmode);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = t_suspend(0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_exit(&trobj);
+}
+
+static void task_B(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long tid;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = sm_create("SEM", 0, SM_FIFO, &sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 13);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_ident("TSKA", 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS && tid == tidA);
+
+	traceobj_mark(&trobj, 14);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 15);
+
+	ret = t_suspend(0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 16);
+
+	ret = sm_p(sem_id, SM_WAIT, 10);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 17);
+
+	ret = sm_p(sem_id, SM_NOWAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 18);
+
+	ret = sm_p(sem_id, SM_WAIT, 100);
+	traceobj_assert(&trobj, ret == ERR_TIMEOUT);
+
+	traceobj_mark(&trobj, 19);
+
+	ret = t_resume(tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = t_create("TSKA", 20, 0, 0, 0, &tidA);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("TSKB", 21, 0, 0, 0, &tidB);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tidB, 0, task_B, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tidA, 0, task_A, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-2.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-2.c
new file mode 100644
index 0000000..a15bc38
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/sem-2.c
@@ -0,0 +1,68 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 2, 3, 5, 4, 6
+};
+
+static u_long tidA, sem_id;
+
+static void task_A(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = sm_create("SEM", 1, SM_FIFO, &sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = sm_p(sem_id, SM_NOWAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = sm_p(sem_id, SM_NOWAIT, 0);
+	traceobj_assert(&trobj, ret == ERR_NOSEM);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == ERR_SKILLD);
+
+	traceobj_mark(&trobj, 4);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = t_create("TSKA", 20, 0, 0, 0, &tidA);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tidA, 0, task_A, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = sm_delete(sem_id);
+	traceobj_assert(&trobj, ret == ERR_TATSDEL);
+
+	traceobj_mark(&trobj, 6);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-1.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-1.c
new file mode 100644
index 0000000..f4bb71b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-1.c
@@ -0,0 +1,38 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static u_long tid;
+
+static void root_task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, a0 == 1);
+	traceobj_assert(&trobj, a1 == 2);
+	traceobj_assert(&trobj, a2 == 3);
+	traceobj_assert(&trobj, a3 == 4);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = t_create("root", 1, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, root_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-2.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-2.c
new file mode 100644
index 0000000..d7f9fa0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-2.c
@@ -0,0 +1,104 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	8, 1, 9, 4, 10, 5, 11, 2, 6, 7
+};
+
+static u_long btid, ftid;
+
+static u_long sem_id;
+
+static void backgroundTask(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	unsigned int safety = 100000000, count = 0;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	while (--safety > 0)
+		count++;
+
+	traceobj_mark(&trobj, 3);
+
+	traceobj_exit(&trobj);
+}
+
+static void foregroundTask(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 5);
+
+	tm_wkafter(2);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = t_delete(btid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 7);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = sm_create("SEMA", 0, SM_PRIOR, &sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = t_create("BGND", 20, 0, 0, 0, &btid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(btid, 0, backgroundTask, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = t_create("FGND", 21, 0, 0, 0, &ftid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(ftid, 0, foregroundTask, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-3.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-3.c
new file mode 100644
index 0000000..d775f28
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-3.c
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static u_long tidA, tidB;
+
+int main(int argc, char *const argv[])
+{
+	u_long tid;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = t_create("TSKA", 20, 0, 0, 0, &tidA);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("TSKB", 21, 0, 0, 0, &tidB);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	tid = ~tidA;
+	ret = t_ident("TSKA", 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+	traceobj_assert(&trobj, tid == tidA);
+
+	tid = ~tidB;
+	ret = t_ident("TSKB", 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+	traceobj_assert(&trobj, tid == tidB);
+
+	ret = t_delete(tidA);
+	traceobj_assert(&trobj, ret == SUCCESS);
+	ret = t_ident("TSKA", 0, &tid);
+	traceobj_assert(&trobj, ret == ERR_OBJNF);
+
+	ret = t_ident("TSKB", 1, &tid);
+	traceobj_assert(&trobj, ret == ERR_NODENO);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-4.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-4.c
new file mode 100644
index 0000000..3bca002
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-4.c
@@ -0,0 +1,69 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 2, 3
+};
+
+static u_long tid;
+
+static void task(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	u_long regval;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	regval = ~0;
+	ret = t_getreg(0, 0, &regval);
+	traceobj_assert(&trobj, ret == SUCCESS && regval == 0);
+
+	ret = t_setreg(0, 0, 0xdeadbeef);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_setreg(0, 1024, 0);
+	traceobj_assert(&trobj, ret == ERR_REGNUM);
+
+	regval = 0;
+	ret = t_getreg(tid, 0, &regval);
+	traceobj_assert(&trobj, ret == SUCCESS && regval == 0xdeadbeef);
+
+	regval = 0;
+	ret = t_getreg(0, 0, &regval);
+	traceobj_assert(&trobj, ret == SUCCESS && regval == 0xdeadbeef);
+
+	ret = t_getreg(tid, 1024, &regval);
+	traceobj_assert(&trobj, ret == ERR_REGNUM);
+
+	traceobj_mark(&trobj, 2);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 3);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-5.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-5.c
new file mode 100644
index 0000000..e0124c1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-5.c
@@ -0,0 +1,103 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	8, 1, 9, 4, 10, 2, 11, 12, 3, 5, 13
+};
+
+static u_long btid, ftid;
+
+static u_long sem_id;
+
+static void backgroundTask(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
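+	/*
+	 * Suspend the foreground task while it pends on the semaphore:
+	 * it must not run, even after the semaphore is signaled, until
+	 * t_resume() below.
+	 */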
+	ret = t_suspend(ftid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	tm_wkafter(2);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = t_resume(ftid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 13);
+
+	traceobj_exit(&trobj);
+}
+
+static void foregroundTask(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 5);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = sm_create("SEMA", 0, SM_FIFO, &sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = t_create("BGND", 20, 0, 0, 0, &btid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(btid, 0, backgroundTask, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = t_create("FGND", 21, 0, 0, 0, &ftid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(ftid, 0, foregroundTask, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 12);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-6.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-6.c
new file mode 100644
index 0000000..13c618c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-6.c
@@ -0,0 +1,107 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	9, 1, 10, 3, 11, 4, 5, 6, 7, 2, 8, 12
+};
+
+static u_long btid, ftid;
+
+static u_long sem_id;
+
+static void backgroundTask(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	traceobj_exit(&trobj);
+}
+
+static void foregroundTask(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	u_long myprio, oldprio;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = sm_p(sem_id, SM_WAIT, 0);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 5);
+
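+	/* tid 0 with priority 0 just queries the caller's own priority. */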
+	ret = t_setpri(0, 0, &myprio);
+	traceobj_assert(&trobj, ret == SUCCESS && myprio == 21);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = t_setpri(btid, myprio, &oldprio);
+	traceobj_assert(&trobj, ret == SUCCESS && oldprio == 20);
+
+	traceobj_mark(&trobj, 7);
+
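+	/* Raising BGND above our own priority must preempt us immediately. */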
+	ret = t_setpri(btid, myprio + 1, &oldprio);
+	traceobj_assert(&trobj, ret == SUCCESS && oldprio == myprio);
+
+	traceobj_mark(&trobj, 8);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = sm_create("SEMA", 0, SM_PRIOR, &sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = t_create("BGND", 20, 0, 0, 0, &btid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(btid, 0, backgroundTask, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = t_create("FGND", 21, 0, 0, 0, &ftid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(ftid, 0, foregroundTask, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = sm_v(sem_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 12);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-7.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-7.c
new file mode 100644
index 0000000..8f5441a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-7.c
@@ -0,0 +1,64 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 10, 2, 3, 20, 4
+};
+
+static u_long tid1, tid2;
+
+static void task1(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 10);
+
+	traceobj_exit(&trobj);
+}
+
+static void task2(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 20);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = t_create("DUP", 20, 0, 0, 0, &tid1);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = t_start(tid1, 0, task1, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = t_create("DUP", 20, 0, 0, 0, &tid2);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = t_start(tid2, 0, task2, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 4);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-8.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-8.c
new file mode 100644
index 0000000..d7c8fb3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-8.c
@@ -0,0 +1,108 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <xeno_config.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 2, 3, 4, 5, 6, 7
+};
+
+static u_long tid1, tid2;
+
+static u_long count1, count2;
+
+static void do_work(u_long *counter, int mark)
+{
+	traceobj_mark(&trobj, mark);
+	tm_wkafter(2);
+
+	for (;;)
+		(*counter)++;
+}
+
+static void task1(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	traceobj_enter(&trobj);
+	do_work(&count1, 4);
+	traceobj_exit(&trobj);
+}
+
+static void task2(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	traceobj_enter(&trobj);
+	do_work(&count2, 5);
+	traceobj_exit(&trobj);
+}
+
+static void main_task(u_long a1, u_long a2, u_long a3, u_long a4)
+{
+	u_long args[] = { 1, 2, 3, 4 }, old;
+	int ret;
+
+	traceobj_mark(&trobj, 1);
+
+	ret = t_create("T1", 10, 0, 0, 0, &tid1);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_create("T2", 10, 0, 0, 0, &tid2);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = t_start(tid1, T_TSLICE, task1, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid2, T_TSLICE, task2, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 3);
+
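+	/* Clear the T_NOPREEMPT bit we were started with, saving the previous mode. */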
+	ret = t_mode(T_NOPREEMPT, 0, &old);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	tm_wkafter(2000);
+
+	traceobj_mark(&trobj, 6);
+
+	t_delete(tid1);
+	t_delete(tid2);
+	traceobj_mark(&trobj, 7);
+}
+
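+/*
+ * Full debug instrumentation slows everything down; accept a much
+ * looser fairness bound in that case.
+ */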
+#ifdef CONFIG_XENO_DEBUG_FULL
+#define threshold_quantum 50
+#else
+#define threshold_quantum 1000
+#endif
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 }, tid, delta, max;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	ret = t_create("MAIN", 50, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, T_NOPREEMPT, main_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
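+	/*
+	 * Both tasks spun at the same priority with round-robin
+	 * enabled, so their counts should differ by less than
+	 * 1/threshold_quantum of the larger one.
+	 */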
+	if (count1 < count2) {
+		delta = count2 - count1;
+		max = count2;
+	} else {
+		delta = count1 - count2;
+		max = count1;
+	}
+	traceobj_assert(&trobj, delta < max / threshold_quantum);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-9.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-9.c
new file mode 100644
index 0000000..1ae9929
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/task-9.c
@@ -0,0 +1,85 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+#define MAX_PRIO 95
+
+static struct traceobj trobj;
+
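+/* Test tasks merely sleep; the root task deletes them while they block. */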
+static void test_task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	traceobj_enter(&trobj);
+	tm_wkafter(1000000);
+	traceobj_exit(&trobj);
+}
+
+static void root_task(u_long increment, u_long a1, u_long a2, u_long a3)
+{
+	u_long args[] = { 1, 2, 3, 4 }, ret, tid;
+	int n;
+
+	traceobj_enter(&trobj);
+
+	for (n = 0; n < 512; n++) {
+		if (increment)
+			ret = t_create("TEST", (n % MAX_PRIO) + 2, 100000, 0, 0, &tid);
+		else
+			ret = t_create("TEST", MAX_PRIO - (n % MAX_PRIO) + 1, 100000, 0, 0, &tid);
+		traceobj_assert(&trobj, ret == SUCCESS);
+		ret = t_start(tid, T_PREEMPT, test_task, args);
+		traceobj_assert(&trobj, ret == SUCCESS);
+		ret = t_delete(tid);
+		traceobj_assert(&trobj, ret == SUCCESS);
+	}
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 }, ret, tid;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	/* Low priority root task, looping over increasing-priority test tasks. */
+	ret = t_create("root", 3, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, root_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	/* Low priority root task, looping over decreasing-priority test tasks. */
+	args[0] = 0;
+	ret = t_create("root", 3, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, root_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	/* High priority root task, looping over increasing-priority test tasks. */
+	args[0] = 1;
+	ret = t_create("root", 90, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, root_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	/* High priority root task, looping over decreasing-priority test tasks. */
+	args[0] = 0;
+	ret = t_create("root", 90, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, root_task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-1.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-1.c
new file mode 100644
index 0000000..b2f38f2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-1.c
@@ -0,0 +1,34 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+#define TEST_DATE  ((2008 << 16)|(4 << 8)|25) /* 4/25/2008 */
+#define TEST_TIME  ((11 << 16)|(17 << 8)|30)  /* 11:17:30 */
+#define TEST_TICKS 10
+
+static struct traceobj trobj;
+
+int main(int argc, char *const argv[])
+{
+	unsigned long date, time, ticks;
+	int ret, tries = 0;
+
+	traceobj_init(&trobj, argv[0], 0);
+
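+	/*
+	 * The second counter may roll over between tm_set() and
+	 * tm_get(); retry a few times before comparing.
+	 */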
+	for (;;) {
+		ret = tm_set(TEST_DATE, TEST_TIME, TEST_TICKS);
+		traceobj_assert(&trobj, ret == SUCCESS);
+		ret = tm_get(&date, &time, &ticks);
+		traceobj_assert(&trobj, ret == SUCCESS);
+		if (time == TEST_TIME)
+			break;
+		if (++tries > 3)
+			break;
+	}
+
+	traceobj_assert(&trobj, date == TEST_DATE);
+	traceobj_assert(&trobj, time == TEST_TIME);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-2.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-2.c
new file mode 100644
index 0000000..44340f5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-2.c
@@ -0,0 +1,65 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	5, 1, 2, 3, 3, 3, 3, 3, 4
+};
+
+static u_long tid;
+
+static u_long timer_id;
+
+static void task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long events;
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
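+	/* Arm a periodic timer posting event 0x1 every 200 ticks. */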
+	ret = tm_evevery(200, 0x1, &timer_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	for (n = 0; n < 5; n++) {
+		events = 0;
+		ret = ev_receive(0x1, EV_WAIT|EV_ALL, 0, &events);
+		traceobj_assert(&trobj, ret == SUCCESS && events == 0x1);
+		traceobj_mark(&trobj, 3);
+	}
+
+	traceobj_mark(&trobj, 4);
+
+	ret = tm_cancel(timer_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	traceobj_mark(&trobj, 5);
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-3.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-3.c
new file mode 100644
index 0000000..260a131
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-3.c
@@ -0,0 +1,75 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	7, 1, 2, 3, 4, 5, 6
+};
+
+static u_long tid;
+
+static u_long timer_id;
+
+static void task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long events;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
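+	/* One-shot timer: event 0x1 fires once, 200 ticks from now. */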
+	ret = tm_evafter(200, 0x1, &timer_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = ev_receive(0x3, EV_WAIT|EV_ALL, 300, &events);
+	traceobj_assert(&trobj, ret == ERR_TIMEOUT);
+	traceobj_mark(&trobj, 3);
+
+	ret = ev_receive(0x2, EV_NOWAIT|EV_ANY, 0, &events);
+	traceobj_assert(&trobj, ret == ERR_NOEVS);
+	traceobj_mark(&trobj, 4);
+
+	events = 0;
+	ret = ev_receive(0x1, EV_NOWAIT|EV_ALL, 0, &events);
+	traceobj_assert(&trobj, ret == SUCCESS && events == 0x1);
+	traceobj_mark(&trobj, 5);
+
+	events = 0;
+	ret = ev_receive(0x1, EV_WAIT|EV_ALL, 400, &events);
+	traceobj_assert(&trobj, ret == ERR_TIMEOUT);
+	traceobj_mark(&trobj, 6);
+
+	/*
+	 * The one-shot timer already fired and destroyed itself, so
+	 * this id is stale; Valgrind will bark at this one, which is
+	 * expected.
+	 */
+	ret = tm_cancel(timer_id);
+	traceobj_assert(&trobj, ret == ERR_BADTMID);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	traceobj_mark(&trobj, 7);
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-4.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-4.c
new file mode 100644
index 0000000..26697c2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-4.c
@@ -0,0 +1,71 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+#define TEST_DATE  ((2008 << 16)|(4 << 8)|25) /* 4/25/2008 */
+#define TEST_TIME  ((11 << 16)|(17 << 8)|30)  /* 11:17:30 */
+#define TEST_TICKS 0
+
+#define TRIG_DATE  ((2008 << 16)|(4 << 8)|25) /* 4/25/2008 */
+#define TRIG_TIME  ((11 << 16)|(17 << 8)|30)  /* 11:17:30 */
+#define TRIG_TICKS 400
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	4, 1, 2, 3
+};
+
+static u_long tid;
+
+static u_long timer_id;
+
+static void task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	u_long events;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = tm_set(TEST_DATE, TEST_TIME, TEST_TICKS);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
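+	/* Absolute timer: fires 400 ticks past the date just set. */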
+	ret = tm_evwhen(TRIG_DATE, TRIG_TIME, TRIG_TICKS, 0x1234, &timer_id);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = ev_receive(0x1030, EV_WAIT|EV_ANY, 800, &events);
+	traceobj_assert(&trobj, ret == SUCCESS && events == 0x1030);
+	traceobj_mark(&trobj, 3);
+
+	ret = tm_cancel(timer_id);
+	traceobj_assert(&trobj, ret == ERR_BADTMID);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	traceobj_mark(&trobj, 4);
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-5.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-5.c
new file mode 100644
index 0000000..0fca7d0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-5.c
@@ -0,0 +1,69 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+#define TEST_DATE  ((2008 << 16)|(4 << 8)|25) /* 4/25/2008 */
+#define TEST_TIME  ((11 << 16)|(17 << 8)|30)  /* 11:17:30 */
+#define TEST_TICKS 0
+
+#define WAKEUP_DATE  ((2008 << 16)|(4 << 8)|25) /* 4/25/2008 */
+#define WAKEUP_TIME  ((11 << 16)|(17 << 8)|33)  /* 11:17:33 */
+#define WAKEUP_TICKS 0
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 3, 2, 4
+};
+
+static u_long tid;
+
+static void task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	unsigned long date, time, ticks;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = tm_set(TEST_DATE, TEST_TIME, TEST_TICKS);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
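+	/* Sleep until an absolute date, three seconds past the one just set. */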
+	ret = tm_wkwhen(WAKEUP_DATE, WAKEUP_TIME, WAKEUP_TICKS);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = tm_get(&date, &time, &ticks);
+	traceobj_assert(&trobj, ret == SUCCESS);
+	traceobj_assert(&trobj, date == WAKEUP_DATE);
+	traceobj_assert(&trobj, time == WAKEUP_TIME);
+
+	traceobj_mark(&trobj, 4);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	traceobj_mark(&trobj, 1);
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	traceobj_mark(&trobj, 2);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-6.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-6.c
new file mode 100644
index 0000000..9debc49
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-6.c
@@ -0,0 +1,44 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static void task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	unsigned long timer_id;
+	int i;
+
+	traceobj_enter(&trobj);
+
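+	/* Arm a batch of one-shot timers, let them fire, then delete ourselves. */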
+	for (i = 0; i < 100; i++)
+		tm_evafter(20, 0x1, &timer_id);
+
+	tm_wkafter(100);
+
+	t_delete(0);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	unsigned long tid;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	tm_wkafter(10);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-7.c b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-7.c
new file mode 100644
index 0000000..202985d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/testsuite/tm-7.c
@@ -0,0 +1,42 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <psos/psos.h>
+
+static struct traceobj trobj;
+
+static void task(u_long a0, u_long a1, u_long a2, u_long a3)
+{
+	unsigned long timer_id;
+	int i;
+
+	traceobj_enter(&trobj);
+
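+	/* Arm a batch of one-shot timers, then delete ourselves while they are pending. */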
+	for (i = 0; i < 100; i++)
+		tm_evafter(20, 0x1, &timer_id);
+
+	t_delete(0);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	u_long args[] = { 1, 2, 3, 4 };
+	unsigned long tid;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	ret = t_create("TASK", 20, 0, 0, 0, &tid);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	ret = t_start(tid, 0, task, args);
+	traceobj_assert(&trobj, ret == SUCCESS);
+
+	tm_wkafter(10);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/tm.c b/kernel/xenomai-v3.2.4/lib/psos/tm.c
new file mode 100644
index 0000000..c315335
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/tm.c
@@ -0,0 +1,352 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <errno.h>
+#include <memory.h>
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/clockobj.h>
+#include <psos/psos.h>
+#include "task.h"
+#include "tm.h"
+
+struct clockobj psos_clock;
+
+struct timespec psos_rrperiod;
+
+#define tm_magic	0x8181fcfc
+
+static struct psos_tm *get_tm(u_long tmid, int *err_r)
+{
+	struct psos_tm *tm = (struct psos_tm *)tmid;
+
+	/*
+	 * Unlike most other pSOS objects (except partitions), the
+	 * timer control block is NOT laid into the main heap, so we
+	 * don't have to apply mainheap_deref() to convert timer
+	 * handles to pointers; a plain cast is enough.  (This said,
+	 * mainheap_deref() is smart enough to deal with private
+	 * pointers, but we just avoid useless overhead.)
+	 */
+	if (tm == NULL || ((uintptr_t)tm & (sizeof(uintptr_t)-1)) != 0)
+		goto objid_error;
+
+	if (tm->magic != tm_magic)
+		goto objid_error;
+
+	if (timerobj_lock(&tm->tmobj))
+		goto objid_error;
+
+	return tm;
+
+objid_error:
+	*err_r = ERR_BADTMID;
+
+	return NULL;
+}
+
+static void delete_timer(struct psos_tm *tm, int signal_p)
+{
+	struct psos_task *task;
+	int ret;
+
+	tm->magic = ~tm_magic;	/* Prevent further reference. */
+	timerobj_destroy(&tm->tmobj);
+	task = get_psos_task(tm->tid, &ret);
+	if (task) {
+		pvlist_remove(&tm->link);
+		/*
+		 * Send out the pending event set carried by the
+		 * deleted timer to the owner thread if requested.
+		 */
+		if (signal_p)
+			__ev_send(task, tm->events);
+		put_psos_task(task);
+	}
+	pvfree(tm);
+}
+
+static void post_event_once(struct timerobj *tmobj)
+{
+	struct psos_tm *tm = container_of(tmobj, struct psos_tm, tmobj);
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	ret = timerobj_lock(&tm->tmobj);
+	if (ret == 0)
+		delete_timer(tm, 1);
+
+	CANCEL_RESTORE(svc);
+}
+
+static void post_event_periodic(struct timerobj *tmobj)
+{
+	struct psos_tm *tm = container_of(tmobj, struct psos_tm, tmobj);
+	ev_send(tm->tid, tm->events);
+}
+
+static u_long start_evtimer(u_long events,
+			    struct itimerspec *it, u_long *tmid_r)
+{
+	void (*handler)(struct timerobj *tmobj);
+	struct psos_task *current;
+	struct psos_tm *tm;
+	int ret;
+
+	tm = pvmalloc(sizeof(*tm));
+	if (tm == NULL)
+		return ERR_NOTIMERS;
+
+	pvholder_init(&tm->link);
+	tm->events = events;
+	tm->magic = tm_magic;
+
+	current = get_psos_task_or_self(0, &ret);
+	if (current == NULL) {
+		pvfree(tm);
+		return ret;
+	}
+
+	tm->tid = mainheap_ref(current, u_long);
+	pvlist_append(&tm->link, &current->timer_list);
+
+	ret = timerobj_init(&tm->tmobj);
+	/*
+	 * Make sure to queue fully built timers only, by holding the
+	 * task lock until we are back from timerobj_init(), so that
+	 * calling timerobj_destroy() from the finalizer will always
+	 * be a valid operation for all queue members.
+	 */
+	put_psos_task(current);
+	if (ret)
+		goto fail;
+
+	handler = post_event_periodic;
+	if (it->it_interval.tv_sec == 0 &&
+	    it->it_interval.tv_nsec == 0)
+		handler = post_event_once;
+
+	*tmid_r = (u_long)tm;
+
+	timerobj_lock(&tm->tmobj);
+
+	ret = timerobj_start(&tm->tmobj, handler, it);
+	if (ret) {
+		timerobj_destroy(&tm->tmobj);
+	fail:
+		pvlist_remove(&tm->link);
+		pvfree(tm);
+		return ERR_NOTIMERS;
+	}
+
+	return SUCCESS;
+}
+
+u_long tm_evafter(u_long ticks, u_long events, u_long *tmid_r)
+{
+	struct itimerspec it;
+	struct service svc;
+	int ret;
+
+	it.it_interval.tv_sec = 0;
+	it.it_interval.tv_nsec = 0;
+
+	CANCEL_DEFER(svc);
+	clockobj_ticks_to_timeout(&psos_clock, ticks, &it.it_value);
+	ret = start_evtimer(events, &it, tmid_r);
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long tm_evevery(u_long ticks, u_long events, u_long *tmid_r)
+{
+	struct itimerspec it;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+	clockobj_ticks_to_timeout(&psos_clock, ticks, &it.it_value);
+	clockobj_ticks_to_timespec(&psos_clock, ticks, &it.it_interval);
+	ret = start_evtimer(events, &it, tmid_r);
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+static int date_to_tmstruct(u_long date, u_long time, u_long ticks,
+			    struct tm *tm)
+{
+	if (ticks > clockobj_get_frequency(&psos_clock))
+		return ERR_ILLTICKS;
+
+	memset(tm, 0, sizeof(*tm));
+
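+	/*
+	 * time is packed as (hour << 16) | (min << 8) | sec, and date
+	 * as (year << 16) | (month << 8) | day.
+	 */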
+	tm->tm_sec = time & 0xff;
+	tm->tm_min = (time >> 8) & 0xff;
+	tm->tm_hour = time >> 16;
+	if (tm->tm_sec > 59 || tm->tm_min > 59 || tm->tm_hour > 23)
+		return ERR_ILLTIME;
+
+	/*
+	 * XXX: we want the calendar time to use the time(2) epoch,
+	 * i.e. 00:00:00 UTC, January 1, 1970.
+	 */
+
+	tm->tm_mday = date & 0xff;
+	tm->tm_mon = ((date >> 8) & 0xff) - 1;
+	tm->tm_year = (date >> 16) - 1900;
+	if (tm->tm_mday < 1 || tm->tm_mday > 31 ||
+	    tm->tm_mon > 11 || tm->tm_year < 70)
+		return ERR_ILLDATE;
+
+	return SUCCESS;
+}
+
+static int date_to_timespec(u_long date, u_long time, u_long ticks,
+			    struct timespec *ts)
+{
+	struct tm tm;
+	int ret;
+
+	ret = date_to_tmstruct(date, time, ticks, &tm);
+	if (ret)
+		return ret;
+
+	clockobj_caltime_to_timeout(&psos_clock, &tm, ticks, ts);
+
+	return SUCCESS;
+}
+
+u_long tm_evwhen(u_long date, u_long time, u_long ticks,
+		 u_long events, u_long *tmid_r)
+{
+	struct itimerspec it;
+	struct service svc;
+	int ret;
+
+	it.it_interval.tv_sec = 0;
+	it.it_interval.tv_nsec = 0;
+
+	CANCEL_DEFER(svc);
+
+	ret = date_to_timespec(date, time, ticks, &it.it_value);
+	if (ret == SUCCESS)
+		ret = start_evtimer(events, &it, tmid_r);
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long tm_cancel(u_long tmid)
+{
+	struct psos_tm *tm;
+	struct service svc;
+	int ret = SUCCESS;
+
+	CANCEL_DEFER(svc);
+
+	tm = get_tm(tmid, &ret);
+	if (tm)
+		delete_timer(tm, 0);
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long tm_wkafter(u_long ticks)
+{
+	struct timespec rqt;
+	struct service svc;
+
+	if (ticks == 0) {
+		sched_yield();	/* Manual round-robin. */
+		return SUCCESS;
+	}
+
+	CANCEL_DEFER(svc);
+	clockobj_ticks_to_timeout(&psos_clock, ticks, &rqt);
+	CANCEL_RESTORE(svc);
+	threadobj_sleep(&rqt);
+
+	return SUCCESS;
+}
+
+u_long tm_wkwhen(u_long date, u_long time, u_long ticks)
+{
+	struct timespec rqt;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+	ret = date_to_timespec(date, time, ticks, &rqt);
+	CANCEL_RESTORE(svc);
+	if (ret)
+		return ret;
+
+	threadobj_sleep(&rqt);
+
+	return SUCCESS;
+}
+
+u_long tm_set(u_long date, u_long time, u_long ticks)
+{
+	struct service svc;
+	struct tm tm;
+	ticks_t t;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	ret = date_to_tmstruct(date, time, ticks, &tm);
+	if (ret)
+		goto out;
+
+	clockobj_caltime_to_ticks(&psos_clock, &tm, ticks, &t);
+	clockobj_set_date(&psos_clock, t);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+u_long tm_get(u_long *date_r, u_long *time_r, u_long *ticks_r)
+{
+	struct service svc;
+	struct tm tm;
+	ticks_t t;
+
+	CANCEL_DEFER(svc);
+	clockobj_get_date(&psos_clock, &t);
+	CANCEL_RESTORE(svc);
+	clockobj_ticks_to_caltime(&psos_clock, t, &tm, ticks_r);
+	*date_r = ((tm.tm_year + 1900) << 16) | ((tm.tm_mon + 1) << 8) | tm.tm_mday;
+	*time_r = (tm.tm_hour << 16) | (tm.tm_min << 8) | tm.tm_sec;
+
+	return SUCCESS;
+}
+
+u_long tm_getm(unsigned long long *ns_r)
+{
+	*ns_r = clockobj_tsc_to_ns(clockobj_get_tsc());
+	return SUCCESS;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/psos/tm.h b/kernel/xenomai-v3.2.4/lib/psos/tm.h
new file mode 100644
index 0000000..49e9793
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/psos/tm.h
@@ -0,0 +1,39 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _PSOS_TM_H
+#define _PSOS_TM_H
+
+#include <sys/types.h>
+#include <time.h>
+#include <boilerplate/list.h>
+#include <copperplate/timerobj.h>
+
+struct psos_tm {
+	unsigned int magic;		/* Must be first. */
+	u_long tid;
+	u_long events;
+	struct timerobj tmobj;
+	struct pvholder link;
+};
+
+extern struct clockobj psos_clock;
+
+extern struct timespec psos_rrperiod;
+
+#endif /* _PSOS_TM_H */
diff --git a/kernel/xenomai-v3.2.4/lib/smokey/COPYING b/kernel/xenomai-v3.2.4/lib/smokey/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/smokey/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/smokey/Makefile.am b/kernel/xenomai-v3.2.4/lib/smokey/Makefile.am
new file mode 100644
index 0000000..2674e32
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/smokey/Makefile.am
@@ -0,0 +1,13 @@
+lib_LTLIBRARIES = libsmokey@CORE@.la
+
+libsmokey@CORE@_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 0:0:0
+libsmokey@CORE@_la_LIBADD = @XENO_CORE_LDADD@
+
+libsmokey@CORE@_la_SOURCES =	\
+	helpers.c	\
+	init.c
+
+libsmokey@CORE@_la_CPPFLAGS =			\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/include		\
+	-I$(top_srcdir)/lib
diff --git a/kernel/xenomai-v3.2.4/lib/smokey/helpers.c b/kernel/xenomai-v3.2.4/lib/smokey/helpers.c
new file mode 100644
index 0000000..9fc4236
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/smokey/helpers.c
@@ -0,0 +1,397 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdio.h>
+#include <stdarg.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <string.h>
+#include <error.h>
+#include <errno.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <boilerplate/ancillaries.h>
+#include <smokey/smokey.h>
+
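+/*
+ * Parse a "name=<integer>" word from the command line. Returns
+ * non-zero if @s matches the declared argument, storing the
+ * converted value into the descriptor.
+ */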
+int smokey_int(const char *s, struct smokey_arg *arg)
+{
+	char *name, *p;
+	int ret;
+
+	ret = sscanf(s, "%m[_a-z]=%m[^\n]", &name, &p);
+	if (ret != 2 || !(isdigit(*p) || *p == '-'))
+		return 0;
+
+	ret = !strcmp(name, arg->name);
+	if (ret)
+		arg->u.n_val = atoi(p);
+
+	free(p);
+	free(name);
+
+	return ret;
+}
+
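+/*
+ * Parse a boolean argument, given either as "name=<0/1>" or as a
+ * bare "name" token, which implies true.
+ */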
+int smokey_bool(const char *s, struct smokey_arg *arg)
+{
+	int ret;
+
+	ret = smokey_int(s, arg);
+	if (ret) {
+		arg->u.n_val = !!arg->u.n_val;
+		return 1;
+	}
+
+	if (strcmp(s, arg->name) == 0) {
+		arg->u.n_val = 1;
+		return 1;
+	}
+
+	return 0;
+}
+
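+/*
+ * Parse a "name=<string>" argument. On match, the heap-allocated
+ * value is handed over to the argument descriptor.
+ */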
+int smokey_string(const char *s, struct smokey_arg *arg)
+{
+	char *name, *p;
+	int ret;
+
+	ret = sscanf(s, "%m[_a-z]=%m[^\n]", &name, &p);
+	if (ret != 2)
+		return 0;
+
+	ret = !strcmp(name, arg->name);
+	if (ret)
+		arg->u.s_val = p;
+	else
+		free(p);
+
+	free(name);
+
+	return ret;
+}
+
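+/*
+ * Parse a "name=<size>" argument, converting the value with
+ * get_mem_size(). A zero size is treated as no match.
+ */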
+int smokey_size(const char *s, struct smokey_arg *arg)
+{
+	char *name, *p;
+	int ret;
+
+	ret = sscanf(s, "%m[_a-z]=%m[^\n]", &name, &p);
+	if (ret != 2)
+		return 0;
+
+	ret = !strcmp(name, arg->name);
+	if (ret) {
+		arg->u.l_val = get_mem_size(p);
+		if (arg->u.l_val == 0)
+			ret = 0;
+	}
+
+	free(p);
+	free(name);
+
+	return ret;
+}
+
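+/*
+ * Match every declared argument of @t against the command line,
+ * running the type-specific parser on each word until one matches.
+ * Returns the number of arguments matched.
+ */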
+int smokey_parse_args(struct smokey_test *t,
+		      int argc, char *const argv[])
+{
+	int matched = 0, n, ac;
+	struct smokey_arg *arg;
+
+	for (arg = t->args, ac = 0;
+	     arg->name && ac < t->nargs; arg++, ac++) {
+		for (n = 1; n < argc; n++) {
+			arg->matched = !!arg->parser(argv[n], arg);
+			if (arg->matched) {
+				matched++;
+				break;
+			}
+		}
+	}
+
+	return matched;
+}
+
+struct smokey_arg *smokey_lookup_arg(struct smokey_test *t,
+				     const char *name)
+{
+	struct smokey_arg *arg = NULL;
+	int ac;
+
+	for (arg = t->args, ac = 0;
+	     arg->name && ac < t->nargs; arg++, ac++) {
+		if (strcmp(arg->name, name) == 0)
+			return arg;
+	}
+
+	/* Assume this is fatal. */
+	panic("test %s has no argument \"%s\"",
+	      t->name, name);
+}
+
+void smokey_note(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+
+	if (smokey_verbose_mode) {
+		__RT(vfprintf(stdout, fmt, ap));
+		__RT(fprintf(stdout, "\n"));
+	}
+
+	va_end(ap);
+}
+
+void smokey_vatrace(const char *fmt, va_list ap)
+{
+	if (smokey_verbose_mode > 1) {
+		__RT(vfprintf(stdout, fmt, ap));
+		__RT(fprintf(stdout, "\n"));
+	}
+}
+
+void smokey_trace(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	smokey_vatrace(fmt, ap);
+	va_end(ap);
+}
+
+void __smokey_warning(const char *file, int lineno,
+		      const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+
+	if (smokey_verbose_mode) {
+		__RT(fprintf(stderr, "%s:%d, ", basename(file), lineno));
+		__RT(vfprintf(stderr, fmt, ap));
+		__RT(fprintf(stderr, "\n"));
+	}
+
+	va_end(ap);
+}
+
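+/*
+ * A smokey barrier is a one-shot synchronization object: waiters
+ * block until smokey_barrier_release() marks it as signaled, after
+ * which all waits return immediately.
+ */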
+int smokey_barrier_init(struct smokey_barrier *b)
+{
+	pthread_mutexattr_t mattr;
+	pthread_condattr_t cattr;
+	int ret;
+
+	b->signaled = 0;
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_NORMAL);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_NONE);
+	ret = __RT(pthread_mutex_init(&b->lock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		return ret;
+
+	pthread_condattr_init(&cattr);
+	pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __RT(pthread_cond_init(&b->barrier, &cattr));
+	pthread_condattr_destroy(&cattr);
+	if (ret)
+		__RT(pthread_mutex_destroy(&b->lock));
+
+	return ret;
+}
+
+void smokey_barrier_destroy(struct smokey_barrier *b)
+{
+	__RT(pthread_cond_destroy(&b->barrier));
+	__RT(pthread_mutex_destroy(&b->lock));
+}
+
+int smokey_barrier_wait(struct smokey_barrier *b)
+{
+	int ret = 0;
+
+	__RT(pthread_mutex_lock(&b->lock));
+
+	while (!b->signaled) {
+		ret = __RT(pthread_cond_wait(&b->barrier, &b->lock));
+		if (ret)
+			break;
+	}
+
+	__RT(pthread_mutex_unlock(&b->lock));
+
+	return ret;
+}
+
+int smokey_barrier_timedwait(struct smokey_barrier *b, struct timespec *ts)
+{
+	int ret = 0;
+
+	__RT(pthread_mutex_lock(&b->lock));
+
+	while (!b->signaled) {
+		ret = __RT(pthread_cond_timedwait(&b->barrier,
+						  &b->lock, ts));
+		if (ret)
+			break;
+	}
+
+	__RT(pthread_mutex_unlock(&b->lock));
+
+	return ret;
+}
+
+void smokey_barrier_release(struct smokey_barrier *b)
+{
+	__RT(pthread_mutex_lock(&b->lock));
+	b->signaled = 1;
+	__RT(pthread_cond_broadcast(&b->barrier));
+	__RT(pthread_mutex_unlock(&b->lock));
+}
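+
+/*
+ * Minimal usage sketch (hypothetical caller code, not part of this
+ * file): a test waits until a worker thread signals readiness.
+ *
+ *	struct smokey_barrier b;
+ *
+ *	smokey_barrier_init(&b);
+ *	... start worker, which calls smokey_barrier_release(&b) ...
+ *	smokey_barrier_wait(&b);
+ *	smokey_barrier_destroy(&b);
+ */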
+
+int smokey_fork_exec(const char *path, const char *arg)
+{
+	int wstatus = 0;
+	pid_t pid;
+
+	pid = do_fork();
+	switch (pid) {
+	case -1:
+		error(1, errno, "fork/vfork");
+	case 0:
+		execlp(path, arg, NULL);
+		error(1, errno, "execl %s", path);
+	default:
+		waitpid(pid, &wstatus, 0);
+	}
+
+	if (WIFEXITED(wstatus))
+		return WEXITSTATUS(wstatus);
+
+	if (WIFSIGNALED(wstatus))
+		fprintf(stderr, "%s %s\n",
+			strsignal(WTERMSIG(wstatus)),
+			WCOREDUMP(wstatus) ? "(core dumped)" : "");
+	return 1;
+}
+
+#define SMOKEY_MOD_NUM       32
+static char *smokey_modules[SMOKEY_MOD_NUM];
+
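+/*
+ * Load a kernel module unless it is already present, remembering the
+ * modules we loaded ourselves so that smokey_rmmod() only unloads
+ * those.
+ */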
+int smokey_modprobe(const char *name, bool silent)
+{
+	char buffer[128];
+	int err, len, i, midx = -1;
+	FILE *fp;
+
+	if (!name)
+		return -EINVAL;
+
+	fp = fopen("/proc/modules", "r");
+	if (fp == NULL)
+		return -errno;
+
+	len = strlen(name);
+
+	while (fgets(buffer, sizeof(buffer), fp)) {
+		if (strncmp(buffer, name, len) == 0 &&
+		    len < sizeof(buffer) && buffer[len] == ' ') {
+			smokey_trace("%s module already loaded", name);
+			fclose(fp);
+			return 0;
+		}
+	}
+
+	fclose(fp);
+
+	for (i = 0; i < SMOKEY_MOD_NUM; i++) {
+		if (!smokey_modules[i]) {
+			midx = i;
+			break;
+		}
+	}
+
+	if (midx < 0)
+		return -EFAULT;
+
+	smokey_trace("%s module not there: modprobing", name);
+
+	err = smokey_check_errno(
+		snprintf(buffer, sizeof(buffer), "modprobe %s %s", name,
+			 silent ? "2>/dev/null" : ""));
+	if (err < 0)
+		return err;
+
+	err = smokey_check_errno(system(buffer));
+	if (err < 0)
+		return err;
+
+	if (!WIFEXITED(err) || WEXITSTATUS(err) != 0) {
+		if (!silent)
+			smokey_warning("%s: abnormal exit", buffer);
+		return -EINVAL;
+	}
+
+	smokey_modules[midx] = strdup(name);
+	return err;
+}
+
+int smokey_rmmod(const char *name)
+{
+	char buffer[128];
+	int i, midx = -1, err;
+
+	if (!name)
+		return -EINVAL;
+
+	for (i = 0; i < SMOKEY_MOD_NUM; i++) {
+		if (smokey_modules[i] && !strcmp(smokey_modules[i], name)) {
+			midx = i;
+			break;
+		}
+	}
+
+	if (midx < 0) {
+		smokey_trace("%s module was there on entry, keeping it", name);
+		return 0;
+	}
+
+	smokey_trace("unloading %s module", name);
+
+	err = smokey_check_errno(
+		snprintf(buffer, sizeof(buffer), "rmmod %s", name));
+	if (err < 0)
+		return err;
+
+	err = smokey_check_errno(system(buffer));
+	if (err < 0)
+		return err;
+
+	if (!WIFEXITED(err) || WEXITSTATUS(err) != 0) {
+		smokey_warning("%s: abnormal exit", buffer);
+		return -EINVAL;
+	}
+
+	free(smokey_modules[midx]);
+	smokey_modules[midx] = NULL;
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/smokey/init.c b/kernel/xenomai-v3.2.4/lib/smokey/init.c
new file mode 100644
index 0000000..e1a133e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/smokey/init.c
@@ -0,0 +1,577 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdio.h>
+#include <time.h>
+#include <stdlib.h>
+#include <ctype.h>
+#include <unistd.h>
+#include <malloc.h>
+#include <string.h>
+#include <errno.h>
+#include <getopt.h>
+#include <fnmatch.h>
+#include <boilerplate/list.h>
+#include <boilerplate/ancillaries.h>
+#include "copperplate/internal.h"
+#include <xenomai/init.h>
+#include <xenomai/tunables.h>
+#include <smokey/smokey.h>
+
+/**
+ * @defgroup smokey Smokey API
+ *
+ * A simple infrastructure for writing and running smoke tests.
+ *
+ * Smokey is based on the Copperplate API, and is therefore available
+ * on both the single-kernel and dual-kernel Xenomai configurations.
+ *
+ * The API provides a set of services for declaring any number of test
+ * plugins, embodied in a test program. Each plugin usually
+ * implements a single smoke test, checking a particular feature of
+ * interest. Each plugin present in the running executable is
+ * automatically detected by the Smokey init routine. In addition, the
+ * Smokey API parses all arguments and options passed on the command
+ * line to the executable, triggering pre-defined actions; these
+ * options are therefore automatically recognized by all programs
+ * linked against the Smokey library.
+ *
+ * @par Writing smoke tests with Smokey
+ *
+ * A smoke test is composed of a routine which implements the test
+ * code, and a set of runtime settings/attributes for running such
+ * code. The routine prototype shall be:
+ *
+ * @code
+ * int run_<test_name>(struct smokey_test *t, int argc, char *const argv[])
+ * @endcode
+ *
+ * where @a t refers to the Smokey test descriptor, and @a argc, @a
+ * argv are the argument count and vector, expunged of all the inner
+ * options which may have been previously interpreted by the Smokey
+ * API and inner layers (such as Copperplate).
+ *
+ * The test routine should return zero on success, or any negated
+ * POSIX error code to indicate failure to the test driver
+ * (e.g. -EINVAL if some value is found to be wrong).
+ *
+ * The Smokey API provides the services to declare a complete test
+ * (named @b foo in this example) as follows:
+ *
+ * @code
+ * #include <smokey/smokey.h>
+ *
+ * smokey_test_plugin(foo, // test name
+ *                    SMOKEY_ARGLIST( // argument list
+ *			      	SMOKEY_INT(some_integer),
+ *			      	SMOKEY_STRING(some_string),
+ *			      	SMOKEY_BOOL(some_boolean),
+ *		      ),
+ *                    // description
+ *		      "A dummy Smokey-based test plugin\n"
+ *		      "\taccepting three optional arguments:\n"
+ *		      "\tsome_integer=<value>\n"
+ *		      "\tsome_string=<string>\n"
+ *		      "\tsome_boolean[=0/1]\n"
+ * );
+ *
+ * static int run_foo(struct smokey_test *t, int argc, char *const argv[])
+ * {
+ *	int i_arg = 0, nargs;
+ *	char *s_arg = NULL;
+ *	bool b_arg = false;
+ *
+ *	nargs = smokey_parse_args(t, argc, argv);
+ *
+ *	if (SMOKEY_ARG_ISSET(foo, some_integer))
+ *		i_arg = SMOKEY_ARG_INT(foo, some_integer);
+ *	if (SMOKEY_ARG_ISSET(foo, some_string))
+ *		s_arg = SMOKEY_ARG_STRING(foo, some_string);
+ *	if (SMOKEY_ARG_ISSET(foo, some_boolean))
+ *		b_arg = SMOKEY_ARG_BOOL(foo, some_boolean);
+ *
+ *	return run_some_hypothetical_smoke_test_code(i_arg, s_arg, b_arg);
+ * }
+ * @endcode
+ *
+ * As illustrated, a smoke test is at least composed of a test plugin
+ * descriptor (i.e. @a smokey_test_plugin()), and a run handler named
+ * after the test.
+ *
+ * @par Test arguments
+ *
+ * Smokey recognizes three argument declarators, namely:
+ * SMOKEY_INT(name) for a C (signed) integer, SMOKEY_BOOL(name) for a
+ * boolean value and SMOKEY_STRING(name) for a character string.
+ *
+ * Each argument can be passed to the test code as a name=value pair,
+ * where @a name should match one of the declarators.  Before the
+ * test-specific arguments can be accessed, a call to
+ * smokey_parse_args() must be issued by the test code, passing the
+ * parameters received in the run handler. This routine returns the
+ * number of arguments found on the command line matching an
+ * entry in SMOKEY_ARGLIST().
+ *
+ * Once smokey_parse_args() has returned with a non-zero value, each
+ * argument can be checked individually for presence. If a valid
+ * argument was matched on the command line,
+ * SMOKEY_ARG_ISSET(test_name, arg_name) returns non-zero. In that
+ * case, its value can be retrieved by a similar call to
+ * SMOKEY_ARG_INT(test_name, arg_name), SMOKEY_ARG_STRING(test_name,
+ * arg_name) or SMOKEY_ARG_BOOL(test_name, arg_name).
+ *
+ * In the above example, passing "some_integer=3" on the command line of
+ * any program implementing such Smokey-based test would cause the
+ * variable i_arg to receive "3" as a value.
+ *
+ * @par Pre-defined Smokey options
+ *
+ * Any program linked against the Smokey API implicitly recognizes the
+ * following options:
+ *
+ * - --list[=<id[,id...]>] dumps the list of tests implemented in the
+ *   program to stdout. This list may be restricted to the tests
+ *   matching the optional glob(3)-style pattern (see --run). The
+ *   information given includes the description strings provided in
+ *   the plugin declarators (smokey_test_plugin()).  The position and
+ *   symbolic name of each test is also issued, which may be used in
+ *   id specifications with the --run option (see below).
+ *
+ * @note Test positions may vary depending on changes to the host
+ * program, such as adding or removing other tests; the symbolic name,
+ * however, is stable and identifies each test uniquely.
+ *
+ * - --run[=<id[,id...]>] selects the tests to be run, determining the
+ *   active test list among the overall set of tests detected in the
+ *   host program.  The test driver code (e.g. implementing a test
+ *   harness program on top of Smokey) may then iterate over the @a
+ *   smokey_test_list for accessing each active test individually, in
+ *   the enumeration order specified by the user (Use
+ *   for_each_smokey_test() for that).
+ *
+ *   If no argument is passed to --run, Smokey assumes that all tests
+ *   detected in the current program should be picked, filling @a
+ *   smokey_test_list with tests by increasing position order.
+ *
+ *   Otherwise, id may be a test position, a symbolic name, or a range
+ *   thereof delimited by a dash character. A symbolic name may be
+ *   matched using a glob(3)-style pattern.
+ *
+ *   id specification may be:
+ *
+ *   - 0-9, picks tests #0 to #9
+ *   - -3, picks tests #0 to #3
+ *   - 5-, picks tests #5 to the highest possible test position
+ *   - 2-0, picks tests #2 to #0, in decreasing order
+ *   - foo, picks test foo only
+ *   - 0,1,foo- picks tests #0, #1, and any test from foo up to the
+ *     last test defined
+ *   - fo* picks any test with a name starting with "fo"
+ *
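+ *   For example, with a hypothetical Smokey-based program (binary
+ *   name and test names are illustrative only):
+ *
+ *   @code
+ *   $ ./smokey-app --run=0-2,timer*,9
+ *   @endcode
+ *
+ *   would pick tests #0 to #2, then any test whose name starts with
+ *   "timer", then test #9, in that order.
+ *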
+ * - --exclude=<id[,id...]> excludes the given tests from the test
+ *   list. The format of the argument is identical to the one accepted
+ *   by the --run option.
+ *
+ * - --keep-going sets the boolean flag @a smokey_keep_going to a
+ *   non-zero value, indicating to the test driver that receiving a
+ *   failure code from a smoke test should not abort the test loop.
+ *   This flag is not otherwise interpreted by the Smokey API.
+ *
+ * - --verbose[=level] sets the integer @a smokey_verbose_mode to a
+ *   non-zero value, which should be interpreted by all parties as the
+ *   desired verbosity level (defaults to 1).
+ *
+ * - --vm gives a hint to the test code about running in a virtual
+ *   environment, such as KVM. When passed, the boolean @a smokey_on_vm
+ *   is set. Each test may act upon this setting, e.g. skipping
+ *   time-dependent checks that may fail due to any slowdown induced
+ *   by the virtualization.
+ *
+ * @par Writing a test driver based on the Smokey API
+ *
+ * A test driver provides the main() entry point, which should iterate
+ * over the test list (@a smokey_test_list) prepared by the Smokey
+ * API, for running each test individually.  The @a for_each_smokey_test()
+ * helper is available for iterating over the active test list.
+ *
+ * When this entry point is called, all the initialization chores,
+ * including the test detection and the active test selection have
+ * been performed by the Smokey API already.
+ *
+ * @par Issuing information notices
+ *
+ * The printf-like @a smokey_note() routine is available for issuing
+ * notices to the output device (currently stdout), unless --silent
+ * was detected on the command line. smokey_note() outputs a
+ * terminating newline character. Notes are enabled for any verbosity
+ * level greater than zero.
+ *
+ * @par Issuing trace messages
+ *
+ * The printf-like @a smokey_trace() routine is available for issuing
+ * progress messages to the output device (currently stdout), unless
+ * --silent was detected on the command line. smokey_trace() outputs a
+ * terminating newline character. Traces are enabled for any verbosity
+ * level greater than one.
+ *
+ * Therefore, a possible implementation of a test driver could be as
+ * basic as:
+ *
+ * @code
+ * #include <stdio.h>
+ * #include <error.h>
+ * #include <smokey/smokey.h>
+ *
+ * int main(int argc, char *const argv[])
+ * {
+ *	struct smokey_test *t;
+ *	int ret;
+ *
+ *	if (pvlist_empty(&smokey_test_list))
+ *		return 0;
+ *
+ *	for_each_smokey_test(t) {
+ *		ret = t->run(t, argc, argv);
+ *		if (ret) {
+ *			if (smokey_keep_going)
+ *				continue;
+ *			error(1, -ret, "test %s failed", t->name);
+ *		}
+ *		smokey_note("%s OK", t->name);
+ *	}
+ *
+ *	return 0;
+ * }
+ * @endcode
+ */
+
+DEFINE_PRIVATE_LIST(smokey_test_list);
+
+int smokey_keep_going;
+
+int smokey_verbose_mode = 1;
+
+int smokey_on_vm = 0;
+
+static DEFINE_PRIVATE_LIST(register_list);
+
+static DEFINE_PRIVATE_LIST(exclude_list);
+
+static char *include_arg;
+
+static char *exclude_arg;
+
+static int test_count;
+
+static int do_list;
+
+static int do_run;
+
+static const struct option smokey_options[] = {
+	{
+#define keep_going_opt	0
+		.name = "keep-going",
+		.has_arg = no_argument,
+		.flag = &smokey_keep_going,
+		.val = 1,
+	},
+	{
+#define run_opt		1
+		.name = "run",
+		.has_arg = optional_argument,
+		.flag = &do_run,
+		.val = 1,
+	},
+	{
+#define list_opt	2
+		.name = "list",
+		.has_arg = optional_argument,
+		.flag = &do_list,
+		.val = 1,
+	},
+	{
+#define vm_opt		3
+		.name = "vm",
+		.has_arg = no_argument,
+		.flag = &smokey_on_vm,
+		.val = 1,
+	},
+	{
+#define exclude_opt	4
+		.name = "exclude",
+		.has_arg = required_argument,
+	},
+	{ /* Sentinel */ }
+};
+
+static void smokey_help(void)
+{
+	fprintf(stderr, "--keep-going			don't stop upon test error\n");
+	fprintf(stderr, "--list[=<id[,id...]>]		list [matching] tests\n");
+	fprintf(stderr, "--run[=<id[,id...]>]		run [portion of] the test list\n");
+	fprintf(stderr, "--exclude=<id[,id...]>	exclude test(s) from the run list\n");
+	fprintf(stderr, "--vm				hint about running in a virtual environment\n");
+}
+
+static void pick_test_range(struct pvlistobj *dst, int start, int end)
+{
+	struct smokey_test *t, *tmp;
+
+	/* Pick tests in the suggested range order. */
+
+	if (start <= end) {
+		pvlist_for_each_entry_safe(t, tmp, &register_list, __reserved.next) {
+			if (t->__reserved.id >= start &&
+			    t->__reserved.id <= end) {
+				pvlist_remove(&t->__reserved.next);
+				pvlist_append(&t->__reserved.next, dst);
+			}
+		}
+	} else {
+		pvlist_for_each_entry_reverse_safe(t, tmp, &register_list, __reserved.next) {
+			if (t->__reserved.id >= end &&
+			    t->__reserved.id <= start) {
+				pvlist_remove(&t->__reserved.next);
+				pvlist_append(&t->__reserved.next, dst);
+			}
+		}
+	}
+}
+
+static void drop_test_range(struct pvlistobj *dst, int start, int end)
+{
+	struct smokey_test *t, *tmp;
+
+	/*
+	 * Drop tests from the register list so that we won't find
+	 * them when applying the inclusion filter next, order is not
+	 * significant.
+	 */
+	pvlist_for_each_entry_safe(t, tmp, &register_list, __reserved.next) {
+		if (t->__reserved.id >= start &&
+		    t->__reserved.id <= end) {
+			pvlist_remove(&t->__reserved.next);
+			pvlist_append(&t->__reserved.next, dst);
+		}
+	}
+}
+
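+/*
+ * Translate a single id token into a test position: a literal
+ * number is taken as a position, otherwise the token is glob-matched
+ * against the registered test names.
+ */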
+static int resolve_id(const char *s)
+{
+	struct smokey_test *t;
+
+	if (isdigit(*s))
+		return atoi(s);
+
+	/*
+	 * CAUTION: as we transfer items from register_list to a
+	 * destination list, we may end up with an empty source list,
+	 * which is a perfectly valid situation, unlike having an
+	 * empty registration list at startup, which would mean that
+	 * no test is available from the current program.
+	 */
+	if (pvlist_empty(&register_list))
+		return -1;
+
+	pvlist_for_each_entry(t, &register_list, __reserved.next)
+		if (!fnmatch(s, t->name, FNM_PATHNAME))
+			return t->__reserved.id;
+
+	return -1;
+}
+
+static int glob_match(struct pvlistobj *dst, const char *s)
+{
+	struct smokey_test *t, *tmp;
+	int matches = 0;
+
+	if (pvlist_empty(&register_list))
+		return 0;
+
+	pvlist_for_each_entry_safe(t, tmp, &register_list, __reserved.next) {
+		if (!fnmatch(s, t->name, FNM_PATHNAME)) {
+			pvlist_remove(&t->__reserved.next);
+			pvlist_append(&t->__reserved.next, dst);
+			matches++;
+		}
+	}
+
+	return matches;
+}
+
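+/*
+ * Apply a comma-separated test enumeration to the register list,
+ * handing every matching test over to @dst via @filter_action. Each
+ * item may be a glob pattern, a single position or name, or a
+ * dash-delimited range thereof.
+ */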
+static int apply_test_filter(const char *test_enum,
+			     struct pvlistobj *dst,
+			     void (*filter_action)(struct pvlistobj *dst,
+						   int start, int end))
+{
+	char *s = strdup(test_enum), *n, *range, *range_p = NULL, *id, *id_r;
+	int start, end;
+
+	n = s;
+	while ((range = strtok_r(n, ",", &range_p)) != NULL) {
+		if (*range == '\0')
+			continue;
+		end = -1;
+		if (range[strlen(range)-1] == '-')
+			end = test_count - 1;
+		id = strtok_r(range, "-", &id_r);
+		if (id) {
+			if (glob_match(dst, id)) {
+				if (strtok_r(NULL, "-", &id_r))
+					goto fail;
+				n = NULL;
+				continue;
+			}
+			start = resolve_id(id);
+			if (*range == '-') {
+				end = start;
+				start = 0;
+			}
+			id = strtok_r(NULL, "-", &id_r);
+			if (id)
+				end = resolve_id(id);
+			else if (end < 0)
+				end = start;
+			if (start < 0 || start >= test_count ||
+			    end < 0 || end >= test_count)
+				goto fail;
+		} else {
+			start = 0;
+			end = test_count - 1;
+		}
+		filter_action(dst, start, end);
+		n = NULL;
+	}
+
+	free(s);
+
+	return 0;
+fail:
+	warning("invalid test range in %s (0-%d)", test_enum, test_count - 1);
+	free(s);
+
+	return -EINVAL;
+}
+
+static int run_include_filter(const char *include_enum)
+{
+	return apply_test_filter(include_enum, &smokey_test_list,
+				 pick_test_range);
+}
+
+static int run_exclude_filter(const char *exclude_enum)
+{
+	return apply_test_filter(exclude_enum, &exclude_list,
+				 drop_test_range);
+}
+
+static int list_tests(void)
+{
+	struct pvlistobj list;
+	struct smokey_test *t;
+	int ret = 0;
+
+	pvlist_init(&list);
+
+	if (include_arg) {
+		ret = apply_test_filter(include_arg, &list,
+				pick_test_range);
+		free(include_arg);
+	} else
+		pick_test_range(&list, 0, test_count);
+
+	if (!pvlist_empty(&list))
+		pvlist_for_each_entry(t, &list, __reserved.next)
+			printf("#%-3d %s\n\t%s\n",
+			       t->__reserved.id, t->name, t->description);
+	return ret;
+}
+
+void smokey_register_plugin(struct smokey_test *t)
+{
+	pvlist_append(&t->__reserved.next, &register_list);
+	t->__reserved.id = test_count++;
+}
+
+static int smokey_parse_option(int optnum, const char *optarg)
+{
+	switch (optnum) {
+	case list_opt:
+	case run_opt:
+		if (optarg)
+			include_arg = strdup(optarg);
+		break;
+	case exclude_opt:
+		exclude_arg = strdup(optarg);
+		break;
+	case keep_going_opt:
+	case vm_opt:
+		break;
+	default:
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static int smokey_init(void)
+{
+	int ret = 0;
+
+	if (do_list)
+		return list_tests();
+
+	if (do_run) {
+		if (pvlist_empty(&register_list)) {
+			warning("no test registered");
+			return -EINVAL;
+		}
+
+		if (exclude_arg) {
+			run_exclude_filter(exclude_arg);
+			free(exclude_arg);
+		}
+
+		if (include_arg) {
+			ret = run_include_filter(include_arg);
+			free(include_arg);
+		} else
+			pick_test_range(&smokey_test_list, 0, test_count);
+
+		if (pvlist_empty(&smokey_test_list)) {
+			warning("no test selected");
+			ret = -EINVAL;
+		}
+	}
+
+	if (pvlist_empty(&smokey_test_list))
+		set_runtime_tunable(verbosity_level, 0);
+	else
+		smokey_verbose_mode = get_runtime_tunable(verbosity_level);
+
+	return ret;
+}
+
+static struct setup_descriptor smokey_interface = {
+	.name = "smokey",
+	.init = smokey_init,
+	.options = smokey_options,
+	.parse_option = smokey_parse_option,
+	.help = smokey_help,
+};
+
+post_setup_call(smokey_interface);
diff --git a/kernel/xenomai-v3.2.4/lib/trank/COPYING b/kernel/xenomai-v3.2.4/lib/trank/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/trank/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/trank/Makefile.am b/kernel/xenomai-v3.2.4/lib/trank/Makefile.am
new file mode 100644
index 0000000..ec15367
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/trank/Makefile.am
@@ -0,0 +1,21 @@
+
+lib_LTLIBRARIES = libtrank@CORE@.la
+
+libtrank@CORE@_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 0:0:0
+
+libtrank@CORE@_la_LIBADD =						\
+	@XENO_CORE_LDADD@					\
+	$(top_builddir)/lib/alchemy/libalchemy@CORE@.la
+
+libtrank@CORE@_la_SOURCES =	\
+	init.c		\
+	internal.c	\
+	internal.h	\
+	posix.c		\
+	native.c
+
+libtrank@CORE@_la_CPPFLAGS =		\
+	@XENO_USER_CFLAGS@	\
+	-D__XENO_COMPAT__	\
+	-I$(top_srcdir)/include	\
+	-I$(top_srcdir)/lib
diff --git a/kernel/xenomai-v3.2.4/lib/trank/init.c b/kernel/xenomai-v3.2.4/lib/trank/init.c
new file mode 100644
index 0000000..ebb5925
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/trank/init.c
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <xenomai/init.h>
+#include "internal.h"
+
+/**
+ * @defgroup trank Transition Kit
+ *
+ * A set of wrappers and services easing the transition from Xenomai
+ * 2.x to 3.x.
+ *
+ * This interface provides a source compatibility layer for building
+ * applications based on the Xenomai 2.x @a posix and @a native APIs
+ * over Xenomai 3.x.
+ */
+
+static struct setup_descriptor trank_interface = {
+	.name = "trank",
+	.init = trank_init_interface,
+};
+
+post_setup_call(trank_interface);
diff --git a/kernel/xenomai-v3.2.4/lib/trank/internal.c b/kernel/xenomai-v3.2.4/lib/trank/internal.c
new file mode 100644
index 0000000..585fc36
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/trank/internal.c
@@ -0,0 +1,99 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <xeno_config.h>
+#include <stdio.h>
+#include <signal.h>
+#include <stdlib.h>
+#include <string.h>
+#include <memory.h>
+#include <boilerplate/signal.h>
+#include "cobalt/internal.h"
+#include "internal.h"
+
+sigset_t trank_sigperiod_set;
+
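+/*
+ * Per-thread context storage: use compiler-supported TLS when
+ * available, otherwise fall back to a POSIX thread-specific key.
+ */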
+#ifdef HAVE_TLS
+
+__thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+struct trank_context trank_context;
+
+static void trank_init_context(void)
+{
+	memset(&trank_context, 0, sizeof(trank_context));
+}
+
+static inline void trank_destroy_context(void)
+{
+	/* nop */
+}
+
+#else /* !HAVE_TLS */
+
+#include <malloc.h>
+
+pthread_key_t trank_context_key;
+
+static void trank_init_context(void)
+{
+	struct trank_context *tc;
+
+	tc = malloc(sizeof(*tc));
+	if (tc == NULL)
+		early_panic("error creating TSD: %s", strerror(ENOMEM));
+
+	memset(tc, 0, sizeof(*tc));
+	pthread_setspecific(trank_context_key, tc);
+}
+
+static void __trank_destroy_context(void *p)
+{
+	free(p);
+}
+
+static void trank_destroy_context(void)
+{
+	struct trank_context *tc;
+
+	tc = pthread_getspecific(trank_context_key);
+	if (tc) {
+		pthread_setspecific(trank_context_key, NULL);
+		__trank_destroy_context(tc);
+	}
+}
+
+#endif /* !HAVE_TLS */
+
+static struct cobalt_tsd_hook tsd_hook = {
+	.create_tsd = trank_init_context,
+	.delete_tsd = trank_destroy_context,
+};
+
+int trank_init_interface(void)
+{
+#ifndef HAVE_TLS
+	int ret;
+
+	ret = pthread_key_create(&trank_context_key, __trank_destroy_context);
+	if (ret)
+		early_panic("error creating TSD key: %s", strerror(ret));
+#endif
+	sigaddset(&trank_sigperiod_set, SIGPERIOD);
+	cobalt_register_tsd_hook(&tsd_hook);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/trank/internal.h b/kernel/xenomai-v3.2.4/lib/trank/internal.h
new file mode 100644
index 0000000..5fd9d7e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/trank/internal.h
@@ -0,0 +1,54 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _TRANK_INTERNAL_H
+#define _TRANK_INTERNAL_H
+
+#include <pthread.h>
+#include <time.h>
+#include <signal.h>
+#include <xeno_config.h>
+
+struct trank_context {
+	timer_t periodic_timer;
+};
+
+#ifdef HAVE_TLS
+extern __thread __attribute__ ((tls_model (CONFIG_XENO_TLS_MODEL)))
+struct trank_context trank_context;
+
+static inline struct trank_context *trank_get_context(void)
+{
+	return &trank_context;
+}
+
+#else
+
+extern pthread_key_t trank_context_key;
+
+static inline struct trank_context *trank_get_context(void)
+{
+	return pthread_getspecific(trank_context_key);
+}
+
+#endif
+
+int trank_init_interface(void);
+
+extern sigset_t trank_sigperiod_set;
+
+#endif /* !_TRANK_INTERNAL_H */
diff --git a/kernel/xenomai-v3.2.4/lib/trank/native.c b/kernel/xenomai-v3.2.4/lib/trank/native.c
new file mode 100644
index 0000000..cacc261
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/trank/native.c
@@ -0,0 +1,668 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <copperplate/threadobj.h>
+#include <copperplate/heapobj.h>
+#include <trank/native/task.h>
+#include <trank/native/alarm.h>
+#include <trank/native/event.h>
+#include <trank/native/pipe.h>
+#include "../alchemy/alarm.h"
+
+#ifdef DOXYGEN_CPP
+
+/**
+ * @ingroup trank
+ * @{
+ *
+ * @fn int COMPAT__rt_task_create(RT_TASK *task, const char *name, int stksize, int prio, int mode)
+ * @brief Create a real-time task (compatibility service).
+ *
+ * This service creates a task with access to the full set of Xenomai
+ * real-time services. If @a prio is non-zero, the new task belongs to
+ * Xenomai's real-time FIFO scheduling class, aka SCHED_FIFO. If @a
+ * prio is zero, the task belongs to the regular SCHED_OTHER class.
+ *
+ * Creating tasks with zero priority is useful for running
+ * non-real-time processes which may invoke blocking real-time
+ * services, such as pending on a semaphore, reading from a message
+ * queue or a buffer, and so on.
+ *
+ * Once created, the task is left dormant until it is actually started
+ * by rt_task_start().
+ *
+ * @param task The address of a task descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * task. When non-NULL and non-empty, a copy of this string is
+ * used for indexing the created task into the object registry.
+ *
+ * @param stksize The size of the stack (in bytes) for the new
+ * task. If zero is passed, a system-dependent default size will be
+ * substituted.
+ *
+ * @param prio The base priority of the new task. This value must be
+ * in the [0 .. 99] range, where 0 is the lowest effective priority.
+ *
+ * @param mode The task creation mode. The following flags can be
+ * OR'ed into this bitmask:
+ *
+ * - T_FPU allows the task to use the FPU whenever available on the
+ * platform. This flag may be omitted, as it is automatically set when
+ * a FPU is present on the platform, cleared otherwise.
+ *
+ * - T_SUSP causes the task to start in suspended mode. In such a
+ * case, the thread will have to be explicitly resumed using the
+ * rt_task_resume() service for its execution to actually begin.
+ *
+ * - T_CPU(cpuid) makes the new task affine to CPU # @b cpuid. CPU
+ * identifiers range from 0 to 7 (inclusive).
+ *
+ * - T_JOINABLE allows another task to wait on the termination of the
+ * new task. rt_task_join() shall be called for this task to clean up
+ * any resources after its termination.
+ *
+ * - When running over the Cobalt core, T_WARNSW causes the SIGDEBUG
+ * signal to be sent to the current task whenever it switches to
+ * secondary mode. This feature is useful to detect unwanted
+ * migrations to the Linux domain. This flag has no effect over the
+ * Mercury core.
+ *
+ * Passing T_FPU|T_CPU(1) in the @a mode parameter thus creates a task
+ * with FPU support enabled and affine to CPU #1.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a prio, @a mode or @a stksize is
+ * invalid.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the task.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered task.
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ *
+ * @sideeffect
+ *
+ *   - calling rt_task_create() causes SCHED_FIFO tasks to switch to
+ * secondary mode.
+ *
+ *   - members of Xenomai's SCHED_FIFO class running in the primary
+ * domain have utmost priority over all Linux activities in the
+ * system, including Linux interrupt handlers.
+ *
+ * @note Tasks can be referred to from multiple processes which all
+ * belong to the same Xenomai session.
+ *
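+ * @par Example
+ * A minimal sketch of the legacy calling convention; worker() and
+ * its body are illustrative, and error handling is elided:
+ * @code
+ * static void worker(void *arg)
+ * {
+ *         // periodic or event-driven work goes here
+ * }
+ *
+ * RT_TASK task;
+ * int ret;
+ *
+ * ret = rt_task_create(&task, "worker", 0, 50, T_CPU(1));
+ * if (ret == 0)
+ *         ret = rt_task_start(&task, worker, NULL);
+ * @endcode
+ *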
+ * @deprecated This is a compatibility service from the Transition
+ * Kit.
+ */
+
+int COMPAT__rt_task_create(RT_TASK *task, const char *name,
+			   int stksize, int prio, int mode);
+
+/**
+ * @fn int COMPAT__rt_task_set_periodic(RT_TASK *task, RTIME idate, RTIME period)
+ * @brief Make a real-time task periodic (compatibility service).
+ *
+ * Make a task periodic by programming its first release point and its
+ * period in the processor time line.  @a task should then call
+ * rt_task_wait_period() to sleep until the next periodic release
+ * point in the processor timeline is reached.
+ *
+ * @param task The task descriptor.  If @a task is NULL, the current
+ * task is made periodic. @a task must belong to the current process.
+ *
+ * @param idate The initial (absolute) date of the first release
+ * point, expressed in clock ticks (see note).  If @a idate is equal
+ * to TM_NOW, the current system date is used.  Otherwise, if @a task
+ * is NULL or equal to @a rt_task_self(), the caller is delayed until
+ * @a idate has elapsed.
+ *
+ * @param period The period of the task, expressed in clock ticks (see
+ * note). Passing TM_INFINITE stops the task's periodic timer if
+ * enabled, then returns successfully.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a task is NULL but the caller is not a
+ * Xenomai task, or if @a task is non-NULL but not a valid task
+ * descriptor.
+ *
+ * - -ETIMEDOUT is returned if @a idate is different from TM_INFINITE
+ * and represents a date in the past.
+ *
+ * @apitags{thread-unrestricted, switch-primary}
+ *
+ * @note The caller must be an Alchemy task if @a task is NULL.
+ *
+ * @note Unlike the original Xenomai 2.x call, this emulation delays
+ * the caller until @a idate has elapsed only if @a task is NULL or
+ * equal to rt_task_self().
+ *
+ * @sideeffect Over Cobalt, -EINVAL is returned if @a period is
+ * different from TM_INFINITE but shorter than the user scheduling
+ * latency value for the target system, as displayed by
+ * /proc/xenomai/latency.
+ *
+ * @note The @a idate and @a period values are interpreted as a
+ * multiple of the Alchemy clock resolution (see
+ * --alchemy-clock-resolution option, defaults to 1 nanosecond).
+ *
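+ * @par Example
+ * A minimal sketch making the current task periodic at 1 ms,
+ * assuming the default 1 ns clock resolution; error handling is
+ * elided:
+ * @code
+ * int ret = rt_task_set_periodic(NULL, TM_NOW, 1000000);
+ *
+ * for (;;) {
+ *         ret = rt_task_wait_period(NULL);
+ *         if (ret && ret != -ETIMEDOUT)
+ *                 break;        // -ETIMEDOUT only flags an overrun
+ *         // do the periodic work
+ * }
+ * @endcode
+ *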
+ * @deprecated This is a compatibility service from the Transition
+ * Kit.
+ */
+
+int COMPAT__rt_task_set_periodic(RT_TASK *task, RTIME idate, RTIME period);
+
+/**
+ * @fn int COMPAT__rt_alarm_create(RT_ALARM *alarm, const char *name)
+ * @brief Create an alarm object (compatibility service).
+ *
+ * This routine creates an object triggering an alarm routine at a
+ * specified time in the future. Alarms can be periodic or oneshot,
+ * depending on the reload interval value passed to rt_alarm_start().
+ * A task can wait for timeouts using the rt_alarm_wait() service.
+ *
+ * @param alarm The address of an alarm descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * alarm. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created alarm into the object registry.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * local pool in order to create the alarm.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered alarm.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ *
+ * @note Alarms are process-private objects and thus cannot be shared
+ * by multiple processes, even if they belong to the same Xenomai
+ * session.
+ *
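+ * @par Example
+ * A minimal sketch pairing this call with rt_alarm_start() and
+ * rt_alarm_wait() for a 1 ms periodic tick; error handling is
+ * elided:
+ * @code
+ * RT_ALARM alarm;
+ *
+ * rt_alarm_create(&alarm, "tick");
+ * rt_alarm_start(&alarm, 1000000, 1000000);
+ * while (rt_alarm_wait(&alarm) == 0) {
+ *         // handle the tick
+ * }
+ * @endcode
+ *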
+ * @deprecated This is a compatibility service from the Transition
+ * Kit.
+ */
+
+int COMPAT__rt_alarm_create(RT_ALARM *alarm, const char *name);
+
+/**
+ * @fn int rt_alarm_wait(RT_ALARM *alarm)
+ * @brief Wait for the next alarm shot (compatibility service).
+ *
+ * This service allows the current task to suspend execution until the
+ * specified alarm triggers. The priority of the current task is
+ * raised above all other tasks - except those also undergoing an
+ * alarm wait.
+ *
+ * @return Zero is returned upon success, after the alarm timed
+ * out. Otherwise:
+ *
+ * - -EINVAL is returned if @a alarm is not a valid alarm descriptor.
+ *
+ * - -EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * - -EINTR is returned if rt_task_unblock() was called for the
+ * current task before the request is satisfied.
+ *
+ * - -EIDRM is returned if @a alarm is deleted while the caller was
+ * sleeping on it. In such a case, @a alarm is no more valid upon
+ * return of this service.
+ *
+ * @apitags{xthread-only, switch-primary}
+ *
+ * @deprecated This is a compatibility service from the Transition
+ * Kit.
+ */
+
+int rt_alarm_wait(RT_ALARM *alarm);
+
+/**
+ * @fn int COMPAT__rt_event_create(RT_EVENT *event, const char *name, unsigned long ivalue, int mode)
+ * @brief Create an event flag group.
+ *
+ * This call is the legacy form of the rt_event_create() service,
+ * using a long event mask. The new form uses a regular integer to
+ * hold the event mask instead.
+ *
+ * @param event The address of an event descriptor which can be later
+ * used to identify uniquely the created object, upon success of this
+ * call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * event. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created event into the object registry.
+ *
+ * @param ivalue The initial value of the group's event mask.
+ *
+ * @param mode The event group creation mode. The following flags can
+ * be OR'ed into this bitmask:
+ *
+ * - EV_FIFO makes tasks pend in FIFO order on the event flag group.
+ *
+ * - EV_PRIO makes tasks pend in priority order on the event flag group.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a mode is invalid.
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the event flag group.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered event flag group.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
+ *
+ * @note Event flag groups can be shared by multiple processes which
+ * belong to the same Xenomai session.
+ *
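+ * @par Example
+ * A minimal sketch of the legacy long-mask form; error handling is
+ * elided:
+ * @code
+ * RT_EVENT event;
+ * unsigned long oldmask;
+ *
+ * rt_event_create(&event, "flags", 0, EV_PRIO);
+ * rt_event_signal(&event, 0x1UL);
+ * rt_event_clear(&event, 0x1UL, &oldmask);  // previous value -> oldmask
+ * @endcode
+ *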
+ * @deprecated This is a compatibility service from the Transition
+ * Kit.
+ */
+int COMPAT__rt_event_create(RT_EVENT *event, const char *name,
+			    unsigned long ivalue, int mode);
+
+/**
+ * @fn int COMPAT__rt_event_signal(RT_EVENT *event, unsigned long mask)
+ * @brief Signal an event.
+ *
+ * This call is the legacy form of the rt_event_signal() service,
+ * using a long event mask. The new form uses a regular integer to
+ * hold the event mask instead.
+ *
+ * @param event The event descriptor.
+ *
+ * @param mask The set of events to be posted.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a event is not an event flag group
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ *
+ * @deprecated This is a compatibility service from the Transition
+ * Kit.
+ */
+int COMPAT__rt_event_signal(RT_EVENT *event, unsigned long mask);
+
+/**
+ * @fn int COMPAT__rt_event_clear(RT_EVENT *event,unsigned long mask,unsigned long *mask_r)
+ * @brief Clear event flags.
+ *
+ * This call is the legacy form of the rt_event_clear() service,
+ * using a long event mask. The new form uses a regular integer to
+ * hold the event mask instead.
+ *
+ * @param event The event descriptor.
+ *
+ * @param mask The set of event flags to be cleared.
+ *
+ * @param mask_r If non-NULL, @a mask_r is the address of a memory
+ * location which will receive the previous value of the event flag
+ * group before the flags are cleared.
+ *
+ * @return Zero is returned upon success. Otherwise:
+ *
+ * - -EINVAL is returned if @a event is not a valid event flag group
+ * descriptor.
+ *
+ * @apitags{unrestricted, switch-primary}
+ *
+ * @deprecated This is a compatibility service from the Transition
+ * Kit.
+ */
+int COMPAT__rt_event_clear(RT_EVENT *event,
+			   unsigned long mask, unsigned long *mask_r);
+
+/**
+ * @fn int COMPAT__rt_pipe_create(RT_PIPE *pipe, const char *name, int minor, size_t poolsize)
+ * @brief Create a message pipe.
+ *
+ * This call is the legacy form of the rt_pipe_create() service, which
+ * returns a zero status upon success. The new form returns the @a
+ * minor number assigned to the connection instead, which is useful
+ * when P_MINOR_AUTO is specified in the call (see the discussion
+ * about the @a minor parameter).
+ *
+ * This service opens a bi-directional communication channel for
+ * exchanging messages between Xenomai threads and regular Linux
+ * threads. Pipes natively preserve message boundaries, but can also
+ * be used in byte-oriented streaming mode from Xenomai to Linux.
+ *
+ * rt_pipe_create() always returns immediately, even if no thread has
+ * opened the associated special device file yet. Conversely, the
+ * non-real-time side may block when attempting to open the special
+ * device file until rt_pipe_create() is issued on the same pipe from
+ * a Xenomai thread, unless O_NONBLOCK was given to the open(2) system
+ * call.
+ *
+ * @param pipe The address of a pipe descriptor which can be later used
+ * to identify uniquely the created object, upon success of this call.
+ *
+ * @param name An ASCII string standing for the symbolic name of the
+ * pipe. When non-NULL and non-empty, a copy of this string is used
+ * for indexing the created pipe into the object registry.
+ *
+ * Named pipes are supported through the use of the registry. Passing
+ * a valid @a name parameter when creating a message pipe causes a
+ * symbolic link to be created from
+ * /proc/xenomai/registry/rtipc/xddp/@a name to the associated special
+ * device (i.e. /dev/rtp*), so that the specific @a minor information
+ * does not need to be known by those processes for opening the
+ * proper device file. In such a case, both sides of the pipe only
+ * need to agree upon a symbolic name to refer to the same data path,
+ * which is especially useful whenever the @a minor number is picked
+ * up dynamically using an adaptive algorithm, such as passing
+ * P_MINOR_AUTO as @a minor value.
+ *
+ * @param minor The minor number of the device associated with the
+ * pipe.  Passing P_MINOR_AUTO causes the minor number to be
+ * auto-allocated. In such a case, a symbolic link will be
+ * automatically created from
+ * /proc/xenomai/registry/rtipc/xddp/@a name to the allocated pipe
+ * device entry. Valid minor numbers range from 0 to
+ * CONFIG_XENO_OPT_PIPE_NRDEV-1.
+ *
+ * @param poolsize Specifies the size of a dedicated buffer pool for the
+ * pipe. Passing 0 means that all message allocations for this pipe are
+ * performed on the Cobalt core heap.
+ *
+ * @return This compatibility call returns zero upon
+ * success. Otherwise:
+ *
+ * - -ENOMEM is returned if the system fails to get memory from the
+ * main heap in order to create the pipe.
+ *
+ * - -ENODEV is returned if @a minor is different from P_MINOR_AUTO
+ * and is not a valid minor number.
+ *
+ * - -EEXIST is returned if the @a name is conflicting with an already
+ * registered pipe.
+ *
+ * - -EBUSY is returned if @a minor is already open.
+ *
+ * - -EPERM is returned if this service was called from an
+ * asynchronous context.
+ *
+ * @apitags{thread-unrestricted, switch-secondary}
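+ *
+ * @par Example
+ * A minimal sketch creating an auto-minor pipe, then sending a
+ * message to the non-real-time side; the "logger" name is
+ * illustrative and error handling is elided:
+ * @code
+ * RT_PIPE pipe;
+ *
+ * rt_pipe_create(&pipe, "logger", P_MINOR_AUTO, 0);
+ * rt_pipe_write(&pipe, "hello", 5, P_NORMAL);
+ * // A regular Linux process may then read the message after opening
+ * // /proc/xenomai/registry/rtipc/xddp/logger.
+ * @endcode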
+ */
+int COMPAT__rt_pipe_create(RT_PIPE *pipe,
+			   const char *name, int minor, size_t poolsize);
+#else /* !DOXYGEN_CPP */
+
+int rt_task_create(RT_TASK *task, const char *name,
+		   int stksize, int prio, int mode)
+{
+	int ret, susp, cpus, cpu;
+	cpu_set_t cpuset;
+
+	susp = mode & T_SUSP;
+	cpus = mode & T_CPUMASK;
+	ret = __CURRENT(rt_task_create(task, name, stksize, prio,
+				       mode & ~(T_SUSP|T_CPUMASK|T_LOCK)));
+	if (ret)
+		return ret;
+
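+	/*
+	 * T_CPU(n) encodes CPU #n as bit (24 + n) of the creation
+	 * mode, hence the right shift before scanning the mask; only
+	 * CPUs 0-7 can be addressed this way.
+	 */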
+	if (cpus) {
+		CPU_ZERO(&cpuset);
+		for (cpu = 0, cpus >>= 24;
+		     cpus && cpu < 8; cpu++, cpus >>= 1) {
+			if (cpus & 1)
+				CPU_SET(cpu, &cpuset);
+		}
+		ret = rt_task_set_affinity(task, &cpuset);
+		if (ret) {
+			rt_task_delete(task);
+			return ret;
+		}
+	}
+
+	return susp ? rt_task_suspend(task) : 0;
+}
+
+int rt_task_spawn(RT_TASK *task, const char *name,
+		  int stksize, int prio, int mode,
+		  void (*entry)(void *arg), void *arg)
+{
+	int ret;
+
+	ret = rt_task_create(task, name, stksize, prio, mode);
+	if (ret)
+		return ret;
+
+	return rt_task_start(task, entry, arg);
+}
+
+int rt_task_set_periodic(RT_TASK *task, RTIME idate, RTIME period)
+{
+	int ret;
+
+	ret = __CURRENT(rt_task_set_periodic(task, idate, period));
+	if (ret)
+		return ret;
+
+	if (idate != TM_NOW) {
+		if (task == NULL || task == rt_task_self())
+			ret = rt_task_wait_period(NULL);
+		else
+			trank_warning("task won't wait for start time");
+	}
+
+	return ret;
+}
+
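+/*
+ * Xenomai 2.x allowed tasks to pend on an alarm via rt_alarm_wait(),
+ * a service Alchemy dropped. Emulate it by having the alarm handler
+ * post a condition variable which the waiters block on.
+ */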
+struct trank_alarm_wait {
+	pthread_mutex_t lock;
+	pthread_cond_t event;
+	int alarm_pulses;
+};
+
+static void trank_alarm_handler(void *arg)
+{
+	struct trank_alarm_wait *aw = arg;
+
+	__RT(pthread_mutex_lock(&aw->lock));
+	aw->alarm_pulses++;
+	__RT(pthread_cond_broadcast(&aw->event));
+	__RT(pthread_mutex_unlock(&aw->lock));
+}
+
+int rt_alarm_create(RT_ALARM *alarm, const char *name)
+{
+	struct trank_alarm_wait *aw;
+	pthread_mutexattr_t mattr;
+	pthread_condattr_t cattr;
+	int ret;
+
+	aw = xnmalloc(sizeof(*aw));
+	if (aw == NULL)
+		return -ENOMEM;
+
+	aw->alarm_pulses = 0;
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-__RT(pthread_mutex_init(&aw->lock, &mattr)));
+	pthread_mutexattr_destroy(&mattr);
+	if (ret)
+		goto fail_lock;
+
+	pthread_condattr_init(&cattr);
+	pthread_condattr_setpshared(&cattr, PTHREAD_PROCESS_PRIVATE);
+	ret = __bt(-pthread_cond_init(&aw->event, &cattr));
+	pthread_condattr_destroy(&cattr);
+	if (ret)
+		goto fail_cond;
+
+	ret = __CURRENT(rt_alarm_create(alarm, name, trank_alarm_handler, aw));
+	if (ret)
+		goto fail_alarm;
+
+	return 0;
+fail_alarm:
+	__RT(pthread_cond_destroy(&aw->event));
+fail_cond:
+	__RT(pthread_mutex_destroy(&aw->lock));
+fail_lock:
+	xnfree(aw);
+
+	return ret;
+}
+
+static struct alchemy_alarm *find_alarm(RT_ALARM *alarm)
+{
+	struct alchemy_alarm *acb;
+
+	if (bad_pointer(alarm))
+		return NULL;
+
+	acb = (struct alchemy_alarm *)alarm->handle;
+	if (bad_pointer(acb) || acb->magic != alarm_magic)
+		return NULL;
+
+	return acb;
+}
+
+int rt_alarm_wait(RT_ALARM *alarm)
+{
+	struct threadobj *current = threadobj_current();
+	struct sched_param_ex param_ex;
+	struct trank_alarm_wait *aw;
+	struct alchemy_alarm *acb;
+	int ret, prio, pulses;
+
+	acb = find_alarm(alarm);
+	if (acb == NULL)
+		return -EINVAL;
+
+	threadobj_lock(current);
+	prio = threadobj_get_priority(current);
+	if (prio != threadobj_irq_prio) {
+		param_ex.sched_priority = threadobj_irq_prio;
+		/* Working on self, so -EIDRM can't happen. */
+		threadobj_set_schedparam(current, SCHED_FIFO, &param_ex);
+	}
+	threadobj_unlock(current);
+
+	aw = acb->arg;
+
+	/*
+	 * Emulate the original behavior: wait for the next pulse (no
+	 * event buffering, broadcast to all waiters), while
+	 * preventing spurious wakeups.
+	 */
+	__RT(pthread_mutex_lock(&aw->lock));
+
+	pulses = aw->alarm_pulses;
+
+	for (;;) {
+		ret = -__RT(pthread_cond_wait(&aw->event, &aw->lock));
+		if (ret || aw->alarm_pulses != pulses)
+			break;
+	}
+
+	__RT(pthread_mutex_unlock(&aw->lock));
+
+	return __bt(ret);
+}
+
+int rt_alarm_delete(RT_ALARM *alarm)
+{
+	struct trank_alarm_wait *aw;
+	struct alchemy_alarm *acb;
+	int ret;
+
+	acb = find_alarm(alarm);
+	if (acb == NULL)
+		return -EINVAL;
+
+	aw = acb->arg;
+	ret = __CURRENT(rt_alarm_delete(alarm));
+	if (ret)
+		return ret;
+
+	__RT(pthread_cond_destroy(&aw->event));
+	__RT(pthread_mutex_destroy(&aw->lock));
+	xnfree(aw);
+
+	return 0;
+}
+
+int rt_event_create(RT_EVENT *event, const char *name,
+		    unsigned long ivalue, int mode)
+{
+	return __CURRENT(rt_event_create(event, name, ivalue, mode));
+}
+
+int rt_event_signal(RT_EVENT *event, unsigned long mask)
+{
+	return __CURRENT(rt_event_signal(event, mask));
+}
+
+int rt_event_clear(RT_EVENT *event, unsigned long mask,
+		   unsigned long *mask_r)
+{
+	unsigned int _mask;
+	int ret;
+
+	ret = __CURRENT(rt_event_clear(event, mask, &_mask));
+	if (ret)
+		return ret;
+
+	if (mask_r)
+		*mask_r = _mask;
+
+	return 0;
+}
+
+int rt_pipe_create(RT_PIPE *pipe, const char *name,
+		   int minor, size_t poolsize)
+{
+	int ret;
+
+	ret = __CURRENT(rt_pipe_create(pipe, name, minor, poolsize));
+
+	return ret < 0 ? ret : 0;
+}
+
+#endif	/* !DOXYGEN_CPP */
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/trank/posix.c b/kernel/xenomai-v3.2.4/lib/trank/posix.c
new file mode 100644
index 0000000..ddada56
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/trank/posix.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <xeno_config.h>
+#include <errno.h>
+#include <signal.h>
+#include <time.h>
+#include <memory.h>
+#include <boilerplate/ancillaries.h>
+#include <boilerplate/signal.h>
+#include <trank/posix/pthread.h>
+#include "cobalt/internal.h"
+#include "internal.h"
+
+/**
+ * @ingroup trank
+ * @{
+ */
+
+/**
+ * Make a thread periodic (compatibility service).
+ *
+ * This service makes the POSIX @a thread periodic.
+ *
+ * @param thread The thread to arm a periodic timer for.
+ *
+ * @param starttp start time, expressed as an absolute value of the
+ * CLOCK_REALTIME clock.
+ *
+ * @param periodtp period, expressed as a time interval.
+ *
+ * @return 0 on success;
+ * @return an error number if:
+ * - ESRCH, @a thread is invalid.
+ * - ETIMEDOUT, the start time has already passed.
+ * - EPERM, the caller is not a Xenomai thread.
+ * - EINVAL, @a thread does not refer to the current thread.
+ *
+ * @note Unlike the original Xenomai 2.x call, this emulation does not
+ * delay the caller waiting for the first periodic release point. In
+ * addition, @a thread must be equal to pthread_self().
+ *
+ * @deprecated This service is a non-portable extension of the Xenomai
+ * 2.x POSIX interface, not available with Xenomai 3.x.  Instead,
+ * Cobalt-based applications should set up a periodic timer using the
+ * timer_create(), timer_settime() call pair, then wait for release
+ * points via sigwaitinfo(). Overruns can be detected by looking at
+ * the siginfo.si_overrun field.  Alternatively, applications may
+ * obtain a file descriptor referring to a Cobalt timer via the
+ * timerfd() call, and read() from it to wait for timeouts.
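+ *
+ * @par Example
+ * A sketch of the suggested timerfd-based replacement; error
+ * handling is elided:
+ * @code
+ * #include <sys/timerfd.h>
+ *
+ * struct itimerspec its = {
+ *         .it_value = { .tv_sec = 1, .tv_nsec = 0 },
+ *         .it_interval = { .tv_sec = 0, .tv_nsec = 1000000 },
+ * };
+ * uint64_t ticks;
+ * int fd = timerfd_create(CLOCK_MONOTONIC, 0);
+ *
+ * timerfd_settime(fd, 0, &its, NULL);
+ * read(fd, &ticks, sizeof(ticks));  // waits for the next release point
+ * // ticks > 1 means (ticks - 1) release points were missed
+ * @endcode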
+ */
+int pthread_make_periodic_np(pthread_t thread,
+			     struct timespec *starttp,
+			     struct timespec *periodtp)
+{
+	struct trank_context *tc;
+	struct itimerspec its;
+	struct sigevent sev;
+	int ret;
+
+	tc = trank_get_context();
+	if (tc == NULL)
+		return EPERM;
+
+	if (thread != pthread_self())
+		return EINVAL;
+
+	if (tc->periodic_timer == NULL) {
+		memset(&sev, 0, sizeof(sev));
+		sev.sigev_signo = SIGPERIOD;
+		sev.sigev_notify = SIGEV_SIGNAL|SIGEV_THREAD_ID;
+		sev.sigev_notify_thread_id = cobalt_thread_pid(thread);
+		ret = __RT(timer_create(CLOCK_REALTIME, &sev,
+					&tc->periodic_timer));
+		if (ret)
+			return errno;
+	}
+
+	its.it_value = *starttp;
+	its.it_interval = *periodtp;
+
+	ret = __RT(timer_settime(tc->periodic_timer, TIMER_ABSTIME, &its, NULL));
+	if (ret)
+		return errno;
+
+	return 0;
+}
+
+/**
+ * Wait for the next periodic release point (compatibility service)
+ *
+ * Delay the current thread until the next periodic release point is
+ * reached. The periodic timer should have been previously started for
+ * @a thread by a call to pthread_make_periodic_np().
+ *
+ * @param overruns_r If non-NULL, @a overruns_r shall be a pointer to
+ * a memory location which will receive the count of pending
+ * overruns. This value is written only when pthread_wait_np()
+ * returns zero or ETIMEDOUT; the memory location remains
+ * unmodified otherwise. If NULL, this count is not returned.
+ *
+ * @return Zero is returned upon success. If @a overruns_r is
+ * non-NULL, zero is written to the pointed memory
+ * location. Otherwise:
+ *
+ * - EWOULDBLOCK is returned if pthread_make_periodic_np() was not
+ * called for the current thread.
+ *
+ * - EINTR is returned if @a thread was interrupted by a signal before
+ * the next periodic release point was reached.
+ *
+ * - ETIMEDOUT is returned if a timer overrun occurred, which
+ * indicates that a previous release point was missed by the calling
+ * thread. If @a overruns_r is non-NULL, the count of pending overruns
+ * is written to the pointed memory location.
+ *
+ * - EPERM is returned if this service was called from an invalid
+ * context.
+ *
+ * @note If the current release point has already been reached at the
+ * time of the call, the current thread immediately returns from this
+ * service with no delay.
+ *
+ * @deprecated This service is a non-portable extension of the Xenomai
+ * 2.x POSIX interface, not available with Xenomai 3.x.  Instead,
+ * Cobalt-based applications should set up a periodic timer using the
+ * timer_create(), timer_settime() call pair, then wait for release
+ * points via sigwaitinfo(). Overruns can be detected by looking at
+ * the siginfo.si_overrun field.  Alternatively, applications may
+ * obtain a file descriptor referring to a Cobalt timer via the
+ * timerfd() call, and read() from it to wait for timeouts.
+ */
+int pthread_wait_np(unsigned long *overruns_r)
+{
+	struct trank_context *tc;
+	siginfo_t si;
+	int sig;
+
+	tc = trank_get_context();
+	if (tc == NULL)
+		return EPERM;
+
+	if (tc->periodic_timer == NULL)
+		return EWOULDBLOCK;
+
+	for (;;) {
+		sig = __RT(sigwaitinfo(&trank_sigperiod_set, &si));
+		if (sig == SIGPERIOD)
+			break;
+		if (errno == EINTR)
+			return EINTR;
+		panic("cannot wait for next period, %s", symerror(-errno));
+	}
+
+	if (overruns_r)
+		*overruns_r = si.si_overrun;
+
+	return 0;
+}
+
+/** @} */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/COPYING b/kernel/xenomai-v3.2.4/lib/vxworks/COPYING
new file mode 100644
index 0000000..3b20440
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/COPYING
@@ -0,0 +1,458 @@
+		  GNU LESSER GENERAL PUBLIC LICENSE
+		       Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+     59 Temple Place, Suite 330, Boston, MA  02111-1307  USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL.  It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+			    Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+  This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it.  You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+  When we speak of free software, we are referring to freedom of use,
+not price.  Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+  To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights.  These restrictions translate to certain responsibilities for
+
+
+  For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you.  You must make sure that they, too, receive or can get the source
+code.  If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it.  And you must show them these terms so they know their rights.
+
+  We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+  To protect each distributor, we want to make it very clear that
+there is no warranty for the free library.  Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+  Finally, software patents pose a constant threat to the existence of
+any free program.  We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder.  Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+  Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License.  This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License.  We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+  When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library.  The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom.  The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+  We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License.  It also provides other free software developers Less
+of an advantage over competing non-free programs.  These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries.  However, the Lesser license provides advantages in certain
+special circumstances.
+
+  For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard.  To achieve this, non-free programs must be
+allowed to use the library.  A more frequent case is that a free
+library does the same job as widely used non-free libraries.  In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+  In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software.  For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+  Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.  Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library".  The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+		  GNU LESSER GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+  A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+  The "Library", below, refers to any such software library or work
+which has been distributed under these terms.  A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language.  (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+  "Source code" for a work means the preferred form of the work for
+making modifications to it.  For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+  Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it).  Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+  
+  1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+  You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+  2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) The modified work must itself be a software library.
+
+    b) You must cause the files modified to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    c) You must cause the whole of the work to be licensed at no
+    charge to all third parties under the terms of this License.
+
+    d) If a facility in the modified Library refers to a function or a
+    table of data to be supplied by an application program that uses
+    the facility, other than as an argument passed when the facility
+    is invoked, then you must make a good faith effort to ensure that,
+    in the event an application does not supply such function or
+    table, the facility still operates, and performs whatever part of
+    its purpose remains meaningful.
+
+    (For example, a function in a library to compute square roots has
+    a purpose that is entirely well-defined independent of the
+    application.  Therefore, Subsection 2d requires that any
+    application-supplied function or table used by this function must
+    be optional: if the application does not supply it, the square
+    root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library.  To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License.  (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.)  Do not make any other change in
+these notices.
+
+  Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+  This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+  4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+  If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library".  Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+  However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library".  The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+  When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library.  The
+threshold for this to be true is not precisely defined by law.
+
+  If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work.  (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+  Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+  6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+  You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License.  You must supply a copy of this License.  If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License.  Also, you must do one
+of these things:
+
+    a) Accompany the work with the complete corresponding
+    machine-readable source code for the Library including whatever
+    changes were used in the work (which must be distributed under
+    Sections 1 and 2 above); and, if the work is an executable linked
+    with the Library, with the complete machine-readable "work that
+    uses the Library", as object code and/or source code, so that the
+    user can modify the Library and then relink to produce a modified
+    executable containing the modified Library.  (It is understood
+    that the user who changes the contents of definitions files in the
+    Library will not necessarily be able to recompile the application
+    to use the modified definitions.)
+
+    b) Use a suitable shared library mechanism for linking with the
+    Library.  A suitable mechanism is one that (1) uses at run time a
+    copy of the library already present on the user's computer system,
+    rather than copying library functions into the executable, and (2)
+    will operate properly with a modified version of the library, if
+    the user installs one, as long as the modified version is
+    interface-compatible with the version that the work was made with.
+
+    c) Accompany the work with a written offer, valid for at
+    least three years, to give the same user the materials
+    specified in Subsection 6a, above, for a charge no more
+    than the cost of performing this distribution.
+
+    d) If distribution of the work is made by offering access to copy
+    from a designated place, offer equivalent access to copy the above
+    specified materials from the same place.
+
+    e) Verify that the user has already received a copy of these
+    materials or that you have already sent this user a copy.
+
+  For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it.  However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+  It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system.  Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+  7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+    a) Accompany the combined library with a copy of the same work
+    based on the Library, uncombined with any other library
+    facilities.  This must be distributed under the terms of the
+    Sections above.
+
+    b) Give prominent notice with the combined library of the fact
+    that part of it is a work based on the Library, and explaining
+    where to find the accompanying uncombined form of the same work.
+
+  8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License.  Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License.  However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+  9. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Library or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+  10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+  11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded.  In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+  13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation.  If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+  14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission.  For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this.  Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+			    NO WARRANTY
+
+  15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE.  THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU.  SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+  16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+		     END OF TERMS AND CONDITIONS
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/Makefile.am b/kernel/xenomai-v3.2.4/lib/vxworks/Makefile.am
new file mode 100644
index 0000000..37b8caf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/Makefile.am
@@ -0,0 +1,47 @@
+lib_LTLIBRARIES = libvxworks@CORE@.la
+
+libvxworks@CORE@_la_LDFLAGS = @XENO_LIB_LDFLAGS@ -version-info 0:0:0
+
+libvxworks@CORE@_la_LIBADD =						\
+	@XENO_CORE_LDADD@					\
+	$(top_builddir)/lib/copperplate/libcopperplate@CORE@.la
+
+libvxworks@CORE@_la_SOURCES = \
+	init.c		\
+	errnoLib.c	\
+	intLib.c	\
+	kernLib.c	\
+	lstLib.c	\
+	memPartLib.c	\
+	memPartLib.h	\
+	msgQLib.c	\
+	msgQLib.h	\
+	rngLib.c	\
+	rngLib.h	\
+	semLib.c	\
+	semLib.h	\
+	taskLib.c	\
+	taskLib.h	\
+	taskHookLib.c	\
+	taskHookLib.h	\
+	taskInfo.c	\
+	tickLib.c	\
+	tickLib.h	\
+	wdLib.c		\
+	wdLib.h		\
+	sysLib.c	\
+	reference.h
+
+libvxworks@CORE@_la_CPPFLAGS =				\
+	@XENO_USER_CFLAGS@				\
+	-I$(top_srcdir)/include				\
+	-I$(top_srcdir)/lib
+
+EXTRA_DIST = testsuite
+
+SPARSE = sparse
+
+sparse:
+	@for i in $(libvxworks@CORE@_la_SOURCES); do \
+		$(SPARSE) $(CHECKFLAGS) $(srcdir)/$$i; \
+	done
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/README b/kernel/xenomai-v3.2.4/lib/vxworks/README
new file mode 100644
index 0000000..0d0dcef
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/README
@@ -0,0 +1,66 @@
+Overview
+========
+
+This directory contains a VxWorks(*) emulation interface on top of the
+Xenomai framework.
+
+Known variations from VxWorks/WIND
+==================================
+
+You may observe some variations from VxWorks. If you find such a
+  variation and you think it would be easy to correct, please write to
+the Xenomai mailing list at <xenomai@xenomai.org>.  Here are the known
+variations:
+
+- VxWorks task priorities are restricted to [97..0] in the current
+  implementation of the emulator.
+
+- taskInit() does not allow creating unnamed tasks; instead, it
+  assigns an auto-generated name, as taskSpawn() does, whenever a NULL
+  or empty name string is given.
+
+- taskUnlock() may induce a round-robin effect within the priority
+  level of the task releasing the scheduler lock.
+
+- semDelete() does not flush a mutex semaphore; it may return
+  S_semLib_INVALID_OPERATION if the semaphore is busy upon deletion.
+
+- semMCreate() does not support FIFO queuing mode, but only PRIO.
+
+- semTake() will complain about the owner of a mutex-type semaphore
+  exiting without releasing the lock while the current task pends on
+  it. S_objLib_OBJ_UNAVAILABLE is then returned to the caller, and the
+  semaphore is left in an inconsistent state.
+
+- kernelInit() has a different signature from the original WIND kernel
+  call (see the usage sketch below), i.e.:
+  STATUS kernelInit(FUNCPTR rootRtn, int argc, char *const argv[]).
+
+  The first argument is a pointer to an optional root task entry
+  point, which the emulator will name "rootTask", and start with
+  priority 50. Since kernelInit() is usually called on behalf of a
+  non-real-time thread within the application's main() routine, the root
+  task will preempt the caller immediately. If NULL is passed, no root
+  task will be started by kernelInit().
+
+  The second and third arguments should be the argument count and
+  vector, as passed to the application's main() routine on the
+  command line. Passing 0/NULL is valid, meaning that no command line
+  argument may be used to override the default emulator settings.
+
+- memPartCreate() may return S_memLib_NOT_ENOUGH_MEMORY if a memory
+  shortage occurs while allocating the internal partition descriptor,
+  i.e. this descriptor is not carved out of the user-provided memory.
+
+- memPartAddToPool() may return S_objLib_OBJ_ID_ERROR when called with
+  an invalid partition identifier.
+
+- The following calls may return S_objLib_OBJ_NO_METHOD when called
+  from a non-VxWorks task context:
+
+  taskDelay, taskLock, taskUnlock, taskSafe, taskUnsafe, and taskIdSelf.
+
+
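+Example
+=======
+
+A minimal sketch of entering the emulator. The root routine name and
+body are hypothetical; only the kernelInit() signature documented
+above is normative, and build/bootstrap details are omitted:
+
+  #include <unistd.h>
+  #include <vxworks/kernLib.h>
+
+  /* Hypothetical root routine, started as "rootTask" at priority 50. */
+  static void rootRtn(void)
+  {
+          /* Create the application tasks and objects from here. */
+  }
+
+  int main(int argc, char *const argv[])
+  {
+          /* Hand over the command line so --vxworks-* options apply. */
+          if (kernelInit((FUNCPTR)rootRtn, argc, argv) != OK)
+                  return 1;
+
+          pause();  /* The root task now runs the application. */
+          return 0;
+  }
+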
+(*) VxWorks is a registered trademark of Wind River Systems, Inc.
+(http://www.windriver.com).
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/errnoLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/errnoLib.c
new file mode 100644
index 0000000..cfb73f2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/errnoLib.c
@@ -0,0 +1,144 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <string.h>
+#include <vxworks/errnoLib.h>
+#include "taskLib.h"
+
+void printErrno(int status)
+{
+	const char *msg;
+	char buf[64];
+
+	switch (status) {
+	case S_objLib_OBJ_ID_ERROR:
+		msg = "S_objLib_OBJ_ID_ERROR";
+		break;
+	case S_objLib_OBJ_UNAVAILABLE:
+		msg = "S_objLib_OBJ_UNAVAILABLE";
+		break;
+	case S_objLib_OBJ_DELETED:
+		msg = "S_objLib_OBJ_DELETED";
+		break;
+	case S_objLib_OBJ_TIMEOUT:
+		msg = "S_objLib_OBJ_TIMEOUT";
+		break;
+	case S_taskLib_NAME_NOT_FOUND:
+		msg = "S_taskLib_NAME_NOT_FOUND";
+		break;
+	case S_taskLib_TASK_HOOK_NOT_FOUND:
+		msg = "S_taskLib_TASK_HOOK_NOT_FOUND";
+		break;
+	case S_taskLib_ILLEGAL_PRIORITY:
+		msg = "S_taskLib_ILLEGAL_PRIORITY";
+		break;
+	case S_taskLib_TASK_HOOK_TABLE_FULL:
+		msg = "S_taskLib_TASK_HOOK_TABLE_FULL";
+		break;
+	case S_semLib_INVALID_STATE:
+		msg = "S_semLib_INVALID_STATE";
+		break;
+	case S_semLib_INVALID_OPTION:
+		msg = "S_semLib_INVALID_OPTION";
+		break;
+	case S_semLib_INVALID_QUEUE_TYPE:
+		msg = "S_semLib_INVALID_QUEUE_TYPE";
+		break;
+	case S_semLib_INVALID_OPERATION:
+		msg = "S_semLib_INVALID_OPERATION";
+		break;
+	case S_msgQLib_INVALID_MSG_LENGTH:
+		msg = "S_msgQLib_INVALID_MSG_LENGTH";
+		break;
+	case S_msgQLib_NON_ZERO_TIMEOUT_AT_INT_LEVEL:
+		msg = "S_msgQLib_NON_ZERO_TIMEOUT_AT_INT_LEVEL";
+		break;
+	case S_msgQLib_INVALID_QUEUE_TYPE:
+		msg = "S_msgQLib_INVALID_QUEUE_TYPE";
+		break;
+	case S_intLib_NOT_ISR_CALLABLE:
+		msg = "S_intLib_NOT_ISR_CALLABLE";
+		break;
+	case S_memLib_NOT_ENOUGH_MEMORY:
+		msg = "S_memLib_NOT_ENOUGH_MEMORY";
+		break;
+	default:
+		if (strerror_r(status, buf, sizeof(buf)))
+			msg = "Unknown error";
+		else
+			msg = buf;
+	}
+
+	fprintf(stderr, "Error code %d: %s\n", status, msg);
+}
+
+STATUS errnoOfTaskSet(TASK_ID task_id, int status)
+{
+	struct wind_task *task;
+	struct service svc;
+	STATUS ret = OK;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task_or_self(task_id);
+	if (task == NULL) {
+		ret = ERROR;
+		goto out;
+	}
+
+	*task->thobj.errno_pointer = status;
+	put_wind_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+STATUS errnoOfTaskGet(TASK_ID task_id)
+{
+	struct wind_task *task;
+	struct service svc;
+	STATUS status = OK;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task_or_self(task_id);
+	if (task == NULL) {
+		status = ERROR;
+		goto out;
+	}
+
+	status = *task->thobj.errno_pointer;
+	put_wind_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return status;
+}
+
+STATUS errnoSet(int status)
+{
+	errno = status;
+	return OK;
+}
+
+int errnoGet(void)
+{
+	return errno;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/init.c b/kernel/xenomai-v3.2.4/lib/vxworks/init.c
new file mode 100644
index 0000000..9a3dcc4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/init.c
@@ -0,0 +1,105 @@
+/*
+ * Copyright (C) 2011 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <stdio.h>
+#include <time.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <xenomai/init.h>
+#include <vxworks/errnoLib.h>
+#include "tickLib.h"
+#include "taskLib.h"
+
+/**
+ * @defgroup vxworks VxWorks&reg; emulator
+ *
+ * A VxWorks&reg; emulation library on top of Xenomai.
+ *
+ * The emulator mimics the behavior described in the public
+ * documentation of the WIND 5.x API for the following class of
+ * services:
+ *
+ * - taskLib, taskInfoLib, taskHookLib,
+ * - semLib, msgQLib, wdLib, memPartLib
+ * - intLib, tickLib, sysLib (partial)
+ * - errnoLib, lstLib, kernelLib (partial)
+ */
+
+static unsigned int clock_resolution = 1000000; /* 1ms */
+
+static const struct option vxworks_options[] = {
+	{
+#define clock_resolution_opt	0
+		.name = "vxworks-clock-resolution",
+		.has_arg = required_argument,
+	},
+	{ /* Sentinel */ }
+};
+
+static int vxworks_parse_option(int optnum, const char *optarg)
+{
+	switch (optnum) {
+	case clock_resolution_opt:
+		clock_resolution = atoi(optarg);
+		break;
+	default:
+		/* Paranoid, can't happen. */
+		return -EINVAL;
+	}
+
+	return 0;
+}
+
+static void vxworks_help(void)
+{
+	fprintf(stderr, "--vxworks-clock-resolution=<ns> tick value (default 1ms)\n");
+}
+
+static int vxworks_init(void)
+{
+	int ret;
+
+	registry_add_dir("/vxworks");
+	registry_add_dir("/vxworks/tasks");
+	registry_add_dir("/vxworks/semaphores");
+	registry_add_dir("/vxworks/queues");
+	registry_add_dir("/vxworks/watchdogs");
+
+	cluster_init(&wind_task_table, "vxworks.task");
+
+	ret = clockobj_init(&wind_clock, clock_resolution);
+	if (ret) {
+		warning("%s: failed to initialize VxWorks clock (res=%u ns)",
+			__FUNCTION__, clock_resolution);
+		return __bt(ret);
+	}
+
+	__RT(pthread_mutex_init(&wind_task_lock, NULL));
+
+	return 0;
+}
+
+static struct setup_descriptor vxworks_skin = {
+	.name = "vxworks",
+	.init = vxworks_init,
+	.options = vxworks_options,
+	.parse_option = vxworks_parse_option,
+	.help = vxworks_help,
+};
+
+interface_setup_call(vxworks_skin);
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/intLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/intLib.c
new file mode 100644
index 0000000..cf07b22
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/intLib.c
@@ -0,0 +1,25 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <copperplate/threadobj.h>
+#include <vxworks/intLib.h>
+
+BOOL intContext(void)
+{
+	return threadobj_irq_p();
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/kernLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/kernLib.c
new file mode 100644
index 0000000..d675027
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/kernLib.c
@@ -0,0 +1,61 @@
+/*
+ * Copyright (C) 2012 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <vxworks/kernLib.h>
+#include <vxworks/errnoLib.h>
+#include "tickLib.h"
+#include "taskLib.h"
+
+static int switch_slicing(struct threadobj *thobj, struct timespec *quantum)
+{
+	struct sched_param_ex param_ex;
+	int policy;
+
+	param_ex.sched_priority = threadobj_get_priority(thobj);
+
+	if (quantum) {
+		policy = SCHED_RR;
+		param_ex.sched_rr_quantum.tv_sec = quantum->tv_sec;
+		param_ex.sched_rr_quantum.tv_nsec = quantum->tv_nsec;
+	} else {
+		policy = param_ex.sched_priority ? SCHED_FIFO : SCHED_OTHER;
+	}
+
+	return threadobj_set_schedparam(thobj, policy, &param_ex);
+}
+
+STATUS kernelTimeSlice(int ticks)
+{
+	struct timespec quantum, *p = NULL;
+	struct wind_task *task;
+
+	if (ticks) {
+		/* Convert VxWorks ticks to timespec. */
+		clockobj_ticks_to_timespec(&wind_clock, ticks, &quantum);
+		p = &quantum;
+	}
+
+	/*
+	 * Enable/disable round-robin for all threads known by the
+	 * current process.
+	 */
+	wind_time_slice = ticks;
+	do_each_wind_task(task, switch_slicing(&task->thobj, p));
+
+	return OK;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/lstLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/lstLib.c
new file mode 100644
index 0000000..0daa864
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/lstLib.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <vxworks/errnoLib.h>
+#include <vxworks/lstLib.h>
+
+void lstExtract(LIST *lsrc, NODE *nstart, NODE *nend, LIST *ldst)
+{
+	struct pvholder *nholder = &nstart->link, *holder;
+	struct NODE *n;
+	int nitems = 0;
+
+	do {	/* XXX: terribly inefficient, but plain simple... */
+		holder = nholder;
+		nholder = holder->next;
+		pvlist_remove_init(holder);
+		pvlist_append(holder, &ldst->list);
+		n = container_of(holder, struct NODE, link);
+		n->list = ldst;
+		nitems++;
+	} while (holder != &nend->link);
+
+	lsrc->count -= nitems;
+	ldst->count += nitems;
+}
+
+NODE *lstNth(LIST *l, int nodenum)
+{
+	struct pvholder *holder;
+	int nth;
+
+	if (l == NULL || nodenum <= 0 || pvlist_empty(&l->list))
+		return NULL;
+
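+	/*
+	 * Walk from whichever end should be closer to the requested
+	 * node: scan forward when it lies in the first quarter of the
+	 * list, backward from the tail otherwise.
+	 */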
+	if (nodenum <= l->count >> 2) { /* nodenum is 1-based. */
+		nth = 1;
+		pvlist_for_each(holder, &l->list)
+			if (nodenum == nth++)
+				return container_of(holder, struct NODE, link);
+	} else {
+		nth = l->count;
+		pvlist_for_each_reverse(holder, &l->list)
+			if (nodenum == nth--)
+				return container_of(holder, struct NODE, link);
+	}
+
+	return NULL;
+}
+
+NODE *lstNStep(NODE *n, int steps)
+{
+	struct pvholder *holder = &n->link;
+
+	if (steps == 0)
+		return n;
+
+	if (steps < 0) {
+		do
+			holder = holder->prev;
+		while (holder->prev != &n->link && ++steps < 0);
+	} else {
+		do
+			holder = holder->next;
+		while (holder->next != &n->link && --steps > 0);
+	}
+
+	if (steps != 0)
+		return NULL;
+
+	return container_of(holder, struct NODE, link);
+}
+
+int lstFind(LIST *l, NODE *n)
+{
+	struct pvholder *holder;
+	int nth = 1;
+
+	if (l == NULL || pvlist_empty(&l->list))
+		return ERROR;
+
+	pvlist_for_each(holder, &l->list) {
+		if (holder == &n->link)
+			return nth;
+		nth++;
+	}
+
+	return ERROR;
+}
+
+void lstConcat(LIST *ldst, LIST *lsrc)
+{
+	struct pvholder *holder;
+	struct NODE *n;
+
+	if (pvlist_empty(&lsrc->list))
+		return;
+
+	pvlist_for_each(holder, &lsrc->list) {
+		n = container_of(holder, struct NODE, link);
+		n->list = ldst;
+	}
+
+	pvlist_join(&lsrc->list, &ldst->list);
+	ldst->count += lsrc->count;
+	lsrc->count = 0;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.c
new file mode 100644
index 0000000..c8eeb36
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.c
@@ -0,0 +1,242 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <boilerplate/lock.h>
+#include <boilerplate/ancillaries.h>
+#include <copperplate/heapobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/memPartLib.h>
+#include "memPartLib.h"
+
+#define mempart_magic	0x5a6b7c8d
+
+static struct wind_mempart *find_mempart_from_id(PART_ID partId)
+{
+	struct wind_mempart *mp = mainheap_deref(partId, struct wind_mempart);
+
+	if (mp == NULL || ((uintptr_t)mp & (sizeof(uintptr_t)-1)) != 0 ||
+	    mp->magic != mempart_magic)
+		return NULL;
+	/*
+	 * XXX: memory partitions may not be deleted, so we don't need
+	 * to protect against references to stale objects.
+	 */
+	return mp;
+}
+
+PART_ID memPartCreate(char *pPool, unsigned int poolSize)
+{
+	pthread_mutexattr_t mattr;
+	struct wind_mempart *mp;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	mp = xnmalloc(sizeof(*mp));
+	if (mp == NULL) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		goto fail;
+	}
+
+	ret = __heapobj_init(&mp->hobj, NULL, poolSize, pPool);
+	if (ret) {
+		xnfree(mp);
+		errno = S_memLib_INVALID_NBYTES;
+	fail:
+		CANCEL_RESTORE(svc);
+		return (PART_ID)0;
+	}
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, mutex_type_attribute);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
+	__RT(pthread_mutex_init(&mp->lock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+	memset(&mp->stats, 0, sizeof(mp->stats));
+	mp->stats.numBytesFree = poolSize;
+	mp->stats.numBlocksFree = 1;
+	mp->magic = mempart_magic;
+
+	CANCEL_RESTORE(svc);
+
+	return mainheap_ref(mp, PART_ID);
+}
+
+STATUS memPartAddToPool(PART_ID partId,
+			char *pPool, unsigned int poolSize)
+{
+	struct wind_mempart *mp;
+	struct service svc;
+	STATUS ret = OK;
+
+	if (poolSize == 0) {
+		errno = S_memLib_INVALID_NBYTES;
+		return ERROR;
+	}
+
+	mp = find_mempart_from_id(partId);
+	if (mp == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	CANCEL_DEFER(svc);
+
+	__RT(pthread_mutex_lock(&mp->lock));
+
+	if (heapobj_extend(&mp->hobj, poolSize, pPool)) {
+		errno = S_memLib_INVALID_NBYTES;
+		ret = ERROR;
+	} else {
+		mp->stats.numBytesFree += poolSize;
+		mp->stats.numBlocksFree++;
+	}
+
+	__RT(pthread_mutex_unlock(&mp->lock));
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+void *memPartAlignedAlloc(PART_ID partId,
+			  unsigned int nBytes, unsigned int alignment)
+{
+	unsigned int xtra = 0;
+	void *ptr;
+
+	/*
+	 * XXX: We assume that our underlying allocator (TLSF, pshared
+	 * or Glibc's malloc()) aligns at worst on a 8-bytes boundary,
+	 * so we only have to care for larger constraints.
+	 */
+	if ((alignment & (alignment - 1)) != 0) {
+		warning("%s: alignment value '%u' is not a power of two",
+			__FUNCTION__, alignment);
+		alignment = 8;
+	}
+	else if (alignment > 8)
+		xtra = alignment;
+
+	ptr = memPartAlloc(partId, nBytes + xtra);
+	if (ptr == NULL)
+		return NULL;
+
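+	/*
+	 * With xtra == alignment, rounding (ptr + xtra) down to the
+	 * boundary always yields an address at or above ptr with at
+	 * least nBytes left in the nBytes + xtra allocation, trading
+	 * up to one alignment's worth of slack for simplicity.
+	 */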
+	return (void *)(((uintptr_t)ptr + xtra) & ~(alignment - 1));
+}
+
+void *memPartAlloc(PART_ID partId, unsigned int nBytes)
+{
+	struct wind_mempart *mp;
+	void *p;
+
+	if (nBytes == 0)
+		return NULL;
+
+	mp = find_mempart_from_id(partId);
+	if (mp == NULL)
+		return NULL;
+
+	__RT(pthread_mutex_lock(&mp->lock));
+
+	p = heapobj_alloc(&mp->hobj, nBytes);
+	if (p == NULL)
+		goto out;
+
+	mp->stats.numBytesAlloc += nBytes;
+	mp->stats.numBlocksAlloc++;
+	mp->stats.numBytesFree -= nBytes;
+	mp->stats.numBlocksFree--;
+	if (mp->stats.numBytesAlloc > mp->stats.maxBytesAlloc)
+		mp->stats.maxBytesAlloc = mp->stats.numBytesAlloc;
+out:
+	__RT(pthread_mutex_unlock(&mp->lock));
+
+	return p;
+}
+
+STATUS memPartFree(PART_ID partId, char *pBlock)
+{
+	struct wind_mempart *mp;
+	struct service svc;
+	size_t size;
+
+	if (pBlock == NULL)
+		return ERROR;
+
+	mp = find_mempart_from_id(partId);
+	if (mp == NULL)
+		return ERROR;
+
+	CANCEL_DEFER(svc);
+
+	__RT(pthread_mutex_lock(&mp->lock));
+
+	/* Fetch the block size before the block is released. */
+	size = heapobj_validate(&mp->hobj, pBlock);
+	heapobj_free(&mp->hobj, pBlock);
+
+	mp->stats.numBytesAlloc -= size;
+	mp->stats.numBlocksAlloc--;
+	mp->stats.numBytesFree += size;
+	mp->stats.numBlocksFree++;
+
+	__RT(pthread_mutex_unlock(&mp->lock));
+
+	CANCEL_RESTORE(svc);
+
+	return OK;
+}
+
+void memAddToPool(char *pPool, unsigned int poolSize)
+{
+	/*
+	 * XXX: Since Glibc's malloc() is at least as efficient as
+	 * VxWork's first-fit allocator, we just route allocation
+	 * requests on the main partition to the regular malloc() and
+	 * free() routines. Given that, our main pool is virtually
+	 * infinite already, so we just give a hint to the user about
+	 * this when asked to extend it.
+	 */
+	warning("%s: extending the main partition is useless", __FUNCTION__);
+}
+
+STATUS memPartInfoGet(PART_ID partId, MEM_PART_STATS *ppartStats)
+{
+	struct wind_mempart *mp;
+	struct service svc;
+
+	mp = find_mempart_from_id(partId);
+	if (mp == NULL)
+		return ERROR;
+
+	CANCEL_DEFER(svc);
+
+	__RT(pthread_mutex_lock(&mp->lock));
+	*ppartStats = mp->stats;
+	__RT(pthread_mutex_unlock(&mp->lock));
+
+	CANCEL_RESTORE(svc);
+
+	return OK;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.h
new file mode 100644
index 0000000..f2cd00a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/memPartLib.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _VXWORKS_MEMPARTLIB_H
+#define _VXWORKS_MEMPARTLIB_H
+
+#include <copperplate/heapobj.h>
+#include <vxworks/memPartLib.h>
+
+struct wind_mempart {
+	unsigned int magic;
+	struct heapobj hobj;
+	pthread_mutex_t lock;
+	struct wind_part_stats stats;
+};
+
+#endif /* _VXWORKS_MEMPARTLIB_H */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.c
new file mode 100644
index 0000000..aa0b71c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <memory.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/threadobj.h>
+#include <vxworks/errnoLib.h>
+#include "reference.h"
+#include "taskLib.h"
+#include "msgQLib.h"
+#include "tickLib.h"
+
+/*
+ * XXX: In order to keep the following services callable from
+ * non-VxWorks tasks (but still Xenomai ones, though), make sure
+ * to never depend on the wind_task struct, but rather on the thread
+ * base object directly.
+ */
+
+#define mq_magic	0x4a5b6c7d
+
+struct msgholder {
+	int size;
+	struct holder link;
+	/* Payload data follows. */
+};
+
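+/*
+ * Sanity-check an opaque queue identifier: it must dereference to a
+ * properly aligned block in the main heap which still carries the
+ * queue magic.
+ */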
+static struct wind_mq *find_mq_from_id(MSG_Q_ID qid)
+{
+	struct wind_mq *mq = mainheap_deref(qid, struct wind_mq);
+
+	if (mq == NULL || ((uintptr_t)mq & (sizeof(uintptr_t)-1)) != 0 ||
+	    mq->magic != mq_magic)
+		return NULL;
+
+	return mq;
+}
+
+static void mq_finalize(struct syncobj *sobj)
+{
+	struct wind_mq *mq = container_of(sobj, struct wind_mq, sobj);
+	heapobj_destroy(&mq->pool);
+	xnfree(mq);
+}
+fnref_register(libvxworks, mq_finalize);
+
+MSG_Q_ID msgQCreate(int maxMsgs, int maxMsgLength, int options)
+{
+	int sobj_flags = 0, ret;
+	struct wind_mq *mq;
+	struct service svc;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return (MSG_Q_ID)0;
+	}
+
+	if ((options & ~MSG_Q_PRIORITY) || maxMsgs <= 0) {
+		errno = S_msgQLib_INVALID_QUEUE_TYPE;
+		return (MSG_Q_ID)0;
+	}
+
+	if (maxMsgLength < 0) {
+		errno = S_msgQLib_INVALID_MSG_LENGTH;
+		return (MSG_Q_ID)0;
+	}
+
+	CANCEL_DEFER(svc);
+
+	mq = xnmalloc(sizeof(*mq));
+	if (mq == NULL)
+		goto fail_cballoc;
+
+	/*
+	 * The message pool must come from the main heap because of
+	 * mq->msg_list (this queue head and messages from the pool
+	 * must share the same allocation base). Create the heap
+	 * object accordingly.
+	 */
+	if (heapobj_init_array(&mq->pool, NULL, maxMsgLength +
+			       sizeof(struct msgholder), maxMsgs))
+		goto fail_bufalloc;
+
+	if (options & MSG_Q_PRIORITY)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	ret = syncobj_init(&mq->sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libvxworks, mq_finalize));
+	if (ret)
+		goto fail_syncinit;
+		
+	mq->options = options;
+	mq->maxmsg = maxMsgs;
+	mq->msgsize = maxMsgLength;
+	mq->msgcount = 0;
+	list_init(&mq->msg_list);
+
+	mq->magic = mq_magic;
+
+	CANCEL_RESTORE(svc);
+
+	return mainheap_ref(mq, MSG_Q_ID);
+
+fail_syncinit:
+	heapobj_destroy(&mq->pool);
+fail_bufalloc:
+	xnfree(mq);
+fail_cballoc:
+	errno = S_memLib_NOT_ENOUGH_MEMORY;
+
+	CANCEL_RESTORE(svc);
+
+	return (MSG_Q_ID)0;
+}
+
+STATUS msgQDelete(MSG_Q_ID msgQId)
+{
+	struct syncstate syns;
+	struct wind_mq *mq;
+	struct service svc;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	mq = find_mq_from_id(msgQId);
+	if (mq == NULL)
+		goto objid_error;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&mq->sobj, &syns)) {
+		CANCEL_RESTORE(svc);
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	mq->magic = ~mq_magic; /* Prevent further reference. */
+	syncobj_destroy(&mq->sobj, &syns);
+
+	CANCEL_RESTORE(svc);
+
+	return OK;
+}
+
+int msgQReceive(MSG_Q_ID msgQId, char *buffer, UINT maxNBytes, int timeout)
+{
+	struct wind_queue_wait *wait = NULL;
+	struct timespec ts, *timespec;
+	struct msgholder *msg = NULL;
+	UINT nbytes = (UINT)ERROR;
+	struct syncstate syns;
+	struct wind_mq *mq;
+	struct service svc;
+	int ret;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	mq = find_mq_from_id(msgQId);
+	if (mq == NULL)
+		goto objid_error;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&mq->sobj, &syns)) {
+		CANCEL_RESTORE(svc);
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
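+	/*
+	 * A sender which cannot copy its message straight into our
+	 * buffer queues it into the message pool instead, then wakes
+	 * us up with wait->size set to -1; in that case we loop back
+	 * here to pull the message from the list.
+	 */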
+retry:
+	if (!list_empty(&mq->msg_list)) {
+		mq->msgcount--;
+		msg = list_pop_entry(&mq->msg_list, struct msgholder, link);
+		nbytes = msg->size;
+		if (nbytes > maxNBytes)
+			nbytes = maxNBytes;
+		if (nbytes > 0)
+			memcpy(buffer, msg + 1, nbytes);
+		heapobj_free(&mq->pool, msg);
+		syncobj_drain(&mq->sobj);
+		goto done;
+	}
+
+	if (timeout == NO_WAIT) {
+		errno = S_objLib_OBJ_UNAVAILABLE;
+		goto done;
+	}
+
+	if (timeout != WAIT_FOREVER) {
+		timespec = &ts;
+		clockobj_ticks_to_timeout(&wind_clock, timeout, timespec);
+	} else
+		timespec = NULL;
+
+	wait = threadobj_prepare_wait(struct wind_queue_wait);
+	wait->ptr = __moff(buffer);
+	wait->size = maxNBytes;
+
+	ret = syncobj_wait_grant(&mq->sobj, timespec, &syns);
+	if (ret == -EIDRM) {
+		errno = S_objLib_OBJ_DELETED;
+		goto out;
+	}
+	if (ret == -ETIMEDOUT) {
+		errno = S_objLib_OBJ_TIMEOUT;
+		goto done;
+	}
+	nbytes = wait->size;
+	if (nbytes == -1L)	/* No direct copy? */
+		goto retry;
+	syncobj_drain(&mq->sobj);
+done:
+	syncobj_unlock(&mq->sobj, &syns);
+out:
+	if (wait)
+		threadobj_finish_wait();
+
+	CANCEL_RESTORE(svc);
+
+	return nbytes;
+}
+
+STATUS msgQSend(MSG_Q_ID msgQId, const char *buffer, UINT bytes,
+		int timeout, int prio)
+{
+	struct timespec ts, *timespec;
+	struct wind_queue_wait *wait;
+	struct threadobj *thobj;
+	struct msgholder *msg;
+	struct syncstate syns;
+	struct wind_mq *mq;
+	struct service svc;
+	int ret = ERROR;
+	UINT maxbytes;
+
+	CANCEL_DEFER(svc);
+
+	mq = find_mq_from_id(msgQId);
+	if (mq == NULL)
+		goto objid_error;
+
+	if (syncobj_lock(&mq->sobj, &syns)) {
+		CANCEL_RESTORE(svc);
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	if (bytes > mq->msgsize) {
+		errno = S_msgQLib_INVALID_MSG_LENGTH;
+		goto fail;
+	}
+
+	thobj = syncobj_peek_grant(&mq->sobj);
+	if (thobj && threadobj_local_p(thobj)) {
+		/* Fast path: direct copy to the receiver's buffer. */
+		wait = threadobj_get_wait(thobj);
+		maxbytes = wait->size;
+		if (bytes > maxbytes)
+			bytes = maxbytes;
+		if (bytes > 0)
+			memcpy(__mptr(wait->ptr), buffer, bytes);
+		wait->size = bytes;
+		goto done;
+	}
+
+	if (mq->msgcount < mq->maxmsg)
+		goto enqueue;
+
+	if (timeout == NO_WAIT) {
+		errno = S_objLib_OBJ_UNAVAILABLE;
+		goto fail;
+	}
+
+	if (threadobj_irq_p()) {
+		errno = S_msgQLib_NON_ZERO_TIMEOUT_AT_INT_LEVEL;
+		goto fail;
+	}
+
+	if (timeout != WAIT_FOREVER) {
+		timespec = &ts;
+		clockobj_ticks_to_timeout(&wind_clock, timeout, timespec);
+	} else
+		timespec = NULL;
+
+	do {
+		ret = syncobj_wait_drain(&mq->sobj, timespec, &syns);
+		if (ret == -EIDRM) {
+			errno = S_objLib_OBJ_DELETED;
+			ret = ERROR;
+			goto out;
+		}
+		if (ret == -ETIMEDOUT) {
+			errno = S_objLib_OBJ_TIMEOUT;
+			ret = ERROR;
+			goto fail;
+		}
+	} while (mq->msgcount >= mq->maxmsg);
+
+enqueue:
+	msg = heapobj_alloc(&mq->pool, bytes + sizeof(*msg));
+	if (msg == NULL) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		ret = ERROR;
+		goto fail;
+	}
+
+	mq->msgcount++;
+	assert(mq->msgcount <= mq->maxmsg); /* Paranoid. */
+	msg->size = bytes;
+	holder_init(&msg->link);
+
+	if (bytes > 0)
+		memcpy(msg + 1, buffer, bytes);
+
+	if (prio == MSG_PRI_NORMAL)
+		list_append(&msg->link, &mq->msg_list);
+	else
+		list_prepend(&msg->link, &mq->msg_list);
+
+	if (thobj) {
+		/*
+		 * We could not copy the message directly to the
+		 * remote buffer, tell the thread to pull it from the
+		 * pool.
+		 */
+		wait = threadobj_get_wait(thobj);
+		wait->size = -1UL;
+	}
+done:
+	if (thobj)	/* Wakeup waiter. */
+		syncobj_grant_to(&mq->sobj, thobj);
+
+	ret = OK;
+fail:
+	syncobj_unlock(&mq->sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+int msgQNumMsgs(MSG_Q_ID msgQId)
+{
+	struct syncstate syns;
+	struct wind_mq *mq;
+	struct service svc;
+	int msgcount;
+
+	mq = find_mq_from_id(msgQId);
+	if (mq == NULL)
+		goto objid_error;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&mq->sobj, &syns)) {
+		CANCEL_RESTORE(svc);
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	msgcount = mq->msgcount;
+	syncobj_unlock(&mq->sobj, &syns);
+
+	CANCEL_RESTORE(svc);
+
+	return msgcount;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.h
new file mode 100644
index 0000000..817e90d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/msgQLib.h
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _VXWORKS_MSGQLIB_H
+#define _VXWORKS_MSGQLIB_H
+
+#include <copperplate/syncobj.h>
+#include <copperplate/heapobj.h>
+#include <vxworks/msgQLib.h>
+
+struct wind_mq {
+	unsigned int magic;
+	int options;
+	char name[XNOBJECT_NAME_LEN];
+
+	int maxmsg;
+	UINT msgsize;
+	int msgcount;
+
+	struct heapobj pool;
+	struct syncobj sobj;
+	struct listobj msg_list;
+};
+
+struct wind_queue_wait {
+	size_t size;
+	dref_type(void *) ptr;
+};
+
+#endif /* _VXWORKS_MSGQLIB_H */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/reference.h b/kernel/xenomai-v3.2.4/lib/vxworks/reference.h
new file mode 100644
index 0000000..94456fa
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/reference.h
@@ -0,0 +1,22 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <copperplate/reference.h>
+
+#define libvxworks_tag  1
+#define libvxworks_cbi  1
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/rngLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/rngLib.c
new file mode 100644
index 0000000..b11b895
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/rngLib.c
@@ -0,0 +1,198 @@
+/*
+ * Copyright (C) 2008 Niklaus Giger <niklaus.giger@member.fsf.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdlib.h>
+#include <boilerplate/lock.h>
+#include <copperplate/heapobj.h>
+#include <vxworks/errnoLib.h>
+#include "rngLib.h"
+
+#define ring_magic 0x5432affe
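+
+/*
+ * rngCreate() allocates bufSize + 1 bytes of storage and all indices
+ * wrap modulo bufSize + 1: one slot is kept permanently free, so
+ * readPos == writePos unambiguously means empty while a full ring
+ * still leaves that single slot unused.
+ */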
+
+static struct wind_ring *find_ring_from_id(RING_ID rid)
+{
+	struct wind_ring *ring = mainheap_deref(rid, struct wind_ring);
+
+	if (ring == NULL || ((uintptr_t)ring & (sizeof(uintptr_t)-1)) != 0 ||
+	    ring->magic != ring_magic)
+		return NULL;
+
+	return ring;
+}
+
+RING_ID rngCreate(int nbytes)
+{
+	struct wind_ring *ring;
+	struct service svc;
+	void *ring_mem;
+	RING_ID rid;
+
+	if (nbytes <= 0) {
+		errnoSet(S_memLib_NOT_ENOUGH_MEMORY);
+		return 0;
+	}
+
+	CANCEL_DEFER(svc);
+
+	ring_mem = xnmalloc(sizeof(*ring) + nbytes + 1);
+	if (ring_mem == NULL) {
+		rid = 0;
+		errnoSet(S_memLib_NOT_ENOUGH_MEMORY);
+		goto out;
+	}
+
+	ring = ring_mem;
+	ring->magic = ring_magic;
+	ring->bufSize = nbytes;
+	ring->readPos = 0;
+	ring->writePos = 0;
+	rid = mainheap_ref(ring, RING_ID);
+out:
+	CANCEL_RESTORE(svc);
+
+	return rid;
+}
+
+void rngDelete(RING_ID rid)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+	struct service svc;
+
+	if (ring) {
+		ring->magic = 0;
+		CANCEL_DEFER(svc);
+		xnfree(ring);
+		CANCEL_RESTORE(svc);
+	}
+}
+
+void rngFlush(RING_ID rid)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+
+	if (ring) {
+		ring->readPos = 0;
+		ring->writePos = 0;
+	}
+}
+
+int rngBufGet(RING_ID rid, char *buffer, int maxbytes)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+	unsigned int savedWritePos;
+	int j, bytesRead = 0;
+
+	if (ring == NULL)
+		return ERROR;
+
+	savedWritePos = ring->writePos;
+
+	for (j = 0; j < maxbytes; j++) {
+		if ((ring->readPos) % (ring->bufSize + 1) == savedWritePos)
+			break;
+		buffer[j] = ring->buffer[ring->readPos];
+		++bytesRead;
+		ring->readPos = (ring->readPos + 1) % (ring->bufSize + 1);
+	}
+
+	return bytesRead;
+}
+
+int rngBufPut(RING_ID rid, char *buffer, int nbytes)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+	unsigned int savedReadPos;
+	int j, bytesWritten = 0;
+
+	if (ring == NULL)
+		return ERROR;
+
+	savedReadPos = ring->readPos;
+
+	for (j = 0; j < nbytes; j++) {
+		if ((ring->writePos + 1) % (ring->bufSize + 1) == savedReadPos)
+			break;
+		ring->buffer[ring->writePos] = buffer[j];
+		++bytesWritten;
+		ring->writePos = (ring->writePos + 1) % (ring->bufSize + 1);
+	}
+
+	return bytesWritten;
+}
+
+BOOL rngIsEmpty(RING_ID rid)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+
+	if (ring == NULL)
+		return ERROR;
+
+	return rngFreeBytes(rid) == (int)ring->bufSize;
+}
+
+BOOL rngIsFull(RING_ID rid)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+
+	if (ring == NULL)
+		return ERROR;
+
+	return rngFreeBytes(rid) == 0;
+}
+
+int rngFreeBytes(RING_ID rid)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+
+	if (ring == NULL)
+		return ERROR;
+
+	return ((ring->bufSize -
+		 (ring->writePos - ring->readPos)) % (ring->bufSize + 1));
+}
+
+int rngNBytes(RING_ID rid)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+
+	if (ring == NULL)
+		return ERROR;
+
+	return ring->bufSize - rngFreeBytes(rid);
+}
+
+void rngPutAhead(RING_ID rid, char byte, int offset)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+	int where;
+
+	if (ring) {
+		where = (ring->writePos + offset) % (ring->bufSize + 1);
+		ring->buffer[where] = byte;
+	}
+}
+
+void rngMoveAhead(RING_ID rid, int n)
+{
+	struct wind_ring *ring = find_ring_from_id(rid);
+
+	if (ring) {
+		ring->writePos += n;
+		ring->writePos %= (ring->bufSize + 1);
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/rngLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/rngLib.h
new file mode 100644
index 0000000..947c5ea
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/rngLib.h
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _VXWORKS_RNGLIB_H
+#define _VXWORKS_RNGLIB_H
+
+#include <vxworks/rngLib.h>
+
+struct wind_ring {
+	unsigned int magic;
+	unsigned int bufSize;
+	unsigned int readPos;
+	unsigned int writePos;
+	unsigned char buffer[];
+};
+
+#endif /* _VXWORKS_RNGLIB_H */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/semLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/semLib.c
new file mode 100644
index 0000000..180ed20
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/semLib.c
@@ -0,0 +1,480 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <boilerplate/ancillaries.h>
+#include <copperplate/heapobj.h>
+#include <vxworks/errnoLib.h>
+#include "reference.h"
+#include "taskLib.h"
+#include "semLib.h"
+#include "tickLib.h"
+
+/*
+ * XXX: In order to keep the following services callable from
+ * non-VxWorks tasks (but still Xenomai ones, though), make sure
+ * to never depend on the wind_task struct, but rather on the basic
+ * thread object directly.
+ */
+
+#define sem_magic	0x2a3b4c5d
+
+static struct wind_sem *alloc_sem(int options, const struct wind_sem_ops *ops)
+{
+	struct wind_sem *sem;
+
+	sem = xnmalloc(sizeof(*sem));
+	if (sem == NULL) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		return NULL;
+	}
+
+	sem->options = options;
+	sem->semops = ops;
+	sem->magic = sem_magic;
+
+	return sem;
+}
+
+static STATUS xsem_take(struct wind_sem *sem, int timeout)
+{
+	struct timespec ts, *timespec;
+	struct syncstate syns;
+	struct service svc;
+	STATUS ret = OK;
+
+	if (threadobj_irq_p())
+		return S_intLib_NOT_ISR_CALLABLE;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
+		ret = S_objLib_OBJ_ID_ERROR;
+		goto out;
+	}
+
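+	/*
+	 * A negative count encodes the number of waiters, so dropping
+	 * below zero means we must sleep until a give or flush.
+	 */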
+	if (--sem->u.xsem.value >= 0)
+		goto done;
+
+	if (timeout == NO_WAIT) {
+		sem->u.xsem.value++;
+		ret = S_objLib_OBJ_UNAVAILABLE;
+		goto done;
+	}
+
+	if (timeout != WAIT_FOREVER) {
+		timespec = &ts;
+		clockobj_ticks_to_timeout(&wind_clock, timeout, timespec);
+	} else
+		timespec = NULL;
+
+	ret = syncobj_wait_grant(&sem->u.xsem.sobj, timespec, &syns);
+	if (ret == -EIDRM) {
+		ret = S_objLib_OBJ_DELETED;
+		goto out;
+	}
+	if (ret) {
+		sem->u.xsem.value++;
+		if (ret == -ETIMEDOUT)
+			ret = S_objLib_OBJ_TIMEOUT;
+		else if (ret == -EINTR)
+			ret = OK;	/* Flushed. */
+	}
+done:
+	syncobj_unlock(&sem->u.xsem.sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+static STATUS xsem_give(struct wind_sem *sem)
+{
+	struct syncstate syns;
+	struct service svc;
+	STATUS ret = OK;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
+		ret = S_objLib_OBJ_ID_ERROR;
+		goto out;
+	}
+
+	if (sem->u.xsem.value >= sem->u.xsem.maxvalue) {
+		if (sem->u.xsem.maxvalue == INT_MAX)
+			/* No wrap around. */
+			ret = S_semLib_INVALID_OPERATION;
+	} else if (++sem->u.xsem.value <= 0)
+		syncobj_grant_one(&sem->u.xsem.sobj);
+
+	syncobj_unlock(&sem->u.xsem.sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+static STATUS xsem_flush(struct wind_sem *sem)
+{
+	struct syncstate syns;
+	struct service svc;
+	STATUS ret = OK;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
+		ret = S_objLib_OBJ_ID_ERROR;
+		goto out;
+	}
+
+	syncobj_flush(&sem->u.xsem.sobj);
+
+	syncobj_unlock(&sem->u.xsem.sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+static void sem_finalize(struct syncobj *sobj)
+{
+	struct wind_sem *sem = container_of(sobj, struct wind_sem, u.xsem.sobj);
+	xnfree(sem);
+}
+fnref_register(libvxworks, sem_finalize);
+
+static STATUS xsem_delete(struct wind_sem *sem)
+{
+	struct syncstate syns;
+	struct service svc;
+	int ret = OK;
+
+	if (threadobj_irq_p())
+		return S_intLib_NOT_ISR_CALLABLE;
+
+	CANCEL_DEFER(svc);
+
+	if (syncobj_lock(&sem->u.xsem.sobj, &syns)) {
+		ret = S_objLib_OBJ_ID_ERROR;
+		goto out;
+	}
+
+	sem->magic = ~sem_magic; /* Prevent further reference. */
+	syncobj_destroy(&sem->u.xsem.sobj, &syns);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+static const struct wind_sem_ops xsem_ops = {
+	.take = xsem_take,
+	.give = xsem_give,
+	.flush = xsem_flush,
+	.delete = xsem_delete
+};
+
+static SEM_ID alloc_xsem(int options, int initval, int maxval)
+{
+	int sobj_flags = 0, ret;
+	struct wind_sem *sem;
+
+	if (options & ~SEM_Q_PRIORITY) {
+		errno = S_semLib_INVALID_OPTION;
+		return (SEM_ID)0;
+	}
+
+	sem = alloc_sem(options, &xsem_ops);
+	if (sem == NULL) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		return (SEM_ID)0;
+	}
+
+	if (options & SEM_Q_PRIORITY)
+		sobj_flags = SYNCOBJ_PRIO;
+
+	sem->u.xsem.value = initval;
+	sem->u.xsem.maxvalue = maxval;
+	ret = syncobj_init(&sem->u.xsem.sobj, CLOCK_COPPERPLATE, sobj_flags,
+			   fnref_put(libvxworks, sem_finalize));
+	if (ret) {
+		xnfree(sem);
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		return (SEM_ID)0;
+	}
+
+	return mainheap_ref(sem, SEM_ID);
+}
+
+static STATUS msem_take(struct wind_sem *sem, int timeout)
+{
+	struct wind_task *current;
+	struct timespec ts;
+	int ret;
+
+	if (threadobj_irq_p())
+		return S_intLib_NOT_ISR_CALLABLE;
+
+	/*
+	 * We allow threads from other APIs to grab a VxWorks mutex
+	 * ignoring the safe option in such a case.
+	 */
+	current = wind_task_current();
+	if (current && (sem->options & SEM_DELETE_SAFE))
+		__RT(pthread_mutex_lock(&current->safelock));
+
+	if (timeout == NO_WAIT) {
+		ret = __RT(pthread_mutex_trylock(&sem->u.msem.lock));
+		goto check;
+	}
+
+	if  (timeout == WAIT_FOREVER) {
+		ret = __RT(pthread_mutex_lock(&sem->u.msem.lock));
+		goto check;
+	}
+
+	__clockobj_ticks_to_timeout(&wind_clock, CLOCK_REALTIME, timeout, &ts);
+	ret = __RT(pthread_mutex_timedlock(&sem->u.msem.lock, &ts));
+check:
+	switch (ret) {
+	case 0:
+		return OK;
+	case EINVAL:
+		ret = S_objLib_OBJ_ID_ERROR;
+		break;
+	case EBUSY:
+		ret = S_objLib_OBJ_UNAVAILABLE;
+		break;
+	case ETIMEDOUT:
+		ret = S_objLib_OBJ_TIMEOUT;
+		break;
+	case EOWNERDEAD:
+	case ENOTRECOVERABLE:
+		warning("owner of mutex-type semaphore %p died", sem);
+		ret = S_objLib_OBJ_UNAVAILABLE;
+		break;
+	}
+
+	if (current != NULL && (sem->options & SEM_DELETE_SAFE))
+		__RT(pthread_mutex_unlock(&current->safelock));
+
+	return ret;
+}
+
+static STATUS msem_give(struct wind_sem *sem)
+{
+	struct wind_task *current;
+	int ret;
+
+	if (threadobj_irq_p())
+		return S_intLib_NOT_ISR_CALLABLE;
+
+	ret = __RT(pthread_mutex_unlock(&sem->u.msem.lock));
+	if (ret == EINVAL)
+		return S_objLib_OBJ_ID_ERROR;
+	if (ret == EPERM)
+		return S_semLib_INVALID_OPERATION;
+
+	if (sem->options & SEM_DELETE_SAFE) {
+		current = wind_task_current();
+		if (current)
+			__RT(pthread_mutex_unlock(&current->safelock));
+	}
+
+	return OK;
+}
+
+static STATUS msem_flush(struct wind_sem *sem)
+{
+	return S_semLib_INVALID_OPERATION;
+}
+
+static STATUS msem_delete(struct wind_sem *sem)
+{
+	int ret;
+
+	if (threadobj_irq_p())
+		return S_intLib_NOT_ISR_CALLABLE;
+
+	ret = __RT(pthread_mutex_destroy(&sem->u.msem.lock));
+	if (ret == EINVAL)
+		return S_objLib_OBJ_ID_ERROR;
+	/*
+	 * XXX: We depart from the spec here since we can't flush, but
+	 * we tell the caller about any pending task instead.
+	 */
+	if (ret == EBUSY)
+		return S_semLib_INVALID_OPERATION;
+	else
+		xnfree(sem);
+
+	return OK;
+}
+
+static const struct wind_sem_ops msem_ops = {
+	.take = msem_take,
+	.give = msem_give,
+	.flush = msem_flush,
+	.delete = msem_delete
+};
+
+SEM_ID semBCreate(int options, SEM_B_STATE state)
+{
+	struct service svc;
+	SEM_ID sem_id;
+
+	CANCEL_DEFER(svc);
+	sem_id = alloc_xsem(options, state, 1);
+	CANCEL_RESTORE(svc);
+
+	return sem_id;
+}
+
+SEM_ID semCCreate(int options, int count)
+{
+	struct service svc;
+	SEM_ID sem_id;
+
+	CANCEL_DEFER(svc);
+	sem_id = alloc_xsem(options, count, INT_MAX);
+	CANCEL_RESTORE(svc);
+
+	return sem_id;
+}
+
+SEM_ID semMCreate(int options)
+{
+	pthread_mutexattr_t mattr;
+	struct wind_sem *sem;
+	struct service svc;
+
+	if (options & ~(SEM_Q_PRIORITY|SEM_DELETE_SAFE|SEM_INVERSION_SAFE)) {
+		errno = S_semLib_INVALID_OPTION;
+		return (SEM_ID)0;
+	}
+
+	if ((options & SEM_Q_PRIORITY) == 0) {
+		if (options & SEM_INVERSION_SAFE) {
+			errno = S_semLib_INVALID_QUEUE_TYPE; /* C'mon... */
+			return (SEM_ID)0;
+		}
+	}
+
+	CANCEL_DEFER(svc);
+
+	sem = alloc_sem(options, &msem_ops);
+	if (sem == NULL) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		CANCEL_RESTORE(svc);
+		return (SEM_ID)0;
+	}
+
+	/*
+	 * XXX: POSIX-wise, we have a few issues with emulating
+	 * VxWorks semaphores of the mutex kind.
+	 *
+	 * VxWorks flushes any kind of semaphore upon deletion
+	 * (however, explicit semFlush() is not allowed on the mutex
+	 * kind though); but POSIX doesn't implement such mechanism on
+	 * its mutex object. At the same time, we need priority
+	 * inheritance when SEM_INVERSION_SAFE is passed, so we can't
+	 * emulate VxWorks mutex semaphores using condvars. Since the
+	 * only way to get priority inheritance is to use a POSIX
+	 * mutex, we choose not to emulate flushing in semDelete(),
+	 * but keep inversion-safe locking possible.
+	 *
+	 * The same way, we don't support FIFO ordering for mutexes,
+	 * since this would require to handle them as recursive binary
+	 * semaphores with ownership, for no obvious upside.
+	 * Logically speaking, relying on recursion without any
+	 * consideration for priority while serializing threads is
+	 * just asking for troubles anyway.
+	 */
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
+	/* pthread_mutexattr_setrobust() might not be implemented. */
+	pthread_mutexattr_setrobust(&mattr, PTHREAD_MUTEX_ROBUST_NP);
+	if (options & SEM_INVERSION_SAFE)
+		pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
+
+	__RT(pthread_mutex_init(&sem->u.msem.lock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+
+	CANCEL_RESTORE(svc);
+
+	return mainheap_ref(sem, SEM_ID);
+}
+
+static struct wind_sem *find_sem_from_id(SEM_ID sem_id)
+{
+	struct wind_sem *sem = mainheap_deref(sem_id, struct wind_sem);
+
+	if (sem == NULL || ((uintptr_t)sem & (sizeof(uintptr_t)-1)) != 0 ||
+	    sem->magic != sem_magic)
+		return NULL;
+
+	return sem;
+}
+
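+/*
+ * Expand the common body of the semaphore wrappers below: resolve
+ * the identifier, run the class handler with cancellation deferred,
+ * then map any non-zero status it returns to errno + ERROR.
+ */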
+#define do_sem_op(sem_id, op, args...)			\
+do {							\
+	struct wind_sem *sem;				\
+	struct service svc;				\
+	STATUS ret;					\
+							\
+	sem = find_sem_from_id(sem_id);			\
+	if (sem == NULL) {				\
+		errno = S_objLib_OBJ_ID_ERROR;		\
+		return ERROR;				\
+	}						\
+							\
+	CANCEL_DEFER(svc);				\
+	ret = sem->semops->op(sem, ##args);		\
+	CANCEL_RESTORE(svc);				\
+	if (ret) {					\
+		errno = ret;				\
+		ret = ERROR;				\
+	}						\
+							\
+	return ret;					\
+} while (0)
+
+STATUS semDelete(SEM_ID sem_id)
+{
+	do_sem_op(sem_id, delete);
+}
+
+STATUS semGive(SEM_ID sem_id)
+{
+	do_sem_op(sem_id, give);
+}
+
+STATUS semTake(SEM_ID sem_id, int timeout)
+{
+	do_sem_op(sem_id, take, timeout);
+}
+
+STATUS semFlush(SEM_ID sem_id)
+{
+	do_sem_op(sem_id, flush);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/semLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/semLib.h
new file mode 100644
index 0000000..f863bf0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/semLib.h
@@ -0,0 +1,57 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _VXWORKS_SEMLIB_H
+#define _VXWORKS_SEMLIB_H
+
+#include <pthread.h>
+#include <copperplate/syncobj.h>
+#include <vxworks/semLib.h>
+
+struct wind_sem;
+
+struct wind_sem_ops {
+
+	STATUS (*take)(struct wind_sem *, int timeout);
+	STATUS (*give)(struct wind_sem *);
+	STATUS (*flush)(struct wind_sem *);
+	STATUS (*delete)(struct wind_sem *);
+};
+
+struct wind_sem {
+
+	unsigned int magic;
+	int options;
+
+	union {
+		struct {
+			struct syncobj sobj;
+			int value;
+			int maxvalue;
+		} xsem;
+		struct {
+			pthread_mutex_t lock;
+			struct threadobj *owner;
+			int lockdepth;
+		} msem;
+	} u;
+
+	const struct wind_sem_ops *semops;
+};
+
+#endif /* _VXWORKS_SEMLIB_H */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/sysLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/sysLib.c
new file mode 100644
index 0000000..449cc64
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/sysLib.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include "tickLib.h"
+#include <boilerplate/lock.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/sysLib.h>
+
+int sysClkRateGet(void)
+{
+	unsigned int resolution;
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+	resolution = clockobj_get_resolution(&wind_clock);
+	CANCEL_RESTORE(svc);
+
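+	/* e.g. the default 1000000 ns tick yields a 1000 Hz rate. */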
+	return 1000000000 / resolution;
+}
+
+STATUS sysClkRateSet(int hz)
+{
+	struct service svc;
+	int ret;
+
+	/*
+	 * This is BSP level stuff, so we don't set errno upon error,
+	 * but only return the ERROR status.
+	 */
+	if (hz <= 0)
+		return ERROR;
+
+	CANCEL_DEFER(svc);
+	ret = clockobj_set_resolution(&wind_clock, 1000000000 / hz);
+	CANCEL_RESTORE(svc);
+
+	return ret ? ERROR : OK;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.c
new file mode 100644
index 0000000..296bc72
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.c
@@ -0,0 +1,107 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#include <copperplate/heapobj.h>
+#include <vxworks/taskHookLib.h>
+#include <vxworks/errnoLib.h>
+#include "taskLib.h"
+#include "taskHookLib.h"
+
+DEFINE_PRIVATE_LIST(wind_create_hooks);
+
+DEFINE_PRIVATE_LIST(wind_delete_hooks);
+
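+/*
+ * Mirror genuine taskHookLib semantics: create hooks run in the
+ * order they were registered (appended), while delete hooks run most
+ * recently registered first (prepended).
+ */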
+static STATUS add_hook(struct pvlistobj *list, FUNCPTR hook, int prepend)
+{
+	struct wind_task_hook *p;
+
+	p = xnmalloc(sizeof(*p));
+	if (p == NULL)
+		return ERROR;
+
+	p->handler = (void (*)(TASK_ID))hook;
+	write_lock_nocancel(&wind_task_lock);
+
+	if (prepend)
+		pvlist_prepend(&p->next, list);
+	else
+		pvlist_append(&p->next, list);
+
+	write_unlock(&wind_task_lock);
+
+	return OK;
+}
+
+static STATUS remove_hook(struct pvlistobj *list, FUNCPTR hook)
+{
+	struct wind_task_hook *p = NULL;
+
+	write_lock_nocancel(&wind_task_lock);
+
+	pvlist_for_each_entry(p, list, next) {
+		if (p->handler == (void (*)(TASK_ID))hook) {
+			pvlist_remove(&p->next);
+			goto found;
+		}
+	}
+
+	p = NULL;
+found:
+	write_unlock(&wind_task_lock);
+
+	if (p) {
+		xnfree(p);
+		return OK;
+	}
+
+	return ERROR;
+}
+
+void wind_run_hooks(struct pvlistobj *list, struct wind_task *task)
+{
+	struct wind_task_hook *p;
+	TASK_ID tid;
+
+	write_lock_nocancel(&wind_task_lock);
+
+	tid = mainheap_ref(&task->priv_tcb, TASK_ID);
+
+	pvlist_for_each_entry(p, list, next)
+		p->handler(tid);
+
+	write_unlock(&wind_task_lock);
+}
+
+STATUS taskCreateHookAdd(FUNCPTR createHook)
+{
+	return add_hook(&wind_create_hooks, createHook, 0);
+}
+
+STATUS taskCreateHookDelete(FUNCPTR createHook)
+{
+	return remove_hook(&wind_create_hooks, createHook);
+}
+
+STATUS taskDeleteHookAdd(FUNCPTR deleteHook)
+{
+	return add_hook(&wind_delete_hooks, deleteHook, 1);
+}
+
+STATUS taskDeleteHookDelete(FUNCPTR deleteHook)
+{
+	return remove_hook(&wind_delete_hooks, deleteHook);
+}
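+
+/*
+ * Usage sketch (illustrative only): hooks receive the TASK_ID of the
+ * task being created or deleted. Create hooks are appended, so they
+ * run in registration order; delete hooks are prepended, so they run
+ * in reverse registration order, which seems to mirror the genuine
+ * VxWorks behavior.
+ *
+ *	static void trace_task(TASK_ID tid)
+ *	{
+ *		printf("task %lx\n", (unsigned long)tid);
+ *	}
+ *
+ *	taskCreateHookAdd((FUNCPTR)trace_task);
+ *	taskDeleteHookAdd((FUNCPTR)trace_task);
+ */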
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.h
new file mode 100644
index 0000000..c3f9c8d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/taskHookLib.h
@@ -0,0 +1,37 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+#ifndef _VXWORKS_TASKHOOKLIB_H
+#define _VXWORKS_TASKHOOKLIB_H
+
+#include <vxworks/taskLib.h>
+
+struct wind_task;
+
+struct wind_task_hook {
+	void (*handler)(TASK_ID tid);
+	struct pvholder next;
+};
+
+extern struct pvlistobj wind_create_hooks;
+
+extern struct pvlistobj wind_delete_hooks;
+
+void wind_run_hooks(struct pvlistobj *list,
+		    struct wind_task *task);
+
+#endif /* _VXWORKS_TASKHOOKLIB_H */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/taskInfo.c b/kernel/xenomai-v3.2.4/lib/vxworks/taskInfo.c
new file mode 100644
index 0000000..ec423d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/taskInfo.c
@@ -0,0 +1,175 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <pthread.h>
+#include <string.h>
+#include <boilerplate/ancillaries.h>
+#include <copperplate/threadobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskInfo.h>
+#include "taskLib.h"
+
+const char *taskName(TASK_ID task_id)
+{
+	struct wind_task *task;
+	struct service svc;
+	const char *name;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task_or_self(task_id);
+	if (task == NULL) {
+		name = NULL;
+		goto out;
+	}
+
+	name = task->name;
+	put_wind_task(task);
+out:
+	CANCEL_RESTORE(svc);
+	/*
+	 * This is racy and unsafe, but this service is terminally
+	 * flawed by design anyway.
+	 */
+	return name;
+}
+
+TASK_ID taskIdDefault(TASK_ID task_id)
+{
+	static TASK_ID value;
+
+	if (task_id)
+		value = task_id;
+
+	return value;
+}
+
+TASK_ID taskNameToId(const char *name)
+{
+	struct clusterobj *cobj;
+	struct wind_task *task;
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+	cobj = cluster_findobj(&wind_task_table, name);
+	CANCEL_RESTORE(svc);
+	if (cobj == NULL)
+		return ERROR;
+
+	task = container_of(cobj, struct wind_task, cobj);
+
+	return (TASK_ID)task->tcb;
+}
+
+BOOL taskIsReady(TASK_ID task_id)
+{
+	struct wind_task *task;
+	struct service svc;
+	int status = 0;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(task_id);
+	if (task == NULL)
+		goto out;
+
+	status = get_task_status(task);
+	put_wind_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return status == WIND_READY;
+}
+
+BOOL taskIsSuspended(TASK_ID task_id)
+{
+	struct wind_task *task;
+	struct service svc;
+	int status = 0;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(task_id);
+	if (task == NULL)
+		goto out;
+
+	status = threadobj_get_status(&task->thobj);
+	put_wind_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return (status & __THREAD_S_SUSPENDED) != 0;
+}
+
+STATUS taskGetInfo(TASK_ID task_id, TASK_DESC *desc)
+{
+	int vfirst, vlast, ret;
+	struct wind_task *task;
+	struct WIND_TCB *tcb;
+	pthread_attr_t attr;
+	STATUS status = OK;
+	struct service svc;
+	size_t stacksize;
+	void *stackbase;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(task_id);
+	if (task == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		status = ERROR;
+		goto out;
+	}
+
+	tcb = task->tcb;
+	desc->td_tid = task_id;
+	desc->td_priority = wind_task_get_priority(task);
+	desc->td_status = get_task_status(task);
+	desc->td_flags = tcb->flags;
+	namecpy(desc->td_name, task->name);
+	desc->td_entry = tcb->entry;
+	desc->td_errorStatus = *task->thobj.errno_pointer;
+	ret = pthread_getattr_np(task->thobj.ptid, &attr);
+	put_wind_task(task);
+
+	/*
+	 * If the target does not support pthread_getattr_np(), we are
+	 * out of luck for determining the stack information. We just
+	 * zero it.
+	 */
+	if (ret) {
+		/* No idea, buddy. */
+		desc->td_stacksize = 0;
+		desc->td_pStackBase = NULL;
+	} else {
+		pthread_attr_getstack(&attr, &stackbase, &stacksize);
+		desc->td_stacksize = stacksize;
+		desc->td_pStackBase = stackbase;
+
+		if (&vfirst < &vlast)
+			/* Stack grows upward. */
+			desc->td_pStackEnd = (caddr_t)stackbase + stacksize;
+		else
+			/* Stack grows downward. */
+			desc->td_pStackEnd = (caddr_t)stackbase - stacksize;
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return status;
+}
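+
+/*
+ * Usage sketch (illustrative only): query the calling task by passing
+ * its own id.
+ *
+ *	TASK_DESC desc;
+ *
+ *	if (taskGetInfo(taskIdSelf(), &desc) == OK)
+ *		printf("%s: prio=%d, stack=%zu\n", desc.td_name,
+ *		       desc.td_priority, (size_t)desc.td_stacksize);
+ */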
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/taskLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/taskLib.c
new file mode 100644
index 0000000..c8723c6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/taskLib.c
@@ -0,0 +1,908 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <memory.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <sched.h>
+#include "taskLib.h"
+#include "tickLib.h"
+#include "msgQLib.h"
+#include "taskHookLib.h"
+#include "boilerplate/namegen.h"
+#include "copperplate/heapobj.h"
+#include "copperplate/threadobj.h"
+#include "copperplate/syncobj.h"
+#include "copperplate/cluster.h"
+#include "copperplate/internal.h"
+#include "copperplate/registry-obstack.h"
+#include "vxworks/errnoLib.h"
+
+union wind_wait_union {
+	struct wind_queue_wait queue_wait;
+};
+
+struct cluster wind_task_table;
+
+DEFINE_PRIVATE_LIST(wind_task_list);
+
+pthread_mutex_t wind_task_lock;
+
+int wind_time_slice = 0;
+
+static DEFINE_NAME_GENERATOR(task_namegen, "task",
+			     struct wind_task, name);
+
+static struct wind_task *find_wind_task(TASK_ID tid)
+{
+	struct WIND_TCB *tcb = mainheap_deref(tid, struct WIND_TCB);
+	struct wind_task *task;
+
+	/*
+	 * Best-effort to validate a TCB pointer the cheap way,
+	 * without relying on any syscall.
+	 */
+	if (tcb == NULL || ((uintptr_t)tcb & (sizeof(uintptr_t)-1)) != 0)
+		return NULL;
+
+	task = tcb->opaque;
+	if (task == NULL || ((uintptr_t)task & (sizeof(uintptr_t)-1)) != 0)
+		return NULL;
+
+	if (threadobj_get_magic(&task->thobj) != task_magic)
+		return NULL;
+
+	return task;
+}
+
+static struct wind_task *find_wind_task_or_self(TASK_ID tid)
+{
+	if (tid)
+		return find_wind_task(tid);
+
+	return wind_task_current();
+}
+
+struct wind_task *get_wind_task(TASK_ID tid)
+{
+	struct wind_task *task = find_wind_task(tid);
+
+	/*
+	 * Grab the task lock, assuming that the task might have been
+	 * deleted, or that a stale opaque pointer led us to something
+	 * sitting in valid memory which is certainly not a task
+	 * object. The last chance to catch this is pthread_mutex_lock()
+	 * in threadobj_lock() detecting a wrong mutex kind and bailing
+	 * out.
+	 *
+	 * NOTE: threadobj_lock() disables cancellability for the
+	 * caller upon success, until the lock is dropped in
+	 * threadobj_unlock(), so there is no way it may vanish while
+	 * holding the lock. Therefore we need no cleanup handler
+	 * here.
+	 */
+	if (task == NULL || threadobj_lock(&task->thobj) == -EINVAL)
+		return NULL;
+
+	/* Check the magic word again, while we hold the lock. */
+	if (threadobj_get_magic(&task->thobj) != task_magic) {
+		threadobj_unlock(&task->thobj);
+		return NULL;
+	}
+
+	return task;
+}
+
+struct wind_task *get_wind_task_or_self(TASK_ID tid)
+{
+	struct wind_task *current;
+
+	if (tid)
+		return get_wind_task(tid);
+
+	current = wind_task_current();
+	if (current == NULL)
+		return NULL;
+
+	/* This one might block but cannot fail; the task is ours. */
+	threadobj_lock(&current->thobj);
+
+	return current;
+}
+
+void put_wind_task(struct wind_task *task)
+{
+	threadobj_unlock(&task->thobj);
+}
+
+int get_task_status(struct wind_task *task)
+{
+	int status = threadobj_get_status(&task->thobj), ret = WIND_READY;
+
+	if (status & __THREAD_S_SUSPENDED)
+		ret |= WIND_SUSPEND;
+
+	if (status & (__THREAD_S_WAIT|__THREAD_S_TIMEDWAIT))
+		ret |= WIND_PEND;
+	else if (status & __THREAD_S_DELAYED)
+		ret |= WIND_DELAY;
+
+	return ret;
+}
+
+static void task_finalizer(struct threadobj *thobj)
+{
+	struct wind_task *task = container_of(thobj, struct wind_task, thobj);
+
+	if (pvholder_linked(&task->next)) {
+		write_lock_nocancel(&wind_task_lock);
+		pvlist_remove(&task->next);
+		write_unlock(&wind_task_lock);
+		wind_run_hooks(&wind_delete_hooks, task);
+	}
+
+	task->tcb->status |= WIND_DEAD;
+	cluster_delobj(&wind_task_table, &task->cobj);
+	registry_destroy_file(&task->fsobj);
+	__RT(pthread_mutex_destroy(&task->safelock));
+}
+
+#ifdef CONFIG_XENO_REGISTRY
+
+static void task_decode_status(struct fsobstack *o, struct wind_task *task)
+{
+	int status = threadobj_get_status(&task->thobj);
+
+	if (threadobj_get_lockdepth(&task->thobj) > 0)
+		fsobstack_grow_string(o, " LOCK");
+
+	if (threadobj_get_policy(&task->thobj) == SCHED_RR)
+		fsobstack_grow_string(o, " RR");
+
+	if (status & __THREAD_S_SUSPENDED)
+		fsobstack_grow_string(o, " SUSPEND");
+
+	if (status & (__THREAD_S_WAIT|__THREAD_S_TIMEDWAIT))
+		fsobstack_grow_string(o, " PEND");
+	else if (status & __THREAD_S_DELAYED)
+		fsobstack_grow_string(o, " DELAY");
+	else
+		fsobstack_grow_string(o, " READY");
+}
+
+static int task_registry_open(struct fsobj *fsobj, void *priv)
+{
+	struct fsobstack *o = priv;
+	struct wind_task *task;
+	int ret;
+
+	task = container_of(fsobj, struct wind_task, fsobj);
+	ret = threadobj_lock(&task->thobj);
+	if (ret)
+		return -EIO;
+
+	fsobstack_init(o);
+
+	fsobstack_grow_format(o, "errno      = %d\n",
+			      threadobj_get_errno(&task->thobj));
+	fsobstack_grow_string(o, "status     =");
+	task_decode_status(o, task);
+	fsobstack_grow_format(o, "\npriority   = %d\n",
+			      wind_task_get_priority(task));
+	fsobstack_grow_format(o, "lock_depth = %d\n",
+			      threadobj_get_lockdepth(&task->thobj));
+
+	threadobj_unlock(&task->thobj);
+
+	fsobstack_finish(o);
+
+	return 0;
+}
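+
+/*
+ * Sample registry node content produced by the handler above
+ * (illustrative; the mount point depends on the session setup):
+ *
+ *	errno      = 0
+ *	status     = READY
+ *	priority   = 50
+ *	lock_depth = 0
+ */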
+
+static struct registry_operations registry_ops = {
+	.open		= task_registry_open,
+	.release	= fsobj_obstack_release,
+	.read		= fsobj_obstack_read
+};
+
+#else
+
+static struct registry_operations registry_ops;
+
+#endif /* CONFIG_XENO_REGISTRY */
+
+static int task_prologue(void *arg)
+{
+	struct wind_task *task = arg;
+
+	return __bt(threadobj_prologue(&task->thobj, task->name));
+}
+
+static void *task_trampoline(void *arg)
+{
+	struct wind_task *task = arg;
+	struct wind_task_args *args = &task->args;
+	struct sched_param_ex param_ex;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	write_lock_nocancel(&wind_task_lock);
+	pvlist_append(&task->next, &wind_task_list);
+	write_unlock(&wind_task_lock);
+
+	ret = __bt(registry_add_file(&task->fsobj, O_RDONLY,
+				     "/vxworks/tasks/%s", task->name));
+	if (ret)
+		warning("failed to export task %s to registry, %s",
+			task->name, symerror(ret));
+
+	wind_run_hooks(&wind_create_hooks, task);
+
+	/* Wait for someone to run taskActivate() upon us. */
+	threadobj_wait_start();
+
+	/* Turn on time slicing if RR globally enabled. */
+	if (wind_time_slice) {
+		struct timespec ts;
+		clockobj_ticks_to_timespec(&wind_clock, wind_time_slice, &ts);
+		param_ex.sched_rr_quantum.tv_sec = ts.tv_sec;
+		param_ex.sched_rr_quantum.tv_nsec = ts.tv_nsec;
+		threadobj_lock(&task->thobj);
+		param_ex.sched_priority = threadobj_get_priority(&task->thobj);
+		threadobj_set_schedparam(&task->thobj, SCHED_RR, &param_ex);
+		threadobj_unlock(&task->thobj);
+	}
+
+	threadobj_notify_entry();
+
+	CANCEL_RESTORE(svc);
+
+	args->entry(args->arg0, args->arg1, args->arg2, args->arg3,
+		    args->arg4, args->arg5, args->arg6, args->arg7,
+		    args->arg8, args->arg9);
+
+	return NULL;
+}
+
+/*
+ * By default, WIND kernel priorities are reversely mapped to
+ * SCHED_FIFO levels. The available priority range is [1..256] over
+ * Cobalt when running in primary mode, and [1..99] over the regular
+ * kernel with the POSIX interface.
+ *
+ * NOTE: over Cobalt, a thread transitioning to secondary mode has its
+ * priority ceiled to 99 in the regular POSIX SCHED_FIFO class.
+ *
+ * The application code may override the routine doing the priority
+ * mapping from VxWorks to SCHED_FIFO (normalize). Normalized
+ * priorities returned by this routine must be in the range [ 1
+ * .. threadobj_high_prio ] inclusive.
+ */
+__weak int wind_task_normalize_priority(int wind_prio)
+{
+	/*
+	 * SCHED_FIFO priorities are always 1-based regardless of the
+	 * underlying real-time core. We remap the lowest VxWorks
+	 * priority to the lowest available level in the SCHED_FIFO
+	 * policy.
+	 */
+	if (wind_prio > threadobj_high_prio - 1)
+		panic("current implementation restricts VxWorks "
+		      "priority levels to range [%d..0]",
+		      threadobj_high_prio - 1);
+
+	/* Map a VxWorks priority level to a SCHED_FIFO one. */
+	return threadobj_high_prio - wind_prio - 1;
+}
+
+__weak int wind_task_denormalize_priority(int core_prio)
+{
+	/* Map a SCHED_FIFO priority level to a VxWorks one. */
+	return threadobj_high_prio - core_prio - 1;
+}
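+
+/*
+ * Worked example for the two mapping routines above, assuming
+ * threadobj_high_prio == 257 over Cobalt as implied by the [1..256]
+ * range: VxWorks priority 0 (highest) normalizes to SCHED_FIFO 256,
+ * and 255 (lowest) to 1. The routines are inverses of each other:
+ *
+ *	denormalize(normalize(p)) == 257 - (257 - p - 1) - 1 == p
+ */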
+
+static int check_task_priority(u_long wind_prio, int *core_prio)
+{
+	if (wind_prio < 0 || wind_prio > 255) /* In theory. */
+		return S_taskLib_ILLEGAL_PRIORITY;
+
+	*core_prio = wind_task_normalize_priority(wind_prio);
+
+	return OK;
+}
+
+static STATUS __taskInit(struct wind_task *task,
+			 struct WIND_TCB *tcb, const char *name,
+			 int prio, int flags, FUNCPTR entry, int stacksize)
+{
+	struct corethread_attributes cta;
+	struct threadobj_init_data idata;
+	pthread_mutexattr_t mattr;
+	int ret, cprio;
+
+	ret = check_task_priority(prio, &cprio);
+	if (ret) {
+		errno = ret;
+		return ERROR;
+	}
+
+	task->tcb = tcb;
+	initpvh(&task->next);
+	tcb->opaque = task;
+	tcb->status = WIND_SUSPEND;
+	tcb->safeCnt = 0;
+	tcb->flags = flags;
+	tcb->entry = entry;
+
+	generate_name(task->name, name, &task_namegen);
+
+	idata.magic = task_magic;
+	idata.finalizer = task_finalizer;
+	idata.policy = cprio ? SCHED_FIFO : SCHED_OTHER;
+	idata.param_ex.sched_priority = cprio;
+	ret = threadobj_init(&task->thobj, &idata);
+	if (ret) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		return ERROR;
+	}
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, PTHREAD_MUTEX_RECURSIVE);
+	pthread_mutexattr_setprotocol(&mattr, PTHREAD_PRIO_INHERIT);
+	pthread_mutexattr_setpshared(&mattr, mutex_scope_attribute);
+	__RT(pthread_mutex_init(&task->safelock, &mattr));
+	pthread_mutexattr_destroy(&mattr);
+
+	ret = __bt(cluster_addobj(&wind_task_table, task->name, &task->cobj));
+	if (ret) {
+		warning("duplicate task name: %s", task->name);
+		threadobj_uninit(&task->thobj);
+		__RT(pthread_mutex_destroy(&task->safelock));
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	registry_init_file_obstack(&task->fsobj, &registry_ops);
+
+	cta.policy = idata.policy;
+	cta.param_ex.sched_priority = cprio;
+	cta.prologue = task_prologue;
+	cta.run = task_trampoline;
+	cta.arg = task;
+	cta.stacksize = stacksize;
+	cta.detachstate = PTHREAD_CREATE_DETACHED;
+	ret = __bt(copperplate_create_thread(&cta, &task->thobj.ptid));
+	if (ret) {
+		registry_destroy_file(&task->fsobj);
+		cluster_delobj(&wind_task_table, &task->cobj);
+		threadobj_uninit(&task->thobj);
+		__RT(pthread_mutex_destroy(&task->safelock));
+		errno = ret == -EAGAIN ? S_memLib_NOT_ENOUGH_MEMORY : -ret;
+		return ERROR;
+	}
+
+	return OK;
+}
+
+static inline struct wind_task *alloc_task(void)
+{
+	return threadobj_alloc(struct wind_task,
+			       thobj, union wind_wait_union);
+}
+
+STATUS taskInit(WIND_TCB *pTcb,
+		const char *name,
+		int prio,
+		int flags,
+		char *stack __attribute__ ((unused)),
+		int stacksize,
+		FUNCPTR entry,
+		long arg0, long arg1, long arg2, long arg3, long arg4,
+		long arg5, long arg6, long arg7, long arg8, long arg9)
+{
+	struct wind_task_args *args;
+	struct wind_task *task;
+	struct service svc;
+	STATUS ret = ERROR;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	CANCEL_DEFER(svc);
+
+	task = alloc_task();
+	if (task == NULL) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		goto out;
+	}
+
+	args = &task->args;
+	args->entry = entry;
+	args->arg0 = arg0;
+	args->arg1 = arg1;
+	args->arg2 = arg2;
+	args->arg3 = arg3;
+	args->arg4 = arg4;
+	args->arg5 = arg5;
+	args->arg6 = arg6;
+	args->arg7 = arg7;
+	args->arg8 = arg8;
+	args->arg9 = arg9;
+	ret = __taskInit(task, pTcb, name, prio, flags, entry, stacksize);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+STATUS taskActivate(TASK_ID tid)
+{
+	struct wind_task *task;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(tid);
+	if (task == NULL) {
+		ret = ERROR;
+		goto out;
+	}
+
+	task->tcb->status &= ~WIND_SUSPEND;
+	ret = threadobj_start(&task->thobj);
+	switch (ret) {
+	case -EIDRM:
+		ret = OK;
+		break;
+	default:
+		ret = ERROR;
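+		/* Fall through to drop the lock. */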
+	case 0:	/* == OK */
+		put_wind_task(task);
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+TASK_ID taskSpawn(const char *name,
+		  int prio,
+		  int flags,
+		  int stacksize,
+		  FUNCPTR entry,
+		  long arg0, long arg1, long arg2, long arg3, long arg4,
+		  long arg5, long arg6, long arg7, long arg8, long arg9)
+{
+	struct wind_task_args *args;
+	struct wind_task *task;
+	struct service svc;
+	TASK_ID tid;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	CANCEL_DEFER(svc);
+
+	task = alloc_task();
+	if (task == NULL) {
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		CANCEL_RESTORE(svc);
+		return ERROR;
+	}
+
+	args = &task->args;
+	args->entry = entry;
+	args->arg0 = arg0;
+	args->arg1 = arg1;
+	args->arg2 = arg2;
+	args->arg3 = arg3;
+	args->arg4 = arg4;
+	args->arg5 = arg5;
+	args->arg6 = arg6;
+	args->arg7 = arg7;
+	args->arg8 = arg8;
+	args->arg9 = arg9;
+
+	if (__taskInit(task, &task->priv_tcb, name,
+		       prio, flags, entry, stacksize) == ERROR) {
+		CANCEL_RESTORE(svc);
+		return ERROR;
+	}
+
+	CANCEL_RESTORE(svc);
+
+	tid = mainheap_ref(&task->priv_tcb, TASK_ID);
+
+	return taskActivate(tid) == ERROR ? ERROR : tid;
+}
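+
+/*
+ * Usage sketch for taskSpawn() above (illustrative only): all ten
+ * argument slots must be supplied, with unused ones passed as 0.
+ *
+ *	TASK_ID tid = taskSpawn("tWorker", 100, 0, 32768, (FUNCPTR)work,
+ *				42, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+ *	if (tid == ERROR)
+ *		... inspect errno as set by taskInit()/taskActivate() ...
+ */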
+
+static STATUS __taskDelete(TASK_ID tid, int force)
+{
+	struct wind_task *task;
+	struct service svc;
+	int ret;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	task = find_wind_task_or_self(tid);
+	if (task == NULL) {
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	CANCEL_DEFER(svc);
+
+	/*
+	 * We always attempt to grab the thread safe lock first, then
+	 * make sure nobody (including the target task itself) will be
+	 * able to alter the internal state of that task anymore. In
+	 * forced mode, we are allowed to bypass lock contention, but
+	 * then we might create dangerous situations leading to
+	 * invalid memory references; that's just part of the deal.
+	 *
+	 * NOTE: Locking order is always safelock first, internal
+	 * object lock afterwards, therefore, _never_ call
+	 * __taskDelete() directly or indirectly while holding the
+	 * thread object lock. You have been warned.
+	 */
+	if (force)	/* Best effort only. */
+		force = __RT(pthread_mutex_trylock(&task->safelock));
+	else
+		__RT(pthread_mutex_lock(&task->safelock));
+
+	ret = threadobj_lock(&task->thobj);
+
+	if (!force)	/* I.e. do we own the safe lock? */
+		__RT(pthread_mutex_unlock(&task->safelock));
+
+	if (ret == 0)
+		ret = threadobj_cancel(&task->thobj);
+
+	CANCEL_RESTORE(svc);
+
+	if (ret)
+		goto objid_error;
+
+	return OK;
+}
+
+STATUS taskDelete(TASK_ID tid)
+{
+	return __taskDelete(tid, 0);
+}
+
+STATUS taskDeleteForce(TASK_ID tid)
+{
+	return __taskDelete(tid, 1);
+}
+
+TASK_ID taskIdSelf(void)
+{
+	struct wind_task *current;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	current = wind_task_current();
+	if (current == NULL) {
+		errno = S_objLib_OBJ_NO_METHOD;
+		return ERROR;
+	}
+
+	return (TASK_ID)current->tcb;
+}
+
+struct WIND_TCB *taskTcb(TASK_ID tid)
+{
+	struct wind_task *task;
+
+	task = find_wind_task(tid);
+	if (task == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		return NULL;
+	}
+
+	return task->tcb;
+}
+
+STATUS taskSuspend(TASK_ID tid)
+{
+	struct wind_task *task;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(tid);
+	if (task == NULL)
+		goto objid_error;
+
+	ret = threadobj_suspend(&task->thobj);
+	put_wind_task(task);
+
+	if (ret) {
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		ret = ERROR;
+	}
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+STATUS taskResume(TASK_ID tid)
+{
+	struct wind_task *task;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(tid);
+	if (task == NULL)
+		goto objid_error;
+
+	ret = threadobj_resume(&task->thobj);
+	put_wind_task(task);
+
+	if (ret) {
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		ret = ERROR;
+	}
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+STATUS taskSafe(void)
+{
+	struct wind_task *current;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	current = wind_task_current();
+	if (current == NULL) {
+		errno = S_objLib_OBJ_NO_METHOD;
+		return ERROR;
+	}
+
+	/*
+	 * Grabbing the safelock will lock out cancellation requests,
+	 * so we don't have to issue CANCEL_DEFER().
+	 */
+	__RT(pthread_mutex_lock(&current->safelock));
+	current->tcb->safeCnt++;
+
+	return OK;
+}
+
+STATUS taskUnsafe(void)
+{
+	struct wind_task *current;
+	int ret;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	current = wind_task_current();
+	if (current == NULL) {
+		errno = S_objLib_OBJ_NO_METHOD;
+		return ERROR;
+	}
+
+	ret = __RT(pthread_mutex_unlock(&current->safelock));
+	if (ret == 0)
+		current->tcb->safeCnt--;
+
+	return OK;
+}
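+
+/*
+ * Usage sketch (illustrative only): bracket a critical section so
+ * that taskDelete() issued by another task cannot tear it down
+ * half-way. The underlying safelock is recursive, so safe sections
+ * may nest as long as each taskSafe() is balanced by taskUnsafe().
+ *
+ *	taskSafe();
+ *	... update state which must not outlive us half-done ...
+ *	taskUnsafe();
+ */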
+
+STATUS taskIdVerify(TASK_ID tid)
+{
+	struct wind_task *task;
+
+	task = find_wind_task(tid);
+	if (task == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	return OK;
+}
+
+void taskExit(int code)
+{
+	pthread_exit((void *)(long)code);
+}
+
+STATUS taskPrioritySet(TASK_ID tid, int prio)
+{
+	struct sched_param_ex param_ex;
+	struct wind_task *task;
+	int ret, policy, cprio;
+	struct service svc;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(tid);
+	if (task == NULL)
+		goto objid_error;
+
+	ret = check_task_priority(prio, &cprio);
+	if (ret) {
+		put_wind_task(task);
+		errno = ret;
+		ret = ERROR;
+		goto out;
+	}
+
+	policy = cprio ? SCHED_FIFO : SCHED_OTHER;
+	param_ex.sched_priority = cprio;
+	ret = threadobj_set_schedparam(&task->thobj, policy, &param_ex);
+	if (ret != -EIDRM)
+		put_wind_task(task);
+
+	if (ret) {
+	objid_error:
+		errno = S_objLib_OBJ_ID_ERROR;
+		ret = ERROR;
+	}
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+int wind_task_get_priority(struct wind_task *task)
+{
+	/* Can't fail if we hold the task lock as we should. */
+	int prio = threadobj_get_priority(&task->thobj);
+	return wind_task_denormalize_priority(prio);
+}
+
+STATUS taskPriorityGet(TASK_ID tid, int *priop)
+{
+	struct wind_task *task;
+	struct service svc;
+	int ret = OK;
+
+	CANCEL_DEFER(svc);
+
+	task = get_wind_task(tid);
+	if (task == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		ret = ERROR;
+		goto out;
+	}
+
+	*priop = wind_task_get_priority(task);
+	put_wind_task(task);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+STATUS taskLock(void)
+{
+	struct wind_task *task;
+	struct service svc;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	task = find_wind_task_or_self(0);
+	if (task == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	CANCEL_DEFER(svc);
+	threadobj_lock_sched();
+	CANCEL_RESTORE(svc);
+
+	return OK;
+}
+
+STATUS taskUnlock(void)
+{
+	struct wind_task *task;
+	struct service svc;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	task = find_wind_task_or_self(0);
+	if (task == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	CANCEL_DEFER(svc);
+	threadobj_unlock_sched();
+	CANCEL_RESTORE(svc);
+
+	return OK;
+}
+
+STATUS taskDelay(int ticks)
+{
+	struct wind_task *current;
+	struct timespec rqt;
+	struct service svc;
+	int ret;
+
+	if (threadobj_irq_p()) {
+		errno = S_intLib_NOT_ISR_CALLABLE;
+		return ERROR;
+	}
+
+	current = wind_task_current();
+	if (current == NULL) {
+		errno = S_objLib_OBJ_NO_METHOD;
+		return ERROR;
+	}
+
+	if (ticks == 0) {
+		sched_yield();	/* Manual round-robin. */
+		return OK;
+	}
+
+	CANCEL_DEFER(svc);
+
+	clockobj_ticks_to_timeout(&wind_clock, ticks, &rqt);
+	ret = threadobj_sleep(&rqt);
+	if (ret) {
+		errno = -ret;
+		ret = ERROR;
+	}
+
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
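+
+/*
+ * Usage sketch for taskDelay() above (illustrative only): delays are
+ * expressed in clock ticks, so portable code derives the count from
+ * the current tick rate, e.g. for a 100 ms delay:
+ *
+ *	taskDelay(sysClkRateGet() / 10);
+ *
+ * taskDelay(0) does not sleep but yields the CPU, implementing the
+ * manual round-robin noted in the code.
+ */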
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/taskLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/taskLib.h
new file mode 100644
index 0000000..c8d15dd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/taskLib.h
@@ -0,0 +1,106 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _VXWORKS_TASKLIB_H
+#define _VXWORKS_TASKLIB_H
+
+#include <copperplate/threadobj.h>
+#include <copperplate/registry.h>
+#include <copperplate/cluster.h>
+#include <vxworks/taskLib.h>
+
+struct wind_task_args {
+	FUNCPTR entry;
+	long arg0;
+	long arg1;
+	long arg2;
+	long arg3;
+	long arg4;
+	long arg5;
+	long arg6;
+	long arg7;
+	long arg8;
+	long arg9;
+};
+
+struct wind_task {
+	pthread_mutex_t safelock;
+	struct WIND_TCB *tcb;
+	struct WIND_TCB priv_tcb;
+	char name[XNOBJECT_NAME_LEN];
+	struct wind_task_args args;
+	struct threadobj thobj;
+	struct fsobj fsobj;
+	struct clusterobj cobj;
+	struct pvholder next;
+};
+
+#define do_each_wind_task(__task, __action)				\
+	({								\
+		__label__ out;						\
+		int __ret;						\
+		push_cleanup_lock(&wind_task_lock);			\
+		read_lock(&wind_task_lock);				\
+		if (!pvlist_empty(&wind_task_list))			\
+			pvlist_for_each_entry(__task, &wind_task_list, next) { \
+				threadobj_lock(&(__task)->thobj);	\
+				__ret = (__action);			\
+				if (__ret == -EIDRM)			\
+					continue;			\
+				threadobj_unlock(&(__task)->thobj);	\
+				if (__ret)				\
+					goto out;			\
+			}						\
+		read_unlock(&wind_task_lock);				\
+		pop_cleanup_lock(&wind_task_lock);			\
+	out:								\
+		__ret;							\
+	})
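+
+/*
+ * Usage sketch for do_each_wind_task() above (illustrative only): the
+ * macro runs __action on every live task with that task's lock held,
+ * stopping early on any non-zero, non-EIDRM result. E.g. counting
+ * tasks:
+ *
+ *	struct wind_task *task;
+ *	int count = 0;
+ *
+ *	do_each_wind_task(task, (count++, 0));
+ */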
+
+int wind_task_get_priority(struct wind_task *task);
+
+#define task_magic	0x1a2b3c4d
+
+static inline struct wind_task *wind_task_current(void)
+{
+	struct threadobj *thobj = threadobj_current();
+
+	if (thobj == NULL ||
+	    threadobj_get_magic(thobj) != task_magic)
+		return NULL;
+
+	return container_of(thobj, struct wind_task, thobj);
+}
+
+struct wind_task *get_wind_task(TASK_ID tid);
+
+struct wind_task *get_wind_task_or_self(TASK_ID tid);
+
+void put_wind_task(struct wind_task *task);
+
+int get_task_status(struct wind_task *task);
+
+extern struct cluster wind_task_table;
+
+extern struct pvlistobj wind_task_list;
+
+extern pthread_mutex_t wind_task_lock;
+
+extern int wind_time_slice;
+
+#endif /* _VXWORKS_TASKLIB_H */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/Makefile b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/Makefile
new file mode 100644
index 0000000..648c9be
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/Makefile
@@ -0,0 +1,43 @@
+ifeq ($(DESTDIR),)
+XENO_CONFIG=xeno-config
+else
+XENO_CONFIG=$(DESTDIR)/bin/xeno-config
+endif
+
+prefix := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --prefix)
+solibs := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --libdir)
+
+ifeq ($(prefix),)
+$(error Please add <xenomai-install-path>/bin to your PATH variable or specify DESTDIR)
+endif
+
+TESTS := task-1 task-2 msgQ-1 msgQ-2 msgQ-3 wd-1 sem-1 sem-2 sem-3 sem-4 lst-1 rng-1
+
+CFLAGS := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --skin=vxworks --cflags) -g
+LDFLAGS := $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --skin=vxworks --ldflags)
+CC = $(shell DESTDIR=$(DESTDIR) $(XENO_CONFIG) --cc)
+
+all: $(TESTS)
+
+%: %.c
+	$(CC) -o $@ $< $(CFLAGS) $(LDFLAGS)
+
+install: all
+	install -d $(prefix)/testsuite/vxworks
+	install -t $(prefix)/testsuite/vxworks $(TESTS)
+
+clean:
+	$(RM) $(TESTS) *~
+
+# Run the test suite. We pin all tests to CPU #0, so that SMP does not
+# alter the execution sequence we expect from them.
+test: all
+	@for t in $(TESTS); do \
+		echo -n $$t...; \
+		sudo LD_LIBRARY_PATH=$(solibs) $(VALGRIND) ./$$t --cpu-affinity=0 --silent && echo ok || echo BAD; \
+	done
+
+test/%: %
+	sudo LD_LIBRARY_PATH=$(solibs) $(VALGRIND) ./$(@F) --cpu-affinity=0 --silent && echo ok || echo BAD
+
+.PHONY: all install clean test
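+
+# Example invocation (illustrative): with a Xenomai tree staged under
+# /opt/xenomai, build and run the whole suite, or a single test:
+#
+#   make DESTDIR=/opt/xenomai test
+#   make DESTDIR=/opt/xenomai test/sem-1
+#
+# Set VALGRIND=valgrind to run the tests instrumented.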
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/lst-1.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/lst-1.c
new file mode 100644
index 0000000..03dec36
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/lst-1.c
@@ -0,0 +1,144 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/lstLib.h>
+
+static struct traceobj trobj;
+
+static void rootTask(long arg, ...)
+{
+	NODE first, second, third, fourth;
+	LIST list;
+
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, 0 == lstNth(0, 1));
+	traceobj_assert(&trobj, 0 == lstFirst(0));
+	traceobj_assert(&trobj, 0 == lstLast(0));
+	traceobj_assert(&trobj, 0 == lstGet(0));
+
+	lstInit(&list);
+	traceobj_assert(&trobj, 0 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstFirst(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 1));
+
+	lstAdd(&list, &first);
+	traceobj_assert(&trobj, 1 == lstCount(&list));
+	traceobj_assert(&trobj, &first == lstFirst(&list));
+	traceobj_assert(&trobj, &first == lstLast(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, NULL == lstNext(&first));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first == lstNth(&list, 1));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 2));
+
+	lstAdd(&list, &second);
+	traceobj_assert(&trobj, 2 == lstCount(&list));
+	traceobj_assert(&trobj, &first == lstFirst(&list));
+	traceobj_assert(&trobj, &second == lstLast(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first  == lstNth(&list, 1));
+	traceobj_assert(&trobj, &second == lstNth(&list, 2));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 3));
+
+	lstAdd(&list, &third);
+	traceobj_assert(&trobj, 3 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, &third == lstLast(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first  == lstNth(&list, 1));
+	traceobj_assert(&trobj, &second == lstNth(&list, 2));
+	traceobj_assert(&trobj, &third  == lstNth(&list, 3));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 4));
+
+	lstAdd(&list, &fourth);
+	traceobj_assert(&trobj, 4 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, &fourth == lstLast(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first  == lstNth(&list, 1));
+	traceobj_assert(&trobj, &second == lstNth(&list, 2));
+	traceobj_assert(&trobj, &third  == lstNth(&list, 3));
+	traceobj_assert(&trobj, &fourth == lstNth(&list, 4));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 5));
+
+	lstDelete(&list, &third);
+	traceobj_assert(&trobj, 3 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, &fourth == lstLast(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first  == lstNth(&list, 1));
+	traceobj_assert(&trobj, &second == lstNth(&list, 2));
+	traceobj_assert(&trobj, &fourth == lstNth(&list, 3));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 4));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 5));
+
+	lstInsert(&list, &second, &third);
+	traceobj_assert(&trobj, 4 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, &fourth == lstLast(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first  == lstNth(&list, 1));
+	traceobj_assert(&trobj, &second == lstNth(&list, 2));
+	traceobj_assert(&trobj, &third  == lstNth(&list, 3));
+	traceobj_assert(&trobj, &fourth == lstNth(&list, 4));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 5));
+
+	traceobj_assert(&trobj, &fourth == lstNStep(&second, 2));
+	traceobj_assert(&trobj, 4 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, &fourth == lstLast(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first  == lstNth(&list, 1));
+	traceobj_assert(&trobj, &second == lstNth(&list, 2));
+	traceobj_assert(&trobj, &third  == lstNth(&list, 3));
+	traceobj_assert(&trobj, &fourth == lstNth(&list, 4));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 5));
+
+	traceobj_assert(&trobj, 1 == lstFind(&list, &first ));
+	traceobj_assert(&trobj, 2 == lstFind(&list, &second));
+	traceobj_assert(&trobj, 3 == lstFind(&list, &third ));
+	traceobj_assert(&trobj, 4 == lstFind(&list, &fourth));
+	traceobj_assert(&trobj, 4 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, &fourth == lstLast(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &first  == lstNth(&list, 1));
+	traceobj_assert(&trobj, &second == lstNth(&list, 2));
+	traceobj_assert(&trobj, &third  == lstNth(&list, 3));
+	traceobj_assert(&trobj, &fourth == lstNth(&list, 4));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 5));
+
+	traceobj_assert(&trobj, &first == lstGet(&list));
+	traceobj_assert(&trobj, 3 == lstCount(&list));
+	traceobj_assert(&trobj, NULL == lstPrevious(&first));
+	traceobj_assert(&trobj, NULL == lstPrevious(&second));
+	traceobj_assert(&trobj, &fourth == lstLast(&list));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 0));
+	traceobj_assert(&trobj, &second == lstNth(&list, 1));
+	traceobj_assert(&trobj, &third  == lstNth(&list, 2));
+	traceobj_assert(&trobj, &fourth == lstNth(&list, 3));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 4));
+	traceobj_assert(&trobj, 0 == lstNth(&list, 5));
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	tid = taskSpawn("rootTask", 50,	0, 0, rootTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-1.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-1.c
new file mode 100644
index 0000000..fd15e1d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-1.c
@@ -0,0 +1,97 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/msgQLib.h>
+
+static struct traceobj trobj;
+
+#define NMESSAGES (sizeof(messages) / sizeof(messages[0]))
+
+static int messages[] = {
+	0xfafafafa,
+	0xbebebebe,
+	0xcdcdcdcd,
+	0xabcdefff,
+	0x12121212,
+	0x34343434,
+	0x56565656,
+	0x78787878,
+	0xdededede,
+	0xbcbcbcbc
+};
+
+static void rootTask(long arg, ...)
+{
+	MSG_Q_ID qid;
+	int ret, msg;
+
+	traceobj_enter(&trobj);
+
+	qid = msgQCreate(NMESSAGES, sizeof(int), 0xffff);
+	traceobj_assert(&trobj, qid == 0 && errno == S_msgQLib_INVALID_QUEUE_TYPE);
+
+	qid = msgQCreate(-1, sizeof(int), MSG_Q_FIFO);
+	traceobj_assert(&trobj, qid == 0 && errno == S_msgQLib_INVALID_QUEUE_TYPE);
+
+	qid = msgQCreate(NMESSAGES, 0, MSG_Q_FIFO);
+	traceobj_assert(&trobj, qid != 0);
+
+	ret = msgQDelete(qid);
+	traceobj_assert(&trobj, ret == OK);
+
+	qid = msgQCreate(NMESSAGES, sizeof(int), MSG_Q_PRIORITY);
+	traceobj_assert(&trobj, qid != 0);
+
+	ret = msgQNumMsgs(qid);
+	traceobj_assert(&trobj, ret == 0);
+
+	ret = msgQSend(qid, (char *)&messages[0], sizeof(int), NO_WAIT, MSG_PRI_NORMAL);
+	traceobj_assert(&trobj, ret == OK);
+
+	ret = msgQNumMsgs(qid);
+	traceobj_assert(&trobj, ret == 1);
+
+	ret = msgQSend(qid, (char *)&messages[1], sizeof(int), NO_WAIT, MSG_PRI_NORMAL);
+	traceobj_assert(&trobj, ret == OK);
+
+	ret = msgQNumMsgs(qid);
+	traceobj_assert(&trobj, ret == 2);
+
+	ret = msgQReceive(qid, (char *)&msg, 0, NO_WAIT);
+	traceobj_assert(&trobj, ret == 0);
+
+	ret = msgQNumMsgs(qid);
+	traceobj_assert(&trobj, ret == 1);
+
+	ret = msgQReceive(qid, (char *)&msg, sizeof(int), NO_WAIT);
+	traceobj_assert(&trobj, ret == sizeof(int));
+	traceobj_assert(&trobj, msg == 0xbebebebe);
+
+	ret = msgQNumMsgs(qid);
+	traceobj_assert(&trobj, ret == 0);
+
+	ret = msgQReceive(qid, (char *)&msg, sizeof(int), 1000);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_TIMEOUT);
+
+	ret = msgQDelete(qid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	tid = taskSpawn("rootTask", 50,	0, 0, rootTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-2.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-2.c
new file mode 100644
index 0000000..58f11e3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-2.c
@@ -0,0 +1,110 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/msgQLib.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	3, 4, 5, 6,
+	1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2, 1, 2,
+	7,
+};
+
+#define NMESSAGES (sizeof(messages) / sizeof(messages[0]))
+
+static int messages[] = {
+	0xfafafafa,
+	0xbebebebe,
+	0xcdcdcdcd,
+	0xabcdefff,
+	0x12121212,
+	0x34343434,
+	0x56565656,
+	0x78787878,
+	0xdededede,
+	0xbcbcbcbc
+};
+
+static MSG_Q_ID qid;
+
+static void peerTask(long arg, ...)
+{
+	int ret, msg, n;
+
+	traceobj_enter(&trobj);
+
+	n = 1;
+	do {
+		traceobj_mark(&trobj, 1);
+		ret = msgQReceive(qid, (char *)&msg, sizeof(int), NO_WAIT);
+		traceobj_assert(&trobj, ret == sizeof(int));
+		traceobj_assert(&trobj, msg == messages[NMESSAGES - n]);
+		traceobj_mark(&trobj, 2);
+	} while (n++ < NMESSAGES);
+
+	traceobj_exit(&trobj);
+}
+
+static void rootTask(long arg, ...)
+{
+	TASK_ID tid;
+	int ret, n;
+
+	traceobj_enter(&trobj);
+
+	qid = msgQCreate(NMESSAGES, sizeof(int), MSG_Q_PRIORITY);
+	traceobj_assert(&trobj, qid != 0);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = taskPrioritySet(taskIdSelf(), 10);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 4);
+
+	tid = taskSpawn("peerTask",
+			11,
+			0, 0, peerTask, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_mark(&trobj, 5);
+
+	/*
+	 * The last send deliberately overruns the queue to observe the
+	 * ERROR status; clamp the index so it does not also overrun
+	 * the message array.
+	 */
+	n = 0;
+	do
+		ret = msgQSend(qid, (char *)&messages[n % NMESSAGES], sizeof(int), NO_WAIT, MSG_PRI_URGENT);
+	while (n++ < NMESSAGES && ret != ERROR);
+
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_UNAVAILABLE && n == NMESSAGES + 1);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = taskDelay(10);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = msgQNumMsgs(qid);
+	traceobj_assert(&trobj, ret == 0);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	tid = taskSpawn("rootTask", 50,	0, 0, rootTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-3.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-3.c
new file mode 100644
index 0000000..1a4e20d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/msgQ-3.c
@@ -0,0 +1,111 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/msgQLib.h>
+
+#define NMESSAGES  10
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	11, 1, 2, 3, 12, 8,
+	4, 5, 6, 9, 7, 10, 13
+};
+
+static MSG_Q_ID qid;
+
+static void rootTask(long arg, ...)
+{
+	int ret, msg, n;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	qid = msgQCreate(NMESSAGES, sizeof(msg), MSG_Q_FIFO);
+	traceobj_assert(&trobj, qid != 0);
+
+	traceobj_mark(&trobj, 2);
+
+	for (msg = 0; msg < NMESSAGES; msg++) {
+		ret = msgQSend(qid, (char *)&msg, sizeof(msg), NO_WAIT, MSG_PRI_NORMAL);
+		traceobj_assert(&trobj, ret == OK);
+	}
+
+	traceobj_mark(&trobj, 3);
+
+	ret = msgQSend(qid, (char *)&msg, sizeof(msg), WAIT_FOREVER, MSG_PRI_URGENT);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = msgQReceive(qid, (char *)&msg, sizeof(int), WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == sizeof(int) && msg == 10);
+
+	traceobj_mark(&trobj, 5);
+
+	for (n = 1; n < NMESSAGES; n++) { /* peer task read #0 already. */
+		ret = msgQReceive(qid, (char *)&msg, sizeof(int), WAIT_FOREVER);
+		traceobj_assert(&trobj, ret == sizeof(int) && msg == n);
+	}
+
+	traceobj_mark(&trobj, 6);
+
+	ret = msgQReceive(qid, (char *)&msg, sizeof(int), WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_DELETED);
+
+	traceobj_mark(&trobj, 7);
+
+	traceobj_exit(&trobj);
+}
+
+static void peerTask(long arg, ...)
+{
+	int ret, msg;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = msgQReceive(qid, (char *)&msg, sizeof(int), WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == sizeof(int) && msg == 0);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = msgQDelete(qid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 10);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID rtid, ptid;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	traceobj_mark(&trobj, 11);
+
+	rtid = taskSpawn("rootTask", 50, 0, 0, rootTask,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, rtid != ERROR);
+
+	traceobj_mark(&trobj, 12);
+
+	ptid = taskSpawn("peerTask",
+			 51,
+			 0, 0, peerTask, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, ptid != ERROR);
+
+	traceobj_mark(&trobj, 13);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/rng-1.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/rng-1.c
new file mode 100644
index 0000000..dd944a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/rng-1.c
@@ -0,0 +1,213 @@
+#include <stdlib.h>
+#include <string.h>
+#include <stdio.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/rngLib.h>
+
+static struct traceobj trobj;
+
+#define ADD_CONTENT(buffer,bytes,counter)				\
+	{								\
+		char	*bufPtr = buffer;				\
+		for (k=0; k<bytes; k++) {				\
+			*bufPtr = counter;				\
+			counter ++;					\
+			bufPtr ++;					\
+		}							\
+	}
+
+#define CHECK_CONTENT(buffer,bytes,counter)				\
+	{								\
+		char	*bufPtr = buffer;			\
+		for (k=0; k<bytes; k++) {				\
+			traceobj_assert(&trobj, *bufPtr == (char)counter);	\
+			counter ++;					\
+			bufPtr ++;					\
+		}							\
+	}
+
+static void rootTask(long arg, ...)
+{
+	int j, res, k, chunks;
+	const int putBytes = 10;
+	const int nrChunks = 3;
+	const int rngBytes = putBytes * nrChunks;
+	char buffer[putBytes];
+	char bigBuffer[putBytes * 2 * nrChunks];
+	int bytesPut;
+	int bytesGot;
+	int expectedCounter = 0;
+	int checkCounter = 0;
+	RING_ID rng;
+	char not_a_ring[32];
+
+	traceobj_enter(&trobj);
+	ADD_CONTENT(buffer, sizeof(buffer), expectedCounter);
+	rng = rngCreate(1);
+	traceobj_assert(&trobj, rngIsEmpty(rng));
+	memset(buffer, 0, sizeof(buffer));
+	buffer[0] = 17;
+	rngBufPut(rng, buffer, 1);
+	traceobj_assert(&trobj, rngIsFull(rng));
+	rngBufGet(rng, buffer, 1);
+	traceobj_assert(&trobj, rngIsEmpty(rng));
+	buffer[0] = 34;
+	rngBufPut(rng, buffer, 1);
+	traceobj_assert(&trobj, rngIsFull(rng));
+	expectedCounter = 0;
+	rngDelete(rng);
+
+	/* Here real VxWorks 6.6 just returns ERROR. */
+	memset(not_a_ring, 0, sizeof(not_a_ring));
+	errnoSet(0);
+	res = rngBufPut((RING_ID)&not_a_ring[0], buffer, 1);
+	traceobj_assert(&trobj, res == ERROR);
+
+/*	rng = rngCreate(1 * 1024 * 1024 * 1024);
+	traceobj_assert(&trobj, res == ERROR);
+	traceobj_assert(&trobj, errnoGet() == S_memLib_NOT_ENOUGH_MEMORY);
+ */
+	rng = rngCreate(rngBytes);
+	traceobj_assert(&trobj, rng != 0);
+	traceobj_assert(&trobj, rngIsEmpty(rng));
+	traceobj_assert(&trobj, !rngIsFull(rng));
+
+	/* Fill a few chunks */
+	for (chunks = 0; chunks < nrChunks; chunks++) {
+		traceobj_assert(&trobj,
+				rngNBytes(rng) == chunks * (int)sizeof(buffer));
+		traceobj_assert(&trobj,
+				rngFreeBytes(rng) ==
+				rngBytes - chunks * (int)sizeof(buffer));
+		for (j = 0; j < (int)sizeof(buffer); j++) {
+			buffer[j] = (char)j + (int)sizeof(buffer) * chunks;
+		}
+		ADD_CONTENT(buffer, sizeof(buffer), checkCounter);
+		bytesPut = rngBufPut(rng, &buffer[0], sizeof(buffer));
+		traceobj_assert(&trobj, bytesPut == sizeof(buffer));
+		traceobj_assert(&trobj, !rngIsEmpty(rng));
+		traceobj_assert(&trobj,
+				rngIsFull(rng) == (nrChunks - 1 == chunks));
+		traceobj_assert(&trobj,
+				rngFreeBytes(rng) ==
+				rngBytes - bytesPut * (chunks + 1));
+		traceobj_assert(&trobj,
+				rngNBytes(rng) ==
+				(chunks + 1) * (int)sizeof(buffer));
+	}
+	traceobj_assert(&trobj, rngIsFull(rng));
+	ADD_CONTENT(buffer, sizeof(buffer), checkCounter);
+	bytesPut = rngBufPut(rng, &buffer[0], sizeof(buffer));
+	traceobj_assert(&trobj, bytesPut == 0);
+	traceobj_assert(&trobj, rngIsFull(rng));
+
+	/* Read chunks back and check content */
+	for (chunks = 0; chunks < nrChunks; chunks++) {
+		memset(buffer, 0, sizeof(buffer));
+		traceobj_assert(&trobj,
+				rngNBytes(rng) ==
+				(nrChunks - chunks) * (int)sizeof(buffer));
+		traceobj_assert(&trobj,
+				rngFreeBytes(rng) ==
+				chunks * (int)sizeof(buffer));
+		bytesGot = rngBufGet(rng, &buffer[0], sizeof(buffer));
+		traceobj_assert(&trobj, bytesGot == (int)sizeof(buffer));
+		CHECK_CONTENT(buffer, bytesGot, expectedCounter);
+		traceobj_assert(&trobj, !rngIsFull(rng));
+		traceobj_assert(&trobj,
+				rngIsEmpty(rng) == (chunks == nrChunks - 1));
+
+		traceobj_assert(&trobj,
+				rngFreeBytes(rng) ==
+				(chunks + 1) * (int)sizeof(buffer));
+		traceobj_assert(&trobj,
+				rngNBytes(rng) ==
+				(nrChunks - chunks - 1) * (int)sizeof(buffer));
+	}
+
+	/* Testing filling too many */
+	ADD_CONTENT(bigBuffer, sizeof(bigBuffer), checkCounter);
+	bytesPut = rngBufPut(rng, &bigBuffer[0], sizeof(bigBuffer));
+	traceobj_assert(&trobj, bytesPut == rngBytes);
+	traceobj_assert(&trobj, !rngIsEmpty(rng));
+	traceobj_assert(&trobj, rngIsFull(rng));
+	traceobj_assert(&trobj, rngFreeBytes(rng) == 0);
+	traceobj_assert(&trobj, rngNBytes(rng) == rngBytes);
+
+	/* Getting too many */
+	memset(bigBuffer, 0, sizeof(bigBuffer));
+	bytesGot = rngBufGet(rng, &bigBuffer[0], sizeof(bigBuffer));
+	traceobj_assert(&trobj, bytesGot == rngBytes);
+	traceobj_assert(&trobj, rngIsEmpty(rng));
+	traceobj_assert(&trobj, !rngIsFull(rng));
+	traceobj_assert(&trobj, rngFreeBytes(rng) == rngBytes);
+	traceobj_assert(&trobj, rngNBytes(rng) == 0);
+
+	/* Now we need to adjust our expectedCounter */
+	expectedCounter += sizeof(buffer);
+
+	CHECK_CONTENT(bigBuffer, bytesGot, expectedCounter);
+
+	ADD_CONTENT(bigBuffer, sizeof(bigBuffer), checkCounter);
+	bytesPut = rngBufPut(rng, &bigBuffer[0], sizeof(bigBuffer));
+	traceobj_assert(&trobj, bytesPut == rngBytes);
+	rngFlush(rng);
+	traceobj_assert(&trobj, rngIsEmpty(rng));
+	traceobj_assert(&trobj, !rngIsFull(rng));
+	traceobj_assert(&trobj, rngFreeBytes(rng) == rngBytes);
+	traceobj_assert(&trobj, rngNBytes(rng) == 0);
+	while (bytesGot > 0) {
+		bytesGot = rngBufGet(rng, &bigBuffer[0], sizeof(bigBuffer));
+		CHECK_CONTENT(bigBuffer, bytesGot, expectedCounter);
+	}
+	rngDelete(rng);
+
+	chunks = 10;
+	rng = rngCreate(chunks);
+	bytesPut = 5;
+	traceobj_assert(&trobj, rngFreeBytes(rng) > bytesPut);
+	checkCounter = 0xaa;
+	expectedCounter = checkCounter;
+	for (j = 0; j < bytesPut; j++) {
+		rngPutAhead(rng, checkCounter, j);
+		checkCounter++;
+	}
+	rngMoveAhead(rng, bytesPut);
+	bytesGot = rngBufGet(rng, &bigBuffer[0], sizeof(bigBuffer));
+	traceobj_assert(&trobj, bytesGot == bytesPut);
+	CHECK_CONTENT(bigBuffer, bytesGot, expectedCounter);
+
+	/* Check also wrap-around */
+	bytesPut = chunks - 2;
+	traceobj_assert(&trobj, rngFreeBytes(rng) > bytesPut);
+	checkCounter = 0xaa;
+	expectedCounter = checkCounter;
+	for (j = 0; j < bytesPut; j++) {
+		rngPutAhead(rng, checkCounter, j);
+		checkCounter++;
+	}
+	rngMoveAhead(rng, bytesPut);
+	bytesGot = rngBufGet(rng, &bigBuffer[0], sizeof(bigBuffer));
+	traceobj_assert(&trobj, bytesGot == bytesPut);
+	CHECK_CONTENT(bigBuffer, bytesGot, expectedCounter);
+	rngDelete(rng);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	tid = taskSpawn("rootTask", 50, 0, 0, rootTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_join(&trobj);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-1.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-1.c
new file mode 100644
index 0000000..245eb5c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-1.c
@@ -0,0 +1,161 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/taskInfo.h>
+#include <vxworks/semLib.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	10, 11, 12, 13, 20,
+	1, 14, 15, 2, 3, 4,
+	5, 6, 7, 8, 16, 17, 18,
+	9, 21, 19
+};
+
+static SEM_ID sem_id;
+
+static void peerTask(long arg, ...)
+{
+	TASK_ID rtid;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = taskLock();
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 4);
+
+	rtid = taskNameToId("rootTask");
+	traceobj_assert(&trobj, rtid != ERROR);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = taskResume(rtid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 8);
+
+	ret = taskUnlock();
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = taskSuspend(taskIdSelf());
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_exit(&trobj);
+}
+
+static void rootTask(long arg, ...)
+{
+	TASK_ID ptid;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = taskPrioritySet(taskIdSelf(), 10);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 11);
+
+	sem_id = semCCreate(0xffffffff, 0);
+	traceobj_assert(&trobj, sem_id == 0 && errno == S_semLib_INVALID_OPTION);
+
+	traceobj_mark(&trobj, 12);
+
+	sem_id = semCCreate(SEM_Q_FIFO, 0);
+	traceobj_assert(&trobj, sem_id != 0);
+
+	traceobj_mark(&trobj, 13);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	ptid = taskNameToId("peerTask");
+	traceobj_assert(&trobj, ptid != ERROR);
+
+	traceobj_mark(&trobj, 14);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 15);
+
+	ret = taskSuspend(taskIdSelf());
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 16);
+
+	ret = semTake(sem_id, 10);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 17);
+
+	ret = semTake(sem_id, NO_WAIT);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 18);
+
+	ret = semTake(sem_id, 100);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_TIMEOUT);
+
+	traceobj_mark(&trobj, 19);
+
+	ret = taskResume(ptid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID rtid, ptid;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	rtid = taskSpawn("rootTask", 50, 0, 0, rootTask,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, rtid != ERROR);
+
+	traceobj_mark(&trobj, 20);
+
+	ptid = taskSpawn("peerTask", 11, 0, 0, peerTask,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, ptid != ERROR);
+
+	traceobj_mark(&trobj, 21);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-2.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-2.c
new file mode 100644
index 0000000..c9befc4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-2.c
@@ -0,0 +1,144 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/taskInfo.h>
+#include <vxworks/semLib.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	6, 7, 8, 9, 10, 11, 12, 13, 14, 15,
+	18, 1, 2, 3, 19, 4, 5, 16, 6, 17
+};
+
+static SEM_ID sem_id;
+
+static void peerTask(long arg, ...)
+{
+	TASK_ID rtid;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	rtid = taskNameToId("rootTask");
+	traceobj_assert(&trobj, rtid != ERROR);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = semTake(sem_id, NO_WAIT);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_UNAVAILABLE);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = semTake(sem_id, 100);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_TIMEOUT);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = taskResume(rtid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 6);
+
+	traceobj_exit(&trobj);
+}
+
+static void rootTask(long arg, ...)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = taskPrioritySet(taskIdSelf(), 11);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 7);
+
+	sem_id = semMCreate(0xffffffff);
+	traceobj_assert(&trobj, sem_id == 0 && errno == S_semLib_INVALID_OPTION);
+
+	traceobj_mark(&trobj, 8);
+
+	sem_id = semMCreate(SEM_Q_PRIORITY|SEM_DELETE_SAFE|SEM_INVERSION_SAFE);
+	traceobj_assert(&trobj, sem_id != 0);
+
+	traceobj_mark(&trobj, 9);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 12);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 13);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_semLib_INVALID_OPERATION);
+
+	traceobj_mark(&trobj, 14);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 15);
+
+	ret = taskSuspend(taskIdSelf());
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 16);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 17);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID rtid, ptid;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	rtid = taskSpawn("rootTask", 50, 0, 0, rootTask,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, rtid != ERROR);
+
+	traceobj_mark(&trobj, 18);
+
+	ptid = taskSpawn("peerTask", 10, 0, 0, peerTask,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, ptid != ERROR);
+
+	traceobj_mark(&trobj, 19);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-3.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-3.c
new file mode 100644
index 0000000..1041d25
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-3.c
@@ -0,0 +1,68 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/semLib.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	1, 2, 3, 5, 4, 6
+};
+
+static SEM_ID sem_id;
+
+static void rootTask(long arg, ...)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	sem_id = semBCreate(SEM_Q_FIFO, SEM_FULL);
+	traceobj_assert(&trobj, sem_id != 0);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 2);
+
+	ret = semTake(sem_id, NO_WAIT);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 3);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_DELETED);
+
+	traceobj_mark(&trobj, 4);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	tid = taskSpawn("rootTask", 50, 0, 0, rootTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_mark(&trobj, 5);
+
+	ret = semDelete(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 6);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-4.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-4.c
new file mode 100644
index 0000000..5c82169
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/sem-4.c
@@ -0,0 +1,74 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/semLib.h>
+#include <vxworks/tickLib.h>
+
+static struct traceobj trobj;
+
+#define WAIT_TIME  100
+#define TOLERANCE  20
+#define MIN_WAIT   (WAIT_TIME - TOLERANCE)
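+
+/*
+ * Each timed semTake() below should block for about WAIT_TIME ticks;
+ * TOLERANCE ticks of slack are allowed, since the measured interval may
+ * be rounded down at tick boundaries.
+ */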
+
+static SEM_ID sem_id;
+
+static void rootTask(long arg, ...)
+{
+	ULONG start;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	sem_id = semCCreate(SEM_Q_PRIORITY, 0);
+	traceobj_assert(&trobj, sem_id != 0);
+
+	start = tickGet();
+	ret = semTake(sem_id, WAIT_TIME);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_TIMEOUT);
+	traceobj_assert(&trobj, tickGet() - start >= MIN_WAIT);
+
+	start = tickGet();
+	ret = semTake(sem_id, WAIT_TIME);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_TIMEOUT);
+	traceobj_assert(&trobj, tickGet() - start >= MIN_WAIT);
+
+	start = tickGet();
+	ret = semTake(sem_id, WAIT_TIME);
+	traceobj_assert(&trobj, ret == ERROR && errno == S_objLib_OBJ_TIMEOUT);
+	traceobj_assert(&trobj, tickGet() - start >= MIN_WAIT);
+
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+	ret = semTake(sem_id, WAIT_TIME);
+	traceobj_assert(&trobj, ret == OK);
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+	ret = semGive(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+	int ret;
+
+	traceobj_init(&trobj, argv[0], 0);
+
+	tid = taskSpawn("rootTask", 50, 0, 0, rootTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_join(&trobj);
+
+	ret = semDelete(sem_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-1.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-1.c
new file mode 100644
index 0000000..3cf5f38
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-1.c
@@ -0,0 +1,49 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = { 1, 2 };
+
+static void windTask(long a1, long a2, long a3, long a4, long a5,
+		     long a6, long a7, long a8, long a9, long a10)
+{
+	traceobj_mark(&trobj, 1);
+
+	traceobj_enter(&trobj);
+
+	traceobj_assert(&trobj, a1 == 1);
+	traceobj_assert(&trobj, a2 == 2);
+	traceobj_assert(&trobj, a3 == 4);
+	traceobj_assert(&trobj, a4 == 8);
+	traceobj_assert(&trobj, a5 == 16);
+	traceobj_assert(&trobj, a6 == 32);
+	traceobj_assert(&trobj, a7 == 64);
+	traceobj_assert(&trobj, a8 == 128);
+	traceobj_assert(&trobj, a9 == 256);
+	traceobj_assert(&trobj, a10 == 512);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	tid = taskSpawn("windTask", 70, 0, 0, (FUNCPTR)windTask,
+			1, 2, 4, 8, 16, 32, 64, 128, 256, 512);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_mark(&trobj, 2);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-2.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-2.c
new file mode 100644
index 0000000..46d99a2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/task-2.c
@@ -0,0 +1,119 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/semLib.h>
+
+static inline void safe_pause(void)
+{
+	for (;;)
+		pause();
+}
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	8, 1, 9, 4, 10, 5, 11, 2, 6, 7, 12
+};
+
+static TASK_ID btid, ftid;
+
+static SEM_ID sem_id;
+
+static void backgroundTask(long arg, ...)
+{
+	unsigned int safety = 100000000, count = 0;
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 1);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 2);
+
+	while (--safety > 0)
+		count++;
+
+	/*
+	 * Enter infinite pause so that any pending cancellation is
+	 * taken regardless of whether async-cancel is enabled or not.
+	 */
+	safe_pause();
+
+	traceobj_exit(&trobj);
+}
+
+static void foregroundTask(long arg, ...)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	traceobj_mark(&trobj, 4);
+
+	ret = semTake(sem_id, WAIT_FOREVER);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 5);
+
+	taskDelay(3);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = taskSuspend(btid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 7);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	int ret;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	sem_id = semCCreate(SEM_Q_PRIORITY, 0);
+	traceobj_assert(&trobj, sem_id != 0);
+
+	traceobj_mark(&trobj, 8);
+
+	btid = taskSpawn("backgroundTask", 11, 0, 0, backgroundTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, btid != ERROR);
+
+	traceobj_mark(&trobj, 9);
+
+	ftid = taskSpawn("foregroundTask", 10, 0, 0, foregroundTask,
+			 0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, ftid != ERROR);
+
+	traceobj_mark(&trobj, 10);
+
+	ret = semGive(sem_id);
+
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 11);
+
+	ret = semGive(sem_id);
+
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 12);
+
+	ret = taskDelete(btid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/wd-1.c b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/wd-1.c
new file mode 100644
index 0000000..b557545
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/testsuite/wd-1.c
@@ -0,0 +1,92 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <copperplate/traceobj.h>
+#include <vxworks/errnoLib.h>
+#include <vxworks/taskLib.h>
+#include <vxworks/wdLib.h>
+#include <vxworks/intLib.h>
+
+static struct traceobj trobj;
+
+static int tseq[] = {
+	5, 6,
+	1, 4, 1, 4, 1,
+	2, 3, 7
+};
+
+static TASK_ID tid;
+
+static WDOG_ID wdog_id;
+
+static void watchdogHandler(long arg)
+{
+	static int hits;
+	int ret;
+
+	traceobj_assert(&trobj, arg == 0xfefbfcfd);
+
+	ret = intContext();
+	traceobj_assert(&trobj, ret);
+
+	traceobj_mark(&trobj, 1);
+
+	if (++hits >= 3) {
+		ret = wdCancel(wdog_id);
+		traceobj_assert(&trobj, ret == OK);
+		traceobj_mark(&trobj, 2);
+		ret = taskResume(tid);
+		traceobj_assert(&trobj, ret == OK);
+		traceobj_mark(&trobj, 3);
+		return;
+	}
+
+	traceobj_mark(&trobj, 4);
+	ret = wdStart(wdog_id, 200, watchdogHandler, arg);
+	traceobj_assert(&trobj, ret == OK);
+}
+
+static void rootTask(long arg, ...)
+{
+	int ret;
+
+	traceobj_enter(&trobj);
+
+	tid = taskIdSelf();
+
+	traceobj_mark(&trobj, 5);
+
+	wdog_id = wdCreate();
+	traceobj_assert(&trobj, wdog_id != 0);
+
+	ret = wdStart(wdog_id, 200, watchdogHandler, 0xfefbfcfd);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 6);
+
+	ret = taskSuspend(tid);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_mark(&trobj, 7);
+
+	ret = wdDelete(wdog_id);
+	traceobj_assert(&trobj, ret == OK);
+
+	traceobj_exit(&trobj);
+}
+
+int main(int argc, char *const argv[])
+{
+	TASK_ID tid;
+
+	traceobj_init(&trobj, argv[0], sizeof(tseq) / sizeof(int));
+
+	tid = taskSpawn("rootTask", 50, 0, 0, rootTask,
+			0, 0, 0, 0, 0, 0, 0, 0, 0, 0);
+	traceobj_assert(&trobj, tid != ERROR);
+
+	traceobj_join(&trobj);
+
+	traceobj_verify(&trobj, tseq, sizeof(tseq) / sizeof(int));
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/tickLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/tickLib.c
new file mode 100644
index 0000000..9f7ec5a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/tickLib.c
@@ -0,0 +1,45 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#include <stdio.h>
+#include <errno.h>
+#include <string.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <boilerplate/lock.h>
+#include <vxworks/tickLib.h>
+#include "tickLib.h"
+
+struct clockobj wind_clock;
+
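+/*
+ * Return the current date of the VxWorks clock, in ticks. Cancellation
+ * is deferred across the clockobj access, so that cancelling the caller
+ * cannot leave the clock object in an inconsistent state.
+ */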
+ULONG tickGet(void)
+{
+	struct service svc;
+	ticks_t ticks;
+
+	CANCEL_DEFER(svc);
+	clockobj_get_date(&wind_clock, &ticks);
+	CANCEL_RESTORE(svc);
+
+	return (ULONG)ticks;
+}
+
+void tickSet(ULONG ticks)
+{
+	clockobj_set_date(&wind_clock, ticks);
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/tickLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/tickLib.h
new file mode 100644
index 0000000..2ddbf42
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/tickLib.h
@@ -0,0 +1,26 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _VXWORKS_TICKLIB_H
+#define _VXWORKS_TICKLIB_H
+
+#include <copperplate/clockobj.h>
+
+extern struct clockobj wind_clock;
+
+#endif /* _VXWORKS_TICKLIB_H */
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/wdLib.c b/kernel/xenomai-v3.2.4/lib/vxworks/wdLib.c
new file mode 100644
index 0000000..beb70d5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/wdLib.c
@@ -0,0 +1,169 @@
+/*
+ * Copyright (C) 2008-2010 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ *
+ * Watchdog support.
+ *
+ * Not shareable (we can't tell whether the handler would always be
+ * available in all processes).
+ */
+
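+/*
+ * Typical usage, as exercised by the wd-1.c testcase: wdCreate() returns
+ * a watchdog handle, wdStart() arms a one-shot timeout which fires
+ * handler(arg) from timer context after the given delay (in ticks),
+ * wdCancel() disarms a pending timeout, and wdDelete() releases the
+ * watchdog.
+ */
+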
+#include <errno.h>
+#include <stdlib.h>
+#include <memory.h>
+#include <copperplate/heapobj.h>
+#include <copperplate/threadobj.h>
+#include <vxworks/errnoLib.h>
+#include "wdLib.h"
+#include "tickLib.h"
+
+#define wd_magic	0x3a4b5c6d
+
+static struct wind_wd *get_wd(WDOG_ID wdog_id)
+{
+	struct wind_wd *wd = (struct wind_wd *)wdog_id;
+
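+	/* Reject null or misaligned handles before dereferencing them. */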
+	if (wd == NULL || ((intptr_t)wd & (sizeof(intptr_t)-1)) != 0)
+		return NULL;
+
+	if (wd->magic != wd_magic)
+		return NULL;
+
+	if (timerobj_lock(&wd->tmobj))
+		return NULL;
+
+	return wd->magic != wd_magic ? NULL : wd;
+}
+
+static inline void put_wd(struct wind_wd *wd)
+{
+	timerobj_unlock(&wd->tmobj);
+}
+
+static void watchdog_handler(struct timerobj *tmobj)
+{
+	struct wind_wd *wd = container_of(tmobj, struct wind_wd, tmobj);
+	wd->handler(wd->arg);
+}
+
+WDOG_ID wdCreate(void)
+{
+	struct wind_wd *wd;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	wd = pvmalloc(sizeof(*wd));
+	if (wd == NULL)
+		goto fail;
+
+	ret = timerobj_init(&wd->tmobj);
+	if (ret) {
+		pvfree(wd);
+	fail:
+		errno = S_memLib_NOT_ENOUGH_MEMORY;
+		wd = NULL;
+		goto out;
+	}
+
+	wd->magic = wd_magic;
+out:
+	CANCEL_RESTORE(svc);
+
+	return (WDOG_ID)wd;
+}
+
+STATUS wdDelete(WDOG_ID wdog_id)
+{
+	struct wind_wd *wd;
+	struct service svc;
+	int ret = OK;
+
+	CANCEL_DEFER(svc);
+
+	wd = get_wd(wdog_id);
+	if (wd == NULL) {
+		errno = S_objLib_OBJ_ID_ERROR;
+		ret = ERROR;
+		goto out;
+	}
+
+	timerobj_destroy(&wd->tmobj);
+	wd->magic = ~wd_magic;
+	pvfree(wd);
+out:
+	CANCEL_RESTORE(svc);
+
+	return ret;
+}
+
+STATUS wdStart(WDOG_ID wdog_id, int delay, void (*handler)(long), long arg)
+{
+	struct itimerspec it;
+	struct service svc;
+	struct wind_wd *wd;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	wd = get_wd(wdog_id);
+	if (wd == NULL)
+		goto objid_error;
+
+	wd->handler = handler;
+	wd->arg = arg;
+
+	clockobj_ticks_to_timeout(&wind_clock, delay, &it.it_value);
+	it.it_interval.tv_sec = 0;
+	it.it_interval.tv_nsec = 0;
+	ret = timerobj_start(&wd->tmobj, watchdog_handler, &it);
+	if (ret) {
+	objid_error:
+		CANCEL_RESTORE(svc);
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	CANCEL_RESTORE(svc);
+
+	return OK;
+}
+
+STATUS wdCancel(WDOG_ID wdog_id)
+{
+	struct wind_wd *wd;
+	struct service svc;
+	int ret;
+
+	CANCEL_DEFER(svc);
+
+	wd = get_wd(wdog_id);
+	if (wd == NULL)
+		goto objid_error;
+
+	ret = timerobj_stop(&wd->tmobj);
+	if (ret) {
+	objid_error:
+		CANCEL_RESTORE(svc);
+		errno = S_objLib_OBJ_ID_ERROR;
+		return ERROR;
+	}
+
+	CANCEL_RESTORE(svc);
+
+	return OK;
+}
diff --git a/kernel/xenomai-v3.2.4/lib/vxworks/wdLib.h b/kernel/xenomai-v3.2.4/lib/vxworks/wdLib.h
new file mode 100644
index 0000000..4bc16a2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/lib/vxworks/wdLib.h
@@ -0,0 +1,35 @@
+/*
+ * Copyright (C) 2008 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This library is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU Lesser General Public
+ * License as published by the Free Software Foundation; either
+ * version 2 of the License, or (at your option) any later version.
+ *
+ * This library is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * Lesser General Public License for more details.
+ *
+ * You should have received a copy of the GNU Lesser General Public
+ * License along with this library; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA  02111-1307  USA.
+ */
+
+#ifndef _VXWORKS_WDLIB_H
+#define _VXWORKS_WDLIB_H
+
+#include <copperplate/timerobj.h>
+#include <vxworks/wdLib.h>
+
+struct wind_wd {
+
+	unsigned int magic;
+
+	void (*handler)(long arg);
+	long arg;
+
+	struct timerobj tmobj;
+};
+
+#endif /* _VXWORKS_WDLIB_H */
diff --git a/kernel/xenomai-v3.2.4/scripts/Kconfig.frag b/kernel/xenomai-v3.2.4/scripts/Kconfig.frag
new file mode 100644
index 0000000..7921286
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/Kconfig.frag
@@ -0,0 +1,49 @@
+menuconfig XENOMAI
+	depends on X86_TSC || !X86
+	bool "Xenomai/cobalt"
+	select IPIPE if HAVE_IPIPE_SUPPORT
+	select IPIPE_WANT_APIREV_2 if IPIPE
+	select DOVETAIL if HAVE_DOVETAIL
+	select DOVETAIL_LEGACY_SYSCALL_RANGE if HAVE_DOVETAIL
+	default y
+	help
+	  Xenomai's Cobalt core is a real-time extension to the Linux
+	  kernel, which exhibits very short interrupt and scheduling
+	  latency, without affecting the regular kernel services.
+
+	  This option enables the set of extended kernel services
+	  required to run real-time applications in user-space over
+	  the Xenomai libraries.
+
+	  Please visit http://xenomai.org for more information.
+
+if XENOMAI
+source "arch/@SRCARCH@/xenomai/Kconfig"
+endif
+
+if MIGRATION
+comment "WARNING! Page migration (CONFIG_MIGRATION) may increase"
+comment "latency."
+endif
+
+if APM || CPU_FREQ || ACPI_PROCESSOR || INTEL_IDLE
+comment "WARNING! At least one of APM, CPU frequency scaling, ACPI 'processor'"
+comment "or CPU idle features is enabled. Any of these options may"
+comment "cause troubles with Xenomai. You should disable them."
+endif
+
+config XENO_VERSION_MAJOR
+       int
+       default @VERSION_MAJOR@
+
+config XENO_VERSION_MINOR
+       int
+       default @VERSION_MINOR@
+
+config XENO_REVISION_LEVEL
+       int
+       default @REVISION_LEVEL@
+
+config XENO_VERSION_STRING
+       string
+       default "@VERSION_STRING@"
diff --git a/kernel/xenomai-v3.2.4/scripts/Makefile.am b/kernel/xenomai-v3.2.4/scripts/Makefile.am
new file mode 100644
index 0000000..9d127b9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/Makefile.am
@@ -0,0 +1,30 @@
+bin_SCRIPTS=xeno-config xeno wrap-link.sh
+
+install-exec-local:
+	@if test -r $(srcdir)/postinstall.sh ; then \
+	   echo "Running post-install script..." ;\
+	   $(srcdir)/postinstall.sh ; \
+	fi
+
+install-data-local:
+	$(mkinstalldirs) $(DESTDIR)$(libdir)
+	$(INSTALL_DATA) $(srcdir)/dynlist.ld $(DESTDIR)$(libdir)
+
+uninstall-local:
+	$(RM) $(DESTDIR)$(libdir)/dynlist.ld
+	@if test -r $(srcdir)/postinstall.sh ; then \
+	   echo "Running post-uninstall script..." ;\
+	   $(srcdir)/postinstall.sh --uninstall ; \
+	fi
+
+EXTRA_DIST =				\
+	$(wildcard postinstall.sh)	\
+	Kconfig.frag			\
+	bootstrap			\
+	dynlist.ld			\
+	histo.gp			\
+	prepare-kernel.sh		\
+	wrap-link.sh			\
+	xeno-config-cobalt.in		\
+	xeno-config-mercury.in		\
+	xeno.in
diff --git a/kernel/xenomai-v3.2.4/scripts/bootstrap b/kernel/xenomai-v3.2.4/scripts/bootstrap
new file mode 100755
index 0000000..e7b8536
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/bootstrap
@@ -0,0 +1,3 @@
+#!/bin/sh
+
+autoreconf -fi
diff --git a/kernel/xenomai-v3.2.4/scripts/dynlist.ld b/kernel/xenomai-v3.2.4/scripts/dynlist.ld
new file mode 100644
index 0000000..de6eef0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/dynlist.ld
@@ -0,0 +1,3 @@
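+/*
+ * Export "main" in the dynamic symbol table, so that the two-stage link
+ * performed by wrap-link.sh can interpose on the program's main()
+ * routine (see the --dynamic-list handling there).
+ */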
+{
+	main;
+};
diff --git a/kernel/xenomai-v3.2.4/scripts/histo.gp b/kernel/xenomai-v3.2.4/scripts/histo.gp
new file mode 100644
index 0000000..624a174
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/histo.gp
@@ -0,0 +1,22 @@
+#
+# Run this gnuplot script with the command line:
+#
+# gnuplot -e 'input_file="/path/to/input";output_file="/path/to/output.png";graph_title="title text"' /path/to/histo.gp
+#
+# where the input file is generated using latency option "-g"
+#
+
+set terminal png size 900,700
+set output output_file
+
+set title graph_title
+set xlabel "user-space latency in microseconds"
+set ylabel "occurrences + 1 (log)"
+set logscale y
+set key off
+set grid
+
+set bar 1.000000
+set style fill  solid 1.00 border -1
+set style rectangle back fc lt -3 fillstyle  solid 1.00 border -1
+plot input_file w line
diff --git a/kernel/xenomai-v3.2.4/scripts/maint/test-xeno-test.rb b/kernel/xenomai-v3.2.4/scripts/maint/test-xeno-test.rb
new file mode 100644
index 0000000..50853b0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/maint/test-xeno-test.rb
@@ -0,0 +1,301 @@
+#!/usr/bin/env ruby
+
+# Running xeno-test once with each option takes a long time (around 15 minutes).
+# For each test a full log is stored in the logs subdirectory.
+# Therefore the results of the tests (the ruby variable) are stored using PStore
+# in the file logs/xeno-test.store.
+# If that file exists, is newer than xeno-test, and the first parameter is not
+# "clean", the tst variable is fetched from the file.
+# This is convenient if you want to add new checks.
+#
+# Todo: - Why do the mailing tests not work?
+#       - How does xeno-test react to Ctrl-C?
+#       - More thorough analysis of the log files
+#
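+# Usage (per the notes above):
+#   test-xeno-test.rb         # reuse logs/xeno-test.store when it is up to date
+#   test-xeno-test.rb clean   # discard stored results and rerun all tests
+#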
+require 'timeout'
+require 'ostruct'
+require 'ftools'
+require 'pstore'
+require 'tempfile'
+
+XenoInstall = "/usr/xenomai/bin"
+XenoHead    = File.expand_path(File.dirname(File.dirname(__FILE__)))
+XenoTestOrg = XenoHead+'/xeno-test.in'
+XenoTestRun = XenoInstall+'/xeno-test'
+LogDir      = XenoHead+'/logs'
+MainLog     = LogDir+'/runtest.log'
+StorageName = LogDir+'/xeno-test.store'
+withFinished = `grep finished /usr/xenomai/bin/xeno*`
+FinishedMsg = withFinished.size > 0 ? "xeno-test: finished" : nil
+
+$clean  = ARGV[0] == 'clean' && ! (
+	  File.exists?(XenoTestRun) &&
+          File.exists?(StorageName) &&
+          File.stat(XenoTestRun).mtime >  File.stat(StorageName).mtime
+          )
+if $clean && File.exists?(StorageName) then
+  puts "Do you really want to run all tesst? <Ctrl-C> to abort"
+  STDIN.getc
+end
+
+class XenoTest
+  attr_reader :mainLog
+  attr_reader :blockDevice
+  attr_writer :maxTimeout
+  attr_writer :chkProc
+
+  def initialize(options)
+    @from    = XenoHead
+    @inst    = XenoInstall
+    @busybox = false
+    File.makedirs(LogDir) if defined?(LogDir) && ! File.directory?(LogDir)
+    @@mainLog = open(MainLog,"w");
+    @@mainLog.puts("#{Time.now}: started run")
+    @@mainLog.sync = true
+    options.each { |name,val|
+      if val.class == String then
+        cmd =  "@#{name}='#{val}'"
+      else
+	cmd =  "@#{name}=#{val}"
+      end
+      eval cmd
+    }
+  end
+
+  def add(opts,  chkProc=nil)
+    @nr   = -1        if !defined?(@nr)
+    @tsts = Array.new if !defined?(@tsts)
+    @nr  += 1
+    elem = OpenStruct.new({'nr' => @nr, 'opts'=>opts})
+    elem.chkProc = chkProc if chkProc
+    @tsts << elem
+    return elem
+  end
+
+  def runTests
+    @tsts.each { |x| runOneTest(x) ; }
+  end
+
+  def XenoTest::writeLogEntry(tst, logMsg)
+    msg = "#{Time.now}: #{sprintf("%2d", tst.nr)} "+ logMsg
+    if ! File.exists?(MainLog) then
+      puts msg
+    end
+    outs = [ STDOUT]
+    outs << @@mainLog if defined?(@@mainLog) && @@mainLog
+    outs.each {|dev| dev.puts msg }
+  end
+
+  def runOneTest(x)
+    puts "runOneTest #{x.inspect}"  if $VERBOSE
+    opts = x.opts
+    startTime = Time.now
+    tstRes = nil
+    x.tst  = `which xeno-test`.chomp
+    x.log  = "#{LogDir}/test_xeno_test.log.#{x.nr}"
+    x.cmd  = "#{x.tst} -T 1 #{x.opts} | tee #{x.log}"   # -T 1 to make the test time shorter
+
+    # Start thread to enforce maxTimeout
+    if @maxTimeout then
+      @rThread = Thread.new{
+	    puts "@rThread: "+Process.pid.to_s if $VERBOSE
+	    writeLogEntry(x, "started: #{opts} Timeout #{@maxTimeout}")
+	    sleep(@maxTimeout);
+	    writeLogEntry(x, "rThread will kill #{$childPid.inspect}")
+	    system("kill -9 #{$childPid}") if $childPid
+	    raise Timeout::Error 
+      }
+    end
+    # Now we run our script
+    $childPid = nil
+    Dir.chdir(@inst)
+    res = IO.popen(x.cmd) { |f|
+      puts "cmd: "+Process.pid.to_s if $VERBOSE
+      $childPid = Process.pid
+      f.each{|a| puts a}
+    } 
+    tstRes = $?
+  rescue Timeout::Error => details
+    puts details.backtrace.join("\n")
+    XenoTest::writeLogEntry(x, "FAILED: #{opts} Timeout error #{$childPid}")
+    system("kill -9 #{$childPid}")
+  rescue => details
+    writeLogEntry(x, "FAILED: #{opts} runtime error #{@rThread.inspect} pid #{$childPid.inspect}")
+    puts details.backtrace.join("\n")
+    system("kill -9 #{$childPid}")
+    @rThread.kill if defined?(@rThread)
+  ensure
+    # Save the results and clean up any leftover dd processes, in case
+    # xeno-test did not clean them up as it should have.
+    endTime = Time.now
+    duration = (endTime-startTime).round
+    if duration <= 5 then
+      str    =  "FAILED"
+      tstRes =  'too fast'
+    else
+      str = tstRes ? 'PASSED' : 'FAILED'
+    end
+    x.tstRes   = tstRes
+    x.duration = duration
+    XenoTest::writeLogEntry(x, "#{str}: #{sprintf("%3d", duration)} seconds. opts <#{x.opts}> returned #{tstRes}")
+
+    IO.popen("ps -ef | grep -w dd") { |f|
+      line= f.gets
+      if ! line.index('grep') then
+	msg = "#{Time.now}: #{sprintf("%2d", x.nr)} FAILED: dd not killed \n    >>> #{line}"
+	[ STDOUT, @@mainLog].each {|dev| dev.puts msg if dev}
+	system "killall dd"
+      end
+    }
+    @rThread.kill if @rThread
+    @rThread = nil
+  end
+
+  def runChecks
+    puts "running all Checks"
+    @tsts.each { |aTst|
+      checkFinished(aTst)
+      chks = aTst.chkProc
+      next if ! chks
+      z = eval "#{chks[0]}"+"(aTst, chks[1])" if chks[1]
+    }
+    puts "completed all Checks"
+  end
+
+  def checkFinished(tst)
+    return if ! FinishedMsg
+    logInhalt = IO.readlines(tst.log)
+    finished  = false
+    logInhalt.each{|line|
+      finished =  true if line.index(FinishedMsg)
+    }
+    if !finished then
+      puts "CHK #{tst.nr} FAILED opts #{tst.opts} could not find #{FinishedMsg}"
+      puts "  searched in #{tst.log}" 
+    end
+  end
+
+  def getTstByOpt(opts)
+    @tsts.each{ |aTst|
+      return aTst if aTst.opts == opts
+    }
+    return nil
+  end
+end
+
+# Here follow the various routines that check the effect of each option
+def expectInLogFile(x, string)
+  puts "x #{x.inspect}\nlooking for #{string}" if $VERBOSE
+  logInhalt = IO.readlines(x.log)
+  found = false
+  logInhalt.each{|line|
+    found = true if line.index(string)
+  }
+  puts "CHK #{x.nr} FAILED, could not find #{string} in #{x.log}" if !found
+end
+
+# Get a name for a temporary file as a target to upload via curl (option -U)
+tf = Tempfile.new("xeno-test")
+TstUrlTarget = tf.path
+tf.close
+File.delete(TstUrlTarget) if File.exists?(TstUrlTarget)
+
+tst   = nil
+store = nil
+File.delete(StorageName) if $clean &&  File.exists?(StorageName)
+if File.exists?(StorageName) then
+  # Read in our saved test state (if we have a valid combination)
+  store = PStore.new(StorageName)
+  puts "reading tst from #{StorageName}"
+  store.transaction do
+    tst = store['tst']
+  end
+else
+  # we prepare to run our tests
+
+  # Get the first block device
+  blockDevices = `mount | cut -d\\  -f1 | egrep -ve 'sysfs|proc|devpts'`
+  blockDevices = blockDevices[0..blockDevices.index("\n")].chomp
+  bDev = blockDevices
+
+  tst = XenoTest.new({'blockDevice' => bDev})
+  tst.maxTimeout = 120
+
+  # Define test run for each option and/or combination thereof
+  
+  # Test for a bad option. This happens to be fast, too
+  tst_0  = tst.add("-O",                [ 'expectInLogFile', "xeno-test [options]"])
+  tst_1  = tst.add("",                  [ 'expectInLogFile', FinishedMsg])
+  tst_2  = tst.add("-T 5")
+  if true then # To speed up our tests, we may turn off defining more test runs
+      tst_3  = tst.add("-p 200",        ['expectInLogFile',"Sampling period: 200 us"])
+      tst_4  = tst.add("-w 2",          ['expectInLogFile',FinishedMsg])
+      tst_5  = tst.add("-d #{bDev}",
+	  ['expectInLogFile',"creating workload using dd if=#{bDev}"])
+
+      tst_6  = tst.add("-w /bin/dd #{bDev}",
+					['expectInLogFile',FinishedMsg])
+      tst_7  = tst.add("-P 'echo marker'", 
+					['expectInLogFile',FinishedMsg])
+      tst_8  = tst.add("-L",            ['expectInLogFile',FinishedMsg])
+      tst_9  = tst.add("-N marker",     ['expectInLogFile',FinishedMsg])
+      tst_10 = tst.add("-v",            ['expectInLogFile',FinishedMsg])
+      tst_11 = tst.add("-U #{TstUrlTarget}",
+                                        ['expectInLogFile',FinishedMsg])
+      tst_12 = tst.add("-D '%Ymarker%Mmarker%D'", 
+                                        ['expectInLogFile',FinishedMsg])
+#      tst_13 = tst.add("-M #{ENV['USER']}@localhost", 
+#					['expectInLogFile',FinishedMsg])
+#      tst_14 = tst.add("-m",            ['expectInLogFile',FinishedMsg])
+  end
+
+  # Remove files we might have left behind in a previous test
+  ( Dir.glob("/tmp/test-#{`uname -r`.chomp}*") + 
+    Dir.glob("#{XenoInstall}/marker*")).each { |file|
+    puts "Removing #{file}"
+    File.delete file
+  }
+  tst.runTests
+
+  # Now we are ready to store our test results (our ruby variable) to a file
+  puts "Writing tst to #{StorageName}"
+  store = PStore.new(StorageName)
+  store.transaction do
+    store['tst'] = tst
+  end
+end
+
+# If we did run the curl/-N, -L test, did it generate the expected file?
+{
+ "-U #{TstUrlTarget}" => TstUrlTarget,
+ "-L" => "/tmp/test-#{`uname -r`.chomp}*",
+ "-N marker" =>  "#{XenoInstall}/marker*",
+}.each { |opt, path|
+  myTst  = tst.getTstByOpt(opt)
+  if myTst && Dir.glob(path).size == 0  then
+    XenoTest::writeLogEntry(myTst, "FAILED: #{opt}: no file #{path} found")
+  else 
+    XenoTest::writeLogEntry(myTst, "PASSED: #{opt}: #{Dir.glob(path).inspect}")
+  end if myTst
+}
+
+# This string is probably wrong, but xeno-test doesn't seem to accept it
+# as it should either.
+myTst  = tst.getTstByOpt("-D '%Ymarker%Mmarker%D'")
+tstString = "-D #{Time.now.strftime('%Ymarker%Mmarker%D')}"
+tstString = "marker"
+expectInLogFile(myTst, tstString) if myTst
+
+# Test whether we really take more time with -T 5 than with the default -T 1
+long  = tst.getTstByOpt("-T 5")
+short = tst.getTstByOpt("")
+if long and short then
+  if long.duration < short.duration + 5 then
+    XenoTest::writeLogEntry(long, 
+      "FAILED: with '#{short.opts}' #{sprintf("%3d", short.duration)} "+
+      "+ 5 not smaller <  #{sprintf("%3d", long.duration)} seconds with '#{long.opts}'.")
+  end
+end
+
+tst.runChecks
diff --git a/kernel/xenomai-v3.2.4/scripts/make-release.sh b/kernel/xenomai-v3.2.4/scripts/make-release.sh
new file mode 100755
index 0000000..67a191d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/make-release.sh
@@ -0,0 +1,35 @@
+#! /bin/bash
+
+usage() {
+	echo "usage: $0 name"
+	exit 1
+}
+
+name=$1
+
+if [ -z "$name" ]; then
+	usage
+fi
+
+if [ ! -f config/version-code ] || [ ! -d .git ]; then
+	echo "Must be run from top-level directory"
+	exit 1
+fi
+
+if [ -n "`git status -s -uno`" ]; then
+	echo "Working directory is dirty!"
+	exit 1
+fi
+
+echo -e "Tag commit\n\n    `git log -1 --oneline`"
+echo -e "\nof branch\n\n    `git branch | sed -n 's/^\* //p'`"
+echo -ne "\nas $name? (y/N) "
+read answer
+if [ "$answer" != "y" ]; then
+	exit 1
+fi
+
+echo $name | sed "s/^v//" > config/version-code
+cp  config/version-code config/version-label
+git commit -sv config/version-code config/version-label -m "config: Bump version number"
+git tag -as $name -m "Release $name"
diff --git a/kernel/xenomai-v3.2.4/scripts/prepare-kernel.sh b/kernel/xenomai-v3.2.4/scripts/prepare-kernel.sh
new file mode 100755
index 0000000..64e2484
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/prepare-kernel.sh
@@ -0,0 +1,472 @@
+#! /bin/bash
+set -e
+
+unset CDPATH
+
+# At all times, this variable must be set to either:
+# "y" if the changes to the Linux tree are specific to the kernel version;
+# "n" otherwise.
+patch_kernelversion_specific="n"
+
+# At all times, this variable must be set to either:
+# "y" if the changes to the Linux tree are specific to the architecture;
+# "n" otherwise.
+patch_architecture_specific="n"
+
+# At all times, this variable must be set to either:
+# "y": ignore kernel-version-specific changes;
+# "n": ignore non-kernel-version-specific changes;
+# "b": don't filter according to the kernel version.
+patch_kernelversion_filter="b"
+
+# At all times, this variable must be set to either:
+# "y": ignore architecture-specific changes;
+# "n": ignore non-architecture-specific changes;
+# "b": don't filter according to the architecture.
+patch_architecture_filter="b"
+
+# Default path to kernel tree
+linux_tree=.
+
+patch_copytempfile() {
+    file="$1"
+    if ! test -f "$temp_tree/$file"; then
+        subdir=`dirname "$file"`
+        mkdir -p "$temp_tree/$subdir"
+        cp "$linux_tree/$file" "$temp_tree/$file"
+    fi
+}
+
+check_filter() {
+    if test "$patch_kernelversion_specific" != "$patch_kernelversion_filter" \
+        -a "$patch_architecture_specific" != "$patch_architecture_filter"; then
+        echo ok
+    elif test -e "$temp_tree/$1"; then
+        echo "$me: inconsistent multiple changes to $1 in Linux kernel tree" >&2
+	echo error
+    else
+        echo ignore
+    fi
+}
+
+patch_append() {
+    file="$1"
+    if test "x$output_patch" = "x"; then
+	if test -L "$linux_tree/$file" ; then
+	    mv "$linux_tree/$file" "$linux_tree/$file.orig"
+	    cp "$linux_tree/$file.orig" "$linux_tree/$file"
+	fi
+	chmod +w "$linux_tree/$file"
+        cat >> "$linux_tree/$file"
+    else
+        if test `check_filter $file` = "ok"; then
+            patch_copytempfile "$file"
+            cat >> "$temp_tree/$file"
+        fi
+    fi
+}
+
+patch_ed() {
+    file="$1"
+    if test "x$output_patch" = "x"; then
+        ed -s "$linux_tree/$file" > /dev/null
+    else
+        if test `check_filter $file` = "ok"; then
+            patch_copytempfile "$file"
+            ed -s "$temp_tree/$file" > /dev/null
+        fi
+    fi
+}
+
+patch_link() {
+    recursive="$1"         # "r" or "n"
+    link_file="$2"         # "m", "n" or some file (basename) from $target_dir
+    target_dir="$3"
+    link_dir="$4"
+
+    (
+        if test \! \( x$link_file = xm -o x$link_file = xn \); then
+	   find_clean_opt="-name $link_file"
+	   find_link_opt=$find_clean_opt
+	else
+           link_makefiles_opt=""
+           if test x$link_file = xm; then
+              link_makefiles_opt="-name Makefile -o"
+           fi
+           if test x$recursive = xr; then
+	       recursive_opt="-mindepth 1"
+	       dir_opt="-type d -o"
+           else
+	       recursive_opt="-maxdepth 1"
+	       dir_opt=""
+           fi
+	   find_clean_opt="$recursive_opt \( $dir_opt $link_makefiles_opt -name Kconfig -o -name '*.[chS]' -o -name '*.sh' \)"
+	   find_link_opt="$recursive_opt \( $link_makefiles_opt -name Kconfig -o -name '*.[chS]' -o -name '*.sh' \)"
+	fi
+
+        if test "x$output_patch" = "x" -a -e $linux_tree/$link_dir; then
+            cd $linux_tree/$link_dir &&
+	    eval find . $find_clean_opt |
+	    while read f; do
+                if test -L $f -a ! -e $xenomai_root/$target_dir/$f; then rm -Rf $f; fi
+            done
+        fi
+
+        cd $xenomai_root/$target_dir &&
+        eval find . $find_link_opt |
+        while read f; do
+            f=`echo $f | cut -d/ -f2-`
+            d=`dirname $f`
+            if test "x$output_patch" = "x"; then
+                mkdir -p $linux_tree/$link_dir/$d
+                if test x$forcelink = x1 -o \
+		   ! $xenomai_root/$target_dir/$f -ef $linux_tree/$link_dir/$f;
+		then
+                    ln -sf $xenomai_root/$target_dir/$f $linux_tree/$link_dir/$f
+                fi
+            else
+                if test `check_filter $link_dir/$f` = "ok"; then
+                    mkdir -p $temp_tree/$link_dir/$d
+                    cp $xenomai_root/$target_dir/$f $temp_tree/$link_dir/$f
+                fi
+            fi
+        done
+    )
+}
+
+generate_patch() {
+    (
+    cd "$temp_tree"
+    find . -type f |
+    while read f; do
+        diff -Naurd "$linux_tree/$f" "$f" |
+        sed -e "s,^--- ${linux_tree}/\.\(/.*\)$,--- linux\1," \
+            -e "s,^+++ \.\(/.*\)$,+++ linux-patched\1,"
+    done
+    )
+}
+
+
+usage='usage: prepare-kernel --linux=<linux-tree> [--dovetail=<dovetail-patch>]|[--ipipe=<ipipe-patch>] [--arch=<arch>] [--outpatch=<file> [--filterkvers=y|n] [--filterarch=y|n]] [--forcelink] [--default] [--verbose]'
+me=`basename $0`
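+
+# Example invocation (paths are illustrative):
+#   prepare-kernel.sh --linux=/usr/src/linux \
+#       --dovetail=/path/to/dovetail-patch --arch=arm64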
+
+while test $# -gt 0; do
+    case "$1" in
+    --linux=*)
+	linux_tree=`echo $1|sed -e 's,^--linux=\\(.*\\)$,\\1,g'`
+	linux_tree=`eval "echo $linux_tree"`
+	;;
+    --adeos=*)
+	pipeline_patch=`echo $1|sed -e 's,^--adeos=\\(.*\\)$,\\1,g'`
+	pipeline_patch=`eval "echo $pipeline_patch"`
+	probe_header=include/linux/ipipe.h
+	arch_probe_header=include/asm/ipipe.h
+	pipeline_type=ipipe
+	;;
+    --ipipe=*)
+	pipeline_patch=`echo $1|sed -e 's,^--ipipe=\\(.*\\)$,\\1,g'`
+	pipeline_patch=`eval "echo $pipeline_patch"`
+	probe_header=include/linux/ipipe.h
+	arch_probe_header=include/asm/ipipe.h
+	pipeline_type=ipipe
+	;;
+    --dovetail=*)
+	pipeline_patch=`echo $1|sed -e 's,^--dovetail=\\(.*\\)$,\\1,g'`
+	pipeline_patch=`eval "echo $pipeline_patch"`
+	probe_header=include/linux/dovetail.h
+	arch_probe_header=include/asm/dovetail.h
+	pipeline_type=dovetail
+	;;
+    --arch=*)
+	linux_arch=`echo $1|sed -e 's,^--arch=\\(.*\\)$,\\1,g'`
+	;;
+    --outpatch=*)
+	output_patch=`echo $1|sed -e 's,^--outpatch=\\(.*\\)$,\\1,g'`
+	;;
+    --filterkvers=*)
+        patch_kernelversion_filter=`echo $1|sed -e 's,^--filterkvers=\\(.*\\)$,\\1,g'`
+        ;;
+    --filterarch=*)
+        patch_architecture_filter=`echo $1|sed -e 's,^--filterarch=\\(.*\\)$,\\1,g'`
+        ;;
+    --forcelink)
+        forcelink=1
+        ;;
+    --default)
+        usedefault=1
+        ;;
+    --verbose)
+	verbose=1
+	;;
+    --help)
+	echo "$usage"
+	exit 0
+	;;
+    *)
+	echo "$me: unknown flag: $1" >&2
+	echo "$usage" >&2
+	exit 1
+	;;
+    esac
+    shift
+done
+
+# Infer the location of the Xenomai source tree from
+# the path of the current script.
+
+script_path=`type -p $0`
+xenomai_root=`dirname $script_path`/..
+xenomai_root=`cd $xenomai_root && pwd`
+
+# Check the Linux tree
+
+default_linux_tree=/lib/modules/`uname -r`/source
+
+while test x$linux_tree = x; do
+   if test x$usedefault = x; then
+      echo -n "Linux tree [default $default_linux_tree]: "
+      read linux_tree
+   fi
+   if test x$linux_tree = x; then
+      linux_tree=$default_linux_tree
+   fi
+   if test \! -x "$linux_tree"; then
+      echo "$me: cannot access Linux tree in $linux_tree"
+      linux_tree=
+      usedefault=
+      default_linux_tree=/usr/src
+   else
+      break
+   fi
+done
+
+linux_tree=`cd $linux_tree && pwd`
+linux_out=$linux_tree
+
+if test \! -r $linux_tree/Makefile; then
+   echo "$me: $linux_tree is not a valid Linux kernel tree" >&2
+   exit 2
+fi
+
+# Create an empty output patch file, and initialize the temporary tree.
+if test "x$output_patch" != "x"; then
+
+    temp_tree=`mktemp -d prepare-kernel-XXX --tmpdir`
+    if [ $? -ne 0 ]; then
+	echo Temporary directory could not be created.
+	exit 1
+    fi
+
+    patchdir=`dirname $output_patch`
+    patchdir=`cd $patchdir && pwd`
+    output_patch=$patchdir/`basename $output_patch`
+    echo > "$output_patch"
+
+fi
+
+# Infer the default architecture if unspecified.
+
+if test x$linux_arch = x; then
+   build_arch=`$xenomai_root/config/config.guess`
+   default_linux_arch=`echo $build_arch|cut -f1 -d-`
+fi
+
+while : ; do
+   if test x$linux_arch = x; then
+      if test x$usedefault = x; then
+         echo -n "Target architecture [default $default_linux_arch]: "
+         read linux_arch
+      fi
+      if test "x$linux_arch" = x; then
+         linux_arch=$default_linux_arch
+      fi
+   fi
+   case "$linux_arch" in
+   x86*|i*86|amd*)
+      linux_arch=x86
+      ;;
+   powerpc*|ppc*)
+      linux_arch=powerpc
+      ;;
+   arm)
+      linux_arch=arm
+      ;;
+   arm64|aarch64)
+      linux_arch=arm64
+      ;;
+   *)
+      echo "$me: unsupported architecture: $linux_arch" >&2
+      linux_arch=
+      usedefault=
+      ;;
+   esac
+   if test \! x$linux_arch = x; then
+      break
+   fi
+done
+
+foo=`grep '^KERNELSRC    := ' $linux_tree/Makefile | cut -d= -f2`
+if [ -n "$foo" ] ; then
+    linux_tree=$foo
+fi
+unset foo
+
+eval linux_`grep '^EXTRAVERSION =' $linux_tree/Makefile | sed -e 's, ,,g'`
+eval linux_`grep '^PATCHLEVEL =' $linux_tree/Makefile | sed -e 's, ,,g'`
+eval linux_`grep '^SUBLEVEL =' $linux_tree/Makefile | sed -e 's, ,,g'`
+eval linux_`grep '^VERSION =' $linux_tree/Makefile | sed -e 's, ,,g'`
+
+linux_version="$linux_VERSION.$linux_PATCHLEVEL.$linux_SUBLEVEL"
+
+if test x$pipeline_type = x; then
+    if test -r $linux_tree/include/linux/ipipe.h; then
+	probe_header=include/linux/ipipe.h
+	arch_probe_header=include/asm/ipipe.h
+	pipeline_type=ipipe
+    elif test -r $linux_tree/include/linux/dovetail.h; then
+	probe_header=include/linux/dovetail.h
+	arch_probe_header=include/asm/dovetail.h
+	pipeline_type=dovetail
+    fi
+fi
+
+if test x$verbose = x1; then
+echo "Preparing kernel $linux_version$linux_EXTRAVERSION in $linux_tree..."
+fi
+
+if test -r $linux_tree/$probe_header; then
+    if test x$verbose = x1; then
+       echo "IRQ pipeline found - bypassing patch."
+    fi
+else
+   if test x$verbose = x1; then
+      echo "$me: no IRQ pipeline support found." >&2
+   fi
+   while test x$pipeline_patch = x; do
+      echo -n "IRQ pipeline patch: "
+      read pipeline_patch
+      if test \! -r "$pipeline_patch" -o x$pipeline_patch = x; then
+         echo "$me: cannot read IRQ pipeline support from $pipeline_patch" >&2
+         pipeline_patch=
+      fi
+   done
+   patchdir=`dirname $pipeline_patch`;
+   patchdir=`cd $patchdir && pwd`
+   pipeline_patch=$patchdir/`basename $pipeline_patch`
+   curdir=$PWD
+   cd $linux_tree && patch --dry-run -p1 -f < $pipeline_patch || {
+        cd $curdir;
+        echo "$me: Unable to patch kernel $linux_version$linux_EXTRAVERSION with `basename $pipeline_patch`." >&2
+        exit 2;
+   }
+   patch -p1 -f -s < $pipeline_patch
+   cd $curdir
+fi
+
+if test \! -r $linux_tree/arch/$linux_arch/$arch_probe_header; then
+   echo "$me: $linux_tree has no IRQ pipeline support for $linux_arch" >&2
+   exit 2
+fi
+
+if test x$verbose = x1; then
+   echo "IRQ pipeline installed."
+fi
+
+patch_kernelversion_specific="y"
+
+case $linux_VERSION.$linux_PATCHLEVEL in
+
+    2.*)
+
+    echo "$me: Unsupported kernel version $linux_VERSION.$linux_PATCHLEVEL.x" >&2
+    exit 2
+    ;;
+
+    *)
+
+    patch_architecture_specific="y"
+
+    if ! grep -q XENOMAI $linux_tree/init/Kconfig; then
+	version_stamp=`cat $xenomai_root/config/version-code`
+	version_major=`expr $version_stamp : '\([0-9]*\)' || true`
+	version_minor=`expr $version_stamp : '[0-9]*\.\([0-9]*\)' || true`
+	revision_level=`expr $version_stamp : '[0-9]*\.[0-9]*\.\([0-9]*\)' || true`
+	if [ -z "$revision_level" ]; then
+	    revision_level=0
+	fi
+	version_string=`cat $xenomai_root/config/version-label`
+	sed -e "s,@VERSION_MAJOR@,$version_major,g" \
+	    -e "s,@VERSION_MINOR@,$version_minor,g" \
+	    -e "s,@REVISION_LEVEL@,$revision_level,g" \
+	    -e "s,@VERSION_STRING@,$version_string,g" \
+	    -e "s,@SRCARCH@,$linux_arch,g" \
+	    $xenomai_root/scripts/Kconfig.frag |
+            patch_append init/Kconfig
+    fi
+
+test "x$CONFIG_XENO_REVISION_LEVEL" = "x" && CONFIG_XENO_REVISION_LEVEL=0
+
+    if ! grep -q CONFIG_XENOMAI $linux_tree/arch/$linux_arch/Makefile; then
+	p="KBUILD_CFLAGS += -I\$(srctree)/arch/\$(SRCARCH)/xenomai/include -I\$(srctree)/arch/\$(SRCARCH)/xenomai/$pipeline_type/include -I\$(srctree)/include/xenomai"
+	(echo; echo $p) | patch_append arch/$linux_arch/Makefile
+	p="core-\$(CONFIG_XENOMAI)	+= arch/$linux_arch/xenomai/$pipeline_type/"
+	echo $p | patch_append arch/$linux_arch/Makefile
+    fi
+
+    patch_architecture_specific="n"
+
+    if ! grep -q CONFIG_XENOMAI $linux_tree/drivers/Makefile; then
+	p="obj-\$(CONFIG_XENOMAI)		+= xenomai/"
+	( echo ; echo $p ) | patch_append drivers/Makefile
+    fi
+
+    if ! grep -q CONFIG_XENOMAI $linux_tree/kernel/Makefile; then
+	p="obj-\$(CONFIG_XENOMAI)		+= xenomai/"
+	( echo ; echo $p ) | patch_append kernel/Makefile
+    fi
+    ;;
+
+esac
+
+# Create local directories then symlink to the source files from
+# there, so that we don't pollute the Xenomai source tree with
+# compilation files.
+
+patch_kernelversion_specific="n"
+patch_architecture_specific="y"
+patch_link r m kernel/cobalt/arch/$linux_arch arch/$linux_arch/xenomai
+patch_link n n kernel/cobalt/include/$pipeline_type arch/$linux_arch/include/$pipeline_type
+patch_architecture_specific="n"
+patch_link n m kernel/cobalt kernel/xenomai
+patch_link n cobalt-core.h kernel/cobalt/trace include/trace/events
+patch_link n cobalt-rtdm.h kernel/cobalt/trace include/trace/events
+patch_link n cobalt-posix.h kernel/cobalt/trace include/trace/events
+patch_link r n kernel/cobalt/include/asm-generic/xenomai include/asm-generic/xenomai
+patch_link r n kernel/cobalt/include/linux/xenomai include/linux/xenomai
+patch_link n m kernel/cobalt/posix kernel/xenomai/posix
+patch_link n m kernel/cobalt/rtdm kernel/xenomai/rtdm
+patch_link n m kernel/cobalt/$pipeline_type kernel/xenomai/pipeline
+patch_link r m kernel/drivers drivers/xenomai
+patch_link n n include/cobalt/kernel include/xenomai/cobalt/kernel
+patch_link r n include/cobalt/kernel/rtdm include/xenomai/rtdm
+patch_link r n include/cobalt/kernel/$pipeline_type/pipeline include/xenomai/pipeline
+patch_link r n include/cobalt/uapi include/xenomai/cobalt/uapi
+patch_link r n include/rtdm/uapi include/xenomai/rtdm/uapi
+patch_link n version.h include/xenomai include/xenomai
+patch_link n stdarg.h kernel/cobalt/include/linux include/xenomai/linux
+
+if test "x$output_patch" != "x"; then
+    if test x$verbose = x1; then
+    echo 'Generating patch.'
+    fi
+    generate_patch > "$output_patch"
+    rm -rf $temp_tree
+fi
+
+if test x$verbose = x1; then
+echo 'Links installed.'
+echo 'Build system ready.'
+fi
+
+exit 0
+
diff --git a/kernel/xenomai-v3.2.4/scripts/wrap-link.sh b/kernel/xenomai-v3.2.4/scripts/wrap-link.sh
new file mode 100755
index 0000000..fa83bd5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/wrap-link.sh
@@ -0,0 +1,213 @@
+#! /bin/sh
+# To build static applications for the Cobalt core, we need to split
+# link-edit into two stages, to avoid wrapping symbols from system
+# libs.
+
+set -e
+
+usage() {
+    cat <<EOF
+$1 [options] command-line
+
+Split command-line in two parts for linking static applications with
+the Xenomai POSIX/Cobalt interface in two stages, so that symbols
+from the system libraries are not wrapped.
+
+Options:
+-q be quiet
+-v be verbose (print each command before running it)
+-n dry run (print all commands but don't run any)
+
+Example:
+$1 -v gcc -o foo foo.o -Wl,@/usr/xenomai/lib/cobalt.wrappers -L/usr/xenomai/lib -lcobalt -lmodechk -lpthread -lrt
+will print and run:
++ gcc -o foo.tmp -Wl,-Ur -nostdlib foo.o -Wl,@/usr/xenomai/lib/cobalt.wrappers -Wl,@/usr/xenomai/lib/modechk.wrappers -L/usr/xenomai/lib
++ gcc -o foo foo.tmp -L/usr/xenomai/lib -lcobalt -lmodechk -lpthread -lrt
++ rm foo.tmp
+EOF
+}
+
+add_2stages() {
+    stage1_args="$stage1_args $@"
+    stage2_args="$stage2_args $@"
+}   
+
+add_linker_flag() {
+    if $next_is_wrapped_symbol; then
+	stage1_args="$stage1_args -Wl,--wrap $@"
+	next_is_wrapped_symbol=false
+    else
+	case "$@" in
+	*--as-needed*)
+		stage2_args="$stage2_args $@"
+		;;
+	*)
+		add_2stages "$@"
+		;;
+	esac
+    fi
+}
+
+add_linker_obj() {
+    if $stage2; then
+	stage2_args="$stage2_args $@"
+    else
+	stage1_args="$stage1_args $@"
+    fi
+}
+
+if test -n "$V" && test $V -gt 0; then
+    verbose=:
+else
+    verbose=false
+fi
+dryrun=
+progname="$0"
+
+if test $# -eq 0; then
+    usage "$progname"
+    exit 0
+fi
+
+while test $# -gt 0; do
+    arg="$1"
+    shift
+    case "$arg" in
+	-v) 
+	    verbose=:
+	    ;;
+
+	-q) 
+	    verbose=false
+	    ;;
+
+	-n) 
+	    dryrun="echo # "
+	    ;;
+
+	-*)
+	    cc="$cc $arg"
+	    ;;
+
+	*gcc*|*g++*)
+	    cc="$cc $arg"
+	    break
+	    ;;
+
+	*ld)
+	    usage "$progname"
+	    /bin/echo -e "\nlinker must be gcc or g++, not ld"
+	    exit 1
+	    ;;
+
+	*)
+	    cc="$cc $arg"
+	    ;;
+    esac
+done
+
+test -z "$dryrun" || verbose=false
+next_is_wrapped_symbol=false
+
+onestage_args="$@"
+stage1_args=""
+stage2_args=""
+stage2=false
+while test $# -gt 0; do
+    arg="$1"
+    shift
+    case "$arg" in
+	-Xlinker)
+	    arg="$1"
+	    shift
+	    case "$arg" in
+		--wrap)
+		    next_is_wrapped_symbol=:
+		    ;;
+		@*.wrappers)
+		    stage1_args="$stage1_args -Xlinker $arg"
+		    ;;
+
+		*) 
+		    add_linker_flag -Xlinker "$arg"
+		    ;;
+	    esac
+	    ;;
+
+	-Wl,--wrap)
+	    next_is_wrapped_symbol=:
+	    ;;
+
+	-Wl,--wrap,main|-Wl,--wrap=main|-Wl,--dynamic-list=*)
+	    # special case so that we can interpose on the main()
+	    # routine. For this we need this wrapping to take place in
+	    # the second stage.
+	    stage2_args="$stage2_args $arg"
+	    ;;
+
+	-Wl,@*.wrappers|-Wl,--wrap,*|-Wl,--wrap=*)
+	    stage1_args="$stage1_args $arg"
+	    ;;
+
+	-Wl,*)
+	    add_linker_flag "$arg"
+	    ;;
+
+	*libcobalt.so*|*libcobalt.a|-lcobalt)
+	    # linker directives might contain this pattern as well, so
+	    # match it later in the test sequence.
+	    stage2_args="$stage2_args $arg"
+	    stage2=:
+	    ;;
+
+	-o)
+	    output="$1"
+	    shift
+	    ;;
+
+	-o*)
+	    output=`expr "$arg" : '-o\(.*\)'`
+	    ;;
+	
+	-l) 
+	    arg="$1"
+	    shift
+	    add_linker_obj -l $arg
+	    ;;
+
+	-l*) #a library
+	    add_linker_obj $arg
+	    ;;
+
+	*.so)
+	    stage2_args="$stage2_args $arg"
+	    ;;
+
+	*.o)
+	    # Force .o to stage1 regardless of its position
+	    stage1_args="$stage1_args $arg"
+	    ;;
+
+	-pie)
+	    stage2_args="$stage2_args $arg"
+	    ;;
+
+	*) 
+	    if test -e "$arg"; then
+		add_linker_obj $arg
+	    else
+		add_2stages "$arg"
+	    fi
+	   ;;
+    esac
+done
+
+if $stage2; then
+    $verbose && set -x
+    $dryrun $cc -o "$output.tmp" -r -nostdlib $stage1_args
+    $dryrun $cc -o "$output" "$output.tmp" $stage2_args
+    $dryrun rm -f $output.tmp
+else
+    $verbose && set -x
+    $dryrun $cc $onestage_args
+fi
diff --git a/kernel/xenomai-v3.2.4/scripts/xeno-config-cobalt.in b/kernel/xenomai-v3.2.4/scripts/xeno-config-cobalt.in
new file mode 100644
index 0000000..9f60e90
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/xeno-config-cobalt.in
@@ -0,0 +1,292 @@
+#! /bin/sh
+
+staging=${DESTDIR}
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+libdir="@libdir@"
+datarootdir="@datarootdir@"
+datadir="@datadir@"
+pkgdatadir="${datadir}/@PACKAGE@"
+includedir="@includedir@"
+core="@CORE@"
+
+XENO_VERSION="@PACKAGE_VERSION@"
+XENO_PREFIX="${staging}${prefix}"
+XENO_CC="@CC@"
+XENO_TARGET_ARCH="@XENO_TARGET_ARCH@"
+XENO_INCLUDE_DIR="${staging}${includedir}"
+XENO_BASE_CFLAGS="-I$XENO_INCLUDE_DIR/cobalt -I$XENO_INCLUDE_DIR @XENO_USER_APP_CFLAGS@ -D__COBALT__"
+XENO_COBALT_LDFLAGS="-L${staging}${libdir} -lcobalt"
+XENO_POSIX_LDFLAGS="-lpthread -lrt @XENO_USER_APP_LDFLAGS@"
+XENO_LIBRARY_DIR="${staging}${libdir}"
+LD_FILE_OPTION="@LD_FILE_OPTION@"
+
+unset skin_list compat codegen
+
+dump_wrappers()
+{
+   file=$1
+   if test \! -r ${XENO_LIBRARY_DIR}/$file; then
+      echo "Xenomai: wrappers cannot be read from ${XENO_LIBRARY_DIR}/${file}" >&2
+      exit 1
+   fi
+   if test "$LD_FILE_OPTION" = "yes"; then
+      echo -n "-Wl,@${XENO_LIBRARY_DIR}/${file}"
+   else
+      wrappers=`while read wrap_option symbol; do	\
+	      echo -n "-Wl,$wrap_option=$symbol " ; \
+      done < ${XENO_LIBRARY_DIR}/${file}`
+      echo -n "$wrappers"
+   fi
+}
+
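+# For instance (illustrative), "dump_wrappers cobalt.wrappers" expands to
+# "-Wl,@${XENO_LIBRARY_DIR}/cobalt.wrappers" when the linker supports @file
+# lists, and to one "-Wl,--wrap=<symbol>" option per wrapped symbol otherwise.
+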
+usage ()
+{
+cat <<EOF
+Usage: xeno-config OPTIONS
+Options:
+        --help
+        --v,--verbose
+        --version
+        --cc
+        --ccld
+        --arch
+        --prefix
+        --[skin=]posix|vxworks|psos|alchemy|rtdm|smokey|cobalt
+        --auto-init|auto-init-solib|no-auto-init
+        --mode-check|no-mode-check
+        --cflags
+        --ldflags
+        --lib*-dir|libdir|user-libdir
+        --core
+        --info
+        --compat
+EOF
+    exit $1
+}
+
+dump_info ()
+{
+    version=${XENO_VERSION}
+    if test -x $XENO_PREFIX/sbin/version; then
+	_version=`$XENO_PREFIX/sbin/version 2>/dev/null`
+	test x"$_version" = x || version="$_version"
+    fi
+    echo "Xenomai version: ${version}"
+    uname -a 2>/dev/null || echo "Cannot determine system information (uname?)"
+    echo "Kernel parameters: `cat /proc/cmdline`"
+    if test -r /proc/ipipe/version; then
+	echo "I-pipe release #`cat /proc/ipipe/version` detected"
+    fi
+    if test -r /proc/xenomai/version; then
+	echo "Cobalt core `cat /proc/xenomai/version` detected"
+    else
+	echo "Cobalt core disabled or not present on this system"
+    fi
+    echo "Compiler: @XENO_BUILD_COMPILER@"
+    eval echo "Build args: @XENO_BUILD_ARGS@"
+    exit 0
+}
+
+verbose ()
+{
+    echo xeno-config --verbose
+    echo "        --core=cobalt"
+    echo "        --version=\"${XENO_VERSION}\""
+    echo "        --cc=\"$XENO_CC\""
+    echo "        --ccld=\"$XENO_PREFIX/bin/wrap-link.sh $XENO_CC\""
+    echo "        --arch=\"$XENO_TARGET_ARCH\""
+    echo "        --prefix=\"$XENO_PREFIX\""
+    echo "        --library-dir=\"$XENO_LIBRARY_DIR\""
+}
+
+if test $# -eq 0; then
+    verbose $*
+    usage 1 1>&2
+fi
+
+do_ldflags=
+do_cflags=
+do_setinit=
+do_autoinit=y
+do_autoinit_solib=
+do_mode_check=y
+
+while test $# -gt 0; do
+    case "$1" in
+        --v|--verbose)
+            verbose $*
+            exit 0
+            ;;
+        --version)
+            echo ${XENO_VERSION}
+            ;;
+        --cc)
+            echo $XENO_CC
+            ;;
+        --ccld)
+            echo $XENO_PREFIX/bin/wrap-link.sh $XENO_CC
+            ;;
+        --arch)
+            echo $XENO_TARGET_ARCH
+            ;;
+        --prefix)
+            echo $XENO_PREFIX
+            ;;
+	--no-auto-init)
+	    do_setinit=y
+	    do_autoinit=
+	    do_autoinit_solib=
+	    ;;
+	--auto-init)
+	    do_setinit=y
+	    do_autoinit=y
+	    do_autoinit_solib=
+	    ;;
+	--auto-init-solib)
+	    if test \! -r ${XENO_LIBRARY_DIR}/xenomai/bootstrap-pic.o; then
+		echo "--auto-init-solib requires shared library support" 1>&2
+		exit 1
+	    fi
+	    do_setinit=y
+	    do_autoinit=y
+	    do_autoinit_solib=y
+	    ;;
+        --lib*-dir|--libdir|--user-libdir)
+            echo $XENO_LIBRARY_DIR
+            ;;
+	--skin)
+	    if [ "$2" = native ]; then
+		skin_list="$skin_list alchemy"
+		compat=y
+	    else
+		skin_list="$skin_list $2"
+	    fi
+	    shift
+	    ;;
+	--native|--skin=*)
+	    if [ "$1" = "--skin=native" -o "$1" = "--native" ]; then
+		skin_list="$skin_list alchemy"
+		compat=y
+            else
+		skin_list="$skin_list `expr "$1" : '--skin=\(.*\)'`"
+	    fi
+	    ;;
+	--posix|--cobalt|--vxworks|--psos|--alchemy|--rtdm|--smokey)
+	    skin_list="$skin_list `expr "$1" : '--\(.*\)'`"
+	    ;;
+	--cflags)
+	    do_cflags=y
+	    ;;
+	--ldflags)
+	    do_ldflags=y
+	    ;;
+	--core)
+	    echo cobalt
+	    ;;
+        --help)
+            usage 0 1>&2
+            ;;
+        --info)
+            dump_info
+            ;;
+	--compat)
+	    compat=y
+            ;;
+	--mode-check)
+	    do_mode_check=y
+	    ;;
+	--no-mode-check)
+	    do_mode_check=
+	    ;;
+        *)
+         usage 1 1>&2
+         ;;
+    esac
+    shift
+done
+
+if test x$do_setinit = xy -a x$do_ldflags = x; then
+	echo "--[no-]auto-init is meaningful only with --ldflags" 1>&2
+	exit 1
+fi
+
+if test x$do_cflags = xy; then
+    if test -z "$skin_list"; then
+	echo "no API specified, missing --skin before --cflags" 1>&2
+	exit 1
+    fi
+    cflags="$XENO_BASE_CFLAGS"
+    test x$compat = xy && cflags="-I$XENO_INCLUDE_DIR/trank -D__XENO_COMPAT__ $cflags"
+    for skin in $skin_list; do
+	case "$skin" in
+	    posix|rtdm)
+		test x$compat = xy && cflags="-I$XENO_INCLUDE_DIR/trank/posix $cflags"
+		cflags="$cflags -D__COBALT_WRAP__"
+		;;
+	    cobalt)
+		;;
+	    vxworks|psos|alchemy|smokey)
+		cflags="$cflags -I$XENO_INCLUDE_DIR/$skin"
+		;;
+	    *)
+		echo "$skin is not a user-space API" 1>&2
+		exit 1
+		;;
+	esac
+    done
+    echo $cflags
+fi
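+
+# Example (a sketch, with <includedir> standing for the configured
+# include directory): "xeno-config --posix --cflags" prints something
+# like:
+#
+#   -I<includedir>/cobalt -I<includedir> -D__COBALT__ -D__COBALT_WRAP__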
+
+if test x$do_ldflags = xy; then
+    if test -z "$skin_list"; then
+	echo "no API specified, missing --skin before --ldflags" 1>&2
+	exit 1
+    fi
+    lmodeck=
+    ldflags=
+    if test x$do_mode_check = xy; then
+	ldflags="`dump_wrappers modechk.wrappers`"
+	lmodeck=-lmodechk
+    fi
+    test x$compat = xy && ldflags="-ltrank $ldflags"
+    copperplate=
+    for skin in $skin_list; do
+	case "$skin" in
+	    posix|rtdm)
+		ldflags="`dump_wrappers cobalt.wrappers` $ldflags"
+		;;
+	    cobalt)
+		# do NOT wrap POSIX symbols in application code with
+		# --cobalt. On the contrary, --posix does. This switch
+		# does not affect mode checking wrappers,
+		# --[no-]mode-check does.
+		;;
+	    vxworks|psos|alchemy|smokey)
+		copperplate="-lcopperplate$core"
+		ldflags="$ldflags -l$skin$core"
+		if [ -r ${XENO_LIBRARY_DIR}/${skin}.wrappers ]; then
+		    ldflags=" `dump_wrappers ${skin}.wrappers` $ldflags"
+		fi
+		;;
+	    *)
+		echo "unknown API: $skin" 1>&2
+		exit 1
+		;;
+	esac
+    done
+    wrap_main=
+    if test x$do_autoinit = xy; then
+	if test x$do_autoinit_solib = xy; then
+	    codegen=-pic
+	else
+	    wrap_main="-Wl,--wrap=main -Wl,--dynamic-list=${XENO_LIBRARY_DIR}/dynlist.ld"
+	fi
+	bootstrap="${XENO_LIBRARY_DIR}/xenomai/bootstrap${codegen}.o"
+    fi
+    ldflags="-Wl,--no-as-needed $ldflags"
+    echo "$ldflags $copperplate $bootstrap $wrap_main $XENO_COBALT_LDFLAGS" \
+		"$lmodeck $XENO_POSIX_LDFLAGS"
+fi
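+
+# Sketch of a resulting link line (assuming the posix skin, default
+# auto-init and mode checking, and @file support in the linker;
+# <libdir> stands for the configured library directory):
+#
+#   xeno-config --posix --ldflags
+#   => -Wl,--no-as-needed -Wl,@<libdir>/cobalt.wrappers
+#      -Wl,@<libdir>/modechk.wrappers <libdir>/xenomai/bootstrap.o
+#      -Wl,--wrap=main -Wl,--dynamic-list=<libdir>/dynlist.ld
+#      -L<libdir> -lcobalt -lmodechk -lpthread -lrt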
+
+exit 0
diff --git a/kernel/xenomai-v3.2.4/scripts/xeno-config-mercury.in b/kernel/xenomai-v3.2.4/scripts/xeno-config-mercury.in
new file mode 100644
index 0000000..42b0de2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/xeno-config-mercury.in
@@ -0,0 +1,221 @@
+#! /bin/sh
+
+staging=${DESTDIR}
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+libdir="@libdir@"
+datarootdir="@datarootdir@"
+datadir="@datadir@"
+pkgdatadir="${datadir}/@PACKAGE@"
+includedir="@includedir@"
+core="@CORE@"
+
+XENO_VERSION="@PACKAGE_VERSION@"
+XENO_PREFIX="${staging}${prefix}"
+XENO_CC="@CC@"
+XENO_TARGET_ARCH="@XENO_BUILD_STRING@"
+XENO_INCLUDE_DIR="${staging}${includedir}"
+XENO_BASE_CFLAGS="-I$XENO_INCLUDE_DIR/mercury -I$XENO_INCLUDE_DIR @XENO_USER_APP_CFLAGS@ -D__MERCURY__"
+XENO_BASE_LDFLAGS="-L${staging}${libdir} -lmercury -lpthread -lrt @XENO_USER_APP_LDFLAGS@"
+XENO_LIBRARY_DIR="${staging}${libdir}"
+
+unset skin_list codegen
+
+usage ()
+{
+cat <<EOF
+Usage: xeno-config OPTIONS
+Options:
+        --help
+        --v,--verbose
+        --version
+        --cc
+        --ccld
+        --arch
+        --prefix
+        --[skin=]posix|vxworks|psos|alchemy|rtdm|smokey
+        --auto-init|auto-init-solib|no-auto-init
+        --cflags
+        --ldflags
+        --lib*-dir,--libdir,--user-libdir
+        --core
+        --info
+EOF
+    exit $1
+}
+
+verbose ()
+{
+    echo xeno-config --verbose
+    echo "        --core=mercury"
+    echo "        --version=\"${XENO_VERSION}\""
+    echo "        --cc=\"$XENO_CC\""
+    echo "        --ccld=\"$XENO_CC\""
+    echo "        --arch=\"$XENO_TARGET_ARCH\""
+    echo "        --prefix=\"$XENO_PREFIX\""
+    echo "        --library-dir=\"$XENO_LIBRARY_DIR\""
+    exit 0
+}
+
+dump_info ()
+{
+    version=${XENO_VERSION}
+    if test -x $XENO_PREFIX/sbin/version; then
+	_version=`$XENO_PREFIX/sbin/version 2>/dev/null`
+	test x"$_version" = x || version="$_version"
+    fi
+    echo "Xenomai version: ${version}"
+    uname -a 2>/dev/null || echo "Cannot determine system information (uname?)"
+    echo "Kernel parameters: `cat /proc/cmdline`"
+    echo "Compiler: @XENO_BUILD_COMPILER@"
+    eval echo "Build args: @XENO_BUILD_ARGS@"
+}
+
+if test $# -eq 0; then
+    verbose $*
+    usage 1 1>&2
+fi
+
+do_ldflags=
+do_cflags=
+do_setinit=
+do_autoinit=y
+do_autoinit_solib=
+
+while test $# -gt 0; do
+    case "$1" in
+        --v|--verbose)
+            verbose $*
+            exit 0
+            ;;
+        --version)
+            echo ${XENO_VERSION}
+            ;;
+        --cc|--ccld)
+            echo $XENO_CC
+            ;;
+        --arch)
+            echo $XENO_TARGET_ARCH
+            ;;
+        --prefix)
+            echo $XENO_PREFIX
+            ;;
+	--no-auto-init)
+	    do_setinit=y
+	    do_autoinit=
+	    do_autoinit_solib=
+	    ;;
+	--auto-init)
+	    do_setinit=y
+	    do_autoinit=y
+	    do_autoinit_solib=
+	    ;;
+	--auto-init-solib)
+	    if test \! -r ${XENO_LIBRARY_DIR}/xenomai/bootstrap-pic.o; then
+		echo "--auto-init-solib requires shared library support" 1>&2
+		exit 1
+	    fi
+	    do_setinit=y
+	    do_autoinit=y
+	    do_autoinit_solib=y
+	    ;;
+        --lib*-dir|--libdir|--user-libdir)
+            echo $XENO_LIBRARY_DIR
+            ;;
+	--skin)
+	    if [ "$2" = native ]; then
+		skin_list="$skin_list alchemy"
+	    else
+		skin_list="$skin_list $2"
+	    fi
+	    shift
+	    ;;
+	--skin=*)
+	    if [ "$1" = "--skin=native" -o "$1" = "--native" ]; then
+		skin_list="$skin_list alchemy"
+            else
+		skin_list="$skin_list `expr "$1" : '--skin=\(.*\)'`"
+	    fi
+	    ;;
+	--posix|--vxworks|--psos|--alchemy|--rtdm|--smokey)
+	    skin_list="$skin_list `expr "$1" : '--\(.*\)'`"
+	    ;;
+	--cflags)
+	    do_cflags=y
+	    ;;
+	--ldflags)
+	    do_ldflags=y
+	    ;;
+	--core)
+	    echo mercury
+	    ;;
+        --help)
+            usage 0 1>&2
+            ;;
+        --info)
+            dump_info
+            ;;
+        *)
+	 echo "invalid option: $1" 1>&2
+         usage 1 1>&2
+         ;;
+    esac
+    shift
+done
+
+if test x$do_setinit = xy -a x$do_ldflags = x; then
+	echo "--[no-]auto-init is meaningful only with --ldflags" 1>&2
+	exit 1
+fi
+
+if test x$do_cflags = xy; then
+    cflags="$XENO_BASE_CFLAGS"
+    test -z "$skin_list" && skin_list=posix
+    for skin in $skin_list; do
+	case "$skin" in
+	    vxworks|psos|alchemy|smokey)
+		cflags="$cflags -I$XENO_INCLUDE_DIR/$skin"
+		;;
+	    posix|rtdm)
+		;;
+	    *)
+		echo "$skin is not a user-space API" 1>&2
+		exit 1
+		;;
+	esac
+    done
+    echo $cflags
+fi
+
+if test x$do_ldflags = xy; then
+    test -z "$skin_list" && skin_list=posix
+    ldflags=
+    copperplate=
+    for skin in $skin_list; do
+	case "$skin" in
+	    posix|rtdm)
+		;;
+	    vxworks|psos|alchemy|smokey)
+		copperplate="-lcopperplate$core"
+		ldflags="-l$skin$core $ldflags"
+		;;
+	    *)
+		echo "unknown API: $skin" 1>&2
+		exit 1
+		;;
+	esac
+    done
+    wrap_main=
+    if test x$do_autoinit = xy; then
+	if test x$do_autoinit_solib = xy; then
+	    codegen=-pic
+	else
+	    wrap_main="-Wl,--wrap=main -Wl,--dynamic-list=${XENO_LIBRARY_DIR}/dynlist.ld"
+	fi
+	bootstrap="${XENO_LIBRARY_DIR}/xenomai/bootstrap${codegen}.o"
+    fi
+    ldflags="-Wl,--no-as-needed $ldflags"
+    echo "$ldflags $copperplate $bootstrap $wrap_main $XENO_BASE_LDFLAGS"
+fi
+
+exit 0
diff --git a/kernel/xenomai-v3.2.4/scripts/xeno.in b/kernel/xenomai-v3.2.4/scripts/xeno.in
new file mode 100644
index 0000000..fea227f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/scripts/xeno.in
@@ -0,0 +1,17 @@
+#!/bin/sh
+#
+# Wrapper script to hide distribution-specific installation
+# prefixes when running standard Xenomai commands.
+#
+# e.g. "xeno latency"
+#
+
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+BINDIR="@bindir@"
+TESTDIR="@XENO_TEST_DIR@"
+unset prefix exec_prefix
+
+test -x $BINDIR/$1 && exec $BINDIR/$@
+test -x $TESTDIR/$1 && exec $TESTDIR/$@
+echo "$1: not found or not executable" >&2
+exit 1
diff --git a/kernel/xenomai-v3.2.4/testsuite/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/Makefile.am
new file mode 100644
index 0000000..4932f6d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/Makefile.am
@@ -0,0 +1,21 @@
+
+SUBDIRS = latency smokey gpiobench
+
+if XENO_COBALT
+SUBDIRS += 		\
+	clocktest	\
+	gpiotest	\
+	spitest		\
+	switchtest	\
+	xeno-test
+endif
+
+DIST_SUBDIRS =		\
+	clocktest	\
+	gpiotest	\
+	gpiobench	\
+	latency		\
+	smokey		\
+	spitest		\
+	switchtest	\
+	xeno-test
diff --git a/kernel/xenomai-v3.2.4/testsuite/clocktest/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/clocktest/Makefile.am
new file mode 100644
index 0000000..424f59d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/clocktest/Makefile.am
@@ -0,0 +1,18 @@
+testdir = @XENO_TEST_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+test_PROGRAMS = clocktest
+
+clocktest_SOURCES = clocktest.c
+
+clocktest_CPPFLAGS =					\
+	$(XENO_USER_CFLAGS)				\
+	-I$(top_srcdir)/include
+
+clocktest_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+clocktest_LDADD =			\
+	 @XENO_CORE_LDADD@		\
+	 @XENO_USER_LDADD@		\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/testsuite/clocktest/clocktest.c b/kernel/xenomai-v3.2.4/testsuite/clocktest/clocktest.c
new file mode 100644
index 0000000..c524f42
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/clocktest/clocktest.c
@@ -0,0 +1,395 @@
+/*
+ * Copyright (C) 2007 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <errno.h>
+#include <pthread.h>
+#include <sched.h>
+#include <signal.h>
+#include <stdio.h>
+#include <error.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <ctype.h>
+#include <string.h>
+#include <time.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <sys/time.h>
+#include <boilerplate/ancillaries.h>
+#include <boilerplate/atomic.h>
+#include <cobalt/uapi/kernel/vdso.h>
+#include <xeno_config.h>
+
+extern struct xnvdso *cobalt_vdso;
+
+/*
+ * We can't really trust POSIX headers to check for features, since
+ * some archs may not implement all of the declared uClibc POSIX
+ * features (e.g. NIOS2).
+ */
+#ifdef HAVE_PTHREAD_SPIN_LOCK
+pthread_spinlock_t lock;
+#define init_lock(lock)				pthread_spin_init(lock, 0)
+#define acquire_lock(lock)			pthread_spin_lock(lock)
+#define release_lock(lock)			pthread_spin_unlock(lock)
+#else
+pthread_mutex_t lock;
+#define init_lock(lock)				pthread_mutex_init(lock, NULL)
+#define acquire_lock(lock)			pthread_mutex_lock(lock)
+#define release_lock(lock)			pthread_mutex_unlock(lock)
+#endif
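+
+/*
+ * Explanatory note: the critical sections guarded below are only a few
+ * instructions long, so a spinlock is preferred where available; the
+ * mutex fallback is functionally equivalent on platforms lacking
+ * pthread_spin_lock support.
+ */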
+
+static uint64_t last_common = 0;
+static clockid_t clock_id = CLOCK_REALTIME;
+static cpu_set_t cpu_realtime_set, cpu_online_set;
+
+struct per_cpu_data {
+	uint64_t first_tod, first_clock;
+	int first_round;
+	int64_t offset;
+	double drift;
+	unsigned long warps;
+	uint64_t max_warp;
+	pthread_t thread;
+} *per_cpu_data;
+
+static void show_hostrt_diagnostics(void)
+{
+	if (!xnvdso_test_feature(cobalt_vdso, XNVDSO_FEAT_HOST_REALTIME)) {
+		printf("XNVDSO_FEAT_HOST_REALTIME not available\n");
+		return;
+	}
+
+	if (cobalt_vdso->hostrt_data.live)
+		printf("hostrt data area is live\n");
+	else {
+		printf("hostrt data area is not live\n");
+		return;
+	}
+
+	printf("sequence counter : %u\n",
+	       cobalt_vdso->hostrt_data.lock.sequence);
+	printf("wall_time_sec    : %lld\n",
+	       (long long)cobalt_vdso->hostrt_data.wall_sec);
+	printf("wall_time_nsec   : %u\n", cobalt_vdso->hostrt_data.wall_nsec);
+	printf("wall_to_monotonic_sec    : %lld\n",
+	       (long long)cobalt_vdso->hostrt_data.wtom_sec);
+	printf("wall_to_monotonic_nsec   : %u\n", cobalt_vdso->hostrt_data.wtom_nsec);
+	printf("cycle_last       : %Lu\n", (long long)cobalt_vdso->hostrt_data.cycle_last);
+	printf("mask             : 0x%Lx\n", (long long)cobalt_vdso->hostrt_data.mask);
+	printf("mult             : %u\n", cobalt_vdso->hostrt_data.mult);
+	printf("shift            : %u\n\n", cobalt_vdso->hostrt_data.shift);
+}
+
+static void show_realtime_offset(void)
+{
+	if (!xnvdso_test_feature(cobalt_vdso, XNVDSO_FEAT_WALLCLOCK_OFFSET)) {
+		printf("XNVDSO_FEAT_WALLCLOCK_OFFSET not available\n");
+		return;
+	}
+
+	printf("Wallclock offset : %llu\n",
+	       (unsigned long long)cobalt_vdso->wallclock_offset);
+}
+
+static inline uint64_t read_clock(clockid_t clock_id)
+{
+	struct timespec ts;
+	int res;
+
+	res = clock_gettime(clock_id, &ts);
+	if (res != 0) {
+		fprintf(stderr, "clock_gettime failed for clock id %d\n",
+			clock_id);
+		if (clock_id == CLOCK_HOST_REALTIME)
+			show_hostrt_diagnostics();
+		else if (clock_id == CLOCK_REALTIME)
+			show_realtime_offset();
+
+		exit(-1);
+	}
+	return ts.tv_nsec + ts.tv_sec * 1000000000ULL;
+}
+
+static inline uint64_t read_reference_clock(void)
+{
+	struct timeval tv;
+
+	/*
+	 * Make sure we do not pick the vsyscall variant. It won't
+	 * switch us into secondary mode and can easily deadlock.
+	 */
+	syscall(SYS_gettimeofday, &tv, NULL);
+	return tv.tv_usec * 1000ULL + tv.tv_sec * 1000000000ULL;
+}
+
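+/*
+ * Sampling note: both clocks are read back to back ten times; the
+ * iteration showing the smallest gap between two successive reference
+ * readings is assumed to be the one least disturbed by preemption, so
+ * its (reference, clock) pair yields the most trustworthy offset
+ * between the tested clock and gettimeofday() time.
+ */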
+static void check_reference(struct per_cpu_data *per_cpu_data)
+{
+	uint64_t clock_val[10], tod_val[10];
+	int64_t delta, min_delta;
+	int i, idx;
+
+	for (i = 0; i < 10; i++) {
+		tod_val[i] = read_reference_clock();
+		clock_val[i] = read_clock(clock_id);
+	}
+
+	min_delta = tod_val[1] - tod_val[0];
+	idx = 1;
+
+	for (i = 2; i < 10; i++) {
+		delta = tod_val[i] - tod_val[i-1];
+		if (delta < min_delta) {
+			min_delta = delta;
+			idx = i;
+		}
+	}
+
+	if (per_cpu_data->first_round) {
+		per_cpu_data->first_round = 0;
+
+		per_cpu_data->first_tod = tod_val[idx];
+		per_cpu_data->first_clock = clock_val[idx];
+	} else
+		per_cpu_data->drift =
+			(clock_val[idx] - per_cpu_data->first_clock) /
+			(double)(tod_val[idx] - per_cpu_data->first_tod) - 1;
+
+	per_cpu_data->offset = clock_val[idx] - tod_val[idx];
+}
+
+static void check_time_warps(struct per_cpu_data *per_cpu_data)
+{
+	int i;
+	uint64_t last, now;
+	int64_t incr;
+
+	for (i = 0; i < 100; i++) {
+		acquire_lock(&lock);
+		now = read_clock(clock_id);
+		last = last_common;
+		last_common = now;
+		release_lock(&lock);
+
+		incr = now - last;
+		if (incr < 0) {
+			acquire_lock(&lock);
+			per_cpu_data->warps++;
+			if (-incr > per_cpu_data->max_warp)
+				per_cpu_data->max_warp = -incr;
+			release_lock(&lock);
+		}
+	}
+}
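+
+/*
+ * Explanatory note on warp detection: last_common is shared by all
+ * per-CPU threads, so a negative increment means this CPU read an
+ * earlier time than the value another CPU just published, i.e. the
+ * clock appeared to run backwards across CPUs. The largest backward
+ * step seen is kept in max_warp.
+ */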
+
+static void *cpu_thread(void *arg)
+{
+	int cpuid = (long)arg;
+	struct sched_param param = { .sched_priority = 1 };
+	struct timespec delay = { 0, 0 };
+	cpu_set_t cpu_set;
+
+	srandom(read_reference_clock());
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(cpuid, &cpu_set);
+	sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+	pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
+	while (1) {
+		check_reference(&per_cpu_data[cpuid]);
+
+		check_time_warps(&per_cpu_data[cpuid]);
+
+		delay.tv_nsec = 1000000 + random() * (100000.0 / RAND_MAX);
+		nanosleep(&delay, NULL);
+	}
+
+	return NULL;
+}
+
+static void sighand(int signal)
+{
+	exit(0);
+}
+
+static clockid_t resolve_clock_name(const char *name,
+				    const char **real_name, int ext)
+{
+	char *path, buf[BUFSIZ];
+	clockid_t clock_id;
+	FILE *fp;
+	int id;
+
+	*real_name = name;
+
+	if (!ext) {
+		if (isdigit(*name)) {
+			clock_id = (clockid_t)atoi(name);
+			switch (clock_id) {
+			case CLOCK_REALTIME:
+				*real_name = "CLOCK_REALTIME";
+				break;
+			case CLOCK_MONOTONIC:
+				*real_name = "CLOCK_MONOTONIC";
+				break;
+			case CLOCK_MONOTONIC_RAW:
+				*real_name = "CLOCK_MONOTONIC_RAW";
+				break;
+			case CLOCK_HOST_REALTIME:
+				*real_name = "CLOCK_HOST_REALTIME";
+				break;
+			default:
+				error(1, EINVAL, "bad built-in clock id '%d'",
+				      clock_id);
+			}
+			return clock_id;
+		}
+		if (strcmp(name, "CLOCK_REALTIME") == 0)
+			return CLOCK_REALTIME;
+		if (strcmp(name, "CLOCK_MONOTONIC") == 0)
+			return CLOCK_MONOTONIC;
+		if (strcmp(name, "CLOCK_MONOTONIC_RAW") == 0)
+			return CLOCK_MONOTONIC_RAW;
+		if (strcmp(name, "CLOCK_HOST_REALTIME") == 0)
+			return CLOCK_HOST_REALTIME;
+		if (strcmp(name, "coreclk") == 0) {
+			/* Read the core clock as CLOCK_MONOTONIC_RAW */
+			*real_name = "CLOCK_MONOTONIC_RAW";
+			return CLOCK_MONOTONIC_RAW;
+		}
+	}
+
+	if (!isdigit(*name)) {
+		if (asprintf(&path, "/proc/xenomai/clock/%s", name) < 0)
+			error(1, ENOMEM, "resolve_clock_name");
+		fp = fopen(path, "r");
+		if (fp == NULL)
+			error(1, EINVAL, "bad extension clock name '%s'", name);
+		if (fgets(buf, sizeof(buf), fp) == NULL)
+			error(1, EIO, "cannot read %s", path);
+		free(path);
+		if (sscanf(buf, "%*[ ]id: %d\n", &id) != 1)
+			error(1, EINVAL, "bad extension clock name '%s'", name);
+		return __COBALT_CLOCK_EXT(id);
+	}
+
+	id = atoi(name);
+	if (!__COBALT_CLOCK_EXT_P(id) ||
+	    __COBALT_CLOCK_EXT_INDEX(id) >= COBALT_MAX_EXTCLOCKS)
+		error(1, EINVAL, "bad extension clock id '%d'", id);
+
+	*real_name = "CLOCK_UNKNOWN";
+
+	return __COBALT_CLOCK_EXT(id);
+}
+
+int main(int argc, char *argv[])
+{
+	const char *clock_name = NULL, *real_clock_name = "CLOCK_REALTIME";
+	int max_cpu, cpus;
+	int i;
+	int c;
+	int d = 0;
+	int ext = 0;
+
+	while ((c = getopt(argc, argv, "C:ET:D")) != EOF)
+		switch (c) {
+		case 'C':
+			clock_name = optarg;
+			break;
+
+		case 'E':
+			ext = 1;
+			break;
+
+		case 'T':
+			alarm(atoi(optarg));
+			break;
+
+		case 'D':
+			d = 1;
+			break;
+
+		default:
+			fprintf(stderr, "usage: clocktest [options]\n"
+				"  [-C <clock_id|clock_name>]   # tested clock, defaults to CLOCK_REALTIME\n"
+				"  [-E]                         # -C specifies extension clock\n"
+				"  [-T <test_duration_seconds>] # default=0, so ^C to end\n"
+				"  [-D]                         # print extra diagnostics for CLOCK_HOST_REALTIME\n");
+			exit(2);
+		}
+
+	if (clock_name)
+		clock_id = resolve_clock_name(clock_name, &real_clock_name, ext);
+
+	signal(SIGALRM, sighand);
+
+	init_lock(&lock);
+
+	if (d && clock_id == CLOCK_HOST_REALTIME)
+		show_hostrt_diagnostics();
+
+	if (get_realtime_cpu_set(&cpu_realtime_set) != 0)
+		error(1, ENOSYS, "get_realtime_cpu_set");
+
+	if (get_online_cpu_set(&cpu_online_set) != 0)
+		error(1, ENOSYS, "get_online_cpu_set");
+
+	CPU_AND(&cpu_realtime_set, &cpu_realtime_set, &cpu_online_set);
+
+	max_cpu = 0;
+	cpus = 0;
+	for (i = 0; i < CPU_SETSIZE; i++) {
+		if (!CPU_ISSET(i, &cpu_realtime_set))
+			continue;
+		cpus++;
+		if (i > max_cpu)
+			max_cpu = i;
+	}
+
+	per_cpu_data = malloc(sizeof(*per_cpu_data) * (max_cpu + 1));
+	if (per_cpu_data == NULL)
+		error(1, ENOMEM, "malloc");
+
+	memset(per_cpu_data, 0, sizeof(*per_cpu_data) * (max_cpu + 1));
+
+	for (i = 0; i <= max_cpu; i++) {
+		if (!CPU_ISSET(i, &cpu_realtime_set))
+			continue;
+		per_cpu_data[i].first_round = 1;
+		pthread_create(&per_cpu_data[i].thread, NULL, cpu_thread,
+			       (void *)(long)i);
+	}
+
+	printf("== Testing %s %s (%d)\n",
+	       ext ? "extension" : "built-in", real_clock_name, clock_id);
+	printf("CPU      ToD offset [us] ToD drift [us/s]      warps max delta [us]\n"
+	       "--- -------------------- ---------------- ---------- --------------\n");
+
+	while (1) {
+		for (i = 0; i <= max_cpu; i++)
+			if (CPU_ISSET(i, &cpu_realtime_set))
+				printf("%3d %20.1f %16.3f %10lu %14.1f\n",
+				       i,
+				       per_cpu_data[i].offset/1000.0,
+				       per_cpu_data[i].drift * 1000000.0,
+				       per_cpu_data[i].warps,
+				       per_cpu_data[i].max_warp/1000.0);
+		usleep(250000);
+		printf("\033[%dA", cpus);
+	}
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/gpiobench/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/gpiobench/Makefile.am
new file mode 100644
index 0000000..cca3395
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/gpiobench/Makefile.am
@@ -0,0 +1,18 @@
+testdir = @XENO_TEST_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+test_PROGRAMS = gpiobench
+
+gpiobench_SOURCES = gpiobench.c
+
+gpiobench_CPPFLAGS = 		\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+gpiobench_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+gpiobench_LDADD =			\
+	@XENO_CORE_LDADD@	\
+	@XENO_USER_LDADD@	\
+	-lpthread -lrt -lm
diff --git a/kernel/xenomai-v3.2.4/testsuite/gpiobench/gpiobench.c b/kernel/xenomai-v3.2.4/testsuite/gpiobench/gpiobench.c
new file mode 100644
index 0000000..04c6946
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/gpiobench/gpiobench.c
@@ -0,0 +1,680 @@
+/*
+ * Copyright (C) 2020 Song Chen <chensong@tj.kylinos.cn>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdlib.h>
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <error.h>
+#include <signal.h>
+#include <sched.h>
+#include <time.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <sys/timerfd.h>
+#include <xeno_config.h>
+#include <rtdm/testing.h>
+#include <rtdm/gpio.h>
+#include <boilerplate/trace.h>
+#include <xenomai/init.h>
+#include <sys/mman.h>
+#include <getopt.h>
+
+#define NS_PER_MS (1000000)
+#define NS_PER_S (1000000000)
+
+#define DEFAULT_PRIO 99
+#define VERSION_STRING "0.1"
+#define GPIO_HIGH 1
+#define GPIO_LOW  0
+#define MAX_HIST		100
+#define MAX_CYCLES 1000000
+#define DEFAULT_LIMIT 1000
+#define DEV_PATH    "/dev/rtdm/"
+#define TRACING_ON  "/sys/kernel/debug/tracing/tracing_on"
+#define TRACING_EVENTS  "/sys/kernel/debug/tracing/events/enable"
+#define TRACE_MARKER  "/sys/kernel/debug/tracing/trace_marker"
+#define ON  "1"
+#define OFF "0"
+
+enum {
+	MODE_LOOPBACK,
+	MODE_REACT,
+	MODE_ALL
+};
+
+/* Struct for statistics */
+struct test_stat {
+	long inner_min;
+	long inner_max;
+	double inner_avg;
+	long *inner_hist_array;
+	long inner_hist_overflow;
+	long outer_min;
+	long outer_max;
+	double outer_avg;
+	long *outer_hist_array;
+	long outer_hist_overflow;
+};
+
+/* Struct for information */
+struct test_info {
+	unsigned long max_cycles;
+	unsigned long total_cycles;
+	unsigned long max_histogram;
+	int mode;
+	int clockid;
+	int prio;
+	int quiet;
+	int tracelimit;
+	int fd_dev_intr;
+	int fd_dev_out;
+	char pin_controller[32];
+	pthread_t gpio_task;
+	int gpio_intr;
+	int gpio_out;
+	struct test_stat ts;
+};
+
+struct test_info ti;
+/* Print usage information */
+static void display_help(void)
+{
+	printf("gpiobench V %s\n", VERSION_STRING);
+	printf("Usage:\n"
+	       "gpiobench <options>\n\n"
+
+	       "-b       --breaktrace=USEC  send break trace command when latency > USEC\n"
+	       "                            default=1000\n"
+	       "-h       --histogram=US     dump a latency histogram to stdout after the run\n"
+	       "                            US is the max time to be tracked in microseconds,\n"
+	       "                            default=100\n"
+	       "-l       --loops            number of loops, default=1000\n"
+	       "-p       --prio             priority of highest prio thread, defaults=99\n"
+	       "-q       --quiet            print only a summary on exit\n"
+	       "-o       --output           gpio port number for output, no default value,\n"
+	       "                            must be specified\n"
+	       "-i       --intr             gpio port number as an interrupt, no default value,\n"
+	       "                            must be specified\n"
+	       "-c       --pinctrl          gpio pin controller's name, no default value,\n"
+	       "                            must be specified\n"
+	       "-m       --testmode         0 is loopback mode\n"
+	       "                            1 is react mode which works with a latency box,\n"
+	       "                            default=0\n"
+	       "-k       --clockid          0 is CLOCK_REALTIME\n"
+	       "                            1 is CLOCK_MONOTONIC,\n"
+	       "                            default=1\n\n"
+
+	       "e.g.     gpiobench -o 20 -i 21 -c pinctrl-bcm2835\n"
+		);
+}
+
+static void process_options(int argc, char *argv[])
+{
+	int c = 0;
+	static const char optstring[] = "h:p:m:l:c:b:i:o:k:q";
+
+	struct option long_options[] = {
+		{ "breaktrace", required_argument, 0, 'b'},
+		{ "histogram", required_argument, 0, 'h'},
+		{ "loops", required_argument, 0, 'l'},
+		{ "prio", required_argument, 0, 'p'},
+		{ "quiet", no_argument, 0, 'q'},
+		{ "output", required_argument, 0, 'o'},
+		{ "intr", required_argument, 0, 'i'},
+		{ "pinctrl", required_argument, 0, 'c'},
+		{ "testmode", required_argument, 0, 'm'},
+		{ "clockid", required_argument, 0, 'k'},
+		{ 0, 0, 0, 0},
+	};
+
+	while ((c = getopt_long(argc, argv, optstring, long_options,
+						NULL)) != -1) {
+		switch (c) {
+		case 'h':
+			ti.max_histogram = atoi(optarg);
+			break;
+
+		case 'p':
+			ti.prio = atoi(optarg);
+			break;
+
+		case 'l':
+			ti.max_cycles = atoi(optarg);
+			break;
+
+		case 'q':
+			ti.quiet = 1;
+			break;
+
+		case 'b':
+			ti.tracelimit = atoi(optarg);
+			break;
+
+		case 'i':
+			ti.gpio_intr = atoi(optarg);
+			break;
+
+		case 'o':
+			ti.gpio_out = atoi(optarg);
+			break;
+
+		case 'c':
+			/* pin_controller is zeroed in init_ti(), so a
+			 * size-1 bounded copy stays null-terminated */
+			strncpy(ti.pin_controller, optarg,
+				sizeof(ti.pin_controller) - 1);
+			break;
+
+		case 'm':
+			ti.mode = atoi(optarg) >=
+				MODE_REACT ? MODE_REACT : MODE_LOOPBACK;
+			break;
+
+		case 'k':
+			ti.clockid = atoi(optarg) >= CLOCK_MONOTONIC ?
+				CLOCK_MONOTONIC : CLOCK_REALTIME;
+			break;
+
+		default:
+			display_help();
+			exit(2);
+		}
+	}
+
+	if ((ti.gpio_out == -1) || (ti.gpio_intr == -1)
+				|| (strlen(ti.pin_controller) == 0)) {
+		display_help();
+		exit(2);
+	}
+
+	ti.prio = ti.prio > DEFAULT_PRIO ? DEFAULT_PRIO : ti.prio;
+	ti.max_cycles = ti.max_cycles > MAX_CYCLES ? MAX_CYCLES : ti.max_cycles;
+
+	ti.max_histogram = ti.max_histogram > MAX_HIST ?
+		MAX_HIST : ti.max_histogram;
+	ti.ts.inner_hist_array = calloc(ti.max_histogram, sizeof(long));
+	ti.ts.outer_hist_array = calloc(ti.max_histogram, sizeof(long));
+}
+
+static int thread_msleep(unsigned int ms)
+{
+	struct timespec ts = {
+		.tv_sec = (ms * NS_PER_MS) / NS_PER_S,
+		.tv_nsec = (ms * NS_PER_MS) % NS_PER_S,
+	};
+
+	return -nanosleep(&ts, NULL);
+}
+
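+/*
+ * Note: despite its name, calc_us() returns the timestamp in
+ * nanoseconds; callers divide the difference of two such values by
+ * 1000 to obtain microseconds.
+ */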
+static inline long long calc_us(struct timespec t)
+{
+	return ((long long)t.tv_sec * NS_PER_S + t.tv_nsec);
+}
+
+static int setevent(char *event, char *val)
+{
+	int fd;
+	int ret;
+
+	fd = open(event, O_WRONLY);
+	if (fd < 0) {
+		printf("unable to open %s\n", event);
+		return -1;
+	}
+
+	ret = write(fd, val, strlen(val));
+	if (ret < 0) {
+		printf("unable to write %s to %s\n", val, event);
+		close(fd);
+		return -1;
+	}
+
+	close(fd);
+	return 0;
+}
+
+static void tracing(char *enable)
+{
+	setevent(TRACING_EVENTS, enable);
+	setevent(TRACING_ON, enable);
+}
+
+#define write_check(__fd, __buf, __len)			\
+	do {						\
+		int __ret = write(__fd, __buf, __len);	\
+		(void)__ret;				\
+	} while (0)
+
+#define TRACEBUFSIZ 1024
+static __thread char tracebuf[TRACEBUFSIZ];
+
+static void tracemark(char *fmt, ...) __attribute__((format(printf, 1, 2)));
+static void tracemark(char *fmt, ...)
+{
+	va_list ap;
+	int len;
+	int tracemark_fd;
+
+	/* bail out if we cannot mark the trace, e.g. tracing is
+	 * disabled or the kernel lacks trace_marker support */
+	tracemark_fd = open(TRACE_MARKER, O_WRONLY);
+	if (tracemark_fd < 0) {
+		printf("unable to open trace_marker file: %s\n", TRACE_MARKER);
+		return;
+	}
+
+	va_start(ap, fmt);
+	len = vsnprintf(tracebuf, TRACEBUFSIZ, fmt, ap);
+	va_end(ap);
+	write_check(tracemark_fd, tracebuf, len);
+
+	close(tracemark_fd);
+}
+
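+/*
+ * Measurement note: "inner" latency is measured in kernel space, from
+ * the moment the output value is written to the driver-side timestamp
+ * of the resulting interrupt; "outer" latency covers the full
+ * user-space round trip from write() to the return of read(). Both
+ * are converted from nanoseconds to microseconds below.
+ */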
+static int rw_gpio(int value, int index)
+{
+	int ret;
+	struct timespec timestamp;
+	struct rtdm_gpio_readout rdo;
+	long long gpio_write, gpio_read, inner_diff, outer_diff;
+
+	clock_gettime(ti.clockid, &timestamp);
+	gpio_write = calc_us(timestamp);
+
+	ret = write(ti.fd_dev_out, &value, sizeof(value));
+	if (ret < 0) {
+		printf("write GPIO, failed\n");
+		return ret;
+	}
+
+	ret = read(ti.fd_dev_intr, &rdo, sizeof(struct rtdm_gpio_readout));
+	if (ret < 0) {
+		printf("read GPIO, failed\n");
+		return ret;
+	}
+
+	clock_gettime(ti.clockid, &timestamp);
+	gpio_read = calc_us(timestamp);
+
+	inner_diff = (rdo.timestamp - gpio_write) / 1000;
+	outer_diff = (gpio_read - gpio_write) / 1000;
+
+	if (inner_diff < ti.ts.inner_min)
+		ti.ts.inner_min = inner_diff;
+	if (inner_diff > ti.ts.inner_max)
+		ti.ts.inner_max = inner_diff;
+	ti.ts.inner_avg += (double) inner_diff;
+	if (inner_diff >= ti.max_histogram)
+		ti.ts.inner_hist_overflow++;
+	else
+		ti.ts.inner_hist_array[inner_diff]++;
+
+	if (outer_diff < ti.ts.outer_min)
+		ti.ts.outer_min = outer_diff;
+	if (outer_diff > ti.ts.outer_max)
+		ti.ts.outer_max = outer_diff;
+	ti.ts.outer_avg += (double) outer_diff;
+	if (outer_diff >= ti.max_histogram)
+		ti.ts.outer_hist_overflow++;
+	else
+		ti.ts.outer_hist_array[outer_diff]++;
+
+	if (ti.quiet == 0)
+		printf("index: %d, inner_diff: %8lld, outer_diff: %8lld\n",
+		       index, inner_diff, outer_diff);
+
+	return outer_diff;
+}
+
+static void *run_gpiobench_loop(void *cookie)
+{
+	int i, ret;
+
+	printf("----rt task, gpio loop, test run----\n");
+
+	for (i = 0; i < ti.max_cycles; i++) {
+		ti.total_cycles = i;
+		/* send a high level pulse from gpio output pin and
+		 * receive an interrupt from the other gpio pin,
+		 * measuring the time elapsed between the two events
+		 */
+		ret = rw_gpio(GPIO_HIGH, i);
+		if (ret < 0) {
+			printf("RW GPIO, failed\n");
+			break;
+		} else if (ti.tracelimit && ret > ti.tracelimit) {
+			tracemark("hit latency threshold (%d > %d), index: %d",
+						ret, ti.tracelimit, i);
+			break;
+		}
+
+		/* take a break; nanosleep here will not jeopardize the latency */
+		thread_msleep(10);
+
+		/* send a low level pulse from gpio output pin and
+		 * receive an interrupt from the other gpio pin,
+		 * measuring the time elapsed between the two events
+		 */
+		ret = rw_gpio(GPIO_LOW, i);
+		if (ret < 0) {
+			printf("RW GPIO, failed\n");
+			break;
+		} else if (ti.tracelimit && ret > ti.tracelimit) {
+			tracemark("hit latency threshold (%d > %d), index: %d",
+						ret, ti.tracelimit, i);
+			break;
+		}
+
+		/* take a break; nanosleep here will not jeopardize the latency */
+		thread_msleep(10);
+	}
+
+	ti.ts.inner_avg /= (ti.total_cycles * 2);
+	ti.ts.outer_avg /= (ti.total_cycles * 2);
+
+	return NULL;
+}
+
+static void *run_gpiobench_react(void *cookie)
+{
+	int value, ret, i;
+	struct rtdm_gpio_readout rdo;
+
+	printf("----rt task, gpio react, test run----\n");
+
+	for (i = 0; i < ti.max_cycles; i++) {
+		/* received a pulse from latency box from one of
+		 * the gpio pin pair
+		 */
+		ret = read(ti.fd_dev_intr, &rdo, sizeof(rdo));
+		if (ret < 0) {
+			printf("RW GPIO read, failed\n");
+			break;
+		}
+
+		if (ti.quiet == 0)
+			printf("idx: %d, received signal from latency box\n",
+				    i);
+
+		/* send a signal back from the other gpio pin
+		 * to latency box as the acknowledge,
+		 * latency box will measure the time elapsed
+		 * between the two events
+		 */
+		value = GPIO_HIGH;
+		ret = write(ti.fd_dev_out, &value, sizeof(value));
+		if (ret < 0) {
+			printf("RW GPIO write, failed\n");
+			break;
+		}
+
+		if (ti.quiet == 0)
+			printf("idx: %d, sent reaction to latency box\n", i);
+	}
+
+	return NULL;
+}
+
+static void setup_sched_parameters(pthread_attr_t *attr, int prio)
+{
+	struct sched_param p;
+	int ret;
+
+	ret = pthread_attr_init(attr);
+	if (ret) {
+		printf("pthread_attr_init(), failed\n");
+		return;
+	}
+
+	ret = pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
+	if (ret) {
+		printf("pthread_attr_setinheritsched(), failed\n");
+		return;
+	}
+
+	ret = pthread_attr_setschedpolicy(attr,
+				prio ? SCHED_FIFO : SCHED_OTHER);
+	if (ret) {
+		printf("pthread_attr_setschedpolicy(), failed\n");
+		return;
+	}
+
+	p.sched_priority = prio;
+	ret = pthread_attr_setschedparam(attr, &p);
+	if (ret) {
+		printf("pthread_attr_setschedparam(), failed\n");
+		return;
+	}
+}
+
+static void init_ti(void)
+{
+	memset(&ti, 0, sizeof(struct test_info));
+	ti.prio = DEFAULT_PRIO;
+	ti.max_cycles = MAX_CYCLES;
+	ti.total_cycles = MAX_CYCLES;
+	ti.max_histogram = MAX_HIST;
+	ti.tracelimit = DEFAULT_LIMIT;
+	ti.quiet = 0;
+	ti.gpio_out = -1;
+	ti.gpio_intr = -1;
+	ti.mode = MODE_LOOPBACK;
+	ti.clockid = CLOCK_MONOTONIC;
+
+	ti.ts.inner_min = ti.ts.outer_min = DEFAULT_LIMIT;
+	ti.ts.inner_max = ti.ts.outer_max = 0;
+	ti.ts.inner_avg = ti.ts.outer_avg = 0.0;
+}
+
+static void print_hist(void)
+{
+	int i;
+
+	if (ti.total_cycles < (ti.max_cycles - 1)) {
+		/*
+		 * The test was interrupted. Compute the averages
+		 * anyway; they are not accurate, but this avoids
+		 * reporting surprisingly large latencies.
+		 */
+		printf("\nTest was interrupted and exited abnormally\n");
+		printf("Please run it again until it exits normally\n");
+
+		ti.ts.inner_avg /= (ti.total_cycles * 2);
+		ti.ts.outer_avg /= (ti.total_cycles * 2);
+
+	}
+	printf("\n");
+	printf("# Inner Loop Histogram\n");
+	printf("# Inner Loop latency is the latency in kernel space\n"
+		   "# between gpio_set_value and irq handler\n");
+
+	for (i = 0; i < ti.max_histogram; i++) {
+		unsigned long curr_latency = ti.ts.inner_hist_array[i];
+
+		printf("%06d ", i);
+		printf("%06lu\n", curr_latency);
+	}
+
+	printf("# Total:");
+	printf(" %09lu", ti.total_cycles);
+	printf("\n");
+
+	printf("# Min Latencies:");
+	printf(" %05ld", ti.ts.inner_min);
+	printf("\n");
+	printf("# Avg Latencies:");
+	printf(" %05lf", ti.ts.inner_avg);
+	printf("\n");
+	printf("# Max Latencies:");
+	printf(" %05ld", ti.ts.inner_max);
+	printf("\n");
+
+	printf("\n");
+	printf("\n");
+
+	printf("# Outer Loop Histogram\n");
+	printf("# Outer Loop latency is the latency in user space\n"
+		   "# between write and read\n"
+		   "# Technically, outer loop latency is inner loop latency\n"
+		   "# plus overhead of event wakeup\n");
+
+	for (i = 0; i < ti.max_histogram; i++) {
+		unsigned long curr_latency = ti.ts.outer_hist_array[i];
+
+		printf("%06d ", i);
+		printf("%06lu\n", curr_latency);
+	}
+
+	printf("# Total:");
+	printf(" %09lu", ti.total_cycles);
+	printf("\n");
+
+	printf("# Min Latencies:");
+	printf(" %05ld", ti.ts.outer_min);
+	printf("\n");
+	printf("# Avg Latencies:");
+	printf(" %05lf", ti.ts.outer_avg);
+	printf("\n");
+	printf("# Max Latencies:");
+	printf(" %05ld", ti.ts.outer_max);
+	printf("\n");
+}
+
+static void cleanup(void)
+{
+	int ret;
+
+	if (ti.tracelimit < DEFAULT_LIMIT)
+		tracing(OFF);
+
+	ret = close(ti.fd_dev_out);
+	if (ret < 0)
+		printf("can't close gpio_out device\n");
+
+	ret = close(ti.fd_dev_intr);
+	if (ret < 0)
+		printf("can't close gpio_intr device\n");
+
+	if (ti.mode == MODE_LOOPBACK)
+		print_hist();
+
+}
+
+static void cleanup_and_exit(int sig)
+{
+	printf("Signal %d received\n", sig);
+	cleanup();
+	exit(0);
+}
+
+int main(int argc, char **argv)
+{
+	struct sigaction sa __attribute__((unused));
+	int ret = 0;
+	pthread_attr_t tattr;
+	int trigger, value;
+	char dev_name[64];
+
+	init_ti();
+
+	process_options(argc, argv);
+
+	ret = mlockall(MCL_CURRENT|MCL_FUTURE);
+	if (ret) {
+		printf("mlockall failed\n");
+		goto out;
+	}
+
+	snprintf(dev_name, sizeof(dev_name), "%s%s/gpio%d",
+		    DEV_PATH, ti.pin_controller, ti.gpio_out);
+	ti.fd_dev_out = open(dev_name, O_RDWR);
+	if (ti.fd_dev_out < 0) {
+		printf("can't open %s\n", dev_name);
+		goto out;
+	}
+
+	if (ti.gpio_out) {
+		value = 0;
+		ret = ioctl(ti.fd_dev_out, GPIO_RTIOC_DIR_OUT, &value);
+		if (ret) {
+			printf("ioctl gpio port output, failed\n");
+			goto out;
+		}
+	}
+
+	snprintf(dev_name, sizeof(dev_name), "%s%s/gpio%d",
+		    DEV_PATH, ti.pin_controller, ti.gpio_intr);
+	ti.fd_dev_intr = open(dev_name, O_RDWR);
+	if (ti.fd_dev_intr < 0) {
+		printf("can't open %s\n", dev_name);
+		goto out;
+	}
+
+	if (ti.gpio_intr) {
+		trigger = GPIO_TRIGGER_EDGE_FALLING|GPIO_TRIGGER_EDGE_RISING;
+		value = 1;
+
+		ret = ioctl(ti.fd_dev_intr, GPIO_RTIOC_IRQEN, &trigger);
+		if (ret) {
+			printf("ioctl gpio port interrupt, failed\n");
+			goto out;
+		}
+
+		ret = ioctl(ti.fd_dev_intr,
+			ti.clockid == CLOCK_MONOTONIC ?
+			GPIO_RTIOC_TS_MONO : GPIO_RTIOC_TS_REAL, &value);
+		if (ret) {
+			printf("ioctl gpio port ts, failed\n");
+			goto out;
+		}
+	}
+
+	if (ti.tracelimit < DEFAULT_LIMIT)
+		tracing(ON);
+
+	signal(SIGTERM, cleanup_and_exit);
+	signal(SIGINT, cleanup_and_exit);
+	setup_sched_parameters(&tattr, ti.prio);
+
+	if (ti.mode == MODE_LOOPBACK)
+		ret = pthread_create(&ti.gpio_task, &tattr,
+					run_gpiobench_loop, NULL);
+	else
+		ret = pthread_create(&ti.gpio_task, &tattr,
+					run_gpiobench_react, NULL);
+
+	if (ret) {
+		printf("pthread_create(gpiotask), failed\n");
+		goto out;
+	}
+
+	pthread_join(ti.gpio_task, NULL);
+	pthread_attr_destroy(&tattr);
+
+out:
+	cleanup();
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/gpiotest/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/gpiotest/Makefile.am
new file mode 100644
index 0000000..6f6ec6d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/gpiotest/Makefile.am
@@ -0,0 +1,19 @@
+testdir = @XENO_TEST_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+test_PROGRAMS = gpiotest
+
+gpiotest_SOURCES = gpiotest.c
+
+gpiotest_CPPFLAGS = 		\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+gpiotest_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+gpiotest_LDADD =			\
+	../../lib/smokey/libsmokey@CORE@.la	\
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/testsuite/gpiotest/gpiotest.c b/kernel/xenomai-v3.2.4/testsuite/gpiotest/gpiotest.c
new file mode 100644
index 0000000..2e4b1bf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/gpiotest/gpiotest.c
@@ -0,0 +1,267 @@
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <error.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <smokey/smokey.h>
+#include <rtdm/gpio.h>
+#include <boilerplate/time.h>
+
+smokey_test_plugin(interrupt,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_STRING(device),
+			   SMOKEY_STRING(trigger),
+			   SMOKEY_BOOL(select),
+			   SMOKEY_BOOL(timestamp),
+		   ),
+   "Wait for interrupts from a GPIO pin.\n"
+   "\tdevice=<device-path>\n"
+   "\ttrigger={edge[-rising/falling/both], level[-low/high]}\n"
+   "\tselect, wait on select(2).\n"
+   "\ttimestamp, enable timestamping."
+);
+
+smokey_test_plugin(read_value,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_STRING(device),
+		   ),
+   "Read GPIO value.\n"
+   "\tdevice=<device-path>."
+);
+
+smokey_test_plugin(write_value,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_STRING(device),
+		   ),
+   "Write GPIO value.\n"
+   "\tdevice=<device-path>."
+);
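+
+/*
+ * Illustrative invocation (the device path is an assumption and
+ * depends on the pin controller present on the target):
+ *
+ *   gpiotest --run=interrupt device=/dev/rtdm/<pinctrl>/gpio23 trigger=edge-rising
+ */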
+
+static int run_interrupt(struct smokey_test *t, int argc, char *const argv[])
+{
+	static struct {
+		const char *name;
+		int flag;
+	} trigger_types[] = {
+		{ .name = "edge", .flag = GPIO_TRIGGER_EDGE_RISING },
+		{ .name = "edge-rising", .flag = GPIO_TRIGGER_EDGE_RISING },
+		{ .name = "edge-falling", .flag = GPIO_TRIGGER_EDGE_FALLING },
+		{ .name = "edge-both", .flag = GPIO_TRIGGER_EDGE_FALLING|GPIO_TRIGGER_EDGE_RISING },
+		{ .name = "level", .flag = GPIO_TRIGGER_LEVEL_LOW },
+		{ .name = "level-low", .flag = GPIO_TRIGGER_LEVEL_LOW },
+		{ .name = "level-high", .flag = GPIO_TRIGGER_LEVEL_HIGH },
+		{ NULL, 0 },
+	};
+	int do_select = 0, fd, ret, trigger, n, value, do_timestamp = 0;
+	const char *device = NULL, *trigname;
+	struct rtdm_gpio_readout rdo;
+	struct timespec now;
+	fd_set set;
+	
+	smokey_parse_args(t, argc, argv);
+
+	if (!SMOKEY_ARG_ISSET(interrupt, device)) {
+		warning("missing device= specification");
+		return -EINVAL;
+	}
+
+	device = SMOKEY_ARG_STRING(interrupt, device);
+	fd = open(device, O_RDWR);
+	if (fd < 0) {
+		ret = -errno;
+		warning("cannot open device %s [%s]",
+			device, symerror(ret));
+		return ret;
+	}
+
+	if (SMOKEY_ARG_ISSET(interrupt, select))
+		do_select = SMOKEY_ARG_BOOL(interrupt, select);
+
+	if (SMOKEY_ARG_ISSET(interrupt, timestamp))
+		do_timestamp = SMOKEY_ARG_BOOL(interrupt, timestamp);
+
+	trigger = GPIO_TRIGGER_NONE;
+	if (SMOKEY_ARG_ISSET(interrupt, trigger)) {
+		trigname = SMOKEY_ARG_STRING(interrupt, trigger);
+		for (n = 0; trigger_types[n].name; n++) {
+			if (strcmp(trigger_types[n].name, trigname) == 0) {
+				trigger = trigger_types[n].flag;
+				break;
+			}
+		}
+		if (trigger == GPIO_TRIGGER_NONE) {
+			warning("invalid trigger type %s", trigname);
+			return -EINVAL;
+		}
+	}
+
+	ret = ioctl(fd, GPIO_RTIOC_IRQEN, &trigger);
+	if (ret) {
+		ret = -errno;
+		warning("GPIO_RTIOC_IRQEN failed on %s [%s]",
+			device, symerror(ret));
+		return ret;
+	}
+
+	FD_ZERO(&set);
+	FD_SET(fd, &set);
+	
+	for (;;) {
+		if (do_select) {
+			ret = select(fd + 1, &set, NULL, NULL, NULL);
+			if (ret < 0) {
+				ret = -errno;
+				warning("failed listening to %s [%s]",
+					device, symerror(ret));
+				return ret;
+			}
+		}
+		if (do_timestamp) {
+			ret = read(fd, &rdo, sizeof(rdo));
+			if (ret < 0) {
+				ret = -errno;
+				warning("failed reading from %s [%s]",
+					device, symerror(ret));
+				return ret;
+			}
+			clock_gettime(CLOCK_MONOTONIC, &now);
+			printf("received irq %llu us from now, GPIO state=%d\n",
+			       (timespec_scalar(&now) - rdo.timestamp) / 1000ULL,
+			       rdo.value);
+		} else {
+			ret = read(fd, &value, sizeof(value));
+			if (ret < 0) {
+				ret = -errno;
+				warning("failed reading from %s [%s]",
+					device, symerror(ret));
+				return ret;
+			}
+			printf("received irq, GPIO state=%d\n", value);
+		}
+	}
+
+	close(fd);
+
+	return 0;
+}
+
+static int run_read_value(struct smokey_test *t, int argc, char *const argv[])
+{
+	const char *device = NULL;
+	int fd, ret, value = -1;
+
+	smokey_parse_args(t, argc, argv);
+
+	if (!SMOKEY_ARG_ISSET(read_value, device)) {
+		warning("missing device= specification");
+		return -EINVAL;
+	}
+
+	device = SMOKEY_ARG_STRING(read_value, device);
+	fd = open(device, O_RDONLY|O_NONBLOCK);
+	if (fd < 0) {
+		ret = -errno;
+		warning("cannot open device %s [%s]",
+			device, symerror(ret));
+		return ret;
+	}
+
+	if (!__Terrno(ret, ioctl(fd, GPIO_RTIOC_DIR_IN)))
+		return ret;
+
+	ret = read(fd, &value, sizeof(value));
+	close(fd);
+
+	if (!__Tassert(ret == sizeof(value)))
+		return -EINVAL;
+
+	smokey_trace("value=%d", value);
+
+	return 0;
+}
+
+static int run_write_value(struct smokey_test *t, int argc, char *const argv[])
+{
+	const char *device = NULL;
+	int fd, ret, value;
+	
+	smokey_parse_args(t, argc, argv);
+
+	if (!SMOKEY_ARG_ISSET(write_value, device)) {
+		warning("missing device= specification");
+		return -EINVAL;
+	}
+
+	device = SMOKEY_ARG_STRING(write_value, device);
+	fd = open(device, O_WRONLY);
+	if (fd < 0) {
+		ret = -errno;
+		warning("cannot open device %s [%s]",
+			device, symerror(ret));
+		return ret;
+	}
+
+	value = 1;
+	if (!__Terrno(ret, ioctl(fd, GPIO_RTIOC_DIR_OUT, &value)))
+		return ret;
+	
+	ret = write(fd, &value, sizeof(value));
+	close(fd);
+
+	if (!__Tassert(ret == sizeof(value)))
+		return -EINVAL;
+
+	return 0;
+}
+
+int main(int argc, char *const argv[])
+{
+	struct smokey_test *t;
+	int ret, fails = 0;
+
+	if (pvlist_empty(&smokey_test_list))
+		return 0;
+
+	for_each_smokey_test(t) {
+		ret = t->run(t, argc, argv);
+		if (ret) {
+			if (ret == -ENOSYS) {
+				smokey_note("%s skipped (no kernel support)",
+					    t->name);
+				continue;
+			}
+			fails++;
+			if (smokey_keep_going)
+				continue;
+			if (smokey_verbose_mode)
+				error(1, -ret, "test %s failed", t->name);
+			return 1;
+		}
+		smokey_note("%s OK", t->name);
+	}
+
+	return fails != 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/latency/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/latency/Makefile.am
new file mode 100644
index 0000000..9471615
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/latency/Makefile.am
@@ -0,0 +1,18 @@
+testdir = @XENO_TEST_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+test_PROGRAMS = latency
+
+latency_SOURCES = latency.c
+
+latency_CPPFLAGS = 		\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+latency_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+latency_LDADD =			\
+	@XENO_CORE_LDADD@	\
+	@XENO_USER_LDADD@	\
+	-lpthread -lrt -lm
diff --git a/kernel/xenomai-v3.2.4/testsuite/latency/latency.c b/kernel/xenomai-v3.2.4/testsuite/latency/latency.c
new file mode 100644
index 0000000..6cd3a28
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/latency/latency.c
@@ -0,0 +1,822 @@
+/*
+ * Copyright (C) 2004-2015 Philippe Gerum <rpm@xenomai.org>
+ * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdlib.h>
+#include <math.h>
+#include <stdio.h>
+#include <string.h>
+#include <errno.h>
+#include <error.h>
+#include <signal.h>
+#include <sched.h>
+#include <time.h>
+#include <sys/time.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <sys/timerfd.h>
+#include <xeno_config.h>
+#include <rtdm/testing.h>
+#include <boilerplate/trace.h>
+#include <xenomai/init.h>
+
+pthread_t latency_task, display_task;
+
+sem_t *display_sem;
+
+#define ONE_BILLION	1000000000
+#define TEN_MILLIONS	10000000
+
+#define HIPRIO 99
+#define LOPRIO 0
+
+unsigned max_relaxed;
+int32_t minjitter, maxjitter, avgjitter;
+int32_t gminjitter = TEN_MILLIONS, gmaxjitter = -TEN_MILLIONS, goverrun = 0;
+int64_t gavgjitter = 0;
+
+long long period_ns = 0;
+int test_duration = 0;		/* sec of testing, via -T <sec>, 0 is inf */
+int data_lines = 21;		/* data lines per header line, -l <lines> to change */
+int quiet = 0;			/* suppress printing of RTH, RTD lines when -T given */
+int benchdev = -1;
+int freeze_max = 0;
+int priority = HIPRIO;
+int stop_upon_switch = 0;
+sig_atomic_t sampling_relaxed = 0;
+char sem_name[16];
+
+#define USER_TASK       0
+#define KERNEL_TASK     1
+#define TIMER_HANDLER   2
+
+int test_mode = USER_TASK;
+const char *test_mode_names[] = {
+	"periodic user-mode task",
+	"in-kernel periodic task",
+	"in-kernel timer handler"
+};
+
+time_t test_start, test_end;	/* report test duration */
+int test_loops = 0;		/* outer loop count */
+
+/* Warmup time : in order to avoid spurious cache effects on low-end machines. */
+#define WARMUP_TIME 1
+#define HISTOGRAM_CELLS 300
+int histogram_size = HISTOGRAM_CELLS;
+int32_t *histogram_avg = NULL, *histogram_max = NULL, *histogram_min = NULL;
+
+char *do_gnuplot = NULL;
+int do_histogram = 0, do_stats = 0, finished = 0;
+int bucketsize = 1000;		/* default = 1000ns, -B <size> to override */
+
+#define need_histo() (do_histogram || do_stats || do_gnuplot)
+
+static inline void add_histogram(int32_t *histogram, int32_t addval)
+{
+	/* bucketsize steps */
+	int inabs = (addval >= 0 ? addval : -addval) / bucketsize;
+	histogram[inabs < histogram_size ? inabs : histogram_size - 1]++;
+}
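+
+/*
+ * Explanatory note: samples are folded into |value| / bucketsize
+ * buckets (1 us per bucket by default, see -B); anything beyond the
+ * last cell is accumulated in that cell, so the final bucket doubles
+ * as an overflow counter.
+ */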
+
+static inline long long diff_ts(struct timespec *left, struct timespec *right)
+{
+	return (long long)(left->tv_sec - right->tv_sec) * ONE_BILLION
+		+ left->tv_nsec - right->tv_nsec;
+}
+
+static void *latency(void *cookie)
+{
+	int err, count, nsamples, warmup = 1;
+	unsigned long long fault_threshold;
+	struct itimerspec timer_conf;
+	struct timespec expected;
+	unsigned old_relaxed = 0;
+	char task_name[16];
+	int tfd;
+
+	snprintf(task_name, sizeof(task_name), "sampling-%d", getpid());
+	err = pthread_setname_np(pthread_self(), task_name);
+	if (err)
+		error(1, err, "pthread_setname_np(latency)");
+
+	tfd = timerfd_create(CLOCK_MONOTONIC, 0);
+	if (tfd == -1)
+		error(1, errno, "timerfd_create()");
+
+#ifdef CONFIG_XENO_COBALT
+	err = pthread_setmode_np(0, PTHREAD_WARNSW, NULL);
+	if (err)
+		error(1, err, "pthread_setmode_np()");
+#endif
+
+	err = clock_gettime(CLOCK_MONOTONIC, &expected);
+	if (err)
+		error(1, errno, "clock_gettime()");
+
+	fault_threshold = CONFIG_XENO_DEFAULT_PERIOD;
+	nsamples = (long long)ONE_BILLION / period_ns;
+	/* start time: one millisecond from now. */
+	expected.tv_nsec += 1000000;
+	if (expected.tv_nsec > ONE_BILLION) {
+		expected.tv_nsec -= ONE_BILLION;
+		expected.tv_sec++;
+	}
+	timer_conf.it_value = expected;
+	timer_conf.it_interval.tv_sec = period_ns / ONE_BILLION;
+	timer_conf.it_interval.tv_nsec = period_ns % ONE_BILLION;
+
+	err = timerfd_settime(tfd, TFD_TIMER_ABSTIME, &timer_conf, NULL);
+	if (err)
+		error(1, errno, "timerfd_settime()");
+
+	for (;;) {
+		int32_t minj = TEN_MILLIONS, maxj = -TEN_MILLIONS, dt;
+		uint32_t overrun = 0;
+		int64_t sumj;
+
+		test_loops++;
+
+		for (count = sumj = 0; count < nsamples; count++) {
+			unsigned int new_relaxed;
+			struct timespec now;
+			uint64_t ticks;
+
+			err = read(tfd, &ticks, sizeof(ticks));
+
+			clock_gettime(CLOCK_MONOTONIC, &now);
+			dt = (int32_t)diff_ts(&now, &expected);
+			new_relaxed = sampling_relaxed;
+			if (dt > maxj) {
+				if (new_relaxed != old_relaxed
+				    && dt > fault_threshold)
+					max_relaxed +=
+						new_relaxed - old_relaxed;
+				maxj = dt;
+			}
+			old_relaxed = new_relaxed;
+			if (dt < minj)
+				minj = dt;
+			sumj += dt;
+
+			if (err < 0)
+				error(1, errno, "read()");
+			if (ticks > 1)
+				overrun += ticks - 1;
+			expected.tv_nsec += (ticks * period_ns) % ONE_BILLION;
+			expected.tv_sec += (ticks * period_ns) / ONE_BILLION;
+			if (expected.tv_nsec > ONE_BILLION) {
+				expected.tv_nsec -= ONE_BILLION;
+				expected.tv_sec++;
+			}
+
+			if (freeze_max && (dt > gmaxjitter)
+			    && !(finished || warmup)) {
+				xntrace_latpeak_freeze(dt);
+				gmaxjitter = dt;
+			}
+
+			if (!(finished || warmup) && need_histo())
+				add_histogram(histogram_avg, dt);
+		}
+
+		if (!warmup) {
+			if (!finished && need_histo()) {
+				add_histogram(histogram_max, maxj);
+				add_histogram(histogram_min, minj);
+			}
+
+			minjitter = minj;
+			if (minj < gminjitter)
+				gminjitter = minj;
+
+			maxjitter = maxj;
+			if (maxj > gmaxjitter)
+				gmaxjitter = maxj;
+
+			avgjitter = sumj / nsamples;
+			gavgjitter += avgjitter;
+			goverrun += overrun;
+			sem_post(display_sem);
+		}
+
+		if (warmup && test_loops == WARMUP_TIME) {
+			test_loops = 0;
+			warmup = 0;
+		}
+	}
+
+	return NULL;
+}
+
+static void *display(void *cookie)
+{
+	char task_name[16];
+	int err, n = 0;
+	time_t start;
+
+	snprintf(task_name, sizeof(task_name), "display-%d", getpid());
+	err = pthread_setname_np(pthread_self(), task_name);
+	if (err)
+		error(1, err, "pthread_setname_np(display)");
+
+	if (test_mode == USER_TASK) {
+		snprintf(sem_name, sizeof(sem_name), "/dispsem-%d", getpid());
+		sem_unlink(sem_name); /* may fail */
+		display_sem = sem_open(sem_name, O_CREAT | O_EXCL, 0666, 0);
+		if (display_sem == SEM_FAILED)
+			error(1, errno, "sem_open()");
+	} else {
+		struct rttst_tmbench_config config;
+
+		if (test_mode == KERNEL_TASK)
+			config.mode = RTTST_TMBENCH_TASK;
+		else
+			config.mode = RTTST_TMBENCH_HANDLER;
+
+		config.period = period_ns;
+		config.priority = priority;
+		config.warmup_loops = WARMUP_TIME;
+		config.histogram_size = need_histo() ? histogram_size : 0;
+		config.histogram_bucketsize = bucketsize;
+		config.freeze_max = freeze_max;
+
+		err = ioctl(benchdev, RTTST_RTIOC_TMBENCH_START, &config);
+		if (err)
+			error(1, errno, "ioctl(RTTST_RTIOC_TMBENCH_START)");
+	}
+
+	time(&start);
+
+	if (WARMUP_TIME)
+		printf("warming up...\n");
+
+	if (quiet)
+		fprintf(stderr, "running quietly for %d seconds\n",
+			test_duration);
+
+	for (;;) {
+		long minj, gminj, maxj, gmaxj, avgj;
+
+		if (test_mode == USER_TASK) {
+			err = sem_wait(display_sem);
+
+			if (err < 0) {
+				if (errno != EIDRM)
+					error(1, errno, "sem_wait()");
+
+				return NULL;
+			}
+
+			/* snapshot the jitter values (already in nanoseconds). */
+			minj = minjitter;
+			gminj = gminjitter;
+			avgj = avgjitter;
+			maxj = maxjitter;
+			gmaxj = gmaxjitter;
+
+		} else {
+			struct rttst_interm_bench_res result;
+
+			err = ioctl(benchdev, RTTST_RTIOC_INTERM_BENCH_RES,
+				&result);
+
+			if (err < 0) {
+				if (errno != EIDRM)
+					error(1, errno,
+					      "ioctl(RTTST_RTIOC_INTERM_BENCH_RES)");
+
+				return NULL;
+			}
+
+			minj = result.last.min;
+			gminj = result.overall.min;
+			avgj = result.last.avg;
+			maxj = result.last.max;
+			gmaxj = result.overall.max;
+			goverrun = result.overall.overruns;
+		}
+
+		if (!quiet) {
+			if (data_lines && (n++ % data_lines) == 0) {
+				time_t now, dt;
+				time(&now);
+				dt = now - start - WARMUP_TIME;
+				printf
+				    ("RTT|  %.2ld:%.2ld:%.2ld  (%s, %Ld us period, "
+				     "priority %d)\n", dt / 3600,
+				     (dt / 60) % 60, dt % 60,
+				     test_mode_names[test_mode],
+				     period_ns / 1000, priority);
+				printf("RTH|%11s|%11s|%11s|%8s|%6s|%11s|%11s\n",
+				       "----lat min", "----lat avg",
+				       "----lat max", "-overrun", "---msw",
+				       "---lat best", "--lat worst");
+			}
+			printf("RTD|%11.3f|%11.3f|%11.3f|%8d|%6u|%11.3f|%11.3f\n",
+			       (double)minj / 1000,
+			       (double)avgj / 1000,
+			       (double)maxj / 1000,
+			       goverrun,
+			       max_relaxed,
+			       (double)gminj / 1000, (double)gmaxj / 1000);
+		}
+	}
+
+	return NULL;
+}
+
+static double dump_histogram(int32_t *histogram, char *kind)
+{
+	int n, total_hits = 0;
+	double avg = 0;		/* accumulates the weighted hits first */
+
+	if (do_histogram)
+		printf("---|--param|----range-|--samples\n");
+
+	for (n = 0; n < histogram_size; n++) {
+		int32_t hits = histogram[n];
+
+		if (hits) {
+			total_hits += hits;
+			avg += n * hits;
+			if (do_histogram)
+				printf("HSD|    %s| %3d -%3d | %8d\n",
+				       kind, n, n + 1, hits);
+		}
+	}
+
+	if (total_hits)
+		avg /= total_hits;	/* compute avg, reuse variable */
+
+	return avg;
+}
+
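+/*
+ * The value returned by dump_histogram() is the mean bucket index,
+ * i.e. the average latency in bucketsize units (microseconds with the
+ * default 1000 ns buckets); dump_stats() reuses it to derive the
+ * standard deviation on the same scale.
+ */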
+static void dump_histo_gnuplot(int32_t *histogram, time_t duration)
+{
+	unsigned int start, stop;
+	char *xconf, buf[BUFSIZ];
+	FILE *ifp, *ofp;
+	int n;
+
+	if (strcmp(do_gnuplot, "-") == 0)
+		ofp = stdout;
+	else {
+		ofp = fopen(do_gnuplot, "w");
+		if (ofp == NULL)
+			return;
+	}
+
+	fprintf(ofp, "# %.2ld:%.2ld:%.2ld (%s, %Ld us period, priority %d)\n",
+		duration / 3600, (duration / 60) % 60, duration % 60,
+		test_mode_names[test_mode],
+		period_ns / 1000, priority);
+	fprintf(ofp, "# %11s|%11s|%11s|%8s|%6s|\n",
+		"----lat min", "----lat avg",
+		"----lat max", "-overrun", "---msw");
+	fprintf(ofp,
+		"# %11.3f|%11.3f|%11.3f|%8d|%6u|\n",
+		(double)gminjitter / 1000, (double)gavgjitter / 1000,
+		(double)gmaxjitter / 1000, goverrun, max_relaxed);
+
+	if (asprintf(&xconf, "%s/bin/xeno-config --info", CONFIG_XENO_PREFIX) < 0)
+		goto dump_data;
+
+	ifp = popen(xconf, "r");
+	free(xconf);
+	if (ifp == NULL)
+		goto dump_data;
+
+	while (fgets(buf, sizeof(buf), ifp)) {
+		fputc('#', ofp);
+		fputc(' ', ofp);
+		fputs(buf, ofp);
+	}
+
+	pclose(ifp);
+
+dump_data:
+	for (n = 0; n < histogram_size && histogram[n] == 0; n++)
+		;
+	start = n;
+
+	for (n = histogram_size - 1; n >= 0 && histogram[n] == 0; n--)
+		;
+	stop = n;
+
+	fprintf(ofp, "%g 1\n", start * bucketsize / 1000.0);
+	for (n = start; n <= stop; n++)
+		fprintf(ofp, "%g %d\n",
+			(n + 0.5) * bucketsize / 1000.0, histogram[n] + 1);
+	fprintf(ofp, "%g 1\n", (stop + 1) * bucketsize / 1000.0);
+
+	if (ofp != stdout)
+		fclose(ofp);
+}
+
+static void dump_stats(int32_t *histogram, char *kind, double avg)
+{
+	int n, total_hits = 0;
+	double variance = 0;
+
+	for (n = 0; n < histogram_size; n++) {
+		int32_t hits = histogram[n];
+
+		if (hits) {
+			total_hits += hits;
+			variance += hits * (n - avg) * (n - avg);
+		}
+	}
+
+	/* compute std-deviation (unbiased form) */
+	if (total_hits > 1) {
+		variance /= total_hits - 1;
+		variance = sqrt(variance);
+	} else
+		variance = 0;
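+	/*
+	 * i.e. s = sqrt(sum(hits * (n - avg)^2) / (total_hits - 1)),
+	 * the unbiased sample standard deviation over bucket indexes.
+	 */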
+
+	printf("HSS|    %s| %9d| %10.3f| %10.3f\n",
+	       kind, total_hits, avg, variance);
+}
+
+static void dump_hist_stats(time_t duration)
+{
+	double minavg, maxavg, avgavg;
+
+	/* max is printed last, where it is visible without scrolling */
+	minavg = dump_histogram(histogram_min, "min");
+	avgavg = dump_histogram(histogram_avg, "avg");
+	maxavg = dump_histogram(histogram_max, "max");
+
+	printf("HSH|--param|--samples-|--average--|---stddev--\n");
+
+	dump_stats(histogram_min, "min", minavg);
+	dump_stats(histogram_avg, "avg", avgavg);
+	dump_stats(histogram_max, "max", maxavg);
+
+	if (do_gnuplot)
+		dump_histo_gnuplot(histogram_avg, duration);
+}
+
+static void cleanup(void)
+{
+	struct rttst_overall_bench_res overall;
+	time_t actual_duration;
+
+	time(&test_end);
+	actual_duration = test_end - test_start - WARMUP_TIME;
+	if (!test_duration)
+		test_duration = actual_duration;
+
+	pthread_cancel(display_task);
+
+	if (test_mode == USER_TASK) {
+		pthread_cancel(latency_task);
+		pthread_join(latency_task, NULL);
+		pthread_join(display_task, NULL);
+
+		sem_close(display_sem);
+		sem_unlink(sem_name);
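+		/*
+		 * test_loops also counts the pass in progress, so divide
+		 * by the completed passes only, clamping the divisor to 1
+		 * in case the run was aborted before the first pass ended.
+		 */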
+		gavgjitter /= (test_loops > 1 ? test_loops : 2) - 1;
+	} else {
+		overall.histogram_min = histogram_min;
+		overall.histogram_max = histogram_max;
+		overall.histogram_avg = histogram_avg;
+		ioctl(benchdev, RTTST_RTIOC_TMBENCH_STOP, &overall);
+		gminjitter = overall.result.min;
+		gmaxjitter = overall.result.max;
+		gavgjitter = overall.result.avg;
+		goverrun = overall.result.overruns;
+		pthread_join(display_task, NULL);
+	}
+
+	if (benchdev >= 0)
+		close(benchdev);
+
+	if (need_histo())
+		dump_hist_stats(actual_duration);
+
+	printf
+	    ("---|-----------|-----------|-----------|--------|------|-------------------------\n"
+	     "RTS|%11.3f|%11.3f|%11.3f|%8d|%6u|    %.2ld:%.2ld:%.2ld/%.2d:%.2d:%.2d\n",
+	     (double)gminjitter / 1000, (double)gavgjitter / 1000, (double)gmaxjitter / 1000,
+	     goverrun, max_relaxed, actual_duration / 3600, (actual_duration / 60) % 60,
+	     actual_duration % 60, test_duration / 3600,
+	     (test_duration / 60) % 60, test_duration % 60);
+	if (max_relaxed > 0)
+		printf(
+"Warning! some latency peaks may have been due to involuntary mode switches.\n"
+"Please contact xenomai@xenomai.org\n");
+
+	if (histogram_avg)
+		free(histogram_avg);
+	if (histogram_max)
+		free(histogram_max);
+	if (histogram_min)
+		free(histogram_min);
+
+	exit(0);
+}
+
+static void faulthand(int sig)
+{
+	xntrace_user_freeze(0, 1);
+	signal(sig, SIG_DFL);
+	kill(getpid(), sig);
+}
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/uapi/syscall.h>
+
+static const char *reason_str[] = {
+	[SIGDEBUG_UNDEFINED] = "received SIGDEBUG for unknown reason",
+	[SIGDEBUG_MIGRATE_SIGNAL] = "received signal",
+	[SIGDEBUG_MIGRATE_SYSCALL] = "invoked syscall",
+	[SIGDEBUG_MIGRATE_FAULT] = "triggered fault",
+	[SIGDEBUG_MIGRATE_PRIOINV] = "affected by priority inversion",
+	[SIGDEBUG_NOMLOCK] = "process memory not locked",
+	[SIGDEBUG_WATCHDOG] = "watchdog triggered (period too short?)",
+	[SIGDEBUG_LOCK_BREAK] = "scheduler lock break",
+};
+
+static void sigdebug(int sig, siginfo_t *si, void *context)
+{
+	const char fmt[] = "%s, aborting.\n"
+		"(enabling CONFIG_XENO_OPT_DEBUG_TRACE_RELAX may help)\n";
+	unsigned int reason = sigdebug_reason(si);
+	int n __attribute__ ((unused));
+	static char buffer[256];
+
+	if (reason > SIGDEBUG_WATCHDOG)
+		reason = SIGDEBUG_UNDEFINED;
+
+	switch(reason) {
+	case SIGDEBUG_UNDEFINED:
+	case SIGDEBUG_NOMLOCK:
+	case SIGDEBUG_WATCHDOG:
+		n = snprintf(buffer, sizeof(buffer), "latency: %s\n",
+			     reason_str[reason]);
+		n = write(STDERR_FILENO, buffer, n);
+		exit(EXIT_FAILURE);
+	}
+
+	if (!stop_upon_switch) {
+		++sampling_relaxed;
+		return;
+	}
+
+	n = snprintf(buffer, sizeof(buffer), fmt, reason_str[reason]);
+	n = write(STDERR_FILENO, buffer, n);
+	signal(sig, SIG_DFL);
+	kill(getpid(), sig);
+}
+
+#endif /* CONFIG_XENO_COBALT */
+
+void application_usage(void)
+{
+	fprintf(stderr, "usage: %s [options]:\n", get_program_name());
+	fprintf(stderr,
+		"-h                              print histograms of min, avg, max latencies\n"
+		"-g <file>                       dump histogram to <file> in gnuplot format\n"
+		"-s                              print statistics of min, avg, max latencies\n"
+		"-H <histogram-size>             default = 200, increase if your last bucket is full\n"
+		"-B <bucket-size>                default = 1000ns, decrease for more resolution\n"
+		"-p <period_us>                  sampling period\n"
+		"-l <data-lines per header>      default=21, 0 to supress headers\n"
+		"-T <test_duration_seconds>      default=0, so ^C to end\n"
+		"-q                              supresses RTD, RTH lines if -T is used\n"
+		"-D <testing_device_no>          number of testing device, default=0\n"
+		"-t <test_mode>                  0=user task (default), 1=kernel task, 2=timer IRQ\n"
+		"-f                              freeze trace for each new max latency\n"
+		"-c <cpu>                        pin measuring task down to given CPU\n"
+		"-P <priority>                   task priority (test mode 0 and 1 only)\n"
+		"-b                              break upon mode switch\n"
+		);
+}
+
+static void setup_sched_parameters(pthread_attr_t *attr, int prio)
+{
+	struct sched_param p;
+	int ret;
+
+	ret = pthread_attr_init(attr);
+	if (ret)
+		error(1, ret, "pthread_attr_init()");
+
+	ret = pthread_attr_setinheritsched(attr, PTHREAD_EXPLICIT_SCHED);
+	if (ret)
+		error(1, ret, "pthread_attr_setinheritsched()");
+
+	ret = pthread_attr_setschedpolicy(attr, prio ? SCHED_FIFO : SCHED_OTHER);
+	if (ret)
+		error(1, ret, "pthread_attr_setschedpolicy()");
+
+	p.sched_priority = prio;
+	ret = pthread_attr_setschedparam(attr, &p);
+	if (ret)
+		error(1, ret, "pthread_attr_setschedparam()");
+}
+
+int main(int argc, char *const *argv)
+{
+	struct sigaction sa __attribute__((unused));
+	int c, ret, sig, cpu = 0;
+	pthread_attr_t tattr;
+	cpu_set_t cpus;
+	sigset_t mask;
+
+	while ((c = getopt(argc, argv, "g:hp:l:T:qH:B:sD:t:fc:P:b")) != EOF)
+		switch (c) {
+		case 'g':
+			do_gnuplot = strdup(optarg);
+			break;
+
+		case 'h':
+
+			do_histogram = 1;
+			break;
+
+		case 's':
+
+			do_stats = 1;
+			break;
+
+		case 'H':
+
+			histogram_size = atoi(optarg);
+			break;
+
+		case 'B':
+
+			bucketsize = atoi(optarg);
+			break;
+
+		case 'p':
+
+			period_ns = atoi(optarg) * 1000LL;
+			if (period_ns > ONE_BILLION)
+				error(1, EINVAL,
+				      "period cannot be longer than 1s");
+			break;
+
+		case 'l':
+
+			data_lines = atoi(optarg);
+			break;
+
+		case 'T':
+
+			test_duration = atoi(optarg);
+			alarm(test_duration + WARMUP_TIME);
+			break;
+
+		case 'q':
+
+			quiet = 1;
+			break;
+
+		case 't':
+
+			test_mode = atoi(optarg);
+			break;
+
+		case 'f':
+
+			freeze_max = 1;
+			break;
+
+		case 'c':
+			cpu = atoi(optarg);
+			if (cpu < 0 || cpu >= CPU_SETSIZE)
+				error(1, EINVAL, "invalid CPU #%d", cpu);
+			break;
+
+		case 'P':
+			priority = atoi(optarg);
+			break;
+
+		case 'b':
+			stop_upon_switch = 1;
+			break;
+
+		default:
+			xenomai_usage();
+			exit(2);
+		}
+
+	if (!test_duration && quiet) {
+		warning("-q requires -T, ignoring -q");
+		quiet = 0;
+	}
+
+	if (test_mode < USER_TASK || test_mode > TIMER_HANDLER)
+		error(1, EINVAL, "invalid test mode");
+
+#ifdef CONFIG_XENO_MERCURY
+	if (test_mode != USER_TASK)
+		error(1, EINVAL, "-t1, -t2 not allowed over Mercury");
+#endif
+
+	time(&test_start);
+
+	histogram_avg = calloc(histogram_size, sizeof(int32_t));
+	histogram_max = calloc(histogram_size, sizeof(int32_t));
+	histogram_min = calloc(histogram_size, sizeof(int32_t));
+
+	if (!(histogram_avg && histogram_max && histogram_min))
+		cleanup();
+
+	if (period_ns == 0)
+		period_ns = CONFIG_XENO_DEFAULT_PERIOD;	/* ns */
+
+	if (priority <= LOPRIO)
+		priority = LOPRIO + 1;
+	else if (priority > HIPRIO)
+		priority = HIPRIO;
+
+	sigemptyset(&mask);
+	sigaddset(&mask, SIGINT);
+	sigaddset(&mask, SIGTERM);
+	sigaddset(&mask, SIGHUP);
+	sigaddset(&mask, SIGALRM);
+	pthread_sigmask(SIG_BLOCK, &mask, NULL);
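+	/*
+	 * These signals stay blocked in every thread; the main thread
+	 * collects them through sigwait() below to trigger cleanup().
+	 */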
+
+#ifdef CONFIG_XENO_COBALT
+	sigemptyset(&sa.sa_mask);
+	sa.sa_sigaction = sigdebug;
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGDEBUG, &sa, NULL);
+#endif
+
+	if (freeze_max) {
+		/* If something goes wrong, we want to freeze the current
+		   trace path to help debugging. */
+		signal(SIGSEGV, faulthand);
+		signal(SIGBUS, faulthand);
+	}
+
+	setlinebuf(stdout);
+
+	printf("== Sampling period: %Ld us\n"
+	       "== Test mode: %s\n"
+	       "== All results in microseconds\n",
+	       period_ns / 1000, test_mode_names[test_mode]);
+
+	if (test_mode != USER_TASK) {
+		benchdev = open("/dev/rtdm/timerbench", O_RDWR);
+		if (benchdev < 0)
+			error(1, errno, "open sampler device (modprobe xeno_timerbench?)");
+	}
+
+	setup_sched_parameters(&tattr, 0);
+
+	ret = pthread_create(&display_task, &tattr, display, NULL);
+	if (ret)
+		error(1, ret, "pthread_create(display)");
+
+	pthread_attr_destroy(&tattr);
+
+	if (test_mode == USER_TASK) {
+		setup_sched_parameters(&tattr, priority);
+		CPU_ZERO(&cpus);
+		CPU_SET(cpu, &cpus);
+
+		ret = pthread_attr_setaffinity_np(&tattr, sizeof(cpus), &cpus);
+		if (ret)
+			error(1, ret, "pthread_attr_setaffinity_np()");
+
+		ret = pthread_create(&latency_task, &tattr, latency, NULL);
+		if (ret)
+			error(1, ret, "pthread_create(latency)");
+
+		pthread_attr_destroy(&tattr);
+	}
+
+	__STD(sigwait(&mask, &sig));
+	finished = 1;
+
+	cleanup();
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/Makefile.am
new file mode 100644
index 0000000..56c8730
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/Makefile.am
@@ -0,0 +1,119 @@
+testdir = @XENO_TEST_DIR@
+test_PROGRAMS = smokey
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+smokey_SOURCES = main.c
+
+# Make sure to list modules from the most dependent to the
+# least. e.g. net_common should appear after all net_* modules,
+# memcheck should appear after all heapmem-* modules.
+
+COBALT_SUBDIRS = 	\
+	arith 		\
+	bufp		\
+	cpu-affinity	\
+	fpu-stress	\
+	gdb		\
+	iddp		\
+	leaks		\
+	memory-coreheap	\
+	memory-heapmem	\
+	memory-tlsf	\
+	memcheck	\
+	net_packet_dgram\
+	net_packet_raw	\
+	net_udp		\
+	net_common	\
+	posix-clock	\
+	posix-cond 	\
+	posix-fork	\
+	posix-mutex 	\
+	posix-select 	\
+	rtdm 		\
+	sched-quota 	\
+	sched-tp 	\
+	setsched	\
+	sigdebug	\
+	timerfd		\
+	tsc		\
+	vdso-access 	\
+	xddp		\
+	y2038
+
+MERCURY_SUBDIRS =	\
+	memory-heapmem	\
+	memory-tlsf	\
+	memcheck
+
+DIST_SUBDIRS = 		\
+	arith 		\
+	bufp		\
+	cpu-affinity	\
+	dlopen		\
+	fpu-stress	\
+	gdb		\
+	iddp		\
+	leaks		\
+	memory-coreheap	\
+	memory-heapmem	\
+	memory-pshared	\
+	memory-tlsf	\
+	memcheck	\
+	net_packet_dgram\
+	net_packet_raw	\
+	net_udp		\
+	net_common	\
+	posix-clock	\
+	posix-cond 	\
+	posix-fork	\
+	posix-mutex 	\
+	posix-select 	\
+	rtdm 		\
+	sched-quota 	\
+	sched-tp 	\
+	setsched	\
+	sigdebug	\
+	timerfd		\
+	tsc		\
+	vdso-access 	\
+	xddp		\
+	y2038
+
+if XENO_COBALT
+if CONFIG_XENO_LIBS_DLOPEN
+COBALT_SUBDIRS += dlopen
+endif
+if XENO_PSHARED
+COBALT_SUBDIRS += memory-pshared
+endif
+wrappers = $(XENO_POSIX_WRAPPERS)
+SUBDIRS = $(COBALT_SUBDIRS)
+else
+if XENO_PSHARED
+MERCURY_SUBDIRS += memory-pshared
+endif
+SUBDIRS = $(MERCURY_SUBDIRS)
+wrappers =
+endif
+
+plugin_list = $(foreach plugin,$(SUBDIRS),$(plugin)/lib$(plugin).a)
+# wrap-link.sh is confused by -whole-archive, so work around
+# this by forcing undefined references to symbols we expect the
+# plugins to export.
+sym_prefix=@XENO_SYMBOL_PREFIX@
+undef_list = $(foreach plugin,$(SUBDIRS),-u $(sym_prefix)smokey_plugin_$(subst -,_,$(plugin)))
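+# e.g. the sched-quota plugin contributes
+# "-u $(sym_prefix)smokey_plugin_sched_quota" to the link line, forcing
+# the linker to pull the plugin descriptor out of sched-quota/libsched-quota.a.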
+
+smokey_CPPFLAGS = 			\
+	$(XENO_USER_CFLAGS)		\
+	-I$(top_srcdir)/include
+
+smokey_LDFLAGS=$(wrappers) @XENO_AUTOINIT_LDFLAGS@ $(undef_list)
+
+smokey_LDADD = 					\
+	$(plugin_list)				\
+	../../lib/smokey/libsmokey@CORE@.la		\
+	../../lib/copperplate/libcopperplate@CORE@.la	\
+	@XENO_CORE_LDADD@			\
+	@XENO_USER_LDADD@			\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/arith/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/Makefile.am
new file mode 100644
index 0000000..3452db8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/Makefile.am
@@ -0,0 +1,8 @@
+
+noinst_LIBRARIES = libarith.a
+
+libarith_a_SOURCES = arith.c arith-noinline.c arith-noinline.h
+
+libarith_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.c b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.c
new file mode 100644
index 0000000..ece02e7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.c
@@ -0,0 +1,35 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <cobalt/arith.h>
+#include "arith-noinline.h"
+
+long long dummy(void)
+{
+	return 0;
+}
+
+long long
+do_llimd(long long ll, unsigned m, unsigned d)
+{
+	return xnarch_llimd(ll, m, d);
+}
+
+long long
+do_llmulshft(long long ll, unsigned m, unsigned s)
+{
+	return xnarch_llmulshft(ll, m, s);
+}
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+unsigned long long
+do_nodiv_ullimd(unsigned long long ll, unsigned long long frac, unsigned integ)
+{
+	return xnarch_nodiv_ullimd(ll, frac, integ);
+}
+
+long long
+do_nodiv_llimd(long long ll, unsigned long long frac, unsigned integ)
+{
+	return xnarch_nodiv_llimd(ll, frac, integ);
+}
+#endif
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.h b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.h
new file mode 100644
index 0000000..3dd7700
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith-noinline.h
@@ -0,0 +1,18 @@
+#ifndef OUTOFLINE_H
+#define OUTOFLINE_H
+
+long long dummy(void);
+
+long long
+do_llimd(long long ull, unsigned m, unsigned d);
+
+long long
+do_llmulshft(long long ull, unsigned m, unsigned s);
+
+unsigned long long
+do_nodiv_ullimd(unsigned long long ull, unsigned long long frac, unsigned integ);
+
+long long
+do_nodiv_llimd(long long ll, unsigned long long frac, unsigned integ);
+
+#endif /* OUTOFLINE_H */
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith.c b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith.c
new file mode 100644
index 0000000..c927d1d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/arith/arith.c
@@ -0,0 +1,124 @@
+#include <stdio.h>
+#include <smokey/smokey.h>
+#include <cobalt/arith.h>
+#include "arith-noinline.h"
+
+smokey_test_plugin(arith,
+		   SMOKEY_NOARGS,
+		   "Check helpers for fast arithmetics"
+);
+
+static volatile unsigned nsec_per_sec = 1000000000;
+static volatile unsigned sample_freq = 33000000;
+static volatile long long arg = 0x3ffffffffffffffULL;
+
+#define bench(display, f)						\
+	do {								\
+		unsigned long long result;				\
+		avg = rejected = 0;					\
+		for (i = 0; i < 10000; i++) {				\
+		  	ticks_t start, end;				\
+			unsigned long delta;				\
+									\
+			start = clockobj_get_tsc();			\
+			result = (f);					\
+			end = clockobj_get_tsc();			\
+			delta = end - start;				\
+									\
+			if (i == 0 || delta < (avg / i) * 4) {		\
+				avg += delta;				\
+			} else						\
+				++rejected;				\
+		}							\
+		if (rejected < 10000) {					\
+			avg = xnarch_llimd(avg, 10000, 10000 - rejected); \
+			avg = clockobj_tsc_to_ns(avg) - calib;		\
+			smokey_trace("%s: 0x%016llx: %lld.%03llu ns,"	\
+				    " rejected %d/10000",		\
+				    display, result, avg / 10000,	\
+				    ((avg >= 0 ? avg : -avg) % 10000) / 10, \
+				    rejected);				\
+		} else							\
+			smokey_warning("%s: rejected 10000/10000", display); \
+	} while (0)
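+
+/*
+ * The filter above treats any delta at least four times the running
+ * average as a preemption artifact and rejects it; the xnarch_llimd()
+ * rescaling then compensates the accumulated sum for the rejected
+ * samples.
+ */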
+
+static int run_arith(struct smokey_test *t, int argc, char *const argv[])
+{
+	unsigned int mul, shft, rejected;
+	long long avg, calib = 0;
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	struct xnarch_u32frac frac;
+#endif
+	int i;
+
+	/* Prepare. */
+	xnarch_init_llmulshft(nsec_per_sec, sample_freq, &mul, &shft);
+	smokey_trace("mul: 0x%08x, shft: %d", mul, shft);
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	xnarch_init_u32frac(&frac, nsec_per_sec, sample_freq);
+	smokey_trace("integ: %d, frac: 0x%08llx", frac.integ, frac.frac);
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+	smokey_trace("\nsigned positive operation: 0x%016llx * %u / %d",
+		arg, nsec_per_sec, sample_freq);
+	bench("inline calibration", 0);
+	calib = avg;
+	bench("inlined llimd", xnarch_llimd(arg, nsec_per_sec, sample_freq));
+	bench("inlined llmulshft", xnarch_llmulshft(arg, mul, shft));
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	bench("inlined nodiv_llimd",
+	      xnarch_nodiv_llimd(arg, frac.frac, frac.integ));
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+	calib = 0;
+	bench("out of line calibration", dummy());
+	calib = avg;
+	bench("out of line llimd",
+	      do_llimd(arg, nsec_per_sec, sample_freq));
+	bench("out of line llmulshft", do_llmulshft(arg, mul, shft));
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	bench("out of line nodiv_llimd",
+	      do_nodiv_llimd(arg, frac.frac, frac.integ));
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+	smokey_trace("\nsigned negative operation: 0x%016llx * %u / %d",
+		     -arg, nsec_per_sec, sample_freq);
+	calib = 0;
+	bench("inline calibration", 0);
+	calib = avg;
+	bench("inlined llimd", xnarch_llimd(-arg, nsec_per_sec, sample_freq));
+	bench("inlined llmulshft", xnarch_llmulshft(-arg, mul, shft));
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	bench("inlined nodiv_llimd",
+	      xnarch_nodiv_llimd(-arg, frac.frac, frac.integ));
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+	calib = 0;
+	bench("out of line calibration", dummy());
+	calib = avg;
+	bench("out of line llimd",
+	      do_llimd(-arg, nsec_per_sec, sample_freq));
+	bench("out of line llmulshft", do_llmulshft(-arg, mul, shft));
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	bench("out of line nodiv_llimd",
+	      do_nodiv_llimd(-arg, frac.frac, frac.integ));
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+
+#ifdef XNARCH_HAVE_NODIV_LLIMD
+	smokey_trace("\nunsigned operation: 0x%016llx * %u / %d",
+		     arg, nsec_per_sec, sample_freq);
+	calib = 0;
+	bench("inline calibration", 0);
+	calib = avg;
+	bench("inlined nodiv_ullimd",
+	      xnarch_nodiv_ullimd(arg, frac.frac, frac.integ));
+
+	calib = 0;
+	bench("out of line calibration", dummy());
+	calib = avg;
+	bench("out of line nodiv_ullimd",
+	      do_nodiv_ullimd(arg, frac.frac, frac.integ));
+#endif /* XNARCH_HAVE_NODIV_LLIMD */
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/bufp/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/bufp/Makefile.am
new file mode 100644
index 0000000..c80a00f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/bufp/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libbufp.a
+
+libbufp_a_SOURCES = bufp.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libbufp_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/bufp/bufp.c b/kernel/xenomai-v3.2.4/testsuite/smokey/bufp/bufp.c
new file mode 100644
index 0000000..4705ee1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/bufp/bufp.c
@@ -0,0 +1,174 @@
+/*
+ * RTIPC/BUFP test.
+ *
+ * Copyright (C) Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <smokey/smokey.h>
+#include <rtdm/ipc.h>
+
+smokey_test_plugin(bufp,
+		   SMOKEY_NOARGS,
+		   "Check RTIPC/BUFP protocol."
+);
+
+#define BUFP_SVPORT 12
+
+static pthread_t svtid, cltid;
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *server(void *arg)
+{
+	struct sockaddr_ipc saddr, claddr;
+	long data, control = 0;
+	socklen_t addrlen;
+	size_t bufsz;
+	fd_set set;
+	int ret, s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+	if (s < 0)
+		fail("socket");
+
+	bufsz = 32768; /* bytes */
+	ret = setsockopt(s, SOL_BUFP, BUFP_BUFSZ,
+			 &bufsz, sizeof(bufsz));
+	if (ret)
+		fail("setsockopt");
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = BUFP_SVPORT;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	FD_ZERO(&set);
+	FD_SET(s, &set);
+
+	for (;;) {
+		control++;
+		ret = select(s + 1, &set, NULL, NULL, NULL);
+		if (ret != 1 || !FD_ISSET(s, &set))
+			fail("select");
+
+		/*
+		 * We can't race with any other reader in this setup,
+		 * so recvfrom() shall confirm the select() result.
+		 */
+		addrlen = sizeof(saddr);
+		ret = recvfrom(s, &data, sizeof(data), MSG_DONTWAIT,
+			       (struct sockaddr *)&claddr, &addrlen);
+		if (ret != sizeof(data)) {
+			close(s);
+			fail("recvfrom");
+		}
+		if (data != control) {
+			close(s);
+			smokey_note("data does not match control value");
+			errno = EINVAL;
+			fail("recvfrom");
+		}
+		smokey_trace("%s: received %d bytes, %ld from port %d",
+			     __func__, ret, data, claddr.sipc_port);
+	}
+
+	return NULL;
+}
+
+static void *client(void *arg)
+{
+	struct sockaddr_ipc svsaddr;
+	int ret, s, loops = 30;
+	struct timespec ts;
+	long data = 0;
+	fd_set set;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+	if (s < 0)
+		fail("socket");
+
+	memset(&svsaddr, 0, sizeof(svsaddr));
+	svsaddr.sipc_family = AF_RTIPC;
+	svsaddr.sipc_port = BUFP_SVPORT;
+	ret = connect(s, (struct sockaddr *)&svsaddr, sizeof(svsaddr));
+	if (ret)
+		fail("connect");
+
+	FD_ZERO(&set);
+	FD_SET(s, &set);
+
+	while (--loops) {
+		ret = select(s + 1, NULL, &set, NULL, NULL);
+		if (ret != 1 || !FD_ISSET(s, &set))
+			fail("select");
+		data++;
+		ret = sendto(s, &data, sizeof(data), MSG_DONTWAIT,
+			     (struct sockaddr *)&svsaddr, sizeof(svsaddr));
+		if (ret != sizeof(data)) {
+			close(s);
+			fail("sendto");
+		}
+		smokey_trace("%s: sent %d bytes, %ld", __func__, ret, data);
+		ts.tv_sec = 0;
+		ts.tv_nsec = 100000000; /* 100 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+static int run_bufp(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param svparam = {.sched_priority = 71 };
+	struct sched_param clparam = {.sched_priority = 70 };
+	pthread_attr_t svattr, clattr;
+	int s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_BUFP);
+	if (s < 0) {
+		if (errno == EAFNOSUPPORT)
+			return -ENOSYS;
+	} else
+		close(s);
+
+	pthread_attr_init(&svattr);
+	pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&svattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&svattr, &svparam);
+
+	errno = pthread_create(&svtid, &svattr, &server, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&clattr);
+	pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&clattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&clattr, &clparam);
+
+	errno = pthread_create(&cltid, &clattr, &client, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_join(cltid, NULL);
+	pthread_cancel(svtid);
+	pthread_join(svtid, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/Makefile.am
new file mode 100644
index 0000000..0d2e4e6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libcpu-affinity.a
+
+libcpu_affinity_a_SOURCES = cpu-affinity.c
+
+libcpu_affinity_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/cpu-affinity.c b/kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/cpu-affinity.c
new file mode 100644
index 0000000..cbc8771
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/cpu-affinity/cpu-affinity.c
@@ -0,0 +1,252 @@
+/*
+ * Test CPU affinity control mechanisms.
+ *
+ * Copyright (C) Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <pthread.h>
+#include <sched.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <semaphore.h>
+#include <rtdm/testing.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(cpu_affinity,
+		   SMOKEY_NOARGS,
+		   "Check CPU affinity control."
+);
+
+static cpu_set_t cpu_realtime_set, cpu_online_set;
+
+struct test_context {
+	sem_t done;
+	int status;
+	int kfd;
+	int nrt_cpu;
+};
+
+static void *test_thread(void *arg)
+{
+	int cpu, cpu_in_rt_set, status = 0, ncpu, ret;
+	struct test_context *p = arg;
+	cpu_set_t set;
+
+	cpu = get_current_cpu();
+	if (!__Fassert(cpu < 0)) {
+		status = cpu;
+		goto out;
+	}
+
+	/*
+	 * When emerging, we should be running on a member of the
+	 * real-time CPU set.
+	 */
+	cpu_in_rt_set = CPU_ISSET(cpu, &cpu_realtime_set);
+	if (!__Tassert(cpu_in_rt_set)) {
+		status = -EINVAL;
+		goto out;
+	}
+
+	smokey_trace(".... user thread starts on CPU%d, ok", cpu);
+
+	for (ncpu = 0; ncpu < CPU_SETSIZE; ncpu++) {
+		if (ncpu == cpu || !CPU_ISSET(ncpu, &cpu_realtime_set))
+			continue;
+		CPU_ZERO(&set);
+		CPU_SET(ncpu, &set);
+		if (!__Terrno(ret, sched_setaffinity(0, sizeof(set), &set))) {
+			status = ret;
+			goto out;
+		}
+		smokey_trace(".... user thread moved to CPU%d, good", ncpu);
+	}
+out:
+	p->status = status;
+	__STD(sem_post(&p->done));
+
+	return NULL;
+}
+
+static int load_test_module(void)
+{
+	int fd, status;
+
+	status = smokey_modprobe("xeno_rtdmtest", true);
+	if (status)
+		return status;
+
+	/* Open the RTDM actor device. */
+	fd = open("/dev/rtdm/rtdmx", O_RDWR);
+	if (fd < 0)
+		return -errno;
+
+	return fd;
+}
+
+static void unload_test_module(int fd)
+{
+	close(fd);
+	smokey_rmmod("xeno_rtdmtest");
+}
+
+static void *__run_cpu_affinity(void *arg)
+{
+	struct test_context *context = arg;
+	struct sched_param param;
+	struct timespec ts, now;
+	pthread_attr_t thattr;
+	cpu_set_t set;
+	pthread_t tid;
+	int ret;
+
+	smokey_trace(".. control thread binding to non-RT CPU%d",
+		     context->nrt_cpu);
+
+	__STD(sem_init(&context->done, 0, 0));
+
+	/*
+	 * Make the child thread inherit a CPU affinity outside of the
+	 * valid RT set from us. Cobalt should migrate the spawned
+	 * threads (kernel and user) to a CPU from the RT set
+	 * automatically.
+	 */
+	CPU_ZERO(&set);
+	CPU_SET(context->nrt_cpu, &set);
+	if (!__Terrno(ret, sched_setaffinity(0, sizeof(set), &set))) {
+		context->status = ret;
+		goto out;
+	}
+
+	/* Check CPU affinity handling for user-space threads. */
+
+	smokey_trace(".. starting user thread");
+
+	pthread_attr_init(&thattr);
+	param.sched_priority = 1;
+	pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_DETACHED);
+	pthread_attr_setschedpolicy(&thattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&thattr, &param);
+	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
+
+	if (!__T(ret, pthread_create(&tid, &thattr, test_thread, context))) {
+		context->status = ret;
+		goto out;
+	}
+
+	__STD(clock_gettime(CLOCK_REALTIME, &now));
+	timespec_adds(&ts, &now, 100000000); /* 100ms from now */
+
+	if (!__Terrno(ret, __STD(sem_timedwait(&context->done, &ts)))) {
+		context->status = ret;
+		goto out;
+	}
+
+	/*
+	 *  Prepare for testing CPU affinity handling for RTDM driver
+	 *  tasks. We don't actually run the test just yet, since we
+	 *  have no real-time context and the RTDM actor wants one,
+	 *  but we still load the module, creating the actor task over
+	 *  a non-RT CPU, which is the premise of our kernel-based
+	 *  test.
+	 */
+
+	context->kfd = load_test_module();
+out:
+	__STD(sem_destroy(&context->done));
+
+	return NULL;
+}
+
+static int run_cpu_affinity(struct smokey_test *t,
+			    int argc, char *const argv[])
+{
+	struct test_context context;
+	int cpu, ret, cpu_in_rt_set;
+	struct sched_param param;
+	pthread_attr_t thattr;
+	pthread_t tid;
+	__u32 kcpu;
+
+	if (sysconf(_SC_NPROCESSORS_CONF) == 1) {
+		smokey_trace("uniprocessor system, skipped");
+		return 0;
+	}
+
+	ret = get_realtime_cpu_set(&cpu_realtime_set);
+	if (ret)
+		return -ENOSYS;
+
+	ret = get_online_cpu_set(&cpu_online_set);
+	if (ret)
+		return -ENOSYS;
+
+	for (cpu = 0; cpu < CPU_SETSIZE; cpu++) {
+		if (CPU_ISSET(cpu, &cpu_online_set))
+			smokey_trace(".. CPU%d is %s", cpu,
+			     CPU_ISSET(cpu, &cpu_realtime_set) ?
+			     "available" : "online, non-RT");
+	}
+
+	/* Find a non-RT CPU in the online set. */
+	for (cpu = CPU_SETSIZE - 1; cpu >= 0; cpu--) {
+		if (CPU_ISSET(cpu, &cpu_online_set) &&
+		    !CPU_ISSET(cpu, &cpu_realtime_set))
+			break;
+	}
+
+	/*
+	 * If there is no CPU restriction on the bootargs
+	 * (i.e. xenomai.supported_cpus is absent or does not exclude
+	 * any online CPU), pretend that we have no kernel support for
+	 * running this test.
+	 */
+	if (cpu < 0) {
+		smokey_trace("no CPU restriction with xenomai.supported_cpus");
+		return -ENOSYS;
+	}
+
+	pthread_attr_init(&thattr);
+	param.sched_priority = 0;
+	pthread_attr_setdetachstate(&thattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setschedpolicy(&thattr, SCHED_OTHER);
+	pthread_attr_setschedparam(&thattr, &param);
+	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
+
+	context.kfd = -1;
+	context.status = 0;
+	context.nrt_cpu = cpu;
+	/*
+	 * Start a regular pthread for running the tests, to bypass
+	 * sanity checks Cobalt does on CPU affinity. We actually want
+	 * to start testing from a non-RT CPU.
+	 */
+	if (!__T(ret, __STD(pthread_create(&tid, &thattr,
+					   __run_cpu_affinity, &context))))
+		return ret;
+
+	if (!__T(ret, pthread_join(tid, NULL)))
+		return ret;
+
+	if (context.kfd < 0)
+		smokey_trace(".. RTDM test module not available, skipping");
+	else {
+		smokey_trace(".. testing kthread affinity handling");
+		if (!__Terrno(ret, ioctl(context.kfd,
+				 RTTST_RTIOC_RTDM_ACTOR_GET_CPU, &kcpu)))
+			context.status = ret;
+		else {
+			cpu_in_rt_set = CPU_ISSET(kcpu, &cpu_realtime_set);
+			if (!__Tassert(cpu_in_rt_set))
+				context.status = -EINVAL;
+			else
+				smokey_trace(".... kernel thread pinned to CPU%d, fine",
+					     kcpu);
+		}
+		unload_test_module(context.kfd);
+	}
+
+	return context.status;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/Makefile.am
new file mode 100644
index 0000000..4f79ffb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/Makefile.am
@@ -0,0 +1,58 @@
+testdir = @XENO_TEST_DIR@
+
+test_LTLIBRARIES = libalchemy-test.la libposix-test.la
+
+libalchemy_test_la_LDFLAGS =	\
+	$(XENO_POSIX_WRAPPERS)	\
+	@XENO_LIB_LDFLAGS@	\
+	-version-info 0:0:0
+
+libalchemy_test_la_LIBADD =					\
+	@XENO_CORE_LDADD@					\
+	@XENO_USER_LDADD@					\
+	../../../lib/boilerplate/init/libbootstrap-pic.la	\
+	../../../lib/alchemy/libalchemy@CORE@.la			\
+	../../../lib/copperplate/libcopperplate@CORE@.la
+
+libalchemy_test_la_SOURCES = libalchemy-test.c
+
+libalchemy_test_la_CPPFLAGS =	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
+
+libposix_test_la_LDFLAGS =	\
+	$(XENO_POSIX_WRAPPERS)	\
+	@XENO_LIB_LDFLAGS@	\
+	-version-info 0:0:0
+
+libposix_test_la_LIBADD =					\
+	@XENO_CORE_LDADD@					\
+	@XENO_USER_LDADD@					\
+	../../../lib/boilerplate/init/libbootstrap-pic.la	\
+	../../../lib/copperplate/libcopperplate@CORE@.la
+
+libposix_test_la_SOURCES = libposix-test.c
+
+libposix_test_la_CPPFLAGS =	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
+
+test_PROGRAMS = dlopentest
+
+dlopentest_SOURCES = dlopentest.c
+
+dlopentest_CPPFLAGS =				\
+	-Wno-format-security			\
+	-DXENO_TEST_DIR='"$(XENO_TEST_DIR)"'
+
+dlopentest_LDADD =		\
+	-ldl
+
+noinst_LIBRARIES = libdlopen.a
+
+libdlopen_a_SOURCES = dlopen.c
+
+libdlopen_a_CPPFLAGS =				\
+	@XENO_USER_CFLAGS@			\
+	-DXENO_TEST_DIR='"$(XENO_TEST_DIR)"'	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopen.c b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopen.c
new file mode 100644
index 0000000..282dbbe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopen.c
@@ -0,0 +1,44 @@
+/*
+ * Copyright (C) Siemens AG, 2018
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <error.h>
+#include <stdio.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(dlopen,
+		   SMOKEY_NOARGS,
+		   "Check dlopen()/dlclose() functionality."
+);
+
+static int run_dlopen(struct smokey_test *t,
+			  int argc, char *const argv[])
+{
+	/*
+	 * exec the actual program, which should be a xenomai-free
+	 * application that gains xenomai functionality with dlopen()
+	 */
+	return smokey_fork_exec(XENO_TEST_DIR "/dlopentest", "");
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopentest.c b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopentest.c
new file mode 100644
index 0000000..cbf9b74
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/dlopentest.c
@@ -0,0 +1,79 @@
+/*
+ * Copyright (C) Siemens AG, 2018
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <assert.h>
+#include <errno.h>
+#include <dlfcn.h>
+#include <error.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <limits.h>
+
+static void *my_dlopen(char *name)
+{
+	void *handle;
+	char buf[PATH_MAX];
+	int flags = RTLD_LAZY|RTLD_GLOBAL;
+
+	handle = dlopen(name, flags);
+	if (!handle) {
+		/*
+		 * Whatever the error was (dlopen() does not set errno),
+		 * retry with an absolute path; the plain name is tried
+		 * first only to honor LD_LIBRARY_PATH.
+		 */
+		snprintf(buf, PATH_MAX, "%s/%s", XENO_TEST_DIR, name);
+		handle = dlopen(buf, flags);
+		if (!handle)
+			error(1, errno, dlerror());
+	}
+	return handle;
+}
+
+static int my_dlcall(char *lname, char *fname, void **handle)
+{
+	int (*func)(void);
+	char *errstr;
+
+	*handle = my_dlopen(lname);
+	func = dlsym(*handle, fname);
+	errstr = dlerror();
+	if (errstr)
+		error(1, errno, "%s", errstr);
+
+	return func();
+}
+
+int main(int argc, char *const argv[])
+{
+	void *handlea, *handlep;
+	int ret;
+
+	ret = my_dlcall("libalchemy-test.so", "libalchemy_func", &handlea);
+	if (ret)
+		error(1, errno, "libalchemy_func: %s", strerror(-ret));
+
+	ret = my_dlcall("libposix-test.so", "libposix_func", &handlep);
+	if (ret)
+		error(1, errno, "libposix_func: %s", strerror(-ret));
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libalchemy-test.c b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libalchemy-test.c
new file mode 100644
index 0000000..3e1eb23
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libalchemy-test.c
@@ -0,0 +1,65 @@
+/*
+ * Copyright (C) Siemens AG, 2018
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <alchemy/queue.h>
+#include <string.h>
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <xenomai/init.h>
+#include <xenomai/tunables.h>
+
+static int ran_init = 0;
+static size_t def_mem_pool_size = SIZE_MAX;
+
+static int alchemy_tune(void)
+{
+	if (ran_init)
+		return 0;
+	def_mem_pool_size = get_config_tunable(mem_pool_size);
+	set_config_tunable(mem_pool_size, 2*def_mem_pool_size);
+	ran_init = 1;
+	return 0;
+}
+
+static struct setup_descriptor alchemy_setup = {
+	.name = "setup-name",
+	.tune = alchemy_tune,
+};
+
+user_setup_call(alchemy_setup);
+
+int libalchemy_func(void);
+
+int libalchemy_func(void)
+{
+	RT_QUEUE queue;
+	int ret;
+
+	ret = rt_queue_create(&queue, "q0", def_mem_pool_size,
+			      Q_UNLIMITED, Q_FIFO);
+	if (ret)
+		return ret;
+	ret = rt_queue_delete(&queue);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libposix-test.c b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libposix-test.c
new file mode 100644
index 0000000..fda01a5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/dlopen/libposix-test.c
@@ -0,0 +1,32 @@
+/*
+ * Copyright (C) Siemens AG, 2018
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <time.h>
+#include <stdio.h>
+
+int libposix_func(void);
+
+int libposix_func(void)
+{
+	struct timespec now;
+	return clock_gettime(CLOCK_REALTIME, &now);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/Makefile.am
new file mode 100644
index 0000000..c90d0dd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/Makefile.am
@@ -0,0 +1,8 @@
+
+noinst_LIBRARIES = libfpu-stress.a
+
+libfpu_stress_a_SOURCES = fpu-stress.c
+
+libfpu_stress_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/fpu-stress.c b/kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/fpu-stress.c
new file mode 100644
index 0000000..59383c9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/fpu-stress/fpu-stress.c
@@ -0,0 +1,95 @@
+#include <stdio.h>
+#include <pthread.h>
+#include <smokey/smokey.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/uapi/fptest.h>
+
+smokey_test_plugin(fpu_stress,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_INT(duration),
+		   ),
+		   "Check FPU context sanity during real-time stress\n"
+		   "\tduration=<seconds>\thow long to run the stress loop (0=indefinitely)"
+);
+
+static int fp_features;
+
+static void *stress_loop(void *arg)
+{
+	struct timespec rqt = {
+		.tv_sec = 0,
+		.tv_nsec = CONFIG_XENO_DEFAULT_PERIOD
+	};
+
+	for (;;) {
+		fp_regs_set(fp_features, 0xf1f5f1f5);
+		clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, NULL);
+	}
+
+	return NULL;
+}
+
+static int report_error(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	smokey_vatrace(fmt, ap);
+	va_end(ap);
+
+	return 0;
+}
+
+static int run_fpu_stress(struct smokey_test *t,
+			  int argc, char *const argv[])
+{
+	unsigned sleep_ms, n, rounds, duration = 3;
+	struct sched_param param;
+	pthread_attr_t attr;
+	struct timespec rqt;
+	pthread_t tid;
+	int ret;
+
+	fp_features = cobalt_fp_detect();
+	if (fp_features == 0)
+		return -ENOSYS;
+
+	smokey_parse_args(t, argc, argv);
+
+	if (SMOKEY_ARG_ISSET(fpu_stress, duration))
+		duration = SMOKEY_ARG_INT(fpu_stress, duration);
+
+	rqt.tv_sec = 0;
+	rqt.tv_nsec = CONFIG_XENO_DEFAULT_PERIOD;
+	sleep_ms = 1000000UL / rqt.tv_nsec; /* wake up each ms */
+	rounds = duration * 1000UL / sleep_ms;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	param.sched_priority = 10;
+	pthread_attr_setschedparam(&attr, &param);
+	ret = pthread_create(&tid, &attr, stress_loop, NULL);
+	if (ret)
+		return -ret;
+
+	if (rounds)
+		smokey_trace("running for %d seconds", duration);
+	else
+		smokey_trace("running indefinitely...");
+
+	for (n = 0; rounds == 0 || n < rounds; n++) {
+		fp_regs_set(fp_features, n);
+		__STD(clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, NULL));
+		if (fp_regs_check(fp_features, n, report_error) != n) {
+			ret = -EINVAL;
+			break;
+		}
+	}
+
+	pthread_cancel(tid);
+	pthread_join(tid, NULL);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/gdb/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/gdb/Makefile.am
new file mode 100644
index 0000000..9c0843d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/gdb/Makefile.am
@@ -0,0 +1,12 @@
+
+noinst_LIBRARIES = libgdb.a
+
+libgdb_a_SOURCES = gdb.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libgdb_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include	\
+	-g -O0
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/gdb/gdb.c b/kernel/xenomai-v3.2.4/testsuite/smokey/gdb/gdb.c
new file mode 100644
index 0000000..b09b78b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/gdb/gdb.c
@@ -0,0 +1,317 @@
+/*
+ * gdb test.
+ *
+ * Copyright (C) Siemens AG, 2015-2019
+ *
+ * Authors.
+ *  Jan Kiszka <jan.kiszka@siemens.com>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <error.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <sys/wait.h>
+#include <xeno_config.h>
+#include <boilerplate/libc.h>
+#include <smokey/smokey.h>
+#include "lib/cobalt/current.h"
+
+static void send_command(const char *string, int wait_for_prompt);
+
+static void check_inner(const char *fn, int line, const char *msg,
+			int status, int expected)
+{
+	if (status == expected)
+		return;
+
+	rt_print_flush_buffers();
+	fprintf(stderr, "FAILURE %s:%d: %s returned %d instead of %d - %s\n",
+		fn, line, msg, status, expected, strerror(status));
+	send_command("q\n", 0);
+	exit(EXIT_FAILURE);
+}
+
+#define check(msg, status, expected) ({					\
+	int __status = status;						\
+	check_inner(__FUNCTION__, __LINE__, msg, __status, expected);	\
+	__status;							\
+})
+
+#define check_no_error(msg, status) ({					\
+	int __status = status;						\
+	check_inner(__func__, __LINE__, msg,				\
+		    __status < 0 ? errno : __status, 0);		\
+	__status;							\
+})
+
+smokey_test_plugin(gdb,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_BOOL(run_target),
+		   ),
+		   "Check gdb-based debugging of application."
+);
+
+#ifdef HAVE_FORK
+#define do_fork fork
+#else
+#define do_fork vfork
+#endif
+
+static int pipe_in[2], pipe_out[2];
+static volatile unsigned long long thread_loops, expected_loops;
+static volatile int primary_mode;
+static volatile int terminate;
+static int child_terminated;
+static sem_t kickoff_lo;
+
+static void handle_sigchld(int signum)
+{
+	int status;
+
+	wait(&status);
+	if (WEXITSTATUS(status) == ESRCH)
+		smokey_note("gdb: skipped (gdb not available)");
+	else
+		check("gdb execution", WEXITSTATUS(status), 0);
+	child_terminated = 1;
+	close(pipe_out[0]);
+}
+
+static void wait_for_pattern(const char *string)
+{
+	const char *match = string;
+	char c;
+
+	while (*match != 0 && read(pipe_out[0], &c, 1) == 1) {
+		if (*match == c)
+			match++;
+		else
+			match = string;
+		if (smokey_verbose_mode > 2)
+			putc(c, stdout);
+	}
+}
+
+static void send_command(const char *string, int wait_for_prompt)
+{
+	int ret;
+
+	ret = write(pipe_in[1], string, strlen(string));
+	check("write(pipe_in)", ret, strlen(string));
+	if (wait_for_prompt)
+		wait_for_pattern("(gdb) ");
+}
+
+static void check_output(const char *fn, int line, const char *expression,
+			 const char *pattern)
+{
+	char *read_string = malloc(strlen(pattern) + 1);
+	const char *match = pattern;
+	int pos = 0;
+
+	while (*match != 0 && read(pipe_out[0], &read_string[pos], 1) == 1) {
+		if (*match != read_string[pos]) {
+			rt_print_flush_buffers();
+			read_string[pos + 1] = 0;
+			fprintf(stderr, "FAILURE %s:%d: checking expression "
+				"\"%s\", expected \"%s\", found \"%s\"\n",
+				fn, line, expression, pattern, read_string);
+			send_command("q\n", 0);
+			exit(EXIT_FAILURE);
+		}
+		if (smokey_verbose_mode > 2)
+			putc(read_string[pos], stdout);
+		match++;
+		pos++;
+	}
+	free(read_string);
+}
+
+#define eval_expression(expression, expected_value)			\
+	eval_expression_inner(__FUNCTION__, __LINE__, expression,	\
+			      expected_value)
+
+static void eval_expression_inner(const char *fn, int line,
+				  const char *expression,
+				  const char *expected_value)
+{
+	int ret;
+
+	ret = write(pipe_in[1], "p ", 2);
+	check("write(pipe_in)", ret, 2);
+	ret = write(pipe_in[1], expression, strlen(expression));
+	check("write(pipe_in)", ret, strlen(expression));
+	send_command("\n", 0);
+
+	check_output(fn, line, expression, "$");
+	wait_for_pattern(" = ");
+	check_output(fn, line, expression, expected_value);
+	check_output(fn, line, expression, "\n(gdb) ");
+}
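+
+/*
+ * e.g. eval_expression("primary_mode", "1") sends "p primary_mode" to
+ * gdb and requires the reply to look like "$<n> = 1", skipping the
+ * value history number between "$" and " = ".
+ */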
+
+static void __attribute__((noinline)) spin_lo_prio(void)
+{
+	int err;
+
+	err = sem_wait(&kickoff_lo);
+	check_no_error("sem_wait", err);
+
+	while (!terminate)
+		thread_loops++;
+}
+
+static void __attribute__((noinline)) breakpoint_target(void)
+{
+	asm volatile("" ::: "memory");
+}
+
+static void *thread_hi_func(void *arg)
+{
+	struct timespec ts = {0, 1000000};
+	int err;
+
+	pthread_setname_np(pthread_self(), "hi-thread");
+
+	err = sem_post(&kickoff_lo);
+	check_no_error("sem_post", err);
+
+	nanosleep(&ts, NULL);
+
+	/* 1st breakpoint: synchronous stop */
+	expected_loops = thread_loops;
+	breakpoint_target();
+
+	/* 2nd bp: synchronous continue */
+	expected_loops = thread_loops;
+	breakpoint_target();
+	terminate = 1;
+
+	return NULL;
+}
+
+static int run_gdb(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param params;
+	pthread_t thread_hi;
+	pthread_attr_t attr;
+	char run_param[32];
+	cpu_set_t cpu_set;
+	int err;
+
+	smokey_parse_args(t, argc, argv);
+
+	if (SMOKEY_ARG_ISSET(gdb, run_target) &&
+            SMOKEY_ARG_BOOL(gdb, run_target)) {
+		/* we are the to-be-debugged target */
+
+		CPU_ZERO(&cpu_set);
+		CPU_SET(0, &cpu_set);
+		err = pthread_setaffinity_np(pthread_self(), sizeof(cpu_set),
+					     &cpu_set);
+		check_no_error("pthread_setaffinity_np", err);
+		params.sched_priority = 1;
+		err = pthread_setschedparam(pthread_self(), SCHED_FIFO,
+					    &params);
+		check_no_error("pthread_setschedparam", err);
+
+		check("is primary", cobalt_get_current_mode() & XNRELAX, 0);
+		breakpoint_target();
+		primary_mode = !(cobalt_get_current_mode() & XNRELAX);
+		breakpoint_target();
+
+		err = sem_init(&kickoff_lo, 0, 0);
+		check_no_error("sem_init", err);
+
+		err = pthread_attr_init(&attr);
+		check_no_error("pthread_attr_init", err);
+		err = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+		check_no_error("pthread_attr_setinheritsched", err);
+		err = pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+		check_no_error("pthread_attr_setschedpolicy", err);
+		err = pthread_attr_setaffinity_np(&attr, sizeof(cpu_set), &cpu_set);
+		check_no_error("pthread_attr_setaffinity_np", err);
+
+		params.sched_priority = 2;
+		err = pthread_attr_setschedparam(&attr, &params);
+		check_no_error("pthread_attr_setschedparam", err);
+		err = pthread_create(&thread_hi, &attr, thread_hi_func, NULL);
+		check_no_error("pthread_create", err);
+
+		spin_lo_prio();
+
+		pthread_join(thread_hi, NULL);
+	} else {
+		/* we are the gdb controller */
+
+		signal(SIGCHLD, handle_sigchld);
+
+		err = pipe(pipe_in);
+		check_no_error("pipe_in", err);
+
+		err = pipe(pipe_out);
+		check_no_error("pipe_out", err);
+
+		switch (do_fork()) {
+		case -1:
+			error(1, errno, "fork/vfork");
+
+		case 0:
+			/* redirect input */
+			close(0);
+			err = dup(pipe_in[0]);
+			check_no_error("dup(pipe_in)", err < 0 ? err : 0);
+
+			/* redirect output and stderr */
+			close(1);
+			err = dup(pipe_out[1]);
+			check_no_error("dup(pipe_out)", err < 0 ? err : 0);
+			close(2);
+			err = dup(1);
+			check_no_error("dup(1)", err < 0 ? err : 0);
+
+			snprintf(run_param, sizeof(run_param), "--run=%d",
+				 t->__reserved.id);
+			execlp("gdb", "gdb", "--nx", "--args",
+			      argv[0], run_param, "run_target", (char *)NULL);
+			_exit(ESRCH);
+
+		default:
+			wait_for_pattern("(gdb) ");
+			if (child_terminated)
+				break;
+			send_command("b breakpoint_target\n", 1);
+			send_command("r\n", 1);
+
+			smokey_trace("resume in primary");
+			send_command("c\n", 1);
+			eval_expression("primary_mode", "1");
+			send_command("c\n", 1);
+
+			smokey_trace("synchronous stop");
+			eval_expression("thread_loops==expected_loops", "1");
+			send_command("c\n", 1);
+
+			smokey_trace("synchronous continue");
+			eval_expression("thread_loops==expected_loops", "1");
+
+			send_command("q\n", 0);
+			pause();
+
+			if (!child_terminated) {
+				fprintf(stderr,
+					"FAILURE: gdb still running?!\n");
+				exit(1);
+			}
+		}
+
+		signal(SIGCHLD, SIG_DFL);
+	}
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/iddp/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/iddp/Makefile.am
new file mode 100644
index 0000000..4dc43c0
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/iddp/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libiddp.a
+
+libiddp_a_SOURCES = iddp.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libiddp_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/iddp/iddp.c b/kernel/xenomai-v3.2.4/testsuite/smokey/iddp/iddp.c
new file mode 100644
index 0000000..2117e31
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/iddp/iddp.c
@@ -0,0 +1,178 @@
+/*
+ * RTIPC/IDDP test.
+ *
+ * Copyright (C) Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <smokey/smokey.h>
+#include <rtdm/ipc.h>
+
+smokey_test_plugin(iddp,
+		   SMOKEY_NOARGS,
+		   "Check RTIPC/IDDP protocol."
+);
+
+#define IDDP_SVPORT 12
+#define IDDP_CLPORT 13
+
+static pthread_t svtid, cltid;
+
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *server(void *arg)
+{
+	struct sockaddr_ipc saddr, claddr;
+	long data, control = 0;
+	socklen_t addrlen;
+	size_t poolsz;
+	fd_set set;
+	int ret, s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+	if (s < 0)
+		fail("socket");
+
+	poolsz = 32768; /* bytes */
+	ret = setsockopt(s, SOL_IDDP, IDDP_POOLSZ,
+			 &poolsz, sizeof(poolsz));
+	if (ret)
+		fail("setsockopt");
+
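+	/*
+	 * IDDP_POOLSZ sizes the kernel pool backing this port; it
+	 * only takes effect if set before the socket is bound.
+	 */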
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = IDDP_SVPORT;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	FD_ZERO(&set);
+	FD_SET(s, &set);
+
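+	/*
+	 * Loop until the test body cancels this thread; select() is
+	 * the cancellation point.
+	 */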
+	for (;;) {
+		control++;
+		ret = select(s + 1, &set, NULL, NULL, NULL);
+		if (ret != 1 || !FD_ISSET(s, &set))
+			fail("select");
+
+		/*
+		 * We can't race with any other reader in this setup,
+		 * so recvfrom() shall confirm the select() result.
+		 */
+		addrlen = sizeof(saddr);
+		ret = recvfrom(s, &data, sizeof(data), MSG_DONTWAIT,
+			       (struct sockaddr *)&claddr, &addrlen);
+		if (ret != sizeof(data)) {
+			close(s);
+			fail("recvfrom");
+		}
+		if (data != control) {
+			close(s);
+			smokey_note("data does not match control value");
+			errno = EINVAL;
+			fail("recvfrom");
+		}
+		smokey_trace("%s: received %d bytes, %ld from port %d",
+			     __func__, ret, data, claddr.sipc_port);
+	}
+
+	return NULL;
+}
+
+static void *client(void *arg)
+{
+	struct sockaddr_ipc svsaddr, clsaddr;
+	int ret, s, loops = 30;
+	struct timespec ts;
+	long data = 0;
+	fd_set set;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+	if (s < 0)
+		fail("socket");
+
+	clsaddr.sipc_family = AF_RTIPC;
+	clsaddr.sipc_port = IDDP_CLPORT;
+	ret = bind(s, (struct sockaddr *)&clsaddr, sizeof(clsaddr));
+	if (ret)
+		fail("bind");
+
+	svsaddr.sipc_family = AF_RTIPC;
+	svsaddr.sipc_port = IDDP_SVPORT;
+
+	FD_ZERO(&set);
+	FD_SET(s, &set);
+
+	while (--loops) {
+		ret = select(s + 1, NULL, &set, NULL, NULL);
+		/* Should always be immediately writable. */
+		if (ret != 1 || !FD_ISSET(s, &set))
+			fail("select");
+
+		data++;
+		ret = sendto(s, &data, sizeof(data), MSG_DONTWAIT,
+			     (struct sockaddr *)&svsaddr, sizeof(svsaddr));
+		if (ret != sizeof(data)) {
+			close(s);
+			fail("sendto");
+		}
+		smokey_trace("%s: sent %d bytes, %ld", __func__, ret, data);
+		ts.tv_sec = 0;
+		ts.tv_nsec = 100000000; /* 100 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+static int run_iddp(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param svparam = {.sched_priority = 71 };
+	struct sched_param clparam = {.sched_priority = 70 };
+	pthread_attr_t svattr, clattr;
+	int s;
+
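+	/* Probe for RTIPC/IDDP support; report -ENOSYS (skip) if absent. */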
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_IDDP);
+	if (s < 0) {
+		if (errno == EAFNOSUPPORT)
+			return -ENOSYS;
+	} else
+		close(s);
+
+	pthread_attr_init(&svattr);
+	pthread_attr_setdetachstate(&svattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&svattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&svattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&svattr, &svparam);
+
+	errno = pthread_create(&svtid, &svattr, &server, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&clattr);
+	pthread_attr_setdetachstate(&clattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&clattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&clattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&clattr, &clparam);
+
+	errno = pthread_create(&cltid, &clattr, &client, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_join(cltid, NULL);
+	pthread_cancel(svtid);
+	pthread_join(svtid, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/leaks/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/leaks/Makefile.am
new file mode 100644
index 0000000..ecd8801
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/leaks/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libleaks.a
+
+libleaks_a_SOURCES = leaks.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libleaks_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/leaks/leaks.c b/kernel/xenomai-v3.2.4/testsuite/smokey/leaks/leaks.c
new file mode 100644
index 0000000..7eb5b67
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/leaks/leaks.c
@@ -0,0 +1,286 @@
+/*
+ * Copyright (C) 2012 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <signal.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <sys/stat.h>
+#include <semaphore.h>
+#include <mqueue.h>
+#include <signal.h>
+#include <sys/types.h>
+#include <sys/wait.h>
+#include <sys/ioctl.h>
+#include <linux/unistd.h>
+#include <boilerplate/compiler.h>
+#include <rtdm/rtdm.h>
+#include <cobalt/uapi/kernel/heap.h>
+#include <xeno_config.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(leaks,
+		   SMOKEY_NOARGS,
+		   "Check for resource leakage in the Cobalt core."
+);
+
+#define SEM_NAME "/sem"
+#define MQ_NAME "/mq"
+
+#define check_used(object, before, failed)				\
+	({								\
+		unsigned long long after = get_used();			\
+		if (before != after) {					\
+			smokey_warning(object				\
+				       " leaked %Lu bytes", after-before); \
+			failed = 1;					\
+		} else							\
+			smokey_trace("no leak with" object);		\
+	})
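+
+/*
+ * Typical use:
+ *
+ *	before = get_used();
+ *	... create then destroy one object ...
+ *	check_used("mutex", before, failed);
+ *
+ * The object name must be a string literal so that it concatenates
+ * into the warning/trace messages above.
+ */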
+
+#define devnode_root "/dev/rtdm/"
+
+const char *memdev[] = {
+	devnode_root COBALT_MEMDEV_PRIVATE,
+	devnode_root COBALT_MEMDEV_SHARED,
+	devnode_root COBALT_MEMDEV_SYS,
+	NULL,
+};
+
+static int memdevfd[3];
+
+static int procfs_exists(const char *type, const char *name)
+{
+	struct stat s;
+	char path[128];
+	int ret;
+
+	/* Ignore if the kernel seems to be compiled without procfs support */
+	if (stat("/proc/xenomai", &s) || !S_ISDIR(s.st_mode))
+		return 0;
+
+	/* Give the core some time to populate /proc with the new entry */
+	usleep(100000);
+
+	ret = snprintf(path, sizeof(path), "%s/%s/%s",
+		       "/proc/xenomai/registry/posix", type, &name[1]);
+	if (ret < 0)
+		return -EINVAL;
+
+	return smokey_check_errno(stat(path, &s));
+}
+
+static unsigned long long get_used(void)
+{
+	struct cobalt_memdev_stat statbuf;
+	unsigned long long used = 0;
+	int i, ret;
+
+	for (i = 0; memdev[i]; i++) {
+		ret = smokey_check_errno(ioctl(memdevfd[i], MEMDEV_RTIOC_STAT, &statbuf));
+		if (ret == 0)
+			used += statbuf.size - statbuf.free;
+	}
+
+	return used;
+}
+
+static void *empty(void *cookie)
+{
+	return cookie;
+}
+
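+/*
+ * Deliberately leave every object live on return: the child process
+ * exits right after, and the core is expected to reclaim them all,
+ * which the fork leak check below verifies.
+ */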
+static inline int subprocess_leak(void)
+{
+	struct sigevent sevt;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	pthread_t thread;
+	timer_t tm;
+	sem_t sem;
+	int ret;
+
+	ret = smokey_check_status(pthread_create(&thread, NULL, empty, NULL));
+	if (ret)
+		return ret;
+	
+	ret = smokey_check_status(pthread_mutex_init(&mutex, NULL));
+	if (ret)
+		return ret;
+	
+	ret = smokey_check_status(pthread_cond_init(&cond, NULL));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(sem_init(&sem, 0, 0));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(-!(sem_open(SEM_NAME, O_CREAT, 0644, 1)));
+	if (ret)
+		return ret;
+
+	ret = procfs_exists("sem", SEM_NAME);
+	if (ret)
+		return ret;
+
+	sevt.sigev_notify = SIGEV_THREAD_ID;
+	sevt.sigev_signo = SIGALRM;
+	sevt.sigev_notify_thread_id = syscall(__NR_gettid);
+	ret = smokey_check_errno(timer_create(CLOCK_MONOTONIC, &sevt, &tm));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(mq_open(MQ_NAME, O_RDWR | O_CREAT, 0644, NULL));
+	if (ret < 0)
+		return ret;
+
+	ret = procfs_exists("mqueue", MQ_NAME);
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
+static int run_leaks(struct smokey_test *t, int argc, char *const argv[])
+{
+	unsigned long long before;
+	struct sigevent sevt;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	int fd, failed = 0, i, ret, child_ret;
+	pthread_t thread;
+	sem_t sem, *psem;
+	timer_t tm;
+	__maybe_unused pid_t child;
+
+	for (i = 0; memdev[i]; i++) {
+		memdevfd[i] = smokey_check_errno(open(memdev[i], O_RDONLY));
+		if (memdevfd[i] < 0)
+			return memdevfd[i];
+	}
+	
+	before = get_used();
+	ret = smokey_check_status(pthread_create(&thread, NULL, empty, NULL));
+	if (ret)
+		return ret;
+	
+	ret = smokey_check_status(pthread_join(thread, NULL));
+	if (ret)
+		return ret;
+
+	sleep(1);		/* Leave some time for xnheap
+				 * deferred free */
+	check_used("thread", before, failed);
+	before = get_used();
+	ret = smokey_check_status(pthread_mutex_init(&mutex, NULL));
+	if (ret)
+		return ret;
+	ret = smokey_check_status(pthread_mutex_destroy(&mutex));
+	if (ret)
+		return ret;
+
+	check_used("mutex", before, failed);
+	before = get_used();
+	ret = smokey_check_status(pthread_cond_init(&cond, NULL));
+	if (ret)
+		return ret;
+	ret = smokey_check_status(pthread_cond_destroy(&cond));
+	if (ret)
+		return ret;
+
+	check_used("cond", before, failed);
+	before = get_used();
+	ret = smokey_check_errno(sem_init(&sem, 0, 0));
+	if (ret)
+		return ret;
+	ret = smokey_check_errno(sem_destroy(&sem));
+	if (ret)
+		return ret;
+
+	check_used("sem", before, failed);
+	before = get_used();
+	ret = smokey_check_errno(-!(psem = sem_open(SEM_NAME, O_CREAT, 0644, 1)));
+	if (ret)
+		return ret;
+	ret = smokey_check_errno(sem_close(psem));
+	if (ret)
+		return ret;
+	ret = smokey_check_errno(sem_unlink(SEM_NAME));
+	if (ret)
+		return ret;
+
+	check_used("named sem", before, failed);
+	before = get_used();
+	sevt.sigev_notify = SIGEV_THREAD_ID;
+	sevt.sigev_signo = SIGALRM;
+	sevt.sigev_notify_thread_id = syscall(__NR_gettid);
+	ret = smokey_check_errno(timer_create(CLOCK_MONOTONIC, &sevt, &tm));
+	if (ret)
+		return ret;
+	ret = smokey_check_errno(timer_delete(tm));
+	if (ret)
+		return ret;
+
+	check_used("timer", before, failed);
+	before = get_used();
+	fd = smokey_check_errno(mq_open(MQ_NAME, O_RDWR | O_CREAT, 0644, NULL));
+	if (fd < 0)
+		return fd;
+	ret = smokey_check_errno(mq_close(fd));
+	if (ret)
+		return ret;
+	ret = smokey_check_errno(mq_unlink(MQ_NAME));
+	if (ret)
+		return ret;
+
+	check_used("mq", before, failed);
+#ifdef HAVE_FORK
+	before = get_used();
+	child = smokey_check_errno(fork());
+	if (child < 0)
+		return child;
+	if (!child)
+		exit(-subprocess_leak());
+	while (waitpid(child, &child_ret, 0) == -1 && errno == EINTR);
+	sleep(1);
+
+	ret = smokey_check_errno(sem_unlink(SEM_NAME));
+	if (ret)
+		return ret;
+	ret = smokey_check_errno(mq_unlink(MQ_NAME));
+	if (ret)
+		return ret;
+	if (WIFEXITED(child_ret) && WEXITSTATUS(child_ret))
+		return -WEXITSTATUS(child_ret);
+	check_used("fork", before, failed);
+#endif
+
+	for (i = 0; memdev[i]; i++)
+		close(memdevfd[i]);
+
+	return failed ? -EINVAL : 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/main.c b/kernel/xenomai-v3.2.4/testsuite/smokey/main.c
new file mode 100644
index 0000000..f24c1e9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/main.c
@@ -0,0 +1,53 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License as
+ * published by the Free Software Foundation; either version 2 of the
+ * License, or (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <stdio.h>
+#include <error.h>
+#include <errno.h>
+#include <smokey/smokey.h>
+
+int main(int argc, char *const argv[])
+{
+	struct sched_param param = { .sched_priority = 0 };
+	struct smokey_test *t;
+	int ret, fails = 0;
+
+	if (pvlist_empty(&smokey_test_list))
+		return 0;
+
+	for_each_smokey_test(t) {
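+		/* A prior test may have left this thread RT-scheduled; reset. */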
+		pthread_setschedparam(pthread_self(), SCHED_OTHER, &param);
+		smokey_trace("\n===  running %s ===\n", t->name);
+		ret = t->run(t, argc, argv);
+		if (ret) {
+			if (ret == -ENOSYS) {
+				smokey_note("%s skipped (no kernel support)",
+					    t->name);
+				continue;
+			}
+			fails++;
+			if (smokey_keep_going)
+				continue;
+			if (smokey_verbose_mode)
+				error(1, -ret, "test %s failed", t->name);
+			return 1;
+		}
+		smokey_note("%s OK", t->name);
+	}
+
+	return fails != 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/Makefile.am
new file mode 100644
index 0000000..482314a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/Makefile.am
@@ -0,0 +1,8 @@
+noinst_LIBRARIES = libmemcheck.a
+noinst_HEADERS = memcheck.h
+
+AM_CPPFLAGS =			\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
+
+libmemcheck_a_SOURCES = memcheck.c
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.c b/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.c
new file mode 100644
index 0000000..b11d2ba
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.c
@@ -0,0 +1,886 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <time.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <unistd.h>
+#include <memory.h>
+#include <sched.h>
+#include <pthread.h>
+#include <boilerplate/time.h>
+#include "memcheck.h"
+
+enum pattern {
+	alphabet_series,
+	digit_series,
+	binary_series,
+};
+
+struct chunk {
+	void *ptr;
+	enum pattern pattern;
+};
+
+static struct memcheck_stat *statistics;
+
+static int nrstats;
+
+static int max_results = 4;
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <sys/cobalt.h>
+
+static inline void breathe(int loops)
+{
+	struct timespec idle = {
+		.tv_sec = 0,
+		.tv_nsec = 300000,
+	};
+
+	/*
+	 * There is no RT throttling over Cobalt, so we may need to
+	 * keep the host kernel breathing by napping during the test
+	 * sequences.
+	 */
+	if ((loops % 1000) == 0)
+		__RT(clock_nanosleep(CLOCK_MONOTONIC, 0, &idle, NULL));
+}
+
+static inline void harden(void)
+{
+	cobalt_thread_harden();
+}
+
+#else
+
+static inline void breathe(int loops) { }
+
+static inline void harden(void) { }
+
+#endif
+
+static inline long diff_ts(struct timespec *left, struct timespec *right)
+{
+	return (long long)(left->tv_sec - right->tv_sec) * ONE_BILLION
+		+ left->tv_nsec - right->tv_nsec;
+}
+
+static inline void swap(void *left, void *right, const size_t size)
+{
+	char trans[size];
+
+	memcpy(trans, left, size);
+	memcpy(left, right, size);
+	memcpy(right, trans, size);
+}
+
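+/* Fisher-Yates shuffle, using random() to pick each swap partner. */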
+static void random_shuffle(void *vbase, size_t nmemb, const size_t size)
+{
+	char *base = (char *)vbase;
+	unsigned int j, k;
+	double u;
+
+	for (j = nmemb; j > 0; j--) {
+		breathe(j);
+		u = (double)random() / RAND_MAX;
+		k = (unsigned int)(j * u) + 1;
+		if (j == k)
+			continue;
+		swap(base + (j - 1) * size, base + (k - 1) * size, size);
+	}
+}
+
+/* Reverse sort, high values first. */
+
+#define compare_values(l, r)			\
+	({					\
+		typeof(l) _l = (l);		\
+		typeof(r) _r = (r);		\
+		(_l > _r) - (_l < _r);		\
+	})
+
+static int sort_by_heap_size(const void *l, const void *r)
+{
+	const struct memcheck_stat *ls = l, *rs = r;
+
+	return compare_values(rs->heap_size, ls->heap_size);
+}
+
+static int sort_by_alloc_time(const void *l, const void *r)
+{
+	const struct memcheck_stat *ls = l, *rs = r;
+
+	return compare_values(rs->alloc_max_ns, ls->alloc_max_ns);
+}
+
+static int sort_by_free_time(const void *l, const void *r)
+{
+	const struct memcheck_stat *ls = l, *rs = r;
+
+	return compare_values(rs->free_max_ns, ls->free_max_ns);
+}
+
+static int sort_by_frag(const void *l, const void *r)
+{
+	const struct memcheck_stat *ls = l, *rs = r;
+
+	return compare_values(rs->maximum_free - rs->largest_free,
+			      ls->maximum_free - ls->largest_free);
+}
+
+static int sort_by_overhead(const void *l, const void *r)
+{
+	const struct memcheck_stat *ls = l, *rs = r;
+
+	return compare_values(rs->heap_size - rs->user_size,
+			      ls->heap_size - ls->user_size);
+}
+
+static inline const char *get_debug_state(void)
+{
+#if defined(CONFIG_XENO_DEBUG_FULL)
+	return "\n(CAUTION: full debug enabled)";
+#elif defined(CONFIG_XENO_DEBUG)
+	return "\n(debug partially enabled)";
+#else
+	return "";
+#endif
+}
+
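+/*
+ * Column formulas used below: OVRH% = 100 - user_size * 100 / heap_size,
+ * FRAG% = (1 - largest_free / maximum_free) * 100, i.e. how far the
+ * largest free chunk falls short of the total known free space.
+ */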
+static void __dump_stats(struct memcheck_descriptor *md,
+			 struct memcheck_stat *stats,
+			 int (*sortfn)(const void *l, const void *r),
+			 int nr, const char *key)
+{
+	struct memcheck_stat *p;
+	int n;
+
+	qsort(stats, nrstats, sizeof(*p), sortfn);
+
+	smokey_trace("\nsorted by: max %s\n%8s  %7s  %7s  %5s  %5s  %5s  %5s   %5s  %5s  %s",
+		     key, "HEAPSZ", "BLOCKSZ", "NRBLKS", "AVG-A",
+		     "AVG-F", "MAX-A", "MAX-F", "OVRH%", "FRAG%", "FLAGS");
+
+	for (n = 0; n < nr; n++) {
+		p = stats + n;
+		smokey_trace("%7zuk  %7zu%s  %6d  %5.1f  %5.1f  %5.1f  %5.1f   %4.1f  %5.1f   %s%s%s",
+			     p->heap_size / 1024,
+			     p->block_size < 1024 ? p->block_size : p->block_size / 1024,
+			     p->block_size < 1024 ? " " : "k",
+			     p->nrblocks,
+			     (double)p->alloc_avg_ns/1000.0,
+			     (double)p->free_avg_ns/1000.0,
+			     (double)p->alloc_max_ns/1000.0,
+			     (double)p->free_max_ns/1000.0,
+			     100.0 - (p->user_size * 100.0 / p->heap_size),
+			     (1.0 - ((double)p->largest_free / p->maximum_free)) * 100.0,
+			     p->alloc_avg_ns == 0 && p->free_avg_ns == 0 ? "FAILED " : "",
+			     p->flags & MEMCHECK_SHUFFLE ? "+shuffle " : "",
+			     p->flags & MEMCHECK_HOT ? "+hot" : "");
+	}
+
+	if (nr < nrstats)
+		smokey_trace("  ... (%d results following) ...", nrstats - nr);
+}
+
+static int dump_stats(struct memcheck_descriptor *md, const char *title)
+{
+	long worst_alloc_max = 0, worst_free_max = 0;
+	double overhead_sum = 0.0, frag_sum = 0.0;
+	long max_alloc_sum = 0, max_free_sum = 0;
+	long avg_alloc_sum = 0, avg_free_sum = 0;
+	struct memcheck_stat *stats, *p, *next;
+	int n;
+
+	stats = __STD(malloc(sizeof(*p) * nrstats));
+	if (stats == NULL) {
+		smokey_warning("failed allocating memory");
+		return -ENOMEM;
+	}
+
+	for (n = 0, p = statistics; n < nrstats; n++, p = p->next)
+		stats[n] = *p;
+
+	smokey_trace("\n[%s] ON '%s'%s\n",
+		     title, md->name, get_debug_state());
+
+	smokey_trace("HEAPSZ	test heap size");
+	smokey_trace("BLOCKSZ	tested block size");
+	smokey_trace("NRBLKS	number of blocks allocatable in heap");
+	smokey_trace("AVG-A	average time to allocate block (us)");
+	smokey_trace("AVG-F	average time to free block (us)");
+	smokey_trace("MAX-A	max time to allocate block (us)");
+	smokey_trace("MAX-F	max time to free block (us)");
+	smokey_trace("OVRH%	overhead");
+	smokey_trace("FRAG%	external fragmentation");
+	smokey_trace("FLAGS	+shuffle: randomized free");
+	smokey_trace("    	+hot: measure after initial alloc/free pass (hot heap)");
+
+	if (max_results > 0) {
+		if (max_results > nrstats)
+			max_results = nrstats;
+		__dump_stats(md, stats, sort_by_alloc_time, max_results, "alloc time");
+		__dump_stats(md, stats, sort_by_free_time, max_results, "free time");
+		__dump_stats(md, stats, sort_by_overhead, max_results, "overhead");
+		__dump_stats(md, stats, sort_by_frag, max_results, "fragmentation");
+	} else if (max_results < 0)
+		__dump_stats(md, stats, sort_by_heap_size, nrstats, "heap size");
+
+	__STD(free(stats));
+
+	for (p = statistics; p; p = next) {
+		max_alloc_sum += p->alloc_max_ns;
+		max_free_sum += p->free_max_ns;
+		avg_alloc_sum += p->alloc_avg_ns;
+		avg_free_sum += p->free_avg_ns;
+		overhead_sum += 100.0 - (p->user_size * 100.0 / p->heap_size);
+		frag_sum += (1.0 - ((double)p->largest_free / p->maximum_free)) * 100.0;
+		if (p->alloc_max_ns > worst_alloc_max)
+			worst_alloc_max = p->alloc_max_ns;
+		if (p->free_max_ns > worst_free_max)
+			worst_free_max = p->free_max_ns;
+		next = p->next;
+		__STD(free(p));
+	}
+
+	smokey_trace("\noverall:");
+	smokey_trace("  worst alloc time: %.1f (us)",
+		     (double)worst_alloc_max / 1000.0);
+	smokey_trace("  worst free time: %.1f (us)",
+		     (double)worst_free_max / 1000.0);
+	smokey_trace("  average of max. alloc times: %.1f (us)",
+		     (double)max_alloc_sum / nrstats / 1000.0);
+	smokey_trace("  average of max. free times: %.1f (us)",
+		     (double)max_free_sum / nrstats / 1000.0);
+	smokey_trace("  average alloc time: %.1f (us)",
+		     (double)avg_alloc_sum / nrstats / 1000.0);
+	smokey_trace("  average free time: %.1f (us)",
+		     (double)avg_free_sum / nrstats / 1000.0);
+	smokey_trace("  average overhead: %.1f%%",
+		     (double)overhead_sum / nrstats);
+	smokey_trace("  average fragmentation: %.1f%%",
+		     (double)frag_sum / nrstats);
+
+	statistics = NULL;
+	nrstats = 0;
+
+	return 0;
+}
+
+static void fill_pattern(char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		*p++ = (char)(val % count);
+		val++;
+	}
+}
+
+static int check_pattern(const char *p, size_t size, enum pattern pat)
+{
+	unsigned int val, count;
+
+	switch (pat) {
+	case alphabet_series:
+		val = 'a';
+		count = 26;
+		break;
+	case digit_series:
+		val = '0';
+		count = 10;
+		break;
+	default:
+		val = 0;
+		count = 255;
+		break;
+	}
+
+	while (size-- > 0) {
+		if (*p++ != (char)(val % count))
+			return 0;
+		val++;
+	}
+
+	return 1;
+}
+
+static size_t find_largest_free(struct memcheck_descriptor *md,
+				size_t free_size, size_t block_size)
+{
+	void *p;
+
+	for (;;) {
+		p = md->alloc(md->heap, free_size);
+		if (p) {
+			md->free(md->heap, p);
+			break;
+		}
+		if (free_size <= block_size)
+			break;
+		free_size -= block_size;
+	}
+
+	return free_size;
+}
+
+/*
+ * The default test helper can exercise heap managers implemented in
+ * userland.
+ */
+static int default_test_seq(struct memcheck_descriptor *md,
+		    size_t heap_size, size_t block_size, int flags)
+{
+	size_t arena_size, user_size, largest_free, maximum_free, freed;
+	long alloc_sum_ns, alloc_avg_ns, free_sum_ns, free_avg_ns,
+		alloc_max_ns, free_max_ns, d;
+	int ret, n, k, maxblocks, nrblocks;
+	struct timespec start, end;
+	struct memcheck_stat *st;
+	struct sched_param param;
+	struct chunk *chunks;
+	bool done_frag;
+	void *mem, *p;
+
+	/* This switches to real-time mode over Cobalt. */
+	param.sched_priority = 1;
+	pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
+	arena_size = heap_size;
+	if (md->get_arena_size) {
+		arena_size = md->get_arena_size(heap_size);
+		if (arena_size == 0) {
+			smokey_trace("cannot get arena size for heap size %zu",
+				     heap_size);
+			return -ENOMEM;
+		}
+	}
+	
+	mem = __STD(malloc(arena_size));
+	if (mem == NULL)
+		return -ENOMEM;
+
+	maxblocks = heap_size / block_size;
+
+	ret = md->init(md->heap, mem, arena_size);
+	if (ret) {
+		smokey_trace("cannot init heap with arena size %zu",
+			     arena_size);
+		goto out;
+	}
+
+	chunks = calloc(maxblocks, sizeof(*chunks));
+	if (chunks == NULL) {
+		ret = -ENOMEM;
+		goto no_chunks;
+	}
+
+	if (md->get_usable_size(md->heap) != heap_size) {
+		smokey_trace("memory size inconsistency (%zu / %zu bytes)",
+			     heap_size, md->get_usable_size(md->heap));
+		goto bad;
+	}
+
+	user_size = 0;
+	alloc_avg_ns = 0;
+	free_avg_ns = 0;
+	alloc_max_ns = 0;
+	free_max_ns = 0;
+	largest_free = 0;
+	maximum_free = 0;
+
+	/*
+	 * With Cobalt, make sure to run in primary mode before the
+	 * first allocation call takes place, not to charge any switch
+	 * time to the allocator.
+	 */
+	harden();
+	for (n = 0, alloc_sum_ns = 0; ; n++) {
+		__RT(clock_gettime(CLOCK_MONOTONIC, &start));
+		p = md->alloc(md->heap, block_size);
+		__RT(clock_gettime(CLOCK_MONOTONIC, &end));
+		d = diff_ts(&end, &start);
+		if (d > alloc_max_ns)
+			alloc_max_ns = d;
+		alloc_sum_ns += d;
+		if (p == NULL)
+			break;
+		user_size += block_size;
+		if (n >= maxblocks) {
+			smokey_trace("too many blocks fetched"
+				     " (heap=%zu, block=%zu, "
+				     "got more than %d blocks)",
+				     heap_size, block_size, maxblocks);
+			goto bad;
+		}
+		chunks[n].ptr = p;
+		if (flags & MEMCHECK_PATTERN) {
+			chunks[n].pattern = (enum pattern)(random() % 3);
+			fill_pattern(chunks[n].ptr, block_size, chunks[n].pattern);
+		}
+		breathe(n);
+	}
+
+	nrblocks = n;
+	if (nrblocks == 0)
+		goto do_stats;
+
+	if ((flags & MEMCHECK_ZEROOVRD) && nrblocks != maxblocks) {
+		smokey_trace("too few blocks fetched, unexpected overhead"
+			     " (heap=%zu, block=%zu, "
+			     "got %d, less than %d blocks)",
+			     heap_size, block_size, nrblocks, maxblocks);
+		goto bad;
+	}
+
+	breathe(0);
+
+	/* Make sure we did not trash any busy block while allocating. */
+	if (flags & MEMCHECK_PATTERN) {
+		for (n = 0; n < nrblocks; n++) {
+			if (!check_pattern(chunks[n].ptr, block_size,
+					   chunks[n].pattern)) {
+				smokey_trace("corrupted block #%d on alloc"
+					     " sequence (pattern %d)",
+					     n, chunks[n].pattern);
+				goto bad;
+			}
+			breathe(n);
+		}
+	}
+	
+	if (flags & MEMCHECK_SHUFFLE)
+		random_shuffle(chunks, nrblocks, sizeof(*chunks));
+
+	/*
+	 * Release all blocks.
+	 */
+	harden();
+	for (n = 0, free_sum_ns = 0, freed = 0, done_frag = false;
+	     n < nrblocks; n++) {
+		__RT(clock_gettime(CLOCK_MONOTONIC, &start));
+		ret = md->free(md->heap, chunks[n].ptr);
+		__RT(clock_gettime(CLOCK_MONOTONIC, &end));
+		if (ret) {
+			smokey_trace("failed to free block %p "
+				     "(heap=%zu, block=%zu)",
+				     chunks[n].ptr, heap_size, block_size);
+			goto bad;
+		}
+		d = diff_ts(&end, &start);
+		if (d > free_max_ns)
+			free_max_ns = d;
+		free_sum_ns += d;
+		chunks[n].ptr = NULL;
+		/* Make sure we did not trash busy blocks while freeing. */
+		if (flags & MEMCHECK_PATTERN) {
+			for (k = 0; k < nrblocks; k++) {
+				if (chunks[k].ptr &&
+				    !check_pattern(chunks[k].ptr, block_size,
+						   chunks[k].pattern)) {
+					smokey_trace("corrupted block #%d on release"
+						     " sequence (pattern %d)",
+						     k, chunks[k].pattern);
+					goto bad;
+				}
+				breathe(k);
+			}
+		}
+		freed += block_size;
+		/*
+		 * Get a sense of the fragmentation for the tested
+		 * allocation pattern, heap and block sizes when half
+		 * of the usable heap size should be available to us.
+		 * NOTE: user_size excludes the overhead, this is
+		 * actually what we managed to get from the current
+		 * heap out of the allocation loop.
+		 */
+		if (!done_frag && freed >= user_size / 2) {
+			/* Calculate the external fragmentation. */
+			largest_free = find_largest_free(md, freed, block_size);
+			maximum_free = freed;
+			done_frag = true;
+		}
+		breathe(n);
+	}
+
+	/*
+	 * If the deallocation mechanism is broken, we might not be
+	 * able to reproduce the same allocation pattern with the same
+	 * outcome, check this.
+	 */
+	if (flags & MEMCHECK_HOT) {
+		for (n = 0, alloc_max_ns = alloc_sum_ns = 0; ; n++) {
+			__RT(clock_gettime(CLOCK_MONOTONIC, &start));
+			p = md->alloc(md->heap, block_size);
+			__RT(clock_gettime(CLOCK_MONOTONIC, &end));
+			d = diff_ts(&end, &start);
+			if (d > alloc_max_ns)
+				alloc_max_ns = d;
+			alloc_sum_ns += d;
+			if (p == NULL)
+				break;
+			if (n >= maxblocks) {
+				smokey_trace("too many blocks fetched during hot pass"
+					     " (heap=%zu, block=%zu, "
+					     "got more than %d blocks)",
+					     heap_size, block_size, maxblocks);
+				goto bad;
+			}
+			chunks[n].ptr = p;
+			breathe(n);
+		}
+		if (n != nrblocks) {
+			smokey_trace("inconsistent block count fetched during hot pass"
+				     " (heap=%zu, block=%zu, "
+				     "got %d blocks vs %d during alloc)",
+				     heap_size, block_size, n, nrblocks);
+			goto bad;
+		}
+		for (n = 0, free_max_ns = free_sum_ns = 0; n < nrblocks; n++) {
+			__RT(clock_gettime(CLOCK_MONOTONIC, &start));
+			ret = md->free(md->heap, chunks[n].ptr);
+			__RT(clock_gettime(CLOCK_MONOTONIC, &end));
+			if (ret) {
+				smokey_trace("failed to free block %p during hot pass"
+					     "(heap=%zu, block=%zu)",
+					     chunks[n].ptr, heap_size, block_size);
+				goto bad;
+			}
+			d = diff_ts(&end, &start);
+			if (d > free_max_ns)
+				free_max_ns = d;
+			free_sum_ns += d;
+			breathe(n);
+		}
+	}
+
+	alloc_avg_ns = alloc_sum_ns / nrblocks;
+	free_avg_ns = free_sum_ns / nrblocks;
+
+	if ((flags & MEMCHECK_ZEROOVRD) && heap_size != user_size) {
+		smokey_trace("unexpected overhead reported");
+		goto bad;
+	}
+
+	if (md->get_used_size(md->heap) > 0) {
+		smokey_trace("memory leakage reported: %zu bytes missing",
+			     md->get_used_size(md->heap));
+		goto bad;
+	}
+		
+	/*
+	 * Don't report stats when running a pattern check, timings
+	 * are affected.
+	 */
+do_stats:
+	breathe(0);
+	ret = 0;
+	if (!(flags & MEMCHECK_PATTERN)) {
+		st = __STD(malloc(sizeof(*st)));
+		if (st == NULL) {
+			smokey_warning("failed allocating memory");
+			ret = -ENOMEM;
+			goto done;
+		}
+		st->heap_size = heap_size;
+		st->user_size = user_size;
+		st->block_size = block_size;
+		st->nrblocks = nrblocks;
+		st->alloc_avg_ns = alloc_avg_ns;
+		st->alloc_max_ns = alloc_max_ns;
+		st->free_avg_ns = free_avg_ns;
+		st->free_max_ns = free_max_ns;
+		st->largest_free = largest_free;
+		st->maximum_free = maximum_free;
+		st->flags = flags;
+		memcheck_log_stat(st);
+	}
+
+done:
+	__STD(free(chunks));
+no_chunks:
+	md->destroy(md->heap);
+out:
+	if (ret)
+		smokey_trace("** '%s' FAILED(overhead %s, %sshuffle, %scheck, %shot): heapsz=%zuk, "
+			     "blocksz=%zu, overhead=%zu (%.1f%%)",
+			     md->name,
+			     flags & MEMCHECK_ZEROOVRD ? "disallowed" : "allowed",
+			     flags & MEMCHECK_SHUFFLE ? "" : "no ",
+			     flags & MEMCHECK_PATTERN ? "" : "no ",
+			     flags & MEMCHECK_HOT ? "" : "no ",
+			     heap_size / 1024, block_size,
+			     arena_size - heap_size,
+			     (arena_size * 100.0 / heap_size) - 100.0);
+oom:
+	__STD(free(mem));
+
+	param.sched_priority = 0;
+	pthread_setschedparam(pthread_self(), SCHED_OTHER, &param);
+
+	return ret;
+bad:
+	ret = -EPROTO;
+	goto done;
+}
+
+static inline int test_flags(struct memcheck_descriptor *md, int flags)
+{
+	return md->valid_flags & flags;
+}
+
+void memcheck_log_stat(struct memcheck_stat *st)
+{
+	st->next = statistics;
+	statistics = st;
+	nrstats++;
+}
+
+int memcheck_run(struct memcheck_descriptor *md,
+		 struct smokey_test *t,
+		 int argc, char *const argv[])
+{
+	int (*test_seq)(struct memcheck_descriptor *md,
+			size_t heap_size, size_t block_size, int flags);
+	size_t heap_size, block_size;
+	cpu_set_t affinity;
+	unsigned long seed;
+	int ret, runs;
+	time_t now;
+	void *p;
+
+	/* Populate the malloc arena early to limit mode switching. */
+	p = __STD(malloc(2 * 1024 * 1024));
+	__STD(free(p));
+
+	smokey_parse_args(t, argc, argv);
+	
+	if (smokey_arg_isset(t, "seq_heap_size"))
+		md->seq_max_heap_size = smokey_arg_size(t, "seq_heap_size");
+	
+	if (smokey_arg_isset(t, "random_alloc_rounds"))
+		md->random_rounds = smokey_arg_int(t, "random_alloc_rounds");
+	
+	if (smokey_arg_isset(t, "pattern_heap_size"))
+		md->pattern_heap_size = smokey_arg_size(t, "pattern_heap_size");
+	
+	if (smokey_arg_isset(t, "pattern_check_rounds"))
+		md->pattern_rounds = smokey_arg_int(t, "pattern_check_rounds");
+
+	if (smokey_arg_isset(t, "max_results"))
+		max_results = smokey_arg_int(t, "max_results");
+
+	test_seq = md->test_seq;
+	if (test_seq == NULL)
+		test_seq = default_test_seq;
+
+	now = time(NULL);
+	seed = (unsigned long)now * getpid();
+	srandom(seed);
+
+	smokey_trace("== memcheck started for %s at %s", md->name, ctime(&now));
+	smokey_trace("     seq_heap_size=%zuk", md->seq_max_heap_size / 1024);
+	smokey_trace("     random_alloc_rounds=%d", md->random_rounds);
+	smokey_trace("     pattern_heap_size=%zuk", md->pattern_heap_size / 1024);
+	smokey_trace("     pattern_check_rounds=%d", md->pattern_rounds);
+	
+	CPU_ZERO(&affinity);
+	CPU_SET(0, &affinity);
+	ret = sched_setaffinity(0, sizeof(affinity), &affinity);
+	if (ret) {
+		smokey_warning("failed setting CPU affinity");
+		return -errno;
+	}
+
+	/*
+	 * Create a series of heaps of increasing size, allocating
+	 * then freeing all blocks sequentially from them, ^2 block
+	 * sizes up to half of the heap size. Test multiple patterns:
+	 *
+	 * - alloc -> free_in_alloc_order
+	 * - alloc -> free_in_alloc_order -> (re)alloc
+	 * - alloc -> free_in_random_order
+	 * - alloc -> free_in_random_order -> (re)alloc
+	 */
+	for (heap_size = md->seq_min_heap_size;
+	     heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+		for (block_size = 16;
+		     block_size < heap_size / 2; block_size <<= 1) {
+			ret = test_seq(md, heap_size, block_size,
+					   test_flags(md, MEMCHECK_ZEROOVRD));
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (pow2)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+		for (block_size = 16;
+		     block_size < heap_size / 2; block_size <<= 1) {
+			ret = test_seq(md, heap_size, block_size,
+			   test_flags(md, MEMCHECK_ZEROOVRD|MEMCHECK_HOT));
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (pow2, hot)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+		for (block_size = 16;
+		     block_size < heap_size / 2; block_size <<= 1) {
+			ret = test_seq(md, heap_size, block_size,
+			       test_flags(md, MEMCHECK_ZEROOVRD|MEMCHECK_SHUFFLE));
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (pow2, shuffle)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+		for (block_size = 16;
+		     block_size < heap_size / 2; block_size <<= 1) {
+			ret = test_seq(md, heap_size, block_size,
+			       test_flags(md, MEMCHECK_ZEROOVRD|MEMCHECK_HOT|MEMCHECK_SHUFFLE));
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (pow2, shuffle, hot)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+	}
+
+	ret = dump_stats(md, "SEQUENTIAL ALLOC->FREE, ^2 BLOCK SIZES");
+	if (ret)
+		return ret;
+
+	/*
+	 * Create a series of heaps of increasing size, allocating
+	 * then freeing all blocks sequentially from them, random
+	 * block sizes. Test multiple patterns as previously with ^2
+	 * block sizes.
+	 */
+	for (heap_size = md->seq_min_heap_size;
+	     heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+		for (runs = 0; runs < md->random_rounds; runs++) {
+			block_size = (random() % (heap_size / 2)) ?: 1;
+			ret = test_seq(md, heap_size, block_size, 0);
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (random)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+	}
+	
+	for (heap_size = md->seq_min_heap_size;
+	     heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+		for (runs = 0; runs < md->random_rounds; runs++) {
+			block_size = (random() % (heap_size / 2)) ?: 1;
+			ret = test_seq(md, heap_size, block_size,
+				       test_flags(md, MEMCHECK_HOT));
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (random, hot)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+	}
+	
+	for (heap_size = md->seq_min_heap_size;
+	     heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+		for (runs = 0; runs < md->random_rounds; runs++) {
+			block_size = (random() % (heap_size / 2)) ?: 1;
+			ret = test_seq(md, heap_size, block_size,
+				       test_flags(md, MEMCHECK_SHUFFLE));
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (random, shuffle)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+	}
+	
+	for (heap_size = md->seq_min_heap_size;
+	     heap_size < md->seq_max_heap_size; heap_size <<= 1) {
+		for (runs = 0; runs < md->random_rounds; runs++) {
+			block_size = (random() % (heap_size / 2)) ?: 1;
+			ret = test_seq(md, heap_size, block_size,
+			       test_flags(md, MEMCHECK_HOT|MEMCHECK_SHUFFLE));
+			if (ret) {
+				smokey_trace("failed with %zuk heap, "
+					     "%zu-byte block (random, shuffle, hot)",
+					     heap_size / 1024, block_size);
+				return ret;
+			}
+		}
+	}
+
+	ret = dump_stats(md, "SEQUENTIAL ALLOC->FREE, RANDOM BLOCK SIZES");
+	if (ret)
+		return ret;
+
+	smokey_trace("\n(running the pattern check test for '%s'"
+		     " -- this may take some time)", md->name);
+
+	for (runs = 0; runs < md->pattern_rounds; runs++) {
+		block_size = (random() % (md->pattern_heap_size / 2)) ?: 1;
+		ret = test_seq(md, md->pattern_heap_size, block_size,
+			       test_flags(md, MEMCHECK_SHUFFLE|MEMCHECK_PATTERN));
+		if (ret) {
+			smokey_trace("failed with %zuk heap, "
+				     "%zu-byte block (random, shuffle, check)",
+				     md->pattern_heap_size / 1024, block_size);
+			return ret;
+		}
+	}
+	
+	now = time(NULL);
+	smokey_trace("\n== memcheck finished for %s at %s",
+		     md->name, ctime(&now));
+
+	return ret;
+}
+
+#ifdef CONFIG_XENO_COBALT
+
+#include <cobalt/tunables.h>
+
+static int memcheck_tune(void)
+{
+	set_config_tunable(print_buffer_size, 512 * 1024);
+
+	return 0;
+}
+
+static struct setup_descriptor memcheck_setup = {
+	.name = "memcheck",
+	.tune = memcheck_tune,
+};
+
+user_setup_call(memcheck_setup);
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.h b/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.h
new file mode 100644
index 0000000..b367726
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memcheck/memcheck.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#ifndef SMOKEY_MEMCHECK_H
+#define SMOKEY_MEMCHECK_H
+
+#include <sys/types.h>
+#include <boilerplate/ancillaries.h>
+#include <smokey/smokey.h>
+
+/* Must match RTTST_HEAPCHECK_* flags in uapi/testing.h */
+#define MEMCHECK_ZEROOVRD   1
+#define MEMCHECK_SHUFFLE    2
+#define MEMCHECK_PATTERN    4
+#define MEMCHECK_HOT        8
+#define MEMCHECK_ALL_FLAGS  0xf
+
+struct memcheck_stat {
+	size_t heap_size;
+	size_t user_size;
+	size_t block_size;
+	size_t maximum_free;
+	size_t largest_free;
+	int nrblocks;
+	long alloc_avg_ns;
+	long alloc_max_ns;
+	long free_avg_ns;
+	long free_max_ns;
+	int flags;
+	struct memcheck_stat *next;
+};
+
+struct memcheck_descriptor {
+	const char *name;
+	int (*init)(void *heap, void *mem, size_t heap_size);
+	void (*destroy)(void *heap);
+	void *(*alloc)(void *heap, size_t size);
+	int (*free)(void *heap, void *block);
+	size_t (*get_used_size)(void *heap);
+	size_t (*get_usable_size)(void *heap);
+	size_t (*get_arena_size)(size_t heap_size);
+	size_t seq_min_heap_size;
+	size_t seq_max_heap_size;
+	int random_rounds;
+	size_t pattern_heap_size;
+	int pattern_rounds;
+	void *heap;
+	int valid_flags;
+	int (*test_seq)(struct memcheck_descriptor *md,
+			size_t heap_size, size_t block_size, int flags);
+};
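+
+/*
+ * A heap back-end fills in the hooks above and hands the descriptor
+ * to memcheck_run(); test_seq may be overridden when the sequence
+ * runs somewhere else than userland (e.g. in the Cobalt core).
+ */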
+
+#define HEAP_INIT_T(__p)    ((int (*)(void *heap, void *mem, size_t size))(__p))
+#define HEAP_DESTROY_T(__p) ((void (*)(void *heap))(__p))
+#define HEAP_ALLOC_T(__p)   ((void *(*)(void *heap, size_t size))(__p))
+#define HEAP_FREE_T(__p)    ((int (*)(void *heap, void *block))(__p))
+#define HEAP_USED_T(__p)    ((size_t (*)(void *heap))(__p))
+#define HEAP_USABLE_T(__p)  ((size_t (*)(void *heap))(__p))
+
+#define MEMCHECK_ARGS					\
+	SMOKEY_ARGLIST(					\
+		SMOKEY_SIZE(seq_heap_size),		\
+		SMOKEY_SIZE(pattern_heap_size),		\
+		SMOKEY_INT(random_alloc_rounds),	\
+		SMOKEY_INT(pattern_check_rounds),	\
+		SMOKEY_INT(max_results),		\
+	)
+
+#define MEMCHECK_HELP_STRINGS						\
+	"\tseq_heap_size=<size[K|M|G]>\tmax. heap size for sequential alloc tests\n" \
+	"\tpattern_heap_size=<size[K|M|G]>\tmax. heap size for pattern check test\n" \
+	"\trandom_alloc_rounds=<N>\t\t# of rounds of random-size allocations\n" \
+	"\tpattern_check_rounds=<N>\t# of rounds of pattern check tests\n" \
+	"\tmax_results=<N>\t# of result lines (worst-case first, -1=all)\n" \
+	"\tSet --verbose=2 for detailed runtime statistics.\n"
+
+void memcheck_log_stat(struct memcheck_stat *st);
+
+int memcheck_run(struct memcheck_descriptor *md,
+		 struct smokey_test *t,
+		 int argc, char *const argv[]);
+
+#endif /* SMOKEY_MEMCHECK_H */
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/Makefile.am
new file mode 100644
index 0000000..d9a4466
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libmemory-coreheap.a
+
+libmemory_coreheap_a_SOURCES = coreheap.c
+
+libmemory_coreheap_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(srcdir)/..		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/coreheap.c b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/coreheap.c
new file mode 100644
index 0000000..56e84ed
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-coreheap/coreheap.c
@@ -0,0 +1,108 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <unistd.h>
+#include <stdlib.h>
+#include <fcntl.h>
+#include <pthread.h>
+#include <rtdm/testing.h>
+#include "memcheck/memcheck.h"
+
+smokey_test_plugin(memory_coreheap,
+		   MEMCHECK_ARGS,
+		   "Check for the Cobalt core allocator sanity.\n"
+		   MEMCHECK_HELP_STRINGS
+	);
+
+#define MIN_HEAP_SIZE  8192
+#define MAX_HEAP_SIZE  (1024 * 1024 * 2)
+#define RANDOM_ROUNDS  1024
+
+#define PATTERN_HEAP_SIZE  (128*1024)
+#define PATTERN_ROUNDS     128
+
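+/*
+ * Unlike the userland back-ends, this variant delegates the whole
+ * alloc/free sequence to the in-kernel heapcheck driver and only
+ * collects the statistics the driver recorded.
+ */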
+static int kernel_test_seq(struct memcheck_descriptor *md,
+		    size_t heap_size, size_t block_size, int flags)
+{
+	struct rttst_heap_stathdr sthdr = { .buf = NULL };
+	struct rttst_heap_parms parms;
+	struct rttst_heap_stats *p;
+	struct memcheck_stat *st;
+	struct sched_param param;
+	int fd, ret, n;
+
+	fd = __RT(open("/dev/rtdm/heapcheck", O_RDWR));
+	if (fd < 0)
+		return -ENOSYS;
+
+	/* This switches to real-time mode over Cobalt. */
+	param.sched_priority = 1;
+	pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+
+	parms.heap_size = heap_size;
+	parms.block_size = block_size;
+	parms.flags = flags;
+	ret = __RT(ioctl(fd, RTTST_RTIOC_HEAP_CHECK, &parms));
+	if (ret)
+		goto out;
+
+	if (parms.nrstats == 0)
+		goto out;
+
+	sthdr.nrstats = parms.nrstats;
+	sthdr.buf = __STD(malloc(sizeof(*sthdr.buf) * parms.nrstats));
+	if (sthdr.buf == NULL) {
+		ret = -ENOMEM;
+		goto out;
+	}
+	
+	ret = __RT(ioctl(fd, RTTST_RTIOC_HEAP_STAT_COLLECT, &sthdr));
+	if (ret)
+		goto out;
+
+	for (n = sthdr.nrstats, p = sthdr.buf; n > 0; n--, p++) {
+		st = __STD(malloc(sizeof(*st)));
+		if (st == NULL) {
+			ret = -ENOMEM;
+			goto out;
+		}
+		st->heap_size = p->heap_size;
+		st->user_size = p->user_size;
+		st->block_size = p->block_size;
+		st->nrblocks = p->nrblocks;
+		st->alloc_avg_ns = p->alloc_avg_ns;
+		st->alloc_max_ns = p->alloc_max_ns;
+		st->free_avg_ns = p->free_avg_ns;
+		st->free_max_ns = p->free_max_ns;
+		st->maximum_free = p->maximum_free;
+		st->largest_free = p->largest_free;
+		st->flags = p->flags;
+		memcheck_log_stat(st);
+	}
+out:
+	/* sthdr.buf is NULL-initialized, so free() is safe on every path. */
+	__STD(free(sthdr.buf));
+	__RT(close(fd));
+
+	param.sched_priority = 0;
+	pthread_setschedparam(pthread_self(), SCHED_OTHER, &param);
+
+	return ret;
+}
+
+static struct memcheck_descriptor coreheap_descriptor = {
+	.name = "coreheap",
+	.seq_min_heap_size = MIN_HEAP_SIZE,
+	.seq_max_heap_size = MAX_HEAP_SIZE,
+	.random_rounds = RANDOM_ROUNDS,
+	.pattern_heap_size = PATTERN_HEAP_SIZE,
+	.pattern_rounds = PATTERN_ROUNDS,
+	.valid_flags = MEMCHECK_ALL_FLAGS,
+	.test_seq = kernel_test_seq,
+};
+
+static int run_memory_coreheap(struct smokey_test *t,
+			       int argc, char *const argv[])
+{
+	return memcheck_run(&coreheap_descriptor, t, argc, argv);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/Makefile.am
new file mode 100644
index 0000000..35c4a94
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libmemory-heapmem.a
+
+libmemory_heapmem_a_SOURCES = heapmem.c
+
+libmemory_heapmem_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(srcdir)/..		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/heapmem.c b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/heapmem.c
new file mode 100644
index 0000000..1ce7b4e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-heapmem/heapmem.c
@@ -0,0 +1,51 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <boilerplate/heapmem.h>
+#include "memcheck/memcheck.h"
+
+smokey_test_plugin(memory_heapmem,
+		   MEMCHECK_ARGS,
+		   "Check for the heapmem allocator sanity.\n"
+		   MEMCHECK_HELP_STRINGS
+	);
+
+#define MIN_HEAP_SIZE  8192
+#define MAX_HEAP_SIZE  (1024 * 1024 * 2)
+#define RANDOM_ROUNDS  1024
+
+#define PATTERN_HEAP_SIZE  (128*1024)
+#define PATTERN_ROUNDS     128
+
+static struct heap_memory heap;
+
+static size_t get_arena_size(size_t heap_size)
+{
+	return HEAPMEM_ARENA_SIZE(heap_size);
+}
+
+static struct memcheck_descriptor heapmem_descriptor = {
+	.name = "heapmem",
+	.init = HEAP_INIT_T(heapmem_init),
+	.destroy = HEAP_DESTROY_T(heapmem_destroy),
+	.alloc = HEAP_ALLOC_T(heapmem_alloc),
+	.free = HEAP_FREE_T(heapmem_free),
+	.get_usable_size = HEAP_USABLE_T(heapmem_usable_size),
+	.get_used_size = HEAP_USED_T(heapmem_used_size),
+	.seq_min_heap_size = MIN_HEAP_SIZE,
+	.seq_max_heap_size = MAX_HEAP_SIZE,
+	.random_rounds = RANDOM_ROUNDS,
+	.pattern_heap_size = PATTERN_HEAP_SIZE,
+	.pattern_rounds = PATTERN_ROUNDS,
+	.heap = &heap,
+	.get_arena_size = get_arena_size,
+	.valid_flags = MEMCHECK_ALL_FLAGS,
+};
+
+static int run_memory_heapmem(struct smokey_test *t,
+			      int argc, char *const argv[])
+{
+	return memcheck_run(&heapmem_descriptor, t, argc, argv);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/Makefile.am
new file mode 100644
index 0000000..59df783
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libmemory-pshared.a
+
+libmemory_pshared_a_SOURCES = pshared.c
+
+libmemory_pshared_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@		\
+	-I$(srcdir)/..			\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/pshared.c b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/pshared.c
new file mode 100644
index 0000000..f94b109
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-pshared/pshared.c
@@ -0,0 +1,121 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <xenomai/init.h>
+#include <xenomai/tunables.h>
+#include <copperplate/heapobj.h>
+#include "memcheck/memcheck.h"
+
+smokey_test_plugin(memory_pshared,
+		   MEMCHECK_ARGS,
+		   "Check for the pshared allocator sanity.\n"
+		   MEMCHECK_HELP_STRINGS
+	);
+
+#define MIN_HEAP_SIZE  8192
+#define MAX_HEAP_SIZE  (1024 * 1024 * 2)
+#define RANDOM_ROUNDS  1024
+
+#define PATTERN_HEAP_SIZE  (128*1024)
+#define PATTERN_ROUNDS     128
+
+static struct heapobj heap;
+
+static int do_pshared_init(void *heap, void *mem, size_t arena_size)
+{
+	/* mem is ignored, pshared uses its own memory. */
+	return heapobj_init(heap, "memcheck", arena_size);
+}
+
+static void do_pshared_destroy(void *heap)
+{
+	heapobj_destroy(heap);
+}
+
+static void *do_pshared_alloc(void *heap, size_t size)
+{
+	return heapobj_alloc(heap, size);
+}
+
+static int do_pshared_free(void *heap, void *block)
+{
+	heapobj_free(heap, block);
+
+	return 0;	/* heapobj_free() returns no status; assume success. */
+}
+
+static size_t do_pshared_used_size(void *heap)
+{
+	return heapobj_inquire(heap);
+}
+
+static size_t do_pshared_usable_size(void *heap)
+{
+	return heapobj_get_size(heap);
+}
+
+static size_t do_pshared_arena_size(size_t heap_size)
+{
+	struct heapobj h;
+	size_t overhead;
+	int ret;
+
+	ret = heapobj_init(&h, "memcheck", heap_size);
+	if (ret)
+		return 0;
+
+	overhead = heap_size - heapobj_get_size(&h);
+	heapobj_destroy(&h);
+
+	/*
+	 * pshared must have no external overhead, since
+	 * heapobj_init() allocates the memory it needs.  Make sure
+	 * this assumption is correct for any tested size.
+	 */
+	return overhead == 0 ? heap_size : 0;
+}
+
+static struct memcheck_descriptor pshared_descriptor = {
+	.name = "pshared",
+	.init = HEAP_INIT_T(do_pshared_init),
+	.destroy = HEAP_DESTROY_T(do_pshared_destroy),
+	.alloc = HEAP_ALLOC_T(do_pshared_alloc),
+	.free = HEAP_FREE_T(do_pshared_free),
+	.get_usable_size = HEAP_USABLE_T(do_pshared_usable_size),
+	.get_used_size = HEAP_USED_T(do_pshared_used_size),
+	.get_arena_size = do_pshared_arena_size,
+	.seq_min_heap_size = MIN_HEAP_SIZE,
+	.seq_max_heap_size = MAX_HEAP_SIZE,
+	.random_rounds = RANDOM_ROUNDS,
+	.pattern_heap_size = PATTERN_HEAP_SIZE,
+	.pattern_rounds = PATTERN_ROUNDS,
+	/* heapobj-pshared has overhead even for ^2 sizes; can't check for ZEROOVRD. */
+	.valid_flags = MEMCHECK_ALL_FLAGS & ~MEMCHECK_ZEROOVRD,
+	.heap = &heap,
+};
+
+static int run_memory_pshared(struct smokey_test *t,
+			      int argc, char *const argv[])
+{
+	return memcheck_run(&pshared_descriptor, t, argc, argv);
+}
+
+static int memcheck_pshared_tune(void)
+{
+	/*
+	 * We create test pools from the main one: make sure the
+	 * latter is large enough.
+	 */
+	set_config_tunable(mem_pool_size, MAX_HEAP_SIZE + 1024 * 1024);
+
+	return 0;
+}
+
+static struct setup_descriptor memcheck_pshared_setup = {
+	.name = "memcheck_pshared",
+	.tune = memcheck_pshared_tune,
+};
+
+user_setup_call(memcheck_pshared_setup);
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/Makefile.am
new file mode 100644
index 0000000..901e6fd
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libmemory-tlsf.a
+
+libmemory_tlsf_a_SOURCES = tlsf.c
+
+libmemory_tlsf_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@		\
+	-I$(top_srcdir)/lib/boilerplate	\
+	-I$(srcdir)/..			\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/tlsf.c b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/tlsf.c
new file mode 100644
index 0000000..5de9007
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/memory-tlsf/tlsf.c
@@ -0,0 +1,123 @@
+/*
+ * Copyright (C) 2018 Philippe Gerum <rpm@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+#include <tlsf/tlsf.h>
+#include <stdlib.h>
+#include <pthread.h>
+#include "memcheck/memcheck.h"
+
+smokey_test_plugin(memory_tlsf,
+		   MEMCHECK_ARGS,
+		   "Check for the TLSF allocator sanity.\n"
+		   MEMCHECK_HELP_STRINGS
+	);
+
+#define MIN_HEAP_SIZE  8192
+#define MAX_HEAP_SIZE  (1024 * 1024 * 2)
+#define RANDOM_ROUNDS  1024
+
+#define PATTERN_HEAP_SIZE  (128*1024)
+#define PATTERN_ROUNDS     128
+
+static struct memcheck_descriptor tlsf_descriptor;
+
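+/*
+ * This TLSF implementation is not thread-safe by itself; serialize
+ * all pool accesses.
+ */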
+static pthread_mutex_t tlsf_lock = PTHREAD_MUTEX_INITIALIZER;
+
+static size_t overhead;
+
+static size_t test_pool_size; /* TLSF does not save this information. */
+
+static int do_tlsf_init(void *dummy, void *mem, size_t pool_size)
+{
+	tlsf_descriptor.heap = mem;
+	return init_memory_pool(pool_size, mem) == -1L ? -ENOMEM : 0;
+}
+
+static void do_tlsf_destroy(void *pool)
+{
+	destroy_memory_pool(pool);
+}
+
+static void *do_tlsf_alloc(void *pool, size_t size)
+{
+	void *p;
+
+	pthread_mutex_lock(&tlsf_lock);
+	p = malloc_ex(size, pool);
+	pthread_mutex_unlock(&tlsf_lock);
+
+	return p;
+}
+
+static int do_tlsf_free(void *pool, void *block)
+{
+	pthread_mutex_lock(&tlsf_lock);
+	free_ex(block, pool);
+	pthread_mutex_unlock(&tlsf_lock);
+
+	return 0;	/* free_ex() returns no status; assume success. */
+}
+
+static size_t do_tlsf_used_size(void *pool)
+{
+	/* Do not count the overhead memory for the TLSF header. */
+	return get_used_size(pool) - overhead;
+}
+
+static size_t do_tlsf_usable_size(void *pool)
+{
+	return test_pool_size;
+}
+
+static size_t do_tlsf_arena_size(size_t pool_size)
+{
+	size_t available_size;
+	void *pool;
+
+	/*
+	 * The arena size is the total amount of memory an allocator
+	 * may need for managing a heap, including its metadata. We
+	 * need to figure out how much memory overhead TLSF has for a
+	 * given pool size, which we add to the ideal pool_size for
+	 * determining the arena size.
+	 */
+	test_pool_size = pool_size;
+	pool = __STD(malloc(pool_size));
+	available_size = init_memory_pool(pool_size, pool);
+	if (available_size == (size_t)-1) {
+		__STD(free(pool));
+		return 0;
+	}
+
+	destroy_memory_pool(pool);
+	overhead = pool_size - available_size;
+	__STD(free(pool));
+
+	return pool_size + overhead;
+}
+
+static struct memcheck_descriptor tlsf_descriptor = {
+	.name = "tlsf",
+	.init = HEAP_INIT_T(do_tlsf_init),
+	.destroy = HEAP_DESTROY_T(do_tlsf_destroy),
+	.alloc = HEAP_ALLOC_T(do_tlsf_alloc),
+	.free = HEAP_FREE_T(do_tlsf_free),
+	.get_usable_size = HEAP_USABLE_T(do_tlsf_usable_size),
+	.get_used_size = HEAP_USED_T(do_tlsf_used_size),
+	.get_arena_size = do_tlsf_arena_size,
+	.seq_min_heap_size = MIN_HEAP_SIZE,
+	.seq_max_heap_size = MAX_HEAP_SIZE,
+	.random_rounds = RANDOM_ROUNDS,
+	.pattern_heap_size = PATTERN_HEAP_SIZE,
+	.pattern_rounds = PATTERN_ROUNDS,
+	/* TLSF always has overhead, can't check for ZEROOVRD. */
+	.valid_flags = MEMCHECK_ALL_FLAGS & ~MEMCHECK_ZEROOVRD,
+};
+
+static int run_memory_tlsf(struct smokey_test *t,
+			   int argc, char *const argv[])
+{
+	return memcheck_run(&tlsf_descriptor, t, argc, argv);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/Makefile.am
new file mode 100644
index 0000000..71ee52f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/Makefile.am
@@ -0,0 +1,28 @@
+testdir = @XENO_TEST_DIR@
+test_PROGRAMS = smokey_net_server
+
+noinst_LIBRARIES = libnet_common.a
+noinst_HEADERS = \
+	smokey_net.h \
+	smokey_net_server.h
+
+AM_CPPFLAGS = \
+	@XENO_USER_CFLAGS@ \
+	-I$(top_srcdir)/include \
+	-I$(top_srcdir)/kernel/drivers/net/stack/include
+
+libnet_common_a_SOURCES = \
+	client.c \
+	server.c \
+	setup.c
+
+smokey_net_server_SOURCES = \
+	smokey_net_server.c
+
+smokey_net_server_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@
+
+smokey_net_server_LDADD = \
+	libnet_common.a \
+	@XENO_CORE_LDADD@ \
+	@XENO_USER_LDADD@ \
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/client.c b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/client.c
new file mode 100644
index 0000000..914bda4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/client.c
@@ -0,0 +1,298 @@
+#include <stdio.h>
+#include <stdbool.h>
+
+#include <unistd.h>
+#include <pthread.h>
+#include <sched.h>
+#include <sys/socket.h>
+#include <sys/select.h>
+
+#include "smokey_net.h"
+
+static int duration = 10;
+static int rate = 1000;
+static const char *driver = "rt_loopback";
+static const char *intf;
+static pthread_t tid;
+static unsigned long long glost, glate;
+
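+/*
+ * Wait for the echoed packet and update the statistics. Returns 0 on
+ * an in-order reply, -ETIMEDOUT when the deadline for the next shot
+ * expires, -EAGAIN on an out-of-sequence (late) reply, or a negative
+ * error code on failure.
+ */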
+static int rcv_packet(struct smokey_net_client *client, int sock, unsigned seq,
+		      struct timespec *next_shot, bool last, int *linesout)
+{
+	static unsigned long long gmin = ~0ULL, gmax = 0, gsum = 0, gcount = 0;
+	static unsigned long long min = ~0ULL, max = 0, sum = 0, count = 0,
+		lost = 0, late = 0;
+	static struct timespec last_print;
+	struct smokey_net_payload payload;
+	struct timeval timeout;
+	struct timespec now;
+	char packet[256];
+	long long diff;
+	fd_set set;
+	int err;
+
+	FD_ZERO(&set);
+	FD_SET(sock, &set);
+
+	err = smokey_check_errno(
+		__RT(clock_gettime(CLOCK_MONOTONIC, &now)));
+	if (err < 0)
+		return err;
+
+	diff = next_shot->tv_sec * 1000000000ULL + next_shot->tv_nsec
+		- (now.tv_sec * 1000000000ULL + now.tv_nsec);
+	if (diff < 0)
+		diff = 0;
+
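+	/* Convert the time left before the next shot into a timeval,
+	 * rounding to the nearest microsecond. */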
+	timeout.tv_sec = diff / 1000000000;
+	timeout.tv_usec = (diff % 1000000000 + 500) / 1000;
+
+	err = smokey_check_errno(
+		__RT(select(sock + 1, &set, NULL, NULL, &timeout)));
+	if (err < 0)
+		return err;
+
+	if (err == 0) {
+		if (seq)
+			++lost;
+		err = -ETIMEDOUT;
+		goto print_stats;
+	}
+
+	err = smokey_check_errno(
+		__RT(recv(sock, packet, sizeof(packet), 0)));
+	if (err < 0)
+		return err;
+
+	err = client->extract(client, &payload, packet, err);
+	if (err < 0)
+		return err;
+
+	err = smokey_check_errno(
+		__RT(clock_gettime(CLOCK_MONOTONIC, &now)));
+	if (err < 0)
+		return err;
+
+	diff = now.tv_sec * 1000000000ULL + now.tv_nsec
+		- (payload.ts.tv_sec * 1000000000ULL
+			+ payload.ts.tv_nsec);
+	if (diff < min)
+		min = diff;
+	if (diff > max)
+		max = diff;
+	sum += diff;
+	++count;
+
+	err = 0;
+	if (payload.seq != seq) {
+		++late;
+		err = -EAGAIN;
+	}
+
+  print_stats:
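+	/*
+	 * Emit a statistics line at most once per second; on the
+	 * first packet, backdate the reference time by one period so
+	 * the first line covers a full interval.
+	 */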
+	if (seq == 1 && !last_print.tv_sec) {
+		last_print = now;
+		if (last_print.tv_nsec < 1000000000 / rate) {
+			last_print.tv_nsec += 1000000000;
+			last_print.tv_sec--;
+		}
+		last_print.tv_nsec -= 1000000000 / rate;
+	}
+
+	diff = now.tv_sec * 1000000000ULL + now.tv_nsec
+		- (last_print.tv_sec * 1000000000ULL
+			+ last_print.tv_nsec);
+
+	if (diff < 1000000000LL && (!last || (!count && !lost)))
+		return err;
+
+	if (min < gmin)
+		gmin = min;
+	if (max > gmax)
+		gmax = max;
+	gsum += sum;
+	gcount += count;
+	glost += lost - late;
+	glate += late;
+
+	if (((*linesout)++ % 20) == 0) {
+		smokey_trace("\n   %-7s%6s%8s%8s%8s%8s%8s%10s",
+			     "PPS", "LOST", "LATE", "MIN", "MAX",
+			     "BEST", "AVG", "WORST");
+		smokey_trace("------------------------------------------------------------------");
+	}
+
+	smokey_trace("%8.2f  %6Ld  %6Ld     %.03g     %.03g     %.03g     %.03g     %.03g",
+		count / (diff / 1000000000.0),
+		glost,
+		glate,
+		count ? min / 1000.0 : 0,
+		count ? max / 1000.0 : 0,
+		gcount ? gmin / 1000.0 : 0,
+		gcount ? (gsum / (double)gcount) / 1000 : 0,
+		gcount ? gmax / 1000.0 : 0);
+
+	min = ~0ULL;
+	max = 0;
+	sum = 0;
+	count = 0;
+	lost = 0;
+	late = 0;
+	last_print = now;
+
+	return err;
+}
+
+static int smokey_net_client_loop(struct smokey_net_client *client)
+{
+	struct smokey_net_payload payload;
+	int sock, err, linesout = 0;
+	struct timespec next_shot;
+	struct sched_param prio;
+	char packet[256];
+	long long limit;
+
+	sock = client->create_socket(client);
+	if (sock < 0)
+		return sock;
+
+	prio.sched_priority = 20;
+	err = smokey_check_status(
+		pthread_setschedparam(pthread_self(), SCHED_FIFO, &prio));
+	if (err < 0)
+		return err;
+
+	err = smokey_check_errno(
+		__RT(clock_gettime(CLOCK_MONOTONIC, &next_shot)));
+	if (err < 0)
+		goto err;
+
+	smokey_trace("\nPPS, LOST, LATE: packet count");
+	smokey_trace("MIN, MAX, BEST, AVG, WORST: microseconds");
+
+	limit = (long long)rate * duration;
+	for (payload.seq = 1;
+	     limit <= 0 || payload.seq < limit + 1; payload.seq++) {
+		unsigned seq = payload.seq;
+
+		next_shot.tv_nsec += 1000000000 / rate;
+		if (next_shot.tv_nsec >= 1000000000) {
+			next_shot.tv_nsec -= 1000000000;
+			next_shot.tv_sec++;
+		}
+
+		err = smokey_check_errno(
+			__RT(clock_gettime(CLOCK_MONOTONIC, &payload.ts)));
+		if (err < 0)
+			goto err;
+
+		err = client->prepare(client, packet, sizeof(packet), &payload);
+		if (err < 0)
+			goto err;
+
+		err = smokey_check_errno(
+			__RT(sendto(sock, packet, err, 0,
+					&client->peer, client->peer_len)));
+		if (err < 0)
+			goto err;
+
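+		/*
+		 * Drain replies until the next shot is due; once the
+		 * expected reply arrives, clear the sequence number
+		 * so the final timeout is not counted as a loss.
+		 */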
+		do {
+			err = rcv_packet(client, sock, seq, &next_shot,
+					 payload.seq == limit, &linesout);
+			if (!err)
+				seq = 0;
+		} while (err != -ETIMEDOUT);
+	}
+
+	if (smokey_on_vm) {
+		glate = 0; /* ignore late arrivals */
+		if (glost != limit)
+			glost = 0; /* ignore some lost packets, not all */
+	}
+
+	if (glost || glate)
+		fprintf(stderr, "RTnet %s test failed", client->name);
+	if (glost) {
+		if (glost == limit)
+			fprintf(stderr, ", all packets lost"
+				" (is smokey_net_server running ?)");
+		else
+			fprintf(stderr, ", %Lu packets lost (%g %%)",
+				glost, 100.0 * glost / limit);
+	}
+	if (glate)
+		fprintf(stderr, ", %Lu overruns", glate);
+	if (glost || glate)
+		fputc('\n', stderr);
+	err = glost || glate ? -EPROTO : 0;
+
+  err:
+	sock = smokey_check_errno(__RT(close(sock)));
+	if (err == 0)
+		err = sock;
+
+	return err;
+}
+
+static void *trampoline(void *cookie)
+{
+	int err = smokey_net_client_loop(cookie);
+	pthread_exit((void *)(long)err);
+}
+
+int smokey_net_client_run(struct smokey_test *t,
+			struct smokey_net_client *client,
+			int argc, char *const argv[])
+{
+	int err, err_teardown;
+	void *status;
+
+	smokey_parse_args(t, argc, argv);
+
+	if (SMOKEY_ARG_ISSET(*t, rtnet_driver))
+		driver = SMOKEY_ARG_STRING(*t, rtnet_driver);
+
+	if (SMOKEY_ARG_ISSET(*t, rtnet_interface))
+		intf = SMOKEY_ARG_STRING(*t, rtnet_interface);
+
+	if (SMOKEY_ARG_ISSET(*t, rtnet_duration))
+		duration = SMOKEY_ARG_INT(*t, rtnet_duration);
+
+	if (SMOKEY_ARG_ISSET(*t, rtnet_rate)) {
+		rate = SMOKEY_ARG_INT(*t, rtnet_rate);
+		if (rate == 0) {
+			smokey_warning("rate can not be null");
+			return -EINVAL;
+		}
+	}
+
+	if (!intf)
+		intf = strcmp(driver, "rt_loopback") ? "rteth0" : "rtlo";
+
+	smokey_trace("Configuring interface %s (driver %s) for RTnet %s test",
+		intf, driver, client->name);
+
+	err = smokey_net_setup(driver, intf, client->option, &client->peer);
+	if (err < 0)
+		return err;
+
+	smokey_trace("Running RTnet %s test on interface %s",
+		client->name, intf);
+
+	err = smokey_check_status(
+		__RT(pthread_create(&tid, NULL, trampoline, client)));
+	if (err < 0)
+		return err;
+
+	err = smokey_check_status(pthread_join(tid, &status));
+	if (err < 0)
+		return err;
+
+	err = (int)(long)status;
+
+	err_teardown = smokey_net_teardown(driver, intf, client->option);
+	if (err == 0)
+		err = err_teardown;
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/server.c b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/server.c
new file mode 100644
index 0000000..39d59c3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/server.c
@@ -0,0 +1,178 @@
+/*
+ * RTnet test server loop
+ *
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <limits.h>
+#include <errno.h>
+#include <string.h>
+
+#include <unistd.h>
+#include <signal.h>
+#include <pthread.h>
+#include <sched.h>
+#include <fcntl.h>
+#include <sys/socket.h>
+#include <netinet/in.h>
+#include <netpacket/packet.h>
+#include <net/ethernet.h>
+
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+#include "smokey_net.h"
+#include "smokey_net_server.h"
+
+struct smokey_server {
+	sem_t sync;
+	int fds[0];
+};
+
+struct proto {
+	int config_flag;
+	int (*create_socket)(void);
+	void (*serve)(int fd);
+};
+
+static int udp_create_socket(void);
+static void udp_serve(int fd);
+static int packet_dgram_socket(void);
+static void packet_dgram_serve(int fd);
+
+static const struct proto protos[] = {
+	{
+		.config_flag = _CC_COBALT_NET_UDP,
+		.create_socket = &udp_create_socket,
+		.serve = &udp_serve,
+	},
+	{
+		.config_flag = _CC_COBALT_NET_AF_PACKET,
+		.create_socket = &packet_dgram_socket,
+		.serve = &packet_dgram_serve,
+	},
+};
+
+static int udp_create_socket(void)
+{
+	struct sockaddr_in name;
+	int fd;
+
+	fd = check_unix(__RT(socket(PF_INET, SOCK_DGRAM, 0)));
+
+	name.sin_family = AF_INET;
+	name.sin_port = htons(7); /* UDP echo service */
+	name.sin_addr.s_addr = htonl(INADDR_ANY);
+
+	check_unix(__RT(bind(fd, (struct sockaddr *)&name, sizeof(name))));
+
+	return fd;
+}
+
+static void udp_serve(int fd)
+{
+	struct smokey_net_payload pkt;
+	struct sockaddr_in peer;
+	socklen_t peer_len;
+	int err;
+
+	peer_len = sizeof(peer);
+	err = check_unix(
+		__RT(recvfrom(fd, &pkt, sizeof(pkt), 0,
+				(struct sockaddr *)&peer, &peer_len)));
+
+	check_unix(
+		__RT(sendto(fd, &pkt, err, 0,
+				(struct sockaddr *)&peer, peer_len)));
+}
+
+static int packet_dgram_socket(void)
+{
+	return check_unix(
+		__RT(socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_802_EX1))));
+}
+
+static void packet_dgram_serve(int fd)
+{
+	struct smokey_net_payload pkt;
+	struct sockaddr_ll peer;
+	socklen_t peer_len;
+	int err;
+
+	peer_len = sizeof(peer);
+	err = check_unix(
+		__RT(recvfrom(fd, &pkt, sizeof(pkt), 0,
+				(struct sockaddr *)&peer, &peer_len)));
+
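+	/* Reply on the next protocol number up (ETH_P_802_EX1 + 1),
+	 * which the test client listens on. */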
+	peer.sll_protocol = htons(ETH_P_802_EX1 + 1);
+	check_unix(
+		__RT(sendto(fd, &pkt, err, 0,
+				(struct sockaddr *)&peer, peer_len)));
+}
+
+static void server_loop_cleanup(void *cookie)
+{
+	int *fds = cookie;
+	int i;
+
+	for (i = 0; i < sizeof(protos)/sizeof(protos[0]); i++)
+		__RT(close(fds[i]));
+	free(fds);
+}
+
+void smokey_net_server_loop(int net_config)
+{
+	struct sched_param prio;
+	const struct proto *p;
+	int i, maxfd, *fds;
+	fd_set rfds;
+
+	fds = malloc(sizeof(*fds) * sizeof(protos)/sizeof(protos[0]));
+	if (fds == NULL)
+		pthread_exit((void *)(long)-ENOMEM);
+
+	pthread_cleanup_push(server_loop_cleanup, fds);
+
+	FD_ZERO(&rfds);
+	maxfd = 0;
+	for (i = 0; i < sizeof(protos)/sizeof(protos[0]); i++) {
+		p = &protos[i];
+
+		if ((net_config & p->config_flag) == 0) {
+			fds[i] = -1;
+			continue;
+		}
+
+		fds[i] = p->create_socket();
+		FD_SET(fds[i], &rfds);
+		if (fds[i] > maxfd)
+			maxfd = fds[i];
+	}
+
+	prio.sched_priority = 20;
+	check_pthread(
+		__RT(pthread_setschedparam(pthread_self(), SCHED_FIFO, &prio)));
+
+	for (;;) {
+		fd_set tfds;
+
+		tfds = rfds;
+
+		check_unix(__RT(select(maxfd + 1, &tfds, NULL, NULL, NULL)));
+
+		for (i = 0; i < sizeof(protos)/sizeof(protos[0]); i++) {
+			p = &protos[i];
+
+			if (fds[i] < 0 || !FD_ISSET(fds[i], &tfds))
+				continue;
+
+			p->serve(fds[i]);
+		}
+	}
+
+	pthread_cleanup_pop(1);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/setup.c b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/setup.c
new file mode 100644
index 0000000..f97c148
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/setup.c
@@ -0,0 +1,518 @@
+/*
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdbool.h>
+#include <errno.h>
+
+#include <unistd.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <arpa/inet.h>
+#include <netinet/ether.h>
+#include <netpacket/packet.h>
+
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+
+#include <rtnet_chrdev.h>
+#include <rtcfg_chrdev.h>
+#include "smokey_net.h"
+#include "smokey_net_server.h"
+
+
+struct module {
+	int option;
+	const char *name;
+};
+
+#define TIMEOUT 10
+
+static struct rtnet_core_cmd cmd;
+static int fd;
+static pthread_t loopback_server_tid;
+static bool loopback_thread_created, ifup;
+static struct module modules[] = {
+	{
+		.name = "rtnet",
+	},
+	{
+		.name = "rtipv4",
+	},
+	{
+		.name = "rtcfg",
+	},
+	{
+		.option = _CC_COBALT_NET_UDP,
+		.name = "rtudp",
+	},
+	{
+		.option = _CC_COBALT_NET_AF_PACKET,
+		.name = "rtpacket",
+	},
+	{
+		.name = NULL,	/* driver */
+	},
+};
+
+#define MODID_RTNET  0
+#define MODID_IPV4   1
+#define MODID_CFG    2
+#define MODID_UDP    3
+#define MODID_PACKET 4
+#define MODID_DRIVER 5
+
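+/*
+ * Map a _CC_COBALT_NET_* option to its index in modules[], or -1 if
+ * no module matches.
+ */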
+static int option_to_modid(int option)
+{
+	unsigned i;
+
+	for (i = 0; i < sizeof(modules)/sizeof(modules[0]); i++) {
+		if (modules[i].option != option)
+			continue;
+
+		return i;
+	}
+
+	return -1;
+}
+
+static int get_info(const char *intf)
+{
+	int err;
+
+	err = smokey_check_errno(
+		snprintf(cmd.head.if_name, sizeof(cmd.head.if_name),
+			"%s", intf));
+	if (err < 0)
+		return err;
+
+	cmd.args.info.ifindex = 0;
+
+	err = smokey_check_errno(ioctl(fd, IOC_RT_IFINFO, &cmd));
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int do_up(const char *intf)
+{
+	int err;
+
+	snprintf(cmd.head.if_name, sizeof(cmd.head.if_name), "%s", intf);
+	cmd.args.info.ifindex = 0;
+	if (strcmp(intf, "rtlo")) {
+		cmd.args.up.ip_addr = 0xffffffff;
+		cmd.args.up.broadcast_ip = cmd.args.up.ip_addr;
+	} else {
+		cmd.args.up.ip_addr = htonl(0x7f000001); /* 127.0.0.1 */
+		cmd.args.up.broadcast_ip = cmd.args.up.ip_addr | ~0x000000ff;
+	}
+	cmd.args.up.set_dev_flags = 0;
+	cmd.args.up.clear_dev_flags = 0;
+	cmd.args.up.dev_addr_type = 0xffff;
+
+	err = smokey_check_errno(ioctl(fd, IOC_RT_IFUP, &cmd));
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int do_down(const char *intf)
+{
+	int err;
+
+	snprintf(cmd.head.if_name, sizeof(cmd.head.if_name), "%s", intf);
+	cmd.args.info.ifindex = 0;
+
+	err = smokey_check_errno(ioctl(fd, IOC_RT_IFDOWN, &cmd));
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int smokey_net_modprobe(int modid, bool silent)
+{
+	struct module *m = modules + modid;
+
+	if (modid < 0)
+		return -EINVAL;
+
+	return smokey_modprobe(m->name, silent);
+}
+
+static int smokey_net_rmmod(int modid)
+{
+	struct module *m = modules + modid;
+
+	return smokey_rmmod(m->name);
+}
+
+static int smokey_net_setup_rtcfg_client(const char *intf, int net_config)
+{
+	struct rtcfg_cmd cmd;
+	int err;
+
+	if ((net_config & _CC_COBALT_NET_CFG) == 0)
+		return -ENOSYS;
+
+	err = smokey_net_modprobe(MODID_CFG, false);
+	if (err < 0)
+		return err;
+
+	memset(&cmd, 0, sizeof(cmd));
+	err = smokey_check_errno(
+		snprintf(cmd.head.if_name, sizeof(cmd.head.if_name),
+			"%s", intf));
+	if (err < 0)
+		return err;
+
+	cmd.args.client.timeout      = 10000;
+	cmd.args.client.max_stations = 32;
+	cmd.args.client.buffer_size = 0;
+
+	err = smokey_check_errno(ioctl(fd, RTCFG_IOC_CLIENT, &cmd));
+	if (err < 0)
+		return err;
+
+	cmd.args.announce.timeout     = 5000;
+	cmd.args.announce.buffer_size = 0;
+	cmd.args.announce.flags       = 0;
+	cmd.args.announce.burstrate   = 4;
+
+	err = smokey_check_errno(ioctl(fd, RTCFG_IOC_ANNOUNCE, &cmd));
+	if (err < 0)
+		return err;
+
+	return 0;
+}
+
+static int smokey_net_teardown_rtcfg(const char *intf)
+{
+	struct rtcfg_cmd cmd;
+	int err;
+
+	memset(&cmd, 0, sizeof(cmd));
+	err = smokey_check_errno(
+		snprintf(cmd.head.if_name, sizeof(cmd.head.if_name),
+			"%s", intf));
+	if (err < 0)
+		return err;
+
+	/*
+	 * We may or may not be acting as a server; don't check the
+	 * status.
+	 */
+	ioctl(fd, RTCFG_IOC_DETACH, &cmd);
+
+	return smokey_net_rmmod(MODID_CFG);
+}
+
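+/*
+ * Scan /proc/rtnet/ipv4/host_route for the first unicast entry on
+ * @intf, filling in the caller's sockaddr with the peer IP address
+ * (AF_INET) or MAC address and interface index (AF_PACKET).
+ */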
+static int find_peer(const char *intf, void *vpeer)
+{
+	struct sockaddr_in *in_peer = vpeer;
+	struct sockaddr_ll *ll_peer = vpeer;
+	struct sockaddr *peer = vpeer;
+	char buf[4096];
+	char hash[3];
+	char dest[16];
+	char mac[18];
+	char dev[16];
+	FILE *f;
+	int err;
+
+	f = fopen("/proc/rtnet/ipv4/host_route", "r");
+	if (!f) {
+		err = -errno;
+		smokey_warning("open(/proc/rtnet/ipv4/host_route): %s",
+			strerror(-err));
+		return err;
+	}
+
+	/* Skip first line */
+	if (!fgets(buf, sizeof(buf), f)) {
+		err = -errno;
+		smokey_warning("fgets(/proc/rtnet/ipv4/host_route): %s",
+			strerror(-err));
+		goto err;
+	}
+
+	for(;;) {
+		err = fscanf(f, "%s\t%s\t%s\t%s\n", hash, dest, mac, dev);
+		if (err == EOF) {
+			smokey_warning("No peer found\n");
+			err = -ENOENT;
+			goto err;
+		}
+		if (err < 4) {
+			smokey_warning("Error parsing"
+				" /proc/rtnet/ipv4/host_route\n");
+			err = -EINVAL;
+			goto err;
+		}
+
+		if (strcmp(dev, intf))
+			continue;
+
+		if (strcmp(mac, "FF:FF:FF:FF:FF:FF") == 0)
+			continue;
+
+		if (strcmp(dest, "255.255.255.255") == 0)
+			continue;
+
+		if (strcmp(dest, "0.0.0.0") == 0)
+			continue;
+
+		break;
+	}
+
+	switch(peer->sa_family) {
+	case AF_INET:
+		err = smokey_check_errno(
+			inet_pton(AF_INET, dest, &in_peer->sin_addr));
+		if (err < 0)
+			goto err;
+		break;
+
+	case AF_PACKET: {
+		const unsigned eth_alen = 6;
+		struct ether_addr eth;
+		struct ifreq ifr;
+		int sock;
+
+		ll_peer->sll_halen = eth_alen;
+		if (ether_aton_r(mac, &eth) == 0) {
+			err = -errno;
+			smokey_warning("ether_aton_r(%s): %m", mac);
+			goto err;
+		}
+
+		memcpy(&ll_peer->sll_addr[0], eth.ether_addr_octet, eth_alen);
+
+		snprintf(ifr.ifr_name, sizeof(ifr.ifr_name), "%s", dev);
+
+		err = smokey_check_errno(
+			__RT(socket(PF_PACKET, SOCK_DGRAM, 0)));
+		if (err < 0)
+			goto err;
+		sock = err;
+
+		err = smokey_check_errno(__RT(ioctl(sock, SIOCGIFINDEX, &ifr)));
+		sock = smokey_check_errno(__RT(close(sock)));
+		if (err < 0)
+			goto err;
+		if (sock < 0) {
+			err = sock;
+			goto err;
+		}
+
+		ll_peer->sll_ifindex = ifr.ifr_ifindex;
+	}
+	}
+
+	err = 0;
+  err:
+	fclose(f);
+	return err;
+}
+
+int smokey_net_server_check_inner(const char *file, int line,
+				     const char *msg, int status)
+{
+	if (status >= 0)
+		return status;
+
+	__smokey_warning(file, line, "%s: %s", msg, strerror(-status));
+	pthread_exit((void *)(long)status);
+}
+
+static void *loopback_server(void *cookie)
+{
+	int net_config = (long)cookie;
+	smokey_net_server_loop(net_config);
+	return NULL;
+}
+
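+/*
+ * Load the required RTnet modules, bring @intf up if needed, wait
+ * for it to be RUNNING, obtain an address through RTcfg when none is
+ * assigned, spawn the local echo server for rt_loopback, and resolve
+ * the peer address for the test client.
+ */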
+int smokey_net_setup(const char *driver, const char *intf, int tested_config,
+		void *vpeer)
+{
+	int net_config, err, i, err_teardown;
+	struct sockaddr_in *in_peer = vpeer;
+	struct sockaddr *peer = vpeer;
+
+	/*
+	 * Main module needs to be loaded in order to use
+	 * _CC_COBALT_GET_NET_CONFIG.
+	 */
+	smokey_net_modprobe(MODID_RTNET, true);
+
+	err = cobalt_corectl(_CC_COBALT_GET_NET_CONFIG,
+			&net_config, sizeof(net_config));
+	if (err == -EINVAL)
+		return -ENOSYS;
+	if (err < 0)
+		return err;
+
+	if ((net_config & (_CC_COBALT_NET | _CC_COBALT_NET_IPV4))
+		!= (_CC_COBALT_NET | _CC_COBALT_NET_IPV4))
+		return -ENOSYS;
+
+	if ((net_config & tested_config) == 0)
+		return -ENOSYS;
+
+	modules[MODID_DRIVER].name = driver;
+	err = smokey_net_modprobe(MODID_DRIVER, false);
+	if (err < 0)
+		return err;
+
+	err = smokey_net_modprobe(MODID_IPV4, false);
+	if (err < 0)
+		return err;
+
+	err = smokey_net_modprobe(option_to_modid(tested_config), false);
+	if (err < 0)
+		return err;
+
+	fd = smokey_check_errno(open("/dev/rtnet", O_RDWR));
+	if (fd < 0)
+		return fd;
+
+	err = get_info(intf);
+	if (err < 0)
+		goto err;
+
+	if ((cmd.args.info.flags & IFF_UP) == 0) {
+		err = do_up(intf);
+		if (err < 0)
+			goto err;
+		ifup = true;
+	}
+
+	smokey_trace("Waiting for interface %s to be running", intf);
+
+	for (i = 0; i < 30; i++) {
+		err = get_info(intf);
+		if (err < 0)
+			goto err;
+
+		if ((cmd.args.info.flags & (IFF_UP | IFF_RUNNING))
+			== (IFF_UP | IFF_RUNNING))
+			goto running;
+
+		sleep(1);
+	}
+
+	smokey_warning("Interface is not running,"
+		" giving up (cable unplugged?)");
+	err = -ETIMEDOUT;
+	goto err;
+
+running:
+	err = get_info(intf);
+	if (err < 0)
+		goto err;
+
+	if (cmd.args.info.ip_addr == 0) {
+		err = smokey_net_setup_rtcfg_client(intf, net_config);
+		if (err < 0)
+			goto err;
+	}
+
+	if (strcmp(driver, "rt_loopback") == 0) {
+		err  = smokey_check_status(
+			__RT(pthread_create(&loopback_server_tid, NULL,
+						loopback_server,
+						(void *)(long)tested_config)));
+		if (err < 0)
+			goto err;
+		loopback_thread_created = true;
+	}
+
+	switch (peer->sa_family) {
+	case AF_INET:
+		if (in_peer->sin_addr.s_addr == htonl(INADDR_ANY) &&
+			strcmp(driver, "rt_loopback") == 0) {
+			in_peer->sin_addr.s_addr = cmd.args.info.ip_addr;
+			break;
+		}
+
+		/* Fallthrough wanted */
+	case AF_PACKET:
+		err = find_peer(intf, vpeer);
+		if (err < 0)
+			goto err;
+	}
+
+	close(fd);
+	return 0;
+
+  err:
+	close(fd);
+
+	err_teardown = smokey_net_teardown(driver, intf, tested_config);
+	if (err == 0)
+		err = err_teardown;
+
+	return err;
+}
+
+int smokey_net_teardown(const char *driver, const char *intf, int tested_config)
+{
+	int err = 0, tmp;
+
+	if (loopback_thread_created) {
+		void *status;
+
+		pthread_cancel(loopback_server_tid); /* May fail */
+		tmp = smokey_check_status(
+			pthread_join(loopback_server_tid, &status));
+		if (err == 0)
+			err = tmp;
+		if (err == 0 && status != PTHREAD_CANCELED)
+			err = (long)status;
+	}
+
+	tmp = smokey_check_errno(open("/dev/rtnet", O_RDWR));
+	if (tmp >= 0) {
+		fd = tmp;
+
+		if (strcmp(driver, "rt_loopback")) {
+			tmp = smokey_net_teardown_rtcfg(intf);
+			if (err == 0)
+				err = tmp;
+		}
+
+		if (ifup) {
+			tmp = do_down(intf);
+			if (err == 0)
+				err = tmp;
+		}
+
+		close(fd);
+	} else
+		err = tmp;
+
+	tmp = smokey_net_rmmod(option_to_modid(tested_config));
+	if (err == 0)
+		err = tmp;
+
+	tmp = smokey_net_rmmod(MODID_DRIVER);
+	if (err == 0)
+		err = tmp;
+
+	tmp = smokey_net_rmmod(MODID_IPV4);
+	if (err == 0)
+		err = tmp;
+
+	tmp = smokey_net_rmmod(MODID_RTNET);
+	if (err == 0)
+		err = tmp;
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net.h b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net.h
new file mode 100644
index 0000000..811d90d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net.h
@@ -0,0 +1,55 @@
+/*
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef SMOKEY_NET_H
+#define SMOKEY_NET_H
+
+#include <time.h>
+#include <sys/socket.h>
+#include <netpacket/packet.h>
+#include <netinet/in.h>
+
+#include <smokey/smokey.h>
+
+#ifndef ETH_P_802_EX1
+#define ETH_P_802_EX1	0x88B5		/* 802.1 Local Experimental 1.  */
+#endif
+
+struct smokey_net_payload {
+	struct timespec ts;
+	unsigned seq;
+};
+
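+/*
+ * A transport-specific test client. create_socket() returns a file
+ * descriptor or a negative error code; prepare() packs @payload into
+ * @buf and returns the resulting frame length; extract() performs
+ * the reverse operation on a received frame.
+ */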
+struct smokey_net_client {
+	const char *name;
+	int option;
+	union {
+		struct sockaddr peer;
+		struct sockaddr_ll ll_peer;
+		struct sockaddr_in in_peer;
+	};
+	socklen_t peer_len;
+
+	int (*create_socket)(struct smokey_net_client *client);
+	int (*prepare)(struct smokey_net_client *client,
+		void *buf, size_t len,
+		const struct smokey_net_payload *payload);
+	int (*extract)(struct smokey_net_client *client,
+		struct smokey_net_payload *payload,
+		const void *buf, size_t len);
+};
+
+int smokey_net_setup(const char *driver, const char *intf, int tested_config,
+		void *vpeer);
+
+int smokey_net_teardown(const char *driver,
+			const char *intf, int tested_config);
+
+int smokey_net_client_run(struct smokey_test *t,
+			struct smokey_net_client *client,
+			int argc, char *const argv[]);
+
+#endif /* SMOKEY_NET_H */
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.c b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.c
new file mode 100644
index 0000000..2e19208
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.c
@@ -0,0 +1,214 @@
+/*
+ * RTnet test server
+ *
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <getopt.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <netinet/ether.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <rtcfg_chrdev.h>
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+#include <xenomai/init.h>
+#include "smokey_net.h"
+#include "smokey_net_server.h"
+
+static const char *intf = "rteth0";
+
+int smokey_net_server_check_inner(const char *file, int line,
+					  const char *msg, int status)
+{
+	if (status >= 0)
+		return status;
+
+	fprintf(stderr, "FAILED %s: returned error %d - %s\n",
+		msg, -status, strerror(-status));
+	exit(EXIT_FAILURE);
+}
+
+static int rtnet_rtcfg_setup_server(void)
+{
+	struct rtcfg_cmd cmd;
+	int fd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	cmd.args.server.period    = 1000;
+	cmd.args.server.burstrate = 4;
+	cmd.args.server.heartbeat = 1000;
+	cmd.args.server.threshold = 2;
+	cmd.args.server.flags     = 0;
+
+	check_unix(snprintf(cmd.head.if_name, sizeof(cmd.head.if_name),
+				"%s", intf));
+
+	fd = check_unix(open("/dev/rtnet", O_RDWR));
+
+	check_unix(ioctl(fd, RTCFG_IOC_SERVER, &cmd));
+
+	return fd;
+}
+
+static void
+rtnet_rtcfg_add_client(int fd, const char *hwaddr, const char *ipaddr)
+{
+	struct rtcfg_cmd cmd;
+	struct ether_addr mac;
+	struct in_addr ip;
+
+	fprintf(stderr, "add client %s, mac %s\n", ipaddr, hwaddr);
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	check_unix(snprintf(cmd.head.if_name, sizeof(cmd.head.if_name),
+				"%s", intf));
+
+	if (ether_aton_r(hwaddr, &mac) == 0) {
+		fprintf(stderr, "%s is an invalid mac address\n", hwaddr);
+		exit(EXIT_FAILURE);
+	}
+
+	if (check_unix(inet_aton(ipaddr, &ip)) == 0) {
+		fprintf(stderr, "%s is an invalid ip address\n", ipaddr);
+		exit(EXIT_FAILURE);
+	}
+
+	cmd.args.add.addr_type = RTCFG_ADDR_IP | FLAG_ASSIGN_ADDR_BY_MAC;
+	cmd.args.add.ip_addr = ip.s_addr;
+	cmd.args.add.timeout = 3000;
+	memcpy(cmd.args.add.mac_addr, mac.ether_addr_octet,
+		sizeof(mac.ether_addr_octet));
+
+	check_unix(ioctl(fd, RTCFG_IOC_ADD, &cmd));
+}
+
+static void cleanup(int sig)
+{
+	struct rtcfg_cmd cmd;
+	int fd;
+
+	memset(&cmd, 0, sizeof(cmd));
+
+	check_unix(snprintf(cmd.head.if_name, sizeof(cmd.head.if_name),
+				"%s", intf));
+
+	fd = check_unix(open("/dev/rtnet", O_RDWR));
+
+	check_unix(ioctl(fd, RTCFG_IOC_DETACH, &cmd));
+
+	close(fd);
+
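+	/* Re-raise through the default handler so the process exits
+	 * with the original signal. */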
+	signal(sig, SIG_DFL);
+	raise(sig);
+}
+
+void application_usage(void)
+{
+	fprintf(stderr, "%s options [ <interface> ]:\n\n"
+		"Runs server for smokey network tests, on interface named "
+		"<interface>\n"
+		"(rtlo if unspecified)\n\n"
+		"Available options:\n"
+		"-f | --file <file>\t\tAnswers clients from file named <file>"
+		"\n\t(uses standard input if unspecified)\n"
+		"\tWhere every line contains a mac address and an IP address\n",
+		get_program_name());
+}
+
+int main(int argc, char *argv[])
+{
+	int net_config, c, fd, err;
+	FILE *input = stdin;
+
+	check_native(cobalt_corectl(_CC_COBALT_GET_NET_CONFIG,
+					&net_config, sizeof(net_config)));
+
+	for (;;) {
+		int option_index = 0;
+
+		static struct option long_options[] = {
+			{ "help", no_argument,       0, 'h', },
+			{ "file", required_argument, 0, 'f', },
+			{ 0,      0,                 0, 0,   },
+		};
+
+		c = getopt_long(argc, argv, "hf:", long_options, &option_index);
+		if (c == -1)
+			break;
+
+		switch(c) {
+		case 'h':
+			application_usage();
+			exit(EXIT_SUCCESS);
+
+		case 'f':
+			input = fopen(optarg, "r");
+			if (input == NULL) {
+				fprintf(stderr, "fopen(%s): %m\n", optarg);
+				exit(EXIT_FAILURE);
+			}
+			break;
+
+		case '?':
+			application_usage();
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	if (optind < argc) {
+		if (argc - optind > 1) {
+			application_usage();
+			printf("\nOnly one interface argument expected\n");
+			exit(EXIT_FAILURE);
+		}
+
+		intf = argv[optind];
+		if (strcmp(intf, "rtlo") == 0) {
+			application_usage();
+			printf("\nRunning smokey_net_server on rtlo makes no sense\n");
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	if ((net_config & _CC_COBALT_NET_CFG) == 0) {
+		fprintf(stderr, "RTcfg not enabled, aborting\n");
+		exit(EXIT_FAILURE);
+	}
+
+	fprintf(stderr, "Smokey network tests server, using interface %s\n",
+		intf);
+
+	signal(SIGINT, cleanup);
+	signal(SIGTERM, cleanup);
+	signal(SIGHUP, cleanup);
+
+	fd = rtnet_rtcfg_setup_server();
+
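+	/*
+	 * Register one RTcfg client per "MAC IP" input line, then
+	 * enter the echo server loop.
+	 */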
+	do {
+		char *mac, *ip;
+
+		err = fscanf(input, "%ms %ms\n", &mac, &ip);
+		if (err == 2) {
+			rtnet_rtcfg_add_client(fd, mac, ip);
+			free(mac);
+			free(ip);
+		}
+	} while (err != EOF);
+
+	close(fd);
+
+	smokey_net_server_loop(net_config);
+	exit(EXIT_SUCCESS);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.h b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.h
new file mode 100644
index 0000000..34d5692
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_common/smokey_net_server.h
@@ -0,0 +1,31 @@
+/*
+ * RTnet test server
+ *
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#ifndef SMOKEY_NET_CHECK_H
+#define SMOKEY_NET_CHECK_H
+
+#define check_native(expr)						\
+	smokey_net_server_check_inner(__FILE__, __LINE__, #expr, (expr))
+
+#define check_pthread(expr)						\
+	smokey_net_server_check_inner(__FILE__, __LINE__, #expr, -(expr))
+
+#define check_unix(expr)						\
+	({								\
+		int s = (expr);						\
+		smokey_net_server_check_inner(__FILE__, __LINE__,	\
+					      #expr, s < 0 ? -errno : s); \
+	})
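+
+/*
+ * For instance: fd = check_unix(open("/dev/rtnet", O_RDWR));
+ * A negative status prints a diagnostic and terminates the caller.
+ */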
+
+struct smokey_server;
+
+int smokey_net_server_check_inner(const char *file, int line,
+				     const char *msg, int status);
+
+void smokey_net_server_loop(int net_config);
+
+#endif /* SMOKEY_NET_CHECK_H */
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/Makefile.am
new file mode 100644
index 0000000..257cbbe
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/Makefile.am
@@ -0,0 +1,10 @@
+noinst_LIBRARIES = libnet_packet_dgram.a
+
+libnet_packet_dgram_a_SOURCES = \
+	packet_dgram.c
+
+libnet_packet_dgram_a_CPPFLAGS = \
+	@XENO_USER_CFLAGS@ \
+	-I$(srcdir)/../net_common \
+	-I$(top_srcdir)/include \
+	-I$(top_srcdir)/kernel/drivers/net/stack/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/packet_dgram.c b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/packet_dgram.c
new file mode 100644
index 0000000..dc716a4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_dgram/packet_dgram.c
@@ -0,0 +1,81 @@
+/*
+ * RTnet AF_PACKET test
+ *
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <unistd.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+#include <net/ethernet.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <netpacket/packet.h>
+
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+#include "smokey_net.h"
+
+smokey_test_plugin(net_packet_dgram,
+	SMOKEY_ARGLIST(
+		SMOKEY_STRING(rtnet_driver),
+		SMOKEY_STRING(rtnet_interface),
+		SMOKEY_INT(rtnet_rate),
+		SMOKEY_INT(rtnet_duration),
+	),
+	"Check RTnet driver, using cooked packets, measuring round trip time\n"
+	"\tand packet losses,\n"
+	"\tthe rtnet_driver parameter allows choosing the network driver\n"
+	"\tthe rtnet_interface parameter allows choosing the network interface\n"
+	"\tthe rtnet_rate parameter allows choosing the packet rate\n"
+	"\tthe rtnet_duration parameter allows choosing the test duration\n"
+	"\tA server on the network must run the smokey_rtnet_server program."
+);
+
+static int
+packet_dgram_create_socket(struct smokey_net_client *client)
+{
+	return smokey_check_errno(
+		__RT(socket(PF_PACKET, SOCK_DGRAM, htons(ETH_P_802_EX1 + 1))));
+}
+
+static int
+packet_dgram_prepare(struct smokey_net_client *client,
+		void *buf, size_t len, const struct smokey_net_payload *payload)
+{
+	if (sizeof(*payload) < len)
+		len = sizeof(*payload);
+	memcpy(buf, payload, len);
+	return len;
+}
+
+static int
+packet_dgram_extract(struct smokey_net_client *client,
+		struct smokey_net_payload *payload, const void *buf, size_t len)
+{
+	if (sizeof(*payload) < len)
+		len = sizeof(*payload);
+	memcpy(payload, buf, len);
+	return len;
+}
+
+static int
+run_net_packet_dgram(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct smokey_net_client client = {
+		.name = "cooked packets",
+		.option = _CC_COBALT_NET_AF_PACKET,
+		.create_socket = &packet_dgram_create_socket,
+		.prepare = &packet_dgram_prepare,
+		.extract = &packet_dgram_extract,
+	};
+
+	memset(&client.ll_peer, '\0', sizeof(client.ll_peer));
+	client.ll_peer.sll_family = AF_PACKET;
+	client.ll_peer.sll_protocol = htons(ETH_P_802_EX1);
+	client.peer_len = sizeof(client.ll_peer);
+
+	return smokey_net_client_run(t, &client, argc, argv);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/Makefile.am
new file mode 100644
index 0000000..6a6372a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/Makefile.am
@@ -0,0 +1,10 @@
+noinst_LIBRARIES = libnet_packet_raw.a
+
+libnet_packet_raw_a_SOURCES = \
+	packet_raw.c
+
+libnet_packet_raw_a_CPPFLAGS = \
+	@XENO_USER_CFLAGS@ \
+	-I$(srcdir)/../net_common \
+	-I$(top_srcdir)/include \
+	-I$(top_srcdir)/kernel/drivers/net/stack/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/packet_raw.c b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/packet_raw.c
new file mode 100644
index 0000000..a7302ca
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_packet_raw/packet_raw.c
@@ -0,0 +1,122 @@
+/*
+ * RTnet AF_PACKET test
+ *
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <unistd.h>
+#include <net/if.h>
+#include <arpa/inet.h>
+#include <net/ethernet.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <netpacket/packet.h>
+
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+#include "smokey_net.h"
+
+smokey_test_plugin(net_packet_raw,
+	SMOKEY_ARGLIST(
+		SMOKEY_STRING(rtnet_driver),
+		SMOKEY_STRING(rtnet_interface),
+		SMOKEY_INT(rtnet_rate),
+		SMOKEY_INT(rtnet_duration),
+	),
+	"Check RTnet driver, using raw packets, measuring round trip time\n"
+	"\tand packet losses,\n"
+	"\tthe rtnet_driver parameter allows choosing the network driver\n"
+	"\tthe rtnet_interface parameter allows choosing the network interface\n"
+	"\tthe rtnet_rate parameter allows choosing the packet rate\n"
+	"\tthe rtnet_duration parameter allows choosing the test duration\n"
+	"\tA server on the network must run the smokey_rtnet_server program."
+);
+
+struct raw_packet_client {
+	struct smokey_net_client base;
+	struct ethhdr header;
+};
+
+static int
+packet_raw_create_socket(struct smokey_net_client *bclient)
+{
+	struct raw_packet_client *client = (struct raw_packet_client *)bclient;
+	struct ifreq ifr;
+	int err, sock;
+
+	sock = smokey_check_errno(
+		__RT(socket(PF_PACKET, SOCK_RAW, htons(ETH_P_802_EX1 + 1))));
+	if (sock < 0)
+		return sock;
+
+	memcpy(client->header.h_dest, bclient->ll_peer.sll_addr, 6);
+	ifr.ifr_ifindex = bclient->ll_peer.sll_ifindex;
+	err = smokey_check_errno(
+		__RT(ioctl(sock, SIOCGIFNAME, &ifr)));
+	if (err < 0)
+		goto err;
+	err = smokey_check_errno(
+		__RT(ioctl(sock, SIOCGIFHWADDR, &ifr)));
+	if (err < 0)
+		goto err;
+	memcpy(client->header.h_source, ifr.ifr_hwaddr.sa_data, 6);
+	client->header.h_proto = htons(ETH_P_802_EX1);
+
+	return sock;
+
+  err:
+	__RT(close(sock));
+	return err;
+}
+
+static int
+packet_raw_prepare(struct smokey_net_client *bclient,
+		void *buf, size_t len, const struct smokey_net_payload *payload)
+{
+	struct raw_packet_client *client = (struct raw_packet_client *)bclient;
+
+	if (len < sizeof(client->header) + sizeof(*payload))
+		return -EINVAL;
+
+	len = sizeof(client->header) + sizeof(*payload);
+	memcpy(buf, &client->header, sizeof(client->header));
+	memcpy(buf + sizeof(client->header), payload, sizeof(*payload));
+	return len;
+}
+
+static int
+packet_raw_extract(struct smokey_net_client *bclient,
+		struct smokey_net_payload *payload, const void *buf, size_t len)
+{
+	struct raw_packet_client *client = (struct raw_packet_client *)bclient;
+
+	if (len < sizeof(client->header) + sizeof(*payload))
+		return -EINVAL;
+
+	len = sizeof(client->header) + sizeof(*payload);
+	memcpy(payload, buf + sizeof(client->header), sizeof(*payload));
+	return len;
+}
+
+static int
+run_net_packet_raw(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct raw_packet_client client = {
+		.base = {
+			.name = "raw packets",
+			.option = _CC_COBALT_NET_AF_PACKET,
+			.create_socket = &packet_raw_create_socket,
+			.prepare = &packet_raw_prepare,
+			.extract = &packet_raw_extract,
+		},
+	};
+	struct smokey_net_client *bclient = &client.base;
+
+	memset(&bclient->ll_peer, '\0', sizeof(bclient->ll_peer));
+	bclient->ll_peer.sll_family = AF_PACKET;
+	bclient->peer_len = sizeof(bclient->ll_peer);
+
+	return smokey_net_client_run(t, bclient, argc, argv);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/Makefile.am
new file mode 100644
index 0000000..576a00c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/Makefile.am
@@ -0,0 +1,10 @@
+noinst_LIBRARIES = libnet_udp.a
+
+libnet_udp_a_SOURCES = \
+	udp.c
+
+libnet_udp_a_CPPFLAGS = \
+	@XENO_USER_CFLAGS@ \
+	-I$(srcdir)/../net_common \
+	-I$(top_srcdir)/include \
+	-I$(top_srcdir)/kernel/drivers/net/stack/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/udp.c b/kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/udp.c
new file mode 100644
index 0000000..8ba89c4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/net_udp/udp.c
@@ -0,0 +1,75 @@
+/*
+ * RTnet UDP test
+ *
+ * Copyright (C) 2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * SPDX-License-Identifier: MIT
+ */
+
+#include <netinet/in.h>
+
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+#include "smokey_net.h"
+
+smokey_test_plugin(net_udp,
+	SMOKEY_ARGLIST(
+		SMOKEY_STRING(rtnet_driver),
+		SMOKEY_STRING(rtnet_interface),
+		SMOKEY_INT(rtnet_rate),
+		SMOKEY_INT(rtnet_duration),
+	),
+	"Check RTnet driver, using UDP packets, measuring round trip time\n"
+	"\tand packet losses,\n"
+	"\tthe rtnet_driver parameter allows choosing the network driver\n"
+	"\tthe rtnet_interface parameter allows choosing the network interface\n"
+	"\tthe rtnet_rate parameter allows choosing the packet rate\n"
+	"\tthe rtnet_duration parameter allows choosing the test duration\n"
+	"\tA server on the network must run the smokey_rtnet_server program."
+);
+
+static int
+udp_create_socket(struct smokey_net_client *client)
+{
+	return smokey_check_errno(__RT(socket(PF_INET, SOCK_DGRAM, 0)));
+}
+
+static int
+udp_prepare(struct smokey_net_client *client,
+		void *buf, size_t len, const struct smokey_net_payload *payload)
+{
+	if (sizeof(*payload) < len)
+		len = sizeof(*payload);
+	memcpy(buf, payload, len);
+	return len;
+}
+
+static int
+udp_extract(struct smokey_net_client *client,
+		struct smokey_net_payload *payload, const void *buf, size_t len)
+{
+	if (sizeof(*payload) < len)
+		len = sizeof(*payload);
+	memcpy(payload, buf, len);
+	return len;
+}
+
+static int
+run_net_udp(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct smokey_net_client client = {
+		.name = "UDP",
+		.option = _CC_COBALT_NET_UDP,
+		.create_socket = &udp_create_socket,
+		.prepare = &udp_prepare,
+		.extract = &udp_extract,
+	};
+
+	memset(&client.in_peer, '\0', sizeof(client.in_peer));
+	client.in_peer.sin_family = AF_INET;
+	client.in_peer.sin_port = htons(7); /* UDP echo port */
+	client.in_peer.sin_addr.s_addr = htonl(INADDR_ANY);
+	client.peer_len = sizeof(client.in_peer);
+
+	return smokey_net_client_run(t, &client, argc, argv);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/Makefile.am
new file mode 100644
index 0000000..e5dd207
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libposix-clock.a
+
+libposix_clock_a_SOURCES = posix-clock.c
+
+libposix_clock_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/posix-clock.c b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/posix-clock.c
new file mode 100644
index 0000000..3a638d4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-clock/posix-clock.c
@@ -0,0 +1,458 @@
+/*
+ * Copyright (C) 2014 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <assert.h>
+#include <unistd.h>
+#include <time.h>
+#include <smokey/smokey.h>
+#include <sys/timerfd.h>
+
+smokey_test_plugin(posix_clock,
+		   SMOKEY_NOARGS,
+		   "Check POSIX clock services."
+);
+
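+/*
+ * Each test below arms a timerfd on CLOCK_REALTIME, steps the clock
+ * forward or backward by five seconds either before or after the
+ * first tick, then verifies the tick count returned by read() and
+ * the wakeup delay measured against CLOCK_MONOTONIC.
+ */
+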
+static int clock_increase_before_oneshot_timer_first_tick(void)
+{
+	unsigned long long ticks;
+	struct itimerspec timer;
+	struct timespec now;
+	int t, ret;
+
+	smokey_trace(__func__);
+	
+	t = smokey_check_errno(timerfd_create(CLOCK_REALTIME, 0));
+	if (t < 0)
+		return t;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	timer.it_value = now;
+	timer.it_value.tv_sec++;
+	timer.it_interval.tv_sec = 0;
+	timer.it_interval.tv_nsec = 0;
+
+	ret = smokey_check_errno(timerfd_settime(t, TFD_TIMER_ABSTIME, &timer, NULL));
+	if (ret)
+		return ret;
+
+	now.tv_sec += 5;
+
+	ret = smokey_check_errno(clock_settime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	timer.it_value = now;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+	
+	if (!smokey_assert(now.tv_sec * 1000000000ULL + now.tv_nsec -
+			   (timer.it_value.tv_sec * 1000000000ULL + timer.it_value.tv_nsec)
+			   < 1000000000))
+		return -EINVAL;
+	
+	return smokey_check_errno(close(t));
+}
+
+static int clock_increase_before_periodic_timer_first_tick(void)
+{
+	unsigned long long ticks;
+	struct itimerspec timer;
+	struct timespec now;
+	int t, ret;
+
+	smokey_trace(__func__);
+	
+	t = smokey_check_errno(timerfd_create(CLOCK_REALTIME, 0));
+	if (t < 0)
+		return t;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+	
+	timer.it_value = now;
+	timer.it_value.tv_sec++;
+	timer.it_interval.tv_sec = 1;
+	timer.it_interval.tv_nsec = 0;
+	ret = smokey_check_errno(timerfd_settime(t, TFD_TIMER_ABSTIME, &timer, NULL));
+	if (ret)
+		return ret;
+
+	now.tv_sec += 5;
+
+	ret = smokey_check_errno(clock_settime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+	
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+	
+	if (!smokey_assert(ticks == 5))
+		return -EINVAL;
+	
+	timer.it_value = now;
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+	
+	if (!smokey_assert(now.tv_sec * 1000000000ULL + now.tv_nsec -
+			   (timer.it_value.tv_sec * 1000000000ULL + timer.it_value.tv_nsec)
+			   < 1000000000))
+		return -EINVAL;
+	
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+	
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+	
+	return smokey_check_errno(close(t));
+}
+
+static int clock_increase_after_periodic_timer_first_tick(void)
+{
+	unsigned long long ticks;
+	struct itimerspec timer;
+	struct timespec now;
+	int t, ret;
+
+	smokey_trace(__func__);
+	
+	t = smokey_check_errno(timerfd_create(CLOCK_REALTIME, 0));
+	if (t < 0)
+		return t;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+	
+	timer.it_value = now;
+	timer.it_value.tv_sec++;
+	timer.it_interval.tv_sec = 1;
+	timer.it_interval.tv_nsec = 0;
+
+	ret = smokey_check_errno(timerfd_settime(t, TFD_TIMER_ABSTIME, &timer, NULL));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+	
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+	
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+	
+	now.tv_sec += 5;
+
+	ret = smokey_check_errno(clock_settime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+	
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 5))
+		return -EINVAL;
+	
+	timer.it_value = now;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	if (!smokey_assert(now.tv_sec * 1000000000ULL + now.tv_nsec -
+		(timer.it_value.tv_sec * 1000000000ULL + timer.it_value.tv_nsec)
+			   < 1000000000))
+		return -EINVAL;
+
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	return smokey_check_errno(close(t));
+}
+
+static int clock_decrease_before_oneshot_timer_first_tick(void)
+{
+	unsigned long long ticks;
+	struct itimerspec timer;
+	struct timespec now;
+	long long diff;
+	int t, ret;
+
+	smokey_trace(__func__);
+	
+	t = smokey_check_errno(timerfd_create(CLOCK_REALTIME, 0));
+	if (t < 0)
+		return t;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	timer.it_value = now;
+	timer.it_value.tv_sec++;
+	timer.it_interval.tv_sec = 0;
+	timer.it_interval.tv_nsec = 0;
+
+	ret = smokey_check_errno(timerfd_settime(t, TFD_TIMER_ABSTIME, &timer, NULL));
+	if (ret)
+		return ret;
+
+	now.tv_sec -= 5;
+
+	ret = smokey_check_errno(clock_settime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+	
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	timer.it_value = now;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	diff = now.tv_sec * 1000000000ULL + now.tv_nsec -
+		(timer.it_value.tv_sec * 1000000000ULL + timer.it_value.tv_nsec);
+
+	if (!smokey_assert(diff >= 5500000000LL && diff <= 6500000000LL))
+		return -EINVAL;
+	
+	return smokey_check_errno(close(t));
+}
+
+static int clock_decrease_before_periodic_timer_first_tick(void)
+{
+	unsigned long long ticks;
+	struct itimerspec timer;
+	struct timespec now;
+	long long diff;
+	int t, ret;
+
+	smokey_trace(__func__);
+	
+	t = smokey_check_errno(timerfd_create(CLOCK_REALTIME, 0));
+	if (t < 0)
+		return t;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+	
+	timer.it_value = now;
+	timer.it_value.tv_sec++;
+	timer.it_interval.tv_sec = 1;
+	timer.it_interval.tv_nsec = 0;
+
+	ret = smokey_check_errno(timerfd_settime(t, TFD_TIMER_ABSTIME, &timer, NULL));
+	if (ret)
+		return ret;
+
+	now.tv_sec -= 5;
+
+	ret = smokey_check_errno(clock_settime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	timer.it_value = now;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	diff = now.tv_sec * 1000000000ULL + now.tv_nsec -
+		(timer.it_value.tv_sec * 1000000000ULL + timer.it_value.tv_nsec);
+
+	if (!smokey_assert(diff >= 5500000000LL && diff <= 6500000000LL))
+		return -EINVAL;
+	
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+	
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	return smokey_check_errno(close(t));
+}
+
+static int clock_decrease_after_periodic_timer_first_tick(void)
+{
+	unsigned long long ticks;
+	struct itimerspec timer;
+	struct timespec now;
+	long long diff;
+	int t, ret;
+
+	smokey_trace(__func__);
+
+	t = smokey_check_errno(timerfd_create(CLOCK_REALTIME, 0));
+	if (t < 0)
+		return t;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+	
+	timer.it_value = now;
+	timer.it_value.tv_sec++;
+	timer.it_interval.tv_sec = 1;
+	timer.it_interval.tv_nsec = 0;
+
+	ret = smokey_check_errno(timerfd_settime(t, TFD_TIMER_ABSTIME, &timer, NULL));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	now.tv_sec -= 5;
+
+	ret = smokey_check_errno(clock_settime(CLOCK_REALTIME, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	timer.it_value = now;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &now));
+	if (ret)
+		return ret;
+
+	diff = now.tv_sec * 1000000000ULL + now.tv_nsec -
+		(timer.it_value.tv_sec * 1000000000ULL + timer.it_value.tv_nsec);
+	if (!smokey_assert(diff < 2000000000))
+		return -EINVAL;
+	
+	ret = smokey_check_errno(read(t, &ticks, sizeof(ticks)));
+	if (ret < 0)
+		return ret;
+
+	if (!smokey_assert(ticks == 1))
+		return -EINVAL;
+
+	return smokey_check_errno(close(t));
+}
+
+static int run_posix_clock(struct smokey_test *t, int argc, char *const argv[])
+{
+	int ret;
+
+	ret = clock_increase_before_oneshot_timer_first_tick();
+	if (ret)
+		return ret;
+	
+	ret = clock_increase_before_periodic_timer_first_tick();
+	if (ret)
+		return ret;
+
+	ret = clock_increase_after_periodic_timer_first_tick();
+	if (ret)
+		return ret;
+
+	ret = clock_decrease_before_oneshot_timer_first_tick();
+	if (ret)
+		return ret;
+
+	ret = clock_decrease_before_periodic_timer_first_tick();
+	if (ret)
+		return ret;
+
+	return clock_decrease_after_periodic_timer_first_tick();
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/Makefile.am
new file mode 100644
index 0000000..4f911e2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libposix-cond.a
+
+libposix_cond_a_SOURCES = posix-cond.c
+
+libposix_cond_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/posix-cond.c b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/posix-cond.c
new file mode 100644
index 0000000..e49c447
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-cond/posix-cond.c
@@ -0,0 +1,788 @@
+/*
+ * Functional testing of the condvar implementation for Cobalt.
+ *
+ * Copyright (C) Gilles Chanteperdrix  <gilles.chanteperdrix@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <string.h>
+#include <limits.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(posix_cond,
+		   SMOKEY_NOARGS,
+		   "Check POSIX condition variable services"
+);
+
+#define NS_PER_MS (1000000)
+#define NS_PER_S (1000000000)
+
+static unsigned long long timer_read(void)
+{
+	struct timespec ts;
+
+	clock_gettime(CLOCK_REALTIME, &ts);
+	return (unsigned long long)ts.tv_sec * NS_PER_S + ts.tv_nsec;
+}
+
+static inline unsigned long long timer_get_tsc(void)
+{
+	return clockobj_get_tsc();
+}
+
+static inline unsigned long long timer_tsc2ns(unsigned long long tsc)
+{
+	return clockobj_tsc_to_ns(tsc);
+}
+
+static void check_inner(const char *file, int line, const char *fn, const char *msg, int status, int expected)
+{
+	if (status == expected)
+		return;
+
+	fprintf(stderr, "FAILED %s %s: returned %d instead of %d - %s\n",
+		fn, msg, status, expected, strerror(-status));
+	exit(EXIT_FAILURE);
+}
+#define check(msg, status, expected) \
+	check_inner(__FILE__, __LINE__, __func__, msg, status, expected)
+
+#define check_unix(msg, status, expected)				\
+	({								\
+		int s = (status);					\
+		check_inner(__FILE__, __LINE__, __func__, msg, s < 0 ? -errno : s, expected); \
+	})
+
+static void check_sleep_inner(const char *fn,
+		       const char *prefix, unsigned long long start)
+{
+	unsigned long long diff = timer_tsc2ns(timer_get_tsc() - start);
+
+	if (diff < 10 * NS_PER_MS) {
+		fprintf(stderr, "%s waited %Ld.%03u us\n",
+			prefix, diff / 1000, (unsigned)(diff % 1000));
+		exit(EXIT_FAILURE);
+	}
+}
+#define check_sleep(prefix, start) \
+	check_sleep_inner(__func__, prefix, start)
+
+static int mutex_init(pthread_mutex_t *mutex, int type, int proto)
+{
+	pthread_mutexattr_t mattr;
+	int err;
+
+	pthread_mutexattr_init(&mattr);
+	pthread_mutexattr_settype(&mattr, type);
+	err = pthread_mutexattr_setprotocol(&mattr, proto);
+	if (err)
+		goto out;
+	if (proto == PTHREAD_PRIO_PROTECT)
+		pthread_mutexattr_setprioceiling(&mattr, 3);
+
+	err = pthread_mutex_init(mutex, &mattr);
+  out:
+	pthread_mutexattr_destroy(&mattr);
+
+	return -err;
+}
+#define mutex_lock(mutex) (-pthread_mutex_lock(mutex))
+#define mutex_unlock(mutex) (-pthread_mutex_unlock(mutex))
+#define mutex_destroy(mutex) (-pthread_mutex_destroy(mutex))
+
+static int cond_init(pthread_cond_t *cond, int absolute)
+{
+	pthread_condattr_t cattr;
+	int ret;
+
+	pthread_condattr_init(&cattr);
+	ret = pthread_condattr_setclock(&cattr,
+					absolute ? CLOCK_REALTIME : CLOCK_MONOTONIC);
+	if (ret) {
+		pthread_condattr_destroy(&cattr);
+		return ENOSYS;
+	}
+	ret = pthread_cond_init(cond, &cattr);
+	pthread_condattr_destroy(&cattr);
+
+	return -ret;
+}
+#define cond_signal(cond) (-pthread_cond_signal(cond))
+
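+/*
+ * cond_wait() blocks for a delay relative to CLOCK_MONOTONIC, or
+ * forever when @ns is zero; cond_wait_until() takes an absolute
+ * date. Both return a negated errno value on failure.
+ */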
+static int cond_wait(pthread_cond_t *cond, pthread_mutex_t *mutex, unsigned long long ns)
+{
+	struct timespec ts;
+
+	if (ns == 0)
+		return -pthread_cond_wait(cond, mutex);
+
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	ns += ts.tv_nsec;
+	ts.tv_sec += ns / NS_PER_S;
+	ts.tv_nsec = ns % NS_PER_S;
+
+	return -pthread_cond_timedwait(cond, mutex, &ts);
+}
+
+static int cond_wait_until(pthread_cond_t *cond, pthread_mutex_t *mutex, unsigned long long date)
+{
+	struct timespec ts = {
+		.tv_sec = date / NS_PER_S,
+		.tv_nsec = date % NS_PER_S,
+	};
+
+	return -pthread_cond_timedwait(cond, mutex, &ts);
+}
+#define cond_destroy(cond) (-pthread_cond_destroy(cond))
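+
+/*
+ * cond_wait() blocks indefinitely when ns == 0, otherwise it waits
+ * for a delay relative to CLOCK_MONOTONIC; cond_wait_until() takes
+ * an absolute CLOCK_REALTIME date, matching the clock each test
+ * selects through cond_init().
+ */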
+
+static int thread_msleep(unsigned ms)
+{
+	struct timespec ts = {
+		.tv_sec = (ms * NS_PER_MS) / NS_PER_S,
+		.tv_nsec = (ms * NS_PER_MS) % NS_PER_S,
+	};
+
+	/* Return -errno on failure, as the other wrappers do. */
+	return nanosleep(&ts, NULL) ? -errno : 0;
+}
+
+struct thread_startup {
+	sem_t ready;
+	void *(*handler)(void *);
+	void *cookie;
+};
+
+static void *thread_trampoline(void *arg)
+{
+	struct thread_startup *startup = arg;
+	void *(*handler)(void *) = startup->handler;
+	void *cookie = startup->cookie;
+
+	/*
+	 * Copy the handler and cookie out *before* posting: the
+	 * parent may leave thread_spawn() and destroy *startup as
+	 * soon as the semaphore is posted.
+	 */
+	sem_post(&startup->ready);
+
+	return handler(cookie);
+}
+
+static int thread_spawn(pthread_t *thread, int prio,
+			void *(*handler)(void *cookie), void *cookie)
+{
+	struct thread_startup startup;
+	struct sched_param param;
+	pthread_attr_t tattr;
+	int err;
+
+	sem_init(&startup.ready, 0, 0);
+	startup.handler = handler;
+	startup.cookie = cookie;
+
+	pthread_attr_init(&tattr);
+	pthread_attr_setinheritsched(&tattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&tattr, SCHED_FIFO);
+	param.sched_priority = prio;
+	pthread_attr_setschedparam(&tattr, &param);
+	pthread_attr_setdetachstate(&tattr, PTHREAD_CREATE_JOINABLE);
+
+	err = pthread_create(thread, &tattr, thread_trampoline, &startup);
+	if (!err)
+		check("wait startup", sem_wait(&startup.ready), 0);
+
+	pthread_attr_destroy(&tattr);
+	sem_destroy(&startup.ready);
+
+	return -err;
+}
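+
+/*
+ * The startup semaphore handshake above guarantees the new thread is
+ * actually running before thread_spawn() returns, so the short
+ * msleep() calls in the tests reliably give it time to block.
+ */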
+#define thread_yield() sched_yield()
+#define thread_kill(thread, sig) (-__STD(pthread_kill(thread, sig)))
+#define thread_self() pthread_self()
+#define thread_join(thread) (-pthread_join(thread, NULL))
+
+struct cond_mutex {
+	pthread_mutex_t *mutex;
+	pthread_cond_t *cond;
+	pthread_t tid;
+};
+
+static void *cond_signaler(void *cookie)
+{
+	unsigned long long start;
+	struct cond_mutex *cm = cookie;
+
+	start = timer_get_tsc();
+	check("mutex_lock", mutex_lock(cm->mutex), 0);
+	check_sleep("mutex_lock", start);
+	thread_msleep(10);
+	check("cond_signal", cond_signal(cm->cond), 0);
+	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
+
+	return NULL;
+}
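+
+/*
+ * Timing pattern used throughout this test: the main thread sleeps
+ * 11ms so the peer can grab the mutex first, and check_sleep()
+ * requires every blocking call to have lasted at least 10ms,
+ * proving the caller really blocked instead of returning early.
+ */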
+
+static void autoinit_simple_conddestroy(void)
+{
+	pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+	pthread_cond_t cond2 = PTHREAD_COND_INITIALIZER;
+	unsigned int invalmagic = ~0x86860505; // ~COBALT_COND_MAGIC
+
+	memcpy((char *)&cond2 + sizeof(cond2) - sizeof(invalmagic),
+		&invalmagic, sizeof(invalmagic));
+
+	smokey_trace("%s", __func__);
+	check("cond_destroy", cond_destroy(&cond), 0);
+	check("cond_destroy invalid", cond_destroy(&cond2), -EINVAL);
+}
+
+static void autoinit_simple_condwait(void)
+{
+	pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+	};
+	pthread_t cond_signaler_tid;
+
+	smokey_trace("%s", __func__);
+
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&cond_signaler_tid, 2, cond_signaler, &cm), 0);
+	thread_msleep(11);
+
+	start = timer_get_tsc();
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	thread_msleep(10);
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(cond_signaler_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void simple_condwait(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+	};
+	pthread_t cond_signaler_tid;
+
+	smokey_trace("%s", __func__);
+
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&cond_signaler_tid, 2, cond_signaler, &cm), 0);
+	thread_msleep(11);
+
+	start = timer_get_tsc();
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	thread_msleep(10);
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(cond_signaler_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void relative_condwait(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+
+	smokey_trace("%s", __func__);
+
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+
+	start = timer_get_tsc();
+	check("cond_wait",
+	      cond_wait(&cond, &mutex, 10 * NS_PER_MS), -ETIMEDOUT);
+	check_sleep("cond_wait", start);
+	thread_msleep(10);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void autoinit_absolute_condwait(void)
+{
+	pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+	unsigned long long start;
+	pthread_mutex_t mutex;
+
+	smokey_trace("%s", __func__);
+
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+
+	start = timer_get_tsc();
+	check("cond_wait",
+	      cond_wait_until(&cond, &mutex, timer_read() + 10 * NS_PER_MS),
+	      -ETIMEDOUT);
+	check_sleep("cond_wait", start);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void absolute_condwait(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+
+	smokey_trace("%s", __func__);
+
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 1), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+
+	start = timer_get_tsc();
+	check("cond_wait",
+	      cond_wait_until(&cond, &mutex, timer_read() + 10 * NS_PER_MS),
+	      -ETIMEDOUT);
+	check_sleep("cond_wait", start);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void *cond_killer(void *cookie)
+{
+	unsigned long long start;
+	struct cond_mutex *cm = cookie;
+
+	start = timer_get_tsc();
+	check("mutex_lock", mutex_lock(cm->mutex), 0);
+	check_sleep("mutex_lock", start);
+	thread_msleep(10);
+	check("thread_kill", thread_kill(cm->tid, SIGRTMIN), 0);
+	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
+
+	return NULL;
+}
+
+static volatile int sig_seen;
+
+static void sighandler(int sig)
+{
+	++sig_seen;
+}
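+
+/*
+ * sig_seen counts handler invocations. The *_norestart and *_restart
+ * variants both expect cond_wait() to complete with 0 rather than
+ * -EINTR, while the sig_seen checks confirm the signal was actually
+ * delivered during the wait.
+ */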
+
+static void sig_norestart_condwait(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+		.tid = thread_self(),
+	};
+	pthread_t cond_killer_tid;
+	struct sigaction sa = {
+		.sa_handler = sighandler,
+		.sa_flags = 0,
+	};
+	sigemptyset(&sa.sa_mask);
+
+	smokey_trace("%s", __func__);
+
+	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&cond_killer_tid, 2, cond_killer, &cm), 0);
+	thread_msleep(11);
+
+	start = timer_get_tsc();
+	sig_seen = 0;
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	check("sig_seen", sig_seen, 1);
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(cond_killer_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void sig_restart_condwait(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+		.tid = thread_self(),
+	};
+	pthread_t cond_killer_tid;
+	struct sigaction sa = {
+		.sa_handler = sighandler,
+		.sa_flags = SA_RESTART,
+	};
+	sigemptyset(&sa.sa_mask);
+
+	smokey_trace("%s", __func__);
+
+	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&cond_killer_tid, 2, cond_killer, &cm), 0);
+	thread_msleep(11);
+
+	start = timer_get_tsc();
+	sig_seen = 0;
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	check("sig_seen", sig_seen, 1);
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(cond_killer_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void *mutex_killer(void *cookie)
+{
+	unsigned long long start;
+	struct cond_mutex *cm = cookie;
+
+	start = timer_get_tsc();
+	check("mutex_lock", mutex_lock(cm->mutex), 0);
+	check_sleep("mutex_lock", start);
+	check("cond_signal", cond_signal(cm->cond), 0);
+	thread_msleep(10);
+	check("thread_kill", thread_kill(cm->tid, SIGRTMIN), 0);
+	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
+
+	return NULL;
+}
+
+static void sig_norestart_condwait_mutex(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+		.tid = thread_self(),
+	};
+	pthread_t mutex_killer_tid;
+	struct sigaction sa = {
+		.sa_handler = sighandler,
+		.sa_flags = 0,
+	};
+	sigemptyset(&sa.sa_mask);
+
+	smokey_trace("%s", __func__);
+
+	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&mutex_killer_tid, 2, mutex_killer, &cm), 0);
+	thread_msleep(11);
+
+	sig_seen = 0;
+	start = timer_get_tsc();
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	check("sig_seen", sig_seen, 1);
+	thread_msleep(10);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(mutex_killer_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void sig_restart_condwait_mutex(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+		.tid = thread_self(),
+	};
+	pthread_t mutex_killer_tid;
+	struct sigaction sa = {
+		.sa_handler = sighandler,
+		.sa_flags = SA_RESTART,
+	};
+	sigemptyset(&sa.sa_mask);
+
+	smokey_trace("%s", __func__);
+
+	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&mutex_killer_tid, 2, mutex_killer, &cm), 0);
+	thread_msleep(11);
+
+	sig_seen = 0;
+	start = timer_get_tsc();
+
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	thread_msleep(10);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(mutex_killer_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void *double_killer(void *cookie)
+{
+	unsigned long long start;
+	struct cond_mutex *cm = cookie;
+
+	start = timer_get_tsc();
+	check("mutex_lock", mutex_lock(cm->mutex), 0);
+	check_sleep("mutex_lock", start);
+	check("thread_kill 1", thread_kill(cm->tid, SIGRTMIN), 0);
+	thread_msleep(10);
+	check("thread_kill 2", thread_kill(cm->tid, SIGRTMIN), 0);
+	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
+
+	return NULL;
+}
+
+static void sig_norestart_double(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+		.tid = thread_self(),
+	};
+	pthread_t double_killer_tid;
+	struct sigaction sa = {
+		.sa_handler = sighandler,
+		.sa_flags = 0,
+	};
+	sigemptyset(&sa.sa_mask);
+
+	smokey_trace("%s", __func__);
+
+	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&double_killer_tid, 2, double_killer, &cm), 0);
+	thread_msleep(11);
+
+	sig_seen = 0;
+	start = timer_get_tsc();
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	check("sig_seen", sig_seen, 2);
+	thread_msleep(10);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(double_killer_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void sig_restart_double(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+		.tid = thread_self(),
+	};
+	pthread_t double_killer_tid;
+	struct sigaction sa = {
+		.sa_handler = sighandler,
+		.sa_flags = SA_RESTART,
+	};
+	sigemptyset(&sa.sa_mask);
+
+	smokey_trace("%s", __func__);
+
+	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&double_killer_tid, 2, double_killer, &cm), 0);
+	thread_msleep(11);
+
+	sig_seen = 0;
+	start = timer_get_tsc();
+
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check_sleep("cond_wait", start);
+	check("sig_seen", sig_seen, 2);
+	thread_msleep(10);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(double_killer_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void *cond_destroyer(void *cookie)
+{
+	unsigned long long start;
+	struct cond_mutex *cm = cookie;
+
+	start = timer_get_tsc();
+	check("mutex_lock", mutex_lock(cm->mutex), 0);
+	check_sleep("mutex_lock", start);
+	thread_msleep(10);
+	check("cond_destroy", cond_destroy(cm->cond), -EBUSY);
+	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
+
+	return NULL;
+}
+
+static void cond_destroy_whilewait(void)
+{
+	unsigned long long start;
+	pthread_mutex_t mutex;
+	pthread_cond_t cond;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+		.tid = thread_self(),
+	};
+	pthread_t cond_destroyer_tid;
+	struct sigaction sa = {
+		.sa_handler = sighandler,
+		.sa_flags = SA_RESTART,
+	};
+	sigemptyset(&sa.sa_mask);
+
+	smokey_trace("%s", __func__);
+
+	check_unix("sigaction", sigaction(SIGRTMIN, &sa, NULL), 0);
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_NONE), 0);
+	check("cond_init", cond_init(&cond, 0), 0);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+	check("thread_spawn",
+	      thread_spawn(&cond_destroyer_tid, 2, cond_destroyer, &cm), 0);
+	thread_msleep(11);
+
+	start = timer_get_tsc();
+
+	check("cond_wait", cond_wait(&cond, &mutex, 10 * NS_PER_MS), -ETIMEDOUT);
+	check_sleep("cond_wait", start);
+	thread_msleep(10);
+
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(cond_destroyer_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+static void *cond_delayed_signaler(void *cookie)
+{
+	struct cond_mutex *cm = cookie;
+
+	thread_msleep(10);
+
+	check("mutex_lock", mutex_lock(cm->mutex), 0);
+	check("cond_signal", cond_signal(cm->cond), 0);
+	check("mutex_unlock", mutex_unlock(cm->mutex), 0);
+
+	return NULL;
+}
+
+static void cond_ppmutex(void)
+{
+	pthread_cond_t cond = PTHREAD_COND_INITIALIZER;
+	pthread_mutex_t mutex;
+	struct cond_mutex cm = {
+		.mutex = &mutex,
+		.cond = &cond,
+	};
+	pthread_t cond_signaler_tid;
+
+	smokey_trace("%s", __func__);
+
+	check("mutex_init", mutex_init(&mutex, PTHREAD_MUTEX_DEFAULT,
+				       PTHREAD_PRIO_PROTECT), 0);
+	check("thread_spawn",
+	      thread_spawn(&cond_signaler_tid, 3, cond_delayed_signaler,
+			   &cm), 0);
+
+	thread_msleep(1);
+	check("mutex_lock", mutex_lock(&mutex), 0);
+
+	check("cond_wait", cond_wait(&cond, &mutex, 0), 0);
+	check("mutex_unlock", mutex_unlock(&mutex), 0);
+	check("thread_join", thread_join(cond_signaler_tid), 0);
+	check("mutex_destroy", mutex_destroy(&mutex), 0);
+	check("cond_destroy", cond_destroy(&cond), 0);
+}
+
+int run_posix_cond(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param sparam;
+
+	/* Set scheduling parameters for the current process */
+	sparam.sched_priority = 2;
+	pthread_setschedparam(pthread_self(), SCHED_FIFO, &sparam);
+
+	autoinit_simple_conddestroy();
+	autoinit_simple_condwait();
+	simple_condwait();
+	relative_condwait();
+	autoinit_absolute_condwait();
+	absolute_condwait();
+	sig_norestart_condwait();
+	sig_restart_condwait();
+	sig_norestart_condwait_mutex();
+	sig_restart_condwait_mutex();
+	sig_norestart_double();
+	sig_restart_double();
+	cond_destroy_whilewait();
+	cond_ppmutex();
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/Makefile.am
new file mode 100644
index 0000000..516c243
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/Makefile.am
@@ -0,0 +1,11 @@
+
+noinst_LIBRARIES = libposix-fork.a
+
+libposix_fork_a_SOURCES = posix-fork.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libposix_fork_a_CPPFLAGS =			\
+	@XENO_USER_CFLAGS@			\
+	-DXENO_TEST_DIR='"$(XENO_TEST_DIR)"'	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/posix-fork.c b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/posix-fork.c
new file mode 100644
index 0000000..3541a8a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-fork/posix-fork.c
@@ -0,0 +1,36 @@
+/*
+ * fork->exec test.
+ *
+ * Copyright (C) Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <xeno_config.h>
+#include <boilerplate/libc.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(posix_fork,
+		   SMOKEY_NOARGS,
+		   "Check POSIX fork->exec sequence."
+);
+
+/*
+ * The purpose of this test is to check whether Cobalt detects and
+ * handles a fork->exec sequence properly for Xenomai-enabled threads,
+ * with respect to managing their respective shadow contexts. Cobalt
+ * should drop the child's shadow upon detecting exec(), then create
+ * another one for the emerging process's main() thread as usual.
+ *
+ * We don't have to do much beyond firing such a sequence for
+ * testing: if Cobalt messes up, the kernel will certainly crash.
+ */
+static int run_posix_fork(struct smokey_test *t, int argc, char *const argv[])
+{
+	/*
+	 * Re-exec ourselves without running any test, this is
+	 * enough for creating a shadow context.
+	 */
+	return smokey_fork_exec(argv[0], "smokey");
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/Makefile.am
new file mode 100644
index 0000000..5a36c77
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libposix-mutex.a
+
+libposix_mutex_a_SOURCES = posix-mutex.c
+
+libposix_mutex_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/posix-mutex.c b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/posix-mutex.c
new file mode 100644
index 0000000..4aad249
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-mutex/posix-mutex.c
@@ -0,0 +1,1130 @@
+/*
+ * Functional testing of the mutex implementation for Cobalt.
+ *
+ * Copyright (C) Gilles Chanteperdrix  <gilles.chanteperdrix@xenomai.org>,
+ *               Marion Deveaud <marion.deveaud@siemens.com>,
+ *               Jan Kiszka <jan.kiszka@siemens.com>
+ *               Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <limits.h>
+#include <string.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <signal.h>
+#include <pthread.h>
+#include <cobalt/sys/cobalt.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(posix_mutex,
+		   SMOKEY_NOARGS,
+		   "Check POSIX mutex services"
+);
+
+static const char *reason_str[] = {
+	[SIGDEBUG_UNDEFINED] = "received SIGDEBUG for unknown reason",
+	[SIGDEBUG_MIGRATE_SIGNAL] = "received signal",
+	[SIGDEBUG_MIGRATE_SYSCALL] = "invoked syscall",
+	[SIGDEBUG_MIGRATE_FAULT] = "triggered fault",
+	[SIGDEBUG_MIGRATE_PRIOINV] = "affected by priority inversion",
+	[SIGDEBUG_NOMLOCK] = "process memory not locked",
+	[SIGDEBUG_WATCHDOG] = "watchdog triggered (period too short?)",
+	[SIGDEBUG_LOCK_BREAK] = "scheduler lock break",
+};
+
+static void sigdebug(int sig, siginfo_t *si, void *context)
+{
+	const char fmt[] = "%s, this is unexpected.\n"
+		"(enabling CONFIG_XENO_OPT_DEBUG_TRACE_RELAX may help)\n";
+	unsigned int reason = sigdebug_reason(si);
+	int n __attribute__ ((unused));
+	static char buffer[256];
+
+	if (reason > SIGDEBUG_LOCK_BREAK)
+		reason = SIGDEBUG_UNDEFINED;
+
+	n = snprintf(buffer, sizeof(buffer), fmt, reason_str[reason]);
+	n = write(STDERR_FILENO, buffer, n);
+}
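+
+/*
+ * Keep the SIGDEBUG handler async-signal-safe: format into a static
+ * buffer with snprintf(), then emit it with write() instead of
+ * calling stdio from signal context.
+ */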
+
+#define THREAD_PRIO_WEAK	0
+#define THREAD_PRIO_LOW		1
+#define THREAD_PRIO_MEDIUM	2
+#define THREAD_PRIO_HIGH	3
+#define THREAD_PRIO_VERY_HIGH	4
+
+#define MAX_100_MS  100000000ULL
+
+struct locker_context {
+	pthread_mutex_t *mutex;
+	struct smokey_barrier *barrier;
+	int lock_acquired;
+};
+
+static void sleep_ms(unsigned int ms)	/* < 1000 */
+{
+	struct timespec ts;
+	
+	ts.tv_sec = 0;
+	ts.tv_nsec = ms * 1000000;
+	clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL);
+}
+
+static int get_effective_prio(void) 
+{
+	struct cobalt_threadstat stat;
+	int ret;
+
+	ret = cobalt_thread_stat(0, &stat);
+	if (ret)
+		return ret;
+
+	return stat.cprio;
+}
+
+static int create_thread(pthread_t *tid, int policy, int prio,
+			 void *(*thread)(void *), void *arg)
+{
+	struct sched_param param;
+	pthread_attr_t thattr;
+	int ret;
+
+	pthread_attr_init(&thattr);
+	param.sched_priority = prio;
+	pthread_attr_setschedpolicy(&thattr, policy);
+	pthread_attr_setschedparam(&thattr, &param);
+	pthread_attr_setinheritsched(&thattr, PTHREAD_EXPLICIT_SCHED);
+
+	if (!__T(ret, pthread_create(tid, &thattr, thread, arg)))
+		return ret;
+
+	return 0;
+}
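+
+/*
+ * PTHREAD_EXPLICIT_SCHED is required here: without it, the created
+ * thread would inherit the caller's scheduling parameters and the
+ * policy/priority passed by the tests would be silently ignored.
+ */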
+
+static int do_init_mutexattr(pthread_mutexattr_t *mattr, int type, int protocol)
+{
+	int ret;
+
+	if (!__T(ret, pthread_mutexattr_init(mattr)))
+		return ret;
+	
+	if (!__T(ret, pthread_mutexattr_settype(mattr, type)))
+		return ret;
+	
+	if (!__T(ret, pthread_mutexattr_setprotocol(mattr, protocol)))
+		return ret;
+	
+	if (!__T(ret, pthread_mutexattr_setpshared(mattr, PTHREAD_PROCESS_PRIVATE)))
+		return ret;
+
+	return 0;
+}
+
+static int do_init_mutex(pthread_mutex_t *mutex, int type, int protocol)
+{
+	pthread_mutexattr_t mattr;
+	int ret;
+
+	ret = do_init_mutexattr(&mattr, type, protocol);
+	if (ret)
+		return ret;
+	
+	if (!__T(ret, pthread_mutex_init(mutex, &mattr)))
+		return ret;
+
+	if (!__T(ret, pthread_mutexattr_destroy(&mattr)))
+		return ret;
+	
+	return 0;
+}
+
+static int do_init_mutex_ceiling(pthread_mutex_t *mutex, int type, int prio)
+{
+	pthread_mutexattr_t mattr;
+	int ret;
+
+	ret = do_init_mutexattr(&mattr, type, PTHREAD_PRIO_PROTECT);
+	if (ret)
+		return ret;
+	
+	if (!__T(ret, pthread_mutexattr_setprioceiling(&mattr, prio)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_init(mutex, &mattr)))
+		return ret;
+
+	if (!__T(ret, pthread_mutexattr_destroy(&mattr)))
+		return ret;
+	
+	return 0;
+}
+
+static void *mutex_timed_locker(void *arg)
+{
+	struct locker_context *p = arg;
+	struct timespec now, ts;
+	int ret;
+
+	clock_gettime(CLOCK_REALTIME, &now);
+	/* 5ms (or 50ms in VM) from now */
+	timespec_adds(&ts, &now, smokey_on_vm ? 50000000 : 5000000);
+
+	if (p->barrier)
+		smokey_barrier_release(p->barrier);
+	
+	if (__F(ret, pthread_mutex_timedlock(p->mutex, &ts)) &&
+	    __Tassert(ret == -ETIMEDOUT))
+		return (void *)1;
+
+	return NULL;
+}
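+
+/*
+ * The timeout (5ms, or 50ms when smokey detects a VM with noisier
+ * timing) is expected to expire, since the spawning thread keeps
+ * holding the mutex until after the join.
+ */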
+
+static int do_timed_contend(pthread_mutex_t *mutex, int prio)
+{
+	struct locker_context args = { .barrier = NULL };
+	pthread_t tid;
+	void *status;
+	int ret;
+
+	if (!__T(ret, pthread_mutex_lock(mutex)))
+		return ret;
+
+	args.mutex = mutex;
+	ret = create_thread(&tid, SCHED_FIFO, prio,
+			    mutex_timed_locker, &args);
+	if (ret)
+		return ret;
+	
+	if (!__T(ret, pthread_join(tid, &status)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(mutex)))
+		return ret;
+
+	if (!__Fassert(status == NULL))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(mutex)))
+		return ret;
+
+	return 0;
+}
+
+static void *mutex_locker(void *arg)
+{
+	struct locker_context *p = arg;
+	int ret;
+
+	if (!__T(ret, pthread_mutex_lock(p->mutex)))
+		return (void *)(long)ret;
+
+	p->lock_acquired = 1;
+
+	if (!__T(ret, pthread_mutex_unlock(p->mutex)))
+		return (void *)(long)ret;
+
+	smokey_barrier_release(p->barrier);
+
+	return NULL;
+}
+
+static int do_contend(pthread_mutex_t *mutex, int type)
+{
+	struct smokey_barrier barrier;
+	struct locker_context args;
+	pthread_t tid;
+	void *status;
+	int ret;
+
+	if (!__T(ret, pthread_mutex_lock(mutex)))
+		return ret;
+
+	if (type == PTHREAD_MUTEX_RECURSIVE) {
+		if (!__T(ret, pthread_mutex_lock(mutex)))
+			return ret;
+	} else if (type == PTHREAD_MUTEX_ERRORCHECK) {
+		if (!__F(ret, pthread_mutex_lock(mutex)) ||
+		    !__Tassert(ret == -EDEADLK))
+			return -EINVAL;
+	}
+
+	args.mutex = mutex;
+	smokey_barrier_init(&barrier);
+	args.barrier = &barrier;
+	args.lock_acquired = 0;
+	ret = create_thread(&tid, SCHED_FIFO, THREAD_PRIO_MEDIUM,
+			    mutex_locker, &args);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(mutex)))
+		return ret;
+
+	if (type == PTHREAD_MUTEX_RECURSIVE) {
+		if (!__T(ret, pthread_mutex_unlock(mutex)))
+			return ret;
+	} else if (type == PTHREAD_MUTEX_ERRORCHECK) {
+		if (!__F(ret, pthread_mutex_unlock(mutex)) ||
+		    !__Tassert(ret == -EPERM))
+			return -EINVAL;
+	}
+
+	/* Wait until locker runs through. */
+	if (!__T(ret, smokey_barrier_wait(&barrier)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_destroy(mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_join(tid, &status)))
+		return ret;
+
+	if (!__Tassert(status == NULL))
+		return -EINVAL;
+
+	smokey_barrier_destroy(&barrier);
+
+	return 0;
+}
+
+static int do_destroy(pthread_mutex_t *mutex, pthread_mutex_t *invalmutex,
+	int type)
+{
+	int ret;
+
+	if (!__T(ret, pthread_mutex_destroy(mutex)))
+		return ret;
+	if (!__F(ret, pthread_mutex_destroy(invalmutex)) &&
+		__Tassert(ret == -EINVAL))
+		return -1;
+	return 0;
+}
+
+static int static_init_normal_destroy(void)
+{
+	pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+	pthread_mutex_t invalmutex = PTHREAD_MUTEX_INITIALIZER;
+
+	unsigned int invalmagic = ~0x86860303; // ~COBALT_MUTEX_MAGIC
+
+	memcpy((char *)&invalmutex + sizeof(invalmutex) - sizeof(invalmagic),
+		&invalmagic, sizeof(invalmagic));
+	return do_destroy(&mutex, &invalmutex, PTHREAD_MUTEX_NORMAL);
+}
+
+static int static_init_normal_contend(void)
+{
+	pthread_mutex_t mutex = PTHREAD_MUTEX_INITIALIZER;
+
+	return do_contend(&mutex, PTHREAD_MUTEX_NORMAL);
+}
+
+static int __dynamic_init_contend(int type)
+{
+	pthread_mutex_t mutex;
+	int ret;
+
+	ret = do_init_mutex(&mutex, type, PTHREAD_PRIO_NONE);
+	if (ret)
+		return ret;
+	
+	return do_contend(&mutex, type);
+}
+
+static int dynamic_init_normal_contend(void)
+{
+	return __dynamic_init_contend(PTHREAD_MUTEX_NORMAL);
+}
+
+static int static_init_recursive_destroy(void)
+{
+	pthread_mutex_t mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+	pthread_mutex_t invalmutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+
+	unsigned int invalmagic = ~0x86860303; // ~COBALT_MUTEX_MAGIC
+
+	memcpy((char *)&invalmutex + sizeof(invalmutex) - sizeof(invalmagic),
+		&invalmagic, sizeof(invalmagic));
+	return do_destroy(&mutex, &invalmutex, PTHREAD_MUTEX_RECURSIVE);
+}
+
+static int static_init_recursive_contend(void)
+{
+	pthread_mutex_t mutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;
+
+	return do_contend(&mutex, PTHREAD_MUTEX_RECURSIVE);
+}
+
+static int dynamic_init_recursive_contend(void)
+{
+	return __dynamic_init_contend(PTHREAD_MUTEX_RECURSIVE);
+}
+
+static int static_init_errorcheck_destroy(void)
+{
+	pthread_mutex_t mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+	pthread_mutex_t invalmutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+
+	unsigned int invalmagic = ~0x86860303; // ~COBALT_MUTEX_MAGIC
+
+	memcpy((char *)&invalmutex + sizeof(invalmutex) - sizeof(invalmagic),
+		&invalmagic, sizeof(invalmagic));
+	return do_destroy(&mutex, &invalmutex, PTHREAD_MUTEX_ERRORCHECK);
+}
+
+static int static_init_errorcheck_contend(void)
+{
+	pthread_mutex_t mutex = PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP;
+
+	return do_contend(&mutex, PTHREAD_MUTEX_ERRORCHECK);
+}
+
+static int dynamic_init_errorcheck_contend(void)
+{
+	return __dynamic_init_contend(PTHREAD_MUTEX_ERRORCHECK);
+}
+
+static int timed_contend(void)
+{
+	pthread_mutex_t mutex;
+	int ret;
+
+	ret = do_init_mutex(&mutex, PTHREAD_MUTEX_NORMAL,
+			    PTHREAD_PRIO_INHERIT);
+	if (ret)
+		return ret;
+
+	return do_timed_contend(&mutex, THREAD_PRIO_MEDIUM);
+}
+
+static int weak_mode_switch(void)
+{
+	struct sched_param old_param, param = { .sched_priority = 0 };
+	int old_policy, ret, mode;
+	pthread_mutex_t mutex;
+
+	ret = do_init_mutex(&mutex, PTHREAD_MUTEX_NORMAL,
+			    PTHREAD_PRIO_INHERIT);
+	if (ret)
+		return ret;
+
+	/* Save old schedparams, then switch to weak mode. */
+
+	if (!__T(ret, pthread_getschedparam(pthread_self(),
+					    &old_policy, &old_param)))
+		return ret;
+
+	/* Assume we are running SCHED_FIFO. */
+
+	mode = cobalt_thread_mode();
+	if (!__Fassert(mode & XNWEAK))
+		return -EINVAL;
+
+	/* Enter SCHED_WEAK scheduling. */
+	
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+					    SCHED_OTHER, &param)))
+		return ret;
+
+	mode = cobalt_thread_mode();
+	if (!__Tassert((mode & (XNWEAK|XNRELAX)) == (XNWEAK|XNRELAX)))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	/*
+	 * Holding a mutex should have switched us out of relaxed
+	 * mode despite being assigned to the SCHED_WEAK class.
+	 */
+	mode = cobalt_thread_mode();
+	if (!__Tassert((mode & (XNWEAK|XNRELAX)) == XNWEAK))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	/* Dropped it, we should have relaxed in the same move. */
+	
+	mode = cobalt_thread_mode();
+	if (!__Tassert((mode & (XNWEAK|XNRELAX)) == (XNWEAK|XNRELAX)))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	/* Leaving the SCHED_WEAK class. */
+
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+					    old_policy, &old_param)))
+		return ret;
+
+	mode = cobalt_thread_mode();
+	if (!__Fassert(mode & XNWEAK))
+		return -EINVAL;
+
+	return 0;
+}
+
+static int do_pi_contend(int prio)
+{
+	struct smokey_barrier barrier;
+	struct locker_context args;
+	pthread_mutex_t mutex;
+	pthread_t tid;
+	void *status;
+	int ret;
+
+	ret = do_init_mutex(&mutex, PTHREAD_MUTEX_NORMAL,
+			    PTHREAD_PRIO_INHERIT);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	args.mutex = &mutex;
+	smokey_barrier_init(&barrier);
+	args.barrier = &barrier;
+	ret = create_thread(&tid, SCHED_FIFO, prio,
+			    mutex_timed_locker, &args);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, smokey_barrier_wait(&barrier)))
+		return ret;
+
+	/*
+	 * Back while mutex_timed_locker is waiting. We should have
+	 * been boosted by now.
+	 */
+	if (!__Tassert(get_effective_prio() == prio))
+		return -EINVAL;
+	
+	if (!__T(ret, pthread_join(tid, &status)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__Fassert(status == NULL))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	smokey_barrier_destroy(&barrier);
+
+	return 0;
+}
+
+static int pi_contend(void)
+{
+	return do_pi_contend(THREAD_PRIO_HIGH);
+}
+
+static void *mutex_locker_steal(void *arg)
+{
+	struct locker_context *p = arg;
+	int ret;
+
+	smokey_barrier_release(p->barrier);
+	
+	if (!__T(ret, pthread_mutex_lock(p->mutex)))
+		return (void *)(long)ret;
+
+	p->lock_acquired = 1;
+
+	if (!__T(ret, pthread_mutex_unlock(p->mutex)))
+		return (void *)(long)ret;
+
+	return NULL;
+}
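+
+/*
+ * Unlike mutex_locker(), this variant releases the barrier *before*
+ * contending for the mutex, so the main thread can proceed while the
+ * locker is (about to be) blocked, which opens the stealing window.
+ */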
+
+static int do_steal(int may_steal)
+{
+	struct smokey_barrier barrier;
+	struct locker_context args;
+	pthread_mutex_t mutex;
+	pthread_t tid;
+	void *status;
+	int ret;
+
+	ret = do_init_mutex(&mutex, PTHREAD_MUTEX_NORMAL,
+			    PTHREAD_PRIO_NONE);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	args.mutex = &mutex;
+	smokey_barrier_init(&barrier);
+	args.barrier = &barrier;
+	args.lock_acquired = 0;
+	ret = create_thread(&tid, SCHED_FIFO, THREAD_PRIO_LOW,
+			    mutex_locker_steal, &args);
+	if (ret)
+		return ret;
+
+	/* Make sure the locker thread emerges... */
+	if (!__T(ret, smokey_barrier_wait(&barrier)))
+		return ret;
+
+	/* ...and blocks waiting on the mutex. */
+	sleep_ms(1);
+
+	/*
+	 * Back while mutex_locker should be blocking.
+	 *
+	 * If stealing is exercised, unlock then relock immediately:
+	 * we should have kept the ownership of the mutex and the
+	 * locker thread should not have grabbed it so far, because of
+	 * our higher priority.
+	 *
+	 * If stealing should not happen, unlock, wait a moment then
+	 * observe whether the locker thread was able to grab it as
+	 * expected.
+	 *
+	 * CAUTION: don't use pthread_mutex_trylock() to re-grab the
+	 * mutex: it would not do what you want, because there is no
+	 * stealing from userland, and a fast operation which never
+	 * enters the kernel cannot exercise this code path.
+	 */
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (may_steal) {
+		if (!__T(ret, pthread_mutex_lock(&mutex)))
+			return ret;
+
+		if (!__Fassert(args.lock_acquired))
+			return -EINVAL;
+	} else {
+		sleep_ms(1);
+
+		if (!__T(ret, pthread_mutex_lock(&mutex)))
+			return ret;
+
+		if (!__Tassert(args.lock_acquired))
+			return -EINVAL;
+	}
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_join(tid, &status)))
+		return ret;
+
+	if (!__Tassert(status == NULL))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	smokey_barrier_destroy(&barrier);
+
+	return 0;
+}
+
+static int steal(void)
+{
+	return do_steal(1);
+}
+
+static int no_steal(void)
+{
+	return do_steal(0);
+}
+
+/*
+ * NOTE: Cobalt implements a lazy enforcement scheme for priority
+ * protection of threads running in primary mode, which only registers
+ * a pending boost at locking time, committing it eventually when/if
+ * the owner thread schedules away while holding it. Entering a short
+ * sleep (in primary mode) right after a mutex is grabbed makes sure
+ * the boost is actually applied.
+ */
+static int protect_raise(void)
+{
+	pthread_mutex_t mutex;
+	int ret;
+
+	ret = do_init_mutex_ceiling(&mutex, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_HIGH);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	/* We should have been given a MEDIUM -> HIGH boost. */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+	
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_MEDIUM))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	return 0;
+}
+
+static int protect_lower(void)
+{
+	pthread_mutex_t mutex;
+	int ret;
+
+	ret = do_init_mutex_ceiling(&mutex, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_LOW);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	/* No boost should be applied. */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_MEDIUM))
+		return -EINVAL;
+	
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_MEDIUM))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	return 0;
+}
+
+static int protect_weak(void)
+{
+	struct sched_param old_param, weak_param;
+	pthread_mutex_t mutex;
+	int ret, old_policy;
+
+	if (!__T(ret, pthread_getschedparam(pthread_self(),
+					    &old_policy, &old_param)))
+		return ret;
+
+	/*
+	 * Switch to the SCHED_WEAK class if present. THREAD_PRIO_WEAK
+	 * (0) is used to make this work even without SCHED_WEAK
+	 * support.
+	 */
+	weak_param.sched_priority = THREAD_PRIO_WEAK;
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+					    SCHED_WEAK, &weak_param)))
+		return ret;
+
+	ret = do_init_mutex_ceiling(&mutex, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_HIGH);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	/* We should have been sent to SCHED_FIFO, THREAD_PRIO_HIGH. */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+	
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	/* Back to SCHED_WEAK, THREAD_PRIO_WEAK. */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_WEAK))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+					    old_policy, &old_param)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	return 0;
+}
+
+static int protect_nesting_protect(void)
+{
+	pthread_mutex_t mutex_high, mutex_very_high;
+	int ret;
+
+	ret = do_init_mutex_ceiling(&mutex_high, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_HIGH);
+	if (ret)
+		return ret;
+
+	ret = do_init_mutex_ceiling(&mutex_very_high, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_VERY_HIGH);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex_high)))
+		return ret;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex_very_high)))
+		return ret;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_VERY_HIGH))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex_very_high)))
+		return ret;
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+	
+	if (!__T(ret, pthread_mutex_unlock(&mutex_high)))
+		return ret;
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_MEDIUM))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex_high)) ||
+	    !__T(ret, pthread_mutex_destroy(&mutex_very_high)))
+		return ret;
+
+	return 0;
+}
+
+static int protect_nesting_pi(void)
+{
+	pthread_mutex_t mutex_pp;
+	int ret;
+
+	ret = do_init_mutex_ceiling(&mutex_pp, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_HIGH);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex_pp)))
+		return ret;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	/* PP ceiling: MEDIUM -> HIGH */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+	
+	/* PI boost expected: HIGH -> VERY_HIGH, then back to HIGH */
+	ret = do_pi_contend(THREAD_PRIO_VERY_HIGH);
+	if (ret)
+		return ret;
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+	
+	if (!__T(ret, pthread_mutex_unlock(&mutex_pp)))
+		return ret;
+
+	/* PP boost just dropped: HIGH -> MEDIUM. */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_MEDIUM))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex_pp)))
+		return ret;
+
+	return 0;
+}
+
+static int protect_dynamic(void)
+{
+	pthread_mutex_t mutex;
+	int ret, old_ceiling;
+
+	ret = do_init_mutex_ceiling(&mutex, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_HIGH);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_setprioceiling(&mutex,
+						   THREAD_PRIO_VERY_HIGH, &old_ceiling)))
+		return ret;
+
+	if (!__Tassert(old_ceiling == THREAD_PRIO_HIGH))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	/* We should have been given a HIGH -> VERY_HIGH boost. */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_VERY_HIGH))
+		return -EINVAL;
+	
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	/* Drop the boost: VERY_HIGH -> MEDIUM. */
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_MEDIUM))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_getprioceiling(&mutex, &old_ceiling)))
+		return ret;
+
+	if (!__Tassert(old_ceiling == THREAD_PRIO_VERY_HIGH))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	return 0;
+}
+
+static int protect_trylock(void)
+{
+	pthread_mutex_t mutex;
+	int ret;
+
+	ret = do_init_mutex_ceiling(&mutex, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_HIGH);
+	if (ret)
+		return ret;
+
+	/* make sure we are primary to take the fast-path */
+	sleep_ms(1);
+
+	if (!__T(ret, pthread_mutex_trylock(&mutex)))
+		return ret;
+
+	if (!__Tassert(pthread_mutex_trylock(&mutex) == EBUSY))
+		return -EINVAL;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	/* force to secondary to take the slow-path */
+	__real_usleep(1);
+
+	if (!__T(ret, pthread_mutex_trylock(&mutex)))
+		return ret;
+
+	if (!__Tassert(pthread_mutex_trylock(&mutex) == EBUSY))
+		return -EINVAL;
+
+	sleep_ms(1);	/* Commit the pending PP request. */
+
+	if (!__Tassert(get_effective_prio() == THREAD_PRIO_HIGH))
+		return -EINVAL;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	return 0;
+}
+
+static int protect_handover(void)
+{
+	struct smokey_barrier barrier;
+	struct locker_context args;
+	pthread_mutex_t mutex;
+	pthread_t tid;
+	int ret;
+
+	ret = do_init_mutex_ceiling(&mutex, PTHREAD_MUTEX_NORMAL,
+				    THREAD_PRIO_HIGH);
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	args.mutex = &mutex;
+	smokey_barrier_init(&barrier);
+	args.barrier = &barrier;
+	args.lock_acquired = 0;
+	ret = create_thread(&tid, SCHED_FIFO, THREAD_PRIO_LOW,
+			    mutex_locker, &args);
+	if (ret)
+		return ret;
+
+	sleep_ms(1);	/* Wait for the locker to contend on the mutex. */
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	/* Make sure the locker thread released the lock again. */
+	if (!__T(ret, smokey_barrier_wait(&barrier)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	smokey_barrier_destroy(&barrier);
+
+	return 0;
+}
+
+static void *mutex_timed_locker_inv_timeout(void *arg)
+{
+	struct locker_context *p = arg;
+	int ret;
+
+	if (__F(ret, pthread_mutex_timedlock(p->mutex, (void *) 0xdeadbeef)) &&
+	    __Tassert(ret == -EFAULT))
+		return (void *)1;
+
+	return NULL;
+}
+
+static int check_timedlock_abstime_validation(void)
+{
+	struct locker_context args;
+	pthread_mutex_t mutex;
+	pthread_t tid;
+	void *status;
+	int ret;
+
+	if (!__T(ret, pthread_mutex_init(&mutex, NULL)))
+		return ret;
+
+	/*
+	 * We don't own the mutex yet, so no need to validate the timeout as
+	 * the mutex can be locked immediately.
+	 *
+	 * The second parameter of pthread_mutex_timedlock() is flagged as
+	 * __nonnull so we take an invalid address instead of NULL.
+	 */
+	if (!__T(ret, pthread_mutex_timedlock(&mutex, (void *) 0xdeadbeef)))
+		return ret;
+
+	/*
+	 * Create a second thread which will have to wait and therefore will
+	 * validate the (invalid) timeout
+	 */
+	args.mutex = &mutex;
+	ret = create_thread(&tid, SCHED_FIFO, THREAD_PRIO_LOW,
+			    mutex_timed_locker_inv_timeout, &args);
+
+	if (ret)
+		return ret;
+
+	if (!__T(ret, pthread_join(tid, &status)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	if (!__Fassert(status == NULL))
+		return -EINVAL;
+
+	return 0;
+}
+
+/* Detect obviously wrong execution times. */
+static int check_time_limit(const struct timespec *start,
+			    xnticks_t limit_ns)
+{
+	struct timespec stop, delta;
+
+	clock_gettime(CLOCK_MONOTONIC, &stop);
+	timespec_sub(&delta, &stop, start);
+
+	return timespec_scalar(&delta) <= limit_ns;
+}
+
+#define do_test(__fn, __limit_ns)					\
+	do {								\
+		struct timespec __start;				\
+		int __ret;						\
+		smokey_trace(".. " __stringify(__fn));			\
+		clock_gettime(CLOCK_MONOTONIC, &__start);		\
+		__ret = __fn();						\
+		if (__ret)						\
+			return __ret;					\
+		if (!__Tassert(check_time_limit(&__start, __limit_ns)))	\
+			return -ETIMEDOUT;				\
+	} while (0)
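+
+/*
+ * do_test() traces the sub-test name, runs it, propagates any error,
+ * then fails with -ETIMEDOUT if the scenario overran its wall-clock
+ * budget (MAX_100_MS for every case below).
+ */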
+
+static int run_posix_mutex(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param param;
+	struct sigaction sa;
+	int ret;
+
+	sigemptyset(&sa.sa_mask);
+	sa.sa_sigaction = sigdebug;
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGDEBUG, &sa, NULL);
+
+	param.sched_priority = THREAD_PRIO_MEDIUM;
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+					    SCHED_FIFO, &param)))
+		return ret;
+
+	do_test(static_init_normal_destroy, MAX_100_MS);
+	do_test(static_init_normal_contend, MAX_100_MS);
+	do_test(dynamic_init_normal_contend, MAX_100_MS);
+	do_test(static_init_recursive_destroy, MAX_100_MS);
+	do_test(static_init_recursive_contend, MAX_100_MS);
+	do_test(dynamic_init_recursive_contend, MAX_100_MS);
+	do_test(static_init_errorcheck_destroy, MAX_100_MS);
+	do_test(static_init_errorcheck_contend, MAX_100_MS);
+	do_test(dynamic_init_errorcheck_contend, MAX_100_MS);
+	do_test(timed_contend, MAX_100_MS);
+	do_test(weak_mode_switch, MAX_100_MS);
+	do_test(pi_contend, MAX_100_MS);
+	do_test(steal, MAX_100_MS);
+	do_test(no_steal, MAX_100_MS);
+	do_test(protect_raise, MAX_100_MS);
+	do_test(protect_lower, MAX_100_MS);
+	do_test(protect_nesting_protect, MAX_100_MS);
+	do_test(protect_nesting_pi, MAX_100_MS);
+	do_test(protect_weak, MAX_100_MS);
+	do_test(protect_dynamic, MAX_100_MS);
+	do_test(protect_trylock, MAX_100_MS);
+	do_test(protect_handover, MAX_100_MS);
+	do_test(check_timedlock_abstime_validation, MAX_100_MS);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/Makefile.am
new file mode 100644
index 0000000..40b8839
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libposix-select.a
+
+libposix_select_a_SOURCES = posix-select.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libposix_select_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/posix-select.c b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/posix-select.c
new file mode 100644
index 0000000..1484fa6
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/posix-select/posix-select.c
@@ -0,0 +1,147 @@
+/*
+ * Copyright (C) 2011-2012 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include <unistd.h>
+#include <mqueue.h>
+#include <pthread.h>
+#include <boilerplate/atomic.h>
+#include <sys/select.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <fcntl.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(posix_select,
+		   SMOKEY_NOARGS,
+		   "Check POSIX select service"
+);
+
+static const char *tunes[] = {
+    "Surfing With The Alien",
+    "Lords of Karma",
+    "Banana Mango",
+    "Psycho Monkey",
+    "Luminous Flesh Giants",
+    "Moroccan Sunset",
+    "Satch Boogie",
+    "Flying In A Blue Dream",
+    "Ride",
+    "Summer Song",
+    "Speed Of Light",
+    "Crystal Planet",
+    "Raspberry Jam Delta-V",
+    "Champagne?",
+    "Clouds Race Across The Sky",
+    "Engines Of Creation"
+};
+
+static int test_status;
+
+static void *mq_thread(void *cookie)
+{
+	mqd_t mqd = (mqd_t)(long)cookie;
+	unsigned int i = 0, prio;
+	fd_set inset, tmp_inset;
+	char buf[128];
+	int ret;
+
+	FD_ZERO(&inset);
+	FD_SET(mqd, &inset);
+
+	for (;;) {
+		tmp_inset = inset;
+
+		ret = smokey_check_errno(select(mqd + 1, &tmp_inset, NULL, NULL, NULL));
+		if (ret < 0) {
+			test_status = ret;
+			break;
+		}
+
+		ret = smokey_check_errno(mq_receive(mqd, buf, sizeof(buf), &prio));
+		if (ret < 0) {
+			test_status = ret;
+			break;
+		}
+
+		if (strcmp(buf, "/done") == 0)
+			break;
+	
+		if (!smokey_assert(strcmp(buf, tunes[i]) == 0)) {
+			test_status = -EINVAL;
+			break;
+		}
+
+		smokey_trace("received %s", buf);
+		i = (i + 1) % (sizeof(tunes) / sizeof(tunes[0]));
+	}
+
+	return NULL;
+}
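+
+/*
+ * The receiver relies on the mqd_t returned by Cobalt being a valid
+ * file descriptor for select(): each iteration waits for readiness,
+ * then drains one message from the non-blocking queue.
+ */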
+
+static int run_posix_select(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct mq_attr qa;
+	pthread_t tcb;
+	int i, j, ret;
+	mqd_t mq;
+
+	mq_unlink("/select_test_mq");
+	qa.mq_maxmsg = 128;
+	qa.mq_msgsize = 128;
+	mq = smokey_check_errno(mq_open("/select_test_mq", O_RDWR | O_CREAT | O_NONBLOCK, 0, &qa));
+	if (mq < 0)
+		return mq;
+
+	ret = smokey_check_status(pthread_create(&tcb, NULL, mq_thread, (void *)(long)mq));
+	if (ret)
+		return ret;
+
+	for (j = 0; j < 3; j++) {
+		for (i = 0; i < sizeof(tunes) / sizeof(tunes[0]); i++) {
+			ret = smokey_check_errno(mq_send(mq, tunes[i], strlen(tunes[i]) + 1, 0));
+			if (ret < 0) {
+				smokey_check_status(pthread_cancel(tcb));
+				goto out;
+			}
+			usleep(100000);
+		}
+	}
+	ret = smokey_check_errno(mq_send(mq, "/done", sizeof "/done", 0));
+	if (ret < 0) {
+		smokey_check_status(pthread_cancel(tcb));
+		goto out;
+	}
+	usleep(300000);
+	smp_rmb();
+	ret = test_status;
+out:
+	pthread_join(tcb, NULL);
+
+	mq_unlink("/select_test_mq");
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/Makefile.am
new file mode 100644
index 0000000..6aad7d9
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = librtdm.a
+
+librtdm_a_SOURCES = rtdm.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+librtdm_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/rtdm.c b/kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/rtdm.c
new file mode 100644
index 0000000..6ad0d6d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/rtdm/rtdm.c
@@ -0,0 +1,202 @@
+/*
+ * Functional testing of RTDM services.
+ *
+ * Copyright (C) 2010 Jan Kiszka <jan.kiszka@web.de>.
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <string.h>
+#include <unistd.h>
+#include <rtdm/testing.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(rtdm,
+		   SMOKEY_NOARGS,
+		   "Check core interface to RTDM services."
+);
+
+#define NS_PER_MS (1000000)
+
+static inline unsigned long long timer_get_tsc(void)
+{
+	return clockobj_get_tsc();
+}
+
+static inline unsigned long long timer_tsc2ns(unsigned long long tsc)
+{
+	return clockobj_tsc_to_ns(tsc);
+}
+
+static void check_inner(const char *fn, int line, const char *msg,
+			int status, int expected)
+{
+	if (status == expected)
+		return;
+
+	fprintf(stderr, "FAILED %s:%d: %s returned %d instead of %d - %s\n",
+		fn, line, msg, status, expected, strerror(-status));
+	exit(EXIT_FAILURE);
+}
+
+#define check(msg, status, expected) ({					\
+	int __status = status;						\
+	check_inner(__func__, __LINE__, msg,			\
+		    __status < 0 ? -errno : __status, expected);	\
+	__status;							\
+})
+
+#define check_no_error(msg, status) ({					\
+	int __status = status;						\
+	check_inner(__func__, __LINE__, msg,			\
+		    __status < 0 ? -errno : 0, 0);			\
+	__status;							\
+})
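+
+/*
+ * As in the other smokey tests, raw syscall returns are normalized
+ * to 0 or -errno before comparison, so expected failures read as
+ * negative values, e.g. check("open", open(devname, O_RDWR), -EBUSY).
+ */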
+
+static const char *devname = "/dev/rtdm/rtdm0";
+static const char *devname2 = "/dev/rtdm/rtdm1";
+
+static int do_handover(int fd)
+{
+	struct sched_param param;
+	int ret, magic = 0;
+
+	if (!__F(ret, ioctl(fd, RTTST_RTIOC_RTDM_PING_PRIMARY, &magic)) ||
+	    errno != ENOTTY)
+		return ret ? -ENOTTY : -EINVAL;
+
+	if (!__Tassert(magic == 0))
+		return -EINVAL;
+
+	if (!__Terrno(ret, ioctl(fd, RTTST_RTIOC_RTDM_PING_SECONDARY, &magic)))
+		return ret;
+
+	if (!__Tassert(magic == RTTST_RTDM_MAGIC_SECONDARY))
+		return -EINVAL;
+
+	/* Switch to Cobalt's SCHED_FIFO[1] */
+	
+	param.sched_priority = 1;
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+					    SCHED_FIFO, &param)))
+		return ret;
+	
+	if (!__Terrno(ret, ioctl(fd, RTTST_RTIOC_RTDM_PING_PRIMARY, &magic)))
+		return ret;
+
+	if (!__Tassert(magic == RTTST_RTDM_MAGIC_PRIMARY))
+		return -EINVAL;
+
+	if (!__Terrno(ret, ioctl(fd, RTTST_RTIOC_RTDM_PING_SECONDARY, &magic)))
+		return ret;
+
+	if (!__Tassert(magic == RTTST_RTDM_MAGIC_SECONDARY))
+		return -EINVAL;
+
+	/* Switch to Cobalt's SCHED_WEAK[0] */
+	
+	param.sched_priority = 0;
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+					    SCHED_WEAK, &param)))
+		return ret;
+	
+	if (!__Terrno(ret, ioctl(fd, RTTST_RTIOC_RTDM_PING_PRIMARY, &magic)))
+		return ret;
+
+	if (!__Tassert(magic == RTTST_RTDM_MAGIC_PRIMARY))
+		return -EINVAL;
+
+	if (!__Terrno(ret, ioctl(fd, RTTST_RTIOC_RTDM_PING_SECONDARY, &magic)))
+		return ret;
+
+	if (!__Tassert(magic == RTTST_RTDM_MAGIC_SECONDARY))
+		return -EINVAL;
+
+	return 0;
+}
+
+static void *__test_handover(void *arg)
+{
+	int fd = *(int *)arg;
+
+	return (void *)(long)do_handover(fd);
+}
+
+static int test_handover(int fd)
+{
+	struct sched_param param;
+	pthread_attr_t attr;
+	pthread_t tid;
+	void *p;
+	int ret;
+
+	pthread_attr_init(&attr);
+	param.sched_priority = 0;
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
+	pthread_attr_setschedparam(&attr, &param);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+
+	if (!__T(ret, __STD(pthread_create(&tid, &attr,
+			   __test_handover, &fd))))
+		return ret;
+
+	if (!__T(ret, pthread_join(tid, &p)))
+		return ret;
+
+	return (int)(long)p;
+}
+
+static int run_rtdm(struct smokey_test *t, int argc, char *const argv[])
+{
+	int dev, dev2, status;
+
+	status = smokey_modprobe("xeno_rtdmtest", true);
+	if (status)
+		return -ENOSYS;
+
+	if (access(devname, 0) < 0 && errno == ENOENT)
+		return -ENOSYS;
+
+	smokey_trace("Setup");
+	dev = check_no_error("open", open(devname, O_RDWR));
+
+	smokey_trace("Exclusive open");
+	check("open", open(devname, O_RDWR), -EBUSY);
+
+	smokey_trace("Successive open");
+	dev2 = check("open", open(devname2, O_RDWR), dev + 1);
+	check("close", close(dev2), 0);
+
+	smokey_trace("Handover mode");
+	status = test_handover(dev);
+	if (status)
+		return status;
+
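+	/*
+	 * With deferred close enabled, the driver keeps the context
+	 * alive for a while after close() returns: re-opening the same
+	 * device must fail with EBUSY during that window, and the fd
+	 * number just released is expected to be recycled for the
+	 * second device. Once the grace period has elapsed, the first
+	 * device can be opened again.
+	 */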
+	smokey_trace("Defer close by pending reference");
+	check("ioctl", ioctl(dev, RTTST_RTIOC_RTDM_DEFER_CLOSE,
+			     RTTST_RTDM_DEFER_CLOSE_CONTEXT), 0);
+	check("close", close(dev), 0);
+	check("open", open(devname, O_RDWR), -EBUSY);
+	dev2 = check("open", open(devname2, O_RDWR), dev);
+	check("close", close(dev2), 0);
+	usleep(smokey_on_vm ? 400000 : 301000);
+	dev = check("open", open(devname, O_RDWR), dev);
+
+	smokey_trace("Normal close");
+	check("ioctl", ioctl(dev, RTTST_RTIOC_RTDM_DEFER_CLOSE,
+			     RTTST_RTDM_NORMAL_CLOSE), 0);
+	check("close", close(dev), 0);
+	dev = check("open", open(devname, O_RDWR), dev);
+
+	smokey_trace("Disconnect on module unload");
+	check("ioctl", ioctl(dev, RTTST_RTIOC_RTDM_DEFER_CLOSE,
+			     RTTST_RTDM_DEFER_CLOSE_CONTEXT), 0);
+	check("close", close(dev), 0);
+
+	smokey_rmmod("xeno_rtdmtest");
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/Makefile.am
new file mode 100644
index 0000000..7ea5ebc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/Makefile.am
@@ -0,0 +1,8 @@
+
+noinst_LIBRARIES = libsched-quota.a
+
+libsched_quota_a_SOURCES = sched-quota.c
+
+libsched_quota_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/sched-quota.c b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/sched-quota.c
new file mode 100644
index 0000000..26bc15e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-quota/sched-quota.c
@@ -0,0 +1,335 @@
+/*
+ * SCHED_QUOTA test.
+ *
+ * Copyright (C) Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <unistd.h>
+#include <math.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <sched.h>
+#include <errno.h>
+#include <error.h>
+#include <sys/cobalt.h>
+#include <boilerplate/time.h>
+#include <boilerplate/ancillaries.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(sched_quota,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_INT(quota),
+			   SMOKEY_INT(threads),
+		   ),
+   "Check the SCHED_QUOTA scheduling policy. Using a pool\n"
+   "\tof SCHED_FIFO threads, the code first calibrates, by estimating how\n"
+   "\tmuch work the system under test can perform when running\n"
+   "\tuninterrupted over a second.\n\n"
+   "\tThe same thread pool is re-started afterwards, as a SCHED_QUOTA\n"
+   "\tgroup this time, which is allotted a user-definable percentage of\n"
+   "\tthe global quota interval (CONFIG_XENO_OPT_SCHED_QUOTA_PERIOD).\n"
+   "\tUsing the reference calibration value obtained by running the\n"
+   "\tSCHED_FIFO pool, the percentage of runtime consumed by the\n"
+   "\tSCHED_QUOTA group over a second is calculated.\n\n"
+   "\tA successful test shows that the effective percentage of runtime\n"
+   "\tobserved with the SCHED_QUOTA group closely matches the allotted\n"
+   "\tquota (barring rounding errors and marginal latency)."
+);
+
+#define MAX_THREADS 8
+#define TEST_SECS   1
+
+static unsigned long long crunch_per_sec, loops_per_sec;
+
+static pthread_t threads[MAX_THREADS];
+
+static unsigned long counts[MAX_THREADS];
+
+static int nrthreads;
+
+static pthread_cond_t barrier;
+
+static pthread_mutex_t lock;
+
+static int started;
+
+static sem_t ready;
+
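+/*
+ * When set, the busy-looping workers back off into sleep(1), so that
+ * gathering the counters and cancelling the pool does not have to
+ * compete with them for the CPU.
+ */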
+static volatile int throttle;
+
+static unsigned long __attribute__(( noinline ))
+__do_work(unsigned long count)
+{
+	return count + 1;
+}
+
+static void __attribute__(( noinline ))
+do_work(unsigned long loops, unsigned long *count_r)
+{
+	unsigned long n;
+
+	for (n = 0; n < loops; n++)
+		*count_r = __do_work(*count_r);
+}
+
+static void *thread_body(void *arg)
+{
+	unsigned long *count_r = arg, loops;
+	int oldstate, oldtype;
+
+	pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &oldstate);
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &oldtype);
+	loops = crunch_per_sec / 100; /* yield every ~10 ms of runtime */
+	*count_r = 0;
+	sem_post(&ready);
+
+	pthread_mutex_lock(&lock);
+	for (;;) {
+		if (started)
+			break;
+		pthread_cond_wait(&barrier, &lock);
+	}
+	pthread_mutex_unlock(&lock);
+
+	for (;;) {
+		do_work(loops, count_r);
+		if (throttle)
+			sleep(1);
+		else if (nrthreads > 1)
+			sched_yield();
+	}
+
+	return NULL;
+}
+
+static void __create_quota_thread(pthread_t *tid, const char *name,
+				  int tgid, unsigned long *count_r)
+{
+	struct sched_param_ex param_ex;
+	pthread_attr_ex_t attr_ex;
+	int ret;
+
+	pthread_attr_init_ex(&attr_ex);
+	pthread_attr_setdetachstate_ex(&attr_ex, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched_ex(&attr_ex, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy_ex(&attr_ex, SCHED_QUOTA);
+	param_ex.sched_priority = 1;
+	param_ex.sched_quota_group = tgid;
+	pthread_attr_setschedparam_ex(&attr_ex, &param_ex);
+	ret = pthread_create_ex(tid, &attr_ex, thread_body, count_r);
+	if (ret)
+		error(1, ret, "pthread_create_ex(SCHED_QUOTA)");
+
+	pthread_attr_destroy_ex(&attr_ex);
+	pthread_setname_np(*tid, name);
+}
+
+#define create_quota_thread(__tid, __label, __tgid, __count)	\
+	__create_quota_thread(&(__tid), __label, __tgid, &(__count))
+
+static void __create_fifo_thread(pthread_t *tid, const char *name,
+				 unsigned long *count_r)
+{
+	struct sched_param param;
+	pthread_attr_t attr;
+	int ret;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	param.sched_priority = 1;
+	pthread_attr_setschedparam(&attr, &param);
+	ret = pthread_create(tid, &attr, thread_body, count_r);
+	if (ret)
+		error(1, ret, "pthread_create(SCHED_FIFO)");
+
+	pthread_attr_destroy(&attr);
+	pthread_setname_np(*tid, name);
+}
+
+#define create_fifo_thread(__tid, __label, __count)	\
+	__create_fifo_thread(&(__tid), __label, &(__count))
+
+static double run_quota(int quota)
+{
+	size_t len = sched_quota_confsz();
+	unsigned long long count;
+	union sched_config cf;
+	struct timespec req;
+	int ret, tgid, n;
+	double percent;
+	char label[12];
+
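+	/*
+	 * Create a new SCHED_QUOTA thread group, cap its quota and quota
+	 * peak at the requested percentage of the global quota period,
+	 * then populate it with the worker pool.
+	 */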
+	cf.quota.op = sched_quota_add;
+	cf.quota.add.pshared = 0;
+	ret = sched_setconfig_np(0, SCHED_QUOTA, &cf, len);
+	if (ret)
+		error(1, ret, "sched_setconfig_np(add-quota-group)");
+
+	tgid = cf.quota.info.tgid;
+	cf.quota.op = sched_quota_set;
+	cf.quota.set.quota = quota;
+	cf.quota.set.quota_peak = quota;
+	cf.quota.set.tgid = tgid;
+	ret = sched_setconfig_np(0, SCHED_QUOTA, &cf, len);
+	if (ret)
+		error(1, ret, "sched_setconfig_np(set-quota, tgid=%d)", tgid);
+
+	smokey_trace("new thread group #%d on CPU0, quota sum is %d%%",
+		     tgid, cf.quota.info.quota_sum);
+
+	for (n = 0; n < nrthreads; n++) {
+		snprintf(label, sizeof(label), "t%d", n);
+		create_quota_thread(threads[n], label, tgid, counts[n]);
+		sem_wait(&ready);
+	}
+
+	pthread_mutex_lock(&lock);
+	started = 1;
+	pthread_cond_broadcast(&barrier);
+	pthread_mutex_unlock(&lock);
+
+	req.tv_sec = TEST_SECS;
+	req.tv_nsec = 0;
+	clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
+
+	for (n = 0, count = 0; n < nrthreads; n++)
+		count += counts[n];
+
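+	/*
+	 * Express the aggregate work done per second as a percentage of
+	 * the unconstrained rate measured by the SCHED_FIFO calibration
+	 * pass.
+	 */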
+	percent = ((double)count / TEST_SECS) * 100.0 / loops_per_sec;
+
+	throttle = 1;
+	smp_wmb();
+
+	for (n = 0; n < nrthreads; n++) {
+		smokey_trace("done quota_thread[%d], count=%lu", n, counts[n]);
+		pthread_cancel(threads[n]);
+		pthread_join(threads[n], NULL);
+	}
+
+	cf.quota.op = sched_quota_remove;
+	cf.quota.remove.tgid = tgid;
+	ret = sched_setconfig_np(0, SCHED_QUOTA, &cf, len);
+	if (ret)
+		error(1, ret, "sched_setconfig_np(remove-quota-group)");
+
+	return percent;
+}
+
+static unsigned long long calibrate(void)
+{
+	struct timespec start, end, delta;
+	const int crunch_loops = 100000;
+	unsigned long long ns, lps;
+	unsigned long count;
+	struct timespec req;
+	char label[12];
+	int n;
+
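+	/*
+	 * Time a fixed burst of do_work() iterations to estimate how
+	 * many iterations fit into one uninterrupted second on this
+	 * machine.
+	 */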
+	count = 0;
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	do_work(crunch_loops, &count);
+	clock_gettime(CLOCK_MONOTONIC, &end);
+	
+	timespec_sub(&delta, &end, &start);
+	ns = delta.tv_sec * ONE_BILLION + delta.tv_nsec;
+	crunch_per_sec = (unsigned long long)((double)ONE_BILLION / (double)ns * crunch_loops);
+
+	for (n = 0; n < nrthreads; n++) {
+		snprintf(label, sizeof(label), "t%d", n);
+		create_fifo_thread(threads[n], label, counts[n]);
+		sem_wait(&ready);
+	}
+
+	pthread_mutex_lock(&lock);
+	started = 1;
+	pthread_cond_broadcast(&barrier);
+	pthread_mutex_unlock(&lock);
+
+	req.tv_sec = 1;
+	req.tv_nsec = 0;
+	clock_nanosleep(CLOCK_MONOTONIC, 0, &req, NULL);
+
+	for (n = 0, lps = 0; n < nrthreads; n++)
+		lps += counts[n];
+
+	throttle = 1;
+	smp_wmb();
+
+	for (n = 0; n < nrthreads; n++) {
+		pthread_cancel(threads[n]);
+		pthread_join(threads[n], NULL);
+	}
+
+	started = 0;
+	throttle = 0;
+
+	return lps;
+}
+
+static int run_sched_quota(struct smokey_test *t, int argc, char *const argv[])
+{
+	pthread_t me = pthread_self();
+	int ret, quota = 0, policies;
+	struct sched_param param;
+	cpu_set_t affinity;
+	double effective;
+
+	ret = cobalt_corectl(_CC_COBALT_GET_POLICIES, &policies, sizeof(policies));
+	if (ret || (policies & _CC_COBALT_SCHED_QUOTA) == 0)
+		return -ENOSYS;
+	
+	CPU_ZERO(&affinity);
+	CPU_SET(0, &affinity);
+	ret = sched_setaffinity(0, sizeof(affinity), &affinity);
+	if (ret)
+		error(1, errno, "sched_setaffinity");
+
+	smokey_parse_args(t, argc, argv);
+	pthread_mutex_init(&lock, NULL);
+	pthread_cond_init(&barrier, NULL);
+	sem_init(&ready, 0, 0);
+
+	param.sched_priority = 50;
+	ret = pthread_setschedparam(me, SCHED_FIFO, &param);
+	if (ret) {
+		warning("pthread_setschedparam(SCHED_FIFO, 50) failed");
+		return -ret;
+	}
+
+	if (SMOKEY_ARG_ISSET(sched_quota, quota))
+		quota = SMOKEY_ARG_INT(sched_quota, quota);
+
+	if (quota <= 0)
+		quota = 10;
+
+	if (SMOKEY_ARG_ISSET(sched_quota, threads))
+		nrthreads = SMOKEY_ARG_INT(sched_quota, threads);
+
+	if (nrthreads <= 0)
+		nrthreads = 3;
+	if (nrthreads > MAX_THREADS)
+		error(1, EINVAL, "max %d threads", MAX_THREADS);
+
+	calibrate();	/* Warming up, ignore result. */
+	loops_per_sec = calibrate();
+
+	smokey_trace("calibrating: %Lu loops/sec", loops_per_sec);
+
+	effective = run_quota(quota);
+	smokey_trace("%d thread%s: cap=%d%%, effective=%.1f%%",
+		     nrthreads, nrthreads > 1 ? "s": "", quota, effective);
+
+	if (!smokey_on_vm && fabs(effective - (double)quota) > 1.5) {
+		smokey_warning("out of quota: %.1f%%",
+			       effective - (double)quota);
+		return -EPROTO;
+	}
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/Makefile.am
new file mode 100644
index 0000000..4adeb89
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libsched-tp.a
+
+libsched_tp_a_SOURCES = sched-tp.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libsched_tp_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/sched-tp.c b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/sched-tp.c
new file mode 100644
index 0000000..434ac3c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/sched-tp/sched-tp.c
@@ -0,0 +1,244 @@
+/*
+ * SCHED_TP setup test.
+ *
+ * Copyright (C) Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <limits.h>
+#include <memory.h>
+#include <time.h>
+#include <malloc.h>
+#include <unistd.h>
+#include <string.h>
+#include <signal.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <sched.h>
+#include <errno.h>
+#include <error.h>
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(sched_tp,
+		   SMOKEY_NOARGS,
+		   "Check the SCHED_TP scheduling policy"
+);
+
+static pthread_t threadA, threadB, threadC;
+
+static sem_t barrier;
+
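+/*
+ * Expected marker stream for the 400 ms time frame set up below: each
+ * thread writes its tag about every 10.5 ms while its partition window
+ * is active, so partition #2 (C, 100 ms) contributes ten marks per
+ * frame, #1 (B, 50 ms) five, #0 (A, 20 ms) two, and the trailing
+ * 230 ms idle slot none.
+ */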
+static const char ref_schedule[] =
+	"CCCCCCCCCCBBBBBAACCCCCCCCCCBBBBBAACCCCCCCCCCBBBBBAACCCCCCCCCC"
+	"BBBBBAACCCCCCCCCCBBBBBAACCCCCCCCCCBBBBBAACCCCCCCCCCBBBBBAA"
+	"CCCCCCCCCCBBBBBAACCCCCCCCCCBBBBBAACCCCCCCCCCBBBBBAACCCCCCCCCC"
+	"BBBBBAACCCCCCCCCCBBBBBAACCCCCCCCCCBBBBBAACCCCCCCC";
+
+static char schedule[sizeof(ref_schedule) + 8], *curr = schedule;
+
+static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
+
+static int overflow;
+
+static void *thread_body(void *arg)
+{
+	pthread_t me = pthread_self();
+	struct sched_param_ex param;
+	struct timespec ts;
+	cpu_set_t affinity;
+	int ret, part;
+
+	CPU_ZERO(&affinity);
+	CPU_SET(0, &affinity);
+	ret = sched_setaffinity(0, sizeof(affinity), &affinity);
+	if (ret)
+		error(1, errno, "sched_setaffinity");
+
+	part = (int)(long)arg;
+	param.sched_priority = 50 - part;
+	param.sched_tp_partition = part;
+	ret = pthread_setschedparam_ex(me, SCHED_TP, &param);
+	if (ret)
+		error(1, ret, "pthread_setschedparam_ex");
+
+	sem_wait(&barrier);
+	sem_post(&barrier);
+
+	for (;;) {
+		/*
+		 * The mutex is there in case the scheduler behaves in
+		 * a really weird way so that we don't write out of
+		 * bounds; otherwise no serialization should happen
+		 * due to this lock.
+		 */
+		pthread_mutex_lock(&lock);
+		if (curr >= schedule + sizeof(schedule)) {
+			pthread_mutex_unlock(&lock);
+			overflow = 1;
+			break;
+		}
+		*curr++ = 'A' + part;
+		pthread_mutex_unlock(&lock);
+		ts.tv_sec = 0;
+		ts.tv_nsec = 10500000;
+		clock_nanosleep(CLOCK_MONOTONIC, 0, &ts, NULL);
+	}
+
+	return NULL;
+}
+
+static void cleanup(void)
+{
+	pthread_cancel(threadC);
+	pthread_cancel(threadB);
+	pthread_cancel(threadA);
+	pthread_join(threadC, NULL);
+	pthread_join(threadB, NULL);
+	pthread_join(threadA, NULL);
+}
+
+static void __create_thread(pthread_t *tid, const char *name, int seq)
+{
+	struct sched_param param = { .sched_priority = 1 };
+	pthread_attr_t attr;
+	int ret;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	pthread_attr_setschedparam(&attr, &param);
+	ret = pthread_create(tid, &attr, thread_body, (void *)(long)seq);
+	if (ret)
+		error(1, ret, "pthread_create");
+
+	pthread_attr_destroy(&attr);
+	pthread_setname_np(*tid, name);
+}
+
+#define create_thread(tid, n) __create_thread(&(tid), # tid, n)
+#define NR_WINDOWS  4
+
+static int run_sched_tp(struct smokey_test *t, int argc, char *const argv[])
+{
+	union sched_config *p;
+	int ret, n, policies;
+	size_t len;
+	char *s;
+
+	ret = cobalt_corectl(_CC_COBALT_GET_POLICIES, &policies, sizeof(policies));
+	if (ret || (policies & _CC_COBALT_SCHED_TP) == 0)
+		return -ENOSYS;
+	
+	/*
+	 * For a recurring global time frame of 400 ms, we define a TP
+	 * schedule as follows:
+	 *
+	 * - thread(s) assigned to partition #2 (tag C) shall be
+	 * allowed to run for 100 ms, when the next global time frame
+	 * begins.
+	 *
+	 * - thread(s) assigned to partition #1 (tag B) shall be
+	 * allowed to run for 50 ms, after the previous time slot
+	 * ends.
+	 *
+	 * - thread(s) assigned to partition #0 (tag A) shall be
+	 * allowed to run for 20 ms, after the previous time slot
+	 * ends.
+	 *
+	 * - when the previous time slot ends, no TP thread shall be
+	 * allowed to run until the global time frame ends (special
+	 * setting of ptid == -1), i.e. 230 ms.
+	 */
+	len = sched_tp_confsz(NR_WINDOWS);
+	p = malloc(len);
+	if (p == NULL)
+		error(1, ENOMEM, "malloc");
+
+	p->tp.op = sched_tp_install;
+	p->tp.nr_windows = NR_WINDOWS;
+	p->tp.windows[0].offset.tv_sec = 0;
+	p->tp.windows[0].offset.tv_nsec = 0;
+	p->tp.windows[0].duration.tv_sec = 0;
+	p->tp.windows[0].duration.tv_nsec = 100000000;
+	p->tp.windows[0].ptid = 2;
+	p->tp.windows[1].offset.tv_sec = 0;
+	p->tp.windows[1].offset.tv_nsec = 100000000;
+	p->tp.windows[1].duration.tv_sec = 0;
+	p->tp.windows[1].duration.tv_nsec = 50000000;
+	p->tp.windows[1].ptid = 1;
+	p->tp.windows[2].offset.tv_sec = 0;
+	p->tp.windows[2].offset.tv_nsec = 150000000;
+	p->tp.windows[2].duration.tv_sec = 0;
+	p->tp.windows[2].duration.tv_nsec = 20000000;
+	p->tp.windows[2].ptid = 0;
+	p->tp.windows[3].offset.tv_sec = 0;
+	p->tp.windows[3].offset.tv_nsec = 170000000;
+	p->tp.windows[3].duration.tv_sec = 0;
+	p->tp.windows[3].duration.tv_nsec = 230000000;
+	p->tp.windows[3].ptid = -1;
+
+	/* Assign the TP schedule to CPU #0 */
+	ret = sched_setconfig_np(0, SCHED_TP, p, len);
+	if (ret)
+		error(1, ret, "sched_setconfig_np(install)");
+
+	memset(p, 0xa5, len);
+
+	ret = sched_getconfig_np(0, SCHED_TP, p, &len);
+	if (ret)
+		error(1, ret, "sched_getconfig_np");
+
+	smokey_trace("check: %d windows", p->tp.nr_windows);
+	for (n = 0; n < NR_WINDOWS; n++)
+		smokey_trace("[%d] offset = { %ld s, %ld ns }, duration = { %ld s, %ld ns }, ptid = %d",
+			     n,
+			     p->tp.windows[n].offset.tv_sec,
+			     p->tp.windows[n].offset.tv_nsec,
+			     p->tp.windows[n].duration.tv_sec,
+			     p->tp.windows[n].duration.tv_nsec,
+			     p->tp.windows[n].ptid);
+
+	sem_init(&barrier, 0, 0);
+	create_thread(threadA, 0);
+	create_thread(threadB, 1);
+	create_thread(threadC, 2);
+
+	/* Start the TP schedule. */
+	len = sched_tp_confsz(0);
+	p->tp.op = sched_tp_start;
+	ret = sched_setconfig_np(0, SCHED_TP, p, len);
+	if (ret)
+		error(1, ret, "sched_setconfig_np(start)");
+
+	sem_post(&barrier);
+	sleep(5);
+	cleanup();
+	sem_destroy(&barrier);
+	free(p);
+
+	if (smokey_on_vm)
+		return 0;
+
+	if (overflow) {
+		smokey_warning("schedule overflowed");
+		return -EPROTO;
+	}
+
+	/*
+	 * Lingering initialization work may shorten the first time
+	 * window enough to lose an iteration, and a few extra marks may
+	 * be generated while we are busy stopping the threads, so we
+	 * only require a match within the first two positions of the
+	 * reference schedule.
+	 */
+	s = strstr(ref_schedule, schedule);
+	if (s == NULL || s - ref_schedule > 1) {
+		smokey_warning("unexpected schedule:\n%s", schedule);
+		return -EPROTO;
+	}
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/setsched/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/setsched/Makefile.am
new file mode 100644
index 0000000..8c0a59b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/setsched/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libsetsched.a
+
+libsetsched_a_SOURCES = setsched.c
+
+libsetsched_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/setsched/setsched.c b/kernel/xenomai-v3.2.4/testsuite/smokey/setsched/setsched.c
new file mode 100644
index 0000000..fc348a8
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/setsched/setsched.c
@@ -0,0 +1,149 @@
+/*
+ * Scheduler live-adjustment test.
+ *
+ * Copyright (c) Siemens AG 2016
+ *
+ * Authors:
+ *  Jan Kiszka <jan.kiszka@siemens.com>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdbool.h>
+#include <pthread.h>
+#include <error.h>
+#include <semaphore.h>
+#include <unistd.h>
+#include <sys/syscall.h>
+#include <smokey/smokey.h>
+#include <sys/cobalt.h>
+
+smokey_test_plugin(setsched, SMOKEY_NOARGS,
+   "Validate correct application of scheduling parameters to running threads."
+);
+
+static pid_t thread_pid;
+
+static void __check_linux_schedparams(int expected_policy, int expected_prio,
+				      int line)
+{
+	struct sched_param linux_param;
+	int linux_policy;
+
+	linux_policy = syscall(SYS_sched_getscheduler, thread_pid);
+	if (smokey_check_status(syscall(SYS_sched_getparam, thread_pid,
+					&linux_param)))
+		pthread_exit((void *)(long)-EINVAL);
+
+	if (!smokey_assert(linux_policy == expected_policy) ||
+	    !smokey_assert(linux_param.sched_priority == expected_prio)) {
+		smokey_warning("called from line %d", line);
+		pthread_exit((void *)(long)-EINVAL);
+	}
+}
+
+#define check_linux_schedparams(pol, prio)	\
+	__check_linux_schedparams(pol, prio, __LINE__)
+
+static void __check_rt_schedparams(int expected_policy, int expected_prio,
+				   int line)
+{
+	struct sched_param cobalt_param;
+	int cobalt_policy;
+
+	if (smokey_check_status(pthread_getschedparam(pthread_self(),
+						      &cobalt_policy,
+						      &cobalt_param)))
+		pthread_exit((void *)(long)-EINVAL);
+
+	if (!smokey_assert(cobalt_policy == expected_policy) ||
+	    !smokey_assert(cobalt_param.sched_priority == expected_prio)) {
+		smokey_warning("called from line %d", line);
+		pthread_exit((void *)(long)-EINVAL);
+	}
+}
+
+#define check_rt_schedparams(pol, prio)	\
+	__check_rt_schedparams(pol, prio, __LINE__)
+
+static void *thread_body(void *arg)
+{
+	struct sched_param param;
+#ifdef CONFIG_XENO_LAZY_SETSCHED
+	struct cobalt_threadstat stats;
+	unsigned long long msw;
+#endif
+
+	thread_pid = syscall(SYS_gettid);
+
+	check_rt_schedparams(SCHED_FIFO, 1);
+	check_linux_schedparams(SCHED_FIFO, 1);
+
+	cobalt_thread_harden();
+
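+	/*
+	 * With lazy setsched support, changing the scheduling parameters
+	 * of a hardened thread must not trigger a switch to secondary
+	 * mode, so the mode-switch counter (msw) is sampled here and
+	 * re-checked after each parameter update.
+	 */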
+#ifdef CONFIG_XENO_LAZY_SETSCHED
+	if (smokey_check_status(cobalt_thread_stat(thread_pid, &stats)))
+		pthread_exit((void *)(long)-EINVAL);
+	msw = stats.msw;
+#endif
+
+	param.sched_priority = 2;
+	if (smokey_check_status(pthread_setschedparam(pthread_self(),
+						      SCHED_FIFO, &param)))
+		pthread_exit((void *)(long)-EINVAL);
+
+	check_rt_schedparams(SCHED_FIFO, 2);
+
+#ifdef CONFIG_XENO_LAZY_SETSCHED
+	if (smokey_check_status(cobalt_thread_stat(thread_pid, &stats)) ||
+	    !smokey_assert(stats.msw == msw))
+		pthread_exit((void *)(long)-EINVAL);
+#endif
+
+	check_linux_schedparams(SCHED_FIFO, 2);
+
+	cobalt_thread_harden();
+
+#ifdef CONFIG_XENO_LAZY_SETSCHED
+	if (smokey_check_status(cobalt_thread_stat(thread_pid, &stats)))
+		pthread_exit((void *)(long)-EINVAL);
+	msw = stats.msw;
+#endif
+
+	if (smokey_check_status(pthread_setschedprio(pthread_self(), 3)))
+		pthread_exit((void *)(long)-EINVAL);
+
+	check_rt_schedparams(SCHED_FIFO, 3);
+
+#ifdef CONFIG_XENO_LAZY_SETSCHED
+	if (smokey_check_status(cobalt_thread_stat(thread_pid, &stats)) ||
+	    !smokey_assert(stats.msw == msw))
+		pthread_exit((void *)(long)-EINVAL);
+#endif
+
+	check_linux_schedparams(SCHED_FIFO, 3);
+
+	return (void *)0L;
+}
+
+static int run_setsched(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param param;
+	pthread_attr_t attr;
+	pthread_t thread;
+	void *retval;
+	int ret;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	param.sched_priority = 1;
+	pthread_attr_setschedparam(&attr, &param);
+	ret = pthread_create(&thread, &attr, thread_body, NULL);
+	if (ret)
+		error(1, ret, "pthread_create");
+
+	pthread_join(thread, &retval);
+
+	return (long)retval;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/Makefile.am
new file mode 100644
index 0000000..06b976d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libsigdebug.a
+
+libsigdebug_a_SOURCES = sigdebug.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libsigdebug_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/sigdebug.c b/kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/sigdebug.c
new file mode 100644
index 0000000..9cb5162
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/sigdebug/sigdebug.c
@@ -0,0 +1,302 @@
+/*
+ * Functional testing of unwanted domain switch debugging mechanism.
+ *
+ * Copyright (C) Siemens AG, 2012-2019
+ *
+ * Authors:
+ *  Jan Kiszka <jan.kiszka@siemens.com>
+ *
+ * Released under the terms of GPLv2.
+ */
+
+#include <unistd.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <stdbool.h>
+#include <string.h>
+#include <signal.h>
+#include <fcntl.h>
+#include <sys/mman.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <asm/unistd.h>
+#include <sys/cobalt.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(sigdebug,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_BOOL(no_watchdog),
+		   ),
+		   "Check SIGDEBUG reporting."
+);
+
+unsigned int expected_reason;
+bool sigdebug_received;
+pthread_mutex_t prio_invert;
+int corectl_debug;
+sem_t send_signal;
+char *mem;
+FILE *wd;
+
+static void setup_checkdebug(unsigned int reason)
+{
+	sigdebug_received = false;
+	expected_reason = reason;
+}
+
+static void check_inner(const char *fn, int line, const char *msg,
+			int status, int expected)
+{
+	if (status == expected)
+		return;
+
+	pthread_setmode_np(PTHREAD_WARNSW, 0, NULL);
+	rt_print_flush_buffers();
+	fprintf(stderr, "FAILURE %s:%d: %s returned %d instead of %d - %s\n",
+		fn, line, msg, status, expected, strerror(status));
+	exit(EXIT_FAILURE);
+}
+
+static void check_sigdebug_inner(const char *fn, int line, const char *reason)
+{
+	if (sigdebug_received)
+		return;
+
+	pthread_setmode_np(PTHREAD_WARNSW, 0, NULL);
+	rt_print_flush_buffers();
+	fprintf(stderr, "FAILURE %s:%d: no %s received\n", fn, line, reason);
+	exit(EXIT_FAILURE);
+}
+
+#define check(msg, status, expected) ({					\
+	int __status = status;						\
+	check_inner(__func__, __LINE__, msg, __status, expected);	\
+	__status;							\
+})
+
+#define check_no_error(msg, status) ({					\
+	int __status = status;						\
+	check_inner(__func__, __LINE__, msg,				\
+		    __status < 0 ? errno : __status, 0);		\
+	__status;							\
+})
+
+#define check_sigdebug_received(reason) do {				\
+	const char *__reason = reason;					\
+	check_sigdebug_inner(__func__, __LINE__, __reason);		\
+} while (0)
+
+static void *rt_thread_body(void *cookie)
+{
+	struct timespec now, delay = {.tv_sec = 0, .tv_nsec = 10000000LL};
+	unsigned long long end;
+	int err;
+
+	err = pthread_setname_np(pthread_self(), "test");
+	check_no_error("pthread_setname_np", err);
+	err = pthread_setmode_np(0, PTHREAD_WARNSW, NULL);
+	check_no_error("pthread_setmode_np", err);
+
+	smokey_trace("syscall");
+	setup_checkdebug(SIGDEBUG_MIGRATE_SYSCALL);
+	syscall(__NR_gettid);
+	check_sigdebug_received("SIGDEBUG_MIGRATE_SYSCALL");
+
+	smokey_trace("signal");
+	setup_checkdebug(SIGDEBUG_MIGRATE_SIGNAL);
+	err = sem_post(&send_signal);
+	check_no_error("sem_post", err);
+	err = clock_nanosleep(CLOCK_MONOTONIC, 0, &delay, NULL);
+	check("clock_nanosleep", err, EINTR);
+	check_sigdebug_received("SIGDEBUG_MIGRATE_SIGNAL");
+
+	smokey_trace("relaxed mutex owner");
+	if (corectl_debug & _CC_COBALT_DEBUG_MUTEX_RELAXED) {
+		setup_checkdebug(SIGDEBUG_MIGRATE_PRIOINV);
+		err = pthread_mutex_lock(&prio_invert);
+		check_no_error("pthread_mutex_lock", err);
+		check_sigdebug_received("SIGDEBUG_MIGRATE_PRIOINV");
+		err = pthread_mutex_unlock(&prio_invert);
+		check_no_error("pthread_mutex_unlock", err);
+	} else {
+		smokey_note("sigdebug \"SIGDEBUG_MIGRATE_PRIOINV\" skipped "
+			    "(no kernel support)");
+	}
+
+	smokey_trace("page fault");
+	setup_checkdebug(SIGDEBUG_MIGRATE_FAULT);
+	delay.tv_nsec = 0;
+	err = clock_nanosleep(CLOCK_MONOTONIC, 0, &delay, NULL);
+	check_no_error("clock_nanosleep", err);
+	*mem ^= 0xFF;
+	check_sigdebug_received("SIGDEBUG_MIGRATE_FAULT");
+
+	if (wd) {
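+		/*
+		 * Spin in primary mode for up to ~2.1 s; the 2 s
+		 * watchdog armed in run_sigdebug() is expected to fire
+		 * first and deliver SIGDEBUG_WATCHDOG.
+		 */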
+		smokey_trace("watchdog");
+		rt_print_flush_buffers();
+		setup_checkdebug(SIGDEBUG_WATCHDOG);
+		clock_gettime(CLOCK_MONOTONIC, &now);
+		end = now.tv_sec * 1000000000ULL + now.tv_nsec + 2100000000ULL;
+		err = clock_nanosleep(CLOCK_MONOTONIC, 0, &delay, NULL);
+		check_no_error("clock_nanosleep", err);
+		do
+			clock_gettime(CLOCK_MONOTONIC, &now);
+		while (now.tv_sec * 1000000000ULL + now.tv_nsec < end &&
+			 !sigdebug_received);
+		check_sigdebug_received("SIGDEBUG_WATCHDOG");
+	} else
+		smokey_note("watchdog not tested");
+
+	smokey_trace("lock break");
+	setup_checkdebug(SIGDEBUG_LOCK_BREAK);
+	err = pthread_setmode_np(0, PTHREAD_LOCK_SCHED |
+				    PTHREAD_DISABLE_LOCKBREAK, NULL);
+	check_no_error("pthread_setmode_np", err);
+	delay.tv_nsec = 1000000LL;
+	err = clock_nanosleep(CLOCK_MONOTONIC, 0, &delay, NULL);
+	check("clock_nanosleep", err, EINTR);
+	check_sigdebug_received("SIGDEBUG_LOCK_BREAK");
+
+	return NULL;
+}
+
+static void sigdebug_handler(int sig, siginfo_t *si, void *context)
+{
+	unsigned int reason = sigdebug_reason(si);
+
+	if (reason != expected_reason) {
+		rt_print_flush_buffers();
+		fprintf(stderr, "FAILURE: sigdebug_handler expected reason %d,"
+			" received %d\n", expected_reason, reason);
+		exit(EXIT_FAILURE);
+	}
+	sigdebug_received = true;
+}
+
+static void dummy_handler(int sig, siginfo_t *si, void *context)
+{
+}
+
+static void fault_handler(int sig)
+{
+	mprotect(mem, 1, PROT_WRITE);
+}
+
+static int run_sigdebug(struct smokey_test *t, int argc, char *const argv[])
+{
+	char tempname[] = "/tmp/sigdebug-XXXXXX";
+	struct sched_param params = {.sched_priority = 1};
+	pthread_t rt_thread;
+	pthread_attr_t attr;
+	pthread_mutexattr_t mutex_attr;
+	struct timespec delay = {.tv_sec = 0, .tv_nsec = 20000000ULL};
+	int err, wdog_delay, tmp_fd;
+	struct sigaction sa;
+
+	err = cobalt_corectl(_CC_COBALT_GET_WATCHDOG, &wdog_delay, sizeof(wdog_delay));
+	if (err || wdog_delay == 0)
+		return -ENOSYS;
+
+	err = cobalt_corectl(_CC_COBALT_GET_DEBUG, &corectl_debug,
+			     sizeof(corectl_debug));
+	if (err)
+		return -ENOSYS;
+
+	smokey_parse_args(t, argc, argv);
+	if (!SMOKEY_ARG_ISSET(sigdebug, no_watchdog) ||
+	    !SMOKEY_ARG_BOOL(sigdebug, no_watchdog)) {
+		wd = fopen("/sys/module/xenomai/parameters/watchdog_timeout",
+			   "w+");
+		if (wd) {
+			err = fprintf(wd, "2");
+			check("set watchdog", err, 1);
+			fflush(wd);
+		}
+	}
+
+	sigemptyset(&sa.sa_mask);
+	sa.sa_sigaction = sigdebug_handler;
+	sa.sa_flags = SA_SIGINFO;
+	sigaction(SIGDEBUG, &sa, NULL);
+
+	sa.sa_sigaction = dummy_handler;
+	sigaction(SIGUSR1, &sa, NULL);
+
+	sa.sa_handler = fault_handler;
+	sigaction(SIGSEGV, &sa, NULL);
+
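+	/*
+	 * Map one page of a scratch file read-only: the first write to
+	 * it from primary mode raises SIGDEBUG_MIGRATE_FAULT, and the
+	 * SIGSEGV handler then flips the page to PROT_WRITE so the
+	 * faulting access can complete.
+	 */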
+	errno = 0;
+	tmp_fd = mkstemp(tempname);
+	check_no_error("mkstemp", -errno);
+	unlink(tempname);
+	check_no_error("unlink", -errno);
+	mem = mmap(NULL, 1, PROT_READ, MAP_PRIVATE, tmp_fd, 0);
+	check_no_error("mmap", -errno);
+	err = write(tmp_fd, "X", 1);
+	check("write", err, 1);
+
+	err = pthread_setschedparam(pthread_self(), SCHED_FIFO, &params);
+	check_no_error("pthread_setschedparam", err);
+
+	err = pthread_mutexattr_init(&mutex_attr);
+	check_no_error("pthread_mutexattr_init", err);
+	err = pthread_mutexattr_setprotocol(&mutex_attr, PTHREAD_PRIO_INHERIT);
+	check_no_error("pthread_mutexattr_setprotocol", err);
+	err = pthread_mutex_init(&prio_invert, &mutex_attr);
+	check_no_error("pthread_mutex_init", err);
+
+	err = pthread_mutex_lock(&prio_invert);
+	check_no_error("pthread_mutex_lock", err);
+
+	err = sem_init(&send_signal, 0, 0);
+	check_no_error("sem_init", err);
+
+	err = pthread_attr_init(&attr);
+	check_no_error("pthread_attr_init", err);
+	err = pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	check_no_error("pthread_attr_setinheritsched", err);
+	err = pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	check_no_error("pthread_attr_setschedpolicy", err);
+	params.sched_priority = 2;
+	err = pthread_attr_setschedparam(&attr, &params);
+	check_no_error("pthread_attr_setschedparam", err);
+
+	smokey_trace("mlockall");
+	munlockall();
+	setup_checkdebug(SIGDEBUG_NOMLOCK);
+	err = pthread_create(&rt_thread, &attr, rt_thread_body, NULL);
+	/* Note: EINTR is against the spec, but that's OK in this scenario. */
+	check("pthread_create", err, EINTR);
+	check_sigdebug_received("SIGDEBUG_NOMLOCK");
+	mlockall(MCL_CURRENT | MCL_FUTURE);
+
+	err = pthread_create(&rt_thread, &attr, rt_thread_body, NULL);
+	check_no_error("pthread_create", err);
+
+	err = sem_wait(&send_signal);
+	check_no_error("sem_wait", err);
+	err = __STD(pthread_kill(rt_thread, SIGUSR1));
+	check_no_error("pthread_kill", err);
+
+	__STD(nanosleep(&delay, NULL));
+
+	err = pthread_mutex_unlock(&prio_invert);
+	check_no_error("pthread_mutex_unlock", err);
+
+	err = pthread_join(rt_thread, NULL);
+	check_no_error("pthread_join", err);
+
+	err = pthread_mutex_destroy(&prio_invert);
+	check_no_error("pthread_mutex_destroy", err);
+
+	err = sem_destroy(&send_signal);
+	check_no_error("sem_destroy", err);
+
+	close(tmp_fd);
+	if (wd) {
+		fprintf(wd, "%d", wdog_delay);
+		fclose(wd);
+	}
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/Makefile.am
new file mode 100644
index 0000000..27c3336
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libtimerfd.a
+
+libtimerfd_a_SOURCES = timerfd.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libtimerfd_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/timerfd.c b/kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/timerfd.c
new file mode 100644
index 0000000..1d58133
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/timerfd/timerfd.c
@@ -0,0 +1,389 @@
+/*
+ * Copyright (C) 2013 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <stdio.h>
+#include <unistd.h>
+#include <errno.h>
+#include <assert.h>
+#include <fcntl.h>
+#include <sys/timerfd.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(timerfd,
+		   SMOKEY_NOARGS,
+		   "Check timerfd support."
+);
+
+#ifndef TFD_NONBLOCK
+#define TFD_NONBLOCK O_NONBLOCK
+#endif
+
+static int timerfd_basic_check(void)
+{
+	unsigned long long ticks;
+	struct itimerspec its;
+	int fd, i, ret;
+	
+	fd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, 0));
+	if (fd < 0)
+		return fd;
+
+	its.it_value.tv_sec = 0;
+	its.it_value.tv_nsec = 100000000;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 100000000;
+
+	ret = smokey_check_errno(timerfd_settime(fd, 0, &its, NULL));
+	if (ret)
+		return ret;
+	
+	for (i = 0; i < 10; i++) {
+		ret = smokey_check_errno(read(fd, &ticks, sizeof(ticks)));
+		if (ret < 0)
+			return ret;
+		if (!smokey_assert(ret == 8))
+			return -EINVAL;
+		smokey_trace("%Ld direct read ticks", ticks);
+		if (!smokey_assert(ticks >= 1))
+			return -EINVAL;
+	}
+	
+	return smokey_check_errno(close(fd));
+}
+
+static int timerfd_select_check(void)
+{
+	unsigned long long ticks;
+	fd_set tmp_inset, inset;
+	struct itimerspec its;
+	int fd, i, ret;
+	
+	fd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK));
+	if (fd < 0)
+		return fd;
+
+	FD_ZERO(&inset);
+	FD_SET(fd, &inset); 
+
+	its.it_value.tv_sec = 0;
+	its.it_value.tv_nsec = 100000000;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 100000000;
+
+	ret = smokey_check_errno(timerfd_settime(fd, 0, &its, NULL));
+	if (ret)
+		return ret;
+
+	ret = read(fd, &ticks, sizeof(ticks));
+	if (!smokey_assert(ret == -1 && errno == EAGAIN))
+		return -EINVAL;
+	
+	for (i = 0; i < 10; i++) {
+		tmp_inset = inset;
+		ret = smokey_check_errno(select(fd + 1, &tmp_inset, NULL, NULL, NULL));
+		if (ret < 0)
+			return ret;
+		ret = smokey_check_errno(read(fd, &ticks, sizeof(ticks)));
+		if (ret < 0)
+			return ret;
+		smokey_assert(ret == 8);
+		smokey_trace("%Ld select+read ticks", ticks);
+		if (!smokey_assert(ticks >= 1))
+			return -EINVAL;
+	}
+	
+	return smokey_check_errno(close(fd));
+}
+
+static int timerfd_basic_overruns_check(void)
+{
+	unsigned long long ticks;
+	struct itimerspec its;
+	int fd, ret, i;
+	
+	fd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, 0));
+	if (fd < 0)
+		return fd;
+
+	its.it_value.tv_sec = 0;
+	its.it_value.tv_nsec = 100000000;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 100000000;
+
+	ret = smokey_check_errno(timerfd_settime(fd, 0, &its, NULL));
+	if (ret)
+		return ret;
+	
+	for (i = 0; i < 3; i++) {
+		sleep(1);
+		ret = smokey_check_errno(read(fd, &ticks, sizeof(ticks)));
+		if (ret < 0)
+			return ret;
+		smokey_assert(ret == 8);
+		smokey_trace("%Ld direct read ticks", ticks);
+		if (!smokey_assert(ticks >= 10))
+			return -EINVAL;
+	}
+	
+	return smokey_check_errno(close(fd));
+}
+
+static int timerfd_select_overruns_check(void)
+{
+	unsigned long long ticks;
+	fd_set tmp_inset, inset;
+	struct itimerspec its;
+	int fd, ret, i;
+	
+	fd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK));
+	if (fd < 0)
+		return fd;
+
+	FD_ZERO(&inset);
+	FD_SET(fd, &inset);
+
+	its.it_value.tv_sec = 0;
+	its.it_value.tv_nsec = 100000000;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 100000000;
+
+	ret = smokey_check_errno(timerfd_settime(fd, 0, &its, NULL));
+	if (ret)
+		return ret;
+
+	ret = read(fd, &ticks, sizeof(ticks));
+	if (!smokey_assert(ret == -1 && errno == EAGAIN))
+		return -EINVAL;
+	
+	for (i = 0; i < 3; i++) {
+		tmp_inset = inset;
+		sleep(1);
+		ret = smokey_check_errno(select(fd + 1, &tmp_inset, NULL, NULL, NULL));
+		if (ret < 0)
+			return ret;
+		ret = smokey_check_errno(read(fd, &ticks, sizeof(ticks)));
+		if (ret < 0)
+			return ret;
+		smokey_assert(ret == 8);
+		smokey_trace("%Ld select+read ticks", ticks);
+		if (!smokey_assert(ticks >= 10))
+			return -EINVAL;
+	}
+	
+	return smokey_check_errno(close(fd));
+}
+
+static int timerfd_select_overruns2_check(void)
+{
+	unsigned long long ticks;
+	fd_set tmp_inset, inset;
+	struct itimerspec its;
+	int fd, ret, i;
+	
+	fd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK));
+	if (fd < 0)
+		return fd;
+
+	FD_ZERO(&inset);
+	FD_SET(fd, &inset);
+
+	its.it_value.tv_sec = 0;
+	its.it_value.tv_nsec = 100000000;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 100000000;
+
+	ret = smokey_check_errno(timerfd_settime(fd, 0, &its, NULL));
+	if (ret)
+		return ret;
+
+	ret = read(fd, &ticks, sizeof(ticks));
+	if (!smokey_assert(ret == -1 && errno == EAGAIN))
+		return -EINVAL;
+	
+	for (i = 0; i < 3; i++) {
+		tmp_inset = inset;
+		ret = smokey_check_errno(select(fd + 1, &tmp_inset, NULL, NULL, NULL));
+		if (ret < 0)
+			return ret;
+		sleep(1);
+		ret = smokey_check_errno(read(fd, &ticks, sizeof(ticks)));
+		if (ret < 0)
+			return ret;
+		smokey_assert(ret == 8);
+		smokey_trace("%Ld select+read ticks", ticks);
+		if (!smokey_assert(ticks >= 11))
+			return -EINVAL;
+	}
+	
+	return smokey_check_errno(close(fd));
+}
+
+static int timerfd_select_overruns_before_check(void)
+{
+	unsigned long long ticks;
+	fd_set tmp_inset, inset;
+	struct itimerspec its;
+	int fd, ret, i;
+	
+	fd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK));
+	if (fd < 0)
+		return fd;
+
+	FD_ZERO(&inset);
+	FD_SET(fd, &inset);
+
+	its.it_value.tv_sec = 0;
+	its.it_value.tv_nsec = 100000000;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 100000000;
+
+	ret = smokey_check_errno(timerfd_settime(fd, 0, &its, NULL));
+	if (ret)
+		return ret;
+
+	ret = read(fd, &ticks, sizeof(ticks));
+	if (!smokey_assert(ret == -1 && errno == EAGAIN))
+		return -EINVAL;
+
+	sleep(1);
+
+	for (i = 0; i < 3; i++) {
+		tmp_inset = inset;
+		ret = smokey_check_errno(select(fd + 1, &tmp_inset, NULL, NULL, NULL));
+		if (ret < 0)
+			return ret;
+		ret = smokey_check_errno(read(fd, &ticks, sizeof(ticks)));
+		if (ret < 0)
+			return ret;
+		smokey_assert(ret == 8);
+		smokey_trace("%Ld select+read ticks", ticks);
+		if (!smokey_assert(ticks >= 10))
+			return -EINVAL;
+		sleep(1);
+	}
+	
+	return smokey_check_errno(close(fd));
+}
+
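+/*
+ * Emulate a timed read on an arbitrary fd: a helper timerfd armed with
+ * TFD_WAKEUP (a Cobalt extension to the timerfd API) forcibly unblocks
+ * the reader when it fires, and an EINTR outcome is converted into
+ * ETIMEDOUT whenever that helper timer has actually elapsed.
+ */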
+static ssize_t
+timed_read(int fd, void *buf, size_t len, struct timespec *ts)
+{
+	unsigned long long ticks;
+	struct itimerspec its;
+	int tfd, ret;
+	ssize_t err;
+	
+	tfd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, TFD_NONBLOCK));
+	if (tfd < 0)
+		return tfd;
+	
+	its.it_value = *ts;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 0;
+	
+	ret = smokey_check_errno(timerfd_settime(tfd, TFD_WAKEUP, &its, NULL));
+	if (ret)
+		return ret;
+	
+	err = read(fd, buf, len);
+	if (err < 0)
+		err = -errno;
+
+	if (err == -EINTR) {
+		err = read(tfd, &ticks, sizeof(ticks));
+		if (err > 0)
+			err = -ETIMEDOUT;
+		else
+			err = -EINTR;
+	}
+	
+	ret = smokey_check_errno(close(tfd));
+	if (ret)
+		return ret;
+
+	if (err >= 0)
+		return err;
+	
+	errno = -err;
+	
+	return -1;
+}
+
+static int timerfd_unblock_check(void)
+{
+	unsigned long long ticks;
+	struct itimerspec its;
+	int fd, ret;
+	
+	fd = smokey_check_errno(timerfd_create(CLOCK_MONOTONIC, 0));
+	if (fd < 0)
+		return fd;
+	
+	its.it_value.tv_sec = 5;
+	its.it_value.tv_nsec = 0;
+	its.it_interval.tv_sec = 0;
+	its.it_interval.tv_nsec = 0;
+	
+	ret = smokey_check_errno(timerfd_settime(fd, 0, &its, NULL));
+	if (ret)
+		return ret;
+
+	its.it_value.tv_sec = 0;
+	its.it_value.tv_nsec = 100000000;
+
+	if (!smokey_assert(timed_read(fd, &ticks, sizeof(ticks), &its.it_value) < 0 && 
+			   errno == ETIMEDOUT))
+		return -EINVAL;
+
+	return smokey_check_errno(close(fd));
+}
+
+static int run_timerfd(struct smokey_test *t, int argc, char *const argv[])
+{
+	int ret;
+	
+	ret = timerfd_basic_check();
+	if (ret)
+		return ret;
+	
+	ret = timerfd_select_check();
+	if (ret)
+		return ret;
+
+	ret = timerfd_basic_overruns_check();
+	if (ret)
+		return ret;
+
+	ret = timerfd_select_overruns_check();
+	if (ret)
+		return ret;
+
+	ret = timerfd_select_overruns2_check();
+	if (ret)
+		return ret;
+
+	ret = timerfd_select_overruns_before_check();
+	if (ret)
+		return ret;
+
+	return timerfd_unblock_check();
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/tsc/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/tsc/Makefile.am
new file mode 100644
index 0000000..dbe4528
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/tsc/Makefile.am
@@ -0,0 +1,8 @@
+
+noinst_LIBRARIES = libtsc.a
+
+libtsc_a_SOURCES = tsc.c
+
+libtsc_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/tsc/tsc.c b/kernel/xenomai-v3.2.4/testsuite/smokey/tsc/tsc.c
new file mode 100644
index 0000000..f9ae82f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/tsc/tsc.c
@@ -0,0 +1,186 @@
+/*
+ * Copyright (C) 2011-2012,2015 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+
+#include <unistd.h>
+#include <sched.h>
+#include <sys/mman.h>
+
+#include <smokey/smokey.h>
+
+#include <cobalt/sys/cobalt.h>
+
+#define DURATION 10000000
+
+#if CONFIG_SMP
+#define smp_sched_setaffinity(pid,len,mask) sched_setaffinity(pid,len,mask)
+#define smp_sched_getaffinity(pid,len,mask) sched_getaffinity(pid,len,mask)
+#else /* !CONFIG_SMP */
+#define smp_sched_setaffinity(pid,len,mask) 0
+#define smp_sched_getaffinity(pid,len,mask) 0
+#endif /* !CONFIG_SMP */
+
+smokey_test_plugin(tsc,
+		SMOKEY_ARGLIST(
+			SMOKEY_INT(duration),
+			),
+		"Check that emulated tsc is monotonic"
+);
+
+
+static inline unsigned long long timer_get_tsc(void)
+{
+	/*
+	 * The additional function call clockobj_get_tsc() makes a big
+	 * difference on low end
+	 */
+	return cobalt_read_tsc();
+}
+
+static inline unsigned long long timer_tsc2ns(unsigned long long tsc)
+{
+	return clockobj_tsc_to_ns(tsc);
+}
+
+static inline unsigned long long timer_ns2tsc(unsigned long long ns)
+{
+	return clockobj_ns_to_tsc(ns);
+}
+
+static int run_tsc(struct smokey_test *t, int argc, char *const argv[])
+{
+	unsigned long long runtime, start, jump, tsc1, tsc2;
+	unsigned long long one_sec_tsc;
+	unsigned long long sum, g_sum;
+	unsigned long long loops, g_loops;
+	unsigned dt, min, max, g_min, g_max;
+	unsigned long long secs;
+	unsigned i, margin;
+
+#if CONFIG_SMP
+	/* Pin the test to the CPU it is currently running on */
+	cpu_set_t mask;
+
+	if (smp_sched_getaffinity(0, sizeof(mask), &mask) == 0)
+		for (i = 0; i < sysconf(_SC_NPROCESSORS_ONLN); i++)
+			if (CPU_ISSET(i, &mask)) {
+				CPU_ZERO(&mask);
+				CPU_SET(i, &mask);
+
+				smp_sched_setaffinity(0, sizeof(mask), &mask);
+				smokey_trace("Pinned to cpu %d", i);
+				break;
+			}
+#endif
+
+	g_min = ~0U;
+	g_max = 0;
+	g_sum = 0;
+	g_loops = 0;
+
+	smokey_parse_args(t, argc, argv);
+
+	one_sec_tsc = timer_ns2tsc(ONE_BILLION);
+
+	runtime = timer_get_tsc();
+	margin = timer_ns2tsc(2000);
+	if (margin < 80)
+		margin = 80;
+
+	if (SMOKEY_ARG_ISSET(tsc, duration)) {
+		secs = SMOKEY_ARG_INT(tsc, duration);
+		min = (secs + 59) / 60;
+		secs = min * 60;
+	} else
+		secs = 15;
+	min = secs / 60;
+	smokey_trace("Checking tsc for %u minute(s)", min);
+
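+	/*
+	 * Sample the tsc in back-to-back pairs for one second per round:
+	 * any pair going backwards fails the test, while deltas above
+	 * 'margin' are treated as preemptions and left out of the
+	 * min/max/average statistics.
+	 */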
+	for (i = 0; i < secs; i++) {
+		min = ~0U;
+		max = 0;
+		sum = 0;
+		loops = 0;
+		tsc2 = start = timer_get_tsc();
+		do {
+			tsc1 = timer_get_tsc();
+			if (tsc1 < tsc2) {
+				fprintf(stderr, "%016Lx -> %016Lx\n",
+					tsc2, tsc1);
+				goto err1;
+			}
+			tsc2 = timer_get_tsc();
+			if (tsc2 < tsc1) {
+				fprintf(stderr, "%016Lx -> %016Lx\n",
+					tsc1, tsc2);
+				goto err2;
+			}
+
+			dt = tsc2 - tsc1;
+
+			if (dt > margin)
+				continue;
+
+			if (dt < min)
+				min = dt;
+			if (dt > max)
+				max = dt;
+			sum += dt;
+			++loops;
+		} while (tsc2 - start < one_sec_tsc);
+
+		smokey_trace("min: %u, max: %u, avg: %g",
+			min, max, (double)sum / loops);
+
+		if (min < g_min)
+			g_min = min;
+		if (max > g_max)
+			g_max = max;
+		g_sum += sum;
+		g_loops += loops;
+	}
+
+	smokey_trace("min: %u, max: %u, avg: %g -> %g us",
+		g_min, g_max, (double)g_sum / g_loops,
+		(double)timer_tsc2ns(g_sum) / (1000 * g_loops));
+
+	return EXIT_SUCCESS;
+
+  err1:
+	runtime = tsc2 - runtime;
+	jump = tsc2 - tsc1;
+	goto display;
+  err2:
+	runtime = tsc1 - runtime;
+	jump = tsc1 - tsc2;
+
+  display:
+	fprintf(stderr, "tsc not monotonic after %Lu ticks, ",
+		runtime);
+	fprintf(stderr, "jumped back %Lu tick\n", jump);
+
+	return EXIT_FAILURE;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/Makefile.am
new file mode 100644
index 0000000..250ef08
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/Makefile.am
@@ -0,0 +1,9 @@
+
+noinst_LIBRARIES = libvdso-access.a
+
+libvdso_access_a_SOURCES = vdso-access.c
+
+libvdso_access_a_CPPFLAGS = 	\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/vdso-access.c b/kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/vdso-access.c
new file mode 100644
index 0000000..42c1b28
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/vdso-access/vdso-access.c
@@ -0,0 +1,31 @@
+/*
+ * VDSO feature set testcase
+ * by Wolfgang Mauerer <wolfgang.mauerer@siemens.com>
+ */
+#include <stdio.h>
+#include <time.h>
+#include <boilerplate/atomic.h>
+#include <cobalt/uapi/kernel/vdso.h>
+#include <smokey/smokey.h>
+
+smokey_test_plugin(vdso_access,
+		   SMOKEY_NOARGS,
+		   "Check VDSO access."
+);
+
+extern void *cobalt_umm_shared;
+
+extern struct xnvdso *cobalt_vdso;
+
+static int run_vdso_access(struct smokey_test *t, int argc, char *const argv[])
+{
+	if (cobalt_umm_shared == NULL) {
+		warning("could not determine position of the VDSO segment");
+		return 1;
+	}
+
+	smokey_trace("VDSO: features detected: %llx",
+		     (long long)cobalt_vdso->features);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/xddp/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/xddp/Makefile.am
new file mode 100644
index 0000000..4f53525
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/xddp/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = libxddp.a
+
+libxddp_a_SOURCES = xddp.c
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+libxddp_a_CPPFLAGS = 		\
+	@XENO_USER_CFLAGS@	\
+	-I$(top_srcdir)/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/xddp/xddp.c b/kernel/xenomai-v3.2.4/testsuite/smokey/xddp/xddp.c
new file mode 100644
index 0000000..a311854
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/xddp/xddp.c
@@ -0,0 +1,264 @@
+/*
+ * RTIPC/XDDP test.
+ *
+ * Copyright (C) Philippe Gerum <rpm@xenomai.org>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <signal.h>
+#include <string.h>
+#include <semaphore.h>
+#include <pthread.h>
+#include <fcntl.h>
+#include <errno.h>
+#include <smokey/smokey.h>
+#include <rtdm/ipc.h>
+
+smokey_test_plugin(xddp,
+		   SMOKEY_NOARGS,
+		   "Check RTIPC/XDDP protocol."
+);
+
+static pthread_t rt1, rt2, nrt;
+
+static sem_t semsync;
+
+#define XDDP_PORT_LABEL  "xddp-smokey"
+
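+/*
+ * Data path under test: realtime_thread2 connects to the port by label
+ * and sends an increasing counter, regular_thread echoes everything it
+ * reads from the registry-exported proxy back into the port, and
+ * realtime_thread1, bound to that port, checks the relayed values.
+ */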
+static void fail(const char *reason)
+{
+	perror(reason);
+	exit(EXIT_FAILURE);
+}
+
+static void *realtime_thread1(void *arg)
+{
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc saddr;
+	long control = 0, data;
+	fd_set set;
+	int ret, s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	if (s < 0) {
+		perror("socket");
+		exit(EXIT_FAILURE);
+	}
+
+	strcpy(plabel.label, XDDP_PORT_LABEL);
+	ret = setsockopt(s, SOL_XDDP, XDDP_LABEL, &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = -1;
+	ret = bind(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("bind");
+
+	FD_ZERO(&set);
+	FD_SET(s, &set);
+	sem_post(&semsync); /* unleash client RT thread */
+
+	for (;;) {
+		control++;
+		ret = select(s + 1, &set, NULL, NULL, NULL);
+		if (ret != 1 || !FD_ISSET(s, &set))
+			fail("select");
+
+		/*
+		 * We can't race with any other reader in this setup,
+		 * so recvfrom() shall confirm the select() result.
+		 */
+		ret = recvfrom(s, &data, sizeof(data), MSG_DONTWAIT, NULL, 0);
+		if (ret != sizeof(data)) {
+			close(s);
+			fail("recvfrom");
+		}
+
+		if (data != control) {
+			close(s);
+			smokey_note("data does not match control value");
+			errno = EINVAL;
+			fail("recvfrom");
+		}
+
+		smokey_trace("%s: %ld relayed by peer", __func__, data);
+	}
+
+	return NULL;
+}
+
+static void sem_sync(sem_t *sem)
+{
+	int ret;
+
+	for (;;) {
+		ret = sem_wait(sem);
+		if (ret == 0)
+			return;
+		if (errno != EINTR)
+			fail("sem_wait");
+	}
+}
+
+static void *realtime_thread2(void *arg)
+{
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc saddr;
+	int ret, s, loops = 30;
+	struct timespec ts;
+	struct timeval tv;
+	socklen_t addrlen;
+	long data = 0;
+	fd_set set;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	if (s < 0) {
+		perror("socket");
+		exit(EXIT_FAILURE);
+	}
+
+	tv.tv_sec = 1;
+	tv.tv_usec = 0;
+	ret = setsockopt(s, SOL_SOCKET, SO_RCVTIMEO,
+			 &tv, sizeof(tv));
+	if (ret)
+		fail("setsockopt");
+
+	strcpy(plabel.label, XDDP_PORT_LABEL);
+	ret = setsockopt(s, SOL_XDDP, XDDP_LABEL,
+			 &plabel, sizeof(plabel));
+	if (ret)
+		fail("setsockopt");
+
+	sem_sync(&semsync);
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = -1;	/* Tell XDDP to search by label. */
+	ret = connect(s, (struct sockaddr *)&saddr, sizeof(saddr));
+	if (ret)
+		fail("connect");
+
+	addrlen = sizeof(saddr);
+	ret = getpeername(s, (struct sockaddr *)&saddr, &addrlen);
+	if (ret || addrlen != sizeof(saddr))
+		fail("getpeername");
+
+	smokey_trace("%s: NRT peer is reading from /dev/rtp%d",
+		     __func__, saddr.sipc_port);
+
+	FD_ZERO(&set);
+	FD_SET(s, &set);
+
+	while (--loops) {
+		ret = select(s + 1, NULL, &set, NULL, NULL);
+		/* Should always be immediately writable. */
+		if (ret != 1 || !FD_ISSET(s, &set))
+			fail("select");
+
+		/*
+		 * Actually we might fail sending although select() on
+		 * POLLOUT succeeded earlier, as the situation might
+		 * have changed in the meantime due to a sudden
+		 * pressure on the system heap. Pretend it did not.
+		 */
+		data++;
+		ret = sendto(s, &data, sizeof(data), MSG_DONTWAIT, NULL, 0);
+		if (ret != sizeof(data))
+			fail("sendto");
+
+		smokey_trace("%s: sent %d bytes, %ld", __func__, ret, data);
+
+		ts.tv_sec = 0;
+		ts.tv_nsec = 100000000; /* 100 ms */
+		clock_nanosleep(CLOCK_REALTIME, 0, &ts, NULL);
+	}
+
+	sleep(1);	/* Wait for the output to drain. */
+
+	return NULL;
+}
+
+static void *regular_thread(void *arg)
+{
+	char *devname;
+	int fd, ret;
+	long data;
+
+	if (asprintf(&devname,
+		     "/proc/xenomai/registry/rtipc/xddp/%s",
+		     XDDP_PORT_LABEL) < 0)
+		fail("asprintf");
+
+	do
+		fd = open(devname, O_RDWR);
+	while (fd < 0 && errno == ENOENT);
+	free(devname);
+	if (fd < 0)
+		fail("open");
+
+	for (;;) {
+		ret = read(fd, &data, sizeof(data));
+		if (ret != sizeof(data))
+			fail("read");
+
+		ret = write(fd, &data, sizeof(data));
+		if (ret != sizeof(data))
+			fail("write");
+	}
+
+	return NULL;
+}
+
+static int run_xddp(struct smokey_test *t, int argc, char *const argv[])
+{
+	struct sched_param param = { .sched_priority = 42 };
+	pthread_attr_t rtattr, regattr;
+	int s;
+
+	s = socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP);
+	if (s < 0) {
+		if (errno == EAFNOSUPPORT)
+			return -ENOSYS;
+	} else
+		close(s);
+
+	sem_init(&semsync, 0, 0);
+
+	pthread_attr_init(&rtattr);
+	pthread_attr_setdetachstate(&rtattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&rtattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&rtattr, SCHED_FIFO);
+	pthread_attr_setschedparam(&rtattr, &param);
+
+	errno = pthread_create(&rt1, &rtattr, &realtime_thread1, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	errno = pthread_create(&rt2, &rtattr, &realtime_thread2, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_attr_init(&regattr);
+	pthread_attr_setdetachstate(&regattr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&regattr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&regattr, SCHED_OTHER);
+
+	errno = pthread_create(&nrt, &regattr, &regular_thread, NULL);
+	if (errno)
+		fail("pthread_create");
+
+	pthread_join(rt2, NULL);
+	pthread_cancel(rt1);
+	pthread_cancel(nrt);
+	pthread_join(rt1, NULL);
+	pthread_join(nrt, NULL);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/y2038/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/smokey/y2038/Makefile.am
new file mode 100644
index 0000000..8dd4c97
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/y2038/Makefile.am
@@ -0,0 +1,10 @@
+
+noinst_LIBRARIES = liby2038.a
+
+liby2038_a_SOURCES = syscall-tests.c
+
+liby2038_a_CPPFLAGS = 	\
+	@XENO_COBALT_CFLAGS@	\
+	-I$(top_srcdir)		\
+	-I$(top_srcdir)/include \
+	-I$(top_srcdir)/lib/cobalt/arch/@XENO_TARGET_ARCH@/include
diff --git a/kernel/xenomai-v3.2.4/testsuite/smokey/y2038/syscall-tests.c b/kernel/xenomai-v3.2.4/testsuite/smokey/y2038/syscall-tests.c
new file mode 100644
index 0000000..a1a5d12
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/smokey/y2038/syscall-tests.c
@@ -0,0 +1,1203 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * y2038 tests
+ *
+ * Copyright (c) Siemens AG 2021
+ *
+ * Authors:
+ *  Florian Bezdeka <florian.bezdeka@siemens.com>
+ *
+ * Released under the terms of GPLv2.
+ */
+#include <asm/xenomai/syscall.h>
+#include <smokey/smokey.h>
+#include <sys/utsname.h>
+#include <mqueue.h>
+#include <rtdm/ipc.h>
+
+smokey_test_plugin(y2038, SMOKEY_NOARGS, "Validate correct y2038 support");
+
+/*
+ * libc independent data type representing a time64_t based struct timespec
+ */
+struct xn_timespec64 {
+	int64_t tv_sec;
+	int64_t tv_nsec;
+};
+
+struct xn_timex_timeval {
+	int64_t tv_sec;
+	int64_t	tv_usec;
+};
+
+struct xn_timex64 {
+	int32_t modes;		/* mode selector */
+				/* pad */
+	int64_t offset;		/* time offset (usec) */
+	int64_t freq;		/* frequency offset (scaled ppm) */
+	int64_t maxerror;	/* maximum error (usec) */
+	int64_t esterror;	/* estimated error (usec) */
+	int32_t status;		/* clock command/status */
+				/* pad */
+	int64_t constant;	/* pll time constant */
+	int64_t precision;	/* clock precision (usec) (read only) */
+	int64_t tolerance;	/* clock frequency tolerance (ppm) (read only) */
+	struct xn_timex_timeval time;	/* (read only, except for ADJ_SETOFFSET) */
+	int64_t tick;		/* (modified) usecs between clock ticks */
+
+	int64_t ppsfreq;	/* pps frequency (scaled ppm) (ro) */
+	int64_t jitter;		/* pps jitter (us) (ro) */
+	int32_t shift;		/* interval duration (s) (shift) (ro) */
+				/* pad */
+	int64_t stabil;		/* pps stability (scaled ppm) (ro) */
+	int64_t jitcnt;		/* jitter limit exceeded (ro) */
+	int64_t calcnt;		/* calibration intervals (ro) */
+	int64_t errcnt;		/* calibration errors (ro) */
+	int64_t stbcnt;		/* stability limit exceeded (ro) */
+
+	int32_t tai;		/* TAI offset (ro) */
+};
+
+#define NSEC_PER_SEC 1000000000
+
+static void ts_normalise(struct xn_timespec64 *ts)
+{
+	while (ts->tv_nsec >= NSEC_PER_SEC) {
+		ts->tv_sec += 1;
+		ts->tv_nsec -= NSEC_PER_SEC;
+	}
+
+	while (ts->tv_nsec <= -NSEC_PER_SEC) {
+		ts->tv_sec -= 1;
+		ts->tv_nsec += NSEC_PER_SEC;
+	}
+
+	if (ts->tv_nsec < 0) {
+		/*
+		 * Negative nanoseconds isn't valid according to POSIX.
+		 * Decrement tv_sec and roll tv_nsec over.
+		 */
+		ts->tv_sec -= 1;
+		ts->tv_nsec = (NSEC_PER_SEC + ts->tv_nsec);
+	}
+}
+
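+/**
+ * Add a (possibly negative) nanosecond offset to a timespec,
+ * normalising the result
+ *
+ * @param ts The timespec to update
+ * @param ns The nanosecond offset to add
+ */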
+static inline void ts_add_ns(struct xn_timespec64 *ts, int ns)
+{
+	ts->tv_nsec += ns;
+	ts_normalise(ts);
+}
+
+/**
+ * Compare two struct timespec instances
+ *
+ * @param a
+ * @param b
+ * @return True if a < b, false otherwise
+ */
+static inline bool ts_less(const struct xn_timespec64 *a,
+			   const struct xn_timespec64 *b)
+{
+	if (a->tv_sec < b->tv_sec)
+		return true;
+
+	if (a->tv_sec > b->tv_sec)
+		return false;
+
+	/* a->tv_sec == b->tv_sec */
+
+	if (a->tv_nsec < b->tv_nsec)
+		return true;
+
+	return false;
+}
+
+/**
+ * Simple helper data structure for holding a thread context
+ */
+struct thread_context {
+	int sc_nr;
+	pthread_mutex_t *mutex;
+	struct xn_timespec64 *ts;
+	bool timedwait_timecheck;
+};
+
+/**
+ * Start the supplied function inside a separate thread, wait for completion
+ * and check the thread return value.
+ *
+ * @param thread The thread entry point
+ * @param arg The thread arguments
+ * @param exp_result The expected return value
+ *
+ * @return 0 if the thread reported @exp_result as return value, the thread's
+ * return value otherwise
+ */
+static int run_thread(void *(*thread)(void *), void *arg, int exp_result)
+{
+	pthread_t tid;
+	void *status;
+	long res;
+	int ret;
+
+	if (!__T(ret, pthread_create(&tid, NULL, thread, arg)))
+		return ret;
+
+	if (!__T(ret, pthread_join(tid, &status)))
+		return ret;
+
+	res = (long)status;
+
+	return (res == exp_result) ? 0 : res;
+}
+
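+/*
+ * Exercise sc_cobalt_sem_timedwait64: NULL and invalid timeout
+ * pointers, an invalid timeout value, then a valid timeout which must
+ * not expire earlier than requested.
+ */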
+static int test_sc_cobalt_sem_timedwait64(void)
+{
+	int ret;
+	sem_t sem;
+	int sc_nr = sc_cobalt_sem_timedwait64;
+	struct xn_timespec64 ts64, ts_wu;
+	struct timespec ts_nat;
+
+	sem_init(&sem, 0, 0);
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL2(sc_nr, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("sem_timedwait64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/* Timeout is never read by the kernel, so NULL should be OK */
+	sem_post(&sem);
+	ret = XENOMAI_SYSCALL2(sc_nr, &sem, NULL);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	/*
+	 * The semaphore is already exhausted, so calling again will validate
+	 * the provided timeout now. Providing NULL has to deliver EFAULT
+	 */
+	ret = XENOMAI_SYSCALL2(sc_nr, &sem, NULL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * The semaphore is already exhausted, so calling again will validate
+	 * the provided timeout now. Providing an invalid address has to deliver
+	 * EFAULT
+	 */
+	ret = XENOMAI_SYSCALL2(sc_nr, &sem, (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * The semaphore is still exhausted, calling again will validate the
+	 * timeout, providing an invalid timeout has to deliver EINVAL
+	 */
+	ts64.tv_sec = -1;
+	ret = XENOMAI_SYSCALL2(sc_nr, &sem, &ts64);
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Provide a valid timeout, wait for it to expire and check
+	 * that we didn't come back too early.
+	 */
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	ts64.tv_sec = ts_nat.tv_sec;
+	ts64.tv_nsec = ts_nat.tv_nsec;
+	ts_add_ns(&ts64, 500000);
+
+	ret = XENOMAI_SYSCALL2(sc_nr, &sem, &ts64);
+	if (!smokey_assert(ret == -ETIMEDOUT))
+		return ret ? ret : -EINVAL;
+
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	ts_wu.tv_sec = ts_nat.tv_sec;
+	ts_wu.tv_nsec = ts_nat.tv_nsec;
+
+	if (ts_less(&ts_wu, &ts64))
+		smokey_warning("sem_timedwait64 returned to early!\n"
+			       "Expected wakeup at: %lld sec %lld nsec\n"
+			       "Back at           : %lld sec %lld nsec\n",
+			       ts64.tv_sec, ts64.tv_nsec, ts_wu.tv_sec,
+			       ts_wu.tv_nsec);
+
+	return 0;
+}
+
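+/*
+ * Exercise sc_cobalt_clock_gettime64: NULL and invalid timespec
+ * pointers, then read CLOCK_MONOTONIC into a valid 64bit timespec.
+ */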
+static int test_sc_cobalt_clock_gettime64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_clock_gettime64;
+	struct xn_timespec64 ts64 = {0};
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL2(sc_nr, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("clock_gettime64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_MONOTONIC, (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Provide a valid 64bit timespec */
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_MONOTONIC, &ts64);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	/* Validate seconds only, nanoseconds might still be zero */
+	smokey_assert(ts64.tv_sec != 0);
+
+	return 0;
+}
+
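+/*
+ * Exercise sc_cobalt_clock_settime64 (skipped unless the platform
+ * relies on the legacy TSC): after the usual pointer checks, step
+ * CLOCK_REALTIME one second ahead and verify the new time is visible.
+ */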
+static int test_sc_cobalt_clock_settime64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_clock_settime64;
+	struct xn_timespec64 ts64, now64;
+	struct timespec now;
+
+	if (!cobalt_use_legacy_tsc())
+		return 0; // Not implemented, nothing to test, success
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL2(sc_nr, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("clock_settime64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_MONOTONIC, (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	ret = clock_gettime(CLOCK_REALTIME, &now);
+	if (ret)
+		return -errno;
+
+	/* Provide a valid 64bit timespec */
+	ts64.tv_sec  = now.tv_sec + 1;
+	ts64.tv_nsec = now.tv_nsec;
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_REALTIME, &ts64);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	ret = clock_gettime(CLOCK_REALTIME, &now);
+	if (ret)
+		return -errno;
+
+	now64.tv_sec = now.tv_sec;
+	now64.tv_nsec = now.tv_nsec;
+
+	if (ts_less(&now64, &ts64))
+		smokey_warning("clock_settime() reported no error but no new time seen");
+
+	return 0;
+}
+
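+/*
+ * Exercise sc_cobalt_clock_nanosleep64: pointer checks, then two
+ * absolute sleeps on CLOCK_MONOTONIC which must not return before the
+ * requested wakeup time.
+ */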
+static int test_sc_cobalt_clock_nanosleep64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_clock_nanosleep64;
+	struct xn_timespec64 next, rmt;
+	struct timespec ts1, ts2, delta;
+	long interval = 1;
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL4(sc_nr, NULL, NULL, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("clock_nanosleep64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL4(sc_nr, CLOCK_MONOTONIC, TIMER_ABSTIME,
+			       (void *)0xdeadbeefUL, &rmt);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Provide a valid 64bit timespec, round 1 */
+	ret = clock_gettime(CLOCK_MONOTONIC, &ts1);
+	if (ret)
+		return -errno;
+
+	next.tv_sec  = ts1.tv_sec + interval;
+	next.tv_nsec = ts1.tv_nsec;
+
+	ret = XENOMAI_SYSCALL4(sc_nr, CLOCK_MONOTONIC, TIMER_ABSTIME,
+			       &next, (void *)0xdeadbeefUL);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	ret = clock_gettime(CLOCK_MONOTONIC, &ts2);
+	if (ret)
+		return -errno;
+
+	timespec_sub(&delta, &ts2, &ts1);
+	if (delta.tv_sec < interval)
+		smokey_warning("nanosleep didn't sleep long enough.");
+
+	/* Provide a valid 64bit timespec, round 2*/
+	next.tv_sec  = ts2.tv_sec + interval;
+	next.tv_nsec = ts2.tv_nsec;
+
+	ret = XENOMAI_SYSCALL4(sc_nr, CLOCK_MONOTONIC, TIMER_ABSTIME, &next,
+			       &rmt);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	ret = clock_gettime(CLOCK_MONOTONIC, &ts1);
+	if (ret)
+		return -errno;
+
+	timespec_sub(&delta, &ts1, &ts2);
+	if (delta.tv_sec < interval)
+		smokey_warning("nanosleep didn't sleep long enough.");
+
+	return 0;
+}
+
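+/*
+ * Exercise sc_cobalt_clock_getres64: pointer checks, then read the
+ * resolution of CLOCK_MONOTONIC, warning unless it is 1 ns.
+ */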
+static int test_sc_cobalt_clock_getres64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_clock_getres64;
+	struct xn_timespec64 ts64;
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL2(sc_nr, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("clock_getres64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_MONOTONIC, (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Provide a valid 64bit timespec */
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_MONOTONIC, &ts64);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	if (ts64.tv_sec != 0 || ts64.tv_nsec != 1)
+		smokey_warning("High resolution timers not available\n");
+
+	return 0;
+}
+
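+/*
+ * Exercise sc_cobalt_clock_adjtime64: pointer checks, then submit a
+ * valid 64bit timex; adjustments are supported for external clocks
+ * only, so CLOCK_REALTIME must report EOPNOTSUPP.
+ */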
+static int test_sc_cobalt_clock_adjtime64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_clock_adjtime64;
+	struct xn_timex64 tx64 = {};
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL2(sc_nr, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("clock_adjtime64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_REALTIME, (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Provide a valid 64bit timex */
+	tx64.modes = ADJ_SETOFFSET;
+	tx64.time.tv_usec = 123;
+	ret = XENOMAI_SYSCALL2(sc_nr, CLOCK_REALTIME, &tx64);
+
+	/* adjtime is supported for external clocks only, expect EOPNOTSUPP */
+	if (!smokey_assert(ret == -EOPNOTSUPP))
+		return ret ? ret : -EINVAL;
+
+	return 0;
+}
+
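+/*
+ * Contender thread for the mutex_timedlock64 test: optionally record
+ * the expected wakeup time, issue the timedlock syscall with the
+ * timeout taken from the thread context, and unlock on success.
+ */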
+static void *timedlock64_thread(void *arg)
+{
+	struct thread_context *ctx = (struct thread_context *) arg;
+	struct xn_timespec64 t1 = {}; /* silence compiler warning */
+	struct xn_timespec64 t2;
+	struct timespec ts_nat;
+	int ret;
+
+	if (ctx->timedwait_timecheck) {
+		if (!__T(ret, clock_gettime(CLOCK_REALTIME, &ts_nat)))
+			return (void *)(long)ret;
+
+		t1.tv_sec = ts_nat.tv_sec;
+		t1.tv_nsec = ts_nat.tv_nsec;
+		ts_add_ns(&t1, ctx->ts->tv_nsec);
+		ts_add_ns(&t1, ctx->ts->tv_sec * NSEC_PER_SEC);
+	}
+
+	ret = XENOMAI_SYSCALL2(ctx->sc_nr, ctx->mutex, (void *) ctx->ts);
+	if (ret)
+		return (void *)(long)ret;
+
+	if (ctx->timedwait_timecheck) {
+		if (!__T(ret, clock_gettime(CLOCK_REALTIME, &ts_nat)))
+			return (void *)(long)ret;
+
+		t2.tv_sec = ts_nat.tv_sec;
+		t2.tv_nsec = ts_nat.tv_nsec;
+
+		if (ts_less(&t2, &t1))
+			smokey_warning("mutex_timedlock64 returned too early!\n"
+				       "Expected wakeup at: %lld sec %lld nsec\n"
+				       "Back at           : %lld sec %lld nsec\n",
+				       t1.tv_sec, t1.tv_nsec, t2.tv_sec,
+				       t2.tv_nsec);
+	}
+
+	return (void *)(long)pthread_mutex_unlock(ctx->mutex);
+}
+
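+/*
+ * Exercise sc_cobalt_mutex_timedlock64, using a second thread to
+ * contend on the mutex for the timeout validation and expiry cases.
+ */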
+static int test_sc_cobalt_mutex_timedlock64(void)
+{
+	int ret;
+	pthread_mutex_t mutex;
+	int sc_nr = sc_cobalt_mutex_timedlock64;
+	struct xn_timespec64 ts64;
+	struct thread_context ctx = {0};
+
+	ret = pthread_mutex_init(&mutex, NULL);
+	if (ret)
+		return -ret;
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL2(sc_nr, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("mutex_timedlock64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * The mutex can be taken immediately, so the timeout is never
+	 * validated; NULL should be allowed
+	 */
+	ret = XENOMAI_SYSCALL2(sc_nr, &mutex, NULL);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	/*
+	 * The mutex can be taken immediately, so the timeout is never
+	 * validated; an invalid address should be allowed
+	 */
+	ret = XENOMAI_SYSCALL2(sc_nr, &mutex, (void *)0xdeadbeefUL);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	/*
+	 * mutex still locked, second thread has to fail with -EINVAL when
+	 * submitting NULL as timeout
+	 */
+	ctx.sc_nr = sc_nr;
+	ctx.mutex = &mutex;
+	ctx.ts = NULL;
+	if (!__T(ret, run_thread(timedlock64_thread, &ctx, -EINVAL)))
+		return ret;
+
+	/*
+	 * mutex still locked, second thread has to fail with -EFAULT when
+	 * submitting an invalid address as timeout
+	 */
+	ctx.ts = (void *) 0xdeadbeef;
+	if (!__T(ret, run_thread(timedlock64_thread, &ctx, -EFAULT)))
+		return ret;
+
+	/*
+	 * mutex still locked, second thread has to fail with -EFAULT when
+	 * submitting an invalid timeout (while the address is valid)
+	 */
+	ts64.tv_sec = -1;
+	ctx.ts = &ts64;
+	if (!__T(ret, run_thread(timedlock64_thread, &ctx, -EFAULT)))
+		return ret;
+
+	/*
+	 * mutex still locked, second thread has to fail with -ETIMEDOUT when
+	 * submitting a valid timeout
+	 */
+	ts64.tv_sec = 0;
+	ts64.tv_nsec = 500;
+	if (!__T(ret, run_thread(timedlock64_thread, &ctx, -ETIMEDOUT)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	/* mutex available, second thread should be able to lock and unlock */
+	ts64.tv_sec = 0;
+	ts64.tv_nsec = 500;
+	if (!__T(ret, run_thread(timedlock64_thread, &ctx, 0)))
+		return ret;
+
+	/*
+	 * Lock the mutex here so the second thread has to deliver -ETIMEDOUT.
+	 * Timechecks are now enabled to make sure we don't give up too early.
+	 */
+	if (!__T(ret, pthread_mutex_lock(&mutex)))
+		return ret;
+
+	ts64.tv_sec = 0;
+	ts64.tv_nsec = 500;
+	ctx.timedwait_timecheck = true;
+	if (!__T(ret, run_thread(timedlock64_thread, &ctx, -ETIMEDOUT)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_unlock(&mutex)))
+		return ret;
+
+	if (!__T(ret, pthread_mutex_destroy(&mutex)))
+		return ret;
+
+	return 0;
+}
+
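+/*
+ * Exercise sc_cobalt_mq_timedsend64 on a single-slot message queue:
+ * once the queue is full, check the timeout validation and make sure
+ * a valid timeout does not expire too early.
+ */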
+static int test_sc_cobalt_mq_timedsend64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_mq_timedsend64;
+	struct xn_timespec64 t1, t2;
+	struct timespec ts_nat;
+
+	mqd_t mq;
+	struct mq_attr qa;
+	char msg[64] = "Xenomai is cool!";
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL5(sc_nr, NULL, NULL, NULL, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("mq_timedsend64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EBADF))
+		return ret ? ret : -EBADF;
+
+	mq_unlink("/xenomai_mq_send");
+	qa.mq_maxmsg = 1;
+	qa.mq_msgsize = 64;
+
+	mq = mq_open("/xenomai_mq_send", O_RDWR | O_CREAT, 0, &qa);
+	if (mq < 0)
+		return mq;
+
+	/* Timeout is never read by the kernel, so NULL should be OK */
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, strlen(msg), 0, NULL);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, strlen(msg), 0,
+			       (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid timeout has to deliver EINVAL */
+	t1.tv_sec = -1;
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, strlen(msg), 0, &t1);
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Provide a valid timeout, wait for it to expire and check
+	 * that we didn't come back too early.
+	 */
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t1.tv_sec = ts_nat.tv_sec;
+	t1.tv_nsec = ts_nat.tv_nsec;
+	ts_add_ns(&t1, 500000);
+
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, strlen(msg), 0, &t1);
+	if (!smokey_assert(ret == -ETIMEDOUT))
+		return ret ? ret : -EINVAL;
+
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t2.tv_sec = ts_nat.tv_sec;
+	t2.tv_nsec = ts_nat.tv_nsec;
+
+	if (ts_less(&t2, &t1))
+		smokey_warning("mq_timedsend64 returned too early!\n"
+			       "Expected wakeup at: %lld sec %lld nsec\n"
+			       "Back at           : %lld sec %lld nsec\n",
+			       t1.tv_sec, t1.tv_nsec, t2.tv_sec, t2.tv_nsec);
+
+	ret = mq_unlink("/xenomai_mq_send");
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
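+/*
+ * Exercise sc_cobalt_mq_timedreceive64: receive a pending message
+ * with a NULL timeout, then check timeout validation and expiry on
+ * the now empty queue.
+ */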
+static int test_sc_cobalt_mq_timedreceive64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_mq_timedreceive64;
+	struct xn_timespec64 t1, t2;
+	struct timespec ts_nat;
+
+	mqd_t mq;
+	struct mq_attr qa;
+	char msg[64];
+	unsigned int prio = 0;
+	size_t size = 64;
+
+	mq_unlink("/xenomai_mq_recv");
+	qa.mq_maxmsg = 1;
+	qa.mq_msgsize = 64;
+
+	mq = mq_open("/xenomai_mq_recv", O_RDWR | O_CREAT, 0, &qa);
+	if (mq < 0)
+		return mq;
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, NULL, NULL, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("mq_timedreceive64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Send something, we want to receive it later */
+	ret = mq_send(mq, "msg", 4, 0);
+	if (ret)
+		return ret;
+
+	/*
+	 * Timeout is never read by the kernel, so NULL should be OK, the queue
+	 * is empty afterwards
+	 */
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, &size, &prio, NULL);
+	if (!smokey_assert(!ret))
+		return ret;
+
+	/* Check the message content, we should have received "msg" */
+	if (!smokey_assert(!memcmp(msg, "msg", 3)))
+		return -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	size = 64;
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, &size, &prio,
+			       (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid timeout has to deliver EINVAL */
+	t1.tv_sec = -1;
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, &size, &prio, &t1);
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Provide a valid timeout, wait for it to expire and check
+	 * that we didn't come back too early.
+	 */
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t1.tv_sec = ts_nat.tv_sec;
+	t1.tv_nsec = ts_nat.tv_nsec;
+	ts_add_ns(&t1, 500000);
+
+	ret = XENOMAI_SYSCALL5(sc_nr, mq, msg, &size, &prio, &t1);
+	if (!smokey_assert(ret == -ETIMEDOUT))
+		return ret ? ret : -EINVAL;
+
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t2.tv_sec = ts_nat.tv_sec;
+	t2.tv_nsec = ts_nat.tv_nsec;
+
+	if (ts_less(&t2, &t1))
+		smokey_warning("mq_timedreceive64 returned too early!\n"
+			       "Expected wakeup at: %lld sec %lld nsec\n"
+			       "Back at           : %lld sec %lld nsec\n",
+			       t1.tv_sec, t1.tv_nsec, t2.tv_sec, t2.tv_nsec);
+
+	ret = mq_unlink("/xenomai_mq_recv");
+	if (ret)
+		return ret;
+
+	return 0;
+}
+
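+/*
+ * Exercise sc_cobalt_sigtimedwait64 waiting for a SIGINT which is
+ * never sent: timeout validation, a zero timeout returning EAGAIN at
+ * once, and a timeout which must not expire too early.
+ */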
+static int test_sc_cobalt_sigtimedwait64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_sigtimedwait64;
+	struct xn_timespec64 t1, t2;
+	struct timespec ts_nat;
+	sigset_t set;
+
+	sigemptyset(&set);
+	sigaddset(&set, SIGINT);
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL3(sc_nr, NULL, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("sigtimedwait64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL3(sc_nr, &set, NULL, (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid timeout has to deliver EINVAL */
+	t1.tv_sec = -1;
+	ret = XENOMAI_SYSCALL3(sc_nr, &set, NULL, &t1);
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Providing a zero timeout should come back immediately; no signal
+	 * will be received
+	 */
+	t1.tv_sec = 0;
+	t1.tv_nsec = 0;
+	ret = XENOMAI_SYSCALL3(sc_nr, &set, NULL, &t1);
+	if (!smokey_assert(ret == -EAGAIN))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Provide a valid timeout, wait for it to expire and check
+	 * that we didn't come back too early.
+	 */
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t1.tv_sec = 0;
+	t1.tv_nsec = 500000;
+
+	ret = XENOMAI_SYSCALL3(sc_nr, &set, NULL, &t1);
+	if (!smokey_assert(ret == -EAGAIN))
+		return ret;
+
+	t1.tv_sec = ts_nat.tv_sec;
+	t1.tv_nsec = ts_nat.tv_nsec;
+	ts_add_ns(&t1, 500000);
+
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t2.tv_sec = ts_nat.tv_sec;
+	t2.tv_nsec = ts_nat.tv_nsec;
+
+	if (ts_less(&t2, &t1))
+		smokey_warning("sigtimedwait64 returned too early!\n"
+			       "Expected wakeup at: %lld sec %lld nsec\n"
+			       "Back at           : %lld sec %lld nsec\n",
+			       t1.tv_sec, t1.tv_nsec, t2.tv_sec, t2.tv_nsec);
+
+	return 0;
+}
+
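+/*
+ * Exercise sc_cobalt_monitor_wait64 on a freshly initialized monitor:
+ * timeout validation, then a grant wait which must time out no
+ * earlier than requested.
+ */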
+static int test_sc_cobalt_monitor_wait64(void)
+{
+	int ret, opret;
+	int sc_nr = sc_cobalt_monitor_wait64;
+	struct xn_timespec64 t1, t2, to;
+	struct timespec ts_nat;
+	struct cobalt_monitor_shadow mon;
+
+	ret = cobalt_monitor_init(&mon, CLOCK_REALTIME, 0);
+	if (ret)
+		return -errno;
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL4(sc_nr, NULL, NULL, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("monitor_wait64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL4(sc_nr, &mon, COBALT_MONITOR_WAITGRANT,
+			       (void *)0xdeadbeefUL, NULL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid timeout has to deliver EINVAL */
+	t1.tv_sec = -1;
+	ret = XENOMAI_SYSCALL4(sc_nr, &mon, COBALT_MONITOR_WAITGRANT, &t1,
+			       NULL);
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Provide a valid timeout, wait for it to expire and check
+	 * that we didn't come back too early.
+	 */
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t1.tv_sec = ts_nat.tv_sec;
+	t1.tv_nsec = ts_nat.tv_nsec;
+
+	to = t1;
+	ts_add_ns(&to, 50000);
+
+	ret = XENOMAI_SYSCALL4(sc_nr, &mon, COBALT_MONITOR_WAITGRANT, &to,
+			       &opret);
+	if (!smokey_assert(opret == -ETIMEDOUT))
+		return ret;
+
+	ret = clock_gettime(CLOCK_REALTIME, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t2.tv_sec = ts_nat.tv_sec;
+	t2.tv_nsec = ts_nat.tv_nsec;
+
+	if (ts_less(&t2, &to))
+		smokey_warning("monitor_wait64 returned too early!\n"
+			       "Expected wakeup at: %lld sec %lld nsec\n"
+			       "Back at           : %lld sec %lld nsec\n",
+			       to.tv_sec, to.tv_nsec, t2.tv_sec, t2.tv_nsec);
+
+	if (!__T(ret, cobalt_monitor_destroy(&mon)))
+		return ret;
+
+	return 0;
+}
+
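+/*
+ * Exercise sc_cobalt_event_wait64 on an event group with no bits set:
+ * timeout validation, a zero timeout returning EWOULDBLOCK, and a
+ * timeout which must not expire too early.
+ */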
+static int test_sc_cobalt_event_wait64(void)
+{
+	int ret;
+	int sc_nr = sc_cobalt_event_wait64;
+	struct xn_timespec64 t1, t2;
+	struct timespec ts_nat;
+	struct cobalt_event_shadow evt;
+	unsigned int flags;
+
+	ret = cobalt_event_init(&evt, 0, COBALT_EVENT_FIFO);
+	if (ret)
+		return -errno;
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL5(sc_nr, NULL, NULL, NULL, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("event_wait64: skipped. (no kernel support)");
+		return 0; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL5(sc_nr, &evt, 0x1, &flags, 0,
+			       (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT))
+		return ret ? ret : -EINVAL;
+
+	/* Providing an invalid timeout has to deliver EINVAL */
+	t1.tv_sec = -1;
+	ret = XENOMAI_SYSCALL5(sc_nr, &evt, 0x1, &flags, 0, &t1);
+	if (!smokey_assert(ret == -EINVAL))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Providing a zero timeout should come back immediately
+	 * with EWOULDBLOCK
+	 */
+	t1.tv_sec = 0;
+	t1.tv_nsec = 0;
+	ret = XENOMAI_SYSCALL5(sc_nr, &evt, 0x1, &flags, 0, &t1);
+	if (!smokey_assert(ret == -EWOULDBLOCK))
+		return ret ? ret : -EINVAL;
+
+	/*
+	 * Provide a valid timeout, wait for it to expire and check
+	 * that we didn't come back too early.
+	 */
+	ret = clock_gettime(CLOCK_MONOTONIC, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t1.tv_sec = ts_nat.tv_sec;
+	t1.tv_nsec = ts_nat.tv_nsec;
+	ts_add_ns(&t1, 500000);
+
+	ret = XENOMAI_SYSCALL5(sc_nr, &evt, 0x1, &flags, 0, &t1);
+	if (!smokey_assert(ret == -ETIMEDOUT))
+		return ret;
+
+	ret = clock_gettime(CLOCK_MONOTONIC, &ts_nat);
+	if (ret)
+		return -errno;
+
+	t2.tv_sec = ts_nat.tv_sec;
+	t2.tv_nsec = ts_nat.tv_nsec;
+
+	if (ts_less(&t2, &t1))
+		smokey_warning("event_wait64 returned too early!\n"
+			       "Expected wakeup at: %lld sec %lld nsec\n"
+			       "Back at           : %lld sec %lld nsec\n",
+			       t1.tv_sec, t1.tv_nsec, t2.tv_sec, t2.tv_nsec);
+
+	if (!__T(ret, cobalt_event_destroy(&evt)))
+		return ret;
+
+	return 0;
+}
+
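+/*
+ * Exercise sc_cobalt_recvmmsg64 on a bound XDDP socket carrying no
+ * traffic: timeout validation, a zero timeout returning EWOULDBLOCK,
+ * and a timeout which must not expire too early.
+ */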
+static int test_sc_cobalt_recvmmsg64(void)
+{
+	int ret = 0;
+	int sock;
+	int sc_nr = sc_cobalt_recvmmsg64;
+	long data;
+	struct xn_timespec64 t1, t2;
+	struct timespec ts_nat;
+	struct rtipc_port_label plabel;
+	struct sockaddr_ipc saddr;
+
+	struct iovec iov = {
+		.iov_base = &data,
+		.iov_len = sizeof(data),
+	};
+	struct msghdr msg = {
+		.msg_name = NULL,
+		.msg_namelen = 0,
+		.msg_iov = &iov,
+		.msg_iovlen = 1,
+		.msg_control = NULL,
+		.msg_controllen = 0,
+	};
+
+	sock = smokey_check_errno(socket(AF_RTIPC, SOCK_DGRAM, IPCPROTO_XDDP));
+	if (sock == -EAFNOSUPPORT) {
+		smokey_note("recvmmsg64: skipped. (no kernel support)");
+		return 0;
+	}
+	if (sock < 0)
+		return sock;
+
+	strcpy(plabel.label, "y2038:recvmmsg64");
+	ret = smokey_check_errno(setsockopt(sock, SOL_XDDP, XDDP_LABEL, &plabel,
+					    sizeof(plabel)));
+	if (ret)
+		goto out;
+
+	memset(&saddr, 0, sizeof(saddr));
+	saddr.sipc_family = AF_RTIPC;
+	saddr.sipc_port = -1;
+	ret = smokey_check_errno(bind(sock, (struct sockaddr *)&saddr,
+				      sizeof(saddr)));
+	if (ret)
+		goto out;
+
+	/* Make sure we don't crash because of NULL pointers */
+	ret = XENOMAI_SYSCALL5(sc_nr, NULL, NULL, NULL, NULL, NULL);
+	if (ret == -ENOSYS) {
+		smokey_note("recvmmsg64: skipped. (no kernel support)");
+		goto out; // Not implemented, nothing to test, success
+	}
+	if (!smokey_assert(ret == -EADV)) {
+		ret = ret ? ret : -EINVAL;
+		goto out;
+	}
+
+	/* Providing an invalid address has to deliver EFAULT */
+	ret = XENOMAI_SYSCALL5(sc_nr, sock, &msg, sizeof(msg), 0,
+			       (void *)0xdeadbeefUL);
+	if (!smokey_assert(ret == -EFAULT)) {
+		ret = ret ? ret : -EINVAL;
+		goto out;
+	}
+
+	/* Providing an invalid timeout has to deliver EINVAL */
+	t1.tv_sec = -1;
+	ret = XENOMAI_SYSCALL5(sc_nr, sock, &msg, sizeof(msg), 0, &t1);
+	if (!smokey_assert(ret == -EINVAL)) {
+		ret = ret ? ret : -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Providing a zero timeout should come back immediately
+	 * with EWOULDBLOCK
+	 */
+	t1.tv_sec = 0;
+	t1.tv_nsec = 0;
+	ret = XENOMAI_SYSCALL5(sc_nr, sock, &msg, sizeof(msg), 0, &t1);
+	if (!smokey_assert(ret == -EWOULDBLOCK)) {
+		ret = ret ? ret : -EINVAL;
+		goto out;
+	}
+
+	/*
+	 * Provide a valid timeout, wait for it to expire and check
+	 * that we didn't come back too early.
+	 */
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &ts_nat));
+	if (ret)
+		goto out;
+
+	t1.tv_sec = 0;
+	t1.tv_nsec = 500000;
+
+	ret = XENOMAI_SYSCALL5(sc_nr, sock, &msg, sizeof(msg), 0, &t1);
+	if (!smokey_assert(ret == -ETIMEDOUT)) {
+		ret = ret ? ret : -EINVAL;
+		goto out;
+	}
+
+	t1.tv_sec = ts_nat.tv_sec;
+	t1.tv_nsec = ts_nat.tv_nsec;
+
+	ret = smokey_check_errno(clock_gettime(CLOCK_MONOTONIC, &ts_nat));
+	if (ret)
+		goto out;
+
+	t2.tv_sec = ts_nat.tv_sec;
+	t2.tv_nsec = ts_nat.tv_nsec;
+
+	if (ts_less(&t2, &t1))
+		smokey_warning("recvmmsg64 returned to early!\n"
+			       "Expected wakeup at: %lld sec %lld nsec\n"
+			       "Back at           : %lld sec %lld nsec\n",
+			       t1.tv_sec, t1.tv_nsec, t2.tv_sec, t2.tv_nsec);
+
+out:
+	close(sock);
+	return ret;
+}
+
+static int check_kernel_version(void)
+{
+	int ret, major, minor;
+	struct utsname uts;
+
+	ret = smokey_check_errno(uname(&uts));
+	if (ret)
+		return ret;
+
+	ret = sscanf(uts.release, "%d.%d", &major, &minor);
+	if (!smokey_assert(ret == 2))
+		return -EINVAL;
+
+	/* We need a kernel with y2038 support, 5.4 onwards */
+	if (!(major > 5 || (major == 5 && minor >= 4))) {
+		smokey_note("y2038: skipped. (no y2038 safe kernel)");
+		return 1;
+	}
+
+	return 0;
+}
+
+static int run_y2038(struct smokey_test *t, int argc, char *const argv[])
+{
+	int ret;
+
+	ret = check_kernel_version();
+	if (ret)
+		return (ret < 0) ? ret : 0; /* skip if no y2038 safe kernel */
+
+	ret = test_sc_cobalt_sem_timedwait64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_clock_gettime64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_clock_settime64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_clock_nanosleep64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_clock_getres64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_clock_adjtime64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_mutex_timedlock64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_mq_timedsend64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_mq_timedreceive64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_sigtimedwait64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_monitor_wait64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_event_wait64();
+	if (ret)
+		return ret;
+
+	ret = test_sc_cobalt_recvmmsg64();
+	if (ret)
+		return ret;
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/spitest/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/spitest/Makefile.am
new file mode 100644
index 0000000..55b57fc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/spitest/Makefile.am
@@ -0,0 +1,19 @@
+testdir = @XENO_TEST_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+test_PROGRAMS = spitest
+
+spitest_SOURCES = spitest.c
+
+spitest_CPPFLAGS = 		\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+spitest_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+spitest_LDADD =			\
+	../../lib/smokey/libsmokey@CORE@.la	\
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/testsuite/spitest/spitest.c b/kernel/xenomai-v3.2.4/testsuite/spitest/spitest.c
new file mode 100644
index 0000000..6163e54
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/spitest/spitest.c
@@ -0,0 +1,466 @@
+/*
+ * Copyright (C) 2016 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <sys/types.h>
+#include <sys/mman.h>
+#include <sys/ioctl.h>
+#include <sys/timerfd.h>
+#include <stdio.h>
+#include <error.h>
+#include <semaphore.h>
+#include <errno.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <fcntl.h>
+#include <smokey/smokey.h>
+#include <linux/spi/spidev.h>
+#include <rtdm/spi.h>
+
+smokey_test_plugin(spi_transfer,
+		   SMOKEY_ARGLIST(
+			   SMOKEY_STRING(device),
+			   SMOKEY_INT(speed),
+			   SMOKEY_BOOL(latency),
+			   SMOKEY_INT(ioctl_n)
+		   ),
+   "Run a SPI transfer.\n"
+   "\tdevice=<device-path>\n"
+   "\tspeed=<speed-hz>\n"
+   "\tlatency\n"
+   "\tioctl_n=<set to non-zero to use SPI_RTIOC_TRANSFER_N ioctl>"
+);
+
+#define ONE_BILLION	1000000000
+#define TEN_MILLIONS	10000000
+
+static int with_traffic = 1, with_latency, with_ioctl_n = 0;
+
+#define SEQ_SHIFT 24
+#define SEQ_MASK  ((1 << SEQ_SHIFT) - 1)
+
+#define BAD_CRC  0x1
+#define BAD_SEQ  0x2
+
+struct frame_header {
+	unsigned int seq: SEQ_SHIFT,
+		crc : 8;
+} __attribute__((packed));
+
+/* We send a 32bit header followed by 32 bytes of payload. */
+#define TRANSFER_SIZE (32 + sizeof(struct frame_header))
+
+static unsigned char *i_area, *o_area;
+
+static unsigned int seq_out;
+
+static unsigned int seq_in = 1 << SEQ_SHIFT;
+
+static int32_t minjitter, maxjitter, avgjitter;
+static int32_t gminjitter = TEN_MILLIONS, gmaxjitter = -TEN_MILLIONS;
+static uint32_t goverrun, gerrors;
+static int64_t gavgjitter;
+
+nanosecs_rel_t period_ns = ONE_BILLION / 2; /* 0.5s */
+
+pthread_t display_tid, consumer_tid;
+
+static sem_t display_sem;
+
+static const int data_lines = 21;
+
+static inline void *get_obuf(void)
+{
+	return o_area;
+}
+
+static inline void *get_odata(void)
+{
+	return o_area + sizeof(struct frame_header);
+}
+
+static inline size_t get_odlen(void)
+{
+	return TRANSFER_SIZE - sizeof(struct frame_header);
+}
+
+static void set_output_header(void)
+{
+	struct frame_header *fh = get_obuf();
+	unsigned char *odata = get_odata();
+	size_t odlen = get_odlen(), n;
+	unsigned char csum;
+
+	for (n = 0, csum = 0; n < odlen; n++)
+		csum += *odata++;
+
+	fh->crc = ~csum;
+	fh->seq = seq_out;
+	seq_out = (seq_out + 1) & SEQ_MASK;
+}
+
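+/*
+ * Validate a received frame: the CRC byte must match the complemented
+ * sum of the payload, and the sequence number must follow the
+ * previous one. Returns a mask of BAD_CRC/BAD_SEQ.
+ */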
+static int check_input_header(void *ibuf, size_t ilen)
+{
+	struct frame_header *fh = ibuf;
+	unsigned char *idata = ibuf + sizeof(*fh);
+	size_t idlen = ilen - sizeof(*fh), n;
+	unsigned int seq_next;
+	unsigned char csum;
+	int checkval = 0;
+
+	for (n = 0, csum = 0; n < idlen; n++)
+		csum += *idata++;
+
+	if (fh->crc != (unsigned char)~csum)
+		checkval |= BAD_CRC;
+
+	if (seq_in > SEQ_MASK)
+		seq_in = fh->seq;
+	else {
+		seq_next = (seq_in + 1) & SEQ_MASK;
+		if (fh->seq != seq_next) {
+			/* Try to resync. */
+			seq_in = 1 << SEQ_SHIFT;
+			checkval |= BAD_SEQ;
+		} else
+			seq_in = seq_next;
+	}
+
+	return checkval;
+}
+
+static void do_traffic(int round, const void *ibuf,
+		       size_t ilen, int checkval)
+{
+	const struct frame_header *fh = ibuf;
+	size_t idlen = ilen - sizeof(*fh), n;
+	const unsigned char *idata = ibuf + sizeof(*fh);
+
+	printf("%.4d> seq=%u%s, crc=%.2X%s",
+	       round,
+	       fh->seq, checkval & BAD_SEQ ? "?" : "",
+	       fh->crc, checkval & BAD_CRC ? "?" : "");
+	
+	for (n = 0; n < idlen; n++) {
+		if ((n % 16) == 0)
+			printf("\n");
+		printf("%.2X ", idata[n]);
+	}
+	printf("\n");
+}
+
+static int do_process(int round)
+{
+	size_t odlen, n, ilen = TRANSFER_SIZE;
+	unsigned char *odata, *ibuf = i_area;
+	int checkval;
+
+	checkval = check_input_header(i_area, ilen);
+
+	if (with_traffic)
+		do_traffic(round, ibuf, ilen, checkval);
+
+	odata = get_odata();
+	odlen = get_odlen();
+	for (n = 0; n < odlen; n++)
+		odata[n] = odata[n] + 1 ?: 1;
+
+	set_output_header();
+
+	return checkval ? -EPROTO : 0;
+}
+
+static void timespec_add_ns(struct timespec *t, unsigned int ns)
+{
+	t->tv_nsec += ns;
+	if (t->tv_nsec >= ONE_BILLION) {
+		t->tv_nsec -= ONE_BILLION;
+		t->tv_sec++;
+	}
+}
+
+static inline long long diff_ts(struct timespec *left, struct timespec *right)
+{
+	return (long long)(left->tv_sec - right->tv_sec) * ONE_BILLION
+		+ left->tv_nsec - right->tv_nsec;
+}
+
+static void *display_thread(void *arg)
+{
+	long minj, gminj, maxj, gmaxj, avgj;
+	time_t start, now, dt;
+	int ret, n = 0;
+
+	sem_init(&display_sem, 0, 0);
+
+	time(&start);
+
+	for (;;) {
+		ret = sem_wait(&display_sem);
+		if (ret < 0) {
+			if (errno != EIDRM)
+				panic("sem_wait(), %s", symerror(errno));
+			return NULL;
+		}
+
+		if (smokey_verbose_mode < 1)
+			continue;
+	
+		minj = minjitter;
+		gminj = gminjitter;
+		avgj = avgjitter;
+		maxj = maxjitter;
+		gmaxj = gmaxjitter;
+
+		if (data_lines && (n++ % data_lines) == 0) {
+			time(&now);
+			dt = now - start;
+			printf("RTT|  %.2ld:%.2ld:%.2ld  (%Ld us period)\n",
+			       dt / 3600, (dt / 60) % 60, dt % 60,
+			       (long long)period_ns / 1000);
+			printf("RTH|%11s|%11s|%11s|%8s|%8s|%11s|%11s\n",
+			       "----lat min", "----lat avg",
+			       "----lat max", "-overrun", "-errors",
+			       "---lat best", "--lat worst");
+		}
+		printf("RTD|%11.3f|%11.3f|%11.3f|%8d|%8d|%11.3f|%11.3f\n",
+		       (double)minj / 1000,
+		       (double)avgj / 1000,
+		       (double)maxj / 1000,
+		       goverrun,
+		       gerrors,
+		       (double)gminj / 1000, (double)gmaxj / 1000);
+	}
+
+	return NULL;
+}
+
+static void start_display_thread(void)
+{
+	struct sched_param param;
+	pthread_attr_t attr;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_OTHER);
+	param.sched_priority = 0;
+	pthread_attr_setschedparam(&attr, &param);
+	pthread_create(&display_tid, &attr, display_thread, NULL);
+}
+
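+/*
+ * Delay the first shot to the next period boundary at least one
+ * second away, i.e. one second rounded up to a whole number of
+ * periods.
+ */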
+static inline
+nanosecs_rel_t get_start_delay(void)
+{
+	return ((1000000000ULL + period_ns - 1) / period_ns) * period_ns;
+}
+
+static int do_spi_loop(int fd)
+{
+	int ret, n, nsamples, loops = 0, tfd;
+	struct timespec now, start;
+	struct itimerspec its;
+
+	memset(get_odata(), 0x1, get_odlen());
+	set_output_header();
+
+	if (with_latency) {
+		nsamples = (long long)ONE_BILLION / period_ns;
+		start_display_thread();
+	} else
+		nsamples = 1;
+
+	tfd = timerfd_create(CLOCK_MONOTONIC, 0);
+	if (tfd < 0)
+		return -errno;
+
+	clock_gettime(CLOCK_MONOTONIC, &start);
+	timespec_add_ns(&start, get_start_delay());
+	its.it_value = start;
+	its.it_interval.tv_sec = period_ns / ONE_BILLION;
+	its.it_interval.tv_nsec = period_ns % ONE_BILLION;
+	ret = timerfd_settime(tfd, TFD_TIMER_ABSTIME, &its, NULL);
+	if (ret)
+		return -errno;
+
+	for (;;) {
+		int32_t minj = TEN_MILLIONS, maxj = -TEN_MILLIONS, dt;
+		uint32_t overrun = 0, errors = 0;
+		uint64_t ticks;
+		int64_t sumj;
+
+		loops++;
+	
+		for (n = sumj = 0; n < nsamples; n++) {
+			ret = read(tfd, &ticks, sizeof(ticks));
+			if (ret < 0)
+				break;
+			clock_gettime(CLOCK_MONOTONIC, &start);
+			if (with_ioctl_n == 0) {
+				if (!__Terrno(ret,
+					      ioctl(fd, SPI_RTIOC_TRANSFER)))
+					return ret;
+			} else {
+				if (!__Terrno(ret,
+					      ioctl(fd, SPI_RTIOC_TRANSFER_N,
+						    TRANSFER_SIZE)))
+					return ret;
+			}
+			if (with_latency) {
+				clock_gettime(CLOCK_MONOTONIC, &now);
+				dt = (int32_t)diff_ts(&now, &start);
+				if (dt > maxj)
+					maxj = dt;
+				if (dt < minj)
+					minj = dt;
+				sumj += dt;
+			}
+		
+			ret = do_process(loops);
+			if (ret)
+				errors++;
+		}
+
+		if (with_latency) {
+			minjitter = minj;
+			if (minj < gminjitter)
+				gminjitter = minj;
+			maxjitter = maxj;
+			if (maxj > gmaxjitter)
+				gmaxjitter = maxj;
+			avgjitter = sumj / nsamples;
+			gavgjitter += avgjitter;
+			goverrun += overrun;
+			gerrors += errors;
+			sem_post(&display_sem);
+		}		
+	}
+
+	return 0;
+}
+
+static int run_spi_transfer(struct smokey_test *t, int argc, char *const argv[])
+{
+	int fd, ret, speed_hz = 40000000;
+	struct rtdm_spi_config config;
+	struct rtdm_spi_iobufs iobufs;
+	const char *device = NULL;
+	struct sched_param param;
+	void *p;
+	
+	smokey_parse_args(t, argc, argv);
+
+	if (SMOKEY_ARG_ISSET(spi_transfer, latency) &&
+	    SMOKEY_ARG_BOOL(spi_transfer, latency)) {
+		with_latency = 1;
+		/* Disable traffic tracing when monitoring latency. */
+		with_traffic = 0;
+	}
+
+	if (SMOKEY_ARG_ISSET(spi_transfer, speed))
+		speed_hz = SMOKEY_ARG_INT(spi_transfer, speed);
+
+	if (SMOKEY_ARG_ISSET(spi_transfer, ioctl_n)) {
+		with_ioctl_n = SMOKEY_ARG_INT(spi_transfer, ioctl_n);
+		smokey_note("ioctl_n enabled; using SPI_RTIOC_TRANSFER_N");
+	}
+
+	if (!SMOKEY_ARG_ISSET(spi_transfer, device)) {
+		warning("missing device= specification");
+		return -EINVAL;
+	}
+
+	device = SMOKEY_ARG_STRING(spi_transfer, device);
+	fd = open(device, O_RDWR);
+	if (fd < 0) {
+		ret = -errno;
+		warning("cannot open device %s [%s]",
+			device, symerror(ret));
+		return ret;
+	}
+
+	iobufs.io_len = TRANSFER_SIZE;
+	if (!__Terrno(ret, ioctl(fd, SPI_RTIOC_SET_IOBUFS, &iobufs)))
+		return ret;
+
+	p = mmap(NULL, iobufs.map_len, PROT_READ|PROT_WRITE, MAP_SHARED, fd, 0);
+	if (!__Fassert(p == MAP_FAILED))
+		return -EINVAL;
+	
+	smokey_trace("input_area[%u..%u], output_area[%u..%u], mapping length=%u",
+		     iobufs.i_offset, iobufs.i_offset + TRANSFER_SIZE - 1,
+		     iobufs.o_offset, iobufs.o_offset + TRANSFER_SIZE - 1,
+		     iobufs.map_len);
+	
+	i_area = p + iobufs.i_offset;
+	o_area = p + iobufs.o_offset;
+
+	config.mode = SPI_MODE_0;
+	config.bits_per_word = 8;
+	config.speed_hz = speed_hz;
+	if (!__Terrno(ret, ioctl(fd, SPI_RTIOC_SET_CONFIG, &config)))
+		return ret;
+
+	if (!__Terrno(ret, ioctl(fd, SPI_RTIOC_GET_CONFIG, &config)))
+		return ret;
+
+	smokey_trace("speed=%u hz, mode=%#x, bits=%u",
+		     config.speed_hz, config.mode, config.bits_per_word);
+	
+	/* Switch current thread to real-time. */
+	param.sched_priority = 10;
+	if (!__T(ret, pthread_setschedparam(pthread_self(),
+				    SCHED_FIFO, &param)))
+		return ret;
+
+	if (!__T(ret, do_spi_loop(fd)))
+		return ret;
+
+	return 0;
+}
+
+int main(int argc, char *const argv[])
+{
+	struct smokey_test *t;
+	int ret, fails = 0;
+
+	if (pvlist_empty(&smokey_test_list))
+		return 0;
+
+	for_each_smokey_test(t) {
+		ret = t->run(t, argc, argv);
+		if (ret) {
+			if (ret == -ENOSYS) {
+				smokey_note("%s skipped (no kernel support)",
+					    t->name);
+				continue;
+			}
+			fails++;
+			if (smokey_keep_going)
+				continue;
+			if (smokey_verbose_mode)
+				error(1, -ret, "test %s failed", t->name);
+			return 1;
+		}
+		smokey_note("%s OK", t->name);
+	}
+
+	return fails != 0;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/switchtest/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/switchtest/Makefile.am
new file mode 100644
index 0000000..9ccfa98
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/switchtest/Makefile.am
@@ -0,0 +1,18 @@
+testdir = @XENO_TEST_DIR@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+test_PROGRAMS = switchtest
+
+switchtest_SOURCES = switchtest.c
+
+switchtest_CPPFLAGS =			\
+	$(XENO_USER_CFLAGS)		\
+	-I$(top_srcdir)/include
+
+switchtest_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+switchtest_LDADD = 		\
+	@XENO_CORE_LDADD@	\
+	@XENO_USER_LDADD@	\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/testsuite/switchtest/switchtest.c b/kernel/xenomai-v3.2.4/testsuite/switchtest/switchtest.c
new file mode 100644
index 0000000..9149044
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/switchtest/switchtest.c
@@ -0,0 +1,1572 @@
+/*
+ * Copyright (C) 2006-2013 Gilles Chanteperdrix <gch@xenomai.org>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining
+ * a copy of this software and associated documentation files (the
+ * "Software"), to deal in the Software without restriction, including
+ * without limitation the rights to use, copy, modify, merge, publish,
+ * distribute, sublicense, and/or sell copies of the Software, and to
+ * permit persons to whom the Software is furnished to do so, subject to
+ * the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included
+ * in all copies or substantial portions of the Software.
+ *  
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+ * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
+ * CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
+ * TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
+ * SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+ */
+#include <ctype.h>
+#include <errno.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <stdarg.h>
+#include <limits.h>
+#include <sched.h>
+#include <signal.h>
+#include <unistd.h>
+#include <pthread.h>
+#include <semaphore.h>
+#include <setjmp.h>
+#include <getopt.h>
+#include <asm/unistd.h>
+#include <asm/xenomai/features.h>
+#include <asm/xenomai/syscall.h>
+#include <asm/xenomai/uapi/fptest.h>
+#include <cobalt/trace.h>
+#include <rtdm/testing.h>
+#include <sys/cobalt.h>
+#include <xenomai/init.h>
+
+static unsigned int nr_cpus;
+
+#define for_each_cpu(__cpu)				\
+	for (__cpu = 0; __cpu < CPU_SETSIZE; __cpu++)	\
+		if (CPU_ISSET(__cpu, &__base_setup_data.cpu_affinity))
+
+#define for_each_cpu_index(__cpu, __index)				\
+	for (__cpu = 0, __index = -1; __cpu < CPU_SETSIZE; __cpu++)	\
+		if (CPU_ISSET(__cpu, &__base_setup_data.cpu_affinity) && ++__index >= 0)
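+/*
+ * for_each_cpu_index() numbers the selected CPUs consecutively in
+ * __index (starting from 0), while __cpu walks the raw CPU ids.
+ */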
+
+#if CONFIG_SMP
+#define smp_sched_setaffinity(pid,len,mask) sched_setaffinity(pid,len,mask)
+#else /* !CONFIG_SMP */
+#define smp_sched_setaffinity(pid,len,mask) 0
+#endif /* !CONFIG_SMP */
+
+/* Thread type. */
+typedef enum {
+	SLEEPER = 0,
+	RTK  = 1,	 /* kernel-space thread. */
+	RTUP = 2,	 /* user-space real-time thread in primary mode. */
+	RTUS = 3,	 /* user-space real-time thread in secondary mode. */
+	RTUO = 4,	 /* user-space real-time thread oscillating
+			    between primary and secondary mode. */
+	SWITCHER = 8,
+	FPU_STRESS = 16,
+} threadtype;
+
+typedef enum {
+	AFP  = 1,	 /* arm the FPU task bit (only makes sense for RTK) */
+	UFPP = 2,	 /* use the FPU while in primary mode. */
+	UFPS = 4	 /* use the FPU while in secondary mode. */
+} fpflags;
+
+#define TASK_SWITCH_MODES 3
+
+enum task_switch_mode {
+	TASK_SWITCH_PREVIOUS = 0,
+	TASK_SWITCH_NEXT = 1,
+	TASK_SWITCH_MODE = 2
+};
+
+struct cpu_tasks;
+
+struct task_params {
+	threadtype type;
+	fpflags fp;
+	pthread_t thread;
+	struct cpu_tasks *cpu;
+	struct rttst_swtest_task swt;
+};
+
+struct cpu_tasks {
+	unsigned int index;
+	struct task_params *tasks;
+	unsigned tasks_count;
+	unsigned capacity;
+	unsigned fd;
+	unsigned long last_switches_count;
+};
+
+static sem_t sleeper_start;
+static int quiet, status;
+static struct timespec start;
+static pthread_mutex_t headers_lock;
+static unsigned long data_lines = 21;
+static unsigned freeze_on_error;
+static int fp_features;
+static pthread_t main_tid;
+
+static inline unsigned stack_size(unsigned size)
+{
+	return size > PTHREAD_STACK_MIN ? size : PTHREAD_STACK_MIN;
+}
+
+static inline void clean_exit(int retval)
+{
+	status = retval;
+	__STD(pthread_kill(main_tid, SIGTERM));
+	for (;;)
+		/* Wait for cancellation. */
+		__STD(sem_wait(&sleeper_start));
+}
+
+static void timespec_substract(struct timespec *result,
+			const struct timespec *lhs,
+			const struct timespec *rhs)
+{
+	result->tv_sec = lhs->tv_sec - rhs->tv_sec;
+	if (lhs->tv_nsec >= rhs->tv_nsec)
+		result->tv_nsec = lhs->tv_nsec - rhs->tv_nsec;
+	else {
+		result->tv_sec -= 1;
+		result->tv_nsec = lhs->tv_nsec + (1000000000 - rhs->tv_nsec);
+	}
+}
+
+static char *task_name(char *buf, size_t sz,
+		       struct cpu_tasks *cpu, unsigned task)
+{
+	char *basename [] = {
+		[SLEEPER] = "sleeper",
+		[RTK] = "rtk",
+		[RTUP] = "rtup",
+		[RTUS] = "rtus",
+		[RTUO] = "rtuo",
+		[SWITCHER] = "switcher",
+		[FPU_STRESS] = "fpu_stress",
+	};
+	struct {
+		unsigned flag;
+		char *name;
+	} flags [] = {
+		{ .flag = AFP, .name = "fp" },
+		{ .flag = UFPP, .name = "ufpp" },
+		{ .flag = UFPS, .name = "ufps" },
+	};
+	struct task_params *param;
+	unsigned pos, i;
+
+	if (task > cpu->tasks_count)
+		return "???";
+
+	if (task == cpu->tasks_count)
+		param = &cpu->tasks[task];
+	else
+		for (param = &cpu->tasks[0]; param->swt.index != task; param++)
+			;
+
+	pos = snprintf(buf, sz, "%s", basename[param->type]);
+	for (i = 0; i < sizeof(flags) / sizeof(flags[0]); i++) {
+		if (!(param->fp & flags[i].flag))
+			continue;
+
+		pos += snprintf(&buf[pos],
+				sz - pos, "_%s", flags[i].name);
+	}
+
+#ifdef CONFIG_SMP
+	pos += snprintf(&buf[pos], sz - pos, "%u", cpu->index);
+#endif /* !CONFIG_SMP */
+
+	snprintf(&buf[pos], sz - pos, "-%u", param->swt.index);
+
+	return buf;
+}
+
+static void handle_bad_fpreg(struct cpu_tasks *cpu, unsigned fp_val)
+{
+	struct rttst_swtest_error err;
+	unsigned from, to;
+	char buffer[64];
+
+	if (freeze_on_error)
+		xntrace_user_freeze(0, 0);
+
+	ioctl(cpu->fd, RTTST_RTIOC_SWTEST_GET_LAST_ERROR, &err);
+
+	if (fp_val == ~0)
+		fp_val = err.fp_val;
+
+	from = err.last_switch.from;
+	to = err.last_switch.to;
+
+	fprintf(stderr, "Error after context switch from task %d(%s) ",
+		from, task_name(buffer, sizeof(buffer), cpu, from));
+	fprintf(stderr, "to task %d(%s),\nFPU registers were set to %u ",
+		to, task_name(buffer, sizeof(buffer), cpu, to), fp_val);
+	fp_val %= 1000;
+	if (fp_val < 500)
+		fprintf(stderr, "(maybe task %s)\n",
+			task_name(buffer, sizeof(buffer), cpu, fp_val));
+	else {
+		fp_val -= 500;
+		if (fp_val > cpu->tasks_count)
+			fprintf(stderr, "(unidentified task)\n");
+		else
+			fprintf(stderr, "(maybe task %s, having used fpu in "
+				"kernel-space)\n",
+				task_name(buffer, sizeof(buffer), cpu, fp_val));
+	}
+
+	clean_exit(EXIT_FAILURE);
+}
+
+static void display_cleanup(void *cookie)
+{
+	pthread_mutex_t *mutex = (pthread_mutex_t *) cookie;
+	__STD(pthread_mutex_unlock(mutex));
+}
+
+static void display_switches_count(struct cpu_tasks *cpu, struct timespec *now)
+{
+	static unsigned nlines = 0;
+	__u32 switches_count;
+
+	if (ioctl(cpu->fd,
+		  RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT, &switches_count)) {
+		perror("sleeper: ioctl(RTTST_RTIOC_SWTEST_GET_SWITCHES_COUNT)");
+		clean_exit(EXIT_FAILURE);
+	}
+
+	if (switches_count &&
+	    switches_count == cpu->last_switches_count) {
+		fprintf(stderr, "No context switches during one second, "
+			"aborting.\n");
+		clean_exit(EXIT_FAILURE);
+	}
+
+	if (quiet)
+		return;
+
+	pthread_setcanceltype(PTHREAD_CANCEL_DEFERRED, NULL);
+	pthread_cleanup_push(display_cleanup, &headers_lock);
+	__STD(pthread_mutex_lock(&headers_lock));
+
+	if (data_lines && (nlines++ % data_lines) == 0) {
+		struct timespec diff;
+		long dt;
+
+		timespec_substract(&diff, now, &start);
+		dt = diff.tv_sec;
+
+		printf("RTT|  %.2ld:%.2ld:%.2ld\n",
+		       dt / 3600, (dt / 60) % 60, dt % 60);
+#ifdef CONFIG_SMP
+		printf("RTH|%12s|%12s|%12s\n",
+		       "---------cpu","ctx switches","-------total");
+#else /* !CONFIG_SMP */
+		printf("RTH|%12s|%12s\n", "ctx switches","-------total");
+#endif /* !CONFIG_SMP */
+	}
+
+#ifdef CONFIG_SMP
+	printf("RTD|%12u|%12lu|%12u\n", cpu->index,
+	       switches_count - cpu->last_switches_count, switches_count);
+#else /* !CONFIG_SMP */
+	printf("RTD|%12lu|%12u\n",
+	       switches_count - cpu->last_switches_count, switches_count);
+#endif /* !CONFIG_SMP */
+
+	pthread_cleanup_pop(1);
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+
+	cpu->last_switches_count = switches_count;
+}
+
+static int printout(const char *fmt, ...)
+{
+	va_list ap;
+
+	va_start(ap, fmt);
+	
+	if (quiet < 2)
+		vprintf(fmt, ap);
+
+	va_end(ap);
+
+	return 0;
+}
+
+#define check_fp_result(__expected)	\
+	fp_regs_check(fp_features, __expected, printout)
+
+static void _assert_primary_mode(char const *calling_func)
+{
+	if (cobalt_thread_mode() & (XNRELAX|XNWEAK)) {
+		fprintf(stderr,
+			"Switch to primary mode failed in %s.",
+			calling_func);
+		clean_exit(EXIT_FAILURE);
+	}
+}
+
+#define assert_primary_mode() _assert_primary_mode(__func__)
+
+static void _assert_secondary_mode(char const *calling_func)
+{
+	if (!(cobalt_thread_mode() & XNRELAX)) {
+		fprintf(stderr,
+			"Switch to secondary mode failed in %s.",
+			calling_func);
+		clean_exit(EXIT_FAILURE);
+	}
+}
+
+#define assert_secondary_mode() _assert_secondary_mode(__func__)
+
+static void switch_to_primary_mode(void)
+{
+	cobalt_thread_harden();
+	assert_primary_mode();
+}
+
+static void switch_to_secondary_mode(void)
+{
+	cobalt_thread_relax();
+	assert_secondary_mode();
+}
+
+static void switch_to_secondary_mode_by_using_linux_syscall(void)
+{
+	syscall(__NR_gettid);
+	assert_secondary_mode();
+}
+
+#define SWITCH_FUNC_COUNT 4
+
+static void (*switch_funcs[SWITCH_FUNC_COUNT]) (void) = {
+	switch_to_secondary_mode,
+	switch_to_primary_mode,
+	switch_to_secondary_mode_by_using_linux_syscall,
+	switch_to_primary_mode,
+};
+
+/* Switch two times: primary to secondary and back from secondary to primary */
+#define MODE_SWITCHES_KERNEL 2
+
+static void *sleeper_switcher(void *cookie)
+{
+	struct task_params *param = (struct task_params *) cookie;
+	unsigned to, tasks_count = param->cpu->tasks_count;
+	struct timespec ts, last;
+	int fd = param->cpu->fd;
+	struct rttst_swtest_dir rtsw;
+	cpu_set_t cpu_set;
+	unsigned i = 1;		/* Start at 1 to avoid returning to a
+				   non-existing task. */
+	int ret;
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(param->cpu->index, &cpu_set);
+	if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
+		perror("sleeper: sched_setaffinity");
+		clean_exit(EXIT_FAILURE);
+	}
+
+	rtsw.switch_mode = 0;
+	rtsw.from = param->swt.index;
+	to = param->swt.index;
+
+	ts.tv_sec = 0;
+	ts.tv_nsec = 1000000;
+
+	ret = __STD(sem_wait(&sleeper_start));
+	if (ret) {
+		fprintf(stderr, "sem_wait FAILED (%d)\n", errno);
+		fflush(stderr);
+		exit(77);
+	}
+
+	clock_gettime(CLOCK_REALTIME, &last);
+
+	/* ioctl is not a cancellation point, but we want cancellation to be
+	   allowed when suspended in ioctl. */
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+
+	for (;;) {
+		struct timespec now, diff;
+		unsigned expected, fp_val;
+		int err;
+		if (param->type == SLEEPER)
+			__STD(nanosleep(&ts, NULL));
+
+		clock_gettime(CLOCK_REALTIME, &now);
+
+		timespec_substract(&diff, &now, &last);
+		if (diff.tv_sec >= 1) {
+			last = now;
+
+			display_switches_count(param->cpu, &now);
+		}
+
+		if (tasks_count == 1)
+			continue;
+
+		switch (i % TASK_SWITCH_MODES) {
+		case TASK_SWITCH_PREVIOUS:
+			/* to == from means "return to last task" */
+			rtsw.to = rtsw.from;
+			break;
+
+		case TASK_SWITCH_NEXT:
+			if (++to == rtsw.from)
+				++to;
+			if (to > tasks_count - 1)
+				to = 0;
+			if (to == rtsw.from)
+				++to;
+			rtsw.to = to;
+
+			/* If i % 3 == 2, repeat the same switch. */
+		}
+
+		expected = rtsw.from + i * 1000;
+		if (param->fp & UFPS)
+			fp_regs_set(fp_features, expected);
+		err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
+		while (err == -1 && errno == EINTR)
+			err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
+
+		switch (err) {
+		case 0:
+			break;
+		case 1:
+			handle_bad_fpreg(param->cpu, ~0);
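+			/* fall through */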
+		case -1:
+			clean_exit(EXIT_FAILURE);
+		}
+		if (param->fp & UFPS) {
+			fp_val = check_fp_result(expected);
+			if (fp_val != expected)
+				handle_bad_fpreg(param->cpu, fp_val);
+		}
+
+		if(++i == 4000000)
+			i = 0;
+	}
+
+	return NULL;
+}
+
+
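+/* Plain dot product; fpu_stress() below uses it to keep the FPU busy and to
+   detect corruption of the computation across context switches. */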
+static double dot(volatile double *a, volatile double *b, int n)
+{
+    int k = n - 1;
+    double s = 0.0;
+    for(; k >= 0; k--)
+	s = s + a[k]*b[k];
+
+    return s;
+}
+
+static void *fpu_stress(void *cookie)
+{
+	static volatile double a[10000], b[sizeof(a)/sizeof(a[0])];
+	struct task_params *param = (struct task_params *) cookie;
+	cpu_set_t cpu_set;
+	unsigned i;
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(param->cpu->index, &cpu_set);
+	if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
+		perror("sleeper: sched_setaffinity");
+		clean_exit(EXIT_FAILURE);
+	}
+
+	for (i = 0; i < sizeof(a)/sizeof(a[0]); i++)
+		a[i] = b[i] = 3.14;
+
+	for (;;) {
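+		/* Each term is 3.14 * 3.14 = 9.8596; summed over the 10000
+		   elements the dot product is ~98596, and the +0.5 below
+		   rounds away the floating-point error before comparing. */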
+		double s = dot(a, b, sizeof(a)/sizeof(a[0]));
+		if ((unsigned) (s + 0.5) != 98596) {
+			fprintf(stderr, "fpu stress task failure! dot: %g\n", s);
+			clean_exit(EXIT_FAILURE);
+		}
+		pthread_testcancel();
+	}
+
+	return NULL;
+}
+
+static void *rtup(void *cookie)
+{
+	struct task_params *param = (struct task_params *) cookie;
+	unsigned to, tasks_count = param->cpu->tasks_count;
+	int err, fd = param->cpu->fd;
+	struct rttst_swtest_dir rtsw;
+	cpu_set_t cpu_set;
+	unsigned i = 0;
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(param->cpu->index, &cpu_set);
+	if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
+		perror("rtup: sched_setaffinity");
+		clean_exit(EXIT_FAILURE);
+	}
+
+	rtsw.switch_mode = 0;
+	rtsw.from = param->swt.index;
+	to = param->swt.index;
+
+	/* ioctl is not a cancellation point, but we want cancellation to be
+	   allowed when suspended in ioctl. */
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+
+	switch_to_primary_mode();
+
+	do {
+		err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
+	} while (err == -1 && errno == EINTR);
+
+	if (err == -1)
+		return NULL;
+
+	for (;;) {
+		unsigned expected, fp_val;
+
+		switch (i % TASK_SWITCH_MODES) {
+		case TASK_SWITCH_PREVIOUS:
+			/* to == from means "return to last task" */
+			rtsw.to = rtsw.from;
+			break;
+
+		case TASK_SWITCH_NEXT:
+			if (++to == rtsw.from)
+				++to;
+			if (to > tasks_count - 1)
+				to = 0;
+			if (to == rtsw.from)
+				++to;
+			rtsw.to = to;
+
+			/* If i % 3 == 2, repeat the same switch. */
+		}
+
+		expected = rtsw.from + i * 1000;
+		if (param->fp & UFPP)
+			fp_regs_set(fp_features, expected);
+		err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
+		while (err == -1 && errno == EINTR)
+			err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
+
+		switch (err) {
+		case 0:
+			break;
+		case 1:
+			handle_bad_fpreg(param->cpu, ~0);
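+			/* fall through */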
+		case -1:
+			clean_exit(EXIT_FAILURE);
+		}
+		if (param->fp & UFPP) {
+			fp_val = check_fp_result(expected);
+			if (fp_val != expected)
+				handle_bad_fpreg(param->cpu, fp_val);
+		}
+
+		if(++i == 4000000)
+			i = 0;
+	}
+
+	return NULL;
+}
+
+static void *rtus(void *cookie)
+{
+	struct task_params *param = (struct task_params *) cookie;
+	unsigned to, tasks_count = param->cpu->tasks_count;
+	int err, fd = param->cpu->fd;
+	struct rttst_swtest_dir rtsw;
+	cpu_set_t cpu_set;
+	unsigned i = 0;
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(param->cpu->index, &cpu_set);
+	if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
+		perror("rtus: sched_setaffinity");
+		clean_exit(EXIT_FAILURE);
+	}
+
+	rtsw.switch_mode = 0;
+	rtsw.from = param->swt.index;
+	to = param->swt.index;
+
+	/* ioctl is not a cancellation point, but we want cancellation to be
+	   allowed when suspended in ioctl. */
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+
+	switch_to_secondary_mode();
+
+	do {
+		err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
+	} while (err == -1 && errno == EINTR);
+
+	if (err == -1)
+		return NULL;
+
+	for (;;) {
+		unsigned expected, fp_val;
+
+		switch (i % TASK_SWITCH_MODES) {
+		case TASK_SWITCH_PREVIOUS:
+			/* to == from means "return to last task" */
+			rtsw.to = rtsw.from;
+			break;
+
+		case TASK_SWITCH_NEXT:
+			if (++to == rtsw.from)
+				++to;
+			if (to > tasks_count - 1)
+				to = 0;
+			if (to == rtsw.from)
+				++to;
+			rtsw.to = to;
+
+			/* If i % 3 == 2, repeat the same switch. */
+		}
+
+		expected = rtsw.from + i * 1000;
+		if (param->fp & UFPS)
+			fp_regs_set(fp_features, expected);
+		err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
+		while (err == -1 && errno == EINTR)
+			err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
+
+		switch (err) {
+		case 0:
+			break;
+		case 1:
+			handle_bad_fpreg(param->cpu, ~0);
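+			/* fall through */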
+		case -1:
+			clean_exit(EXIT_FAILURE);
+		}
+		if (param->fp & UFPS) {
+			fp_val = check_fp_result(expected);
+			if (fp_val != expected)
+				handle_bad_fpreg(param->cpu, fp_val);
+		}
+
+		if(++i == 4000000)
+			i = 0;
+	}
+
+	return NULL;
+}
+
+static void *rtuo(void *cookie)
+{
+	struct task_params *param = (struct task_params *) cookie;
+	unsigned mode, to, tasks_count = param->cpu->tasks_count;
+	int err, fd = param->cpu->fd;
+	struct rttst_swtest_dir rtsw;
+	cpu_set_t cpu_set;
+	unsigned i = 0;
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(param->cpu->index, &cpu_set);
+	if (smp_sched_setaffinity(0, sizeof(cpu_set), &cpu_set)) {
+		perror("rtuo: sched_setaffinity");
+		clean_exit(EXIT_FAILURE);
+	}
+
+	rtsw.switch_mode = 0;
+	rtsw.from = param->swt.index;
+	to = param->swt.index;
+
+	/* ioctl is not a cancellation point, but we want cancellation to be
+	   allowed when suspended in ioctl. */
+	pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, NULL);
+
+	switch_to_primary_mode();
+	mode = COBALT_PRIMARY;
+
+	do {
+		err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
+	} while (err == -1 && errno == EINTR);
+
+	if (err == -1)
+		return NULL;
+
+	for (;;) {
+		unsigned expected, fp_val;
+
+		switch (i % TASK_SWITCH_MODES) {
+		case TASK_SWITCH_PREVIOUS:
+			/* to == from means "return to last task" */
+			rtsw.to = rtsw.from;
+			break;
+
+		case TASK_SWITCH_NEXT:
+			if (++to == rtsw.from)
+				++to;
+			if (to > tasks_count - 1)
+				to = 0;
+			if (to == rtsw.from)
+				++to;
+			rtsw.to = to;
+
+			/* If i % 3 == 2, repeat the same switch. */
+		}
+
+		expected = rtsw.from + i * 1000;
+		if ((mode == COBALT_PRIMARY && param->fp & UFPP) ||
+		    (mode == COBALT_SECONDARY && param->fp & UFPS)) {
+			fp_regs_set(fp_features, expected);
+		}
+
+		err = ioctl(fd, RTTST_RTIOC_SWTEST_SWITCH_TO, &rtsw);
+		while (err == -1 && errno == EINTR)
+			err = ioctl(fd, RTTST_RTIOC_SWTEST_PEND, &param->swt);
+
+		/* Return to default: do not switch in syscall */
+		rtsw.switch_mode = 0;
+
+		switch (err) {
+		case 0:
+			break;
+		case 1:
+			handle_bad_fpreg(param->cpu, ~0);
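+			/* fall through */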
+		case -1:
+			clean_exit(EXIT_FAILURE);
+		}
+
+		if ((mode == COBALT_PRIMARY && param->fp & UFPP) ||
+		    (mode == COBALT_SECONDARY && param->fp & UFPS)) {
+			fp_val = check_fp_result(expected);
+			if (fp_val != expected)
+				handle_bad_fpreg(param->cpu, fp_val);
+		}
+
+		/* Switch between primary and secondary mode */
+		if (i % TASK_SWITCH_MODES == TASK_SWITCH_MODE) {
+			uint switch_iteration = (i / TASK_SWITCH_MODES %
+				(SWITCH_FUNC_COUNT + MODE_SWITCHES_KERNEL));
+
+			if (switch_iteration < SWITCH_FUNC_COUNT) {
+				switch_funcs[switch_iteration]();
+			} else {
+				/* Switch mode on next
+				 * RTTST_RTIOC_SWTEST_SWITCH_TO syscall */
+				rtsw.switch_mode = 1;
+			}
+
+			mode = !mode;
+		}
+
+		if(++i == 4000000)
+			i = 0;
+	}
+
+	return NULL;
+}
+
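+/* Parse one threadspec, e.g. "rtuo_ufpp_ufps1": a task type, zero or more
+   FPU flags, then an optional CPU number (defaults to CPU 0). */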
+static int parse_arg(struct task_params *param,
+		     const char *text,
+		     struct cpu_tasks *cpus)
+{
+	struct t2f {
+		const char *text;
+		unsigned flag;
+	};
+
+	static struct t2f type2flags [] = {
+		{ "rtk",  RTK  },
+		{ "rtup", RTUP },
+		{ "rtus", RTUS },
+		{ "rtuo", RTUO }
+	};
+
+	static struct t2f fp2flags [] = {
+		{ "_fp",   AFP	 },
+		{ "_ufpp", UFPP },
+		{ "_ufps", UFPS }
+	};
+
+	unsigned long cpu;
+	char *cpu_end;
+	unsigned i;
+	int n;
+
+	param->type = param->fp = 0;
+	param->cpu = &cpus[0];
+
+	for(i = 0; i < sizeof(type2flags)/sizeof(struct t2f); i++) {
+		size_t len = strlen(type2flags[i].text);
+
+		if(!strncmp(text, type2flags[i].text, len)) {
+			param->type = type2flags[i].flag;
+			text += len;
+			goto fpflags;
+		}
+	}
+
+	return -1;
+
+  fpflags:
+	if (*text == '\0')
+		return 0;
+
+	if (isdigit(*text))
+		goto cpu_nr;
+
+	for(i = 0; i < sizeof(fp2flags)/sizeof(struct t2f); i++) {
+		size_t len = strlen(fp2flags[i].text);
+
+		if(!strncmp(text, fp2flags[i].text, len)) {
+			param->fp |= fp2flags[i].flag;
+			text += len;
+
+			goto fpflags;
+		}
+	}
+
+	return -1;
+
+  cpu_nr:
+	cpu = strtoul(text, &cpu_end, 0);
+
+	if (*cpu_end != '\0' || (cpu == ULONG_MAX && errno))
+		return -1;
+
+	param->cpu = &cpus[nr_cpus]; /* Invalid at first. */
+	for_each_cpu_index(i, n)
+		if (i == cpu) {
+			param->cpu = &cpus[n];
+			break;
+		}
+
+	return 0;
+}
+
+static int check_arg(const struct task_params *param, struct cpu_tasks *end_cpu)
+{
+	if (param->cpu > end_cpu - 1)
+		return 0;
+
+	switch (param->type) {
+	case SLEEPER:
+	case SWITCHER:
+	case FPU_STRESS:
+		break;
+
+	case RTK:
+		if (param->fp & UFPS)
+			return 0;
+		break;
+
+	case RTUP:
+		if (param->fp & (AFP|UFPS))
+			return 0;
+		break;
+
+	case RTUS:
+		if (param->fp & (AFP|UFPP))
+			return 0;
+		break;
+
+	case RTUO:
+		if (param->fp & AFP)
+			return 0;
+		break;
+	default:
+		return 0;
+	}
+
+	return 1;
+}
+
+static int task_create(struct cpu_tasks *cpu,
+		       struct task_params *param,
+		       pthread_attr_t *rt_attr)
+{
+	char buffer[64];
+	typedef void *thread_routine(void *);
+	thread_routine *task_routine [] = {
+		[RTUP] = &rtup,
+		[RTUS] = &rtus,
+		[RTUO] = &rtuo
+	};
+	int err;
+
+	switch(param->type) {
+	case RTK:
+		param->swt.flags = (param->fp & AFP ? RTTST_SWTEST_FPU : 0)
+			| (param->fp & UFPP ? RTTST_SWTEST_USE_FPU : 0)
+			| (freeze_on_error ? RTTST_SWTEST_FREEZE : 0);
+
+		err = ioctl(cpu->fd, RTTST_RTIOC_SWTEST_CREATE_KTASK,
+			    &param->swt);
+		if (err) {
+			perror("ioctl(RTTST_RTIOC_SWTEST_CREATE_KTASK)");
+			return -1;
+		}
+		break;
+
+	case RTUP:
+	case RTUS:
+	case RTUO:
+	case SLEEPER:
+	case SWITCHER:
+		param->swt.flags = 0;
+
+		err = ioctl(cpu->fd, RTTST_RTIOC_SWTEST_REGISTER_UTASK,
+			    &param->swt);
+		if (err) {
+			perror("ioctl(RTTST_RTIOC_SWTEST_REGISTER_UTASK)");
+			return -1;
+		}
+		break;
+
+	case FPU_STRESS:
+		break;
+
+	default:
+		fprintf(stderr, "Invalid task type %d. Aborting\n", param->type);
+		return EINVAL;
+	}
+
+	if (param->type == RTK)
+		return 0;
+
+	if (param->type == SLEEPER || param->type == SWITCHER) {
+		pthread_attr_t attr;
+
+		pthread_attr_init(&attr);
+		pthread_attr_setstacksize(&attr, stack_size(32768));
+
+		err = __STD(pthread_create(&param->thread,
+					   &attr,
+					   sleeper_switcher,
+					   param));
+
+		pthread_attr_destroy(&attr);
+
+		if (err)
+			fprintf(stderr,"pthread_create: %s\n",strerror(err));
+
+
+		return err;
+	}
+
+	if (param->type == FPU_STRESS) {
+		pthread_attr_t attr;
+
+		pthread_attr_init(&attr);
+		pthread_attr_setstacksize(&attr, stack_size(65536));
+
+		err = __STD(pthread_create(&param->thread,
+					   &attr,
+					   fpu_stress,
+					   param));
+
+		pthread_attr_destroy(&attr);
+
+		if (err)
+			fprintf(stderr,"pthread_create: %s\n",strerror(err));
+
+
+		return err;
+	}
+
+	err = pthread_create(&param->thread, rt_attr,
+			     task_routine[param->type], param);
+	if (err) {
+		fprintf(stderr, "pthread_create: %s\n", strerror(err));
+		return err;
+	}
+
+	err = pthread_setname_np(param->thread,
+				 task_name(buffer, sizeof(buffer),
+					   param->cpu,param->swt.index));
+
+	if (err)
+		fprintf(stderr,"pthread_setname_np: %s\n", strerror(err));
+
+	return err;
+}
+
+static int open_rttest(char *buf, size_t size, unsigned count)
+{
+	int fd, ret;
+
+	fd = open("/dev/rtdm/switchtest", O_RDWR);
+	if (fd < 0) {
+		fprintf(stderr, "switchtest: cannot open /dev/rtdm/switchtest\n"
+			"(modprobe xeno_switchtest?)\n");
+		return -1;
+	}
+
+	ret = ioctl(fd, RTTST_RTIOC_SWTEST_SET_TASKS_COUNT, count);
+	if (ret) {
+		fprintf(stderr, "switchtest: ioctl: %m\n");
+		return -1;
+	}
+
+	return fd;
+}
+
+const char *all_nofp [] = {
+	"rtk",
+	"rtk",
+	"rtup",
+	"rtup",
+	"rtus",
+	"rtus",
+	"rtuo",
+	"rtuo",
+};
+
+const char *all_fp [] = {
+	"rtk",
+	"rtk",
+	"rtk_fp",
+	"rtk_fp",
+	"rtk_fp_ufpp",
+	"rtk_fp_ufpp",
+	"rtup",
+	"rtup",
+	"rtup_ufpp",
+	"rtup_ufpp",
+	"rtus",
+	"rtus",
+	"rtus_ufps",
+	"rtus_ufps",
+	"rtuo",
+	"rtuo",
+	"rtuo_ufpp",
+	"rtuo_ufpp",
+	"rtuo_ufps",
+	"rtuo_ufps",
+	"rtuo_ufpp_ufps",
+	"rtuo_ufpp_ufps"
+};
+
+static unsigned long xatoul(const char *str)
+{
+	unsigned long result;
+	char *endptr;
+
+	result = strtoul(str, &endptr, 0);
+
+	if (result == ULONG_MAX && errno == ERANGE) {
+		fprintf(stderr, "Overflow while parsing %s\n", str);
+		exit(EXIT_FAILURE);
+	}
+
+	if (*endptr != '\0') {
+		fprintf(stderr, "Error while parsing \"%s\" as a number\n", str);
+		exit(EXIT_FAILURE);
+	}
+
+	return result;
+}
+
+static void usage(FILE *fd, const char *progname)
+{
+	unsigned i, j;
+
+	fprintf(fd,
+		"Usage:\n"
+		"%s [options] threadspec threadspec...\n"
+		"Create threads of various types and attempt to switch context "
+		"between these\nthreads, printing the count of context switches "
+		"every second.\n\n"
+		"Available options are:\n"
+		"--help or -h, cause this program to print this help string and "
+		"exit;\n"
+		"--lines <lines> or -l <lines> print headers every <lines> "
+		"lines.\n"
+		"--quiet or -q, prevent this program from printing every "
+		"second the count of\ncontext switches;\n"
+		"--really-quiet or -Q, prevent this program from printing any output;\n"
+		"--timeout <duration> or -T <duration>, limit the test duration "
+		"to <duration>\nseconds;\n"
+		"--nofpu or -n, disables any use of FPU instructions.\n"
+		"--stress <period> or -s <period> enable a stress mode where:\n"
+		"  context switches occur every <period> us;\n"
+		"  a background task uses fpu (and check) fpu all the time.\n"
+		"--freeze trace upon error.\n\n"
+		"Each 'threadspec' specifies the characteristics of a "
+		"thread to be created:\n"
+		"threadspec = (rtk|rtup|rtus|rtuo)(_fp|_ufpp|_ufps)*[0-9]*\n"
+		"rtk for a kernel-space real-time thread;\n"
+		"rtup for a user-space real-time thread running in primary"
+		" mode,\n"
+		"rtus for a user-space real-time thread running in secondary"
+		" mode,\n"
+		"rtuo for a user-space real-time thread oscillating between"
+		" primary and\nsecondary mode,\n\n"
+		"_fp means that the created thread will have the XNFPU bit"
+		" armed (only valid for\nrtk),\n"
+		"_ufpp means that the created thread will use the FPU when in "
+		"primary mode\n(invalid for rtus),\n"
+		"_ufps means that the created thread will use the FPU when in "
+		"secondary mode\n(invalid for rtk and rtup),\n\n"
+		"[0-9]* specifies the ID of the CPU where the created thread "
+		"will run, 0 if\nunspecified.\n\n"
+		"Passing no 'threadspec' is equivalent to running:\n%s",
+		progname, progname);
+
+	for_each_cpu(i) {
+		for (j = 0; j < sizeof(all_fp)/sizeof(char *); j++)
+			fprintf(fd, " %s%d", all_fp[j], i);
+	}
+
+	fprintf(fd,
+		"\n\nPassing only the --nofpu or -n argument is equivalent to "
+		"running:\n%s", progname);
+
+	for_each_cpu(i) {
+		for (j = 0; j < sizeof(all_nofp)/sizeof(char *); j++)
+			fprintf(fd, " %s%d", all_nofp[j], i);
+	}
+	fprintf(fd, "\n\n");
+}
+
+void application_usage(void)
+{
+	usage(stdout, get_program_name());
+}
+
+static sigjmp_buf jump;
+
+static void illegal_instruction(int sig)
+{
+	signal(sig, SIG_DFL);
+	siglongjmp(jump, 1);
+}
+
+/* We run the FPU check in a separate thread to avoid clobbering the main
+   thread's FPU backup area. This matters on x86, where such clobbering would
+   leave the FPU backup areas of all RT threads clobbered as well, so their
+   FPU contexts would be switched systematically (and the case where the FPU
+   has never been used would go untested). */
+static void *check_fpu_thread(void *cookie)
+{
+	int check;
+
+	/* Check if fp routines are dummy or if hw fpu is not supported. */
+	if (quiet < 2)
+		fprintf(stderr, "== Testing FPU check routines...\n");
+	if(sigsetjmp(jump, 1)) {
+		if (quiet < 2)
+			fprintf(stderr,
+			"== Hardware FPU not available on your board"
+			" or not enabled in Linux kernel\n== configuration:"
+			" skipping FPU switches tests.\n");
+		return NULL;
+	}
+	signal(SIGILL, illegal_instruction);
+	fp_regs_set(fp_features, 1);
+	check = check_fp_result(2);
+	signal(SIGILL, SIG_DFL);
+	if (check != 1) {
+		if (quiet < 2)
+			fprintf(stderr,
+				"== FPU check routines: unimplemented, "
+				"skipping FPU switches tests.\n");
+		return NULL;
+	}
+
+	if (quiet < 2)
+		fprintf(stderr, "== FPU check routines: OK.\n");
+
+	return (void *) 1;
+}
+
+static int check_fpu(void)
+{
+	pthread_t tid;
+	void *status;
+	int err;
+
+	err = __STD(pthread_create(&tid, NULL, check_fpu_thread, NULL));
+	if (err) {
+		fprintf(stderr, "pthread_create: %s\n", strerror(err));
+		exit(EXIT_FAILURE);
+	}
+
+	err = pthread_join(tid, &status);
+	if (err) {
+		fprintf(stderr, "pthread_join: %s\n", strerror(err));
+		exit(EXIT_FAILURE);
+	}
+
+	return (long) status;
+}
+
+int main(int argc, const char *argv[])
+{
+	unsigned i, j, n, use_fp = 1, stress = 0;
+	pthread_attr_t rt_attr;
+	const char *progname = argv[0];
+	struct cpu_tasks *cpus;
+	struct sched_param sp;
+	char devname[RTDM_MAX_DEVNAME_LEN+1];
+	sigset_t mask;
+	int sig;
+
+	status = EXIT_SUCCESS;
+	main_tid = pthread_self();
+
+	/* Initializations. */
+	if (__STD(sem_init(&sleeper_start, 0, 0))) {
+		perror("sem_init");
+		exit(EXIT_FAILURE);
+	}
+
+#if CONFIG_SMP
+	nr_cpus = CPU_COUNT(&__base_setup_data.cpu_affinity);
+	if (nr_cpus == 0) {
+		nr_cpus = sysconf(_SC_NPROCESSORS_ONLN);
+		if (nr_cpus == -1) {
+			fprintf(stderr,
+				"Error %d while getting the number of cpus (%s)\n",
+				errno,
+				strerror(errno));
+			exit(EXIT_FAILURE);
+		}
+		for (i = 0; i < nr_cpus; i++)
+			CPU_SET(i, &__base_setup_data.cpu_affinity);
+	}
+#else /* !CONFIG_SMP */
+	nr_cpus = 1;
+	CPU_SET(0, &__base_setup_data.cpu_affinity);
+#endif /* !CONFIG_SMP */
+
+	fp_features = cobalt_fp_detect();
+
+	/* Parse command line options. */
+	opterr = 0;
+	for (;;) {
+		static struct option long_options[] = {
+			{ "freeze",  0, NULL, 'f' },
+			{ "help",    0, NULL, 'h' },
+			{ "lines",   1, NULL, 'l' },
+			{ "nofpu",   0, NULL, 'n' },
+			{ "quiet",   0, NULL, 'q' },
+			{ "really-quiet", 0, NULL, 'Q' },
+			{ "stress",  1, NULL, 's' },
+			{ "timeout", 1, NULL, 'T' },
+			{ NULL,      0, NULL, 0   }
+		};
+		int i = 0;
+		int c = getopt_long(argc, (char *const *) argv, "fhl:nqQs:T:",
+				    long_options, &i);
+
+		if (c == -1)
+			break;
+
+		switch(c) {
+		case 'f':
+			freeze_on_error = 1;
+			break;
+
+		case 'h':
+			usage(stdout, progname);
+			exit(EXIT_SUCCESS);
+
+		case 'l':
+			data_lines = xatoul(optarg);
+			break;
+
+		case 'n':
+			use_fp = 0;
+			break;
+
+		case 'q':
+			quiet = 1;
+			break;
+
+		case 'Q':
+			quiet = 2;
+			break;
+
+		case 's':
+			stress = xatoul(optarg);
+			break;
+
+		case 'T':
+			alarm(xatoul(optarg));
+			break;
+
+		case '?':
+			usage(stderr, progname);
+			fprintf(stderr, "%s: Invalid option.\n", argv[optind-1]);
+			exit(EXIT_FAILURE);
+
+		case ':':
+			usage(stderr, progname);
+			fprintf(stderr, "Missing argument of option %s.\n",
+				argv[optind-1]);
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	if (setvbuf(stdout, NULL, _IOLBF, 0)) {
+		perror("setvbuf");
+		exit(EXIT_FAILURE);
+	}
+
+	/* If no argument was passed (or only -n), replace argc and argv with
+	   default values, given by all_fp or all_nofp depending on the presence
+	   of the -n flag. */
+	if (optind == argc) {
+		const char **all;
+		char buffer[32];
+		unsigned count;
+
+		if (use_fp)
+			use_fp = check_fpu();
+
+		if (use_fp) {
+			all = all_fp;
+			count = sizeof(all_fp)/sizeof(char *);
+		} else {
+			all = all_nofp;
+			count = sizeof(all_nofp)/sizeof(char *);
+		}
+
+		argc = count * nr_cpus + 1;
+		argv = (const char **) malloc(argc * sizeof(char *));
+		if (!argv) {
+			perror("malloc");
+			exit(EXIT_FAILURE);
+		}
+		argv[0] = progname;
+		for_each_cpu_index(i, n) {
+			for (j = 0; j < count; j++) {
+				snprintf(buffer,
+					 sizeof(buffer),
+					 "%s%d",
+					 all[j],
+					 i);
+				argv[n * count + j + 1] = strdup(buffer);
+			}
+		}
+
+		optind = 1;
+	}
+
+	cpus = malloc(sizeof(*cpus) * nr_cpus);
+	if (!cpus) {
+		perror("malloc");
+		exit(EXIT_FAILURE);
+	}
+
+	for_each_cpu_index(i, n) {
+		size_t size;
+		cpus[n].fd = -1;
+		cpus[n].index = i;
+		cpus[n].capacity = 2;
+		size = cpus[n].capacity * sizeof(struct task_params);
+		cpus[n].tasks_count = 1;
+		cpus[n].tasks = (struct task_params *) malloc(size);
+		cpus[n].last_switches_count = 0;
+
+		if (!cpus[n].tasks) {
+			perror("malloc");
+			exit(EXIT_FAILURE);
+		}
+
+		cpus[n].tasks[0].type = stress ? SWITCHER : SLEEPER;
+		cpus[n].tasks[0].fp = use_fp ? UFPS : 0;
+		cpus[n].tasks[0].cpu = &cpus[n];
+		cpus[n].tasks[0].thread = 0;
+		cpus[n].tasks[0].swt.index = cpus[n].tasks[0].swt.flags = 0;
+	}
+
+	/* Parse arguments and build data structures. */
+	for(i = optind; i < argc; i++) {
+		struct task_params params;
+		struct cpu_tasks *cpu;
+
+		if(parse_arg(&params, argv[i], cpus)) {
+			usage(stderr, progname);
+			fprintf(stderr, "Unable to parse %s as a thread type. "
+				"Aborting.\n", argv[i]);
+			exit(EXIT_FAILURE);
+		}
+
+		if (!check_arg(&params, &cpus[nr_cpus])) {
+			usage(stderr, progname);
+			fprintf(stderr,
+				"Invalid parameters %s. Aborting\n",
+				argv[i]);
+			exit(EXIT_FAILURE);
+		}
+
+		if (!use_fp && params.fp) {
+			usage(stderr, progname);
+			fprintf(stderr,
+				"%s is invalid because FPU is disabled"
+				" (option -n passed).\n", argv[i]);
+			exit(EXIT_FAILURE);
+		}
+
+		cpu = params.cpu;
+		if(++cpu->tasks_count > cpu->capacity) {
+			size_t size;
+			cpu->capacity += cpu->capacity / 2;
+			size = cpu->capacity * sizeof(struct task_params);
+			cpu->tasks =
+				(struct task_params *) realloc(cpu->tasks, size);
+			if (!cpu->tasks) {
+				perror("realloc");
+				exit(EXIT_FAILURE);
+			}
+		}
+
+		params.thread = 0;
+		params.swt.index = params.swt.flags = 0;
+		cpu->tasks[cpu->tasks_count - 1] = params;
+	}
+
+	if (stress)
+		for_each_cpu_index(i, n) {
+			struct task_params params;
+			struct cpu_tasks *cpu = &cpus[n];
+
+			if (cpu->tasks_count + 1 > cpu->capacity) {
+				size_t size;
+				cpu->capacity += cpu->capacity / 2;
+				size = cpu->capacity * sizeof(struct task_params);
+				cpu->tasks = realloc(cpu->tasks, size);
+				if (!cpu->tasks) {
+					perror("realloc");
+					exit(EXIT_FAILURE);
+				}
+			}
+
+			params.type = FPU_STRESS;
+			params.fp = UFPS;
+			params.cpu = cpu;
+			params.thread = 0;
+			params.swt.index = cpu->tasks_count;
+			params.swt.flags = 0;
+			cpu->tasks[cpu->tasks_count] = params;
+		}
+
+	/* For best compatibility with both LinuxThreads and NPTL, block the
+	   termination signals on all threads. */
+	sigemptyset(&mask);
+	sigaddset(&mask, SIGINT);
+	sigaddset(&mask, SIGTERM);
+	sigaddset(&mask, SIGALRM);
+	pthread_sigmask(SIG_BLOCK, &mask, NULL);
+
+	__STD(pthread_mutex_init(&headers_lock, NULL));
+
+	/* Prepare attributes for real-time tasks. */
+	pthread_attr_init(&rt_attr);
+	pthread_attr_setinheritsched(&rt_attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&rt_attr, SCHED_FIFO);
+	sp.sched_priority = 1;
+	pthread_attr_setschedparam(&rt_attr, &sp);
+
+	if (quiet < 2)
+		printf("== Threads:");
+
+	/* Create and register all tasks. */
+	for_each_cpu_index(i, n) {
+		struct cpu_tasks *cpu = &cpus[n];
+		char buffer[64];
+
+		cpu->fd = open_rttest(devname, sizeof(devname), cpu->tasks_count);
+
+		if (cpu->fd == -1)
+			goto failure;
+
+		if (ioctl(cpu->fd, RTTST_RTIOC_SWTEST_SET_CPU, i)) {
+			perror("ioctl(RTTST_RTIOC_SWTEST_SET_CPU)");
+			goto failure;
+		}
+
+		if (stress &&
+		    ioctl(cpu->fd, RTTST_RTIOC_SWTEST_SET_PAUSE, stress)) {
+			perror("ioctl(RTTST_RTIOC_SWTEST_SET_PAUSE)");
+			goto failure;
+		}
+
+		for (j = 0; j < cpu->tasks_count + !!stress; j++) {
+			struct task_params *param = &cpu->tasks[j];
+			if (task_create(cpu, param, &rt_attr)) {
+			  failure:
+				status = EXIT_FAILURE;
+				goto cleanup;
+			}
+			if (quiet < 2)
+				printf(" %s",
+				       task_name(buffer, sizeof(buffer),
+						 param->cpu, param->swt.index));
+		}
+	}
+	if (quiet < 2)
+		printf("\n");
+
+	clock_gettime(CLOCK_REALTIME, &start);
+
+	/* Start the sleeper tasks. */
+	for (i = 0; i < nr_cpus; i ++)
+		__STD(sem_post(&sleeper_start));
+
+	/* Wait for interruption. */
+	__STD(sigwait(&mask, &sig));
+
+	/* Allow a second Ctrl-C in case of lockup. */
+	pthread_sigmask(SIG_UNBLOCK, &mask, NULL);
+
+	/* Cleanup. */
+  cleanup:
+	for_each_cpu_index(i, n) {
+		struct cpu_tasks *cpu = &cpus[n];
+
+		/* kill the user-space tasks. */
+		for (j = 0; j < cpu->tasks_count + !!stress; j++) {
+			struct task_params *param = &cpu->tasks[j];
+
+			if (param->type != RTK && param->thread)
+				pthread_cancel(param->thread);
+		}
+	}
+
+	for_each_cpu_index(i, n) {
+		struct cpu_tasks *cpu = &cpus[n];
+
+		/* join the user-space tasks. */
+		for (j = 0; j < cpu->tasks_count + !!stress; j++) {
+			struct task_params *param = &cpu->tasks[j];
+
+			if (param->type != RTK && param->thread)
+				pthread_join(param->thread, NULL);
+		}
+
+		if (cpu->fd != -1) {
+			struct timespec now;
+
+			clock_gettime(CLOCK_REALTIME, &now);
+
+			if (quiet == 1)
+				quiet = 0;
+			display_switches_count(cpu, &now);
+
+			/* Kill the kernel-space tasks. */
+			close(cpu->fd);
+		}
+		free(cpu->tasks);
+	}
+	free(cpus);
+	__STD(sem_destroy(&sleeper_start));
+	__STD(pthread_mutex_destroy(&headers_lock));
+
+	return status;
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/xeno-test/Makefile.am b/kernel/xenomai-v3.2.4/testsuite/xeno-test/Makefile.am
new file mode 100644
index 0000000..cd809ab
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/xeno-test/Makefile.am
@@ -0,0 +1,16 @@
+testdir = @XENO_TEST_DIR@
+pkgdir = $(pkgdatadir)
+
+test_SCRIPTS = xeno-test-run-wrapper dohell
+test_PROGRAMS = xeno-test-run
+bin_SCRIPTS = xeno-test
+
+xeno_test_run_CPPFLAGS = -DTESTDIR=\"$(testdir)\" -D_GNU_SOURCE
+xeno_test_run_LDADD = -lpthread -lrt
+
+xeno-test: $(srcdir)/xeno-test.in Makefile
+	sed "s,@testdir@,$(testdir),;s,@pkgdir@,$(pkgdir)," $< > $@
+
+EXTRA_DIST = $(test_SCRIPTS) xeno-test.in
+
+CLEANFILES = xeno-test
diff --git a/kernel/xenomai-v3.2.4/testsuite/xeno-test/dohell b/kernel/xenomai-v3.2.4/testsuite/xeno-test/dohell
new file mode 100644
index 0000000..68c5a5f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/xeno-test/dohell
@@ -0,0 +1,96 @@
+#! /bin/sh
+
+usage() {
+    cat <<EOF
+$0 [ -b path ] [ -s server ] [ -p port ] [ -m mntpoint ] [ -l path | seconds ]
+
+Generate load using an assorted set of commands, and optionally:
+- hackbench if the path to the hackbench binary is specified with -b;
+- nc to send TCP data to "server" port "port" if -s is specified (if -p
+is not specified, port 9, aka discard, is used);
+- dd to write data under "mntpoint" if -m is specified.
+
+for the duration of the LTP test run if the path to the LTP installation
+directory is specified with -l, or for "seconds" seconds otherwise.
+EOF
+}
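+
+# Example: "dohell -s 192.168.0.5 -m /mnt 900" generates CPU, network and
+# disk load for 900 seconds.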
+
+port=9
+while [ $# -gt 0 ]; do
+    case $1 in
+	-h|--help) usage
+	    exit 0;;
+	-b) shift; hackbench="$1"; shift
+	    ;;
+	-s) shift; server="$1"; shift
+	    ;;
+	-p) shift; port="$1"; shift
+	    ;;
+	-m) shift; mntpoint="$1"; shift
+	    ;;
+	-l) shift; ltpdir="$1"; shift
+	    ;;
+	-*) usage
+	    exit 1;;
+	*) break;;
+    esac
+done
+
+if [ -z "$ltpdir" -a $# -ne 1 ]; then
+    usage
+    exit 1
+fi
+
+pids=""
+
+if [ -n "$server" ]; then
+    if type nc > /dev/null 2>&1; then
+	nc=nc
+    elif type netcat > /dev/null 2>&1; then
+	nc=netcat
+    else
+	echo netcat or nc not found
+	exit 1
+    fi
+
+    seq 1 399999 > /tmp/netcat.data
+    ( while :; do cat /tmp/netcat.data; sleep 15; done | $nc $server $port ) &
+    pids="$!"
+fi
+
+if [ -n "$mntpoint" ]; then
+    while :; do dd if=/dev/zero of=$mntpoint/bigfile bs=1024000 count=100; sync; done &
+    pids="$pids $!"
+fi
+
+if [ -n "$hackbench" ]; then
+    while :; do $hackbench 1; done &
+    pids="$pids $!"
+fi
+
+while :; do cat /proc/interrupts; done > /dev/null 2>&1 &
+pids="$pids $!"
+
+while :; do ps w; done > /dev/null 2>&1 &
+pids="$pids $!"
+
+dd if=/dev/zero of=/dev/null &
+pids="$pids $!"
+
+while :; do ls -lR / > /dev/null 2>&1; done &
+pids="$pids $!"
+
+test -e /proc/sys/kernel/hung_task_timeout_secs && \
+echo 0 > /proc/sys/kernel/hung_task_timeout_secs
+
+if [ -n "$ltpdir" ]; then
+    cd "$ltpdir" && ./runalltests.sh
+    cd "$ltpdir" && ./runalltests.sh
+else
+    sleep $1
+fi
+
+kill $pids > /dev/null 2>&1
+sleep 5
+killall -KILL cat $nc dd hackbench ls ps > /dev/null 2>&1
+killall -KILL `basename $0` sleep > /dev/null 2>&1
diff --git a/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run-wrapper b/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run-wrapper
new file mode 100644
index 0000000..24698bb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run-wrapper
@@ -0,0 +1,22 @@
+#! /bin/sh
+
+XENO_TEST_IN="/tmp/xeno-test-in-$$"
+XENO_TEST_OUT="/tmp/xeno-test-out-$$"
+
+check_alive()
+{
+    echo check_alive ${1+"$@"} > "$XENO_TEST_OUT"
+}
+
+start_load()
+{
+    echo start_load > "$XENO_TEST_OUT"
+}
+
+wait_load()
+{
+    read rc < "$XENO_TEST_IN"
+}
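+
+# The functions above implement the control protocol of xeno-test-run: each
+# call becomes a single line written to (or read from) the fifos it monitors.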
+
+script="$1"; shift
+. $script
diff --git a/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run.c b/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run.c
new file mode 100644
index 0000000..6d1bb96
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test-run.c
@@ -0,0 +1,678 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <ctype.h>
+
+#include <sys/types.h>
+#include <fcntl.h>
+#include <sys/stat.h>
+#include <unistd.h>
+#include <signal.h>
+#include <sys/wait.h>
+#include <sys/select.h>
+#include <time.h>
+
+#define CHILD_SCRIPT  0
+#define CHILD_CHECKED 1
+#define CHILD_LOAD    2
+#define CHILD_ANY     -1
+
+#define TIMEOUT 30
+
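+/* One tracked process per struct child. "type" distinguishes the controlling
+   script, checked test commands and the load generator; CHILD_ANY (-1) is
+   only a search wildcard and is never stored in the 2-bit field. */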
+struct child {
+	unsigned type: 2;
+	unsigned dead: 1;
+	pid_t pid;
+	struct child *next;
+	int in, out;
+	time_t timeout;
+	int exit_status;
+	void (*handle)(struct child *, fd_set *);
+};
+
+#define fail_fprintf(f, fmt, args...) \
+	fprintf(f, "%s failed: " fmt, scriptname , ##args)
+
+#define fail_perror(str) \
+	fail_fprintf(stderr, "%s: %s\n", str, strerror(errno))
+
+static const char *scriptname;
+static volatile int sigexit;
+static time_t termload_start, sigexit_start = 0;
+static sigset_t sigchld_mask;
+static struct child *first_child;
+static char default_loadcmd[] = "dohell 900";
+static char *loadcmd = default_loadcmd;
+static fd_set inputs;
+static struct child script, load;
+
+void handle_checked_child(struct child *child, fd_set *fds);
+void handle_script_child(struct child *child, fd_set *fds);
+void handle_load_child(struct child *child, fd_set *fds);
+
+static int exit_global = EXIT_SUCCESS;
+
+static inline time_t mono_time(void)
+{
+	struct timespec ts;
+	clock_gettime(CLOCK_MONOTONIC, &ts);
+	return ts.tv_sec;
+}
+
+int child_initv(struct child *child, int type, char *argv[])
+{
+	char pipe_in_name[256];
+	char pipe_out_name[256];
+	int pipe_out[2];
+	int err, i;
+	pid_t pid;
+
+	if (type != CHILD_SCRIPT) {
+		if (pipe(pipe_out) < 0)
+			return -errno;
+
+		/* Set the CLOEXEC flag so that we do not leak file
+		   descriptors in our children. */
+		fcntl(pipe_out[0], F_SETFD,
+		      fcntl(pipe_out[0], F_GETFD) | FD_CLOEXEC);
+		fcntl(pipe_out[1], F_SETFD,
+		      fcntl(pipe_out[1], F_GETFD) | FD_CLOEXEC);
+	}
+
+	sigprocmask(SIG_BLOCK, &sigchld_mask, NULL);
+	pid = vfork();
+	if (pid < 0) {
+		sigprocmask(SIG_UNBLOCK, &sigchld_mask, NULL);
+		err = -errno;
+		goto err_close_pipe_out;
+	}
+
+	if (pid == 0) {
+		sigprocmask(SIG_UNBLOCK, &sigchld_mask, NULL);
+
+		switch(type) {
+		case CHILD_CHECKED:
+		case CHILD_LOAD:
+			if (dup2(pipe_out[1], STDOUT_FILENO) < 0) {
+				fail_perror("dup2(pipe_out)");
+				_exit(EXIT_FAILURE);
+			}
+			if (dup2(pipe_out[1], STDERR_FILENO) < 0) {
+				fail_perror("dup2(pipe_err)");
+				_exit(EXIT_FAILURE);
+			}
+			/* Detach child from terminal,
+			   to avoid child catching SIGINT */
+			setsid();
+
+			break;
+
+		case CHILD_SCRIPT:
+			snprintf(pipe_in_name, sizeof(pipe_in_name),
+				 "/tmp/xeno-test-in-%u",
+				 (unsigned)getpid());
+			unlink(pipe_in_name);
+			if (mkfifo(pipe_in_name, 0666) < 0) {
+				fail_perror("mkfifo(pipe_in)");
+				_exit(EXIT_FAILURE);
+			}
+
+			snprintf(pipe_out_name, sizeof(pipe_out_name),
+				 "/tmp/xeno-test-out-%u",
+				 (unsigned)getpid());
+			unlink(pipe_out_name);
+			if (mkfifo(pipe_out_name, 0666) < 0) {
+				fail_perror("mkfifo(pipe_in)");
+				_exit(EXIT_FAILURE);
+			}
+
+			break;
+		}
+
+		err = execvp(argv[0], argv);
+		if (err < 0) {
+			fail_fprintf(stderr, "execvp(%s): %m", argv[0]);
+			_exit(EXIT_FAILURE);
+		}
+	}
+	child->type = type;
+	child->dead = 0;
+	child->pid = pid;
+
+	child->next = first_child;
+	first_child = child;
+	sigprocmask(SIG_UNBLOCK, &sigchld_mask, NULL);
+
+	fprintf(stderr, "Started child %d:", pid);
+	for (i = 0; argv[i]; i++)
+		fprintf(stderr, " %s", argv[i]);
+	fputc('\n', stderr);
+
+	if (type != CHILD_SCRIPT) {
+		close(pipe_out[1]);
+		fcntl(pipe_out[0], F_SETFL,
+		      fcntl(pipe_out[0], F_GETFL) | O_NONBLOCK);
+		child->out = pipe_out[0];
+	} else {
+		child->out = open(pipe_out_name, O_RDONLY | O_NONBLOCK);
+		if (child->out == -1)
+			return -errno;
+
+		/*
+		 * We cannot open pipe_in right now (opening it in
+		 * non-blocking mode would return -ENXIO, and opening it in
+		 * blocking mode would block the process until the
+		 * child opens the other end of the fifo, which is not
+		 * what we want).
+		 */
+		child->in = -1;
+	}
+	FD_SET(child->out, &inputs);
+
+	time(&child->timeout);
+	child->timeout += TIMEOUT * 60;
+
+	switch(type) {
+	case CHILD_CHECKED:
+		child->handle = handle_checked_child;
+		break;
+	case CHILD_SCRIPT:
+		child->handle = handle_script_child;
+		break;
+	case CHILD_LOAD:
+		child->handle = handle_load_child;
+		break;
+	}
+
+	return 0;
+
+  err_close_pipe_out:
+	if (type != CHILD_SCRIPT) {
+		close(pipe_out[0]);
+		close(pipe_out[1]);
+	}
+	return err;
+}
+
+int child_init(struct child *child, int type, char *cmdline)
+{
+	char **argv = malloc(sizeof(*argv));
+	unsigned argc = 0;
+	int rc;
+
+	if (!argv)
+		return -ENOMEM;
+
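+	/* Tokenize cmdline in place: each iteration records the next
+	   blank-separated word as a new argv entry. */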
+	do {
+		char **new_argv = realloc(argv, sizeof(*argv) * (argc + 2));
+		if (!new_argv) {
+			free(argv);
+			return -ENOMEM;
+		}
+		argv = new_argv;
+
+		argv[argc++] = cmdline;
+		cmdline = strpbrk(cmdline, " \t");
+		if (cmdline)
+			do {
+				*cmdline++ = '\0';
+			} while (isspace(*cmdline));
+	} while (cmdline && *cmdline);
+	argv[argc] = NULL;
+
+	rc = child_initv(child, type, argv);
+
+	free(argv);
+
+	return rc;
+}
+
+void child_cleanup(struct child *child)
+{
+	struct child *prev;
+
+	if (child == first_child)
+		first_child = child->next;
+	else
+		for (prev = first_child; prev; prev = prev->next)
+			if (prev->next == child) {
+				prev->next = child->next;
+				break;
+			}
+
+	FD_CLR(child->out, &inputs);
+	close(child->out);
+	if (child->type == CHILD_SCRIPT) {
+		char pipe_in_name[256];
+		char pipe_out_name[256];
+		snprintf(pipe_in_name, sizeof(pipe_in_name),
+			 "/tmp/xeno-test-in-%u", (unsigned)child->pid);
+		unlink(pipe_in_name);
+
+		snprintf(pipe_out_name, sizeof(pipe_out_name),
+			 "/tmp/xeno-test-out-%u", (unsigned)child->pid);
+		unlink(pipe_out_name);
+		close(child->in);
+	}
+}
+
+struct child *child_search_pid(pid_t pid)
+{
+	struct child *child;
+
+	for (child = first_child; child; child = child->next)
+		if (child->pid == pid)
+			break;
+
+	return child;
+}
+
+struct child *child_search_type(int type)
+{
+	struct child *child;
+
+	for (child = first_child; child; child = child->next)
+		if (child->type == type)
+			return child;
+
+	return NULL;
+}
+
+int children_done_p(int type)
+{
+	struct child *child;
+
+	for (child = first_child; child; child = child->next)
+		if ((type == CHILD_ANY || type == child->type) && !child->dead)
+			return 0;
+
+	return 1;
+}
+
+int children_kill(int type, int sig)
+{
+	struct child *child;
+
+	if (children_done_p(type))
+		return 1;
+
+	for (child = first_child; child; child = child->next)
+		if ((type == CHILD_ANY || child->type == type)
+		    && !child->dead)
+			kill(child->pid, sig);
+
+	return children_done_p(type);
+}
+
+void sigchld_handler(int sig)
+{
+	struct child *child;
+	int status;
+	pid_t pid;
+
+	while ((pid = waitpid(-1, &status, WNOHANG)) > 0) {
+		child = child_search_pid(pid);
+		if (!child) {
+			fail_fprintf(stderr, "dead child %d not found!\n", pid);
+			exit(EXIT_FAILURE);
+		}
+
+		child->exit_status = status;
+		child->dead = 1;
+		fprintf(stderr, "child %d returned: ", pid);
+		if (WIFEXITED(status)) {
+			if (WEXITSTATUS(status))
+				exit_global = EXIT_FAILURE;
+			fprintf(stderr, "exited with status %d\n",
+				WEXITSTATUS(status));
+		} else if (WIFSIGNALED(status)) {
+			fprintf(stderr, "killed by signal %d\n",
+				WTERMSIG(status));
+		} else {
+			fprintf(stderr, "unknown reason\n");
+		}
+	}
+}
+
+void cleanup(void)
+{
+	children_kill(CHILD_ANY, SIGKILL);
+}
+
+void termsig(int sig)
+{
+	sigexit = sig;
+	sigexit_start = mono_time();
+	children_kill(CHILD_ANY, SIGTERM);
+	signal(sig, SIG_DFL);
+}
+
+void copy(int from, int to)
+{
+	char buffer[4096];
+	ssize_t sz;
+
+	do {
+		ssize_t written, wsz;
+		sz = read(from, buffer, sizeof(buffer));
+		if (sz == -1) {
+			if (errno == EAGAIN)
+				break;
+			fail_perror("read");
+			exit(EXIT_FAILURE);
+		}
+
+		for (written = 0; written < sz;
+		     written += (wsz > 0 ? wsz : 0)) {
+			wsz = write(to, buffer + written, sz - written);
+			if (wsz == -1) {
+				fail_perror("write");
+				exit(EXIT_FAILURE);
+			}
+		}
+	} while (sz > 0);
+}
+
+void handle_checked_child(struct child *child, fd_set *fds)
+{
+	time_t now = mono_time();
+
+	if (FD_ISSET(child->out, fds)) {
+		copy(child->out, STDOUT_FILENO);
+		child->timeout = now + TIMEOUT * 60;
+	}
+
+	if (child->dead) {
+		int status = child->exit_status;
+
+		/* A checked child died, this may be abnormal if no
+		   termination signal was sent. */
+		if (WIFEXITED(status)) {
+			if (sigexit || termload_start)
+				goto cleanup;
+			fail_fprintf(stderr,
+				   "child %d exited with status %d\n",
+				   child->pid, WEXITSTATUS(status));
+		}
+
+		if (WIFSIGNALED(status)) {
+			if (sigexit || termload_start) {
+			  cleanup:
+				child_cleanup(child);
+				free(child);
+				return;
+			}
+			fail_fprintf(stderr, "child %d exited with signal %d\n",
+				   child->pid, WTERMSIG(status));
+			if (WCOREDUMP(status))
+				fprintf(stderr, "(core dumped)\n");
+		}
+
+		exit(EXIT_FAILURE);
+		return;
+	}
+
+	if (now > child->timeout) {
+		fail_fprintf(stderr, "child %d produced no output for %d minutes.\n",
+			     child->pid, TIMEOUT);
+		exit(EXIT_FAILURE);
+	}
+
+}
+
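+/* The script child's output fifo carries one command per line, emitted by the
+   wrapper functions: "check_alive <cmd>" spawns a monitored child, and
+   "start_load" (re)starts the configured load command. */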
+void handle_script_child(struct child *child, fd_set *fds)
+{
+	static char buffer[4096];
+	static unsigned pos;
+	char *l, *eol;
+	ssize_t sz;
+	int rc;
+
+	if (child->dead) {
+		child_cleanup(child);
+		return;
+	}
+
+	if (!FD_ISSET(child->out, fds))
+		return;
+
+	sz = read(child->out, buffer + pos, sizeof(buffer) - (pos + 1));
+	if (sz < 0)
+		return;	/* EAGAIN or a transient error, retry on next wakeup */
+	buffer[pos + sz] = '\0';
+
+	for (l = buffer; (eol = strchr(l, '\n')); l = eol + 1) {
+		*eol = '\0';
+
+		if (!memcmp(l, "check_alive ", 12)) {
+			struct child *new_child;
+
+			new_child = malloc(sizeof(*new_child));
+			if (!new_child) {
+				fail_fprintf(stderr, "allocation failed\n");
+				exit(EXIT_FAILURE);
+			}
+
+			rc = child_init(new_child, CHILD_CHECKED, l + 12);
+			if (rc) {
+				fail_perror("child_init");
+				exit(EXIT_FAILURE);
+			}
+		} else if (!memcmp(l, "start_load", 10)) {
+			if (!load.dead) {
+				fail_fprintf(stderr, "start_load run while load"
+					     " script is already running.\n");
+				exit(EXIT_FAILURE);
+			}
+			rc = child_init(&load, CHILD_LOAD, loadcmd);
+			if (rc) {
+				fail_perror("child_init");
+				exit(EXIT_FAILURE);
+			}
+		} else {
+			fprintf(stderr, "Invalid command %s\n", l);
+			exit(EXIT_FAILURE);
+		}
+	}
+	if (l != buffer) {
+		pos = strlen(l);
+		memmove(buffer, l, pos + 1);
+	}
+}
+
+void handle_load_child(struct child *child, fd_set *fds)
+{
+	int ret;
+
+	if (FD_ISSET(child->out, fds))
+		copy(child->out, STDOUT_FILENO);
+
+	if (child->dead) {
+		time_t now = mono_time();
+
+		if (!termload_start) {
+			if (sigexit) {
+				child_cleanup(child);
+				return;
+			}
+
+			fprintf(stderr, "Load script terminated, "
+				"terminating checked scripts\n");
+
+			children_kill(CHILD_CHECKED, SIGTERM);
+			termload_start = now;
+		} else {
+			if (child_search_type(CHILD_CHECKED)
+			    && now < termload_start + 30)
+				return;
+
+			if (now >= termload_start + 30) {
+				fail_fprintf(stderr, "timeout waiting for "
+					     "checked children, "
+					     "sending SIGKILL\n");
+				children_kill(CHILD_ANY, SIGKILL);
+			}
+
+			child_cleanup(child);
+			if (sigexit)
+				return;
+
+			if (script.in == -1) {
+				char pipe_in_name[256];
+				snprintf(pipe_in_name, sizeof(pipe_in_name),
+					 "/tmp/xeno-test-in-%u",
+					 (unsigned)script.pid);
+				fprintf(stderr, "pipe_in: %s\n", pipe_in_name);
+				script.in = open(pipe_in_name, O_WRONLY);
+			}
+			if (script.in != -1) {
+				ret = write(script.in, "0\n", 2);
+				(void)ret;
+			}
+			termload_start = 0;
+		}
+	}
+}
+
+void usage(const char *progname)
+{
+	fprintf(stderr, "%s [-l \"load command\"] script arguments...\n"
+		"Run \"script\" with \"arguments\" in a shell supplemented with"
+		" a few commands\nsuitable for running Xenomai tests.\n"
+		"\"load command\" is a command line to be run in order to"
+		" generate load\nwhile running tests.\n", progname);
+}
+
+void setpath(void)
+{
+	char *path;
+	size_t path_len;
+
+	path_len = strlen(getenv("PATH") ?: "") + strlen(TESTDIR) + 2;
+	path = malloc(path_len);
+	if (!path) {
+		perror("malloc");
+		exit(EXIT_FAILURE);
+	}
+	if (getenv("PATH"))
+		snprintf(path, path_len, TESTDIR ":%s", getenv("PATH"));
+	else
+		snprintf(path, path_len, TESTDIR);
+
+	setenv("PATH", path, 1);
+	free(path);
+}
+
+int main(int argc, char *argv[])
+{
+	struct sigaction action;
+	char **new_argv;
+	int rc, maxfd;
+	unsigned i;
+	int j, k;
+
+	for (j = 0; j < argc; j++) {
+		if (!strcmp(argv[j], "-l")) {
+			if (j == argc -1) {
+				usage(argv[0]);
+				exit(EXIT_FAILURE);
+			}
+
+			loadcmd = argv[j + 1];
+			for (k = j - 1; k >= 0; k--)
+				argv[k + 2] = argv[k];
+
+			argv += 2;
+			j -= 2;
+			argc -= 2;
+		}
+	}
+	scriptname = argv[1];
+
+	setpath();
+
+	action.sa_handler = termsig;
+	sigemptyset(&action.sa_mask);
+	action.sa_flags = SA_RESTART;
+	if (sigaction(SIGTERM, &action, NULL) < 0) {
+		fail_perror("sigaction(SIGTERM)");
+		exit(EXIT_FAILURE);
+	}
+	if (sigaction(SIGINT, &action, NULL) < 0) {
+		fail_perror("sigaction(SIGTERM)");
+		exit(EXIT_FAILURE);
+	}
+
+	action.sa_flags |= SA_NOCLDSTOP;
+	action.sa_handler = sigchld_handler;
+	if (sigaction(SIGCHLD, &action, NULL) < 0) {
+		fail_perror("sigaction(SIGCHLD)");
+		exit(EXIT_FAILURE);
+	}
+	atexit(&cleanup);
+
+	load.dead = 1;
+	FD_ZERO(&inputs);
+
+	new_argv = malloc(sizeof(*new_argv) * (argc + 2));
+	if (!new_argv) {
+		fail_fprintf(stderr, "memory allocation failed\n");
+		exit(EXIT_FAILURE);
+	}
+	new_argv[0] = getenv("SHELL") ?: "/bin/bash";
+	new_argv[1] = TESTDIR "/xeno-test-run-wrapper";
+	for (i = 1; i < argc + 1; i++)
+		new_argv[i + 1] = argv[i];
+
+	rc = child_initv(&script, CHILD_SCRIPT, new_argv);
+	if (rc < 0) {
+		fail_fprintf(stderr, "script creation failed: %s\n", strerror(-rc));
+		exit(EXIT_FAILURE);
+	}
+	maxfd = script.out;
+
+	while (first_child) {
+		struct child *child, *next;
+		struct timeval tv;
+		fd_set in;
+		int rc;
+
+		tv.tv_sec = 0;
+		tv.tv_usec = 100000;
+
+		in = inputs;
+		rc = select(maxfd + 1, &in, NULL, NULL, &tv);
+
+		if (rc == -1) {
+			if (errno == EINTR)
+				continue;
+			fail_perror("select");
+			exit(EXIT_FAILURE);
+		}
+
+		maxfd = 0;
+		for (child = first_child; child; child = next) {
+			next = child->next;
+
+			if (child->out > maxfd)
+				maxfd = child->out;
+
+			child->handle(child, &in);
+		}
+
+		if (sigexit_start && mono_time() >= sigexit_start + 30) {
+			fail_fprintf(stderr, "timeout waiting for all "
+				     "children, sending SIGKILL\n");
+			children_kill(CHILD_ANY, SIGKILL);
+			sigexit_start = 0;
+		}
+	}
+
+	if (sigexit) {
+		signal(sigexit, SIG_DFL);
+		raise(sigexit);
+	}
+	exit(exit_global);
+}
diff --git a/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test.in b/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test.in
new file mode 100644
index 0000000..cfa28df
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/testsuite/xeno-test/xeno-test.in
@@ -0,0 +1,116 @@
+#! @testdir@/xeno-test-run
+
+usage()
+{
+    cat <<EOF
+xeno-test -h or xeno-test --help
+
+This help text.
+
+
+xeno-test [ -l "load command" ] [ -k ] [ -r ] [ -- ] [ latency test options ]
+
+Run a basic test/benchmark of Xenomai on your platform, by first starting a
+few unit tests, then running the latency test under the load generated by
+"load-command".
+
+By default, the load command is "dohell 900", which will generate load for
+15 minutes. To generate a more realistic load, see the dohell help.
+
+This script accepts the -k option to tell the unit test loop to keep
+going upon a failing test. Otherwise xeno-test stops immediately.
+
+If the script is passed the -r option, real-time stress is added to the test,
+with the help of the "switchtest" test. But beware: the latency test figures are
+then no longer meaningful.
+
+For running tests in a virtual machine, use the --vm option. This option
+relaxes some of the timing thresholds.
+
+Any other option passed on the command line is passed to the latency test.
+
+Example:
+xeno-test -l "dohell -s 192.168.0.5 -m /mnt -l /ltp" -t 2
+
+This will generate load, including network load using the server at IP address
+192.168.0.5, some I/O under the mount point /mnt, and the LTP testsuite
+installed under the /ltp directory, and will run the latency test measuring
+the timer irq latency.
+EOF
+}
+
+start_timesyncd()
+{
+    if $timesyncd_was_running; then
+        systemctl start systemd-timesyncd
+        timesyncd_was_running=false
+    fi
+}
+trap start_timesyncd EXIT
+
+keep_going=
+run_on_vm=
+rt_load=false
+timesyncd_was_running=false
+
+while :; do
+    case "$1" in
+	-h|--help)
+	    usage
+	    exit 0
+	    ;;
+
+	-k)
+	    keep_going=--keep-going
+	    shift
+	    ;;
+
+	--vm)
+	    run_on_vm=--vm
+	    shift
+	    ;;
+
+	-r)
+	    rt_load=true
+	    shift
+	    ;;
+
+	--)
+	    shift
+	    break
+	    ;;
+
+	""|*)
+	    break
+	    ;;
+    esac
+done
+
+set -ex
+
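+# Zero the core's latency compensation value if the kernel exposes it; the
+# "|| :" keeps "set -e" from aborting when the write fails.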
+echo 0 > /proc/xenomai/latency || :
+
+testdir=@testdir@
+
+if which systemctl > /dev/null && systemctl is-active --quiet systemd-timesyncd; then
+    timesyncd_was_running=true
+    systemctl stop systemd-timesyncd
+fi
+
+$testdir/smokey --run $run_on_vm $keep_going random_alloc_rounds=64 pattern_check_rounds=64
+
+start_timesyncd
+
+$testdir/clocktest -D -T 30 -C CLOCK_HOST_REALTIME || $testdir/clocktest -T 30
+$testdir/switchtest -T 30
+
+start_load
+
+if $rt_load; then
+    check_alive $testdir/switchtest
+    check_alive $testdir/switchtest -s 1000
+fi
+
+check_alive $testdir/latency ${1+"$@"}
+
+wait_load
diff --git a/kernel/xenomai-v3.2.4/utils/Makefile.am b/kernel/xenomai-v3.2.4/utils/Makefile.am
new file mode 100644
index 0000000..4de6434
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/Makefile.am
@@ -0,0 +1,5 @@
+SUBDIRS = hdb
+if XENO_COBALT
+SUBDIRS += analogy autotune can net ps slackspot corectl
+endif
+SUBDIRS += chkkconf
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/Makefile.am b/kernel/xenomai-v3.2.4/utils/analogy/Makefile.am
new file mode 100644
index 0000000..a1506de
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/Makefile.am
@@ -0,0 +1,100 @@
+sbin_PROGRAMS = analogy_config analogy_calibrate
+
+bin_PROGRAMS = \
+	cmd_read \
+	cmd_write \
+	cmd_bits \
+	insn_read \
+	insn_write \
+	insn_bits \
+	wf_generate
+
+AM_CPPFLAGS = 						\
+	@XENO_USER_CFLAGS@ 				\
+	-ggdb						\
+	-I$(top_srcdir)/include                         \
+        -I$(top_srcdir)/lib/analogy
+
+noinst_HEADERS = wf_facilities.h analogy_calibrate.h calibration_ni_m.h
+
+noinst_LTLIBRARIES = libwaveform.la
+
+libwaveform_la_SOURCES = wf_facilities.c
+
+analogy_config_SOURCES = analogy_config.c
+analogy_config_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
+
+analogy_calibrate_SOURCES = analogy_calibrate.c calibration_ni_m.c
+analogy_calibrate.c: calibration_ni_m.h
+analogy_calibrate_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lpthread -lrt -lm
+
+cmd_read_SOURCES = cmd_read.c
+cmd_read_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	../../lib/alchemy/libalchemy@CORE@.la \
+	../../lib/copperplate/libcopperplate@CORE@.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
+
+cmd_write_SOURCES = cmd_write.c
+cmd_write_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	../../lib/alchemy/libalchemy@CORE@.la \
+	../../lib/copperplate/libcopperplate@CORE@.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
+
+cmd_bits_SOURCES = cmd_bits.c
+cmd_bits_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
+
+insn_read_SOURCES = insn_read.c
+insn_read_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
+
+insn_write_SOURCES = insn_write.c
+insn_write_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
+
+insn_bits_SOURCES = insn_bits.c
+insn_bits_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	../../lib/analogy/libanalogy.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
+
+wf_generate_SOURCES = wf_generate.c
+wf_generate_LDADD = \
+	@XENO_AUTOINIT_LDFLAGS@		\
+	 ./libwaveform.la \
+	../../lib/analogy/libanalogy.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lrt -lpthread -lm
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.c b/kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.c
new file mode 100644
index 0000000..c5990a7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.c
@@ -0,0 +1,124 @@
+/*
+ * Analogy for Linux, calibration program
+ *
+ * Copyright (C) 2014 Jorge A. Ramirez-Ortiz <jro@xenomai.org>
+ *
+ * from original code from the Comedi project
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <sys/time.h>
+#include <sys/resource.h>
+#include <getopt.h>
+#include <pthread.h>
+#include <sys/mman.h>
+#include <xeno_config.h>
+#include <rtdm/analogy.h>
+#include "analogy_calibrate.h"
+#include "calibration_ni_m.h"
+
+struct timespec calibration_start_time;
+a4l_desc_t descriptor;
+
+static const struct option options[] = {
+	{
+#define help_opt	0
+		.name = "help",
+		.has_arg = no_argument,
+	},
+	{
+#define device_opt	1
+		.name = "device",
+		.has_arg = required_argument,
+	},
+	{
+#define output_opt	2
+		.name = "output",
+		.has_arg = required_argument,
+	},
+	{ /* Sentinel */ }
+};
+
+static void
+print_usage(void)
+{
+	fprintf(stderr, "Usage: analogy_calibrate \n"
+	       "  --help                  : this menu \n"
+	       "  --device /dev/analogyX  : analogy device to calibrate \n"
+	       "  --output filename       : calibration results \n"
+	      );
+}
+
+static void __attribute__ ((constructor)) __analogy_calibrate_init(void)
+{
+	clock_gettime(CLOCK_MONOTONIC, &calibration_start_time);
+}
+
+/*
+ *
+ * the calibration file generated by the Analogy software calibrate utility is
+ * not compatible with Comedi's despite holding the exact same information.
+ *
+ */
+int main(int argc, char *argv[])
+{
+	char *device = NULL, *file = NULL;
+	int v, i, fd, err = 0;
+	FILE *p = NULL;
+
+	for (;;) {
+		i = -1;
+		v = getopt_long_only(argc, argv, "", options, &i);
+		if (v == EOF)
+			break;
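+		/* Only long options are declared, so dispatch on the option
+		   index filled in by getopt_long_only(); unmatched options
+		   leave i at -1 and fall into the default case. */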
+		switch (i) {
+		case help_opt:
+			print_usage();
+			exit(0);
+		case device_opt:
+			device = optarg;
+			break;
+		case output_opt:
+			file = optarg;
+			p = fopen(file, "w+");
+			__debug("calibration output: %s \n", file);
+			break;
+		default:
+			print_usage();
+			exit(EXIT_FAILURE);
+		}
+	}
+
+	if (!p || !device)
+		error(EXIT, errno, "missing input parameters");
+
+	fd = a4l_open(&descriptor, device);
+	if (fd < 0)
+		error(EXIT, 0, "open %s failed (%d)", device, fd);
+
+	err = ni_m_board_supported(descriptor.driver_name);
+	if (err)
+		error(EXIT, 0, "board %s: driver %s not supported",
+		      descriptor.board_name, descriptor.driver_name);
+
+	err = ni_m_software_calibrate(p);
+	if (err)
+		error(CONT, 0, "software calibration failed (%d)", err);
+
+	a4l_close(&descriptor);
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.h b/kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.h
new file mode 100644
index 0000000..aeb6355
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/analogy_calibrate.h
@@ -0,0 +1,150 @@
+/*
+ * Analogy for Linux, calibration program
+ *
+ * Copyright (C) 2014 Jorge A. Ramirez-Ortiz <jro@xenomai.org>
+ *
+ * from original code from the Comedi project
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+
+#ifndef __ANALOGY_CALIBRATE_H__
+#define __ANALOGY_CALIBRATE_H__
+
+#include <string.h>
+#include <errno.h>
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <time.h>
+#include "error.h"
+
+extern struct timespec calibration_start_time;
+extern a4l_desc_t descriptor;
+extern FILE *cal;
+
+#define ARRAY_LEN(a)  (sizeof(a) / sizeof((a)[0]))
+
+#define RETURN	 1
+#define CONT 	 0
+#define EXIT	-1
+
+#define error(action, code, fmt, ...) 						\
+do {										\
+       error_at_line(action, code, __FILE__, __LINE__, fmt, ##__VA_ARGS__ );	\
+       if (action == RETURN) 							\
+               return -1;							\
+}while(0)
+
+struct breakdown_time {
+	int ms;
+	int us;
+	int ns;
+};
+
+static inline void do_time_breakdown(struct breakdown_time *p,
+				     const struct timespec *t)
+{
+	unsigned long long ms, us, ns;
+
+	ns = t->tv_sec * 1000000000ULL;
+	ns += t->tv_nsec;
+	ms = ns / 1000000ULL;
+	us = (ns % 1000000ULL) / 1000ULL;
+
+	p->ms = (int)ms;
+	p->us = (int)us;
+	p->ns = (int)(ns % 1000ULL);	/* sub-microsecond remainder, not the total */
+}
+
+static inline void timespec_sub(struct timespec *r,
+				const struct timespec *__restrict t1,
+				const struct timespec *__restrict t2)
+{
+	r->tv_sec = t1->tv_sec - t2->tv_sec;
+	r->tv_nsec = t1->tv_nsec - t2->tv_nsec;
+	if (r->tv_nsec < 0) {
+		r->tv_sec--;
+		r->tv_nsec += 1000000000;
+	}
+}
+
+static inline void __debug(char *fmt, ...)
+{
+	struct timespec now, delta;
+	struct breakdown_time tm;
+	char *header, *msg;
+	int hlen, mlen;
+	va_list ap;
+	__attribute__((unused)) int err;
+
+	va_start(ap, fmt);
+
+	clock_gettime(CLOCK_MONOTONIC, &now);
+	timespec_sub(&delta, &now, &calibration_start_time);
+	do_time_breakdown(&tm, &delta);
+
+	hlen = asprintf(&header, "%4d\"%.3d.%.3d| ",
+			tm.ms / 1000, tm.ms % 1000, tm.us);
+
+	mlen = vasprintf(&msg, fmt, ap);
+
+	err = write(fileno(stdout), header, hlen);
+	err = write(fileno(stdout), msg, mlen);
+
+	free(header);
+	free(msg);
+
+	va_end(ap);
+}
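+
+/*
+ * __debug() prefixes each message with the time elapsed since program
+ * start (calibration_start_time), printed as sec"msec.usec, for example:
+ *   12"345.678| message text
+ */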
+
+
+static inline int __array_search(char *elem, const char *array[], int len)
+{
+	int i;
+
+	for (i = 0; i < len; i++)
+		if (strncmp(elem, array[i], strlen(array[i])) == 0)
+			return 0;
+
+	return -1;
+}
+
+
+static inline double rng_max(a4l_rnginfo_t *range)
+{
+	double a, b;
+	b = A4L_RNG_FACTOR * 1.0;
+
+	a = (double) range->max;
+	a = a / b;
+	return a;
+}
+
+static inline double rng_min(a4l_rnginfo_t *range)
+{
+	double a, b;
+
+	b = A4L_RNG_FACTOR * 1.0;
+	a = (double) range->min;
+	a = a / b;
+	return a;
+}
+
+
+
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/analogy_config.c b/kernel/xenomai-v3.2.4/utils/analogy/analogy_config.c
new file mode 100644
index 0000000..96bc66a
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/analogy_config.c
@@ -0,0 +1,305 @@
+/*
+ * Analogy for Linux, configuration program
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <unistd.h>
+#include <stdlib.h>
+#include <getopt.h>
+#include <string.h>
+#include <errno.h>
+#include <xeno_config.h>
+#include <rtdm/analogy.h>
+
+#define ANALOGY_DRIVERS_PROC "/proc/analogy/drivers"
+#define ANALOGY_DEVICES_PROC "/proc/analogy/devices"
+
+#define __OPTS_DELIMITER ","
+
+enum actions {
+	DO_ATTACH = 0x1,
+	DO_DETACH = 0x2,
+	DO_BUFCONFIG = 0x4,
+};
+
+/* Declare prog variables */
+int vlevel = 1;
+enum actions actions = 0;
+int bufsize = -1;
+struct option a4l_conf_opts[] = {
+	{"help", no_argument, NULL, 'h'},
+	{"verbose", no_argument, NULL, 'v'},
+	{"quiet", no_argument, NULL, 'q'},
+	{"version", no_argument, NULL, 'V'},
+	{"remove", no_argument, NULL, 'r'},
+	{"read-buffer-size", required_argument, NULL, 'R'},
+	{"write-buffer-size", required_argument, NULL, 'W'},
+	{"buffer-size", required_argument, NULL, 'S'},
+	{0},
+};
+
+/* Misc functions */
+static void print_version(void)
+{
+	fprintf(stdout, "analogy_config: version %s\n", PACKAGE_VERSION);
+}
+
+static void print_usage(void)
+{
+	fprintf(stdout, "usage:\tanalogy_config [OPTS] devfile driver "
+			"<driver specific options>"
+		        "- ex: [OPTS] analogy0 analogy_fake 0x378,7,18 \n");
+	fprintf(stdout, "\tOPTS:\t -v, --verbose: verbose output\n");
+	fprintf(stdout, "\t\t -q, --quiet: quiet output\n");
+	fprintf(stdout, "\t\t -V, --version: print program version\n");
+	fprintf(stdout, "\t\t -r, --remove: detach a device\n");
+	fprintf(stdout, "\t\t -S, --buffer-size: set default size in kB\n");
+	fprintf(stdout, "\tDeprecated options:\n");
+	fprintf(stdout, "\t\t -R, --read-buffer-size: read buffer size in kB\n");
+	fprintf(stdout, "\t\t -W, --write-buffer-size: write buffer size in kB\n");
+}
+
+static int parse_extra_arg(char const *opts, a4l_lnkdesc_t *lnkdsc)
+{
+	int i, err, cnt, len, ofs;
+	unsigned long *p;
+	char const *q;
+
+	/* count the number of driver specific comma separated arguments */
+	q = opts;
+	cnt = 1;
+	while ((q = strstr(q, __OPTS_DELIMITER)) != NULL) {
+		   q += strlen(__OPTS_DELIMITER);
+		   cnt++;
+	}
+
+	/* alloc memory for the individual params converted to unsigned long */
+	len = cnt * sizeof(unsigned long);
+	p = malloc(len);
+	if (!p) {
+		fprintf(stderr, "analogy_config: memory allocation failed\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	lnkdsc->opts = (void *)p;
+	lnkdsc->opts_size = len;
+
+	/* We set errno to 0 so as to be sure that strtoul did not fail */
+	errno = 0;
+	i = 0;
+	do {
+		len = strlen(opts);
+		ofs = strcspn(opts, __OPTS_DELIMITER);
+		p[i] = strtoul(opts, NULL, 0);
+		if (errno != 0) {
+			err = -errno;
+			goto fail;
+		}
+		opts += ofs + 1;
+		i++;
+	} while (len != ofs);
+
+	return 0;
+
+fail:
+        free(p);
+out:
+	lnkdsc->opts  = NULL;
+	lnkdsc->opts_size = 0;
+
+	return err;
+}
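+
+/*
+ * For example (matching the usage text above), the driver option string
+ * "0x378,7,18" yields cnt = 3, opts_size = 3 * sizeof(unsigned long) and
+ * an opts array of { 0x378, 7, 18 }.
+ */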
+
+static inline int do_detach(int fd, char *devfile)
+{
+	int err;
+
+	err = a4l_sys_detach(fd);
+	if (err < 0)
+		fprintf(stderr,"analogy_config: a4l_detach(%s) failed err=%d\n",
+			devfile, err);
+	return err;
+}
+
+static inline int do_attach(int fd, int argc, char *argv[], int optind)
+{
+	a4l_lnkdesc_t lnkdsc;
+	int err;
+
+	memset(&lnkdsc, 0, sizeof(a4l_lnkdesc_t));
+	lnkdsc.bname = argv[optind + 1];
+	lnkdsc.bname_size = strlen(argv[optind + 1]);
+
+	/* Process driver specific options if any */
+	if (argc - optind == 3) {
+		err = parse_extra_arg(argv[optind + 2], &lnkdsc);
+		if (err < 0) {
+			fprintf(stderr, "analogy_config: "
+					"driver specific options failed\n");
+			fprintf(stderr, "\twarning: driver specific "
+					"options must be integers \n");
+			print_usage();
+			return err;
+		}
+	}
+
+	err = a4l_sys_attach(fd, &lnkdsc);
+	if (err < 0)
+		fprintf(stderr, "analogy_config: a4l_attach(%s) failed err=%d\n",
+			lnkdsc.bname, err);
+
+	if (lnkdsc.opts != NULL)
+		free(lnkdsc.opts);
+
+	return err;
+}
+
+static inline int do_bufcfg(int fd, char *devfile, int bufsize)
+{
+	int err;
+	/*
+	 * inform the driver of the size of the buffer it will need to
+	 * allocate at opening.
+	 */
+	err = a4l_sys_bufcfg(fd, A4L_BUF_DEFMAGIC, bufsize);
+	if (err < 0) {
+		fprintf(stderr,
+			"analogy_config: a4l_bufcfg(%s) configuration failed "
+			"err=%d\n", devfile, err);
+	}
+
+	return err;
+}
+
+static inline int check_params(enum actions *actions, const int argc, int optind)
+{
+	/* Three cases must be handled:
+	 *  - the option -r is set: only one additional argument is needed
+	 *  - the option -S is set without any attach options
+	 *  - the option -S is set together with attach options
+	 */
+
+	if ((*actions & DO_DETACH) && argc - optind < 1 ) {
+		fprintf(stderr, "analogy_config: specify a device to detach\n");
+		return -EINVAL;
+	}
+
+	if ((*actions & DO_DETACH) && (*actions & DO_BUFCONFIG)) {
+		fprintf(stderr,
+			"analogy_config: skipping buffer size configuration"
+			"because of detach action\n");
+	}
+
+	if (!(*actions & DO_DETACH) &&
+	    !(*actions & DO_BUFCONFIG) && argc - optind < 2) {
+		print_usage();
+		return -EINVAL;
+	}
+	else if (!(*actions & DO_DETACH) && argc - optind >= 2)
+		*actions |= DO_ATTACH;
+
+	return 0;
+}
+
+int main(int argc, char *argv[])
+{
+	int err = 0, fd = -1;
+	char *devfile;
+	int c;
+
+	/* Compute arguments */
+	while ((c =
+		getopt_long(argc, argv, "hvqVrR:W:S:", a4l_conf_opts,
+			    NULL)) >= 0) {
+		switch (c) {
+		case 'h':
+			print_usage();
+			goto done;
+		case 'v':
+			vlevel = 2;
+			break;
+		case 'q':
+			vlevel = 0;
+			break;
+		case 'V':
+			print_version();
+			goto done;
+		case 'r':
+			actions |= DO_DETACH;
+			break;
+		case 'R':
+		case 'W':
+			fprintf(stdout,
+				"analogy_config: the options --read-buffer-size "
+				"and --write-buffer-size will be deprecated; "
+				"please use --buffer-size (-S) instead\n");
+			/* fall through */
+		case 'S':
+			actions |= DO_BUFCONFIG;
+			bufsize = strtoul(optarg, NULL, 0);
+			break;
+		default:
+			print_usage();
+			goto done;
+		}
+	}
+
+	err = check_params(&actions, argc, optind);
+	if (err)
+		goto done;
+
+	devfile = argv[optind];
+
+	fd = a4l_sys_open(devfile);
+	if (fd < 0) {
+		err = fd;
+		fprintf(stderr,"analogy_config: a4l_open(%s) failed err=%d\n",
+			devfile, err);
+		goto done;
+	}
+
+	if (actions & DO_DETACH) {
+		err = do_detach(fd, devfile);
+	}
+	else {
+		if (actions & DO_ATTACH)
+			err = do_attach(fd, argc, argv, optind);
+
+		if (err)
+			goto done;
+
+		if (actions & DO_BUFCONFIG)
+			err = do_bufcfg(fd, devfile, bufsize);
+	}
+
+done:
+        if (err < 0)
+		fprintf(stderr,
+			"analogy_config: check the procfs information:\n"
+			" - analogy devices: %s \n"
+			" - analogy drivers: %s \n",
+			ANALOGY_DEVICES_PROC,
+			ANALOGY_DRIVERS_PROC);
+
+	if (fd >= 0)
+		a4l_sys_close(fd);
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.c b/kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.c
new file mode 100644
index 0000000..6f9be6f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.c
@@ -0,0 +1,1282 @@
+/*
+ * Analogy for Linux, NI - M calibration program
+ *
+ * Copyright (C) 2014 Jorge A. Ramirez-Ortiz <jro@xenomai.org>
+ *
+ * from original code from the Comedi project
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <rtdm/uapi/analogy.h>
+#include <rtdm/analogy.h>
+#include <math.h>
+
+#include "calibration_ni_m.h"
+#include "calibration.h"
+struct listobj ai_calibration_list;
+struct listobj ao_calibration_list;
+
+static struct references references;
+static struct a4l_calibration_subdev mem_subd;
+static struct a4l_calibration_subdev cal_subd;
+static struct a4l_calibration_subdev ao_subd;
+static struct a4l_calibration_subdev ai_subd;
+static struct subdev_ops ops;
+static struct eeprom eeprom;
+static struct gnumath math;
+
+/*
+ *  eeprom
+ */
+static int eeprom_read_byte(unsigned address, unsigned *val)
+{
+	ops.data.read(val, &mem_subd, address, 0, 0);
+	if (*val > 0xff)
+		error(EXIT, 0, "failed to read byte from EEPROM %d > 0xff", *val);
+
+	return 0;
+}
+
+static int eeprom_read_uint16(unsigned address, unsigned *val)
+{
+	unsigned a = 0, b = 0;
+	int err;
+
+	err = eeprom_read_byte(address, &a);
+	if (err)
+		error(EXIT, 0, "failed to read byte from EEPROM");
+	a = a << 8;
+
+	err = eeprom_read_byte(address + 1, &b);
+	if (err)
+		error(EXIT, 0, "failed to read byte from EEPROM");
+
+	*val = a | b;
+
+	return 0;
+}
+
+static int eeprom_get_calibration_base_address(unsigned *address)
+{
+	eeprom_read_uint16(24, address);
+
+	return 0;
+}
+
+static int eeprom_read_float(unsigned address, float *val)
+{
+	union float_converter {
+		unsigned u;
+		float f;
+	} converter;
+
+	unsigned a = 0, b = 0, c = 0, d = 0;
+
+	if (sizeof(float) != sizeof(uint32_t))
+		error(EXIT, 0, "eeprom_read_float");
+
+	eeprom_read_byte(address++, &a);
+	a = a << 24;
+	eeprom_read_byte(address++, &b);
+	b = b << 16;
+	eeprom_read_byte(address++, &c);
+	c = c << 8;
+	eeprom_read_byte(address++, &d);
+
+	converter.u = a | b | c | d;
+	*val = converter.f;
+
+	return 0;
+}
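+
+/*
+ * The four EEPROM bytes are assembled most-significant byte first; e.g.
+ * the byte sequence { 0x40, 0x49, 0x0f, 0xdb } becomes 0x40490fdb, the
+ * IEEE-754 single-precision encoding of ~3.14159274.
+ */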
+
+static int eeprom_read_reference_voltage(float *val)
+{
+	unsigned address;
+
+	eeprom_get_calibration_base_address(&address);
+	eeprom_read_float(address + eeprom.voltage_ref_offset, val);
+
+	return 0;
+}
+
+/*
+ * subdevice operations
+ */
+static int data_read_hint(struct a4l_calibration_subdev *s, int channel,
+	                  int range, int aref)
+{
+	sampl_t dummy_data;
+	a4l_insn_t insn;
+	int err;
+
+	memset(&insn, 0, sizeof(insn));
+	insn.chan_desc = PACK(channel, range, aref);
+	insn.idx_subd = s->idx;
+	insn.type = A4L_INSN_READ;
+	insn.data = &dummy_data;
+	insn.data_size = 0;
+
+	err = a4l_snd_insn(&descriptor, &insn);
+	if (err < 0)
+		error(EXIT, 0, "a4l_snd_insn (%d)", err);
+
+	return 0;
+}
+
+static int data_read(unsigned *data, struct a4l_calibration_subdev *s,
+	             int channel, int range, int aref)
+{
+	a4l_insn_t insn;
+	int err;
+
+	memset(&insn, 0, sizeof(insn));
+	insn.chan_desc = PACK(channel, range, aref);
+	insn.idx_subd = s->idx;
+	insn.type = A4L_INSN_READ;
+	insn.data = data;
+	insn.data_size = 1;
+
+	err = a4l_snd_insn(&descriptor, &insn);
+	if (err < 0)
+		error(EXIT, 0, "a4l_snd_insn (%d)", err);
+
+	return 0;
+}
+
+static int data_write(long int *data, struct a4l_calibration_subdev *s,
+	              int channel, int range, int aref)
+{
+	a4l_insn_t insn;
+	int err;
+
+	memset(&insn, 0, sizeof(insn));
+	insn.chan_desc = PACK(channel, range, aref);
+	insn.idx_subd = s->idx;
+	insn.type = A4L_INSN_WRITE;
+	insn.data = data;
+	insn.data_size = sizeof(*data);
+
+	err = a4l_snd_insn(&descriptor, &insn);
+	if (err < 0)
+		error(EXIT, 0, "a4l_snd_insn (%d)", err);
+
+	return 0;
+}
+
+static int data_read_async(void *dst, struct a4l_calibration_subdev *s,
+	                   unsigned int nb_samples, int speriod, int irange)
+{
+	int i, len, err;
+	a4l_cmd_t cmd;
+	unsigned int chan_descs[] = {
+		PACK(CR_ALT_SOURCE|CR_ALT_FILTER, irange, AREF_DIFF)
+	};
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.scan_begin_src = TRIG_TIMER;
+	cmd.scan_end_src = TRIG_COUNT;
+	cmd.convert_src = TRIG_TIMER;
+	cmd.stop_src = TRIG_COUNT;
+	cmd.start_src = TRIG_NOW;
+	cmd.scan_end_arg = 1;
+	cmd.convert_arg = 0;
+	cmd.nb_chan = 1;
+	cmd.scan_begin_arg = speriod;
+	cmd.chan_descs = chan_descs;
+	cmd.idx_subd = s->idx;
+	cmd.stop_arg = nb_samples;
+	cmd.flags = A4L_CMD_SIMUL;
+	SET_BIT(3, &cmd.valid_simul_stages);
+
+	/* get driver specific info into the command structure */
+	for (i = 0; i < 4; i++)
+		a4l_snd_command(&descriptor, &cmd);
+
+	/* send the real command */
+	cmd.flags = 0;
+	err = a4l_snd_command(&descriptor, &cmd);
+	if (err)
+		error(EXIT, 0, "a4l_snd_command (%d)", err);
+
+	len = nb_samples * ai_subd.slen;
+	for (;;) {
+		err = a4l_async_read(&descriptor, dst, len, A4L_INFINITE);
+		if (err < 0)
+			error(EXIT, 0, "a4l_async_read (%d)", err);
+		if (err < len) {
+			dst = dst + err;
+			len = len - err;
+		} else
+			break;
+	}
+	a4l_snd_cancel(&descriptor, ai_subd.idx);
+
+	return 0;
+}
+
+/*
+ *
+ * math: uses the a4l statistic helpers and the math library
+ *
+ */
+static void
+statistics_standard_deviation_of_mean(double *dst, double src[], int len,
+	                              double mean)
+{
+	a4l_math_stddev_of_mean(dst, mean, src, len);
+}
+
+static void
+statistics_standard_deviation(double *dst, double src[], int len, double mean)
+{
+	a4l_math_stddev(dst, mean, src, len);
+}
+
+static void statistics_mean(double *dst, double src[], int len)
+{
+	a4l_math_mean(dst, src, len);
+}
+
+static int polynomial_fit(struct polynomial *dst, struct codes_info *src)
+{
+	double *measured;
+	double *nominal;
+	int i, ret;
+
+	dst->nb_coefficients = dst->order + 1;
+	dst->coefficients = malloc(sizeof(double) * dst->nb_coefficients);
+	measured = malloc(sizeof(double) * src->nb_codes);
+	nominal = malloc(sizeof(double) * src->nb_codes);
+
+	if (!dst->coefficients || !measured || !nominal)
+		return -ENOMEM;
+
+	for (i = 0; i < src->nb_codes; i++) {
+		measured[i] = src->codes[i].measured;
+		nominal[i] = src->codes[i].nominal;
+	}
+
+	ret = a4l_math_polyfit(dst->nb_coefficients, dst->coefficients,
+		               dst->expansion_origin,
+		               src->nb_codes, nominal, measured);
+
+	free(measured);
+	free(nominal);
+
+	return ret;
+}
+
+static int polynomial_linearize(double *dst, struct polynomial *p, double val)
+{
+	double a = 0.0, b = 1.0;
+	int i;
+
+	for (i = 0; i < p->nb_coefficients; i++) {
+		a = a + p->coefficients[i] * b;
+		b = b * (val - p->expansion_origin);
+	}
+	*dst = a;
+
+	return 0;
+}
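+
+/*
+ * polynomial_linearize() evaluates
+ *   p(val) = sum_i coefficients[i] * (val - expansion_origin)^i
+ * e.g. with coefficients { 1, 2 } and origin 0, p(3) = 1 + 2 * 3 = 7.
+ */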
+
+/*
+ *
+ * reference
+ *
+ */
+static int reference_get_min_sampling_period(int *val)
+{
+	unsigned int chan_descs[] = { 0};
+	a4l_cmd_t cmd;
+	int err;
+
+	memset(&cmd, 0, sizeof(cmd));
+	cmd.scan_begin_src = TRIG_TIMER;
+	cmd.scan_end_src = TRIG_COUNT;
+	cmd.convert_src = TRIG_TIMER;
+	cmd.stop_src = TRIG_COUNT;
+	cmd.start_src = TRIG_NOW;
+	cmd.scan_begin_arg = 0;
+	cmd.convert_arg = 0;
+	cmd.stop_arg = 1;
+	cmd.nb_chan = 1;
+	cmd.scan_end_arg = ai_subd.info->nb_chan;
+	cmd.chan_descs = chan_descs;
+	cmd.idx_subd = ai_subd.idx;
+	cmd.flags = A4L_CMD_SIMUL;
+	SET_BIT(3, &cmd.valid_simul_stages);
+
+	err = a4l_snd_command(&descriptor, &cmd);
+	if (err)
+		error(EXIT, 0, "a4l_snd_command (%d)", err);
+
+	*val = cmd.scan_begin_arg;
+
+	return 0;
+}
+
+static int reference_set_bits(unsigned int bits)
+{
+	unsigned int data[2] = { A4L_INSN_CONFIG_ALT_SOURCE, bits};
+	a4l_insn_t insn;
+	int err;
+
+	insn.data_size = sizeof(data);
+	insn.type = A4L_INSN_CONFIG;
+	insn.idx_subd = ai_subd.idx;
+	insn.chan_desc = 0;
+	insn.data = data;
+
+	err = a4l_snd_insn(&descriptor, &insn);
+	if (err)
+		error(EXIT, 0, "a4l_snd_insn (%d)", err);
+
+	return 0;
+}
+
+static int reference_set_pwm(struct a4l_calibration_subdev *s, unsigned int h,
+	                     unsigned int d, unsigned int *rh, unsigned int *rd)
+{
+	unsigned int data[5] = {
+		[0] = A4L_INSN_CONFIG_PWM_OUTPUT,
+		[1] = TRIG_ROUND_NEAREST,
+		[2] = h,
+		[3] = TRIG_ROUND_NEAREST,
+		[4] = d
+	};
+	a4l_insn_t insn;
+	int err;
+
+	insn.data_size = sizeof(data);
+	insn.idx_subd = s->idx;
+	insn.type = A4L_INSN_CONFIG;
+	insn.chan_desc = 0;
+	insn.data = data;
+
+	err = a4l_snd_insn(&descriptor, &insn);
+	if (err)
+		error(EXIT, 0, "a4l_snd_insn (%d)", err);
+
+	*rh = data[2];
+	*rd = data[4];
+
+	return 0;
+}
+
+static int reference_read_doubles(double dst[], unsigned int nb_samples,
+		                  int speriod, int irange)
+{
+	int i, err = 0;
+	sampl_t *p;
+
+	p = malloc(nb_samples * ai_subd.slen);
+	if (!p)
+		error(EXIT, 0, "malloc");
+
+	err = references.read_samples(p, nb_samples, speriod, irange);
+	if (err) {
+		free(p);
+		error(EXIT, 0, "read_samples");
+	}
+
+	for (i = 0; i < nb_samples; i++)
+		dst[i] = p[i];
+
+	free(p);
+
+	return 0;
+}
+
+static int reference_read_samples(void *dst, unsigned int nb_samples,
+	                          int speriod, int irange)
+{
+	int err;
+
+	if (!nb_samples)
+		error(EXIT, 0, "invalid nb samples (%d)", nb_samples);
+
+	err = ops.data.read_hint(&ai_subd, CR_ALT_SOURCE|CR_ALT_FILTER,
+				 irange, AREF_DIFF);
+	if (err)
+		error(EXIT, 0, "read_hint (%d)", err);
+
+	err = ops.data.read_async(dst, &ai_subd, nb_samples, speriod, irange);
+	if (err)
+		error(EXIT, 0, "read_async (%d)", err);
+
+	return 0;
+}
+
+/*
+ *
+ * calibrator
+ *
+ *
+ */
+const char *ni_m_boards[] = {
+	"pci-6220", "pci-6221", "pci-6221_37pin", "pci-6224", "pci-6225",
+	"pci-6229", "pci-6250", "pci-6251", "pci-6254", "pci-6259", "pcie-6259",
+	"pci-6280", "pci-6281", "pxi-6281", "pci-6284", "pci-6289"};
+
+const int nr_ni_m_boards = ARRAY_LEN(ni_m_boards);
+
+static inline int pwm_period_ticks(void)
+{
+	int min_speriod, speriod_ticks, ticks;
+	int err;
+
+	err = references.get_min_speriod(&min_speriod);
+	if (err || !min_speriod)
+		error(EXIT, 0, "couldn't retrieve the sampling period");
+
+	speriod_ticks = min_speriod / NI_M_MASTER_CLOCK_PERIOD;
+	ticks = (NI_M_TARGET_PWM_PERIOD_TICKS + speriod_ticks - 1) /
+		speriod_ticks;
+	ticks = ticks * speriod_ticks;
+
+	return ++ticks;
+}
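+
+/*
+ * pwm_period_ticks() rounds the target PWM period up to a whole number
+ * of sampling periods, plus one master clock tick: e.g. a 1000 ns minimal
+ * sampling period is 20 ticks of the 50 ns master clock, so a 640 tick
+ * target yields (640 / 20) * 20 + 1 = 641 ticks.
+ */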
+
+static inline int pwm_rounded_nsamples(void)
+{
+	int pwm_period, total_speriod, min_speriod;
+	int err;
+
+	err = references.get_min_speriod(&min_speriod);
+	if (err || !min_speriod)
+		error(EXIT, 0, "couldn't retrieve the sampling period");
+
+	pwm_period = pwm_period_ticks() * NI_M_MASTER_CLOCK_PERIOD;
+	total_speriod = (NI_M_NR_SAMPLES * min_speriod + pwm_period / 2) /
+			pwm_period;
+	total_speriod = total_speriod * pwm_period;
+
+	return total_speriod / min_speriod;
+}
+
+static int check_buf_size(int slen)
+{
+	unsigned long blen, req_blen;
+	int err;
+
+	err = a4l_get_bufsize(&descriptor, ai_subd.idx, &blen);
+	if (err)
+		error(EXIT, 0, "a4l_get_bufsize (%d)", err);
+
+	req_blen = slen * pwm_rounded_nsamples();
+	if (blen < req_blen)
+		error(EXIT, 0, "blen (%ld) < req_blen (%ld)", blen, req_blen);
+
+	return 0;
+}
+
+static int set_pwm_up_ticks(int t)
+{
+	unsigned int up_p, down_p, real_up_p, real_down_p;
+	int  err;
+
+	up_p = t * NI_M_MASTER_CLOCK_PERIOD;
+	down_p = (pwm_period_ticks() - t) * NI_M_MASTER_CLOCK_PERIOD;
+	err = references.set_pwm(&cal_subd, up_p, down_p, &real_up_p,
+				 &real_down_p);
+	if (err)
+		error(EXIT, 0, "reference_set_pwm");
+
+	return 0;
+}
+
+static int characterize_pwm(struct pwm_info *dst, int pref, unsigned range)
+{
+	int i, up_ticks, err, speriod, len;
+	double mean, stddev, stddev_of_mean;
+	double *p;
+
+	err = references.set_bits(pref | REF_NEG_CAL_GROUND);
+	if (err)
+		error(EXIT, EINVAL, "reference_set_bits");
+
+	len = pwm_rounded_nsamples() * sizeof(*p);
+	p = malloc(len);
+	if (!p)
+		error(EXIT, 0, "malloc (%d)", len);
+
+	for (i = 0; i < dst->nb_nodes; i++) {
+
+		up_ticks = NI_M_MIN_PWM_PULSE_TICKS * (i + 1);
+		err = set_pwm_up_ticks(up_ticks);
+		if (err)
+			error(EXIT, 0, "set_pwm_up_ticks");
+
+		err = references.get_min_speriod(&speriod);
+		if (err)
+			error(EXIT, 0, "get_min_speriod");
+
+		err = references.read_doubles(p, len/sizeof(*p), speriod, range);
+		if (err)
+			error(EXIT, 0, "read_doubles");
+
+		math.stats.mean(&mean, p, len/sizeof(*p));
+		math.stats.stddev(&stddev, p, len/sizeof(*p), mean);
+		math.stats.stddev_of_mean(&stddev_of_mean, p,
+			                  len/sizeof(*p), mean);
+		dst->node[i].up_tick = up_ticks;
+		dst->node[i].mean = mean;
+	}
+	free(p);
+
+	return 0;
+}
+
+static void print_polynomial(struct polynomial *p)
+{
+	int i;
+
+	__debug("Polynomial :\n");
+	__debug("\torder =  %d \n", p->order);
+	__debug("\texpansion origin =  %f \n", p->expansion_origin);
+
+	for (i = 0; i < p->nb_coefficients; i++)
+		__debug("\torder  %d  coefficient =  %g \n",
+			i, p->coefficients[i]);
+}
+
+static int calibrate_non_linearity(struct polynomial *poly,
+	                           struct pwm_info *src)
+{
+	unsigned int max_data = (1 << ai_subd.slen * 8)  - 2;
+	unsigned up_ticks, down_ticks, i;
+	struct codes_info data;
+	int len;
+
+	data.nb_codes = src->nb_nodes;
+	len = data.nb_codes * sizeof(*data.codes);
+	data.codes = malloc(len);
+	if (!data.codes)
+		error (EXIT, 0, "malloc (%d)", len);
+
+	for (i = 0; i < data.nb_codes; i++) {
+		up_ticks = src->node[i].up_tick;
+		down_ticks = pwm_period_ticks() - up_ticks;
+		data.codes[i].nominal = max_data * down_ticks /
+					pwm_period_ticks();
+		data.codes[i].measured = src->node[i].mean;
+	}
+
+	poly->order = 3;
+	poly->expansion_origin = max_data / 2;
+	math.polynomial.fit(poly, &data);
+
+	print_polynomial(poly);
+	free(data.codes);
+
+	return 0;
+}
+
+static int calibrate_ai_gain_and_offset(struct polynomial *dst,
+	                                struct polynomial *src,
+			                unsigned pos_ref, float volt_ref,
+	                                unsigned range)
+{
+	double *p;
+	double measured_gnd_cde, linearized_gnd_cde;
+	double measured_ref_cde, linearized_ref_cde;
+	double gain, offset;
+	int i, len, err, speriod;
+	double a, b;
+
+	len = pwm_rounded_nsamples() * sizeof(*p);
+	p = malloc(len);
+	if (!p)
+		error(EXIT, 0, "malloc (%d)", len);
+
+	/* ground */
+	references.set_bits(REF_POS_CAL_GROUND | REF_NEG_CAL_GROUND);
+	err = references.get_min_speriod(&speriod);
+	if (err)
+		error(EXIT, 0, "get_min_speriod");
+	err = references.read_doubles(p, len/sizeof(*p), speriod, range);
+	if (err)
+		error(EXIT, 0, "read_doubles");
+	math.stats.mean(&measured_gnd_cde, p, len/sizeof(*p));
+	math.polynomial.linearize(&linearized_gnd_cde, src,
+				  measured_gnd_cde);
+
+	/* reference */
+	references.set_bits(pos_ref | REF_NEG_CAL_GROUND);
+	err = references.get_min_speriod(&speriod);
+	if (err)
+		error(EXIT, 0, "get_min_speriod");
+	err = references.read_doubles(p, len/sizeof(*p), speriod, range);
+	if (err)
+		error(EXIT, 0, "read_doubles");
+	math.stats.mean(&measured_ref_cde, p, len/sizeof(*p));
+	math.polynomial.linearize(&linearized_ref_cde, src, measured_ref_cde);
+
+	gain = volt_ref / (linearized_ref_cde - linearized_gnd_cde);
+
+	/*
+	 * update output
+	 */
+
+	dst->coefficients = malloc(src->nb_coefficients * sizeof(double));
+	if (!dst->coefficients)
+		error(EXIT, 0, "malloc");
+
+	dst->expansion_origin = src->expansion_origin;
+	dst->nb_coefficients = src->nb_coefficients;
+	dst->order = src->order;
+	for (i = 0; i < dst->nb_coefficients; i++)
+		dst->coefficients[i] = src->coefficients[i] * gain;
+
+	math.polynomial.linearize(&offset, dst, measured_gnd_cde);
+	dst->coefficients[0] = dst->coefficients[0] - offset;
+
+	__debug("volt_ref                = %g \n", volt_ref);
+	__debug("measured_gnd_cde    = %g, linearized_gnd_cde     = %g \n",
+		measured_gnd_cde, linearized_gnd_cde);
+	__debug("measured_ref_cde = %g, linearized_ref_cde  = %g \n",
+		measured_ref_cde, linearized_ref_cde);
+
+	math.polynomial.linearize(&a, dst, measured_gnd_cde);
+	__debug("full_correction(measured_gnd_cde)    = %g \n", a);
+	math.polynomial.linearize(&b, dst, measured_ref_cde);
+	__debug("full_correction(measured_ref_cde) = %g \n", b);
+
+	print_polynomial(dst);
+
+	free(p);
+
+	return 0;
+}
+
+static int calibrate_base_range(struct polynomial *dst, struct polynomial *src)
+{
+	float volt_ref;
+	int err;
+
+	eeprom.ops.read_reference_voltage(&volt_ref);
+	err = calibrate_ai_gain_and_offset(dst, src, REF_POS_CAL, volt_ref,
+					   NI_M_BASE_RANGE);
+	if (err)
+		error(EXIT, 0, "calibrate_ai_gain_and_offset");
+
+	return err;
+}
+
+
+static struct subdevice_calibration_node *get_calibration_node(struct listobj *l,
+	                                                       unsigned channel,
+	                                                       unsigned range)
+{
+	struct subdevice_calibration_node *e, *t;
+
+	if (list_empty(l))
+		return NULL;
+
+	list_for_each_entry_safe(e, t, l, node) {
+		if (e->channel == channel || e->channel == ALL_CHANNELS ||
+		    channel == ALL_CHANNELS) {
+			if (e->range == range || e->range == ALL_RANGES ||
+			    range == ALL_RANGES) {
+				return e;
+			}
+		}
+	}
+
+	return NULL;
+}
+
+static int calibrate_pwm(struct polynomial *dst, struct pwm_info *pwm_info,
+	                 struct subdevice_calibration_node *range_calibration)
+{
+	double pwm_cal, adrange_cal, lsb_error;
+	double aprox_volts_per_bit, a, b;
+	double measured_voltages;
+	struct codes_info info;
+	int i;
+
+	if (!pwm_info->nb_nodes)
+		error(EXIT, 0, "no pwm nodes \n");
+
+	info.nb_codes = pwm_info->nb_nodes;
+	info.codes = malloc(info.nb_codes * sizeof(*info.codes));
+	if (!info.codes)
+		error(EXIT, 0, "malloc");
+
+	for (i = 0; i < pwm_info->nb_nodes; i++) {
+		info.codes[i].nominal = pwm_info->node[i].up_tick;
+		math.polynomial.linearize(&measured_voltages,
+					  range_calibration->polynomial,
+					  pwm_info->node[i].mean);
+		info.codes[i].measured = measured_voltages;
+	}
+
+	dst->order = 1;
+	dst->expansion_origin = pwm_period_ticks() / 2;
+	math.polynomial.fit(dst, &info);
+
+	math.polynomial.linearize(&a, range_calibration->polynomial, 1);
+	math.polynomial.linearize(&b, range_calibration->polynomial, 0);
+	aprox_volts_per_bit = a - b;
+
+	for (i = 0; i < pwm_info->nb_nodes; i++) {
+		math.polynomial.linearize(&pwm_cal, dst,
+					  pwm_info->node[i].up_tick);
+		math.polynomial.linearize(&adrange_cal,
+					  range_calibration->polynomial,
+					  pwm_info->node[i].mean);
+		lsb_error = (adrange_cal - pwm_cal) / aprox_volts_per_bit;
+		__debug("upTicks=%d code=%g "
+			"pwm_cal=%g adrange_cal=%g lsb_error=%g \n",
+			pwm_info->node[i].up_tick, pwm_info->node[i].mean,
+			pwm_cal, adrange_cal, lsb_error);
+	}
+	free(info.codes);
+
+	return 0;
+}
+
+static int append_calibration_node(struct listobj *l, struct polynomial *polynomial,
+			           unsigned channel, unsigned range)
+{
+	struct subdevice_calibration_node *q;
+
+	q = malloc(sizeof(struct subdevice_calibration_node));
+	if (!q)
+		error(EXIT, 0, "malloc");
+
+	q->polynomial = polynomial;
+	q->channel = channel;
+	q->range = range;
+	list_append(&q->node, l);
+
+	return 0;
+}
+
+static int calibrate_ai_range(struct polynomial *dst,
+	                      struct polynomial *pwm_calibration,
+		              struct polynomial *non_linearity_correction,
+	                      unsigned pos_ref,
+		              unsigned range)
+{
+	struct polynomial inv_pwm_cal;
+	double reference_voltage;
+	a4l_rnginfo_t *rng;
+	unsigned up_ticks;
+	double *p, val;
+	int err;
+
+	if (pwm_calibration->order != 1)
+		error(EXIT, -1, "pwm_calibration order \n");
+
+	inv_pwm_cal.expansion_origin = pwm_calibration->coefficients[0];
+	p = malloc((pwm_calibration->order + 1) * sizeof(double));
+	if (!p)
+		error(EXIT,0,"malloc\n");
+
+	inv_pwm_cal.order = pwm_calibration->order;
+	inv_pwm_cal.nb_coefficients = pwm_calibration->order + 1;
+	inv_pwm_cal.coefficients = p;
+	inv_pwm_cal.coefficients[0] = pwm_calibration->expansion_origin;
+	inv_pwm_cal.coefficients[1] = 1.0 / pwm_calibration->coefficients[1];
+
+	err = a4l_get_rnginfo(&descriptor, ai_subd.idx, 0, range, &rng);
+	if (err < 0)
+		error(EXIT,0,"a4l_get_rnginfo (%d)\n", err);
+
+	__debug("adjusted rng_max: %g \n", rng_max(rng) * 0.9);
+
+	math.polynomial.linearize(&val, &inv_pwm_cal,
+				  rng_max(rng) * 0.9);
+	up_ticks = lrint(val);
+	free(p);
+
+	if (up_ticks > pwm_period_ticks() - NI_M_MIN_PWM_PULSE_TICKS)
+		up_ticks = pwm_period_ticks() - NI_M_MIN_PWM_PULSE_TICKS;
+
+	set_pwm_up_ticks(up_ticks);
+	math.polynomial.linearize(&val, pwm_calibration, up_ticks);
+	reference_voltage = val;
+	calibrate_ai_gain_and_offset(dst, non_linearity_correction, pos_ref,
+				     reference_voltage, range);
+
+	return 0;
+}
+
+static int calibrate_ranges_above_threshold(struct polynomial *pwm_calibration,
+				            struct polynomial *non_lin_correct,
+				            unsigned pos_ref,
+				            struct listobj *calibration_list,
+				            struct calibrated_ranges *calibrated,
+				            double max_range_threshold)
+{
+	struct polynomial *dst;
+	a4l_rnginfo_t *rnginfo;
+	int err, i;
+
+	for (i = 0; i < calibrated->nb_ranges; i++) {
+		if (calibrated->ranges[i] == 1)
+			continue;
+
+		err = a4l_get_rnginfo(&descriptor, ai_subd.idx, 0, i, &rnginfo);
+		if (err < 0)
+			error(EXIT,0,"a4l_get_rnginfo (%d)\n", err);
+
+		if (rng_max(rnginfo) < max_range_threshold)
+			continue;
+
+		dst = malloc(sizeof(*dst));
+		if (!dst)
+			error(EXIT, 0, "malloc");
+
+		__debug("calibrating range %d \n", i);
+		calibrate_ai_range(dst, pwm_calibration, non_lin_correct,
+				   pos_ref, i);
+		append_calibration_node(calibration_list, dst, ALL_CHANNELS, i);
+		calibrated->ranges[i] = 1;
+		__debug("done \n");
+	}
+
+	return 0;
+}
+
+static int get_min_range_containing(struct calibrated_ranges *calibrated,
+	                            double value)
+{
+	a4l_rnginfo_t *rnginfo, *smallest = NULL;
+	unsigned smallest_range = 0;
+	int err, i;
+
+	for (i = 0; i < calibrated->nb_ranges; i++) {
+		if (!calibrated->ranges[i])
+			continue;
+
+		err = a4l_get_rnginfo(&descriptor, ai_subd.idx, 0, i, &rnginfo);
+		if (err < 0)
+			error(EXIT,0,"a4l_get_rnginfo (%d)\n", err);
+
+		if (rng_max(rnginfo) > value &&
+		    (smallest_range == 0 ||
+		     rng_max(rnginfo) < rng_max(smallest))) {
+			smallest_range = i;
+			smallest = rnginfo;
+		}
+	}
+	if (!smallest)
+		error(EXIT,0,"no cal range with max volt above %g V found \n", value);
+
+	return smallest_range;
+}
+
+static int ni_m_calibrate_ai(void)
+{
+	const unsigned PWM_CAL_POINTS = (NI_M_TARGET_PWM_PERIOD_TICKS /
+					 NI_M_MIN_PWM_PULSE_TICKS);
+	const double MEDIUM_RANGE = 0.499;
+	const double LARGE_RANGE = 1.99;
+	const double SMALL_RANGE = 0.0;
+	struct polynomial non_lin_correct, *full_correct;
+	struct subdevice_calibration_node *node;
+	struct calibrated_ranges calibrated;
+	struct polynomial pwm_calibration;
+	struct pwm_info pwm_info;
+	a4l_chinfo_t *chan_info;
+	int i, err;
+	struct calibration_loop {
+		const char *message;
+		unsigned ref_pos;
+		double threshold;
+		double item;
+		int range;
+	} cal_info [] = {
+		[0] = {
+			.message = "low gain range ",
+			.ref_pos = REF_POS_CAL_PWM_10V,
+			.threshold = LARGE_RANGE,
+			.range = NI_M_BASE_RANGE,
+			.item = -1,
+		},
+		[1] = {
+			.message = "medium gain range ",
+			.ref_pos = REF_POS_CAL_PWM_2V,
+			.threshold = MEDIUM_RANGE,
+			.item = LARGE_RANGE,
+			.range = -1,
+		},
+		[2] = {
+			.message = "high gain range ",
+			.ref_pos = REF_POS_CAL_PWM_500mV,
+			.threshold = SMALL_RANGE,
+			.item =  MEDIUM_RANGE,
+			.range = -1,
+		},
+	};
+
+	list_init(&ai_calibration_list);
+
+	/*
+	 * check if the buffer is big enough
+	 */
+	err = a4l_get_chinfo(&descriptor, ai_subd.idx, 0, &chan_info);
+	if (err)
+		error(EXIT, 0,"a4l_get_chinfo (%d)", err);
+
+	calibrated.nb_ranges = chan_info->nb_rng;
+	calibrated.ranges = malloc(chan_info->nb_rng * sizeof(unsigned));
+	if (!calibrated.ranges)
+		error(EXIT, 0,"malloc");
+
+	memset(calibrated.ranges, 0, calibrated.nb_ranges * sizeof(unsigned));
+
+	ai_subd.slen = a4l_sizeof_chan(chan_info);
+	if (ai_subd.slen < 0)
+		error (RETURN, 0, "a4l_sizeof_chan (%d)", err);
+
+	err = check_buf_size(ai_subd.slen);
+	if (err)
+		error(EXIT, -1, "ni_m_check_buf_size: device buffer too small, "
+		      "please re-attach a bigger buffer");
+
+	pwm_info.nb_nodes = PWM_CAL_POINTS;
+	pwm_info.node = malloc(PWM_CAL_POINTS * sizeof(*pwm_info.node));
+	if (!pwm_info.node)
+		error(EXIT, -ENOMEM, "malloc error");
+
+	/*
+	 * calibrate base range
+	 */
+	err = characterize_pwm(&pwm_info, REF_POS_CAL_PWM_10V, NI_M_BASE_RANGE);
+	if (err)
+		error(EXIT, 0, "characterize_pwm");
+
+	err = calibrate_non_linearity(&non_lin_correct, &pwm_info);
+	if (err)
+		error(EXIT, 0, "calibrate_non_linearity");
+
+	full_correct = malloc(sizeof(*full_correct));
+	if (!full_correct)
+		error(EXIT, 0, "malloc");
+
+	err = calibrate_base_range(full_correct, &non_lin_correct);
+	if (err)
+		error(EXIT, 0, "calibrate_ai_base_range");
+
+	append_calibration_node(&ai_calibration_list, full_correct,
+				ALL_CHANNELS, NI_M_BASE_RANGE);
+	calibrated.ranges[NI_M_BASE_RANGE] = 1;
+
+
+	/*
+	 * calibrate low, medium and high gain ranges
+	 */
+	for (i = 0; i < ARRAY_LEN(cal_info); i++) {
+		__debug("Calibrating AI: %s \n", cal_info[i].message);
+
+		if (cal_info[i].range >= 0)
+			goto calibrate;
+
+		cal_info[i].range = get_min_range_containing(&calibrated,
+							     cal_info[i].item);
+		if (!calibrated.ranges[cal_info[i].range])
+			error(EXIT, 0, "not calibrated yet \n" );
+
+		err = characterize_pwm(&pwm_info,
+				       cal_info[i].ref_pos,
+				       cal_info[i].range);
+		if (err)
+			error(EXIT, 0, "characterize_pwm \n");
+
+calibrate:
+		node = get_calibration_node(&ai_calibration_list, 0,
+					    cal_info[i].range);
+		if (!node)
+			error(EXIT, 0, "couldnt find node \n");
+
+		err = calibrate_pwm(&pwm_calibration, &pwm_info, node);
+		if (err)
+			error(EXIT, 0, "calibrate_pwm \n");
+
+		err = calibrate_ranges_above_threshold(&pwm_calibration,
+						       &non_lin_correct,
+						       cal_info[i].ref_pos,
+						       &ai_calibration_list,
+						       &calibrated,
+						       cal_info[i].threshold);
+		if (err)
+			error(EXIT, 0, "calibrate_ranges_above_threshold \n");
+	}
+
+	return 0;
+}
+
+static unsigned find_ai_range_for_ao(unsigned ao_range)
+{
+	a4l_rnginfo_t *ao_rng_info, *ai_rng_info, *rng_info = NULL;
+	a4l_chinfo_t *ai_chan_info;
+	unsigned range = 0xFFFF;
+	double max_ao_voltage;
+	int num_ai_ranges;
+	int i, err;
+
+	err = a4l_get_chinfo(&descriptor, ai_subd.idx, 0, &ai_chan_info);
+	if (err)
+		error(EXIT, 0,"a4l_get_chinfo (%d)", err);
+
+	num_ai_ranges = ai_chan_info->nb_rng;
+
+	err = a4l_get_rnginfo(&descriptor, ao_subd.idx, 0, ao_range, &ao_rng_info);
+	if (err)
+		error(EXIT, 0, "a4l_get_rng_info (%d)", err);
+
+	max_ao_voltage = rng_max(ao_rng_info);
+
+	for (i = 0; i < num_ai_ranges; i++) {
+		err = a4l_get_rnginfo(&descriptor, ai_subd.idx, 0, i, &ai_rng_info);
+		if (err)
+			error(EXIT, 0, "a4l_get_rng_info (%d)", err);
+
+		if (rng_info == NULL ||
+		    (rng_max(ai_rng_info) > max_ao_voltage &&
+		     rng_max(ai_rng_info) < rng_max(rng_info)) ||
+		    (rng_max(rng_info) < max_ao_voltage &&
+		     rng_max(ai_rng_info) > rng_max(rng_info))) {
+
+			range = i;
+			rng_info = ai_rng_info;
+		}
+	}
+
+	if (rng_info == NULL)
+		error(EXIT, 0, "cant find range");
+
+	return range;
+}
+
+static long int get_high_code(unsigned ai_rng, unsigned ao_rng)
+{
+	unsigned int ao_max_data = (1 << ao_subd.slen * 8)  - 2;
+	a4l_rnginfo_t *ai, *ao;
+	double fractional_code;
+
+	a4l_get_rnginfo(&descriptor, ai_subd.idx, 0, ai_rng, &ai);
+	a4l_get_rnginfo(&descriptor, ao_subd.idx, 0, ao_rng, &ao);
+
+	if (rng_max(ai) > rng_max(ao))
+		return lrint(ao_max_data * 0.9);
+
+	fractional_code = (0.9 * rng_max(ai) - rng_min(ao)) /
+		          (rng_max(ao) - rng_min(ao));
+
+	if (fractional_code < 0.0 || fractional_code > 1.0)
+		error(EXIT, 0, "error looking for high code");
+
+	return lrint(ao_max_data * fractional_code);
+}
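+
+/*
+ * get_high_code() maps 90% of the AI full scale into the AO code space;
+ * e.g. with an AI maximum of 10 V and an AO range of [-10 V, 10 V],
+ * fractional_code = (9 - (-10)) / (10 - (-10)) = 0.95 of ao_max_data.
+ */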
+
+static int calibrate_ao_channel_and_range(unsigned ai_rng, unsigned ao_channel,
+	                                  unsigned ao_rng)
+{
+	unsigned int ao_max_data = (1 << ao_subd.slen * 8)  - 2;
+	double measured_low_code, measured_high_code, tmp;
+	long int low_code = lrint(ao_max_data * 0.1);
+	struct subdevice_calibration_node *node;
+	struct codes_info data;
+	struct polynomial *poly;
+	long int high_code;
+	double *readings;
+	int speriod;
+	int i;
+
+	node = get_calibration_node(&ai_calibration_list, 0, ai_rng);
+	if (!node)
+		error(EXIT, 0, "couldnt find node \n");
+
+	data.nb_codes = 2;
+	data.codes = malloc(data.nb_codes * sizeof(*data.codes));
+	readings = malloc(NI_M_NR_SAMPLES * sizeof(*readings));
+	poly = malloc(sizeof(*poly));
+	if (data.codes == NULL || readings == NULL || poly == NULL)
+		error(EXIT, 0, "malloc");
+
+	if ((ao_channel & 0xf) != ao_channel)
+		error(EXIT,0, "wrong ao channel (%d)", ao_channel);
+
+	references.set_bits(REF_POS_CAL_AO |
+		            REF_NEG_CAL_GROUND | ao_channel << 15);
+
+	/* low nominals */
+	data.codes[0].nominal = low_code;
+
+	ops.data.write(&low_code, &ao_subd, ao_channel, ao_rng, AREF_GROUND);
+	references.get_min_speriod(&speriod);
+	references.read_doubles(readings,
+				NI_M_NR_SAMPLES,
+				speriod,
+				ai_rng);
+	math.stats.mean(&measured_low_code, readings, NI_M_NR_SAMPLES);
+	math.polynomial.linearize(&data.codes[0].measured,
+				  node->polynomial,
+				  measured_low_code);
+
+	/* high nominals */
+	high_code = get_high_code(ai_rng, ao_rng);
+	data.codes[1].nominal = (1.0) * (double) high_code;
+
+	ops.data.write(&high_code, &ao_subd, ao_channel, ao_rng, AREF_GROUND);
+	references.get_min_speriod(&speriod);
+	references.read_doubles(readings,
+				NI_M_NR_SAMPLES,
+				speriod,
+				ai_rng);
+	math.stats.mean(&measured_high_code, readings, NI_M_NR_SAMPLES);
+	math.polynomial.linearize(&data.codes[1].measured,
+				  node->polynomial,
+				  measured_high_code);
+
+	poly->order = data.nb_codes - 1;
+	poly->expansion_origin = 0.0;
+	__debug("AO calibration for channel %d, range %d \n", ao_channel, ao_rng);
+
+	for (i = 0; i < data.nb_codes ; i++)
+		__debug("set ao to %g, measured %g \n",
+			data.codes[i].nominal,
+			data.codes[i].measured);
+
+	/*----------------------------------------------------------------------
+	 * the comedi calibration seems to invert the nominal and measured
+	 * values (I suppose they know about this) so I will have to hack it
+	 */
+	for (i = 0; i < data.nb_codes; i++) {
+		tmp = data.codes[i].measured ;
+		data.codes[i].measured = data.codes[i].nominal;
+		data.codes[i].nominal = tmp;
+	}
+	/*--------------------------------------------------------------------*/
+	math.polynomial.fit(poly, &data);
+
+	append_calibration_node(&ao_calibration_list, poly, ao_channel, ao_rng);
+	print_polynomial(poly);
+	free(data.codes);
+	free(readings);
+
+	return 0;
+}
+
+static int ni_m_calibrate_ao(void)
+{
+	a4l_rnginfo_t *range_info;
+	a4l_chinfo_t *chan_info;
+	unsigned channel, range;
+	unsigned ai_range;
+	int err;
+
+	list_init(&ao_calibration_list);
+
+	err = a4l_get_chinfo(&descriptor, ao_subd.idx, 0, &chan_info);
+	if (err)
+		error(EXIT, 0,"a4l_get_chinfo (%d)", err);
+
+	ao_subd.slen = a4l_sizeof_chan(chan_info);
+	if (ao_subd.slen < 0)
+		error (RETURN, 0, "a4l_sizeof_chan (%d)", err);
+
+	for (channel = 0; channel < ao_subd.info->nb_chan; channel++) {
+		for (range = 0 ; range < chan_info->nb_rng; range++) {
+
+			err = a4l_get_rnginfo(&descriptor, ao_subd.idx, 0,
+				              range, &range_info);
+			if (err)
+				error(EXIT, 0, "a4l_get_rng_info (%d)", err);
+
+			if (A4L_RNG_UNIT(range_info->flags) !=  A4L_RNG_VOLT_UNIT)
+				continue;
+
+			ai_range = find_ai_range_for_ao(range);
+			err = calibrate_ao_channel_and_range(ai_range,
+				                             channel, range);
+			if (err)
+				error(EXIT, 0, "calibrate_ao");
+		}
+	}
+
+	return 0;
+}
+
+/*
+ * main entry
+ */
+int ni_m_software_calibrate(FILE *p)
+{
+	a4l_sbinfo_t *sbinfo;
+	int i, err;
+
+	__debug("calibrating device: %s \n", descriptor.board_name);
+
+	descriptor.sbdata = malloc(descriptor.sbsize);
+	if (descriptor.sbdata == NULL)
+		error(EXIT, 0, "malloc ENOMEM (requested %d)", descriptor.sbsize);
+
+	err = a4l_fill_desc(&descriptor);
+	if (err)
+		error(EXIT, 0, "a4l_fill_desc (%d)", err);
+
+	for (i = 0; i < descriptor.nb_subd; i++) {
+
+		err = a4l_get_subdinfo(&descriptor, i, &sbinfo);
+		if (err < 0)
+			error(EXIT, 0, "a4l_get_subdinfo (%d)", err);
+
+		switch (sbinfo->flags & A4L_SUBD_TYPES) {
+		case A4L_SUBD_CALIB:
+			SET_SUBD(cal, i, sbinfo, CALIBRATION_SUBD_STR);
+			break;
+		case A4L_SUBD_AI:
+			SET_SUBD(ai, i, sbinfo, AI_SUBD_STR);
+			break;
+		case A4L_SUBD_AO:
+			SET_SUBD(ao, i, sbinfo, AO_SUBD_STR);
+			break;
+		case A4L_SUBD_MEMORY:
+			SET_SUBD(mem, i, sbinfo, MEMORY_SUBD_STR);
+			break;
+		}
+	}
+
+	if (cal_subd.idx < 0 || ai_subd.idx < 0 || mem_subd.idx < 0)
+		error(EXIT, 0, "can't find subdevice");
+
+	err = ni_m_calibrate_ai();
+	if (err)
+		error(EXIT, 0, "ai calibration error (%d)", err);
+
+	write_calibration_file(p, &ai_calibration_list, &ai_subd, &descriptor);
+
+	/* only calibrate the analog output subdevice if present */
+	if (ao_subd.idx < 0) {
+		__debug("analog output not present \n");
+		return 0;
+	}
+
+	err = ni_m_calibrate_ao();
+	if (err)
+		error(EXIT, 0, "ao calibration error (%d)", err);
+
+	write_calibration_file(p, &ao_calibration_list, &ao_subd, NULL);
+
+	return 0;
+}
+
+static void __attribute__ ((constructor)) __ni_m_calibrate_init(void)
+{
+	init_interface(references, REFERENCES);
+	init_interface(eeprom, EEPROM);
+	init_interface(ops, SUBDEV_OPS);
+	init_interface(mem_subd, SUBD);
+	init_interface(cal_subd, SUBD);
+	init_interface(math, GNU_MATH);
+	init_interface(ao_subd, SUBD);
+	init_interface(ai_subd, SUBD);
+}
+
+
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.h b/kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.h
new file mode 100644
index 0000000..6612d21
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/calibration_ni_m.h
@@ -0,0 +1,260 @@
+/*
+ * Analogy for Linux, NI - M calibration program
+ *
+ * Copyright (C) 2014 Jorge A. Ramirez-Ortiz <jro@xenomai.org>
+ *
+ * from original code from the Comedi project
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#ifndef __NI_M_SOFTWARE_CALIBRATE_H__
+#define __NI_M_SOFTWARE_CALIBRATE_H__
+#include <rtdm/uapi/analogy.h>
+#include "calibration.h"
+#include "analogy_calibrate.h"
+#include "boilerplate/list.h"
+
+extern const char *ni_m_boards[];
+extern const int nr_ni_m_boards;
+
+#define ni_m_board_supported(id) __array_search(id, ni_m_boards, nr_ni_m_boards)
+
+int ni_m_software_calibrate(FILE *p);
+
+#define init_interface(a, b)  a = ((typeof(a)) INIT_##b);
+
+#define	 SET_BIT(n,set)	do { *(set) |= (1 << n); } while(0)
+
+/*
+ * subdevice
+ */
+#define SET_SUBD(type, a, b, c)			\
+         do {    type##_subd.idx = a;		\
+	         type##_subd.info = b;		\
+		 type##_subd.name = c;		\
+         } while (0)
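+
+/*
+ * Used while walking the subdevice list in ni_m_software_calibrate(), e.g.
+ *   SET_SUBD(ai, i, sbinfo, AI_SUBD_STR);
+ * expands to:
+ *   ai_subd.idx = i; ai_subd.info = sbinfo; ai_subd.name = AI_SUBD_STR;
+ */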
+
+#define INIT_SUBD 				\
+{						\
+        .slen = 0,				\
+        .idx = -1,				\
+	.info = NULL,				\
+	.name = NULL,				\
+}
+
+/*
+ * eeprom
+ */
+#define INIT_EEPROM_OPS								\
+{										\
+	.get_calibration_base_address = eeprom_get_calibration_base_address,	\
+	.read_reference_voltage = eeprom_read_reference_voltage,		\
+	.read_uint16 = eeprom_read_uint16,					\
+	.read_float = eeprom_read_float,					\
+	.read_byte = eeprom_read_byte,						\
+}
+
+#define INIT_EEPROM 			\
+{					\
+        .ops = INIT_EEPROM_OPS,		\
+	.voltage_ref_offset = 12,	\
+}
+
+typedef int (*eeprom_get_calibration_base_address_function)(unsigned *);
+typedef int (*eeprom_read_uint16_function)(unsigned, unsigned *);
+typedef int (*eeprom_read_byte_function)(unsigned, unsigned *);
+typedef int (*eeprom_read_reference_voltage_function)(float *);
+typedef int (*eeprom_read_float_function)(unsigned, float *);
+
+struct eeprom_ops {
+	eeprom_get_calibration_base_address_function get_calibration_base_address;
+	eeprom_read_reference_voltage_function read_reference_voltage;
+	eeprom_read_uint16_function read_uint16;
+	eeprom_read_float_function read_float;
+	eeprom_read_byte_function read_byte;
+};
+
+struct eeprom {
+	struct eeprom_ops ops;
+	int voltage_ref_offset;
+};
+
+/*
+ * subdevice operations
+ */
+#define INIT_SUBDEV_DATA_OPS		\
+{					\
+	.read_async = data_read_async,	\
+        .read_hint = data_read_hint,	\
+	.read = data_read,		\
+	.write = data_write,		\
+}
+
+#define INIT_SUBDEV_OPS			\
+{					\
+        .data = INIT_SUBDEV_DATA_OPS	\
+}
+
+typedef int (*data_read_async_function)(void *, struct a4l_calibration_subdev *, unsigned , int , int);
+typedef int (*data_read_hint_function)(struct a4l_calibration_subdev *s, int, int, int);
+typedef int (*data_read_function)(unsigned *, struct a4l_calibration_subdev *, int, int, int);
+typedef int (*data_write_function)(long int *, struct a4l_calibration_subdev *s, int, int, int);
+
+struct subdev_ops {
+	struct data_ops {
+		data_read_async_function read_async;
+		data_read_hint_function read_hint;
+		data_write_function write;
+		data_read_function read;
+	} data;
+};
+
+
+/*
+ * gnumath
+ */
+#define INIT_GNU_MATH_STATS						\
+{									\
+	.stddev_of_mean = statistics_standard_deviation_of_mean,	\
+	.stddev = statistics_standard_deviation,			\
+        .mean = statistics_mean,					\
+}
+
+#define INIT_GNU_MATH_POLYNOMIAL		\
+{						\
+	.fit = polynomial_fit,			\
+	.linearize = polynomial_linearize,	\
+}
+
+#define INIT_GNU_MATH				\
+{						\
+        .stats = INIT_GNU_MATH_STATS,		\
+	.polynomial = INIT_GNU_MATH_POLYNOMIAL,	\
+}
+
+
+struct codes {
+	double measured;
+	double nominal;
+};
+
+struct codes_info {
+	struct codes *codes;
+	int nb_codes;
+};
+
+typedef void (*statistics_standard_deviation_of_mean_function)(double *, double [], int, double );
+typedef void (*statistics_standard_deviation_function)(double *, double [], int, double);
+typedef void (*statistics_mean_function)(double *, double [], int);
+
+struct statistics_ops {
+	statistics_standard_deviation_of_mean_function stddev_of_mean;
+	statistics_standard_deviation_function stddev;
+	statistics_mean_function mean;
+};
+
+
+typedef int (*polynomial_linearize_function) (double *, struct polynomial *, double);
+typedef int (*polynomial_fit_function)(struct polynomial *, struct codes_info *);
+struct polynomial_ops {
+	polynomial_fit_function fit;
+	polynomial_linearize_function linearize;
+};
+
+struct gnumath {
+	struct statistics_ops stats;
+	struct polynomial_ops polynomial;
+};
+
+/*
+ * reference
+ */
+#define	positive_cal_shift  	 7
+#define	negative_cal_shift 	 10
+#define	REF_POS_CAL  		(2 << positive_cal_shift)
+#define	REF_POS_CAL_PWM_500mV  	(3 << positive_cal_shift)
+#define	REF_POS_CAL_PWM_2V	(4 << positive_cal_shift)
+#define	REF_POS_CAL_PWM_10V 	(5 << positive_cal_shift)
+#define	REF_POS_CAL_GROUND	(6 << positive_cal_shift)
+#define	REF_POS_CAL_AO 		(7 << positive_cal_shift)
+
+#define	REF_NEG_CAL_1V		(2 << negative_cal_shift)
+#define	REF_NEG_CAL_1mV 	(3 << negative_cal_shift)
+#define	REF_NEG_CAL_GROUND	(5 << negative_cal_shift)
+#define	REF_NEG_CAL_GROUND2 	(6 << negative_cal_shift)
+#define	REF_NEG_CAL_PWM_10V 	(7 << negative_cal_shift)
+
+#define INIT_REFERENCES 					\
+{								\
+	.get_min_speriod = reference_get_min_sampling_period,	\
+	.set_bits = reference_set_bits,				\
+	.set_pwm = reference_set_pwm,				\
+	.read_samples = reference_read_samples,			\
+	.read_doubles = reference_read_doubles,			\
+}
+
+typedef int (*reference_set_pwm_function)(struct a4l_calibration_subdev *s, unsigned, unsigned, unsigned *, unsigned *);
+typedef int (*reference_read_reference_doubles_function)(double [], unsigned, int, int);
+typedef int (*reference_read_reference_samples_function)(void *, unsigned, int, int);
+typedef int (*reference_get_min_sampling_period_function)(int *);
+typedef int (*reference_set_reference_channel_function)(void);
+typedef int (*reference_set_reference_src_function)(void);
+typedef int (*reference_set_bits_function)(unsigned);
+
+struct references {
+	reference_set_reference_src_function set_src;
+	reference_set_reference_channel_function set_chan;
+	reference_set_pwm_function set_pwm;
+	reference_read_reference_samples_function read_samples;
+	reference_read_reference_doubles_function read_doubles;
+	reference_get_min_sampling_period_function get_min_speriod;
+	/* private */
+	reference_set_bits_function set_bits;
+
+};
+
+struct characterization_node {
+	double mean;
+	int up_tick;
+};
+
+struct pwm_info {
+	struct characterization_node *node;
+	unsigned nb_nodes;
+};
+
+/*
+ * NI M calibrator data
+ */
+
+#define NI_M_MIN_PWM_PULSE_TICKS	( 0x20 )
+#define NI_M_MASTER_CLOCK_PERIOD	( 50 )
+#define NI_M_TARGET_PWM_PERIOD_TICKS	( 20 * NI_M_MIN_PWM_PULSE_TICKS )
+#define NI_M_NR_SAMPLES			( 15000 )
+#define NI_M_BASE_RANGE			( 0 )
+
+
+struct calibrated_ranges {
+	unsigned *ranges;
+	unsigned nb_ranges;
+};
+
+#define ALL_CHANNELS	0xFFFFFFFF
+#define ALL_RANGES	0xFFFFFFFF
+
+
+
+#endif
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/cmd_bits.c b/kernel/xenomai-v3.2.4/utils/analogy/cmd_bits.c
new file mode 100644
index 0000000..17dc496
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/cmd_bits.c
@@ -0,0 +1,279 @@
+/*
+ * Analogy for Linux, digital command test program
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <getopt.h>
+#include <rtdm/analogy.h>
+
+#define FILENAME "analogy0"
+
+static char *filename = FILENAME;
+static int verbose;
+
+/* TODO: to be removed */
+static unsigned int chans[4] = {0, 1, 2, 3};
+
+/* The command to send by default */
+a4l_cmd_t cmd = {
+	.idx_subd = -1,
+	.flags = 0,
+	.start_src = TRIG_INT,
+	.start_arg = 0,
+	.scan_begin_src = TRIG_EXT,
+	.scan_begin_arg = 28, /* in ns */
+	.convert_src = TRIG_NOW,
+	.convert_arg = 0, /* in ns */
+	.scan_end_src = TRIG_COUNT,
+	.scan_end_arg = 4,
+	.stop_src = TRIG_NONE,
+	.stop_arg = 0,
+	.nb_chan = 4,
+	.chan_descs = chans,
+};
+
+a4l_insn_t insn = {
+	.type = A4L_INSN_INTTRIG,
+	.idx_subd = -1,
+	.data_size = 0,
+};
+
+struct option cmd_bits_opts[] = {
+	{"verbose", no_argument, NULL, 'v'},
+	{"device", required_argument, NULL, 'd'},
+	{"subdevice", required_argument, NULL, 's'},
+	{"help", no_argument, NULL, 'h'},
+	{0},
+};
+
+static void do_print_usage(void)
+{
+	fprintf(stdout, "usage:\tcmd_bits [OPTS] <bits_values> <mask>\n");
+	fprintf(stdout, "\tOPTS:\t -v, --verbose: verbose output\n");
+	fprintf(stdout,
+		"\t\t -d, --device: "
+		"device filename (analogy0, analogy1, ...)\n");
+	fprintf(stdout, "\t\t -s, --subdevice: subdevice index\n");
+	fprintf(stdout, "\t\t -h, --help: print this help\n");
+}
+
+int main(int argc, char *argv[])
+{
+	int i = 0, err = 0;
+	a4l_desc_t dsc = { .sbdata = NULL };
+	a4l_sbinfo_t *sbinfo;
+	int scan_size, idx_subd = -1;
+	int value, mask = 0;
+
+	/* Trigger status, written data..., before triggering */
+	int triggered = 0, total = 0, trigger_threshold = 128;
+
+	/* Compute arguments */
+	while ((err = getopt_long(argc,
+				  argv,
+				  "vd:s:h", cmd_bits_opts, NULL)) >= 0) {
+		switch (err) {
+		case 'v':
+			verbose = 1;
+			break;
+		case 'd':
+			filename = optarg;
+			break;
+		case 's':
+			idx_subd = strtoul(optarg, NULL, 0);
+			break;
+		case 'h':
+		default:
+			do_print_usage();
+			return 0;
+		}
+	}
+
+	value = (argc - optind > 0) ? strtoul(argv[optind], NULL, 0) : 0;
+	mask = (argc - optind > 1) ? strtoul(argv[optind + 1], NULL, 0) : 0;
+
+	/* Open the device */
+	err = a4l_open(&dsc, filename);
+	if (err < 0) {
+		fprintf(stderr,
+			"cmd_bits: a4l_open %s failed (err=%d)\n",
+			filename, err);
+		return err;
+	}
+
+	if (verbose != 0) {
+		printf("cmd_bits: device %s opened (fd=%d)\n",
+		       filename, dsc.fd);
+		printf("cmd_bits: basic descriptor retrieved\n");
+		printf("\t subdevices count = %d\n", dsc.nb_subd);
+		printf("\t read subdevice index = %d\n", dsc.idx_read_subd);
+		printf("\t write subdevice index = %d\n", dsc.idx_write_subd);
+	}
+
+	/* Allocate a buffer so as to get more info (subd, chan, rng) */
+	dsc.sbdata = malloc(dsc.sbsize);
+	if (dsc.sbdata == NULL) {
+		fprintf(stderr, "cmd_bits: malloc failed \n");
+		return -ENOMEM;
+	}
+
+	/* Get this data */
+	err = a4l_fill_desc(&dsc);
+	if (err < 0) {
+		fprintf(stderr,
+			"cmd_bits: a4l_get_desc failed (err=%d)\n", err);
+		goto out_cmd_bits;
+	}
+
+	if (verbose != 0)
+		printf("cmd_bits: complex descriptor retrieved\n");
+
+	/* If no subdevice index was set, pick the first digital
+	   subdevice found */
+	while (idx_subd == -1 && i < dsc.nb_subd) {
+
+		err = a4l_get_subdinfo(&dsc, i, &sbinfo);
+		if (err < 0) {
+			fprintf(stderr,
+				"cmd_bits: "
+				"a4l_get_subdinfo(%d) failed (err = %d)\n",
+				i, err);
+			goto out_cmd_bits;
+		}
+
+		if ((sbinfo->flags & A4L_SUBD_TYPES) == A4L_SUBD_DIO ||
+		    (sbinfo->flags & A4L_SUBD_TYPES) == A4L_SUBD_DO) {
+			idx_subd = i;
+		}
+
+		i++;
+	}
+
+	if (idx_subd == -1) {
+		fprintf(stderr, "cmd_bits: no digital subdevice available\n");
+		err = -EINVAL;
+		goto  out_cmd_bits;
+	}
+
+	if (verbose != 0)
+		printf("cmd_bits: selected subdevice index = %d\n",
+		       idx_subd);
+
+	/* We must check that the subdevice is really a digital one
+	   (in case the subdevice index was set with the -s option) */
+	err = a4l_get_subdinfo(&dsc, idx_subd, &sbinfo);
+	if (err < 0) {
+		fprintf(stderr,
+			"cmd_bits: get_sbinfo(%d) failed (err = %d)\n",
+			idx_subd, err);
+		err = -EINVAL;
+		goto out_cmd_bits;
+	}
+
+	cmd.idx_subd = insn.idx_subd = idx_subd;
+
+	if ((sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_DIO &&
+	    (sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_DO) {
+		fprintf(stderr,
+			"cmd_bits: selected subdevice is not digital\n");
+		err = -EINVAL;
+		goto out_cmd_bits;
+	}
+
+	/* Set the data size to read / write */
+	scan_size = a4l_sizeof_subd(sbinfo);
+
+	/* Handle the little endian case when the scan size is < 32 bits */
+	if (scan_size == sizeof(uint8_t)) {
+		value *= 0x01010101;
+	}
+	else if (scan_size == sizeof(uint16_t)) {
+		value *= 0x00010001;
+	}
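+
+	/*
+	 * The multiplications above replicate the pattern: for an 8-bit
+	 * subdevice, a value of 0xAB becomes 0xAB * 0x01010101 =
+	 * 0xABABABAB, so whichever byte of the int the driver actually
+	 * consumes on a little endian machine holds the intended bits.
+	 */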
+
+	/* Configure the polarities */
+	for (i = 0; i < scan_size; i++) {
+		int mode = (mask & (1 << i)) ?
+			A4L_INSN_CONFIG_DIO_OUTPUT : A4L_INSN_CONFIG_DIO_INPUT;
+
+		err = a4l_config_subd(&dsc, cmd.idx_subd, mode, i);
+		if (err < 0) {
+			fprintf(stderr,
+				"cmd_bits: configuration of "
+				"line %d failed (err=%d)\n",
+				i, err);
+			goto out_cmd_bits;
+		}
+	}
+
+	/* Send the command to the output device */
+	err = a4l_snd_command(&dsc, &cmd);
+	if (err < 0) {
+		fprintf(stderr,
+			"cmd_bits: a4l_snd_command failed (err=%d)\n", err);
+		goto out_cmd_bits;
+	}
+
+	if (verbose != 0)
+		printf("cmd_bits: command successfully sent\n");
+
+	/* Perform the write operations */
+	do {
+		err = a4l_async_write(&dsc, &value, scan_size, A4L_INFINITE);
+		if (err < 0) {
+			fprintf(stderr,
+				"cmd_bits: a4l_write failed (err=%d)\n", err);
+			goto out_cmd_bits;
+		}
+
+		total += err;
+
+		if (!triggered && total > trigger_threshold) {
+			err = a4l_snd_insn(&dsc, &insn);
+			if (err < 0) {
+				fprintf(stderr,
+					"cmd_bits: triggering failed (err=%d)\n",
+					err);
+				goto out_cmd_bits;
+			}
+
+			/* Make sure the trigger is only sent once */
+			triggered = 1;
+		}
+
+	} while (err > 0);
+
+out_cmd_bits:
+
+	/* Free the buffer used as device descriptor */
+	if (dsc.sbdata != NULL)
+		free(dsc.sbdata);
+
+	/* Closing the fd automatically triggers a cancel, so wait a
+	   little while for the transfer to complete */
+	sleep(1);
+
+	/* Release the file descriptor */
+	a4l_close(&dsc);
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/cmd_read.c b/kernel/xenomai-v3.2.4/utils/analogy/cmd_read.c
new file mode 100644
index 0000000..86522f4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/cmd_read.c
@@ -0,0 +1,435 @@
+/*
+ * Analogy for Linux, input command test program
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <error.h>
+#include <getopt.h>
+#include <string.h>
+#include <signal.h>
+#include <pthread.h>
+#include <rtdm/analogy.h>
+
+typedef int (*dump_function_t) (a4l_desc_t *, a4l_cmd_t*, unsigned char *, int);
+
+struct arguments {
+	int argc;
+	char **argv;
+};
+
+#define MAX_NB_CHAN 32
+#define NB_SCAN 100
+#define ID_SUBD 0
+
+#define FILENAME "analogy0"
+#define BUF_SIZE 10000
+
+static unsigned int chans[MAX_NB_CHAN];
+static unsigned char buf[BUF_SIZE];
+static char *str_chans = "0,1,2,3";
+static char *filename = FILENAME;
+
+static unsigned long wake_count = 0;
+static int real_time = 0;
+static int use_mmap = 0;
+static int verbose = 0;
+
+#define exit_err(fmt, args ...) error(1,0, fmt "\n", ##args)
+#define output(fmt, args ...) fprintf(stdout, fmt "\n", ##args)
+#define debug(fmt, args...) do { if (verbose) printf(fmt "\n", ##args); } while (0)
+
+/* The command to send by default */
+a4l_cmd_t cmd = {
+	.idx_subd = ID_SUBD,
+	.flags = 0,
+	.start_src = TRIG_NOW,
+	.start_arg = 0,
+	.scan_begin_src = TRIG_TIMER,
+	.scan_begin_arg = 8000000,	/* in ns */
+	.convert_src = TRIG_TIMER,
+	.convert_arg = 500000,	/* in ns */
+	.scan_end_src = TRIG_COUNT,
+	.scan_end_arg = 0,
+	.stop_src = TRIG_COUNT,
+	.stop_arg = NB_SCAN,
+	.nb_chan = 0,
+	.chan_descs = chans,
+};
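+
+/*
+ * With these defaults a new scan begins every 8 ms and each channel
+ * conversion within a scan takes 500 us; with the default 4 channels,
+ * for instance, the conversions occupy the first 2 ms of every scan
+ * period. nb_chan and scan_end_arg are filled in later from the -c
+ * option.
+ */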
+
+struct option cmd_read_opts[] = {
+	{"verbose", no_argument, NULL, 'v'},
+	{"real-time", no_argument, NULL, 'r'},
+	{"device", required_argument, NULL, 'd'},
+	{"subdevice", required_argument, NULL, 's'},
+	{"scan-count", required_argument, NULL, 'S'},
+	{"channels", required_argument, NULL, 'c'},
+	{"mmap", no_argument, NULL, 'm'},
+	{"raw", no_argument, NULL, 'w'},
+	{"wake-count", required_argument, NULL, 'k'},
+	{"help", no_argument, NULL, 'h'},
+	{0},
+};
+
+static void do_print_usage(void)
+{
+	output("usage:\tcmd_read [OPTS]");
+	output("\tOPTS:\t -v, --verbose: verbose output");
+	output("\t\t -r, --real-time: enable real-time acquisition mode");
+	output("\t\t -d, --device: device filename (analogy0, analogy1, ...)");
+	output("\t\t -s, --subdevice: subdevice index");
+	output("\t\t -S, --scan-count: count of scan to perform");
+	output("\t\t -c, --channels: channels to use (ex.: -c 0,1)");
+	output("\t\t -m, --mmap: mmap the buffer");
+	output("\t\t -w, --raw: dump data in raw format");
+	output("\t\t -k, --wake-count: space available before waking up the process");
+	output("\t\t -h, --help: output this help");
+}
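+
+/*
+ * Illustrative invocation (device and channel layout are
+ * board-specific):
+ *
+ *   cmd_read -d analogy0 -c 0,1 -S 100
+ *
+ * acquires 100 scans of channels 0 and 1 and dumps the samples as
+ * hexadecimal text on stdout.
+ */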
+
+static inline int dump_raw(a4l_desc_t *dsc, a4l_cmd_t *cmd, unsigned char *buf, int size)
+{
+	return fwrite(buf, size, 1, stdout);
+}
+
+static int dump_text(a4l_desc_t *dsc, a4l_cmd_t *cmd, unsigned char *buf, int size)
+{
+	a4l_chinfo_t *chans[MAX_NB_CHAN];
+	int i, err = 0, tmp_size = 0;
+	char *fmts[MAX_NB_CHAN];
+	static int cur_chan;
+
+	for (i = 0; i < cmd->nb_chan; i++) {
+		int width;
+
+		err = a4l_get_chinfo(dsc, cmd->idx_subd, cmd->chan_descs[i], &chans[i]);
+		if (err < 0)
+			exit_err("a4l_get_chinfo failed (ret=%d)", err);
+
+		width = a4l_sizeof_chan(chans[i]);
+		if (width < 0)
+			exit_err("incoherent info for channel %d", cmd->chan_descs[i]);
+
+		switch (width) {
+		case 1:
+			fmts[i] = "0x%02x ";
+			break;
+		case 2:
+			fmts[i] = "0x%04x ";
+			break;
+		case 4:
+		default:
+			fmts[i] = "0x%08x ";
+			break;
+		}
+	}
+
+	while (tmp_size < size) {
+		unsigned long value;
+		err = a4l_rawtoul(chans[cur_chan], &value, buf + tmp_size, 1);
+		if (err < 0)
+			goto out;
+
+		fprintf(stdout, fmts[cur_chan], value);
+
+		/* We assume a4l_sizeof_chan() cannot fail here because we
+		 * already called it on the very same channel descriptor */
+		tmp_size += a4l_sizeof_chan(chans[cur_chan]);
+
+		if (++cur_chan == cmd->nb_chan) {
+			fprintf(stdout, "\n");
+			cur_chan = 0;
+		}
+	}
+
+	fflush(stdout);
+out:
+	return err;
+}
+
+static int fetch_data(a4l_desc_t *dsc, void *buf, unsigned int *cnt, dump_function_t dump)
+{
+	int ret;
+
+	for (;;) {
+		ret = a4l_async_read(dsc, buf, BUF_SIZE, A4L_INFINITE);
+
+		if (ret == 0) {
+			debug("no more data in the buffer ");
+			break;
+		}
+
+		if (ret < 0)
+			exit_err("a4l_read failed (ret=%d)", ret);
+
+		*cnt += ret;
+
+		ret = dump(dsc, &cmd, buf, ret);
+		if (ret < 0)
+			return -EIO;
+	}
+
+	return ret;
+}
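+
+/*
+ * a4l_async_read() blocks until samples are available; in this program
+ * a return value of 0 is treated as the end of the acquisition (the
+ * stop condition of the command was reached), which terminates the
+ * loop above.
+ */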
+
+static int fetch_data_mmap(a4l_desc_t *dsc, unsigned int *cnt, dump_function_t dump,
+			   void *map, unsigned long buf_size)
+{
+	unsigned long cnt_current = 0, cnt_updated = 0;
+	int ret;
+
+	for (;;) {
+
+		/* Retrieve and update the buffer state; in the input
+		 * case, this recovers how many bytes are available to
+		 * read */
+		ret = a4l_mark_bufrw(dsc, cmd.idx_subd, cnt_current, &cnt_updated);
+
+		if (ret == -ENOENT)
+			break;
+
+		if (ret < 0)
+			exit_err("a4l_mark_bufrw() failed (ret=%d)", ret);
+
+		/* If there is nothing to read, wait for an event
+		   (note that a4l_poll() also retrieves the amount of data
+		   to read, but that value is of no use here since we still
+		   have to update the read counter ourselves) */
+		if (!cnt_updated) {
+			ret = a4l_poll(dsc, cmd.idx_subd, A4L_INFINITE);
+			if (ret < 0)
+				exit_err("a4l_poll() failed (ret=%d)", ret);
+
+			if (ret == 0)
+				break;
+
+			cnt_current = cnt_updated;
+			continue;
+		}
+
+		ret = dump(dsc, &cmd, map + (*cnt % buf_size), cnt_updated);
+		if (ret < 0)
+			return -EIO;
+
+		*cnt += cnt_updated;
+		cnt_current = cnt_updated;
+	}
+
+	return 0;
+}
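+
+/*
+ * Note on the arithmetic in fetch_data_mmap(): the kernel exposes the
+ * acquisition buffer as a plain ring of buf_size bytes, so the next
+ * readable chunk starts at map + (*cnt % buf_size). This simple dump
+ * assumes a chunk never wraps past the end of the ring; a more careful
+ * reader would split the dump in two around the wrap point.
+ */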
+
+static int map_subdevice_buffer(a4l_desc_t *dsc, unsigned long *buf_size, void **map)
+{
+	void *buf;
+	int ret;
+
+	/* Get the buffer size to map */
+	ret = a4l_get_bufsize(dsc, cmd.idx_subd, buf_size);
+	if (ret < 0)
+		exit_err("a4l_get_bufsize() failed (ret=%d)", ret);
+	debug("buffer size = %lu bytes", *buf_size);
+
+	/* Map the analog input subdevice buffer */
+	ret = a4l_mmap(dsc, cmd.idx_subd, *buf_size, &buf);
+	if (ret < 0)
+		exit_err("a4l_mmap() failed (ret=%d)", ret);
+	debug("mmap done (map=0x%p)", buf);
+
+	*map = buf;
+
+	return 0;
+}
+
+static int cmd_read(struct arguments *arg)
+{
+	unsigned int i, scan_size = 0, cnt = 0, len, ofs;
+	dump_function_t dump_function = dump_text;
+	a4l_desc_t dsc = { .sbdata = NULL };
+	unsigned long buf_size;
+	char **argv = arg->argv;
+	int ret = 0, argc = arg->argc;
+	void *map = NULL;
+
+	for (;;) {
+		ret = getopt_long(argc, argv, "vrd:s:S:c:mwk:h",
+				  cmd_read_opts, NULL);
+
+		if (ret == -1)
+			break;
+
+		switch (ret) {
+		case 'v':
+			verbose = 1;
+			break;
+		case 'r':
+			real_time = 1;
+			break;
+		case 'd':
+			filename = optarg;
+			break;
+		case 's':
+			cmd.idx_subd = strtoul(optarg, NULL, 0);
+			break;
+		case 'S':
+			cmd.stop_arg = strtoul(optarg, NULL, 0);
+			break;
+		case 'c':
+			str_chans = optarg;
+			break;
+		case 'm':
+			use_mmap = 1;
+			break;
+		case 'w':
+			dump_function = dump_raw;
+			break;
+		case 'k':
+			wake_count = strtoul(optarg, NULL, 0);
+			break;
+		case 'h':
+		default:
+			do_print_usage();
+			return -EINVAL;
+		}
+	}
+
+	if (isatty(STDOUT_FILENO) && dump_function == dump_raw)
+		exit_err("cannot dump raw data on a terminal\n");
+
+	/* Recover the channels to compute */
+	do {
+		cmd.nb_chan++;
+		len = strlen(str_chans);
+		ofs = strcspn(str_chans, ",");
+
+		if (sscanf(str_chans, "%u", &chans[cmd.nb_chan - 1]) == 0)
+			exit_err("bad channel argument");
+
+		str_chans += ofs + 1;
+	} while (len != ofs);
+
+	/* Update the command structure */
+	cmd.scan_end_arg = cmd.nb_chan;
+	cmd.stop_src = cmd.stop_arg != 0 ? TRIG_COUNT : TRIG_NONE;
+
+	ret = a4l_open(&dsc, filename);
+	if (ret < 0)
+		exit_err("a4l_open %s failed (ret=%d)", filename, ret);
+
+	debug("device %s opened (fd=%d)", filename, dsc.fd);
+	debug("basic descriptor retrieved");
+	debug("\t subdevices count = %d", dsc.nb_subd);
+	debug("\t read subdevice index = %d", dsc.idx_read_subd);
+	debug("\t write subdevice index = %d", dsc.idx_write_subd);
+
+	/* Allocate a buffer so as to get more info (subd, chan, rng) */
+	dsc.sbdata = malloc(dsc.sbsize);
+	if (dsc.sbdata == NULL)
+		exit_err("malloc failed ");
+
+	/* Get this data */
+	ret = a4l_fill_desc(&dsc);
+	if (ret < 0)
+		exit_err("a4l_fill_desc failed (ret=%d)", ret);
+	debug("complex descriptor retrieved");
+
+	/* Get the size of a single acquisition */
+	for (i = 0; i < cmd.nb_chan; i++) {
+		a4l_chinfo_t *info;
+
+		ret = a4l_get_chinfo(&dsc,cmd.idx_subd, cmd.chan_descs[i], &info);
+		if (ret < 0)
+			exit_err("a4l_get_chinfo failed (ret=%d)", ret);
+
+		debug("channel %x", cmd.chan_descs[i]);
+		debug(" ranges count = %d", info->nb_rng);
+		debug(" bit width = %d (bits)", info->nb_bits);
+
+		scan_size += a4l_sizeof_chan(info);
+	}
+
+	debug("size to read = %u", scan_size * cmd.stop_arg);
+	debug("scan size = %u", scan_size);
+
+	/* Cancel any former command which might be in progress */
+	a4l_snd_cancel(&dsc, cmd.idx_subd);
+
+	if (use_mmap) {
+		ret = map_subdevice_buffer(&dsc, &buf_size, &map);
+		if (ret)
+			goto out;
+	}
+
+	ret = a4l_set_wakesize(&dsc, wake_count);
+	if (ret < 0)
+		exit_err("a4l_set_wakesize failed (ret=%d)", ret);
+	debug("wake size successfully set (%lu)", wake_count);
+
+	/* Send the command to the input device */
+	ret = a4l_snd_command(&dsc, &cmd);
+	if (ret < 0)
+		exit_err("a4l_snd_command failed (ret=%d)", ret);
+	debug("command sent");
+
+	if (use_mmap) {
+		ret = fetch_data_mmap(&dsc, &cnt, dump_function, map, buf_size);
+		if (ret)
+			exit_err("failed to fetch_data_mmap (ret=%d)", ret);
+	}
+	else {
+		ret = fetch_data(&dsc, buf, &cnt, dump_function);
+		if (ret)
+			exit_err("failed to fetch_data (ret=%d)", ret);
+	}
+	debug("%d bytes successfully received (ret=%d)", cnt, ret);
+
+	ret = 0;
+
+out:
+	if (use_mmap)
+		munmap(map, buf_size);
+
+	/* Free the buffer used as device descriptor */
+	if (dsc.sbdata != NULL)
+		free(dsc.sbdata);
+
+	/* Release the file descriptor */
+	a4l_close(&dsc);
+
+	return ret;
+}
+
+int main(int argc, char *argv[])
+{
+	struct sched_param param = {.sched_priority = 99};
+	struct arguments args = {.argc = argc, .argv = argv};
+	int ret;
+
+	ret = pthread_setschedparam(pthread_self(), SCHED_FIFO, &param);
+	if (ret)
+		exit_err("pthread_setschedparam failed (ret=0x%x) ", ret);
+
+	ret = cmd_read(&args);
+	if (ret)
+		exit_err("cmd_read error (ret=0x%x) ", ret);
+
+	return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/cmd_write.c b/kernel/xenomai-v3.2.4/utils/analogy/cmd_write.c
new file mode 100644
index 0000000..7b8870f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/cmd_write.c
@@ -0,0 +1,551 @@
+/*
+ * Analogy for Linux, output command test program
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <getopt.h>
+#include <string.h>
+#include <rtdm/analogy.h>
+#include <fcntl.h>
+
+#define BUFFER_DEPTH 1024
+
+struct config {
+
+	/* Configuration parameters
+	   TODO: add real_time and use_mmap */
+
+	int verbose;
+
+	int subd;
+	char *str_chans;
+	unsigned int *chans;
+	int chans_count;
+	char *str_ranges;
+	unsigned long scans_count;
+	unsigned long wake_count;
+
+	char *filename;
+	FILE *input;
+
+	/* Analogy stuff */
+
+	a4l_desc_t dsc;
+	a4l_chinfo_t *cinfo;
+	a4l_rnginfo_t *rinfo;
+
+	/* Buffer stuff
+	   TODO: add buffer depth / size (useful for mmap) */
+	void *buffer;
+
+};
+
+/* --- Options / arguments part --- */
+
+struct option options[] = {
+	{"verbose", no_argument, NULL, 'v'},
+	{"device", required_argument, NULL, 'd'},
+	{"subdevice", required_argument, NULL, 's'},
+	{"scans-count", required_argument, NULL, 'S'},
+	{"channels", required_argument, NULL, 'c'},
+	{"range", required_argument, NULL, 'c'},
+	{"wake-count", required_argument, NULL, 'k'},
+	{"input", required_argument, NULL, 'i'},
+	{"help", no_argument, NULL, 'h'},
+	{0},
+};
+
+static void print_usage(void)
+{
+	fprintf(stdout, "usage:\tcmd_write [OPTS]\n");
+	fprintf(stdout, "\tOPTS:\t -v, --verbose: verbose output\n");
+	fprintf(stdout,
+		"\t\t -d, --device: "
+		"device filename (analogy0, analogy1, ...)\n");
+	fprintf(stdout, "\t\t -s, --subdevice: subdevice index\n");
+	fprintf(stdout, "\t\t -S, --scans-count: count of scan to perform\n");
+	fprintf(stdout,
+		"\t\t -c, --channels: "
+		"channels to use <i,j,...> (ex.: -c 0,1)\n");
+	fprintf(stdout,
+		"\t\t -R, --range: "
+		"range to use <min,max,unit> (ex.: -R 0,1,V)\n");
+	fprintf(stdout,
+		"\t\t -k, --wake-count: "
+		"space available before waking up the process\n");
+	fprintf(stdout,
+		"\t\t -i, --input: file to use for input  (default stdin) \n"
+		"\t\t\t      use wf_generate to create the file\n");
+
+	fprintf(stdout, "\t\t -h, --help: print this help\n");
+}
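+
+/*
+ * Illustrative use: feed the program with double precision samples
+ * produced by wf_generate (options omitted here), e.g.
+ *
+ *   wf_generate ... > wave.bin && cmd_write -d analogy0 -i wave.bin
+ *
+ * The device name and channel layout are board-specific.
+ */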
+
+/* --- Configuration related stuff --- */
+
+static int init_dsc_config(struct config *cfg)
+{
+	int err = 0;
+
+	/* Here we have to open the Analogy device file */
+	err = a4l_open(&cfg->dsc, cfg->filename);
+	if (err < 0) {
+		fprintf(stderr, "cmd_write: a4l_open %s failed (ret=%d)\n",
+			cfg->filename, err);
+		goto error;
+	}
+
+	/* Allocate a buffer so as to get more info (subd, chan, rng) */
+	cfg->dsc.sbdata = malloc(cfg->dsc.sbsize);
+	if (!cfg->dsc.sbdata) {
+		err = -ENOMEM;
+		fprintf(stderr, "cmd_write: malloc failed\n");
+		goto error;
+	}
+
+	/* Get the device architecture information */
+	err = a4l_fill_desc(&cfg->dsc);
+	if (err < 0) {
+		fprintf(stderr, "cmd_write: a4l_get_desc failed (err=%d)\n", err);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	if  (cfg->buffer)
+		free(cfg->buffer);
+
+	if (cfg->dsc.sbdata)
+		free(cfg->dsc.sbdata);
+
+	cfg->buffer = NULL;
+	cfg->dsc.sbdata = NULL;
+
+	return err;
+}
+
+static int init_chans_config(struct config *cfg)
+{
+	int err = 0;
+	int len, offset;
+	char *str_chans = cfg->str_chans;
+
+	/* Recover the number of arguments */
+	do {
+		cfg->chans_count++;
+		len = strlen(str_chans);
+		offset = strcspn(str_chans, ",");
+		str_chans += offset + 1;
+	} while (len != offset);
+
+	cfg->chans = malloc(cfg->chans_count * sizeof(int));
+	if (!cfg->chans) {
+		fprintf(stderr, "cmd_write: basic allocation failed\n");
+		err = -ENOMEM;
+		goto error;
+	}
+
+	/* rewind and start again. */
+	str_chans = cfg->str_chans;
+	cfg->chans_count = 0;
+
+	/* ...we recover the channels */
+	do {
+		cfg->chans_count++;
+		len = strlen(str_chans);
+		offset = strcspn(str_chans, ",");
+		if (sscanf(str_chans, "%u", &cfg->chans[cfg->chans_count - 1]) == 0) {
+			fprintf(stderr, "cmd_write: bad channels argument\n");
+			err = -EINVAL;
+			goto error;
+		}
+		str_chans += offset + 1;
+	} while (len != offset);
+
+	/* This program assumes that all the channels are identical, so
+	   we only keep a pointer to the chinfo structure of the first
+	   one */
+	err = a4l_get_chinfo(&cfg->dsc, cfg->subd, cfg->chans[0], &cfg->cinfo);
+	if (err < 0) {
+		fprintf(stderr, "cmd_write: channel info recovery failed (err=%d)\n",
+			err);
+		goto error;
+	}
+
+	return 0;
+
+error:
+	if (cfg->chans)
+		free(cfg->chans);
+
+	cfg->chans = NULL;
+
+	fprintf(stderr, "cmd_write: leaving %s in error\n", __FUNCTION__);
+
+	return err;
+}
+
+static int init_range_config(struct config *cfg)
+{
+	int index = 0, err = 0;
+	int len, offset;
+	int limits[2];
+	unsigned long unit;
+	char * str_ranges = cfg->str_ranges;
+
+	/* Convert min and max values */
+	do {
+		len = strlen(str_ranges);
+		offset = strcspn(str_ranges, ",");
+		if (sscanf(str_ranges, "%d", &limits[index++]) == 0) {
+			err = -EINVAL;
+			fprintf(stderr, "cmd_write: bad range min/max value\n");
+			goto out;
+		}
+		str_ranges += offset + 1;
+	} while (len != offset && index < 2);
+
+	/* Find the unit among Volt, Ampere, external or no unit */
+	if (!strcmp(str_ranges, "V"))
+		unit = A4L_RNG_VOLT_UNIT;
+	else if (!strcmp(str_ranges, "mA"))
+		unit = A4L_RNG_MAMP_UNIT;
+	else if (!strcmp(str_ranges, "ext"))
+		unit = A4L_RNG_EXT_UNIT;
+	else if (!strlen(str_ranges))
+		unit = A4L_RNG_NO_UNIT;
+	else {
+		err = -EINVAL;
+		fprintf(stderr, "cmd_write: bad range unit value\n");
+		goto out;
+	}
+
+	err = a4l_find_range(&cfg->dsc,
+				    cfg->subd,
+				    cfg->chans[0],
+				    unit, limits[0], limits[1], &cfg->rinfo);
+	if (err < 0) {
+		fprintf(stderr,
+			"cmd_write: no range found for %s\n", cfg->str_ranges);
+	} else
+		err = 0;
+
+out:
+	return err;
+}
+
+static void print_config(struct config *cfg)
+{
+	printf("cmd_write configuration:\n");
+	printf("\tRTDM device name: %s\n", cfg->filename);
+	printf("\tSubdevice index: %d\n", cfg->subd);
+	printf("\tSelected channels: %s\n", cfg->str_chans);
+	printf("\tSelected range: %s\n", cfg->str_ranges);
+	printf("\tScans count: %lu\n", cfg->scans_count);
+	printf("\tWake count: %lu\n", cfg->wake_count);
+}
+
+static void cleanup_config(struct config *cfg)
+{
+	if (cfg->buffer) {
+		free(cfg->buffer);
+		cfg->buffer = NULL;
+	}
+
+	if (cfg->dsc.sbdata) {
+		free(cfg->dsc.sbdata);
+		cfg->dsc.sbdata = NULL;
+	}
+
+	if (cfg->dsc.fd != -1) {
+		a4l_close(&cfg->dsc);
+		cfg->dsc.fd = -1;
+	}
+}
+
+static int init_config(struct config *cfg, int argc, char *argv[])
+{
+	int scan_size, err = 0;
+	FILE *ifd = NULL;
+
+	memset(cfg, 0, sizeof(struct config));
+	cfg->str_chans = "0,1";
+	cfg->str_ranges = "0,5,V";
+	cfg->filename = "analogy0";
+	cfg->dsc.fd = -1;
+
+	while ((err = getopt_long(argc,
+				  argv,
+				  "vd:s:S:c:R:k:i:h", options, NULL)) >= 0) {
+		switch (err) {
+		case 'v':
+			cfg->verbose = 1;
+			break;
+		case 'd':
+			cfg->filename = optarg;
+			break;
+		case 's':
+			cfg->subd = strtoul(optarg, NULL, 0);
+			break;
+		case 'S':
+			cfg->scans_count = strtoul(optarg, NULL, 0);
+			break;
+		case 'c':
+			cfg->str_chans = optarg;
+			break;
+		case 'R':
+			cfg->str_ranges = optarg;
+			break;
+		case 'k':
+			cfg->wake_count = strtoul(optarg, NULL, 0);
+			break;
+		case 'i':
+			ifd = fopen(optarg, "r");
+			if (!ifd)
+				fprintf(stderr, "cmd_write: cant open input file \n");
+			break;
+		case 'h':
+		default:
+			print_usage();
+			return -EINVAL;
+		}
+	}
+
+	cfg->input = ifd ? ifd : stdin;
+
+	/* Open the analogy device and retrieve pointers on the info
+	   structures */
+	err = init_dsc_config(cfg);
+	if (err < 0)
+		goto out;
+
+	/* Parse the channel option so as to know which and how many
+	   channels will be used */
+	err = init_chans_config(cfg);
+	if (err < 0)
+		goto out;
+
+	/* Find out the most suitable range for the acquisition */
+	err = init_range_config(cfg);
+	if (err < 0)
+		goto out;
+
+	/* Compute the width of a scan */
+	scan_size = cfg->chans_count * a4l_sizeof_chan(cfg->cinfo);
+	if (scan_size < 0) {
+		err = scan_size;
+		fprintf(stderr, "cmd_write: a4l_sizeof_chan failed (err=%d)\n", err);
+		goto out;
+	}
+
+	/* Allocate a temporary buffer
+	   TODO: implement mmap */
+	cfg->buffer = malloc(BUFFER_DEPTH * scan_size);
+	if (!cfg->buffer) {
+		fprintf(stderr, "cmd_write: malloc failed\n");
+		err = -ENOMEM;
+		goto out;
+	}
+
+	/* If stdin is a terminal, we can't read binary data from it */
+	if (isatty(fileno(cfg->input))) {
+		memset(cfg->buffer, 0, BUFFER_DEPTH * scan_size);
+		cfg->input = NULL;
+	}
+
+out:
+	if (err < 0)
+		cleanup_config(cfg);
+
+	return err;
+}
+
+/* --- Input management part --- */
+
+static int process_input(struct config *cfg, int *elements)
+{
+	int err = 0, filled = 0;
+
+	/* The return value of a4l_sizeof_chan() was already checked in
+	   init_config() so there is no need to do it twice */
+	int chan_size = a4l_sizeof_chan(cfg->cinfo);
+	int scan_size = cfg->chans_count * chan_size;
+
+	while (filled < BUFFER_DEPTH) {
+		int i;
+		double value;
+		char tmp[128];
+
+		/* stdin data are supposed to be double values from wf_generate  */
+		err = fread(&value, sizeof(double), 1, cfg->input);
+		if (err != 1 && !feof(cfg->input)) {
+			err = -errno;
+			fprintf(stderr,
+				"cmd_write: stdin IO error (err=%d)\n", err);
+			goto out;
+		}
+		else if (err == 0 && feof(cfg->input))
+			goto out;
+
+		/* the data is just for one channel */
+		err = a4l_dtoraw(cfg->cinfo, cfg->rinfo, tmp, &value, 1);
+		if (err < 0) {
+			fprintf(stderr,
+				"cmd_write: conversion "
+				"from stdin failed (err=%d)\n", err);
+			goto out;
+		}
+
+		/* so we have to duplicate the conversion if many
+		   channels are selected for the acquisition */
+		for (i = 0; i < cfg->chans_count; i++)
+			memcpy(cfg->buffer + filled * scan_size + i * chan_size,
+			       tmp, chan_size);
+		filled++;
+	}
+out:
+	if (err < 0)
+		return err;
+
+	*elements = filled;
+
+	fprintf(stderr, "cmd_write: converted %d doubles [each on %d bytes] \n",
+		filled, chan_size);
+
+	return 0;
+}
+
+/* --- Acquisition related stuff --- */
+static int run_acquisition(struct config *cfg)
+{
+	int err = 0, elements = BUFFER_DEPTH;
+
+	/* The return value of a4l_sizeof_chan() was already checked in
+	   init_config() so there is no need to do it twice */
+	int chan_size = a4l_sizeof_chan(cfg->cinfo);
+	int scan_size = cfg->chans_count * chan_size;
+
+	if (cfg->input) {
+		err = process_input(cfg, &elements);
+		if (err < 0)
+			return err;
+		if (elements == 0)
+			return -ENOENT;
+	}
+
+	fprintf(stderr, "cmd_write: write %d elements [%d bytes per element] on "
+			"%d channels \n", elements, chan_size, cfg->chans_count );
+
+	/* write data to the asynchronous buffer */
+	err = a4l_async_write(&cfg->dsc, cfg->buffer, elements * scan_size,
+		              A4L_INFINITE);
+	if (err < 0) {
+		fprintf(stderr, "cmd_write: a4l_async_write failed (%d) \n", err );
+		return err;
+	}
+
+	return 0;
+}
+
+static int init_acquisition(struct config *cfg)
+{
+	int err = 0;
+
+	a4l_cmd_t cmd = {
+		.idx_subd = cfg->subd,
+		.flags = 0,
+		.start_src = TRIG_INT,
+		.start_arg = 0,
+		.scan_begin_src = TRIG_TIMER,
+		.scan_begin_arg = 2000000, /* in ns */
+		.convert_src = TRIG_NOW,
+		.convert_arg = 0,
+		.scan_end_src = TRIG_COUNT,
+		.scan_end_arg = cfg->chans_count,
+		.stop_src = cfg->scans_count ? TRIG_COUNT : TRIG_NONE,
+		.stop_arg = cfg->scans_count,
+		.nb_chan = cfg->chans_count,
+		.chan_descs = cfg->chans
+	};
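+
+	/*
+	 * start_src = TRIG_INT follows the usual output pattern: the
+	 * command only arms the subdevice, the buffer is pre-filled by
+	 * run_acquisition() below, and the INTTRIG instruction finally
+	 * starts the device so it does not underrun at startup. stop_src
+	 * is bounded with TRIG_COUNT only when -S gave a scans count.
+	 */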
+
+	a4l_insn_t insn = {
+		.type = A4L_INSN_INTTRIG,
+		.idx_subd = cfg->subd,
+		.data_size = 0,
+	};
+
+	/* Cancel any former command which might be in progress */
+	a4l_snd_cancel(&cfg->dsc, cfg->subd);
+
+	err = a4l_set_wakesize(&cfg->dsc, cfg->wake_count);
+	if (err < 0) {
+		fprintf(stderr,"cmd_read: a4l_set_wakesize failed (ret=%d)\n", err);
+		goto out;
+	}
+
+	/* Send the command so as to initialize the asynchronous acquisition */
+	err = a4l_snd_command(&cfg->dsc, &cmd);
+	if (err < 0) {
+		fprintf(stderr, "cmd_write: a4l_snd_command failed (err=%d)\n", err);
+		goto out;
+	}
+
+	/* Fill the asynchronous buffer with data */
+	err = run_acquisition(cfg);
+	if (err < 0)
+		goto out;
+
+	/* ...before triggering the actual start of the device output */
+	err = a4l_snd_insn(&cfg->dsc, &insn);
+
+out:
+	return err;
+}
+
+int main(int argc, char *argv[])
+{
+	int err = 0;
+	struct config cfg;
+
+	err = init_config(&cfg, argc, argv);
+	if (err < 0)
+		goto out;
+
+	if (cfg.verbose)
+		print_config(&cfg);
+
+	err = init_acquisition(&cfg);
+	if (err < 0)
+		goto out;
+
+	while ((err = run_acquisition(&cfg)) == 0);
+
+	err = (err == -ENOENT) ? 0 : err;
+
+	sleep(1);
+
+out:
+	cleanup_config(&cfg);
+
+	return err < 0 ? 1 : 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/insn_bits.c b/kernel/xenomai-v3.2.4/utils/analogy/insn_bits.c
new file mode 100644
index 0000000..f3ff800
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/insn_bits.c
@@ -0,0 +1,227 @@
+/**
+ * Analogy for Linux, instruction bits test program
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdint.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <getopt.h>
+#include <string.h>
+#include <rtdm/analogy.h>
+
+#define FILENAME "analogy0"
+
+static char *filename = FILENAME;
+static int verbose;
+static int idx_subd = -1;
+
+struct option insn_bits_opts[] = {
+	{"verbose", no_argument, NULL, 'v'},
+	{"device", required_argument, NULL, 'd'},
+	{"subdevice", required_argument, NULL, 's'},
+	{"help", no_argument, NULL, 'h'},
+	{0},
+};
+
+static void do_print_usage(void)
+{
+	fprintf(stdout, "usage:\tinsn_bits [OPTS] <bits_values> <mask>\n");
+	fprintf(stdout, "\tOPTS:\t -v, --verbose: verbose output\n");
+	fprintf(stdout,
+		"\t\t -d, --device: device filename (analogy0, analogy1, ...)\n");
+	fprintf(stdout, "\t\t -s, --subdevice: subdevice index\n");
+	fprintf(stdout, "\t\t -h, --help: print this help\n");
+}
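+
+/*
+ * Illustrative invocation (without -s the first digital subdevice is
+ * auto-detected; values are board-specific):
+ *
+ *   insn_bits -d analogy0 0x5 0x7
+ *
+ * synchronously writes the pattern 0x5 on the lines selected by mask
+ * 0x7 and prints the lines read back.
+ */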
+
+int main(int argc, char *argv[])
+{
+	int i = 0, err = 0;
+	a4l_desc_t dsc = { .sbdata = NULL };
+	a4l_sbinfo_t *sbinfo;
+	int scan_size, value, mask;
+
+	/* Compute arguments */
+	while ((err = getopt_long(argc,
+				  argv,
+				  "vd:s:h", insn_bits_opts,
+				  NULL)) >= 0) {
+		switch (err) {
+		case 'v':
+			verbose = 1;
+			break;
+		case 'd':
+			filename = optarg;
+			break;
+		case 's':
+			idx_subd = strtoul(optarg, NULL, 0);
+			break;
+		case 'h':
+		default:
+			do_print_usage();
+			return 0;
+		}
+	}
+
+	value = (argc - optind > 0) ? strtoul(argv[optind], NULL, 0) : 0;
+	mask = (argc - optind > 1) ? strtoul(argv[optind + 1], NULL, 0) : 0;
+
+	/* Open the device */
+	err = a4l_open(&dsc, filename);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_bits: a4l_open %s failed (err=%d)\n",
+			filename, err);
+		return err;
+	}
+
+	if (verbose != 0) {
+		printf("insn_bits: device %s opened (fd=%d)\n", filename,
+		       dsc.fd);
+		printf("insn_bits: basic descriptor retrieved\n");
+		printf("\t subdevices count = %d\n", dsc.nb_subd);
+		printf("\t read subdevice index = %d\n", dsc.idx_read_subd);
+		printf("\t write subdevice index = %d\n", dsc.idx_write_subd);
+	}
+
+	/* Allocate a buffer so as to get more info (subd, chan, rng) */
+	dsc.sbdata = malloc(dsc.sbsize);
+	if (dsc.sbdata == NULL) {
+		err = -ENOMEM;
+		fprintf(stderr, "insn_bits: info buffer allocation failed\n");
+		goto out_insn_bits;
+	}
+
+	/* Get this data */
+	err = a4l_fill_desc(&dsc);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_bits: a4l_fill_desc failed (err=%d)\n", err);
+		goto out_insn_bits;
+	}
+
+	if (verbose != 0)
+		printf("insn_bits: complex descriptor retrieved\n");
+
+	/* If no subdevice index was set, select the first digital
+	   subdevice found */
+	while (idx_subd == -1 && i < dsc.nb_subd) {
+
+		err = a4l_get_subdinfo(&dsc, i, &sbinfo);
+		if (err < 0) {
+			fprintf(stderr,
+				"insn_bits: get_sbinfo(%d) failed (err = %d)\n",
+				i, err);
+			goto out_insn_bits;
+		}
+
+		if ((sbinfo->flags & A4L_SUBD_TYPES) == A4L_SUBD_DIO ||
+		    (sbinfo->flags & A4L_SUBD_TYPES) == A4L_SUBD_DI ||
+		    (sbinfo->flags & A4L_SUBD_TYPES) == A4L_SUBD_DO) {
+			idx_subd = i;
+		}
+
+		i++;
+	}
+
+	if (idx_subd == -1) {
+		fprintf(stderr, "insn_bits: no digital subdevice available\n");
+		err = -EINVAL;
+		goto out_insn_bits;
+	}
+
+	if (verbose != 0)
+		printf("insn_bits: selected subdevice index = %d\n", idx_subd);
+
+	/* We must check that the subdevice is really a digital one
+	   (in case the subdevice index was set with the -s option) */
+	err = a4l_get_subdinfo(&dsc, idx_subd, &sbinfo);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_bits: get_sbinfo(%d) failed (err = %d)\n",
+			idx_subd, err);
+		err = -EINVAL;
+		goto out_insn_bits;
+	}
+
+	if ((sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_DIO &&
+	    (sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_DI &&
+	    (sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_DO) {
+		fprintf(stderr,
+			"insn_bits: selected subdevice is not digital\n");
+		err = -EINVAL;
+		goto out_insn_bits;
+	}
+
+	/* Set the data size to read / write */
+	scan_size = a4l_sizeof_subd(sbinfo);
+
+	if ((sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_DI) {
+		printf("insn_bits: mask = 0x%x\n", mask);
+		printf("insn_bits: value = 0x%x\n", value);
+	}
+
+	/* Handle the little endian case when the scan size is < 32 bits */
+	if (scan_size == sizeof(uint8_t)) {
+		mask *= 0x01010101;
+		value *= 0x01010101;
+	}
+	else if (scan_size == sizeof(uint16_t)) {
+		mask *= 0x00010001;
+		value *= 0x00010001;
+	}
+
+	/* Perform the synchronous operation */
+	err = a4l_sync_dio(&dsc, idx_subd, &mask, &value);
+
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_bits: a4l_sync_dio() failed (err=%d)\n", err);
+		goto out_insn_bits;
+	}
+
+	if (scan_size == sizeof(uint8_t)) {
+		uint8_t tmp;
+		memcpy(&tmp, &value, sizeof(uint8_t));
+		value = (int)tmp;
+	}
+	else if (scan_size == sizeof(uint16_t)) {
+		uint16_t tmp;
+		memcpy(&tmp, &value, sizeof(uint16_t));
+		value = (int)tmp;
+	}
+
+	if ((sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_DO)
+		printf("insn_bits: result = 0x%x\n", value);
+	else
+		printf("insn_bits: operation succeeded\n");
+
+out_insn_bits:
+
+	/* Free the information buffer */
+	if (dsc.sbdata != NULL)
+		free(dsc.sbdata);
+
+	/* Release the file descriptor */
+	a4l_close(&dsc);
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/insn_read.c b/kernel/xenomai-v3.2.4/utils/analogy/insn_read.c
new file mode 100644
index 0000000..8aa0740
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/insn_read.c
@@ -0,0 +1,462 @@
+/**
+ * Analogy for Linux, instruction read test program
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <getopt.h>
+#include <rtdm/analogy.h>
+
+#define FILENAME "analogy0"
+#define BUF_SIZE 10000
+#define SCAN_CNT 10
+
+static unsigned char buf[BUF_SIZE];
+static char *filename = FILENAME;
+static int verbose;
+static int idx_subd = -1;
+static int idx_chan;
+static int idx_rng = -1;
+static unsigned int scan_size = SCAN_CNT;
+static char *calibration_file = NULL;
+
+struct option insn_read_opts[] = {
+	{"verbose", no_argument, NULL, 'v'},
+	{"device", required_argument, NULL, 'd'},
+	{"subdevice", required_argument, NULL, 's'},
+	{"scan-count", required_argument, NULL, 'S'},
+	{"channel", required_argument, NULL, 'c'},
+	{"range", required_argument, NULL, 'R'},
+	{"cal", required_argument, NULL, 'y'},
+	{"raw", no_argument, NULL, 'w'},
+	{"help", no_argument, NULL, 'h'},
+	{0},
+};
+
+static void do_print_usage(void)
+{
+	fprintf(stdout, "usage:\tinsn_read [OPTS]\n");
+	fprintf(stdout, "\tOPTS:\t -v, --verbose: verbose output\n");
+	fprintf(stdout,
+		"\t\t -d, --device: device filename (analogy0, analogy1, ...)\n");
+	fprintf(stdout, "\t\t -s, --subdevice: subdevice index\n");
+	fprintf(stdout, "\t\t -S, --scan-count: count of scan to perform\n");
+	fprintf(stdout, "\t\t -c, --channel: channel to use\n");
+	fprintf(stdout, "\t\t -R, --range: range to use\n");
+	fprintf(stdout, "\t\t -w, --raw: dump data in raw format\n");
+	fprintf(stdout, "\t\t -y, --cal: /path/to/calibration.bin \n");
+	fprintf(stdout, "\t\t -h, --help: print this help\n");
+}
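+
+/*
+ * Illustrative invocation (channel and range indexes are
+ * board-specific):
+ *
+ *   insn_read -d analogy0 -c 0 -S 10 -R 0
+ *
+ * reads 10 samples from channel 0 and prints them converted through
+ * range 0; without -R the raw hexadecimal values are printed.
+ */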
+
+static int dump_raw(a4l_desc_t *dsc, unsigned char *buf, int size)
+{
+	return fwrite(buf, size, 1, stdout);
+}
+
+static int dump_text(a4l_desc_t *dsc, unsigned char *buf, int size)
+{
+	int err = 0, width, tmp_size = 0;
+	char *fmt;
+	a4l_chinfo_t *chan;
+
+	/* Retrieve the subdevice data size */
+	err = a4l_get_chinfo(dsc, idx_subd, idx_chan, &chan);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: info for channel %d "
+			"on subdevice %d not available (err=%d)\n",
+			idx_chan, idx_subd, err);
+		goto out;
+	}
+
+	width = a4l_sizeof_chan(chan);
+	if (width < 0) {
+		fprintf(stderr,
+			"insn_read: incoherent info for channel %d\n",
+			idx_chan);
+		err = width;
+		goto out;
+	}
+
+	switch(width) {
+	case 1:
+		fmt = "0x%02x\n";
+		break;
+	case 2:
+		fmt = "0x%04x\n";
+		break;
+	case 4:
+	default:
+		fmt = "0x%08x\n";
+		break;
+	}
+
+	while (size - tmp_size > 0) {
+		unsigned long values[64];
+		int i, tmp_cnt = ((size - tmp_size) / width > 64) ?
+			64 : ((size - tmp_size) / width);
+
+		err = a4l_rawtoul(chan, values, buf + tmp_size, tmp_cnt);
+		if (err < 0)
+			goto out;
+
+		for (i = 0; i < tmp_cnt; i++)
+			fprintf(stdout, fmt, values[i]);
+
+		tmp_size += tmp_cnt * width;
+	}
+
+out:
+	return err;
+}
+
+static int dump_converted(a4l_desc_t *dsc, unsigned char *buf, int size)
+{
+	int err = 0, width, tmp_size = 0;
+	a4l_chinfo_t *chan;
+	a4l_rnginfo_t *rng;
+
+	/* Retrieve the channel info */
+	err = a4l_get_chinfo(dsc, idx_subd, idx_chan, &chan);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: info for channel %d "
+			"on subdevice %d not available (err=%d)\n",
+			idx_chan, idx_subd, err);
+		goto out;
+	}
+
+	/* Retrieve the range info */
+	err = a4l_get_rnginfo(dsc, idx_subd, idx_chan, idx_rng, &rng);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: failed to recover range descriptor\n");
+		goto out;
+	}
+
+	width = a4l_sizeof_chan(chan);
+	if (width < 0) {
+		fprintf(stderr,
+			"insn_read: incoherent info for channel %d\n",
+			idx_chan);
+		err = width;
+		goto out;
+	}
+
+	fprintf(stdout, "Non Calibrated values: \n");
+	while (size - tmp_size > 0) {
+		double values[64];
+		int i, tmp_cnt = ((size - tmp_size) / width > 64) ?
+			64 : ((size - tmp_size) / width);
+
+		err = a4l_rawtod(chan, rng, values, buf + tmp_size, tmp_cnt);
+		if (err < 0)
+			goto out;
+
+		for (i = 0; i < tmp_cnt; i++) {
+			fprintf(stdout, "%F\n", values[i]);
+		}
+
+		tmp_size += tmp_cnt * width;
+	}
+
+out:
+	return err;
+}
+
+static int dump_calibrated(a4l_desc_t *dsc, unsigned char *buf, int size)
+{
+	struct a4l_calibration_data cal_info;
+	struct a4l_polynomial converter;
+	int err = 0, width, tmp_size = 0;
+	a4l_chinfo_t *chan;
+	a4l_rnginfo_t *rng;
+
+
+	/* Retrieve the channel info */
+	err = a4l_get_chinfo(dsc, idx_subd, idx_chan, &chan);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: info for channel %d "
+			"on subdevice %d not available (err=%d)\n",
+			idx_chan, idx_subd, err);
+		goto out;
+	}
+
+	/* Retrieve the range info */
+	err = a4l_get_rnginfo(dsc, idx_subd, idx_chan, idx_rng, &rng);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: failed to recover range descriptor\n");
+		goto out;
+	}
+
+	width = a4l_sizeof_chan(chan);
+	if (width < 0) {
+		fprintf(stderr,
+			"insn_read: incoherent info for channel %d\n",
+			idx_chan);
+		err = width;
+		goto out;
+	}
+
+	err = a4l_read_calibration_file(calibration_file, &cal_info);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: error reading the calibration file \n");
+		goto out;
+	}
+
+	err = a4l_get_softcal_converter(&converter, idx_subd, idx_chan, idx_rng,
+		                        &cal_info);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: failed to get the softcal converter  \n");
+		goto out;
+	}
+
+	fprintf(stdout, "Calibrated values: \n");
+	while (size - tmp_size > 0) {
+		double values[64];
+		int i, tmp_cnt = ((size - tmp_size) / width > 64) ?
+			64 : ((size - tmp_size) / width);
+
+		err = a4l_rawtodcal(chan, values, buf + tmp_size, tmp_cnt,
+			            &converter);
+		if (err < 0)
+			goto out;
+
+		for (i = 0; i < tmp_cnt; i++)
+			fprintf(stdout, "%F\n", values[i]);
+
+		tmp_size += tmp_cnt * width;
+	}
+
+out:
+	return err;
+}
+
+int main(int argc, char *argv[])
+{
+	int err = 0;
+	unsigned int cnt = 0;
+	a4l_desc_t dsc = { .sbdata = NULL };
+	a4l_sbinfo_t *sbinfo;
+	a4l_chinfo_t *chinfo;
+	a4l_rnginfo_t *rnginfo;
+
+	int (*dump_function) (a4l_desc_t *, unsigned char *, int) = dump_text;
+
+	/* Compute arguments */
+	while ((err = getopt_long(argc,
+				  argv,
+				  "vrd:s:S:c:R:y:wh", insn_read_opts,
+				  NULL)) >= 0) {
+		switch (err) {
+		case 'v':
+			verbose = 1;
+			break;
+		case 'd':
+			filename = optarg;
+			break;
+		case 's':
+			idx_subd = strtoul(optarg, NULL, 0);
+			break;
+		case 'S':
+			scan_size = strtoul(optarg, NULL, 0);
+			break;
+		case 'c':
+			idx_chan = strtoul(optarg, NULL, 0);
+			break;
+		case 'R':
+			idx_rng = strtoul(optarg, NULL, 0);
+			dump_function = dump_converted;
+			break;
+		case 'w':
+			dump_function = dump_raw;
+			break;
+		case 'y':
+			dump_function = dump_calibrated;
+			calibration_file = optarg;
+			break;
+		case 'h':
+		default:
+			do_print_usage();
+			return 0;
+		}
+	}
+
+	if (isatty(STDOUT_FILENO) && dump_function == dump_raw) {
+		fprintf(stderr,
+			"insn_read: cannot dump raw data on a terminal\n\n");
+		return -EINVAL;
+	}
+
+	/* Open the device */
+	err = a4l_open(&dsc, filename);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: a4l_open %s failed (err=%d)\n",
+			filename, err);
+		return err;
+	}
+
+	if (verbose != 0) {
+		printf("insn_read: device %s opened (fd=%d)\n", filename,
+		       dsc.fd);
+		printf("insn_read: basic descriptor retrieved\n");
+		printf("\t subdevices count = %d\n", dsc.nb_subd);
+		printf("\t read subdevice index = %d\n", dsc.idx_read_subd);
+		printf("\t write subdevice index = %d\n", dsc.idx_write_subd);
+	}
+
+	/* Allocate a buffer so as to get more info (subd, chan, rng) */
+	dsc.sbdata = malloc(dsc.sbsize);
+	if (dsc.sbdata == NULL) {
+		err = -ENOMEM;
+		fprintf(stderr, "insn_read: info buffer allocation failed\n");
+		goto out_insn_read;
+	}
+
+	/* Get this data */
+	err = a4l_fill_desc(&dsc);
+	if (err < 0) {
+		fprintf(stderr, "insn_read: a4l_fill_desc failed (err=%d)\n",
+			err);
+		goto out_insn_read;
+	}
+
+	if (verbose != 0)
+		printf("insn_read: complex descriptor retrieved\n");
+
+	/* If no subdevice index was set, look for an analog input
+	   subdevice */
+	if (idx_subd == -1)
+		idx_subd = dsc.idx_read_subd;
+
+	if (idx_subd == -1) {
+		fprintf(stderr,
+			"insn_read: no analog input subdevice available\n");
+		err = -EINVAL;
+		goto  out_insn_read;
+	}
+
+	if (verbose != 0)
+		printf("insn_read: selected subdevice index = %d\n", idx_subd);
+
+	/* We must check that the subdevice is really an AI one
+	   (in case the subdevice index was set with the -s option) */
+	err = a4l_get_subdinfo(&dsc, idx_subd, &sbinfo);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: get_sbinfo(%d) failed (err = %d)\n",
+			idx_subd, err);
+		err = -EINVAL;
+		goto out_insn_read;
+	}
+
+	if ((sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_AI) {
+		fprintf(stderr,
+			"insn_read: wrong subdevice selected "
+			"(not an analog input)\n");
+		err = -EINVAL;
+		goto out_insn_read;
+	}
+
+	if (idx_rng >= 0) {
+
+		err = a4l_get_rnginfo(&dsc,
+				      idx_subd, idx_chan, idx_rng, &rnginfo);
+		if (err < 0) {
+			fprintf(stderr,
+				"insn_read: failed to recover range descriptor\n");
+			goto out_insn_read;
+		}
+
+		if (verbose != 0) {
+			printf("insn_read: range descriptor retrieved\n");
+			printf("\t min = %ld\n", rnginfo->min);
+			printf("\t max = %ld\n", rnginfo->max);
+		}
+	}
+
+	/* Retrieve the subdevice data size */
+	err = a4l_get_chinfo(&dsc, idx_subd, idx_chan, &chinfo);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_read: info for channel %d on subdevice %d not available (err=%d)\n",
+			idx_chan, idx_subd, err);
+		goto out_insn_read;
+	}
+
+	/* Set the data size to read */
+	scan_size *= a4l_sizeof_chan(chinfo);
+
+	if (verbose != 0) {
+		printf("insn_read: channel width is %u bits\n",
+		       chinfo->nb_bits);
+		printf("insn_read: global scan size is %u\n", scan_size);
+	}
+
+	while (cnt < scan_size) {
+		int tmp = (scan_size - cnt) < BUF_SIZE ?
+			(scan_size - cnt) : BUF_SIZE;
+
+		/* Perform the synchronous read */
+		err = a4l_sync_read(&dsc,
+				    idx_subd, CHAN(idx_chan), 0, buf, tmp);
+
+		if (err < 0) {
+			fprintf(stderr,
+				"insn_read: a4l_sync_read failed (err=%d)\n",
+				err);
+			goto out_insn_read;
+		}
+
+		/* Dump the read data */
+		tmp = dump_function(&dsc, buf, err);
+		if (tmp < 0) {
+			err = tmp;
+			goto out_insn_read;
+		}
+
+		/* Update the count */
+		cnt += err;
+	}
+
+	if (verbose != 0)
+		printf("insn_read: %u bytes successfully received\n", cnt);
+
+	err = 0;
+
+out_insn_read:
+
+	/* Free the information buffer */
+	if (dsc.sbdata != NULL)
+		free(dsc.sbdata);
+
+	/* Release the file descriptor */
+	a4l_close(&dsc);
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/insn_write.c b/kernel/xenomai-v3.2.4/utils/analogy/insn_write.c
new file mode 100644
index 0000000..b2bbf66
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/insn_write.c
@@ -0,0 +1,279 @@
+/*
+ * Analogy for Linux, instruction write test program
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <sys/mman.h>
+#include <errno.h>
+#include <getopt.h>
+#include <rtdm/analogy.h>
+
+/* Ten triggered scans by default */
+#define SCAN_CNT 10
+
+#define FILENAME "analogy0"
+
+#define BUF_SIZE 10000
+
+static int value = 0;
+static double dvalue = 0;
+static char *filename = FILENAME;
+static int verbose;
+static int idx_subd = -1;
+static int idx_chan;
+static int idx_rng = -1;
+
+struct option insn_write_opts[] = {
+	{"verbose", no_argument, NULL, 'v'},
+	{"device", required_argument, NULL, 'd'},
+	{"subdevice", required_argument, NULL, 's'},
+	{"scan-count", required_argument, NULL, 'S'},
+	{"channel", required_argument, NULL, 'c'},
+	{"range", required_argument, NULL, 'R'},
+	{"help", no_argument, NULL, 'h'},
+	{0},
+};
+
+static void do_print_usage(void)
+{
+	fprintf(stdout, "usage:\tinsn_write [OPTS]\n");
+	fprintf(stdout, "\tOPTS:\t -v, --verbose: verbose output\n");
+	fprintf(stdout,
+		"\t\t -d, --device: "
+		"device filename (analogy0, analogy1, ...)\n");
+	fprintf(stdout, "\t\t -s, --subdevice: subdevice index\n");
+	fprintf(stdout, "\t\t -c, --channel: channel to use\n");
+	fprintf(stdout, "\t\t -R, --range: range to use\n");
+	fprintf(stdout, "\t\t -V, --value: value to write\n");
+	fprintf(stdout, "\t\t -h, --help: print this help\n");
+}
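+
+/*
+ * Illustrative invocation (channel and range indexes are
+ * board-specific):
+ *
+ *   insn_write -d analogy0 -c 0 -R 0 -V 2.5
+ *
+ * converts 2.5 through range 0 and writes the resulting raw sample;
+ * without -R the -V argument is taken as a raw integer value.
+ */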
+
+int main(int argc, char *argv[])
+{
+	int err = 0;
+	a4l_desc_t dsc = { .sbdata = NULL };
+	a4l_sbinfo_t *sbinfo;
+	a4l_chinfo_t *chinfo;
+	a4l_rnginfo_t *rnginfo;
+	unsigned int scan_size;
+
+	/* Compute arguments */
+	while ((err = getopt_long(argc,
+				  argv,
+				  "vd:s:c:R:V:h", insn_write_opts,
+				  NULL)) >= 0) {
+		switch (err) {
+		case 'v':
+			verbose = 1;
+			break;
+		case 'd':
+			filename = optarg;
+			break;
+		case 's':
+			idx_subd = strtoul(optarg, NULL, 0);
+			break;
+		case 'c':
+			idx_chan = strtoul(optarg, NULL, 0);
+			break;
+		case 'R':
+			idx_rng = strtoul(optarg, NULL, 0);
+			break;
+		case 'V':
+			/* Do not perform the conversion until we know
+			   which variable we need */
+			break;
+		case 'h':
+		default:
+			do_print_usage();
+			return 0;
+		}
+	}
+
+	/* Restart the argument scanning */
+	optind = 1;
+
+	while ((err = getopt_long(argc,
+				  argv,
+				  "vrd:s:c:R:V:h", insn_write_opts,
+				  NULL)) >= 0) {
+		switch (err) {
+		case 'V':
+			if (idx_rng < 0)
+				value = (int)strtoul(optarg, NULL, 0);
+			else
+				dvalue = strtod(optarg, NULL);
+		}
+	}
+
+	/* Open the device */
+	err = a4l_open(&dsc, filename);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_write: a4l_open %s failed (err=%d)\n",
+			filename, err);
+		return err;
+	}
+
+	if (verbose != 0) {
+		printf("insn_write: device %s opened (fd=%d)\n", filename,
+		       dsc.fd);
+		printf("insn_write: basic descriptor retrieved\n");
+		printf("\t subdevices count = %d\n", dsc.nb_subd);
+		printf("\t read subdevice index = %d\n", dsc.idx_read_subd);
+		printf("\t write subdevice index = %d\n", dsc.idx_write_subd);
+	}
+
+	/* Allocate a buffer so as to get more info (subd, chan, rng) */
+	dsc.sbdata = malloc(dsc.sbsize);
+	if (dsc.sbdata == NULL) {
+		err = -ENOMEM;
+		fprintf(stderr, "insn_write: info buffer allocation failed\n");
+		goto out_insn_write;
+	}
+
+	/* Get this data */
+	err = a4l_fill_desc(&dsc);
+	if (err < 0) {
+		fprintf(stderr, "insn_write: a4l_fill_desc failed (err=%d)\n",
+			err);
+		goto out_insn_write;
+	}
+
+	if (verbose != 0)
+		printf("insn_write: complex descriptor retrieved\n");
+
+	/* If no subdevice index was set, look for an analog output
+	   subdevice */
+	if (idx_subd == -1)
+		idx_subd = dsc.idx_write_subd;
+
+	if (idx_subd == -1) {
+		fprintf(stderr,
+			"insn_write: no analog output subdevice available\n");
+		err = -EINVAL;
+		goto  out_insn_write;
+	}
+
+	if (verbose != 0)
+		printf("insn_write: selected subdevice index = %d\n", idx_subd);
+
+	/* We must check that the subdevice is really an AO one
+	   (in case the subdevice index was set with the -s option) */
+	err = a4l_get_subdinfo(&dsc, idx_subd, &sbinfo);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_write: get_sbinfo(%d) failed (err = %d)\n",
+			idx_subd, err);
+		err = -EINVAL;
+		goto out_insn_write;
+	}
+
+	if ((sbinfo->flags & A4L_SUBD_TYPES) != A4L_SUBD_AO) {
+		fprintf(stderr,
+			"insn_write: wrong subdevice selected "
+			"(not an analog output)\n");
+		err = -EINVAL;
+		goto out_insn_write;
+	}
+
+	if (idx_rng >= 0) {
+
+		err = a4l_get_rnginfo(&dsc,
+				      idx_subd, idx_chan, idx_rng, &rnginfo);
+		if (err < 0) {
+			fprintf(stderr,
+				"insn_write: failed to recover range descriptor\n");
+			goto out_insn_write;
+		}
+
+		if (verbose != 0) {
+			printf("insn_write: range descriptor retrieved\n");
+			printf("\t min = %ld\n", rnginfo->min);
+			printf("\t max = %ld\n", rnginfo->max);
+		}
+	}
+
+	/* Retrieve the subdevice data size */
+	err = a4l_get_chinfo(&dsc, idx_subd, idx_chan, &chinfo);
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_write: info for channel %d on subdevice %d "
+			"not available (err=%d)\n",
+			idx_chan, idx_subd, err);
+		goto out_insn_write;
+	}
+
+	/* Set the data size to write */
+	scan_size = (chinfo->nb_bits % 8 == 0) ?
+		chinfo->nb_bits / 8 : (chinfo->nb_bits / 8) + 1;
+
+	if (verbose != 0) {
+		printf("insn_write: channel width is %u bits\n",
+		       chinfo->nb_bits);
+		printf("insn_write: global scan size is %u\n", scan_size);
+	}
+
+	/* If a range was selected, convert the sample */
+	if (idx_rng >= 0) {
+		err = a4l_dtoraw(chinfo, rnginfo, &value, &dvalue, 1);
+		if (err < 0) {
+			fprintf(stderr,
+				"insn_write: data conversion failed (err=%d)\n",
+				err);
+			goto out_insn_write;
+		}
+
+		if (verbose != 0)
+			printf("insn_write: writing value %F (raw=0x%x)\n",
+			       dvalue, value);
+
+	} else if (verbose != 0)
+		printf("insn_write: writing raw value 0x%x\n", value);
+
+	/* Handle the little endian case when the channel width is < 32 bits */
+	if (scan_size == sizeof(char))
+		value *= 0x01010101;
+	else if (scan_size == sizeof(short))
+		value *= 0x00010001;
+
+	/* Perform the write operation */
+	err = a4l_sync_write(&dsc,
+			     idx_subd, CHAN(idx_chan), 0, &value, scan_size);
+
+	if (err < 0) {
+		fprintf(stderr,
+			"insn_write: a4l_sync_write failed (err=%d)\n", err);
+		goto out_insn_write;
+	}
+
+	if (verbose != 0)
+		printf("insn_write: %u bytes successfully sent\n", scan_size);
+
+out_insn_write:
+
+	/* Free the information buffer */
+	if (dsc.sbdata != NULL)
+		free(dsc.sbdata);
+
+	/* Release the file descriptor */
+	a4l_close(&dsc);
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.c b/kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.c
new file mode 100644
index 0000000..2281bcf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.c
@@ -0,0 +1,177 @@
+/*
+ * Analogy for Linux, test program for waveform generation
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#include <stdio.h>
+#include <stdlib.h>
+#include <assert.h>
+#include <errno.h>
+#include <math.h>
+
+#include "wf_facilities.h"
+
+#ifndef PI
+#define PI 3.14159265358979323846
+#endif
+
+void a4l_wf_init_sine(struct waveform_config *config, double *values)
+{
+	int i;
+
+	double ratio = config->wf_frequency / config->spl_frequency;
+
+	for (i = 0; i < config->spl_count; i++) {
+
+		values[i] = config->wf_offset -
+			config->wf_amplitude / 2 +
+			0.5 * config->wf_amplitude * cos(i * 2 * PI * ratio);
+	}
+}
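+
+/*
+ * Since cos() spans [-1, 1], each sample above lies in
+ * [wf_offset - wf_amplitude, wf_offset]: the offset sets the upper
+ * bound of the generated sine rather than its mean value.
+ */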
+
+void a4l_wf_init_sawtooth(struct waveform_config *config, double *values)
+{
+	int i;
+
+	double ratio = config->wf_frequency / config->spl_frequency;
+
+	for (i = 0; i < config->spl_count; i++) {
+
+		int period_idx = (int)floor(i * ratio);
+
+		values[i] = config->wf_offset -
+			config->wf_amplitude / 2 -
+			period_idx * config->wf_amplitude +
+			i * ratio * config->wf_amplitude;
+	}
+}
+
+void a4l_wf_init_triangular(struct waveform_config *config, double *values)
+{
+	int i;
+
+	double ratio = config->wf_frequency / config->spl_frequency;
+
+	for (i = 0; i < config->spl_count; i++) {
+
+		int period_idx = (int)floor(i * ratio);
+		int half_period_idx = (int)floor(i * 2 * ratio);
+		int rise = ((half_period_idx % 2) == 0) ? 1 : 0;
+
+		if (rise) {
+			values[i] = config->wf_offset -
+				config->wf_amplitude / 2 -
+				2 * period_idx * config->wf_amplitude +
+				2 * i * ratio * config->wf_amplitude;
+		} else {
+			values[i] = config->wf_offset -
+				config->wf_amplitude / 2 +
+				2 * (period_idx + 1) * config->wf_amplitude -
+				2 * i * ratio * config->wf_amplitude;
+		}
+	}
+}
+
+void a4l_wf_init_steps(struct waveform_config *config, double *values)
+{
+	int i;
+
+	double ratio = config->wf_frequency / config->spl_frequency;
+
+	for (i = 0; i < config->spl_count; i++) {
+		int half_period_idx = (int)floor(i * 2 * ratio);
+		int even = (half_period_idx % 2 == 0);
+
+		values[i] = config->wf_offset -
+			config->wf_amplitude / 2 + even * config->wf_amplitude;
+	}
+}
+
+void a4l_wf_set_sample_count(struct waveform_config *config)
+{
+	int sample_count = MIN_SAMPLE_COUNT;
+	int best_count = MIN_SAMPLE_COUNT;
+	double lowest_diff = INFINITY;
+
+	while (sample_count < MAX_SAMPLE_COUNT) {
+
+		double ratio = (double)sample_count *
+			(config->wf_frequency / config->spl_frequency);
+		int ceiling = ceil(ratio);
+		double diff = (double)ceiling - ratio;
+
+		assert(diff >= 0);
+
+		if (diff < lowest_diff) {
+			lowest_diff = diff;
+			best_count = sample_count;
+		}
+
+		if (diff == 0)
+			break;
+
+		sample_count++;
+	}
+
+	if (lowest_diff != 0) {
+		fprintf(stderr,
+			"Warning: unable to create a contiguous signal\n");
+		fprintf(stderr, "Warning: an approximation is performed\n");
+	}
+
+	config->spl_count = best_count;
+}
+
+int a4l_wf_check_config(struct waveform_config *config)
+{
+
+	if (config->wf_amplitude == 0)
+		fprintf(stderr, "Warning: the signal will be constant\n");
+
+	if (config->wf_frequency * 2 > config->spl_frequency) {
+		fprintf(stderr,
+			"Error: the sampling frequency is too low\n");
+		fprintf(stderr,
+			"Error: sampling frequency >= 2 * signal frequency\n");
+		return -EINVAL;
+	}
+
+	/* TODO: check with find_range */
+
+	return 0;
+}
+
+static void (* init_values[])(struct waveform_config *, double *) = {
+	a4l_wf_init_sine,
+	a4l_wf_init_sawtooth,
+	a4l_wf_init_triangular,
+	a4l_wf_init_steps,
+};
+
+void a4l_wf_init_values(struct waveform_config *config, double *values)
+{
+	init_values[config->wf_kind](config, values);
+}
+
+void a4l_wf_dump_values(struct waveform_config *config, double *values)
+{
+	int i;
+
+	for (i = 0; i < config->spl_count; i++)
+		fprintf(stderr, "%f\n", values[i]);
+}
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.h b/kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.h
new file mode 100644
index 0000000..f3df82f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/wf_facilities.h
@@ -0,0 +1,56 @@
+/*
+ * Analogy for Linux, test program for waveform generation
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+#ifndef __SIGNAL_GENERATION_H__
+#define __SIGNAL_GENERATION_H__
+
+#include <stdio.h>
+
+#define MAX_SAMPLE_COUNT 8096
+#define MIN_SAMPLE_COUNT 2
+
+#define WAVEFORM_SINE 0
+#define WAVEFORM_SAWTOOTH 1
+#define WAVEFORM_TRIANGULAR 2
+#define WAVEFORM_STEPS 3
+
+struct waveform_config {
+
+	/* Waveform stuff */
+	int wf_kind;
+	double wf_frequency;
+	double wf_amplitude;
+	double wf_offset;
+
+	/* Sampling stuff */
+	double spl_frequency;
+	int spl_count;
+};
+
+void a4l_wf_init_sine(struct waveform_config *config, double *values);
+void a4l_wf_init_sawtooth(struct waveform_config *config, double *values);
+void a4l_wf_init_triangular(struct waveform_config *config, double *values);
+void a4l_wf_init_steps(struct waveform_config *config, double *values);
+void a4l_wf_set_sample_count(struct waveform_config *config);
+int a4l_wf_check_config(struct waveform_config *config);
+void a4l_wf_init_values(struct waveform_config *config, double *values);
+void a4l_wf_dump_values(struct waveform_config *config, double *values);
+
+#endif /* __SIGNAL_GENERATION_H__ */
diff --git a/kernel/xenomai-v3.2.4/utils/analogy/wf_generate.c b/kernel/xenomai-v3.2.4/utils/analogy/wf_generate.c
new file mode 100644
index 0000000..e937309
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/analogy/wf_generate.c
@@ -0,0 +1,251 @@
+/*
+ * Analogy for Linux, test program for waveform generation
+ *
+ * Copyright (C) 1997-2000 David A. Schleef <ds@schleef.org>
+ * Copyright (C) 2008 Alexis Berlemont <alexis.berlemont@free.fr>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software Foundation,
+ * Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <unistd.h>
+#include <errno.h>
+#include <getopt.h>
+#include <string.h>
+
+#include "wf_facilities.h"
+
+static void do_print_usage(void)
+{
+	fprintf(stdout, "usage:\twf_generate [OPTS]\n");
+	fprintf(stdout, "\tOPTS:\t -v, --verbose: verbose output\n");
+	fprintf(stdout,
+		"\t\t -t, --type: waveform type "
+		"(sine, sawtooth, triangular, steps\n");
+	fprintf(stdout, "\t\t -f, --frequency: waveform frequency\n");
+	fprintf(stdout, "\t\t -a, --amplitude: waveform amplitude\n");
+	fprintf(stdout, "\t\t -o, --offset: waveform offet\n");
+	fprintf(stdout, "\t\t -s, --sampling-frequency: sampling frequency\n");
+	fprintf(stdout, "\t\t -O, --outpout: output file (or stdout)\n");
+	fprintf(stdout, "\t\t -h, --help: print this help\n");
+}
+
+static struct option opts[] = {
+	{"verbose", no_argument, NULL, 'v'},
+	{"type", required_argument, NULL, 't'},
+	{"frequency", required_argument, NULL, 'f'},
+	{"amplitude", required_argument, NULL, 'a'},
+	{"offset", required_argument, NULL, 'o'},
+	{"sampling-frequency", required_argument, NULL, 's'},
+	{"output", required_argument, NULL, 'O'},
+	{"help", no_argument, NULL, 'h'},
+	{0},
+};
+
+static int select_type(struct waveform_config *config, char *arg)
+{
+	int err = 0;
+
+	if (!strcmp(arg, "sine"))
+		config->wf_kind = WAVEFORM_SINE;
+	else if (!strcmp(arg, "sawtooth"))
+		config->wf_kind = WAVEFORM_SAWTOOTH;
+	else if (!strcmp(arg, "triangular"))
+		config->wf_kind = WAVEFORM_TRIANGULAR;
+	else if (!strcmp(arg, "steps"))
+		config->wf_kind = WAVEFORM_STEPS;
+	else {
+		fprintf(stderr, "Error: type %s is not recognized\n", arg);
+		err = -EINVAL;
+	}
+
+	return err;
+}
+
+struct config {
+	int verbose;
+	char *filename;
+	FILE *output;
+	struct waveform_config wf;
+};
+
+static void cleanup_config(struct config *cfg)
+{
+	if (cfg->output && strcmp(cfg->filename, "stdout")) {
+		fclose(cfg->output);
+	}
+}
+
+static int init_config(struct config *cfg, int argc, char *argv[])
+{
+	int err = 0;
+
+	memset(cfg, 0, sizeof(struct config));
+
+	cfg->wf.wf_kind = WAVEFORM_SINE;
+	cfg->wf.wf_frequency = 500.0;
+	cfg->wf.wf_amplitude = 1.0;
+	cfg->wf.wf_offset = 0.0;
+	cfg->wf.spl_frequency = 1000.0;
+	cfg->wf.spl_count = 0;
+
+	while ((err = getopt_long(argc,
+				  argv, "vt:f:a:o:s:O:h", opts, NULL)) >= 0) {
+
+		switch (err) {
+
+		case 'v':
+			cfg->verbose = 1;
+			break;
+		case 't':
+			err = select_type(&cfg->wf, optarg);
+			if (err < 0)
+				goto out;
+			break;
+		case 'f':
+			errno = 0;
+			cfg->wf.wf_frequency = strtod(optarg, NULL);
+			if (errno) {
+				err = -errno;
+				goto bad_conversion;
+			}
+			break;
+		case 'a':
+			errno = 0;
+			cfg->wf.wf_amplitude = strtod(optarg, NULL);
+			if (errno) {
+				err = -errno;
+				goto bad_conversion;
+			}
+			break;
+		case 'o':
+			errno = 0;
+			cfg->wf.wf_offset = strtod(optarg, NULL);
+			if (errno) {
+				err = -errno;
+				goto bad_conversion;
+			}
+			break;
+		case 's':
+			errno = 0;
+			cfg->wf.spl_frequency = strtod(optarg, NULL);
+			if (errno) {
+				err = -errno;
+				goto bad_conversion;
+			}
+			break;
+		case 'O':
+			cfg->filename = optarg;
+			break;
+		case 'h':
+		default:
+			err = -EINVAL;
+			do_print_usage();
+			goto out;
+		}
+	}
+
+	err = 0;
+
+	if (cfg->filename != NULL) {
+		cfg->output = fopen(cfg->filename, "w");
+		if (cfg->output == NULL) {
+			err = -errno;
+			fprintf(stderr, "%s: %s\n", cfg->filename, strerror(errno));
+			goto out;
+		}
+	} else {
+		cfg->output = stdout;
+		cfg->filename = "stdout";
+	}
+
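+	/* Samples are written as raw binary, so reject terminal output. */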
+	if (isatty(fileno(cfg->output))) {
+		err = -EINVAL;
+		fprintf(stderr,
+			"Error: output terminals are not allowed (%s)\n",
+			cfg->filename);
+		goto out;
+	}
+
+out:
+	if (err < 0)
+		cleanup_config(cfg);
+
+	return err;
+
+bad_conversion:
+	fprintf(stderr, "Error:  bad option(s) value(s)\n");
+	do_print_usage();
+	return err;
+}
+
+int main(int argc, char *argv[])
+{
+	int err = 0;
+	struct config cfg;
+	double *values = NULL;
+
+	err = init_config(&cfg, argc, argv);
+	if (err < 0)
+		goto out;
+
+	err = a4l_wf_check_config(&cfg.wf);
+	if (err < 0)
+		goto out;
+
+	a4l_wf_set_sample_count(&cfg.wf);
+
+	if (cfg.verbose) {
+		char *types[] = {"sine", "sawtooth", "triangular", "steps"};
+		fprintf(stderr, "Waveform type: %s\n", types[cfg.wf.wf_kind]);
+		fprintf(stderr, "Amplitude: %F\n", cfg.wf.wf_amplitude);
+		fprintf(stderr, "Frequency: %F\n", cfg.wf.wf_frequency);
+		fprintf(stderr, "Offset: %F\n", cfg.wf.wf_offset);
+		fprintf(stderr,
+			"Sampling frequency: %F\n", cfg.wf.spl_frequency);
+		fprintf(stderr, "Samples count: %d\n", cfg.wf.spl_count);
+		fprintf(stderr, "Output file: %s\n", cfg.filename);
+	}
+
+	values = malloc(cfg.wf.spl_count * sizeof(double));
+	if (!values) {
+		err = -ENOMEM;
+		fprintf(stderr, "Error: values allocations failed\n");
+		goto out;
+	}
+
+	a4l_wf_init_values(&cfg.wf, values);
+
+	err = fwrite(values, sizeof(double), cfg.wf.spl_count, cfg.output);
+	if (err != cfg.wf.spl_count) {
+		err = -errno;
+		perror("Error: output file write: )");
+		goto out;
+	}
+
+	if (cfg.verbose) {
+		int i;
+
+		fprintf(stderr, "Dumping values:\n");
+		for (i = 0; i < cfg.wf.spl_count; i++)
+			fprintf(stderr, "[%d]: %F\n", i, values[i]);
+	}
+
+out:
+	cleanup_config(&cfg);
+
+	return err;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/autotune/Makefile.am b/kernel/xenomai-v3.2.4/utils/autotune/Makefile.am
new file mode 100644
index 0000000..7d2143e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/autotune/Makefile.am
@@ -0,0 +1,17 @@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+sbin_PROGRAMS = autotune
+
+autotune_SOURCES = autotune.c
+
+autotune_CPPFLAGS = 		\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+autotune_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+autotune_LDADD =			\
+	 @XENO_CORE_LDADD@		\
+	 @XENO_USER_LDADD@		\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/utils/autotune/autotune.c b/kernel/xenomai-v3.2.4/utils/autotune/autotune.c
new file mode 100644
index 0000000..f52733b
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/autotune/autotune.c
@@ -0,0 +1,338 @@
+/*
+ * Copyright (C) 2014 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <fcntl.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <pthread.h>
+#include <limits.h>
+#include <time.h>
+#include <signal.h>
+#include <error.h>
+#include <sys/cobalt.h>
+#include <rtdm/autotune.h>
+#include <xenomai/init.h>
+
+static int tune_irqlat, tune_kernlat, tune_userlat;
+
+static int reset, noload, background;
+
+/*
+ * --verbosity_level=0 means fully quiet, =1 means almost quiet.
+ */
+#define verbose (__base_setup_data.verbosity_level)
+
+static const struct option base_options[] = {
+	{
+#define irq_opt		0
+		.name = "irq",
+		.has_arg = no_argument,
+		.flag = &tune_irqlat,
+		.val = 1
+	},
+	{
+#define kernel_opt	1
+		.name = "kernel",
+		.has_arg = no_argument,
+		.flag = &tune_kernlat,
+		.val = 1
+	},
+	{
+#define user_opt	2
+		.name = "user",
+		.has_arg = no_argument,
+		.flag = &tune_userlat,
+		.val = 1
+	},
+	{
+#define reset_opt	3
+		.name = "reset",
+		.has_arg = no_argument,
+		.flag = &reset,
+		.val = 1
+	},
+	{
+#define noload_opt	4
+		.name = "noload",
+		.has_arg = no_argument,
+		.flag = &noload,
+		.val = 1
+	},
+	{
+#define period_opt	5
+		.name = "period",
+		.has_arg = required_argument,
+	},
+	{
+#define background_opt	6
+		.name = "background",
+		.has_arg = no_argument,
+		.flag = &background,
+		.val = 1,
+	},
+	{ /* Sentinel */ }
+};
+
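+/*
+ * Echo a wake-up timestamp back to the autotune core on each pulse,
+ * which uses it to estimate the user-space scheduling latency.
+ */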
+static void *sampler_thread(void *arg)
+{
+	int fd = (long)arg, ret, n = 0;
+	__u64 timestamp = 0;
+	struct timespec now;
+
+	for (;;) {
+		ret = ioctl(fd, AUTOTUNE_RTIOC_PULSE, &timestamp);
+		if (ret) {
+			if (errno != EPIPE)
+				error(1, errno, "pulse failed");
+			timestamp = 0; /* Next tuning period. */
+			n = 0;
+		} else {
+			n++;
+			clock_gettime(CLOCK_MONOTONIC, &now);
+			timestamp = (__u64)now.tv_sec * 1000000000 + now.tv_nsec;
+		}
+	}
+
+	return NULL;
+}
+
+static void *load_thread(void *arg)
+{
+	int fdi, fdo, count = 0, wakelim;
+	ssize_t nbytes, ret;
+	struct timespec rqt;
+	char buf[512];
+
+	fdi = open("/dev/zero", O_RDONLY);
+	if (fdi < 0)
+		error(1, errno, "/dev/zero");
+
+	fdo = open("/dev/null", O_WRONLY);
+	if (fdi < 0)
+		error(1, errno, "/dev/null");
+
+	rqt.tv_sec = 0;
+	rqt.tv_nsec = CONFIG_XENO_DEFAULT_PERIOD * 2;
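+	/* Number of wakeups after which we relax to secondary mode (~every 20 ms). */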
+	wakelim = 20000000 / rqt.tv_nsec;
+
+	for (;;) {
+		clock_nanosleep(CLOCK_MONOTONIC, 0, &rqt, NULL);
+
+		if ((++count % wakelim) == 0) {
+			cobalt_thread_relax();
+			continue;
+		}
+
+		nbytes = read(fdi, buf, sizeof(buf));
+		if (nbytes <= 0)
+			error(1, EIO, "load streaming");
+		if (nbytes > 0) {
+			ret = write(fdo, buf, nbytes);
+			(void)ret;
+		}
+	}
+
+	return NULL;
+}
+
+static void create_sampler(pthread_t *tid, int fd)
+{
+	struct sched_param param;
+	pthread_attr_t attr;
+	int ret;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	param.sched_priority = 99;
+	pthread_attr_setschedparam(&attr, &param);
+	ret = pthread_create(tid, &attr, sampler_thread, (void *)(long)fd);
+	if (ret)
+		error(1, ret, "sampling thread");
+
+	pthread_attr_destroy(&attr);
+	pthread_setname_np(*tid, "sampler");
+}
+
+static void create_load(pthread_t *tid)
+{
+	struct sched_param param;
+	pthread_attr_t attr;
+	int ret;
+
+	pthread_attr_init(&attr);
+	pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_JOINABLE);
+	pthread_attr_setinheritsched(&attr, PTHREAD_EXPLICIT_SCHED);
+	pthread_attr_setschedpolicy(&attr, SCHED_FIFO);
+	param.sched_priority = 1;
+	pthread_attr_setschedparam(&attr, &param);
+	ret = pthread_create(tid, &attr, load_thread, NULL);
+	if (ret)
+		error(1, ret, "load thread");
+
+	pthread_attr_destroy(&attr);
+	pthread_setname_np(*tid, "loadgen");
+}
+
+void application_usage(void)
+{
+        fprintf(stderr, "usage: %s [options]:\n", get_program_name());
+	fprintf(stderr, "--irq				tune for interrupt latency\n");
+	fprintf(stderr, "--kernel			tune for kernel scheduling latency\n");
+	fprintf(stderr, "--user				tune for user scheduling latency\n");
+	fprintf(stderr, "    [ if none of --irq, --kernel and --user is given,\n"
+  		        "      tune for all contexts ]\n");
+	fprintf(stderr, "--period			set the sampling period\n");
+	fprintf(stderr, "--reset 			reset core timer gravity to factory defaults\n");
+	fprintf(stderr, "--noload			disable load generation\n");
+	fprintf(stderr, "--background 			run in the background\n");
+}
+
+static void run_tuner(int fd, unsigned int op, int period, const char *type)
+{
+	struct autotune_setup setup;
+	pthread_t sampler;
+	__u32 gravity;
+	int ret;
+
+	setup.period = period;
+	setup.quiet = verbose > 2 ? 0 : 2 - verbose;
+	ret = ioctl(fd, op, &setup);
+	if (ret)
+		error(1, errno, "setup failed (%s)", type);
+
+	if (verbose) {
+		printf("%s gravity... ", type);
+		fflush(stdout);
+	}
+
+	if (op == AUTOTUNE_RTIOC_USER)
+		create_sampler(&sampler, fd);
+
+	ret = ioctl(fd, AUTOTUNE_RTIOC_RUN, &gravity);
+	if (ret)
+		error(1, errno, "tuning failed (%s)", type);
+
+	if (op == AUTOTUNE_RTIOC_USER)
+		pthread_cancel(sampler);
+
+	if (verbose)
+		printf("%u ns\n", gravity);
+}
+
+int main(int argc, char *const argv[])
+{
+	int fd, period, ret, c, lindex, tuned = 0;
+	pthread_t load_pth;
+	cpu_set_t cpu_set;
+	time_t start;
+
+	period = CONFIG_XENO_DEFAULT_PERIOD;
+
+	for (;;) {
+		c = getopt_long_only(argc, argv, "", base_options, &lindex);
+		if (c == EOF)
+			break;
+		if (c == '?') {
+			xenomai_usage();
+			return EINVAL;
+		}
+		if (c > 0)
+			continue;
+
+		switch (lindex) {
+		case period_opt:
+			period = atoi(optarg);
+			if (period <= 0)
+				error(1, EINVAL, "invalid sampling period (default %d)",
+				      CONFIG_XENO_DEFAULT_PERIOD);
+			break;
+		case noload_opt:
+		case background_opt:
+			break;
+		case irq_opt:
+		case kernel_opt:
+		case user_opt:
+		case reset_opt:
+			tuned = 1;
+			break;
+		default:
+			return EINVAL;
+		}
+	}
+
+	CPU_ZERO(&cpu_set);
+	CPU_SET(0, &cpu_set);
+	ret = sched_setaffinity(0, sizeof(cpu_set), &cpu_set);
+	if (ret)
+		error(1, errno, "cannot set CPU affinity");
+
+	if (background) {
+		signal(SIGHUP, SIG_IGN);
+		ret = daemon(0, 0);
+		if (ret)
+			error(1, errno, "cannot daemonize");
+	}
+
+	fd = open("/dev/rtdm/autotune", O_RDONLY);
+	if (fd < 0)
+		error(1, errno, "cannot open autotune device");
+
+	if (!tuned)
+		tune_irqlat = tune_kernlat = tune_userlat = 1;
+
+	if (reset) {
+		ret = ioctl(fd, AUTOTUNE_RTIOC_RESET);
+		if (ret)
+			error(1, errno, "reset failed");
+	}
+
+	if (tune_irqlat || tune_kernlat || tune_userlat) {
+		if (!noload)
+			create_load(&load_pth);
+		if (verbose)
+			printf("== auto-tuning started, period=%d ns (may take a while)\n",
+			       period);
+	} else
+		noload = 1;
+
+	time(&start);
+
+	if (tune_irqlat)
+		run_tuner(fd, AUTOTUNE_RTIOC_IRQ, period, "irq");
+
+	if (tune_kernlat)
+		run_tuner(fd, AUTOTUNE_RTIOC_KERN, period, "kernel");
+
+	if (tune_userlat)
+		run_tuner(fd, AUTOTUNE_RTIOC_USER, period, "user");
+
+	if (verbose && (tune_userlat || tune_kernlat || tune_userlat))
+		printf("== auto-tuning completed after %ds\n",
+		       (int)(time(NULL) - start));
+
+	if (!noload)
+		pthread_cancel(load_pth);
+
+	close(fd);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/can/Makefile.am b/kernel/xenomai-v3.2.4/utils/can/Makefile.am
new file mode 100644
index 0000000..e41d266
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/can/Makefile.am
@@ -0,0 +1,36 @@
+sbin_PROGRAMS = rtcanconfig
+
+bin_PROGRAMS = rtcanrecv rtcansend
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+AM_CPPFLAGS = 						\
+	@XENO_USER_CFLAGS@ 				\
+	-I$(top_srcdir)/include
+
+AM_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+rtcanconfig_SOURCES = rtcanconfig.c
+
+rtcanconfig_LDADD = \
+	 @XENO_CORE_LDADD@		\
+	 @XENO_USER_LDADD@		\
+	-lpthread -lrt
+
+rtcanrecv_SOURCES = rtcanrecv.c
+
+rtcanrecv_LDADD = \
+	../../lib/alchemy/libalchemy@CORE@.la \
+	../../lib/copperplate/libcopperplate@CORE@.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lpthread -lrt
+
+rtcansend_SOURCES = rtcansend.c
+
+rtcansend_LDADD = \
+	../../lib/alchemy/libalchemy@CORE@.la \
+	../../lib/copperplate/libcopperplate@CORE@.la \
+	@XENO_CORE_LDADD@		\
+	@XENO_USER_LDADD@		\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/utils/can/README b/kernel/xenomai-v3.2.4/utils/can/README
new file mode 100644
index 0000000..c1bec54
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/can/README
@@ -0,0 +1,150 @@
+Utilities for RT-Socket-CAN
+===========================
+
+For further information on RT-Socket-CAN, especially the kernel space
+part, please have a look at "kernel/drivers/can/README".
+
+Installation:
+-------------
+
+Please install the Xenomai user space part as described in
+README.INSTALL.
+
+  $ export CROSS_COMPILE=ppc_82xx-
+  $ cd <xenomai-root>
+  $ ./configure --prefix=/usr/xenomai --host=ppc-linux
+  $ make
+  $ export DESTDIR=/opt/eldk/ppc_82xx
+  $ make install
+
+Utilities for RT-Socket-CAN are available in "src/drivers/can".
+With "make install" they get copied to the Xenomai installation
+directory.
+
+
+Running and using RT-Socket-CAN:
+--------------------------------
+
+Now boot the Xenomai enabled kernel on your target system.
+
+In case RT-Socket-CAN is built as kernel modules, you need to load
+them using modprobe or insmod, e.g. for this example build:
+
+  # export MODDIR=/lib/modules/2.4.25/kernel/drivers/xenomai/rtcan
+  # insmod $MODDIR/xeno_can.o
+  # insmod $MODDIR/mscan/xeno_can_mscan.o
+  # insmod $MODDIR/sja1000/xeno_can_sja1000.o
+  # insmod $MODDIR/sja1000/xeno_can_peak_pci.o
+
+Note that various kernel module parameters can be passed with insmod.
+Please use "modinfo" to list them or check the corresponding source
+code files for further information.
+
+There are a few RT-Socket-CAN utilities to configure RTCAN and to send
+and receive CAN messages, which have been installed in the Xenomai
+installation directory with "make install":
+
+  # export XENO_ROOT=/usr/xenomai
+  # export PATH=$PATH:$XENO_ROOT/bin
+  # export LD_LIBRARY_PATH=$XENO_ROOT/lib
+
+  # rtcanconfig --help
+  Usage: rtcanconfig <can-interface> [Options] [up|down|start|stop|sleep]
+  Options:
+   -v, --verbose            be verbose
+   -h, --help               this help
+   -c, --ctrlmode=M1:M2:... listenonly or loopback mode
+   -b, --baudrate=BPS       baudrate in bits/sec
+   -B, --bittime=BTR0:BTR1  BTR or standard bit-time
+   -B, --bittime=BRP:PROP_SEG:PHASE_SEG1:PHASE_SEG2:SJW:SAM
+
+  # rtcanrecv --help
+  Usage: rtcanrecv <can-interface> [Options]
+  Options:
+   -f  --filter=id:mask[:id:mask]... apply filter
+   -e  --error=mask      receive error messages
+   -t, --timeout=MS      timeout in ms
+   -v, --verbose         be verbose
+   -p, --print=MODULO    print every MODULO message
+   -n, --name=STRING     name of the RT task
+   -h, --help            this help
+
+  # rtcansend --help
+  Usage: rtcansend <can-interface> [Options] <can-msg>
+  <can-msg> can consist of up to 8 bytes given as a space separated list
+  Options:
+   -i, --identifier=ID   CAN Identifier (default = 1)
+   -r  --rtr             send remote request
+   -e  --extended        send extended frame
+   -l  --loop=COUNT      send message COUNT times
+   -c, --count           message count in data[0-3]
+   -d, --delay=MS        delay in ms (default = 1ms)
+   -t, --timeout=MS      timeout in ms
+   -v, --verbose         be verbose
+   -p, --print=MODULO    print every MODULO message
+   -h, --help            this help
+
+Here are a few self-explanatory commands:
+
+  # rtcanconfig rtcan0 --baudrate=125000 start
+
+  # rtcansend rtcan2 --verbose --identifier=0x123 0xde 0xad
+  <0x123> [2] de ad
+
+  # rtcanrecv rtcan0 --verbose
+  #1: <0x123> [2] de ad
+
+  # rtcanrecv rtcan0 --filter=0x120:0x120
+  Filter #0: id=0x00000120 mask=0x00000120
+  #0: <0x124> [2] de ad
+  #1: <0x123> [3] 12 34 56
+  #2: <0x133> [4] 11 22 33 44
+
+  # rtcanrecv rtcan0 --error=0xffff
+  #1: !0x00000008! [8] 00 00 80 19 00 00 00 00 ERROR
+
+
+PROC filesystem: the following files provide useful information
+on the status of the CAN controller, filter settings, registers,
+etc.
+
+  # cat /proc/rtcan/devices
+  Name___________ _Baudrate State___ TX_Counter RX_Counter ____Errors
+  rtcan0             125000 active            0          8          0
+  rtcan1             125000 active            0          8          0
+  rtcan2             125000 passive           8          0      14714
+
+  # cat /proc/rtcan/sockets
+  fd Name___________ Filter ErrMask RX_Timeout_ns TX_Timeout_ns RX_BufFull
+   0 rtcan0               1 0x0ffff      infinite      infinite          0
+   1 rtcan0               1 0x00000      infinite      infinite          0
+
+  # cat /proc/rtcan/rtcan2/info
+  Device     rtcan2
+  Controller SJA1000
+  Board      PEAK-PCI
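+	/* Waveform periods elapsed per sample. */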
+  Clock-Hz   8000000
+  Baudrate   125000
+  Bit-time   brp=4 prop_seg=0 phase_seg1=13 phase_seg2=2 sjw=1 sam=0
+  Ctrl-Mode
+  State      passive
+  TX-Counter 3
+  RX-Counter 0
+  Errors     45424
+  Refcount   0
+
+  # cat /proc/rtcan/rtcan0/filters
+  fd __CAN_ID__ _CAN_Mask_ MatchCount
+   0 0x00000000 0x00000000          0
+   1 0x00000120 0x00000120          3
+
+  # cat /proc/rtcan/rtcan0/registers
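+	/*
+	 * i * ratio * amplitude ramps up linearly; subtracting
+	 * period_idx * amplitude drops the ramp back by one full
+	 * amplitude at each period boundary.
+	 */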
+  MSCAN registers at f0000900
+  canctl0  0x90 rxfrm synch
+  canctl1  0xc0 cane clksrc
+  ...
+
+  # cat /proc/rtcan/rtcan2/registers
+  SJA1000 registers
+  00: 00 00 4c 00 ff 00 03 1c 1a 00 00 02 d6 60 14 88
+  10: 02 26 60 de ad 04 04 00 ef c7 ef ef 40 00 00 c7
diff --git a/kernel/xenomai-v3.2.4/utils/can/rtcanconfig.c b/kernel/xenomai-v3.2.4/utils/can/rtcanconfig.c
new file mode 100644
index 0000000..395e036
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/can/rtcanconfig.c
@@ -0,0 +1,258 @@
+/*
+ * Program to configure the CAN controller
+ *
+ * Copyright (C) 2006 Wolfgang Grandegger <wg@grandegger.com>
+ *
+ * Copyright (C) 2005, 2006 Sebastian Smolorz
+ *                          <Sebastian.Smolorz@stud.uni-hannover.de>
+ *
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <string.h>
+#include <time.h>
+#include <errno.h>
+#include <getopt.h>
+#include <sys/mman.h>
+#include <boilerplate/ancillaries.h>
+
+#include <rtdm/can.h>
+
+static void print_usage(char *prg)
+{
+    fprintf(stderr,
+	    "Usage: %s <can-interface> [Options] [up|down|start|stop|sleep]\n"
+	    "Options:\n"
+	    " -v, --verbose            be verbose\n"
+	    " -h, --help               this help\n"
+	    " -c, --ctrlmode=CTRLMODE  listenonly, loopback or none\n"
+	    " -b, --baudrate=BPS       baudrate in bits/sec\n"
+	    " -B, --bittime=BTR0:BTR1  BTR or standard bit-time\n"
+	    " -B, --bittime=BRP:PROP_SEG:PHASE_SEG1:PHASE_SEG2:SJW:SAM\n",
+	    prg);
+}
+
+static can_baudrate_t string_to_baudrate(char *str)
+{
+    can_baudrate_t baudrate;
+    if (sscanf(str, "%i", &baudrate) != 1)
+	return -1;
+    return baudrate;
+}
+
+static int string_to_mode(char *str)
+{
+    if ( !strcmp(str, "up") || !strcmp(str, "start") )
+	return CAN_MODE_START;
+    else if ( !strcmp(str, "down") || !strcmp(str, "stop") )
+	return CAN_MODE_STOP;
+    else if ( !strcmp(str, "sleep") )
+	return CAN_MODE_SLEEP;
+    return -EINVAL;
+}
+
+static int string_to_ctrlmode(char *str)
+{
+    if ( !strcmp(str, "listenonly") )
+	return CAN_CTRLMODE_LISTENONLY;
+    else if ( !strcmp(str, "loopback") )
+	return CAN_CTRLMODE_LOOPBACK;
+    else if ( !strcmp(str, "none") )
+	return 0;
+
+    return -1;
+}
+
+int main(int argc, char *argv[])
+{
+    char    ifname[IFNAMSIZ];
+    int     can_fd = -1;
+    int     new_baudrate = -1;
+    int     new_mode = -1;
+    int     new_ctrlmode = 0, set_ctrlmode = 0;
+    int     verbose = 0;
+    int     bittime_count = 0, bittime_data[6];
+    struct can_ifreq ifr;
+    struct can_bittime *bittime;
+    int opt, ret;
+    char* ptr;
+
+    struct option long_options[] = {
+	{ "help", no_argument, 0, 'h' },
+	{ "verbose", no_argument, 0, 'v'},
+	{ "baudrate", required_argument, 0, 'b'},
+	{ "bittime", required_argument, 0, 'B'},
+	{ "ctrlmode", required_argument, 0, 'c'},
+	{ 0, 0, 0, 0},
+    };
+
+    while ((opt = getopt_long(argc, argv, "hvb:B:c:",
+			      long_options, NULL)) != -1) {
+	switch (opt) {
+	case 'h':
+	    print_usage(argv[0]);
+	    exit(0);
+
+	case 'v':
+	    verbose = 1;
+	    break;
+
+	case 'b':
+	    new_baudrate = string_to_baudrate(optarg);
+	    if (new_baudrate == -1) {
+		print_usage(argv[0]);
+		exit(0);
+	    }
+	    break;
+
+	case 'B':
+	    ptr = optarg;
+	    while (1) {
+		bittime_data[bittime_count++] = strtoul(ptr, NULL, 0);
+		if (!(ptr = strchr(ptr, ':')))
+		    break;
+		ptr++;
+	    }
+	    if (bittime_count != 2 && bittime_count != 6) {
+		print_usage(argv[0]);
+		exit(0);
+	    }
+	    break;
+
+	case 'c':
+	    ret = string_to_ctrlmode(optarg);
+	    if (ret == -1) {
+		print_usage(argv[0]);
+		exit(0);
+	    }
+	    new_ctrlmode |= ret;
+	    set_ctrlmode = 1;
+	    break;
+
+	default:
+	    fprintf(stderr, "Unknown option %c\n", opt);
+	    break;
+	}
+    }
+
+    /* Get CAN interface name */
+    if (optind != argc - 1 && optind != argc - 2) {
+	print_usage(argv[0]);
+	return 0;
+    }
+
+    namecpy(ifname, argv[optind]);
+    namecpy(ifr.ifr_name, ifname);
+
+    if (optind == argc - 2) {   /* Get mode setting */
+	new_mode = string_to_mode(argv[optind + 1]);
+	if (verbose)
+	    printf("mode: %s (%#x)\n", argv[optind + 1], new_mode);
+	if (new_mode < 0) {
+	    print_usage(argv[0]);
+	    return 0;
+	}
+    }
+
+    can_fd = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+    if (can_fd < 0) {
+	fprintf(stderr, "Cannot open RTDM CAN socket. Maybe driver not loaded? \n");
+	return can_fd;
+    }
+
+    ret = ioctl(can_fd, SIOCGIFINDEX, &ifr);
+    if (ret) {
+	fprintf(stderr,"Can't get interface index for %s, errno = %d\n", ifname, errno);
+	return ret;
+    }
+
+
+    if (new_baudrate != -1) {
+	if (verbose)
+	    printf("baudrate: %d\n", new_baudrate);
+	ifr.ifr_ifru.baudrate = new_baudrate;
+	ret = ioctl(can_fd, SIOCSCANBAUDRATE, &ifr);
+	if (ret) {
+	    goto abort;
+	}
+    }
+
+    if (bittime_count) {
+	bittime = &ifr.ifr_ifru.bittime;
+	if (bittime_count == 2) {
+	    bittime->type = CAN_BITTIME_BTR;
+	    bittime->btr.btr0 = bittime_data[0];
+	    bittime->btr.btr1 = bittime_data[1];
+	    if (verbose)
+		printf("bit-time: btr0=0x%02x btr1=0x%02x\n",
+		       bittime->btr.btr0, bittime->btr.btr1);
+	} else {
+	    bittime->type = CAN_BITTIME_STD;
+	    bittime->std.brp = bittime_data[0];
+	    bittime->std.prop_seg = bittime_data[1];
+	    bittime->std.phase_seg1 = bittime_data[2];
+	    bittime->std.phase_seg2 = bittime_data[3];
+	    bittime->std.sjw = bittime_data[4];
+	    bittime->std.sam = bittime_data[5];
+	    if (verbose)
+		printf("bit-time: brp=%d prop_seg=%d phase_seg1=%d "
+		       "phase_seg2=%d sjw=%d sam=%d\n",
+		       bittime->std.brp,
+		       bittime->std.prop_seg,
+		       bittime->std.phase_seg1,
+		       bittime->std.phase_seg2,
+		       bittime->std.sjw,
+		       bittime->std.sam);
+	}
+
+	ret = ioctl(can_fd, SIOCSCANCUSTOMBITTIME, &ifr);
+	if (ret) {
+	    goto abort;
+	}
+
+    }
+
+    if (set_ctrlmode != 0) {
+	ifr.ifr_ifru.ctrlmode = new_ctrlmode;
+	if (verbose)
+	    printf("ctrlmode: %#x\n", new_ctrlmode);
+	ret = ioctl(can_fd, SIOCSCANCTRLMODE, &ifr);
+	if (ret) {
+	    goto abort;
+	}
+    }
+
+    if (new_mode != -1) {
+	ifr.ifr_ifru.mode = new_mode;
+	ret = ioctl(can_fd, SIOCSCANMODE, &ifr);
+	if (ret) {
+	    goto abort;
+	}
+    }
+
+    close(can_fd);
+    return 0;
+
+ abort:
+    close(can_fd);
+    return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/can/rtcanrecv.c b/kernel/xenomai-v3.2.4/utils/can/rtcanrecv.c
new file mode 100644
index 0000000..1b7101d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/can/rtcanrecv.c
@@ -0,0 +1,324 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <time.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <alchemy/task.h>
+#include <boilerplate/ancillaries.h>
+
+#include <rtdm/can.h>
+
+static void print_usage(char *prg)
+{
+    fprintf(stderr,
+	    "Usage: %s [<can-interface>] [Options]\n"
+	    "Options:\n"
+	    " -f  --filter=id:mask[:id:mask]... apply filter\n"
+	    " -e  --error=mask      receive error messages\n"
+	    " -t, --timeout=MS      timeout in ms\n"
+	    " -T, --timestamp       with absolute timestamp\n"
+	    " -R, --timestamp-rel   with relative timestamp\n"
+	    " -v, --verbose         be verbose\n"
+	    " -p, --print=MODULO    print every MODULO message\n"
+	    " -h, --help            this help\n",
+	    prg);
+}
+
+
+extern int optind, opterr, optopt;
+
+static int s = -1, verbose = 0, print = 1;
+static nanosecs_rel_t timeout = 0, with_timestamp = 0, timestamp_rel = 0;
+
+RT_TASK rt_task_desc;
+
+#define BUF_SIZ	255
+#define MAX_FILTER 16
+
+struct sockaddr_can recv_addr;
+struct can_filter recv_filter[MAX_FILTER];
+static int filter_count = 0;
+
+static int add_filter(u_int32_t id, u_int32_t mask)
+{
+    if (filter_count >= MAX_FILTER)
+	return -1;
+    recv_filter[filter_count].can_id = id;
+    recv_filter[filter_count].can_mask = mask;
+    printf("Filter #%d: id=0x%08x mask=0x%08x\n", filter_count, id, mask);
+    filter_count++;
+    return 0;
+}
+
+static void cleanup(void)
+{
+    int ret;
+
+    if (verbose)
+	printf("Cleaning up...\n");
+
+    if (s >= 0) {
+	ret = close(s);
+	s = -1;
+	if (ret) {
+	    fprintf(stderr, "close: %s\n", strerror(errno));
+	}
+	exit(EXIT_SUCCESS);
+    }
+}
+
+static void cleanup_and_exit(int sig)
+{
+    if (verbose)
+	printf("Signal %d received\n", sig);
+    cleanup();
+    exit(0);
+}
+
+static void rt_task(void)
+{
+    int i, ret, count = 0;
+    struct can_frame frame;
+    struct sockaddr_can addr;
+    socklen_t addrlen = sizeof(addr);
+    struct msghdr msg;
+    struct iovec iov;
+    nanosecs_abs_t timestamp, timestamp_prev = 0;
+
+    if (with_timestamp) {
+	msg.msg_iov = &iov;
+	msg.msg_iovlen = 1;
+	msg.msg_name = (void *)&addr;
+	msg.msg_namelen = sizeof(struct sockaddr_can);
+	msg.msg_control = (void *)&timestamp;
+	msg.msg_controllen = sizeof(nanosecs_abs_t);
+    }
+
+    while (1) {
+	if (with_timestamp) {
+	    iov.iov_base = (void *)&frame;
+	    iov.iov_len = sizeof(can_frame_t);
+	    ret = recvmsg(s, &msg, 0);
+	} else
+	    ret = recvfrom(s, (void *)&frame, sizeof(can_frame_t), 0,
+				  (struct sockaddr *)&addr, &addrlen);
+	if (ret < 0) {
+	    switch (errno) {
+	    case ETIMEDOUT:
+		if (verbose)
+		    printf("recv: timed out\n");
+		continue;
+	    case EBADF:
+		if (verbose)
+		    printf("recv: aborted because socket was closed\n");
+		break;
+	    default:
+		fprintf(stderr, "recv: %s\n", strerror(errno));
+	    }
+	    break;
+	}
+
+	if (print && (count % print) == 0) {
+	    printf("#%d: (%d) ", count, addr.can_ifindex);
+	    if (with_timestamp && msg.msg_controllen) {
+		if (timestamp_rel) {
+		printf("%lldns ", (long long)(timestamp - timestamp_prev));
+		    timestamp_prev = timestamp;
+		} else
+		    printf("%lldns ", (long long)timestamp);
+	    }
+	    if (frame.can_id & CAN_ERR_FLAG)
+		printf("!0x%08x!", frame.can_id & CAN_ERR_MASK);
+	    else if (frame.can_id & CAN_EFF_FLAG)
+		printf("<0x%08x>", frame.can_id & CAN_EFF_MASK);
+	    else
+		printf("<0x%03x>", frame.can_id & CAN_SFF_MASK);
+
+	    printf(" [%d]", frame.can_dlc);
+	    if (!(frame.can_id & CAN_RTR_FLAG))
+		for (i = 0; i < frame.can_dlc; i++) {
+		    printf(" %02x", frame.data[i]);
+		}
+	    if (frame.can_id & CAN_ERR_FLAG) {
+		printf(" ERROR ");
+		if (frame.can_id & CAN_ERR_BUSOFF)
+		    printf("bus-off");
+		if (frame.can_id & CAN_ERR_CRTL)
+		    printf("controller problem");
+	    } else if (frame.can_id & CAN_RTR_FLAG)
+		printf(" remote request");
+	    printf("\n");
+	}
+	count++;
+    }
+}
+
+int main(int argc, char **argv)
+{
+    int opt, ret;
+    u_int32_t id, mask;
+    u_int32_t err_mask = 0;
+    struct can_ifreq ifr;
+    char *ptr;
+    char name[32];
+
+    struct option long_options[] = {
+	{ "help", no_argument, 0, 'h' },
+	{ "verbose", no_argument, 0, 'v'},
+	{ "filter", required_argument, 0, 'f'},
+	{ "error", required_argument, 0, 'e'},
+	{ "timeout", required_argument, 0, 't'},
+	{ "timestamp", no_argument, 0, 'T'},
+	{ "timestamp-rel", no_argument, 0, 'R'},
+	{ 0, 0, 0, 0},
+    };
+
+    signal(SIGTERM, cleanup_and_exit);
+    signal(SIGINT, cleanup_and_exit);
+
+    while ((opt = getopt_long(argc, argv, "hve:f:t:p:RT",
+			      long_options, NULL)) != -1) {
+	switch (opt) {
+	case 'h':
+	    print_usage(argv[0]);
+	    exit(0);
+
+	case 'p':
+	    print = strtoul(optarg, NULL, 0);
+	    break;
+
+	case 'v':
+	    verbose = 1;
+	    break;
+
+	case 'e':
+	    err_mask = strtoul(optarg, NULL, 0);
+	    break;
+
+	case 'f':
+	    ptr = optarg;
+	    while (1) {
+		id = strtoul(ptr, NULL, 0);
+		ptr = strchr(ptr, ':');
+		if (!ptr) {
+		    fprintf(stderr, "filter must be applied in the form id:mask[:id:mask]...\n");
+		    exit(1);
+		}
+		ptr++;
+		mask = strtoul(ptr, NULL, 0);
+		ptr = strchr(ptr, ':');
+		add_filter(id, mask);
+		if (!ptr)
+		    break;
+		ptr++;
+	    }
+	    break;
+
+	case 't':
+	    timeout = (nanosecs_rel_t)strtoul(optarg, NULL, 0) * 1000000;
+	    break;
+
+	case 'R':
+	    timestamp_rel = 1;
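+	    /* fall through: a relative timestamp implies timestamping */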
+	case 'T':
+	    with_timestamp = 1;
+	    break;
+
+	default:
+	    fprintf(stderr, "Unknown option %c\n", opt);
+	    break;
+	}
+    }
+
+    ret = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+    if (ret < 0) {
+	fprintf(stderr, "socket: %s\n", strerror(errno));
+	return -1;
+    }
+    s = ret;
+
+    if (argv[optind] == NULL) {
+	if (verbose)
+	    printf("interface all\n");
+
+	ifr.ifr_ifindex = 0;
+    } else {
+	if (verbose)
+	    printf("interface %s\n", argv[optind]);
+
+	namecpy(ifr.ifr_name, argv[optind]);
+	if (verbose)
+	    printf("s=%d, ifr_name=%s\n", s, ifr.ifr_name);
+
+	ret = ioctl(s, SIOCGIFINDEX, &ifr);
+	if (ret < 0) {
+	    fprintf(stderr, "ioctl GET_IFINDEX: %s\n", strerror(errno));
+	    goto failure;
+	}
+    }
+
+    if (err_mask) {
+	ret = setsockopt(s, SOL_CAN_RAW, CAN_RAW_ERR_FILTER,
+				&err_mask, sizeof(err_mask));
+	if (ret < 0) {
+	    fprintf(stderr, "setsockopt: %s\n", strerror(errno));
+	    goto failure;
+	}
+	if (verbose)
+	    printf("Using err_mask=%#x\n", err_mask);
+    }
+
+    if (filter_count) {
+	ret = setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER,
+				&recv_filter, filter_count *
+				sizeof(struct can_filter));
+	if (ret < 0) {
+	    fprintf(stderr, "setsockopt: %s\n", strerror(errno));
+	    goto failure;
+	}
+    }
+
+    recv_addr.can_family = AF_CAN;
+    recv_addr.can_ifindex = ifr.ifr_ifindex;
+    ret = bind(s, (struct sockaddr *)&recv_addr,
+		      sizeof(struct sockaddr_can));
+    if (ret < 0) {
+	fprintf(stderr, "bind: %s\n", strerror(errno));
+	goto failure;
+    }
+
+    if (timeout) {
+	if (verbose)
+	    printf("Timeout: %lld ns\n", (long long)timeout);
+	ret = ioctl(s, RTCAN_RTIOC_RCV_TIMEOUT, &timeout);
+	if (ret) {
+	    fprintf(stderr, "ioctl RCV_TIMEOUT: %s\n", strerror(errno));
+	    goto failure;
+	}
+    }
+
+    if (with_timestamp) {
+	ret = ioctl(s, RTCAN_RTIOC_TAKE_TIMESTAMP, RTCAN_TAKE_TIMESTAMPS);
+	if (ret) {
+	    fprintf(stderr, "ioctl TAKE_TIMESTAMP: %s\n", strerror(errno));
+	    goto failure;
+	}
+    }
+
+    snprintf(name, sizeof(name), "rtcanrecv-%d", getpid());
+    ret = rt_task_shadow(&rt_task_desc, name, 0, 0);
+    if (ret) {
+	fprintf(stderr, "rt_task_shadow: %s\n", strerror(-ret));
+	goto failure;
+    }
+
+    rt_task();
+    /* never returns */
+
+ failure:
+    cleanup();
+    return -1;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/can/rtcansend.c b/kernel/xenomai-v3.2.4/utils/can/rtcansend.c
new file mode 100644
index 0000000..ac8f8c1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/can/rtcansend.c
@@ -0,0 +1,306 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <signal.h>
+#include <unistd.h>
+#include <time.h>
+#include <errno.h>
+#include <getopt.h>
+
+#include <boilerplate/ancillaries.h>
+#include <alchemy/task.h>
+#include <alchemy/timer.h>
+
+#include <rtdm/can.h>
+
+extern int optind, opterr, optopt;
+
+static void print_usage(char *prg)
+{
+    fprintf(stderr,
+	    "Usage: %s <can-interface> [Options] <can-msg>\n"
+	    "<can-msg> can consist of up to 8 bytes given as a space separated list\n"
+	    "Options:\n"
+	    " -i, --identifier=ID   CAN Identifier (default = 1)\n"
+	    " -r  --rtr             send remote request\n"
+	    " -e  --extended        send extended frame\n"
+	    " -l  --loop=COUNT      send message COUNT times\n"
+	    " -c, --count           message count in data[0-3]\n"
+	    " -d, --delay=MS        delay in ms (default = 1ms)\n"
+	    " -s, --send            use send instead of sendto\n"
+	    " -t, --timeout=MS      timeout in ms\n"
+	    " -L, --loopback=0|1    switch local loopback off or on\n"
+	    " -v, --verbose         be verbose\n"
+	    " -p, --print=MODULO    print every MODULO message\n"
+	    " -h, --help            this help\n",
+	    prg);
+}
+
+
+RT_TASK rt_task_desc;
+
+static int s=-1, dlc=0, rtr=0, extended=0, verbose=0, loops=1;
+static SRTIME delay=1000000;
+static int count=0, print=1, use_send=0, loopback=-1;
+static nanosecs_rel_t timeout = 0;
+static struct can_frame frame;
+static struct sockaddr_can to_addr;
+
+
+static void cleanup(void)
+{
+    int ret;
+
+    if (verbose)
+	printf("Cleaning up...\n");
+
+    usleep(100000);
+
+    if (s >= 0) {
+	ret = close(s);
+	s = -1;
+	if (ret) {
+	    fprintf(stderr, "close: %s\n", strerror(errno));
+	}
+	exit(EXIT_SUCCESS);
+    }
+}
+
+static void cleanup_and_exit(int sig)
+{
+    if (verbose)
+	printf("Signal %d received\n", sig);
+    cleanup();
+    exit(0);
+}
+
+static void rt_task(void)
+{
+    int i, j, ret;
+
+    for (i = 0; i < loops; i++) {
+	rt_task_sleep(rt_timer_ns2ticks(delay));
+	if (count)
+	    memcpy(&frame.data[0], &i, sizeof(i));
+	/* Note: sendto avoids the definition of a receive filter list */
+	if (use_send)
+	    ret = send(s, (void *)&frame, sizeof(can_frame_t), 0);
+	else
+	    ret = sendto(s, (void *)&frame, sizeof(can_frame_t), 0,
+				(struct sockaddr *)&to_addr, sizeof(to_addr));
+	if (ret < 0) {
+	    switch (errno) {
+	    case ETIMEDOUT:
+		if (verbose)
+		    printf("send(to): timed out\n");
+		break;
+	    case EBADF:
+		if (verbose)
+		    printf("send(to): aborted because socket was closed\n");
+		break;
+	    default:
+		fprintf(stderr, "send: %s\n", strerror(errno));
+		break;
+	    }
+	    i = loops;		/* abort */
+	    break;
+	}
+	if (verbose && (i % print) == 0) {
+	    if (frame.can_id & CAN_EFF_FLAG)
+		printf("<0x%08x>", frame.can_id & CAN_EFF_MASK);
+	    else
+		printf("<0x%03x>", frame.can_id & CAN_SFF_MASK);
+	    printf(" [%d]", frame.can_dlc);
+	    for (j = 0; j < frame.can_dlc; j++) {
+		printf(" %02x", frame.data[j]);
+	    }
+	    printf("\n");
+	}
+    }
+}
+
+int main(int argc, char **argv)
+{
+    int i, opt, ret;
+    struct can_ifreq ifr;
+    char name[32];
+
+    struct option long_options[] = {
+	{ "help", no_argument, 0, 'h' },
+	{ "identifier", required_argument, 0, 'i'},
+	{ "rtr", no_argument, 0, 'r'},
+	{ "extended", no_argument, 0, 'e'},
+	{ "verbose", no_argument, 0, 'v'},
+	{ "count", no_argument, 0, 'c'},
+	{ "print", required_argument, 0, 'p'},
+	{ "loop", required_argument, 0, 'l'},
+	{ "delay", required_argument, 0, 'd'},
+	{ "send", no_argument, 0, 's'},
+	{ "timeout", required_argument, 0, 't'},
+	{ "loopback", required_argument, 0, 'L'},
+	{ 0, 0, 0, 0},
+    };
+
+    signal(SIGTERM, cleanup_and_exit);
+    signal(SIGINT, cleanup_and_exit);
+
+    frame.can_id = 1;
+
+    while ((opt = getopt_long(argc, argv, "hvi:l:red:t:cp:sL:",
+			      long_options, NULL)) != -1) {
+	switch (opt) {
+	case 'h':
+	    print_usage(argv[0]);
+	    exit(0);
+
+	case 'p':
+	    print = strtoul(optarg, NULL, 0);
+	    break;
+
+	case 'v':
+	    verbose = 1;
+	    break;
+
+	case 'c':
+	    count = 1;
+	    break;
+
+	case 'l':
+	    loops = strtoul(optarg, NULL, 0);
+	    break;
+
+	case 'i':
+	    frame.can_id = strtoul(optarg, NULL, 0);
+	    break;
+
+	case 'r':
+	    rtr = 1;
+	    break;
+
+	case 'e':
+	    extended = 1;
+	    break;
+
+	case 'd':
+	    delay = strtoul(optarg, NULL, 0) * 1000000LL;
+	    break;
+
+	case 's':
+	    use_send = 1;
+	    break;
+
+	case 't':
+	    timeout = strtoul(optarg, NULL, 0) * 1000000LL;
+	    break;
+
+	case 'L':
+	    loopback = strtoul(optarg, NULL, 0);
+	    break;
+
+	default:
+	    fprintf(stderr, "Unknown option %c\n", opt);
+	    break;
+	}
+    }
+
+    if (optind == argc) {
+	print_usage(argv[0]);
+	exit(0);
+    }
+
+    if (argv[optind] == NULL) {
+	fprintf(stderr, "No Interface supplied\n");
+	exit(-1);
+    }
+
+    if (verbose)
+	printf("interface %s\n", argv[optind]);
+
+    ret = socket(PF_CAN, SOCK_RAW, CAN_RAW);
+    if (ret < 0) {
+	fprintf(stderr, "socket: %s\n", strerror(errno));
+	return -1;
+    }
+    s = ret;
+
+    if (loopback >= 0) {
+	ret = setsockopt(s, SOL_CAN_RAW, CAN_RAW_LOOPBACK,
+				&loopback, sizeof(loopback));
+	if (ret < 0) {
+	    fprintf(stderr, "setsockopt: %s\n", strerror(errno));
+	    goto failure;
+	}
+	if (verbose)
+	    printf("Using loopback=%d\n", loopback);
+    }
+
+    namecpy(ifr.ifr_name, argv[optind]);
+    if (verbose)
+	printf("s=%d, ifr_name=%s\n", s, ifr.ifr_name);
+
+    ret = ioctl(s, SIOCGIFINDEX, &ifr);
+    if (ret < 0) {
+	fprintf(stderr, "ioctl: %s\n", strerror(errno));
+	goto failure;
+    }
+
+    memset(&to_addr, 0, sizeof(to_addr));
+    to_addr.can_ifindex = ifr.ifr_ifindex;
+    to_addr.can_family = AF_CAN;
+    if (use_send) {
+	/* Suppress definition of a default receive filter list */
+	ret = setsockopt(s, SOL_CAN_RAW, CAN_RAW_FILTER, NULL, 0);
+	if (ret < 0) {
+	    fprintf(stderr, "setsockopt: %s\n", strerror(errno));
+	    goto failure;
+	}
+
+	ret = bind(s, (struct sockaddr *)&to_addr, sizeof(to_addr));
+	if (ret < 0) {
+	    fprintf(stderr, "bind: %s\n", strerror(errno));
+	    goto failure;
+	}
+    }
+
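+    /* In count mode, data[0-3] carry the running loop index (see rt_task()). */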
+    if (count)
+	frame.can_dlc = sizeof(int);
+    else {
+	for (i = optind + 1; i < argc; i++) {
+	    frame.data[dlc] = strtoul(argv[i], NULL, 0);
+	    dlc++;
+	    if (dlc == 8)
+		break;
+	}
+	frame.can_dlc = dlc;
+    }
+
+    if (rtr)
+	frame.can_id |= CAN_RTR_FLAG;
+
+    if (extended)
+	frame.can_id |= CAN_EFF_FLAG;
+
+    if (timeout) {
+	if (verbose)
+	    printf("Timeout: %lld ns\n", (long long)timeout);
+	ret = ioctl(s, RTCAN_RTIOC_SND_TIMEOUT, &timeout);
+	if (ret) {
+	    fprintf(stderr, "ioctl SND_TIMEOUT: %s\n", strerror(errno));
+	    goto failure;
+	}
+    }
+
+    snprintf(name, sizeof(name), "rtcansend-%d", getpid());
+    ret = rt_task_shadow(&rt_task_desc, name, 1, 0);
+    if (ret) {
+	fprintf(stderr, "rt_task_shadow: %s\n", strerror(-ret));
+	goto failure;
+    }
+
+    rt_task();
+
+    cleanup();
+    return 0;
+
+ failure:
+    cleanup();
+    return -1;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/chkkconf/Makefile.am b/kernel/xenomai-v3.2.4/utils/chkkconf/Makefile.am
new file mode 100644
index 0000000..25e73d7
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/chkkconf/Makefile.am
@@ -0,0 +1,11 @@
+
+data_DATA = kconf-checklist
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+sbin_PROGRAMS = chkkconf
+
+chkkconf_SOURCES = checkconfig.c
+
+chkkconf_CPPFLAGS = 		\
+	-DDATADIR=\"$(datadir)\" -D_GNU_SOURCE
diff --git a/kernel/xenomai-v3.2.4/utils/chkkconf/checkconfig.c b/kernel/xenomai-v3.2.4/utils/chkkconf/checkconfig.c
new file mode 100644
index 0000000..134d8e5
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/chkkconf/checkconfig.c
@@ -0,0 +1,331 @@
+/*
+ * SPDX-License-Identifier: MIT
+ *
+ * Copyright (C) 2020 Philippe Gerum  <rpm@xenomai.org>
+ */
+
+#include <unistd.h>
+#include <getopt.h>
+#include <stdbool.h>
+#include <ctype.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <string.h>
+#include <search.h>
+#include <error.h>
+#include <errno.h>
+#include <sys/utsname.h>
+
+#define DEFAULT_KCONFIG    "/proc/config.gz"
+#define DEFAULT_CHECKLIST  DATADIR"/kconf-checklist"
+
+#define short_optlist "@hqf:L:a:H:"
+
+static int hash_size = 16384;
+
+static bool quiet;
+
+static const struct option options[] = {
+	{"file", required_argument, NULL, 'f'},
+	{"check-list", required_argument, NULL, 'L'},
+	{"arch", required_argument, NULL, 'a'},
+	{"hash-size", required_argument, NULL, 'H'},
+	{"quiet", no_argument, NULL, 'q'},
+	{"help", no_argument, NULL, 'h'},
+	{0}
+};
+
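+/*
+ * Index every CONFIG_* symbol set to 'y' or 'm' into the hash table,
+ * and return the CPU architecture parsed from the "# Linux/<arch>"
+ * header comment, if any.
+ */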
+static char *hash_config(FILE *kconfp)
+{
+	char buf[BUFSIZ], *sym, *val, *arch = NULL;
+	ENTRY entry, *e;
+	int ret;
+
+	ret = hcreate(hash_size);
+	if (!ret)
+		error(1, errno, "hcreate(%d)", hash_size);
+
+	while (fgets(buf, sizeof(buf), kconfp)) {
+		if (*buf == '#') {
+			sscanf(buf, "# Linux/%m[^ ]", &arch);
+			continue;
+		}
+		ret = sscanf(buf, "%m[^=]=%ms\n", &sym, &val);
+		if (ret != 2)
+			continue;
+		if (strncmp(sym, "CONFIG_", 7))
+			continue;
+		if (strcmp(val, "y") && strcmp(val, "m"))
+			continue;
+		entry.key = sym;
+		entry.data = NULL;
+		e = hsearch(entry, FIND);
+		if (e)
+			continue; /* duplicate symbol: keep the first entry */
+		entry.data = val;
+		if (!hsearch(entry, ENTER))
+			error(1, ENOMEM, "h-table full -- try -H");
+	}
+
+	return arch;
+}
+
+static char *next_token(char **next)
+{
+	char *p = *next, *s;
+
+	for (;;) {
+		if (!*p) {
+			*next = p;
+			return strdup("");
+		}
+		if (!isspace(*p))
+			break;
+		p++;
+	}
+
+	s = p;
+
+	if (!isalnum(*p) && *p != '_') {
+		*next = p + 1;
+		return strndup(s, 1);
+	}
+
+	do {
+		if (!isalnum(*p) && *p != '_') {
+			*next = p;
+			return strndup(s, p - s);
+		}
+	} while (*++p);
+
+	*next = p;
+
+	return strdup(s);
+}
+
+static const char *get_arch_alias(const char *arch)
+{
+	if (!strcmp(arch, "arm64"))
+		return "aarch64";
+
+	if (!strcmp(arch, "arm"))
+		return "aarch32";
+
+	return arch;
+}
+
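+/*
+ * Evaluate each assertion from the check list against the hashed
+ * configuration, printing every failed assertion unless --quiet.
+ */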
+static int apply_checklist(FILE *checkfp, const char *cpuarch)
+{
+	char buf[BUFSIZ], *token, *next, *sym, *val;
+	int lineno = 0, failed = 0;
+	bool not, notcond;
+	const char *arch;
+	ENTRY entry, *e;
+
+	while (fgets(buf, sizeof(buf), checkfp)) {
+		lineno++;
+		next = buf;
+
+		token = next_token(&next);
+		if (!*token || !strcmp(token, "#")) {
+			free(token);
+			continue;
+		}
+
+		not = *token == '!';
+		if (not) {
+			free(token);
+			token = next_token(&next);
+		}
+
+		sym = token;
+		if (strncmp(sym, "CONFIG_", 7))
+			error(1, EINVAL,
+				"invalid check list symbol '%s' at line %d",
+				sym, lineno);
+
+		token = next_token(&next);
+		val = NULL;
+		if (*token == '=') {
+			free(token);
+			val = next_token(&next);
+			token = next_token(&next);
+		}
+
+		if (!strcmp(token, "if")) {
+			free(token);
+			token = next_token(&next);
+			notcond = *token == '!';
+			if (notcond) {
+				free(token);
+				token = next_token(&next);
+			}
+			if (strncmp(token, "CONFIG_", 7))
+				error(1, EINVAL,
+					"invalid condition symbol '%s' at line %d",
+					token, lineno);
+			entry.key = token;
+			entry.data = NULL;
+			e = hsearch(entry, FIND);
+			free(token);
+			if (!((e && !notcond) || (!e && notcond)))
+				continue;
+			token = next_token(&next);
+		}
+
+		if (!strcmp(token, "on")) {
+			free(token);
+			token = next_token(&next);
+			arch = get_arch_alias(token);
+			if (strncmp(cpuarch, arch, strlen(arch))) {
+				free(token);
+				continue;
+			}
+		}
+
+		free(token);
+
+		entry.key = sym;
+		entry.data = NULL;
+		e = hsearch(entry, FIND);
+
+		if (val && !strcmp(val, "n"))
+			not = !not;
+
+		if (e && (not || (val && strcmp(val, e->data)))) {
+			if (!quiet)
+				printf("%s=%s\n", sym, (const char *)e->data);
+			failed++;
+		} else if (!e && !not) {
+			if (!quiet)
+				printf("%s=n\n", sym);
+			failed++;
+		}
+
+		free(sym);
+		if (val)
+			free(val);
+	}
+
+	return failed;
+}
+
+static void usage(char *arg0)
+{
+	fprintf(stderr, "usage: %s [options]:\n", basename(arg0));
+	fprintf(stderr, "-f --file=<.config>     Kconfig file to check [=/proc/config.gz]\n");
+	fprintf(stderr, "-L --check-list=<file>  configuration check list [="DATADIR"/kconf-checklist]\n");
+	fprintf(stderr, "-a --arch=<cpuarch>     CPU architecture assumed\n");
+	fprintf(stderr, "-H --hash-size=<N>      set the hash table size [=16384]\n");
+	fprintf(stderr, "-q --quiet              suppress output\n");
+	fprintf(stderr, "-h --help               this help\n");
+}
+
+int main(int argc, char *const argv[])
+{
+	const char *kconfig = DEFAULT_KCONFIG, *check_list = NULL,
+		*defarch, *arch = NULL, *p;
+	FILE *kconfp, *checkfp;
+	struct utsname ubuf;
+	int c, ret;
+	char *cmd;
+
+	opterr = 0;
+
+	for (;;) {
+		c = getopt_long(argc, argv, short_optlist, options, NULL);
+		if (c == -1)
+			break;
+
+		switch (c) {
+		case 0:
+			break;
+		case 'f':
+			kconfig = optarg;
+			break;
+		case 'L':
+			check_list = optarg;
+			break;
+		case 'a':
+			arch = optarg;
+			break;
+		case 'H':
+			hash_size = atoi(optarg);
+			if (hash_size < 16384)
+				hash_size = 16384;
+			break;
+		case 'q':
+			quiet = true;
+			break;
+		case 'h':
+			usage(argv[0]);
+			return 0;
+		case '@':
+			printf("check kernel configuration\n");
+			return 0;
+		default:
+			usage(argv[0]);
+			return 1;
+		}
+	}
+	if (optind < argc) {
+		usage(argv[0]);
+		return 1;
+	}
+
+	/*
+	 * We may be given a gzipped input file. Finding gunzip on a
+	 * minimalist rootfs (e.g. busybox) may be more likely than
+	 * having the zlib development files available from a common
+	 * cross-toolchain. So go for popen-ing gunzip on the target
+	 * instead of depending on libz on the development host.
+	 */
+	if (!strcmp(kconfig, "-")) {
+		kconfp = stdin;
+	} else {
+		p = strstr(kconfig, ".gz");
+		if (!p || strcmp(p, ".gz")) {
+			kconfp = fopen(kconfig, "r");
+		} else {
+			if (access(kconfig, R_OK))
+				error(1, errno, "cannot access %s%s",
+					kconfig,
+					strcmp(kconfig, DEFAULT_KCONFIG) ? "" :
+					"\n(you need CONFIG_IKCONFIG_PROC enabled)");
+			ret = asprintf(&cmd, "gunzip -c %s", kconfig);
+			if (ret < 0)
+				error(1, ENOMEM, "asprintf()");
+			kconfp = popen(cmd, "r");
+			free(cmd);
+		}
+		if (kconfp == NULL)
+			error(1, errno, "cannot open %s for reading", kconfig);
+	}
+
+	defarch = hash_config(kconfp);
+
+	if (check_list == NULL)
+		check_list = DEFAULT_CHECKLIST;
+
+	if (access(check_list, R_OK))
+		error(1, errno, "cannot access %s", check_list);
+
+	checkfp = fopen(check_list, "r");
+	if (checkfp == NULL)
+		error(1, errno, "cannot open %s for reading", check_list);
+
+	if (arch == NULL) {
+		if (defarch) {
+			arch = get_arch_alias(defarch);
+		} else {
+			ret = uname(&ubuf);
+			if (ret)
+				error(1, errno, "utsname()");
+			arch = ubuf.machine;
+		}
+	} else {
+		arch = get_arch_alias(arch);
+	}
+
+	return apply_checklist(checkfp, arch);
+}
diff --git a/kernel/xenomai-v3.2.4/utils/chkkconf/kconf-checklist b/kernel/xenomai-v3.2.4/utils/chkkconf/kconf-checklist
new file mode 100644
index 0000000..ca7c039
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/chkkconf/kconf-checklist
@@ -0,0 +1,51 @@
+# Kconfig check list for dovetail-based xenomai
+#
+# This file contains assertions testing a set of configuration
+# settings from a kernel .config file, which are fed to chkkconf.
+# Any failed assertion is reported.
+#
+#
+# check_list  : assertion
+# 	      | check_list assertion
+#
+# assertion   : expr conditions
+# 	      | "!" expr conditions
+#
+# expr	    : symbol /* matches =y and =m */
+# 	    | symbol "=" tristate
+#
+# tristate  : "y"
+# 	    | "m"
+# 	    | "n"
+#
+# conditions  : dependency
+# 	      | dependency arch
+#
+# dependency  : "if" symbol	/* true if set as y/m */
+#
+# arch	    : "on" cputype
+#
+# cputype   : $(uname -m)
+#
+# <arch> should match $(uname -m) or some abbreviated portion
+# of it.
+#
+# e.g.
+# "CONFIG_FOO must be set whenever CONFIG_BAR is UNset"
+# translates to: CONFIG_FOO if !CONFIG_BAR
+# "CONFIG_FOO must not be set"
+# translates to: !CONFIG_FOO, or conversely CONFIG_FOO=n
+# "CONFIG_FOO must be built as module on aarch32 or aarch64"
+# translates to: CONFIG_FOO=m on aarch
+
+CONFIG_CPU_FREQ_DEFAULT_GOV_PERFORMANCE=y if CONFIG_CPU_FREQ
+CONFIG_DEBUG_HARD_LOCKS=n
+CONFIG_ACPI_PROCESSOR_IDLE=n
+CONFIG_LOCKDEP=n
+CONFIG_DEBUG_LIST=n
+CONFIG_DEBUG_VM=n
+CONFIG_DEBUG_PER_CPU_MAPS=n
+CONFIG_KASAN=n
+CONFIG_DEBUG_ENTRY=n
+CONFIG_FTRACE=n
+CONFIG_MIGRATION=n
diff --git a/kernel/xenomai-v3.2.4/utils/corectl/Makefile.am b/kernel/xenomai-v3.2.4/utils/corectl/Makefile.am
new file mode 100644
index 0000000..4d9382c
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/corectl/Makefile.am
@@ -0,0 +1,17 @@
+
+CCLD = $(top_srcdir)/scripts/wrap-link.sh $(CC)
+
+sbin_PROGRAMS = corectl
+
+corectl_SOURCES = corectl.c
+
+corectl_CPPFLAGS = 		\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+corectl_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@ $(XENO_POSIX_WRAPPERS)
+
+corectl_LDADD =					\
+	 @XENO_CORE_LDADD@			\
+	 @XENO_USER_LDADD@			\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/utils/corectl/corectl.c b/kernel/xenomai-v3.2.4/utils/corectl/corectl.c
new file mode 100644
index 0000000..41b8773
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/corectl/corectl.c
@@ -0,0 +1,153 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <xeno_config.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <stdio.h>
+#include <error.h>
+#include <sys/cobalt.h>
+#include <xenomai/init.h>
+
+int __cobalt_control_bind = 1;
+
+static int action;
+
+static const struct option options[] = {
+	{
+#define status_opt	0	/* Set this first, default action. */
+		.name = "status",
+		.has_arg = no_argument,
+		.flag = &action,
+		.val = status_opt,
+	},
+	{
+#define stop_opt	1
+		.name = "stop",
+		.has_arg = optional_argument,
+		.flag = &action,
+		.val = stop_opt,
+	},
+	{
+#define start_opt	2
+		.name = "start",
+		.has_arg = no_argument,
+		.flag = &action,
+		.val = start_opt,
+	},
+	{ /* Sentinel */ }
+};
+
+void application_usage(void)
+{
+        fprintf(stderr, "usage: %s <option>:\n", get_program_name());
+	fprintf(stderr, "--stop [<grace-seconds>]	stop Xenomai/cobalt services\n");
+	fprintf(stderr, "--start  			start Xenomai/cobalt services\n");
+	fprintf(stderr, "--status			query Xenomai/cobalt status\n");
+}
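+
+/*
+ * Typical invocations (a sketch; assumes the cobalt core is loaded):
+ *
+ *   corectl --status      print running/stopped/disabled/warmup/teardown
+ *   corectl --stop=5      stop services after a 5 second grace period
+ *   corectl --start       start services again
+ */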
+
+static int core_stop(__u32 grace_period)
+{
+	return cobalt_corectl(_CC_COBALT_STOP_CORE,
+			      &grace_period, sizeof(grace_period));
+}
+
+static int core_start(void)
+{
+	return cobalt_corectl(_CC_COBALT_START_CORE, NULL, 0);
+}
+
+static int core_status(void)
+{
+	enum cobalt_run_states state = COBALT_STATE_DISABLED;
+	int ret;
+
+	ret = cobalt_corectl(_CC_COBALT_GET_CORE_STATUS,
+			     &state, sizeof(state));
+	if (ret && ret != -ENOSYS)
+		return ret;
+
+	switch (state) {
+	case COBALT_STATE_RUNNING:
+		printf("running\n");
+		break;
+	case COBALT_STATE_STOPPED:
+		printf("stopped\n");
+		break;
+	case COBALT_STATE_DISABLED:
+		printf("disabled\n");
+		break;
+	case COBALT_STATE_WARMUP:
+		printf("warmup\n");
+		break;
+	case COBALT_STATE_TEARDOWN:
+		printf("teardown\n");
+		break;
+	}
+
+	return 0;
+}
+
+int main(int argc, char *const argv[])
+{
+	__u32 grace_period = 0;
+	int lindex, c, ret;
+
+	for (;;) {
+		c = getopt_long_only(argc, argv, "", options, &lindex);
+		if (c == EOF)
+			break;
+		if (c == '?') {
+			xenomai_usage();
+			return EINVAL;
+		}
+		if (c > 0)
+			continue;
+
+		switch (lindex) {
+		case stop_opt:
+			grace_period = optarg ? atoi(optarg) : 0;
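+			/* fall through - the action is already latched. */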
+		case start_opt:
+		case status_opt:
+			break;
+		default:
+			return EINVAL;
+		}
+	}
+
+	switch (action) {
+	case stop_opt:
+		ret = core_stop(grace_period);
+		break;
+	case start_opt:
+		ret = core_start();
+		break;
+	case status_opt:
+		ret = core_status();
+		break;
+	default:
+		xenomai_usage();
+		exit(1);
+	}
+
+	if (ret)
+		error(1, -ret, "'%s' request failed", options[action].name);
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/hdb/Makefile.am b/kernel/xenomai-v3.2.4/utils/hdb/Makefile.am
new file mode 100644
index 0000000..f78f3c2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/hdb/Makefile.am
@@ -0,0 +1,15 @@
+sbin_PROGRAMS = hdb
+
+hdb_SOURCES = hdb.c
+
+hdb_CPPFLAGS = 			\
+	$(XENO_USER_CFLAGS)	\
+	-I$(top_srcdir)/include
+
+hdb_LDFLAGS = @XENO_AUTOINIT_LDFLAGS@
+
+hdb_LDADD =					\
+	../../lib/copperplate/libcopperplate@CORE@.la	\
+	@XENO_CORE_LDADD@			\
+	@XENO_USER_LDADD@			\
+	-lpthread -lrt
diff --git a/kernel/xenomai-v3.2.4/utils/hdb/hdb.c b/kernel/xenomai-v3.2.4/utils/hdb/hdb.c
new file mode 100644
index 0000000..1ecce6e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/hdb/hdb.c
@@ -0,0 +1,148 @@
+/*
+ * Copyright (C) 2015 Philippe Gerum <rpm@xenomai.org>
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <xeno_config.h>
+#include <unistd.h>
+#include <getopt.h>
+#include <stdlib.h>
+#include <malloc.h>
+#include <stdio.h>
+#include <error.h>
+#include <fcntl.h>
+#include <copperplate/cluster.h>
+#include <xenomai/init.h>
+
+static const struct option options[] = {
+	{
+#define dump_cluster_opt	0
+		.name = "dump-cluster",
+		.has_arg = required_argument,
+	},
+	{ /* Sentinel */ }
+};
+
+void application_usage(void)
+{
+        fprintf(stderr, "usage: %s <option>:\n", get_program_name());
+	fprintf(stderr, "--dump-cluster <name>		dump cluster <name>\n");
+}
+
+static int check_shared_heap(const char *cmd)
+{
+#ifndef CONFIG_XENO_PSHARED
+	fprintf(stderr,
+		"%s is only available for dumping shared clusters,\n"
+		"but --disable-pshared was given when building this\n"
+		"particular instance of the hdb program.\n", cmd);
+	return -ENOTSUP;
+#else
+	return 0;
+#endif
+}
+
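+/*
+ * Fetch the command line of the process owning a cluster object.
+ * /proc/<pid>/cmdline separates arguments with NUL bytes, so only
+ * argv[0] is visible in the dump output.
+ */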
+static int get_full_owner_info(pid_t pid, char *buf, size_t len)
+{
+	int fd, ret;
+	char *path;
+
+	ret = asprintf(&path, "/proc/%d/cmdline", pid);
+	if (ret < 0)
+		return -ENOMEM;
+
+	fd = open(path, O_RDONLY);
+	free(path);
+	if (fd < 0)
+		return -errno;
+
+	ret = read(fd, buf, len - 1);
+	close(fd);
+	if (ret < 0)
+		return -errno;
+
+	buf[ret] = '\0';
+
+	return 0;
+}
+
+static int walk_cluster(struct cluster *c, struct clusterobj *cobj)
+{
+	char pid[16], cmdline[50];
+	int ret;
+
+	ret = get_full_owner_info(clusterobj_cnode(cobj),
+				  cmdline, sizeof(cmdline));
+	if (ret)
+		return ret == -ENOENT ? 0 : ret;
+
+	snprintf(pid, sizeof(pid), "[%d]", clusterobj_cnode(cobj));
+	printf("%-9s %-20s %.*s\n", pid, cmdline,
+	       (int)clusterobj_keylen(cobj),
+	       (const char *)clusterobj_key(cobj));
+
+	return 0;
+}
+
+static int dump_cluster(const char *name)
+{
+	struct cluster cluster;
+	int ret;
+
+	ret = check_shared_heap("--dump-cluster");
+	if (ret)
+		return ret;
+
+	ret = cluster_init(&cluster, name);
+	if (ret)
+		return ret;
+
+	return cluster_walk(&cluster, walk_cluster);
+}
+
+int main(int argc, char *const argv[])
+{
+	const char *cluster_name = NULL;
+	int lindex, c, ret = 0;
+
+	for (;;) {
+		c = getopt_long_only(argc, argv, "", options, &lindex);
+		if (c == EOF)
+			break;
+		if (c == '?') {
+			xenomai_usage();
+			return EINVAL;
+		}
+		if (c > 0)
+			continue;
+
+		switch (lindex) {
+		case dump_cluster_opt:
+			cluster_name = optarg;
+			break;
+		default:
+			return EINVAL;
+		}
+	}
+
+	if (cluster_name)
+		ret = dump_cluster(cluster_name);
+
+	if (ret)
+		error(1, -ret, "hdb");
+
+	return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/net/Makefile.am b/kernel/xenomai-v3.2.4/utils/net/Makefile.am
new file mode 100644
index 0000000..86d351e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/Makefile.am
@@ -0,0 +1,16 @@
+nodist_sysconf_DATA = rtnet.conf
+
+dist_sysconf_DATA = tdma.conf
+
+AM_CPPFLAGS = -I$(top_srcdir)/kernel/drivers/net/stack/include
+
+sbin_SCRIPTS = rtnet
+
+sbin_PROGRAMS = \
+	nomaccfg \
+	rtcfg \
+	rtifconfig \
+	rtiwconfig \
+	rtping  \
+	rtroute \
+	tdmacfg
diff --git a/kernel/xenomai-v3.2.4/utils/net/nomaccfg.c b/kernel/xenomai-v3.2.4/utils/net/nomaccfg.c
new file mode 100644
index 0000000..f0b5daf
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/nomaccfg.c
@@ -0,0 +1,109 @@
+/***
+ *
+ *  tools/nomaccfg.c
+ *  Configuration tool for the RTmac/NoMAC discipline
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002       Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003, 2004 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+
+#include <nomac_chrdev.h>
+
+
+static int                     f;
+static struct nomac_config     nomac_cfg;
+
+
+void help(void)
+{
+    fprintf(stderr, "Usage:\n"
+        "\tnomaccfg <dev> attach\n"
+        "\tnomaccfg <dev> detach\n");
+
+    exit(1);
+}
+
+
+
+void do_attach(int argc, char *argv[])
+{
+    int r;
+
+
+    if (argc != 3)
+        help();
+
+    r = ioctl(f, NOMAC_IOC_ATTACH, &nomac_cfg);
+    if (r < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void do_detach(int argc, char *argv[])
+{
+    int r;
+
+
+    if (argc != 3)
+        help();
+
+    r = ioctl(f, NOMAC_IOC_DETACH, &nomac_cfg);
+    if (r < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
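+/* NoMAC, like the other RTmac disciplines, is driven through ioctls
+   on the /dev/rtnet control device. */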
+int main(int argc, char *argv[])
+{
+    if ((argc < 3) || (strcmp(argv[1], "--help") == 0))
+        help();
+
+    f = open("/dev/rtnet", O_RDWR);
+
+    if (f < 0) {
+        perror("/dev/rtnet");
+        exit(1);
+    }
+
+    strncpy(nomac_cfg.head.if_name, argv[1], IFNAMSIZ);
+
+    if (strcmp(argv[2], "attach") == 0)
+        do_attach(argc,argv);
+    if (strcmp(argv[2], "detach") == 0)
+        do_detach(argc,argv);
+
+    help();
+
+    return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/net/rtcfg.c b/kernel/xenomai-v3.2.4/utils/net/rtcfg.c
new file mode 100644
index 0000000..e68bf87
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/rtcfg.c
@@ -0,0 +1,562 @@
+/***
+ *
+ *  tools/rtcfg.c
+ *
+ *  Real-Time Configuration Distribution Protocol
+ *
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <limits.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/socket.h>
+#include <net/if.h>
+#include <netinet/in.h>
+#include <netinet/ether.h>
+#include <arpa/inet.h>
+
+#include <rtcfg_chrdev.h>
+
+
+#define DFLT_PACKET_SIZE        1500 /* Ethernet packet */
+#define DFLT_CLIENT_BURST_RATE  4
+
+#define MIN(a, b) ((a) < (b) ? (a) : (b))
+
+
+int              f;
+struct rtcfg_cmd cmd;
+
+
+void help(void)
+{
+    fprintf(stderr, "usage (server):\n"
+        "\trtcfg <dev> server [-p period] [-b burstrate] [-h <heartbeat>]\n"
+        "\t      [-t <threshold>] [-r]\n"
+        "\trtcfg <dev> add <address> [-hw <hw_address>] "
+            "[-stage1 <stage1_file>]\n"
+        "\t      [-stage2 <stage2_file>] [-t <timeout>]\n"
+        "\trtcfg <dev> del <address>\n"
+        "\trtcfg <dev> wait [-t <timeout>]\n"
+        "\trtcfg <dev> ready [-t <timeout>]\n"
+        "\trtcfg <dev> detach\n\n"
+        "usage (client):\n"
+        "\trtcfg <dev> client [-t <timeout>] [-c|-f <stage1_file>] "
+            "[-m maxstations]\n"
+        "\trtcfg <dev> announce [-t <timeout>] [-c|-f <stage2_file>]\n"
+        "\t      [-b burstrate] [-r]\n"
+        "\trtcfg <dev> ready [-t <timeout>]\n"
+        "\trtcfg <dev> detach\n");
+
+    exit(1);
+}
+
+
+
+int getintopt(int argc, int pos, char *argv[], int min)
+{
+    int result;
+
+
+    if (pos >= argc)
+        help();
+    if ((sscanf(argv[pos], "%u", &result) != 1) || (result < min)) {
+        fprintf(stderr, "invalid parameter: %s %s\n", argv[pos-1], argv[pos]);
+        exit(1);
+    }
+
+    return result;
+}
+
+
+
+void cmd_server(int argc, char *argv[])
+{
+    int i;
+
+
+    cmd.args.server.period    = 1000;
+    cmd.args.server.burstrate = 4;
+    cmd.args.server.heartbeat = 1000;
+    cmd.args.server.threshold = 2;
+    cmd.args.server.flags     = 0;
+
+    for (i = 3; i < argc; i++) {
+        if (strcmp(argv[i], "-p") == 0)
+            cmd.args.server.period = getintopt(argc, ++i, argv, 1);
+        else if (strcmp(argv[i], "-b") == 0)
+            cmd.args.server.burstrate = getintopt(argc, ++i, argv, 1);
+        else if (strcmp(argv[i], "-h") == 0)
+            cmd.args.server.heartbeat = getintopt(argc, ++i, argv, 0);
+        else if (strcmp(argv[i], "-t") == 0)
+            cmd.args.server.threshold = getintopt(argc, ++i, argv, 1);
+        else if (strcmp(argv[i], "-r") == 0)
+            cmd.args.server.flags |= FLAG_READY;
+        else
+            help();
+    }
+
+    i = ioctl(f, RTCFG_IOC_SERVER, &cmd);
+    if (i < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void cmd_add(int argc, char *argv[])
+{
+    int               i;
+    struct in_addr    ip_addr;
+    struct ether_addr mac_addr;
+    const char        *stage1_filename = NULL;
+    const char        *stage2_filename = NULL;
+    int               file;
+    size_t            buf_size;
+    void              *new_buf;
+
+
+    if (argc < 4)
+        help();
+
+    if (inet_aton(argv[3], &ip_addr)) {
+        cmd.args.add.addr_type = RTCFG_ADDR_IP;
+        cmd.args.add.ip_addr   = ip_addr.s_addr;
+    } else if (ether_aton_r(argv[3], &mac_addr) != NULL) {
+        cmd.args.add.addr_type = RTCFG_ADDR_MAC;
+        memcpy(cmd.args.add.mac_addr, mac_addr.ether_addr_octet,
+               sizeof(mac_addr.ether_addr_octet));
+    } else {
+        fprintf(stderr, "invalid IP or physical address: %s\n", argv[3]);
+        exit(1);
+    }
+
+    cmd.args.add.stage1_data     = NULL;
+    cmd.args.add.stage1_size     = 0;
+    cmd.args.add.stage2_filename = NULL;
+    cmd.args.add.timeout         = 0;   /* infinite */
+
+    for (i = 4; i < argc; i++) {
+        if (strcmp(argv[i], "-hw") == 0) {
+            if ((++i >= argc) || (ether_aton_r(argv[i], &mac_addr) == NULL))
+                help();
+            cmd.args.add.addr_type = RTCFG_ADDR_IP | FLAG_ASSIGN_ADDR_BY_MAC;
+            memcpy(cmd.args.add.mac_addr, mac_addr.ether_addr_octet,
+                   sizeof(mac_addr.ether_addr_octet));
+        } else if (strcmp(argv[i], "-stage1") == 0) {
+            if (++i >= argc)
+                help();
+            stage1_filename = argv[i];
+        } else if (strcmp(argv[i], "-stage2") == 0) {
+            if (++i >= argc)
+                help();
+            stage2_filename = argv[i];
+        } else if (strcmp(argv[i], "-t") == 0)
+            cmd.args.add.timeout = getintopt(argc, ++i, argv, 0);
+        else
+            help();
+    }
+
+    if (stage1_filename != NULL) {
+        if (strcmp(stage1_filename, "-") == 0)
+            file = 0; /* stdin */
+        else {
+            file = open(stage1_filename, O_RDONLY);
+            if (file < 0) {
+                perror("open stage 1 file");
+                exit(1);
+            }
+        }
+
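+        /*
+         * Read the stage 1 payload in 4 KiB chunks, growing the
+         * buffer until a short read signals end-of-file.
+         */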
+        buf_size = 0;
+        do {
+            buf_size += 4096;
+
+            new_buf = realloc(cmd.args.add.stage1_data, buf_size);
+            if (new_buf == NULL) {
+                fprintf(stderr, "insufficient memory\n");
+                if (cmd.args.add.stage1_data != NULL)
+                    free(cmd.args.add.stage1_data);
+                exit(1);
+            }
+            cmd.args.add.stage1_data = new_buf;
+
+            i = read(file, cmd.args.add.stage1_data+cmd.args.add.stage1_size,
+                     4096);
+            if (i < 0) {
+                perror("read stage 1 file");
+                free(cmd.args.add.stage1_data);
+                exit(1);
+            }
+            cmd.args.add.stage1_size += i;
+        } while (i == 4096);
+
+        close(file);
+    }
+
+    if (stage2_filename != NULL) {
+        cmd.args.add.stage2_filename = malloc(PATH_MAX);
+        if (cmd.args.add.stage2_filename == NULL) {
+            fprintf(stderr, "insufficient memory\n");
+            if (cmd.args.add.stage1_data != NULL)
+                free(cmd.args.add.stage1_data);
+            exit(1);
+        }
+
+        if (realpath(stage2_filename,
+                     (char *)cmd.args.add.stage2_filename) == NULL) {
+            perror("resolve stage 2 file");
+            free((void *)cmd.args.add.stage2_filename);
+            if (cmd.args.add.stage1_data != NULL)
+                free(cmd.args.add.stage1_data);
+            exit(1);
+        }
+    }
+
+    i = ioctl(f, RTCFG_IOC_ADD, &cmd);
+
+    if (cmd.args.add.stage1_data != NULL)
+        free(cmd.args.add.stage1_data);
+    if (cmd.args.add.stage2_filename != NULL)
+        free((void *)cmd.args.add.stage2_filename);
+
+    if (i < 0) {
+        switch (errno) {
+            case ESTAGE1SIZE:
+                fprintf(stderr, "stage 1 file too big\n");
+                break;
+
+            case EEXIST:
+                fprintf(stderr, "client entry already exists\n");
+                break;
+
+            default:
+                perror("ioctl (add)");
+        }
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void cmd_del(int argc, char *argv[])
+{
+    int                 i;
+    struct in_addr      ip_addr;
+    struct ether_addr   mac_addr;
+
+
+    if (argc != 4)
+        help();
+
+    if (inet_aton(argv[3], &ip_addr)) {
+        cmd.args.del.addr_type = RTCFG_ADDR_IP;
+        cmd.args.del.ip_addr   = ip_addr.s_addr;
+    } else if (ether_aton_r(argv[3], &mac_addr) != NULL) {
+        cmd.args.del.addr_type = RTCFG_ADDR_MAC;
+        memcpy(cmd.args.del.mac_addr, mac_addr.ether_addr_octet,
+               sizeof(mac_addr.ether_addr_octet));
+    } else {
+        fprintf(stderr, "invalid IP or physical address: %s\n", argv[3]);
+        exit(1);
+    }
+
+    i = ioctl(f, RTCFG_IOC_DEL, &cmd);
+    if (i < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void cmd_wait(int argc, char *argv[])
+{
+    int i;
+
+
+    cmd.args.wait.timeout = 0;  /* infinite */
+
+    for (i = 3; i < argc; i++) {
+        if (strcmp(argv[i], "-t") == 0)
+            cmd.args.wait.timeout = getintopt(argc, ++i, argv, 0);
+        else
+            help();
+    }
+
+    i = ioctl(f, RTCFG_IOC_WAIT, &cmd);
+    if (i < 0) {
+        if (errno != ETIME)
+            perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void cmd_client(int argc, char *argv[])
+{
+    int  i;
+    int  cfg_size;
+    int  cfg_file      = -1;
+    char *cfg_filename = NULL;
+    int  buffer_size   = 0;
+
+
+    cmd.args.client.timeout      = 0; /* infinite */
+    cmd.args.client.max_stations = 32;
+
+    for (i = 3; i < argc; i++) {
+        if (strcmp(argv[i], "-t") == 0) {
+            cmd.args.client.timeout = getintopt(argc, ++i, argv, 0);
+        } else if (strcmp(argv[i], "-c") == 0) {
+            cfg_file    = 1; /* standard output file descriptor */
+            buffer_size = DFLT_PACKET_SIZE;
+        } else if (strcmp(argv[i], "-f") == 0) {
+            if (++i >= argc)
+                help();
+            cfg_filename = argv[i];
+            buffer_size  = DFLT_PACKET_SIZE;
+        } else if (strcmp(argv[i], "-m") == 0)
+            cmd.args.client.max_stations = getintopt(argc, ++i, argv, 1);
+        else
+            help();
+    }
+
+    if (buffer_size > 0) {
+        cmd.args.client.buffer = malloc(buffer_size);
+        if (cmd.args.client.buffer == NULL) {
+            fprintf(stderr, "insufficient memory\n");
+            exit(1);
+        }
+    }
+    cmd.args.client.buffer_size = buffer_size;
+
+    cfg_size = ioctl(f, RTCFG_IOC_CLIENT, &cmd);
+
+    /* Buffer too small? Let's try again with the size the ioctl reported! */
+    if (cfg_size > buffer_size) {
+        free(cmd.args.client.buffer);
+
+        buffer_size = cfg_size;
+        cmd.args.client.buffer = malloc(buffer_size);
+        if (cmd.args.client.buffer == NULL) {
+            fprintf(stderr, "insufficient memory\n");
+            exit(1);
+        }
+        cmd.args.client.buffer_size = buffer_size;
+
+        cfg_size = ioctl(f, RTCFG_IOC_CLIENT, &cmd);
+    }
+
+    if (cfg_size < 0) {
+        if (errno != ETIME)
+            perror("ioctl");
+        if (cmd.args.client.buffer_size > 0)
+            free(cmd.args.client.buffer);
+        exit(1);
+    }
+
+    if (cfg_filename != NULL) {
+        cfg_file = open(cfg_filename, O_CREAT | O_WRONLY | O_TRUNC,
+                        S_IREAD | S_IWRITE);
+        if (cfg_file < 0) {
+            perror("create output file");
+            free(cmd.args.client.buffer);
+            exit(1);
+        }
+    }
+
+    if (cfg_file > 0) {
+        i = write(cfg_file, cmd.args.client.buffer, cfg_size);
+        free(cmd.args.client.buffer);
+        if (i < 0) {
+            perror("write output file");
+            exit(1);
+        }
+    }
+
+    exit(0);
+}
+
+
+
+void cmd_announce(int argc, char *argv[])
+{
+    int    i;
+    int    cfg_size;
+    int    cfg_file      = -1;
+    char   *cfg_filename = NULL;
+    size_t buffer_size   = 0;
+
+
+    cmd.args.announce.timeout     = 0; /* infinite */
+    cmd.args.announce.buffer_size = 0;
+    cmd.args.announce.flags       = 0;
+    cmd.args.announce.burstrate   = DFLT_CLIENT_BURST_RATE;
+
+    for (i = 3; i < argc; i++) {
+        if (strcmp(argv[i], "-t") == 0)
+            cmd.args.announce.timeout = getintopt(argc, ++i, argv, 0);
+        else if (strcmp(argv[i], "-c") == 0) {
+            cfg_file    = 1; /* standard output file descriptor */
+            buffer_size = DFLT_CLIENT_BURST_RATE * DFLT_PACKET_SIZE;
+        } else if (strcmp(argv[i], "-f") == 0) {
+            if (++i >= argc)
+                help();
+            cfg_filename = argv[i];
+            buffer_size  = DFLT_CLIENT_BURST_RATE * DFLT_PACKET_SIZE;
+        } else if (strcmp(argv[i], "-b") == 0)
+            cmd.args.announce.burstrate = getintopt(argc, ++i, argv, 1);
+        else if (strcmp(argv[i], "-r") == 0)
+            cmd.args.announce.flags |= FLAG_READY;
+        else
+            help();
+    }
+
+    if (buffer_size > 0) {
+        cmd.args.announce.buffer = malloc(buffer_size);
+        if (cmd.args.announce.buffer == NULL) {
+            fprintf(stderr, "insufficient memory\n");
+            exit(1);
+        }
+        cmd.args.announce.flags |= FLAG_STAGE_2_DATA;
+    }
+    cmd.args.announce.buffer_size = buffer_size;
+
+    if (cfg_filename != NULL) {
+        cfg_file = open(cfg_filename, O_CREAT | O_WRONLY | O_TRUNC,
+                        S_IREAD | S_IWRITE);
+        if (cfg_file < 0) {
+            perror("create output file");
+            free(cmd.args.announce.buffer);
+            exit(1);
+        }
+    }
+
+    while ((cfg_size = ioctl(f, RTCFG_IOC_ANNOUNCE, &cmd)) > 0) {
+        i = write(cfg_file, cmd.args.announce.buffer, cfg_size);
+        if (i < 0) {
+            perror("write output file");
+            exit(1);
+        }
+    }
+
+    if (cmd.args.announce.buffer != NULL)
+        free(cmd.args.announce.buffer);
+
+    if (i < 0) {
+        if (errno != ETIME)
+            perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void cmd_ready(int argc, char *argv[])
+{
+    int i;
+
+
+    cmd.args.ready.timeout = 0; /* infinite */
+
+    for (i = 3; i < argc; i++) {
+        if (strcmp(argv[i], "-t") == 0)
+            cmd.args.ready.timeout = getintopt(argc, ++i, argv, 0);
+        else
+            help();
+    }
+
+    i = ioctl(f, RTCFG_IOC_READY, &cmd);
+    if (i < 0) {
+        if (errno != ETIME)
+            perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void cmd_detach(int argc, char *argv[])
+{
+    if (argc > 3)
+        help();
+
+    if (ioctl(f, RTCFG_IOC_DETACH, &cmd) < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+int main(int argc, char *argv[])
+{
+    if ((argc < 3) || (strcmp(argv[1], "--help") == 0))
+        help();
+
+    f = open("/dev/rtnet", O_RDWR);
+
+    if (f < 0) {
+        perror("/dev/rtnet");
+        exit(1);
+    }
+
+    memset(&cmd, 0, sizeof(cmd));
+    strncpy(cmd.head.if_name, argv[1], IFNAMSIZ);
+
+    if (strcmp(argv[2], "server") == 0)
+        cmd_server(argc, argv);
+    if (strcmp(argv[2], "add") == 0)
+        cmd_add(argc, argv);
+    if (strcmp(argv[2], "del") == 0)
+        cmd_del(argc, argv);
+    if (strcmp(argv[2], "wait") == 0)
+        cmd_wait(argc, argv);
+
+    if (strcmp(argv[2], "client") == 0)
+        cmd_client(argc, argv);
+    if (strcmp(argv[2], "announce") == 0)
+        cmd_announce(argc, argv);
+    if (strcmp(argv[2], "ready") == 0)
+        cmd_ready(argc, argv);
+
+    if (strcmp(argv[2], "detach") == 0)
+        cmd_detach(argc, argv);
+
+    help();
+
+    return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/net/rtifconfig.c b/kernel/xenomai-v3.2.4/utils/net/rtifconfig.c
new file mode 100644
index 0000000..9a9abfc
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/rtifconfig.c
@@ -0,0 +1,440 @@
+/***
+ *
+ *  tools/rtifconfig.c
+ *  ifconfig replacement for RTnet
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 1999, 2000 Zentropic Computing, LLC
+ *                2004, 2005 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <ctype.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <net/ethernet.h>
+#include <net/if_arp.h>
+#include <netinet/ether.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <rtnet_chrdev.h>
+
+/* Some old toolchains don't have ARPHRD_IEEE1394 defined */
+#ifndef ARPHRD_IEEE1394
+#define ARPHRD_IEEE1394 24
+#endif
+
+
+#define PRINT_FLAG_ALL          1
+#define PRINT_FLAG_INACTIVE     2
+
+
+int                     f;
+struct rtnet_core_cmd   cmd;
+
+struct user_net_device_stats {
+    unsigned long long rx_packets;      /* total packets received       */
+    unsigned long long tx_packets;      /* total packets transmitted    */
+    unsigned long long rx_bytes;        /* total bytes received         */
+    unsigned long long tx_bytes;        /* total bytes transmitted      */
+    unsigned long rx_errors;            /* bad packets received         */
+    unsigned long tx_errors;            /* packet transmit problems     */
+    unsigned long rx_dropped;           /* no space in linux buffers    */
+    unsigned long tx_dropped;           /* no space available in linux  */
+    unsigned long rx_multicast;         /* multicast packets received   */
+    unsigned long rx_compressed;
+    unsigned long tx_compressed;
+    unsigned long collisions;
+
+    /* detailed rx_errors: */
+    unsigned long rx_length_errors;
+    unsigned long rx_over_errors;       /* receiver ring buff overflow  */
+    unsigned long rx_crc_errors;        /* recved pkt with crc error    */
+    unsigned long rx_frame_errors;      /* recv'd frame alignment error */
+    unsigned long rx_fifo_errors;       /* recv'r fifo overrun          */
+    unsigned long rx_missed_errors;     /* receiver missed packet     */
+    /* detailed tx_errors */
+    unsigned long tx_aborted_errors;
+    unsigned long tx_carrier_errors;
+    unsigned long tx_fifo_errors;
+    unsigned long tx_heartbeat_errors;
+    unsigned long tx_window_errors;
+};
+
+struct itf_stats {
+    char name[IFNAMSIZ];
+    struct user_net_device_stats stats;
+    struct itf_stats *next;
+};
+
+static struct itf_stats *itf_stats_head;
+
+void parse_stats(void)
+{
+    struct itf_stats *itf;
+    char buf[512];
+    FILE *fh;
+    int ret;
+
+    fh = fopen("/proc/rtnet/stats", "r");
+    if (!fh)
+        return;
+
+    fgets(buf, sizeof buf, fh); /* eat headers */
+    fgets(buf, sizeof buf, fh);
+
+    while (fgets(buf, sizeof buf, fh)) {
+        char *name, *p;
+
+        itf = malloc(sizeof(*itf));
+        if (!itf)
+            return;
+
+        name = buf;
+        while (isspace(*name))
+            name++;
+        p = name;
+        while (*p && *p != ':')
+            p++;
+        *p = '\0';
+        snprintf(itf->name, sizeof(itf->name), "%s", name);
+
+        p++;
+        ret = sscanf(p,
+               "%llu %llu %lu %lu %lu %lu %lu %lu %llu %llu %lu %lu %lu %lu %lu %lu",
+               &itf->stats.rx_bytes,
+               &itf->stats.rx_packets,
+               &itf->stats.rx_errors,
+               &itf->stats.rx_dropped,
+               &itf->stats.rx_fifo_errors,
+               &itf->stats.rx_frame_errors,
+               &itf->stats.rx_compressed,
+               &itf->stats.rx_multicast,
+               &itf->stats.tx_bytes,
+               &itf->stats.tx_packets,
+               &itf->stats.tx_errors,
+               &itf->stats.tx_dropped,
+               &itf->stats.tx_fifo_errors,
+               &itf->stats.collisions,
+               &itf->stats.tx_carrier_errors,
+               &itf->stats.tx_compressed);
+        if (ret < 16) {
+            free(itf);
+            continue;
+        }
+
+        itf->next = itf_stats_head;
+        itf_stats_head = itf;
+    }
+
+    fclose(fh);
+}
+
+struct itf_stats *find_stats(const char *itf_name)
+{
+    struct itf_stats *itf;
+
+    for(itf = itf_stats_head; itf; itf = itf->next)
+        if(!strcmp(itf->name, itf_name))
+            break;
+
+    return itf;
+}
+
+
+void help(void)
+{
+    fprintf(stderr, "Usage:\n"
+        "\trtifconfig [-a] [<dev>]\n"
+        "\trtifconfig <dev> up [<addr> [netmask <mask>]] "
+            "[hw <HW> <address>] [[-]promisc]\n"
+        "\trtifconfig <dev> down\n"
+        );
+
+    exit(1);
+}
+
+
+
+void print_dev(void)
+{
+    struct in_addr  ip_addr;
+    struct in_addr  broadcast_ip;
+    unsigned int    flags;
+    struct itf_stats *itf;
+
+
+    cmd.head.if_name[IFNAMSIZ - 1] = 0;
+
+    printf("%-9s Medium: ", cmd.head.if_name);
+
+    if ((cmd.args.info.flags & IFF_LOOPBACK) != 0)
+        printf("Local Loopback\n");
+    else
+        switch (cmd.args.info.type) {
+            case ARPHRD_ETHER:
+            case ARPHRD_IEEE1394:
+                printf("%s Hardware address: "
+                       "%02X:%02X:%02X:%02X:%02X:%02X\n",
+                       (cmd.args.info.type == ARPHRD_ETHER) ?
+                           "Ethernet " : "Eth1394 ",
+                       cmd.args.info.dev_addr[0], cmd.args.info.dev_addr[1],
+                       cmd.args.info.dev_addr[2], cmd.args.info.dev_addr[3],
+                       cmd.args.info.dev_addr[4], cmd.args.info.dev_addr[5]);
+                break;
+
+            default:
+                printf("unknown (%X)\n", cmd.args.info.type);
+        }
+
+    if (cmd.args.info.ip_addr != 0) {
+        ip_addr.s_addr      = cmd.args.info.ip_addr;
+        broadcast_ip.s_addr = cmd.args.info.broadcast_ip;
+        printf("          IP address: %s  ", inet_ntoa(ip_addr));
+        if (cmd.args.info.flags & IFF_BROADCAST)
+            printf("Broadcast address: %s", inet_ntoa(broadcast_ip));
+        printf("\n");
+    }
+
+    flags = cmd.args.info.flags &
+        (IFF_UP | IFF_BROADCAST | IFF_LOOPBACK | IFF_RUNNING | IFF_PROMISC);
+    printf("          %s%s%s%s%s%s MTU: %d\n",
+           ((flags & IFF_UP) != 0) ? "UP " : "",
+           ((flags & IFF_BROADCAST) != 0) ? "BROADCAST " : "",
+           ((flags & IFF_LOOPBACK) != 0) ? "LOOPBACK " : "",
+           ((flags & IFF_RUNNING) != 0) ? "RUNNING " : "",
+           ((flags & IFF_PROMISC) != 0) ? "PROMISC " : "",
+           (flags == 0) ? "[NO FLAGS] " : "", cmd.args.info.mtu);
+
+    if ((itf = find_stats(cmd.head.if_name))) {
+        unsigned long long rx, tx, short_rx, short_tx;
+        char Rext[5]="b";
+        char Text[5]="b";
+
+        printf("          ");
+        printf("RX packets:%llu errors:%lu dropped:%lu overruns:%lu frame:%lu\n",
+               itf->stats.rx_packets, itf->stats.rx_errors,
+               itf->stats.rx_dropped, itf->stats.rx_fifo_errors,
+               itf->stats.rx_frame_errors);
+
+        printf("          ");
+        printf("TX packets:%llu errors:%lu dropped:%lu overruns:%lu carrier:%lu\n",
+               itf->stats.tx_packets, itf->stats.tx_errors,
+               itf->stats.tx_dropped, itf->stats.tx_fifo_errors,
+               itf->stats.tx_carrier_errors);
+
+        printf("          collisions:%lu ", itf->stats.collisions);
+        printf("\n          ");
+
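+        /*
+         * Pre-scale the byte counters by 10 so one decimal digit
+         * survives the integer division below (e.g. "1.5 Mb").
+         */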
+        rx = itf->stats.rx_bytes;  
+        tx = itf->stats.tx_bytes;
+        short_rx = rx * 10;  
+        short_tx = tx * 10;
+        if (rx > 1048576) {
+            short_rx /= 1048576;
+            strcpy(Rext, "Mb");
+        } else if (rx > 1024) {
+            short_rx /= 1024;
+            strcpy(Rext, "Kb");
+        }
+        if (tx > 1048576) {
+            short_tx /= 1048576;
+            strcpy(Text, "Mb");
+        } else if (tx > 1024) {
+            short_tx /= 1024;
+            strcpy(Text, "Kb");
+        }
+
+        printf("RX bytes:%llu (%lu.%lu %s)  TX bytes:%llu (%lu.%lu %s)\n",
+               rx, (unsigned long)(short_rx / 10),
+               (unsigned long)(short_rx % 10), Rext, 
+               tx, (unsigned long)(short_tx / 10), 
+               (unsigned long)(short_tx % 10), Text);
+    }
+    printf("\n");
+}
+
+
+
+void do_display(int print_flags)
+{
+    int i;
+    int ret;
+
+
+    parse_stats();
+
+    if ((print_flags & PRINT_FLAG_ALL) != 0)
+        for (i = 1; i <= MAX_RT_DEVICES; i++) {
+            cmd.args.info.ifindex = i;
+
+            ret = ioctl(f, IOC_RT_IFINFO, &cmd);
+            if (ret == 0) {
+                if (((print_flags & PRINT_FLAG_INACTIVE) != 0) ||
+                    ((cmd.args.info.flags & IFF_UP) != 0))
+                    print_dev();
+            } else if (errno != ENODEV) {
+                perror("ioctl");
+                exit(1);
+            }
+        }
+    else {
+        cmd.args.info.ifindex = 0;
+
+        ret = ioctl(f, IOC_RT_IFINFO, &cmd);
+        if (ret < 0) {
+            perror("ioctl");
+            exit(1);
+        }
+
+        print_dev();
+    }
+
+    exit(0);
+}
+
+
+
+void do_up(int argc, char *argv[])
+{
+    int                 ret;
+    int                 i;
+    struct in_addr      addr;
+    __u32               ip_mask;
+    struct ether_addr   hw_addr;
+
+
+    if ((argc > 3) && (inet_aton(argv[3], &addr))) {
+        i = 4;
+        cmd.args.up.ip_addr = addr.s_addr;
+        if (addr.s_addr == 0xFFFFFFFF) {
+            fprintf(stderr, "Invalid IP address!\n");
+            exit(1);
+        }
+    } else {
+        i = 3;
+        /* don't change ip settings */
+        cmd.args.up.ip_addr = 0xFFFFFFFF;
+    }
+
+    /* set default netmask */
+    if (ntohl(cmd.args.up.ip_addr) <= 0x7FFFFFFF)       /* 127.255.255.255  */
+        ip_mask = 0x000000FF;                           /* 255.0.0.0        */
+    else if (ntohl(cmd.args.up.ip_addr) <= 0xBFFFFFFF)  /* 191.255.255.255  */
+        ip_mask = 0x0000FFFF;                           /* 255.255.0.0      */
+    else
+        ip_mask = 0x00FFFFFF;                           /* 255.255.255.0    */
+
+    /* default: don't change flags, don't set dev_addr */
+    cmd.args.up.set_dev_flags   = 0;
+    cmd.args.up.clear_dev_flags = 0;
+    cmd.args.up.dev_addr_type   = 0xFFFF;
+
+    /* parse optional parameters */
+    for ( ; i < argc; i++) {
+        if (strcmp(argv[i], "netmask") == 0) {
+            if ((++i >= argc) || (cmd.args.up.ip_addr == 0) ||
+                (!inet_aton(argv[i], &addr)))
+                help();
+            ip_mask = addr.s_addr;
+        } else if (strcmp(argv[i], "hw") == 0) {
+            if ((++i >= argc) || (strcmp(argv[i], "ether") != 0) ||
+                (++i >= argc) || (ether_aton_r(argv[i], &hw_addr) == NULL))
+                help();
+            memcpy(cmd.args.up.dev_addr, hw_addr.ether_addr_octet,
+                   sizeof(hw_addr.ether_addr_octet));
+            cmd.args.up.dev_addr_type = ARPHRD_ETHER;
+        } else if (strcmp(argv[i], "promisc") == 0) {
+            cmd.args.up.set_dev_flags   |= IFF_PROMISC;
+            cmd.args.up.clear_dev_flags &= ~IFF_PROMISC;
+        } else if (strcmp(argv[i], "-promisc") == 0) {
+            cmd.args.up.set_dev_flags   &= ~IFF_PROMISC;
+            cmd.args.up.clear_dev_flags |= IFF_PROMISC;
+        } else
+            help();
+    }
+
+    cmd.args.up.broadcast_ip = cmd.args.up.ip_addr | (~ip_mask);
+
+    ret = ioctl(f, IOC_RT_IFUP, &cmd);
+    if (ret < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void do_down(int argc,char *argv[])
+{
+    int r;
+
+    if (argc > 3)
+        help();
+
+    r = ioctl(f, IOC_RT_IFDOWN, &cmd);
+    if (r < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+int main(int argc, char *argv[])
+{
+    if ((argc > 1) && (strcmp(argv[1], "--help") == 0))
+        help();
+
+    f = open("/dev/rtnet", O_RDWR);
+
+    if (f < 0) {
+        perror("/dev/rtnet");
+        exit(1);
+    }
+
+    if (argc == 1)
+        do_display(PRINT_FLAG_ALL);
+
+    if (strcmp(argv[1], "-a") == 0) {
+        if (argc == 3) {
+            strncpy(cmd.head.if_name, argv[2], IFNAMSIZ);
+            do_display(PRINT_FLAG_INACTIVE);
+        } else
+            do_display(PRINT_FLAG_INACTIVE | PRINT_FLAG_ALL);
+    } else
+        strncpy(cmd.head.if_name, argv[1], IFNAMSIZ);
+
+    if (argc < 3)
+        do_display(0);
+
+    if (strcmp(argv[2], "up") == 0)
+        do_up(argc,argv);
+    if (strcmp(argv[2], "down") == 0)
+        do_down(argc,argv);
+
+    help();
+
+    return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/net/rtiwconfig.c b/kernel/xenomai-v3.2.4/utils/net/rtiwconfig.c
new file mode 100644
index 0000000..eecb293
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/rtiwconfig.c
@@ -0,0 +1,236 @@
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <sys/ioctl.h>
+#include <net/ethernet.h>
+#include <net/if_arp.h>
+#include <netinet/ether.h>
+#include <netinet/in.h>
+
+#include <rtwlan_io.h>
+
+#define PRINT_FLAG_ALL          1
+#define PRINT_FLAG_INACTIVE     2
+
+int f;
+struct rtwlan_cmd cmd;
+
+void help(void) {
+
+    fprintf(stderr, "Usage:\n"
+            "\trtiwconfig --help\n"
+            "\trtiwconfig [<dev>]\n"
+            "\trtiwconfig <dev> bitrate <2|4|11|22|12|18|24|36|48|72|96|108>\n"
+            "\trtiwconfig <dev> channel <1-13>\n"
+            "\rrtiwconfig <dev> retry   <0-255>\n"
+            "\trtiwconfig <dev> txpower <0-100>\n"
+            "\trtiwconfig <dev> bbpsens <0-127>\n"
+            "\trtiwconfig <dev> mode <raw|ack|monitor>\n"
+            "\trtiwconfig <dev> autoresponder <0|1>\n"
+            "\trtiwconfig <dev> dropbcast <0|1>\n"
+            "\trtiwconfig <dev> dropmcast <0|1>\n"
+            "\t-- WARNING: Direct register access may cause system hang ! --\n"
+            "\trtiwconfig <dev> regdump\n"
+            "\trtiwconfig <dev> regread <offset>\n"
+            "\trtiwconfig <dev> regwrite <offset> <value>\n"
+            "\trtiwconfig <dev> bbpwrite <reg_id> <value>\n"
+            );
+
+    exit(1);
+}
+
+void print_dev(void) {
+  
+    printf("\n");
+    printf("%s\n", cmd.head.if_name);
+    printf("bitrate: %d\t\t", cmd.args.info.bitrate);
+
+    printf("txpower: %d\n", cmd.args.info.txpower);
+    printf("channel: %d\t\t", cmd.args.info.channel);
+    printf("retry: %d\n", cmd.args.info.retry);
+    printf("autoresponder: %d\t", cmd.args.info.autoresponder);
+    printf("bbp sensibility: %d\n", cmd.args.info.bbpsens);
+    printf("drop broadcast: %d\t", cmd.args.info.dropbcast);
+    printf("rx packets: %5d\n", cmd.args.info.rx_packets);
+    printf("drop multicast: %d\t", cmd.args.info.dropmcast);
+    printf("tx packets: %5d\n", cmd.args.info.tx_packets);
+    printf("tx mode: ");
+    switch(cmd.args.info.mode) {
+    case RTWLAN_TXMODE_RAW:
+        printf("raw");
+        break;
+    case RTWLAN_TXMODE_ACK:
+        printf("ack");
+        break;
+    case RTWLAN_TXMODE_MCAST:
+        printf("mcast");
+        break;
+    default:
+        printf("unknown");
+    }
+    printf("\t\ttx retry: %7d\n", cmd.args.info.tx_retry);
+}
+
+void do_display(int print_flags) {
+
+    int i;
+    int ret;
+  
+    if ((print_flags & PRINT_FLAG_ALL) != 0)
+        for (i = 1; i <= MAX_RT_DEVICES; i++) {
+            cmd.args.info.ifindex = i;
+
+            ret = ioctl(f, IOC_RTWLAN_IFINFO, &cmd);
+            if (ret == 0) {
+                if (((print_flags & PRINT_FLAG_INACTIVE) != 0) ||
+                    ((cmd.args.info.flags & IFF_RUNNING) != 0))
+                    print_dev();
+            } 
+            else if (ret == -ENORTWLANDEV) {
+                continue;
+            }
+            else if (errno != ENODEV) {
+                perror("ioctl");
+                exit(1);
+            }
+        }
+    else {
+        cmd.args.info.ifindex = 0;
+    
+        ret = ioctl(f, IOC_RTWLAN_IFINFO, &cmd);
+        if(ret == -ENORTWLANDEV) {
+            printf("Device %s has no wireless extensions !\n", cmd.head.if_name);
+            exit(1);
+        } 
+        else if (ret < 0) {
+            perror("ioctl");
+            exit(1);
+        }
+    
+        print_dev();
+         
+    }
+
+    printf("\n");
+
+    exit(0);
+}
+
+int main(int argc, char * argv[]) {
+
+    int offset, ret = 0;
+
+    if ((argc > 1) && (strcmp(argv[1], "--help") == 0))
+        help();
+
+    f = open("/dev/rtnet", O_RDWR);
+
+    if (f < 0) {
+        perror("/dev/rtnet");
+        exit(1);
+    }
+
+    if(argc > 1)
+        strncpy(cmd.head.if_name, argv[1], IFNAMSIZ);
+
+    switch(argc) {
+    case 1:
+        do_display(PRINT_FLAG_ALL);
+        break;
+    case 2:
+        do_display(0);
+        break;
+    case 3:
+        if(strcmp(argv[2], "regdump") == 0) {
+
+            for(offset=0x0; offset <= 0x0174; offset+=0x04) {
+	
+                cmd.args.reg.address = offset;
+                ret = ioctl(f, IOC_RTWLAN_REGREAD, &cmd);
+                printf("rtiwconfig: offset=%3x reg=%8x\n", cmd.args.reg.address, cmd.args.reg.value);
+            }
+        } else
+            help();
+        break;
+    case 4:
+        if (strcmp(argv[2], "channel") == 0) {
+            cmd.args.set.channel = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_CHANNEL, &cmd);
+        } 
+        else if(strcmp(argv[2], "bitrate") == 0) {
+            cmd.args.set.bitrate = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_BITRATE, &cmd);
+        }
+        else if(strcmp(argv[2], "txpower") == 0) {
+            cmd.args.set.txpower = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_TXPOWER, &cmd);
+        }
+        else if(strcmp(argv[2], "retry") == 0) {
+            cmd.args.set.retry = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_RETRY, &cmd);
+        }
+        else if(strcmp(argv[2], "regread") == 0) {
+            sscanf(argv[3], "%x", &cmd.args.reg.address);
+            ret = ioctl(f, IOC_RTWLAN_REGREAD, &cmd);
+            printf("rtiwconfig: regread: address=%3x value=%8x\n", cmd.args.reg.address, cmd.args.reg.value);
+        }
+        else if(strcmp(argv[2], "bbpread") == 0) {
+            sscanf(argv[3], "%x", &cmd.args.reg.address);
+            ret = ioctl(f, IOC_RTWLAN_BBPREAD, &cmd);
+            printf("rtiwconfig: bbpread: address=%3x value=%4x\n", cmd.args.reg.address, cmd.args.reg.value); 
+        }
+        else if(strcmp(argv[2], "dropbcast") == 0) {
+            cmd.args.set.dropbcast = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_DROPBCAST, &cmd);
+        }
+        else if(strcmp(argv[2], "dropmcast") == 0) {
+            cmd.args.set.dropmcast = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_DROPMCAST, &cmd);
+        }
+        else if(strcmp(argv[2], "mode") == 0) {
+            if(strcmp(argv[3], "raw") == 0)
+                cmd.args.set.mode = RTWLAN_TXMODE_RAW;
+            else if(strcmp(argv[3], "ack") == 0)
+                cmd.args.set.mode = RTWLAN_TXMODE_ACK;
+            else if(strcmp(argv[3], "mcast") == 0)
+                cmd.args.set.mode = RTWLAN_TXMODE_MCAST;
+            ret = ioctl(f, IOC_RTWLAN_TXMODE, &cmd);
+        }
+        else if(strcmp(argv[2], "bbpsens") == 0) {
+            cmd.args.set.bbpsens = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_BBPSENS, &cmd);
+        }
+        else if(strcmp(argv[2], "autoresponder") == 0) {
+            cmd.args.set.autoresponder = atoi(argv[3]);
+            ret = ioctl(f, IOC_RTWLAN_AUTORESP, &cmd);
+        }
+        else
+            help();
+        break;
+    case 5:
+        if(strcmp(argv[2], "regwrite") == 0) {
+            sscanf(argv[3], "%x", &cmd.args.reg.address);
+            printf("regwrite: address=%x\n", cmd.args.reg.address);
+            sscanf(argv[4], "%x", &cmd.args.reg.value);
+            printf("regwrite: value=%x\n", cmd.args.reg.value);
+            ret = ioctl(f, IOC_RTWLAN_REGWRITE, &cmd);
+        }
+        else if(strcmp(argv[2], "bbpwrite") == 0) {
+            sscanf(argv[3], "%x", &cmd.args.reg.address);
+            sscanf(argv[4], "%x", &cmd.args.reg.value);
+            ret = ioctl(f, IOC_RTWLAN_BBPWRITE, &cmd);
+        }
+        break;
+    default:
+        help();
+    }
+
+    if(ret) {
+        perror("ioctl");
+        exit(1);
+    }
+  
+    return ret;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/net/rtnet.conf.in b/kernel/xenomai-v3.2.4/utils/net/rtnet.conf.in
new file mode 100644
index 0000000..12335d2
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/rtnet.conf.in
@@ -0,0 +1,79 @@
+# This file is usually located in <PREFIX>/etc/rtnet.conf
+# Please adapt it to your system.
+# This configuration file is used with the rtnet script.
+
+# RTnet installation path
+prefix="@prefix@"
+exec_prefix="@exec_prefix@"
+RTNET_MOD="/lib/modules/`uname -r`/kernel/drivers/xenomai/net"
+RTIFCONFIG="@sbindir@/rtifconfig"
+RTCFG="@sbindir@/rtcfg"
+TDMACFG="@sbindir@/tdmacfg"
+
+# Module suffix: ".o" for 2.4 kernels, ".ko" for later versions
+MODULE_EXT=".ko"
+
+
+
+# RT-NIC driver
+RT_DRIVER="rt_eepro100"
+RT_DRIVER_OPTIONS=""
+
+# PCI addresses of RT-NICs to claim (format: 0000:00:00.0)
+#   If both Linux and RTnet drivers for the same hardware are loaded, this
+#   list instructs the start script to rebind the given PCI devices, detaching
+#   from their Linux driver, attaching it to the RT driver above. Example:
+#   REBIND_RT_NICS="0000:00:19.0 0000:01:1d.1"
+REBIND_RT_NICS=""
+
+# IP address and netmask of this station
+#   The TDMA_CONFIG file overrides these parameters for masters and backup
+#   masters. Leave blank if you do not use IP addresses or if this station is
+#   intended to retrieve its IP from the master based on its MAC address.
+IPADDR="10.0.0.1"
+NETMASK=""
+
+# Start realtime loopback device ("yes" or "no")
+RT_LOOPBACK="yes"
+
+# Use the following RTnet protocol drivers
+RT_PROTOCOLS="udp packet"
+
+# Start capturing interface ("yes" or "no")
+RTCAP="no"
+
+
+
+# Common RTcfg stage 2 config data (master mode only)
+#   The TDMA_CONFIG file overrides this parameter.
+STAGE_2_SRC=""
+
+# Stage 2 config data destination file (slave mode only)
+STAGE_2_DST=""
+
+# Command to be executed after stage 2 phase (slave mode only)
+STAGE_2_CMDS=""
+
+
+
+# TDMA mode of the station ("master" or "slave")
+#   Start backup masters in slave mode, it will then be switched to master
+#   mode automatically during startup.
+TDMA_MODE="master"
+
+
+# Master parameters
+
+# Simple setup: List of TDMA slaves
+TDMA_SLAVES="10.0.0.2 10.0.0.3 10.0.0.4"
+
+# Simple setup: Cycle time in microsecond
+TDMA_CYCLE="5000"
+
+# Simple setup: Offset in microsecond between TDMA slots
+TDMA_OFFSET="200"
+
+# Advanced setup: Config file containing all TDMA station parameters
+#   To use this mode, uncomment the following line and disable the
+#   three master parameters above (SLAVES, CYCLE, and OFFSET).
+#TDMA_CONFIG="@sysconfdir@/tdma.conf"
diff --git a/kernel/xenomai-v3.2.4/utils/net/rtnet.in b/kernel/xenomai-v3.2.4/utils/net/rtnet.in
new file mode 100644
index 0000000..f81a7bb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/rtnet.in
@@ -0,0 +1,371 @@
+#!/bin/sh
+#
+# script for loading/unloading RTnet, RTmac/TDMA, and RTcap
+#
+
+prefix="@prefix@"
+RTNETCFG="@sysconfdir@/rtnet.conf"
+
+
+debug_func() {
+    echo "$*"
+    eval $*
+}
+
+usage() {
+    cat << EOF
+Usage:
+    $0 [-cf <config-file>] [-v] [-c] {start|stop}
+	Start or stop station according to configuration file
+
+    $0 [-cf <config-file>] [-v] [-c] master <slave_ip1> [<slave_ip2> ...]
+	Start station as master for given list of slaves
+
+    $0 [-cf <config-file>] [-v] capture
+	Start only passive realtime capturing
+
+    The additional switch -v enables verbose output.
+    The additional switch -c enables capturing mode to allow use of a network
+	analyzer such as Wireshark (if rtnet was built with --enable-rtcap).
+EOF
+}
+
+init_rtnet() {
+    modprobe rtnet >/dev/null || exit 1
+    modprobe rtipv4 >/dev/null || exit 1
+    modprobe $RT_DRIVER $RT_DRIVER_OPTIONS >/dev/null || exit 1
+
+    for dev in $REBIND_RT_NICS; do
+	if [ -d /sys/bus/pci/devices/$dev/driver ]; then
+	    echo $dev > /sys/bus/pci/devices/$dev/driver/unbind
+	fi
+	echo $dev > /sys/bus/pci/drivers/$RT_DRIVER/bind
+    done
+
+    for PROTOCOL in $RT_PROTOCOLS; do
+	modprobe rt$PROTOCOL >/dev/null || exit 1
+    done
+
+    if [ $RT_LOOPBACK = "yes" ]; then
+	modprobe rt_loopback >/dev/null || exit 1
+    fi
+
+    if [ $RTCAP = "yes" ]; then
+	modprobe rtcap >/dev/null || exit 1
+    fi
+
+    if [ $RT_LOOPBACK = "yes" ]; then
+	$RTIFCONFIG rtlo up 127.0.0.1
+    fi
+
+    if [ $RTCAP = "yes" ]; then
+	ifconfig rteth0 up
+	ifconfig rteth0-mac up
+	if [ $RT_LOOPBACK = "yes" ]; then
+	    ifconfig rtlo up
+	fi
+    fi
+
+    modprobe rtcfg >/dev/null
+    modprobe rtmac >/dev/null
+    modprobe tdma >/dev/null
+}
+
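+# Flush the station parameters gathered so far (from TDMA_CONFIG) into
+# rtcfg/tdmacfg commands, then reset them for the next station block.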
+submit_cfg() {
+    case "$STATION_TYPE" in
+	master)
+	    $RTIFCONFIG rteth0 up $STATION_IP
+
+	    $TDMACFG rteth0 master $TDMA_CYCLE
+	    eval "$TDMA_SLOTS"
+
+	    IPADDR=$STATION_IP
+	    NETMASK_OPT=
+	    ;;
+	slave)
+	    if [ ! "$STATION_IP" = "" ]; then
+		if [ ! "$STATION_MAC" = "" ]; then
+		    RTCFG_CLIENT="$STATION_IP -hw $STATION_MAC"
+		else
+		    RTCFG_CLIENT="$STATION_IP"
+		fi
+	    else
+		RTCFG_CLIENT="$STATION_MAC"
+	    fi
+
+	    ADD_STAGE1_CMDS="ifconfig vnic0 up $STATION_IP"
+
+	    echo "$TDMA_SLOTS$ADD_STAGE1_CMDS" | \
+		$RTCFG rteth0 add $RTCFG_CLIENT -stage1 -
+	    ;;
+	backup-master)
+	    if [ ! "$STATION_IP" = "" ]; then
+		if [ ! "$STATION_MAC" = "" ]; then
+		    RTCFG_CLIENT="$STATION_IP -hw $STATION_MAC"
+		else
+		    RTCFG_CLIENT="$STATION_IP"
+		fi
+	    else
+		RTCFG_CLIENT="$STATION_MAC"
+	    fi
+
+	    ADD_STAGE1_CMDS="ifconfig vnic0 up $STATION_IP"
+
+	    STAGE_2_OPT=
+	    if [ ! "$STATION_STAGE_2_SRC" = "" ]; then
+		STAGE_2_OPT="-stage2 $STATION_STAGE_2_SRC"
+	    fi
+
+	    echo "\$TDMACFG rteth0 detach;\$TDMACFG rteth0 master $TDMA_CYCLE -b $TDMA_BACKUP_OFFS;$TDMA_SLOTS$ADD_STAGE1_CMDS" | \
+		$RTCFG rteth0 add $RTCFG_CLIENT -stage1 - $STAGE_2_OPT
+	    ;;
+    esac
+
+    STATION_TYPE=
+    STATION_IP=
+    STATION_MAC=
+    STATION_STAGE_2_SRC=
+    TDMA_SLOTS=
+    TDMA_BACKUP_OFFS=
+}
+
+start_master() {
+    $RTCFG rteth0 server
+
+    STAGE_2_OPT=
+    if [ ! "$STAGE_2_SRC" = "" ]; then
+	STAGE_2_OPT="-stage2 $STAGE_2_SRC"
+    fi
+
+    if [ ! "$TDMA_SLAVES" = "" ]; then
+	# Simple setup:
+	#   Sync / Master Slot / + TDMA_OFFSET us / Slave 1 /
+	#   + TDMA_OFFSET us / Slave 2 / + TDMA_OFFSET us / ... / Slave n
+
+	$RTIFCONFIG rteth0 up $IPADDR $NETMASK_OPT
+
+	$TDMACFG rteth0 master $TDMA_CYCLE
+	$TDMACFG rteth0 slot 0 0
+
+	OFFSET=$TDMA_OFFSET
+	for SLAVE in $TDMA_SLAVES; do
+	    echo "\$TDMACFG rteth0 slot 0 $OFFSET;ifconfig vnic0 up \$IPADDR \$NETMASK_OPT" | \
+		$RTCFG rteth0 add $SLAVE -stage1 - $STAGE_2_OPT
+	    OFFSET=$(($OFFSET+$TDMA_OFFSET))
+	done
+    else
+	# Get setup from TDMA_CONFIG file:
+	#
+	# master:
+	# [ip 1.2.3.4]
+	# cycle <cycle_in_us>
+	# slot <id> <offset_in_us> [<phasing>/<period> [<size> [<joint_slot_id>]]]
+	# slot ...
+	#
+	# slave:
+	# ip 1.2.3.4
+	# mac AA:BB:CC:DD:EE:FF
+	# [stage2 <file>]
+	# slot ...
+	#
+	# slave:
+	# ip 1.2.3.4
+	# [stage2 <file>]
+	# slot ...
+	#
+	# slave:
+	# mac AA:BB:CC:DD:EE:FF
+	# [stage2 <file>]
+	# slot ...
+	#
+	# backup-master:
+	# ip 1.2.3.4    (or ip+mac or mac only, just like slaves)
+	# backup-slot <offset_in_us>
+	# [stage2 <file>]
+	# slot ...
+	#
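+	# A minimal two-station file might look like this (addresses and
+	# timings are placeholders):
+	#
+	# master:
+	# ip 10.0.0.1
+	# cycle 5000
+	# slot 0 0
+	#
+	# slave:
+	# ip 10.0.0.2
+	# slot 0 200
+	#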
+
+	if [ ! -r $TDMA_CONFIG ]; then
+	    echo "Could not read $TDMA_CONFIG"
+	    exit 1
+	fi
+
+	while read ARG1 ARG2 ARG3 ARG4 ARG5 ARG6; do
+	    case "$ARG1" in
+		"master:")
+		    submit_cfg
+		    STATION_TYPE=master
+		    ;;
+		"cycle")
+		    TDMA_CYCLE="$ARG2"
+		    ;;
+		"backup-master:")
+		    submit_cfg
+		    STATION_TYPE=backup-master
+		    ;;
+		"backup-offset")
+		    TDMA_BACKUP_OFFS="$ARG2"
+		    ;;
+		"slave:")
+		    submit_cfg
+		    STATION_TYPE=slave
+		    ;;
+		"ip")
+		    STATION_IP="$ARG2"
+		    ;;
+		"mac")
+		    STATION_MAC="$ARG2"
+		    ;;
+		"stage2")
+		    STATION_STAGE_2="$ARG2"
+		    ;;
+		"slot")
+		    TDMA_SLOTS="$TDMA_SLOTS\$TDMACFG rteth0 slot $ARG2 $ARG3"
+		    if [ ! "$ARG4" = "" ]; then
+			TDMA_SLOTS="$TDMA_SLOTS -p $ARG4"
+		    fi
+		    if [ ! "$ARG5" = "" ]; then
+			TDMA_SLOTS="$TDMA_SLOTS -s $ARG5"
+		    fi
+		    if [ ! "$ARG6" = "" ]; then
+			TDMA_SLOTS="$TDMA_SLOTS -j $ARG6"
+		    fi
+		    TDMA_SLOTS="$TDMA_SLOTS;"
+		    ;;
+	    esac
+	done < $TDMA_CONFIG
+	submit_cfg
+    fi
+
+    ifconfig vnic0 up $IPADDR $NETMASK_OPT
+
+    echo -n "Waiting for all slaves..."
+    $RTCFG rteth0 wait
+    $RTCFG rteth0 ready
+    echo
+}
+
+
+if [ "$1" = "-cf" ]; then
+    RTNETCFG="$2"
+    shift 2
+fi
+
+if [ -r "$RTNETCFG" ]; then
+    . $RTNETCFG
+else
+    echo "Could not read $RTNETCFG"
+    exit 1
+fi
+
+if [ "$1" = "-v" ]; then
+    echo "Turning on verbose mode"
+    RTIFCONFIG="debug_func $RTIFCONFIG"
+    RTCFG="debug_func $RTCFG"
+    TDMACFG="debug_func $TDMACFG"
+    shift 1
+fi
+
+if [ "$1" = "-c" ]; then
+    RTCAP="yes"
+    shift 1
+fi
+
+NETMASK_OPT=
+if [ ! "$NETMASK" = "" ]; then
+    NETMASK_OPT="netmask $NETMASK"
+fi
+
+
+case "$1" in
+    start)
+	init_rtnet
+
+	if [ "$TDMA_MODE" = "master" ]; then
+	    start_master
+	else
+	    $TDMACFG rteth0 slave
+
+	    $RTIFCONFIG rteth0 up $IPADDR $NETMASK_OPT
+
+	    echo -n "Stage 1: searching for master..."
+	    eval "`$RTCFG rteth0 client -c`"
+	    echo
+
+	    echo -n "Stage 2: waiting for other slaves..."
+	    if [ ! "$STAGE_2_DST" = "" ]; then
+		$RTCFG rteth0 announce -f $STAGE_2_DST
+		echo
+		eval "$STAGE_2_CMDS"
+	    else
+		$RTCFG rteth0 announce
+		echo
+	    fi
+
+	    echo -n "Stage 3: waiting for common setup completion..."
+	    $RTCFG rteth0 ready
+	    echo
+	fi
+	;;
+
+    stop)
+	ifconfig vnic0 down 2>/dev/null
+	ifconfig rteth0 down 2>/dev/null
+	ifconfig rteth0-mac down 2>/dev/null
+	ifconfig rtlo down 2>/dev/null
+
+	$RTIFCONFIG rteth0 down 2>/dev/null
+	$RTIFCONFIG rtlo down 2>/dev/null
+
+	rmmod tdma rtmac rtcfg rtcap rt_loopback $RT_DRIVER rtpacket rtudp rttcp rtipv4 rtnet 2>/dev/null
+
+	for dev in $REBIND_RT_NICS; do
+	    echo 1 > /sys/bus/pci/devices/$dev/remove
+	done
+	if [ ! "$REBIND_RT_NICS" = "" ]; then
+	    sleep 1
+	    echo 1 > /sys/bus/pci/rescan
+	fi
+	;;
+
+    master)
+	shift
+	init_rtnet
+	TDMA_SLAVES=$*
+	start_master
+	;;
+
+    capture)
+	modprobe rtnet >/dev/null || exit 1
+	modprobe $RT_DRIVER $RT_DRIVER_OPTIONS >/dev/null || exit 1
+	modprobe rtcap >/dev/null || exit 1
+	$RTIFCONFIG rteth0 up promisc
+	ifconfig rteth0 up
+	ifconfig rteth0-mac up
+	;;
+
+    loopback)
+	modprobe rtnet >/dev/null || exit 1
+	modprobe rtipv4 >/dev/null || exit 1
+
+	for PROTOCOL in $RT_PROTOCOLS; do
+	    modprobe rt$PROTOCOL >/dev/null || exit 1
+	done
+
+	modprobe rt_loopback >/dev/null || exit 1
+
+	if [ "$RTCAP" = "yes" ]; then
+	    modprobe rtcap >/dev/null || exit 1
+	fi
+
+	$RTIFCONFIG rtlo up 127.0.0.1
+
+	if [ "$RTCAP" = "yes" ]; then
+	    ifconfig rtlo up
+	fi
+	;;
+
+    *)
+	usage
+	exit 1
+esac
diff --git a/kernel/xenomai-v3.2.4/utils/net/rtping.c b/kernel/xenomai-v3.2.4/utils/net/rtping.c
new file mode 100644
index 0000000..bdaf6cb
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/rtping.c
@@ -0,0 +1,183 @@
+/***
+ *
+ *  tools/rtping.c
+ *  sends real-time ICMP echo requests
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <stdio.h>
+#include <stdlib.h>
+#include <errno.h>
+#include <fcntl.h>
+#include <signal.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <sys/ioctl.h>
+#include <sys/time.h>
+#include <netinet/in.h>
+#include <arpa/inet.h>
+
+#include <ipv4_chrdev.h>
+
+
+int             f;
+struct ipv4_cmd cmd;
+struct in_addr  addr;
+unsigned int    count    = 0;
+int             delay    = 1000;
+unsigned int    sent     = 0;
+unsigned int    received = 0;
+float           wc_rtt   = 0;
+
+
+void help(void)
+{
+    fprintf(stderr, "Usage:\n"
+        "\trtping [-c count] [-i interval] [-s packetsize] <addr>\n"
+        );
+
+    exit(1);
+}
+
+
+
+int getintopt(int argc, int pos, char *argv[], int min)
+{
+    int result;
+
+
+    if (pos >= argc)
+        help();
+    if ((sscanf(argv[pos], "%d", &result) != 1) || (result < min)) {
+        fprintf(stderr, "invalid parameter: %s %s\n", argv[pos-1], argv[pos]);
+        exit(1);
+    }
+
+    return result;
+}
+
+
+
+void print_statistics(void)
+{
+    printf("\n--- %s rtping statistics ---\n"
+           "%d packets transmitted, %d received, %d%% packet loss\n"
+           "worst case rtt = %.1f us\n",
+           inet_ntoa(addr), sent, received,
+           sent ? 100 - ((received * 100) / sent) : 0, wc_rtt);
+    exit(0);
+}
+
+
+
+void terminate(int signal)
+{
+    print_statistics();
+}
+
+
+
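+/*
+ * SIGALRM handler: issues one blocking IOC_RT_PING ioctl per timer
+ * tick. ETIME marks a lost reply and only shows up in the statistics;
+ * any other error is fatal.
+ */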
+void ping(int signal)
+{
+    int             ret;
+    struct in_addr  from;
+    float           rtt;
+
+
+    cmd.args.ping.ip_addr = addr.s_addr;
+    sent++;
+
+    ret = ioctl(f, IOC_RT_PING, &cmd);
+    if (ret < 0) {
+        if (errno == ETIME)
+            goto done;
+        perror("ioctl");
+        exit(1);
+    }
+
+    received++;
+    from.s_addr = cmd.args.ping.ip_addr;
+    rtt = (float)cmd.args.ping.rtt / (float)1000;
+    if (rtt > wc_rtt)
+        wc_rtt = rtt;
+    printf("%d bytes from %s: icmp_seq=%d time=%.1f us\n",
+           ret, inet_ntoa(from), cmd.args.ping.sequence, rtt);
+
+  done:
+    cmd.args.ping.sequence++;
+    if (count > 0 && sent == count)
+        print_statistics();
+}
+
+
+
+int main(int argc, char *argv[])
+{
+    const char          rtnet_dev[] = "/dev/rtnet";
+    struct timeval      time;
+    struct itimerval    timer = {{0, 0}, {0, 1}};
+    int                 i;
+
+
+    if (argc < 2)
+        help();
+
+    gettimeofday(&time, NULL);
+    cmd.args.ping.id       = time.tv_usec & 0xFFFF;
+    cmd.args.ping.sequence = 1;
+    cmd.args.ping.msg_size = 56;
+    cmd.args.ping.timeout  = 500;
+
+    for (i = 1; i < argc-1; i++) {
+        if (strcmp(argv[i], "-c") == 0)
+            count = getintopt(argc, ++i, argv, 1);
+        else if (strcmp(argv[i], "-i") == 0)
+            delay = getintopt(argc, ++i, argv, 1);
+        else if (strcmp(argv[i], "-s") == 0) {
+            cmd.args.ping.msg_size = getintopt(argc, ++i, argv, 0);
+            if (cmd.args.ping.msg_size > 1472)
+                cmd.args.ping.msg_size = 1472;
+        } else
+            help();
+    }
+
+    if (!inet_aton(argv[i], &addr))
+        help();
+
+    f = open(rtnet_dev, O_RDWR);
+    if (f < 0) {
+        perror(rtnet_dev);
+        exit(1);
+    }
+
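+    /* the bracketed size adds the 20-byte IP and 8-byte ICMP headers */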
+    printf("Real-time PING %s %u(%u) bytes of data.\n",
+           inet_ntoa(addr), cmd.args.ping.msg_size,
+           cmd.args.ping.msg_size + 28);
+
+    signal(SIGINT, terminate);
+    signal(SIGALRM, ping);
+    timer.it_interval.tv_sec  = delay / 1000;
+    timer.it_interval.tv_usec = (delay % 1000) * 1000;
+    setitimer(ITIMER_REAL, &timer, NULL);
+
+    while (1) pause();
+}
diff --git a/kernel/xenomai-v3.2.4/utils/net/rtroute.c b/kernel/xenomai-v3.2.4/utils/net/rtroute.c
new file mode 100644
index 0000000..ef6b56e
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/rtroute.c
@@ -0,0 +1,393 @@
+/***
+ *
+ *  tools/rtroute.c
+ *  manages IP host and network routes for RTnet
+ *
+ *  rtnet - real-time networking subsystem
+ *  Copyright (C) 2004 Jan Kiszka <jan.kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <netinet/in.h>
+#include <netinet/ether.h>
+#include <arpa/inet.h>
+
+#include <ipv4_chrdev.h>
+
+
+int             f;
+struct ipv4_cmd cmd;
+struct in_addr  addr;
+
+
+/* help gcc a bit... */
+void help(void) __attribute__((noreturn));
+
+void help(void)
+{
+    fprintf(stderr, "Usage:\n"
+        "\trtroute\n"
+        "\trtroute solicit <addr> dev <dev>\n"
+        "\trtroute add <addr> <hwaddr> dev <dev>\n"
+        "\trtroute add <addr> netmask <mask> gw <gw-addr>\n"
+        "\trtroute del <addr> [dev <dev>]\n"
+        "\trtroute del <addr> netmask <mask>\n"
+        "\trtroute get <addr> [dev <dev>]\n"
+        "\trtroute -f <host-routes-file>\n"
+        );
+
+    exit(1);
+}
+
+
+
+void print_routes(void)
+{
+    char        buf[4096];
+    int         proc;
+    ssize_t     size;
+    const char  host_route[] = "/proc/rtnet/ipv4/host_route";
+    const char  net_route[]  = "/proc/rtnet/ipv4/net_route";
+
+
+    if ((proc = open(host_route, O_RDONLY)) < 0) {
+        perror(host_route);
+        exit(1);
+    }
+
+    printf("Host Routing Table\n");
+    while ((size = read(proc, buf, sizeof(buf))) > 0)
+        write(STDOUT_FILENO, buf, size);
+
+    close(proc);
+
+    if ((proc = open(net_route, O_RDONLY)) < 0) {
+        if (errno == ENOENT) {
+            /* Network routing is not available */
+            exit(0);
+        }
+        perror(net_route);
+        exit(1);
+    }
+
+    printf("\nNetwork Routing Table\n");
+    while ((size = read(proc, buf, sizeof(buf))) > 0)
+        write(STDOUT_FILENO, buf, size);
+
+    close(proc);
+
+    exit(0);
+}
+
+
+
+void route_solicit(int argc, char *argv[])
+{
+    int ret;
+
+
+    if ((argc != 5) || (strcmp(argv[3], "dev") != 0))
+        help();
+
+    strncpy(cmd.head.if_name, argv[4], IFNAMSIZ);
+    cmd.args.solicit.ip_addr = addr.s_addr;
+
+    ret = ioctl(f, IOC_RT_HOST_ROUTE_SOLICIT, &cmd);
+    if (ret < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void route_add(int argc, char *argv[])
+{
+    struct ether_addr   dev_addr;
+    int                 ret;
+
+
+    if (argc == 6) {
+        /*** add host route ***/
+        if ((ether_aton_r(argv[3], &dev_addr) == NULL) ||
+            (strcmp(argv[4], "dev") != 0))
+            help();
+
+        cmd.args.addhost.ip_addr = addr.s_addr;
+        memcpy(cmd.args.addhost.dev_addr, dev_addr.ether_addr_octet,
+               sizeof(dev_addr.ether_addr_octet));
+        strncpy(cmd.head.if_name, argv[5], IFNAMSIZ);
+
+        ret = ioctl(f, IOC_RT_HOST_ROUTE_ADD, &cmd);
+    } else if (argc == 7) {
+        /*** add network route ***/
+        if ((strcmp(argv[3], "netmask") != 0) || (strcmp(argv[5], "gw") != 0))
+            help();
+
+        cmd.args.addnet.net_addr = addr.s_addr;
+        if (!inet_aton(argv[4], &addr))
+            help();
+        cmd.args.addnet.net_mask = addr.s_addr;
+        if (!inet_aton(argv[6], &addr))
+            help();
+        cmd.args.addnet.gw_addr = addr.s_addr;
+
+        ret = ioctl(f, IOC_RT_NET_ROUTE_ADD, &cmd);
+    } else
+        help();
+
+    if (ret < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+
+    exit(0);
+}
+
+
+
+void invalid_line_format(int line, char *file)
+{
+    fprintf(stderr, "error on line %u of file %s, expected file format:\n"
+            "# comment\n"
+            "<addr> <hwaddr> <dev>\n"
+            "...\n", line, file);
+}
+
+
+
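+/*
+ * Batch-add host routes from a file. Each non-comment line must read
+ * "<addr> <hwaddr> <dev>", e.g. (illustrative values only):
+ * 10.0.0.2 00:12:34:56:AA:FF rteth0
+ */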
+void route_listadd(char *name)
+{
+    FILE                *fp;
+    int                 line = 0;
+    int                 argc = 0;
+    int                 ret;
+    struct ether_addr   dev_addr;
+    char                buf[100];
+    char                *sp;
+    char                *args[4];
+    const char          space[] = " \t";
+
+
+    /*** try to open file ***/
+    fp = fopen(name, "r");
+    if (!fp) {
+        ret = errno;
+        fprintf(stderr, "opening file %s", name);
+        perror(NULL);
+        exit(1);
+    }
+
+    /*** fill buffer from file and add route ***/
+    while (fgets(buf, sizeof(buf), fp)) {
+        line++;
+
+        /* find newline char and make it end of string */
+        sp = strchr(buf, '\n');
+        if (sp)
+            *sp = '\0';
+
+        /* ignore comments and empty lines */
+        if ((buf[0] == '#') || (buf[0] == '\0'))
+            continue;
+
+        /* split string into tokens */
+        argc = 0;
+        args[argc] = strtok(buf, space);
+        do {
+            if (++argc > 3)
+                break;
+            args[argc] = strtok(NULL, space);
+        } while (args[argc]);
+
+        /* wrong number of arguments? */
+        if (argc != 3) {
+            invalid_line_format(line, name);
+            continue;
+        }
+
+        /*** check data ***/
+
+        /* check MAC */
+        if (ether_aton_r(args[1], &dev_addr) == NULL) {
+            invalid_line_format(line, name);
+            continue;
+        }
+
+        /* check IP */
+        if (!inet_aton(args[0], &addr)) {
+            invalid_line_format(line, name);
+            continue;
+        }
+
+        /*** turn it all into a cmd for rtnet and execute ***/
+        cmd.args.addhost.ip_addr = addr.s_addr;
+        memcpy(cmd.args.addhost.dev_addr, dev_addr.ether_addr_octet,
+               sizeof(dev_addr.ether_addr_octet));
+
+        /* use device <dev> */
+        strncpy(cmd.head.if_name, args[2], IFNAMSIZ);
+
+        ret = ioctl(f, IOC_RT_HOST_ROUTE_ADD, &cmd);
+
+        if (ret < 0) {
+            perror("ioctl");
+            exit(1);
+        }
+
+    }
+    fclose(fp);
+
+    exit(0);
+}
+
+
+
+void route_delete(int argc, char *argv[])
+{
+    int ret;
+
+
+    if (argc == 3) {
+        /*** delete host route ***/
+        cmd.args.delhost.ip_addr = addr.s_addr;
+
+        ret = ioctl(f, IOC_RT_HOST_ROUTE_DELETE, &cmd);
+    } else if (argc == 5) {
+        /*** delete device specific route ***/
+        if (strcmp(argv[3], "dev") == 0) {
+            cmd.args.delhost.ip_addr = addr.s_addr;
+            strncpy(cmd.head.if_name, argv[4], IFNAMSIZ);
+            ret = ioctl(f, IOC_RT_HOST_ROUTE_DELETE_DEV, &cmd);
+        }
+        /*** delete network route ***/
+        else if (strcmp(argv[3], "netmask") == 0) {
+            cmd.args.delnet.net_addr = addr.s_addr;
+            if (!inet_aton(argv[4], &addr))
+                help();
+            cmd.args.delnet.net_mask = addr.s_addr;
+
+            ret = ioctl(f, IOC_RT_NET_ROUTE_DELETE, &cmd);
+        }
+        else
+            help();
+    } else
+        help();
+
+    if (ret < 0) {
+        if (errno == ENOENT)
+            fprintf(stderr, "Specified route not found\n");
+        else
+            perror("ioctl");
+        exit(1);
+    }
+
+    exit(0);
+}
+
+
+
+void route_get(int argc, char *argv[])
+{
+    int ret;
+
+
+    if (argc == 3) {
+        /*** get host route ***/
+        cmd.args.gethost.ip_addr = addr.s_addr;
+
+        ret = ioctl(f, IOC_RT_HOST_ROUTE_GET, &cmd);
+    } else if (argc == 5) {
+        /*** get device specific route ***/
+        if (strcmp(argv[3], "dev") == 0) {
+            cmd.args.gethost.ip_addr = addr.s_addr;
+            strncpy(cmd.head.if_name, argv[4], IFNAMSIZ);
+            ret = ioctl(f, IOC_RT_HOST_ROUTE_GET_DEV, &cmd);
+        }
+        else
+            help();
+    } else
+        help();
+
+    if (ret >= 0) {
+        unsigned char *p = cmd.args.gethost.dev_addr;
+        printf("Destination\tHW Address\t\tDevice\n"
+               "%s\t%02x:%02x:%02x:%02x:%02x:%02x\t%s\n", argv[2],
+               p[0], p[1], p[2] , p[3], p[4], p[5], cmd.head.if_name);
+    } else {
+        if (errno == ENOENT) {
+            fprintf(stderr, "No route for host %s", argv[2]);
+            if (argc == 5)
+                fprintf(stderr, "on device %s", argv[4]);
+            fprintf(stderr, " found\n");
+        } else
+            perror("ioctl");
+        exit(1);
+    }
+
+    exit(0);
+}
+
+
+
+int main(int argc, char *argv[])
+{
+    const char  rtnet_dev[] = "/dev/rtnet";
+
+
+    if (argc == 1)
+        print_routes();
+
+    if ((strcmp(argv[1], "--help") == 0) || (argc < 3))
+        help();
+
+    f = open(rtnet_dev, O_RDWR);
+    if (f < 0) {
+        perror(rtnet_dev);
+        exit(1);
+    }
+
+    /* add host routes from file? */
+    if (strcmp(argv[1], "-f") == 0)
+        route_listadd(argv[2]);
+
+    /* second argument is now always an IP address */
+    if (!inet_aton(argv[2], &addr))
+        help();
+
+    if (strcmp(argv[1], "solicit") == 0)
+        route_solicit(argc, argv);
+    if (strcmp(argv[1], "add") == 0)
+        route_add(argc, argv);
+    if (strcmp(argv[1], "del") == 0)
+        route_delete(argc, argv);
+    if (strcmp(argv[1], "get") == 0)
+        route_get(argc, argv);
+
+    help();
+
+    return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/net/tdma.conf b/kernel/xenomai-v3.2.4/utils/net/tdma.conf
new file mode 100644
index 0000000..3a343c1
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/tdma.conf
@@ -0,0 +1,39 @@
+#
+# Exemplary TDMA configuration file
+#
+
+# Primary master
+
+master:
+ip 10.0.0.1
+cycle 5000
+slot 0 0
+slot 1 1000
+
+
+# Backup master
+#  Cycle is defined by the primary master
+
+backup-master:
+ip 10.0.0.2
+backup-offset 200
+slot 0 400
+
+
+# Slave A
+#  MAC is unknown, slave will be pre-configured to the given IP
+
+slave:
+ip 10.0.0.3
+slot 0 2000
+slot 1 2200 1/2
+
+
+# Slave B
+#  IP is assigned to the slave via its known MAC address
+
+slave:
+ip 10.0.0.4
+mac 00:12:34:56:AA:FF
+slot 0 2400
+slot 1 2200 2/2
diff --git a/kernel/xenomai-v3.2.4/utils/net/tdmacfg.c b/kernel/xenomai-v3.2.4/utils/net/tdmacfg.c
new file mode 100644
index 0000000..b1937b3
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/net/tdmacfg.c
@@ -0,0 +1,332 @@
+/***
+ *
+ *  tools/tdmacfg.c
+ *  Configuration tool for the RTmac/TDMA discipline
+ *
+ *  RTmac - real-time networking media access control subsystem
+ *  Copyright (C) 2002      Marc Kleine-Budde <kleine-budde@gmx.de>,
+ *                2003-2005 Jan Kiszka <Jan.Kiszka@web.de>
+ *
+ *  This program is free software; you can redistribute it and/or modify
+ *  it under the terms of the GNU General Public License as published by
+ *  the Free Software Foundation; either version 2 of the License, or
+ *  (at your option) any later version.
+ *
+ *  This program is distributed in the hope that it will be useful,
+ *  but WITHOUT ANY WARRANTY; without even the implied warranty of
+ *  MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ *  GNU General Public License for more details.
+ *
+ *  You should have received a copy of the GNU General Public License
+ *  along with this program; if not, write to the Free Software
+ *  Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ */
+
+#include <errno.h>
+#include <fcntl.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <string.h>
+#include <unistd.h>
+#include <sys/ioctl.h>
+#include <sys/stat.h>
+#include <arpa/inet.h>
+
+#include <tdma_chrdev.h>
+
+
+static int                  f;
+static struct tdma_config   tdma_cfg;
+
+
+static void help(void)
+{
+    fprintf(stderr, "Usage:\n"
+        "\ttdmacfg <dev> master <cycle_period> [-b <backup_offset>]\n"
+        "\t        [-c calibration_rounds] [-i max_slot_id]\n"
+        "\t        [-m max_calibration_requests]\n"
+        "\ttdmacfg <dev> slave [-c calibration_rounds] [-i max_slot_id]\n"
+        "\ttdmacfg <dev> slot <id> [<offset> [-p <phasing>/<period>] "
+            "[-s <size>]\n"
+        "\t         [-j <joint_slot_id>] [-l calibration_log_file]\n"
+        "\t         [-t calibration_timeout]]\n"
+        "\ttdmacfg <dev> detach\n");
+
+    exit(1);
+}
+
+
+
+int getintopt(int argc, int pos, char *argv[], int min)
+{
+    int result;
+
+
+    if (pos >= argc)
+        help();
+    if ((sscanf(argv[pos], "%i", &result) != 1) || (result < min)) {
+        fprintf(stderr, "invalid parameter: %s %s\n", argv[pos-1], argv[pos]);
+        exit(1);
+    }
+
+    return result;
+}
+
+
+
+void write_calibration_log(char *log_filename, unsigned int rounds,
+                           __u64 *cal_results)
+{
+    char    str_buf[32];
+    int     log_file;
+    int     i;
+    int     r;
+
+
+    log_file = open(log_filename, O_CREAT | O_WRONLY | O_TRUNC,
+                    S_IREAD | S_IWRITE);
+    if (log_file < 0) {
+        perror("create output file");
+        free(cal_results);
+        exit(1);
+    }
+
+    for (i = rounds-1; i >= 0; i--) {
+        r = sprintf(str_buf, "%llu\n", (unsigned long long)cal_results[i]);
+        if (write(log_file, str_buf, r) < 0) {
+            perror("write output file");
+            free(cal_results);
+            exit(1);
+        }
+    }
+
+    close(log_file);
+    free(cal_results);
+}
+
+
+
+void do_master(int argc, char *argv[])
+{
+    int     r;
+    int     i;
+
+
+    if (argc < 4)
+        help();
+
+    if ((sscanf(argv[3], "%d", &r) != 1) || (r <= 0)) {
+        fprintf(stderr, "invalid cycle period: %s\n", argv[3]);
+        exit(1);
+    }
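+    /* the period is given in microseconds, the kernel expects nanoseconds */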
+    tdma_cfg.args.master.cycle_period = ((uint64_t)r) * 1000;
+
+    tdma_cfg.args.master.backup_sync_offset = 0;
+    tdma_cfg.args.master.cal_rounds         = 100;
+    tdma_cfg.args.master.max_cal_requests   = 64;
+    tdma_cfg.args.master.max_slot_id        = 7;
+
+    for (i = 4; i < argc; i++) {
+        if (strcmp(argv[i], "-b") == 0)
+            tdma_cfg.args.master.backup_sync_offset =
+                getintopt(argc, ++i, argv, 0) * 1000;
+        else if (strcmp(argv[i], "-c") == 0)
+            tdma_cfg.args.master.cal_rounds = getintopt(argc, ++i, argv, 0);
+        else if (strcmp(argv[i], "-i") == 0)
+            tdma_cfg.args.master.max_slot_id = getintopt(argc, ++i, argv, 0);
+        else if (strcmp(argv[i], "-m") == 0)
+            tdma_cfg.args.master.max_cal_requests =
+                getintopt(argc, ++i, argv, 1);
+        else
+            help();
+    }
+
+    r = ioctl(f, TDMA_IOC_MASTER, &tdma_cfg);
+    if (r < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void do_slave(int argc, char *argv[])
+{
+    int     i;
+    int     r;
+
+
+    if (argc < 3)
+        help();
+
+    tdma_cfg.args.slave.cal_rounds  = 100;
+    tdma_cfg.args.slave.max_slot_id = 7;
+
+    for (i = 3; i < argc; i++) {
+        if (strcmp(argv[i], "-c") == 0)
+            tdma_cfg.args.slave.cal_rounds = getintopt(argc, ++i, argv, 0);
+        else if (strcmp(argv[i], "-i") == 0)
+            tdma_cfg.args.slave.max_slot_id = getintopt(argc, ++i, argv, 0);
+        else
+            help();
+    }
+
+    r = ioctl(f, TDMA_IOC_SLAVE, &tdma_cfg);
+    if (r < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+void do_slot(int argc, char *argv[])
+{
+    char            *log_filename = NULL;
+    int             result_size = 0;
+    unsigned int    ioc;
+    int             r;
+    int             i;
+
+
+    if (argc < 4)
+        help();
+
+    if ((sscanf(argv[3], "%d", &r) != 1) || (r < 0)) {
+        fprintf(stderr, "invalid slot id: %s\n", argv[3]);
+        exit(1);
+    }
+
+    if (argc > 4) {
+        tdma_cfg.args.set_slot.id = r;
+
+        if ((sscanf(argv[4], "%d", &r) != 1) || (r < 0)) {
+            fprintf(stderr, "invalid slot offset: %s\n", argv[4]);
+            exit(1);
+        }
+        tdma_cfg.args.set_slot.offset = ((uint64_t)r) * 1000;
+
+        tdma_cfg.args.set_slot.period      = 1;
+        tdma_cfg.args.set_slot.phasing     = 0;
+        tdma_cfg.args.set_slot.size        = 0;
+        tdma_cfg.args.set_slot.cal_timeout = 0;
+        tdma_cfg.args.set_slot.joint_slot  = -1;
+        tdma_cfg.args.set_slot.cal_results = NULL;
+
+        for (i = 5; i < argc; i++) {
+            if (strcmp(argv[i], "-l") == 0) {
+                if (++i >= argc)
+                    help();
+                log_filename = argv[i];
+            } else if (strcmp(argv[i], "-p") == 0) {
+                if (++i >= argc)
+                    help();
+                if ((sscanf(argv[i], "%u/%u",
+                            &tdma_cfg.args.set_slot.phasing,
+                            &tdma_cfg.args.set_slot.period) != 2) ||
+                    (tdma_cfg.args.set_slot.phasing < 1) ||
+                    (tdma_cfg.args.set_slot.period < 1) ||
+                    (tdma_cfg.args.set_slot.phasing >
+                        tdma_cfg.args.set_slot.period)) {
+                    fprintf(stderr, "invalid parameter: %s %s\n", argv[i-1],
+                            argv[i]);
+                    exit(1);
+                }
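+                /* phasing is 1-based on the command line, 0-based in-kernel */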
+                tdma_cfg.args.set_slot.phasing--;
+            } else if (strcmp(argv[i], "-s") == 0)
+                tdma_cfg.args.set_slot.size =
+                    getintopt(argc, ++i, argv, MIN_SLOT_SIZE);
+            else if (strcmp(argv[i], "-t") == 0)
+                tdma_cfg.args.set_slot.cal_timeout =
+                    getintopt(argc, ++i, argv, 0);
+            else if (strcmp(argv[i], "-j") == 0)
+                tdma_cfg.args.set_slot.joint_slot =
+                    getintopt(argc, ++i, argv, 0);
+            else
+                help();
+        }
+
+        if (log_filename) {
+            /* note: we can reuse tdma_cfg here as the head is the same and
+             *       will remain unmodified */
+            result_size = ioctl(f, TDMA_IOC_CAL_RESULT_SIZE, &tdma_cfg);
+            if (result_size > 0) {
+                tdma_cfg.args.set_slot.cal_results =
+                    (__u64 *)malloc(result_size * sizeof(__u64));
+                if (!tdma_cfg.args.set_slot.cal_results) {
+                    fprintf(stderr, "insufficient memory\n");
+                    exit(1);
+                }
+            } else
+                log_filename = NULL;
+        }
+
+        ioc = TDMA_IOC_SET_SLOT;
+    } else {
+        tdma_cfg.args.remove_slot.id = r;
+
+        ioc = TDMA_IOC_REMOVE_SLOT;
+    }
+
+    r = ioctl(f, ioc, &tdma_cfg);
+    if (r < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+
+    if (log_filename)
+        write_calibration_log(log_filename, result_size,
+                              tdma_cfg.args.set_slot.cal_results);
+    exit(0);
+}
+
+
+
+void do_detach(int argc, char *argv[])
+{
+    int r;
+
+
+    if (argc != 3)
+        help();
+
+    r = ioctl(f, TDMA_IOC_DETACH, &tdma_cfg);
+    if (r < 0) {
+        perror("ioctl");
+        exit(1);
+    }
+    exit(0);
+}
+
+
+
+int main(int argc, char *argv[])
+{
+    if ((argc < 3) || (strcmp(argv[1], "--help") == 0))
+        help();
+
+    f = open("/dev/rtnet", O_RDWR);
+
+    if (f < 0) {
+        perror("/dev/rtnet");
+        exit(1);
+    }
+
+    strncpy(tdma_cfg.head.if_name, argv[1], IFNAMSIZ);
+
+    if (strcmp(argv[2], "master") == 0)
+        do_master(argc, argv);
+    if (strcmp(argv[2], "slave") == 0)
+        do_slave(argc, argv);
+    if (strcmp(argv[2], "slot") == 0)
+        do_slot(argc, argv);
+    if (strcmp(argv[2], "detach") == 0)
+        do_detach(argc, argv);
+
+    help();
+
+    return 0;
+}
diff --git a/kernel/xenomai-v3.2.4/utils/ps/Makefile.am b/kernel/xenomai-v3.2.4/utils/ps/Makefile.am
new file mode 100644
index 0000000..1120e6d
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/ps/Makefile.am
@@ -0,0 +1,7 @@
+sbin_PROGRAMS = rtps
+
+AM_CPPFLAGS = 						\
+	@XENO_USER_CFLAGS@				\
+	-I$(top_srcdir)/include
+
+rtps_SOURCES = rtps.c
diff --git a/kernel/xenomai-v3.2.4/utils/ps/rtps.c b/kernel/xenomai-v3.2.4/utils/ps/rtps.c
new file mode 100644
index 0000000..6a07f11
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/ps/rtps.c
@@ -0,0 +1,91 @@
+/**
+ * @note Copyright (C) 2013 Philippe Gerum <rpm@xenomai.org>.
+ *
+ * Xenomai is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * Xenomai is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with Xenomai; if not, write to the Free Software
+ * Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA
+ * 02111-1307, USA.
+ */
+#include <string.h>
+#include <stdio.h>
+#include <error.h>
+#include <errno.h>
+#include <stdlib.h>
+
+#define PROC_ACCT  "/proc/xenomai/sched/acct"
+#define PROC_PID  "/proc/%d/cmdline"
+
+#define ACCT_FMT_1  "%u %d %lu %lu %lu %lu %lx %Lu %Lu %Lu"
+#define ACCT_FMT_2  ACCT_FMT_1 " %[^\n]"
+#define ACCT_NFMT_1 10
+#define ACCT_NFMT_2 11
+
+int main(int argc, char *argv[])
+{
+	char cmdpath[sizeof(PROC_PID) + 32], cmdbuf[BUFSIZ], acctbuf[BUFSIZ], name[64];
+	unsigned long ssw, csw, xsc, pf, state, sec;
+	unsigned long long account_period,
+		exectime_period, exectime_total, v;
+	unsigned int cpu, hr, min, msec, usec;
+	FILE *acctfp, *cmdfp;
+	int pid;
+
+	acctfp = fopen(PROC_ACCT, "r");
+	if (acctfp == NULL)
+		error(1, errno, "cannot open %s\n", PROC_ACCT);
+
+	printf("%-6s %-17s   %-24s %s\n\n",
+	       "PID", "TIME", "THREAD", "CMD");
+
+	while (fgets(acctbuf, sizeof(acctbuf), acctfp) != NULL) {
+		if (sscanf(acctbuf, ACCT_FMT_2,
+		      &cpu, &pid, &ssw, &csw, &xsc, &pf, &state,
+		      &account_period, &exectime_period,
+		      &exectime_total, name) != ACCT_NFMT_2) {
+			strcpy(name, "");
+			if (sscanf(acctbuf, ACCT_FMT_1,
+			      &cpu, &pid, &ssw, &csw, &xsc, &pf, &state,
+			     &account_period, &exectime_period,
+			     &exectime_total) != ACCT_NFMT_1) {
+				break;
+			}
+		}
+
+		snprintf(cmdpath, sizeof(cmdpath), PROC_PID, pid);
+		cmdfp = fopen(cmdpath, "r");
+
+		if (cmdfp == NULL ||
+		    fgets(cmdbuf, sizeof(cmdbuf), cmdfp) == NULL)
+			strcpy(cmdbuf, "-");
+
+		if (cmdfp)
+			fclose(cmdfp);
+
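+		/* break the nanosecond execution total down into h:min:s.ms,us */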
+		v = exectime_total;
+		sec = v / 1000000000LL;
+		v %= 1000000000LL;
+		msec = v / 1000000LL;
+		v %= 1000000LL;
+		usec = v / 1000LL;
+		hr = sec / (60 * 60);
+		sec %= (60 * 60);
+		min = sec / 60;
+		sec %= 60;
+		printf("%-6d %.3u:%.2u:%.2lu.%.3u,%.3u   %-24s %s\n",
+		       pid,
+		       hr, min, sec, msec, usec,
+		       name, cmdbuf);
+	}
+
+	exit(0);
+}
diff --git a/kernel/xenomai-v3.2.4/utils/slackspot/Makefile.am b/kernel/xenomai-v3.2.4/utils/slackspot/Makefile.am
new file mode 100644
index 0000000..87f99e4
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/slackspot/Makefile.am
@@ -0,0 +1,7 @@
+sbin_PROGRAMS = slackspot
+
+AM_CPPFLAGS = 				\
+	@XENO_USER_CFLAGS_STDLIB@	\
+	-I$(top_srcdir)/include
+
+slackspot_SOURCES = slackspot.c
diff --git a/kernel/xenomai-v3.2.4/utils/slackspot/slackspot.c b/kernel/xenomai-v3.2.4/utils/slackspot/slackspot.c
new file mode 100644
index 0000000..031b97f
--- /dev/null
+++ b/kernel/xenomai-v3.2.4/utils/slackspot/slackspot.c
@@ -0,0 +1,684 @@
+/*
+ * Copyright (C) 2010 Philippe Gerum <rpm@xenomai.org>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, write to the Free Software
+ * Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
+ *
+ * This utility parses the output of the /proc/xenomai/debug/relax
+ * vfile, to get backtraces of spurious relaxes.
+ */
+
+#include <sys/types.h>
+#include <sys/stat.h>
+#include <stdio.h>
+#include <error.h>
+#include <stdint.h>
+#include <stdlib.h>
+#include <fnmatch.h>
+#include <search.h>
+#include <unistd.h>
+#include <string.h>
+#include <errno.h>
+#include <limits.h>
+#include <malloc.h>
+#include <getopt.h>
+#include <signal.h>
+#include <cobalt/uapi/signal.h>
+
+static const struct option base_options[] = {
+	{
+#define help_opt	0
+		.name = "help",
+		.has_arg = no_argument,
+	},
+#define file_opt	1
+	{
+		.name = "file",
+		.has_arg = required_argument,
+	},
+#define path_opt	2
+	{
+		.name = "path",
+		.has_arg = required_argument,
+	},
+#define filter_opt	3	/* Alias for filter-in */
+	{
+		.name = "filter",
+		.has_arg = required_argument,
+	},
+#define filter_in_opt	4
+	{
+		.name = "filter-in",
+		.has_arg = required_argument,
+	},
+#define filter_out_opt	5	/* Alias for !filter-in */
+	{
+		.name = "filter-out",
+		.has_arg = required_argument,
+	},
+	{ /* Sentinel */ }
+};
+
+struct relax_spot;
+
+struct filter {
+	int (*op)(struct filter *f, struct relax_spot *p);
+	char *exp;
+	struct filter *next;
+} *filter_list = NULL;
+
+int filter_not = 0;
+
+struct ldpath_dir {
+	char *path;
+	struct ldpath_dir *next;
+} *ldpath_list = NULL;
+
+struct location {
+	unsigned long pc;
+	char *function;
+	char *file;
+	int lineno;
+	struct location *next;	/* next in mapping. */
+};
+
+struct mapping {
+	char *name;
+	struct location *locs;
+	struct mapping *next;
+} *mapping_list = NULL;
+
+static const struct location undefined_location; /* All zero. */
+
+struct relax_spot {
+	char *exe_path;
+	char *thread_name;
+	char *reason;
+	pid_t pid;
+	int hits;
+	int depth;
+	struct backtrace {
+		unsigned long pc;
+		struct mapping *mapping;
+		const struct location *where;
+	} backtrace[SIGSHADOW_BACKTRACE_DEPTH];
+	struct relax_spot *next;
+} *spot_list = NULL;
+
+int spot_count, filtered_count = 0;
+
+const char *toolchain_prefix;
+
+static int filter_thread(struct filter *f, struct relax_spot *p)
+{
+	return fnmatch(f->exp, p->thread_name, 0);
+}
+
+static int filter_pid(struct filter *f,  struct relax_spot *p)
+{
+	char pid[16];
+
+	sprintf(pid, "%d", p->pid);
+
+	return fnmatch(f->exp, pid, 0);
+}
+
+static int filter_exe(struct filter *f, struct relax_spot *p)
+{
+	return fnmatch(f->exp, p->exe_path, FNM_PATHNAME);
+}
+
+static int filter_function(struct filter *f, struct relax_spot *p)
+{
+	struct backtrace *b;
+	int depth;
+
+	for (depth = 0, b = p->backtrace; depth < p->depth; b++, depth++) {
+		if (b->where->function &&
+		    !fnmatch(f->exp, b->where->function, 0))
+			return 0;
+	}
+
+	return FNM_NOMATCH;
+}
+
+static int filter_file(struct filter *f, struct relax_spot *p)
+{
+	struct backtrace *b;
+	int depth;
+
+	for (depth = 0, b = p->backtrace; depth < p->depth; b++, depth++) {
+		if (b->where->file &&
+		    !fnmatch(f->exp, b->where->file, FNM_PATHNAME))
+			return 0;
+	}
+
+	return FNM_NOMATCH;
+}
+
+static int filter_map(struct filter *f, struct relax_spot *p)
+{
+	struct backtrace *b;
+	int depth;
+
+	for (depth = 0, b = p->backtrace; depth < p->depth; b++, depth++) {
+		if (*b->mapping->name != '?' &&
+		    !fnmatch(f->exp, b->mapping->name, FNM_PATHNAME))
+			return 0;
+	}
+
+	return FNM_NOMATCH;
+}
+
+static int build_filter_list(const char *filters)
+{
+	char *filter, *name;
+	struct filter *f;
+	int ret;
+
+	if (filters == NULL)
+		return 0;
+
+	filter = strdup(filters);
+	while ((filter = strtok(filter, ",")) != NULL) {
+		f = malloc(sizeof(*f));
+		ret = sscanf(filter, "%m[a-z]=%m[^\n]", &name, &f->exp);
+		if (ret != 2)
+			return EINVAL;
+		if (strcmp(name, "thread") == 0)
+			f->op = filter_thread;
+		else if (strcmp(name, "pid") == 0)
+			f->op = filter_pid;
+		else if (strcmp(name, "exe") == 0)
+			f->op = filter_exe;
+		else if (strcmp(name, "function") == 0)
+			f->op = filter_function;
+		else if (strcmp(name, "file") == 0)
+			f->op = filter_file;
+		else if (strcmp(name, "map") == 0)
+			f->op = filter_map;
+		else
+			return EINVAL;
+		f->next = filter_list;
+		filter_list = f;
+		filter = NULL;
+	}
+
+	return 0;
+}
+
+static inline int match_filter_list(struct relax_spot *p)
+{
+	struct filter *f;
+
+	for (f = filter_list; f; f = f->next) {
+		if (f->op(f, p))
+			return 1 ^ filter_not;
+	}
+
+	return 0 ^ filter_not;	/* All matched. */
+}
+
+static void build_ldpath_list(const char *ldpath)
+{
+	char *dir, *cccmd, *search_path, *p;
+	struct ldpath_dir *dpath;
+	FILE *fp;
+	int ret;
+
+	if (ldpath == NULL)
+		goto cross_toolchain;
+
+	dir = strdup(ldpath);
+	while ((dir = strtok(dir, ":")) != NULL) {
+		dpath = malloc(sizeof(*dpath));
+		dpath->path = dir;
+		dpath->next = ldpath_list;
+		ldpath_list = dpath;
+		dir = NULL;
+	}
+
+cross_toolchain:
+	if (toolchain_prefix == NULL)
+		return;
+
+	ret = asprintf(&cccmd, "%sgcc -print-search-dirs|grep '^libraries: ='",
+		       toolchain_prefix);
+	if (ret < 0)
+		goto no_mem;
+
+	fp = popen(cccmd, "r");
+	if (fp == NULL)
+		error(1, errno, "cannot run %s", cccmd);
+	free(cccmd);
+
+	ret = fscanf(fp, "libraries: =%m[^\n]\n", &search_path);
+	if (ret != 1)
+		goto bad_output;
+
+	/*
+	 * Feed our ldpath list with the cross-compiler's search list
+	 * for libraries.
+	 */
+	dir = search_path;
+	while ((dir = strtok(dir, ":")) != NULL) {
+		p = strrchr(dir, '/');
+		if (p)
+			*p = '\0';
+		dpath = malloc(sizeof(*dpath));
+		dpath->path = dir;
+		dpath->next = ldpath_list;
+		ldpath_list = dpath;
+		dir = NULL;
+	}
+
+	pclose(fp);
+
+	return;
+no_mem:
+	error(1, ENOMEM, "build_ldpath_list failed");
+
+bad_output:
+	error(1, 0, "garbled gcc output for -print-search-dirs");
+}
+
+static char *resolve_path(char *mapping)
+{
+	struct ldpath_dir *dpath;
+	char *path, *basename;
+	int ret;
+
+	/*
+	 * Don't use the original mapping name verbatim if
+	 * CROSS_COMPILE was specified, it is unlikely that the right
+	 * target file could be found at the same place on the host.
+	 */
+	if (*mapping == '?' ||
+	    (toolchain_prefix == NULL && access(mapping, F_OK) == 0))
+		return mapping;
+
+	basename = strrchr(mapping, '/');
+	if (basename++ == NULL)
+		basename = mapping;
+
+	for (dpath = ldpath_list; dpath; dpath = dpath->next) {
+		ret = asprintf(&path, "%s/%s", dpath->path, basename);
+		if (ret < 0)
+			goto no_mem;
+		/* Pick first match. */
+		if (access(path, F_OK) == 0) {
+			free(mapping);
+			return path;
+		}
+		free(path);
+	}
+
+	/* No match. Leave the mapping name unchanged */
+	return mapping;
+
+no_mem:
+	error(1, ENOMEM, "resolve_path failed");
+	return NULL;		/* not reached. */
+}
+
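+/*
+ * Parse the relax vfile. As consumed below, it carries a spot count,
+ * then for each spot the exe path, one "pid hits reason thread-name"
+ * line, and one "pc mapping" line per frame, closed by a single '.'.
+ */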
+static void read_spots(FILE *fp)
+{
+	struct relax_spot *p;
+	struct mapping *m;
+	unsigned long pc;
+	char *mapping, c;
+	ENTRY e, *ep;
+	int ret;
+
+	ret = fscanf(fp, "%d\n", &spot_count);
+	if (ret != 1) {
+		if (feof(fp))
+			return;
+		goto bad_input;
+	}
+
+	hcreate(spot_count * SIGSHADOW_BACKTRACE_DEPTH);
+
+	for (;;) {
+		p = malloc(sizeof(*p));
+		if (p == NULL)
+			error(1, 0, "out of memory");
+
+		ret = fscanf(fp, "%m[^\n]\n", &p->exe_path);
+		if (ret != 1) {
+			if (feof(fp))
+				return;
+			goto bad_input;
+		}
+
+		ret = fscanf(fp, "%d %d %m[^ ] %m[^\n]\n",
+			     &p->pid, &p->hits, &p->reason, &p->thread_name);
+		if (ret != 4)
+			goto bad_input;
+
+		p->depth = 0;
+		for (;;) {
+			if (p->depth >= SIGSHADOW_BACKTRACE_DEPTH)
+				break;
+			c = getc(fp);
+			if (c == '.' && getc(fp) == '\n')
+				break;
+			ungetc(c, fp);
+			ret = fscanf(fp, "%lx %m[^\n]\n", &pc, &mapping);
+			if (ret != 2)
+				goto bad_input;
+
+			mapping = resolve_path(mapping);
+			e.key = mapping;
+			ep = hsearch(e, FIND);
+			if (ep == NULL) {
+				m = malloc(sizeof(*m));
+				if (m == NULL)
+					goto no_mem;
+				m->name = mapping;
+				m->locs = NULL;
+				m->next = mapping_list;
+				mapping_list = m;
+				e.data = m;
+				ep = hsearch(e, ENTER);
+				if (ep == NULL)
+					goto no_mem;
+			} else {
+				free(mapping);
+				m = ep->data;
+			}
+
+			/*
+			 * Move one byte backward to point to the call
+			 * site, not to the next instruction. This
+			 * usually works fine...
+			 */
+			p->backtrace[p->depth].pc = pc - 1;
+			p->backtrace[p->depth].mapping = m;
+			p->backtrace[p->depth].where = &undefined_location;
+			p->depth++;
+		}
+
+		if (p->depth == 0)
+			goto bad_input;
+
+		p->next = spot_list;
+		spot_list = p;
+	}
+
+bad_input:
+	error(1, 0, "garbled trace input");
+no_mem:
+	error(1, ENOMEM, "read_spots failed");
+}
+
+static inline
+struct location *find_location(struct location *head, unsigned long pc)
+{
+	struct location *l = head;
+
+	while (l) {
+		if (l->pc == pc)
+			return l;
+		l = l->next;
+	}
+
+	return NULL;
+}
+
+static void resolve_spots(void)
+{
+	char *a2l, *a2lcmd, *s, buf[BUFSIZ];
+	struct relax_spot *p;
+	struct backtrace *b;
+	struct location *l;
+	struct mapping *m;
+	struct stat sbuf;
+	int ret, depth;
+	FILE *fp;
+
+	/*
+	 * Fill the mapping cache with one location record per
+	 * distinct PC value mentioned for each mapping.  The basic
+	 * idea is to exec a single addr2line instance for all PCs
+	 * belonging to any given mapping, instead of one instance per
+	 * call site in each and every frame. This way, we may run
+	 * slackspot on low-end targets with limited CPU horsepower,
+	 * without going for unreasonably long coffee breaks.
+	 */
+	for (p = spot_list; p; p = p->next) {
+		for (depth = 0; depth < p->depth; depth++) {
+			b = p->backtrace + depth;
+			l = find_location(b->mapping->locs, b->pc);
+			if (l) {
+				/* PC found in mapping cache. */
+				b->where = l;
+				continue;
+			}
+
+			l = malloc(sizeof(*l));
+			if (l == NULL)
+				goto no_mem;
+
+			l->pc = b->pc;
+			l->function = NULL;
+			l->file = NULL;
+			l->lineno = 0;
+			b->where = l;
+			l->next = b->mapping->locs;
+			b->mapping->locs = l;
+		}
+	}
+
+	/*
+	 * For each mapping, try resolving PC values as source
+	 * locations.
+	 */
+	for (m = mapping_list; m; m = m->next) {
+		if (*m->name == '?')
+			continue;
+
+		ret = stat(m->name, &sbuf);
+		if (ret || !S_ISREG(sbuf.st_mode))
+			continue;
+
+		ret = asprintf(&a2l,
+			       "%saddr2line --demangle --inlines --functions --exe=%s",
+			       toolchain_prefix, m->name);
+		if (ret < 0)
+			goto no_mem;
+
+		for (l = m->locs, s = a2l, a2lcmd = NULL; l; l = l->next) {
+			ret = asprintf(&a2lcmd, "%s 0x%lx", s, l->pc);
+			if (ret < 0)
+				goto no_mem;
+			free(s);
+			s = a2lcmd;
+		}
+
+		fp = popen(a2lcmd, "r");
+		if (fp == NULL)
+			error(1, errno, "cannot run %s", a2lcmd);
+
+		for (l = m->locs; l; l = l->next) {
+			ret = fscanf(fp, "%ms\n", &l->function);
+			if (ret != 1)
+				goto bad_output;
+			/*
+			 * Don't trust fscanf range specifier, we may
+			 * have colons in the pathname.
+			 */
+			s = fgets(buf, sizeof(buf), fp);
+			if (s == NULL)
+				goto bad_output;
+			s = strrchr(s, ':');
+			if (s == NULL)
+				continue;
+			*s++ = '\0';
+			if (strcmp(buf, "??")) {
+				l->lineno = atoi(s);
+				l->file = strdup(buf);
+			}
+		}
+
+		pclose(fp);
+		free(a2lcmd);
+	}
+
+	return;
+
+bad_output:
+	error(1, 0, "garbled addr2line output");
+no_mem:
+	error(1, ENOMEM, "resolve_locations failed");
+}
+
+static inline void put_location(struct relax_spot *p, int depth)
+{
+	struct backtrace *b = p->backtrace + depth;
+	const struct location *where = b->where;
+
+	printf("   #%-2d 0x%.*lx ", depth, LONG_BIT / 4, where->pc);
+	if (where->function)
+		printf("%s() ", where->function);
+	if (where->file) {
+		printf("in %s", where->file);
+		if (where->lineno)
+			printf(":%d", where->lineno);
+	} else {
+		if (where->function == NULL)
+			printf("??? ");
+		if (*b->mapping->name != '?')
+			printf("in [%s]", b->mapping->name);
+	}
+	putchar('\n');
+}
+
+static void display_spots(void)
+{
+	struct relax_spot *p;
+	int depth, hits;
+
+	for (p = spot_list, hits = 0; p; p = p->next) {
+		hits += p->hits;
+		if (match_filter_list(p)) {
+			filtered_count++;
+			continue;
+		}
+		printf("\nThread[%d] \"%s\" started by %s",
+		       p->pid, p->thread_name, p->exe_path);
+		if (p->hits > 1)
+			printf(" (%d times)", p->hits);
+		printf(":\n");
+		printf("Caused by: %s\n", p->reason);
+		for (depth = 0; depth < p->depth; depth++)
+			put_location(p, depth);
+	}
+
+	if (filtered_count)
+		printf("\n(%d spots filtered out)\n",
+		       filtered_count);
+	if (hits < spot_count)
+		printf("\nWARNING: only %d/%d spots retrieved (some were lost)\n",
+		       hits, spot_count);
+}
+
+static void usage(void)
+{
+	fprintf(stderr, "usage: slackspot [CROSS_COMPILE=<toolchain-prefix>] [options]\n");
+	fprintf(stderr, "   --file <file>				use trace file\n");
+	fprintf(stderr, "   --path <dir[:dir...]>			set search path for exec files\n");
+	fprintf(stderr, "   --filter-in <name=exp[,name...]>		exclude non-matching spots\n");
+	fprintf(stderr, "   --filter <name=exp[,name...]>		alias for --filter-in\n");
+	fprintf(stderr, "   --filter-out <name=exp[,name...]>		exclude matching spots\n");
+	fprintf(stderr, "   --help					print this help\n");
+}
+
+int main(int argc, char *const argv[])
+{
+	const char *trace_file, *filters;
+	const char *ldpath;
+	int c, lindex, ret;
+	FILE *fp;
+
+	trace_file = NULL;
+	ldpath = NULL;
+	filters = NULL;
+	toolchain_prefix = getenv("CROSS_COMPILE");
+	if (toolchain_prefix == NULL)
+		toolchain_prefix = "";
+
+	for (;;) {
+		c = getopt_long_only(argc, argv, "", base_options, &lindex);
+		if (c == EOF)
+			break;
+		if (c == '?') {
+			usage();
+			return EINVAL;
+		}
+		if (c > 0)
+			continue;
+
+		switch (lindex) {
+		case help_opt:
+			usage();
+			exit(0);
+		case file_opt:
+			trace_file = optarg;
+			break;
+		case path_opt:
+			ldpath = optarg;
+			break;
+		case filter_out_opt:
+			filter_not = 1;
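+			/* fall through */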
+		case filter_in_opt:
+		case filter_opt:
+			filters = optarg;
+			break;
+		default:
+			return EINVAL;
+		}
+	}
+
+	fp = stdin;
+	if (trace_file == NULL) {
+		if (isatty(fileno(stdin))) {
+			trace_file = "/proc/xenomai/debug/relax";
+			goto open;
+		}
+	} else if (strcmp(trace_file, "-")) {
+	open:
+		fp = fopen(trace_file, "r");
+		if (fp == NULL)
+			error(1, errno, "cannot open trace file %s",
+			      trace_file);
+	}
+
+	ret = build_filter_list(filters);
+	if (ret)
+		error(1, 0, "bad filter expression: %s", filters);
+
+	build_ldpath_list(ldpath);
+	read_spots(fp);
+
+	if (spot_list == NULL) {
+		fputs("no slacker\n", stderr);
+		return 0;	/* This is not an error. */
+	}
+
+	resolve_spots();
+	display_spots();
+
+	return 0;
+}

--
Gitblit v1.6.2